input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
c in node:
repl_super(node, c, node.parents[0], gets, sets, methods)
traverse(result, ClassNode, visit)
flatten_statementlists(result, typespace)
def expand_requirejs_class(typespace, cls):
    """Lower a ClassNode into a RequireJS-style constructor function.

    Builds a FunctionNode for the constructor (generating a default one
    that chains to the parent constructors when the class doesn't define
    its own), prepends property initializers and Object.defineProperty
    calls for getters/setters, and emits a prototype object holding the
    remaining methods.  Returns a StatementList containing the var
    declaration and the prototype assignment.  Single inheritance only.
    """
    node = FunctionNode(cls.name, 0)
    params = ExprListNode([])
    slist = StatementList()
    vars = []
    cls_scope = {}

    # -- properties: collect every declared variable.  A VarDeclNode may
    #    carry extra declarations in children[2:]; split those off so each
    #    property is handled individually.
    for c in cls:
        if type(c) == VarDeclNode:
            cls_scope[c.val] = c
            cs = c[2:]
            c.children = c.children[:2]
            vars.append(c)
            for c2 in cs:
                cls_scope[c2.val] = c2
                vars.append(c2)

    # -- methods: collect all of them.  The constructor stays out of
    #    cls_scope but must remain in the methods list so the constructor
    #    search below can find it.
    methods = []
    for c in cls:
        if type(c) in [MethodNode, MethodGetter, MethodSetter]:
            if c.name != "constructor":
                cls_scope[c.name] = c
            methods.append(c)

    if glob.g_validate_classes:
        validate_class_this_refs(typespace, cls, cls_scope)

    # -- find the constructor method, if any
    found_con = False
    con = None
    for m in methods:
        if m.name == "constructor":
            if found_con: raise SyntaxError("Cannot have multiple constructor methods")
            if type(m) != MethodNode: raise SyntaxError("Constructors cannot be get/setters")
            found_con = True
            params = m[0]
            slist = m[1]
            con = m

    if found_con == False:
        # build a default (empty) constructor
        m = MethodNode("constructor")
        print("generating default constructor...")
        params = ExprListNode([])
        slist = StatementList()
        m.add(params)
        m.add(slist)
        con = m

    # -- prepend "this.<name> = <value>" for each declared property;
    #    reversed so prepending preserves declaration order
    vars.reverse()
    for c in vars:
        val = c[0]
        if type(val) == ExprNode and len(val) == 0:
            # no initializer; leave the slot undefined
            continue
        a = AssignNode(BinOpNode("this", c.val, "."), val)
        slist.prepend(a)

    # -- collect getters/setters, keyed by property name
    gets = {}
    sets = {}
    props = set()
    for m in methods:
        if m.name == "constructor": continue
        if type(m) == MethodGetter:
            gets[m.name] = m
            props.add(m.name)
        if type(m) == MethodSetter:
            sets[m.name] = m
            props.add(m.name)

    def to_exprfunc(method):
        # Turn a method body into an anonymous function expression.
        f = FunctionNode("(anonymous)", 0)
        f.is_anonymous = True
        f.children = method.children
        for c in f.children:
            c.parent = f
        f.type = method.type
        f.line = method.line
        f.lexpos = method.lexpos
        return f

    def gen_prop_define(prop, gets, sets, flags=None):
        # Emit Object.defineProperty(this, "<prop>", {get: ..., set: ...}).
        # Built by hand because this runs *within* the parser, where
        # js_parse() isn't available.  `flags` is currently unused; the
        # mutable default [] was replaced with None.
        name_expr = BinOpNode(IdentNode("Object"), IdentNode("defineProperty"), ".")
        fcall = FuncCallNode(name_expr)
        exprlist = ExprListNode([])
        fcall.add(exprlist)
        params = ObjLitNode()
        if prop in gets:
            # bug fix: was gets[p]/sets[p], which referenced a name from an
            # enclosing scope instead of the `prop` parameter
            an = AssignNode(IdentNode("get"), to_exprfunc(gets[prop]))
            params.add(an)
        if prop in sets:
            an = AssignNode(IdentNode("set"), to_exprfunc(sets[prop]))
            params.add(an)
        exprlist.add(IdentNode("this"))
        exprlist.add(StrLitNode('"%s"' % prop))
        exprlist.add(params)
        return fcall

    def gen_method(cls, m):
        # Wrap a method as "name = function(...){...}"; static methods are
        # routed through util.static_method.
        f = FunctionNode(m.name)
        f.children = m.children
        f.name = "(anonymous)"
        f.is_anonymous = True
        for c in f.children:
            c.parent = f
        if not m.is_static:
            an = AssignNode(IdentNode(m.name), f)
            f = an
        else:
            f2 = FuncCallNode(IdentNode("util.static_method"))
            f2.add(f)
            an = AssignNode(IdentNode(m.name), f2)
            f = an
        return f

    for p in props:
        n = gen_prop_define(p, gets, sets)
        slist.prepend(n)

    if found_con == False:
        # no user constructor: chain to the parent constructors hackishly
        # via Parent.apply(this, arguments)
        lst = list(cls.parents)
        lst.reverse()
        for p in lst:
            if type(p) == str: p = IdentNode(p)
            bn = BinOpNode(p, "apply", ".")
            args = ExprListNode([IdentNode("this"), IdentNode("arguments")])
            fn = FuncCallNode(bn)
            fn.add(args)
            slist.prepend(fn)

    node.add(params)
    node.add(slist)
    node.name = "(anonymous)"
    node.is_anonymous = True

    # var <name> = exports.<name> = function(...) {...}
    an = AssignNode(BinOpNode("exports", cls.name, "."), node)
    vn = VarDeclNode(an, local=True)
    vn.val = cls.name
    node = vn

    # everything below lives outside of the constructor function
    slist = StatementList()
    slist.add(node)
    node = slist

    proto = RJSObjLitNode()
    if len(cls.parents) != 0:
        # for requirejs, just do single inheritance
        if len(cls.parents) > 1:
            typespace.error("The type system we use for RequireJS doesn't support multiple inheritance", cls)
        p = cls.parents[0]
        fn = FuncCallNode(IdentNode("util.inherit"))
        fn.add(ExprListNode([IdentNode(cls.name), p, proto]))
        slist.add(AssignNode(BinOpNode(cls.name, "prototype", "."), fn))
    else:
        fn = FuncCallNode(IdentNode("util.init_prototype"))
        fn.add(ExprListNode([IdentNode(cls.name), proto]))
        slist.add(AssignNode(BinOpNode(cls.name, "prototype", "."), fn))

    # -- generate the prototype methods (everything but the constructor)
    for m in cls:
        if type(m) != MethodNode: continue
        if m.name == "constructor": continue
        n = gen_method(cls, m)
        proto.add(n)

    return node
def expand_requirejs_classes(result, typespace):
    """Rewrite every ClassNode in *result* into RequireJS-compatible code."""
    def replace_class(cnode):
        cnode.parent.replace(cnode, expand_requirejs_class(typespace, cnode))
    traverse(result, ClassNode, replace_class)
    flatten_statementlists(result, typespace)
class VarBinding:
    """Binding of a variable name to its declaring AST node and type info."""

    def __init__(self, node, name, type):
        self.node = node  # AST node that declares the variable
        self.name = name  # variable name
        self.type = type  # type record (but see NOTE below)

    def copy(self):
        # Shallow copy; node/name/type references are shared.
        return VarBinding(self.node, self.name, self.type)

    # NOTE(review): every method below operates on self.types (plural),
    # which is never assigned in this class -- __init__ only sets
    # self.type.  As written these would raise AttributeError; presumably
    # a subclass or caller is expected to supply self.types.  Confirm
    # before relying on them.
    def add(self, type):
        self.types.add(type)

    def remove(self, type):
        self.types.remove(type)

    def __getitem__(self, i):
        return self.types[i]

    def __setitem__(self, i, v):
        self.types[i] = v

    def __len__(self):
        return len(self.types)
class NodeScope:
    """A dict-like variable scope with push/pop snapshots.

    The active mapping lives in self.scope; push() stores it on
    scopestack and continues with a shallow copy, so later bindings can
    be discarded with pop().  Child scopes register themselves with
    their parent so the scope tree can be walked afterwards.
    """

    def __init__(self, parent=None):
        self.scopestack = []
        self.scope = {}
        self.childscopes = []
        if parent is not None:
            parent.childscopes.append(self)
        self.parent = parent

    def __str__(self):
        return str(self.scope.keys())

    def __repr__(self):
        return str(self)

    def push(self):
        """Snapshot the current bindings and continue with a shallow copy."""
        self.scopestack.append(self.scope)
        self.scope = dict(self.scope)
        if hasattr(glob, "g_debug_typeinfer") and glob.g_debug_typeinfer:
            print("===pushing...===")

    def pop(self):
        """Restore the last pushed bindings; return the discarded mapping."""
        if hasattr(glob, "g_debug_typeinfer") and glob.g_debug_typeinfer:
            print("===popping...===")
        discarded = self.scope
        self.scope = self.scopestack.pop(-1)
        return discarded

    # --- mapping protocol, all delegated to the active scope dict ---

    def __getitem__(self, key):
        return self.scope[key]

    def __setitem__(self, key, value):
        self.scope[key] = value

    def __contains__(self, key):
        return key in self.scope

    def __delitem__(self, key):
        del self.scope[key]

    def __len__(self):
        return len(self.scope)

    def __iter__(self):
        return iter(self.scope)

    def keys(self):
        return self.scope.keys()

    def values(self):
        return self.scope.values()
class NodeVisit:
    """Base AST visitor that dispatches on a node's class name.

    Subclasses implement methods named after node types (e.g. IfNode);
    traverse() calls the matching handler, or recurses into .children
    when none exists.  Subclasses must define self.required_nodes, a
    list of type names that MUST have handlers.
    """
    def __init__(self):
        pass

    def traverse(self, node, scope=None, tlevel=0):
        """Visit *node* recursively.

        :param node: AST node; needs .children when no handler matches
        :param scope: propagated scope mapping; defaults to a fresh dict
        :param tlevel: recursion/indentation level
        :raises RuntimeError: when called without a scope below the top
            level, or when a required node type has no handler
        """
        if scope == None and tlevel > 0:
            raise RuntimeError("NodeVisit.traverse called without scope")
        if scope == None:
            scope = {}
        # (a second "if scope == None: scope = NodeScope()" branch was
        # unreachable -- the line above always binds scope -- and has
        # been removed)
        typestr = type(node).__name__
        if not hasattr(self, typestr) and typestr in self.required_nodes:
            # bug fix: the message was passed as ("...%s", typestr) and so
            # was never interpolated; format it explicitly
            raise RuntimeError(
                "Unimplemented node visit for node type %s" % typestr)
        if not hasattr(self, typestr):
            for c in node.children:
                self.traverse(c, scope, tlevel)
        else:
            getattr(self, typestr)(node, scope, self.traverse, tlevel)
def tab2(t):
    """Return an indentation string of *t* spaces."""
    return " " * t
class RequireJSWriter (NodeVisit):
def __init__(self):
self.required_nodes = []
NodeVisit.__init__(self)
self.buf = ""
def o(self, s):
self.buf += str(s)
def traverse(self, node, scope=None, tlevel=-1):
return NodeVisit.traverse(self, node, scope, tlevel)
def endstatem(self, node):
sb = self.buf.strip()
ret = not sb.endswith("\n")
ret = ret and(not sb.endswith("}") or type(node) in [AssignNode, BinOpNode])
return ret
def IdentNode(self, node, scope, t, tlevel):
self.o(node.val)
def NumLitNode(self, node, scope, t, tlevel):
self.o(node.val)
def StatementList(self, node, scope, t, tlevel):
t1 = tab2(tlevel)
t2 = tab2(tlevel+1)
for c in node:
self.o(t1)
t(c, scope, tlevel+1)
if self.endstatem(c):
self.o(";");
self.o("\n");
def ForLoopNode(self, node, scope, t, tlevel):
t1 = tab2(tlevel)
self.o("for (")
t(node.children[0], scope, tlevel)
self.o(") {\n")
t(node.children[1], scope, tlevel+1)
self.o(t1+"}\n");
    def ForInNode(self, node, scope, t, tlevel):
        # Header of a for-in loop: "<var> in <iterable>".
        t(node[0], scope, tlevel)
        self.o(" in ")
        # NOTE(review): node[1] is stringified via self.o rather than
        # traversed with t(...) like node[0] -- looks asymmetric; confirm
        # this relies on the node's __str__ producing valid output.
        self.o(node[1])
def ForCNode(self, node, scope, t, tlevel):
t(node[0], scope, tlevel)
self.o("; ")
t(node[1], scope, tlevel)
self.o("; ")
t(node[2], scope, tlevel)
def IfNode(self, node, scope, t, tlevel):
t1 = tab2(tlevel)
self.o("if (")
t(node[0], scope, tlevel)
self.o(") {\n")
t(node[1], scope, tlevel+1)
self.o(t1+"}\n");
def ElseNode(self, node, scope, t, tlevel):
t1 = tab2(tlevel)
if self.buf.endswith("\n"):
self.buf = self.buf[:-2]
self.o(" else ")
if type(node[0]) == StatementList:
self.o("{\n")
t(node[0], scope, tlevel+1);
self.o(t1+"}\n");
else:
t(node[0], scope, tlevel)
def BinOpNode(self, node, scope, t, tlevel):
t(node[0], scope, tlevel)
if node.op in ["in", "instanceof", "&&", "||", "<", ">", "<=", ">=", "==", "!=", "===", "!=="]:
self.o(" %s "%node.op)
else:
self.o(node.op)
t(node[1], scope, tlevel)
def NegateNode(self, node, scope, t, tlevel):
self.o("-")
t(node[0], scope, tlevel)
def AssignNode(self, node, scope, t, tlevel):
t(node[0], scope, tlevel)
self.o(" %s "%node.mode)
t(node[1], scope, tlevel)
def WhileNode(self, node, scope, t, tlevel):
t1 = tab2(tlevel)
self.o("while (")
t(node[0], scope, tlevel)
self.o(") {\n")
t(node[1], scope, tlevel+1)
self.o(t1+"}\n")
    def FunctionNode(self, node, scope, t, tlevel):
        # Emit a function: "function [name](<params>) { <body> }".
        self.o("function ")
        if not node.is_anonymous:
            self.o(node.name)
        self.o("(")
        t(node[0], scope, tlevel)  # parameter list
        self.o(") {\n")
        t1 = tab2(tlevel+1)
        for c in node[1:]:
            self.o(t1)
            t(c, scope, tlevel+1)
            # NOTE(review): this passes the function node, not the child
            # statement `c`, to endstatem -- StatementList uses the child;
            # confirm whether this is intentional.
            if self.endstatem(node):
                self.o(";")
            self.o("\n")
        self.o(tab2(tlevel)+"}\n")
def ExprListNode(self, node, scope, t, tlevel):
for i, c in enumerate(node):
if i > 0:
self.o(", ")
t(c, scope, tlevel)
def VarDeclNode(self, node, scope, t, tlevel):
if "global" in node.modifiers: return
if "local" in node.modifiers:
self.o("var ")
self.o(node.val)
if len(node[0].gen_js(0).strip()) > 0: #type(node[0]) not in [ExprListNode, ExprListNode] or len(node[0]) > 0:
self.o(" = ")
t(node[0], scope, tlevel)
def BreakNode(self, node, scope, t, tlevel):
self.o("break");
def YieldNode(self, node, scope, t, tlevel):
self.o("yield");
if len(node) != 0:
self.o(" ")
t(node[0], scope, tlevel)
def ContinueNode(self, node, scope, t, tlevel):
self.o("continue");
def ReturnNode(self, node, scope, t, tlevel):
self.o("return");
if | |
off with this setting
self.offhour = self.conf.get_int("schedule", "offhour") # Use 24 hour time. Set hour to turn off display
self.offminutes = self.conf.get_int("schedule", "offminutes") # Set minutes to turn off display
self.onhour = self.conf.get_int("schedule", "onhour") # Use 24 hour time. Set hour to turn on display
self.onminutes = self.conf.get_int("schedule", "onminutes") # Set minutes to on display
# Set number of MINUTES to turn map on temporarily during sleep mode
self.tempsleepon = self.conf.get_int("schedule", "tempsleepon")
# Number of LED pixels. Change this value to match the number of LED's being used on map
self.LED_COUNT = self.conf.get_int("default", "led_count")
# Misc settings
# 0 = No, 1 = Yes, use wipes. Defined by configurator
self.usewipes = self.conf.get_int("rotaryswitch", "usewipes")
# 1 = RGB color codes. 0 = GRB color codes. Populate color codes below with normal RGB codes and script will change if necessary
self.rgb_grb = self.conf.get_int("lights", "rgb_grb")
# Used to determine if board should reboot every day at time set in setting below.
self.use_reboot = self.conf.get_int("modules", "use_reboot")
self.time_reboot = self.conf.get_string("default", "nightly_reboot_hr")
self.homeport_colors = ast.literal_eval(self.conf.get_string("colors", "homeport_colors"))
# ************************************************************
# * End of User defined settings. Normally shouldn't change *
# * any thing under here unless you are confident in change. *
# ************************************************************
# 0 = do not turn refresh off, 1 = turn off the blanking refresh of the LED string between FAA updates.
self.turnoffrefresh = 1
# LED Cycle times - Can change if necessary.
# These cycle times all added together will equal the total amount of time the LED takes to finish displaying one cycle.
self.cycle0_wait = .9
# Each cycle, depending on flight category, winds and weather reported will have various colors assigned.
self.cycle1_wait = .9
# For instance, VFR with 20 kts winds will have the first 3 cycles assigned Green and the last 3 Black for blink effect.
self.cycle2_wait = .08
# The cycle times then reflect how long each color cycle will stay on, producing blinking or flashing effects.
self.cycle3_wait = .1
# Lightning effect uses the short intervals at cycle 2 and cycle 4 to create the quick flash. So be careful if you change them.
self.cycle4_wait = .08
self.cycle5_wait = .5
# List of METAR weather categories to designate weather in area. Many Metars will report multiple conditions, i.e. '-RA BR'.
# The code pulls the first/main weather reported to compare against the lists below. In this example it uses the '-RA' and ignores the 'BR'.
# See https://www.aviationweather.gov/metar/symbol for descriptions. Add or subtract codes as desired.
# Thunderstorm and lightning
self.wx_lghtn_ck = ["TS", "TSRA", "TSGR", "+TSRA",
"TSRG", "FC", "SQ", "VCTS", "VCTSRA", "VCTSDZ", "LTG"]
# Snow in various forms
self.wx_snow_ck = ["BLSN", "DRSN", "-RASN", "RASN", "+RASN", "-SN", "SN", "+SN",
"SG", "IC", "PE", "PL", "-SHRASN", "SHRASN", "+SHRASN", "-SHSN", "SHSN", "+SHSN"]
# Rain in various forms
self.wx_rain_ck = ["-DZ", "DZ", "+DZ", "-DZRA", "DZRA", "-RA",
"RA", "+RA", "-SHRA", "SHRA", "+SHRA", "VIRGA", "VCSH"]
# Freezing Rain
self.wx_frrain_ck = ["-FZDZ", "FZDZ",
"+FZDZ", "-FZRA", "FZRA", "+FZRA"]
# Dust Sand and/or Ash
self.wx_dustsandash_ck = ["DU", "SA", "HZ", "FU",
"VA", "BLDU", "BLSA", "PO", "VCSS", "SS", "+SS", ]
# Fog
self.wx_fog_ck = ["BR", "MIFG", "VCFG", "BCFG", "PRFG", "FG", "FZFG"]
# FIXME: Needs to tie to the list of disabled LEDs
self.nullpins = []
# list definitions
# Used to create weather designation effects.
self.cycle_wait = [self.cycle0_wait, self.cycle1_wait, self.cycle2_wait,
self.cycle3_wait, self.cycle4_wait, self.cycle5_wait]
self.cycles = [0, 1, 2, 3, 4, 5] # Used as a index for the cycle loop.
self.legend_pins = [self.conf.get_int("lights", "leg_pin_vfr"),
self.conf.get_int("lights", "leg_pin_mvfr"),
self.conf.get_int("lights", "leg_pin_ifr"),
self.conf.get_int("lights", "leg_pin_lifr"),
self.conf.get_int("lights", "leg_pin_nowx"),
self.conf.get_int("lights", "leg_pin_hiwinds"),
self.conf.get_int("lights", "leg_pin_lghtn"),
self.conf.get_int("lights", "leg_pin_snow"),
self.conf.get_int("lights", "leg_pin_rain"),
self.conf.get_int("lights", "leg_pin_frrain"),
self.conf.get_int("lights", "leg_pin_dustsandash"),
self.conf.get_int("lights", "leg_pin_fog")] # Used to build legend display
# Setup for IC238 Light Sensor for LED Dimming, does not need to be commented out if sensor is not used, map will remain at full brightness.
# For more info on the sensor visit; http://www.uugear.com/portfolio/using-light-sensor-module-with-raspberry-pi/
# set mode to BCM and use BCM pin numbering, rather than BOARD pin numbering.
GPIO.setmode(GPIO.BCM)
# set pin 4 as input for light sensor, if one is used. If no sensor used board remains at high brightness always.
GPIO.setup(4, GPIO.IN)
# set pin 22 to momentary push button to force FAA Weather Data update if button is used.
GPIO.setup(22, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# Setup GPIO pins for rotary switch to choose between Metars, or Tafs and which hour of TAF
# Not all the pins are required to be used. If only METARS are desired, then no Rotary Switch is needed.
# set pin 0 to ground for METARS
GPIO.setup(0, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# set pin 5 to ground for TAF + 1 hour
GPIO.setup(5, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# set pin 6 to ground for TAF + 2 hours
GPIO.setup(6, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# set pin 13 to ground for TAF + 3 hours
GPIO.setup(13, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# set pin 19 to ground for TAF + 4 hours
GPIO.setup(19, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# set pin 26 to ground for TAF + 5 hours
GPIO.setup(26, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# set pin 21 to ground for TAF + 6 hours
GPIO.setup(21, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# set pin 20 to ground for TAF + 7 hours
GPIO.setup(20, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# set pin 16 to ground for TAF + 8 hours
GPIO.setup(16, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# set pin 12 to ground for TAF + 9 hours
GPIO.setup(12, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# set pin 1 to ground for TAF + 10 hours
GPIO.setup(1, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# set pin 7 to ground for TAF + 11 hours
GPIO.setup(7, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# LED self.strip configuration:
self.LED_PIN = 18 # GPIO pin connected to the pixels (18 uses PWM!).
# LED signal frequency in hertz (usually 800khz)
self.LED_FREQ_HZ = 800000
self.LED_DMA = 5 # DMA channel to use for generating signal (try 5)
# True to invert the signal (when using NPN transistor level shift)
self.LED_INVERT = False
self.LED_CHANNEL = 0 # set to '1' for GPIOs 13, 19, 41, 45 or 53
self.LED_STRIP = ws.WS2811_STRIP_GRB # Strip type and color ordering
# 255 # starting brightness. It will be changed below.
self.LED_BRIGHTNESS = self.conf.get_int("lights", "bright_value")
# Setup paths for restart on change routine. Routine from;
# https://blog.petrzemek.net/2014/03/23/restarting-a-python-script-within-itself
# self.LOCAL_CONFIG_FILE_PATH = '/NeoSectional/config.py'
# self.WATCHED_FILES = [self.LOCAL_CONFIG_FILE_PATH, __file__]
# self.WATCHED_FILES_MTIMES = [(f, getmtime(f))
# for f in self.WATCHED_FILES]
# debugging.info(
# 'Watching ' + self.LOCAL_CONFIG_FILE_PATH + ' For Change')
# Timer calculations
self.lights_out = time_(self.conf.get_int("schedule", "offhour"),
self.conf.get_int("schedule", "offminutes"), 0)
self.timeoff = self.lights_out
self.lights_on = time_(self.onhour, self.onminutes, 0)
self.end_time = self.lights_on
# Set flag for next round if sleep timer is interrupted by button push.
self.temp_lights_on = 0
# MOS Data Settings
# location of the downloaded local MOS file.
self.mos_filepath = '/NeoSectional/data/GFSMAV'
self.categories = ['HR', 'CLD', 'WDR', 'WSP', 'P06',
'T06', 'POZ', 'POS', 'TYP', 'CIG', 'VIS', 'OBV']
self.obv_wx = {'N': 'None', 'HZ': 'HZ', 'BR': 'RA',
'FG': 'FG', 'BL': 'HZ'} # Decode from MOS to TAF/METAR
# Decode from MOS to TAF/METAR
self.typ_wx = {'S': 'SN', 'Z': 'FZRA', 'R': 'RA'}
# Outer Dictionary, keyed by airport ID
self.mos_dict = collections.OrderedDict()
# Middle Dictionary, keyed by hour of forcast. Will contain a list of data for categories.
self.hour_dict = collections.OrderedDict()
# Used to determine that an airport from our airports file is currently being read.
self.ap_flag = 0
# Used by Heat Map. Do not change - assumed by routines below.
self.low_visits = (0, 0, 255) # Start with Blue - Do Not Change
# Increment to Red as visits get closer to 100 - Do Not Change
self.high_visits = (255, 0, 0)
self.fadehome = -1 # start with neg number
self.homeap = self.conf.get_string("colors", "color_vfr") # If 100, then home airport - designate with Green
# color_fog2 # (10, 10, 10) # dk grey to denote airports never visited
self.no_visits = (20, 20, 20)
# Misc Settings
# Toggle used for logging when ambient sensor changes from bright | |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
cmdline utility to perform cluster reconnaissance
"""
from __future__ import print_function
from eventlet.green import socket
from six import string_types
from six.moves.urllib.parse import urlparse
from swift.common.utils import (
SWIFT_CONF_FILE, md5_hash_for_file, set_swift_dir)
from swift.common.ring import Ring
from swift.common.storage_policy import POLICIES, reload_storage_policies
import eventlet
import json
import optparse
import time
import sys
import six
import os
if six.PY3:
from eventlet.green.urllib import request as urllib2
else:
from eventlet.green import urllib2
def seconds2timeunit(seconds):
    """Scale a duration in seconds up to the largest convenient unit.

    :param seconds: duration in seconds
    :returns: (elapsed, unit) where unit is 'seconds', 'minutes',
              'hours' or 'days'
    """
    elapsed, unit = seconds, 'seconds'
    for divisor, next_unit in ((60, 'minutes'), (60, 'hours'), (24, 'days')):
        if elapsed < divisor:
            break
        elapsed = elapsed / float(divisor)
        unit = next_unit
    return elapsed, unit
def size_suffix(size):
    """Return *size* (bytes) scaled to a human-readable decimal unit string."""
    units = ['bytes', 'kB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB']
    for unit in units:
        if size < 1000:
            return "%s %s" % (size, unit)
        size = size // 1000
    # beyond yottabytes: keep dividing but stick with the largest unit
    return "%s %s" % (size, units[-1])
class Scout(object):
    """
    Fetch swift recon telemetry from individual hosts over HTTP.
    """

    def __init__(self, recon_type, verbose=False, suppress_errors=False,
                 timeout=5):
        # recon_type: which recon check to request from each host
        self.recon_type = recon_type
        self.verbose = verbose
        self.suppress_errors = suppress_errors
        self.timeout = timeout

    def scout_host(self, base_url, recon_type):
        """
        Perform the actual HTTP request to obtain swift recon telemetry.
        :param base_url: the base url of the host you wish to check. str of the
                         format 'http://127.0.0.1:6200/recon/'
        :param recon_type: the swift recon check to request.
        :returns: tuple of (recon url used, response body, and status)
        """
        url = base_url + recon_type
        try:
            raw = urllib2.urlopen(url, timeout=self.timeout).read()
            if six.PY3 and isinstance(raw, six.binary_type):
                raw = raw.decode('utf8')
            content = json.loads(raw)
            if self.verbose:
                print("-> %s: %s" % (url, content))
            status = 200
        except urllib2.HTTPError as err:
            # the server answered, but with an HTTP error code
            if not self.suppress_errors or self.verbose:
                print("-> %s: %s" % (url, err))
            content = err
            status = err.code
        except (urllib2.URLError, socket.timeout) as err:
            # connection-level failure or timeout
            if not self.suppress_errors or self.verbose:
                print("-> %s: %s" % (url, err))
            content = err
            status = -1
        return url, content, status

    def scout(self, host):
        """
        Obtain telemetry from a host running the swift recon middleware.
        :param host: (ip, port) tuple to check
        :returns: tuple of (recon url used, response body, status, time start
                  and time end)
        """
        base_url = "http://%s:%s/recon/" % (host[0], host[1])
        started = time.time()
        url, content, status = self.scout_host(base_url, self.recon_type)
        finished = time.time()
        return url, content, status, started, finished

    def scout_server_type(self, host):
        """
        Obtain the Server response header by issuing an OPTIONS request.
        :param host: (ip, port) tuple to check
        :returns: tuple of (url, server type or error, status)
        """
        try:
            url = "http://%s:%s/" % (host[0], host[1])
            req = urllib2.Request(url)
            req.get_method = lambda: 'OPTIONS'
            conn = urllib2.urlopen(req)
            header = conn.info().get('Server')
            content = header.split('/')[0]
            status = 200
        except urllib2.HTTPError as err:
            if not self.suppress_errors or self.verbose:
                print("-> %s: %s" % (url, err))
            content = err
            status = err.code
        except (urllib2.URLError, socket.timeout) as err:
            if not self.suppress_errors or self.verbose:
                print("-> %s: %s" % (url, err))
            content = err
            status = -1
        return url, content, status
class SwiftRecon(object):
"""
Retrieve and report cluster info from hosts running recon middleware.
"""
def __init__(self):
self.verbose = False
self.suppress_errors = False
self.timeout = 5
self.pool_size = 30
self.pool = eventlet.GreenPool(self.pool_size)
self.check_types = ['account', 'container', 'object']
self.server_type = 'object'
def _gen_stats(self, stats, name=None):
"""Compute various stats from a list of values."""
cstats = [x for x in stats if x is not None]
if len(cstats) > 0:
ret_dict = {'low': min(cstats), 'high': max(cstats),
'total': sum(cstats), 'reported': len(cstats),
'number_none': len(stats) - len(cstats), 'name': name}
ret_dict['average'] = ret_dict['total'] / float(len(cstats))
ret_dict['perc_none'] = \
ret_dict['number_none'] * 100.0 / len(stats)
else:
ret_dict = {'reported': 0}
return ret_dict
def _print_stats(self, stats):
"""
print out formatted stats to console
:param stats: dict of stats generated by _gen_stats
"""
print('[%(name)s] low: %(low)d, high: %(high)d, avg: '
'%(average).1f, total: %(total)d, '
'Failed: %(perc_none).1f%%, no_result: %(number_none)d, '
'reported: %(reported)d' % stats)
def _ptime(self, timev=None):
"""
:param timev: a unix timestamp or None
:returns: a pretty string of the current time or provided time in UTC
"""
if timev:
return time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(timev))
else:
return time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
def get_hosts(self, region_filter, zone_filter, swift_dir, ring_names):
"""
Get a list of hosts in the rings.
:param region_filter: Only list regions matching given filter
:param zone_filter: Only list zones matching given filter
:param swift_dir: Directory of swift config, usually /etc/swift
:param ring_names: Collection of ring names, such as
['object', 'object-2']
:returns: a set of tuples containing the ip and port of hosts
"""
rings = [Ring(swift_dir, ring_name=n) for n in ring_names]
devs = [d for r in rings for d in r.devs if d]
if region_filter is not None:
devs = [d for d in devs if d['region'] == region_filter]
if zone_filter is not None:
devs = [d for d in devs if d['zone'] == zone_filter]
return set((d['ip'], d['port']) for d in devs)
    def get_ringmd5(self, hosts, swift_dir):
        """
        Compare local ring md5sums with those reported by each remote host.
        :param hosts: set of hosts to check. in the format of:
            set([('127.0.0.1', 6220), ('127.0.0.2', 6230)])
        :param swift_dir: The local directory with the ring files.
        """
        matches = 0
        errors = 0
        # collect the ring file names to verify: all object*.ring.gz files
        # for the object server type, otherwise the single <type>.ring.gz
        ring_names = set()
        if self.server_type == 'object':
            for ring_name in os.listdir(swift_dir):
                if ring_name.startswith('object') and \
                        ring_name.endswith('.ring.gz'):
                    ring_names.add(ring_name)
        else:
            ring_name = '%s.ring.gz' % self.server_type
            ring_names.add(ring_name)
        # md5 of each local ring file, keyed by file name
        rings = {}
        for ring_name in ring_names:
            rings[ring_name] = md5_hash_for_file(
                os.path.join(swift_dir, ring_name))
        recon = Scout("ringmd5", self.verbose, self.suppress_errors,
                      self.timeout)
        print("[%s] Checking ring md5sums" % self._ptime())
        if self.verbose:
            for ring_file, ring_sum in rings.items():
                print("-> On disk %s md5sum: %s" % (ring_file, ring_sum))
        for url, response, status, ts_start, ts_end in self.pool.imap(
                recon.scout, hosts):
            if status != 200:
                errors = errors + 1
                continue
            # a host matches only if every ring it reports (for our server
            # type) agrees with the local checksum
            success = True
            for remote_ring_file, remote_ring_sum in response.items():
                remote_ring_name = os.path.basename(remote_ring_file)
                if not remote_ring_name.startswith(self.server_type):
                    continue
                ring_sum = rings.get(remote_ring_name, None)
                if remote_ring_sum != ring_sum:
                    success = False
                    print("!! %s (%s => %s) doesn't match on disk md5sum" % (
                        url, remote_ring_name, remote_ring_sum))
            if not success:
                errors += 1
                continue
            matches += 1
            if self.verbose:
                print("-> %s matches." % url)
        print("%s/%s hosts matched, %s error[s] while checking hosts." % (
            matches, len(hosts), errors))
        print("=" * 79)
def get_swiftconfmd5(self, hosts, printfn=print):
"""
Compare swift.conf md5sum with that on remote hosts
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6220), ('127.0.0.2', 6230)])
:param printfn: function to print text; defaults to print()
"""
matches = 0
errors = 0
conf_sum = md5_hash_for_file(SWIFT_CONF_FILE)
recon = Scout("swiftconfmd5", self.verbose, self.suppress_errors,
self.timeout)
printfn("[%s] Checking swift.conf md5sum" % self._ptime())
if self.verbose:
printfn("-> On disk swift.conf md5sum: %s" % (conf_sum,))
for url, response, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status == 200:
if response[SWIFT_CONF_FILE] != conf_sum:
printfn("!! %s (%s) doesn't match on disk md5sum" %
(url, response[SWIFT_CONF_FILE]))
else:
matches = matches + 1
if self.verbose:
printfn("-> %s matches." % url)
else:
errors = errors + 1
printfn("%s/%s hosts matched, %s error[s] while checking hosts."
% (matches, len(hosts), errors))
printfn("=" * 79)
def async_check(self, hosts):
"""
Obtain and print async pending statistics
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6220), ('127.0.0.2', 6230)])
"""
scan = {}
recon = Scout("async", self.verbose, self.suppress_errors,
self.timeout)
print("[%s] Checking async pendings" % self._ptime())
for url, response, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status == 200:
scan[url] = response['async_pending']
stats = self._gen_stats(scan.values(), 'async_pending')
if stats['reported'] > 0:
self._print_stats(stats)
else:
print("[async_pending] - No hosts returned valid data.")
print("=" * 79)
def driveaudit_check(self, hosts):
"""
Obtain and print drive audit error statistics
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6220), ('127.0.0.2', 6230)]
"""
scan = {}
recon = Scout("driveaudit", self.verbose, self.suppress_errors,
self.timeout)
print("[%s] Checking drive-audit errors" % self._ptime())
for url, response, status, ts_start, ts_end in | |
from random import choice
from copy import deepcopy
from game_data import GameData
from agents import Agent
import numpy as np
import random
import pickle
import pandas as pd
class IsaacAgent(Agent):
def __init__(self, max_time=2, max_depth=300):
self.max_time = max_time
self.max_depth = max_depth
# self.heuristic = [
# [0], [0], [0], [0], [0], [0], [0],
# [0], [0], [0], [0], [0], [0], [0],
# [0], [0], [0], [0], [0], [0], [0],
# [0], [0], [0], [0], [0], [0], [0], # ...
# [0], [0], [-1], [-1], [-1], [0], [0], # odd player
# [0], [1, -1], [0], [0], [0], [1, -1], [0] # even player
# ]
self.heuristic = [
[0], [0], [0], [0], [0], [0], [0],
[0], [0], [1, -1], [2, -2], [1, -1], [0], [0],
[0], [0], [1, -2], [2, -2], [1, -2], [0], [0],
[0], [0], [3, -2], [3, -2], [3, -2], [0], [0],
[0], [0], [2, -3], [2, -3], [2, -3], [0], [0],
[0], [1, -1], [3, -3], [4, -4], [3, -3], [1, -1], [0]
]
self.game_data = None
self.model = pickle.load(open("./c4model.sav", 'rb'))
def get_name(self) -> str:
return "IsaacAgent"
def get_move(self, game_data) -> int:
self.game_data = game_data
rows_reversed_connect4_board = []
for row in list(game_data.game_board):
rows_reversed_connect4_board.append(row[::-1])
connect4_board = list(np.concatenate(rows_reversed_connect4_board).flat)[::-1]
for sn, sv in enumerate(connect4_board):
if sv == 0:
connect4_board[sn] = ' '
elif sv == 1:
connect4_board[sn] = 'R'
else:
connect4_board[sn] = 'B'
# self.print_board(connect4_board)
turn = self.player(connect4_board)
actions = self.actions(connect4_board)
best_action = random.choice(actions)
if turn == 'R':
# max player
local_best_min_v = -float('inf')
for action in actions:
self.current_depth = 0
min_v = self.min_value(self.result(connect4_board, action))
# print(f"Action: {action + 1}, Min Value: {min_v}")
if min_v > local_best_min_v:
local_best_min_v = min_v
best_action = action
else:
# min player
local_best_max_v = float('inf')
for action in actions:
self.current_depth = 0
max_v = self.max_value(self.result(connect4_board, action))
# print(f"Action: {action + 1}, Max Value: {max_v}")
if max_v < local_best_max_v:
local_best_max_v = max_v
best_action = action
return best_action
def print_board(self, board):
for l in range(0, 42, 7):
row = ''.join([board[l + i] + '|' for i in range(7)])
print(row[:13])
print('-+-+-+-+-+-+-')
def player(self, board):
return 'B' if board.count('R') > board.count('B') else 'R'
def is_tie(self, board):
return len([sq for sq in board if sq == ' ']) == 0
def utility(self, board):
return 0 if self.is_tie(board) else -1000 if self.player(board) == "R" else 1000
    def terminal(self, board):
        """Return True when *board* is finished (four-in-a-row or tie).

        The board is a flat 42-cell list, row-major, 7 columns per row.
        """
        # use modulo 7 to detect new row
        row = 0
        for sq in range(42):
            if sq % 7 == 0:
                row += 1
            # cells remaining to the right of sq within its row
            distance_to_new_row = 7 * row - (sq + 1)
            # smallest downward row-offset from sq that falls off the board
            distance_to_column_end = [i for i in range(6) if (sq + 1) + i * 7 > 35][0]
            if board[sq] == ' ':
                continue
            # 4 horizontally
            if distance_to_new_row >= 3 and board[sq] == board[sq + 1] and board[sq] == board[sq + 2] and board[sq] == board[sq + 3]:
                return True
            # 4 vertically
            elif distance_to_column_end > 2 and board[sq] == board[sq + 7] and board[sq] == board[sq + 14] and board[sq] == board[sq + 21]:
                return True
            # 4 diagonally (down-right)
            elif distance_to_new_row >= 3 and distance_to_column_end >= 2 and sq + 24 < len(board) and board[sq] == board[sq + 8] and board[sq] == board[sq + 16] and board[sq] == board[sq + 24]:
                return True
            # 4 diagonally (up-right; NOTE(review): the mixed >=/<= guards on
            # the two diagonal branches look asymmetric -- confirm coverage)
            elif distance_to_new_row >= 3 and distance_to_column_end <= 2 and 0 <= sq - 18 < len(board) and board[sq] == board[sq - 6] and board[sq] == board[sq - 12] and board[sq] == board[sq - 18]:
                return True
        return self.is_tie(board)
def actions(self, board):
return [sn for sn in range(7) if board[sn] == ' ']
def result(self, board, action):
result = board[:]
for r in range(6):
current_sq = board[action + 35 - r * 7]
if current_sq == ' ':
result[action + 35 - r * 7] = self.player(board)
break
return result
def count_two_in_row(self, board, player):
two_in_row = 0
row = 0
for sq in range(42):
if sq % 7 == 0:
row += 1
distance_to_new_row = 7 * row - (sq + 1)
distance_to_column_end = [i for i in range(6) if (sq + 1) + i * 7 > 35][0]
if board[sq] != player or board[sq].isdigit() or board[sq] == ' ':
continue
# 4 horizontally
if distance_to_new_row >= 3 and board[sq] == board[sq + 1]:
two_in_row += 1
# 4 vertically
elif distance_to_column_end > 2 and board[sq] == board[sq + 7]:
two_in_row += 1
# 4 diagonally
elif distance_to_new_row >= 3 and distance_to_column_end >= 2 and sq + 8 < len(board) and board[sq] == board[sq + 8]:
two_in_row += 1
elif distance_to_new_row >= 3 and distance_to_column_end <= 2 and 0 <= sq - 6 < len(board) and board[sq] == board[sq - 6]:
two_in_row += 1
return two_in_row
def count_three_in_row(self, board, player):
three_in_row = 0
row = 0
for sq in range(42):
if sq % 7 == 0:
row += 1
distance_to_new_row = 7 * row - (sq + 1)
distance_to_column_end = [i for i in range(6) if (sq + 1) + i * 7 > 35][0]
if board[sq] != player or board[sq].isdigit() or board[sq] == ' ':
continue
# 4 horizontally
if distance_to_new_row >= 3 and board[sq] == board[sq + 1] and board[sq] == board[sq + 2]:
three_in_row += 1
# 4 vertically
elif distance_to_column_end > 2 and board[sq] == board[sq + 7] and board[sq] == board[sq + 14]:
three_in_row += 1
# 4 diagonally
elif distance_to_new_row >= 3 and distance_to_column_end >= 2 and sq + 16 < len(board) and board[sq] == board[sq + 8] and board[sq] == board[sq + 16]:
three_in_row += 1
elif distance_to_new_row >= 3 and distance_to_column_end <= 2 and 0 <= sq - 12 < len(board) and board[sq] == board[sq - 6] and board[sq] == board[sq - 12]:
three_in_row += 1
return three_in_row
    def evaluate(self, board):
        """
        Heuristic:
        - Squares value:
        [0, 0, -1, -1, -1, 0, 0,
        0, 0, 2, 2, 2, 0, 0,
        0, 0, -2, -2, -2, 0, 0,
        0, 0, 3, 3, 3, 0, 0,
        0, 0, -3, -3, -3, 0, 0,
        0, 0, 1, 1, 1, 0, 0]
        - Include win squares of each player and where they are located.
        Heuristic based off Odd-Even strategy: https://www.youtube.com/watch?v=YqqcNjQMX18
        """
        total_score = 0
        # NOTE(review): self.heuristic is iterated here as a sequence of
        # per-square value lists (vn indexes the board square), but the flat
        # list of ints shown in the docstring would not support the inner
        # `for value in values` loop -- confirm the actual shape.
        for vn, values in enumerate(self.heuristic):
            for value in values:
                # Negative weights favour blue ('B'), positive favour red ('R').
                if value < 0 and board[vn] == 'B':
                    total_score += value
                elif value > 0 and board[vn] == 'R':
                    total_score += value
        # The blocks below are disabled experiments (n-in-a-row counting,
        # one-ply win lookahead, and an ML model evaluation); kept for reference.
        # three_in_row_modifier = 10
        # total_score += self.count_three_in_row(board, 'R') * three_in_row_modifier
        # total_score -= self.count_three_in_row(board, 'B') * three_in_row_modifier
        # total_score += self.count_two_in_row(board, 'R') * three_in_row_modifier
        # total_score -= self.count_two_in_row(board, 'B') * three_in_row_modifier
        # divisor = 5
        # for i in range(7):
        #     action_result = self.result(board, i)
        #     if self.terminal(action_result):
        #         total_score += self.utility(action_result) / divisor
        # print(total_score)
        # multiplier = 2
        # r_win_states = 0
        # b_win_states = 0
        # for i in range(7):
        #     action_result = self.result(board, i)
        #     if self.terminal(action_result):
        #         if self.utility(action_result) == 1000:
        #             r_win_states += 1
        #         else:
        #             b_win_states += 1
        # total_score += r_win_states * multiplier
        # total_score -= b_win_states * multiplier
        # if r_win_states >= 2:
        #     total_score += 400
        # elif b_win_states >= 2:
        #     total_score -= 400
        # print(f"Red Win States: {r_win_states}, Blue Win States: {b_win_states}")
        # multiplier = 30
        # conv_data = []
        # for sq in board:
        #     if sq.isdigit() or sq == ' ':
        #         conv_data.append(0)
        #     elif sq == 'R':
        #         conv_data.append(1)
        #     else:
        #         conv_data.append(-1)
        # c4_board = pd.Series(conv_data, index=[f"pos_{sn + 1}" for sn, sv in enumerate(board)])
        # total_score += self.model.predict([c4_board])[0][0]
        return total_score
    def min_value(self, board):
        """Minimax MIN node: return the smallest value achievable from *board*.

        Terminal positions are scored with utility(); once self.current_depth
        exceeds self.max_depth the heuristic evaluate() is used instead.
        """
        if self.terminal(board):
            return self.utility(board)
        if self.current_depth > self.max_depth:
            return self.evaluate(board)
        # NOTE(review): current_depth is incremented but never decremented on
        # the way back up the recursion, so it effectively counts expanded
        # nodes across the whole search rather than the depth of this branch
        # -- confirm this is the intended cutoff behaviour.
        self.current_depth += 1
        v = float('inf')
        for action in self.actions(board):
            max_v = self.max_value(self.result(board, action))
            v = min(v, max_v)
        return v
def max_value(self, board):
if self.terminal(board):
return self.utility(board)
if self.current_depth > self.max_depth:
return self.evaluate(board)
self.current_depth += 1
v = -float('inf')
for action in self.actions(board):
min_v = self.min_value(self.result(board, action))
v = max(v, min_v)
| |
"""Generated message classes for cloudbuild version v1.
Builds container images in the cloud.
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
from apitools.base.py import extra_types
package = 'cloudbuild'
class Build(_messages.Message):
  """A build resource in the Container Builder API. At a high level, a Build
  describes where to find source code, how to build it (for example, the
  builder image to run on the source), and what tag to apply to the built
  image when it is pushed to Google Container Registry. Fields can include
  the following variables which will be expanded when the build is created: -
  $PROJECT_ID: the project ID of the build. - $BUILD_ID: the autogenerated ID
  of the build. - $REPO_NAME: the source repository name specified by
  RepoSource. - $BRANCH_NAME: the branch name specified by RepoSource. -
  $TAG_NAME: the tag name specified by RepoSource. - $REVISION_ID or
  $COMMIT_SHA: the commit SHA specified by RepoSource or resolved from the
  specified branch or tag.
  Enums:
    StatusValueValuesEnum: Status of the build. @OutputOnly
  Fields:
    buildTriggerId: The ID of the BuildTrigger that triggered this build, if
      it was triggered automatically. @OutputOnly
    createTime: Time at which the request to create the build was received.
      @OutputOnly
    finishTime: Time at which execution of the build was finished. The
      difference between finish_time and start_time is the duration of the
      build's execution. @OutputOnly
    id: Unique identifier of the build. @OutputOnly
    images: A list of images to be pushed upon the successful completion of
      all build steps. The images will be pushed using the builder service
      account's credentials. The digests of the pushed images will be stored
      in the Build resource's results field. If any of the images fail to be
      pushed, the build is marked FAILURE.
    logUrl: URL to logs for this build in Google Cloud Logging. @OutputOnly
    logsBucket: Google Cloud Storage bucket where logs should be written (see
      [Bucket Name Requirements](https://cloud.google.com/storage/docs/bucket-
      naming#requirements)). Logs file names will be of the format
      `${logs_bucket}/log-${build_id}.txt`.
    options: Special options for this build.
    projectId: ID of the project. @OutputOnly.
    results: Results of the build. @OutputOnly
    source: Describes where to find the source files to build.
    sourceProvenance: A permanent fixed identifier for source. @OutputOnly
    startTime: Time at which execution of the build was started. @OutputOnly
    status: Status of the build. @OutputOnly
    statusDetail: Customer-readable message about the current status.
      @OutputOnly
    steps: Describes the operations to be performed on the workspace.
    timeout: Amount of time that this build should be allowed to run, to
      second granularity. If this amount of time elapses, work on the build
      will cease and the build status will be TIMEOUT. Default time is ten
      minutes.
  """
  class StatusValueValuesEnum(_messages.Enum):
    """Status of the build. @OutputOnly
    Values:
      STATUS_UNKNOWN: Status of the build is unknown.
      QUEUED: Build is queued; work has not yet begun.
      WORKING: Build is being executed.
      SUCCESS: Build finished successfully.
      FAILURE: Build failed to complete successfully.
      INTERNAL_ERROR: Build failed due to an internal cause.
      TIMEOUT: Build took longer than was allowed.
      CANCELLED: Build was canceled by a user.
    """
    STATUS_UNKNOWN = 0
    QUEUED = 1
    WORKING = 2
    SUCCESS = 3
    FAILURE = 4
    INTERNAL_ERROR = 5
    TIMEOUT = 6
    CANCELLED = 7
  # NOTE: autogenerated -- the integer passed to each *Field is the
  # protorpclite field number and must stay stable for wire compatibility.
  buildTriggerId = _messages.StringField(1)
  createTime = _messages.StringField(2)
  finishTime = _messages.StringField(3)
  id = _messages.StringField(4)
  images = _messages.StringField(5, repeated=True)
  logUrl = _messages.StringField(6)
  logsBucket = _messages.StringField(7)
  options = _messages.MessageField('BuildOptions', 8)
  projectId = _messages.StringField(9)
  results = _messages.MessageField('Results', 10)
  source = _messages.MessageField('Source', 11)
  sourceProvenance = _messages.MessageField('SourceProvenance', 12)
  startTime = _messages.StringField(13)
  status = _messages.EnumField('StatusValueValuesEnum', 14)
  statusDetail = _messages.StringField(15)
  steps = _messages.MessageField('BuildStep', 16, repeated=True)
  timeout = _messages.StringField(17)
class BuildOperationMetadata(_messages.Message):
  """Metadata for build operations.
  Fields:
    build: The build that the operation is tracking.
  """
  # Single message field referencing the tracked Build resource.
  build = _messages.MessageField('Build', 1)
class BuildOptions(_messages.Message):
  """Optional arguments to enable specific features of builds.
  Enums:
    RequestedVerifyOptionValueValuesEnum: Requested verifiability options.
    SourceProvenanceHashValueListEntryValuesEnum:
  Fields:
    requestedVerifyOption: Requested verifiability options.
    sourceProvenanceHash: Requested hash for SourceProvenance.
  """
  class RequestedVerifyOptionValueValuesEnum(_messages.Enum):
    """Requested verifiability options.
    Values:
      NOT_VERIFIED: Not a verifiable build. (default)
      VERIFIED: Verified build.
    """
    NOT_VERIFIED = 0
    VERIFIED = 1
  class SourceProvenanceHashValueListEntryValuesEnum(_messages.Enum):
    """SourceProvenanceHashValueListEntryValuesEnum enum type.
    Values:
      NONE: <no description>
      SHA256: <no description>
    """
    NONE = 0
    SHA256 = 1
  # Field numbers are protorpclite wire identifiers; sourceProvenanceHash is
  # a repeated enum (multiple hash types may be requested).
  requestedVerifyOption = _messages.EnumField('RequestedVerifyOptionValueValuesEnum', 1)
  sourceProvenanceHash = _messages.EnumField('SourceProvenanceHashValueListEntryValuesEnum', 2, repeated=True)
class BuildStep(_messages.Message):
  """BuildStep describes a step to perform in the build pipeline.
  Fields:
    args: A list of arguments that will be presented to the step when it is
      started. If the image used to run the step's container has an
      entrypoint, these args will be used as arguments to that entrypoint. If
      the image does not define an entrypoint, the first element in args will
      be used as the entrypoint, and the remainder will be used as arguments.
    dir: Working directory (relative to project source root) to use when
      running this operation's container.
    entrypoint: Optional entrypoint to be used instead of the build step
      image's default If unset, the image's default will be used.
    env: A list of environment variable definitions to be used when running a
      step. The elements are of the form "KEY=VALUE" for the environment
      variable "KEY" being given the value "VALUE".
    id: Optional unique identifier for this build step, used in wait_for to
      reference this build step as a dependency.
    name: The name of the container image that will run this particular build
      step. If the image is already available in the host's Docker daemon's
      cache, it will be run directly. If not, the host will attempt to pull
      the image first, using the builder service account's credentials if
      necessary. The Docker daemon's cache will already have the latest
      versions of all of the officially supported build steps
      (https://github.com/GoogleCloudPlatform/cloud-builders). The Docker
      daemon will also have cached many of the layers for some popular images,
      like "ubuntu", "debian", but they will be refreshed at the time you
      attempt to use them. If you built an image in a previous build step, it
      will be stored in the host's Docker daemon's cache and is available to
      use as the name for a later build step.
    waitFor: The ID(s) of the step(s) that this build step depends on. This
      build step will not start until all the build steps in wait_for have
      completed successfully. If wait_for is empty, this build step will start
      when all previous build steps in the Build.Steps list have completed
      successfully.
  """
  # NOTE: autogenerated -- 'dir' and 'id' mirror the API field names even
  # though they shadow Python builtins; do not rename.
  args = _messages.StringField(1, repeated=True)
  dir = _messages.StringField(2)
  entrypoint = _messages.StringField(3)
  env = _messages.StringField(4, repeated=True)
  id = _messages.StringField(5)
  name = _messages.StringField(6)
  waitFor = _messages.StringField(7, repeated=True)
class BuildTrigger(_messages.Message):
  """Configuration for an automated build in response to source repository
  changes.
  Fields:
    build: Contents of the build template.
    createTime: Time when the trigger was created. @OutputOnly
    description: Human-readable description of this trigger.
    disabled: If true, the trigger will never result in a build.
    filename: Path, from the source root, to a file whose contents is used for
      the template.
    id: Unique identifier of the trigger. @OutputOnly
    triggerTemplate: Template describing the types of source changes to
      trigger a build. Branch and tag names in trigger templates are
      interpreted as regular expressions. Any branch or tag change that
      matches that regular expression will trigger a build.
  """
  # 'build' (inline template) and 'filename' (template file in the repo) are
  # the two alternative ways of supplying the build definition.
  build = _messages.MessageField('Build', 1)
  createTime = _messages.StringField(2)
  description = _messages.StringField(3)
  disabled = _messages.BooleanField(4)
  filename = _messages.StringField(5)
  id = _messages.StringField(6)
  triggerTemplate = _messages.MessageField('RepoSource', 7)
class BuiltImage(_messages.Message):
  """BuiltImage describes an image built by the pipeline.
  Fields:
    digest: Docker Registry 2.0 digest.
    name: Name used to push the container image to Google Container Registry,
      as presented to `docker push`.
  """
  # Both fields are plain strings filled in by the build results.
  digest = _messages.StringField(1)
  name = _messages.StringField(2)
class CancelBuildRequest(_messages.Message):
  """Request to cancel an ongoing build."""
  # Intentionally defines no fields: this message is an empty request body.
class CancelOperationRequest(_messages.Message):
  """The request message for Operations.CancelOperation."""
  # Intentionally defines no fields: this message is an empty request body.
class CloudbuildOperationsCancelRequest(_messages.Message):
  """A CloudbuildOperationsCancelRequest object.
  Fields:
    cancelOperationRequest: A CancelOperationRequest resource to be passed as
      the request body.
    name: The name of the operation resource to be cancelled.
  """
  # 'name' is required: it identifies the operation addressed by the URL path.
  cancelOperationRequest = _messages.MessageField('CancelOperationRequest', 1)
  name = _messages.StringField(2, required=True)
class CloudbuildOperationsGetRequest(_messages.Message):
  """A CloudbuildOperationsGetRequest object.
  Fields:
    name: The name of the operation resource.
  """
  # 'name' is required: it identifies the operation addressed by the URL path.
  name = _messages.StringField(1, required=True)
class CloudbuildOperationsListRequest(_messages.Message):
"""A CloudbuildOperationsListRequest | |
paths = []
# Add in starting point.
paths.append(([AtomImage(self.atom, [0, 0, 0])], 1.0))
# Increment until desired shell.
for step in range(shell):
# Get the new list.
new_paths = []
# For each current path.
for path in paths:
# Get last atom in current path.
last_step = path[0][len(path[0]) - 1]
# Get each possible step.
new_steps = OrderedDict()
# Surface area of possible steps.
surface_area = 0.0
for face in cells[last_step.get_atom_id()].get_faces():
# Adjust image based on position of last atom.
new_supercell = face.get_outside_atom().get_supercell() +\
last_step.get_supercell()
# Store the next step.
next_step = AtomImage(face.get_outside_atom().get_atom(),
new_supercell)
area = face.get_area()
surface_area += area
new_steps[next_step] = area
# Eliminate backtracking steps.
for previous_step in path[0]:
if previous_step in new_steps:
surface_area -= new_steps.pop(previous_step)
# Create new paths, making sure to update weights.
for (k,v) in iteritems(new_steps):
# Increment path.
new_path = list(path[0])
new_path.append(k)
# Increment weight.
new_weight = path[1] * v / surface_area
# Add it to new_paths.
new_paths.append((new_path, new_weight))
# Update paths.
paths = new_paths
# Now that all the paths are gathered, output only the last step and
# weights of all paths that lead to that step.
output = OrderedDict()
for path in paths:
# Get the last step.
atom = path[0][len(path[0]) - 1]
# Update map.
if atom in output:
output[atom] += path[1]
else:
output[atom] = path[1]
return output
def get_neighbor_shell(self, cells, index):
"""Function to get list of atoms in a certain coordination shell.
A neighbor shell includes all atoms that are a certain number of
neighbors away that are not in any smaller neighbor shell (e.g. 2nd
shell neighbors cannot also be 1st nearest neighbors).
Parameters
----------
cells : array-like
A list of VoronoiCell's of all other atoms (arranged by Atom ID).
index : int
Index of neighbor shell.
Returns
-------
type : array-like
All neighbors in that. A list of AtomImage's.
"""
return self.get_neighbor_shells(cells, index)[index]
def get_num_shared_bonds(self, cell, direction, neighbors):
"""Function to compute number of shared bonds.
Atoms are defined as bonded if they share a face between their
Voronoi cells.
This code determines how many common entries occur between a Voronoi
cell and a list of neighbor IDs from this cell. Note that in order for
bonds to be shared, they must connect to the same image of a certain
atom. That is where the direction parameter comes into play.
Parameters
----------
cell : VoronoiCell
Voronoi cell of neighboring atom.
direction : array-like
Difference between image of neighboring atom and this cell.
neighbors : array-like
A list of AtomImage's containing IDs and Image Positions of all
neighboring atoms to this cell.
Returns
-------
type : int
Number of shared neighbors between this cell and the neighboring
atom.
"""
n_shared = 0
for face in cell.get_faces():
other_cell_n_id = face.get_outside_atom().get_atom_id()
other_cell_n_image = face.get_outside_atom().get_supercell() + \
direction
# Make that image.
other_cell_n = AtomImage(self.atom.get_cell().get_atom(
other_cell_n_id), other_cell_n_image)
# Check against every neighbor of this cell.
for n in neighbors:
if other_cell_n.__eq__(n):
n_shared += 1
return n_shared
def get_coordination_shell_shape(self, cells):
"""Function to get the shape of the coordination polyhedron around this
atom.
This is determined by counting the number of "bonds" between
first neighbor shell atoms. Here, atoms are "bonded" if their Voronoi
cells share a face. This is similar to get_polyhedron_shape. See
section H of the paper listed in the References section for more
details.
Parameters
----------
cells : array-like
VoronoiCell's of all other atoms (arranged by atom ID).
References
----------
.. [1] <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>,
“Structural evolution and kinetics in Cu-Zr metallic liquids from
molecular dynamics simulations,” Physical Review B, vol. 88, no. 13,
Oct. 2013.
Returns
-------
output : OrderedDict
Ordered dictionary containing the number of bonds, number of
first shell neighbors with that many bonds as key,value pairs.
"""
# Get IDs of every neighbor.
neighbors = [face.get_outside_atom() for face in self.faces]
# Get number of mutual neighbors for each neighbor.
output = OrderedDict()
for n in neighbors:
n_shared = self.get_num_shared_bonds(cells[n.get_atom_id()],
n.get_supercell(), neighbors)
if n_shared in output:
output[n_shared] += 1
else:
output[n_shared] = 1
return output
def get_polyhedron_shape(self):
"""Function to compute the polyhedron shape index.
This shape is defined by the "Voronoi Index", which is defined as the
number of faces with a certain number of sides. This is often
expressed as (n_3, n_4, n_5, n_6), where n_x is the number of faces
with x number of sides.
http://rspa.royalsocietypublishing.org/cgi/doi/10.1098/rspa.1970.0190
Finney (1970).
References
----------
.. [1] <NAME>, “Random Packings and the Structure of Simple
Liquids. II. The Molecular Geometry of Simple Liquids,” Proceedings
of the Royal Society A: Mathematical, Physical and Engineering
Sciences, vol. 319, no. 1539, pp. 495–507, Nov. 1970.
Returns
-------
output : OrderedDict
Ordered dictionary containing number of edges, number of faces
with that edge count as key,value pairs.
"""
output = OrderedDict()
for face in self.faces:
n_vertices = face.n_vertices()
if n_vertices in output:
output[n_vertices] += 1
else:
output[n_vertices] = 1
return output
def get_volume(self):
"""Function to get volume of this cell
Returns
-------
type : float
Volume.
"""
if np.isnan(self.volume):
self.volume = 0
atom_center = self.atom.get_position_cartesian()
for face in self.faces:
area = face.get_area()
from_center = face.get_centroid() - atom_center
n = face.get_normal()
n /= norm(n)
h = np.dot(from_center, n)
self.volume += area * h / 3.0 # Face normal is away from
# center.
return self.volume
def get_surface_area(self):
"""Function to get surface area of cell.
Returns
-------
type : float
Surface area.
"""
return sum([face.get_area() for face in self.faces])
def get_min_max_vertex_distance(self):
"""Function to get the minimum and maximum distance between any two
vertices.
Returns
-------
min_dist : float
Minimum distance.
max_dist : float
Maximum distance.
"""
vertices = self.get_vertices()
l_v = len(vertices)
min_dist = float("inf")
max_dist = -min_dist
for i in range(l_v):
for j in range(i+1, l_v):
dist = vertices[i].distance_from(vertices[j])
min_dist = min(dist, min_dist)
max_dist = max(dist, max_dist)
return min_dist, max_dist
def geometry_is_valid(self):
"""Function to determine whether the geometry of this structure is
sound.
Returns
-------
type : bool
True if valid, else False.
"""
for face in self.faces:
if not face.is_closed():
return False
for f in face.get_neighboring_faces():
if f not in self.faces:
return False
return True
    def compute_cell(self, image_finder, cutoff):
        """Function to compute cell, given ability to generate images.

        Retries with a 1.5x larger cutoff each time the helper fails, up to
        4 attempts total, before giving up.

        Parameters
        ----------
        image_finder : PairDistanceAnalysis
            Tool to find images within a cutoff.
        cutoff : float
            Initial cutoff. Will be increased if too small.

        Raises
        ------
        Exception
            If cell fails to compute after all attempts.
        """
        cur_cutoff = cutoff
        n_attempts = 0
        while n_attempts < 4:
            n_attempts += 1
            image_finder.set_cutoff_distance(cur_cutoff)
            # Find all nearby images.
            images = [image[0] for image in
                      image_finder.get_all_neighbors_of_atom(
                          self.atom.get_id())]
            # Compute cell.
            try:
                self.compute_cell_helper(images)
            except Exception:
                # Presumably the cutoff was too small to close the
                # polyhedron; widen it and retry.
                cur_cutoff *= 1.5
                continue
            return
        raise Exception("Cell failed to compute.")
    def compute_cell_helper(self, images):
        """Function to compute the Voronoi cell, given list of images.

        Parameters
        ----------
        images : array-like
            List of images to consider. NOTE(review): the original docstring
            described this as a key/value mapping of atomID to distance, but
            the code iterates it as a plain list of images -- confirm.
        """
        # Clear cached volume.
        self.volume = np.nan
        # Get all possible faces.
        possible_faces = self.compute_faces(images)
        # Get the faces corresponding to the direct polyhedron.
        direct_faces = self.compute_direct_neighbors(possible_faces)
        # Construct direct polyhedron.
        for df in direct_faces:
            try:
                df.assemble_face_from_faces(direct_faces)
            except Exception:
                raise Exception("Direct polyhedron failed to construct.")
        self.faces = list(direct_faces)
        # Get the faces that might actually be direct faces.
        possible_indirect_faces = self.compute_possible_indirect_neighbors(
            possible_faces)
        # Use these faces to compute indirect neighbors, cutting the cell
        # with each candidate in turn.
        for face in possible_indirect_faces:
            self.compute_intersection(face)
def compute_faces(self, images):
"""Function to compute the center of the face corresponding to each
neighbor.
Parameters
----------
images : array-like
List of all images of this atom.
Returns
-------
output : array-like
List of faces.
Raises
------
RuntimeError
If it fails to create face.
"""
# Generate faces.
output = []
for image in images:
# Check if the image is this atom.
if image.get_atom_id() == self.atom.get_id() and np.array_equal(
image.get_supercell(), [0, 0, 0]):
# If so, skip.
continue
# Make the appropriate face.
try:
output.append(VoronoiFace(self.atom, image, | |
i get something by merging with the previous component?
if len(composingElements) > 0:
tmp,tmp2 = analyzeByParticle([composingElements[-1] + '_' + splitp], species)
if tmp != [] and tmp2 != []:
flag = False
splitp = composingElements[-1] + '_' + splitp
composingElements.pop()
closestList = tmp
localEquivalenceTranslator,_,_ = self.processNamingConventions2([tmp[0],tmp2[0]])
for element in localEquivalenceTranslator:
if element not in equivalenceTranslator:
equivalenceTranslator[element] = []
equivalenceTranslator[element].extend(localEquivalenceTranslator[element])
for instance in localEquivalenceTranslator[element]:
addToDependencyGraph(dependencyGraph,instance[1],[instance[0]])
#do i get something by merging with the next component?
if flag and splitpindex + 1 != len(splitparticle):
tmp,tmp2 = analyzeByParticle([splitp+ '_' + splitparticle[splitpindex+1]],species)
if tmp!= [] and tmp2 != []:
splitp = splitp+ '_' + splitparticle[splitpindex+1]
splitpindex += 1
closestList = tmp
localEquivalenceTranslator,_,_ = self.processNamingConventions2([tmp[0],tmp2[0]])
for element in localEquivalenceTranslator:
if element not in equivalenceTranslator:
equivalenceTranslator[element] = []
equivalenceTranslator[element].append(localEquivalenceTranslator[element])
for instance in localEquivalenceTranslator[element]:
addToDependencyGraph(dependencyGraph,instance[1],[instance[0]])
else:
return [],[]
elif flag:
return [],[]
basicElements.append(min(closestList,key=len))
#if what i have is a known compound just add it
if splitp in species:
composingElements.append(splitp)
#if not create it
else:
closestList = get_close_matches(splitp,species)
closestList = [x for x in closestList if len(x) < len(splitp)]
flag = False
for element in closestList:
localEquivalenceTranslator,_,_ = self.processNamingConventions2([element,splitp])
if len(localEquivalenceTranslator.keys()) == 0:
basicElements = []
composingElements = []
for element in localEquivalenceTranslator:
if element not in equivalenceTranslator:
equivalenceTranslator[element] = []
equivalenceTranslator[element].append(localEquivalenceTranslator[element])
for instance in localEquivalenceTranslator[element]:
addToDependencyGraph(dependencyGraph,instance[1],[instance[0]])
flag = True
if flag:
composingElements.append(splitp)
return basicElements,composingElements
additionalHandling = []
#lexical handling
for particle in sorted(particles, key=len):
composingElements = []
basicElements = []
# can you break it down into small bites?
if '_' in particle:
splitparticle = particle.split('_')
#print '---',splitparticle
splitparticle = [x for x in splitparticle if x]
#print splitparticle
basicElements,composingElements = analyzeByParticle(splitparticle,species)
if basicElements == composingElements and basicElements:
closeMatches = get_close_matches(particle,species)
matches = [x for x in closeMatches if len(x) < len(particle) and len(x) >= 3]
for match in matches:
difference = difflib.ndiff(match,particle)
differenceList = tuple([x for x in difference if '+' in x])
if differenceList in self.namingConventions['patterns']:
logMess('INFO:LAE005', 'matching {0}={1}'.format(particle, [match]))
addToDependencyGraph(dependencyGraph,particle,[match])
if len(matches) > 0:
continue
elif particle not in composingElements and composingElements != [] and all([x in species for x in composingElements]):
addToDependencyGraph(dependencyGraph, particle, composingElements)
for element in composingElements:
if element not in dependencyGraph:
addToDependencyGraph(dependencyGraph, element, [])
if element not in particles:
additionalHandling.append(element)
continue
else:
for basicElement in basicElements:
if basicElement in particle and basicElement != particle:
fuzzyList = self.processAdHocNamingConventions(basicElement, particle, localSpeciesDict, False, species)
if self.testAgainstExistingConventions(fuzzyList[0][1], self.namingConventions['modificationList']):
addToDependencyGraph(dependencyGraph, particle, [basicElement])
logMess('INFO:LAE005', '{0} can be mapped to {1} through existing naming conventions'.format(particle, [basicElement]))
break
continue
# if bottom up doesn't work try a top down approach
for comparisonParticle in particles:
if particle == comparisonParticle:
continue
# try to map remaining orphaned molecules to each other based on simple, but known modifications
if comparisonParticle in particle:
fuzzyList = self.processAdHocNamingConventions(particle,comparisonParticle,localSpeciesDict, False, species)
if self.testAgainstExistingConventions(fuzzyList[0][1],self.namingConventions['modificationList']):
if particle in annotationDict and comparisonParticle in annotationDict:
baseSet = set([y for x in annotationDict[particle] for y in annotationDict[particle][x]])
modSet = set([y for x in annotationDict[comparisonParticle] for y in annotationDict[comparisonParticle][x]])
if len(baseSet.intersection(modSet)) == 0:
baseDB = set([x.split('/')[-2] for x in baseSet if 'identifiers.org' in x])
modDB = set([x.split('/')[-2] for x in modSet if 'identifiers.org' in x])
                                #we still have to check that they both reference the same database
if len(baseDB.intersection(modDB)) > 0:
logMess('ERROR:ANN202', '{0}:{1}:can be mapped through naming conventions but the annotation information does not match'.format(particle, comparisonParticle))
continue
addToDependencyGraph(dependencyGraph,particle,[comparisonParticle])
logMess('INFO:LAE005', '{0} can be mapped to {1} through existing naming conventions'.format(particle, [comparisonParticle]))
break
else:
common_root = detectOntology.findLongestSubstring(particle, comparisonParticle)
                    # some arbitrary threshold of what makes a good minimum length for the common root
if len(common_root) > 0 and common_root not in originalDependencyGraph:
fuzzyList = self.processAdHocNamingConventions(common_root,comparisonParticle,localSpeciesDict, False, species)
fuzzyList2 = self.processAdHocNamingConventions(common_root,particle,localSpeciesDict, False, species)
particleMap = self.testAgainstExistingConventions(fuzzyList[0][1], self.namingConventions['modificationList'])
compParticleMap = fuzzyList2, self.testAgainstExistingConventions(fuzzyList2[0][1], self.namingConventions['modificationList'])
if particleMap and compParticleMap:
if particle in annotationDict and comparisonParticle in annotationDict:
baseSet = set([y for x in annotationDict[particle] for y in annotationDict[particle][x]])
modSet = set([y for x in annotationDict[comparisonParticle] for y in annotationDict[comparisonParticle][x]])
if len(baseSet.intersection(modSet)) == 0:
logMess('ERROR:ANN202', '{0}:{1}:can be mapped through naming conventions but the annotation information does not match'.format(particle,comparisonParticle))
break
addToDependencyGraph(dependencyGraph, particle, [common_root])
addToDependencyGraph(dependencyGraph, comparisonParticle, [common_root])
addToDependencyGraph(dependencyGraph, common_root, [])
logMess('INFO:LAE006', '{0}:{1}:can be mapped together through new common molecule {2} by existing naming conventions'.format(particle, comparisonParticle, common_root))
break
#if len(additionalHandling) > 0:
#print self.findClosestModification(set(additionalHandling),species)
return dependencyGraph,equivalenceTranslator
def loadConfigFiles(self,fileName):
'''
the reactionDefinition file must contain the definitions of the basic reaction types
we wnat to parse and what are the requirements of a given reaction type to be considered
as such
'''
reactionDefinition = ''
if fileName == '':
return []
with open(fileName,'r') as fp:
reactionDefinition = json.load(fp)
return reactionDefinition
def identifyReactions2(self,rule,reactionDefinition):
'''
This method goes through the list of common reactions listed in ruleDictionary
and tries to find how are they related according to the information in reactionDefinition
'''
result = []
for idx,element in enumerate(reactionDefinition['reactions']):
tmp1 = rule[0] if rule[0] not in ['0',['0']] else []
tmp2 = rule[1] if rule[1] not in ['0',['0']] else []
if(len(tmp1) == len(element[0]) and len(tmp2) == len(element[1])):
result.append(1)
# for (el1,el2) in (element[0],rule[0]):
# if element[0].count(el1) == element[]
else:
result.append(0)
return result
def species2Rules(self,rules):
'''
This method goes through the rule list and classifies species tuples in a dictionary
according to the reactions they appear in.
'''
ruleDictionary = {}
for idx,rule in enumerate(rules):
reaction2 = rule #list(parseReactions(rule))
totalElements = [item for sublist in reaction2 for item in sublist]
if tuple(totalElements) in ruleDictionary:
ruleDictionary[tuple(totalElements)].append(idx)
else:
ruleDictionary[tuple(totalElements)] = [idx]
return ruleDictionary
    def checkCompliance(self,ruleCompliance,tupleCompliance,ruleBook):
        '''
        This method is mainly useful when a single reaction can be possibly classified
        in different ways, but in the context of its tuple partners it can only be classified
        as one.

        Returns a 0/1 numpy array with one entry per ruleBook entry; an entry
        is 1 when at least one of its alternatives is satisfied.
        '''
        ruleResult = np.zeros(len(ruleBook))
        # np.nonzero returns a tuple of index arrays; iterating that tuple and
        # then the array(s) inside visits every index where tupleCompliance is
        # non-zero (assumes tupleCompliance is 1-D -- TODO confirm)
        for validTupleIndex in np.nonzero(tupleCompliance):
            for index in validTupleIndex:
                for alternative in ruleBook[index]:
                    # 'r' alternative: at least one of the referenced rule
                    # compliance flags must be set
                    if 'r' in alternative and np.any([ruleCompliance[temp] for temp in alternative['r']]):
                        ruleResult[index] = 1
                        break
                    #check if just this is enough
                    # 'n' alternative: the tuple match alone suffices.  Note a
                    # non-matching 'r' alternative falls through to this check.
                    if 'n' in alternative:
                        ruleResult[index] = 1
                        break
        return ruleResult
def levenshtein(self,s1, s2):
l1 = len(s1)
l2 = len(s2)
matrix = [range(l1 + 1)] * (l2 + 1)
for zz in range(l2 + 1):
matrix[zz] = range(zz,zz + l1 + 1)
for zz in range(0,l2):
for sz in range(0,l1):
if s1[sz] == s2[zz]:
matrix[zz+1][sz+1] = min(matrix[zz+1][sz] + 1, matrix[zz][sz+1] + 1, matrix[zz][sz])
else:
matrix[zz+1][sz+1] = min(matrix[zz+1][sz] + 1, matrix[zz][sz+1] + 1, matrix[zz][sz] + 1)
return matrix[l2][l1]
def analyzeUserDefinedEquivalences(self,molecules,conventions):
equivalences = {}
smolecules = [x.strip('()') for x in molecules]
modifiedElement = {}
for convention in conventions:
baseMol = []
modMol = []
for molecule in smolecules:
if convention[0] in molecule and convention[1] not in molecule:
baseMol.append(molecule)
elif convention[1] in molecule:
modMol.append(molecule)
if convention[2] not in equivalences:
equivalences[convention[2]] = []
equivalences[convention[2]].append((convention[0],convention[1]))
if convention[0] not in modifiedElement:
modifiedElement[convention[0]] = []
modifiedElement[convention[0]].append((convention[0],convention[1]))
'''
for mol1 in baseMol:
for mol2 in modMol:
score = self.levenshtein(mol1,mol2)
if score == self.levenshtein(convention[0],convention[1]):
equivalences[convention[2]].append((mol1,mol2))
modifiedElement[convention[0]].append((mol1,mol2))
break
'''
return equivalences,modifiedElement
def processNamingConventions2(self, molecules, threshold=4, onlyUser=False):
# normal naming conventions
strippedMolecules = [x.strip('()') for x in molecules]
tmpTranslator = {}
translationKeys = []
conventionDict = {}
# FIXME: This line contains the single biggest execution bottleneck in the code
# we should be able to delete it
# user defined equivalence
if not onlyUser:
tmpTranslator, translationKeys, conventionDict = detectOntology.analyzeNamingConventions(strippedMolecules,
self.namingConventions, similarityThreshold=threshold)
# user defined naming convention
if self.userEquivalencesDict is None and hasattr(self, 'userEquivalences'):
self.userEquivalencesDict, self.modifiedElementDictionary = self.analyzeUserDefinedEquivalences(molecules, self.userEquivalences)
else:
if self.userEquivalencesDict is None:
self.userEquivalencesDict = {}
'''
for name in self.userEquivalencesDict:
equivalenceTranslator[name] = self.userEquivalencesDict[name]
'''
# add stuff to the main translator
for element in self.userEquivalencesDict:
if element not in tmpTranslator:
tmpTranslator[element] = []
tmpTranslator[element].extend(self.userEquivalencesDict[element])
return tmpTranslator, translationKeys, conventionDict
def processAdHocNamingConventions(self, reactant, product,
localSpeciesDict, compartmentChangeFlag, moleculeSet):
'''
1-1 string comparison. This method will attempt to detect if there's
a modifiation relatinship between string <reactant> and <product>
>>> sa = SBMLAnalyzer(None,'./config/reactionDefinitions.json','./config/namingConventions.json')
>>> sa.processAdHocNamingConventions('EGF_EGFR_2','EGF_EGFR_2_P', {}, False, ['EGF','EGFR', 'EGF_EGFR_2'])
[[[['EGF_EGFR_2'], | |
#!/usr/bin/env python3
import os
import sys
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import pandas as pd
import seaborn as sn
import column_names as cols
file_formats = ["pdf", "svg"]

def save(name):
    """Write the current matplotlib figure to figs/<name>.<fmt> for every
    configured output format."""
    os.makedirs("figs", exist_ok=True)
    for fmt in file_formats:
        plt.savefig(
            "figs/%s.%s" % (name, fmt),
            bbox_inches='tight',
            pad_inches=0,
            transparent=True,
        )
# entire community
df = pd.read_csv("data/spack-user-survey-2020-responses.csv")
# get reasonable column names: map the long survey questions to short ids
df.columns = [cols.description_to_name[c.strip()] for c in df.columns]
# just members of ECP
ecp = df[df.in_ecp == "Yes"]
#
# Are you part of ECP?
#
ax = df.in_ecp.value_counts().plot.pie(
    figsize=(6,4),
    fontsize=12,
    # only label slices bigger than 4% to avoid clutter
    autopct=lambda p: ("%.1f%%" % p) if p > 4 else "",
    explode=[0.05] * 2,
    ylabel='',
    legend=False,
    labels=[''] * 2,
    pctdistance=0.7,
    title=cols.names["in_ecp"],
    textprops={'color':"w"}
)
# NOTE(review): the pie slices are the Yes/No counts of in_ecp but the legend
# labels read "All"/"ECP" -- confirm these are the intended labels
ax.legend(loc="lower left", fontsize=12, bbox_to_anchor=(-.2, 0),
          frameon=False, labels=["All", "ECP"])
save("pie_in_ecp")
#
# Pie charts
#
def two_pies(col, legend_cols=2, same=False):
    """Plot two pie charts to compare all responses with ECP responses.

    Args:
        col (str): name of column to compare
        legend_cols (int): number of columns in the legend
        same (bool): whether ECP results were pretty much the same as all (in
            which case we omit the ECP-specific ones)
    """
    plt.close()
    counts = pd.DataFrame()
    counts["All"] = df[col].value_counts()
    if not same:
        counts["ECP"] = ecp[col].value_counts()
    axes = counts.plot.pie(
        subplots=True,
        layout=(1, 2),
        figsize=(8, 8),
        fontsize=12,
        autopct=lambda p: ("%.1f%%" % p) if p > 4 else "",
        explode=[0.05] * len(counts),
        legend=False,
        labels=[''] * counts.shape[0],
        ylabel='',
        pctdistance=0.7,
        title=cols.names[col],
        textprops={'color':"w"}
    )
    plt.tight_layout()
    left = axes[0][0]
    if same:
        left.set_title("All\n(ECP responses were similar)")
    else:
        left.set_title("All")
        axes[0][1].set_title("ECP")
    left.get_figure().subplots_adjust(top=1.3)
    left.legend(
        ncol=legend_cols,
        bbox_to_anchor=(0, 0),
        loc="upper left",
        labels=counts.index,
        fontsize=12,
        frameon=False,
    )
    save("two_pies_" + col)
# per-question pie comparisons (All vs ECP)
two_pies("user_type")
two_pies("workplace")
two_pies("country", legend_cols=3)
two_pies("how_find_out")
two_pies("how_bad_no_py26", legend_cols=1)
two_pies("how_bad_only_py3", legend_cols=1)
two_pies("would_attend_workshop")
two_pies("did_tutorial")
two_pies("how_often_docs")
two_pies("commercial_support")
#
# Simple bar charts
#
def two_bars(col):
    """Plot two bar charts to compare all responses with ECP responses.

    Args:
        col (str): name of column to compare
    """
    plt.close()
    frame = pd.DataFrame()
    # sort=False keeps the answers in their natural (bucket) order
    frame["All"] = df[col].value_counts(sort=False)
    frame["ECP"] = ecp[col].value_counts(sort=False)
    axes = frame.plot.bar(
        subplots=True,
        layout=(1, 2),
        figsize=(8, 3),
        fontsize=12,
        legend=False,
        ylabel='',
        xlabel="at least N years",
        title=cols.names[col],
    )
    plt.tight_layout()
    left, right = axes[0][0], axes[0][1]
    left.set_title("All")
    right.set_title("ECP")
    save("two_bars_" + col)
# not pie charts: the ordered duration buckets read better as bars
two_bars("how_long_using")
#
# Multi-choice bar charts
#
def two_multi_bars(col, sort=None, index=None, filt=None, name=None,
                   figsize=(5, 4)):
    """Plot a horizontal bar chart comparing all responses with ECP responses
    for a multiple-choice question (percent of respondents per answer).

    Args:
        col (str): name of column to compare
        sort: unused; kept only for call compatibility
        index (list): custom index for plot (disables sorting by value)
        filt (callable): optional function to filter column by
        name (str): name for the figure (defaults to col)
        figsize (tuple): dimensions in inches for the figure
    """
    if filt is None:
        filt = lambda x: x
    if name is None:
        name = col
    plt.close()
    combined = pd.DataFrame(index=index)
    # raw strings for the split regex: '\s' in a plain literal is an invalid
    # escape sequence and warns on modern Pythons
    split = filt(df[col].str.split(r',\s+', expand=True))
    combined["All"] = split.stack().value_counts()
    combined["All"] /= df.shape[0]
    combined["All"] *= 100
    split = filt(ecp[col].str.split(r',\s+', expand=True))
    combined["ECP"] = split.stack().value_counts()
    combined["ECP"] /= ecp.shape[0]
    combined["ECP"] *= 100
    # with no custom index, order the bars by overall popularity
    if not index:
        combined = combined.sort_values(by="All", ascending=True)
    ax = combined.plot.barh(
        figsize=figsize,
        legend=True,
        title=cols.names[col],
    )
    ax.legend(loc="lower right", fontsize=12, frameon=False)
    plt.xlabel("Percent of respondents")
    plt.tight_layout()
    save("two_multi_bars_" + name)
two_multi_bars("app_area", figsize=(5, 5))
two_multi_bars("how_contributed")
# normalize the two "don't know" spellings into a single bucket
two_multi_bars("spack_versions",
               filt=lambda df: df.replace("Not sure. ", "do not know").replace(
                   "Do not know", "do not know"))
two_multi_bars("os", filt=lambda df: df.replace(
    "Windows Subsystem for Linux (WSL)", "WSL"))
two_multi_bars("python_version",
               index=['2.6', '2.7', '3.5', '3.6', '3.7', '3.8'])
two_multi_bars("how_use_pkgs", figsize=(6, 5), filt=lambda df: df.replace(
    ["Environment Modules (TCL modules)"], "TCL Modules"))
# strip parentheticals and shorten the long feature names for readability
two_multi_bars(
    "used_features",
    filt=lambda df: df.replace(r' \([^)]*\)', '', regex=True).replace(
        "Concretization preferences in packages.yaml",
        "Concretization preferences"
    ).replace("Externals in packages.yaml", "External packages"),
    figsize=(6, 5))
two_multi_bars("cpus_next_year")
two_multi_bars("gpus_next_year")
two_multi_bars("compilers_next_year", figsize=(7, 4))
two_multi_bars("how_get_help")
# fixed (reversed) index keeps the installation-count buckets in size order
two_multi_bars(
    "num_installations", index=reversed([
        "1 - 10", "10 - 100", "100 - 200", "200 - 500", "500-1,000", "> 1,000"
    ]))
linuxes = [
    "Gentoo", "Cray", "Amazon Linux", "Alpine", "TOSS", "Arch",
    "Fedora", "SuSE", "Debian", "Ubuntu", "Red Hat", "CentOS",
]

def linuxize(df):
    """Collapse every Linux distro answer into a single "Linux" bucket.

    Distro names are replaced by "Linux" (and the long WSL answer shortened),
    then duplicate answers within a row are blanked so each respondent is
    counted at most once per bucket.
    """
    collapsed = df.replace(linuxes, "Linux").replace(
        "Windows Subsystem for Linux (WSL)", "WSL")
    dupes = collapsed.apply(pd.Series.duplicated, axis=1)
    return collapsed.where(~dupes, None)
# simplified OS breakdown: all Linux distros collapsed into one bucket
two_multi_bars("os", filt=linuxize, name="os_simple")
mods = ("Environment Modules (TCL modules)", "Lmod")

def modulize(df):
    """Append an "any module system" column and shorten the TCL label."""
    uses_modules = df.apply(lambda row: row.isin(mods).any(), axis=1)
    any_col = uses_modules.map(
        lambda flag: "Modules (TCL or Lmod)" if flag else None)
    out = pd.concat([df, any_col], axis=1)
    return out.replace(["Environment Modules (TCL modules)"], "TCL Modules")
# package-use breakdown including the combined "any module system" bucket
two_multi_bars("how_use_pkgs", filt=modulize, name="how_use_pkgs_any",
               figsize=(6, 5))
gpus = ("NVIDIA", "AMD", "Intel")

def any_gpu(df):
    """Append an "Any GPU" column, set for rows naming at least one vendor."""
    vendor_present = df.apply(lambda row: row.isin(gpus).any(), axis=1)
    any_col = vendor_present.map(lambda flag: "Any GPU" if flag else None)
    return pd.concat([df, any_col], axis=1)
# GPU plans including the combined "Any GPU" bucket
two_multi_bars("gpus_next_year", filt=any_gpu, name="gpus_next_year_any")
#
# Multi-choice bar charts
#
def feature_bar_chart(
        df, title, name, feature_cols, ratings, xlabels, figsize, rot=25,
        ha="right", ymax=None, colors=None):
    """Draw a grouped bar chart of rating counts for a set of feature columns.

    Args:
        df: data set to count ratings in
        title (str): chart title
        name (str): figure file suffix
        feature_cols (list): columns to count
        ratings (list): rating values, in display order
        xlabels (list): optional replacement x tick labels
        figsize, rot, ha, ymax, colors: plot appearance knobs
    """
    # count each rating per column; transpose so features run along the x axis
    counts = df[feature_cols].apply(
        pd.Series.value_counts, sort=False).reindex(ratings).transpose()
    ax = counts.plot.bar(y=ratings, figsize=figsize, rot=0, color=colors)
    ax.legend(ncol=5, labels=ratings, frameon=False)
    plt.xticks(rotation=rot)
    if ymax:
        plt.ylim(0, ymax)
    if xlabels:
        ax.set_xticklabels(xlabels, ha=ha)
    plt.tight_layout()
    plt.title(title)
    save("feature_bars_" + name)
def score_averages(df, feature_cols, ratings, weights):
    """Calculate the weighted average score of each feature column.

    Args:
        df (DataFrame): data set
        feature_cols (list): list of column names to average
        ratings (list): values from the feature cols associated w/weights,
            e.g. "bad", "ok", "good"
        weights (dict): weights associated with ratings, e.g.,
            {"bad": 0, "ok": 1, "good": 2}.
    """
    counts = df[feature_cols].apply(pd.Series.value_counts).reindex(ratings)
    weight_series = pd.Series(weights, index=ratings)
    weighted = counts.multiply(weight_series, axis="index")
    # NaN counts (ratings nobody picked) are skipped by both sums
    return weighted.sum() / counts.sum()
def heat_map(
        title, filename, feature_cols, ratings, weights, labels, transpose,
        cmap, data_sets):
    """Generate a heat map of average ratings per feature and data set.

    Args:
        title (str): title for figure
        filename (str): name for figure file
        feature_cols (list): list of column names to average
        ratings (list): values from the feature cols associated w/weights,
            e.g. "bad", "ok", "good"
        weights (dict): weights associated with ratings, e.g.,
            {"bad": 0, "ok": 1, "good": 2}.
        labels (list): labels for the features -- default is feature col names.
        transpose (bool): True for features on X axis, False for labels on Y.
        cmap (str): Name of colormap to use
        data_sets (dict str -> DataFrame): names for y axis of heat map,
            mapped to data frames to get stats from.
    """
    plt.close()
    plt.figure()
    # one column of averages per data set
    heat_map = pd.DataFrame({
        name: score_averages(frame, feature_cols, ratings, weights)
        for name, frame in data_sets.items()
    })
    if transpose:
        heat_map = heat_map.transpose()
    # sort from highest to lowest rated (by the first data set / feature)
    heat_map = heat_map.sort_values(by=heat_map.columns[0], ascending=False)
    # order labels by value sort
    if not transpose:
        feature_labels = dict(zip(feature_cols, labels))
        labels = [feature_labels[col] for col in heat_map.index]
    ax = sn.heatmap(
        heat_map, cmap=cmap, annot=True, vmin=0, vmax=4, square=True,
        fmt=".1f", annot_kws={"size": 9})
    # colorbar ticks show the meaning of each score level
    cbar = ax.collections[0].colorbar
    cbar.set_ticks(range(5))
    cbar.set_ticklabels([
        "%d - %s" % (i, s.replace(' ', '\n '))
        for i, s in enumerate(ratings)
    ])
    cbar.ax.tick_params(labelsize=9)
    plt.title(title + "\n", fontsize=11)
    plt.xticks(rotation=45)
    if transpose:
        ax.set_xticklabels(labels, ha="right")
    else:
        ax.set_yticklabels(labels)
        ax.set_xticklabels(data_sets.keys(), ha="right")
    ax.tick_params(axis='both', which='major', labelsize=9)
    plt.tight_layout()
    save("heat_map_" + filename)
#
# Feature importance ratings
#
# NOTE: these strings must match the survey responses exactly (including the
# lowercase "important" in "Somewhat important") -- do not "fix" the casing
ratings = [
    "Not Important",
    "Slightly Important",
    "Somewhat important",
    "Very Important",
    "Critical"
]
# ordinal weight (0..4) for each rating
weights = { r: i for i, r in enumerate(ratings) }
feature_cols = [
    'feature_use_existing_installs',
    'feature_new_concretizer',
    'feature_better_flag_handling',
    'feature_developer_support',
    'feature_separate_build_deps',
    'feature_language_virtuals',
    'feature_pkg_notifications',
    'feature_build_testing',
    'feature_optimized_binaries',
    'feature_testing',
    'feature_cloud_integration',
    'feature_windows',
]
# short display labels, parallel to feature_cols
xlabels = [
    "Reuse existing installs",
    "New concretizer",
    "Better flag handling",
    "Better dev support",
    "Separate build-deps",
    "Language virtuals",
    "Pkg maintainer notif.",
    "Build testing (CI)",
    "Optimized binaries",
    "Package testing",
    "Cloud integration",
    "Windows support",
]
plt.close()
feature_cmap = cm.get_cmap("cool")
# five evenly spaced colors, one per rating level
feature_bar_colors = [feature_cmap(v) for v in [0.0, 0.25, 0.5, 0.75, 1.0]]
feature_bar_chart(
    df, "Rank these upcoming Spack features by importance",
    "all_features", feature_cols, ratings, xlabels, figsize=(12, 3),
    colors=feature_bar_colors)
feature_bar_chart(
    ecp, "Rank these upcoming Spack features by importance (ECP)",
    "ecp_features", feature_cols, ratings, xlabels, figsize=(12, 3),
    colors=feature_bar_colors)
heat_map(
    "Average feature importance by workplace",
    "features_by_workplace",
    feature_cols, ratings, weights, xlabels, False, feature_cmap, {
        "All" : df,
        "ECP" : df[df.in_ecp == "Yes"],
        "NNSA" : df[df.workplace == "DOE/NNSA Lab (e.g., LLNL/LANL/SNL)"],
        "ASCR" : df[df.workplace == "DOE/Office of Science Lab (ORNL/ANL/LBL)"],
        "Industry" : df[(df.workplace == "Company")
                        | (df.workplace == "Cloud Provider")],
        "University" : df[df.workplace == "University HPC/Computing Center"],
        "Public Lab" : df[df.workplace == "Other Public Research Lab"],
    }
)
# "All of the Above" respondents are counted in every job-type bucket
heat_map(
    "Average feature importance by job type",
    "features_by_job",
    feature_cols, ratings, weights, xlabels, False, feature_cmap, {
        "All" : df,
        "Developer" : df[(df.user_type == "Software Developer")
                         | (df.user_type == "All of the Above")],
        "Scientist" : df[(df.user_type == "Scientist/Researcher")
                         | (df.user_type == "All of the Above")],
        "Sys Admin" : df[(df.user_type == "System Administrator")
                         | (df.user_type == "All of the Above")],
        "User Support" : df[(df.user_type == "User Support Staff")
                            | (df.user_type == "All of the Above")],
        "Manager" : df[(df.user_type == "Manager")
                       | (df.user_type == "All of the Above")],
    }
)
#
# Quality ratings
#
ratings = ["Horrible", "Bad", "OK", "Good", "Excellent"]
weights = { r: i for i, r in enumerate(ratings) }
feature_cols = [
    "quality_spack",
    "quality_community",
    "quality_docs",
    "quality_packages",
]
xlabels = ["Spack", "Community", "Docs", "Packages"]
quality_cmap = "RdYlGn"
# These colors match to the Red/Yellow/Green heat maps
bar_colors = ["#cc2222", "orange", "#dddd00", "#94c772", "green"]
plt.close()
feature_bar_chart(df, "Rate the overall quality of...",
                  "all_quality", feature_cols, ratings, xlabels,
                  figsize=(7, 2), rot=0, ha="center", ymax=110, colors=bar_colors)
feature_bar_chart(ecp, "Rate the overall quality of... (ECP)",
"ecp_quality", feature_cols, ratings, xlabels,
figsize=(7, 2), rot=0, | |
from abc import ABC, abstractmethod
import numpy as np
import cv2
class PoseEstimator(ABC):
    """
    Abstract base class for pose estimators decoding the NN results.

    Provides a common interface for all decoders. Specifically provides:
    - get_input_frame method to convert an arbitrary image into the right
      size/shape and array configuration for the NN input.
    - get_original_frame method to reverse the transformations of
      get_input_frame to convert back frames obtained through the NNs
      passthrough channel
    - get_pose_data method that takes the raw NN output packet and decodes it
      to return a personwise keypoint array. Keypoints are automatically
      transformed back into coordinates for the original frame from NN input
      coordinates
    - draw_results method to draw personwise keypoints onto an image
    - handling of hyperparameters for the algorithm, parameters can be
      specified in _specific_options dictionary

    Derived classes need to:
    - implement decode_results method, which takes a list of the individual
      network outputs, already reshaped into their true shape and decodes them
      into personwise keypoints in NN input coordinates. Transformation back
      into original coordinates is handled automatically.
    - set self._output_shape in the constructor as this depends on the network
      architecture and determines the correct shape of the output arrays
    - lists of the landmarks and their order output by the system and the
      connections between landmarks.
    """
    # empty placeholders for landmark and connection lists; subclasses must
    # override these with the model's keypoint names and keypoint-index pairs
    landmarks = []
    connections = []
    # Possible options(hyperparameters) for tuning the decoding algorithm
    # Dict of the form
    # option_name: {'max_val': maximum value for slider,
    #               'divider': divide slider by this value for use,
    #               'default': default value,
    #               'description': Description for command line arg}
    # Example: detection_threshold:{"max_val": 100, "divider": 100, "default":
    # 30, "description": "..."} means the slider for detection_threshold will
    # range from 0 to 100 with a default of 30 and the slider value will be
    # divided by 100 before use as threshold. You don't have to do this
    # transformation yourself, e.g. if you want to specify a percentage value
    # with a step size of 1% define an option with max_val 100 and divider 100
    # and the self._option variable will automatically be set to
    # integer_val/divider and thus be percentage value in the range 0 to 1.
    # The command line arg will be described as "..." when running -h
    # _general_options will apply to all PoseEstimator objects,
    # _specific_options allows to define extra options in subclasses which only
    # apply there
    _general_options = {
        "detection_threshold": {
            "max_val":
            100,
            "divider":
            100,
            "default":
            15,
            "description":
            "Set the confidence threshold for keypoint detection in %%."
        }
    }
    # As _general_options but to be overwritten by specific implementations for
    # defining their unique parameters
    _specific_options = {}
def __init__(self, model_config, **kwargs):
"""
Parameters
----------
model_config : dict
Dictionary with configuration parameters of the model read from the
'models.json' file
kwargs:
Command line arguments to determine selected hyperparameters.
"""
self._input_shape = tuple(model_config["input_size"])
self._output_layers = model_config["output_layers"]
self._num_keypoints = len(self.landmarks)
for option_name, option in self.get_options():
if option_name in kwargs:
# kwargs (command line args take priority)
value = kwargs[option_name]
else:
value = option["default"]
self.set_option(option_name, value)
self._pad_top = None
self._pad_left = None
self._scale_factor = None
@classmethod
def get_options(cls):
"""
Iterate over all options, general and class specific.
"""
for option in cls._general_options.items():
yield option
for option in cls._specific_options.items():
yield option
@classmethod
def get_general_options(cls):
"""
Iterate over options common to all PoseEstimator classes.
"""
for option in cls._general_options.items():
yield option
@classmethod
def get_specific_options(cls):
"""
Iterate over options specific to the PoseEstimator class at hand.
"""
for option in cls._specific_options.items():
yield option
def set_option(self, option, value):
"""
Set the given option to the given value.
Values are specified in integer domain as this is what the OpenCV
trackbars support. For float value the divider specified in the option
is applied before assigning the value.
Options not applying to the model are ignored.
Parameters
----------
option : str
identifier of the option in the options dict
value : int
Value to set the option to, as int before divider was applied.
"""
for option_dict in ("_general_options", "_specific_options"):
option_dict = getattr(self, option_dict)
if option in option_dict:
if option_dict[option]["divider"] != 1:
value /= float(option_dict[option]["divider"])
setattr(self, "_" + option, value)
break
def get_input_frame(self, frame):
"""
Pads and rescales the frame to fit the network input size.
Takes an arbitrary picture and scales it to fit into the networks
receptive field. Pads either horizontally or vertically to fit the
exact size required as input by the network.
Parameters
----------
frame : numpy array
Frame to be send to device for processing
Returns
-------
nn_frame : numpy array
Frame suitable to be passed into the pose estimation network.
"""
self._scale_factor = min(self._input_shape[1] / frame.shape[0],
self._input_shape[0] / frame.shape[1])
scaled = cv2.resize(frame, (int(frame.shape[1] * self._scale_factor),
int(frame.shape[0] * self._scale_factor)))
pad_width = (self._input_shape[0] - scaled.shape[1]) / 2
pad_height = (self._input_shape[1] - scaled.shape[0]) / 2
# floor&ceil values to account for possibly odd amount of padding
self._pad_top = int(np.floor(pad_height))
self._pad_left = int(np.floor(pad_width))
self._pad_bottom = int(np.ceil(pad_height))
self._pad_right = int(np.ceil(pad_width))
nn_frame = cv2.copyMakeBorder(scaled, self._pad_top, self._pad_bottom,
self._pad_left, self._pad_right,
cv2.BORDER_CONSTANT)
return nn_frame.transpose(2, 0, 1)
def get_original_frame(self, frame):
"""
Transforms a frame from NN input shape back into original shape.
Removes any padding and scaling applied in get_input_frame.
Parameters
----------
frame : numpy array
Frame to be transformed
Returns
-------
frame : numpy array
Frame with transformations removed
"""
if self._pad_top is not None:
frame = frame[self._pad_top:frame.shape[0] - self._pad_bottom,
self._pad_left:frame.shape[1] - self._pad_right]
if self._scale_factor is not None and self._scale_factor != 1:
frame = cv2.resize(frame,
(int(frame.shape[1] / self._scale_factor),
int(frame.shape[0] / self._scale_factor)))
return frame
def get_pose_data(self, raw_output):
"""
Decodes raw outputs into pose data.
Retrieves network outputs from raw data packet and decodes results into
a personwise keypoint array. Decoding is done by calling the abstract
method decode_results which needs to be implemented by the individual
classes.
After decoding any padding and scaling that may have been applied in
get_input_frame is removed from the keypoints to fit the original
frame.
Parameters
----------
raw_output : depthai.NNData object
Raw output from the neural network retrieved from depthai pipeline.
Returns
-------
personwise_keypoints : numpy array
Numpy array of shape (n, self._num_keypoints, 3) where n is the
number of detected people. For each person contains each
landmark as (x,y,confidence), if a landmark was not detected all 3
values will be zero.
"""
outputs = self._convert_raw_outputs(raw_output)
personwise_keypoints = self.decode_results(outputs)
# If the frame got scaled and padded before sending the frame to the NN
# (in get_input_frame in case of sending a local file) we need to
# remove this padding and scaling from the keypoints
if personwise_keypoints.shape[0] > 0:
if self._pad_top is not None:
keypoint_ids = np.nonzero(personwise_keypoints[:, :, -1])
personwise_keypoints[keypoint_ids[0], keypoint_ids[1], :2] -= [
self._pad_left, self._pad_top
]
if self._scale_factor is not None and self._scale_factor != 1:
personwise_keypoints[:, :, :2] /= self._scale_factor
return personwise_keypoints
def draw_results(self, personwise_keypoints, frame):
"""
Draws the detected keypoints onto the given frame.
Skips keypoints that were not detected (by assuming those have a
confidence value of zero).
Parameters
----------
personwise_keypoints : numpy array
Keypoint array of the form [n,self._num_keypoints, 3] with
(x,y,confidence) information for each keypoint as returned by
get_pose_data.
frame : numpy array
The frame to draw on. The frame is modified in place.
"""
for person_id, person in enumerate(personwise_keypoints):
# Draw keypoints
if person_id % 3 == 0:
point_colour = (0, 0, 255)
line_colour = (255, 0, 0)
elif person_id % 3 == 1:
point_colour = (0, 255, 0)
line_colour = (0, 0, 255)
else:
point_colour = (255, 0, 0)
line_colour = (0, 255, 0)
for i in range(len(person)):
# Confidence = 0 means not detected
if person[i][2] == 0:
continue
cv2.circle(frame, tuple(person[i][0:2].astype(int)), 2,
point_colour, -1, cv2.LINE_AA)
# Draw connections
for connection in self.connections:
# Confidence = 0 means not detected
confidences = person[connection, 2]
if 0 in confidences:
continue
pt1, pt2 = person[connection, :2].astype(int)
cv2.line(frame, tuple(pt1), tuple(pt2), line_colour, 1,
cv2.LINE_AA)
def | |
= {
'Elite': levelSwitchE.get(self.level),
'Common': levelSwitchC.get(self.level)
}
return typeSwitch.get(self.unitType)
    def MageAttributes(self):
        # Mage attribute spreads per unit type ('Elite'/'Common') and level.
        # NOTE(review): building these dicts calls self.setAttributes EAGERLY
        # for every level (dict values are the calls' return values), so any
        # side effects of setAttributes run for all 20 entries on each lookup
        # and the last call wins -- confirm setAttributes returns a fresh
        # result and is safe to call repeatedly.
        levelSwitchE = {
            1: self.setAttributes([4,1,3,3,1,1,0,0,0]),
            2: self.setAttributes([4,1,3,3,1,1,0,0,0]),
            3: self.setAttributes([5,1,3,3,2,1,0,0,0]),
            4: self.setAttributes([5,1,3,3,2,1,0,1,1]),
            5: self.setAttributes([5,1,3,4,3,1,1,1,1]),
            6: self.setAttributes([7,1,3,4,3,1,1,1,1]),
            7: self.setAttributes([7,1,3,4,4,1,1,1,2]),
            8: self.setAttributes([8,1,3,4,4,1,2,1,2]),
            9: self.setAttributes([8,1,3,5,5,1,2,1,2]),
            10: self.setAttributes([10,1,3,5,5,1,3,1,2])
        }
        levelSwitchC = {
            1: self.setAttributes([3,1,3,0,0,1,0,0,0]),
            2: self.setAttributes([3,1,3,0,0,1,0,0,0]),
            3: self.setAttributes([4,1,3,0,0,2,0,0,0]),
            4: self.setAttributes([4,1,3,0,0,2,1,0,0]),
            5: self.setAttributes([4,1,3,0,0,2,1,0,0]),
            6: self.setAttributes([4,1,3,0,0,2,1,1,0]),
            7: self.setAttributes([4,1,3,0,0,2,1,1,0]),
            8: self.setAttributes([5,1,3,0,0,2,1,1,0]),
            9: self.setAttributes([5,1,4,0,0,2,1,1,0]),
            10: self.setAttributes([5,1,4,0,0,2,2,1,0])
        }
        # pick the spread for this unit's type and level
        typeSwitch = {
            'Elite': levelSwitchE.get(self.level),
            'Common': levelSwitchC.get(self.level)
        }
        return typeSwitch.get(self.unitType)
    def EngineerAttributes(self):
        # Engineer attribute spreads per unit type ('Elite'/'Common') and
        # level.  NOTE(review): as with the other *Attributes methods, the
        # dict construction calls self.setAttributes eagerly for every level;
        # confirm setAttributes is safe to call repeatedly.
        levelSwitchE = {
            1: self.setAttributes([4,1,2,2,1,1,-2,-2,0]),
            2: self.setAttributes([4,1,2,2,1,1,-2,-2,0]),
            3: self.setAttributes([4,1,2,2,1,1,-2,-2,0]),
            4: self.setAttributes([5,1,2,2,1,1,-2,-2,0]),
            5: self.setAttributes([5,1,2,2,1,1,-2,-2,0]),
            6: self.setAttributes([5,1,2,2,1,1,-2,-2,0]),
            7: self.setAttributes([6,1,2,2,1,1,-2,-2,0]),
            8: self.setAttributes([6,1,2,2,1,1,-2,-2,0]),
            9: self.setAttributes([6,1,2,2,1,1,-2,-2,0]),
            10: self.setAttributes([7,1,2,2,1,1,-2,-2,0])
        }
        levelSwitchC = {
            1: self.setAttributes([4,1,3,0,0,1,-1,0,0]),
            2: self.setAttributes([4,1,3,0,0,1,-1,1,0]),
            3: self.setAttributes([5,2,3,0,0,2,-1,1,0]),
            4: self.setAttributes([5,2,4,0,0,2,-1,1,1]),
            5: self.setAttributes([5,2,4,0,0,2,0,2,1]),
            6: self.setAttributes([6,2,4,0,0,2,0,2,1]),
            7: self.setAttributes([6,2,4,0,0,2,0,2,2]),
            8: self.setAttributes([6,2,5,0,0,2,0,2,2]),
            9: self.setAttributes([7,2,5,0,0,3,0,2,2]),
            10: self.setAttributes([7,3,5,0,0,3,0,2,3])
        }
        # pick the spread for this unit's type and level
        typeSwitch = {
            'Elite': levelSwitchE.get(self.level),
            'Common': levelSwitchC.get(self.level)
        }
        return typeSwitch.get(self.unitType)
# Each unit gets an instance of the ability? Or each player?
# Add name of each ability to the list available options?
# I think each ability will need to be integrated into the class-specific
# code. i.e. add code to check name of available abilities at normal places
class ReactionManager(GeneralUse):
    """Tracks the current trigger state and resolves unit reactions.

    Workflow: checkReaction records the trigger state, collects the
    reactions the unit can currently afford for that trigger and (for now)
    picks one at random, returning 'Pass' when none are available.
    """

    # current trigger state; 'None' (the string) means no trigger pending
    state = 'None'

    def setState(self, state):
        """Record the trigger state reactions will be matched against."""
        self.state = state

    def checkReaction(self, unit, target, gameboard, states):
        """Pick an available reaction for <unit>, or 'Pass' if there is none."""
        self.setState(states)
        choices = self.availableReactions(gameboard[unit], gameboard)
        return random.choice(choices) if choices else 'Pass'

    def availableReactions(self, unit, gameboard):
        """Names of the unit's abilities with a payable 'Reaction' cost whose
        trigger state matches the current state."""
        points = set(unit.availablePoints())
        return [ability.name for ability in unit.abilities.values()
                if 'Reaction' in ability.cost
                and set(ability.cost['Reaction']).issubset(points)
                and self.state == ability.state]

    def multipleReactionPoints(self, unit, gameboard):
        """Randomly choose how many reaction points to spend (1 .. max-1)."""
        maxReactions = gameboard[unit].attributeManager.getAttributes('Reaction')
        return random.choice(list(range(1, maxReactions)))
class AttributeManager(GeneralUse):
    """Owns a unit's current attribute pool plus its temporary and permanent
    bonuses.

    currentAttributes maps attribute name -> remaining points;
    bonusAttributes holds end-of-turn temporary bonuses and permBonusAttr
    accumulated permanent upgrades.
    """

    # Class-level defaults kept for backward compatibility; __init__ gives
    # every instance its own copies.  Previously these dicts were shared by
    # ALL instances, so changePermanentUpgrade on one unit silently upgraded
    # every other unit as well.
    bonusAttributes = {'Health':0,'Attack':0,'Movement':0,'Reaction':0,'Special':0,'Hit':0,'Evasion':0,'Armor':0}
    permBonusAttr = {'Health':0,'Attack':0,'Movement':0,'Special':0,'Reaction':0,'Damage':0,'Evasion':0,'Hit':0,'Armor':0}

    def __init__(self, currentAttributes):
        self.currentAttributes = currentAttributes
        # per-instance copies of the mutable bonus tables (see note above)
        self.bonusAttributes = dict(AttributeManager.bonusAttributes)
        self.permBonusAttr = dict(AttributeManager.permBonusAttr)

    def getAttributes(self, attribute):
        # Return the current value for <attribute>, or None if unknown.
        return self.currentAttributes.get(attribute)

    def changeAttributes(self, attribute, value):
        # Add <value> to <attribute>; 'Passive' is never a spendable pool.
        if attribute != 'Passive':
            self.currentAttributes[attribute] = self.currentAttributes[attribute] + value

    def setAttributes(self, attribute, value):
        # Overwrite <attribute> with <value> plus its temporary bonus.
        self.currentAttributes[attribute] = value + self.bonusAttributes[attribute]

    # bonus attributes set at the end of your turn
    def setBonusAttributes(self, attribute, value):
        self.currentAttributes[attribute] = self.bonusAttributes[attribute] + value

    def changePermanentUpgrade(self, attribute, value):
        # Accumulate a permanent upgrade; applied via Unit.addBonuses.
        self.permBonusAttr[attribute] = self.permBonusAttr[attribute] + value
class Unit(GeneralUse):
    # Class-level defaults.  Immutable ones are safe to share; mutable state
    # is copied per instance in __init__ (see note there).
    built = False
    reactionManager = ReactionManager()
    eliminatedUnits = {'Elite':0, 'Common':0, 'Objective':0}
    unitRange = 1
    rangeBonus = 0
    unrestrainedMovement = False
    moveable = True
    aura = 'None'
    location = (-1,-1)
    name = 'Unit'

    def __init__(self, unitType, unitName):
        # unitType is 'Elite'/'Common'; unitName is this unit's display name.
        self.unitType = unitType
        self.unitName = unitName
        # Own kill tally per instance: the class-level dict was shared by
        # every unit, so one unit's kills inflated all other units' counts
        # (the playerID guard in eliminateUnit suggests per-unit tallies are
        # intended).  The class attribute is kept above for compatibility.
        self.eliminatedUnits = {'Elite': 0, 'Common': 0, 'Objective': 0}
        self.direction = random.choice(self.directions)
        self.lineOfSightManager = LOS.LineOfSight(self.direction)
    def setClass(self,playerClass,playerID,captureCost):
        # Bind this unit to its owning player and class: roll its level-1
        # attributes and install the base ability set.
        self.playerClass = playerClass
        self.playerID = playerID
        self.levelManager = LevelManager(1,playerClass,self.unitType)
        self.unitAttributes = self.levelManager.getAttributes()
        self.maxHealth = self.unitAttributes['Health']
        self.attributeManager = AttributeManager(self.unitAttributes)
        self.captureCost = captureCost
        # abilities every unit has regardless of class
        self.baseAbilities = {'Attack':Attack(self.name,playerID), 'Movement':Movement(self.name,playerID), 'Reorient':Reorient(self.name,playerID), 'Perception':Perception(self.name,playerID),
                              'AccurateStrike': AccurateStrike(self.name,playerID),'Avoid':Avoid(self.name,playerID),'PurposefulDodge':PurposefulDodge(self.name,playerID),'RedirectedStrike':RedirectedStrike(self.name,playerID),
                              'Pass': Pass(self.name,playerID)}
        # NOTE(review): self.abilities ALIASES self.baseAbilities (same dict
        # object), so later additions to abilities also appear in
        # baseAbilities -- confirm this is intended
        self.abilities = self.baseAbilities
def addBonuses(self):
for x in self.attributeManager.permBonusAttr:
self.attributeManager.changeAttributes(x,self.attributeManager.permBonusAttr[x])
self.unitRange = self.unitRange + self.rangeBonus
def createOptions(self):
# match ability costs to available points
# excludes passives since it matches available points to cost
options = [x.name for x in self.abilities.values() if 'Turn' in x.cost and set(x.cost['Turn']).issubset(set(self.availablePoints()))]
return options # this is ability names
def availablePoints(self):
return [x for x in self.attributeManager.currentAttributes if self.attributeManager.currentAttributes.get(x) != 0]
def useAbility(self,ability,time):
[self.attributeManager.changeAttributes(x,-1) for x in self.abilities.get(ability).cost[time]]
def getDistance(self,target):
if (target[0] - self.location[0] >= 0 and target[1] - self.location[1] <= 0) or (target[0] - self.location[0] <= 0 and target[1] - self.location[1] >= 0):
return abs(target[1]-self.location[1])+abs(target[0]-self.location[0])
if (target[0] - self.location[0] >= 0 and target[1] - self.location[1] >= 0) or (target[0] - self.location[0] <= 0 and target[1] - self.location[1] <= 0):
return max(abs(target[1]-self.location[1]),abs(target[0]-self.location[0]))
def getLineOfSight(self,gameboard):
self.lineOfSight = self.lineOfSightManager.allLineOfSight(self.direction,self.location,gameboard)
def createCombatModifiers(self,*args):
return args
def changeLocation(self,location):
self.location = location
return
def passiveMods(self,unit,target,gameboard,combatSteps):
return gameboard,combatSteps
def movementEffects(self,unit,target,gameboard):
return gameboard
def addMovementSpaces(self,unit,gameboard,spaces):
return spaces
def eliminateUnit(self,unitType,unit,playerID,gameboard):
if self.playerID != playerID:
self.eliminatedUnits[unitType] = self.eliminatedUnits[unitType] + 1
if 'WarriorAttack' in self.abilities:
if self.unitType == 'Elite':
for x in [y for y in gameboard if gameboard[y].playerID == playerID]:
if x.weaponUpgrades[self.weapon] < 3:
x.weaponUpgrades[self.weapon] = x.weaponUpgrades[self.weapon] + 1
elif self.weaponUpgrades[self.weapon] < 3:
self.weaponUpgrades[self.weapon] = self.weaponUpgrades[self.weapon] + 1
gameboard[unit].location = 'None'
gameboard['EliminatedUnits'].eliminatedUnits[playerID + ' ' + gameboard[unit].name] = gameboard[unit]
del gameboard[unit]
return gameboard
def classUpgrades(self,unit):
return
def statEffect(self,unitObj):
return unitObj
def generateMovementEffects(self,*args):
return
def setLastAction(self,action):
self.lastAction = action
def setDirection(self,direction,gameboard):
self.direction = direction
self.lineOfSightManager.setDirection(direction,self.location,gameboard)
class Player(GeneralUse):
    """Owns one side's five units and drives that side's turns."""
    # NOTE(review): class-level attributes are shared across instances until
    # shadowed; ``abilities = []`` is a shared mutable default — confirm every
    # write path rebinds it per instance (chooseAbility does rebind).
    abilities = []
    victoryPoints = 0
    level = 1
    experiencePoints = 0
    captureCost = 'Attack'
    def __init__(self,playerClass,playerID):
        self.playerClass = playerClass
        self.playerID = playerID
        # instantiate new units
        self.units = {'Elite':Unit('Elite','Elite1'),'Common1':Unit('Common','Common1'),\
                      'Common2':Unit('Common','Common2'),'Common3':Unit('Common','Common3'),\
                      'Common4':Unit('Common','Common4')}
        for unit in self.units:
            self.units[unit].setClass(self.playerClass,self.playerID,self.captureCost)
    def turn(self,gameboard,players):
        """Play one full turn: repeatedly pick a random unit and ability until
        'Pass' is drawn, syncing eliminations and unit state as we go."""
        # while not passed keep going
        gameboard = self.beginningTurnEffects(gameboard)
        while True:
            # All of this player's on-board units, plus a synthetic 'Pass' entry.
            unitChoices = {x:gameboard[x] for x in gameboard.keys() if gameboard[x].name == 'Unit' and gameboard.get(x).playerID == self.playerID}
            unitChoices['Pass'] = 'Pass'
            for unit in self.units:
                if self.units[unit].location not in gameboard:
                    self.units[unit].location = 'None'
                if self.units[unit].location != 'None':
                    self.units[unit].unitOptions = self.units[unit].createOptions()
                    # An adjacent EMPTower suppresses 'Special'-cost abilities.
                    if [x for x in self.getAOETargets(4,self.units[unit].location) if x in gameboard and gameboard[x].name == 'EMPTower']:
                        # NOTE(review): ``unitOptions`` is a list of ability
                        # *names*; using the name ``x`` as a list index and
                        # ``del``-ing while iterating will raise TypeError the
                        # first time this branch runs.  Likely intended:
                        # iterate a copy, look up ``abilities[x]``, and
                        # ``unitOptions.remove(x)``.  Confirm against callers.
                        for x in self.units[unit].unitOptions:
                            abil = self.units[unit].abilities[self.units[unit].unitOptions[x]]
                            if 'Turn' in abil.cost:
                                if abil.cost['Turn'] == 'Special':
                                    del self.units[unit].unitOptions[x]
            # unit choice key (x,y)
            unitChoiceKey = random.choice(list(unitChoices.keys()))
            # unit object
            unitChoiceObject = unitChoices.get(unitChoiceKey)
            if unitChoiceObject == 'Pass':
                # End of turn: zero out the temporary bonuses on every unit
                # (both the player-side copy and the on-board object).
                for x in self.units:
                    for attr in ['Attack','Movement','Reaction','Special']:
                        self.units[x].attributeManager.setBonusAttributes(attr,0)
                        if self.units[x].location in gameboard:
                            gameboard[self.units[x].location].attributeManager.setBonusAttributes(attr,0)
                break
            # execute ability
            elif unitChoiceObject.unitOptions:
                ability = unitChoiceObject.abilities.get(random.choice(unitChoiceObject.unitOptions))
                print(ability)
                # subtract cost from unit points
                if ability != 'Movement':
                    for x in ability.cost['Turn']:
                        gameboard[unitChoiceKey].attributeManager.changeAttributes(x,-1)
                # call ability execute function
                gameboard = ability.execute(unitChoiceKey,gameboard)
                # check gameboard for eliminated units and update players' units
                eliminatedUnits = [x for x in gameboard['EliminatedUnits'].eliminatedUnits]
                for elimUnit in eliminatedUnits:
                    player = [x for x in players if x.playerID == gameboard['EliminatedUnits'].eliminatedUnits[elimUnit].playerID][0]
                    updateUnit = [x for x in player.units if player.units[x].name == gameboard['EliminatedUnits'].eliminatedUnits[elimUnit].name][0]
                    player.units[updateUnit] = gameboard['EliminatedUnits'].eliminatedUnits[elimUnit]
                gameboard['EliminatedUnits'].eliminatedUnits = {}
                # Sync our surviving on-board units back through self.units and
                # drop any that ended up at or below zero health.
                possibleUnits = [x for x in gameboard if gameboard[x].name == 'Unit']
                for unit in possibleUnits:
                    if unit in gameboard:
                        if gameboard[unit].playerID == self.playerID:
                            self.updateUnits(gameboard[unit])
                            # self.classUpgrades(gameboard[unit])
                            gameboard[unit] = self.units[gameboard[unit].name]
                            if gameboard[unit].attributeManager.getAttributes('Health') <= 0:
                                del gameboard[unit]
        gameboard = self.endTurnEffects(gameboard)
        return gameboard, players
    def updateUnits(self,unit):
        # Adopt the on-board copy as authoritative, then clear per-turn state.
        # NOTE(review): keyed by ``unit.name`` — for the base Unit class that
        # is always 'Unit', not the 'Elite'/'Common1' keys used in __init__;
        # presumably subclasses override ``name``.  Verify.
        self.units[unit.name] = unit
        self.units[unit.name].unrestrainedMovement = False
        for x in self.units[unit.name].attributeManager.bonusAttributes:
            self.units[unit.name].attributeManager.bonusAttributes[x] = 0
    def respawnUnits(self,gameboard):
        # finds units not in gameboard but in player unit list
        respawnPoints = [b for c in [self.adjacentSpaces(a) for a in [x for x in gameboard if gameboard[x].name == 'Respawn' and gameboard[x].playerID == self.playerID]] for b in c]
        # NOTE(review): this takes the difference between unit-name *strings*
        # and unit *objects*, which is always the full key set, so every unit
        # respawns every time.  Probably should compare against
        # ``gameboard[x].name`` (or unitName).  Confirm.
        units = list(set(self.units.keys()).difference(set([gameboard[x] for x in gameboard if gameboard[x].playerID == self.playerID])))
        for x in units:
            location = random.choice(respawnPoints)
            gameboard = self.addUnit(self.units[x], location , gameboard)
            gameboard[location].direction = random.choice(self.directions)
            respawnPoints.remove(location)
    def addUnit(self,unit,location,gameboard):
        # add one of your units to the board game
        gameboard[location] = unit
        gameboard[location].location = location
        gameboard[location].lineOfSightManager.setDirection(gameboard[location].direction,location,gameboard)
        gameboard[location].addBonuses()
        return gameboard
    def gainExp(self):
        self.experiencePoints = self.experiencePoints + 1
    def manageExp(self):
        # handle leveling and returning abilities
        # NOTE(review): ``self.experiencePoints`` is overwritten on every loop
        # iteration, so only the last unit's kill totals count toward leveling —
        # an accumulating ``+=`` looks intended.  Confirm before changing.
        for unit in self.units:
            self.experiencePoints = self.units[unit].eliminatedUnits['Elite'] + self.units[unit].eliminatedUnits['Common'] + self.units[unit].eliminatedUnits['Objective']
            if self.experiencePoints != 0 and self.level < 6:
                self.victoryPoints = self.victoryPoints + 1
            elif self.experiencePoints == 0 and self.level < 6:
                self.experiencePoints = 1
        if self.experiencePoints < 3:
            for x in range(0,self.experiencePoints):
                self.levelUp()
        else:
            # Level-ups are capped at 2 per turn; excess XP converts to VP.
            for x in range(0,2):
                self.levelUp()
            self.victoryPoints = self.victoryPoints + self.experiencePoints -2
        self.experiencePoints = 0
    def levelUp(self):
        # Raise the player level (cap 10) and sync it onto every unit.
        if self.level < 10:
            self.level = self.level + 1
            for unit in self.units.values():
                unit.levelManager.level = self.level
            # self.chooseAbility(random.choice(self.availableAbilities()))
    def chooseAbility(self,ability):
        # Merge a newly learned ability dict in front of the existing ones.
        self.abilities = {**ability,**self.abilities}
        return
    def gainVictoryPoints(self,points):
        self.victoryPoints = self.victoryPoints + points
    def availableAbilities(self):
        # Hook overridden by class-specific subclasses.
        return
    def beginningTurnEffects(self,gameboard):
        # Hook overridden by class-specific subclasses; no-op by default.
        return gameboard
    def endTurnEffects(self,gameboard):
        # Tick down lingering effects (Pyres on the board, a pending Meteor).
        if 'Pyre' in gameboard:
            # NOTE(review): deleting from ``gameboard['Pyre']`` while iterating
            # it will raise RuntimeError if it is a dict — iterate a copy.
            for x in gameboard['Pyre']:
                x.dealDamage(gameboard)
                x.active = x.active - 1
                if x.active == 0:
                    del gameboard['Pyre'][x]
        if 'Meteor' in self.abilities:
            if self.abilities['Meteor'].active != 0:
                self.abilities['Meteor'].active = self.abilities['Meteor'].active - 1
                if self.abilities['Meteor'].active == 0:
                    self.abilities['Meteor'].dealDamage(gameboard)
        return gameboard
class Objective(GeneralUse):
    """An immobile, ownerless board objective.

    Carries just enough of the Unit interface (name tag, attributeManager,
    reactionManager, a 'Pass' ability) for gameboard scans to treat it
    uniformly with units.
    """
    # NOTE(review): all attributes below are class-level, so every Objective
    # instance shares the single AttributeManager / ReactionManager / ability
    # objects created here — confirm that sharing (e.g. of the mutable Health
    # pool) is intentional.
    name = 'Objective'
    moveable = False
    playerID = 'None'
    playerClass = 'None'
    armor = 0
    health = 0
    attributeManager = AttributeManager({'Health':0,'Evasion':-10,'Armor':0})
    reactionManager = ReactionManager()
    abilities = {'Pass':Pass('None','None')}
| |
# Copyright 2018 PayTrace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
import functools
from io import StringIO
import json
import logging
import os.path
import shutil
import yaml
from .cases import (
IdentificationListReader as CaseIdListReader,
hash_from_fields as _hash_from_fields,
)
from .exceptions import MultipleAugmentationEntriesError, NoAugmentationError
from .augmentation.compact_file import (
augment_dict_from,
case_keys as case_keys_in_compact_file,
TestCaseAugmenter as CompactFileAugmenter,
Updater as CompactAugmentationUpdater,
)
from .augmentation import update_file
from .utils import (
FilteredDictView as _FilteredDictView,
open_temp_copy,
)
from .yaml_tools import (
YAML_EXT,
content_events as _yaml_content_events,
get_load_all_fn as _get_yaml_load_all,
)
logger = logging.getLogger(__name__)
class InterfaceCaseProvider:
    """Test case data manager

    Use an instance of this class to:

    * Generate test case data :class:`dict`\ s
    * Decorate the case runner function (if auto-updating of compact
      augmentation data files is desired)
    * Merge extension test case files to the main test case file
    * Other case augmentation management tasks

    Setting :attr:`use_body_type_magic` to ``True`` automatically parses the
    ``"request body"`` value as JSON if ``"request type"`` in the same test
    case is ``"json"``, and similarly for ``"response body"`` and
    ``"response type"``.

    .. automethod:: __init__
    """
    use_body_type_magic = False
    safe_yaml_loading = True

    class _UpdateState(Enum):
        # Lifecycle of the "rewrite compact augmentation files" decision:
        # not_requested -> requested (first wrapped test call)
        #               -> aborted   (any wrapped test raised)
        not_requested = '-'
        requested = '?'
        aborted = '!'
        def __repr__(self, ):
            return "<{}.{}>".format(type(self).__name__, self.name)

    _case_augmenter = None

    def __init__(self, spec_dir, group_name, *, case_augmenter=None):
        """Constructing an instance

        :param spec_dir: File system directory for test case specifications
        :param group_name: Name of the group of tests to load
        :keyword case_augmenter:
            *optional* An object providing the interface of a
            :class:`.CaseAugmenter`

        The main test case file of the group is located in *spec_dir* and is
        named for *group_name* with the '.yml' extension added.  Extension
        test case files are found in the *group_name* subdirectory of
        *spec_dir* and all have '.yml' extensions.
        """
        super().__init__()
        self._spec_dir = spec_dir
        self._group_name = group_name
        self._compact_files_update = self._UpdateState.not_requested
        if case_augmenter:
            self._case_augmenter = case_augmenter
            # Shadow the identity-method fallback with the augmenter's hook.
            self._augmented_case = case_augmenter.augmented_test_case

    @property
    def spec_dir(self):
        """The directory containing the test specification files for this instance"""
        return self._spec_dir

    @property
    def group_name(self):
        """Name of group of test cases to load for this instance"""
        return self._group_name

    @property
    def case_augmenter(self):
        """The :class:`.CaseAugmenter` instance used by this object, if any"""
        return self._case_augmenter

    @property
    def main_group_test_file(self):
        """Path to the main test file of the group for this instance"""
        return os.path.join(self.spec_dir, self.group_name + YAML_EXT)

    def extension_files(self, ):
        """Get an iterable of the extension files of this instance"""
        return extension_files(self.spec_dir, self.group_name)

    def cases(self, ):
        """Generates :class:`dict`\ s of test case data

        This method reads test cases from the group's main test case file
        and auxiliary files, possibly extending them with augmented data (if
        *case_augmenter* was given in the constructor).
        """
        yield from self._cases_from_file(self.main_group_test_file)
        for ext_file in sorted(self.extension_files()):
            yield from self._cases_from_file(ext_file)
        # All cases ran; apply any pending compact-file update exactly once.
        if self._compact_files_update is self._UpdateState.requested:
            self.update_compact_files()

    def update_compact_augmentation_on_success(self, fn):
        """Decorator for activating compact data file updates

        Using this decorator around the test functions tidies up the logic
        around whether to propagate test case augmentation data from update
        files to compact files.  The compact files will be updated if all
        interface tests succeed and not if any of them fail.

        The test runner function can be automatically wrapped with this
        functionality through :meth:`case_runners`.
        """
        CFUpdate = self._UpdateState
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            # Once aborted, never go back to "requested" — one failure
            # permanently vetoes the update for this provider instance.
            if self._compact_files_update is not CFUpdate.aborted:
                self._compact_files_update = CFUpdate.requested
            try:
                return fn(*args, **kwargs)
            except:
                self._compact_files_update = CFUpdate.aborted
                raise
        return wrapper

    def case_runners(self, fn, *, do_compact_updates=True):
        """Generates runner callables from a callable

        The callables in the returned iterable each call *fn* with all the
        positional arguments they are given, the test case :class:`dict` as an
        additional positional argument, and all keyword arguments passed to
        the case runner.

        Using this method rather than :meth:`cases` directly for running tests
        has two advantages:

        * The default of *do_compact_updates* automatically applies
          :meth:`update_compact_augmentation_on_success` to *fn*
        * Each returned runner callable will log the test case as YAML prior
          to invoking *fn*, which is helpful when updating the augmenting data
          for the case becomes necessary

        Each callable generated will also have the case data available via
        an :attr:`case` on the callable.
        """
        if do_compact_updates and self._case_augmenter is not None:
            fn = self.update_compact_augmentation_on_success(fn)
        def make_runner(case):
            # BUG FIX: the runner used to be defined directly inside the
            # ``for`` loop, closing over the loop *variable*; consumers that
            # collected all runners before calling any of them would run the
            # last case every time.  A factory gives each runner its own
            # binding of ``case``.
            @functools.wraps(fn)
            def runner(*args, **kwargs):
                logger.info("{}\n{}".format(
                    " CASE TESTED ".center(40, '*'),
                    yaml.dump([case]),
                ))
                return fn(*args, case, **kwargs)
            runner.case = case
            return runner
        for case in self.cases():
            yield make_runner(case)

    def update_compact_files(self, ):
        """Calls the :class:`CaseAugmenter` to apply compact data file updates

        :raises NoAugmentationError:
            when no case augmentation data was specified during construction
            of this object
        """
        if self._case_augmenter is None:
            raise NoAugmentationError("No augmentation data specified")
        return self._case_augmenter.update_compact_files()

    def merge_test_extensions(self, ):
        """Merge the extension files of the target group into the group's main file"""
        ext_files = sorted(self.extension_files())
        with open(self.main_group_test_file, 'ab') as fixed_version_specs:
            for ext_file in ext_files:
                ext_file_ref = os.path.relpath(ext_file, os.path.join(self.spec_dir, self.group_name))
                # BUG FIX: the previous code did
                #     print(b"...", file=fixed_version_specs)
                # which raises TypeError at runtime: print() writes *str* to
                # its target, and a binary-mode file only accepts bytes.
                # Write the encoded separator directly; the extra trailing
                # newline reproduces what print() would have appended.
                fixed_version_specs.write(
                    "---\n# From {}\n\n".format(ext_file_ref).encode('utf8'))
                with open(ext_file, 'rb') as ext_specs:
                    shutil.copyfileobj(ext_specs, fixed_version_specs)
        # Only remove the extensions once every one has been merged.
        for ext_file in ext_files:
            os.remove(ext_file)

    def _augmented_case(self, x):
        """This method is defined to be overwritten on the instance level when augmented data is used"""
        return x

    def _cases_from_file(self, filepath):
        # Each YAML document in the file is a *list* of case dicts; flatten
        # them and run each through JSON-magic and augmentation.
        with open(filepath, 'rb') as file:
            load_all_yaml = _get_yaml_load_all(safe=self.safe_yaml_loading)
            for test_case in (
                tc
                for case_set in load_all_yaml(file)
                for tc in case_set
            ):
                if self.use_body_type_magic:
                    _parse_json_bodies(test_case)
                yield self._augmented_case(test_case)
def extension_files(spec_dir, group_name):
    """Iterator of file paths for extensions of a test case group

    :param spec_dir: Directory in which specifications live
    :param group_name: Name of the group to iterate
    """
    group_dir = os.path.join(spec_dir, group_name)
    yield from data_files(group_dir)
def data_files(dir_path):
    """Yield the paths of YAML data files directly inside *dir_path*.

    A missing directory is treated the same as an empty one.
    """
    try:
        names = os.listdir(dir_path)
    except FileNotFoundError:
        return
    for name in names:
        candidate = os.path.join(dir_path, name)
        if os.path.isfile(candidate) and candidate.endswith(YAML_EXT):
            yield candidate
def _parse_json_bodies(test_case):
if test_case.get('request type') == 'json':
test_case['request body'] = json.loads(test_case['request body'])
if test_case.get('response type') == 'json':
test_case['response body'] = json.loads(test_case['response body'])
class CaseAugmenter:
"""Base class of case augmentation data managers
This class uses and manages files in a case augmentation directory. The
data files are intended to either end in '.yml' or '.update.yml'.
The version control system should, typically, be set up to ignore files
with the '.update.yml' extension. These two kinds of files have a different
"data shape".
Update files (ending in '.update.yml') are convenient for manual editing
because they look like the test case file from which the case came, but
with additional entries in the case data :class:`dict`. The problems with
long term use of this file format are A) it is inefficient for correlation
to test cases, and B) it duplicates data from the test case, possibly
leading to confusion when modifying the .update.yml file does not change
the test case.
Compact data files (other files ending in '.yml') typically are generated
through this package. The format is difficult to manually correlate with
the test file, but does not duplicate all of the test case data as does the
update file data format. Instead, the relevant keys of the test case are
hashed and the hash value is used to index the additional augmentation
value entries.
It is an error for a test case to have multiple augmentations defined
within .yml files (excluding .update.yml files), whether in the same or
different files. It is also an error for multiple files with the
.update.yml extension to specify augmentation for the same case, though
within the same file the last specification is taken. When augmentations
for a case exist within both one .update.yml and one .yml file, the
.update.yml is used (with the goal | |
# <gh_stars>0
"""
This module contains the class SRacos, which is the sequential version of Racos (a classification based optimization algorithm).
Author:
<NAME>
Updated by:
<NAME>
"""
import time
import numpy as np
from zoopt.algos.opt_algorithms.racos.racos_classification import RacosClassification
from zoopt.algos.opt_algorithms.racos.racos_common import RacosCommon
from zoopt.solution import Solution
from zoopt.utils.tool_function import ToolFunction
from zoopt import Objective
class SRacos(RacosCommon):
    """
    The class SRacos represents Sequential Racos algorithm. It's inherited from RacosCommon.
    """
    def __init__(self):
        """
        Initialization.
        """
        RacosCommon.__init__(self)
        return
    def opt(self, objective, parameter, strategy='WR', ub=1):
        """
        SRacos optimization.

        :param objective: an Objective object
        :param parameter: a Parameter object
        :param strategy: replace strategy ('WR', 'RR' or 'LM', see replace())
        :param ub: uncertain bits, which is a parameter of SRacos
        :return: Optimization result (a Solution object)
        """
        self.clear()
        self.set_objective(objective)
        self.set_parameters(parameter)
        self.init_attribute()
        stopping_criterion = self._parameter.get_stopping_criterion()
        i = 0
        # The first train_size evaluations were spent in init_attribute().
        iteration_num = self._parameter.get_budget() - self._parameter.get_train_size()
        time_log1 = time.time()
        # Give up if sampling yields duplicates this many times in a row.
        max_distinct_repeat_times = 100
        current_not_distinct_times = 0
        while i < iteration_num:
            sampled_data = self._positive_data + self._negative_data
            # With the configured probability, sample from a freshly trained
            # classifier; otherwise sample uniformly from the search space.
            if np.random.random() < self._parameter.get_probability():
                classifier = RacosClassification(
                    self._objective.get_dim(), self._positive_data, self._negative_data, ub)
                classifier.mixed_classification()
                solution, distinct_flag = self.distinct_sample_classifier(
                    classifier, sampled_data, True, self._parameter.get_train_size())
            else:
                solution, distinct_flag = self.distinct_sample(self._objective.get_dim(), sampled_data)
            # panic stop
            if solution is None:
                ToolFunction.log(" [break loop] because solution is None")
                return self._best_solution
            if distinct_flag is False:
                # Duplicate sample: retry without consuming budget, but bail
                # out after too many consecutive failures.
                current_not_distinct_times += 1
                if current_not_distinct_times >= max_distinct_repeat_times:
                    ToolFunction.log(
                        "[break loop] because distinct_flag is false too much times")
                    return self._best_solution
                else:
                    continue
            # evaluate the solution
            objective.eval(solution)
            # show best solution
            times = i + self._parameter.get_train_size() + 1
            self.show_best_solution(parameter.get_intermediate_result(), times, parameter.get_intermediate_freq())
            # The element pushed out of the positive set becomes a candidate
            # for the negative set.
            bad_ele = self.replace(self._positive_data, solution, 'pos')
            self.replace(self._negative_data, bad_ele, 'neg', strategy)
            # The positive set is kept sorted by value, so index 0 is best.
            self._best_solution = self._positive_data[0]
            if i == 4:
                # Five post-init evaluations done: extrapolate total runtime.
                time_log2 = time.time()
                expected_time = (self._parameter.get_budget() - self._parameter.get_train_size()) * \
                    (time_log2 - time_log1) / 5
                if self._parameter.get_time_budget() is not None:
                    expected_time = min(
                        expected_time, self._parameter.get_time_budget())
                if expected_time > 5:
                    m, s = divmod(expected_time, 60)
                    h, m = divmod(m, 60)
                    ToolFunction.log(
                        'expected remaining running time: %02d:%02d:%02d' % (h, m, s))
            # time budget check
            if self._parameter.get_time_budget() is not None:
                if (time.time() - time_log1) >= self._parameter.get_time_budget():
                    ToolFunction.log('time_budget runs out')
                    return self._best_solution
            # terminal_value check
            if self._parameter.get_terminal_value() is not None:
                if self._best_solution.get_value() <= self._parameter.get_terminal_value():
                    ToolFunction.log('terminal function value reached')
                    return self._best_solution
            if stopping_criterion.check(self) is True:
                return self._best_solution
            i += 1
        return self._best_solution
    def replace(self, iset, x, iset_type, strategy='WR'):
        """
        Replace a solution(chosen by strategy) in iset with x.

        :param iset: a solution list
        :param x: a Solution object
        :param iset_type: 'pos' or 'neg'
        :param strategy: 'WR': worst replace or 'RR': random replace or 'LM': replace the farthest solution
        :return: the replaced solution
        """
        if strategy == 'WR':
            return self.strategy_wr(iset, x, iset_type)
        elif strategy == 'RR':
            return self.strategy_rr(iset, x)
        elif strategy == 'LM':
            # 'LM' replaces the element farthest from the current best in iset.
            best_sol = min(iset, key=lambda x: x.get_value())
            return self.strategy_lm(iset, best_sol, x)
    def binary_search(self, iset, x, begin, end):
        """
        Find the first element larger than x.

        Precondition: iset is sorted ascending by value between begin and end.

        :param iset: a solution set
        :param x: a Solution object
        :param begin: begin position
        :param end: end position
        :return: the index of the first element larger than x
        """
        x_value = x.get_value()
        if x_value <= iset[begin].get_value():
            return begin
        if x_value >= iset[end].get_value():
            return end + 1
        if end == begin + 1:
            return end
        mid = (begin + end) // 2
        if x_value <= iset[mid].get_value():
            return self.binary_search(iset, x, begin, mid)
        else:
            return self.binary_search(iset, x, mid, end)
    def strategy_wr(self, iset, x, iset_type):
        """
        Replace the worst solution in iset.

        :param iset: a solution set
        :param x: a Solution object
        :param iset_type: 'pos' or 'neg'
        :return: the worst solution
        """
        if iset_type == 'pos':
            # Positive set stays sorted: insert in order, drop the tail.
            index = self.binary_search(iset, x, 0, len(iset) - 1)
            iset.insert(index, x)
            worst_ele = iset.pop()
        else:
            # Negative set is unsorted: swap x in only if it beats the worst.
            worst_ele, worst_index = Solution.find_maximum(iset)
            if worst_ele.get_value() > x.get_value():
                iset[worst_index] = x
            else:
                worst_ele = x
        return worst_ele
    def strategy_rr(self, iset, x):
        """
        Replace a random solution in iset.

        :param iset: a solution set
        :param x: a Solution object
        :return: the replaced solution
        """
        len_iset = len(iset)
        replace_index = np.random.randint(0, len_iset)
        replace_ele = iset[replace_index]
        iset[replace_index] = x
        return replace_ele
    #
    def strategy_lm(self, iset, best_sol, x):
        """
        Replace the farthest solution from best_sol

        :param iset: a solution set
        :param best_sol: the best solution, distance between solution in iset and best_sol will be computed
        :param x: a Solution object
        :return: the farthest solution (has the largest margin) in iset
        """
        farthest_dis = 0
        farthest_index = 0
        for i in range(len(iset)):
            dis = self.distance(iset[i].get_x(), best_sol.get_x())
            if dis > farthest_dis:
                farthest_dis = dis
                farthest_index = i
        farthest_ele = iset[farthest_index]
        iset[farthest_index] = x
        return farthest_ele
    @staticmethod
    def distance(x, y):
        """
        Get the distance between the list x and y

        :param x: a list
        :param y: a list
        :return: Euclidean distance
        """
        dis = 0
        for i in range(len(x)):
            dis += (x[i] - y[i])**2
        return np.sqrt(dis)
class SRacosTune(RacosCommon):
"""
The class SRacosTune represents Sequential Racos algorithm for Tune. It's inherited from RacosCommon.
"""
def __init__(self, dimension, parameter, **kwargs):
"""
Initialization.
:param dimension: instance of Dimension2 class
:param parameter: instance of Parameter class
"""
RacosCommon.__init__(self)
self.clear()
objective = Objective(None, dimension)
self.set_objective(objective)
self.set_parameters(parameter)
self._parameter.set_server_num(kwargs['parallel_num'])
self.init_num = 0
self.complete_num = 0
self.semaphore = 1 # control init
self.live_num = 0
self.ub = self._parameter.get_uncertain_bits()
if self.ub is None:
self.ub = self.choose_ub(self.get_objective())
return
def suggest(self):
"""
Suggest a trial for Tune, including init trials(decided by `budget`)
"""
if self.semaphore == 0:
return
solution = None
if self.init_num < self._parameter.get_train_size(): # for init
solution, distinct_flag = self.tune_init_attribute() # TODO: It probably samples the same solution but distinct_flag is True
if distinct_flag is False:
return "FINISHED"
self.live_num += 1
elif self.init_num == self._parameter.get_train_size():
self.semaphore = 0
self.init_num += 1
return
elif self.live_num < self._parameter.get_server_num():
solution, distinct_flag = self.sample_solution(self.ub)
if distinct_flag is False:
return "FINISHED"
self.live_num += 1
self.init_num += 1
return solution
def complete(self, solution, result):
"""
Process results for Tune. Put evaluated result to solution.
:param solution: a solution without result
:param result: evaluated result of solution
:return: best solution so far
"""
self.complete_num += 1
self.live_num -= 1
if self.complete_num < self._parameter.get_train_size():
solution.set_value(result)
self._data.append(solution)
elif self.complete_num == self._parameter.get_train_size():
solution.set_value(result)
self._data.append(solution)
self.selection()
self.semaphore = 1
else:
solution.set_value(result)
best_solution_so_far = self.update_classifier(solution)
return best_solution_so_far
def sample_solution(self, ub=1):
"""
Sample a trial for Tune.
:param ub: uncertain bits, which is a parameter of SRacos
:return: a solution containing trial
"""
sampled_data = self._positive_data + self._negative_data
if np.random.random() < self._parameter.get_probability():
classifier = RacosClassification(
self._objective.get_dim(), self._positive_data, self._negative_data, ub)
classifier.mixed_classification()
solution, distinct_flag = self.distinct_sample_classifier(
classifier, sampled_data, True, self._parameter.get_train_size())
else:
solution, distinct_flag = self.distinct_sample(self._objective.get_dim(), sampled_data)
if distinct_flag is True:
sampled_data.append(solution)
return solution, distinct_flag
def update_classifier(self, solution, strategy='WR'):
stopping_criterion = self._parameter.get_stopping_criterion()
bad_ele = self.replace(self._positive_data, solution, 'pos')
self.replace(self._negative_data, bad_ele, 'neg', strategy)
self._best_solution = self._positive_data[0]
# terminal_value check
if self._parameter.get_terminal_value() is not None:
if self._best_solution.get_value() <= self._parameter.get_terminal_value():
ToolFunction.log('terminal function value reached')
return self._best_solution
if stopping_criterion.check(self) is True:
return self._best_solution
return self._best_solution
def replace(self, iset, x, iset_type, strategy='WR'):
"""
Replace a solution(chosen by strategy) in iset with x.
:param iset: a solution list
:param x: a Solution object
:param iset_type: 'pos' or 'neg'
:param strategy: 'WR': worst replace or 'RR': random replace or 'LM': replace the farthest solution
:return: the replaced solution
"""
if strategy == 'WR':
return self.strategy_wr(iset, x, iset_type)
elif strategy == 'RR':
return self.strategy_rr(iset, x)
elif strategy == 'LM':
best_sol = min(iset, key=lambda x: x.get_value())
return self.strategy_lm(iset, best_sol, x)
def binary_search(self, iset, x, begin, end):
"""
Find the first element larger than x.
:param iset: a solution set
:param x: a Solution object
:param begin: begin position
:param end: end position
:return: the index of the first element larger than x
"""
x_value = x.get_value()
if x_value <= iset[begin].get_value():
return begin
if x_value >= iset[end].get_value():
return end + 1
if end == begin + 1:
return end
mid = (begin + end) // 2
if x_value <= iset[mid].get_value():
return self.binary_search(iset, x, begin, mid)
else:
return self.binary_search(iset, x, mid, end)
def strategy_wr(self, iset, x, iset_type):
"""
Replace the worst solution in iset.
:param iset: a solution set
:param x: a Solution object
:param iset_type: 'pos' or 'neg'
:return: the worst solution
"""
if iset_type == 'pos':
index = self.binary_search(iset, x, 0, len(iset) - 1)
iset.insert(index, x)
worst_ele = iset.pop()
else:
worst_ele, worst_index | |
self._diagnostics.append(
Diagnostic(
range=value.range(),
message=f"Imported library '{value.name}' contains no keywords.",
severity=DiagnosticSeverity.WARNING,
source=DIAGNOSTICS_SOURCE_NAME,
)
)
elif isinstance(value, ResourceImport):
if value.name is None:
raise NameSpaceError("Resource setting requires value.")
source = await self.imports_manager.find_file(value.name, base_dir)
# allready imported
if any(r for r in self._resources.values() if r.library_doc.source == source):
return None
result = await self._get_resource_entry(value.name, base_dir, sentinel=value)
result.import_range = value.range()
result.import_source = value.source
if top_level and (
not result.library_doc.errors
and top_level
and not result.imports
and not result.variables
and not result.library_doc.keywords
):
self._diagnostics.append(
Diagnostic(
range=value.range(),
message=f"Imported resource file '{value.name}' is empty.",
severity=DiagnosticSeverity.WARNING,
source=DIAGNOSTICS_SOURCE_NAME,
)
)
elif isinstance(value, VariablesImport):
# TODO: variables
# if value.name is None:
# raise NameSpaceError("Variables setting requires value.")
# result = await self._get_variables_entry(value.name, value.args, base_dir)
# result.import_range = value.range()
# result.import_source = value.source
pass
else:
raise DiagnosticsError("Unknown import type.")
if top_level and result is not None:
if result.library_doc.source is not None and result.library_doc.errors:
if any(err.source for err in result.library_doc.errors):
self._diagnostics.append(
Diagnostic(
range=value.range(),
message="Import definition contains errors.",
severity=DiagnosticSeverity.ERROR,
source=DIAGNOSTICS_SOURCE_NAME,
related_information=[
DiagnosticRelatedInformation(
location=Location(
uri=str(Uri.from_path(err.source)),
range=Range(
start=Position(
line=err.line_no - 1
if err.line_no is not None
else result.library_doc.line_no
if result.library_doc.line_no >= 0
else 0,
character=0,
),
end=Position(
line=err.line_no - 1
if err.line_no is not None
else result.library_doc.line_no
if result.library_doc.line_no >= 0
else 0,
character=0,
),
),
),
message=err.message,
)
for err in result.library_doc.errors
if err.source is not None
],
)
)
for err in filter(lambda e: e.source is None, result.library_doc.errors):
self._diagnostics.append(
Diagnostic(
range=value.range(),
message=err.message,
severity=DiagnosticSeverity.ERROR,
source=DIAGNOSTICS_SOURCE_NAME,
code=err.type_name,
)
)
elif result.library_doc.errors is not None:
for err in result.library_doc.errors:
self._diagnostics.append(
Diagnostic(
range=value.range(),
message=err.message,
severity=DiagnosticSeverity.ERROR,
source=DIAGNOSTICS_SOURCE_NAME,
code=err.type_name,
)
)
except (asyncio.CancelledError, SystemExit, KeyboardInterrupt):
raise
except BaseException as e:
if top_level:
self._diagnostics.append(
Diagnostic(
range=value.range(),
message=str(e),
severity=DiagnosticSeverity.ERROR,
source=DIAGNOSTICS_SOURCE_NAME,
code=type(e).__qualname__,
)
)
return result
for entry in await asyncio.gather(*(_import(v) for v in imports), return_exceptions=True):
if isinstance(entry, (asyncio.CancelledError, SystemExit, KeyboardInterrupt)):
raise entry
if entry is not None:
if isinstance(entry, ResourceEntry):
assert entry.library_doc.source is not None
allready_imported_resources = [
e for e in self._resources.values() if e.library_doc.source == entry.library_doc.source
]
if not allready_imported_resources and entry.library_doc.source != self.source:
self._resources[entry.import_name] = entry
try:
await self._import_imports(
entry.imports,
str(Path(entry.library_doc.source).parent),
top_level=False,
)
except (asyncio.CancelledError, SystemExit, KeyboardInterrupt):
raise
except BaseException as e:
if top_level:
self._diagnostics.append(
Diagnostic(
range=entry.import_range,
message=str(e) or type(entry).__name__,
severity=DiagnosticSeverity.ERROR,
source=DIAGNOSTICS_SOURCE_NAME,
code=type(e).__qualname__,
)
)
else:
if top_level:
if entry.library_doc.source == self.source:
self._diagnostics.append(
Diagnostic(
range=entry.import_range,
message="Recursive resource import.",
severity=DiagnosticSeverity.INFORMATION,
source=DIAGNOSTICS_SOURCE_NAME,
)
)
elif allready_imported_resources and allready_imported_resources[0].library_doc.source:
self._resources[entry.import_name] = entry
self._diagnostics.append(
Diagnostic(
range=entry.import_range,
message="Resource already imported.",
severity=DiagnosticSeverity.INFORMATION,
source=DIAGNOSTICS_SOURCE_NAME,
related_information=[
DiagnosticRelatedInformation(
location=Location(
uri=str(
Uri.from_path(allready_imported_resources[0].import_source)
),
range=allready_imported_resources[0].import_range,
),
message="",
)
],
)
)
else:
if entry.name == BUILTIN_LIBRARY_NAME and entry.alias is None:
self._diagnostics.append(
Diagnostic(
range=entry.import_range,
message=f'Library "{entry}" is not imported,'
' because it would override the "BuiltIn" library.',
severity=DiagnosticSeverity.INFORMATION,
source=DIAGNOSTICS_SOURCE_NAME,
related_information=[
DiagnosticRelatedInformation(
location=Location(
uri=str(Uri.from_path(entry.import_source)),
range=entry.import_range,
),
message="",
)
],
)
)
continue
allready_imported_library = [
e
for e in self._libraries.values()
if e.library_doc.source == entry.library_doc.source
and e.alias == entry.alias
and e.args == entry.args
]
if top_level and allready_imported_library and allready_imported_library[0].library_doc.source:
self._diagnostics.append(
Diagnostic(
range=entry.import_range,
message=f'Library "{entry}" already imported.',
severity=DiagnosticSeverity.INFORMATION,
source=DIAGNOSTICS_SOURCE_NAME,
related_information=[
DiagnosticRelatedInformation(
location=Location(
uri=str(Uri.from_path(allready_imported_library[0].import_source)),
range=allready_imported_library[0].import_range,
),
message="",
)
],
)
)
if (entry.alias or entry.name or entry.import_name) not in self._libraries:
self._libraries[entry.alias or entry.name or entry.import_name] = entry
# TODO Variables
async def _import_default_libraries(self) -> None:
    """Import the libraries listed in DEFAULT_LIBRARIES into this namespace.

    All imports run concurrently; a failing default library is reported as
    an ERROR diagnostic instead of aborting namespace initialization.
    """

    async def _import_lib(library: str) -> Optional[LibraryEntry]:
        # Returns None on failure; the error is recorded as a diagnostic.
        try:
            return await self._get_library_entry(
                library, (), None, str(Path(self.source).parent), is_default_library=True
            )
        except (asyncio.CancelledError, SystemExit, KeyboardInterrupt):
            # never swallow cancellation / interpreter-exit signals
            raise
        except BaseException as e:
            self._diagnostics.append(
                Diagnostic(
                    range=Range.zero(),
                    message=f"Can't import default library '{library}': {str(e) or type(e).__name__}",
                    severity=DiagnosticSeverity.ERROR,
                    source="Robot",
                    code=type(e).__qualname__,
                )
            )
            return None

    for e in await asyncio.gather(*(_import_lib(library) for library in DEFAULT_LIBRARIES)):
        if e is not None:
            # register under alias if given, else library name, else import name
            self._libraries[e.alias or e.name or e.import_name] = e
async def _get_library_entry(
    self,
    name: str,
    args: Tuple[Any, ...],
    alias: Optional[str],
    base_dir: str,
    *,
    is_default_library: bool = False,
    sentinel: Any = None,
) -> LibraryEntry:
    """Resolve a library import to a LibraryEntry via the imports manager.

    :param name: library name or path as written in the import setting
    :param args: library import arguments
    :param alias: name given with 'WITH NAME', if any
    :param base_dir: directory against which relative imports are resolved
    :param is_default_library: when True the sentinel is suppressed
        (default libraries are not tied to a particular document)
    :param sentinel: opaque token forwarded to the imports manager —
        presumably used for cache invalidation; confirm against its API
    """
    library = await self.imports_manager.get_libdoc_for_library_import(
        name, args, base_dir=base_dir, sentinel=None if is_default_library else sentinel
    )
    return LibraryEntry(name=library.name, import_name=name, library_doc=library, args=args, alias=alias)
async def _get_resource_entry(self, name: str, base_dir: str, sentinel: Any = None) -> ResourceEntry:
    """Resolve a resource import to a ResourceEntry.

    Fetches both the resource's namespace (for its own imports/variables)
    and its library doc (for the keywords it defines).
    """
    namespace = await self.imports_manager.get_namespace_for_resource_import(name, base_dir, sentinel=sentinel)
    library_doc = await self.imports_manager.get_libdoc_for_resource_import(name, base_dir, sentinel=sentinel)
    return ResourceEntry(
        name=library_doc.name,
        import_name=name,
        library_doc=library_doc,
        imports=await namespace.get_imports(),
        variables=await namespace.get_own_variables(),
    )
# TODO get_variables
@_logger.call
async def get_keywords(self) -> List[KeywordDoc]:
    """Return all keywords visible in this namespace (own, resources,
    libraries), computed once and cached in self._keywords.

    Earlier sources win: a keyword whose matcher is already present is not
    overwritten, so own keywords shadow resource keywords, which shadow
    library keywords.
    """
    await self.ensure_initialized()
    if self._keywords is None:
        result: Dict[KeywordMatcher, KeywordDoc] = {}
        # hoisted: the original awaited get_library_doc() twice in one expression
        lib_doc = await self.get_library_doc()
        async for name, doc in async_chain(
            lib_doc.keywords.items() if lib_doc is not None else [],
            *(e.library_doc.keywords.items() for e in self._resources.values()),
            *(e.library_doc.keywords.items() for e in self._libraries.values()),
        ):
            # build the matcher once instead of twice per keyword
            matcher = KeywordMatcher(name)
            if matcher not in result:
                result[matcher] = doc
        self._keywords = list(result.values())
    return self._keywords
@_logger.call
async def _analyze(self) -> None:
    """Run semantic analysis exactly once and record resulting diagnostics.

    Converts analyzer output and library-doc errors into Diagnostic entries.
    The analyzed flag is set even if analysis raises, so a broken document
    is not re-analyzed repeatedly.
    """
    if self._analyzed:
        return
    async with self._analyze_lock:
        # Re-check under the lock: the first check is unlocked, so a
        # concurrent caller may have completed the analysis while we were
        # waiting — without this, diagnostics would be appended twice.
        if self._analyzed:
            return
        try:
            self._diagnostics += await Analyzer().get(self.model, self)
            lib_doc = await self.get_library_doc()
            if lib_doc.errors is not None:
                for err in lib_doc.errors:
                    # err.line_no is 1-based; Diagnostic positions are 0-based
                    line = (err.line_no - 1) if err.line_no is not None else 0
                    self._diagnostics.append(
                        Diagnostic(
                            range=Range(
                                start=Position(line=line, character=0),
                                end=Position(line=line, character=0),
                            ),
                            message=err.message,
                            severity=DiagnosticSeverity.ERROR,
                            source=DIAGNOSTICS_SOURCE_NAME,
                            code=err.type_name,
                        )
                    )
        finally:
            self._analyzed = True
async def find_keyword(self, name: Optional[str]) -> Optional[KeywordDoc]:
    """Resolve a keyword name within this namespace, or None if not found."""
    await self.ensure_initialized()
    finder = KeywordFinder(self)
    return await finder.find_keyword(name)
async def find_keyword_threadsafe(self, name: Optional[str]) -> Optional[KeywordDoc]:
    """Like find_keyword, but safe to await from a different event loop/thread:
    the lookup is scheduled on self._loop and the result awaited through a
    wrapped concurrent future."""
    return await asyncio.wrap_future(asyncio.run_coroutine_threadsafe(self.find_keyword(name), self._loop))
class DiagnosticsEntry(NamedTuple):
    """A single diagnostic produced while resolving a keyword."""

    # human-readable problem description
    message: str
    # LSP severity of the problem
    severity: DiagnosticSeverity
    # optional machine-readable error code (e.g. "KeywordError")
    code: Optional[str] = None
class CancelSearchError(Exception):
    """Raised internally by KeywordFinder to abort a search after a
    diagnostic has been recorded; find_keyword() turns it into None."""

    pass
class KeywordFinder:
def __init__(self, namespace: Namespace) -> None:
    """:param namespace: the Namespace whose own keywords, resources and
    libraries are searched."""
    super().__init__()
    self.namespace = namespace
    # diagnostics collected during the most recent find_keyword() call
    self.diagnostics: List[DiagnosticsEntry] = []
async def find_keyword(self, name: Optional[str]) -> Optional[KeywordDoc]:
    """Find the keyword doc for name, recording problems in self.diagnostics.

    Returns None both when nothing was found (with a "no keyword" ERROR
    diagnostic) and when the search was cancelled by a previously recorded
    diagnostic (CancelSearchError).
    """
    try:
        result = await self._find_keyword(name)
        if result is None:
            self.diagnostics.append(
                DiagnosticsEntry(
                    f"No keyword with name {repr(name)} found.", DiagnosticSeverity.ERROR, "KeywordError"
                )
            )
        return result
    except CancelSearchError:
        # the diagnostic explaining the cancellation was already recorded
        return None
async def _find_keyword(self, name: Optional[str]) -> Optional[KeywordDoc]:
    """Core resolution order: own keywords, then explicit "Owner.Keyword"
    syntax (only if the name contains a dot), then implicit lookup across
    resources/libraries, then BDD-style fallback.

    Raises CancelSearchError for invalid names after recording a diagnostic.
    """
    if not name:
        self.diagnostics.append(
            DiagnosticsEntry("Keyword name cannot be empty.", DiagnosticSeverity.ERROR, "KeywordError")
        )
        raise CancelSearchError()
    if not isinstance(name, str):
        self.diagnostics.append(
            DiagnosticsEntry("Keyword name must be a string.", DiagnosticSeverity.ERROR, "KeywordError")
        )
        raise CancelSearchError()
    result = await self._get_keyword_from_self(name)
    if not result and "." in name:
        result = await self._get_explicit_keyword(name)
    if not result:
        result = await self._get_implicit_keyword(name)
    if not result:
        # e.g. "Given/When/Then ..." prefixed names
        result = await self._get_bdd_style_keyword(name)
    return result
async def _get_keyword_from_self(self, name: str) -> Optional[KeywordDoc]:
    """Look the name up among the keywords the namespace itself defines."""
    own_doc = await self.namespace.get_library_doc()
    return own_doc.keywords.get(name)
async def _yield_owner_and_kw_names(self, full_name: str) -> AsyncIterator[Tuple[str, ...]]:
tokens = full_name.split(".")
for i in range(1, len(tokens)):
yield ".".join(tokens[:i]), ".".join(tokens[i:])
async def _get_explicit_keyword(self, name: str) -> Optional[KeywordDoc]:
    """Resolve an explicitly qualified name like "Library.Keyword".

    Every possible owner/keyword split is tried; more than one match is an
    ambiguity: a diagnostic is recorded and the search is cancelled.
    """
    found: List[Tuple[LibraryEntry, KeywordDoc]] = []
    async for owner_name, kw_name in self._yield_owner_and_kw_names(name):
        found.extend(await self.find_keywords(owner_name, kw_name))
    if len(found) > 1:
        self.diagnostics.append(
            DiagnosticsEntry(
                self._create_multiple_keywords_found_message(name, found, implicit=False),
                DiagnosticSeverity.ERROR,
                "KeywordError",
            )
        )
        raise CancelSearchError()
    return found[0][1] if found else None
async def find_keywords(self, owner_name: str, name: str) -> Sequence[Tuple[LibraryEntry, KeywordDoc]]:
    """Return all (entry, keyword) pairs whose entry alias-or-name matches
    owner_name (robot's eq comparison) and which define a keyword `name`.

    Libraries are scanned before resources.
    """
    from robot.utils.match import eq

    return [
        (v, v.library_doc.keywords[name])
        async for v in async_chain(self.namespace._libraries.values(), self.namespace._resources.values())
        if eq(v.alias or v.name, owner_name) and name in v.library_doc.keywords
    ]
def _create_multiple_keywords_found_message(
self, name: str, found: Sequence[Tuple[LibraryEntry, KeywordDoc]], implicit: bool = True
) -> str:
error = "Multiple keywords with name '%s' found" % name
if implicit:
error += ". Give the full name of the keyword you want to use"
names = sorted(f"{e[0].alias or e[0].name}.{e[1].name}" for e in found)
return "\n ".join([error + ":"] + names)
async def _get_implicit_keyword(self, name: str) -> Optional[KeywordDoc]:
    """Search resource files first, then libraries; first hit wins."""
    from_resources = await self._get_keyword_from_resource_files(name)
    if from_resources:
        return from_resources
    return await self._get_keyword_from_libraries(name)
async def _get_keyword_from_resource_files(self, name: str) -> Optional[KeywordDoc]:
    """Find the keyword among imported resource files.

    Multiple matches are disambiguated via the namespace search order; an
    unresolved ambiguity records a diagnostic and cancels the search.
    """
    found: List[Tuple[LibraryEntry, KeywordDoc]] = [
        (v, v.library_doc.keywords[name])
        async for v in async_chain(self.namespace._resources.values())
        if name in v.library_doc.keywords
    ]
    if not found:
        return None
    if len(found) > 1:
        found = await self._get_keyword_based_on_search_order(found)
    if len(found) == 1:
        return found[0][1]
    # still ambiguous after applying the search order
    self.diagnostics.append(
        DiagnosticsEntry(
            self._create_multiple_keywords_found_message(name, found),
            DiagnosticSeverity.ERROR,
            "KeywordError",
        )
    )
    raise CancelSearchError()
async def _get_keyword_based_on_search_order(
    self, entries: List[Tuple[LibraryEntry, KeywordDoc]]
) -> List[Tuple[LibraryEntry, KeywordDoc]]:
    """Narrow ambiguous matches using the namespace's search order.

    Returns the single entry owned by the first search-order name that
    matches; if none matches, the entries are returned unchanged.
    """
    from robot.utils.match import eq

    for libname in self.namespace.search_order:
        for e in entries:
            if eq(libname, e[0].alias or e[0].name):
                return [e]
    return entries
async def _get_keyword_from_libraries(self, name: str) -> Optional[KeywordDoc]:
    """Find the keyword among imported libraries.

    Ambiguities are narrowed first via the search order, then — for exactly
    two remaining candidates — via the stdlib-vs-runner filter; anything
    still ambiguous records a diagnostic and cancels the search.
    """
    found = [
        (v, v.library_doc.keywords[name])
        async for v in async_chain(self.namespace._libraries.values())
        if name in v.library_doc.keywords
    ]
    if not found:
        return None
    if len(found) > 1:
        found = await self._get_keyword_based_on_search_order(found)
    if len(found) == 2:
        # a standard library and a custom one may both define the keyword
        found = await self._filter_stdlib_runner(*found)
    if len(found) == 1:
        return found[0][1]
    self.diagnostics.append(
        DiagnosticsEntry(
            self._create_multiple_keywords_found_message(name, found),
            DiagnosticSeverity.ERROR,
            "KeywordError",
        )
    )
    raise CancelSearchError()
async def _filter_stdlib_runner(
self, entry1: Tuple[LibraryEntry, KeywordDoc], entry2: Tuple[LibraryEntry, KeywordDoc]
) -> List[Tuple[LibraryEntry, KeywordDoc]]:
from robot.libraries import STDLIBS
stdlibs_without_remote | |
# <gh_stars>1-10
# fbdata.models
# PYTHON
from datetime import timedelta
# DJANGO
from django.conf import settings
from django.core.paginator import Paginator
from django.db import models
# DJANGO FACEBOOK
from django_facebook.models import FacebookProfile
# FBDATA
from .fields import IntegerListField
from .utils import (
date_to_timestamp,
empty_hours,
fb_post_type_str,
get_choice_name,
padded_date_range,
random_color,
truncate_html,
wordlist_regex,
LONG_DATE_FORMAT
)
############
# CLASSES
############
class AnonName(models.Model):
    """A pool of single names combined pairwise into pseudonyms (see
    _anon_name)."""

    name = models.CharField(max_length=16, unique=True)

    def __unicode__(self):
        return self.name
def _anon_name():
    # Default callable for FBId.anon_name: joins two randomly ordered
    # AnonName rows into e.g. "Alpha Bravo".
    # NOTE(review): assumes at least two AnonName rows exist, otherwise the
    # tuple unpacking into '%s %s' fails — confirm seeding of AnonName.
    return '%s %s' % tuple(AnonName.objects.all().order_by('?')[:2])
class FBId(models.Model):
    """A Facebook user/page identity referenced by collected data.

    Each identity carries a generated pseudonym and colour so that data can
    be displayed anonymously.
    """

    user_id = models.BigIntegerField(unique=True)
    user_name = models.CharField(max_length=128, null=True, blank=True)
    anon_name = models.CharField(max_length=34, default=_anon_name)
    fb_type = models.CharField(max_length=12, null=True, blank=True)
    colour = models.CharField(max_length=6, default=random_color)
    users = models.ManyToManyField(settings.AUTH_USER_MODEL,
                                   related_name='friend_set')
    name_error = models.BooleanField(default=False)

    def is_participant(self):
        # True if this Facebook id belongs to a registered study participant
        return FacebookProfile.objects.filter(facebook_id=self.user_id).exists()

    def reference_name(self, anon=True):
        # pseudonym when anonymous, else real name (falling back to the id)
        return self.anon_name if anon else self.user_name or unicode(self.user_id)

    def reference_id(self, anon=True):
        # NOTE(review): the primary key is returned regardless of anon —
        # presumably the pk itself is considered non-identifying
        return self.pk

    def has_user(self, user):
        return self.users.filter(pk=user.pk).exists()

    def __unicode__(self):
        return '%s %s' % (self.user_name or 'Unknown', self.user_id)

    def detail_data(self, anon=True):
        """Serializable detail dict for this identity."""
        return {'fbid': self.user_id,
                'name': self.user_name,
                'anon': self.anon_name,
                'type': self.fb_type,
                'rgb': self.colour,
                'users': [u.id for u in self.users.all()],
                'participant': self.is_participant()}
class UserAnalysis(models.Model):
    """Per-user analysis state: collection window, status and ad topics."""

    STATUS_ERROR = 0
    STATUS_NEW = 1
    STATUS_SUCCESS = 2
    STATUS_UNDERTIME = -1
    STATUS_CHOICES = (
        (STATUS_ERROR, 'error'),
        (STATUS_NEW, 'new'),
        (STATUS_SUCCESS, 'success'),
        (STATUS_UNDERTIME, 'under time')
    )

    user = models.OneToOneField(settings.AUTH_USER_MODEL)
    fbuser = models.ForeignKey('FBId', null=True)
    anon_data = models.BooleanField(default=True)
    start_time = models.DateTimeField(null=True)
    end_time = models.DateTimeField(null=True)
    status = models.SmallIntegerField(choices=STATUS_CHOICES,
                                      default=STATUS_NEW)
    consent = models.BooleanField(default=False)

    # days per page; float so page arithmetic stays fractional
    page_size = 14.0
    # Lazily-built compiled regex over the user's ad topic labels.
    # BUG FIX: previously this attribute was never initialised, so the
    # first call to match_ad_topics() raised AttributeError.
    ad_regex = None

    def status_str(self):
        # human-readable status name
        return get_choice_name(self.status, self.STATUS_CHOICES)

    def page_dates(self, page):
        """Return the (padded) date range covered by a 1-based page number,
        or (None, None) when no collection window has been recorded."""
        if self.start_time:
            page = max(0, min(page, self.get_pages())-1)
            days = int(page * self.page_size)
            page_start = min(self.end_time, self.start_time + timedelta(days=days))
            page_end = page_start + timedelta(days=self.page_size)
            return padded_date_range(page_start, page_end)
        return (None, None)

    def get_pages(self):
        """Number of page_size-day pages spanned by the collection window."""
        from math import ceil
        if not self.start_time or not self.end_time:
            return 0
        duration = self.end_time - self.start_time
        return int(ceil(duration.days / self.page_size))

    def end_page(self):
        return self.get_pages()

    def ad_topics(self):
        return AdTopic.objects.filter(users=self.user).order_by('label')

    def ad_topic_labels(self):
        return AdTopic.objects.filter(
            users=self.user).values_list(
                'label', flat=True).order_by('label')

    def match_ad_topics(self, input_str):
        """Return the ad-topic labels found in input_str, building the
        matching regex on first use."""
        if not self.ad_regex:
            self.ad_regex = wordlist_regex(self.ad_topic_labels())
        topics = self.ad_regex.findall(input_str)
        return topics

    def paginate(self, page=None):
        # one "item" per page; defaults to the most recent page
        self.paginator = Paginator(range(1, self.get_pages()+1), 1)
        page = page or self.end_page()
        return self.paginator.page(page)

    def recent_time_frame(self):
        return self.page_dates(self.end_page())
class AdTopic(models.Model):
    """An advertising topic label associated with one or more users."""

    users = models.ManyToManyField(settings.AUTH_USER_MODEL)
    label = models.CharField(max_length=128)
class StreamPost(models.Model):
    """A post collected from a participant's Facebook stream."""

    user = models.ForeignKey(settings.AUTH_USER_MODEL)
    post_id = models.CharField(max_length=128)
    post_from = models.ForeignKey('FBId', null=True)
    permalink = models.CharField(max_length=256, null=True, blank=True)
    post_type = models.PositiveIntegerField(default=0)
    created_time = models.DateTimeField(null=True)
    updated_time = models.DateTimeField(null=True)
    user_likes = models.BooleanField(default=False)
    like_count = models.PositiveIntegerField(default=0)
    comment_count = models.PositiveIntegerField(default=0)
    share_count = models.PositiveIntegerField(default=0)
    message = models.TextField(null=True, blank=True)
    description = models.TextField(null=True, blank=True)
    likers = models.ManyToManyField('FBId', related_name='liked_posts')
    tagged = models.ManyToManyField('FBId', related_name='tagged_posts')

    @classmethod
    def comment_class(cls):
        # the comment model paired with this item type
        return PostComment

    def get_comments(self):
        # comments made at or after the post's creation, oldest first
        return self.postcomment_set.filter(
            created_time__gte=self.created_time).order_by('created_time')

    @classmethod
    def type_str(cls):
        return 'post'

    def fbid(self):
        # Facebook identifier of this item
        return self.post_id

    def fb_source_id(self, anon=True):
        return self.post_from.reference_id(anon)

    def fb_source(self):
        return self.post_from

    def fb_timestamp(self):
        return date_to_timestamp(self.created_time)

    def detail_data(self, anon=True):
        """Serializable detail dict; identifying text/links only when anon
        is False (indentation reconstructed — text fields assumed to be
        non-anon only, matching display_info; verify against original)."""
        data = {'fbid': self.fbid(),
                'type': 'post',
                'post_type': self.post_type,
                'source_id': self.post_from.reference_id(anon),
                'source_name': self.post_from.reference_name(anon),
                'created_time': date_to_timestamp(self.created_time),
                'updated_time': date_to_timestamp(self.updated_time),
                'user_likes': self.user_likes,
                'like_count': self.like_count,
                'comment_count': self.comment_count,
                'share_count': self.share_count,
                'likers': [u.reference_id(anon) for u in self.likers.all()],
                'tagged': [u.reference_id(anon) for u in self.tagged.all()]}
        if not anon:
            data['permalink'] = self.permalink
            if self.message:
                data['message'] = truncate_html(self.message)
            if self.description:
                data['description'] = truncate_html(self.description)
        return data

    def needs_updated(self, updated_time):
        # refresh when Facebook reports a newer update; with no stored
        # update time we always refresh
        if self.updated_time:
            return self.updated_time < updated_time
        else:
            return True

    def get_reference_hour(self):
        # hour-of-day used for activity histograms (latest known time)
        rtime = self.updated_time or self.created_time
        return rtime.hour

    def time_str(self):
        return self.created_time.strftime(LONG_DATE_FORMAT)

    def post_type_str(self):
        return fb_post_type_str(self.post_type, default='status')

    def __unicode__(self):
        return u'post: %s' % self.post_id

    def display_info(self, anon=True):
        """One-line summary; message/description only in non-anon mode."""
        title = None
        if not anon:
            if self.message:
                title = truncate_html(self.message)
            elif self.description:
                title = self.description
        if title:
            return u'%s %s: %s' % (self.time_str(), self.post_type_str(), title)
        else:
            return u'%s %s' % (self.time_str(), self.post_type_str())
class FBPhoto(models.Model):
    """A Facebook photo collected for a participant."""

    user = models.ForeignKey(settings.AUTH_USER_MODEL)
    object_id = models.CharField(max_length=128)
    album_object_id = models.CharField(max_length=128)
    owner = models.ForeignKey('FBId', null=True)
    link = models.CharField(max_length=256, null=True, blank=True)
    src = models.CharField(max_length=256, null=True, blank=True)
    created_time = models.DateTimeField(null=True)
    updated_time = models.DateTimeField(null=True)
    user_likes = models.BooleanField(default=False)
    like_count = models.PositiveIntegerField(default=0)
    comment_count = models.PositiveIntegerField(default=0)
    caption = models.TextField(null=True, blank=True)
    likers = models.ManyToManyField('FBId', related_name='liked_photos')
    tagged = models.ManyToManyField('FBId', related_name='tagged_photos')

    @classmethod
    def comment_class(cls):
        # the comment model paired with this item type
        return PhotoComment

    def get_comments(self):
        # comments made at or after the photo's creation, oldest first
        return self.photocomment_set.filter(
            created_time__gte=self.created_time).order_by('created_time')

    @classmethod
    def type_str(cls):
        return 'photo'

    def fbid(self):
        return self.object_id

    def fb_source_id(self, anon=True):
        return self.owner.reference_id(anon)

    def fb_source(self):
        return self.owner

    def fb_timestamp(self):
        return date_to_timestamp(self.created_time)

    def __unicode__(self):
        return u'photo: %s' % self.object_id

    def detail_data(self, anon=True):
        """Serializable detail dict; links/caption only when anon is False
        (indentation reconstructed — verify nesting against original)."""
        data = {'fbid': self.object_id,
                'type': 'photo',
                'source_id': self.owner.reference_id(anon),
                'source_name': self.owner.reference_name(anon),
                'created_time': date_to_timestamp(self.created_time),
                'updated_time': date_to_timestamp(self.updated_time),
                'user_likes': self.user_likes,
                'like_count': self.like_count,
                'comment_count': self.comment_count,
                'likers': [u.reference_id(anon) for u in self.likers.all()],
                'tagged': [u.reference_id(anon) for u in self.tagged.all()]}
        if not anon:
            data['link'] = self.link
            data['src'] = self.src
            if self.caption:
                data['caption'] = truncate_html(self.caption)
        return data

    def needs_updated(self, updated_time):
        # refresh when Facebook reports a newer update; with no stored
        # update time we always refresh
        if self.updated_time:
            return self.updated_time < updated_time
        else:
            return True

    def get_reference_hour(self):
        rtime = self.updated_time or self.created_time
        return rtime.hour

    def time_str(self):
        return self.created_time.strftime(LONG_DATE_FORMAT)

    def display_info(self, anon=True):
        """One-line summary; caption only in non-anon mode."""
        title = None
        if not anon:
            if self.caption:
                title = truncate_html(self.caption)
        if title:
            return u'%s %s: %s' % (self.time_str(), self.type_str(), title)
        else:
            return u'%s %s' % (self.time_str(), self.type_str())
class FBVideo(models.Model):
    """A Facebook video collected for a participant."""

    user = models.ForeignKey(settings.AUTH_USER_MODEL)
    video_id = models.CharField(max_length=128)
    album_id = models.CharField(max_length=128)
    owner = models.ForeignKey('FBId', null=True)
    link = models.CharField(max_length=256, null=True, blank=True)
    src = models.CharField(max_length=256, null=True, blank=True)
    created_time = models.DateTimeField(null=True)
    updated_time = models.DateTimeField(null=True)
    user_likes = models.BooleanField(default=False)
    like_count = models.PositiveIntegerField(default=0)
    comment_count = models.PositiveIntegerField(default=0)
    title = models.CharField(max_length=256, null=True, blank=True)
    description = models.TextField(null=True, blank=True)
    likers = models.ManyToManyField('FBId', related_name='liked_videos')
    tagged = models.ManyToManyField('FBId', related_name='tagged_videos')

    @classmethod
    def comment_class(cls):
        # the comment model paired with this item type
        return VideoComment

    def get_comments(self):
        # comments made at or after the video's creation, oldest first
        return self.videocomment_set.filter(
            created_time__gte=self.created_time).order_by('created_time')

    @classmethod
    def type_str(cls):
        return 'video'

    def fbid(self):
        return self.video_id

    def fb_source_id(self, anon=True):
        return self.owner.reference_id(anon)

    def fb_source(self):
        return self.owner

    def fb_timestamp(self):
        return date_to_timestamp(self.created_time)

    def __unicode__(self):
        return u'video: %s' % self.video_id

    def detail_data(self, anon=True):
        """Serializable detail dict; links/title/description only when anon
        is False (indentation reconstructed — verify nesting against original)."""
        data = {'fbid': self.video_id,
                'type': 'video',
                'source_id': self.owner.reference_id(anon),
                'source_name': self.owner.reference_name(anon),
                'created_time': date_to_timestamp(self.created_time),
                'updated_time': date_to_timestamp(self.updated_time),
                'user_likes': self.user_likes,
                'like_count': self.like_count,
                'comment_count': self.comment_count,
                'likers': [u.reference_id(anon) for u in self.likers.all()],
                'tagged': [u.reference_id(anon) for u in self.tagged.all()]}
        if not anon:
            data['link'] = self.link
            data['src'] = self.src
            if self.title:
                data['title'] = truncate_html(self.title)
            if self.description:
                data['description'] = truncate_html(self.description)
        return data

    def needs_updated(self, updated_time):
        # refresh when Facebook reports a newer update; with no stored
        # update time we always refresh
        if self.updated_time:
            return self.updated_time < updated_time
        else:
            return True

    def get_reference_hour(self):
        rtime = self.updated_time or self.created_time
        return rtime.hour

    def time_str(self):
        return self.created_time.strftime(LONG_DATE_FORMAT)

    def display_info(self, anon=True):
        """One-line summary; title only in non-anon mode."""
        title = None
        if not anon:
            title = self.title
        if title:
            return u'%s %s: %s' % (self.time_str(), self.type_str(), title)
        else:
            return u'%s %s' % (self.time_str(), self.type_str())
class FBLink(models.Model):
    """A link shared on Facebook, collected for a participant."""

    user = models.ForeignKey(settings.AUTH_USER_MODEL)
    link_id = models.CharField(max_length=128)
    owner = models.ForeignKey('FBId', null=True)
    via = models.ForeignKey('FBId', null=True, related_name='link_shared')
    url = models.CharField(max_length=256, null=True, blank=True)
    created_time = models.DateTimeField(null=True)
    user_likes = models.BooleanField(default=False)
    like_count = models.PositiveIntegerField(default=0)
    comment_count = models.PositiveIntegerField(default=0)
    share_count = models.PositiveIntegerField(default=0)
    click_count = models.PositiveIntegerField(default=0)
    caption = models.TextField(null=True, blank=True)
    summary = models.TextField(null=True, blank=True)
    title = models.TextField(null=True, blank=True)
    owner_comment = models.TextField(null=True, blank=True)
    likers = models.ManyToManyField('FBId', related_name='liked_links')
    tagged = models.ManyToManyField('FBId', related_name='tagged_links')

    @classmethod
    def comment_class(cls):
        # the comment model paired with this item type
        return LinkComment

    def get_comments(self):
        # comments made at or after the link's creation, oldest first
        return self.linkcomment_set.filter(
            created_time__gte=self.created_time).order_by('created_time')

    @classmethod
    def type_str(cls):
        return 'link'

    def fbid(self):
        return self.link_id

    def fb_source_id(self, anon=True):
        return self.owner.reference_id(anon)

    def fb_source(self):
        return self.owner

    def fb_timestamp(self):
        return date_to_timestamp(self.created_time)

    def __unicode__(self):
        return u'link: %s' % self.link_id

    def detail_data(self, anon=True):
        """Serializable detail dict; url/texts only when anon is False
        (indentation reconstructed — verify nesting against original)."""
        data = {'fbid': self.link_id,
                'type': 'link',
                'source_id': self.owner.reference_id(anon),
                'source_name': self.owner.reference_name(anon),
                'created_time': date_to_timestamp(self.created_time),
                'user_likes': self.user_likes,
                'like_count': self.like_count,
                'comment_count': self.comment_count,
                'share_count': self.share_count,
                'click_count': self.click_count,
                'likers': [u.reference_id(anon) for u in self.likers.all()],
                'tagged': [u.reference_id(anon) for u in self.tagged.all()]}
        if self.via:
            # the identity this link was shared through
            data['via_id'] = self.via.reference_id(anon)
            data['via_name'] = self.via.reference_name(anon)
        if not anon:
            data['url'] = self.url
            if self.title:
                data['title'] = truncate_html(self.title)
            if self.caption:
                data['caption'] = truncate_html(self.caption)
            if self.summary:
                data['summary'] = truncate_html(self.summary)
            if self.owner_comment:
                data['owner_comment'] = truncate_html(self.owner_comment)
        return data

    def needs_updated(self, updated_time):
        # links carry no updated_time field, so always refresh
        return True

    def get_reference_hour(self):
        return self.created_time.hour

    def time_str(self):
        return self.created_time.strftime(LONG_DATE_FORMAT)

    def display_info(self, anon=True):
        """One-line summary; title only in non-anon mode."""
        title = None
        if not anon:
            title = self.title
        if title:
            return u'%s %s: %s' % (self.time_str(), self.type_str(), title)
        else:
            return u'%s %s' % (self.time_str(), self.type_str())
class FBStatus(models.Model):
    """A Facebook status update collected for a participant."""

    user = models.ForeignKey(settings.AUTH_USER_MODEL)
    status_id = models.CharField(max_length=128)
    owner = models.ForeignKey('FBId', null=True)
    message = models.TextField(null=True, blank=True)
    created_time = models.DateTimeField(null=True)
    user_likes = models.BooleanField(default=False)
    like_count = models.PositiveIntegerField(default=0)
    comment_count = models.PositiveIntegerField(default=0)
    likers = models.ManyToManyField('FBId', related_name='liked_status')
    tagged = models.ManyToManyField('FBId', related_name='tagged_status')

    @classmethod
    def comment_class(cls):
        # the comment model paired with this item type
        return StatusComment

    def get_comments(self):
        # comments made at or after the status's creation, oldest first
        return self.statuscomment_set.filter(
            created_time__gte=self.created_time).order_by('created_time')
@classmethod
def type_str(cls):
| |
inputs=[x, y],
outfeed_queue=outfeed_queue,
accumulate_outfeed=True)
with ops.device("/device:IPU:0"):
pipeline = ipu_compiler.compile(my_net, inputs=[1.0, 2.0])
utils.move_variable_initialization_to_cpu()
outfed = outfeed_queue.dequeue()
sess.run(variables.global_variables_initializer())
sess.run(pipeline)
# '1' is accumulated 8 times, '2' is accumulated 8 times.
self.assertAllEqual([[8], [16]], sess.run(outfed))
report_json = pva.openReport(report_helper.find_report())
# There should be a GA-add for each output from the last stage.
ok = ['GradientAccumulatorAdd', 'GradientAccumulatorAdd_1']
self.assert_compute_sets_contain_list(report_json, ok)
@test_util.deprecated_graph_mode_only
def testOutfeedDictInference(self):
    """Pipeline stages may return dicts: the dequeued outfeed keeps the
    dict structure and holds one value per accumulation step."""
    with tu.ipu_session() as sess:

        def identity(x):
            return x

        def dictstage(x):
            # the final stage wraps its input in a dict
            return {"x": x}

        outfeed_queue = ipu_outfeed_queue.IPUOutfeedQueue("__feed13")

        def my_net(x):
            return pipelining_ops.pipeline(
                [identity, identity, identity, dictstage],
                gradient_accumulation_count=8,
                inputs=[x],
                outfeed_queue=outfeed_queue)

        with ops.device("/device:IPU:0"):
            pipeline = ipu_compiler.compile(my_net, inputs=[1.0])

        cfg = IPUConfig()
        cfg.ipu_model.compile_ipu_code = True
        cfg.ipu_model.tiles_per_ipu = 128
        # one IPU per pipeline stage
        cfg.auto_select_ipus = 4
        cfg.configure_ipu_system()
        utils.move_variable_initialization_to_cpu()

        outfed = outfeed_queue.dequeue()
        sess.run(variables.global_variables_initializer())
        sess.run(pipeline)
        got = sess.run(outfed)
        self.assertIsInstance(got, dict)
        # 8 accumulation steps, each fed the constant 1.0
        self.assertAllEqual(np.ones(8), got["x"])
@test_util.deprecated_graph_mode_only
def testOutfeedAccumulatedTrainingRequiresOutfeedALL(self):
    """
    Tests that the pipeline op requires a user to give an outfeed of mode ALL
    when accumulating the outfeed.
    """

    def stage1(x):
        with variable_scope.variable_scope("stage1", use_resource=True):
            w = variable_scope.get_variable(name="w", initializer=1.0)
            return w * x

    def identity(x):
        return x

    def optimizer_function(x):
        opt = gradient_descent.GradientDescentOptimizer(0.01)
        loss = x + 1.0
        return pipelining_ops.OptimizerFunctionOutput(opt, loss)

    # deliberately LAST mode: incompatible with accumulate_outfeed=True
    outfeed_queue = ipu_outfeed_queue.IPUOutfeedQueue(
        "__feed13", outfeed_mode=ipu_outfeed_queue.IPUOutfeedMode.LAST)

    def my_net(x):
        return pipelining_ops.pipeline([stage1, identity, identity, identity],
                                       gradient_accumulation_count=8,
                                       inputs=[x],
                                       outfeed_queue=outfeed_queue,
                                       optimizer_function=optimizer_function,
                                       accumulate_outfeed=True)

    with ops.device("/device:IPU:0"):
        # compilation itself must reject the LAST-mode outfeed
        with self.assertRaisesRegex(
                ValueError,
                "To accumulate the outfeed, it must be in IPUOutfeedMode ALL."):
            ipu_compiler.compile(my_net, inputs=[1.0])
@test_util.deprecated_graph_mode_only
def testGradientShapeInference(self):
  """The gradient handed to the optimizer must have a fully-inferred static
  shape matching the variable's shape."""
  with tu.ipu_session():
    variable_shape = (1, 2, 3)

    def stage1(x):
      with variable_scope.variable_scope("stage1", use_resource=True):
        w = variable_scope.get_variable(name="w", shape=variable_shape)
        return w * x

    def stage2(x):
      return x

    class MockOptimizer(gradient_descent.GradientDescentOptimizer):  # pylint: disable=abstract-method
      # Records the gradients it receives so the test can inspect them.
      def apply_gradients(self, grads_and_vars, global_step=None, name=None):
        self.applied_gradients = [g for (g, _) in grads_and_vars]
        return super().apply_gradients(grads_and_vars, global_step, name)

    optimizer = MockOptimizer(0.01)

    def optimizer_function(loss):
      return pipelining_ops.OptimizerFunctionOutput(optimizer, loss)

    outfeed_queue = ipu_outfeed_queue.IPUOutfeedQueue("__feed14")

    def my_net(x):
      return pipelining_ops.pipeline([stage1, stage2],
                                     gradient_accumulation_count=4,
                                     inputs=[x],
                                     outfeed_queue=outfeed_queue,
                                     optimizer_function=optimizer_function)

    with ops.device("/device:IPU:0"):
      ipu_compiler.compile(my_net, inputs=[0.0])

    # Only 'w' is trainable; its gradient shape must match the variable.
    self.assertEqual(1, len(optimizer.applied_gradients))
    self.assertEqual(variable_shape, optimizer.applied_gradients[0].shape)
@test_util.deprecated_graph_mode_only
def testVariableInOptimizer(self):
  """A non-trainable variable created inside the optimizer must be read
  freshly on each pipeline run, so reassigning it changes the update."""
  with tu.ipu_session() as sess:

    def stage1(x):
      with variable_scope.variable_scope("stage1", use_resource=True):
        w = variable_scope.get_variable(name="w", initializer=1.0)
        return w * x

    def identity(x):
      return x

    class MockOptimizer(gradient_descent.GradientDescentOptimizer):  # pylint: disable=abstract-method
      def __init__(self, lr):
        super(MockOptimizer, self).__init__(lr)
        # Non-trainable variable owned by the optimizer; added to every
        # gradient in apply_gradients below.
        with variable_scope.variable_scope("optimizer", use_resource=True):
          self.p = variable_scope.get_variable(name="p",
                                               initializer=2.0,
                                               trainable=False)

      def apply_gradients(self, grads_and_vars, global_step=None, name=None):
        grads_and_vars = [(g + self.p, v) for (g, v) in grads_and_vars]
        return super().apply_gradients(grads_and_vars, global_step, name)

    def optimizer_function(x):
      opt = MockOptimizer(0.5)
      return pipelining_ops.OptimizerFunctionOutput(opt, x)

    outfeed_queue = ipu_outfeed_queue.IPUOutfeedQueue("__feed15")

    def my_net(x):
      return pipelining_ops.pipeline([stage1, identity, identity, identity],
                                     gradient_accumulation_count=8,
                                     inputs=[x],
                                     outfeed_queue=outfeed_queue,
                                     optimizer_function=optimizer_function,
                                     outfeed_loss=True)

    with ops.device("/device:IPU:0"):
      pipeline = ipu_compiler.compile(my_net, inputs=[1.0])

    cfg = IPUConfig()
    cfg.ipu_model.compile_ipu_code = True
    cfg.ipu_model.tiles_per_ipu = 128
    cfg.auto_select_ipus = 4
    cfg.configure_ipu_system()
    utils.move_variable_initialization_to_cpu()

    sess.run(variables.global_variables_initializer())
    sess.run(pipeline)
    # Accumulate 8 lots of gradient of 1.0 => 8.0, then add 2.0 then
    # apply LR and subtract from the original weight:
    #
    # 1.0 - (8.0 + 2.0) * 0.5 = -4.0
    for v in ops.get_default_graph().get_collection('variables'):
      if v.name == "stage1/w:0":
        new_v = sess.run(v)
        self.assertEqual(new_v, -4.0)

    # Now change the optimizer variable
    for v in ops.get_default_graph().get_collection('variables'):
      if v.name == "optimizer/p:0":
        sess.run(v.assign(4.0))

    sess.run(pipeline)
    # Accumulate 8 lots of gradient of 1.0 => 8.0, then add 4.0 then
    # apply LR and subtract from the original weight:
    #
    # -4.0 - (8.0 + 4.0) * 0.5 = -10.0
    for v in ops.get_default_graph().get_collection('variables'):
      if v.name == "stage1/w:0":
        new_v = sess.run(v)
        self.assertEqual(new_v, -10.0)
@test_util.deprecated_graph_mode_only
def testPipelineInferenceWithConditional(self):
  """Inference pipeline whose last stage contains a tf.cond."""
  dataset = tu.create_single_increasing_dataset(10, shape=[1])
  dataset = dataset.batch(batch_size=1, drop_remainder=True)

  infeed_queue = ipu_infeed_queue.IPUInfeedQueue(dataset, "__feed16")
  outfeed_queue = ipu_outfeed_queue.IPUOutfeedQueue("__feed16")

  def stage1(x):
    return x

  def stage2(x):
    return x

  def stage3(x):
    # Branch on the value flowing through the pipeline.
    p = x > 2.0
    return control_flow_ops.cond(p, lambda: constant_op.constant(1.0),
                                 lambda: constant_op.constant(2.0))

  def my_net():
    return pipelining_ops.pipeline([stage1, stage2, stage3],
                                   6,
                                   inputs=[],
                                   infeed_queue=infeed_queue,
                                   outfeed_queue=outfeed_queue)

  with tu.ipu_session() as sess:
    with ops.device("/device:IPU:0"):
      r = ipu_compiler.compile(my_net)

    cfg = IPUConfig()
    cfg.ipu_model.compile_ipu_code = True
    cfg.ipu_model.tiles_per_ipu = 128
    cfg.auto_select_ipus = 4
    cfg.configure_ipu_system()
    utils.move_variable_initialization_to_cpu()

    outfeed_op = outfeed_queue.dequeue()

    sess.run(infeed_queue.initializer)
    sess.run(r)
    output = sess.run(outfeed_op)
    # Inputs 0..5: values <= 2.0 take the else branch (2.0), the rest 1.0.
    self.assertAllClose(output, [2.0, 2.0, 2.0, 1.0, 1.0, 1.0])
@test_util.deprecated_graph_mode_only
def testPipelineWithCustomGradientFunction(self):
  """Pipeline training through a stage wrapping a @custom_gradient function,
  compared against the same model run on the CPU."""

  def dataset_fn():
    dataset = tu.create_single_increasing_dataset(10, shape=[4])
    dataset = dataset.batch(batch_size=4, drop_remainder=True)

    def dataset_parser(value):
      label = math_ops.reduce_mean(value, axis=[1])
      return value, math_ops.cast(label / 10, np.int32)

    return dataset.map(dataset_parser)

  gradient_accumulation_count = 24
  repeat_count = 2

  def optimizer_fn():
    return gradient_descent.GradientDescentOptimizer(0.01)

  @custom_gradient.custom_gradient
  def f(x):
    x = x * x

    def grad(dy):
      # Custom backward pass: scale the incoming gradient by the forward
      # output (x has been rebound to x*x above).
      return dy * x

    return x, grad

  def stage1(x, label):
    with variable_scope.variable_scope("vs", use_resource=True):
      weight = variable_scope.get_variable(
          "w2",
          shape=[4, 4],
          dtype=np.float32,
          initializer=init_ops.ones_initializer())
      x = math_ops.matmul(x, weight)
      return x, label

  def stage2(x, label):
    return f(x), label

  def stage3(x, label):
    loss = math_ops.reduce_mean(
        nn.sparse_softmax_cross_entropy_with_logits(logits=x, labels=label))
    return loss

  def inputs_fn():
    with ops.device('cpu'):
      return []

  # NOTE(review): 14415 is presumably a cycle budget checked by
  # PipelineTester -- confirm against its signature.
  pipelining_test_util.PipelineTester.compare_pipeline_to_cpu(
      [stage1, stage2, stage3],
      inputs_fn, [],
      repeat_count,
      gradient_accumulation_count,
      dataset_fn,
      optimizer_fn,
      self,
      14415,
      schedule=pipelining_ops.PipelineSchedule.Grouped)
@test_util.deprecated_graph_mode_only
def testPipelineWithLoop(self):
  """Pipeline training with a while_loop inside a stage, compared against
  the same model run on the CPU."""

  def dataset_fn():
    dataset = tu.create_single_increasing_dataset(10, shape=[4])
    dataset = dataset.batch(batch_size=4, drop_remainder=True)

    def dataset_parser(value):
      label = math_ops.reduce_mean(value, axis=[1])
      return value, math_ops.cast(label / 10, np.int32)

    return dataset.map(dataset_parser)

  gradient_accumulation_count = 24
  repeat_count = 2

  def optimizer_fn():
    return gradient_descent.GradientDescentOptimizer(0.01)

  def stage1(x, label):
    with variable_scope.variable_scope("vs", use_resource=True):
      weight = variable_scope.get_variable(
          "w2",
          shape=[4, 4],
          dtype=np.float32,
          initializer=init_ops.ones_initializer())
      x = math_ops.matmul(x, weight)
      return x, label

  def stage2(x, label):
    # Repeatedly square x in a bounded while_loop (maximum_iterations caps
    # the i < 10 condition at 5 iterations).
    x = control_flow_ops.while_loop(lambda i, _: i < 10,
                                    lambda i, x: (i + 1, x * x), (0, x),
                                    maximum_iterations=5)[1]
    return x, label

  def stage3(x, label):
    loss = math_ops.reduce_mean(
        nn.sparse_softmax_cross_entropy_with_logits(logits=x, labels=label))
    return loss

  def inputs_fn():
    with ops.device('cpu'):
      return []

  # NOTE(review): 11326 is presumably a cycle budget checked by
  # PipelineTester -- confirm against its signature.
  pipelining_test_util.PipelineTester.compare_pipeline_to_cpu(
      [stage1, stage2, stage3], inputs_fn, [], repeat_count,
      gradient_accumulation_count, dataset_fn, optimizer_fn, self, 11326)
@test_util.deprecated_graph_mode_only
def testPipelineWithTensorArray(self):
  """Pipeline training with a TensorArray filled by a while_loop inside a
  stage, compared against the same model run on the CPU."""

  def dataset_fn():
    dataset = tu.create_single_increasing_dataset(10, shape=[4])
    dataset = dataset.batch(batch_size=4, drop_remainder=True)

    def dataset_parser(value):
      label = math_ops.reduce_mean(value, axis=[1])
      return math_ops.cast(value,
                           np.int8), math_ops.cast(label / 10, np.int32)

    return dataset.map(dataset_parser)

  gradient_accumulation_count = 24
  repeat_count = 2

  def optimizer_fn():
    return gradient_descent.GradientDescentOptimizer(0.01)

  def stage1(x, label):
    # The dataset yields int8; cast back to float for the matmul.
    x = math_ops.cast(x, np.float32)
    with variable_scope.variable_scope("vs", use_resource=True):
      weight = variable_scope.get_variable(
          "w2",
          shape=[4, 4],
          dtype=np.float32,
          initializer=init_ops.ones_initializer())
      x = math_ops.matmul(x, weight)
      return x, label

  def stage2(x, label):
    # Fill a TensorArray with [0, 2, 4, 6] in a bounded while_loop and
    # scale x by its stacked contents.
    ta = tensor_array_ops.TensorArray(dtype=np.float32, size=4)

    def body(i, tx):
      tx = tx.write(i, math_ops.cast(i * 2, np.float32))
      return i + 1, tx

    ta = control_flow_ops.while_loop(lambda i, _: i < 4,
                                     body, (0, ta),
                                     maximum_iterations=5)[1]
    return x * ta.stack(), label

  def stage3(x, label):
    loss = math_ops.reduce_mean(
        nn.sparse_softmax_cross_entropy_with_logits(logits=x, labels=label))
    return loss

  def inputs_fn():
    with ops.device('cpu'):
      return []

  # NOTE(review): 11326 is presumably a cycle budget checked by
  # PipelineTester -- confirm against its signature.
  pipelining_test_util.PipelineTester.compare_pipeline_to_cpu(
      [stage1, stage2, stage3], inputs_fn, [], repeat_count,
      gradient_accumulation_count, dataset_fn, optimizer_fn, self, 11326)
@test_util.deprecated_graph_mode_only
def testPipelineWithEmbeddingOptimization(self):
  """Pipeline training through an embedding lookup, compared against the
  same model sharded (non-pipelined) across IPUs."""
  dataset_size = 100
  embedding_size = 15

  def dataset_fn():
    dataset = tu.create_single_increasing_dataset(dataset_size, shape=[4])
    dataset = dataset.batch(batch_size=2, drop_remainder=True)

    def dataset_parser(value):
      label = math_ops.reduce_mean(value, axis=[1])
      return math_ops.cast(value,
                           np.int32), math_ops.cast(label % 4, np.int32)

    return dataset.map(dataset_parser)

  gradient_accumulation_count = 8
  repeat_count = 2

  def optimizer_fn():
    return gradient_descent.GradientDescentOptimizer(0.01)

  # Fixed seed so pipelined and sharded runs start from identical weights.
  np.random.seed(1)
  embedding_shape = (dataset_size, embedding_size)
  embedding_initializer = np.random.normal(0, 1, embedding_shape).astype(
      np.float32)
  weights_shape = (embedding_size, embedding_size)
  weights_initializer = np.random.normal(0, 1,
                                         weights_shape).astype(np.float32)

  def stage1(idx, label):
    with variable_scope.variable_scope("stage1", use_resource=True):
      embedding = variable_scope.get_variable(
          "c",
          dtype=np.float32,
          initializer=embedding_initializer,
          trainable=True)
      x = embedding_ops.embedding_lookup(embedding, idx)
      return x, label

  def stage2(x, label):
    with variable_scope.variable_scope("vs", use_resource=True):
      weight = variable_scope.get_variable("w0",
                                           dtype=np.float32,
                                           initializer=weights_initializer,
                                           trainable=True)
      x = math_ops.matmul(x, weight)
      return x, label

  def stage3(x, label):
    x = math_ops.reduce_sum(x, axis=[-1])
    return x, label

  def stage4(x, label):
    loss = math_ops.reduce_mean(
        nn.sparse_softmax_cross_entropy_with_logits(logits=x, labels=label))
    return loss

  def inputs_fn():
    with ops.device('cpu'):
      return []

  # NOTE(review): 12049 is presumably a cycle budget checked by
  # PipelineTester -- confirm against its signature.
  pipelining_test_util.PipelineTester.compare_pipeline_to_sharding(
      [stage1, stage2, stage3, stage4],
      inputs_fn, [],
      repeat_count,
      gradient_accumulation_count,
      dataset_fn,
      optimizer_fn,
      self,
      12049,
      schedule=pipelining_ops.PipelineSchedule.Interleaved)
@test_util.deprecated_graph_mode_only
def testGradientAccumulationDtype(self):
gradient_accumulation_count = 8
gradient_accumulation_dtype = np.float32
x = np.finfo(np.float16).max
y = np.array(0.0, dtype=np.float16)
initial_w = np.array(1.0, dtype=np.float16)
learning_rate = 2**-10
features = np.repeat(x, gradient_accumulation_count)
labels = np.repeat(y, gradient_accumulation_count)
dataset = dataset_ops.Dataset.from_tensor_slices((features, labels))
infeed_queue = ipu_infeed_queue.IPUInfeedQueue(dataset, "infeed")
outfeed_queue = ipu_outfeed_queue.IPUOutfeedQueue("outfeed")
grad_outfeed_queue = ipu_outfeed_queue.IPUOutfeedQueue("grad_outfeed")
def stage1(features, labels):
w = variable_scope.get_variable(name="w", initializer=initial_w)
partial = w * features
return partial, labels
def stage2(partial, labels):
loss = partial + labels
return loss
def identity(*args):
return args
def optimizer_function(loss):
class CastingGradientDescent(optimizer_lib.Optimizer): # pylint: disable=abstract-method
"""Compute update using the dtype of the gradient, and then cast to
the dtype of the variable."""
def __init__(self, outer):
self.outer = outer
super().__init__(use_locking=False, name="CastingGradientDescent")
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
update_ops = []
for (grad, var) in grads_and_vars:
self.outer.assertEqual(grad.dtype, gradient_accumulation_dtype)
update_ops.append(grad_outfeed_queue.enqueue(grad))
delta = math_ops.cast(-learning_rate * grad, var.dtype)
update_ops.append(var.assign_add(delta))
return control_flow_ops.group(*update_ops)
opt = CastingGradientDescent(self)
return pipelining_ops.OptimizerFunctionOutput(opt, loss)
def model():
pipeline_op = pipelining_ops.pipeline(
computational_stages=[stage1, identity, identity, stage2],
gradient_accumulation_count=gradient_accumulation_count,
gradient_accumulation_dtype=gradient_accumulation_dtype,
infeed_queue=infeed_queue,
outfeed_queue=outfeed_queue,
optimizer_function=optimizer_function,
name="Pipeline")
return pipeline_op
def compiled_model():
with ops.device("/device:IPU:0"):
return ipu_compiler.compile(model)
with tu.ipu_session() as sess:
train_op = compiled_model()
dequeued_gradient = grad_outfeed_queue.dequeue()
cfg = IPUConfig()
cfg.ipu_model.compile_ipu_code = True
cfg.ipu_model.tiles_per_ipu = 128
cfg.auto_select_ipus = 4
cfg.configure_ipu_system()
utils.move_variable_initialization_to_cpu()
sess.run(infeed_queue.initializer)
sess.run(variables.global_variables_initializer())
sess.run(train_op)
[actual_accumulated_gradient] = sess.run(dequeued_gradient)
# L(x) = w * x + y
# dL(x)/dw = | |
order, e.g.: 1. on one hand,
listing docs created by the user, sorted by the created time ascending
will have undefinite expiration because the results cannot change while
the iteration is happening. This cursor would be suitable for long term
polling. 2. on the other hand, listing docs sorted by the last modified
time will have a very short expiration as docs do get modified very
often and the modified time can be changed while the iteration is
happening thus altering the results.
"""
# Storage layout for this (generated-looking) struct: each field keeps a
# value slot plus a presence flag instead of a per-instance __dict__.
__slots__ = [
    '_value_value',
    '_value_present',
    '_expiration_value',
    '_expiration_present',
]

# 'value' is required; its getter raises AttributeError when unset.
_has_required_fields = True
def __init__(self,
             value=None,
             expiration=None):
    # Presence flags start False; assigning through the property setters
    # below validates the value and flips the flag.
    self._value_value = None
    self._value_present = False
    self._expiration_value = None
    self._expiration_present = False
    if value is not None:
        self.value = value
    if expiration is not None:
        self.expiration = expiration
@property
def value(self):
    """
    The actual cursor value.

    :rtype: str
    """
    if self._value_present:
        return self._value_value
    else:
        raise AttributeError("missing required field 'value'")

@value.setter
def value(self, val):
    # _value_validator is attached to the class elsewhere (codegen pattern).
    val = self._value_validator.validate(val)
    self._value_value = val
    self._value_present = True

@value.deleter
def value(self):
    self._value_value = None
    self._value_present = False
@property
def expiration(self):
    """
    Expiration time of ``value``. Some cursors might have expiration time
    assigned. This is a UTC value after which the cursor is no longer valid
    and the API starts returning an error. If cursor expires a new one needs
    to be obtained and pagination needs to be restarted. Some cursors might
    be short-lived some cursors might be long-lived. This really depends on
    the sorting type and order, e.g.: 1. on one hand, listing docs created
    by the user, sorted by the created time ascending will have undefinite
    expiration because the results cannot change while the iteration is
    happening. This cursor would be suitable for long term polling. 2. on
    the other hand, listing docs sorted by the last modified time will have
    a very short expiration as docs do get modified very often and the
    modified time can be changed while the iteration is happening thus
    altering the results.

    :rtype: datetime.datetime
    """
    # Optional field: absent means "no expiration", not an error.
    if self._expiration_present:
        return self._expiration_value
    else:
        return None

@expiration.setter
def expiration(self, val):
    # Assigning None clears the field rather than storing None.
    if val is None:
        del self.expiration
        return
    val = self._expiration_validator.validate(val)
    self._expiration_value = val
    self._expiration_present = True

@expiration.deleter
def expiration(self):
    self._expiration_value = None
    self._expiration_present = False
def _process_custom_annotations(self, annotation_type, processor):
    # No annotated fields of its own; delegate to the base implementation.
    super(Cursor, self)._process_custom_annotations(annotation_type, processor)
def __repr__(self):
    # Debug representation; shows raw slot values, bypassing the properties.
    return 'Cursor(value={!r}, expiration={!r})'.format(
        self._value_value,
        self._expiration_value,
    )
# Module-level validator instance used by the serialization layer.
Cursor_validator = bv.Struct(Cursor)
class PaperApiBaseError(bb.Union):
    """
    This class acts as a tagged union. Only one of the ``is_*`` methods will
    return true. To get the associated value of a tag (if one exists), use the
    corresponding ``get_*`` method.

    :ivar insufficient_permissions: Your account does not have permissions to
        perform this action.
    """

    # Unrecognized tags deserialize to 'other' (forward compatibility).
    _catch_all = 'other'
    # Attribute is overwritten below the class definition
    insufficient_permissions = None
    # Attribute is overwritten below the class definition
    other = None

    def is_insufficient_permissions(self):
        """
        Check if the union tag is ``insufficient_permissions``.

        :rtype: bool
        """
        return self._tag == 'insufficient_permissions'

    def is_other(self):
        """
        Check if the union tag is ``other``.

        :rtype: bool
        """
        return self._tag == 'other'

    def _process_custom_annotations(self, annotation_type, processor):
        super(PaperApiBaseError, self)._process_custom_annotations(annotation_type, processor)

    def __repr__(self):
        return 'PaperApiBaseError(%r, %r)' % (self._tag, self._value)

PaperApiBaseError_validator = bv.Union(PaperApiBaseError)
class DocLookupError(PaperApiBaseError):
    """
    This class acts as a tagged union. Only one of the ``is_*`` methods will
    return true. To get the associated value of a tag (if one exists), use the
    corresponding ``get_*`` method.

    :ivar doc_not_found: The required doc was not found.
    """

    # Attribute is overwritten below the class definition
    doc_not_found = None

    def is_doc_not_found(self):
        """
        Check if the union tag is ``doc_not_found``.

        :rtype: bool
        """
        return self._tag == 'doc_not_found'

    def _process_custom_annotations(self, annotation_type, processor):
        super(DocLookupError, self)._process_custom_annotations(annotation_type, processor)

    def __repr__(self):
        return 'DocLookupError(%r, %r)' % (self._tag, self._value)

DocLookupError_validator = bv.Union(DocLookupError)
class DocSubscriptionLevel(bb.Union):
    """
    The subscription level of a Paper doc.

    This class acts as a tagged union. Only one of the ``is_*`` methods will
    return true. To get the associated value of a tag (if one exists), use the
    corresponding ``get_*`` method.

    :ivar default: No change email messages unless you're the creator.
    :ivar ignore: Ignored: Not shown in pad lists or activity and no email
        message is sent.
    :ivar every: Subscribed: Shown in pad lists and activity and change email
        messages are sent.
    :ivar no_email: Unsubscribed: Shown in pad lists, but not in activity and no
        change email messages are sent.
    """

    # Closed union: no catch-all tag, unknown tags are an error.
    _catch_all = None
    # Attribute is overwritten below the class definition
    default = None
    # Attribute is overwritten below the class definition
    ignore = None
    # Attribute is overwritten below the class definition
    every = None
    # Attribute is overwritten below the class definition
    no_email = None

    def is_default(self):
        """
        Check if the union tag is ``default``.

        :rtype: bool
        """
        return self._tag == 'default'

    def is_ignore(self):
        """
        Check if the union tag is ``ignore``.

        :rtype: bool
        """
        return self._tag == 'ignore'

    def is_every(self):
        """
        Check if the union tag is ``every``.

        :rtype: bool
        """
        return self._tag == 'every'

    def is_no_email(self):
        """
        Check if the union tag is ``no_email``.

        :rtype: bool
        """
        return self._tag == 'no_email'

    def _process_custom_annotations(self, annotation_type, processor):
        super(DocSubscriptionLevel, self)._process_custom_annotations(annotation_type, processor)

    def __repr__(self):
        return 'DocSubscriptionLevel(%r, %r)' % (self._tag, self._value)

DocSubscriptionLevel_validator = bv.Union(DocSubscriptionLevel)
class ExportFormat(bb.Union):
    """
    The desired export format of the Paper doc.

    This class acts as a tagged union. Only one of the ``is_*`` methods will
    return true. To get the associated value of a tag (if one exists), use the
    corresponding ``get_*`` method.

    :ivar html: The HTML export format.
    :ivar markdown: The markdown export format.
    """

    # Unrecognized tags deserialize to 'other' (forward compatibility).
    _catch_all = 'other'
    # Attribute is overwritten below the class definition
    html = None
    # Attribute is overwritten below the class definition
    markdown = None
    # Attribute is overwritten below the class definition
    other = None

    def is_html(self):
        """
        Check if the union tag is ``html``.

        :rtype: bool
        """
        return self._tag == 'html'

    def is_markdown(self):
        """
        Check if the union tag is ``markdown``.

        :rtype: bool
        """
        return self._tag == 'markdown'

    def is_other(self):
        """
        Check if the union tag is ``other``.

        :rtype: bool
        """
        return self._tag == 'other'

    def _process_custom_annotations(self, annotation_type, processor):
        super(ExportFormat, self)._process_custom_annotations(annotation_type, processor)

    def __repr__(self):
        return 'ExportFormat(%r, %r)' % (self._tag, self._value)

ExportFormat_validator = bv.Union(ExportFormat)
class Folder(bb.Struct):
    """
    Data structure representing a Paper folder.

    :ivar id: Paper folder ID. This ID uniquely identifies the folder.
    :ivar name: Paper folder name.
    """

    # Per-field value slot plus presence flag instead of __dict__.
    __slots__ = [
        '_id_value',
        '_id_present',
        '_name_value',
        '_name_present',
    ]

    # Both 'id' and 'name' are required; their getters raise when unset.
    _has_required_fields = True

    def __init__(self,
                 id=None,
                 name=None):
        self._id_value = None
        self._id_present = False
        self._name_value = None
        self._name_present = False
        if id is not None:
            self.id = id
        if name is not None:
            self.name = name

    @property
    def id(self):
        """
        Paper folder ID. This ID uniquely identifies the folder.

        :rtype: str
        """
        if self._id_present:
            return self._id_value
        else:
            raise AttributeError("missing required field 'id'")

    @id.setter
    def id(self, val):
        # _id_validator is attached to the class elsewhere (codegen pattern).
        val = self._id_validator.validate(val)
        self._id_value = val
        self._id_present = True

    @id.deleter
    def id(self):
        self._id_value = None
        self._id_present = False

    @property
    def name(self):
        """
        Paper folder name.

        :rtype: str
        """
        if self._name_present:
            return self._name_value
        else:
            raise AttributeError("missing required field 'name'")

    @name.setter
    def name(self, val):
        val = self._name_validator.validate(val)
        self._name_value = val
        self._name_present = True

    @name.deleter
    def name(self):
        self._name_value = None
        self._name_present = False

    def _process_custom_annotations(self, annotation_type, processor):
        super(Folder, self)._process_custom_annotations(annotation_type, processor)

    def __repr__(self):
        return 'Folder(id={!r}, name={!r})'.format(
            self._id_value,
            self._name_value,
        )

Folder_validator = bv.Struct(Folder)
class FolderSharingPolicyType(bb.Union):
"""
The sharing policy of a Paper folder. Note: The sharing policy of
subfolders is inherited from the root folder.
This class acts as a tagged union. Only one of the ``is_*`` methods will
return true. To get the associated value of a tag (if one exists), use the
corresponding ``get_*`` method.
:ivar team: Everyone in your team and anyone directly invited can access
this folder.
:ivar invite_only: Only people directly invited can access this folder.
"""
_catch_all = None
# Attribute is overwritten below the class definition
team = None
# Attribute is overwritten below the class definition
invite_only = None
def is_team(self):
"""
Check if the union tag is ``team``.
:rtype: bool
"""
return self._tag == 'team'
def is_invite_only(self):
"""
Check | |
# Repository: vst/defx
#!/usr/bin/env python3.6
"""
Copyright 2017 <NAME> <<EMAIL>>
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the
distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import base64
import datetime
import json
import sys
from decimal import Decimal
from http.client import HTTPResponse
from typing import Dict, Set, Tuple, Any, Type, Optional, Union, List
from urllib.error import URLError, HTTPError
from urllib.parse import urlencode, urlparse
from urllib.request import urlopen, Request
import logging
from collections import OrderedDict
#: Defines the library version.
Version = "0.0.1"

#: Defines the module logger.
Logger = logging.getLogger(__name__)

#: Defines all supported currencies (ISO 4217 codes plus pseudo-currencies
#: such as BTC and precious-metal codes such as XAU/XAG).
SupportedCurrencies = {
    "AED", "AFN", "ALL", "AMD", "ANG", "AOA", "ARS", "AUD", "AWG", "AZN",
    "BAM", "BBD", "BDT", "BGN", "BHD", "BIF", "BMD", "BND", "BOB", "BRL",
    "BSD", "BTC", "BTN", "BWP", "BYN", "BYR", "BZD", "CAD", "CDF", "CHF",
    "CLF", "CLP", "CNH", "CNY", "COP", "CRC", "CUC", "CUP", "CVE", "CZK",
    "DJF", "DKK", "DOP", "DZD", "EGP", "ERN", "ETB", "EUR", "FJD", "FKP",
    "GBP", "GEL", "GGP", "GHS", "GIP", "GMD", "GNF", "GTQ", "GYD", "HKD",
    "HNL", "HRK", "HTG", "HUF", "IDR", "ILS", "IMP", "INR", "IQD", "IRR",
    "ISK", "JEP", "JMD", "JOD", "JPY", "KES", "KGS", "KHR", "KMF", "KPW",
    "KRW", "KWD", "KYD", "KZT", "LAK", "LBP", "LKR", "LRD", "LSL", "LYD",
    "MAD", "MDL", "MGA", "MKD", "MMK", "MNT", "MOP", "MRO", "MUR", "MVR",
    "MWK", "MXN", "MYR", "MZN", "NAD", "NGN", "NIO", "NOK", "NPR", "NZD",
    "OMR", "PAB", "PEN", "PGK", "PHP", "PKR", "PLN", "PYG", "QAR", "RON",
    "RSD", "RUB", "RWF", "SAR", "SBD", "SCR", "SDG", "SEK", "SGD", "SHP",
    "SLL", "SOS", "SRD", "SSP", "STD", "SVC", "SYP", "SZL", "THB", "TJS",
    "TMT", "TND", "TOP", "TRY", "TTD", "TWD", "TZS", "UAH", "UGX", "USD",
    "UYU", "UZS", "VEF", "VND", "VUV", "WST", "XAF", "XAG", "XAU", "XCD",
    "XDR", "XOF", "XPD", "XPF", "XPT", "YER", "ZAR", "ZMK", "ZMW", "ZWL",
}

#: Defines the base currency.
BaseCurrency = "USD"

#: Defines the quantization template (8 decimal places).
Quantizer = Decimal("0.00000001")

#: Defines the currency type.
CCY = str

#: Defines the type of the currency pair.
Pair = Tuple[CCY, CCY]

#: Defines the type of the date.
Date = datetime.date

#: Defines the type of the rates.
Rate = Decimal

#: Defines the type of partial rates for an arbitrary base currency on an arbitrary date.
PartialRates = Dict[CCY, Rate]

#: Defines the type of rates on an arbitrary date.
Rates = Dict[Pair, Rate]

#: Defines the type of daily rates.
DailyRates = Dict[Date, Dict[Pair, Rate]]
class DecimalEncoder(json.JSONEncoder):
    """
    JSON encoder that serializes :class:`Decimal` values as strings.

    Note: Adopted from http://stackoverflow.com/questions/4019856/decimal-to-json
    """

    def default(self, obj):
        # Render decimals via str() to avoid float rounding; anything else is
        # deferred to the base class, which raises TypeError.
        if not isinstance(obj, Decimal):
            return json.JSONEncoder.default(self, obj)
        return str(obj)
def rated(value: Union[None, Decimal, float, str]) -> Optional[Decimal]:
    """
    Converts a value to a proper rate value.

    ``None`` and the empty string map to ``None``; everything else is coerced
    to a :class:`Decimal` quantized to the module-level ``Quantizer`` template.
    """
    ## Treat missing/blank input as "no rate available":
    missing = value is None or value == ""
    return None if missing else Decimal(value).quantize(Quantizer)
class UserException(Exception):
    """
    Defines a user exception model (base class for user-facing errors).
    """
class APIClient:
    """
    Defines an API client for the openexchangerates.org API.
    """

    class APIException(UserException):
        """
        Defines an API exception model.
        """
        pass

    #: Defines the default base API endpoint URL.
    baseurl = "https://openexchangerates.org/api"

    def __init__(self, apikey: str) -> None:
        """
        Initializes an API client with the given API key.
        """
        self.__apikey = apikey

    def get(self, uri, params) -> HTTPResponse:
        """
        Returns the HTTP response for the given uri and params.
        """
        ## Get the query string parameters:
        qsparams = urlencode(params)
        ## Construct the URL:
        url = f"{self.baseurl}/{uri}?app_id={self.__apikey}&{qsparams}"
        ## Log it:
        Logger.debug(f"Reaching {url}")
        ## Open and return:
        return urlopen(url)  # type: ignore

    def __call__(self, uri: str, **kwargs) -> Dict[str, Any]:
        """
        Calls the remote endpoint and returns the decoded JSON result.

        :raises APIClient.APIException: on transport errors, non-200
            responses, JSON decoding problems or any other failure.
        """
        ## Attempt to make the request and retrieve the response.
        try:
            ## Attempt to open the url:
            with self.get(uri, kwargs) as response:
                ## Check the status code:
                if response.status != 200:
                    ## Something is wrong, raise Exception:
                    raise APIClient.APIException(f"Can not retrieve response from API: {response.status} {response.reason}")
                ## Done, decode and return the response content:
                return json.load(response)  # type: ignore
        except APIClient.APIException:
            ## BUG FIX: our own, already-specific error used to fall through
            ## to the blanket handler below and be re-wrapped as an "unknown
            ## exception", losing the status/reason message. Propagate as-is.
            raise
        except HTTPError as exc:
            raise APIClient.APIException(f"Can not retrieve response from API: {exc.code} {exc.reason} {exc.read()}") from exc
        except URLError as exc:
            raise APIClient.APIException(f"Can not connect to API: {exc}") from exc
        except Exception as exc:
            raise APIClient.APIException(f"Unknown exception occurred during API request: {exc}") from exc
class TaskType:
"""
Defines a task type.
"""
#: Defines an internal registry of registered task types.
_registry = {} # type: Dict[str, Type[TaskType]]
def __init_subclass__(cls, **kwargs):
    """
    Registers the concrete task type class.
    """
    ## Call the super.
    super().__init_subclass__(**kwargs)
    ## Add to the registry, keyed by the subclass' ``code``.
    ## NOTE(review): assumes concrete subclasses override ``code`` with a
    ## plain class attribute (a string); if it is left as the base-class
    ## property, the property object itself becomes the key -- confirm.
    cls._registry[cls.code] = cls
def __init__(self, client: APIClient) -> None:
    """
    Keep the client for use by the concrete task implementation.
    """
    self._client = client
@property
def code(self) -> CCY:
    """
    Returns the code of the task type.

    Concrete subclasses must override this; ``__init_subclass__`` uses it
    as the registry key.
    """
    raise NotImplementedError
def __call__(self, base: CCY, others: Set[CCY], **kwargs) -> DailyRates:
    """
    Carries on the task.

    Concrete subclasses implement the actual retrieval and return daily
    rates for ``base`` against ``others``.
    """
    raise NotImplementedError
@classmethod
def of(cls, code: str) -> Optional[Type["TaskType"]]:
    """
    Returns the task type by the given code, or ``None`` when the code is
    not registered.
    """
    return cls._registry.get(code)
@classmethod
def all(cls) -> Set[Type["TaskType"]]:
"""
Returns available choices for task types, in particular themselves as classes.
"""
return set(cls._registry.values())
@classmethod
def choices(cls) -> Set[CCY]:
"""
Returns available choices for task types, in particular their codes.
"""
return set(cls._registry.keys())
@classmethod
def pairup(cls, base: CCY, others: Set[CCY]) -> Set[Pair]:
"""
Creates a set of FX pairs (tuples of CCY codes) of interest.
"""
return {(base, c) for c in others if c != base}
@classmethod
def permute(cls, codes: Set[CCY]) -> Set[Pair]:
"""
Creates permutation of all currency pair codes for the given codes.
"""
return {(c1, c2) for c1 in codes for c2 in codes if not c1 == c2}
@classmethod
def getrate(cls, ccy1: CCY, ccy2: CCY, base: CCY, lookup: Rates) -> Rate:
    """
    Finds or computes the rate for the given two currencies and an FX rate database.

    Resolution order: direct pair, then the reciprocal of the inverse pair,
    and finally the cross rate via the ``base`` currency.

    :raises KeyError: if neither the pair nor the base crosses are present.
    """
    try:
        ## Direct quote available?
        return rated(lookup[(ccy1, ccy2)])
    except KeyError:
        try:
            ## Inverse quote available? Use its reciprocal.
            return rated(Decimal("1") / lookup[(ccy2, ccy1)])
        except KeyError:
            ## Fall back to the cross rate via the base currency.
            return rated(lookup[(base, ccy2)] / lookup[(base, ccy1)])
@classmethod
def getrates(cls, base: CCY, adict: Dict[CCY, float]) -> Rates:
    """
    Converts the API response fragment for rates to a proper rate lookup table.
    """
    ## Key each rate by the (base, other) pair; ``rated`` quantizes values.
    return {(base, other): rated(rate) for other, rate in adict.items()}
@classmethod
def build(cls, base: CCY, rates: Rates) -> Rates:
    """
    Builds an `FX Pair` - `FX Rate` database for an arbitrary date.
    """
    ## Get all currencies involved.
    ## NOTE(review): despite the original comment "except the base
    ## currency", this set DOES include the base whenever it appears in a
    ## pair; downstream ``permute``/``getrate`` handle that correctly.
    currencies = {c for pair in rates.keys() for c in pair}
    ## Get pairs as all permutations of currencies:
    pairs = cls.permute(currencies)
    ## Get permutations, compute crosses and return:
    return {(ccy1, ccy2): cls.getrate(ccy1, ccy2, base, rates) for ccy1, ccy2 in pairs}
@classmethod
def flatten(cls, drates: DailyRates) -> List[Tuple[str, str, float]]:
"""
Flattens the daily rates into a list of (date, Pair, rate) tuples.
"""
return [(str(d), f"{p[0]}{p[1]}", float(r)) for d, rs in drates.items() for p, r in rs.items()]
@classmethod
def csvify(cls, drates: DailyRates, fdate: str="date", fpair: str="pair", frate: str="rate") -> str:
"""
Returns a CSV representation of daily rates.
"""
## Define escape function:
escape = lambda x: "," in x and "\"{}\"".format(x.replace("\"", "\\\"")) or x
        ## Escape fields:
fdate = escape(fdate)
fpair = escape(fpair)
frate = escape(frate)
## Construct end
return f"{fdate},{fpair},{frate}\n" + "\n".join([f"{d},{p},{r}" for d, p, r | |
TODO: generalize this function to grayscale / black-and-white images; it does not
make sense for non-RGB input. In the MNIST examples, the mean and stddev are
1-dimensional (since the inputs are greyscale -- no RGB channels).
"""
if self.blob.dtype == np.uint8 and self.blob.ndim == 3:
blob = (self.blob / 255.0).astype(np.float32)
blob = _move_channel_axis(blob, channel_axis, 0)
mean = np.asarray(img_mean, dtype=np.float32)
std = np.asarray(img_std, dtype=np.float32)
blob = (blob - mean[:, None, None]) / std[:, None, None]
# set back channel to original
blob = _move_channel_axis(blob, 0, channel_axis)
self.blob = blob
else:
raise ValueError(
f'`blob` must be a uint8 ndarray with ndim=3, but receiving {self.blob.dtype} with ndim={self.blob.ndim}'
)
return self
def convert_buffer_to_blob(
self, dtype: Optional[str] = None, count: int = -1, offset: int = 0
) -> 'Document':
"""Assuming the :attr:`buffer` is a _valid_ buffer of Numpy ndarray,
set :attr:`blob` accordingly.
:param dtype: Data-type of the returned array; default: float.
:param count: Number of items to read. ``-1`` means all data in the buffer.
:param offset: Start reading the buffer from this offset (in bytes); default: 0.
:return: itself after processed
"""
self.blob = np.frombuffer(self.buffer, dtype=dtype, count=count, offset=offset)
return self
def convert_blob_to_buffer(self) -> 'Document':
"""Convert :attr:`.blob` to :attr:`.buffer` inplace.
:return: itself after processed
"""
self.buffer = self.blob.tobytes()
return self
def convert_uri_to_buffer(self) -> 'Document':
"""Convert :attr:`.uri` to :attr:`.buffer` inplace.
Internally it downloads from the URI and set :attr:`buffer`.
:return: itself after processed
"""
self.buffer = _uri_to_buffer(self.uri)
return self
def convert_uri_to_datauri(
self, charset: str = 'utf-8', base64: bool = False
) -> 'Document':
"""Convert :attr:`.uri` to dataURI and store it in :attr:`.uri` inplace.
:param charset: charset may be any character set registered with IANA
:param base64: used to encode arbitrary octet sequences into a form that satisfies the rules of 7bit. Designed to be efficient for non-text 8 bit and binary data. Sometimes used for text data that frequently uses non-US-ASCII characters.
:return: itself after processed
"""
if not _is_datauri(self.uri):
buffer = _uri_to_buffer(self.uri)
self.uri = _to_datauri(self.mime_type, buffer, charset, base64, binary=True)
return self
def convert_buffer_to_uri(
self, charset: str = 'utf-8', base64: bool = False
) -> 'Document':
"""Convert :attr:`.buffer` to data :attr:`.uri` in place.
Internally it first reads into buffer and then converts it to data URI.
:param charset: charset may be any character set registered with IANA
:param base64: used to encode arbitrary octet sequences into a form that satisfies the rules of 7bit.
Designed to be efficient for non-text 8 bit and binary data. Sometimes used for text data that
frequently uses non-US-ASCII characters.
:return: itself after processed
"""
if not self.mime_type:
raise ValueError(
f'{self.mime_type} is unset, can not convert it to data uri'
)
self.uri = _to_datauri(
self.mime_type, self.buffer, charset, base64, binary=True
)
return self
def convert_text_to_uri(
self, charset: str = 'utf-8', base64: bool = False
) -> 'Document':
"""Convert :attr:`.text` to data :attr:`.uri`.
:param charset: charset may be any character set registered with IANA
:param base64: used to encode arbitrary octet sequences into a form that satisfies the rules of 7bit.
Designed to be efficient for non-text 8 bit and binary data.
Sometimes used for text data that frequently uses non-US-ASCII characters.
:return: itself after processed
"""
self.uri = _to_datauri(self.mime_type, self.text, charset, base64, binary=False)
return self
def convert_uri_to_text(self) -> 'Document':
"""Convert :attr:`.uri` to :attr`.text` inplace.
:return: itself after processed
"""
buffer = _uri_to_buffer(self.uri)
self.text = buffer.decode()
return self
def convert_content_to_uri(self) -> 'Document':
"""Convert :attr:`.content` in :attr:`.uri` inplace with best effort
:return: itself after processed
"""
if self.text:
self.convert_text_to_uri()
elif self.buffer:
self.convert_buffer_to_uri()
elif self.content_type:
raise NotImplementedError
return self
def convert_text_to_blob(
self,
vocab: Dict[str, int],
max_length: Optional[int] = None,
dtype: str = 'int64',
) -> 'Document':
"""Convert :attr:`.text` to :attr:`.blob` inplace.
In the end :attr:`.blob` will be a 1D array where `D` is `max_length`.
To get the vocab of a DocumentArray, you can use `jina.types.document.converters.build_vocab` to
:param vocab: a dictionary that maps a word to an integer index, `0` is reserved for padding, `1` is reserved
for unknown words in :attr:`.text`. So you should *not* include these two entries in `vocab`.
:param max_length: the maximum length of the sequence. Sequence longer than this are cut off from *beginning*.
Sequence shorter than this will be padded with `0` from right hand side.
:param dtype: the dtype of the generated :attr:`.blob`
:return: Document itself after processed
"""
self.blob = np.array(
_text_to_int_sequence(self.text, vocab, max_length), dtype=dtype
)
return self
def convert_blob_to_text(
self, vocab: Union[Dict[str, int], Dict[int, str]], delimiter: str = ' '
) -> 'Document':
"""Convert :attr:`.blob` to :attr:`.text` inplace.
:param vocab: a dictionary that maps a word to an integer index, `0` is reserved for padding, `1` is reserved
for unknown words in :attr:`.text`
:param delimiter: the delimiter that used to connect all words into :attr:`.text`
:return: Document itself after processed
"""
if isinstance(list(vocab.keys())[0], str):
_vocab = {v: k for k, v in vocab.items()}
_text = []
for k in self.blob:
k = int(k)
if k == 0:
continue
elif k == 1:
_text.append('<UNK>')
else:
_text.append(_vocab.get(k, '<UNK>'))
self.text = delimiter.join(_text)
return self
def convert_image_blob_to_sliding_windows(
self,
window_shape: Tuple[int, int] = (64, 64),
strides: Optional[Tuple[int, int]] = None,
padding: bool = False,
channel_axis: int = -1,
as_chunks: bool = False,
) -> 'Document':
"""Convert :attr:`.blob` into a sliding window view with the given window shape :attr:`.blob` inplace.
:param window_shape: desired output size. If size is a sequence like (h, w), the output size will be matched to
this. If size is an int, the output will have the same height and width as the `target_size`.
:param strides: the strides between two neighboring sliding windows. `strides` is a sequence like (h, w), in
which denote the strides on the vertical and the horizontal axis. When not given, using `window_shape`
:param padding: If False, only patches which are fully contained in the input image are included. If True,
all patches whose starting point is inside the input are included, and areas outside the input default to
zero. The `padding` argument has no effect on the size of each patch, it determines how many patches are
extracted. Default is False.
:param channel_axis: the axis id of the color channel, ``-1`` indicates the color channel info at the last axis.
:param as_chunks: If set, each sliding window will be stored in the chunk of the current Document
:return: Document itself after processed
"""
window_h, window_w = window_shape
stride_h, stride_w = strides or window_shape
blob = _move_channel_axis(self.blob, channel_axis, -1)
if padding:
h, w, c = blob.shape
ext_h = window_h - h % stride_h
ext_w = window_w - w % window_w
blob = np.pad(
blob,
((0, ext_h), (0, ext_w), (0, 0)),
mode='constant',
constant_values=0,
)
h, w, c = blob.shape
row_step = blob.strides[0]
col_step = blob.strides[1]
expanded_img = np.lib.stride_tricks.as_strided(
blob,
shape=(
1 + int((h - window_h) / stride_h),
1 + int((w - window_w) / stride_w),
window_h,
window_w,
c,
),
strides=(row_step * stride_h, col_step * stride_w, row_step, col_step, 1),
writeable=False,
)
cur_loc_h, cur_loc_w = 0, 0
if self.location:
cur_loc_h, cur_loc_w = self.location[:2]
bbox_locations = [
(h * stride_h + cur_loc_h, w * stride_w + cur_loc_w, window_h, window_w)
for h in range(expanded_img.shape[0])
for w in range(expanded_img.shape[1])
]
expanded_img = expanded_img.reshape((-1, window_h, window_w, c))
if as_chunks:
from . import Document
for location, _blob in zip(bbox_locations, expanded_img):
self.chunks.append(
Document(
blob=_move_channel_axis(_blob, -1, channel_axis),
location=location,
)
)
else:
self.blob = _move_channel_axis(expanded_img, -1, channel_axis)
return self
convert_image_buffer_to_blob = _deprecate(convert_buffer_to_image_blob)
normalize_image_blob = _deprecate(set_image_blob_normalization)
convert_image_uri_to_blob = _deprecate(convert_uri_to_image_blob)
convert_audio_uri_to_blob = _deprecate(convert_uri_to_audio_blob)
resize_image_blob = _deprecate(set_image_blob_shape)
def _uri_to_buffer(uri: str) -> bytes:
    """Resolve ``uri`` to raw bytes.

    ``http``/``https``/``data`` URIs are downloaded; anything else is treated
    as a local file path.

    :param uri: the uri of Document
    :return: buffer bytes.
    :raises FileNotFoundError: if ``uri`` is neither a supported URL scheme
        nor an existing local path.
    """
    scheme = urllib.parse.urlparse(uri).scheme
    if scheme in {'http', 'https', 'data'}:
        request = urllib.request.Request(uri, headers={'User-Agent': 'Mozilla/5.0'})
        with urllib.request.urlopen(request) as fp:
            return fp.read()
    if os.path.exists(uri):
        with open(uri, 'rb') as fp:
            return fp.read()
    raise FileNotFoundError(f'{uri} is not a URL or a valid local path')
def _png_to_buffer_1d(arr: 'np.ndarray', width: int, height: int) -> | |
value = 'Rn2x2',
texname = '\\text{I43x22}')
I43x33 = Parameter(name = 'I43x33',
nature = 'internal',
type = 'complex',
value = 'Rn3x3',
texname = '\\text{I43x33}')
I44x33 = Parameter(name = 'I44x33',
nature = 'internal',
type = 'complex',
value = 'Rn3x3*complexconjugate(ye3x3)',
texname = '\\text{I44x33}')
I45x11 = Parameter(name = 'I45x11',
nature = 'internal',
type = 'complex',
value = 'Rn1x1*complexconjugate(Rl1x1)',
texname = '\\text{I45x11}')
I45x22 = Parameter(name = 'I45x22',
nature = 'internal',
type = 'complex',
value = 'Rn2x2*complexconjugate(Rl2x2)',
texname = '\\text{I45x22}')
I45x33 = Parameter(name = 'I45x33',
nature = 'internal',
type = 'complex',
value = 'Rn3x3*complexconjugate(Rl3x3)',
texname = '\\text{I45x33}')
I45x36 = Parameter(name = 'I45x36',
nature = 'internal',
type = 'complex',
value = 'Rn3x3*complexconjugate(Rl6x3)',
texname = '\\text{I45x36}')
I46x33 = Parameter(name = 'I46x33',
nature = 'internal',
type = 'complex',
value = 'Rn3x3*complexconjugate(Rl3x6)*complexconjugate(ye3x3)',
texname = '\\text{I46x33}')
I46x36 = Parameter(name = 'I46x36',
nature = 'internal',
type = 'complex',
value = 'Rn3x3*complexconjugate(Rl6x6)*complexconjugate(ye3x3)',
texname = '\\text{I46x36}')
I47x33 = Parameter(name = 'I47x33',
nature = 'internal',
type = 'complex',
value = 'Rn3x3*complexconjugate(Rl3x6)*complexconjugate(te3x3)',
texname = '\\text{I47x33}')
I47x36 = Parameter(name = 'I47x36',
nature = 'internal',
type = 'complex',
value = 'Rn3x3*complexconjugate(Rl6x6)*complexconjugate(te3x3)',
texname = '\\text{I47x36}')
I48x33 = Parameter(name = 'I48x33',
nature = 'internal',
type = 'complex',
value = 'Rn3x3*ye3x3*complexconjugate(Rl3x3)*complexconjugate(ye3x3)',
texname = '\\text{I48x33}')
I48x36 = Parameter(name = 'I48x36',
nature = 'internal',
type = 'complex',
value = 'Rn3x3*ye3x3*complexconjugate(Rl6x3)*complexconjugate(ye3x3)',
texname = '\\text{I48x36}')
I49x33 = Parameter(name = 'I49x33',
nature = 'internal',
type = 'complex',
value = 'complexconjugate(Ru3x6)*complexconjugate(yu3x3)',
texname = '\\text{I49x33}')
I49x36 = Parameter(name = 'I49x36',
nature = 'internal',
type = 'complex',
value = 'complexconjugate(Ru6x6)*complexconjugate(yu3x3)',
texname = '\\text{I49x36}')
I5x11 = Parameter(name = 'I5x11',
nature = 'internal',
type = 'complex',
value = 'Rd1x1*complexconjugate(Rd1x1)',
texname = '\\text{I5x11}')
I5x22 = Parameter(name = 'I5x22',
nature = 'internal',
type = 'complex',
value = 'Rd2x2*complexconjugate(Rd2x2)',
texname = '\\text{I5x22}')
I5x33 = Parameter(name = 'I5x33',
nature = 'internal',
type = 'complex',
value = 'Rd3x3*complexconjugate(Rd3x3)',
texname = '\\text{I5x33}')
I5x36 = Parameter(name = 'I5x36',
nature = 'internal',
type = 'complex',
value = 'Rd6x3*complexconjugate(Rd3x3)',
texname = '\\text{I5x36}')
I5x63 = Parameter(name = 'I5x63',
nature = 'internal',
type = 'complex',
value = 'Rd3x3*complexconjugate(Rd6x3)',
texname = '\\text{I5x63}')
I5x66 = Parameter(name = 'I5x66',
nature = 'internal',
type = 'complex',
value = 'Rd6x3*complexconjugate(Rd6x3)',
texname = '\\text{I5x66}')
I50x33 = Parameter(name = 'I50x33',
nature = 'internal',
type = 'complex',
value = 'yu3x3*complexconjugate(Ru3x3)',
texname = '\\text{I50x33}')
I50x36 = Parameter(name = 'I50x36',
nature = 'internal',
type = 'complex',
value = 'yu3x3*complexconjugate(Ru6x3)',
texname = '\\text{I50x36}')
I51x11 = Parameter(name = 'I51x11',
nature = 'internal',
type = 'complex',
value = 'Ru1x1*complexconjugate(Ru1x1)',
texname = '\\text{I51x11}')
I51x22 = Parameter(name = 'I51x22',
nature = 'internal',
type = 'complex',
value = 'Ru2x2*complexconjugate(Ru2x2)',
texname = '\\text{I51x22}')
I51x33 = Parameter(name = 'I51x33',
nature = 'internal',
type = 'complex',
value = 'Ru3x3*complexconjugate(Ru3x3)',
texname = '\\text{I51x33}')
I51x36 = Parameter(name = 'I51x36',
nature = 'internal',
type = 'complex',
value = 'Ru6x3*complexconjugate(Ru3x3)',
texname = '\\text{I51x36}')
I51x63 = Parameter(name = 'I51x63',
nature = 'internal',
type = 'complex',
value = 'Ru3x3*complexconjugate(Ru6x3)',
texname = '\\text{I51x63}')
I51x66 = Parameter(name = 'I51x66',
nature = 'internal',
type = 'complex',
value = 'Ru6x3*complexconjugate(Ru6x3)',
texname = '\\text{I51x66}')
I52x33 = Parameter(name = 'I52x33',
nature = 'internal',
type = 'complex',
value = 'Ru3x6*complexconjugate(Ru3x6)',
texname = '\\text{I52x33}')
I52x36 = Parameter(name = 'I52x36',
nature = 'internal',
type = 'complex',
value = 'Ru6x6*complexconjugate(Ru3x6)',
texname = '\\text{I52x36}')
I52x44 = Parameter(name = 'I52x44',
nature = 'internal',
type = 'complex',
value = 'Ru4x4*complexconjugate(Ru4x4)',
texname = '\\text{I52x44}')
I52x55 = Parameter(name = 'I52x55',
nature = 'internal',
type = 'complex',
value = 'Ru5x5*complexconjugate(Ru5x5)',
texname = '\\text{I52x55}')
I52x63 = Parameter(name = 'I52x63',
nature = 'internal',
type = 'complex',
value = 'Ru3x6*complexconjugate(Ru6x6)',
texname = '\\text{I52x63}')
I52x66 = Parameter(name = 'I52x66',
nature = 'internal',
type = 'complex',
value = 'Ru6x6*complexconjugate(Ru6x6)',
texname = '\\text{I52x66}')
I53x11 = Parameter(name = 'I53x11',
nature = 'internal',
type = 'complex',
value = 'Rd1x1*complexconjugate(CKM1x1)*complexconjugate(Ru1x1)',
texname = '\\text{I53x11}')
I53x22 = Parameter(name = 'I53x22',
nature = 'internal',
type = 'complex',
value = 'Rd2x2*complexconjugate(CKM2x2)*complexconjugate(Ru2x2)',
texname = '\\text{I53x22}')
I53x33 = Parameter(name = 'I53x33',
nature = 'internal',
type = 'complex',
value = 'Rd3x3*complexconjugate(CKM3x3)*complexconjugate(Ru3x3)',
texname = '\\text{I53x33}')
I53x36 = Parameter(name = 'I53x36',
nature = 'internal',
type = 'complex',
value = 'Rd3x3*complexconjugate(CKM3x3)*complexconjugate(Ru6x3)',
texname = '\\text{I53x36}')
I53x63 = Parameter(name = 'I53x63',
nature = 'internal',
type = 'complex',
value = 'Rd6x3*complexconjugate(CKM3x3)*complexconjugate(Ru3x3)',
texname = '\\text{I53x63}')
I53x66 = Parameter(name = 'I53x66',
nature = 'internal',
type = 'complex',
value = 'Rd6x3*complexconjugate(CKM3x3)*complexconjugate(Ru6x3)',
texname = '\\text{I53x66}')
I54x33 = Parameter(name = 'I54x33',
nature = 'internal',
type = 'complex',
value = 'Rd3x3*complexconjugate(CKM3x3)*complexconjugate(Ru3x6)*complexconjugate(tu3x3)',
texname = '\\text{I54x33}')
I54x36 = Parameter(name = 'I54x36',
nature = 'internal',
type = 'complex',
value = 'Rd3x3*complexconjugate(CKM3x3)*complexconjugate(Ru6x6)*complexconjugate(tu3x3)',
texname = '\\text{I54x36}')
I54x63 = Parameter(name = 'I54x63',
nature = 'internal',
type = 'complex',
value = 'Rd6x3*complexconjugate(CKM3x3)*complexconjugate(Ru3x6)*complexconjugate(tu3x3)',
texname = '\\text{I54x63}')
I54x66 = Parameter(name = 'I54x66',
nature = 'internal',
type = 'complex',
value = 'Rd6x3*complexconjugate(CKM3x3)*complexconjugate(Ru6x6)*complexconjugate(tu3x3)',
texname = '\\text{I54x66}')
I55x33 = Parameter(name = 'I55x33',
nature = 'internal',
type = 'complex',
value = 'Rd3x3*complexconjugate(CKM3x3)*complexconjugate(Ru3x6)*complexconjugate(yu3x3)',
texname = '\\text{I55x33}')
I55x36 = Parameter(name = 'I55x36',
nature = 'internal',
type = 'complex',
value = 'Rd3x3*complexconjugate(CKM3x3)*complexconjugate(Ru6x6)*complexconjugate(yu3x3)',
texname = '\\text{I55x36}')
I55x63 = Parameter(name = 'I55x63',
nature = 'internal',
type = 'complex',
value = 'Rd6x3*complexconjugate(CKM3x3)*complexconjugate(Ru3x6)*complexconjugate(yu3x3)',
texname = '\\text{I55x63}')
I55x66 = Parameter(name = 'I55x66',
nature = 'internal',
type = 'complex',
value = 'Rd6x3*complexconjugate(CKM3x3)*complexconjugate(Ru6x6)*complexconjugate(yu3x3)',
texname = '\\text{I55x66}')
I56x33 = Parameter(name = 'I56x33',
nature = 'internal',
type = 'complex',
value = 'Rd3x6*td3x3*complexconjugate(CKM3x3)*complexconjugate(Ru3x3)',
texname = '\\text{I56x33}')
I56x36 = Parameter(name = 'I56x36',
nature = 'internal',
type = 'complex',
value = 'Rd3x6*td3x3*complexconjugate(CKM3x3)*complexconjugate(Ru6x3)',
texname = '\\text{I56x36}')
I56x63 = Parameter(name = 'I56x63',
nature = 'internal',
type = 'complex',
value = 'Rd6x6*td3x3*complexconjugate(CKM3x3)*complexconjugate(Ru3x3)',
texname = '\\text{I56x63}')
I56x66 = Parameter(name = 'I56x66',
nature = 'internal',
type = 'complex',
value = 'Rd6x6*td3x3*complexconjugate(CKM3x3)*complexconjugate(Ru6x3)',
texname = '\\text{I56x66}')
I57x33 = Parameter(name = 'I57x33',
nature = 'internal',
type = 'complex',
value = 'Rd3x6*yd3x3*complexconjugate(CKM3x3)*complexconjugate(Ru3x3)',
texname = '\\text{I57x33}')
I57x36 = Parameter(name = 'I57x36',
nature = 'internal',
type = 'complex',
value = 'Rd3x6*yd3x3*complexconjugate(CKM3x3)*complexconjugate(Ru6x3)',
texname = '\\text{I57x36}')
I57x63 = Parameter(name = 'I57x63',
nature = 'internal',
type = 'complex',
value = 'Rd6x6*yd3x3*complexconjugate(CKM3x3)*complexconjugate(Ru3x3)',
texname = '\\text{I57x63}')
I57x66 = Parameter(name = 'I57x66',
nature = 'internal',
type = 'complex',
value = 'Rd6x6*yd3x3*complexconjugate(CKM3x3)*complexconjugate(Ru6x3)',
texname = '\\text{I57x66}')
I58x33 = Parameter(name = 'I58x33',
nature = 'internal',
type = 'complex',
value = 'Rd3x3*yd3x3*complexconjugate(CKM3x3)*complexconjugate(Ru3x3)*complexconjugate(yd3x3)',
texname = '\\text{I58x33}')
I58x36 = Parameter(name = 'I58x36',
nature = 'internal',
type = 'complex',
value = 'Rd3x3*yd3x3*complexconjugate(CKM3x3)*complexconjugate(Ru6x3)*complexconjugate(yd3x3)',
texname = '\\text{I58x36}')
I58x63 = Parameter(name = 'I58x63',
nature = 'internal',
type = 'complex',
value = 'Rd6x3*yd3x3*complexconjugate(CKM3x3)*complexconjugate(Ru3x3)*complexconjugate(yd3x3)',
texname = '\\text{I58x63}')
I58x66 = Parameter(name = 'I58x66',
nature = 'internal',
type = 'complex',
value = 'Rd6x3*yd3x3*complexconjugate(CKM3x3)*complexconjugate(Ru6x3)*complexconjugate(yd3x3)',
texname = '\\text{I58x66}')
I59x33 = Parameter(name = 'I59x33',
nature = 'internal',
type = 'complex',
value = 'Rd3x6*yd3x3*complexconjugate(CKM3x3)*complexconjugate(Ru3x6)*complexconjugate(yu3x3)',
texname = '\\text{I59x33}')
I59x36 = Parameter(name = 'I59x36',
nature = 'internal',
type = 'complex',
value = 'Rd3x6*yd3x3*complexconjugate(CKM3x3)*complexconjugate(Ru6x6)*complexconjugate(yu3x3)',
texname = '\\text{I59x36}')
I59x63 = Parameter(name = 'I59x63',
nature = 'internal',
type = 'complex',
value = 'Rd6x6*yd3x3*complexconjugate(CKM3x3)*complexconjugate(Ru3x6)*complexconjugate(yu3x3)',
texname = '\\text{I59x63}')
I59x66 = Parameter(name = 'I59x66',
nature = 'internal',
type = 'complex',
value = 'Rd6x6*yd3x3*complexconjugate(CKM3x3)*complexconjugate(Ru6x6)*complexconjugate(yu3x3)',
texname = '\\text{I59x66}')
I6x33 = Parameter(name = 'I6x33',
nature = 'internal',
type = 'complex',
value = 'Rd3x6*complexconjugate(Rd3x6)',
texname = '\\text{I6x33}')
I6x36 = Parameter(name = 'I6x36',
nature = 'internal',
type = 'complex',
value = 'Rd6x6*complexconjugate(Rd3x6)',
texname = '\\text{I6x36}')
I6x44 = Parameter(name = 'I6x44',
nature = 'internal',
type = 'complex',
value = 'Rd4x4*complexconjugate(Rd4x4)',
texname = '\\text{I6x44}')
I6x55 = Parameter(name = 'I6x55',
nature = 'internal',
type = 'complex',
value = 'Rd5x5*complexconjugate(Rd5x5)',
texname = '\\text{I6x55}')
I6x63 = Parameter(name = 'I6x63',
nature = 'internal',
type = 'complex',
value = 'Rd3x6*complexconjugate(Rd6x6)',
texname = '\\text{I6x63}')
I6x66 = Parameter(name = 'I6x66',
nature = 'internal',
type = 'complex',
value = 'Rd6x6*complexconjugate(Rd6x6)',
texname = '\\text{I6x66}')
I60x33 = Parameter(name = 'I60x33',
nature = 'internal',
type = 'complex',
value = 'Rd3x3*yu3x3*complexconjugate(CKM3x3)*complexconjugate(Ru3x3)*complexconjugate(yu3x3)',
texname = '\\text{I60x33}')
I60x36 = Parameter(name = 'I60x36',
nature = 'internal',
type = 'complex',
value = 'Rd3x3*yu3x3*complexconjugate(CKM3x3)*complexconjugate(Ru6x3)*complexconjugate(yu3x3)',
texname = '\\text{I60x36}')
I60x63 = Parameter(name = 'I60x63',
nature = 'internal',
type = 'complex',
value = 'Rd6x3*yu3x3*complexconjugate(CKM3x3)*complexconjugate(Ru3x3)*complexconjugate(yu3x3)',
texname = '\\text{I60x63}')
I60x66 = Parameter(name = 'I60x66',
nature = 'internal',
type = 'complex',
value = 'Rd6x3*yu3x3*complexconjugate(CKM3x3)*complexconjugate(Ru6x3)*complexconjugate(yu3x3)',
texname = '\\text{I60x66}')
I61x33 = Parameter(name = 'I61x33',
nature = 'internal',
type = 'complex',
value = 'Ru3x3*complexconjugate(yu3x3)',
texname = '\\text{I61x33}')
I61x36 = Parameter(name = 'I61x36',
nature = 'internal',
type = 'complex',
value = 'Ru6x3*complexconjugate(yu3x3)',
texname = '\\text{I61x36}')
I62x33 = Parameter(name = 'I62x33',
nature = 'internal',
type = 'complex',
value = 'Ru3x6*yu3x3',
texname = '\\text{I62x33}')
I62x36 = Parameter(name = 'I62x36',
nature = 'internal',
type = 'complex',
value = 'Ru6x6*yu3x3',
texname = '\\text{I62x36}')
I63x11 = Parameter(name = 'I63x11',
nature = 'internal',
type = 'complex',
value = 'CKM1x1*Ru1x1',
texname = '\\text{I63x11}')
I63x22 = Parameter(name = 'I63x22',
nature = 'internal',
type = 'complex',
| |
<filename>pysiaf/utils/rotations.py<gh_stars>10-100
"""A collection of basic routines for performing rotation calculations.
Authors
-------
<NAME>
<NAME>
"""
from __future__ import absolute_import, print_function, division
import copy
import numpy as np
import astropy.units as u
from astropy.modeling.rotations import rotation_matrix
def attitude(v2, v3, ra, dec, pa):
    """Return rotation matrix that transforms from v2,v3 to RA,Dec.

    Builds a 3D rotation matrix which rotates a unit vector representing a
    v2,v3 position to a unit vector representing an RA, Dec pointing with an
    assigned position angle. Described in JWST-STScI-001550, SM-12, section 5.1.

    Parameters
    ----------
    v2 : float
        a position measured in arc-seconds
    v3 : float
        a position measured in arc-seconds
    ra : float
        Right Ascension on the sky in degrees
    dec : float
        Declination on the sky in degrees
    pa : float
        Position angle in degrees measured from North to V3 axis in North to
        East direction.

    Returns
    -------
    m : numpy matrix
        A (3 x 3) matrix representing the attitude of the telescope which points
        the given V2V3 position to the indicated RA and Dec with the V3 axis
        rotated by position angle pa
    """
    ## Convert V2/V3 from arc-seconds to degrees.
    v2_deg = v2 / 3600.0
    v3_deg = v3 / 3600.0
    ## Elementary rotations (`rotate` is defined elsewhere in this module).
    m_v2 = rotate(3, -v2_deg)
    m_v3 = rotate(2, v3_deg)
    m_ra = rotate(3, ra)
    m_dec = rotate(2, -dec)
    m_pa = rotate(1, -pa)
    ## Compose as mra * mdec * mpa * mv3 * mv2 (applied right to left).
    m = np.dot(m_v3, m_v2)
    m = np.dot(m_pa, m)
    m = np.dot(m_dec, m)
    m = np.dot(m_ra, m)
    return m
def convert_quantity(x_in, to_unit, factor=1.):
    """Check if astropy quantity and apply conversion factor.

    Astropy quantities are converted to ``to_unit`` properly; plain numbers
    are simply scaled by ``factor``.

    Parameters
    ----------
    x_in : float or quantity
        input
    to_unit : astropy.units unit
        unit to convert to
    factor : float
        Factor to apply if input is not a quantity

    Returns
    -------
    x_out : float
        converted value
    """
    value = copy.deepcopy(x_in)
    if isinstance(value, u.Quantity):
        return value.to(to_unit).value
    return value * factor
def attitude_matrix(nu2, nu3, ra, dec, pa, convention='JWST'):
    """Return attitude matrix.

    Makes a 3D rotation matrix that transforms between telescope frame
    and sky. It rotates a unit vector on the idealized focal sphere
    (specified by the spherical coordinates nu2, nu3) to a unit vector
    representing an RA, Dec pointing with an assigned position angle
    measured at nu2, nu3.
    See JWST-STScI-001550, SM-12, section 5.1.

    Parameters
    ----------
    nu2 : float
        an euler angle (default unit is arc-seconds)
    nu3 : float
        an euler angle (default unit is arc-seconds)
    ra : float
        Right Ascension on the sky in degrees
    dec : float
        Declination on the sky in degrees
    pa : float
        Position angle of V3 axis at nu2,nu3 measured from
        North to East (default unit is degree)
    convention : str
        Attitude convention; only 'JWST' is currently supported.

    Returns
    -------
    m : numpy matrix
        the attitude matrix

    Raises
    ------
    ValueError
        If ``convention`` is not supported.
    """
    ## Bug fix: previously any convention other than 'JWST' left `pa_sign`
    ## undefined, producing a confusing NameError further down. Fail fast.
    if convention == 'JWST':
        pa_sign = -1.
    else:
        raise ValueError(f'Unsupported attitude convention: {convention!r}')

    def _in_degree(value, divisor):
        """Return `value` in degrees; plain numbers are divided by `divisor`."""
        if isinstance(value, u.Quantity):
            return value.to(u.deg).value
        return value / divisor

    ## nu2/nu3 default to arc-seconds, pa/ra/dec default to degrees.
    nu2_value = _in_degree(nu2, 3600.)
    nu3_value = _in_degree(nu3, 3600.)
    pa_value = _in_degree(pa, 1.)
    ra_value = _in_degree(ra, 1.)
    dec_value = _in_degree(dec, 1.)
    # Get separate rotation matrices
    # astropy's rotation matrix takes inverse sign compared to rotations.rotate
    mv2 = rotation_matrix(-1*-nu2_value, axis='z')
    mv3 = rotation_matrix(-1*nu3_value, axis='y')
    mra = rotation_matrix(-1*ra_value, axis='z')
    mdec = rotation_matrix(-1*-dec_value, axis='y')
    mpa = rotation_matrix(-1*pa_sign*pa_value, axis='x')
    # Combine as mra*mdec*mpa*mv3*mv2
    m = np.dot(mv3, mv2)
    m = np.dot(mpa, m)
    m = np.dot(mdec, m)
    m = np.dot(mra, m)
    return m
def axial_rotation(ax, phi, vector):
    """Apply direct rotation to a vector using Rodrigues' formula.

    Parameters
    ----------
    ax : float array of size 3
        a unit vector representing the rotation axis
    phi : float
        angle in degrees to rotate the original vector
    vector : float
        array of size 3 representing any vector

    Returns
    -------
    v : float
        array of size 3 representing the rotated vector
    """
    angle = np.radians(phi)
    cos_a = np.cos(angle)
    sin_a = np.sin(angle)
    ## Rodrigues: v*cos(phi) + (ax x v)*sin(phi) + ax*(ax.v)*(1 - cos(phi))
    return (
        vector * cos_a
        + cross(ax, vector) * sin_a
        + ax * np.dot(ax, vector) * (1 - cos_a)
    )
def sky_to_tel(attitude, ra, dec, verbose=False):
    """Transform from sky (RA, Dec) to telescope (nu2, nu3) angles.

    Return the nu2,nu3 position on the idealized focal sphere corresponding to
    any RA and Dec, using the inverse of the attitude matrix.

    Parameters
    ----------
    attitude : 3 by 3 float array
        The attitude matrix.
    ra : float (default unit is degree)
        RA of sky position
    dec : float (default unit is degree)
        Dec of sky position
    verbose : bool
        If True, print the intermediate unit vectors.

    Returns
    -------
    nu2, nu3 : tuple of floats with quantity
        spherical coordinates at matching position on the idealized focal sphere
    """
    if attitude.shape != (3, 3):
        raise ValueError('Attitude has to be 3x3 array.')
    sky_vector = unit_vector_sky(ra, dec)
    if verbose:
        print('Sky-side unit vector: {}'.format(sky_vector))
    ## The attitude matrix is a rotation, so its transpose inverts it.
    tel_vector = np.dot(np.transpose(attitude), sky_vector)
    if verbose:
        print('Tel-side unit vector: {}'.format(tel_vector))
    ## Extract spherical coordinates on the telescope side.
    return polar_angles(tel_vector)
def getv2v3(attitude, ra, dec):
    """Return v2,v3 position of any RA and Dec using the inverse of the attitude matrix.

    Parameters
    ----------
    attitude : 3 by 3 float array
        the telescope attitude matrix
    ra : float
        RA of sky position
    dec : float
        Dec of sky position

    Returns
    -------
    v2, v3 : tuple of floats
        V2,V3 value at matching position
    """
    sky_unit_vector = unit(ra, dec)
    ## Transpose inverts the (orthonormal) attitude rotation.
    tel_unit_vector = np.dot(np.transpose(attitude), sky_unit_vector)
    return v2v3(tel_unit_vector)
def cross(a, b):
    """Return the cross product of two 3-vectors, c = a X b.

    The order is significant: reversing it changes the sign of the result.

    Parameters
    ----------
    a : float array or list of length 3
        first vector
    b : float array or list of length 3
        second vector

    Returns
    -------
    c : float array of length 3
        the product vector
    """
    a1, a2, a3 = a[0], a[1], a[2]
    b1, b2, b3 = b[0], b[1], b[2]
    return np.array([
        a2 * b3 - a3 * b2,
        a3 * b1 - a1 * b3,
        a1 * b2 - a2 * b1,
    ])
def pointing(attitude, v2, v3, positive_ra=True, input_cartesian=False):
    """Calculate where a v2v3 position points on the sky using the attitude matrix.

    Parameters
    ----------
    attitude : 3 by 3 float array
        the telescope attitude matrix
    v2 : float or array of floats
        V2 coordinate in arc-seconds
    v3 : float or array of floats
        V3 coordinate in arc-seconds
    positive_ra : bool
        If True forces ra value to be positive
    input_cartesian : bool
        If True, treat input/output as small-angle cartesian components.

    Returns
    -------
    rd : tuple of floats
        (ra, dec) - RA and Dec in degrees
    """
    v2_deg = v2 / 3600.0
    v3_deg = v3 / 3600.0
    if input_cartesian:
        ## Small-angle approximation: assemble the unit vector directly.
        v2_rad, v3_rad = np.deg2rad(v2_deg), np.deg2rad(v3_deg)
        tel_vector = np.array(
            [np.sqrt(1. - (v2_rad ** 2 + v3_rad ** 2)), v2_rad, v3_rad]
        )
    else:
        tel_vector = unit(v2_deg, v3_deg)
    ## Rotate the telescope-frame vector onto the sky frame.
    sky_vector = np.dot(attitude, tel_vector)
    if input_cartesian:
        return np.rad2deg(sky_vector[1]), np.rad2deg(sky_vector[2])
    return radec(sky_vector, positive_ra=positive_ra)
def tel_to_sky(attitude, nu2, nu3, positive_ra=True):#, input_cartesian=False):
"""Calculate where a nu2,nu3 position points on the sky.
Parameters
----------
attitude : 3 by 3 float array
the telescope attitude matrix
nu2 : float or array of floats (default unit is arcsecond)
V2 coordinate in arc-seconds
nu3 : float or array of floats (default unit is arcsecond)
V3 coordinate in arc-seconds
positive_ra : bool.
If True forces ra value to be positive
Returns
-------
rd : tuple of floats with quantity
(ra, dec) - RA and Dec
"""
nu2_deg = convert_quantity(nu2, u.deg, factor=u.arcsec.to(u.deg))
nu3_deg = convert_quantity(nu3, u.deg, factor=u.arcsec.to(u.deg))
# v2d = v2 / 3600.0
# v3d = v3 / 3600.0
# # compute unit vector
# if input_cartesian:
# v2_rad = np.deg2rad(v2d)
# v3_rad = np.deg2rad(v3d)
# v = np.array([np.sqrt(1. - (v2_rad ** 2 + v3_rad ** 2)), v2_rad, v3_rad])
# else:
# v = unit(v2d, v3d)
unit_vector_tel = unit_vector_sky(nu2_deg, nu3_deg)
# apply attitude transformation
unit_vector_sky_side = np.dot(attitude, unit_vector_tel)
# compute tuple containing ra and dec in degrees
# if input_cartesian:
| |
<reponame>hendrikdutoit/beetools<gh_stars>0
'''Tools for Bright Edge eServices developments & projects
Designed for use in the Bright Edge eServices ecosystem. It defines
methods and functions for general use purposes.
Archiver creates an archive of the key project files, print coloured messages
to console with default parameters.
To Do
=====
1. Better example on the logging integration.
2. Complete doctests for all methods & functions.
'''
import configparser
import datetime
import logging
import os
import zipfile
from pathlib import Path
import shutil
import sys
import tempfile
from termcolor import colored
_PROJ_DESC = __doc__.split('\n')[0]
_PROJ_PATH = Path(__file__)
_PROJ_NAME = _PROJ_PATH.stem
_PROJ_VERSION = '3.3.0'
class Archiver:
    '''Archiver creates an archive of the key project files to zip file. It assumes the
    following project structure of the calling application:

    Module
    ======
    projectname/ (a.k.a. Project root dir)
    │
    ├── Data/
    │
    ├── docs/
    │
    ├── src/ (a.k.a. Source dir)
    │   └── modname (a.k.a. Project dir)
    │       ├── module1.py
    │       └── module2.py
    │
    ├── tests/
    │   ├── test_module1_test_name1.py
    │   └── test_module1_test_name2.py
    │
    ├── .gitignore
    ├── LICENSE
    ├── README.md
    ├── requirements.txt
    └── setup.py

    Package
    ======
    projectname/ (a.k.a. Project root dir)
    │
    ├── Data/
    │
    ├── docs/
    │
    ├── pkgname/ (a.k.a. Project dir)
    │   ├── __init__.py
    │   ├── file1.py
    │   └── file2.py
    │
    ├── tests/
    │   ├── test_file1_test_name.py
    │   └── test_file2_test_name.py
    │
    ├── .gitignore
    ├── LICENSE
    ├── README.md
    ├── requirements.txt
    └── setup.py

    Library (not implemented)
    =======
    projectname/
    │
    ├── docs/
    │
    ├── Data/
    │
    ├── libname/
    │   ├── __init__.py
    │   ├── file_1.py
    │   ├── pkg_1/
    │   │   ├── __init__.py
    │   │   ├── file_1.py
    │   │   └── file_2.py
    │   │
    │   └── pkg_2/
    │       ├── __init__.py
    │       ├── file_3.py
    │       └── file_4.py
    │
    ├── tests/
    │
    ├── .gitignore
    ├── LICENSE
    ├── README.md
    ├── requirements.txt
    └── setup.py
    '''

    def __init__(
        self,
        p_app_desc,
        p_app_pth,
        p_parent_log_name=None,
        p_app_ver=None,
        p_app_ini_file_name=None,
        p_cls=True,
        p_arc_excl_dir=None,
        p_arc_extern_dir=None,
        p_arc_incl_ext=None,
    ):
        '''Initialize the object.

        Note: construction has side effects — the archive (and the optional
        external copy) is created immediately via make_archive() and
        make_archive_external().

        Parameters
        ----------
        p_app_desc
            Description of the calling application
        p_app_pth
            Path to the application module
        p_parent_log_name
            Name of the parent logger, i.e., calling application
            Default = None
        p_app_ver
            Version of the calling application.  When None, the version is
            read from setup.cfg ([metadata] version) if available, else
            '0.0.0'.
            Default = None
        p_app_ini_file_name
            Ini file name used by calling application for parameters
            Default = None
        p_cls
            Clear the screen before start
            Default = True
        p_arc_excl_dir
            Extra directory name(s) to exclude from the archive, merged with
            the defaults ['Archive', 'VersionArchive', 'build']
            Default = None
        p_arc_extern_dir
            External directory that receives a copy of the archive
            Default = None
        p_arc_incl_ext
            Extra file extension(s) to include in the archive, merged with
            the defaults ['ini', 'py']
            Default = None

        Returns
        -------
        None

        Examples
        --------
        >>> t_archiver = Archiver(_PROJ_DESC, _PROJ_PATH)  # doctest: +SKIP
        >>>
        '''
        self.success = True
        # Attach to the caller's logger hierarchy when a parent logger is given.
        if p_parent_log_name:
            self.log_name = '{}.{}'.format(p_parent_log_name, _PROJ_NAME)
            self.logger = logging.getLogger(self.log_name)
        else:
            self.log_name = None
            self.logger = None
        # Normalize the application path to a pathlib.Path.
        if not isinstance(p_app_pth, Path):
            self.app_pth = Path(p_app_pth)
        else:
            self.app_pth = p_app_pth
        self.app_name = self.app_pth.stem
        self.app_root_dir = self.find_app_root_dir()
        self.app_setup_cfg = self._get_app_setup_cfg()
        self.app_ver = self._get_version(p_app_ver)
        self.app_desc = p_app_desc
        self.app_ini_file_name = p_app_ini_file_name
        self.app_setup_cfg_pth = None
        self.arc_dir = None
        # Merge caller-supplied exclusions/extensions with the defaults.
        self.arc_excl_dir = _add_parm(
            ['Archive', 'VersionArchive', 'build'], p_arc_excl_dir
        )
        if p_arc_extern_dir:
            self.arc_extern_dir = Path(p_arc_extern_dir)
        else:
            self.arc_extern_dir = None
        self.arc_incl_ext = _add_parm(['ini', 'py'], p_arc_incl_ext)
        self.arc_pth = None
        self.cls = p_cls
        # Duration fields are filled in by print_footer().
        self.dur_hours = 0
        self.dur_min = 0
        self.dur_sec = 0
        self.elapsed_time = 0
        self.end_time = 0
        self.start_time = datetime.datetime.now()
        self.start_date_str = self.start_time.strftime('%y%m%d%H%M%S')
        self.version_archive = 'VersionArchive'
        # Creating the archive(s) is part of construction.
        self.make_archive()
        self.make_archive_external()
        pass

    def _get_app_setup_cfg(self):
        '''Read setup.cfg from the application root dir, if present.

        Returns
        -------
        configparser.ConfigParser or None
            Parsed setup.cfg, or None when the file does not exist.
        '''
        setup_cfg = None
        if (self.app_root_dir / 'setup.cfg').exists():
            self.app_setup_cfg_pth = self.app_root_dir / 'setup.cfg'
            setup_cfg = configparser.ConfigParser(inline_comment_prefixes='#')
            setup_cfg.read([self.app_setup_cfg_pth])
        return setup_cfg

    def _get_version(self, p_app_ver):
        '''Determine the application version.

        Order of preference: explicit p_app_ver, then setup.cfg
        [metadata] version, else the fallback '0.0.0'.
        '''
        version = '0.0.0'
        if p_app_ver:
            version = p_app_ver
        elif self.app_setup_cfg:
            if self.app_setup_cfg.has_option('metadata', 'version'):
                version = self.app_setup_cfg.get('metadata', 'version')
        return version

    def is_dev_mode(self):
        '''Determine if it is a production module or not.

        Parameters
        ----------

        Returns
        -------
        bool
            True when the module is NOT running from an installed
            'site-packages' directory, i.e. a development checkout.

        Examples
        --------
        '''
        success = False
        if 'site-packages' not in self.app_pth.parts:
            success = True
        return success

    def find_app_root_dir(self):
        '''Locate the project root directory from the application path.

        Returns
        -------
        Path
            The directory containing 'src' or 'tests', the grand-parent for
            an installed 'site-packages' module, or the first path component
            whose name matches the application name.  Falls back to Path()
            ('.') when no marker is found.
        '''
        app_root_dir = Path()
        if 'src' in self.app_pth.parts:
            idx = self.app_pth.parts.index('src')
            # Path.parents is ordered nearest-first, hence this arithmetic
            # selects the parent directory that contains the 'src' entry.
            app_root_dir = self.app_pth.parents[len(self.app_pth.parts) - idx - 1]
        elif 'tests' in self.app_pth.parts:
            idx = self.app_pth.parts.index('tests')
            app_root_dir = self.app_pth.parents[len(self.app_pth.parts) - idx - 1]
        elif 'site-packages' in self.app_pth.parts:
            app_root_dir = self.app_pth.parents[1]
        else:
            # Walk the parts and stop at the first component that matches the
            # application name (case-insensitive).
            t_dir = Path()
            for i, part in enumerate(self.app_pth.parts):
                t_dir = t_dir / part
                if part.lower() == self.app_name.lower():
                    app_root_dir = t_dir
                    break
        return app_root_dir

    def make_archive(self):
        '''Zip the project files under the root dir into VersionArchive.

        Files are selected by the extensions in self.arc_incl_ext; any file
        living under a directory named in self.arc_excl_dir is skipped.
        Only runs in development mode.
        '''
        if self.is_dev_mode() and self.app_root_dir:
            self.arc_dir = self.app_root_dir / self.version_archive
            if not self.arc_dir.is_dir():
                self.arc_dir.mkdir()
            # Archive name: "<app> <yymmddHHMMSS> (<version> Beta).zip"
            self.arc_pth = self.arc_dir / '{} {} ({} Beta).zip'.format(
                self.app_name, self.start_date_str, self.app_ver
            )
            with zipfile.ZipFile(self.arc_pth, 'w') as archive_zip:
                for ext in self.arc_incl_ext:
                    files = self.app_root_dir.glob('**/*.{}'.format(ext))
                    for file in files:
                        exclude_file = False
                        for excl_dir in self.arc_excl_dir:
                            if excl_dir in file.parts:
                                exclude_file = True
                        if not exclude_file:
                            # NOTE(review): writes the path as-is, so entries
                            # keep their full on-disk path inside the zip.
                            archive_zip.write(file)
            pass
        pass

    def make_archive_external(self):
        '''Copy the newly created archive to the external archive directory.

        Only runs in development mode and when an external directory was
        supplied at construction time.
        '''
        if self.is_dev_mode() and self.arc_extern_dir:
            if not self.arc_extern_dir.exists():
                self.arc_extern_dir.mkdir()
            shutil.copy(self.arc_pth, self.arc_extern_dir)

    def print_footer(self):
        '''Print standard footers

        Prints the start time, end time and h:m:s duration of the run.

        Parameters
        ----------

        Returns
        -------
        bool
            Always True.

        Examples
        --------
        '''
        success = True
        self.end_time = datetime.datetime.now()
        self.elapsed_time = self.end_time - self.start_time
        # Break the elapsed seconds down into hour/minute/second components.
        self.dur_hours = int(self.elapsed_time.seconds / 3600)
        self.dur_min = int((self.elapsed_time.seconds - self.dur_hours * 3600) / 60)
        self.dur_sec = int(
            self.elapsed_time.seconds - self.dur_hours * 3600 - self.dur_min * 60
        )
        print_str = '\n{:<15}{:<15}'.format(
            'Start:', self.start_time.strftime('%m/%d %H:%M:%S')
        )
        print(print_str)
        print_str = '{:<15}{:<15}'.format(
            'End:', self.end_time.strftime('%m/%d %H:%M:%S')
        )
        print(print_str)
        print_str = '{:<15}{:>5} {:0>2}:{:0>2}:{:0>2}'.format(
            'Duration:',
            self.elapsed_time.days,
            self.dur_hours,
            self.dur_min,
            self.dur_sec,
        )
        print(print_str)
        return success

    def print_header(self, p_cls=True):
        '''Initialize the start of the module, make backup and print standard headers

        Parameters
        ----------
        p_cls
            Clear the screen
            Default = True

        Returns
        -------
        bool
            Always True.

        Examples
        --------
        '''
        success = True
        self.cls = p_cls
        # Clear the console in a platform-specific way when requested.
        if sys.platform.startswith('win32') and self.cls:
            os.system('cls')
        elif sys.platform.startswith('linux') and self.cls:
            os.system('clear')
        args = sys.argv[1:]
        # Substitute the application ini file name after a '-c' switch so the
        # echoed command line shows the effective config file.
        for i, arg in enumerate(args):
            if arg == '-c':
                args[i + 1] = str(self.app_ini_file_name)
        print(
            msg_header(
                '{} ({}) {}\nDescription: {}\n'.format(
                    self.app_name, self.app_ver, ' '.join(args), self.app_desc
                )
            )
        )
        return success
# Message defaults
BAR_LEN = 50  # default width of a separator bar — presumably for console output; not used in the visible code
MSG_LEN = 50  # default fixed display length for console messages (used by msg_display)
CRASH_RETRY = 2  # default retry count after a crash — not used in the visible code; TODO confirm semantics
def _add_parm(def_parm, new_parm):
if isinstance(new_parm, list):
def_parm += [x for x in new_parm if x not in def_parm]
elif isinstance(new_parm, str) and new_parm not in def_parm:
def_parm.append(new_parm)
return def_parm
def msg_display(p_msg, p_len=MSG_LEN, p_color='white') -> str:
    '''Return a fixed-width console message in the given color on black.

    Parameters
    ----------
    p_msg
        The message
    p_len
        The fixed length of the message. Default is beetools.MSG_LEN
    p_color
        Color of text, always on black.
        [ grey, red, green, yellow, blue, magenta, cyan, white ]

    Returns
    -------
    str
        Text in the specified color.

    Examples
    --------
    >>> from beetools import msg_display
    >>> msg_display( 'Display message' )
    '\\x1b[37mDisplay message '
    '''
    padded = '{: <{len}}'.format(p_msg, len=p_len)
    colored_msg = colored(padded, p_color)
    # Slicing the colored string preserves the historical truncation
    # behavior: the ANSI escape prefix counts toward the length and the
    # trailing reset code is cut off.
    return colored_msg[:p_len] + ' '
def msg_error(p_msg) -> str:
    '''Return an "error" text message in red on black.

    Parameters
    ----------
    p_msg
        The message

    Returns
    -------
    str
        Text in red on black.

    Examples
    --------
    >>> from beetools import msg_error
    >>> msg_error( 'Error message' )
    '\\x1b[31mError message\\x1b[0m'
    '''
    return colored(f'{p_msg}', 'red')
def msg_header(p_msg) -> str:
    '''Return a "header" text message in cyan on black.

    Fix: the original docstring said "Text in red on black", a copy-paste
    error — the code colors the text cyan.

    Parameters
    ----------
    p_msg
        The message

    Returns
    -------
    str
        Text in cyan on black.

    Examples
    --------
    >>> from beetools import msg_header
    >>> msg_header( 'Header message' )
    '\\x1b[36mHeader message\\x1b[0m'
    '''
    return colored('{}'.format(p_msg), 'cyan')
def msg_info(p_msg) -> str:
    '''Return an "information" text message in yellow on black.

    Fix: the original docstring said "Text in red on black", a copy-paste
    error — the code colors the text yellow.

    Parameters
    ----------
    p_msg
        The message

    Returns
    -------
    str
        Text in yellow on black.

    Examples
    --------
    >>> from beetools import msg_info
    >>> msg_info( 'Info message' )
    '\\x1b[33mInfo message\\x1b[0m'
    '''
    return colored('{}'.format(p_msg), 'yellow')
def msg_milestone(p_msg) -> str:
    '''Return a "milestone" text message in magenta on black.

    Fix: the original docstring said "Text in red on black", a copy-paste
    error — the code colors the text magenta.

    Parameters
    ----------
    p_msg
        The message

    Returns
    -------
    str
        Text in magenta on black.

    Examples
    --------
    >>> from beetools import msg_milestone
    >>> msg_milestone( 'Milestone message' )
    '\\x1b[35mMilestone message\\x1b[0m'
    '''
    return colored('{}'.format(p_msg), 'magenta')
def msg_ok(p_msg) -> str:
    '''Return an "OK" text message in green on black.

    Fix: the original docstring said "Text in red on black", a copy-paste
    error — the code colors the text green.

    Parameters
    ----------
    p_msg
        The message

    Returns
    -------
    str
        Text in green on black.

    Examples
    --------
    >>> from beetools import msg_ok
    >>> msg_ok( 'OK message' )
    '\\x1b[32mOK message\\x1b[0m'
    '''
    return colored('{}'.format(p_msg), 'green')
def example_archiver(p_cls=True):
    '''Example to illustrate usage.

    Builds a throw-away project skeleton in a temporary directory, runs an
    Archiver over it and prints the standard header and footer.

    Parameters
    ----------
    p_cls
        Clear the screen before start
        Default is True

    Returns
    -------
    bool
        Successful execution [ True ]

    Examples
    --------
    '''
    success = True
    app_name = 'TestApp'
    app_desc = 'Test application description'
    with tempfile.TemporaryDirectory() as temp_dir:
        # Lay out <temp>/TestApp/src/testapp/testapp.py as a minimal module.
        src_dir = Path(temp_dir, app_name, 'src', app_name.lower())
        src_dir.mkdir(parents=True)
        module_pth = (src_dir / app_name.lower()).with_suffix('.py')
        module_pth.touch()
        extern_dir = Path(temp_dir, 'external')
        # The Archiver creates the external dir itself when needed.
        demo_archiver = Archiver(app_desc, module_pth, p_arc_extern_dir=extern_dir)
        demo_archiver.print_header(p_cls=p_cls)
        demo_archiver.print_footer()
    return success
def example_messaging():
'''Standard example to illustrate standard use.
Parameters
----------
Returns
-------
bool
Successful execution [ b_tls.archive_path | False ]
Examples
--------
'''
success = True
print(
msg_display(
'This message print in blue and cut at {} character because it is too long!'.format(
MSG_LEN
),
p_color='blue',
)
)
print(msg_ok('This message is an OK message'))
print(msg_info('This | |
ip_sec_vpn_service: :class:`com.vmware.nsx_policy.model_client.IPSecVpnService`
:param ip_sec_vpn_service: (required)
:rtype: :class:`com.vmware.nsx_policy.model_client.IPSecVpnService`
:return: com.vmware.nsx_policy.model.IPSecVpnService
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('update',
{
'tier0_id': tier0_id,
'locale_service_id': locale_service_id,
'service_id': service_id,
'ip_sec_vpn_service': ip_sec_vpn_service,
})
class L2vpnContext(VapiInterface):
    """VAPI client bindings for the tier-0 locale-service L2VPN context."""
    _VAPI_SERVICE_ID = 'com.vmware.nsx_policy.infra.tier_0s.locale_services.l2vpn_context'
    """
    Identifier of the service in canonical form.
    """

    def __init__(self, config):
        """
        :type  config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
        :param config: Configuration to be used for creating the stub.
        """
        VapiInterface.__init__(self, config, _L2vpnContextStub)
        self._VAPI_OPERATION_IDS = {}

    def get(self, tier0_id, locale_service_id):
        """Invoke the 'get' operation for the L2VPN context.

        :type  tier0_id: :class:`str`
        :param tier0_id: (required)
        :type  locale_service_id: :class:`str`
        :param locale_service_id: (required)
        :rtype: :class:`com.vmware.nsx_policy.model_client.L2VpnContext`
        :return: com.vmware.nsx_policy.model.L2VpnContext
        :raise: standard VAPI errors — ServiceUnavailable, InvalidRequest,
            InternalServerError, Unauthorized, NotFound
        """
        call_args = {
            'tier0_id': tier0_id,
            'locale_service_id': locale_service_id,
        }
        return self._invoke('get', call_args)
class L2vpnServices(VapiInterface):
    """VAPI client bindings for the tier-0 locale-service L2VPN services."""
    _VAPI_SERVICE_ID = 'com.vmware.nsx_policy.infra.tier_0s.locale_services.l2vpn_services'
    """
    Identifier of the service in canonical form.
    """

    def __init__(self, config):
        """
        :type  config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
        :param config: Configuration to be used for creating the stub.
        """
        VapiInterface.__init__(self, config, _L2vpnServicesStub)
        self._VAPI_OPERATION_IDS = {}

    def delete(self, tier0_id, locale_service_id, service_id):
        """Delete L2VPN service for given locale service.

        :type  tier0_id: :class:`str`
        :param tier0_id: (required)
        :type  locale_service_id: :class:`str`
        :param locale_service_id: (required)
        :type  service_id: :class:`str`
        :param service_id: (required)
        :raise: standard VAPI errors — ServiceUnavailable, InvalidRequest,
            InternalServerError, Unauthorized, NotFound
        """
        call_args = {
            'tier0_id': tier0_id,
            'locale_service_id': locale_service_id,
            'service_id': service_id,
        }
        return self._invoke('delete', call_args)

    def get(self, tier0_id, locale_service_id, service_id):
        """Get L2VPN service for given locale service.

        :type  tier0_id: :class:`str`
        :param tier0_id: (required)
        :type  locale_service_id: :class:`str`
        :param locale_service_id: (required)
        :type  service_id: :class:`str`
        :param service_id: (required)
        :rtype: :class:`com.vmware.nsx_policy.model_client.L2VPNService`
        :return: com.vmware.nsx_policy.model.L2VPNService
        :raise: standard VAPI errors — ServiceUnavailable, InvalidRequest,
            InternalServerError, Unauthorized, NotFound
        """
        call_args = {
            'tier0_id': tier0_id,
            'locale_service_id': locale_service_id,
            'service_id': service_id,
        }
        return self._invoke('get', call_args)

    def list(self,
             tier0_id,
             locale_service_id,
             cursor=None,
             include_mark_for_delete_objects=None,
             included_fields=None,
             page_size=None,
             sort_ascending=None,
             sort_by=None,
             ):
        """Get paginated list of all L2VPN services.

        :type  tier0_id: :class:`str`
        :param tier0_id: (required)
        :type  locale_service_id: :class:`str`
        :param locale_service_id: (required)
        :type  cursor: :class:`str` or ``None``
        :param cursor: Opaque cursor to be used for getting next page of records
            (supplied by current result page) (optional)
        :type  include_mark_for_delete_objects: :class:`bool` or ``None``
        :param include_mark_for_delete_objects: Include objects that are marked
            for deletion in results (optional, default to false)
        :type  included_fields: :class:`str` or ``None``
        :param included_fields: Comma separated list of fields that should be
            included in query result (optional)
        :type  page_size: :class:`long` or ``None``
        :param page_size: Maximum number of results to return in this page
            (server may return fewer) (optional, default to 1000)
        :type  sort_ascending: :class:`bool` or ``None``
        :param sort_ascending: (optional)
        :type  sort_by: :class:`str` or ``None``
        :param sort_by: Field by which records are sorted (optional)
        :rtype: :class:`com.vmware.nsx_policy.model_client.L2VPNServiceListResult`
        :return: com.vmware.nsx_policy.model.L2VPNServiceListResult
        :raise: standard VAPI errors — ServiceUnavailable, InvalidRequest,
            InternalServerError, Unauthorized, NotFound
        """
        call_args = {
            'tier0_id': tier0_id,
            'locale_service_id': locale_service_id,
            'cursor': cursor,
            'include_mark_for_delete_objects': include_mark_for_delete_objects,
            'included_fields': included_fields,
            'page_size': page_size,
            'sort_ascending': sort_ascending,
            'sort_by': sort_by,
        }
        return self._invoke('list', call_args)

    def patch(self, tier0_id, locale_service_id, service_id, l2_vpn_service):
        """Create or patch L2VPN service for given locale service.

        :type  tier0_id: :class:`str`
        :param tier0_id: (required)
        :type  locale_service_id: :class:`str`
        :param locale_service_id: (required)
        :type  service_id: :class:`str`
        :param service_id: (required)
        :type  l2_vpn_service: :class:`com.vmware.nsx_policy.model_client.L2VPNService`
        :param l2_vpn_service: (required)
        :raise: standard VAPI errors — ServiceUnavailable, InvalidRequest,
            InternalServerError, Unauthorized, NotFound
        """
        call_args = {
            'tier0_id': tier0_id,
            'locale_service_id': locale_service_id,
            'service_id': service_id,
            'l2_vpn_service': l2_vpn_service,
        }
        return self._invoke('patch', call_args)

    def update(self, tier0_id, locale_service_id, service_id, l2_vpn_service):
        """Create or fully replace L2VPN service for given locale service.
        Revision is optional for creation and required for update.

        :type  tier0_id: :class:`str`
        :param tier0_id: (required)
        :type  locale_service_id: :class:`str`
        :param locale_service_id: (required)
        :type  service_id: :class:`str`
        :param service_id: (required)
        :type  l2_vpn_service: :class:`com.vmware.nsx_policy.model_client.L2VPNService`
        :param l2_vpn_service: (required)
        :rtype: :class:`com.vmware.nsx_policy.model_client.L2VPNService`
        :return: com.vmware.nsx_policy.model.L2VPNService
        :raise: standard VAPI errors — ServiceUnavailable, InvalidRequest,
            InternalServerError, Unauthorized, NotFound
        """
        call_args = {
            'tier0_id': tier0_id,
            'locale_service_id': locale_service_id,
            'service_id': service_id,
            'l2_vpn_service': l2_vpn_service,
        }
        return self._invoke('update', call_args)
class L3vpnContext(VapiInterface):
    """VAPI client bindings for the tier-0 locale-service L3VPN context."""
    _VAPI_SERVICE_ID = 'com.vmware.nsx_policy.infra.tier_0s.locale_services.l3vpn_context'
    """
    Identifier of the service in canonical form.
    """

    def __init__(self, config):
        """
        :type  config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
        :param config: Configuration to be used for creating the stub.
        """
        VapiInterface.__init__(self, config, _L3vpnContextStub)
        self._VAPI_OPERATION_IDS = {}

    def get(self, tier0_id, locale_service_id):
        """Invoke the 'get' operation for the L3VPN context.

        :type  tier0_id: :class:`str`
        :param tier0_id: (required)
        :type  locale_service_id: :class:`str`
        :param locale_service_id: (required)
        :rtype: :class:`com.vmware.nsx_policy.model_client.L3VpnContext`
        :return: com.vmware.nsx_policy.model.L3VpnContext
        :raise: standard VAPI errors — ServiceUnavailable, InvalidRequest,
            InternalServerError, Unauthorized, NotFound
        """
        call_args = {
            'tier0_id': tier0_id,
            'locale_service_id': locale_service_id,
        }
        return self._invoke('get', call_args)
class L3vpns(VapiInterface):
    """
    VAPI client bindings for the tier-0 locale-service L3VPN service.
    Operations are dispatched through the generated _L3vpnsStub.
    """
    # Allowed values for the ``l3vpn_session`` filter of :func:`L3vpns.list`.
    LIST_L3VPN_SESSION_POLICYBASEDL3VPNSESSION = "PolicyBasedL3VpnSession"
    """
    Possible value for ``l3vpnSession`` of method :func:`L3vpns.list`.
    """
    LIST_L3VPN_SESSION_ROUTEBASEDL3VPNSESSION = "RouteBasedL3VpnSession"
    """
    Possible value for ``l3vpnSession`` of method :func:`L3vpns.list`.
    """
    _VAPI_SERVICE_ID = 'com.vmware.nsx_policy.infra.tier_0s.locale_services.l3vpns'
    """
    Identifier of the service in canonical form.
    """
    def __init__(self, config):
        """
        :type  config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
        :param config: Configuration to be used for creating the stub.
        """
        VapiInterface.__init__(self, config, _L3vpnsStub)
        # No per-operation identifier overrides for this service.
        self._VAPI_OPERATION_IDS = {}
def delete(self,
tier0_id,
locale_service_id,
l3vpn_id,
):
"""
:type tier0_id: :class:`str`
:param tier0_id: (required)
:type locale_service_id: :class:`str`
:param locale_service_id: (required)
:type l3vpn_id: :class:`str`
:param l3vpn_id: (required)
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('delete',
{
'tier0_id': tier0_id,
'locale_service_id': locale_service_id,
'l3vpn_id': l3vpn_id,
})
def get(self,
tier0_id,
locale_service_id,
l3vpn_id,
):
"""
:type tier0_id: :class:`str`
:param tier0_id: (required)
:type locale_service_id: :class:`str`
:param locale_service_id: (required)
:type l3vpn_id: :class:`str`
:param l3vpn_id: (required)
:rtype: :class:`com.vmware.nsx_policy.model_client.L3Vpn`
:return: com.vmware.nsx_policy.model.L3Vpn
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('get',
{
'tier0_id': tier0_id,
'locale_service_id': locale_service_id,
'l3vpn_id': l3vpn_id,
})
def list(self,
tier0_id,
locale_service_id,
cursor=None,
include_mark_for_delete_objects=None,
included_fields=None,
l3vpn_session=None,
page_size=None,
sort_ascending=None,
sort_by=None,
):
"""
:type tier0_id: :class:`str`
:param tier0_id: (required)
:type locale_service_id: :class:`str`
:param locale_service_id: (required)
:type cursor: :class:`str` or ``None``
:param cursor: Opaque cursor to be used for getting next page of records (supplied
by current result page) (optional)
:type include_mark_for_delete_objects: :class:`bool` or ``None``
:param include_mark_for_delete_objects: Include objects that are marked for deletion in results (optional,
default to false)
:type included_fields: :class:`str` or ``None``
:param included_fields: Comma separated list of fields that should be included in query
result (optional)
:type l3vpn_session: :class:`str` or ``None``
:param l3vpn_session: Resource type of L3Vpn Session (optional)
:type page_size: :class:`long` or ``None``
:param page_size: Maximum number of results to return in this page (server may return
fewer) (optional, default to 1000)
:type sort_ascending: :class:`bool` or ``None``
:param sort_ascending: (optional)
:type sort_by: :class:`str` or ``None``
:param sort_by: Field by which records are sorted (optional)
:rtype: :class:`com.vmware.nsx_policy.model_client.L3VpnListResult`
:return: com.vmware.nsx_policy.model.L3VpnListResult
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('list',
{
'tier0_id': tier0_id,
'locale_service_id': locale_service_id,
'cursor': cursor,
'include_mark_for_delete_objects': include_mark_for_delete_objects,
'included_fields': included_fields,
'l3vpn_session': l3vpn_session,
'page_size': page_size,
'sort_ascending': sort_ascending,
'sort_by': sort_by,
})
def patch(self,
tier0_id,
locale_service_id,
l3vpn_id,
l3_vpn,
):
"""
:type tier0_id: :class:`str`
:param tier0_id: (required)
:type locale_service_id: :class:`str`
:param locale_service_id: (required)
:type l3vpn_id: :class:`str`
:param l3vpn_id: (required)
:type l3_vpn: :class:`com.vmware.nsx_policy.model_client.L3Vpn`
:param l3_vpn: (required)
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('patch',
{
'tier0_id': tier0_id,
'locale_service_id': locale_service_id,
'l3vpn_id': l3vpn_id,
'l3_vpn': l3_vpn,
})
def showsensitivedata(self,
tier0_id,
locale_service_id,
l3vpn_id,
):
"""
| |
# Copyright (c) 2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Imports
import unittest
from bip_utils import Bip44Conf, Bip44, Bip44Coins, Bip49Coins, Bip84Coins
from tests.bip.bip44_base.test_bip44_base import Bip44BaseTestHelper
# Results generated with: https://iancoleman.io/bip39
# Or with coin-specific/multi-coin wallets (e.g. TronLink, Cosmostation, Trust Wallet, Math Wallet)
# There are some differences from the website and the specs I found for Litecoin testnet (extended keys prefixes) so,
# in that case, the keys were generated by this library after begin tested for the correct addresses
TEST_VECT = [
#
# Main nets
#
# Algorand
{
"coin": Bip44Coins.ALGORAND,
"names": ("Algorand", "ALGO"),
"is_testnet": False,
"seed": b"<KEY>",
"ex_master": "<KEY>",
"wif_master": "",
"account": {
"ex_pub": "<KEY>",
"ex_priv": "<KEY>",
},
"chain_ext": {
"ex_pub": "<KEY>",
"ex_priv": "<KEY>",
},
"addresses": [
"EP2D7TV7IAFANZHK3B6QLKB53N5UTD7RARVXZTWCPCRQQBKYVGM2XIMT2Q",
"4TY6NBYEJMUOI5WPNUG7YRM63GQNVZA47336IICDLIARPF7GFUMSUJ2CUU",
"YJBVNZYARZIVME3DWJLXWMJ22YWMANMTHGPLO4O7A2FDWP6662SER5SWLA",
"4FNYH2QEESPLDGZJGNCOLRD7EZCIKWO3BHUPXTCU54JRJKCUN35FTQCTFQ",
"NLMHFPS37XEHFWDPUUN3MHWVPYEBRUPFQWZVHIMRKPHSW5UPUYLJCVZN5I",
],
},
# Avax C-Chain
{
"coin": Bip44Coins.AVAX_C_CHAIN,
"names": ("Avax C-Chain", "AVAX"),
"is_testnet": False,
"seed": b"<KEY>",
"ex_master": "<KEY>",
"wif_master": "",
"account": {
"ex_pub": "<KEY>",
"ex_priv": "<KEY>",
},
"chain_ext": {
"ex_pub": "<KEY>",
"ex_priv": "<KEY>",
},
"addresses": [
"0x9858EfFD232B4033E47d90003D41EC34EcaEda94",
"0x6Fac4D18c912343BF86fa7049364Dd4E424Ab9C0",
"0xb6716976A3ebe8D39aCEB04372f22Ff8e6802D7A",
"0xF3f50213C1d2e255e4B2bAD430F8A38EEF8D718E",
"0x51cA8ff9f1C0a99f88E86B8112eA3237F55374cA",
],
},
# Avax P-Chain
{
"coin": Bip44Coins.AVAX_P_CHAIN,
"names": ("Avax P-Chain", "AVAX"),
"is_testnet": False,
"seed": b"<KEY>",
"ex_master": "<KEY>",
"wif_master": "",
"account": {
"ex_pub": "<KEY>",
"ex_priv": "<KEY>",
},
"chain_ext": {
"ex_pub": "<KEY>",
"ex_priv": "<KEY>",
},
"addresses": [
"P-<KEY>",
"P-<KEY>",
"P-<KEY>",
"P-<KEY>",
"P-<KEY>",
],
},
# Avax X-Chain
{
"coin": Bip44Coins.AVAX_X_CHAIN,
"names": ("Avax X-Chain", "AVAX"),
"is_testnet": False,
"seed": b"<KEY>",
"ex_master": "<KEY>",
"wif_master": "",
"account": {
"ex_pub": "<KEY>",
"ex_priv": "<KEY>",
},
"chain_ext": {
"ex_pub": "<KEY>",
"ex_priv": "<KEY>",
},
"addresses": [
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
],
},
# Band Protocol
{
"coin": Bip44Coins.BAND_PROTOCOL,
"names": ("Band Protocol", "BAND"),
"is_testnet": False,
"seed": b"<KEY>",
"ex_master": "<KEY>",
"wif_master": "",
"account": {
"ex_pub": "<KEY>",
"ex_priv": "<KEY>",
},
"chain_ext": {
"ex_pub": "<KEY>",
"ex_priv": "<KEY>",
},
"addresses": [
"band1vh8tr8ddf7g0gfep23t46yllmlvtqfjknjdd38",
"band1c2pramju9wtdz2jqulfalku58pq2ecgxcqj0f8",
"band1tzt3hf6vges3wqhl3tuhm70r89tqk80ckndy5a",
"band1zjxdfh8954dkehatc06jajh5v6jqlxtsc5pm7w",
"band1n2eculyv945phq5y7cj4l0a6tkg2px79stvxpd",
],
},
# Binance Chain
{
"coin": Bip44Coins.BINANCE_CHAIN,
"names": ("Binance Chain", "BNB"),
"is_testnet": False,
"seed": b"<KEY>",
"ex_master": "<KEY>",
"wif_master": "",
"account": {
"ex_pub": "<KEY>",
"ex_priv": "<KEY>",
},
"chain_ext": {
"ex_pub": "<KEY>",
"ex_priv": "<KEY>",
},
"addresses": [
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"bnb1jpcfwe6x9q3q5t0dfjcfz052x8q94vlygyva4r",
],
},
# Binance Smart Chain
{
"coin": Bip44Coins.BINANCE_SMART_CHAIN,
"names": ("Binance Smart Chain", "BNB"),
"is_testnet": False,
"seed": b"<KEY>",
"ex_master": "<KEY>",
"wif_master": "",
"account": {
"ex_pub": "<KEY>",
"ex_priv": "<KEY>",
},
"chain_ext": {
"ex_pub": "<KEY>",
"ex_priv": "<KEY>",
},
"addresses": [
"0x9858EfFD232B4033E47d90003D41EC34EcaEda94",
"0x6Fac4D18c912343BF86fa7049364Dd4E424Ab9C0",
"0xb6716976A3ebe8D39aCEB04372f22Ff8e6802D7A",
"0xF3f50213C1d2e255e4B2bAD430F8A38EEF8D718E",
"0x51cA8ff9f1C0a99f88E86B8112eA3237F55374cA",
],
},
# Bitcoin
{
"coin": Bip44Coins.BITCOIN,
"names": ("Bitcoin", "BTC"),
"is_testnet": False,
"seed": b"<KEY>",
"ex_master": "xprv9s21ZrQH143K3GJpoapnV8SFfukcVBSfeCficPSGfubmSFDxo1kuHnLisriDvSnRRuL2Qrg5ggqHKNVpxR86QEC8w35uxmGoggxtQTPvfUu",
"wif_master": "<KEY>",
"account": {
"ex_pub": "<KEY>",
"ex_priv": "<KEY>",
},
"chain_ext": {
"ex_pub": "<KEY>",
"ex_priv": "<KEY>",
},
"addresses": [
"<KEY>",
"<KEY>",
"<KEY>",
"1MVGa13XFvvpKGZdX389iU8b3qwtmAyrsJ",
"1Gka4JdwhLxRwXaC6oLNH4YuEogeeSwqW7",
],
},
# Bitcoin Cash
{
"coin": Bip44Coins.BITCOIN_CASH,
"names": ("Bitcoin Cash", "BCH"),
"is_testnet": False,
"seed": b"<KEY>",
"ex_master": "<KEY>",
"wif_master": "<KEY>",
"account": {
"ex_pub": "<KEY>",
"ex_priv": "<KEY>",
},
"chain_ext": {
"ex_pub": "<KEY>",
"ex_priv": "<KEY>",
},
"addresses_legacy": {
"cls": Bip44Conf.BitcoinCashMainNet,
"addresses": [
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
]
},
"addresses": [
"bitcoincash:qqyx49mu0kkn9ftfj6hje6g2wfer34yfnq5tahq3q6",
"bitcoincash:qp8sfdhgjlq68hlzka9lcsxtcnvuvnd0xqxugfzzc5",
"bitcoincash:qqkuy34ntrye9a2h4xpdstcu4aq5wfrwscjtaphenr",
"bitcoincash:qzcyvxr0e23d408u62ulf6cnspc0k4dyduy8kh77nc",
"bitcoincash:qptzx8m39zjuuyvrf86s3kywuledfht2jcty8we6gv",
],
},
# BitcoinSV
{
"coin": Bip44Coins.BITCOIN_SV,
"names": ("BitcoinSV", "BSV"),
"is_testnet": False,
"seed": b"<KEY>",
"ex_master": "<KEY>",
"wif_master": "<KEY>",
"account": {
"ex_pub": "<KEY>",
"ex_priv": "<KEY>",
},
"chain_ext": {
"ex_pub": "<KEY>",
"ex_priv": "<KEY>",
},
"addresses": [
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
],
},
# Celo
{
"coin": Bip44Coins.CELO,
"names": ("Celo", "CELO"),
"is_testnet": False,
"seed": b"<KEY>",
"ex_master": "<KEY>",
"wif_master": "",
"account": {
"ex_pub": "<KEY>",
"ex_priv": "<KEY>",
},
"chain_ext": {
"ex_pub": "<KEY>",
"ex_priv": "<KEY>",
},
"addresses": [
"0xE<KEY>0D7a61F58535F6EC99cd860cA",
"0x3EC275571fDa6E659c18a5D81C90b350a8b34f8A",
"<KEY>",
"0xbe596d1b4Ba1385a0161628e17886F5E476ed73B",
"0xB0691D324e0c1026236e72394ea3264b46a383f6",
],
},
# Cosmos
{
"coin": Bip44Coins.COSMOS,
"names": ("Cosmos", "ATOM"),
"is_testnet": False,
"seed": b"<KEY>",
"ex_master": "<KEY>",
"wif_master": "",
"account": {
"ex_pub": "<KEY>",
"ex_priv": "<KEY>",
},
"chain_ext": {
"ex_pub": "<KEY>",
"ex_priv": "<KEY>",
},
"addresses": [
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
],
},
# Dash
{
"coin": Bip44Coins.DASH,
"names": ("Dash", "DASH"),
"is_testnet": False,
"seed": b"<KEY>",
"ex_master": "<KEY>",
"wif_master": "<KEY>",
"account": {
"ex_pub": "<KEY>",
"ex_priv": "<KEY>",
},
"chain_ext": {
"ex_pub": "<KEY>",
"ex_priv": "<KEY>",
},
"addresses": [
"<KEY>",
"<KEY>",
"XdD2biTJ3saZtcR6ravwJ9bvmkvmDq49Xg",
"XkBrNhE8srfb8BbeTRSU4dxWsjjedra4Xn",
"Xe8n8PZNgngjbMCFEA9unH26TmEWPPjm6a",
],
},
# Dogecoin
{
"coin": Bip44Coins.DOGECOIN,
"names": ("Dogecoin", "DOGE"),
"is_testnet": False,
"seed": b"<KEY>",
"ex_master": "<KEY>",
"wif_master": "<KEY>",
"account": {
"ex_pub": "<KEY>",
"ex_priv": "<KEY>",
},
"chain_ext": {
"ex_pub": "<KEY>",
"ex_priv": "<KEY>",
},
"addresses": [
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
],
},
# Elrond
{
"coin": Bip44Coins.ELROND,
"names": ("<NAME>", "eGLD"),
"is_testnet": False,
"seed": b"<KEY>",
"ex_master": "xprv9s21ZrQH143K4GYBne6aGFZLHUSCPzQ3amKxfzz6kHpqDLBHk58dvPW7twcRFJeo1oQAvNeiYpZEeUkWCW3YgdvXHUcQQU6xjh8jBo4Y8xG",
"wif_master": "",
"account": {
"ex_pub": "<KEY>",
"ex_priv": "<KEY>",
},
"chain_ext": {
"ex_pub": "<KEY>",
"ex_priv": "<KEY>",
},
"addresses": [
"<KEY>",
"erd1xkrttq324elvla4kk83r6wns35cjyqw7vg5tmdfn7qmrc2drd7qswlwt6z",
"erd19assuvyq236dmr4th5wjy59c75cn98zjusd3m74kv5y9ylerl0esenuv3m",
"erd1hhwwsj0837t6dkjy53waxtgp3aqws863rrvx6lj2v9t3jfxmhq5qgp0dj0",
"erd1yvj6es98ecj0axw399a8juekwvc4a3f4rvxpy8vudfvy4v6p088qexz52j",
],
},
# EOS
{
"coin": Bip44Coins.EOS,
"names": ("EOS", "EOS"),
"is_testnet": False,
"seed": b"<KEY>",
"ex_master": "<KEY>",
"wif_master": "",
"account": {
"ex_pub": "<KEY>",
"ex_priv": "<KEY>",
},
"chain_ext": {
"ex_pub": "<KEY>",
"ex_priv": "<KEY>",
},
"addresses": [
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
],
},
# Ethereum
{
"coin": Bip44Coins.ETHEREUM,
"names": ("Ethereum", "ETH"),
"is_testnet": False,
"seed": b"<KEY>",
"ex_master": "<KEY>",
"wif_master": "",
"account": {
"ex_pub": "<KEY>",
"ex_priv": "<KEY>",
},
"chain_ext": {
"ex_pub": "<KEY>",
"ex_priv": "<KEY>",
},
"addresses": [
"0x9858EfFD232B4033E47d90003D41EC34EcaEda94",
"0x6Fac4D18c912343BF86fa7049364Dd4E424Ab9C0",
"0xb6716976A3ebe8D39aCEB04372f22Ff8e6802D7A",
"0xF3f50213C1d2e255e4B2bAD430F8A38EEF8D718E",
"0x51cA8ff9f1C0a99f88E86B8112eA3237F55374cA",
],
},
# Ethereum Classic
{
"coin": Bip44Coins.ETHEREUM_CLASSIC,
"names": ("Ethereum Classic", "ETC"),
"is_testnet": False,
"seed": b"<KEY>",
"ex_master": "<KEY>",
"wif_master": "",
"account": {
"ex_pub": "<KEY>",
"ex_priv": "<KEY>",
},
"chain_ext": {
"ex_pub": "<KEY>",
"ex_priv": "<KEY>",
},
"addresses": [
"0xFA22515E43658ce56A7682B801e9B5456f511420",
"0xAee144cB8098B17BCD98D82802fDf32E8ca518cF",
"0x6A9F90A1241E1960AD1A0cCb2400C9d235eA0846",
"0x4Fb914Ca25C64595B28aa6dD78321E7b885a8eC7",
"0x327b2ac3B8623eBdB3eEB39B150f960b6d3ec1a9",
],
},
# Fantom Opera
{
"coin": Bip44Coins.FANTOM_OPERA,
"names": ("Fantom Opera", "FTM"),
"is_testnet": False,
"seed": b"<KEY>",
"ex_master": "<KEY>",
"wif_master": "",
"account": {
"ex_pub": "<KEY>",
"ex_priv": "<KEY>",
},
"chain_ext": {
"ex_pub": "<KEY>",
"ex_priv": "<KEY>",
},
"addresses": [
"0x9858EfFD232B4033E47d90003D41EC34EcaEda94",
"<KEY>",
"0xb6716976A3ebe8D39aCEB04372f22Ff8e6802D7A",
"0xF3f50213C1d2e255e4B2bAD430F8A38EEF8D718E",
"0x51cA8ff9f1C0a99f88E86B8112eA3237F55374cA",
],
},
# Filecoin
{
"coin": Bip44Coins.FILECOIN,
"names": ("Filecoin", "FIL"),
"is_testnet": False,
"seed": b"<KEY>",
"ex_master": "<KEY>",
"wif_master": "",
"account": {
"ex_pub": "<KEY>",
"ex_priv": "<KEY>",
},
"chain_ext": {
"ex_pub": "<KEY>",
"ex_priv": "<KEY>",
},
"addresses": [
"<KEY>",
"<KEY>",
"<KEY>",
"f1c6kiiaqgtkxtp2sgirvfmtasoeyq5kuiszksjna",
"f1w2bkkhrdempg3ab45cnl22t4yfsdw2sspl7p5ra",
],
},
# Harmony One (Atom address)
{
"coin": Bip44Coins.HARMONY_ONE_ATOM,
"names": ("Harmony One", "ONE"),
"is_testnet": False,
"seed": b"<KEY>",
"ex_master": "<KEY>",
"wif_master": "",
"account": {
"ex_pub": "<KEY>",
"ex_priv": "<KEY>",
},
"chain_ext": {
"ex_pub": "<KEY>",
"ex_priv": "<KEY>",
},
"addresses": [
"one1q6gkzcap0uruuu8r6sldxuu47pd4ww9w9t7tg6",
"one13c4p4dzqjtcu6wvkvhqkpesagng5tfsl625gqm",
"one1lyt6xhyymyd22qwp3vrq0dq03wanktqsereqtt",
"one1ynsd3aq3v4cz9twqttp3d4hwzadqdxm85s64v3",
"one1gk9tzen4ltq2lw208m62pwkcsnk73uwklq7mgd",
],
},
# Harmony One (Ethereum address)
{
"coin": Bip44Coins.HARMONY_ONE_ETH,
"names": ("Harmony One", "ONE"),
"is_testnet": False,
"seed": b"<KEY>",
"ex_master": "<KEY>",
"wif_master": "",
"account": {
"ex_pub": "<KEY>",
"ex_priv": "<KEY>",
},
"chain_ext": {
"ex_pub": "<KEY>",
"ex_priv": "<KEY>",
},
"addresses": [
"0x06916163A17F07ce70e3d43Ed37395f05B5738aE",
"0x8E2A1aB44092F1CD399665c160e61D44D145A61F",
"0xF917A35c84D91AA501C18b0607B40f8bBB3B2C10",
"0x24e0D8F411657022Adc05Ac316D6ee175A069b67",
"0x458Ab16675FAC0aFb94f3EF4a0BAd884ede8F1D6",
],
},
# Harmony One (Metamask address)
{
"coin": Bip44Coins.HARMONY_ONE_METAMASK,
"names": ("Harmony One", "ONE"),
"is_testnet": False,
"seed": b"<KEY>",
"ex_master": "<KEY>",
"wif_master": "",
"account": {
"ex_pub": "<KEY>",
"ex_priv": "<KEY>",
},
"chain_ext": {
"ex_pub": "<KEY>",
"ex_priv": "<KEY>",
},
"addresses": [
"0x9858EfFD232B4033E47d90003D41EC34EcaEda94",
"0x6Fac4D18c912343BF86fa7049364Dd4E424Ab9C0",
"0xb6716976A3ebe8D39aCEB04372f22Ff8e6802D7A",
"0xF3f50213C1d2e255e4B2bAD430F8A38EEF8D718E",
"0x51cA8ff9f1C0a99f88E86B8112eA3237F55374cA",
],
},
# Huobi Chain
{
"coin": Bip44Coins.HUOBI_CHAIN,
"names": ("Huobi Token", "HT"),
"is_testnet": False,
"seed": b"<KEY>",
"ex_master": "<KEY>",
"wif_master": "",
"account": {
"ex_pub": "<KEY>",
"ex_priv": "<KEY>",
},
"chain_ext": {
"ex_pub": "<KEY>",
"ex_priv": "<KEY>",
},
"addresses": [
"0x9858EfFD232B4033E47d90003D41EC34EcaEda94",
"0x6Fac4D18c912343BF86fa7049364Dd4E424Ab9C0",
"0xb6716976A3ebe8D39aCEB04372f22Ff8e6802D7A",
"0xF3f50213C1d2e255e4B2bAD430F8A38EEF8D718E",
"0x51cA8ff9f1C0a99f88E86B8112eA3237F55374cA",
],
},
# IRIS Network
{
"coin": Bip44Coins.IRIS_NET,
"names": ("IRIS Network", "IRIS"),
"is_testnet": False,
"seed": b"<KEY>",
"ex_master": "<KEY>",
"wif_master": "",
"account": {
"ex_pub": "<KEY>",
"ex_priv": "<KEY>",
},
"chain_ext": {
"ex_pub": "<KEY>",
"ex_priv": "<KEY>",
},
"addresses": [
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
],
},
# Kava
{
"coin": Bip44Coins.KAVA,
"names": ("Kava", "KAVA"),
"is_testnet": False,
"seed": b"<KEY>",
"ex_master": "<KEY>",
"wif_master": "",
"account": {
"ex_pub": | |
<filename>pyGPGO/covfunc.py
import numpy as np
from scipy.special import gamma, kv
from scipy.spatial.distance import cdist
# Default optimization ranges for every kernel hyperparameter. A kernel
# constructed without an explicit `bounds` argument falls back to the
# entries here for each name listed in its `parameters`.
default_bounds = {
    'l': [1e-4, 1],
    'sigmaf': [1e-4, 2],
    'sigman': [1e-6, 2],
    'v': [1e-3, 10],
    'gamma': [1e-3, 1.99],
    'alpha': [1e-3, 1e4],
    'period': [1e-3, 10]
}
def l2norm_(X, Xstar):
    """
    Compute the pairwise Euclidean (L2) distance between rows of two matrices.

    Parameters
    ----------
    X: np.ndarray, shape=((n, nfeatures))
        Instances.
    Xstar: np.ndarray, shape=((m, nfeatures))
        Instances.

    Returns
    -------
    np.ndarray
        Matrix of shape (n, m) holding the Euclidean distance between each
        row of `X` and each row of `Xstar`.
    """
    distances = cdist(X, Xstar)
    return distances
def kronDelta(X, Xstar):
    """
    Build a boolean matrix marking coincident rows of `X` and `Xstar`.

    Entry (i, j) is True exactly when row i of `X` and row j of `Xstar`
    are closer than single-precision machine epsilon, i.e. effectively
    identical points.

    Parameters
    ----------
    X: np.ndarray, shape=((n, nfeatures))
        Instances.
    Xstar: np.ndarray, shape((m, nfeatures))
        Instances.

    Returns
    -------
    np.ndarray
        Boolean Kronecker-delta matrix between row pairs of `X` and `Xstar`.
    """
    eps = np.finfo(np.float32).eps
    return cdist(X, Xstar) < eps
class squaredExponential:
    def __init__(self, l=1, sigmaf=1.0, sigman=1e-6, bounds=None, parameters=None):
        """
        Squared exponential kernel class.

        Parameters
        ----------
        l: float
            Characteristic length-scale. Units in input space in which posterior GP values do not
            change significantly.
        sigmaf: float
            Signal variance. Controls the overall scale of the covariance function.
        sigman: float
            Noise variance. Additive noise in output space.
        bounds: list
            List of tuples specifying hyperparameter range in optimization procedure.
            Defaults to the `default_bounds` entry of each optimized parameter.
        parameters: list
            List of strings specifying which hyperparameters should be optimized.
            Defaults to ['l', 'sigmaf', 'sigman'].
        """
        self.l = l
        self.sigmaf = sigmaf
        self.sigman = sigman
        # A None sentinel replaces the former mutable default list, so every
        # instance gets its own list and cannot mutate a shared default.
        self.parameters = ['l', 'sigmaf', 'sigman'] if parameters is None else parameters
        if bounds is not None:
            self.bounds = bounds
        else:
            self.bounds = [default_bounds[param] for param in self.parameters]

    def K(self, X, Xstar):
        """
        Computes covariance function values over `X` and `Xstar`.

        Parameters
        ----------
        X: np.ndarray, shape=((n, nfeatures))
            Instances
        Xstar: np.ndarray, shape=((n, nfeatures))
            Instances

        Returns
        -------
        np.ndarray
            Computed covariance matrix.
        """
        r = l2norm_(X, Xstar)
        # sigmaf * exp(-r^2 / (2 l^2)), plus white noise on coincident rows.
        return self.sigmaf * np.exp(-.5 * r ** 2 / self.l ** 2) + self.sigman * kronDelta(X, Xstar)

    def gradK(self, X, Xstar, param='l'):
        """
        Computes gradient matrix for instances `X`, `Xstar` and hyperparameter `param`.

        Parameters
        ----------
        X: np.ndarray, shape=((n, nfeatures))
            Instances
        Xstar: np.ndarray, shape=((n, nfeatures))
            Instances
        param: str
            Parameter to compute gradient matrix for ('l', 'sigmaf' or 'sigman').

        Returns
        -------
        np.ndarray
            Gradient matrix for parameter `param`.

        Raises
        ------
        ValueError
            If `param` is not a recognized hyperparameter name.
        """
        if param == 'l':
            r = l2norm_(X, Xstar)
            # dK/dl = sigmaf * r^2 * exp(-r^2 / (2 l^2)) / l^3
            num = r ** 2 * self.sigmaf * np.exp(-r ** 2 / (2 * self.l ** 2))
            den = self.l ** 3
            return num / den
        elif param == 'sigmaf':
            r = l2norm_(X, Xstar)
            # dK/dsigmaf is the unit-variance correlation matrix.
            return np.exp(-.5 * r ** 2 / self.l ** 2)
        elif param == 'sigman':
            # dK/dsigman: noise only contributes on coincident rows.
            return kronDelta(X, Xstar)
        else:
            raise ValueError('Param not found')
class matern:
    def __init__(self, v=1, l=1, sigmaf=1, sigman=1e-6, bounds=None, parameters=None):
        """
        Matern kernel class.

        Parameters
        ----------
        v: float
            Scale-mixture hyperparameter of the Matern covariance function.
        l: float
            Characteristic length-scale. Units in input space in which posterior GP values do not
            change significantly.
        sigmaf: float
            Signal variance. Controls the overall scale of the covariance function.
        sigman: float
            Noise variance. Additive noise in output space.
        bounds: list
            List of tuples specifying hyperparameter range in optimization procedure.
            Defaults to the `default_bounds` entry of each optimized parameter.
        parameters: list
            List of strings specifying which hyperparameters should be optimized.
            Defaults to ['v', 'l', 'sigmaf', 'sigman'].
        """
        self.v, self.l = v, l
        self.sigmaf = sigmaf
        self.sigman = sigman
        # A None sentinel replaces the former mutable default list, so every
        # instance gets its own list and cannot mutate a shared default.
        self.parameters = ['v', 'l', 'sigmaf', 'sigman'] if parameters is None else parameters
        if bounds is not None:
            self.bounds = bounds
        else:
            self.bounds = [default_bounds[param] for param in self.parameters]

    def K(self, X, Xstar):
        """
        Computes covariance function values over `X` and `Xstar`.

        Parameters
        ----------
        X: np.ndarray, shape=((n, nfeatures))
            Instances
        Xstar: np.ndarray, shape=((n, nfeatures))
            Instances

        Returns
        -------
        np.ndarray
            Computed covariance matrix.
        """
        r = l2norm_(X, Xstar)
        bessel = kv(self.v, np.sqrt(2 * self.v) * r / self.l)
        f = 2 ** (1 - self.v) / gamma(self.v) * (np.sqrt(2 * self.v) * r / self.l) ** self.v
        res = f * bessel
        # At r == 0 the product is 0 * inf -> NaN; the Matern correlation
        # tends to 1 at zero distance, so patch those entries explicitly.
        res[np.isnan(res)] = 1
        res = self.sigmaf * res + self.sigman * kronDelta(X, Xstar)
        return res
class matern32:
    def __init__(self, l=1, sigmaf=1, sigman=1e-6, bounds=None, parameters=None):
        """
        Matern v=3/2 kernel class.

        Parameters
        ----------
        l: float
            Characteristic length-scale. Units in input space in which posterior GP values do not
            change significantly.
        sigmaf: float
            Signal variance. Controls the overall scale of the covariance function.
        sigman: float
            Noise variance. Additive noise in output space.
        bounds: list
            List of tuples specifying hyperparameter range in optimization procedure.
            Defaults to the `default_bounds` entry of each optimized parameter.
        parameters: list
            List of strings specifying which hyperparameters should be optimized.
            Defaults to ['l', 'sigmaf', 'sigman'].
        """
        self.l = l
        self.sigmaf = sigmaf
        self.sigman = sigman
        # A None sentinel replaces the former mutable default list, so every
        # instance gets its own list and cannot mutate a shared default.
        self.parameters = ['l', 'sigmaf', 'sigman'] if parameters is None else parameters
        if bounds is not None:
            self.bounds = bounds
        else:
            self.bounds = [default_bounds[param] for param in self.parameters]

    def K(self, X, Xstar):
        """
        Computes covariance function values over `X` and `Xstar`.

        Parameters
        ----------
        X: np.ndarray, shape=((n, nfeatures))
            Instances
        Xstar: np.ndarray, shape=((n, nfeatures))
            Instances

        Returns
        -------
        np.ndarray
            Computed covariance matrix.
        """
        r = l2norm_(X, Xstar)
        # sigmaf * (1 + sqrt(3) r/l) * exp(-sqrt(3) r/l), plus white noise.
        one = (1 + np.sqrt(3 * (r / self.l) ** 2))
        two = np.exp(- np.sqrt(3 * (r / self.l) ** 2))
        return self.sigmaf * one * two + self.sigman * kronDelta(X, Xstar)

    def gradK(self, X, Xstar, param):
        """
        Computes gradient matrix for instances `X`, `Xstar` and hyperparameter `param`.

        Parameters
        ----------
        X: np.ndarray, shape=((n, nfeatures))
            Instances
        Xstar: np.ndarray, shape=((n, nfeatures))
            Instances
        param: str
            Parameter to compute gradient matrix for ('l', 'sigmaf' or 'sigman').

        Returns
        -------
        np.ndarray
            Gradient matrix for parameter `param`.

        Raises
        ------
        ValueError
            If `param` is not a recognized hyperparameter name.
        """
        if param == 'l':
            r = l2norm_(X, Xstar)
            # dK/dl = 3 sigmaf r^2 exp(-sqrt(3) r/l) / l^3
            num = 3 * (r ** 2) * self.sigmaf * np.exp(-np.sqrt(3) * r / self.l)
            return num / (self.l ** 3)
        elif param == 'sigmaf':
            r = l2norm_(X, Xstar)
            # dK/dsigmaf is the unit-variance correlation matrix.
            one = (1 + np.sqrt(3 * (r / self.l) ** 2))
            two = np.exp(- np.sqrt(3 * (r / self.l) ** 2))
            return one * two
        elif param == 'sigman':
            return kronDelta(X, Xstar)
        else:
            raise ValueError('Param not found')
class matern52:
    def __init__(self, l=1, sigmaf=1, sigman=1e-6, bounds=None, parameters=None):
        """
        Matern v=5/2 kernel class.

        Parameters
        ----------
        l: float
            Characteristic length-scale. Units in input space in which posterior GP values do not
            change significantly.
        sigmaf: float
            Signal variance. Controls the overall scale of the covariance function.
        sigman: float
            Noise variance. Additive noise in output space.
        bounds: list
            List of tuples specifying hyperparameter range in optimization procedure.
            Defaults to the `default_bounds` entry of each optimized parameter.
        parameters: list
            List of strings specifying which hyperparameters should be optimized.
            Defaults to ['l', 'sigmaf', 'sigman'].
        """
        self.l = l
        self.sigmaf = sigmaf
        self.sigman = sigman
        # A None sentinel replaces the former mutable default list, so every
        # instance gets its own list and cannot mutate a shared default.
        self.parameters = ['l', 'sigmaf', 'sigman'] if parameters is None else parameters
        if bounds is not None:
            self.bounds = bounds
        else:
            self.bounds = [default_bounds[param] for param in self.parameters]

    def K(self, X, Xstar):
        """
        Computes covariance function values over `X` and `Xstar`.

        Parameters
        ----------
        X: np.ndarray, shape=((n, nfeatures))
            Instances
        Xstar: np.ndarray, shape=((n, nfeatures))
            Instances

        Returns
        -------
        np.ndarray
            Computed covariance matrix.
        """
        r = l2norm_(X, Xstar) / self.l
        # sigmaf * (1 + sqrt(5) r + 5 r^2 / 3) * exp(-sqrt(5) r), with r
        # already scaled by l, plus white noise on coincident rows.
        one = (1 + np.sqrt(5 * r ** 2) + 5 * r ** 2 / 3)
        two = np.exp(-np.sqrt(5 * r ** 2))
        return self.sigmaf * one * two + self.sigman * kronDelta(X, Xstar)

    def gradK(self, X, Xstar, param):
        """
        Computes gradient matrix for instances `X`, `Xstar` and hyperparameter `param`.

        Parameters
        ----------
        X: np.ndarray, shape=((n, nfeatures))
            Instances
        Xstar: np.ndarray, shape=((n, nfeatures))
            Instances
        param: str
            Parameter to compute gradient matrix for ('l', 'sigmaf' or 'sigman').

        Returns
        -------
        np.ndarray
            Gradient matrix for parameter `param`.

        Raises
        ------
        ValueError
            If `param` is not a recognized hyperparameter name.
        """
        r = l2norm_(X, Xstar)
        if param == 'l':
            # dK/dl = sigmaf * 5 r^2 (1 + sqrt(5) r/l) exp(-sqrt(5) r/l) / (3 l^3)
            # Fix: the sigmaf factor was previously missing, unlike the
            # analogous gradients in squaredExponential and matern32.
            num_one = 5 * r ** 2 * np.exp(-np.sqrt(5) * r / self.l)
            num_two = np.sqrt(5) * r / self.l + 1
            res = self.sigmaf * num_one * num_two / (3 * self.l ** 3)
            return res
        elif param == 'sigmaf':
            # dK/dsigmaf is the unit-variance correlation matrix; both factors
            # must use the scaled distance r/l (the exponential previously
            # used the unscaled r, which was wrong whenever l != 1).
            one = (1 + np.sqrt(5 * (r / self.l) ** 2) + 5 * (r / self.l) ** 2 / 3)
            two = np.exp(-np.sqrt(5 * (r / self.l) ** 2))
            return one * two
        elif param == 'sigman':
            return kronDelta(X, Xstar)
        else:
            # Fix: previously fell through and silently returned None;
            # siblings raise ValueError for unknown parameter names.
            raise ValueError('Param not found')
class gammaExponential:
def __init__(self, gamma=1, l=1, sigmaf=1, sigman=1e-6, bounds=None, parameters=['gamma',
'l',
'sigmaf',
'sigman']):
"""
Gamma-exponential kernel class.
Parameters
----------
gamma: float
Hyperparameter of the Gamma-exponential covariance function.
l: float
Characteristic length-scale. Units in input space in which posterior GP values do not
change significantly.
sigmaf: float
Signal variance. Controls the overall scale of the covariance function.
sigman: float
Noise variance. Additive noise in output space.
bounds: list
List of tuples specifying hyperparameter range in optimization procedure.
parameters: list
List of strings | |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @namespace pyfortified_requests
import logging
import csv
import datetime as dt
import gzip
import http.client as http_client
import io
import ujson as json
import os
import re
import time
import requests
from pyfortified_logging import (LoggingFormat, LoggingOutput)
from pprintpp import pprint
from pyfortified_requests import (__python_required_version__)
from pyfortified_requests.errors import (get_exception_message, RequestsFortifiedErrorCodes)
from pyfortified_requests.exceptions.custom import (
RequestsFortifiedModuleError,
)
from pyfortified_requests.support import (
base_class_name,
bytes_to_human,
csv_skip_last_row,
detect_bom,
env_usage,
handle_json_decode_error,
python_check_version,
remove_bom,
validate_response,
)
from pyfortified_requests.support.curl import command_line_request_curl
from .pyfortified_requests import (RequestsFortified)
from safe_cast import safe_dict
log = logging.getLogger(__name__)
python_check_version(__python_required_version__)
class RequestsFortifiedDownload(object):
__requests_client = None
def __init__(
self,
logger_level=logging.INFO,
logger_format=LoggingFormat.JSON,
logger_output=LoggingOutput.STDOUT_COLOR,
requests_client=None,
):
self.requests_client = RequestsFortified(
logger_format=logger_format,
logger_level=logger_level,
logger_output=logger_output,
requests_client=requests_client
)
@property
def logger(self):
return self.requests_client.logger
@property
def session(self):
return self.requests_client.session
@property
def requests_session_client(self):
return self.requests_client.requests_session_client
    @property
    def requests_client(self):
        """The wrapped RequestsFortified client instance (set via the setter)."""
        return self.__requests_client
    @requests_client.setter
    def requests_client(self, value):
        """Replace the wrapped RequestsFortified client instance."""
        self.__requests_client = value
def request(self, **kwargs):
return self.requests_client.request(**kwargs)
@property
def built_request_curl(self):
return self.requests_client.built_request_curl
    def request_csv_download(
        self,
        request_method,
        request_url,
        tmp_csv_file_name,
        tmp_directory,
        request_params=None,
        request_data=None,
        request_retry=None,
        request_retry_func=None,
        request_retry_excps=None,
        request_retry_http_status_codes=None,
        request_retry_excps_func=None,
        request_headers=None,
        request_auth=None,
        request_label=None,
        build_request_curl=True,
        allow_redirects=True,
        verify=True,
        skip_first_row=False,
        skip_last_row=False,
        read_first_row=False,
        csv_delimiter=',',
        csv_header=None,
        encoding_write=None,
        encoding_read=None,
        decode_unicode=False,
    ):
        """Download a CSV file to a temp location and yield its rows as dicts.

        This method is a generator (note the ``yield`` statements below): no
        request is made until iteration begins. The download itself is retried
        up to 60 times with a 10-second delay between attempts.

        Args:
            request_method: request_method for the new :class:`Request` object.
            request_url: URL for the new :class:`Request` object.
            tmp_csv_file_name: Provide temporary name for downloaded CSV
            tmp_directory: Provide temporary directory to hold downloaded CSV
            request_params: (optional) Dictionary or bytes to be sent in the query
                string for the :class:`Request`.
            request_data: (optional) Dictionary, bytes, or file-like object to
                send in the body of the :class:`Request`.
            request_retry: (optional) Retry configuration.
            request_retry_func: (optional) Retry function, forwarded to the client.
            request_retry_excps: (optional) Exception classes that trigger a retry.
            request_retry_http_status_codes: (optional) HTTP status codes that
                trigger a retry.
            request_retry_excps_func: (optional) Retry exception function.
            request_headers: (optional) Dictionary of HTTP Headers to
                send with the :class:`Request`.
            request_auth: (optional) Auth tuple to enable
                Basic/Digest/Custom HTTP Auth.
            request_label: (optional) Label included in all log messages.
            build_request_curl: (optional) Build a copy-n-paste curl for the request.
            allow_redirects: (optional) Boolean. Set to True if
                POST/PUT/DELETE redirect following is allowed.
            verify: (optional) whether the SSL cert will be verified. A
                CA_BUNDLE path can also be provided. Defaults to ``True``.
            skip_first_row: (optional) Skip first row if it does not contain
                column headers.
            skip_last_row: (optional) Skip last row if it does not contain
                column values.
            read_first_row: (optional) Read first row (report name) separately
                from the data returned.
            csv_delimiter: (optional) Delimiter character, default comma ','.
            csv_header: (optional) Explicit field names; when provided they
                override the header row parsed from the file.
            encoding_write: (optional) Encoding used when writing the temp file.
            encoding_read: (optional) Encoding used when reading the temp file.
            decode_unicode: (optional) Forwarded to the chunked download.

        Returns:
            Generator containing CSV data by rows in JSON dictionary format.

        Raises:
            RequestsFortifiedModuleError: If no response is received, or if all
                download retries are exhausted.
        """
        _request_label = 'Request Download CSV File'
        request_label = "{0}: {1}".format(request_label, _request_label) if request_label is not None else _request_label
        log.debug(
            "{0}: Start".format(request_label),
            extra={
                'request_url': request_url,
                'encoding_write': encoding_write,
                'encoding_read': encoding_read,
            }
        )
        timer_start = dt.datetime.now()
        # Outer retry loop around request + file download: up to 60 attempts,
        # 10 seconds apart (independent of the client's own retry config).
        _attempts = 0
        _tries = 60
        _delay = 10
        while _tries:
            _attempts += 1
            log.info(
                "{0}: Attempt: {1}".format(request_label, _attempts),
                extra={
                    'request_url': request_url,
                }
            )
            # Streamed request so the body can be written to disk in chunks.
            response = self.requests_client.request(
                request_method=request_method,
                request_url=request_url,
                request_params=request_params,
                request_data=request_data,
                request_retry=request_retry,
                request_retry_func=request_retry_func,
                request_retry_excps=request_retry_excps,
                request_retry_http_status_codes=request_retry_http_status_codes,
                request_retry_excps_func=request_retry_excps_func,
                request_headers=request_headers,
                request_auth=request_auth,
                build_request_curl=build_request_curl,
                allow_redirects=allow_redirects,
                verify=verify,
                stream=True,
                request_label=request_label
            )
            if response is None:
                log.error(
                    "{0}: No response".format(request_label),
                    extra={
                        'request_url': request_url,
                    }
                )
                raise RequestsFortifiedModuleError(
                    error_message="{0}: No response".format(request_label),
                    error_code=RequestsFortifiedErrorCodes.REQ_ERR_REQUEST,
                )
            http_status_code = response.status_code
            timer_end = dt.datetime.now()
            timer_delta = timer_end - timer_start
            response_time_secs = timer_delta.seconds
            response_headers = None
            if hasattr(response, 'headers'):
                # Round-trip through JSON to coerce the headers into a plain,
                # serializable dict for logging.
                response_headers = \
                    json.loads(
                        json.dumps(
                            dict(response.headers)
                        )
                    )
            log.debug(
                "{0}: Response Status".format(request_label),
                extra={
                    'http_status_code': http_status_code,
                    'response_time_secs': response_time_secs,
                    'response_url': response.url,
                    'response_headers': safe_dict(response_headers),
                }
            )
            # download_csv (defined elsewhere in this class) streams the
            # response body to a temp file; presumably returns (None, ...)
            # on failure, which triggers a retry -- TODO confirm.
            (tmp_csv_file_path, tmp_csv_file_size) = self.download_csv(
                response,
                tmp_directory,
                tmp_csv_file_name,
                request_label=request_label,
                encoding_write=encoding_write,
                decode_unicode=decode_unicode
            )
            if tmp_csv_file_path is not None:
                break
            _tries -= 1
            if not _tries:
                log.error(
                    "{0}: Exhausted Retries".format(request_label),
                    extra={
                        'tries': _tries,
                        'request_url': request_url,
                    }
                )
                raise RequestsFortifiedModuleError(
                    error_message="{0}: Exhausted Retries".format(request_label),
                    error_code=RequestsFortifiedErrorCodes.REQ_ERR_RETRY_EXHAUSTED
                )
            log.info(
                "{0}: Performing Retry".format(request_label),
                extra={
                    'tries': _tries,
                    'delay': _delay,
                    'request_url': request_url,
                }
            )
            time.sleep(_delay)
        log.info(
            "{0}: Finished".format(request_label),
            extra={
                'file_path': tmp_csv_file_path,
                'file_size': bytes_to_human(tmp_csv_file_size),
                'encoding_read': encoding_read,
            }
        )
        log.debug(
            "{0}: Usage".format(request_label),
            extra=env_usage(tmp_directory),
        )
        # Parse the downloaded file and yield one dict per CSV row.
        with open(file=tmp_csv_file_path, mode='r', encoding=encoding_read) as csv_file_r:
            if read_first_row:
                # First line is a report title, not data: strip quotes/newline
                # and log it, leaving the file positioned at the header row.
                csv_report_name = csv_file_r.readline()
                csv_report_name = re.sub('\"', '', csv_report_name)
                csv_report_name = re.sub('\n', '', csv_report_name)
                log.info(
                    "{0}: Report".format(request_label),
                    extra={'csv_report_name': csv_report_name},
                )
            elif skip_first_row:
                next(csv_file_r)
            # The next line is treated as the column-header row.
            csv_file_header = next(csv_file_r)
            csv_header_actual = \
                [h.strip() for h in csv_file_header.split(csv_delimiter)]
            csv_header_hr = []
            index = 0
            for column_name in csv_header_actual:
                csv_header_hr.append({'index': index, 'name': column_name})
                index += 1
            log.debug(
                "{0}: Content Header".format(request_label),
                extra={'csv_header': csv_header_hr},
            )
            # Caller-provided csv_header wins over the parsed header row.
            csv_fieldnames = csv_header if csv_header else csv_header_actual
            csv_dict_reader = csv.DictReader(csv_file_r, fieldnames=csv_fieldnames, delimiter=csv_delimiter)
            if skip_last_row:
                for row in csv_skip_last_row(csv_dict_reader):
                    yield row
            else:
                for row in csv_dict_reader:
                    yield row
def request_json_download(
self,
request_method,
request_url,
tmp_json_file_name,
tmp_directory,
request_params=None,
request_data=None,
request_retry=None,
request_retry_func=None,
request_retry_excps=None,
request_retry_excps_func=None,
request_headers=None,
request_auth=None,
request_label=None,
build_request_curl=False,
allow_redirects=True,
verify=True,
encoding_write=None,
encoding_read=None,
):
"""Download and Read JSON file.
Args:
request_method: request_method for the new :class:`Request` object.
request_url: URL for the new :class:`Request` object.
tmp_json_file_name: Provide temporary name for downloaded CSV
tmp_directory: Provide temporary directory to hold downloaded CSV
request_params: (optional) Dictionary or bytes to be sent in the query
string for the :class:`Request`.
request_data: (optional) Dictionary, bytes, or file-like object to
send in the body of the :class:`Request`.
request_retry: (optional) Retry configuration.
request_headers: (optional) Dictionary of HTTP Headers to
send with the :class:`Request`.
request_auth: (optional) Auth tuple to enable
Basic/Digest/Custom HTTP Auth.
build_request_curl: (optional) Build a copy-n-paste curl for command line
that provides same request as this call.
allow_redirects: (optional) Boolean. Set to True if
POST/PUT/DELETE redirect following is allowed.
verify: (optional) whether the SSL cert will be verified. A
CA_BUNDLE path can also be provided. Defaults to ``True``.
encoding_write:
encoding_read:
decode_unicode:
Returns:
Generator containing JSON data by rows in JSON dictionary format.
"""
_request_label = "Request Download JSON File"
request_label = "{0}: {1}".format(request_label, _request_label) if request_label is not None else _request_label
log.info(
"{0}: Start".format(request_label),
extra={
'request_url': request_url,
'encoding_write': encoding_write,
'encoding_read': encoding_read,
}
)
timer_start = dt.datetime.now()
_attempts = 0
_tries = 60
_delay = 10
while _tries:
_attempts += 1
log.debug(
"{0}: Download".format(request_label),
extra={
'attempts': _attempts,
'request_url': request_url,
}
)
response = self.requests_client.request(
request_method=request_method,
request_url=request_url,
request_params=request_params,
request_data=request_data,
request_retry=request_retry,
request_retry_func=request_retry_func,
request_retry_excps=request_retry_excps,
request_retry_excps_func=request_retry_excps_func,
request_headers=request_headers,
request_auth=request_auth,
build_request_curl=build_request_curl,
allow_redirects=allow_redirects,
verify=verify,
stream=True,
request_label=request_label
)
if response is None:
log.error(
"{0}: No response".format(request_label),
extra={
'request_url': request_url,
}
)
raise RequestsFortifiedModuleError(
error_message="{0}: No response".format(request_label),
error_code=RequestsFortifiedErrorCodes.REQ_ERR_REQUEST
)
http_status_code = response.status_code
timer_end = dt.datetime.now()
timer_delta = timer_end - timer_start
response_time_secs = timer_delta.seconds
response_headers = None
if hasattr(response, 'headers'):
response_headers = \
json.loads(
json.dumps(
dict(response.headers)
)
)
log.debug(
"{0}: Response Status".format(request_label),
extra={
'http_status_code': http_status_code,
'response_time_secs': response_time_secs,
'response_url': response.url,
'response_headers': safe_dict(response_headers),
}
)
if not os.path.exists(tmp_directory):
os.mkdir(tmp_directory)
tmp_json_file_path = "{0}/{1}".format(tmp_directory, tmp_json_file_name)
if os.path.exists(tmp_json_file_path):
log.debug(
"{0}: Removing".format(request_label),
extra={'file_path': tmp_json_file_path},
)
os.remove(tmp_json_file_path)
mode_write = 'wb' if encoding_write is None else 'w'
log.debug(
"{0}: Finished".format(request_label),
extra={
'file_path': tmp_json_file_path,
'mode_write': mode_write,
'encoding_write': encoding_write,
}
)
log.debug(
"{0}: Usage".format(request_label),
extra=env_usage(tmp_directory)
)
chunk_total_sum = 0
with open(file=tmp_json_file_path, mode=mode_write, encoding=encoding_write) as json_raw_file_w:
log.debug(
"{0}: Response Raw: Started".format(request_label),
extra={
'file_path': tmp_json_file_path,
}
)
_tries -= 1
error_exception = None
error_details = None
chunk_size = 8192
try:
raw_response = response.raw
while True:
chunk = raw_response.read(chunk_size, decode_content=True)
if not chunk:
break
chunk_total_sum += chunk_size
json_raw_file_w.write(chunk)
json_raw_file_w.flush()
os.fsync(json_raw_file_w.fileno())
log.debug(
"{0}: By Chunk: Completed".format(request_label),
extra={
'file_path': tmp_json_file_path,
}
)
break
except requests.exceptions.ChunkedEncodingError as chunked_encoding_ex:
error_exception = base_class_name(chunked_encoding_ex)
error_details = get_exception_message(chunked_encoding_ex)
log.warning(
"{0}: Error: {1}".format(request_label, error_exception),
extra={
'error_details': error_details,
'chunk_total_sum': chunk_total_sum,
}
)
if not _tries:
log.error(
"{0}: Exhausted Retries: Error: {1}".format(request_label, error_exception),
)
raise
except http_client.IncompleteRead as incomplete_read_ex:
error_exception = base_class_name(incomplete_read_ex)
error_details = get_exception_message(incomplete_read_ex)
log.warning(
"{0}: IncompleteRead".format(request_label),
extra={
'error_exception': error_exception,
'error_details': error_details,
'chunk_total_sum': chunk_total_sum,
}
)
if not _tries:
log.error(
"{0}: Exhausted Retries: Error: {1}".format(request_label, error_exception),
)
raise
except requests.exceptions.RequestException as request_ex:
log.error(
"{0}: Request Exception".format(request_label),
extra={
'error_exception': base_class_name(request_ex),
'error_details': get_exception_message(request_ex),
'chunk_total_sum': chunk_total_sum,
}
)
raise
except Exception as ex:
log.error(
"{0}: Unexpected Exception".format(request_label),
extra={
'error_exception': base_class_name(ex),
'error_details': get_exception_message(ex),
'chunk_total_sum': chunk_total_sum,
}
)
raise
if not _tries:
log.error(
"{0}: Exhausted Retries".format(request_label),
extra={
'tries': _tries,
'request_url': request_url,
}
)
raise RequestsFortifiedModuleError(
error_message="{0}: Exhausted Retries: {1}".format(request_label, request_url),
error_request_curl=self.built_request_curl,
error_code=RequestsFortifiedErrorCodes.REQ_ERR_RETRY_EXHAUSTED
)
log.info(
"{0}: Performing Retry".format(request_label),
extra={
'tries': _tries,
'delay': _delay,
'request_url': request_url,
}
)
time.sleep(_delay)
tmp_json_file_size = os.path.getsize(tmp_json_file_path)
bom_enc, bom_len, bom_header = detect_bom(tmp_json_file_path)
log.info(
"{0}: By Chunk: Completed: Details".format(request_label),
extra={
'file_path': tmp_json_file_path,
'file_size': bytes_to_human(tmp_json_file_size),
'chunk_total_sum': chunk_total_sum,
'bom_encoding': bom_enc,
}
)
if bom_enc == 'gzip':
tmp_json_gz_file_path = "%s.gz" % tmp_json_file_path
os.rename(src=tmp_json_file_path, dst=tmp_json_gz_file_path)
with open(file=tmp_json_file_path, mode=mode_write, encoding=encoding_write) as json_file_w:
log.debug(
"{0}: GZip: Started".format(request_label),
extra={
'file_path': tmp_json_file_path,
}
)
with gzip.open(tmp_json_gz_file_path, 'r') as gzip_file_r:
json_file_w.write(gzip_file_r.read())
response_extra = {
'file_path': tmp_json_file_path,
'file_size': bytes_to_human(tmp_json_file_size),
}
log.info(
"{0}: Read Downloaded".format(request_label),
extra=response_extra
)
json_download = None
with open(tmp_json_file_path, mode='r') as json_file_r:
json_file_content = json_file_r.read()
try:
json_download = json.loads(json_file_content)
except ValueError as | |
<reponame>reinforcementdriving/cvat
# Copyright (C) 2020 Intel Corporation
#
# SPDX-License-Identifier: MIT
import io
import os
import os.path as osp
import random
import shutil
import tempfile
import xml.etree.ElementTree as ET
import zipfile
from collections import defaultdict
from enum import Enum
from glob import glob
from io import BytesIO
from unittest import mock
import open3d as o3d
import struct
import av
import numpy as np
from pdf2image import convert_from_bytes
from django.conf import settings
from django.contrib.auth.models import Group, User
from django.http import HttpResponse
from PIL import Image
from pycocotools import coco as coco_loader
from rest_framework import status
from rest_framework.test import APIClient, APITestCase
from cvat.apps.engine.models import (AttributeType, Data, Job, Project,
Segment, StatusChoice, Task, Label, StorageMethodChoice, StorageChoice)
from cvat.apps.engine.prepare import prepare_meta, prepare_meta_for_upload
from cvat.apps.engine.media_extractors import ValidateDimension
from cvat.apps.engine.models import DimensionType
def create_db_users(cls):
    """Create the standard test groups and six users, attaching them to *cls*.

    Each user is exposed under both a role alias and a positional alias
    (e.g. ``cls.owner`` and ``cls.user1``) so tests can use either name.
    """
    (group_admin, _) = Group.objects.get_or_create(name="admin")
    (group_user, _) = Group.objects.get_or_create(name="user")
    (group_annotator, _) = Group.objects.get_or_create(name="annotator")
    (group_observer, _) = Group.objects.get_or_create(name="observer")

    user_admin = User.objects.create_superuser(username="admin", email="",
                                               password="<PASSWORD>")
    user_admin.groups.add(group_admin)

    regular_users = []
    for username, group in (("user1", group_user),
                            ("user2", group_annotator),
                            ("user3", group_annotator),
                            ("user4", group_observer),
                            ("user5", group_user)):
        user = User.objects.create_user(username=username, password="<PASSWORD>")
        user.groups.add(group)
        regular_users.append(user)

    cls.admin = user_admin
    cls.owner = cls.user1 = regular_users[0]
    cls.assignee = cls.user2 = regular_users[1]
    cls.annotator = cls.user3 = regular_users[2]
    cls.observer = cls.user4 = regular_users[3]
    cls.user = cls.user5 = regular_users[4]
def create_db_task(data):
    """Create a Task plus its backing Data, Segments and Jobs.

    ``size`` and ``image_quality`` are popped out of *data* for the Data
    record; everything remaining feeds ``Task.objects.create``.
    Returns the saved Task.
    """
    data_settings = {
        "size": data.pop("size"),
        "image_quality": data.pop("image_quality"),
    }

    db_data = Data.objects.create(**data_settings)
    # Start from clean data/upload directories.
    shutil.rmtree(db_data.get_data_dirname(), ignore_errors=True)
    os.makedirs(db_data.get_data_dirname())
    os.makedirs(db_data.get_upload_dirname())

    db_task = Task.objects.create(**data)
    shutil.rmtree(db_task.get_task_dirname(), ignore_errors=True)
    for dirname in (db_task.get_task_dirname(),
                    db_task.get_task_logs_dirname(),
                    db_task.get_task_artifacts_dirname()):
        os.makedirs(dirname)

    db_task.data = db_data
    db_task.save()

    # One segment (and one job) per segment_size-wide frame window.
    frame_count = db_task.data.size
    step = db_task.segment_size
    for start_frame in range(0, frame_count, step):
        db_segment = Segment()
        db_segment.task = db_task
        db_segment.start_frame = start_frame
        db_segment.stop_frame = min(start_frame + step - 1, frame_count - 1)
        db_segment.save()

        db_job = Job()
        db_job.segment = db_segment
        db_job.save()

    return db_task
def create_dummy_db_tasks(obj, project=None):
    """Create four canned tasks owned by various test users.

    *obj* supplies the user fixtures (owner/assignee/user/admin); *project*
    optionally attaches every task to a project. Returns the tasks in
    creation order.
    """
    task_specs = [
        {
            "name": "my task #1",
            "owner": obj.owner,
            "assignee": obj.assignee,
            "overlap": 0,
            "segment_size": 100,
            "image_quality": 75,
            "size": 100,
            "project": project
        },
        {
            "name": "my multijob task",
            "owner": obj.user,
            "overlap": 0,
            "segment_size": 100,
            "image_quality": 50,
            "size": 200,
            "project": project
        },
        {
            "name": "my task #2",
            "owner": obj.owner,
            "assignee": obj.assignee,
            "overlap": 0,
            "segment_size": 100,
            "image_quality": 75,
            "size": 100,
            "project": project
        },
        {
            "name": "<NAME>",
            "owner": obj.admin,
            "overlap": 0,
            "segment_size": 50,
            "image_quality": 95,
            "size": 50,
            "project": project
        },
    ]
    return [create_db_task(spec) for spec in task_specs]
def create_dummy_db_projects(obj):
    """Create five canned projects; all except the first also get dummy tasks.

    Returns the projects in creation order.
    """
    projects = []

    # The first project deliberately stays empty (no tasks).
    db_project = Project.objects.create(name="my empty project",
                                        owner=obj.owner,
                                        assignee=obj.assignee)
    projects.append(db_project)

    populated_specs = [
        {"name": "my project without assignee", "owner": obj.user},
        {"name": "my big project", "owner": obj.owner, "assignee": obj.assignee},
        {"name": "public project"},
        {"name": "super project", "owner": obj.admin, "assignee": obj.assignee},
    ]
    for spec in populated_specs:
        db_project = Project.objects.create(**spec)
        create_dummy_db_tasks(obj, db_project)
        projects.append(db_project)

    return projects
class ForceLogin:
    """Context manager that force-logs *user* into *client* for the block.

    A falsy *user* turns both entry and exit into no-ops, so tests can run
    the anonymous path through the same code shape.
    """

    def __init__(self, user, client):
        self.user = user
        self.client = client

    def __enter__(self):
        if self.user:
            self.client.force_login(
                self.user, backend='django.contrib.auth.backends.ModelBackend')
        return self

    def __exit__(self, exception_type, exception_value, traceback):
        if self.user:
            self.client.logout()
class JobGetAPITestCase(APITestCase):
    """GET /api/v1/jobs/<id>: verifies which roles may read a job."""

    def setUp(self):
        self.client = APIClient()

    @classmethod
    def setUpTestData(cls):
        create_db_users(cls)
        cls.task = create_dummy_db_tasks(cls)[0]
        cls.job = Job.objects.filter(segment__task_id=cls.task.id).first()
        cls.job.assignee = cls.annotator
        cls.job.save()

    def _run_api_v1_jobs_id(self, jid, user):
        with ForceLogin(user, self.client):
            return self.client.get('/api/v1/jobs/{}'.format(jid))

    def _check_request(self, response):
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data["id"], self.job.id)
        self.assertEqual(response.data["status"], StatusChoice.ANNOTATION)
        self.assertEqual(response.data["start_frame"], self.job.segment.start_frame)
        self.assertEqual(response.data["stop_frame"], self.job.segment.stop_frame)

    def _check_allowed(self, user):
        # The existing job is readable; a missing id yields 404.
        self._check_request(self._run_api_v1_jobs_id(self.job.id, user))
        missing = self._run_api_v1_jobs_id(self.job.id + 10, user)
        self.assertEqual(missing.status_code, status.HTTP_404_NOT_FOUND)

    def test_api_v1_jobs_id_admin(self):
        self._check_allowed(self.admin)

    def test_api_v1_jobs_id_owner(self):
        self._check_allowed(self.owner)

    def test_api_v1_jobs_id_annotator(self):
        self._check_allowed(self.annotator)

    def test_api_v1_jobs_id_observer(self):
        self._check_allowed(self.observer)

    def test_api_v1_jobs_id_user(self):
        # An unrelated user gets 403 on a real job, 404 on a missing one.
        response = self._run_api_v1_jobs_id(self.job.id, self.user)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
        response = self._run_api_v1_jobs_id(self.job.id + 10, self.user)
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

    def test_api_v1_jobs_id_no_auth(self):
        # Anonymous requests are rejected with 401 regardless of the id.
        for jid in (self.job.id, self.job.id + 10):
            response = self._run_api_v1_jobs_id(jid, None)
            self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
class JobUpdateAPITestCase(APITestCase):
    """PUT /api/v1/jobs/<id>: verifies which roles may update a job."""

    def setUp(self):
        self.client = APIClient()
        self.task = create_dummy_db_tasks(self)[0]
        self.job = Job.objects.filter(segment__task_id=self.task.id).first()
        self.job.assignee = self.annotator
        self.job.save()

    @classmethod
    def setUpTestData(cls):
        create_db_users(cls)

    def _run_api_v1_jobs_id(self, jid, user, data):
        with ForceLogin(user, self.client):
            return self.client.put('/api/v1/jobs/{}'.format(jid), data=data,
                                   format='json')

    def _check_request(self, response, data):
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data["id"], self.job.id)
        self.assertEqual(response.data["status"], data.get('status', self.job.status))
        # Fields absent from the payload must keep their stored values.
        assignee = self.job.assignee.id if self.job.assignee else None
        self.assertEqual(response.data["assignee"]["id"], data.get('assignee_id', assignee))
        self.assertEqual(response.data["start_frame"], self.job.segment.start_frame)
        self.assertEqual(response.data["stop_frame"], self.job.segment.stop_frame)

    def _check_404_on_missing(self, user, data):
        # Updating a nonexistent job id must yield 404 for any role.
        response = self._run_api_v1_jobs_id(self.job.id + 10, user, data)
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

    def test_api_v1_jobs_id_admin(self):
        data = {"status": StatusChoice.COMPLETED, "assignee_id": self.owner.id}
        self._check_request(self._run_api_v1_jobs_id(self.job.id, self.admin, data), data)
        self._check_404_on_missing(self.admin, data)

    def test_api_v1_jobs_id_owner(self):
        data = {"status": StatusChoice.VALIDATION, "assignee_id": self.annotator.id}
        self._check_request(self._run_api_v1_jobs_id(self.job.id, self.owner, data), data)
        self._check_404_on_missing(self.owner, data)

    def test_api_v1_jobs_id_annotator(self):
        data = {"status": StatusChoice.ANNOTATION, "assignee_id": self.user.id}
        self._check_request(self._run_api_v1_jobs_id(self.job.id, self.annotator, data), data)
        self._check_404_on_missing(self.annotator, data)

    def test_api_v1_jobs_id_observer(self):
        data = {"status": StatusChoice.ANNOTATION, "assignee_id": self.admin.id}
        response = self._run_api_v1_jobs_id(self.job.id, self.observer, data)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
        self._check_404_on_missing(self.observer, data)

    def test_api_v1_jobs_id_user(self):
        data = {"status": StatusChoice.ANNOTATION, "assignee_id": self.user.id}
        response = self._run_api_v1_jobs_id(self.job.id, self.user, data)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
        self._check_404_on_missing(self.user, data)

    def test_api_v1_jobs_id_no_auth(self):
        data = {"status": StatusChoice.ANNOTATION, "assignee_id": self.user.id}
        for jid in (self.job.id, self.job.id + 10):
            response = self._run_api_v1_jobs_id(jid, None, data)
            self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
class JobPartialUpdateAPITestCase(JobUpdateAPITestCase):
    """Re-runs the update suite through PATCH plus two partial-payload cases."""

    def _run_api_v1_jobs_id(self, jid, user, data):
        with ForceLogin(user, self.client):
            return self.client.patch('/api/v1/jobs/{}'.format(jid), data=data,
                                     format='json')

    def test_api_v1_jobs_id_annotator_partial(self):
        # PATCH with only a status; assignee must stay untouched.
        data = {"status": StatusChoice.VALIDATION}
        self._check_request(self._run_api_v1_jobs_id(self.job.id, self.owner, data), data)

    def test_api_v1_jobs_id_admin_partial(self):
        # PATCH with only an assignee; status must stay untouched.
        data = {"assignee_id": self.user.id}
        self._check_request(self._run_api_v1_jobs_id(self.job.id, self.owner, data), data)
class JobReview(APITestCase):
    def setUp(self):
        # Fresh API client per test; authentication is applied per request
        # via the ForceLogin context manager.
        self.client = APIClient()
    @classmethod
    def setUpTestData(cls):
        """Create users, one task/job, and canned review/comment payloads."""
        create_db_users(cls)
        cls.task = create_dummy_db_tasks(cls)[0]
        cls.job = Job.objects.filter(segment__task_id=cls.task.id).first()
        # The annotator doubles as the reviewer throughout this suite.
        cls.reviewer = cls.annotator
        cls.job.reviewer = cls.reviewer
        cls.job.assignee = cls.assignee
        cls.job.save()
        # Payload that rejects the job with one issue carrying two comments.
        cls.reject_review_data = {
            "job": cls.job.id,
            "issue_set": [
                {
                    "position": [
                        50, 50, 100, 100
                    ],
                    "comment_set": [
                        {
                            "message": "This is wrong!"
                        }, {
                            "message": "This is wrong 2!"
                        }
                    ],
                    "frame": 0
                }
            ],
            "estimated_quality": 3,
            "status": "rejected"
        }
        # Payload that accepts the job with no issues.
        cls.accept_review_data = {
            "job": cls.job.id,
            "issue_set": [],
            "estimated_quality": 5,
            "status": "accepted"
        }
        # Payload that keeps the job in validation for another review round.
        cls.review_further_data = {
            "job": cls.job.id,
            "issue_set": [],
            "estimated_quality": 4,
            "status": "review_further",
            "reviewer_id": cls.reviewer.id
        }
        # Standalone comments posted against an issue in the comment tests.
        # NOTE(review): these dicts are shared class state — tests should
        # copy them before mutating.
        cls.create_comment_data = [{
            "message": "This is testing message"
        }, {
            "message": "This is testing message 2"
        }, {
            "message": "This is testing message 3"
        }]
def _post_request(self, path, user, data):
with ForceLogin(user, self.client):
response = self.client.post(path, data=data, format='json')
return response
def _patch_request(self, path, user, data):
with ForceLogin(user, self.client):
response = self.client.patch(path, data=data, format='json')
return response
def _get_request(self, path, user):
with ForceLogin(user, self.client):
response = self.client.get(path)
return response
def _delete_request(self, path, user):
with ForceLogin(user, self.client):
response = self.client.delete(path)
return response
def _fetch_job_from_db(self):
self.job = Job.objects.prefetch_related(
'review_set',
'review_set__issue_set',
'review_set__issue_set__comment_set').filter(segment__task_id=self.task.id).first()
def _set_annotation_status(self):
self._patch_request('/api/v1/jobs/{}'.format(self.job.id), self.admin, {'status': 'annotation'})
def _set_validation_status(self):
self._patch_request('/api/v1/jobs/{}'.format(self.job.id), self.admin, {'status': 'validation'})
def test_api_v1_job_annotation_review(self):
self._set_annotation_status()
response = self._post_request('/api/v1/reviews', self.reviewer, self.accept_review_data)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
response = self._post_request('/api/v1/reviews', self.assignee, self.accept_review_data)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_api_v1_job_validation_review_create(self):
self._set_validation_status()
response = self._post_request('/api/v1/reviews', self.reviewer, self.accept_review_data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self._fetch_job_from_db()
self.assertEqual(self.job.status, 'completed')
response = self._post_request('/api/v1/reviews', self.assignee, self.accept_review_data)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.job.review_set.first().delete()
def test_api_v1_job_reject_review(self):
self._set_validation_status()
response = self._post_request('/api/v1/reviews', self.reviewer, self.reject_review_data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self._fetch_job_from_db()
self.assertEqual(self.job.status, 'annotation')
self.job.review_set.first().delete()
def test_api_v1_job_review_further(self):
self._set_validation_status()
response = self._post_request('/api/v1/reviews', self.reviewer, self.review_further_data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self._fetch_job_from_db()
self.assertEqual(self.job.status, 'validation')
self.job.review_set.first().delete()
def test_api_v1_create_review_comment(self):
self._set_validation_status()
response = self._post_request('/api/v1/reviews', self.reviewer, self.reject_review_data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
issue_id = response.data['issue_set'][0]['id']
comments = self.create_comment_data[:]
for comment in comments:
comment.update({
'issue': issue_id
})
response = self._post_request('/api/v1/comments', self.assignee, comment)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
response = self._get_request('/api/v1/issues/{}/comments'.format(issue_id), self.reviewer)
self.assertIsInstance(response.data, cls = list)
self.assertEqual(len(response.data), 5)
self.job.review_set.all().delete()
self.job.issue_set.all().delete()
def test_api_v1_edit_review_comment(self):
self._set_validation_status()
response = self._post_request('/api/v1/reviews', self.reviewer, self.reject_review_data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
issue_id = response.data['issue_set'][0]['id']
comments = self.create_comment_data[:]
for comment in comments:
comment.update({
'issue': issue_id
})
response = self._post_request('/api/v1/comments', self.assignee, comment)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
response = self._get_request('/api/v1/issues/{}/comments'.format(issue_id), self.reviewer)
last_comment = max(response.data, key=lambda comment: comment['id'])
last_comment.update({
'message': 'fixed message 3'
})
last_comment.update({
'author_id': last_comment['author']['id'],
'author': None
})
response = self._patch_request('/api/v1/comments/{}'.format(last_comment['id']), self.reviewer, last_comment)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
response = self._patch_request('/api/v1/comments/{}'.format(last_comment['id']), self.assignee, last_comment)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['message'], last_comment['message'])
response = self._get_request('/api/v1/issues/{}/comments'.format(issue_id), self.reviewer)
updated_last_comment = max(response.data, key=lambda comment: comment['id'])
self.assertEqual(updated_last_comment['message'], last_comment['message'])
self.job.review_set.all().delete()
self.job.issue_set.all().delete()
def test_api_v1_remove_comment(self):
self._set_validation_status()
response = self._post_request('/api/v1/reviews', self.reviewer, self.reject_review_data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
issue_id = response.data['issue_set'][0]['id']
comments = self.create_comment_data[:]
for comment in comments:
comment.update({
'issue': issue_id
})
response = self._post_request('/api/v1/comments', self.assignee, comment)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
response = self._get_request('/api/v1/issues/{}/comments'.format(issue_id), self.reviewer)
last_comment = max(response.data, key=lambda comment: comment['id'])
response = self._delete_request('/api/v1/comments/{}'.format(last_comment['id']), self.reviewer)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
response = self._delete_request('/api/v1/comments/{}'.format(last_comment['id']), self.assignee)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self._fetch_job_from_db()
ids = list(map(lambda | |
"""
--------------------------
OFFLINE OPTIMAL BENCHMARK:
---------------------------
It uses IBM CPLEX to maximise the social walfare of a current network structure and task list by solving the current environment given the usual problem restrictions.
This represents the upper bound of the social walfare.
In order to use the benchmark, call the offline_optimal() function with the required parameters.
"""
from numpy.core.fromnumeric import take
import pandas as pd
import numpy as np
from tqdm import tqdm
from docplex.mp.model import Model as Model
from docplex.mp.environment import Environment
def as_df(cplex_solution, name_key='name', value_key='value'):
    """Convert a CPLEX solution to a two-column pandas DataFrame.

    :param cplex_solution: solution object exposing ``iter_var_values()``,
        which yields ``(variable, value)`` pairs.
    :param name_key: column name for variable names. Default is 'name'.
    :param value_key: column name for values. Default is 'value'.
    :return: a pandas DataFrame with one row per assigned variable.
    """
    assert name_key
    assert value_key
    assert name_key != value_key
    # pandas is a hard dependency of this module (imported at the top as
    # ``pd``), so no in-function import/ImportError guard is needed here.
    names = []
    values = []
    for dv, dvv in cplex_solution.iter_var_values():
        names.append(dv.to_string())
        values.append(dvv)
    return pd.DataFrame({name_key: names, value_key: values})
def format_allocation(nr_nodes, nr_timestamps, max_str_length_task,
                      cplex_solution):
    """Build a per-node, per-timestamp view of the task allocation.

    Args:
        nr_nodes (int): Number of fog nodes.
        nr_timestamps (int): Number of timestamps.
        max_str_length_task (int): Maximum length of a task name; used to
            right-align task ids in fixed-width cells.
        cplex_solution (cplex.Solution): The solution of the cplex optimizer.

    Returns:
        dict: ``{"node_<i>": {task_id: [cell, ...]}}`` where each cell is a
        run of dots (idle) or the right-aligned task id.
    """
    # Convert to dataframe
    solution_df = as_df(cplex_solution)

    # Placeholder entry per node.
    output = {"node_" + str(i): {} for i in range(nr_nodes)}

    # Fixed-width "idle" cell (was a needless join over a comprehension).
    fill_string = '.' * max_str_length_task

    # For every variable in the solution update the output
    for var_name in solution_df['name']:
        # Only allocation variables (task_node_time_<t>_<n>_<ts>) have six
        # '_'-separated parts.
        # NOTE(review): `break` assumes allocation variables come first in
        # the solution output; `continue` would be order-independent —
        # behavior kept as-is, confirm the ordering assumption.
        if len(var_name.split('_')) != 6:
            break
        # Only keep variables the solver set to 1.0 (chosen allocations).
        if solution_df[solution_df['name'] == var_name].iloc[0]['value'] != 1.0:
            continue
        # Extract task/node/timestamp ids from the variable name.
        task = int(var_name.split('_')[3])
        node = int(var_name.split('_')[4])
        timestamp = int(var_name.split('_')[5])
        node_key = 'node_' + str(node)
        # First sighting of this task on this node: start a blank timeline.
        if task not in output[node_key]:
            output[node_key][task] = [fill_string for _ in range(nr_timestamps)]
        # Mark the allocated slot with the right-aligned task id.
        output[node_key][task][timestamp] = \
            fill_string[len(str(task)):] + str(task)
    return output
def compute_walfare_based_on_tasks(model_solution, df_tasks, df_nodes):
    """Compute the social welfare of a solution from its allocated tasks.

    Welfare is the sum, over allocated (task, node) pairs, of the task's
    valuation coefficient minus its per-timestamp resource cost on that
    node, multiplied by the number of timestamps the task was allocated.

    Args:
        model_solution (CPLEX.mp.model.solution): A solution object from the
            CPLEX model.
        df_tasks (dataframe): Tasks with valuation/CPU/RAM/DISK columns.
        df_nodes (dataframe): Nodes with CPU_cost/RAM_cost/DISK_cost columns.

    Returns:
        [int]: The total social welfare.
    """
    # Convert to dataframe
    solution_df = as_df(model_solution)

    tasks_dict = {}   # task id -> node id it was allocated on
    task_usage = {}   # task id -> number of allocated timestamps

    # For every variable in the solution update the output
    for var_name in solution_df['name']:
        # Only allocation variables have six '_'-separated parts.
        # NOTE(review): `break` assumes allocation variables come first in
        # the solution output; `continue` would be order-independent.
        if len(var_name.split('_')) != 6:
            break
        # Only keep variables the solver set to 1.0 (chosen allocations).
        if solution_df[solution_df['name'] == var_name].iloc[0]['value'] != 1.0:
            continue
        task = int(var_name.split('_')[3])
        node = int(var_name.split('_')[4])
        tasks_dict[task] = node
        task_usage[task] = task_usage.get(task, 0) + 1

    # Sum each task's per-timestamp net value over its allocated timestamps.
    return sum(
        (df_tasks.loc[task, 'valuation_coefficient'] -
         (df_tasks.loc[task, 'CPU'] * df_nodes.loc[node, 'CPU_cost'] +
          df_tasks.loc[task, 'RAM'] * df_nodes.loc[node, 'RAM_cost'] +
          df_tasks.loc[task, 'DISK'] * df_nodes.loc[node, 'DISK_cost'])) *
        task_usage[task]
        for (task, node) in tasks_dict.items())
def offline_optimal(df_tasks, df_nodes, timestamp_nr, task_nr, node_nr,
mipgap=0.1):
"""Solve the constraints allocation problem using CPLEX.
Args:
df_tasks (dataframe): A dataframe containing information about the tasks.
df_nodes (dataframe): A dataframe containing information about the fog nodes.
timestamp_nr (int): Number of timestamps.
task_nr (int): Number of tasks.
node_nr (int): Number of fog nodes.
:param mipgap: relative tolerance of the Cplex solution
Returns:
[double, double, int, dict]: [social welfare from allocated tasks, social walfare from model, number of allocated tasks, allocation dict]
"""
mdl = Model(name='Maximise social welfare')
# set the tolerance to 1%
mdl.parameters.mip.tolerances.mipgap = mipgap
# auxiliary variable representing if a task is allocated to a fognode n at time slot t
z = mdl.binary_var_dict(
((task, fog_node, timestamp) for task in range(task_nr)
for fog_node in range(node_nr) for timestamp in range(timestamp_nr)),
name="task_node_time")
# variable that is one if z change from 0 to 1
d = mdl.binary_var_dict(((task, timestamp) for task in range(task_nr)
for timestamp in range(timestamp_nr - 1)),
name='task_timestamp')
x = mdl.binary_var_dict(((task, node) for task in range(
task_nr) for node in range(node_nr)), name="task_node")
for task in range(task_nr):
mdl.add_constraint(mdl.sum(x[task, node]
for node in range(node_nr)) <= 1)
# # Flag tasks that are impossible to allocate due to time constraints
# for index, task in df_tasks.iterrows():
# if(task.loc['start_time'] + task.loc['usage_time'] > timestamp_nr - 1):
# for node in range(node_nr):
# for timestamp in range(timestamp_nr):
# mdl.add_constraint(z[index,node,timestamp] == 0)
# time constraints
for task in range(task_nr):
mdl.add_constraint(
(mdl.sum(z[task, fog_node, timestamp] for timestamp in range(
timestamp_nr) for fog_node in range(node_nr)) <= df_tasks.loc[
task, 'usage_time'])) # allocated time <= required time
for fog_node in range(node_nr):
# mdl.add_constraint((mdl.sum(z[task, fog_node, timestamp] for timestamp in range(
# timestamp_nr)) <= df_tasks.loc[task, 'usage_tim
# e'])) # allocated time <= required time
# mdl.add_constraint((mdl.sum(z[task, fog_node, timestamp] for timestamp in range(
# timestamp_nr)) <= df_tasks.loc[task, 'usage_time'])) # allocated time <= required time
for timestamp in range(int(df_tasks.loc[task, 'start_time'])):
# no usage time before the start time
mdl.add_constraint(z[task, fog_node, timestamp] == 0)
for timestamp in range(int(df_tasks.loc[task, 'deadline'] + 1),
timestamp_nr):
# no usage time after the deadline
mdl.add_constraint(z[task, fog_node, timestamp] == 0)
# resource constraints
for timestamp in range(timestamp_nr):
for fog_node in range(node_nr):
mdl.add_constraint(mdl.sum(
z[task, fog_node, timestamp] * df_tasks.loc[task, 'CPU'] for
task in range(task_nr))
<= df_nodes.loc[fog_node, 'CPU'])
mdl.add_constraint(mdl.sum(
z[task, fog_node, timestamp] * df_tasks.loc[task, 'RAM'] for
task in range(task_nr))
<= df_nodes.loc[fog_node, 'RAM'])
mdl.add_constraint(mdl.sum(
z[task, fog_node, timestamp] * df_tasks.loc[task, 'DISK']
for task in range(task_nr)) <= df_nodes.loc[
fog_node, 'DISK'])
# one tasktimestamp is only processed in one fog node
for timestamp in range(timestamp_nr):
for task in range(task_nr):
mdl.add_constraint(mdl.sum(z[task, fog_node, timestamp]
for fog_node in range(node_nr)) <= 1)
# tasks are non-preemptive for timestamps
# d is 1 if z change from 0 to 1
for task in range(task_nr):
for timestamp in range(timestamp_nr - 1):
mdl.add_constraint(d[task, timestamp] == (
mdl.sum(z[task, fog_node, timestamp + 1] for fog_node in
range(node_nr)) - 1
>= mdl.sum(
z[task, fog_node, timestamp] for fog_node in range(node_nr))))
# sum(d) inspect of time is less or equal to one
mdl.add_constraint(mdl.sum(d[task, timestamp]
for timestamp in
range(timestamp_nr - 1)) <= 1)
# tasks are non-preemptive for fog nodes
for fog_node in range(node_nr):
for task in range(task_nr):
start_time = df_tasks.loc[task, 'start_time']
deadline = df_tasks.loc[task, "deadline"]
for t1 in range(start_time + 1, deadline):
for t2 in range(t1, deadline):
mdl.add_constraint(
z[task, fog_node, t1] - z[task, fog_node, t1 - 1] + z[
task, fog_node, t2] -
z[task, fog_node, t2 + 1]
>= -1)
# value_of_tasks = mdl.sum(df_tasks.loc[task, 'valuation_coefficient'] / df_tasks.loc[task, 'usage_time'] * z[task, fog_node, timestamp] * x[task, fog_node]
# for task in range(task_nr) for fog_node in range(node_nr) for timestamp in range(timestamp_nr))
value_of_tasks = mdl.sum(
df_tasks.loc[task, 'valuation_coefficient'] * z[
task, fog_node, timestamp] * x[
task, fog_node]
for task in range(task_nr) for fog_node in range(node_nr) for timestamp
in
range(timestamp_nr))
CPU_cost = mdl.sum(
df_tasks.loc[task, 'CPU'] * df_nodes.loc[fog_node, 'CPU_cost'] * z[
task, fog_node, timestamp]
for task in range(task_nr) for fog_node in range(node_nr) for timestamp
in
range(timestamp_nr))
RAM_cost = mdl.sum(
df_tasks.loc[task, 'RAM'] * df_nodes.loc[fog_node, 'RAM_cost'] * z[
task, fog_node, timestamp]
for task in range(task_nr) for fog_node in range(node_nr) for timestamp
in
range(timestamp_nr))
DISK_cost = mdl.sum(
df_tasks.loc[task, 'DISK'] * df_nodes.loc[fog_node, 'DISK_cost'] * z[
task, fog_node, timestamp]
for task in range(task_nr) for fog_node in range(node_nr) for timestamp
in
range(timestamp_nr))
social_welfare = value_of_tasks - CPU_cost - RAM_cost - | |
# repo: mclark58/kb_PRINSEQ — file: test/kb_PRINSEQ_server_test.py
# -*- coding: utf-8 -*-
import os # noqa: F401
import shutil
import time
import unittest
from configparser import ConfigParser # py3
from os import environ
import requests
from installed_clients.DataFileUtilClient import DataFileUtil
from installed_clients.ReadsUtilsClient import ReadsUtils
from installed_clients.WorkspaceClient import Workspace as workspaceService
from installed_clients.baseclient import ServerError as DFUError
from kb_PRINSEQ.authclient import KBaseAuth as _KBaseAuth
from kb_PRINSEQ.kb_PRINSEQImpl import kb_PRINSEQ
from kb_PRINSEQ.kb_PRINSEQServer import MethodContext
class kb_PRINSEQTest(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        """One-time suite setup: read the deploy config, build an auth
        context, instantiate the service impl, reset the scratch dir,
        create a test workspace, and seed test reads objects."""
        cls.token = environ.get('KB_AUTH_TOKEN', None)
        config_file = environ.get('KB_DEPLOYMENT_CONFIG', None)
        cls.cfg = {}
        config = ConfigParser()
        config.read(config_file)
        # Flatten the [kb_PRINSEQ] section into a plain dict.
        for nameval in config.items('kb_PRINSEQ'):
            cls.cfg[nameval[0]] = nameval[1]
        authServiceUrl = cls.cfg.get('auth-service-url',
                                     "https://kbase.us/services/authorization/Sessions/Login")
        auth_client = _KBaseAuth(authServiceUrl)
        user_id = auth_client.get_user(cls.token)
        # WARNING: don't call any logging methods on the context object,
        # it'll result in a NoneType error
        cls.ctx = MethodContext(None)
        cls.ctx.update({'token': cls.token,
                        'user_id': user_id,
                        'provenance': [
                            {'service': 'kb_PRINSEQ',
                             'method': 'please_never_use_it_in_production',
                             'method_params': []
                             }],
                        'authenticated': 1})
        cls.shockURL = cls.cfg['shock-url']
        cls.wsURL = cls.cfg['workspace-url']
        cls.wsClient = workspaceService(cls.wsURL, token=cls.token)
        cls.serviceImpl = kb_PRINSEQ(cls.cfg)
        # cls.ws = workspaceService(cls.wsURL, token=token)
        # cls.ws = Workspace(cls.cfg['workspace-url'], token=cls.token)
        # cls.hs = HandleService(url=cls.cfg['handle-service-url'],
        #                        token=cls.token)
        # Start every run from an empty scratch directory.
        # NOTE(review): rmtree without ignore_errors assumes the scratch dir
        # already exists — confirm the deploy config guarantees that.
        cls.scratch = cls.cfg['scratch']
        shutil.rmtree(cls.scratch)
        os.mkdir(cls.scratch)
        # Millisecond-timestamped workspace name avoids collisions between runs.
        suffix = int(time.time() * 1000)
        wsName = "test_kb_PRINSEQ_" + str(suffix)
        cls.ws_info = cls.wsClient.create_workspace({'workspace': wsName})
        cls.dfu = DataFileUtil(os.environ['SDK_CALLBACK_URL'], token=cls.token)
        # Track shock nodes created by fixtures so tearDownClass can delete them.
        cls.nodes_to_delete = []
        cls.nodes_to_delete.extend(cls.upload_test_reads())
        print("NODES TO DELETE: {}".format(str(cls.nodes_to_delete)))
        print('\n\n=============== Starting tests ==================')
@classmethod
def tearDownClass(cls):
if cls.getWsName():
cls.wsClient.delete_workspace({'workspace': cls.getWsName()})
print(('Test workspace {} was deleted'.format(str(cls.getWsName()))))
if hasattr(cls, 'nodes_to_delete'):
for node in cls.nodes_to_delete:
cls.delete_shock_node(node)
def getWsClient(self):
return self.__class__.wsClient
    @classmethod
    def getWsName(cls):
        # ws_info is the tuple returned by create_workspace(); index 1 holds
        # the workspace name.
        return cls.ws_info[1]
def getImpl(self):
return self.__class__.serviceImpl
def getContext(self):
return self.__class__.ctx
@classmethod
def upload_test_reads(cls):
"""
Seeding an initial SE and PE Reads objects to test filtering
"""
header = dict()
header["Authorization"] = "Oauth {0}".format(cls.token)
# readsUtils_Client = ReadsUtils(url=self.callback_url, token=ctx['token']) # SDK local
readsUtils_Client = ReadsUtils(os.environ['SDK_CALLBACK_URL'], token=cls.token)
temp_nodes = []
fwdtf = 'small_forward.fq'
revtf = 'small_reverse.fq'
fwdtarget = os.path.join(cls.scratch, fwdtf)
revtarget = os.path.join(cls.scratch, revtf)
print("CWD: "+str(os.getcwd()))
shutil.copy('/kb/module/test/data/' + fwdtf, fwdtarget)
shutil.copy('/kb/module/test/data/' + revtf, revtarget)
# Upload single end reads
cls.se_reads_reference = \
readsUtils_Client.upload_reads({'wsname': cls.getWsName(),
'name': "se_reads",
'sequencing_tech': 'Illumina',
'fwd_file': fwdtarget}
)['obj_ref']
se_data = cls.dfu.get_objects(
{'object_refs': [cls.getWsName() + '/se_reads']})['data'][0]['data']
temp_nodes.append(se_data['lib']['file']['id'])
# Upload paired end reads
cls.pe_reads_reference = \
readsUtils_Client.upload_reads({'wsname': cls.getWsName(),
'name': "pe_reads",
'sequencing_tech': 'Illumina',
'fwd_file': fwdtarget,
'rev_file': revtarget,
'insert_size_mean': 42,
'insert_size_std_dev': 10,
}
)['obj_ref']
pe_data = cls.dfu.get_objects(
{'object_refs': [cls.getWsName() + '/pe_reads']})['data'][0]['data']
temp_nodes.append(pe_data['lib1']['file']['id'])
return temp_nodes
@classmethod
def delete_shock_node(cls, node_id):
header = {'Authorization': 'Oauth {0}'.format(cls.token)}
requests.delete(cls.shockURL + '/node/' + node_id, headers=header,
allow_redirects=True)
print('Deleted shock node ' + node_id)
@classmethod
def getPeRef(cls):
return cls.pe_reads_reference
@classmethod
def getSeRef(cls):
print("READS REFERENCE:"+str(cls.se_reads_reference))
return cls.se_reads_reference
def test_invalid_threshold_value(self):
output_reads_name = "SE_dust_2"
lc_method = "dust"
lc_threshold = 200
exception = ValueError
with self.assertRaises(exception) as context:
self.getImpl().execReadLibraryPRINSEQ(self.ctx, {"input_reads_ref":
self.se_reads_reference,
"output_ws": self.getWsName(),
"output_reads_name": output_reads_name,
"lc_method": lc_method,
"lc_dust_threshold": lc_threshold})
self.assertEqual(("The threshold for {} must be between 0 and 100, it is currently " +
"set to : {}").format(lc_method,
lc_threshold),
str(context.exception.message))
def test_no_entropy_threshold(self):
output_reads_name = "SE_entropy_2"
lc_method = "entropy"
lc_threshold = 200
exception = ValueError
with self.assertRaises(exception) as context:
self.getImpl().execReadLibraryPRINSEQ(self.ctx, {"input_reads_ref":
self.se_reads_reference,
"output_ws": self.getWsName(),
"output_reads_name": output_reads_name,
"lc_method": lc_method,
"lc_dust_threshold": lc_threshold})
self.assertEqual(("A low complexity threshold needs to be entered for " +
"{}").format(lc_method),
str(context.exception.message))
def test_no_dust_threshold(self):
# The original input reads file has 12500 reads. This filtered nearly 3000 reads.
output_reads_name = "SE_dust_2"
lc_method = "dust"
lc_threshold = 200
exception = ValueError
with self.assertRaises(exception) as context:
self.getImpl().execReadLibraryPRINSEQ(self.ctx, {"input_reads_ref":
self.se_reads_reference,
"output_ws": self.getWsName(),
"output_reads_name": output_reads_name,
"lc_method": lc_method,
"lc_entropy_threshold": lc_threshold})
self.assertEqual(("A low complexity threshold needs to be entered for " +
"{}").format(lc_method),
str(context.exception.message))
def test_se_dust_partial(self):
# The original input reads file has 12500 reads. This filtered nearly 3000 reads.
output_reads_name = "SE_dust_2"
lc_method = "dust"
lc_threshold = 2
self.getImpl().execReadLibraryPRINSEQ(self.ctx, {"input_reads_ref": self.se_reads_reference,
"output_ws": self.getWsName(),
"output_reads_name": output_reads_name,
"lc_method": lc_method,
"lc_dust_threshold": lc_threshold})
reads_object = self.dfu.get_objects(
{'object_refs': [self.getWsName() + '/' + output_reads_name]})['data'][0]['data']
self.assertEqual(reads_object['read_count'], 9544)
self.assertEqual(reads_object['sequencing_tech'], "Illumina")
node = reads_object['lib']['file']['id']
self.delete_shock_node(node)
def test_se_dust_loose(self):
# The original input reads file has 12500 reads. None of the reads get filtered.
output_reads_name = "SE_dust_40"
lc_method = "dust"
lc_threshold = 40
self.getImpl().execReadLibraryPRINSEQ(self.ctx, {"input_reads_ref": self.se_reads_reference,
"output_ws": self.getWsName(),
"output_reads_name": output_reads_name,
"lc_method": lc_method,
"lc_dust_threshold": lc_threshold})
reads_object = self.dfu.get_objects(
{'object_refs': [self.getWsName() + '/' + output_reads_name]})['data'][0]['data']
self.assertEqual(reads_object['read_count'], 12500)
node = reads_object['lib']['file']['id']
self.delete_shock_node(node)
def test_se_entropy_partial(self):
# The original input reads file has 12500 reads. Only 14 read gets filtered.
output_reads_name = "SE_entropy_70"
lc_method = "entropy"
lc_threshold = 70
self.getImpl().execReadLibraryPRINSEQ(self.ctx, {"input_reads_ref": self.se_reads_reference,
"output_ws": self.getWsName(),
"output_reads_name": output_reads_name,
"lc_method": lc_method,
"lc_entropy_threshold": lc_threshold})
reads_object = self.dfu.get_objects(
{'object_refs': [self.getWsName() + '/' + output_reads_name]})['data'][0]['data']
self.assertEqual(reads_object['read_count'], 12486)
node = reads_object['lib']['file']['id']
self.delete_shock_node(node)
def test_se_entropy_loose(self):
# The original input reads file has 12500 reads. No reads get filtered.
output_reads_name = "SE_entropy_50"
lc_method = "entropy"
lc_threshold = 50
self.getImpl().execReadLibraryPRINSEQ(self.ctx, {"input_reads_ref": self.se_reads_reference,
"output_ws": self.getWsName(),
"output_reads_name": output_reads_name,
"lc_method": lc_method,
"lc_entropy_threshold": lc_threshold})
reads_object = self.dfu.get_objects(
{'object_refs': [self.getWsName() + '/' + output_reads_name]})['data'][0]['data']
self.assertEqual(reads_object['read_count'], 12500)
node = reads_object['lib']['file']['id']
self.delete_shock_node(node)
def test_se_entropy_none(self):
# No New reads object created because all reads filtered out.
output_reads_name = "SE_entropy_100"
lc_method = "entropy"
lc_threshold = 100
self.getImpl().execReadLibraryPRINSEQ(self.ctx,
{"input_reads_ref": self.se_reads_reference,
"output_ws": self.getWsName(),
"output_reads_name": output_reads_name,
"lc_method": lc_method,
"lc_entropy_threshold": lc_threshold})
with self.assertRaises(DFUError) as context:
self.dfu.get_objects(
{'object_refs': [self.getWsName() + '/' + output_reads_name]})
expected_error_prefix = f"No object with name {output_reads_name} exists in workspace"
self.assertIn(expected_error_prefix, str(context.exception))
    def test_pe_dust_partial(self):
        """dust @ 2 on the PE library yields a paired object plus two singleton objects."""
        # Three new objects made
        # 1) Filtered Pair-end object with matching good Reads
        # 2&3) Filtered FWD and REV Reads without matching pair (singletons).
        output_reads_name = "PE_dust_2"
        lc_method = "dust"
        lc_threshold = 2
        self.getImpl().execReadLibraryPRINSEQ(self.ctx, {"input_reads_ref": self.pe_reads_reference,
                                                         "output_ws": self.getWsName(),
                                                         "output_reads_name": output_reads_name,
                                                         "lc_method": lc_method,
                                                         "lc_dust_threshold": lc_threshold})
        # Check for filtered paired reads object
        reads_object = self.dfu.get_objects(
            {'object_refs': [self.getWsName() + '/' + output_reads_name]})['data'][0]['data']
        # 14950 of the original 25000 paired reads survive with their mate.
        self.assertEqual(reads_object['read_count'], 14950)
        self.assertEqual(reads_object['insert_size_mean'], 42)
        self.assertEqual(reads_object['sequencing_tech'], "Illumina")
        node = reads_object['lib1']['file']['id']
        self.delete_shock_node(node)
        # Check fwd singletons object
        reads_object = self.dfu.get_objects(
            {'object_refs': [self.getWsName() + '/' + output_reads_name +
                             "_fwd_singletons"]})['data'][0]['data']
        self.assertEqual(reads_object['read_count'], 2069)
        self.assertEqual(reads_object['sequencing_tech'], "Illumina")
        # Singleton objects are single-ended, so no insert size metadata.
        self.assertTrue('insert_size_mean' not in reads_object)
        node = reads_object['lib']['file']['id']
        self.delete_shock_node(node)
        # Check rev singletons object
        reads_object = self.dfu.get_objects(
            {'object_refs': [self.getWsName() + '/' + output_reads_name +
                             "_rev_singletons"]})['data'][0]['data']
        self.assertEqual(reads_object['read_count'], 2002)
        self.assertEqual(reads_object['sequencing_tech'], "Illumina")
        node = reads_object['lib']['file']['id']
        self.delete_shock_node(node)
    def test_pe_dust_strict(self):
        """dust @ 0 leaves no matching pairs: only the two singleton objects exist."""
        # Two new objects made (NO PAIRED END MADE as no matching pairs)
        # 1&2) Filtered FWD and REV Reads without matching pair (singletons).
        output_reads_name = "PE_dust_0"
        lc_method = "dust"
        lc_threshold = 0
        self.getImpl().execReadLibraryPRINSEQ(self.ctx, {"input_reads_ref": self.pe_reads_reference,
                                                         "output_ws": self.getWsName(),
                                                         "output_reads_name": output_reads_name,
                                                         "lc_method": lc_method,
                                                         "lc_dust_threshold": lc_threshold})
        # Check filtered paired reads object does not exist
        with self.assertRaises(DFUError) as context:
            self.dfu.get_objects(
                {'object_refs': [self.getWsName() + '/' + output_reads_name]})
        expected_error_prefix = f"No object with name {output_reads_name} exists in workspace"
        self.assertIn(expected_error_prefix, str(context.exception))
        # Check fwd singletons object
        reads_object = self.dfu.get_objects(
            {'object_refs': [self.getWsName() + '/' + output_reads_name +
                             "_fwd_singletons"]})['data'][0]['data']
        self.assertEqual(reads_object['read_count'], 1)
        node = reads_object['lib']['file']['id']
        self.delete_shock_node(node)
        # Check rev singletons object
        reads_object = self.dfu.get_objects(
            {'object_refs': [self.getWsName() + '/' + output_reads_name +
                             "_rev_singletons"]})['data'][0]['data']
        self.assertEqual(reads_object['read_count'], 1)
        node = reads_object['lib']['file']['id']
        self.delete_shock_node(node)
def test_pe_dust_loose(self):
# Only 1 new objects made since no reads filtered.
# 1) Filtered Pair-end object with matching Reads
output_reads_name = "PE_dust_100"
lc_method = "dust"
lc_threshold = 100
self.getImpl().execReadLibraryPRINSEQ(self.ctx, {"input_reads_ref": self.pe_reads_reference,
"output_ws": self.getWsName(),
"output_reads_name": output_reads_name,
"lc_method": lc_method,
"lc_dust_threshold": lc_threshold})
# Check for filtered paired reads object
reads_object = self.dfu.get_objects(
{'object_refs': [self.getWsName() + '/' + output_reads_name]})['data'][0]['data']
self.assertEqual(reads_object['read_count'], 25000)
node = reads_object['lib1']['file']['id']
self.delete_shock_node(node)
# Check fwd singletons object does not exist
temp_object_name = output_reads_name + "_fwd_singletons"
expected_error_prefix = f"No object with name {temp_object_name} exists in workspace"
with self.assertRaises(DFUError) as context:
self.dfu.get_objects(
{'object_refs': [self.getWsName() + '/' + temp_object_name]})
self.assertIn(expected_error_prefix, str(context.exception))
# Check rev singletons object does not exist
temp_object_name = output_reads_name + "_rev_singletons"
expected_error_prefix = f"No object with name {temp_object_name} exists in workspace"
with self.assertRaises(DFUError) as context:
self.dfu.get_objects(
{'object_refs': [self.getWsName() + '/' + temp_object_name]})
# print "ERROR:{}:".format(str(context.exception.message))
# expected_error_prefix = \
# "No object with name {} exists in workspace".format(temp_object_name)
self.assertIn(expected_error_prefix, str(context.exception))
def test_pe_entropy_partial(self):
# Two new objects made (the reverse singleton has no reads, no object made)
# 1) Filtered Pair-end object with matching good Reads
# 2) Filtered FWD and REV Reads without matching pair (singletons).
output_reads_name = "PE_entropy_60"
lc_method = "entropy"
lc_threshold = 60
self.getImpl().execReadLibraryPRINSEQ(self.ctx, {"input_reads_ref": self.pe_reads_reference,
"output_ws": self.getWsName(),
"output_reads_name": output_reads_name,
"lc_method": lc_method,
"lc_entropy_threshold": lc_threshold})
# Check for filtered paired reads object
reads_object = self.dfu.get_objects(
{'object_refs': [self.getWsName() + '/' + output_reads_name]})['data'][0]['data']
self.assertEqual(reads_object['read_count'], 24996)
node = reads_object['lib1']['file']['id']
self.delete_shock_node(node)
# Check fwd singletons object
reads_object = self.dfu.get_objects(
{'object_refs': [self.getWsName() + '/' + output_reads_name +
"_fwd_singletons"]})['data'][0]['data']
self.assertEqual(reads_object['read_count'], 2)
node | |
= \
self._create_instance_with_personality_json(None)
self.assertEquals(response.status_int, 202)
response = json.loads(response.body)
self.assertTrue('adminPass' in response['server'])
self.assertEqual(16, len(response['server']['adminPass']))
def test_create_instance_admin_pass_xml(self):
request, response, dummy = \
self._create_instance_with_personality_xml(None)
self.assertEquals(response.status_int, 202)
dom = minidom.parseString(response.body)
server = dom.childNodes[0]
self.assertEquals(server.nodeName, 'server')
self.assertEqual(16, len(server.getAttribute('adminPass')))
class TestGetKernelRamdiskFromImage(test.TestCase):
    """
    If we're building from an AMI-style image, we need to be able to fetch the
    kernel and ramdisk associated with the machine image. This information is
    stored with the image metadata and returned via the ImageService.
    These tests ensure that we parse the metadata returned by the ImageService
    correctly and that we handle failure modes appropriately.
    """
    def test_status_not_active(self):
        """Kernel/ramdisk info may only be fetched from an 'active' image."""
        queued_meta = {'id': 1, 'status': 'queued'}
        self.assertRaises(exception.Invalid, self._get_k_r, queued_meta)
    def test_not_ami(self):
        """Anything other than ami should return no kernel and no ramdisk."""
        vhd_meta = {'id': 1, 'status': 'active', 'container_format': 'vhd'}
        self.assertEqual(self._get_k_r(vhd_meta), (None, None))
    def test_ami_no_kernel(self):
        """If an ami is missing a kernel it should raise NotFound."""
        ami_meta = {'id': 1, 'status': 'active', 'container_format': 'ami',
                    'properties': {'ramdisk_id': 1}}
        self.assertRaises(exception.NotFound, self._get_k_r, ami_meta)
    def test_ami_no_ramdisk(self):
        """An ami missing a ramdisk yields its kernel ID and None for the
        ramdisk ID.
        """
        ami_meta = {'id': 1, 'status': 'active', 'container_format': 'ami',
                    'properties': {'kernel_id': 1}}
        self.assertEqual(self._get_k_r(ami_meta), (1, None))
    def test_ami_kernel_ramdisk_present(self):
        """Return IDs if both kernel and ramdisk are present."""
        ami_meta = {'id': 1, 'status': 'active', 'container_format': 'ami',
                    'properties': {'kernel_id': 1, 'ramdisk_id': 2}}
        self.assertEqual(self._get_k_r(ami_meta), (1, 2))
    @staticmethod
    def _get_k_r(image_meta):
        """Rebinding function to a shorter name for convenience."""
        helper = create_instance_helper.CreateInstanceHelper
        return helper._do_get_kernel_ramdisk_from_image(image_meta)
class ServersViewBuilderV11Test(test.TestCase):
    """Exercise ViewBuilderV11 against a fixed fake instance record.

    Each test compares the builder's output dict against an expected
    fixture dict with assertDictMatch.
    """
    def setUp(self):
        # NOTE(review): does not call test.TestCase.setUp — presumably
        # intentional since only local fixtures are needed; confirm.
        self.instance = self._get_instance()
        self.view_builder = self._get_view_builder()
    def tearDown(self):
        # Nothing to clean up; fixtures are plain in-memory objects.
        pass
    def _get_instance(self):
        """Return a fake instance dict in the BUILDING state."""
        created_at = datetime.datetime(2010, 10, 10, 12, 0, 0)
        updated_at = datetime.datetime(2010, 11, 11, 11, 0, 0)
        instance = {
            "id": 1,
            "created_at": created_at,
            "updated_at": updated_at,
            "admin_pass": "",
            "user_id": "fake",
            "project_id": "fake",
            "image_ref": "5",
            "kernel_id": "",
            "ramdisk_id": "",
            "launch_index": 0,
            "key_name": "",
            "key_data": "",
            "vm_state": vm_states.BUILDING,
            "task_state": None,
            "memory_mb": 0,
            "vcpus": 0,
            "local_gb": 0,
            "hostname": "",
            "host": "",
            "instance_type": {
               "flavorid": 1,
            },
            "user_data": "",
            "reservation_id": "",
            "mac_address": "",
            "scheduled_at": utils.utcnow(),
            "launched_at": utils.utcnow(),
            "terminated_at": utils.utcnow(),
            "availability_zone": "",
            "display_name": "test_server",
            "locked": False,
            "metadata": [],
            "accessIPv4": "192.168.3.11",
            "accessIPv6": "fead::1234",
            #"address": ,
            #"floating_ips": [{"address":ip} for ip in public_addresses]}
            "uuid": "deadbeef-feed-edee-beef-d0ea7beefedd"}
        return instance
    def _get_view_builder(self, project_id=""):
        """Return a ViewBuilderV11 wired with address/flavor/image builders."""
        base_url = "http://localhost/v1.1"
        views = nova.api.openstack.views
        address_builder = views.addresses.ViewBuilderV11()
        flavor_builder = views.flavors.ViewBuilderV11(base_url, project_id)
        image_builder = views.images.ViewBuilderV11(base_url, project_id)
        view_builder = nova.api.openstack.views.servers.ViewBuilderV11(
            address_builder,
            flavor_builder,
            image_builder,
            base_url,
            project_id,
        )
        return view_builder
    def test_build_server(self):
        """Non-detailed build: only id, uuid, name and links are emitted."""
        expected_server = {
            "server": {
                "id": 1,
                "uuid": self.instance['uuid'],
                "name": "test_server",
                "links": [
                    {
                        "rel": "self",
                        "href": "http://localhost/v1.1/servers/1",
                    },
                    {
                        "rel": "bookmark",
                        "href": "http://localhost/servers/1",
                    },
                ],
            }
        }
        output = self.view_builder.build(self.instance, False)
        self.assertDictMatch(output, expected_server)
    def test_build_server_with_project_id(self):
        """Non-detailed build with a project id prefixed in the hrefs."""
        expected_server = {
            "server": {
                "id": 1,
                "uuid": self.instance['uuid'],
                "name": "test_server",
                "links": [
                    {
                        "rel": "self",
                        "href": "http://localhost/v1.1/fake/servers/1",
                    },
                    {
                        "rel": "bookmark",
                        "href": "http://localhost/fake/servers/1",
                    },
                ],
            }
        }
        view_builder = self._get_view_builder(project_id='fake')
        output = view_builder.build(self.instance, False)
        self.assertDictMatch(output, expected_server)
    def test_build_server_detail(self):
        """Detailed build of a BUILDING instance (status BUILD, progress 0)."""
        image_bookmark = "http://localhost/images/5"
        flavor_bookmark = "http://localhost/flavors/1"
        expected_server = {
            "server": {
                "id": 1,
                "uuid": self.instance['uuid'],
                "user_id": "fake",
                "tenant_id": "fake",
                "updated": "2010-11-11T11:00:00Z",
                "created": "2010-10-10T12:00:00Z",
                "progress": 0,
                "name": "test_server",
                "status": "BUILD",
                "accessIPv4": "",
                "accessIPv6": "",
                "hostId": '',
                "key_name": '',
                "image": {
                    "id": "5",
                    "links": [
                        {
                            "rel": "bookmark",
                            "href": image_bookmark,
                        },
                    ],
                },
                "flavor": {
                    "id": "1",
                  "links": [
                                            {
                          "rel": "bookmark",
                          "href": flavor_bookmark,
                      },
                  ],
                },
                "addresses": {},
                "metadata": {},
                "config_drive": None,
                "links": [
                    {
                        "rel": "self",
                        "href": "http://localhost/v1.1/servers/1",
                    },
                    {
                        "rel": "bookmark",
                        "href": "http://localhost/servers/1",
                    },
                ],
            }
        }
        output = self.view_builder.build(self.instance, True)
        self.assertDictMatch(output, expected_server)
    def test_build_server_detail_active_status(self):
        """Detailed build of an ACTIVE instance (status ACTIVE, progress 100)."""
        #set the power state of the instance to running
        self.instance['vm_state'] = vm_states.ACTIVE
        image_bookmark = "http://localhost/images/5"
        flavor_bookmark = "http://localhost/flavors/1"
        expected_server = {
            "server": {
                "id": 1,
                "uuid": self.instance['uuid'],
                "user_id": "fake",
                "tenant_id": "fake",
                "updated": "2010-11-11T11:00:00Z",
                "created": "2010-10-10T12:00:00Z",
                "progress": 100,
                "name": "test_server",
                "status": "ACTIVE",
                "accessIPv4": "",
                "accessIPv6": "",
                "hostId": '',
                "key_name": '',
                "image": {
                    "id": "5",
                    "links": [
                        {
                            "rel": "bookmark",
                            "href": image_bookmark,
                        },
                    ],
                },
                "flavor": {
                    "id": "1",
                  "links": [
                                            {
                          "rel": "bookmark",
                          "href": flavor_bookmark,
                      },
                  ],
                },
                "addresses": {},
                "metadata": {},
                "config_drive": None,
                "links": [
                    {
                        "rel": "self",
                        "href": "http://localhost/v1.1/servers/1",
                    },
                    {
                        "rel": "bookmark",
                        "href": "http://localhost/servers/1",
                    },
                ],
            }
        }
        output = self.view_builder.build(self.instance, True)
        self.assertDictMatch(output, expected_server)
    def test_build_server_detail_with_accessipv4(self):
        """Detailed build surfaces the instance's access_ip_v4."""
        self.instance['access_ip_v4'] = '192.168.3.11'
        image_bookmark = "http://localhost/images/5"
        flavor_bookmark = "http://localhost/flavors/1"
        expected_server = {
            "server": {
                "id": 1,
                "uuid": self.instance['uuid'],
                "user_id": "fake",
                "tenant_id": "fake",
                "updated": "2010-11-11T11:00:00Z",
                "created": "2010-10-10T12:00:00Z",
                "progress": 0,
                "name": "test_server",
                "key_name": "",
                "status": "BUILD",
                "hostId": '',
                "image": {
                    "id": "5",
                    "links": [
                        {
                            "rel": "bookmark",
                            "href": image_bookmark,
                        },
                    ],
                },
                "flavor": {
                    "id": "1",
                    "links": [
                                              {
                            "rel": "bookmark",
                            "href": flavor_bookmark,
                        },
                    ],
                },
                "addresses": {},
                "metadata": {},
                "config_drive": None,
                "accessIPv4": "192.168.3.11",
                "accessIPv6": "",
                "links": [
                    {
                        "rel": "self",
                        "href": "http://localhost/v1.1/servers/1",
                    },
                    {
                        "rel": "bookmark",
                        "href": "http://localhost/servers/1",
                    },
                ],
            }
        }
        output = self.view_builder.build(self.instance, True)
        self.assertDictMatch(output, expected_server)
    def test_build_server_detail_with_accessipv6(self):
        """Detailed build surfaces the instance's access_ip_v6."""
        self.instance['access_ip_v6'] = 'fead::1234'
        image_bookmark = "http://localhost/images/5"
        flavor_bookmark = "http://localhost/flavors/1"
        expected_server = {
            "server": {
                "id": 1,
                "uuid": self.instance['uuid'],
                "user_id": "fake",
                "tenant_id": "fake",
                "updated": "2010-11-11T11:00:00Z",
                "created": "2010-10-10T12:00:00Z",
                "progress": 0,
                "name": "test_server",
                "key_name": "",
                "status": "BUILD",
                "hostId": '',
                "image": {
                    "id": "5",
                    "links": [
                        {
                            "rel": "bookmark",
                            "href": image_bookmark,
                        },
                    ],
                },
                "flavor": {
                    "id": "1",
                    "links": [
                                              {
                            "rel": "bookmark",
                            "href": flavor_bookmark,
                        },
                    ],
                },
                "addresses": {},
                "metadata": {},
                "config_drive": None,
                "accessIPv4": "",
                "accessIPv6": "fead::1234",
                "links": [
                    {
                        "rel": "self",
                        "href": "http://localhost/v1.1/servers/1",
                    },
                    {
                        "rel": "bookmark",
                        "href": "http://localhost/servers/1",
                    },
                ],
            }
        }
        output = self.view_builder.build(self.instance, True)
        self.assertDictMatch(output, expected_server)
    def test_build_server_detail_with_metadata(self):
        """Detailed build converts InstanceMetadata rows into a string dict."""
        metadata = []
        metadata.append(InstanceMetadata(key="Open", value="Stack"))
        metadata.append(InstanceMetadata(key="Number", value=1))
        self.instance['metadata'] = metadata
        image_bookmark = "http://localhost/images/5"
        flavor_bookmark = "http://localhost/flavors/1"
        expected_server = {
            "server": {
                "id": 1,
                "uuid": self.instance['uuid'],
                "user_id": "fake",
                "tenant_id": "fake",
                "updated": "2010-11-11T11:00:00Z",
                "created": "2010-10-10T12:00:00Z",
                "progress": 0,
                "name": "test_server",
                "status": "BUILD",
                "accessIPv4": "",
                "accessIPv6": "",
                "hostId": '',
                "key_name": '',
                "image": {
                    "id": "5",
                    "links": [
                        {
                            "rel": "bookmark",
                            "href": image_bookmark,
                        },
                    ],
                },
                "flavor": {
                    "id": "1",
                  "links": [
                                            {
                          "rel": "bookmark",
                          "href": flavor_bookmark,
                      },
                  ],
                },
                "addresses": {},
                "metadata": {
                    "Open": "Stack",
                    "Number": "1",
                },
                "config_drive": None,
                "links": [
                    {
                        "rel": "self",
                        "href": "http://localhost/v1.1/servers/1",
                    },
                    {
                        "rel": "bookmark",
                        "href": "http://localhost/servers/1",
                    },
                ],
            }
        }
        output = self.view_builder.build(self.instance, True)
        self.assertDictMatch(output, expected_server)
class ServerXMLSerializationTest(test.TestCase):
TIMESTAMP = "2010-10-11T10:30:22Z"
SERVER_HREF = 'http://localhost/v1.1/servers/123'
SERVER_BOOKMARK = 'http://localhost/servers/123'
IMAGE_BOOKMARK = 'http://localhost/images/5'
FLAVOR_BOOKMARK = 'http://localhost/flavors/1'
    def setUp(self):
        # maxDiff=None so assertion failures show the full diff of the large
        # XML/dict fixtures these tests compare.
        self.maxDiff = None
        test.TestCase.setUp(self)
def test_show(self):
serializer = servers.ServerXMLSerializer()
fixture = {
"server": {
"id": 1,
"user_id": "fake",
"tenant_id": "fake",
"uuid": FAKE_UUID,
'created': self.TIMESTAMP,
'updated': self.TIMESTAMP,
"progress": 0,
"name": "test_server",
"status": "BUILD",
"hostId": 'e4d909c290d0fb1ca068ffaddf22cbd0',
"key_name": '',
"accessIPv4": "192.168.3.11",
"accessIPv6": "fead::1234",
"image": {
"id": "5",
"links": [
{
"rel": "bookmark",
"href": self.IMAGE_BOOKMARK,
},
],
},
"flavor": {
"id": "1",
"links": [
{
"rel": "bookmark",
"href": self.FLAVOR_BOOKMARK,
},
],
},
"addresses": {
"network_one": [
{
"version": 4,
"addr": "172.16.58.3",
},
{
"version": 6,
"addr": "::babe:192.168.127.1238",
},
],
"network_two": [
{
"version": 4,
"addr": "192.168.3.11",
},
{
"version": 6,
"addr": "::babe:192.168.3.11",
},
],
},
"metadata": {
"Open": "Stack",
"Number": "1",
},
'links': [
{
'href': self.SERVER_HREF,
'rel': 'self',
},
{
'href': self.SERVER_BOOKMARK,
'rel': 'bookmark',
},
],
}
}
output = serializer.serialize(fixture, 'show')
print output
root = etree.XML(output)
xmlutil.validate_schema(root, 'server')
expected_server_href = self.SERVER_HREF
expected_server_bookmark = self.SERVER_BOOKMARK
expected_image_bookmark = self.IMAGE_BOOKMARK
expected_flavor_bookmark = self.FLAVOR_BOOKMARK
expected_now = self.TIMESTAMP
expected_uuid = FAKE_UUID
server_dict = fixture['server']
for key in ['name', 'id', 'uuid', 'created', 'accessIPv4',
'updated', 'progress', 'status', 'hostId',
'accessIPv6']:
self.assertEqual(root.get(key), str(server_dict[key]))
link_nodes = root.findall('{0}link'.format(ATOMNS))
self.assertEqual(len(link_nodes), 2)
for i, link in enumerate(server_dict['links']):
for key, value in link.items():
self.assertEqual(link_nodes[i].get(key), value)
metadata_root = root.find('{0}metadata'.format(NS))
metadata_elems = metadata_root.findall('{0}meta'.format(NS))
self.assertEqual(len(metadata_elems), 2)
for i, metadata_elem in enumerate(metadata_elems):
(meta_key, meta_value) = server_dict['metadata'].items()[i]
self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
image_root = root.find('{0}image'.format(NS))
self.assertEqual(image_root.get('id'), server_dict['image']['id'])
link_nodes = image_root.findall('{0}link'.format(ATOMNS))
self.assertEqual(len(link_nodes), 1)
for i, link in enumerate(server_dict['image']['links']):
for key, value in link.items():
self.assertEqual(link_nodes[i].get(key), value)
flavor_root = root.find('{0}flavor'.format(NS))
self.assertEqual(flavor_root.get('id'), server_dict['flavor']['id'])
link_nodes = flavor_root.findall('{0}link'.format(ATOMNS))
self.assertEqual(len(link_nodes), 1)
for i, link in enumerate(server_dict['flavor']['links']):
for key, value in link.items():
self.assertEqual(link_nodes[i].get(key), value)
addresses_root = root.find('{0}addresses'.format(NS))
addresses_dict = server_dict['addresses']
network_elems = addresses_root.findall('{0}network'.format(NS))
self.assertEqual(len(network_elems), 2)
| |
# repo: oliviertilmans/flowcorder
"""
Classes to create and start a daemon process.
This module defines the following classes and compositions:
* Daemon: the base class to create a daemon.
|
| is made of
v
* DaemonComponent: factories that will create the components of the daemon,
  see the Component class for an (empty) skeleton of a component class.
|
| is configured by
v
* CLIParam: CLI parameter definitions.
Dependencies between DaemonComponent MUST BE LOOP FREE (this won't be checked)!
Creating and starting a daemon involves:
- define the Component classes, and create the DaemonComponent instances that
will manage them and their CLI parameter
- Create a Daemon instance
- call main() on the Daemon instance to start the daemon (possibly forking
it in the background).
"""
import argparse
import atexit
import itertools
import logging
import logging.config
import multiprocessing as mp
import os
import time
from configparser import ConfigParser, Error as CFGError, NoSectionError

from daemons.prefab import run as _daemons
from pkg_resources import resource_string

import flowcorder as lib
from ..utils import DaemonThread
LOG = logging.getLogger(__name__)
def main(daemon):
    """Parse the CLI, build the configuration dict, then run *daemon*.

    Runs in the foreground unless --daemonize was requested.
    """
    cli = daemon.create_parser()
    opts = cli.parse_args()
    config = _build_config_dict(opts.cfg, opts.name)
    # Either detach into the background or start directly in the foreground.
    if not opts.daemonize:
        daemon.start_with_args(opts, config)
    else:
        _daemonize(daemon.start_with_args, opts, config)
def _build_config_dict(filename, names):
    """Build a flat option dict from packaged defaults plus a user file.

    :filename: optional path to a user configuration file (None to skip).
    :names: section names layered on top of the defaults, in order; a
            later section overrides earlier values for the same key.
    :return: dict mapping option name to value.
    """
    cfg = _MConfigParser(allow_no_value=True)
    out = {}
    try:
        # The packaged base configuration always provides the defaults.
        cfg.read_string(resource_string('flowcorder',
                                        'base_config.cfg').decode('utf8'),
                        'defaults')
        if filename is not None:
            LOG.debug('Using configuration file: %s', filename)
            cfg.read(filename, encoding='utf8')
        for k, v in cfg.items(section=cfg.default_section):
            out[k] = v
            LOG.debug('Registering default: %s -> %s', k, v)
        # itertools.chain() over a single iterable was a no-op wrapper;
        # iterate the requested sections directly.
        for section in names:
            try:
                for key in cfg.options(section=section, exclude_default=True):
                    value = cfg.get(section, key)
                    out[key] = value
                    LOG.debug('[%s] Updated property %s -> %s', section,
                              key, value)
            except CFGError as e:
                LOG.warning('Cannot use section %s in the config file: %s',
                            section, e)
    except (CFGError, FileNotFoundError, IOError) as e:
        LOG.warning('Cannot read configuration file: %s', e)
    return out
class Daemon(object):
    """A Daemon that will run in the background."""

    def __init__(self, description, *components):
        """Instantiate this Daemon from the components list.

        :description: human-readable text shown by the CLI parser.
        :components: DaemonComponent factories; they get ordered so every
                     component comes after its dependencies.
        """
        self.description = description
        self.components = _components_by_deps(components)
        LOG.debug('Components order by dependencies: %s',
                  '<-'.join(str(c) for c in self.components))
        # Component name -> live component instance, filled at start time.
        self.instances = {}

    def start_with_args(self, args, cfg):
        """Start the daemon."""
        # Handle general arguments
        if args.debug:
            lib.DEBUG = set(args.debug)
        if args.log_config is not None:
            if os.path.isfile(args.log_config):
                logging.config.fileConfig(args.log_config)
            else:
                LOG.warning('Cannot load the logging configuration file at %s',
                            args.log_config)
        self._create_components(args, cfg)
        try:
            try:
                self._start_components()
                self._join_components()
            except KeyboardInterrupt:
                self._stop()
                # BUG FIX: was `self._join_components` (a bare attribute
                # access that never called the method) — actually join the
                # component threads after stopping them.
                self._join_components()
        except Exception as e:
            LOG.exception(e)
            # Hold off before exiting so supervisor restart loops don't spin.
            time.sleep(args.crash_hold_off)

    def create_parser(self):
        """Create and return a new CLI argument parser for this daemon."""
        # Create the parser and its global arguments
        parser = argparse.ArgumentParser(description=self.description)
        parser.add_argument('--log-config', type=str,
                            help='The logging configuration file to use')
        parser.add_argument('--debug', help='Turn debugging on, for '
                            'various components',
                            choices=list(lib.DEBUG_OPTIONS), nargs='*')
        # Daemon-specific arg group
        cfgparser = parser.add_argument_group('Config file properties')
        cfgparser.add_argument('--cfg', help='Configuration file path')
        cfgparser.add_argument('--name', help='Configuration file section '
                               'names to use other than the DEFAULT one '
                               'read in order (i.e., latest conflicting value '
                               'is the one used)', nargs='*', default=[])
        dparser = parser.add_argument_group('Daemon properties')
        dparser.add_argument('--daemonize', action='store_true',
                             help='Fork and detach this process to run in the '
                             'background')
        dparser.add_argument('--pid-file', help='The file in which this '
                             'process\' PID should be saved',
                             default='/var/run/flowcorder_daemon.pid')
        dparser.add_argument('--action', help='The action to perform when '
                             'daemonizing this process', default='start',
                             # Reload there to please systemd terminology
                             choices=['start', 'stop', 'restart', 'reload'])
        dparser.add_argument('--crash-hold-off', help='Wait X sec before '
                             'exiting when crashing', type=float, default=0.1)
        # Register all components arguments
        for c in self.components:
            c.complete_parser(parser)
        return parser

    def _create_components(self, args, cfg):
        # Instantiate in dependency order so each component can receive the
        # instances it depends on.
        for c in self.components:
            self.instances[c.name] = c.instantiate(args, cfg, self.instances)

    def _start_components(self):
        # Register the stop hook first so a failing start still cleans up.
        atexit.register(self._stop)
        for c in self.components:
            self.instances[c.name].start()

    def _join_components(self):
        for c in self.components:
            self.instances[c.name].join()

    def _stop(self):
        """Stop the daemon."""
        if lib.IS_RUNNING:
            atexit.unregister(self._stop)
            lib.IS_RUNNING = False
            # Tear down in reverse dependency order.
            for c in reversed(self.components):
                self.instances[c.name].stop()
class DaemonComponent(object):
    """Top-level object that needs to be instantiated to create the daemon."""

    def __init__(self, cls, cli_params=None, components_deps=None,
                 debug_keys=()):
        """
        Specify a new daemon component.

        :cls: The component class
        :cli_params: The list of parameter to parse from the CLI in order to
                     create this component. See CLIParam.
        :components_deps: Other needed component class that must be
                          instantiated prior to this one, and given as argument
                          when instantiating.
        :debug_keys: Keys that can be used as --debug flag
        """
        self.cls = cls
        # BUG FIX: the defaults were shared mutable lists ([]); use None/()
        # sentinels and build fresh lists per instance.
        self.cli_params = list(cli_params) if cli_params else []
        self.components_deps = list(components_deps) if components_deps else []
        lib.DEBUG_OPTIONS.update(debug_keys)

    def complete_parser(self, parser):
        """Complete a parser to add this component's arguments."""
        # Do not create a new group if the component has no CLI parameters
        if not self.cli_params:
            return
        grp = parser.add_argument_group(self.name, self.cls.__doc__)
        for param in self.cli_params:
            param.register(grp)

    def instantiate(self, args, cfg, components):
        """
        Instantiate this component.

        :args: The parsed CLI params.
        :cfg: The configuration dict.
        :components: A dict of already instantiated component instances,
                     keyed by their component name.
        """
        kwargs = {}
        for dep in self.components_deps:
            try:
                if isinstance(dep, DaemonComponent):
                    dep = dep.name
                kwargs[dep] = components[dep]
            except KeyError:
                raise RuntimeError('Could not create the component %s as it '
                                   'is missing its dependency %s' %
                                   (self.name, dep))
        try:
            kwargs.update({p.name: p.value(args, cfg)
                           for p in self.cli_params})
            LOG.debug('Arguments for %s: %s', self.name, kwargs)
        except (AttributeError, KeyError) as e:
            # BUG FIX (message): 'as it as' -> 'as it has'.
            raise RuntimeError('Could not create the component %s as it has '
                               'an unspecified CLI param %s' % (self.name, e))
        return self.cls(**kwargs)

    @property
    def name(self):
        """Return the component name."""
        return self.cls.__name__

    def __str__(self):
        """Identify by component name."""
        return self.name

    __repr__ = __str__

    def __hash__(self):
        """Components are unique by name."""
        # NOTE(review): __eq__ is inherited (identity); two distinct
        # instances with the same name hash equal but are not equal.
        return hash(self.name)
class CLIParam(object):
    """A CLI parameter for a DaemonComponent."""

    def __init__(self, name, *args, **kwargs):
        """
        Register a new CLI parameter.

        See ArgumentParser.add_argument

        :name: parameter option string.
        :raises RuntimeError: if a default value is supplied (defaults
                              belong in the base configuration file).
        """
        self.full_name = name
        # '--max-count' -> 'max_count', matching argparse's dest naming.
        self.name = name.replace('--', '').replace('-', '_')
        self.args = args
        self.kwargs = kwargs
        # We don't care if the default is None
        if kwargs.get('default', None) is not None:
            raise RuntimeError('default CLIParam values should only appear '
                               'in the base configuration file!\nkey:%s' %
                               self.full_name)

    def register(self, parser):
        """Register the parameter to an argument parser."""
        return parser.add_argument(self.full_name, *self.args, **self.kwargs)

    def value(self, args, cfg):
        """Extract this parameter value.

        The CLI value wins; otherwise fall back to the configuration
        dict, trying the in-code name, then the full CLI option string,
        then the option string without its leading '--'.  The config
        value is converted with the registered 'type' (str by default).
        """
        cli = getattr(args, self.name)
        if cli is not None:
            return cli
        # First try the in-code name
        arg = cfg.get(self.name, None)
        # Fallback on CLI name
        # BUGFIX: these two fallbacks previously tested 'args is None'
        # (the namespace, which is never None here) instead of the
        # looked-up value, so they never ran and int(None)/str(None)
        # style failures followed.
        if arg is None:
            arg = cfg.get(self.full_name, None)
        # Attempt CLI name without the leading --
        if arg is None:
            arg = cfg.get(self.full_name[2:], None)
        # Convert to expected type, string by default
        cast_func = self.kwargs.get('type', str)
        LOG.debug('%s: Attempting to convert %s to %s', self.name,
                  arg, cast_func.__name__)
        # nargs indicates a list type
        if 'nargs' in self.kwargs:
            return list(map(cast_func, arg.split(' ')))
        return cast_func(arg)
# Shared CLI parameter: the unix control socket that daemon components
# listening for transport-stack flow exports are expected to reuse.
DEFAULT_UNIX_SOCK_PARAM = CLIParam(
    '--ctrl-socket', help='The unix socket on which the daemon should listen'
    ' for the transport stacks flow exports')
class Component(object):
    """Base component with a no-op lifecycle.

    Subclasses override any of start/stop/join; the defaults do nothing,
    so a component only implements the phases it cares about.
    """

    def start(self):
        """Start the component (no-op by default)."""

    def stop(self):
        """Stop the component (no-op by default)."""

    def join(self):
        """Return when this component halts (immediately by default)."""
class ThreadedComponent(Component):
    """A Component that does its work in a separate thread."""

    def __init__(self):
        """Register the component thread variable."""
        self.evt_thread = None

    def start(self, *args, **kwargs):
        """
        Build and start the event thread.

        :args: The args array that will be passed to the step function initial
               call.
        :kwargs: The kwargs that will be passed to the step function.
        """
        self.evt_thread = DaemonThread(target=self.do_work,
                                       args=args, kwargs=kwargs)
        self.evt_thread.start()

    def join(self):
        """Wait for the event thread completion."""
        # Guard: start() may never have been called.
        if self.evt_thread:
            self.evt_thread.join()

    def do_work(self):
        """Execute the work at each tick. See DaemonThread doc for args.

        Subclasses must override this method.
        """
        # BUGFIX: the original 'raise NotImplemented' raises
        # "TypeError: exceptions must derive from BaseException" —
        # NotImplemented is a comparison sentinel, not an exception class.
        raise NotImplementedError
class ConsumerComponent(Component):
    """
    A component that will spawn a new process to consume data.

    The configure function registers an initialization function for the
    consumer, which should return a function that will be called for each
    newly received data.
    """

    def __init__(self):
        """Register the communication infrastructure for the component."""
        self.init_func = self.child = self.wq = self.halt_evt = None

    def configure(self, init_func=None):
        """Register the initialization function for the consumer."""
        self.init_func = init_func

    def start(self, **kw):
        """Start the consumer process.

        :kw: Extra keyword arguments forwarded to the init function.
        """
        # Unidirectional pipe: the child reads, this process writes.
        rq, self.wq = mp.Pipe(False)
        self.halt_evt = mp.Event()
        self.child = mp.Process(target=_spawn_target,
                                args=(self.init_func, rq, self.halt_evt),
                                kwargs=kw,
                                daemon=True)
        self.child.start()

    def stop(self):
        """Signal the consumer to halt and close the write end of the pipe."""
        if self.halt_evt:
            self.halt_evt.set()
        # BUGFIX: guard against stop() being called before start(),
        # in which case the pipe was never created and wq is None.
        if self.wq is not None:
            self.wq.close()

    def join(self):
        """Wait for the consumer process to exit."""
        # BUGFIX: guard against join() on a component that never started.
        if self.child is not None:
            self.child.join()

    def export(self, data):
        """Send *data* to the consumer process (start() must have run)."""
        self.wq.send(data)
def _spawn_target(init_func, rq, halt, **kw):
    """Consumer subprocess entry point.

    Builds the consumer callback via *init_func*, then feeds it every
    datum read from the pipe *rq* until the *halt* event is set or the
    writer end of the pipe is closed.
    """
    consumer = init_func(**kw)
    try:
        while not halt.is_set():
            # Poll with a 1s timeout so the halt event is re-checked
            # periodically even when no data arrives.
            if rq.poll(1):
                consumer(rq.recv())
    except EOFError:
        pass  # writer closed: the process needs to stop
    rq.close()
    LOG.debug('Stopping consumer subprocess')
def _daemonize(func, args, cfg):
"""Daemonize and call func with | |
is not None:
try:
getattr(oldLink, signal).disconnect(slot)
oldLink.sigResized.disconnect(slot)
except (TypeError, RuntimeError):
## This can occur if the view has been deleted already
pass
if view is None or isinstance(view, str):
self.state['linkedViews'][axis] = view
else:
self.state['linkedViews'][axis] = weakref.ref(view)
getattr(view, signal).connect(slot)
view.sigResized.connect(slot)
if view.autoRangeEnabled()[axis] is not False:
self.enableAutoRange(axis, False)
slot()
else:
if self.autoRangeEnabled()[axis] is False:
slot()
self.sigStateChanged.emit(self)
    def blockLink(self, b):
        """Enable (b=True) or disable propagation of range changes to
        linked views; used to prevent recursive plot-change propagation
        while a linked view is itself being updated."""
        self.linksBlocked = b  ## prevents recursive plot-change propagation
def linkedXChanged(self):
## called when x range of linked view has changed
view = self.linkedView(0)
self.linkedViewChanged(view, ViewBox.XAxis)
def linkedYChanged(self):
## called when y range of linked view has changed
view = self.linkedView(1)
self.linkedViewChanged(view, ViewBox.YAxis)
def linkedView(self, ax):
## Return the linked view for axis *ax*.
## this method _always_ returns either a ViewBox or None.
v = self.state['linkedViews'][ax]
if v is None or isinstance(v, str):
return None
else:
return v() ## dereference weakref pointer. If the reference is dead, this returns None
    def linkedViewChanged(self, view, axis):
        """Mirror a range change from linked *view* onto this view's *axis*.

        If the two views barely overlap on screen (< 1/3 of either
        width/height), the linked range is simply copied; otherwise the
        ranges are aligned so that the shared on-screen region shows the
        same data coordinates in both views.  Linking is blocked on the
        source view for the duration to avoid recursive propagation.
        """
        if self.linksBlocked or view is None:
            return
        #print self.name, "ViewBox.linkedViewChanged", axis, view.viewRange()[axis]
        vr = view.viewRect()
        vg = view.screenGeometry()
        sg = self.screenGeometry()
        if vg is None or sg is None:
            # Either view is not attached to a widget yet; nothing to align.
            return
        view.blockLink(True)
        try:
            if axis == ViewBox.XAxis:
                overlap = min(sg.right(), vg.right()) - max(sg.left(), vg.left())
                if overlap < min(vg.width()/3, sg.width()/3):  ## if less than 1/3 of views overlap,
                                                               ## then just replicate the view
                    x1 = vr.left()
                    x2 = vr.right()
                else:  ## views overlap; line them up
                    # upp = view units per screen pixel in the source view
                    upp = float(vr.width()) / vg.width()
                    if self.xInverted():
                        x1 = vr.left() + (sg.right()-vg.right()) * upp
                    else:
                        x1 = vr.left() + (sg.x()-vg.x()) * upp
                    x2 = x1 + sg.width() * upp
                self.enableAutoRange(ViewBox.XAxis, False)
                self.setXRange(x1, x2, padding=0)
            else:
                overlap = min(sg.bottom(), vg.bottom()) - max(sg.top(), vg.top())
                if overlap < min(vg.height()/3, sg.height()/3):  ## if less than 1/3 of views overlap,
                                                                 ## then just replicate the view
                    y1 = vr.top()
                    y2 = vr.bottom()
                else:  ## views overlap; line them up
                    upp = float(vr.height()) / vg.height()
                    if self.yInverted():
                        y2 = vr.bottom() + (sg.bottom()-vg.bottom()) * upp
                    else:
                        y2 = vr.bottom() + (sg.top()-vg.top()) * upp
                    y1 = y2 - sg.height() * upp
                self.enableAutoRange(ViewBox.YAxis, False)
                self.setYRange(y1, y2, padding=0)
        finally:
            view.blockLink(False)
def screenGeometry(self):
"""return the screen geometry of the viewbox"""
v = self.getViewWidget()
if v is None:
return None
b = self.sceneBoundingRect()
wr = v.mapFromScene(b).boundingRect()
pos = v.mapToGlobal(v.pos())
wr.adjust(pos.x(), pos.y(), pos.x(), pos.y())
return wr
def itemsChanged(self):
## called when items are added/removed from self.childGroup
self.updateAutoRange()
def itemBoundsChanged(self, item):
self._itemBoundsCache.pop(item, None)
if (self.state['autoRange'][0] is not False) or (self.state['autoRange'][1] is not False):
self._autoRangeNeedsUpdate = True
self.update()
def _invertAxis(self, ax, inv):
key = 'xy'[ax] + 'Inverted'
if self.state[key] == inv:
return
self.state[key] = inv
self._matrixNeedsUpdate = True # updateViewRange won't detect this for us
self.updateViewRange()
self.update()
self.sigStateChanged.emit(self)
if ax:
self.sigYRangeChanged.emit(self, tuple(self.state['viewRange'][ax]))
else:
self.sigXRangeChanged.emit(self, tuple(self.state['viewRange'][ax]))
    def invertY(self, b=True):
        """
        By default, the positive y-axis points upward on the screen. Use invertY(True) to reverse the y-axis.
        """
        self._invertAxis(1, b)

    def yInverted(self):
        """Return True if the y-axis is currently inverted."""
        return self.state['yInverted']

    def invertX(self, b=True):
        """
        By default, the positive x-axis points rightward on the screen. Use invertX(True) to reverse the x-axis.
        """
        self._invertAxis(0, b)

    def xInverted(self):
        """Return True if the x-axis is currently inverted."""
        return self.state['xInverted']
    def setBorder(self, *args, **kwds):
        """
        Set the pen used to draw border around the view

        If border is None, then no border will be drawn.

        Added in version 0.9.10

        See :func:`mkPen <pyqtgraph.mkPen>` for arguments.
        """
        self.border = fn.mkPen(*args, **kwds)
        self.borderRect.setPen(self.border)

    def setDefaultPadding(self, padding=0.02):
        """
        Sets the fraction of the data range that is used to pad the view range in when auto-ranging.
        By default, this fraction is 0.02.
        """
        self.state['defaultPadding'] = padding
    def setAspectLocked(self, lock=True, ratio=1):
        """
        If the aspect ratio is locked, view scaling must always preserve the aspect ratio.
        By default, the ratio is set to 1; x and y both have the same scaling.
        This ratio can be overridden (xScale/yScale), or use None to lock in the current ratio.
        """
        if not lock:
            # NOTE: '==' (not 'is') — state['aspectLocked'] holds either
            # False or a numeric ratio, and the original comparison
            # semantics are preserved here.
            if self.state['aspectLocked'] == False:
                return
            self.state['aspectLocked'] = False
        else:
            currentRatio = self.getAspectRatio()
            if ratio is None:
                # None means: lock in whatever ratio is currently displayed.
                ratio = currentRatio
            if self.state['aspectLocked'] == ratio: # nothing to change
                return
            self.state['aspectLocked'] = ratio
            if ratio != currentRatio:  ## If this would change the current range, do that now
                self.updateViewRange()
        # Always refresh ranges and notify listeners after a state change.
        self.updateAutoRange()
        self.updateViewRange()
        self.sigStateChanged.emit(self)
    def childTransform(self):
        """
        Return the transform that maps from child(item in the childGroup) coordinates to local coordinates.
        (This maps from inside the viewbox to outside)
        """
        self.updateMatrix()
        m = self.childGroup.transform()
        return m

    def mapToView(self, obj):
        """Maps from the local coordinates of the ViewBox to the coordinate system displayed inside the ViewBox"""
        self.updateMatrix()
        m = fn.invertQTransform(self.childTransform())
        return m.map(obj)

    def mapFromView(self, obj):
        """Maps from the coordinate system displayed inside the ViewBox to the local coordinates of the ViewBox"""
        self.updateMatrix()
        m = self.childTransform()
        return m.map(obj)

    def mapSceneToView(self, obj):
        """Maps from scene coordinates to the coordinate system displayed inside the ViewBox"""
        self.updateMatrix()
        return self.mapToView(self.mapFromScene(obj))

    def mapViewToScene(self, obj):
        """Maps from the coordinate system displayed inside the ViewBox to scene coordinates"""
        self.updateMatrix()
        return self.mapToScene(self.mapFromView(obj))

    def mapFromItemToView(self, item, obj):
        """Maps *obj* from the local coordinate system of *item* to the view coordinates"""
        self.updateMatrix()
        return self.childGroup.mapFromItem(item, obj)

    def mapFromViewToItem(self, item, obj):
        """Maps *obj* from view coordinates to the local coordinate system of *item*."""
        self.updateMatrix()
        return self.childGroup.mapToItem(item, obj)

    def mapViewToDevice(self, obj):
        """Maps *obj* from view coordinates to device (pixel) coordinates."""
        self.updateMatrix()
        return self.mapToDevice(self.mapFromView(obj))

    def mapDeviceToView(self, obj):
        """Maps *obj* from device (pixel) coordinates to view coordinates."""
        self.updateMatrix()
        return self.mapToView(self.mapFromDevice(obj))

    def viewPixelSize(self):
        """Return the (width, height) of a screen pixel in view coordinates."""
        o = self.mapToView(Point(0,0))
        px, py = [Point(self.mapToView(v) - o) for v in self.pixelVectors()]
        return (px.length(), py.length())

    def itemBoundingRect(self, item):
        """Return the bounding rect of the item in view coordinates"""
        return self.mapSceneToView(item.sceneBoundingRect()).boundingRect()
    def wheelEvent(self, ev, axis=None):
        """Zoom the view in response to a mouse-wheel event.

        If *axis* is 0 or 1, only that axis is scaled (and only if mouse
        interaction is enabled for it); otherwise both axes scale
        according to the per-axis mouseEnabled state.  Scaling is
        centered on the cursor position mapped into view coordinates.
        """
        if axis in (0, 1):
            # Single-axis zoom: disable the other axis entirely.
            mask = [False, False]
            mask[axis] = self.state['mouseEnabled'][axis]
        else:
            mask = self.state['mouseEnabled'][:]
        s = 1.02 ** (ev.delta() * self.state['wheelScaleFactor'])  # actual scaling factor
        # Disabled axes get None (no scaling) rather than the factor.
        s = [(None if m is False else s) for m in mask]
        center = Point(fn.invertQTransform(self.childGroup.transform()).map(ev.pos()))
        self._resetTarget()
        self.scaleBy(s, center)
        ev.accept()
        self.sigRangeChangedManually.emit(mask)
    def mouseClickEvent(self, ev):
        """Open the context menu on a right-click when menus are enabled."""
        if ev.button() == QtCore.Qt.MouseButton.RightButton and self.menuEnabled():
            ev.accept()
            self.raiseContextMenu(ev)

    def raiseContextMenu(self, ev):
        """Show the context menu at the event's screen position, after
        letting parent items add their own entries to it."""
        menu = self.getMenu(ev)
        if menu is not None:
            self.scene().addParentContextMenus(self, menu, ev)
            menu.popup(ev.screenPos().toPoint())

    def getMenu(self, ev):
        """Return the context menu for this view (may be None)."""
        return self.menu

    def getContextMenus(self, event):
        """Return this view's menu actions for inclusion in a parent's
        context menu; empty when the menu is disabled."""
        return self.menu.actions() if self.menuEnabled() else []
    def mouseDragEvent(self, ev, axis=None):
        """Handle mouse drags: pan/zoom-box with left/middle button,
        scale with right button.

        If *axis* is specified, the event only affects that axis; axes
        with mouse interaction disabled are masked out entirely.
        """
        ## if axis is specified, event will only affect that axis.
        ev.accept()  ## we accept all buttons
        pos = ev.pos()
        lastPos = ev.lastPos()
        dif = pos - lastPos
        # Invert: dragging right should move the view contents left.
        dif = dif * -1
        ## Ignore axes if mouse is disabled
        mouseEnabled = np.array(self.state['mouseEnabled'], dtype=np.float64)
        mask = mouseEnabled.copy()
        if axis is not None:
            mask[1-axis] = 0.0
        ## Scale or translate based on mouse button
        if ev.button() in [QtCore.Qt.MouseButton.LeftButton, QtCore.Qt.MouseButton.MiddleButton]:
            if self.state['mouseMode'] == ViewBox.RectMode and axis is None:
                if ev.isFinish():  ## This is the final move in the drag; change the view scale now
                    #print "finish"
                    self.rbScaleBox.hide()
                    ax = QtCore.QRectF(Point(ev.buttonDownPos(ev.button())), Point(pos))
                    ax = self.childGroup.mapRectFromParent(ax)
                    self.showAxRect(ax)
                    # Record the new range in the zoom history stack.
                    self.axHistoryPointer += 1
                    self.axHistory = self.axHistory[:self.axHistoryPointer] + [ax]
                else:
                    ## update shape of scale box
                    self.updateScaleBox(ev.buttonDownPos(), ev.pos())
            else:
                # Pan mode: translate by the drag delta in view coordinates.
                tr = self.childGroup.transform()
                tr = fn.invertQTransform(tr)
                tr = tr.map(dif*mask) - tr.map(Point(0,0))
                x = tr.x() if mask[0] == 1 else None
                y = tr.y() if mask[1] == 1 else None
                self._resetTarget()
                if x is not None or y is not None:
                    self.translateBy(x=x, y=y)
                self.sigRangeChangedManually.emit(self.state['mouseEnabled'])
        elif ev.button() & QtCore.Qt.MouseButton.RightButton:
            #print "vb.rightDrag"
            if self.state['aspectLocked'] is not False:
                # With a locked aspect ratio only the y drag drives scaling.
                mask[0] = 0
            dif = ev.screenPos() - ev.lastScreenPos()
            dif = np.array([dif.x(), dif.y()])
            dif[0] *= -1
            # Exponential zoom: each pixel of drag scales by ~2%.
            s = ((mask * 0.02) + 1) ** dif
            tr = self.childGroup.transform()
            tr = fn.invertQTransform(tr)
            x = s[0] if mouseEnabled[0] == 1 else None
            y = s[1] if mouseEnabled[1] == 1 else None
            # Zoom about the position where the right button went down.
            center = Point(tr.map(ev.buttonDownPos(QtCore.Qt.MouseButton.RightButton)))
            self._resetTarget()
            self.scaleBy(x=x, y=y, center=center)
            self.sigRangeChangedManually.emit(self.state['mouseEnabled'])
def keyPressEvent(self, ev):
"""
This routine should capture key presses in the current view box.
Key presses are used only when mouse mode is RectMode
The following events are implemented:
ctrl-A : zooms out to the default "full" view of the plot
ctrl-+ : moves forward in the zooming stack (if it exists)
ctrl-- : moves backward in the zooming stack (if it exists)
"""
ev.accept()
if ev.text() == '-':
self.scaleHistory(-1)
| |
{limits}
:param shape: {shape}
:param binby: {binby}
:param limits: {limits}
:param shape: {shape}
:param sort: return mutual information in sorted (descending) order, and also return the correspond list of expressions when sorted is True
:param selection: {selection}
:param delay: {delay}
:return: {return_stat_scalar},
"""
# either a list of tuples with custom combinations
if y is None and _issequence(x) and all([_issequence(k) for k in x]):
waslist, [combinations, ] = vaex.utils.listify(x)
shape_result = (len(combinations),)
elif _issequence(x) and (_issequence(y) or y is None):
# or ask for a matrix of combinations
if y is None:
combinations = list(itertools.product(x, repeat=dimension))
shape_result = (len(x), ) * dimension
else:
shape_result = (len(x), len(y))
combinations = np.array([[(i, j) for i in y] for j in x]).reshape((-1, 2)).tolist()
waslist = True
elif _issequence(x):
shape_result = (len(x),)
combinations = [(i, y) for i in x]
waslist = True
elif _issequence(y):
shape_result = (len(y),)
combinations = [(i, y) for i in x]
waslist = True
else:
shape_result = tuple()
combinations = [(x, y)]
waslist = False
if mi_limits:
mi_limits = [mi_limits]
limits = self.limits(binby, limits, delay=True)
# make sure we only do the unique combinations
combinations_sorted = [tuple(sorted(k)) for k in combinations]
combinations_unique, unique_reverse = np.unique(combinations_sorted, return_inverse=True, axis=0)
combinations_unique = list(map(tuple, combinations_unique.tolist()))
mi_limits = self.limits(combinations_unique, mi_limits, delay=True)
@delayed
def calculate(counts):
# TODO: mutual information doesn't take axis arguments, so ugly solution for now
counts = counts.astype(np.float64)
fullshape = _expand_shape(shape, len(binby))
out = np.zeros((fullshape), dtype=float)
if len(fullshape) == 0:
out = vaex.kld.mutual_information(counts)
# print("count> ", np.sum(counts))
elif len(fullshape) == 1:
for i in range(fullshape[0]):
out[i] = vaex.kld.mutual_information(counts[..., i])
# print("counti> ", np.sum(counts[...,i]))
# print("countt> ", np.sum(counts))
elif len(fullshape) == 2:
for i in range(fullshape[0]):
for j in range(fullshape[1]):
out[i, j] = vaex.kld.mutual_information(counts[..., i, j])
elif len(fullshape) == 3:
for i in range(fullshape[0]):
for j in range(fullshape[1]):
for k in range(fullshape[2]):
out[i, j, k] = vaex.kld.mutual_information(counts[..., i, j, k])
else:
raise ValueError("binby with dim > 3 is not yet supported")
return out
@delayed
def has_limits(limits, mi_limits):
if not _issequence(binby):
limits = [list(limits)]
values = []
for expressions, expression_limits in zip(combinations_unique, mi_limits):
total_shape = _expand_shape(mi_shape, len(expressions)) + _expand_shape(shape, len(binby))
counts = self.count(binby=list(expressions) + list(binby), limits=list(expression_limits) + list(limits),
shape=total_shape, delay=True, selection=selection)
values.append(calculate(counts))
return values
@delayed
def finish(mi_list):
if sort:
mi_list = np.array(mi_list)
indices = np.argsort(mi_list)[::-1]
sorted_x = list([x[k] for k in indices])
return mi_list[indices], sorted_x
else:
mi_list = np.array(mi_list)
# reconstruct original ordering
mi_list = mi_list[unique_reverse]
total_shape = _expand_shape(shape, len(binby))
total_shape += shape_result
return np.array(vaex.utils.unlistify(waslist, mi_list)).reshape(total_shape)
values = finish(delayed_list(has_limits(limits, mi_limits)))
return self._delay(delay, values)
    def bin_edges(self, expression, limits, shape=default_shape):
        """Return the bin edges (shape+1 values) for *expression* over *limits*."""
        return self.bins(expression, limits, shape=shape, edges=True)

    def bin_centers(self, expression, limits, shape=default_shape):
        """Return the bin centers (shape values) for *expression* over *limits*."""
        return self.bins(expression, limits, shape=shape, edges=False)
def bins(self, expression, limits, shape=default_shape, edges=True):
vmin, vmax = limits
if edges:
bins = np.ogrid[limits[0]:limits[1]:(shape + 1) * 1j]
return bins
else:
dx = (limits[1] - limits[0]) / shape
bins = np.ogrid[limits[0]:limits[1] - dx:(shape) * 1j]
return bins + dx / 2
def nearest_bin(self, value, limits, shape):
bins = self.bins('', limits=limits, edges=False, shape=shape)
index = np.argmin(np.abs(bins - value))
return index
def _compute_agg(self, name, expression, binby=[], limits=None, shape=default_shape, selection=False, delay=False, edges=False, progress=None, extra_expressions=None, array_type=None):
logger.debug("aggregate %s(%r, binby=%r, limits=%r)", name, expression, binby, limits)
expression = _ensure_strings_from_expressions(expression)
if extra_expressions:
extra_expressions = _ensure_strings_from_expressions(extra_expressions)
expression_waslist, [expressions, ] = vaex.utils.listify(expression)
# TODO: doesn't seemn needed anymore?
# expressions = [self._column_aliases.get(k, k) for k in expressions]
import traceback
trace = ''.join(traceback.format_stack())
for expression in expressions:
if expression and expression != "*":
self.validate_expression(expression)
if not hasattr(self.local, '_aggregator_nest_count'):
self.local._aggregator_nest_count = 0
if self.local._aggregator_nest_count != 0:
raise RuntimeError("nested aggregator call: \nlast trace:\n%s\ncurrent trace:\n%s" % (self.local.last_trace, trace))
else:
self.local.last_trace = trace
# Instead of 'expression is not None', we would like to have 'not virtual'
# but in agg.py we do some casting, which results in calling .dtype(..) with a non-column
# expression even though all expressions passed here are column references
# virtual = [k for k in expressions if k and k not in self.columns]
if self._future_behaviour != 5 and (self.filtered and expression not in [None, '*']):
# When our dataframe is filtered, and we have expressions, we may end up calling
# df.dtype(..) which in turn may call df.evaluate(..) which in turn needs to have
# the filter cache filled in order to compute the first non-missing row. This last
# item could call df.count() again, leading to nested aggregators, which we do not
# support. df.dtype() needs to call evaluate with filtering enabled since we consider
# it invalid that expressions are evaluate with filtered data. Sklearn for instance may
# give errors when evaluated with NaN's present.
# TODO: GET RID OF THIS
# TODO: temporary disabled
# len(self) # fill caches and masks
pass
progressbar = vaex.utils.progressbars(progress, title=name)
if not isinstance(binby, (list, tuple)) or len(binby) > 0:
progressbar_limits = progressbar.add("binners")
binners = self._create_binners(binby, limits, shape, selection=selection, delay=True, progress=progressbar_limits)
else:
binners = ()
progressbar_agg = progressbar
@delayed
def compute(expression, binners, selection, edges):
binners = tuple(binners)
if not hasattr(self.local, '_aggregator_nest_count'):
self.local._aggregator_nest_count = 0
self.local._aggregator_nest_count += 1
try:
if expression in ["*", None]:
agg = vaex.agg.aggregates[name](selection=selection, edges=edges)
else:
if extra_expressions:
agg = vaex.agg.aggregates[name](expression, *extra_expressions, selection=selection, edges=edges)
else:
agg = vaex.agg.aggregates[name](expression, selection=selection, edges=edges)
tasks, result = agg.add_tasks(self, binners, progress=progressbar)
@delayed
def finish(counts):
return np.asanyarray(counts)
return finish(result)
finally:
self.local._aggregator_nest_count -= 1
@delayed
def finish(binners, *counts):
if array_type == 'xarray':
dims = [binner.expression for binner in binners]
if expression_waslist:
dims = ['expression'] + dims
def to_coord(binner):
if isinstance(binner, BinnerOrdinal):
return self.category_labels(binner.expression)
elif isinstance(binner, BinnerScalar):
return self.bin_centers(binner.expression, [binner.minimum, binner.maximum], binner.count)
coords = [to_coord(binner) for binner in binners]
if expression_waslist:
coords = [expressions] + coords
counts = np.asanyarray(counts)
else:
counts = counts[0]
return xarray.DataArray(counts, dims=dims, coords=coords)
elif array_type == 'list':
return vaex.utils.unlistify(expression_waslist, counts).tolist()
elif array_type in [None, 'numpy']:
def possibly_masked_array(ar):
if isinstance(ar, (list, tuple)):
has_mask = any(np.ma.isMaskedArray(k) for k in ar)
else:
has_mask = np.ma.isMaskedArray(ar)
if has_mask:
return np.ma.array(ar)
else:
return np.asanyarray(ar)
return possibly_masked_array(vaex.utils.unlistify(expression_waslist, counts))
else:
raise RuntimeError(f'Unknown array_type {format}')
stats = [compute(expression, binners, selection=selection, edges=edges) for expression in expressions]
var = finish(binners, *stats)
return self._delay(delay, progressbar.exit_on(var))
@docsubst
def count(self, expression=None, binby=[], limits=None, shape=default_shape, selection=False, delay=False, edges=False, progress=None, array_type=None):
"""Count the number of non-NaN values (or all, if expression is None or "*").
Example:
>>> df.count()
330000
>>> df.count("*")
330000.0
>>> df.count("*", binby=["x"], shape=4)
array([ 10925., 155427., 152007., 10748.])
:param expression: Expression or column for which to count non-missing values, or None or '*' for counting the rows
:param binby: {binby}
:param limits: {limits}
:param shape: {shape}
:param selection: {selection}
:param delay: {delay}
:param progress: {progress}
:param edges: {edges}
:param array_type: {array_type}
:return: {return_stat_scalar}
"""
return self._compute_agg('count', expression, binby, limits, shape, selection, delay, edges, progress, array_type=array_type)
    @delayed
    def _first_calculation(self, expression, order_expression, binby, limits, shape, selection, edges, progressbar):
        """Delayed helper: schedule a TaskStatistic computing the first
        value of *expression* per bin, ordered by *order_expression*.

        NOTE(review): when *shape* is truthy, *limits* is assumed to be a
        (limits, shapes) pair produced upstream — confirm with callers.
        """
        if shape:
            limits, shapes = limits
        else:
            limits, shapes = limits, shape
        task = tasks.TaskStatistic(self, binby, shapes, limits, weights=[expression, order_expression], op=tasks.OP_FIRST, selection=selection, edges=edges)
        task = self.executor.schedule(task)
        progressbar.add_task(task, "count for %s" % expression)
        @delayed
        def finish(counts):
            # Materialize the delayed result as a plain ndarray.
            counts = np.array(counts)
            return counts
        return finish(task)
@docsubst
def first(self, expression, order_expression=None, binby=[], limits=None, shape=default_shape, selection=False, delay=False, edges=False, progress=None, array_type=None):
"""Return the first element of a binned `expression`, where the values each bin are sorted by `order_expression`.
Example:
>>> import vaex
>>> df = vaex.example()
>>> df.first(df.x, df.y, shape=8)
>>> df.first(df.x, df.y, shape=8, binby=[df.y])
>>> df.first(df.x, df.y, shape=8, binby=[df.y])
array([-4.81883764, 11.65378 , 9.70084476, -7.3025589 , 4.84954977,
8.47446537, -5.73602629, 10.18783 ])
:param expression: {expression}
:param order_expression: Order the values in the bins by this expression.
:param binby: {binby}
:param limits: {limits}
:param shape: {shape}
:param selection: {selection}
:param delay: {delay}
:param progress: {progress}
:param edges: {edges}
:param array_type: {array_type}
:return: Ndarray containing the first elements.
:rtype: numpy.array
"""
return self._compute_agg('first', expression, binby, limits, shape, selection, delay, edges, progress, extra_expressions=[order_expression], array_type=array_type)
logger.debug("count(%r, binby=%r, limits=%r)", expression, binby, limits)
logger.debug("count(%r, binby=%r, limits=%r)", expression, binby, limits)
expression = _ensure_strings_from_expressions(expression)
order_expression = _ensure_string_from_expression(order_expression)
binby = _ensure_strings_from_expressions(binby)
waslist, [expressions,] = vaex.utils.listify(expression)
@delayed
def finish(*counts):
counts = np.asarray(counts)
return vaex.utils.unlistify(waslist, | |
the superstate.
if Hsm.exit(me, me.state) == Hsm.RET_HANDLED:
Hsm.trig(me, me.state, Signal.EMPTY)
t = me.state
# Step into children until we enter the target
for st in reversed(path[:path.index(t)]):
Hsm.enter(me, st)
    @staticmethod
    def init(me, event = None):
        """Transitions to the initial state. Follows any INIT transitions
        from the inital state and performs ENTRY actions as it proceeds.
        Use this to pass any parameters to initialize the state machine.
        p. 172

        :param me: the state machine instance being initialized.
        :param event: unused here; kept for interface compatibility.
        """
        # TODO: The initial state MUST transition to another state
        # The code that formerly did this was:
        # status = me.initial_state(me, event)
        # assert status == Hsm.RET_TRAN
        # But the above code is commented out so an Ahsm's _initial()
        # isn't executed twice.
        # Walk the INIT chain from the top state down to the resting leaf.
        me.state = Hsm._perform_init_chain(me, Hsm.top)
    @staticmethod
    def dispatch(me, event):
        """Dispatches the given event to this Hsm.
        Follows the application's state transitions
        until the event is handled or top() is reached
        p. 174

        :param me: the state machine instance.
        :param event: the event to dispatch to the current state.
        """
        Spy.on_hsm_dispatch_event(event)

        # Save the current state
        t = me.state

        # Proceed to superstates if event is not handled, we wish to find the superstate
        # (if any) that does handle the event and to record the path to that state
        exit_path = []
        r = Hsm.RET_SUPER
        while r == Hsm.RET_SUPER:
            s = me.state
            exit_path.append(s)
            Spy.on_hsm_dispatch_pre(s)
            r = s(me, event) # invoke state handler

        # We leave the while loop with s at the state which was able to respond
        # to the event, or to Hsm.top if none did
        Spy.on_hsm_dispatch_post(exit_path)

        # If the state handler for s requests a transition
        if r == Hsm.RET_TRAN:
            t = me.state
            # Store target of transition
            # Exit from the current state to the state s which handles
            # the transition. We do not exit from s=exit_path[-1] itself.
            for st in exit_path[:-1]:
                r = Hsm.exit(me, st)
                assert (r == Hsm.RET_SUPER) or (r == Hsm.RET_HANDLED)
            s = exit_path[-1]
            # Transition to t through the HSM
            Hsm._perform_transition(me, s, t)
            # Do initializations starting at t
            t = Hsm._perform_init_chain(me, t)

        # Restore the state
        me.state = t
class Framework(object):
    """Framework is a composite class that holds:
    - the asyncio event loop
    - the registry of AHSMs
    - the set of TimeEvents
    - the handle to the next TimeEvent
    - the table subscriptions to events
    """

    # NOTE(review): asyncio.get_event_loop() at class-definition time is
    # deprecated when no loop is running on Python 3.10+ — confirm the
    # supported interpreter versions before upgrading.
    _event_loop = asyncio.get_event_loop()

    # The Framework maintains a registry of Ahsms in a list.
    _ahsm_registry = []

    # The Framework maintains a dict of priorities in use
    # to prevent duplicates.
    # An Ahsm's priority is checked against this dict
    # within the Ahsm.start() method
    # when the Ahsm is added to the Framework.
    # The dict's key is the priority (integer) and the value is the Ahsm.
    _priority_dict = {}

    # The Framework maintains a group of TimeEvents in a dict. The next
    # expiration of the TimeEvent is the key and the event is the value.
    # Only the event with the next expiration time is scheduled for the
    # timeEventCallback(). As TimeEvents are added and removed, the scheduled
    # callback must be re-evaluated. Periodic TimeEvents should only have
    # one entry in the dict: the next expiration. The timeEventCallback() will
    # add a Periodic TimeEvent back into the dict with its next expiration.
    _time_events = {}

    # When a TimeEvent is scheduled for the timeEventCallback(),
    # a handle is kept so that the callback may be cancelled if necessary.
    _tm_event_handle = None

    # The Subscriber Table is a dictionary. The keys are signals.
    # The value for each key is a list of Ahsms that are subscribed to the
    # signal. An Ahsm may subscribe to a signal at any time during runtime.
    _subscriber_table = {}
    @staticmethod
    def post(event, act):
        """Posts the event to the given Ahsm's event queue.
        The argument, act, is an Ahsm instance.

        NOTE(review): unlike publish(), this does not schedule
        Framework.run() on the event loop — presumably the caller (or a
        subsequent publish) drives the run; confirm before relying on it.
        """
        assert isinstance(act, Ahsm)
        act.postFIFO(event)
@staticmethod
def post_by_name(event, act_name):
"""Posts the event to the given Ahsm's event queue.
The argument, act, is a string of the name of the class
to which the event is sent. The event will post to all actors
having the given classname.
"""
assert type(act_name) is str
for act in Framework._ahsm_registry:
if act.__class__.__name__ == act_name:
act.postFIFO(event)
    @staticmethod
    def publish(event):
        """Posts the event to the message queue of every Ahsm
        that is subscribed to the event's signal.

        A run-to-completion pass is scheduled on the event loop
        regardless of whether any subscriber received the event.
        """
        if event.signal in Framework._subscriber_table:
            for act in Framework._subscriber_table[event.signal]:
                act.postFIFO(event)
        # Run to completion
        Framework._event_loop.call_soon_threadsafe(Framework.run)
@staticmethod
def subscribe(signame, act):
"""Adds the given Ahsm to the subscriber table list
for the given signal. The argument, signame, is a string of the name
of the Signal to which the Ahsm is subscribing. Using a string allows
the Signal to be created in the registry if it is not already.
"""
sigid = Signal.register(signame)
if sigid not in Framework._subscriber_table:
Framework._subscriber_table[sigid] = []
Framework._subscriber_table[sigid].append(act)
@staticmethod
def addTimeEvent(tm_event, delta):
    """Schedules tm_event to fire its signal (to the TimeEvent's
    target Ahsm) after a relative delay of delta seconds, measured
    on the framework's event-loop clock.
    """
    Framework.addTimeEventAt(
        tm_event, Framework._event_loop.time() + delta)
@staticmethod
def addTimeEventAt(tm_event, abs_time):
    """Schedules tm_event to fire its signal (to the TimeEvent's
    target Ahsm) at the absolute event-loop time abs_time.

    The event must not already be active.
    """
    assert tm_event not in Framework._time_events.values()
    Framework._insortTimeEvent(tm_event, abs_time)
@staticmethod
def _insortTimeEvent(tm_event, expiration):
    """Inserts a TimeEvent into the dict of active time events,
    keyed by its expiration time on the event-loop clock.

    If the expiration collides with an existing key, the smallest
    representable increment is added so identically-timed events fire
    in FIFO order.  The event-loop callback is (re)scheduled whenever
    the inserted event becomes the soonest to expire.
    """
    # If the event is to happen in the past, post it now.
    # NOTE(review): the event is still inserted below and will fire
    # again at its (past) expiration — confirm this is intended.
    now = Framework._event_loop.time()
    if expiration < now:
        tm_event.act.postFIFO(tm_event)
        # TODO: if periodic, need to schedule next?
    # If an event already occupies this expiration time, nudge this
    # event's expiration up by the smallest measurable amount so the
    # dict key stays unique and ordering remains FIFO.
    while expiration in Framework._time_events.keys():
        m, e = math.frexp(expiration)
        expiration = (m + sys.float_info.epsilon) * 2**e
    Framework._time_events[expiration] = tm_event
    if len(Framework._time_events) == 1:
        # This is the only active TimeEvent: schedule its callback.
        Framework._tm_event_handle = Framework._event_loop.call_at(
            expiration, Framework.timeEventCallback, tm_event, expiration)
    elif expiration == min(Framework._time_events.keys()):
        # BUGFIX: this branch previously tested
        # 'expiration < min(keys)', but the minimum was computed AFTER
        # this event was inserted as a key, so the comparison could
        # never be true and a sooner event never preempted the
        # currently scheduled callback.  Testing for equality with the
        # minimum correctly detects "this event is now the soonest".
        if Framework._tm_event_handle:
            Framework._tm_event_handle.cancel()
        Framework._tm_event_handle = Framework._event_loop.call_at(
            expiration, Framework.timeEventCallback, tm_event, expiration)
@staticmethod
def removeTimeEvent(tm_event):
    """Removes the TimeEvent from the dict of active time events.

    If the removed event was the soonest to expire (the one whose
    callback is currently scheduled), the callback is cancelled and
    the next-soonest event's callback is scheduled in its place.
    Removing an event that is not active is a no-op.
    """
    # BUGFIX: the original deleted dict entries while iterating
    # Framework._time_events.items() and, in the soonest-event branch,
    # did not break out of the loop — in Python 3 that raises
    # RuntimeError ("dictionary changed size during iteration").
    # Locate the key first, then mutate the dict outside the loop.
    key = None
    for k, v in Framework._time_events.items():
        if v is tm_event:
            key = k
            break
    if key is None:
        return
    # Decide BEFORE deleting whether this event was the scheduled one.
    was_soonest = key == min(Framework._time_events.keys())
    del Framework._time_events[key]
    if was_soonest and Framework._tm_event_handle:
        # Cancel the scheduled callback and schedule the next event's,
        # if any remain.
        Framework._tm_event_handle.cancel()
        if len(Framework._time_events) > 0:
            next_expiration = min(Framework._time_events.keys())
            next_event = Framework._time_events[next_expiration]
            Framework._tm_event_handle = Framework._event_loop.call_at(
                next_expiration, Framework.timeEventCallback,
                next_event, next_expiration)
        else:
            Framework._tm_event_handle = None
@staticmethod
def timeEventCallback(tm_event, expiration):
    """The callback function for all TimeEvents.

    Removes the expired entry, posts the event to its target Ahsm,
    re-insorts the event if it is periodic, schedules the callback for
    the next pending TimeEvent (if any), and wakes the run loop.
    """
    # The expiration key must still be present; it is only removed here
    # or by removeTimeEvent(), which also cancels this callback.
    assert expiration in Framework._time_events.keys(), (
        "Exp:%d _time_events.keys():%s" %
        (expiration, Framework._time_events.keys()))
    # Remove this expired TimeEvent from the active list
    del Framework._time_events[expiration]
    Framework._tm_event_handle = None
    # Post the event to the target Ahsm
    tm_event.act.postFIFO(tm_event)
    # If this is a periodic time event, schedule its next expiration
    # (interval > 0 marks a periodic TimeEvent).
    if tm_event.interval > 0:
        Framework._insortTimeEvent(tm_event,
                                   expiration + tm_event.interval)
    # If not set already and there are more events, set the next event
    # callback.  _insortTimeEvent above may already have set the handle.
    if (Framework._tm_event_handle == None and
            len(Framework._time_events) > 0):
        next_expiration = min(Framework._time_events.keys())
        next_event = Framework._time_events[next_expiration]
        Framework._tm_event_handle = Framework._event_loop.call_at(
            next_expiration, Framework.timeEventCallback, next_event,
            next_expiration)
    # Run to completion
    Framework._event_loop.call_soon_threadsafe(Framework.run)
@staticmethod
def add(act):
    """Registers the given Ahsm, act, with the framework.

    Each actor must carry a unique priority.
    """
    Framework._ahsm_registry.append(act)
    priority = act.priority
    assert priority not in Framework._priority_dict, (
        "Priority MUST be unique")
    Framework._priority_dict[priority] = act
    Spy.on_framework_add(act)
@staticmethod
def run():
| |
<filename>eagle/eagle.py
import argparse
import sys
from datetime import datetime
from .groups import add_group, delete_group, soft_delete_group
from .meta import CONFIG
from .storage import get_storage
from .tasks import add_task, delete_task, edit_task, prune
def clear():
    """
    Empties the todo list: drops every stored task and every group.
    """
    with get_storage() as storage:
        for key in ("tasks", "groups"):
            storage[key].clear()
    print("\nYour list has been cleared out.\n")
def parse_arguments():
    """
    Parses CLI arguments and returns Namespace object.

    :return: Namespace object with parsed params.
    :rtype: Namespace
    """
    parser = argparse.ArgumentParser(
        prog=CONFIG["package_name"], description=CONFIG["description"]
    )
    # 1. Task flags.
    parser.add_argument(
        "-a",
        "--add",
        nargs="+",
        action="append",
        metavar=("TASK", "FREQUENCY (and GROUP)"),
        help=(
            'Creates a task like: -a "do the right thing" or -a "make yo bed" 1d or -a "make yo sis bed" @20/1/2050. '
            'For recurring tasks you can use "d", "w", "m", "y" for days, weeks, months, years.'
        ),
    )
    parser.add_argument(
        "-d",
        "--delete",
        nargs=1,
        type=int,
        action="append",
        metavar="TASK",
        help="Removes an item from todo list. Cannot be undone.",
    )
    parser.add_argument(
        "--clear",
        action="store_true",
        help="Clears todo list - removes all the tasks. No undo.",
    )
    parser.add_argument(
        "--prune", action="store_true", help="Removes all overdue tasks."
    )
    # 2. Group flags.
    parser.add_argument(
        "-A",
        "--add-group",
        nargs=1,
        action="append",
        metavar="GROUP",
        help="Creates a group which can be used for managing tasks.",
    )
    parser.add_argument(
        "-D",
        "--delete-group",
        nargs=1,
        action="append",
        metavar="GROUP",
        help="Removes a group and tasks attached to the group.",
    )
    parser.add_argument(
        "-S",
        "--soft-delete-group",
        nargs=1,
        action="append",
        metavar="GROUP",
        help="Removes a group and tasks attached to the group are pulled out.",
    )
    parser.add_argument(
        "-e", "--edit", nargs=1, type=int, metavar="TASK", help="Edits a task."
    )
    # 3. Listing / filtering flags.
    parser.add_argument(
        "-g", "--group", nargs=1, action="append", help="Filters tasks by group."
    )
    parser.add_argument(
        "--overdue", action="store_true", help="Filters overdue tasks."
    )
    parser.add_argument(
        "--today", action="store_true", help="Filters today's tasks."
    )
    parser.add_argument(
        "--upcoming",
        action="store_true",
        help="Filters upcoming tasks (up to 3 days starting from today).",
    )
    parser.add_argument(
        "--search", nargs=1, action="append", metavar="QUERY", help="Searches tasks."
    )
    parser.add_argument(
        "--others", action="store_true", help="Filters others tasks."
    )
    parser.add_argument(
        "--sort",
        choices=["groups"],
        help='Sort tasks by the given flag. Possible options are: "groups".',
    )
    parser.add_argument(
        "--version",
        action="store_true",
        help="Shows version and other useful informations.",
    )
    return parser.parse_args()
def print_list(tasks, sort_by=None, all_tasks=False):
    """
    Prints overdue, today, upcoming and other tasks.

    :param list tasks: List of already filtered tasks - enumerated
        (index, Task) pairs.
    :param str sort_by: Sort the tasks by given flag - choices: "groups"
    :param bool all_tasks: When True, ignore ``tasks`` and load every
        task from storage instead.
    """

    def get_printable_freq(freq):
        """
        Formats task frequency.

        :param datetime or str freq: Frequency to be formatted.
        :return: Formatted frequency string.
        :rtype: str
        """
        # Dates render as dd/mm/yyyy; string frequencies pass through.
        if isinstance(freq, datetime):
            return freq.strftime("%d/%m/%Y")
        return freq

    def print_task(number, task, freq=False):
        """
        Prints a formatted task line.

        :param int number: Storage index of the task (displayed as +1).
        :param Task task: Task object.
        :param str freq: Formatted task frequency.
        """
        group = f" [{task.group}]" if task.group else ""
        if freq:
            print(f"\t{number + 1}. {task.title} ({freq}){group}")
        else:
            print(f"\t{number + 1}. {task.title}{group}")

    def print_section(header, section_tasks, trailing_newline=False):
        """
        Prints a section header followed by its numbered tasks.
        Replaces the four near-identical print_*_tasks helpers.
        """
        print(header)
        for i, t in section_tasks:
            if t.frequency:
                print_task(i, t, get_printable_freq(t.frequency))
            else:
                print_task(i, t)
        if trailing_newline:
            print("")

    def sort_tasks(section_tasks, flag):
        """
        Sorts tasks by the given flag. Choices are: "groups".

        :param list section_tasks: List of (index, Task) pairs.
        :param str flag: The sort flag.
        """
        if flag == "groups":
            # Tasks without a group sort first (empty-string key).
            return sorted(
                section_tasks, key=lambda t: t[1].group if t[1].group else ""
            )
        return section_tasks

    # Load tasks.
    if all_tasks:
        with get_storage() as s:
            tasks = enumerate(s["tasks"])
    overdue_tasks = []
    today_tasks = []
    upcoming_tasks = []
    other_tasks = []
    # Gather tasks into their sections.
    for i, t in tasks:
        if t.is_overdue():
            overdue_tasks.append((i, t))
        elif t.is_today_task():
            today_tasks.append((i, t))
        elif t.is_upcoming():
            upcoming_tasks.append((i, t))
        else:
            other_tasks.append((i, t))
    # Sort tasks.
    # BUGFIX: upcoming_tasks was previously never sorted even when a
    # sort flag was given; all four sections are now treated alike.
    if sort_by:
        overdue_tasks = sort_tasks(overdue_tasks, sort_by)
        today_tasks = sort_tasks(today_tasks, sort_by)
        upcoming_tasks = sort_tasks(upcoming_tasks, sort_by)
        other_tasks = sort_tasks(other_tasks, sort_by)
    if overdue_tasks:
        print_section("\nOverdue:", overdue_tasks)
    if today_tasks:
        print_section("\nToday:", today_tasks)
    if upcoming_tasks:
        print_section("\nUpcoming:", upcoming_tasks)
    if other_tasks:
        print_section("\nYour list:", other_tasks, trailing_newline=True)
def filter_tasks_by_groups(tasks=None, groups=None):
    """
    Narrows the task list down to tasks belonging to the given groups.

    :param list tasks: List of already filtered tasks.
    :param list groups: List of existing groups (nested lists).
    :return: Narrowed list of tasks.
    :rtype: list
    """
    # Fall back to the stored task list when none was supplied.
    if not tasks:
        with get_storage() as s:
            tasks = s["tasks"]
    if groups:
        # argparse delivers groups as a list of one-element lists.
        wanted = [g for g_list in groups for g in g_list]
        tasks = [(i, t) for i, t in enumerate(tasks) if t.group in wanted]
    return tasks
def filter_today_tasks():
    """
    Collects tasks scheduled for today.

    :return: Narrowed list of tasks - enumerated.
    :rtype: list
    """
    with get_storage() as s:
        tasks = s["tasks"]
    return [(i, t) for i, t in enumerate(tasks) if t.is_today_task()]
def filter_overdue_tasks():
    """
    Collects tasks whose deadline has already passed.

    :return: Narrowed list of tasks - enumerated.
    :rtype: list
    """
    with get_storage() as s:
        tasks = s["tasks"]
    return [(i, t) for i, t in enumerate(tasks) if t.is_overdue()]
def filter_other_tasks():
    """
    Collects tasks that are neither today's nor overdue.

    :return: Narrowed list of tasks - enumerated.
    :rtype: list
    """
    with get_storage() as s:
        numbered = list(enumerate(s["tasks"]))
    return [(i, t) for i, t in numbered
            if not t.is_today_task() and not t.is_overdue()]
def search_tasks(queries):
    """
    Searches tasks by title, case-insensitively.

    :param list queries: Nested lists of query strings (from argparse).
    :return: Narrowed list of tasks - enumerated; a task may appear
        once per matching query.
    :rtype: list
    """
    # argparse delivers queries as a list of one-element lists.
    flat_queries = [q for q_list in queries for q in q_list]
    with get_storage() as s:
        tasks = s["tasks"]
    matches = []
    for query in flat_queries:
        needle = query.lower()
        matches.extend(
            (i, t) for i, t in enumerate(tasks) if needle in t.title.lower()
        )
    return matches
def filter_upcoming_tasks():
    """
    Collects tasks coming up in the near future.

    :return: Narrowed list of tasks - enumerated.
    :rtype: list
    """
    with get_storage() as s:
        tasks = s["tasks"]
    return [(i, t) for i, t in enumerate(tasks) if t.is_upcoming()]
def eagle():
"""
Main app function. Spins up the wheel
and delivers the output.
"""
to_print = False
# groups = None
tasks = []
args = None
all_tasks = False
if 1 < len(sys.argv):
args = parse_arguments()
# print(args)
# Add task.
if args.add:
add_task(args.add)
to_print = True
# Edit task.
if args.edit:
edit_task(args.edit)
to_print = True
# Delete task.
if args.delete:
delete_task(args.delete)
to_print = True
# Clear tasks.
if args.clear:
clear()
if args.prune:
prune()
# Add group.
if args.add_group:
add_group(args.add_group)
to_print = True
# Delete group.
if args.delete_group:
delete_group(args.delete_group)
to_print = True
# Soft delete group.
if args.soft_delete_group:
soft_delete_group(args.soft_delete_group)
to_print = True
# Filter by group.
if args.group:
to_print = True
tasks.extend(filter_tasks_by_groups(tasks, args.group))
# Filter today's tasks.
if args.today:
to_print = True
tasks.extend(filter_today_tasks())
# Filter overdue tasks.
if args.overdue:
to_print = True
tasks.extend(filter_overdue_tasks())
# Filter upcoming tasks.
if args.upcoming:
to_print = True
tasks.extend(filter_upcoming_tasks())
# Search tasks.
if args.search:
to_print = True
tasks.extend(search_tasks(args.search))
# Filter other tasks.
if args.others:
to_print = True
tasks.extend(filter_other_tasks())
# Sort.
if args.sort:
to_print = True
# Version.
if args.version:
print(
(
f"{CONFIG['package_name']} {CONFIG['version']}\n"
f"Author: {CONFIG['author']}\n"
f"Homepage: {CONFIG['homepage']}"
)
)
| |
iraf
from numpy import array, compress
import string
import re
import sys
import os
from ntt import delete
from ntt.util import readhdr, readkey3, delete
import ntt
hdr = readhdr(img)
_ra = readkey3(hdr, 'RA')
_dec = readkey3(hdr, 'DEC')
iraf.imcoords(_doprint=0)
iraf.astcat(_doprint=0)
toforget = ['imcoords', 'astcat', 'tv']
for t in toforget:
try:
iraf.unlearn(t)
except:
pass
iraf.noao.astcat.aregpars.rcrauni = ''
iraf.noao.astcat.aregpars.rcdecuni = ''
iraf.noao.astcat.catdb = ntt.__path__[0] + '/standard/cat/catalogue.dat'
iraf.noao.astcat.aregpars.rcra = _ra / 15
iraf.noao.astcat.aregpars.rcdec = _dec
iraf.noao.astcat.aregpars.rrawidt = 15.
iraf.noao.astcat.aregpars.rdecwid = 15.
delete('tmp.catalogue')
delete('tmp.catalogue.pix')
if method == 'iraf':
if catalogue == 'usnoa2':
lll = iraf.noao.astcat.agetcat(
'pars', 'STDOUT', catalog='usno2@noao', verbose='no', Stdout=1)
elif catalogue == 'usnob1':
lll = iraf.noao.astcat.agetcat(
'pars', 'STDOUT', catalog='usnob1@noao', verbose='no', Stdout=1)
elif catalogue == '2mass':
lll = iraf.noao.astcat.agetcat(
'pars', 'STDOUT', catalog='twomass@irsa', verbose='yes', Stdout=1)
# elif catalogue=='gsc1':
# lll=iraf.noao.astcat.agetcat('pars','STDOUT',catalog='gsc1@cadc',verbose='no',Stdout=1)
else:
if os.path.isfile(ntt.__path__[0] + '/standard/cat/' + catalogue):
ff = open(ntt.__path__[0] + '/standard/cat/' + catalogue)
lll = ff.readlines()
ff.close()
for ii in range(0, len(lll)):
lll[ii] = re.sub('\n', '', lll[ii])
print 'catalogue from user'
else:
sys.exit('Error: catalogue ' + str(catalogue) +
'not in the list [usnob1,usnoa2,2mass]')
########
# REMOVE COMMENT LINES
lllNew = []
for ll in lll:
if ll[:2] != "\ " and ll[:2] != "| ":
lllNew.append(ll)
lll = lllNew
# for i, v in enumerate(lll):
# print i, v
# FIND LINE WITH nfield value
indfield = [i for i in range(0, len(lll)) if 'nfields' in lll[i]]
fields = int(lll[indfield[0]].split()[-1])
stdcoo = {}
column = {}
for j in range(indfield[0] + 1, indfield[0] + fields + 1):
if lll[j].split()[1] not in column:
column[lll[j].split()[1]] = int(lll[j].split()[2])
if lll[j].split()[1] not in stdcoo:
stdcoo[lll[j].split()[1]] = []
startIndex = lll.index('# END CATALOG HEADER') + 2
for i in lll[startIndex:]:
for j in stdcoo.keys():
val = i.split()[column[j] - 1]
if j in ["ra", "dec"]:
val = val.replace("d", ":").replace(
"h", ":").replace("m", ":").replace("s", "")
stdcoo[j].append(val)
colonne3 = str(int(column['ra'])) + ' ' + str(int(column['dec']))
if catalogue in ['usnoa2', 'usnob1', '2mass', 'gsc1']:
colonne4 = {'usnoa2': 'mag1', 'usnob1': 'R2mag',
'2mass': 'mag1', 'gsc1': 'mag'}
else:
for jj in column.keys():
if jj in ['U', 'B', 'V', 'R', 'I', 'g', 'r', 'i', 'z']:
colonne4 = {catalogue: jj}
break
elif method == 'vizir':
# replace vizq with vizq2 to be consistent
#
stdcoo = ntt.efoscastrodef.vizq2(_ra, _dec, catalogue, 10)
lll = ['# END CATALOG HEADER', '#']
for ff in range(0, len(stdcoo['ra'])):
lll.append(str(stdcoo['ra'][ff]) + ' ' +
str(stdcoo['dec'][ff]) + ' ' + str(stdcoo['mag'][ff]))
colonne4 = {'usnoa2': 'mag', 'usnob1': 'mag',
'2mass': 'mag', 'gsc1': 'mag'}
colonne3 = ' 1 2 '
column = {'ra': 1, 'dec': 2, 'r': 3}
ddd2 = iraf.wcsctran('STDIN', 'STDOUT', img, Stdin=lll, Stdout=1, inwcs='world', units='degree degrees',
outwcs='logical', columns=colonne3, formats='%10.1f %10.1f')
xx, yy = [], []
for i in ddd2[ddd2.index('# END CATALOG HEADER') + 2:]:
if i:
xx.append(float(i.split()[column['ra'] - 1]))
yy.append(float(i.split()[column['dec'] - 1]))
# colonne4={'usnoa2':'mag1','usnob1':'R2mag','2mass':'mag1','gsc1':'mag'}
#######
acoo1 = []
apixx1, apixy1, am1, apix1 = [], [], [], []
for i in range(0, len(stdcoo['ra'])):
acoo1.append(str(stdcoo['ra'][i]) + ' ' + str(stdcoo['dec'][i]))
apix1.append(str(xx[i]) + ' ' + str(yy[i]))
am1.append(stdcoo[colonne4[catalogue]][i])
if catalogue == '2mass':
for jj in range(0, len(am1)):
try:
am1[jj] = float(re.sub('L', '', str(am1[jj])))
except:
am1[jj] = 999
for key in stdcoo.keys():
stdcoo[key] = compress((array(xx) < int(int(hdr['NAXIS1']) + 100)) & (array(xx) > -100) & (
array(yy) < int(int(hdr['NAXIS2']) + 100)) & (array(yy) > -100), array(stdcoo[key]))
stdcoo['coo'] = compress((array(xx) < int(int(hdr['NAXIS1']) + 100)) & (array(xx) > -100) & (
array(yy) < int(int(hdr['NAXIS2']) + 100)) & (array(yy) > -100), array(acoo1))
stdcoo['pix'] = compress((array(xx) < int(int(hdr['NAXIS1']) + 100)) & (array(xx) > -100) & (
array(yy) < int(int(hdr['NAXIS2']) + 100)) & (array(yy) > -100), array(apix1))
stdcoo['mag'] = compress((array(xx) < int(int(hdr['NAXIS1']) + 100)) & (array(xx) > -100) & (
array(yy) < int(int(hdr['NAXIS2']) + 100)) & (array(yy) > -100), array(am1, float))
stdcoo['x'] = compress((array(xx) < int(int(hdr['NAXIS1']) + 100)) & (array(xx) > -100) & (
array(yy) < int(int(hdr['NAXIS2']) + 100)) & (array(yy) > -100), array(xx, float))
stdcoo['y'] = compress((array(xx) < int(int(hdr['NAXIS1']) + 100)) & (array(xx) > -100) & (
array(yy) < int(int(hdr['NAXIS2']) + 100)) & (array(yy) > -100), array(yy, float))
return stdcoo
##########################################################################
def efoscastrometry2(lista, catalogue, _interactive, number, sexvec, catvec, guess=False, fitgeo='xyscale',
tollerance1=100, tollerance2=30, _update='yes', imex=False, nummin=4):
# print "LOGX:: entering efoscastrometry2\n"
import os
import string
import re
import sys
import numpy
import math
from numpy import array, compress, argsort, sort, asarray
from numpy import round, mean, std, sqrt, median
from numpy import argmin, isnan, abs, genfromtxt
import ntt
import time
import datetime
from ntt.efoscastrodef import wcsstart
from ntt.util import delete, readhdr, readkey3, display_image, defsex
from pyraf import iraf
xpix, ypix, fw, cl, cm, ell, bkg = sexvec
acoo1, apix1, am1 = catvec['coo'], catvec['pix'], catvec['mag']
# catalogue
iraf.noao(_doprint=0)
iraf.imcoords(_doprint=0)
iraf.tv(_doprint=0)
iraf.tv.rimexam.backgrou = 'yes'
iraf.astcat(_doprint=0)
toforget = ['imcoords', 'astcat', 'tv']
for t in toforget:
try:
iraf.unlearn(t)
except:
pass
verbose = False
if _interactive:
verbose = True
img = lista[0]
hdr = readhdr(img)
_instrume = readkey3(hdr, 'instrume')
if _instrume == 'efosc':
magsel0 = 7.0
magsel1 = 21.
elif _instrume == 'sofi':
magsel0 = 7.0
magsel1 = 21.
# if guess: ntt.efoscastrodef.wcsstart(img)#,500,500)
_CRPIX1 = readkey3(hdr, 'CRPIX1')
_CRPIX2 = readkey3(hdr, 'CRPIX2')
if verbose:
display_image(img, 1, '', '', False)
iraf.tvmark(1, 'STDIN', Stdin=list(apix1), mark="circle", number='yes', label='no', radii=20, nxoffse=5,
nyoffse=5, color=205, txsize=4)
raw_input('mark catalogue ' + str(len(apix1)))
else:
# ss=datetime.datetime.now()
time.sleep(.7)
answ = 'yes'
magsel11 = magsel1
mlim = 0
while answ == 'yes':
amcut1 = compress((array(am1) > magsel0) &
(array(am1) < magsel11), am1)
if len(amcut1) <= number:
answ = 'no'
magsel11 = magsel1 + mlim + .5
else:
mlim = mlim - .5
magsel11 = magsel1 + mlim
amcut = compress((array(am1) > magsel0) & (array(am1) < magsel11), am1)
apixcut = compress((array(am1) > magsel0) & (
array(am1) < magsel11), apix1) # usno x y cut_list
acoocut = compress((array(am1) > magsel0) & (
array(am1) < magsel11), acoo1) # usno ra dec cut_list
rausno = compress((array(am1) > magsel0) & (
array(am1) < magsel11), array(catvec['ra'], float))
decusno = compress((array(am1) > magsel0) & (
array(am1) < magsel11), array(catvec['dec'], float))
xusno, yusno = [], []
for i in apixcut:
xusno.append(float(string.split(i)[0]))
yusno.append(float(string.split(i)[1]))
xusno, yusno = array(xusno), array(yusno)
#################################################################
if verbose:
iraf.tvmark(1, 'STDIN', Stdin=list(apixcut), mark="circle", number='yes', label='no', radii=8, nxoffse=5,
nyoffse=5, color=204, txsize=2)
raw_input('brightest ' + str(number) + ' objects')
############## sextractor ##################
if len(xpix) >= number:
cm = array(cm, float)
xpix = xpix[argsort(cm)][0:number]
ypix = ypix[argsort(cm)][0:number]
fw = fw[argsort(cm)][0:number]
ell = ell[argsort(cm)][0:number]
cm = cm[argsort(cm)][0:number]
if verbose:
sexpix = []
for i in range(0, len(xpix)):
sexpix.append(str(xpix[i]) + ' ' + str(ypix[i]))
iraf.tvmark(1, 'STDIN', Stdin=list(sexpix), mark="circle", number='yes', label='no', radii=8, nxoffse=5,
nyoffse=5, color=206, txsize=2)
raw_input('print sex ' + str(len(sexpix)))
xsex, ysex = array(xpix), array(ypix)
fwsex = array(fw)
ellsex = array(ell)
#####################################################################
max_sep = tollerance1
xdist, ydist = [], []
for i in range(len(xusno)):
dist = sqrt((xusno[i] - xsex) ** 2 + (yusno[i] - ysex) ** 2)
idist = argmin(dist)
if dist[idist] < max_sep:
xdist.append(xusno[i] - xsex[idist])
ydist.append(yusno[i] - ysex[idist])
if len(xdist) >= 2:
xoff, xstd = round(median(xdist), 2), round(std(xdist), 2)
yoff, ystd = round(median(ydist), 2), round(std(ydist), 2)
_xdist, _ydist = array(xdist), array(ydist)
__xdist = compress((abs(_xdist - xoff) < 3 * xstd) &
(abs(_ydist - yoff) < 3 * ystd), _xdist)
__ydist = compress((abs(_xdist - xoff) < 3 * xstd) &
(abs(_ydist - yoff) < 3 * ystd), _ydist)
if len(__xdist) >= 2:
xoff, xstd = round(median(__xdist), 2), round(std(__xdist), 2)
yoff, ystd = round(median(__ydist), 2), round(std(__ydist), 2)
else:
xoff, yoff = 0, 0
else:
xoff, yoff = 0, 0
if isnan(xoff):
xoff = 0
if isnan(yoff):
yoff = 0
_CRPIX1 = readkey3(hdr, 'CRPIX1')
_CRPIX2 = readkey3(hdr, 'CRPIX2')
ntt.util.updateheader(img, 0, {'CRPIX1': [_CRPIX1 - xoff, '']})
ntt.util.updateheader(img, 0, {'CRPIX2': [_CRPIX2 - yoff, '']})
xusno2_new = xusno - xoff
yusno2_new = yusno - yoff
#####################################################################
max_sep = tollerance2
fwhm = []
fwhm2 = []
ell = []
xref = []
iraf.tv(_doprint=0)
iraf.tv.rimexam.backgrou = 'yes'
vettoretran = []
# print "LOGX:: xusno2_new: %(xusno2_new)s" % locals()
for i in range(len(xusno2_new)):
dist = sqrt((xusno2_new[i] - xsex) ** 2 + (yusno2_new[i] - ysex) ** 2)
idist = argmin(dist)
if dist[idist] < max_sep:
xref.append(xsex[idist])
vettoretran.append(
str(rausno[i]) + ' ' + | |
word[1] == "r" :
toGuess = toGuess[:1] + "r" + toGuess[2:]
if word[2] == "R" or word[2] == "r" :
toGuess = toGuess[:2] + "r" + toGuess[3:]
if word[3] == "R" or word[3] == "r" :
toGuess = toGuess[:3] + "r" + toGuess[4:]
if word[1] != "R" and word[1] != "r" and word[2] != "R" and word[2] != "r" and word[3] != "R" and word[3] != "r" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "r" + ", "
if guessChar == "S" or guessChar == "s" :
if word[1] == "S" or word[1] == "s" :
toGuess = toGuess[:1] + "s" + toGuess[2:]
if word[2] == "S" or word[2] == "s" :
toGuess = toGuess[:2] + "s" + toGuess[3:]
if word[3] == "S" or word[3] == "s" :
toGuess = toGuess[:3] + "s" + toGuess[4:]
if word[1] != "S" and word[1] != "s" and word[2] != "S" and word[2] != "s" and word[3] != "S" and word[3] != "s" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "s" + ", "
if guessChar == "T" or guessChar == "t" :
if word[1] == "T" or word[1] == "t" :
toGuess = toGuess[:1] + "t" + toGuess[2:]
if word[2] == "T" or word[2] == "t" :
toGuess = toGuess[:2] + "t" + toGuess[3:]
if word[3] == "T" or word[3] == "t" :
toGuess = toGuess[:3] + "t" + toGuess[4:]
if word[1] != "T" and word[1] != "t" and word[2] != "T" and word[2] != "t" and word[3] != "T" and word[3] != "t" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "t" + ", "
if guessChar == "U" or guessChar == "u" :
if word[1] == "U" or word[1] == "u" :
toGuess = toGuess[:1] + "u" + toGuess[2:]
if word[2] == "U" or word[2] == "u" :
toGuess = toGuess[:2] + "u" + toGuess[3:]
if word[3] == "U" or word[3] == "u" :
toGuess = toGuess[:3] + "u" + toGuess[4:]
if word[1] != "U" and word[1] != "u" and word[2] != "U" and word[2] != "u" and word[3] != "U" and word[3] != "u" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "u" + ", "
if guessChar == "V" or guessChar == "v" :
if word[1] == "V" or word[1] == "v" :
toGuess = toGuess[:1] + "v" + toGuess[2:]
if word[2] == "V" or word[2] == "v" :
toGuess = toGuess[:2] + "v" + toGuess[3:]
if word[3] == "V" or word[3] == "v" :
toGuess = toGuess[:3] + "v" + toGuess[4:]
if word[1] != "V" and word[1] != "v" and word[2] != "V" and word[2] != "v" and word[3] != "V" and word[3] != "v" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "v" + ", "
if guessChar == "W" or guessChar == "w" :
if word[1] == "W" or word[1] == "w" :
toGuess = toGuess[:1] + "w" + toGuess[2:]
if word[2] == "W" or word[2] == "w" :
toGuess = toGuess[:2] + "w" + toGuess[3:]
if word[3] == "W" or word[3] == "w" :
toGuess = toGuess[:3] + "w" + toGuess[4:]
if word[1] != "W" and word[1] != "w" and word[2] != "W" and word[2] != "w" and word[3] != "W" and word[3] != "w" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "w" + ", "
if guessChar == "X" or guessChar == "x" :
if word[1] == "X" or word[1] == "x" :
toGuess = toGuess[:1] + "x" + toGuess[2:]
if word[2] == "X" or word[2] == "x" :
toGuess = toGuess[:2] + "x" + toGuess[3:]
if word[3] == "X" or word[3] == "x" :
toGuess = toGuess[:3] + "x" + toGuess[4:]
if word[1] != "X" and word[1] != "x" and word[2] != "X" and word[2] != "x" and word[3] != "X" and word[3] != "x" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "x" + ", "
if guessChar == "Y" or guessChar == "y" :
if word[1] == "Y" or word[1] == "y" :
toGuess = toGuess[:1] + "y" + toGuess[2:]
if word[2] == "Y" or word[2] == "y" :
toGuess = toGuess[:2] + "y" + toGuess[3:]
if word[3] == "Y" or word[3] == "y" :
toGuess = toGuess[:3] + "y" + toGuess[4:]
if word[1] != "Y" and word[1] != "y" and word[2] != "Y" and word[2] != "y" and word[3] != "Y" and word[3] != "y" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "y" + ", "
if guessChar == "Z" or guessChar == "z" :
if word[1] == "Z" or word[1] == "z" :
toGuess = toGuess[:1] + "z" + toGuess[2:]
if word[2] == "Z" or word[2] == "z" :
toGuess = toGuess[:2] + "z" + toGuess[3:]
if word[3] == "Z" or word[3] == "z" :
toGuess = toGuess[:3] + "z" + toGuess[4:]
if word[1] != "Z" and word[1] != "z" and word[2] != "Z" and word[2] != "z" and word[3] != "Z" and word[3] != "z" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "z" + ", "
if numberOfErrors == 0 :
print("\t___________")
print("\t| |")
print("\t|")
print("\t|")
print("\t|")
print("\t|")
print("\t|")
if numberOfErrors == 1 :
print("\t___________")
print("\t| |")
print("\t| O")
print("\t|")
print("\t|")
print("\t|")
print("\t|")
if numberOfErrors == 2 :
print("\t___________")
print("\t| |")
print("\t| O")
print("\t| |")
print("\t|")
print("\t|")
print("\t|")
if numberOfErrors == 3 :
print("\t___________")
print("\t| |")
print("\t| O")
print("\t| /|")
print("\t|")
print("\t|")
print("\t|")
if numberOfErrors == 4 :
print("\t___________")
print("\t| |")
print("\t| O")
print("\t| /|\\")
print("\t|")
print("\t|")
print("\t|")
if numberOfErrors == 5 :
print("\t___________")
print("\t| |")
print("\t| O")
print("\t| /|\\")
print("\t| / ")
print("\t|")
print("\t|")
if numberOfErrors == 6 :
print("\t___________")
print("\t| |")
print("\t| O")
print("\t| /|\\")
print("\t| / \\")
print("\t|")
print("\t|")
print("\nYou lose! GAME OVER\n")
print("The answer was \"" + word + "\"")
loser = True
if not loser :
print("\n\tWord: " + toGuess)
print("\tMisses: " + wrongChars)
if not "_" in toGuess :
print("\nCONGRATULATIONS! You win!!!")
if len(word) == 6 :
toGuess = str(word[0]) + "____" + str(word[5])
numberOfErrors = 0
wrongChars=""
print("\t___________")
print("\t| |")
print("\t|")
print("\t|")
print("\t|")
print("\t|")
print("\t|")
print("\n\tWord: " + toGuess)
print("\tMisses: " + wrongChars)
if "_" in toGuess and not loser :
guessChar = ""
while not guessChar.isalpha() :
guessChar = input("\n---------------------------------\nEnter your letter: ")
_ = os.system('cls' if os.name=='nt' else 'clear')
if guessChar == "A" or guessChar == "a" :
if word[1] == "A" or word[1] == "a" :
toGuess = toGuess[:1] + "a" + toGuess[2:]
if word[2] == "A" or word[2] == "a" :
toGuess = toGuess[:2] + "a" + toGuess[3:]
if word[3] == "A" or word[3] == "a" :
toGuess = toGuess[:3] + "a" + toGuess[4:]
if word[4] == "A" or word[4] == "a" :
toGuess = toGuess[:4] + "a" + toGuess[5:]
if word[1] != "A" and word[1] != "a" and word[2] != "A" and word[2] != "a" and word[3] != "A" and word[3] != "a" and word[4] != "A" and word[4] != "a" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "a" + ", "
if guessChar == "B" or guessChar == "b" :
if word[1] == "B" or word[1] == "b" :
toGuess = toGuess[:1] + "b" + toGuess[2:]
if word[2] == "B" or word[2] == "b" :
toGuess = toGuess[:2] + "b" + toGuess[3:]
if word[3] == "B" or word[3] == "b" :
toGuess = toGuess[:3] + "b" + toGuess[4:]
if word[4] == "B" or word[4] == "b" :
toGuess = toGuess[:4] + "b" + toGuess[5:]
if word[1] != "B" and word[1] != "b" and word[2] != "B" and word[2] != "b" and word[3] != "B" and word[3] != "b" and word[4] != "B" and word[4] != "b" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "b" + ", "
if guessChar == "C" or guessChar == "c" :
if word[1] == "C" or word[1] == "c" :
toGuess = toGuess[:1] + "c" + toGuess[2:]
if word[2] == "C" or word[2] == "c" :
toGuess = toGuess[:2] + "c" + toGuess[3:]
if word[3] == "C" or word[3] == "c" :
toGuess = toGuess[:3] + "c" + toGuess[4:]
if word[4] == "C" or word[4] == "c" :
toGuess = toGuess[:4] + "c" + toGuess[5:]
if word[1] != "C" and word[1] != "c" and word[2] != "C" and word[2] != "c" and word[3] != "C" and word[3] != "c" and word[4] != "C" and word[4] != "c" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "c" + ", "
if guessChar == "D" or guessChar == "d" :
if word[1] == "D" or word[1] == "d" :
toGuess = toGuess[:1] + "d" + toGuess[2:]
if word[2] == "D" or word[2] == "d" :
toGuess = toGuess[:2] + "d" + toGuess[3:]
if word[3] == "D" or word[3] == "d" :
toGuess = toGuess[:3] + "d" + toGuess[4:]
if word[4] == "D" or word[4] == "d" :
toGuess = toGuess[:4] + "d" + toGuess[5:]
if word[1] != "D" and word[1] != "d" and word[2] != "D" and word[2] != "d" and word[3] != "D" and word[3] != | |
<filename>src/Charsiu.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import torch
from itertools import groupby
sys.path.append('src/')
import numpy as np
#sys.path.insert(0,'src')
from models import Wav2Vec2ForAttentionAlignment, Wav2Vec2ForFrameClassification, Wav2Vec2ForCTC
from utils import seq2duration,forced_align,duration2textgrid,word2textgrid
from processors import CharsiuPreprocessor_zh, CharsiuPreprocessor_en
# Map language code -> preprocessor class (instantiated lazily by
# charsiu_aligner.__init__ when no processor is supplied).
processors = {'zh':CharsiuPreprocessor_zh,
              'en':CharsiuPreprocessor_en}
class charsiu_aligner:
    '''
    Base class for Charsiu aligners: stores the shared configuration
    (language, sampling rate, device, processor, time resolution) and
    provides the output helpers used by all subclasses.
    '''
    def __init__(self,
                 lang='en',
                 sampling_rate=16000,
                 device=None,
                 recognizer=None,
                 processor=None,
                 resolution=0.01):
        '''
        Parameters
        ----------
        lang : str
            Language code used to pick a default preprocessor ('en' or 'zh').
        sampling_rate : int
            Expected audio sampling rate in Hz.
        device : str or None
            Torch device; autodetected ('cuda' if available) when None.
        recognizer : model or None
            Optional phone recognizer (used by chain aligners).
        processor : object or None
            Preprocessor instance; when None one is built from `processors`.
        resolution : float
            Output time resolution in seconds.
        '''
        self.lang = lang
        if processor is not None:
            # Bug fix: the rest of the class hierarchy reads
            # `self.charsiu_processor`, but a user-supplied processor used to
            # be stored only as `self.processor`, leaving
            # `charsiu_processor` undefined. Keep both attributes so any
            # external reader of `.processor` still works.
            self.processor = processor
            self.charsiu_processor = processor
        else:
            self.charsiu_processor = processors[self.lang]()
        self.resolution = resolution
        self.sr = sampling_rate
        self.recognizer = recognizer
        if device is None:
            self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        else:
            self.device = device
    def _freeze_model(self):
        # Put the model(s) in eval mode and move them to the target device.
        self.aligner.eval().to(self.device)
        if self.recognizer is not None:
            self.recognizer.eval().to(self.device)
    def align(self,audio,text):
        # Implemented by subclasses.
        raise NotImplementedError()
    def serve(self,audio,save_to,output_format='variable',text=None):
        # Implemented by subclasses (align + save).
        raise NotImplementedError()
    def _to_textgrid(self,phones,save_to):
        '''
        Convert (start, end, phone) tuples to a Praat TextGrid file at
        `save_to`.
        '''
        duration2textgrid(phones,save_path=save_to)
        print('Alignment output has been saved to %s'%(save_to))
    def _to_tsv(self,phones,save_to):
        '''
        Write (start, end, phone) tuples to `save_to` as tab-separated
        lines, one interval per line.
        '''
        with open(save_to,'w') as f:
            for start,end,phone in phones:
                f.write('%s\t%s\t%s\n'%(start,end,phone))
        print('Alignment output has been saved to %s'%(save_to))
class charsiu_forced_aligner(charsiu_aligner):
    '''
    Forced aligner: aligns a known transcript to audio using a
    frame-classification model.
    '''
    def __init__(self, aligner, sil_threshold=4, **kwargs):
        super(charsiu_forced_aligner, self).__init__(**kwargs)
        self.aligner = Wav2Vec2ForFrameClassification.from_pretrained(aligner)
        # Predicted-silence runs shorter than this many frames are not kept
        # as silence (see _get_sil_mask).
        self.sil_threshold = sil_threshold
        self._freeze_model()
    def align(self, audio, text):
        '''
        Perform forced alignment

        Parameters
        ----------
        audio : np.ndarray [shape=(n,)]
            time series of speech signal
        text : str
            The transcription

        Returns
        -------
        (pred_phones, pred_words): lists of (start_time, end_time, label)
        tuples for phones and words.

        Raises
        ------
        Exception when no speech frames are detected.
        '''
        audio = self.charsiu_processor.audio_preprocess(audio,sr=self.sr)
        audio = torch.Tensor(audio).unsqueeze(0).to(self.device)
        phones, words = self.charsiu_processor.get_phones_and_words(text)
        phone_ids = self.charsiu_processor.get_phone_ids(phones)
        with torch.no_grad():
            out = self.aligner(audio)
        cost = torch.softmax(out.logits,dim=-1).detach().cpu().numpy().squeeze()
        sil_mask = self._get_sil_mask(cost)
        nonsil_idx = np.argwhere(sil_mask!=self.charsiu_processor.sil_idx).squeeze()
        # Bug fix: np.argwhere never returns None, so the old `is None`
        # check could not fire; an empty result is what signals no speech.
        if nonsil_idx.size == 0:
            raise Exception("No speech detected! Please check the audio file!")
        # Align only non-silent frames; phone_ids[1:-1] drops the
        # boundary ids added by get_phone_ids.
        aligned_phone_ids = forced_align(cost[nonsil_idx,:],phone_ids[1:-1])
        aligned_phones = [self.charsiu_processor.mapping_id2phone(phone_ids[1:-1][i]) for i in aligned_phone_ids]
        pred_phones = self._merge_silence(aligned_phones,sil_mask)
        pred_phones = seq2duration(pred_phones,resolution=self.resolution)
        pred_words = self.charsiu_processor.align_words(pred_phones,phones,words)
        return pred_phones, pred_words
    def serve(self,audio,text,save_to,output_format='textgrid'):
        '''
        Align `audio` against `text` and save phone and word tiers.

        output_format : 'tsv' writes two sibling files (phone/word);
        'textgrid' writes a single TextGrid. Anything else raises.
        '''
        phones, words = self.align(audio,text)
        if output_format == 'tsv':
            if save_to.endswith('.tsv'):
                save_to_phone = save_to.replace('.tsv','_phone.tsv')
                save_to_word = save_to.replace('.tsv','_word.tsv')
            else:
                save_to_phone = save_to + '_phone.tsv'
                save_to_word = save_to + '_word.tsv'
            self._to_tsv(phones, save_to_phone)
            self._to_tsv(words, save_to_word)
        elif output_format == 'textgrid':
            self._to_textgrid(phones, words, save_to)
        else:
            # Typo fix in the user-facing message ("textgird" -> "textgrid").
            raise Exception('Please specify the correct output format (tsv or textgrid)!')
    def _to_textgrid(self,phones,words,save_to):
        '''
        Convert phone and word tuples to a TextGrid file at `save_to`.
        '''
        word2textgrid(phones,words,save_path=save_to)
        print('Alignment output has been saved to %s'%(save_to))
    def _merge_silence(self,aligned_phones,sil_mask):
        # Re-insert '[SIL]' tokens at the frames masked as silence; other
        # frames consume the aligned phones in order.
        pred_phones = []
        count = 0
        for i in sil_mask:
            if i==self.charsiu_processor.sil_idx:
                pred_phones.append('[SIL]')
            else:
                pred_phones.append(aligned_phones[count])
                count += 1
        assert len(pred_phones) == len(sil_mask)
        return pred_phones
    def _get_sil_mask(self,cost):
        # Frame-wise argmax of the class posteriors; silence runs shorter
        # than sil_threshold are replaced by -1 so that downstream code
        # treats them as speech.
        preds = np.argmax(cost,axis=-1)
        sil_mask = []
        for key, group in groupby(preds):
            group = list(group)
            if (key==self.charsiu_processor.sil_idx and len(group)<self.sil_threshold):
                sil_mask += [-1 for i in range(len(group))]
            else:
                sil_mask += group
        return np.array(sil_mask)
class charsiu_attention_aligner(charsiu_aligner):
    '''
    Forced aligner based on an attention-alignment model.
    '''
    def __init__(self, aligner, **kwargs):
        super(charsiu_attention_aligner, self).__init__(**kwargs)
        self.aligner = Wav2Vec2ForAttentionAlignment.from_pretrained(aligner)
        self._freeze_model()
    def align(self, audio, text):
        '''
        Perform forced alignment

        Parameters
        ----------
        audio : np.ndarray [shape=(n,)]
            time series of speech signal
        text : str
            The transcription

        Returns
        -------
        A list of aligned phones in the form (start_time, end_time, phone)
        '''
        audio = self.charsiu_processor.audio_preprocess(audio,sr=self.sr)
        audio = torch.Tensor(audio).unsqueeze(0).to(self.device)
        phones, words = self.charsiu_processor.get_phones_and_words(text)
        phone_ids = self.charsiu_processor.get_phone_ids(phones)
        batch = {'input_values':audio,
                 'labels': torch.tensor(phone_ids).unsqueeze(0).long().to(self.device)
                 }
        with torch.no_grad():
            out = self.aligner(**batch)
        # Bug fix: a stray trailing comma used to make `att` a 1-tuple,
        # which only worked because the next line indexed att[0]. Use the
        # tensor directly (argmax over the last dim is unchanged).
        att = torch.softmax(out.logits, dim=-1)
        preds = torch.argmax(att, dim=-1).cpu().detach().squeeze().numpy()
        pred_phones = [self.charsiu_processor.mapping_id2phone(phone_ids[i]) for i in preds]
        pred_phones = seq2duration(pred_phones,resolution=self.resolution)
        return pred_phones
    def serve(self,audio,text,save_to,output_format='textgrid'):
        '''
        Align `audio` against `text` and save the result at `save_to`.

        output_format : 'tsv' or 'textgrid'; anything else raises.
        '''
        aligned_phones = self.align(audio,text)
        if output_format == 'tsv':
            self._to_tsv(aligned_phones, save_to)
        elif output_format == 'textgrid':
            self._to_textgrid(aligned_phones, save_to)
        else:
            # Typo fix in the user-facing message ("textgird" -> "textgrid").
            raise Exception('Please specify the correct output format (tsv or textgrid)!')
    def _to_textgrid(self,phones,save_to):
        '''
        Convert (start, end, phone) tuples to a TextGrid file at `save_to`.
        '''
        duration2textgrid(phones,save_path=save_to)
        print('Alignment output has been saved to %s'%(save_to))
class charsiu_chain_attention_aligner(charsiu_aligner):
    '''
    Text-independent aligner: first recognizes phones with a CTC model,
    then force-aligns the recognized phone sequence with an
    attention-alignment model.
    '''
    def __init__(self, aligner, recognizer, **kwargs):
        super(charsiu_chain_attention_aligner, self).__init__(**kwargs)
        self.aligner = Wav2Vec2ForAttentionAlignment.from_pretrained(aligner)
        self.recognizer = Wav2Vec2ForCTC.from_pretrained(recognizer)
        self._freeze_model()
    def align(self, audio):
        '''
        Recognize phones and perform forced alignment

        Parameters
        ----------
        audio : np.ndarray [shape=(n,)]
            time series of speech signal

        Returns
        -------
        A list of aligned phones in the form (start_time, end_time, phone)
        '''
        # NOTE(review): __init__ always loads a recognizer, so this branch
        # looks unreachable via the constructor — confirm whether callers
        # reset self.recognizer to None.
        if self.recognizer is None:
            print('A recognizer is not specified. Will use the default recognizer.')
            self.recognizer = Wav2Vec2ForCTC.from_pretrained('charsiu/en_w2v2_ctc_libris_and_cv')
        # perform phone recognition
        audio = self.charsiu_processor.audio_preprocess(audio,sr=self.sr)
        audio = torch.tensor(audio).float().unsqueeze(0).to(self.device)
        with torch.no_grad():
            out = self.recognizer(audio)
        pred_ids = torch.argmax(out.logits,dim=-1).squeeze()
        phones = self.charsiu_processor.processor.tokenizer.convert_ids_to_tokens(pred_ids,skip_special_tokens=True)
        # Collapse consecutive duplicate phones (CTC-style de-duplication).
        phones = [p for p,group in groupby(phones)]
        phone_ids = self.charsiu_processor.get_phone_ids(phones)
        # perform forced alignment
        batch = {'input_values':audio,
                 'labels': torch.tensor(phone_ids).unsqueeze(0).long().to(self.device)
                 }
        with torch.no_grad():
            out = self.aligner(**batch)
        att = torch.softmax(out.logits,dim=-1)
        # att[0] drops the (singleton) batch dimension before the argmax.
        preds = torch.argmax(att[0],dim=-1).cpu().detach().squeeze().numpy()
        pred_phones = [self.charsiu_processor.mapping_id2phone(phone_ids[i]) for i in preds]
        pred_phones = seq2duration(pred_phones,resolution=self.resolution)
        return pred_phones
    def serve(self,audio,save_to,output_format='textgrid'):
        '''
        A wrapper function for quick inference.
        Note. Only phones are supported in text-independent alignment.

        Parameters
        ----------
        audio : np.ndarray
            time series of speech signal
        save_to : str
            output file path
        output_format : str, optional
            Output phone-to-audio alignment as a "tsv" or "textgrid" file.
            The default is 'textgrid'.

        Returns
        -------
        None.
        '''
        aligned_phones = self.align(audio)
        if output_format == 'tsv':
            self._to_tsv(aligned_phones, save_to)
        elif output_format == 'textgrid':
            self._to_textgrid(aligned_phones, save_to)
        else:
            raise Exception('Please specify the correct output format (tsv or textgird)!')
class charsiu_chain_forced_aligner(charsiu_aligner):
    '''
    Text-independent aligner: first recognizes phones with a CTC model,
    then force-aligns the recognized phone sequence with a
    frame-classification model.
    '''
    def __init__(self, aligner, recognizer, **kwargs):
        super(charsiu_chain_forced_aligner, self).__init__(**kwargs)
        self.aligner = Wav2Vec2ForFrameClassification.from_pretrained(aligner)
        self.recognizer = Wav2Vec2ForCTC.from_pretrained(recognizer)
        self._freeze_model()
    def align(self, audio):
        '''
        Recognize phones and perform forced alignment

        Parameters
        ----------
        audio : np.ndarray [shape=(n,)]
            time series of speech signal

        Returns
        -------
        A list of aligned phones in the form (start_time, end_time, phone)
        '''
        # NOTE(review): __init__ always loads a recognizer, so this branch
        # looks unreachable via the constructor — confirm whether callers
        # reset self.recognizer to None.
        if self.recognizer is None:
            print('A recognizer is not specified. Will use the default recognizer.')
            self.recognizer = Wav2Vec2ForCTC.from_pretrained('charsiu/en_w2v2_ctc_libris_and_cv')
        # perform phone recognition
        audio = self.charsiu_processor.audio_preprocess(audio,sr=self.sr)
        audio = torch.tensor(audio).float().unsqueeze(0).to(self.device)
        with torch.no_grad():
            out = self.recognizer(audio)
        pred_ids = torch.argmax(out.logits,dim=-1).squeeze()
        phones = self.charsiu_processor.processor.tokenizer.convert_ids_to_tokens(pred_ids,skip_special_tokens=True)
        # Collapse consecutive duplicate phones (CTC-style de-duplication).
        phones = [p for p,group in groupby(phones)]
        phone_ids = self.charsiu_processor.get_phone_ids(phones)
        # perform forced alignment
        # NOTE(review): unlike charsiu_forced_aligner.align, the full
        # phone_ids sequence (including boundary ids) is aligned here —
        # confirm this asymmetry is intentional.
        with torch.no_grad():
            out = self.aligner(audio)
        cost = torch.softmax(out.logits,dim=-1).detach().cpu().numpy().squeeze()
        aligned_phone_ids = forced_align(cost,phone_ids)
        aligned_phones = [self.charsiu_processor.mapping_id2phone(phone_ids[i]) for i in aligned_phone_ids]
        pred_phones = seq2duration(aligned_phones,resolution=self.resolution)
        return pred_phones
    def serve(self,audio,save_to,output_format='textgrid'):
        '''
        A wrapper function for quick inference.
        Note. Only phones are supported in text-independent alignment.

        Parameters
        ----------
        audio : np.ndarray
            time series of speech signal
        save_to : str
            output file path
        output_format : str, optional
            Output phone-to-audio alignment as a "tsv" or "textgrid" file.
            The default is 'textgrid'.

        Returns
        -------
        None.
        '''
        aligned_phones = self.align(audio)
        if output_format == 'tsv':
            self._to_tsv(aligned_phones, save_to)
        elif output_format == 'textgrid':
            self._to_textgrid(aligned_phones, save_to)
        else:
            raise Exception('Please specify the correct output format (tsv or textgird)!')
class charsiu_predictive_aligner(charsiu_aligner):
    def __init__(self, aligner, **kwargs):
        """Load a frame-classification model and freeze it for inference."""
        super(charsiu_predictive_aligner, self).__init__(**kwargs)
        self.aligner = Wav2Vec2ForFrameClassification.from_pretrained(aligner)
        self._freeze_model()
    def align(self, audio):
        '''
        Directly predict the phone-to-audio alignment based on acoustic signal only

        Parameters
        ----------
        audio : np.ndarray [shape=(n,)]
            time series of speech signal

        Returns
        -------
        A list of aligned phones in the form (start_time, end_time, phone)
        '''
        audio = self.charsiu_processor.audio_preprocess(audio,sr=self.sr)
        audio = torch.Tensor(audio).unsqueeze(0).to(self.device)
        with torch.no_grad():
            out = self.aligner(audio)
        # Frame-wise most likely phone id, then mapped to phone symbols and
        # merged into (start, end, phone) intervals.
        pred_ids = torch.argmax(out.logits.squeeze(),dim=-1)
        pred_ids = pred_ids.detach().cpu().numpy()
        pred_phones = [self.charsiu_processor.mapping_id2phone(int(i)) for i in pred_ids]
        pred_phones = seq2duration(pred_phones,resolution=self.resolution)
        return pred_phones
def serve(self,audio,save_to,output_format='textgrid'):
'''
A wrapper function for quick inference
Note. Only phones are supported in text independent alignment.
Parameters
----------
audio : TYPE
DESCRIPTION.
text : | |
    # (relação com a) tabela 'cargos'
cargoID = db.Column(db.Integer, db.ForeignKey('cargos.id'))
# Relação de medidores de um usuário
medidores = db.relationship('Medidor', backref='usuario', lazy='dynamic')
### Métodos ###
    # Create the first administrator account if none exists yet.
    @staticmethod
    def criar_administrador():
        """Seed the admin user (credentials from app config); no-op when an
        'Administrador' role user already exists."""
        if not Usuario.query.join(Cargo).filter(Cargo.nome=='Administrador').first():
            # Account data (some values come from environment/config)
            administrador = Usuario(email=current_app.config['ADMIN_EMAIL'])
            administrador.nome = 'Administrador'
            administrador.senha = current_app.config['ADMIN_SENHA']
            administrador.verificado = True
            administrador.confirmado = True
            administrador.cargo = Cargo.query.filter_by(nome='Administrador').first()
            # Persist to the database
            db.session.add(administrador)
            db.session.commit()
    # Shell representation
    def __repr__(self):
        return '<Usuário: %s>' % self.nome
    # Display (UI) representation
    def __str__(self):
        return self.nome
class ModeloMedidor(db.Model):
    """Meter model (hardware revision) table."""
    __tablename__ = 'modelos_dos_medidores'
    id = db.Column(db.Integer, primary_key=True)
    nome = db.Column(db.String(32), index=True)
    # Meters of this model (one-to-many)
    medidores = db.relationship('Medidor', backref='modelo', lazy='dynamic')
    ### Methods ###
    @staticmethod
    def criar_modelos():
        """Seed the known model names into the database (idempotent)."""
        for nome_modelo in (u'1.0', u'1.1', u'2.0'):
            existente = ModeloMedidor.query.filter_by(nome=nome_modelo).first()
            if existente is None:
                db.session.add(ModeloMedidor(nome=nome_modelo))
        db.session.commit()
    # Shell representation
    def __repr__(self):
        return '<Modelo de medidor: %s>' % self.nome
    # Display (UI) representation
    def __str__(self):
        return self.nome
# Association table linking parent and child meters
# (self-referential many-to-many on 'medidores').
genealogia = db.Table('genealogia',
    db.Column('paiID', db.Integer, db.ForeignKey('medidores.id')),
    db.Column('filhoID', db.Integer, db.ForeignKey('medidores.id'))
)
class Medidor(db.Model):
    """A water meter, possibly nested under parent meters."""
    __tablename__ = 'medidores'
    id = db.Column(db.Integer, primary_key=True)
    # Friendly name to identify the meter, reads as "Meter of the ..."
    nome = db.Column(db.String(64), index=True)
    # Average price of a m³ of water, in R$
    precoMedio = db.Column(db.Float)
    # Meter constant: how many m³ per pulse it measures
    # NOTE(review): other code in this file treats the constant as mL/pulse —
    # confirm the unit.
    cte = db.Column(db.Float)
    # Meter address (may differ from the owner's address)
    endereco = db.Column(db.String(128))
    # Parent/child relationship between meters (via the genealogia table)
    filhos = db.relationship('Medidor',
                             secondary=genealogia,
                             primaryjoin=(genealogia.c.paiID == id),
                             secondaryjoin=(genealogia.c.filhoID == id),
                             backref=db.backref('pais', lazy='dynamic'),
                             lazy='dynamic')
    # pais.all() = [all meters above the current one]
    # FK into the 'usuarios' table
    usuarioID = db.Column(db.Integer, db.ForeignKey('usuarios.id'))
    # FK into the 'modelos_dos_medidores' table
    modelo_do_medidorID = db.Column(db.Integer, db.ForeignKey('modelos_dos_medidores.id'))
    # FK into the 'metas' table
    metaID = db.Column(db.Integer, db.ForeignKey('metas.id'))
    # Readings of this meter (one-to-many)
    medicoes = db.relationship('Medicao', backref='medidor', lazy='dynamic')
    ### MÉTODOS ###
    '''
    def addPai(self, medidor):
        if not self.ehFilho(medidor):
            self.pais.append(medidor) # não precisa de medidor.filhos.append(self) ???
            db.sesson.add(self)
    '''
    # Shell representation
    def __repr__(self):
        return '<Medidor ID: %s>' % self.nome
    # Display (UI) representation
    def __str__(self):
        return self.nome
class Meta(db.Model):
    """Consumption goal attached to one or more meters, with notification
    state tracked in a 4-bit bitmask."""
    __tablename__ = 'metas'
    id = db.Column(db.Integer, primary_key=True)
    # Goal description
    descricao = db.Column(db.String(64), index=True)
    # Numeric value of the goal
    valor = db.Column(db.Float)
    # Unit of the goal value
    unidadeDoValor = db.Column(db.String(20))
    # Goal start, e.g. 10/03/2017
    inicio = db.Column(db.DateTime, index=True)
    # Time window to reach the goal (day, week, month, year)
    intervalo = db.Column(db.Interval)
    # Time between e-mail reports to the user about the goal
    notificacoes = db.Column(db.Interval)
    # Bitmask of alerts already sent, bits in this order:
    # [day@100%, month@100%, day@80%, month@80%]
    notificado = db.Column(db.Integer, default=0b0000)
    # Meters governed by this goal
    medidores = db.relationship('Medidor', backref='meta', lazy='dynamic')
    ''' MÉTODOS '''
    def foiNotificado(self, intervalo, porcento):
        # Return True when the alert for (intervalo, porcento) was sent,
        # by testing the matching bit of `notificado`.
        switch = {
            "100%": {
                "dia": self.notificado & 0b1000 == 0b1000
                ,"mês": self.notificado & 0b0100 == 0b0100
            }
            ,"80%": {
                "dia": self.notificado & 0b0010 == 0b0010
                ,"mês": self.notificado & 0b0001 == 0b0001
            }
        }
        return switch[porcento][intervalo]
    def notificar(self, intervalo, porcento):
        # Send the alert e-mail for (intervalo, porcento) once, then set the
        # matching bit so it is not sent again.
        if not self.foiNotificado(intervalo, porcento): # guard: never e-mail twice
            # Send the e-mail
            assunto = {
                "100%": {
                    "dia": "Ultrapassagem de meta diária!"
                    ,"mês": "Ultrapassagem de meta mensal!"
                }
                ,"80%": {
                    "dia": "80% da meta diária atingida"
                    ,"mês": "80% da meta mensal atingida"
                }
            }
            sendAsyncEmail.delay({
                "para": self.medidores.first().usuario.email
                ,"assunto": assunto[porcento][intervalo]
                ,"template": 'email/alertaMeta'
                ,"kwargs": {
                    "intervaloMeta": intervalo
                    ,"porcentagem": porcento
                }
            })
            # Update the database (set the bit)
            switch = {
                "100%": {
                    "dia": self.notificado | 0b1000
                    ,"mês": self.notificado | 0b0100
                }
                ,"80%": {
                    "dia": self.notificado | 0b0010
                    ,"mês": self.notificado | 0b0001
                }
            }
            self.notificado = switch[porcento][intervalo]
            db.session.add(self)
            db.session.commit()
    def desnotificar(self, intervalo, porcento):
        # Clear the alert bit for (intervalo, porcento), re-arming the alert.
        switch = {
            "100%": {
                "dia": self.notificado & 0b0111
                ,"mês": self.notificado & 0b1011
            }
            ,"80%": {
                "dia": self.notificado & 0b1101
                ,"mês": self.notificado & 0b1110
            }
        }
        self.notificado = switch[porcento][intervalo]
        db.session.add(self)
        db.session.commit()
    # Shell representation
    def __repr__(self):
        return '<Meta: %s>' % self.descricao
    # Display (UI) representation
    def __str__(self):
        return self.descricao
class Medicao(db.Model):
    """A single meter reading: pulse count plus server receive time."""
    __tablename__ = 'medicoes' # table name in the database
    id = db.Column(db.Integer, primary_key = True)
    # Pulse count sent by the meter
    valor = db.Column(db.Float)
    # Date/time at which the server received the reading
    dataHora = db.Column(db.DateTime, index = True, default=datetime.utcnow)
    # FK into the 'medidores' table
    medidorID = db.Column(db.Integer, db.ForeignKey('medidores.id'))
    ### Methods ###
    # Serialize a reading as a dict
    def to_json(self):
        # NOTE(review): dataHora is a datetime, which json.dumps cannot
        # serialize directly — confirm callers convert it (e.g. isoformat()).
        jsonMedicao = {
            'valor': self.valor,
            'dataHora': self.dataHora
        }
        return jsonMedicao
    # Build a Medicao from a JSON dict
    @staticmethod
    def from_json(jsonMedicao):
        valorDaMedicao = jsonMedicao.get('valor')
        if valorDaMedicao is None:
            # Kept for backward compatibility: callers test for the string
            # 'bug'; raising ValidationError would be the cleaner contract.
            return 'bug'
            # raise ValidationError('dado enviado sem valor atribuído')
        return Medicao(valor = valorDaMedicao)
    # Shell representation
    def __repr__(self):
        return '<Medição: %s>' % self.id
    # Display (UI) representation
    def __str__(self):
        # Bug fix: __str__ must return a string; returning the int self.id
        # raised "TypeError: __str__ returned non-string".
        return str(self.id)
###############################################################################################################################
# Data-acquisition form (single float input + submit button)
class DataForm(Form):
    dado = FloatField('Insira um float:')
    submit = SubmitField('Enviar')
###############################################################################################################################
# View functions do webapp
@app.route('/', methods = ['GET', 'POST'])
def index():
# mesAtual = Dado.query.filter(extract('month', Dado.dataHora) == datetime.datetime.utcnow().month).all()
minhaMeta = Meta.query.first()
minhaMeta = {
"valor": {
"m³": minhaMeta.valor
,"L": minhaMeta.valor*1000
,"R$": minhaMeta.valor*PM
}
,"unidadeDoValor": minhaMeta.unidadeDoValor
,"inicio": minhaMeta.inicio.isoformat()+'Z'
,"fim": (minhaMeta.inicio + minhaMeta.intervalo).isoformat()+'Z'
}
eu = Usuario.query.first()
eu = {
"nome": eu.nome
,"email": eu.email
,"senha": eu.senhaHash
}
# 21 dados para poder calcular 20 últimas vazões
dados_ultimos21 = Medicao.query.all()[-21:]
vazao_ultimos20 = []
dataHora_ultimos20 = []
tempTxt = ''
temp = 0
for i in range(1,len(dados_ultimos21)):
'''
Os dados em formato datetime serão utilizados no lado do cliente em conjunto com a biblioteca moment.js
para oferecer a conversão da data e hora de acordo com a localização e configuração do usuário. O que vai
acontecer no javascript do cliente é:
moment("2012-12-31T23:55:13Z").format('LLLL');
Pra isso tem que ser enviado no lugar do objeto datetime uma string usando isoformat(), como:
obj.isoformat();
que coloca um 'T' entre a data e a hora e depois adicionar um 'Z' no final da string pro moment.js
reconhecer a parada
'''
tempTxt = dados_ultimos21[i].dataHora.isoformat()+'Z'
dataHora_ultimos20.append(tempTxt)
'''
(60 s/min)*(1/1000 L/mL)*(cte mL/pulsos)*(intervaloDeConsumo pulsos)/(intervaloDeTempo s)
= 0.06*cte*intervaloDeConsumo/intervaloDeTempo L/min
'''
temp = (0.06*cte)*(dados_ultimos21[i].valor - dados_ultimos21[i-1].valor)/\
(dados_ultimos21[i].dataHora - dados_ultimos21[i-1].dataHora).total_seconds() # L/min
vazao_ultimos20.append(temp)
''' CONSUMO ACUMULADO NO DIA E NO MÊS '''
# consumo do mês em m³
dado_ultimo = dados_ultimos21[-1]
consumoMes = dado_ultimo.valor*cte/1000000 # m³
# consumo do dia em m³
# Primeiro pega-se a última medição do dia anterior. No caso utilizamos '<=' para o caso que o medidor não tenha consumo no
# dia anterior
agora = datetime.utcnow()
ultimaMedicaoOntem = \
Medicao.query.filter(Medicao.dataHora <= datetime(agora.year, agora.month, agora.day)).order_by(Medicao.id.desc()).first()
consumoDia = ultimaMedicaoOntem.valor if (ultimaMedicaoOntem is not None) else 0 # caso não exista consumo nos dias anteriores
consumoDia = consumoMes - consumoDia*cte/1000000 # m³
# consumos em m³, L e R$
consumoMes = {
"m³": consumoMes
,"L": consumoMes*1000
,"R$": consumoMes*PM
}
consumoDia = {
"m³": consumoDia
,"L": consumoDia*1000
,"R$": consumoDia*PM
}
# dicionário final
consumoDiaMes = {
"dia": consumoDia
,"mês": consumoMes
}
consumoAcumuladoTotal = {
"valor": Medicao.query.order_by(Medicao.id.desc()).first().valor # pegando o último valor
,"dataHora": Medicao.query.first().dataHora.isoformat()+'Z'
}
consumoAcumuladoTotal['valor'] = cte*consumoAcumuladoTotal['valor']/1000000 # m³
historico_1mes = {
"valor": {
"m³": []
,"L": []
,"R$": []
}
,"dataHora": []
}
# supondo mẽs com 31 dias
temp = []
for i in range(32,0,-1): #[32, 30, ..., 2, 1]
# última medição do dia i-ésimo dia anterior
temp2 = Medicao.query.filter(extract('day', Medicao.dataHora) == datetime.utcnow().day-i).order_by(Medicao.id.desc()).first()
if temp2 is not None:
temp.append(temp2)
if len(temp) > 1:
consumoDoDia = (temp[-1].valor - temp[-2].valor)*cte/1000000 # m³
historico_1mes["valor"]["m³"].append(consumoDoDia)
historico_1mes["valor"]["L"].append(consumoDoDia*1000)
historico_1mes["valor"]["R$"].append(consumoDoDia*PM)
# Formato de dataHora para a biblioteca plotly.js
historico_1mes["dataHora"].append("%d-%d-%d" %(temp[-1].dataHora.year, temp[-1].dataHora.month, temp[-1].dataHora.day))
return | |
# fortiosapi/fortiosapi.py
#!/usr/bin/env python
# Copyright 2015 Fortinet, Inc.
#
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
###################################################################
#
# fortiosapi.py aims at simplyfing the configuration and
# integration of Fortgate configuration using the restapi
#
# A Python module to abstract configuration using FortiOS REST API
#
###################################################################
import json
# Set default logging handler to avoid "No handler found" warnings.
import logging
import subprocess
import time
import paramiko
import requests
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
# Disable warnings about certificates.
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
# may need to move to specifying the ca or use Verify=false
# cafile = 'cacert.pem'
# r = requests.get(url, verify=cafile)
logging.getLogger(__name__).addHandler(NullHandler())
# create logger
LOG = logging.getLogger('fortiosapi')
class FortiOSAPI(object):
def __init__(self):
self._https = True
self._fortiversion = "Version is set when logged"
# reference the fortinet version of the targeted product.
self._session = requests.session() # use single session
# persistant and same for all
self._session.verify = False
# (can be changed to) self._session.verify = '/path/to/certfile'
def logging(self, response):
try:
LOG.debug("Request : %s on url : %s ", response.request.method,
response.request.url)
LOG.debug("Response : http code %s reason : %s ",
response.status_code, response.reason)
LOG.debug("raw response: %s ", response.content)
except:
LOG.warning("method errors in request when global")
    def debug(self, status):
        # Enable debug-level logging when status == 'on'.
        # NOTE(review): 'off' is not handled, so debug logging cannot be
        # turned back down once enabled — confirm whether that is intended.
        if status == 'on':
            LOG.setLevel(logging.DEBUG)
def formatresponse(self, res, vdom=None):
LOG.debug("formating response")
self.logging(res)
# Generic way to format the return from FortiAPI
# If vdom is global the resp is a dict of resp (even 1)
# 1 per vdom we check only the first one here (might need a more
# complex check)
if vdom == "global":
resp = json.loads(res.content.decode('utf-8'))[0]
resp['vdom'] = "global"
else:
LOG.debug("content res: %s", res.content)
resp = json.loads(res.content.decode('utf-8'))
return resp
def https(self, status):
if status == 'on':
self._https = True
if status == 'off':
self._https = False
def update_cookie(self):
# Retrieve server csrf and update session's headers
LOG.debug("cookies are : %s ", self._session.cookies)
for cookie in self._session.cookies:
if cookie.name == 'ccsrftoken':
csrftoken = cookie.value[1:-1] # token stored as a list
LOG.debug("csrftoken before update : %s ", csrftoken)
self._session.headers.update({'X-CSRFTOKEN': csrftoken})
LOG.debug("csrftoken after update : %s ", csrftoken)
def login(self, host, username, password, https_port=443):
self.host = host
if self._https is True:
self.url_prefix = 'https://' + self.host + ':' + https_port
else:
self.url_prefix = 'http://' + self.host
url = self.url_prefix + '/logincheck'
res = self._session.post(
url,
data='username=' + username + '&secretkey=' + password + "&ajax=1")
self.logging(res)
# Ajax=1 documented in 5.6 API ref but available on 5.4
if res.content.decode('ascii')[0] == '1':
# Update session's csrftoken
self.update_cookie()
else:
raise Exception('login failed')
try:
self._fortiversion = self.monitor('system', 'interface')['version']
except:
raise Exception('can not get following login')
# Might be wise to return the license status here
    def get_version(self):
        """Return the FortiOS version string captured by login()."""
        return self._fortiversion
def get_mkey(self, path, name, vdom=None, data=None):
# retreive the table mkey from schema
schema = self.schema(path, name, vdom=None)
try:
keyname = schema['mkey']
except KeyError:
LOG.warning("there is no mkey for %s/%s", path, name)
return None
try:
mkey = data[keyname]
except KeyError:
LOG.warning("mkey %s not set in the data", mkey)
return None
return mkey
def logout(self):
url = self.url_prefix + '/logout'
res = self._session.post(url)
self._session.close()
self._session.cookies.clear()
self.logging(res)
def cmdb_url(self, path, name, vdom, mkey=None):
# return builded URL
url_postfix = '/api/v2/cmdb/' + path + '/' + name
if mkey:
url_postfix = url_postfix + '/' + str(mkey)
if vdom:
LOG.debug("vdom is: %s", vdom)
if vdom == "global":
url_postfix += '?global=1'
else:
url_postfix += '?vdom=' + vdom
url = self.url_prefix + url_postfix
LOG.debug("urlbuild is %s with crsf: %s", url, self._session.headers)
return url
def mon_url(self, path, name, vdom=None, mkey=None):
# return builded URL
url_postfix = '/api/v2/monitor/' + path + '/' + name
if mkey:
url_postfix = url_postfix + '/' + str(mkey)
if vdom:
LOG.debug("vdom is: %s", vdom)
if vdom == "global":
url_postfix += '?global=1'
else:
url_postfix += '?vdom=' + vdom
url = self.url_prefix + url_postfix
return url
def monitor(self, path, name, vdom=None, mkey=None, parameters=None):
url = self.mon_url(path, name, vdom, mkey)
res = self._session.get(url, params=parameters)
LOG.debug("in MONITOR function")
return self.formatresponse(res, vdom=vdom)
    def download(self, path, name, vdom=None, mkey=None, parameters=None):
        """GET a monitor endpoint and return the raw requests response
        (not JSON-decoded), e.g. for configuration backups."""
        # NOTE(review): vdom and mkey are accepted but not forwarded to
        # mon_url — confirm whether they should be.
        url = self.mon_url(path, name)
        res = self._session.get(url, params=parameters)
        LOG.debug("in DOWNLOAD function")
        return res
    def upload(self, path, name, vdom=None, mkey=None,
               parameters=None, data=None, files=None):
        """POST data/files to a monitor endpoint and return the raw
        requests response (e.g. config restore, license upload)."""
        # NOTE(review): vdom and mkey are accepted but not forwarded to
        # mon_url — confirm whether they should be.
        url = self.mon_url(path, name)
        res = self._session.post(url, params=parameters,
                                 data=data, files=files)
        LOG.debug("in UPLOAD function")
        return res
def get(self, path, name, vdom=None, mkey=None, parameters=None):
url = self.cmdb_url(path, name, vdom, mkey)
res = self._session.get(url, params=parameters)
LOG.debug("in GET function")
return self.formatresponse(res, vdom=vdom)
def schema(self, path, name, vdom=None):
# vdom or global is managed in cmdb_url
if vdom is None:
url = self.cmdb_url(path, name, vdom) + "?action=schema"
else:
url = self.cmdb_url(path, name, vdom) + "&action=schema"
res = self._session.get(url)
self.logging(res)
if res.status_code is 200:
return json.loads(res.content.decode('utf-8'))['results']
else:
return json.loads(res.content.decode('utf-8'))
def get_name_path_dict(self, vdom=None):
# return builded URL
url_postfix = '/api/v2/cmdb/'
if vdom is None:
url_postfix += '?vdom=' + vdom + "&action=schema"
else:
url_postfix += "?action=schema"
url = self.url_prefix + url_postfix
cmdbschema = self._session.get(url)
self.logging(cmdbschema)
j = json.loads(cmdbschema.content.decode('utf-8'))['results']
dict = []
for keys in j:
if "__tree__" not in keys['path']:
dict.append(keys['path'] + " " + keys['name'])
return dict
def post(self, path, name, vdom=None,
mkey=None, parameters=None, data=None):
if not mkey:
mkey = self.get_mkey(path, name, vdom=vdom, data=data)
# post with mkey will return a 404 as the next level is not there yet
url = self.cmdb_url(path, name, vdom, mkey=None)
res = self._session.post(
url, params=parameters, data=json.dumps(data))
LOG.debug("in POST function")
return self.formatresponse(res, vdom=vdom)
def put(self, path, name, vdom=None,
mkey=None, parameters=None, data=None):
if not mkey:
mkey = self.get_mkey(path, name, vdom=vdom, data=data)
url = self.cmdb_url(path, name, vdom, mkey)
res = self._session.put(url, params=parameters,
data=json.dumps(data))
LOG.debug("in PUT function")
return self.formatresponse(res, vdom=vdom)
def delete(self, path, name, vdom=None,
mkey=None, parameters=None, data=None):
# Need to find the type of the mkey to avoid error when integer assume
# the other types will be ok.
if not mkey:
mkey = self.get_mkey(path, name, vdom=vdom, data=data)
url = self.cmdb_url(path, name, vdom, mkey)
res = self._session.delete(
url, params=parameters, data=json.dumps(data))
LOG.debug("in DELETE function")
return self.formatresponse(res, vdom=vdom)
    # set() performs an idempotent upsert: it tries a PUT first and, if the
    # resource does not exist yet (HTTP 404/405), falls back to a POST.
    # A "force" option (delete and re-create on trouble) could be added later.
def set(self, path, name, vdom=None,
mkey=None, parameters=None, data=None):
# post with mkey will return a 404 as the next level is not there yet
url = self.cmdb_url(path, name, vdom, mkey=mkey)
if not mkey:
mkey = self.get_mkey(path, name, vdom=vdom, data=data)
url = self.cmdb_url(path, name, mkey=mkey, vdom=vdom)
res = self._session.put(
url, params=parameters, data=json.dumps(data))
LOG.debug("in SET function after PUT")
r = self.formatresponse(res, vdom=vdom)
if r['http_status'] == 404 or r['http_status'] == 405:
LOG.warning(
"Try to put on %s failed doing a put to force parameters\
change consider delete if still fails ",
res.request.url)
#need to reset the url without mkey if doing a post
url = self.cmdb_url(path, name, mkey=None, vdom=vdom)
res = self._session.post(
url, params=parameters, data=json.dumps(data))
LOG.debug("in SET function after POST")
return self.formatresponse(res, vdom=vdom)
else:
return r
# send multiline string ''' get system status ''' using ssh
def ssh(self, cmds, host, user, password=<PASSWORD>, private_key=None, ssh_port=22):
''' Send a multi line string via ssh to the fortigate '''
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
if private_key is None:
client.connect(host, port=ssh_port, username=user, password=password,
allow_agent=False, timeout=10)
else:
k = paramiko.RSAKey.from_private_key_file(private_key)
client.connect(host, port=ssh_port, username=user, pkey=k,
allow_agent=False, timeout=10)
LOG.debug("ssh login to %s:%s ", host, ssh_port)
# commands is a multiline string using the ''' string ''' format
try:
stdin, stdout, stderr = client.exec_command(cmds)
except:
LOG.debug("exec_command failed")
raise subprocess.CalledProcessError(returncode=retcode, cmd=cmds,
output=output)
LOG.debug("ssh command in: %s out: %s err: %s ",
stdin, stdout, stderr)
retcode = stdout.channel.recv_exit_status()
LOG.debug("Paramiko return code : %s ", retcode)
client.close() # @TODO re-use connections
if retcode > 0:
output = stderr.read().strip()
raise subprocess.CalledProcessError(returncode=retcode, cmd=cmds,
output=output)
| |
<gh_stars>10-100
# Author: <NAME> <<EMAIL>> <<EMAIL>>
# License: BSD 3 clause (C) 2017
# As of 02 July 2017 those implementations are available here:
# https://lvdmaaten.github.io/tsne/
# https://github.com/scikit-learn/scikit-learn/tree/master/sklearn/manifold
# - <NAME>
# References:
# [1] <NAME> and <NAME>. Visualizing High-Dimensional Data Using t-SNE. Journal of Machine Learning
# Research 9(Nov):2579-2605, 2008.
# [2] <NAME> and <NAME>. Stochastic Neighbor Embedding. In Advances in Neural Information Processing Systems,
# volume 15, pages 833-840, Cambridge, MA, USA, 2002. The MIT Press.
import numpy as np
# TODO get machine precision instead ?
EPS = 1e-12 # Precision level
def product(*args):
    """Cartesian product of the input iterables, as a list of tuples.

    Equivalent to ``list(itertools.product(*args))``; kept as a named helper
    for readability at call sites. ``product()`` with no arguments returns
    ``[()]``, matching itertools.

    :param args: any number of iterables.
    :return: list of tuples, one per element of the cartesian product.
    """
    # Delegate to the C-implemented stdlib version instead of the previous
    # hand-rolled accumulation loop (same results, less code, faster).
    import itertools
    return list(itertools.product(*args))
class LionTSNELightweight:
"""
Lightweight version of LION-tSNE class. Effectively, LION part, tSNE results .
"""
    def __init__(self, x, y):
        """
        Takes X and Y without any testing or consideration. Useful for debugging or for using already generated
        visualization.
        :param x: Points in original dimensions (N samples).
        :param y: Same points in reduced dimensions; must be an array-like
            with a ``shape`` attribute (N x D, D typically 2).
        """
        self.X = x
        self.Y = y
        # Dimensionality of the embedded (output) space, e.g. 2 for 2-D plots.
        self.n_embedded_dimensions = y.shape[1]
    def get_distance(self,x1,x2):
        """
        Distance between two samples under the configured metric.

        :param x1: K-dimensional sample
        :param x2: Another K-dimensional sample
        :return: Distance according to chosen distance metrics

        NOTE(review): reads ``self.distance_function``, which is not assigned
        in the ``__init__`` visible here — presumably set elsewhere; confirm
        before calling. The literal string 'Euclidean' selects the L2 norm;
        any other value is treated as a callable ``f(x1, x2)``.
        """
        if self.distance_function == 'Euclidean':
            # Flatten so row vectors, column vectors and 1-D arrays compare
            # consistently.
            x1 = x1.reshape((-1,))
            x2 = x2.reshape((-1,))
            return np.sqrt(np.sum((x1-x2)**2))
        else:
            return self.distance_function(x1, x2)
def get_distance_matrix(self, x):
"""
:param x: NxK array of N samples, K dimensions each.
:return: NxN matrix of distances according to chosen distance metrics
"""
d = np.zeros((x.shape[0], x.shape[0]))
for i in range(x.shape[0]):
for j in range(x.shape[0]):
d[i,j] = np.sqrt(np.sum((x[i,:] - x[j,:])**2))
return d
def generate_lion_tsne_embedder(self, function_kwargs={}, random_state=None, verbose=0,
return_intermediate=False):
'''
Method mainly focused on local IDW interpolation and special outlier
placement procedure.
TODO: The outlier placement is suboptimal for 3 or more dimensional Y (it places them in a "spiral" on the
plane, rather than in a grid). Still method is usable.
TODO: For now only IDW is used as a local interpolation method.
TODO: Without loss of generality we assume that all input X are distinct (if not - leave only 1 of those x
and assume that corresponding y is returned for all of them).
Described in details: <add ref later>
:param function_kwargs: parameters of the algorithm. Accepted ones:
'radius_x' : radius in X space for search of nearest neighbors. Ignored if 'radius_x_percentile' is set.
'power' : power parameter of IDW distribution. Default - 1 (if IDW is used)
'radius_x_percentile' : percentile of nearest neighbors distribution in X, which is used to set radius
in search for nearest neighbors. Suppresses 'radius_x'. Accepts value in percents (i.e. for 99% enter 99,
not 0.99). Default - 99 (if radius_x not set).
'radius_y' : distance in Y space to indicate outliers. Use if you don't want radius in terms of percentiles.
Ignored if 'radius_y_percentile' is set.
NOTE: 'y_safety_margin' will be added to the radius anyway, and it has non-zero default. Multiplication
coefficient will be applied also.
'radius_y_percentile' : percentile of nearest neighbors distribution in Y, which is used to set distance
to embed outliers. Suppresses 'radius_y'. Accepts value in percents (i.e. for 99% enter 99,
not 0.99). Default - 100 (if radius_y not set), i.e. radius_y = maximum nearest neighbors distance in y
(before multiplication by radius_y_coefficient and before applying safety margin).
'y_safety_margin': safety margin for outlier placement in Y space. If lots of similar outliers clump up,
the closest distance can get lower than radius_y, safety margin can prevent it for a while.
Default - equal to radius_y_close. Added to radius_y, whether radius_y is given or calculated from
percentile. Applied after multiplication coefficient.
'radius_y_close' : if an algorithm requires to "place y close to y_i", it will be placed at a random angle
at a distance not exceeding radius_y_close. Suppressed by 'radius_y_close_percentile'
'radius_y_close_percentile' : if an algorithm requires to "place y close to y_i", it will be placed at a
random angle at a distance not exceeding radius_y_close. Radius_y_close can be set at a percentile of
nearest neighbor distance in Y. Suppressew 'radius_y_close'. Default - 10 (if 'radius_y_close' not set)
'radius_y_coefficient' : default - 1.0. Radius_y will be mutiplied by it (before adding safety margin).
'outlier_placement_method' : Method of placing outliers.
'cell-based' (default, None) - see article. Splits area between y_min to y_max into 2*r_y sized cells,
then places outliers at the center of each free cell.
'circular' : encircles data, adds r_y to it, then finds outlier positions outside that circle with
proper angular spacing. If ran full cirle, add one more r_y to radius and continues. So, outliers are
placed on smth like a spiral at the center of the data.
:param return_intermediate: along with embedding function it will return state of a low of intermediate
variables. Can be useful for debugging or plotting. Default - false.
:param random_state: random seed. Default - None.
:param verbose: Logging level. Default - 0 (log nothing).
:return: Embedding function, which accepts NxK array (K - original number of dimensions). Returns NxD embedded
array (D is usually 2). Also accepts verbose parameter for logging level.
'''
# TODO Save distance matrix on demand? Can make things faster in many cases, but takes memory.
# TODO Save P matrix also on demand? It is needed even less, it seems.
distance_matrix = self.get_distance_matrix(self.X)
np.fill_diagonal(distance_matrix, np.inf) # We are not interested in distance to itself
nn_x_distance = np.min(distance_matrix, axis=1) # Any axis will do
outlier_placement_method = function_kwargs.get('outlier_placement_method', None)
if outlier_placement_method is None:
outlier_placement_method = 'cell-based'
outlier_placement_method = outlier_placement_method.lower()
# TODO Step 1. Extra
if 'radius_x' in function_kwargs and 'radius_x_percentile' not in function_kwargs:
radius_x = function_kwargs['radius_x']
else:
radius_x_percentile = function_kwargs.get('radius_x_percentile', 99)
if verbose >= 2:
print("Setting radius_x at a percentile: ", radius_x_percentile)
radius_x = np.percentile(nn_x_distance, radius_x_percentile)
# Some potentially shared calculations for radius_y and radius_y_close
if 'radius_y_percentile' in function_kwargs or 'radius_y' not in function_kwargs or \
'radius_y_close_percentile' in function_kwargs or 'radius_y_close' not in function_kwargs:
# In that case we will need those things
y_distance_matrix = self.get_distance_matrix(self.Y)
np.fill_diagonal(y_distance_matrix, np.inf) # We are not interested in distance to itself
nn_y_distance = np.min(y_distance_matrix, axis=1) # Any axis will do
if 'radius_y' in function_kwargs and 'radius_y_percentile' not in function_kwargs:
radius_y = function_kwargs['radius_y']
else:
radius_y_percentile = function_kwargs.get('radius_y_percentile', 100)
radius_y = np.percentile(nn_y_distance, radius_y_percentile)
if verbose >= 2:
print("Set radius_y at a percentile: ", radius_y_percentile, "Value: ", radius_y)
if 'radius_y_close' in function_kwargs and 'radius_y_close_percentile' not in function_kwargs:
radius_y_close = function_kwargs['radius_y_close']
else:
radius_y_close_percentile = function_kwargs.get('radius_y_close_percentile', 10)
if verbose >= 2:
print("Setting radius_y_close at a percentile: ", radius_y_close_percentile)
radius_y_close = np.percentile(nn_y_distance, radius_y_close_percentile)
if outlier_placement_method == 'circular':
y_center = np.mean(self.Y, axis=0)
y_data_radius = np.max(np.sqrt(np.sum((self.Y - y_center) ** 2, axis=1))) # For outlier placement
radius_y_coef = function_kwargs.get('radius_y_coefficient', 1.0)
y_safety_margin = function_kwargs.get('y_safety_margin', radius_y_close)
radius_y *= radius_y_coef
radius_y += y_safety_margin
if verbose >= 2:
print("Radius_x: ", radius_x)
print("Radius_y_coef: ", radius_y_coef)
print("Safety Y margin: ", y_safety_margin)
print("Radius_y_close: ", radius_y_close)
print("Radius_y (final): ", radius_y)
power = function_kwargs.get('power', 1.0)
if outlier_placement_method == 'cell-based':
available_cells = list() # Contain number of cells counting on each axis
if verbose >= 2:
print('Generating original set of cells.')
y_min = np.min(self.Y, axis=0).reshape(-1) # Let's precompute
y_max = np.max(self.Y, axis=0).reshape(-1) # Let's precompute
if verbose >= 2:
print('Minimums: ', y_min)
print('Maximums: ', y_max)
# Number of cells per dimension.
original_cell_nums = [int(np.floor((y_max[i] - y_min[i]) / (2 * radius_y))) for i in range(self.Y.shape[1])]
if verbose >= 2:
print('Cell nums: ', original_cell_nums)
# Within y_min to y_max cell can be slighlty larger to divide exactly
adjusted_cell_sizes = [(y_max[i] - y_min[i]) / original_cell_nums[i] for i in range(self.Y.shape[1])]
if verbose >= 2:
print('Adjusted cell sizes: ', adjusted_cell_sizes)
# How many outer layers did we have to add. For now - none.
# added_outer_layers = 0 #We do it locally, cause runs are independent
cell_list = list(product(*[list(range(i)) for i in original_cell_nums]))
if verbose >= 3:
print('Cell list: ', cell_list)
for | |
0.6718750,
0.6835938, 0.6914062, 0.7031250, 0.7148438, 0.7265625, 0.7343750,
0.7460938, 0.7578125, 0.7656250, 0.7773438, 0.7851562, 0.7929688,
0.8046875, 0.8125000, 0.8203125, 0.8320312, 0.8398438, 0.8476562,
0.8554688, 0.8632812, 0.8710938, 0.8789062, 0.8867188, 0.8945312,
0.8984375, 0.9062500, 0.9140625, 0.9218750, 0.9257812, 0.9335938,
0.9375000, 0.9453125, 0.9492188, 0.9570312, 0.9609375, 0.9648438,
0.9687500, 0.9765625, 0.9804688, 0.9843750, 0.9882812, 0.9921875,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938]),
array([ 0.0000000, 0.0000000, 0.0039062, 0.0078125, 0.0117188, 0.0156250,
0.0195312, 0.0234375, 0.0273438, 0.0312500, 0.0351562, 0.0507812,
0.0429688, 0.0468750, 0.0507812, 0.0546875, 0.0585938, 0.0625000,
0.0664062, 0.0703125, 0.0742188, 0.0781250, 0.0820312, 0.0859375,
0.0898438, 0.0937500, 0.0976562, 0.1015625, 0.1054688, 0.1093750,
0.1132812, 0.1171875, 0.1210938, 0.1250000, 0.1289062, 0.1328125,
0.1367188, 0.1406250, 0.1445312, 0.1484375, 0.1523438, 0.1562500,
0.1601562, 0.1640625, 0.1679688, 0.1718750, 0.1757812, 0.1796875,
0.1835938, 0.1875000, 0.1914062, 0.1953125, 0.1992188, 0.2031250,
0.2070312, 0.2109375, 0.2148438, 0.2187500, 0.2226562, 0.2265625,
0.2304688, 0.2343750, 0.2382812, 0.2421875, 0.2460938, 0.2500000,
0.2500000, 0.2539062, 0.2578125, 0.2617188, 0.2656250, 0.2695312,
0.2734375, 0.2773438, 0.2812500, 0.2851562, 0.2890625, 0.2929688,
0.2968750, 0.3007812, 0.3046875, 0.3085938, 0.3125000, 0.3164062,
0.3203125, 0.3242188, 0.3281250, 0.3398438, 0.3593750, 0.3789062,
0.3984375, 0.4179688, 0.4335938, 0.4531250, 0.4726562, 0.4882812,
0.5078125, 0.5234375, 0.5429688, 0.5585938, 0.5742188, 0.5937500,
0.6093750, 0.6250000, 0.6406250, 0.6562500, 0.6718750, 0.6875000,
0.7031250, 0.7187500, 0.7343750, 0.7500000, 0.7656250, 0.7773438,
0.7929688, 0.8085938, 0.8203125, 0.8359375, 0.8476562, 0.8632812,
0.8750000, 0.8906250, 0.9023438, 0.9140625, 0.9296875, 0.9414062,
0.9531250, 0.9648438, 0.9765625, 0.9882812, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9921875, 0.9882812, 0.9843750, 0.9804688, 0.9804688, 0.9765625,
0.9726562, 0.9726562, 0.9687500, 0.9648438, 0.9648438, 0.9609375,
0.9609375, 0.9609375, 0.9570312, 0.9570312, 0.9570312, 0.9570312,
0.9531250, 0.9531250, 0.9531250, 0.9531250, 0.9531250, 0.9531250,
0.9570312, 0.9570312, 0.9570312, 0.9570312, 0.9609375, 0.9609375,
0.9609375, 0.9648438, 0.9648438, 0.9687500, 0.9726562, 0.9726562,
0.9765625, 0.9804688, 0.9804688, 0.9804688]),
array([ 0.0117188, 0.0117188, 0.0507812, 0.0664062, 0.0898438, 0.1171875,
0.1445312, 0.1718750, 0.1953125, 0.2226562, 0.2460938, 0.2734375,
0.2968750, 0.3203125, 0.3476562, 0.3710938, 0.3945312, 0.4179688,
0.4453125, 0.4687500, 0.4921875, 0.5156250, 0.5390625, 0.5625000,
0.5820312, 0.6054688, 0.6289062, 0.6523438, 0.6718750, 0.6953125,
0.7187500, 0.7382812, 0.7617188, 0.7812500, 0.8007812, 0.8242188,
0.8437500, 0.8632812, 0.8867188, 0.9062500, 0.9257812, 0.9453125,
0.9648438, 0.9843750, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9882812, 0.9765625,
0.9648438, 0.9531250, 0.9453125, 0.9335938, 0.9218750, 0.9140625,
0.9023438, 0.8945312, 0.8828125, 0.8750000, 0.8632812, 0.8554688,
0.8476562, 0.8359375, 0.8281250, 0.8203125, 0.8125000, 0.8046875,
0.7968750, 0.7890625, 0.7812500, 0.7734375, 0.7656250, 0.7578125,
0.7500000, 0.7460938, 0.7382812, 0.7304688, 0.7265625, 0.7187500,
0.7148438, 0.7070312, 0.7031250, 0.6992188, 0.6914062, 0.6875000,
0.6835938, 0.6796875, 0.6757812, 0.6718750, 0.6679688, 0.6679688,
0.6718750, 0.6757812, 0.6796875, 0.6835938, 0.6875000, 0.6914062,
0.6953125, 0.6992188, 0.7031250, 0.7070312, 0.7109375, 0.7148438,
0.7187500, 0.7226562, 0.7265625, 0.7304688, 0.7343750, 0.7382812,
0.7421875, 0.7460938, 0.7460938, 0.7500000, 0.7539062, 0.7578125,
0.7617188, 0.7656250, 0.7695312, 0.7734375, 0.7773438, 0.7812500,
0.7851562, 0.7890625, 0.7929688, 0.7968750, 0.8007812, 0.8046875,
0.8085938, 0.8125000, 0.8164062, 0.8203125, 0.8242188, 0.8281250,
0.8320312, 0.8359375, 0.8398438, 0.8437500, 0.8476562, 0.8515625,
0.8554688, 0.8593750, 0.8632812, 0.8671875, 0.8710938, 0.8750000,
0.8789062, 0.8828125, 0.8867188, 0.8906250, 0.8945312, 0.8984375,
0.9023438, 0.9062500, 0.9101562, 0.9140625, 0.9179688, 0.9218750,
0.9257812, 0.9296875, 0.9335938, 0.9375000, 0.9414062, 0.9453125,
0.9492188, 0.9531250, 0.9570312, 0.9609375, 0.9648438, 0.9687500,
0.9726562, 0.9765625, 0.9804688, 0.9804688]),
array([ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0]),
)
### IDL colormap 22 :: Hue Sat Value 2 ###
color_map_luts['idl22'] = \
(
array([ 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9921875, 0.9882812, 0.9843750, 0.9804688,
0.9726562, 0.9687500, 0.9648438, 0.9609375, 0.9531250, 0.9492188,
0.9414062, 0.9375000, 0.9296875, 0.9257812, 0.9179688, 0.9101562,
0.9023438, 0.8984375, 0.8906250, 0.8828125, 0.8750000, 0.8671875,
0.8593750, 0.8515625, 0.8437500, 0.8320312, 0.8242188, 0.8164062,
0.8085938, 0.7968750, 0.7890625, 0.7773438, 0.7695312, 0.7578125,
0.7500000, 0.7382812, 0.7304688, 0.7187500, 0.7070312, 0.6953125,
0.6835938, 0.6718750, 0.6640625, 0.6562500, 0.6523438, 0.6484375,
0.6445312, 0.6406250, 0.6367188, 0.6328125, 0.6289062, 0.6250000,
0.6210938, 0.6171875, 0.6132812, 0.6093750, 0.6054688, 0.6015625,
0.5976562, 0.5937500, 0.5898438, 0.5859375, 0.5820312, 0.5781250,
0.5742188, 0.5703125, 0.5664062, 0.5625000, 0.5585938, 0.5546875,
0.5507812, 0.5468750, 0.5429688, 0.5390625, 0.5351562, 0.5312500,
0.5273438, 0.5234375, 0.5195312, 0.5156250, 0.5117188, 0.5078125,
0.5039062, 0.5000000, 0.4960938, 0.4921875, 0.4882812, 0.4843750,
0.4804688, 0.4765625, 0.4726562, 0.4687500, 0.4648438, 0.4609375,
0.4570312, 0.4531250, 0.4492188, 0.4453125, 0.4414062, 0.4375000,
0.4335938, 0.4296875, 0.4257812, 0.4218750, 0.4179688, 0.4140625,
0.4101562, 0.4062500, 0.4023438, 0.3984375, 0.3945312, 0.3906250,
0.3867188, 0.3828125, 0.3789062, 0.3750000, 0.3710938, 0.3671875,
0.3632812, 0.3593750, 0.3554688, 0.3515625, 0.3476562, 0.3437500,
0.3398438, 0.3359375, 0.3320312, 0.3320312, 0.3281250, 0.3281250,
0.3437500, 0.3554688, 0.3671875, 0.3789062, 0.3906250, 0.4023438,
0.4179688, 0.4296875, 0.4453125, 0.4570312, 0.4726562, 0.4843750,
0.5000000, 0.5117188, 0.5273438, 0.5429688, 0.5585938, 0.5703125,
0.5859375, 0.6015625, 0.6171875, 0.6328125, 0.6484375, 0.6640625,
0.6796875, 0.6992188, 0.7148438, 0.7304688, 0.7460938, 0.7656250,
0.7812500, 0.8007812, 0.8164062, 0.8359375, 0.8515625, 0.8710938,
0.8906250, 0.9101562, 0.9257812, 0.9453125, 0.9648438, 0.9843750,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938]),
array([ 0.9882812, 0.9882812, 0.9843750, 0.9804688, 0.9765625, 0.9726562,
0.9687500, 0.9648438, 0.9609375, 0.9570312, 0.9531250, 0.9492188,
0.9453125, 0.9414062, 0.9375000, 0.9335938, 0.9296875, 0.9257812,
0.9218750, 0.9179688, 0.9140625, 0.9101562, 0.9062500, 0.9023438,
0.8984375, 0.8945312, 0.8906250, 0.8867188, 0.8828125, 0.8789062,
0.8750000, 0.8710938, 0.8671875, 0.8632812, 0.8593750, 0.8554688,
0.8515625, 0.8476562, 0.8437500, 0.8398438, 0.8359375, 0.8320312,
0.8281250, 0.8242188, 0.8203125, 0.8164062, 0.8125000, 0.8085938,
0.8046875, 0.8007812, 0.7968750, 0.7929688, 0.7890625, 0.7851562,
0.7812500, 0.7773438, 0.7734375, 0.7695312, 0.7656250, 0.7617188,
0.7578125, 0.7539062, 0.7500000, 0.7460938, 0.7421875, 0.7382812,
0.7343750, 0.7304688, 0.7265625, 0.7226562, 0.7187500, 0.7148438,
0.7109375, 0.7070312, 0.7031250, 0.6992188, 0.6953125, 0.6914062,
0.6875000, 0.6835938, 0.6796875, 0.6757812, 0.6718750, 0.6679688,
0.6640625, 0.6601562, 0.6601562, 0.6601562, 0.6640625, 0.6679688,
0.6718750, 0.6757812, 0.6796875, 0.6875000, 0.6914062, 0.6953125,
0.7031250, 0.7070312, 0.7148438, 0.7187500, 0.7265625, 0.7343750,
0.7382812, 0.7460938, 0.7539062, 0.7617188, 0.7695312, 0.7773438,
0.7851562, 0.7929688, 0.8007812, 0.8085938, 0.8164062, 0.8242188,
0.8359375, 0.8437500, 0.8515625, 0.8632812, 0.8710938, 0.8828125,
0.8906250, 0.9023438, 0.9101562, 0.9218750, 0.9335938, 0.9414062,
0.9531250, 0.9648438, 0.9765625, 0.9882812, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, | |
If the queryset is being used for a list of comment resources,
then this can be further filtered by passing ``?interdiff-revision=``
on the URL to match the given interdiff revision, and
``?line=`` to match comments on the given line number.
"""
q = super(FileDiffCommentResource, self).get_queryset(
request, review_request_id, *args, **kwargs)
return q.filter(filediff__diffset__revision=diff_revision)
    @webapi_check_local_site
    @augment_method_from(BaseDiffCommentResource)
    def get_list(self, *args, **kwargs):
        """Returns the list of comments on a file in a diff.
        This list can be filtered down by using the ``?line=`` and
        ``?interdiff-revision=`` query arguments.
        To filter for comments that start on a particular line in the file,
        use ``?line=``.
        To filter for comments that span revisions of diffs, you can specify
        the second revision in the range using ``?interdiff-revision=``.
        """
        # Body intentionally empty: the implementation (and the queryset
        # filtering described above) is supplied by BaseDiffCommentResource
        # through @augment_method_from.
        pass
filediff_comment_resource = FileDiffCommentResource()
class ReviewDiffCommentResource(BaseDiffCommentResource):
    """Provides information on diff comments made on a review.
    If the review is a draft, then comments can be added, deleted, or
    changed on this list. However, if the review is already published,
    then no changes can be made.
    """
    allowed_methods = ('GET', 'POST', 'PUT', 'DELETE')
    # Serialized comments are parented under their review.
    model_parent_key = 'review'
    mimetype_list_resource_name = 'review-diff-comments'
    mimetype_item_resource_name = 'review-diff-comment'
    def get_queryset(self, request, review_request_id, review_id,
                     *args, **kwargs):
        """Return the diff comments belonging to the given review."""
        q = super(ReviewDiffCommentResource, self).get_queryset(
            request, review_request_id, *args, **kwargs)
        return q.filter(review=review_id)
    def has_delete_permissions(self, request, comment, *args, **kwargs):
        """Allow deletion only by the owner of a still-unpublished review."""
        review = comment.review.get()
        return not review.public and review.user == request.user
    @webapi_check_local_site
    @webapi_login_required
    @webapi_response_errors(DOES_NOT_EXIST, INVALID_FORM_DATA,
                            NOT_LOGGED_IN, PERMISSION_DENIED)
    @webapi_request_fields(
        required = {
            'filediff_id': {
                'type': int,
                'description': 'The ID of the file diff the comment is on.',
            },
            'first_line': {
                'type': int,
                'description': 'The line number the comment starts at.',
            },
            'num_lines': {
                'type': int,
                'description': 'The number of lines the comment spans.',
            },
            'text': {
                'type': str,
                'description': 'The comment text.',
            },
        },
        optional = {
            'interfilediff_id': {
                'type': int,
                'description': 'The ID of the second file diff in the '
                               'interdiff the comment is on.',
            },
            'issue_opened': {
                'type': bool,
                'description': 'Whether the comment opens an issue.',
            },
        },
    )
    def create(self, request, first_line, num_lines, text,
               filediff_id, issue_opened=False, interfilediff_id=None, *args,
               **kwargs):
        """Creates a new diff comment.
        This will create a new diff comment on this review. The review
        must be a draft review.
        """
        try:
            review_request = \
                review_request_resource.get_object(request, *args, **kwargs)
            review = review_resource.get_object(request, *args, **kwargs)
        except ObjectDoesNotExist:
            return DOES_NOT_EXIST
        if not review_resource.has_modify_permissions(request, review):
            return _no_access_error(request.user)
        filediff = None
        interfilediff = None
        invalid_fields = {}
        # Validate filediff_id (and the optional interfilediff_id) against
        # this review request before creating anything.
        try:
            filediff = FileDiff.objects.get(
                pk=filediff_id,
                diffset__history__review_request=review_request)
        except ObjectDoesNotExist:
            invalid_fields['filediff_id'] = \
                ['This is not a valid filediff ID']
        if filediff and interfilediff_id:
            if interfilediff_id == filediff.id:
                invalid_fields['interfilediff_id'] = \
                    ['This cannot be the same as filediff_id']
            else:
                try:
                    interfilediff = FileDiff.objects.get(
                        pk=interfilediff_id,
                        diffset__history=filediff.diffset.history)
                except ObjectDoesNotExist:
                    invalid_fields['interfilediff_id'] = \
                        ['This is not a valid interfilediff ID']
        if invalid_fields:
            return INVALID_FORM_DATA, {
                'fields': invalid_fields,
            }
        new_comment = self.model(filediff=filediff,
                                 interfilediff=interfilediff,
                                 text=text,
                                 first_line=first_line,
                                 num_lines=num_lines,
                                 issue_opened=bool(issue_opened))
        # A newly opened issue always starts in the OPEN state; comments
        # without an issue have no status at all.
        if issue_opened:
            new_comment.issue_status = BaseComment.OPEN
        else:
            new_comment.issue_status = None
        new_comment.save()
        review.comments.add(new_comment)
        review.save()
        return 201, {
            self.item_result_key: new_comment,
        }
    @webapi_check_local_site
    @webapi_login_required
    @webapi_response_errors(DOES_NOT_EXIST, NOT_LOGGED_IN, PERMISSION_DENIED)
    @webapi_request_fields(
        optional = {
            'first_line': {
                'type': int,
                'description': 'The line number the comment starts at.',
            },
            'num_lines': {
                'type': int,
                'description': 'The number of lines the comment spans.',
            },
            'text': {
                'type': str,
                'description': 'The comment text.',
            },
            'issue_opened': {
                'type': bool,
                'description': 'Whether or not the comment opens an issue.',
            },
            'issue_status': {
                'type': ('dropped', 'open', 'resolved'),
                'description': 'The status of an open issue.',
            }
        },
    )
    def update(self, request, *args, **kwargs):
        """Updates a diff comment.
        This can update the text or line range of an existing comment.
        """
        try:
            review_request_resource.get_object(request, *args, **kwargs)
            review = review_resource.get_object(request, *args, **kwargs)
            diff_comment = self.get_object(request, *args, **kwargs)
        except ObjectDoesNotExist:
            return DOES_NOT_EXIST
        # Determine whether or not we're updating the issue status.
        # If so, delegate to the base_comment_resource.
        if base_comment_resource.should_update_issue_status(diff_comment,
                                                            **kwargs):
            return base_comment_resource.update_issue_status(request, self,
                                                             *args, **kwargs)
        if not review_resource.has_modify_permissions(request, review):
            return _no_access_error(request.user)
        # If we've updated the comment from having no issue opened,
        # to having an issue opened, we need to set the issue status
        # to OPEN.
        if not diff_comment.issue_opened and kwargs.get('issue_opened', False):
            diff_comment.issue_status = BaseComment.OPEN
        for field in ('text', 'first_line', 'num_lines', 'issue_opened'):
            value = kwargs.get(field, None)
            if value is not None:
                setattr(diff_comment, field, value)
        diff_comment.save()
        return 200, {
            self.item_result_key: diff_comment,
        }
    @webapi_check_local_site
    @augment_method_from(BaseDiffCommentResource)
    def delete(self, *args, **kwargs):
        """Deletes the comment.
        This will remove the comment from the review. This cannot be undone.
        Only comments on draft reviews can be deleted. Attempting to delete
        a published comment will return a Permission Denied error.
        Instead of a payload response, this will return :http:`204`.
        """
        pass
    @webapi_check_local_site
    @augment_method_from(BaseDiffCommentResource)
    def get_list(self, *args, **kwargs):
        """Returns the list of comments made on a review.
        This list can be filtered down by using the ``?line=`` and
        ``?interdiff-revision=``.
        To filter for comments that start on a particular line in the file,
        using ``?line=``.
        To filter for comments that span revisions of diffs, you can specify
        the second revision in the range using ``?interdiff-revision=``.
        """
        pass
review_diff_comment_resource = ReviewDiffCommentResource()
class ReviewReplyDiffCommentResource(BaseDiffCommentResource):
    """Provides information on replies to diff comments made on a review reply.
    If the reply is a draft, then comments can be added, deleted, or
    changed on this list. However, if the reply is already published,
    then no changes can be made.
    """
    # HTTP verbs accepted by this resource.
    allowed_methods = ('GET', 'POST', 'PUT', 'DELETE')
    model_parent_key = 'review'
    # Extends the base resource's fields with the comment being replied to.
    fields = dict({
        'reply_to': {
            'type': ReviewDiffCommentResource,
            'description': 'The comment being replied to.',
        },
    }, **BaseDiffCommentResource.fields)
    mimetype_list_resource_name = 'review-reply-diff-comments'
    mimetype_item_resource_name = 'review-reply-diff-comment'
    def get_queryset(self, request, review_request_id, review_id, reply_id,
                     *args, **kwargs):
        """Returns the queryset of reply comments for the given review reply.
        Narrows the base diff-comment queryset down to comments that belong
        to the reply (``review=reply_id``) whose base reply target is the
        given review (``review__base_reply_to=review_id``).
        """
        q = super(ReviewReplyDiffCommentResource, self).get_queryset(
            request, review_request_id, *args, **kwargs)
        q = q.filter(review=reply_id, review__base_reply_to=review_id)
        return q
    @webapi_check_local_site
    @webapi_login_required
    @webapi_response_errors(DOES_NOT_EXIST, INVALID_FORM_DATA,
                            NOT_LOGGED_IN, PERMISSION_DENIED)
    @webapi_request_fields(
        required = {
            'reply_to_id': {
                'type': int,
                'description': 'The ID of the comment being replied to.',
            },
            'text': {
                'type': str,
                'description': 'The comment text.',
            },
        },
    )
    def create(self, request, reply_to_id, text, *args, **kwargs):
        """Creates a new reply to a diff comment on the parent review.
        This will create a new diff comment as part of this reply. The reply
        must be a draft reply.
        Returns :http:`201` with the new comment on success, or :http:`303`
        pointing at the existing reply comment if one already exists for the
        target comment on this reply.
        """
        try:
            # The review request lookup is only for existence/access checks;
            # its return value is not needed.
            review_request_resource.get_object(request, *args, **kwargs)
            reply = review_reply_resource.get_object(request, *args, **kwargs)
        except ObjectDoesNotExist:
            return DOES_NOT_EXIST
        if not review_reply_resource.has_modify_permissions(request, reply):
            return _no_access_error(request.user)
        try:
            # Resolve the comment being replied to.
            comment = \
                review_diff_comment_resource.get_object(request,
                                                        comment_id=reply_to_id,
                                                        *args, **kwargs)
        except ObjectDoesNotExist:
            return INVALID_FORM_DATA, {
                'fields': {
                    'reply_to_id': ['This is not a valid comment ID'],
                }
            }
        # Check whether this reply already has a comment for that target.
        q = self.get_queryset(request, *args, **kwargs)
        q = q.filter(Q(reply_to=comment) & Q(review=reply))
        try:
            new_comment = q.get()
            # This already exists. Go ahead and update, but we're going to
            # redirect the user to the right place.
            is_new = False
        except self.model.DoesNotExist:
            # No existing reply comment; build one mirroring the target's
            # file/line placement.
            new_comment = self.model(filediff=comment.filediff,
                                     interfilediff=comment.interfilediff,
                                     reply_to=comment,
                                     first_line=comment.first_line,
                                     num_lines=comment.num_lines)
            is_new = True
        new_comment.text = text
        new_comment.save()
        data = {
            self.item_result_key: new_comment,
        }
        if is_new:
            reply.comments.add(new_comment)
            reply.save()
            return 201, data
        else:
            # Existing comment was updated; redirect to its canonical URL.
            return 303, data, {
                'Location': self.get_href(new_comment, request, *args, **kwargs)
            }
    @webapi_check_local_site
    @webapi_login_required
    @webapi_response_errors(DOES_NOT_EXIST, NOT_LOGGED_IN, PERMISSION_DENIED)
    @webapi_request_fields(
        required = {
            'text': {
                'type': str,
                'description': 'The new comment text.',
            },
        },
    )
    def update(self, request, *args, **kwargs):
        """Updates a reply to a diff comment.
        This can only update the text in the comment. The comment being
        replied to cannot change.
        """
        try:
            # Existence/access check only; the return value is unused.
            review_request_resource.get_object(request, *args, **kwargs)
            reply = review_reply_resource.get_object(request, *args, **kwargs)
            diff_comment = self.get_object(request, *args, **kwargs)
        except ObjectDoesNotExist:
            return DOES_NOT_EXIST
        if not review_reply_resource.has_modify_permissions(request, reply):
            return _no_access_error(request.user)
        # Only 'text' is mutable on a reply comment.
        for field in ('text',):
            value = kwargs.get(field, None)
            if value is not None:
                setattr(diff_comment, field, value)
        diff_comment.save()
        return 200, {
            self.item_result_key: diff_comment,
        }
    @webapi_check_local_site
    @augment_method_from(BaseDiffCommentResource)
    def delete(self, *args, **kwargs):
        """Deletes a comment from a draft reply.
        This will remove the comment from the reply. This cannot be undone.
        Only comments on draft replies can be deleted. Attempting to delete
        a published comment will return a Permission Denied error.
        Instead of a payload response, this will return :http:`204`.
        """
        # Implementation inherited from BaseDiffCommentResource via
        # @augment_method_from.
        pass
@webapi_check_local_site
@augment_method_from(BaseDiffCommentResource)
def get(self, *args, **kwargs):
"""Returns information on a reply to a comment.
Much of the information will be identical to that of the comment
being replied to. For example, the range of lines. This is because
the reply to the comment is meant to cover the | |
the socket is closed) only works for
# sockets. On other platforms it works for pipes and sockets.
if is_socket or (is_fifo and not IS_AIX):
loop.add_reader(fileno, self._read_ready)
except:
self.close()
raise
return self
def __repr__(self):
"""Returns the ``UnixWritePipeTransport``'s representation."""
return f'<{self.__class__.__name__} fd={self.fileno}>'
def get_extra_info(self, name, default=None):
"""
Gets optional transport information.
Parameters
----------
name : `str`
The extra information's name to get.
default : `Any`, Optional
Default value to return if `name` could not be matched. Defaults to `None`.
Returns
-------
info : `default`, `Any`
"""
return self._extra.get(name, default)
def get_write_buffer_size(self):
"""
Return the current size of the write buffer.
Returns
-------
get_write_buffer_size : `int`
"""
return len(self._buffer)
    def _read_ready(self):
        """
        Added as a read callback on the respective event loop to be called when the data is received on the pipe.
        If this happens, since it is a write only pipe, means it should be closed, so we do like that.
        """
        # Pipe was closed by peer.
        if self._buffer:
            # Unsent data remains, so surface the failure to the protocol.
            exception = BrokenPipeError()
        else:
            # Nothing pending; treat it as a clean close.
            exception = None
        self._close(exception)
    def write(self, data):
        """
        Write the given data to the transport.
        The method does not block; instead it arranges for the data to be sent asynchronously.
        Parameters
        ----------
        data : `bytes-like`
            The bytes data to be sent.
        """
        # Nothing to do for empty payloads.
        if not data:
            return
        # A memoryview makes later partial-write slicing copy-free.
        if isinstance(data, bytearray):
            data = memoryview(data)
        # NOTE(review): writes after closing are silently dropped -- confirm
        # callers treat this transport as best-effort once closing.
        if self.closing:
            return
        buffer = self._buffer
        if not buffer:
            # Buffer is empty: attempt an optimistic synchronous write first.
            try:
                n = os.write(self.fileno, data)
            except (BlockingIOError, InterruptedError):
                # Pipe not writable right now; buffer the whole payload below.
                n = 0
            except BaseException as err:
                self._fatal_error(err, 'Fatal write error on pipe transport')
                return
            if n == len(data):
                # Everything went out in one shot; nothing to buffer.
                return
            if n > 0:
                # Keep only the unwritten tail.
                data = memoryview(data)[n:]
            # Ask the loop to call us back once the pipe is writable again.
            self.loop.add_writer(self.fileno, self._write_ready)
        buffer.extend(data)
        # The buffer grew; possibly ask the protocol to pause producing.
        self._maybe_pause_protocol()
    def _write_ready(self):
        """
        Added as a write callback on the respective event loop when the transport has unsent data. Called when the
        respective socket becomes writable.
        """
        buffer = self._buffer
        try:
            n = os.write(self.fileno, buffer)
        except (BlockingIOError, InterruptedError):
            # Not actually writable; wait for the next callback.
            pass
        except BaseException as err:
            # Unrecoverable failure: drop pending data and tear down.
            buffer.clear()
            self.loop.remove_writer(self.fileno)
            self._fatal_error(err, 'Fatal write error on pipe transport')
        else:
            if n == len(buffer):
                # Fully flushed: stop write polling, maybe resume the protocol.
                buffer.clear()
                self.loop.remove_writer(self.fileno)
                self._maybe_resume_protocol()  # May append to buffer.
                if self.closing:
                    # EOF was requested earlier; finish the close now that
                    # the buffer has drained.
                    self.loop.remove_reader(self.fileno)
                    self._call_connection_lost(None)
                return
            if n > 0:
                # Partial write: discard only the bytes that made it out.
                del buffer[:n]
def can_write_eof(self):
"""
Return whether the transport supports ``.write_eof``.
Returns
-------
can_write_eof : `bool`
``UnixWritePipeTransport`` instances always return `True`.
"""
return True
    def write_eof(self):
        """
        Writes eof to the transport's protocol if applicable.
        If the write transport's buffer is empty, calls connection lost as well.
        """
        # Idempotent: a second call is a no-op.
        if self.closing:
            return
        self.closing = True
        if not self._buffer:
            # No pending data: unregister the read callback and notify the
            # protocol asynchronously. (With pending data, ._write_ready
            # performs this once the buffer drains.)
            loop = self.loop
            loop.remove_reader(self.fileno)
            loop.call_soon(self.__class__._call_connection_lost, self, None)
def set_protocol(self, protocol):
"""
Sets a new protocol to the transport.
Parameters
----------
protocol : ``SubprocessWritePipeProtocol`` or `Any`
Asynchronous protocol implementation.
"""
self.protocol = protocol
def get_protocol(self):
"""
Gets the transport's actual protocol.
Returns
-------
protocol : `None`, ``SubprocessWritePipeProtocol` or `Any`
Asynchronous protocol implementation.
"""
return self.protocol
def is_closing(self):
"""
Returns whether the read pipe transport is closing.
Returns
-------
is_closing : `bool`
"""
return self.closing
    def close(self):
        """
        Starts the shutdown process of the write pipe transport.
        """
        # Delegates to ``.write_eof``, which defers connection-lost until the
        # write buffer has drained. No-op if already closing or pipe is gone.
        if (self.pipe is not None) and (not self.closing):
            self.write_eof()
    def __del__(self):
        """
        Closes the write pipe transport if not yet closed.
        """
        pipe = self.pipe
        if (pipe is not None):
            # Close the raw pipe directly; no event-loop interaction at
            # garbage-collection time.
            pipe.close()
    def abort(self):
        """
        Close the transport immediately.
        The buffered data will be lost.
        """
        # `None` signals a deliberate close (no error) to the protocol.
        self._close(None)
    def _fatal_error(self, exception, message='Fatal error on pipe transport'):
        """
        If a fatal error occurs on the transport, renders its traceback and closes itself.
        Parameters
        ----------
        exception : `BaseException`
            The occurred exception.
        message : `str`, Optional
            Additional error message to render.
        """
        # OSError-s are expected pipe failures and are not rendered; anything
        # else gets its traceback logged asynchronously.
        if not isinstance(exception, OSError):
            self.loop.render_exc_async(exception, [message, '\non: ', repr(self), '\n'])
        self._close(exception)
    def _close(self, exception):
        """
        Starts the transport's closing process.
        Parameters
        ----------
        exception : `None` or ``BaseException``
            Defines whether the connection is closed, or an exception was received.
            If the connection was closed, then `exception` is given as `None`. This can happen at the case, when eof is
            received as well.
        """
        self.closing = True
        loop = self.loop
        buffer = self._buffer
        if buffer:
            # Pending data is discarded; stop polling for writability.
            self.loop.remove_writer(self.fileno)
            buffer.clear()
        loop.remove_reader(self.fileno)
        # Notify the protocol on the next loop iteration, not synchronously.
        loop.call_soon(self.__class__._call_connection_lost, self, exception)
    def _call_connection_lost(self, exception):
        """
        Calls the write pipe transport's protocol's `.connection_lost` with the given exception and closes the
        transport's pipe.
        Parameters
        ----------
        exception : `None` or ``BaseException``
            Exception to call the protocol's ``.connection_lost`` with.
            Defines whether the connection is closed, or an exception was received.
            If the connection was closed, then `exception` is given as `None`. This can happen at the case, when eof is
            received as well.
        """
        protocol = self.protocol
        # Guards against being scheduled twice (protocol is cleared below).
        if protocol is None:
            return
        try:
            protocol.connection_lost(exception)
        finally:
            # Release the pipe and protocol even if connection_lost raises.
            pipe = self.pipe
            if (pipe is not None):
                self.pipe = None
                pipe.close()
            self.protocol = None
    def _maybe_pause_protocol(self):
        """
        Called after data was ensured to be written into the pipe to check whether it's protocol should be paused.
        """
        size = self.get_write_buffer_size()
        # Pause only once the buffer exceeds the high-water mark.
        if size <= self._high_water:
            return
        # Already paused; nothing to do.
        if self.protocol_paused:
            return
        self.protocol_paused = True
        protocol = self.protocol
        if protocol is None:
            return
        try:
            protocol.pause_writing()
        except BaseException as err:
            # A misbehaving protocol must not break the transport; log it.
            self.loop.render_exc_async(err, [
                repr(self), '`._maybe_pause_protocol` failed\n'
                'On: ', repr(protocol), '.pause_writing()\n'])
    def _maybe_resume_protocol(self):
        """
        Called after successful writing to the pipe to check whether the protocol should be resumed.
        """
        # Resume only a paused protocol, and only once the buffer has drained
        # below the low-water mark.
        if (self.protocol_paused and self.get_write_buffer_size() <= self._low_water):
            self.protocol_paused = False
            protocol = self.protocol
            if (protocol is not None):
                try:
                    protocol.resume_writing()
                except BaseException as err:
                    # A misbehaving protocol must not break the transport.
                    self.loop.render_exc_async(err, [
                        repr(self), '`._maybe_resume_protocol` failed\n'
                        'on: ', repr(protocol), '.resume_writing()\n'])
    def get_write_buffer_limits(self):
        """
        Returns the low and the high water of the transport.
        Returns
        -------
        low_water : `int`
            The ``.protocol`` is resumed writing when the buffer size goes under the low water mark. Defaults to
            `16384`.
        high_water : `int`
            The ``.protocol`` is paused writing when the buffer size passes the high water mark. Defaults to `65536`.
        """
        return (self._low_water, self._high_water)
    def _set_write_buffer_limits(self, low=None, high=None):
        """
        Sets the write buffer limits of the transport.
        Parameters
        ----------
        low : `None` or `int`, Optional
            The ``.protocol`` is resumed writing when the buffer size goes under the low water mark. Defaults to
            `16384`.
        high : `None` or `int`, Optional
            The ``.protocol`` is paused writing when the buffer size passes the high water mark. Defaults to
            `65536`.
        Raises
        ------
        ValueError
            If `high` is lower than `low` or if `low` is lower than `0`.
        """
        if high is None:
            if low is None:
                # Neither given: fall back to the defaults.
                high = 65536
                low = 16384
            else:
                # Only low given: derive high as four times low.
                high = low<<2
        else:
            if low is None:
                # Only high given: derive low as a quarter of high.
                low = high>>2
        if low < 0 or high < low:
            raise ValueError(f'High water must be greater or equal than low, what must be greater than equal than `0`, '
                f'got high={high!r}; low={low!r}.')
        self._high_water = high
        self._low_water = low
def set_write_buffer_limits(self, low=None, high=None):
"""
Set the high- and low-water limits for write flow control.
These two values control when to call the protocol's ``.pause_writing`` and ``.resume_writing`` methods. If
specified, the low-water limit must be less than or equal to the high-water limit. Neither value can be
negative. The defaults are implementation-specific. If only the high-water limit is given, the low-water limit
defaults to an implementation-specific value less than or equal to the high-water limit. Setting high to zero
forces low to zero as well, and causes ``.pause_writing`` to be called whenever the buffer becomes non-empty.
Setting low to zero causes ``.resume_writing`` to be called only once the buffer is empty. Use of zero for
either limit is generally sub-optimal as it reduces opportunities for doing I/O and computation concurrently.
Parameters
----------
low : None` or `int`, Optional
The ``.protocol`` is paused writing when the buffer size passes the high water mark. Defaults to `65536`.
high : `None` or `int`, Optional
| |
network_name = f"{container.full_name}_network"
if container.name == 'router':
continue
# We are creating a new subnet with a new subnet number
subnet += 1
# We maintain a map of container_name to subnet for use by the router.
container_to_subnet[container.name] = subnet
actual_name = '{0}_Actual'.format(container.name)
# Create the network with the appropriate iprange
ipam_pool = docker.types.IPAMPool(subnet=f'{network_num}.{untrusted_num}.{subnet}.0/24')
ipam_config = docker.types.IPAMConfig(pool_configs=[ipam_pool])
network = client.networks.create(
network_name,
ipam=ipam_config,
driver='bridge',
internal=True
)
# We connect the container with host=2. Later we'll connect the router with host=3
container_ip = f'{network_num}.{untrusted_num}.{subnet}.2'
container.set_ip_address(network_name, container_ip)
network.connect(container.container, ipv4_address=container_ip, aliases=[actual_name, ])
self.networks.append(network)
# The router pretends to be all dockers on this network.
if len(container.outgoing_connections) == 0:
connected_machines = [x.name for x in containers]
else:
connected_machines = container.outgoing_connections
for connected_machine in connected_machines:
if connected_machine == 'router':
continue
if connected_machine == container.name:
continue
if container.name not in router_connections:
router_connections[container.name] = []
if connected_machine not in router_connections:
router_connections[connected_machine] = []
# The router must be in both endpoints' network, and must connect to
# all endpoints on a network simultaneously, so we group together
# all connections here, and then connect later.
router_connections[container.name].append(connected_machine)
router_connections[connected_machine].append(container.name)
# Connect the router to all networks.
for startpoint, endpoints in router_connections.items():
full_startpoint_name = f'{self.untrusted_user}_{startpoint}'
network_name = f"{full_startpoint_name}_network"
# Store the ip address of the router on this network
router_ip = f'{network_num}.{untrusted_num}.{container_to_subnet[startpoint]}.3'
router.set_ip_address(network_name, router_ip)
aliases = []
for endpoint in endpoints:
if endpoint in aliases:
continue
aliases.append(endpoint)
network = self.get_network_with_name(network_name)
network.connect(router.container, ipv4_address=router_ip, aliases=aliases)
client.close()
    def cleanup_networks(self):
        """ Destroy all created networks. """
        for network in self.networks:
            try:
                network.remove()
                # Close the docker client/API handles so sockets aren't leaked.
                network.client.api.close()
                network.client.close()
                self.log_message(
                    f'{dateutils.get_current_time()} '
                    f'destroying docker network {network}'
                )
            except Exception:
                # Best-effort teardown: log the failure and keep going so the
                # remaining networks are still removed.
                self.log_message(
                    f'{dateutils.get_current_time()} ERROR: Could not remove docker '
                    f'network {network}'
                )
        self.networks.clear()
    def create_knownhosts_json(self, containers):
        """
        Given a set of containers, add initialization files to each
        container's directory which specify how to connect to other endpoints
        on the container's network (hostname, port).
        """
        # Writing complete knownhost JSON to the container directory
        router = self.get_router(containers)
        # Deterministic ordering keeps the generated files stable.
        sorted_networked_containers = sorted(containers, key=lambda x: x.name)
        for container in sorted_networked_containers:
            knownhosts_location = os.path.join(container.directory, 'knownhosts.json')
            container_knownhost = {'hosts': {}}
            # An empty outgoing_connections list means "connect to everyone".
            if len(container.outgoing_connections) == 0:
                connections = [x.name for x in containers]
            else:
                connections = container.outgoing_connections
            # Every container also knows about itself.
            # NOTE(review): this appends to container.outgoing_connections
            # in place when it is non-empty -- confirm that mutation is
            # intended for later consumers of that list.
            if container.name not in connections:
                connections.append(container.name)
            sorted_connections = sorted(connections)
            for connected_container_name in sorted_connections:
                connected_container = self.get_container_with_name(
                    connected_container_name,
                    containers
                )
                network_name = f"{container.full_name}_network"
                # If there is a router, the router is impersonating all other
                # containers, but has only one ip address.
                if router is not None:
                    # Even if we are injecting the router, we know who WE are.
                    if container.name == 'router' and connected_container_name == 'router':
                        continue
                    elif container.name == connected_container_name:
                        network_name = f"{container.full_name}_network"
                        ip_address = container.get_ip_address(network_name)
                    # If this node is not the router, we must inject the router
                    elif container.name != 'router':
                        # Get the router's ip on the container's network
                        network_name = f"{container.full_name}_network"
                        ip_address = router.get_ip_address(network_name)
                    else:
                        # If we are the router, get the connected container's ip on its own network
                        network_name = f"{self.untrusted_user}_{connected_container_name}_network"
                        ip_address = connected_container.get_ip_address(network_name)
                else:
                    # Routerless setups share one flat network.
                    ip_address = connected_container.get_ip_address(
                        f'{self.untrusted_user}_routerless_network'
                    )
                container_knownhost['hosts'][connected_container.name] = {
                    'tcp_start_port': connected_container.tcp_port_range[0],
                    'tcp_end_port': connected_container.tcp_port_range[1],
                    'udp_start_port': connected_container.udp_port_range[0],
                    'udp_end_port': connected_container.udp_port_range[1],
                    'ip_address': ip_address
                }
            with open(knownhosts_location, 'w') as outfile:
                json.dump(container_knownhost, outfile, indent=4)
            # Grading runs under a different user; open up the permissions.
            autograding_utils.add_all_permissions(knownhosts_location)
    def create_knownhosts_txt(self, containers):
        """
        Given a set of containers, add initialization files to each
        container's directory which specify how to connect to other endpoints
        on the container's network (hostname, port).
        """
        tcp_connection_list = []
        udp_connection_list = []
        # Deterministic ordering keeps the generated files stable.
        sorted_containers = sorted(containers, key=lambda x: x.name)
        for container in sorted_containers:
            # Only the first port of each range is advertised here.
            tcp_connection_list.append([container.name, container.tcp_port_range[0]])
            udp_connection_list.append([container.name, container.udp_port_range[0]])
        # Writing complete knownhosts csvs to input directory'
        networked_containers = self.get_standard_containers(containers)
        router = self.get_router(containers)
        if router is not None:
            networked_containers.append(router)
        sorted_networked_containers = sorted(networked_containers, key=lambda x: x.name)
        for container in sorted_networked_containers:
            # Every container receives the full TCP and UDP host lists.
            knownhosts_location = os.path.join(container.directory, 'knownhosts_tcp.txt')
            with open(knownhosts_location, 'w') as outfile:
                for tup in tcp_connection_list:
                    outfile.write(" ".join(map(str, tup)) + '\n')
                    outfile.flush()
            autograding_utils.add_all_permissions(knownhosts_location)
            knownhosts_location = os.path.join(container.directory, 'knownhosts_udp.txt')
            with open(knownhosts_location, 'w') as outfile:
                for tup in udp_connection_list:
                    outfile.write(" ".join(map(str, tup)) + '\n')
                    outfile.flush()
            autograding_utils.add_all_permissions(knownhosts_location)
###########################################################
#
# Dispatcher Functions
#
###########################################################
    def process_dispatcher_actions(self, containers):
        """
        Deliver actions (stdin, delay, stop, start, kill)
        to a set of containers per their testcase specification.
        """
        for action_obj in self.dispatcher_actions:
            action_type = action_obj["action"]
            if action_type == "delay":
                # Sleep in .1s slices so we can bail out early once every
                # container has exited.
                time_to_delay = float(action_obj["seconds"])
                while time_to_delay > 0 and self.at_least_one_alive(containers):
                    if time_to_delay >= .1:
                        time.sleep(.1)
                    else:
                        time.sleep(time_to_delay)
                    # This can go negative (subtracts .1 even in the else case) but that's fine.
                    time_to_delay -= .1
            elif action_type == "stdin":
                # Forward the raw string to the targeted containers' stdin.
                self.send_message_to_processes(
                    containers,
                    action_obj["string"],
                    action_obj["containers"]
                )
            elif action_type in ['stop', 'start', 'kill']:
                # Lifecycle actions are sent as in-band control messages.
                self.send_message_to_processes(
                    containers,
                    f"SUBMITTY_SIGNAL:{action_type.upper()}\n",
                    action_obj['containers']
                )
            # A .1 second delay after each action to keep things flowing smoothly.
            time.sleep(.1)
        if len(self.dispatcher_actions) > 0:
            # Tell every container that no further messages will arrive.
            names = [c.name for c in containers]
            self.send_message_to_processes(containers, "SUBMITTY_SIGNAL:FINALMESSAGE\n", names)
def get_container_with_name(self, name, containers):
""" Given a name, grab the corresponding container. """
for container in containers:
if container.name == name:
return container
return None
def get_network_with_name(self, name):
""" Given a name, grab the corresponding container. """
for network in self.networks:
if network.name == name:
return network
return None
    # Targets must hold names/keys for the processes dictionary
    def send_message_to_processes(self, containers, message, targets):
        """
        Given containers, targets, and a message, deliver the message to the target containers.
        """
        for target in targets:
            container = self.get_container_with_name(target, containers)
            # Refresh cached container state so the status check is current.
            container.container.reload()
            if container.container.status != 'exited':
                # Deliver the message over the container's attached socket.
                os.write(container.socket.fileno(), message.encode('utf-8'))
            else:
                # Exited containers are skipped silently.
                pass
def at_least_one_alive(self, containers):
""" Check that at least one of a set of containers is running. """
for container in self.get_standard_containers(containers):
# Update container variables so that status is accurate.
container.container.reload()
if container.container.status != 'exited':
return True
return False
###########################################################
#
# Overridden Secure Execution Environment Functions
#
###########################################################
    def setup_for_compilation_testcase(self):
        """ For every container, set up its directory for compilation. """
        # All per-container directories live under the shared work dir.
        os.chdir(self.tmp_work)
        for container in self.containers:
            self._setup_single_directory_for_compilation(container.directory)
            # Run any necessary pre_commands
            self._run_pre_commands(container.directory)
    def setup_for_execution_testcase(self, testcase_dependencies):
        """ For every container, set up its directory for execution. """
        os.chdir(self.tmp_work)
        for container in self.containers:
            self._setup_single_directory_for_execution(container.directory, testcase_dependencies)
            self._run_pre_commands(container.directory)
            # Copy in the submitty_router if necessary.
            if container.import_router:
                router_path = os.path.join(self.tmp_autograding, "bin", "submitty_router.py")
                self.log_message(f"COPYING:\n\t{router_path}\n\t{container.directory}")
                shutil.copy(router_path, container.directory)
                # Router script must be readable/executable by the grading user.
                autograding_utils.add_all_permissions(container.directory)
    def setup_for_random_output(self, testcase_dependencies):
        """ For every container, set up its directory for random output generation. """
        os.chdir(self.tmp_work)
        # Random output generation uses the instructor solution containers.
        for container in self.solution_containers:
            self._setup_single_directory_for_random_output(
                container.directory,
                testcase_dependencies
            )
            self._run_pre_commands(container.directory)
            # Copy in the submitty_router if this container requires one.
            if container.import_router:
                router_path = os.path.join(self.tmp_autograding, "bin", "submitty_router.py")
                self.log_message(f"COPYING:\n\t{router_path}\n\t{container.directory}")
                shutil.copy(router_path, container.directory)
                autograding_utils.add_all_permissions(container.directory)
    def setup_for_archival(self, overall_log):
        """ For every container, set up its directory for archival. """
        self.setup_for_testcase_archival(overall_log)
        for container in self.containers:
            # Per-container result subdirectories are only needed when the
            # testcase runs more than one container.
            # NOTE(review): the guard is loop-invariant and could be hoisted.
            if len(self.containers) > 1:
                public_dir = os.path.join(
                    self.tmp_results,
                    "results_public",
                    self.name,
                    container.name
                )
                details_dir = os.path.join(self.tmp_results, "details", self.name, container.name)
                os.mkdir(public_dir)
                os.mkdir(details_dir)
    def execute_random_input(self, untrusted_user, executable, arguments, logfile, cwd):
        """ Generate random input for this container using its testcase specification. """
        # A throwaway container spec: default image, no networking.
        container_spec = {
            'container_name': f'{untrusted_user}_temporary_container',
            'container_image': 'submitty/autograding-default:latest',
            'server': False,
            'outgoing_connections': []
        }
        # Create a container to generate random input inside of.
        container = Container(
            container_spec,
            untrusted_user,
            self.random_input_directory,
            False,
            self.is_test_environment,
            self.log_message,
            self.log_container_meta
        )
        execution_script = os.path.join(container.directory, executable)
        try:
            container.create(execution_script, arguments, False)
            container.start(logfile)
            # Block until the generator process finishes.
            container.process.wait()
        except Exception:
            self.log_message(
                'ERROR generating random input using docker. '
                'See stack trace output for more details.'
            )
            self.log_stack_trace(traceback.format_exc())
        finally:
            # Always clean up the temporary container, even on failure.
            container.cleanup_container()
        # NOTE(review): return_code is read even when the try block failed --
        # confirm Container always defines it after construction.
        return container.return_code
    def execute_random_output(self, untrusted_user, script, arguments, logfile, cwd=None):
        """
        Random output execution is analogous to execution, but with slightly different arguments
        and a different network of containers.
        """
        # `untrusted_user` and `cwd` are accepted for interface compatibility
        # but unused here; execution runs against the solution containers.
        return self.execute_helper(self.solution_containers, script, arguments, logfile)
    def execute(self, untrusted_user, script, arguments, logfile, cwd=None):
        """ Run an execution step using our container network specification. """
        # `untrusted_user` and `cwd` are accepted for interface compatibility
        # but unused here; the configured containers carry that state.
        return self.execute_helper(self.containers, script, arguments, logfile)
def execute_helper(self, containers, script, arguments, logfile):
""" Create, Start, Monitor/Deliver input to a network of containers. """
try:
# Make certain we are executing in the environment in which we say we are
# (i.e. test vs production environment).
self.verify_execution_status()
except Exception:
self.log_stack_trace(traceback.format_exc())
self.log_message("ERROR: Could not verify execution mode status.")
return
try:
self.create_containers(containers, script, arguments)
self.network_containers(containers)
except Exception:
self.log_message(
'ERROR: Could not create or network containers. '
'See stack trace output for more details.'
)
self.log_stack_trace(traceback.format_exc())
return -1
try:
router = self.get_router(containers)
# First start the router a second before any other container,
# giving it time to initialize.
if router is not None:
| |
# geomstats/geometry/special_orthogonal.py
"""The special orthogonal group SO(n).
i.e. the Lie group of rotations in n dimensions.
"""
import geomstats.backend as gs
from geomstats.geometry.embedded_manifold import EmbeddedManifold
from geomstats.geometry.general_linear import GeneralLinear
from geomstats.geometry.lie_group import LieGroup
# Numerical tolerance used by comparisons in this module.
ATOL = 1e-5
# Precomputed Taylor-expansion coefficient tables around angle 0 and angle pi.
# NOTE(review): presumably used for numerically stable evaluation of
# rotation-vector maps near their singular angles -- confirm against the
# methods that consume them (not visible in this chunk).
TAYLOR_COEFFS_1_AT_0 = [1., 0.,
                        - 1. / 12., 0.,
                        - 1. / 720., 0.,
                        - 1. / 30240., 0.]
TAYLOR_COEFFS_2_AT_0 = [1. / 12., 0.,
                        1. / 720., 0.,
                        1. / 30240., 0.,
                        1. / 1209600., 0.]
TAYLOR_COEFFS_1_AT_PI = [0., - gs.pi / 4.,
                         - 1. / 4., - gs.pi / 48.,
                         - 1. / 48., - gs.pi / 480.,
                         - 1. / 480.]
class SpecialOrthogonal(LieGroup, EmbeddedManifold):
"""Class for the special orthogonal group SO(n).
i.e. the Lie group of rotations.
"""
    def __init__(self, n, point_type=None, epsilon=0.):
        """Initialize the special orthogonal group SO(n).
        Parameters
        ----------
        n : int
            Dimension of the ambient space; must be an integer > 1.
        point_type : str, {'vector', 'matrix'}, optional
            Default point representation. When None, 'vector' is used for
            n == 3 and 'matrix' otherwise.
        epsilon : float, optional
            Small offset added to float masks elsewhere in the class to
            avoid division by zero.
        """
        # NOTE(review): `assert`-based validation is stripped under -O.
        assert isinstance(n, int) and n > 1
        self.n = n
        # dim SO(n) = n * (n - 1) / 2
        self.dimension = int((n * (n - 1)) / 2)
        self.epsilon = epsilon
        self.default_point_type = point_type
        if point_type is None:
            self.default_point_type = 'vector' if n == 3 else 'matrix'
        LieGroup.__init__(self,
                          dimension=self.dimension)
        EmbeddedManifold.__init__(self,
                                  dimension=self.dimension,
                                  embedding_manifold=GeneralLinear(n=n))
        self.bi_invariant_metric = self.left_canonical_metric
    def get_identity(self, point_type=None):
        """Get the identity of the group.
        as a vector if point_type == 'vector',
        as a matrix if point_type == 'matrix'.
        """
        if point_type is None:
            point_type = self.default_point_type
        # Vector representation: the zero rotation vector.
        identity = gs.zeros(self.dimension)
        if point_type == 'matrix':
            # Matrix representation: the n x n identity matrix.
            identity = gs.eye(self.n)
        return identity
    # Expose the default-representation identity as a read-only property.
    identity = property(get_identity)
    def belongs(self, point, point_type=None):
        """Evaluate if a point belongs to SO(n)."""
        if point_type is None:
            point_type = self.default_point_type
        if point_type == 'vector':
            point = gs.to_ndarray(point, to_ndim=2)
            n_points, vec_dim = point.shape
            # NOTE(review): the vector check only tests dimensionality --
            # any vector of the right length is accepted as a rotation vector.
            belongs = vec_dim == self.dimension
            belongs = gs.to_ndarray(belongs, to_ndim=1)
            belongs = gs.to_ndarray(belongs, to_ndim=2, axis=1)
            # Broadcast the single verdict to one row per point.
            belongs = gs.tile(belongs, (n_points, 1))
            return belongs
        elif point_type == 'matrix':
            point = gs.to_ndarray(point, to_ndim=3)
            # Orthogonality check: R @ R^T == I, elementwise within tolerance.
            # NOTE(review): det(R) == +1 is not checked, so this accepts all
            # of O(n), not just SO(n) -- confirm intended.
            point_transpose = gs.transpose(point, axes=(0, 2, 1))
            mask = gs.isclose(gs.matmul(point, point_transpose),
                              gs.eye(self.n))
            mask = gs.all(mask, axis=(1, 2))
            mask = gs.to_ndarray(mask, to_ndim=1)
            mask = gs.to_ndarray(mask, to_ndim=2, axis=1)
            return mask
    def regularize(self, point, point_type=None):
        """Regularize a point to be in accordance with convention.
        In 3D, regularize the norm of the rotation vector,
        to be between 0 and pi, following the axis-angle
        representation's convention.
        If the angle is between pi and 2pi,
        the function computes its complementary in 2pi and
        inverts the direction of the rotation axis.
        Parameters
        ----------
        point : array-like
        point_type : str, {'vector', 'matrix'}
        Returns
        -------
        regularized_point : array-like
        """
        if point_type is None:
            point_type = self.default_point_type
        if point_type == 'vector':
            point = gs.to_ndarray(point, to_ndim=2)
            n_points, _ = point.shape
            regularized_point = point
            # Regularization is only implemented for the 3D vector case.
            if self.n == 3:
                angle = gs.linalg.norm(regularized_point, axis=1)
                mask_0 = gs.isclose(angle, 0.)
                mask_not_0 = ~mask_0
                mask_pi = gs.isclose(angle, gs.pi)
                # This avoids division by 0.
                mask_0_float = gs.cast(mask_0, gs.float32) + self.epsilon
                mask_not_0_float = (
                    gs.cast(mask_not_0, gs.float32)
                    + self.epsilon)
                mask_pi_float = gs.cast(mask_pi, gs.float32) + self.epsilon
                # k: nearest integer multiple of 2*pi below/above the angle.
                k = gs.floor(angle / (2 * gs.pi) + .5)
                angle += mask_0_float
                # Scale factors bringing each angle into [0, pi], applied
                # per-mask so each point gets exactly one correction.
                norms_ratio = gs.zeros_like(angle)
                norms_ratio += mask_not_0_float * (
                    1. - 2. * gs.pi * k / angle)
                norms_ratio += mask_0_float
                norms_ratio += mask_pi_float * (
                    gs.pi / angle
                    - (1. - 2. * gs.pi * k / angle))
                regularized_point = gs.einsum(
                    'n,ni->ni', norms_ratio, regularized_point)
                assert gs.ndim(regularized_point) == 2
        elif point_type == 'matrix':
            # Matrices need no regularization; only normalize the ndim.
            point = gs.to_ndarray(point, to_ndim=3)
            regularized_point = gs.to_ndarray(point, to_ndim=3)
        return regularized_point
    def regularize_tangent_vec_at_identity(
            self, tangent_vec, metric=None, point_type=None):
        """Regularize a tangent vector at the identity.
        In 3D, regularize a tangent_vector by getting its norm at the identity,
        determined by the metric, to be less than pi.
        Parameters
        ----------
        tangent_vec : array-like
        metric : RiemannianMetric, optional
            Defaults to the left canonical metric.
        point_type : str, {'vector', 'matrix'}, optional
        Returns
        -------
        regularized_vec : array-like
        """
        if point_type is None:
            point_type = self.default_point_type
        if point_type == 'vector':
            tangent_vec = gs.to_ndarray(tangent_vec, to_ndim=2)
            if self.n == 3:
                if metric is None:
                    metric = self.left_canonical_metric
                tangent_vec_metric_norm = metric.norm(tangent_vec)
                tangent_vec_canonical_norm = gs.linalg.norm(
                    tangent_vec, axis=1)
                if gs.ndim(tangent_vec_canonical_norm) == 1:
                    tangent_vec_canonical_norm = gs.expand_dims(
                        tangent_vec_canonical_norm, axis=1)
                # Vectors with (near) zero norm in either metric are kept
                # as-is; the rest are rescaled then regularized.
                mask_norm_0 = gs.isclose(tangent_vec_metric_norm, 0.)
                mask_canonical_norm_0 = gs.isclose(
                    tangent_vec_canonical_norm, 0.)
                mask_0 = mask_norm_0 | mask_canonical_norm_0
                mask_else = ~mask_0
                # This avoids division by 0.
                mask_0_float = gs.cast(mask_0, gs.float32) + self.epsilon
                mask_else_float = gs.cast(mask_else, gs.float32) + self.epsilon
                regularized_vec = gs.zeros_like(tangent_vec)
                regularized_vec += mask_0_float * tangent_vec
                tangent_vec_canonical_norm += mask_0_float
                # coef converts the canonical norm into the metric norm.
                coef = gs.zeros_like(tangent_vec_metric_norm)
                coef += mask_else_float * (
                    tangent_vec_metric_norm
                    / tangent_vec_canonical_norm)
                regularized_vec += mask_else_float * self.regularize(
                    coef * tangent_vec)
                coef += mask_0_float
                # Undo the scaling after regularization.
                regularized_vec = mask_else_float * (
                    regularized_vec / coef)
            else:
                # TODO(nina): Check if/how regularization is needed in nD?
                regularized_vec = tangent_vec
        elif point_type == 'matrix':
            # No regularization in the matrix representation.
            regularized_vec = tangent_vec
        return regularized_vec
def regularize_tangent_vec(
        self, tangent_vec, base_point,
        metric=None, point_type=None):
    """Regularize a tangent vector at a base point.

    In 3D, regularize tangent_vec by transporting it to the identity
    (via the inverse jacobian of translation), regularizing it there,
    then transporting it back to base_point.

    Parameters
    ----------
    tangent_vec
    base_point
    metric : defaults to self.left_canonical_metric when self.n == 3.
    point_type : 'vector' or 'matrix'; defaults to self.default_point_type.

    Returns
    -------
    regularized_tangent_vec
    """
    if point_type is None:
        point_type = self.default_point_type
    if point_type == 'vector':
        tangent_vec = gs.to_ndarray(tangent_vec, to_ndim=2)
        if self.n == 3:
            if metric is None:
                metric = self.left_canonical_metric
            base_point = self.regularize(base_point, point_type)
            n_vecs = tangent_vec.shape[0]
            jacobian = self.jacobian_translation(
                point=base_point,
                left_or_right=metric.left_or_right,
                point_type=point_type)
            # NOTE(review): only jacobian[0] is broadcast to all vectors,
            # i.e. a single base point is assumed -- confirm callers never
            # pass a batch of distinct base points here.
            jacobian = gs.array([jacobian[0]] * n_vecs)
            inv_jacobian = gs.linalg.inv(jacobian)
            inv_jacobian = gs.to_ndarray(inv_jacobian, to_ndim=3)
            # Pull the vector back to the identity.
            tangent_vec_at_id = gs.einsum(
                'ni,nij->nj',
                tangent_vec,
                gs.transpose(inv_jacobian, axes=(0, 2, 1)))
            tangent_vec_at_id = self.regularize_tangent_vec_at_identity(
                tangent_vec_at_id, metric, point_type)
            jacobian = gs.to_ndarray(jacobian, to_ndim=3)
            # Push the regularized vector back to the base point.
            regularized_tangent_vec = gs.einsum(
                'ni,nij->nj',
                tangent_vec_at_id,
                gs.transpose(jacobian, axes=(0, 2, 1)))
        else:
            # TODO(nina): Check if/how regularization is needed in nD?
            regularized_tangent_vec = tangent_vec
    elif point_type == 'matrix':
        regularized_tangent_vec = tangent_vec
    return regularized_tangent_vec
def projection(self, mat):
    """Project a matrix on SO(n) using the Frobenius norm.

    Parameters
    ----------
    mat : array-like, coerced to shape (n_mats, n, n).

    Returns
    -------
    rot_mat : array-like, shape (n_mats, n, n)
    """
    mat = gs.to_ndarray(mat, to_ndim=3)
    n_mats, mat_dim_1, mat_dim_2 = mat.shape
    assert mat_dim_1 == mat_dim_2 == self.n
    if self.n == 3:
        # Closest rotation via SVD: U @ V. When det < 0 the result is a
        # reflection; flip the last singular direction to fix it.
        mat_unitary_u, diag_s, mat_unitary_v = gs.linalg.svd(mat)
        rot_mat = gs.einsum('nij,njk->nik', mat_unitary_u, mat_unitary_v)
        mask = gs.less(gs.linalg.det(rot_mat), 0.)
        # NOTE(review): self.epsilon is added to the mask and to the diag
        # matrix -- presumably to keep gradients finite in autodiff
        # backends; confirm, since it slightly perturbs the result.
        mask_float = gs.cast(mask, gs.float32) + self.epsilon
        diag = gs.array([[1., 1., -1.]])
        diag = gs.to_ndarray(gs.diag(diag), to_ndim=3) + self.epsilon
        new_mat_diag_s = gs.tile(diag, [n_mats, 1, 1])
        aux_mat = gs.einsum(
            'nij,njk->nik',
            mat_unitary_u,
            new_mat_diag_s)
        rot_mat += gs.einsum(
            'n,njk->njk',
            mask_float,
            gs.einsum(
                'nij,njk->nik',
                aux_mat,
                mat_unitary_v))
    else:
        # Polar decomposition: mat @ (mat^T mat)^(-1/2).
        aux_mat = gs.matmul(gs.transpose(mat, axes=(0, 2, 1)), mat)
        inv_sqrt_mat = gs.linalg.inv(
            gs.linalg.sqrtm(aux_mat))
        rot_mat = gs.matmul(mat, inv_sqrt_mat)
    assert gs.ndim(rot_mat) == 3
    return rot_mat
def skew_matrix_from_vector(self, vec):
    """Get the skew-symmetric matrix derived from the vector.

    In 3D, compute the skew-symmetric matrix, known as the cross-product
    matrix of a vector, associated to the vector `vec`.

    In nD, fill a skew-symmetric matrix with the values of the vector.

    Parameters
    ----------
    vec : array-like, coerced to shape (n_vecs, vec_dim).

    Returns
    -------
    skew_mat : array-like, shape (n_vecs, n, n)
    """
    vec = gs.to_ndarray(vec, to_ndim=2)
    n_vecs = vec.shape[0]
    vec_dim = gs.shape(vec)[1]
    if self.n == 2:  # SO(2)
        id_skew = gs.array([[[0., 1.], [-1., 0.]]] * n_vecs)
        skew_mat = gs.einsum(
            'nij,ni->nij', gs.cast(id_skew, gs.float32), vec)
    elif self.n == 3:  # SO(3)
        # NOTE(review): self.epsilon is added to the constant tensors
        # below -- presumably to avoid dividing by 0 in downstream
        # autodiff; confirm, since it perturbs the exact cross products.
        levi_civita_symbol = gs.array([[
            [[0., 0., 0.],
             [0., 0., 1.],
             [0., -1., 0.]],
            [[0., 1., 0.],
             [0., 0., 0.],
             [1., 0., 0.]],
            [[0., 1., 0.],
             [-1., 0., 0.],
             [0., 0., 0.]]
        ]] * n_vecs) + self.epsilon
        basis_vec_1 = gs.array([[1., 0., 0.]] * n_vecs) + self.epsilon
        basis_vec_2 = gs.array([[0., 1., 0.]] * n_vecs) + self.epsilon
        basis_vec_3 = gs.array([[0., 0., 1.]] * n_vecs) + self.epsilon
        # Each row of the skew matrix is e_i x vec, contracted through
        # the Levi-Civita symbol.
        cross_prod_1 = gs.einsum(
            'nijk,ni,nj->nk',
            levi_civita_symbol,
            basis_vec_1,
            vec)
        cross_prod_2 = gs.einsum(
            'nijk,ni,nj->nk',
            levi_civita_symbol,
            basis_vec_2,
            vec)
        cross_prod_3 = gs.einsum(
            'nijk,ni,nj->nk',
            levi_civita_symbol,
            basis_vec_3,
            vec)
        cross_prod_1 = gs.to_ndarray(cross_prod_1, to_ndim=3, axis=1)
        cross_prod_2 = gs.to_ndarray(cross_prod_2, to_ndim=3, axis=1)
        cross_prod_3 = gs.to_ndarray(cross_prod_3, to_ndim=3, axis=1)
        skew_mat = gs.concatenate(
            [cross_prod_1, cross_prod_2, cross_prod_3], axis=1)
    else:  # SO(n)
        # Recover the matrix dimension from vec_dim = n * (n - 1) / 2.
        mat_dim = gs.cast(
            ((1. + gs.sqrt(1. + 8. * vec_dim)) / 2.), gs.int32)
        skew_mat = gs.zeros((n_vecs,) + (self.n,) * 2)
        upper_triangle_indices = gs.triu_indices(mat_dim, k=1)
        for i in range(n_vecs):
            skew_mat[i][upper_triangle_indices] = vec[i]
            skew_mat[i] = skew_mat[i] - skew_mat[i].transpose()
    assert gs.ndim(skew_mat) == 3
    return skew_mat
def vector_from_skew_matrix(self, skew_mat):
"""Derive a vector from the skew-symmetric matrix.
In 3D, compute the vector defining the cross product
associated to the skew-symmetric matrix skew mat.
In nD, fill a vector by reading the values
of the upper triangle of skew_mat.
Parameters
----------
skew_mat
Returns
-------
vec
"""
skew_mat = gs.to_ndarray(skew_mat, to_ndim=3)
n_skew_mats, mat_dim_1, mat_dim_2 = skew_mat.shape
assert mat_dim_1 == mat_dim_2 == self.n
vec_dim = self.dimension
vec = gs.zeros((n_skew_mats, vec_dim))
if self.n == 2: # SO(2)
vec = gs.expand_dims(skew_mat[:, 0, 1], axis=1)
elif self.n == 3: # | |
\
% self.ioctx.name)
def __iter__(self):
    # Iterator protocol: the object lister is its own iterator.
    return self
def next(self):
    """
    Get the next object name and locator in the pool

    :raises: StopIteration
    :returns: next rados.Ioctx Object
    """
    key = c_char_p()
    locator = c_char_p()
    nspace = c_char_p()
    # librados fills in the entry's key, locator and namespace; any
    # negative return is treated as end of iteration.
    ret = run_in_thread(self.ioctx.librados.rados_nobjects_list_next,
                        (self.ctx, byref(key), byref(locator), byref(nspace)))
    if ret < 0:
        raise StopIteration()
    return Object(self.ioctx, key.value, locator.value, nspace.value)
def __del__(self):
    # Close the librados object-listing handle when garbage collected.
    run_in_thread(self.ioctx.librados.rados_nobjects_list_close, (self.ctx,))
class XattrIterator(object):
"""Extended attribute iterator"""
def __init__(self, ioctx, it, oid):
self.ioctx = ioctx
self.it = it
self.oid = oid
def __iter__(self):
return self
def next(self):
"""
Get the next xattr on the object
:raises: StopIteration
:returns: pair - of name and value of the next Xattr
"""
name_ = c_char_p(0)
val_ = c_char_p(0)
len_ = c_int(0)
ret = run_in_thread(self.ioctx.librados.rados_getxattrs_next,
(self.it, byref(name_), byref(val_), byref(len_)))
if (ret != 0):
raise make_ex(ret, "error iterating over the extended attributes \
in '%s'" % self.oid)
if name_.value == None:
raise StopIteration()
name = ctypes.string_at(name_)
val = ctypes.string_at(val_, len_)
return (name, val)
def __del__(self):
run_in_thread(self.ioctx.librados.rados_getxattrs_end, (self.it,))
class SnapIterator(object):
    """Snapshot iterator over all snapshots of a pool."""
    def __init__(self, ioctx, it=None, oid=None):
        self.ioctx = ioctx
        # We don't know how big a buffer we need until we've called the
        # function. So use the exponential doubling strategy.
        num_snaps = 10
        while True:
            self.snaps = (ctypes.c_uint64 * num_snaps)()
            ret = run_in_thread(self.ioctx.librados.rados_ioctx_snap_list,
                                (self.ioctx.io, self.snaps, c_int(num_snaps)))
            if (ret >= 0):
                # Non-negative return is the number of snapshots filled in.
                self.max_snap = ret
                break
            elif (ret != -errno.ERANGE):
                raise make_ex(ret, "error calling rados_snap_list for \
ioctx '%s'" % self.ioctx.name)
            num_snaps = num_snaps * 2
        self.cur_snap = 0
    def __iter__(self):
        return self
    def next(self):
        """
        Get the next Snapshot

        :raises: :class:`Error`, StopIteration
        :returns: Snap - next snapshot
        """
        if (self.cur_snap >= self.max_snap):
            raise StopIteration
        snap_id = self.snaps[self.cur_snap]
        name_len = 10
        # Same doubling strategy for the snapshot-name buffer.
        while True:
            name = create_string_buffer(name_len)
            ret = run_in_thread(self.ioctx.librados.rados_ioctx_snap_get_name,
                                (self.ioctx.io, c_uint64(snap_id), byref(name),
                                 c_int(name_len)))
            if (ret == 0):
                # NOTE(review): this assignment looks like dead code --
                # name_len is never read after the loop exits. Confirm.
                name_len = ret
                break
            elif (ret != -errno.ERANGE):
                raise make_ex(ret, "rados_snap_get_name error")
            name_len = name_len * 2
        snap = Snap(self.ioctx, name.value, snap_id)
        self.cur_snap = self.cur_snap + 1
        return snap
class Snap(object):
    """A single pool snapshot, identified by its name and numeric id."""
    def __init__(self, ioctx, name, snap_id):
        self.ioctx = ioctx
        self.name = name
        self.snap_id = snap_id
    def __str__(self):
        fields = (str(self.ioctx), self.name, self.snap_id)
        return "rados.Snap(ioctx=%s,name=%s,snap_id=%d)" % fields
    def get_timestamp(self):
        """
        Find when a snapshot in the current pool occurred

        :raises: :class:`Error`
        :returns: datetime - the data and time the snapshot was created
        """
        stamp = c_long(0)
        ret = run_in_thread(self.ioctx.librados.rados_ioctx_snap_get_stamp,
                            (self.ioctx.io, self.snap_id, byref(stamp)))
        if ret != 0:
            raise make_ex(ret, "rados_ioctx_snap_get_stamp error")
        return datetime.fromtimestamp(stamp.value)
class Completion(object):
    """Wraps a librados AIO completion together with its user callbacks."""
    def __init__(self, ioctx, rados_comp, oncomplete, onsafe,
                 complete_cb, safe_cb):
        self.ioctx = ioctx
        self.rados_comp = rados_comp
        self.oncomplete = oncomplete
        self.onsafe = onsafe
        self.complete_cb = complete_cb
        self.safe_cb = safe_cb
    def is_safe(self):
        """
        Is an asynchronous operation safe?

        This does not imply that the safe callback has finished.

        :returns: True if the operation is safe
        """
        ret = run_in_thread(self.ioctx.librados.rados_aio_is_safe,
                            (self.rados_comp,))
        return ret == 1
    def is_complete(self):
        """
        Has an asynchronous operation completed?

        This does not imply that the safe callback has finished.

        :returns: True if the operation is completed
        """
        ret = run_in_thread(self.ioctx.librados.rados_aio_is_complete,
                            (self.rados_comp,))
        return ret == 1
    def wait_for_safe(self):
        """
        Wait for an asynchronous operation to be marked safe

        This does not imply that the safe callback has finished.
        """
        run_in_thread(self.ioctx.librados.rados_aio_wait_for_safe,
                      (self.rados_comp,))
    def wait_for_complete(self):
        """
        Wait for an asynchronous operation to complete

        This does not imply that the complete callback has finished.
        """
        run_in_thread(self.ioctx.librados.rados_aio_wait_for_complete,
                      (self.rados_comp,))
    def wait_for_safe_and_cb(self):
        """
        Wait for an asynchronous operation to be marked safe and for
        the safe callback to have returned
        """
        run_in_thread(self.ioctx.librados.rados_aio_wait_for_safe_and_cb,
                      (self.rados_comp,))
    def wait_for_complete_and_cb(self):
        """
        Wait for an asynchronous operation to complete and for the
        complete callback to have returned

        :returns: whether the operation is completed
        """
        return run_in_thread(
            self.ioctx.librados.rados_aio_wait_for_complete_and_cb,
            (self.rados_comp,))
    def get_return_value(self):
        """
        Get the return value of an asychronous operation

        The return value is set when the operation is complete or safe,
        whichever comes first.

        :returns: int - return value of the operation
        """
        return run_in_thread(self.ioctx.librados.rados_aio_get_return_value,
                             (self.rados_comp,))
    def __del__(self):
        """
        Release a completion

        Call this when you no longer need the completion. It may not be
        freed immediately if the operation is not acked and committed.
        """
        run_in_thread(self.ioctx.librados.rados_aio_release,
                      (self.rados_comp,))
# C callback signature used by librados AIO completions:
# int (*cb)(rados_completion_t completion, void *arg)
RADOS_CB = CFUNCTYPE(c_int, c_void_p, c_void_p)
class Ioctx(object):
"""rados.Ioctx object"""
def __init__(self, name, librados, io):
    # Pool name this I/O context is bound to.
    self.name = name
    # Handle to the loaded librados shared library.
    self.librados = librados
    # Opaque rados_ioctx_t pointer.
    self.io = io
    self.state = "open"
    self.locator_key = ""
    self.nspace = ""
    # Map completion pointer value -> Completion, guarded by self.lock,
    # so the C callbacks (fired on other threads) can find their object.
    self.safe_cbs = {}
    self.complete_cbs = {}
    self.lock = threading.Lock()
def __enter__(self):
    # Context-manager entry: the ioctx itself is the managed resource.
    return self
def __exit__(self, type_, value, traceback):
    # Close on context exit; returning False propagates any exception.
    self.close()
    return False
def __del__(self):
    # Best-effort close when the ioctx is garbage collected.
    self.close()
def __aio_safe_cb(self, completion, _):
    """
    Callback to onsafe() for asynchronous operations
    """
    cb = None
    with self.lock:
        cb = self.safe_cbs[completion]
        # One-shot: drop the registration before invoking user code.
        del self.safe_cbs[completion]
    cb.onsafe(cb)
    return 0
def __aio_complete_cb(self, completion, _):
    """
    Callback to oncomplete() for asynchronous operations
    """
    cb = None
    with self.lock:
        cb = self.complete_cbs[completion]
        # One-shot: drop the registration before invoking user code.
        del self.complete_cbs[completion]
    cb.oncomplete(cb)
    return 0
def __get_completion(self, oncomplete, onsafe):
    """
    Constructs a completion to use with asynchronous operations

    :param oncomplete: what to do when the write is safe and complete in memory
        on all replicas
    :type oncomplete: completion
    :param onsafe: what to do when the write is safe and complete on storage
        on all replicas
    :type onsafe: completion

    :raises: :class:`Error`
    :returns: completion object
    """
    completion = c_void_p(0)
    complete_cb = None
    safe_cb = None
    # Wrap the Python callbacks as C function pointers; keep references
    # on the Completion object so they are not garbage collected.
    if oncomplete:
        complete_cb = RADOS_CB(self.__aio_complete_cb)
    if onsafe:
        safe_cb = RADOS_CB(self.__aio_safe_cb)
    ret = run_in_thread(self.librados.rados_aio_create_completion,
                        (c_void_p(0), complete_cb, safe_cb,
                         byref(completion)))
    if ret < 0:
        raise make_ex(ret, "error getting a completion")
    # Register under the lock so the C callbacks (other threads) can look
    # the Completion up by its raw pointer value.
    with self.lock:
        completion_obj = Completion(self, completion, oncomplete, onsafe,
                                    complete_cb, safe_cb)
        if oncomplete:
            self.complete_cbs[completion.value] = completion_obj
        if onsafe:
            self.safe_cbs[completion.value] = completion_obj
    return completion_obj
def aio_write(self, object_name, to_write, offset=0,
              oncomplete=None, onsafe=None):
    """
    Write data to an object asynchronously

    Queues the write and returns.

    :param object_name: name of the object
    :type object_name: str
    :param to_write: data to write
    :type to_write: str
    :param offset: byte offset in the object to begin writing at
    :type offset: int
    :param oncomplete: what to do when the write is safe and complete in memory
        on all replicas
    :type oncomplete: completion
    :param onsafe: what to do when the write is safe and complete on storage
        on all replicas
    :type onsafe: completion

    :raises: :class:`Error`
    :returns: completion object
    """
    completion = self.__get_completion(oncomplete, onsafe)
    ret = run_in_thread(self.librados.rados_aio_write,
                        (self.io, c_char_p(object_name),
                         completion.rados_comp, c_char_p(to_write),
                         c_size_t(len(to_write)), c_uint64(offset)))
    if ret < 0:
        raise make_ex(ret, "error writing object %s" % object_name)
    return completion
def aio_write_full(self, object_name, to_write,
                   oncomplete=None, onsafe=None):
    """
    Asychronously write an entire object

    The object is filled with the provided data. If the object exists,
    it is atomically truncated and then written.
    Queues the write and returns.

    :param object_name: name of the object
    :type object_name: str
    :param to_write: data to write
    :type to_write: str
    :param oncomplete: what to do when the write is safe and complete in memory
        on all replicas
    :type oncomplete: completion
    :param onsafe: what to do when the write is safe and complete on storage
        on all replicas
    :type onsafe: completion

    :raises: :class:`Error`
    :returns: completion object
    """
    completion = self.__get_completion(oncomplete, onsafe)
    ret = run_in_thread(self.librados.rados_aio_write_full,
                        (self.io, c_char_p(object_name),
                         completion.rados_comp, c_char_p(to_write),
                         c_size_t(len(to_write))))
    if ret < 0:
        raise make_ex(ret, "error writing object %s" % object_name)
    return completion
def aio_append(self, object_name, to_append, oncomplete=None, onsafe=None):
    """
    Asychronously append data to an object

    Queues the write and returns.

    :param object_name: name of the object
    :type object_name: str
    :param to_append: data to append
    :type to_append: str
    :param oncomplete: what to do when the write is safe and complete in memory
        on all replicas
    :type oncomplete: completion
    :param onsafe: what to do when the write is safe and complete on storage
        on all replicas
    :type onsafe: completion

    :raises: :class:`Error`
    :returns: completion object
    """
    completion = self.__get_completion(oncomplete, onsafe)
    ret = run_in_thread(self.librados.rados_aio_append,
                        (self.io, c_char_p(object_name),
                         completion.rados_comp, c_char_p(to_append),
                         c_size_t(len(to_append))))
    if ret < 0:
        raise make_ex(ret, "error appending to object %s" % object_name)
    return completion
def aio_flush(self):
    """
    Block until all pending writes in an io context are safe

    :raises: :class:`Error`
    """
    ret = run_in_thread(self.librados.rados_aio_flush, (self.io,))
    if ret < 0:
        raise make_ex(ret, "error flushing")
def aio_read(self, object_name, length, offset, oncomplete):
"""
Asychronously read data from an object
oncomplete will be called | |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# import xlrd
import sys,csv
# import codecs
from datetime import date,datetime
from pyh import *
# Root HTML document for the rendered board.
page = PyH('JIRA看板')
# Print at most 3 cards per table row by default.
PRT_COL = 3
# Current <tr> being filled; mutated as a global by renderCard.
NEW_LINE = ''
# Sprint name fragment used to select the active sprint.
SPRINT_DATE = '0901'
class Utils():
    """Running counter used to track how many cards have been rendered."""
    def __init__(self):
        # Accumulated total.
        self.p = 0
    def add(self, param):
        """Add *param* to the running total and return the new total."""
        self.p = self.p + param
        return self.p
# Module-level shared counter instance used by renderCard.
utils = Utils()
# def readExcel():
# '''Read'''
# filePath = r"/Users/Nicholas/Documents/人员通讯信息——曲健2017.8.1.xlsx"
# # 打开文件
# wb = xlrd.open_workbook(filePath)
# # 打开第一个sheet
# sheet0 = wb.sheets()[0]
# v = sheet0.cell(1,1).value
# print(v)
def ReadFile(filePath, encoding):
    """Read and return the entire file at *filePath* decoded with *encoding*.

    Bug fix: the original used codecs.open(), but the ``import codecs``
    line at the top of this file is commented out, so every call raised
    NameError. The built-in open() supports an encoding argument in
    Python 3, so codecs is unnecessary.
    """
    with open(filePath, "r", encoding=encoding) as f:
        return f.read()
def WriteFile(filePath, u, encoding):
    """Write the string *u* to *filePath* encoded with *encoding*.

    Bug fix: the original used codecs.open(), but the ``import codecs``
    line at the top of this file is commented out, so every call raised
    NameError. The built-in open() supports an encoding argument in
    Python 3, so codecs is unnecessary.
    """
    with open(filePath, "w", encoding=encoding) as f:
        f.write(u)
'''''
定义GBK_2_UTF8方法,用于转换文件存储编码
'''
def GBK_2_UTF8(src, dst):
    """Re-encode the file at *src* (GBK) into *dst* as UTF-8."""
    WriteFile(dst, ReadFile(src, encoding='gbk'), encoding='utf_8')
def readCsv():
    """Read an exported JIRA CSV, keep only rows of the target sprint,
    and render them as HTML cards into abc.html.

    NOTE(review): the column indices used below (0=summary, 1=key, 2=id,
    3=parent id, 4=issue type, 14=assignee, 35=estimate, 53=team,
    57=epic link) mirror one particular JIRA export layout -- confirm
    against the actual CSV header before reuse.
    """
    srcPath = "/Users/Nicholas/Desktop/JIRA-ETS源数据2017.8.23.csv"
    dstPath = "/Users/Nicholas/Desktop/JIRA-ETS源数据2017.8.23-UT8.csv"
    # GBK_2_UTF8(srcPath,dstPath);
    # Pre-process the csv:
    # 1. sort by issue type (story, task, sub-task) and parent id
    csvFile = open(srcPath, encoding='GBK',mode='r')
    csvData = csv.reader(csvFile, delimiter=',')
    # sort by team, issuetype, parentid
    sortedlist = sorted(csvData, key = lambda x: (x[53],x[4], x[3]))
    csvFile.close()
    # Build the html page and get a reference to the outer layout table.
    outerTab = renderHTMLTab()
    trLine = 0
    i = 0
    for row in sortedlist:
        # Skip the header row ('主题' = "summary") and rows that do not
        # belong to the target sprint.
        if (row[0]=='主题') or ('ETS 0901 Sprint' not in row):
            continue
        issuename = row[0]
        issuekey = row[1]
        issueid = row[2]
        parentid = row[3]
        issuetype = row[4]
        owner = row[14]
        estimate = row[35]
        team = row[53]
        epiclink = row[57]
        i += 1
        # Start a new outer-table row every PRT_COL cards.
        if (i % PRT_COL == 1):
            trLine = outerTab << tr()
        parentRow = 0
        # Stories/tasks hang off an epic; sub-tasks hang off a parent issue.
        if (issuetype == 'Story') or (issuetype == r'任务'):
            parentRow = findNameByKey(sortedlist, epiclink)
        else:
            parentRow = findNameById(sortedlist, parentid)
        if parentRow is None:
            parentRow = ['','','','','','']
        renderCard(trLine, team, issuetype, issuekey, issuename, \
            parentRow[4], parentRow[1], parentRow[0], owner, estimate )
    page.printOut('abc.html')
def findNameById(list, id):
    """Return the first row whose issue id (column 2) equals *id*, else None."""
    return next((row for row in list if row[2] == id), None)
def findNameByKey(list, key):
    """Return the first row whose issue key (column 1) equals *key*, else None."""
    return next((row for row in list if row[1] == key), None)
def renderHTMLTab():
    """Initialise the page head and the outer 4-column layout table;
    return the table object.

    NOTE(review): `charset` and `nl` are not defined in this file --
    presumably they come from ``from pyh import *``; confirm.
    """
    # Add head info: charset plus inline CSS for the card borders.
    page.head.addObj(charset)
    page.head.addObj("<style>body { font-family: Arial, Helvetica, sans-serif; font-size:14px; }" \
        + nl \
        + ".bordercls {border:2px solid #212121;} </style>")
    # Add the outer table: one header row of four fixed-width cells.
    outerTab = page << table(width="1300px", style="border-spacing:2px;")
    outerTab << tr() << td(width="300px") + td(width="300px") + td(width="300px") + td(width="300px")
    return outerTab
def renderCard(outerTab, team, issuetype, issuekey, issuename, parenttype, parentkey, parentname, owner, estimate):
    """Render one card (an inner table) for a story, task or sub-task.

    A new outer-table row is started every PRT_COL cards, tracked via the
    shared `utils` counter; the current row is kept in the global NEW_LINE.

    NOTE(review): readCsv() passes a <tr> as `outerTab` while jira()
    passes the outer <table> -- confirm both call sites render correctly.
    """
    global NEW_LINE
    if (utils.add(1) % PRT_COL == 1):
        NEW_LINE = outerTab << tr()
    # condition: 0 = story (green), 1 = task (purple), 2 = sub-task (orange).
    condition = 0
    tagbgcolor = "#88ca79"
    if (issuetype == r'任务'):
        condition = 1
        tagbgcolor ="#d3b1e2"
    elif (issuetype == r'子任务'):
        condition = 2
        tagbgcolor = "#e6a988"
    cardTab = NEW_LINE << td(width="300px") << table(width="100%", bgcolor=tagbgcolor)
    cardTab << tr() << td(width="70%") + td(width="30%")
    # Card title: the parent (epic or story) description.
    cardTab << tr() << td(colspan="2", height="50px", align="center", cl="bordercls") << b(showDesc(team, parenttype,parentkey,parentname))
    if condition == 0 :
        # Story card: one full-width description cell.
        cardTab << tr() \
            << td(rowspan="3", colspan="2", height="150px", align="left", valign="middle", cl="bordercls") \
            << span(showDesc('', issuetype, issuekey, issuename))
    elif condition == 1 or condition == 2:
        # Task / sub-task card: description cell plus owner and estimate.
        ttr = cardTab << tr()
        ttr << td(rowspan="3", height="150px", cl="bordercls") << span(showDesc('', issuetype, issuekey, issuename))
        ttr << td(height="50px", cl="bordercls") << span(owner)
        cardTab << tr() << td(rowspan="2", cl="bordercls") << span(estimate)
# Build the caption or the card description text.
def showDesc(team, issuetype, issuekey, issuename):
    """Return display text for a card; placeholder when there is no type."""
    if issuetype is None or issuetype == '':
        return '-~~-'
    desc = "[" + issuetype + "][" + issuekey + "]: " + issuename
    if team != '':
        return team + " 团队<br/>" + desc
    return desc
from jira import JIRA
from jira.resources import *
from jira.resources import Board
from jira.resources import GreenHopperResource
from jira.resources import Sprint
# JIRA project key for the ETS boards.
PRJ_ETS = "ETS"
# Board list endpoint, e.g.:
# http://172.16.58.3:8080/rest/greenhopper/1.0/rapidviews/viewsData
JIRA_BOARD_URL="rapidviews/viewsData"
# Backlog endpoint, e.g.:
# http://172.16.58.3:8080/rest/greenhopper/1.0/xboard/plan/backlog/data.json?rapidViewId=97&selectedProjectKey=ETS
JIRA_BACKLOG_URL="xboard/plan/backlog/data.json?rapidViewId={boardId}&selectedProjectKey={projectName}"
# Issue-detail endpoint used to fetch a backlog item's sub-tasks.
JIRA_SUBTASK_URL="xboard/issue/details.json?rapidViewId={boardId}&issueIdOrKey={backlogKey}&loadSubtasks=true"
def jira():
    """Fetch the current sprint's backlog of every "ETS-*" board from JIRA
    (GreenHopper REST API) and render one HTML card per story/task plus one
    per sub-task into abc.html.

    Requires network access to the JIRA server and valid credentials.

    API references:
      - Agile/GreenHopper API: https://docs.atlassian.com/jira-software/REST/
      - REST API (search): https://docs.atlassian.com/jira/REST/6.4.6/
      - JQL: https://confluence.atlassian.com/jira/advanced-searching-179442050.html
    """
    # Build the html page and get a reference to the outer layout table.
    outerTab = renderHTMLTab()
    myoption1 = {
        "server": "http://172.16.58.3:8080",
        "context_path": "/",
        "rest_path": "api",
        "rest_api_version": "2",
        # The default option set targets the GreenHopper agile API.
        "agile_rest_path": GreenHopperResource.GREENHOPPER_REST_PATH,
        "agile_rest_api_version": "1.0",
        "verify": True,
        "resilient": True,
        "async": False,
        "client_cert": None,
        "check_update": False,
        "headers": {
            'Cache-Control': 'no-cache',
            'Accept': 'application/json;charset=UTF-8',  # default for REST
            'Content-Type': 'application/json',  # ;charset=UTF-8',
            'X-Atlassian-Token': 'no-check'}}
    jira_greenhopper = JIRA(options=myoption1, basic_auth=('nicholas.qu','****'))
    # GreenHopper does not support server-side filtering of boards, so
    # fetch all of them and filter client-side.
    boards_json = jira_greenhopper._get_json(JIRA_BOARD_URL, base=GreenHopperResource.AGILE_BASE_URL)
    filtered_boards = []
    for board in boards_json['views']:
        if (board['name'].startswith("ETS-")):
            filtered_boards.append(board)
    for board in filtered_boards:
        board_id = board['id']
        board_name = board['name']
        backlogs_url = JIRA_BACKLOG_URL.replace("{projectName}",PRJ_ETS).replace("{boardId}", str(board_id))
        backlogs_json = jira_greenhopper._get_json(backlogs_url, base=GreenHopperResource.AGILE_BASE_URL)
        # Derive the team label from the board name.
        team = board_name[4:7].replace("团",'').replace("队",'')
        # Only keep backlog items belonging to the current sprint.
        backlogids = []
        sprintname = ''
        for sprint in backlogs_json['sprints']:
            if (SPRINT_DATE in sprint['name']):
                backlogids = sprint['issuesIds']
                sprintname = sprint['name']
                break
        print(r'当前正在处理:' + team + '......\n Sprint['+ sprintname +
              '] 共有 ' + str(len(backlogids)) + ' 个backlog' )
        for backlog in backlogs_json['issues']:
            backlogid = backlog['id']
            # Skip issues that are not in the selected sprint.
            if (backlogid not in backlogids):
                continue
            backlogtype = backlog['typeName']
            backlogkey = backlog['key']
            backlogname = backlog['summary']
            parenttype = 'Epic'
            try:
                parentkey = backlog['epicField']['epicKey']
            except Exception:
                parentkey = ''
            try:
                parentname = backlog['epicField']['text']
            except Exception:
                parentname = ''
            try:
                owner = backlog['assigneeName']
            except Exception:
                owner = ''
            estimate = ''
            # Render the story or task card.
            renderCard(outerTab, team, backlogtype, backlogkey, backlogname, \
                parenttype, parentkey, parentname, owner, estimate )
            subtasks_url = JIRA_SUBTASK_URL.replace("{boardId}", str(board_id)).replace("{backlogKey}", backlogkey)
            subtasks_json = jira_greenhopper._get_json(subtasks_url, base=GreenHopperResource.AGILE_BASE_URL)
            backlog_detail_tabs = subtasks_json['tabs']['defaultTabs']
            subtask_tab = backlog_detail_tabs[0]
            for tab in backlog_detail_tabs:
                if (tab['tabId'] == 'SUB_TASKS'):
                    subtask_tab = tab
                    break
            # BUG FIX: iterate the tab found above (subtask_tab), not the
            # loop variable `tab`, which is left pointing at the *last*
            # inspected tab when no SUB_TASKS tab exists.
            for subtask in subtask_tab['subtaskEntries']:
                issuetype = '子任务'
                issuekey = subtask['key']
                issuename = subtask['summary']
                try:
                    owner = subtask['assignee']['name']
                except Exception:
                    owner = ''
                estimate = ''
                # Render the sub-task card, with its parent backlog item
                # as the card title.
                renderCard(outerTab, team, issuetype, issuekey, issuename, \
                    backlogtype, backlogkey, backlogname, owner, estimate )
    page.printOut('abc.html')
def xstr(s):
return '' if | |
from random import shuffle
from full_brevity import *
from relational import *
from incremental import *
from util import generate_phrase, generate_phrase_rel
if __name__ == '__main__':
# This data is based on the drawer pictures from Vienthen and Dale (2006)
# Drawers are numbered (oddly) from left to right on even rows and right to left on odd rows (top to bottom)
facts = [
# Row 1
[Type, "drawer", "d1"], ["color", "blue", "d1"],
["row", "1", "d1"], ["col", "1", "d1"], ["corner", "true", "d1"],
[Rel, "above", "d1", "d8"], [Rel, "left", "d1", "d2"],
[Rel, "above", "d1", "d9"], [Rel, "left", "d1", "d3"],
[Rel, "above", "d1", "d16"], [Rel, "left", "d1", "d4"],
[Type, "drawer", "d2"], ["color", "orange", "d2"],
["row", "1", "d2"], ["col", "2", "d2"], ["corner", "false", "d2"],
[Rel, "above", "d2", "d7"], [Rel, "left", "d2", "d3"], [Rel, "right", "d2", "d1"],
[Rel, "above", "d2", "d10"], [Rel, "left", "d2", "d4"], [Rel, "above", "d2", "d15"],
[Type, "drawer", "d3"], ["color", "pink", "d3"],
["row", "1", "d3"], ["col", "3", "d3"], ["corner", "false", "d3"],
[Rel, "above", "d3", "d6"], [Rel, "left", "d3", "d4"], [Rel, "right", "d3", "d2"],
[Rel, "above", "d3", "d11"], [Rel, "right", "d3", "d1"], [Rel, "above", "d3", "d14"],
[Type, "drawer", "d4"], ["color", "yellow", "d4"],
["row", "1", "d4"], ["col", "4", "d4"], ["corner", "true", "d4"],
[Rel, "above", "d4", "d5"], [Rel, "right", "d4", "d3"],
[Rel, "above", "d4", "d12"], [Rel, "right", "d4", "d2"],
[Rel, "above", "d4", "d13"], [Rel, "right", "d4", "d1"],
# Row 2
[Type, "drawer", "d5"], ["color", "pink", "d5"],
["row", "2", "d5"], ["col", "4", "d5"], ["corner", "false", "d5"],
[Rel, "above", "d5", "d12"], [Rel, "below", "d5", "d4"],
[Rel, "above", "d5", "d13"], [Rel, "right", "d5", "d6"], [Rel, "right", "d5", "d7"], [Rel, "right", "d5", "d8"],
[Type, "drawer", "d6"], ["color", "yellow", "d6"],
["row", "2", "d6"], ["col", "3", "d6"], ["corner", "false", "d6"],
[Rel, "above", "d6", "d11"], [Rel, "left", "d6", "d5"], [Rel, "right", "d6", "d7"], [Rel, "below", "d6", "d3"],
[Rel, "above", "d6", "d14"], [Rel, "right", "d6", "d8"],
[Type, "drawer", "d7"], ["color", "blue", "d7"],
["row", "2", "d7"], ["col", "2", "d7"], ["corner", "false", "d7"],
[Rel, "above", "d7", "d10"], [Rel, "left", "d7", "d6"], [Rel, "right", "d7", "d8"], [Rel, "below", "d7", "d2"],
[Rel, "above", "d7", "d15"], [Rel, "left", "d7", "d5"],
[Type, "drawer", "d8"], ["color", "blue", "d8"],
["row", "2", "d8"], ["col", "1", "d8"], ["corner", "false", "d8"],
[Rel, "above", "d8", "d9"], [Rel, "left", "d8", "d7"], [Rel, "below", "d8", "d1"],
[Rel, "above", "d8", "d16"], [Rel, "left", "d8", "d6"], [Rel, "left", "d8", "d5"],
# Row 3
[Type, "drawer", "d9"], ["color", "orange", "d9"],
["row", "3", "d9"], ["col", "1", "d9"], ["corner", "false", "d9"],
[Rel, "above", "d9", "d16"], [Rel, "left", "d9", "d10"], [Rel, "below", "d9", "d8"],
[Rel, "left", "d9", "d11"], [Rel, "left", "d9", "d12"], [Rel, "below", "d9", "d1"],
[Type, "drawer", "d10"], ["color", "blue", "d10"],
["row", "3", "d10"], ["col", "2", "d10"], ["corner", "false", "d10"],
[Rel, "above", "d10", "d15"], [Rel, "left", "d10", "d11"], [Rel, "right", "d10", "d9"], [Rel, "below", "d10", "d7"],
[Rel, "left", "d10", "d12"], [Rel, "below", "d10", "d2"],
[Type, "drawer", "d11"], ["color", "yellow", "d11"],
["row", "3", "d11"], ["col", "3", "d11"], ["corner", "false", "d11"],
[Rel, "above", "d11", "d14"], [Rel, "left", "d11", "d12"], [Rel, "right", "d11", "d10"], [Rel, "below", "d11", "d6"],
[Rel, "right", "d11", "d9"], [Rel, "below", "d11", "d3"],
[Type, "drawer", "d12"], ["color", "orange", "d12"],
["row", "3", "d12"], ["col", "4", "d12"], ["corner", "false", "d12"],
[Rel, "above", "d12", "d13"], [Rel, "right", "d12", "d11"], [Rel, "below", "d12", "d5"],
[Rel, "right", "d12", "d10"], [Rel, "right", "d12", "d9"], [Rel, "below", "d12", "d4"],
# Row 4
[Type, "drawer", "d13"], ["color", "pink", "d13"],
["row", "4", "d13"], ["col", "4", "d13"], ["corner", "true", "d13"],
[Rel, "below", "d13", "d12"], [Rel, "right", "d13", "d14"],
[Rel, "below", "d13", "d5"], [Rel, "right", "d13", "d15"],
[Rel, "below", "d13", "d4"], [Rel, "right", "d13", "d16"],
[Type, "drawer", "d14"], ["color", "orange", "d14"],
["row", "4", "d14"], ["col", "3", "d14"], ["corner", "false", "d14"],
[Rel, "below", "d14", "d11"], [Rel, "left", "d14", "d13"], [Rel, "right", "d14", "d15"],
[Rel, "below", "d14", "d6"], [Rel, "right", "d14", "d16"], [Rel, "below", "d14", "d3"],
[Type, "drawer", "d15"], ["color", "pink", "d15"],
["row", "4", "d15"], ["col", "2", "d15"], ["corner", "false", "d15"],
[Rel, "below", "d15", "d10"], [Rel, "left", "d15", "d14"], [Rel, "right", "d15", "d16"],
[Rel, "below", "d15", "d7"], [Rel, "below", "d15", "d2"], [Rel, "left", "d15", "d13"],
[Type, "drawer", "d16"], ["color", "yellow", "d16"],
["row", "4", "d16"], ["col", "1", "d16"], ["corner", "true", "d16"],
[Rel, "below", "d16", "d9"], [Rel, "left", "d16", "d15"],
[Rel, "below", "d16", "d1"], [Rel, "left", "d16", "d14"],
[Rel, "below", "d16", "d8"], [Rel, "left", "d16", "d13"]
]
# These are the data collected from subjects in Viethen and Dale's work (2006).
human_facts = {
1: [
[["color", "blue", "d1"], ["row", "1", "d1"], ["col", "1", "d1"], ["corner", "true", "d1"]],
[["row", "1", "d1"], ["col", "1", "d1"], ["corner", "true", "d1"]],
[["row", "1", "d2"], ["col", "2", "d2"]]
],
2: [
[["color", "orange", "d2"], ["col", "2", "d2"]],
[["color", "orange", "d2"], ["row", "1", "d2"]],
[["color", "orange", "d2"], [Rel, "above", "d2", "d7"], ["color", "blue", "d7"]],
[["col", "2", "d2"], ["row", "1", "d2"]],
[["color", "orange", "d2"], ["col", "2", "d2"]],
[["row", "1", "d2"], ["col", "2", "d2"]],
],
3: [
[["color", "pink", "d3"], ["row", "1", "d3"]],
[["color", "pink", "d3"], ["row", "1", "d3"]],
[["color", "pink", "d3"], ["row", "1", "d3"], ["col", "3", "d3"]],
[["row", "1", "d3"], ["col", "3", "d3"]],
[["color", "pink", "d3"], ["row", "1", "d3"]],
[["color", "pink", "d3"], ["row", "1", "d3"]],
[["color", "pink", "d3"], ["row", "1", "d3"]],
[["row", "1", "d3"], ["col", "3", "d3"]],
],
4: [
[["row", "1", "d4"], ["col", "4", "d4"], ["corner", "true", "d4"]],
[["color", "yellow", "d4"], ["row", "1", "d4"], ["col", "4", "d4"], ["corner", "true", "d4"]],
[["col", "4", "d4"], ["row", "1", "d4"]],
[["row", "1", "d4"], ["col", "3", "d4"]],
[["row", "1", "d4"], ["col", "3", "d4"]],
[["row", "1", "d4"], ["col", "4", "d4"], ["corner", "true", "d4"]],
[["row", "1", "d4"], ["col", "3", "d4"]],
[["row", "1", "d4"], ["col", "3", "d4"]],
],
5: [
[["color", "yellow", "d4"], [Rel, "above", "d4", "d5"], ["color", "pink", "d5"]],
[["color", "pink", "d5"], ["col", "4", "d5"], ["row", "2", "d5"]],
[["color", "pink", "d5"], ["row", "2", "d5"], ["col", "4", "d5"]],
[["color", "pink", "d5"], ["col", "4", "d5"], [Rel, "below", "d5", "d4"], ["color", "yellow", "d4"]],
[["color", "pink", "d5"], ["row", "2", "d5"]],
],
6: [
[["col", "3", "d6"], ["row", "2", "d6"]],
[["row", "2", "d6"], ["col", "3", "d6"]],
[["row", "2", "d6"], ["col", "3", "d6"]],
[["color", "yellow", "d6"], [Rel, "above", "d6", "d11"], ["color", "yellow", "d11"]],
[["color", "yellow", "d6"], ["row", "2", "d6"]],
[["color", "yellow", "d6"], ["row", "2", "d6"]],
[[Rel, "right", "d6", "d7"], [Rel, "right" "d7", "d8"], ["color", "blue", "d7"], ["color", "blue", "d8"]],
[["color", "yellow", "d6"], [Rel, "above", "d6", "d11"], ["color", "yellow", "d11"]],
[["color", "yellow", "d6"], ["col", "3", "d6"], ["row", "2", "d6"]],
[["col", "3", "d6"], ["row", "2", "d6"]],
[["col", "3", "d6"], ["row", "2", "d6"]],
],
7: [
[["color", "blue", "d7"], ["col", "2", "d7"], ["row", "2", "d7"]],
[["color", "blue", "d7"], ["col", "2", "d7"], ["row", "2", "d7"]],
[["color", "blue", "d7"], [Rel, "below", "d7", "d2"], ["col", "2", "d7"], ["col", "2", "d2"]],
[["col", "2", "d7"], ["row", "2", "d7"]],
[["color", "blue", "d7"], [Rel, "below", "d7", "d2"], ["color", "orange", "d2"]],
[["col", "2", "d7"], ["row", "2", "d7"]],
[["row", "2", "d7"], ["col", "2", "d7"]],
[["col", "2", "d7"], ["row", "2", "d7"]],
[["row", "2", "d7"], ["col", "2", "d7"]],
[["col", "2", "d7"], ["row", "2", "d7"]],
],
8: [
[["row", "2", "d8"], ["col", "1", "d8"]],
[["col", "1", "d8"], ["row", "2", "d8"]],
[["row", "2", "d8"], ["col", "1", "d8"]],
[["color", "blue", "d8"], ["row", "2", "d8"], ["col", "1", "d8"]],
[["row", "2", "d8"], ["col", "1", "d8"]],
[["row", "2", "d8"], ["col", "1", "d8"]],
[["row", "2", "d8"], ["col", "1", "d8"]],
],
9: [
[["color", "orange", "d9"], ["col", "1", "d9"], ["row", "3", "d9"]],
[["color", "orange", "d9"], ["col", "1", "d9"], ["row", "3", "d9"]],
[["color", "orange", "d9"], ["col", "1", "d9"], ["row", "3", "d9"]],
[["row", "3", "d9"], ["col", "1", "d9"]],
[["color", "orange", "d9"], ["col", "1", "d9"]],
[["color", "orange", "d9"], ["col", "1", "d9"]],
[["row", "3", "d9"], ["col", "1", "d9"]],
[["color", "orange", "d9"], ["col", "1", "d9"]],
],
10: [
[["color", "blue", "d10"], ["row", "3", "d10"], ["col", "2", "d10"]],
[["color", "blue", "d10"], ["row", "3", "d10"]],
[["color", "blue", "d10"], [Rel, "above", "d10", "d15"], ["color", "pink", "d15"]],
[["color", "blue", "d10"], [Rel, "above", "d10", "d15"], ["color", "pink", "d15"]],
[["col", "2", "d10"], ["row", "3", "d10"]],
],
11: [
[["col", "3", "d11"], ["row", | |
for _ in range(not_matching_col_count + matching_col_count):
print("N")
print(not_matching_col_words)
mod_word = not_matching_col_words[0][0]
print(mod_word)
self._col_data.append((self.col_names[col_index], make_transformer(affix_type, mod_word)))
not_matching_col_words.pop(0)
not_matching_col_words = [(data[0], data[1] + calc_similarity(mod_word, data[0])) for data in
not_matching_col_words]
not_matching_col_words.sort(key=lambda x: x[1])
col_index += 1
def _gen_words(self, size: int, gen_type_dist: Dict[str, float], word_templates: List[Template],
matcher: List[Word],
phonemes: List[Word], feature_to_type, feature_to_sounds: Dict[str, List[Sound]]):
cad_count = round(size * gen_type_dist["CAD"])
# Doesnt depend on affix type.
word_matching_edge_count = cad_count
word_not_matching_edge_count = size - word_matching_edge_count
matching_size_each_template = max(1, word_matching_edge_count // len(word_templates))
not_matching_size_each_template = max(1, word_not_matching_edge_count // len(word_templates))
valid_match_templates = []
valid_not_match_templates = []
match_words = []
not_match_words = []
print(size)
print("match end: ", word_matching_edge_count)
print("not match end: ", word_matching_edge_count)
# Loop over the word templates to ensure at least 1 matching/not-matching word for each template
for i in range(len(word_templates)):
if len(match_words) < word_matching_edge_count:
match_gen = word_templates[i].generate_words_start_with(matcher, phonemes,
matching_size_each_template,
feature_to_sounds)
for word in match_gen:
if self._check_valid_UR(word, phonemes, feature_to_type, feature_to_sounds):
match_words.append(word)
if len(match_words) > 0:
valid_match_templates.append(word_templates[i])
if len(not_match_words) < word_not_matching_edge_count:
not_match_gen = word_templates[i].generate_words_not_start_with(matcher, phonemes,
not_matching_size_each_template,
feature_to_sounds)
for word in not_match_gen:
if self._check_valid_UR(word, phonemes, feature_to_type, feature_to_sounds):
not_match_words.append(word)
if len(not_match_words) > 0:
if word_templates[i] != []:
valid_not_match_templates.append(word_templates[i])
# If the number of generated matching words is less than designated matching word counts, randomly choose
# from templates to generate more matching words, by the number of words that need to be generated more.
if len(match_words) < word_matching_edge_count:
extra_match_words = random.choice(valid_match_templates).generate_words_start_with(matcher, phonemes,
word_matching_edge_count - len(
match_words),
feature_to_sounds)
match_words.extend([word for word in extra_match_words if
self._check_valid_UR(word, phonemes, feature_to_type, feature_to_sounds)])
# Same as above, but this time for not-matching words.
if len(not_match_words) < word_not_matching_edge_count:
extra_not_match_words = random.choice(valid_not_match_templates).generate_words_not_start_with(
matcher, phonemes, word_not_matching_edge_count - len(not_match_words), feature_to_sounds)
not_match_words.extend([word for word in extra_not_match_words if
self._check_valid_UR(word, phonemes, feature_to_type, feature_to_sounds)])
print("match words:")
print(match_words)
# print("not match words:")
# print(not_match_words)
return match_words, not_match_words
class SuffixParadigm(Paradigm):
    """A paradigm whose affix attaches to the *end* of the stem.

    Mirrors the prefixing paradigm, but URs are matched and generated by
    their ending sounds (generate_words_end_with / generate_words_not_end_with).
    """

    def __init__(self, word_templates: List[Template], rule: Rule,
                 matchers: Union[Tuple[List[Word], List[Word]], Tuple[List[Word], List[Word], List[Word]]],
                 phonemes: List[Word], feature_to_type: Dict[str, str], feature_to_sounds: Dict[str, List[Sound]],
                 shuffled: bool, affix_type: str, selected_family_name: str, side: str,
                 given_gloss: Union[List[str], None], ur_words: Union[List[Word], None]):
        super().__init__(word_templates, rule, matchers, phonemes, feature_to_type,
                         feature_to_sounds, shuffled, affix_type, selected_family_name, side, given_gloss, ur_words)

    def _gen_UR(self,
                matchers: Union[Tuple[List[Word], List[Word]], Tuple[List[Word], List[Word], List[Word]]],
                word_templates, phonemes, feature_to_type, feature_to_sounds,
                matching_col_count, not_matching_col_count, rule, words, affix_type):
        """
        NOTE: dependent of affix type
        Generate underlying representations (appended to `words`) based on the
        length of the matchers (2 or 3); returns the (possibly reduced) matchers.
        """
        if len(matchers) not in [2, 3]:
            raise NotImplementedError
        if len(matchers) == 3:
            # Randomly fold the middle matcher into either the stem side or the
            # affix side, reducing the 3-way matcher to a 2-way one.
            if random.random() < 0.5:
                matchers = matchers[0], [Word(str(w1) + str(w2)) for w1 in matchers[1] for w2 in matchers[2]]
            else:
                matchers = [Word(str(w0) + str(w1)) for w0 in matchers[0] for w1 in matchers[1]], matchers[2]
        match_words, not_match_words = self._gen_words(self.row_count, self.gen_type_dist, word_templates,
                                                       matchers[0], phonemes, feature_to_type,
                                                       feature_to_sounds)
        words.extend(match_words)
        words.extend(not_match_words)
        return matchers

    def _gen_affix(self, matchers: Union[Tuple[List[Word], List[Word]], Tuple[List[Word], List[Word], List[Word]]],
                   phonemes, feature_to_sounds, matching_col_count, not_matching_col_count, rule, affix_type):
        """
        NOTE: dependent of affix type
        Generate word affixes based on the length of the matchers (2 or 3)
        """
        mod_word = None
        matching_col_words = []
        not_matching_col_words = []
        # Candidate affixes per column template: 5 that start with the
        # environment matcher and 5 that do not.
        for col_template in self.col_templates:
            matching_col_words.extend(
                col_template.generate_words_start_with(matchers[1], phonemes, 5, feature_to_sounds))
            not_matching_col_words.extend(
                col_template.generate_words_not_start_with(matchers[1], phonemes, 5, feature_to_sounds))
        col_index = 0
        # Optionally make the first column affix-less.
        # NOTE(review): mod_word is still None here, so make_transformer
        # receives None — presumably an identity/bare-stem column; confirm
        # against make_transformer.
        if not_matching_col_count == 1 or random.random() < 0.5:
            self._col_data.append((self.col_names[0], make_transformer(affix_type, mod_word)))
            not_matching_col_count -= 1
            col_index += 1
        # Pair every candidate with a running similarity score; after each pick
        # the pool is re-scored and re-sorted so successive affixes are as
        # dissimilar as possible from the ones already chosen.
        matching_col_words = [(word, 0) for word in matching_col_words]
        not_matching_col_words = [(word, 0) for word in not_matching_col_words]
        random.shuffle(matching_col_words)
        random.shuffle(not_matching_col_words)
        print(rule)
        for _ in range(matching_col_count):
            print("M")
            print(matching_col_words)
            mod_word = matching_col_words[0][0]
            print(mod_word)
            self._col_data.append((self.col_names[col_index], make_transformer(affix_type, mod_word)))
            matching_col_words.pop(0)
            # range() above was evaluated once, so this decrement does not
            # shorten the loop — it tracks how many matching columns remain
            # unfilled if we run out of candidates and break early.
            matching_col_count -= 1
            matching_col_words = [(data[0], data[1] + calc_similarity(mod_word, data[0])) for data in
                                  matching_col_words]
            matching_col_words.sort(key=lambda x: x[1])
            col_index += 1
            if len(matching_col_words) == 0:
                break
        # Fill the remaining columns (including any matching columns left over
        # from an early break above) from the non-matching pool.
        for _ in range(not_matching_col_count + matching_col_count):
            print("N")
            print(not_matching_col_words)
            mod_word = not_matching_col_words[0][0]
            print(mod_word)
            self._col_data.append((self.col_names[col_index], make_transformer(affix_type, mod_word)))
            not_matching_col_words.pop(0)
            not_matching_col_words = [(data[0], data[1] + calc_similarity(mod_word, data[0])) for data in
                                      not_matching_col_words]
            not_matching_col_words.sort(key=lambda x: x[1])
            col_index += 1

    def _gen_words(self, size: int, gen_type_dist: Dict[str, float], word_templates: List[Template],
                   matcher: List[Word],
                   phonemes: List[Word], feature_to_type, feature_to_sounds: Dict[str, List[Sound]]):
        """Generate URs whose *right edge* does or does not match the rule environment.

        Same contract as the prefixing variant, but matches word endings.

        Returns:
            A tuple (match_words, not_match_words).
        """
        cad_count = round(size * gen_type_dist["CAD"])
        # Split `size` into matching vs non-matching counts (independent of affix type).
        word_matching_edge_count = cad_count
        word_not_matching_edge_count = size - word_matching_edge_count
        matching_size_each_template = max(1, word_matching_edge_count // len(word_templates))
        not_matching_size_each_template = max(1, word_not_matching_edge_count // len(word_templates))
        valid_match_templates = []
        valid_not_match_templates = []
        match_words = []
        not_match_words = []
        print(size)
        print("match end: ", word_matching_edge_count)
        # BUG FIX: this previously printed word_matching_edge_count under the
        # "not match end" label.
        print("not match end: ", word_not_matching_edge_count)
        # Loop over the word templates to ensure at least 1 matching/not-matching word for each template
        for i in range(len(word_templates)):
            if len(match_words) < word_matching_edge_count:
                match_gen = word_templates[i].generate_words_end_with(matcher, phonemes,
                                                                     matching_size_each_template,
                                                                     feature_to_sounds)
                for word in match_gen:
                    if self._check_valid_UR(word, phonemes, feature_to_type, feature_to_sounds):
                        match_words.append(word)
                if len(match_words) > 0:
                    valid_match_templates.append(word_templates[i])
            if len(not_match_words) < word_not_matching_edge_count:
                not_match_gen = word_templates[i].generate_words_not_end_with(matcher, phonemes,
                                                                             not_matching_size_each_template,
                                                                             feature_to_sounds)
                for word in not_match_gen:
                    if self._check_valid_UR(word, phonemes, feature_to_type, feature_to_sounds):
                        not_match_words.append(word)
                if len(not_match_words) > 0:
                    valid_not_match_templates.append(word_templates[i])
        # If the number of generated matching words is less than designated matching word counts, randomly choose
        # from templates to generate more matching words, by the number of words that need to be generated more.
        if len(match_words) < word_matching_edge_count:
            extra_match_words = random.choice(valid_match_templates).generate_words_end_with(
                matcher, phonemes, word_matching_edge_count - len(match_words), feature_to_sounds)
            match_words.extend([word for word in extra_match_words if
                                self._check_valid_UR(word, phonemes, feature_to_type, feature_to_sounds)])
        # Same as above, but this time for not-matching words.
        if len(not_match_words) < word_not_matching_edge_count:
            extra_not_match_words = random.choice(valid_not_match_templates).generate_words_not_end_with(
                matcher, phonemes, word_not_matching_edge_count - len(not_match_words), feature_to_sounds)
            not_match_words.extend([word for word in extra_not_match_words if
                                    self._check_valid_UR(word, phonemes, feature_to_type, feature_to_sounds)])
        print("match words:")
        print(match_words)
        return match_words, not_match_words
class ParadigmGenerator:
    """Builds paradigm questions (a primary paradigm plus an optional
    secondary paradigm with the opposite affix type) for a phonological rule.
    """

    def __init__(self, rule: Rule, phonemes: List[Word], templates: List[Template],
                 feature_to_type: Dict[str, str], feature_to_sounds: Dict[str, List[Sound]]):
        self._phonemes = phonemes
        self._feature_to_type = feature_to_type
        self._feature_to_sounds = feature_to_sounds
        self._rule = rule
        self._templates = templates

    def _get_valid_question(self, shuffled: bool, affix_type: Optional[str]) -> Optional[List[Optional[Paradigm]]]:
        """Return a pair [primary, secondary] of paradigms (secondary may be
        None), or None when no matchers can be constructed for the rule.
        """
        retry_limit = 2
        trial = 0
        # NOTE(review): the while-loop body below ends in an unconditional
        # `return`, so at most one iteration ever runs; `trial` is never
        # incremented and the LOGGER fallthrough at the bottom is unreachable
        # as written — the retry mechanism is effectively dead code.
        import random
        import os
        transform_data = _get_trans_data(
            os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data/paradigmtransdata.txt'))
        # Pick a random gloss family and randomize which paradigm gets which side.
        selected_family_name = random.choice(list(transform_data.keys()))
        sides = ["side1", "side2"]
        random.shuffle(sides)
        primary_side, secondary_side = sides[0], sides[1]
        return_paradigm = [None, None]
        while trial < retry_limit:
            try:
                matchers = construct_matchers(self._rule, self._phonemes, self._feature_to_sounds)
            except ValueError:
                return None
            paradigm1 = make_paradigm(self._templates, self._rule, matchers, self._phonemes,
                                      self._feature_to_type, self._feature_to_sounds,
                                      shuffled, affix_type,
                                      selected_family_name=selected_family_name, side=primary_side, given_gloss=None,
                                      ur_words=None)
            return_paradigm[0] = paradigm1
            # Reuse the primary paradigm's gloss column and UR words so the
            # secondary paradigm lines up row-for-row with the primary one.
            given_gloss = paradigm1.gloss_column
            ur_words = paradigm1.UR_words
            p2_col_count = 0
            # NOTE(review): `paradigm1 is not None` is tested after
            # `paradigm1.col_count` has already been accessed, so it can never
            # be False at this point.
            if (paradigm1.col_count <= 3) and (paradigm1 is not None):
                # The secondary paradigm uses the opposite affix type.
                if affix_type == "PREFIX":
                    secondary_affix_type = "SUFFIX"
                else:
                    secondary_affix_type = "PREFIX"
                paradigm2 = make_paradigm(self._templates, self._rule, matchers, self._phonemes,
                                          self._feature_to_type, self._feature_to_sounds,
                                          shuffled, secondary_affix_type,
                                          selected_family_name=selected_family_name, side=secondary_side,
                                          given_gloss=given_gloss, ur_words=ur_words)
                return_paradigm[1] = paradigm2
                p2_col_count = paradigm2.col_count
            print("colcount: " + str(paradigm1.col_count) + "/" + str(p2_col_count))
            return return_paradigm
        LOGGER.error("Rule %s\n" % str(self._rule))
        LOGGER.error("Phoneme %s\n" % str(self._phonemes))
        LOGGER.error("Exceeded maximum retry limit. Failed to find a question!\n")
        return None

    def get_paradigm_question(self, shuffled: bool, isIPAg: bool, feature_to_type: Dict[str, str],
                              feature_to_sounds: Dict[str, List[Sound]], affix_type: Optional[str]) -> Optional[Dict]:
        """Assemble the serializable question dict from the generated paradigm
        pair; returns None when question generation failed.

        When `isIPAg` is true, every phone-bearing field has ASCII 'g'
        replaced by IPA 'ɡ' (U+0261).
        """
        questions = self._get_valid_question(shuffled, affix_type)
        if questions is None:
            return None
        poi = " ".join(self._rule.get_interest_phones(self._phonemes, feature_to_type, feature_to_sounds)[1])
        header_row = []
        trans_patterns = []
        ur_words = [str(w) for w in questions[0].UR_words]
        core_data = []
        # One row bucket per row of the primary paradigm.
        for _ in range(len(questions[0].applied_core_data)):
            core_data.append([])
        rule = str(self._rule)
        phonemes = [str(w) for w in self._phonemes]
        templates = str(self._templates)
        gloss = [str(w) for w in questions[0].gloss_column]
        # Merge primary and (optional) secondary paradigms column-wise: the
        # paradigms share rows, so each row's cells are concatenated.
        for question in questions:
            if question is not None:
                header_row.extend([str(w) for w in question.trans_names])
                trans_patterns.extend(str(w) for w in question.UR_trans_pattern)
                for i in range(len(question.applied_core_data)):
                    core_data[i].extend([str(w) for w in question.applied_core_data[i]])
        if isIPAg:
            question_data = {
                'header_row': [str(w).replace('g', 'ɡ') for w in header_row],
                'trans_patterns': [str(w).replace('g', 'ɡ') for w in trans_patterns],
                'ur_words': [str(w).replace('g', 'ɡ') for w in ur_words],
                'core_data': [[str(w).replace('g', 'ɡ') for w in row] for row in core_data],
                'rule': rule,
                'phonemes': [str(w).replace('g', 'ɡ') for w in phonemes],
                'templates': templates,
                'Gloss': gloss,
                'poi': poi.replace('g', 'ɡ')
            }
        else:
            question_data = {
                'header_row': header_row,
                'trans_patterns': trans_patterns,
                'ur_words': ur_words,
                'core_data': core_data,
                'rule': rule,
                'phonemes': phonemes,
                'templates': templates,
                'Gloss': gloss,
                'poi': poi
            }
        LOGGER.debug(question_data)
        return question_data
def _get_trans_data(filename: str):
types = {}
with open(filename, encoding='utf-8') as data_file:
lines = [l.rstrip() for l in data_file.readlines()]
curr_type = None
curr_side = None
for line in lines:
line = line.replace('ɡ', 'g')
if len(line) == | |
import discord
import asyncio
import random
import pekofy as peko
import replies
# import os - use this if you want to use the for filename method instead of the bot.load_extension
import datetime
from discord.ext import commands, tasks
from itertools import cycle
from discord.ext.commands import CommandNotFound
token = 'token'  # NOTE(review): placeholder — load the real token from config/env; never commit it
intents = discord.Intents.default()
intents.members = True  # needed by the on_member_join/on_member_remove handlers below
intents.guilds = True
toDoDB = []  # in-memory todo store: [[channel_id, [[description, status], ...]], ...]
# bot = discord.Client()
bot = commands.Bot(command_prefix='!', intents=intents)
# bot.load_extension('cogs.Todo') # load the Todo feature with timers
# Variables needed for the formatted strings
# Rotating presence texts, started via change_status.start() in on_ready.
status = cycle(['!help', '@KumikoNyan', '@MoeFAX', '@Jinrooo', 'Destiny 2', 'Fallout 4', 'Skyrim SE', 'I love you!'])
# Canned reply bodies used by on_message.
# NOTE(review): text..text8 end at the colon with no link — the class/school
# links appear to have been stripped; confirm before release.
text = 'This is **Biology Class**:\n '
text1 = 'This is **English for Academic and Professional Purposes Class**:\n '
text2 = 'This is **Physics Class**:\n '
text3 = 'This is **Entrepreneurship Class**:\n '
text4 = 'This is **Homeroom**:\n '
text5 = 'This is **Guidance**:\n '
text6 = 'Here is your class schedule for this term. Enjoy Learning!\n '
text7 = "Stay updated with Elizabeth Seton School's Facebook Page:\n "
text8 = "Check out the official website of Elizabeth Seton School:\n "
text9 = "**MEGA**:\n "
text10 = "**GOOGLE DRIVE**:\n "
text11 = "**DROPBOX**:\n "
text12 = "**MICROSOFT ONEDRIVE**:\n "
text13 = "**PASTEBIN**:\n "
text14 = "**ESS BOT OFFICIAL GITHUB REPOSITORY:**\n"
text15 = """
:alarm_clock: **How to use the Reminder?** :alarm_clock:
```'$remind/remindme/remind_me [Time] [Your Reminder].'
Time can be in:
d - days\nh - hours\nm - minutes\ns - seconds
'!helpr' - shows this menu.
example: !remindme 30s Check the stove.\nby @KumikoNyan```"""
# Pekofy Bot Functions
def reply_chance(percent):
    """Return True with approximately `percent`% probability.

    BUG FIX: the old `random.randint(0, 100) <= percent` draws from 101
    outcomes, so reply_chance(0) still fired ~1% of the time and every other
    percentage was slightly inflated.
    """
    return random.random() < percent / 100
# ToDo List Functions by @KumikoNyan
def channelDBManager(channel):
    """Return the (mutable) todo list for `channel`, creating it on first use.

    The returned list is the same object stored inside toDoDB, so callers
    append/remove todos in place.

    Replaces the original double scan over toDoDB and its bare
    `except: ... exit()` fallback, which could never trigger but would have
    killed the process if it did.
    """
    for entry in toDoDB:
        if entry[0] == channel:
            return entry[1]
    # First time this channel is seen: register an empty todo list for it.
    new_entry = [channel, []]
    toDoDB.append(new_entry)
    return new_entry[1]
def newToDo(desc, channel):
    """Append a new, still-open todo (`status` True) to the channel's list."""
    channelDBManager(channel).append([desc, True])
def doneToDo(id, channel):
    """Delete the todo with 1-based index `id` from the channel's list.

    Returns True when a todo was removed, False for an out-of-range or
    non-integer id.

    BUG FIX: the old code did `db.remove(db[id - 1])`, which removes the first
    *equal* entry — with duplicate todos it deleted the wrong row.
    """
    db = channelDBManager(channel)
    if isinstance(id, int) and 1 <= id <= len(db):
        db.pop(id - 1)
        return True
    return False
def listToDo(channel):
    """Return the channel's todos as [[id, description, status], ...] with 1-based ids."""
    return [[position + 1] + list(entry)
            for position, entry in enumerate(channelDBManager(channel))]
# Checks if the Bot is ready
@bot.event
async def on_ready():
    """Called once the bot has connected: start status rotation and log identity."""
    change_status.start()  # will change the status or activity of the bot given the parameters
    print('--------------------------')
    print('The Bot is ready!')
    print(bot.user.name)
    print(bot.user.id)
    print('--------------------------')
# This godsend error handler to avoid "COMMAND NOT FOUND" false positive.
@bot.event
async def on_command_error(ctx, error):  # you shouldn't remove ctx or the run will get piled up with 'CommandNotFound'
    """Silence CommandNotFound (message-based '!' commands trigger it constantly); re-raise everything else."""
    if isinstance(error, CommandNotFound):
        return
    raise error
# Logging
# Shows the members and Guilds (Servers) added or removed
@bot.event
async def on_member_join(member):
    """Log member arrivals to stdout."""
    print(f'{member} has joined the server.')
@bot.event
async def on_member_remove(member):
    """Log member departures to stdout."""
    print(f'{member} has left the server.')
@bot.event
async def on_guild_join(guild):
    """Log when the bot is added to a guild."""
    print(f'We have been added by {guild}!')
@bot.event
async def on_guild_remove(guild):
    """Log when the bot is removed from a guild."""
    print(f'We have been removed by {guild}!')
# Class Links and Schedule
@bot.event
async def on_message(message):
await bot.process_commands(message) # allows commands to work with events
if message.author == bot.user.id:
return
if message.content == '!help':
await message.channel.purge(limit=2)
content = """
:mechanical_arm: **List of Commands:** :mechanical_leg:
```'!help' - shows this menu.\n'!commands' - DMs this menu.\n'!bio' - shows the class link for the Biology class.
'!eap' - shows the class link for EAP class.\n'!phy' - shows the class link for Physics class.
'!entrep' - shows the class link for Entrepreneurship class.\n'!hr' - shows the class link for Homeroom.
'!guidance' - shows the class link for Guidance class.\n'!sched' - shows the current schedule for the term.```
:school: **ELIZABETH SETON SCHOOL INFORMATION:** :school_satchel:
```'!essfb' - shows the official Facebook page of Elizabeth Seton School.
'!ess' - shows the official website of Elizabeth Seton School.```
:alarm_clock: **How to use the Reminder?** :alarm_clock:
```'!remind/remindme/remind_me [Time] [Your Reminder].'
Time can be in:
d - days\nh - hours\nm - minutes\ns - seconds
'!helpr' - shows this menu.
example: !remindme 30s Check the stove.\nby @KumikoNyan```
:question: **How to use the Todo List?** :question:
```'!todo' - Show the current todo list.\n'!add [TODO]' - Add a new todo.\n'!done [ID]' - Mark a todo done (delete).
'!helpt' - Show this menu.\nby @KumikoNyan```
:file_folder: **File Hosting Sites and More!** :open_file_folder:
```'!mega' - shows the MEGA file hosting site.\n'!gd' - shows Google Drive.\n'!db' - shows Dropbox.
'!md' - shows Microsoft OneDrive.\n'!pb' - shows the reliable Paste Bin!
'!git' - shows the official GitHub repository of the ESS-Bot for Discord!```
:ok_hand: **Extra Commands:** :ok_hand:
```'!ping' - to check the current latency.\n'!clear [value]' - clears certain amount of messages (limit is 10).
'!8 [Question]' - try out our new 8 Ball Magic Game!```
""".format(message)
await message.channel.send(content)
if message.content == '!bio':
await message.channel.purge(limit=1)
content = f"{text}".format(message)
await message.channel.send(content)
if message.content == '!eap':
await message.channel.purge(limit=1)
content = f"{text1} ".format(message)
await message.channel.send(content)
if message.content == '!phy':
await message.channel.purge(limit=1)
content = f"{text2}".format(message)
await message.channel.send(content)
if message.content == '!entrep':
await message.channel.purge(limit=1)
content = f"{text3}".format(message)
await message.channel.send(content)
if message.content == '!hr':
await message.channel.purge(limit=1)
content = f"{text4}".format(message)
await message.channel.send(content)
if message.content == '!guidance':
await message.channel.purge(limit=1)
content = f"{text5}".format(message)
await message.channel.send(content)
if message.content == '!sched':
await message.channel.purge(limit=1)
content = f"{text6}".format(message)
await message.channel.send(content)
if message.content == '!essfb':
await message.channel.purge(limit=1)
content = f"{text7}".format(message)
await message.channel.send(content)
if message.content == '!ess':
await message.channel.purge(limit=1)
content = f"{text8}".format(message)
await message.channel.send(content)
if message.content == '!mega':
await message.channel.purge(limit=1)
content = f'{text9} https://mega.nz/login'.format(message)
await message.channel.send(content)
if message.content == '!gd':
await message.channel.purge(limit=1)
content = f'{text10} https://drive.google.com/'.format(message)
await message.channel.send(content)
if message.content == '!db':
await message.channel.purge(limit=1)
content = f'{text11} https://www.dropbox.com/login'.format(message)
await message.channel.send(content)
if message.content == '!md':
await message.channel.purge(limit=1)
content = f"{text12} https://onedrive.live.com/about/en-us/signin/".format(message)
await message.channel.send(content)
if message.content == '!pb':
await message.channel.purge(limit=1)
content = f'{text13} https://pastebin.pl/'.format(message)
await message.channel.send(content)
if message.content == '!git':
await message.channel.purge(limit=1)
content = f'{text14} https://github.com/KumikoNyan/ESS-Elizabeth-Seton-School-Bot'.format(message)
await message.channel.send(content)
if message.content == '!helpr':
await message.channel.purge(limit=1)
content = f'{text15}'.format(message)
await message.channel.send(content)
# ----------------------------------------------------------------------------------
# Pekofy Bot thanks to @bemxio
# pain peko (not used regex for faster results)
if message.content.lower() in ["pain", "pain.", "pain...", "pain peko", "pain peko."] and reply_chance(50):
await message.channel.send(replies.pain_peko_reply)
# hey moona
if "moona" in message.content.lower() and "pekora" in message.content.lower() and reply_chance(25):
await message.channel.send(replies.hey_moona_reply)
# help command
if message.content.startswith("!helpeko"):
await message.channel.send(replies.helpeko)
# pekofy command
if message.content.startswith('!pekofy'):
channel = message.channel
if message.reference:
reply = message.reference.resolved
else:
return
if reply.author == bot.user:
await message.channel.send(replies.found_myself)
return
reply = reply.content
"""
try:
reply = message.reference.resolved.content
except AttributeError: # if no message.reference found
reply = await channel.history(limit=2).flatten()
reply = reply[1].content
"""
reply = peko.pekofy(reply)
# if it couldn't be pekofied, give a random pekora clip
if reply in ["NOTHING_CHANGED", "NO_LETTER"]:
reply = random.choice(replies.nothing_changed_reply_list)
await message.channel.send(reply)
# insulting people
if message.content.lower() == "insult me peko":
await message.channel.send(random.choice(replies.insults))
if message.content == "!pekopasta": # easter egg
await message.channel.send(replies.cursed_pekopasta)
# rating reactions
if message.reference: # if the message is a reply
if message.reference.resolved.author == bot.user:
if "good bot" in message.content.lower():
await message.channel.send(random.choice(replies.thanks))
if "bad bot" in message.content.lower():
await message.channel.send(random.choice(replies.sorrys))
if "cute bot" in message.content.lower():
await message.channel.send(random.choice(replies.cutes))
if message.content.lower() in ["i love you", "love you", "love", "i love you peko", "love you peko",
"love peko"]:
await message.channel.send(random.choice(replies.loves))
# --------------------------------------------------------------------------------
# Easter Eggs (Not really important but hey)
if message.content == '!hello': # Hello!
content = 'Hello! {0.author.mention}'.format(message)
await message.channel.send(content)
if message.content == '!gay': # Kumiko and Reina
content = f'https://media1.tenor.com/images/bcd39f94b6e5e78b25bce85eb37c4b4a/tenor.gif?itemid=7347112'.format(
message)
await message.channel.send(content)
if message.content == '!noice': # NOICE
content = f'https://media1.tenor.com/images/bcb961b67dc5ec34381372494c85c8fe/tenor.gif?itemid=8843762'.format(
message)
await message.channel.send(content)
if message.content == '!fckshit': # <NAME>
content = f'https://media1.tenor.com/images/05472297e9c5285b0b7ad18496ce6257/tenor.gif?itemid=18959222'.format(
message)
await message.channel.send(content)
if message.content == '!rickroll': # Rick Roll
content = f'https://media1.tenor.com/images/8c409e6f39acc1bd796e8031747f19ad/tenor.gif?itemid=17029825'.format(
message)
await message.channel.send(content)
# This is a simple code that will compliment you - again not important but hey, why not?
if message.content == '!Compliment':
msg = await message.channel.send("You're bad!")
await asyncio.sleep(1.0)
await msg.edit(content='Just joking, you are good!')
# ------------------------------------------------------
# ToDo Event by @KumikoNyan
# !help for more command info about the ToDo function
# !todo to check your current todo list
# !add to add more stuffs to your list (lol?)
# !done (list number) (e.g. 1) to mark it as done
# ------------------------------------------------------
channel = str(message.channel.id)
command = str(message.content)
if command.startswith("!"):
print(toDoDB)
if command.startswith("!add "):
command = command.replace("!add ", "")
if command == "":
await message.author.send(":x: No ToDo entered.")
print(":x: No ToDo entered.")
newToDo(command, channel)
await message.author.send(":white_check_mark: ToDo `" + command + "` added.")
await message.channel.purge(limit=1)
print(":white_check_mark: ToDo | |
# subaligner/utils.py
import os
import subprocess
import pysubs2
import requests
import shutil
import cchardet
from pycaption import (
CaptionConverter,
SRTWriter,
SRTReader,
DFXPWriter,
DFXPReader,
SAMIWriter,
SAMIReader,
)
from typing import Optional, TextIO, BinaryIO, Union, Callable, Any, Tuple
from .exception import TerminalException
from subaligner.lib.to_srt import STL, SRT
class Utils(object):
"""Utility functions
"""
FFMPEG_BIN = os.getenv("FFMPEG_PATH") or os.getenv("ffmpeg_path") or "ffmpeg"
@staticmethod
def srt2ttml(srt_file_path: str, ttml_file_path: Optional[str] = None) -> None:
"""Convert SubRip subtitles to TTML subtitles.
Arguments:
srt_file_path {string} -- The path to the SubRip file.
ttml_file_path {string} -- The path to the TTML file.
"""
file: Union[TextIO, BinaryIO]
converter = CaptionConverter()
encoding = Utils.detect_encoding(srt_file_path)
with open(srt_file_path, "r", encoding=encoding) as file:
converter.read(file.read(), SRTReader())
if ttml_file_path is None:
ttml_file_path = srt_file_path.replace(".srt", ".xml")
with open(ttml_file_path, "wb") as file:
file.write(converter.write(DFXPWriter()).encode(encoding))
@staticmethod
def ttml2srt(ttml_file_path: str, srt_file_path: Optional[str] = None) -> None:
"""Convert TTML subtitles to SubRip subtitles.
Arguments:
ttml_file_path {string} -- The path to the TTML file.
srt_file_path {string} -- The path to the SubRip file.
"""
file: Union[TextIO, BinaryIO]
converter = CaptionConverter()
encoding = Utils.detect_encoding(ttml_file_path)
with open(ttml_file_path, "r", encoding=encoding) as file:
converter.read(file.read(), DFXPReader())
if srt_file_path is None:
srt_file_path = ttml_file_path.replace(".xml", ".srt")
with open(srt_file_path, "wb") as file:
file.write(converter.write(SRTWriter()).encode(encoding))
@staticmethod
def srt2vtt(srt_file_path: str, vtt_file_path: Optional[str] = None, timeout_secs: int = 30) -> None:
"""Convert SubRip subtitles to WebVTT subtitles.
Arguments:
srt_file_path {string} -- The path to the SubRip file.
vtt_file_path {string} -- The path to the WebVTT file.
timeout_secs {int} -- The timeout in seconds on conversion {default: 30}.
"""
_vtt_file_path = srt_file_path.replace(".srt", ".vtt") if vtt_file_path is None else vtt_file_path
encoding = Utils.detect_encoding(srt_file_path)
command = "{0} -y -sub_charenc {1} -i {2} -f webvtt {3}".format(Utils.FFMPEG_BIN, encoding, srt_file_path, _vtt_file_path)
timeout_msg = "Timeout on converting SubRip to WebVTT: {}".format(srt_file_path)
error_msg = "Cannot convert SubRip to WebVTT: {}".format(srt_file_path)
def _callback(returncode: int, std_err: str) -> None:
if returncode != 0:
raise TerminalException(
"Cannot convert SubRip to WebVTT: {} with error {}".format(
srt_file_path, std_err
)
)
Utils.remove_trailing_newlines(_vtt_file_path, encoding)
Utils.__run_command(command, timeout_secs, timeout_msg, error_msg, _callback)
@staticmethod
def vtt2srt(vtt_file_path: str, srt_file_path: Optional[str] = None, timeout_secs: int = 30) -> None:
"""Convert WebVTT subtitles to SubRip subtitles.
Arguments:
vtt_file_path {string} -- The path to the WebVTT file.
srt_file_path {string} -- The path to the SubRip file.
timeout_secs {int} -- The timeout in seconds on conversion {default: 30}.
"""
_srt_file_path = vtt_file_path.replace(".vtt", ".srt") if srt_file_path is None else srt_file_path
encoding = Utils.detect_encoding(vtt_file_path)
command = "{0} -y -sub_charenc {1} -i {2} -f srt {3}".format(Utils.FFMPEG_BIN, encoding, vtt_file_path, _srt_file_path)
timeout_msg = "Timeout on converting WebVTT to SubRip: {}".format(vtt_file_path)
error_msg = "Cannot convert WebVTT to SubRip: {}".format(vtt_file_path)
def _callback(returncode: int, std_err: str) -> None:
if returncode != 0:
raise TerminalException(
"Cannot convert WebVTT to SubRip: {} with error {}".format(
vtt_file_path, std_err
)
)
Utils.remove_trailing_newlines(_srt_file_path, encoding)
Utils.__run_command(command, timeout_secs, timeout_msg, error_msg, _callback)
@staticmethod
def srt2ass(srt_file_path: str, ass_file_path: Optional[str] = None) -> None:
"""Convert SubRip subtitles to Advanced SubStation Alpha v4.0+ subtitles.
Arguments:
srt_file_path {string} -- The path to the SubRip file.
ass_file_path {string} -- The path to the ASS file.
"""
new_ass_file_path, encoding = Utils.__convert_subtitle(srt_file_path, "srt", ass_file_path, "ass", "ass")
Utils.remove_trailing_newlines(new_ass_file_path, encoding)
@staticmethod
def ass2srt(ass_file_path: str, srt_file_path: Optional[str] = None) -> None:
"""Convert Advanced SubStation Alpha v4.0+ subtitles to SubRip subtitles.
Arguments:
ass_file_path {string} -- The path to the ASS file.
srt_file_path {string} -- The path to the SubRip file.
"""
new_srt_file_path, encoding = Utils.__convert_subtitle(ass_file_path, "ass", srt_file_path, "srt", "srt")
Utils.remove_trailing_newlines(new_srt_file_path, encoding)
@staticmethod
def srt2ssa(srt_file_path: str, ssa_file_path: Optional[str] = None) -> None:
"""Convert SubRip subtitles to SubStation Alpha v4.0 subtitles.
Arguments:
srt_file_path {string} -- The path to the SubRip file.
ssa_file_path {string} -- The path to the SSA file.
"""
new_ssa_file_path, encoding = Utils.__convert_subtitle(srt_file_path, "srt", ssa_file_path, "ssa", "ssa")
Utils.remove_trailing_newlines(new_ssa_file_path, encoding)
@staticmethod
def ssa2srt(ssa_file_path: str, srt_file_path: Optional[str] = None) -> None:
"""Convert SubStation Alpha v4.0 subtitles to SubRip subtitles.
Arguments:
ssa_file_path {string} -- The path to the SSA file.
srt_file_path {string} -- The path to the SubRip file.
"""
new_srt_file_path, encoding = Utils.__convert_subtitle(ssa_file_path, "ssa", srt_file_path, "srt", "srt")
Utils.remove_trailing_newlines(new_srt_file_path, encoding)
    @staticmethod
    def srt2microdvd(srt_file_path: str, microdvd_file_path: Optional[str] = None, frame_rate: float = 25.0) -> None:
        """Convert SubRip subtitles to MicroDVD subtitles.

        Arguments:
            srt_file_path {string} -- The path to the SubRip file.
            microdvd_file_path {string} -- The path to the MicroDVD file.
            frame_rate {float} -- The frame rate for frame-based MicroDVD {default: 25.0}.
        """
        new_microdvd_file_path, encoding = Utils.__convert_subtitle(srt_file_path, "srt", microdvd_file_path, "sub", "microdvd", frame_rate=frame_rate)
        Utils.remove_trailing_newlines(new_microdvd_file_path, encoding)
    @staticmethod
    def microdvd2srt(microdvd_file_path: str, srt_file_path: Optional[str] = None) -> None:
        """Convert MicroDVD subtitles to SubRip subtitles.

        Arguments:
            microdvd_file_path {string} -- The path to the MicroDVD file.
            srt_file_path {string} -- The path to the SubRip file.
        """
        new_srt_file_path, encoding = Utils.__convert_subtitle(microdvd_file_path, "sub", srt_file_path, "srt", "srt")
        Utils.remove_trailing_newlines(new_srt_file_path, encoding)
@staticmethod
def srt2mpl2(srt_file_path: str, mpl2_file_path: Optional[str] = None) -> None:
"""Convert SubRip subtitles to MPL2 subtitles.
Arguments:
srt_file_path {string} -- The path to the SubRip file.
mpl2_file_path {string} -- The path to the MPL2 file.
"""
new_mpl2_file_path, encoding = Utils.__convert_subtitle(srt_file_path, "srt", mpl2_file_path, "txt", "mpl2")
Utils.remove_trailing_newlines(new_mpl2_file_path, encoding)
@staticmethod
def mpl22srt(mpl2_file_path: str, srt_file_path: Optional[str] = None) -> None:
"""Convert MPL2 subtitles to SubRip subtitles.
Arguments:
mpl2_file_path {string} -- The path to the MPL2 file.
srt_file_path {string} -- The path to the SubRip file.
"""
new_srt_file_path, encoding = Utils.__convert_subtitle(mpl2_file_path, "txt", srt_file_path, "srt", "srt")
Utils.remove_trailing_newlines(new_srt_file_path, encoding)
@staticmethod
def srt2tmp(srt_file_path: str, tmp_file_path: Optional[str] = None) -> None:
"""Convert SubRip subtitles to TMP subtitles.
Arguments:
srt_file_path {string} -- The path to the SubRip file.
tmp_file_path {string} -- The path to the TMP file.
"""
new_tmp_file_path, encoding = Utils.__convert_subtitle(srt_file_path, "srt", tmp_file_path, "tmp", "tmp")
Utils.remove_trailing_newlines(new_tmp_file_path, encoding)
    @staticmethod
    def tmp2srt(tmp_file_path: str, srt_file_path: Optional[str] = None) -> None:
        """Convert TMP subtitles to SubRip subtitles.

        Arguments:
            tmp_file_path {string} -- The path to the TMP file.
            srt_file_path {string} -- The path to the SubRip file.
        """
        new_srt_file_path, encoding = Utils.__convert_subtitle(tmp_file_path, "tmp", srt_file_path, "srt", "srt")
        Utils.remove_trailing_newlines(new_srt_file_path, encoding)
@staticmethod
def srt2sami(srt_file_path: str, sami_file_path: Optional[str] = None) -> None:
"""Convert SubRip subtitles to SAMI subtitles.
Arguments:
srt_file_path {string} -- The path to the SubRip file.
sami_file_path {string} -- The path to the SAMI file.
"""
file: Union[TextIO, BinaryIO]
converter = CaptionConverter()
encoding = Utils.detect_encoding(srt_file_path)
with open(srt_file_path, "r", encoding=encoding) as file:
converter.read(file.read(), SRTReader())
if sami_file_path is None:
sami_file_path = srt_file_path.replace(".srt", ".smi")
with open(sami_file_path, "wb") as file:
file.write(converter.write(SAMIWriter()).encode(encoding))
@staticmethod
def sami2srt(sami_file_path: str, srt_file_path: Optional[str] = None) -> None:
"""Convert SAMI subtitles to SubRip subtitles.
Arguments:
sami_file_path {string} -- The path to the SAMI file.
srt_file_path {string} -- The path to the SubRip file.
"""
file: Union[TextIO, BinaryIO]
converter = CaptionConverter()
encoding = Utils.detect_encoding(sami_file_path)
with open(sami_file_path, "r", encoding=encoding) as file:
converter.read(file.read(), SAMIReader())
if srt_file_path is None:
srt_file_path = sami_file_path.replace(".smi", ".srt")
with open(srt_file_path, "wb") as file:
file.write(converter.write(SRTWriter()).encode(encoding))
Utils.remove_trailing_newlines(srt_file_path, encoding)
@staticmethod
def stl2srt(stl_file_path: str, srt_file_path: Optional[str] = None) -> None:
"""Convert EBU-STL subtitles to SubRip subtitles.
Arguments:
stl_file_path {string} -- The path to the EBU-STL file.
srt_file_path {string} -- The path to the SubRip file.
"""
encoding = Utils.detect_encoding(stl_file_path)
stl = STL(stl_file_path, True)
if srt_file_path is None:
srt_file_path = stl_file_path.replace(".stl", ".srt")
srt = SRT(srt_file_path)
for sub in stl:
(tci, tco, txt) = sub
srt.write(tci, tco, txt)
srt.file.close()
stl.file.close()
Utils.remove_trailing_newlines(srt_file_path, encoding)
@staticmethod
def extract_teletext_as_subtitle(ts_file_path: str, page_num: int, output_file_path: str, timeout_secs: int = 30) -> None:
"""Extract DVB Teletext from MPEG transport stream files and convert them into the output format.
Arguments:
ts_file_path {string} -- The path to the Transport Stream file.
page_num {int} -- The page number for the Teletext
output_file_path {string} -- The path to the output file.
timeout_secs {int} -- The timeout in seconds on extraction {default: 30}.
"""
command = "{0} -y -fix_sub_duration -txt_page {1} -txt_format text -i {2} {3}".format(Utils.FFMPEG_BIN, page_num, ts_file_path, output_file_path)
timeout_msg = "Timeout on extracting Teletext from transport stream: {} on page: {}".format(ts_file_path, page_num)
error_msg = "Cannot extract Teletext from transport stream: {} on page: {}".format(ts_file_path, page_num)
def _callback(returncode: int, std_err: str) -> None:
if returncode != 0:
raise TerminalException(
"Cannot extract Teletext from transport stream: {} on page: {} with error {}".format(
ts_file_path, page_num, std_err
)
)
Utils.remove_trailing_newlines(output_file_path, None)
Utils.__run_command(command, timeout_secs, timeout_msg, error_msg, _callback)
@staticmethod
def extract_matroska_subtitle(mkv_file_path: str, stream_index: int, output_file_path: str, | |
OF INPUT FILES']['clean']
paramDict['num_cores'] = configObj.get('num_cores')
paramDict['rules_file'] = configObj['rules_file'] if configObj['rules_file'] != "" else None
log.info('USER INPUT PARAMETERS for Separate Drizzle Step:')
util.printParams(paramDict, log=log)
paramDict['logfile'] = logfile
# override configObj[build] value with the value of the build parameter
# this is necessary in order for AstroDrizzle to always have build=False
# for single-drizzle step when called from the top-level.
run_driz(imageObjectList, output_wcs.single_wcs, paramDict, single=True,
build=False, wcsmap=wcsmap)
else:
log.info('Single drizzle step not performed.')
if procSteps is not None:
procSteps.endStep('Separate Drizzle')
def drizFinal(imageObjectList, output_wcs, configObj,
              build=None, wcsmap=None, logfile=None, procSteps=None):
    """Run the final-drizzle step for all input images.

    Builds the parameter dictionary from the 'driz_combine' section of
    `configObj` and delegates to `run_driz` with single=False. Does nothing
    when the step's 'driz_combine' switch is off.
    """
    if procSteps is not None:
        procSteps.addStep('Final Drizzle')
    # ConfigObj needs to be parsed specifically for driz_final set of parameters
    step_name = util.getSectionName(configObj, _final_step_num_)
    # This can be called directly from MultiDrizzle, so only execute when the
    # switch is on (no guarantee MD will check before calling).
    if not configObj[step_name]['driz_combine']:
        log.info('Final drizzle step not performed.')
    else:
        paramDict = buildDrizParamDict(configObj, single=False)
        paramDict['crbit'] = configObj['crbit']
        paramDict['proc_unit'] = configObj['proc_unit']
        paramDict['wht_type'] = configObj[step_name]['final_wht_type']
        rules = configObj['rules_file']
        paramDict['rules_file'] = rules if rules != "" else None
        # Override the configObj build value with the build parameter so the
        # top-level caller can force build=False for this step.
        if build is None:
            build = paramDict['build']
        # Record whether or not intermediate files should be deleted when finished
        paramDict['clean'] = configObj['STATE OF INPUT FILES']['clean']
        paramDict['logfile'] = logfile
        log.info('USER INPUT PARAMETERS for Final Drizzle Step:')
        util.printParams(paramDict, log=log)
        run_driz(imageObjectList, output_wcs.final_wcs, paramDict, single=False,
                 build=build, wcsmap=wcsmap)
    if procSteps is not None:
        procSteps.endStep('Final Drizzle')
# Run 'drizzle' here...
#
def mergeDQarray(maskname, dqarr):
    """Merge static or CR mask with mask created from DQ array on-the-fly here.

    Arguments:
        maskname -- mask source: a filename on disk, an in-memory
                    ``fits.HDUList`` (virtual input), an HDU-like object with a
                    ``.data`` attribute, or None (no-op)
        dqarr -- integer array updated IN PLACE via a bitwise AND with the mask
    """
    maskarr = None
    if maskname is not None:
        if isinstance(maskname, str):
            # working with file on disk (default case)
            if os.path.exists(maskname):
                mask = fileutil.openImage(maskname, memmap=False)
                # NOTE: use builtin `bool`; the `np.bool` alias was removed in
                # NumPy 1.24 and crashes there.
                maskarr = mask[0].data.astype(bool)
                mask.close()
        elif isinstance(maskname, fits.HDUList):
            # working with a virtual input file
            maskarr = maskname[0].data.astype(bool)
        else:
            maskarr = maskname.data.astype(bool)
    if maskarr is not None:
        # merge array with dqarr now (third argument is the output array)
        np.bitwise_and(dqarr, maskarr, dqarr)
def updateInputDQArray(dqfile, dq_extn, chip, crmaskname, cr_bits_value):
    """OR cosmic-ray flags from a CR mask into the input file's DQ array in place.

    Arguments:
        dqfile -- path to the FITS file whose DQ extension gets updated
        dq_extn -- name of the DQ extension (combined with `chip` for lookup)
        chip -- chip/extension version number appended to `dq_extn`
        crmaskname -- CR mask: a filename on disk or an in-memory `fits.HDUList`
        cr_bits_value -- DQ bit value ORed into pixels flagged by the mask;
                         None disables the update
    """
    # Nothing to do when the mask is neither an in-memory HDUList nor a file on disk.
    if not isinstance(crmaskname, fits.HDUList) and not os.path.exists(crmaskname):
        log.warning('No CR mask file found! Input DQ array not updated.')
        return
    if cr_bits_value is None:
        log.warning('Input DQ array not updated!')
        return
    if isinstance(crmaskname, fits.HDUList):
        # in_memory case
        crmask = crmaskname
    else:
        crmask = fileutil.openImage(crmaskname, memmap=False)
    if os.path.exists(dqfile):
        fullext = dqfile + "[" + dq_extn + str(chip) + "]"
        infile = fileutil.openImage(fullext, mode='update', memmap=False)
        # logical_not: pixels where the mask is 0/False are the flagged ones.
        __bitarray = np.logical_not(crmask[0].data).astype(np.int16) * cr_bits_value
        # Third argument makes bitwise_or write back into the DQ array itself.
        np.bitwise_or(infile[dq_extn, chip].data, __bitarray, infile[dq_extn, chip].data)
        infile.close()
    crmask.close()
def buildDrizParamDict(configObj, single=True):
    """Assemble the drizzle parameter dictionary for one drizzle step.

    Copies the global settings plus every 'driz_sep_*' (single=True) or
    'final_*' (single=False) value from the matching configObj section,
    stripping the step prefix from each key.

    Arguments:
        configObj -- dict-like task configuration
        single -- True for the separate-drizzle step, False for final drizzle

    Returns:
        dict mapping short parameter names to their (converted) values.
    """
    # Per-parameter conversion functions applied on copy.
    cfunc_pars = {'pixfrac': float}
    # Initialize paramDict with global parameter(s).
    # (Removed the unused `chip_pars` list that was never referenced.)
    paramDict = {'build': configObj['build'], 'stepsize': configObj['stepsize'],
                 'coeffs': configObj['coeffs'], 'wcskey': configObj['wcskey']}
    if single:
        driz_prefix = 'driz_sep_'
        stepnum = 3
    else:
        driz_prefix = 'final_'
        stepnum = 7
    section_name = util.getSectionName(configObj, stepnum)
    # Copy values from configObj for the appropriate step to paramDict;
    # 'units' is appended so it is handled even when absent from the section.
    for key in list(configObj[section_name].keys()) + [driz_prefix + 'units']:
        if not key.startswith(driz_prefix):
            continue
        par = key[len(driz_prefix):]
        if par == 'units':
            # Hard-code single-drizzle to always return 'cps'.
            paramDict[par] = 'cps' if single else configObj[section_name][key]
        else:
            val = configObj[section_name][key]
            paramDict[par] = cfunc_pars[par](val) if par in cfunc_pars else val
    log.info("Interpreted paramDict with single={} as:\n{}".format(single, paramDict))
    return paramDict
def _setDefaults(configObj={}):
"""set up the default parameters to run drizzle
build,single,units,wt_scl,pixfrac,kernel,fillval,
rot,scale,xsh,ysh,blotnx,blotny,outnx,outny,data
Used exclusively for unit-testing, if any are defined.
"""
paramDict = {"build": True,
"single": True,
"stepsize": 10,
"in_units": "cps",
"wt_scl": 1.,
"pixfrac": 1.,
"kernel": "square",
"fillval": 999.,
"maskval": None,
"rot": 0.,
"scale": 1.,
"xsh": 0.,
"ysh": 0.,
"blotnx": 2048,
"blotny": 2048,
"outnx": 4096,
"outny": 4096,
"data": None,
"driz_separate": True,
"driz_combine": False,
"logfile": "astrodrizzle.log"}
if(len(configObj) != 0):
for key in configObj.keys():
paramDict[key] = configObj[key]
return paramDict
def interpret_maskval(paramDict):
    """ Apply logic for interpreting final_maskval value...

    Returns 0 when no 'maskval' key is present, NaN when the value is None,
    and otherwise the value coerced to float; used to initialize the output
    SCI array.
    """
    if 'maskval' not in paramDict:
        return 0
    raw = paramDict['maskval']
    # None means "fill with NaN"; anything else is coerced explicitly to float.
    return np.nan if raw is None else float(raw)
def run_driz(imageObjectList, output_wcs, paramDict, single, build, wcsmap=None):
""" Perform drizzle operation on input to create output.
The input parameters originally was a list
of dictionaries, one for each input, that matches the
primary parameters for an ``IRAF`` `drizzle` task.
This method would then loop over all the entries in the
list and run `drizzle` for each entry.
Parameters required for input in paramDict:
build,single,units,wt_scl,pixfrac,kernel,fillval,
rot,scale,xsh,ysh,blotnx,blotny,outnx,outny,data
"""
# Insure that input imageObject is a list
if not isinstance(imageObjectList, list):
imageObjectList = [imageObjectList]
#
# Setup the versions info dictionary for output to PRIMARY header
# The keys will be used as the name reported in the header, as-is
#
_versions = {'AstroDrizzle': __version__,
'PyFITS': util.__fits_version__,
'Numpy': util.__numpy_version__}
# Set sub-sampling rate for drizzling
# stepsize = 2.0
log.info(' **Using sub-sampling value of %s for kernel %s' %
(paramDict['stepsize'], paramDict['kernel']))
maskval = interpret_maskval(paramDict)
outwcs = copy.deepcopy(output_wcs)
# Check for existance of output file.
if (not single and build and
fileutil.findFile(imageObjectList[0].outputNames['outFinal'])):
log.info('Removing previous output product...')
os.remove(imageObjectList[0].outputNames['outFinal'])
# print out parameters being used for drizzling
log.info("Running Drizzle to create output frame with WCS of: ")
output_wcs.printwcs()
# Will we be running in parallel?
pool_size = util.get_pool_size(paramDict.get('num_cores'), len(imageObjectList))
will_parallel = single and pool_size > 1 and platform.system() != "Windows"
if will_parallel:
log.info('Executing %d parallel workers' % pool_size)
else:
if single: # not yet an option for final drizzle, msg would confuse
log.info('Executing serially')
# Set parameters for each input and run drizzle on it here.
#
# Perform drizzling...
numctx = 0
for img in imageObjectList:
numctx += img._nmembers
_numctx = {'all': numctx}
# if single:
# Determine how many chips make up each single image
for img in imageObjectList:
for chip in img.returnAllChips(extname=img.scienceExt):
plsingle = chip.outputNames['outSingle']
if plsingle in _numctx: _numctx[plsingle] += 1
else: _numctx[plsingle] = 1
# Compute how many planes will be needed for the context image.
_nplanes = int((_numctx['all'] - 1) / 32) + 1
# For single drizzling or when context is turned off,
# minimize to 1 plane only...
if single or imageObjectList[0][1].outputNames['outContext'] in [None, '', ' ']:
_nplanes = 1
#
# An image buffer needs to be setup for converting the input
# arrays (sci and wht) from FITS format to native format
# with respect to byteorder and byteswapping.
# This buffer should be reused for each input if possible.
#
_outsci = _outwht = _outctx = _hdrlist = None
if (not single) or \
(single and (not will_parallel) and (not imageObjectList[0].inmemory)):
# Note there are four cases/combinations for single drizzle alone here:
# (not-inmem, serial), (not-inmem, parallel), (inmem, serial), (inmem, parallel)
_outsci = np.empty(output_wcs.array_shape, dtype=np.float32)
_outsci.fill(maskval)
_outwht = np.zeros(output_wcs.array_shape, dtype=np.float32)
# initialize context to 3-D array but only pass appropriate plane to drizzle as needed
_outctx = np.zeros((_nplanes,) + output_wcs.array_shape, dtype=np.int32)
_hdrlist = []
# Keep track of how many chips have been processed
# For single case, this will determine when to close
# one product and open the next.
_chipIdx = 0
# Remember the name of the 1st image that goes into this particular product
# Insure that the header reports the proper values for the start of the
# exposure time used to make this; in particular, TIME-OBS and DATE-OBS.
template = None
#
# Work on each image
#
subprocs = []
for img in imageObjectList:
chiplist = img.returnAllChips(extname=img.scienceExt)
# How many inputs should go into this product?
num_in_prod = _numctx['all']
if single:
num_in_prod = _numctx[chiplist[0].outputNames['outSingle']]
# The name of the 1st image
fnames = []
for chip in chiplist:
fnames.append(chip.outputNames['data'])
if _chipIdx == 0:
template = fnames
else:
template.extend(fnames)
# Work each image, possibly in parallel
if will_parallel:
# use multiprocessing.Manager only if in parallel and in memory
if img.inmemory:
manager = multiprocessing.Manager()
dproxy = manager.dict(img.virtualOutputs) # copy & wrap it in proxy
img.virtualOutputs | |
= "Folder Path").grid(row = 3, column = 0, padx = 10 , pady = 10)
tk.Label(self, text = "Saving Path").grid(row = 4, column = 0, padx = 10 , pady = 10)
tk.Label(self, text = "One Min Matrix File").grid(row = 5, column = 0, padx = 10 , pady = 10)
# Single Bin File Analysis
ttk.Button(self, text="Calliper & Movement Examine", command = lambda: controller.show_frame(CalliperPage)).grid(row = 2, column = 1, padx = 10 , pady = 10)
ttk.Button(self, text="Single Channel Dashboard", command =lambda: controller.show_frame(TimeDashBoard)).grid(row = 2, column = 2, padx = 10 , pady = 10)
ttk.Button(self, text="MultiChannel Dashboard", command =showdialog).grid(row = 2, column = 3, padx = 10 , pady = 10)
# Processing Folder
tk.Entry( self, textvariable = self.InputFolderPath , width = 65).grid(row=3, column=1, padx=10, pady=10)
tk.Entry( self, textvariable = self.OutputFolderPath, width = 65 ).grid(row=4, column=1, padx=10, pady=10)
ttk.Button(self, text="Processing Folder", command = lambda: self.processing_folder(self.InputFolderPath.get())).grid(row = 3, column = 2, rowspan = 2, padx = 10 , pady = 10)
# Multiple Bin File Analysis
ttk.Button(self, text="Project Settings", command =showdialog).grid(row = 5, column = 1, padx = 10 , pady = 10)
ttk.Button(self, text="Dashboard", command =showdialog).grid(row = 5, column = 2, padx = 10 , pady = 10)
ttk.Button(self, text="Tests", command =showdialog).grid(row = 5, column = 3, padx = 10 , pady = 10)
ttk.Button(self, text="Calliper & Movement",
command=lambda: controller.show_frame(CalliperPage)).grid(row = 6, column = 0, padx = 10 , pady = 10)
ttk.Button(self, text="Signal & Details",
command=lambda: controller.show_frame(SignalDetailPage)).grid(row = 6, column =1, padx = 10 , pady = 10)
ttk.Button(self, text="Thickness & Energy Analysis",
command=lambda: controller.show_frame(GraphMainPage)).grid(row = 6, column = 2, padx = 10 , pady = 10)
ttk.Button(self, text="Dashboard",
command=lambda: controller.show_frame(TimeDashBoard)).grid(row = 6, column = 3, padx = 10 , pady = 10)
def processing_folder(self, path):
file_map = Dir_Parser(path)
for minute_key, minute_file_list in file_map.items():
file_num = len(minute_file_list)
total_signal_matrices = np.empty((MATRICES_SIZE[0], MATRICES_SIZE[1] * file_num, MATRICES_SIZE[2]), dtype = 'float16')
total_roll_r = np.zeros(( 260 * file_num , 1 ), dtype=np.uint16)
index = 0
for item in minute_file_list:
with open(item, "rb") as bin_file:
total_signal_matrices[:, index*MATRICES_SIZE[1] : (index + 1)*MATRICES_SIZE[1], :], total_roll_r[index * 260 : (index + 1) * 260, :] = processBinFile(bin_file)
index = index + 1
np.save(self.OutputFolderPath.get() + '/' + minute_key + '-signal', total_signal_matrices)
np.save(self.OutputFolderPath.get() + '/' + minute_key + '-roll', total_roll_r)
print("Finshied Stacking minute files")
return
class SettingInterface:
    """
    This interface make the settings in each Page coherent
    """

    def __init__(self):
        # Globally shared transducer-layout selection index (0 = first layout).
        self.trLayout_sel_G = 0
setting_interface = SettingInterface()
class CalliperPage(tk.Frame):
def __init__(self, parent, controller):
tk.Frame.__init__(self, parent)
self.initVariable(parent, controller, setting_interface)
self.createPlots(parent, controller)
self.createWidgets(parent, controller)
self.bind("<Key>", self.key)
self.bind("<Button-1>", self.click_callback)
cid = self.canvas_calliper_plot.mpl_connect('button_press_event', self.onclick_caliper)
def initVariable(self, parent, controller, interface):
self.trLayout = tk.StringVar()
self.trLayout_sel = tk.IntVar()
self.trLayout_sel.set(0)
self.trLayout.set( TR_LAYOUT_NAME[0] )
self.channel_no = tk.IntVar()
self.round_no = tk.IntVar()
self.channel_no.set(0)
self.round_no.set(0)
self.start_delay = tk.IntVar()
self.start_delay.set(6601)
    def createWidgets(self, parent, controller):
        """Lay out the dashboard controls: layout selector, channel/round
        steppers, start-delay entry, plot buttons and page navigation."""
        tk.Label(self, text= """Calliper & Positioning Dashboard""", font=LARGE_FONT).grid(row=0, column=0, columnspan = 7, padx=10, pady=10, sticky='EW')
        ttk.Separator(self, orient= 'horizontal').grid(row = 1, column = 0, columnspan = 8, sticky="ew", padx=10, pady=10)
        # Transducer layout selector.
        tk.Label(self, text = "TrLayout").grid(row = 2, column = 0, padx = 10 , pady = 10)
        tr_layout_box = ttk.Combobox(self, textvariable = self.trLayout, width = 35, values = TR_LAYOUT_NAME )
        #, postcommand = self.updtcblist(self, interface))
        tr_layout_box.grid(row = 2, column = 1, padx = 10 , pady = 10) # initialize
        #trLayout_sel_G = tr_layout_box.current()
        # Channel / round entries with forward ('>') and backward ('<') steppers.
        tk.Label(self, text = "Channel").grid(row = 3, column = 0, padx = 10 , pady = 10)
        tk.Label(self, text = "Round").grid(row = 5, column = 0, padx = 10, pady = 10)
        tk.Entry( self, textvariable = self.channel_no ).grid(row=3, column=1, padx=10, pady=10)
        tk.Entry( self, textvariable = self.round_no ).grid(row=5, column=1, padx=10, pady=10)
        button_fw_chn = ttk.Button(self, text=">", command = lambda: self.chn_forward_callback(self.canvas_signal_plot,
                                                                                               self.signal_plot_ax,
                                                                                               self.canvas_offset_plot,
                                                                                               self.offset_plot_ax))
        button_fw_chn.grid(row = 4, column = 1, padx = 10 , pady = 10)
        button_fw_rd = ttk.Button(self, text=">", command = lambda: self.rd_forward_callback(self.canvas_signal_plot,
                                                                                             self.signal_plot_ax,
                                                                                             self.canvas_offset_plot,
                                                                                             self.offset_plot_ax))
        button_fw_rd.grid(row = 6, column = 1, padx = 10 , pady = 10)
        button_bw_chn = ttk.Button(self, text="<", command = lambda: self.chn_backward_callback(self.canvas_signal_plot,
                                                                                                self.signal_plot_ax,
                                                                                                self.canvas_offset_plot,
                                                                                                self.offset_plot_ax))
        button_bw_chn.grid(row = 4, column = 0, padx = 10 , pady = 10)
        button_bw_rd = ttk.Button(self, text="<", command = lambda: self.rd_backward_callback(self.canvas_signal_plot,
                                                                                              self.signal_plot_ax,
                                                                                              self.canvas_offset_plot,
                                                                                              self.offset_plot_ax))
        button_bw_rd.grid(row = 6, column = 0, padx = 10 , pady = 10)
        # Start-delay entry plus the two plot-refresh buttons.
        tk.Label(self, text = "Start Delay", font = NORM_FONT).grid(row = 8, column = 0, padx = 10, pady = 10)
        tk.Entry( self, textvariable = self.start_delay ).grid(row=8, column=1, padx=10, pady=10)
        ttk.Button(self, text="Signal Plot", command = lambda: self.signal_plot_callback(self.canvas_signal_plot,
                                                                                         self.signal_plot_ax,
                                                                                         self.canvas_offset_plot,
                                                                                         self.offset_plot_ax
                                                                                         )).grid(row = 9, column = 0, columnspan = 2, rowspan = 1, padx = 10, pady = 10)
        ttk.Button(self, text="Filtered Calliper Map Plot", command =lambda: self.calliper_plot_callback(self.canvas_calliper_plot,
                                                                                                         self.calliper_plot_ax)).grid(row = 10, column = 0, columnspan = 2, rowspan = 1, padx = 10, pady = 10)
        # NOTE(review): the "Save Map" button has no command wired up yet.
        ttk.Button(self, text="Save Map").grid(row = 13, column = 0, columnspan = 2, rowspan = 1, padx = 10, pady = 10)
        # Page navigation.
        ttk.Button(self, text="Return To Home",
                   command = lambda: controller.show_frame(StartPage)).grid(row = 14, column = 0, columnspan = 1, rowspan = 1, padx = 10, pady = 10)
        ttk.Button(self, text="Signal Channel Dashboard",
                   command = lambda: controller.show_frame(TimeDashBoard)).grid(row = 14, column = 1, columnspan = 1, rowspan = 1, padx = 10, pady = 10)
def key(self, event):
"""
wasd,p
"""
self.focus_set()
if event.char == 'd':
self.rd_forward_callback(self.canvas_signal_plot,
self.signal_plot_ax,
self.canvas_offset_plot,
self.offset_plot_ax)
if event.char == 'a':
self.rd_backward_callback(self.canvas_signal_plot,
self.signal_plot_ax,
self.canvas_offset_plot,
self.offset_plot_ax)
if event.char == 'w':
self.chn_backward_callback(self.canvas_signal_plot,
self.signal_plot_ax,
self.canvas_offset_plot,
self.offset_plot_ax)
if event.char == 's':
self.chn_forward_callback(self.canvas_signal_plot,
self.signal_plot_ax,
self.canvas_offset_plot,
self.offset_plot_ax)
if event.char == 'p':
self.calliper_plot_callback(self.canvas_calliper_plot,
self.calliper_plot_ax)
def click_callback(self, event):
self.focus_set()
print ("clicked at", event.x, event.y)
def createPlots(self, parent, controller):
signal_plot = Figure(figsize = (4,3))
self.signal_plot_ax = signal_plot.add_subplot(111)
self.canvas_signal_plot = FigureCanvasTkAgg(signal_plot, self)
self.canvas_signal_plot._tkcanvas.grid(row=2, column = 2, padx=0, pady=0 , columnspan = 3, rowspan = 4)
self.canvas_signal_plot.show()
offset_plot = Figure(figsize = (4,3))
self.offset_plot_ax = offset_plot.add_subplot(111)
self.canvas_offset_plot = FigureCanvasTkAgg(offset_plot, self)
self.canvas_offset_plot._tkcanvas.grid(row=2, column = 5, padx=0, pady=0 , columnspan = 3, rowspan = 4)
self.canvas_offset_plot.show()
calliper_plot = Figure(figsize = (8,4))
self.calliper_plot_ax = calliper_plot.add_subplot(111)
self.canvas_calliper_plot = FigureCanvasTkAgg(calliper_plot, self)
self.canvas_calliper_plot._tkcanvas.grid(row=6, column = 2, padx=0, pady=10 , columnspan = 8, rowspan = 4)
self.canvas_calliper_plot.show()
def calliper_plot_callback(self,canvas,ax):
Layout_index = TR_LAYOUT_NAME.index(self.trLayout.get())
tr_layout = TR_LAYOUT[Layout_index]
TRIGGER_MAP = calculate_trigger_map(NORM_SIGNAL_MATRICES, trLayout = tr_layout)
TRIGGER_MAP = median_filter_2D(TRIGGER_MAP, 2)
CALLIPER_MAP = calculate_calliper(TRIGGER_MAP)
transducer_tdc = processRoll_r (ROLL_R)
#
ax.clear()
ax.imshow(CALLIPER_MAP, aspect = 'auto',interpolation='none')
ax.plot(transducer_tdc, 'r')
canvas.draw()
def signal_plot_callback(self, signal_canvas, signal_ax, position_canvas, position_ax):
c = ['r','b','g','k'] # plot marker colors
rd, chn, start_delay, trLayout = self.get_info_from_self()
signal_to_plot = NORM_SIGNAL_MATRICES[chn, rd, :]
signal_ax.clear()
position_ax.clear()
signal_ax.plot(signal_to_plot, color=c[3])
signal_ax.set_title('Full Signal Plot')
theta = np.arange(0, 2*np.pi, 0.01)
x = 0 + 0.9144 * np.cos(theta)
y = 0 + 0.9144 * np.sin(theta)
transducer_tdc = processRoll_r (ROLL_R)
x_offset , y_offset = find_offset(transducer_tdc, chn, rd, CALLIPER_MAP)
position_ax.plot(x, y)
position_ax.plot(x_offset,y_offset,'ro',label="point")
position_ax.axis('equal')
position_ax.set_title('Offset Plot')
position_ax.text(-1, 0.8, 'x offset =' + "{0:.3f}%".format(x_offset*100) +';\n'+'y_offset = ' + "{0:.3f}%".format(y_offset*100) , fontsize=10)
signal_canvas.draw()
position_canvas.draw()
return
def onclick_caliper(self, event):
"""
onclick event for binding with canvas class
"""
time_point = int(event.xdata)
chn = int(event.ydata)
self.round_no.set(str(time_point))
self.channel_no.set(str(chn))
self.signal_plot_callback(self.canvas_signal_plot, self.signal_plot_ax, self.canvas_offset_plot, self.offset_plot_ax)
print('%s click: button=%d, x=%d, y=%d, xdata=%f, ydata=%f' %
('double' if event.dblclick else 'single', event.button,
event.x, event.y, time_point, chn))
def get_info_from_self(self):
rd = self.round_no.get()
chn = self.channel_no.get()
start_delay = self.start_delay.get()
trLayout = list(self.trLayout.get())
return rd, chn, start_delay, trLayout
def chn_forward_callback(self, signal_canvas, signal_ax, position_canvas, position_ax):
rd, chn, start_delay, trLayout = self.get_info_from_self()
chn = (chn + 1) % MATRICES_SIZE[0]
self.channel_no.set(chn)
self.signal_plot_callback(signal_canvas, signal_ax, position_canvas, position_ax)
def chn_backward_callback(self, signal_canvas, signal_ax, position_canvas, position_ax):
    """Step to the previous channel (wrapping at MATRICES_SIZE[0]) and
    refresh both plots."""
    _, current_chn, _, _ = self.get_info_from_self()
    prev_chn = (current_chn - 1) % MATRICES_SIZE[0]
    self.channel_no.set(prev_chn)
    self.signal_plot_callback(signal_canvas, signal_ax, position_canvas, position_ax)
def rd_forward_callback(self, signal_canvas, signal_ax, position_canvas, position_ax):
rd, chn, start_delay, trLayout | |
shape (n_leads, seq_len), or (seq_len,)
class_map: dict,
class map, mapping names to waves to numbers from 0 to n_classes-1,
the keys should contain "pwave", "qrs", "twave"
fs: real number,
sampling frequency of the signal corresponding to the `masks`,
used to compute the duration of each waveform
mask_format: str, default "channel_first",
format of the mask, used only when `masks.ndim = 2`
"channel_last" (alias "lead_last"), or
"channel_first" (alias "lead_first")
leads: str or list of str, optional,
the names of leads corresponding to the channels of the `masks`
Returns
-------
waves: dict,
each item value is a list containing the `ECGWaveForm`s corr. to the lead;
each item key is from `leads` if `leads` is set,
otherwise would be "lead_1", "lead_2", ..., "lead_n"
"""
if masks.ndim == 1:
_masks = masks[np.newaxis,...]
elif masks.ndim == 2:
if mask_format.lower() not in ["channel_first", "lead_first",]:
_masks = masks.T
else:
_masks = masks.copy()
else:
raise ValueError(f"masks should be of dim 1 or 2, but got a {masks.ndim}d array")
_leads = [f"lead_{idx+1}" for idx in range(_masks.shape[0])] if leads is None else leads
assert len(_leads) == _masks.shape[0]
_class_map = ED(deepcopy(class_map))
waves = ED({lead_name:[] for lead_name in _leads})
for channel_idx, lead_name in enumerate(_leads):
current_mask = _masks[channel_idx,...]
for wave_name, wave_number in _class_map.items():
if wave_name.lower() not in ["pwave", "qrs", "twave",]:
continue
current_wave_inds = np.where(current_mask==wave_number)[0]
if len(current_wave_inds) == 0:
continue
np.where(np.diff(current_wave_inds)>1)
split_inds = np.where(np.diff(current_wave_inds)>1)[0].tolist()
split_inds = sorted(split_inds+[i+1 for i in split_inds])
split_inds = [0] + split_inds + [len(current_wave_inds)-1]
for i in range(len(split_inds)//2):
itv_start = current_wave_inds[split_inds[2*i]]
itv_end = current_wave_inds[split_inds[2*i+1]]+1
w = ECGWaveForm(
name=wave_name.lower(),
onset=itv_start,
offset=itv_end,
peak=np.nan,
duration=1000*(itv_end-itv_start)/fs, # ms
)
waves[lead_name].append(w)
waves[lead_name].sort(key=lambda w: w.onset)
return waves
def mask_to_intervals(mask:np.ndarray,
                      vals:Optional[Union[int,Sequence[int]]]=None,
                      right_inclusive:bool=False) -> Union[list, dict]:
    """ finished, checked,

    Locate the maximal runs (intervals) of given values in a 1d mask.

    Parameters
    ----------
    mask: ndarray,
        1d mask
    vals: int or sequence of int, optional,
        values in `mask` to obtain intervals for;
        defaults to every distinct value occurring in `mask`
    right_inclusive: bool, default False,
        if True, the right end point of each interval is inclusive,
        otherwise it is exclusive

    Returns
    -------
    intervals: dict or list,
        a dict mapping each value to its list of intervals when `vals`
        is None or a sequence; the bare list of intervals when `vals`
        is an int.
        each interval is of the form `[a,b]`
    """
    if vals is None:
        target_values = list(set(mask))
    elif isinstance(vals, int):
        target_values = [vals]
    else:
        target_values = vals
    # assert set(target_values) & set(mask) == set(target_values)
    end_bias = 0 if right_inclusive else 1
    intervals = {v: [] for v in target_values}
    for v in target_values:
        positions = np.where(np.array(mask) == v)[0]
        if positions.size == 0:
            continue
        # indices (into `positions`) where a gap breaks a run; each gap
        # contributes both the last index of one run and the first of the next
        gap_ends = np.where(np.diff(positions) > 1)[0]
        boundary = sorted(
            [0, len(positions) - 1] + gap_ends.tolist() + (gap_ends + 1).tolist()
        )
        # consecutive boundary pairs delimit one run each
        for start_idx, end_idx in zip(boundary[0::2], boundary[1::2]):
            intervals[v].append([positions[start_idx], positions[end_idx] + end_bias])
    return intervals[vals] if isinstance(vals, int) else intervals
def list_sum(l:Sequence[list]) -> list:
    """ finished, checked,

    Concatenate a sequence of lists into one flat list.

    Parameters
    ----------
    l: sequence of list,
        the sequence of lists to obtain the summation

    Returns
    -------
    l_sum: list,
        sum of `l`,
        i.e. if l = [list1, list2, ...], then l_sum = list1 + list2 + ...
    """
    # a flat comprehension runs in O(total length); the previous
    # reduce(lambda a, b: a + b, l, []) re-copied the accumulator on every
    # step, which is quadratic in the total number of elements
    l_sum = [item for sub in l for item in sub]
    return l_sum
def read_log_txt(fp:str,
                 epoch_startswith:str="Train epoch_",
                 scalar_startswith:Union[str,Iterable[str]]="train/|test/") -> pd.DataFrame:
    """ finished, checked,

    read from log txt file, in case tensorboard not working

    Parameters
    ----------
    fp: str,
        path to the log txt file
    epoch_startswith: str,
        indicator of the start of an epoch
    scalar_startswith: str or iterable of str,
        indicators of the scalar recordings,
        if is str, should be indicators separated by "|"

    Returns
    -------
    summary: DataFrame,
        scalars summary (one row per epoch), in the format of a pandas DataFrame
    """
    with open(fp, "r") as f:
        content = f.read().splitlines()
    if isinstance(scalar_startswith, str):
        field_pattern = f"^({scalar_startswith})"
    else:
        field_pattern = f"""^({"|".join(scalar_startswith)})"""
    summary = []
    new_line = None
    for l in content:
        if l.startswith(epoch_startswith):
            # flush the previous epoch's row before starting a new one
            if new_line:
                summary.append(new_line)
            epoch = re.findall(r"[\d]+", l)[0]
            new_line = {"epoch": epoch}
        # fix: guard against scalar lines appearing before the first epoch
        # marker, which previously raised TypeError on `None[field]`
        if new_line is not None and re.findall(field_pattern, l):
            field, val = l.split(":")
            field = field.strip()
            val = float(val.strip())
            new_line[field] = val
    # fix: only flush the trailing row if one exists; previously a log with
    # no epoch lines appended None, yielding a spurious all-NaN row
    if new_line:
        summary.append(new_line)
    summary = pd.DataFrame(summary)
    return summary
def read_event_scalars(fp:str, keys:Optional[Union[str,Iterable[str]]]=None) -> Union[pd.DataFrame,Dict[str,pd.DataFrame]]:
    """ finished, checked,

    read scalars from event file, in case tensorboard not working

    Parameters
    ----------
    fp: str,
        path to the event file
    keys: str or iterable of str, optional,
        field names of the scalars to read,
        if is None, scalars of all fields will be read

    Returns
    -------
    summary: DataFrame or dict of DataFrame,
        the wall_time, step, value of the scalars;
        a bare DataFrame when `keys` is a single str, a dict otherwise
    """
    try:
        from tensorflow.python.summary.event_accumulator import EventAccumulator
    except ImportError:
        # fix: was a bare `except:`, which also swallowed e.g.
        # KeyboardInterrupt; only a missing module should trigger the fallback
        from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
    event_acc = EventAccumulator(fp)
    event_acc.Reload()
    if keys:
        _keys = [keys] if isinstance(keys, str) else keys
    else:
        _keys = event_acc.scalars.Keys()
    summary = {}
    for k in _keys:
        df = pd.DataFrame([[item.wall_time, item.step, item.value] for item in event_acc.scalars.Items(k)])
        df.columns = ["wall_time", "step", "value"]
        summary[k] = df
    if isinstance(keys, str):
        # fix: index with `keys` itself rather than the loop variable `k`,
        # which would be undefined when the event file holds no scalars
        summary = summary[keys]
    return summary
def dicts_equal(d1:dict, d2:dict) -> bool:
    """ finished, checked,

    Parameters
    ----------
    d1, d2: dict,
        the two dicts to compare equality

    Returns
    -------
    bool, True if `d1` equals `d2`

    NOTE
    ----
    the existence of numpy array, torch Tensor, pandas DataFrame and Series would probably
    cause errors when directly use the default `__eq__` method of dict,
    for example `{"a": np.array([1,2])} == {"a": np.array([1,2])}` would raise the following
    ```python
    ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
    ```

    Example
    -------
    >>> d1 = {"a": pd.DataFrame([{"hehe":1,"haha":2}])[["haha","hehe"]]}
    >>> d2 = {"a": pd.DataFrame([{"hehe":1,"haha":2}])[["hehe","haha"]]}
    >>> dicts_equal(d1, d2)
    ... True
    """
    # fix: torch is optional now -- previously the unconditional import made
    # torch a hard dependency even when comparing dicts without Tensors
    try:
        import torch
    except ImportError:
        torch = None
    if len(d1) != len(d2):
        return False
    for k, v in d1.items():
        # key must exist in d2 and hold a value of a compatible type
        if k not in d2 or not isinstance(d2[k], type(v)):
            return False
        if isinstance(v, dict):
            if not dicts_equal(v, d2[k]):
                return False
        elif isinstance(v, np.ndarray):
            if v.shape != d2[k].shape or not (v == d2[k]).all():
                return False
        elif torch is not None and isinstance(v, torch.Tensor):
            if v.shape != d2[k].shape or not (v == d2[k]).all().item():
                return False
        elif isinstance(v, pd.DataFrame):
            # column order is deliberately ignored; rows are compared after
            # aligning d2's columns to v's order
            if v.shape != d2[k].shape or set(v.columns) != set(d2[k].columns):
                # consider: should one check index be equal?
                return False
            if not (v.values == d2[k][v.columns].values).all():
                return False
        elif isinstance(v, pd.Series):
            if v.shape != d2[k].shape or v.name != d2[k].name:
                return False
            if not (v == d2[k]).all():
                return False
        # TODO: consider whether there are any other dtypes that should be treated similarly
        else:  # other dtypes whose equality can be directly checked
            if v != d2[k]:
                return False
    return True
def default_class_repr(c:object, align:str="center", depth:int=1) -> str:
    """ finished, checked,

    Build a multi-line ``repr`` for objects exposing ``extra_repr_keys``.

    Parameters
    ----------
    c: object,
        the object to be represented; objects without an
        ``extra_repr_keys`` method fall back to the plain ``repr``
    align: str, default "center",
        the alignment of the class arguments; "center"/"c" pads the key
        names to a common width, any other value leaves them ragged
    depth: int, default 1,
        recursion depth, controls the indentation of nested attributes

    Returns
    -------
    str,
        the representation of the class
    """
    indent = 4*depth*" "
    closing_indent = 4*(depth-1)*" "
    if not hasattr(c, "extra_repr_keys"):
        return repr(c)
    extra_keys = c.extra_repr_keys()
    if len(extra_keys) > 0:
        max_len = max([len(k) for k in extra_keys])
        extra_str = "(\n" + \
            ",\n".join([
                # getattr replaces the previous `eval(f"c.{k}")`, which was
                # both slower and unsafe for arbitrary key strings
                f"""{indent}{k.ljust(max_len, " ") if align.lower() in ["center", "c"] else k} = {default_class_repr(getattr(c, k),align,depth+1)}""" \
                for k in c.__dir__() if k in extra_keys
            ]) + \
            f"\n{closing_indent})"  # fix: newline *before* the closing indent
    else:
        extra_str = ""
    return f"{c.__class__.__name__}{extra_str}"
class MovingAverage(object):
""" finished, checked, to be improved,
moving average
References
----------
[1] https://en.wikipedia.org/wiki/Moving_average
"""
def __init__(self, data:Optional[Sequence]=None, **kwargs:Any) -> None:
    """
    Parameters
    ----------
    data: array_like, optional,
        the series data to compute its moving average;
        defaults to an empty array when omitted
    kwargs: auxilliary key word arguments
        currently only "verbose" (int, default 0) is read
    """
    if data is None:
        self.data = np.array([])
    else:
        self.data = np.array(data)
    self.verbose = kwargs.get("verbose", 0)
def __call__(self, data:Optional[Sequence]=None, method:str="ema", **kwargs:Any) -> np.ndarray:
    """
    Compute the moving average of `self.data` via the chosen method.

    Parameters
    ----------
    data: array_like, optional,
        new series data; when given it replaces `self.data` before the
        computation
    method: str,
        method for computing moving average, can be one of
        - "sma", "simple", "simple moving average"
        - "ema", "ewma", "exponential", "exponential weighted", "exponential moving average", "exponential weighted moving average"
        - "cma", "cumulative", "cumulative moving average"
        - "wma", "weighted", "weighted moving average"
    kwargs: forwarded to the selected method (e.g. `window`, `center`)

    Returns
    -------
    ndarray,
        the moving average series
    """
    # normalize e.g. "simple_moving_average" to "simple moving average"
    m = method.lower().replace("_", " ")
    if m in ["sma", "simple", "simple moving average"]:
        func = self._sma
    elif m in ["ema", "ewma", "exponential", "exponential weighted", "exponential moving average", "exponential weighted moving average"]:
        func = self._ema
    elif m in ["cma", "cumulative", "cumulative moving average"]:
        func = self._cma
    elif m in ["wma", "weighted", "weighted moving average"]:
        func = self._wma
    else:
        raise NotImplementedError
    if data is not None:
        self.data = np.array(data)
    return func(**kwargs)
def _sma(self, window:int=5, center:bool=False, **kwargs:Any) -> np.ndarray:
"""
simple moving average
Parameters
----------
window: int, | |
from_json_dict(d):
sbp = SBP.from_json_dict(d)
return MsgBaselineNED(sbp, **d)
def from_binary(self, d):
    """Given a binary payload d, update the appropriate payload fields of
    the message.
    """
    # parse with the construct-based parser, then mirror every parsed
    # field onto this object's __slots__ attributes
    p = MsgBaselineNED._parser.parse(d)
    for n in self.__class__.__slots__:
        setattr(self, n, getattr(p, n))
def to_binary(self):
    """Produce a framed/packed SBP message.
    """
    # collect the slot fields into a construct container, rebuild the raw
    # payload bytes, then frame the whole message via the inherited pack()
    c = containerize(exclude_fields(self))
    self.payload = MsgBaselineNED._parser.build(c)
    return self.pack()
def into_buffer(self, buf, offset):
    """Produce a framed/packed SBP message into the provided buffer and offset.
    """
    # unlike to_binary(), the payload is built lazily by _build_payload
    # through the stream writer, directly into the caller's buffer
    self.payload = containerize(exclude_fields(self))
    self.parser = MsgBaselineNED._parser
    self.stream_payload.reset(buf, offset)
    return self.pack_into(buf, offset, self._build_payload)
def to_json_dict(self):
    """Return a JSON-serializable dict of header and payload fields."""
    # refresh payload/CRC first so the inherited header dict reflects the
    # current field values, then overlay the walked payload fields
    self.to_binary()
    d = super( MsgBaselineNED, self).to_json_dict()
    j = walk_json_dict(exclude_fields(self))
    d.update(j)
    return d
SBP_MSG_VEL_ECEF = 0x020D
class MsgVelECEF(SBP):
    """SBP class for message MSG_VEL_ECEF (0x020D).

    You can have MSG_VEL_ECEF inherit its fields directly
    from an inherited SBP object, or construct it inline using a dict
    of its fields.

    This message reports the velocity in Earth Centered Earth Fixed (ECEF)
    coordinates. The full GPS time is given by the preceding MSG_GPS_TIME with
    the matching time-of-week (tow).

    Parameters
    ----------
    sbp : SBP
        SBP parent object to inherit from.
    tow : int
        GPS Time of Week
    x : int
        Velocity ECEF X coordinate
    y : int
        Velocity ECEF Y coordinate
    z : int
        Velocity ECEF Z coordinate
    accuracy : int
        Velocity estimated standard deviation
    n_sats : int
        Number of satellites used in solution
    flags : int
        Status flags
    sender : int
        Optional sender ID, defaults to SENDER_ID (see sbp/msg.py).
    """
    # wire layout; all multi-byte fields are little-endian (construct
    # `*l` types), matching the other velocity messages in this module
    _parser = construct.Struct(
        'tow' / construct.Int32ul,
        'x' / construct.Int32sl,
        'y' / construct.Int32sl,
        'z' / construct.Int32sl,
        'accuracy' / construct.Int16ul,
        'n_sats' / construct.Int8ul,
        'flags' / construct.Int8ul,)
    __slots__ = [
        'tow',
        'x',
        'y',
        'z',
        'accuracy',
        'n_sats',
        'flags',
    ]

    def __init__(self, sbp=None, **kwargs):
        # wrapping an existing frame: copy its header and decode the payload;
        # otherwise every field must be supplied through kwargs
        if sbp:
            super( MsgVelECEF,
                   self).__init__(sbp.msg_type, sbp.sender, sbp.length,
                                  sbp.payload, sbp.crc)
            self.from_binary(sbp.payload)
        else:
            super( MsgVelECEF, self).__init__()
            self.msg_type = SBP_MSG_VEL_ECEF
            self.sender = kwargs.pop('sender', SENDER_ID)
            self.tow = kwargs.pop('tow')
            self.x = kwargs.pop('x')
            self.y = kwargs.pop('y')
            self.z = kwargs.pop('z')
            self.accuracy = kwargs.pop('accuracy')
            self.n_sats = kwargs.pop('n_sats')
            self.flags = kwargs.pop('flags')

    def __repr__(self):
        return fmt_repr(self)

    @staticmethod
    def from_json(s):
        """Given a JSON-encoded string s, build a message object.
        """
        d = json.loads(s)
        return MsgVelECEF.from_json_dict(d)

    @staticmethod
    def from_json_dict(d):
        # the generic SBP header is parsed first, the remaining dict
        # entries become the message fields
        sbp = SBP.from_json_dict(d)
        return MsgVelECEF(sbp, **d)

    def from_binary(self, d):
        """Given a binary payload d, update the appropriate payload fields of
        the message.
        """
        p = MsgVelECEF._parser.parse(d)
        for n in self.__class__.__slots__:
            setattr(self, n, getattr(p, n))

    def to_binary(self):
        """Produce a framed/packed SBP message.
        """
        c = containerize(exclude_fields(self))
        self.payload = MsgVelECEF._parser.build(c)
        return self.pack()

    def into_buffer(self, buf, offset):
        """Produce a framed/packed SBP message into the provided buffer and offset.
        """
        self.payload = containerize(exclude_fields(self))
        self.parser = MsgVelECEF._parser
        self.stream_payload.reset(buf, offset)
        return self.pack_into(buf, offset, self._build_payload)

    def to_json_dict(self):
        """Return a JSON-serializable dict of header and payload fields."""
        self.to_binary()
        d = super( MsgVelECEF, self).to_json_dict()
        j = walk_json_dict(exclude_fields(self))
        d.update(j)
        return d
SBP_MSG_VEL_ECEF_COV = 0x0215
class MsgVelECEFCov(SBP):
    """SBP class for message MSG_VEL_ECEF_COV (0x0215).

    You can have MSG_VEL_ECEF_COV inherit its fields directly
    from an inherited SBP object, or construct it inline using a dict
    of its fields.

    This message reports the velocity in Earth Centered Earth Fixed (ECEF)
    coordinates. The full GPS time is given by the preceding MSG_GPS_TIME with
    the matching time-of-week (tow).

    Parameters
    ----------
    sbp : SBP
        SBP parent object to inherit from.
    tow : int
        GPS Time of Week
    x : int
        Velocity ECEF X coordinate
    y : int
        Velocity ECEF Y coordinate
    z : int
        Velocity ECEF Z coordinate
    cov_x_x : float
        Estimated variance of x
    cov_x_y : float
        Estimated covariance of x and y
    cov_x_z : float
        Estimated covariance of x and z
    cov_y_y : float
        Estimated variance of y
    cov_y_z : float
        Estimated covariance of y and z
    cov_z_z : float
        Estimated variance of z
    n_sats : int
        Number of satellites used in solution
    flags : int
        Status flags
    sender : int
        Optional sender ID, defaults to SENDER_ID (see sbp/msg.py).
    """
    # wire layout: same as MSG_VEL_ECEF plus the six upper-triangular
    # covariance terms as little-endian float32
    _parser = construct.Struct(
        'tow' / construct.Int32ul,
        'x' / construct.Int32sl,
        'y' / construct.Int32sl,
        'z' / construct.Int32sl,
        'cov_x_x' / construct.Float32l,
        'cov_x_y' / construct.Float32l,
        'cov_x_z' / construct.Float32l,
        'cov_y_y' / construct.Float32l,
        'cov_y_z' / construct.Float32l,
        'cov_z_z' / construct.Float32l,
        'n_sats' / construct.Int8ul,
        'flags' / construct.Int8ul,)
    __slots__ = [
        'tow',
        'x',
        'y',
        'z',
        'cov_x_x',
        'cov_x_y',
        'cov_x_z',
        'cov_y_y',
        'cov_y_z',
        'cov_z_z',
        'n_sats',
        'flags',
    ]

    def __init__(self, sbp=None, **kwargs):
        # wrapping an existing frame: copy its header and decode the payload;
        # otherwise every field must be supplied through kwargs
        if sbp:
            super( MsgVelECEFCov,
                   self).__init__(sbp.msg_type, sbp.sender, sbp.length,
                                  sbp.payload, sbp.crc)
            self.from_binary(sbp.payload)
        else:
            super( MsgVelECEFCov, self).__init__()
            self.msg_type = SBP_MSG_VEL_ECEF_COV
            self.sender = kwargs.pop('sender', SENDER_ID)
            self.tow = kwargs.pop('tow')
            self.x = kwargs.pop('x')
            self.y = kwargs.pop('y')
            self.z = kwargs.pop('z')
            self.cov_x_x = kwargs.pop('cov_x_x')
            self.cov_x_y = kwargs.pop('cov_x_y')
            self.cov_x_z = kwargs.pop('cov_x_z')
            self.cov_y_y = kwargs.pop('cov_y_y')
            self.cov_y_z = kwargs.pop('cov_y_z')
            self.cov_z_z = kwargs.pop('cov_z_z')
            self.n_sats = kwargs.pop('n_sats')
            self.flags = kwargs.pop('flags')

    def __repr__(self):
        return fmt_repr(self)

    @staticmethod
    def from_json(s):
        """Given a JSON-encoded string s, build a message object.
        """
        d = json.loads(s)
        return MsgVelECEFCov.from_json_dict(d)

    @staticmethod
    def from_json_dict(d):
        # the generic SBP header is parsed first, the remaining dict
        # entries become the message fields
        sbp = SBP.from_json_dict(d)
        return MsgVelECEFCov(sbp, **d)

    def from_binary(self, d):
        """Given a binary payload d, update the appropriate payload fields of
        the message.
        """
        p = MsgVelECEFCov._parser.parse(d)
        for n in self.__class__.__slots__:
            setattr(self, n, getattr(p, n))

    def to_binary(self):
        """Produce a framed/packed SBP message.
        """
        c = containerize(exclude_fields(self))
        self.payload = MsgVelECEFCov._parser.build(c)
        return self.pack()

    def into_buffer(self, buf, offset):
        """Produce a framed/packed SBP message into the provided buffer and offset.
        """
        self.payload = containerize(exclude_fields(self))
        self.parser = MsgVelECEFCov._parser
        self.stream_payload.reset(buf, offset)
        return self.pack_into(buf, offset, self._build_payload)

    def to_json_dict(self):
        """Return a JSON-serializable dict of header and payload fields."""
        self.to_binary()
        d = super( MsgVelECEFCov, self).to_json_dict()
        j = walk_json_dict(exclude_fields(self))
        d.update(j)
        return d
SBP_MSG_VEL_NED = 0x020E
class MsgVelNED(SBP):
    """SBP class for message MSG_VEL_NED (0x020E).

    You can have MSG_VEL_NED inherit its fields directly
    from an inherited SBP object, or construct it inline using a dict
    of its fields.

    This message reports the velocity in local North East Down (NED)
    coordinates. The NED coordinate system is defined as the local WGS84 tangent
    plane centered at the current position. The full GPS time is given by the
    preceding MSG_GPS_TIME with the matching time-of-week (tow).

    Parameters
    ----------
    sbp : SBP
        SBP parent object to inherit from.
    tow : int
        GPS Time of Week
    n : int
        Velocity North coordinate
    e : int
        Velocity East coordinate
    d : int
        Velocity Down coordinate
    h_accuracy : int
        Horizontal velocity estimated standard deviation
    v_accuracy : int
        Vertical velocity estimated standard deviation
    n_sats : int
        Number of satellites used in solution
    flags : int
        Status flags
    sender : int
        Optional sender ID, defaults to SENDER_ID (see sbp/msg.py).
    """
    # wire layout; all multi-byte fields are little-endian (construct
    # `*l` types); accuracies are split horizontal/vertical here, unlike
    # the single `accuracy` field of MSG_VEL_ECEF
    _parser = construct.Struct(
        'tow' / construct.Int32ul,
        'n' / construct.Int32sl,
        'e' / construct.Int32sl,
        'd' / construct.Int32sl,
        'h_accuracy' / construct.Int16ul,
        'v_accuracy' / construct.Int16ul,
        'n_sats' / construct.Int8ul,
        'flags' / construct.Int8ul,)
    __slots__ = [
        'tow',
        'n',
        'e',
        'd',
        'h_accuracy',
        'v_accuracy',
        'n_sats',
        'flags',
    ]

    def __init__(self, sbp=None, **kwargs):
        # wrapping an existing frame: copy its header and decode the payload;
        # otherwise every field must be supplied through kwargs
        if sbp:
            super( MsgVelNED,
                   self).__init__(sbp.msg_type, sbp.sender, sbp.length,
                                  sbp.payload, sbp.crc)
            self.from_binary(sbp.payload)
        else:
            super( MsgVelNED, self).__init__()
            self.msg_type = SBP_MSG_VEL_NED
            self.sender = kwargs.pop('sender', SENDER_ID)
            self.tow = kwargs.pop('tow')
            self.n = kwargs.pop('n')
            self.e = kwargs.pop('e')
            self.d = kwargs.pop('d')
            self.h_accuracy = kwargs.pop('h_accuracy')
            self.v_accuracy = kwargs.pop('v_accuracy')
            self.n_sats = kwargs.pop('n_sats')
            self.flags = kwargs.pop('flags')

    def __repr__(self):
        return fmt_repr(self)

    @staticmethod
    def from_json(s):
        """Given a JSON-encoded string s, build a message object.
        """
        d = json.loads(s)
        return MsgVelNED.from_json_dict(d)

    @staticmethod
    def from_json_dict(d):
        # the generic SBP header is parsed first, the remaining dict
        # entries become the message fields
        sbp = SBP.from_json_dict(d)
        return MsgVelNED(sbp, **d)

    def from_binary(self, d):
        """Given a binary payload d, update the appropriate payload fields of
        the message.
        """
        p = MsgVelNED._parser.parse(d)
        for n in self.__class__.__slots__:
            setattr(self, n, getattr(p, n))

    def to_binary(self):
        """Produce a framed/packed SBP message.
        """
        c = containerize(exclude_fields(self))
        self.payload = MsgVelNED._parser.build(c)
        return self.pack()

    def into_buffer(self, buf, offset):
        """Produce a framed/packed SBP message into the provided buffer and offset.
        """
        self.payload = containerize(exclude_fields(self))
        self.parser = MsgVelNED._parser
        self.stream_payload.reset(buf, offset)
        return self.pack_into(buf, offset, self._build_payload)

    def to_json_dict(self):
        """Return a JSON-serializable dict of header and payload fields."""
        self.to_binary()
        d = super( MsgVelNED, self).to_json_dict()
        j = walk_json_dict(exclude_fields(self))
        d.update(j)
        return d
SBP_MSG_VEL_NED_COV = 0x0212
class MsgVelNEDCov(SBP):
"""SBP class for message MSG_VEL_NED_COV (0x0212).
You can have MSG_VEL_NED_COV inherit its fields directly
from an inherited SBP object, or construct it inline using a dict
of its fields.
This message reports the velocity in local North East Down (NED)
coordinates. The NED coordinate system is defined as the local WGS84 tangent
plane centered at the current position. The full GPS time is given by the
preceding MSG_GPS_TIME with the matching time-of-week (tow). This message is
similar to the | |
metaseq(self.interleaved_wmma_shape[0], 1)
self.lds_iterations = metaseq(
warp_tile_shape_km[1] // self.lds_shape[0], 1)
self.stride_in_access = tile_shape_km[1] // self.element_per_acc
self.add_member("pointer_", self.const_access_pointer)
self.add_member("byte_offset_, wmma_k_index_", self.index_t)
# cudasim members
self.pointer_: ArrayPtr = None
self.byte_offset_ = -1
self.wmma_k_index_ = -1
@pccm.cuda.constructor(device=True, forceinline=True)
def ctor(self):
    """Generate the device-side constructor.

    Derives this lane's swizzled starting byte offset from its lane id
    (the swizzle differs between the left and right operand -- see the
    swizzle-id comments in the emitted CUDA) and applies the warp's
    sub-tile offset. Mirrored by `python_ctor` for cudasim.
    """
    code = pccm.code()
    code.arg("ptr", self.pointer)
    code.arg("warp_idx_k, warp_idx_mn, lane_idx", "int")
    code.ctor_init("wmma_k_index_", "0")
    code.raw(f"""
    int quad = (lane_idx / 4);
    int lane_in_quad = (lane_idx % 4);
    int access_contiguous;
    """)
    if self.left:
        code.raw("""
        // swizzle id: tid[4]|tid[1:0]|(tid[2]^tid[4])
        access_contiguous = ((quad & 0x4) << 1) + ((lane_in_quad) << 1) +
            ((quad & 0x1) ^ ((quad & 0x4) >> 2));
        """)
    else:
        code.raw("""
        // swizzle id: tid[4]|tid[1:0]|tid[3]
        access_contiguous = ((quad & 0x4) << 1) + (lane_in_quad << 1) +
            ((quad & 0x2) >> 1 ^ ((quad & 0x4) >> 2));
        """)
    code.raw(f"""
    byte_offset_ = access_contiguous * sizeof({self.dtype}) * {self.element_per_acc};
    pointer_ = reinterpret_cast<{self.const_access_pointer}>(ptr);
    add_warp_offset(warp_idx_k, warp_idx_mn);
    """)
    return code
async def python_ctor(self, ptr: ArrayPtr, warp_idx_k: int,
                      warp_idx_mn: int, lane_idx: int):
    """cudasim counterpart of `ctor`: builds a fresh iterator and computes
    the same per-lane swizzled offset as the generated CUDA constructor.
    """
    new_obj = VoltaWarpTileIteratorCrosswise(self.dtype,
                                             self.tile_shape_km,
                                             self.warp_tile_shape_km,
                                             self.left)
    quad = lane_idx // 4
    lane_in_quad = (lane_idx % 4)
    if self.left:
        # swizzle id: tid[4]|tid[1:0]|(tid[2]^tid[4])
        access_contiguous = (((quad & 0x4) << 1) + ((lane_in_quad) << 1) +
                             ((quad & 0x1) ^ ((quad & 0x4) >> 2)))
    else:
        # swizzle id: tid[4]|tid[1:0]|tid[3]
        access_contiguous = (((quad & 0x4) << 1) + (lane_in_quad << 1) +
                             ((quad & 0x2) >> 1 ^ ((quad & 0x4) >> 2)))
    new_obj.byte_offset_ = access_contiguous * self.dtype.itemsize(
    ) * self.element_per_acc
    new_obj.pointer_ = ptr.change_access_size(self.element_per_acc)
    new_obj.add_warp_offset_python(warp_idx_k, warp_idx_mn)
    return new_obj
@pccm.cuda.member_function(device=True, forceinline=True)
def add_warp_offset(self):
    """Emit code that advances `pointer_` to this warp's (k, mn) sub-tile.
    Mirrored by `add_warp_offset_python` for cudasim."""
    code = pccm.FunctionCode(f"""
    int mn_offset = warp_idx_mn;
    int k_offset = {self.num_warp_gemm_iters} * warp_idx_k;
    // kTileShapeKM: [K, M|N]
    // TODO better offset
    auto offset = k_offset * {self.interleaved_wmma_shape[2]} * {self.stride_in_access} +
        mn_offset * {self.warp_tile_shape_km[1]} * 4 / {self.element_per_acc};
    // printf2_block_once(threadIdx.x, offset);
    pointer_ += offset;
    """)
    return code.arg("warp_idx_k, warp_idx_mn", "int")
def add_warp_offset_python(self, warp_idx_k, warp_idx_mn):
    """cudasim counterpart of `add_warp_offset` (same arithmetic, applied
    to the simulated pointer)."""
    mn_offset = warp_idx_mn
    k_offset = self.num_warp_gemm_iters * warp_idx_k
    # kTileShapeKM: [K, M|N]
    # TODO better offset
    offset = (
        k_offset * self.interleaved_wmma_shape[2] * self.stride_in_access +
        mn_offset * self.warp_tile_shape_km[1] * 4 // self.element_per_acc)
    # printf2_block_once(threadIdx.x, offset)
    self.pointer_ += offset
@pccm.cuda.member_function(device=True, forceinline=True)
def tile_increment(self):
    """Emit code advancing the iterator by `num` whole tiles; the
    intra-tile k index is reset because (per the emitted comment) this is
    only called when rewinding the warp iterator to a tile start."""
    code = pccm.FunctionCode(f"""
    // this function is only called when move warp iter back to start offset.
    // so we need to reset wmma_k_index_
    wmma_k_index_ = 0;
    // tv::printf2_block_once("tile_increment_warp", threadIdx.x, kLineSize * num, kLineSize, num);
    pointer_ += {self.line_size} * num;
    """)
    return code.arg("num", "int")
def tile_increment_python(self, num: int):
    """cudasim counterpart of `tile_increment`: reset the k index and
    advance the pointer by `num` tiles."""
    self.wmma_k_index_ = 0
    self.pointer_ += self.line_size * num
@pccm.cuda.member_function(name="operator++",
                           device=True,
                           forceinline=True)
def operator_pp(self):
    """Emit `operator++`: advance one k step (mod 8); on every wrap of a
    4-step half-group the byte offset is XOR-toggled to follow the k=4..7
    pointer permutation. Mirrored by `increment_python` for cudasim."""
    code = pccm.FunctionCode(f"""
    wmma_k_index_ = (wmma_k_index_ + 1) & 7;
    // handle permute (i)
    if (wmma_k_index_ == 4 || wmma_k_index_ == 0) {{
        // ptr swapped in k = 4-7, so we 'swap' ptr here.
        // byte_offset_ -=(+=) self.sizeof_element * self.kElementsPerAccess
        byte_offset_ ^= 1 * sizeof({self.dtype}) * {self.element_per_acc};
    }}
    pointer_ += {self.line_size};
    return *this;
    """)
    return code.ret(f"{self.class_name} &")
def increment_python(self):
    """cudasim counterpart of `operator++` (same k-index wrap and byte
    offset toggle)."""
    self.wmma_k_index_ = (self.wmma_k_index_ + 1) & 7
    if (self.wmma_k_index_ == 4 or self.wmma_k_index_ == 0):
        self.byte_offset_ ^= 1 * self.dtype.itemsize(
        ) * self.element_per_acc
    self.pointer_ += self.line_size
    return self
@pccm.cuda.member_function(device=True, forceinline=True)
def load_with_pointer_offset(self):
    """Emit the fragment load: one access per (s, c) LDS iteration, and --
    when in the second half of a k-group (wmma_k_index_ & 0x2) -- the two
    64-bit halves of each loaded access are swapped in place.
    Mirrored by `load_with_pointer_offset_python` for cudasim."""
    code = pccm.FunctionCode(f"""
    {self.index_t} byte_offset = pointer_offset * sizeof({self.dtype});
    {self.access_pointer} dst_ptr = reinterpret_cast<{self.access_pointer}>(&frag);
    // kRow: 1, kCol: 2
    TV_PRAGMA_UNROLL
    for (int s = 0; s < {self.lds_iterations[0]}; ++s) {{
        TV_PRAGMA_UNROLL
        for (int c = 0; c < {self.lds_iterations[1]}; ++c) {{
            int idx = c + s * {self.lds_iterations[1]};
            {self.const_access_pointer} source_ptr =
                pointer_ + {self.lds_shape[1]} * c * {self.line_size} + {self.lds_shape[0]} * s / 2;
            char const *source_byte_ptr =
                reinterpret_cast<char const *>(source_ptr) + byte_offset +
                byte_offset_;
            dst_ptr[idx] = *(reinterpret_cast<{self.const_access_pointer}>(source_byte_ptr));
            if (wmma_k_index_ & 0x2) {{
                uint64_t *low = reinterpret_cast<uint64_t *>(&frag) + idx * 2;
                uint64_t *high = reinterpret_cast<uint64_t *>(&frag) + idx * 2 + 1;
                uint64_t tmp = *low;
                *low = *high;
                *high = tmp;
            }}
        }}
    }}
    """)
    code.arg("frag", f"{self.fragment_t}&").arg("pointer_offset",
                                                str(self.index_t))
    return code
async def load_with_pointer_offset_python(self, frag: ArrayPtr,
                                          pointer_offset: int):
    """cudasim counterpart of `load_with_pointer_offset`.

    Performs the same per-iteration loads (including the 64-bit half swap
    for the second half of a k-group) and additionally records the byte
    addresses read into `ptr_addrs`, running the shared-memory
    bank-conflict checker on every access.
    """
    byte_offset = pointer_offset * self.dtype.itemsize()
    dst_ptr = frag.change_access_size(self.element_per_acc)
    # one entry per loaded element: the simulated smem address it came from
    ptr_addrs = np.zeros((frag.length, ), dtype=np.int32)
    for s in range(self.lds_iterations[0]):
        for c in range(self.lds_iterations[1]):
            idx = c + s * self.lds_iterations[1]  # type: int
            source_ptr = self.pointer_ + self.lds_shape[
                1] * c * self.line_size + self.lds_shape[
                    0] * s // 2  # type: ArrayPtr
            source_byte_ptr = source_ptr.change_access_byte_size(
                1) + byte_offset + self.byte_offset_
            dst_ptr[idx] = source_byte_ptr.change_access_size(
                self.element_per_acc)[0]
            ptr_addrs[idx * dst_ptr.access_size:(idx + 1) *
                      dst_ptr.access_size] = np.arange(
                          source_byte_ptr.offset,
                          source_byte_ptr.offset + dst_ptr.access_size)
            await checkers.smem_bank_conflicit_check(
                source_byte_ptr.change_access_size(self.element_per_acc),
                0)
            if self.wmma_k_index_ & 0x2:
                # swap the two 64-bit halves of this access, as the CUDA
                # version does with the low/high uint64_t pointers
                frag_uint64 = frag.change_access_byte_size(8)
                low = frag_uint64 + idx * 2
                high = frag_uint64 + idx * 2 + 1
                tmp = low.copy()
                low[0] = high[0]
                high[0] = tmp[0]
    return ptr_addrs
@pccm.cuda.member_function(device=True, forceinline=True)
def load(self):
    """Emit `load`: convenience wrapper delegating to
    `load_with_pointer_offset` with a zero offset."""
    code = pccm.FunctionCode(f"""
    load_with_pointer_offset(frag, 0);
    """)
    code.arg("frag", f"{self.fragment_t}&")
    return code
async def load_python(self, frag: ArrayPtr):
    """cudasim counterpart of `load`: zero-offset load."""
    return await self.load_with_pointer_offset_python(frag, 0)
@pccm.cuda.member_function(device=True, forceinline=True)
def set_kgroup_index(self):
    """Emit a setter that positions the iterator at an absolute k-group
    index."""
    code = pccm.FunctionCode("wmma_k_index_ = wmma_k;")
    code.arg("wmma_k", "int")
    return code
def set_wmma_k_index_python(self, wmma_k: int):
    """cudasim counterpart of `set_kgroup_index`."""
    self.wmma_k_index_ = wmma_k
class VoltaWarpTileIteratorCongruous(bases.GemmWarpIterator):
def __init__(self,
             dtype: dtypes.DType,
             tile_shape_km: MetaArray[int],
             warp_tile_shape_km: MetaArray[int],
             left: bool = False):
    """Warp-level tile iterator for the Volta wmma path, congruous layout.

    Parameters
    ----------
    dtype: dtypes.DType
        element dtype of the operand tile
    tile_shape_km: MetaArray[int]
        full tile shape as [K, M|N]
    warp_tile_shape_km: MetaArray[int]
        per-warp tile shape as [K, M|N]
    left: bool, default False
        selects the left operand's swizzle/iteration scheme
        (presumably operand A vs B -- TODO confirm)
    """
    self.interleaved_wmma_shape = metaseq(32, 32, 4)
    # left operand advances along axis 0 (K), right along axis 1 (M|N)
    self.advance_axis = 0 if left else 1
    self.inst_shape = metaseq(16, 16, 4)
    element_count = warp_tile_shape_km[1] // self.interleaved_wmma_shape[
        self.advance_axis]
    element_count *= self.interleaved_wmma_shape[2]
    element_count *= self.interleaved_wmma_shape[
        self.advance_axis] // self.inst_shape[self.advance_axis]
    super().__init__(dtype, element_count, 8)
    self.add_dependency(TensorViewNVRTC, GemmBasicKernel)
    self.tile_shape_km = tile_shape_km
    self.warp_tile_shape_km = warp_tile_shape_km
    self.left = left
    self.num_warp_gemm_iters = warp_tile_shape_km[
        0] // self.interleaved_wmma_shape[2]
    self.contig_element_per_line = 4
    self.stride_of_volta_block = self.contig_element_per_line
    self.line_size = tile_shape_km[
        1] * self.stride_of_volta_block // self.element_per_acc
    self.lds_shape = metaseq(self.interleaved_wmma_shape[2],
                             self.interleaved_wmma_shape[0])
    self.lds_iterations = metaseq(
        warp_tile_shape_km[1] // self.lds_shape[1],
        self.interleaved_wmma_shape[2] // self.lds_shape[0])
    if not left:
        # the right operand walks the two LDS axes in the opposite order
        self.lds_iterations = self.lds_iterations[::-1]  # type: MetaArray[int]
    # the left operand keeps two pre-swizzled pointers, the right one
    self.pointer_count = 2 if left else 1
    self.stride_in_access = tile_shape_km[1] // self.element_per_acc
    self.add_member("pointers_",
                    self.const_access_pointer,
                    array=f"[{self.pointer_count}]")
    self.add_member("byte_offset_", self.index_t)
    # cudasim members
    self.pointers_: List[ArrayPtr] = [None] * self.pointer_count
    self.byte_offset_ = -1
@pccm.cuda.constructor(device=True, forceinline=True)
def ctor(self):
    """Generate the device-side constructor: initialize the per-lane
    swizzled pointer(s) -- two for the left operand, one for the right --
    then apply the warp's sub-tile offset. Mirrored by `python_ctor`."""
    code = pccm.code()
    code.arg("ptr", self.pointer)
    code.arg("warp_idx_k, warp_idx_mn, lane_idx", "int")
    if self.left:
        code.raw(f"""
        int vec_row = (lane_idx >> 4); // tid[4]
        int vec_col = ((lane_idx & 4) >> 2); // tid[2]
        TV_PRAGMA_UNROLL
        for (int i = 0; i < {self.pointer_count}; ++i) {{
            if (i == 1) {{
                vec_row |= 2;
            }}
            int access_contiguous_idx = (vec_col << 2) | ((lane_idx & 3) ^ vec_row);
            int access_contiguous = access_contiguous_idx;
            int access_strided = vec_row;
            pointers_[i] = reinterpret_cast<{self.const_access_pointer}>(ptr) +
                access_contiguous + access_strided * {self.stride_in_access};
        }}
        """)
    else:
        code.raw(f"""
        int access_strided = (lane_idx >> 3) & 0x3;
        int access_contiguous = ((lane_idx ^ (lane_idx >> 3)) & 0x3);
        pointers_[0] = reinterpret_cast<{self.const_access_pointer}>(ptr) +
            access_contiguous + access_strided * {self.stride_in_access};
        """)
    code.raw(f"""
    add_warp_offset(warp_idx_k, warp_idx_mn);
    """)
    return code
async def python_ctor(self, ptr: ArrayPtr, warp_idx_k: int,
                      warp_idx_mn: int, lane_idx: int):
    """cudasim counterpart of `ctor`: same pointer swizzle arithmetic,
    applied to simulated ArrayPtr pointers."""
    new_obj = VoltaWarpTileIteratorCongruous(self.dtype,
                                             self.tile_shape_km,
                                             self.warp_tile_shape_km,
                                             self.left)
    if self.left:
        vec_row = (lane_idx >> 4)  # tid[4]
        vec_col = ((lane_idx & 4) >> 2)  # tid[2]
        for i in range(self.pointer_count):
            if (i == 1):
                vec_row |= 2
            access_contiguous_idx = (vec_col << 2) | (
                (lane_idx & 3) ^ vec_row)
            access_contiguous = access_contiguous_idx
            access_strided = vec_row
            new_obj.pointers_[i] = (
                ptr.change_access_size(self.element_per_acc) +
                access_contiguous +
                access_strided * new_obj.stride_in_access)
    else:
        access_strided = (lane_idx >> 3) & 0x3
        access_contiguous = ((lane_idx ^ (lane_idx >> 3)) & 0x3)
        new_obj.pointers_[0] = (
            ptr.change_access_size(self.element_per_acc) +
            access_contiguous + access_strided * new_obj.stride_in_access)
    new_obj.byte_offset_ = 0
    new_obj.add_warp_offset_python(warp_idx_k, warp_idx_mn)
    return new_obj
@pccm.cuda.member_function(device=True, forceinline=True)
def add_warp_offset(self):
    """Emit code advancing all pointer(s) to this warp's (k, mn) sub-tile;
    for the left operand with a single-LDS-wide warp tile the two
    pre-swizzled pointers are swapped for odd mn warps.
    Mirrored by `add_warp_offset_python` for cudasim."""
    code = pccm.FunctionCode(f"""
    int mn_offset = warp_idx_mn;
    int k_offset = {self.num_warp_gemm_iters} * warp_idx_k;
    // TODO why?
    if ({pccm.boolean(self.left)}) {{
        if ({self.warp_tile_shape_km[1]} == {self.lds_shape[1]}) {{
            if (mn_offset % 2) {{
                auto tmp_pointer = pointers_[0];
                pointers_[0] = pointers_[1];
                pointers_[1] = tmp_pointer;
            }}
            mn_offset = mn_offset / 2 * 2;
        }}
    }}
    auto offset = k_offset * {self.stride_in_access} * {self.interleaved_wmma_shape[2]} *
        {self.element_per_acc} +
        mn_offset * {self.warp_tile_shape_km[1]};
    // if (!Left){{
    //   tv::printf2_block_once(threadIdx.x, offset);
    // }}
    TV_PRAGMA_UNROLL
    for (int i = 0; i < {self.pointer_count}; ++i) {{
        pointers_[i] += offset / {self.element_per_acc};
    }}
    """)
    return code.arg("warp_idx_k, warp_idx_mn", "int")
def add_warp_offset_python(self, warp_idx_k, warp_idx_mn):
mn_offset = warp_idx_mn
k_offset = self.num_warp_gemm_iters * warp_idx_k
if self.left:
if (self.warp_tile_shape_km[1] == self.lds_shape[1]):
if (mn_offset % 2):
tmp_pointer = self.pointers_[0]
self.pointers_[0] = self.pointers_[1]
self.pointers_[1] = tmp_pointer
mn_offset = mn_offset // 2 * 2
offset = (k_offset * self.stride_in_access *
self.interleaved_wmma_shape[2] * self.element_per_acc +
mn_offset * self.warp_tile_shape_km[1])
| |
np.zeros((nr_mol - 1, 3))
for i, index in enumerate(to_be_added):
configset_cog[i, :] = resgroup[index].atoms.center(None)
while nr_added < nr_mol:
# Find indices of nearest neighbours of a) res in micelle
# (imin) and b) res not yet in micelle (jmin). Indices
# w.r.t. resgroup
imin, jmin = self._unwrap_ns(
refset_cog, configset_cog, added, to_be_added, box
)
# Translate jmin by an appropriate vector if separated by a
# pbc from rest of micelle.
cog_i = resgroup[imin].atoms.center(weights=weights)
cog_j = resgroup[jmin].atoms.center(weights=weights)
dx = self._pbc(cog_j, cog_i, box)
xtest = cog_i + dx
shift = xtest - cog_j
if np.dot(shift, shift) > 1e-8:
if verbosity > 0.5:
print(
"Shifting molecule {:d} by {:.2f}, {:.2f}, {:.2f}".format(
jmin, shift[0], shift[1], shift[2]
)
)
for atom in resgroup[jmin].atoms:
atom.position += shift
cog_j += shift
# Add added res COG to res already in micelle
refset_cog = np.vstack((refset_cog, cog_j))
nr_added += 1
added.append(jmin)
# Remove added res from res not already in micelle.
_index = to_be_added.index(jmin)
configset_cog = np.delete(configset_cog, _index, 0)
del to_be_added[_index]
def _unwrap_ns(
self, refset_cog, configset_cog, added, to_be_added, box, method="pkdtree"
):
"""
Find NN in refset_cog and configset_cog and pass back
the indices stored in added and to_be added.
"""
distances = []
dist = 8.0
while len(distances) < 1:
pairs, distances = MDAnalysis.lib.distances.capped_distance(
refset_cog,
configset_cog,
dist,
box=box,
method=method,
return_distances=True,
)
dist += 0.5
minpair = np.where(distances == np.amin(distances))[0][0]
imin = added[pairs[minpair][0]]
jmin = to_be_added[pairs[minpair][1]]
return imin, jmin
@staticmethod
def _pbc(r1, r2, box):
# Rectangular boxes only
# Calculate fdiag, hdiag, mhdiag
fdiag = box[:3]
hdiag = fdiag / 2
dx = r1 - r2
# Loop over dims
for i in range(3):
# while loop upper limit: if dx > hdiag shift by - fdiag
while dx[i] > hdiag[i]:
dx[i] -= fdiag[i]
# while loop lower limit: if dx > hdiag shift by + fdiag
while dx[i] < -hdiag[i]:
dx[i] += fdiag[i]
return dx
    @staticmethod
    def _sort_eig(eig_val, eig_vec, reverse=False, test=False, tensor=False):
        """
        Sort eig_val in place so that the largest eigenvalue is last and
        the smallest first; commute the columns of eig_vec accordingly.
        With ``reverse=True`` the final order is descending instead.
        When ``test`` is truthy and ``tensor`` is an ndarray, verify
        A v = lambda v for every sorted pair.
        """
        # Selection sort over the three eigenvalues (largest moved to the
        # back on each pass).
        for i in range(2, 0, -1):
            # NOTE(review): the max is taken over the prefix eig_val[:i+1]
            # but the equality lookup scans the whole array; with exactly
            # degenerate eigenvalues the matched index could exceed i.
            # Presumably benign for generic spectra — verify if degenerate
            # tensors are expected.
            index = np.where(eig_val == np.max(eig_val[: i + 1]))[0][0]
            # Switch columns
            eig_vec[:, [i, index]] = eig_vec[:, [index, i]]
            eig_val[i], eig_val[index] = eig_val[index], eig_val[i]
        if test:
            if isinstance(tensor, np.ndarray):
                # Confirm each (eigenvalue, eigenvector) pair still matches
                # the tensor after the column swaps.
                for i in range(3):
                    t1 = np.matmul(tensor, eig_vec[:, i])
                    t2 = eig_val[i] * eig_vec[:, i]
                    if not np.allclose(t1, t2):
                        print(i, t1, t2)
                        raise RuntimeError("Eigenvector sorting gone wrong!")
            assert eig_val[2] >= eig_val[1]
            assert eig_val[1] >= eig_val[0]
        if reverse:
            # Flip to descending order (largest first).
            eig_vec[:, [0, 2]] = eig_vec[:, [2, 0]]
            eig_val[0], eig_val[2] = eig_val[2], eig_val[0]
        return eig_val, eig_vec
    @staticmethod
    def _gyration_tensor(cluster, weights, test=False):
        r"""
        Calculate the (unnormalised) gyration tensor, either unweighted
        (``weights=None``) or weighted, e.g. mass weighted (pass a vector
        of masses for that purpose):

            G_ab = \sum_i w_i r_a r_b    for a, b in {x, y, z}

        with r taken relative to ``cluster.atoms.center(weights)``.
        NOTE(review): no division by \sum_i w_i happens here — callers
        (gyration, inertia_tensor, rgyr) normalise the result themselves.
        """
        # Positions relative to the (weighted) center.
        r = np.subtract(cluster.atoms.positions, cluster.atoms.center(weights))
        if weights is None:
            weights = np.ones(r.shape)
        else:
            # Replicate the per-atom weight across the three coordinates.
            weights = np.broadcast_to(weights, (3, weights.shape[0])).transpose()
        if test:
            # Centered coordinates: the weighted sum must vanish.
            assert np.abs(np.sum(r * weights)) < 1e-7
        gyration_tensor = np.matmul(r.transpose(), r * weights)
        return gyration_tensor
def calc_f_factors(self, cluster, unwrap=False, test=False):
"""
Calculate eigenvalues of gryation tensor (see self.gyration())
and calculate f_32 and f_21 from their square roots:
f_32 = (Rg_33 - Rg_22) / Rg_33
f_21 = (Rg_22 - Rg_11) / Rg_33
Rg_33 is the eigenvalue belonging to the principal axis -- largest
value.
J. Phys. Chem. B 2014, 118, 3864−3880, and:
MOLECULAR SIMULATION 2020, VOL. 46, NO. 4, 308–322.
Parameters:
-----------
cluster: MDAnalysis.ResidueGroup
cluster on which to perform analysis on.
unwrap: bool, optional
Wether or not to unwrap cluster around pbc. Default False.
Returns:
--------
f-factors : tuple of float
f_32 and f_21, as defined above.
"""
rg_33, rg_22, rg_11 = np.sqrt(self.gyration(cluster, unwrap, test))
f_32 = (rg_33 - rg_22) / rg_33
f_21 = (rg_22 - rg_11) / rg_33
return (f_32, f_21)
def gyration(self, cluster, unwrap=False, test=False):
"""
Calculte the gyration tensor defined as:
Rg_ab = 1/N sum_i a_i*b_i ; a,b = {x,y,z}
The eigenvalues of these vector are helpful
to determine the shape of clusters. See:
J. Phys. Chem. B 2014, 118, 3864−3880, and:
MOLECULAR SIMULATION 2020, VOL. 46, NO. 4, 308–322.
Parameters:
-----------
cluster: MDAnalysis.ResidueGroup
cluster on which to perform analysis on.
unwrap: bool, optional
Wether or not to unwrap cluster around pbc. Default False.
Returns:
--------
eigenvalues : tuple of float
eigenvalues (Rg_11^2, Rg_22^2, Rg_33^2) of the gyration
tensor in nm, starting with the largest one corresponding to the
major axis (different than for inertia per gyration definiton).
"""
if unwrap:
self.unwrap_cluster(cluster)
gyration_tensor = self._gyration_tensor(cluster, None, test=test)
gyration_tensor /= cluster.n_residues
eig_val, eig_vec = np.linalg.eig(gyration_tensor)
# Sort eig_vals and vector
eig_val, eig_vec = self._sort_eig(
eig_val, eig_vec, reverse=True, test=test, tensor=gyration_tensor
)
# Return in nm^2
return eig_val / 100.0
def inertia_tensor(self, cluster, unwrap=False, test=True):
"""
Calculte the inertia tensor defined as:
Ig_ab = 1/M sum_i m_i*(r^2 d_ab - r_a*r_b)
with a,b = {x,y,z} and r = (x,y,z) a d_ab is the
kronecker delta. Basically mass weightes distance of a particle
from an axis.
The matrix is diagonalised and the eigenvalues are the
moment of inertia along the principal axis, where the smallest
value accompanies the major axis (the most mass is
close to this axis). The largest value accompanies the minor
axis.
Parameters:
-----------
cluster: MDAnalysis.ResidueGroup
cluster on which to perform analysis on.
unwrap: bool, optional
Wether or not to unwrap cluster around pbc. Default False.
test: bool, optional
Useful to compare some raw data with mdanalysis functions
on the fly for when you're not sure if you fucke something
up.
Returns:
--------
eigenvalue : tuple of float
Starting with the lowest value corresponding to the major axis.
"""
if unwrap:
self.unwrap_cluster(cluster)
masses = cluster.atoms.masses
inertia_tensor = self._gyration_tensor(cluster, masses, test=test)
trace = np.trace(inertia_tensor)
trace_array = trace * np.eye(3)
inertia_tensor = trace_array - inertia_tensor
if test:
assert np.sum(inertia_tensor - cluster.moment_of_inertia() < 1e-6)
inertia_tensor /= np.sum(cluster.masses)
eig_val, eig_vec = np.linalg.eig(inertia_tensor)
# Sort eig_vals and vector
eig_val, eig_vec = self._sort_eig(
eig_val, eig_vec, test=test, tensor=inertia_tensor
)
return eig_val / 100.0
def rgyr(
self, cluster, mass=False, components=True, pca=True, unwrap=False, test=False
):
"""
Calculate the radius of gyration with mass weightes or non
mass weighted units (along prinicipal components)
Rg = sqrt(sum_i mi(xi^2+yi^2+zi^2)/sum_i mi) if mass weighted
Rg = sqrt(sum_i (xi^2+yi^2+zi^2)/sum_i i) if not mass weighted
component rg defined like this:
rg_x = sqrt(sum_i mi(yi^2+zi^2)/sum_i mi),
Parameters
----------
cluster: MDAnalysis.ResidueGroup
cluster on which to perform analysis on.
mass : boolean, optional
wether or not to mass weight radii, by default False
components : boolean, optional
wether or not to calculate rgyr components, by default False
pca : booelan, optional
wether or not to calculate rgyr components w.r.t. principal
component vectors or not, by default True
unwrap : boolean, optional
wether or not to unwrap cluster around pbc, by default False
test : boolean, optional
wether or not to perform some sanity checks, by default False
Returns:
rg : float
radius of gyration
rg_i : floats, optional
If components is True, the components along x, y, z direction
and if pca is also true along the threeprincipal axis, starting
with the principal axis with the largest eigenvalue.
"""
if unwrap:
self.unwrap_cluster(cluster)
if mass:
weights = cluster.atoms.masses
else:
weights = np.ones(cluster.atoms.masses.shape)
gyration_tensor = self._gyration_tensor(cluster, weights, test=test)
# transform to nm
factor = 100.0 * sum(weights)
rg2 = np.trace(gyration_tensor)
rg2 /= factor
if components:
r = np.subtract(cluster.atoms.positions, cluster.atoms.center(weights))
if pca:
# Calculate eigenvectors for Karhunen-Loeve Transformation
eig_val, eig_vec = np.linalg.eig(gyration_tensor)
eig_val, eig_vec = self._sort_eig(
eig_val, eig_vec, reverse=True, test=test, tensor=gyration_tensor
)
r = np.matmul(r, eig_vec) # y = A_t * r w/ A = eig_vec
weights = np.broadcast_to(weights, (3, weights.shape[0])).transpose()
if test:
assert np.abs(np.sum(r * weights)) < 1e-8
# Although just trace needed, probably fastest
principal_gyration_tensor = np.matmul(r.transpose(), r * weights)
principal_rg2 = np.trace(principal_gyration_tensor)
principal_rg2 /= factor
if test:
| |
<reponame>davidbradway/openclto
# -*- coding: utf-8 -*-
"""
This module implements a simple interface to UspPlugin DLLs
The basic class in the module is UspPlugin which implements an API to
UspPlugin DLL.
The API functions are:
UspPlugin
GetPluginInfo
Initialize
InitializeCL
Cleanup
SetParams
SetInBufSize
Prepare
GetOutBufSize
ProcessCLIO
ProcessMemIO
To get more information type:
>>> import pyuspplugin
>>> help pyuspplugin.UspPlugin
Furthermore, the module defines a couple of help classes:
PluginInfo - Structure describing the resources needed by a plugin
SampleFormat - Types of samples handled by the UspPlugin DLLs
BuffSize - Structure to define size of individual buffer
"""
import ctypes as ct
import pyopencl as cl
import numpy as np
#-----------------------------------------------------------------------------
class SampleFormat:
    """Enumerated sample formats handled by the UspPlugin DLLs.

    Attributes hold the ctypes enum values the DLL expects:
    uint8, uint16, uint16x2, int8, int16, int16x2, float32,
    float32x2, int32, int32x2.  The 'x2' variants denote
    two-component (interleaved, e.g. complex/IQ) samples.
    """
    uint8 = ct.c_int(0)
    uint16 = ct.c_int(1)
    uint16x2 = ct.c_int(2)
    int8 = ct.c_int(3)
    int16 = ct.c_int(4)
    int16x2 = ct.c_int(5)
    float32 = ct.c_int(6)
    float32x2 = ct.c_int(7)
    int32 = ct.c_int(8)
    int32x2 = ct.c_int(9)
#-----------------------------------------------------------------------------
#
# Sample format table will be used for conversion between BKM types, C-types
# NumPy types. It contains also a textual description of the types.
# When the header is read, the SampleFormat is given as an enumerated value,
# which can be looked-up from the SampleFormat enumeration (above)
#
# Each row: (name, SampleFormat value, NumPy dtype, ctypes type,
# number of interleaved components per sample).
# BUG FIX: 'uint8' previously mapped to the *signed* ct.c_byte; c_ubyte
# matches the unsigned sample type (same size, correct interpretation).
SampleFormatTbl = [('uint8', SampleFormat.uint8, np.uint8, ct.c_ubyte, 1),
                   ('uint16', SampleFormat.uint16, np.uint16, ct.c_uint16, 1),
                   ('uint16x2', SampleFormat.uint16x2, np.uint16, ct.c_uint16, 2),
                   ('int8', SampleFormat.int8, np.int8, ct.c_int8, 1),
                   ('int16', SampleFormat.int16, np.int16, ct.c_int16, 1),
                   ('int16x2', SampleFormat.int16x2, np.int16, ct.c_int16, 2),
                   ('float32', SampleFormat.float32, np.float32, ct.c_float, 1),
                   ('float32x2', SampleFormat.float32x2, np.float32, ct.c_float, 2),
                   ('int32', SampleFormat.int32, np.int32, ct.c_int32, 1),
                   ('int32x2', SampleFormat.int32x2, np.int32, ct.c_int32, 2)
                   ]

# Bytes per sample for every SampleFormat value. Derived from the table
# above (ctypes size * component count) instead of being hand-written,
# so the two can never drift apart; the resulting values are identical
# to the previous literal dict.
SampleBytes = {fmt.value: ct.sizeof(ctyp) * ncomp
               for (_name, fmt, _np_type, ctyp, ncomp) in SampleFormatTbl}
# ----------------------------------------------------------------------------
class PluginInfo(ct.Structure):
    """Structure returning information about a plugin.

    Fields are NumInBuffers, NumOutBuffers, UseOpenCL, InCLMem, OutCLMem.
    """
    # _fields_ can be declared inline because the structure does not
    # reference itself.
    _fields_ = [
        ('NumInBuffers', ct.c_int),   # Number of input buffers
        ('NumOutBuffers', ct.c_int),  # Number of output buffers
        ('UseOpenCL', ct.c_int),      # Does the module use OpenCL?
        ('InCLMem', ct.c_int),        # Inputs are OpenCL mem objects (1 - yes, 0 - no)
        ('OutCLMem', ct.c_int),       # Outputs are OpenCL mem objects (1 - yes, 0 - no)
    ]
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
class BuffSize(ct.Structure):
    """Structure describing the size of a buffer.

    Fields
    ------
    sampleType - Integer. See SampleFormat
    width, height, depth - Number of elements in every direction
    widthLen, heightLen, depthLen - Dimensions measured in bytes
    """
    # Inline declaration; the structure is not self-referential.
    _fields_ = [
        ('sampleType', ct.c_int),
        ('width', ct.c_size_t),      # Samples along innermost dimension
        ('height', ct.c_size_t),     # Samples along second dimension
        ('depth', ct.c_size_t),      # Samples along third dimension
        ('widthLen', ct.c_size_t),   # First (innermost) dimension in bytes
        ('heightLen', ct.c_size_t),  # Second dimension in bytes
        ('depthLen', ct.c_size_t),   # Third dimension in bytes
    ]
def BuffSizeCreate(sampleType, width, height, depth):
    """ Creates a new BuffSize object with no zero padding.

    INPUTS
    ------
    sampleType - A value of type SampleFormat. Can be just a number
    width - Number of elements along first dimension
    height - Number of elements along second dimension
    depth - Number of elements along third dimensions
    """
    # Accept either a plain int or a ctypes object carrying .value.
    if 'value' in dir(sampleType):
        sampleType = sampleType.value
    bytes_per_sample = SampleBytes[sampleType]
    size = BuffSize()
    size.sampleType = sampleType
    size.width = width
    size.height = height
    size.depth = depth
    # Byte lengths for a densely packed (unpadded) layout.
    size.widthLen = width * bytes_per_sample
    size.heightLen = height * size.widthLen
    size.depthLen = depth * size.heightLen
    return size
#------------------------------------------------------------------------------
class DbgOclMem(ct.Structure):
    """Debug descriptor for an OpenCL buffer owned by the plugin."""
    _fields_ = [
        ('name', ct.c_char_p),  # Name of the variable
        ('mem', ct.c_void_p),   # cl_mem - pointer to OpenCL mem object
        ('bufSize', BuffSize),  # Size of the buffer being debugged
    ]
class DbgMem(ct.Structure):
    """Debug descriptor for a host-memory buffer owned by the plugin."""
    _fields_ = [
        ('name', ct.c_char_p),  # Name of the variable
        ('ptr', ct.c_void_p),   # Pointer to the host buffer
        ('bufSize', BuffSize),  # Size of the buffer being debugged
    ]
#-----------------------------------------------------------------------------
class UspPlugin():
"""Class that handles plug-in modules.
Usage pattern with OpenCL
-------------------------
.. code:: python
import pyopencl as cl
import numpy as np
import pyusplugin as pu
plugin = pu.UspPlugin('Path-to-my-dll-with-extension')
info = plugin.GetPluginInfo()
if (info.UseOpenCL == 0):
print ('Example supports OpenCL only')
raise NotImplementedError
ctx = cl.create_some_context()
cmd = cl.CommandQueue(ctx)
inbuf = np.random.rand(1024).astype(np.float32)
output = np.empty_like(inbuf)
# Define the sizes of the buffers
insize = pu.BuffSize() # Size
sf = SampleFormat()
insize.sampleType = sf.SAMPLE_FORMAT_FLOAT32
insize.width = 1024
insize.height = insize.depth = 1
insize.widthLen = data_size * ct.sizeof(ct.c_float)
insize.depthLen = insize.heightLen = insize.widthLen * insize.height;
#
plugin.InitializeCL(ctx, 'path-to-dll. Maybe needed to load CL source');
plugin.SetInBufSize(insize, 0)
plugin.Prepare()
outsize = plugin.GetOutBufSize( 0 )
mf = cl.mem_flags
inmem = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf = inbuf )
outmem = cl.Buffer(ctx, mf.WRITE_ONLY, 4096)
plugin.ProcessCLIO([inmem], [outmem], cmd)
cl.enqueue_copy(cmd, outbuf, outmem) # Read output
# Inspect results
"""
def __init__(self, dllname):
self.hDLL = ct.CDLL(dllname)
# Create prototypes for the functions
GetPluginInfoProto = ct.CFUNCTYPE(None, ct.POINTER(PluginInfo))
InitializeProto = ct.CFUNCTYPE(ct.c_int, ct.c_char_p)
InitializeCLProto = ct.CFUNCTYPE(ct.c_int, ct.c_void_p, ct.c_void_p, ct.c_char_p) # cl_context, cl_device, char* path
InitializeProto = ct.CFUNCTYPE(ct.c_int, ct.c_char_p)
CleanupProto = ct.CFUNCTYPE(ct.c_int)
SetParamsProto = ct.CFUNCTYPE(ct.c_int, ct.POINTER(ct.c_float), ct.c_size_t, ct.POINTER(ct.c_int), ct.c_size_t)
SetInBufSizeProto = ct.CFUNCTYPE(ct.c_int, ct.POINTER(BuffSize), ct.c_int)
PrepareProto = ct.CFUNCTYPE(ct.c_int)
GetOutBufSizeProto = ct.CFUNCTYPE(ct.c_int, ct.POINTER(BuffSize), ct.c_int)
ProcessCLIOProto = ct.CFUNCTYPE(ct.c_int, ct.c_void_p, ct.c_size_t, ct.c_void_p, ct.c_size_t, ct.c_void_p, ct.c_void_p, ct.c_void_p)
ProcessMemIOProto = ct.CFUNCTYPE(ct.c_int, ct.c_void_p, ct.c_size_t, ct.c_void_p, ct.c_size_t)
GetDbgOclMemProto = ct.CFUNCTYPE(ct.POINTER(DbgOclMem), ct.POINTER(ct.c_uint32))
GetDbgMemProto = ct.CFUNCTYPE(ct.POINTER(DbgMem), ct.POINTER(ct.c_uint32))
#DbgOclMem* __cdecl GetDbgOclMem(uint32_t* arrayLen)
self._GetPluginInfo = GetPluginInfoProto(("GetPluginInfo", self.hDLL))
self._Initialize = InitializeProto(("Initialize", self.hDLL))
self._InitializeCL = InitializeCLProto(("InitializeCL", self.hDLL))
self._Cleanup = CleanupProto(("Cleanup", self.hDLL))
self._SetParams = SetParamsProto(("SetParams", self.hDLL))
self._SetInBufSize = SetInBufSizeProto(("SetInBufSize", self.hDLL))
self._Prepare = PrepareProto(("Prepare", self.hDLL))
self._GetOutBufSize = GetOutBufSizeProto(("GetOutBufSize", self.hDLL))
self._ProcessCLIO = ProcessCLIOProto(("ProcessCLIO", self.hDLL))
self._ProcessMemIO = ProcessMemIOProto(("ProcessMemIO", self.hDLL))
# Debug interface
self._GetDbgOclMem = GetDbgOclMemProto(("GetDbgOclMem", self.hDLL))
self._GetDbgMem = GetDbgMemProto(("GetDbgMem", self.hDLL))
def GetPluginInfo(self):
""" Returns information about the DLL - Using OpenCL etc."""
info = PluginInfo()
self._GetPluginInfo(ct.byref(info))
return info
def Initialize(self, pathToDll):
""" Initialize DLL. Set path to DLL. """
res = self._Initialize(ct.c_char_p(pathToDll))
return res
def InitializeCL(self, context, pathToDLL):
""" Initialize a DLL which uses OpenCL.
context is a OpenCL context created using pyopencl
"""
res = self._InitializeCL(context.obj_ptr,
context.devices[0].obj_ptr,
ct.c_char_p(pathToDLL))
res = 0
return res
def Cleanup(self):
""" Releas allocated resources. Called before DLL is unloaded"""
res = self._Cleanup()
return res
def SetParams(self, floatParams, intParams=[]):
"""Set parameters to the processing module.
INPUTS
------
floatParams - Array with floating point parameters
intParams - Array with integer parameters
OUTPUT
------
0 if no error has occured, otherwise a value != 0
"""
if (len(floatParams) > 0):
fp = (ct.c_float * len(floatParams))()
fp[:] = floatParams[:]
else:
fp = (ct.c_float)()
if (len(intParams) > 0):
ip = (ct.c_int * len(intParams))()
ip[:] = floatParams[:]
else:
ip = (ct.c_int)()
res = self._SetParams(fp, len(fp), ip, len(ip))
return res
def SetInBufSize(self, bufSize, bufnum=0):
""" Set the size of input buffer.
INPUTS
------
bufSize: BuffSize() structure
Structure describing the size of an input buffer and the
type of the samples
bufnum: Index of input buffer starting from 0
OUTPUT
------
0 if no errors
"""
res = self._SetInBufSize(ct.byref(bufSize), bufnum)
return res
def Prepare(self):
""" Prepare processing.
Call this function BEFORE the actual processing
INPUT
-----
None
OUTPUT
------
0 if no errors
"""
res = self._Prepare()
return res
def GetOutBufSize(self, bufnum):
""" Get the size of an output buffer
USAGE
-----
size = obj.GetOutBufSize(bufnum)
INPUT
-----
bufnum: Buffer index starting from 0
OUTPUT
------
size: BuffSize() structure
"""
buf = BuffSize()
res = self._GetOutBufSize(ct.byref(buf), bufnum)
if (res != 0):
print(' GetOutBufSize failed !')
return buf
def ProcessCLIO(self, inbufs, outbufs, cmdqueue, evin, evout):
""" Process data where both inputs and output are OpenCL mem objects
USAGE
-----
res = obj.ProcessCLIO(inbufs, outbufs, cmdqueue)
INPUTS
------
inbufs: list of pyopencl.Buffer() objects
outbufs: list of pyopencl.Buffer() objects
cmdqueue : pyopencl command queue
evin : pyopencl user event - input
evout : pyopencl user event - Filled in by the DLL
REMARK
------
If you have only 1 buffer, remember the []
obj.Process([inbuf], [outbuf])
OUTPUT
------
0 if no errors
"""
inbuf_array = (ct.c_void_p * len(inbufs))() # Instantiate array of pointers
for n in range(0, len(inbufs)):
inbuf_array[n] = inbufs[n].obj_ptr
outbuf_array = (ct.c_void_p * len(outbufs))() # Instantiate array of pointers
for n in range(0, len(outbufs)):
outbuf_array[n] = | |
#!/usr/bin/env python
#
# Author: <NAME> <<EMAIL>>
#
import time
import ctypes
import tempfile
import numpy
import h5py
from pyscf import lib
from functools import reduce
from pyscf.lib import logger
from pyscf import gto
from pyscf import ao2mo
from pyscf.cc import ccsd
from pyscf.cc import _ccsd
from pyscf.cc import ccsd_rdm
from pyscf.scf import rhf_grad
from pyscf.scf import cphf
BLKSIZE = 192
def IX_intermediates(mycc, t1, t2, l1, l2, eris=None, d1=None, d2=None):
    """Assemble the orbital-response intermediates for the CCSD gradient.

    Contracts the 1- and 2-particle density-matrix intermediates (d1, d2)
    with the MO integrals to build Ioo, Ivv, Ivo and Xvo (the latter is
    the right-hand side of the Z-vector / CPHF equations).

    Parameters: mycc - CCSD object; t1/t2 amplitudes; l1/l2 Lambda
    amplitudes; eris - integrals in Chemist's notation (built if None);
    d1/d2 - rdm intermediates (built if None; d2 is kept on disk).
    Returns (Ioo, Ivv, Ivo, Xvo); note Ioo/Ivv are not hermitian.
    """
    if eris is None:
# Note eris are in Chemist's notation
        eris = ccsd._ERIS(mycc)
    if d1 is None:
        d1 = ccsd_rdm.gamma1_intermediates(mycc, t1, t2, l1, l2)
    doo, dov, dvo, dvv = d1
    if d2 is None:
        # Hold the 2-pdm intermediates in a scratch HDF5 file.
        _d2tmpfile = tempfile.NamedTemporaryFile(dir=lib.param.TMPDIR)
        fd2intermediate = h5py.File(_d2tmpfile.name, 'w')
        ccsd_rdm.gamma2_outcore(mycc, t1, t2, l1, l2, fd2intermediate)
        dovov = fd2intermediate['dovov']
        dvvvv = fd2intermediate['dvvvv']
        doooo = fd2intermediate['doooo']
        doovv = fd2intermediate['doovv']
        dovvo = fd2intermediate['dovvo']
        dovvv = fd2intermediate['dovvv']
        dooov = fd2intermediate['dooov']
    else:
        dovov, dvvvv, doooo, doovv, dovvo, dvvov, dovvv, dooov = d2
    log = logger.Logger(mycc.stdout, mycc.verbose)
    nocc, nvir = t1.shape
    nov = nocc * nvir
    nvir_pair = nvir * (nvir+1) // 2
    _tmpfile = tempfile.NamedTemporaryFile(dir=lib.param.TMPDIR)
    fswap = h5py.File(_tmpfile.name, 'w')
    fswap.create_group('e_vvov')
    fswap.create_group('c_vvov')
    # Note Ioo, Ivv are not hermitian
    Ioo = numpy.zeros((nocc,nocc))
    Ivv = numpy.zeros((nvir,nvir))
    Ivo = numpy.zeros((nvir,nocc))
    Xvo = numpy.zeros((nvir,nocc))
    # ---- all-occupied contractions -------------------------------------
    eris_oooo = _cp(eris.oooo)
    eris_ooov = _cp(eris.ooov)
    d_oooo = _cp(doooo)
    d_oooo = _cp(d_oooo + d_oooo.transpose(1,0,2,3))
    #:Ioo += numpy.einsum('jmlk,imlk->ij', d_oooo, eris_oooo) * 2
    Ioo += lib.dot(eris_oooo.reshape(nocc,-1), d_oooo.reshape(nocc,-1).T, 2)
    d_oooo = _cp(d_oooo.transpose(0,2,3,1))
    #:Xvo += numpy.einsum('iljk,ljka->ai', d_oooo, eris_ooov) * 2
    Xvo += lib.dot(eris_ooov.reshape(-1,nvir).T, d_oooo.reshape(nocc,-1).T, 2)
    Xvo += (numpy.einsum('kj,kjia->ai', doo, eris_ooov) * 4
          - numpy.einsum('kj,ikja->ai', doo+doo.T, eris_ooov))
    eris_oooo = eris_ooov = d_oooo = None
    # ---- symmetrized ov,ov block ---------------------------------------
    d_ovov = numpy.empty((nocc,nvir,nocc,nvir))
    blksize = 8
    for p0, p1 in prange(0, nocc, blksize):
        d_ovov[p0:p1] = _cp(dovov[p0:p1])
        d_ovvo = _cp(dovvo[p0:p1])
        for i in range(p0,p1):
            d_ovov[i] += d_ovvo[i-p0].transpose(0,2,1)
        d_ovvo = None
    d_ovov = lib.transpose_sum(d_ovov.reshape(nov,nov)).reshape(nocc,nvir,nocc,nvir)
    #:Ivo += numpy.einsum('jbka,jbki->ai', d_ovov, eris.ovoo)
    Ivo += lib.dot(d_ovov.reshape(-1,nvir).T, _cp(eris.ovoo).reshape(-1,nocc))
    eris_ovov = _cp(eris.ovov)
    #:Ioo += numpy.einsum('jakb,iakb->ij', d_ovov, eris.ovov)
    #:Ivv += numpy.einsum('jcib,jcia->ab', d_ovov, eris.ovov)
    Ioo += lib.dot(eris_ovov.reshape(nocc,-1), d_ovov.reshape(nocc,-1).T)
    Ivv += lib.dot(eris_ovov.reshape(-1,nvir).T, d_ovov.reshape(-1,nvir))
    eris_ovov = None
    fswap['dovvo'] = d_ovov.transpose(0,1,3,2)
    d_ovov = None

    # ---- pass 1: blocked over occupied orbitals ------------------------
    max_memory = mycc.max_memory - lib.current_memory()[0]
    unit = max(nvir**3*2.5, nvir**3*2+nocc*nvir**2)
    blksize = max(ccsd.BLKMIN, int(max_memory*1e6/8/unit))
    iobuflen = int(256e6/8/(blksize*nvir))
    log.debug1('IX_intermediates pass 1: block size = %d, nocc = %d in %d blocks',
               blksize, nocc, int((nocc+blksize-1)/blksize))
    for istep, (p0, p1) in enumerate(prange(0, nocc, blksize)):
        d_ooov = _cp(dooov[p0:p1])
        eris_oooo = _cp(eris.oooo[p0:p1])
        eris_ooov = _cp(eris.ooov[p0:p1])
        #:Ivv += numpy.einsum('ijkb,ijka->ab', d_ooov, eris_ooov)
        #:Ivo += numpy.einsum('jlka,jlki->ai', d_ooov, eris_oooo)
        Ivv += lib.dot(eris_ooov.reshape(-1,nvir).T, d_ooov.reshape(-1,nvir))
        Ivo += lib.dot(d_ooov.reshape(-1,nvir).T, eris_oooo.reshape(-1,nocc))
        #:Ioo += numpy.einsum('klja,klia->ij', d_ooov, eris_ooov)
        #:Xvo += numpy.einsum('kjib,kjba->ai', d_ooov, eris.oovv)
        eris_oovv = _cp(eris.oovv[p0:p1])
        tmp = _cp(d_ooov.transpose(0,1,3,2).reshape(-1,nocc))
        Ioo += lib.dot(_cp(eris_ooov.transpose(0,1,3,2).reshape(-1,nocc)).T, tmp)
        Xvo += lib.dot(eris_oovv.reshape(-1,nvir).T, tmp)
        eris_oooo = tmp = None
        d_ooov = d_ooov + dooov[:,p0:p1].transpose(1,0,2,3)
        eris_ovov = _cp(eris.ovov[p0:p1])
        #:Ioo += numpy.einsum('ljka,lika->ij', d_ooov, eris_ooov)
        #:Xvo += numpy.einsum('jikb,jakb->ai', d_ooov, eris_ovov)
        for i in range(p1-p0):
            lib.dot(eris_ooov[i].reshape(nocc,-1),
                    d_ooov[i].reshape(nocc,-1).T, 1, Ioo, 1)
            lib.dot(eris_ovov[i].reshape(nvir,-1),
                    d_ooov[i].reshape(nocc,-1).T, 1, Xvo, 1)
        d_ooov = None

        #:Ioo += numpy.einsum('kjba,kiba->ij', d_oovv, eris.oovv)
        #:Ivv += numpy.einsum('ijcb,ijca->ab', d_oovv, eris.oovv)
        #:Ivo += numpy.einsum('kjba,kjib->ai', d_oovv, eris.ooov)
        d_oovv = _cp(doovv[p0:p1]) + doovv[:,p0:p1].transpose(1,0,3,2)
        for i in range(p1-p0):
            Ioo += lib.dot(eris_oovv[i].reshape(nocc, -1), d_oovv[i].reshape(nocc,-1).T)
        Ivv += lib.dot(eris_oovv.reshape(-1,nvir).T, d_oovv.reshape(-1,nvir))
        Ivo += lib.dot(d_oovv.reshape(-1,nvir).T,
                       _cp(eris_ooov.transpose(0,1,3,2).reshape(-1,nocc)))
        eris_ooov = None
        d_oovv = _ccsd.precontract(d_oovv.reshape(-1,nvir,nvir)).reshape(p1-p0,nocc,-1)

        d_ovvv = numpy.empty((p1-p0,nvir,nvir,nvir))
        ao2mo.outcore._load_from_h5g(dovvv, p0*nvir, p1*nvir,
                                     d_ovvv.reshape(-1,nvir**2))
        #:Ivo += numpy.einsum('jadc,jidc->ai', d_ovvv, eris_oovv)
        for i in range(p1-p0):
            Ivo += lib.dot(d_ovvv[i].reshape(nvir,-1), eris_oovv[i].reshape(nocc,-1).T)
        eris_oovv = None

        # tril part of (d_ovvv + d_ovvv.transpose(0,1,3,2))
        c_ovvv = _ccsd.precontract(d_ovvv.reshape(-1,nvir,nvir))
        ao2mo.outcore._transpose_to_h5g(fswap, 'c_vvov/%d'%istep, c_ovvv, iobuflen)
        c_ovvv = c_ovvv.reshape(-1,nvir,nvir_pair)
        eris_ovx = _cp(eris.ovvv[p0:p1])
        ao2mo.outcore._transpose_to_h5g(fswap, 'e_vvov/%d'%istep,
                                        eris_ovx.reshape(-1,nvir_pair), iobuflen)
        #:Xvo += numpy.einsum('jibc,jabc->ai', d_oovv, eris_ovvv)
        #:Ivv += numpy.einsum('ibdc,iadc->ab', d_ovvv, eris_ovvv)
        for i in range(p1-p0):
            lib.dot(eris_ovx[i].reshape(nvir,-1),
                    d_oovv[i].reshape(nocc,-1).T, 1, Xvo, 1)
            lib.dot(eris_ovx[i].reshape(nvir,-1),
                    c_ovvv[i].reshape(nvir,-1).T, 1, Ivv, 1)
        c_ovvv = d_oovv = None

        eris_ovvo = numpy.empty((p1-p0,nvir,nvir,nocc))
        for i in range(p1-p0):
            d_ovvv[i] = _ccsd.sum021(d_ovvv[i])
            eris_ovvo[i] = eris_ovov[i].transpose(0,2,1)
        #:Ivo += numpy.einsum('abjc,ibjc->ai', d_ovvv, eris_ovov)
        Ivo += lib.dot(d_ovvv.reshape(-1,nvir).T, eris_ovvo.reshape(-1,nocc))
        eris_ovvo = eris_ovov = None

        eris_ovvv = lib.unpack_tril(eris_ovx.reshape(-1,nvir_pair))
        eris_ovx = None
        eris_ovvv = eris_ovvv.reshape(p1-p0,nvir,nvir,nvir)
        #:Ivv += numpy.einsum('icdb,icda->ab', d_ovvv, eris_ovvv)
        #:Xvo += numpy.einsum('jibc,jabc->ai', d_oovv, eris_ovvv)
        Ivv += lib.dot(eris_ovvv.reshape(-1,nvir).T, d_ovvv.reshape(-1,nvir))
        Xvo[:,p0:p1] += (numpy.einsum('cb,iacb->ai', dvv, eris_ovvv) * 4
                       - numpy.einsum('cb,icba->ai', dvv+dvv.T, eris_ovvv))
        d_ovvo = _cp(fswap['dovvo'][p0:p1])
        #:Xvo += numpy.einsum('jbic,jbca->ai', d_ovov, eris_ovvv)
        lib.dot(eris_ovvv.reshape(-1,nvir).T, d_ovvo.reshape(-1,nocc), 1, Xvo, 1)
        d_ovvv = d_ovvo = eris_ovvv = None

    # ---- pass 2: blocked over the packed virtual pair index ------------
    max_memory = mycc.max_memory - lib.current_memory()[0]
    unit = nocc*nvir**2 + nvir**3*2.5
    blksize = max(ccsd.BLKMIN, int(max_memory*1e6/8/unit))
    log.debug1('IX_intermediates pass 2: block size = %d, nocc = %d in %d blocks',
               blksize, nocc, int((nocc+blksize-1)/blksize))
    for p0, p1 in prange(0, nvir, blksize):
        off0 = p0*(p0+1)//2
        off1 = p1*(p1+1)//2
        d_vvvv = _cp(dvvvv[off0:off1]) * 4
        for i in range(p0, p1):
            d_vvvv[i*(i+1)//2+i-off0] *= .5
        d_vvvv = lib.unpack_tril(d_vvvv)
        eris_vvvv = lib.unpack_tril(_cp(eris.vvvv[off0:off1]))
        #:Ivv += numpy.einsum('decb,deca->ab', d_vvvv, eris_vvvv) * 2
        #:Xvo += numpy.einsum('dbic,dbca->ai', d_vvov, eris_vvvv)
        lib.dot(eris_vvvv.reshape(-1,nvir).T, d_vvvv.reshape(-1,nvir), 2, Ivv, 1)
        #:d_vvvv = _cp(d_vvvv + d_vvvv.transpose(0,1,3,2))
        d_vvov = numpy.empty((off1-off0,nocc,nvir))
        ao2mo.outcore._load_from_h5g(fswap['c_vvov'], off0, off1, d_vvov.reshape(-1,nov))
        d_vvvo = _cp(d_vvov.transpose(0,2,1))
        lib.dot(eris_vvvv.reshape(-1,nvir).T, d_vvvo.reshape(-1,nocc), 1, Xvo, 1)
        d_vvov = eris_vvvv = None

        eris_vvov = numpy.empty((off1-off0,nocc,nvir))
        ao2mo.outcore._load_from_h5g(fswap['e_vvov'], off0, off1,
                                     eris_vvov.reshape(-1,nov))
        eris_vvvo = _cp(eris_vvov.transpose(0,2,1))
        #:Ioo += numpy.einsum('abjc,abci->ij', d_vvov, eris_vvvo)
        #:Ivo += numpy.einsum('dbca,dbci->ai', d_vvvv, eris_vvvo) * 2
        lib.dot(d_vvvv.reshape(-1,nvir).T, eris_vvvo.reshape(-1,nocc), 2, Ivo, 1)
        lib.dot(eris_vvvo.reshape(-1,nocc).T, d_vvvo.reshape(-1,nocc), 1, Ioo, 1)
        # BUG FIX: the original assigned to the typo name `eris_vovv`,
        # leaving the large eris_vvvo buffer alive across iterations.
        eris_vvov = eris_vvvo = d_vvvv = None

    del(fswap['e_vvov'])
    del(fswap['c_vvov'])
    del(fswap['dovvo'])
    fswap.close()
    _tmpfile = None
    if d2 is None:
        # BUG FIX: iterate over a snapshot of the keys; deleting entries
        # while iterating the live h5py KeysView is unsafe.
        for key in list(fd2intermediate.keys()):
            del(fd2intermediate[key])
        fd2intermediate.close()
        _d2tmpfile = None

    Ioo *= -1
    Ivv *= -1
    Ivo *= -1
    Xvo += Ivo
    return Ioo, Ivv, Ivo, Xvo
def response_dm1(mycc, t1, t2, l1, l2, eris=None, IX=None):
    """Solve the CPHF (Z-vector) equations for the orbital-response part
    of the CCSD one-particle density matrix.

    Returns an (nmo, nmo) array whose only nonzero blocks are the
    vir-occ / occ-vir couplings obtained from the Xvo right-hand side.
    """
    if eris is None:
        # Note eris are in Chemist's notation
        eris = ccsd._ERIS(mycc)
    if IX is None:
        Ioo, Ivv, Ivo, Xvo = IX_intermediates(mycc, t1, t2, l1, l2, eris)
    else:
        Ioo, Ivv, Ivo, Xvo = IX
    nocc, nvir = t1.shape
    nmo = nocc + nvir
    # Occupied-orbital block size for the blocked integral contraction.
    max_memory = mycc.max_memory - lib.current_memory()[0]
    blksize = max(ccsd.BLKMIN, int(max_memory*1e6/8/(nocc*nvir**2)))
    def fvind(x):
        # Response "Fock-like" contraction A*x needed by the CPHF solver.
        x = x.reshape(Xvo.shape)
        if eris is None:
            # NOTE(review): dead branch — eris is always set above before
            # fvind is called, and `mol` is undefined in this scope; the
            # AO-basis fallback would raise NameError if ever reached.
            mo_coeff = mycc.mo_coeff
            dm = reduce(numpy.dot, (mo_coeff[:,nocc:], x, mo_coeff[:,:nocc].T))
            dm = (dm + dm.T) * 2
            v = reduce(numpy.dot, (mo_coeff[:,nocc:].T, mycc._scf.get_veff(mol, dm),
                                   mo_coeff[:,:nocc]))
        else:
            # MO-basis 4*J - K contraction, blocked over occupied index.
            v = numpy.zeros((nocc,nvir))
            for p0, p1 in prange(0, nocc, blksize):
                eris_ovov = _cp(eris.ovov[p0:p1])
                v[p0:p1] += numpy.einsum('iajb,bj->ia', eris_ovov, x) * 4
                v[p0:p1] -= numpy.einsum('ibja,bj->ia', eris_ovov, x)
                eris_ovov = None
                v[p0:p1] -= numpy.einsum('ijba,bj->ia', _cp(eris.oovv[p0:p1]), x[:,p0:p1])
        return v.T
    # NOTE: only valid with canonical orbitals (diagonal Fock assumed).
    mo_energy = eris.fock.diagonal()
    mo_occ = numpy.zeros_like(mo_energy)
    mo_occ[:nocc] = 2
    dvo = cphf.solve(fvind, mo_energy, mo_occ, Xvo, max_cycle=30)[0]
    dm1 = numpy.zeros((nmo,nmo))
    dm1[nocc:,:nocc] = dvo
    dm1[:nocc,nocc:] = dvo.T
    return dm1
#
# Note: only works with canonical orbitals
# Non-canonical formula refers to JCP, 95, 2639
#
def kernel(mycc, t1=None, t2=None, l1=None, l2=None, eris=None, atmlst=None,
mf_grad=None, verbose=logger.INFO):
if t1 is None: t1 = mycc.t1
if t2 is None: t2 = mycc.t2
if l1 is None: l1 = mycc.l1
if l2 is None: l2 = mycc.l2
if eris is None: eris = ccsd._ERIS(mycc)
if mf_grad is None:
mf_grad = rhf_grad.Gradients(mycc._scf)
log = logger.Logger(mycc.stdout, mycc.verbose)
time0 = time.clock(), time.time()
mol = mycc.mol
moidx = numpy.ones(mycc.mo_coeff.shape[1], dtype=numpy.bool)
if isinstance(mycc.frozen, (int, numpy.integer)):
raise NotImplementedError('frozen orbital ccsd_grad')
moidx[:mycc.frozen] = False
else:
moidx[mycc.frozen] = False
mo_coeff = mycc.mo_coeff[:,moidx] #FIXME: ensure mycc.mo_coeff is canonical orbital
mo_energy = eris.fock.diagonal()
nocc, nvir = t1.shape
nao, nmo = mo_coeff.shape
nao_pair = nao * (nao+1) // 2
log.debug('Build ccsd rdm1 intermediates')
d1 = ccsd_rdm.gamma1_intermediates(mycc, t1, t2, l1, l2)
doo, dov, dvo, dvv = d1
time1 = log.timer('rdm1 intermediates', *time0)
log.debug('Build ccsd rdm2 intermediates')
_d2tmpfile = tempfile.NamedTemporaryFile(dir=lib.param.TMPDIR)
fd2intermediate = h5py.File(_d2tmpfile.name, 'w')
d2 = ccsd_rdm.gamma2_outcore(mycc, t1, t2, l1, l2, fd2intermediate)
time1 = log.timer('rdm2 intermediates', *time1)
log.debug('Build ccsd response_rdm1')
Ioo, Ivv, Ivo, Xvo = IX_intermediates(mycc, t1, t2, l1, l2, eris, d1, d2)
time1 = log.timer('response_rdm1 intermediates', *time1)
dm1mo = response_dm1(mycc, t1, t2, l1, l2, eris, (Ioo, Ivv, Ivo, Xvo))
dm1mo[:nocc,:nocc] = doo + doo.T
dm1mo[nocc:,nocc:] = dvv + dvv.T
dm1ao = reduce(numpy.dot, (mo_coeff, dm1mo, mo_coeff.T))
im1 = numpy.zeros_like(dm1mo)
im1[:nocc,:nocc] = Ioo
im1[nocc:,nocc:] = Ivv
im1[nocc:,:nocc] = Ivo
im1[:nocc,nocc:] = Ivo.T
im1 = reduce(numpy.dot, (mo_coeff, im1, mo_coeff.T))
time1 = log.timer('response_rdm1', *time1)
log.debug('symmetrized rdm2 and MO->AO transformation')
_dm2file = tempfile.NamedTemporaryFile(dir=lib.param.TMPDIR)
# Basically, 4 times of dm2 is computed. *2 in _rdm2_mo2ao, *2 in _load_block_tril
fdm2 = h5py.File(_dm2file.name, 'w')
dm1_with_hf = dm1mo.copy()
for i in range(nocc): # HF 2pdm ~ 4(ij)(kl)-2(il)(jk), diagonal+1 because of 4*dm2
dm1_with_hf[i,i] += 1
_rdm2_mo2ao(mycc, d2, dm1_with_hf, mo_coeff, fdm2)
time1 = log.timer('MO->AO transformation', *time1)
for key in fd2intermediate.keys():
del(fd2intermediate[key])
fd2intermediate.close()
#TODO: pass hf_grad object to compute h1 and s1
log.debug('h1 and | |
<gh_stars>1-10
"""
:Author: <NAME> <<EMAIL>>
Module implementing non-parametric regressions using kernel methods.
"""
import numpy as np
import scipy
from scipy import linalg
import kde
import kernels
import py_local_linear
from compat import irange
from cyth import HAS_CYTHON
local_linear = None  # backend module providing local_linear_1d; bound by useCython()/usePython() below
def useCython():
    """
    Select the compiled Cython implementation of the local-linear backend,
    if it was built; otherwise leave the current backend untouched.
    """
    global local_linear
    if not HAS_CYTHON:
        return
    import cy_local_linear
    local_linear = cy_local_linear
def usePython():
    """
    Switch to using the pure-Python implementation of the methods.

    Rebinds the module-level ``local_linear`` backend to ``py_local_linear``.
    """
    global local_linear
    local_linear = py_local_linear
# Pick the fastest available backend at import time: Cython if compiled,
# otherwise the pure-Python fallback.
if HAS_CYTHON:
    useCython()
else:
    usePython()
def compute_bandwidth(reg):
    """
    Compute the bandwidth and covariance for the model, based on its xdata.

    Prefers a user-supplied bandwidth function, then a covariance function,
    and finally falls back to the fixed values stored on the regression.

    :returns: a ``(bandwidth, covariance)`` pair of 2D matrices (or the raw
        stored values when neither callable is set).
    """
    bw_fn = reg.bandwidth_function
    if bw_fn:
        bandwidth = np.atleast_2d(bw_fn(reg.xdata, model=reg))
        # Covariance is the square of the bandwidth matrix.
        return bandwidth, np.dot(bandwidth, bandwidth).real
    cov_fn = reg.covariance_function
    if cov_fn:
        covariance = np.atleast_2d(cov_fn(reg.xdata, model=reg))
        # Bandwidth is the matrix square root of the covariance.
        return linalg.sqrtm(covariance), covariance
    return reg.bandwidth, reg.covariance
class RegressionKernelMethod(object):
    r"""
    Base class shared by all regression kernel methods.
    """
    def fit(self, reg):
        """
        Fit the method and return the fitted object used for evaluation.

        The returned object must call the
        :py:meth:`pyqt_fit.nonparam_regression.NonParamRegression.set_actual_bandwidth`
        method with the computed bandwidth and covariance.

        :Default: compute the bandwidth from the data held by ``reg`` and
            store it on the regression object.
        """
        bw, cov = compute_bandwidth(reg)
        reg.set_actual_bandwidth(bw, cov)
        return self
    def evaluate(self, points, out):
        """
        Evaluate the regression on the provided points.

        :param ndarray points: 2d-array of points to compute the regression
            on; each column is a point.
        :param ndarray out: 1d-array in which to store the result
        :rtype: ndarray
        :return: the ``out`` array, updated with the regression values
        """
        raise NotImplementedError()
class SpatialAverage(RegressionKernelMethod):
    r"""
    Perform a Nadaraya-Watson regression on the data (i.e. also called
    local-constant regression) using a gaussian kernel.
    The Nadaraya-Watson estimate is given by:
    .. math::
        f_n(x) \triangleq \frac{\sum_i K\left(\frac{x-X_i}{h}\right) Y_i}
        {\sum_i K\left(\frac{x-X_i}{h}\right)}
    Where :math:`K(x)` is the kernel and must be such that :math:`E(K(x)) = 0`
    and :math:`h` is the bandwidth of the method.
    :param ndarray xdata: Explaining variables (at most 2D array)
    :param ndarray ydata: Explained variables (should be 1D array)
    :type cov: ndarray or callable
    :param cov: If an ndarray, it should be a 2D array giving the matrix of
        covariance of the gaussian kernel. Otherwise, it should be a function
        ``cov(xdata, ydata)`` returning the covariance matrix.
    """
    def __init__(self):
        # Scalar 1 means no per-point correction of the kernel width
        # (stored as a 1-element array by the ``correction`` setter).
        self.correction = 1.
    def fit(self, reg):
        # The base-class fit() sets the bandwidth on ``reg`` and returns self,
        # so the rebinding of ``self`` here is effectively a no-op.
        self = super(SpatialAverage, self).fit(reg)
        self.inv_bw = linalg.inv(reg.bandwidth)
        return self
    def evaluate(self, reg, points, out):
        # ``points`` is (d, m): one evaluation point per column.
        d, m = points.shape
        # Accumulator for the kernel-weight normalisation per point.
        norm = np.zeros((m,), points.dtype)
        # Trailing axis added so xdata[:, i, :] broadcasts against ``points``.
        xdata = reg.xdata[..., np.newaxis]
        ydata = reg.fitted_ydata
        correction = self.correction
        N = reg.N
        # NOTE(review): fit() already stores self.inv_bw; it is recomputed
        # here from reg.bandwidth — confirm the duplication is intended.
        inv_bw = scipy.linalg.inv(reg.bandwidth)
        kernel = reg.kernel
        out.fill(0)
        # iterate on the internal points
        # (np.broadcast pairs each data index with a correction index;
        # presumably ``correction`` has length 1 or N — TODO confirm.)
        for i, ci in np.broadcast(irange(N),
                                  irange(correction.shape[0])):
            diff = correction[ci] * (xdata[:, i, :] - points)
            #tdiff = np.dot(inv_cov, diff)
            #energy = np.exp(-np.sum(diff * tdiff, axis=0) / 2.0)
            energy = kernel(np.dot(inv_bw, diff)).squeeze()
            out += ydata[i] * energy
            norm += energy
        # Normalise by the summed kernel weights wherever they are non-zero.
        out[norm > 0] /= norm[norm > 0]
        return out
    @property
    def correction(self):
        """
        The correction coefficient allows to change the width of the kernel
        depending on the point considered. It can be either a constant (to
        correct globaly the kernel width), or a 1D array of same size as the
        input.
        """
        return self._correction
    @correction.setter
    def correction(self, value):
        # Always store a 1D array, even for a scalar input.
        value = np.atleast_1d(value)
        assert len(value.shape) == 1, "Error, the correction must be a single value or a 1D array"
        self._correction = value
    def set_density_correction(self):
        """
        Add a correction coefficient depending on the density of the input
        """
        # NOTE(review): this reads self.xdata, but xdata lives on the
        # regression object (reg.xdata) elsewhere in this class — confirm the
        # attribute is set before this is called, or it raises AttributeError.
        est = kde.KDE1D(self.xdata)
        dens = est(self.xdata)
        dm = dens.max()
        # Guard against dividing by near-zero densities.
        dens[dens < 1e-50] = dm
        self._correction = dm / dens
    @property
    def q(self):
        """
        Degree of the fitted polynom
        """
        return 0
class LocalLinearKernel1D(RegressionKernelMethod):
    r"""
    Perform a local-linear regression using a gaussian kernel.
    The local constant regression is the function that minimises, for each
    position:
    .. math::
        f_n(x) \triangleq \argmin_{a_0\in\mathbb{R}}
            \sum_i K\left(\frac{x-X_i}{h}\right)
            \left(Y_i - a_0 - a_1(x-X_i)\right)^2
    Where :math:`K(x)` is the kernel and must be such that :math:`E(K(x)) = 0`
    and :math:`h` is the bandwidth of the method.
    """
    def fit(self, reg):
        # No extra state beyond the bandwidth computed by the base class.
        return super(LocalLinearKernel1D, self).fit(reg)
    def evaluate(self, reg, points, out):
        """
        Evaluate the spatial averaging on a set of points
        :param ndarray points: Points to evaluate the averaging on
        :param ndarray result: If provided, the result will be put in this
            array
        """
        # This method is 1D-only: work on the first (only) row.
        points = points[0]
        xdata = reg.xdata[0]
        ll = local_linear.local_linear_1d
        if not isinstance(reg.kernel, kernels.normal_kernel1d):
            # The compiled backend only supports the gaussian kernel; fall
            # back to the pure-Python implementation for any other kernel.
            ll = py_local_linear.local_linear_1d
        li2, out = ll(reg.bandwidth, xdata, reg.fitted_ydata, points, reg.kernel, out)
        # Keep the auxiliary li2 values returned by the backend — presumably
        # for later variance/confidence computations; confirm with callers.
        self.li2 = li2
        return out
    @property
    def q(self):
        """
        Degree of the fitted polynom
        """
        return 1
class PolynomialDesignMatrix1D(object):
    """
    Design matrix for 1D polynomial regression: maps a column vector of
    offsets ``dX`` to the matrix ``[dX**0, dX**1, ..., dX**degree]``.
    """
    def __init__(self, degree):
        self.degree = degree
        # Row vector of exponents so that np.power broadcasts over a column
        # of offsets, yielding one monomial column per power.
        self.powers = np.arange(degree + 1).reshape((1, degree + 1))
    def __call__(self, dX, out=None):
        return np.power(dX, self.powers, out)
class LocalPolynomialKernel1D(RegressionKernelMethod):
    r"""
    Perform a local-polynomial regression using a user-provided kernel
    (Gaussian by default).
    The local constant regression is the function that minimises, for each
    position:
    .. math::
        f_n(x) \triangleq \argmin_{a_0\in\mathbb{R}}
            \sum_i K\left(\frac{x-X_i}{h}\right)
            \left(Y_i - a_0 - a_1(x-X_i) - \ldots -
                a_q \frac{(x-X_i)^q}{q!}\right)^2
    Where :math:`K(x)` is the kernel such that :math:`E(K(x)) = 0`, :math:`q`
    is the order of the fitted polynomial and :math:`h` is the bandwidth of
    the method. It is also recommended to have :math:`\int_\mathbb{R} x^2K(x)dx
    = 1`, (i.e. variance of the kernel is 1) or the effective bandwidth will be
    scaled by the square-root of this integral (i.e. the standard deviation of
    the kernel).
    :param ndarray xdata: Explaining variables (at most 2D array)
    :param ndarray ydata: Explained variables (should be 1D array)
    :param int q: Order of the polynomial to fit. **Default:** 3
    :type cov: float or callable
    :param cov: If an float, it should be a variance of the gaussian kernel.
        Otherwise, it should be a function ``cov(xdata, ydata)`` returning
        the variance.
        **Default:** ``scotts_covariance``
    """
    def __init__(self, q=3):
        # Polynomial order, exposed through the ``q`` property below.
        self._q = q
    @property
    def q(self):
        '''
        Degree of the fitted polynomials
        '''
        return self._q
    @q.setter
    def q(self, val):
        self._q = int(val)
    def fit(self, reg):
        assert reg.dim == 1, "This method can only be used with 1D data"
        if self.q == 0:
            # Order 0 is exactly the Nadaraya-Watson (local constant) method.
            obj = SpatialAverage()
            return obj.fit(reg)
        elif self.q == 1:
            # Order 1 has a dedicated, faster implementation.
            obj = LocalLinearKernel1D()
            return obj.fit(reg)
        self = super(LocalPolynomialKernel1D, self).fit(reg)
        # Pre-build the degree-q design matrix used at every evaluation point.
        self.designMatrix = PolynomialDesignMatrix1D(self.q)
        return self
    def evaluate(self, reg, points, out):
        """
        Evaluate the spatial averaging on a set of points
        :param ndarray points: Points to evaluate the averaging on
        :param ndarray result: If provided, the result will be put
            in this array
        """
        xdata = reg.xdata[0, :, np.newaxis]  # make it a column vector
        ydata = reg.fitted_ydata[:, np.newaxis]  # make it a column vector
        points = points[0]  # make it a line vector
        bw = reg.bandwidth
        kernel = reg.kernel
        designMatrix = self.designMatrix
        # Weighted least-squares fit of a degree-q polynomial around each
        # evaluation point; the constant term is the local estimate.
        for i, p in enumerate(points):
            dX = (xdata - p)
            Wx = kernel(dX / bw)
            Xx = designMatrix(dX)
            WxXx = Wx * Xx
            XWX = np.dot(Xx.T, WxXx)
            # Row 0 of (X'WX)^{-1} X'W gives the local estimator weights.
            Lx = linalg.solve(XWX, WxXx.T)[0]
            out[i] = np.dot(Lx, ydata)
        return out
class PolynomialDesignMatrix(object):
"""
Class used to create a design matrix for polynomial regression
"""
    def __init__(self, dim, deg):
        self.dim = dim  # dimension of the input space
        self.deg = deg  # degree of the fitted polynomial
        self._designMatrixSize()  # pre-computes self.size (number of monomials)
    def _designMatrixSize(self):
        """
        Compute the size of the design matrix for a n-D problem of order d.

        Reads the attributes ``self.dim`` (dimension of the problem) and
        ``self.deg`` (degree of the fitting polynomial) and stores the number
        of columns of the design matrix in ``self.size``.  The commented-out
        ``factors`` code would additionally compute the Taylor factors (i.e.
        the factors that would be applied for the taylor decomposition).
        """
        dim = self.dim
        deg = self.deg
        init = 1
        dims = [0] * (dim + 1)
        cur = init
        prev = 0
        #if factors:
        #    fcts = [1]
        fact = 1
        # Incrementally count how many new monomials each total degree adds
        # per variable; ``dims`` holds cumulative per-variable counts.
        for i in irange(deg):
            diff = cur - prev
            prev = cur
            old_dims = list(dims)
            fact *= (i + 1)
            for j in irange(dim):
                dp = diff - old_dims[j]
                cur += dp
                dims[j + 1] = dims[j] + dp
        #    if factors:
        #        fcts += [fact]*(cur-prev)
        self.size = cur
        #self.factors = np.array(fcts)
def __call__(self, x, out=None):
"""
Creates the design matrix for polynomial | |
a maximum of 32. The password can contain letters, numbers, and special characters (!/@#$%^&+=_). The password must contain at least one lower case letter, one upper case letter, one number, and one special character. When no CHEF_DELIVERY_ADMIN_PASSWORD is set, one is generated and returned in the response.
**Attributes accepted in a Puppet createServer request:**
* ``PUPPET_ADMIN_PASSWORD`` : To work with the Puppet Enterprise console, a password must use ASCII characters.
* ``PUPPET_R10K_REMOTE`` : The r10k remote is the URL of your control repository (for example, ssh://git@your.git-repo.com:user/control-repo.git). Specifying an r10k remote opens TCP port 8170.
* ``PUPPET_R10K_PRIVATE_KEY`` : If you are using a private Git repository, add PUPPET_R10K_PRIVATE_KEY to specify an SSH URL and a PEM-encoded private SSH key.
- *(dict) --*
A name and value pair that is specific to the engine of the server.
- **Name** *(string) --*
The name of the engine attribute.
- **Value** *(string) --*
The value of the engine attribute.
:type BackupRetentionCount: integer
:param BackupRetentionCount:
The number of automated backups that you want to keep. Whenever a new backup is created, AWS OpsWorks CM deletes the oldest backups if this number is exceeded. The default value is ``1`` .
:type ServerName: string
:param ServerName: **[REQUIRED]**
The name of the server. The server name must be unique within your AWS account, within each region. Server names must start with a letter; then letters, numbers, or hyphens (-) are allowed, up to a maximum of 40 characters.
:type InstanceProfileArn: string
:param InstanceProfileArn: **[REQUIRED]**
The ARN of the instance profile that your Amazon EC2 instances use. Although the AWS OpsWorks console typically creates the instance profile for you, if you are using API commands instead, run the service-role-creation.yaml AWS CloudFormation template, located at https://s3.amazonaws.com/opsworks-cm-us-east-1-prod-default-assets/misc/opsworks-cm-roles.yaml. This template creates a CloudFormation stack that includes the instance profile you need.
:type InstanceType: string
:param InstanceType: **[REQUIRED]**
The Amazon EC2 instance type to use. For example, ``m4.large`` . Recommended instance types include ``t2.medium`` and greater, ``m4.*`` , or ``c4.xlarge`` and greater.
:type KeyPair: string
:param KeyPair:
The Amazon EC2 key pair to set for the instance. This parameter is optional; if desired, you may specify this parameter to connect to your instances by using SSH.
:type PreferredMaintenanceWindow: string
:param PreferredMaintenanceWindow:
The start time for a one-hour period each week during which AWS OpsWorks CM performs maintenance on the instance. Valid values must be specified in the following format: ``DDD:HH:MM`` . The specified time is in coordinated universal time (UTC). The default value is a random one-hour period on Tuesday, Wednesday, or Friday. See ``TimeWindowDefinition`` for more information.
**Example:** ``Mon:08:00`` , which represents a start time of every Monday at 08:00 UTC. (8:00 a.m.)
:type PreferredBackupWindow: string
:param PreferredBackupWindow:
The start time for a one-hour period during which AWS OpsWorks CM backs up application-level data on your server if automated backups are enabled. Valid values must be specified in one of the following formats:
* ``HH:MM`` for daily backups
* ``DDD:HH:MM`` for weekly backups
The specified time is in coordinated universal time (UTC). The default value is a random, daily start time.
**Example:** ``08:00`` , which represents a daily start time of 08:00 UTC.
**Example:** ``Mon:08:00`` , which represents a start time of every Monday at 08:00 UTC. (8:00 a.m.)
:type SecurityGroupIds: list
:param SecurityGroupIds:
A list of security group IDs to attach to the Amazon EC2 instance. If you add this parameter, the specified security groups must be within the VPC that is specified by ``SubnetIds`` .
If you do not specify this parameter, AWS OpsWorks CM creates one new security group that uses TCP ports 22 and 443, open to 0.0.0.0/0 (everyone).
- *(string) --*
:type ServiceRoleArn: string
:param ServiceRoleArn: **[REQUIRED]**
The service role that the AWS OpsWorks CM service backend uses to work with your account. Although the AWS OpsWorks management console typically creates the service role for you, if you are using the AWS CLI or API commands, run the service-role-creation.yaml AWS CloudFormation template, located at https://s3.amazonaws.com/opsworks-cm-us-east-1-prod-default-assets/misc/opsworks-cm-roles.yaml. This template creates a CloudFormation stack that includes the service role and instance profile that you need.
:type SubnetIds: list
:param SubnetIds:
The IDs of subnets in which to launch the server EC2 instance.
Amazon EC2-Classic customers: This field is required. All servers must run within a VPC. The VPC must have \"Auto Assign Public IP\" enabled.
EC2-VPC customers: This field is optional. If you do not specify subnet IDs, your EC2 instances are created in a default subnet that is selected by Amazon EC2. If you specify subnet IDs, the VPC must have \"Auto Assign Public IP\" enabled.
For more information about supported Amazon EC2 platforms, see `Supported Platforms <https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-supported-platforms.html>`__ .
- *(string) --*
:type BackupId: string
:param BackupId:
If you specify this field, AWS OpsWorks CM creates the server by using the backup represented by BackupId.
:rtype: dict
:returns:
"""
pass
    def delete_backup(self, BackupId: str) -> Dict:
        """
        Deletes a backup. You can delete both manual and automated backups. This operation is asynchronous.
        An ``InvalidStateException`` is thrown when a backup deletion is already in progress. A ``ResourceNotFoundException`` is thrown when the backup does not exist. A ``ValidationException`` is thrown when parameters of the request are not valid.
        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/opsworkscm-2016-11-01/DeleteBackup>`_
        **Request Syntax**
        ::
          response = client.delete_backup(
              BackupId='string'
          )
        **Response Syntax**
        ::
          {}
        **Response Structure**
        - *(dict) --*
        :type BackupId: string
        :param BackupId: **[REQUIRED]**
          The ID of the backup to delete. Run the DescribeBackups command to get a list of backup IDs. Backup IDs are in the format ``ServerName-yyyyMMddHHmmssSSS`` .
        :rtype: dict
        :returns:
        """
        # Stub: the real operation is generated and dispatched by botocore at runtime.
        pass
    def delete_server(self, ServerName: str) -> Dict:
        """
        Deletes the server and the underlying AWS CloudFormation stacks (including the server's EC2 instance). When you run this command, the server state is updated to ``DELETING`` . After the server is deleted, it is no longer returned by ``DescribeServer`` requests. If the AWS CloudFormation stack cannot be deleted, the server cannot be deleted.
        This operation is asynchronous.
        An ``InvalidStateException`` is thrown when a server deletion is already in progress. A ``ResourceNotFoundException`` is thrown when the server does not exist. A ``ValidationException`` is raised when parameters of the request are not valid.
        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/opsworkscm-2016-11-01/DeleteServer>`_
        **Request Syntax**
        ::
          response = client.delete_server(
              ServerName='string'
          )
        **Response Syntax**
        ::
          {}
        **Response Structure**
        - *(dict) --*
        :type ServerName: string
        :param ServerName: **[REQUIRED]**
          The ID of the server to delete.
        :rtype: dict
        :returns:
        """
        # Stub: the real operation is generated and dispatched by botocore at runtime.
        pass
    def describe_account_attributes(self) -> Dict:
        """
        Describes your account attributes, and creates requests to increase limits before they are reached or exceeded.
        This operation is synchronous.
        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/opsworkscm-2016-11-01/DescribeAccountAttributes>`_
        **Request Syntax**
        ::
          response = client.describe_account_attributes()
        **Response Syntax**
        ::
          {
              'Attributes': [
                  {
                      'Name': 'string',
                      'Maximum': 123,
                      'Used': 123
                  },
              ]
          }
        **Response Structure**
        - *(dict) --*
          - **Attributes** *(list) --*
            The attributes that are currently set for the account.
            - *(dict) --*
              Stores account attributes.
              - **Name** *(string) --*
                The attribute name. The following are supported attribute names.
                * *ServerLimit:* The number of current servers/maximum number of servers allowed. By default, you can have a maximum of 10 servers.
                * *ManualBackupLimit:* The number of current manual backups/maximum number of backups allowed. By default, you can have a maximum of 50 manual backups saved.
              - **Maximum** *(integer) --*
                The maximum allowed value.
              - **Used** *(integer) --*
                The current usage, such as the current number of servers that are associated with the account.
        :rtype: dict
        :returns:
        """
        # Stub: the real operation is generated and dispatched by botocore at runtime.
        pass
def describe_backups(self, BackupId: str = None, ServerName: str = None, NextToken: str = None, MaxResults: int = None) -> Dict:
"""
Describes backups. The results | |
<filename>pyro/util.py
from __future__ import absolute_import, division, print_function
import functools
import numbers
import random
import warnings
from collections import defaultdict
from contextlib import contextmanager
import graphviz
import torch
from six.moves import zip_longest
from pyro.poutine.util import site_is_subsample
def set_rng_seed(rng_seed):
    """
    Seed the random number generators Pyro may rely on: the stdlib ``random``
    module, torch (and torch.cuda, if available) and, when importable, numpy.

    :param int rng_seed: The seed value.
    """
    random.seed(rng_seed)
    torch.manual_seed(rng_seed)
    try:
        import numpy
    except ImportError:
        # numpy is optional; nothing else to seed.
        pass
    else:
        numpy.random.seed(rng_seed)
def torch_isnan(x):
    """
    Check whether a Tensor contains any NaN; also works with plain numbers.
    """
    # NaN is the only value that does not compare equal to itself.
    return x != x if isinstance(x, numbers.Number) else torch.isnan(x).any()
def torch_isinf(x):
    """
    A convenient function to check if a Tensor contains any +inf or -inf;
    also works with numbers.

    (Doc fix: the previous docstring claimed only +inf was detected, but the
    code has always checked both signs.)
    """
    if isinstance(x, numbers.Number):
        return x == float('inf') or x == -float('inf')
    return (x == float('inf')).any() or (x == -float('inf')).any()
def warn_if_nan(value, msg=""):
"""
A convenient function to warn if a Tensor or its grad contains any nan,
also works with numbers.
"""
if torch.is_tensor(value) and value.requires_grad:
value.register_hook(lambda x: warn_if_nan(x, msg))
if torch_isnan(value):
warnings.warn("Encountered NaN{}".format((': ' if msg else '.') + msg), stacklevel=2)
def warn_if_inf(value, msg="", allow_posinf=False, allow_neginf=False):
"""
A convenient function to warn if a Tensor or its grad contains any inf,
also works with numbers.
"""
if torch.is_tensor(value) and value.requires_grad:
value.register_hook(lambda x: warn_if_inf(x, msg, allow_posinf, allow_neginf))
if (not allow_posinf) and (value == float('inf') if isinstance(value, numbers.Number)
else (value == float('inf')).any()):
warnings.warn("Encountered +inf{}".format((': ' if msg else '.') + msg), stacklevel=2)
if (not allow_neginf) and (value == -float('inf') if isinstance(value, numbers.Number)
else (value == -float('inf')).any()):
warnings.warn("Encountered -inf{}".format((': ' if msg else '.') + msg), stacklevel=2)
def save_visualization(trace, graph_output):
    """
    Render a trace produced by ``poutine.trace(model, graph_type='dense')``
    with graphviz and save the result to file.

    :param pyro.poutine.Trace trace: a trace to be visualized
    :param graph_output: the graph will be saved to graph_output.pdf
    :type graph_output: str

    Color key:
    - non-reparameterized stochastic nodes are salmon
    - reparameterized stochastic nodes are half salmon, half grey
    - observation nodes are green

    Example:

        trace = pyro.poutine.trace(model, graph_type="dense").get_trace()
        save_visualization(trace, 'output')
    """
    graph = graphviz.Digraph()
    for name, site in trace.nodes.items():
        if site_is_subsample(site):
            continue
        # Order matters: a reparameterized node is also stochastic, so the
        # first test excludes it explicitly.
        if name in trace.stochastic_nodes and name not in trace.reparameterized_nodes:
            color = 'salmon'
        elif name in trace.reparameterized_nodes:
            color = 'lightgrey;.5:salmon'
        elif name in trace.observation_nodes:
            color = 'darkolivegreen3'
        else:
            # only visualize RVs
            continue
        graph.node(name, label=name, shape='ellipse', style='filled', fillcolor=color)
    for src, dst in trace.edges:
        if site_is_subsample(trace.nodes[src]) or site_is_subsample(trace.nodes[dst]):
            continue
        graph.edge(src, dst)
    graph.render(graph_output, view=False, cleanup=True)
def check_traces_match(trace1, trace2):
    """
    :param pyro.poutine.Trace trace1: Trace object of the model
    :param pyro.poutine.Trace trace2: Trace object of the guide
    :raises: RuntimeWarning, ValueError

    Verify that the two traces contain the same set of sample sites (warning
    otherwise) and that matching sites agree on sample shape (error otherwise).
    """
    # Check ordinary sample sites.
    vars1 = {name for name, node in trace1.nodes.items() if node["type"] == "sample"}
    vars2 = {name for name, node in trace2.nodes.items() if node["type"] == "sample"}
    if vars1 != vars2:
        warnings.warn("Model vars changed: {} vs {}".format(vars1, vars2))
    # Check shapes agree.
    for name in vars1:
        node1 = trace1.nodes[name]
        node2 = trace2.nodes[name]
        fn1, fn2 = node1["fn"], node2["fn"]
        if not (hasattr(fn1, "shape") and hasattr(fn2, "shape")):
            continue
        shape1 = fn1.shape(*node1["args"], **node1["kwargs"])
        shape2 = fn2.shape(*node2["args"], **node2["kwargs"])
        if shape1 != shape2:
            raise ValueError("Site dims disagree at site '{}': {} vs {}".format(name, shape1, shape2))
def check_model_guide_match(model_trace, guide_trace, max_iarange_nesting=float('inf')):
    """
    :param pyro.poutine.Trace model_trace: Trace object of the model
    :param pyro.poutine.Trace guide_trace: Trace object of the guide
    :raises: RuntimeWarning, ValueError
    Checks the following assumptions:
    1. Each sample site in the model also appears in the guide and is not
       marked auxiliary.
    2. Each sample site in the guide either appears in the model or is marked,
       auxiliary via ``infer={'is_auxiliary': True}``.
    3. Each :class:``~pyro.iarange`` statement in the guide also appears in the
       model.
    4. At each sample site that appears in both the model and guide, the model
       and guide agree on sample shape.
    """
    # Check ordinary sample sites.
    # guide_vars: every non-subsample sample site of the guide.
    guide_vars = set(name for name, site in guide_trace.nodes.items()
                     if site["type"] == "sample"
                     if type(site["fn"]).__name__ != "_Subsample")
    # aux_vars: guide sites explicitly marked auxiliary (allowed to be
    # missing from the model).
    aux_vars = set(name for name, site in guide_trace.nodes.items()
                   if site["type"] == "sample"
                   if site["infer"].get("is_auxiliary"))
    # model_vars: latent (non-observed, non-subsample) sites of the model.
    model_vars = set(name for name, site in model_trace.nodes.items()
                     if site["type"] == "sample" and not site["is_observed"]
                     if type(site["fn"]).__name__ != "_Subsample")
    # enum_vars: model sites enumerated in parallel, hence legitimately
    # absent from the guide.
    enum_vars = set(name for name, site in model_trace.nodes.items()
                    if site["type"] == "sample" and not site["is_observed"]
                    if type(site["fn"]).__name__ != "_Subsample"
                    if site["infer"].get("_enumerate_dim") is not None
                    if name not in guide_vars)
    if aux_vars & model_vars:
        warnings.warn("Found auxiliary vars in the model: {}".format(aux_vars & model_vars))
    if not (guide_vars <= model_vars | aux_vars):
        warnings.warn("Found non-auxiliary vars in guide but not model, "
                      "consider marking these infer={{'is_auxiliary': True}}:\n{}".format(
                          guide_vars - aux_vars - model_vars))
    if not (model_vars <= guide_vars | enum_vars):
        warnings.warn("Found vars in model but not guide: {}".format(model_vars - guide_vars - enum_vars))
    # Check shapes agree.
    for name in model_vars & guide_vars:
        model_site = model_trace.nodes[name]
        guide_site = guide_trace.nodes[name]
        if hasattr(model_site["fn"], "event_dim") and hasattr(guide_site["fn"], "event_dim"):
            if model_site["fn"].event_dim != guide_site["fn"].event_dim:
                raise ValueError("Model and guide event_dims disagree at site '{}': {} vs {}".format(
                    name, model_site["fn"].event_dim, guide_site["fn"].event_dim))
        if hasattr(model_site["fn"], "shape") and hasattr(guide_site["fn"], "shape"):
            model_shape = model_site["fn"].shape(*model_site["args"], **model_site["kwargs"])
            guide_shape = guide_site["fn"].shape(*guide_site["args"], **guide_site["kwargs"])
            if model_shape == guide_shape:
                continue
            # Allow broadcasting outside of max_iarange_nesting: only the
            # rightmost max_iarange_nesting dims must match exactly.
            if len(model_shape) > max_iarange_nesting:
                model_shape = model_shape[len(model_shape) - max_iarange_nesting:]
            if len(guide_shape) > max_iarange_nesting:
                guide_shape = guide_shape[len(guide_shape) - max_iarange_nesting:]
            if model_shape == guide_shape:
                continue
            # Compare dim-by-dim from the right, padding with broadcastable 1s.
            for model_size, guide_size in zip_longest(reversed(model_shape), reversed(guide_shape), fillvalue=1):
                if model_size != guide_size:
                    raise ValueError("Model and guide shapes disagree at site '{}': {} vs {}".format(
                        name, model_shape, guide_shape))
    # Check subsample sites introduced by iarange.
    model_vars = set(name for name, site in model_trace.nodes.items()
                     if site["type"] == "sample" and not site["is_observed"]
                     if type(site["fn"]).__name__ == "_Subsample")
    guide_vars = set(name for name, site in guide_trace.nodes.items()
                     if site["type"] == "sample"
                     if type(site["fn"]).__name__ == "_Subsample")
    if not (guide_vars <= model_vars):
        warnings.warn("Found iarange statements in guide but not model: {}".format(guide_vars - model_vars))
def check_site_shape(site, max_iarange_nesting):
    """
    Check that a sample site's ``log_prob`` shape is consistent with the sizes
    declared by its enclosing ``iarange`` contexts.

    :param dict site: a trace site with ``log_prob``, ``cond_indep_stack`` and
        ``name`` entries.
    :param int max_iarange_nesting: maximum number of dimensions (counted from
        the right) that iarange contexts may occupy.
    :raises ValueError: on an iarange dim collision, iarange stack overflow,
        or a ``log_prob`` shape that contradicts a declared iarange size.
    """
    actual_shape = list(site["log_prob"].shape)
    # Compute expected shape.
    expected_shape = []
    for f in site["cond_indep_stack"]:
        if f.dim is not None:
            # Use the specified iarange dimension, which counts from the right.
            assert f.dim < 0
            if len(expected_shape) < -f.dim:
                expected_shape = [None] * (-f.dim - len(expected_shape)) + expected_shape
            if expected_shape[f.dim] is not None:
                # BUG FIX: the format string had only two placeholders for
                # three arguments, so the message printed the frame name in
                # place of the dim and dropped the iarange name entirely.
                raise ValueError('\n '.join([
                    'at site "{}" within iarange("{}", dim={}), dim collision'.format(site["name"], f.name, f.dim),
                    'Try setting dim arg in other iaranges.']))
            expected_shape[f.dim] = f.size
    # -1 marks dims with no iarange constraint (any size is acceptable).
    expected_shape = [-1 if e is None else e for e in expected_shape]
    # Check for iarange stack overflow.
    if len(expected_shape) > max_iarange_nesting:
        raise ValueError('\n '.join([
            'at site "{}", iarange stack overflow'.format(site["name"]),
            'Try increasing max_iarange_nesting to at least {}'.format(len(expected_shape))]))
    # Ignore dimensions left of max_iarange_nesting.
    if max_iarange_nesting < len(actual_shape):
        actual_shape = actual_shape[len(actual_shape) - max_iarange_nesting:]
    # Check for incorrect iarange placement on the right of max_iarange_nesting.
    for actual_size, expected_size in zip_longest(reversed(actual_shape), reversed(expected_shape), fillvalue=1):
        if expected_size != -1 and expected_size != actual_size:
            raise ValueError('\n '.join([
                'at site "{}", invalid log_prob shape'.format(site["name"]),
                'Expected {}, actual {}'.format(expected_shape, actual_shape),
                'Try one of the following fixes:',
                '- enclose the batched tensor in a with iarange(...): context',
                '- .independent(...) the distribution being sampled',
                '- .permute() data dimensions']))
    # TODO Check parallel dimensions on the left of max_iarange_nesting.
def _are_independent(counters1, counters2):
for name, counter1 in counters1.items():
if name in counters2:
if counters2[name] != counter1:
return True
return False
def check_traceenum_requirements(model_trace, guide_trace):
"""
Warn if user could easily rewrite the model or guide in a way that would
clearly avoid invalid dependencies on enumerated variables.
:class:`~pyro.infer.traceenum_elbo.TraceEnum_ELBO` enumerates over
synchronized products rather than full cartesian products. Therefore models
must ensure that no variable outside of an iarange depends on an enumerated
variable inside that iarange. Since full dependency checking is impossible,
this function aims to warn only in cases where models can be easily
rewitten to be obviously correct.
"""
enumerated_sites = set(name for name, site in guide_trace.nodes.items()
if site["type"] == "sample" and site["infer"].get("enumerate"))
for role, trace in [('model', model_trace), ('guide', guide_trace)]:
irange_counters = {}
enumerated_contexts = defaultdict(set)
for name, site in trace.nodes.items():
if site["type"] != "sample":
continue
irange_counter = {f.name: f.counter for f in site["cond_indep_stack"] if not f.vectorized}
context = frozenset(f for f in | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Module for xml parsing"""
# Libraries
from lxml import etree
# Modules
import datamodel
from jarvis.shared_orchestrator import (function_inheritance, fun_elem_inheritance,
fun_inter_inheritance, phy_elem_inheritance,
phy_inter_inheritance)
class XmlParser3SE:
    """Parser turning a jarvis4se xml file into dictionaries of 3SE objects."""
    def __init__(self):
        # Empty object store; parse_xml() rebuilds it from an actual file.
        self.xml_dict = {'xml_function_list': set(),
                         'xml_consumer_function_list': [],
                         'xml_producer_function_list': [],
                         'xml_data_list': set(),
                         'xml_state_list': set(),
                         'xml_transition_list': set(),
                         'xml_fun_elem_list': set(),
                         'xml_view_list': set(),
                         'xml_attribute_list': set(),
                         'xml_fun_inter_list': set(),
                         'xml_phy_elem_list': set(),
                         'xml_phy_inter_list': set(),
                         'xml_type_list': set()}
        self.root = None
    def parse_xml(self, input_filename):
        """Parses the whole xml then returns lists of objects/relationship"""
        # collect_ids=False speeds up parsing (see lxml doc); TBC if this can
        # be extended to xml_writer.
        xml_tree = etree.parse(input_filename, etree.XMLParser(collect_ids=False))
        self.root = xml_tree.getroot()
        # Reject files written by versions with a different root tag.
        if not check_xml(self.root):
            return (f"Xml's file structure has changed, please delete {input_filename} "
                    f"and re-execute your whole notebook")
        self.xml_dict = {'xml_type_list': get_type_list(self.root),
                         'xml_function_list': get_functions(self.root),
                         'xml_state_list': get_state(self.root),
                         'xml_transition_list': get_transition(self.root),
                         'xml_fun_elem_list': get_functional_element(self.root),
                         'xml_view_list': get_views(self.root),
                         'xml_attribute_list': get_attributes(self.root),
                         'xml_fun_inter_list': get_functional_interface(self.root),
                         'xml_phy_elem_list': get_physical_element(self.root),
                         'xml_phy_inter_list': get_physical_interface(self.root),
                         }
        # Data objects (and their predecessors) need the function list to
        # resolve producer/consumer relationships.
        data_list, producer_list, consumer_list = get_data(
            self.root, self.xml_dict['xml_function_list'])
        self.xml_dict['xml_data_list'] = data_list
        self.xml_dict['xml_producer_function_list'] = producer_list
        self.xml_dict['xml_consumer_function_list'] = consumer_list
        return generate_inheritance(**self.xml_dict)
def check_xml(root):
    """Check the xml file root tag.

    Since jarvis4se version 1.3 the expected root element is <systemAnalysis>.

    :param root: xml root element (any object exposing a ``tag`` attribute)
    :return: True if the root tag is "systemAnalysis", False otherwise
    """
    # Direct comparison replaces the previous if/else returning True/False.
    return root.tag == "systemAnalysis"
def get_functions(root):
    """Get Function objects.

    Builds datamodel.Function instances, links children to parents through
    <functionPart> elements and replaces each "derived" id by the derived
    Function object itself.

    :param root: xml root element
    :return: set of datamodel.Function
    """
    function_list = set()
    parent_id_by_child_id = {}
    for xml_function in root.iter('function'):
        # Instantiate functions and add them to a list
        function = datamodel.Function(p_id=xml_function.get('id'), p_name=xml_function.get('name'),
                                      p_alias=xml_function.get('alias'),
                                      p_type=xml_function.get('type'),
                                      p_derived=xml_function.get('derived'))
        function.set_operand()
        function_list.add(function)
        # <functionPart> children give the ids of this function's sub-functions
        for xml_function_part in xml_function.iter('functionPart'):
            parent_id_by_child_id[xml_function_part.get('id')] = function.id
    # Index once by id instead of the previous O(n^2) nested scans
    function_by_id = {function.id: function for function in function_list}
    # Set parent()/add_child() on parent-child pairs
    for child_id, parent_id in parent_id_by_child_id.items():
        child = function_by_id.get(child_id)
        parent = function_by_id.get(parent_id)
        if child is not None and parent is not None:
            child.set_parent(parent)
            parent.add_child(child)
    # Replace the derived id (string) by the derived Function object
    for function in function_list:
        derived = function_by_id.get(function.derived)
        if derived is not None:
            function.derived = derived
    return function_list
def get_data(root, function_list):
    """Get Data objects.

    Builds datamodel.Data instances, records consumer/producer relationships
    as [flow_name, function] pairs and sets data predecessors.

    :param root: xml root element
    :param function_list: set of datamodel.Function (from get_functions)
    :return: (data_list, producer_function_list, consumer_function_list)
    """
    data_list = set()
    consumer_function_list = []
    producer_function_list = []
    # Index functions/data once by id instead of scanning per xml element
    function_by_id = {function.id: function for function in function_list}
    data_by_id = {}
    for xml_data in root.iter('data'):
        # Instantiate data and add it to a list
        data = datamodel.Data(p_id=xml_data.get('id'),
                              p_name=xml_data.get('name'),
                              p_type=xml_data.get('type'))
        data_list.add(data)
        data_by_id[data.id] = data
        # Each <consumer> contributes a [flow, consumer_function] pair
        for xml_consumer in xml_data.iter('consumer'):
            function = function_by_id.get(xml_consumer.get('id'))
            if function is not None:
                consumer_function_list.append([xml_data.get('name'), function])
                if xml_consumer.get('role') != 'none':
                    function.set_input_role(xml_data.get('name'))
                # Avoid to reset the input role once already set
                elif function.input_role is None:
                    function.set_input_role(None)
        # Each <producer> contributes a [flow, producer_function] pair
        for xml_producer in xml_data.iter('producer'):
            function = function_by_id.get(xml_producer.get('id'))
            if function is not None:
                producer_function_list.append([xml_data.get('name'), function])
    # Second pass over the xml: predecessors can reference data that was not
    # yet instantiated during the first pass.
    for xml_data in root.iter('data'):
        data = data_by_id.get(xml_data.get('id'))
        if data is not None:
            for xml_predecessor in xml_data.iter('predecessor'):
                predecessor = data_by_id.get(xml_predecessor.get('id'))
                if predecessor is not None:
                    data.add_predecessor(predecessor)
    return data_list, producer_function_list, consumer_function_list
def get_state(root):
    """Get State objects.

    Builds datamodel.State instances, records allocated functions and links
    children to parents through <statePart> elements.

    :param root: xml root element
    :return: set of datamodel.State
    """
    state_list = set()
    state_parent_dict = {}
    for xml_state in root.iter('state'):
        # Instantiate states and add them to a list
        state = datamodel.State(p_id=xml_state.get('id'),
                                p_name=xml_state.get('name'),
                                p_alias=xml_state.get('alias'),
                                p_type=xml_state.get('type'))
        state_list.add(state)
        # <statePart> children give the ids of this state's sub-states
        for xml_state_part in xml_state.iter('statePart'):
            state_parent_dict[xml_state_part.get('id')] = state.id
        # Allocated functions are stored by id
        for xml_allo_fun in xml_state.iter('allocatedFunction'):
            state.add_allocated_function(xml_allo_fun.get("id"))
    # Index once by id instead of the previous O(n^2) nested scans
    state_by_id = {state.id: state for state in state_list}
    for child_id, parent_id in state_parent_dict.items():
        child = state_by_id.get(child_id)
        parent = state_by_id.get(parent_id)
        if child is not None and parent is not None:
            child.set_parent(parent)
            parent.add_child(child)
    return state_list
def get_transition(root):
    """Get Transition objects"""
    transitions = set()
    for xml_transition in root.iter('transition'):
        # Build the transition from its xml attributes
        new_transition = datamodel.Transition(p_id=xml_transition.get('id'),
                                              p_name=xml_transition.get('name'),
                                              p_alias=xml_transition.get('alias'),
                                              p_type=xml_transition.get('type'),
                                              p_source=xml_transition.get('source'),
                                              p_destination=xml_transition.get('destination'))
        # Attach every <condition> text to the transition
        for xml_condition in xml_transition.iter('condition'):
            new_transition.add_condition(xml_condition.get("text"))
        transitions.add(new_transition)
    return transitions
def get_functional_element(root):
    """Get Functional Element objects.

    Builds datamodel.FunctionalElement instances, records allocated
    states/functions and exposed interfaces, links children to parents
    through <functionalElementPart> elements and replaces each "derived"
    id by the derived object itself.

    :param root: xml root element
    :return: set of datamodel.FunctionalElement
    """
    functional_element_list = set()
    fun_elem_parent_dict = {}
    for xml_func_elem in root.iter('functionalElement'):
        # Instantiate functional element and add them to a list
        fun_elem = datamodel.FunctionalElement(p_id=xml_func_elem.get('id'),
                                               p_name=xml_func_elem.get('name'),
                                               p_alias=xml_func_elem.get('alias'),
                                               p_type=xml_func_elem.get('type'),
                                               p_derived=xml_func_elem.get('derived'))
        functional_element_list.add(fun_elem)
        # <functionalElementPart> children give the ids of this element's parts
        for xml_elem_part in xml_func_elem.iter('functionalElementPart'):
            fun_elem_parent_dict[xml_elem_part.get('id')] = fun_elem.id
        # Allocated states/functions and exposed interfaces are stored by id
        for xml_allo_state in xml_func_elem.iter('allocatedState'):
            fun_elem.add_allocated_state(xml_allo_state.get("id"))
        for xml_allo_fun in xml_func_elem.iter('allocatedFunction'):
            fun_elem.add_allocated_function(xml_allo_fun.get("id"))
        for xml_exp_inter in xml_func_elem.iter('exposedInterface'):
            fun_elem.add_exposed_interface(xml_exp_inter.get("id"))
    # Index once by id instead of the previous O(n^2) nested scans
    fun_elem_by_id = {elem.id: elem for elem in functional_element_list}
    for child_id, parent_id in fun_elem_parent_dict.items():
        child = fun_elem_by_id.get(child_id)
        parent = fun_elem_by_id.get(parent_id)
        if child is not None and parent is not None:
            child.set_parent(parent)
            parent.add_child(child)
    # Replace the derived id (string) by the derived object itself
    for elem in functional_element_list:
        derived = fun_elem_by_id.get(elem.derived)
        if derived is not None:
            elem.derived = derived
    return functional_element_list
def get_views(root):
    """Get View objects"""
    views = set()
    for xml_view in root.iter('view'):
        # Build the view from its xml attributes
        new_view = datamodel.View(p_id=xml_view.get('id'),
                                  p_name=xml_view.get('name'),
                                  p_type=xml_view.get('type'))
        # Store allocated item ids on the view
        for xml_allo_item in xml_view.iter('allocatedItem'):
            new_view.add_allocated_item(xml_allo_item.get("id"))
        views.add(new_view)
    return views
def get_attributes(root):
    """Get Attribute objects"""
    attributes = set()
    for xml_attribute in root.iter('attribute'):
        # Build the attribute from its xml attributes
        new_attribute = datamodel.Attribute(p_id=xml_attribute.get('id'),
                                            p_name=xml_attribute.get('name'),
                                            p_alias=xml_attribute.get('alias'),
                                            p_type=xml_attribute.get('type'))
        # Each <describedItem> contributes an (id, value) pair
        for xml_described_item in xml_attribute.iter('describedItem'):
            new_attribute.add_described_item((xml_described_item.get("id"),
                                              xml_described_item.get("value")))
        attributes.add(new_attribute)
    return attributes
def get_functional_interface(root):
    """Get Functional Interface objects.

    Builds datamodel.FunctionalInterface instances, records allocated data
    and replaces each "derived" id by the derived object itself.

    :param root: xml root element
    :return: set of datamodel.FunctionalInterface
    """
    functional_interface_list = set()
    for xml_fun_inter in root.iter('functionalInterface'):
        # Instantiate fun_inter and add them to a list
        fun_inter = datamodel.FunctionalInterface(p_id=xml_fun_inter.get('id'),
                                                  p_name=xml_fun_inter.get('name'),
                                                  p_alias=xml_fun_inter.get('alias'),
                                                  p_type=xml_fun_inter.get('type'),
                                                  p_derived=xml_fun_inter.get('derived'))
        # Allocated data are stored by id
        for xml_allo_data in xml_fun_inter.iter('allocatedData'):
            fun_inter.add_allocated_data(xml_allo_data.get("id"))
        functional_interface_list.add(fun_inter)
    # Index once by id instead of the previous O(n^2) nested scan
    fun_inter_by_id = {inter.id: inter for inter in functional_interface_list}
    for inter in functional_interface_list:
        derived = fun_inter_by_id.get(inter.derived)
        if derived is not None:
            inter.derived = derived
    return functional_interface_list
def get_physical_element(root):
    """Get Physical Element objects.

    Builds datamodel.PhysicalElement instances, records allocated functional
    elements and exposed interfaces, links children to parents through
    <physicalElementPart> elements and replaces each "derived" id by the
    derived object itself.

    :param root: xml root element
    :return: set of datamodel.PhysicalElement
    """
    physical_element_list = set()
    phy_elem_parent_dict = {}
    for xml_phy_elem in root.iter('physicalElement'):
        # Instantiate physical element and add them to a list
        phy_elem = datamodel.PhysicalElement(p_id=xml_phy_elem.get('id'),
                                             p_name=xml_phy_elem.get('name'),
                                             p_alias=xml_phy_elem.get('alias'),
                                             p_type=xml_phy_elem.get('type'),
                                             p_derived=xml_phy_elem.get('derived'))
        physical_element_list.add(phy_elem)
        # <physicalElementPart> children give the ids of this element's parts
        for xml_elem_part in xml_phy_elem.iter('physicalElementPart'):
            phy_elem_parent_dict[xml_elem_part.get('id')] = phy_elem.id
        # Allocated functional elements and exposed interfaces are stored by id
        for xml_allo_fun_elem in xml_phy_elem.iter('allocatedFunctionalElement'):
            phy_elem.add_allocated_fun_elem(xml_allo_fun_elem.get("id"))
        for xml_exp_inter in xml_phy_elem.iter('exposedInterface'):
            phy_elem.add_exposed_interface(xml_exp_inter.get("id"))
    # Index once by id instead of the previous O(n^2) nested scans
    phy_elem_by_id = {elem.id: elem for elem in physical_element_list}
    for child_id, parent_id in phy_elem_parent_dict.items():
        child = phy_elem_by_id.get(child_id)
        parent = phy_elem_by_id.get(parent_id)
        if child is not None and parent is not None:
            child.set_parent(parent)
            parent.add_child(child)
    # Replace the derived id (string) by the derived object itself
    for elem in physical_element_list:
        derived = phy_elem_by_id.get(elem.derived)
        if derived is not None:
            elem.derived = derived
    return physical_element_list
def get_physical_interface(root):
    """Get Physical Interface objects.

    Builds datamodel.PhysicalInterface instances, records allocated
    functional interfaces and replaces each "derived" id by the derived
    object itself.

    :param root: xml root element
    :return: set of datamodel.PhysicalInterface
    """
    physical_interface_list = set()
    for xml_phy_inter in root.iter('physicalInterface'):
        # Instantiate phy_inter and add them to a list
        phy_inter = datamodel.PhysicalInterface(p_id=xml_phy_inter.get('id'),
                                                p_name=xml_phy_inter.get('name'),
                                                p_alias=xml_phy_inter.get('alias'),
                                                p_type=xml_phy_inter.get('type'),
                                                p_derived=xml_phy_inter.get('derived'))
        # Allocated functional interfaces are stored by id
        for xml_allo_inter in xml_phy_inter.iter('allocatedFunctionalInterface'):
            phy_inter.add_allocated_fun_inter(xml_allo_inter.get("id"))
        physical_interface_list.add(phy_inter)
    # Index once by id instead of the previous O(n^2) nested scan
    phy_inter_by_id = {inter.id: inter for inter in physical_interface_list}
    for inter in physical_interface_list:
        derived = phy_inter_by_id.get(inter.derived)
        if derived is not None:
            inter.derived = derived
    return physical_interface_list
def get_type_list(root):
    """Get Type objects.

    Builds datamodel.Type instances and replaces each base *name* (string)
    by the corresponding Type object when one exists.

    :param root: xml root element
    :return: set of datamodel.Type
    """
    type_list = set()
    for xml_type in root.iter('type'):
        # Instantiate Type and add them to a list
        type_obj = datamodel.Type(p_id=xml_type.get('id'),
                                  p_name=xml_type.get('name'),
                                  p_alias=xml_type.get('alias'),
                                  p_base=xml_type.get('base'))
        type_list.add(type_obj)
    # NOTE: base types are matched by *name*, not id (as in the original code).
    # Index once by name instead of the previous O(n^2) nested scan.
    type_by_name = {obj.name: obj for obj in type_list}
    for obj_type in type_list:
        base = type_by_name.get(obj_type.base)
        if base is not None:
            obj_type.base = base
    return type_list
def generate_inheritance(**xml_dict):
"""For all abjects that are derived/inherited update xml_lists accordingly"""
inheritance_list = [xml_dict['xml_function_list'], xml_dict['xml_fun_elem_list'],
xml_dict['xml_fun_inter_list'], xml_dict['xml_phy_elem_list'],
xml_dict['xml_phy_inter_list']]
switch_inheritance = {
0: function_inheritance,
1: fun_elem_inheritance,
2: fun_inter_inheritance,
3: phy_elem_inheritance,
4: phy_inter_inheritance,
| |
# Code adapted from SMP. Add Scheduler
# Implement Logging
import sys
import os
import numpy as np
import torch
import datetime as dt
import matplotlib.pyplot as plt
import time
import torchvision
import random
import seaborn as sns
import pickle
from segmentation_models_pytorch.utils.metrics import IoU
from sklearn.metrics import roc_curve, roc_auc_score, silhouette_score
from copy import deepcopy
from tqdm import tqdm as tqdm
from segmentation_models_pytorch.utils.meter import AverageValueMeter
from collections import OrderedDict
from utils.misc import log_print, compute_cm_binary, get_iou_score
from sklearn.cluster import KMeans
from models.auxillary import Accuracy
class Epoch:
    """Base class running one pass (train or valid) over a dataloader.

    ``loss`` is expected to be a 2-element sequence: ``loss[0]`` scores the
    first model output and ``loss[1]`` the second (see ``_to_device`` and the
    ``batch_update`` implementations in subclasses). Subclasses must implement
    ``batch_update``; ``on_epoch_start`` is an optional hook.

    NOTE(review): the ``classes`` default is a shared mutable list — callers
    should not mutate it in place.
    """
    def __init__(self, model, loss, metrics, stage_name, device='cpu', verbose=True, logger = None, classes = ['sugar','flower','fish','gravel'], enable_class_wise_metrics = True, autoencoder = False):
        self.model = model
        self.loss = loss
        self.metrics = metrics
        self.stage_name = stage_name
        self.verbose = verbose
        self.device = device
        self.logger = logger
        self.classes = classes
        self.enable_class_wise_metrics = enable_class_wise_metrics
        self.autoencoder = autoencoder
        self._to_device()
    def _to_device(self):
        """Move the model, both loss modules and all metrics to self.device."""
        self.model.to(self.device)
        self.loss[0].to(self.device)
        self.loss[1].to(self.device)
        for metric in self.metrics:
            metric.to(self.device)
    def _format_logs(self, logs):
        """Format a logs dict as 'key - value, ...' for the tqdm postfix."""
        str_logs = ['{} - {:.4}'.format(k, v) for k, v in logs.items()]
        s = ', '.join(str_logs)
        return s
    def batch_update(self, x, y, z):
        """Process one batch; subclasses return (loss_value, y_pred, z_pred)."""
        raise NotImplementedError
    def on_epoch_start(self):
        """Hook called once before iterating the dataloader (e.g. train/eval mode)."""
        pass
    @staticmethod
    def get_confusion_matrix(y_pred, y, threshold = 0.5):
        """Return a (n_classes, 4) array of [tn, fp, fn, tp] per class."""
        # Shape of y and y_pred = (bs, class, height, width)
        # Takes in y and y_pred and returns a class * [tn, fp, fn, tp] array
        # Remember to threshold the values of y_pred first which are probabilities
        y = y.cpu().detach().numpy().astype(int)
        y_pred = y_pred.cpu().detach().numpy()
        y_pred = np.where(y_pred > threshold, 1, 0)
        if len(y_pred.shape) == 4:
            # Segmentation case: flatten to (class, bs*height*width)
            bs, classes, height, width = y.shape
            y = np.transpose(y, [1,0,2,3]).reshape(classes, -1)
            y_pred = np.transpose(y_pred, [1,0,2,3]).reshape(classes, -1)
        else:
            # Classification case: (bs, class) -> (class, bs)
            _, classes = y.shape
            y = y.transpose()
            y_pred = y_pred.transpose()
        cm = []
        for clas in range(classes):
            tn, fp, fn, tp = compute_cm_binary(y[clas,:], y_pred[clas,:])
            cm.append([tn, fp, fn, tp])
        return np.array(cm)
    def run(self, dataloader):
        """Run one full epoch over ``dataloader``.

        Returns (cumulative_logs, confusion_matrices_epoch) where
        cumulative_logs maps metric/loss names to their epoch averages and
        confusion_matrices_epoch is the per-class confusion matrix summed
        over all batches.
        """
        self.on_epoch_start()
        logs = {}
        loss_meter = AverageValueMeter()
        if self.enable_class_wise_metrics:
            metric_meter_classes = self.classes + ['overall']
        else:
            metric_meter_classes = ['overall']
        # One running meter per (metric, class) pair plus the classification accuracy
        metrics_meters = {f'{metric.__name__}_{_class}': AverageValueMeter() for metric in self.metrics for _class in metric_meter_classes}
        metrics_meters['overall_c_accuracy'] = AverageValueMeter()
        accuracy = Accuracy(threshold=0.5)
        confusion_matrices_epoch = []
        with tqdm(dataloader, desc=self.stage_name, file=sys.stdout, disable=not (self.verbose)) as iterator:
            # Run for 1 epoch
            for x, y, z in iterator:
                x, y, z = x.to(self.device), y.to(self.device), z.to(self.device)
                loss_value, y_pred, z_pred = self.batch_update(x, y, z)
                # update loss logs
                loss_meter.add(loss_value)
                loss_logs = {self.loss[0].__name__: loss_meter.mean}
                logs.update(loss_logs)
                # update metrics logs
                for metric_fn in self.metrics:
                    metric_value = metric_fn(y_pred, y).cpu().detach().numpy()
                    metrics_meters[f'{metric_fn.__name__}_overall'].add(metric_value)
                    if self.enable_class_wise_metrics:
                        # Per-class metric: index out one channel/column of the prediction
                        for i in range(0,len(self.classes)):
                            if len(y_pred.shape) == 4:
                                metric_value = metric_fn(y_pred[:,i,:,:], y[:,i,:,:]).cpu().detach().numpy()
                            elif len(y_pred.shape) == 2:
                                metric_value = metric_fn(y_pred[:,i], y[:,i]).cpu().detach().numpy()
                            else:
                                raise NotImplementedError('Shape of y_pred must have length 2 or 4')
                            metrics_meters[f'{metric_fn.__name__}_{self.classes[i]}'].add(metric_value)
                accuracy_value = accuracy(z_pred, z).cpu().detach().numpy()
                metrics_meters['overall_c_accuracy'].add(accuracy_value)
                # Only 'overall' meters are shown in the progress bar
                metrics_logs = {k: v.mean for k, v in metrics_meters.items() if 'overall' in k}
                logs.update(metrics_logs)
                if self.verbose:
                    s = self._format_logs(logs)
                    iterator.set_postfix_str(s)
                # compute confusion matrix
                confusion_matrices_epoch.append(self.get_confusion_matrix(y_pred, y))
        confusion_matrices_epoch = np.array(confusion_matrices_epoch).sum(axis = 0)
        # Epoch averages: meter.sum / meter.n over all batches
        cumulative_logs = {k: v.sum/v.n for k, v in metrics_meters.items()}
        cumulative_logs['loss'] = loss_meter.sum/loss_meter.n
        log_print(" ".join([f"{k}:{v:.4f}" for k, v in cumulative_logs.items()]), self.logger)
        if not self.autoencoder:
            for i in range(len(self.classes)):
                log_print(f"Confusion Matrix of {self.classes[i]}, TN: {confusion_matrices_epoch[i,0]}. FP: {confusion_matrices_epoch[i,1]}, FN: {confusion_matrices_epoch[i,2]}, TP: {confusion_matrices_epoch[i,3]}", self.logger)
        return cumulative_logs, confusion_matrices_epoch
class TrainEpoch(Epoch):
    """Epoch subclass that runs optimisation steps in 'train' stage."""

    def __init__(self, model, loss, metrics, optimizer, device='cpu', verbose=True, logger = None, classes = ['sugar','flower','fish','gravel'], enable_class_wise_metrics = True, autoencoder = False):
        super().__init__(model=model, loss=loss, metrics=metrics, stage_name='train',
                         device=device, verbose=verbose, logger=logger, classes=classes,
                         enable_class_wise_metrics=enable_class_wise_metrics,
                         autoencoder=autoencoder)
        self.optimizer = optimizer

    def on_epoch_start(self):
        # Put the network into training mode before the epoch starts
        self.model.train()

    def batch_update(self, x, y, z):
        """One forward/backward pass plus an optimiser step.

        Returns (scalar_loss, segmentation_prediction, class_prediction).
        """
        self.optimizer.zero_grad()
        seg_pred, cls_pred = self.model.forward(x)
        # Combined objective: loss[0] on the first output, loss[1] on the second
        total_loss = self.loss[0](seg_pred, y) + self.loss[1](cls_pred, z)
        total_loss.backward()
        self.optimizer.step()
        scalar_loss = total_loss.cpu().item()
        assert not np.isnan(scalar_loss), 'Loss cannot be NaN. Please restart'
        return scalar_loss, seg_pred, cls_pred
class ValidEpoch(Epoch):
    """Epoch subclass that evaluates the model in 'valid' stage (no gradients)."""

    def __init__(self, model, loss, metrics, device='cpu', verbose=True, logger = None, classes = ['sugar','flower','fish','gravel'], enable_class_wise_metrics = True, autoencoder = False):
        super().__init__(model=model, loss=loss, metrics=metrics, stage_name='valid',
                         device=device, verbose=verbose, logger=logger, classes=classes,
                         enable_class_wise_metrics=enable_class_wise_metrics,
                         autoencoder=autoencoder)

    def on_epoch_start(self):
        # Put the network into evaluation mode before the epoch starts
        self.model.eval()

    def batch_update(self, x, y, z):
        """Forward pass only; returns (scalar_loss, seg_prediction, class_prediction)."""
        with torch.no_grad():
            seg_pred, cls_pred = self.model.forward(x)
            segmentation_loss = self.loss[0](seg_pred, y)
            classification_loss = self.loss[1](cls_pred, z)
            scalar_loss = segmentation_loss.cpu().item() + classification_loss.cpu().item()
        return scalar_loss, seg_pred, cls_pred
def plot_loss_metrics(validation_dataloader_list, losses, metrics, metric_values, metric_names, lr, num_epochs, batch_size, plots_save_path, start_time, logger = None):
    """Plot loss and 'overall' metric curves and save the figure as a png.

    One subplot for the loss plus one per metric. ``losses`` and
    ``metric_values`` are dicts with 'train' and 'val' entries; 'val' is
    indexed by validation-set position (0 and optionally 1).
    """
    # Implement plotting feature
    # The code is only meant to work with the top 2 validation datasets, and no more
    fig, ax = plt.subplots(1,(1+len(metrics)), figsize = (5*(1+len(metrics)),5))
    fig.suptitle(f"Learning Rate: {lr:.5f}, Max Epochs: {num_epochs} Batch size: {batch_size}, Metric: {metrics[0].__name__}")
    ax[0].set_title('Loss Value')
    ax[0].plot(losses['train'], color = 'skyblue', label="Training Loss")
    ax[0].plot(losses['val'][0], color = 'orange', label = "Validation Loss")
    if len(validation_dataloader_list) > 1:
        ax[0].plot(losses['val'][1], color = 'green', label = "Validation Loss 2")
    ax[0].legend()
    # idx tracks which subplot the next 'overall' metric goes into
    idx = 0
    for _, metric_name in enumerate(metric_names):
        if 'overall' in metric_name: # Only plot for the overall metric and not all metrics
            ax[idx+1].set_title(metric_name)
            ax[idx+1].plot(metric_values['train'][metric_name], color = 'skyblue', label=f"Training {metric_name}")
            ax[idx+1].plot(metric_values['val'][0][metric_name], color = 'orange', label=f"Validation 1 {metric_name}")
            if len(validation_dataloader_list) > 1:
                ax[idx+1].plot(metric_values['val'][1][metric_name], color = 'green', label=f"Validation 2 {metric_name}")
            ax[idx+1].legend()
            idx += 1
    if not os.path.exists(plots_save_path):
        os.makedirs(plots_save_path)
    # NOTE(review): the middle .replace(' ',' ') looks like a no-op (space to
    # space) — presumably intended to normalise another character; confirm.
    plt.savefig(os.path.join(plots_save_path,"nn_training_" + str(start_time).replace(':','').replace(' ',' ').replace(' ','_') + ".png"))
    log_print('Metric & Loss Plot Saved', logger)
    plt.close()
def plot_cm(confusion_matrices, classes, validation_dataloader_list, start_time, plots_save_path, colors = ['black','orange','red','green'], logger = None):
    """Plot TN/FP/FN/TP counts across epochs per class and save the figure.

    One row of subplots per class: train, validation 1 and (optionally)
    validation 2. With a single class, matplotlib returns a 1-D axes array,
    hence the separate indexing branch below.
    """
    # Plot another plot for the confusion matrices
    fig, ax = plt.subplots(len(classes),len(validation_dataloader_list)+1, figsize=(10*(len(validation_dataloader_list)+1), 7*len(classes)))
    for class_idx, _class in enumerate(classes):
        for clx_idx, classification in enumerate(['TN','FP','FN','TP']):
            if len(classes) > 1:
                # 2-D axes grid: index by (class row, column)
                ax[class_idx,0].plot([cm[class_idx,clx_idx] for cm in confusion_matrices['train']], color = colors[clx_idx], label=f"{classification}")
                ax[class_idx,0].set_title(f'Training Confusion Matrix {_class}', fontsize=12)
                ax[class_idx,0].legend()
                ax[class_idx,1].plot([cm[class_idx,clx_idx] for cm in confusion_matrices['val'][0]], color = colors[clx_idx], label=f"{classification}")
                ax[class_idx,1].set_title(f'Val 1 Confusion Matrix {_class}', fontsize=12)
                ax[class_idx,1].legend()
                if len(validation_dataloader_list) > 1:
                    ax[class_idx,2].plot([cm[class_idx,clx_idx] for cm in confusion_matrices['val'][1]], color = colors[clx_idx], label=f"{classification}")
                    ax[class_idx,2].set_title(f'Val 2 Confusion Matrix {_class}', fontsize=12)
                    ax[class_idx,2].legend()
            else:
                # Single class: axes array is 1-D
                ax[0].plot([cm[class_idx,clx_idx] for cm in confusion_matrices['train']], color = colors[clx_idx], label=f"{classification}")
                ax[0].set_title(f'Training Confusion Matrix {_class}', fontsize=12)
                ax[0].legend()
                ax[1].plot([cm[class_idx,clx_idx] for cm in confusion_matrices['val'][0]], color = colors[clx_idx], label=f"{classification}")
                ax[1].set_title(f'Val 1 Confusion Matrix {_class}', fontsize=12)
                ax[1].legend()
                if len(validation_dataloader_list) > 1:
                    ax[2].plot([cm[class_idx,clx_idx] for cm in confusion_matrices['val'][1]], color = colors[clx_idx], label=f"{classification}")
                    ax[2].set_title(f'Val 2 Confusion Matrix {_class}', fontsize=12)
                    ax[2].legend()
    fig.suptitle(f'Confusion Matrix Plot Across Epochs', fontsize=20)
    plt.savefig(os.path.join(plots_save_path,"nn_training_cm_" + str(start_time).replace(':','').replace(' ',' ').replace(' ','_') + ".png"))
    plt.close()
def train_model(train_dataloader,
validation_dataloader_list,
model,
loss,
metrics,
optimizer,
scheduler = None,
batch_size = 1,
num_epochs = 12,
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu"),
autoencoder = False,
classes = ['sugar','flower','fish','gravel'],
logger = None,
verbose = True,
only_validation = False,
model_save_path = os.path.join(os.getcwd(),'weights'),
model_save_prefix = '',
plots_save_path = os.path.join(os.getcwd(),'plots')
):
if type(validation_dataloader_list) != list:
raise TypeError('validation_dataloader_list must be a list of validation dataloaders')
if torch.cuda.is_available():
log_print('Using GPU', logger)
else:
log_print('Using CPU', logger)
# Define Epochs
train_epoch = TrainEpoch(
model = model,
loss = loss,
metrics = metrics,
optimizer = optimizer,
device = device,
verbose = verbose,
logger = logger,
classes = classes,
autoencoder = autoencoder
)
valid_epoch = ValidEpoch(
model = model,
loss = loss,
metrics = metrics,
device = device,
verbose = verbose,
logger = logger,
classes = classes,
autoencoder = autoencoder
)
# Record for plotting
metric_names = [f'{metric.__name__}_{_class}' for metric in metrics for _class in ['overall'] + classes]
losses = {'train':[],'val':{idx:[] for idx in range(len(validation_dataloader_list))}}
metric_values = {'train':{name:[] for name in metric_names},'val':{idx:{name:[] for name in metric_names} for idx in range(len(validation_dataloader_list))}}
confusion_matrices = {'train':[],'val':{idx:[] for idx in range(len(validation_dataloader_list))}}
# Run Epochs
best_perfmeasure = 0
best_epoch = -1
start_time = dt.datetime.now()
log_print('Training model...', logger)
for epoch in range(num_epochs):
log_print(f'\nEpoch: {epoch}', logger)
if not only_validation:
train_logs, train_cm = train_epoch.run(train_dataloader)
losses['train'].append(train_logs['loss'])
confusion_matrices['train'].append(train_cm)
for metric in metric_names:
metric_values['train'][metric].append(train_logs[metric])
valid_logs = {}
for valid_idx, validation_dataloader in enumerate(validation_dataloader_list):
valid_logs[valid_idx], val_cm = valid_epoch.run(validation_dataloader)
losses['val'][valid_idx].append(valid_logs[valid_idx]['loss'])
confusion_matrices['val'][valid_idx].append(val_cm)
for metric in metric_names:
metric_values['val'][valid_idx][metric].append(valid_logs[valid_idx][metric])
if scheduler is not None:
scheduler.step()
log_print(f"Next Epoch Learning Rate: {optimizer.state_dict()['param_groups'][0]['lr']}", logger)
if not os.path.exists(model_save_path):
os.makedirs(model_save_path)
if best_perfmeasure < valid_logs[0][metric_names[0]]: # Right now the metric to be chosen for best_perf_measure is always the first metric for the first validation dataset
best_perfmeasure = valid_logs[0][metric_names[0]]
best_epoch = epoch
torch.save(model, os.path.join(model_save_path,model_save_prefix + 'best_model.pth'))
log_print('Best Model Saved', logger)
torch.save(model, os.path.join(model_save_path,model_save_prefix + 'current_model.pth'))
log_print('Current Model Saved', logger)
log_print(f'Best epoch: {best_epoch} Best Performance Measure: {best_perfmeasure:.5f}', logger)
log_print(f'Time Taken to train: {dt.datetime.now()-start_time}', logger)
plot_loss_metrics(validation_dataloader_list, losses, metrics, metric_values, metric_names, optimizer.state_dict()['param_groups'][0]['lr'], num_epochs, batch_size, plots_save_path, start_time, logger = logger)
if not autoencoder:
plot_cm(confusion_matrices, classes, validation_dataloader_list, start_time, plots_save_path, logger = logger)
# Sum up confusion matrix | |
# repo: jdlarsen-UA/LB-colloids -- file: lb_colloids/Colloids/Colloid_Math.py
"""
ColloidMath is the primary mathematics module for Colloid Simulations.
This module contains both Physical and Chemical formulations of colloid forces within a
porous media. The DLVO and ColloidColloid classes contain complex formulations
of chemical interaction forces. Other classes contain physical force calculations or
provide mathematical conversion from Force to a Velocity like unit that can be used
to recover the change in colloid position. Users should not have to call these classes
directly when running a model.
Basic examples of how these modules are called assume that a user has already provided input to
the lbIO.Config() module and the appropriate dictionaries have been built. For more information
on required parameters and keywords please inspect API documentation for each respective class.
Docstrings also provide basic mathematical relationships for each class.
>>> from lb_colloids import ColloidMath as cm
>>>
>>> grav = cm.Gravity(**PhysicalDict)
>>> grav.gravity # returns the gravity force on a colloid
>>> bouy = cm.Bouyancy(**PhysicalDict)
>>> bouy.bouyancy # returns the bouyancy force on a colloid
>>> gap = cm.Gap(xarr, yarr, **PhysicalDict)
>>> brownian = cm.Brownian(gap.f1, gap.f2, **PhysicalDict)
>>> brownian.brownian_x # returns brownian force in the x-direction on a colloid
>>> brownian.brownian_y
>>> drag = cm.Drag(ux, uy, gap.f1, gap.f2, gap.f3, gap.f4, **PhysicalDict)
>>> drag.drag_x # returns an array of drag forces in the x-direction
>>> dlvo = cm.DLVO(xarr, yarr, **ChemicalDict)
>>> dlvo.EDLx # returns an array of electric double layer forces in the x-direction
>>> dlvo.LewisABy # returns an array of lewis acid base forces in the y-direction
>>> dlvo.LVDWx # returns an array of lifshitz-van der waals forces in the x-direction
>>> colcol = cm.ColloidColloid(xarr, **ChemicalDict)
>>> colcol.x_array # returns an array of dlvo forces for colloid-colloid interactions
>>> colcol.update(Singleton.positions) # updates the class to generate new colloid-colloid interaction arrays
"""
from .LB_Colloid import Singleton
# import ColUtils
import numpy as np
import sys
import copy
class ForceToVelocity:
    """
    Convert force arrays into a "velocity-like" quantity.

    Parameters:
    ----------
    :param np.ndarray forces: Array of forces felt by a colloid
    :keyword float ts: Physical time step value, default 1.
    :keyword float rho_colloid: Colloid particle density, default :math:`2650 kg/m^3`
    :keyword float ac: colloid radius, default 1e-6 m

    Returns:
    -------
    :return: velocity (np.array, np.float) Array of "velocities" calculated from forces
    """
    def __init__(self, forces, **kwargs):
        params = {'rho_colloid': 2650., 'ac': 1e-6, 'ts': 1.}
        params.update(kwargs)
        radius = params['ac']
        # sphere volume times particle density
        self.mass_colloid = (4. / 3.) * np.pi * (radius * radius * radius) * params['rho_colloid']
        # half the impulse (F * dt) divided by colloid mass
        self.velocity = 0.5 * (forces * params['ts']) / self.mass_colloid
class Velocity:
    """
    Class that dimensionalizes LB velocity from non-dimensional lattice Boltzmann units

    Parameters:
    ----------
    :param np.ndarray LBx: Array of Lattice Boltzmann velocities in the x-direction
    :param np.ndarray LBy: Array of Lattice Boltzmann velocities in the y-direction
    :param float velocity_factor: LB to physical velocity conversion factor. Default is 1
    :keyword float ts: Time step value, default is 1 (accepted for API compatibility,
        unused in the conversion itself)
    :keyword float scale_lb: Scale the dimensionalized velocity from lattice Boltzmann.
        Use with caution. Default is 1

    Returns:
    -------
    :return: xvelocity (np.array, np.float) array of dimensionalized velocities in the x-direction
    :return: yvelocity (np.array, np.float) array of dimensionalized velocities in the y-direction
    """
    def __init__(self, LBx, LBy, velocity_factor, **kwargs):
        # 'lb_timestep' and 'ts' defaults are kept for backward compatibility,
        # but only 'scale_lb' affects the result (the previous local 'ts' was unused).
        params = {'lb_timestep': 1e-5, 'ts': 1, 'scale_lb': 1.}
        params.update(kwargs)
        scale = params['scale_lb']
        self.xvelocity = LBx * velocity_factor * scale
        self.yvelocity = LBy * velocity_factor * scale
class Gravity:
    """
    Estimated gravitational force experienced by a colloid.
    .. math::
        F^{G} = \\frac{-4 \pi a_{c}^{3} \\rho_{c} g}{3}
    Parameters:
    ----------
    :keyword float rho_colloid: Particle density of a colloid in :math:`kg/m^3`. Default is 2650.
    :keyword float ac: colloid radius in m. Default is 1e-6
    Returns:
    -------
    :return: gravity (float) Gravitational force that a colloid experiences
    """
    def __init__(self, **kwargs):
        opts = {'rho_colloid': 2650., 'ac': 1e-6}
        opts.update(kwargs)
        ac = opts['ac']
        # colloid mass = sphere volume * particle density
        self.colloid_mass = (4. / 3.) * np.pi * (ac * ac * ac) * opts['rho_colloid']
        # negative: gravity points downward
        self.gravity = self.colloid_mass * -9.81
class Bouyancy:
    """
    Estimated buoyant force experienced by a colloid. Applied as a
    positive value to maintain vector direction (opposes gravity).
    .. math::
        F^{b} = \\frac{4 \pi a_{c}^{3} \\rho_{w} g}{3}
    Parameters:
    ----------
    :keyword float rho_water: density of water :math:`kg/m^3`. Default is 997.
    :keyword float rho_colloid: particle density of a colloid in :math:`kg/m^3`. Default is 2650.
    :keyword float ac: colloid radius in m. Default is 1e-6.
    Returns:
    -------
    :return: bouyancy (float) Bouyancy force that a colloid experiences
    """
    def __init__(self, **kwargs):
        opts = {'rho_water': 997., 'rho_colloid': 2650., 'ac': 1e-6}
        opts.update(kwargs)
        ac = opts['ac']
        # mass of water displaced by the colloid sphere
        self.water_mass = (4. / 3.) * np.pi * (ac * ac * ac) * opts['rho_water']
        self.bouyancy = self.water_mass * 9.81
class Brownian:
    """
    Class to estimate brownian forces on colloids. Uses the relationships outlined in Qui et. al. 2010
    where
    .. math::
        F_{x}^{B} = \\xi \sqrt{\\frac{2D_{0}}{f_{1}dt}}G(0,1)

        F_{y}^{B} = \\xi \sqrt{\\frac{2D_{0}}{f_{4}dt}}G(0,1)
    Parameters:
    ----------
    :param np.ndarray f1: Drag force correction term [Gao et. al. 2010. Computers and Math with App]
    :param np.ndarray f4: Drag force correction term [Gao et. al. 2010]
    :keyword float ac: Colloid radius. Default 1e-6
    :keyword float viscosity: Dynamic viscosity of water. Default 8.9e-4 Pa S.
    :keyword float T: Absolute temperature in K. Default is 298.15
    :keyword float ts: Time step used in the random-force terms. Default 1.
    Returns:
    -------
    :return: brownian_x: (np.ndarray) array of browian (random)
        forces in the x direction [Qiu et. al 2011.]
    :return: brownian_y: (np.ndarray) array of browian (random)
        forces in the y direction [Qiu et. al 2011.]
    """
    def __init__(self, f1, f4, **kwargs):
        # BUG FIX: 'ts' previously had no default, so reading params['ts']
        # below raised KeyError whenever the caller omitted the keyword.
        params = {'viscosity': 8.9e-4, 'ac': 1e-6, 'T': 298.15, 'ts': 1.}
        for kwarg in kwargs:
            params[kwarg] = kwargs[kwarg]
        # G(0, 1): standard normal distribution used for the random kicks
        self.mu = 0
        self.sigma = 1
        self.f1 = f1
        self.f4 = f4
        self.ac = params['ac']
        self.ts = params['ts']
        self.viscosity = params['viscosity']
        self.boltzmann = 1.38e-23
        # Stokes friction coefficient: xi = 6 * pi * mu * a
        self.epsilon = 6. * np.pi * self.viscosity * self.ac
        self.T = params['T']
        # Stokes-Einstein diffusion coefficient: D0 = k T / xi
        self.diffusive = (self.boltzmann * self.T) / self.epsilon

    @property
    def brownian_x(self):
        # F_x = xi * sqrt(2 D0 / (f4 dt)) * G(0,1); re-sampled on every access
        return self.epsilon * np.sqrt(((2 * self.diffusive)/(self.f4 * self.ts))) * \
            np.random.normal(self.mu, self.sigma, self.f4.shape)

    @property
    def brownian_y(self):
        # F_y = xi * sqrt(2 D0 / (f1 dt)) * G(0,1); re-sampled on every access
        return self.epsilon * np.sqrt(((2 * self.diffusive)/(self.f1 * self.ts))) * \
            np.random.normal(self.mu, self.sigma, self.f1.shape)
class Drag:
"""
Class to calculate colloidal drag forces from fluid velocity arrays. Based from calculations
outlined in Gao et, al 2010 and Qui et. al. 2011.
.. math::
F_{x}^{D} = \\frac{\\xi}{f_{4}} (f_{3}u_{x} - V_{x})
F_{y}^{D} = \\xi (f_{2} u_{y} - \\frac{V_{y}}{f_{1}})
Parameters:
----------
:param np.ndarray ux: fluid velocity in the x-direction
:param np.ndarray uy: fluid velocity in the y-direction
:param np.ndarray Vx: colloid velocity in the x-direction
:param np.ndarray Vy: colloid velocity in the y-direction
:param np.ndarray f1: Hydrodynamic force correction term [Gao et. al. 2010.]
:param np.ndarray f2: Hydrodynamic force correction term [Gao et. al. 2010.]
:param np.ndarray f3: Hydrodynamic force correction term [Gao et. al. 2010.]
:param np.ndarray f4: Hydrodynamic force correction term [Gao et. al. 2010.]
:keyword float ac: Colloid radius. Default is 1e-6 m
:keyword float viscosity: Dynamic fluid viscosity of water. Default 8.9e-4 Pa S
:keyword float rho_colloid: Colloid particle density. Default :math:`2650 kg/m^3`
:keyword float rho_water: Water density. Default :math:`997 kg/m^3`
Returns:
-------
:return: drag_x (np.ndarray) non-vectorized drag forces in the x-direction
:return: drag_y: (np.ndarray) non-vectorized drag forces in the y-direction
"""
def __init__(self, ux, uy, f1, f2, f3, f4, **kwargs):
params = {'ac': 1e-6, 'viscosity': 8.9e-4, 'rho_colloid': 2650., 'rho_water': 997.,
'T': 298.15, 'ts': 1.}
for kwarg in kwargs:
params[kwarg] = kwargs[kwarg]
self.ac = params['ac']
self.viscosity = params['viscosity']
self.rho_water = params['rho_water']
self.rho_colloid = params['rho_colloid']
self.ux = ux
self.uy = uy
self.f1 = f1
self.f2 = f2
self.f3 = f3
self.f4 = f4
self.epsilon = 6. * np.pi * self.viscosity * self.ac
self.vx = -((self.rho_colloid - self.rho_water)*((2*self.ac)**2)*9.81)/(18*self.viscosity)
self.vy = -((self.rho_colloid - self.rho_water)*((2*self.ac)**2)*9.81)/(18*self.viscosity)
# self.drag_x = self.drag_xforce(ux, self.Vcol, self.epsilon, f3, f4)
# self.drag_y = self.drag_yforce(uy, self.Vcol, self.epsilon, f1, f2)
self.all_physical_params = copy.copy(params)
@property
def drag_x(self):
"""
:return: drag force array in the x direction
"""
return (self.epsilon / self.f4) * ((self.f3 * self.ux) - self.vx)
@property
def drag_y(self):
return self.epsilon * ((self.f2 * self.uy) - (self.vy / self.f1))
def update(self, vx, vy):
"""
Updates the colloid velocity array for producing | |
input: rxn_list_I = list of reaction IDs
# output: rxn_net_O = net reaction (cobra Reaction object)
from cobra.core.Reaction import Reaction
#rxn_net_O = cobra_model_I.reactions.get_by_id(rxn_list_I[0]);
#for r in rxn_list_I[1:]:
# if cobra_model_I.reactions.get_by_id(r).reversibility:
# print r + " is reversible!";
# print "continue?"
# rxn_net_O += cobra_model_I.reactions.get_by_id(r);
# check input:
if not len(stoich_list_I) == len(rxn_list_I):
print("error in " + rxn_id_I + ": there are " + str(len(rxn_list_I)) + " rxn ids and " + str(len(stoich_list_I)) + " coefficients");
exit(-1);
rxn_net_O = Reaction(rxn_id_I);
for i,r in enumerate(rxn_list_I):
mets = {};
metlist = [];
metlist = cobra_model_I.reactions.get_by_id(r).products + cobra_model_I.reactions.get_by_id(r).reactants;
for met in metlist:
mets[met] = cobra_model_I.reactions.get_by_id(r).get_coefficient(met)*stoich_list_I[i];
rxn_net_O.add_metabolites(mets);
rxn_net_O.subsystem = cobra_model_I.reactions.get_by_id(r).subsystem; #copy over the subsystem
# check net reaction
#if not rxn_net_O.check_mass_balance():
#print "error: " + rxn_id_I + " is not elementally balanced";
#print rxn_net_O.id;
#print rxn_net_O.build_reaction_string();
return rxn_net_O;
def get_solBySub(self,cobra_model_I,sol_I,sub_I):
sol_O = {};
for k,v in sol_I.items():
try:
if cobra_model_I.reactions.get_by_id(k).subsystem == sub_I:
sol_O[k] = v;
except:
print(k + ' reaction not found')
return sol_O;
def groupBySameFlux(self,cobra_model_I,sol_I):
flux_list = [];
for r,f in sol_I.items():
if not f in flux_list and float(f)>0.0:
flux_list.append(f)
sameFlux_O = {};
for f in flux_list:
rxn_list = [];
for r,v in sol_I.items():
if v==f:
rxn_list.append(r);
stoich = [1]*len(rxn_list)
rxnName = '';
for rxn in rxn_list:
rxnName = rxnName + rxn + '_';
rxnName = rxnName[:-1];
# check that the reaction name is less than 225 characters
if len(rxnName)>224:
rxnName = rxnName[:224];
sameFlux_O[rxnName] = {'reactions':rxn_list,
'stoichiometry':stoich,
'flux':f};
#netRxn = make_net_reaction(cobra_model_copy,rxnName,rxn_list,stoich)
#sameFlux_O[rxnName] = {'reactions':rxn_list,
# 'stoichiometry':stoich,
# 'flux':f,
# 'net':netRxn};
return sameFlux_O
def add_net_reaction_subsystem(self,cobra_model_IO,sol_I,subs_I):
'''make net reactions for specific subsystems grouped
by reactions that have the same flux from pfba'''
#input: cobra_model
# sol_I = pfba solution
# sub_I = list of model subsystems
#output: cobra_model
# convert model to irreversible
# convert_to_irreversible(cobra_model_IO);
# Make net reactions for pathways outside of the scope
# of the isotopomer model
for s in subs_I:
sol = get_solBySub(cobra_model_IO,sol_I,s)
sameFlux = groupBySameFlux(cobra_model_IO,sol)
netRxns = {};
for k,v in sameFlux.items():
if len(v['reactions'])>1:
netRxns[k] = v;
add_net_reaction(cobra_model_IO,netRxns);
# add subsystem information back in
for k in sameFlux.keys():
cobra_model_IO.reactions.get_by_id(k).subsystem = s
remove_noflux_reactions(cobra_model_IO,sol_I,subs_I)
# convert model back to reversible
# revert_to_reversible(cobra_model_IO);
def remove_noflux_reactions(self,cobra_model,sol=None,subs=[]):
'''remove noflux reactions'''
# Input: cobra_model
# sol = pfba solution
# subs = string of specific subsystems to reduce
# Output: cobra_model
# if the lower and upper bounds are zero, the reactions
# are removed
cobra_model.optimize()
sol_f = cobra_model.solution.f
# Reduce model
rxns_noflux = [];
# set lb and ub for all reactions with 0 flux to 0;
if sol:
if subs:
for k,v in sol.items():
try:
if (float(v) < 0.0 or float(v) == 0.0) and cobra_model.reactions.get_by_id(k).subsystem in subs:
cobra_model.reactions.get_by_id(k).lower_bound = 0.0;
cobra_model.reactions.get_by_id(k).upper_bound = 0.0;
cobra_model.remove_reactions(k)
rxns_noflux.append(k);
except:
print('reaction is not in model: ' + k)
else:
for k,v in sol.items():
try:
if (float(v) < 0.0 or float(v) == 0.0):
cobra_model.reactions.get_by_id(k).lower_bound = 0.0;
cobra_model.reactions.get_by_id(k).upper_bound = 0.0;
cobra_model.remove_reactions(k)
rxns_noflux.append(k);
except:
print('reaction is not in model: ' + k)
else:
if subs:
for r in cobra_model.reactions:
if r.lower_bound == 0.0 and r.upper_bound == 0.0 and cobra_model.reactions.get_by_id(r.id).subsystem in subs:
cobra_model.remove_reactions(r.id)
else:
for r in cobra_model.reactions:
if r.lower_bound == 0.0 and r.upper_bound == 0.0:
cobra_model.remove_reactions(r.id)
cobra_model.optimize()
sol_reduced_f = cobra_model.solution.f
# Check that the reduced model is consistent with the original model
if not sol_f == sol_reduced_f:
print('reduced model is inconsistent with the original model')
print('original model solution: ' + str(sol_f))
print('reduced model solution: ' + str(sol_reduced_f))
def get_reactionsInfo(self,cobra_model):
'''return the number of reactions and the number of reactions
that cannot carry a flux (i.e. lb and ub of 0.0)'''
nrxn_O = len(cobra_model.reactions);
nrxn_noflux_O = 0;
for r in cobra_model.reactions:
if r.lower_bound == 0.0 and r.upper_bound == 0.0:
nrxn_noflux_O += 1;
return nrxn_O, nrxn_noflux_O
#model reduction iteration functions
def makeIsotopomerModel_iteration01(self,pfba_file,netrxn_irreversible_model_filename,fva_reduced_model_filename,reduced_lbub_filename):
'''iteration 1:
identification of reactions that can be lumped in pathways outside the model scope'''
cobra_model = self.load_ALEWt();
# Make the model irreversible for downstream manipulations:
convert_to_irreversible(cobra_model);
# Add lumped isotopomer reactions
self.add_net_reaction(cobra_model,isotopomer_rxns_net_irreversible);
# Find minimal flux solution:
pfba = optimize_minimal_flux(cobra_model,True,solver='gurobi');
# Write pfba solution to file
with open(pfba_file,mode='wb') as outfile:
writer = csv.writer(outfile)
writer.writerow(['Reaction','Flux'])
for k,v in cobra_model.solution.x_dict.items():
writer.writerow([k,v]);
# Read in pfba solution
pfba_sol = {};
with open(pfba_file,mode='r') as infile:
dictreader = csv.DictReader(infile)
for r in dictreader:
pfba_sol[r['Reaction']] = r['Flux'];
# Make net reactions for pathways outside of the scope
# of the isotopomer model
subs = ['Cell Envelope Biosynthesis',
'Glycerophospholipid Metabolism',
'Lipopolysaccharide Biosynthesis / Recycling',
'Membrane Lipid Metabolism',
'Murein Biosynthesis'
'Murein Recycling',
'Cofactor and Prosthetic Group Biosynthesis',
#'Transport, Inner Membrane',
#'Transport, Outer Membrane',
#'Transport, Outer Membrane Porin',
'tRNA Charging',
'Unassigned',
'Exchange',
'Inorganic Ion Transport and Metabolism',
'Nitrogen Metabolism'];
self.add_net_reaction_subsystem(cobra_model,pfba_sol,subs);
self.remove_noflux_reactions(cobra_model,pfba_sol,['Transport, Outer Membrane Porin','Transport, Inner Membrane','Transport, Outer Membrane'])
revert_to_reversible(cobra_model);
# write model to sbml
write_cobra_model_to_sbml_file(cobra_model,netrxn_irreversible_model_filename)
# Reduce model using FVA:
self.reduce_model(cobra_model,fva_reduced_model_filename)
# Remove all reactions with 0 flux
self.remove_noflux_reactions(cobra_model);
with open(reduced_lbub_filename,mode='wb') as outfile:
writer = csv.writer(outfile)
writer.writerow(['Reaction','Formula','LB','UB','Subsystem'])
for r in cobra_model.reactions:
writer.writerow([r.id,
r.build_reaction_string(),
r.lower_bound,
r.upper_bound,
r.subsystem]);
def makeIsotopomerModel_iteration02(self,pfba_filename,fva_reduced_model_filename,netrxn_irreversible_model_filename,reduced_lbub_filename):
'''iteration 2:
addition of finalized lumped reactions that are in pathways that are within the scope of the model
and reduction by removing reactions with zero optimal minimal flux outside the scope of the model'''
cobra_model = load_ALEWt();
# Make the model irreversible for downstream manipulations:
convert_to_irreversible(cobra_model);
cobra_model.optimize();
# Add lumped isotopomer reactions
self.add_net_reaction(cobra_model,isotopomer_rxns_net_irreversible,True);
cobra_model.optimize();
# Find minimal flux solution:
pfba = optimize_minimal_flux(cobra_model,True,solver='gurobi');
# Write pfba solution to file
with open(pfba_filename,mode='wb') as outfile:
writer = csv.writer(outfile)
writer.writerow(['Reaction','Flux','Subsystem'])
for k,v in cobra_model.solution.x_dict.items():
writer.writerow([k,v,cobra_model.reactions.get_by_id(k).subsystem]);
# Read in pfba solution
pfba_sol = {};
with open(pfba_filename,mode='r') as infile:
dictreader = csv.DictReader(infile)
for r in dictreader:
pfba_sol[r['Reaction']] = r['Flux'];
# remove noflux reactions for pathways outside of the scope
# of the isotopomer model
subs = ['Cell Envelope Biosynthesis',
'Glycerophospholipid Metabolism',
'Lipopolysaccharide Biosynthesis / Recycling',
'Membrane Lipid Metabolism',
'Murein Biosynthesis'
'Murein Recycling',
'Cofactor and Prosthetic Group Biosynthesis',
'Transport, Inner Membrane',
'Transport, Outer Membrane',
'Transport, Outer Membrane Porin',
'tRNA Charging',
'Unassigned',
#'Exchange',
'Inorganic Ion Transport and Metabolism',
'Nitrogen Metabolism',
'Alternate Carbon Metabolism'];
self.remove_noflux_reactions(cobra_model,pfba_sol,subs)
# Reduce model using FVA:
self.reduce_model(cobra_model,fva_reduced_model_filename)
# Reset secretion products that may have been turned off
secrete = ['EX_meoh_LPAREN_e_RPAREN_',
'EX_5mtr_LPAREN_e_RPAREN_',
'EX_h_LPAREN_e_RPAREN_',
'EX_co2_LPAREN_e_RPAREN_',
'EX_co_LPAREN_e_RPAREN_',
'EX_h2o_LPAREN_e_RPAREN_',
'EX_ac_LPAREN_e_RPAREN_',
'EX_fum_LPAREN_e_RPAREN_',
'EX_for_LPAREN_e_RPAREN_',
'EX_etoh_LPAREN_e_RPAREN_',
'EX_lac_DASH_L_LPAREN_e_RPAREN_',
'EX_pyr_LPAREN_e_RPAREN_',
'EX_succ_LPAREN_e_RPAREN_'];
for s in secrete:
cobra_model.reactions.get_by_id(s).upper_bound = 1000.0;
# Remove all reactions with 0 flux
r1,r2 = self.get_reactionsInfo(cobra_model);
while r2 !=0:
self.remove_noflux_reactions(cobra_model);
r1,r2 = self.get_reactionsInfo(cobra_model);
print(r1,r2);
# write model to sbml
write_cobra_model_to_sbml_file(cobra_model,netrxn_irreversible_model_filename)
with open(reduced_lbub_filename,mode='wb') as outfile:
writer = csv.writer(outfile)
writer.writerow(['Reaction','Formula','LB','UB','Subsystem'])
for r in cobra_model.reactions:
writer.writerow([r.id,
r.build_reaction_string(),
r.lower_bound,
r.upper_bound,
r.subsystem]);
    def makeIsotopomerModel_cobraMAT(self,model_filename,xml_filename,mat_filename,csv_filename,isotopomer_mapping_filename,ko_list=[],flux_dict={},description=None):
        '''iteration 3:
        Remove reactions that are thermodynamically unfavorable and add isotopomer data

        :param model_filename: sbml model input path
        :param xml_filename: sbml output path
        :param mat_filename: matlab .mat output path
        :param csv_filename: csv output path for the reaction table
        :param isotopomer_mapping_filename: csv of isotopomer (atom transition) mappings
        :param ko_list: reaction ids to knock out (both bounds set to 0.0)
        :param flux_dict: reaction id -> {'lb': ..., 'ub': ...} constraints
        :param description: optional new model description

        NOTE(review): the mutable defaults (ko_list=[], flux_dict={}) are
        shared across calls; safe only while callers never mutate them.
        '''
        # Read in the sbml file and define the model conditions
        cobra_model = create_cobra_model_from_sbml_file(model_filename, print_time=True)
        # Modify glucose uptake:
        # replace the irreversible 'reverse' exchange with a single EX_glc
        # reaction whose bounds are the negated originals
        if cobra_model.reactions.has_id('EX_glc_LPAREN_e_RPAREN__reverse'):
            lb,ub = cobra_model.reactions.get_by_id('EX_glc_LPAREN_e_RPAREN__reverse').lower_bound,cobra_model.reactions.get_by_id('EX_glc_LPAREN_e_RPAREN__reverse').upper_bound;
            EX_glc_mets = {};
            EX_glc_mets[cobra_model.metabolites.get_by_id('glc_DASH_D_e')] = -1;
            EX_glc = Reaction('EX_glc_LPAREN_e_RPAREN_');
            EX_glc.add_metabolites(EX_glc_mets);
            cobra_model.add_reaction(EX_glc)
            cobra_model.reactions.get_by_id('EX_glc_LPAREN_e_RPAREN_').lower_bound = -ub;
            cobra_model.reactions.get_by_id('EX_glc_LPAREN_e_RPAREN_').upper_bound = lb;
            cobra_model.remove_reactions(['EX_glc_LPAREN_e_RPAREN__reverse'])
        ## Remove thermodynamically infeasible reactions:
        #infeasible = [];
        #loops = [];
        #cobra_model.remove_reactions(infeasible + loops);
        # Apply KOs, if any:
        for ko in ko_list:
            cobra_model.reactions.get_by_id(ko).lower_bound = 0.0;
            cobra_model.reactions.get_by_id(ko).upper_bound = 0.0;
        # Apply flux constraints, if any:
        for rxn,flux in flux_dict.items():
            cobra_model.reactions.get_by_id(rxn).lower_bound = flux['lb'];
            cobra_model.reactions.get_by_id(rxn).upper_bound = flux['ub'];
        # Change description, if any:
        if description:
            cobra_model.description = description;
        # Read in isotopomer model
        isotopomer_mapping = self.read_isotopomer_mapping_csv(isotopomer_mapping_filename); #broken
        isotopomer_str = self.build_isotopomer_str(isotopomer_mapping);
        # write model to sbml
        write_cobra_model_to_sbml_file(cobra_model,xml_filename)
        # Add isotopomer field to model (empty string when no mapping exists)
        for r in cobra_model.reactions:
            if r.id in isotopomer_str:
                cobra_model.reactions.get_by_id(r.id).isotopomer = isotopomer_str[r.id];
            else:
                cobra_model.reactions.get_by_id(r.id).isotopomer = '';
        # Add null basis:
        cobra_model_array = cobra_model.to_array_based_model();
        N = self.calculate.null(cobra_model_array.S.todense()) #convert S from sparse to full and compute the nullspace
        cobra_model.N = N;
        # solve and save pFBA for later use:
        optimize_minimal_flux(cobra_model,True,solver='gurobi');
        # add match field:
        match = numpy.zeros(len(cobra_model.reactions));
        cobra_model.match = match;
        # write model to mat
        save_matlab_model_isotopomer(cobra_model,mat_filename);
        # NOTE(review): mode='wb' with csv.writer only works on Python 2; on
        # Python 3 this must be mode='w', newline='' -- confirm target version
        with open(csv_filename,mode='wb') as outfile:
            writer = csv.writer(outfile)
            writer.writerow(['Reaction','Formula','LB','UB','Genes','Subsystem','Isotopomer'])
            for r in cobra_model.reactions:
                writer.writerow([r.id,
                                 r.build_reaction_string(),
                                 r.lower_bound,
                                 r.upper_bound,
                                 r.gene_reaction_rule,
                                 r.subsystem,
                                 r.isotopomer]);
#ecoli_INCA modifications
def expand_ecoliINCA01(self,model_id_I,mapping_id_I,date_I,model_id_O,mapping_id_O):
'''expand the INCA | |
# -*- coding: utf-8 -*-
from copy import deepcopy
from functools import lru_cache, partial
from io import FileIO
from json import dumps, loads
from logging import debug, exception, info, warning
from re import findall
from time import sleep, time
from typing import Any, Dict, Generator, List, Optional, Sequence, Tuple
from httpx import HTTPStatusError, put
from slugify import slugify
from lumapps.api.base_client import BaseClient
from lumapps.api.decorators import (
none_on_400_ALREADY_ARCHIVED,
none_on_400_SUBSCRIPTION_ALREADY_EXISTS_OR_PINNED,
none_on_404,
none_on_http_codes,
raise_known_save_errors,
retry_on_http_codes,
)
from lumapps.api.errors import (
FileDownloadError,
FileUploadError,
FolderCreationError,
GetTokenError,
LumAppsClientConfError,
MissingMetadataError,
NonIdpGroupInCommunityError,
get_http_err_content,
)
from lumapps.api.helpers import content_is_template, new_lumapps_uuid
from lumapps.api.utils import DiscoveryCacheDict
# pretty-printer used in debug log messages
to_json = partial(dumps, indent=4)
# content slugs that collide with built-in LumApps routes and must not be used
RESERVED_SLUGS = frozenset(["news", "admin", "content", "registration"])
# backward-compatibility alias for the old class name
ApiClient = BaseClient
def chunks(lst, n):
    """Yield successive n-sized slices of *lst* (the last may be shorter)."""
    for start in range(0, len(lst), n):
        yield lst[start:start + n]
# NOTE(review): duplicate of the ApiClient alias defined above; harmless
# re-binding, one of the two can likely be removed.
ApiClient = BaseClient  # pragma: no cover
class LumAppsClient(BaseClient): # pragma: no cover
def __init__(
self,
customer_id: str,
instance_id: Optional[str],
*args,
cache: Optional[Any] = None,
dry_run: bool = False,
**kwargs,
):
""" Create a LumAppsClient associated to a particular LumApps platform and site
Args:
customer_id: The id of the platform you target
instance_id: The id of the instance you target
args: The args to pass to the BaseClient
cache: The cache to use
dry_run: Whether to run in dry_run mode or not. This will
avoid saving things when callings save endpoints
kwargs: The kwargs to pass to the BaseClient
"""
if not customer_id:
raise LumAppsClientConfError("customer_id required")
self.customer_id = customer_id
self.instance_id = instance_id
if cache:
self.cache = cache
else:
self.cache = DiscoveryCacheDict()
self.dry_run = dry_run
self._langs = None
super().__init__(*args, **kwargs)
self._cached_metadata = {}
def _get_token_and_expiry(self, email: str) -> Tuple[str, int]:
resp = self.get_call("user/getToken", customerId=self.customer_id, email=email)
return resp["accessToken"], int(resp["expiresAt"]) # type: ignore
def get_token_getter(self, email: str):
assert email
def f():
k = f"{self.customer_id}|TOKEN|{email}"
vals = self.cache.get(k)
if vals:
return vals
try:
token, expiry = self._get_token_and_expiry(email)
except HTTPStatusError as err:
if err.response.status_code == 403:
raise GetTokenError(get_http_err_content(err))
raise
self.cache.set(k, (token, expiry), int(expiry - time() - 180))
return token, expiry
return f
def get_user_api(self, email: str, prune: bool = True) -> "LumAppsClient":
return LumAppsClient(
self.customer_id,
self.instance_id,
cache=self.cache,
dry_run=self.dry_run,
token_getter=self.get_token_getter(email),
prune=prune,
api_info=self.api_info,
no_verify=self.no_verify,
proxy_info=self.proxy_info,
)
    @property  # type: ignore
    @lru_cache()
    def langs(self) -> List[str]:
        # Languages of the current instance, default language first.
        # NOTE(review): lru_cache on a method keys on ``self`` and keeps
        # instances alive (bugbear B019); results are also memoized in
        # self._langs and self.cache, so this layer looks redundant —
        # confirm before removing. ``self._langs`` is only read here.
        if self._langs:
            return self._langs
        k = f"{self.customer_id}|INSTANCE_LANGS|{self.instance_id}"
        langs = self.cache.get(k)
        if not langs:
            inst = self.get_instance()
            default_lang = inst.get("defaultLang")
            # everything except the default language, then the default first
            langs = [lang for lang in inst["langs"] if lang != default_lang]
            if default_lang:
                langs.insert(0, default_lang)
            # cache for two hours
            self.cache.set(k, langs, 7200)
        return langs
    @property  # type: ignore
    @lru_cache()
    def first_lang(self) -> str:
        # Default language of the instance (first element of ``langs``).
        # NOTE(review): lru_cache on a method retains instances (B019).
        return self.langs[0]  # type: ignore
    def misc_urlinfo(self, url: str) -> Optional[Dict[str, Any]]:  # type: ignore
        """Resolve metadata (title, description, images) for an external URL.

        url: https://youtu.be/YPlnv2ovjw4#blabla?fooo
        Returns something looking like:
        {
            "url": "https://www.youtube.com/
                watch?v=YPlnv2ovjw4&feature=youtu.be",
            "images": ["https://i.ytimg.com/vi/YPlnv2ovjw4/maxresdefault.jpg"],
            "description": "Learn how to prepare for tsunamis: TsunamiZone.org",
            "title": "Staying Safe Where the Waves Break! - YouTube"
        }
        """
        return self.get_call("misc/urlinfo", url=url)  # type: ignore
def get_available_instance_slug(self, desired_slug: str) -> str:
""" Find an available instance slug according to
the given slug and the current token customer.
If the exact slug is not avaialble -i at the end until we
found an available one or i reachs 300.
Args:
desired_slug: The desired slug
Returns:
The first available slug found
Raises:
Exception: Reached 300 try
"""
post_fix = None
while True:
c = self.get_instance(slug=desired_slug, fields="id")
if not c:
return desired_slug
if not post_fix:
post_fix = 1
desired_slug += "-1"
else:
desired_slug = desired_slug[: -len(str(post_fix)) - 1]
post_fix += 1
desired_slug += "-" + str(post_fix)
if post_fix > 100:
raise Exception("300 limit as slug postfix")
def get_available_slug(self, desired_slug: str) -> str:
"""
Find an available content slug according to
the given slug and the current token customer.
If the exact slug is not avaialble -i at the end until we
found an available one or i reachs 300.
Args:
desired_slug: The desired slug
Returns:
The first available slug found
Raises:
Exception: Reached 300 try
"""
post_fix = None
while True:
if desired_slug in RESERVED_SLUGS:
c = True
else:
c = self.get_content_by_slug(desired_slug, fields="id")
if not c:
return desired_slug
if not post_fix:
post_fix = 1
desired_slug += "-1"
else:
desired_slug = desired_slug[: -len(str(post_fix)) - 1]
post_fix += 1
desired_slug += "-" + str(post_fix)
if post_fix > 300:
raise Exception("300 limit as slug postfix")
def get_available_slugs(self, titles: dict) -> Dict[str, str]:
slugs = {lang: slugify(title) for lang, title in titles.items()}
for lang, slug in slugs.items():
slugs[lang] = self.get_available_slug(slug)
return slugs
@none_on_404
def get_content(
self,
content_id: str,
fields: str = None,
action: str = "PAGE_EDIT",
cache: bool = False,
) -> Optional[Dict[str, Any]]:
""" Get a content via his id
Args:
content_id: The id of the content to get
fields: The fields projection to apply
action: PAGE_EDIT
cache: Whether to cache the result or not
Returns:
The retrieved content or None if it was not found
"""
if cache:
c = self.cache.get(f"{self.customer_id}|CONTENT|{content_id}")
if c:
return c
params = {}
if action:
params["action"] = action
if fields:
params["fields"] = fields
return self.get_call("content/get", uid=content_id, **params) # type: ignore
@none_on_404
def get_content_by_slug(
self, slug: str, fields: str = None, action: str = "PAGE_EDIT"
) -> Optional[Dict[str, Any]]:
"""
Get a content via his slug
Args:
slug: The slug of the content to get
fields: The fields projection to apply
action: PAGE_EDIT
Returns:
The retrieved content or None if it was not found
"""
params = {}
if action:
params["action"] = action
if fields:
params["fields"] = fields
return self.get_call(
"content/get", instance=self.instance_id, slug=slug, **params
) # type: ignore
    @lru_cache()
    def _get_template(self, template_id: str) -> Dict[str, Any]:
        # Raw cached template — the result is shared, callers must not
        # mutate it; use get_template() for a private copy.
        # NOTE(review): lru_cache on a method retains instances (B019).
        return self.get_call("template/get", uid=template_id)  # type: ignore
def get_template(self, template_id: str) -> Dict[str, Any]:
return deepcopy(self._get_template(template_id))
    def iter_content_templates(
        self, content_type_id: str, **kwargs: dict
    ) -> Generator[Dict[str, Any], None, None]:
        """Yield the templates of the given custom content type."""
        yield from self.iter_call(
            "template/list",
            instance=self.instance_id,
            customContentType=content_type_id,
            **kwargs,
        )  # type: ignore
    def iter_newsletters(self, **kwargs: dict) -> Generator[Dict[str, Any], None, None]:
        """Yield the newsletters of the current instance."""
        yield from self.iter_call(
            "newsletter/list",
            instance=self.instance_id,
            customer=self.customer_id,
            **kwargs,
        )  # type: ignore
def add_categories_to_community(
self, community: dict, categories: list
) -> Optional[Dict[str, Any]]:
c = community
c.setdefault("tagsDetails", [])
tags = c["tagsDetails"]
lang = self.first_lang
for tag in tags:
tag_name = tag["name"][lang]
if tag_name in categories:
categories.remove(tag_name)
if not categories:
return c
for cat in categories:
tags.append(
{
"$tempId": new_lumapps_uuid(),
"instance": community["instance"],
"kind": "community",
"name": {lang: cat},
}
)
return None
def get_community_category_ids(
self, community: dict, categories: Sequence
) -> List[str]:
lst = []
categs = set(categories)
for tag in community.get("tagsDetails", []):
if tag["name"][self.first_lang] in categs:
lst.append(tag["uid"])
return lst
    def get_community_template(self, template_id: str) -> Dict[str, Any]:
        """Fetch a community template by id (uncached, unlike _get_template)."""
        return self.get_call("communitytemplate/get", uid=template_id)  # type: ignore
    def iter_community_templates(
        self, **params
    ) -> Generator[Dict[str, Any], None, None]:
        """Yield the community templates of the current instance."""
        yield from self.iter_call(
            "communitytemplate/list", instanceId=self.instance_id, **params
        )
    def save_community_template(self, templ: Dict[str, Any]) -> Dict[str, Any]:
        """Persist a community template; in dry_run mode, echo it unsaved."""
        debug(f"Saving community template: {to_json(templ)}")
        if self.dry_run:
            return templ
        return self.get_call("communitytemplate/save", body=templ)
    @none_on_404
    def get_community(
        self, community_id: str, fields: str = None
    ) -> Optional[Dict[str, Any]]:
        """Fetch a community by id; None when not found (404)."""
        return self.get_call("community/get", uid=community_id, fields=fields)
def iter_communities(self, **kwargs: dict) -> Generator[Dict[str, Any], None, None]:
body = {"lang": "", "instanceId": self.instance_id}
body.update(**kwargs)
try:
yield from self.iter_call("community/list", body=body)
except HTTPStatusError as e:
if e.response.status_code == 400 and "FEATURE_NOT_ENABLED" in str(e):
return
raise
def iter_all_posts(
self, **iter_posts_kwargs: dict
) -> Generator[Dict[str, Any], None, None]:
for c in self.iter_communities(maxResults=100, fields="items(id)"):
for p in self.iter_community_posts(c["id"], **iter_posts_kwargs):
yield p
def iter_contents(
self, content_type_id: str = None, **kwargs
) -> Generator[Dict[str, Any], None, None]:
""" Iterate over the contents on the current lumapps site \n
https://apiv1.lumapps.com/#operation/Content/List
Args:
content_type_id: The id of a content type.
This will be used to filter the retrieved contents
kwargs: The args to pass to the request (see lumapps api doc)
"""
body = {"lang": "", "instanceId": self.instance_id, "action": "PAGE_EDIT"}
if content_type_id:
body["customContentType"] = content_type_id
body.update(**kwargs)
yield from self.iter_call("content/list", body=body)
def iter_content_lists(
self, content_type_id: str, **kwargs: dict
) -> Generator[Dict[str, Any], None, None]:
body = {
"customContentType": content_type_id,
"customContentTypeTags": [],
"instanceId": self.instance_id,
"lang": self.first_lang,
"type": "custom_list",
}
yield from self.iter_call("content/list", body=body, **kwargs)
def get_news_content_type(self) -> Dict[str, Any]:
for ct in self.iter_content_types():
if ct["functionalInnerId"] == "news":
return ct
def get_page_content_type(self) -> Dict[str, Any]:
for ct in self.iter_content_types():
if ct["functionalInnerId"] == "page":
return ct
    @none_on_404
    def get_content_type(self, content_type_id: str) -> Optional[Dict[str, Any]]:
        """Fetch a custom content type by id; None when not found (404)."""
        return self.get_call("customcontenttype/get", uid=content_type_id)
def iter_content_types(
self, **kwargs: | |
from Scripts import DataStructures
from src.Miscellaneous import bcolors
import os
import pandas as pd
import numpy as np
import matlab.engine
import matplotlib; matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from glob import glob
from shutil import copyfile
from scipy.signal import savgol_filter
# MATLAB script used to smooth/post-process the logged time series
matlab_smoothTimeSeriesFile = "SmoothTimeSeriesLog.m"
# Column header of the merged time-series csv: RTOS tick, attitude,
# accel/gyro/mag, motor commands, angle setpoints, rate setpoints
header_timeSeriesCSV = "rtosTick,pitch,roll,yaw," + \
    "ax,ay,az,gx,gy,gz,mx,my,mz," +\
    "m1CMD,m2CMD,m3CMD,m4CMD," +\
    "asp,asr,asy,rsp,rsr,rsy\n"
# Raw signal ranges as recorded (per DataConverter: motor values are the
# ESC throttle signal in mS; ahrs/gyro ranges are the recorded +/- spans)
motor_max = 1650
motor_min = 1060
gyro_symmetric_range = 250
ahrs_max = 30
ahrs_min = -30
# Normalized (unitless) ranges fed to the neural network
mapped_motor_max = 1.0
mapped_motor_min = 0.0
mapped_gyro_symmetric_range = 1.0
mapped_ahrs_max = 1.0
mapped_ahrs_min = -1.0
# Update rates of the AHRS and PID loops -- presumably Hz; TODO confirm
AHRS_UPDATE_FREQ = 500
PID_UPDATE_FREQ = 125
# TODO: Eventually fill this out for all input types
class MotorCommandConverter:
    """Converts raw binary motor-command logs (.dat) into csv files."""

    def __init__(self, base_path):
        # base_path is currently unused; kept for interface compatibility
        pass

    def dat2csv(self, input_dir, output_dir, filename):
        """Parse a binary motor log and write it back out as csv.

        :param input_dir: directory containing the raw .dat file
        :param output_dir: directory the csv is written into
        :param filename: log file name (same name used for input and output)
        """
        try:
            input_motor_file = input_dir + filename
            output_motor_csv_file = output_dir + filename
            # BUG FIX: the original called os.makedirs(output_motor_csv_file),
            # creating a *directory* named like the output file and making the
            # open() below fail; ensure the parent directory exists instead.
            if not os.path.exists(output_dir):
                os.makedirs(output_dir)
            raw_motor_data = []
            input_file_size = os.path.getsize(input_motor_file)
            with open(input_motor_file, "rb") as input_file:
                bytes_read = 0
                # read fixed-size binary records until the file is consumed
                while bytes_read < input_file_size:
                    measurement = DataStructures.SDLogMotor()
                    measurement.unpack_raw_hex(input_file.read(measurement.structSizeInBytes))
                    raw_motor_data.append(measurement)
                    bytes_read += measurement.structSizeInBytes
            # Write all the measurements to a csv file for processing later
            with open(output_motor_csv_file, 'w') as output_file:
                for measurement in raw_motor_data:
                    output_file.write(measurement.as_csv())
            print("Done parsing Motor log file.")
        except Exception:
            # kept broad to preserve the original best-effort behavior, but
            # narrowed from a bare except so Ctrl-C still interrupts
            print("Motor file doesn't exist. Continuing on.")

    def _interpolate(self):
        # TODO: placeholder for future resampling support
        pass
class DataConverter:
    """Converts raw flight-controller SD-card logs into csv and time-series data."""

    def __init__(self, raw_path, csv_path, matlab_path):
        """
        Instruct the conversion module where to find all the relevant information
        :param raw_path: Directory containing .dat log files
        :param csv_path: Directory for placing csv results
        :param matlab_path: Directory containing matlab scripts
        """
        self._raw_path = raw_path
        self._csv_path = csv_path
        self._matlab_path = matlab_path
        self._raw_dat_filenames = []  # raw .dat filenames to look for (see add_dat_filenames)
        self.data_folders = []        # data sub-folders to process (see add_dat_folders)
        self.gyro_symmetric_range_actual = gyro_symmetric_range  # The actual +/- data range recorded by the gyro
        self.gyro_symmetric_range_mapped = mapped_gyro_symmetric_range  # The desired +/- data range input for the NN
        self.motor_range_actual_max = motor_max  # ESC input max throttle signal in mS
        self.motor_range_actual_min = motor_min  # ESC input min throttle signal in mS
        self.motor_range_mapped_max = mapped_motor_max  # NN input max throttle signal (unitless)
        self.motor_range_mapped_min = mapped_motor_min  # NN input min throttle signal (unitless)
        self.ahrs_range_actual_min = ahrs_min  # AHRS estimate range bounds and their NN mappings
        self.ahrs_range_actual_max = ahrs_max
        self.ahrs_range_mapped_min = mapped_ahrs_min
        self.ahrs_range_mapped_max = mapped_ahrs_max
def add_dat_filenames(self, filenames):
    """Register the raw .dat filenames to look for inside each data folder."""
    self._raw_dat_filenames = filenames

def add_dat_folders(self, folders):
    """Register the data sub-folders (relative to the raw/csv paths) to process."""
    self.data_folders = folders
def raw_data_2_csv(self):
    """
    Takes the raw byte code from the SD Card logs and converts them into easy to
    interpret CSV files
    """
    for folder in self.data_folders:
        print(bcolors.OKGREEN + 'Changing directory to: ' + self._raw_path + folder + bcolors.ENDC)
        for file in self._raw_dat_filenames:
            # Each log type is parsed by the same algorithm; only the record
            # structure and output filename differ, so dispatch to a helper.
            # ----------------------
            # Parse the AHRS data
            # ----------------------
            if 'ahrsLog' in file:
                if 'Full' in file:
                    record_factory = DataStructures.SDLogAHRSFull
                elif 'Minimal' in file:
                    record_factory = DataStructures.SDLogAHRSMinimal
                else:
                    # Bug fix: an unknown AHRS variant previously crashed on an
                    # unbound local and was mis-reported as a missing file.
                    print("Unknown AHRS log type: " + file + ". Continuing on.")
                    continue
                self._parse_binary_log(folder, file, 'ahrsLog.csv', record_factory,
                                       "Done parsing AHRS log file.",
                                       "AHRS file doesn't exist. Continuing on.")
            # ----------------------
            # Parse the motor data
            # ----------------------
            if 'motor' in file:
                self._parse_binary_log(folder, file, 'motorLog.csv', DataStructures.SDLogMotor,
                                       "Done parsing Motor log file.",
                                       "Motor file doesn't exist. Continuing on.")
            # ----------------------
            # Parse the angle setpoint data
            # ----------------------
            if 'angle' in file:
                self._parse_binary_log(folder, file, 'angleSetpoints.csv', DataStructures.SDLogAngleSetpoint,
                                       "Done parsing Angle Setpoint log file.",
                                       "Angle setpoint file doesn't exist. Continuing on.")
            # ----------------------
            # Parse the rate setpoint data
            # ----------------------
            if 'rate' in file:
                self._parse_binary_log(folder, file, 'rateSetpoints.csv', DataStructures.SDLogRateSetpoint,
                                       "Done parsing Rate Setpoint log file.",
                                       "Rate setpoint file doesn't exist. Continuing on.")

def _parse_binary_log(self, folder, file, csv_name, record_factory, done_msg, missing_msg):
    """Parse one raw binary SD-card log into a csv file.

    Shared implementation for the four log types previously handled by four
    near-identical copy/pasted blocks.

    :param folder: Data sub-folder currently being processed
    :param file: Raw .dat filename inside that folder
    :param csv_name: Filename for the csv output
    :param record_factory: DataStructures class describing one log record
    :param done_msg: Message printed on success
    :param missing_msg: Message printed when the input file is unavailable
    """
    try:
        input_log_file = self._raw_path + folder + file
        output_csv_file = self._csv_path + folder + csv_name
        if not os.path.exists(self._csv_path + folder):
            os.makedirs(self._csv_path + folder)

        records = []
        input_file_size = os.path.getsize(input_log_file)
        with open(input_log_file, "rb") as input_file:
            bytes_read = 0
            while bytes_read < input_file_size:
                measurement = record_factory()
                measurement.unpack_raw_hex(input_file.read(measurement.structSizeInBytes))
                records.append(measurement)
                bytes_read += measurement.structSizeInBytes

        # Write all the measurements to a csv file for processing later
        with open(output_csv_file, 'w') as output_file:
            output_file.writelines(record.as_csv() for record in records)
        print(done_msg)
    except OSError:
        # Narrowed from a bare except: only I/O errors (e.g. missing input
        # file) are expected and tolerated here.
        print(missing_msg)
def create_time_series_from_csv_logs(self, ahrs_type_full=True):
"""
This function takes the output of raw_data_2_csv and filters through it to produce a single csv file with
concatenated sensor data at each time step. Because the freeRTOS implementation in ValkyrieFCS has slight delays
between tasks, there are "gaps" in the information that must be filled in.
"""
def stringify(data):
csv_stringified_data = [(str(x) + ',') for x in data]
csv_stringified_data[-1] = csv_stringified_data[-1].replace(",", "")
return ''.join(csv_stringified_data)
def write_time_series_to_file(filename, header_str, data):
data_keys = list(data.keys())
data_keys.sort(key=int)
with open(filename, 'w') as file:
file.write(header_str)
for key_val in data_keys:
line = str(key_val) + ',' + \
stringify(data[key_val]['ahrsMeas']) + ',' + \
stringify(data[key_val]['motorCMD']) + ',' + \
stringify(data[key_val]['angleSet']) + ',' + \
stringify(data[key_val]['rateSet']) + '\n'
file.write(line)
print(bcolors.OKBLUE + "------------Starting creation of time series log file------------" + bcolors.ENDC)
for folder in self.data_folders:
print(bcolors.OKGREEN + 'Changing directory to: ' + self._raw_path + folder + bcolors.ENDC)
time_col = 0
m1_col = 1
m2_col = 2
m3_col = 3
m4_col = 4
pitch_col = 1
roll_col = 2
yaw_col = 3
ax_col = 4
ay_col = 5
az_col = 6
gx_col = 7
gy_col = 8
gz_col = 9
mx_col = 10
my_col = 11
mz_col = 12
update_rate_ms = int(1000.0/PID_UPDATE_FREQ)
motor_csv_file = self._csv_path + folder + 'motorLog.csv'
ahrs_csv_file = self._csv_path + folder + 'ahrsLog.csv'
angle_setpoint_csv_file = self._csv_path + folder + 'angleSetpoints.csv'
rate_setpoint_csv_file = self._csv_path + folder + 'rateSetpoints.csv'
raw_timeseries_csv_file = self._csv_path + folder + 'timeSeriesDataRaw.csv'
int_timeseries_csv_file = self._csv_path + folder + 'timeSeriesDataInterpolated.csv'
# ------------------------------
# Grab every possible recorded freeRTOS tick value and
# initialize the time_series_data dictionary to zero
# ------------------------------
#all_recorded_ticks = np.r_[motor_ticks, ahrs_ticks, angle_setpoint_ticks, rate_setpoint_ticks]
all_recorded_ticks = np.empty([1, 1])
motor_data_valid = True
if os.path.exists(motor_csv_file):
motor_data = pd.read_csv(motor_csv_file, header=None)
motor_ticks = np.array(motor_data[time_col]).reshape(-1, 1)
all_recorded_ticks = np.r_[all_recorded_ticks, motor_ticks]
else:
motor_data_valid = False
ahrs_data_valid = True
if os.path.exists(ahrs_csv_file):
ahrs_data = pd.read_csv(ahrs_csv_file, header=None)
ahrs_ticks = np.array(ahrs_data[time_col]).reshape(-1, 1)
all_recorded_ticks = np.r_[all_recorded_ticks, ahrs_ticks]
else:
ahrs_data_valid = False
angle_setpoint_data_valid = True
if os.path.exists(angle_setpoint_csv_file):
angle_setpoint_data = pd.read_csv(angle_setpoint_csv_file, header=None)
angle_setpoint_ticks = np.array(angle_setpoint_data[time_col]).reshape(-1, 1)
all_recorded_ticks = np.r_[all_recorded_ticks, angle_setpoint_ticks]
else:
angle_setpoint_data_valid = False
rate_setpoint_data_valid = True
if os.path.exists(rate_setpoint_csv_file):
rate_setpoint_data = pd.read_csv(rate_setpoint_csv_file, header=None)
rate_setpoint_ticks = np.array(rate_setpoint_data[time_col]).reshape(-1, 1)
all_recorded_ticks = np.r_[all_recorded_ticks, rate_setpoint_ticks]
else:
rate_setpoint_data_valid = False
if np.size(all_recorded_ticks) == (1, 1):
print("No available data for this round. Continuing to next folder...")
continue
print("\tCreating time series structure...")
time_series_data = {}
for tic in all_recorded_ticks[:, 0]:
if np.isnan(tic):
continue
if int(tic) not in time_series_data:
time_series_data[int(tic)] = {
# [M1, M2, M3, M4]
'motorCMD': [0, 0, 0, 0],
# [pitch, roll, yaw, ax, ay, az, gx, gy, gz, mx, my, mz]
'ahrsMeas': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [pitch, roll, yaw]
'angleSet': [0, 0, 0],
# [pitch, roll, yaw]
'rateSet': [0, 0, 0]}
# ------------------------------
# Fill in the time_series_data dict with pertinent information
# at each valid tic mark
# ------------------------------
if motor_data_valid:
print("\tFilling motor data...")
for idx in range(0, len(motor_ticks)):
tic = motor_data[time_col][idx]
time_series_data[tic]['motorCMD'] = np.array([motor_data[m1_col][idx],
motor_data[m2_col][idx],
motor_data[m3_col][idx],
motor_data[m4_col][idx]])
if ahrs_data_valid:
print("\tFilling ahrs data...")
for idx in range(0, len(ahrs_ticks)):
tic = ahrs_data[time_col][idx]
if ahrs_type_full:
time_series_data[tic]['ahrsMeas'] = np.array([
ahrs_data[pitch_col][idx], ahrs_data[roll_col][idx], ahrs_data[yaw_col][idx],
ahrs_data[ax_col][idx], ahrs_data[ay_col][idx], ahrs_data[az_col][idx],
ahrs_data[gx_col][idx], ahrs_data[gy_col][idx], ahrs_data[gz_col][idx],
ahrs_data[mx_col][idx], ahrs_data[my_col][idx], ahrs_data[mz_col][idx]])
else: # Assumes minimal ahrs data logging type
time_series_data[tic]['ahrsMeas'] = np.array([
ahrs_data[pitch_col][idx], ahrs_data[roll_col][idx], ahrs_data[yaw_col][idx]])
if angle_setpoint_data_valid:
print("\tFilling angle setpoint data...")
for idx in range(0, len(angle_setpoint_ticks)):
tic = angle_setpoint_data[time_col][idx]
time_series_data[tic]['angleSet'] = np.array([angle_setpoint_data[pitch_col][idx],
angle_setpoint_data[roll_col][idx],
angle_setpoint_data[yaw_col][idx]])
if rate_setpoint_data_valid:
print("\tFilling rate | |
this before?
# dist_word_idx = list_name.index(last_dist_word) # if list_dist.__len__() > 0 else 0
dist_word_idx = list_name.index(last_dist_word) if list_dist.__len__() > 0 else 0
offset_idx, word_idx, word_idx_offset, composite_token_offset = self.adjust_word_index(
self.analysis_response.name_as_submitted,
self.analysis_response.name_original_tokens,
self.analysis_response.name_tokens,
dist_word_idx
)
issue.name_actions = [
NameAction(
type=NameActions.BRACKETS,
position=WordPositions.END,
message="Add a Descriptive Word Here",
word=last_dist_word,
index=offset_idx
)
]
# Setup boxes
issue.setup = self.setup_config
for setup_item in issue.setup:
# Loop over properties
for prop in vars(setup_item):
if isinstance(setup_item.__dict__[prop], Template):
# Render the Template string, replacing placeholder vars
setattr(setup_item, prop, setup_item.__dict__[prop].safe_substitute([]))
return issue
class TooManyWordsIssue(AnalysisResponseIssue):
    """Flags names longer than three words (excluding proper designations)."""
    issue_type = AnalysisIssueCodes.TOO_MANY_WORDS
    status_text = "Further Action Required"
    issue = None

    def create_issue(self, procedure_result):
        """Build the NameAnalysisIssue response for this issue type.

        :param procedure_result: Procedure result (unused for this issue)
        :return: Populated NameAnalysisIssue
        """
        issue = NameAnalysisIssue(
            issue_type=self.issue_type,
            line1="Names longer than three words, not including proper designations, may be sent to examination.",
            line2="Please check wait times at the top of the screen.",
            consenting_body=None,
            designations=None,
            show_reserve_button=False,
            show_examination_button=True,
            conflicts=None,
            setup=None,
            name_actions=None
        )

        # Setup boxes
        issue.setup = self.setup_config
        for setup_item in issue.setup:
            # Loop over properties
            for prop in vars(setup_item):
                if isinstance(setup_item.__dict__[prop], Template):
                    # Render the Template string, replacing placeholder vars.
                    # Bug fix: safe_substitute() expects a mapping, not a list;
                    # a list raises TypeError if the template has placeholders.
                    setattr(setup_item, prop, setup_item.__dict__[prop].safe_substitute({}))

        return issue
"""
General Name Issues
"""
class ContainsWordsToAvoidIssue(AnalysisResponseIssue):
    """Flags names containing words that cannot be used at all."""
    issue_type = AnalysisIssueCodes.WORDS_TO_AVOID
    status_text = "Further Action Required"
    issue = None

    def create_issue(self, procedure_result):
        """Build the response issue with a STRIKE action per prohibited word.

        :param procedure_result: Procedure result whose values['list_avoid']
            holds the prohibited words found in the name
        :return: Populated NameAnalysisIssue
        """
        list_name = self._lc_list_items(self.analysis_response.name_tokens)  # procedure_result.values['list_name']
        list_avoid = self._lc_list_items(procedure_result.values['list_avoid'])

        issue = NameAnalysisIssue(
            issue_type=self.issue_type,
            line1="The word(s) " + self._join_list_words(list_avoid) + " cannot be used.",
            line2="",
            consenting_body=None,
            designations=None,
            show_reserve_button=False,
            show_examination_button=False,
            conflicts=None,
            setup=None,
            name_actions=[]
        )

        # TODO: If there's a duplicate of a word to avoid, just grabbing the index might not do!
        issue.name_actions = []
        for word in list_avoid:
            offset_idx, word_idx, word_idx_offset, composite_token_offset = self.adjust_word_index(
                self.analysis_response.name_as_submitted,
                self.analysis_response.name_original_tokens,
                self.analysis_response.name_tokens,
                list_name.index(word)
            )
            issue.name_actions.append(
                NameAction(
                    type=NameActions.STRIKE,
                    word=word,
                    index=offset_idx
                )
            )

        # Setup boxes
        issue.setup = self.setup_config
        for setup_item in issue.setup:
            # Loop over properties
            for prop in vars(setup_item):
                if isinstance(setup_item.__dict__[prop], Template):
                    # Render the Template string, replacing placeholder vars.
                    # Bug fix: safe_substitute() expects a mapping, not a list;
                    # a list raises TypeError if the template has placeholders.
                    setattr(setup_item, prop, setup_item.__dict__[prop].safe_substitute({}))

        return issue
class WordSpecialUse(AnalysisResponseIssue):
    """Flags words whose use requires examination."""
    issue_type = AnalysisIssueCodes.WORD_SPECIAL_USE
    status_text = "Further Action Required"
    issue = None

    def create_issue(self, procedure_result):
        """Build the response issue with a HIGHLIGHT action per special-use word.

        :param procedure_result: Procedure result whose values['list_special']
            holds the special-use words found in the name
        :return: Populated NameAnalysisIssue
        """
        list_name = self._lc_list_items(self.analysis_response.name_tokens)  # procedure_result.values['list_name']
        list_special = self._lc_list_items(procedure_result.values['list_special'])

        issue = NameAnalysisIssue(
            issue_type=self.issue_type,
            line1="The word(s) " + self._join_list_words(list_special) + " must go to examination ",
            line2=None,
            consenting_body=None,
            designations=None,
            show_reserve_button=False,
            show_examination_button=False,
            conflicts=None,
            setup=None,
            name_actions=[]
        )

        # TODO: If there's a duplicate of a word to avoid, just grabbing the index might not do!
        issue.name_actions = []
        for word in list_special:
            offset_idx, word_idx, word_idx_offset, composite_token_offset = self.adjust_word_index(
                self.analysis_response.name_as_submitted,
                self.analysis_response.name_original_tokens,
                self.analysis_response.name_tokens,
                list_name.index(word)
            )
            issue.name_actions.append(
                NameAction(
                    type=NameActions.HIGHLIGHT,
                    word=word,
                    index=offset_idx
                )
            )

        # Setup boxes
        issue.setup = self.setup_config
        for setup_item in issue.setup:
            # Loop over properties
            for prop in vars(setup_item):
                if isinstance(setup_item.__dict__[prop], Template):
                    # Render the Template string, replacing placeholder vars.
                    # Bug fix: safe_substitute() expects a mapping, not a list;
                    # a list raises TypeError if the template has placeholders.
                    setattr(setup_item, prop, setup_item.__dict__[prop].safe_substitute({}))

        return issue
class NameRequiresConsentIssue(AnalysisResponseIssue):
    """Flags restricted words that may require third-party consent."""
    issue_type = AnalysisIssueCodes.NAME_REQUIRES_CONSENT
    status_text = "Further Action Required"
    issue = None

    def create_issue(self, procedure_result):
        """Build the response issue highlighting each consent-requiring word.

        :param procedure_result: Procedure result whose values['list_consent']
            holds the restricted words found in the name
        :return: Populated NameAnalysisIssue
        """
        # NOTE(review): unlike the sibling issues, name_tokens is NOT lower-cased
        # here while the lookup below uses word.lower() — this assumes
        # name_tokens is already lower case; confirm against the response builder.
        list_name = self.analysis_response.name_tokens  # procedure_result.values['list_name']
        list_consent = self._lc_list_items(procedure_result.values['list_consent'])

        issue = NameAnalysisIssue(
            issue_type=self.issue_type,
            line1="The word(s) " + self._join_list_words(list_consent) + " are restricted and may require consent.",
            line2="Please check the options below.",
            consenting_body=ConsentingBody(
                name="",
                email=""
            ),
            designations=None,
            show_reserve_button=None,
            show_examination_button=False,
            conflicts=None,
            setup=None,
            name_actions=[]
        )

        issue.name_actions = []
        for word in list_consent:
            offset_idx, word_idx, word_idx_offset, composite_token_offset = self.adjust_word_index(
                self.analysis_response.name_as_submitted,
                self.analysis_response.name_original_tokens,
                self.analysis_response.name_tokens,
                list_name.index(word.lower())
            )
            issue.name_actions.append(
                NameAction(
                    type=NameActions.HIGHLIGHT,
                    word=word,
                    index=offset_idx
                )
            )

        # TODO: Where does this info come from?
        issue.consenting_body = ConsentingBody(
            name="Example Conflict Company Ltd.",
            email="<EMAIL>"
        )

        # Setup boxes
        issue.setup = self.setup_config
        for setup_item in issue.setup:
            # Loop over properties
            for prop in vars(setup_item):
                if isinstance(setup_item.__dict__[prop], Template):
                    # Render the Template string, replacing placeholder vars.
                    # Bug fix: safe_substitute() expects a mapping, not a list;
                    # a list raises TypeError if the template has placeholders.
                    setattr(setup_item, prop, setup_item.__dict__[prop].safe_substitute({}))

        return issue
class CorporateNameConflictIssue(AnalysisResponseIssue):
    """Flags names that are too similar to an existing corporate name."""
    issue_type = AnalysisIssueCodes.CORPORATE_CONFLICT
    status_text = "Further Action Required"
    issue = None

    def create_issue(self, procedure_result):
        """Build the conflict response, highlighting/striking the words of the
        submitted name that collide with the first matched corporate name.

        :param procedure_result: Result whose values contain 'list_dist',
            'list_desc', 'list_conflicts', 'corp_num' and 'consumption_date'
        :return: Populated NameAnalysisIssue with one Conflict attached
        """
        name_as_submitted = self.analysis_response.name_as_submitted
        list_original = self._lc_list_items(self.analysis_response.name_original_tokens)
        list_name = self._lc_list_items(self.analysis_response.name_tokens)
        all_designations = self._lc_list_items(self.analysis_response.analysis_service.get_all_designations())
        # all_combined_designations = all_designations + remove_periods_designation(all_designations)
        list_name_as_submitted = self._lc_list_items(self.analysis_response.name_as_submitted_tokenized)
        # Filter out designations from the tokens
        list_tokens = [item for item in list_name_as_submitted if item not in all_designations]
        list_dist = procedure_result.values['list_dist']  # Don't lower case this one it's a list wrapped list
        list_desc = procedure_result.values['list_desc']  # Don't lower case this one it's a list wrapped list
        list_conflicts = procedure_result.values['list_conflicts']  # Don't lower case this one it's a dict
        list_corp_num = procedure_result.values['corp_num']
        list_consumption_date = procedure_result.values['consumption_date']

        issue = NameAnalysisIssue(
            issue_type=self.issue_type,
            line1="Too similar to an existing name.",
            line2=None,
            consenting_body=None,
            designations=None,
            show_reserve_button=None,
            show_examination_button=False,
            conflicts=[],
            setup=None,
            name_actions=[]
        )

        '''
        eg:
        list_name: <class 'list'>: ['mountain', 'view', 'growers']
        list_dist: <class 'list'>: [['mountain'], ['mountain', 'view']]
        list_desc: <class 'list'>: [['view', 'growers'], ['growers']]
        list_conflicts: <class 'dict'>: {'MOUNTAIN VIEW GROWERS INC.': {'mountain': ['mountain'], 'view': ['view'], 'growers': ['growers']}}
        '''
        # Grab the first conflict only; any additional conflicts in the dict
        # are ignored for this response.
        current_conflict_name = list(list_conflicts.keys())[0]  # eg: 'MOUNTAIN VIEW GROWERS INC.'
        current_corp_num = list_corp_num[0]
        current_consumption_date = list_consumption_date[0]
        current_conflict = list_conflicts[current_conflict_name]  # eg: {'mountain': ['mountain'], 'view': ['view'], 'growers': ['growers']}
        current_conflict_keys = list(current_conflict.keys()) if current_conflict else []
        # Exact match: the conflict's words equal the processed name tokens.
        is_exact_match = (list_name == current_conflict_keys)

        # Flatten the distinctive/descriptive word lists for the templates.
        list_dist_words = list(set([item for sublist in list_dist for item in sublist]))
        list_desc_words = list(set([item for sublist in list_desc for item in sublist]))

        # Apply our is_exact_match strategy:
        # - Add brackets after the first distinctive word
        # - Add brackets after the last descriptive word?
        # - Strike out the last word
        list_remove = []  # These are passed down to the Template

        if is_exact_match:
            # Loop over the token words, we need to decide what to do with each word
            for token_idx, word in enumerate(list_tokens):
                offset_idx, word_idx, word_idx_offset, composite_token_offset = self.adjust_word_index(
                    name_as_submitted,
                    list_original,
                    list_tokens,
                    token_idx
                )
                # NOTE(review): list.index() returns the FIRST occurrence, so a
                # duplicated word is always treated as its first position.
                # Highlight the conflict words
                if list_tokens.index(word) != list_tokens.index(list_tokens[-1]):
                    issue.name_actions.append(NameAction(
                        word=word,
                        index=offset_idx,
                        endIndex=offset_idx,
                        type=NameActions.HIGHLIGHT
                    ))
                # Strike out the last matching word
                if list_tokens.index(word) == list_tokens.index(list_tokens[-1]):
                    list_remove.append(word)
                    issue.name_actions.append(NameAction(
                        word=word,
                        index=offset_idx,
                        endIndex=offset_idx,
                        type=NameActions.STRIKE
                    ))

        if not is_exact_match:
            # Loop over the list_name words, we need to decide what to do with each word
            for token_idx, word in enumerate(list_tokens):
                offset_idx, word_idx, word_idx_offset, composite_token_offset = self.adjust_word_index(
                    name_as_submitted,
                    list_original,
                    list_tokens,
                    token_idx
                )
                # This code has duplicate blocks because it allows us to tweak the response for
                # composite token matches separately from normal words if necessary
                if composite_token_offset and composite_token_offset > 0:
                    # <class 'list'>: ['mountain', 'view']
                    # Highlight the conflict words
                    if word in current_conflict_keys and current_conflict_keys.index(word) != current_conflict_keys.index(current_conflict_keys[-1]):
                        issue.name_actions.append(NameAction(
                            word=word,
                            index=offset_idx,
                            type=NameActions.HIGHLIGHT
                        ))
                    # Strike out the last matching word
                    if word in current_conflict_keys and current_conflict_keys.index(word) == current_conflict_keys.index(current_conflict_keys[-1]):
                        issue.name_actions.append(NameAction(
                            word=word,
                            index=offset_idx,
                            type=NameActions.STRIKE
                        ))
                else:
                    # Highlight the conflict words
                    if word in current_conflict_keys and current_conflict_keys.index(word) != current_conflict_keys.index(current_conflict_keys[-1]):
                        issue.name_actions.append(NameAction(
                            word=word,
                            index=offset_idx,
                            type=NameActions.HIGHLIGHT
                        ))
                    # Strike out the last matching word
                    if word in current_conflict_keys and current_conflict_keys.index(word) == current_conflict_keys.index(current_conflict_keys[-1]):
                        issue.name_actions.append(NameAction(
                            word=word,
                            index=offset_idx,
                            type=NameActions.STRIKE
                        ))

        issue.conflicts = []
        conflict = Conflict(
            name=current_conflict_name,
            date=date.today(),
            corp_num=current_corp_num,
            consumption_date=current_consumption_date
        )
        issue.conflicts.append(conflict)

        # Setup boxes
        issue.setup = self.setup_config
        # Replace template strings in setup boxes
        for setup_item in issue.setup:
            # Loop over properties
            for prop in vars(setup_item):
                if isinstance(setup_item.__dict__[prop], Template):
                    # Render the Template string, replacing placeholder vars
                    setattr(setup_item, prop, setup_item.__dict__[prop].safe_substitute({
                        'list_name': self._join_list_words(list_name),
                        'list_remove': self._join_list_words(list_remove),
                        'list_dist': self._join_list_words(list_dist_words),
                        'list_desc': self._join_list_words(list_desc_words)
                    }))

        return issue
class DesignationNonExistentIssue(AnalysisResponseIssue):
    """Flags names that are missing a required designation entirely."""
    issue_type = AnalysisIssueCodes.DESIGNATION_NON_EXISTENT
    status_text = "Further Action Required"
    issue = None

    def create_issue(self, procedure_result):
        """Build the response offering the valid designations to choose from.

        :param procedure_result: Procedure result whose
            values['correct_designations'] lists the designations valid for
            the selected entity type
        :return: Populated NameAnalysisIssue
        """
        name_tokens_lc = self._lc_list_items(self.analysis_response.name_tokens)  # procedure_result.values['list_name']
        valid_designations = self._lc_list_items(procedure_result.values['correct_designations'])

        issue = NameAnalysisIssue(
            issue_type=self.issue_type,
            line1="Further Action. A designation is required. Please select one from Option 1 below.",
            line2=None,
            consenting_body=None,
            designations=valid_designations,
            show_reserve_button=False,
            show_examination_button=False,
            conflicts=None,
            setup=None,
            name_actions=[]
        )

        # Values substituted into any Template properties of the setup boxes.
        template_vars = {
            'list_name': self._join_list_words(name_tokens_lc),
            'correct_designations': self._join_list_words(valid_designations)
        }

        # Attach the setup boxes and render their template strings in place.
        issue.setup = self.setup_config
        for setup_item in issue.setup:
            for prop, value in vars(setup_item).items():
                if isinstance(value, Template):
                    setattr(setup_item, prop, value.safe_substitute(template_vars))

        return issue
class DesignationMismatchIssue(AnalysisResponseIssue):
issue_type = AnalysisIssueCodes.DESIGNATION_MISMATCH
status_text = "Further Action Required"
issue = None
def create_issue(self, procedure_result):
list_name = self.analysis_response.name_tokens
list_name_incl_designation = self.analysis_response.name_original_tokens
incorrect_designations = procedure_result.values['incorrect_designations']
correct_designations = procedure_result.values['correct_designations']
incorrect_designations_lc = self._lc_list_items(incorrect_designations, True)
list_name_incl_designation_lc = self._lc_list_items(list_name_incl_designation)
entity_type_description = get_entity_type_description(self.entity_type)
issue = NameAnalysisIssue(
issue_type=self.issue_type,
line1="The " + self._join_list_words(incorrect_designations) + " designation(s) cannot be used with selected entity type of " + entity_type_description + " </b>",
line2=None,
consenting_body=None,
designations=correct_designations,
show_reserve_button=False,
show_examination_button=False,
conflicts=None,
setup=None,
name_actions=[]
)
# Loop over the list_name words, we need to decide to do with each word
for word in list_name_incl_designation_lc:
offset_idx, word_idx, word_idx_offset, composite_token_offset = self.adjust_word_index(
self.analysis_response.name_as_submitted,
self.analysis_response.name_original_tokens,
| |
-self.state_equations.jacobian(self.expec)
self.C_in = simplify(self.state_equations
- self.Gamma0 @ self.endog
+ self.Gamma1 @ self.endogl
+ self.Psi @ self.exog
+ self.Pi @ self.expec)
# Obs Equation
if generate_obs:
self.obs_matrix = Matrix(eye(self.n_obs))
self.obs_offset = Matrix(zeros(self.n_obs))
else:
self.obs_matrix = self.obs_equations.jacobian(self.endog)
self.obs_offset = self.obs_equations - self.obs_matrix @ self.endog
def _calc_posterior(self, theta):
    """Log-posterior of `theta`: log-prior plus log-likelihood.

    The sum is scaled by 1000 to increase the precision of the posterior
    mode-finding algorithm.
    """
    log_prior = self._calc_prior(theta)
    log_likelihood = self._log_likelihood(theta)
    return (log_prior + log_likelihood) * 1000
def _calc_prior(self, theta):
    """Log prior density of the parameter vector `theta`.

    Each parameter's (a, b) distribution parameters are derived from the
    user-supplied mean/std, mirroring _get_prior_info.

    :param theta: dict mapping parameter symbols to candidate values
    :return: sum of log prior densities (up to an additive constant)
    """
    prior_dict = self.prior_dict
    df_prior = self.prior_info.copy()
    df_prior['pdf'] = nan
    for param in prior_dict.keys():
        mu = df_prior.loc[str(param)]['mean']
        sigma = df_prior.loc[str(param)]['std']
        dist = df_prior.loc[str(param)]['distribution'].lower()
        theta_i = theta[param]
        # since we are going to take logs, the density function only needs the terms that depend on
        # theta_i, this will help speed up the code a little and will not affect optimization output.
        if dist == 'beta':
            # beta(a, b) with shape parameters matched to mean/std
            a = ((mu ** 2) * (1 - mu)) / (sigma ** 2) - mu
            b = a * mu / (1 - mu)
            pdf_i = (theta_i**(a - 1)) * ((1 - theta_i)**(b - 1))
        elif dist == 'gamma':
            # gamma with shape a and scale b matched to mean/std
            a = (mu/sigma)**2
            b = mu/a
            pdf_i = theta_i**(a - 1) * exp(-theta_i/b)
        elif dist == 'invgamma':
            # inverse-gamma with shape a and scale b matched to mean/std
            a = (mu/sigma)**2 + 2
            b = mu * (a - 1)
            pdf_i = (theta_i**(- a - 1)) * exp(-b/theta_i)
        elif dist == 'uniform':
            # NOTE(review): constant density — support bounds are not enforced
            # here, so theta_i outside [a, b] is not penalized. Confirm intended.
            a = mu - sqrt(3) * sigma
            b = 2 * mu - a
            pdf_i = 1/(b - a)
        else:  # Normal
            pdf_i = exp(-((theta_i - mu)**2)/(2 * (sigma**2)))
        df_prior.loc[str(param), 'pdf'] = pdf_i
    df_prior['log pdf'] = log(df_prior['pdf'].astype(float))
    P = df_prior['log pdf'].sum()
    return P
def _log_likelihood(self, theta):
    """Log-likelihood of the observed data given `theta`.

    Solves the linear rational-expectations model with `gensys` and, when a
    unique stable solution exists, evaluates the likelihood with the Kalman
    filter. Returns -inf for invalid or non-finite parameter draws.
    """
    Gamma0, Gamma1, Psi, Pi, C_in, obs_matrix, obs_offset = self._eval_matrix(theta, to_array=True)
    # Reject draws that produce non-finite state matrices.
    for mat in [Gamma0, Gamma1, Psi, Pi, C_in]:
        if isnan(mat).any() or isinf(mat).any():
            return -inf
    G1, C_out, impact, fmat, fwt, ywt, gev, eu, loose = gensys(Gamma0, Gamma1, C_in, Psi, Pi)
    # eu == [1, 1] signals existence AND uniqueness of the solution.
    if eu[0] == 1 and eu[1] == 1:
        # TODO add observation covariance to allow for measurement errors
        kf = KalmanFilter(G1, obs_matrix, impact @ impact.T, None, C_out.reshape(self.n_state),
                          obs_offset.reshape(self.n_obs))
        L = kf.loglikelihood(self.data)
    else:
        L = - inf
    return L
def _eval_matrix(self, theta, to_array):
    """Substitute parameter values into the symbolic model matrices.

    :param theta: dict mapping parameter symbols to numeric values
    :param to_array: when True, convert each result to a float array
    :return: tuple (Gamma0, Gamma1, Psi, Pi, C_in, obs_matrix, obs_offset)
    """
    # State matrices first, then observation matrices — order matters to
    # callers, which unpack the tuple positionally.
    symbolic = (self.Gamma0, self.Gamma1, self.Psi, self.Pi,
                self.C_in, self.obs_matrix, self.obs_offset)
    evaluated = [mat.subs(theta) for mat in symbolic]
    if to_array:
        evaluated = [array(mat).astype(float) for mat in evaluated]
    return tuple(evaluated)
def _get_prior_info(self):
    """Tabulate each parameter's prior distribution.

    Derives the distribution-specific parameters (a, b) from the declared
    mean and standard deviation.

    :return: DataFrame indexed by parameter name with columns
        ['distribution', 'mean', 'std', 'param a', 'param b']
    """
    prior_dict = self.prior_dict
    index_names = [str(sym) for sym in list(self.params)]
    df_prior = pd.DataFrame(columns=['distribution', 'mean', 'std', 'param a', 'param b'],
                            index=index_names)
    for param, info in prior_dict.items():
        mu = info['mean']
        sigma = info['std']
        dist = info['dist'].lower()
        if dist == 'beta':
            # Shape parameters matched to the requested mean/std.
            a = ((mu ** 2) * (1 - mu)) / (sigma ** 2) - mu
            b = a * mu / (1 - mu)
        elif dist == 'gamma':
            a = (mu / sigma) ** 2
            b = mu / a
        elif dist == 'invgamma':
            a = (mu / sigma) ** 2 + 2
            b = mu * (a - 1)
        elif dist == 'uniform':
            # Endpoints of the interval with the requested mean/std.
            a = mu - sqrt(3) * sigma
            b = 2 * mu - a
        else:  # Normal: (a, b) are simply (mean, std)
            a = mu
            b = sigma
        df_prior.loc[str(param)] = [dist, mu, sigma, a, b]
    return df_prior
def _res2irr(self, theta_res):
    """Map parameters from their restricted (support-bounded) space to an
    unrestricted space, for use by unconstrained optimizers.

    Inverse of _irr2res.
    """
    prior_info = self.prior_info
    theta_irr = theta_res.copy()
    for param, theta_i in theta_res.items():
        row = prior_info.loc[str(param)]
        a = row['param a']
        b = row['param b']
        dist = row['distribution'].lower()
        if dist == 'beta':
            # (0, 1) support -> logit transform
            transformed = log(theta_i / (1 - theta_i))
        elif dist in ('gamma', 'invgamma'):
            # positive support -> log transform
            transformed = log(theta_i)
        elif dist == 'uniform':
            # (a, b) support -> generalized logit
            transformed = log((theta_i - a) / (b - theta_i))
        else:  # Normal: already unrestricted
            transformed = theta_i
        theta_irr[param] = transformed
    return theta_irr
def _irr2res(self, theta_irr):
    """Map parameters from the unrestricted space back to their restricted
    (support-bounded) space. Inverse of _res2irr.
    """
    prior_info = self.prior_info
    theta_res = theta_irr.copy()
    for param, lambda_i in theta_irr.items():
        row = prior_info.loc[str(param)]
        a = row['param a']
        b = row['param b']
        dist = row['distribution'].lower()
        if dist == 'beta':
            # inverse logit back onto (0, 1)
            theta_res[param] = exp(lambda_i) / (1 + exp(lambda_i))
        elif dist in ('gamma', 'invgamma'):
            # inverse log back onto the positive half-line
            theta_res[param] = exp(lambda_i)
        elif dist == 'uniform':
            # inverse generalized logit back onto (a, b)
            theta_res[param] = (a + b * exp(lambda_i)) / (1 + exp(lambda_i))
        else:  # Normal: identity
            theta_res[param] = lambda_i
    return theta_res
def _plot_chains(self, chains, show_charts):
    """Plot the MCMC trace of every estimated parameter on a near-square grid.

    :param chains: DataFrame of draws, one column per parameter
    :param show_charts: when True, display the figure with plt.show()
    """
    n_cols = int(self.n_param ** 0.5)
    n_rows = n_cols if self.n_param <= n_cols ** 2 else n_cols + 1
    grid = (n_rows, n_cols)
    plt.figure(figsize=(7 * 1.61, 7))
    for idx, param in enumerate(list(self.params)):
        # divmod gives the (row, col) position of cell `idx` in the grid.
        axis = plt.subplot2grid(grid, divmod(idx, n_cols))
        axis.plot(chains[str(param)], linewidth=0.5, color='darkblue')
        axis.set_title(self.prior_dict[param]['label'])
    plt.tight_layout()
    if show_charts:
        plt.show()
def _plot_prior_posterior(self, chains, show_charts):
    """Plot, for every parameter, the posterior histogram from the MCMC
    chains overlaid with the analytical prior density.

    :param chains: DataFrame of posterior draws, one column per parameter.
    :param show_charts: if True, display the figure with ``plt.show()``.
    """
    n_bins = int(sqrt(chains.shape[0]))
    n_cols = max(int(self.n_param ** 0.5), 1)
    # Ceiling division guarantees n_rows * n_cols >= n_param (the old
    # "n_cols + 1" heuristic under-allocated rows for e.g. 3, 7 or 8
    # parameters and made subplot2grid raise).
    n_rows = (self.n_param + n_cols - 1) // n_cols
    subplot_shape = (n_rows, n_cols)
    plt.figure(figsize=(7 * 1.61, 7))
    for count, param in enumerate(list(self.params)):
        mu = self.prior_info.loc[str(param)]['mean']
        sigma = self.prior_info.loc[str(param)]['std']
        a = self.prior_info.loc[str(param)]['param a']
        b = self.prior_info.loc[str(param)]['param b']
        # Normalize case so the branch below matches the comparisons
        # used in _res2irr / _irr2res.
        dist = self.prior_info.loc[str(param)]['distribution'].lower()
        ax = plt.subplot2grid(subplot_shape, (count // n_cols, count % n_cols))
        ax.hist(chains[str(param)], bins=n_bins, density=True,
                color='royalblue', edgecolor='black')
        ax.set_title(self.prior_dict[param]['label'])
        x_min, x_max = ax.get_xlim()
        x = linspace(x_min, x_max, n_bins)
        if dist == 'beta':
            y = beta.pdf(x, a, b)
        elif dist == 'gamma':
            y = gamma.pdf(x, a, scale=b)
        elif dist == 'invgamma':
            # NOTE(review): hand-rolled rescaling of scipy's standardized
            # inverse-gamma pdf to scale parameter b — confirm it agrees
            # with invgamma.pdf(x, a, scale=b).
            y = (b**a) * invgamma.pdf(x, a) * exp((1 - b) / x)
        elif dist == 'uniform':
            y = uniform.pdf(x, loc=a, scale=b - a)
        else:  # normal
            y = norm.pdf(x, loc=mu, scale=sigma)
        ax.plot(x, y, color='red')
    plt.tight_layout()
    if show_charts:
        plt.show()
def _posterior_table(self, chains):
df = self.prior_info[['distribution', 'mean', 'std']]
df = df.rename({'distribution': 'prior dist', 'mean': 'prior mean', 'std': 'prior std'}, axis=1)
df['posterior mode'] = chains.mode().mean()
df['posterior mean'] = chains.mean()
df['posterior 5%'] = chains.quantile(0.05)
df['posterior 95%'] = chains.quantile(0.95)
return df
def gensys(g0, g1, c, psi, pi, div=None, realsmall=0.000001):
"""
This code is a translation from matlab to python of <NAME>'s 'gensys'.
https://dge.repec.org/codes/sims/linre3a/
"""
# TODO Assert variable types
unique = False
eu = [0, 0]
nunstab = 0
zxz = False
n = g1.shape[0]
if div is None:
div = 1.01
fixdiv = False
else:
fixdiv = True
a, b, q, z = qz(g0, g1, 'complex')
# TODO the Matrix class will be deprecated in a future version of numpy. Should be changed to the ndarray.
# Scipy's version of 'qz' is different from MATLAB's, Q needs to be hermitian transposed to get same output
q = array(matrix(q).H)
for i in range(n):
if not fixdiv:
if abs(a[i, i]) > 0:
divhat = abs(b[i, i]/a[i, i])
if (1 + realsmall < divhat) and divhat <= div:
div = 0.5 * (1 + divhat)
nunstab = nunstab + (abs(b[i, i]) > div * abs(a[i, i]))
if (abs(a[i, i]) < realsmall) and abs(b[i, i] < realsmall):
zxz = True
if not zxz:
a, b, q, z, _ = qzdiv(div, a, b, q, z)
gev = vstack([diagonal(a), diagonal(b)]).T
if zxz:
# print('Coincident zeros. Indeterminancy and/or nonexistence')
eu = [-2, -2]
return None, None, None, None, None, None, None, eu, None
q1 = q[:n - nunstab, :]
q2 = q[n - nunstab:, :]
z1 = z[:, :n - nunstab].T
z2 = z[:, n - nunstab:]
a2 = a[n - nunstab:, n - nunstab:]
b2 = a[n - nunstab:, n - nunstab:]
etawt = q2 @ pi
neta = pi.shape[1]
# Case for no stable roots
if nunstab == 0:
bigev = 0
etawt = zeros((0, neta))
ueta = zeros((0, 0))
deta = zeros((0, 0))
veta = zeros((neta, 0))
else:
ueta, deta, veta = svd(etawt)
deta = diag(deta)
veta = array(matrix(veta).H)
md = min(deta.shape)
bigev = where(diagonal(deta[:md, :md]) > realsmall)[0]
ueta = ueta[:, bigev]
veta = veta[:, bigev]
deta = deta[bigev, bigev]
if deta.ndim == 1:
deta = diag(deta)
# TODO check this
try:
if len(bigev) >= nunstab:
eu[0] = 1
except TypeError:
if bigev >= nunstab:
eu[0] = 1
# Case for all stable roots
if nunstab == | |
method)
assert url.endswith('GetItemAudioFulfillment')
eq_('<AudioFulfillmentRequest><ItemId>bib id</ItemId><PatronId>patron id</PatronId></AudioFulfillmentRequest>', kwargs['data'])
eq_(200, response.status_code)
eq_("A license", response.content)
def test_fulfill(self):
    """Fulfilling a Bibliotheca loan yields a FulfillmentInfo whose
    content and media type depend on the internal format requested:
    EPUB passes the server response straight through, while the
    Findaway audio license is converted to a webpub manifest.
    """
    patron = self._patron()
    # This miracle book is available either as an audiobook or as
    # an EPUB.
    work = self._work(
        data_source_name=DataSource.BIBLIOTHECA, with_license_pool=True
    )
    [pool] = work.license_pools
    # Let's fulfill the EPUB first.
    self.api.queue_response(
        200, headers={"Content-Type": "presumably/an-acsm"},
        content="this is an ACSM"
    )
    fulfillment = self.api.fulfill(
        patron, 'password', pool, internal_format='ePub'
    )
    assert isinstance(fulfillment, FulfillmentInfo)
    eq_("this is an ACSM", fulfillment.content)
    eq_(pool.identifier.identifier, fulfillment.identifier)
    eq_(pool.identifier.type, fulfillment.identifier_type)
    eq_(pool.data_source.name, fulfillment.data_source_name)
    # The media type reported by the server is passed through.
    eq_("presumably/an-acsm", fulfillment.content_type)
    # Now let's try the audio version.
    license = self.sample_data("sample_findaway_audiobook_license.json")
    self.api.queue_response(
        200, headers={"Content-Type": "application/json"},
        content=license
    )
    fulfillment = self.api.fulfill(
        patron, 'password', pool, internal_format='MP3'
    )
    assert isinstance(fulfillment, FulfillmentInfo)
    # Here, the media type reported by the server is not passed
    # through; it's replaced by a more specific media type
    eq_(DeliveryMechanism.FINDAWAY_DRM, fulfillment.content_type)
    # The document sent by the 'Findaway' server has been
    # converted into a web publication manifest.
    manifest = json.loads(fulfillment.content)
    # The conversion process is tested more fully in
    # test_findaway_license_to_webpub_manifest. This just verifies
    # that the manifest contains information from the 'Findaway'
    # document as well as information from the Work.
    metadata = manifest['metadata']
    eq_('abcdef01234789abcdef0123', metadata['encrypted']['findaway:checkoutId'])
    eq_(work.title, metadata['title'])
    # Now let's see what happens to fulfillment when 'Findaway' or
    # 'Bibliotheca' sends bad information.
    bad_media_type = "application/error+json"
    bad_content = "This is not my beautiful license document!"
    self.api.queue_response(
        200, headers={"Content-Type": bad_media_type},
        content=bad_content
    )
    fulfillment = self.api.fulfill(
        patron, 'password', pool, internal_format='MP3'
    )
    assert isinstance(fulfillment, FulfillmentInfo)
    # The (apparently) bad document is just passed on to the
    # client as part of the FulfillmentInfo, in the hopes that the
    # client will know what to do with it.
    eq_(bad_media_type, fulfillment.content_type)
    eq_(bad_content, fulfillment.content)
def test_findaway_license_to_webpub_manifest(self):
    """A Findaway license document is converted into a Web Publication
    Manifest with the Findaway extension context, license metadata in
    an 'encrypted' object, and a sorted readingOrder."""
    work = self._work(with_license_pool=True)
    [pool] = work.license_pools
    document = self.sample_data("sample_findaway_audiobook_license.json")
    # Randomly scramble the Findaway manifest to make sure it gets
    # properly sorted when converted to a Webpub-like manifest.
    document = json.loads(document)
    document['items'].sort(key=lambda x: random.random())
    document = json.dumps(document)
    m = BibliothecaAPI.findaway_license_to_webpub_manifest
    media_type, manifest = m(pool, document)
    eq_(DeliveryMechanism.FINDAWAY_DRM, media_type)
    manifest = json.loads(manifest)
    # We use the default context for Web Publication Manifest
    # files, but we also define an extension context called
    # 'findaway', which lets us include terms coined by Findaway
    # in a normal Web Publication Manifest document.
    context = manifest['@context']
    default, findaway = context
    eq_(AudiobookManifest.DEFAULT_CONTEXT, default)
    eq_({"findaway" : FindawayManifest.FINDAWAY_EXTENSION_CONTEXT},
        findaway)
    metadata = manifest['metadata']
    # Information about the book has been added to metadata.
    # (This is tested more fully in
    # core/tests/util/test_util_web_publication_manifest.py.)
    eq_(work.title, metadata['title'])
    eq_(pool.identifier.urn, metadata['identifier'])
    eq_('en', metadata['language'])
    # Information about the license has been added to an 'encrypted'
    # object within metadata.
    encrypted = metadata['encrypted']
    eq_(u'http://librarysimplified.org/terms/drm/scheme/FAE',
        encrypted['scheme'])
    eq_(u'abcdef01234789abcdef0123', encrypted[u'findaway:checkoutId'])
    eq_(u'1234567890987654321ababa', encrypted[u'findaway:licenseId'])
    eq_(u'3M', encrypted[u'findaway:accountId'])
    eq_(u'123456', encrypted[u'findaway:fulfillmentId'])
    eq_(u'aaaaaaaa-4444-cccc-dddd-666666666666',
        encrypted[u'findaway:sessionKey'])
    # Every entry in the license document's 'items' list has
    # become a readingOrder item in the manifest.
    reading_order = manifest['readingOrder']
    eq_(79, len(reading_order))
    # The duration of each readingOrder item has been converted to
    # seconds.
    first = reading_order[0]
    eq_(16.201, first['duration'])
    eq_("Track 1", first['title'])
    # There is no 'href' value for the readingOrder items because the
    # files must be obtained through the Findaway SDK rather than
    # through regular HTTP requests.
    #
    # Since this is a relatively small book, it only has one part,
    # part #0. Within that part, the items have been sorted by
    # their sequence.
    for i, item in enumerate(reading_order):
        eq_(None, item.get('href', None))
        eq_(Representation.MP3_MEDIA_TYPE, item['type'])
        eq_(0, item['findaway:part'])
        eq_(i+1, item['findaway:sequence'])
    # The total duration, in seconds, has been added to metadata.
    eq_(28371, int(metadata['duration']))
class TestBibliothecaCirculationSweep(BibliothecaAPITest):
    """Tests for the monitor that sweeps Bibliotheca circulation data."""

    def test_circulation_sweep_discovers_work(self):
        """Sweeping an identifier we know nothing about creates a
        LicensePool and records the corresponding circulation events."""
        # Test what happens when BibliothecaCirculationSweep discovers a new
        # work.
        # Create an analytics integration so we can make sure
        # events are tracked.
        integration, ignore = create(
            self._db, ExternalIntegration,
            goal=ExternalIntegration.ANALYTICS_GOAL,
            protocol="core.local_analytics_provider",
        )
        # We know about an identifier, but nothing else.
        identifier = self._identifier(
            identifier_type=Identifier.BIBLIOTHECA_ID, foreign_id="ddf4gr9"
        )
        # We're about to get information about that identifier from
        # the API.
        data = self.sample_data("item_metadata_single.xml")
        # Update availability using that data.
        self.api.queue_response(200, content=data)
        monitor = BibliothecaCirculationSweep(
            self._db, self.collection, api_class=self.api
        )
        monitor.process_items([identifier])
        # Validate that the HTTP request went to the /items endpoint.
        request = self.api.requests.pop()
        url = request[1]
        eq_(url, self.api.full_url("items") + "/" + identifier.identifier)
        # A LicensePool has been created for the previously mysterious
        # identifier.
        [pool] = identifier.licensed_through
        eq_(self.collection, pool.collection)
        eq_(False, pool.open_access)
        # Three circulation events were created for this license pool,
        # marking the creation of the license pool, the addition of
        # licenses owned, and the making of those licenses available.
        circulation_events = self._db.query(CirculationEvent).join(LicensePool).filter(LicensePool.id==pool.id)
        eq_(3, circulation_events.count())
        types = [e.type for e in circulation_events]
        eq_(sorted([CirculationEvent.DISTRIBUTOR_LICENSE_ADD,
                    CirculationEvent.DISTRIBUTOR_TITLE_ADD,
                    CirculationEvent.DISTRIBUTOR_CHECKIN
                    ]),
            sorted(types))
# Tests of the various parser classes.
#
class TestBibliothecaParser(BibliothecaAPITest):
    """Tests for the date-handling behavior of BibliothecaParser."""

    def test_parse_date(self):
        """A valid ISO timestamp parses to a datetime; None and junk
        input both come back as None."""
        parser = BibliothecaParser()
        parsed = parser.parse_date("2016-01-02T12:34:56")
        eq_(datetime(2016, 1, 2, 12, 34, 56), parsed)
        for bad_input in (None, "Some weird value"):
            eq_(None, parser.parse_date(bad_input))
class TestEventParser(BibliothecaAPITest):
    """Tests for the parser that reads Bibliotheca event feeds."""

    def test_parse_empty_list(self):
        """An empty event batch is treated as a server-side error."""
        data = self.sample_data("empty_event_batch.xml")
        assert_raises_regexp(
            RemoteInitiatedServerError,
            "No events returned from server. This may not be an error, but treating it as one to be safe.",
            list, EventParser().process_all(data)
        )

    def test_parse_empty_end_date_event(self):
        """An event with no end date parses with end_time=None."""
        data = self.sample_data("empty_end_date_event.xml")
        [event] = list(EventParser().process_all(data))
        # Each parsed event is a 6-tuple.
        (threem_id, isbn, patron_id, start_time, end_time,
         internal_event_type) = event
        eq_('d5rf89', threem_id)
        eq_(u'9781101190623', isbn)
        eq_(None, patron_id)
        eq_(datetime(2016, 4, 28, 11, 4, 6), start_time)
        eq_(None, end_time)
        eq_('distributor_license_add', internal_event_type)
class TestPatronCirculationParser(BibliothecaAPITest):
    """Tests for the parser that reads a patron's active loans and holds."""

    def test_parse(self):
        """A checkouts document yields LoanInfo and HoldInfo objects with
        the expected identifiers, dates and hold positions."""
        data = self.sample_data("checkouts.xml")
        collection = self.collection
        loans_and_holds = PatronCirculationParser(collection).process_all(data)
        loans = [x for x in loans_and_holds if isinstance(x, LoanInfo)]
        holds = [x for x in loans_and_holds if isinstance(x, HoldInfo)]
        eq_(2, len(loans))
        eq_(2, len(holds))
        [l1, l2] = sorted(loans, key=lambda x: x.identifier)
        eq_("1ad589", l1.identifier)
        eq_("cgaxr9", l2.identifier)
        expect_loan_start = datetime(2015, 3, 20, 18, 50, 22)
        expect_loan_end = datetime(2015, 4, 10, 18, 50, 22)
        eq_(expect_loan_start, l1.start_date)
        eq_(expect_loan_end, l1.end_date)
        [h1, h2] = sorted(holds, key=lambda x: x.identifier)
        # This is the book on reserve.
        eq_(collection.id, h1.collection_id)
        eq_(DataSource.BIBLIOTHECA, h1.data_source_name)
        eq_("9wd8", h1.identifier)
        expect_hold_start = datetime(2015, 5, 25, 17, 5, 34)
        expect_hold_end = datetime(2015, 5, 27, 17, 5, 34)
        eq_(expect_hold_start, h1.start_date)
        eq_(expect_hold_end, h1.end_date)
        # Position 0 means the book is on reserve for this patron.
        eq_(0, h1.hold_position)
        # This is the book on hold.
        eq_("d4o8r9", h2.identifier)
        eq_(collection.id, h2.collection_id)
        eq_(DataSource.BIBLIOTHECA, h2.data_source_name)
        expect_hold_start = datetime(2015, 3, 24, 15, 6, 56)
        expect_hold_end = datetime(2015, 3, 24, 15, 7, 51)
        eq_(expect_hold_start, h2.start_date)
        eq_(expect_hold_end, h2.end_date)
        eq_(4, h2.hold_position)
class TestCheckoutResponseParser(BibliothecaAPITest):
    """Tests for the parser that reads checkout responses."""

    def test_parse(self):
        """A successful checkout response yields the loan's due date."""
        raw = self.sample_data("successful_checkout.xml")
        parsed_due_date = CheckoutResponseParser().process_all(raw)
        eq_(datetime(2015, 4, 16, 0, 32, 36), parsed_due_date)
class TestErrorParser(BibliothecaAPITest):
    """Tests for ErrorParser, which turns Bibliotheca error documents
    into the appropriate circulation exceptions."""

    def test_exceeded_limit(self):
        """The normal case--we get a helpful error message which we turn into
        an appropriate circulation exception.
        """
        msg = self.sample_data("error_exceeded_limit.xml")
        error = ErrorParser().process_all(msg)
        assert isinstance(error, PatronLoanLimitReached)
        eq_(u'Patron cannot loan more than 12 documents', error.message)

    def test_exceeded_hold_limit(self):
        """Exceeding the hold limit produces PatronHoldLimitReached."""
        msg = self.sample_data("error_exceeded_hold_limit.xml")
        error = ErrorParser().process_all(msg)
        assert isinstance(error, PatronHoldLimitReached)
        eq_(u'Patron cannot have more than 15 holds', error.message)

    def test_wrong_status(self):
        """A 'wrong document status' error becomes NoLicenses and maps to
        a 404 problem detail document."""
        msg = self.sample_data("error_no_licenses.xml")
        error = ErrorParser().process_all(msg)
        assert isinstance(error, NoLicenses)
        eq_(
            u'the patron document status was CAN_WISH and not one of CAN_LOAN,RESERVATION',
            error.message
        )
        problem = error.as_problem_detail_document()
        eq_("The library currently has no licenses for this book.",
            problem.detail)
        eq_(404, problem.status_code)

    # Renamed from test_internal_server_error_beomces_... (typo fix).
    def test_internal_server_error_becomes_remote_initiated_server_error(self):
        """Simulate the message we get when the server goes down."""
        msg = "The server has encountered an error"
        error = ErrorParser().process_all(msg)
        assert isinstance(error, RemoteInitiatedServerError)
        eq_(BibliothecaAPI.SERVICE_NAME, error.service_name)
        eq_(502, error.status_code)
        eq_(msg, error.message)
        doc = error.as_problem_detail_document()
        eq_(502, doc.status_code)
        eq_("Integration error communicating with Bibliotheca", doc.detail)

    def test_unknown_error_becomes_remote_initiated_server_error(self):
        """Simulate the message we get when ¯\_(ツ)_/¯."""
        msg = self.sample_data("error_unknown.xml")
        error = ErrorParser().process_all(msg)
        assert isinstance(error, RemoteInitiatedServerError)
        eq_(BibliothecaAPI.SERVICE_NAME, error.service_name)
        eq_("Unknown error", error.message)

    def test_remote_authentication_failed_becomes_remote_initiated_server_error(self):
        """Simulate the message we get when the error message is
        'Authentication failed' but our authentication information is
        set up correctly.
        """
        msg = self.sample_data("error_authentication_failed.xml")
        error = ErrorParser().process_all(msg)
        assert isinstance(error, RemoteInitiatedServerError)
        eq_(BibliothecaAPI.SERVICE_NAME, error.service_name)
        eq_("Authentication failed", error.message)

    def test_malformed_error_message_becomes_remote_initiated_server_error(self):
        """A document that doesn't follow the standard error schema is
        treated as an unknown server-side error."""
        msg = """<weird>This error does not follow the standard set out by Bibliotheca.</weird>"""
        error = ErrorParser().process_all(msg)
        assert isinstance(error, RemoteInitiatedServerError)
        eq_(BibliothecaAPI.SERVICE_NAME, error.service_name)
        eq_("Unknown error", error.message)

    def test_blank_error_message_becomes_remote_initiated_server_error(self):
        """An error document with an empty <Message/> is also treated as
        an unknown server-side error."""
        msg = """<Error xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"><Message/></Error>"""
        error = ErrorParser().process_all(msg)
        assert isinstance(error, RemoteInitiatedServerError)
        eq_(BibliothecaAPI.SERVICE_NAME, error.service_name)
        eq_("Unknown error", error.message)
class TestBibliothecaEventParser(object):
# Sample event feed to test out the parser.
TWO_EVENTS = """<LibraryEventBatch xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<PublishId>1b0d6667-a10e-424a-9f73-fb6f6d41308e</PublishId>
<PublishDateTimeInUTC>2014-04-14T13:59:05.6920303Z</PublishDateTimeInUTC>
<LastEventDateTimeInUTC>2014-04-03T00:00:34</LastEventDateTimeInUTC>
<Events>
<CloudLibraryEvent>
<LibraryId>test-library</LibraryId>
<EventId>event-1</EventId>
<EventType>CHECKIN</EventType>
<EventStartDateTimeInUTC>2014-04-03T00:00:23</EventStartDateTimeInUTC>
<EventEndDateTimeInUTC>2014-04-03T00:00:23</EventEndDateTimeInUTC>
<ItemId>theitem1</ItemId>
<ISBN>900isbn1</ISBN>
<PatronId>patronid1</PatronId>
<EventPublishDateTimeInUTC>2014-04-14T13:59:05</EventPublishDateTimeInUTC>
</CloudLibraryEvent>
<CloudLibraryEvent>
<LibraryId>test-library</LibraryId>
<EventId>event-2</EventId>
<EventType>CHECKOUT</EventType>
<EventStartDateTimeInUTC>2014-04-03T00:00:34</EventStartDateTimeInUTC>
<EventEndDateTimeInUTC>2014-04-02T23:57:37</EventEndDateTimeInUTC>
<ItemId>theitem2</ItemId>
<ISBN>900isbn2</ISBN>
<PatronId>patronid2</PatronId>
<EventPublishDateTimeInUTC>2014-04-14T13:59:05</EventPublishDateTimeInUTC>
</CloudLibraryEvent>
</Events>
</LibraryEventBatch>
"""
def test_parse_event_batch(self):
# Parsing the XML gives us | |
# PlusBot/bot.py
import ConfigParser
import decimal
import os
import pickle
import re
import praw
from copy import deepcopy
from flair import Flair
from source import Source
from trader import Trader
from util import replace_markdown, create_table_markdown, format_millis_date, parse_markdown, initVariables, get_post, get_sub, get_records_page
try:
# for Python 2.x
from StringIO import StringIO
except ImportError:
# for Python 3.x
from io import StringIO
from datetime import datetime, timedelta
import itertools
import ConfigParser
# Set globals
# Credentials and thread ids come from config.ini (searched next to this
# file first, then in the working directory).
config = ConfigParser.ConfigParser()
locations = [os.path.join(os.path.dirname(__file__), 'config.ini'), 'config.ini']
config.read(locations)
botUsername = config.get('DEFAULT', 'USERNAME')
clientSecret = config.get('DEFAULT', 'CLIENT_SECRET')
clientId = config.get('DEFAULT', 'CLIENT_ID')
botPassword = config.get('DEFAULT', 'PASSWORD')
postId = config.get('DEFAULT', 'POST_ID')
# Original Thread
# post_id = '7rxv0z'
# Jan-Feb 2018
# post_id = '7tr7ud'
# Mar 2018
# post_id = '81cu0t'
# post_ids
post_ids = ['81cu0t', '7tr7ud']
# Words that count as one trader confirming the other's verification comment.
comment_verification_re = '(verify|verified|verifies|verifying|confirmed|confirming|confirm|confirms)'
# Flair format: "<n> Verified" optionally followed by "| <amount> GRLC".
flair_re = '((?P<trades>\d+) Verified)( \| (?P<garlic>\d+\.\d+) GRLC)*'
grlc_pre_re = '(Garlics|Garlic|Garlicoin|GRLC|G)'
grlc_amount_re = '[\.0-9]+\S*'
# Matches the "[S]"/"[B]"/"{S}"/"{B}" sell/buy tag preceding a username.
grlc_scrap_type_re = '(\\\?\[|\{)(?P<type>(S|B))(\\\?\]|\}).*'
grlc_scrap_type_user = '[^\r\n]*'
# username GRLC Number
grlc_scrap_1_re = '[^\r\n0-9]*(' + grlc_pre_re + '[ \t]*(?P<garlic>\d*\.?\d*[KM]*))'
# username Number GRLC
grlc_scrap_2_re = '[^\r\n0-9]*(?P<garlic>\d*\.?\d*[KM]*)[\t ]*' + grlc_pre_re
# GRLC Number username
grlc_scrap_3_re = grlc_pre_re + '[ \t]*(?P<garlic>\d*\.?\d*[KM]*)[^\r\n0-9]*'
# Number GRLC username
grlc_scrap_4_re = '(?P<garlic>\d*\.?\d*[KM]*)[\t ]*' + grlc_pre_re + '[^\r\n0-9]*'
# Only the first two orderings are actually tried when scraping.
grlc_scraps = [grlc_scrap_1_re, grlc_scrap_2_re]
REPLY_TEMPLATE = "Verification Acknowledged. +1 /u/{0}, +1 /u/{1}. This transaction has been recorded in the " \
                 "[/r/GarlicMarket records](https://www.reddit.com/r/GarlicMarket/wiki/records)"
COMMENT_LINK_TEMPLATE = "[{2}:{3}](https://www.reddit.com/comments/{0}//{1}#{5} \"{4}\") "
DUPLICATE_TEMPLATE = '**Alert**\r\n\r\n' \
                     'Another trade has already been recorded within 24 hours of this trade between ' \
                     '/u/{0} and /u/{1}: {2} ' \
                     'If you feel this is an error, please contact /u/I_regularly_lurk'
class Bot:
    """Scans verification comments in the /r/GarlicMarket trade thread,
    records confirmed trades in the subreddit wiki, and keeps user flair
    (trade count + GRLC amount) in sync with those records.
    """

    def __init__(self, author_points, needs_updating=None):
        """:param author_points: mapping of author name -> Trader record.
        :param needs_updating: optional iterable of author names whose
            flair should be refreshed immediately.
        """
        self.banned_users = get_banned_users()
        print(get_sub().display_name)
        # store new flair for user
        # get cache of authors and links awarded
        self.markdown = get_records_page().content_md
        print('Creating author points')
        self.author_points = author_points
        print('Finished creating author points')
        self.fieldnames = []
        self.keyValue = list()
        if needs_updating:
            self.new_user_flairs = {}
            self.needs_updating_trader_wiki = needs_updating
            for author in needs_updating:
                try:
                    user = self.author_points[author]
                    current_flair = next(get_sub().flair(author))
                    flair = self.create_get_flair(author, current_flair.get('flair_text'), current_flair.get('flair_css_class'))
                    flair.set_flair(user.record, user.grlc)
                    self.new_user_flairs[author] = flair
                except:
                    # NOTE(review): bare except silently skips authors whose
                    # flair lookup fails for any reason.
                    print('could not get flair for ' + author)
                    continue
        else:
            self.needs_updating_trader_wiki = set()
            self.new_user_flairs = {}
        self.verify_comments = []

    def create_get_wiki(self, username=None):
        """Return the cached Trader for username, or a fresh one."""
        if username in self.author_points:
            trader = self.author_points[username]
        else:
            trader = Trader(username)
        return trader

    def create_get_flair(self, username=None, flair_text="", flair_css=""):
        """Return the pending Flair for username, or build a new one."""
        if username in self.new_user_flairs:
            flair = self.new_user_flairs[username]
        else:
            flair = Flair(username, flair_text, flair_css)
        return flair

    def in_key_values(self, parent, child):
        """True if this author pair was already seen (in either order)."""
        parent_author = parent.author.name
        child_author = child.author.name
        if [parent_author, child_author] in self.keyValue or [child_author, parent_author] in self.keyValue:
            return True
        return False

    def process_comment(self, parent, child, reply):
        """Record one confirmed trade between parent/child comment authors.

        Flags a duplicate if another trade between the pair was recorded
        within 24 hours; otherwise queues the reply and updates wiki and
        flair state.
        """
        parent_author = parent.author.name
        child_author = child.author.name
        previous_trade = is_trade_within_24_hours(self.author_points, parent_author, child_author, child.created_utc)
        if previous_trade:
            trade_source = previous_trade.source
            if parent.id not in trade_source:
                dup_reply = DUPLICATE_TEMPLATE.format(parent_author, child_author, trade_source)
                self.verify_comments.append({'comment': parent, 'reply': dup_reply})
        else:
            transaction = scrape_grlc(child_author, parent.body)
            # Edited comments can't be trusted for an amount, so drop it.
            if parent.edited and transaction:
                transaction['grlc'] = None
            # add comment
            self.verify_comments.append({'comment': parent, 'reply': reply})
            # update wiki entry
            self.process_wiki_comment(parent_author, child_author, parent.id, transaction, child.created_utc)
            # update flair
            self.process_flair_comment(parent_author, parent.author_flair_text, parent.author_flair_css_class,
                                       child_author, child.author_flair_text, child.author_flair_css_class)

    def process_wiki_comment(self, parent_author, child_author, comment_id, transaction=None, date=None):
        """Update both traders' wiki records for one trade.

        :param transaction: optional dict with 'type' ('S'/'B') and 'grlc'.
        """
        parent_wiki = self.create_get_wiki(parent_author)
        child_wiki = self.create_get_wiki(child_author)
        try:
            if transaction and 'type' in transaction:
                t_type = transaction['type']
                t_grlc = format_number(transaction['grlc'])
                if t_type.lower() == 's':
                    # Parent sold, so the child bought the amount.
                    c_type = 'B'
                    c_grlc = t_grlc
                    p_grlc = "-"
                elif t_type.lower() == 'b':
                    c_type = 'S'
                    c_grlc = "-"
                    p_grlc = t_grlc
            else:
                c_type = "N/A"
                t_type = "N/A"
                c_grlc = "N/A"
                p_grlc = "N/A"
            parent_comment = COMMENT_LINK_TEMPLATE.format(get_post().id, comment_id, c_type, child_author, p_grlc, date)
            child_comment = COMMENT_LINK_TEMPLATE.format(get_post().id, comment_id, t_type, parent_author, c_grlc, date)
        except:
            # NOTE(review): if this except triggers, the locals used below
            # (c_type, parent_comment, ...) may be unbound and the
            # process_wiki calls will raise NameError — confirm intent.
            pass
        self.process_wiki(parent_wiki, 1, c_type, child_author, p_grlc, parent_comment, date)
        self.process_wiki(child_wiki, 1, t_type, parent_author, c_grlc, child_comment, date)

    def process_wiki(self, trader, amount, tag, partner, grlc, comment=None, date=None):
        """Attach one trade Source to a trader and bump their totals."""
        source = Source(tag, partner, grlc, comment, date)
        if source.get_id() not in trader.trade_partners:
            trader.trade_partners[source.get_id()] = source
            trader.add_record(amount)
            if grlc != '-' and grlc != 'N/A':
                trader.add_grlc(grlc)
            self.needs_updating_trader_wiki.add(trader.get_id())
            self.author_points[trader.get_id()] = trader

    def process_flair_comment(self, parent_author, parent_flair_text, parent_css_class,
                              child_author, child_flair_text, child_css_class):
        """Queue updated flair for both participants of a trade."""
        author_flair = self.create_get_flair(parent_author, parent_flair_text, parent_css_class)
        author_wiki = self.create_get_wiki(parent_author)
        child_flair = self.create_get_flair(child_author, child_flair_text, child_css_class)
        child_wiki = self.create_get_wiki(child_author)
        author_flair.set_flair(author_wiki.record, author_wiki.grlc)
        child_flair.set_flair(child_wiki.record, child_wiki.grlc)
        self.new_user_flairs[parent_author] = author_flair
        self.new_user_flairs[child_author] = child_flair

    def get_comments(self, comment_forest):
        """Group valid, non-banned comments by lowercased author name."""
        comments = {}
        for comment in comment_forest:
            if self.is_comment_valid(comment) and (comment.author.name not in self.banned_users):
                author = str(comment.author.name).lower()
                if comments.get(author) is None:
                    comments[author] = [comment]
                else:
                    comments[author].append(comment)
        return comments

    def update_flair_reddit(self):
        """Push all queued flair changes to reddit in batches of 100."""
        batch_size = 100
        update_flairs = list(map((lambda x: x[1].get_dict()), self.new_user_flairs.items()))
        for chunk in chunked(update_flairs, batch_size):
            try:
                print(get_sub().flair.update(chunk))
            except:
                # Best-effort: a failed batch is logged and skipped.
                print('could not update flair IO Error')
                pass
        print('Finished updating flair')

    def update_wiki_reddit(self, reason):
        """Rebuild the records wiki page (summary table + totals)."""
        updated_text = self.update_wiki_record_summary(self.markdown)
        updated_text = self.update_wiki_totals(updated_text)
        get_records_page().edit(content=str(updated_text), reason=reason)
        print(reason)

    def update_wiki_record_summary(self, markdown):
        """Replace the records table in the wiki markdown."""
        return replace_markdown(create_table_markdown(self.author_points, ['Username', 'Record', 'GRLC', 'Source', 'Notes']), markdown)

    def update_wiki_totals(self, markdown):
        """Rewrite the aggregate GRLC / trades / unique-trader counters."""
        unique_traders = 0
        grlc = 0
        trades = 0
        for author in self.author_points:
            a = self.author_points[author]
            unique_traders += 1
            trades += int(a.record)
            grlc += float(a.grlc)
        # we give credit to both traders so we must divide by 2 to find out how many trades have happened
        trades = trades / 2
        text = re.sub('GRLC recorded[\*]+:\d*\.?\d*', 'GRLC recorded***:' + str(grlc), markdown)
        print(str(grlc))
        text = re.sub('Trades[\*]+:\d*\.?\d*', 'Trades**:' + str(trades), text)
        print(str(trades))
        text = re.sub('Unique Traders[\*]+:\d*\.?\d*', 'Unique Traders**:' + str(unique_traders), text)
        print(str(unique_traders))
        return text

    def scan_comments(self, comments, add_comment=False, detect_dups=True):
        """Scan top-level comments for /u/ mentions confirmed by the
        mentioned user, and record each confirmed trade.

        :param add_comment: if True, post the queued replies afterwards.
        :param detect_dups: if True, skip trades the bot already replied to.
        """
        commentCounter = 0
        for comment in comments:
            commentCounter += 1
            if comment.is_root and self.is_comment_valid(comment) and (comment.author.name not in self.banned_users):
                matches = re.findall('((?<=/u/)|(?<=u/))([A-Za-z0-9_\-]*)', comment.body, re.UNICODE)
                commentCounter += matches.__len__()
                for group in matches:
                    for match in group:
                        if match:
                            feedbackuser = match.lower()
                            # Ignore self-mentions.
                            if feedbackuser != str(comment.author).lower():
                                comment.replies.replace_more()
                                child_comments = self.get_comments(comment.replies)
                                reply = REPLY_TEMPLATE.format(comment.author.name, feedbackuser)
                                # Check if bot has already recorded the transaction
                                trade_not_recorded = True
                                if detect_dups and botUsername.lower() in child_comments:
                                    for child_comment in child_comments[botUsername.lower()]:
                                        if is_string_equals_i(child_comment.body, reply) or '**Alert**' in child_comment.body:
                                            trade_not_recorded = False
                                            break
                                # Record transaction, no dupes
                                if trade_not_recorded and feedbackuser in child_comments:
                                    for child_comment in child_comments[feedbackuser]:
                                        confirmed = re.search(comment_verification_re, child_comment.body, re.IGNORECASE)
                                        if confirmed: # and not self.in_key_values(comment, child_comment):
                                            self.process_comment(comment, child_comment, reply)
                                            break
        print("scanned " + str(commentCounter))
        if add_comment:
            print("replying to thread")
            for vc in self.verify_comments:
                vc['comment'].reply(vc['reply'])

    def is_comment_valid(self, comment):
        """A comment is usable if it isn't removed/banned and has an author."""
        return not comment.banned_by and not comment.removed and comment.author

    def sync_flair_with_records(self, force_update=False):
        """Queue and push flair updates for any trader whose on-reddit
        flair disagrees with the wiki record totals."""
        flair_update_necessary = False
        for author in self.author_points:
            trader = self.author_points[author]
            wiki_record = int(trader.record)
            wiki_grlc = float(trader.grlc)
            try:
                current_flair = next(get_sub().flair(author))
            except:
                print('could not get flair for ' + author)
                continue
            flair_text = current_flair.get('flair_text')
            flair_css = current_flair.get('flair_css_class')
            flair_amounts = get_amounts_from_flair(flair_text)
            flair_record = flair_amounts['trades']
            flair_grlc = flair_amounts['garlic']
            if flair_record:
                flair_record = int(flair_record)
            if flair_grlc:
                flair_grlc = float(flair_grlc)
            if not force_update and (wiki_record == flair_record and wiki_grlc == flair_grlc):
                continue
            else:
                flr = self.create_get_flair(author, flair_text, flair_css)
                flr.set_flair(wiki_record, wiki_grlc)
                self.new_user_flairs[author] = flr
                flair_update_necessary = True
                continue
        if flair_update_necessary:
            print("Syncing flairs with wiki " + get_records_page().name + " Last Updated: " + str(datetime.utcnow()))
            self.update_flair_reddit()

    def sync_trader_pages_with_records(self, update=set(), force_update=False):
        """Rewrite each (dirty) trader's individual wiki page.

        NOTE(review): mutable default `update=set()` is shared across
        calls; callers should pass their own set.
        """
        USER_TEMPLATE = '###{0}\r\n\r\n' \
                        '####Records\r\n{1}\r\n\r\n' \
                        '####Garlicoin\r\nSent {2} to other users\r\n\r\n' \
                        '####Sources\r\n\r\n' \
                        '{3}' \
                        '\r\n\r\n' \
                        '####Notes' \
                        '{4}'
        update_count = 0
        print('Beginning sync with author wikis')
        for author in self.author_points:
            if force_update or author in self.needs_updating_trader_wiki or author in update:
                trader = self.author_points[author]
                fieldnames = ['Trader', 'Tag', 'GRLC', 'Source', 'Date']
                user_template = USER_TEMPLATE.format(trader.username, trader.record, trader.grlc,
                                                     create_table_markdown(trader.trade_partners, ['Trader', 'Tag', 'GRLC', 'Source', 'Date']),
                                                     trader.notes)
                reason = 'Processed ' + trader.username + ' with ' + str(trader.record) + ' grlc: ' + str(trader.grlc)
                trader.get_wiki().edit(user_template, reason=reason)
                update_count += 1
                if update_count % 100 == 0 and update_count > 0:
                    print('Synced ' + str(update_count) + ' wiki records')
        print('Finished syncing author wikis, updated ' + str(update_count) + '/' + str(len(self.author_points)) + ' records ')
# Helper methods
def set_entry(username=None, obj=None, dictionary=None):
    """Store *obj* under *username* in *dictionary* and return the dict.

    The old default ``dictionary=dict`` was the dict *class* itself, so
    calling without a dictionary raised ``TypeError: 'type' object does
    not support item assignment``; default to a fresh dict instead.
    """
    if dictionary is None:
        dictionary = {}
    dictionary[username] = obj
    return dictionary
def is_string_equals_i(string, compare_string):
    """Case-insensitive equality check for two strings."""
    left = string.lower()
    right = compare_string.lower()
    return left == right
def chunked(it, size):
    """Yield successive tuples of at most *size* items from *it*.

    The final chunk may be shorter; an empty iterable yields nothing.
    """
    iterator = iter(it)
    chunk = tuple(itertools.islice(iterator, size))
    while chunk:
        yield chunk
        chunk = tuple(itertools.islice(iterator, size))
def get_amounts_from_flair(flair_text):
    """Extract the trade count and GRLC amount from a user's flair text.

    :param flair_text: the raw flair string, or None/empty.
    :return: dict with 'trades' and 'garlic' keys; each defaults to 0
        when the flair is missing or the component is absent.
    """
    if flair_text:
        match = re.search(flair_re, flair_text)
        if match:
            # Reuse the match already computed (the old code ran the
            # same re.search twice).
            flair_amounts = match.groupdict()
            # groupdict() always contains every named group; unmatched
            # optional groups come back as None, not as a missing key,
            # so the old "'garlic' not in flair_amounts" check never
            # fired and None leaked through. Test for None instead.
            if flair_amounts.get('garlic') is None:
                flair_amounts['garlic'] = 0
            if flair_amounts.get('trades') is None:
                flair_amounts['trades'] = 0
            return flair_amounts
    return {'trades': 0, 'garlic': 0}
def get_banned_users():
    """Return the names of up to 1000 users banned from the subreddit.

    (Removed the unused ``fetching_banned`` flag and replaced the
    accumulator loop with a comprehension.)
    """
    return [ban.name for ban in get_sub().banned(limit=1000)]
def scrape_grlc(child_author, comment_body):
regex = grlc_scrap_type_re + child_author + grlc_scrap_type_user
scrap = re.search(regex, comment_body, re.IGNORECASE)
if scrap:
trans_type = scrap.group('type')
if trans_type:
for i in range(0, grlc_scraps.__len__()):
if i < 2:
pattern = child_author + grlc_scraps[i]
else:
pattern = grlc_scraps[i] + child_author
garlic = re.search(pattern, comment_body, re.IGNORECASE)
if garlic:
grlc_str = garlic.group('garlic').lower()
try:
number = float(re.sub('[KM]+$', '', grlc_str, | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Ejercicio del Algoritmo de Colonia de Hormigas
Taller de la PyConEs 2015: Simplifica tu vida con sistemas complejos y algoritmos genéticos
Este script contiene las funciones y clases necesarias para el ejercicio del laberinto.
Este script usa arrays de numpy, aunque no debería ser difícil para alguien con experiencia
sustituírlos por otras estructuras si es necesario.
También usa la librería Matplotlib en las funciones que dibujan resultados."""
import numpy as np
import matplotlib.pyplot as plt
# Locate the genetic-algorithm helper module; its import path differs
# depending on where the exercise is run from. Catch only ImportError so
# that KeyboardInterrupt/SystemExit (and real bugs inside the module)
# are not silently swallowed by the old bare "except:".
try:
    import laberinto.algen as ag
except ImportError:
    try:
        import Ejercicios.Laberinto.laberinto.algen as ag
    except ImportError:
        print('Problema al cargar el módulo')
#Primero vamos a definir los dos tipos de objeto que necesitamos:
class Map():
    """Maze board for the ant-colony exercise.
    Holds the wall layout, the pheromone map, the population of Camino
    solutions and the per-generation score history.
    """
    def __init__(self, max_steps = 50, veneno = 0) :
        # max_steps: cap on moves when tracing a path through the maze
        # veneno: pheromone ("poison") strength; 0 disables pheromones
        self.max_steps = max_steps
        self.veneno = veneno
        self.list_caminos = []  # population of Camino solutions
        self.history = []       # [min, mean, max] score per generation
        self.bestpath = None
        self.bestscore = -10E8  # sentinel lower than any achievable score
        self.dict_genes = {}
        # NOTE(review): one entry of value 2 per cell — presumably the
        # per-gene allele count consumed by the algen module; confirm there.
        for i in range (10):
            for j in range (10):
                self.dict_genes[i,j] = 2
        #-------- Map matrices: ---------
        #This matrix describes the borders of each cell, one number per
        #cell, read in binary:
        #first binary digit: top border
        #second binary digit: right border
        #third binary digit: bottom border
        #fourth binary digit: left border
        #For each digit: 1 = closed border, 0 = open border
        self.grid = np.array([
            [9, 3, 9, 5, 5, 5, 5, 5, 5, 3 ],
            [10, 12, 6, 9, 1, 5, 5, 3, 9, 6 ],
            [10, 9, 3, 10, 10, 9, 3, 10, 10, 11],
            [10, 10, 10, 10, 10, 12, 6, 10, 8, 2 ],
            [8, 2, 8, 6, 12, 5, 5, 6, 10, 12],
            [10, 10, 12, 1, 5, 5, 1, 3, 12, 3 ],
            [10, 12, 3, 10, 9, 5, 6, 12, 3, 10],
            [8, 5, 6, 10, 10, 9, 3, 11, 10, 10],
            [10, 9, 3, 10, 12, 6, 12, 2, 10, 10],
            [12, 6, 12, 4, 5, 5, 5, 6, 12, 6 ]
            ])
        #This matrix is just a checkerboard used to draw the board nicely
        self.back = np.zeros([10,10])
        self.back[::2, ::2] = 1
        self.back[1::2, 1::2] = 1
        #Pheromones are accumulated in this matrix:
        self.feromap = np.zeros((10,10))
    def draw_tablero(self):
        '''Draw the maze: checkerboard background plus the walls encoded
        in self.grid'''
        plt.figure(1, figsize=(10,10))
        plt.matshow(self.back, fignum= 1, cmap=plt.cm.Oranges, alpha = 0.4)
        #plt.contourf(xx, yy, back, np.linspace(-1, 2, 3), cmap=plt.cm.Blues)
        plt.xlim(-1,10)
        plt.ylim(-1,10)
        x = list(range(10))
        y = x
        for i in x:
            for j in y:
                # bit 1: top wall of cell (row j, column i)
                if self.grid[j,i] & 1 :
                    xx = i + np.array([-0.5, 0.5])
                    yy = j + np.array([-0.5,-0.5])
                    plt.plot(xx,yy, 'k', linewidth=3)
                # bit 2: right wall
                if self.grid[j,i] & 2 :
                    xx = i + np.array([ 0.5, 0.5])
                    yy = j + np.array([-0.5, 0.5])
                    plt.plot(xx,yy, 'k', linewidth=3)
                # bit 4: bottom wall
                if self.grid[j,i] & 4 :
                    xx = i + np.array([-0.5, 0.5])
                    yy = j + np.array([ 0.5, 0.5])
                    plt.plot(xx,yy, 'k', linewidth=3)
                # bit 8: left wall
                if self.grid[j,i] & 8 :
                    xx = i + np.array([-0.5,-0.5])
                    yy = j + np.array([-0.5, 0.5])
                    plt.plot(xx,yy, 'k', linewidth=3)
        # flip the y axis so row 0 is drawn at the top, matching indexing
        plt.gca().invert_yaxis()
    def create_camino(self):
        '''Append a new random path (Camino) to the population'''
        self.list_caminos.append(Camino(False, [self]))
    def statistics(self):
        '''Analyze the population's fitness values: track the best
        solution seen so far and append [min, mean, max] to the history'''
        scores = []
        for j in self.list_caminos :
            scores.append(j.fitness)
            if j.fitness > self.bestscore:
                self.bestscore = j.fitness
                self.bestpath = j
        self.history.append([min(scores), sum(scores)/len(scores), max(scores)])
    def draw_history(self):
        '''Plot the evolution of the min/mean/max score per generation'''
        plt.figure(None, figsize=(10, 8))
        history = np.array(self.history)
        for i in range(3):
            plt.plot(history[:, i])
        plt.title('Puntuación máxima, media y mínima para cada generación')
    def draw_best(self):
        '''Draw the best path found so far.
        The board must be drawn separately'''
        self.bestpath.draw_path(alpha = 0.5, c = 'b', w = 4)
    def draw_poison(self):
        '''Draw the toxins/pheromones on the map.
        The board must be drawn separately'''
        if self.veneno != 0:
            maxpoison = np.max(self.feromap)
            for i in range(10):
                for j in range(10):
                    # marker opacity proportional to the cell's pheromone load
                    poison = 0.8 * self.feromap[j,i] / maxpoison
                    plt.plot(i , j, 'o', color = 'g', alpha = poison, markersize=40)
    def reload_poison(self):
        '''Update the pheromones and re-evaluate each solution's fitness'''
        self.bestpath = None
        self.bestscore = -10E8
        # evaporation: halve the whole pheromone map each generation
        self.feromap /=2
        for i in self.list_caminos:
            i.deploy_poison()
        for i in self.list_caminos:
            calculate_fitness(i)
class Camino():
    '''A candidate solution: one direction per maze cell, from which a
    concrete path through the map can be traced'''
    def __init__(self, genome = False, opciones = False):
        self.poison = 0
        # opciones, when given, is a 1-element list carrying the Map
        if not opciones:
            self.mapa = None
        else:
            self.mapa = opciones[0]
        self.dict_genes = {}
        # NOTE(review): mirrors Map.dict_genes (value 2 per cell);
        # confirm its meaning against the algen module.
        for i in range (10):
            for j in range (10):
                self.dict_genes[i,j] = 2
        if not genome:
            # 200 random bits — presumably 2 bits per cell giving a
            # direction 0..3; TODO confirm against algen's decoding
            self.genome = np.random.randint(0,2,200)
        else:
            self.genome = genome
    def draw_directions(self):
        '''Draw the board and, on top of it, the direction map as arrows
        (0 = up, 1 = right, 2 = down, 3 = left)'''
        self.mapa.draw_tablero()
        x = list(range(10))
        y = x
        for i in x:
            for j in y:
                if self.directions[j ,i] == 0:
                    plt.arrow(i, j + 0.4, 0, -0.6, head_width=0.1, head_length=0.2, fc='b', ec='b')
                if self.directions[j ,i] == 1:
                    plt.arrow(i - 0.4, j, 0.6, 0, head_width=0.1, head_length=0.2, fc='b', ec='b')
                if self.directions[j ,i] == 2:
                    plt.arrow(i, j - 0.4, 0, 0.6, head_width=0.1, head_length=0.2, fc='b', ec='b')
                if self.directions[j ,i] == 3:
                    plt.arrow(i + 0.4, j, -0.6, 0, head_width=0.1, head_length=0.2, fc='b', ec='b')
    #-- Path-tracing helpers
    def move(self, row, col, direction):
        '''Try to move to the adjacent cell; return the new (row, col),
        or None when wall bit 2**direction of the current cell is set'''
        grid = self.mapa.grid
        d = 2 ** direction
        if not grid[row, col] & d:
            if direction == 0:
                return row -1, col
            elif direction == 1:
                return row , col+1
            elif direction == 2:
                return row +1, col
            elif direction == 3:
                return row , col-1
        else:
            return None
    def step(self, row, col, direction, path):
        '''Try to move to the next cell; on hitting a wall, retry once in
        a random direction and stay put if that fails too.
        Returns the new position plus flags telling whether a wall was hit
        and whether the move went back to the previous cell'''
        wall = False
        u_turn = False
        newpos = self.move(row, col, direction)
        if newpos == None:
            wall = True
            new_d = np.random.randint(0,4)
            newpos = self.move(row, col, new_d)
        # only accept the move while still inside the board's columns
        if newpos != None and 0<= col <=9:
            row,col = newpos
        if len(path) >=2 and [row, col] == path[-2]:
            u_turn = True
        return row, col, wall, u_turn
    def get_path(self):
        '''Trace the path that follows the direction map, starting at
        (4, 0) and stopping at the exit (4, 10) or after max_steps moves'''
        max_steps = self.mapa.max_steps
        path = [[4,0]]
        wall_count = 0
        u_turn_count = 0
        for nstep in range(max_steps):
            #print('step:', nstep, end=' ')
            row, col = path[nstep]
            row, col, wall, u_turn = self.step(row, col, self.directions[row, col], path)
            wall_count += wall
            u_turn_count += u_turn
            path.append([row, col])
            # column 10 is one past the right edge: the maze exit
            if [row,col] == [4, 10]:
                break
        self.path, self.wall_count, self.u_turn_count = np.array(path), wall_count, u_turn_count
    def deploy_poison(self):
        '''Deposit negative pheromones on every visited cell, and record
        how much pheromone this path walked through'''
        if self.mapa.veneno != 0 :
            for i in range(self.path.shape[0]):
                row = self.path[i, 0]
                col = self.path[i, 1]
                if col < 10:
                    self.poison += self.mapa.feromap[row,col]
                    self.mapa.feromap[row,col] += 0.1 * self.mapa.veneno
    def draw_path(self, alpha = 0.5, c = 'r', w = 8):
        '''Draw this path on the map.
        The board must be drawn separately'''
        plt.plot(self.path[:,1], self.path[:,0], c, linewidth = w, alpha = alpha)
def calculate_performances(individual):
    '''Compute an individual's performances:
    copy the genome traits into a 10x10 integer direction map, then trace
    the maze path and deposit the pheromones along it'''
    # np.int was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin int is the documented equivalent.
    individual.directions = np.zeros([10, 10], dtype=int)
    for i in range(10):
        for j in range(10):
            individual.directions[i, j] = individual.traits[(i, j)]
    individual.get_path()
    individual.deploy_poison()
def calculate_fitness(individual):
    '''Score an individual: reward rightward progress and short winning
    paths; penalize wall hits, U-turns and accumulated pheromone'''
    max_steps = individual.mapa.max_steps
    final_col = individual.path[-1, 1]
    # positive only when the exit was reached; larger for shorter paths
    victory_bonus = max_steps + 1 - len(individual.path)
    score = final_col * 4
    score -= 2 * individual.wall_count
    score -= 3 * individual.u_turn_count
    score -= 0.03 * individual.poison
    score += victory_bonus * 5
    individual.fitness = score
def avanzar(mapa, n = 100,
max_pop = 100, min_pop = 10,
reproduction_rate = 8, mutation_rate = 0.05):
'''Efectua una cantidad n de generaciones '''
for i in range(n):
print(i+1, end='·')
ag.immigration(mapa.list_caminos, max_pop,
calculate_performances, calculate_fitness,
mapa.dict_genes, Camino, mapa)
ag.tournament(mapa.list_caminos, min_pop)
mapa.reload_poison()
ag.crossover(mapa.list_caminos, reproduction_rate, mutation_rate,
calculate_performances, | |
<reponame>anukaal/pywikibot
"""
A window with a textfield where the user can edit.
Useful for editing the contents of an article.
*New in version 6.1:* Python 3.6 or higher is required.
"""
#
# (C) Pywikibot team, 2003-2021
#
# Distributed under the terms of the MIT license.
#
import tkinter
from tkinter import simpledialog as tkSimpleDialog
from tkinter.scrolledtext import ScrolledText
from typing import Optional
import pywikibot
from pywikibot import __url__
from pywikibot.backports import Tuple
from pywikibot.tools import PYTHON_VERSION
# T164163: Fix idlelib import in Python 3.6
if PYTHON_VERSION >= (3, 6):
from idlelib import replace as ReplaceDialog
from idlelib import search as SearchDialog
from idlelib.config import idleConf
from idlelib.configdialog import ConfigDialog
from idlelib.multicall import MultiCallCreator
else:
from idlelib import ReplaceDialog, SearchDialog
from idlelib.configDialog import ConfigDialog
from idlelib.configHandler import idleConf
from idlelib.MultiCall import MultiCallCreator
class TextEditor(ScrolledText):
    """A text widget with some editing enhancements.
    A lot of code here is copied or adapted from the idlelib/EditorWindow.py
    file in the standard Python distribution.
    """
    def __init__(self, master=None, **kwargs):
        """
        Initializer.
        Get default settings from user's IDLE configuration.
        :param master: parent Tk widget (a new one is created when None)
        :param kwargs: extra Text options, overriding the IDLE defaults
        """
        textcf = self._initialize_config(idleConf.CurrentTheme())
        if idleConf.GetOption('main', 'EditorWindow', 'font-bold',
                              type='bool'):
            font_weight = 'bold'
        else:
            font_weight = 'normal'
        textcf['font'] = (
            idleConf.GetOption('main', 'EditorWindow', 'font'),
            idleConf.GetOption('main', 'EditorWindow', 'font-size'),
            font_weight)
        # override defaults with any user-specified settings
        textcf.update(kwargs)
        super().__init__(master, **textcf)
    def _initialize_config(self, Theme):
        """Fix idleConf.GetHighlight method for different Python releases.
        Returns a dict of Text widget options (colors, size, wrapping)
        derived from the user's IDLE theme.
        """
        config = {
            'padx': 5,
            'wrap': 'word',
            'undo': 'True',
            'width': idleConf.GetOption('main', 'EditorWindow', 'width'),
            'height': idleConf.GetOption('main', 'EditorWindow', 'height'),
        }
        if PYTHON_VERSION >= (3, 7, 4): # T241216
            # newer idlelib: GetHighlight returns a fg/bg dict per element
            config['foreground'] = idleConf.GetHighlight(
                Theme, 'normal')['foreground']
            config['background'] = idleConf.GetHighlight(
                Theme, 'normal')['background']
            config['highlightcolor'] = idleConf.GetHighlight(
                Theme, 'hilite')['foreground']
            config['highlightbackground'] = idleConf.GetHighlight(
                Theme, 'hilite')['background']
            config['insertbackground'] = idleConf.GetHighlight(
                Theme, 'cursor')['foreground']
        else:
            # older idlelib: GetHighlight takes an fgBg selector argument
            config['foreground'] = idleConf.GetHighlight(
                Theme, 'normal', fgBg='fg')
            config['background'] = idleConf.GetHighlight(
                Theme, 'normal', fgBg='bg')
            config['highlightcolor'] = idleConf.GetHighlight(
                Theme, 'hilite', fgBg='fg')
            config['highlightbackground'] = idleConf.GetHighlight(
                Theme, 'hilite', fgBg='bg')
            config['insertbackground'] = idleConf.GetHighlight(
                Theme, 'cursor', fgBg='fg')
        return config
    def add_bindings(self):
        """Assign key and events bindings to methods."""
        # due to IDLE dependencies, this can't be called from __init__
        # add key and event bindings
        self.bind('<<cut>>', self.cut)
        self.bind('<<copy>>', self.copy)
        self.bind('<<paste>>', self.paste)
        self.bind('<<select-all>>', self.select_all)
        self.bind('<<remove-selection>>', self.remove_selection)
        self.bind('<<find>>', self.find_event)
        self.bind('<<find-again>>', self.find_again_event)
        self.bind('<<find-selection>>', self.find_selection_event)
        self.bind('<<replace>>', self.replace_event)
        self.bind('<<goto-line>>', self.goto_line_event)
        self.bind('<<del-word-left>>', self.del_word_left)
        self.bind('<<del-word-right>>', self.del_word_right)
        # map virtual events to concrete key sequences (IDLE-style keys)
        keydefs = {'<<copy>>': ['<Control-Key-c>', '<Control-Key-C>'],
                   '<<cut>>': ['<Control-Key-x>', '<Control-Key-X>'],
                   '<<del-word-left>>': ['<Control-Key-BackSpace>'],
                   '<<del-word-right>>': ['<Control-Key-Delete>'],
                   '<<end-of-file>>': ['<Control-Key-d>', '<Control-Key-D>'],
                   '<<find-again>>': ['<Control-Key-g>', '<Key-F3>'],
                   '<<find-selection>>': ['<Control-Key-F3>'],
                   '<<find>>': ['<Control-Key-f>', '<Control-Key-F>'],
                   '<<goto-line>>': ['<Alt-Key-g>', '<Meta-Key-g>'],
                   '<<paste>>': ['<Control-Key-v>', '<Control-Key-V>'],
                   '<<redo>>': ['<Control-Shift-Key-Z>'],
                   '<<remove-selection>>': ['<Key-Escape>'],
                   '<<replace>>': ['<Control-Key-h>', '<Control-Key-H>'],
                   '<<select-all>>': ['<Control-Key-a>'],
                   '<<undo>>': ['<Control-Key-z>', '<Control-Key-Z>'],
                   }
        for event, keylist in keydefs.items():
            if keylist:
                self.event_add(event, *keylist)
    def cut(self, event):
        """Perform cut operation (only when a selection exists)."""
        if self.tag_ranges('sel'):
            self.event_generate('<<Cut>>')
        return 'break'
    def copy(self, event):
        """Perform copy operation (only when a selection exists)."""
        if self.tag_ranges('sel'):
            self.event_generate('<<Copy>>')
        return 'break'
    def paste(self, event):
        """Perform paste operation."""
        self.event_generate('<<Paste>>')
        return 'break'
    def select_all(self, event=None):
        """Perform select all operation."""
        self.tag_add('sel', '1.0', 'end-1c')
        self.mark_set('insert', '1.0')
        self.see('insert')
        return 'break'
    def remove_selection(self, event=None):
        """Perform remove operation."""
        self.tag_remove('sel', '1.0', 'end')
        self.see('insert')
    def del_word_left(self, event):
        """Perform delete word (left) operation."""
        self.event_generate('<Meta-Delete>')
        return 'break'
    def del_word_right(self, event=None):
        """Perform delete word (right) operation."""
        self.event_generate('<Meta-d>')
        return 'break'
    def find_event(self, event=None):
        """Perform find operation."""
        # seed the dialog with a selection: reuse the last 'found' range
        # or fall back to the first character of the document
        if not self.tag_ranges('sel'):
            found = self.tag_ranges('found')
            if found:
                self.tag_add('sel', found[0], found[1])
            else:
                self.tag_add('sel', '1.0', '1.0+1c')
        SearchDialog.find(self)
        return 'break'
    def find_again_event(self, event=None):
        """Perform find again operation."""
        SearchDialog.find_again(self)
        return 'break'
    def find_selection_event(self, event=None):
        """Perform find selection operation."""
        SearchDialog.find_selection(self)
        return 'break'
    def replace_event(self, event=None):
        """Perform replace operation."""
        ReplaceDialog.replace(self)
        return 'break'
    def find_all(self, s):
        """
        Highlight all occurrences of string s, and select the first one.
        If the string has already been highlighted, jump to the next occurrence
        after the current selection. (You cannot go backwards using the
        button, but you can manually place the cursor anywhere in the
        document to start searching from that point.)
        """
        if hasattr(self, '_highlight') and self._highlight == s:
            # same search string as last time: advance to next occurrence
            try:
                if self.get(tkinter.SEL_FIRST, tkinter.SEL_LAST) == s:
                    return self.find_selection_event(None)
                # user must have changed the selection
                found = self.tag_nextrange('found', tkinter.SEL_LAST)
            except tkinter.TclError:
                # user must have unset the selection
                found = self.tag_nextrange('found', tkinter.INSERT)
            if not found:
                # at last occurrence, scroll back to the top
                found = self.tag_nextrange('found', 1.0)
            if found:
                self.do_highlight(found[0], found[1])
        else:
            # find all occurrences of string s;
            # adapted from O'Reilly's Python in a Nutshell
            # remove previous uses of tag 'found', if any
            self.tag_remove('found', '1.0', tkinter.END)
            if s:
                self._highlight = s
                # start from the beginning (and when we come to the end, stop)
                idx = '1.0'
                while True:
                    # find next occurrence, exit loop if no more
                    idx = self.search(s, idx, nocase=1, stopindex=tkinter.END)
                    if not idx:
                        break
                    # index right after the end of the occurrence
                    lastidx = '{}+{}c'.format(idx, len(s))
                    # tag the whole occurrence (start included, stop excluded)
                    self.tag_add('found', idx, lastidx)
                    # prepare to search for next occurrence
                    idx = lastidx
                # use a red foreground for all the tagged occurrences
                self.tag_config('found', foreground='red')
                found = self.tag_nextrange('found', 1.0)
                if found:
                    self.do_highlight(found[0], found[1])
        return None
    def do_highlight(self, start, end):
        """Select and show the text from index start to index end."""
        self.see(start)
        self.tag_remove(tkinter.SEL, '1.0', tkinter.END)
        self.tag_add(tkinter.SEL, start, end)
        self.focus_set()
    def goto_line_event(self, event):
        """Perform goto line operation."""
        lineno = tkSimpleDialog.askinteger('Goto', 'Go to line number:',
                                           parent=self)
        if lineno is None:
            return 'break'
        if lineno <= 0:
            # lines are 1-based; reject non-positive input with a beep
            self.bell()
            return 'break'
        self.mark_set('insert', '{}.0'.format(lineno))
        self.see('insert')
        return None
class EditBoxWindow(tkinter.Frame):
    """Edit box window.
    A Frame hosting a TextEditor, a search field, OK/Cancel buttons and a
    Find/Edit/Options menu bar.
    """
    def __init__(self, parent=None, **kwargs):
        """Initializer."""
        if parent is None:
            # create a new window
            parent = tkinter.Tk()
        self.parent = parent
        super().__init__(parent)
        # wrap TextEditor so IDLE's multi-key sequences work on it
        self.editbox = MultiCallCreator(TextEditor)(self, **kwargs)
        self.editbox.pack(side=tkinter.TOP)
        self.editbox.add_bindings()
        self.bind('<<open-config-dialog>>', self.config_dialog)
        bottom = tkinter.Frame(parent)
        # lower left subframe with a textfield and a Search button
        bottom_left_frame = tkinter.Frame(bottom)
        self.textfield = tkinter.Entry(bottom_left_frame)
        self.textfield.pack(side=tkinter.LEFT, fill=tkinter.X, expand=1)
        buttonSearch = tkinter.Button(bottom_left_frame, text='Find next',
                                      command=self.find)
        buttonSearch.pack(side=tkinter.RIGHT)
        bottom_left_frame.pack(side=tkinter.LEFT, expand=1)
        # lower right subframe which will contain OK and Cancel buttons
        bottom_right_frame = tkinter.Frame(bottom)
        buttonOK = tkinter.Button(bottom_right_frame, text='OK',
                                  command=self.pressedOK)
        buttonCancel = tkinter.Button(bottom_right_frame, text='Cancel',
                                      command=parent.destroy)
        buttonOK.pack(side=tkinter.LEFT, fill=tkinter.X)
        buttonCancel.pack(side=tkinter.RIGHT, fill=tkinter.X)
        bottom_right_frame.pack(side=tkinter.RIGHT, expand=1)
        bottom.pack(side=tkinter.TOP)
        # create a toplevel menu
        menubar = tkinter.Menu(self.parent)
        findmenu = tkinter.Menu(menubar)
        findmenu.add_command(label='Find',
                             command=self.editbox.find_event,
                             accelerator='Ctrl+F',
                             underline=0)
        findmenu.add_command(label='Find again',
                             command=self.editbox.find_again_event,
                             accelerator='Ctrl+G',
                             underline=6)
        findmenu.add_command(label='Find all',
                             command=self.find_all,
                             underline=5)
        findmenu.add_command(label='Find selection',
                             command=self.editbox.find_selection_event,
                             accelerator='Ctrl+F3',
                             underline=5)
        findmenu.add_command(label='Replace',
                             command=self.editbox.replace_event,
                             accelerator='Ctrl+H',
                             underline=0)
        menubar.add_cascade(label='Find', menu=findmenu, underline=0)
        editmenu = tkinter.Menu(menubar)
        editmenu.add_command(label='Cut',
                             command=self.editbox.cut,
                             accelerator='Ctrl+X',
                             underline=2)
        editmenu.add_command(label='Copy',
                             command=self.editbox.copy,
                             accelerator='Ctrl+C',
                             underline=0)
        editmenu.add_command(label='Paste',
                             command=self.editbox.paste,
                             accelerator='Ctrl+V',
                             underline=0)
        editmenu.add_separator()
        editmenu.add_command(label='Select all',
                             command=self.editbox.select_all,
                             accelerator='Ctrl+A',
                             underline=7)
        editmenu.add_command(label='Clear selection',
                             command=self.editbox.remove_selection,
                             accelerator='Esc')
        menubar.add_cascade(label='Edit', menu=editmenu, underline=0)
        optmenu = tkinter.Menu(menubar)
        optmenu.add_command(label='Settings...',
                            command=self.config_dialog,
                            underline=0)
        menubar.add_cascade(label='Options', menu=optmenu, underline=0)
        # display the menu
        self.parent.config(menu=menubar)
        self.pack()
    def edit(self, text: str, jumpIndex: Optional[int] = None,
             highlight: Optional[str] = None):
        """
        Provide user with editor to modify text.
        :param text: the text to be edited
        :param jumpIndex: position at which to put the caret
        :param highlight: each occurrence of this substring will be highlighted
        :return: the modified text, or None if the user didn't save the text
        file in his text editor
        :rtype: str or None
        """
        self.text = None
        # put given text into our textarea
        self.editbox.insert(tkinter.END, text)
        # enable word wrap
        self.editbox.tag_add('all', '1.0', tkinter.END)
        self.editbox.tag_config('all', wrap=tkinter.WORD)
        # start search if required
        if highlight:
            self.find_all(highlight)
        if jumpIndex:
            # lines are indexed starting at 1
            line = text[:jumpIndex].count('\n') + 1
            column = jumpIndex - (text[:jumpIndex].rfind('\n') + 1)
            # don't know how to place the caret, but scrolling to the right
            # line should already be helpful.
            self.editbox.see('{}.{}'.format(line, column))
        # wait for user to push a button which will destroy (close) the window
        self.parent.mainloop()
        return self.text
    def find_all(self, target):
        """Perform find all operation."""
        self.textfield.insert(tkinter.END, target)
        self.editbox.find_all(target)
    def find(self):
        """Perform find operation."""
        # get text to search for
        s = self.textfield.get()
        if s:
            self.editbox.find_all(s)
    def config_dialog(self, event=None):
        """Show config dialog."""
        ConfigDialog(self, 'Settings')
    def pressedOK(self):
        """
        Perform OK operation.
        Called when user pushes the OK button.
        Saves the buffer into a variable, and closes the window.
        """
        self.text = self.editbox.get('1.0', tkinter.END)
        self.parent.destroy()
    def debug(self, event=None):
        """Call quit() and return 'break'."""
        self.quit()
        return 'break'
# the following class isn't used anywhere in the framework: ####
class ListBoxWindow:
"""List box window."""
# called when user pushes the OK button.
# closes the window.
def pressedOK(self):
"""
Perform OK operation.
Closes listbox.
"""
self.parent.destroy()
def __init__(self, parent=None):
"""Initializer."""
if parent is None:
# create a | |
"""AWS Glue Catalog Module."""
# pylint: disable=redefined-outer-name
import itertools
import logging
import re
import unicodedata
from typing import Any, Dict, Iterator, List, Optional, Tuple
from urllib.parse import quote_plus
import boto3 # type: ignore
import pandas as pd # type: ignore
import sqlalchemy # type: ignore
from awswrangler import _data_types, _utils, exceptions
_logger: logging.Logger = logging.getLogger(__name__)
def delete_table_if_exists(database: str, table: str, boto3_session: Optional[boto3.Session] = None) -> bool:
    """Delete Glue table if exists.
    Parameters
    ----------
    database : str
        Database name.
    table : str
        Table name.
    boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 session will be used if boto3_session receive None.
    Returns
    -------
    bool
        True if deleted, otherwise False.
    Examples
    --------
    >>> import awswrangler as wr
    >>> wr.catalog.delete_table_if_exists(database='default', table='my_table')  # deleted
    True
    >>> wr.catalog.delete_table_if_exists(database='default', table='my_table')  # Nothing to be deleted
    False
    """
    client_glue: boto3.client = _utils.client(service_name="glue", session=boto3_session)
    # EAFP: attempt the delete and treat "not found" as the False case
    try:
        client_glue.delete_table(DatabaseName=database, Name=table)
        return True
    except client_glue.exceptions.EntityNotFoundException:
        return False
def does_table_exist(database: str, table: str, boto3_session: Optional[boto3.Session] = None) -> bool:
    """Check if the table exists.
    Parameters
    ----------
    database : str
        Database name.
    table : str
        Table name.
    boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 session will be used if boto3_session receive None.
    Returns
    -------
    bool
        True if exists, otherwise False.
    Examples
    --------
    >>> import awswrangler as wr
    >>> wr.catalog.does_table_exist(database='default', table='my_table')
    """
    client_glue: boto3.client = _utils.client(service_name="glue", session=boto3_session)
    # EAFP: probe with get_table and map "not found" to False
    try:
        client_glue.get_table(DatabaseName=database, Name=table)
        return True
    except client_glue.exceptions.EntityNotFoundException:
        return False
def create_parquet_table(
    database: str,
    table: str,
    path: str,
    columns_types: Dict[str, str],
    partitions_types: Optional[Dict[str, str]] = None,
    compression: Optional[str] = None,
    description: Optional[str] = None,
    parameters: Optional[Dict[str, str]] = None,
    columns_comments: Optional[Dict[str, str]] = None,
    mode: str = "overwrite",
    boto3_session: Optional[boto3.Session] = None,
) -> None:
    """Create a Parquet Table (Metadata Only) in the AWS Glue Catalog.
    'https://docs.aws.amazon.com/athena/latest/ug/data-types.html'
    Parameters
    ----------
    database : str
        Database name.
    table : str
        Table name.
    path : str
        Amazon S3 path (e.g. s3://bucket/prefix/).
    columns_types: Dict[str, str]
        Dictionary with keys as column names and vales as data types (e.g. {'col0': 'bigint', 'col1': 'double'}).
    partitions_types: Dict[str, str], optional
        Dictionary with keys as partition names and values as data types (e.g. {'col2': 'date'}).
    compression: str, optional
        Compression style (``None``, ``snappy``, ``gzip``, etc).
    description: str, optional
        Table description
    parameters: Dict[str, str], optional
        Key/value pairs to tag the table.
    columns_comments: Dict[str, str], optional
        Columns names and the related comments (e.g. {'col0': 'Column 0.', 'col1': 'Column 1.', 'col2': 'Partition.'}).
    mode: str
        'overwrite' to recreate any possible existing table or 'append' to keep any possible existing table.
    boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 session will be used if boto3_session receive None.
    Returns
    -------
    None
        None.
    Examples
    --------
    >>> import awswrangler as wr
    >>> wr.catalog.create_parquet_table(
    ...     database='default',
    ...     table='my_table',
    ...     path='s3://bucket/prefix/',
    ...     columns_types={'col0': 'bigint', 'col1': 'double'},
    ...     partitions_types={'col2': 'date'},
    ...     compression='snappy',
    ...     description='My own table!',
    ...     parameters={'source': 'postgresql'},
    ...     columns_comments={'col0': 'Column 0.', 'col1': 'Column 1.', 'col2': 'Partition.'}
    ... )
    """
    # presumably normalizes the name to Glue-compatible characters —
    # see sanitize_table_name (defined elsewhere in this module)
    table = sanitize_table_name(table=table)
    partitions_types = {} if partitions_types is None else partitions_types
    # build the Glue TableInput payload (schema, Parquet format, SerDe)
    table_input: Dict[str, Any] = _parquet_table_definition(
        table=table, path=path, columns_types=columns_types, partitions_types=partitions_types, compression=compression
    )
    _create_table(
        database=database,
        table=table,
        description=description,
        parameters=parameters,
        columns_comments=columns_comments,
        mode=mode,
        boto3_session=boto3_session,
        table_input=table_input,
    )
def _parquet_table_definition(
table: str, path: str, columns_types: Dict[str, str], partitions_types: Dict[str, str], compression: Optional[str]
) -> Dict[str, Any]:
compressed: bool = compression is not None
return {
"Name": table,
"PartitionKeys": [{"Name": cname, "Type": dtype} for cname, dtype in partitions_types.items()],
"TableType": "EXTERNAL_TABLE",
"Parameters": {"classification": "parquet", "compressionType": str(compression).lower(), "typeOfData": "file"},
"StorageDescriptor": {
"Columns": [{"Name": cname, "Type": dtype} for cname, dtype in columns_types.items()],
"Location": path,
"InputFormat": "org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat",
"OutputFormat": "org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat",
"Compressed": compressed,
"NumberOfBuckets": -1,
"SerdeInfo": {
"SerializationLibrary": "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe",
"Parameters": {"serialization.format": "1"},
},
"StoredAsSubDirectories": False,
"SortColumns": [],
"Parameters": {
"CrawlerSchemaDeserializerVersion": "1.0",
"classification": "parquet",
"compressionType": str(compression).lower(),
"typeOfData": "file",
},
},
}
def add_parquet_partitions(
    database: str,
    table: str,
    partitions_values: Dict[str, List[str]],
    compression: Optional[str] = None,
    boto3_session: Optional[boto3.Session] = None,
) -> None:
    """Add partitions (metadata) to a Parquet Table in the AWS Glue Catalog.
    Parameters
    ----------
    database : str
        Database name.
    table : str
        Table name.
    partitions_values: Dict[str, List[str]]
        Mapping of S3 locations to the partition values stored there
        (e.g. {'s3://bucket/prefix/y=2020/m=10/': ['2020', '10']}).
    compression: str, optional
        Compression style (``None``, ``snappy``, ``gzip``, etc).
    boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 session will be used if boto3_session receive None.
    Returns
    -------
    None
        None.
    Examples
    --------
    >>> import awswrangler as wr
    >>> wr.catalog.add_parquet_partitions(
    ...     database='default',
    ...     table='my_table',
    ...     partitions_values={
    ...         's3://bucket/prefix/y=2020/m=10/': ['2020', '10'],
    ...         's3://bucket/prefix/y=2020/m=11/': ['2020', '11'],
    ...         's3://bucket/prefix/y=2020/m=12/': ['2020', '12']
    ...     }
    ... )
    """
    # Build one Glue PartitionInput per S3 location, then register them all.
    partition_inputs: List[Dict[str, Any]] = []
    for location, values in partitions_values.items():
        partition_inputs.append(
            _parquet_partition_definition(location=location, values=values, compression=compression)
        )
    _add_partitions(database=database, table=table, boto3_session=boto3_session, inputs=partition_inputs)
def _parquet_partition_definition(location: str, values: List[str], compression: Optional[str]) -> Dict[str, Any]:
compressed: bool = compression is not None
return {
"StorageDescriptor": {
"InputFormat": "org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat",
"OutputFormat": "org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat",
"Location": location,
"Compressed": compressed,
"SerdeInfo": {
"Parameters": {"serialization.format": "1"},
"SerializationLibrary": "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe",
},
"StoredAsSubDirectories": False,
},
"Values": values,
}
def get_table_types(database: str, table: str, boto3_session: Optional[boto3.Session] = None) -> Dict[str, str]:
    """Get all columns and types from a table.
    Parameters
    ----------
    database : str
        Database name.
    table : str
        Table name.
    boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 session will be used if boto3_session receive None.
    Returns
    -------
    Dict[str, str]
        A dictionary as {'col name': 'col data type'}.
    Examples
    --------
    >>> import awswrangler as wr
    >>> wr.catalog.get_table_types(database='default', table='my_table')
    {'col0': 'int', 'col1': 'double'}
    """
    client_glue: boto3.client = _utils.client(service_name="glue", session=boto3_session)
    response: Dict[str, Any] = client_glue.get_table(DatabaseName=database, Name=table)
    table_details: Dict[str, Any] = response["Table"]
    # regular columns first, then partition keys (same precedence as before)
    dtypes: Dict[str, str] = {col["Name"]: col["Type"] for col in table_details["StorageDescriptor"]["Columns"]}
    for partition_key in table_details["PartitionKeys"]:
        dtypes[partition_key["Name"]] = partition_key["Type"]
    return dtypes
def get_databases(
    catalog_id: Optional[str] = None, boto3_session: Optional[boto3.Session] = None
) -> Iterator[Dict[str, Any]]:
    """Get an iterator of databases.
    Parameters
    ----------
    catalog_id : str, optional
        The ID of the Data Catalog from which to retrieve Databases.
        If none is provided, the AWS account ID is used by default.
    boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 session will be used if boto3_session receive None.
    Returns
    -------
    Iterator[Dict[str, Any]]
        Iterator of Databases.
    Examples
    --------
    >>> import awswrangler as wr
    >>> dbs = wr.catalog.get_databases()
    """
    client_glue: boto3.client = _utils.client(service_name="glue", session=boto3_session)
    paginator = client_glue.get_paginator("get_databases")
    # only pass CatalogId when the caller supplied one
    paginate_kwargs: Dict[str, Any] = {} if catalog_id is None else {"CatalogId": catalog_id}
    for page in paginator.paginate(**paginate_kwargs):
        yield from page["DatabaseList"]
def databases(
    limit: int = 100, catalog_id: Optional[str] = None, boto3_session: Optional[boto3.Session] = None
) -> pd.DataFrame:
    """Get a Pandas DataFrame with all listed databases.
    Parameters
    ----------
    limit : int, optional
        Max number of tables to be returned.
    catalog_id : str, optional
        The ID of the Data Catalog from which to retrieve Databases.
        If none is provided, the AWS account ID is used by default.
    boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 session will be used if boto3_session receive None.
    Returns
    -------
    pandas.DataFrame
        Pandas DataFrame filled by formatted infos.
    Examples
    --------
    >>> import awswrangler as wr
    >>> df_dbs = wr.catalog.databases()
    """
    database_iter: Iterator[Dict[str, Any]] = get_databases(catalog_id=catalog_id, boto3_session=boto3_session)
    names: List[str] = []
    descriptions: List[str] = []
    # cap the listing at `limit` databases without materializing the rest
    for db in itertools.islice(database_iter, limit):
        names.append(db["Name"])
        descriptions.append(db.get("Description", ""))
    return pd.DataFrame(data={"Database": names, "Description": descriptions})
def get_tables(
catalog_id: Optional[str] = None,
database: Optional[str] = None,
name_contains: Optional[str] = None,
name_prefix: Optional[str] = None,
name_suffix: Optional[str] = None,
boto3_session: Optional[boto3.Session] = None,
) -> Iterator[Dict[str, Any]]:
"""Get an iterator of tables.
Parameters
----------
catalog_id : str, optional
The ID of the Data Catalog from which to retrieve Databases.
If none is provided, the AWS account ID is used by default.
database : str, optional
Database name.
name_contains : str, optional
Select by a specific string on table name
name_prefix : str, optional
Select by a specific prefix on table name
name_suffix : str, optional
Select by a specific suffix on table name
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receive None.
Returns
-------
Iterator[Dict[str, Any]]
Iterator of tables.
Examples
--------
>>> import awswrangler as wr
>>> tables = wr.catalog.get_tables()
"""
client_glue: boto3.client = _utils.client(service_name="glue", session=boto3_session)
paginator = client_glue.get_paginator("get_tables")
args: Dict[str, str] = {}
if catalog_id is not None:
args["CatalogId"] = catalog_id
if (name_prefix is not None) | |
wrong failing to close?
pass
# try to reconnect a few times
tries = 0
while tries < 3 and not websocket.open:
try:
websocket = await websockets.connect(self.websocket_url)
except websocket_errors:
await asyncio.sleep(2)
tries += 1
self.logs[kind]["end"] = time.time()
logger.info(f"Process exited with {proc.returncode}")
logger.info(f"Disconnecting from websocket {self.websocket_url}")
# Communicate that the program is closing
self.completed_program_counter += 1
await websocket.close()
def _get_host_path(self, *paths):
    """Translate an absolute path inside our docker container into the
    equivalent path on the host machine."""
    # Join all of the path pieces into a single in-container path
    full_path = os.path.join(*paths)
    # Drop the in-container prefix so only the relative portion remains
    relative_path = full_path[len(BASE_DIR):]
    # Re-root under the host directory so docker commands issued on the
    # host resolve to the right location
    return os.path.join(HOST_DIRECTORY, relative_path)
async def _run_program_directory(self, program_dir, kind, can_be_output=False):
    """Run the program found in ``program_dir`` inside a docker container.

    Reads the directory's metadata file for the command to execute, builds the
    ``docker run`` invocation (container name, security options, volume mounts,
    image) and delegates execution to ``self._run_docker_cmd``.

    :param program_dir: directory expected to hold the program + its metadata
    :param kind: ``'program'`` or ``'ingestion'``; selects container name/mounts
    :param can_be_output: when True and metadata is missing, the directory is
        copied to the output dir (assumed to be consumed by an ingestion
        program) instead of raising.
    """
    # If the directory doesn't even exist, move on
    if not os.path.exists(program_dir):
        logger.info(f"{program_dir} not found, no program to execute")
        # Communicate that the program is closing
        self.completed_program_counter += 1
        return
    # Accept either 'metadata.yaml' or the legacy bare 'metadata' file name.
    if os.path.exists(os.path.join(program_dir, "metadata.yaml")):
        metadata_path = 'metadata.yaml'
    elif os.path.exists(os.path.join(program_dir, "metadata")):
        metadata_path = 'metadata'
    else:
        if can_be_output:
            logger.info(
                "Program directory missing metadata, assuming it's going to be handled by ingestion "
                "program so move it to output"
            )
            # Copying so that we don't move a code submission w/out a metadata command
            shutil.copytree(program_dir, self.output_dir)
            return
        else:
            raise SubmissionException("Program directory missing 'metadata.yaml/metadata'")
    logger.info(f"Metadata path is {os.path.join(program_dir, metadata_path)}")
    with open(os.path.join(program_dir, metadata_path), 'r') as metadata_file:
        metadata = yaml.load(metadata_file.read(), Loader=yaml.FullLoader)
        logger.info(f"Metadata contains:\n {metadata}")
    command = metadata.get("command")
    # Ingestion programs must declare a command; a plain program may omit it
    # when it is only an input for an ingestion program.
    if not command and kind == "ingestion":
        raise SubmissionException("Program directory missing 'command' in metadata")
    elif not command:
        logger.info(
            f"Warning: {program_dir} has no command in metadata, continuing anyway "
            f"(may be meant to be consumed by an ingestion program)"
        )
        return
    # nvidia-docker exposes GPUs; fall back to plain docker otherwise.
    if os.environ.get("NVIDIA_DOCKER"):
        docker_process_name = "nvidia-docker"
    else:
        docker_process_name = "docker"
    docker_cmd = [
        docker_process_name,
        'run',
        # Remove it after run
        '--rm',
        f'--name={self.ingestion_container_name if kind == "ingestion" else self.program_container_name}',
        # Don't allow subprocesses to raise privileges
        '--security-opt=no-new-privileges',
        # Set the volumes
        '-v', f'{self._get_host_path(program_dir)}:/app/program',
        '-v', f'{self._get_host_path(self.output_dir)}:/app/output',
        # Start in the right directory
        '-w', '/app/program',
        # Don't buffer python output, so we don't lose any
        '-e', 'PYTHONUNBUFFERED=1',
    ]
    if kind == 'ingestion':
        # program here is either scoring program or submission, depends on if this ran during Prediction or Scoring
        if self.ingestion_only_during_scoring and self.is_scoring:
            # submission program moved to 'input/res' with shutil.move() above
            ingested_program_location = "input/res"
        else:
            ingested_program_location = "program"
        docker_cmd += ['-v', f'{self._get_host_path(self.root_dir, ingested_program_location)}:/app/ingested_program']
    if self.input_data:
        docker_cmd += ['-v', f'{self._get_host_path(self.root_dir, "input_data")}:/app/input_data']
    if self.is_scoring:
        # For scoring programs, we want to have a shared directory just in case we have an ingestion program.
        # This will add the share dir regardless of ingestion or scoring, as long as we're `is_scoring`
        docker_cmd += ['-v', f'{self._get_host_path(self.root_dir, "shared")}:/app/shared']
        # Input from submission (or submission + ingestion combo)
        docker_cmd += ['-v', f'{self._get_host_path(self.input_dir)}:/app/input']
    # Set the image name (i.e. "codalab/codalab-legacy") for the container
    docker_cmd += [self.docker_image]
    # Handle Legacy competitions by replacing anything in the run command
    command = replace_legacy_metadata_command(
        command=command,
        kind=kind,
        is_scoring=self.is_scoring,
        ingestion_only_during_scoring=self.ingestion_only_during_scoring
    )
    # Append the actual program to run
    # NOTE(review): naive split on ' ' — a command with quoted arguments
    # containing spaces would be split incorrectly; confirm commands are simple.
    docker_cmd += command.split(' ')
    logger.info(f"Running program = {' '.join(docker_cmd)}")
    # This runs the docker command and asynchronously passes data back via websocket
    return await self._run_docker_cmd(docker_cmd, kind=kind)
def _put_dir(self, url, directory):
    """Archive ``directory`` as a zip and upload the archive to ``url``."""
    logger.info("Putting dir %s in %s" % (directory, url))
    # Build the zip under the run's root dir with a random (collision-free) name
    archive_base = os.path.join(self.root_dir, str(uuid.uuid4()))
    archive_path = make_archive(archive_base, 'zip', directory)
    self._put_file(url, file=archive_path)
def _put_file(self, url, file=None, raw_data=None, content_type='application/zip'):
    """HTTP PUT a file or raw bytes to ``url``.

    Exactly one of ``file`` / ``raw_data`` must be provided.

    :param url: destination URL (Azure blob headers are sent; other storage
        backends ignore them)
    :param file: path of a file to stream from disk
    :param raw_data: in-memory payload to send instead of a file
    :param content_type: Content-Type header value; skipped when falsy
    :raises Exception: when both ``file`` and ``raw_data`` are given
    :raises SubmissionException: when neither is given
    """
    if file and raw_data:
        raise Exception("Cannot put both a file and raw_data")
    headers = {
        # For Azure only, other systems ignore these headers
        'x-ms-blob-type': 'BlockBlob',
        'x-ms-version': '2018-03-28',
    }
    if content_type:
        headers['Content-Type'] = content_type
    if file:
        logger.info("Putting file %s in %s" % (file, url))
        headers['Content-Length'] = str(os.path.getsize(file))
        # Stream from disk; the context manager closes the handle
        # (the previous implementation leaked the open file object).
        with open(file, 'rb') as data:
            resp = self.requests_session.put(
                url,
                data=data,
                headers=headers,
            )
    elif raw_data:
        logger.info("Putting raw data %s in %s" % (raw_data, url))
        resp = self.requests_session.put(
            url,
            data=raw_data,
            headers=headers,
        )
    else:
        raise SubmissionException('Must provide data, both file and raw_data cannot be empty')
    logger.info("*** PUT RESPONSE: ***")
    logger.info(f'response: {resp}')
    logger.info(f'content: {resp.content}')
def _prep_cache_dir(self, max_size=MAX_CACHE_DIR_SIZE_GB):
    """Ensure the cache directory exists, pruning it when it exceeds
    ``max_size`` gigabytes."""
    if not os.path.exists(CACHE_DIR):
        os.mkdir(CACHE_DIR)
    logger.info("Checking if cache directory needs to be pruned...")
    over_budget = get_folder_size_in_gb(CACHE_DIR) > max_size
    if not over_budget:
        logger.info("Cache directory does not need to be pruned!")
        return
    logger.info("Pruning cache directory")
    delete_files_in_folder(CACHE_DIR)
def prepare(self):
    """Download all bundles for the run, checksum the submission (during the
    prediction step only), and pre-pull the docker image.

    Called once before ``start``.  Announces STATUS_PREPARING only during
    the prediction step.
    """
    if not self.is_scoring:
        # Only during prediction step do we want to announce "preparing"
        self._update_status(STATUS_PREPARING)
    # Setup cache and prune if it's out of control
    self._prep_cache_dir()
    # A run *may* contain the following bundles, let's grab them and dump them in the appropriate
    # sub folder.
    bundles = [
        # (url to file, relative folder destination)
        (self.program_data, 'program'),
        (self.ingestion_program_data, 'ingestion_program'),
        (self.input_data, 'input_data'),
        (self.reference_data, 'input/ref'),
    ]
    if self.is_scoring:
        # Send along submission result so scoring_program can get access
        bundles += [(self.prediction_result, 'input/res')]
    for url, path in bundles:
        if url is not None:
            # At the moment let's just cache input & reference data
            cache_this_bundle = path in ('input_data', 'input/ref')
            zip_file = self._get_bundle(url, path, cache=cache_this_bundle)
            # TODO: When we have `is_scoring_only` this needs to change...
            if url == self.program_data and not self.is_scoring:
                # We want to get a checksum of submissions so we can check if they are
                # a solution, or maybe match them against other submissions later
                logger.info(f"Beginning MD5 checksum of submission: {zip_file}")
                checksum = md5(zip_file)
                logger.info(f"Checksum result: {checksum}")
                self._update_submission({"md5": checksum})
    # For logging purposes let's dump file names
    # NOTE(review): assumes `self.root_dir` ends with a path separator,
    # otherwise the glob pattern is malformed — confirm where root_dir is set.
    for filename in glob.iglob(self.root_dir + '**/*.*', recursive=True):
        logger.info(filename)
    # Before the run starts we want to download docker images, they may take a while to download
    # and to do this during the run would subtract from the participants time.
    self._get_docker_image(self.docker_image)
def start(self):
    """Run the program and ingestion program concurrently and collect results.

    Builds a fresh asyncio loop running both program directories plus the
    detailed-results watcher, enforces the wall-clock execution limit via
    SIGALRM, then records exit codes / elapsed times and uploads captured
    stdout/stderr.
    """
    if not self.is_scoring:
        self._update_status(STATUS_RUNNING)
    program_dir = os.path.join(self.root_dir, "program")
    ingestion_program_dir = os.path.join(self.root_dir, "ingestion_program")
    logger.info("Running scoring program, and then ingestion program")
    loop = asyncio.new_event_loop()
    # NOTE(review): the `loop=` kwarg to asyncio.gather was deprecated in 3.8
    # and removed in 3.10 — confirm the interpreter version this targets.
    gathered_tasks = asyncio.gather(
        self._run_program_directory(program_dir, kind='program', can_be_output=True),
        self._run_program_directory(ingestion_program_dir, kind='ingestion'),
        self.watch_detailed_results(),
        loop=loop,
    )
    # Abort the whole run if it exceeds the configured wall-clock limit.
    signal.signal(signal.SIGALRM, alarm_handler)
    signal.alarm(self.execution_time_limit)
    try:
        loop.run_until_complete(gathered_tasks)
    except ExecutionTimeLimitExceeded:
        raise SubmissionException(f"Execution Time Limit exceeded. Limit was {self.execution_time_limit} seconds")
    finally:
        # Stop the detailed-results watcher regardless of outcome.
        self.watch = False
    for kind, logs in self.logs.items():
        if logs["end"] is not None:
            elapsed_time = logs["end"] - logs["start"]
        else:
            # The program never signalled completion; charge the full limit.
            elapsed_time = self.execution_time_limit
        return_code = logs["proc"].returncode
        if return_code is None:
            # Process still alive — force-stop its docker container.
            logger.info('No return code from Process. Killing it')
            if kind == 'ingestion':
                program_to_kill = self.ingestion_container_name
            else:
                program_to_kill = self.program_container_name
            # Try and stop the program. If stop does not succeed
            kill_code = subprocess.call(['docker', 'stop', str(program_to_kill)])
            logger.info(f'Kill process returned {kill_code}')
        if kind == 'program':
            self.program_exit_code = return_code
            self.program_elapsed_time = elapsed_time
        elif kind == 'ingestion':
            self.ingestion_program_exit_code = return_code
            self.ingestion_elapsed_time = elapsed_time
        logger.info(f'[exited with {logs["proc"].returncode}]')
        # Upload captured stdout/stderr; other keys are bookkeeping.
        for key, value in logs.items():
            if key not in ['stdout', 'stderr']:
                continue
            if value["data"]:
                logger.info(f'[{key}]\n{value["data"]}')
                self._put_file(value["location"], raw_data=value["data"])
        # set logs of this kind to None, since we handled them already
    logger.info("Program finished")
    signal.alarm(0)
    if self.is_scoring:
        self._update_status(STATUS_FINISHED)
    else:
        self._update_status(STATUS_SCORING)
def push_scores(self):
    """This is only ran at the end of the scoring step.

    Reads scores from 'scores.json' (preferred) or 'scores.txt' in the
    output directory and POSTs them to the submissions API.

    :raises SubmissionException: when no scores file exists or the JSON
        cannot be decoded.
    """
    # POST to some endpoint:
    # {
    #     "correct": 1.0
    # }
    if os.path.exists(os.path.join(self.output_dir, "scores.json")):
        scores_file = os.path.join(self.output_dir, "scores.json")
        with open(scores_file) as f:
            try:
                scores = json.load(f)
            except json.decoder.JSONDecodeError as e:
                raise SubmissionException(f"Could not decode scores json properly, it contains an error.\n{e.msg}")
    elif os.path.exists(os.path.join(self.output_dir, "scores.txt")):
        scores_file = os.path.join(self.output_dir, "scores.txt")
        with open(scores_file) as f:
            # Pass an explicit Loader: bare yaml.load() is deprecated/unsafe,
            # and the rest of this module already uses yaml.FullLoader.
            scores = yaml.load(f, Loader=yaml.FullLoader)
    else:
        raise SubmissionException("Could not find scores file, did the scoring program output it?")
    url = f"{self.submissions_api_url}/upload_submission_scores/{self.submission_id}/"
    data = {
        "secret": self.secret,
        "scores": scores,
    }
    logger.info(f"Submitting these scores to {url}: {scores} with data = {data}")
    resp = self.requests_session.post(url, json=data)
    logger.info(resp)
    logger.info(str(resp.content))
def push_output(self):
"""Output is pushed at the end of both prediction and scoring steps."""
# V1.5 compatibility, write program statuses to metadata file
prog_status = {
'exitCode': self.program_exit_code,
# for v1.5 compat, send `ingestion_elapsed_time` if no `program_elapsed_time`
'elapsedTime': self.program_elapsed_time or self.ingestion_elapsed_time,
'ingestionExitCode': self.ingestion_program_exit_code,
'ingestionElapsedTime': self.ingestion_elapsed_time,
}
logger.info(f"Metadata output: {prog_status}")
metadata_path = os.path.join(self.output_dir, 'metadata')
if os.path.exists(metadata_path):
raise SubmissionException("Error, the output directory already contains a metadata file. This file is used "
"to store exitCode and other data, do not write to this file manually.")
with open(metadata_path, 'w') as f:
f.write(yaml.dump(prog_status, default_flow_style=False))
if | |
<gh_stars>1-10
#!/usr/bin/env python
"""This script does x.
Example:
Attributes:
Todo:
"""
import os
import sys
import glob
import numpy as np
import pandas as pd
import radical.analytics as ra
def initialize_entity(ename=None):
    """Return an empty {column: []} data structure for one entity type.

    ename must be one of 'session', 'pilot' or 'unit'.  The returned dict
    also contains one (empty) list per duration label found in the ``pdm``
    and ``udm`` duration models (assumed to be defined at module scope —
    they are set up in the __main__ block).  On an unknown ename the error
    is printed and the process exits.
    """
    entities = {'session': {'sid'          : [],  # Session ID
                            'session'      : [],  # RA session objects
                            'experiment'   : [],  # Experiment ID
                            'TTC'          : [],  # Time to completion
                            'nhost'        : [],  # #host for CU execution
                            'nunit'        : [],  # #units
                            'nunit_done'   : [],  # #active units
                            'nunit_failed' : [],  # #failed units
                            'npilot'       : [],  # #pilots
                            'npilot_active': [],  # #active pilots
                            'ncore'        : [],  # #cores
                            'ncore_active' : []}, # #active cores
                'pilot'  : {'pid'          : [],  # Pilot ID
                            'sid'          : [],  # Session ID
                            'hid'          : [],  # Host ID
                            'ncore'        : [],  # #cores
                            'nunit'        : [],  # #units executed
                            'experiment'   : []}, # Experiment ID
                'unit'   : {'uid'          : [],  # Unit ID
                            'sid'          : [],  # Session ID
                            'pid'          : [],  # Pilot ID
                            'hid'          : [],  # Host ID
                            'experiment'   : []}} # Experiment ID
    # Add the duration label of each state of each entity.
    for duration in pdm.keys():
        entities['session'][duration] = []
        entities['pilot'][duration] = []
    for duration in udm.keys():
        entities['session'][duration] = []
        entities['unit'][duration] = []
    # Return the empty data structure of the requested entity.
    if ename in ['session', 'pilot', 'unit']:
        return entities[ename]
    else:
        # Typo fixed: 'itialize' -> 'initialize'; print() form is valid in
        # both Python 2 and 3.
        error = 'Cannot initialize entity %s' % ename
        print(error)
        sys.exit(1)
def load_df(ename=None):
    """Return the DataFrame for ``ename`` ('session', 'pilot' or 'unit').

    Loads previously stored data from the entity's CSV (paths in the global
    ``csvs``) when present, otherwise returns an empty, correctly-columned
    DataFrame.  Exits the process on an unknown ename.
    """
    if ename in ['session', 'pilot', 'unit']:
        # Start from the empty, correctly-shaped frame...
        df = pd.DataFrame(initialize_entity(ename=ename))
        # ...and replace it with previously stored data when available.
        if os.path.isfile(csvs[ename]):
            df = pd.read_csv(csvs[ename], index_col=0)
        return df
    else:
        # Typo fixed: 'itialize' -> 'initialize'; print() form is valid in
        # both Python 2 and 3.
        error = 'Cannot initialize entity %s' % ename
        print(error)
        sys.exit(1)
def store_df(new_df, stored=None, ename=None):
    """Append ``new_df`` to the previously ``stored`` frame and write the
    result to the entity's CSV (paths in the global ``csvs``).

    :param new_df: freshly derived rows to persist
    :param stored: previously stored DataFrame; defaults to an empty frame
    :param ename: 'session', 'pilot' or 'unit' — selects layout and CSV path
    """
    # The old signature used `stored=pd.DataFrame()`: a mutable default
    # evaluated once at import time.  `None` is the safe equivalent.
    if stored is None:
        stored = pd.DataFrame()
    # skip storing if no new data are passed.
    if new_df.empty:
        print('WARNING: attempting to store an empty DF.')
    else:
        if ename == 'session':
            # Drop the in-memory RA session object column before persisting.
            new_sessions = new_df.drop('session', axis=1)
            if stored.empty:
                sessions = new_sessions
            else:
                sessions = stored.append(new_sessions)
            sessions.to_csv(csvs[ename])
        elif ename in ['pilot', 'unit']:
            if stored.empty:
                df = new_df
            else:
                df = stored.append(new_df)
            df.reset_index(inplace=True, drop=True)
            df.to_csv(csvs[ename])
        else:
            error = 'Cannot store DF to %s' % ename
            print(error)
            sys.exit(1)
def parse_osg_hostid(hostid):
    '''
    Heuristic: eliminate node-specific information from hostID.

    Produces a "domain" string meant to identify the cluster a node belongs
    to, stripping digit-bearing (node-specific) name components when possible.
    '''
    digits = set('0123456789')

    def words_without_digits(ws):
        # Keep only the words that contain no digit at all.  Most hostnames
        # have none, but there are exceptions handled below.
        return [w for w in ws if not any(ch in digits for ch in w)]

    # Split domain name from IP.
    parts = hostid.split(':')
    hostname = parts[0]
    # Split domain name into words and filter out digit-bearing ones.
    words = hostname.split('.')
    literals = words_without_digits(words)

    if len(literals) == 0:
        # Exception a: every word of the domain name has a number.
        if '-' in hostname:
            # Some hostnames use '-' instead of '.' as word separator; the
            # '.'-based parse returned a single digit-bearing word.
            literals = words_without_digits(hostname.split('-'))
            # FIXME: We do not check the size of literals.
            domain = '.'.join(literals)
        elif 'nod' in hostname:
            # Only the bare node name is available; keep the IP so that we
            # can later decide whether two nodes share a cluster.
            domain = '.'.join(parts)
        elif 'n0' in hostname:
            # FIXME: ad hoc parsing
            domain = 'n0x.10.2.x.x'
        else:
            # The hostname is identified by an alphanumeric string.
            domain = '.'.join(parts)
    elif len(literals) == 1:
        # Some hostnames DO have numbers in their name; drop only the first
        # (node-specific) word.
        domain = '.'.join(words[1:])
    else:
        # The simple case: join the digit-free words.
        domain = '.'.join(literals)

    # FIXME: When everything else fails, ad hoc manipulations of the
    # domain string.
    if 'its.osg' in domain:
        domain = 'its.osg'
    elif 'nodo' in domain:
        domain = 'nodo'
    elif 'bu.edu' in domain:
        domain = 'bu.edu'
    return domain
def load_pilots(sid, exp, sra_pilots, pdm, pu_rels):
    """Derive properties/durations for each pilot of one session and persist
    them to the pilots CSV.

    sid        : session ID
    exp        : experiment ID the session belongs to
    sra_pilots : radical.analytics session filtered to pilot entities
    pdm        : {duration_label: state_pair} pilot duration model
    pu_rels    : {pilot_id: [unit_ids]} pilot/unit relationships

    Returns the pilots DataFrame re-loaded from CSV (old + any new rows).
    """
    sys.stdout.write('\n%s --- %s' % (exp, sid))
    ps = initialize_entity(ename='pilot')
    # Did we already store pilots of this session?
    stored_pilots = load_df(ename='pilot')
    stored_pids = []
    if stored_pilots['sid'].any():
        stored_pilots_sid = stored_pilots.loc[
            stored_pilots['sid'] == sid].copy()
        stored_pids = stored_pilots_sid['pid'].values.tolist()
    # Derive properties and duration for each pilot.
    for pid in sorted(sra_pilots.list('uid')):
        # Skip the pilot if it has been already stored.
        if pid in stored_pids:
            sys.stdout.write('\n%s already in %s' % (pid, csvs['pilot']))
            continue
        # Pilot properties.
        sys.stdout.write('\n' + pid + ': ')
        ps['pid'].append(pid)
        ps['sid'].append(sid)
        ps['experiment'].append(exp)
        # Host ID, normalized so nodes of one cluster share an ID.
        pentity = sra_pilots.get(uid=pid)[0]
        if pentity.cfg['hostid']:
            ps['hid'].append(parse_osg_hostid(pentity.cfg['hostid']))
        else:
            ps['hid'].append(None)
        # Number of cores of the pilot.
        ps['ncore'].append(pentity.description['cores'])
        # Number of units executed.
        ps['nunit'].append(len(pu_rels[pid]))
        # Pilot durations.
        for duration in pdm.keys():
            if duration not in ps.keys():
                ps[duration] = []
            try:
                ps[duration].append(pentity.duration(pdm[duration]))
                sys.stdout.write(' %s' % duration)
            # NOTE(review): bare except hides real errors (even
            # KeyboardInterrupt); an uncomputable duration is stored as None.
            except:
                print '\nWARNING: Failed to calculate duration %s' % \
                    duration
                ps[duration].append(None)
    # Store pilots DF to csv and reload into memory to return the complete
    # DF for the given sid.
    if ps['pid']:
        pilots = pd.DataFrame(ps)
        store_df(pilots, stored=stored_pilots, ename='pilot')
        stored_pilots = load_df(ename='pilot')
        print '\nstored in %s.' % csvs['pilot']
    # Returns the DF of the stored pilots if no new pilots have been added;
    # the DF with the old and new pilots otherwise.
    return stored_pilots
def load_units(sid, exp, sra_units, udm, pilots, sra, pu_rels):
    """Derive properties/durations for each unit of one session and persist
    them to the units CSV.

    sid       : session ID
    exp       : experiment ID the session belongs to
    sra_units : radical.analytics session filtered to unit entities
    udm       : {duration_label: state_pair} unit duration model
    pilots    : pilots DataFrame (used to resolve each unit's host ID)
    sra       : full RA session — appears unused here; kept for call
                compatibility (TODO confirm)
    pu_rels   : {pilot_id: [unit_ids]} pilot/unit relationships

    Returns the units DataFrame re-loaded from CSV (old + any new rows).
    """
    sys.stdout.write('\n%s --- %s' % (exp, sid))
    us = initialize_entity(ename='unit')
    # Did we already store units of this session?
    stored_units = load_df(ename='unit')
    stored_uids = []
    if stored_units['sid'].any():
        stored_units_sid = stored_units.loc[
            stored_units['sid'] == sid].copy()
        stored_uids = stored_units_sid['uid'].values.tolist()
    # Derive properties and duration for each unit.
    for uid in sorted(sra_units.list('uid')):
        # Skip the unit if it has been already stored.
        if uid in stored_uids:
            sys.stdout.write('\n%s already stored in %s' %
                             (uid, csvs['unit']))
            continue
        # Properties.
        sys.stdout.write('\n' + uid + ': ')
        us['uid'].append(uid)
        us['sid'].append(sid)
        us['experiment'].append(exp)
        # Durations.
        uentity = sra_units.get(uid=uid)[0]
        for duration in udm.keys():
            if duration not in us.keys():
                us[duration] = []
            try:
                # TODO: this is a temporary fix for inconsistent state model.
                if duration == 'U_AGENT_EXECUTING':
                    if 'AGENT_STAGING_OUTPUT_PENDING' in \
                            uentity.states.keys() and \
                            'FAILED' in uentity.states.keys():
                        us[duration].append(None)
                        continue
                us[duration].append(uentity.duration(udm[duration]))
                sys.stdout.write(' %s' % duration)
            # NOTE(review): bare except hides real errors; an uncomputable
            # duration is stored as None.
            except:
                print '\nWARNING: Failed to calculate duration %s' % \
                    duration
                us[duration].append(None)
        # pilot and host on which the unit has been executed.
        punit = [key[0] for key in pu_rels.items() if uid in key[1]][0]
        hid = pilots[(pilots['sid'] == sid) &
                     (pilots['pid'] == punit)]['hid'].tolist()[0]
        us['pid'].append(punit)
        us['hid'].append(hid)
    # Store unit DF to csv and reload into memory to return the complete
    # DF for the given sid.
    if us['pid']:
        units = pd.DataFrame(us)
        store_df(units, stored=stored_units, ename='unit')
        stored_units = load_df(ename='unit')
        print '\nstored in %s.' % csvs['unit']
    # Returns the DF of the stored units if no new units have been added;
    # the DF with the old and new units otherwise.
    return stored_units
def load_session(sid, exp, sra_session, sra_pilots, sra_units,
                 pdm, udm, pilots, units):
    """Aggregate one session's properties and total durations into a
    single-row DataFrame and persist it to the sessions CSV.

    Returns True when a new row was stored, False when the session was
    already present.  The ``units`` parameter appears unused here; kept for
    call compatibility (TODO confirm).
    """
    # IF this session has been already stored get out, nothing to do here.
    stored_sessions = load_df(ename='session')
    if sid in stored_sessions.index.tolist():
        sys.stdout.write('%s already stored in %s' % (sid, csvs['session']))
        return False
    sys.stdout.write('\n%s --- %s' % (exp, sid))
    s = initialize_entity(ename='session')
    # Session properties: pilots and units.
    # sp = sra_session.filter(etype='pilot', inplace=False)
    # su = sra_session.filter(etype='unit', inplace=False)
    s['sid'].append(sid)
    s['session'].append(None)
    s['experiment'].append(exp)
    s['TTC'].append(sra_session.ttc)
    s['nhost'].append(len(pilots.loc[pilots['sid'] == sid]['hid'].unique()))
    s['nunit'].append(len(sra_units.get()))
    s['npilot'].append(len(sra_pilots.get()))
    s['npilot_active'].append(len(sra_pilots.timestamps(state='PMGR_ACTIVE')))
    s['nunit_done'].append(len(sra_units.timestamps(state='DONE')))
    s['nunit_failed'].append(len(sra_units.timestamps(state='FAILED')))
    # Number of cores requested and used by the session's pilots. Make a copy of
    # the pilots DF with only the columns we need to limit memory overhead.
    pcores = pilots[pilots.sid == sid][['P_LRMS_RUNNING', 'ncore']]
    s['ncore'].append(pcores.ncore.sum())
    s['ncore_active'].append(pcores[pcores.P_LRMS_RUNNING > 0].ncore.sum())
    # Release the temporary copy.
    pcores = None
    # Pilots total durations. NOTE: s initialization guarantees
    # the existence of duration keys.
    for duration in pdm.keys():
        s[duration].append(sra_pilots.duration(pdm[duration]))
    # Units total durations. NOTE: s initialization guarantees the
    # existence of duration keys.
    for duration in udm.keys():
        s[duration].append(sra_units.duration(udm[duration]))
    # Store session.
    session = pd.DataFrame(s, index=[sid])
    store_df(session, stored=stored_sessions, ename='session')
    print '\nstored in %s' % csvs['session']
    return True
# -----------------------------------------------------------------------------
if __name__ == '__main__':
datadir = '../data/'
experiment_tag = 'exp'
# Global constants
# File names where to save the DF of each entity of each session.
csvs = {'session': '%ssessions.csv' % datadir,
'pilot' : '%spilots.csv' % datadir,
'unit' : | |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error
import seaborn as sns
from scipy import stats
import math
def clean_data(df):
    """
    INPUT
    df - pandas dataframe of raw listings

    OUTPUT
    X - A matrix holding all of the variables you want to consider when predicting the response
    y - the corresponding response vector (log of price)

    This function cleans df using the following steps to produce X and y:
    1. Drop rows with 0 price and outlier prices (prices at or above 2950)
    2. Create y as the price column, transformed by log
    3. Create X from selected columns
    4. Deal with missing values
    5. Create dummy variables for selected categorical variables, drop the original columns

    Fixes vs. the previous version: ``fillna(..., inplace=True)`` on a column
    selection operated on a temporary copy (a silent no-op), and assigning
    ``.dtype`` on a DataFrame never converts types — both replaced with real
    assignments; missing free-text fields now correctly count as absent (0).
    """
    # Keep only rows with a usable price (0 < price < 2950)
    df = df[df.price > 0]
    df = df[df.price < 2950]

    # Response: log-price keeps the target closer to normal
    y = df['price'].apply(math.log)

    # Select columns for X
    potential_vars = ['host_listings_count',
                      'calculated_host_listings_count_private_rooms',
                      'neighbourhood_cleansed',
                      'room_type',
                      'property_type',
                      'beds',
                      'availability_365',
                      'number_of_reviews',
                      'neighborhood_overview',
                      'space',
                      'notes',
                      'transit',
                      'access',
                      'interaction',
                      'house_rules',
                      'host_about',
                      'host_is_superhost',
                      'host_has_profile_pic',
                      'host_identity_verified',
                      'instant_bookable',
                      'require_guest_profile_picture',
                      'require_guest_phone_verification',]
    bool_vars = ['host_is_superhost',
                 'host_has_profile_pic',
                 'host_identity_verified',
                 'instant_bookable',
                 'require_guest_profile_picture',
                 'require_guest_phone_verification']
    free_text_vars = ['neighborhood_overview',
                      'space',
                      'notes',
                      'transit',
                      'access',
                      'interaction',
                      'house_rules',
                      'host_about']

    # Work on an explicit copy so the assignments below never hit a view of
    # the caller's frame.
    df = df[potential_vars].copy()

    # Deal with missing values
    df['number_of_reviews'] = df['number_of_reviews'].fillna(0)
    df[bool_vars] = df[bool_vars].fillna('f')        # missing flag -> false
    df[free_text_vars] = df[free_text_vars].fillna('')  # missing text -> absent

    # Numeric columns: impute with the column mean
    num_vars = df.select_dtypes(include=['int', 'float']).columns
    df[num_vars] = df[num_vars].apply(lambda col: col.fillna(col.mean()), axis=0)

    # 't'/'f' flags -> 1/0 integers
    df[bool_vars] = (df[bool_vars] == 't').astype(int)
    # Free-text fields -> presence indicator (1 when any text was provided)
    df[free_text_vars] = (df[free_text_vars] != '').astype(int)

    # Dummy the categorical variables
    cat_vars = ['neighbourhood_cleansed', 'room_type', 'property_type']
    for var in cat_vars:
        # for each cat add dummy var, drop original column
        dummies = pd.get_dummies(df[var], prefix=var, prefix_sep='_', drop_first=True)
        df = pd.concat([df.drop(var, axis=1), dummies], axis=1)

    X = df
    return X, y
def find_optimal_lm_mod(X, y, cutoffs, test_size = .30, random_state=42, plot=True):
    '''
    INPUT
    X - pandas dataframe, X matrix
    y - pandas dataframe, response variable
    cutoffs - list of ints, cutoff for number of non-zero values in dummy categorical vars
    test_size - float between 0 and 1, default 0.3, determines the proportion of data as test data
    random_state - int, default 42, controls random state for train_test_split
    plot - boolean, default True, True to plot result

    OUTPUT
    r2_scores_test - list of floats of r2 scores on the test data
    r2_scores_train - list of floats of r2 scores on the train data
    lm_model - model object from sklearn, refit with the best cutoff found
    X_train, X_test, y_train, y_test - output from sklearn train test split used for optimal model
    '''
    r2_scores_test, r2_scores_train, num_feats, results = [], [], [], dict()
    for cutoff in cutoffs:
        # reduce X matrix: keep only columns whose sum exceeds the cutoff
        # (prunes rarely-set dummy columns)
        reduce_X = X.iloc[:, np.where((X.sum() > cutoff) == True)[0]]
        num_feats.append(reduce_X.shape[1])
        # split the data into train and test
        X_train, X_test, y_train, y_test = train_test_split(reduce_X, y, test_size = test_size, random_state=random_state)
        # fit the model and obtain pred response
        lm_model = LinearRegression(normalize=True)
        lm_model.fit(X_train, y_train)
        y_test_preds = lm_model.predict(X_test)
        y_train_preds = lm_model.predict(X_train)
        # append the r2 value from the test set
        r2_scores_test.append(r2_score(y_test, y_test_preds))
        r2_scores_train.append(r2_score(y_train, y_train_preds))
        results[str(cutoff)] = r2_score(y_test, y_test_preds)
    if plot:
        plt.plot(num_feats, r2_scores_test, label="Test", alpha=.5)
        plt.plot(num_feats, r2_scores_train, label="Train", alpha=.5)
        plt.xlabel('Number of Features')
        plt.ylabel('Rsquared')
        plt.title('Rsquared by Number of Features')
        plt.legend(loc=1)
        plt.show()
    # Refit using the cutoff that gave the best test R^2.
    best_cutoff = max(results, key=results.get)
    # reduce X matrix
    reduce_X = X.iloc[:, np.where((X.sum() > int(best_cutoff)) == True)[0]]
    num_feats.append(reduce_X.shape[1])
    # split the data into train and test
    X_train, X_test, y_train, y_test = train_test_split(reduce_X, y, test_size = test_size, random_state=random_state)
    # fit the model
    # NOTE(review): LinearRegression(normalize=True) was removed in modern
    # scikit-learn — confirm the pinned sklearn version.
    lm_model = LinearRegression(normalize=True)
    lm_model.fit(X_train, y_train)
    return r2_scores_test, r2_scores_train, lm_model, X_train, X_test, y_train, y_test
def main():
plot = False # set to true if you would like to see plots
print_log = True # set to true if you would like to see stats outputted to console
print_result = True
# Data Exploration
desired_width=320
pd.set_option('display.width', desired_width)
pd.set_option('display.max_columns', 50)
# Get a sense of the numerical data in the available datasets.
df_listings = pd.read_csv('data/listings_boston.csv', dtype={"price": str,
"weekly_price": str,
"monthly_price": str,
"security_deposit": str,
"cleaning_fee": str,
"extra_people": str,
"host_response_rate": str})
# clean up price data to make it numeric
df_listings.loc[:, "price"] = df_listings["price"].str.replace(',', '').str.replace('$', '').astype('float')
df_listings.loc[:, "weekly_price"] = df_listings["weekly_price"].str.replace(',', '').str.replace('$', '').astype('float')
df_listings.loc[:, "monthly_price"] = df_listings["monthly_price"].str.replace(',', '').str.replace('$', '').astype('float')
df_listings.loc[:, "security_deposit"] = df_listings["security_deposit"].str.replace(',', '').str.replace('$', '').astype('float')
df_listings.loc[:, "cleaning_fee"] = df_listings["cleaning_fee"].str.replace(',', '').str.replace('$', '').astype('float')
df_listings.loc[:, "extra_people"] = df_listings["extra_people"].str.replace(',', '').str.replace('$', '').astype('float')
df_listings["host_response_rate"].fillna("0", inplace=True)
df_listings.loc[:, "host_response_rate"] = df_listings["host_response_rate"].str.replace('%', '').astype('int')
if print_log:
print(df_listings.describe())
df_neighborhoods = pd.read_csv('data/neighbourhoods_boston.csv')
if print_log:
print(df_neighborhoods.describe())
df_reviews = pd.read_csv('data/reviews_boston.csv')
if print_log:
print(df_reviews.describe())
df_calendar = pd.read_csv('data/calendar_boston.csv', dtype={"price": str, "adjusted_price": str})
# clean up price data to make it numeric
df_calendar.loc[:, "price"] = df_calendar["price"].str.replace(',', '').str.replace('$', '').astype('float')
df_calendar.loc[:, "adjusted_price"] = df_calendar["adjusted_price"].str.replace(',', '').str.replace('$', '').astype('float')
if print_log:
print(df_calendar.describe())
# df_neighborhoods is basically empty and can be ignored
# df_reviews is full of unstructured review data and would have to be mined to produce modelable data
# df_listings has descriptive information about the location
# df_calendar has price information and how it varies over time. Price and adjusted price have to be formatted.
# Going to primarily focus on df_listings
# How many N/A values are there for each column?
if print_log:
for col in df_listings.columns:
print(col, ':', df_listings[col].dropna().shape[0] / df_listings[col].shape[0])
# Possible binary variable conversions: neighborhood_overview, space, notes, transit, access, interaction,
# house_rules
# Are there any correlations we should worry about?
num_vars = ["price",
"weekly_price",
"monthly_price",
"security_deposit",
"cleaning_fee",
"extra_people",
'host_listings_count',
'host_total_listings_count',
'calculated_host_listings_count',
'calculated_host_listings_count_entire_homes',
'calculated_host_listings_count_private_rooms',
'calculated_host_listings_count_shared_rooms',
'host_response_rate',
'accommodates',
'bathrooms',
'bedrooms',
'beds',
'square_feet',
'guests_included',
'minimum_nights',
'minimum_minimum_nights',
'maximum_minimum_nights',
'minimum_nights_avg_ntm',
'maximum_nights',
'minimum_maximum_nights',
'maximum_maximum_nights',
'maximum_nights_avg_ntm',
'availability_30',
'availability_60',
'availability_90',
'availability_365',
'number_of_reviews',
'number_of_reviews_ltm',
'reviews_per_month',
'review_scores_rating',
'review_scores_accuracy',
'review_scores_cleanliness',
'review_scores_checkin',
'review_scores_communication',
'review_scores_location',
'review_scores_value'
]
if plot:
sns.heatmap(df_listings[num_vars].corr(), annot=False, fmt=".2f", cmap="YlGnBu", linewidths=.5, square=True)
plt.show()
# Correlation matrix supports some clearly distinct categories of data
# Pricing: price, weekly_price, monthly_price, security_deposit, cleaning_fee, extra_people
# Host: host_listings_count, host_total_listings_count, calculated_host_listings_count,
# calculated_host_listings_count_entire_homes, calculated_host_listings_count_private_rooms,
# calculated_host_listings_count_shared_rooms
# Property: accommodates, bathrooms, bedrooms, beds, square_feet, guests_included, minimum_nights,
# minimum_minimum_nights, maximum_minimum_nights, minimum_nights_avg_ntm, maximum_nights, minimum_maximum_nights,
# maximum_maximum_nights, maximum_nights_avg_ntm
# Availability: availability_30, availability_60, availability_90, availability_365
# Reviews: number_of_reviews, number_of_reviews_ltm, reviews_per_month, review_scores_rating,
# review_scores_cleanliness, review_scores_checkin, review_scores_communication, review_scores_location,
# review_scores_value
# Get a sense of the categorical data in the available data.
cat_vars = ["space",
"description",
"experiences_offered",
"neighborhood_overview",
"notes",
"transit",
"access",
"interaction",
"house_rules",
"host_name",
"host_since",
"host_location",
"host_about",
"host_response_time",
"host_acceptance_rate",
"host_is_superhost",
"host_neighbourhood",
"host_verifications",
"host_has_profile_pic",
"host_identity_verified",
"street",
"neighbourhood",
"neighbourhood_cleansed",
"market",
"smart_location",
"is_location_exact",
"property_type",
"room_type",
"bed_type",
"amenities",
"extra_people",
"calendar_updated",
"has_availability",
"calendar_last_scraped",
"requires_license",
"instant_bookable",
"is_business_travel_ready",
"cancellation_policy",
"require_guest_profile_picture",
"require_guest_phone_verification"]
if print_log:
for col in df_listings[cat_vars].columns:
print(df_listings[[col, 'price']].groupby([col]).mean())
print(df_listings[col].value_counts())
# free text columns: space, description, neighborhood_overview, notes, transit, access, interaction, house_rules,
# host_name, host_about,
# empty: experiences_offered, market, calendar_last_scraped, requires_license, is_business_travel_ready
# boolean: host_is_superhost, host_has_profile_pic, host_identity_verified, is_location_exact, has_availability,
# instant_bookable, require_guest_profile_picture, require_guest_phone_verification, host_about
# categorical: property_type, room_type, bed_type, amenities, calendar_updated, cancellation_policy,
if print_log:
print(pd.crosstab(df_listings['neighbourhood'], df_listings['room_type']))
# Surprised to see that the top neighborhoods are not very desirable areas to vacation in.
# Also the majority of the listings are for an entire unit.
# Explore target variable, price
target = df_listings['price'].copy()
if plot:
plt.hist(target, bins=[0, 50, 100, 150, 200, 250, 300, 350, 400, 450, 500, 550, 600, 650, 700, 750, 800, 850, 900, 950, 1000])
plt.show()
# how many zero values are there?
if print_log:
print("Number of 0 price listings: ", (target == 0).sum())
# Need to drop 0 price listings
target = target[target != 0]
# Drop outlier prices
# target = target[target <= 4000]
# seems like a log-normal distribution
shape, loc, scale = stats.lognorm.fit(target)
if print_log:
print(shape, loc, scale)
print(stats.kstest(target, "lognorm", args=[shape, loc, scale]))
if plot:
sns.distplot(target, fit=stats.lognorm, kde=False, rug=True)
plt.show()
linspace = np.linspace(0, 1000, 100)
pdf_lognorm = stats.lognorm.pdf(linspace, shape, loc, scale)
plt.hist(target, density=True, bins=[0, 50, 100, 150, 200, 250, 300, 350, 400, 450, 500, 550, 600, 650, 700, 750, 800, 850, 900, 950, 1000])
plt.plot(linspace, pdf_lognorm, label="lognorm")
plt.show()
X, y = clean_data(df_listings)
print(X.head())
print(y.head())
r2_scores_test, r2_scores_train, lm_model, X_train, X_test, y_train, y_test = find_optimal_lm_mod(X, y, [100, 1000, 2000, 3000], plot=plot)
if print_log:
print(list(zip(X_train.columns, np.exp(lm_model.coef_))))
print(r2_scores_train, r2_scores_test)
selected_vars=X_train.columns
lm_model = LinearRegression(normalize=False)
X = X[selected_vars]
lm_model.fit(X, y)
y_preds = lm_model.predict(X)
if print_result:
print(list(zip(X.columns, | |
# Source: repo HBOMAT/AglaUndZufall, file zufall/lib/objekte/zufalls_groesse.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# ZufallsGroesse - Klasse von zufall
#
#
# This file is part of zufall
#
#
# Copyright (c) 2019 <NAME> <EMAIL>
#
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import importlib
from IPython.display import display, Math
from scipy.stats import rv_discrete
from sympy.core.numbers import (Integer, Rational, Float, Zero, One,
NegativeOne, Half, Pi, E)
from sympy import Add, Mul, Pow, Mod
from sympy.core.symbol import Symbol
from sympy.core.relational import (GreaterThan, StrictGreaterThan, LessThan,
StrictLessThan)
from sympy import sympify, sqrt, nsimplify
from sympy import floor
from sympy.printing.latex import latex
from sympy.core.compatibility import iterable
from zufall.lib.objekte.basis import ZufallsObjekt
from zufall.lib.objekte.datenreihe import DatenReihe
from zufall.lib.funktionen.graf_funktionen import (balken1, polygon_zug,
vert_funktion, verlauf, balken_plus_balken, poly_plus_poly, balken_plus_stetig)
from zufall.lib.objekte.ausnahmen import ZufallError
import zufall
# ZufallsGroesse - Klasse
# ------------------------
class ZufallsGroesse(ZufallsObjekt):
"""
Zufallsgröße
**Kurzname** **ZG**
**Erzeugung**
ZG( *verteilung* )
**Parameter**
*verteilung* : *Liste/Tupel* | *dictionary*
Elemente der Liste sind 2-elementige Listen/Tupel,
die die Zuordnung Wert - Wahrscheinlichkeit bzw.
Häufigkeit/Zahl > 0 beinhalten;
In einem dictionary erfolgt die Zuordnung als
zahl : zugeordnete_zahl
Ist die Summe der zugeordneten Zahlen ungleich 1, wird
eine Wahrscheinlichkeitsverteilung durch Normieren
erzeugt
"""
def __new__(cls, *args, **kwargs):
if kwargs.get("h") in (1, 2, 3):
zufalls_groesse_hilfe(kwargs["h"])
return
zahlen = (Integer, int, Float, float, Rational, One, Zero, NegativeOne, Half,
Mul, Add, Pow, Mod)
kontrolle = True
if kwargs.get('kontrolle') == False:
kontrolle = kwargs.get('kontrolle')
try:
if len(args) != 1:
raise ZufallError("ein Argument angeben")
arg = args[0]
if not isinstance(arg, (list, dict)):
raise ZufallError("eine Liste oder ein dictionary angeben")
if isinstance(arg, list):
if not all(map(lambda x: isinstance(x, (list, tuple)) and len(x) == 2, arg)):
raise ZufallError("die Listenelemente müssen 2-elementige Listen/Tupel sein")
if not all(map(lambda x: isinstance(x[0], (int, Integer)) and type(x[1]) in zahlen, arg)):
raise ZufallError("die Elemente der inneren Listen müssen Zahlen sein")
vert = dict(arg)
else:
if not all(map(lambda x: isinstance(x, (int, Integer)), arg.keys())):
raise ZufallError("die Schlüsselwerte des dictionary müssen ganze Zahlen sein")
if not all(map(lambda x: type(x) in zahlen, arg.values())):
raise ZufallError("die Werte des dictionary müssen Zahlen sein")
vert = arg
if kontrolle:
vv = []
for k in vert:
if isinstance(vert[k], Rational):
vv += [[k, vert[k]]]
else:
try:
vv += [[k, nsimplify(vert[k])]]
except RecursionError:
vv += [[k, vert[k]]]
vert = dict(vv)
s = (sum([vert[k] for k in vert]))
if s != 1:
for k in vert:
vert[k] = vert[k] / s
except ZufallError as e:
print('zufall:', str(e))
return
if kwargs.get('parameter'):
parameter = kwargs.get('parameter')
else:
parameter = None
return ZufallsObjekt.__new__(cls, vert, parameter)
def __str__(self):
return "ZufallsGroesse"
# Eigenschaften + Methoden
# ------------------------
@property
def omega(self):
"""Ergebismenge"""
di = self._vert
return set([k for k in di if di[k]])
@property
def n_omega(self):
"""Größe der Ergebnismenge"""
return len(self.omega)
nOmega = n_omega
@property
def erw(self):
"""Erwartungswert"""
ve = self._vert
return sum([k*ve[k] for k in ve])
def erw_(self, *args, **kwargs):
"""Erwartungswert; zugehörige Methode"""
if kwargs.get('h'):
print("\nZusatz d=n Darstellung dezimal mit n Kommastellen\n")
return
d = kwargs.get('d')
e = self.erw
if d:
if isinstance(d, (int, Integer)):
if 0 < d <= 12:
return eval(format(float(e), ".%df" % d))
return float(e)
Erw = erw_
@property
def var(self):
"""Varianz"""
ve, e = self._vert, self.erw
return sum([(k - e)**2*ve[k] for k in ve])
def var_(self, *args, **kwargs):
"""Varianz; zugehörige Methode"""
if kwargs.get('h'):
print("\nZusatz d=n Darstellung dezimal mit n Kommastellen")
print(" b=ja Begriffe\n")
return
if kwargs.get('b'):
print("\nEs sind auch die Begriffe Varianzwert, mittleres Abweichungsquadrat,")
print("Streuungsquadrat oder Dispersion gebräuchlich\n")
return
d = kwargs.get('d')
v = self.var
if d:
if isinstance(d, (int, Integer)):
if 0 < d <= 12:
return eval(format(float(v), ".%df" % d))
return float(v)
Var = var_
@property
def sigma(self):
"""Standardabweichung"""
return sqrt(self.var)
def sigma_(self, *args, **kwargs):
"""Standardabweichung; zugehörige Methode"""
if kwargs.get('h'):
print("\nZusatz d=n Darstellung dezimal mit n Kommastellen\n")
return
d = kwargs.get('d')
s = self.sigma
if d:
if isinstance(d, (int, Integer)):
if 0 < d <= 12:
return eval(format(float(s), ".%df" % d))
return float(s)
Sigma = sigma_
@property
def _vert(self):
"""Wahrscheinlichkeitsverteilung als internes dict"""
return self.args[0]
@property
def vert(self):
"""Wahrscheinlichkeitsverteilung"""
return self._vert
def vert_(self, s=None, p=None, sp=None, **kwargs):
"""Wahrscheinlichkeitsverteilung; zugehörige Methode"""
if kwargs.get('h'):
print("\nZusatz")
print("p=ja - Wahrscheinlichkeiten werden als Prozentwerte ausgegeben")
print("d=n - ebenso, dezimal mit n Kommastellen")
print("s=ja - Spaltenausgabe")
print("sp=ja - ebenso, Prozentwerte")
print("sd=n - ebenso, dezimal mit n Kommastellen\n")
return
di = self._vert
di1 = dict()
d = kwargs.get('d')
sd = kwargs.get('sd')
if p or d or sp or sd:
if sd:
d = sd
if d and not (isinstance(d, (int, Integer)) and (1 < d <= 12)):
print("zufall: für d einen Wert aus [2, 12] angeben")
return
for k in di.keys():
if p or sp:
di1[k] = eval(format(float(100*di[k]), ".2f"))
elif d or sd:
di1[k] = eval(format(float(di[k]), ".%df" % d))
if s or sp or sd:
if sp or sd:
di = di1
kk = list(di.keys())
try:
kk.sort()
except TypeError:
pass
for k in kk:
display(Math(latex(k) + '\\quad ' + latex(di[k])))
print(' ')
return
elif d or p:
return di1
return di
Vert = vert_
@property
def _vert_kum(self):
"""Kumulierte Wahrscheinlichkeitsverteilung; internes dict"""
di = self._vert
ll, sum = [], 0
kk = list(di.keys())
kk.sort()
for k in kk:
sum += di[k]
ll += [(k, sum)]
return dict(ll)
@property
def vert_kum(self):
"""Kumulierte Wahrscheinlichkeitsverteilung"""
return self._vert_kum
def vert_kum_(self, s=None, p=None, sp=None, **kwargs):
"""Kumulierte Wahrscheinlichkeitsverteilung; zugehörige Methode"""
if kwargs.get('h'):
print("\nZusatz")
print("p=ja - Wahrscheinlichkeiten werden als Prozentwerte ausgegeben")
print("d=n - ebenso, dezimal mit n Kommastellen")
print("s=ja - Spaltenausgabe")
print("sp=ja - ebenso, Prozentwerte")
print("sd=n - ebenso, dezimal mit n Kommastellen\n")
return
di = self._vert_kum
di1 = dict()
d = kwargs.get('d')
sd = kwargs.get('sd')
if p or d or sp or sd:
if sd:
d = sd
if d and not (isinstance(d, (int, Integer)) and (1 < d <= 12)):
print("zufall: für d einen Wert aus [2, 12] angeben")
return
for k in di.keys():
if p or sp:
di1[k] = eval(format(float(100*di[k]), ".2f"))
elif d or sd:
di1[k] = eval(format(float(di[k]), ".%df" % d))
if s or sp or sd:
if sp or sd:
di = di1
kk = list(di.keys())
kk.sort()
for k in kk:
display(Math(latex(k) + '\\quad ' + latex(di[k])))
print(' ')
return
elif d or p:
return di1
return di
vertKum = vert_kum
VertKum = vert_kum_
@property
def hist(self):
"""Histogramm"""
daten = self._vert
balken1(daten, typ='W', titel='$\mathrm{Wahrscheinlichkeitsverteilung - Histogramm}$' + '\n')
def hist_(self, *args, **kwargs):
"""Histogramm; zugehörige Methode"""
if kwargs.get('h'):
print("\nZusatz p=ja Polygonzug-Diagramm")
print("Angabe einer Zufallsgröße | Datenreihe - Vergleich mit anderer Verteilung\n")
return
if kwargs.get('p'):
self.poly_zug
return
daten = self._vert
if not args:
balken1(daten, typ='W', titel='$\mathrm{Wahrscheinlichkeitsverteilung - Histogramm}$' + '\n ')
return
if len(args) != 1:
print("zufall: nur ein Argument angeben")
return
arg = args[0]
nv = importlib.import_module('zufall.lib.objekte.normal_verteilung')
ev = importlib.import_module('zufall.lib.objekte.exponential_verteilung')
NormalVerteilung = nv.NormalVerteilung
ExponentialVerteilung = ev.ExponentialVerteilung
if not isinstance(arg, (ZufallsGroesse, DatenReihe, NormalVerteilung, ExponentialVerteilung)):
print("zufall: Zufallsgröße oder Datenreihe angeben")
return
if isinstance(arg, (NormalVerteilung, ExponentialVerteilung)):
balken_plus_stetig(daten, arg, typ='W',
titel='$\mathrm{Wahrscheinlichkeitsverteilung - Histogramm}$' + '\n')
return
if isinstance(arg, DatenReihe) and not arg.is_ganz:
print("zufall: Klasseneinteilung vornehmen")
return
daten2 = arg._vert
if isinstance(arg, ZufallsGroesse):
typ2 = 'W'
titel = '$\mathrm{Vergleich\; von\; Wahrscheinlickeitsverteilungen}$' + '\n' \
+ '$\mathrm{hell - Ausgangs-, dunkel - Vergleichsverteilung}$' + '\n'
else:
typ2 = 'R'
titel = '$\mathrm{Vergleich\; Wahrsch.-\; und\; Häufigk.-Verteilung}$' + '\n' + \
'$\mathrm{gelb \;- Wahrsch.,\; grün\; -\; relative\; Häufigkeiten}$\n'
balken_plus_balken (daten, daten2, typ1='W', typ2=typ2, titel=titel, )
Hist = hist_
@property
def hist_kum(self):
"""Histogramm der kumulierten Wahrscheinlichkeiten"""
daten = self._vert_kum
balken1(daten, typ='W', titel='$\mathrm{Kumulierte\; Wahrscheinlichkeiten - Histogramm}$' + '\n',
ylabel='$P\,(X \,\\leq \, k)$')
histKum = hist_kum
@property
def poly_zug(self):
"""Polygonzug"""
daten = self._vert
polygon_zug(daten, typ='W', titel='$\mathrm{Wahrscheinlichkeitsverteilung - Polygonzug}$' + | |
instruction address
return DDGViewInstruction(self._cfg, self._ddg, key, simplified=self._simplified)
class DDG(Analysis):
"""
This is a fast data dependence graph directly generated from our CFG analysis result. The only reason for its
existence is the speed. There is zero guarantee for being sound or accurate. You are supposed to use it only when
you want to track the simplest data dependence, and you do not care about soundness or accuracy.
For a better data dependence graph, please consider performing a better static analysis first (like Value-set
Analysis), and then construct a dependence graph on top of the analysis result (for example, the VFG in angr).
Also note that since we are using states from CFG, any improvement in analysis performed on CFG (like a points-to
analysis) will directly benefit the DDG.
"""
def __init__(self, cfg, start=None, call_depth=None, block_addrs=None):
"""
:param cfg: Control flow graph. Please make sure each node has an associated `state` with it. You may
want to generate your CFG with `keep_state=True`.
:param start: An address, Specifies where we start the generation of this data dependence graph.
:param call_depth: None or integers. A non-negative integer specifies how deep we would like to track in the
call tree. None disables call_depth limit.
:param iterable or None block_addrs: A collection of block addresses that the DDG analysis should be performed
on.
"""
# Sanity check
if not cfg._keep_state:
raise AngrDDGError('CFG must have "keep_state" set to True.')
self._cfg = cfg
self._start = self.project.entry if start is None else start
self._call_depth = call_depth
self._block_addrs = block_addrs
# analysis output
self._stmt_graph = networkx.DiGraph()
self._data_graph = networkx.DiGraph()
self._simplified_data_graph = None
self._ast_graph = networkx.DiGraph() # A mapping of ProgramVariable to ASTs
self._symbolic_mem_ops = set()
# Data dependency graph per function
self._function_data_dependencies = None
self.view = DDGView(self._cfg, self, simplified=False)
self.simple_view = DDGView(self._cfg, self, simplified=True)
# Local variables
self._live_defs = None
self._temp_variables = None
self._temp_register_symbols = None
self._temp_edges = None
self._temp_register_symbols = None
self._variables_per_statement = None
self._custom_data_per_statement = None
self._register_edges = None
# Begin construction!
self._construct()
#
# Properties
#
@property
def graph(self):
"""
:returns: A networkx DiGraph instance representing the dependence relations between statements.
:rtype: networkx.DiGraph
"""
return self._stmt_graph
@property
def data_graph(self):
"""
Get the data dependence graph.
:return: A networkx DiGraph instance representing data dependence.
:rtype: networkx.DiGraph
"""
return self._data_graph
@property
def simplified_data_graph(self):
"""
:return:
"""
if self._simplified_data_graph is None:
self._simplified_data_graph = self._simplify_data_graph(self.data_graph)
return self._simplified_data_graph
@property
def ast_graph(self):
return self._ast_graph
#
# Public methods
#
def pp(self):
"""
Pretty printing.
"""
# TODO: make it prettier
for src, dst, data in self.graph.edges(data=True):
print("%s <-- %s, %s" % (src, dst, data))
def dbg_repr(self):
"""
Representation for debugging.
"""
# TODO:
return str(self.graph)
def __contains__(self, code_location):
"""
Returns whether `code_location` is in the graph.
:param code_location: A CodeLocation instance.
:returns: True/False
"""
return code_location in self.graph
def get_predecessors(self, code_location):
"""
Returns all predecessors of the code location.
:param code_location: A CodeLocation instance.
:returns: A list of all predecessors.
"""
return self.graph.predecessors(code_location)
def function_dependency_graph(self, func):
"""
Get a dependency graph for the function `func`.
:param func: The Function object in CFG.function_manager.
:returns: A networkx.DiGraph instance.
"""
if self._function_data_dependencies is None:
self._build_function_dependency_graphs()
if func in self._function_data_dependencies:
return self._function_data_dependencies[func]
# Not found
return None
def data_sub_graph(self, pv, simplified=True, killing_edges=False, excluding_types=None):
"""
Get a subgraph from the data graph or the simplified data graph that starts from node pv.
:param ProgramVariable pv: The starting point of the subgraph.
:param bool simplified: When True, the simplified data graph is used, otherwise the data graph is used.
:param bool killing_edges: Are killing edges included or not.
:param iterable excluding_types: Excluding edges whose types are among those excluded types.
:return: A subgraph.
:rtype: networkx.MultiDiGraph
"""
result = networkx.MultiDiGraph()
result.add_node(pv)
base_graph = self.simplified_data_graph if simplified else self.data_graph
if pv not in base_graph:
return result
# traverse all edges and add them to the result graph if needed
queue = [ pv ]
traversed = set()
while queue:
elem = queue[0]
queue = queue[1:]
if elem in traversed:
continue
traversed.add(elem)
out_edges = base_graph.out_edges(elem, data=True)
if not killing_edges:
# remove killing edges
out_edges = [ (a, b, data) for a, b, data in out_edges if 'type' not in data or data['type'] != 'kill']
if excluding_types:
out_edges = [ (a, b, data) for a, b, data in out_edges if
'type' not in data or data['type'] not in excluding_types
]
for src, dst, data in out_edges:
result.add_edge(src, dst, **data)
if dst not in traversed:
queue.append(dst)
return result
#
# Private methods
#
def _construct(self):
"""
Construct the data dependence graph.
We track the following types of dependence:
- (Intra-IRSB) temporary variable dependencies
- Register dependencies
- Memory dependencies, although it's very limited. See below.
We track the following types of memory access:
- (Intra-functional) Stack read/write.
Trace changes of stack pointers inside a function, and the dereferences of stack pointers.
- (Inter-functional) Stack read/write.
- (Global) Static memory positions.
Keep a map of all accessible memory positions to their source statements per function. After that, we
traverse the CFG and link each pair of reads/writes together in the order of control-flow.
We do not track the following types of memory access
- Symbolic memory access
Well, they cannot be tracked under fastpath mode (which is the mode we are generating the CTF) anyways.
"""
worklist = []
worklist_set = set()
# Initialize the worklist
if self._start is None:
# initial nodes are those nodes in CFG that has no in-degrees
for n in self._cfg.graph.nodes():
if self._cfg.graph.in_degree(n) == 0:
# Put it into the worklist
job = DDGJob(n, 0)
self._worklist_append(job, worklist, worklist_set)
else:
for n in self._cfg.get_all_nodes(self._start):
job = DDGJob(n, 0)
self._worklist_append(job, worklist, worklist_set)
# A dict storing defs set
# DDGJob -> LiveDefinition
live_defs_per_node = {}
while worklist:
# Pop out a node
ddg_job = worklist[0]
l.debug("Processing %s.", ddg_job)
node, call_depth = ddg_job.cfg_node, ddg_job.call_depth
worklist = worklist[ 1 : ]
worklist_set.remove(node)
# Grab all final states. There are usually more than one (one state for each successor), and we gotta
# process all of them
final_states = node.final_states
if node in live_defs_per_node:
live_defs = live_defs_per_node[node]
else:
live_defs = LiveDefinitions()
live_defs_per_node[node] = live_defs
successing_nodes = list(self._cfg.graph.successors(node))
# try to assign every final state to a successor and vice versa
match_suc = defaultdict(bool)
match_state = defaultdict(set)
for suc in successing_nodes:
matched = False
for state in final_states:
try:
if state.solver.eval(state.ip) == suc.addr:
match_suc[suc.addr] = True
match_state[state].add(suc)
matched = True
except (SimUnsatError, SimSolverModeError, ZeroDivisionError):
# ignore
matched = matched
if not matched:
break
# whether all final states could be matched to a successor and vice versa
matches = len(match_suc) == len(successing_nodes) and len(match_state) == len(final_states)
for state in final_states:
if not matches and state.history.jumpkind == 'Ijk_FakeRet' and len(final_states) > 1:
# Skip fakerets if there are other control flow transitions available
continue
new_call_depth = call_depth
if state.history.jumpkind == 'Ijk_Call':
new_call_depth += 1
elif state.history.jumpkind == 'Ijk_Ret':
new_call_depth -= 1
if self._call_depth is not None and call_depth > self._call_depth:
l.debug('Do not trace into %s due to the call depth limit', state.ip)
continue
new_defs = self._track(state, live_defs, node.irsb.statements if node.irsb is not None else None)
#corresponding_successors = [n for n in successing_nodes if
# not state.ip.symbolic and n.addr == state.solver.eval(state.ip)]
#if not corresponding_successors:
# continue
changed = False
# if every successor can be matched with one or more final states (by IP address),
# only take over the LiveDefinition of matching states
if matches:
add_state_to_sucs = match_state[state]
else:
add_state_to_sucs = successing_nodes
for successing_node in add_state_to_sucs:
if (state.history.jumpkind == 'Ijk_Call' or state.history.jumpkind.startswith('Ijk_Sys')) and \
(state.ip.symbolic or successing_node.addr != state.solver.eval(state.ip)):
suc_new_defs = self._filter_defs_at_call_sites(new_defs)
else:
suc_new_defs = new_defs
if successing_node in live_defs_per_node:
defs_for_next_node = live_defs_per_node[successing_node]
else:
defs_for_next_node = LiveDefinitions()
live_defs_per_node[successing_node] = defs_for_next_node
for var, code_loc_set in suc_new_defs.items():
# l.debug("Adding %d new definitions for variable %s.", len(code_loc_set), var)
changed |= defs_for_next_node.add_defs(var, code_loc_set)
if changed:
if (self._call_depth is None) or \
| |
# (c) 2014, <NAME> <<EMAIL>>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import base64
import inspect
import os
import re
import shlex
import traceback
import json
HAVE_KERBEROS = False
try:
import kerberos
HAVE_KERBEROS = True
except ImportError:
pass
from ansible.compat.six import string_types
from ansible.compat.six.moves.urllib.parse import urlunsplit
from ansible.errors import AnsibleError, AnsibleConnectionFailure
from ansible.errors import AnsibleFileNotFound
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.plugins.connection import ConnectionBase
from ansible.utils.hashing import secure_hash
from ansible.utils.path import makedirs_safe
try:
import winrm
from winrm import Response
from winrm.protocol import Protocol
except ImportError:
raise AnsibleError("winrm is not installed")
try:
import xmltodict
except ImportError:
raise AnsibleError("xmltodict is not installed")
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class Connection(ConnectionBase):
'''WinRM connections over HTTP/HTTPS.'''
transport = 'winrm'
module_implementation_preferences = ('.ps1', '.exe', '')
become_methods = []
allow_executable = False
def __init__(self, *args, **kwargs):
self.has_pipelining = False
self.protocol = None
self.shell_id = None
self.delegate = None
self._shell_type = 'powershell'
# FUTURE: Add runas support
super(Connection, self).__init__(*args, **kwargs)
def set_host_overrides(self, host, hostvars=None):
'''
Override WinRM-specific options from host variables.
'''
self._winrm_host = self._play_context.remote_addr
self._winrm_port = int(self._play_context.port or 5986)
self._winrm_scheme = hostvars.get('ansible_winrm_scheme', 'http' if self._winrm_port == 5985 else 'https')
self._winrm_path = hostvars.get('ansible_winrm_path', '/wsman')
self._winrm_user = self._play_context.remote_user
self._winrm_pass = self._play_context.password
if hasattr(winrm, 'FEATURE_SUPPORTED_AUTHTYPES'):
self._winrm_supported_authtypes = set(winrm.FEATURE_SUPPORTED_AUTHTYPES)
else:
# for legacy versions of pywinrm, use the values we know are supported
self._winrm_supported_authtypes = set(['plaintext','ssl','kerberos'])
# TODO: figure out what we want to do with auto-transport selection in the face of NTLM/Kerb/CredSSP/Cert/Basic
transport_selector = 'ssl' if self._winrm_scheme == 'https' else 'plaintext'
if HAVE_KERBEROS and ((self._winrm_user and '@' in self._winrm_user)):
self._winrm_transport = 'kerberos,%s' % transport_selector
else:
self._winrm_transport = transport_selector
self._winrm_transport = hostvars.get('ansible_winrm_transport', self._winrm_transport)
if isinstance(self._winrm_transport, string_types):
self._winrm_transport = [x.strip() for x in self._winrm_transport.split(',') if x.strip()]
unsupported_transports = set(self._winrm_transport).difference(self._winrm_supported_authtypes)
if unsupported_transports:
raise AnsibleError('The installed version of WinRM does not support transport(s) %s' % list(unsupported_transports))
# arg names we're going passing directly
internal_kwarg_mask = set(['self', 'endpoint', 'transport', 'username', 'password', 'scheme', 'path'])
self._winrm_kwargs = dict(username=self._winrm_user, password=self._winrm_pass)
argspec = inspect.getargspec(Protocol.__init__)
supported_winrm_args = set(argspec.args)
supported_winrm_args.update(internal_kwarg_mask)
passed_winrm_args = set([v.replace('ansible_winrm_', '') for v in hostvars if v.startswith('ansible_winrm_')])
unsupported_args = passed_winrm_args.difference(supported_winrm_args)
# warn for kwargs unsupported by the installed version of pywinrm
for arg in unsupported_args:
display.warning("ansible_winrm_{0} unsupported by pywinrm (is an up-to-date version of pywinrm installed?)".format(arg))
# pass through matching kwargs, excluding the list we want to treat specially
for arg in passed_winrm_args.difference(internal_kwarg_mask).intersection(supported_winrm_args):
self._winrm_kwargs[arg] = hostvars['ansible_winrm_%s' % arg]
def _winrm_connect(self):
    '''
    Establish a WinRM connection over HTTP/HTTPS.

    Tries each configured transport in order and returns the first pywinrm
    Protocol object that can open a remote shell. Raises
    AnsibleConnectionFailure with the per-transport errors if all fail.
    '''
    display.vvv("ESTABLISH WINRM CONNECTION FOR USER: %s on PORT %s TO %s" %
        (self._winrm_user, self._winrm_port, self._winrm_host), host=self._winrm_host)
    netloc = '%s:%d' % (self._winrm_host, self._winrm_port)
    endpoint = urlunsplit((self._winrm_scheme, netloc, self._winrm_path, '', ''))
    errors = []
    for transport in self._winrm_transport:
        if transport == 'kerberos' and not HAVE_KERBEROS:
            errors.append('kerberos: the python kerberos library is not installed')
            continue
        display.vvvvv('WINRM CONNECT: transport=%s endpoint=%s' % (transport, endpoint), host=self._winrm_host)
        try:
            protocol = Protocol(endpoint, transport=transport, **self._winrm_kwargs)
            # open the shell from connect so we know we're able to talk to the server
            if not self.shell_id:
                self.shell_id = protocol.open_shell(codepage=65001)  # UTF-8
                display.vvvvv('WINRM OPEN SHELL: %s' % self.shell_id, host=self._winrm_host)
            return protocol
        except Exception as e:
            err_msg = to_text(e).strip()
            if re.search(to_text(r'Operation\s+?timed\s+?out'), err_msg, re.I):
                # fail fast on timeouts: retrying other transports will only
                # take as long again
                raise AnsibleError('the connection attempt timed out')
            m = re.search(to_text(r'Code\s+?(\d{3})'), err_msg)
            if m:
                code = int(m.groups()[0])
                if code == 401:
                    err_msg = 'the specified credentials were rejected by the server'
                elif code == 411:
                    # 411 (Length Required) means the server answered, so the
                    # transport/auth itself works -- treat as connected
                    return protocol
            errors.append(u'%s: %s' % (transport, err_msg))
            display.vvvvv(u'WINRM CONNECTION ERROR: %s\n%s' % (err_msg, to_text(traceback.format_exc())), host=self._winrm_host)
    if errors:
        raise AnsibleConnectionFailure(', '.join(map(to_native, errors)))
    else:
        raise AnsibleError('No transport found for WinRM connection')
def _winrm_send_input(self, protocol, shell_id, command_id, stdin, eof=False):
    """
    Push one chunk of stdin to a running remote command.

    Builds the WSMan rsp:Send SOAP envelope by hand (pywinrm has no public
    API for streaming stdin) and sends it through the supplied protocol.
    Setting eof marks the stream as finished on the server side.
    """
    envelope = protocol._get_soap_header(
        resource_uri='http://schemas.microsoft.com/wbem/wsman/1/windows/shell/cmd',
        action='http://schemas.microsoft.com/wbem/wsman/1/windows/shell/Send',
        shell_id=shell_id)
    message = {'env:Envelope': envelope}
    body = envelope.setdefault('env:Body', {})
    stream = body.setdefault('rsp:Send', {}).setdefault('rsp:Stream', {})
    stream['@Name'] = 'stdin'
    stream['@CommandId'] = command_id
    # stdin payload travels base64-encoded inside the XML text node
    stream['#text'] = base64.b64encode(to_bytes(stdin))
    if eof:
        stream['@End'] = 'true'
    protocol.send_message(xmltodict.unparse(message))
def _winrm_exec(self, command, args=(), from_exec=False, stdin_iterator=None):
if not self.protocol:
self.protocol = self._winrm_connect()
self._connected = True
if from_exec:
display.vvvvv("WINRM EXEC %r %r" % (command, args), host=self._winrm_host)
else:
display.vvvvvv("WINRM EXEC %r %r" % (command, args), host=self._winrm_host)
command_id = None
try:
stdin_push_failed = False
command_id = self.protocol.run_command(self.shell_id, to_bytes(command), map(to_bytes, args), console_mode_stdin=(stdin_iterator is None))
# TODO: try/except around this, so we can get/return the command result on a broken pipe or other failure (probably more useful than the 500 that comes from this)
try:
if stdin_iterator:
for (data, is_last) in stdin_iterator:
self._winrm_send_input(self.protocol, self.shell_id, command_id, data, eof=is_last)
except Exception as ex:
from traceback import format_exc
display.warning("FATAL ERROR DURING FILE TRANSFER: %s" % format_exc(ex))
stdin_push_failed = True
if stdin_push_failed:
raise AnsibleError('winrm send_input failed')
# NB: this can hang if the receiver is still running (eg, network failed a Send request but the server's still happy).
# FUTURE: Consider adding pywinrm status check/abort operations to see if the target is still running after a failure.
response = Response(self.protocol.get_command_output(self.shell_id, command_id))
# TODO: check result from response and set stdin_push_failed if we have nonzero
if from_exec:
display.vvvvv('WINRM RESULT %r' % to_text(response), host=self._winrm_host)
else:
display.vvvvvv('WINRM RESULT %r' % to_text(response), host=self._winrm_host)
display.vvvvvv('WINRM STDOUT %s' % to_text(response.std_out), host=self._winrm_host)
display.vvvvvv('WINRM STDERR %s' % to_text(response.std_err), host=self._winrm_host)
if stdin_push_failed:
raise AnsibleError('winrm send_input failed; \nstdout: %s\nstderr %s' % (response.std_out, response.std_err))
return response
finally:
if command_id:
self.protocol.cleanup_command(self.shell_id, command_id)
def _connect(self):
    """Connect to the host, establishing the WinRM protocol session lazily."""
    super(Connection, self)._connect()
    if self.protocol:
        # session already established, nothing to do
        return self
    self.protocol = self._winrm_connect()
    self._connected = True
    return self
def _reset(self): # used by win_reboot (and any other action that might need to bounce the state)
self.protocol = None
self.shell_id = None
self._connect()
def exec_command(self, cmd, in_data=None, sudoable=True):
    """
    Run a command (usually PowerShell) on the remote Windows host.

    Returns:
        tuple: (status_code, stdout bytes, stderr); stderr is reduced to the
        Error stream when the raw output is CLIXML and parses cleanly
    """
    super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
    # BUGFIX: shlex.split() cannot handle bytes on Python 3 -- split a
    # native str instead (to_native is bytes on py2, str on py3)
    cmd_parts = shlex.split(to_native(cmd), posix=False)
    # BUGFIX: materialize the map() result; on Python 3 it is a lazy
    # iterator and the indexing/slicing below would raise TypeError
    cmd_parts = list(map(to_text, cmd_parts))
    script = None
    cmd_ext = cmd_parts and self._shell._unquote(cmd_parts[0]).lower()[-4:] or ''
    # Support running .ps1 files (via script/raw).
    if cmd_ext == '.ps1':
        script = '& %s' % cmd
    # Support running .bat/.cmd files; change back to the default system encoding instead of UTF-8.
    elif cmd_ext in ('.bat', '.cmd'):
        script = '[System.Console]::OutputEncoding = [System.Text.Encoding]::Default; & %s' % cmd
    # Encode the command if not already encoded; supports running simple PowerShell commands via raw.
    elif '-EncodedCommand' not in cmd_parts:
        script = cmd
    if script:
        cmd_parts = self._shell._encode_script(script, as_list=True, strict_mode=False)
    if '-EncodedCommand' in cmd_parts:
        # decode for logging so the operator sees the real command text
        encoded_cmd = cmd_parts[cmd_parts.index('-EncodedCommand') + 1]
        decoded_cmd = to_text(base64.b64decode(encoded_cmd).decode('utf-16-le'))
        display.vvv("EXEC %s" % decoded_cmd, host=self._winrm_host)
    else:
        display.vvv("EXEC %s" % cmd, host=self._winrm_host)
    try:
        result = self._winrm_exec(cmd_parts[0], cmd_parts[1:], from_exec=True)
    except Exception:
        traceback.print_exc()
        raise AnsibleConnectionFailure("failed to exec cmd %s" % cmd)
    result.std_out = to_bytes(result.std_out)
    result.std_err = to_bytes(result.std_err)
    # parse just stderr from CLIXML output
    if self.is_clixml(result.std_err):
        try:
            result.std_err = self.parse_clixml_stream(result.std_err)
        except Exception:
            # unsure if we're guaranteed a valid xml doc- use raw output in case of error
            pass
    return (result.status_code, result.std_out, result.std_err)
def is_clixml(self, value):
    """Return True when *value* (stderr as bytes) starts with the CLIXML marker."""
    # BUGFIX: callers pass to_bytes()-converted stderr; comparing against a
    # str prefix raises TypeError on Python 3, so use a bytes literal
    return value.startswith(b"#< CLIXML")
# hacky way to get just stdout- not always sure of doc framing here, so use with care
def parse_clixml_stream(self, clixml_doc, stream_name='Error'):
    """
    Extract the text of one stream (default 'Error') from a CLIXML document.

    NOTE(review): the string methods here expect str, but the caller
    (exec_command) passes bytes and wraps the call in try/except so parse
    failures fall back to the raw output -- confirm intended input type
    before reusing elsewhere.
    """
    # strip the CLIXML marker line so the remainder parses as plain XML
    clear_xml = clixml_doc.replace('#< CLIXML\r\n', '')
    doc = xmltodict.parse(clear_xml)
    # keep only <S> elements tagged with the requested stream;
    # _x000D__x000A_ is the CLIXML escape for CR/LF
    lines = [l.get('#text', '').replace('_x000D__x000A_', '') for l in doc.get('Objs', {}).get('S', {}) if l.get('@S') == stream_name]
    return '\r\n'.join(lines)
# FUTURE: determine buffer size at runtime via remote winrm config?
def _put_file_stdin_iterator(self, in_path, out_path, buffer_size=250000):
in_size = os.path.getsize(to_bytes(in_path, errors='surrogate_or_strict'))
offset = 0
with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file:
for out_data in iter((lambda:in_file.read(buffer_size)), ''):
offset += len(out_data)
self._display.vvvvv('WINRM PUT "%s" to "%s" (offset=%d size=%d)' % (in_path, out_path, offset, len(out_data)), host=self._winrm_host)
# yes, we're double-encoding over the wire in this case- we want to ensure that the data shipped to the end PS pipeline is still b64-encoded
b64_data = base64.b64encode(out_data) + '\r\n'
# cough up the data, as well as an indicator if this is the last chunk so | |
# Copyright 2017 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module to handle state machine output, signals and result reports.
"""
#
# IMPORTS
#
from datetime import datetime
from tessia.server.state_machines import MACHINES
from tessia.server.scheduler import exceptions
import builtins
import logging
import os
import pickle
import signal
import sys
#
# CONSTANTS AND DEFINITIONS
#
# How much time (seconds) to wait for machine to cleanup after a signal was
# received
CLEANUP_TIME = 60
# Cancel signals we can handle
CANCEL_SIGNALS = (
    signal.SIGTERM,
    signal.SIGHUP,
    signal.SIGINT
)
# Format used to save the end date as string
DATE_FORMAT = '%Y-%m-%d %H:%M:%S:%f'
# Status codes that will be parsed by the looper
RESULT_CANCELED = -1
RESULT_TIMEOUT = -2
RESULT_EXCEPTION = -3
RESULT_SUCCESS = 0
# String to be used in process comm to identify it as a tessia job in the list
# of processes
WORKER_COMM = 'tessia-job'
# Name for the file that will hold pickled parameters when execv-ing
# in interrupted workers
WRAPPER_PARAMETERS_FILE = 'wrapper_init_parameters'
#
# CODE
#
class MachineWrapper:
    """
    This class' purpose is to create the appropriate environment for the state
    machine to run. That means:

    - handle signals to gracefully shutdown the machine
    - make sure the job finishes if timeout is reached
    - redirect streams to the correct output file defined by the scheduler
    - switch to the appropriate working directory so that the machine has a
      place to store its files if needed
    """
    def __init__(self, run_dir, job_type, job_params, timeout):
        """
        Constructor, only initializes internal variables.

        Args:
            run_dir (str): working directory for the job
            job_type (str): key into MACHINES.classes selecting the machine
            job_params: parameters forwarded to the state machine
            timeout (int): job timeout in seconds; 0/negative disables it
        """
        self._logger = logging.getLogger(__name__)
        # instance of state machine
        self._machine = None
        # we switch to this directory after forking
        self._run_dir = run_dir
        # job type determines which state machine to use
        self._job_type = job_type
        # parameters to pass to the state machine
        self._job_params = job_params
        # path to results file where exit code and end date will be store on
        # test end
        self._result_file = '{}/.{}'.format(
            self._run_dir, os.path.basename(self._run_dir))
        self._timeout = timeout
        # when True, delayed SIGALRM deliveries are ignored by the handlers
        self._mask_timeouts = False
    # __init__()
    def _pickle_cleanup_parameters(self, ret_code):
        """
        Write the parameters needed to build a new MachineWrapper to
        run the cleanup routine after an execv.

        Args:
            ret_code (int): status of the state machine start method
        """
        # The file will be written in the current working directory of the job.
        with open(WRAPPER_PARAMETERS_FILE, 'wb') as params_file:
            pickle.dump(
                (ret_code, self._run_dir, self._job_type,
                 self._job_params, self._timeout),
                params_file)
    # _pickle_cleanup_parameters()
    def _supress_signals(self):
        """
        Set any cancel signals to be ignored, cancel any outstanding alarms,
        and mask any pending alarms that could already have been triggered.

        Masking alarms is necessary since python only calls a signal handling
        function at an unspecified time after the signal happened.
        This means that the alarm handling function could be called even after
        the alarm was reset with alarm(0).
        """
        # NOTE: name keeps the historical 'supress' spelling used by callers
        MachineWrapper.set_cancel_signal_handler(signal.SIG_IGN)
        self._mask_timeouts = True
        signal.alarm(0)
    def _handle_cancel(self, *_args, **_kwargs):
        """
        Handle a cancel signal (SIGTERM/SIGHUP/SIGINT).

        Raises:
            WrapperCanceled: always, to unwind into start()'s handler
        """
        self._logger.error('Caught cancel signal, cleaning up and aborting...')
        # Don't let cancel signals arrive after this one is handled.
        self._supress_signals()
        raise exceptions.WrapperCanceled
    # _handle_cancel()
    def _handle_timeout(self, *_args, **_kwargs):
        """
        Handle a regular timeout (SIGALRM) while the state machine executes.

        Raises:
            WrapperTimeout: when not suppressed by _mask_timeouts
        """
        self._logger.error(
            'Caught timeout signal, cleaning up and aborting...')
        if self._mask_timeouts:
            # Ignore delayed alarms.
            return
        # Don't let cancel signals arrive after this one is handled.
        self._supress_signals()
        raise exceptions.WrapperTimeout
    # _handle_timeout()
    def _handle_cleanup_timeout(self, *_args, **_kwargs):
        """
        Handle a timeout when the state machine is cleaning up after a timeout
        or cancel signal.

        Raises:
            WrapperTimeout: when not suppressed by _mask_timeouts
        """
        self._logger.error('Caught timeout signal while cleaning up')
        if self._mask_timeouts:
            return
        # Cancel signals are already ignored at the start of the cleanup,
        # no point in doing it again here, we just need to supress the alarm.
        self._mask_timeouts = True
        signal.alarm(0)
        raise exceptions.WrapperTimeout
    # _handle_cleanup_timeout()
    def _write_result(self, ret_code, cleanup_code=None):
        """
        Write the result file with exit code, cleanup code and end time,
        one in each of three lines (cleanup line omitted when not given).

        Args:
            ret_code (int): status code for the start method of the machine
            cleanup_code (int): status code for the cleanup method
        """
        status = [str(ret_code)]
        if cleanup_code is not None:
            status.append(str(cleanup_code))
        status.append(datetime.utcnow().strftime(DATE_FORMAT))
        with open(self._result_file, 'w') as result_file:
            result_file.write('{}\n'.format('\n'.join(status)))
    # _write_result()
    @staticmethod
    def set_cancel_signal_handler(handler):
        """
        Set the handler function for all cancellation signals.

        Args:
            handler (function): signal handler, like for signal.signal
        """
        for signal_type in CANCEL_SIGNALS:
            signal.signal(signal_type, handler)
    @staticmethod
    def write_comm():
        """
        Change the process comm so that the scheduler can identify it.

        This is a static method so that it can be called early on by
        a new interpreter when handling timeout/cancel signals.
        """
        with open('/proc/self/comm', 'w') as comm_file:
            # comm file gets truncated to 15-bytes + null terminator
            comm_file.write(WORKER_COMM)
    # write_comm
    def start(self):
        """
        Redirect the process outputs, setup signals and
        start the state machine.
        """
        os.makedirs(self._run_dir, exist_ok=True)
        # NOTE(review): block scope under this `with` reconstructed from a
        # whitespace-mangled source -- confirm against the original file
        with open('{}/output'.format(self._run_dir), 'wb') as log_file:
            # send both stdout and stderr of this process to the job log
            sys.stdout.flush()
            os.dup2(log_file.fileno(), sys.stdout.fileno())
            sys.stderr.flush()
            os.dup2(log_file.fileno(), sys.stderr.fileno())
            # replace the original print by one that always performs flush
            # so that output goes directly to the file
            orig_print = builtins.print
            def new_print(*args, **kwargs):
                """Print function with auto-flush"""
                if 'flush' in kwargs:
                    kwargs.pop('flush')
                orig_print(*args, **kwargs, flush=True)
            builtins.print = new_print
            MachineWrapper.write_comm()
            os.chdir(self._run_dir)
            self._machine = MACHINES.classes[self._job_type](
                self._job_params)
            try:
                # This outer try block is to catch the timeout/cancel
                # exceptions.
                try:
                    # This inner try block is to ensure the timeout/cancel
                    # signals are suppressed in the finally block.
                    MachineWrapper.set_cancel_signal_handler(self._handle_cancel)
                    if self._timeout > 0:
                        # Timeout was provided: set the alarm.
                        self._mask_timeouts = False
                        signal.signal(signal.SIGALRM, self._handle_timeout)
                        signal.alarm(self._timeout)
                    try:
                        self._logger.debug("state machine start")
                        ret_code = self._machine.start()
                    # NOTE(review): for the outer handlers to be reachable for
                    # exceptions raised inside start(), WrapperCanceled and
                    # WrapperTimeout presumably do not derive from Exception --
                    # confirm in tessia.server.scheduler.exceptions
                    except Exception:
                        sys.excepthook(*sys.exc_info())
                        ret_code = RESULT_EXCEPTION
                finally:
                    self._supress_signals()
                # At this point either an exception occurred and therefore
                # a cleanup is needed or the machine including the cleanup finished
                self._logger.debug(
                    "state machine finished with ret_code=%d", ret_code)
            except exceptions.WrapperCanceled:
                self._logger.debug("caught WrapperCanceled exception")
                ret_code = RESULT_CANCELED
            except exceptions.WrapperTimeout:
                self._logger.debug("caught WrapperTimeout exception")
                ret_code = RESULT_TIMEOUT
            if self._machine.cleaning_up or ret_code == RESULT_SUCCESS:
                # The state machine was in the process of cleaning up
                # or finished the cleanup,
                # don't do it again.
                self._write_result(ret_code)
                return
            # The state machine was not yet cleaning up, do it in
            # a new interpreter since the timeout/cancel exceptions
            # could have left everything in an undefined state.
            self._exec_for_cleanup(ret_code)
    # start()
    def _exec_for_cleanup(self, ret_code):
        """
        Substitute the current process by a new interpreter
        that will call the machine's cleanup routine.

        Args:
            ret_code (int): status code for the executed start method
        """
        self._logger.debug("preparing cleanup")
        sys.stdout.flush()
        sys.stderr.flush()
        self._pickle_cleanup_parameters(ret_code)
        # resolve the running interpreter's binary and replace this process
        python_location = os.readlink('/proc/self/exe')
        os.execv(python_location,
                 ['python', '-m', __name__])
    # _exec_for_cleanup
    def interruption_cleanup(self, ret_code):
        """
        Run the cleanup routine for the state machine. Should be called
        in a new interpreter after the original machine was preempted by
        a timeout or cancel signal.

        Report the status of the original state machine and the cleanup phase
        in a file.

        Args:
            ret_code (int): the status of the state machine that was executing
                before the cleanup
        """
        machine = MACHINES.classes[self._job_type](
            self._job_params)
        try:
            try:
                # give the cleanup a bounded amount of time to finish
                self._mask_timeouts = False
                signal.signal(signal.SIGALRM, self._handle_cleanup_timeout)
                signal.alarm(CLEANUP_TIME)
                try:
                    cleanup_code = machine.cleanup()
                except Exception:
                    sys.excepthook(*sys.exc_info())
                    cleanup_code = RESULT_EXCEPTION
            finally:
                self._mask_timeouts = True
                signal.alarm(0)
        except exceptions.WrapperTimeout:
            cleanup_code = RESULT_TIMEOUT
        self._write_result(ret_code, cleanup_code)
# MachineWrapper
def do_interruption_cleanup():
"""
Read pre-pickled parameters for a machine wrapper and run the cleanup
for this machine. To be called after executing a new python interpreter
when handling cancel/timeout signals.
"""
# Ignore signals since we are already cleaning up.
# signal.SIG_IGN is a special value which tells signal.signal
# to ignore the signal.
MachineWrapper.set_cancel_signal_handler(signal.SIG_IGN)
MachineWrapper.write_comm()
# We should be in the job working directory, where the previous interpreter
# in this process wrote the pickled parameters | |
feedback for the task (the type of feedback is different across tasks)
def get_task_feedback(self, dataframe, feedback_type):
    """
    gets overall feedback of the task based on the feedback type

    Args:
        dataframe (pandas df) - response dataframe
        feedback_type (str) - feedback type for the task ('rt', 'acc', or
            anything else for tasks without numeric feedback)
    Returns:
        feedback (dict) - {'curr': int or None, 'prev': int or None,
            'measure': str} with per-run feedback values
    """
    if feedback_type == 'rt':
        # mean reaction time of correct responses, per run
        fb = dataframe.query('corr_resp==True').groupby(['run_name', 'run_iter'])['rt'].agg('mean')
        unit_mult = 1000  # multiplied by the calculated measure
        unit_str = 'ms'   # string representing the unit measure
    elif feedback_type == 'acc':
        # mean accuracy per run
        fb = dataframe.groupby(['run_name', 'run_iter'])['corr_resp'].agg('mean')
        unit_mult = 100  # multiplied by the calculated measure
        unit_str = '%'   # string representing the unit measure
    else:  # for flexion extension task
        fb = pd.DataFrame()
        unit_str = ''
    # add other possible types of feedback here
    fb_curr = None
    fb_prev = None
    if not fb.empty:
        # BUGFIX: use positional .iloc -- plain fb[-1] relied on the Series
        # label-fallback lookup that was removed in pandas 2.0
        fb_curr = int(round(fb.iloc[-1] * unit_mult))
        if len(fb) > 1:
            # get rt of prev. run if it exists
            fb_prev = int(round(fb.iloc[-2] * unit_mult))
    feedback = {'curr': fb_curr, 'prev': fb_prev, 'measure': unit_str}
    return feedback
# 10. Get task response dataframe
def get_task_response(self, all_trial_response):
    """
    Build the task response dataframe.

    Joins the task's target file (trial definitions) column-wise with the
    responses collected for every trial.

    Args:
        all_trial_response: list of per-trial response records
    Returns:
        pandas.DataFrame: target-file columns followed by response columns
    """
    responses = pd.DataFrame.from_records(all_trial_response)
    return pd.concat([self.target_file, responses], axis=1)
## get the current time in the trial
def get_current_trial_time(self):
    """
    Return the current time within the trial.

    Reads the shared TTL clock when TTL pulses are enabled, otherwise the
    task's own clock.
    """
    active_clock = ttl.clock if self.ttl_flag else self.clock
    return active_clock.getTime()
## get the real start time of the trial
def get_real_start_time(self, t0):
    """
    Record the real start time of the block plus TTL bookkeeping.

    When the TTL pulse is not used, ttl_time and ttl_count are set to 0.

    Args:
        t0: reference timestamp taken at trial start (seconds) -- used to
            compute the offset from the last TTL pulse
    """
    if not self.ttl_flag:
        # no scanner pulse: use the local clock and zero the TTL fields
        self.real_start_time = self.clock.getTime()
        self.ttl_time = 0
        self.ttl_count = 0
        return
    self.real_start_time = ttl.clock.getTime()
    self.ttl_time = t0 - ttl.time
    self.ttl_count = ttl.count
## stamp the time right before the stimulus is displayed
def get_time_before_disp(self):
    """Store in self.t2 the clock time just before the stimulus display."""
    source = ttl.clock if self.ttl_flag else self.clock
    self.t2 = source.getTime()
# save the response for the task
def save_task_response(self, response_df, file_path):
    """
    Persist the response dataframe to CSV.

    Appends to an already-existing response file at file_path, otherwise
    creates it.

    Args:
        response_df (pd dataframe) - response dataframe
        file_path - path where response will be saved
    """
    if os.path.isfile(file_path):
        # previous runs exist: stack the new responses underneath them
        previous = pd.read_csv(file_path)
        combined = pd.concat([previous, response_df], axis=0, sort=False)
    else:
        combined = response_df
    combined.to_csv(file_path, index=None, header=True)
### quits the screen
def screen_quit(self):
    """Close the window and quit PsychoPy when a quit key was pressed."""
    keys = event.getKeys()
    for key in keys:
        # BUGFIX: the original `if 'q' and 'esc' in key:` short-circuits to
        # `'esc' in key` ('q' is always truthy), so pressing 'q' never quit
        if key == 'q' or 'esc' in key:
            self.window.close()
            core.quit()
### shows fixation
def show_fixation(self, t0, delta_t):
    """
    Hold the current (fixation) display for delta_t seconds from t0.

    With the TTL flag on, keeps servicing scanner pulses while waiting;
    otherwise busy-waits on the task clock.
    """
    if self.ttl_flag:
        # wait for ttl pulses while the fixation period elapses
        while ttl.clock.getTime() - t0 <= delta_t:
            ttl.check()
        return
    while self.clock.getTime() - t0 <= delta_t:
        pass
class VisualSearch(Task):
    """Visual search task: items are shown at positions given by a display
    file and the subject responds to the target; feedback is reaction time.
    """
    # @property
    # def instruction_text(self):
    #     return response dataframe
    def __init__(self, screen, target_file, run_end, task_name, study_name, target_num, ttl_flag, save = True):
        super(VisualSearch, self).__init__(screen, target_file, run_end, task_name, study_name, target_num, ttl_flag, save_response = save)
        self.feedback_type = 'rt' # reaction
        self.name = 'visual_search'
    def _get_stims(self):
        # load target and distractor stimuli
        # self.stims = [consts.stim_dir/ self.study_name / self.task_name/ f"{d}.png" for d in self.orientations]
        self.stims = [consts.stim_dir/ self.task_name/ f"{d}.png" for d in self.orientations]
        # display file defines per-trial item positions/orientations
        path_to_display = glob.glob(os.path.join(consts.target_dir, self.study_name, self.task_name, f'*display_pos_*_{self.target_num}*'))
        self.tf_display = pd.read_csv(path_to_display[0])
    def _get_trial_info(self):
        # for this task, no extra fields of the target file are needed
        # but just to be consistent with all the other tasks, I'm including a _get_trial_info routine
        # which is just calling get_trial_info from the parent class
        super().get_trial_info(self.trial)
    def _show_stim(self):
        # loop over items and display
        for idx in self.tf_display[self.tf_display['trial']==self.trial].index:
            # pick the stimulus image matching this item's orientation
            stim_file = [file for file in self.stims if str(self.tf_display["orientation"][idx]) in file.stem]
            stim = visual.ImageStim(self.window, str(stim_file[0]), pos=(self.tf_display['xpos'][idx], self.tf_display['ypos'][idx]), units='deg', size=self.item_size_dva)
            stim.draw()
        # NOTE(review): indentation reconstructed -- flipping once after all
        # items are drawn (standard PsychoPy pattern); confirm vs. original
        self.window.flip()
    def run(self):
        """Run all trials of the task and return the response dataframe."""
        self.orientations = list([90, 180, 270, 360]) # ORDER SHOULD NOT CHANGE
        self.item_size_dva = 1
        # loop over trials and collect data
        self.all_trial_response = []
        # get display
        self._get_stims()
        # loop over trials
        for self.trial in self.target_file.index:
            # get trial info
            self._get_trial_info()
            # get current time (self.t0)
            self.t0 = self.get_current_trial_time()
            # show the fixation for the duration of iti
            self.show_fixation(self.t0, self.start_time - self.t0)
            # collect real_start_time for each block (self.real_start_time)
            self.get_real_start_time(self.t0)
            # flush any keys in buffer
            event.clearEvents()
            # display distract (+ target if present)
            self._show_stim()
            # Start timer before display (get self.t2)
            self.get_time_before_disp()
            # collect responses and update
            wait_time = self.trial_dur
            self.trial_response = self.check_trial_response(wait_time = wait_time,
                                                            trial_index = self.trial,
                                                            start_time = self.t0,
                                                            start_time_rt = self.t2)
            self.update_trial_response()
            # show feedback or fixation cross
            if self.target_file['display_trial_feedback'][self.trial] and self.response_made:
                self.display_trial_feedback(correct_response = self.correct_response)
            else:
                self.screen.fixation_cross()
            # 5 show fixation for the duration of the iti
            ## 5.1 get current time
            t_start_iti = self.get_current_trial_time()
            self.show_fixation(t_start_iti, self.iti_dur)
            # 6. allow the experimenter to quit between trials
            self.screen_quit()
        # get the response dataframe
        rDf = self.get_task_response(all_trial_response=self.all_trial_response)
        return rDf
class NBack(Task):
    """N-back working-memory task: one image per trial; feedback is
    reaction time.
    """
    # @property
    # def instruction_text(self):
    #     return response dataframe
    def __init__(self, screen, target_file, run_end, task_name, study_name, target_num, ttl_flag, save = True):
        super(NBack, self).__init__(screen, target_file, run_end, task_name, study_name, target_num, ttl_flag, save_response = save)
        self.feedback_type = 'rt' # reaction
        self.name = 'n_back'
    def _get_trial_info(self):
        # base trial info plus the stimulus image for this trial
        super().get_trial_info(self.trial)
        # stim_path = consts.stim_dir / self.study_name / self.task_name / self.target_file['stim'][self.trial]
        stim_path = consts.stim_dir / self.task_name / self.target_file['stim'][self.trial]
        self.stim = visual.ImageStim(self.window, str(stim_path))
    def _show_stim(self):
        # draw the single image and refresh the window
        self.stim.draw()
        self.window.flip()
    def run(self):
        """Run all trials of the task and return the response dataframe."""
        # loop over trials
        self.all_trial_response = [] # collect data
        for self.trial in self.target_file.index:
            # show image
            self._get_trial_info()
            # get current time (self.t0)
            self.t0 = self.get_current_trial_time()
            # show the fixation for the duration of iti
            self.show_fixation(self.t0, self.start_time- self.t0)
            # collect real_start_time for each block (self.real_start_time)
            self.get_real_start_time(self.t0)
            # flush any keys in buffer
            event.clearEvents()
            # display stimulus
            self._show_stim()
            # Start timer before display (get self.t2)
            self.get_time_before_disp()
            # collect responses
            wait_time = self.trial_dur
            self.trial_response = self.check_trial_response(wait_time = wait_time,
                                                            trial_index = self.trial,
                                                            start_time = self.t0,
                                                            start_time_rt = self.t2)
            # update trial response
            self.update_trial_response()
            # display trial feedback
            if self.target_file['display_trial_feedback'][self.trial] and self.response_made:
                self.display_trial_feedback(correct_response = self.correct_response)
            else:
                self.screen.fixation_cross()
            # 5 show fixation for the duration of the iti
            ## 5.1 get current time
            t_start_iti = self.get_current_trial_time()
            self.show_fixation(t_start_iti, self.iti_dur)
            # 6. allow the experimenter to quit between trials
            self.screen_quit()
        # get the response dataframe
        rDf = self.get_task_response(all_trial_response=self.all_trial_response)
        return rDf
class SocialPrediction(Task):
# @property
# def instruction_text(self):
# return "Social Prediction Task\n\nYou have the following options\n\nHandShake = 1\nHug = 2\nHighFive = 3\nKiss = 4\n\nGo as fast as you can while being accurate"
def __init__(self, screen, target_file, run_end, task_name, study_name, target_num, ttl_flag, save=True):
    """Initialise the social prediction task; accuracy is the feedback measure."""
    super(SocialPrediction, self).__init__(
        screen, target_file, run_end, task_name, study_name,
        target_num, ttl_flag, save_response=save)
    self.name = 'social_prediction'
    self.feedback_type = 'acc'
def _get_trial_info(self):
    """Fetch base trial info and resolve the path of this trial's video clip."""
    super().get_trial_info(self.trial)
    clip_name = self.target_file['stim'][self.trial]
    self.path_to_video = os.path.join(
        consts.stim_dir, self.task_name, "modified_clips", clip_name)
def _get_first_response(self):
# display trial feedback
response_made = [dict['resp_made'] for dict in self.trial_response_all if dict['resp_made']]
correct_response = False
if response_made:
response_made = response_made[0]
correct_response = [dict['corr_resp'] for dict in self.trial_response_all if dict['resp_made']][0]
else:
response_made = False
return response_made, correct_response
def _get_response_event(self, response_made):
# save response event
if response_made:
# save the first dict when response was made
response_event = [dict for dict in self.trial_response_all if dict['resp_made']][0]
else:
response_event = [dict for dict in self.trial_response_all][0]
return response_event
def _show_stim(self):
mov = visual.MovieStim3(self.window, self.path_to_video, flipVert=False, flipHoriz=False, loop=False)
# play movie
frames = []
self.trial_response_all = []
image = []
wait_time = self.trial_dur
while mov.status != visual.FINISHED:
if self.ttl_flag:
while (ttl.clock.getTime() - self.t0 <= wait_time):
ttl.check()
# draw frame to screen
mov.draw()
self.window.flip()
else:
while (self.clock.getTime() - self.t0 <= wait_time):
# draw frame to screen
| |
the Option is performed
between the two sections, RID1 and RID2. If RINC > 0, the
Option is performed among all specified sections (RID1 to RID2
with increment of RINC).
intertype
The type of contact interface (pair-based versus general
contact) to be considered; or the type of contact pair to be
trimmed/unselected/auto-set.
The following labels specify the type of contact interface:
* ``""`` : (blank) Include all contact definitions (pair-based
and general contact).
* ``"GCN"`` : Include general contact definitions only (not valid when Option = RESET or AUTO).
The following labels specify the type of contact pairs to be
trimmed/unselected/auto-set (used only when Option = TRIM,
UNSE, or AUTO, and only for pair-based contact definitions):
* ``"ANY"`` : All types (default).
* ``"MPC"`` : MPC-based contact pairs (KEYOPT(2) = 2).
* ``"BOND"`` : Bonded contact pairs (KEYOPT(12) = 3, 5, 6).
* ``"NOSP"`` : No separation contact pairs (KEYOPT(12) = 2, 4).
* ``"INAC"`` : Inactive contact pairs (symmetric contact pairs for MPC contact or KEYOPT(8) = 2).
trlevel
    Trimming level (used only when Option = TRIM, UNSE, or MORPH):
* ``"(blank)"`` : Normal trimming (default): remove/unselect contact and target elements which are in far-field.
* ``"AGGRE"`` : Aggressive trimming: remove/unselect contact and target elements which are in far-field, and certain elements in near-field.
cgap
They are only valid when Option = ADJUST or MORPH. Control
parameter for opening gap. Close the opening gap if the
absolute value of the gap is smaller than the CGAP value. CGAP
defaults to ``0.25*PINB`` (where PINB is the pinball radius) for
bonded and no-separation contact; otherwise it defaults to the
value of real constant ICONT.
CPEN
They are only valid when Option = ADJUST or MORPH. Control
parameter for initial penetration. Close the initial
penetration if the absolute value of the penetration is
smaller than the CPEN value. CPEN defaults to ``0.25*PINB`` (where
PINB is the pinball radius) for any type of interface behavior
(either bonded or standard contact).
IOFF
They are only valid when Option = ADJUST or MORPH. Control
parameter for initial adjustment. Input a positive value to
adjust the contact nodes towards the target surface with a
constant interference distance equal to IOFF. Input a negative
value to adjust the contact node towards the target surface
with a uniform gap distance equal to the absolute value of
IOFF.
Notes
-----
The CNCHECK command provides information for surface-to-surface,
node-to-surface, and line-to-line contact pairs (element types
TARGE169, TARGE170, CONTA171, CONTA172, CONTA173, CONTA174,
CONTA175, CONTA176, CONTA177). All contact and target elements of
interest, along with the solid elements and nodes attached to
them, must be selected for the command to function properly. For
performance reasons, the program uses a subset of nodes and
elements based on the specified contact regions (RID1, RID2, RINC)
when executing the CNCHECK command.
For additional details, see the notes section at:
https://www.mm.bme.hu/~gyebro/files/ans_help_v182/ans_cmd/Hlp_C_CNCHECK.html
"""
command = f"CNCHECK,{option},{rid1},{rid2},{rinc},{intertype},{trlevel},{cgap},{cpen},{ioff}"
return self.run(command, **kwargs)
def cnkmod(self, itype="", knum="", value="", **kwargs):
"""Modifies contact element key options.
APDL Command: CNKMOD
Parameters
----------
itype
Contact element type number as defined on the ET command.
knum
Number of the KEYOPT to be modified (KEYOPT(KNUM)).
value
Value to be assigned to the KEYOPT.
Notes
-----
The CNKMOD command has the same syntax as the KEYOPT command. However,
it is valid only in the SOLUTION processor. This command is intended
only for use in a linear perturbation analysis, and can only be used to
modify certain contact element KEYOPT values as described below.
Modifying KEYOPT(12)
One use for this command is to modify contact interface behavior
between load steps in a linear perturbation analysis; it allows the
user to control the contact status locally per contact pair. For this
application, this command is limited to changing the contact interface
behavior key option: KEYOPT(12) of CONTA171, CONTA172, CONTA173,
CONTA174, CONTA175, CONTA176, and CONTA177; and KEYOPT(10) of CONTA178.
When used for this purpose, the command adjusts the contact status from
the linear perturbation base analysis (at the point of restart) as
described in the table below. Note that CNKMOD allows you to take
points in the base analysis that are near contact (within the pinball
region) and modify them to be treated as "in contact" in the
perturbation analysis; see the "1 - near-field" row with KEYOPT(12)
values set to 4 or 5. CNKMOD also allows you to take points that are
sliding in the base analysis and treat them as sticking in the
perturbation analysis, irrespective of the MU value; see the "2 -
sliding" row with KEYOPT(12) values set to 1,3, 5, or 6.
Table: 128:: : Adjusted Contact Status with CNKMOD is Issued
(if outside of the adjusted pinball region)
(if inside of the adjusted pinball region)
(if outside of the adjusted pinball region)
(if inside of the adjusted pinball region)
If an open gap exists at the end of the previous load step and the
contact status is adjusted as sliding or sticking due to a "bonded" or
"no separation" contact behavior definition, then the program will
treat it as near-field contact when executing CNKMOD in the subsequent
load steps.
In the linear perturbation analysis procedure, contact status can also
be controlled or modified by the PERTURB command. The contact status
always follows local controls defined by the CNKMOD command first, and
is then adjusted by the global sticking or bonded setting (ContKey =
STICKING or BONDED) on the PERTURB command (see the PERTURB command for
details).
Modifying KEYOPT(3)
Another use for this command is to change the units of normal contact
stiffness (contact element real constant FKN) in a linear perturbation
modal analysis that is used to model brake squeal. For contact elements
CONTA171, CONTA172, CONTA173, and CONTA174, KEYOPT(3) controls the
units of normal contact stiffness. You can issue the command
CNKMOD,ITYPE,3,1 during the first phase of the linear perturbation
analysis in order to change the units of normal contact stiffness from
FORCE/LENGTH3 (in the base analysis) to FORCE/LENGTH. Note that
KEYOPT(3) = 1 is valid only when a penalty-based algorithm is used
(KEYOPT(2) = 0 or 1) and the absolute normal contact stiffness value is
explicitly specified (that is, a negative value input for real constant
FKN).
"""
command = f"CNKMOD,{itype},{knum},{value}"
return self.run(command, **kwargs)
def cntr(self, option="", key="", **kwargs):
"""Redirects contact pair output quantities to a text file.
APDL Command: CNTR
Parameters
----------
option
Output option:
OUT - Contact output control.
key
Control key:
NO - Write contact information to the output file or to the screen (default).
YES - Write contact information to the Jobname.CNM file.
Notes
-----
Issue the command CNTR,OUT,YES to redirect contact pair output
quantities to the Jobname.CNM file.
To ensure that the contact information is written to Jobname.CNM,
reissue CNTR,OUT,YES each time you reenter the solution processor
(/SOLU).
"""
command = f"CNTR,{option},{key}"
return self.run(command, **kwargs)
def cutcontrol(self, lab="", value="", option="", **kwargs):
"""Controls time-step cutback during a nonlinear solution.
APDL Command: CUTCONTROL
Parameters
----------
lab
Specifies the criteria for causing a cutback. Valid labels are:
PLSLIMIT - Maximum equivalent plastic strain allowed within a time-step (substep). If the
calculated value exceeds the VALUE, the program
performs a cutback (bisection). VALUE defaults to 0.15
(15%).
CRPLIMIT - Set values for calculating the maximum equivalent creep ratio allowed within a
time step. If the calculated maximum creep ratio
exceeds the defined creep ratio limit, the program
performs a cutback.
DSPLIMIT - Maximum incremental displacement within the solution field in a time step
(substep). If the maximum calculated value exceeds
VALUE, the program performs a cutback (bisection).
VALUE defaults to 1.0 x 107.
NPOINT - Number of points in a cycle for a second order dynamic equation, used to
control automatic time stepping. If the number of
solution points per cycle | |
word was a verb to handle auxiliary verbs
if a > 3:
# print('More than 3 consecutive verbs detected in the following sentence: ', nltk_tagged)
a = 0
break
tag_prev = tag
word_prev = word.lower()
elif a >= 1 and (tag[:2] == 'RB' or word.lower() in ['not' , "n't", 't', "'t"]):
a += 1
# if word.lower() in ["n't", 't', "'t"]:
# print("Sentence containing n't, t or 't'", nltk_tagged)
else: a = 0
wordforms[(word.lower(), tag)] = wordforms.get((word.lower(), tag), 0) + 1
return wordforms
def noun_lemmas(wordforms):
    """Map wordforms to their corresponding noun lemmas.

    Receives a dictionary keyed by (word, tag) wordform tuples and returns
    a pair:

    * a dictionary keyed by the same (word, tag) tuples whose values are
      the derived noun lemma (a string, or a list of strings for the
      special 'sideway*' case);
    * a list of the qualifying wordforms that could not be mapped to any
      WordNet noun.

    Relies on module-level globals: ``wn`` (WordNet corpus reader),
    ``wnl`` (WordNet lemmatizer) and the ``nounify`` helper.
    """
    all_nouns = dict()
    wordforms_notinWordNet = []
    for w in wordforms:
        word = w[0]
        tag = w[1]
        # Now let's update the list of nouns.
        # First, we ensure that the word qualifies. That is: 1) it is longer than 2 characters
        # Expand clitic verb fragments ('re -> are, 've -> have) before filtering.
        if tag[:2] == 'VB' and word == 're':
            word = 'are'
        elif tag[:2] == 'VB' and word == 've':
            word = 'have'
        if ((len(word) > 2 or tag == 'CD' or (tag != 'AU' and word in ['be', 'do', 'go', 'ad', 'is', 'am'])) and word != "n't") or (tag[:2] == 'NN' and word.lower() in ['pc', 'pt', 'ms']): # and tag in ['N', 'V', 'J', 'R']
            # Manual replacement table: substitute known problem words
            # (abbreviations, derivations WordNet misses, temporal/quantity
            # adverbs, ...) with a stand-in word before lemmatization.
            # NOTE: the order of these elif branches is significant.
            if word in ['app', 'apps']:
                word_rep = 'application'
            # elif tag == 'NN' and word.lower() in ['pc']: # ;wnl.lemmatize doesn't work on double words
            # word_rep = 'personal computer'
            # print(word_rep)
            elif tag[:2] == 'NN' and word in ['pt']: # ;wnl.lemmatize doesn't work on double words
                word_rep = 'therapist'
            elif tag == 'NNP' and word.lower() in ['ms']: # ;wnl.lemmatize doesn't work on double words
                word_rep = 'microsoft'
            elif tag[:2] == 'JJ' and word in ['ok', 'ok.']: # ;wnl.lemmatize doesn't work on double words
                word_rep = 'satisfactoriness'
            elif word in ['ios']: # ;wnl.lemmatize doesn't work on double words
                word_rep = 'software'
            elif 'smartphone' in word:
                word_rep = 'phone'
            elif tag == 'NNP' and word == 'kevin':
                word_rep = 'person'
            elif tag[0] == 'N' and word in ['others']:
                word_rep = 'people'
            elif 'redesign' in word:
                word_rep = 'design'
            elif 'restructure' in word:
                word_rep = 'structure'
            elif 'realign' in word:
                word_rep = 'align'
            elif tag[0] == 'N' and word == 'rhyming':
                word_rep = 'rhyme'
            elif 'download' in word:
                word_rep = 'transfer'
            elif 'customize' in word:
                word_rep = 'custom'
            elif 'thank' in word:
                word_rep = 'thanks'
            elif 'keyboarding' in word:
                word_rep = 'keyboard'
            elif 'multitasking' in word:
                word_rep = 'task'
            elif 'off-putting' in word:
                word_rep = 'appeal'
            elif 'inexcusable' in word:
                word_rep = 'excuse'
            elif tag[:2] == 'VB' and word == 'due':
                word_rep = 'do'
            elif tag[0] == 'V' and 'enable' in word:
                word_rep = 'ability'
            # elif tag[0] == 'V' and word == 'sobering':
            # word_rep = 'sobriety'
            elif tag[0] == 'J' and word == 'unorganized':
                word_rep = 'organization'
            elif tag[0] == 'J' and word == 'hypermobile':
                word_rep = 'mobility'
            elif tag[0] == 'J' and word == 'memorable':
                word_rep = 'memory'
            elif tag[0] == 'J' and word == 'delightful':
                word_rep = 'delight'
            elif tag[0] == 'J' and word == 'optional':
                word_rep = 'option'
            elif tag[0] == 'J' and word == 'outdated':
                word_rep = 'date'
            elif tag[0] == 'J' and word == 'positional':
                word_rep = 'position'
            elif tag[0] == 'J' and word == 'unfocused':
                word_rep = 'focus'
            elif tag[0] == 'J' and word == 'descriptive':
                word_rep = 'description'
            elif word in ['never', 'once', 'already', 'full-time', 'ever', 'initially', 'again', 'sometimes', 'before', 'yet', 'soon', 'ahead', 'anytime', 'eventually', 'finally', 'ago', 'throughout']:
                word_rep = 'time'
            elif tag[:2] == 'RB' and word in ['prior']:
                word_rep = 'time'
            elif word in ['maybe', 'perhaps']:
                word_rep = 'possibility'
            elif tag == 'RB' and word in ['quite', 'bit', 'far']:
                word_rep = 'extent'
            elif tag == 'RB' and word in ['long']:
                word_rep = 'length'
            elif tag[0] == 'R' and word == 'simply':
                word_rep = 'simplicity'
            elif tag[0] == 'R' and word == 'professionally':
                word_rep = 'profession'
            elif tag[0] == 'R' and word == 'supposedly':
                word_rep = 'supposition'
            elif tag[0] == 'R' and word == 'undoubtedly':
                word_rep = 'doubt'
            elif tag[0] == 'R' and word == 'continually':
                word_rep = 'continuity'
            elif tag[0] == 'R' and word == 'safely':
                word_rep = 'safety'
            elif tag[0] == 'R' and word == 'routinely':
                word_rep = 'routine'
            elif tag[0] == 'R' and word == 'additionally':
                word_rep = 'addition'
            elif tag[0] == 'R' and word == 'namely':
                word_rep = 'name'
            elif tag[0] == 'R' and word == 'periodically':
                word_rep = 'period'
            elif tag[0] == 'R' and word == 'relaxed':
                word_rep = 'relaxation'
            elif word in ['another', 'every', 'both', 'either', 'together', 'anymore', 'almost', 'else']:
                word_rep = 'number'
            elif word in ['visually']:
                word_rep = 'vision'
            elif tag[0] == 'R' and word in ['most', 'more']:
                word_rep = 'group'
            elif tag[0] == 'R' and word in ['around', 'away', 'elsewhere', 'wherever', 'anywhere', 'between', 'sidewards', 'forth']:
                word_rep = 'place'
            elif tag[0] == 'R' and word in ['loose']:
                word_rep = 'looseness'
            elif tag[:2] == 'RB' and word in ['lighter']:
                word_rep = 'lightness'
            else:
                word_rep = word
            noun = None # pre-assign the variable noun to None
            # check if the word is found in WordNet as it is:
            if (tag[0] == 'N' or tag == 'CD') and wn.synsets(wnl.lemmatize(word_rep,'n'), pos='n') != []:
                noun = wnl.lemmatize(word_rep,'n') # = all_nouns.get((word.lower(), tag, wnl.lemmatize(word_rep,'n')), 0) + 1
            elif 'sideway' in word_rep:
                noun = ['side', 'way'] # = all_nouns.get((word.lower(), tag, ('side', 'way')), 0) + 1
            # elif tag[0] == 'N' and word.lower() == 'rhyming':
            # all_nouns['rhyme'] = all_nouns.get('rhyme', 0) + 1
            elif tag[0] in ['N', 'V', 'J', 'R'] and tag != 'RP': # Added on 20200520 "and tag != 'RP'" to exclude Particles. New idea: use derivationally related forms etc. Original idea: Transform the word through stemming and lemmatization
                short_tag = tag[0].lower() # generate a short-tag from POS tag
                if short_tag == 'j': short_tag = 'a'
                noun = nounify(word_rep, short_tag) # prints out word and short_tag if not found in Wordnet
                if noun == None and word_rep not in ['also', 'not', 'just', 'too', 'instead', 'only', 'very', 'rather', 'however', 'esque', 'but', 'anyway', 'furthermore', 'about', 'though', 'regardless', 'alright', 'further', 'mostly', 'anyways', 'nonetheless', 'virtually', 'beyond', 'along', 'alongside', 'somehow']:# and word.lower()[-2:] != 'ly':
                    # check if the word is found in WordNet as it is:
                    if wn.synsets(wnl.lemmatize(word_rep,'n'), pos='n') != [] and word not in ['tho', 'otter']:
                        noun = wnl.lemmatize(word_rep,'n') # = all_nouns.get((word.lower(), tag, wnl.lemmatize(word_rep,'n')), 0) + 1
            # Record the outcome: qualifying wordforms with no noun mapping go
            # to the not-in-WordNet list; successful mappings go to all_nouns.
            if tag[:2] in ['NN', 'VB', 'JJ', 'RB', 'CD'] and noun == None and word_rep not in ['also', 'not', 'just', 'too', 'instead', 'only', 'very', 'rather', 'however', 'esque', 'but', 'anyway', 'furthermore', 'about', 'though', 'regardless', 'alright', 'further', 'mostly', 'anyways', 'nonetheless', 'virtually', 'beyond', 'along', 'alongside', 'somehow', 'thus']:# and word.lower()[-2:] != 'ly':
                wordforms_notinWordNet = wordforms_notinWordNet + [w]
            elif noun != None:
                all_nouns[w] = noun # = all_nouns.get((word.lower(), tag, noun), 0) + 1
    return all_nouns, wordforms_notinWordNet
# Now lets define the fuctions to find both hypernym and hyponym depth.
def hypernym_depth(word, postag):
    """Return the minimum WordNet depth of *word*'s first synset.

    The word is first lemmatized with the given WordNet POS tag and the
    depth (distance from a root hypernym) of its first synset is returned.
    Raises IndexError when no synset exists for the lemma/POS combination.
    """
    return wn.synsets(wnl.lemmatize(word, postag), postag)[0].min_depth() #this selects the first synset. We could think of a smarter way of selecting a synset
#wn.synset('car.n.01').min_depth()
# Now let's create a table with verbs inside our database and populate it with their respective depth values
import sqlite3
import shutil # we use this library to create a copy of a file (in this case to duplicate the database
              # so that we can loop over one instance while editing the other)
# Establish a SQLite connection to the source database 'Liars7_clean_tr20200618.sqlite':
conn = sqlite3.connect('Liars7_clean_tr20200618.sqlite')
# Get the cursor, which is used to traverse the database, line by line
cur = conn.cursor()
# Then we duplicate the database, so that one can loop over one copy and edit
# the other at the same time, and 'open' the other 'instance' of the same database
shutil.copyfile('Liars7_clean_tr20200618.sqlite', 'Liars7_w.sqlite')
conn_w = sqlite3.connect('Liars7_w.sqlite')
cur_w = conn_w.cursor()
# First, let's move brysbaert into SQL:
#Input the name of the excel file to be converted into a SQL database
name = input("Enter | |
added "custom" validation method from a certain
attribute, avoiding its execution at runtime.
This method should be used carefully and should be considered a secondary
resource for attribute validation.
:type attribute_name: String
:param attribute_name: The name of the attribute that will have
the provided validation methods removed from execution.
:type validation_method_name: Function
:param validation_method_name: The function with validation method
that will be removed from execution for the attribute.
:type contexts: Tuple
:param contexts: The (validation) contexts for which the the validation
method should be removed.
"""
# iterates over the complete set of contexts from which the validation
# method will be removed (complete validation method removal)
for context in contexts:
# creates a list that will hold the various valid/matching
# validation tuples and then retrieves the context validation
# map using it to retrieve the validation list (containing the tuples)
validation_tuples = []
context_validation_map = self.validation_map[context]
attribute_validation_list = context_validation_map[attribute_name]
# iterates over the complete set of validation tuples in the
# attribute validation list trying to find the valid ones
for validation_tuple in attribute_validation_list:
if not validation_tuple[0] == validation_method: continue
validation_tuples.append(validation_tuple)
# iterates over the complete set of (valid) validation tuples
# and removes them from the attribute validation list
for validation_tuple in validation_tuples:
attribute_validation_list.remove(validation_tuple)
def add_error(self, attribute_name, error_message, avoid_duplicates = True):
"""
Adds an error to the validation error map.
This error may be used latter for "verbosity" purposes.
Duplicate error message may be avoided in case the extra flag
is set (default behavior).
:type attribute_name: String
:param attribute_name: The name of the attribute to witch
there is going to be added an error.
:type error_message: String
:param error_message: The error message to be added.
:type avoid_duplicates: bool
:param avoid_duplicates: If duplicate error message should be
avoided (this should imply extra resources).
"""
# in case the attribute name is not defined in the validation
# errors map, must create a new sequence to store the values
if not attribute_name in self.validation_errors_map:
# starts the validation errors map for the attribute name
# as an empty list (default initialization value)
self.validation_errors_map[attribute_name] = []
# retrieves the validation errors (list) for the
# requested attribute name, will be used for the checking
# and insertion operations, avoids insertion in case the
# avoid duplicates flag is set and the error message is
# already present in the validation errors list
validation_errors = self.validation_errors_map[attribute_name]
if avoid_duplicates and error_message in validation_errors: return
# adds the validation error to the validation error
# list for the attribute name
validation_errors.append(error_message)
def clear_errors(self):
"""
Clears the internal structure referencing errors from the
current model structure.
No more errors are displayed after this call is made.
Note that no recursion is done so no related structure
are cleared from errors.
"""
# resets the validation errors map, by constructing
# a new map to hold the values (clear process)
self.validation_errors_map = {}
def init_validate(self):
"""
Initializes the validation system for the current
model, should set the complete set of validation
methods that are going to be run.
This method should always be called before a
validation is executed to ensure proper execution.
"""
# checks if the validation has been already started,
# (avoids duplicated validation starting)
if hasattr(self, "validation_started"): return
# in case the model has the set validation method
# the control flow must call it so that extra validation
# methods may be included in the model
if hasattr(self, "set_validation"): self.set_validation()
# sets the starting of the validation avoiding any
# further calls to this method (performance issue)
self.validation_started = True
def validate(self, checker = None, context = None):
"""
Validates all the attributes in the current object.
This method returns if the validation was successful or not.
An optional checker function may be provided so that the
validation process is controlled/managed by such function.
:type checker: Function
:param checker: Checker function that if existent will be
run for each of the model's attribute so that it's possible
to infer if a validation should be performed for that attribute.
:type context: String
:param context: The context (a string) for which the current
validation is going to be run. Note that the default validation
context is always used/executed/inherited.
:rtype: bool
:return: If the model validation was successful or not.
"""
# checks if the current model contains the pre validate
# method, in such case the method is called to signal
# the start of the validation process
if hasattr(self, "pre_validate"): self.pre_validate()
# initializes the validation structures so that it's possible
# and guaranteed to be able to properly execute validation on
# the current model as expected by the definition
self.init_validate()
# tries to retrieve the proper (validation) context for the
# current validation process, defaulting to the validation
# context set in the current context, then verifies if the
# resulting context is considered to be the default one
context = context if context else self.validation_context
is_default = context == "default"
# retrieves the references for both the default and the selected
# contexts validation maps (to be used in the validation process)
default_validation_map = self.validation_map.get("default", {})
context_validation_map = self.validation_map.get(context, {})
# verifies if the current context is the default one and in case
# it's not runs the default context validation (as an extra) then
# runs the validation for the selected context
if not is_default: self.validate_run(
checker = checker,
context_validation_map = default_validation_map
)
self.validate_run(
checker = checker,
context_validation_map = context_validation_map
)
# checks if the current validation process has success
# running (all the validation tests passed)
is_valid = self.is_valid(recursive = True)
# in case the validation was not successful and the current
# model contains the fail validate method defined it's called
# to signal the failure of the validation process
if not is_valid and hasattr(self, "fail_validate"): self.fail_validate()
# checks if the current model contains the post validate
# method, in such case the method is called to signal
# the end of the validation process
if hasattr(self, "post_validate"): self.post_validate()
# returns if the structure is valid, all tests passed
# with expected success
return is_valid
def validate_run(self, checker = None, context_validation_map = {}):
"""
Underlying method that runs the sequence of validation methods
defined in the provided context validation map.
The results from the validation should update the current entity
error structure so that it becomes invalidation in case there's
an error or valid otherwise.
:type checker: Function
:param checker: Checker function that if existent will be
run for each of the model's attribute so that it's possible
to infer if a validation should be performed for that attribute.
:type context_validation_map: Dictionary
:param context_validation_map: Map containing the associations
between the attribute names and the sequence containing the
various validation functions to be run for that attribute.
"""
# iterates over all the items in the context validation map
# so that it's possible to validate all of the attributes
# for the current model instance (as requested)
for attribute_name, validation_tuple_list in colony.legacy.iteritems(context_validation_map):
# in case the current model is already stored no need to
# to validate a non existent attribute (it's not going to be
# persisted and the value in the data source was already validated)
# the data model remains consistent for sure
if self.is_stored() and not self.has_value(attribute_name): continue
# in case there's a checker method method that will verify if the
# attribute qualifies for validation then runs it and verifies if
# the result is negative, if that's the case skips the validation
# for the current attribute name (validation not required)
if checker and not checker(attribute_name): continue
# retrieves the attribute value, this is the value that is going
# to be "fed" into the various validation methods registered for it
attribute_value = self.get_value(attribute_name)
# | |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.2'
# jupytext_version: 1.1.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# # Sentiment Analysis
#
# ## Updating a Model in SageMaker
#
# _Deep Learning Nanodegree Program | Deployment_
#
# ---
#
# In this notebook we will consider a situation in which a model that we constructed is no longer working as we intended. In particular, we will look at the XGBoost sentiment analysis model that we constructed earlier. In this case, however, we have some new data that our model doesn't seem to perform very well on. As a result, we will re-train our model and update an existing endpoint so that it uses our new model.
#
# This notebook starts by re-creating the XGBoost sentiment analysis model that was created in earlier notebooks. This means that you will have already seen the cells up to the end of Step 4. The new content in this notebook begins at Step 5.
#
# ## Instructions
#
# Some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this notebook. You will not need to modify the included code beyond what is requested. Sections that begin with '**TODO**' in the header indicate that you need to complete or implement some portion within them. Instructions will be provided for each section and the specifics of the implementation are marked in the code block with a `# TODO: ...` comment. Please be sure to read the instructions carefully!
#
# In addition to implementing code, there will be questions for you to answer which relate to the task and your implementation. Each section where you will answer a question is preceded by a '**Question:**' header. Carefully read each question and provide your answer below the '**Answer:**' header by editing the Markdown cell.
#
# > **Note**: Code and Markdown cells can be executed using the **Shift+Enter** keyboard shortcut. In addition, a cell can be edited by typically clicking it (double-click for Markdown cells) or by pressing **Enter** while it is highlighted.
# %% [markdown]
# ## Step 1: Downloading the data
#
# The dataset we are going to use is very popular among researchers in Natural Language Processing, usually referred to as the [IMDb dataset](http://ai.stanford.edu/~amaas/data/sentiment/). It consists of movie reviews from the website [imdb.com](http://www.imdb.com/), each labeled as either '**pos**itive', if the reviewer enjoyed the film, or '**neg**ative' otherwise.
#
# > Maas, Andrew L., et al. [Learning Word Vectors for Sentiment Analysis](http://ai.stanford.edu/~amaas/data/sentiment/). In _Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies_. Association for Computational Linguistics, 2011.
#
# We begin by using some Jupyter Notebook magic to download and extract the dataset.
# %%
# %mkdir ../data
# !wget -O ../data/aclImdb_v1.tar.gz http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz
# !tar -zxf ../data/aclImdb_v1.tar.gz -C ../data
# %% [markdown]
# ## Step 2: Preparing the data
#
# The data we have downloaded is split into various files, each of which contains a single review. It will be much easier going forward if we combine these individual files into two large files, one for training and one for testing.
# %%
import os
import glob
def read_imdb_data(data_dir='../data/aclImdb'):
    """Read the IMDb review files from disk.

    Returns a pair of nested dictionaries (data, labels), each keyed
    first by split ('train'/'test') and then by sentiment ('pos'/'neg');
    data holds the raw review strings and labels the matching 1 (pos)
    or 0 (neg) values.
    """
    data = {}
    labels = {}
    for split in ['train', 'test']:
        data[split] = {}
        labels[split] = {}
        for sentiment in ['pos', 'neg']:
            reviews = []
            # every review lives in its own .txt file under split/sentiment
            pattern = os.path.join(data_dir, split, sentiment, '*.txt')
            for path in glob.glob(pattern):
                with open(path) as review:
                    reviews.append(review.read())
            data[split][sentiment] = reviews
            # Here we represent a positive review by '1' and a negative review by '0'
            labels[split][sentiment] = [1 if sentiment == 'pos' else 0] * len(reviews)
            assert len(data[split][sentiment]) == len(labels[split][sentiment]), \
                    "{}/{} data size does not match labels size".format(split, sentiment)
    return data, labels
# %%
# Load the raw reviews from disk and report the size of every split.
data, labels = read_imdb_data()
print("IMDB reviews: train = {} pos / {} neg, test = {} pos / {} neg".format(
            len(data['train']['pos']), len(data['train']['neg']),
            len(data['test']['pos']), len(data['test']['neg'])))
# %%
from sklearn.utils import shuffle
def prepare_imdb_data(data, labels):
    """Prepare training and test sets from IMDb movie reviews.

    Concatenates the positive and negative halves of each split, shuffles
    them, and returns the flat tuple
    (data_train, data_test, labels_train, labels_test).
    """
    # merge the positive and negative reviews (and labels) of each split
    data_train = data['train']['pos'] + data['train']['neg']
    labels_train = labels['train']['pos'] + labels['train']['neg']
    data_test = data['test']['pos'] + data['test']['neg']
    labels_test = labels['test']['pos'] + labels['test']['neg']
    # shuffle each split so positives and negatives are interleaved,
    # keeping reviews and labels aligned
    data_train, labels_train = shuffle(data_train, labels_train)
    data_test, labels_test = shuffle(data_test, labels_test)
    return data_train, data_test, labels_train, labels_test
# %%
# Merge and shuffle the splits; the four flat lists replace the nested dicts.
train_X, test_X, train_y, test_y = prepare_imdb_data(data, labels)
print("IMDb reviews (combined): train = {}, test = {}".format(len(train_X), len(test_X)))
# %%
# Peek at a single raw review to sanity-check the merged data.
train_X[100]
# %% [markdown]
# ## Step 3: Processing the data
#
# Now that we have our training and testing datasets merged and ready to use, we need to start processing the raw data into something that will be useable by our machine learning algorithm. To begin with, we remove any html formatting that may appear in the reviews and perform some standard natural language processing in order to homogenize the data.
# %%
import nltk
nltk.download("stopwords")  # fetch the stopword corpus used during processing
from nltk.corpus import stopwords
from nltk.stem.porter import *
# NOTE(review): this module-level stemmer appears unused; review_to_words
# below constructs its own PorterStemmer — confirm before removing.
stemmer = PorterStemmer()
# %%
import re
from bs4 import BeautifulSoup
def review_to_words(review):
    """Convert a raw review string into a list of stemmed, stopword-free words.

    Strips HTML tags, lowercases and keeps only alphanumeric characters,
    removes English stopwords, then Porter-stems each remaining word.
    """
    text = BeautifulSoup(review, "html.parser").get_text() # Remove HTML tags
    text = re.sub(r"[^a-zA-Z0-9]", " ", text.lower()) # Convert to lower case
    words = text.split() # Split string into words
    # Hoisted out of the loops: the original fetched the stopword list and
    # built a brand-new PorterStemmer for every single word, which made the
    # stopword filter O(n) per word and allocations per-token. A set and a
    # single stemmer instance give identical results.
    stops = set(stopwords.words("english"))
    stemmer = PorterStemmer()
    words = [w for w in words if w not in stops] # Remove stopwords
    words = [stemmer.stem(w) for w in words] # stem
    return words
# %%
# Spot-check the processing pipeline on one review.
review_to_words(train_X[100])
# %%
import pickle
cache_dir = os.path.join("../cache", "sentiment_analysis")  # where to store cache files
os.makedirs(cache_dir, exist_ok=True)  # ensure cache directory exists
def preprocess_data(data_train, data_test, labels_train, labels_test,
                    cache_dir=cache_dir, cache_file="preprocessed_data.pkl"):
    """Convert each review to words; read from cache if available.

    Returns (words_train, words_test, labels_train, labels_test) where the
    review entries have been replaced by lists of processed words.  Results
    are pickled to cache_dir/cache_file so repeated runs skip the work;
    pass cache_file=None to disable caching entirely.
    """
    # If cache_file is not None, try to read from it first
    cache_data = None
    if cache_file is not None:
        try:
            with open(os.path.join(cache_dir, cache_file), "rb") as f:
                cache_data = pickle.load(f)
            print("Read preprocessed data from cache file:", cache_file)
        except Exception:
            # unable to read from cache (missing/corrupt file), which is fine:
            # the data is simply recomputed below.  Narrowed from a bare
            # "except:" so KeyboardInterrupt/SystemExit still propagate.
            pass
    # If cache is missing, then do the heavy lifting
    if cache_data is None:
        # Preprocess training and test data to obtain words for each review
        words_train = [review_to_words(review) for review in data_train]
        words_test = [review_to_words(review) for review in data_test]
        # Write to cache file for future runs
        if cache_file is not None:
            cache_data = dict(words_train=words_train, words_test=words_test,
                              labels_train=labels_train, labels_test=labels_test)
            with open(os.path.join(cache_dir, cache_file), "wb") as f:
                pickle.dump(cache_data, f)
            print("Wrote preprocessed data to cache file:", cache_file)
    else:
        # Unpack data loaded from cache file
        words_train, words_test, labels_train, labels_test = (cache_data['words_train'],
                cache_data['words_test'], cache_data['labels_train'], cache_data['labels_test'])
    return words_train, words_test, labels_train, labels_test
# %%
# Preprocess data: train_X/test_X are rebound from raw review strings to
# lists of stemmed words (cached on disk after the first run)
train_X, test_X, train_y, test_y = preprocess_data(train_X, test_X, train_y, test_y)
# %% [markdown]
# ### Extract Bag-of-Words features
#
# For the model we will be implementing, rather than using the reviews directly, we are going to transform each review into a Bag-of-Words feature representation. Keep in mind that 'in the wild' we will only have access to the training set so our transformer can only use the training set to construct a representation.
# %%
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.externals import joblib
# joblib is an enhanced version of pickle that is more efficient for storing NumPy arrays
def extract_BoW_features(words_train, words_test, vocabulary_size=5000,
cache_dir=cache_dir, cache_file="bow_features.pkl"):
"""Extract Bag-of-Words for a given set of documents, already preprocessed into words."""
# If cache_file is not None, try to read from it first
cache_data = None
if cache_file is not None:
try:
with open(os.path.join(cache_dir, cache_file), "rb") as f:
cache_data = joblib.load(f)
print("Read features from cache file:", cache_file)
except:
pass # unable to read from cache, but that's okay
# If cache is missing, then do the heavy lifting
if cache_data is None:
# Fit a vectorizer to training documents and use it to transform them
# NOTE: Training documents have already been preprocessed and tokenized into words;
# pass in dummy functions to skip those steps, e.g. preprocessor=lambda x: x
vectorizer = CountVectorizer(max_features=vocabulary_size,
preprocessor=lambda x: x, tokenizer=lambda x: x) # already preprocessed
features_train = vectorizer.fit_transform(words_train).toarray()
# Apply the same vectorizer to | |
<gh_stars>10-100
# -*- python -*-
# This software was produced by NIST, an agency of the U.S. government,
# and by statute is not subject to copyright in the United States.
# Recipients of this software assume all responsibilities associated
# with its operation, modification and maintenance. However, to
# facilitate maintenance we ask that before distributing modified
# versions of this software, you first contact the authors at
# <EMAIL>.
# A smattering of pre-defined outputs for the convenience of OOF users.
from ooflib.SWIG.common import config
from ooflib.SWIG.common import coord
from ooflib.SWIG.common import ooferror
from ooflib.SWIG.common import progress
from ooflib.SWIG.engine import element
from ooflib.SWIG.engine import invariant
from ooflib.SWIG.engine import outputval
from ooflib.SWIG.engine import planarity
from ooflib.common import debug
from ooflib.common import enum
from ooflib.common import primitives
from ooflib.common import utils
from ooflib.common.IO import parameter
from ooflib.engine.IO import meshparameters
from ooflib.engine.IO import output
from types import *
import itertools
import math
import sys
# Examples of Outputs
# See comments in output.py.
## TODO 3.1: Add progress bars to more outputs?
#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#
# posOutput is probably obsolete in 3D.
def _pos(mesh, elements, coords):
    # Callback for the "original position" Output.  "elements" is a list of
    # Elements or Edges (possibly mixed); "coords" is a parallel list of
    # lists: MasterCoords for Elements, or doubles in [0,1) for Edges.
    # Element.position() maps each master coordinate through the element
    # geometry; the per-element lists are flattened into one list.
    positions = []
    for elem, elem_coords in zip(elements, coords):
        positions.append(elem.position(elem_coords))
    return utils.flatten1(positions)
# Output reporting the original (undisplaced) spatial position of each
# evaluation point, computed by _pos above.
posOutput = output.Output(
    name="original position",
    otype=(primitives.Point, coord.Coord),
    callback=_pos)
#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#
# Enumeration of the three spatial components, used as an Output parameter
# to select one component of a vector quantity.
class SpaceComponent(enum.EnumClass('x', 'y', 'z')):
    tip='Components of vectors.'
    discussion="""<para>
    <classname>SpaceComponent</classname> is used by various
    <link
    linkend='Section-Output'><classname>Outputs</classname></link> to
    choose one of the components of a vector quantity.
    </para>"""
# Pick the enum used for "in-plane" vector components.  In 2D only x and y
# are in-plane; in 3D every component is, so the class is just an alias for
# SpaceComponent.  The choice is made once, at import time.
if config.dimension() == 2:
    class InPlaneSpaceComponent(enum.EnumClass('x', 'y')):
        tip="The in-plane components of vectors."
        discussion="""<para>
        <classname>InPlaneSpaceComponent</classname> is used by various
        <link
        linkend='Section-Output'><classname>Outputs</classname></link> to
        choose one of the in-plane components of a vector quantity.
        </para>"""
elif config.dimension() == 3:
    InPlaneSpaceComponent = SpaceComponent
#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#
# FieldOutput is for fields which are defined on the Mesh. Fields
# that aren't defined return 0. If a Field isn't defined everywhere,
# the user shouldn't be evaluating it everywhere, but that's his or
# her decision.
def _field(mesh, elements, coords, field):
    # Callback for FieldOutput: evaluate the given Field at each element's
    # evaluation points, then flatten the per-element lists into one list.
    # (Fields that aren't defined on an element evaluate to zero.)
    values = []
    for elem, elem_coords in itertools.izip(elements, coords):
        values.append(elem.outputFields(mesh, field, elem_coords))
    return utils.flatten1(values)
def _field_instancefn(self):
    # Return a zero-valued OutputVal of the right type for the chosen
    # Field; used by the Output machinery as a prototype instance.
    field = self.resolveAlias('field').value
    if field is not None:
        # Don't combine the following two lines into one! There must
        # be a reference to ovalue until zero() is called. See
        # valuePtr comment in outputval.h.
        ovalue = field.newOutputValue()
        return ovalue.valuePtr().zero()
    # NOTE(review): implicitly returns None when no field is selected.
def _field_column_names(self):
field = self.resolveAlias('field').value
if field.ndof() == 1:
return [field.name()]
names = []
it = field.iterator(planarity.ALL_INDICES)
while not it.end():
names.append("%s[%s]" % (field.name(), it.shortstring()))
it.next()
return names
# Output reporting the raw value of a Field at each evaluation point.
# Undefined Fields evaluate to zero; it's the user's responsibility not to
# evaluate a Field where it isn't defined.
FieldOutput = output.Output(
    name = "field",
    callback = _field,
    otype = outputval.OutputValPtr,
    srepr=lambda x: x.resolveAlias('field').value.name(),
    instancefn = _field_instancefn,
    column_names = _field_column_names,
    bulk_only = True,
    params = [meshparameters.FieldParameter("field", outofplane=1,
                                            tip=parameter.emptyTipString)],
    tip="Compute Field values.",
    discussion='<para>Compute the value of the given &field; on a &mesh;.</para>'
    )
## TODO 3.1: Add Field and Flux outputs that take their values from some
## *other* Mesh, in order to compute how well solutions are
## converging. Or, possibly, add a DifferenceWithOtherMesh output,
## that computes the same quantities for the minuend and subtrahend,
## but on different Meshes.
############
def _fieldderiv(mesh, elements, coords, field, derivative):
    # Callback for FieldDerivOutput: evaluate the spatial derivative of the
    # Field on each element and flatten the per-element results.
    results = []
    for elem, elem_coords in itertools.izip(elements, coords):
        results.append(
            elem.outputFieldDerivs(mesh, field, derivative, elem_coords))
    return utils.flatten1(results)
def _fieldderiv_shortrepr(self):
field = self.resolveAlias('field').value
derivative = self.resolveAlias('derivative').value
return "d(%s)/d%s" % (field.name(), derivative.string())
def _fieldderiv_column_names(self):
field = self.resolveAlias('field').value
derivative = self.resolveAlias('derivative').value
if field.ndof() == 1:
return ["d(%s)/d%s" % (field.name(), derivative.string())]
names = []
it = field.iterator(planarity.ALL_INDICES)
while not it.end():
names.append("d(%s[%s])/d%s" % (field.name(), it.shortstring(),
derivative.string()))
it.next()
return names
# Output reporting the spatial derivative of a Field with respect to one
# in-plane coordinate direction.
FieldDerivOutput = output.Output(
    name = "field derivative",
    callback = _fieldderiv,
    otype = outputval.OutputValPtr,
    instancefn = _field_instancefn,
    bulk_only = True,
    params = [meshparameters.FieldParameter("field", outofplane=1,
                                            tip=parameter.emptyTipString),
              enum.EnumParameter("derivative", InPlaneSpaceComponent,
                                 tip='Which derivative to take.')],
    srepr=_fieldderiv_shortrepr,
    column_names=_fieldderiv_column_names,
    tip='Compute derivatives of Fields.',
    discussion='<para>Compute the spatial derivative of a &field; on a &mesh;.</para>')
#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#
def _flux(mesh, elements, coords, flux):
    # Callback for FluxOutput: evaluate the given Flux on each element,
    # reporting progress as it goes.  Subproblem state is begun/ended
    # around each element's evaluation.
    ans = []
    prog = progress.getProgress("Evaluating flux", progress.DEFINITE)
    ## TODO OPT: elements may be a generator, and converting it to a list
    ## is ugly and wasteful, but it's the only way to get its length,
    ## which we only need for the progress bar.  We should either get
    ## rid of the progress bar, or find another way to get the number
    ## of elements.  Perhaps giving the Output access to the domain
    ## would work.  Output.evaluate isn't always called with a domain,
    ## though.  See MeshDataGUI.updateData().
    elist = list(elements)
    nel = len(elist)
    try:
        ecount = 0
        for elem, ecoords in itertools.izip(elist, coords):
            # Flux evaluation requires the subproblems to be active.
            mesh.begin_all_subproblems(elem)
            fluxes = elem.outputFluxes(mesh, flux, ecoords)
            ans.append(fluxes)
            mesh.end_all_subproblems(elem)
            ecount += 1
            prog.setFraction((1.*ecount)/nel)
            prog.setMessage("%d/%d elements" % (ecount, nel))
        return utils.flatten1(ans)
    finally:
        # Always dismiss the progress bar, even if evaluation raises.
        prog.finish()
def _flux_shortrepr(self):
return self.resolveAlias('flux').value.name()
def _flux_instancefn(self):
    # Return a zero-valued OutputVal prototype of the right type for the
    # chosen Flux.
    flux = self.resolveAlias('flux').value
    if flux:
        # Don't combine the following two lines into one! There must
        # be a reference to ovalue until zero() is called. See
        # valuePtr comment in outputval.h.
        ovalue = flux.newOutputValue()
        return ovalue.valuePtr().zero()
    # NOTE(review): implicitly returns None when no flux is selected.
def _flux_column_names(self):
    # Column labels for FluxOutput: one "Flux[component]" label per
    # component of the Flux.
    flux = self.resolveAlias('flux').value
    comp_iter = flux.iterator(planarity.ALL_INDICES)
    labels = []
    while not comp_iter.end():
        labels.append("%s[%s]" % (flux.name(), comp_iter.shortstring()))
        comp_iter.next()
    return labels
# Output reporting the value of a Flux at each evaluation point.
FluxOutput = output.Output(
    name = "flux",
    callback = _flux,
    otype = outputval.OutputValPtr,
    instancefn = _flux_instancefn,
    bulk_only = True,
    column_names=_flux_column_names,
    params = [meshparameters.FluxParameter("flux",
                                           tip=parameter.emptyTipString)],
    srepr=_flux_shortrepr,
    tip='Compute Flux values.',
    discussion='<para>Compute the value of the given &flux; on a &mesh;.</para>')
#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#
# Extract a component of something indexable with an IndexP object
# (eg, Fields and Fluxes).
def _component(mesh, elements, coords, field, component):
    # Extract one component (named by a string such as "x" or "xy") from
    # each value in 'field', wrapping each result as a scalar OutputVal.
    if not field:
        return []
    # Any one value can translate the component string into an IndexP.
    index = next(iter(field)).getIndex(component)
    return [outputval.ScalarOutputVal(value[index]) for value in field]
def scalar_instancefn(self):
    # Prototype instance for Outputs producing a single scalar value.
    return outputval.ScalarOutputVal(0.0)
def single_column_name(self):
    # The name of a column in an Output with only one column is the
    # same as the shortrepr of the Output.
    # NOTE(review): shortrepr is called with an explicit self argument;
    # presumably these helpers are stored unbound and invoked through the
    # Output machinery -- confirm against output.py.
    return [self.shortrepr(self)]
# Generic Output extracting a single component of anything indexable with
# an IndexP (Fields, Fluxes).  Cloned below for the concrete variants.
ComponentOutput = output.Output(
    name = "component",
    callback = _component,
    otype = outputval.ScalarOutputValPtr,
    instancefn = scalar_instancefn,
    column_names=single_column_name,
    inputs = [outputval.OutputValParameter('field')],
    params = [meshparameters.FieldIndexParameter(
            'component', tip='Which component to take.')]
    )
#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#
def _fieldcomp_shortrepr(self):
field = self.resolveAlias('field').value
fieldname = field.name()
if field.ndof() > 1:
comp = self.resolveAlias('component').value
return "%s[%s]" % (fieldname, comp)
else:
return fieldname
# Single component of a Field: ComponentOutput fed by FieldOutput.
FieldCompOutput = ComponentOutput.clone(
    name="field component",
    column_names=single_column_name,
    srepr=_fieldcomp_shortrepr,
    tip='Compute a component of a Field.',
    discussion=
    '<para>Compute a single component of a &field; on a &mesh;.</para>',
    )
# Wire the Field evaluation into the component extractor and expose its
# "field" parameter under a flat alias.
FieldCompOutput.connect("field", FieldOutput)
FieldCompOutput.aliasParam('field:field', 'field')
####
def _fieldderivcomp_shortrepr(self):
    # Short repr for one component of a field derivative, e.g.
    # "d(Displacement[x])/dy".
    # Renamed from _fieldderiv_shortrepr: the old name silently shadowed
    # the module-level function of the same name defined earlier for
    # FieldDerivOutput.  (FieldDerivOutput captured the earlier binding at
    # construction time, so behavior was unchanged, but the module
    # attribute was confusingly replaced.)
    field = self.resolveAlias('field').value
    fieldname = field.name()
    if field.ndof() > 1:
        comp = self.resolveAlias('component').value
        fieldname += '[%s]' % comp
    deriv = self.resolveAlias('derivative').value
    return "d(%s)/d%s" % (fieldname, deriv.string())
# Single component of a Field derivative: ComponentOutput fed by
# FieldDerivOutput.
FieldDerivCompOutput = ComponentOutput.clone(
    name="field derivative component",
    tip='Compute a component of a Field derivative.',
    discussion='<para>Compute a single component of a &field; derivative on a &mesh;.</para>',
    srepr=_fieldderivcomp_shortrepr,
    column_names=single_column_name)
FieldDerivCompOutput.connect("field", FieldDerivOutput)
FieldDerivCompOutput.aliasParam('field:field', 'field')
FieldDerivCompOutput.aliasParam('field:derivative', 'derivative')
###
def _fluxcomp_shortrepr(self):
flux = self.resolveAlias('flux').value
fluxname = flux.name()
if flux.ndof() > 1:
comp = self.resolveAlias('component').value
return "%s[%s]" % (fluxname, comp)
else:
return fluxname
# Single component of a Flux: ComponentOutput fed by FluxOutput.
FluxCompOutput = ComponentOutput.clone(
    name="flux component",
    tip='Compute a component of a Flux.',
    srepr=_fluxcomp_shortrepr,
    column_names=single_column_name,
    discussion=
    '<para>Compute a single component of a &flux; on a &mesh;.</para>')
FluxCompOutput.connect("field", FluxOutput)
FluxCompOutput.aliasParam('field:flux', 'flux')
#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#
def _invariant(mesh, elements, coords, field, invariant):
    # Apply the invariant (trace, magnitude, ...) to each field value,
    # wrapping each result as a scalar OutputVal.
    ## TODO OPT: Use MappedIterable?
    return [outputval.ScalarOutputVal(invariant(v)) for v in field]
def _invariant_shortrepr(self):
field = self.findInput('field').shortrepr()
invariant = self.resolveAlias('invariant').value.shortrepr()
return "%s(%s)" % (invariant, field)
# Generic Output computing an invariant of its "field" input; cloned below
# for Fields, Fluxes, and Field derivatives.
InvariantOutput = output.Output(
    name="invariant",
    callback=_invariant,
    otype=outputval.ScalarOutputValPtr,
    srepr=_invariant_shortrepr,
    instancefn = scalar_instancefn,
    column_names=single_column_name,
    inputs=[outputval.OutputValParameter('field')],
    params=[invariant.InvariantParameter("invariant",
                                         tip=parameter.emptyTipString)]
    )
def getInvariandClass(invariantOutput):
    # Return the class of the quantity the invariant acts on, or None.
    # Because InvariantOutput is an instance, and not a class, this
    # function isn't monkeypatched into the class in the usual way.
    # (The name "Invariand" appears to be a historical spelling, kept for
    # compatibility.)
    oput = invariantOutput.findInput('field').outputInstance()
    if oput is None:
        return None
    return oput.__class__
#=--=##=--=##=--=#
# Concrete invariant Outputs: each clone wires a different evaluator
# (Field, Flux, Field derivative) into the generic InvariantOutput and
# exposes the inner parameters under flat aliases.
FieldInvariantOutput = InvariantOutput.clone(
    name="field invariant",
    tip='Compute invariants of Fields.',
    discussion='<para>Compute invariants of a &field; on a &mesh;.</para>')
FieldInvariantOutput.connect("field", FieldOutput)
FieldInvariantOutput.aliasParam('field:field', 'field')
FluxInvariantOutput = InvariantOutput.clone(
    name="flux invariant",
    tip='Compute invariants of Fluxes.',
    discussion='<para>Compute invariants of a &flux; on a &mesh;.</para>')
FluxInvariantOutput.connect("field", FluxOutput)
FluxInvariantOutput.aliasParam('field:flux', 'flux')
FieldDerivInvariantOutput = InvariantOutput.clone(
    name="field derivative invariant",
    tip='Compute invariants of Field derivatives.',
    discussion='<para>Compute invariants of the derivative of a &field; on a &mesh;</para>')
FieldDerivInvariantOutput.connect("field", FieldDerivOutput)
FieldDerivInvariantOutput.aliasParam('field:field', 'field')
FieldDerivInvariantOutput.aliasParam('field:derivative', 'derivative')
#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#
from ooflib.common import strfunction
def _scalarFunctionOutput(mesh, elements, coords, f):
    # Evaluate a user-supplied scalar function f(position, time) at every
    # evaluation point, reporting progress per element.
    ans = []
    t = mesh.getCurrentTime()
    prog = progress.getProgress("Function evaluation", progress.DEFINITE)
    ecount = 0
    nel = mesh.nelements()
    try:
        for elem, coordlist in itertools.izip(elements, coords):
            # Convert master coordinates to real-space positions lazily.
            realcoords = itertools.imap(elem.from_master, coordlist)
            # Loop variable renamed from "coord" to avoid shadowing the
            # coord module imported at the top of the file.
            ans.extend(outputval.ScalarOutputVal(f(pt, t))
                       for pt in realcoords)
            ecount += 1
            prog.setFraction((1.*ecount)/nel)
            prog.setMessage("%d/%d elements" % (ecount, nel))
    finally:
        # Ensure the progress bar is dismissed even if f raises, matching
        # the try/finally pattern used by _flux above.  (Previously
        # prog.finish() was skipped on an exception.)
        prog.finish()
    return ans
if config.dimension() | |
<gh_stars>0
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Core\sims4\tuning\serialization.py
# Compiled at: 2020-08-14 01:56:25
# Size of source mod 2**32: 53283 bytes
from _pyio import StringIO
from xml.sax.saxutils import XMLGenerator, escape
from xml.sax.xmlreader import AttributesImpl
import collections, inspect, os, pydoc
from sims4.class_utils import find_class
from sims4.resources import ResourceLoader
from sims4.tuning.merged_tuning_manager import MergedTuningAttr, get_manager, UnavailablePackSafeResourceError
from sims4.tuning.tunable_base import Tags, Attributes, TunableBase, TunableAliasError, TunableFileReadOnlyError, DELETEDMARKER, LoadingAttributes, LoadingTags
from sims4.utils import strformatter
import enum, paths, sims4.core_services, sims4.log, sims4.reload, sims4.resources, sims4.service_manager, sims4.tuning.instance_manager, sims4.tuning.instances, sims4.utils
import xml.etree.ElementTree as ET
logger = sims4.log.Logger('Tuning', default_owner='cjiang')
# Module state that must survive a hot-reload of this module: the queues of
# tunables whose loading/callbacks were deferred (see
# finalize_deferred_loads below).
with sims4.reload.protected(globals()):
    _deferred_tuning_loaders = []
    _loaded_deferred_tunable = []
# Attribute name under which a module's tunable descriptors are stashed.
MODULE_TUNABLES = 'MODULE_TUNABLES'
# Schema-dict key marking an exported enum's entries.
ENUM_ENTRIES = 'ENUM_ENTRIES'
# Map of whitespace characters to XML character references.  These must be
# escaped inside attribute values so they survive an XML parse round-trip.
# NOTE: in the decompiled source the entity strings had themselves been
# entity-decoded into literal newline/CR/tab characters, turning the
# escaping into a no-op; restored to proper character references.
XML_ENTITIES = {'\n': '&#10;',
 '\r': '&#13;',
 '\t': '&#9;'}
TUNING_LOADING_CALLBACK = '_tuning_loading_callback'
LOAD_MODULE_FOR_EXPORTING = False
tuning_class_set = set()

def quoteattr(data):
    """Escape *data* and wrap it in quotes for use as an XML attribute.

    Prefers double quotes; falls back to single quotes when the data
    contains a double quote, and escapes embedded double quotes as &quot;
    when both quote characters are present.
    """
    data = escape(data, XML_ENTITIES)
    if '"' in data:
        if "'" in data:
            # Both quote styles present: use double quotes and escape the
            # embedded double quotes.  (Restored from the garbled, no-op
            # data.replace('"', '"') in the decompiled source.)
            data = '"%s"' % data.replace('"', '&quot;')
        else:
            data = "'%s'" % data
    else:
        data = '"%s"' % data
    return data
def process_tuning(module):
    # Load tuning values for *module*; returns False when tuning roots are
    # unavailable, otherwise the result of load_module_tuning.
    if paths.TUNING_ROOTS is None:
        return False
    load_filename = get_file_name(module)
    module_tuning_root = paths.TUNING_ROOTS.get(sims4.resources.Types.TUNING)
    if module_tuning_root:
        save_filename = os.path.join(module_tuning_root, load_filename)
    else:
        save_filename = None
    return load_module_tuning(module, save_filename)
def get_file_name(module):
    # Tuning file name for a module: dotted name with dashes, plus the
    # TUNING resource extension.
    base = module.__name__.replace('.', '-')
    ext = sims4.resources.extensions[sims4.resources.Types.TUNING]
    return '{0}.{1}'.format(base, ext)
def get_desc_file_name(module):
    # Tuning-description file name for a module (dashes for dots, plus the
    # TUNING_DESCRIPTION resource extension).
    base = module.__name__.replace('.', '-')
    ext = sims4.resources.extensions[sims4.resources.Types.TUNING_DESCRIPTION]
    return '{0}.{1}'.format(base, ext)
def get_tdesc_frag_name(cls):
    # Tdesc-fragment file name for a class (dashes for dots, plus the
    # tdesc fragment type suffix).
    base = cls.__name__.replace('.', '-')
    return '{0}.{1}'.format(base, Attributes.TdescFragType)
def _enumerate_members(module, predicate, skip_private=True):
items = module.__dict__.items()
if hasattr(module, '__qualname__'):
qual_path = module.__qualname__ + '.'
else:
qual_path = ''
for key, value in items:
if skip_private:
if key.startswith('__'):
if key.endswith('__'):
continue
if predicate(qual_path + key, value):
yield (
key, value)
def _tunable_check(_, obj):
    # Predicate for _enumerate_members: keep TunableBase instances; the
    # qualified name argument is ignored.
    return isinstance(obj, TunableBase)
def _process_module_tunables(module, tunables):
    # Stash the tunable descriptors on the module under MODULE_TUNABLES and
    # delete the originals so loaded values (or defaults) can be assigned
    # in their place later.  Tunables that need deferring are flagged.
    module.MODULE_TUNABLES = tunables
    for key, tunable in tunables.items():
        delattr(module, key)
        if tunable.needs_deferring:
            tunable.deferred = True
def finalize_deferred_loads():
    # Flush the module-level deferred queues: first load every deferred
    # tunable's value, then fire the deferred callbacks.
    global _deferred_tuning_loaders
    global _loaded_deferred_tunable
    for deferred_loader in _deferred_tuning_loaders:
        deferred_loader.load_value()
    _deferred_tuning_loaders.clear()
    for loaded_tunable in _loaded_deferred_tunable:
        loaded_tunable.call_deferred_callback()
    _loaded_deferred_tunable.clear()
def _replace_tunables(scan, module):
    # Collect the module's TunableBase members into MODULE_TUNABLES (only
    # once), honoring the module's reload context if it defines one.
    # Returns True if the module has (or already had) tunables.
    if MODULE_TUNABLES in vars(module):
        return True
    tunables = dict(_enumerate_members(module, _tunable_check))
    if tunables:
        reload_context = getattr(module, '__reload_context__', None)
        if reload_context:
            with reload_context(module, module):
                _process_module_tunables(module, tunables)
        else:
            _process_module_tunables(module, tunables)
        return True
    return False
def _scan_tunables(scan, module):
    # Merge the module's MODULE_TUNABLES into *scan*; returns True only if
    # the table exists and is non-empty.
    if MODULE_TUNABLES not in vars(module):
        return False
    scan.update(module.MODULE_TUNABLES)
    return bool(module.MODULE_TUNABLES)
def _scan_module_rec(scan, module, key_name, root_name, visited=None, for_export=False):
    # Recursively scan *module* (a module or class) for tunables, appending
    # a per-scope dict to scan[key_name].  Returns True if any tunables
    # were found.  NOTE(review): this is decompiled code and the control
    # flow below appears damaged -- see the inline notes.
    if visited is None:
        visited = set()
    else:
        # NOTE(review): module_scan is only assigned on this branch, so a
        # top-level call (visited is None) would hit a NameError below.
        # The original almost certainly assigned module_scan
        # unconditionally; confirm against the original serialization.py.
        if for_export:
            module_scan = {}
        else:
            module_scan = collections.OrderedDict()
    attr_name = Attributes.Name if for_export else LoadingAttributes.Name
    if attr_name not in module_scan:
        if hasattr(module, '__name__'):
            module_name = module.__name__
            # Skip scopes already recorded under this key/name.
            if key_name in scan:
                for class_dict in scan[key_name]:
                    if class_dict.get(attr_name) == module_name:
                        return False
            module_scan[attr_name] = module_name
        elif inspect.isclass(module):
            # NOTE(review): classes also have __name__, so this elif looks
            # unreachable as decompiled; the original likely used two
            # separate ifs.
            if for_export:
                has_tunables = _scan_tunables(module_scan, module)
                if isinstance(module, enum.Metaclass) and module.export:
                    module_scan[ENUM_ENTRIES] = module
                    module_scan[Attributes.EnumBitFlag] = issubclass(module, enum.IntFlags)
                    module_scan[Attributes.EnumLocked] = module.locked
                    if module.offset:
                        module_scan[Attributes.EnumOffset] = module.offset
                    if module.display_sorted:
                        module_scan[Attributes.DisplaySorted] = module.display_sorted
                    if module.partitioned:
                        module_scan[Attributes.Partitioned] = module.partitioned
                    has_tunables = True
            else:
                has_tunables = _replace_tunables(module_scan, module)
        else:
            pass
        # NOTE(review): as decompiled this line runs after the whole
        # if/elif chain, clobbering has_tunables set above; the original
        # probably had it inside the final else branch.
        has_tunables = False

    def _is_visible_class(name, obj):
        # Only classes defined in this root module, under their own
        # qualified name (not re-exports).
        if inspect.isclass(obj):
            if obj.__module__ == root_name:
                if name == obj.__qualname__:
                    return True
        return False

    def _sort_key(enumerate_tuple):
        # Export order follows source-line order; load order follows name.
        if for_export:
            try:
                return inspect.getsourcelines(enumerate_tuple[1])[1]
            except IOError:
                return 0
        else:
            return enumerate_tuple[0]
    class_tag = Tags.Class if for_export else LoadingTags.Class
    for _cls_name, cls in sorted((_enumerate_members(module, _is_visible_class)), key=_sort_key):
        if cls not in visited:
            visited.add(cls)
            has_tunables |= _scan_module_rec(module_scan, cls, class_tag, root_name, visited=visited, for_export=for_export)
    if has_tunables:
        if key_name not in scan:
            scan[key_name] = []
        scan[key_name].append(module_scan)
    return has_tunables
def export_tuning(module, export_path):
    # Write the tuning description for *module* to export_path.  Returns
    # False on failure, True otherwise (including when there is nothing to
    # export).  (Decompiler artifact: the 'exc = None; del exc' dance
    # mirrors 'except ... as exc' scope-cleanup semantics.)
    if not hasattr(module, '__file__'):
        return True
    writer = None
    try:
        try:
            schema_dict = {}
            has_tunables = _scan_module_rec(schema_dict, module, (Tags.Module), (module.__name__), for_export=True)
            if has_tunables:
                writer = TuningDescFileWriter(module, export_path, whitespace_depth=2)
                writer.open()
                _export_module(writer, schema_dict)
        except TunableFileReadOnlyError as exc:
            try:
                logger.error(str(exc))
                return False
            finally:
                exc = None
                del exc
        except:
            logger.exception('Error during export of module {0}', module)
            return False
    finally:
        # Always close the writer, even when export fails midway.
        if writer is not None:
            writer.close()
    return True
# Schema-dict keys reserved for instance metadata; a tunable may not use
# any of these names (enforced in export_class).
ATTRIBUTES_RESERVED_KEYS = [
 Attributes.InstancePath,
 Attributes.InstanceType,
 Attributes.InstanceClass,
 Attributes.InstanceModule,
 Attributes.InstanceSubclassesOnly,
 Attributes.InstanceParents,
 Attributes.Description]
def export_class(cls, export_path, instance_type):
    # Write the tuning description for a tuned-instance class *cls*,
    # including instance metadata (path, type, class, module, parents, ...).
    # Returns False on failure.
    writer = None
    try:
        try:
            logger.debug('	Exporting: {}', cls.__name__)
            schema_dict = cls.get_tunables(ignore_tuned_instance_metaclass_subclasses=True)
            # Reserved keys carry instance metadata; a tunable using one
            # would be silently overwritten below, so fail loudly instead.
            for reserved_key in ATTRIBUTES_RESERVED_KEYS:
                if reserved_key in schema_dict:
                    raise KeyError("{} use reserved key '{}' in instance tunables. Please rename the tunable.".format(cls.__name__, reserved_key))
            removed_tuning = cls.get_removed_tunable_names()
            for tuning_to_remove in removed_tuning:
                schema_dict[tuning_to_remove] = DELETEDMARKER
            relative_path = os.path.relpath(export_path, paths.DATA_ROOT)
            schema_dict[Attributes.InstancePath] = relative_path
            schema_dict[Attributes.InstanceType] = sims4.resources.extensions[instance_type]
            schema_dict[Attributes.InstanceClass] = cls.__name__
            schema_dict[Attributes.InstanceModule] = cls.__module__
            schema_dict[Attributes.InstanceSubclassesOnly] = sims4.tuning.instances.prohibits_instantiation(cls)
            if cls.tuning_manager.use_guid_for_ref:
                schema_dict[Attributes.InstanceUseGuidForRef] = True
            if cls.get_base_game_only():
                schema_dict[Attributes.InstanceBaseGameOnly] = True
            if cls.tuning_manager.require_reference:
                schema_dict[Attributes.InstanceRequireReference] = True
            # Record tuned-instance ancestors (skipping cls itself).
            parent_names = []
            for parent in cls.__mro__[1:]:
                if isinstance(parent, sims4.tuning.instances.TunedInstanceMetaclass):
                    parent_names.append(parent.__name__)
            if parent_names:
                schema_dict[Attributes.InstanceParents] = ', '.join(parent_names)
            if cls.__doc__:
                schema_dict[Attributes.Description] = pydoc.getdoc(cls)
            full_schema_dict = {Tags.Instance: schema_dict}
            writer = TuningDescFileWriter(cls, export_path=export_path, whitespace_depth=1)
            writer.open()
            _export_module(writer, full_schema_dict)
        except TunableFileReadOnlyError as exc:
            try:
                logger.error(str(exc))
                return False
            finally:
                exc = None
                del exc
        except:
            logger.exception('Error during export of class {0}', cls)
            return False
    finally:
        if writer is not None:
            writer.close()
    return True
def export_fragment(cls, export_path):
    # Write a tdesc fragment for *cls* by instantiating it and handing the
    # instance to the writer.  Returns False on failure.
    writer = None
    try:
        try:
            logger.debug('	Exporting: {}', cls.__name__)
            writer = TuningDescFileWriter(cls, export_path=export_path, whitespace_depth=1)
            writer.open()
            writer.write_frag(cls())
        except Exception as exc:
            try:
                logger.error(str(exc))
                return False
            finally:
                exc = None
                del exc
        except:
            logger.exception('Error during export of fragment {0}', cls)
            return False
    finally:
        if writer is not None:
            writer.close()
    return True
def _export_module(writer, scan):
    # Emit the scanned tunable tree to the tdesc writer, dispatching on
    # value type: TunableBase -> tunable element, exporting enum metaclass
    # -> enum items, dict -> nested namespace, list -> repeated namespaces,
    # DELETEDMARKER -> deleted-tunable element.  Keys are emitted sorted.
    for name, value in sorted(scan.items()):
        if isinstance(value, TunableBase):
            if value.name is None:
                # Adopt the scan key as the tunable's name.
                value.name = name
            else:
                if value.name != name:
                    raise TunableAliasError(value.name)
            writer.write_tunable(value)
        elif isinstance(value, enum.Metaclass) and value.export:
            writer.write_enum_items(value)
        elif isinstance(value, dict):
            writer.start_namespace(name, value)
            _export_module(writer, value)
            writer.end_namespace(name)
        elif isinstance(value, list):
            for sub_dict in value:
                writer.start_namespace(name, sub_dict)
                _export_module(writer, sub_dict)
                writer.end_namespace(name)
        elif value is DELETEDMARKER:
            writer.write_deleted_tunable(name)
def _find_tunables_gen(name, tree, parent):
    # Recursively yield (tunable_name, tunable, owner) triples for every
    # MODULE_TUNABLES table reachable from the scanned *tree*.
    for category, sub_list in tree.items():
        if category == LoadingAttributes.Name:
            if MODULE_TUNABLES in vars(parent):
                for tunable_name, tunable in parent.MODULE_TUNABLES.items():
                    yield (
                     tunable_name, tunable, parent)
        if category in (LoadingTags.Class, LoadingTags.Instance, LoadingTags.Module):
            for sub_tree in sub_list:
                child_name = sub_tree.get(LoadingAttributes.Name, None)
                # NOTE(review): the guard tests *name* (this level's name),
                # not child_name -- at the top level (name is None) the
                # parent itself is recursed into; confirm this is intended.
                if name:
                    child = vars(parent).get(child_name)
                else:
                    child = parent
                for t in _find_tunables_gen(child_name, sub_tree, child):
                    yield t
def load_module_tuning(module, tuning_filename_or_key):
    # Apply tuning values to *module* from a tuning file name or resource
    # key, then invoke per-tunable callbacks and fill in defaults for
    # tunables that received no value.  Always returns True.
    schema_dict = {}
    has_tunables = _scan_module_rec(schema_dict, module, (LoadingTags.Module), (module.__name__), for_export=False)
    if not has_tunables:
        return True
    if not LOAD_MODULE_FOR_EXPORTING:
        tuning_loader = ETreeTuningLoader(module, tuning_filename_or_key)
        mtg = get_manager()
        # Accept either a file name (convert to a resource key) or a key.
        if isinstance(tuning_filename_or_key, str):
            full_name = os.path.basename(tuning_filename_or_key)
            res_name = os.path.splitext(full_name)[0]
            res_key = sims4.resources.get_resource_key(res_name, sims4.resources.Types.TUNING)
        else:
            res_key = tuning_filename_or_key
        if mtg.local_key_exists(res_key):
            loader = ResourceLoader(res_key, sims4.resources.Types.TUNING)
            tuning_file = loader.load()
            tuning_loader.feed(tuning_file)
        else:
            root_node = mtg.get_tuning_res(res_key, silent_fail=True)
            if root_node is not None:
                tuning_loader.feed_node(root_node)
    for name, tunable, parent in _find_tunables_gen(None, schema_dict, module):
        if name in vars(parent):
            # NOTE(review): when the tunable is deferred, True (the value
            # of tunable.deferred) is passed as tuned_value instead of the
            # loaded value -- looks intentional but confirm.
            tuned_value = tunable.deferred or getattr(parent, name)
            tunable.invoke_callback(None, name, tuning_filename_or_key, tuned_value)
            continue
        # No value was loaded for this tunable: install its default,
        # honoring the owner's reload context if present.
        value = tunable.default
        reload_context = getattr(parent, '__reload_context__', None)
        if reload_context:
            with reload_context(parent, parent):
                setattr(parent, name, value)
        else:
            setattr(parent, name, value)
    return True
def create_class_instance(resource_key, resource_type):
    # Build the tuned class described by a tuning resource; returns None
    # when the resource is deleted, missing, or fails to load.
    try:
        tuning_loader = ETreeClassCreator()
        mtg = get_manager()
        if mtg.deleted_local_key_exists(resource_key):
            return
        if mtg.local_key_exists(resource_key):
            loader = ResourceLoader(resource_key, resource_type)
            tuning_file = loader.load()
            if tuning_file is None:
                return
            tuning_loader.feed(tuning_file)
        else:
            # Fall back to the merged/combined tuning resource.
            root_node = mtg.get_tuning_res(resource_key)
            if root_node is not None:
                tuning_loader.feed_node(root_node)
        return tuning_loader.module
    except Exception as e:
        # (Decompiler artifact: 'e = None; del e' mirrors 'except ... as e'
        # scope-cleanup semantics.)
        try:
            logger.error('Exception while creating class instance for Resource {} (type: {})\n{}', resource_key,
              resource_type, e, owner='manus')
            return
        finally:
            e = None
            del e
def load_from_xml(resource_key, resource_type, inst, from_reload=False):
    # Feed tuning XML for *inst* (a tuned-instance class) from local
    # resources or the merged/combined tuning.  Returns the loader's result
    # on success, or (None, None) when nothing was loaded.
    source = strformatter('Instance: {0} ({1}), {2}', resource_key.instance, inst.__name__, resource_type)
    tuning_loader = ETreeTuningLoader(inst, source, loading_tag=(LoadingTags.Instance))
    mtg = get_manager()
    if mtg.deleted_local_key_exists(resource_key):
        # Locally deleted: mark the loader's module as gone.
        tuning_loader.module = None
    else:
        if from_reload or mtg.local_key_exists(resource_key):
            loader = ResourceLoader(resource_key, resource_type)
            tuning_file = loader.load()
            if tuning_file is not None:
                return tuning_loader.feed(tuning_file)
        if mtg.has_combined_tuning_loaded:
            root_node = mtg.get_tuning_res(resource_key)
            if root_node is not None:
                return tuning_loader.feed_node(root_node)
    return (None, None)
def restore_class_instance(inst):
    # Strip tuned attribute values from *inst* so its tunables revert to
    # defaults, then fire its tuning-loading callback if it defines one.
    for tunable_name in inst.get_tunables():
        if tunable_name in vars(inst):
            delattr(inst, tunable_name)
    if hasattr(inst, TUNING_LOADING_CALLBACK):
        inst._tuning_loading_callback()
class ETreeTuningLoader:
    def __init__(self, module, source, loading_tag=LoadingTags.Module):
        # module: the module or tuned class that receives loaded values.
        # source: human-readable origin, used in error messages.
        # loading_tag: root XML tag expected (module vs instance loading).
        self.module = module
        self.source = source
        self.root = None                # set by feed() after parsing
        self._invoke_names = []
        self._loading_tag = loading_tag
    def feed(self, tuning_file):
        # Parse a tuning XML file object and load its root into the target.
        tree = ET.parse(tuning_file)
        self.root = tree.getroot()
        return self._load_node(self.root, self.module)
    def feed_node(self, node):
        # Load from an already-parsed element (merged/combined tuning path).
        return self._load_node(node, self.module)
def _load_node(self, node, tunable_class):
callback_infos = []
verify_callback_infos = []
mtg = get_manager()
if node.tag == LoadingTags.Module:
for child_node in node:
name = child_node.get(LoadingAttributes.Name)
child_class = self._inner_module(tunable_class, name)
node_to_load = child_node
if child_node.tag == MergedTuningAttr.Reference:
ref_index = child_node.get(MergedTuningAttr.Index)
node_to_load = mtg.get_tunable_node(ref_index)
self._load_node(node_to_load, child_class)
else:
if node.tag == LoadingTags.Class:
tunable_datas = self._get_module_tunables_from_class(tunable_class)
else:
tunable_datas = tunable_class.get_tunables()
for child_node in node:
tunable_name = child_node.get(LoadingAttributes.Name, '')
if tunable_datas is not None and tunable_name in tunable_datas:
tunable = tunable_datas.get(tunable_name)
if not tunable is None:
isinstance(tunable, TunableBase) or logger.error('Attempt to load a value from {0} that is no longer tunable: {1}'.format(self.source, tunable_name))
else:
self._load_tunable(tunable_class, tunable_name, tunable, child_node, mtg)
else:
sub_child_class = self._inner_module(tunable_class, tunable_name)
| |
than the number of busy bots, but not more than the
# configured maximum and not less than the configured minimum. In order
# to prevent drastic drops, do not allow the target size to fall below 99%
# of current capacity. Note that this dampens scale downs as a function of
# the frequency with which this function runs, which is currently every
# minute controlled by cron job. Tweak these numbers if the cron frequency
# changes.
# TODO(smut): Tune this algorithm.
# TODO(smut): Move algorithm parameters to luci-config.
target = int(math.ceil(utilization.busy * 1.5))
if target >= schedule.load_based[0].maximum_size:
return schedule.load_based[0].maximum_size
if target < int(0.99 * current):
target = int(0.99 * current)
if target < schedule.load_based[0].minimum_size:
target = schedule.load_based[0].minimum_size
return target
return default
def ensure_entities_exist(max_concurrent=50):
  """Ensures MachineType entities are correct, and MachineLease entities exist.
  Updates MachineType entities based on the config and creates corresponding
  MachineLease entities.
  Args:
    max_concurrent: Maximum number of concurrent asynchronous requests.
  """
  now = utils.utcnow()
  # Seconds and microseconds are too granular for determining scheduling.
  now = datetime.datetime(now.year, now.month, now.day, now.hour, now.minute)
  # Generate a few asynchronous requests at a time in order to prevent having
  # too many in flight at a time.
  futures = []
  # Copy so entries can be popped as they are matched against the datastore;
  # whatever remains afterwards is brand-new config.
  machine_types = bot_groups_config.fetch_machine_types().copy()
  for machine_type in MachineType.query():
    # Check the MachineType in the datastore against its config.
    # If it no longer exists, just disable it here. If it exists but
    # doesn't match, update it.
    config = machine_types.pop(machine_type.key.id(), None)
    # If there is no config, disable the MachineType.
    if not config:
      if machine_type.enabled:
        machine_type.enabled = False
        futures.append(machine_type.put_async())
        logging.info('Disabling deleted MachineType: %s', machine_type)
      continue
    put = False
    # Handle scheduled config changes.
    if config.schedule:
      target_size = get_target_size(
          config.schedule,
          machine_type.key.id(),
          machine_type.target_size,
          config.target_size,
          now=now,
      )
      if machine_type.target_size != target_size:
        logging.info(
            'Adjusting target_size (%s -> %s) for MachineType: %s',
            machine_type.target_size,
            target_size,
            machine_type,
        )
        machine_type.target_size = target_size
        put = True
    # If the MachineType does not match the config, update it. Copy the values
    # of certain fields so we can compare the MachineType to the config to check
    # for differences in all other fields.
    config = machine_type_pb2_to_entity(config)
    config.enabled = machine_type.enabled
    config.target_size = machine_type.target_size
    if machine_type != config:
      logging.info('Updating MachineType: %s', config)
      machine_type = config
      put = True
    # If there's anything to update, update it once here.
    if put:
      futures.append(machine_type.put_async())
    # If the MachineType isn't enabled, don't create MachineLease entities.
    if not machine_type.enabled:
      continue
    # Ensure the existence of MachineLease entities.
    cursor = 0
    while cursor < machine_type.target_size:
      # Top up the in-flight window, then wait for at least one to settle.
      while len(futures) < max_concurrent and cursor < machine_type.target_size:
        futures.append(ensure_entity_exists(machine_type, cursor))
        cursor += 1
      ndb.Future.wait_any(futures)
      # We don't bother checking success or failure. If a transient error
      # like TransactionFailed or DeadlineExceeded is raised and an entity
      # is not created, we will just create it the next time this is called,
      # converging to the desired state eventually.
      futures = [future for future in futures if not future.done()]
  # Create MachineTypes that never existed before.
  # The next iteration of this cron job will create their MachineLeases.
  if machine_types:
    machine_types = machine_types.values()
    while machine_types:
      num_futures = len(futures)
      if num_futures < max_concurrent:
        futures.extend([
            machine_type_pb2_to_entity(machine_type).put_async()
            for machine_type in machine_types[:max_concurrent - num_futures]
        ])
        machine_types = machine_types[max_concurrent - num_futures:]
      ndb.Future.wait_any(futures)
      futures = [future for future in futures if not future.done()]
  if futures:
    ndb.Future.wait_all(futures)
@ndb.transactional_tasklet
def drain_entity(key):
  """Marks the MachineLease with the given key as drained.

  No-op if the entity is missing or already drained.

  Args:
    key: ndb.Key for a MachineLease entity.
  """
  lease = yield key.get_async()
  if lease is None:
    logging.error('MachineLease does not exist\nKey: %s', key)
  elif not lease.drained:
    logging.info(
        'Draining MachineLease:\nKey: %s\nHostname: %s',
        key,
        lease.hostname,
    )
    lease.drained = True
    yield lease.put_async()
@ndb.tasklet
def ensure_entity_drained(machine_lease):
  """Ensures the given MachineLease is drained.

  Args:
    machine_lease: MachineLease entity.
  """
  # Only issue the transactional drain when it is actually needed.
  if not machine_lease.drained:
    yield drain_entity(machine_lease.key)
def drain_excess(max_concurrent=50):
  """Marks MachineLeases beyond what is needed by their MachineType as drained.

  MachineLeases are created in contiguous index blocks, so any lease whose
  index is >= its MachineType's target_size (or whose MachineType is
  disabled) is excess and gets drained.

  Args:
    max_concurrent: Maximum number of concurrent asynchronous requests.
  """
  pending = []
  for machine_type in MachineType.query():
    leases = MachineLease.query(MachineLease.machine_type == machine_type.key)
    for machine_lease in leases:
      try:
        index = int(machine_lease.key.id().rsplit('-', 1)[-1])
      except ValueError:
        logging.error(
            'MachineLease index could not be deciphered\n Key: %s',
            machine_lease.key,
        )
        continue
      # Leases in range of an enabled MachineType are wanted; skip them.
      if machine_type.enabled and index < machine_type.target_size:
        continue
      # Throttle: wait for at least one in-flight drain before adding more.
      if len(pending) == max_concurrent:
        ndb.Future.wait_any(pending)
        pending = [future for future in pending if not future.done()]
      pending.append(ensure_entity_drained(machine_lease))
  if pending:
    ndb.Future.wait_all(pending)
def schedule_lease_management():
  """Schedules task queues to process each MachineLease."""
  now = utils.utcnow()
  for machine_lease in MachineLease.query():
    # Manage a lease when we are still waiting on a bot (no connection_ts),
    # when it is drained, or when it is close enough to expiry to release.
    # Note: short-circuit order matters; the expiry comparison is only
    # evaluated for connected, undrained leases.
    needs_management = (
        not machine_lease.connection_ts
        or machine_lease.drained
        or machine_lease.lease_expiration_ts <= now + datetime.timedelta(
            seconds=machine_lease.early_release_secs))
    if not needs_management:
      continue
    enqueued = utils.enqueue_task(
        '/internal/taskqueue/machine-provider-manage',
        'machine-provider-manage',
        params={
            'key': machine_lease.key.urlsafe(),
        },
    )
    if not enqueued:
      logging.warning(
          'Failed to enqueue task for MachineLease: %s', machine_lease.key)
@ndb.transactional
def clear_lease_request(key, request_id):
  """Clears information about given lease request.

  Args:
    key: ndb.Key for a MachineLease entity.
    request_id: ID of the request to clear.
  """
  lease = key.get()
  if not lease:
    logging.error('MachineLease does not exist\nKey: %s', key)
    return
  if not lease.client_request_id:
    return
  if request_id != lease.client_request_id:
    # Already cleared and incremented?
    logging.warning(
        'Request ID mismatch for MachineLease: %s\nExpected: %s\nActual: %s',
        key,
        request_id,
        lease.client_request_id,
    )
    return
  # Reset every per-lease field so the entity can be reused for a new
  # lease request.
  for field in ('bot_id', 'client_request_id', 'connection_ts', 'hostname',
                'instruction_ts', 'lease_expiration_ts', 'lease_id',
                'termination_task'):
    setattr(lease, field, None)
  lease.put()
@ndb.transactional
def clear_termination_task(key, task_id):
  """Clears the termination task associated with the given lease request.

  Args:
    key: ndb.Key for a MachineLease entity.
    task_id: ID for a termination task.
  """
  machine_lease = key.get()
  if not machine_lease:
    logging.error('MachineLease does not exist\nKey: %s', key)
    return
  if not machine_lease.termination_task:
    return
  if task_id != machine_lease.termination_task:
    logging.error(
        'Task ID mismatch\nKey: %s\nExpected: %s\nActual: %s',
        key,
        task_id,
        # Bug fix: this previously read machine_lease.task_id, which is not
        # a MachineLease field and raised AttributeError on the mismatch
        # path instead of logging the stored termination task.
        machine_lease.termination_task,
    )
    return
  machine_lease.termination_task = None
  machine_lease.put()
@ndb.transactional
def associate_termination_task(key, hostname, task_id):
  """Associates a termination task with the given lease request.

  Args:
    key: ndb.Key for a MachineLease entity.
    hostname: Hostname of the machine the termination task is for.
    task_id: ID for a termination task.
  """
  lease = key.get()
  if not lease:
    logging.error('MachineLease does not exist\nKey: %s', key)
    return
  if hostname != lease.hostname:
    logging.error(
        'Hostname mismatch\nKey: %s\nExpected: %s\nActual: %s',
        key,
        hostname,
        lease.hostname,
    )
    return
  # Never overwrite an existing termination task.
  if lease.termination_task:
    return
  logging.info(
      'Associating termination task\nKey: %s\nHostname: %s\nTask ID: %s',
      key,
      lease.hostname,
      task_id,
  )
  lease.termination_task = task_id
  lease.put()
@ndb.transactional
def log_lease_fulfillment(
    key, request_id, hostname, lease_expiration_ts, lease_id):
  """Logs lease fulfillment.

  Args:
    key: ndb.Key for a MachineLease entity.
    request_id: ID of the request being fulfilled.
    hostname: Hostname of the machine fulfilling the request.
    lease_expiration_ts: UTC seconds since epoch when the lease expires.
    lease_id: ID of the lease assigned by Machine Provider.
  """
  machine_lease = key.get()
  if not machine_lease:
    logging.error('MachineLease does not exist\nKey: %s', key)
    return
  if request_id != machine_lease.client_request_id:
    logging.error(
        'Request ID mismatch\nKey: %s\nExpected: %s\nActual: %s',
        key,
        request_id,
        machine_lease.client_request_id,
    )
    return
  # Bug fix: the stored lease_expiration_ts is a datetime (assigned below),
  # while the argument is epoch seconds. Comparing them directly could never
  # be equal, so the idempotent early-return was dead and every call issued
  # a redundant put(). Convert once and compare like with like.
  lease_expiration_dt = datetime.datetime.utcfromtimestamp(lease_expiration_ts)
  if (hostname == machine_lease.hostname
      and lease_expiration_dt == machine_lease.lease_expiration_ts
      and lease_id == machine_lease.lease_id):
    return
  machine_lease.hostname = hostname
  machine_lease.lease_expiration_ts = lease_expiration_dt
  machine_lease.lease_id = lease_id
  machine_lease.put()
@ndb.transactional
def update_client_request_id(key):
  """Sets the client request ID used to lease a machine.

  Args:
    key: ndb.Key for a MachineLease entity.
  """
  lease = key.get()
  if not lease:
    logging.error('MachineLease does not exist\nKey: %s', key)
    return
  if lease.drained:
    logging.info('MachineLease is drained\nKey: %s', key)
    return
  if lease.client_request_id:
    # A request is already in flight; keep its ID.
    return
  # Derive a fresh, unique request ID from the base and a monotonically
  # increasing counter.
  lease.request_count += 1
  lease.client_request_id = '%s-%s' % (
      lease.request_id_base, lease.request_count)
  lease.put()
@ndb.transactional
def delete_machine_lease(key):
  """Deletes the given MachineLease if it is drained and has no active lease.

  Args:
    key: ndb.Key for a MachineLease entity.
  """
  lease = key.get()
  if lease is None:
    return
  if not lease.drained:
    logging.warning('MachineLease not drained: %s', key)
  elif not lease.client_request_id:
    # Drained and no outstanding request: safe to remove.
    key.delete()
@ndb.transactional
def associate_bot_id(key, bot_id):
  """Associates a bot with the given machine lease.

  Args:
    key: ndb.Key for a MachineLease entity.
    bot_id: ID for a bot.
  """
  lease = key.get()
  if not lease:
    logging.error('MachineLease does not exist\nKey: %s', key)
    return
  # Only write when the association actually changes.
  if lease.bot_id != bot_id:
    lease.bot_id = bot_id
    lease.put()
def ensure_bot_info_exists(machine_lease):
"""Ensures a | |
"enum_values": null,
"fields": [
{
"__class__": "ConfigFieldSnap",
"default_provided": true,
"default_value_as_json_str": "{\\"log_level\\": \\"INFO\\", \\"name\\": \\"dagster\\"}",
"description": null,
"is_required": false,
"name": "config",
"type_key": "Shape.241ac489ffa5f718db6444bae7849fb86a62e441"
}
],
"given_name": null,
"key": "Shape.3baab16166bacfaf4705811e64d356112fd733cb",
"kind": {
"__enum__": "ConfigTypeKind.STRICT_SHAPE"
},
"scalar_kind": null,
"type_param_keys": null
},
"Shape.41de0e2d7b75524510155d0bdab8723c6feced3b": {
"__class__": "ConfigTypeSnap",
"description": null,
"enum_values": null,
"fields": [
{
"__class__": "ConfigFieldSnap",
"default_provided": false,
"default_value_as_json_str": null,
"description": null,
"is_required": false,
"name": "result",
"type_key": "Selector.e52fa3afbe531d9522fae1206f3ae9d248775742"
}
],
"given_name": null,
"key": "Shape.41de0e2d7b75524510155d0bdab8723c6feced3b",
"kind": {
"__enum__": "ConfigTypeKind.STRICT_SHAPE"
},
"scalar_kind": null,
"type_param_keys": null
},
"Shape.45a8f1f21db73ecbfa5b4e07b9aedc1835cef1ef": {
"__class__": "ConfigTypeSnap",
"description": null,
"enum_values": null,
"fields": [
{
"__class__": "ConfigFieldSnap",
"default_provided": false,
"default_value_as_json_str": null,
"description": "Explicit modules to preload in the forkserver.",
"is_required": false,
"name": "preload_modules",
"type_key": "Array.String"
}
],
"given_name": null,
"key": "Shape.45a8f1f21db73ecbfa5b4e07b9aedc1835cef1ef",
"kind": {
"__enum__": "ConfigTypeKind.STRICT_SHAPE"
},
"scalar_kind": null,
"type_param_keys": null
},
"Shape.4b53b73df342381d0d05c5f36183dc99cb9676e2": {
"__class__": "ConfigTypeSnap",
"description": null,
"enum_values": null,
"fields": [
{
"__class__": "ConfigFieldSnap",
"default_provided": false,
"default_value_as_json_str": null,
"description": null,
"is_required": true,
"name": "path",
"type_key": "String"
}
],
"given_name": null,
"key": "<KEY>",
"kind": {
"__enum__": "ConfigTypeKind.STRICT_SHAPE"
},
"scalar_kind": null,
"type_param_keys": null
},
"Shape.69ff9be621991cc7961ea5e667d43edaac9d2339": {
"__class__": "ConfigTypeSnap",
"description": null,
"enum_values": null,
"field_aliases": {
"solids": "ops"
},
"fields": [
{
"__class__": "ConfigFieldSnap",
"default_provided": false,
"default_value_as_json_str": null,
"description": null,
"is_required": false,
"name": "config",
"type_key": "Any"
},
{
"__class__": "ConfigFieldSnap",
"default_provided": false,
"default_value_as_json_str": null,
"description": null,
"is_required": false,
"name": "outputs",
"type_key": "Array.Shape.41de0e2d7b75524510155d0bdab8723c6feced3b"
}
],
"given_name": null,
"key": "Shape.69ff9be621991cc7961ea5e667d43edaac9d2339",
"kind": {
"__enum__": "ConfigTypeKind.STRICT_SHAPE"
},
"scalar_kind": null,
"type_param_keys": null
},
"Shape.743e47901855cb245064dd633e217bfcb49a11a7": {
"__class__": "ConfigTypeSnap",
"description": null,
"enum_values": null,
"fields": [
{
"__class__": "ConfigFieldSnap",
"default_provided": false,
"default_value_as_json_str": null,
"description": null,
"is_required": false,
"name": "config",
"type_key": "Any"
}
],
"given_name": null,
"key": "<KEY>",
"kind": {
"__enum__": "ConfigTypeKind.STRICT_SHAPE"
},
"scalar_kind": null,
"type_param_keys": null
},
"Shape.979b3d2fece4f3eb92e90f2ec9fb4c85efe9ea5c": {
"__class__": "ConfigTypeSnap",
"description": null,
"enum_values": null,
"fields": [
{
"__class__": "ConfigFieldSnap",
"default_provided": false,
"default_value_as_json_str": null,
"description": null,
"is_required": false,
"name": "marker_to_close",
"type_key": "String"
},
{
"__class__": "ConfigFieldSnap",
"default_provided": true,
"default_value_as_json_str": "{\\"enabled\\": {}}",
"description": null,
"is_required": false,
"name": "retries",
"type_key": "Selector.1bfb167aea90780aa679597800c71bd8c65ed0b2"
}
],
"given_name": null,
"key": "Shape.979b3d2fece4f3eb92e90f2ec9fb4c85efe9ea5c",
"kind": {
"__enum__": "ConfigTypeKind.STRICT_SHAPE"
},
"scalar_kind": null,
"type_param_keys": null
},
"Shape.ba913521099bed4314e25592059869c8f3a3c96e": {
"__class__": "ConfigTypeSnap",
"description": null,
"enum_values": null,
"field_aliases": {
"solids": "ops"
},
"fields": [
{
"__class__": "ConfigFieldSnap",
"default_provided": true,
"default_value_as_json_str": "{}",
"description": null,
"is_required": false,
"name": "noop_solid",
"type_key": "Shape.69ff9be621991cc7961ea5e667d43edaac9d2339"
}
],
"given_name": null,
"key": "Shape.ba913521099bed4314e25592059869c8f3a3c96e",
"kind": {
"__enum__": "ConfigTypeKind.STRICT_SHAPE"
},
"scalar_kind": null,
"type_param_keys": null
},
"Shape.ca5906d9a0377218b4ee7d940ad55957afa73d1b": {
"__class__": "ConfigTypeSnap",
"description": null,
"enum_values": null,
"fields": [
{
"__class__": "ConfigFieldSnap",
"default_provided": true,
"default_value_as_json_str": "{\\"retries\\": {\\"enabled\\": {}}}",
"description": null,
"is_required": false,
"name": "config",
"type_key": "Shape.979b3d2fece4f3eb92e90f2ec9fb4c85efe9ea5c"
}
],
"given_name": null,
"key": "Shape.ca5906d9a0377218b4ee7d940ad55957afa73d1b",
"kind": {
"__enum__": "ConfigTypeKind.STRICT_SHAPE"
},
"scalar_kind": null,
"type_param_keys": null
},
"Shape.da39a3ee5e6b4b0d3255bfef95601890afd80709": {
"__class__": "ConfigTypeSnap",
"description": null,
"enum_values": null,
"fields": [],
"given_name": null,
"key": "Shape.da39a3ee5e6b4b0d3255bfef95601890afd80709",
"kind": {
"__enum__": "ConfigTypeKind.STRICT_SHAPE"
},
"scalar_kind": null,
"type_param_keys": null
},
"Shape.e248cccc2d2206bf427e9bc9c2d22833f2aeb6d4": {
"__class__": "ConfigTypeSnap",
"description": null,
"enum_values": null,
"fields": [
{
"__class__": "ConfigFieldSnap",
"default_provided": true,
"default_value_as_json_str": "0",
"description": null,
"is_required": false,
"name": "max_concurrent",
"type_key": "Int"
},
{
"__class__": "ConfigFieldSnap",
"default_provided": true,
"default_value_as_json_str": "{\\"enabled\\": {}}",
"description": null,
"is_required": false,
"name": "retries",
"type_key": "Selector.1bfb167aea90780aa679597800c71bd8c65ed0b2"
},
{
"__class__": "ConfigFieldSnap",
"default_provided": false,
"default_value_as_json_str": null,
"description": "Select how subprocesses are created. Defaults to spawn.\\nWhen forkserver is selected, set_forkserver_preload will be called with either:\\n* the preload_modules list if provided by config\\n* the module containing the Job if it was loaded from a module\\n* dagster\\nhttps://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods",
"is_required": false,
"name": "start_method",
"type_key": "Selector.0f5471adc2ad814d1c9fd94e2fa73c07217dea47"
}
],
"given_name": null,
"key": "<KEY>",
"kind": {
"__enum__": "ConfigTypeKind.STRICT_SHAPE"
},
"scalar_kind": null,
"type_param_keys": null
},
"Shape.ebeaf4550c200fb540f2e1f3f2110debd8c4157c": {
"__class__": "ConfigTypeSnap",
"description": null,
"enum_values": null,
"fields": [
{
"__class__": "ConfigFieldSnap",
"default_provided": false,
"default_value_as_json_str": null,
"description": null,
"is_required": false,
"name": "console",
"type_key": "Shape.3baab16166bacfaf4705811e64d356112fd733cb"
}
],
"given_name": null,
"key": "Shape.ebeaf4550c200fb540f2e1f3f2110debd8c4157c",
"kind": {
"__enum__": "ConfigTypeKind.STRICT_SHAPE"
},
"scalar_kind": null,
"type_param_keys": null
},
"String": {
"__class__": "ConfigTypeSnap",
"description": "",
"enum_values": null,
"fields": null,
"given_name": "String",
"key": "String",
"kind": {
"__enum__": "ConfigTypeKind.SCALAR"
},
"scalar_kind": {
"__enum__": "ConfigScalarKind.STRING"
},
"type_param_keys": null
}
}
},
"dagster_type_namespace_snapshot": {
"__class__": "DagsterTypeNamespaceSnapshot",
"all_dagster_type_snaps_by_key": {
"Any": {
"__class__": "DagsterTypeSnap",
"description": null,
"display_name": "Any",
"is_builtin": true,
"key": "Any",
"kind": {
"__enum__": "DagsterTypeKind.ANY"
},
"loader_schema_key": "Selector.f2fe6dfdc60a1947a8f8e7cd377a012b47065bc4",
"materializer_schema_key": "Selector.e52fa3afbe531d9522fae1206f3ae9d248775742",
"name": "Any",
"type_param_keys": []
},
"Bool": {
"__class__": "DagsterTypeSnap",
"description": null,
"display_name": "Bool",
"is_builtin": true,
"key": "Bool",
"kind": {
"__enum__": "DagsterTypeKind.SCALAR"
},
"loader_schema_key": "ScalarUnion.Bool-Selector.be5d518b39e86a43c5f2eecaf538c1f6c7711b59",
"materializer_schema_key": "Selector.e52fa3afbe531d9522fae1206f3ae9d248775742",
"name": "Bool",
"type_param_keys": []
},
"Float": {
"__class__": "DagsterTypeSnap",
"description": null,
"display_name": "Float",
"is_builtin": true,
"key": "Float",
"kind": {
"__enum__": "DagsterTypeKind.SCALAR"
},
"loader_schema_key": "ScalarUnion.Float-Selector.d00a37e3807d37c9f69cc62997c4a5f4a176e5c3",
"materializer_schema_key": "Selector.e52fa3afbe531d9522fae1206f3ae9d248775742",
"name": "Float",
"type_param_keys": []
},
"Int": {
"__class__": "DagsterTypeSnap",
"description": null,
"display_name": "Int",
"is_builtin": true,
"key": "Int",
"kind": {
"__enum__": "DagsterTypeKind.SCALAR"
},
"loader_schema_key": "ScalarUnion.Int-Selector.a9799b971d12ace70a2d8803c883c863417d0725",
"materializer_schema_key": "Selector.e52fa3afbe531d9522fae1206f3ae9d248775742",
"name": "Int",
"type_param_keys": []
},
"Nothing": {
"__class__": "DagsterTypeSnap",
"description": null,
"display_name": "Nothing",
"is_builtin": true,
"key": "Nothing",
"kind": {
"__enum__": "DagsterTypeKind.NOTHING"
},
"loader_schema_key": null,
"materializer_schema_key": null,
"name": "Nothing",
"type_param_keys": []
},
"String": {
"__class__": "DagsterTypeSnap",
"description": null,
"display_name": "String",
"is_builtin": true,
"key": "String",
"kind": {
"__enum__": "DagsterTypeKind.SCALAR"
},
"loader_schema_key": "ScalarUnion.String-Selector.e04723c9d9937e3ab21206435b22247cfbe58269",
"materializer_schema_key": "Selector.e52fa3afbe531d9522fae1206f3ae9d248775742",
"name": "String",
"type_param_keys": []
}
}
},
"dep_structure_snapshot": {
"__class__": "DependencyStructureSnapshot",
"solid_invocation_snaps": [
{
"__class__": "SolidInvocationSnap",
"input_dep_snaps": [],
"is_dynamic_mapped": false,
"solid_def_name": "noop_solid",
"solid_name": "noop_solid",
"tags": {}
}
]
},
"description": "desc",
"graph_def_name": "noop_pipeline",
"lineage_snapshot": null,
"mode_def_snaps": [
{
"__class__": "ModeDefSnap",
"description": null,
"logger_def_snaps": [
{
"__class__": "LoggerDefSnap",
"config_field_snap": {
"__class__": "ConfigFieldSnap",
"default_provided": true,
"default_value_as_json_str": "{\\"log_level\\": \\"INFO\\", \\"name\\": \\"dagster\\"}",
"description": null,
"is_required": false,
"name": "config",
"type_key": "Shape.241ac489ffa5f718db6444bae7849fb86a62e441"
},
"description": "The default colored console logger.",
"name": "console"
}
],
"name": "default",
"resource_def_snaps": [
{
"__class__": "ResourceDefSnap",
"config_field_snap": {
"__class__": "ConfigFieldSnap",
"default_provided": false,
"default_value_as_json_str": null,
"description": null,
"is_required": false,
"name": "config",
"type_key": "Any"
},
"description": null,
"name": "io_manager"
}
],
"root_config_key": "Shape.32aa7ec6e7407e8a502d0a6094909a9365103a8e"
}
],
"name": "noop_pipeline",
"solid_definitions_snapshot": {
"__class__": "SolidDefinitionsSnapshot",
"composite_solid_def_snaps": [],
"solid_def_snaps": [
{
"__class__": "SolidDefSnap",
"config_field_snap": {
"__class__": "ConfigFieldSnap",
"default_provided": false,
"default_value_as_json_str": null,
"description": null,
"is_required": false,
"name": "config",
"type_key": "Any"
},
"description": null,
"input_def_snaps": [],
"name": "noop_solid",
"output_def_snaps": [
{
"__class__": "OutputDefSnap",
"dagster_type_key": "Any",
"description": null,
"is_dynamic": false,
"is_required": true,
"name": "result"
}
],
"required_resource_keys": [],
"tags": {}
}
]
},
"tags": {
"key": "value"
}
}'''
# 40-hex-digit snapshot identifier recorded by the snapshot-test framework
# for the pipeline snapshot fixture above; regenerated, not hand-edited.
snapshots['test_pipeline_snap_all_props 2'] = '694ecc99696f5f5578d02efbac52c36d91915ed9'
snapshots['test_two_invocations_deps_snap 1'] = '''{
"__class__": "PipelineSnapshot",
"config_schema_snapshot": {
"__class__": "ConfigSchemaSnapshot",
"all_config_snaps_by_key": {
"Any": {
"__class__": "ConfigTypeSnap",
"description": null,
"enum_values": null,
"fields": null,
"given_name": "Any",
"key": "Any",
"kind": {
"__enum__": "ConfigTypeKind.ANY"
},
"scalar_kind": null,
"type_param_keys": null
},
"Array.Shape.41de0e2d7b75524510155d0bdab8723c6feced3b": {
"__class__": "ConfigTypeSnap",
"description": "List of Array.Shape.41de0e2d7b75524510155d0bdab8723c6feced3b",
"enum_values": null,
"fields": null,
"given_name": null,
"key": "Array.Shape.41de0e2d7b75524510155d0bdab8723c6feced3b",
"kind": {
"__enum__": "ConfigTypeKind.ARRAY"
},
"scalar_kind": null,
"type_param_keys": [
"Shape.41de0e2d7b75524510155d0bdab8723c6feced3b"
]
},
"Array.String": {
"__class__": "ConfigTypeSnap",
"description": "List of Array.String",
"enum_values": null,
"fields": null,
"given_name": null,
"key": "Array.String",
"kind": {
"__enum__": "ConfigTypeKind.ARRAY"
},
"scalar_kind": null,
"type_param_keys": [
"String"
]
},
"Bool": {
"__class__": "ConfigTypeSnap",
"description": "",
"enum_values": null,
"fields": null,
"given_name": "Bool",
"key": "Bool",
"kind": {
"__enum__": "ConfigTypeKind.SCALAR"
},
"scalar_kind": {
"__enum__": "ConfigScalarKind.BOOL"
},
"type_param_keys": null
},
"Float": {
"__class__": "ConfigTypeSnap",
"description": "",
"enum_values": null,
"fields": null,
"given_name": "Float",
"key": "Float",
"kind": {
"__enum__": "ConfigTypeKind.SCALAR"
},
"scalar_kind": {
"__enum__": "ConfigScalarKind.FLOAT"
},
"type_param_keys": null
},
"Int": {
"__class__": "ConfigTypeSnap",
"description": "",
"enum_values": null,
"fields": null,
"given_name": "Int",
"key": "Int",
"kind": {
"__enum__": "ConfigTypeKind.SCALAR"
},
"scalar_kind": {
"__enum__": "ConfigScalarKind.INT"
},
"type_param_keys": null
},
"ScalarUnion.Bool-Selector.be5d518b39e86a43c5f2eecaf538c1f6c7711b59": {
"__class__": "ConfigTypeSnap",
"description": null,
"enum_values": null,
"fields": null,
"given_name": null,
"key": "ScalarUnion.Bool-Selector.be5d518b39e86a43c5f2eecaf538c1f6c7711b59",
"kind": {
"__enum__": "ConfigTypeKind.SCALAR_UNION"
},
"scalar_kind": null,
"type_param_keys": [
"Bool",
"Selector.be5d518b39e86a43c5f2eecaf538c1f6c7711b59"
]
},
"ScalarUnion.Float-Selector.d00a37e3807d37c9f69cc62997c4a5f4a176e5c3": {
"__class__": "ConfigTypeSnap",
"description": null,
"enum_values": null,
"fields": null,
"given_name": null,
"key": "ScalarUnion.Float-Selector.d00a37e3807d37c9f69cc62997c4a5f4a176e5c3",
"kind": {
"__enum__": "ConfigTypeKind.SCALAR_UNION"
},
"scalar_kind": null,
"type_param_keys": [
"Float",
"Selector.d00a37e3807d37c9f69cc62997c4a5f4a176e5c3"
]
},
"ScalarUnion.Int-Selector.a9799b971d12ace70a2d8803c883c863417d0725": {
"__class__": "ConfigTypeSnap",
"description": null,
"enum_values": null,
"fields": null,
"given_name": null,
"key": "ScalarUnion.Int-Selector.a9799b971d12ace70a2d8803c883c863417d0725",
"kind": {
"__enum__": "ConfigTypeKind.SCALAR_UNION"
},
"scalar_kind": null,
"type_param_keys": [
"Int",
"Selector.a9799b971d12ace70a2d8803c883c863417d0725"
]
},
"ScalarUnion.String-Selector.e04723c9d9937e3ab21206435b22247cfbe58269": {
"__class__": "ConfigTypeSnap",
"description": null,
"enum_values": null,
"fields": null,
"given_name": null,
"key": "ScalarUnion.String-Selector.e04723c9d9937e3ab21206435b22247cfbe58269",
"kind": {
"__enum__": "ConfigTypeKind.SCALAR_UNION"
},
"scalar_kind": null,
"type_param_keys": [
"String",
"Selector.e04723c9d9937e3ab21206435b22247cfbe58269"
]
},
"Selector.0f5471adc2ad814d1c9fd94e2fa73c07217dea47": {
"__class__": "ConfigTypeSnap",
"description": null,
"enum_values": null,
"fields": [
{
"__class__": "ConfigFieldSnap",
"default_provided": true,
"default_value_as_json_str": "{}",
"description": null,
"is_required": false,
"name": "forkserver",
"type_key": "Shape.45a8f1f21db73ecbfa5b4e07b9aedc1835cef1ef"
},
{
"__class__": "ConfigFieldSnap",
"default_provided": true,
"default_value_as_json_str": "{}",
"description": null,
"is_required": false,
"name": "spawn",
"type_key": "Shape.da39a3ee5e6b4b0d3255bfef95601890afd80709"
}
],
| |
self.assertEquals(maxz, 5)
for i in range(2,7):
minx, maxx, miny, maxy, minz, maxz, error = instance.get_boundary_index_range_inclusive(i,1)
self.assertEquals(error, 0)
self.assertEquals(minx, 0)
self.assertEquals(maxx, 0)
self.assertEquals(miny, 0)
self.assertEquals(maxy, 0)
self.assertEquals(minz, 0)
self.assertEquals(maxz, 0)
def test19(self):
results = []
instance=self.new_instance(AthenaInterface)
instance.initialize_code()
instance.setup_mesh(100,5,6,100.0,0,0)
instance.set_gamma(1.6666666666666667)
instance.set_courant_friedrichs_lewy_number(0.8)
instance.set_boundary("interface","interface","interface","interface","interface","interface")
instance.commit_parameters()
for i in range(1,7):
minx, maxx, miny, maxy, minz, maxz, error = instance.get_boundary_index_range_inclusive(i,1)
self.assertEquals(error, 0)
self.assertEquals(minx, 0)
self.assertEquals(miny, 0)
self.assertEquals(minz, 0)
if i == 1 or i == 2:
self.assertEquals(maxx, 3)
self.assertEquals(maxy, 4)
self.assertEquals(maxz, 5)
elif i == 3 or i == 4:
self.assertEquals(maxx, 99+8)
self.assertEquals(maxy, 3)
self.assertEquals(maxz, 5)
elif i == 5 or i == 6:
self.assertEquals(maxx, 99+8)
self.assertEquals(maxy, 4+8)
self.assertEquals(maxz, 3)
def test20(self):
results = []
instance=self.new_instance(AthenaInterface)
instance.initialize_code()
instance.setup_mesh(100,1,1,100.0,100.0,100.0)
instance.set_gamma(1.6666666666666667)
instance.set_courant_friedrichs_lewy_number(0.8)
instance.set_boundary("interface","periodic","periodic","periodic","periodic","periodic")
instance.commit_parameters()
for i in range(4):
error = instance.set_boundary_state(
i,0,0, # index
1.0 * (i+1), # density
2.0 * (i+1), 3.0 * (i+1), 4.0 * (i+1), # momentum
5.0 * (i+1), # energy
1.0, 1.0 # boundary + grid
)
self.assertEquals(error, 0)
rho, rhovx, rhovy, rhovz, rhoen, error = instance.get_boundary_state(
i, 0, 0,
1.0, 1.0
)
print rho, rhovx, rhovy, rhovz, rhoen, error
self.assertEquals(error, 0)
self.assertAlmostRelativeEquals(rho, 1.0 * (i+1))
self.assertAlmostRelativeEquals(rhovx, 2.0 * (i+1))
self.assertAlmostRelativeEquals(rhovy, 3.0 * (i+1))
self.assertAlmostRelativeEquals(rhovz, 4.0 * (i+1))
self.assertAlmostRelativeEquals(rhoen, 5.0 * (i+1))
def test21(self):
results = []
instance=self.new_instance(AthenaInterface)
instance.initialize_code()
instance.setup_mesh(100,1,1,100.0,0,0)
instance.set_gamma(1.6666666666666667)
instance.set_courant_friedrichs_lewy_number(0.8)
instance.set_boundary("interface","interface","periodic","periodic","periodic","periodic")
instance.commit_parameters()
for i in range(4):
for j in [1,2]:
error = instance.set_boundary_state(
i,0,0, # index
1.0 * (i+1), # density
2.0 * (i+1), 3.0 * (i+1), 4.0 * (i+1), # momentum
5.0 * (i+1), # energy
j, 1.0 # boundary + grid
)
self.assertEquals(error, 0)
rho, rhovx, rhovy, rhovz, rhoen, error = instance.get_boundary_state(
i, 0, 0,
j, 1.0
)
print j
self.assertEquals(error, 0)
self.assertAlmostRelativeEquals(rho, 1.0 * (i+1))
self.assertAlmostRelativeEquals(rhovx, 2.0 * (i+1))
self.assertAlmostRelativeEquals(rhovy, 3.0 * (i+1))
self.assertAlmostRelativeEquals(rhovz, 4.0 * (i+1))
self.assertAlmostRelativeEquals(rhoen, 5.0 * (i+1))
    def test22(self):
        """Round-trips boundary state through every cell of all six
        boundary ghost regions of a 5x6x7 mesh."""
        results = []
        instance=self.new_instance(AthenaInterface)
        instance.initialize_code()
        instance.setup_mesh(5,6,7,100.0,100.0,100.0)
        instance.set_gamma(1.6666666666666667)
        instance.set_courant_friedrichs_lewy_number(0.8)
        instance.set_boundary("interface","interface","interface","interface","interface","interface")
        instance.commit_parameters()
        # Ghost-region extents per boundary pair: 4 cells deep along the
        # boundary's own axis, full mesh extent along the other two.
        x1range = (4,6,7)
        x2range = (5,4,7)
        x3range = (5,6,4)
        # NOTE(review): the loop variable shadows the `xrange` builtin;
        # harmless here since the builtin is not used in this method.
        for xrange, j in zip([x1range, x1range, x2range, x2range, x3range, x3range], [1,2,3,4,5,6]):
            for i0 in range(xrange[0]):
                for j0 in range(xrange[1]):
                    for k0 in range(xrange[2]):
                        # Row-major linear index of the cell within this
                        # boundary region; gives each cell distinct values.
                        i = (i0 * (xrange[2] * xrange[1])) + (j0 * xrange[2]) + k0
                        error = instance.set_boundary_state(
                            i0, j0, k0, # index
                            1.0 * (i+1), # density
                            2.0 * (i+1), 3.0 * (i+1), 4.0 * (i+1), # momentum
                            5.0 * (i+1), # energy
                            j, 1.0 # boundary + grid
                        )
                        self.assertEquals(error, 0)
                        rho, rhovx, rhovy, rhovz, rhoen, error = instance.get_boundary_state(
                            i0, j0, k0, # index
                            j, 1.0
                        )
                        self.assertEquals(error, 0)
                        # Values read back must match what was written.
                        self.assertAlmostRelativeEquals(rho, 1.0 * (i+1))
                        self.assertAlmostRelativeEquals(rhovx, 2.0 * (i+1))
                        self.assertAlmostRelativeEquals(rhovy, 3.0 * (i+1))
                        self.assertAlmostRelativeEquals(rhovz, 4.0 * (i+1))
                        self.assertAlmostRelativeEquals(rhoen, 5.0 * (i+1))
def test24(self):
results = []
instance=self.new_instance(AthenaInterface)
instance.initialize_code()
instance.setup_mesh(100,5,1,100.0,0,0)
instance.set_gamma(1.6666666666666667)
instance.set_courant_friedrichs_lewy_number(0.8)
instance.set_boundary("interface","interface","interface","interface","interface","interface")
instance.commit_parameters()
for i in range(1,7):
minx, maxx, miny, maxy, minz, maxz, error = instance.get_boundary_index_range_inclusive(i,1)
self.assertEquals(error, 0)
self.assertEquals(minx, 0)
self.assertEquals(miny, 0)
self.assertEquals(minz, 0)
if i == 1 or i == 2:
self.assertEquals(maxx, 3)
self.assertEquals(maxy, 4)
self.assertEquals(maxz, 0)
elif i == 3 or i == 4:
self.assertEquals(maxx, 99+8)
self.assertEquals(maxy, 3)
self.assertEquals(maxz, 0)
elif i == 5 or i == 6:
self.assertEquals(maxx, 99+8)
self.assertEquals(maxy, 4 +8)
self.assertEquals(maxz, 3)
def test25(self):
results = []
instance=self.new_instance(AthenaInterface)
instance.initialize_code()
instance.setup_mesh(100,1,1,100.0,0,0)
instance.set_gamma(1.6666666666666667)
instance.set_courant_friedrichs_lewy_number(0.8)
instance.set_boundary("interface","interface","periodic","periodic","periodic","periodic")
instance.commit_parameters()
dx = 1.0
for i in range(4):
x,y,z,error = instance.get_boundary_position_of_index(
i,0,0,
1, 1
)
self.assertEquals(error, 0)
self.assertAlmostRelativeEquals(x, (0.5 * dx) - ((4 -i)*dx))
self.assertAlmostRelativeEquals(y, 0.0)
self.assertAlmostRelativeEquals(z, 0.0)
def test26(self):
results = []
instance=self.new_instance(AthenaInterface)
instance.initialize_code()
instance.setup_mesh(100,1,1,100.0,0,0)
instance.set_gamma(1.6666666666666667)
instance.set_courant_friedrichs_lewy_number(0.8)
instance.set_boundary("interface","interface","periodic","periodic","periodic","periodic")
instance.commit_parameters()
dx = 1.0
for i in range(4):
x,y,z,error = instance.get_boundary_position_of_index(
i,0,0,
2, 1
)
self.assertEquals(error, 0)
self.assertAlmostRelativeEquals(x, 100.0 + (0.5 * dx) + (i * dx))
self.assertAlmostRelativeEquals(y, 0.0)
self.assertAlmostRelativeEquals(z, 0.0)
    def test27(self):
        """Checks boundary ghost-cell positions on a 100x5 (2D) mesh for
        the right x boundary (2) and both y boundaries (3, 4)."""
        results = []
        instance=self.new_instance(AthenaInterface)
        instance.initialize_code()
        instance.setup_mesh(100,5,1,100.0,100.0,0)
        instance.set_gamma(1.6666666666666667)
        instance.set_courant_friedrichs_lewy_number(0.8)
        instance.set_boundary("interface","interface","interface","interface","periodic","periodic")
        instance.commit_parameters()
        # Cell sizes: dx from 100 cells over 100.0, dy from 5 cells over 100.0.
        dx = 1.0
        dy = 100.0 / 5.0
        # Right x boundary (2): ghost centers beyond x = 100, interior y.
        for i in range(4):
            for j in range(5):
                x,y,z,error = instance.get_boundary_position_of_index(
                    i, j, 1,
                    2, 1
                )
                print y, j, (0.5 * dy) - ((4 - j) * dy)
                self.assertEquals(error, 0)
                self.assertAlmostRelativeEquals(x, 100.0 + (0.5 * dx) + (i * dx))
                self.assertAlmostRelativeEquals(y, (0.5 * dy) + (j * dy))
                self.assertAlmostRelativeEquals(z, 0.0)
        # y boundaries (3 below, 4 above): x spans the padded extent
        # (100 + 8 ghost cells), hence the i-4 offset in the expected x.
        for i in range(100 + 8):
            for j in range(4):
                x,y,z,error = instance.get_boundary_position_of_index(
                    i, j, 1,
                    3, 1
                )
                self.assertEquals(error, 0)
                self.assertAlmostRelativeEquals(x, (0.5 * dx) + ((i-4) * dx))
                self.assertAlmostRelativeEquals(y, ((0.5 * dy) - ((4-j) * dy)))
                self.assertAlmostRelativeEquals(z, 0.0)
                x,y,z,error = instance.get_boundary_position_of_index(
                    i, j, 1,
                    4, 1
                )
                self.assertEquals(error, 0)
                self.assertAlmostRelativeEquals(x, (0.5 * dx) + ((i-4) * dx))
                self.assertAlmostRelativeEquals(y, 100.0 + (0.5 * dy) + (j * dy))
                self.assertAlmostRelativeEquals(z, 0.0)
    def test28(self):
        """Boundary-cell positions on a 3D 3x3x3 mesh for boundary indices 2-6.

        Same pattern as test26/test27 extended to all three axes: each
        boundary's ghost-cell centers are checked against (idx +- 0.5)*d
        offsets, with 4 ghost cells per side (hence the (i-4) shifts and the
        "+ 8" widened ranges).
        """
        results = []  # NOTE(review): unused local, kept as-is
        instance=self.new_instance(AthenaInterface)
        instance.initialize_code()
        instance.setup_mesh(3, 3, 3, 6,12,18)
        instance.set_gamma(1.6666666666666667)
        instance.set_courant_friedrichs_lewy_number(0.8)
        instance.set_boundary("interface","interface","interface","interface","interface","interface")
        instance.commit_parameters()
        dx = 6.0 / 3.0
        dy = 12.0 / 3.0
        dz = 18.0 / 3.0
        for i in range(4):
            for j in range(3):
                for k in range(3):
                    x,y,z,error = instance.get_boundary_position_of_index(
                        i, j, k,
                        2, 1
                    )
                    self.assertEquals(error, 0)
                    self.assertAlmostRelativeEquals(x, 6.0 + (0.5 * dx) + (i * dx))
                    self.assertAlmostRelativeEquals(y, (0.5 * dy) + (j * dy))
                    self.assertAlmostRelativeEquals(z, (0.5 * dz) + (k * dz))
        for i in range(3 + 8):
            for j in range(4):
                for k in range(3):
                    x,y,z,error = instance.get_boundary_position_of_index(
                        i, j, k,
                        3, 1
                    )
                    self.assertEquals(error, 0)
                    self.assertAlmostRelativeEquals(x, (0.5 * dx) + ((i-4) * dx))
                    self.assertAlmostRelativeEquals(y, ((0.5 * dy) - ((4-j) * dy)))
                    self.assertAlmostRelativeEquals(z, (0.5 * dz) + (k * dz))
                    x,y,z,error = instance.get_boundary_position_of_index(
                        i, j, k,
                        4, 1
                    )
                    self.assertEquals(error, 0)
                    self.assertAlmostRelativeEquals(x, (0.5 * dx) + ((i-4) * dx))
                    self.assertAlmostRelativeEquals(y, 12.0 + (0.5 * dy) + (j * dy))
                    self.assertAlmostRelativeEquals(z, (0.5 * dz) + (k * dz))
        for i in range(3 + 8):
            for j in range(3 + 8):
                for k in range(4):
                    x,y,z,error = instance.get_boundary_position_of_index(
                        i, j, k,
                        5, 1
                    )
                    self.assertEquals(error, 0)
                    self.assertAlmostRelativeEquals(x, (0.5 * dx) + ((i-4) * dx))
                    self.assertAlmostRelativeEquals(y, (0.5 * dy) + ((j-4) * dy))
                    self.assertAlmostRelativeEquals(z, ((0.5 * dz) - ((4-k) * dz)))
                    x,y,z,error = instance.get_boundary_position_of_index(
                        i, j, k,
                        6, 1
                    )
                    self.assertEquals(error, 0)
                    self.assertAlmostRelativeEquals(x, (0.5 * dx) + ((i-4) * dx))
                    self.assertAlmostRelativeEquals(y, (0.5 * dy) + ((j-4) * dy))
                    self.assertAlmostRelativeEquals(z, 18.0 + (0.5 * dz) + (k * dz))
    def test29(self):
        """Round-trip boundary state (rho, momentum, energy) through boundaries 1 and 2
        on a 1D mesh split over 3 workers: what set_boundary_state writes,
        get_boundary_state must read back unchanged."""
        results = []  # NOTE(review): unused local, kept as-is
        instance=self.new_instance(AthenaInterface, number_of_workers = 3)
        instance.initialize_code()
        instance.setup_mesh(300,1,1,300.0,0,0)
        instance.set_gamma(1.6666666666666667)
        instance.set_courant_friedrichs_lewy_number(0.8)
        instance.set_boundary("interface","interface","periodic","periodic","periodic","periodic")
        instance.commit_parameters()
        for j in [1,2]:
            # NOTE(review): leftover debug print (Python 2 statement syntax)
            print j
            for i in range(4):
                error = instance.set_boundary_state(
                    i,0,0, # index
                    1.0 * (i+1), # density
                    2.0 * (i+1), 3.0 * (i+1), 4.0 * (i+1), # momentum
                    5.0 * (i+1), # energy
                    j, 1.0 # boundary + grid
                )
                self.assertEquals(error, 0)
                rho, rhovx, rhovy, rhovz, rhoen, error = instance.get_boundary_state(
                    i, 0, 0,
                    j, 1.0
                )
                self.assertEquals(error, 0)
                self.assertAlmostRelativeEquals(rho, 1.0 * (i+1))
                self.assertAlmostRelativeEquals(rhovx, 2.0 * (i+1))
                self.assertAlmostRelativeEquals(rhovy, 3.0 * (i+1))
                self.assertAlmostRelativeEquals(rhovz, 4.0 * (i+1))
                self.assertAlmostRelativeEquals(rhoen, 5.0 * (i+1))
    def test30(self):
        """Round-trip boundary state through the y boundaries (3 and 4) of a 2D mesh
        decomposed over 3 workers, with a distinct value per cell."""
        results = []  # NOTE(review): unused local, kept as-is
        instance=self.new_instance(AthenaInterface, number_of_workers = 3)
        instance.initialize_code()
        instance.setup_mesh(30,10,1,30.0,10.0,0)
        instance.set_gamma(1.6666666666666667)
        instance.set_courant_friedrichs_lewy_number(0.8)
        instance.set_boundary("interface","interface","interface","interface","periodic","periodic")
        instance.commit_parameters()
        for boundaryindex in [3,4]:
            for i0 in range(38):
                for j0 in range(4):
                    # unique per-cell seed value
                    # NOTE(review): (i0 * (4*38)) + j0 looks odd -- a dense
                    # linear index would be i0 * 4 + j0; uniqueness still
                    # holds, so the round-trip check works either way.
                    i = (i0 * (4*38)) + j0
                    error = instance.set_boundary_state(
                        i0,j0,0, # index
                        1.0 * (i+1), # density
                        2.0 * (i+1), 3.0 * (i+1), 4.0 * (i+1), # momentum
                        5.0 * (i+1), # energy
                        boundaryindex, 1 # boundary + grid
                    )
                    self.assertEquals(error, 0)
                    rho, rhovx, rhovy, rhovz, rhoen, error = instance.get_boundary_state(
                        i0, j0, 0,
                        boundaryindex, 1.0
                    )
                    self.assertEquals(error, 0)
                    self.assertAlmostRelativeEquals(rho, 1.0 * (i+1))
                    self.assertAlmostRelativeEquals(rhovx, 2.0 * (i+1))
                    self.assertAlmostRelativeEquals(rhovy, 3.0 * (i+1))
                    self.assertAlmostRelativeEquals(rhovz, 4.0 * (i+1))
                    self.assertAlmostRelativeEquals(rhoen, 5.0 * (i+1))
def test31(self):
results = []
instance=self.new_instance(AthenaInterface, number_of_workers = 3)
instance.initialize_code()
instance.set_auto_decomposition(0)
instance.set_parallel_decomposition(1,3,1)
instance.setup_mesh(5,6,1,5.0,6.0,0)
instance.set_gamma(1.6666666666666667)
instance.set_courant_friedrichs_lewy_number(0.8)
instance.set_boundary("interface","interface","interface","interface","periodic","periodic")
instance.commit_parameters()
for boundaryindex in [1,2]:
for i0 in range(4):
for j0 in range(6):
i | |
from enum import Enum, IntEnum
from math import isfinite
from typing import List, Optional, Union
from pydantic import validator
from geolib.geometry.one import Point
from geolib.models import BaseDataClass
from .soil_utils import Color
class SoilBaseModel(BaseDataClass):
    """Common base for all soil parameter models.

    Adds a catch-all validator that rejects non-finite floats (nan, +/-inf)
    on every field.
    """

    @validator("*")
    def fail_on_infinite(cls, v, values, field):
        """Let every non-float (or finite float) value pass; reject the rest."""
        if not isinstance(v, float) or isfinite(v):
            return v
        raise ValueError(
            "Only finite values are supported, don't use nan, -inf or inf."
        )
class DistributionType(IntEnum):
    """Probability distribution of a stochastic parameter.

    NOTE(review): value 1 is intentionally skipped here -- presumably
    reserved in the D-series file format; confirm before reusing it.
    """
    Undefined = 0
    Normal = 2
    LogNormal = 3
    Deterministic = 4
class StochasticParameter(SoilBaseModel):
    """
    Stochastic parameter: a mean value plus distribution information,
    used wherever a soil property may be probabilistic.
    """

    is_probabilistic: bool = False
    mean: Optional[float] = None
    standard_deviation: Optional[float] = 0
    distribution_type: Optional[DistributionType] = DistributionType.Normal
    correlation_coefficient: Optional[float] = None
    # characteristic / design values (low and high bounds)
    low_characteristic_value: Optional[float] = None
    high_characteristic_value: Optional[float] = None
    low_design_value: Optional[float] = None
    high_design_value: Optional[float] = None
class ShearStrengthModelTypePhreaticLevel(Enum):
    """
    Shear Strength Model Type, as presented in the UI of the
    D-Stability program.
    """

    MOHR_COULOMB = "Mohr_Coulomb"
    NONE = "None"
    SHANSEP = "SHANSEP"
    SUTABLE = "SuTable"

    def transform_shear_strength_model_type_to_internal(self):
        """Map this UI-level enum member onto the corresponding internal enum member."""
        # Imported locally to avoid a circular import with the internal module.
        from geolib.models.dstability.internal import (
            ShearStrengthModelTypePhreaticLevelInternal,
        )

        internal = ShearStrengthModelTypePhreaticLevelInternal
        mapping = {
            ShearStrengthModelTypePhreaticLevel.MOHR_COULOMB: internal.C_PHI,
            ShearStrengthModelTypePhreaticLevel.NONE: internal.NONE,
            ShearStrengthModelTypePhreaticLevel.SHANSEP: internal.SU,
            ShearStrengthModelTypePhreaticLevel.SUTABLE: internal.SUTABLE,
        }
        return mapping[self]
class MohrCoulombParameters(SoilBaseModel):
    """
    Mohr Coulomb parameters class. Plain floats are accepted and may later be
    promoted to StochasticParameter(mean=value) by Soil.set_stochastic_parameters.
    """

    cohesion: Optional[Union[float, StochasticParameter]] = StochasticParameter()
    dilatancy_angle: Optional[Union[float, StochasticParameter]] = StochasticParameter()
    friction_angle: Optional[Union[float, StochasticParameter]] = StochasticParameter()
    friction_angle_interface: Optional[
        Union[float, StochasticParameter]
    ] = StochasticParameter()
    cohesion_and_friction_angle_correlated: Optional[bool] = None
class SuTablePoint(SoilBaseModel):
    """One point of an Su table: undrained shear strength at an effective stress."""
    su: Optional[float] = None  # undrained shear strength
    stress: Optional[float] = None  # effective stress
class UndrainedParameters(SoilBaseModel):
    """
    Undrained shear strength parameters class. This class includes the SU Table and SHANSEP model variables included in D-Stability.
    """

    shear_strength_ratio: Optional[
        Union[float, StochasticParameter]
    ] = StochasticParameter()
    shear_strength_ratio_and_shear_strength_exponent_correlated: Optional[bool] = None
    strength_increase_exponent: Optional[
        Union[float, StochasticParameter]
    ] = StochasticParameter()
    s_and_m_correlated: Optional[bool] = None
    undrained_shear_strength: Optional[float] = None
    undrained_shear_strength_top: Optional[float] = None
    undrained_shear_strength_bottom: Optional[float] = None
    undrained_shear_strength_bearing_capacity_factor: Optional[float] = None
    su_table: Optional[List[SuTablePoint]] = None
    probabilistic_su_table: Optional[bool] = None
    su_table_variation_coefficient: Optional[float] = None

    def to_su_table_points(self):
        """Convert ``su_table`` into internal PersistableSuTablePoint objects.

        Returns None when no table is set (mirrors the input).
        """
        from geolib.models.dstability.internal import PersistableSuTablePoint

        if self.su_table is None:
            return None
        return [
            PersistableSuTablePoint(EffectiveStress=point.stress, Su=point.su)
            for point in self.su_table
        ]
class BjerrumParameters(SoilBaseModel):
    """
    Bjerrum parameters class
    input_type_is_comp_ratio: [bool] is true when compression input mode is "compression ratio", false when compression
    input mode is "compression index"
    If input_type_is_comp_ratio is true, the following parameters are used as input:
        reloading_swelling_RR
        compression_ratio_CR
        coef_secondary_compression_Ca
    If input_type_is_comp_ratio is false, the following parameters are used as input:
        reloading_swelling_index_Cr
        compression_index_Cc
        coef_secondary_compression_Ca
    """

    input_type_is_comp_ratio: Optional[bool] = None
    reloading_ratio: Optional[Union[float, StochasticParameter]] = StochasticParameter()
    primary_compression_ratio: Optional[
        Union[float, StochasticParameter]
    ] = StochasticParameter()
    correlation_reload_primary_compression_ratio: Optional[float] = None
    reloading_index: Optional[Union[float, StochasticParameter]] = StochasticParameter()
    primary_compression_index: Optional[
        Union[float, StochasticParameter]
    ] = StochasticParameter()
    coef_secondary_compression_Ca: Optional[
        Union[float, StochasticParameter]
    ] = StochasticParameter()
    # ratio-mode inputs (used when input_type_is_comp_ratio is True)
    reloading_swelling_RR: Optional[
        Union[float, StochasticParameter]
    ] = StochasticParameter()
    compression_ratio_CR: Optional[
        Union[float, StochasticParameter]
    ] = StochasticParameter()
    # index-mode inputs (used when input_type_is_comp_ratio is False)
    reloading_swelling_index_Cr: Optional[
        Union[float, StochasticParameter]
    ] = StochasticParameter()
    compression_index_Cc: Optional[
        Union[float, StochasticParameter]
    ] = StochasticParameter()
class StateType(Enum):
    """How the initial stress state is specified: pre-overburden pressure,
    over-consolidation ratio, or a yield stress."""
    POP = "POP"
    OCR = "OCR"
    YIELD_STRESS = "yield_stress"
class IsotacheParameters(SoilBaseModel):
    """Isotache (a/b/c) compression model parameters."""
    precon_isotache_type: Optional[StateType] = None
    reloading_swelling_constant_a: Optional[
        Union[float, StochasticParameter]
    ] = StochasticParameter()  # SoilStdPriCompIndex
    primary_compression_constant_b: Optional[
        Union[float, StochasticParameter]
    ] = StochasticParameter()  # SoilStdSecCompIndex
    secondary_compression_constant_c: Optional[
        Union[float, StochasticParameter]
    ] = StochasticParameter()  # SoilStdSecCompRate
class KoppejanParameters(SoilBaseModel):
    """Koppejan compression model parameters (Cp/Cs below and above the
    preconsolidation pressure, Ap/Asec coefficients)."""
    precon_koppejan_type: Optional[StateType] = None
    preconsolidation_pressure: Optional[
        Union[float, StochasticParameter]
    ] = StochasticParameter()
    soil_ap_as_approximation_by_Cp_Cs: Optional[bool] = False
    primary_Cp: Optional[Union[float, StochasticParameter]] = StochasticParameter()
    primary_Cp_point: Optional[Union[float, StochasticParameter]] = StochasticParameter()
    secular_Cs: Optional[Union[float, StochasticParameter]] = StochasticParameter()
    secular_Cs_point: Optional[Union[float, StochasticParameter]] = StochasticParameter()
    primary_Ap: Optional[Union[float, StochasticParameter]] = StochasticParameter()
    primary_Asec: Optional[Union[float, StochasticParameter]] = StochasticParameter()
class StorageTypes(IntEnum):
    """How consolidation/storage behaviour is modelled."""
    vertical_consolidation_coefficient = 0
    constant_permeability = 1
    strain_dependent_permeability = 2
class StorageParameters(SoilBaseModel):
    """
    In this case vertical_permeability has a unit of [m/day]. In GUI
    of the D-Settlement this value is displayed as [m/s].
    """

    vertical_permeability: Optional[
        Union[float, StochasticParameter]
    ] = StochasticParameter()
    permeability_horizontal_factor: Optional[
        Union[float, StochasticParameter]
    ] = StochasticParameter()
    horizontal_permeability: Optional[
        Union[float, StochasticParameter]
    ] = StochasticParameter()
    # no explicit default: pydantic treats Optional[...] as defaulting to None
    storage_type: Optional[StorageTypes]
    # default mean 1e15 -- presumably "effectively strain-independent"; confirm
    permeability_strain_type: Optional[
        Union[float, StochasticParameter]
    ] = StochasticParameter(mean=1e15)
    vertical_consolidation_coefficient: Optional[
        Union[float, StochasticParameter]
    ] = StochasticParameter()
class SoilWeightParameters(SoilBaseModel):
    """Unit weights above (unsaturated) and below (saturated) the phreatic level."""
    saturated_weight: Optional[Union[float, StochasticParameter]] = StochasticParameter()
    unsaturated_weight: Optional[
        Union[float, StochasticParameter]
    ] = StochasticParameter()
class GrainType(IntEnum):
    """Grain size category."""
    FINE = 0
    COARSE = 1
class SoilClassificationParameters(SoilBaseModel):
    """
    Soil classification class: void ratios, porosity, relative density and
    grain-size descriptors.
    """

    initial_void_ratio: Optional[
        Union[float, StochasticParameter]
    ] = StochasticParameter()
    min_void_ratio: Optional[float] = None
    max_void_ratio: Optional[float] = None
    porosity: Optional[Union[float, StochasticParameter]] = StochasticParameter()
    relative_density: Optional[float] = None
    d_50: Optional[float] = None  # median grain diameter
    grain_type: Optional[
        GrainType
    ] = GrainType.FINE  # TODO this must refer to a intenum class
class SoilStiffnessParameters(SoilBaseModel):
    # Menard pressuremeter modulus
    emod_menard: Optional[float] = None
class ModulusSubgradeReaction(IntEnum):
    """Source of the modulus of subgrade reaction."""
    MENARD = 0
    MANUAL = 1
class LambdaType(IntEnum):
    """Method used to determine the lambda earth pressure factors."""
    MANUAL = 0
    MULLERBRESLAU = 1
    KOTTER = 2
class SubgradeReactionParameters(SoilBaseModel):
    """Subgrade reaction (soil spring) parameters: tangent/secant definitions
    plus the k branch values at top and bottom of the layer."""
    modulus_subgrade_reaction_type: Optional[ModulusSubgradeReaction] = None
    lambda_type: Optional[LambdaType] = None
    tangent_secant_1: Optional[float] = None
    tangent_secant_2: Optional[float] = None
    tangent_secant_3: Optional[float] = None
    # spring stiffness branches at the layer top...
    k_o_top: Optional[float] = None
    k_1_top: Optional[float] = None
    k_2_top: Optional[float] = None
    k_3_top: Optional[float] = None
    k_4_top: Optional[float] = None
    # ...and at the layer bottom
    k_o_bottom: Optional[float] = None
    k_1_bottom: Optional[float] = None
    k_2_bottom: Optional[float] = None
    k_3_bottom: Optional[float] = None
    k_4_bottom: Optional[float] = None
    k_1_top_side: Optional[float] = None
    k_2_top_side: Optional[float] = None
    k_3_top_side: Optional[float] = None
    k_1_bottom_side: Optional[float] = None
    k_2_bottom_side: Optional[float] = None
    k_3_bottom_side: Optional[float] = None
class EarthPressureCoefficientsType(IntEnum):
    """Source of the earth pressure coefficients."""
    MANUAL = 0
    BRINCHHANSEN = 1
class EarthPressureCoefficients(SoilBaseModel):
    """Active / neutral / passive earth pressure coefficients; only used when
    the type is MANUAL, otherwise derived (Brinch Hansen)."""
    earth_pressure_coefficients_type: Optional[
        EarthPressureCoefficientsType
    ] = EarthPressureCoefficientsType.BRINCHHANSEN
    active: Optional[float] = None
    neutral: Optional[float] = None
    passive: Optional[float] = None
class HorizontalBehaviourType(IntEnum):
    """Horizontal soil response category."""
    Stiff = 1
    Elastic = 2
    Foundation = 3
class HorizontalBehaviour(SoilBaseModel):
    """
    Horizontal behaviour class
    """

    horizontal_behavior_type: Optional[HorizontalBehaviourType] = None
    soil_elasticity: Optional[float] = None
    soil_default_elasticity: Optional[bool] = None
class ConeResistance(SoilBaseModel):
    """
    Cone resistance class
    """

    # NOTE(review): annotated as bare Enum -- a dedicated enum type would be
    # safer; confirm which enum the consuming model expects.
    max_cone_resistance_type: Optional[Enum] = None
    max_cone_resistance: Optional[float] = None
class StatePoint(SoilBaseModel):
    """A single stress-state point referencing a state layer."""
    state_point_id: Optional[str] = None
    state_layer_id: Optional[str] = None
    state_point_type: Optional[StateType] = None
    state_point_is_probabilistic: Optional[bool] = None
class StateLine(SoilBaseModel):
    """
    TODO Decide if we want to keep state in soil class
    TODO decide if we want cross-dependency to geometry class
    """

    state_line_points: Optional[List[Point]]
class SoilState(SoilBaseModel):
    """Initial stress state of a soil layer (yield stress / OCR / POP)."""
    use_equivalent_age: Optional[bool] = None
    equivalent_age: Optional[float] = None
    # NOTE(review): singular types for plural names -- presumably these should
    # be List[StatePoint]/List[StateLine]; confirm with the consuming models.
    state_points: Optional[StatePoint] = None
    state_lines: Optional[StateLine] = None
    yield_stress_layer: Optional[
        Union[float, StochasticParameter]
    ] = StochasticParameter()
    ocr_layer: Optional[Union[float, StochasticParameter]] = StochasticParameter()
    pop_layer: Optional[Union[float, StochasticParameter]] = StochasticParameter()
class SoilType(IntEnum):
    """Coarse soil type classification."""
    GRAVEL = 0
    SAND = 1
    LOAM = 2
    CLAY = 3
    PEAT = 4
    SANDY_LOAM = 5
class Soil(SoilBaseModel):
    """Soil Material.

    Aggregates every parameter group used across the D-series models; each
    group defaults to an empty instance (pydantic copies defaults per model
    instance, so these are not shared mutable state).
    """

    id: Optional[str] = None
    name: Optional[str] = None
    code: Optional[str] = None
    color: Color = Color("grey")
    # per-model parameter groups
    mohr_coulomb_parameters: Optional[MohrCoulombParameters] = MohrCoulombParameters()
    undrained_parameters: Optional[UndrainedParameters] = UndrainedParameters()
    bjerrum_parameters: Optional[BjerrumParameters] = BjerrumParameters()
    isotache_parameters: Optional[IsotacheParameters] = IsotacheParameters()
    koppejan_parameters: Optional[KoppejanParameters] = KoppejanParameters()
    storage_parameters: Optional[StorageParameters] = StorageParameters()
    soil_weight_parameters: Optional[SoilWeightParameters] = SoilWeightParameters()
    soil_classification_parameters: Optional[
        SoilClassificationParameters
    ] = SoilClassificationParameters()
    soil_stiffness_parameters: Optional[
        SoilStiffnessParameters
    ] = SoilStiffnessParameters()
    horizontal_behaviour: Optional[HorizontalBehaviour] = HorizontalBehaviour()
    cone_resistance: Optional[ConeResistance] = ConeResistance()
    use_tension: Optional[bool] = None
    use_probabilistic_defaults: Optional[bool] = False
    soil_type_settlement_by_vibrations: Optional[SoilType] = SoilType.SAND
    soil_type_nl: Optional[SoilType] = SoilType.SAND
    soil_type_be: Optional[SoilType] = SoilType.SAND
    soil_state: Optional[SoilState] = SoilState()
    shear_strength_model_above_phreatic_level: Optional[
        ShearStrengthModelTypePhreaticLevel
    ] = None
    shear_strength_model_below_phreatic_level: Optional[
        ShearStrengthModelTypePhreaticLevel
    ] = None
    is_drained: Optional[bool] = None
    is_probabilistic: Optional[bool] = None
    earth_pressure_coefficients: Optional[
        EarthPressureCoefficients
    ] = EarthPressureCoefficients()
    subgrade_reaction_parameters: Optional[
        SubgradeReactionParameters
    ] = SubgradeReactionParameters()
    shell_factor: Optional[float] = None
@staticmethod
def set_stochastic_parameters(input_class: object):
"""
Converts float to stochastic parameter, where the mean is set as the input float value
Args:
input_class:
Returns:
"""
try:
class_dict = input_class.dict()
except AttributeError:
return input_class
for field in input_class.__fields__:
parameter = input_class.__fields__[field]
if isinstance(parameter.default, StochasticParameter):
if isinstance(class_dict[field], float):
setattr(
input_class, field, StochasticParameter(mean=class_dict[field])
)
return input_class
def set_all_stochastic_parameters(self):
"""
Loop over all fields in soil class, and converts floats to stochastic parameters if necessary
Returns:
"""
for field in self.__fields__:
self.set_stochastic_parameters(self.__getattribute__(field))
@staticmethod
def __transfer_soil_dict_to_model(soil_dict, model_soil):
"""
Transfers items from soil dictionary to model if the item is not None
Args:
soil_dict: soil dictionary
model_soil: internal soil in model
Returns:
"""
for key, value in dict(
soil_dict
).items(): # override default values with those of the soil
if key in dict(model_soil).keys() and value is not None:
setattr(model_soil, key, value)
return model_soil
def __to_dstability_stochastic_parameter(
self, stochastic_parameter: StochasticParameter
):
from geolib.models.dstability.internal import (
PersistableStochasticParameter as DStabilityStochasticParameter,
)
kwargs = {
"IsProbabilistic": stochastic_parameter.is_probabilistic,
"Mean": stochastic_parameter.mean,
"StandardDeviation": stochastic_parameter.standard_deviation,
}
return self.__transfer_soil_dict_to_model(kwargs, DStabilityStochasticParameter())
def __to_su_table(self):
from geolib.models.dstability.internal import PersistableSuTable
kwargs = {
"StrengthIncreaseExponent": self.undrained_parameters.strength_increase_exponent.mean,
"StrengthIncreaseExponentStochasticParameter": self.__to_dstability_stochastic_parameter(
stochastic_parameter=self.undrained_parameters.strength_increase_exponent
),
"IsSuTableProbabilistic": self.undrained_parameters.probabilistic_su_table,
"SuTableVariationCoefficient": self.undrained_parameters.su_table_variation_coefficient,
"SuTablePoints": self.undrained_parameters.to_su_table_points(),
}
return self.__transfer_soil_dict_to_model(kwargs, PersistableSuTable())
def _to_dstability(self):
from geolib.models.dstability.internal import PersistableSoil as DStabilitySoil
self.set_all_stochastic_parameters()
if self.shear_strength_model_above_phreatic_level is not None:
shear_strength_model_above_phreatic_level = (
self.shear_strength_model_above_phreatic_level.transform_shear_strength_model_type_to_internal()
)
else:
shear_strength_model_above_phreatic_level = (
self.shear_strength_model_above_phreatic_level
)
if self.shear_strength_model_below_phreatic_level is not None:
shear_strength_model_below_phreatic_level = (
self.shear_strength_model_below_phreatic_level.transform_shear_strength_model_type_to_internal()
)
else:
shear_strength_model_below_phreatic_level = | |
_progress_i += 1
if ((_progress_i % _progress_N) == 0):
if (_log_level > SILENT) and ( force or (_log_level < DEBUG ) ):
if (_progress_obj != None):
if (_progress_id == id):
_progress_obj.next(_progress_N)
return True
else:
return False
else:
print('.', end='', flush=True)
return True
return False
def finish_progress_bar():
    """Finish and close the global progress bar object, if one is active.

    No-op when no progress bar has been started. Always returns None.
    """
    global _progress_obj
    if _progress_obj is not None:  # idiom fix: was "!= None"
        _progress_obj.finish()
        print_new_line()
        _progress_obj = None
    return None
def wait(sec: int):
    """Sleep for *sec* seconds, ticking the progress indicator once per second.

    Args:
        sec: number of whole seconds to wait (0 or negative waits not at all).
    """
    for _ in range(sec):  # fix: dropped the "i = i" warning-suppression hack
        time.sleep(1)
        print_progress(True)
    print('', flush=True)
def print_new_line(force = False):
    """Emit a newline to the console (honouring the global log level) and log
    an empty line.

    Args:
        force: print even when the log level would normally suppress it
               (unless the level is SILENT).
    """
    # console newline only between SILENT and DEBUG, or when forced
    if (_log_level > SILENT) and ( force or (_log_level < DEBUG ) ):
        print('', flush=True)
    _log_msg('')
def NOW() -> int:
    """Return the current POSIX time truncated to whole seconds."""
    seconds_since_epoch = time.time()
    return int(seconds_since_epoch)
def get_date_str(timestamp = None, date_format = '%Y%m%d_%H%M%S'):
    """Return a local-time date string (default format YYYYMMDD_HHMMSS).

    Bug fix: the old default ``timestamp=NOW()`` was evaluated once at module
    import, so every defaulted call returned the import time instead of the
    current time. ``None`` (the new default) now means "now", resolved per call.

    Args:
        timestamp: POSIX timestamp in seconds, or None for the current time.
        date_format: strftime format string.

    Returns:
        The formatted string, or None on error.
    """
    try:
        if timestamp is None:
            timestamp = NOW()
        ts = datetime.fromtimestamp(timestamp)
        return ts.strftime(date_format)
    except Exception as err:
        error(exception=err)
        return None
def rebase_file_args(current_dir, files):
    """Rebase file command-line params after moving the working dir to the script's dir.

    Args:
        current_dir: directory to prefix onto relative file names.
        files: a single path or a list of paths; the sentinels '-' and 'db:'
               (as the first list element) mean "not real files" and are
               passed through unchanged.

    Returns:
        The rebased path(s), or None for unsupported input types.
    """
    if isinstance(files, list):
        # fix: an empty list used to crash on files[0]; pass it through instead
        if not files or files[0] in ('-', 'db:'):
            return files
        return [os.path.join(current_dir, fn) for fn in files]
    if isinstance(files, str):
        return os.path.join(current_dir, files)
    return None  # explicit: previously fell off the end implicitly
async def read_int_list(filename: str) -> list:
    """Read a file and return the integers parsed from its lines.

    Lines that do not parse as int are skipped silently (best effort).
    Returns an empty list when the file cannot be read.

    Note: the return annotation was ``list()`` (a list *instance*), now the
    ``list`` type.
    """
    input_list = []
    try:
        async with aiofiles.open(filename) as fp:
            async for line in fp:
                try:
                    input_list.append(int(line))
                except (ValueError, TypeError):  # fix: unused "as err" removed
                    pass  # deliberate best-effort skip of non-numeric lines
    except Exception as err:
        error('Unexpected error when reading file: ' + filename, err)
    return input_list
async def save_JSON(filename: str, json_data: dict, sort_keys = False, pretty = True) -> bool:
    """Save JSON data into file.

    Creates missing parent directories. Returns True on success, False on any
    error (which is logged, not raised).
    """
    try:
        dirname = os.path.dirname(filename)
        if (dirname != '') and not os.path.isdir(dirname):
            # NOTE(review): mode is computed by arithmetic subtraction, not
            # bit-masking -- correct only while UMASK's bits are a subset of
            # 0o770; confirm the intent was not 0o770 & ~UMASK.
            os.makedirs(dirname, 0o770-UMASK)
        async with aiofiles.open(filename,'w', encoding="utf8") as outfile:
            if pretty:
                await outfile.write(json.dumps(json_data, ensure_ascii=False, indent=4, sort_keys=sort_keys))
            else:
                await outfile.write(json.dumps(json_data, ensure_ascii=False, sort_keys=sort_keys))
        return True
    except Exception as err:
        error('Error saving JSON', err)
    return False
async def open_JSON(filename: str, chk_JSON_func = None) -> dict:
    """Read and parse a JSON file, optionally validating its content.

    Args:
        filename: path of the JSON file.
        chk_JSON_func: optional predicate applied to the parsed data; when it
            returns falsy the file is treated as invalid.

    Returns:
        The parsed data, or None on read/parse error or failed validation.
    """
    try:
        async with aiofiles.open(filename) as fp:
            json_data = json.loads(await fp.read())
            if (chk_JSON_func == None):
                debug("JSON file content not checked: " + filename)
                return json_data
            elif chk_JSON_func(json_data):
                debug("JSON File is valid: " + filename)
                return json_data
            else:
                # falls through to the final "return None" below
                debug('JSON File has invalid content: ' + filename)
    except Exception as err:
        error('Unexpected error when reading file: ' + filename, err)
    return None
async def get_url_JSON(session: aiohttp.ClientSession, url: str, chk_JSON_func = None, max_tries = MAX_RETRIES) -> dict:
    """Retrieve (GET) an URL and return the parsed JSON object.

    Retries up to *max_tries* times with a SLEEP pause between attempts; each
    response may be validated by *chk_JSON_func*. Returns None when no valid
    JSON could be obtained. Exits the process when *session* is not set.
    """
    if session == None:
        error('Session must be initialized first')
        sys.exit(1)
    if url == None:
        return None
    # To avoid excessive use of servers
    for retry in range(1,max_tries+1):
        try:
            async with session.get(url) as resp:
                if resp.status == 200:
                    debug('HTTP request OK')
                    json_resp = await resp.json()
                    if (chk_JSON_func == None) or chk_JSON_func(json_resp):
                        # debug("Received valid JSON: " + str(json_resp))
                        return json_resp
                    # Sometimes WG API returns JSON error even a retry gives valid JSON
                elif resp.status == 407:
                    json_resp_err = await resp.json()
                    error('WG API returned 407: ' + json_resp_err['error']['message'])
                if retry == max_tries:
                    break
                debug('Retrying URL [' + str(retry) + '/' + str(max_tries) + ']: ' + url )
                await asyncio.sleep(SLEEP)
        except aiohttp.ClientError as err:
            debug("Could not retrieve URL: " + url, exception=err)
        except asyncio.CancelledError as err:
            debug('Queue gets cancelled while still working.', exception=err)
        except Exception as err:
            debug('Unexpected Exception', exception=err)
    debug("Could not retrieve URL: " + url)
    return None
def bld_dict_hierarcy(d : dict, key : str, value) -> dict:
    """Set *value* in *d* under a hierarchical '.'-separated *key*, creating
    nested dicts as needed; returns the mutated dict (None on error)."""
    try:
        parts = key.split('.')
        head = parts.pop(0)
        if not parts:
            # leaf level: assign directly
            d[head] = value
        else:
            remainder = '.'.join(parts)
            child = d[head] if head in d else {}
            d[head] = bld_dict_hierarcy(child, remainder, value)
        return d
    except KeyError as err:
        error('Key not found', err)
    except Exception as err:
        error('Unexpected Exception', err)
    return None
def get_JSON_keypath(keypath: str, key: str):
    """Append *key* to a dotted keypath; a None keypath starts a new path."""
    if keypath is None:
        return key
    return '.'.join([keypath, key])

def get_JSON_value(json, key : str = None, keys : list = None, keypath = None):
    """Resolve a dotted path in parsed JSON data.

    Dict levels are addressed by plain keys, list levels by ``[N]`` segments
    (e.g. ``"a.[1].b"``). *keys* is the pre-split form used on recursion;
    *keypath* accumulates the path already walked, for error messages.

    Raises:
        KeyError: when a segment is missing, malformed, or out of range.

    Bug fixes:
    - the list index was used as a *string* (``m.group(1)``), which raised
      TypeError instead of indexing -- now converted with int();
    - a non-matching ``[N]`` pattern raised AttributeError on ``m.groups()``
      because ``m`` was None -- now reported as KeyError;
    - the final "Key not found" message read ``keys[0]`` (the *next* segment,
      or IndexError when exhausted) -- now reports the current segment.
    """
    if keys is None:
        keys = key.split('.') if key is not None else []
    if len(keys) == 0:
        return json
    key = keys.pop(0)
    if type(json) == dict:
        if key in json:
            return get_JSON_value(json[key], keys=keys, keypath=get_JSON_keypath(keypath, key))
        raise KeyError('Key: ' + get_JSON_keypath(keypath, key) + ' not found')
    if type(json) == list:
        m = re.match(r'^\[(\d+)\]$', key)
        if m is None:
            raise KeyError('Invalid key given: ' + get_JSON_keypath(keypath, key))
        ndx = int(m.group(1))
        try:
            return get_JSON_value(json[ndx], keys=keys, keypath=get_JSON_keypath(keypath, key))
        except IndexError:
            raise KeyError('JSON array index out of range: ' + get_JSON_keypath(keypath, key))
    raise KeyError('Key not found: ' + get_JSON_keypath(keypath, key))
def sort_dict(d: dict, number: bool = False) -> dict:
    """Return a copy of *d* with keys in ascending order; when *number* is
    True the (string) keys are compared numerically."""
    key_fn = (lambda item: int(item[0])) if number else (lambda item: item[0])
    return dict(sorted(d.items(), key=key_fn))
# -----------------------------------------------------------
# Class SlowBar
# -----------------------------------------------------------
class SlowBar(IncrementalBar):
    """Progress bar whose suffix renders the ETA as hours and minutes."""
    suffix = '%(index)d/%(max)d %(percent)d%% ETA %(remaining_hours).0f h %(remaining_mins).0f mins'
    @property
    def remaining_hours(self):
        # whole hours of the base class's eta (presumably seconds remaining
        # per the progress library -- confirm)
        return self.eta // 3600
    @property
    def remaining_mins(self):
        # minutes left after the whole hours are taken out
        return (self.eta - (self.eta // 3600)*3600) // 60
# -----------------------------------------------------------
# Class StatsNotFound
# -----------------------------------------------------------
class StatsNotFound(Exception):
    """Raised when requested stats cannot be found."""
    pass
# -----------------------------------------------------------
# Class WG
# -----------------------------------------------------------
class WG:
URL_WG_CLAN_INFO = 'clans/info/?application_id='
# URL_WG_PLAYER_TANK_LIST = 'tanks/stats/?fields=tank_id%2Clast_battle_time&application_id='
# URL_WG_PLAYER_TANK_LIST = 'tanks/stats/?fields=account_id%2Ctank_id%2Clast_battle_time%2Cbattle_life_time%2Call&application_id='
URL_WG_PLAYER_TANK_STATS = 'tanks/stats/?application_id='
URL_WG_ACCOUNT_ID = 'account/list/?fields=account_id%2Cnickname&application_id='
URL_WG_PLAYER_STATS = 'account/info/?application_id='
URL_WG_PLAYER_ACHIEVEMENTS = 'account/achievements/?application_id='
CACHE_DB_FILE = '.blitzutils_cache.sqlite3'
CACHE_GRACE_TIME = 30*24*3600 # 30 days cache
# sql_create_player_stats_tbl = """CREATE TABLE IF NOT EXISTS player_stats (
# account_id INTEGER NOT NULL,
# date INTEGER NOT NULL,
# stat TEXT,
# value FLOAT
# ); """
# sql_create_player_tank_stats_tbl = """CREATE TABLE IF NOT EXISTS player_tank_stats (
# account_id INTEGER NOT NULL,
# tank_id INTEGER DEFAULT NULL,
# date INTEGER NOT NULL,
# stat TEXT,
# value FLOAT
# ); """
# sql_select_player_stats = """SELECT value FROM player_stats ORDERBY date ASC
# WHERE account_id = {} AND stat = {}
# AND date >= {} LIMIT 1;"""
# TANK_STATS
SQL_TANK_STATS_TBL = 'tank_stats'
SQL_TANK_STATS_CREATE_TBL = 'CREATE TABLE IF NOT EXISTS ' + SQL_TANK_STATS_TBL + \
""" ( account_id INTEGER NOT NULL,
tank_id INTEGER NOT NULL,
update_time INTEGER NOT NULL,
stats TEXT,
PRIMARY KEY (account_id, tank_id) )"""
SQL_TANK_STATS_COUNT = 'SELECT COUNT(*) FROM ' + SQL_TANK_STATS_TBL
SQL_TANK_STATS_UPDATE = 'REPLACE INTO ' + SQL_TANK_STATS_TBL + '(account_id, tank_id, update_time, stats) VALUES(?,?,?,?)'
# PLAYER_STATS
SQL_PLAYER_STATS_TBL = 'player_stats'
SQL_PLAYER_STATS_CREATE_TBL = 'CREATE TABLE IF NOT EXISTS ' + SQL_PLAYER_STATS_TBL + \
""" ( account_id INTEGER PRIMARY KEY,
update_time INTEGER NOT NULL,
stats TEXT)"""
SQL_PLAYER_STATS_COUNT = 'SELECT COUNT(*) FROM ' + SQL_PLAYER_STATS_TBL
SQL_PLAYER_STATS_UPDATE = 'REPLACE INTO ' + SQL_PLAYER_STATS_TBL + '(account_id, update_time, stats) VALUES(?,?,?)'
SQL_PLAYER_STATS_CACHED = 'SELECT * FROM ' + SQL_PLAYER_STATS_TBL + ' WHERE account_id = ? AND update_time > ?'
# PLAYER_ACHIEVEMENTS
SQL_PLAYER_ACHIEVEMENTS_TBL = 'player_achievements'
SQL_PLAYER_ACHIEVEMENTS_CREATE_TBL = 'CREATE TABLE IF NOT EXISTS ' + SQL_PLAYER_ACHIEVEMENTS_TBL + \
""" ( account_id INTEGER PRIMARY KEY,
update_time INTEGER NOT NULL,
stats TEXT)"""
SQL_PLAYER_ACHIEVEMENTS_COUNT = 'SELECT COUNT(*) FROM ' + SQL_PLAYER_ACHIEVEMENTS_TBL
SQL_PLAYER_ACHIEVEMENTS_CACHED = 'SELECT * FROM ' + SQL_PLAYER_ACHIEVEMENTS_TBL + ' WHERE account_id = ? AND update_time > ?'
SQL_PLAYER_ACHIEVEMENTS_UPDATE = 'REPLACE INTO ' + SQL_PLAYER_ACHIEVEMENTS_TBL + '(account_id, update_time, stats) VALUES(?,?,?)'
SQL_TABLES = [ SQL_PLAYER_STATS_TBL, SQL_TANK_STATS_TBL, SQL_PLAYER_ACHIEVEMENTS_TBL ]
SQL_CHECK_TABLE_EXITS = """SELECT name FROM sqlite_master WHERE type='table' AND name=?"""
SQL_PRUNE_CACHE = """DELETE from {} WHERE update_time < {}"""
# Default data. Please use the latest maps.json
maps = {
"Random": "Random map",
"amigosville": "Falls Creek",
"asia": "Lost Temple",
"canal": "Canal",
"canyon": "Canyon",
"desert_train": "Desert Sands",
"erlenberg": "Middleburg",
"faust": "Faust",
"fort": "Macragge",
"grossberg": "Dynasty's Pearl",
"himmelsdorf": "Himmelsdorf",
"italy": "Vineyards",
"karelia": "Rockfield",
"karieri": "Copperfield",
"lake": "Mirage",
"lumber": "Alpenstadt",
"malinovka": "Winter Malinovka",
"medvedkovo": "Dead Rail",
"milbase": "Yamato Harbor",
"mountain": "Black Goldville",
"north": "North",
"ordeal": "Trial by Fire",
"pliego": "Castilla",
"port": "Port Bay",
"rock": "Mayan Ruins",
"rudniki": "Mines",
"savanna": "Oasis Palms",
"skit": "Naval Frontier",
"test": "World of Ducks",
"tutorial": "Proving Grounds"
}
tanks = None
tanks_by_tier = None
NATION = [ | |
o00OooO = "sudo iptables -t nat -C POSTROUTING -o {} -j MASQUERADE"
if ( commands . getoutput ( o00OooO . format ( o00oO0O ) ) != "" ) :
IIiii1 = lisp . lisp_get_loopback_address ( )
if ( IIiii1 ) :
iiI = "sudo iptables -t nat -A POSTROUTING -s {} -j ACCEPT"
os . system ( iiI . format ( IIiii1 ) )
if 14 - 14: OOooOOo + II111iiii % OOooOOo . oO0o * ooOoO0o
iiI = "sudo iptables -t nat -A POSTROUTING -o {} -j MASQUERADE"
os . system ( iiI . format ( o00oO0O ) )
os . system ( "sudo sysctl net.ipv4.ip_forward=1" )
if 54 - 54: ooOoO0o * I11i - I1Ii111
if 15 - 15: iII111i / O0
if 61 - 61: i1IIi / i1IIi + ooOoO0o . I1Ii111 * ooOoO0o
lisp . lisp_iid_to_interface [ IIi1OOoO0OooO ] = iii1I
if 19 - 19: o0oOOo0O0Ooo . II111iiii / i1IIi
if 82 - 82: O0 / iII111i * OoO0O00 - I11i + Oo0Ooo
if 47 - 47: I1ii11iIi11i * I1IiiI / I1ii11iIi11i + Ii1I * II111iiii
if 78 - 78: I1Ii111 - i1IIi + OoOoOO00 + Oo0Ooo * I1ii11iIi11i * o0oOOo0O0Ooo
if 97 - 97: i1IIi
if ( "SO_BINDTODEVICE" in dir ( socket ) ) : iii1I . set_socket ( o00oO0O )
if ( "PF_PACKET" in dir ( socket ) ) : iii1I . set_bridge_socket ( o00oO0O )
if 29 - 29: I1IiiI
if 37 - 37: I1ii11iIi11i * I1Ii111 * I1IiiI * O0
if 35 - 35: I1IiiI - I1ii11iIi11i * iII111i + IiII / i1IIi
if 46 - 46: Oo0Ooo . ooOoO0o % Oo0Ooo / II111iiii * ooOoO0o * OOooOOo
lisp . lisp_get_local_addresses ( )
if 59 - 59: I1Ii111 * iII111i
if 31 - 31: I11i / O0
if 57 - 57: i1IIi % ooOoO0o
if 69 - 69: o0oOOo0O0Ooo
if 69 - 69: I1Ii111
if 83 - 83: iIii1I11I1II1 . o0oOOo0O0Ooo + I1Ii111 . OoooooooOO / ooOoO0o + II111iiii
if ( Iii != None and lisp . lisp_program_hardware ) :
Oo00OoOo = "ip verify unicast source reachable-via rx"
lisp . lisp_send_to_arista ( Oo00OoOo , o00oO0O )
Oo00OoOo = 'sysctl -w "net.ipv4.conf.{}.rp_filter=0"' . format ( o00oO0O )
os . system ( Oo00OoOo )
if 90 - 90: Ii1I * iII111i / OOooOOo
if 68 - 68: OoOoOO00
if 65 - 65: oO0o
if 82 - 82: o0oOOo0O0Ooo
if 80 - 80: i1IIi % OoOoOO00 + OoO0O00 - OoooooooOO / iIii1I11I1II1 + I1Ii111
lisp . lisp_write_ipc_interfaces ( )
return
if 65 - 65: Ii1I
if 71 - 71: I1Ii111 % I1Ii111 . oO0o + i11iIiiIii - i11iIiiIii
if 16 - 16: iIii1I11I1II1 / I1IiiI / I1Ii111 - i11iIiiIii . ooOoO0o / OOooOOo
if 13 - 13: o0oOOo0O0Ooo % O0 - I1Ii111 * OoooooooOO / Oo0Ooo - OoooooooOO
if 78 - 78: oO0o % OoooooooOO
if 73 - 73: I1IiiI % ooOoO0o % IiII + i1IIi - OoooooooOO / oO0o
if 78 - 78: OoooooooOO % oO0o - i11iIiiIii
if 37 - 37: IiII % Ii1I % i1IIi
def lisp_parse_eid_in_url ( command , eid_prefix ) :
I11iiI1i1 = ""
if 23 - 23: ooOoO0o - O0 + i11iIiiIii
if 98 - 98: OoooooooOO
if 61 - 61: o0oOOo0O0Ooo . IiII . O0 + OoooooooOO + O0
if 65 - 65: i1IIi * OOooOOo * OoooooooOO - IiII . iII111i - OoO0O00
if ( eid_prefix == "0--0" ) :
command = command + "%[0]/0%"
elif ( eid_prefix . find ( "-name-" ) != - 1 ) :
eid_prefix = eid_prefix . split ( "-" )
if ( len ( eid_prefix ) > 4 ) :
eid_prefix = [ eid_prefix [ 0 ] , "name" , "-" . join ( eid_prefix [ 2 : - 1 ] ) ,
eid_prefix [ - 1 ] ]
if 71 - 71: Ii1I * OoOoOO00
if 33 - 33: i1IIi . i1IIi * OoooooooOO % I1Ii111 * o0oOOo0O0Ooo
if 64 - 64: ooOoO0o / ooOoO0o + I1ii11iIi11i * OOooOOo % OOooOOo
if 87 - 87: OoO0O00 * Oo0Ooo
if 83 - 83: i1IIi * I1Ii111 - IiII / Ii1I
OOOO0oo0 = "[" + eid_prefix [ 0 ] + "]'" + eid_prefix [ 2 ] + "'" + "/" + eid_prefix [ 3 ]
if 48 - 48: oO0o . II111iiii - OoOoOO00 % i1IIi . OoOoOO00
command = command + "%" + OOOO0oo0 + "%"
elif ( eid_prefix . count ( "-" ) in [ 9 , 10 ] ) :
if 32 - 32: Ii1I * I1IiiI - OOooOOo . Oo0Ooo / O0 + Ii1I
if 67 - 67: OoOoOO00 % Oo0Ooo
if 7 - 7: i11iIiiIii % I1ii11iIi11i / I1Ii111 % Oo0Ooo - OoO0O00
if 73 - 73: I1ii11iIi11i
eid_prefix = eid_prefix . split ( "-" )
OOOO0oo0 = "[" + eid_prefix [ 0 ] + "]" + "-" . join ( eid_prefix [ 1 : - 1 ] ) + "/" + eid_prefix [ - 1 ]
if 92 - 92: i11iIiiIii + O0 * I11i
command = command + "%" + OOOO0oo0 + "%"
elif ( eid_prefix . find ( "." ) == - 1 and eid_prefix . find ( ":" ) == - 1 ) :
eid_prefix = eid_prefix . split ( "-" )
if 60 - 60: o0oOOo0O0Ooo / Oo0Ooo
if 19 - 19: iIii1I11I1II1 . OoO0O00 / OoooooooOO
if 2 - 2: O0 - O0 % I1Ii111 / I1ii11iIi11i
if 76 - 76: OoO0O00 * oO0o - OoO0O00
if ( eid_prefix [ 1 ] == "plus" ) :
OOOO0oo0 = "[" + eid_prefix [ 0 ] + "]+" + eid_prefix [ 2 ] + "/" + eid_prefix [ 3 ]
if 57 - 57: OoooooooOO / OoOoOO00 + oO0o . Ii1I
command = command + "%" + OOOO0oo0 + "%"
else :
if 14 - 14: i11iIiiIii % OOooOOo * o0oOOo0O0Ooo * OoOoOO00
if 55 - 55: I1Ii111 * OOooOOo * I1Ii111
if 70 - 70: O0 . Ii1I
if 33 - 33: OOooOOo * Ii1I
OOOO0oo0 = "[" + eid_prefix [ 0 ] + "]" + eid_prefix [ 1 ] + "-" + eid_prefix [ 2 ] + "-" + eid_prefix [ 3 ] + "/" + eid_prefix [ 4 ]
if 64 - 64: i11iIiiIii . iIii1I11I1II1
if 7 - 7: OoOoOO00 % ooOoO0o + OoOoOO00 - OoOoOO00 * i11iIiiIii % OoO0O00
if 57 - 57: OOooOOo / OoO0O00 + I1ii11iIi11i
if 60 - 60: O0 * Oo0Ooo % OOooOOo + IiII . OoO0O00 . Oo0Ooo
if 70 - 70: I11i . I1ii11iIi11i * oO0o
if ( len ( eid_prefix ) == 10 ) :
I11iiI1i1 = "[" + eid_prefix [ 5 ] + "]" + eid_prefix [ 6 ] + "-" + eid_prefix [ 7 ] + "-" + eid_prefix [ 8 ] + "/" + eid_prefix [ 9 ]
if 97 - 97: oO0o . iIii1I11I1II1 - OOooOOo
command = command + "%" + OOOO0oo0 + "%" + I11iiI1i1
else :
command = command + "%" + OOOO0oo0 + "%"
if 23 - 23: I1ii11iIi11i % I11i
if 18 - 18: OoooooooOO . i1IIi + II111iiii
if 99 - 99: I1Ii111 - I1ii11iIi11i - I1IiiI - I1Ii111 + OoO0O00 + II111iiii
else :
if 34 - 34: I1Ii111 * I11i
if 31 - 31: IiII . oO0o
if 40 - 40: Ii1I - I11i / II111iiii * i1IIi + IiII * II111iiii
if 53 - 53: I1ii11iIi11i - i11iIiiIii . OoO0O00 / OoOoOO00 - I1Ii111
eid_prefix = eid_prefix . split ( "-" )
if ( eid_prefix [ 1 ] == "*" ) :
OOOO0oo0 = ""
I11iiI1i1 = "[" + eid_prefix [ 2 ] + "]" + eid_prefix [ 3 ] + "/" + eid_prefix [ 4 ]
if 99 | |
<reponame>vespa-mrs/vespa
# Python Modules.
import copy
import math
# 3rd party stuff
import numpy as np
# Local imports
import vespa.common.rfp_rf_result as rfp_rf_result
from vespa.common.transform_run_exception import TransformRunException
from pylab import *
PI = np.pi  # module-wide shorthand for numpy's pi, used throughout the pulse math
def run(trans_desc):
    """
    Stub Summary
    ------------------
    This is a template that you can use as a starting point to develop
    algorithm code for both Create and Modify types of Transforms.
    It has the same format needed for cut/paste insertion directly into
    the Transform Editor.
    It is also set up so you can call it directly from the command line.
    The code in the __name__ == '__main__' section will create the
    'trans_desc' dictionary based on default values you provide, and
    the resultant rf waveform, gradient and time axis will be used to
    run a bloch simulation to create a frequency profile for display.

    Create or Modify Transform Information
    ---------------------------------------------
    Listed below are the input parameters and extra parameters being passed
    into this script.

    time_steps - int, the number of data points in the output waveform
    duration - float, total time of pulse, in msec
    resolution - int, the number of points used in calculating the
        frequency domain computations.
    bandwidth_convention - int, choice
        0 for "conventional" (default, FWHM)
        1 for spectroscopy convention (MINIMUM)
        2 for filter convention (MAXIMUM)

    slr_pulse returns a tuple of 4 items defined below:
    rf_waveform - ndarray, complex, real/imag values of the rf_pulse.
    rf_xaxis - ndarray, float, same size as rf_waveform contains
        the corresponding x axis points.
    gradient - ndarray, float OR None
    grad_xaxis - ndarray, float OR None

    May raise (or throw):
    TransformRunException - to indicate user calculated failure of
        algorithm or parameters

    Notes
    -------------------------------
    Derived from Warnking paper MRM 52:1190-1199 (2004)
    Note. this code was rewritten to be all in one function (ie. no separate
    aBt or bBt functs so as to make port to C/C++ easier
    vref [uT] = reference voltage for scaling, default for Siemens 1ms hard 180 pulse = 11.7uT

    Pulse Shape Parameters and Variables and Their Units and Definitions
    ----------------------------------------------------------------------------------------------
    Variable    Units     Description                            Definition
    ----------------------------------------------------------------------------------------------
    a, a0, aB   rad       Amplitude parameter                    Eqs. [1] and [2]
    alpha       deg       Effective flip angle                   alpha = acos(1-2Pe)
    AM          rad/s     RF amplitude modulation function       Eq. [1]
    b, b0, bB   rad       Bandwidth parameter                    Eqs. [1] and [2], b = pi*mu
    beta        rad       Truncation factor of driving function  g(+/-Tp/2)=+/-2*beta
    c           1         Normalized off-resonance               Eqs. [3] and [5]
    f0          1         Maximal relative bandwidth increase    Eq. [15]
    FM          rad/s     RF frequency modulation function       Eq. [2]
    g           rad       HS driving function                    Eqs. [1] and [2]
    gdot        rad/s     Derivative of the driving function
    gamma       rad/s*T   Gyromagnetic ratio                     2.675222exp8 rad/s*T
    G           T/m       Slice-select gradient                  Eq. [18]
    kappa       1         BASSI amplitude scaling parameter      Eqs. [14] and [15]
    L           1         relative RF energy, as function of Pc  Eq. 9
    mu          1         Bandwidth parameter, notation from (14)  mu = beta/pi
    Pe          1         Population inversion                   Pe=(1-cos(alpha))/2
    Tp          s         Pulse duration
    x0          m         Center of inversion / saturation band
    deltax      m         Width of inversion / saturation slab

    The population Pe inversion corresponds to an effective flip angle (i.e., the
    flip angle of a plane rotation that would produce the same longitudinal
    magnetization), of alpha = arccos(1-2Pe).

    Pulse parameters, as well as peak B1, peak gradient amplitude (Gmax) and width of the frequency
    sweep (BW), for the pulses compared in the simulations and phantom experiments
    -----------------------------------------------------------------------------------------------
    Pulse             Tp      alpha   beta    b0     f0    Peak B1   Gmax     BW
                      (ms)    (deg)   (rad)                (uT)      mT/m)    (kHz)
    -----------------------------------------------------------------------------------------------
    HS                8.1     90      5.3     157    1     23.0      4.9      20.8
    FOCI              8.1     90      5.3     157    3.2   23.0      15.7     66.7
    BASSI(kappa=2)    8.1     90      5.3     168    3     23.1      15.7     66.7
    VERSE-HS          8.1     90      5.3     503          22.8      19.0     81.0
    HS                10.24   170     5.3     37.7   1     22.9      0.9      4.0
    FOCI              10.24   175     6.3     22.8   22    23.0      14.7     62.7
    BASSI(kappa=2)    10.24   175     6.3     22.8   22    23.0      14.7     62.7
    BASSI(kappa=1.6)  10.24   175     6.3     22.8   22    23.0      14.7     62.7
    BASSI(GOIA)       10.24   175     6.3     22.8   22    23.0      14.7     62.7
    VERSE-HS          10.24   175     5.3     251          23.0      23.6     100.5
    """
    param = trans_desc.parameters
    extra = trans_desc.extra

    #--------------------------------------------------------------------------
    # unpack and convert parameters
    #
    # - Add or Modify the values below as needed
    time_steps      = int(param["time_steps"])            # int, output waveform length
    duration        = float(param["duration"])            # float, msec
    flip_angle      = float(param['tip_angle'])           # float, deg
    trunc_factor    = float(param['trunc_factor'])        # float, rad (beta)
    bw_factor       = float(param['bw_factor'])           # float, rad (b0)
    max_relative_bw = float(param['max_relative_bw'])     # float (f0)
    slab_width      = float(param['slab_width'])          # float, in meters
    slab_center     = float(param['slab_center'])         # float, in meters
    ampl_scale      = float(param['ampl_scale'])          # float, 2 or 1.6 (kappa)

    dwell_time = (1000 * duration) / (time_steps)         # in usec

    # these extra items may be used in making waveform or profile
    bandwidth_convention = int(extra['pulse_bandwidth_type'])  # Choice, 0-HalfHeight, 1-Min, 2-Max
    resolution = int(extra['calc_resolution'])                 # int
    gamma = float(extra['gamma'])                              # float MHz/T

    # convert gamma to required units -> 1e8*rad/s*T
    gamma = 2.0 * PI * gamma / 100.0
    # note on gamma units  MHz/T -> 2pi -> rad*MHz/T = rad*1e6*Hz/T = rad*1e6/(sec*T)
    #                                   = rad*1e2*Hz/gauss
    #  - so, MHz/T -> 2pi * 100 -> rad*Hz/gauss
    #  -> 42.576 MHz/T -> 26751.3 Hz/gauss
    #  -> 42.576 T/(rad MHz) -> 2.67513 1e8 rad/(sec T)

    # Use TransformRunException to check parameter ranges and alert users
    #  - an example is shown below
    #  - when your code returns with this exception, it fails 'gracefully'
    #  - no result is changed by this exception
    #  - an error dialog shows the message passed back
    #  - can be used *anywhere* in code to fail and pass info back to the user
    if time_steps < 1:
        error_msg = "The number of time steps in the pulse must be > 0"
        raise TransformRunException(error_msg, 1)

    #--------------------------------------------------------------------------
    # transform algorithm code starts here

    npts   = time_steps        #1024  # 8100 with dwell 0.000001
    dwell  = dwell_time * 1e-6 #0.000002
    alpha  = flip_angle        #90.0
    beta   = trunc_factor      #5.3
    b0     = bw_factor         #15.0  #168.0
    f0     = max_relative_bw   #3.0
    deltax = slab_width        #0.04
    x0     = slab_center       #0.0
    kappa  = ampl_scale        #2.0

    # Output Units: Tesla, Hz, T/m, deg
    #
    # BUG FIX: kappa was previously hard-coded to 2.0 in this call, silently
    # ignoring the user-supplied 'ampl_scale' parameter unpacked above.
    amt, fmt, g, phit = pulse_bassi_warnking(npts, dwell,
                                             alpha, beta,
                                             b0, f0,
                                             kappa=kappa,
                                             deltax=deltax,
                                             x0=x0,
                                             gamma=gamma*1e8)

    amt = amt * 1000.0      # convert T to mT
    g   = g * 1000.0        # convert T/m to mT/m

    rf_waveform = amt * np.exp(1j*phit)
    rf_xaxis    = np.arange(time_steps) * dwell
    gradient    = g
    grad_xaxis  = np.arange(time_steps) * dwell

    # end transform algorithm
    #--------------------------------------------------------------------------

    #--------------------------------------------------------------------------
    # fill an output dictionary, or leave as None
    outputs = {}
    # bandwidth from the (constant) first gradient sample:
    # (mT/m -> G/cm) * Hz/G * cm
    bw = (g[0] * 0.1) * 4358.0 * (deltax*100)
    outputs['bw'] = bw

    # wid = bw * 10 / (g[0] * 4358)     # this is in cm
    # outputs['wid'] = wid

    #--------------------------------------------------------------------------
    # ensure you are returning ndarrays or None
    rf_y = np.array(rf_waveform)
    rf_x = np.array(rf_xaxis)
    gr_y = gradient
    gr_x = grad_xaxis

    return rf_y, rf_x, gr_y, gr_x, outputs
def pulse_bassi_warnking(npts, dwell, alpha, beta, b0, f0,
vref=11.7, b1ref=11.7, kappa=2.0,
x0=0.0, deltax=0.02, gamma=267513000.0):
r"""
See comments above for description of this pulse algorithm
gamma - defaults to 1H value
"""
# gamma = 267522200. # 2.675222 x 10^8 rad/s*T
Tp = npts * dwell
t = np.arange(npts) * dwell
t = t - (Tp/2.0)
Pe = 0.5*(1-np.cos(alpha*PI/180.0)) # from comment above
# Equation 9
L = (1.0/(2*PI))*np.log(1.0/(np.sqrt(1-Pe)))
# Equation 15, bB(t) = bBt
bBt = np.power(np.power((np.power(np.cosh(2*beta*t/Tp),kappa)*(b0-(PI*L)) + (PI*L)),-2) + np.power(f0*b0,-2), -0.5)
bB0 = np.power(np.power((np.power(np.cosh(2*beta*0/Tp),kappa)*(b0-(PI*L)) + (PI*L)),-2) + np.power(f0*b0,-2), -0.5)
# Equation 14
aBt = np.power((bBt-PI*L)/(b0-PI*L),1.0/kappa) * np.sqrt(b0*b0 - 4*np.power(np.arccosh(np.cosh(b0/2)*np.sqrt(1-Pe)),2))
aB0 = np.power((bB0-PI*L)/(b0-PI*L),1.0/kappa) * np.sqrt(b0*b0 - 4*np.power(np.arccosh(np.cosh(b0/2)*np.sqrt(1-Pe)),2))
# Equation 16
#
# Acc'd to paper, if we plug in vref and b1ref output is in Volts
# For now, we set vref=b1ref thus removing that term from the equation
| |
<reponame>rlatawiec/fastai<filename>fastai/vision/data.py<gh_stars>0
"Manages data input pipeline - folderstransformbatch input. Includes support for classification, segmentation and bounding boxes"
from ..torch_core import *
from .image import *
from .transform import *
from ..data_block import *
from ..basic_data import *
from ..layers import *
from .learner import *
from ..core import *
from torchvision import transforms as tvt
from . import *
import os
import json
import torch
from torch.utils.data import Dataset
from pathlib import Path
from zipfile import ZipFile
import urllib.request
import random
from skimage import io, transform
# Public API of this module.
__all__ = ['COCO_download', 'COCO_load', 'get_image_files', 'denormalize', 'get_annotations', 'ImageDataBunch',
           'ImageList', 'normalize', 'normalize_funcs', 'resize_to',
           'channel_view', 'mnist_stats', 'cifar_stats', 'imagenet_stats', 'download_images',
           'verify_images', 'bb_pad_collate', 'ImageImageList', 'PointsLabelList',
           'ObjectCategoryList', 'ObjectItemList', 'SegmentationLabelList', 'SegmentationItemList', 'PointsItemList',
           'clip_annotations', 'COCODataset', 'LoadVideo']
# Every file extension that the `mimetypes` registry maps to an image/* MIME
# type (mimetypes is expected in scope via the star imports above).
image_extensions = set(k for k,v in mimetypes.types_map.items() if v.startswith('image/'))
def COCO_download(root_dir=str(os.getcwd()), destiny_folder="COCO", dataset=None, category=None, random_train=None,
                  random_valid=None,
                  annot_link='http://images.cocodataset.org/annotations/annotations_trainval2017.zip'):
    '''
    Download COCO annotations and image sets, either all or specific classes.
    Args:
        root_dir (string): path where the COCO database will be stored.
        destiny_folder (string): name of folder to which download COCO database.
        dataset (string): either 'all', 'train', or 'valid' - determines which image set will be downloaded.
        category (list): if list of categories provided, only images of those categories will be downloaded.
        random_train (int): number of images to download from training set.
        random_valid (int): number of images to download from validation set.
        annot_link (string): URL to COCO annotations.
    '''
    # NOTE: root_dir's default is evaluated once at import time (cwd at import).
    os.makedirs('{}/{}'.format(root_dir, destiny_folder), exist_ok=True)
    path = '{}/{}'.format(root_dir, destiny_folder)  # go to COCO directory
    # Fetch the annotations zip only when neither the zip nor an extracted
    # 'annotations' folder is already present.
    if os.path.isfile('{}/{}'.format(path, annot_link.split('/')[-1])):
        print('Found annotations zip.')
        pass
    elif os.path.isdir('{}/annotations'.format(path)):
        print('Found annotations folder.')
        pass
    else:
        print('No annotations found, downloading.')
        urllib.request.urlretrieve(annot_link, '{}/{}'.format(path, annot_link.split('/')[-1]))
    # Extract and delete the zip; FileNotFoundError means it was already
    # extracted (annotations folder present, no zip) - that is fine.
    try:
        zip_ref = ZipFile('{}/{}'.format(path, annot_link.split('/')[-1]), 'r')
        zip_ref.extractall(path)
        zip_ref.close()
        os.remove('{}/{}'.format(path, annot_link.split('/')[-1]))
    except FileNotFoundError:
        pass
    datasets = make_dataset_dirs(dataset, path)
    for i in datasets:
        if i == 'train':
            path2 = '{}/annotations'.format(path)
            # NOTE(review): if no 'instances_train*' file exists, train_annot
            # is never bound and the open() below raises NameError - confirm
            # the annotations archive always provides it.
            for i2 in os.listdir(path2):
                if os.path.isfile(os.path.join(path2, i2)) and 'instances_train' in i2:
                    train_annot = '{}/{}'.format(path2, i2)
                    print('Found train annotations in {}'.format(train_annot))
                    break
            with open(train_annot, 'r') as file:
                annots = json.load(file)
            random_sample = random_train
        else:
            path2 = '{}/annotations'.format(path)
            for i2 in os.listdir(path2):
                if os.path.isfile(os.path.join(path2, i2)) and 'instances_val' in i2:
                    val_annots = '{}/{}'.format(path2, i2)
                    print('Found validation annotations in {}'.format(val_annots))
                    break
            with open(val_annots, 'r') as file:
                annots = json.load(file)
            random_sample = random_valid
        print('Getting images urls.')
        images_to_download = get_image_urls_and_names(annots, random_sample, category)
        print(
            'Downloading {} {} images to {}. Images in destination folder with same name will NOT be replaced.'.format(
                len(images_to_download), i, '{}/{}/{}'.format(root_dir, destiny_folder, i)))
        path3 = '{}/{}/{}'.format(root_dir, destiny_folder, i)
        # Skip anything already on disk so existing files are never replaced.
        onlyfiles = [f for f in os.listdir(path3) if os.path.isfile(os.path.join(path3, f))]
        for k in onlyfiles: images_to_download.pop(k, None)
        # NOTE(review): this counts every file already in the folder, not just
        # the ones that overlapped with this download - the summary message
        # below can overstate "already in folder".
        found_in_folder = len(onlyfiles)
        for file_name in images_to_download:
            urllib.request.urlretrieve(images_to_download[file_name], '{}/{}'.format(path3, file_name))
        print(
            'Downloaded {} images, {} images were already in folder.'.format(len(images_to_download), found_in_folder))
def get_image_urls_and_names(annots, random_sample, category=None):
    '''
    Filters loaded JSON COCO annotations and returns dict of image_name:coco_url_to_image.
    Args:
        annots (JSON): Loaded COCO-like annotations in JSON format.
        random_sample (int): Number of images to download; falsy keeps all.
        category (list): optional category names; when given, only images that
            have at least one annotation in those categories are kept.
    '''
    # id -> name / id -> [file_name, url] lookup tables.
    categories = {i['id']: i['name'] for i in annots['categories']}
    images = {i['id']: [i['file_name'], i['coco_url']] for i in annots['images']}
    annotations = [[i['image_id'], i['category_id']] for i in annots['annotations']]
    chosen_images = dict()
    for annotation in annotations:
        corr_image = images[annotation[0]]
        if category is not None:
            if categories[annotation[1]] not in category:
                continue
        chosen_images[corr_image[0]] = corr_image[1]
    if random_sample:
        # If more images are requested than available, silently keep them all.
        if random_sample <= len(chosen_images):
            # BUG FIX: random.sample() requires a sequence; passing the
            # dict_items view raises TypeError on Python 3.11+.
            chosen_images = dict(random.sample(list(chosen_images.items()), random_sample))
    return chosen_images
def make_dataset_dirs(dataset_command, path):
    """
    Prepare COCO catalogue structure - make folders if they not exist.
    Args:
        dataset_command (string): Defines dataset for which the folders will be made.
        path (string): Path to place where folders will be created.
    Returns:
        list: names of the subfolders prepared ('train'/'valid'); empty list
        when nothing was selected or the command is invalid.
    """
    if dataset_command is None:
        print('No datasets selected.')
        # BUG FIX: previously fell through and returned None, which made the
        # caller's `for i in datasets` loop raise TypeError; return [] instead.
        return []
    if dataset_command == 'all':
        os.makedirs('{}/train'.format(path), exist_ok=True)
        os.makedirs('{}/valid'.format(path), exist_ok=True)
        return ['train', 'valid']
    if dataset_command == 'train':
        os.makedirs('{}/train'.format(path), exist_ok=True)
        return ['train']
    if dataset_command == 'valid':
        os.makedirs('{}/valid'.format(path), exist_ok=True)
        return ['valid']
    print('Invalid dataset - enter either all, train or valid.')
    return []
def COCO_load(root_dir, train_annot=False, valid_annot=False, tfms=[], resize=608, batch_size=4):
    """
    Args:
        root_dir (string): Path to the directory with train and valid folders.
        train_annot (string): Path to the COCO-style json file with annotations for training image set.
        valid_annot (string): Path to the COCO-style json file with annotations for validation image set.
        tfms (get_transforms() function): Optional transformations to be applied to images.
        resize (int): Size to which all images will be resized. Also resizes bounding boxes.
        batch_size (int): How many images we load and use at once.
    """
    # NOTE(review): the mutable default tfms=[] is shared across calls; it is
    # only passed through here, but confirm downstream code never mutates it.
    # Auto-discover annotation files under <root_dir>/annotations when the
    # caller did not name them explicitly.
    if not train_annot:
        path = '{}/annotations'.format(root_dir)
        for i in os.listdir(path):
            if os.path.isfile(os.path.join(path, i)) and 'instances_train' in i:
                train_annot = '{}/{}'.format(path, i)
                print('Found train annotations in {}'.format(train_annot))
    if not valid_annot:
        path = '{}/annotations'.format(root_dir)
        for i in os.listdir(path):
            if os.path.isfile(os.path.join(path, i)) and 'instances_val' in i:
                valid_annot = '{}/{}'.format(path, i)
                print('Found validation annotations in {}'.format(valid_annot))
    with open(train_annot) as tr:
        coco_train = COCODataset(json.load(tr))
    with open(valid_annot) as vl:
        coco_valid = COCODataset(json.load(vl))
    # Merge train + valid bboxes into one filename-keyed lookup table.
    boxes = coco_train.get_bboxes()
    boxes2 = coco_valid.get_bboxes()
    boxes.update(boxes2)
    get_y_func = lambda o: \
        boxes[Path(o).name]  # input dict is being transformed during pipeline, thats why it operates on Path objects
    # Standard fastai data-block pipeline: items -> split -> label -> tfms -> databunch.
    all_objects = (ObjectItemList.from_folder(root_dir).split_by_folder()
                   .label_from_func(get_y_func)
                   .transform(tfms, tfm_y=True, size=resize)
                   .databunch(bs=batch_size, collate_fn=bb_pad_collate))
    return all_objects
def clip_annotations(images_path, annotations_file):
    """Write a copy of `annotations_file` restricted to images present on disk.

    Keeps only the 'images' entries whose file_name exists in `images_path`
    and the 'annotations' that reference those images; 'info', 'licenses' and
    'categories' are copied through. The result is written next to the input
    as '<name>_clipped.json' (assumes a '.json' suffix on the input path).
    """
    # Set membership is O(1); the original list made the loop below O(n*m).
    images = set(os.listdir(images_path))
    towrite = {}
    with open(annotations_file) as file:
        annots = json.load(file)
    towrite['info'] = annots['info']
    towrite['licenses'] = annots['licenses']
    towrite['images'] = []
    ids = set()
    removed = [0, 0]  # [dropped images, dropped annotations]
    for im in annots['images']:
        if im['file_name'] in images:
            towrite['images'].append(im)
            ids.add(im['id'])
        else:
            removed[0] += 1
    towrite['annotations'] = []
    for an in annots['annotations']:
        if an['image_id'] in ids:
            towrite['annotations'].append(an)
        else:
            removed[1] += 1
    towrite['categories'] = annots['categories']
    with open('{}_clipped.json'.format(annotations_file[:-5]), 'w') as file:
        json.dump(towrite, file)
    print('Clipped json file was written to file! {} images and {} annotations were removed'.format(*removed))
def get_image_files(c:PathOrStr, check_ext:bool=True, recurse=False)->FilePathList:
    "Return list of files in `c` that are images. `check_ext` will filter to `image_extensions`."
    extensions = image_extensions if check_ext else None
    return get_files(c, extensions=extensions, recurse=recurse)
def get_annotations(fname, prefix=None):
    "Open a COCO style json in `fname` and returns the lists of filenames (with maybe `prefix`) and labelled bboxes."
    with open(fname) as f:
        annot_dict = json.load(f)
    # Category id -> human-readable class name.
    classes = {cat['id']: cat['name'] for cat in annot_dict['categories']}
    id2images = {}
    id2bboxes = collections.defaultdict(list)
    id2cats = collections.defaultdict(list)
    for ann in annot_dict['annotations']:
        bb = ann['bbox']
        # COCO stores [x, y, w, h]; convert to [top, left, bottom, right].
        id2bboxes[ann['image_id']].append([bb[1], bb[0], bb[3] + bb[1], bb[2] + bb[0]])
        id2cats[ann['image_id']].append(classes[ann['category_id']])
    pre = prefix if prefix is not None else ''
    # Keep only images that actually have at least one annotation.
    for img in annot_dict['images']:
        if img['id'] in id2bboxes:
            id2images[img['id']] = pre + img['file_name']
    ids = list(id2images.keys())
    return [id2images[i] for i in ids], [[id2bboxes[i], id2cats[i]] for i in ids]
def bb_pad_collate(samples:BatchSamples, pad_idx:int=0) -> Tuple[FloatTensor, Tuple[LongTensor, LongTensor]]:
    "Function that collect `samples` of labelled bboxes and adds padding with `pad_idx`."
    # Plain classification targets (ints) need no bbox padding; fall back to
    # the default collate.
    if isinstance(samples[0][1], int): return data_collate(samples)
    # Pad every sample up to the largest label count in the batch.
    max_len = max([len(s[1].data[1]) for s in samples])
    bboxes = torch.zeros(len(samples), max_len, 4)
    labels = torch.zeros(len(samples), max_len).long() + pad_idx
    imgs = []
    for i,s in enumerate(samples):
        imgs.append(s[0].data[None])  # add a leading batch dim per image
        bbs, lbls = s[1].data
        # Right-align real boxes/labels so the leading slots hold the padding.
        if not (bbs.nelement() == 0):
            bboxes[i,-len(lbls):] = bbs
            labels[i,-len(lbls):] = tensor(lbls)
    return torch.cat(imgs,0), (bboxes,labels)
def normalize(x:TensorImage, mean:FloatTensor,std:FloatTensor)->TensorImage:
    "Normalize `x` with `mean` and `std`."
    # Append two trailing axes so per-channel stats broadcast over H and W.
    mean_b = mean[..., None, None]
    std_b = std[..., None, None]
    return (x - mean_b) / std_b
def denormalize(x:TensorImage, mean:FloatTensor,std:FloatTensor, do_x:bool=True)->TensorImage:
    "Denormalize `x` with `mean` and `std`."
    x_cpu = x.cpu()
    if not do_x:
        return x_cpu
    # Invert normalize(): scale back by std then shift by mean, per channel.
    return x_cpu.float() * std[..., None, None] + mean[..., None, None]
def _normalize_batch(b:Tuple[Tensor,Tensor], mean:FloatTensor, std:FloatTensor, do_x:bool=True, do_y:bool=False)->Tuple[Tensor,Tensor]:
    "`b` = `x`,`y` - normalize `x` array of imgs and `do_y` optionally `y`."
    x, y = b
    # Move the stats onto the batch's device before broadcasting.
    mean = mean.to(x.device)
    std = std.to(x.device)
    if do_x:
        x = normalize(x, mean, std)
    # Targets are only normalized when they are image batches themselves (4-d).
    if do_y and len(y.shape) == 4:
        y = normalize(y, mean, std)
    return x, y
def normalize_funcs(mean:FloatTensor, std:FloatTensor, do_x:bool=True, do_y:bool=False)->Tuple[Callable,Callable]:
    "Create normalize/denormalize func using `mean` and `std`, can specify `do_y` and `device`."
    mean = tensor(mean)
    std = tensor(std)
    norm_fn = partial(_normalize_batch, mean=mean, std=std, do_x=do_x, do_y=do_y)
    denorm_fn = partial(denormalize, mean=mean, std=std, do_x=do_x)
    return norm_fn, denorm_fn
# Per-channel ([means], [stds]) normalization statistics for common datasets.
cifar_stats = ([0.491, 0.482, 0.447], [0.247, 0.243, 0.261])
imagenet_stats = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
# Single-channel MNIST stats replicated across 3 channels.
mnist_stats = ([0.15]*3, [0.15]*3)
def channel_view(x:Tensor)->Tensor:
    "Make channel the first axis of `x` and flatten remaining axes"
    n_channels = x.shape[1]
    # Swap batch and channel axes, then make the storage contiguous so the
    # flattening view below is legal.
    channels_first = x.transpose(0, 1).contiguous()
    return channels_first.view(n_channels, -1)
class ImageDataBunch(DataBunch):
"DataBunch suitable for computer vision."
_square_show = True
    @classmethod
    def create_from_ll(cls, lls:LabelLists, bs:int=64, val_bs:int=None, ds_tfms:Optional[TfmList]=None,
                       num_workers:int=defaults.cpus, dl_tfms:Optional[Collection[Callable]]=None, device:torch.device=None,
                       test:Optional[PathOrStr]=None, collate_fn:Callable=data_collate, size:int=None, no_check:bool=False,
                       resize_method:ResizeMethod=None, mult:int=None, padding_mode:str='reflection',
                       mode:str='bilinear', tfm_y:bool=False)->'ImageDataBunch':
        "Create an `ImageDataBunch` from `LabelLists` `lls` with potential `ds_tfms`."
        # Apply the dataset transforms first, then (optionally) attach a test
        # folder, and finally wrap everything into a DataBunch.
        lls = lls.transform(tfms=ds_tfms, size=size, resize_method=resize_method, mult=mult, padding_mode=padding_mode,
                            mode=mode, tfm_y=tfm_y)
        if test is not None: lls.add_test_folder(test)
        return lls.databunch(bs=bs, val_bs=val_bs, dl_tfms=dl_tfms, num_workers=num_workers, collate_fn=collate_fn,
                             device=device, no_check=no_check)
    @classmethod
    def from_folder(cls, path:PathOrStr, train:PathOrStr='train', valid:PathOrStr='valid',
                    valid_pct=None, classes:Collection=None, **kwargs:Any)->'ImageDataBunch':
        "Create from imagenet style dataset in `path` with `train`,`valid`,`test` subfolders (or provide `valid_pct`)."
        path=Path(path)
        il = ImageList.from_folder(path)
        # Either split by the named subfolders, or randomly by percentage.
        if valid_pct is None: src = il.split_by_folder(train=train, valid=valid)
        else: src = il.split_by_rand_pct(valid_pct)
        # Labels come from the immediate parent folder names.
        src = src.label_from_folder(classes=classes)
        return cls.create_from_ll(src, **kwargs)
@classmethod
def from_df(cls, path:PathOrStr, df:pd.DataFrame, folder:PathOrStr=None, label_delim:str=None, valid_pct:float=0.2,
fn_col:IntsOrStrs=0, label_col:IntsOrStrs=1, suffix:str='', **kwargs:Any)->'ImageDataBunch':
"Create from a `DataFrame` `df`."
src = (ImageList.from_df(df, path=path, folder=folder, suffix=suffix, cols=fn_col)
.split_by_rand_pct(valid_pct)
| |
<reponame>kurli/chromium-crosswalk
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import json
import logging
import os
from collections import defaultdict, Mapping
import svn_constants
import third_party.json_schema_compiler.json_parse as json_parse
import third_party.json_schema_compiler.model as model
import third_party.json_schema_compiler.idl_schema as idl_schema
import third_party.json_schema_compiler.idl_parser as idl_parser
from third_party.handlebar import Handlebar
def _RemoveNoDocs(item):
  '''Recursively strip every dict marked 'nodoc': True out of |item|.

  Mutates |item| in place. Returns True when |item| itself is nodoc, so the
  caller can discard it; False otherwise.
  '''
  if json_parse.IsDict(item):
    if item.get('nodoc', False):
      return True
    # FIX: snapshot the items before deleting keys - mutating a dict while
    # iterating a live view is an error on Python 3 (on Python 2 .items()
    # happened to return a list, which masked this).
    for key, value in list(item.items()):
      if _RemoveNoDocs(value):
        del item[key]
  elif type(item) == list:
    # Collect first, then remove, to avoid skipping elements while iterating.
    to_remove = []
    for i in item:
      if _RemoveNoDocs(i):
        to_remove.append(i)
    for i in to_remove:
      item.remove(i)
  return False
def _DetectInlineableTypes(schema):
'''Look for documents that are only referenced once and mark them as inline.
Actual inlining is done by _InlineDocs.
'''
if not schema.get('types'):
return
ignore = frozenset(('value', 'choices'))
refcounts = defaultdict(int)
# Use an explicit stack instead of recursion.
stack = [schema]
while stack:
node = stack.pop()
if isinstance(node, list):
stack.extend(node)
elif isinstance(node, Mapping):
if '$ref' in node:
refcounts[node['$ref']] += 1
stack.extend(v for k, v in node.iteritems() if k not in ignore)
for type_ in schema['types']:
if not 'noinline_doc' in type_:
if refcounts[type_['id']] == 1:
type_['inline_doc'] = True
def _InlineDocs(schema):
'''Replace '$ref's that refer to inline_docs with the json for those docs.
'''
types = schema.get('types')
if types is None:
return
inline_docs = {}
types_without_inline_doc = []
# Gather the types with inline_doc.
for type_ in types:
if type_.get('inline_doc'):
inline_docs[type_['id']] = type_
for k in ('description', 'id', 'inline_doc'):
type_.pop(k, None)
else:
types_without_inline_doc.append(type_)
schema['types'] = types_without_inline_doc
def apply_inline(node):
if isinstance(node, list):
for i in node:
apply_inline(i)
elif isinstance(node, Mapping):
ref = node.get('$ref')
if ref and ref in inline_docs:
node.update(inline_docs[ref])
del node['$ref']
for k, v in node.iteritems():
apply_inline(v)
apply_inline(schema)
def _CreateId(node, prefix):
  # IDs have the form '<prefix>-[<parent>-]<name>'; the parent segment is
  # omitted for nodes owned directly by the namespace.
  parts = [prefix]
  parent = node.parent
  if parent is not None and not isinstance(parent, model.Namespace):
    parts.append(parent.simple_name)
  parts.append(node.simple_name)
  return '-'.join(parts)
def _FormatValue(value):
'''Inserts commas every three digits for integer values. It is magic.
'''
s = str(value)
return ','.join([s[max(0, i - 3):i] for i in range(len(s), 0, -3)][::-1])
def _GetAddRulesDefinitionFromEvents(events):
'''Parses the dictionary |events| to find the definition of the method
addRules among functions of the type Event.
'''
assert 'types' in events, \
'The dictionary |events| must contain the key "types".'
event_list = [t for t in events['types']
if 'name' in t and t['name'] == 'Event']
assert len(event_list) == 1, 'Exactly one type must be called "Event".'
event = event_list[0]
assert 'functions' in event, 'The type Event must contain "functions".'
result_list = [f for f in event['functions']
if 'name' in f and f['name'] == 'addRules']
assert len(result_list) == 1, \
'Exactly one function must be called "addRules".'
return result_list[0]
class _JSCModel(object):
'''Uses a Model from the JSON Schema Compiler and generates a dict that
a Handlebar template can use for a data source.
'''
  def __init__(self,
               json,
               ref_resolver,
               disable_refs,
               availability_finder,
               branch_utility,
               parse_cache,
               template_data_source,
               add_rules_schema_function,
               idl=False):
    '''Builds the model for one API namespace from its (JSON or IDL) schema.

    |json| is the raw schema dict; |idl| selects IDL-specific processing.
    A namespace that is entirely nodoc yields self._namespace = None, and
    ToDict() will then render to an empty dict.
    '''
    self._ref_resolver = ref_resolver
    self._disable_refs = disable_refs
    self._availability_finder = availability_finder
    self._branch_utility = branch_utility
    # Pre-parsed JSON config files, via the shared parse cache.
    self._api_availabilities = parse_cache.GetFromFile(
        '%s/api_availabilities.json' % svn_constants.JSON_PATH)
    self._intro_tables = parse_cache.GetFromFile(
        '%s/intro_tables.json' % svn_constants.JSON_PATH)
    self._api_features = parse_cache.GetFromFile(
        '%s/_api_features.json' % svn_constants.API_PATH)
    self._template_data_source = template_data_source
    self._add_rules_schema_function = add_rules_schema_function
    # Work on a deep copy: _RemoveNoDocs/_InlineDocs mutate the schema.
    clean_json = copy.deepcopy(json)
    if _RemoveNoDocs(clean_json):
      self._namespace = None
    else:
      if idl:
        # Only IDL schemas get single-reference types auto-marked for inlining.
        _DetectInlineableTypes(clean_json)
      _InlineDocs(clean_json)
      self._namespace = model.Namespace(clean_json, clean_json['namespace'])
def _FormatDescription(self, description):
if self._disable_refs:
return description
return self._ref_resolver.ResolveAllLinks(description,
namespace=self._namespace.name)
def _GetLink(self, link):
if self._disable_refs:
type_name = link.split('.', 1)[-1]
return { 'href': '#type-%s' % type_name, 'text': link, 'name': link }
return self._ref_resolver.SafeGetLink(link, namespace=self._namespace.name)
def ToDict(self):
if self._namespace is None:
return {}
as_dict = {
'name': self._namespace.name,
'types': self._GenerateTypes(self._namespace.types.values()),
'functions': self._GenerateFunctions(self._namespace.functions),
'events': self._GenerateEvents(self._namespace.events),
'properties': self._GenerateProperties(self._namespace.properties),
'introList': self._GetIntroTableList(),
'channelWarning': self._GetChannelWarning(),
'byName': {},
}
# Make every type/function/event/property also accessible by name for
# rendering specific API entities rather than the whole thing at once, for
# example {{apis.manifestTypes.byName.ExternallyConnectable}}.
for item_type in ('types', 'functions', 'events', 'properties'):
as_dict['byName'].update(
(item['name'], item) for item in as_dict[item_type])
return as_dict
  def _GetApiAvailability(self):
    '''Channel availability info for this API.

    A hard-coded entry in api_availabilities.json wins; otherwise availability
    is computed by the availability finder.
    '''
    # Check for a predetermined availability for this API.
    api_info = self._api_availabilities.get(self._namespace.name)
    if api_info is not None:
      channel = api_info['channel']
      if channel == 'stable':
        # Stable entries pin the exact version they stabilized in.
        return self._branch_utility.GetStableChannelInfo(api_info['version'])
      return self._branch_utility.GetChannelInfo(channel)
    return self._availability_finder.GetApiAvailability(self._namespace.name)
def _GetChannelWarning(self):
if not self._IsExperimental():
return { self._GetApiAvailability().channel: True }
return None
def _IsExperimental(self):
return self._namespace.name.startswith('experimental')
def _GenerateTypes(self, types):
return [self._GenerateType(t) for t in types]
def _GenerateType(self, type_):
  """Render one model type, including its members and type information."""
  type_dict = dict(
      name=type_.simple_name,
      description=self._FormatDescription(type_.description),
      properties=self._GenerateProperties(type_.properties),
      functions=self._GenerateFunctions(type_.functions),
      events=self._GenerateEvents(type_.events),
      id=_CreateId(type_, 'type'))
  # Annotates |type_dict| in place with rendering info (is_object, etc.).
  self._RenderTypeInformation(type_, type_dict)
  return type_dict
def _GenerateFunctions(self, functions):
  """Render the name -> model function mapping to a list of dicts."""
  return list(map(self._GenerateFunction, functions.values()))
def _GenerateFunction(self, function):
  """Render one model function: name, params, callback, and return value."""
  function_dict = {
      'name': function.simple_name,
      'description': self._FormatDescription(function.description),
      'callback': self._GenerateCallback(function.callback),
      'parameters': [],
      'returns': None,
      'id': _CreateId(function, 'method')
  }
  # Functions owned by a type record their parent's name; namespace-owned
  # functions do not.
  if (function.parent is not None and
      not isinstance(function.parent, model.Namespace)):
    function_dict['parentName'] = function.parent.simple_name
  if function.returns:
    function_dict['returns'] = self._GenerateType(function.returns)
  for param in function.params:
    function_dict['parameters'].append(self._GenerateProperty(param))
  if function.callback is not None:
    # Show the callback as an extra parameter.
    function_dict['parameters'].append(
        self._GenerateCallbackProperty(function.callback))
  # Mark the final parameter so templates can omit the trailing comma.
  if len(function_dict['parameters']) > 0:
    function_dict['parameters'][-1]['last'] = True
  return function_dict
def _GenerateEvents(self, events):
  """Render the name -> model event mapping to a list of dicts."""
  return list(map(self._GenerateEvent, events.values()))
def _GenerateEvent(self, event):
  """Render one model event, including rules and listener support info."""
  event_dict = {
      'name': event.simple_name,
      'description': self._FormatDescription(event.description),
      'filters': [self._GenerateProperty(f) for f in event.filters],
      'conditions': [self._GetLink(condition)
                     for condition in event.conditions],
      'actions': [self._GetLink(action) for action in event.actions],
      'supportsRules': event.supports_rules,
      'supportsListeners': event.supports_listeners,
      'id': _CreateId(event, 'event')
  }
  # Events owned by a type record their parent's name; namespace-owned
  # events do not.
  if (event.parent is not None and
      not isinstance(event.parent, model.Namespace)):
    event_dict['parentName'] = event.parent.simple_name
  # For the addRules method we can use the common definition, because addRules
  # has the same signature for every event.
  if event.supports_rules:
    event_dict['addRulesFunction'] = self._add_rules_schema_function()
  # We need to create the method description for addListener based on the
  # information stored in |event|.
  if event.supports_listeners:
    # Synthesize a model.Function for the listener callback so it can be
    # rendered with the normal function machinery.
    callback_object = model.Function(parent=event,
                                     name='callback',
                                     json={},
                                     namespace=event.parent,
                                     origin='')
    callback_object.params = event.params
    if event.callback:
      callback_object.callback = event.callback
    callback_parameters = self._GenerateCallbackProperty(callback_object)
    callback_parameters['last'] = True
    event_dict['addListenerFunction'] = {
        'name': 'addListener',
        'callback': self._GenerateFunction(callback_object),
        'parameters': [callback_parameters]
    }
  return event_dict
def _GenerateCallback(self, callback):
  """Render a callback as a pseudo-function dict, or None when absent."""
  if not callback:
    return None
  parameters = [self._GenerateProperty(param) for param in callback.params]
  if parameters:
    # Mark the final parameter so templates can omit the trailing comma.
    parameters[-1]['last'] = True
  return {
      'name': callback.simple_name,
      'simple_type': {'simple_type': 'function'},
      'optional': callback.optional,
      'parameters': parameters,
  }
def _GenerateProperties(self, properties):
  """Render the name -> model property mapping to a list of dicts."""
  return list(map(self._GenerateProperty, properties.values()))
def _GenerateProperty(self, property_):
  """Render one model property, including nested type information.

  For array properties the nested 'properties' come from the array's item
  type, per the array-aware |properties| selection below.
  """
  if not hasattr(property_, 'type_'):
    # Debug aid: dump the object's public attributes before the
    # AttributeError below so schema problems are easier to diagnose.
    for d in dir(property_):
      if not d.startswith('_'):
        print ('%s -> %s' % (d, getattr(property_, d)))
  type_ = property_.type_
  # Make sure we generate property info for arrays, too.
  # TODO(kalman): what about choices?
  if type_.property_type == model.PropertyType.ARRAY:
    properties = type_.item_type.properties
  else:
    properties = type_.properties
  property_dict = {
      'name': property_.simple_name,
      'optional': property_.optional,
      'description': self._FormatDescription(property_.description),
      # Use the array-aware |properties| computed above; passing
      # type_.properties here would drop an array's item properties.
      'properties': self._GenerateProperties(properties),
      'functions': self._GenerateFunctions(type_.functions),
      'parameters': [],
      'returns': None,
      'id': _CreateId(property_, 'property')
  }
  if type_.property_type == model.PropertyType.FUNCTION:
    function = type_.function
    for param in function.params:
      property_dict['parameters'].append(self._GenerateProperty(param))
    if function.returns:
      property_dict['returns'] = self._GenerateType(function.returns)
  # Properties owned by a type record their parent's name.
  if (property_.parent is not None and
      not isinstance(property_.parent, model.Namespace)):
    property_dict['parentName'] = property_.parent.simple_name
  value = property_.value
  if value is not None:
    if isinstance(value, int):
      property_dict['value'] = _FormatValue(value)
    else:
      property_dict['value'] = value
  else:
    # No fixed value: annotate with rendering info for the type instead.
    self._RenderTypeInformation(type_, property_dict)
  return property_dict
def _GenerateCallbackProperty(self, callback):
  """Render a callback as a property-like dict for parameter lists."""
  property_dict = {
      'name': callback.simple_name,
      'description': self._FormatDescription(callback.description),
      'optional': callback.optional,
      'id': _CreateId(callback, 'property'),
      'simple_type': 'function',
  }
  parent = callback.parent
  # Callbacks owned by a type record their parent's name.
  if parent is not None and not isinstance(parent, model.Namespace):
    property_dict['parentName'] = parent.simple_name
  return property_dict
def _RenderTypeInformation(self, type_, dst_dict):
  """Annotate |dst_dict| in place with rendering info for |type_|.

  Exactly one of choices/link/array/enum_values/simple_type is added,
  depending on the property type; instance_of wins over the generic
  simple_type fallback.
  """
  dst_dict['is_object'] = type_.property_type == model.PropertyType.OBJECT
  if type_.property_type == model.PropertyType.CHOICES:
    dst_dict['choices'] = self._GenerateTypes(type_.choices)
    # We keep track of which == last for knowing when to add "or" between
    # choices in templates.
    if len(dst_dict['choices']) > 0:
      dst_dict['choices'][-1]['last'] = True
  elif type_.property_type == model.PropertyType.REF:
    dst_dict['link'] = self._GetLink(type_.ref_type)
  elif type_.property_type == model.PropertyType.ARRAY:
    dst_dict['array'] = self._GenerateType(type_.item_type)
  elif type_.property_type == model.PropertyType.ENUM:
    dst_dict['enum_values'] = []
    for enum_value in type_.enum_values:
      dst_dict['enum_values'].append({'name': enum_value})
    # Mark the final enum value for template separator handling.
    if len(dst_dict['enum_values']) > 0:
      dst_dict['enum_values'][-1]['last'] = True
  elif type_.instance_of is not None:
    dst_dict['simple_type'] = type_.instance_of.lower()
  else:
    dst_dict['simple_type'] = type_.property_type.name.lower()
def _GetIntroTableList(self):
  '''Create a generic data structure that can be traversed by the templates
  to create an API intro table.
  '''
  intro_rows = [
      self._GetIntroDescriptionRow(),
      self._GetIntroAvailabilityRow()
  ] + self._GetIntroDependencyRows()
  # Rows from intro_tables.json override any of the rows above that share
  # the same 'title'; new titles are appended. The title -> index snapshot
  # is taken once, before any misc rows are added.
  position = {}
  for i, row in enumerate(intro_rows):
    position.setdefault(row['title'], i)
  for misc_row in self._GetMiscIntroRows():
    i = position.get(misc_row['title'])
    if i is None:
      intro_rows.append(misc_row)
    else:
      intro_rows[i] = misc_row
  return intro_rows
def _GetIntroDescriptionRow(self):
  ''' Generates the 'Description' row data for an API intro table.
  '''
  description = self._FormatDescription(self._namespace.description)
  return {
      'title': 'Description',
      'content': [{'text': description}],
  }
| |
- m.x2695 - m.x2696 - m.x2697
- m.x2698 - m.x2699 - m.x2700 + m.x3009 == 0)
# Machine-generated Pyomo constraints (do not hand-edit the term lists).
# c5: links the aggregate variable x3010 to the sum of x2701..x3000.
m.c5 = Constraint(expr= - m.x2701 - m.x2702 - m.x2703 - m.x2704 - m.x2705 - m.x2706 - m.x2707 - m.x2708 - m.x2709
    - m.x2710 - m.x2711 - m.x2712 - m.x2713 - m.x2714 - m.x2715 - m.x2716 - m.x2717 - m.x2718
    - m.x2719 - m.x2720 - m.x2721 - m.x2722 - m.x2723 - m.x2724 - m.x2725 - m.x2726 - m.x2727
    - m.x2728 - m.x2729 - m.x2730 - m.x2731 - m.x2732 - m.x2733 - m.x2734 - m.x2735 - m.x2736
    - m.x2737 - m.x2738 - m.x2739 - m.x2740 - m.x2741 - m.x2742 - m.x2743 - m.x2744 - m.x2745
    - m.x2746 - m.x2747 - m.x2748 - m.x2749 - m.x2750 - m.x2751 - m.x2752 - m.x2753 - m.x2754
    - m.x2755 - m.x2756 - m.x2757 - m.x2758 - m.x2759 - m.x2760 - m.x2761 - m.x2762 - m.x2763
    - m.x2764 - m.x2765 - m.x2766 - m.x2767 - m.x2768 - m.x2769 - m.x2770 - m.x2771 - m.x2772
    - m.x2773 - m.x2774 - m.x2775 - m.x2776 - m.x2777 - m.x2778 - m.x2779 - m.x2780 - m.x2781
    - m.x2782 - m.x2783 - m.x2784 - m.x2785 - m.x2786 - m.x2787 - m.x2788 - m.x2789 - m.x2790
    - m.x2791 - m.x2792 - m.x2793 - m.x2794 - m.x2795 - m.x2796 - m.x2797 - m.x2798 - m.x2799
    - m.x2800 - m.x2801 - m.x2802 - m.x2803 - m.x2804 - m.x2805 - m.x2806 - m.x2807 - m.x2808
    - m.x2809 - m.x2810 - m.x2811 - m.x2812 - m.x2813 - m.x2814 - m.x2815 - m.x2816 - m.x2817
    - m.x2818 - m.x2819 - m.x2820 - m.x2821 - m.x2822 - m.x2823 - m.x2824 - m.x2825 - m.x2826
    - m.x2827 - m.x2828 - m.x2829 - m.x2830 - m.x2831 - m.x2832 - m.x2833 - m.x2834 - m.x2835
    - m.x2836 - m.x2837 - m.x2838 - m.x2839 - m.x2840 - m.x2841 - m.x2842 - m.x2843 - m.x2844
    - m.x2845 - m.x2846 - m.x2847 - m.x2848 - m.x2849 - m.x2850 - m.x2851 - m.x2852 - m.x2853
    - m.x2854 - m.x2855 - m.x2856 - m.x2857 - m.x2858 - m.x2859 - m.x2860 - m.x2861 - m.x2862
    - m.x2863 - m.x2864 - m.x2865 - m.x2866 - m.x2867 - m.x2868 - m.x2869 - m.x2870 - m.x2871
    - m.x2872 - m.x2873 - m.x2874 - m.x2875 - m.x2876 - m.x2877 - m.x2878 - m.x2879 - m.x2880
    - m.x2881 - m.x2882 - m.x2883 - m.x2884 - m.x2885 - m.x2886 - m.x2887 - m.x2888 - m.x2889
    - m.x2890 - m.x2891 - m.x2892 - m.x2893 - m.x2894 - m.x2895 - m.x2896 - m.x2897 - m.x2898
    - m.x2899 - m.x2900 - m.x2901 - m.x2902 - m.x2903 - m.x2904 - m.x2905 - m.x2906 - m.x2907
    - m.x2908 - m.x2909 - m.x2910 - m.x2911 - m.x2912 - m.x2913 - m.x2914 - m.x2915 - m.x2916
    - m.x2917 - m.x2918 - m.x2919 - m.x2920 - m.x2921 - m.x2922 - m.x2923 - m.x2924 - m.x2925
    - m.x2926 - m.x2927 - m.x2928 - m.x2929 - m.x2930 - m.x2931 - m.x2932 - m.x2933 - m.x2934
    - m.x2935 - m.x2936 - m.x2937 - m.x2938 - m.x2939 - m.x2940 - m.x2941 - m.x2942 - m.x2943
    - m.x2944 - m.x2945 - m.x2946 - m.x2947 - m.x2948 - m.x2949 - m.x2950 - m.x2951 - m.x2952
    - m.x2953 - m.x2954 - m.x2955 - m.x2956 - m.x2957 - m.x2958 - m.x2959 - m.x2960 - m.x2961
    - m.x2962 - m.x2963 - m.x2964 - m.x2965 - m.x2966 - m.x2967 - m.x2968 - m.x2969 - m.x2970
    - m.x2971 - m.x2972 - m.x2973 - m.x2974 - m.x2975 - m.x2976 - m.x2977 - m.x2978 - m.x2979
    - m.x2980 - m.x2981 - m.x2982 - m.x2983 - m.x2984 - m.x2985 - m.x2986 - m.x2987 - m.x2988
    - m.x2989 - m.x2990 - m.x2991 - m.x2992 - m.x2993 - m.x2994 - m.x2995 - m.x2996 - m.x2997
    - m.x2998 - m.x2999 - m.x3000 + m.x3010 == 0)
# c6: links the aggregate variable x3001 to the count of selected booleans b1..b300.
m.c6 = Constraint(expr= - m.b1 - m.b2 - m.b3 - m.b4 - m.b5 - m.b6 - m.b7 - m.b8 - m.b9 - m.b10 - m.b11 - m.b12 - m.b13
    - m.b14 - m.b15 - m.b16 - m.b17 - m.b18 - m.b19 - m.b20 - m.b21 - m.b22 - m.b23 - m.b24 - m.b25
    - m.b26 - m.b27 - m.b28 - m.b29 - m.b30 - m.b31 - m.b32 - m.b33 - m.b34 - m.b35 - m.b36 - m.b37
    - m.b38 - m.b39 - m.b40 - m.b41 - m.b42 - m.b43 - m.b44 - m.b45 - m.b46 - m.b47 - m.b48 - m.b49
    - m.b50 - m.b51 - m.b52 - m.b53 - m.b54 - m.b55 - m.b56 - m.b57 - m.b58 - m.b59 - m.b60 - m.b61
    - m.b62 - m.b63 - m.b64 - m.b65 - m.b66 - m.b67 - m.b68 - m.b69 - m.b70 - m.b71 - m.b72 - m.b73
    - m.b74 - m.b75 - m.b76 - m.b77 - m.b78 - m.b79 - m.b80 - m.b81 - m.b82 - m.b83 - m.b84 - m.b85
    - m.b86 - m.b87 - m.b88 - m.b89 - m.b90 - m.b91 - m.b92 - m.b93 - m.b94 - m.b95 - m.b96 - m.b97
    - m.b98 - m.b99 - m.b100 - m.b101 - m.b102 - m.b103 - m.b104 - m.b105 - m.b106 - m.b107 - m.b108
    - m.b109 - m.b110 - m.b111 - m.b112 - m.b113 - m.b114 - m.b115 - m.b116 - m.b117 - m.b118
    - m.b119 - m.b120 - m.b121 - m.b122 - m.b123 - m.b124 - m.b125 - m.b126 - m.b127 - m.b128
    - m.b129 - m.b130 - m.b131 - m.b132 - m.b133 - m.b134 - m.b135 - m.b136 - m.b137 - m.b138
    - m.b139 - m.b140 - m.b141 - m.b142 - m.b143 - m.b144 - m.b145 - m.b146 - m.b147 - m.b148
    - m.b149 - m.b150 - m.b151 - m.b152 - m.b153 - m.b154 - m.b155 - m.b156 - m.b157 - m.b158
    - m.b159 - m.b160 - m.b161 - m.b162 - m.b163 - m.b164 - m.b165 - m.b166 - m.b167 - m.b168
    - m.b169 - m.b170 - m.b171 - m.b172 - m.b173 - m.b174 - m.b175 - m.b176 - m.b177 - m.b178
    - m.b179 - m.b180 - m.b181 - m.b182 - m.b183 - m.b184 - m.b185 - m.b186 - m.b187 - m.b188
    - m.b189 - m.b190 - m.b191 - m.b192 - m.b193 - m.b194 - m.b195 - m.b196 - m.b197 - m.b198
    - m.b199 - m.b200 - m.b201 - m.b202 - m.b203 - m.b204 - m.b205 - m.b206 - m.b207 - m.b208
    - m.b209 - m.b210 - m.b211 - m.b212 - m.b213 - m.b214 - m.b215 - m.b216 - m.b217 - m.b218
    - m.b219 - m.b220 - m.b221 - m.b222 - m.b223 - m.b224 - m.b225 - m.b226 - m.b227 - m.b228
    - m.b229 - m.b230 - m.b231 - m.b232 - m.b233 - m.b234 - m.b235 - m.b236 - m.b237 - m.b238
    - m.b239 - m.b240 - m.b241 - m.b242 - m.b243 - m.b244 - m.b245 - m.b246 - m.b247 - m.b248
    - m.b249 - m.b250 - m.b251 - m.b252 - m.b253 - m.b254 - m.b255 - m.b256 - m.b257 - m.b258
    - m.b259 - m.b260 - m.b261 - m.b262 - m.b263 - m.b264 - m.b265 - m.b266 - m.b267 - m.b268
    - m.b269 - m.b270 - m.b271 - m.b272 - m.b273 - m.b274 - m.b275 - m.b276 - m.b277 - m.b278
    - m.b279 - m.b280 - m.b281 - m.b282 - m.b283 - m.b284 - m.b285 - m.b286 - m.b287 - m.b288
    - m.b289 - m.b290 - m.b291 - m.b292 - m.b293 - m.b294 - m.b295 - m.b296 - m.b297 - m.b298
    - m.b299 - m.b300 + m.x3001 == 0)
m.c7 = Constraint(expr= - m.b301 - m.b302 - m.b303 - m.b304 - m.b305 - m.b306 - m.b307 - m.b308 - m.b309 - m.b310
- m.b311 - m.b312 - m.b313 - m.b314 - m.b315 - m.b316 - m.b317 - m.b318 - m.b319 - m.b320
- m.b321 - m.b322 - m.b323 - m.b324 - m.b325 | |
jnp.pi
)
return -0.5 * M - normalize_term
@lazy_property
def covariance_matrix(self):
    """Dense covariance reconstructed from the Cholesky factor: L @ L.T."""
    chol = self.scale_tril
    return jnp.matmul(chol, jnp.swapaxes(chol, -1, -2))
@lazy_property
def precision_matrix(self):
    """Inverse covariance, via a Cholesky solve against the identity."""
    eye = jnp.broadcast_to(
        jnp.eye(self.scale_tril.shape[-1]), self.scale_tril.shape
    )
    return cho_solve((self.scale_tril, True), eye)
@property
def mean(self):
    """Distribution mean: loc broadcast to the full sample shape."""
    loc = self.loc
    return jnp.broadcast_to(loc, self.shape())
@property
def variance(self):
    """Marginal variances: row-wise squared norms of the Cholesky factor."""
    sq_norms = jnp.sum(self.scale_tril ** 2, axis=-1)
    return jnp.broadcast_to(sq_norms, self.batch_shape + self.event_shape)
def tree_flatten(self):
    """JAX pytree flattening: dynamic children (loc, scale_tril), no aux data."""
    children = (self.loc, self.scale_tril)
    return children, None
@classmethod
def tree_unflatten(cls, aux_data, params):
    """JAX pytree unflattening; ``aux_data`` is unused (always None)."""
    location, tril = params
    return cls(location, scale_tril=tril)
@staticmethod
def infer_shapes(
    loc=(), covariance_matrix=None, precision_matrix=None, scale_tril=None
):
    """Derive (batch_shape, event_shape) from the argument shapes alone."""
    batch_shape, event_shape = loc[:-1], loc[-1:]
    # Any provided matrix contributes its leading dims to the batch shape
    # and its trailing dim to the event shape.
    for matrix_shape in (covariance_matrix, precision_matrix, scale_tril):
        if matrix_shape is None:
            continue
        batch_shape = lax.broadcast_shapes(batch_shape, matrix_shape[:-2])
        event_shape = lax.broadcast_shapes(event_shape, matrix_shape[-1:])
    return batch_shape, event_shape
class MultivariateStudentT(Distribution):
    """Multivariate Student's t distribution parametrized by ``df``, ``loc``
    and a lower-Cholesky ``scale_tril``.

    A sample is ``loc + scale_tril @ y`` where ``y`` is standard normal
    rescaled by ``sqrt(df / chi2(df))``.
    """

    arg_constraints = {
        "df": constraints.positive,
        "loc": constraints.real_vector,
        "scale_tril": constraints.lower_cholesky,
    }
    support = constraints.real_vector
    reparametrized_params = ["df", "loc", "scale_tril"]

    def __init__(
        self,
        df,
        loc=0.0,
        scale_tril=None,
        validate_args=None,
    ):
        # Promote a scalar loc to a length-1 event vector.
        if jnp.ndim(loc) == 0:
            (loc,) = promote_shapes(loc, shape=(1,))
        batch_shape = lax.broadcast_shapes(
            jnp.shape(df), jnp.shape(loc)[:-1], jnp.shape(scale_tril)[:-2]
        )
        (self.df,) = promote_shapes(df, shape=batch_shape)
        (self.loc,) = promote_shapes(loc, shape=batch_shape + loc.shape[-1:])
        (self.scale_tril,) = promote_shapes(
            scale_tril, shape=batch_shape + scale_tril.shape[-2:]
        )
        event_shape = jnp.shape(self.scale_tril)[-1:]
        # Auxiliary chi-square distribution used by sample() to build the
        # t scaling factor sqrt(df / chi2).
        self._chi2 = Chi2(self.df)
        super(MultivariateStudentT, self).__init__(
            batch_shape=batch_shape,
            event_shape=event_shape,
            validate_args=validate_args,
        )

    def sample(self, key, sample_shape=()):
        assert is_prng_key(key)
        key_normal, key_chi2 = random.split(key)
        std_normal = random.normal(
            key_normal,
            shape=sample_shape + self.batch_shape + self.event_shape,
        )
        z = self._chi2.sample(key_chi2, sample_shape)
        # Standardized t vector: N(0, I) * sqrt(df / chi2).
        y = std_normal * jnp.expand_dims(jnp.sqrt(self.df / z), -1)
        return self.loc + jnp.squeeze(
            jnp.matmul(self.scale_tril, y[..., jnp.newaxis]), axis=-1
        )

    @validate_sample
    def log_prob(self, value):
        n = self.scale_tril.shape[-1]
        # Z is the log normalization constant of the multivariate t density.
        Z = (
            jnp.log(jnp.diagonal(self.scale_tril, axis1=-2, axis2=-1)).sum(-1)
            + 0.5 * n * jnp.log(self.df)
            + 0.5 * n * jnp.log(jnp.pi)
            + gammaln(0.5 * self.df)
            - gammaln(0.5 * (self.df + n))
        )
        M = _batch_mahalanobis(self.scale_tril, value - self.loc)
        return -0.5 * (self.df + n) * jnp.log1p(M / self.df) - Z

    @lazy_property
    def covariance_matrix(self):
        # NB: this is not covariance of this distribution;
        # the actual covariance is df / (df - 2) * covariance_matrix
        return jnp.matmul(self.scale_tril, jnp.swapaxes(self.scale_tril, -1, -2))

    @lazy_property
    def precision_matrix(self):
        # Inverse of covariance_matrix above, via a Cholesky solve.
        identity = jnp.broadcast_to(
            jnp.eye(self.scale_tril.shape[-1]), self.scale_tril.shape
        )
        return cho_solve((self.scale_tril, True), identity)

    @property
    def mean(self):
        # for df <= 1. should be jnp.nan (keeping jnp.inf for consistency with scipy)
        return jnp.broadcast_to(
            jnp.where(jnp.expand_dims(self.df, -1) <= 1, jnp.inf, self.loc),
            self.shape(),
        )

    @property
    def variance(self):
        df = jnp.expand_dims(self.df, -1)
        var = jnp.power(self.scale_tril, 2).sum(-1) * (df / (df - 2))
        # Variance is infinite for df <= 2 and undefined (NaN) for df <= 1.
        var = jnp.where(df > 2, var, jnp.inf)
        var = jnp.where(df <= 1, jnp.nan, var)
        return jnp.broadcast_to(var, self.batch_shape + self.event_shape)

    @staticmethod
    def infer_shapes(df, loc, scale_tril):
        # Shape-only computation used by numpyro's shape-inference machinery.
        event_shape = (scale_tril[-1],)
        batch_shape = lax.broadcast_shapes(df, loc[:-1], scale_tril[:-2])
        return batch_shape, event_shape
def _batch_mv(bmat, bvec):
    r"""
    Batched matrix-vector product with broadcastable batch shapes.

    `bmat` holds :math:`n \times n` matrices and `bvec` holds length-:math:`n`
    vectors; leading (batch) dimensions of the two arguments need not be
    equal, only broadcast-compatible.
    """
    # Promote the vectors to column matrices, matmul, then drop the column axis.
    col = jnp.expand_dims(bvec, axis=-1)
    return jnp.squeeze(jnp.matmul(bmat, col), axis=-1)
def _batch_capacitance_tril(W, D):
    r"""
    Cholesky factor of the capacitance matrix :math:`I + W.T @ inv(D) @ W`
    for a batch of matrices :math:`W` and a batch of diagonal vectors :math:`D`.
    """
    Wt_over_D = jnp.swapaxes(W, -1, -2) / jnp.expand_dims(D, -2)
    K = jnp.matmul(Wt_over_D, W)
    # could be inefficient
    return jnp.linalg.cholesky(K + jnp.identity(K.shape[-1]))
def _batch_lowrank_logdet(W, D, capacitance_tril):
    r"""
    Log-determinant of :math:`W @ W.T + D` via the matrix determinant lemma::
        log|W @ W.T + D| = log|C| + log|D|,
    where :math:`C = I + W.T @ inv(D) @ W` is the capacitance matrix whose
    Cholesky factor is `capacitance_tril`.
    """
    cap_diag = jnp.diagonal(capacitance_tril, axis1=-2, axis2=-1)
    half_logdet_C = jnp.sum(jnp.log(cap_diag), axis=-1)
    return 2 * half_logdet_C + jnp.log(D).sum(-1)
def _batch_lowrank_mahalanobis(W, D, x, capacitance_tril):
    r"""
    Uses "Woodbury matrix identity"::
        inv(W @ W.T + D) = inv(D) - inv(D) @ W @ inv(C) @ W.T @ inv(D),
    where :math:`C` is the capacitance matrix :math:`I + W.T @ inv(D) @ W`, to compute the squared
    Mahalanobis distance :math:`x.T @ inv(W @ W.T + D) @ x`.
    """
    Wt_Dinv = jnp.swapaxes(W, -1, -2) / jnp.expand_dims(D, -2)
    Wt_Dinv_x = _batch_mv(Wt_Dinv, x)
    # First Woodbury term: x.T @ inv(D) @ x for a diagonal D.
    mahalanobis_term1 = jnp.sum(jnp.square(x) / D, axis=-1)
    # Second term: correction through the capacitance matrix.
    mahalanobis_term2 = _batch_mahalanobis(capacitance_tril, Wt_Dinv_x)
    return mahalanobis_term1 - mahalanobis_term2
class LowRankMultivariateNormal(Distribution):
    """Multivariate normal with covariance ``cov_factor @ cov_factor.T + diag(cov_diag)``.

    The low-rank structure lets log_prob, entropy and sampling use the
    Woodbury identity and the matrix determinant lemma instead of dense
    O(n^3) linear algebra.
    """

    arg_constraints = {
        "loc": constraints.real_vector,
        "cov_factor": constraints.independent(constraints.real, 2),
        "cov_diag": constraints.independent(constraints.positive, 1),
    }
    support = constraints.real_vector
    reparametrized_params = ["loc", "cov_factor", "cov_diag"]

    def __init__(self, loc, cov_factor, cov_diag, validate_args=None):
        if jnp.ndim(loc) < 1:
            raise ValueError("`loc` must be at least one-dimensional.")
        event_shape = jnp.shape(loc)[-1:]
        if jnp.ndim(cov_factor) < 2:
            raise ValueError(
                "`cov_factor` must be at least two-dimensional, "
                "with optional leading batch dimensions"
            )
        if jnp.shape(cov_factor)[-2:-1] != event_shape:
            raise ValueError(
                "`cov_factor` must be a batch of matrices with shape {} x m".format(
                    event_shape[0]
                )
            )
        if jnp.shape(cov_diag)[-1:] != event_shape:
            # Bug fix: use the local `event_shape` here. The previous code
            # formatted `self.event_shape`, but the base class only defines
            # event_shape in super().__init__ (called below), so the intended
            # ValueError was masked by an AttributeError.
            raise ValueError(
                "`cov_diag` must be a batch of vectors with shape {}".format(
                    event_shape
                )
            )
        # Align batch dims by temporarily appending a trailing axis to the
        # vector-shaped arguments.
        loc, cov_factor, cov_diag = promote_shapes(
            loc[..., jnp.newaxis], cov_factor, cov_diag[..., jnp.newaxis]
        )
        batch_shape = lax.broadcast_shapes(
            jnp.shape(loc), jnp.shape(cov_factor), jnp.shape(cov_diag)
        )[:-2]
        self.loc = loc[..., 0]
        self.cov_factor = cov_factor
        cov_diag = cov_diag[..., 0]
        self.cov_diag = cov_diag
        # Cached Cholesky of the capacitance matrix, reused by log_prob/entropy.
        self._capacitance_tril = _batch_capacitance_tril(cov_factor, cov_diag)
        super(LowRankMultivariateNormal, self).__init__(
            batch_shape=batch_shape,
            event_shape=event_shape,
            validate_args=validate_args,
        )

    @property
    def mean(self):
        return self.loc

    @lazy_property
    def variance(self):
        # Diagonal of W @ W.T + D: row-wise squared norms of W plus D.
        raw_variance = jnp.square(self.cov_factor).sum(-1) + self.cov_diag
        return jnp.broadcast_to(raw_variance, self.batch_shape + self.event_shape)

    @lazy_property
    def scale_tril(self):
        # The following identity is used to increase the numerically computation stability
        # for Cholesky decomposition (see http://www.gaussianprocess.org/gpml/, Section 3.4.3):
        #     W @ W.T + D = D1/2 @ (I + D-1/2 @ W @ W.T @ D-1/2) @ D1/2
        # The matrix "I + D-1/2 @ W @ W.T @ D-1/2" has eigenvalues bounded from below by 1,
        # hence it is well-conditioned and safe to take Cholesky decomposition.
        cov_diag_sqrt_unsqueeze = jnp.expand_dims(jnp.sqrt(self.cov_diag), axis=-1)
        Dinvsqrt_W = self.cov_factor / cov_diag_sqrt_unsqueeze
        K = jnp.matmul(Dinvsqrt_W, jnp.swapaxes(Dinvsqrt_W, -1, -2))
        K = jnp.add(K, jnp.identity(K.shape[-1]))
        scale_tril = cov_diag_sqrt_unsqueeze * jnp.linalg.cholesky(K)
        return scale_tril

    @lazy_property
    def covariance_matrix(self):
        # TODO: find a better solution to create a diagonal matrix
        new_diag = self.cov_diag[..., jnp.newaxis] * jnp.identity(self.loc.shape[-1])
        covariance_matrix = new_diag + jnp.matmul(
            self.cov_factor, jnp.swapaxes(self.cov_factor, -1, -2)
        )
        return covariance_matrix

    @lazy_property
    def precision_matrix(self):
        # We use "Woodbury matrix identity" to take advantage of low rank form::
        #     inv(W @ W.T + D) = inv(D) - inv(D) @ W @ inv(C) @ W.T @ inv(D)
        # where :math:`C` is the capacitance matrix.
        Wt_Dinv = jnp.swapaxes(self.cov_factor, -1, -2) / jnp.expand_dims(
            self.cov_diag, axis=-2
        )
        # NOTE(review): the argument order here differs from the torch
        # analogue (solve_triangular(L, B)); verify the convention of the
        # solve_triangular in scope before touching this line.
        A = solve_triangular(Wt_Dinv, self._capacitance_tril, lower=True)
        # TODO: find a better solution to create a diagonal matrix
        inverse_cov_diag = jnp.reciprocal(self.cov_diag)
        diag_embed = inverse_cov_diag[..., jnp.newaxis] * jnp.identity(
            self.loc.shape[-1]
        )
        return diag_embed - jnp.matmul(jnp.swapaxes(A, -1, -2), A)

    def sample(self, key, sample_shape=()):
        assert is_prng_key(key)
        key_W, key_D = random.split(key)
        batch_shape = sample_shape + self.batch_shape
        W_shape = batch_shape + self.cov_factor.shape[-1:]
        D_shape = batch_shape + self.cov_diag.shape[-1:]
        eps_W = random.normal(key_W, W_shape)
        eps_D = random.normal(key_D, D_shape)
        # x = loc + W @ eps_W + sqrt(D) * eps_D has the target covariance.
        return (
            self.loc
            + _batch_mv(self.cov_factor, eps_W)
            + jnp.sqrt(self.cov_diag) * eps_D
        )

    @validate_sample
    def log_prob(self, value):
        diff = value - self.loc
        M = _batch_lowrank_mahalanobis(
            self.cov_factor, self.cov_diag, diff, self._capacitance_tril
        )
        log_det = _batch_lowrank_logdet(
            self.cov_factor, self.cov_diag, self._capacitance_tril
        )
        return -0.5 * (self.loc.shape[-1] * jnp.log(2 * jnp.pi) + log_det + M)

    def entropy(self):
        log_det = _batch_lowrank_logdet(
            self.cov_factor, self.cov_diag, self._capacitance_tril
        )
        H = 0.5 * (self.loc.shape[-1] * (1.0 + jnp.log(2 * jnp.pi)) + log_det)
        return jnp.broadcast_to(H, self.batch_shape)

    @staticmethod
    def infer_shapes(loc, cov_factor, cov_diag):
        # Shape-only computation used by numpyro's shape-inference machinery.
        event_shape = loc[-1:]
        batch_shape = lax.broadcast_shapes(loc[:-1], cov_factor[:-2], cov_diag[:-1])
        return batch_shape, event_shape
class Normal(Distribution):
    """Univariate normal (Gaussian) distribution with location and scale."""

    arg_constraints = {"loc": constraints.real, "scale": constraints.positive}
    support = constraints.real
    reparametrized_params = ["loc", "scale"]

    def __init__(self, loc=0.0, scale=1.0, validate_args=None):
        self.loc, self.scale = promote_shapes(loc, scale)
        batch_shape = lax.broadcast_shapes(jnp.shape(loc), jnp.shape(scale))
        super(Normal, self).__init__(
            batch_shape=batch_shape, validate_args=validate_args
        )

    def sample(self, key, sample_shape=()):
        assert is_prng_key(key)
        shape = sample_shape + self.batch_shape + self.event_shape
        noise = random.normal(key, shape=shape)
        # Reparametrized draw: loc + scale * N(0, 1).
        return self.loc + noise * self.scale

    @validate_sample
    def log_prob(self, value):
        z = (value - self.loc) / self.scale
        normalize_term = jnp.log(jnp.sqrt(2 * jnp.pi) * self.scale)
        return -0.5 * z ** 2 - normalize_term

    def cdf(self, value):
        return ndtr((value - self.loc) / self.scale)

    def icdf(self, q):
        return self.loc + self.scale * ndtri(q)

    @property
    def mean(self):
        return jnp.broadcast_to(self.loc, self.batch_shape)

    @property
    def variance(self):
        return jnp.broadcast_to(self.scale ** 2, self.batch_shape)
class Pareto(TransformedDistribution):
arg_constraints = {"scale": constraints.positive, "alpha": constraints.positive}
reparametrized_params = ["scale", "alpha"]
def __init__(self, scale, alpha, validate_args=None):
self.scale, self.alpha = promote_shapes(scale, alpha)
batch_shape = lax.broadcast_shapes(jnp.shape(scale), jnp.shape(alpha))
scale, alpha = jnp.broadcast_to(scale, batch_shape), jnp.broadcast_to(
alpha, batch_shape
)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.