input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
'duk_bi_thread_constructor',
'callable': True,
'constructable': True,
'values': [],
'functions': [
# 'yield' is a reserved word but does not prevent its use as a property name
{ 'name': 'yield', 'native': 'duk_bi_thread_yield', 'length': 2 },
{ 'name': 'resume', 'native': 'duk_bi_thread_resume', 'length': 3 },
{ 'name': 'current', 'native': 'duk_bi_thread_current', 'length': 0 },
]
}
# Duktape.Thread.prototype.  Unlike the E5 convention, this prototype is
# not a faux Thread instance, and because there is no explicit constructor
# object there is no "constructor" property here either.
bi_thread_prototype = {
    "internal_prototype": "bi_object_prototype",
    "external_constructor": "bi_thread_constructor",
    "class": "Thread",
    "values": [],
    "functions": [
        # toString() and valueOf() are inherited from Object.prototype
    ],
}
# Buffer constructor: a vararg Function object wrapping the native
# duk_bi_buffer_constructor.
bi_buffer_constructor = {
    "internal_prototype": "bi_function_prototype",
    "external_prototype": "bi_buffer_prototype",
    "class": "Function",
    "name": "Buffer",
    "length": 1,
    "varargs": True,
    "native": "duk_bi_buffer_constructor",
    "callable": True,
    "constructable": True,
    "values": [],
    "functions": [],
}

# Buffer.prototype: toString() and valueOf() share one native helper and
# are distinguished by their magic value (toString: 1, valueOf: 0).
bi_buffer_prototype = {
    "internal_prototype": "bi_object_prototype",
    "external_constructor": "bi_buffer_constructor",
    "class": "Buffer",
    "values": [],
    "functions": [
        {"name": "toString", "native": "duk_bi_buffer_prototype_tostring_shared",
         "length": 0, "magic": {"type": "plain", "value": 1}},
        {"name": "valueOf", "native": "duk_bi_buffer_prototype_tostring_shared",
         "length": 0, "magic": {"type": "plain", "value": 0}},
    ],
}
# Pointer constructor: a vararg Function object wrapping the native
# duk_bi_pointer_constructor.
bi_pointer_constructor = {
    "internal_prototype": "bi_function_prototype",
    "external_prototype": "bi_pointer_prototype",
    "class": "Function",
    "name": "Pointer",
    "length": 1,
    "varargs": True,
    "native": "duk_bi_pointer_constructor",
    "callable": True,
    "constructable": True,
    "values": [],
    "functions": [],
}

# Pointer.prototype: toString() and valueOf() share one native helper and
# are distinguished by their magic value (toString: 1, valueOf: 0).
bi_pointer_prototype = {
    "internal_prototype": "bi_object_prototype",
    "external_constructor": "bi_pointer_constructor",
    "class": "Pointer",
    "values": [],
    "functions": [
        {"name": "toString", "native": "duk_bi_pointer_prototype_tostring_shared",
         "length": 0, "magic": {"type": "plain", "value": 1}},
        {"name": "valueOf", "native": "duk_bi_pointer_prototype_tostring_shared",
         "length": 0, "magic": {"type": "plain", "value": 0}},
    ],
}
# Logger constructor: a vararg Function object wrapping the native
# duk_bi_logger_constructor.
bi_logger_constructor = {
    "internal_prototype": "bi_function_prototype",
    "external_prototype": "bi_logger_prototype",
    "class": "Function",
    "name": "Logger",
    "length": 1,
    "varargs": True,
    "native": "duk_bi_logger_constructor",
    "callable": True,
    "constructable": True,
    "values": [],
    "functions": [],
}

# Logger.prototype.  The six level methods (trace..fatal) share one native
# function; the magic value selects the log level.
bi_logger_prototype = {
    "internal_prototype": "bi_object_prototype",
    "external_constructor": "bi_logger_constructor",
    "class": "Object",
    "values": [
        # 'l': default log level, 2 = info
        {"name": "l", "value": 2, "attributes": "w"},
        # 'n': default logger name (used if undefined given or fileName of
        # caller is not known)
        {"name": "n", "value": "anon", "attributes": "w"},
    ],
    "functions": [
        {"name": "fmt", "native": "duk_bi_logger_prototype_fmt", "length": 1},
        {"name": "raw", "native": "duk_bi_logger_prototype_raw", "length": 1},
        {"name": "trace", "native": "duk_bi_logger_prototype_log_shared",
         "length": 0, "varargs": True, "magic": {"type": "plain", "value": 0}},
        {"name": "debug", "native": "duk_bi_logger_prototype_log_shared",
         "length": 0, "varargs": True, "magic": {"type": "plain", "value": 1}},
        {"name": "info", "native": "duk_bi_logger_prototype_log_shared",
         "length": 0, "varargs": True, "magic": {"type": "plain", "value": 2}},
        {"name": "warn", "native": "duk_bi_logger_prototype_log_shared",
         "length": 0, "varargs": True, "magic": {"type": "plain", "value": 3}},
        {"name": "error", "native": "duk_bi_logger_prototype_log_shared",
         "length": 0, "varargs": True, "magic": {"type": "plain", "value": 4}},
        {"name": "fatal", "native": "duk_bi_logger_prototype_log_shared",
         "length": 0, "varargs": True, "magic": {"type": "plain", "value": 5}},
    ],
}
# This is an Error *instance* used to avoid allocation when a "double error" occurs.
# The object is "frozen and sealed" to avoid code accidentally modifying the instance.
# This is important because the error is rethrown as is.
bi_double_error = {
    'internal_prototype': 'bi_error_prototype',
    'class': 'Error',
    'extensible': False,
    # Note: this is the only non-extensible built-in, so there is special
    # post-tweak in duk_hthread_builtins.c to handle this.
    'values': [
        # The empty 'attributes' string encodes zero property flags (see
        # encodePropertyFlags()): non-writable, non-enumerable,
        # non-configurable, i.e. effectively frozen.
        { 'name': 'name', 'value': 'DoubleError', 'attributes': '' },
        { 'name': 'message', 'value': 'error in error handling', 'attributes': '' },
    ],
    'functions': [
    ],
}
#
#  Built-ins table.  The ordering determines ordering for the DUK_BIDX_XXX
#  constants: each entry's list index becomes its builtin index (assigned in
#  GenBuiltins.initBuiltinIndex()), so entries must not be reordered without
#  regenerating everything that depends on those indexes.
#
builtins_orig = [
    # Standard ECMAScript built-ins, as constructor/prototype pairs where
    # applicable.
    { 'id': 'bi_global', 'info': bi_global },
    { 'id': 'bi_global_env', 'info': bi_global_env },
    { 'id': 'bi_object_constructor', 'info': bi_object_constructor },
    { 'id': 'bi_object_prototype', 'info': bi_object_prototype },
    { 'id': 'bi_function_constructor', 'info': bi_function_constructor },
    { 'id': 'bi_function_prototype', 'info': bi_function_prototype },
    { 'id': 'bi_array_constructor', 'info': bi_array_constructor },
    { 'id': 'bi_array_prototype', 'info': bi_array_prototype },
    { 'id': 'bi_string_constructor', 'info': bi_string_constructor },
    { 'id': 'bi_string_prototype', 'info': bi_string_prototype },
    { 'id': 'bi_boolean_constructor', 'info': bi_boolean_constructor },
    { 'id': 'bi_boolean_prototype', 'info': bi_boolean_prototype },
    { 'id': 'bi_number_constructor', 'info': bi_number_constructor },
    { 'id': 'bi_number_prototype', 'info': bi_number_prototype },
    { 'id': 'bi_date_constructor', 'info': bi_date_constructor },
    { 'id': 'bi_date_prototype', 'info': bi_date_prototype },
    { 'id': 'bi_regexp_constructor', 'info': bi_regexp_constructor },
    { 'id': 'bi_regexp_prototype', 'info': bi_regexp_prototype },
    # Error and its standard subclasses.
    { 'id': 'bi_error_constructor', 'info': bi_error_constructor },
    { 'id': 'bi_error_prototype', 'info': bi_error_prototype },
    { 'id': 'bi_eval_error_constructor', 'info': bi_eval_error_constructor },
    { 'id': 'bi_eval_error_prototype', 'info': bi_eval_error_prototype },
    { 'id': 'bi_range_error_constructor', 'info': bi_range_error_constructor },
    { 'id': 'bi_range_error_prototype', 'info': bi_range_error_prototype },
    { 'id': 'bi_reference_error_constructor', 'info': bi_reference_error_constructor },
    { 'id': 'bi_reference_error_prototype', 'info': bi_reference_error_prototype },
    { 'id': 'bi_syntax_error_constructor', 'info': bi_syntax_error_constructor },
    { 'id': 'bi_syntax_error_prototype', 'info': bi_syntax_error_prototype },
    { 'id': 'bi_type_error_constructor', 'info': bi_type_error_constructor },
    { 'id': 'bi_type_error_prototype', 'info': bi_type_error_prototype },
    { 'id': 'bi_uri_error_constructor', 'info': bi_uri_error_constructor },
    { 'id': 'bi_uri_error_prototype', 'info': bi_uri_error_prototype },
    { 'id': 'bi_math', 'info': bi_math },
    { 'id': 'bi_json', 'info': bi_json },
    { 'id': 'bi_type_error_thrower', 'info': bi_type_error_thrower },
    # custom (non-ECMAScript) built-ins
    { 'id': 'bi_duk', 'info': bi_duk },
    { 'id': 'bi_thread_constructor', 'info': bi_thread_constructor },
    { 'id': 'bi_thread_prototype', 'info': bi_thread_prototype },
    { 'id': 'bi_buffer_constructor', 'info': bi_buffer_constructor },
    { 'id': 'bi_buffer_prototype', 'info': bi_buffer_prototype },
    { 'id': 'bi_pointer_constructor', 'info': bi_pointer_constructor },
    { 'id': 'bi_pointer_prototype', 'info': bi_pointer_prototype },
    { 'id': 'bi_logger_constructor', 'info': bi_logger_constructor },
    { 'id': 'bi_logger_prototype', 'info': bi_logger_prototype },
    { 'id': 'bi_double_error', 'info': bi_double_error },
]
#
# GenBuiltins
#
class GenBuiltins:
    """Generate bit-packed initialization data for the built-in objects.

    Consumes the builtins_orig table (deep-copied per instance) and emits
    the property/function init stream plus the native function pointer
    array used by the C runtime.
    """
    # Build configuration (all set in __init__).
    build_info = None
    double_byte_order = None   # 'big', 'little' or 'mixed' (ARM); controls IEEE double packing
    ext_section_b = None       # include properties flagged 'section_b' -- TODO confirm semantics vs. ES5 Annex B
    ext_browser_like = None    # include properties flagged 'browser'
    # Working state.
    builtins = None            # deep copy of builtins_orig
    gs = None                  # string table; provides stringToIndex()/hasString() -- set externally
    init_data = None           # generated init byte stream
    initjs_data = None         # init JavaScript source, forced to be NUL terminated
    native_func_hash = None    # native func name -> index (initially -1, numbered later)
    native_func_list = None    # native func names, sorted, in index order
    builtin_indexes = None     # builtin id -> index into self.builtins
    # Statistics counters.
    count_builtins = None
    count_normal_props = None
    count_function_props = None
def __init__(self, build_info=None, initjs_data=None, double_byte_order=None, ext_section_b=None, ext_browser_like=None):
    """Initialize generator state.

    build_info: opaque build metadata, stored as-is.
    initjs_data: init JavaScript source; NUL termination is forced because
        the init code expects it.
    double_byte_order: 'big', 'little' or 'mixed' (ARM).
    ext_section_b / ext_browser_like: feature flags controlling which
        optional properties are emitted.
    """
    self.build_info = build_info
    self.double_byte_order = double_byte_order
    self.ext_section_b = ext_section_b
    self.ext_browser_like = ext_browser_like
    self.builtins = copy.deepcopy(builtins_orig)
    self.gs = None
    self.init_data = None
    self.initjs_data = initjs_data
    # BUG FIX: guard against the default initjs_data=None, which previously
    # crashed with a TypeError in len().  Behavior for string inputs is
    # unchanged (len > 1 and not already NUL terminated -> append NUL).
    if self.initjs_data is not None and len(self.initjs_data) > 1 and self.initjs_data[-1] != '\0':
        # force NUL termination, init code now expects that
        self.initjs_data += '\0'
    self.native_func_hash = {}
    self.native_func_list = []
    self.builtin_indexes = {}
    self.count_builtins = 0
    self.count_normal_props = 0
    self.count_function_props = 0
def findBuiltIn(self, id_):
    """Return the builtin record whose 'id' equals id_, or None if absent."""
    matches = (rec for rec in self.builtins if rec['id'] == id_)
    return next(matches, None)
def initBuiltinIndex(self):
    """Record each builtin's position in the table into builtin_indexes."""
    for pos, rec in enumerate(self.builtins):
        self.builtin_indexes[rec['id']] = pos
def getNativeFuncs(self, bi):
    """Collect all native function names referenced by builtin spec 'bi'.

    Scans the builtin's own 'native' entry, getters/setters in 'values',
    and 'native' entries in 'functions'.  Each name is registered in
    native_func_hash with a placeholder index of -1; real indexes are
    assigned later by numberNativeFuncs().
    """
    # Fix: dict.has_key() is deprecated in Python 2 and removed in
    # Python 3; the 'in' operator is equivalent and works in both.
    if 'native' in bi:
        self.native_func_hash[bi['native']] = -1
    for valspec in bi['values']:
        if 'getter' in valspec:
            self.native_func_hash[valspec['getter']] = -1
        if 'setter' in valspec:
            self.native_func_hash[valspec['setter']] = -1
    for funspec in bi['functions']:
        if 'native' in funspec:
            self.native_func_hash[funspec['native']] = -1
def numberNativeFuncs(self):
    """Assign a stable index to every collected native function.

    Names are numbered in sorted order so the generated indexes are
    deterministic; native_func_list ends up in index order.
    """
    # Fix: 'k = d.keys(); k.sort()' only works on Python 2 (dict.keys()
    # is a view in Python 3); sorted() is equivalent on both.
    for idx, name in enumerate(sorted(self.native_func_hash.keys())):
        self.native_func_hash[name] = idx
        self.native_func_list.append(name)
def writeNativeFuncArray(self, genc):
    """Emit the C array of native function pointers through 'genc'."""
    out = ['/* native functions: %d */' % len(self.native_func_list)]
    out.append('const duk_c_function duk_bi_native_functions[] = {')
    out.extend('\t(duk_c_function) %s,' % fn for fn in self.native_func_list)
    out.append('};')
    for line in out:
        genc.emitLine(line)
def generateDefineNames(self, id):
    """Map a builtin id like 'bi_foo_bar' to its C define names.

    Returns the pair (DUK_BIDX_FOO_BAR, DUK_BUILTIN_FOO_BAR): the leading
    'bi' token is dropped and the remainder uppercased.
    """
    parts = id.upper().split('_')
    tail = '_'.join(parts[1:])
    return ('DUK_BIDX_%s' % tail, 'DUK_BUILTIN_%s' % tail)
def encodePropertyFlags(self, flags):
    """Encode a property attribute string ('w', 'e', 'c', 'a') into flag bits.

    Raises if 'flags' contains characters outside the known set (or
    duplicates, since each known letter is counted at most once).
    Note: the bit values must match duk_hobject.h.
    """
    letter_to_bit = [
        ('w', PROPDESC_FLAG_WRITABLE),
        ('e', PROPDESC_FLAG_ENUMERABLE),
        ('c', PROPDESC_FLAG_CONFIGURABLE),
        ('a', PROPDESC_FLAG_ACCESSOR),
    ]
    res = 0
    recognized = 0
    for letter, bit in letter_to_bit:
        if letter in flags:
            recognized += 1
            res |= bit
    if recognized != len(flags):
        raise Exception('unsupported flags: %s' % repr(flags))
    return res
def resolveMagic(self, elem):
    """Resolve a 'magic' spec into its 16-bit encoded value.

    elem is None (no magic, encoded as 0) or a dict whose 'type' is:
      - 'bidx': value is a builtin id; magic is that builtin's table index
      - 'plain': value is a 16-bit signed integer, encoded as unsigned

    Raises Exception for unknown builtin ids, out-of-range plain values,
    or unknown magic types.
    """
    if elem is None:
        return 0
    # Fix: has_key() replaced with 'in' (deprecated in Py2, removed in Py3).
    assert('type' in elem)
    if elem['type'] == 'bidx':
        v = elem['value']
        for i, bi in enumerate(self.builtins):
            if bi['id'] == v:
                return i
        # BUG FIX: the original format string lacked a '%s' conversion
        # ('... magic: ' % repr(v)), so this raise itself failed with
        # "not all arguments converted during string formatting".
        raise Exception('invalid builtin index for magic: %s' % repr(v))
    elif elem['type'] == 'plain':
        v = elem['value']
        if not (v >= -0x8000 and v <= 0x7fff):
            raise Exception('invalid plain value for magic: %s' % repr(v))
        # Magic is a 16-bit signed value; mask to an unsigned 16-bit
        # representation for encoding.
        return v & 0xffff
    else:
        raise Exception('invalid magic type: %s' % repr(elem['type']))
def generatePropertiesDataForBuiltin(self, be, bi):
self.count_builtins += 1
if bi.has_key('internal_prototype'):
be.bits(self.builtin_indexes[bi['internal_prototype']], BIDX_BITS)
else:
be.bits(NO_BIDX_MARKER, BIDX_BITS)
if bi.has_key('external_prototype'):
be.bits(self.builtin_indexes[bi['external_prototype']], BIDX_BITS)
else:
be.bits(NO_BIDX_MARKER, BIDX_BITS)
if bi.has_key('external_constructor'):
be.bits(self.builtin_indexes[bi['external_constructor']], BIDX_BITS)
else:
be.bits(NO_BIDX_MARKER, BIDX_BITS)
# Filter values and functions
values = []
for valspec in bi['values']:
if valspec.has_key('section_b') and valspec['section_b'] and not gb.ext_section_b:
continue
if valspec.has_key('browser') and valspec['browser'] and not gb.ext_browser_like:
continue
values.append(valspec)
functions = []
for valspec in bi['functions']:
if valspec.has_key('section_b') and valspec['section_b'] and not self.ext_section_b:
continue
if valspec.has_key('browser') and valspec['browser'] and not self.ext_browser_like:
continue
functions.append(valspec)
be.bits(len(values), NUM_NORMAL_PROPS_BITS)
for valspec in values:
self.count_normal_props += 1
# NOTE: we rely on there being less than 256 built-in strings
stridx = self.gs.stringToIndex(valspec['name'])
val = valspec.get('value') # missing for accessors
be.bits(stridx, STRIDX_BITS)
if valspec['name'] == 'length':
default_attrs = LENGTH_PROPERTY_ATTRIBUTES
else:
default_attrs = DEFAULT_PROPERTY_ATTRIBUTES
attrs = default_attrs
if valspec.has_key('attributes'):
attrs = valspec['attributes']
# attribute check doesn't check for accessor flag; that is now
# automatically set by C code when value is an accessor type
if attrs != default_attrs:
#print 'non-default attributes: %s -> %r (default %r)' % (valspec['name'], attrs, default_attrs)
be.bits(1, 1) # flag: have custom attributes
be.bits(self.encodePropertyFlags(attrs), PROP_FLAGS_BITS)
else:
be.bits(0, 1) # flag: no custom attributes
if isinstance(val, bool):
if val == True:
be.bits(PROP_TYPE_BOOLEAN_TRUE, PROP_TYPE_BITS)
else:
be.bits(PROP_TYPE_BOOLEAN_FALSE, PROP_TYPE_BITS)
elif val == UNDEFINED:
be.bits(PROP_TYPE_UNDEFINED, PROP_TYPE_BITS)
elif isinstance(val, (float, int)):
be.bits(PROP_TYPE_DOUBLE, PROP_TYPE_BITS)
val = float(val)
# encoding of double must match target architecture byte order
bo = self.double_byte_order
if bo == 'big':
data = struct.pack('>d', val) # 01234567
elif bo == 'little':
data = struct.pack('<d', val) # 76543210
elif bo == 'mixed': # arm
data = struct.pack('<d', val) # 32107654
data = data[4:8] + data[0:4]
else:
raise Exception('unsupported byte order: %s' % repr(bo))
#print('DOUBLE: ' + data.encode('hex'))
if len(data) != 8:
raise Exception('internal error')
be.string(data)
elif isinstance(val, str) or isinstance(val, unicode):
if isinstance(val, unicode):
# Note: non-ASCII characters will not currently work,
# because bits/char is too low.
val = val.encode('utf-8')
if self.gs.hasString(val):
# String value is in built-in string table -> encode
# using a string index. This saves some space,
# especially for the 'name' property of errors
# ('EvalError' etc).
stridx = self.gs.stringToIndex(val)
be.bits(PROP_TYPE_STRIDX, PROP_TYPE_BITS)
be.bits(stridx, STRIDX_BITS)
else:
# Not in string table -> encode as raw 7-bit value
be.bits(PROP_TYPE_STRING, PROP_TYPE_BITS)
be.bits(len(val), STRING_LENGTH_BITS)
for i in xrange(len(val)):
t = ord(val[i])
be.bits(t, STRING_CHAR_BITS)
elif isinstance(val, dict):
if val['type'] == 'builtin':
be.bits(PROP_TYPE_BUILTIN, PROP_TYPE_BITS)
be.bits(self.builtin_indexes[val['id']], BIDX_BITS)
else:
raise Exception('unsupported value: %s' % repr(val))
elif val is None and valspec.has_key('getter') and valspec.has_key('setter'):
be.bits(PROP_TYPE_ACCESSOR, PROP_TYPE_BITS)
natidx = self.native_func_hash[valspec['getter']]
be.bits(natidx, NATIDX_BITS)
natidx = self.native_func_hash[valspec['setter']]
be.bits(natidx, NATIDX_BITS)
else:
raise Exception('unsupported value: %s' % repr(val))
be.bits(len(functions), NUM_FUNC_PROPS_BITS)
for funspec in functions:
self.count_function_props += 1
# NOTE: we rely on there being less than 256 built-in strings
# and built-in native functions
stridx = self.gs.stringToIndex(funspec['name'])
be.bits(stridx, STRIDX_BITS)
natidx = self.native_func_hash[funspec['native']]
be.bits(natidx, NATIDX_BITS)
length = funspec['length']
be.bits(length, LENGTH_PROP_BITS)
if funspec.get('varargs', False):
be.bits(1, 1) # flag: non-default nargs
be.bits(NARGS_VARARGS_MARKER, NARGS_BITS)
elif funspec.has_key('nargs'):
be.bits(1, 1) # flag: non-default nargs
be.bits(funspec['nargs'], NARGS_BITS)
else:
be.bits(0, 1) # flag: default nargs OK
# XXX: make this check conditional to minimize bit count
# (there are quite a lot of function properties)
magic = self.resolveMagic(funspec.get('magic'))
if magic != 0:
assert(magic >= 0)
assert(magic < (1 << MAGIC_BITS))
be.bits(1, 1)
be.bits(magic, MAGIC_BITS)
else:
be.bits(0, | |
#!/usr/bin/env python
#____________________________________________________________
#
#
# A very simple way to make plots with ROOT via an XML file
#
# <NAME>
# <EMAIL>
#
# Fermilab, 2010
#
#____________________________________________________________
"""
plotBeamSpotDB
A very simple script to plot the beam spot data stored in condDB
usage: %prog -t <tag name>
-a, --auth = AUTH: DB authorization path. online(/nfshome0/popcondev/conddb).
-b, --batch : Run ROOT in batch mode.
-c, --create = CREATE: name for beam spot data file.
-d, --data = DATA: input beam spot data file.
-D, --destDB = DESTDB: destination DB string. online(oracle://cms_orcon_prod/CMS_COND_31X_BEAMSPOT).
-i, --initial = INITIAL: First IOV. Options: run number, or run:lumi, eg. \"133200:21\"
-f, --final = FINAL: Last IOV. Options: run number, or run:lumi
-g, --graph : create a TGraphError instead of a TH1 object
-n, --noplot : Only extract beam spot data, plots are not created.
-o, --output = OUTPUT: filename of ROOT file with plots.
-p, --payload = PAYLOAD: filename of output text file. Combine and splits lumi IOVs.
-P, --Print : create PNG plots from canvas.
-s, --suffix = SUFFIX: Suffix will be added to plots filename.
-t, --tag = TAG: Database tag name.
-T, --Time : create plots with time axis.
-I, --IOVbase = IOVBASE: options: runbase(default), lumibase, timebase
-w, --wait : Pause script after plotting each new histogram.
-W, --weighted : Create a weighted result for a range of lumi IOVs, skip lumi IOV combination and splitting.
-x, --xcrossing = XCROSSING : Bunch crossing number.
<NAME> (<EMAIL>)
Fermilab 2010
"""
import os, string, re, sys, math
import commands, time
from BeamSpotObj import BeamSpot
from IOVObj import IOV
from CommonMethods import *
try:
import ROOT
except:
print "\nCannot load PYROOT, make sure you have setup ROOT in the path"
print "and pyroot library is also defined in the variable PYTHONPATH, try:\n"
if (os.getenv("PYTHONPATH")):
print " setenv PYTHONPATH ${PYTHONPATH}:$ROOTSYS/lib\n"
else:
print " setenv PYTHONPATH $ROOTSYS/lib\n"
sys.exit()
from ROOT import TFile, TGraphErrors, TGaxis, TDatime
from ROOT import TCanvas, TH1F
# ROOT STYLE
#############################
def SetStyle():
    """Configure the global ROOT plotting style (gStyle) and force it.

    Mutates global ROOT state only; takes no arguments and returns
    nothing.  Must be called before any canvases/histograms are drawn
    for the style to apply uniformly.
    """
    # canvas
    ROOT.gStyle.SetCanvasBorderMode(0)
    ROOT.gStyle.SetCanvasColor(0)
    ROOT.gStyle.SetCanvasDefH(600)
    ROOT.gStyle.SetCanvasDefW(600)
    ROOT.gStyle.SetCanvasDefX(0)
    ROOT.gStyle.SetCanvasDefY(0)
    # pad
    ROOT.gStyle.SetPadBorderMode(0)
    ROOT.gStyle.SetPadColor(0)
    ROOT.gStyle.SetPadGridX(False)
    ROOT.gStyle.SetPadGridY(False)
    ROOT.gStyle.SetGridColor(0)
    ROOT.gStyle.SetGridStyle(3)
    ROOT.gStyle.SetGridWidth(1)
    ROOT.gStyle.SetFrameBorderMode(0)
    ROOT.gStyle.SetFrameFillColor(0)
    ROOT.gStyle.SetTitleColor(1)
    ROOT.gStyle.SetStatColor(0)
    # set the paper & margin sizes
    ROOT.gStyle.SetPaperSize(20,26)
    ROOT.gStyle.SetPadTopMargin(0.04)
    ROOT.gStyle.SetPadRightMargin(0.04)
    ROOT.gStyle.SetPadBottomMargin(0.14)
    ROOT.gStyle.SetPadLeftMargin(0.11)
    # tick marks on both axes
    ROOT.gStyle.SetPadTickX(1)
    ROOT.gStyle.SetPadTickY(1)
    # fonts and label/title sizing
    ROOT.gStyle.SetTextFont(42) #132
    ROOT.gStyle.SetTextSize(0.09)
    ROOT.gStyle.SetLabelFont(42,"xyz")
    ROOT.gStyle.SetTitleFont(42,"xyz")
    ROOT.gStyle.SetLabelSize(0.035,"xyz")
    ROOT.gStyle.SetTitleSize(0.045,"xyz")
    ROOT.gStyle.SetTitleOffset(1.1,"y")
    # use bold lines and markers
    ROOT.gStyle.SetMarkerStyle(8)
    ROOT.gStyle.SetHistLineWidth(2)
    ROOT.gStyle.SetLineWidth(1)
    #ROOT.gStyle.SetLineStyleString(2,"[12 12]") // postscript dashes
    ROOT.gStyle.SetMarkerSize(0.6)
    # do not display any of the standard histogram decorations
    ROOT.gStyle.SetOptTitle(0)
    ROOT.gStyle.SetOptStat(0) #("m")
    ROOT.gStyle.SetOptFit(0)
    #ROOT.gStyle.SetPalette(1,0)
    # make this style current and re-style any existing objects
    ROOT.gStyle.cd()
    ROOT.gROOT.ForceStyle()
#########################################
if __name__ == '__main__':
# style
SetStyle()
printCanvas = False
printFormat = "png"
printBanner = False
Banner = "CMS Preliminary"
# COMMAND LINE OPTIONS
#################################
option,args = parse(__doc__)
if not args and not option: exit()
tag = ''
if not option.tag and not option.data:
print " need to provide DB tag name or beam spot data file"
exit()
else:
tag = option.tag
if option.batch:
ROOT.gROOT.SetBatch()
datafilename = "tmp_beamspot.dat"
if option.create:
datafilename = option.create
getDBdata = True
if option.data:
getDBdata = False
IOVbase = 'runbase'
if option.IOVbase:
if option.IOVbase != "runbase" and option.IOVbase != "lumibase" and option.IOVbase != "timebase":
print "\n\n unknown iov base option: "+ option.IOVbase +" \n\n\n"
exit()
IOVbase = option.IOVbase
firstRun = "0"
lastRun = "4999999999"
if IOVbase == "lumibase":
firstRun = "0:0"
lastRun = "4999999999:4999999999"
if option.initial:
firstRun = option.initial
if option.final:
lastRun = option.final
# GET IOVs
################################
if getDBdata:
print " read DB to get list of IOVs for the given tag"
mydestdb = 'frontier://PromptProd/CMS_COND_31X_BEAMSPOT'
if option.destDB:
mydestdb = option.destDB
acommand = 'cmscond_list_iov -c '+mydestdb+' -P /afs/cern.ch/cms/DB/conddb -t '+ tag
tmpstatus = commands.getstatusoutput( acommand )
tmplistiov = tmpstatus[1].split('\n')
#print tmplistiov
iovlist = []
passline = False
iline = jline = 0
totlines = len(tmplistiov)
for line in tmplistiov:
if line.find('since') != -1:
passline = True
jline = iline
if passline and iline > jline and iline < totlines-1:
linedata = line.split()
#print linedata
aIOV = IOV()
aIOV.since = int(linedata[0])
aIOV.till = int(linedata[1])
iovlist.append( aIOV )
iline += 1
print " total number of IOVs = " + str(len(iovlist))
# GET DATA
################################
otherArgs = ''
if option.destDB:
otherArgs = " -d " + option.destDB
if option.auth:
otherArgs = otherArgs + " -a "+ option.auth
print " get beam spot data from DB for IOVs. This can take a few minutes ..."
tmpfile = open(datafilename,'w')
for iIOV in iovlist:
passiov = False
tmprunfirst = firstRun
tmprunlast = lastRun
tmplumifirst = 1
tmplumilast = 9999999
if IOVbase=="lumibase":
#tmprunfirst = int(firstRun.split(":")[0])
#tmprunlast = int(lastRun.split(":")[0])
#tmplumifirst = int(firstRun.split(":")[1])
#tmplumilast = int(lastRun.split(":")[1])
tmprunfirst = pack( int(firstRun.split(":")[0]) , int(firstRun.split(":")[1]) )
tmprunlast = pack( int(lastRun.split(":")[0]) , int(lastRun.split(":")[1]) )
#print "since = " + str(iIOV.since) + " till = "+ str(iIOV.till)
if iIOV.since >= int(tmprunfirst) and int(tmprunlast) < 0 and iIOV.since <= int(tmprunfirst):
print " IOV: " + str(iIOV.since)
passiov = True
if iIOV.since >= int(tmprunfirst) and int(tmprunlast) > 0 and iIOV.till <= int(tmprunlast):
print " a IOV: " + str(iIOV.since) + " to " + str(iIOV.till)
passiov = True
#if iIOV.since >= int(tmprunlast) and iIOV.till >= 4294967295:
# print " b IOV: " + str(iIOV.since) + " to " + str(iIOV.till)
# passiov = True
if passiov:
acommand = 'getBeamSpotDB.py -t '+ tag + " -r " + str(iIOV.since) +otherArgs
if IOVbase=="lumibase":
tmprun = unpack(iIOV.since)[0]
tmplumi = unpack(iIOV.since)[1]
acommand = 'getBeamSpotDB.py -t '+ tag + " -r " + str(tmprun) +" -l "+str(tmplumi) +otherArgs
print acommand
status = commands.getstatusoutput( acommand )
tmpfile.write(status[1])
print " beam spot data collected and stored in file " + datafilename
tmpfile.close()
# PROCESS DATA
###################################
# check if input data exists if given
if option.data:
if os.path.isdir(option.data):
tmp = commands.getstatusoutput("ls "+option.data)
files = tmp[1].split()
datafilename = "combined_all.txt"
output = open(datafilename,"w")
for f in files:
input = open(option.data +"/"+f)
output.writelines(input.readlines())
output.close()
print " data files have been collected in "+datafilename
elif os.path.exists(option.data):
datafilename = option.data
else:
print " input beam spot data DOES NOT exist, file " + option.data
exit()
listbeam = []
if option.xcrossing:
listmap = readBeamSpotFile(datafilename,listbeam,IOVbase,firstRun,lastRun)
# bx
print "List of bunch crossings in the file:"
print listmap.keys()
listbeam = listmap[option.Xrossing]
else:
readBeamSpotFile(datafilename,listbeam,IOVbase,firstRun,lastRun)
sortAndCleanBeamList(listbeam,IOVbase)
if IOVbase == "lumibase" and option.payload:
weighted = True;
if not option.weighted:
weighted = False
createWeightedPayloads(option.payload,listbeam,weighted)
if option.noplot:
print " no plots requested, exit now."
sys.exit()
# MAKE PLOTS
###################################
TGaxis.SetMaxDigits(8)
graphlist = []
graphnamelist = ['X','Y','Z','SigmaZ','dxdz','dydz','beamWidthX', 'beamWidthY']
graphtitlelist = ['beam spot X','beam spot Y','beam spot Z','beam spot #sigma_Z','beam spot dX/dZ','beam spot dY/dZ','beam width X','beam width Y']
graphXaxis = 'Run number'
if IOVbase == 'runbase':
graphXaxis = "Run number"
if IOVbase == 'lumibase':
graphXaxis = 'Lumi section'
if IOVbase == 'timebase' or option.Time:
graphXaxis = "Time"
#dh = ROOT.TDatime(2010,06,01,00,00,00)
ROOT.gStyle.SetTimeOffset(0) #dh.Convert())
graphYaxis = ['beam spot X [cm]','beam spot Y [cm]','beam spot Z [cm]', 'beam spot #sigma_{Z} [cm]', 'beam spot dX/dZ', 'beam spot dY/dZ','beam width X [cm]', 'beam width Y [cm]']
cvlist = []
for ig in range(0,8):
cvlist.append( TCanvas(graphnamelist[ig],graphtitlelist[ig], 1200, 600) )
if option.graph:
graphlist.append( TGraphErrors( len(listbeam) ) )
else:
graphlist.append( TH1F("name","title",len(listbeam),0,len(listbeam)) )
graphlist[ig].SetName(graphnamelist[ig])
graphlist[ig].SetTitle(graphtitlelist[ig])
ipoint = 0
for ii in range(0,len(listbeam)):
ibeam = listbeam[ii]
datax = dataxerr = 0.
datay = datayerr = 0.
if graphnamelist[ig] == 'X':
datay = ibeam.X
datayerr = ibeam.Xerr
if graphnamelist[ig] == 'Y':
datay = ibeam.Y
datayerr = ibeam.Yerr
if graphnamelist[ig] == 'Z':
datay = ibeam.Z
datayerr = ibeam.Zerr
if graphnamelist[ig] == 'SigmaZ':
datay = ibeam.sigmaZ
datayerr = ibeam.sigmaZerr
if graphnamelist[ig] == 'dxdz':
datay = ibeam.dxdz
datayerr = ibeam.dxdzerr
if graphnamelist[ig] == 'dydz':
datay = ibeam.dydz
datayerr = ibeam.dydzerr
if graphnamelist[ig] == 'beamWidthX':
datay = ibeam.beamWidthX
datayerr = ibeam.beamWidthXerr
if graphnamelist[ig] == 'beamWidthY':
datay = ibeam.beamWidthY
datayerr = ibeam.beamWidthYerr
datax = ibeam.IOVfirst
if IOVbase=="lumibase":
datax = str(ibeam.Run) + ":" + str(ibeam.IOVfirst)
if ibeam.IOVfirst != ibeam.IOVlast:
datax = str(ibeam.Run) + ":" + str(ibeam.IOVfirst)+"-"+str(ibeam.IOVlast)
#print datax
if option.graph:
if IOVbase=="lumibase":
#first = int( pack( int(ibeam.Run) , int(ibeam.IOVfirst) ) )
#last = int( pack( int(ibeam.Run) , int(ibeam.IOVlast) ) )
first = ibeam.IOVfirst
last = ibeam.IOVlast
if option.Time:
atime = ibeam.IOVBeginTime
first = time.mktime( time.strptime(atime.split()[0] + " " + atime.split()[1] + " " + atime.split()[2],"%Y.%m.%d %H:%M:%S | |
<reponame>Zac-hills/d3m-primitives
import os
import copy
import typing
import sys
import logging
import numpy as np
import pandas as pd
from Simon import Simon
from Simon.penny.guesser import guess
from d3m.primitive_interfaces.unsupervised_learning import (
UnsupervisedLearnerPrimitiveBase,
)
from d3m.primitive_interfaces.base import CallResult
from d3m.exceptions import PrimitiveNotFittedError
from d3m import container, utils
from d3m.base import utils as base_utils
from d3m.container import DataFrame as d3m_DataFrame
from d3m.metadata import hyperparams, base as metadata_base, params
import tensorflow as tf
# Primitive authorship/version metadata (consumed by d3m tooling).
__author__ = "Distil"
__version__ = "1.2.3"
__contact__ = "mailto:<EMAIL>"
# Module-level logger; level intentionally left to the root configuration.
logger = logging.getLogger(__name__)
# logger.setLevel(logging.DEBUG)
# D3M container types used as the primitive's input and output types.
Inputs = container.pandas.DataFrame
Outputs = container.pandas.DataFrame
# Maps the column-type labels produced by the Simon classifier to the
# corresponding D3M / schema.org semantic-type URIs attached to columns.
SIMON_ANNOTATIONS_DICT = {
    "categorical": "https://metadata.datadrivendiscovery.org/types/CategoricalData",
    "email": "http://schema.org/email",
    "text": "http://schema.org/Text",
    "uri": "https://metadata.datadrivendiscovery.org/types/FileName",
    "address": "http://schema.org/address",
    "state": "http://schema.org/State",
    "city": "http://schema.org/City",
    "postal_code": "http://schema.org/postalCode",
    "latitude": "http://schema.org/latitude",
    "longitude": "http://schema.org/longitude",
    "country": "http://schema.org/Country",
    "country_code": "http://schema.org/addressCountry",
    "boolean": "http://schema.org/Boolean",
    "datetime": "http://schema.org/DateTime",
    "float": "http://schema.org/Float",
    "int": "http://schema.org/Integer",
    "phone": "https://metadata.datadrivendiscovery.org/types/AmericanPhoneNumber",
    "ordinal": "https://metadata.datadrivendiscovery.org/types/OrdinalData",
}
class Params(params.Params):
    """Serializable fitted state for the primitive.

    Both fields are Optional and hold lists of semantic-type URI lists.
    NOTE(review): presumably one inner list per processed column (types to
    add / remove per column) -- confirm against the fit/produce logic,
    which is not visible in this chunk.
    """
    add_semantic_types: typing.Optional[typing.List[typing.List[str]]]
    remove_semantic_types: typing.Optional[typing.List[typing.List[str]]]
class Hyperparams(hyperparams.Hyperparams):
detect_semantic_types = hyperparams.Set(
elements=hyperparams.Enumeration(
values=[
"http://schema.org/Boolean",
"https://metadata.datadrivendiscovery.org/types/CategoricalData",
"http://schema.org/Integer",
"http://schema.org/Float",
"http://schema.org/Text",
"http://schema.org/DateTime",
"https://metadata.datadrivendiscovery.org/types/Time",
"https://metadata.datadrivendiscovery.org/types/OrdinalData",
"https://metadata.datadrivendiscovery.org/types/AmericanPhoneNumber",
"http://schema.org/addressCountry",
"http://schema.org/Country",
"http://schema.org/longitude",
"http://schema.org/latitude",
"http://schema.org/postalCode",
"http://schema.org/City",
"http://schema.org/State",
"http://schema.org/address",
"http://schema.org/email",
"https://metadata.datadrivendiscovery.org/types/FileName",
"https://metadata.datadrivendiscovery.org/types/UniqueKey",
"https://metadata.datadrivendiscovery.org/types/Attribute",
"https://metadata.datadrivendiscovery.org/types/TrueTarget",
"https://metadata.datadrivendiscovery.org/types/UnknownType",
"https://metadata.datadrivendiscovery.org/types/PrimaryKey",
"https://metadata.datadrivendiscovery.org/types/PrimaryMultiKey",
],
# Default is ignored.
# TODO: Remove default. See: https://gitlab.com/datadrivendiscovery/d3m/issues/141
default="http://schema.org/Boolean",
),
default=(
"http://schema.org/Boolean",
"https://metadata.datadrivendiscovery.org/types/CategoricalData",
"http://schema.org/Integer",
"http://schema.org/Float",
"http://schema.org/Text",
"http://schema.org/DateTime",
"https://metadata.datadrivendiscovery.org/types/Time",
"https://metadata.datadrivendiscovery.org/types/OrdinalData",
"https://metadata.datadrivendiscovery.org/types/AmericanPhoneNumber",
"http://schema.org/addressCountry",
"http://schema.org/Country",
"http://schema.org/longitude",
"http://schema.org/latitude",
"http://schema.org/postalCode",
"http://schema.org/City",
"http://schema.org/State",
"http://schema.org/address",
"http://schema.org/email",
"https://metadata.datadrivendiscovery.org/types/FileName",
"https://metadata.datadrivendiscovery.org/types/UniqueKey",
"https://metadata.datadrivendiscovery.org/types/Attribute",
"https://metadata.datadrivendiscovery.org/types/TrueTarget",
"https://metadata.datadrivendiscovery.org/types/UnknownType",
"https://metadata.datadrivendiscovery.org/types/PrimaryKey",
"https://metadata.datadrivendiscovery.org/types/PrimaryMultiKey",
),
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="A set of semantic types to detect and set. One can provide a subset \
of supported semantic types to limit what the primitive detects.",
)
# Whether to strip the UnknownType marker from a column once the primitive
# has detected another semantic type for it.
remove_unknown_type = hyperparams.UniformBool(
    default=True,
    semantic_types=[
        "https://metadata.datadrivendiscovery.org/types/ControlParameter"
    ],
    description='Remove "https://metadata.datadrivendiscovery.org/types/UnknownType" semantic \
type from columns on which the primitive has detected other semantic types.',
)
# Explicit column indices the primitive must operate on (columns that cannot
# be processed are skipped). -1 is only the element-hyperparameter default.
use_columns = hyperparams.Set(
    elements=hyperparams.Hyperparameter[int](-1),
    default=(),
    semantic_types=[
        "https://metadata.datadrivendiscovery.org/types/ControlParameter"
    ],
    description="A set of column indices to force primitive to operate on. If any specified column \
cannot be detected, it is skipped.",
)
# Column indices to skip; only honored when use_columns is empty.
exclude_columns = hyperparams.Set(
    elements=hyperparams.Hyperparameter[int](-1),
    default=(),
    semantic_types=[
        "https://metadata.datadrivendiscovery.org/types/ControlParameter"
    ],
    description='A set of column indices to not operate on. Applicable only if "use_columns" \
is not provided.',
)
# How detected columns are merged back into the output dataframe.
return_result = hyperparams.Enumeration(
    values=["append", "replace", "new"],
    default="replace",
    semantic_types=[
        "https://metadata.datadrivendiscovery.org/types/ControlParameter"
    ],
    description="Should detected columns be appended, should they replace original columns, \
or should only detected columns be returned?",
)
# Keep primary-index columns in the output when return_result == "new".
add_index_columns = hyperparams.UniformBool(
    default=True,
    semantic_types=[
        "https://metadata.datadrivendiscovery.org/types/ControlParameter"
    ],
    description='Also include primary index columns if input data has them. Applicable only \
if "return_result" is set to "new".',
)
# When appending, replace (rather than duplicate) primary-index columns.
replace_index_columns = hyperparams.UniformBool(
    default=True,
    semantic_types=[
        "https://metadata.datadrivendiscovery.org/types/ControlParameter"
    ],
    description='Replace primary index columns even if otherwise appending columns. \
Applicable only if "return_result" is set to "append".',
)
# If False, only columns still marked UnknownType are (re-)profiled.
overwrite = hyperparams.UniformBool(
    default=False,
    semantic_types=[
        "https://metadata.datadrivendiscovery.org/types/ControlParameter"
    ],
    description="whether to overwrite manual annotations with SIMON annotations. If overwrite is set to \
False only columns with `UnknownType` will be processed, otherwise all columns will be processed",
)
# Enables the rule-based (non-LSTM) categorical/ordinal detection pass.
statistical_classification = hyperparams.UniformBool(
    default=True,
    semantic_types=[
        "https://metadata.datadrivendiscovery.org/types/ControlParameter"
    ],
    description="whether to infer categorical and ordinal annotations using rule-based classification",
)
# Allow more than one semantic type per column.
multi_label_classification = hyperparams.UniformBool(
    default=True,
    semantic_types=[
        "https://metadata.datadrivendiscovery.org/types/ControlParameter"
    ],
    description="whether to perfrom multi-label classification and potentially append multiple \
annotations to metadata.",
)
# Cap on rows sampled from the dataset when inferring types (tuning knob).
max_rows = hyperparams.UniformInt(
    lower=100,
    upper=2000,
    default=500,
    semantic_types=[
        "https://metadata.datadrivendiscovery.org/types/TuningParameter"
    ],
    description="maximum number of rows from the dataset to process when inferring column semantic types",
)
# Minimum prediction probability for a semantic type to be annotated.
p_threshold = hyperparams.Uniform(
    lower=0,
    upper=1.0,
    default=0.9,
    upper_inclusive=True,
    semantic_types=[
        "https://metadata.datadrivendiscovery.org/types/TuningParameter"
    ],
    description="probability threshold to use when decoding classification results. Semantic types \
with prediction probabilities above `p_threshold` will be added",
)
class SimonPrimitive(
UnsupervisedLearnerPrimitiveBase[Inputs, Outputs, Params, Hyperparams]
):
"""This primitive infers the semantic type of each column using a pre-trained LSTM-CNN model.
The model was trained on simulated data of different semantic types using the python Faker library.
A hyperparameter `return_result` controls whether Simon's inferences replace existing metadata,
append new columns with inferred metadata, or return a new dataframe with only the inferred columns.
Simon can append multiple annotations if the hyperparameter `multi_label_classification` is
set to 'True'. If `statistical_classification` is set to True, Simon will use rule-based heuristics
to label categorical and ordinal columns. Finally, the `p_threshold` hyperparameter varies the
prediction probability threshold for adding annotations.
The following annotations will only be considered if `statistical_classification` is set to False:
"https://metadata.datadrivendiscovery.org/types/AmericanPhoneNumber",
"http://schema.org/addressCountry", "http://schema.org/Country",
"http://schema.org/longitude", "http://schema.org/latitude",
"http://schema.org/postalCode", "http://schema.org/City",
"http://schema.org/State", "http://schema.org/address", "http://schema.org/email",
"https://metadata.datadrivendiscovery.org/types/FileName"
The following annotations will only be considered if `statistical_classification` is set to True:
"https://metadata.datadrivendiscovery.org/types/OrdinalData"
"""
# D3M primitive metadata: identity, packaging/installation requirements
# (including the pre-trained simon_models_1 weights volume), and taxonomy.
metadata = metadata_base.PrimitiveMetadata(
    {
        "id": "d2fa8df2-6517-3c26-bafc-87b701c4043a",
        "version": __version__,
        "name": "simon",
        "keywords": [
            "Data Type Predictor",
            "Semantic Classification",
            "Text",
            "NLP",
            "Tabular",
        ],
        "source": {
            "name": __author__,
            "contact": __contact__,
            "uris": [
                "https://github.com/kungfuai/d3m-primitives",
            ],
        },
        # Installed in order: cython first, then this package pinned to the
        # current git commit, then the pre-trained model tarball (a "volume"
        # mounted at runtime and verified by digest).
        "installation": [
            {"type": "PIP", "package": "cython", "version": "0.29.16"},
            {
                "type": metadata_base.PrimitiveInstallationType.PIP,
                "package_uri": "git+https://github.com/kungfuai/d3m-primitives.git@{git_commit}#egg=kf-d3m-primitives".format(
                    git_commit=utils.current_git_commit(os.path.dirname(__file__)),
                ),
            },
            {
                "type": "TGZ",
                "key": "simon_models_1",
                "file_uri": "http://public.datadrivendiscovery.org/simon_models_1.tar.gz",
                "file_digest": "d071106b823ab1168879651811dd03b829ab0728ba7622785bb5d3541496c45f",
            },
        ],
        "python_path": "d3m.primitives.data_cleaning.column_type_profiler.Simon",
        "algorithm_types": [
            metadata_base.PrimitiveAlgorithmType.CONVOLUTIONAL_NEURAL_NETWORK,
        ],
        "primitive_family": metadata_base.PrimitiveFamily.DATA_CLEANING,
    }
)
def __init__(
    self,
    *,
    hyperparams: Hyperparams,
    random_seed: int = 0,
    volumes: typing.Dict[str, str] = None,
) -> None:
    """Construct the primitive.

    Keyword Arguments:
        hyperparams {Hyperparams} -- hyperparameter instance
        random_seed {int} -- random seed (default: {0})
        volumes {typing.Dict[str, str]} -- mapping of static-file keys to
            local paths (holds the pre-trained model weights)
    """
    super().__init__(
        hyperparams=hyperparams, random_seed=random_seed, volumes=volumes
    )
    self._volumes = volumes
    self._X_train: Inputs = None
    self._add_semantic_types: typing.List[typing.List[str]] = None
    self._remove_semantic_types: typing.List[typing.List[str]] = None
    self.random_seed = random_seed
    # Bug fix: initialize the fitted flag here. Previously it was only set in
    # set_training_data()/fit(), so calling produce() on a fresh instance
    # raised AttributeError instead of the intended PrimitiveNotFittedError.
    self._is_fit = False
def set_training_data(self, *, inputs: Inputs) -> None:
    """Store the training dataframe and mark the primitive as unfitted.

    Arguments:
        inputs {Inputs} -- D3M dataframe to learn column annotations from
    """
    # New data invalidates any previously learned annotations.
    self._is_fit = False
    self._X_train = inputs
def fit(self, *, timeout: float = None, iterations: int = None) -> CallResult[None]:
    """Learns column annotations using training data. Saves to apply to testing data.

    Builds, per processed column, the list of semantic types to add and the
    list to remove (stored in self._add_semantic_types /
    self._remove_semantic_types, aligned with the processed column indices).

    Keyword Arguments:
        timeout {float} -- timeout, not considered (default: {None})
        iterations {int} -- iterations, not considered (default: {None})
    Returns:
        CallResult[None]
    """
    # Columns already marked as the true prediction target.
    true_target_columns = self._X_train.metadata.list_columns_with_semantic_types(
        ["https://metadata.datadrivendiscovery.org/types/TrueTarget"]
    )
    index_columns = self._X_train.metadata.get_index_columns()
    # Target and index columns should be set only once, if they are set.
    self.has_set_target_columns = False
    self.has_set_index_column = False
    columns_to_use = self._get_columns(self._X_train.metadata)
    self._add_semantic_types = []
    self._remove_semantic_types = []
    # compute SIMON annotations
    self.simon_annotations = self._produce_annotations(inputs=self._X_train)
    logger.debug(f"simon annotations: {self.simon_annotations}")
    for col_idx in columns_to_use:
        # Target and index columns should be set only once, if they are set.
        # NOTE(review): these flags are re-reset on every iteration, which
        # appears to contradict the "only once" intent above -- confirm
        # whether the per-column reset is deliberate.
        self.has_set_target_columns = False
        self.has_set_index_column = False
        input_column = self._X_train.select_columns([col_idx])
        column_metadata = self._X_train.metadata.query_column(col_idx)
        column_name = column_metadata.get("name", str(col_idx))
        column_semantic_types = list(column_metadata.get("semantic_types", []))
        # We might be here because column has a known type, but it has
        # "https://metadata.datadrivendiscovery.org/types/SuggestedTarget" set.
        has_unknown_type = (
            not column_semantic_types
            or "https://metadata.datadrivendiscovery.org/types/UnknownType"
            in column_semantic_types
        )
        # A normalized copy of semantic types, which always includes unknown type.
        normalized_column_semantic_types = copy.copy(column_semantic_types)
        # If we are processing this column and it does not have semantic type then it
        # has missing semantic types, we first set it, to normalize the input semantic types.
        # If we will add any other semantic type, we will then remove this semantic type.
        if (
            has_unknown_type
            and "https://metadata.datadrivendiscovery.org/types/UnknownType"
            in self.hyperparams["detect_semantic_types"]
            and "https://metadata.datadrivendiscovery.org/types/UnknownType"
            not in normalized_column_semantic_types
        ):
            normalized_column_semantic_types.append(
                "https://metadata.datadrivendiscovery.org/types/UnknownType"
            )
        # A working copy of semantic types.
        new_column_semantic_types = copy.copy(normalized_column_semantic_types)
        # append simon labels
        if has_unknown_type:
            new_column_semantic_types = self._append_simon_annotations(
                new_column_semantic_types, col_idx
            )
        # handle target columns
        new_column_semantic_types = self._set_target_column(
            new_column_semantic_types, true_target_columns
        )
        if has_unknown_type:
            # handle index columns
            if not index_columns and not self.has_set_index_column:
                new_column_semantic_types = self._set_index_column(
                    new_column_semantic_types, column_name
                )
            # handle attribute columns
            new_column_semantic_types = self._set_attribute_column(
                new_column_semantic_types
            )
        # handle additional time label
        new_column_semantic_types = self._set_additional_time_label(
            new_column_semantic_types
        )
        # Have we added any other semantic type besides unknown type?
        if new_column_semantic_types != normalized_column_semantic_types:
            if (
                self.hyperparams["remove_unknown_type"]
                and "https://metadata.datadrivendiscovery.org/types/UnknownType"
                in new_column_semantic_types
            ):
                new_column_semantic_types.remove(
                    "https://metadata.datadrivendiscovery.org/types/UnknownType"
                )
        # Record the per-column delta against the ORIGINAL (un-normalized)
        # semantic types, sorted for deterministic output.
        new_column_semantic_types_set = set(new_column_semantic_types)
        column_semantic_types_set = set(column_semantic_types)
        self._add_semantic_types.append(
            sorted(new_column_semantic_types_set - column_semantic_types_set)
        )
        self._remove_semantic_types.append(
            sorted(column_semantic_types_set - new_column_semantic_types_set)
        )
    assert len(self._add_semantic_types) == len(columns_to_use)
    assert len(self._remove_semantic_types) == len(columns_to_use)
    self._is_fit = True
    return CallResult(None)
def produce(
self, *, inputs: Inputs, timeout: float = None, iterations: int = None
) -> CallResult[Inputs]:
"""Add SIMON annotations
Arguments:
inputs {Inputs} -- full D3M dataframe, containing attributes, key, and target
Keyword Arguments:
timeout {float} -- timeout, not considered (default: {None})
iterations {int} -- iterations, not considered (default: {None})
Raises:
PrimitiveNotFittedError: if primitive not fit
Returns:
CallResult[Outputs] -- Input pd frame with metadata augmented
"""
if not self._is_fit:
raise PrimitiveNotFittedError("Primitive not fitted.")
## BEGIN originally from from d3m.primitives.schema_discovery.profiler.Common """
assert self._add_semantic_types is not None
assert self._remove_semantic_types is not None
columns_to_use, output_columns = self._produce_columns(
inputs, self._add_semantic_types, self._remove_semantic_types
)
if (
self.hyperparams["replace_index_columns"]
and self.hyperparams["return_result"] == "append"
):
assert len(columns_to_use) == len(output_columns)
index_columns = inputs.metadata.get_index_columns()
index_columns_to_use = []
other_columns_to_use = []
index_output_columns = []
other_output_columns = []
for column_to_use, output_column in zip(columns_to_use, output_columns):
if column_to_use in index_columns:
index_columns_to_use.append(column_to_use)
index_output_columns.append(output_column)
else:
other_columns_to_use.append(column_to_use)
other_output_columns.append(output_column)
outputs = base_utils.combine_columns(
inputs,
| |
# <gh_stars>0  (scraper artifact, commented out so the module parses)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
"""Tasks for RL."""
import abc
import copy
import itertools
import random
import numpy as np
from six.moves import xrange
from common import bf # brain coder
from common import reward as r # brain coder
from single_task import misc # brain coder
from single_task import test_tasks # brain coder
import tensorflow as tf
MAX_EXECUTION_STEPS = 5000
def make_task(task_name, override_kwargs=None, max_code_length=100,
              require_correct_syntax=False,
              do_code_simplification=False,
              correct_bonus=2.0, code_length_bonus=1.0):
  """Make tasks with setting from paper.

  Args:
    task_name: string key into the task table below (e.g. 'reverse', 'fib').
    override_kwargs: optional dict merged over the task's default kwargs.
    max_code_length: max BF program length, used to scale the length bonus.
    require_correct_syntax: if True, code must be syntactically valid.
    do_code_simplification: if True, shorter correct code earns a larger bonus.
    correct_bonus: reward bonus added per correct test case.
    code_length_bonus: maximum bonus awarded for short code.

  Returns:
    A MultiIOTaskManager wrapping the requested task.

  Raises:
    ValueError: if task_name is unknown, or override_kwargs is not a dict.
  """
  tf.logging.info('Making paper-config task.')
  n = 16  # Number of test cases.
  task_mapping = {
      'print-hello': (
          PrintTask, dict(base=27, fixed_string=[8, 5, 12, 12, 15])),
      'print': (PrintIntTask, dict(base=256, fixed_string=[1, 2, 3, 4, 5])),
      'echo': (EchoTask, dict(base=27, min_length=1, max_length=6)),
      'remove-char': (
          RemoveCharTask, dict(base=256, n=n, min_len=1, max_len=6)),
      'reverse': (
          ReverseTask, dict(base=256, n=n, min_len=1, max_len=6)),
      'reverse-tune': (
          ReverseTaskV2, dict(base=256, reward_type='static-bylen')),
      'remove-char-tune': (RemoveCharTaskV2, dict(base=27)),
      'prefix': (CommonPrefixTask, dict(base=27)),
      'find': (FindSubStrTask, dict(base=27)),
      'sort3': (SortFixedTaskV2, dict(base=27, n=150, length=3)),
      'count-char': (CountCharTaskV2, dict(n=n, max_len=6)),
      'bool-logic': (BooleanLogicTask, dict()),
      'add': (AddTask, dict(n=9)),
      'echo-twice': (EchoTwiceTask, dict(n=n)),
      'echo-thrice': (EchoThriceTask, dict(n=n)),
      'copy-reverse': (CopyReverseTask, dict(n=n)),
      'zero-cascade': (EchoZeroCascadeTask, dict(n=n)),
      'cascade': (EchoCascadeTask, dict(n=n)),
      'shift-left': (ShiftLeftTask, dict(n=n)),
      'shift-right': (ShiftRightTask, dict(n=n)),
      'riffle': (RiffleTask, dict(n=n)),
      'unriffle': (UnriffleTask, dict(n=n)),
      'middle-char': (MiddleCharTask, dict(n=n)),
      'remove-last': (RemoveLastTask, dict(n=n)),
      'remove-last-two': (RemoveLastTwoTask, dict(n=n)),
      'echo-alternating': (EchoAlternatingTask, dict(n=n)),
      'echo-half': (EchoHalfTask, dict(n=n)),
      'length': (LengthTask, dict(n=n)),
      'echo-second-seq': (EchoSecondSequenceTask, dict(n=n)),
      'echo-nth-seq': (EchoNthSequenceTask, dict(n=n)),
      'substring': (SubstringTask, dict(n=n)),
      'divide-2': (Divide2Task, dict(n=n)),
      'dedup': (DedupTask, dict(n=n)),
      'remove-target-char': (RemoveTargetCharTask, dict(n=n)),
      'list-index': (ListIndexTask, dict(n=n)),
      'fib': (FibonacciTask, dict()),
      'count-down': (BottlesOfBeerTask, dict()),
      'split': (SplitTask, dict()),
      'trim-left': (TrimLeftTask, dict()),
      'circle-route': (
          JudgeRouteCircleTask, dict(n=100, max_len=32)),
      'multiply': (MultiplyTask, dict(n=100)),
      'divmod': (DivModTask, dict(n=100)),
  }
  if task_name not in task_mapping:
    # Test tasks.
    if task_name == 'test-hill-climb':
      return test_tasks.BasicTaskManager(test_tasks.HillClimbingTask())
    raise ValueError('Unknown task type "%s"' % task_name)
  task_cls, kwargs = task_mapping[task_name]
  if override_kwargs:
    if not isinstance(override_kwargs, dict):
      # Bug fix: actually format the message. Previously the format string
      # and the value were passed as two separate ValueError arguments, so
      # the '%s' was never substituted.
      raise ValueError(
          'override_kwargs must be a dict, got: %s' % (override_kwargs,))
    kwargs.update(override_kwargs)
  task = task_cls(**kwargs)
  reward_fn = r.absolute_distance_reward
  # reward_fn = r.absolute_mod_distance_reward
  # reward_fn = r.absolute_log_distance_reward
  tf.logging.info('Using reward function: %s', reward_fn.__name__)
  # We want reward with and without code simplification to be scaled the same
  # way. Without code simplification, give the maximum code length bonus
  # every time.
  min_code_length = 0.0 if do_code_simplification else max_code_length
  return MultiIOTaskManager(
      task=task, correct_bonus=correct_bonus,
      code_length_bonus=code_length_bonus,
      max_code_length=max_code_length, min_code_length=min_code_length,
      reward_fn=reward_fn, require_correct_syntax=require_correct_syntax)
def concat(lists):
  """Concatenate a sequence of lists into one new list.

  Bug fix: the original accumulated into lists[0] with `+=`, mutating the
  caller's first list in place. A fresh list is now built instead.

  Args:
    lists: iterable of lists.

  Returns:
    A new list containing all elements of the input lists, in order.
  """
  if not lists:
    return []
  result = []
  for k in lists:
    result += k
  return result
def concat_join(lists, sep):
  """Concatenate lists with a separator element between consecutive lists.

  Bug fix: the original accumulated into lists[0] with `+=`, mutating the
  caller's first list in place. A fresh list is now built instead.

  Args:
    lists: iterable of lists.
    sep: separator element inserted between consecutive lists.

  Returns:
    A new list: lists[0] + [sep] + lists[1] + [sep] + ...
  """
  if not lists:
    return []
  result = []
  for i, k in enumerate(lists):
    if i:
      result.append(sep)
    result.extend(k)
  return result
def clipped_linear(x, x0, y0, slope, y_range):
  """Evaluate y0 + slope * (x - x0), clamped to the closed interval y_range."""
  lo, hi = y_range
  value = y0 + slope * (x - x0)
  if value < lo:
    return lo
  if value > hi:
    return hi
  return value
class MultiIOTaskManager(object):
  """Supports tasks which test the code with multiple I/O examples.

  Wraps a BaseTask and scores BF code strings against the task's I/O set,
  producing per-character reward sequences for RL training.
  """

  def __init__(self, task, max_code_length=32, min_code_length=0,
               max_execution_steps=MAX_EXECUTION_STEPS, correct_bonus=1.0,
               code_length_bonus=1.0, failure_reward=-2.0, reward_fn=None,
               require_correct_syntax=False):
    assert isinstance(task, BaseTask)
    self.task = task
    self.max_code_length = max_code_length
    self.min_code_length = min_code_length
    self.max_execution_steps = max_execution_steps
    self.require_correct_syntax = require_correct_syntax
    self.correct_bonus = correct_bonus
    self.code_length_bonus = code_length_bonus
    self.failure_reward = failure_reward
    # Slope of the length-bonus decay between min and max code length
    # (zero when the two lengths coincide, i.e. no length shaping).
    self.time_penalty = (
        1.0 / (max_code_length - min_code_length)
        if max_code_length > min_code_length else 0.0)
    if reward_fn is None:
      self.reward_fn = r.absolute_distance_reward
    else:
      self.reward_fn = reward_fn
    # Tasks may declare their own I/O types; default to integer sequences.
    self.input_type = (
        task.input_type if hasattr(task, 'input_type') else misc.IOType.integer)
    self.output_type = (
        task.output_type if hasattr(task, 'output_type')
        else misc.IOType.integer)
    self._compute_best_reward()

  def _compute_best_reward(self):
    """Precompute the maximum attainable reward, used to normalize scores."""
    io_seqs = self.task.make_io_set()
    reward = 0.0
    for _, output_seq in io_seqs:
      # A perfect program earns the full distance reward plus both bonuses
      # on every test case.
      reward += self.reward_fn(output_seq, output_seq, self.task.base)
      reward += self.correct_bonus
      reward += self.code_length_bonus  # Bonus for shortest code.
    self.best_reward = reward
    self.good_reward = 0.75 * reward
    tf.logging.info('Known best reward: %.4f', self.best_reward)

  def _score_batch(self, code_strings):
    # Score each program in the batch independently.
    return [self._score_code(code) for code in code_strings]

  def _score_code(self, code):
    """Run test cases on code and compute reward.

    Args:
      code: A single BF code string.

    Returns:
      misc.RewardInfo namedtuple instance containing reward and code execution
      information, including inputs, expected outputs, code outputs, input
      and output types, and reason for the reward obtained.
    """
    # Get list of 2-tuples, each containing an input sequence and an output
    # sequence.
    io_seqs = self.task.make_io_set()
    terminal_reward = 0.0
    results = []
    reason = 'correct'
    for input_seq, output_seq in io_seqs:
      eval_result = bf.evaluate(
          code, input_buffer=input_seq, timeout=0.1,
          max_steps=self.max_execution_steps,
          base=self.task.base,
          require_correct_syntax=self.require_correct_syntax)
      result, success = eval_result.output, eval_result.success
      if not success:
        # Code execution timed out.
        terminal_reward = self.failure_reward
        results = []
        reason = eval_result.failure_reason
        break
      else:
        terminal_reward += self.reward_fn(result, output_seq, self.task.base)
        if result == output_seq:
          terminal_reward += self.correct_bonus  # Bonus for correct answer.
          # Only add additional reward for shorter code. Subtracting reward
          # interferes with the main objective. Only optimize for length once
          # any solution is found.
          if self.min_code_length == self.max_code_length:
            terminal_reward += self.code_length_bonus
          else:
            terminal_reward += self.code_length_bonus * clipped_linear(
                x=len(code), x0=self.min_code_length, y0=1.0,
                slope=-self.time_penalty, y_range=(0.0, 1.0))
          # reason remains 'correct' if it is already
        elif reason == 'correct':
          reason = 'wrong'
      results.append(result)
    # Return list of rewards, one for each char in the code. All are 0 except
    # for the terminal reward.
    terminal_reward /= self.best_reward
    return misc.RewardInfo(
        episode_rewards=[0.0] * (len(code) - 1) + [terminal_reward],
        input_case=misc.IOTuple(i for i, o in io_seqs),
        correct_output=misc.IOTuple(o for i, o in io_seqs),
        code_output=misc.IOTuple(results),
        input_type=self.input_type,
        output_type=self.output_type,
        reason=reason)

  def rl_batch(self, batch_size):
    """Produces list of reward functions. One for each program in the batch."""
    return [self._score_code] * batch_size
def conditional_overwrite(current_value, new_value, allowed_overwrite_values):
  """Return new_value when current_value is overwritable, else current_value."""
  may_overwrite = current_value in allowed_overwrite_values
  return new_value if may_overwrite else current_value
class BaseTask(object):
  """A coding task.

  All coding tasks should inherit this class.
  """
  __metaclass__ = abc.ABCMeta

  def __init__(self, base=256):
    self.base = base  # All tasks must set the integer base that they expect.

  @abc.abstractmethod
  def make_io_set(self):
    """Generate a set of test cases for the task.

    Returns:
      List of tuples, where each tuple is (input_case, output_case).
      input_case and output_case are lists of integers.
    """
    pass
# ==============================================================================
# ICLR tasks.
# ==============================================================================
class PrintTask(BaseTask):
  """Print string coding task.

  Code needs to output a fixed string (given as a hyperparameter to the
  task constructor). Program input is ignored.
  """

  def __init__(self, base, fixed_string=None):
    """Args:
      base: integer alphabet size (includes EOS).
      fixed_string: list of ints the program must print; defaults to ABC<EOS>.
    """
    # Bug fix: `super(type(self), self)` recurses infinitely if this class is
    # ever subclassed; name the class explicitly (py2-compatible spelling).
    super(PrintTask, self).__init__()
    self.base = base  # base includes EOS
    self.eos = 0
    if fixed_string:
      self.fixed_string = fixed_string
    else:
      self.fixed_string = [1, 2, 3, 0]  # ABC<EOS>
    self.min_length = self.max_length = len(self.fixed_string)

  def make_io_set(self):
    # Single test case: empty input, expected output is the fixed string.
    return [(list(), list(self.fixed_string))]
class RemoveCharTaskV2(BaseTask):
  """Remove character coding task (version 2).

  Code needs to pipe input to output, but with all the 'A' (value 1) chars
  removed. 'A' appears exactly once in each input.
  Test cases are hard-coded.
  """

  def __init__(self, base):
    """Args:
      base: integer alphabet size; must cover A-Z plus EOS (>= 27).
    """
    # Bug fix: `super(type(self), self)` recurses infinitely under
    # subclassing; name the class explicitly.
    super(RemoveCharTaskV2, self).__init__()
    self.base = base
    self.eos = 0
    self.remove_char = 1
    assert base >= 27

  def make_io_set(self):
    rm = self.remove_char
    return [
        ([rm, 0], [0]),
        ([20, rm, 0], [20, 0]),
        ([rm, 13, 0], [13, 0]),
        ([6, rm, 17, 0], [6, 17, 0]),
        ([rm, 11, 24, 0], [11, 24, 0]),
        ([2, 16, 21, rm, 0], [2, 16, 21, 0]),
        ([18, rm, 12, 26, 7, 0], [18, 12, 26, 7, 0]),
        ([9, 10, 22, rm, 4, 0], [9, 10, 22, 4, 0])]
class RemoveCharTask(BaseTask):
  """Remove character coding task.

  Code needs to pipe input to output, but with all the 'A' (value 1) chars
  removed. 'A' appears at least once in each input.
  Test cases are dynamically generated, allowing for the number of test cases
  to be a hyperparameter.
  """

  def __init__(self, base, n, min_len, max_len):
    """Args:
      base: integer alphabet size; must cover A-Z plus EOS (>= 27).
      n: number of test cases to generate.
      min_len: minimum input length.
      max_len: maximum input length.
    """
    # Bug fix: `super(type(self), self)` recurses infinitely under
    # subclassing; name the class explicitly.
    super(RemoveCharTask, self).__init__()
    self.base = base
    self.eos = 0
    self.remove_char = 1
    assert base >= 27
    self._io_pairs = self._make_io_examples(n, min_len, max_len)

  def _make_io_examples(self, n, min_len, max_len):
    """Generate test cases for the task."""
    rand = random.Random(6849275409234)  # Test cases are fixed, but varied.
    io_examples = []
    for _ in xrange(n):
      length = rand.randrange(min_len, max_len + 1)
      rm_char_pos = rand.randrange(0, length)
      input_seq = [rand.randrange(1, self.base) for _ in xrange(length)]
      input_seq[rm_char_pos] = self.remove_char
      output_seq = list(input_seq)
      del output_seq[rm_char_pos]
      output_seq.append(0)
      io_examples.append((input_seq, output_seq))
    return io_examples

  def make_io_set(self):
    # Deep copy so callers cannot mutate the cached examples.
    return copy.deepcopy(self._io_pairs)
class ReverseTaskV2(BaseTask):
"""Reverse string coding task (version 2).
Code needs to pipe input to output, but in reverse order.
Stochastic test case = new test case randomly generated for every run of
`make_io_set`, i.e. different test cases every time code is scored.
Task supports different types of test cases:
rand-one: Code is scored on one stochastic test case.
rand-many: Code is scored on 5 stochastic test cases.
static-bylen: Code is scored on 5 static test cases. There is one test
case for string lengths 1 through 5.
| |
# encoding: UTF-8
'''
本文件中实现了CTA策略引擎,针对CTA类型的策略,抽象简化了部分底层接口的功能。
关于平今和平昨规则:
1. 普通的平仓OFFSET_CLOSET等于平昨OFFSET_CLOSEYESTERDAY
2. 只有上期所的品种需要考虑平今和平昨的区别
3. 当上期所的期货有今仓时,调用Sell和Cover会使用OFFSET_CLOSETODAY,否则
会使用OFFSET_CLOSE
4. 以上设计意味着如果Sell和Cover的数量超过今日持仓量时,会导致出错(即用户
希望通过一个指令同时平今和平昨)
5. 采用以上设计的原因是考虑到vn.trader的用户主要是对TB、MC和金字塔类的平台
感到功能不足的用户(即希望更高频的交易),交易策略不应该出现4中所述的情况
6. 对于想要实现4中所述情况的用户,需要实现一个策略信号引擎和交易委托引擎分开
的定制化统结构(没错,得自己写)
Modified by IncenseLee(李来佳)
1、增加单一策略里,多个vtSymbol的配置。
2、修改loadSetting和SaveSetting
'''
print('load ctaEngine.py')
import json
import os
import traceback
from collections import OrderedDict
from datetime import datetime, timedelta
import re
import csv
import copy
import decimal
from vnpy.trader.vtEvent import *
from vnpy.trader.vtConstant import *
from vnpy.trader.vtGateway import VtSubscribeReq, VtOrderReq, VtCancelOrderReq, VtLogData, VtSignalData
from vnpy.trader.app.ctaStrategy.ctaBase import *
from vnpy.trader.setup_logger import setup_logger
from vnpy.trader.vtFunction import todayDate, getJsonPath
from vnpy.trader.util_mail import sendmail
# 加载 strategy目录下所有的策略
from vnpy.trader.app.ctaStrategy.strategy import STRATEGY_CLASS
MATRIX_DB_NAME = 'matrix' # 虚拟策略矩阵的数据库名称
POSITION_DISPATCH_COLL_NAME = 'position_dispatch' # 虚拟策略矩阵的策略调度配置collection名称
POSITION_DISPATCH_HISTORY_COLL_NAME = 'position_dispatch_history' # 虚拟策略矩阵的策略调度配置collection名称
########################################################################
class CtaEngine(object):
"""CTA策略引擎"""
# 策略配置文件
settingFileName = 'CTA_setting.json'
settingfilePath = getJsonPath(settingFileName, __file__)
# ----------------------------------------------------------------------
def __init__(self, mainEngine, eventEngine):
    """Constructor: wire the CTA engine to the main engine and event engine."""
    self.mainEngine = mainEngine
    self.eventEngine = eventEngine
    # Current trading date.
    self.today = todayDate()
    # Dict of strategy instances.
    # Key: strategy name (must be unique), value: strategy instance.
    self.strategyDict = {}
    # Dict of strategy settings.
    # Key: strategy name (must be unique), value: settings.
    self.settingDict = {}
    # Maps vtSymbol -> list of strategy instances (used to push tick data).
    # Several strategies may trade the same vtSymbol, hence the list value.
    self.tickStrategyDict = {}
    # Maps vtOrderID -> strategy object (used to push order/trade updates).
    self.orderStrategyDict = {}
    # Counter for locally generated stop-order ids.
    self.stopOrderCount = 0
    # stopOrderID = STOPORDERPREFIX + str(stopOrderCount)
    # Local stop-order dicts.
    # Key: stopOrderID, value: StopOrder object.
    self.stopOrderDict = {}  # cancelled stop orders are NOT removed from this dict
    self.workingStopOrderDict = {}  # cancelled stop orders ARE removed from this dict
    # Position buffer cache.
    # Key: vtSymbol, value: PositionBuffer object.
    self.posBufferDict = {}
    # Set of trade ids, used to filter duplicate trade pushes.
    self.tradeSet = set()
    # Engine type: live trading.
    self.engineType = ENGINETYPE_TRADING
    # Tick cache.
    self.tickDict = {}
    # Symbols whose subscription has not succeeded yet.
    self.pendingSubcribeSymbols = {}
    # Register event listeners.
    self.registerEvent()
    # Order ids issued by virtual-matrix position dispatching.
    self.dispatch_pos_order_dict = {}
    self.strategy_group = EMPTY_STRING
    self.logger = None
    self.strategy_loggers = {}
    self.createLogger()
def analysis_vtSymbol(self, vtSymbol):
    """Split a vt symbol into (base, quote, exchange).

    'btc_usdt.OKEX' -> ('btc', 'usdt', 'OKEX'). For symbols without an
    underscore pair (e.g. futures codes) the quote is None.

    :param vtSymbol: contract identifier, optionally suffixed '.EXCHANGE'
    :return: tuple (base_symbol, quote_symbol, exchange)
    """
    exchange = None
    symbol_pair = vtSymbol
    if '.' in vtSymbol:
        parts = vtSymbol.split('.')
        symbol_pair = parts[0]
        exchange = parts[1]
    if '_' not in symbol_pair:
        # NOTE(review): the full vtSymbol (exchange suffix included) is
        # returned here rather than symbol_pair -- kept as-is for
        # compatibility, but confirm this is intended.
        return vtSymbol, None, exchange
    pair_parts = symbol_pair.split('_')
    return pair_parts[0], pair_parts[1], exchange
# ----------------------------------------------------------------------
def sendOrder(self, vtSymbol, orderType, price, volume, strategy, priceType=PRICETYPE_LIMITPRICE):
    """Send an order on behalf of a CTA strategy.

    Maps the CTA order type (buy/sell/short/cover) onto direction/offset,
    handling SHFE's close-today vs close-yesterday distinction, and records
    the vtOrderID -> strategy mapping for later order/trade pushes.

    :return: the vtOrderID string, or '' on failure
    """
    base_symbol, quote_symbol, exchange = self.analysis_vtSymbol(vtSymbol)
    contract = self.mainEngine.getContract(vtSymbol)
    if contract is None:
        self.writeCtaError(
            u'vtEngine.sendOrder取不到{}合约得信息,{}发送{}委托:{},v{}'.format(vtSymbol, strategy.name, orderType, price,
                                                                   volume))
        return ''
    req = VtOrderReq()
    req.symbol = contract.symbol  # contract code
    req.exchange = contract.exchange  # exchange
    req.vtSymbol = contract.vtSymbol
    req.price = self.roundToPriceTick(contract.priceTick, price)  # price rounded to tick
    req.volume = self.roundToVolumeTick(volumeTick=contract.volumeTick,volume=volume)  # volume rounded to tick
    if strategy:
        req.productClass = strategy.productClass
        req.currency = strategy.currency
    else:
        req.productClass = ''
        req.currency = ''
    # The CTA engine originally only allowed limit orders;
    # modified by incense: the price type is now passed in as a parameter.
    req.priceType = priceType  # price type
    # Map the CTA order type onto direction/offset.
    if orderType == CTAORDER_BUY:
        req.direction = DIRECTION_LONG  # direction
        req.offset = OFFSET_OPEN  # open/close
    elif orderType == CTAORDER_SELL:
        req.direction = DIRECTION_SHORT
        # Only SHFE distinguishes closing today's vs yesterday's position.
        if contract.exchange != EXCHANGE_SHFE:
            req.offset = OFFSET_CLOSE
        else:
            # Fetch the cached position data.
            posBuffer = self.posBufferDict.get(vtSymbol, None)
            # If the position cache is missing, default to closing yesterday's position.
            if not posBuffer:
                self.writeCtaLog(u'获取持仓缓存失败,则默认平昨')
                req.offset = OFFSET_CLOSE
            # modified by IncenseLee 2016/11/08: prefer closing yesterday's position first.
            elif posBuffer.longYd:
                self.writeCtaLog(u'{}优先平昨,昨多仓:{},平仓数:{}'.format(vtSymbol, posBuffer.longYd, volume))
                req.offset = OFFSET_CLOSE
                # if posBuffer.longYd >= volume:
                #     posBuffer.longYd -= volume
                #     self.writeCtaLog(u'{}剩余昨多仓{}'.format(vtSymbol,posBuffer.longYd))
            else:
                self.writeCtaLog(u'{}平今,今多仓:{},{}'.format(vtSymbol, posBuffer.longToday, volume))
                req.offset = OFFSET_CLOSETODAY
                # if posBuffer.longToday >= volume:
                #     posBuffer.longToday -= volume
                #     self.writeCtaLog(u'{}剩余今多仓{}'.format(vtSymbol, posBuffer.longToday))
            # Otherwise, if there is a long position opened today, close today:
            # elif posBuffer.longToday:
            #     req.offset= OFFSET_CLOSETODAY
            # In all other cases close yesterday's position:
            # else:
            #     req.offset = OFFSET_CLOSE
    elif orderType == CTAORDER_SHORT:
        req.direction = DIRECTION_SHORT
        req.offset = OFFSET_OPEN
    elif orderType == CTAORDER_COVER:
        req.direction = DIRECTION_LONG
        # Only SHFE distinguishes closing today's vs yesterday's position.
        if contract.exchange != EXCHANGE_SHFE:
            req.offset = OFFSET_CLOSE
        else:
            # Fetch the cached position data.
            posBuffer = self.posBufferDict.get(vtSymbol, None)
            # If the position cache is missing, default to closing yesterday's position.
            if not posBuffer:
                req.offset = OFFSET_CLOSE
            # modified by IncenseLee 2016/11/08: prefer closing yesterday's position first.
            elif posBuffer.shortYd:
                self.writeCtaLog(u'{}优先平昨,昨空仓:{},平仓数:{}'.format(vtSymbol, posBuffer.shortYd, volume))
                req.offset = OFFSET_CLOSE
                # if posBuffer.shortYd >= volume:
                #     posBuffer.shortYd -= volume
                #     self.writeCtaLog(u'{}剩余昨空仓{}'.format(vtSymbol, posBuffer.shortYd))
            else:
                self.writeCtaLog(u'{}平今,今空仓:{},平仓数:{}'.format(vtSymbol, posBuffer.shortToday, volume))
                req.offset = OFFSET_CLOSETODAY
                # if posBuffer.shortToday >= volume:
                #     posBuffer.shortToday -= volume
                #     self.writeCtaLog(u'{}剩余今空仓{}'.format(vtSymbol, posBuffer.shortToday))
            # Otherwise, if there is a short position opened today, close today:
            # elif posBuffer.shortToday:
            #     req.offset= OFFSET_CLOSETODAY
            # In all other cases close yesterday's position:
            # else:
            #     req.offset = OFFSET_CLOSE
    vtOrderID = self.mainEngine.sendOrder(req, contract.gatewayName)  # send the order
    if vtOrderID is None or len(vtOrderID) == 0:
        # NOTE(review): this format string has 5 placeholders but receives 6
        # arguments -- `price` is silently dropped from the error message.
        self.writeCtaError(u'{} 发送委托失败. {} {} {} {}'.format(strategy.name if strategy else 'CtaEngine', vtSymbol, req.offset, req.direction, volume, price))
        return ''
    if strategy:
        self.orderStrategyDict[vtOrderID] = strategy  # remember the vtOrderID -> strategy mapping
        self.writeCtaLog(u'策略%s发送委托,%s, %s,%s,%s@%s'
                         % (strategy.name, vtSymbol, req.offset, req.direction, volume, price))
    else:
        self.writeCtaLog(u'%s发送委托,%s, %s,%s,%s@%s'
                         % ('CtaEngine', vtSymbol, req.offset, req.direction, volume, price))
    return vtOrderID
# ----------------------------------------------------------------------
def cancelOrder(self, vtOrderID):
    """Cancel a working order by vtOrderID; logs and no-ops otherwise."""
    # Look the order up through the main engine.
    order = self.mainEngine.getOrder(vtOrderID)
    if not order:
        # Unknown order id.
        self.writeCtaLog(u'委托单({0}不存在'.format(vtOrderID))
        return
    # Only orders that are neither fully traded nor cancelled are still
    # active and can be cancelled.
    if order.status not in (STATUS_ALLTRADED, STATUS_CANCELLED):
        req = VtCancelOrderReq()
        req.symbol = order.symbol
        req.exchange = order.exchange
        req.frontID = order.frontID
        req.sessionID = order.sessionID
        req.orderID = order.orderID
        self.mainEngine.cancelOrder(req, order.gatewayName)
        return
    if order.status == STATUS_ALLTRADED:
        self.writeCtaLog(u'委托单({0}已执行,无法撤销'.format(vtOrderID))
    if order.status == STATUS_CANCELLED:
        self.writeCtaLog(u'委托单({0}已撤销,无法再次撤销'.format(vtOrderID))
# ----------------------------------------------------------------------
def cancelOrders(self, symbol, offset=EMPTY_STRING):
    """Cancel all working orders, optionally filtered by symbol and offset.

    :param symbol: cancel only this contract's orders; empty string = all contracts
    :param offset: cancel only this offset's orders; empty string (default) = all offsets
    """
    # symbol parameter: restrict cancellation to the given contract;
    # offset parameter: restrict cancellation to the given offset; empty = all.
    l = self.mainEngine.getAllWorkingOrders()
    self.writeCtaLog(u'从所有订单{0}中撤销{1}'.format(len(l), symbol))
    for order in l:
        if symbol == EMPTY_STRING:
            symbolCond = True
        else:
            symbolCond = order.symbol == symbol
        if offset == EMPTY_STRING:
            offsetCond = True
        else:
            offsetCond = order.offset == offset
        if symbolCond and offsetCond:
            req = VtCancelOrderReq()
            req.symbol = order.symbol
            req.exchange = order.exchange
            req.frontID = order.frontID
            req.sessionID = order.sessionID
            req.orderID = order.orderID
            self.writeCtaLog(u'撤单:{0}/{1},{2}{3}手'
                             .format(order.symbol, order.orderID, order.offset,
                                     order.totalVolume - order.tradedVolume))
            self.mainEngine.cancelOrder(req, order.gatewayName)
# ----------------------------------------------------------------------
def sendStopOrder(self, vtSymbol, orderType, price, volume, strategy):
"""发停止单(本地实现)"""
# 1.生成本地停止单ID
self.stopOrderCount += 1
stopOrderID = STOPORDERPREFIX + str(self.stopOrderCount)
# 2.创建停止单对象
so = StopOrder()
so.vtSymbol = vtSymbol # 代码
so.orderType = orderType # 停止单类型
so.price = price # 价格
so.volume = volume # 数量
so.strategy = strategy # 来源策略
so.stopOrderID = stopOrderID # Id
so.status = STOPORDER_WAITING # 状态
if orderType == CTAORDER_BUY:
so.direction = DIRECTION_LONG
so.offset = OFFSET_OPEN
elif orderType == CTAORDER_SELL:
so.direction = DIRECTION_SHORT
so.offset = OFFSET_CLOSE
elif orderType == CTAORDER_SHORT:
so.direction = DIRECTION_SHORT
so.offset = OFFSET_OPEN
elif orderType == CTAORDER_COVER:
so.direction = DIRECTION_LONG
so.offset = OFFSET_CLOSE
# 保存stopOrder对象到字典中
self.stopOrderDict[stopOrderID] = so # 字典中不会删除
self.workingStopOrderDict[stopOrderID] = so # 字典中会删除
self.writeCtaLog(u'发停止单成功,'
u'Id:{0},Symbol:{1},Type:{2},Price:{3},Volume:{4}'
u'.'.format(stopOrderID, vtSymbol, orderType, price, volume))
return stopOrderID
# ----------------------------------------------------------------------
def cancelStopOrder(self, stopOrderID):
"""撤销停止单
Incense Li modified 20160124:
增加返回True 和 False
"""
# 1.检查停止单是否存在
if stopOrderID in self.workingStopOrderDict:
so = self.workingStopOrderDict[stopOrderID]
so.status = STOPORDER_CANCELLED # STOPORDER_WAITING =》STOPORDER_CANCELLED
del self.workingStopOrderDict[stopOrderID] # 删除
self.writeCtaLog(u'撤销停止单:{0}成功.'.format(stopOrderID))
return True
else:
self.writeCtaLog(u'撤销停止单:{0}失败,不存在Id.'.format(stopOrderID))
return False
# ----------------------------------------------------------------------
def processStopOrder(self, tick):
"""收到行情后处理本地停止单(检查是否要立即发出)"""
vtSymbol = tick.vtSymbol
# 1.首先检查是否有策略交易该合约
if vtSymbol in self.tickStrategyDict:
# 2.遍历等待中的停止单,检查是否会被触发
for so in (self.workingStopOrderDict.values()):
if so.vtSymbol == vtSymbol:
# 3. 触发标识判断
longTriggered = so.direction == DIRECTION_LONG and tick.lastPrice >= so.price # 多头停止单被触发
shortTriggered = so.direction == DIRECTION_SHORT and tick.lastPrice <= so.price # 空头停止单被触发
# 4.触发处理
if longTriggered or shortTriggered:
# 5.设定价格,买入和卖出分别以涨停跌停价发单(模拟市价单)
if so.direction == DIRECTION_LONG:
price = tick.upperLimit
else:
price = tick.lowerLimit
# 6.更新停止单状态,触发
so.status = STOPORDER_TRIGGERED
# 7.发单
self.sendOrder(so.vtSymbol, so.orderType, price, so.volume, so.strategy)
# 8.删除停止单
del self.workingStopOrderDict[so.stopOrderID]
# ----------------------------------------------------------------------
    def procecssTickEvent(self, event):
        """Handle a market-data (tick) push event.

        NOTE(review): the method name ('procecss') is a long-standing
        typo, kept because registerEvent() binds this exact name.
        """
        # Extract the tick payload and work on a shallow copy so the
        # event's original data is not mutated downstream.
        tick = event.dict_['data']
        tick = copy.copy(tick)
        # Receiving a tick proves the subscription succeeded: drop the
        # contract from the pending-subscription list.
        if tick.vtSymbol in self.pendingSubcribeSymbols:
            self.writeCtaLog(u'已成功订阅{0},从待订阅清单中移除'.format(tick.vtSymbol))
            del self.pendingSubcribeSymbols[tick.vtSymbol]
        # Cache the latest tick per contract.
        self.tickDict[tick.vtSymbol] = tick
        # Local stop orders are checked before strategies see the tick.
        self.processStopOrder(tick)
        # Forward the tick to every strategy trading this contract.
        if tick.vtSymbol in self.tickStrategyDict:
            # Copy the raw tick's attributes into a CtaTickData instance;
            # only the fields CtaTickData declares are transferred.
            ctaTick = CtaTickData()
            d = ctaTick.__dict__
            for key in d.keys():
                d[key] = tick.__getattribute__(key)
            if not ctaTick.datetime:
                # Rebuild datetime from the tick's date/time strings.
                # Assumes '%Y-%m-%d %H:%M:%S.%f' formatting -- TODO confirm
                # against the gateway that produces these ticks.
                ctaTick.datetime = datetime.strptime(' '.join([tick.date, tick.time]), '%Y-%m-%d %H:%M:%S.%f')
            # Dispatch to each strategy through the guarded caller.
            l = self.tickStrategyDict[tick.vtSymbol]
            for strategy in l:
                self.callStrategyFunc(strategy, strategy.onTick, ctaTick)
# ----------------------------------------------------------------------
def processOrderEvent(self, event):
"""处理委托推送事件"""
# 1.获取事件的Order数据
order = event.dict_['data']
# order.vtOrderID 在gateway中,已经格式化为 gatewayName.vtOrderID
# 2.判断order是否在策略的映射字典中
if order.vtOrderID in self.orderStrategyDict:
# 3.提取对应的策略
strategy = self.orderStrategyDict[order.vtOrderID]
# 4.触发策略的委托推送事件方法
strategy.onOrder(order)
else:
# 检查调度的平仓
self.onOrder_dispatch_close_pos(order)
# ----------------------------------------------------------------------
    def processTradeEvent(self, event):
        """Handle a trade (fill) push event.

        Duplicate fills are filtered by vtTradeID; the fill is forwarded
        to the owning strategy and folded into the per-symbol position
        buffer.
        """
        # Extract the trade payload.
        trade = event.dict_['data']
        # Drop fills that were already processed (gateways may repeat them).
        if trade.vtTradeID in self.tradeSet:
            return
        self.tradeSet.add(trade.vtTradeID)
        # Forward the fill to the strategy that owns the order.
        if trade.vtOrderID in self.orderStrategyDict:
            strategy = self.orderStrategyDict[trade.vtOrderID]
            # Automatic strategy position tracking was removed on purpose
            # (canceled by IncenseLee); strategies track their own position.
            # if trade.direction == DIRECTION_LONG:
            #     strategy.pos += trade.volume
            # else:
            #     strategy.pos -= trade.volume
            self.callStrategyFunc(strategy, strategy.onTrade, trade)
        # Update the cached position data for contracts under management,
        # creating the per-symbol buffer lazily.
        if trade.vtSymbol in self.tickStrategyDict:
            posBuffer = self.posBufferDict.get(trade.vtSymbol, None)
            if not posBuffer:
                posBuffer = PositionBuffer()
                posBuffer.vtSymbol = trade.vtSymbol
                self.posBufferDict[trade.vtSymbol] = posBuffer
            posBuffer.updateTradeData(trade)
# ----------------------------------------------------------------------
def processPositionEvent(self, event):
"""处理持仓推送"""
pos = event.dict_['data']
# 更新持仓缓存数据
if True: # pos.vtSymbol in self.tickStrategyDict:
posBuffer = self.posBufferDict.get(pos.vtSymbol, None)
if not posBuffer:
posBuffer = PositionBuffer()
posBuffer.vtSymbol = pos.vtSymbol
self.posBufferDict[pos.vtSymbol] = posBuffer
posBuffer.updatePositionData(pos)
# ----------------------------------------------------------------------
    def registerEvent(self):
        """Register all event-engine callbacks used by the CTA engine."""
        # Market-data (tick) pushes.
        self.eventEngine.register(EVENT_TICK, self.procecssTickEvent)
        # Order status pushes.
        self.eventEngine.register(EVENT_ORDER, self.processOrderEvent)
        # Trade (fill) pushes.
        self.eventEngine.register(EVENT_TRADE, self.processTradeEvent)
        # Position updates.
        self.eventEngine.register(EVENT_POSITION, self.processPositionEvent)
        # Account updates double as a periodic check for contracts whose
        # subscription has not been confirmed yet.
        self.eventEngine.register(EVENT_ACCOUNT, self.checkUnsubscribedSymbols)
        # Timer events.
        self.eventEngine.register(EVENT_TIMER, self.processTimerEvent)
        # Forced stop-loss on account drawdown.
        self.eventEngine.register(EVENT_ACCOUNT_LOSS, self.processAccoutLossEvent)
        # NOTE(review): the original ended with a dangling comment about
        # registering a timed cleanup of dispatched temporary positions,
        # but no such registration exists -- confirm whether it is missing.
def processAccoutLossEvent(self, event):
"""处理止损时间"""
balance = event.dict_['data']
self.writeCtaLog(u'净值{0}低于止损线,执行强制止损'.format(balance))
self.mainEngine.writeLog(u'净值{0}低于止损线,执行强制止损'.format(balance))
self.cancelOrders(symbol=EMPTY_STRING)
for posBuffer in (self.posBufferDict.values()):
if posBuffer.shortYd > 0:
self.writeCtaLog(u'{0}合约持有昨空单{1}手,强平'.format(posBuffer.vtSymbol, posBuffer.shortYd))
tick = | |
# -*- coding: utf-8 -*-
import unittest
import pytest
from skosprovider.skos import (
Label,
Note,
Source,
ConceptScheme,
Concept,
Collection,
label,
find_best_label_for_type,
filter_labels_by_language,
dict_to_label,
dict_to_note,
dict_to_source
)
class LabelTest(unittest.TestCase):
    """Tests for the Label class: construction, language validation,
    repr, type validation and (in)equality against Labels and dicts."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testConstructor(self):
        lbl = Label('Knokke-Heist', type="prefLabel", language='nl-BE')
        self.assertEqual(lbl.label, 'Knokke-Heist')
        self.assertEqual(lbl.type, 'prefLabel')
        self.assertEqual(lbl.language, 'nl-BE')

    def testConstructorInvalidLanguage(self):
        # An unparseable language tag raises; None falls back to 'und'.
        with self.assertRaises(ValueError):
            Label('Knokke-Heist', type="prefLabel", language='nederlands')
        lbl = Label('Knokke-Heist', type='prefLabel', language=None)
        self.assertEqual('und', lbl.language)

    def testRepr(self):
        lbl = Label('Knokke-Heist', type="prefLabel", language='nl-BE')
        self.assertEqual(repr(lbl), "Label('Knokke-Heist', 'prefLabel', 'nl-BE')")

    def testIsValidType(self):
        # Works both as a classmethod and on an instance.
        self.assertTrue(Label.is_valid_type('prefLabel'))
        self.assertFalse(Label.is_valid_type('voorkeursLabel'))
        self.assertTrue(Label('Knokke-Heist').is_valid_type('prefLabel'))

    def testEquality(self):
        # Omitted type/language default to 'prefLabel'/'und'.
        self.assertEqual(Label('Knokke-Heist'), Label('Knokke-Heist', 'prefLabel', 'und'))

    def testInequality(self):
        self.assertNotEqual(Label('Knokke-Heist'), Label('Knokke', 'altLabel'))

    def testDictEquality(self):
        # A Label compares equal to a dict with the same fields.
        expected = {'label': 'Knokke-Heist', 'type': 'prefLabel', 'language': 'und'}
        self.assertEqual(Label('Knokke-Heist'), expected)

    def testDictInequality(self):
        other = {'label': 'Knokke', 'type': 'altLabel', 'language': None}
        self.assertNotEqual(Label('Knokke-Heist'), other)
class NoteTest(unittest.TestCase):
    # Tests for the Note class: construction, language/markup validation
    # and (in)equality against both Note instances and plain dicts.
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def testConstructor(self):
        # A fully specified note keeps text, type and language as given.
        n = Note(
            'Een gemeente in West-Vlaanderen.',
            type="note",
            language='nl-BE'
        )
        self.assertEqual('Een gemeente in West-Vlaanderen.', n.note)
        self.assertEqual('note', n.type)
        self.assertEqual('nl-BE', n.language)
    def testConstructorInvalidLanguage(self):
        # An unparseable language tag raises; None falls back to 'und'.
        with self.assertRaises(ValueError):
            n = Note(
                'Een gemeente in West-Vlaanderen.',
                type="note",
                language='nederlands'
            )
        n = Note(
            'Een gemeente in West-Vlaanderen.',
            type="note",
            language=None
        )
        assert n.language == 'und'
    def testConstructorInvalidMarkup(self):
        # Only markup values Note knows about are accepted ('markdown' is not).
        with self.assertRaises(ValueError):
            n = Note(
                'Een gemeente in West-Vlaanderen.',
                type="note",
                language='nl',
                markup='markdown'
            )
    def testEquality(self):
        # Omitted type/language default to 'note'/'und'.
        n1 = Note('A note.')
        n2 = Note('A note.', 'note', 'und')
        self.assertEqual(n1, n2)
    def testInEquality(self):
        n1 = Note('A note.')
        n2 = Note('A note.', 'definition', 'und')
        self.assertNotEqual(n1, n2)
    def testDictEquality(self):
        # A Note compares equal to a dict carrying the same fields.
        n1 = Note('A note.')
        n2 = {'note': 'A note.', 'type': 'note', 'language': 'und', 'markup': None}
        self.assertEqual(n1, n2)
    def testDictInequality(self):
        n1 = Note('A note.')
        n2 = {'note': 'A note.', 'type': 'definition', 'language': 'und', 'markup': None}
        self.assertNotEqual(n1, n2)
    def testConstructorWithHTML(self):
        # HTML content is stored verbatim together with markup='HTML'.
        n = Note(
            '<p>Een gemeente in <em>West-Vlaanderen</em>.</p>',
            type="note",
            language='nl-BE',
            markup='HTML'
        )
        self.assertEqual('<p>Een gemeente in <em>West-Vlaanderen</em>.</p>', n.note)
        self.assertEqual('note', n.type)
        self.assertEqual('nl-BE', n.language)
        self.assertEqual('HTML', n.markup)
    def testIsValidType(self):
        # is_valid_type works as a classmethod and on an instance.
        self.assertTrue(Note.is_valid_type('note'))
        self.assertFalse(Note.is_valid_type('notitie'))
        n = Note('A community in West-Flanders.', 'definition', 'en')
        self.assertTrue(n.is_valid_type('definition'))
    def testIsValidMarkup(self):
        # is_valid_markup accepts 'HTML' and None, rejects 'markdown'.
        self.assertTrue(Note.is_valid_markup('HTML'))
        self.assertFalse(Note.is_valid_markup('markdown'))
        n = Note('A community in West-Flanders.', 'definition', 'en', None)
        self.assertTrue(n.is_valid_markup(None))
class SourceTest(unittest.TestCase):
    """Tests for the Source class: citation storage and markup validation."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testConstructor(self):
        text = '<NAME>; <NAME>. & <NAME>. 2015. Data-driven systems and system-driven data: the story of the Flanders Heritage Inventory (1995-2015)'
        src = Source(text)
        self.assertEqual(text, src.citation)

    def testConstructorWithHTML(self):
        # HTML citations are stored verbatim together with the markup flag.
        text = '<NAME>; <NAME>. & <NAME>. 2015. <em>Data-driven systems and system-driven data: the story of the Flanders Heritage Inventory (1995-2015)</em>'
        src = Source(text, markup='HTML')
        self.assertEqual(text, src.citation)
        self.assertEqual('HTML', src.markup)

    def testIsValidMarkup(self):
        # Works both as a classmethod and on an instance.
        self.assertTrue(Source.is_valid_markup('HTML'))
        self.assertFalse(Source.is_valid_markup('markdown'))
        text = '<NAME>; <NAME>. & <NAME>. 2015. Data-driven systems and system-driven data: the story of the Flanders Heritage Inventory (1995-2015)'
        self.assertTrue(Source(text).is_valid_markup(None))

    def testConstructorInvalidMarkup(self):
        # An unsupported markup value must be rejected.
        text = '<NAME>; <NAME>. & <NAME>. 2015. <em>Data-driven systems and system-driven data: the story of the Flanders Heritage Inventory (1995-2015)</em>'
        with self.assertRaises(ValueError):
            Source(text, markup='markdown')
class ConceptSchemeTest(unittest.TestCase):
    # Tests for ConceptScheme: repr, label delegation, sort keys,
    # languages, source conversion and uri validation.
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def _get_gemeenten_nl(self):
        # Dutch preferred label.
        return Label('Gemeenten', type="prefLabel", language='nl-BE')
    def _get_fusiegemeenten_nl(self):
        # Dutch alternative label.
        return Label('Fusiegemeenten', type="altLabel", language='nl-BE')
    def _get_communities_en(self):
        # English preferred label.
        return Label('Communities', type="prefLabel", language='en')
    def _get_labels(self):
        # Fixture: the three labels above, in a fresh list.
        return [
            self._get_gemeenten_nl(),
            self._get_fusiegemeenten_nl(),
            self._get_communities_en()
        ]
    def testRepr(self):
        cs = ConceptScheme(
            uri='urn:x-skosprovider:gemeenten'
        )
        self.assertEqual("ConceptScheme('urn:x-skosprovider:gemeenten')", cs.__repr__())
    def testLabel(self):
        # ConceptScheme.label() must agree with the module-level label().
        labels = self._get_labels()
        cs = ConceptScheme(
            uri='urn:x-skosprovider:gemeenten',
            labels=labels
        )
        self.assertEqual(label(labels), cs.label())
        self.assertEqual(label(labels, 'nl'), cs.label('nl'))
        self.assertEqual(label(labels, 'en'), cs.label('en'))
        self.assertEqual(label(labels, None), cs.label(None))
    def testSortKey(self):
        # The sortLabel wins for its own language; for 'en' the English
        # prefLabel is used, lowercased; 'uri' sorts by the uri itself.
        labels = self._get_labels()
        sl = Label('allereerste', type='sortLabel', language='nl-BE')
        labels.append(sl)
        cs = ConceptScheme(
            uri='urn:x-skosprovider:gemeenten',
            labels=labels
        )
        self.assertEqual('allereerste', cs._sortkey('sortlabel'))
        self.assertEqual('allereerste', cs._sortkey('sortlabel', 'nl'))
        self.assertEqual('communities', cs._sortkey('sortlabel', 'en'))
        self.assertEqual('urn:x-skosprovider:gemeenten', cs._sortkey('uri'))
    def testLanguages(self):
        # The languages list is stored as given.
        labels = self._get_labels()
        cs = ConceptScheme(
            uri='urn:x-skosprovider:gemeenten',
            labels=labels,
            languages=['nl', 'en', 'und']
        )
        self.assertEqual(cs.languages, ['nl', 'en', 'und'])
    def testSource(self):
        # A source passed as a dict is converted to a Source instance.
        cs = ConceptScheme(
            uri='urn:x-skosprovider:gemeenten',
            sources=[{'citation': 'My citation'}]
        )
        self.assertEqual(1, len(cs.sources))
        self.assertIsInstance(cs.sources[0], Source)
        self.assertEqual('My citation', cs.sources[0].citation)
    def testEmptyUri(self):
        # A ConceptScheme requires a uri.
        with pytest.raises(ValueError):
            cs = ConceptScheme(uri=None)
class ConceptTest(unittest.TestCase):
    # Tests for Concept: repr, attribute presence, label delegation,
    # sort keys, uri, member_of, matches and source conversion.
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def _get_knokke_heist_nl(self):
        # Dutch preferred label.
        return Label('Knokke-Heist', type="prefLabel", language='nl-BE')
    def _get_cnocke_heyst_nl(self):
        # Dutch alternative label.
        return Label('Cnock-Heyst', type="altLabel", language='nl-BE')
    def _get_knokke_heist_en(self):
        # English preferred label.
        return Label('Knocke-Heyst', type="prefLabel", language='en')
    def _get_labels(self):
        # Fixture: the three labels above, in a fresh list.
        return [
            self._get_knokke_heist_nl(),
            self._get_cnocke_heyst_nl(),
            self._get_knokke_heist_en()
        ]
    def testRepr(self):
        c = Concept(1)
        self.assertEqual("Concept('1')", c.__repr__())
    def testIn(self):
        # A bare Concept still exposes the full attribute set.
        c = Concept(1)
        assert hasattr(c, 'id')
        assert hasattr(c, 'uri')
        assert hasattr(c, 'labels')
        assert hasattr(c, 'notes')
        assert hasattr(c, 'broader')
        assert hasattr(c, 'narrower')
        assert hasattr(c, 'related')
        assert hasattr(c, 'member_of')
    def testLabel(self):
        # Concept.label() must agree with the module-level label().
        labels = self._get_labels()
        c = Concept(1, labels=labels)
        self.assertEqual(label(labels), c.label())
        self.assertEqual(label(labels, 'nl'), c.label('nl'))
        self.assertEqual(label(labels, 'en'), c.label('en'))
        self.assertEqual(label(labels, None), c.label(None))
    def testSortKey(self):
        # The sortLabel wins for its own language; the 'uri' sortkey is
        # '' when the concept has no uri.
        labels = self._get_labels()
        sl = Label('allereerste', type='sortLabel', language='nl-BE')
        labels.append(sl)
        c = Concept(1, labels=labels)
        self.assertEqual('allereerste', c._sortkey('sortlabel'))
        self.assertEqual('allereerste', c._sortkey('sortlabel', 'nl'))
        self.assertEqual('knocke-heyst', c._sortkey('sortlabel', 'en'))
        self.assertEqual('', c._sortkey('uri'))
    def testUri(self):
        c = Concept(1, uri='urn:x-skosprovider:gemeenten:1')
        self.assertEqual(1, c.id)
        self.assertEqual('urn:x-skosprovider:gemeenten:1', c.uri)
    def testMemberOf(self):
        c = Concept(
            1,
            uri='urn:x-skosprovider:gemeenten:1',
            member_of=[15])
        self.assertEqual(set([15]), set(c.member_of))
    def testMatches(self):
        # All five match types are present even when only one is supplied.
        c = Concept(
            1,
            uri='urn:x-skosprovider:gemeenten:1',
            matches={
                'broad': ['http://id.something.org/provincies/1']
            }
        )
        assert 'close' in c.matches
        assert 'exact' in c.matches
        assert 'broad' in c.matches
        assert 'narrow' in c.matches
        assert 'related' in c.matches
        assert ['http://id.something.org/provincies/1'] == c.matches['broad']
    def testSource(self):
        # A source passed as a dict is converted to a Source instance.
        c = Concept(
            id=1,
            sources=[{'citation': 'My citation'}]
        )
        self.assertEqual(1, len(c.sources))
        self.assertIsInstance(c.sources[0], Source)
        self.assertEqual('My citation', c.sources[0].citation)
class CollectionTest(unittest.TestCase):
    """Tests for the Collection class: repr, id/uri, labels, sort keys,
    members, member_of, sources and infer_concept_relations."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def _get_deelgemeenten_nl(self):
        # Dutch preferred label.
        return Label('Deelgemeenten', type="prefLabel", language='nl-BE')

    def _get_prefusiegemeenten_nl(self):
        # Dutch alternative label.
        return Label('Prefusiegemeenten', type="altLabel", language='nl-BE')

    def _get_labels(self):
        return [
            self._get_deelgemeenten_nl(),
            self._get_prefusiegemeenten_nl(),
        ]

    def testRepr(self):
        c = Collection(1)
        self.assertEqual("Collection('1')", c.__repr__())

    def testId(self):
        coll = Collection(350)
        self.assertEqual(350, coll.id)

    def testUri(self):
        c = Collection(350, uri='urn:x-skosprovider:gemeenten:350')
        self.assertEqual(350, c.id)
        self.assertEqual('urn:x-skosprovider:gemeenten:350', c.uri)

    def testLabel(self):
        # Collection.label() must agree with the module-level label().
        labels = self._get_labels()
        coll = Collection(350, labels=labels)
        self.assertEqual(label(labels), coll.label())
        self.assertEqual(label(labels, 'nl'), coll.label('nl'))
        self.assertEqual(label(labels, 'en'), coll.label('en'))
        self.assertEqual(label(labels, None), coll.label(None))

    def testSortKey(self):
        # Renamed from 'testSortkey' for consistency with
        # ConceptTest.testSortKey; still discovered via the 'test' prefix.
        labels = self._get_labels()
        sl = Label('allereerste', type='sortLabel', language='nl-BE')
        labels.append(sl)
        coll = Collection(350, labels=labels)
        self.assertEqual('allereerste', coll._sortkey('sortlabel'))
        self.assertEqual('allereerste', coll._sortkey('sortlabel', 'nl'))
        self.assertEqual('allereerste', coll._sortkey('sortlabel', 'en'))
        self.assertEqual('deelgemeenten', coll._sortkey('label', 'nl'))
        self.assertEqual('', coll._sortkey('uri'))

    def testEmptyMembers(self):
        labels = self._get_labels()
        coll = Collection(
            350,
            labels=labels,
            members=[]
        )
        self.assertEqual([], coll.members)

    def testMembers(self):
        labels = self._get_labels()
        coll = Collection(
            id=350,
            labels=labels,
            members=[1, 2]
        )
        # FIX: was assertTrue(set([1, 2]), set(coll.members)), which always
        # passes -- the second argument of assertTrue is just the failure
        # message.  assertEqual actually compares the two sets.
        self.assertEqual(set([1, 2]), set(coll.members))

    def testMemberOf(self):
        coll = Collection(
            id=1,
            member_of=[350]
        )
        # FIX: was assertTrue(...) with two arguments; see testMembers.
        self.assertEqual(set([350]), set(coll.member_of))

    def testSource(self):
        # A source passed as a dict is converted to a Source instance.
        coll = Collection(
            id=1,
            sources=[{'citation': 'My citation'}]
        )
        self.assertEqual(1, len(coll.sources))
        self.assertIsInstance(coll.sources[0], Source)
        self.assertEqual('My citation', coll.sources[0].citation)

    def testInferConceptRelations(self):
        # FIX: renamed from 'testnferConceptRelations' (typo: missing 'I').
        coll = Collection(
            id=1,
        )
        self.assertTrue(coll.infer_concept_relations)
        coll = Collection(
            id=1,
            infer_concept_relations=False
        )
        self.assertFalse(coll.infer_concept_relations)
class DictToNoteFunctionTest(unittest.TestCase):
    """dict_to_note must accept both a plain dict and a Note instance."""

    def _check(self, converted):
        # Shared expectations for both input forms.
        self.assertEqual('A note.', converted.note)
        self.assertEqual('note', converted.type)
        self.assertEqual('und', converted.language)

    def testDictToNodeWithDict(self):
        self._check(dict_to_note({'note': 'A note.', 'type': 'note'}))

    def testDictToNodeWithNote(self):
        self._check(dict_to_note(Note('A note.', 'note')))
class DictToLabelFunctionTest(unittest.TestCase):
    """dict_to_label must accept both a plain dict and a Label instance."""

    def _check(self, converted):
        # Shared expectations for both input forms.
        self.assertEqual('A label.', converted.label)
        self.assertEqual('prefLabel', converted.type)
        self.assertEqual('und', converted.language)

    def testDictToLabelWithDict(self):
        self._check(dict_to_label({'label': 'A label.', 'type': 'prefLabel'}))

    def testDictToLabelWithlabel(self):
        self._check(dict_to_label(Label('A label.', 'prefLabel')))
class DictToSourceFunctionTest(unittest.TestCase):
    """dict_to_source must accept a dict (optionally with markup) or a
    ready-made Source instance."""

    _CITATION = '<NAME>; <NAME>. & <NAME>. 2015. Data-driven systems and system-driven data: the story of the Flanders Heritage Inventory (1995-2015)'

    def testDictToSourceWithDict(self):
        src = dict_to_source({'citation': self._CITATION})
        self.assertEqual(self._CITATION, src.citation)

    def testDictToSourceWithDictWithMarkup(self):
        marked = '<strong><NAME>; <NAME>. & <NAME>.</strong> 2015. Data-driven systems and system-driven data: the story of the Flanders Heritage Inventory (1995-2015)'
        src = dict_to_source({'citation': marked, 'markup': 'HTML'})
        self.assertEqual(marked, src.citation)
        self.assertEqual('HTML', src.markup)

    def testDictToSourceWithSource(self):
        src = dict_to_source(Source(self._CITATION))
        self.assertEqual(self._CITATION, src.citation)
class LabelFunctionTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def _get_knokke_heist_nl(self):
return Label('Knokke-Heist', type="prefLabel", language='nl-BE')
def _get_cnocke_heyst_nl(self):
return Label('Cnock-Heyst', type="altLabel", language='nl-BE')
def _get_knokke_heist_en(self):
return Label('Knocke-Heyst', type="prefLabel", language='en-GB')
def _get_und(self):
return Label('nokke-eist', type='prefLabel', language='und')
def _get_sortlabel(self):
return Label('allereerste', type='sortLabel', language='nl-BE')
def test_label_empty(self):
self.assertEqual(None, label([]))
self.assertEqual(None, label([], 'nl-BE'))
self.assertEqual(None, label([], None))
self.assertEqual(None, label([], 'und'))
def test_label_pref(self):
kh = self._get_knokke_heist_nl()
labels = [kh]
self.assertEqual(kh, label(labels))
self.assertEqual(kh, label(labels, 'nl-BE'))
self.assertEqual(kh, label(labels, 'en-GB'))
self.assertEqual(kh, label(labels, None))
def test_label_pref_und(self):
und = self._get_und()
labels = [und]
assert label(labels) is not None
self.assertEqual(und, label(labels))
self.assertEqual(und, label(labels, 'nl-BE'))
self.assertEqual(und, label(labels, 'en-GB'))
self.assertEqual(und, label(labels, 'und'))
self.assertEqual(und, label(labels, 'any'))
self.assertEqual(und, label(labels, None))
def test_label_pref_nl_and_en(self):
kh = self._get_knokke_heist_nl()
khen = self._get_knokke_heist_en()
labels = [kh, khen]
self.assertIn(label(labels), [kh, khen])
self.assertEqual(kh, label(labels, 'nl-BE'))
self.assertEqual(khen, label(labels, 'en-GB'))
self.assertIn(label(labels, None), [kh, khen])
def test_label_inexact_language_match(self):
kh = self._get_knokke_heist_nl()
ch = self._get_cnocke_heyst_nl()
khen = self._get_knokke_heist_en()
labels = [kh, ch, khen]
assert khen == label(labels, 'en')
assert kh == label(labels, 'nl')
assert label(labels, None) in [kh, khen]
def test_exact_precedes_inexact_match(self):
khnl = Label('Knokke-Heist', type="prefLabel", language='nl')
chnl = Label('Cnock-Heyst', type="altLabel", language='nl')
khen = Label('Knocke-Heyst', type="prefLabel", language='en')
khnlbe = self._get_knokke_heist_nl()
chnlbe = self._get_cnocke_heyst_nl()
khengb = self._get_knokke_heist_en()
labels = [chnl, khen, khnlbe, khnl, chnlbe, khengb]
assert khnlbe | |
# <gh_stars>1-10  (dataset artifact, commented out: the bare tag is not valid Python)
# CIS 1.2.4 scenarios for the kube-apiserver manifest.  Each case is
# (check id, manifest mutation, manifest path, expected outcome); a
# mutation either "set"s flags to given values or "unset"s (removes) them.
cis_1_2_4 = [
    ('CIS 1.2.4', {"set": {"--kubelet-https": "false"}},
     '/etc/kubernetes/manifests/kube-apiserver.yaml', 'failed'),
    ('CIS 1.2.4', {"set": {"--kubelet-https": "true"}},
     '/etc/kubernetes/manifests/kube-apiserver.yaml', 'passed'),
    ('CIS 1.2.4', {"unset": ["--kubelet-https"]},
     '/etc/kubernetes/manifests/kube-apiserver.yaml', 'passed'),
]
# CIS 2.x (etcd) scenarios.  Each case is
# (check id, manifest mutation, manifest path, expected outcome).
_ETCD = '/etc/kubernetes/manifests/etcd.yaml'

cis_2_1 = [
    ('CIS 2.1', {"set": {"--cert-file": "/etc/kubernetes/pki/etcd/server.crt",
                         "--key-file": "/etc/kubernetes/pki/etcd/server.key"}},
     _ETCD, 'passed'),
]

cis_2_2 = [
    ('CIS 2.2', {"unset": ["--client-cert-auth"]}, _ETCD, 'failed'),
    ('CIS 2.2', {"set": {"--client-cert-auth": "false"}}, _ETCD, 'failed'),
    ('CIS 2.2', {"set": {"--client-cert-auth": "true"}}, _ETCD, 'passed'),
]

cis_2_3 = [
    ('CIS 2.3', {"set": {"--auto-tls": "false"}}, _ETCD, 'passed'),
    ('CIS 2.3', {"set": {"--auto-tls": "true"}}, _ETCD, 'failed'),
    ('CIS 2.3', {"unset": ["--auto-tls"]}, _ETCD, 'passed'),
]

cis_2_4 = [
    ('CIS 2.4', {"set": {"--peer-cert-file": "/etc/kubernetes/pki/etcd/peer.crt",
                         "--peer-key-file": "/etc/kubernetes/pki/etcd/peer.key"}},
     _ETCD, 'passed'),
]

cis_2_5 = [
    ('CIS 2.5', {"unset": ["--peer-client-cert-auth"]}, _ETCD, 'failed'),
    ('CIS 2.5', {"set": {"--peer-client-cert-auth": "false"}}, _ETCD, 'failed'),
    ('CIS 2.5', {"set": {"--peer-client-cert-auth": "true"}}, _ETCD, 'passed'),
]

cis_2_6 = [
    ('CIS 2.6', {"set": {"--peer-auto-tls": "false"}}, _ETCD, 'passed'),
    ('CIS 2.6', {"set": {"--peer-auto-tls": "true"}}, _ETCD, 'failed'),
    ('CIS 2.6', {"unset": ["--peer-auto-tls"]}, _ETCD, 'passed'),
]
# CIS 1.4.x (kube-scheduler) scenarios.  Each case is
# (check id, manifest mutation, manifest path, expected outcome).
_SCHEDULER = '/etc/kubernetes/manifests/kube-scheduler.yaml'

cis_1_4_1 = [
    ('CIS 1.4.1', {"set": {"--profiling": "true"}}, _SCHEDULER, 'failed'),
    ('CIS 1.4.1', {"unset": ["--profiling"]}, _SCHEDULER, 'failed'),
    ('CIS 1.4.1', {"set": {"--profiling": "false"}}, _SCHEDULER, 'passed'),
]

cis_1_4_2 = [
    ('CIS 1.4.2', {"set": {"--bind-address": "0.0.0.0"}}, _SCHEDULER, 'failed'),
    ('CIS 1.4.2', {"unset": ["--bind-address"]}, _SCHEDULER, 'failed'),
    ('CIS 1.4.2', {"set": {"--bind-address": "127.0.0.1"}}, _SCHEDULER, 'passed'),
]
# CIS 1.3.x (kube-controller-manager) scenarios.  Each case is
# (check id, manifest mutation, manifest path, expected outcome).
_CONTROLLER = '/etc/kubernetes/manifests/kube-controller-manager.yaml'

cis_1_3_2 = [
    ('CIS 1.3.2', {"set": {"--profiling": "true"}}, _CONTROLLER, 'failed'),
    ('CIS 1.3.2', {"unset": ["--profiling"]}, _CONTROLLER, 'failed'),
    ('CIS 1.3.2', {"set": {"--profiling": "false"}}, _CONTROLLER, 'passed'),
]

cis_1_3_3 = [
    ('CIS 1.3.3', {"set": {"--use-service-account-credentials": "false"}}, _CONTROLLER, 'failed'),
    ('CIS 1.3.3', {"unset": ["--use-service-account-credentials"]}, _CONTROLLER, 'failed'),
    ('CIS 1.3.3', {"set": {"--use-service-account-credentials": "true"}}, _CONTROLLER, 'passed'),
]

# NOTE(review): 1.3.4 expects 'passed' for the same unset that 1.3.3
# marks 'failed' -- presumably intentional (a different check evaluates
# the flag differently); confirm against the check implementation.
cis_1_3_4 = [
    ('CIS 1.3.4', {"unset": ["--use-service-account-credentials"]}, _CONTROLLER, 'passed'),
]

cis_1_3_5 = [
    ('CIS 1.3.5', {"unset": ["--root-ca-file"]}, _CONTROLLER, 'failed'),
]

cis_1_3_6 = [
    ('CIS 1.3.6', {"set": {"--feature-gates": "RotateKubeletServerCertificate=false"}}, _CONTROLLER, 'failed'),
    ('CIS 1.3.6', {"unset": ["--feature-gates"]}, _CONTROLLER, 'failed'),
    ('CIS 1.3.6', {"set": {"--feature-gates": "RotateKubeletServerCertificate=true"}}, _CONTROLLER, 'passed'),
]

cis_1_3_7 = [
    ('CIS 1.3.7', {"set": {"--bind-address": "0.0.0.0"}}, _CONTROLLER, 'failed'),
    ('CIS 1.3.7', {"unset": ["--bind-address"]}, _CONTROLLER, 'failed'),
    ('CIS 1.3.7', {"set": {"--bind-address": "127.0.0.1"}}, _CONTROLLER, 'passed'),
]
# CIS 1.2.x (kube-apiserver) scenarios, part 1.  Each case is
# (check id, manifest mutation, manifest path, expected outcome).
_APISERVER = '/etc/kubernetes/manifests/kube-apiserver.yaml'

cis_1_2_2 = [
    ('CIS 1.2.2', {"unset": ["--token-auth-file"]}, _APISERVER, 'passed'),
]

cis_1_2_3 = [
    ('CIS 1.2.3', {"unset": ["--DenyServiceExternalIPs"]}, _APISERVER, 'passed'),
]

cis_1_2_5 = [
    # NOTE(review): the .crt path deliberately keeps its original
    # trailing space -- confirm whether the check tolerates it.
    ('CIS 1.2.5', {"set": {"--kubelet-client-certificate": "/etc/kubernetes/pki/apiserver-kubelet-client.crt ",
                           "--kubelet-client-key": "/etc/kubernetes/pki/apiserver-kubelet-client.key"}},
     _APISERVER, 'passed'),
]

cis_1_2_6 = [
    ('CIS 1.2.6', {"unset": ["--kubelet-certificate-authority"]}, _APISERVER, 'failed'),
]

cis_1_2_7 = [
    ('CIS 1.2.7', {"set": {"--authorization-mode": "AlwaysAllow"}}, _APISERVER, 'failed'),
    ('CIS 1.2.7', {"unset": ["--authorization-mode"]}, _APISERVER, 'failed'),
    ('CIS 1.2.7', {"set": {"--authorization-mode": "Node,RBAC"}}, _APISERVER, 'passed'),
]

cis_1_2_8 = [
    ('CIS 1.2.8', {"set": {"--authorization-mode": "RBAC"}}, _APISERVER, 'failed'),
    ('CIS 1.2.8', {"unset": ["--authorization-mode"]}, _APISERVER, 'failed'),
    ('CIS 1.2.8', {"set": {"--authorization-mode": "Node,RBAC"}}, _APISERVER, 'passed'),
]

cis_1_2_9 = [
    ('CIS 1.2.9', {"set": {"--authorization-mode": "Node"}}, _APISERVER, 'failed'),
    ('CIS 1.2.9', {"unset": ["--authorization-mode"]}, _APISERVER, 'failed'),
    ('CIS 1.2.9', {"set": {"--authorization-mode": "Node,RBAC"}}, _APISERVER, 'passed'),
]
# CIS 1.2.x (kube-apiserver) scenarios, part 2: admission plugins and
# secure port.  Each case is
# (check id, manifest mutation, manifest path, expected outcome).
_APISERVER = '/etc/kubernetes/manifests/kube-apiserver.yaml'

cis_1_2_10 = [
    ('CIS 1.2.10', {"unset": ["--enable-admission-plugins"]}, _APISERVER, 'failed'),
    ('CIS 1.2.10', {"set": {"--enable-admission-plugins": "EventRateLimit"}}, _APISERVER, 'passed'),
]

cis_1_2_11 = [
    ('CIS 1.2.11', {"set": {"--enable-admission-plugins": "AlwaysAdmit"}}, _APISERVER, 'failed'),
    ('CIS 1.2.11', {"unset": ["--enable-admission-plugins"]}, _APISERVER, 'passed'),
    ('CIS 1.2.11', {"set": {"--enable-admission-plugins": "NodeRestriction"}}, _APISERVER, 'passed'),
]

cis_1_2_12 = [
    ('CIS 1.2.12', {"unset": ["--enable-admission-plugins"]}, _APISERVER, 'failed'),
    ('CIS 1.2.12', {"set": {"--enable-admission-plugins": "AlwaysPullImages"}}, _APISERVER, 'passed'),
]

cis_1_2_13 = [
    ('CIS 1.2.13', {"set": {"--enable-admission-plugins": "AlwaysDeny"}}, _APISERVER, 'failed'),
    ('CIS 1.2.13', {"set": {"--enable-admission-plugins": "SecurityContextDeny"}}, _APISERVER, 'passed'),
    ('CIS 1.2.13', {"set": {"--enable-admission-plugins": "PodSecurityPolicy"}}, _APISERVER, 'passed'),
]

cis_1_2_14 = [
    ('CIS 1.2.14', {"set": {"--disable-admission-plugins": "ServiceAccount"}}, _APISERVER, 'failed'),
    ('CIS 1.2.14', {"unset": ["--disable-admission-plugins"]}, _APISERVER, 'failed'),
]

cis_1_2_15 = [
    ('CIS 1.2.15', {"set": {"--disable-admission-plugins": "NamespaceLifecycle"}}, _APISERVER, 'failed'),
    ('CIS 1.2.15', {"unset": ["--disable-admission-plugins"]}, _APISERVER, 'failed'),
]

cis_1_2_16 = [
    ('CIS 1.2.16', {"unset": ["--enable-admission-plugins"]}, _APISERVER, 'failed'),
    ('CIS 1.2.16', {"set": {"--enable-admission-plugins": "NodeRestriction"}}, _APISERVER, 'passed'),
]

cis_1_2_17 = [
    ('CIS 1.2.17', {"unset": ["--secure-port"]}, _APISERVER, 'passed'),
    # "260492" is an intentionally out-of-range port number.
    ('CIS 1.2.17', {"set": {"--secure-port": "260492"}}, _APISERVER, 'failed'),
    ('CIS 1.2.17', {"set": {"--secure-port": "6443"}}, _APISERVER, 'passed'),
]
cis_1_2_18 = [(
'CIS 1.2.18',
{
"set": {
"--profiling": "true"
}
},
'/etc/kubernetes/manifests/kube-apiserver.yaml',
'failed'
),
(
'CIS 1.2.18',
{
"set": {
"--profiling": "false"
}
},
'/etc/kubernetes/manifests/kube-apiserver.yaml',
'passed'
),
(
'CIS 1.2.18',
{
"unset": [
"--profiling"
]
},
'/etc/kubernetes/manifests/kube-apiserver.yaml',
'failed'
)]
cis_1_2_19 = [(
'CIS 1.2.19',
{
"unset": [
"--audit-log-path"
]
},
'/etc/kubernetes/manifests/kube-apiserver.yaml',
'failed'
)]
cis_1_2_20 = [(
'CIS 1.2.20',
{
"set": {
"--audit-log-maxage": "260492"
}
},
'/etc/kubernetes/manifests/kube-apiserver.yaml',
'failed'
),
(
'CIS 1.2.20',
{
"set": {
"--audit-log-maxage": "30"
}
},
'/etc/kubernetes/manifests/kube-apiserver.yaml',
'passed'
),
(
'CIS 1.2.20',
{
"unset": [
"--audit-log-maxage"
]
},
'/etc/kubernetes/manifests/kube-apiserver.yaml',
'failed'
)]
cis_1_2_21 = [(
'CIS 1.2.21',
{
"set": {
"--audit-log-maxbackup": "-1"
}
},
'/etc/kubernetes/manifests/kube-apiserver.yaml',
'failed'
),
(
'CIS 1.2.21',
{
"set": {
"--audit-log-maxbackup": "10"
}
},
'/etc/kubernetes/manifests/kube-apiserver.yaml',
'passed'
),
(
'CIS 1.2.21',
{
"unset": [
"--audit-log-maxbackup"
]
},
'/etc/kubernetes/manifests/kube-apiserver.yaml',
'failed'
)]
# CIS 1.2.22 cases: exercises --audit-log-maxsize
cis_1_2_22 = [(
    'CIS 1.2.22',
    {
        "set": {
            "--audit-log-maxsize": "-1"
        }
    },
    '/etc/kubernetes/manifests/kube-apiserver.yaml',
    'failed'
),
(
    'CIS 1.2.22',
    {
        "set": {
            "--audit-log-maxsize": "100"
        }
    },
    '/etc/kubernetes/manifests/kube-apiserver.yaml',
    'passed'
),
(
    'CIS 1.2.22',
    {
        "unset": [
            "--audit-log-maxsize"
        ]
    },
    '/etc/kubernetes/manifests/kube-apiserver.yaml',
    'failed'
)]
# CIS 1.2.23 cases: exercises --request-timeout (unset is acceptable)
cis_1_2_23 = [(
    'CIS 1.2.23',
    {
        "set": {
            "--request-timeout": "-1s"
        }
    },
    '/etc/kubernetes/manifests/kube-apiserver.yaml',
    'failed'
),
(
    'CIS 1.2.23',
    {
        "set": {
            "--request-timeout": "300s"
        }
    },
    '/etc/kubernetes/manifests/kube-apiserver.yaml',
    'passed'
),
(
    'CIS 1.2.23',
    {
        "unset": [
            "--request-timeout"
        ]
    },
    '/etc/kubernetes/manifests/kube-apiserver.yaml',
    'passed'
)]
# CIS 1.2.24 cases: exercises --service-account-lookup (unset is acceptable)
cis_1_2_24 = [(
    'CIS 1.2.24',
    {
        "set": {
            "--service-account-lookup": "false"
        }
    },
    '/etc/kubernetes/manifests/kube-apiserver.yaml',
    'failed'
),
(
    'CIS 1.2.24',
    {
        "set": {
            "--service-account-lookup": "true"
        }
    },
    '/etc/kubernetes/manifests/kube-apiserver.yaml',
    'passed'
),
(
    'CIS 1.2.24',
    {
        "unset": [
            "--service-account-lookup"
        ]
    },
    '/etc/kubernetes/manifests/kube-apiserver.yaml',
    'passed'
)]
# CIS 1.2.25 case: exercises --service-account-key-file
cis_1_2_25 = [(
    'CIS 1.2.25',
    {
        "set": {
            "--service-account-key-file": "/etc/kubernetes/pki/sa.pub"
        }
    },
    '/etc/kubernetes/manifests/kube-apiserver.yaml',
    'passed'
)]
# CIS 1.2.26 case: exercises --etcd-certfile / --etcd-keyfile
cis_1_2_26 = [(
    'CIS 1.2.26',
    {
        "set": {
            "--etcd-certfile": "/etc/kubernetes/pki/apiserver-etcd-client.crt",
            "--etcd-keyfile": "/etc/kubernetes/pki/apiserver-etcd-client.key"
        }
    },
    '/etc/kubernetes/manifests/kube-apiserver.yaml',
    'passed'
)]
# CIS 1.2.27 case: exercises --tls-cert-file / --tls-private-key-file
cis_1_2_27 = [(
    'CIS 1.2.27',
    {
        "set": {
            "--tls-cert-file": "/etc/kubernetes/pki/apiserver.crt",
            "--tls-private-key-file": "/etc/kubernetes/pki/apiserver.key"
        }
    },
    '/etc/kubernetes/manifests/kube-apiserver.yaml',
    'passed'
)]
# CIS 1.2.28 case: exercises --client-ca-file
cis_1_2_28 = [(
    'CIS 1.2.28',
    {
        "set": {
            "--client-ca-file": "/etc/kubernetes/pki/ca.crt"
        }
    },
    '/etc/kubernetes/manifests/kube-apiserver.yaml',
    'passed'
)]
# CIS 1.2.29 case: exercises --etcd-cafile
cis_1_2_29 = [(
    'CIS 1.2.29',
    {
        "set": {
            "--etcd-cafile": "/etc/kubernetes/pki/etcd/ca.crt"
        }
    },
    '/etc/kubernetes/manifests/kube-apiserver.yaml',
    'passed'
)]
# CIS 1.2.32 cases: exercises --tls-cipher-suites.
# NOTE(review): the rule id here uses underscores ('CIS 1_2_32') unlike the
# dotted ids above — confirm this matches the id expected by the rule registry.
cis_1_2_32 = [(
    'CIS 1_2_32',
    {
        "set": {
            "--tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_DUMMY"
        }
    },
    '/etc/kubernetes/manifests/kube-apiserver.yaml',
    'failed'
),
(
    'CIS 1_2_32',
    {
        "set": {
            "--tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256"
        }
    },
    '/etc/kubernetes/manifests/kube-apiserver.yaml',
    'passed'
),
(
    'CIS 1_2_32',
    {
        "set": {
            "--tls-cipher-suites":
                "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"
        }
    },
    '/etc/kubernetes/manifests/kube-apiserver.yaml',
    'passed'
)]
# Kubelet config fixtures (same tuple layout as above, but the patch targets
# the kubelet YAML config rather than apiserver command-line flags).

# CIS 4.2.1 cases: exercises authentication.anonymous.enabled
cis_4_2_1 = [(
    'CIS 4.2.1',
    {
        "set": {
            "authentication": {
                "anonymous": {
                    "enabled": True
                }
            }
        },
    },
    '/var/lib/kubelet/config.yaml',
    'failed'
),
(
    'CIS 4.2.1',
    {
        "set": {
            "authentication": {
                "anonymous": {
                    "enabled": False
                }
            }
        }
    },
    '/var/lib/kubelet/config.yaml',
    'passed'
)
]
# CIS 4.2.2 cases: exercises authorization.mode
cis_4_2_2 = [(
    'CIS 4.2.2',
    {
        "set": {
            "authorization": {
                "mode": "AlwaysAllow"
            }
        }
    },
    '/var/lib/kubelet/config.yaml',
    'failed'
),
(
    'CIS 4.2.2',
    {
        "set": {
            "authorization": {
                "mode": "Webhook"
            }
        }
    },
    '/var/lib/kubelet/config.yaml',
    'passed'
)]
# CIS 4.2.3 case: exercises authentication.x509.clientCAFile
cis_4_2_3 = [(
    'CIS 4.2.3',
    {
        "unset": ["authentication.x509.clientCAFile"]
    },
    '/var/lib/kubelet/config.yaml',
    'failed'
)]
# CIS 4.2.4 cases: exercises readOnlyPort
cis_4_2_4 = [(
    'CIS 4.2.4',
    {
        "set": {
            "readOnlyPort": 26492
        }
    },
    '/var/lib/kubelet/config.yaml',
    'failed'
),
(
    'CIS 4.2.4',
    {
        "set": {
            "readOnlyPort": 0
        }
    },
    '/var/lib/kubelet/config.yaml',
    'passed'
)]
# CIS 4.2.5 cases: exercises streamingConnectionIdleTimeout
cis_4_2_5 = [(
    'CIS 4.2.5',
    {
        "set": {
            "streamingConnectionIdleTimeout": 0
        }
    },
    '/var/lib/kubelet/config.yaml',
    'failed'
),
(
    'CIS 4.2.5',
    {
        "set": {
            "streamingConnectionIdleTimeout": "26492s"
        }
    },
    '/var/lib/kubelet/config.yaml',
    'passed'
)]
# CIS 4.2.6 cases: exercises protectKernelDefaults
cis_4_2_6 = [(
    'CIS 4.2.6',
    {
        "set": {
            "protectKernelDefaults": False
        }
    },
    '/var/lib/kubelet/config.yaml',
    'failed'
),
(
    'CIS 4.2.6',
    {
        "set": {
            "protectKernelDefaults": True
        }
    },
    '/var/lib/kubelet/config.yaml',
    'passed'
)]
cis_4_2_7 = [(
'CIS 4.2.7',
{
"set": {
"makeIPTablesUtilChains": False
}
| |
= traj_node._children[name]
hdf5_group = getattr(hdf5_group, name)
# Store final group and recursively everything below it
if current_depth <= max_depth:
self._tree_store_nodes_dfs(traj_node, leaf_name, store_data=store_data,
with_links=with_links, recursive=recursive,
max_depth=max_depth, current_depth=current_depth,
parent_hdf5_group=hdf5_group)
######################## Storing and Loading Sub Trees #######################################
def _tree_create_leaf(self, name, trajectory, hdf5_group):
""" Creates a new pypet leaf instance.
Returns the leaf and if it is an explored parameter the length of the range.
"""
class_name = self._all_get_from_attrs(hdf5_group, HDF5StorageService.CLASS_NAME)
# Create the instance with the appropriate constructor
class_constructor = trajectory._create_class(class_name)
instance = trajectory._construct_instance(class_constructor, name)
return instance
    def _tree_load_nodes_dfs(self, parent_traj_node, load_data, with_links, recursive,
                             max_depth, current_depth, trajectory, as_new, hdf5_group):
        """Loads a node from hdf5 file and if desired recursively everything below

        Implemented as an iterative depth-first traversal over an explicit
        stack instead of recursion.

        :param parent_traj_node: The parent node whose child should be loaded
        :param load_data: How to load the data
        :param with_links: If links should be loaded
        :param recursive: Whether loading recursively below hdf5_group
        :param max_depth: Maximum depth
        :param current_depth: Current depth
        :param trajectory: The trajectory object
        :param as_new: If trajectory is loaded as new
        :param hdf5_group: The hdf5 group containing the child to be loaded

        """
        if max_depth is None:
            max_depth = float('inf')
        # Explicit DFS stack of (parent node, depth, hdf5 group) work items
        loading_list = [(parent_traj_node, current_depth, hdf5_group)]
        while loading_list:
            parent_traj_node, current_depth, hdf5_group = loading_list.pop()
            if isinstance(hdf5_group, pt.link.SoftLink):
                if with_links:
                    # We end up here when auto-loading a soft link
                    self._tree_load_link(parent_traj_node, load_data=load_data, traj=trajectory,
                                         as_new=as_new, hdf5_soft_link=hdf5_group)
                continue
            name = hdf5_group._v_name
            is_leaf = self._all_get_from_attrs(hdf5_group, HDF5StorageService.LEAF)
            in_trajectory = name in parent_traj_node._children
            if is_leaf:
                # In case we have a leaf node, we need to check if we have to create a new
                # parameter or result
                if in_trajectory:
                    instance = parent_traj_node._children[name]
                # Otherwise we need to create a new instance
                else:
                    instance = self._tree_create_leaf(name, trajectory, hdf5_group)
                    # Add the instance to the trajectory tree
                    parent_traj_node._add_leaf_from_storage(args=(instance,), kwargs={})
                self._prm_load_parameter_or_result(instance, load_data=load_data,
                                                   _hdf5_group=hdf5_group)
                # Freshly loaded data of a trajectory loaded "as new" is not on disk yet
                if as_new:
                    instance._stored = False
            else:
                if in_trajectory:
                    traj_group = parent_traj_node._children[name]
                    # Overwriting wipes annotations and comment before reloading
                    if load_data == pypetconstants.OVERWRITE_DATA:
                        traj_group.v_annotations.f_empty()
                        traj_group.v_comment = ''
                else:
                    # Custom group classes carry a CLASS_NAME attribute; plain groups do not
                    if HDF5StorageService.CLASS_NAME in hdf5_group._v_attrs:
                        class_name = self._all_get_from_attrs(hdf5_group,
                                                              HDF5StorageService.CLASS_NAME)
                        class_constructor = trajectory._create_class(class_name)
                        instance = trajectory._construct_instance(class_constructor, name)
                        args = (instance,)
                    else:
                        args = (name,)
                    # If the group does not exist create it'
                    traj_group = parent_traj_node._add_group_from_storage(args=args, kwargs={})
                # Load annotations and comment
                self._grp_load_group(traj_group, load_data=load_data, with_links=with_links,
                                     recursive=False, max_depth=max_depth,
                                     _traj=trajectory, _as_new=as_new,
                                     _hdf5_group=hdf5_group)
                if recursive and current_depth < max_depth:
                    new_depth = current_depth + 1
                    # Queue both sub-groups and soft links for traversal
                    for children in (hdf5_group._v_groups, hdf5_group._v_links):
                        for new_hdf5_group_name in children:
                            new_hdf5_group = children[new_hdf5_group_name]
                            loading_list.append((traj_group, new_depth, new_hdf5_group))
    def _tree_load_link(self, new_traj_node, load_data, traj, as_new, hdf5_soft_link):
        """ Loads a link

        :param new_traj_node: Node in traj containing link
        :param load_data: How to load data in the linked node
        :param traj: The trajectory
        :param as_new: If data in linked node should be loaded as new
        :param hdf5_soft_link: The hdf5 soft link

        """
        try:
            # Dereference the soft link; a broken link raises NoSuchNodeError
            linked_group = hdf5_soft_link()
            link_name = hdf5_soft_link._v_name
            if (not link_name in new_traj_node._links or
                        load_data==pypetconstants.OVERWRITE_DATA):
                link_location = linked_group._v_pathname
                # Path looks like '/<traj_name>/<branch>/...'; drop the first two parts
                full_name = '.'.join(link_location.split('/')[2:])
                if not full_name in traj:
                    # Ensure the link target exists in the tree (skeleton only)
                    self._tree_load_sub_branch(traj, full_name,
                                               load_data=pypetconstants.LOAD_SKELETON,
                                               with_links=False, recursive=False, _trajectory=traj,
                                               _as_new=as_new, _hdf5_group=self._trajectory_group)
                if (load_data == pypetconstants.OVERWRITE_DATA and
                            link_name in new_traj_node._links):
                    new_traj_node.f_remove_link(link_name)
                if not link_name in new_traj_node._links:
                    # Recreate the link in the trajectory tree pointing at the target
                    new_traj_node._nn_interface._add_generic(new_traj_node,
                                                             type_name=nn.LINK,
                                                             group_type_name=nn.GROUP,
                                                             args=(link_name,
                                                                   traj.f_get(full_name)),
                                                             kwargs={},
                                                             add_prefix=False,
                                                             check_naming=False)
                else:
                    # Should be unreachable: an existing link was removed just above
                    raise RuntimeError('You shall not pass!')
        except pt.NoSuchNodeError:
            self._logger.error('Link `%s` under `%s` is broken, cannot load it, '
                               'I will ignore it, you have to '
                               'manually delete it!' %
                               (hdf5_soft_link._v_name, new_traj_node.v_full_name))
    def _tree_store_nodes_dfs(self, parent_traj_node, name, store_data, with_links, recursive,
                              max_depth, current_depth,
                              parent_hdf5_group):
        """Stores a node to hdf5 and if desired stores recursively everything below it.

        Implemented as an iterative depth-first traversal over an explicit stack.

        :param parent_traj_node: The parental node
        :param name: Name of node to be stored
        :param store_data: How to store data
        :param with_links: If links should be stored
        :param recursive: Whether to store recursively the subtree
        :param max_depth: Maximum recursion depth in tree
        :param current_depth: Current depth
        :param parent_hdf5_group: Parent hdf5 group

        """
        if max_depth is None:
            max_depth = float('inf')
        # Explicit DFS stack of (parent node, child name, depth, parent hdf5 group)
        store_list = [(parent_traj_node, name, current_depth, parent_hdf5_group)]
        while store_list:
            parent_traj_node, name, current_depth, parent_hdf5_group = store_list.pop()
            # Check if we create a link
            if name in parent_traj_node._links:
                if with_links:
                    self._tree_store_link(parent_traj_node, name, parent_hdf5_group)
                continue
            traj_node = parent_traj_node._children[name]
            # If the node does not exist in the hdf5 file create it
            if not hasattr(parent_hdf5_group, name):
                newly_created = True
                new_hdf5_group = self._hdf5file.create_group(where=parent_hdf5_group,
                                                             name=name, filters=self._all_get_filters())
            else:
                newly_created = False
                new_hdf5_group = getattr(parent_hdf5_group, name)
            if traj_node.v_is_leaf:
                # Leaves hold the actual parameter/result data
                self._prm_store_parameter_or_result(traj_node, store_data=store_data,
                                                    _hdf5_group=new_hdf5_group,
                                                    _newly_created=newly_created)
            else:
                # Groups store only their own meta data here; children are queued below
                self._grp_store_group(traj_node, store_data=store_data, with_links=with_links,
                                      recursive=False, max_depth=max_depth,
                                      _hdf5_group=new_hdf5_group,
                                      _newly_created=newly_created)
                if recursive and current_depth < max_depth:
                    for child in traj_node._children.keys():
                        store_list.append((traj_node, child, current_depth + 1, new_hdf5_group))
    def _tree_store_link(self, node_in_traj, link, hdf5_group):
        """Creates a soft link.

        :param node_in_traj: parental node
        :param link: name of link
        :param hdf5_group: current parental hdf5 group

        """
        if hasattr(hdf5_group, link):
            # The link already exists in the file, nothing to do
            return
        linked_traj_node = node_in_traj._links[link]
        # Translate the dotted tree path into an absolute hdf5 '/' path
        linking_name = linked_traj_node.v_full_name.replace('.','/')
        linking_name = '/' + self._trajectory_name + '/' + linking_name
        try:
            to_link_hdf5_group = self._hdf5file.get_node(where=linking_name)
        except pt.NoSuchNodeError:
            # Target not stored yet: store its skeleton first, then create the link
            self._logger.debug('Could not store link `%s` under `%s` immediately, '
                               'need to store `%s` first. '
                               'Will store the link right after.' % (link,
                                                                     node_in_traj.v_full_name,
                                                                     linked_traj_node.v_full_name))
            root = node_in_traj._nn_interface._root_instance
            self._tree_store_sub_branch(root, linked_traj_node.v_full_name,
                                        store_data=pypetconstants.STORE_DATA_SKIPPING,
                                        with_links=False, recursive=False,
                                        hdf5_group=self._trajectory_group)
            to_link_hdf5_group = self._hdf5file.get_node(where=linking_name)
        self._hdf5file.create_soft_link(where=hdf5_group,
                                        name=link,
                                        target=to_link_hdf5_group)
######################## Storing a Single Run ##########################################
    def _srn_store_single_run(self, traj,
                              recursive=True,
                              store_data=pypetconstants.STORE_DATA,
                              max_depth=None):
        """ Stores a single run instance to disk (only meta data)

        Only nodes and links newly added during the run are stored; the
        remaining depth budget passed on is reduced by each node's own depth
        in the tree.
        """
        if store_data != pypetconstants.STORE_NOTHING:
            self._logger.debug('Storing Data of single run `%s`.' % traj.v_crun)
            if max_depth is None:
                max_depth = float('inf')
            for name_pair in traj._new_nodes:
                _, name = name_pair
                parent_group, child_node = traj._new_nodes[name_pair]
                # Skip nodes that have already been written
                if not child_node._stored:
                    self._tree_store_sub_branch(parent_group, name,
                                                store_data=store_data,
                                                with_links=True,
                                                recursive=recursive,
                                                max_depth=max_depth - child_node.v_depth,
                                                hdf5_group=None)
            for name_pair in traj._new_links:
                _, link = name_pair
                parent_group, _ = traj._new_links[name_pair]
                # Links sit one level below their parent group
                self._tree_store_sub_branch(parent_group, link,
                                            store_data=store_data,
                                            with_links=True,
                                            recursive=recursive,
                                            max_depth=max_depth - parent_group.v_depth - 1,
                                            hdf5_group=None)
def _srn_summarize_explored_parameters(self, paramlist):
"""Summarizes the parameter settings.
:param run_name: Name of the single run
:param paramlist: List of explored parameters
:param add_table: Whether to add the overview table
:param create_run_group:
If a group with the particular name should be created if it does not exist.
Might be necessary when trajectories are merged.
"""
runsummary = ''
paramlist = sorted(paramlist, key=lambda name: name.v_name + name.v_location)
for idx, expparam in enumerate(paramlist):
# Create the run summary for the `run` overview
if idx > 0:
runsummary += ', '
valstr = expparam.f_val_to_str()
if len(valstr) >= pypetconstants.HDF5_STRCOL_MAX_COMMENT_LENGTH:
valstr = valstr[0:pypetconstants.HDF5_STRCOL_MAX_COMMENT_LENGTH - 3]
valstr += '...'
if expparam.v_name in runsummary:
param_name = expparam.v_full_name
else:
param_name = expparam.v_name
runsummary = runsummary + param_name + ': ' + valstr
return runsummary
################# Methods used across Storing and Loading different Items ##################
    def _all_store_param_or_result_table_entry(self, instance, table, flags,
                                               additional_info=None):
        """Stores a single row into an overview table

        :param instance: A parameter or result instance

        :param table: Table where row will be inserted

        :param flags:

            Flags how to insert into the table. Potential Flags are
            `ADD_ROW`, `REMOVE_ROW`, `MODIFY_ROW`

        :param additional_info:

            Dictionary containing information that cannot be extracted from
            `instance`, but needs to be inserted, too.

        """
        # assert isinstance(table, pt.Table)
        location = instance.v_location
        name = instance.v_name
        fullname = instance.v_full_name
        # Note flags are compared as whole tuples, not by membership, here
        if (flags == (HDF5StorageService.ADD_ROW,) and table.nrows < 2
                and 'location' in table.colnames):
            # We add the modify row option here because you cannot delete the very first
            # row of the table, so there is the rare condition, that the row might already
            # exist.
            # We also need to check if 'location' is in the columns in order to avoid
            # confusion with the smaller explored parameter overviews
            flags = (HDF5StorageService.ADD_ROW, HDF5StorageService.MODIFY_ROW)
        if flags == (HDF5StorageService.ADD_ROW,):
            # If we are sure we only want to add a row we do not need to search!
            condvars = None
            condition = None
        else:
            # Condition to search for an entry
            condvars = {'namecol': table.cols.name, 'locationcol': table.cols.location,
                        'name': name, 'location': location}
            condition = """(namecol == name) & (locationcol == location)"""
        if HDF5StorageService.REMOVE_ROW in flags:
            # If we want to remove a row, we don't need to extract information
            insert_dict = {}
        else:
            # Extract information to insert from the instance and the additional info dict
            colnames = set(table.colnames)
            insert_dict = self._all_extract_insert_dict(instance, colnames, additional_info)
        # Write the table entry
        self._all_add_or_modify_row(fullname, insert_dict, table, condition=condition,
                                    condvars=condvars, flags=flags)
def _all_get_or_create_table(self, where, tablename, description, expectedrows=None):
"""Creates a new table, or | |
#set up glyph for visualizing point cloud
sphereSource = vtk.vtkSphereSource()
sphereSource.SetRadius(self.sampleSizeScaleFactor/300)
glyph = vtk.vtkGlyph3D()
glyph.SetSourceConnection(sphereSource.GetOutputPort())
glyph.SetInputData(polydata)
glyph.ScalingOff()
glyph.Update()
#display
modelNode=slicer.mrmlScene.GetFirstNodeByName('Landmark Point Cloud')
if modelNode is None:
modelNode = slicer.vtkMRMLModelNode()
modelNode.SetName('Landmark Point Cloud')
slicer.mrmlScene.AddNode(modelNode)
GPANodeCollection.AddItem(modelNode)
modelDisplayNode = slicer.mrmlScene.AddNewNodeByClass('vtkMRMLModelDisplayNode')
GPANodeCollection.AddItem(modelDisplayNode)
modelNode.SetAndObserveDisplayNodeID(modelDisplayNode.GetID())
viewNode1 = slicer.mrmlScene.GetFirstNodeByName("View1") #name = "View"+ singletonTag
modelDisplayNode.SetViewNodeIDs([viewNode1.GetID()])
modelDisplayNode = modelNode.GetDisplayNode()
modelDisplayNode.SetScalarVisibility(True)
modelDisplayNode.SetActiveScalarName('LM Index')
modelDisplayNode.SetAndObserveColorNodeID('vtkMRMLColorTableNodeLabels.txt')
modelNode.SetAndObservePolyData(glyph.GetOutput())
    def plotDistributionGlyph(self, sliderScale):
        """Visualizes per-landmark variance as ellipse or sphere glyphs at the mean shape.

        :param sliderScale: User-chosen scale factor applied to the variance glyphs
        """
        self.unplotDistributions()
        varianceMat = self.LM.calcLMVariation(self.sampleSizeScaleFactor,self.skipScalingOption)
        i,j,k=self.LM.lmOrig.shape
        pt=[0,0,0]
        #set up vtk point array for each landmark point
        points = vtk.vtkPoints()
        points.SetNumberOfPoints(i)
        scales = vtk.vtkDoubleArray()
        scales.SetName("Scales")
        index = vtk.vtkDoubleArray()
        index.SetName("Index")
        #set up tensor array to scale ellipses
        tensors = vtk.vtkDoubleArray()
        tensors.SetNumberOfTuples(i)
        tensors.SetNumberOfComponents(9)
        tensors.SetName("Tensors")
        # get fiducial node for mean landmarks, make just labels visible
        self.meanLandmarkNode.SetDisplayVisibility(1)
        self.scaleMeanShapeSlider.value=0
        for landmark in range(i):
            pt=self.rawMeanLandmarks[landmark,:]
            points.SetPoint(landmark,pt)
            # Sphere scale: mean of the three per-axis variances
            scales.InsertNextValue(sliderScale*(varianceMat[landmark,0]+varianceMat[landmark,1]+varianceMat[landmark,2])/3)
            # Diagonal tensor: anisotropic per-axis scaling for the ellipse glyph
            tensors.InsertTuple9(landmark,sliderScale*varianceMat[landmark,0],0,0,0,sliderScale*varianceMat[landmark,1],0,0,0,sliderScale*varianceMat[landmark,2])
            index.InsertNextValue(landmark+1)
        polydata=vtk.vtkPolyData()
        polydata.SetPoints(points)
        polydata.GetPointData().AddArray(index)
        if self.EllipseType.isChecked():
            polydata.GetPointData().SetScalars(index)
            polydata.GetPointData().SetTensors(tensors)
            glyph = vtk.vtkTensorGlyph()
            glyph.ExtractEigenvaluesOff()
            # Reuse the model node from a previous call if one exists
            modelNode=slicer.mrmlScene.GetFirstNodeByName('Landmark Variance Ellipse')
            if modelNode is None:
                modelNode = slicer.vtkMRMLModelNode()
                modelNode.SetName('Landmark Variance Ellipse')
                slicer.mrmlScene.AddNode(modelNode)
                GPANodeCollection.AddItem(modelNode)
                modelDisplayNode = slicer.mrmlScene.AddNewNodeByClass('vtkMRMLModelDisplayNode')
                modelNode.SetAndObserveDisplayNodeID(modelDisplayNode.GetID())
                viewNode1 = slicer.mrmlScene.GetFirstNodeByName("View1") #name = "View"+ singletonTag
                modelDisplayNode.SetViewNodeIDs([viewNode1.GetID()])
                GPANodeCollection.AddItem(modelDisplayNode)
        else:
            polydata.GetPointData().SetScalars(scales)
            polydata.GetPointData().AddArray(index)
            glyph = vtk.vtkGlyph3D()
            # Reuse the model node from a previous call if one exists
            modelNode=slicer.mrmlScene.GetFirstNodeByName('Landmark Variance Sphere')
            if modelNode is None:
                modelNode = slicer.vtkMRMLModelNode()
                modelNode.SetName('Landmark Variance Sphere')
                slicer.mrmlScene.AddNode(modelNode)
                GPANodeCollection.AddItem(modelNode)
                modelDisplayNode = slicer.mrmlScene.AddNewNodeByClass('vtkMRMLModelDisplayNode')
                modelNode.SetAndObserveDisplayNodeID(modelDisplayNode.GetID())
                viewNode1 = slicer.mrmlScene.GetFirstNodeByName("View1") #name = "View"+ singletonTag
                modelDisplayNode.SetViewNodeIDs([viewNode1.GetID()])
                GPANodeCollection.AddItem(modelDisplayNode)
            sphereSource = vtk.vtkSphereSource()
            sphereSource.SetThetaResolution(64)
            sphereSource.SetPhiResolution(64)
            glyph.SetSourceConnection(sphereSource.GetOutputPort())
        glyph.SetInputData(polydata)
        glyph.Update()
        modelNode.SetAndObservePolyData(glyph.GetOutput())
        modelDisplayNode = modelNode.GetDisplayNode()
        modelDisplayNode.SetScalarVisibility(True)
        modelDisplayNode.SetActiveScalarName('Index') #color by landmark number
        modelDisplayNode.SetAndObserveColorNodeID('vtkMRMLColorTableNodeLabels.txt')
# Interactive Visualization callbacks and helpers
    def onSelect(self):
        """Initializes the interactive PC-warp visualization for the current selection.

        If a model is selected, it is warped to the mean shape via a thin
        plate spline transform and cloned so the clone can be warped along
        principal components; otherwise only the landmark clone is used.
        """
        self.initializeOnSelect()
        # Cloned landmark node that will be warped along the selected PCs
        self.cloneLandmarkNode = self.copyLandmarkNode
        self.cloneLandmarkNode.CreateDefaultDisplayNodes()
        self.cloneLandmarkDisplayNode = self.cloneLandmarkNode.GetDisplayNode()
        if self.modelVisualizationType.isChecked():
            # get landmark node selected
            logic = GPALogic()
            self.sourceLMNode= slicer.util.loadMarkups(self.FudSelect.currentPath)
            GPANodeCollection.AddItem(self.sourceLMNode)
            self.sourceLMnumpy=logic.convertFudicialToNP(self.sourceLMNode)
            # remove any excluded landmarks
            j=len(self.LMExclusionList)
            if (j != 0):
                indexToRemove=[]
                for i in range(j):
                    # Exclusion list is 1-based; numpy rows are 0-based
                    indexToRemove.append(self.LMExclusionList[i]-1)
                self.sourceLMnumpy=np.delete(self.sourceLMnumpy,indexToRemove,axis=0)
            # set up transform
            targetLMVTK=logic.convertNumpyToVTK(self.rawMeanLandmarks)
            sourceLMVTK=logic.convertNumpyToVTK(self.sourceLMnumpy)
            VTKTPSMean = vtk.vtkThinPlateSplineTransform()
            VTKTPSMean.SetSourceLandmarks( sourceLMVTK )
            VTKTPSMean.SetTargetLandmarks( targetLMVTK )
            VTKTPSMean.SetBasisToR() # for 3D transform
            # transform from selected to mean
            self.transformMeanNode = slicer.mrmlScene.AddNewNodeByClass('vtkMRMLTransformNode', 'Mean TPS Transform')
            GPANodeCollection.AddItem(self.transformMeanNode)
            self.transformMeanNode.SetAndObserveTransformToParent( VTKTPSMean )
            # load model node
            self.modelNode=slicer.util.loadModel(self.grayscaleSelector.currentPath)
            GPANodeCollection.AddItem(self.modelNode)
            self.modelDisplayNode = self.modelNode.GetDisplayNode()
            # Warp the model to the mean shape and bake the transform in
            self.modelNode.SetAndObserveTransformNodeID(self.transformMeanNode.GetID())
            slicer.vtkSlicerTransformLogic().hardenTransform(self.modelNode)
            # create a PC warped model as clone of the selected model node
            shNode = slicer.vtkMRMLSubjectHierarchyNode.GetSubjectHierarchyNode(slicer.mrmlScene)
            itemIDToClone = shNode.GetItemByDataNode(self.modelNode)
            clonedItemID = slicer.modules.subjecthierarchy.logic().CloneSubjectHierarchyItem(shNode, itemIDToClone)
            self.cloneModelNode = shNode.GetItemDataNode(clonedItemID)
            self.cloneModelNode.SetName('PCA Warped Volume')
            self.cloneModelDisplayNode = self.cloneModelNode.GetDisplayNode()
            self.cloneModelDisplayNode.SetColor([0,0,1])
            GPANodeCollection.AddItem(self.cloneModelNode)
            # Match the clone's landmark visibility to the mean landmarks
            visibility = self.meanLandmarkNode.GetDisplayVisibility()
            self.cloneLandmarkNode.SetDisplayVisibility(visibility)
            #Clean up
            GPANodeCollection.RemoveItem(self.sourceLMNode)
            slicer.mrmlScene.RemoveNode(self.sourceLMNode)
        else:
            self.cloneLandmarkNode.SetDisplayVisibility(1)
            self.meanLandmarkNode.SetDisplayVisibility(1)
        #set mean landmark color and scale from GUI
        self.scaleMeanGlyph()
        self.toggleMeanColor()
        visibility = self.meanLandmarkNode.GetDisplayNode().GetPointLabelsVisibility()
        self.cloneLandmarkDisplayNode.SetPointLabelsVisibility(visibility)
        self.cloneLandmarkDisplayNode.SetTextScale(3)
        if self.scaleMeanShapeSlider.value == 0: # If the scale is set to 0, reset to default scale
            self.scaleMeanShapeSlider.value = 3
        self.cloneLandmarkDisplayNode.SetGlyphScale(self.scaleMeanShapeSlider.value)
        #apply custom layout
        self.assignLayoutDescription()
        # Set up transform for PCA warping
        self.transformNode = slicer.mrmlScene.AddNewNodeByClass('vtkMRMLTransformNode', 'PC TPS Transform')
        GPANodeCollection.AddItem(self.transformNode)
        # Enable PCA warping and recording
        self.slider1.populateComboBox(self.PCList)
        self.slider2.populateComboBox(self.PCList)
        self.applyEnabled = True
        self.startRecordButton.enabled = True
def onApply(self):
pc1=self.slider1.boxValue()
pc2=self.slider2.boxValue()
pcSelected=[pc1,pc2]
# get scale values for each pc.
sf1=self.slider1.sliderValue()
sf2=self.slider2.sliderValue()
scaleFactors=np.zeros((2))
scaleFactors[0]=sf1/100.0
scaleFactors[1]=sf2/100.0
j=0
for i in pcSelected:
if i==0:
scaleFactors[j]=0.0
j=j+1
logic = GPALogic()
#get target landmarks
self.LM.ExpandAlongPCs(pcSelected,scaleFactors, self.sampleSizeScaleFactor)
target=self.rawMeanLandmarks+self.LM.shift
if hasattr(self, 'cloneModelNode'):
targetLMVTK=logic.convertNumpyToVTK(target)
sourceLMVTK=logic.convertNumpyToVTK(self.rawMeanLandmarks)
#Set up TPS
VTKTPS = vtk.vtkThinPlateSplineTransform()
VTKTPS.SetSourceLandmarks( sourceLMVTK )
VTKTPS.SetTargetLandmarks( targetLMVTK )
VTKTPS.SetBasisToR() # for 3D transform
#Connect transform to model
self.transformNode.SetAndObserveTransformToParent( VTKTPS )
self.cloneLandmarkNode.SetAndObserveTransformNodeID(self.transformNode.GetID())
self.cloneModelNode.SetAndObserveTransformNodeID(self.transformNode.GetID())
else:
index = 0
for targetLandmark in target:
self.cloneLandmarkNode.SetNthControlPointPositionFromArray(index,targetLandmark)
index+=1
    def onStartRecording(self):
        """Starts recording the PC warp animation into sequence nodes."""
        #set up sequences for template model and PC TPS transform
        self.modelSequence=slicer.mrmlScene.AddNewNodeByClass("vtkMRMLSequenceNode","GPAModelSequence")
        self.modelSequence.SetHideFromEditors(0)
        GPANodeCollection.AddItem(self.modelSequence)
        self.transformSequence = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLSequenceNode","GPATFSequence")
        self.transformSequence.SetHideFromEditors(0)
        GPANodeCollection.AddItem(self.transformSequence)
        #Set up a new sequence browser and add sequences
        browserNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLSequenceBrowserNode", "GPASequenceBrowser")
        browserLogic=slicer.modules.sequences.logic()
        browserLogic.AddSynchronizedNode(self.modelSequence,self.cloneModelNode,browserNode)
        browserLogic.AddSynchronizedNode(self.modelSequence,self.cloneLandmarkNode,browserNode)
        browserLogic.AddSynchronizedNode(self.transformSequence,self.transformNode,browserNode)
        browserNode.SetRecording(self.transformSequence,'true')
        browserNode.SetRecording(self.modelSequence,'true')
        #Set up widget to record
        browserWidget=slicer.modules.sequences.widgetRepresentation()
        browserWidget.setActiveBrowserNode(browserNode)
        recordWidget = browserWidget.findChild('qMRMLSequenceBrowserPlayWidget')
        recordWidget.setRecordingEnabled(1)
        # NOTE(review): modelSequence and transformSequence were already added to
        # GPANodeCollection above; these calls add duplicate entries — confirm intended
        GPANodeCollection.AddItem(self.modelSequence)
        GPANodeCollection.AddItem(self.transformSequence)
        GPANodeCollection.AddItem(browserNode)
        #enable stop recording
        self.stopRecordButton.enabled = True
        self.startRecordButton.enabled = False
def onStopRecording(self):
browserWidget=slicer.modules.sequences.widgetRepresentation()
recordWidget = browserWidget.findChild('qMRMLSequenceBrowserPlayWidget')
recordWidget.setRecordingEnabled(0)
slicer.util.selectModule(slicer.modules.sequences)
self.stopRecordButton.enabled = False
self.startRecordButton.enabled = True
def initializeOnSelect(self):
#remove nodes from previous runs
temporaryNode=slicer.mrmlScene.GetFirstNodeByName('Mean TPS Transform')
if(temporaryNode):
GPANodeCollection.RemoveItem(temporaryNode)
slicer.mrmlScene.RemoveNode(temporaryNode)
temporaryNode=slicer.mrmlScene.GetFirstNodeByName('GPA Warped Volume')
if(temporaryNode):
GPANodeCollection.RemoveItem(temporaryNode)
slicer.mrmlScene.RemoveNode(temporaryNode)
temporaryNode=slicer.mrmlScene.GetFirstNodeByName('PC TPS Transform')
if(temporaryNode):
GPANodeCollection.RemoveItem(temporaryNode)
slicer.mrmlScene.RemoveNode(temporaryNode)
#
# GPALogic
#
class GPALogic(ScriptedLoadableModuleLogic):
"""This class should implement all the actual
computation done by your module. The interface
should be such that other python code can import
this class and make use of the functionality without
requiring an instance of the Widget.
Uses ScriptedLoadableModuleLogic base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def hasImageData(self,volumeNode):
"""This is an example logic method that
returns true if the passed in volume
node has valid image data
"""
if not volumeNode:
logging.debug('hasImageData failed: no volume node')
return False
if volumeNode.GetImageData() is None:
logging.debug('hasImageData failed: no image data in volume node')
return False
return True
def isValidInputOutputData(self, inputVolumeNode, outputVolumeNode):
"""Validates if the output is not the same as input
"""
if not inputVolumeNode:
logging.debug('isValidInputOutputData failed: no input volume node defined')
return False
if not outputVolumeNode:
logging.debug('isValidInputOutputData failed: no output volume node defined')
return False
if inputVolumeNode.GetID()==outputVolumeNode.GetID():
logging.debug('isValidInputOutputData failed: input and output volume is the same. Create a new volume for output to avoid this error.')
return False
return True
    def takeScreenshot(self,name,description,type=-1):
        """Grabs a screenshot of the requested view and saves it as an annotation snapshot.

        :param name: Name for the snapshot
        :param description: Description text shown with the snapshot
        :param type: One of the slicer.qMRMLScreenShotDialog view constants;
            any other value captures the full main window
        """
        # show the message even if not taking a screen shot
        slicer.util.delayDisplay('Take screenshot: '+description+'.\nResult is available in the Annotations module.', 3000)
        lm = slicer.app.layoutManager()
        # switch on the type to get the requested window
        widget = 0
        if type == slicer.qMRMLScreenShotDialog.FullLayout:
            # full layout
            widget = lm.viewport()
        elif type == slicer.qMRMLScreenShotDialog.ThreeD:
            # just the 3D window
            widget = lm.threeDWidget(0).threeDView()
        elif type == slicer.qMRMLScreenShotDialog.Red:
            # red slice window
            widget = lm.sliceWidget("Red")
        elif type == slicer.qMRMLScreenShotDialog.Yellow:
            # yellow slice window
            widget = lm.sliceWidget("Yellow")
        elif type == slicer.qMRMLScreenShotDialog.Green:
            # green slice window
            widget = lm.sliceWidget("Green")
        else:
            # default to using the full window
            widget = slicer.util.mainWindow()
            # reset the type so that the node is set correctly
            type = slicer.qMRMLScreenShotDialog.FullLayout
        # grab and convert to vtk image data
        qimage = ctk.ctkWidgetsUtils.grabWidget(widget)
        imageData = vtk.vtkImageData()
        slicer.qMRMLUtils().qImageToVtkImageData(qimage,imageData)
        annotationLogic = slicer.modules.annotations.logic()
        annotationLogic.CreateSnapShot(name, description, type, 1, imageData)
def loadLandmarks(self, filePathList, lmToRemove, extension):
# initial data array
if 'json' in extension:
import pandas
tempTable = pandas.DataFrame.from_dict(pandas.read_json(filePathList[0])['markups'][0]['controlPoints'])
landmarkNumber = len(tempTable)
landmarkTypeArray=[]
for i in range(landmarkNumber):
if tempTable['description'][i]=='Semi':
landmarkTypeArray.append(str(i+1))
landmarks=np.zeros(shape=(landmarkNumber,3,len(filePathList)))
for i in range(len(filePathList)):
try:
tmp1=pandas.DataFrame.from_dict(pandas.read_json(filePathList[i])['markups'][0]['controlPoints'])
except:
slicer.util.messageBox("Error: Load file {} failed:.".format(filePathList[i]))
logging.debug("Error: Load file {} failed:.".format(filePathList[i]))
if len(tmp1) == landmarkNumber:
lmArray = tmp1['position'].to_numpy()
for j in range(landmarkNumber):
landmarks[j,:,i]=lmArray[j]
else:
warning = "Error: Load file {} failed. There are {} landmarks instead of the expected {}.".format(filePathList[i],len(tmp1),landmarkNumber)
slicer.util.messageBox(warning)
return
else:
landmarks, landmarkTypeArray = self.initDataArray(filePathList)
landmarkNumber = landmarks.shape[0]
for i in range(len(filePathList)):
tmp1=self.importLandMarks(filePathList[i])
if len(tmp1) == landmarkNumber:
landmarks[:,:,i] = tmp1
else:
warning = "Error: Load file {} failed. There are {} landmarks instead of the expected {}.".format(filePathList[i],len(tmp1),landmarkNumber)
slicer.util.messageBox(warning)
return
if len(lmToRemove)>0:
indexToRemove=[]
for i in range(len(lmToRemove)):
indexToRemove.append(lmToRemove[i]-1)
landmarks=np.delete(landmarks,indexToRemove,axis=0)
return landmarks, landmarkTypeArray
def importLandMarks(self, filePath):
"""Imports the landmarks from file. Does not import sample if a landmark is -1000
Adjusts the resolution is log(nhrd) file is found returns kXd array of landmark data. k=# of landmarks d=dimension
"""
# import data file
datafile=open(filePath,'r')
data=[]
for row in datafile:
if not fnmatch.fnmatch(row[0],"#*"):
data.append(row.strip().split(','))
# Make Landmark array
dataArray=np.zeros(shape=(len(data),3))
j=0
sorter=[]
for i in data:
tmp=np.array(i)[1:4]
dataArray[j,0:3]=tmp
x=np.array(i).shape
j=j+1
slicer.app.processEvents()
return dataArray
def initDataArray(self, files):
"""
returns an np array for the storage of the landmarks and an array of landmark types (Fixed, Semi)
"""
dim=3
subjectNumber = len(files)
# import data file
datafile=open(files[0],'r')
landmarkType = []
rowNumber=0
for row in datafile:
if not fnmatch.fnmatch(row[0],"#*"):
rowNumber+=1
tmp=(row.strip().split(','))
if tmp[12] == 'Semi':
landmarkType.append(str(rowNumber))
i = rowNumber
landmarks=np.zeros(shape=(rowNumber,dim,subjectNumber))
return landmarks, landmarkType
def dist(self, a):
"""
Computes the euclidean distance matrix for nXK points in a 3D space. So the input matrix is nX3xk
Returns a nXnXk matrix
"""
id,jd,kd=a.shape
fnx = lambda q : q - np.reshape(q, (id, 1,kd))
dx=fnx(a[:,0,:])
dy=fnx(a[:,1,:])
dz=fnx(a[:,2,:])
return (dx**2.0+dy**2.0+dz**2.0)**0.5
def dist2(self, a):
"""
Computes the euclidean distance matrix for n points in a 3D space
Returns a nXn matrix
"""
id,jd=a.shape
fnx = lambda q : q - np.reshape(q, (id, 1))
dx=fnx(a[:,0])
dy=fnx(a[:,1])
dz=fnx(a[:,2])
return (dx**2.0+dy**2.0+dz**2.0)**0.5
#plotting functions
def makeScatterPlotWithFactors(self, data, files, factors,title,xAxis,yAxis,pcNumber):
#create two tables for the first two factors and then check for a third
#check if there | |
for bld.program for simpler target configuration.
The linker script is added as env input/output dependency for the
target.
Based on the target name all related targets are created:
- binary: <target>.<format>
- linker information: <target>.<format>.xml
- map information: <target>.<format>.map
"""
if "target" not in kw:
kw["target"] = "out"
if "linker_script" not in kw:
bld.fatal("linker script missing")
bld.env.LINKER_SCRIPT = kw["linker_script"].abspath()
kw["features"] = "c cprogram"
tgt_elf = bld.path.find_or_declare("%s.%s" % (kw["target"], bld.env.DEST_BIN_FMT))
tgt_xml = bld.path.find_or_declare(tgt_elf.name + ".xml")
tgt_map = bld.path.find_or_declare(tgt_elf.name + ".map")
kw["target"] = [tgt_elf, tgt_xml, tgt_map]
bld.add_manual_dependency(tgt_elf, kw["linker_script"])
if "linker_pulls" in kw:
scan_opt = "--scan_libraries"
if not scan_opt in bld.env.LINKFLAGS and not scan_opt in kw["linkflags"]:
bld.fatal(
"'linker_pulls' was specified without linker flag '--scan_libraries'."
)
bld.add_manual_dependency(tgt_elf, kw["linker_pulls"])
# if a hex file should be generated, we need to append the config
if "linker_script_hex" in kw:
kw["features"] += " hexgen"
bld.env.LINKER_SCRIPT_HEX = kw["linker_script_hex"].abspath()
bld.add_manual_dependency(tgt_elf, kw["linker_script_hex"])
elf_file_hash = binascii.hexlify(Utils.h_file(kw["linker_script"].abspath()))
txt = kw["linker_script_hex"].read().strip().splitlines()[0]
txt = re.search(r"\/\*(.*)\*\/", txt)
try:
txt = txt.group(1)
except IndexError:
bld.fatal("hashing error")
known_hash = bytes(txt.strip(), encoding="utf-8")
if not elf_file_hash == known_hash:
bld.fatal(
f"The hash of {kw['linker_script'].abspath()} has changed from "
f"{known_hash} to {elf_file_hash}. Reflect the changes of "
"the elf file linker script in the hex file linker script and "
"update the file hash of the elf linker script in the hex "
"file linker script."
)
return bld(*k, **kw)
@TaskGen.feature("c", "asm", "includes")
@TaskGen.after_method("propagate_uselib_vars", "process_source")
def apply_incpaths(self):
    """Adds the include paths"""
    nodes = self.to_incnodes(
        self.to_list(getattr(self, "includes", [])) + self.env.INCLUDES
    )
    self.includes_nodes = nodes
    cwd = self.get_cwd()
    ti_marker = os.sep + "ti" + os.sep
    incpaths = []
    for node in nodes:
        # TI toolchain directories are kept as nodes; all other include
        # directories are stored relative to the current working directory.
        if ti_marker in node.abspath():
            incpaths.append(node)
        else:
            incpaths.append(node.path_from(cwd))
    self.env.INCPATHS = incpaths
@TaskGen.feature("c")
@TaskGen.before_method("apply_incpaths")
def check_duplicate_and_not_existing_includes(self):
    """Check if include directories are included more than once and if they
    really exist on the file system. If include directories do not exist, or
    they are included twice, raise an error."""
    includes = self.to_incnodes(
        self.to_list(getattr(self, "includes", [])) + self.env.INCLUDES
    )
    # absolute paths of include directories missing on disk
    not_existing = [i.abspath() for i in includes if not os.path.isdir(i.abspath())]
    # count occurrences by absolute path; record each duplicate exactly once
    counts = {}
    duplicates = []
    for node in includes:
        key = node.abspath()
        seen_before = counts.get(key, 0)
        if seen_before == 1:
            duplicates.append((key, node.relpath()))
        counts[key] = seen_before + 1
    err = ""
    if duplicates or not_existing:
        err = (
            f"There are include errors when building '{self.target}' from "
            f"sources {self.source} in build file from '{self.path}{os.sep}'.\n"
        )
        if not_existing:
            err += f"Not existing include directories are: {not_existing}\n"
        if duplicates:
            # drop entries below the build directory, then flatten and dedupe
            duplicates = [
                entry
                for pair in duplicates
                for entry in pair
                if not os.sep + "build" + os.sep in entry
            ]
            duplicates = list(set(duplicates))
            err += f"Duplicate include directories are: {duplicates}\n"
    if err:
        self.bld.fatal(err)
class hexgen(Task.Task):  # pylint: disable-msg=invalid-name
    """Task that turns the linked elf file into a hex file"""

    #: str: terminal color used when the command line is displayed
    color = "YELLOW"

    #: list of str: tasks that must have finished before hexgen may run
    after = ["link_task"]

    run_str = (
        "${ARMHEX} -q ${HEXGENFLAGS} --map=${TGT[1].abspath()} ${LINKER_SCRIPT_HEX} "
        "${SRC[0].abspath()} -o ${TGT[0].abspath()}"
    )
    """str: string to be interpolated to create the command line to create a
    hex file from an elf file."""

    def keyword(self):  # pylint: disable=no-self-use
        """displayed keyword when generating the hex file"""
        return "Compiling"

    def __str__(self):
        """additional information appended to the keyword"""
        return f"{self.inputs[0]} -> {self.outputs[0]}"
@TaskGen.feature("hexgen")
@TaskGen.after("apply_link")
def add_hexgen_task(self):
    """creates a tasks to create a hex file from the linker output
    (task :py:class:`f_ti_arm_cgt.hexgen`)"""
    if not hasattr(self, "link_task"):
        return
    elf_node = self.link_task.outputs[0]
    # outputs: the hex file itself plus the hex map file
    self.hexgen = self.create_task(
        "hexgen",
        src=elf_node,
        tgt=[elf_node.change_ext(".hex"), elf_node.change_ext(".hex.map")],
    )
class bingen(Task.Task):  # pylint: disable-msg=invalid-name
    """Task that turns the linked elf file into a bin file"""

    #: str: terminal color used when the command line is displayed
    color = "CYAN"

    #: list of str: tasks that must have finished before bingen may run
    after = ["link_task"]

    run_str = (
        "${TIOBJ2BIN} ${SRC[0].abspath()} ${TGT[0].abspath()} ${ARMOFD} "
        "${ARMHEX} ${MKHEX4BIN}"
    )
    """str: string to be interpolated to create the command line to create a
    bin file from an elf file."""

    def keyword(self):  # pylint: disable=no-self-use
        """displayed keyword when generating the bin file"""
        return "Compiling"

    def __str__(self):
        """additional information appended to the keyword"""
        return f"{self.inputs[0]} -> {self.outputs[0]}"
@TaskGen.feature("cprogram")
@TaskGen.after("apply_link")
def add_bingen_task(self):
    """creates a task to create a bin file from the linker output
    (task :py:class:`f_ti_arm_cgt.bingen`)
    """
    if not hasattr(self, "link_task"):
        return
    elf_node = self.link_task.outputs[0]
    self.bingen = self.create_task(
        "bingen", src=elf_node, tgt=[elf_node.change_ext(".bin")]
    )
@taskgen_method
def accept_node_to_link(self, node):  # pylint: disable=unused-argument
    """filters which output files are not meant to be linked

    Rejects ``*.aux`` files and names ending in ``crl``/``rl``.
    NOTE(review): the last two suffixes have no leading dot (unlike
    ``.aux``), so e.g. ``foo_crl`` is rejected too -- confirm intentional.
    """
    return not node.name.endswith((".aux", "crl", "rl"))
@TaskGen.feature("c", "cprogram")
@TaskGen.after("apply_link")
def process_sizes(self):
    """creates size tasks for generated object and object-like files"""
    link_task = getattr(self, "link_task", None)
    if link_task is None:
        return
    # run size on every object that went into the link
    for obj_node in link_task.inputs:
        self.create_task("size", obj_node, obj_node.change_ext(".size.log"))
    # and on every linkable artifact the link produced
    linkable_suffixes = (".a", "." + self.bld.env.DEST_BIN_FMT)
    for out_node in link_task.outputs:
        if out_node.suffix() in linkable_suffixes:
            self.create_task("size", out_node, out_node.change_ext(".size.log"))
class size(Task.Task):  # pylint: disable-msg=invalid-name
    """Task to run size on all input files"""

    vars = ["ARMSIZE", "ARMSIZE_OPTS"]

    #: str: terminal color used when the command line is displayed
    color = "BLUE"

    def run(self):
        """runs the size tool on the input file and writes its stdout to the
        output file; stderr is forwarded to the error log."""
        base_cmd = Utils.subst_vars("${ARMSIZE} ${ARMSIZE_OPTS}", self.env)
        cmd = (base_cmd + " " + self.inputs[0].abspath()).split()
        outfile = self.outputs[0].path_from(self.generator.path)
        out, err = self.generator.bld.cmd_and_log(
            cmd,
            output=waflib.Context.BOTH,
            quiet=waflib.Context.STDOUT,
            env=self.env.env or None,
            cwd=self.generator.bld.path.get_bld().abspath(),
        )
        self.generator.path.make_node(outfile).write(out)
        if err:
            Logs.error(err)

    def keyword(self):  # pylint: disable=no-self-use
        """displayed keyword when size is run on object files"""
        return "Processing size"
@TaskGen.feature("c", "cprogram")
@TaskGen.after("apply_link")
def process_nm(self):
    """creates nm tasks for generated object files"""
    link_task = getattr(self, "link_task", None)
    if link_task is None:
        return
    # run armnm on every object that went into the link
    for obj_node in link_task.inputs:
        self.create_task("nm", obj_node, obj_node.change_ext(".nm.log"))
    # and on every linkable artifact the link produced
    linkable_suffixes = (".a", "." + self.bld.env.DEST_BIN_FMT)
    for out_node in link_task.outputs:
        if out_node.suffix() in linkable_suffixes:
            self.create_task("nm", out_node, out_node.change_ext(".nm.log"))
class nm(Task.Task):  # pylint: disable-msg=invalid-name,too-few-public-methods
    """Task that runs armnm on every input file"""

    #: str: terminal color used when the command line is displayed
    color = "PINK"

    run_str = "${ARMNM} ${NMFLAGS} --output=${TGT} ${SRC}"
    """str: string to be interpolated to create the command line to create a
    nm file from an ``*.obj``, ``*.a`` or ``*.elf`` file."""

    def keyword(self):  # pylint: disable=no-self-use
        """displayed keyword when armnm is run on object files"""
        return "Processing nm"
@TaskGen.feature("c")
@TaskGen.after("c_pp")
def remove_stuff_from_pp(self):
    """creates pp tasks for generated object files"""
    for pp_task in self.c_pp_tasks:
        base = pp_task.outputs[0]
        # one cleaned file (.ppr) and one additionally stripped file (.pprs)
        self.create_task(
            "clean_pp_file", base, [base.change_ext(".ppr"), base.change_ext(".pprs")]
        )
class clean_pp_file(
    Task.Task
):  # pylint: disable-msg=invalid-name,too-few-public-methods
    """Task to remove some information from the preprocessed files"""

    #: str: terminal color used when the command line is displayed
    color = "PINK"

    #: tuple: strings that need to be removed from the preprocessed file
    remove_str = ("#define", "#pragma", "# pragma", "_Pragma")

    #: list: regular expressions that should be removed with a certain string
    replace_str = [(r"__attribute__\(.*\)", "")]

    def run(self):
        """Removes empty lines and strips some intrinsics that should not be
        included in the postprocessed file"""
        # keep only lines that are non-empty after right-stripping
        kept_lines = [ln for ln in self.inputs[0].read().splitlines() if ln.rstrip()]
        cleaned = os.linesep.join(ln.rstrip() for ln in kept_lines) + os.linesep
        self.outputs[0].write(cleaned, encoding="utf-8")
        # additionally drop preprocessor remnants and attribute annotations
        stripped = os.linesep.join(
            ln.rstrip()
            for ln in cleaned.split(os.linesep)
            if not ln.startswith(clean_pp_file.remove_str)
        )
        for pattern, replacement in clean_pp_file.replace_str:
            stripped = re.sub(pattern, replacement, stripped)
        self.outputs[1].write(stripped, encoding="utf-8")

    def keyword(self):  # pylint: disable=no-self-use
        """displayed keyword when post-processing the pre-processed files"""
        return "Postprocessing"
class create_version_source(Task.Task): # pylint: disable=invalid-name
"""creates the version information file"""
#: int: priority of the task
weight = 1
#: str: color in which the command line is displayed in the terminal
color = "BLUE"
#: list of str: specifies task, that this task needs to run before
before = ["c"]
#: list of str: extensions that trigger a re-build
ext_out = [".h"]
def get_remote(self):
"""returns the git remote"""
# pylint: disable=no-member
remote = "No remote"
if self.repo:
remote = self.repo.git.remote("get-url", "--push", "origin")
return remote
def get_version_from_git(self):
"""returns a version string that is extracted directly
from the underlying git repository
"""
# pylint: disable=no-member
describe_output = f"no-vcs-{self.env.VERSION}-dirty"
if self.repo:
describe_output = self.repo.git.describe(
"--dirty", "--tags", "--long", "--always", "--match", "*.*"
)
if describe_output.endswith("-dirty"):
dirty = True
describe_output | |
"""
This module implements an inversion of control framework. It allows
dependencies among functions and classes to be declared with decorators and the
resulting dependency graphs to be executed.
A decorator used to declare dependencies is called a :class:`ComponentType`, a
decorated function or class is called a component, and a collection of
interdependent components is called a graph.
In the example below, ``needs`` is a :class:`ComponentType`, ``one``, ``two``,
and ``add`` are components, and the relationship formed by their dependencies
is a graph.
.. code-block:: python
from insights import dr
class needs(dr.ComponentType):
pass
@needs()
def one():
return 1
@needs()
def two():
return 2
@needs(one, two)
def add(a, b):
return a + b
results = dr.run(add)
Once all components have been imported, the graphs they form can be run. To
execute a graph, ``dr`` sorts its components into an order that guarantees
dependencies are tried before dependents. Components that raise exceptions are
considered invalid, and their dependents will not be executed. If a component
is skipped because of a missing dependency, its dependents also will not be
executed.
During evaluation, results are accumulated into an object called a
:class:`Broker`, which is just a fancy dictionary. Brokers can be inspected
after a run for results, exceptions, tracebacks, and execution times. You also
can register callbacks with a broker that get invoked after the attempted
execution of every component, so you can inspect it during an evaluation
instead of at the end.
"""
from __future__ import print_function
import inspect
import logging
import json
import os
import pkgutil
import re
import six
import sys
import time
import traceback
from collections import defaultdict
from functools import reduce as _reduce
from insights.contrib import importlib
from insights.contrib.toposort import toposort_flatten
from insights.util import defaults, enum, KeyPassingDefaultDict
log = logging.getLogger(__name__)
# Evaluation groups a component can belong to.
GROUPS = enum("single", "cluster")
# component -> dotted module name / last segment of the module name.
MODULE_NAMES = {}
BASE_MODULE_NAMES = {}
# ComponentType -> set of callbacks; presumably invoked around component
# execution (registration code is elsewhere in this module).
TYPE_OBSERVERS = defaultdict(set)
# ComponentType -> set of components registered with that type.
COMPONENTS_BY_TYPE = defaultdict(set)
# component -> set of components it depends on / components depending on it.
DEPENDENCIES = defaultdict(set)
DEPENDENTS = defaultdict(set)
# group -> component -> set of its dependencies.
COMPONENTS = defaultdict(lambda: defaultdict(set))
# component -> its delegate (registration wrapper holding type/group/etc.).
DELEGATES = {}
# Components hidden from listings.
HIDDEN = set()
# component -> set of dependencies to ignore.
IGNORE = defaultdict(set)
# component -> enabled flag; unknown components default to enabled.
ENABLED = defaultdict(lambda: True)
def set_enabled(component, enabled=True):
    """
    Enable or disable a component for evaluation. When disabled, the
    component is skipped and everything that requires it will not execute.
    If component is a fully qualified name string, its module is loaded as a
    side effect of the lookup.

    Args:
        component (str or callable): fully qualified name of the component or
            the component object itself.
        enabled (bool): whether the component is enabled for evaluation.

    Returns:
        None
    """
    resolved = get_component(component) or component
    ENABLED[resolved] = enabled
def is_enabled(component):
    """
    Check to see if a component is enabled.
    Args:
        component (callable): The component to check. The component must
            already be loaded.
    Returns:
        True if the component is enabled. False otherwise.
    """
    return ENABLED[component]
def get_delegate(component):
    """Return the delegate registered for component, or None."""
    return DELEGATES.get(component)
def add_ignore(c, i):
    """Mark dependency i of component c to be ignored during evaluation."""
    IGNORE[c].add(i)
def hashable(v):
    """Return True if v is hashable (usable as a dict key or set member)."""
    try:
        hash(v)
    except TypeError:
        # hash() signals unhashable types with TypeError; the original bare
        # except also swallowed KeyboardInterrupt/SystemExit.
        return False
    return True
def _get_from_module(name):
mod, _, n = name.rpartition(".")
if mod not in sys.modules:
importlib.import_module(mod)
return getattr(sys.modules[mod], n)
def _get_from_class(name):
    """Resolve ``module.Class.attr`` by first resolving the class."""
    class_path, _, attr = name.rpartition(".")
    owner = _get_from_module(class_path)
    return getattr(owner, attr)
def _import_component(name):
    """
    Returns a class, function, or class method specified by the fully qualified
    name.
    """
    # Try module-attribute resolution first, then class-attribute resolution.
    for f in (_get_from_module, _get_from_class):
        try:
            return f(name)
        except Exception:
            # A failure here is expected for one of the two strategies; fall
            # through to the next. (Was a bare except, which also swallowed
            # KeyboardInterrupt/SystemExit.)
            pass
    log.debug("Couldn't load %s" % name)
# Memoizes _import_component: a missing key triggers an import attempt.
COMPONENT_IMPORT_CACHE = KeyPassingDefaultDict(_import_component)
def get_component(name):
    """ Returns the class or function specified, importing it if necessary. """
    return COMPONENT_IMPORT_CACHE[name]
def _find_component(name):
    """Linear scan of registered delegates for a matching qualified name."""
    for d in DELEGATES:
        if get_name(d) == name:
            return d
# Memoizes _find_component lookups by name.
COMPONENTS_BY_NAME = KeyPassingDefaultDict(_find_component)
def get_component_by_name(name):
    """
    Look up a component by its fully qualified name. Return None if the
    component hasn't been loaded.
    """
    return COMPONENTS_BY_NAME[name]
@defaults(None)
def get_component_type(component):
    """Return the ComponentType of component (None if not registered)."""
    return get_delegate(component).type
def get_components_of_type(_type):
    """Return the set of components registered under _type."""
    return COMPONENTS_BY_TYPE.get(_type)
@defaults(None)
def get_group(component):
    """Return the evaluation group of component (None if not registered)."""
    return get_delegate(component).group
def add_dependent(component, dep):
    """Record dep as a dependent of component."""
    DEPENDENTS[component].add(dep)
def get_dependents(component):
    """Return the set of components that depend on component."""
    return DEPENDENTS.get(component, set())
@defaults(set())
def get_dependencies(component):
    """Return the set of component's dependencies (empty if not registered)."""
    return get_delegate(component).get_dependencies()
def add_dependency(component, dep):
    """Register dep as an additional dependency of component."""
    get_delegate(component).add_dependency(dep)
class MissingRequirements(Exception):
    """
    Raised during evaluation if a component's dependencies aren't met.
    """
    def __init__(self, requirements):
        # the unmet requirements, kept for inspection by callers
        self.requirements = requirements
        super(MissingRequirements, self).__init__(requirements)
class SkipComponent(Exception):
    """
    This class should be raised by components that want to be taken out of
    dependency resolution.
    """
    pass
def get_name(component):
    """
    Attempt to get the string name of component, including module and class if
    applicable.
    """
    if not six.callable(component):
        return str(component)
    qualname = getattr(component, "__qualname__", component.__name__)
    return "%s.%s" % (component.__module__, qualname)
def get_simple_name(component):
    """Return the bare name of a callable, or str() of anything else."""
    return component.__name__ if six.callable(component) else str(component)
def get_metadata(component):
    """
    Return any metadata dictionary associated with the component. Defaults to
    an empty dictionary.
    """
    # unregistered components simply have no metadata
    return get_delegate(component).metadata if component in DELEGATES else {}
def get_tags(component):
    """
    Return the set of tags associated with the component. Defaults to
    ``set()``.
    """
    return get_delegate(component).tags if component in DELEGATES else set()
def get_module_name(obj):
    """Return the dotted module name obj was defined in, or None."""
    try:
        return inspect.getmodule(obj).__name__
    except Exception:
        # inspect.getmodule returns None for objects without a module (e.g.
        # plain values), making __name__ raise AttributeError. The original
        # bare except also swallowed KeyboardInterrupt/SystemExit.
        return None
def get_base_module_name(obj):
    """Return the last segment of obj's module name, or None."""
    try:
        return get_module_name(obj).split(".")[-1]
    except Exception:
        # get_module_name returns None for module-less objects; .split then
        # raises AttributeError. The original bare except also swallowed
        # KeyboardInterrupt/SystemExit.
        return None
def mark_hidden(component):
    """Hide a component (or a list/set of them) from listings."""
    global HIDDEN
    if isinstance(component, (list, set)):
        HIDDEN |= set(component)
    else:
        HIDDEN.add(component)
def is_hidden(component):
    """Return True if component has been marked hidden."""
    return component in HIDDEN
def walk_tree(root, method=get_dependencies):
    """Yield root's dependency tree depth-first, pre-order. Nodes reachable
    through several paths are yielded once per path."""
    for child in method(root):
        yield child
        for descendant in walk_tree(child, method=method):
            yield descendant
def walk_dependencies(root, visitor):
    """
    Call visitor on root and every dependency reachable from it. Traversal
    is depth-first, pre-order; the call on root is ``visitor(root, None)``.

    Args:
        root (component): component function or class
        visitor (function): signature is `func(component, parent)`.
    """
    def _descend(parent):
        for dep in get_dependencies(parent):
            visitor(dep, parent)
            _descend(dep)
    visitor(root, None)
    _descend(root)
def get_dependency_graph(component):
    """
    Generate a component's graph of dependencies, which can be passed to
    :func:`run` or :func:`run_incremental`.
    """
    if component not in DEPENDENCIES:
        raise Exception("%s is not a registered component." % get_name(component))
    if not DEPENDENCIES[component]:
        return {component: set()}
    graph = defaultdict(set)
    def collect(node, parent):
        if parent is not None:
            graph[parent].add(node)
    walk_dependencies(component, collect)
    graph = dict(graph)
    # Leaves appear only as values; give them explicit empty dependency sets.
    leaves = _reduce(set.union, graph.values(), set()) - set(graph.keys())
    graph.update(dict((leaf, set()) for leaf in leaves))
    return graph
def get_subgraphs(graph=None):
    """
    Given a graph of possibly disconnected components, generate all graphs of
    connected components. graph is a dictionary of dependencies. Keys are
    components, and values are sets of components on which they depend.
    """
    graph = graph or DEPENDENCIES
    keys = set(graph)
    frontier = set()
    seen = set()
    while keys:
        # seed a new connected component from an arbitrary remaining key
        frontier.add(keys.pop())
        while frontier:
            component = frontier.pop()
            seen.add(component)
            # expand along both edge directions, staying within graph
            frontier |= set([d for d in get_dependencies(component) if d in graph])
            frontier |= set([d for d in get_dependents(component) if d in graph])
            frontier -= seen
        yield dict((s, get_dependencies(s)) for s in seen)
        # remove this connected component's members before the next round
        keys -= seen
        seen.clear()
def _import(path, continue_on_error):
    """Import path, returning the module or None; failures are logged and
    re-raised unless continue_on_error is truthy."""
    log.debug("Importing %s" % path)
    try:
        return importlib.import_module(path)
    except Exception as ex:
        log.exception(ex)
        if not continue_on_error:
            raise
def _load_components(path, include=".*", exclude="test", continue_on_error=True):
    """Recursively import path (a dotted or file-system module/package path)
    and everything beneath it, returning the number of modules imported."""
    num_loaded = 0
    if path.endswith(".py"):
        path, _ = os.path.splitext(path)
    # accept file-system style paths by converting them to dotted paths
    path = path.rstrip("/").replace("/", ".")
    package = _import(path, continue_on_error)
    if not package:
        return 0
    num_loaded += 1
    do_include = re.compile(include).search if include else lambda x: True
    do_exclude = re.compile(exclude).search if exclude else lambda x: False
    if not hasattr(package, "__path__"):
        # plain module: nothing further to walk
        return num_loaded
    prefix = package.__name__ + "."
    for _, name, is_pkg in pkgutil.iter_modules(path=package.__path__, prefix=prefix):
        if not name.startswith(prefix):
            name = prefix + name
        if is_pkg:
            num_loaded += _load_components(name, include, exclude, continue_on_error)
        else:
            if do_include(name) and not do_exclude(name):
                _import(name, continue_on_error)
                num_loaded += 1
    return num_loaded
def load_components(*paths, **kwargs):
    """
    Loads all components on the paths. Each path should be a package or module.
    All components beneath a path are loaded.
    Args:
        paths (str): A package or module to load
    Keyword Args:
        include (str): A regular expression of packages and modules to include.
            Defaults to '.*'
        exclude (str): A regular expression of packages and modules to exclude.
            Defaults to 'test'
        continue_on_error (bool): If True, continue importing even if something
            raises an ImportError. If False, raise the first ImportError.
    Returns:
        int: The total number of modules loaded.
    Raises:
        ImportError
    """
    num_loaded = 0
    for path in paths:
        num_loaded += _load_components(path, **kwargs)
    return num_loaded
def first_of(dependencies, broker):
    """Return the broker value of the first dependency present, else None."""
    for dep in dependencies:
        if dep in broker:
            return broker[dep]
    return None
def split_requirements(requires):
    """Partition requires into (req_all, req_any); list entries are any-of
    groups, everything else is a hard requirement."""
    req_all, req_any = [], []
    for requirement in requires:
        target = req_any if isinstance(requirement, list) else req_all
        target.append(requirement)
    return req_all, req_any
def stringify_requirements(requires):
    """Render a requirements spec as a human readable "All: ... Any: ..." string."""
    if isinstance(requires, tuple):
        req_all, req_any = requires
    else:
        req_all, req_any = split_requirements(requires)
    pretty_all = [get_name(r) for r in req_all]
    pretty_any = [str([get_name(r) for r in group]) for group in req_any]
    return "All: %s" % pretty_all + " Any: " + " Any: ".join(pretty_any)
def _register_component(delegate):
component = delegate.component
dependencies = delegate.get_dependencies()
DEPENDENCIES[component] = dependencies
COMPONENTS[delegate.group][component] |= dependencies
COMPONENTS_BY_TYPE[delegate.type].add(component)
for k, v in COMPONENTS_BY_TYPE.items():
if issubclass(delegate.type, k) and delegate.type is not k:
v.add(component)
DELEGATES[component] = delegate
MODULE_NAMES[component] = get_module_name(component)
BASE_MODULE_NAMES[component] = | |
logic for rendering objects with the console protocol.
You are unlikely to need to use it directly, unless you are extending the library.
Args:
renderable (RenderableType): An object supporting the console protocol, or
an object that may be converted to a string.
options (ConsoleOptions, optional): An options objects. Defaults to None.
Returns:
Iterable[Segment]: An iterable of segments that may be rendered.
"""
render_iterable: RenderResult
if isinstance(renderable, Control):
self._control.append(renderable.codes)
return
render_options = options or self.options
if isinstance(renderable, ConsoleRenderable):
render_iterable = renderable.__console__(self, render_options)
elif isinstance(renderable, str):
yield from self.render(self.render_str(renderable), render_options)
return
else:
raise errors.NotRenderableError(
f"Unable to render {renderable!r}; "
"A str, Segment or object with __console__ method is required"
)
try:
iter_render = iter(render_iterable)
except TypeError:
raise errors.NotRenderableError(
f"object {render_iterable!r} is not renderable"
)
for render_output in iter_render:
if isinstance(render_output, Segment):
yield render_output
else:
yield from self.render(render_output, render_options)
    def render(
        self, renderable: RenderableType, options: Optional[ConsoleOptions]
    ) -> Iterable[Segment]:
        """Render an object in to an iterable of `Segment` instances.
        This is a thin public wrapper; all rendering logic lives in the
        internal ``_render`` generator.
        Args:
            renderable (RenderableType): An object supporting the console protocol, or
                an object that may be converted to a string.
            options (ConsoleOptions, optional): An options objects. Defaults to None.
        Returns:
            Iterable[Segment]: An iterable of segments that may be rendered.
        """
        yield from self._render(renderable, options)
def render_lines(
self,
renderable: RenderableType,
options: Optional[ConsoleOptions],
style: Optional[Style] = None,
pad: bool = True,
) -> List[List[Segment]]:
"""Render objects in to a list of lines.
The output of render_lines is useful when further formatting of rendered console text
is required, such as the Panel class which draws a border around any renderable object.
Args:
renderables (Iterable[RenderableType]): Any object or objects renderable in the console.
options (Optional[ConsoleOptions]): Console options used to render with.
Returns:
List[List[Segment]]: A list of lines, where a line is a list of Segment objects.
"""
render_options = options or self.options
with self:
_rendered = self.render(renderable, render_options)
if style is not None:
_rendered = Segment.apply_style(_rendered, style)
lines = list(
Segment.split_and_crop_lines(
_rendered,
render_options.max_width,
style=style,
include_new_lines=False,
pad=pad,
)
)
return lines
    def render_str(
        self,
        text: str,
        style: Union[str, Style] = "",
        emoji: Optional[bool] = None,
        markup: Optional[bool] = None,
    ) -> "Text":
        """Convert a string to a Text instance.
        Args:
            text (str): Text to render.
            style (Union[str, Style], optional): Style to apply to rendered text.
            emoji (Optional[bool], optional): Enable emoji, or ``None`` to use Console default.
            markup (Optional[bool], optional): Enable markup, or ``None`` to use Console default.
        Returns:
            ConsoleRenderable: Renderable object.
        """
        # ``None`` means "inherit the console-wide setting".
        emoji_enabled = emoji or (emoji is None and self._emoji)
        markup_enabled = markup or (markup is None and self._markup)
        if emoji_enabled:
            if markup_enabled:
                # markup rendering performs emoji substitution itself
                return render_markup(text, style=style)
            else:
                text = _emoji_replace(text)
        else:
            if markup_enabled:
                return render_markup(text, style=style, emoji=False)
        return Text(text, style=style)
    def get_style(
        self, name: Union[str, Style], *, default: Optional[Union[Style, str]] = None
    ) -> Style:
        """Get a style merged with the current style.
        Args:
            name (str): The name of a style or a style definition.
            default (Optional[Union[Style, str]], optional): Fallback style used
                when ``name`` cannot be parsed. Defaults to None.
        Returns:
            Style: A Style object.
        Raises:
            MissingStyle: If no style could be parsed from name.
        """
        if isinstance(name, Style):
            return name
        try:
            # registered named styles take precedence over parsing a definition
            return self._styles.get(name) or Style.parse(name)
        except errors.StyleSyntaxError as error:
            if default is not None:
                return self.get_style(default)
            raise errors.MissingStyle(f"Failed to get style {name!r}; {error}")
    def _collect_renderables(
        self,
        objects: Iterable[Any],
        sep: str,
        end: str,
        emoji: Optional[bool] = None,
        markup: Optional[bool] = None,
        highlight: Optional[bool] = None,
    ) -> List[ConsoleRenderable]:
        """Combine a number of renderables and text in to one renderable.
        Args:
            objects (Iterable[Any]): Anything that Rich can render.
            sep (str, optional): String to write between print data. Defaults to " ".
            end (str, optional): String to write at end of print data. Defaults to "\n".
            emoji (Optional[bool], optional): Enable emoji code, or ``None`` to use console default.
            markup (Optional[bool], optional): Enable markup, or ``None`` to use console default.
            highlight (Optional[bool], optional): Enable automatic highlighting, or ``None`` to use console default.
        Returns:
            List[ConsoleRenderable]: A list of things to render.
        """
        sep_text = Text(sep, end=end)
        renderables: List[ConsoleRenderable] = []
        append = renderables.append
        text: List[Text] = []
        append_text = text.append
        _highlighter: HighlighterType = _null_highlighter
        if highlight or (highlight is None and self._highlight):
            _highlighter = self.highlighter
        def check_text() -> None:
            # flush any accumulated text fragments into one joined Text
            if text:
                append(sep_text.join(text))
                del text[:]
        for renderable in objects:
            # honor the __rich__ cast protocol before type dispatch
            rich_cast = getattr(renderable, "__rich__", None)
            if rich_cast:
                renderable = rich_cast()
            if isinstance(renderable, str):
                append_text(
                    _highlighter(
                        self.render_str(renderable, emoji=emoji, markup=markup)
                    )
                )
            elif isinstance(renderable, Text):
                append_text(renderable)
            elif isinstance(renderable, ConsoleRenderable):
                check_text()
                append(renderable)
            elif isinstance(renderable, (Mapping, Sequence)):
                # containers get pretty-printed
                check_text()
                append(Pretty(renderable, highlighter=_highlighter))
            else:
                append_text(_highlighter(str(renderable)))
        check_text()
        return renderables
def rule(
self,
title: str = "",
character: str = "─",
style: Union[str, Style] = "rule.line",
) -> None:
"""Draw a line with optional centered title.
Args:
title (str, optional): Text to render over the rule. Defaults to "".
character (str, optional): Character to form the line. Defaults to "─".
"""
from .rule import Rule
rule = Rule(title=title, character=character, style=style)
self.print(rule)
    def print(
        self,
        *objects: Any,
        sep: str = " ",
        end: str = "\n",
        style: Optional[Union[str, Style]] = None,
        emoji: Optional[bool] = None,
        markup: Optional[bool] = None,
        highlight: Optional[bool] = None,
    ) -> None:
        r"""Print to the console.
        Args:
            objects (positional args): Objects to log to the terminal.
            sep (str, optional): String to write between print data. Defaults to " ".
            end (str, optional): String to write at end of print data. Defaults to "\n".
            style (Union[str, Style], optional): A style to apply to output. Defaults to None.
            emoji (Optional[bool], optional): Enable emoji code, or ``None`` to use console default. Defaults to None.
            markup (Optional[bool], optional): Enable markup, or ``None`` to use console default. Defaults to None
            highlight (Optional[bool], optional): Enable automatic highlighting, or ``None`` to use console default. Defaults to None.
        """
        if not objects:
            # bare print() emits a blank line
            self.line()
            return
        with self:
            renderables = self._collect_renderables(
                objects, sep, end, emoji=emoji, markup=markup, highlight=highlight
            )
            render_options = self.options
            extend = self._buffer.extend
            render = self.render
            if style is None:
                for renderable in renderables:
                    extend(render(renderable, render_options))
            else:
                # apply the explicit style to every rendered segment
                for renderable in renderables:
                    extend(
                        Segment.apply_style(
                            render(renderable, render_options), self.get_style(style)
                        )
                    )
    def print_exception(
        self,
        width: Optional[int] = 88,
        extra_lines: int = 3,
        theme: Optional[str] = None,
        word_wrap: bool = False,
    ) -> None:
        """Prints a rich render of the last exception and traceback.
        Args:
            width (Optional[int], optional): Number of characters used to render the traceback. Defaults to 88.
            extra_lines (int, optional): Additional lines of code to render. Defaults to 3.
            theme (str, optional): Override pygments theme used in traceback
            word_wrap (bool, optional): Enable word wrapping of long lines. Defaults to False.
        """
        from .traceback import Traceback

        # Traceback() with no exc args picks up the current/last exception.
        traceback = Traceback(
            width=width, extra_lines=extra_lines, theme=theme, word_wrap=word_wrap
        )
        self.print(traceback)
def log(
    self,
    *objects: Any,
    sep: str = " ",
    end: str = "\n",
    emoji: Optional[bool] = None,
    markup: Optional[bool] = None,
    highlight: Optional[bool] = None,
    log_locals: bool = False,
    _stack_offset: int = 1,
) -> None:
    r"""Log rich content to the terminal.

    Args:
        objects (positional args): Objects to log to the terminal.
        sep (str, optional): String to write between print data. Defaults to " ".
        end (str, optional): String to write at end of print data. Defaults to "\n".
        emoji (Optional[bool], optional): Enable emoji code, or ``None`` to use console default. Defaults to None.
        markup (Optional[bool], optional): Enable markup, or ``None`` to use console default. Defaults to None.
        highlight (Optional[bool], optional): Enable automatic highlighting, or ``None`` to use console default. Defaults to None.
        log_locals (bool, optional): Boolean to enable logging of locals where ``log()``
            was called. Defaults to False.
        _stack_offset (int, optional): Offset of caller from end of call stack. Defaults to 1.
    """
    if not objects:
        # log() with no arguments emits a blank line
        self.line()
        return
    renderables = self._collect_renderables(
        objects, sep, end, emoji=emoji, markup=markup, highlight=highlight
    )
    # inspect the call stack to attribute the log line to the caller's file/line
    caller = inspect.stack()[_stack_offset]
    path = caller.filename.rpartition(os.sep)[-1]
    line_no = caller.lineno
    if log_locals:
        # capture the caller's locals (minus dunder names) and render as a table
        locals_map = {
            key: value
            for key, value in caller.frame.f_locals.items()
            if not key.startswith("__")
        }
        renderables.append(tabulate_mapping(locals_map, title="Locals"))
    with self:  # use console as a context manager so buffered output is flushed on exit
        self._buffer.extend(
            self.render(
                self._log_render(self, renderables, path=path, line_no=line_no),
                self.options,
            )
        )
def _check_buffer(self) -> None:
"""Check if the buffer may be | |
# <gh_stars>0
"""
Create a CrowdFlower job to collect relevance judgments for domain, query pairs.
This script will read a file of domain, query pairs from the command-line, and collect results from
Cetera for each pair. You may optionally specify different experimental groups (eg. baseline
vs. experiment1) via the `-g` option. These should be specified as JSON strings.
Example:
python arcs/launch_job.py \
-i ~/Data/arcs/20160126.experiment_1/queries.tsv \
-g '{"name": "adjusted boost clause", "description": "Moved field boosts to 'should' clause", "params": {}}' \
-r 10 \
-D 'postgresql://username:password@hostname/dbname'
The group definition should have a name, description, and params field. The params field should be
a nested object specifying any relevant parameters of the experiment.
A full CSV is created, which contains all of the job data. Additionally, a CrowdFlower CSV is
created which corresponds precisely with the data uploaded to create the job in CrowdFlower.
All data is persisted in a Postgres database, the parameters of which are specified via the -D
option.
"""
import argparse
import pandas as pd
import logging
import psycopg2
from functools import partial
from datetime import datetime
from cetera import get_cetera_results
from crowdflower import create_job_from_copy, add_data_to_job
from crowdsourcing_utils import cleanup_description, make_dataset_sample
from db import (
find_judged_qrps, insert_incomplete_job, add_raw_group_results,
insert_unjudged_data_for_group, insert_empty_group
)
from experiment import GroupDefinition
# Columns identifying a single query-result pair and its experimental group.
CORE_COLUMNS = ['domain', 'query', 'result_fxf', 'result_position', 'group_id']
# Result fields shown to CrowdFlower contributors when judging relevance.
DISPLAY_DATA = ['name', 'link', 'description', 'sample']
CSV_COLUMNS = CORE_COLUMNS + DISPLAY_DATA
# Columns of the raw (unexpanded) DataFrame: one row per query, whose
# "results" cell holds the full result list for that query.
RAW_COLUMNS = ['domain', 'query', 'results', 'group_id']

logging.basicConfig(format='%(message)s', level=logging.INFO)
LOGGER = logging.getLogger(__name__)
# silence per-request INFO noise from the requests library
logging.getLogger("requests").setLevel(logging.WARNING)
def _transform_cetera_result(result, result_position, num_rows, num_columns):
    """
    Reshape a raw Cetera search result into the flat dict used by the
    crowdsourcing task: dataset name, link (i.e. URL), cleaned-up
    description, and a small tabular sample of the dataset.

    Args:
        result (dict): A single search result from Cetera
        result_position (int): The position of the result in the result set
        num_rows (int): The number of rows to show in the dataset sample
        num_columns (int): The number of columns to show in the dataset sample

    Returns:
        A dictionary of data for each result
    """
    resource = result["resource"]
    result_domain = result["metadata"]["domain"]
    result_fxf = resource.get("id")
    return {
        "result_domain": result_domain,
        "result_position": result_position,
        "result_fxf": result_fxf,
        "name": resource.get("name"),
        "link": result["link"],
        "description": cleanup_description(resource.get("description")),
        "sample": make_dataset_sample(result_domain, result_fxf, num_rows, num_columns),
    }
def raw_results_to_dataframe(group_results, group_id, num_rows, num_columns):
    """
    Add group ID to raw results tuple.

    Notes:
        1. We keep raw results around for posterity.
        2. When domain is specified as "www.opendatanetwork.com" in the input, we replace it with
           the source domain of the corresponding result

    Args:
        group_results (iterable): An iterable of results tuples as returned by get_cetera_results
        group_id (int): An identifier for the group of results
        num_rows (int): The number of rows to show in the dataset sample
        num_columns (int): The number of columns to show in the dataset sample

    Returns:
        An iterable of result dictionaries with the required and relevant metadata
    """
    LOGGER.info("Transforming raw results")
    # append the group id to each (domain, query, results) tuple so rows line
    # up with RAW_COLUMNS
    results = pd.DataFrame.from_records(
        [(results + (group_id,)) for results in group_results],
        columns=RAW_COLUMNS)
    transform = partial(_transform_cetera_result, num_rows=num_rows, num_columns=num_columns)
    # each raw entry appears to be a (position, result_dict) pair — r[1] is the
    # result, r[0] its rank
    results["results"] = results["results"].apply(lambda rs: [transform(r[1], r[0]) for r in rs])
    # normalize queries to strings (numeric-looking queries would otherwise be parsed as numbers)
    results["query"] = results["query"].apply(str)
    return results
def filter_previously_judged(db_conn, qrps_df):
    """
    Filter a Pandas DataFrame of query-result pairs down to only those that
    have not previously been judged.

    Args:
        db_conn (psycopg2.extensions.connection): Connection to a database
        qrps_df (pandas.DataFrame): A DataFrame of query, result data

    Returns:
        A copy of the input DataFrame filtered down to unjudged QRPs
    """
    judged = find_judged_qrps(db_conn)
    # boolean mask: keep rows whose (query, fxf) pair has not been seen before
    unjudged_mask = [
        (query, fxf) not in judged
        for query, fxf in zip(qrps_df["query"], qrps_df["result_fxf"])
    ]
    return qrps_df[unjudged_mask]
def expanded_results_dataframe(raw_results):
    """
    Stack raw results column and join with `raw_results` dataframe such that we have one
    query-result pair per row.

    Args:
        raw_results (pandas.DataFrame): A DataFrame with queries and results; the
            "results" column holds a list of result dicts per row

    Returns:
        An expanded DataFrame with one query-result pair per row
    """
    # create new series by stacking/expanding results list
    results_s = raw_results["results"].apply(lambda rs: pd.Series(rs))
    # drop unnecessary index, reset index to jibe w/ raw_results_df, and create new dataframe
    expanded_results_df = pd.DataFrame(
        {"result": results_s.unstack().reset_index(level=0, drop=True)})
    # join w/ original dataframe (index join duplicates each query row once per result)
    expanded_results_df = raw_results.join(expanded_results_df)
    # filter all rows for which there are zero results
    expanded_results_df = expanded_results_df[expanded_results_df["result"].notnull()]\
        .reset_index()
    # add columns from fields in dict
    results_dict_df = pd.DataFrame.from_records(list(expanded_results_df["result"]))
    results_dict_df.set_index(expanded_results_df.index, inplace=True)
    expanded_results_df = expanded_results_df.join(results_dict_df)
    # drop original domain, and replace with result domain
    # NOTE: keyword form required — the positional `axis` argument to drop()
    # was deprecated in pandas 1.0 and removed in pandas 2.0
    expanded_results_df = expanded_results_df.drop(columns="domain")
    expanded_results_df = expanded_results_df.rename(columns={"result_domain": "domain"})
    return expanded_results_df
def collect_search_results(groups, query_domain_file, num_results, num_rows, num_columns,
                           output_file=None, cetera_host=None, cetera_port=None):
    """
    Send queries included in `query_domain_file` to Cetera, collecting n=num_results results
    for each query. Bundle everything up into a Pandas DataFrame. Write out full expanded results
    to a CSV.

    Args:
        groups (Iterable[GroupDefinition]): An iterable of GroupDefinitions
        query_domain_file (str): A 2-column tab-delimited file containing query-domain pairs
        num_results (int): The number of search results to fetch for each query
        num_rows (int): The number of rows to show in the dataset sample
        num_columns (int): The number of columns to show in the dataset sample
        output_file (str): An optional file path to which the job CSV is to be written
        cetera_host (str): An optional Cetera hostname
        cetera_port (int): An optional Cetera port number

    Returns:
        A pair containing the raw results dataframe (one row per query-domain pair) and an expanded
        results dataframe where each row corresponds to a query-result pair.
    """
    assert(num_results > 0)
    LOGGER.info("Reading query domain pairs from {}".format(query_domain_file))
    with open(query_domain_file, "r") as f:
        next(f)  # skip header
        # each line: domain<TAB>query; any extra columns are ignored
        domain_queries = [tuple(x.strip().split('\t')[:2]) for x in f if x.strip()]
    raw_results_df = pd.DataFrame(columns=RAW_COLUMNS)
    # get search results for queries in each group and combine
    for group in groups:
        results = get_cetera_results(domain_queries, cetera_host, cetera_port,
                                     num_results=num_results, cetera_params=group.params)
        raw_results_df = pd.concat(
            [raw_results_df, raw_results_to_dataframe(results, group.id, num_rows, num_columns)])
    # default output name is date-stamped, e.g. 20160126-full.csv
    output_file = output_file or \
        "{}-full.csv".format(datetime.now().strftime("%Y%m%d"))
    expanded_results_df = expanded_results_dataframe(raw_results_df)[CSV_COLUMNS]
    expanded_results_df.to_csv(output_file, encoding="utf-8")
    return raw_results_df, expanded_results_df
def submit_job(db_conn, groups, data_df, job_to_copy, output_file=None):
    """
    Create CrowdFlower job for catalog search result data in `data_df`.

    An external CrowdFlower ID is created by launching an initial empty job (using a previous job
    (including settings and test data) as the initial state. After creating a CrowdFlower job and
    getting an external ID, we persist the job itself to the DB

    Args:
        db_conn (psycopg2.extensions.connection): Connection to a database
        groups (iterable): An iterable of GroupDefinitions
        data_df (pandas.DataFrame): A DataFrame of query, result data
        job_to_copy (int): External identifier for existing job to copy for its test data
        output_file (str): Optional path to a CSV file to be created and submitted to CrowdFlower

    Returns:
        An Arcs Job with its external ID populated
    """
    LOGGER.info("Creating CrowdFlower job")
    # create empty CrowdFlower job by copying test units from existing job
    job = create_job_from_copy(job_to_copy)
    # filter previously judged QRPs, so that we don't pay to have them rejudged
    num_rows_pre_filter = len(data_df)
    data_df = filter_previously_judged(db_conn, data_df)
    num_rows_post_filter = len(data_df)
    LOGGER.info("Eliminated {} rows that had been previously judged".format(
        num_rows_pre_filter - num_rows_post_filter))
    # multiple groups may in fact produce the same results, for any given query,
    # so let's ensure we're having each (query, result) pair judged only once
    grouped = data_df.groupby(["query", "result_fxf"])
    data_df = grouped.first().reset_index()
    LOGGER.info("Eliminated {} redundant query-result rows".format(
        num_rows_post_filter - len(data_df)))
    output_file = output_file or \
        "{}-crowdflower.csv".format(datetime.now().strftime("%Y%m%d"))
    LOGGER.info("Writing out {} rows as CSV to {}".format(len(data_df), output_file))
    data_df.to_csv(output_file, encoding="utf-8",
                   index=False, escapechar="\\", na_rep=None)
    LOGGER.info("Adding data to job from CSV")
    try:
        add_data_to_job(job.external_id, output_file)
    except Exception as e:
        # upload failure is best-effort recoverable: the operator can upload
        # the CSV by hand, so warn (with the actual error) instead of raising.
        # NOTE: Python 3 exceptions have no `.message`; format the exception
        # itself, and use warning() — warn() is a deprecated logging alias.
        LOGGER.warning("Unable to send CSV to CrowdFlower: {}".format(e))
        LOGGER.warning("Try uploading the data manually using the web UI.")
    LOGGER.info("Job submitted.")
    LOGGER.info("Job consists of {} group(s): {}".format(
        len(groups), '\n'.join([str(g) for g in groups])))
    LOGGER.info("https://make.crowdflower.com/jobs/{}".format(job.external_id))
    return job
def _df_data_to_records(df):
return (dict(zip(df.columns, record)) for record in df.to_records(index=False))
def persist_job_data(db_conn, job, groups, raw_data_df):
"""
Write all job data to the DB.
We write an initial incomplete job, using the external ID populated upon job submission. We
store the job unit data in a JSON blob in the DB. And we write group-specific data to the DB
without any judgments that will be updated upon job completion. The input data should be the
full DataFrame, as opposed to the deduplicated data we send to CrowdFlower.
Args:
db_conn (psycopg2.extensions.connection): Connection to a database
job (Job): An Arcs Job object with, at a minimum, its external_id set
groups (iterable): An iterable of GroupDefinitions
data_df (pandas.DataFrame): A DataFrame of query, result data
raw_data_df (pandas.DataFrame): A DataFrame of raw | |
tip
if root_block.header.total_difficulty <= self.root_tip.total_difficulty:
check(
self.__is_same_root_chain(
self.root_tip,
self.db.get_root_block_header_by_hash(
self.header_tip.hash_prev_root_block
),
)
)
return False
# Switch to the root block with higher total diff
self.root_tip = root_block.header
self.confirmed_header_tip = shard_header
orig_header_tip = self.header_tip
if shard_header:
orig_block = self.db.get_minor_block_by_height(shard_header.height)
# get_minor_block_by_height only returns block on the best chain
# so orig_block could be on a fork and thus will not be found by
# get_minor_block_by_height
if not orig_block or orig_block.header != shard_header:
# TODO: shard_header might not be the tip of the longest chain
# need to switch to the tip of the longest chain
self.header_tip = shard_header
# the current header_tip might point to a root block on a fork with r_block
# we need to scan back until finding a minor block pointing to the same root chain r_block is on.
# the worst case would be that we go all the way back to orig_block (shard_header)
while not self.__is_same_root_chain(
self.root_tip,
self.db.get_root_block_header_by_hash(self.header_tip.hash_prev_root_block),
):
if self.header_tip.height == 0:
# we are at genesis block now but the root block it points to is still on a fork from root_tip.
# we have to reset the genesis block based on the root chain identified by root_tip
genesis_root_header = self.root_tip
genesis_height = self.env.quark_chain_config.get_genesis_root_height(
self.full_shard_id
)
check(genesis_root_header.height >= genesis_height)
# first find the root block at genesis root height
while genesis_root_header.height != genesis_height:
genesis_root_header = self.db.get_root_block_header_by_hash(
genesis_root_header.hash_prev_block
)
check(genesis_root_header is not None)
# recover the genesis block
self.header_tip = self.db.get_genesis_block(
genesis_root_header.get_hash()
).header
check(self.header_tip is not None)
break
self.header_tip = self.db.get_minor_block_header_by_hash(
self.header_tip.hash_prev_minor_block
)
if self.header_tip != orig_header_tip:
header_tip_hash = self.header_tip.get_hash()
self.meta_tip = self.db.get_minor_block_meta_by_hash(header_tip_hash)
self.__rewrite_block_index_to(
self.db.get_minor_block_by_hash(header_tip_hash)
)
Logger.info(
"[{}] shard tip reset from {} to {} by root block {}".format(
self.branch.to_str(),
orig_header_tip.height,
self.header_tip.height,
root_block.header.height,
)
)
return True
def _is_neighbor(self, remote_branch: Branch, root_height=None):
    """Whether `remote_branch` neighbors this shard, judged by the shard
    count initialized at `root_height` (defaults to the current root tip)."""
    if root_height is None:
        root_height = self.root_tip.height
    initialized_ids = self.env.quark_chain_config.get_initialized_full_shard_ids_before_root_height(
        root_height
    )
    return is_neighbor(self.branch, remote_branch, len(initialized_ids))
def __run_one_xshard_tx(self, evm_state, xshard_deposit_tx):
    """Apply a single incoming cross-shard deposit to `evm_state`: credit the
    target address, charge the fixed x-shard gas, and pay the miner's fee."""
    tx = xshard_deposit_tx
    # TODO: Check if target address is a smart contract address or user address
    evm_state.delta_token_balance(
        tx.to_address.recipient, tx.transfer_token_id, tx.value
    )
    # zero-gas-price deposits consume no block gas
    evm_state.gas_used = evm_state.gas_used + (
        opcodes.GTXXSHARDCOST if tx.gas_price != 0 else 0
    )
    check(evm_state.gas_used <= evm_state.gas_limit)
    # only the local share (local_fee_rate) of the x-shard fee goes to this shard
    xshard_fee = (
        opcodes.GTXXSHARDCOST
        * tx.gas_price
        * self.local_fee_rate.numerator
        // self.local_fee_rate.denominator
    )
    add_dict(evm_state.block_fee_tokens, {tx.gas_token_id: xshard_fee})
    evm_state.delta_token_balance(
        evm_state.block_coinbase, tx.gas_token_id, xshard_fee
    )
def __run_cross_shard_tx_with_cursor(self, evm_state, mblock):
    """Consume incoming cross-shard deposits for `mblock`, resuming from the
    cursor recorded on the previous block's meta.

    Returns:
        (tx_list, cursor_info): the deposits applied and the cursor position
        to persist on this block's meta.
    """
    # resume where the previous block stopped consuming deposits
    cursor_info = self.db.get_minor_block_meta_by_hash(
        mblock.header.hash_prev_minor_block
    ).xshard_tx_cursor_info
    cursor = XshardTxCursor(self, mblock.header, cursor_info)
    tx_list = []
    while True:
        xshard_deposit_tx = cursor.get_next_tx()
        if xshard_deposit_tx is None:
            # EOF
            break
        tx_list.append(xshard_deposit_tx)
        self.__run_one_xshard_tx(evm_state, xshard_deposit_tx)
        # Impose soft-limit of xshard gas limit
        if evm_state.gas_used >= mblock.meta.evm_xshard_gas_limit:
            break
    evm_state.xshard_receive_gas_used = evm_state.gas_used
    return tx_list, cursor.get_cursor_info()
def contain_remote_minor_block_hash(self, h):
    """Delegate to the DB: whether minor block hash `h` from a remote shard is known."""
    return self.db.contain_remote_minor_block_hash(h)
def get_transaction_by_hash(self, h):
    """ Returns (block, index) where index is the position of tx in the block """
    block, index = self.db.get_transaction_by_hash(h)
    if block:
        return block, index
    # not yet mined: fall back to the pending pool, wrapping the tx in a
    # stub block at position 0
    if h in self.tx_dict:
        stub_block = MinorBlock(MinorBlockHeader(), MinorBlockMeta())
        stub_block.tx_list.append(self.tx_dict[h])
        return stub_block, 0
    return None, None
def get_transaction_receipt(
    self, h
) -> Optional[Tuple[MinorBlock, int, TransactionReceipt]]:
    """Look up the receipt for tx hash `h`.

    Returns:
        (block, index, receipt) or None if the tx is not in any block.
    """
    block, index = self.db.get_transaction_by_hash(h)
    if not block:
        return None
    receipt = block.get_receipt(self.evm_state.db, index)
    if receipt.contract_address != Address.create_empty_account(0):
        # sanity check: a deployed contract's full shard key must match the
        # one derived from its recipient
        address = receipt.contract_address
        check(
            address.full_shard_key
            == self.evm_state.get_full_shard_key(address.recipient)
        )
    return block, index, receipt
def get_transaction_list_by_address(self, address, start, limit):
    """List transactions involving `address`.

    `start == bytes(1)` is a sentinel meaning "pending transactions only";
    any other cursor serves a (start, limit) page from the DB index.

    Returns:
        (tx_detail_list, next_cursor) — next_cursor is b"" when exhausted.
    """
    if not self.env.cluster_config.ENABLE_TRANSACTION_HISTORY:
        # history indexing disabled on this cluster
        return [], b""
    if start == bytes(1):  # get pending tx
        tx_list = []
        # scan both the active queue and the aside (deferred) queue
        for orderable_tx in self.tx_queue.txs + self.tx_queue.aside:
            tx = orderable_tx.tx
            if Address(tx.sender, tx.from_full_shard_key) == address:
                tx_list.append(
                    TransactionDetail(
                        TypedTransaction(
                            SerializedEvmTransaction.from_evm_tx(tx)
                        ).get_hash(),
                        address,
                        Address(tx.to, tx.to_full_shard_key) if tx.to else None,
                        tx.value,
                        block_height=0,  # 0 marks an unmined tx
                        timestamp=0,
                        success=False,
                        gas_token_id=tx.gas_token_id,
                        transfer_token_id=tx.transfer_token_id,
                    )
                )
        return tx_list, b""
    return self.db.get_transactions_by_address(address, start, limit)
def get_shard_stats(self) -> ShardStats:
    """Aggregate tx/block/stale-block counts over the last 60 seconds of the
    canonical chain ending at the current header tip."""
    cutoff = self.header_tip.create_time - 60
    block = self.db.get_minor_block_by_hash(self.header_tip.get_hash())
    tx_count = 0
    block_count = 0
    stale_block_count = 0
    last_block_time = 0
    # walk the canonical chain backwards until we leave the 60s window
    while block.header.height > 0 and block.header.create_time > cutoff:
        tx_count += len(block.tx_list)
        block_count += 1
        # any extra blocks stored at the same height are stale (non-canonical)
        stale_block_count += max(
            0, (self.db.get_block_count_by_height(block.header.height) - 1)
        )
        block = self.db.get_minor_block_by_hash(block.header.hash_prev_minor_block)
        if last_block_time == 0:
            # first iteration only: interval between the tip and its parent
            last_block_time = self.header_tip.create_time - block.header.create_time
    check(stale_block_count >= 0)
    return ShardStats(
        branch=self.branch,
        height=self.header_tip.height,
        difficulty=self.header_tip.difficulty,
        coinbase_address=self.header_tip.coinbase_address,
        timestamp=self.header_tip.create_time,
        tx_count60s=tx_count,
        pending_tx_count=len(self.tx_queue),
        total_tx_count=self.db.get_total_tx_count(self.header_tip.get_hash()),
        block_count60s=block_count,
        stale_block_count60s=stale_block_count,
        last_block_time=last_block_time,
    )
def get_logs(
    self,
    addresses: List[Address],
    topics: List[Optional[Union[str, List[str]]]],
    start_block: int,
    end_block: int,
) -> Optional[List[Log]]:
    """Run a log filter over [start_block, end_block].

    Args:
        addresses: Addresses to match; all must resolve to this shard.
        topics: Topic filter list (each entry a topic, a list of alternative
            topics, or None for wildcard).
        start_block: First block height to scan (inclusive).
        end_block: Last block height to scan (inclusive).

    Returns:
        Matching logs, or None when the addresses span shards / don't belong
        to this shard, or when the filter raises.
    """
    if addresses and (
        len(set(addr.full_shard_key for addr in addresses)) != 1
        or self.env.quark_chain_config.get_full_shard_id_by_full_shard_key(
            addresses[0].full_shard_key
        )
        != self.full_shard_id
    ):
        # should have the same full_shard_id for the given addresses
        return None

    log_filter = Filter(self.db, addresses, topics, start_block, end_block)
    try:
        return log_filter.run()
    except Exception:
        # the bound exception variable was unused; error_exception() already
        # logs the full traceback, and None signals failure to the caller
        Logger.error_exception()
        return None
def estimate_gas(self, tx: TypedTransaction, from_address) -> Optional[int]:
    """Estimate a tx's gas usage by binary searching."""
    evm_tx_start_gas = tx.tx.to_evm_tx().startgas
    # binary search. similar as in go-ethereum
    lo = 21000 - 1  # just below the intrinsic minimum gas for a transfer
    hi = evm_tx_start_gas if evm_tx_start_gas > 21000 else self.evm_state.gas_limit
    cap = hi

    def run_tx(gas):
        # apply the tx on a throwaway clone of the state; True iff it succeeds
        try:
            evm_state = self.evm_state.ephemeral_clone()  # type: EvmState
            evm_state.gas_used = 0
            evm_tx = self.__validate_tx(tx, evm_state, from_address, gas=gas)
            success, _ = apply_transaction(
                evm_state, evm_tx, tx_wrapper_hash=bytes(32)
            )
            return success
        except Exception:
            return False

    # invariant: the tx fails at lo and may succeed at hi
    while lo + 1 < hi:
        mid = (lo + hi) // 2
        if run_tx(mid):
            hi = mid
        else:
            lo = mid
    # if the search never moved hi, verify the cap actually works at all
    if hi == cap and not run_tx(hi):
        return None
    return hi
def gas_price(self) -> Optional[int]:
    """Suggest a gas price as a percentile of recent block prices.

    The result is memoized per chain head, so repeated calls at the same tip
    are free.
    """
    head_hash = self.header_tip.get_hash()
    oracle = self.gas_price_suggestion_oracle
    if head_hash == oracle.last_head:
        return oracle.last_price
    tip_height = self.header_tip.height
    # look back over the configured window, but never below height 3
    start_height = max(tip_height - oracle.check_blocks + 1, 3)
    prices = []
    for height in range(start_height, tip_height + 1):
        block = self.db.get_minor_block_by_height(height)
        if not block:
            Logger.error("Failed to get block {} to retrieve gas price".format(height))
            continue
        prices.extend(block.get_block_prices())
    if not prices:
        return None
    prices.sort()
    price = prices[(len(prices) - 1) * oracle.percentile // 100]
    oracle.last_price = price
    oracle.last_head = head_hash
    return price
def validate_minor_block_seal(self, block: MinorBlock):
    """Validate `block`'s seal against the shard's configured consensus type,
    applying the PoSW difficulty adjustment when PoSW is enabled."""
    full_shard_id = block.header.branch.get_full_shard_id()
    consensus_type = self.env.quark_chain_config.shards[full_shard_id].CONSENSUS_TYPE
    if self.shard_config.POSW_CONFIG.ENABLED:
        # PoSW can lower the effective difficulty for staked coinbases
        adjusted = self.posw_diff_adjust(block)
        validate_seal(block.header, consensus_type, adjusted_diff=adjusted)
    else:
        validate_seal(block.header, consensus_type)
def posw_diff_adjust(self, block: MinorBlock) -> int:
    """Return the PoSW-adjusted difficulty for `block`: a coinbase with enough
    stake and few enough recent blocks mines at reduced difficulty."""
    start_time = time.time()
    header = block.header
    diff = header.difficulty
    coinbase_address = header.coinbase_address.recipient
    # Evaluate stakes before the to-be-added block
    evm_state = self._get_evm_state_for_new_block(block, ephemeral=True)
    config = self.shard_config.POSW_CONFIG
    stakes = evm_state.get_balance(
        coinbase_address, self.env.quark_chain_config.genesis_token
    )
    # each TOTAL_STAKE_PER_BLOCK of stake allows one discounted block per
    # window, capped at the window size
    block_threshold = stakes // config.TOTAL_STAKE_PER_BLOCK
    block_threshold = min(config.WINDOW_SIZE, block_threshold)
    # The func is inclusive, so need to fetch block counts until prev block
    # Also only fetch prev window_size - 1 block counts because the
    # new window should count the current block
    block_cnt = self._get_posw_coinbase_blockcnt(
        header.hash_prev_minor_block, length=config.WINDOW_SIZE - 1
    )
    cnt = block_cnt.get(coinbase_address, 0)
    if cnt < block_threshold:
        diff //= config.DIFF_DIVIDER
    # TODO: remove it if verified not time consuming
    passed_ms = (time.time() - start_time) * 1000
    Logger.debug("Adjust PoSW diff took %s milliseconds" % passed_ms)
    return diff
def _get_evm_state_from_height(self, height: Optional[int]) -> Optional[EvmState]:
    """EVM state as of `height`; the live tip state when height is None or
    equals the tip height. Returns None if the block cannot be found."""
    if height is None or height == self.header_tip.height:
        return self.evm_state
    # `_get_evm_state_for_new_block` evaluates the state *before* the given
    # block, so fetch the block one above the requested height
    next_block = self.db.get_minor_block_by_height(height + 1)
    if not next_block:
        Logger.error("Failed to get block at height {}".format(height))
        return None
    return self._get_evm_state_for_new_block(next_block)
def __get_coinbase_addresses_until_block(
    self, header_hash: bytes, length: int
) -> List[bytes]:
    """Get coinbase addresses up until block of given hash within the window."""
    curr_block = self.db.get_minor_block_by_hash(header_hash)
    if not curr_block:
        raise ValueError("curr block not found: hash {}".format(header_hash.hex()))
    header = curr_block.header
    height = header.height
    prev_hash = header.hash_prev_minor_block
    if prev_hash in self.coinbase_addr_cache:  # mem cache hit
        # slide the parent's window forward by one block
        _, addrs = self.coinbase_addr_cache[prev_hash]
        addrs = addrs.copy()
        if len(addrs) == length:
            # window full: evict the oldest entry before appending
            addrs.popleft()
        addrs.append(header.coinbase_address.recipient)
    else:  # miss, iterating DB
        # walk back up to `length` ancestors (or until genesis)
        addrs = deque()
        for _ in range(length):
            addrs.appendleft(header.coinbase_address.recipient)
            if header.height == 0:
                break
            header = self.db.get_minor_block_header_by_hash(
                header.hash_prev_minor_block
            )
            check(header is not None, "mysteriously missing block")
    self.coinbase_addr_cache[header_hash] = (height, addrs)
    # in case cached too much, clean up
    if len(self.coinbase_addr_cache) > 128:  # size around 640KB if window size 256
        self.coinbase_addr_cache = {
            k: (h, addrs)
            for k, (h, addrs) in self.coinbase_addr_cache.items()
            if h > height - 16  # keep most recent ones
        }
    return list(addrs)
@functools.lru_cache(maxsize=16)
def _get_posw_coinbase_blockcnt(
self, header_hash: bytes, length: int = None
) -> Dict[bytes, int]:
""" PoSW needed function: get coinbase addresses up until the given block
hash (inclusive) along with block counts within | |
required
def test_create_tempo_go_gps_device_with_vehicle_status(self):
    """Creating a device with an explicit vehicle_status should succeed."""
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    self.minimum_valid_data["vehicle_status"] = "unloaded"
    resp = self.client.post(self.create_url, self.minimum_valid_data, format='json')
    self.assertEqual(status.HTTP_201_CREATED, resp.status_code)
# Adding is_active field to minimum valid data required
def test_create_tempo_go_gps_device_with_is_active(self):
    """Creating a device with an explicit is_active flag should succeed."""
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    self.minimum_valid_data["is_active"] = True
    resp = self.client.post(self.create_url, self.minimum_valid_data, format='json')
    self.assertEqual(status.HTTP_201_CREATED, resp.status_code)
# Adding driver field to minimum valid data required
def test_create_tempo_go_gps_device_with_driver(self):
    """Creating a device linked to an existing driver should succeed."""
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    self.minimum_valid_data["driver"] = self.driver.id
    resp = self.client.post(self.create_url, self.minimum_valid_data, format='json')
    self.assertEqual(status.HTTP_201_CREATED, resp.status_code)
def test_create_tempo_go_gps_device_with_full_valid_data(self):
    """Creating a device from the complete valid payload should succeed."""
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    resp = self.client.post(self.create_url, self.tempo_go_gps_device_data, format='json')
    self.assertEqual(status.HTTP_201_CREATED, resp.status_code)
def test_create_tempo_go_gps_device_invalid_data(self):
    """Each malformed field value must be rejected with a 400."""
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    bad_fields = [
        ("location_time", "10-09-2015"),        # wrong datetime format
        ("vehicle_status", "InvalidChoice"),    # not a valid choice
        ("latitude", "InvalidLatitude"),        # not a number
        ("driver", -1),                         # impossible pk
        ("driver", "asdsad"),                   # non-numeric pk
        ("driver", self.driver.id * 100),       # non-existent pk
    ]
    for field, bad_value in bad_fields:
        payload = self.tempo_go_gps_device_data.copy()
        payload[field] = bad_value
        resp = self.client.post(self.create_url, payload, format='json')
        self.assertEqual(status.HTTP_400_BAD_REQUEST, resp.status_code)
def test_all_4_function_without_token(self):
    """Every endpoint must reject unauthenticated requests with 401."""
    calls = [
        lambda: self.client.post(self.create_url, self.tempo_go_gps_device_data, format='json'),
        lambda: self.client.put(self.update_url, self.tempo_go_gps_device_data, format='json'),
        lambda: self.client.patch(self.partial_update_url, self.tempo_go_gps_device_data,
                                  format='json'),
        lambda: self.client.get(self.retrieve_url),
    ]
    for call in calls:
        resp = call()
        self.assertEqual(status.HTTP_401_UNAUTHORIZED, resp.status_code)
def test_all_4_functions_with_invalid_token(self):
    """A corrupted token must yield 401 on every endpoint."""
    self.client.credentials(HTTP_AUTHORIZATION=self.token + "invalidToken")
    calls = [
        lambda: self.client.post(self.create_url, self.tempo_go_gps_device_data, format='json'),
        lambda: self.client.put(self.update_url, self.tempo_go_gps_device_data, format='json'),
        lambda: self.client.patch(self.partial_update_url, self.tempo_go_gps_device_data,
                                  format='json'),
        lambda: self.client.get(self.retrieve_url),
    ]
    for call in calls:
        resp = call()
        self.assertEqual(status.HTTP_401_UNAUTHORIZED, resp.status_code)
def test_update_tempo_go_gps_device_with_minimum_valid_date(self):
    """PUT with the minimal payload is accepted."""
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    resp = self.client.put(self.update_url, self.minimum_valid_data, format='json')
    self.assertEqual(status.HTTP_202_ACCEPTED, resp.status_code)
def test_update_tempo_go_gps_device_with_full_valid_data(self):
    """PUT with the complete payload is accepted."""
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    resp = self.client.put(self.update_url, self.tempo_go_gps_device_data, format='json')
    self.assertEqual(status.HTTP_202_ACCEPTED, resp.status_code)
def test_partial_update_tempo_go_gps_device_with_valid_data(self):
    """PATCH with individual valid fields returns 202 for each."""
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    payloads = [
        {"driver_number": "1234567890"},
        {"location_time": "2017-01-28T22:22:30.792000"},
        {"driver": self.driver.id},
    ]
    for payload in payloads:
        resp = self.client.patch(self.partial_update_url, payload, format='json')
        self.assertEqual(status.HTTP_202_ACCEPTED, resp.status_code)
def test_retrieve_existing_tempo_go_gps_device(self):
    """GET on an existing device returns 200."""
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    resp = self.client.get(self.retrieve_url, format='json')
    self.assertEqual(status.HTTP_200_OK, resp.status_code)
def test_retrieve_non_existing_tempo_go_gps_device(self):
    """GET on an id that cannot exist returns 404."""
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    missing_pk = self.tempo_go_gps_device.id * 1000
    bad_retrieve_url = reverse("driver_tempo_go_gps_device_retrieve",
                               kwargs={"pk": missing_pk})
    resp = self.client.get(bad_retrieve_url, format='json')
    self.assertEqual(status.HTTP_404_NOT_FOUND, resp.status_code)
class TempoGoGPSDeviceLogTest(DriverSetup):
"""
Test cases for Tempo Go GPS Device Log
"""
def setUp(self):
    """Build full/minimal log payloads and endpoint URLs for the GPS-log tests."""
    super().setUp()
    # full payload covering every writable field of a device log
    self.tempo_go_gps_device_log_data = {
        "gps_log_id": "59a6827a4824d40814649809",
        "datetime": "2017-08-30T14:07:32",
        "vehicle_id": "57240867-e4c3-471a-9d69-d2ccc6284541",
        "latitude": "17.7029022222",
        "longitude": "83.2226755556",
        "speed": 0,
        "driver_name": "<NAME>",
        "driver_number": "8985118722",
        "driving_licence_number": "AP03520458/2005 VZM",
        "vehicle_number": "ap35x7379",
        "vehicle_type": None,
        "vehicle_status": "unloaded",
        "device": self.tempo_go_gps_device.id,
    }
    # smallest payload the create endpoint should accept
    self.minimum_valid_data = {
        "gps_log_id": "59a6827a4824d40814649809",
        "vehicle_id": "cg04lw9448",
        "datetime": "2017-05-06T11:36:30"
    }
    # endpoint URLs resolved against the fixture log created in DriverSetup
    self.create_url = reverse("driver_tempo_go_gps_device_log_create")
    self.update_url = reverse("driver_tempo_go_gps_device_log_update",
                              kwargs={"pk": self.tempo_go_gps_device_log.id})
    self.partial_update_url = reverse("driver_tempo_go_gps_device_log_partial_update",
                                      kwargs={"pk": self.tempo_go_gps_device_log.id})
    self.retrieve_url = reverse("driver_tempo_go_gps_device_log_retrieve",
                                kwargs={"pk": self.tempo_go_gps_device_log.id})
def test_create_tempo_go_gps_device_log_with_minimum_valid_data(self):
    """POST with only the required fields must return 201."""
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    resp = self.client.post(self.create_url, self.minimum_valid_data, format='json')
    self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
def test_create_tempo_go_gps_device_log_with_other_data_format(self):
    # Placeholder: intentionally empty.  Presumably meant to POST the
    # payload in a non-JSON format (e.g. multipart) — TODO implement.
    pass
def test_create_tempo_go_gps_device_log_with_valid_deleted(self):
    """Boolean-compatible 'deleted' values must all create the log (201)."""
    payload = self.minimum_valid_data.copy()
    # updates are applied cumulatively, mirroring the sequential mutations
    for updates in (
        {"deleted": True},
        {"deleted": False, "gps_log_id": "asd1200"},
        {"deleted": 0, "gps_log_id": "asd1201"},
        {"deleted": 1, "gps_log_id": "asd1202"},
    ):
        payload.update(updates)
        self.client.credentials(HTTP_AUTHORIZATION=self.token)
        resp = self.client.post(self.create_url, payload, format='json')
        self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
def test_create_tempo_go_gps_log_device_with_invalid_deleted(self):
    """Non-boolean 'deleted' values must be rejected with 400."""
    payload = self.minimum_valid_data.copy()
    for updates in (
        {"deleted": "invalid"},
        {"deleted": -12, "gps_log_id": "asd1200"},
        {"deleted": None, "gps_log_id": "asd1201"},
    ):
        payload.update(updates)
        self.client.credentials(HTTP_AUTHORIZATION=self.token)
        resp = self.client.post(self.create_url, payload, format='json')
        self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_tempo_go_gps_device_log_with_valid_deleted_on(self):
    """datetime, stringified datetime and None are valid 'deleted_on'."""
    payload = self.minimum_valid_data.copy()
    for updates in (
        {"deleted_on": datetime.now()},
        {"deleted_on": str(datetime.now()), "gps_log_id": "asd1200"},
        {"deleted_on": None, "gps_log_id": "asd1201"},
    ):
        payload.update(updates)
        self.client.credentials(HTTP_AUTHORIZATION=self.token)
        resp = self.client.post(self.create_url, payload, format='json')
        self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
def test_create_tempo_go_gps_device_log_with_invalid_deleted_on(self):
    """Unparseable 'deleted_on' strings must be rejected with 400."""
    payload = self.minimum_valid_data.copy()
    for updates in (
        {"deleted_on": "invalid_format"},
        {"deleted_on": "09/12/18", "gps_log_id": "asd1200"},
        {"deleted_on": "09:12:18:20:20:300", "gps_log_id": "asd1201"},
    ):
        payload.update(updates)
        self.client.credentials(HTTP_AUTHORIZATION=self.token)
        resp = self.client.post(self.create_url, payload, format='json')
        self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_tempo_go_gps_device_log_with_valid_gps_log_id(self):
    """gps_log_id up to 50 chars (the field max) must be accepted."""
    payload = self.minimum_valid_data.copy()
    for updates in (
        {"gps_log_id": "valid_log"},
        {"gps_log_id": generate_random_string(49)},
        {"gps_log_id": generate_random_string(50)},
    ):
        payload.update(updates)
        self.client.credentials(HTTP_AUTHORIZATION=self.token)
        resp = self.client.post(self.create_url, payload, format='json')
        self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
def test_create_tempo_go_gps_device_log_with_invalid_gps_log_id(self):
    """None or an over-length (51 char) gps_log_id must yield 400."""
    payload = self.minimum_valid_data.copy()
    for updates in (
        {"gps_log_id": None},
        {"gps_log_id": generate_random_string(51)},
    ):
        payload.update(updates)
        self.client.credentials(HTTP_AUTHORIZATION=self.token)
        resp = self.client.post(self.create_url, payload, format='json')
        self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_tempo_go_gps_device_log_with_valid_datetime(self):
    """datetime objects and their str() form are valid 'datetime'."""
    payload = self.minimum_valid_data.copy()
    for updates in (
        {"datetime": datetime.now()},
        {"datetime": str(datetime.now()), "gps_log_id": "asd1200"},
    ):
        payload.update(updates)
        self.client.credentials(HTTP_AUTHORIZATION=self.token)
        resp = self.client.post(self.create_url, payload, format='json')
        self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
def test_create_tempo_go_gps_device_log_with_invalid_datetime(self):
    """Unparseable 'datetime' values must be rejected with 400."""
    payload = self.minimum_valid_data.copy()
    # the last case changes only deleted_on/gps_log_id: the payload still
    # carries the invalid "09:12:18:20:20:300" datetime, so 400 again
    for updates in (
        {"datetime": "invalid_format"},
        {"datetime": "09/12/18", "gps_log_id": "asd1200"},
        {"datetime": "09:12:18:20:20:300", "gps_log_id": "asd1201"},
        {"deleted_on": None, "gps_log_id": "asd1202"},
    ):
        payload.update(updates)
        self.client.credentials(HTTP_AUTHORIZATION=self.token)
        resp = self.client.post(self.create_url, payload, format='json')
        self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_tempo_go_gps_device_log_with_valid_vehicle_id(self):
    """vehicle_id up to 50 chars (the field max) must be accepted."""
    payload = self.minimum_valid_data.copy()
    for updates in (
        {"vehicle_id": "mh2000"},
        {"vehicle_id": generate_random_string(49), "gps_log_id": "asd1200"},
        {"vehicle_id": generate_random_string(50), "gps_log_id": "asd1201"},
    ):
        payload.update(updates)
        self.client.credentials(HTTP_AUTHORIZATION=self.token)
        resp = self.client.post(self.create_url, payload, format='json')
        self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
def test_create_tempo_go_gps_device_log_with_invalid_vehicle_id(self):
    """Over-length (51 char) or None vehicle_id must yield 400."""
    payload = self.minimum_valid_data.copy()
    for updates in (
        {"vehicle_id": generate_random_string(51)},
        {"vehicle_id": None, "gps_log_id": "asd1202"},
    ):
        payload.update(updates)
        self.client.credentials(HTTP_AUTHORIZATION=self.token)
        resp = self.client.post(self.create_url, payload, format='json')
        self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_tempo_go_gps_device_log_with_valid_longitude(self):
    """Floats within the decimal-places limit, or None, are valid."""
    payload = self.minimum_valid_data.copy()
    for updates in (
        {"longitude": 1232333.1231},
        {"longitude": 1.1234567891, "gps_log_id": "asd1200"},
        {"longitude": None, "gps_log_id": "asd1201"},
    ):
        payload.update(updates)
        self.client.credentials(HTTP_AUTHORIZATION=self.token)
        resp = self.client.post(self.create_url, payload, format='json')
        self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
def test_create_tempo_go_gps_device_log_with_invalid_longitude(self):
    """Non-numeric or over-precise longitude must yield 400."""
    payload = self.minimum_valid_data.copy()
    for updates in (
        {"longitude": "invalid"},
        # 11 decimal places: one past the serializer's limit
        {"longitude": 1.12345678911, "gps_log_id": "asd1201"},
    ):
        payload.update(updates)
        self.client.credentials(HTTP_AUTHORIZATION=self.token)
        resp = self.client.post(self.create_url, payload, format='json')
        self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_tempo_go_gps_device_log_with_valid_latitude(self):
    """Floats within the decimal-places limit, or None, are valid."""
    payload = self.minimum_valid_data.copy()
    for updates in (
        {"latitude": 1232333.1231},
        {"latitude": 1.1234567891, "gps_log_id": "asd1200"},
        {"latitude": None, "gps_log_id": "asd1201"},
    ):
        payload.update(updates)
        self.client.credentials(HTTP_AUTHORIZATION=self.token)
        resp = self.client.post(self.create_url, payload, format='json')
        self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
def test_create_tempo_go_gps_device_log_with_invalid_latitude(self):
    """Non-numeric or over-precise latitude must yield 400."""
    payload = self.minimum_valid_data.copy()
    for updates in (
        {"latitude": "invalid"},
        # 11 decimal places: one past the serializer's limit
        {"latitude": 1.12345678911, "gps_log_id": "asd1200"},
    ):
        payload.update(updates)
        self.client.credentials(HTTP_AUTHORIZATION=self.token)
        resp = self.client.post(self.create_url, payload, format='json')
        self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_tempo_go_gps_device_log_with_valid_driver_name(self):
    """driver_name up to 50 chars, or None, must be accepted."""
    payload = self.minimum_valid_data.copy()
    for updates in (
        {"driver_name": "MyNameIsKhan"},
        {"driver_name": generate_random_string(49), "gps_log_id": "asd1201"},
        {"driver_name": generate_random_string(50), "gps_log_id": "asd1202"},
        {"driver_name": None, "gps_log_id": "asd1203"},
    ):
        payload.update(updates)
        self.client.credentials(HTTP_AUTHORIZATION=self.token)
        resp = self.client.post(self.create_url, payload, format='json')
        self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
def test_create_tempo_go_gps_device_log_with_invalid_driver_name(self):
    """An over-length (51 char) driver_name must yield 400."""
    payload = self.minimum_valid_data.copy()
    payload["driver_name"] = generate_random_string(51)
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    resp = self.client.post(self.create_url, payload, format='json')
    self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_tempo_go_gps_device_log_with_valid_driver_number(self):
    """10-digit mobile numbers, or None, must be accepted."""
    payload = self.minimum_valid_data.copy()
    for updates in (
        {"driver_number": "1800140020"},
        {"driver_number": "9878787878", "gps_log_id": "asd1200"},
        {"driver_number": None, "gps_log_id": "asd1202"},
    ):
        payload.update(updates)
        self.client.credentials(HTTP_AUTHORIZATION=self.token)
        resp = self.client.post(self.create_url, payload, format='json')
        self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
def test_create_tempo_go_gps_device_log_with_invalid_driver_number(self):
    """Leading zero, wrong length or non-digit numbers must yield 400."""
    payload = self.minimum_valid_data.copy()
    for updates in (
        {"driver_number": "0123456789"},
        {"driver_number": "123456789", "gps_log_id": "asd1200"},
        {"driver_number": "12345678911", "gps_log_id": "asd1201"},
        {"driver_number": "12345ab678", "gps_log_id": "asd1202"},
        {"driver_number": "invalid123", "gps_log_id": "asd1203"},
    ):
        payload.update(updates)
        self.client.credentials(HTTP_AUTHORIZATION=self.token)
        resp = self.client.post(self.create_url, payload, format='json')
        self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_tempo_go_gps_device_log_with_valid_driving_licence_number(self):
    """driving_licence_number up to 20 chars must be accepted."""
    payload = self.minimum_valid_data.copy()
    for updates in (
        {"driving_licence_number": "dl12ab35844"},
        {"driving_licence_number": generate_random_string(19), "gps_log_id": "asd1200"},
        {"driving_licence_number": generate_random_string(20), "gps_log_id": "asd1201"},
    ):
        payload.update(updates)
        self.client.credentials(HTTP_AUTHORIZATION=self.token)
        resp = self.client.post(self.create_url, payload, format='json')
        self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
def test_create_tempo_go_gps_device_log_with_invalid_driving_licence_number(self):
    """An over-length (21 char) driving_licence_number must yield 400."""
    payload = self.minimum_valid_data.copy()
    payload["driving_licence_number"] = generate_random_string(21)
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    resp = self.client.post(self.create_url, payload, format='json')
    self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_tempo_go_gps_device_log_with_valid_vehicle_number(self):
    """Plain and hyphenated registration numbers, or None, are valid."""
    payload = self.minimum_valid_data.copy()
    for updates in (
        {"vehicle_number": "dl12ab5844"},
        {"vehicle_number": "MH-12-Bom-2018", "gps_log_id": "asd1200"},
        {"vehicle_number": "MH-12-B2018", "gps_log_id": "asd1201"},
        {"vehicle_number": None, "gps_log_id": "asd1202"},
    ):
        payload.update(updates)
        self.client.credentials(HTTP_AUTHORIZATION=self.token)
        resp = self.client.post(self.create_url, payload, format='json')
        self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
def test_create_tempo_go_gps_device_log_with_invalid_vehicle_number(self):
data = self.minimum_valid_data.copy()
data["vehicle_number"] = "m12bom2018"
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
data["vehicle_number"] = "12MH2018BOM"
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
data["vehicle_number"] = "invalid"
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
| |
import math
import time
import numpy as np
import multiprocessing as mp
from typing import List, Callable
from flare.kernels.utils import from_mask_to_args, from_grad_to_mask
# Module-level registries keyed by GP instance name; read by the matrix
# workers below via _global_training_data[name].  Presumably populated by
# the GP object elsewhere in the package (not set anywhere in this file) —
# this lets child processes look the data up instead of pickling it.
_global_training_data = {}
_global_training_labels = {}
def queue_wrapper(result_queue, wid,
                  func, args):
    """Run ``func(*args)`` and push ``(wid, result)`` onto the queue.

    Used as an ``mp.Process`` target so each worker reports its result
    tagged with its worker id.
    """
    outcome = func(*args)
    result_queue.put((wid, outcome))
def partition_cr(n_sample, size, n_cpus):
    """Partition a ``size`` x ``size`` symmetric matrix into blocks.

    The chunk edge is enlarged (never shrunk) so the number of blocks is
    close to ``n_cpus``, since ``mp.Process`` cannot change thread count.

    :return: (list of (s1, e1, s2, e2) index tuples covering the upper
        triangle of blocks, number of blocks)
    """
    # chunk edge suggested by splitting the triangle over n_cpus workers
    suggested = int(math.ceil(np.sqrt(size * size / n_cpus / 2.)))
    if suggested > n_sample:
        n_sample = suggested

    block_id = []
    row_batch = 0
    e1 = 0
    while e1 < size:
        s1 = int(n_sample * row_batch)
        e1 = int(min(s1 + n_sample, size))
        # columns start at the diagonal block of this row
        col_batch = row_batch
        row_batch += 1
        e2 = 0
        while e2 < size:
            s2 = int(n_sample * col_batch)
            e2 = int(min(s2 + n_sample, size))
            block_id.append((s1, e1, s2, e2))
            col_batch += 1
    return block_id, len(block_id)
def partition_c(n_sample, size, n_cpus):
    """Partition a length-``size`` vector into ~``n_cpus`` chunks.

    The chunk length is enlarged (never shrunk) so the number of chunks
    matches the worker count, since ``mp.Process`` cannot change thread
    count.

    :return: (list of (start, stop) tuples, number of chunks)
    """
    per_cpu = int(math.ceil(size / n_cpus))
    if per_cpu > n_sample:
        n_sample = per_cpu

    block_id = []
    start = 0
    while start < size:
        stop = min(start + n_sample, size)
        block_id.append((start, stop))
        start = stop
    return block_id, len(block_id)
def partition_update(n_sample, size, old_size, n_cpus):
    """Blocks needed to grow a kernel matrix from ``old_size`` to ``size``.

    Covers old-rows x new-columns plus new-rows x new-columns; the old x old
    part is already computed and skipped.

    :return: (list of (s1, e1, s2, e2) tuples, number of blocks)
    """
    ns = int(math.ceil(size / n_sample))
    # rough count of work units; shrink the chunk if there are too few
    nproc = (size * 3 - old_size * 3) * (ns + old_size * 3) // 2
    if nproc < n_cpus:
        n_sample = int(math.ceil(size / np.sqrt(n_cpus * 2)))
        ns = int(math.ceil(size / n_sample))

    ns_new = int(math.ceil((size - old_size) / n_sample))
    old_ns = int(math.ceil(old_size / n_sample))

    def bounds(ibatch, offset, limit):
        # chunk [lo, hi) for batch index ibatch, shifted by offset
        lo = int(n_sample * ibatch) + offset
        return lo, int(min(lo + n_sample, limit))

    block_id = []
    # old rows x new columns
    for i1 in range(old_ns):
        s1, e1 = bounds(i1, 0, old_size)
        for i2 in range(ns_new):
            s2, e2 = bounds(i2, old_size, size)
            block_id.append((s1, e1, s2, e2))
    # new rows x new columns
    for i1 in range(ns_new):
        s1, e1 = bounds(i1, old_size, size)
        for i2 in range(ns_new):
            s2, e2 = bounds(i2, old_size, size)
            block_id.append((s1, e1, s2, e2))
    return block_id, len(block_id)
def obtain_noise_len(hyps, hyps_mask):
    """Extract the noise hyperparameter from ``hyps`` and its mask.

    :return: (sigma_n, number of non-noise hyperparameters, train_noise)
    """
    # by convention sigma_n is the last hyperparameter
    sigma_n = hyps[-1]
    non_noise_hyps = len(hyps) - 1
    train_noise = True
    if hyps_mask is None:
        return sigma_n, non_noise_hyps, train_noise

    train_noise = hyps_mask.get('train_noise', True)
    if train_noise is False:
        # noise is frozen: read it from the stored original hyps and
        # treat every entry of ``hyps`` as trainable
        sigma_n = hyps_mask['original'][-1]
        non_noise_hyps = len(hyps)
    return sigma_n, non_noise_hyps, train_noise
#######################################
##### KY MATRIX FUNCTIONS
#######################################
def get_ky_mat_pack(hyps: np.ndarray, name: str,
                    s1: int, e1: int, s2: int, e2: int,
                    same: bool, kernel, cutoffs, hyps_mask):
    """Compute one block of the covariance matrix between two index ranges.

    :param hyps: list of hyper-parameters
    :param name: name of the gp instance (key into the global registry)
    :param same: whether the row and column ranges coincide (diagonal block)
    :param kernel: function object of the kernel
    :param cutoffs: cutoff values used for the atomic environments
    :type cutoffs: list of 2 float numbers
    :param hyps_mask: dictionary used for multi-group hyperparameters
    :return: covariance matrix block of shape (3*(e1-s1), 3*(e2-s2))
    """
    envs = _global_training_data[name]
    nrow = (e1 - s1) * 3
    ncol = (e2 - s2) * 3
    block = np.zeros([nrow, ncol])
    coords = [1, 2, 3]

    kern_args = from_mask_to_args(hyps, hyps_mask, cutoffs)
    for i in range(nrow):
        env_i = envs[i // 3 + s1]
        d_i = coords[i % 3]
        # diagonal blocks are symmetric: fill the upper triangle and mirror
        start = i if same else 0
        for j in range(start, ncol):
            env_j = envs[j // 3 + s2]
            d_j = coords[j % 3]
            kij = kernel(env_i, env_j, d_i, d_j, *kern_args)
            block[i, j] = kij
            if same:
                block[j, i] = kij
    return block
def get_ky_mat(hyps: np.ndarray, name: str,
               kernel, cutoffs=None, hyps_mask=None,
               n_cpus=1, n_sample=100):
    """Assemble the full covariance matrix K + sigma_n**2 * I.

    :param hyps: list of hyper-parameters
    :param name: name of the gp instance (key into the global registry)
    :param kernel: function object of the kernel
    :param cutoffs: cutoff values used for the atomic environments
    :type cutoffs: list of 2 float numbers
    :param hyps_mask: dictionary used for multi-group hyperparameters
    :param n_cpus: worker count; None means use every available core
    :param n_sample: block size used to partition the matrix
    :return: covariance matrix of shape (3*size, 3*size)
    """
    size = len(_global_training_data[name])
    size3 = 3 * size
    if n_cpus is None:
        n_cpus = mp.cpu_count()

    if n_cpus == 1:
        # serial path: a single block covering the whole matrix
        k_mat = get_ky_mat_pack(hyps, name, 0, size, 0, size, True,
                                kernel, cutoffs, hyps_mask)
    else:
        block_id, nbatch = partition_cr(n_sample, size, n_cpus)
        result_queue = mp.Queue()
        workers = []
        for wid in range(nbatch):
            s1, e1, s2, e2 = block_id[wid]
            workers.append(mp.Process(
                target=queue_wrapper,
                args=(result_queue, wid, get_ky_mat_pack,
                      (hyps, name, s1, e1, s2, e2,
                       s1 == s2, kernel, cutoffs, hyps_mask))))
        for w in workers:
            w.start()

        # collect blocks in arrival order; wid identifies the slot
        k_mat = np.zeros([size3, size3])
        for _ in range(nbatch):
            wid, chunk = result_queue.get(block=True)
            s1, e1, s2, e2 = block_id[wid]
            k_mat[s1 * 3:e1 * 3, s2 * 3:e2 * 3] = chunk
            if s1 != s2:
                # mirror the off-diagonal block
                k_mat[s2 * 3:e2 * 3, s1 * 3:e1 * 3] = chunk.T
        for w in workers:
            w.join()
        del result_queue
        del workers

    # add the noise term on the diagonal
    sigma_n, _, __ = obtain_noise_len(hyps, hyps_mask)
    k_mat += sigma_n ** 2 * np.eye(size3)
    return k_mat
def get_like_from_ky_mat(ky_mat, name):
    """Compute the log marginal likelihood from the covariance matrix.

    :param ky_mat: covariance matrix K + sigma_n**2 * I
    :param name: name of the gp instance whose training labels to use
    :return: float likelihood, or -1e8 when ky_mat is singular or not
        positive definite
    """
    # BUG FIX: ``labels`` was never defined here, so the bare ``except``
    # swallowed the NameError and this function unconditionally returned
    # -1e8.  Look the labels up and catch only linear-algebra failures.
    labels = _global_training_labels[name]
    try:
        # Cholesky first: it is the operation that detects a
        # non-positive-definite matrix
        l_mat = np.linalg.cholesky(ky_mat)
        ky_mat_inv = np.linalg.inv(ky_mat)
        alpha = np.matmul(ky_mat_inv, labels)
    except np.linalg.LinAlgError:
        return -1e8
    return get_like_from_mats(ky_mat, l_mat, alpha, name)
def get_like_from_mats(ky_mat, l_mat, alpha, name):
    """Log marginal likelihood from precomputed factorization products.

    :param ky_mat: the covariance matrix
    :param l_mat: its Cholesky factor
    :param alpha: ky_mat^{-1} @ labels
    :param name: name of the gp instance (key into the label registry)
    :return: float, likelihood
    """
    labels = _global_training_labels[name]
    # standard GP log-likelihood: data fit - complexity - normalization
    data_fit = -0.5 * np.matmul(labels, alpha)
    complexity = np.sum(np.log(np.diagonal(l_mat)))
    norm_const = math.log(2 * np.pi) * ky_mat.shape[1] / 2
    return data_fit - complexity - norm_const
#######################################
##### KY MATRIX FUNCTIONS and gradients
#######################################
def get_ky_and_hyp_pack(name, s1, e1, s2, e2, same: bool,
                        hyps: np.ndarray, kernel_grad, cutoffs=None, hyps_mask=None):
    """Compute a covariance block together with its hyperparameter gradient.

    :param name: name of the gp instance (key into the global registry)
    :param same: whether the row and column ranges coincide (diagonal block)
    :param hyps: list of hyper-parameters
    :param kernel_grad: function object of the kernel gradient
    :param cutoffs: cutoff values used for the atomic environments
    :type cutoffs: list of 2 float numbers
    :param hyps_mask: dictionary used for multi-group hyperparameters
    :return: (hyp_mat, k_mat) — gradient tensor and covariance block
    """
    # sigma_n is the last hyperparameter by convention; only the
    # non-noise count is needed to size the gradient tensor
    sigma_n, non_noise_hyps, _ = obtain_noise_len(hyps, hyps_mask)

    nrow = (e1 - s1) * 3
    ncol = (e2 - s2) * 3
    k_mat = np.zeros([nrow, ncol])
    hyp_mat = np.zeros([non_noise_hyps, nrow, ncol])

    kern_args = from_mask_to_args(hyps, hyps_mask, cutoffs)
    coords = [1, 2, 3]
    envs = _global_training_data[name]

    for i in range(nrow):
        env_i = envs[i // 3 + s1]
        d_i = coords[i % 3]
        # diagonal blocks are symmetric: fill the upper triangle and mirror
        start = i if same else 0
        for j in range(start, ncol):
            env_j = envs[j // 3 + s2]
            d_j = coords[j % 3]
            cov = kernel_grad(env_i, env_j, d_i, d_j, *kern_args)
            grad = from_grad_to_mask(cov[1], hyps_mask)
            k_mat[i, j] = cov[0]
            hyp_mat[:, i, j] = grad
            if same:
                k_mat[j, i] = cov[0]
                hyp_mat[:, j, i] = grad
    return hyp_mat, k_mat
def get_ky_and_hyp(hyps: np.ndarray, name,
kernel_grad, cutoffs=None,
hyps_mask=None,
n_cpus=1, n_sample=100):
"""
parallel version of get_ky_and_hyp
:param hyps: list of hyper-parameters
:param name: name of the gp instance.
:param kernel_grad: function object of the kernel gradient
:param cutoffs: The cutoff values used for the atomic environments
:type cutoffs: list of 2 float numbers
:param hyps_mask: dictionary used for multi-group hyperparmeters
:param n_cpus: number of cpus to use.
:param n_sample: the size of block for matrix to compute
:return: hyp_mat, ky_mat
"""
training_data = _global_training_data[name]
size = len(training_data)
size3 = size*3
sigma_n, non_noise_hyps, train_noise = obtain_noise_len(hyps, hyps_mask)
if (n_cpus is None):
n_cpus = mp.cpu_count()
if (n_cpus == 1):
hyp_mat0, k_mat = get_ky_and_hyp_pack(
name, 0, size, 0, size, True,
hyps, kernel_grad, cutoffs, hyps_mask)
else:
block_id, nbatch = partition_cr(n_sample, size, n_cpus)
result_queue = mp.Queue()
children = []
for wid in range(nbatch):
s1, e1, s2, e2 = block_id[wid]
children.append(
mp.Process(
target=queue_wrapper,
args=(result_queue, wid,
get_ky_and_hyp_pack,
(name, s1, e1, s2, e2, s1==s2,
hyps, kernel_grad, cutoffs, hyps_mask))))
# Run child processes.
for c in children:
| |
<reponame>scivision/isrutils<gh_stars>1-10
#!/usr/bin/env python
from configparser import ConfigParser
from pathlib import Path
import logging
from sys import stderr
from time import time
import h5py
from datetime import datetime
import numpy as np
from numpy.ma import masked_invalid
import xarray
from matplotlib.pyplot import figure, subplots, gcf
from mpl_toolkits.mplot3d import Axes3D # noqa: F401
from matplotlib.dates import DateFormatter
from matplotlib.pyplot import draw, pause, show
from matplotlib.colors import LogNorm
from matplotlib.cm import jet
import matplotlib.animation as anim
import matplotlib.gridspec as gridspec
import isrutils
from GeoData.plotting import polarplot
from sciencedates import find_nearest as findnearest
# from sciencedates.ticks import timeticks
from GeoData.plotting import plotazelscale
from .common import findindex2Dsphere, timesync, projectisrhist
from . import str2dt
from . import isrselect, readACF
ALTMIN = 60e3 # meters
def writeplots(fg, t="", odir=None, ctxt="", ext=".png"):
    """Save figure *fg* under *odir*, named from *ctxt* and timestamp *t*.

    No-op when *odir* is falsy.  *t* may be an xarray time coordinate or a
    numeric timestamp (both interpreted as nanoseconds since the epoch), or
    any object whose str() is usable in a filename.
    """
    from matplotlib.pyplot import close

    if odir:
        odir = Path(odir).expanduser()
        odir.mkdir(parents=True, exist_ok=True)

        if isinstance(t, xarray.DataArray):
            t = datetime.utcfromtimestamp(t.item() / 1e9)
        elif isinstance(t, (float, int)):
            t = datetime.utcfromtimestamp(t / 1e9)

        # :-6 keeps up to millisecond if present.
        # BUG FIX: the old expression built the tuple ``(name, "-")`` and
        # called .replace() on it, raising TypeError on every save.  Make
        # the timestamp filesystem-safe instead: spaces -> '-', drop ':'.
        fname = (ctxt + str(t)[:-6] + ext).replace(" ", "-").replace(":", "")
        ppth = odir / fname
        print("saving", ppth)
        fg.savefig(ppth, dpi=100, bbox_inches="tight")
        close(fg)
# %% looping
def simpleloop(inifn):
    """Read an .ini configuration, find matching ISR HDF5 files and plot each.

    :param inifn: path to the configuration file
    :raises FileNotFoundError: when the data path / file type yields no files
    """
    ini = ConfigParser(
        allow_no_value=True, empty_lines_in_values=False, inline_comment_prefixes=(";"), strict=True
    )
    ini.read(inifn)

    dpath = Path(ini.get("data", "path")).expanduser()
    # BUG FIX: ``"".split(",")`` yields [""], which is truthy, so the
    # "no ftype" branch below was unreachable.  Drop empty entries.
    ft = [t for t in ini.get("data", "ftype", fallback="").split(",") if t]
    # %% parse user directory / file list input
    if dpath.is_dir() and not ft:
        flist = sorted(dpath.glob("*dt*.h5"))
    elif dpath.is_dir() and ft:  # glob pattern per requested file type
        flist = []
        for t in ft:
            flist.extend(sorted(dpath.glob(f"*.{t}.h5")))
    elif dpath.is_file():  # a single file was specified
        # BUG FIX: was ``flist = [flist]`` — a NameError, since flist is
        # not yet defined on this branch.
        flist = [dpath]
    else:
        raise FileNotFoundError(f"unknown path/filetype {dpath} / {ft}")

    if not flist:
        raise FileNotFoundError(f"no files found in {dpath}")
    print(f"examining {len(flist)} {ft} files in {dpath}\n")
    # %% api catchall
    P = {
        "odir": ini.get("plot", "odir", fallback=None),
        "verbose": ini.getboolean("plot", "verbose", fallback=False),
        "scan": ini.getboolean("data", "scan", fallback=False),
        # N times the median is declared a detection
        # NOTE(review): key "medthreas" looks like a typo for "medthres";
        # kept as-is because renaming would break existing .ini files —
        # confirm before changing.
        "medthres": ini.getfloat("data", "medthreas", fallback=2.0),
        "tlim": ini.get("plot", "tlim", fallback=None),
        "beamid": ini.getint("data", "beamid"),
        "acf": ini.getboolean("plot", "acf", fallback=False),
    }
    if P["tlim"]:
        P["tlim"] = P["tlim"].split(",")
    P["tlim"] = str2dt(P["tlim"])

    # parse the "lo,hi" limit options into 2-element float arrays;
    # missing/empty options become [None, None]
    for p in (
        "flim_pl",
        "vlim",
        "vlim_pl",
        "vlim",
        "vlimacf",
        "vlimacfslice",
        "vlimint",
        "zlim",
        "zlim_pl",
        "zsum",
    ):
        val = ini.get("plot", p, fallback=None)
        if not val:  # None or ''
            P[p] = [None, None]
            continue
        P[p] = np.array(val.split(",")).astype(float)
    # %% loop over files
    for f in flist:
        # read data
        specdown, specup, snrsamp, azel, isrlla, snrint, snr30int, ionsum = isrselect(dpath / f, P)
        # %% plot
        # summed ion line over altitude range
        # tic = time()
        hit = plotsumionline(ionsum, None, f, P)
        if isinstance(hit, bool):
            print(f.stem, hit)
        # if P['verbose']: print(f'sum plot took {(time()-tic):.1f} sec.')
        if (
            hit and not P["acf"]
        ):  # if P['acf'], it was already plotted. Otherwise, we plot only if hit
            readACF(f, P)
        if hit or not P["scan"]:
            # 15 sec integration
            plotsnr(snrint, f, P, azel, ctxt="int_")
            # 200 ms integration
            plotsnr(snrsamp, f, P, azel)
            # plasma line spectrum
            plotplasmaline(specdown, specup, f, P, azel)
# %%
def isrstacker(flist, P):
    """Accumulate SNR / spectra across several ISR files, then plot the stack.

    NOTE(review): ``xarray.concat`` takes ``dim=``, not ``axis=`` — the two
    concat calls below will raise TypeError on any second file; verify
    against the xarray version in use.
    NOTE(review): ``isrselect`` is unpacked into 7 values here but 8 in
    ``simpleloop`` (extra ``ionsum``) — confirm the expected arity.
    """
    for fn in flist:
        fn = Path(fn).expanduser()
        if not fn.is_file():
            continue
        specdown, specup, snrsamp, azel, isrlla, snrint, snr30int = isrselect(fn, P)
        # first file seeds the accumulators; later files are concatenated
        if fn.samefile(flist[0]):
            specdowns = specdown
            specups = specup
            snrsamps = snrsamp
            snrints = snrint
            snr30ints = snr30int
        else:
            if snrsamp is not None:
                snrsamps = xarray.concat((snrsamps, snrsamp), axis=1)
            if snrint is not None:
                snrints = xarray.concat((snrints, snrint), axis=1)
            # TODO: other concat & update to xarray syntax
    # %% plots (after the loop: plot the accumulated stacks; ``fn`` is the
    # last file processed)
    plotplasmaline(specdowns, specups, flist, P)
    plotsnr(snrsamps, fn, P)
    # %% ACF
    readACF(fn, P)
    plotsnr(snrints, fn, P)
    plotsnr1d(snr30ints, fn, P)
    plotsnr(snr30ints, fn, P)
    # plotsnrmesh(snr,fn,P)
# %% joint isr optical plot
def dojointplot(ds, spec, freq, beamazel, optical, optazel, optlla, isrlla, heightkm, utopt, P):
    """
    Animate ISR summed ion-line power together with co-registered optical video,
    writing an FFV1-encoded .mkv into P['odir'].

    ds: radar data (xarray.DataArray, summed ion line vs. time)
    spec, freq: not used in this function -- kept for caller compatibility
    beamazel: radar beam azimuth/elevation
    optical: Nframe x ypix x xpix grayscale video stack, or None
    optazel: per-pixel (az, el) mapping for the camera, optlla: camera location
    isrlla: radar location (lat, lon, alt)
    heightkm: altitude at which the beam is projected into the camera frame
    utopt: optical frame times (unix seconds)
    P: plot parameter dict ('tlim', 'odir', 'isrfn', 'zlim', ...)
    """
    vidnorm = LogNorm()
    assert isinstance(ds, xarray.DataArray)
    # %% setup master figure
    fg = figure(figsize=(8, 12))
    gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1])
    # %% setup radar plot(s)
    a1 = fg.add_subplot(gs[1])
    plotsumionline(ds, a1, isrutils.expfn(P["isrfn"]), P["zlim"])
    # moving time-cursor line and label, updated every frame below
    h1 = a1.axvline(np.nan, color="k", linestyle="--")
    t1 = a1.text(0.05, 0.95, "time=", transform=a1.transAxes, va="top", ha="left")
    # %% setup top optical plot
    # NOTE(review): h0/t0/s0/azimg/elimg are only bound when optical is not
    # None, yet the frame loop below dereferences them whenever iopt is not
    # None -- presumably timesync() yields iopt=None in the no-optical case;
    # confirm.
    if optical is not None:
        a0 = fg.add_subplot(gs[0])
        clim = compclim(optical, lower=10, upper=99.99)
        h0 = a0.imshow(
            optical[0, ...],
            origin="lower",
            interpolation="none",
            cmap="gray",
            norm=vidnorm,
            vmin=clim[0],
            vmax=clim[1],
        )
        a0.set_axis_off()
        t0 = a0.set_title("")
        # %% plot magnetic zenith beam
        azimg = optazel[:, 1].reshape(optical.shape[1:])
        elimg = optazel[:, 2].reshape(optical.shape[1:])
        # project the radar beam into camera pixel coordinates at heightkm
        optisrazel = projectisrhist(isrlla, beamazel, optlla, optazel, heightkm)
        br, bc = findindex2Dsphere(azimg, elimg, optisrazel["az"], optisrazel["el"])
        # hollow beam circle
        # a2.scatter(bc,br,s=500,marker='o',facecolors='none',edgecolor='red', alpha=0.5)
        # beam data, filled circle
        s0 = a0.scatter(
            bc,
            br,
            s=2700,
            alpha=0.6,
            linewidths=3,
            edgecolors=jet(np.linspace(ds.min().item(), ds.max().item())),
        )
        a0.autoscale(True, tight=True)
        fg.tight_layout()
    # %% time sync: pair radar time indices with optical frame indices
    tisr = ds.time.data
    Iisr, Iopt = timesync(tisr, utopt, P["tlim"])
    # %% iterate
    first = True
    Writer = anim.writers["ffmpeg"]
    writer = Writer(fps=5, metadata=dict(artist="<NAME>, Ph.D."), codec="ffv1")
    ofn = Path(P["odir"]).expanduser() / (
        "joint_" + str(datetime.fromtimestamp(utopt[0]))[:-3].replace(":", "") + ".mkv"
    )
    print(f"writing {ofn}")
    with writer.saving(fg, str(ofn), 150):
        for iisr, iopt in zip(Iisr, Iopt):
            ctisr = tisr[iisr]
            # %% update isr plot: move the cursor and its label
            h1.set_xdata(ctisr)
            t1.set_text("isr: {}".format(ctisr))
            # %% update hist plot
            if iopt is not None:
                ctopt = datetime.utcfromtimestamp(utopt[iopt])
                h0.set_data(optical[iopt, ...])
                t0.set_text("optical: {}".format(ctopt))
                s0.set_array(
                    ds.loc[ctisr]
                )  # FIXME circle not changing magnetic zenith beam color? NOTE this is isr time index
            # %% anim: show the az/el scale overlay once, on the first optical frame
            if first and iopt is not None:
                plotazelscale(optical[iopt, ...], azimg, elimg)
                show()
                first = False
            #
            draw()
            pause(0.01)
            writer.grab_frame(facecolor="k")
    # NOTE(review): ofn always ends in '.mkv' above, so this branch looks
    # unreachable -- confirm whether a .png path was intended.
    if ofn.suffix == ".png":
        try:
            # ctopt only exists if at least one optical frame was drawn
            writeplots(fg, ctopt, ofn, ctxt="joint")
        except UnboundLocalError:
            writeplots(fg, ctisr, ofn, ctxt="isr")
def compclim(imgs, lower: float = 0.5, upper: float = 99.9, Nsamples: int = 50):
    """Estimate display color limits from a subsample of an image stack.

    imgs: Nframe x ypix x xpix grayscale image stack
    lower, upper: percentile cutoffs in (0, 100)%
    Nsamples: number of frames sampled across the stack (memory bound)
    """
    # evenly spaced frame indices spanning the whole stack
    idx = np.linspace(0, imgs.shape[0], Nsamples, endpoint=False, dtype=int)
    subset = imgs[idx, ...]
    clim = np.percentile(subset, [lower, upper])
    if upper == 100.0:
        # a 100th-percentile request means "the absolute max over ALL frames"
        clim[1] = imgs.max()
    return clim
def plotsnr(snr, fn, P, azel, ctxt=""):
    """Pcolormesh of received power [dB] vs. time and slant range.

    snr: 2-D xarray.DataArray (srng x time); anything else is silently skipped
    fn: data filename, used for the title and output file name
    P: plot parameters ('tlim', 'zlim', 'vlim', 'vlimint', 'odir', optional 'tmark')
    azel: beam (azimuth, elevation) [deg]
    ctxt: output-name prefix; containing 'int' selects the integrated color limits
    Returns the figure, or None when nothing was plotted.
    """
    if not isinstance(snr, xarray.DataArray) or min(snr.shape) < 2:
        return
    P["tlim"] = isrutils.str2dt(P["tlim"])  # NOTE: mutates the caller's P
    if "int" in ctxt:
        vlim = P["vlimint"]
    else:
        vlim = P["vlim"]
    assert (
        snr.ndim == 2 and snr.shape[1] > 0
    ), f'you seem to have extracted zero times, look at tlim {P["tlim"]}'
    fg = figure()  # figsize=(30,12))
    ax = fg.gca()
    try:
        h = ax.pcolormesh(
            snr.time,
            snr.srng,
            10 * masked_invalid(np.log10(snr.data)),
            vmin=vlim[0],
            vmax=vlim[1],
            cmap="cubehelix_r",
        )
    except ValueError as e:
        print(e, file=stderr)
        return
    ax.autoscale(True, tight=True)
    ax.set_xlim(P["tlim"])
    ax.set_ylim(P["zlim"])
    ax.set_ylabel("slant range [km]")
    ax.set_xlabel("Time [UTC]")
    ax.xaxis.set_major_formatter(DateFormatter("%H:%M:%S"))
    # %% date ticks
    fg.autofmt_xdate()
    ax.tick_params(axis="both", which="both", direction="out")
    c = fg.colorbar(h, ax=ax, fraction=0.075, shrink=0.5)
    c.set_label("Power [dB]")
    # sample period [s]; snr.time values are ns since the epoch
    Ts = f"{(snr.time[1] - snr.time[0]).item() / 1e9:.3f}" if snr.time.size >= 2 else ""
    # BUG FIX: the 2nd and 3rd title fragments lacked the f prefix, so the
    # braces were rendered literally; Ts was also commented out and undefined.
    ax.set_title(
        f"Az,El {azel[0]:.1f},{azel[1]:.1f} {isrutils.expfn(fn)}"
        f"{str(datetime.fromtimestamp(snr.time[0].item()/1e9))[:10]}"
        f"$T_{{sample}}$={Ts} sec."
    )
    try:
        # optional annotation marks: (x, y, text, x-offset-factor)
        for m in P["tmark"]:
            try:
                ax.annotate(
                    m[2],
                    m[:2],
                    xytext=(m[3] * 100, 50),
                    textcoords="offset points",
                    color="white",
                    ha="left",
                    bbox={"alpha": 0.2},
                    arrowprops={
                        "facecolor": "white",
                        "arrowstyle": "-[",
                        "connectionstyle": "arc3,rad=0.2",
                    },
                )
            except Exception as e:
                logging.error(f"failed to annotate {e}")
    except KeyError:
        pass  # no 'tmark' requested
    # if you get RuntimeError here, will also error on savefig
    fg.tight_layout()
    # %% output
    ofn = ctxt + "power_" + isrutils.expfn(fn)
    writeplots(fg, snr.time[0].item(), P["odir"], ofn)
    return fg
def plotsnr1d(snr, P):
    """Line plot of SNR vs. altitude at the three time samples bracketing P['t0'].

    NOTE(review): uses .index / .iloc, which are pandas accessors, while snr
    is checked to be an xarray.DataArray -- this likely predates a pandas ->
    xarray port; verify before relying on it.  A call site above also passes
    three arguments (snr, fn, P) to this two-parameter function.
    """
    if not isinstance(snr, xarray.DataArray):
        return
    # index of the time sample nearest t0, widened to t0-1 .. t0+1
    tind = abs(snr.time - P["t0"]).argmin()
    tind = range(tind - 1, tind + 2)
    t1 = snr.time[tind]
    # power in dB for slant ranges above the lower altitude limit
    S = 10 * np.log10(snr[snr.srng >= P["zlim"][0], t1])
    z = S.index
    ax = figure().gca()
    ax.plot(S.iloc[:, 0], z, color="r", label=str(t1[0]))
    ax.plot(S.iloc[:, 1], z, color="k", label=str(t1[1]))
    ax.plot(S.iloc[:, 2], z, color="b", label=str(t1[2]))
    # ax.set_ylim(zlim)
    ax.autoscale(True, "y", tight=True)
    ax.set_xlim(-5)
    ax.legend()
    ax.set_title(P["fn"])
    ax.set_xlabel("SNR [dB]")
    ax.set_ylabel("altitude [km]")
def plotsnrmesh(snr, fn, P):
    """3-D surface of SNR vs. time and altitude over 11 samples around P['t0'].

    NOTE(review): same pandas-era .index/.iloc-style access as plotsnr1d --
    verify against the current xarray data model.  fn is accepted but unused.
    """
    if not isinstance(snr, xarray.DataArray):
        return
    # time sample nearest t0, widened to +/- 5 samples
    tind = abs(snr.time - P["t0"]).argmin()
    tind = range(tind - 5, tind + 6)
    t1 = snr.time[tind]
    S = 10 * np.log10(snr[snr.srng >= P["zlim"][0], t1])
    z = S.index
    # time axis in seconds (times are ns since epoch)
    x, y = np.meshgrid(S.time.data.astype(float) / 1e9, z)
    # NOTE(review): gca(projection="3d") was removed in matplotlib >= 3.6;
    # use fig.add_subplot(projection="3d") when upgrading.
    ax3 = figure().gca(projection="3d")
    # ax3.plot_wireframe(x,y,S.data)
    # ax3.scatter(x,y,S.data)
    ax3.plot_surface(x, y, S.data, cmap="jet")
    ax3.set_zlim(P["vlim"])
    ax3.set_zlabel("SNR [dB]")
    ax3.set_ylabel("altitude [km]")
    ax3.set_xlabel("time")
    ax3.autoscale(True, "y", tight=True)
def plotacf(spec: xarray.DataArray, fn: Path, azel, t, dt, P: dict):
"""
plot PSD derived from ACF.
"""
if not isinstance(spec, xarray.DataArray):
return
# %% alt vs freq
fg = figure()
ax = fg.gca()
assert 10 <= azel[1] <= 90, "possibly invalid elevation angle for this beam"
goodz | |
and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
str
Response Object
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=False)
kwargs['payload'] = \
payload
return self.post_headline_v1_headline_endpoint.call_with_http_info(**kwargs)
def post_headline_v1_headline_with_http_info(
    self,
    payload,
    **kwargs
) -> typing.Tuple[str, int, typing.MutableMapping]:
    """POST request to create a headline from input text # noqa: E501
    Endpoint for initiating a processing job to create a headline from input text. # noqa: E501
    Synchronous variant returning (response data, HTTP status, response headers).
    Args:
        payload (Request): request body for the headline job.
    Keyword Args:
        _preload_content (bool): if False, return the raw urllib3.HTTPResponse
            without reading/decoding. Default True.
        _request_timeout (int/float/tuple): total timeout, or a
            (connection, read) pair. Default None.
        _check_input_type (bool): type-check data sent to the server.
            Default True.
        _check_return_type (bool): type-check data received from the server.
            Default True.
        _spec_property_naming (bool): True when input keys use the serialized
            (OpenAPI) names rather than snake_case (default False).
        _content_type (str/None): force the body content-type; predicted from
            the allowed content-types when None (default).
        _host_index (int/None): index of the server to use; configuration
            default when None.
    Returns:
        (str, int, dict): response data, HTTP status code, response headers.
    """
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=False)
    kwargs['payload'] = payload
    return self.post_headline_v1_headline_endpoint.call_with_http_info(**kwargs)
def post_headline_v1_headline_async(
    self,
    payload,
    **kwargs
) -> "ApplyResult[str]":
    """POST request to create a headline from input text # noqa: E501
    Endpoint for initiating a processing job to create a headline from input text. # noqa: E501
    Asynchronous variant: returns only the response data, wrapped in an
    ApplyResult.
    Args:
        payload (Request): request body for the headline job.
    Keyword Args:
        _preload_content (bool): if False, return the raw urllib3.HTTPResponse
            without reading/decoding. Default True.
        _request_timeout (int/float/tuple): total timeout, or a
            (connection, read) pair. Default None.
        _check_input_type (bool): type-check data sent to the server.
            Default True.
        _check_return_type (bool): type-check data received from the server.
            Default True.
        _spec_property_naming (bool): True when input keys use the serialized
            (OpenAPI) names rather than snake_case (default False).
        _content_type (str/None): force the body content-type; predicted from
            the allowed content-types when None (default).
        _host_index (int/None): index of the server to use; configuration
            default when None.
    Returns:
        ApplyResult[str]
    """
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=True)
    kwargs['payload'] = payload
    return self.post_headline_v1_headline_endpoint.call_with_http_info(**kwargs)
def post_headline_v1_headline_with_http_info_async(
    self,
    payload,
    **kwargs
) -> "ApplyResult[typing.Tuple[str, int, typing.MutableMapping]]":
    """POST request to create a headline from input text # noqa: E501
    Endpoint for initiating a processing job to create a headline from input text. # noqa: E501
    Asynchronous variant: returns (data, HTTP status, headers) wrapped in an
    ApplyResult.
    Args:
        payload (Request): request body for the headline job.
    Keyword Args:
        _preload_content (bool): if False, return the raw urllib3.HTTPResponse
            without reading/decoding. Default True.
        _request_timeout (int/float/tuple): total timeout, or a
            (connection, read) pair. Default None.
        _check_input_type (bool): type-check data sent to the server.
            Default True.
        _check_return_type (bool): type-check data received from the server.
            Default True.
        _spec_property_naming (bool): True when input keys use the serialized
            (OpenAPI) names rather than snake_case (default False).
        _content_type (str/None): force the body content-type; predicted from
            the allowed content-types when None (default).
        _host_index (int/None): index of the server to use; configuration
            default when None.
    Returns:
        ApplyResult[(str, int, typing.Dict)]
    """
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=True)
    kwargs['payload'] = payload
    return self.post_headline_v1_headline_endpoint.call_with_http_info(**kwargs)
def post_summary_v1_summary(
    self,
    payload,
    **kwargs
) -> str:
    """POST request to create a headline and summary from input text # noqa: E501
    Endpoint for initiating a processing job to create a headline and summary from input text. # noqa: E501
    Synchronous variant returning only the response data.
    Args:
        payload (Request): request body for the summary job.
    Keyword Args:
        _preload_content (bool): if False, return the raw urllib3.HTTPResponse
            without reading/decoding. Default True.
        _request_timeout (int/float/tuple): total timeout, or a
            (connection, read) pair. Default None.
        _check_input_type (bool): type-check data sent to the server.
            Default True.
        _check_return_type (bool): type-check data received from the server.
            Default True.
        _spec_property_naming (bool): True when input keys use the serialized
            (OpenAPI) names rather than snake_case (default False).
        _content_type (str/None): force the body content-type; predicted from
            the allowed content-types when None (default).
        _host_index (int/None): index of the server to use; configuration
            default when None.
    Returns:
        str: response data.
    """
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=False)
    kwargs['payload'] = payload
    return self.post_summary_v1_summary_endpoint.call_with_http_info(**kwargs)
def post_summary_v1_summary_with_http_info(
    self,
    payload,
    **kwargs
) -> typing.Tuple[str, int, typing.MutableMapping]:
    """POST request to create a headline and summary from input text # noqa: E501
    Endpoint for initiating a processing job to create a headline and summary from input text. # noqa: E501
    Synchronous variant returning (response data, HTTP status, response headers).
    Args:
        payload (Request): request body for the summary job.
    Keyword Args:
        _preload_content (bool): if False, return the raw urllib3.HTTPResponse
            without reading/decoding. Default True.
        _request_timeout (int/float/tuple): total timeout, or a
            (connection, read) pair. Default None.
        _check_input_type (bool): type-check data sent to the server.
            Default True.
        _check_return_type (bool): type-check data received from the server.
            Default True.
        _spec_property_naming (bool): True when input keys use the serialized
            (OpenAPI) names rather than snake_case (default False).
        _content_type (str/None): force the body content-type; predicted from
            the allowed content-types when None (default).
        _host_index (int/None): index of the server to use; configuration
            default when None.
    Returns:
        (str, int, dict): response data, HTTP status code, response headers.
    """
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=False)
    kwargs['payload'] = payload
    return self.post_summary_v1_summary_endpoint.call_with_http_info(**kwargs)
def post_summary_v1_summary_async(
self,
payload,
**kwargs
) -> "ApplyResult[str]":
"""POST request to create a headline and summary from input text # noqa: E501
Endpoint for initiating a processing job to create a headline and summary from input text. # noqa: E501
This method makes a asynchronous HTTP request. Returns the http data, wrapped in ApplyResult
Args:
payload (Request):
Keyword Args:
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response | |
"""Copyright (c) 2021 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE."""
# generic
import sys
import os
import argparse
import re
import wx.lib.agw.aui.tabmdi
from os import walk
import importlib
import random
import lmfit
from lmfit import Model
from lmfit import minimize, Parameters
import imp
# wx
import wx
import wx.aui
import wx.lib.scrolledpanel
# import ctypes
import keyword
import wx.grid as gridlib
import wx.lib.agw.aui as aui
import wx.lib.agw.flatmenu as FM
from itertools import count
# specialized
import scipy
from scipy.optimize import curve_fit
from scipy.integrate import simps
from scipy import integrate
# Import subfunctions
from percolate.Subfunctions.area import Area
from percolate.Subfunctions.DirReader import DirReader
from percolate.Subfunctions.FindValue import FindValue
from percolate.Subfunctions.SumRules import SumRules
from percolate.Subfunctions.Multiplexer import Multiplexer
from percolate.Subfunctions.XAS import Xas
from percolate.Subfunctions.difference import difference
from percolate.Subfunctions.step_subtraction import step_subtraction
from percolate.Subfunctions.Transpose import Transpose
from percolate.Subfunctions.Normalise import Normalise
from percolate.Subfunctions.background_subtraction import background_subtraction
from percolate.Subfunctions.background_subtraction import background_subtraction2
from percolate.Subfunctions.parser import EXAFSStreamParser
from percolate.Subfunctions.parser import XMCDStreamParser
from percolate.Subfunctions.IdentifyPeaks import IdentifyPeaks
# Toolkit imports
from percolate.toolkit.find_array_equivalent import find_array_equivalent
from percolate.toolkit.zerolistmaker import zerolistmaker
from percolate.toolkit.import_path import import_path
# Framework imports
from percolate.framework import Port
from percolate.framework import InPort
from percolate.framework import OutPort
from percolate.framework import StreamOutput
from percolate.framework import TextOutput
from percolate.framework import StreamInput
from percolate.framework import ArrayOutput
from percolate.framework import FilePathInput
from percolate.framework import DirPathInput
from percolate.framework import MuxInput
from percolate.framework import MuxOutput
from percolate.framework import Param_input
from percolate.framework import func_Output
from percolate.framework import int_input
from percolate.framework import num_input
from percolate.framework import free_int_input
from percolate.framework import bool_input
from percolate.framework import choice_input
from percolate.framework import Function
from percolate.framework import Edge
from percolate.framework import CompositeFn
# matplotlib (Plotting)
import matplotlib
import matplotlib.cm as cm
import matplotlib.cbook as cbook
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from matplotlib.backends.backend_wxagg import (
NavigationToolbar2WxAgg as NavigationToolbar,
)
from matplotlib.figure import Figure
import numpy as np
import matplotlib.pyplot as pl
import math
def exists_in_list(list, item):
    """Return True if *item* equals an entry of *list*, else False.

    (Parameter name ``list`` shadows the builtin but is kept for interface
    compatibility.  BUG FIX: the original fell off the end and returned
    None for a miss; now returns an explicit False.)
    """
    return any(entry == item for entry in list)
def exists_in_dict(dict, item):
    """Return True if *item* equals a key of *dict*, else False.

    (Parameter name ``dict`` shadows the builtin but is kept for interface
    compatibility.  BUG FIX: returned None for a miss; also iterates keys
    directly instead of discarding .items() values.)
    """
    return any(key == item for key in dict)
# GUI
class InputControlBase(Function):
    """Base for GUI controls that feed a value INTO a target port.

    Wires a local OutPort to *target_port* via an Edge; subclasses implement
    read() to supply the control's current value.
    """
    def __init__(self, target_port, name="source"):
        super().__init__(name)
        # Auto Create Edge from local output to remote input port
        self.outport = OutPort(self, "auto", self.read)
        self.edge = Edge(self.outport, target_port)

    def changed(self):
        """Drive 2-phase update"""
        # Order is significant: invalidate pass first, then the update pass.
        self.outport.notice(False)
        self.outport.notice(True)

    def evaluate(self):
        # input controls are always valid so do nothing
        pass

    def read(self):
        # Subclasses must return the control's current value.
        raise NotImplementedError()
class OutputControlBase(Function):
    """Base for GUI controls that DISPLAY the value of a source port.

    Wires *source_port* to a local InPort via an Edge on construction.
    """
    def __init__(self, source_port, name="sink"):
        super().__init__(name)
        # Auto Create Edge from local input to remote source port
        self.in_port = InPort(self, "auto")
        self.edge = Edge(source_port, self.in_port)
# Control panel widgets
class FileSelectorControl(InputControlBase):
    """File-picker input control: publishes the selected file's contents."""
    def __init__(self, parent, port):
        super().__init__(port, "FileSelector(%s)" % port.name)
        self.port = port
        # Create UI file selector in panel
        # NOTE(review): wx.FileSelector is a modal dialog *function*, not a
        # widget, and takes no `label` keyword -- confirm this path ever ran.
        self.ui = wx.FileSelector(label=port.name)
        self.data = None
        # hook selection event

    def on_select(self, filename):
        # open file, read it & write to port
        # NOTE(review): readfile() is not defined in this module's visible
        # scope -- presumably a project helper; verify.
        self.data = readfile(filename)
        self.changed()

    def read(self):
        # Current file contents (None until a file has been selected).
        return self.data

    def dragg(self):
        pass
class MyDropTarget(wx.TextDropTarget):
    """Drop target that routes a dragged port name onto a plot control.

    The dragged text is resolved to a port through the app registry and
    handed to the parent control for display.
    """
    def __init__(self, parent, object, app):
        wx.TextDropTarget.__init__(self)
        self.object = object
        self.app = app
        self.parent = parent

    def OnDropText(self, x, y, data):
        # data is the dragged port name; show it in the parent plot control
        port = self.app.lookup_port(data)
        self.parent.DisplayData(port)
        self.parent.DisplayPorts(port)
        return True
class MaxPlotControl(OutputControlBase):
    """Full-size plot panel: display-option checkboxes, a drag-and-drop port
    list, and a matplotlib canvas redrawn for every tracked port in evaluate().

    NOTE(review): __init__ never calls super().__init__(), so the
    OutputControlBase in_port/edge wiring is skipped -- confirm intended.
    """

    def __init__(self, parent, port, aui_notebook, manager, app):
        # dictionary for plots (port -> this control)
        self._dict = {}
        # parent is the main frame
        self.panel = wx.Panel(app.main_frame)
        self.datarequest = wx.CheckBox(self.panel, label="Show data points")
        self.linerequest = wx.CheckBox(self.panel, label="Show line")
        self.legendrequest = wx.CheckBox(self.panel, label="Show Legend")
        self.guidelinesrequest = wx.CheckBox(self.panel, label="Show Guidelines")
        # initial conditions
        self.linerequest.SetValue(True)
        self.legendrequest.SetValue(True)
        self.guidelinesrequest.SetValue(True)
        cbxsizer = wx.BoxSizer(wx.HORIZONTAL)
        cbxsizer.AddMany(
            [
                (self.guidelinesrequest, 1, wx.EXPAND),
                (self.datarequest, 1, wx.EXPAND),
                (self.linerequest, 1, wx.EXPAND),
                (self.legendrequest, 1, wx.EXPAND),
            ]
        )
        hbox = wx.BoxSizer(wx.HORIZONTAL)
        fgs = wx.FlexGridSizer(4, 1, 10, 10)
        self.panel.fig = Figure((5, 3), 75)
        self.panel.canvas = FigureCanvas(self.panel, 1, self.panel.fig)
        self.panel.toolbar = NavigationToolbar(self.panel.canvas)  # matplotlib toolbar
        self.panel.toolbar.Realize()
        # checklist of which dropped ports are currently plotted
        self.items_in_plot = wx.CheckListBox(
            self.panel, name="Ports Displayed", choices=[]
        )
        canvas_sizer = wx.BoxSizer(wx.HORIZONTAL)
        canvas_sizer.AddMany(
            [(self.panel.canvas, 5, wx.EXPAND), (self.items_in_plot, 1, wx.EXPAND)]
        )
        # accept port names dragged onto the canvas
        dt = MyDropTarget(self, self.panel.canvas, app)
        self.panel.canvas.SetDropTarget(dt)
        fgs.AddMany(
            [
                (canvas_sizer, 1, wx.EXPAND),
                (self.panel.toolbar, 1, wx.EXPAND),
                (cbxsizer, 1, wx.EXPAND),
            ]
        )
        fgs.AddGrowableRow(0, 7)
        fgs.AddGrowableCol(0, 1)
        hbox.Add(fgs, proportion=1, flag=wx.ALL | wx.EXPAND, border=15)
        self.panel.SetSizer(hbox)
        self.panel.Bind(wx.EVT_CHECKBOX, self.on_data_point_request, self.datarequest)
        self.panel.Bind(wx.EVT_CHECKBOX, self.on_line_request, self.linerequest)
        self.panel.Bind(wx.EVT_CHECKBOX, self.on_legend_request, self.legendrequest)
        self.panel.Bind(
            wx.EVT_CHECKBOX, self.on_guideline_request, self.guidelinesrequest
        )
        self.panel.Bind(wx.EVT_CHECKLISTBOX, self.on_port_select, self.items_in_plot)
        self.ports = []
        # safe to draw before the option flags exist: self.ports is empty
        self.evaluate()
        # display-option state mirrored from the checkboxes above
        self.datapoints = False
        self.drawline = True
        self.legend = True
        self.guidelines = True
        self.selected_ports = []

    def on_port_select(self, evt):
        # Rebuild self.ports from the checked entries of the listbox.
        self.ports = []
        for item in self._dict.keys():
            if exists_in_list(self.items_in_plot.CheckedStrings, item.name):
                self.ports.append(item)
            else:
                pass
        self.evaluate()

    def on_legend_request(self, evt):
        # Toggle legend display and redraw.
        if evt.IsChecked():
            self.legend = True
        else:
            self.legend = False
        self.evaluate()

    def on_guideline_request(self, evt):
        # Toggle vertical guideline display and redraw.
        if evt.IsChecked():
            self.guidelines = True
        else:
            self.guidelines = False
        self.evaluate()

    def on_data_point_request(self, evt):
        # Toggle scatter markers and redraw.
        if evt.IsChecked():
            self.datapoints = True
        else:
            self.datapoints = False
        self.evaluate()

    def on_line_request(self, evt):
        # Toggle connecting lines and redraw.
        if evt.IsChecked():
            self.drawline = True
        else:
            self.drawline = False
        self.evaluate()

    def DisplayPorts(self, data):
        # create all gui references to ports; avoid duplicate listbox entries
        if len(self.items_in_plot.Items) != 0:
            if exists_in_list(self.items_in_plot.Items, data.name):
                pass
            else:
                self.items_in_plot.Append(data.name)
        else:  # first instance
            self.items_in_plot.Append(data.name)
        # newly added ports start out checked (visible)
        self.items_in_plot.Check(len(self.items_in_plot.Items) - 1, check=True)

    def DisplayData(self, data):
        # create new item in dictionary
        self._dict[data] = self
        # clear the ports then fill from dictionary
        self.ports = []
        for item in self._dict.keys():
            self.ports.append(item)
        # just call for evaluation
        self.evaluate()

    def evaluate(self):
        """Redraw every tracked port onto a fresh axes."""
        # clear figure
        self.panel.fig.clear()
        # add subplot
        self.a = self.panel.fig.add_subplot(111)
        # fixed color cycle, indexed by the running series count below
        colors = [
            "blue",
            "black",
            "brown",
            "red",
            "green",
            "orange",
            "turquoise",
            "pink",
            "blue",
            "black",
            "red",
            "green",
            "orange",
            "pink",
            "purple",
            "black",
            "red",
            "blue",
            "blue",
            "black",
            "brown",
            "red",
            "yellow",
            "green",
            "orange",
            "blue",
            "black",
            "brown",
            "red",
            "yellow",
            "green",
            "orange",
            "turquoise",
            "pink",
            "blue",
            "black",
            "red",
            "green",
            "orange",
            "pink",
            "purple",
            "black",
            "red",
            "blue",
            "blue",
            "black",
            "brown",
            "red",
            "yellow",
            "green",
            "orange",
            "blue",
            "black",
            "brown",
            "red",
            "yellow",
            "green",
            "orange",
            "turquoise",
            "pink",
            "blue",
            "black",
            "red",
            "green",
            "orange",
            "pink",
            "purple",
            "black",
            "red",
            "blue",
            "blue",
            "black",
            "brown",
            "red",
            "yellow",
            "green",
            "orange",
        ]
        # we need to accompany multiple different ports
        count = 0
        for port in self.ports:
            # port.read() yields {"data": (x, y, guidelines), "label": ...}
            x = np.array(port.read()["data"][0])
            y = np.array(port.read()["data"][1])
            lbl = list(port.read()["label"])
            lines = np.array(port.read()["data"][2])
            count = count + 1
            # multi-series port: x is 2-D, one row per series
            if x is not None and len(x.shape) > 1:
                count = 0
                # for item in x:
                for x_data, y_data, label in zip(x, y, lbl):
                    count = count + 1
                    try:
                        # optional vertical guide lines for each series
                        for line in lines:
                            if self.guidelines:
                                self.panel.lines = self.a.axvline(line, 0, 1)
                    except:  # NOTE(review): bare except hides real errors
                        pass
                    if len(x) != len(y):
                        # NOTE(review): joins y into a string and passes the
                        # *data* as label= -- looks like a latent bug; confirm.
                        y = "".join(y)
                        self.panel.lines = self.a.plot(
                            x_data, y_data, color=colors[count], label=y
                        )
                    else:
                        if self.datapoints and self.drawline:
                            self.panel.lines = self.a.plot(
                                x_data,
                                y_data,
                                color=colors[count],
                                label=label + " - " + str(port.name),
                            )
                            self.a.scatter(
                                x_data, y_data, marker="o", color="black", s=5
                            )
                        elif self.drawline:
                            self.panel.lines = self.a.plot(
                                x_data,
                                y_data,
                                color=colors[count],
                                label=label + " - " + str(port.name),
                            )
                        elif self.datapoints:
                            self.a.scatter(
                                x_data, y_data, marker="o", color="black", s=5
                            )
                        else:
                            pass
                # NOTE(review): loop variable `a` is unused -- every element
                # reads self.a.get_legend_handles_labels(); confirm intent.
                lines_labels = [
                    self.a.get_legend_handles_labels() for a in self.panel.fig.axes
                ]
                # flatten the per-axes (handles, labels) pairs
                lines, labels = [sum(lol, []) for lol in zip(*lines_labels)]
                if self.legend:
                    self.a.legend(lines, labels)
                # self.a.legend()
            else:
                # single-series port: plot x/y directly
                if self.datapoints and self.drawline:
                    self.panel.lines = self.a.plot(
                        x, y, color=colors[count], label=lbl[0] + " - " + str(port.name)
                    )
                    self.a.scatter(x, y, marker="o", color="black", s=5)
                elif self.drawline:
                    self.panel.lines = self.a.plot(
                        x, y, color=colors[count], label=lbl[0] + " - " + str(port.name)
                    )
                elif self.datapoints:
                    self.a.scatter(x, y, marker="o", color="black", s=5)
                else:
                    pass
                if self.legend:
                    self.a.legend()
        self.panel.fig.canvas.draw()
        self.panel.toolbar.update()
class MinimalPlotControl(OutputControlBase):
"""Represents a vector output in one line
- double clicking on graph opens a large static graph window
- opening a canvas and dragging into window set up dynamic plot window"""
def __init__(self, parent, fgs, port, aui_notebook, manager, app):
super().__init__(port, "PlotControl(%s)" % port.name)
self.port = port
self.aui_notebook = aui_notebook
self.manager = manager
self.app = app
self.parent = parent
self.fig = Figure((5, 0.8), 75)
self.canvas = FigureCanvas(parent, -1, self.fig)
sizer = wx.BoxSizer(wx.HORIZONTAL)
sizer.Add(self.canvas, 7, wx.ALIGN_CENTRE | wx.LEFT)
fgs.Add(sizer, 0, wx.TOP | wx.EXPAND)
# handlers
self.canvas.mpl_connect("axes_enter_event", self.enter_axes)
self.canvas.mpl_connect("axes_leave_event", self.leave_axes)
self.canvas.mpl_connect("button_press_event", self.OnDrag)
self.canvas.mpl_connect("button_release_event", self.OnDrag)
self.evaluate()
def enter_axes(self, event):
event.inaxes.patch.set_facecolor("lightgrey")
event.canvas.draw()
def leave_axes(self, event):
event.inaxes.patch.set_facecolor("white")
event.canvas.draw()
def OnDrag(self, e):
if e.dblclick:
| |
# <reponame>CHCMATT/Code
import re
import time
import traceback
import threading
import socket
import asyncore
import asynchat
import os
from util import output, database
from util.tools import convertmask
from util.web import uncharset, shorten
from core import triggers
from core.dispatch import dispatch
cwd = os.getcwd()
class Origin(object):
    """Parsed IRC message prefix (nick!user@host) plus the reply target."""

    source = re.compile(r'([^!]*)!?([^@]*)@?(.*)')

    def __init__(self, bot, source, args):
        parsed = Origin.source.match(source or '')
        self.nick, self.user, self.host = parsed.groups()
        # args[1], when present, is the message target (channel or our nick)
        target = args[1] if len(args) > 1 else None
        # A PM addressed to us is answered back to the sender's nick.
        remap = {bot.nick: self.nick, None: None}
        self.sender = remap.get(target, target)
class Bot(asynchat.async_chat):
def __init__(self, nick, name, user, channels, server_password=None, debug=False):
    """IRC bot core built on asynchat.

    nick / name / user: IRC identity fields (nick doubles as the datastore key).
    channels: iterable of channels to join (None -> empty list).
    server_password: optional PASS sent before NICK/USER.
        (BUG FIX: the checked-in source contained scrubbed `<PASSWORD>`
        placeholders here, which are not valid Python.)
    debug: when True, every raw inbound line is echoed to the log.
    """
    asynchat.async_chat.__init__(self)
    self.set_terminator('\n')
    self.buffer = ''
    self.id = 0
    self.nick = nick
    self.default = nick
    self.name = name
    self.user = user
    self.irc_timeout = 45
    self.server_options = {}
    self.server_password = server_password
    self.channels = channels or list()
    self.stack = list()
    self.muted = False
    self.debug = debug
    self.lastping = int(time.time())
    self.chan = {}
    self.bans = {}
    self.logs = {
        'bot': [],
        'channel': {}
    }
    # mIRC color/format escape codes used for message templating
    self.special_chars = {
        'white': '\x0300', 'black': '\x0301', 'blue': '\x0302',
        'navy': '\x0302', 'green': '\x0303', 'red': '\x0304',
        'brown': '\x0305', 'maroon': '\x0305', 'purple': '\x0306',
        'orange': '\x0307', 'olive': '\x0307', 'gold': '\x0307',
        'yellow': '\x0308', 'lightgreen': '\x0309', 'lime': '\x0309',
        'teal': '\x0310', 'cyan': '\x0311', 'lightblue': '\x0312',
        'royal': '\x0312', 'lightpurple': '\x0313', 'pink': '\x0313',
        'fuchsia': '\x0313', 'grey': '\x0314', 'gray': '\x0314',
        'lightgrey': '\x0315', 'silver': '\x0315',
        # Even more special...
        'bold': '\x02', 'b': '\x02', 'italic': '\x1d', 'i': '\x1d',
        'reset': '\x0f', 'r': '\x0f', 'clear': '\x03', 'c': '\x03',
        'reverse': '\x16', 'underline': '\x1f', 'u': '\x1f'
    }
    # Load ignorelist (persisted per-bot), precompiled to regexes
    self.blocks = database.get(self.nick, 'ignore', [])
    self.re_blocks = [convertmask(x) for x in self.blocks]
    # serializes sends across threads (see initiate_send)
    self.sending = threading.RLock()
def initiate_send(self):
    """Serialize async_chat sends across threads.

    BUG FIX: uses ``with`` so the lock is released even when the underlying
    send raises; the original acquire()/release() pair leaked the lock on
    exception, deadlocking all later sends.
    """
    with self.sending:
        asynchat.async_chat.initiate_send(self)
# def push(self, *args, **kargs):
# asynchat.async_chat.push(self, *args, **kargs)
def run(self, id, host, port):
    """Record this bot's id, then connect and enter the event loop."""
    self.id = id
    self.initiate_connect(host, port)
def initiate_connect(self, host, port):
    """Open the TCP connection (honoring an optional bind_ip) and run the
    asyncore loop; hard-exits the process on failure or Ctrl-C."""
    output.normal('Connecting to %s:%s...' % (host, port), 'STATUS')
    try:
        # self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        # bind to a specific local address when configured (vhost support)
        source_address = ((self.config('bind_ip'), 0) if self.config('bind_ip') else None)
        self.set_socket(socket.create_connection((host, port), source_address=source_address))
        self.connect((host, port))
        asyncore.loop()  # blocks for the lifetime of the connection
    except KeyboardInterrupt:
        os._exit(0)
    except Exception as e:
        output.error('Connection to %s:%s failed! (%s)' % (host, port, str(e)))
        os._exit(1)
def handle_connect(self):
    """Socket is up: record the start time and send IRC registration.

    Protocol order matters: PASS (if any) must precede NICK and USER.
    """
    self.irc_startup = int(time.time())
    output.success('Connected!', 'STATUS')
    if self.server_password:
        self.write(('PASS', self.server_password), output=False)
    self.write(('NICK', self.nick), output=False)
    self.write(('USER', self.user, '+iw', self.nick),
               self.name, output=False)
def handle_close(self):
    """asyncore callback: on disconnect, exit so a supervisor can restart us."""
    # NOTE(review): assumes an external process manager relaunches on
    # exit code 1 — confirm against the launcher.
    os._exit(1)
def handle_error(self):
    '''Handle any uncaptured error in the core. Overrides asyncore's handle_error'''
    # Log the full traceback instead of asyncore's default behaviour;
    # the connection is left up.
    trace = traceback.format_exc()
    output.error('Fatal error in core, please review exception below:')
    output.error('Exception: ' + trace)
def collect_incoming_data(self, data):
    """asynchat callback: accumulate raw data until the terminator arrives."""
    self.buffer += data
def found_terminator(self):
    """asynchat callback: parse one complete IRC line and dispatch it.

    Answers PING/PONG inline, forwards other events to the trigger
    handlers, then runs the command dispatcher for PRIVMSG lines.
    """
    line = self.buffer
    # Keep the unparsed line around for debug output.
    self.raw = line
    if line.endswith('\r'):
        line = line[:-1]
    self.buffer = ''
    # ":source rest-of-line" prefix form, per the IRC protocol.
    if line.startswith(':'):
        source, line = line[1:].split(' ', 1)
    else:
        source = None
    # " :" separates the argument list from the trailing free-form text.
    if ' :' in line:
        argstr, text = line.split(' :', 1)
        args = argstr.split()
        args.append(text)
    else:
        args = line.split()
        text = args[-1]
    origin = Origin(self, source, args)
    # Keep-alive handling; lastping feeds the watchdog.
    if args[0] == 'PING':
        self.write(('PONG', text))
        self.lastping = int(time.time())
        return
    if args[0] == 'PONG':
        self.lastping = int(time.time())
        return
    if self.debug:
        output.warning(repr(self.raw), 'DEBUG')
    try:
        # Ignore our own echoes; triggers are looked up by command name.
        if source and origin.nick != self.nick:
            getattr(triggers, 'trigger_%s' % args[0])(self, origin, line, args, text,)
    except AttributeError:
        pass
    except KeyError:
        pass
    # Execute this last so we know that our data is parsed first.
    # Slightly slower but we know we can get up-to-date information
    if args[0] == 'PRIVMSG':
        # While muted, only the mute-management commands get through.
        if self.muted and text[1::].split()[0].lower() not in ['unmute', 'help', 'mute']:
            return
        dispatch(self, origin, tuple([text] + args))
def mute(self):
    """Silence command responses (PRIVMSG handling checks this flag)."""
    self.muted = True

def unmute(self):
    """Re-enable command responses."""
    self.muted = False
# def dispatch(self, origin, args):
# pass
def error(self, origin, supress=False):
    """Log the current exception and report a one-line summary to IRC.

    origin  -- Origin whose .sender receives the summary message
    supress -- if True, only log; do not message the channel
               (spelling kept for interface compatibility)
    """
    try:
        trace = traceback.format_exc()
        output.error(trace)
        if supress:
            return
        # Reversed, so lines[0] is the exception message (the last
        # line of the traceback).
        lines = list(reversed(trace.splitlines()))
        report = [lines[0].strip()]
        for line in lines:
            line = line.strip()
            if line.startswith('File "/'):
                # Lower-case the leading "File" for nicer presentation.
                report.append(line[0].lower() + line[1:])
                break
        else:
            report.append('{red}Source unknown.{c}')
        self.msg(origin.sender, '{red}%s{c} ({b}%s{b})' % (report[0], report[1]))
    except Exception:
        # Narrowed from a bare except so SystemExit/KeyboardInterrupt
        # still propagate; error reporting itself must never crash us.
        self.msg(origin.sender, '{red}Got an error.')
def __write(self, args, text=None, raw=False):
    """Low-level send: join args (plus optional trailing text), truncate
    to 510 chars and append CRLF before pushing to the socket.
    """
    try:
        if raw:
            # temp = ' '.join(args)[:510] + ' :' + text + '\r\n'
            # NOTE(review): the raw path slices 'args' directly, so it
            # appears to expect an already-formatted string rather than
            # the usual sequence — confirm against raw callers.
            temp = args[:510] + '\r\n'
        elif not raw:
            if text:
                # 510 because CR and LF count too
                temp = (' '.join(args) + ' :' + text)[:510] + '\r\n'
            else:
                temp = ' '.join(args)[:510] + '\r\n'
        self.push(temp)
        # Never echo NickServ traffic: it contains credentials.
        if self.debug and 'nickserv' not in temp.lower():
            output.warning(' > ' + temp, 'DEBUG')
    except IndexError:
        return
def write(self, args, text=None, raw=False, output=True):
    """Sanitize and send one IRC command, then fire any write triggers.

    args   -- sequence of command words, e.g. ('PRIVMSG', '#chan')
    text   -- optional trailing text (sent after ' :')
    raw    -- pass-through mode for preformatted data
    output -- kept for interface compatibility (shadows the output
              module inside this method; do not rename)
    """
    try:
        args = [self.safe(arg, u=True) for arg in args]
        if text is not None:
            text = self.safe(text, u=True)
        if raw:
            self.__write(args, text, raw)
        else:
            self.__write(args, text)
        # Record our own channel messages in the channel logs.
        if args[0] == 'PRIVMSG':
            if args[1].startswith('#'):
                self.add_logs(text, args[1], self.nick)
    except Exception:
        # Narrowed from a bare except: sending stays best-effort, but
        # SystemExit/KeyboardInterrupt must still propagate.
        pass
    try:
        getattr(triggers, 'trigger_write_%s' %
                args[0])(self, args, text, raw,)
    except AttributeError:
        return
    except KeyError:
        pass
def safe(self, input, u=False):
    """
    Strip line endings from outgoing data and, when requested, UTF-8
    encode it so it is safe to put on the wire.
    """
    # Remove both kinds of line break so a value can never smuggle an
    # extra IRC command onto the connection.
    for linebreak in ('\n', '\r'):
        input = input.replace(linebreak, '')
    if u:
        try:
            input = input.encode('utf-8')
        except:
            pass
    return input
def msg(self, recipient, text, x=False, shorten_urls=True, bypass_loop=False, colors=True):
    """
    Sends most messages to a direct location or recipient
    auto shortens URLs by default unless specified in the
    config

    x           -- allow CTCP \\x01 markers through
    bypass_loop -- skip the rate limiter and loop detection
    colors      -- expand {color}/{b}/... placeholders first
    """
    self.sending.acquire()
    if colors:
        text = self.format(text, shorten_urls=shorten_urls)
    if isinstance(text, unicode):
        try:
            text = text.encode('utf-8')
        except UnicodeEncodeError as e:
            # NOTE(review): e.__class__ + ': ' concatenates a class with
            # a str and would itself raise — confirm this path is dead.
            text = e.__class__ + ': ' + str(e)
    if isinstance(recipient, unicode):
        try:
            recipient = recipient.encode('utf-8')
        except UnicodeEncodeError as e:
            # FIX: release the send lock before bailing out; the original
            # returned while still holding self.sending, deadlocking every
            # subsequent msg()/initiate_send().
            self.sending.release()
            return
    if not x:
        text = text.replace('\x01', '')
    # No messages within the last 3 seconds? Go ahead!
    # Otherwise, wait so it's been at least 0.5 seconds <nope>+ penalty</nope>
    if not bypass_loop: # Used if you want to bypass the global rate limiter
        def wait(sk, txt):
            # sk is the recent-message stack: [(timestamp, text), ...]
            if sk:
                elapsed = time.time() - sk[-1][0]
                if elapsed < 3:
                    # penalty = float(max(0, len(txt) - 50)) / 70
                    wait = 0.5 # + penalty
                    if elapsed < wait:
                        time.sleep(wait - elapsed)
        wait(self.stack, text)
        # Loop detection: collapse repeated messages into '...', and stop
        # entirely once even the '...' placeholder starts repeating.
        messages = [m[1] for m in self.stack[-8:]]
        if messages.count(text) >= 5:
            text = '...'
            if messages.count('...') > 2:
                self.sending.release()
                return
    self.__write(('PRIVMSG', self.safe(recipient)), self.safe(text))
    output.normal('(%s) %s' %
                  (self.nick, self.stripcolors(self.clear_format(self.safe(text)))), self.safe(recipient))
    if self.safe(recipient).startswith('#') and self.safe(recipient) in self.logs['channel']:
        self.add_logs(text, recipient, self.nick)
    self.stack.append((time.time(), text))
    # Only the last 10 messages matter for rate limiting / loop detection.
    self.stack = self.stack[-10:]
    self.sending.release()
def add_logs(self, text, channel, nick):
    """ Adds bot chat messages to the bot-central log dictionary """
    tmp = {
        'message': self.stripcolors(self.clear_format(self.safe(text))),
        'nick': self.nick,
        'time': int(time.time()),
        'channel': self.safe(channel)
    }
    # Remove ACTION's (CTCP /me markers) and tag the entry instead.
    if '\x01ACTION' in tmp['message']:
        # FIX: the original used .strip('ACTION'), which strips the
        # *character set* A,C,T,I,O,N from both ends and mangles any
        # message starting/ending with those letters. Remove exactly the
        # 'ACTION' prefix instead.
        action = tmp['message'].replace('\x01', '')
        if action.startswith('ACTION'):
            action = action[len('ACTION'):].strip()
        tmp['message'] = '(me) ' + action
    self.logs['channel'][self.safe(channel)].append(tmp)
    self.logs['bot'].append(tmp)
def format(self, message, shorten_urls=True):
    '''
    formatting to support color/bold/italic/etc assignment
    and URL shortening in Codes responses
    '''
    message = uncharset(message)
    # Optionally run every URL in the message through the shortener.
    if self.config('shorten_urls') and shorten_urls:
        regex = re.compile(
            r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+',
            re.IGNORECASE).findall(message)
        for url in regex:
            try:
                # Shortening is best-effort; keep the original URL on failure.
                message = message.replace(url, shorten(url))
            except:
                pass
    # Decorations disabled: drop the {token} placeholders entirely.
    if not self.config('text_decorations'):
        return self.clear_format(message)
    try:
        # Expand each known {token} into its IRC control sequence.
        for special in self.special_chars:
            message = message.replace('{%s}' % special, self.special_chars[special])
        return message
    except:
        return self.clear_format(message)
def clear_format(self, message):
    """Strip every {token} formatting placeholder from a message."""
    # Remove each matched placeholder once, in order of appearance.
    for token in re.findall(r'{.*?}', message):
        message = message.replace(token, '', 1)
    return message
def stripcolors(self, data):
    """
    Note: the replacement method is CRUCIAL to keep from
    left over color digits. Order is very important.
    """
    # Two-digit codes come before their one-digit prefixes: stripping
    # "\x031" first would eat half of "\x0312" and leave a stray '2'.
    colors = [
        u"\x0300", u"\x0301", u"\x0302", u"\x0303", u"\x0304", u"\x0305",
        u"\x0306", u"\x0307", u"\x0308", u"\x0309", u"\x0310", u"\x0311",
        u"\x0312", u"\x0313", u"\x0314", u"\x0315", u"\x031", u"\x032",
        u"\x033", u"\x034", u"\x035", u"\x036", u"\x037", u"\x038", u"\x039",
        u"\x030", u"\x03", u"\x02", u"\x09", u"\x13", u"\x0f", u"\x15"
    ]
    data = uncharset(data)
    for color in colors:
        try:
            data = data.replace(color, '')
        except:
            pass
    # Non-ASCII characters are dropped entirely from the result.
    return str(data.encode('ascii', 'ignore'))
def changenick(self, nick):
    """ Change the nickname of the bot

    Returns True on success, None when the nick is rejected.
    """
    chars = set('`+=;,<>?')
    # Reject forbidden characters, a leading '-', and out-of-range
    # lengths (2..20 characters).
    if not any((c in chars) for c in nick) and nick[0] != '-' and \
            len(nick) > 1 and len(nick) <= 20:
        # FIX: announce the *new* nick. The original sent self.nick
        # (the current name), making the NICK command a no-op.
        self.write(('NICK', nick))
        self.nick = nick.encode('ascii', 'ignore')
        return True
    else:
        return None
def notice(self, dest, text):
    """
    Send an IRC NOTICE to a user or a channel. See IRC protocol
    documentation for more information
    """
    # Expand {color}/{b}/... placeholders before sending.
    text = self.format(text)
    self.write(('NOTICE', dest), text)
def me(self, dest, text):
    """
    Send an action (/me) to a user or a channel.
    Keep in mind has to be "me" as a lambda action already exists
    """
    text = self.format(text)
    # CTCP ACTION framing: \x01ACTION <text>\x01
    self.write(('PRIVMSG', dest), '\x01ACTION {}\x01'.format(text))
def restart(self):
    """
    Reconnect to IRC and restart the bot process while keeping all other
    bot processes in tact and untouched
    """
    # NOTE(review): relies on an external supervisor relaunching this
    # process after exit code 1 — confirm against the launcher.
    self.close()
    os._exit(1)
def quit(self):
"""
Disconnect from IRC | |
function
def rk4(x0, y0, f):
ds = 0.01 #min(1./NGX, 1./NGY, 0.01)
stotal = 0
xi = x0
yi = y0
xb, yb = blank_pos(xi, yi)
xf_traj = []
yf_traj = []
while check(xi, yi):
# Time step. First save the point.
xf_traj.append(xi)
yf_traj.append(yi)
# Next, advance one using RK4
try:
k1x, k1y = f(xi, yi)
k2x, k2y = f(xi + .5*ds*k1x, yi + .5*ds*k1y)
k3x, k3y = f(xi + .5*ds*k2x, yi + .5*ds*k2y)
k4x, k4y = f(xi + ds*k3x, yi + ds*k3y)
except IndexError:
# Out of the domain on one of the intermediate steps
break
xi += ds*(k1x+2*k2x+2*k3x+k4x) / 6.
yi += ds*(k1y+2*k2y+2*k3y+k4y) / 6.
# Final position might be out of the domain
if not check(xi, yi): break
stotal += ds
# Next, if s gets to thres, check blank.
new_xb, new_yb = blank_pos(xi, yi)
if new_xb != xb or new_yb != yb:
# New square, so check and colour. Quit if required.
if blank[new_yb,new_xb] == 0:
blank[new_yb,new_xb] = 1
bx_changes.append(new_xb)
by_changes.append(new_yb)
xb = new_xb
yb = new_yb
else:
break
if stotal > 2:
break
return stotal, xf_traj, yf_traj
integrator = rk4
sf, xf_traj, yf_traj = integrator(x0, y0, f)
sb, xb_traj, yb_traj = integrator(x0, y0, g)
stotal = sf + sb
x_traj = xb_traj[::-1] + xf_traj[1:]
y_traj = yb_traj[::-1] + yf_traj[1:]
## Tests to check length of traj. Remember, s in units of axes.
if len(x_traj) < 1: return None
if stotal > .2:
initxb, inityb = blank_pos(x0, y0)
blank[inityb, initxb] = 1
return x_traj, y_traj
else:
for xb, yb in zip(bx_changes, by_changes):
blank[yb, xb] = 0
return None
## A quick function for integrating trajectories if blank==0.
trajectories = []
def traj(xb, yb):
if xb < 0 or xb >= NBX or yb < 0 or yb >= NBY:
return
if blank[yb, xb] == 0:
t = rk4_integrate(xb*bx_spacing, yb*by_spacing)
if t is not None:
trajectories.append(t)
## Now we build up the trajectory set. I've found it best to look
## for blank==0 along the edges first, and work inwards.
for indent in range((max(NBX,NBY))//2):
for xi in range(max(NBX,NBY)-2*indent):
traj(xi+indent, indent)
traj(xi+indent, NBY-1-indent)
traj(indent, xi+indent)
traj(NBX-1-indent, xi+indent)
xs = [np.array(t[0])*DX+XOFF for t in trajectories]
ys = [np.array(t[1])*DY+YOFF for t in trajectories]
return xs, ys
# Demo driver: sample a 2D vector field, draw velocity segments (left)
# and the streamlines computed above (right).
xx = np.linspace(-3, 3, 100)
yy = np.linspace(-3, 3, 100)
Y, X = np.meshgrid(xx, yy)
U = -1 - X**2 + Y
V = 1 + X - Y**2
speed = np.sqrt(U*U + V*V)
# NOTE(review): np.arctan(V/U) loses the quadrant and divides by zero
# where U == 0; np.arctan2(V, U) would be the robust form — confirm intent.
theta = np.arctan(V/U)
x0 = X[::2, ::2].flatten()
y0 = Y[::2, ::2].flatten()
length = speed[::2, ::2].flatten()/40
angle = theta[::2, ::2].flatten()
x1 = x0 + length * np.cos(angle)
y1 = y0 + length * np.sin(angle)
xs, ys = streamlines(xx, yy, U.T, V.T, density=2)
cm = np.array(["#C7E9B4", "#7FCDBB", "#41B6C4", "#1D91C0", "#225EA8", "#0C2C84"])
# Map each segment's normalized speed onto one of the six colors above.
ix = ((length-length.min())/(length.max()-length.min())*5).astype('int')
colors = cm[ix]
p1 = figure(x_range=(-3,3 ), y_range=(-3, 3))
p1.segment(x0, y0, x1, y1, color=colors, line_width=2)
# Linked axes so panning/zooming one plot moves the other.
p2 = figure(x_range=p1.x_range, y_range=p1.y_range)
p2.multi_line(xs, ys, color="#ee6666", line_width=2, line_alpha=0.8)
output_file("vector.html", title="vector.py example")
show(gridplot([[p1,p2]], plot_width=400, plot_height=400)) # open a browser
from typing import Any, List, Tuple
import numpy as np
from bokeh.layouts import gridplot
from bokeh.plotting import figure, output_file, show
def streamlines(x: np.ndarray, y, u, v, density: float = 1) -> Tuple[List[Any], List[Any]]:
    ''' Return streamlines of a vector flow.

    * x and y are 1d arrays defining an *evenly spaced* grid.
    * u and v are 2d arrays (shape [y,x]) giving velocities.
    * density controls the closeness of the streamlines.

    Returns (xs, ys): equal-length lists where element i holds the x and
    y user-coordinates of one streamline.
    '''
    ## Set up some constants - size of the grid used.
    NGX = len(x)
    NGY = len(y)
    ## Constants used to convert between grid index coords and user coords.
    DX = x[1]-x[0]
    DY = y[1]-y[0]
    XOFF = x[0]
    YOFF = y[0]
    ## Now rescale velocity onto axes-coordinates
    u = u / (x[-1]-x[0])
    v = v / (y[-1]-y[0])
    speed = np.sqrt(u*u+v*v)
    ## s (path length) will now be in axes-coordinates, but we must
    ## rescale u for integrations.
    u *= NGX
    v *= NGY
    ## Now u and v in grid-coordinates.
    NBX = int(30*density)
    NBY = int(30*density)
    ## 'blank' marks coarse cells that already contain a streamline;
    ## this is what keeps the lines evenly spaced.
    blank = np.zeros((NBY, NBX))
    bx_spacing = NGX/float(NBX-1)
    by_spacing = NGY/float(NBY-1)

    def blank_pos(xi, yi):
        ## Nearest blank-mask cell for a grid-coordinate position.
        return int((xi / bx_spacing) + 0.5), \
               int((yi / by_spacing) + 0.5)

    def value_at(a, xi, yi):
        ## Bilinear interpolation of array 'a' at fractional index (xi, yi).
        ## FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
        ## the builtin int is the documented drop-in replacement.
        ## isinstance() also replaces the `type(...) ==` comparison.
        if isinstance(xi, np.ndarray):
            x = xi.astype(int)
            y = yi.astype(int)
        else:
            x = int(xi)
            y = int(yi)
        a00 = a[y, x]
        a01 = a[y, x+1]
        a10 = a[y+1, x]
        a11 = a[y+1, x+1]
        xt = xi - x
        yt = yi - y
        a0 = a00*(1-xt) + a01*xt
        a1 = a10*(1-xt) + a11*xt
        return a0*(1-yt) + a1*yt

    def rk4_integrate(x0, y0):
        ## This function does RK4 forward and back trajectories from
        ## the initial conditions, with the odd 'blank array'
        ## termination conditions. TODO tidy the integration loops.

        def f(xi, yi):
            ## Unit-speed velocity field (forward direction).
            dt_ds = 1./value_at(speed, xi, yi)
            ui = value_at(u, xi, yi)
            vi = value_at(v, xi, yi)
            return ui*dt_ds, vi*dt_ds

        def g(xi, yi):
            ## Unit-speed velocity field, reversed (backward direction).
            dt_ds = 1./value_at(speed, xi, yi)
            ui = value_at(u, xi, yi)
            vi = value_at(v, xi, yi)
            return -ui*dt_ds, -vi*dt_ds

        check = lambda xi, yi: xi >= 0 and xi < NGX-1 and yi >= 0 and yi < NGY-1

        bx_changes = []
        by_changes = []

        ## Integrator function
        def rk4(x0, y0, f):
            ds = 0.01  # min(1./NGX, 1./NGY, 0.01)
            stotal = 0
            xi = x0
            yi = y0
            xb, yb = blank_pos(xi, yi)
            xf_traj = []
            yf_traj = []
            while check(xi, yi):
                # Time step. First save the point.
                xf_traj.append(xi)
                yf_traj.append(yi)
                # Next, advance one using RK4
                try:
                    k1x, k1y = f(xi, yi)
                    k2x, k2y = f(xi + .5*ds*k1x, yi + .5*ds*k1y)
                    k3x, k3y = f(xi + .5*ds*k2x, yi + .5*ds*k2y)
                    k4x, k4y = f(xi + ds*k3x, yi + ds*k3y)
                except IndexError:
                    # Out of the domain on one of the intermediate steps
                    break
                xi += ds*(k1x+2*k2x+2*k3x+k4x) / 6.
                yi += ds*(k1y+2*k2y+2*k3y+k4y) / 6.
                # Final position might be out of the domain
                if not check(xi, yi):
                    break
                stotal += ds
                # Next, if s gets to thres, check blank.
                new_xb, new_yb = blank_pos(xi, yi)
                if new_xb != xb or new_yb != yb:
                    # New square, so check and colour. Quit if required.
                    if blank[new_yb, new_xb] == 0:
                        blank[new_yb, new_xb] = 1
                        bx_changes.append(new_xb)
                        by_changes.append(new_yb)
                        xb = new_xb
                        yb = new_yb
                    else:
                        break
                if stotal > 2:
                    break
            return stotal, xf_traj, yf_traj

        integrator = rk4

        sf, xf_traj, yf_traj = integrator(x0, y0, f)
        sb, xb_traj, yb_traj = integrator(x0, y0, g)
        stotal = sf + sb
        ## Stitch: backward half reversed + forward half (seed point once).
        x_traj = xb_traj[::-1] + xf_traj[1:]
        y_traj = yb_traj[::-1] + yf_traj[1:]

        ## Tests to check length of traj. Remember, s in units of axes.
        if len(x_traj) < 1:
            return None
        if stotal > .2:
            initxb, inityb = blank_pos(x0, y0)
            blank[inityb, initxb] = 1
            return x_traj, y_traj
        else:
            ## Too short: roll back the blank cells this attempt claimed.
            for xb, yb in zip(bx_changes, by_changes):
                blank[yb, xb] = 0
            return None

    ## A quick function for integrating trajectories if blank==0.
    trajectories = []

    def traj(xb, yb):
        if xb < 0 or xb >= NBX or yb < 0 or yb >= NBY:
            return
        if blank[yb, xb] == 0:
            t = rk4_integrate(xb*bx_spacing, yb*by_spacing)
            if t is not None:
                trajectories.append(t)

    ## Now we build up the trajectory set. I've found it best to look
    ## for blank==0 along the edges first, and work inwards.
    for indent in range((max(NBX, NBY))//2):
        for xi in range(max(NBX, NBY)-2*indent):
            traj(xi+indent, indent)
            traj(xi+indent, NBY-1-indent)
            traj(indent, xi+indent)
            traj(NBX-1-indent, xi+indent)

    ## Convert from grid-index coordinates back to user coordinates.
    xs = [np.array(t[0])*DX+XOFF for t in trajectories]
    ys = [np.array(t[1])*DY+YOFF for t in trajectories]
    return xs, ys
# Demo driver: sample a 2D vector field, draw velocity segments (left)
# and the streamlines computed above (right).
xx = np.linspace(-3, 3, 100)
yy = np.linspace(-3, 3, 100)
Y, X = np.meshgrid(xx, yy)
U = -1 - X**2 + Y
V = 1 + X - Y**2
speed = np.sqrt(U*U + V*V)
# NOTE(review): np.arctan(V/U) loses the quadrant and divides by zero
# where U == 0; np.arctan2(V, U) would be the robust form — confirm intent.
theta = np.arctan(V/U)
x0 = X[::2, ::2].flatten()
y0 = Y[::2, ::2].flatten()
length = speed[::2, ::2].flatten()/40
angle = theta[::2, ::2].flatten()
x1 = x0 + length * np.cos(angle)
y1 = y0 + length * np.sin(angle)
xs, ys = streamlines(xx, yy, U.T, V.T, density=2)
cm = np.array(["#C7E9B4", "#7FCDBB", "#41B6C4", "#1D91C0", "#225EA8", "#0C2C84"])
# Map each segment's normalized speed onto one of the six colors above.
ix = ((length-length.min())/(length.max()-length.min())*5).astype('int')
colors = cm[ix]
p1 = figure(x_range=(-3,3 ), y_range=(-3, 3))
p1.segment(x0, y0, x1, y1, color=colors, line_width=2)
# Linked axes so panning/zooming one plot moves the other.
p2 = figure(x_range=p1.x_range, y_range=p1.y_range)
p2.multi_line(xs, ys, color="#ee6666", line_width=2, line_alpha=0.8)
output_file("vector.html", title="vector.py example")
show(gridplot([[p1,p2]], plot_width=400, plot_height=400)) # open a browser
import numpy as np
from bokeh.plotting import figure, output_file, show
from bokeh.sampledata.les_mis import data
nodes = data['nodes']
names = [node['name'] for node in sorted(data['nodes'], key=lambda | |
# Extracted from CPython (repo marker: oleksandr-pavlyk/cpython), Lib/test/test_hmac.py
import binascii
import functools
import hmac
import hashlib
import unittest
import unittest.mock
import warnings
from test.support import hashlib_helper, check_disallow_instantiation
from _operator import _compare_digest as operator_compare_digest
try:
import _hashlib as _hashopenssl
from _hashlib import HMAC as C_HMAC
from _hashlib import hmac_new as c_hmac_new
from _hashlib import compare_digest as openssl_compare_digest
except ImportError:
_hashopenssl = None
C_HMAC = None
c_hmac_new = None
openssl_compare_digest = None
try:
import _sha256 as sha256_module
except ImportError:
sha256_module = None
def ignore_warning(func):
    """Decorator: run *func* with DeprecationWarning suppressed.

    The warning filter change is scoped to the call via catch_warnings,
    so the global filter state is restored afterwards.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        with warnings.catch_warnings():
            warnings.filterwarnings(
                "ignore", category=DeprecationWarning
            )
            return func(*args, **kwargs)
    return wrapper
class TestVectorsTestCase(unittest.TestCase):
    """Check HMAC against published vectors (RFC 2202 md5/sha1, RFC 4231 sha2)."""

    def assert_hmac_internals(
            self, h, digest, hashname, digest_size, block_size
    ):
        # Verify the digest value and the public metadata of an HMAC object.
        self.assertEqual(h.hexdigest().upper(), digest.upper())
        self.assertEqual(h.digest(), binascii.unhexlify(digest))
        self.assertEqual(h.name, f"hmac-{hashname}")
        self.assertEqual(h.digest_size, digest_size)
        self.assertEqual(h.block_size, block_size)

    def assert_hmac(
            self, key, data, digest, hashfunc, hashname, digest_size, block_size
    ):
        # Exercise every construction path with the same vector: HMAC()
        # with callable and named digestmod, copy(), hmac.new(),
        # hmac.digest(), and the private _init_old/_init_hmac paths.
        h = hmac.HMAC(key, data, digestmod=hashfunc)
        self.assert_hmac_internals(
            h, digest, hashname, digest_size, block_size
        )
        h = hmac.HMAC(key, data, digestmod=hashname)
        self.assert_hmac_internals(
            h, digest, hashname, digest_size, block_size
        )
        # A mutated copy must not affect the original's digest.
        h = hmac.HMAC(key, digestmod=hashname)
        h2 = h.copy()
        h2.update(b"test update")
        h.update(data)
        self.assertEqual(h.hexdigest().upper(), digest.upper())
        h = hmac.new(key, data, digestmod=hashname)
        self.assert_hmac_internals(
            h, digest, hashname, digest_size, block_size
        )
        h = hmac.new(key, None, digestmod=hashname)
        h.update(data)
        self.assertEqual(h.hexdigest().upper(), digest.upper())
        h = hmac.new(key, digestmod=hashname)
        h.update(data)
        self.assertEqual(h.hexdigest().upper(), digest.upper())
        h = hmac.new(key, data, digestmod=hashfunc)
        self.assertEqual(h.hexdigest().upper(), digest.upper())
        self.assertEqual(
            hmac.digest(key, data, digest=hashname),
            binascii.unhexlify(digest)
        )
        self.assertEqual(
            hmac.digest(key, data, digest=hashfunc),
            binascii.unhexlify(digest)
        )
        h = hmac.HMAC.__new__(hmac.HMAC)
        h._init_old(key, data, digestmod=hashname)
        self.assert_hmac_internals(
            h, digest, hashname, digest_size, block_size
        )
        # Repeat against the OpenSSL-backed implementation when available.
        if c_hmac_new is not None:
            h = c_hmac_new(key, data, digestmod=hashname)
            self.assert_hmac_internals(
                h, digest, hashname, digest_size, block_size
            )
            h = c_hmac_new(key, digestmod=hashname)
            h2 = h.copy()
            h2.update(b"test update")
            h.update(data)
            self.assertEqual(h.hexdigest().upper(), digest.upper())
            func = getattr(_hashopenssl, f"openssl_{hashname}")
            h = c_hmac_new(key, data, digestmod=func)
            self.assert_hmac_internals(
                h, digest, hashname, digest_size, block_size
            )
            h = hmac.HMAC.__new__(hmac.HMAC)
            h._init_hmac(key, data, digestmod=hashname)
            self.assert_hmac_internals(
                h, digest, hashname, digest_size, block_size
            )

    @hashlib_helper.requires_hashdigest('md5', openssl=True)
    def test_md5_vectors(self):
        # Test the HMAC module against test vectors from the RFC.
        def md5test(key, data, digest):
            self.assert_hmac(
                key, data, digest,
                hashfunc=hashlib.md5,
                hashname="md5",
                digest_size=16,
                block_size=64
            )
        md5test(b"\x0b" * 16,
                b"Hi There",
                "9294727A3638BB1C13F48EF8158BFC9D")
        md5test(b"Jefe",
                b"what do ya want for nothing?",
                "750c783e6ab0b503eaa86e310a5db738")
        md5test(b"\xaa" * 16,
                b"\xdd" * 50,
                "56be34521d144c88dbb8c733f0e8b3f6")
        md5test(bytes(range(1, 26)),
                b"\xcd" * 50,
                "697eaf0aca3a3aea3a75164746ffaa79")
        md5test(b"\x0C" * 16,
                b"Test With Truncation",
                "56461ef2342edc00f9bab995690efd4c")
        md5test(b"\xaa" * 80,
                b"Test Using Larger Than Block-Size Key - Hash Key First",
                "6b1ab7fe4bd7bf8f0b62e6ce61b9d0cd")
        md5test(b"\xaa" * 80,
                (b"Test Using Larger Than Block-Size Key "
                 b"and Larger Than One Block-Size Data"),
                "6f630fad67cda0ee1fb1f562db3aa53e")

    @hashlib_helper.requires_hashdigest('sha1', openssl=True)
    def test_sha_vectors(self):
        def shatest(key, data, digest):
            self.assert_hmac(
                key, data, digest,
                hashfunc=hashlib.sha1,
                hashname="sha1",
                digest_size=20,
                block_size=64
            )
        shatest(b"\x0b" * 20,
                b"Hi There",
                "b617318655057264e28bc0b6fb378c8ef146be00")
        shatest(b"Jefe",
                b"what do ya want for nothing?",
                "effcdf6ae5eb2fa2d27416d5f184df9c259a7c79")
        shatest(b"\xAA" * 20,
                b"\xDD" * 50,
                "125d7342b9ac11cd91a39af48aa17b4f63f175d3")
        shatest(bytes(range(1, 26)),
                b"\xCD" * 50,
                "4c9007f4026250c6bc8414f9bf50c86c2d7235da")
        shatest(b"\x0C" * 20,
                b"Test With Truncation",
                "4c1a03424b55e07fe7f27be1d58bb9324a9a5a04")
        shatest(b"\xAA" * 80,
                b"Test Using Larger Than Block-Size Key - Hash Key First",
                "aa4ae5e15272d00e95705637ce8a3b55ed402112")
        shatest(b"\xAA" * 80,
                (b"Test Using Larger Than Block-Size Key "
                 b"and Larger Than One Block-Size Data"),
                "e8e99d0f45237d786d6bbaa7965c7808bbff1a91")

    def _rfc4231_test_cases(self, hashfunc, hash_name, digest_size, block_size):
        # Shared RFC 4231 vector runner for the sha2 family; each test
        # case carries the expected digest for every hash function.
        def hmactest(key, data, hexdigests):
            digest = hexdigests[hashfunc]
            self.assert_hmac(
                key, data, digest,
                hashfunc=hashfunc,
                hashname=hash_name,
                digest_size=digest_size,
                block_size=block_size
            )
        # 4.2. Test Case 1
        hmactest(key = b'\x0b'*20,
                 data = b'Hi There',
                 hexdigests = {
                     hashlib.sha224: '896fb1128abbdf196832107cd49df33f'
                                     '47b4b1169912ba4f53684b22',
                     hashlib.sha256: 'b0344c61d8db38535ca8afceaf0bf12b'
                                     '881dc200c9833da726e9376c2e32cff7',
                     hashlib.sha384: 'afd03944d84895626b0825f4ab46907f'
                                     '15f9dadbe4101ec682aa034c7cebc59c'
                                     'faea9ea9076ede7f4af152e8b2fa9cb6',
                     hashlib.sha512: '87aa7cdea5ef619d4ff0b4241a1d6cb0'
                                     '2379f4e2ce4ec2787ad0b30545e17cde'
                                     'daa833b7d6b8a702038b274eaea3f4e4'
                                     'be9d914eeb61f1702e696c203a126854',
                 })
        # 4.3. Test Case 2
        hmactest(key = b'Jefe',
                 data = b'what do ya want for nothing?',
                 hexdigests = {
                     hashlib.sha224: 'a30e01098bc6dbbf45690f3a7e9e6d0f'
                                     '8bbea2a39e6148008fd05e44',
                     hashlib.sha256: '5bdcc146bf60754e6a042426089575c7'
                                     '5a003f089d2739839dec58b964ec3843',
                     hashlib.sha384: 'af45d2e376484031617f78d2b58a6b1b'
                                     '9c7ef464f5a01b47e42ec3736322445e'
                                     '8e2240ca5e69e2c78b3239ecfab21649',
                     hashlib.sha512: '164b7a7bfcf819e2e395fbe73b56e0a3'
                                     '87bd64222e831fd610270cd7ea250554'
                                     '9758bf75c05a994a6d034f65f8f0e6fd'
                                     'caeab1a34d4a6b4b636e070a38bce737',
                 })
        # 4.4. Test Case 3
        hmactest(key = b'\xaa'*20,
                 data = b'\xdd'*50,
                 hexdigests = {
                     hashlib.sha224: '7fb3cb3588c6c1f6ffa9694d7d6ad264'
                                     '9365b0c1f65d69d1ec8333ea',
                     hashlib.sha256: '773ea91e36800e46854db8ebd09181a7'
                                     '2959098b3ef8c122d9635514ced565fe',
                     hashlib.sha384: '88062608d3e6ad8a0aa2ace014c8a86f'
                                     '0aa635d947ac9febe83ef4e55966144b'
                                     '2a5ab39dc13814b94e3ab6e101a34f27',
                     hashlib.sha512: 'fa73b0089d56a284efb0f0756c890be9'
                                     'b1b5dbdd8ee81a3655f83e33b2279d39'
                                     'bf3e848279a722c806b485a47e67c807'
                                     'b946a337bee8942674278859e13292fb',
                 })
        # 4.5. Test Case 4
        hmactest(key = bytes(x for x in range(0x01, 0x19+1)),
                 data = b'\xcd'*50,
                 hexdigests = {
                     hashlib.sha224: '6c11506874013cac6a2abc1bb382627c'
                                     'ec6a90d86efc012de7afec5a',
                     hashlib.sha256: '82558a389a443c0ea4cc819899f2083a'
                                     '85f0faa3e578f8077a2e3ff46729665b',
                     hashlib.sha384: '3e8a69b7783c25851933ab6290af6ca7'
                                     '7a9981480850009cc5577c6e1f573b4e'
                                     '6801dd23c4a7d679ccf8a386c674cffb',
                     hashlib.sha512: 'b0ba465637458c6990e5a8c5f61d4af7'
                                     'e576d97ff94b872de76f8050361ee3db'
                                     'a91ca5c11aa25eb4d679275cc5788063'
                                     'a5f19741120c4f2de2adebeb10a298dd',
                 })
        # 4.7. Test Case 6
        hmactest(key = b'\xaa'*131,
                 data = b'Test Using Larger Than Block-Siz'
                        b'e Key - Hash Key First',
                 hexdigests = {
                     hashlib.sha224: '95e9a0db962095adaebe9b2d6f0dbce2'
                                     'd499f112f2d2b7273fa6870e',
                     hashlib.sha256: '60e431591ee0b67f0d8a26aacbf5b77f'
                                     '8e0bc6213728c5140546040f0ee37f54',
                     hashlib.sha384: '4ece084485813e9088d2c63a041bc5b4'
                                     '4f9ef1012a2b588f3cd11f05033ac4c6'
                                     '0c2ef6ab4030fe8296248df163f44952',
                     hashlib.sha512: '80b24263c7c1a3ebb71493c1dd7be8b4'
                                     '9b46d1f41b4aeec1121b013783f8f352'
                                     '6b56d037e05f2598bd0fd2215d6a1e52'
                                     '95e64f73f63f0aec8b915a985d786598',
                 })
        # 4.8. Test Case 7
        hmactest(key = b'\xaa'*131,
                 data = b'This is a test using a larger th'
                        b'an block-size key and a larger t'
                        b'han block-size data. The key nee'
                        b'ds to be hashed before being use'
                        b'd by the HMAC algorithm.',
                 hexdigests = {
                     hashlib.sha224: '3a854166ac5d9f023f54d517d0b39dbd'
                                     '946770db9c2b95c9f6f565d1',
                     hashlib.sha256: '9b09ffa71b942fcb27635fbcd5b0e944'
                                     'bfdc63644f0713938a7f51535c3a35e2',
                     hashlib.sha384: '6617178e941f020d351e2f254e8fd32c'
                                     '602420feb0b8fb9adccebb82461e99c5'
                                     'a678cc31e799176d3860e6110c46523e',
                     hashlib.sha512: 'e37b6a775dc87dbaa4dfa9f96e5e3ffd'
                                     'debd71f8867289865df5a32d20cdc944'
                                     'b6022cac3c4982b10d5eeb55c3e4de15'
                                     '134676fb6de0446065c97440fa8c6a58',
                 })

    @hashlib_helper.requires_hashdigest('sha224', openssl=True)
    def test_sha224_rfc4231(self):
        self._rfc4231_test_cases(hashlib.sha224, 'sha224', 28, 64)

    @hashlib_helper.requires_hashdigest('sha256', openssl=True)
    def test_sha256_rfc4231(self):
        self._rfc4231_test_cases(hashlib.sha256, 'sha256', 32, 64)

    @hashlib_helper.requires_hashdigest('sha384', openssl=True)
    def test_sha384_rfc4231(self):
        self._rfc4231_test_cases(hashlib.sha384, 'sha384', 48, 128)

    @hashlib_helper.requires_hashdigest('sha512', openssl=True)
    def test_sha512_rfc4231(self):
        self._rfc4231_test_cases(hashlib.sha512, 'sha512', 64, 128)

    @hashlib_helper.requires_hashdigest('sha256')
    def test_legacy_block_size_warnings(self):
        # A digestmod without (or with a too-small) block_size must
        # produce a RuntimeWarning in the legacy code path.
        class MockCrazyHash(object):
            """Ain't no block_size attribute here."""
            def __init__(self, *args):
                self._x = hashlib.sha256(*args)
                self.digest_size = self._x.digest_size
            def update(self, v):
                self._x.update(v)
            def digest(self):
                return self._x.digest()

        with warnings.catch_warnings():
            warnings.simplefilter('error', RuntimeWarning)
            with self.assertRaises(RuntimeWarning):
                hmac.HMAC(b'a', b'b', digestmod=MockCrazyHash)
                self.fail('Expected warning about missing block_size')
            MockCrazyHash.block_size = 1
            with self.assertRaises(RuntimeWarning):
                hmac.HMAC(b'a', b'b', digestmod=MockCrazyHash)
                self.fail('Expected warning about small block_size')

    def test_with_digestmod_no_default(self):
        """The digestmod parameter is required as of Python 3.8."""
        with self.assertRaisesRegex(TypeError, r'required.*digestmod'):
            key = b"\x0b" * 16
            data = b"Hi There"
            hmac.HMAC(key, data, digestmod=None)
        with self.assertRaisesRegex(TypeError, r'required.*digestmod'):
            hmac.new(key, data)
        with self.assertRaisesRegex(TypeError, r'required.*digestmod'):
            hmac.HMAC(key, msg=data, digestmod='')
class ConstructorTestCase(unittest.TestCase):
    """Constructor behaviour: accepted key/msg types and failure modes."""

    # HMAC-SHA256 of key=b"key", msg=b"hash this!".
    expected = (
        "6c845b47f52b3b47f6590c502db7825aad757bf4fadc8fa972f7cd2e76a5bdeb"
    )

    @hashlib_helper.requires_hashdigest('sha256')
    def test_normal(self):
        # Standard constructor call.
        try:
            hmac.HMAC(b"key", digestmod='sha256')
        except Exception:
            self.fail("Standard constructor call raised exception.")

    @hashlib_helper.requires_hashdigest('sha256')
    def test_with_str_key(self):
        # Pass a key of type str, which is an error, because it expects a key
        # of type bytes
        with self.assertRaises(TypeError):
            h = hmac.HMAC("key", digestmod='sha256')

    @hashlib_helper.requires_hashdigest('sha256')
    def test_dot_new_with_str_key(self):
        # Pass a key of type str, which is an error, because it expects a key
        # of type bytes
        with self.assertRaises(TypeError):
            h = hmac.new("key", digestmod='sha256')

    @hashlib_helper.requires_hashdigest('sha256')
    def test_withtext(self):
        # Constructor call with text.
        try:
            h = hmac.HMAC(b"key", b"hash this!", digestmod='sha256')
        except Exception:
            self.fail("Constructor call with text argument raised exception.")
        self.assertEqual(h.hexdigest(), self.expected)

    @hashlib_helper.requires_hashdigest('sha256')
    def test_with_bytearray(self):
        # bytes-like (mutable) key and message must be accepted too.
        try:
            h = hmac.HMAC(bytearray(b"key"), bytearray(b"hash this!"),
                          digestmod="sha256")
        except Exception:
            self.fail("Constructor call with bytearray arguments raised exception.")
        self.assertEqual(h.hexdigest(), self.expected)

    @hashlib_helper.requires_hashdigest('sha256')
    def test_with_memoryview_msg(self):
        try:
            h = hmac.HMAC(b"key", memoryview(b"hash this!"), digestmod="sha256")
        except Exception:
            self.fail("Constructor call with memoryview msg raised exception.")
        self.assertEqual(h.hexdigest(), self.expected)

    @hashlib_helper.requires_hashdigest('sha256')
    def test_withmodule(self):
        # Constructor call with text and digest module.
        try:
            h = hmac.HMAC(b"key", b"", hashlib.sha256)
        except Exception:
            self.fail("Constructor call with hashlib.sha256 raised exception.")

    @unittest.skipUnless(C_HMAC is not None, 'need _hashlib')
    def test_internal_types(self):
        # internal types like _hashlib.C_HMAC are not constructable
        check_disallow_instantiation(self, C_HMAC)
        with self.assertRaisesRegex(TypeError, "immutable type"):
            C_HMAC.value = None

    @unittest.skipUnless(sha256_module is not None, 'need _sha256')
    def test_with_sha256_module(self):
        # The pure-Python _sha256 module must interoperate as digestmod.
        h = hmac.HMAC(b"key", b"hash this!", digestmod=sha256_module.sha256)
        self.assertEqual(h.hexdigest(), self.expected)
        self.assertEqual(h.name, "hmac-sha256")
        digest = hmac.digest(b"key", b"hash this!", sha256_module.sha256)
        self.assertEqual(digest, binascii.unhexlify(self.expected))
class SanityTestCase(unittest.TestCase):
    """Smoke test: every public HMAC method runs without raising."""

    @hashlib_helper.requires_hashdigest('sha256')
    def test_exercise_all_methods(self):
        # Exercising all methods once.
        # This must not raise any exceptions
        try:
            h = hmac.HMAC(b"my secret key", digestmod="sha256")
            h.update(b"compute the hash of this text!")
            h.digest()
            h.hexdigest()
            h.copy()
        except Exception:
            self.fail("Exception raised during normal usage of HMAC class.")
class CopyTestCase(unittest.TestCase):
@hashlib_helper.requires_hashdigest('sha256')
def test_attributes_old(self):
# Testing if attributes are of same type.
h1 = hmac.HMAC.__new__(hmac.HMAC)
h1._init_old(b"key", b"msg", digestmod="sha256")
h2 = h1.copy()
self.assertEqual(type(h1._inner), type(h2._inner),
"Types of inner don't match.")
self.assertEqual(type(h1._outer), type(h2._outer),
"Types of outer don't match.")
@hashlib_helper.requires_hashdigest('sha256')
def test_realcopy_old(self):
# Testing if the copy method created a real copy.
h1 = hmac.HMAC.__new__(hmac.HMAC)
h1._init_old(b"key", b"msg", digestmod="sha256")
h2 = h1.copy()
# Using id() in case somebody has overridden __eq__/__ne__.
self.assertTrue(id(h1) != id(h2), "No real copy of the HMAC instance.")
self.assertTrue(id(h1._inner) != id(h2._inner),
"No real copy of the attribute 'inner'.")
self.assertTrue(id(h1._outer) != id(h2._outer),
"No real copy of the attribute 'outer'.")
self.assertIs(h1._hmac, None)
@unittest.skipIf(_hashopenssl is None, "test requires _hashopenssl")
@hashlib_helper.requires_hashdigest('sha256')
def test_realcopy_hmac(self):
h1 = hmac.HMAC.__new__(hmac.HMAC)
h1._init_hmac(b"key", b"msg", digestmod="sha256")
h2 = h1.copy()
self.assertTrue(id(h1._hmac) != id(h2._hmac))
@hashlib_helper.requires_hashdigest('sha256')
def test_equality(self):
# Testing if the copy has the same digests.
h1 = hmac.HMAC(b"key", digestmod="sha256")
h1.update(b"some random text")
h2 = h1.copy()
self.assertEqual(h1.digest(), h2.digest(),
"Digest of copy doesn't match original digest.")
self.assertEqual(h1.hexdigest(), h2.hexdigest(),
"Hexdigest of copy doesn't match original hexdigest.")
@hashlib_helper.requires_hashdigest('sha256')
def test_equality_new(self):
# Testing if the copy has the same digests with hmac.new().
h1 = hmac.new(b"key", digestmod="sha256")
h1.update(b"some random text")
h2 = h1.copy()
self.assertTrue(
id(h1) != id(h2), "No real copy of the HMAC instance."
)
self.assertEqual(h1.digest(), h2.digest(),
"Digest of copy doesn't match | |
+ 12*m.b69 + 12*m.b70 - m.x176 - m.x181 - m.x182 + m.x386 + m.x391 + m.x392 <= 12)
# Machine-generated Pyomo model constraints. The pattern appears to be a
# big-M style linearisation (M = 12) pairing binary b* variables with x*
# variables -- confirm against the model generator before hand-editing.
# Reflowed to one statement per line; coefficients are unchanged.
m.c4306 = Constraint(expr= 12*m.b66 + 12*m.b69 + 12*m.b70 - m.x178 - m.x181 - m.x182 + m.x388 + m.x391 + m.x392 <= 12)
m.c4307 = Constraint(expr= 12*m.b72 + 12*m.b75 - m.x184 - m.x187 + m.x282 + m.x285 + m.x380 + m.x383 <= 12)
m.c4308 = Constraint(expr= 12*m.b72 + 12*m.b76 - m.x184 - m.x188 + m.x282 + m.x286 + m.x380 + m.x384 <= 12)
m.c4309 = Constraint(expr= 12*m.b73 + 12*m.b77 - m.x185 - m.x189 + m.x283 + m.x287 + m.x381 + m.x385 <= 12)
m.c4310 = Constraint(expr= 12*m.b73 + 12*m.b78 - m.x185 - m.x190 + m.x283 + m.x288 + m.x381 + m.x386 <= 12)
m.c4311 = Constraint(expr= 12*m.b73 + 12*m.b79 - m.x185 - m.x191 + m.x283 + m.x289 + m.x381 + m.x387 <= 12)
m.c4312 = Constraint(expr= 12*m.b74 + 12*m.b80 - m.x186 - m.x192 + m.x284 + m.x290 + m.x382 + m.x388 <= 12)
m.c4313 = Constraint(expr= 12*m.b74 + 12*m.b81 - m.x186 - m.x193 + m.x284 + m.x291 + m.x382 + m.x389 <= 12)
m.c4314 = Constraint(expr= 12*m.b75 + 12*m.b82 - m.x187 - m.x194 + m.x285 + m.x292 + m.x383 + m.x390 <= 12)
m.c4315 = Constraint(expr= 12*m.b77 + 12*m.b82 - m.x189 - m.x194 + m.x287 + m.x292 + m.x385 + m.x390 <= 12)
m.c4316 = Constraint(expr= 12*m.b79 + 12*m.b85 - m.x191 - m.x197 + m.x289 + m.x295 + m.x387 + m.x393 <= 12)
m.c4317 = Constraint(expr= 12*m.b81 + 12*m.b85 - m.x193 - m.x197 + m.x291 + m.x295 + m.x389 + m.x393 <= 12)
m.c4318 = Constraint(expr= 12*m.b82 + 12*m.b83 - m.x194 - m.x195 + m.x292 + m.x293 + m.x390 + m.x391 <= 12)
m.c4319 = Constraint(expr= 12*m.b84 + 12*m.b85 - m.x196 - m.x197 + m.x294 + m.x295 + m.x392 + m.x393 <= 12)
m.c4320 = Constraint(expr= 12*m.b72 + 12*m.b73 + 12*m.b74 - m.x184 - m.x185 - m.x186 + m.x282 + m.x283 + m.x284 + m.x380 + m.x381 + m.x382 <= 12)
m.c4321 = Constraint(expr= 12*m.b76 + 12*m.b83 + 12*m.b84 - m.x188 - m.x195 - m.x196 + m.x286 + m.x293 + m.x294 + m.x384 + m.x391 + m.x392 <= 12)
m.c4322 = Constraint(expr= 12*m.b78 + 12*m.b83 + 12*m.b84 - m.x190 - m.x195 - m.x196 + m.x288 + m.x293 + m.x294 + m.x386 + m.x391 + m.x392 <= 12)
m.c4323 = Constraint(expr= 12*m.b80 + 12*m.b83 + 12*m.b84 - m.x192 - m.x195 - m.x196 + m.x290 + m.x293 + m.x294 + m.x388 + m.x391 + m.x392 <= 12)
m.c4324 = Constraint(expr= 12*m.b86 + 12*m.b89 - m.x198 - m.x201 + m.x282 + m.x285 + m.x296 + m.x299 + m.x380 + m.x383 <= 12)
m.c4325 = Constraint(expr= 12*m.b86 + 12*m.b90 - m.x198 - m.x202 + m.x282 + m.x286 + m.x296 + m.x300 + m.x380 + m.x384 <= 12)
m.c4326 = Constraint(expr= 12*m.b87 + 12*m.b91 - m.x199 - m.x203 + m.x283 + m.x287 + m.x297 + m.x301 + m.x381 + m.x385 <= 12)
m.c4327 = Constraint(expr= 12*m.b87 + 12*m.b92 - m.x199 - m.x204 + m.x283 + m.x288 + m.x297 + m.x302 + m.x381 + m.x386 <= 12)
m.c4328 = Constraint(expr= 12*m.b87 + 12*m.b93 - m.x199 - m.x205 + m.x283 + m.x289 + m.x297 + m.x303 + m.x381 + m.x387 <= 12)
m.c4329 = Constraint(expr= 12*m.b88 + 12*m.b94 - m.x200 - m.x206 + m.x284 + m.x290 + m.x298 + m.x304 + m.x382 + m.x388 <= 12)
m.c4330 = Constraint(expr= 12*m.b88 + 12*m.b95 - m.x200 - m.x207 + m.x284 + m.x291 + m.x298 + m.x305 + m.x382 + m.x389 <= 12)
m.c4331 = Constraint(expr= 12*m.b89 + 12*m.b96 - m.x201 - m.x208 + m.x285 + m.x292 + m.x299 + m.x306 + m.x383 + m.x390 <= 12)
m.c4332 = Constraint(expr= 12*m.b91 + 12*m.b96 - m.x203 - m.x208 + m.x287 + m.x292 + m.x301 + m.x306 + m.x385 + m.x390 <= 12)
m.c4333 = Constraint(expr= 12*m.b93 + 12*m.b99 - m.x205 - m.x211 + m.x289 + m.x295 + m.x303 + m.x309 + m.x387 + m.x393 <= 12)
m.c4334 = Constraint(expr= 12*m.b95 + 12*m.b99 - m.x207 - m.x211 + m.x291 + m.x295 + m.x305 + m.x309 + m.x389 + m.x393 <= 12)
m.c4335 = Constraint(expr= 12*m.b96 + 12*m.b97 - m.x208 - m.x209 + m.x292 + m.x293 + m.x306 + m.x307 + m.x390 + m.x391 <= 12)
m.c4336 = Constraint(expr= 12*m.b98 + 12*m.b99 - m.x210 - m.x211 + m.x294 + m.x295 + m.x308 + m.x309 + m.x392 + m.x393 <= 12)
m.c4337 = Constraint(expr= 12*m.b86 + 12*m.b87 + 12*m.b88 - m.x198 - m.x199 - m.x200 + m.x282 + m.x283 + m.x284 + m.x296 + m.x297 + m.x298 + m.x380 + m.x381 + m.x382 <= 12)
m.c4338 = Constraint(expr= 12*m.b90 + 12*m.b97 + 12*m.b98 - m.x202 - m.x209 - m.x210 + m.x286 + m.x293 + m.x294 + m.x300 + m.x307 + m.x308 + m.x384 + m.x391 + m.x392 <= 12)
m.c4339 = Constraint(expr= 12*m.b92 + 12*m.b97 + 12*m.b98 - m.x204 - m.x209 - m.x210 + m.x288 + m.x293 + m.x294 + m.x302 + m.x307 + m.x308 + m.x386 + m.x391 + m.x392 <= 12)
m.c4340 = Constraint(expr= 12*m.b94 + 12*m.b97 + 12*m.b98 - m.x206 - m.x209 - m.x210 + m.x290 + m.x293 + m.x294 + m.x304 + m.x307 + m.x308 + m.x388 + m.x391 + m.x392 <= 12)
m.c4341 = Constraint(expr= 12*m.b100 + 12*m.b103 - m.x212 - m.x215 + m.x282 + m.x285 + m.x296 + m.x299 + m.x310 + m.x313 + m.x380 + m.x383 <= 12)
m.c4342 = Constraint(expr= 12*m.b100 + 12*m.b104 - m.x212 - m.x216 + m.x282 + m.x286 + m.x296 + m.x300 + m.x310 + m.x314 + m.x380 + m.x384 <= 12)
m.c4343 = Constraint(expr= 12*m.b101 + 12*m.b105 - m.x213 - m.x217 + m.x283 + m.x287 + m.x297 + m.x301 + m.x311 + m.x315 + m.x381 + m.x385 <= 12)
m.c4344 = Constraint(expr= 12*m.b101 + 12*m.b106 - m.x213 - m.x218 + m.x283 + m.x288 + m.x297 + m.x302 + m.x311 + m.x316 + m.x381 + m.x386 <= 12)
m.c4345 = Constraint(expr= 12*m.b101 + 12*m.b107 - m.x213 - m.x219 + m.x283 + m.x289 + m.x297 + m.x303 + m.x311 + m.x317 + m.x381 + m.x387 <= 12)
m.c4346 = Constraint(expr= 12*m.b102 + 12*m.b108 - m.x214 - m.x220 + m.x284 + m.x290 + m.x298 + m.x304 + m.x312 + m.x318 + m.x382 + m.x388 <= 12)
m.c4347 = Constraint(expr= 12*m.b102 + 12*m.b109 - m.x214 - m.x221 + m.x284 + m.x291 + m.x298 + m.x305 + m.x312 + m.x319 + m.x382 + m.x389 <= 12)
m.c4348 = Constraint(expr= 12*m.b103 + 12*m.b110 - m.x215 - m.x222 + m.x285 + m.x292 + m.x299 + m.x306 + m.x313 + m.x320 + m.x383 + m.x390 <= 12)
m.c4349 = Constraint(expr= 12*m.b105 + 12*m.b110 - m.x217 - m.x222 + m.x287 + m.x292 + m.x301 + m.x306 + m.x315 + m.x320 + m.x385 + m.x390 <= 12)
m.c4350 = Constraint(expr= 12*m.b107 + 12*m.b113 - m.x219 - m.x225 + m.x289 + m.x295 + m.x303 + m.x309 + m.x317 + m.x323 + m.x387 + m.x393 <= 12)
m.c4351 = Constraint(expr= 12*m.b109 + 12*m.b113 - m.x221 - m.x225 + m.x291 + m.x295 + m.x305 + m.x309 + m.x319 + m.x323 + m.x389 + m.x393 <= 12)
m.c4352 = Constraint(expr= 12*m.b110 + 12*m.b111 - m.x222 - m.x223 + m.x292 + m.x293 + m.x306 + m.x307 + m.x320 + m.x321 + m.x390 + m.x391 <= 12)
m.c4353 = Constraint(expr= 12*m.b112 + 12*m.b113 - m.x224 - m.x225 + m.x294 + m.x295 + m.x308 + m.x309 + m.x322 + m.x323 + m.x392 + m.x393 <= 12)
m.c4354 = Constraint(expr= 12*m.b100 + 12*m.b101 + 12*m.b102 - m.x212 - m.x213 - m.x214 + m.x282 + m.x283 + m.x284 + m.x296 + m.x297 + m.x298 + m.x310 + m.x311 + m.x312 + m.x380 + m.x381 + m.x382 <= 12)
m.c4355 = Constraint(expr= 12*m.b104 + 12*m.b111 + 12*m.b112 - m.x216 - m.x223 - m.x224 + m.x286 + m.x293 + m.x294 + m.x300 + m.x307 + m.x308 + m.x314 + m.x321 + m.x322 + m.x384 + m.x391 + m.x392 <= 12)
m.c4356 = Constraint(expr= 12*m.b106 + 12*m.b111 + 12*m.b112 - m.x218 - m.x223 - m.x224 + m.x288 + m.x293 + m.x294
+ m.x302 + m.x307 + m.x308 + | |
<reponame>preranaandure/wildlifecompliance<gh_stars>1-10
import abc
import ast
import logging
import datetime
from datetime import date, timedelta
from concurrency.exceptions import RecordModifiedError
from django.core.exceptions import ValidationError, FieldError
from django.db import transaction
from django.db.utils import IntegrityError
from django.utils import timezone
from ledger.checkout.utils import calculate_excl_gst
from wildlifecompliance.exceptions import ReturnServiceException
from wildlifecompliance.components.main.utils import checkout, singleton
from wildlifecompliance.components.main.utils import flush_checkout_session
from wildlifecompliance.components.returns.payments import ReturnFeePolicy
from wildlifecompliance.components.returns.utils_schema import Schema
from wildlifecompliance.components.returns.utils import get_session_return
from wildlifecompliance.components.returns.utils import bind_return_to_invoice
from wildlifecompliance.components.returns.utils import ReturnSpeciesUtility
from wildlifecompliance.components.returns.email import (
send_sheet_transfer_email_notification,
send_return_invoice_notification,
send_return_accept_email_notification,
)
from wildlifecompliance.components.returns.models import (
Return,
ReturnType,
ReturnTable,
ReturnInvoice,
ReturnRow,
ReturnUserAction,
ReturnActivity,
)
# Module-level logger for the returns service, named after this module.
logger = logging.getLogger(__name__)
# logger = logging
'''
Associations: +----------------------+
+---------------- | WildlifeLicence |
| +----------------------+
V
+----------------+ +------------+ +----------------------+
| ReturnData | <--+--- | Return | <------- | ApplicationCondition |
+----------------+ | +------------+ +----------------------+
+----------------+ | ^ |
| ReturnQuestion | <--+ | V
+----------------+ | +------------+ +----------------------+
+----------------+ | | ReturnType | | Application |
| ReturnSheet | <--+ +------------+ +----------------------+
+----------------+
'''
class ReturnService(object):
'''
A Facade for the Return (compliance) subsystem responsible for requests.
'''
__metaclass__ = abc.ABCMeta
@staticmethod
def discard_return_request(request, condition):
'''
Discard Return for a Licence Condition.
:param: ApplicationCondition for the generated Return.
'''
logger.debug('ReturnService.discard_return() - start')
logger_title = '{0} AppID {1} CondID {2}'.format(
'ReturnService.discard_return()',
condition.application.id, condition.id
)
successful = False
try:
command = DiscardRequestCommand(request, condition)
command.execute()
successful = True
except ReturnServiceException as rse:
log = '{0} {1}'.format(logger_title, rse)
logger.exception(log)
except Exception as e:
log = '{0} {1}'.format(logger_title, e)
logger.exception(log)
raise
logger.debug('ApplicationService.submit_application_request() - end')
return successful
@staticmethod
def generate_return_request(request, licence, selected_activity) -> bool:
'''
Services a request for generating Returns for Conditions on the
Selected Activity for the Wildlife Licence.
:param: request is an incoming client request.
:param: licence is a WildlifeLicence for the Return.
:param: selected_activity is the ApplicationSelectedActivity.
'''
logger.debug('ReturnService.generate_return_request() - start')
logger_title = '{0} AppID {1}'.format(
'ReturnService.generate_return_request()',
selected_activity.application_id,
)
successful = False
try:
generator = ReturnGenerator()
generator.create_return(request, licence, selected_activity)
successful = True
except ReturnServiceException as rse:
log = '{0} {1}'.format(logger_title, rse)
logger.exception(log)
except Exception as e:
log = '{0} {1}'.format(logger_title, e)
logger.exception(log)
raise
logger.debug('ReturnService.generate_return_request() - end')
return successful
@staticmethod
def etl_return_sheet(real_time=False, return_ids=None) -> bool:
'''
A service call to cleanse Return running sheets.
:param: return_ids is a List of identifiers.
'''
logger.debug('ReturnService.etl_return_sheet() - start')
logger_title = '{0}'.format(
'ReturnService.etl_return_sheet()',
)
successful = False
try:
etl = ReturnETL(CleanseReturnSheet(real_time, return_ids))
etl.process()
successful = True
except ReturnServiceException as rse:
log = '{0} {1}'.format(logger_title, rse)
logger.exception(log)
except Exception as e:
log = '{0} {1}'.format(logger_title, e)
logger.exception(log)
raise
logger.debug('ReturnService.etl_return_sheet() - end')
return successful
    @staticmethod
    def record_deficiency_request(request, a_return):
        '''
        Store the submitted request details against a Return's data and
        question components (presumably while recording a deficiency --
        TODO confirm intent against callers).

        Unlike store_request_details_for(), this does not handle running
        sheets and does not recalculate the return fee.

        :param request: incoming client request carrying the details.
        :param a_return: the Return being updated.
        :return: an empty list.
        '''
        if a_return.has_data:
            data = ReturnData(a_return)
            data.store(request)
        if a_return.has_question:
            question = ReturnQuestion(a_return)
            question.store(request)
        return []
@staticmethod
def unassign_officer_request(request, a_return):
'''
Remove an officer from a reqested return.
:param request details from view.
:param a return for assignment.
'''
with transaction.atomic():
try:
if a_return.assigned_to:
a_return.assigned_to = None
a_return.save()
# Create a log entry.
a_return.log_user_action(
ReturnUserAction.ACTION_UNASSIGN.format(
a_return.id), request)
except BaseException as e:
logger.error('ERR: unassign_officer_request : {0}'.format(e))
raise
@staticmethod
def assign_officer_request(request, a_return, an_officer):
'''
Assign an officer to requested return.
:param request details from view.
:param a return for assignment.
:param an officer is EmailUser details.
'''
with transaction.atomic():
try:
if an_officer != a_return.assigned_to:
a_return.assigned_to = an_officer
a_return.save()
# Create a log entry.
a_return.log_user_action(
ReturnUserAction.ACTION_ASSIGN_TO.format(
a_return.id, '{}({})'.format(
an_officer.get_full_name(),
an_officer.email)
), request)
except BaseException as e:
logger.error('ERR: assign_officer_request : {0}'.format(e))
raise
@staticmethod
def accept_return_request(request, a_return):
'''
Process an accepted requested return.
'''
workflow_status = [
# Expected status for requested return.
Return.RETURN_PROCESSING_STATUS_WITH_CURATOR
]
try:
if a_return.processing_status not in workflow_status:
raise Exception('Cannot accept a return not with curator.')
# Set status.
status = Return.RETURN_PROCESSING_STATUS_ACCEPTED
a_return.set_processing_status(status)
# Send notification.
send_return_accept_email_notification(a_return, request)
# Log action.
a_return.log_user_action(
ReturnUserAction.ACTION_ACCEPT_REQUEST.format(a_return),
request
)
except BaseException as e:
logger.error('accept_return_request(): {0}'.format(e))
raise
@staticmethod
def submit_session_return_request(request):
'''
Process a requested return submission using session attributes from the
request.
NOTE: Session is not deleted on successful submission.
'''
is_submitted = False
try:
the_return = get_session_return(request.session)
the_return.set_submitted(request)
is_submitted = True
logger.info('Submit Successful Return: {0}'.format(
the_return.id))
except BaseException as e:
logger.error('submit_session_return_request(): {0}'.format(e))
raise
return is_submitted
@staticmethod
def invoice_session_return_request(request):
'''
Process a return payment invoice using session attributes from the
request.
NOTE: Session is not deleted on successful invoicing.
'''
is_invoiced = False
try:
the_return = get_session_return(request.session)
invoice_ref = request.GET.get('invoice')
bind_return_to_invoice(request, the_return, invoice_ref)
send_return_invoice_notification(the_return, invoice_ref, request)
is_invoiced = True
logger.info('Paid Invoice: {0} Return: {1} Amt: {2}'.format(
invoice_ref, the_return.id, the_return.return_fee))
except BaseException as e:
logger.error('invoice_session_return_request(): {0}'.format(e))
raise
return is_invoiced
@staticmethod
def calculate_fees(a_return, data_source=None):
'''
Calculates fees for a Return.
'''
# update any fees.
fee_policy = ReturnFeePolicy.get_fee_policy_for(a_return)
fee_policy.set_return_fee() # force a re-calculation.
return fee_policy.get_dynamic_attributes()
    @staticmethod
    def get_product_lines(a_return):
        '''
        Get product lines for fees associated with a return to be charged
        through checkout.

        :param a_return: the Return whose fees are being charged.
        :return: product lines produced by the fee policy.
        '''
        return ReturnFeePolicy.get_fee_product_lines_for(a_return)
    @staticmethod
    def verify_due_return_id(return_id):
        '''
        Verification of the return due date for a single return.

        Delegates to verify_due_returns() restricted to this identifier.

        :param return_id: identifier of the Return to verify.
        '''
        ReturnService.verify_due_returns(return_id, False)
    @staticmethod
    def verify_due_returns(id=0, for_all=True):
        '''
        Verification of return due dates seven days before they are due,
        updating the processing status.

        Only returns in draft/future/due/overdue states are considered:
        - due within the next 7 days -> DUE (the future species list is
          frozen first);
        - past due, non-sheet formats -> OVERDUE;
        - past due, running-sheet format -> EXPIRED, after a 14 day grace
          period.

        :param id: single Return id to restrict to when for_all is False.
        :param for_all: when True process every candidate return.
        :return: list of returns whose status was moved to DUE (note:
            returns a list, not a count).
        '''
        DUE_DAYS = 7
        verified = []
        today_plus_7 = date.today() + timedelta(days=DUE_DAYS)
        today = date.today()
        # Candidate pool: every return still in a pre-accepted state.
        all_returns = Return.objects.filter(
            processing_status__in=[
                Return.RETURN_PROCESSING_STATUS_DRAFT,
                Return.RETURN_PROCESSING_STATUS_FUTURE,
                Return.RETURN_PROCESSING_STATUS_DUE,
                Return.RETURN_PROCESSING_STATUS_OVERDUE,
            ]
        )
        # Draft/future returns falling due inside the 7-day window.
        due_returns = all_returns.filter(
            due_date__range=[today, today_plus_7],
            processing_status__in=[
                Return.RETURN_PROCESSING_STATUS_DRAFT,
                Return.RETURN_PROCESSING_STATUS_FUTURE
            ]
        )
        status = Return.RETURN_PROCESSING_STATUS_DUE
        for a_return in due_returns:
            if not for_all and not a_return.id == id:
                continue
            # set future species list for return before setting status.
            utils = ReturnSpeciesUtility(a_return)
            licence_activity_id = a_return.condition.licence_activity_id
            selected_activity = [
                a for a in a_return.application.activities
                if a.licence_activity_id == licence_activity_id
            ][0]
            raw_specie_names = utils.get_raw_species_list_for(
                selected_activity
            )
            utils.set_species_list_future(raw_specie_names)
            # update status for the return.
            a_return.set_processing_status(status)
            verified.append(a_return)
        # Past-due returns (running sheets excluded; they expire instead).
        overdue_returns = all_returns.filter(
            due_date__lt=today,
            processing_status__in=[
                Return.RETURN_PROCESSING_STATUS_DRAFT,
                Return.RETURN_PROCESSING_STATUS_FUTURE,
                Return.RETURN_PROCESSING_STATUS_DUE
            ]
        ).exclude(
            return_type__data_format=ReturnType.FORMAT_SHEET
        )
        status = Return.RETURN_PROCESSING_STATUS_OVERDUE
        for a_return in overdue_returns:
            if not for_all and not a_return.id == id:
                continue
            a_return.set_processing_status(status)
        # Past-due running sheets move to EXPIRED after a grace period.
        expired_returns = all_returns.filter(
            due_date__lt=today,
            processing_status__in=[
                Return.RETURN_PROCESSING_STATUS_DRAFT,
                Return.RETURN_PROCESSING_STATUS_FUTURE,
                Return.RETURN_PROCESSING_STATUS_DUE,
                Return.RETURN_PROCESSING_STATUS_OVERDUE,
            ],
            return_type__data_format=ReturnType.FORMAT_SHEET
        )
        status = Return.RETURN_PROCESSING_STATUS_EXPIRED
        for a_return in expired_returns:
            if not for_all and not a_return.id == id:
                continue
            # Expired Running Sheets have a 14 day grace period before the
            # status changes to Expired. Allows for final updates when not
            # renewing.
            # NOTE: At renewal the stock totals are aggregated for the newly
            # generated return. The old one is then discarded.
            expired_date = a_return.due_date + timedelta(days=14)
            expired_plus_14 = [
                a_return.due_date + datetime.timedelta(n)
                for n in range(int((expired_date - a_return.due_date).days))
            ]
            if today in expired_plus_14:
                continue
            a_return.set_processing_status(status)
        return verified
@staticmethod
def get_details_for(a_return):
"""
Return data presented in table format with column headers.
:return: formatted data.
"""
logger.debug('ReturnService.get_details_for() - start')
details = []
try:
if a_return.has_sheet:
sheet = ReturnSheet(a_return)
details = sheet.table
if a_return.has_data:
data = ReturnData(a_return)
details = data.table
if a_return.has_question:
question = ReturnQuestion(a_return)
details = question.table
except BaseException as e:
# NOTE: invalid schema exception can be thrown here.
logger.error('get_details_for() ID {0} - {1}'.format(
a_return.id, e
))
logger.debug('ReturnService.get_details_for() - end')
return details
@staticmethod
def store_request_details_for(a_return, request):
"""
Return data presented in table format with column headers.
:return: formatted data.
"""
if a_return.has_sheet:
sheet = ReturnSheet(a_return)
sheet.store(request)
if a_return.has_data:
data = ReturnData(a_return)
data.store(request)
if a_return.has_question:
question = ReturnQuestion(a_return)
question.store(request)
fee = ReturnService.calculate_fees(a_return)
a_return.set_return_fee(fee['fees']['return'])
return []
@staticmethod
def validate_sheet_transfer_for(a_return, request):
'''
Validates the transfer details of stock on a Return running sheet.
'''
is_valid = False
if a_return.has_sheet:
sheet = ReturnSheet(a_return)
is_valid = sheet.is_valid_transfer(request)
return is_valid
@staticmethod
def get_sheet_activity_list_for(a_return):
if a_return.has_sheet:
sheet = ReturnSheet(a_return)
return sheet.activity_list
return None
@staticmethod
def get_sheet_species_list_for(a_return):
'''
Get list of species available for the return.
'''
logger.debug('ReturnService.get_sheet_species_list_for() - start')
if a_return.has_sheet:
sheet = ReturnSheet(a_return)
return sheet.species_list
return None
@staticmethod
def get_sheet_species_saved_for(a_return):
'''
Get list of species saved for the return.
'''
logger.debug('ReturnService.get_sheet_species_saved_for() - start')
saved_list = None
if a_return.has_sheet:
sheet = ReturnSheet(a_return)
saved_list = sheet.get_species_saved()
logger.debug('ReturnService.get_sheet_species_saved_for() - end')
return saved_list
@staticmethod
def get_sheet_species_for(a_return):
if a_return.has_sheet:
sheet = ReturnSheet(a_return)
return sheet.species
return None
@staticmethod
def get_species_list_for(a_return):
'''
Get list of species available for the return.
'''
logger.debug('ReturnService.get_species_list_for() - start')
if a_return.has_sheet:
sheet = ReturnSheet(a_return)
return sheet.species_list
if a_return.has_data:
data = ReturnData(a_return)
return data.species_list
return None
@staticmethod
def set_species_for(a_return, species_id):
updated = None
if a_return.has_sheet:
updated = ReturnSheet(a_return)
updated.set_species(species_id)
if a_return.has_data:
updated = ReturnData(a_return)
updated.set_species(species_id)
return updated
@staticmethod
def get_species_for(a_return):
if a_return.has_sheet:
sheet = ReturnSheet(a_return)
return sheet.species
if a_return.has_data:
| |
#!/usr/bin/env python
# Copyright (c) 2002-2009 ActiveState Software Inc.
# License: MIT (see LICENSE.txt for license details)
# Author: <NAME>
"""An improvement on Python's standard cmd.py module.
As with cmd.py, this module provides "a simple framework for writing
line-oriented command interpreters." This module provides a 'RawCmdln'
class that fixes some design flaws in cmd.Cmd, making it more scalable
and nicer to use for good 'cvs'- or 'svn'-style command line interfaces
or simple shells. And it provides a 'Cmdln' class that adds
optparse-based option processing. Basically you use it like this:
import cmdln
class MySVN(cmdln.Cmdln):
name = "svn"
@cmdln.alias('stat', 'st')
@cmdln.option('-v', '--verbose', action='store_true'
help='print verbose information')
def do_status(self, subcmd, opts, *paths):
print "handle 'svn status' command"
#...
if __name__ == "__main__":
shell = MySVN()
retval = shell.main()
sys.exit(retval)
See the README.txt or <http://code.google.com/p/cmdln/> for more
details.
"""
# Module version, as a tuple and the derived dotted string.
__version_info__ = (1, 2, 0)
__version__ = '.'.join(map(str, __version_info__))
import os
from os import path
import sys
import re
import types
import cmd
import optparse
from pprint import pprint
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser # python3
import datetime
# Old-style classes were removed in Python 3; use the builtin ``type``
# there and ``types.ClassType`` on Python 2.
# BUG FIX: the comparison is ``>=`` so that Python 3.0.0 itself
# (hexversion exactly 0x03000000) is treated as Python 3; the previous
# ``>`` would fall through to the removed ``types.ClassType``.
if sys.hexversion >= 0x03000000:
    ClassType = type
else:
    ClassType = types.ClassType
#---- globals
# Loop-behaviour constants accepted by RawCmdln.main(..., loop=...).
LOOP_ALWAYS, LOOP_NEVER, LOOP_IF_EMPTY = range(3)
# An unspecified optional argument when None is a meaningful value.
_NOT_SPECIFIED = ("Not", "Specified")
# Pattern to match a TypeError message from a call that
# failed because of incorrect number of arguments (see
# Python/getargs.c).
_INCORRECT_NUM_ARGS_RE = re.compile(
    r"(takes [\w ]+ )(\d+)[\w ]*( arguments? \()(\d+)( given\))")
#---- exceptions
class CmdlnError(Exception):
    """A cmdln.py usage error."""
    def __init__(self, msg):
        # Pass the message up to Exception so that ``args`` is populated
        # and the exception reprs/pickles correctly; an __init__ that
        # ignores the base class leaves ``args`` empty.
        Exception.__init__(self, msg)
        self.msg = msg
    def __str__(self):
        return self.msg
class CmdlnUserError(Exception):
    """An error by a user of a cmdln-based tool/shell.

    RawCmdln.main() catches this and prints a short "Try '<name> help'"
    message instead of a traceback.
    """
    pass
#---- public methods and classes
def alias(*aliases):
    """Decorator to add aliases for Cmdln.do_* command handlers.

    Example:
        class MyShell(cmdln.Cmdln):
            @cmdln.alias("!", "sh")
            def do_shell(self, argv):
                #...implement 'shell' command
    """
    def decorate(func):
        # Accumulate onto aliases added by any earlier decorator.
        if not hasattr(func, "aliases"):
            func.aliases = []
        func.aliases.extend(aliases)
        return func
    return decorate
class RawCmdln(cmd.Cmd):
"""An improved (on cmd.Cmd) framework for building multi-subcommand
scripts (think "svn" & "cvs") and simple shells (think "pdb" and
"gdb").
A simple example:
import cmdln
class MySVN(cmdln.RawCmdln):
name = "svn"
            @cmdln.alias('stat', 'st')
def do_status(self, argv):
print "handle 'svn status' command"
if __name__ == "__main__":
shell = MySVN()
retval = shell.main()
sys.exit(retval)
"""
name = None # if unset, defaults basename(sys.argv[0])
prompt = None # if unset, defaults to self.name+"> "
version = None # if set, default top-level options include --version
# Default messages for some 'help' command error cases.
# They are interpolated with one arg: the command.
nohelp = "no help on '%s'"
unknowncmd = "unknown command: '%s'"
helpindent = '' # string with which to indent help output
def __init__(self, completekey='tab',
stdin=None, stdout=None, stderr=None):
"""Cmdln(completekey='tab', stdin=None, stdout=None, stderr=None)
The optional argument 'completekey' is the readline name of a
completion key; it defaults to the Tab key. If completekey is
not None and the readline module is available, command completion
is done automatically.
The optional arguments 'stdin', 'stdout' and 'stderr' specify
alternate input, output and error output file objects; if not
specified, sys.* are used.
If 'stdout' but not 'stderr' is specified, stdout is used for
error output. This is to provide least surprise for users used
to only the 'stdin' and 'stdout' options with cmd.Cmd.
"""
if self.name is None:
self.name = os.path.basename(sys.argv[0])
if self.prompt is None:
self.prompt = self.name+"> "
self._name_str = self._str(self.name)
self._prompt_str = self._str(self.prompt)
if stdin is not None:
self.stdin = stdin
else:
self.stdin = sys.stdin
if stdout is not None:
self.stdout = stdout
else:
self.stdout = sys.stdout
if stderr is not None:
self.stderr = stderr
elif stdout is not None:
self.stderr = stdout
else:
self.stderr = sys.stderr
self.cmdqueue = []
self.completekey = completekey
self.cmdlooping = False
    def get_option_defaults(self, cmdname):
        """Return default values for command options.

        For all options registered for the given command (`cmdname`),
        return the default values as a dictionary (option name as keys,
        default value as values).

        If `cmdname` is None, return defaults for top-level options.
        The base implementation supplies no defaults.
        """
        return {}
    def get_optparser(self):
        """Hook for subclasses to set the option parser for the
        top-level command/shell.

        NOTE: you may not override this method anymore; the cmdln.option
        decorator can now be used on the class itself to create toplevel
        options.

        This option parser is retrieved and used by `.main()' to handle
        top-level options.

        The default implements a single '-h|--help' option. Sub-classes
        can return None to have no options at the top-level. Typically
        an instance of CmdlnOptionParser should be returned.
        """
        return self._create_toplevel_optparser()
def _create_toplevel_optparser(self):
version = (self.version is not None
and "%s %s" % (self._name_str, self.version)
or None)
parser = CmdlnOptionParser(self, version=version)
# if ``useconfig`` is used, add the -c option to specify extra config
# file
# if hasattr(self, 'defaultsconfig'):
# parser.add_option('-c', '--configfile',
# dest='configfile',
# help="specify the config file location",
# default=None)
# add toplevel options
if hasattr(self, 'toplevel_optparser_options'):
for args, kwargs in self.toplevel_optparser_options:
parser.add_option(*args, **kwargs)
return parser
    def postoptparse(self):
        """Hook method executed just after `.main()' parses top-level
        options.

        When called, `self.options' holds the results of the option
        parse. The default implementation does nothing.
        """
    def main(self, argv=None, loop=LOOP_NEVER):
        """A possible mainline handler for a script, like so:

            import cmdln
            class MyCmd(cmdln.Cmdln):
                name = "mycmd"
                ...

            if __name__ == "__main__":
                MyCmd().main()

        By default this will use sys.argv to issue a single command to
        'MyCmd', then exit. The 'loop' argument can be use to control
        interactive shell behaviour.

        Arguments:
            "argv" (optional, default sys.argv) is the command to run.
                It must be a sequence, where the first element is the
                command name and subsequent elements the args for that
                command.
            "loop" (optional, default LOOP_NEVER) is a constant
                indicating if a command loop should be started (i.e. an
                interactive shell). Valid values (constants on this module):
                    LOOP_ALWAYS     start loop and run "argv", if any
                    LOOP_NEVER      run "argv" (or .emptyline()) and exit
                    LOOP_IF_EMPTY   run "argv", if given, and exit;
                                    otherwise, start loop
        """
        if argv is None:
            argv = sys.argv
        else:
            argv = argv[:]  # don't modify caller's list
        try:
            self.optparser = self.get_optparser()
            if self.optparser:  # i.e. optparser=None means don't process for opts
                try:
                    self.options, args = self.optparser.parse_args(argv[1:])
                except StopOptionProcessing:
                    # e.g. --help or --version already handled; exit OK.
                    return 0
                else:
                    # Set default options *after* parsing command line options
                    # This is an requirement for CmdlnWithConfigParser which
                    # relies on the -c option which is only parsed in the above
                    # `try' block
                    self.optparser.set_defaults(**self.get_option_defaults(None))
                    self.options, args = self.optparser.parse_args(argv[1:])
            else:
                self.options, args = None, argv[1:]
            self.postoptparse()
        except CmdlnUserError:
            # User-level mistakes print a short hint, not a traceback.
            _, ex, _ = sys.exc_info()
            msg = "%s: %s\nTry '%s help' for info.\n"\
                  % (self.name, ex, self.name)
            self.stderr.write(self._str(msg))
            self.stderr.flush()
            return 1
        # Dispatch according to the requested loop behaviour.
        if loop == LOOP_ALWAYS:
            if args:
                self.cmdqueue.append(args)
            return self.cmdloop()
        elif loop == LOOP_NEVER:
            if args:
                return self.cmd(args)
            else:
                return self.emptyline()
        elif loop == LOOP_IF_EMPTY:
            if args:
                return self.cmd(args)
            else:
                return self.cmdloop()
def cmd(self, argv):
"""Run one command and exit.
"argv" is the arglist for the command to run. argv[0] is the
command to run. If argv is an empty list then the
'emptyline' handler is run.
Returns the return value from the command handler.
"""
assert isinstance(argv, (list, tuple)), \
"'argv' is not a sequence: %r" % argv
retval = None
try:
argv = self.precmd(argv)
retval = self.onecmd(argv)
self.postcmd(argv)
except:
if not self.cmdexc(argv):
raise
retval = 1
return retval
def _str(self, s):
"""Safely convert the given str/unicode to a string for printing."""
try:
return str(s)
except UnicodeError:
#XXX What is the proper encoding to use here? 'utf-8' seems
# to work better than "getdefaultencoding" (usually
# 'ascii'), on OS X at least.
#return s.encode(sys.getdefaultencoding(), "replace")
return s.encode("utf-8", "replace")
def cmdloop(self, intro=None):
"""Repeatedly issue a prompt, accept input, parse into an argv, and
dispatch (via .precmd(), .onecmd() and .postcmd()), passing them
the argv. In other words, start a shell.
"intro" (optional) is a introductory message to print when
starting the command loop. This overrides the class
"intro" attribute, if any.
"""
self.cmdlooping = True
self.preloop()
if self.use_rawinput and self.completekey:
try:
import readline
self.old_completer = readline.get_completer()
readline.set_completer(self.complete)
readline.parse_and_bind(self.completekey+": complete")
except ImportError:
pass
try:
if intro is None:
intro = self.intro
if intro:
intro_str = | |
self.abort("Failed to build the required target(s)")
if self.exception is not None:
return self.exception
for action in self.new_persistent_actions:
for name, partial_up_to_date in action.required.items():
full_up_to_date = Invocation.up_to_date.get(name)
if full_up_to_date is None:
partial_up_to_date.mtime_ns = 0
else:
assert full_up_to_date.producer == partial_up_to_date.producer
partial_up_to_date.mtime_ns = full_up_to_date.mtime_ns
if Logger.isEnabledFor(logging.DEBUG) and self.oldest_output_path is not None:
if self.newest_input_path is None:
Logger.debug("No inputs")
else:
Logger.debug(
f"Newest input: {self.newest_input_path} "
f"time: {_datetime_from_nanoseconds(self.newest_input_mtime_ns)}"
)
return None
    async def done(self, awaitable: Awaitable) -> Any:
        """
        Await some non-DynaMake function.

        Aborts first if another invocation already failed (and failures
        abort builds), then re-registers this invocation as the current
        one after the await, since control may have passed to other
        invocations in the meantime.
        """
        self.abort_due_to_other()
        result = await awaitable
        self._become_current()
        return result
def abort_due_to_other(self) -> None:
"""
If another invocation has failed, and failure aborts builds, abort this invocation as well.
"""
global failure_aborts_build # pylint: disable=invalid-name
if Logger.errors and failure_aborts_build.value:
self.abort("Aborting due to previous error")
    def _become_current(self) -> None:
        # Make this invocation the "current" one and label the running thread
        # with its build stack, so logs/debuggers show which step is active.
        Invocation.current = self
        current_thread().name = self.stack
# Test-only stand-ins for real timestamps: maps each real nanosecond value to a
# small stable "quantized" number that preserves only relative ordering.
_QUANTIZED_OF_NANOSECONDS: Dict[int, float] = {}
# Inverse mapping: the string form of a quantized value back to real nanoseconds.
_NANOSECONDS_OF_QUANTIZED: Dict[str, int] = {}
def _datetime_from_str(string: str) -> datetime:
    """Parse a ``YYYY-MM-DD HH:MM:SS.ffffff`` timestamp string into a ``datetime``."""
    return datetime.strptime(string, "%Y-%m-%d %H:%M:%S.%f")
def _datetime_from_nanoseconds(nanoseconds: int) -> str:
    """Convert a nanosecond timestamp into a printable string.

    Outside of tests, this is the wall-clock time with nanosecond precision.
    Under tests, real timestamps are replaced by small stable "quantized"
    values which preserve only the relative ordering of the timestamps, so
    that test output does not depend on the actual clock.
    """
    if not _is_test:  # pylint: disable=protected-access
        # pragma: no cover
        seconds = datetime.fromtimestamp(nanoseconds // 1_000_000_000).strftime("%Y-%m-%d %H:%M:%S")
        fraction = "%09d" % (nanoseconds % 1_000_000_000)  # pylint: disable=consider-using-f-string
        return seconds + "." + fraction
    quantized = _QUANTIZED_OF_NANOSECONDS.get(nanoseconds, None)
    if quantized is not None:
        return str(quantized)
    # Find the nearest already-quantized neighbors below and above the query.
    higher_nanoseconds = None
    higher_quantized = None
    lower_nanoseconds = None
    lower_quantized = None
    for old_nanoseconds, old_quantized in _QUANTIZED_OF_NANOSECONDS.items():
        if old_nanoseconds < nanoseconds:
            if lower_nanoseconds is None or lower_nanoseconds < old_nanoseconds:
                lower_nanoseconds = old_nanoseconds
                lower_quantized = old_quantized
        if old_nanoseconds > nanoseconds:
            # BUG FIX: we want the *smallest* existing timestamp above the
            # query. The old code compared with `<` (keeping the largest) and
            # assigned the query value `nanoseconds` instead of the candidate
            # `old_nanoseconds`, which corrupted every later comparison.
            if higher_nanoseconds is None or higher_nanoseconds > old_nanoseconds:
                higher_nanoseconds = old_nanoseconds
                higher_quantized = old_quantized
    # Interpolate a new quantized value between (or beyond) the neighbors.
    if lower_quantized is None:
        if higher_quantized is None:
            quantized = 1
        else:
            quantized = higher_quantized - 1
    else:
        if higher_quantized is None:
            quantized = lower_quantized + 1
        else:
            quantized = (lower_quantized + higher_quantized) / 2
    _QUANTIZED_OF_NANOSECONDS[nanoseconds] = quantized
    _NANOSECONDS_OF_QUANTIZED[str(quantized)] = nanoseconds
    return str(quantized)
def _nanoseconds_from_datetime_str(string: str) -> int:
    """Parse a timestamp string (as produced by ``_datetime_from_nanoseconds``)
    back into integer nanoseconds."""
    if _is_test:  # pylint: disable=protected-access
        # Under tests the string is a quantized stand-in; map it back directly.
        return _NANOSECONDS_OF_QUANTIZED[string]
    whole_part, fraction_part = string.split(".")
    whole_seconds = int(_datetime_from_str(whole_part + ".0").timestamp())
    # Right-pad the fraction to exactly nine digits (nanosecond resolution).
    fraction_nanoseconds = int(fraction_part.ljust(9, "0")[:9])
    return whole_seconds * 1_000_000_000 + fraction_nanoseconds
def _reset_test_dates() -> None:
    # Forget all quantized timestamps between tests. Rebinds (rather than
    # clears) the dicts so any stale references elsewhere keep their old data.
    global _QUANTIZED_OF_NANOSECONDS
    global _NANOSECONDS_OF_QUANTIZED
    _QUANTIZED_OF_NANOSECONDS = {}
    _NANOSECONDS_OF_QUANTIZED = {}
def step(
    output: Strings, priority: float = 0  # pylint: disable=redefined-outer-name
) -> Callable[[Callable], Callable]:
    """
    Decorate a build step function.

    The ``priority`` (default: 0) picks between multiple steps providing the same
    output. Typically, low-priority steps declare pattern outputs and
    high-priority steps override them for specific output(s).
    """

    def _decorator(wrapped: Callable) -> Callable:
        # Registering the Step is the whole point; the function is unchanged.
        Step(wrapped, output, priority)
        return wrapped

    return _decorator
def require(*paths: Strings) -> None:
    """
    Require input file(s) for the current step.

    Each path queues an async build of that input via the appropriate step;
    this returns immediately without waiting for the builds.
    """
    for required_path in each_string(*paths):
        Invocation.current.require(required_path)
async def sync() -> Optional[BaseException]:
    """
    Wait until all the input files specified so far are built.

    This is invoked automatically before running actions. Returns the
    exception of a failed required build, if any.
    """
    invocation = Invocation.current
    return await invocation.done(invocation.sync())
async def shell(*command: Strings, prefix: Optional[Strings] = None, **resources: int) -> None:
    """
    Execute a shell command.

    The caller is responsible for all quotations. A command whose first
    character is ``@`` is "silent": logged at the FILE level rather than INFO.

    This first waits until all input files requested so far are ready, and the
    command only runs once any ``resources`` are obtained (bounding the total
    use of any resource declared by ``resource_parameters``).

    ``prefix``, if given, is silently prepended to the command; by default it
    is the value of the :py:const:`default_shell_prefix` parameter.
    """
    invocation = Invocation.current
    if prefix is None:
        global default_shell_prefix  # pylint: disable=invalid-name
        prefix = default_shell_prefix.value

    def _start_shell(parts: List[str]) -> Awaitable:
        assert prefix is not None
        global shell_executable  # pylint: disable=invalid-name
        command_text = " ".join(flatten(prefix, parts))
        return asyncio.create_subprocess_shell(
            command_text,
            executable=shell_executable.value,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
        )

    await invocation.done(invocation.run_action("shell", _start_shell, *command, **resources))
async def spawn(*command: Strings, **resources: int) -> None:
    """
    Execute an external program with arguments.

    A command whose first character is ``@`` is "silent": logged at the FILE
    level rather than INFO.

    This first waits until all input files requested so far are ready.
    """
    invocation = Invocation.current

    def _start_exec(parts: List[str]) -> Awaitable:
        return asyncio.create_subprocess_exec(
            *parts, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
        )

    await invocation.done(invocation.run_action("spawn", _start_exec, *command, **resources))
def make(
    parser: ArgumentParser,
    *,
    default_targets: Strings = "all",
    logger_name: str = "dynamake",
    adapter: Optional[Callable[[Namespace], None]] = None,
) -> None:
    """
    A generic ``main`` function for ``DynaMake``.

    If no explicit targets are given, will build the ``default_targets`` (default: ``all``).

    Uses the ``logger_name`` (default: ``dynamake``) to create the global :py:class:`Logger`.

    The optional ``adapter`` may perform additional adaptation of the execution environment based on the parsed
    command-line arguments before the actual function(s) are invoked.
    """
    default_targets = flatten(default_targets)
    # Modules must be loaded *before* building the parser, since loaded steps
    # and parameters contribute command-line options (see _load_modules).
    _load_modules()
    parser.add_argument("TARGET", nargs="*", help=f'The file or target to make (default: {" ".join(default_targets)})')
    parser.add_argument(
        "--module",
        "-m",
        metavar="MODULE",
        action="append",
        help="A Python module to load (containing function definitions)",
    )
    Parameter.add_to_parser(parser)
    parser.add_argument(
        "--list_steps",
        "-ls",
        default=False,
        action="store_true",
        help="List all the build steps and their targets, and exit.",
    )
    args = parser.parse_args()
    Parameter.parse_args(args)
    Logger.setup(logger_name)
    if adapter is not None:
        adapter(args)
    _compute_jobs()
    if args.list_steps:
        _list_steps()
    else:
        # Drop None entries; fall back to the defaults if no targets were given.
        _build_targets([path for path in args.TARGET if path is not None] or flatten(default_targets))
def _load_modules() -> None:
    """Pre-scan ``sys.argv`` for ``-m``/``--module`` options and import those modules.

    Falls back to importing the default module if it exists and none were given.
    """
    # TODO: This needs to be done before we set up the command line options parser, because the options depend on the
    # loaded modules. Catch-22. This therefore employs a brutish option detection which may not be 100% correct.
    did_import = False
    for option, value in zip(sys.argv, sys.argv[1:]):
        if option in ["-m", "--module"]:
            did_import = True
            import_module(value)
    # BUG FIX: argparse also accepts the single-token "--module=NAME" form,
    # which the pair-wise scan above cannot see; handle it explicitly.
    for option in sys.argv[1:]:
        if option.startswith("--module="):
            did_import = True
            import_module(option[len("--module="):])
    if not did_import and os.path.exists(DEFAULT_MODULE + ".py"):
        import_module(DEFAULT_MODULE)
def _compute_jobs() -> None:
    """Resolve the ``jobs`` parameter into a concrete resource amount.

    A negative value ``-N`` means "use cpu_count // N" CPUs, clamped to the
    range [1, cpu_count]. The result seeds the built-in "jobs" resource pool.
    """
    global jobs  # pylint: disable=invalid-name
    amount = int(jobs.value)
    if jobs.value < 0:
        cpu_count = os.cpu_count() or 1
        amount = cpu_count // -jobs.value
        amount = max(amount, 1)
        amount = min(amount, cpu_count)
    jobs.value = amount
    Resources.available["jobs"] = Resources.total["jobs"] = amount
def _list_steps() -> None:
    """Print all registered build steps (sorted by priority then name) with
    their docstrings, priorities and annotated outputs."""
    is_first = True
    steps = [(step.priority, step.name, step) for step in Step.by_name.values()]
    for _, _, step in sorted(steps):  # pylint: disable=redefined-outer-name
        if not is_first:
            print()
        is_first = False
        # Emit the step's docstring as '#' comments above its entry.
        doc = step.function.__doc__
        if doc:
            print("# " + dedent(doc).strip().replace("\n", "\n# "))
        print(f"{step.name}:")
        print(f"  priority: {step.priority}")
        print("  outputs:")
        for output in sorted(step.output):  # pylint: disable=redefined-outer-name
            # Collect the output's flags (exists/optional/phony/precious).
            properties = []
            if is_exists(output):
                properties.append("exists")
            if is_optional(output):
                properties.append("optional")
            if is_phony(output):
                properties.append("phony")
            if is_precious(output):
                properties.append("precious")
            if properties:
                print(f'  - {output}: [{", ".join(properties)}]')
            else:
                print(f"  - {output}")
def _build_targets(targets: List[str]) -> None:
    """Run the event loop to build the given targets, then report the outcome.

    Exits the process with status 1 (or re-raises under tests) on failure.
    """
    Logger.trace("Targets: " + " ".join(targets))
    if Logger.isEnabledFor(logging.DEBUG):
        # Only log the resource pool if at least one resource is non-zero.
        for value in Resources.available.values():
            if value > 0:
                Logger.debug("Available resources: " + _dict_to_str(Resources.available))
                break
    result: Optional[BaseException] = None
    try:
        for target in targets:
            require(target)
        result = asyncio.get_event_loop().run_until_complete(Invocation.top.sync())
    except StepException as exception:  # pylint: disable=broad-except
        result = exception
    Invocation.current = Invocation.top
    # A DryRunException is not a real failure -- it just short-circuits actions.
    if result is not None and not isinstance(result, DryRunException):
        Logger.error("Fail")
        if _is_test:  # pylint: disable=protected-access
            # Under tests, surface the exception instead of exiting the process.
            no_additional_complaints()
            raise result
        sys.exit(1)
    if isinstance(result, DryRunException):
        status = "DryRun"
    elif Invocation.actions_count > 0:
        status = "Done"
    elif Invocation.skipped_count > 0:
        status = "Skipped"
    else:
        status = "Complete"
    Logger.trace(status)
@asynccontextmanager
async def reading(*names: Strings) -> AsyncGenerator:
    """
    Async context for actions that reads some data which might be accessed by other actions.

    The actual locks are only obtained when invoking the :py:func:`locks` function (which is automatic for running
    actions). Otherwise, this just collects the required locks. Deferring the actual locking allows us to avoid
    deadlocks.
    """
    invocation = Invocation.current
    assert not invocation.has_locks
    old_required_locks = invocation.required_locks
    try:
        # Work on a copy so the outer context's lock requirements are untouched.
        invocation.required_locks = copy(old_required_locks)
        for name in each_string(*names):
            if name not in invocation.required_locks:
                # False presumably marks a shared (read) lock requirement,
                # with writing() marking exclusive ones -- TODO confirm.
                invocation.required_locks[name] = False
        yield
    finally:
        # The body may have switched the current invocation; restore ours,
        # then restore the previous lock requirements.
        invocation._become_current()  # pylint: disable=protected-access
        invocation.required_locks = old_required_locks
@asynccontextmanager
async def writing(*names: Strings) -> AsyncGenerator:
"""
Async context for actions that modify some data which might be accessed by other actions.
The actual locks are only obtained when invoking the :py:func:`locks` function (which is automatic for running
actions). Otherwise, this just collects the required locks. Deferring the actual locking allows us to avoid
deadlocks.
"""
invocation = Invocation.current
assert not invocation.has_locks
| |
import contextvars
import functools
import platform
import sys
import threading
import time
import types
import warnings
import weakref
from contextlib import contextmanager, ExitStack
from math import inf
from textwrap import dedent
import gc
import attr
import outcome
import sniffio
import pytest
from .tutil import (
slow,
check_sequence_matches,
gc_collect_harder,
ignore_coroutine_never_awaited_warnings,
buggy_pypy_asyncgens,
restore_unraisablehook,
create_asyncio_future_in_new_loop,
)
from ... import _core
from .._run import DEADLINE_HEAP_MIN_PRUNE_THRESHOLD
from ..._threads import to_thread_run_sync
from ..._timeouts import sleep, fail_after
from ...testing import (
wait_all_tasks_blocked,
Sequencer,
assert_checkpoints,
)
async def sleep_forever():
    """Block until explicitly rescheduled; return the value we are rescheduled
    with. Slightly different from _timeouts.sleep_forever, whose return value
    is discarded -- the returned value is really only useful for tests of
    rescheduling."""

    def abort(_):
        return _core.Abort.SUCCEEDED

    return await _core.wait_task_rescheduled(abort)
def test_basic():
    """_core.run() returns the coroutine's value and rejects invalid arguments."""

    async def echo(value):
        return value

    assert _core.run(echo, 8) == 8

    with pytest.raises(TypeError):
        # Missing an argument
        _core.run(echo)
    with pytest.raises(TypeError):
        # Not an async function
        _core.run(lambda: None)

    async def echo_after_checkpoint(value):
        await _core.checkpoint()
        return value

    assert _core.run(echo_after_checkpoint, 1) == 1
def test_initial_task_error():
    """An exception raised by the initial task propagates out of _core.run()."""

    async def boom(value):
        raise ValueError(value)

    with pytest.raises(ValueError) as excinfo:
        _core.run(boom, 17)
    assert excinfo.value.args == (17,)
def test_run_nesting():
    """Calling _core.run() from inside an already-running run must fail."""

    async def inception():
        async def main():  # pragma: no cover
            pass

        return _core.run(main)

    with pytest.raises(RuntimeError) as excinfo:
        _core.run(inception)
    assert "from inside" in str(excinfo.value)
async def test_nursery_warn_use_async_with():
    """Using open_nursery() with a plain 'with' raises a helpful RuntimeError."""
    with pytest.raises(RuntimeError) as excinfo:
        on = _core.open_nursery()
        with on:
            pass  # pragma: no cover
    excinfo.match(
        r"use 'async with open_nursery\(...\)', not 'with open_nursery\(...\)'"
    )

    # avoid unawaited coro.
    async with on:
        pass
async def test_nursery_main_block_error_basic():
    """An exception raised in the nursery body propagates with identity intact."""
    error = ValueError("whoops")
    with pytest.raises(ValueError) as excinfo:
        async with _core.open_nursery():
            raise error
    assert excinfo.value is error
async def test_child_crash_basic():
    """nursery.__aexit__ re-raises a child's exception in the parent, identity intact."""
    error = ValueError("uh oh")

    async def crash():
        raise error

    try:
        async with _core.open_nursery() as nursery:
            nursery.start_soon(crash)
    except ValueError as caught:
        assert caught is error
async def test_basic_interleave():
    """Two tasks yielding at checkpoints are scheduled in interleaved rounds."""

    async def looper(whoami, record):
        for i in range(3):
            record.append((whoami, i))
            await _core.checkpoint()

    record = []
    async with _core.open_nursery() as nursery:
        nursery.start_soon(looper, "a", record)
        nursery.start_soon(looper, "b", record)

    # Within each round the two tasks may run in either order.
    check_sequence_matches(
        record, [{("a", 0), ("b", 0)}, {("a", 1), ("b", 1)}, {("a", 2), ("b", 2)}]
    )
def test_task_crash_propagation():
    """A crashing sibling cancels the other children before the error propagates."""
    looper_record = []

    async def looper():
        try:
            while True:
                await _core.checkpoint()
        except _core.Cancelled:
            print("looper cancelled")
            looper_record.append("cancelled")

    async def crasher():
        raise ValueError("argh")

    async def main():
        async with _core.open_nursery() as nursery:
            nursery.start_soon(looper)
            nursery.start_soon(crasher)

    with pytest.raises(ValueError) as excinfo:
        _core.run(main)
    # The looper must have been cancelled (not left running) by the crash.
    assert looper_record == ["cancelled"]
    assert excinfo.value.args == ("argh",)
def test_main_and_task_both_crash():
    """If the main task and a child both crash, both errors arrive in a MultiError."""

    async def crash():
        raise ValueError

    async def main():
        async with _core.open_nursery() as nursery:
            nursery.start_soon(crash)
            raise KeyError

    with pytest.raises(_core.MultiError) as excinfo:
        _core.run(main)
    print(excinfo.value)
    raised_types = {type(exc) for exc in excinfo.value.exceptions}
    assert raised_types == {ValueError, KeyError}
def test_two_child_crashes():
    """Two crashing children produce a MultiError holding both exceptions."""

    async def crash_with(exception_type):
        raise exception_type

    async def main():
        async with _core.open_nursery() as nursery:
            nursery.start_soon(crash_with, KeyError)
            nursery.start_soon(crash_with, ValueError)

    with pytest.raises(_core.MultiError) as excinfo:
        _core.run(main)
    raised_types = {type(exc) for exc in excinfo.value.exceptions}
    assert raised_types == {ValueError, KeyError}
async def test_child_crash_wakes_parent():
    """A crashing child wakes a parent blocked in the nursery body."""

    async def crash():
        raise ValueError

    with pytest.raises(ValueError):
        async with _core.open_nursery() as nursery:
            nursery.start_soon(crash)
            await sleep_forever()
async def test_reschedule():
    """Tasks can hand control back and forth via explicit reschedule() calls,
    delivering both values and exceptions."""
    t1 = None
    t2 = None

    async def child1():
        nonlocal t1, t2
        t1 = _core.current_task()
        print("child1 start")
        # Woken by child2's reschedule with Value(0).
        x = await sleep_forever()
        print("child1 woke")
        assert x == 0
        print("child1 rescheduling t2")
        # Wake child2 with an exception instead of a value.
        _core.reschedule(t2, outcome.Error(ValueError()))
        print("child1 exit")

    async def child2():
        nonlocal t1, t2
        print("child2 start")
        t2 = _core.current_task()
        _core.reschedule(t1, outcome.Value(0))
        print("child2 sleep")
        with pytest.raises(ValueError):
            await sleep_forever()
        print("child2 successful exit")

    async with _core.open_nursery() as nursery:
        nursery.start_soon(child1)
        # let t1 run and fall asleep
        await _core.checkpoint()
        nursery.start_soon(child2)
async def test_current_time():
    """current_time() is monotonically increasing across a real sleep."""
    t1 = _core.current_time()
    # Windows clock is pretty low-resolution -- appveyor tests fail unless we
    # sleep for a bit here.
    time.sleep(time.get_clock_info("perf_counter").resolution)
    t2 = _core.current_time()
    assert t1 < t2
async def test_current_time_with_mock_clock(mock_clock):
    """current_time() tracks the mock clock, including manual jumps."""
    start = mock_clock.current_time()
    assert _core.current_time() == mock_clock.current_time()
    assert _core.current_time() == mock_clock.current_time()
    mock_clock.jump(3.14)
    assert mock_clock.current_time() == _core.current_time() == start + 3.14
async def test_current_clock(mock_clock):
    """The fixture's clock is the one _core reports as current."""
    assert _core.current_clock() is mock_clock
async def test_current_task():
    """A child's parent nursery belongs to the task that opened it."""
    parent = _core.current_task()

    async def check_parent():
        assert _core.current_task().parent_nursery.parent_task is parent

    async with _core.open_nursery() as nursery:
        nursery.start_soon(check_parent)
async def test_root_task():
    """The root task has no parent nursery of any kind."""
    root_task = _core.current_root_task()
    assert root_task.parent_nursery is None
    assert root_task.eventual_parent_nursery is None
def test_out_of_context():
    """Task/time queries outside of _core.run() raise RuntimeError."""
    for query in (_core.current_task, _core.current_time):
        with pytest.raises(RuntimeError):
            query()
async def test_current_statistics(mock_clock):
    """current_statistics() tracks living/runnable tasks, the run_sync_soon
    queue, and the time to the next deadline."""
    # Make sure all the early startup stuff has settled down
    await wait_all_tasks_blocked()

    # A child that sticks around to make some interesting stats:
    async def child():
        try:
            await sleep_forever()
        except _core.Cancelled:
            pass

    stats = _core.current_statistics()
    print(stats)
    # 2 system tasks + us
    assert stats.tasks_living == 3
    assert stats.run_sync_soon_queue_size == 0
    async with _core.open_nursery() as nursery:
        nursery.start_soon(child)
        await wait_all_tasks_blocked()
        token = _core.current_trio_token()
        token.run_sync_soon(lambda: None)
        token.run_sync_soon(lambda: None, idempotent=True)
        stats = _core.current_statistics()
        print(stats)
        # 2 system tasks + us + child
        assert stats.tasks_living == 4
        # the exact value here might shift if we change how we do accounting
        # (currently it only counts tasks that we already know will be
        # runnable on the next pass), but still useful to at least test the
        # difference between now and after we wake up the child:
        assert stats.tasks_runnable == 0
        assert stats.run_sync_soon_queue_size == 2
        nursery.cancel_scope.cancel()
        stats = _core.current_statistics()
        print(stats)
        assert stats.tasks_runnable == 1
    # Give the child a chance to die and the run_sync_soon a chance to clear
    await _core.checkpoint()
    await _core.checkpoint()
    with _core.CancelScope(deadline=_core.current_time() + 5):
        stats = _core.current_statistics()
        print(stats)
        assert stats.seconds_to_next_deadline == 5
    # No active deadline once the scope has exited.
    stats = _core.current_statistics()
    print(stats)
    assert stats.seconds_to_next_deadline == inf
async def test_cancel_scope_repr(mock_clock):
    """repr(CancelScope) reflects its state: unbound, active, deadline, cancelled, exited."""
    scope = _core.CancelScope()
    assert "unbound" in repr(scope)
    with scope:
        assert "active" in repr(scope)
        scope.deadline = _core.current_time() - 1
        assert "deadline is 1.00 seconds ago" in repr(scope)
        scope.deadline = _core.current_time() + 10
        assert "deadline is 10.00 seconds from now" in repr(scope)
        # when not in async context, can't get the current time
        assert "deadline" not in await to_thread_run_sync(repr, scope)
        scope.cancel()
        assert "cancelled" in repr(scope)
    assert "exited" in repr(scope)
def test_cancel_points():
    """Each kind of checkpoint raises Cancelled once its scope is cancelled."""

    async def main1():
        # checkpoint_if_cancelled: no-op before cancel, raises after.
        with _core.CancelScope() as scope:
            await _core.checkpoint_if_cancelled()
            scope.cancel()
            with pytest.raises(_core.Cancelled):
                await _core.checkpoint_if_cancelled()

    _core.run(main1)

    async def main2():
        # checkpoint: passes before cancel, raises after.
        with _core.CancelScope() as scope:
            await _core.checkpoint()
            scope.cancel()
            with pytest.raises(_core.Cancelled):
                await _core.checkpoint()

    _core.run(main2)

    async def main3():
        # A blocking call after cancel is interrupted.
        with _core.CancelScope() as scope:
            scope.cancel()
            with pytest.raises(_core.Cancelled):
                await sleep_forever()

    _core.run(main3)

    async def main4():
        # cancel_shielded_checkpoint never raises; plain checkpoint still does.
        with _core.CancelScope() as scope:
            scope.cancel()
            await _core.cancel_shielded_checkpoint()
            await _core.cancel_shielded_checkpoint()
            with pytest.raises(_core.Cancelled):
                await _core.checkpoint()

    _core.run(main4)
async def test_cancel_edge_cases():
    """cancel() is idempotent and cancellation is level-triggered."""
    with _core.CancelScope() as scope:
        # Two cancels in a row -- idempotent
        scope.cancel()
        scope.cancel()
        await _core.checkpoint()
    assert scope.cancel_called
    assert scope.cancelled_caught

    with _core.CancelScope() as scope:
        # Check level-triggering
        scope.cancel()
        with pytest.raises(_core.Cancelled):
            await sleep_forever()
        with pytest.raises(_core.Cancelled):
            await sleep_forever()
async def test_cancel_scope_multierror_filtering():
    """An outer cancel scope absorbs its own Cancelled exceptions from a
    MultiError while letting unrelated errors (KeyError) escape."""

    async def crasher():
        raise KeyError

    try:
        with _core.CancelScope() as outer:
            try:
                async with _core.open_nursery() as nursery:
                    # Two children that get cancelled by the nursery scope
                    nursery.start_soon(sleep_forever)  # t1
                    nursery.start_soon(sleep_forever)  # t2
                    nursery.cancel_scope.cancel()
                    with _core.CancelScope(shield=True):
                        await wait_all_tasks_blocked()
                    # One child that gets cancelled by the outer scope
                    nursery.start_soon(sleep_forever)  # t3
                    outer.cancel()
                    # And one that raises a different error
                    nursery.start_soon(crasher)  # t4
                # and then our __aexit__ also receives an outer Cancelled
            except _core.MultiError as multi_exc:
                # Since the outer scope became cancelled before the
                # nursery block exited, all cancellations inside the
                # nursery block continue propagating to reach the
                # outer scope.
                assert len(multi_exc.exceptions) == 5
                summary = {}
                for exc in multi_exc.exceptions:
                    summary.setdefault(type(exc), 0)
                    summary[type(exc)] += 1
                assert summary == {_core.Cancelled: 4, KeyError: 1}
                raise
    except AssertionError:  # pragma: no cover
        raise
    except BaseException as exc:
        # This is outside the outer scope, so all the Cancelled
        # exceptions should have been absorbed, leaving just a regular
        # KeyError from crasher()
        assert type(exc) is KeyError
    else:  # pragma: no cover
        assert False
async def test_precancelled_task():
    """A task spawned into an already-cancelled nursery still begins execution
    (https://github.com/python-trio/trio/issues/41), but receives Cancelled at
    its first blocking call."""
    record = []

    async def block():
        record.append("started")
        await sleep_forever()

    async with _core.open_nursery() as nursery:
        nursery.cancel_scope.cancel()
        nursery.start_soon(block)
    assert record == ["started"]
async def test_cancel_shielding():
with _core.CancelScope() as outer:
with _core.CancelScope() as inner:
await _core.checkpoint()
outer.cancel()
with pytest.raises(_core.Cancelled):
await _core.checkpoint()
assert inner.shield is False
with pytest.raises(TypeError):
inner.shield = "hello"
assert inner.shield is False
inner.shield = True
assert inner.shield is True
# shield protects us from 'outer'
await _core.checkpoint()
with _core.CancelScope() as innerest:
innerest.cancel()
# but it doesn't protect us from scope inside inner
with pytest.raises(_core.Cancelled):
await _core.checkpoint()
await _core.checkpoint()
inner.shield = False
# can disable shield again
with pytest.raises(_core.Cancelled):
await _core.checkpoint()
# re-enable shield
inner.shield = True
await _core.checkpoint()
# shield | |
<gh_stars>0
import copy
import logging
import re
import typing
from contextlib import suppress
from inspect import getdoc, iscoroutinefunction
import discord
from discord.ext import commands
from . import context, error, http, model
from .utils import manage_commands
from .utils.manage_components import get_components_ids, get_messages_ids
def _get_val(d: dict, key): # util function to get value from dict with fallback to None key
try:
value = d[key]
except KeyError: # if there is no specific key set, we fallback to "global/any"
value = d[None]
return value
class SlashCommand:
"""
Slash command handler class.
:param client: discord.py Client or Bot instance.
:type client: Union[discord.Client, discord.ext.commands.Bot]
:param sync_commands: Whether to sync commands automatically. Default `False`.
:type sync_commands: bool
    :param delete_from_unused_guilds: Whether the bot should make a request to set no commands for guilds that haven't got any commands registered in :class:`SlashCommand`. Default `False`.
:type delete_from_unused_guilds: bool
:param sync_on_cog_reload: Whether to sync commands on cog reload. Default `False`.
:type sync_on_cog_reload: bool
:param override_type: Whether to override checking type of the client and try register event.
:type override_type: bool
:param application_id: The application id of the bot, required only when the application id and bot id are different. (old bots)
:type application_id: int
.. note::
If ``sync_on_cog_reload`` is enabled, command syncing will be triggered when :meth:`discord.ext.commands.Bot.reload_extension`
is triggered.
:ivar _discord: Discord client of this client.
:ivar commands: Dictionary of the registered commands via :func:`.slash` decorator.
:ivar req: :class:`.http.SlashCommandRequest` of this client.
:ivar logger: Logger of this client.
:ivar sync_commands: Whether to sync commands automatically.
:ivar sync_on_cog_reload: Whether to sync commands on cog reload.
:ivar has_listener: Whether discord client has listener add function.
"""
    def __init__(
        self,
        client: typing.Union[discord.Client, commands.Bot],
        sync_commands: bool = False,
        delete_from_unused_guilds: bool = False,
        sync_on_cog_reload: bool = False,
        override_type: bool = False,
        application_id: typing.Optional[int] = None,
    ):
        self._discord = client
        self.commands = {}
        self.subcommands = {}
        self.components = {}
        self.logger = logging.getLogger("discord_slash")
        self.req = http.SlashCommandRequest(self.logger, self._discord, application_id)
        self.sync_commands = sync_commands
        self.sync_on_cog_reload = sync_on_cog_reload
        if self.sync_commands:
            self._discord.loop.create_task(self.sync_all_commands(delete_from_unused_guilds))
        if (
            not isinstance(client, commands.Bot)
            and not isinstance(client, commands.AutoShardedBot)
            and not override_type
        ):
            self.logger.warning(
                "Detected discord.Client! It is highly recommended to use `commands.Bot`. Do not add any `on_socket_response` event."
            )
            # Plain discord.Client has no listener API: hook the event directly.
            self._discord.on_socket_response = self.on_socket_response
            self.has_listener = False
        else:
            # Only one SlashCommand may attach itself to a given bot.
            if not hasattr(self._discord, "slash"):
                self._discord.slash = self
            else:
                raise error.DuplicateSlashClient("You can't have duplicate SlashCommand instances!")
            self._discord.add_listener(self.on_socket_response)
            self.has_listener = True
            # Wrap add_cog/remove_cog so cog slash commands are registered and
            # deregistered automatically when cogs are added or removed.
            default_add_function = self._discord.add_cog

            def override_add_cog(cog: commands.Cog):
                default_add_function(cog)
                self.get_cog_commands(cog)

            self._discord.add_cog = override_add_cog
            default_remove_function = self._discord.remove_cog

            def override_remove_cog(name: str):
                cog = self._discord.get_cog(name)
                if cog is None:
                    return
                self.remove_cog_commands(cog)
                default_remove_function(name)

            self._discord.remove_cog = override_remove_cog
            if self.sync_on_cog_reload:
                # Also re-sync with Discord after every extension reload.
                orig_reload = self._discord.reload_extension

                def override_reload_extension(*args):
                    orig_reload(*args)
                    self._discord.loop.create_task(
                        self.sync_all_commands(delete_from_unused_guilds)
                    )

                self._discord.reload_extension = override_reload_extension
def get_cog_commands(self, cog: commands.Cog):
"""
Gets slash command from :class:`discord.ext.commands.Cog`.
.. note::
Since version ``1.0.9``, this gets called automatically during cog initialization.
:param cog: Cog that has slash commands.
:type cog: discord.ext.commands.Cog
"""
if hasattr(cog, "_slash_registered"): # Temporary warning
return self.logger.warning(
"Calling get_cog_commands is no longer required "
"to add cog slash commands. Make sure to remove all calls to this function."
)
cog._slash_registered = True # Assuming all went well
func_list = [getattr(cog, x) for x in dir(cog)]
self._get_cog_slash_commands(cog, func_list)
self._get_cog_component_callbacks(cog, func_list)
    def _get_cog_slash_commands(self, cog, func_list):
        # Collect all slash command objects (base commands and subcommands)
        # defined as attributes on the cog and register them.
        res = [
            x
            for x in func_list
            if isinstance(x, (model.CogBaseCommandObject, model.CogSubcommandObject))
        ]
        for x in res:
            x.cog = cog
            if isinstance(x, model.CogBaseCommandObject):
                if x.name in self.commands:
                    raise error.DuplicateCommand(x.name)
                self.commands[x.name] = x
            else:
                if x.base in self.commands:
                    # The base command already exists (e.g. created by another
                    # subcommand): merge this subcommand's guild ids and
                    # permissions into the existing base command.
                    base_command = self.commands[x.base]
                    for i in x.allowed_guild_ids:
                        if i not in base_command.allowed_guild_ids:
                            base_command.allowed_guild_ids.append(i)
                    base_permissions = x.base_command_data["api_permissions"]
                    if base_permissions:
                        for applicable_guild in base_permissions:
                            if applicable_guild not in base_command.permissions:
                                base_command.permissions[applicable_guild] = []
                            base_command.permissions[applicable_guild].extend(
                                base_permissions[applicable_guild]
                            )
                    self.commands[x.base].has_subcommands = True
                else:
                    self.commands[x.base] = model.BaseCommandObject(x.base, x.base_command_data)
                if x.base not in self.subcommands:
                    self.subcommands[x.base] = {}
                if x.subcommand_group:
                    # Nested layout: subcommands[base][group][name].
                    if x.subcommand_group not in self.subcommands[x.base]:
                        self.subcommands[x.base][x.subcommand_group] = {}
                    if x.name in self.subcommands[x.base][x.subcommand_group]:
                        raise error.DuplicateCommand(f"{x.base} {x.subcommand_group} {x.name}")
                    self.subcommands[x.base][x.subcommand_group][x.name] = x
                else:
                    # Flat layout: subcommands[base][name].
                    if x.name in self.subcommands[x.base]:
                        raise error.DuplicateCommand(f"{x.base} {x.name}")
                    self.subcommands[x.base][x.name] = x
def _get_cog_component_callbacks(self, cog, func_list):
res = [x for x in func_list if isinstance(x, model.CogComponentCallbackObject)]
for x in res:
x.cog = cog
self._add_comp_callback_obj(x)
def remove_cog_commands(self, cog):
"""
Removes slash command from :class:`discord.ext.commands.Cog`.
.. note::
Since version ``1.0.9``, this gets called automatically during cog de-initialization.
:param cog: Cog that has slash commands.
:type cog: discord.ext.commands.Cog
"""
if hasattr(cog, "_slash_registered"):
del cog._slash_registered
func_list = [getattr(cog, x) for x in dir(cog)]
self._remove_cog_slash_commands(func_list)
self._remove_cog_component_callbacks(func_list)
    def _remove_cog_slash_commands(self, func_list):
        # Deregister base commands and subcommands found on the cog, keeping a
        # base command entry alive while it still has registered subcommands.
        res = [
            x
            for x in func_list
            if isinstance(x, (model.CogBaseCommandObject, model.CogSubcommandObject))
        ]
        for x in res:
            if isinstance(x, model.CogBaseCommandObject):
                if x.name not in self.commands:
                    continue  # Just in case it is removed due to subcommand.
                if x.name in self.subcommands:
                    self.commands[x.name].func = None
                    continue  # Let's remove completely when every subcommand is removed.
                del self.commands[x.name]
            else:
                if x.base not in self.subcommands:
                    continue  # Just in case...
                if x.subcommand_group:
                    del self.subcommands[x.base][x.subcommand_group][x.name]
                    # Prune the group dict once it is empty.
                    if not self.subcommands[x.base][x.subcommand_group]:
                        del self.subcommands[x.base][x.subcommand_group]
                else:
                    del self.subcommands[x.base][x.name]
                if not self.subcommands[x.base]:
                    # Last subcommand gone: drop (or demote) the base command.
                    del self.subcommands[x.base]
                    if x.base in self.commands:
                        if self.commands[x.base].func:
                            self.commands[x.base].has_subcommands = False
                        else:
                            del self.commands[x.base]
def _remove_cog_component_callbacks(self, func_list):
res = [x for x in func_list if isinstance(x, model.CogComponentCallbackObject)]
for x in res:
self.remove_component_callback_obj(x)
    async def to_dict(self):
        """
        Converts all commands currently registered to :class:`SlashCommand` to a dictionary.
        Returns a dictionary in the format:
        .. code-block:: python
            {
                "global" : [], # list of global commands
                "guild" : {
                    0000: [] # list of commands in the guild 0000
                }
            }
        Commands are in the format specified by discord `here <https://discord.com/developers/docs/interactions/slash-commands#applicationcommand>`_
        """
        await self._discord.wait_until_ready()  # In case commands are still not registered to SlashCommand.
        # Collect the union of all guild ids any command is restricted to.
        all_guild_ids = []
        for x in self.commands:
            for i in self.commands[x].allowed_guild_ids:
                if i not in all_guild_ids:
                    all_guild_ids.append(i)
        cmds = {"global": [], "guild": {x: [] for x in all_guild_ids}}
        wait = {}  # Before merging to return dict, let's first put commands to temporary dict.
        # Pass 1: top-level commands only, keyed by guild id (or "global").
        for x in self.commands:
            selected = self.commands[x]
            if selected.allowed_guild_ids:
                for y in selected.allowed_guild_ids:
                    if y not in wait:
                        wait[y] = {}
                    command_dict = {
                        "name": x,
                        "description": selected.description or "No Description.",
                        "options": selected.options or [],
                        "default_permission": selected.default_permission,
                        "permissions": {},
                    }
                    # Only the permissions relevant to this guild are attached.
                    if y in selected.permissions:
                        command_dict["permissions"][y] = selected.permissions[y]
                    # deepcopy so later option appends don't alias across guilds.
                    wait[y][x] = copy.deepcopy(command_dict)
            else:
                if "global" not in wait:
                    wait["global"] = {}
                command_dict = {
                    "name": x,
                    "description": selected.description or "No Description.",
                    "options": selected.options or [],
                    "default_permission": selected.default_permission,
                    "permissions": selected.permissions or {},
                }
                wait["global"][x] = copy.deepcopy(command_dict)
        # Separated normal command add and subcommand add not to
        # merge subcommands to one. More info at Issue #88
        # https://github.com/eunwoo1104/discord-py-slash-command/issues/88
        # Pass 2: attach subcommands / subcommand groups as options of their base.
        for x in self.commands:
            if not self.commands[x].has_subcommands:
                continue
            tgt = self.subcommands[x]
            for y in tgt:
                sub = tgt[y]
                if isinstance(sub, model.SubcommandObject):
                    # Plain subcommand: one SUB_COMMAND option on the base.
                    _dict = {
                        "name": sub.name,
                        "description": sub.description or "No Description.",
                        "type": model.SlashCommandOptionType.SUB_COMMAND,
                        "options": sub.options or [],
                    }
                    if sub.allowed_guild_ids:
                        for z in sub.allowed_guild_ids:
                            wait[z][x]["options"].append(_dict)
                    else:
                        wait["global"][x]["options"].append(_dict)
                else:
                    # Subcommand group: build one SUB_COMMAND_GROUP per scope
                    # (guild id or "global") and fill it with its subcommands.
                    queue = {}
                    base_dict = {
                        "name": y,
                        "description": "No Description.",
                        "type": model.SlashCommandOptionType.SUB_COMMAND_GROUP,
                        "options": [],
                    }
                    for z in sub:
                        sub_sub = sub[z]
                        _dict = {
                            "name": sub_sub.name,
                            "description": sub_sub.description or "No Description.",
                            "type": model.SlashCommandOptionType.SUB_COMMAND,
                            "options": sub_sub.options or [],
                        }
                        if sub_sub.allowed_guild_ids:
                            for i in sub_sub.allowed_guild_ids:
                                if i not in queue:
                                    queue[i] = copy.deepcopy(base_dict)
                                queue[i]["options"].append(_dict)
                        else:
                            if "global" not in queue:
                                queue["global"] = copy.deepcopy(base_dict)
                            queue["global"]["options"].append(_dict)
                    # Merge each scope's group into its base command.
                    for i in queue:
                        wait[i][x]["options"].append(queue[i])
        # Flatten the temporary dict into the return structure.
        for x in wait:
            if x == "global":
                [cmds["global"].append(n) for n in wait["global"].values()]
            else:
                [cmds["guild"][x].append(n) for n in wait[x].values()]
        return cmds
async def sync_all_commands(
self, delete_from_unused_guilds=False, delete_perms_from_unused_guilds=False
):
"""
Matches commands registered on Discord to commands registered here.
Deletes any commands on Discord but not here, and registers any not on Discord.
This is done with a `put` request.
A PUT request will only be made if there are changes detected.
If ``sync_commands`` is ``True``, then this will be automatically called.
:param delete_from_unused_guilds: If the bot should make a request to set no commands for guilds that haven't got any commands registered in :class:``SlashCommand``
:param delete_perms_from_unused_guilds: If the bot should make a request to clear permissions for guilds that haven't got any permissions registered in :class:``SlashCommand``
"""
permissions_map = {}
cmds = await self.to_dict()
self.logger.info("Syncing commands...")
cmds_formatted = {None: cmds["global"]}
for guild in cmds["guild"]:
cmds_formatted[guild] = cmds["guild"][guild]
| |
<reponame>hitfee01/3DDeepBoxRetina2D
# from https://github.com/amdegroot/ssd.pytorch
import torch
from torchvision import transforms
import cv2
import numpy as np
import types
from numpy import random
from Archs_2D.BBox import BBoxes
def intersect(box_a, box_b):
    """Intersection area of each box in *box_a* with the single box *box_b*.

    Boxes are (x1, y1, x2, y2); box_a is (N, 4), box_b is (4,).
    Returns an (N,) array of overlap areas (0 where boxes don't meet).
    """
    upper_right = np.minimum(box_a[:, 2:], box_b[2:])
    lower_left = np.maximum(box_a[:, :2], box_b[:2])
    sides = np.clip(upper_right - lower_left, a_min=0, a_max=np.inf)
    return sides[:, 0] * sides[:, 1]
def jaccard_numpy(box_a, box_b):
    """Jaccard overlap (IoU) of a set of boxes against one box.

    A ∩ B / A ∪ B = A ∩ B / (area(A) + area(B) - A ∩ B)

    Args:
        box_a: Multiple bounding bboxes, Shape: [num_bboxes, 4]
        box_b: Single bounding box, Shape: [4]
    Return:
        jaccard overlap, Shape: [num_bboxes]
    """
    overlap = intersect(box_a, box_b)
    widths_a = box_a[:, 2] - box_a[:, 0]
    heights_a = box_a[:, 3] - box_a[:, 1]
    area_a = widths_a * heights_a
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    union = area_a + area_b - overlap
    return overlap / union
class Compose(object):
    """Chain several augmentation transforms into a single callable.

    Each transform must accept ``(img, bboxes=..., labels=...)`` and return
    the same triple; the triple is threaded through the list in order.

    Args:
        transforms (List[Transform]): list of transforms to compose.
    """

    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, img, bboxes=None, labels=None):
        for transform in self.transforms:
            img, bboxes, labels = transform(img, bboxes=bboxes, labels=labels)
        return img, bboxes, labels
class Lambda(object):
    """Adapt an arbitrary lambda so it can be used as a transform."""

    def __init__(self, lambd):
        # Only genuine lambdas are accepted, matching the original contract.
        assert isinstance(lambd, types.LambdaType)
        self.lambd = lambd

    def __call__(self, img, bboxes=None, labels=None):
        result = self.lambd(img, bboxes, labels)
        return result
class ConvertFromInts(object):
    """Cast the image to float32; boxes and labels pass through untouched."""

    def __call__(self, image, bboxes=None, labels=None):
        as_float = image.astype(np.float32)
        return as_float, bboxes, labels
class SubtractMeans(object):
    """Subtract a fixed (per-channel) mean from the image, yielding float32."""

    def __init__(self, mean):
        self.mean = np.array(mean, dtype=np.float32)

    def __call__(self, image, bboxes=None, labels=None):
        shifted = image.astype(np.float32) - self.mean
        return shifted.astype(np.float32), bboxes, labels
class ToAbsoluteCoords(object):
    """Scale normalized [0, 1] box coordinates up to pixel coordinates in place."""

    def __call__(self, image, bboxes=None, labels=None):
        height, width, channels = image.shape
        # Columns 0/2 are x-coordinates, 1/3 are y-coordinates.
        for column, scale in ((0, width), (2, width), (1, height), (3, height)):
            bboxes[:, column] *= scale
        return image, bboxes, labels
class ToPercentCoords(object):
    """Scale pixel box coordinates down to normalized [0, 1] values in place."""

    def __call__(self, image, bboxes=None, labels=None):
        height, width, channels = image.shape
        # y columns first, then x columns — the two updates are independent.
        bboxes[:, 1::2] /= height
        bboxes[:, 0::2] /= width
        return image, bboxes, labels
class Resize(object):
    """Resize the image (or a list of images) to a fixed size with bicubic interpolation."""

    def __init__(self, size=300):
        # ``size`` may be a single edge length or an explicit (w, h) pair.
        self.size = size

    def __call__(self, image, bboxes=None, labels=None):
        if isinstance(self.size, (tuple, list)) and len(self.size) == 2:
            target = self.size
        else:
            target = (self.size, self.size)

        def _resize(src):
            return cv2.resize(src=src, dsize=target, interpolation=cv2.INTER_CUBIC)

        if isinstance(image, list):
            image = [_resize(src) for src in image]
        else:
            image = _resize(image)
        return image, bboxes, labels
class RandomSaturation(object):
    """With 50% probability, scale the saturation (channel 1) of an HSV image."""

    def __init__(self, lower=0.5, upper=1.5):
        self.lower = lower
        self.upper = upper
        assert self.upper >= self.lower, "contrast upper must be >= lower."
        assert self.lower >= 0, "contrast lower must be non-negative."

    def __call__(self, image, bboxes=None, labels=None):
        if random.randint(2):
            factor = random.uniform(self.lower, self.upper)
            image[:, :, 1] *= factor
        return image, bboxes, labels
class RandomHue(object):
    """With 50% probability, shift the hue channel and wrap it into [0, 360)."""

    def __init__(self, delta=18.0):
        assert 0.0 <= delta <= 360.0
        self.delta = delta

    def __call__(self, image, bboxes=None, labels=None):
        if random.randint(2):
            hue = image[:, :, 0]  # view — mutations hit the image in place
            hue += random.uniform(-self.delta, self.delta)
            hue[hue > 360.0] -= 360.0
            hue[hue < 0.0] += 360.0
        return image, bboxes, labels
class RandomLightingNoise(object):
    """With 50% probability, apply a random permutation of the color channels."""

    def __init__(self):
        # All six orderings of three channels.
        self.perms = ((0, 1, 2), (0, 2, 1),
                      (1, 0, 2), (1, 2, 0),
                      (2, 0, 1), (2, 1, 0))

    def __call__(self, image, bboxes=None, labels=None):
        if random.randint(2):
            order = self.perms[random.randint(len(self.perms))]
            image = SwapChannels(order)(image)
        return image, bboxes, labels
class ConvertColor(object):
    """Convert an image between the BGR/RGB/HSV color spaces via OpenCV."""

    def __init__(self, current, transform):
        self.transform = transform  # target color space
        self.current = current      # source color space

    def __call__(self, image, bboxes=None, labels=None):
        conversions = {
            ('BGR', 'HSV'): cv2.COLOR_BGR2HSV,
            ('RGB', 'HSV'): cv2.COLOR_RGB2HSV,
            ('BGR', 'RGB'): cv2.COLOR_BGR2RGB,
            ('HSV', 'BGR'): cv2.COLOR_HSV2BGR,
            ('HSV', 'RGB'): cv2.COLOR_HSV2RGB,
        }
        pair = (self.current, self.transform)
        if pair not in conversions:
            raise NotImplementedError
        image = cv2.cvtColor(image, conversions[pair])
        return image, bboxes, labels
class RandomContrast(object):
    """With 50% probability, scale image intensities by a random factor.

    Expects a float image (the scale is applied in place).
    """

    def __init__(self, lower=0.5, upper=1.5):
        self.lower = lower
        self.upper = upper
        assert self.upper >= self.lower, "contrast upper must be >= lower."
        assert self.lower >= 0, "contrast lower must be non-negative."

    def __call__(self, image, bboxes=None, labels=None):
        if random.randint(2):
            image *= random.uniform(self.lower, self.upper)
        return image, bboxes, labels
class RandomBrightness(object):
    """With 50% probability, add a uniform random offset to the whole image."""

    def __init__(self, delta=32):
        assert 0.0 <= delta <= 255.0
        self.delta = delta

    def __call__(self, image, bboxes=None, labels=None):
        if random.randint(2):
            image += random.uniform(-self.delta, self.delta)
        return image, bboxes, labels
class ToCV2Image(object):
    """Convert a CHW torch tensor back to an HWC float32 numpy image."""

    def __call__(self, tensor, bboxes=None, labels=None):
        array = tensor.cpu().numpy().astype(np.float32)
        hwc = array.transpose((1, 2, 0))
        return hwc, bboxes, labels
# class ToTensor(object):
# def __call__(self, cvimage, bboxes=None, labels=None):
# return torch.from_numpy(cvimage.astype(np.float32)).permute(2, 0, 1), bboxes, labels
class ToTorchTensor(object):
    """Convert image(s) via torchvision's ToTensor and boxes to float32 tensors."""

    def __init__(self):
        self.toTensor = transforms.ToTensor()

    def __call__(self, image, bboxes=None, labels=None):
        if bboxes is not None and not torch.is_tensor(bboxes):
            bboxes = torch.from_numpy(bboxes.astype(np.float32))
        if isinstance(image, list):
            image = [self.toTensor(src) for src in image]
        else:
            image = self.toTensor(image)
        return image, bboxes, labels
class ToTensor(object):
    """Convert a numpy image, boxes and labels to torch tensors.

    The image stays in HWC layout as float32 (channel reordering is left to
    ToNCHW); boxes become float32 tensors and labels int64 tensors, the
    dtype torch loss functions expect.
    """

    def __init__(self):
        self.trans = transforms.ToTensor()

    def __call__(self, cvimage, bboxes=None, labels=None):
        timg = torch.from_numpy(cvimage.astype(np.float32))
        if bboxes is not None:
            bboxes = torch.from_numpy(bboxes.astype(np.float32))
        if labels is not None:
            # BUG FIX: ``np.long`` was removed in NumPy 1.24 and raised an
            # AttributeError here; ``np.int64`` is the type it aliased.
            labels = torch.from_numpy(labels.astype(np.int64))
        return timg, bboxes, labels
class ToNCHW(object):
    """Reorder an HWC image tensor into CHW layout."""

    def __call__(self, image, bboxes=None, labels=None):
        chw = image.permute(2, 0, 1)
        return chw, bboxes, labels
class ToBBoxes(object):
    """Wrap the raw box data in the project's BBoxes container."""

    def __call__(self, image, bboxes=None, labels=None):
        wrapped = BBoxes(bboxes)
        return image, wrapped, labels
class RandomCrop(object):
    """With 50% probability, jitter a single box by up to *rate* of its size,
    then crop the image to the (possibly jittered) box."""

    def __init__(self, rate=0.3):
        self._rate = rate

    def __call__(self, image, bboxes=None, labels=None):
        height, width, _ = image.shape
        if isinstance(bboxes, (tuple, list)):
            bboxes = np.array(bboxes, dtype=np.float32)
        elif torch.is_tensor(bboxes):
            bboxes = bboxes.numpy().astype(dtype=np.float32)
        assert (bboxes.ndim == 1)

        def _clamp(box):
            # Keep x coords inside [0, width] and y coords inside [0, height].
            box[0::2] = np.clip(box[0::2], a_min=0, a_max=width)
            box[1::2] = np.clip(box[1::2], a_min=0, a_max=height)

        _clamp(bboxes)
        if random.randint(2):
            # Jitter each coordinate within +-rate of the box's extent.
            x_range = (bboxes[2] - bboxes[0]) * self._rate
            y_range = (bboxes[3] - bboxes[1]) * self._rate
            jitter = np.random.random_sample(bboxes.shape)
            jitter[0::2] = jitter[0::2] * x_range * 2 - x_range
            jitter[1::2] = jitter[1::2] * y_range * 2 - y_range
            bboxes += jitter
            _clamp(bboxes)
        crop = image[int(bboxes[1]):(int(bboxes[3]) + 1), int(bboxes[0]):(int(bboxes[2]) + 1)]
        return crop, bboxes, labels
class Crop(object):
    """Cut each given box out of the image, returning a list of crops.

    Boxes are clamped to the image bounds in place before cropping.
    """

    def __call__(self, image, bboxes=None, labels=None):
        height, width, _ = image.shape
        if isinstance(bboxes, (tuple, list)):
            bboxes = np.array(bboxes, dtype=np.float32)
        elif torch.is_tensor(bboxes):
            bboxes = bboxes.numpy().astype(dtype=np.float32)
        assert (bboxes.ndim == 2)  # (num, 4)
        crops = []
        for bbox in bboxes:  # each ``bbox`` is a view — clipping mutates bboxes
            bbox[0::2] = np.clip(bbox[0::2], a_min=0, a_max=width)
            bbox[1::2] = np.clip(bbox[1::2], a_min=0, a_max=height)
            crops.append(image[int(bbox[1]):(int(bbox[3]) + 1), int(bbox[0]):(int(bbox[2]) + 1)])
        return crops, bboxes, labels
class RandomSampleCrop(object):
"""Crop
Arguments:
img (Image): the image being input during training
bboxes (Tensor): the original bounding bboxes in pt form
labels (Tensor): the class labels for each bbox
mode (float tuple): the min and max jaccard overlaps
Return:
(img, bboxes, labels)
img (Image): the cropped image
bboxes (Tensor): the adjusted bounding bboxes in pt form
labels (Tensor): the class labels for each bbox
"""
def __init__(self):
self.sample_options = (
# using entire original input image
None,
# sample a patch s.t. MIN jaccard w/ obj in .1,.3,.4,.7,.9
(0.1, None),
(0.3, None),
(0.7, None),
(0.9, None),
# randomly sample a patch
(None, None),
)
def __call__(self, image, bboxes=None, labels=None):
height, width, _ = image.shape
while True:
# randomly choose a mode
mode = random.choice(self.sample_options)
if mode is None:
return image, bboxes, labels
min_iou, max_iou = mode
if min_iou is None:
min_iou = float('-inf')
if max_iou is None:
max_iou = float('inf')
# max trails (50)
for _ in range(50):
current_image = image
w = random.uniform(0.3 * width, width)
h = random.uniform(0.3 * height, height)
# aspect ratio constraint b/t .5 & 2
if h / w < 0.5 or h / w > 2:
continue
left = random.uniform(width - w)
top = random.uniform(height - h)
# convert to integer rect x1,y1,x2,y2
rect = np.array([int(left), int(top), int(left+w), int(top+h)])
# calculate IoU (jaccard overlap) b/t the cropped and gt bboxes
overlap = jaccard_numpy(bboxes, rect)
# is min and max overlap constraint satisfied? if not try again
if overlap.min() < min_iou and max_iou < overlap.max():
continue
# cut the crop from the image
current_image = current_image[rect[1]:rect[3], rect[0]:rect[2],
:]
# keep overlap with gt box IF center in sampled patch
centers = (bboxes[:, :2] + bboxes[:, 2:]) / 2.0
# mask in all gt bboxes that above and to the left of centers
m1 = (rect[0] < centers[:, 0]) * (rect[1] < centers[:, 1])
# mask in all gt | |
<gh_stars>100-1000
# Copyright 2019 Xiaomi, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
converter.node - class to manage Node
"""
from __future__ import division
from __future__ import print_function
import six
import logging
from common import *
from utils import *
_LOG = logging.getLogger(__name__)
def make_node(name, type, inputs, outputs, attrs=None, consts=None):
    """Factory: build the Node subclass that matches the Kaldi op *type*.

    Op types without a dedicated class fall back to the generic Node.
    """
    specialized = {
        KaldiOpType.Gemm.name: GemmNode,
        KaldiOpType.Append.name: AppendNode,
        KaldiOpType.Bias.name: BiasNode,
        KaldiOpType.Constant.name: ConstantNode,
        KaldiOpType.Conv1d.name: Conv1dNode,
        KaldiOpType.Conv.name: ConvNode,
        KaldiOpType.Dct.name: DCTNode,
        KaldiOpType.DynamicLSTM.name: DynamicLSTMNode,
        KaldiOpType.ExtractPooling.name: ExtractPoolingNode,
        KaldiOpType.Identity.name: IdentityNode,
        KaldiOpType.IfDefined.name: IfDefinedNode,
        KaldiOpType.LstmNonlinear.name: LSTMNonLinearNode,
        KaldiOpType.Offset.name: OffsetNode,
        KaldiOpType.ReplaceIndex.name: ReplaceIndexNode,
        KaldiOpType.RestrictedAttention.name: RestrictedAttentionNode,
        KaldiOpType.Round.name: RoundNode,
        KaldiOpType.Scales.name: ScalesNode,
        KaldiOpType.Splice.name: SpliceNode,
        KaldiOpType.StatisticsExtraction.name: StatisticsExtractionNode,
        KaldiOpType.StatisticsPooling.name: StatisticsPoolingNode,
        KaldiOpType.SumGroup.name: SumGroupNode,
        KaldiOpType.Subsample.name: SubsampleNode,
        KaldiOpType.TargetRMSNorm.name: TargetRMSNormNode,
        KaldiOpType.PerEltScale.name: PerEltScaleNode,
    }
    node_class = specialized.get(type, Node)
    return node_class(name, type, inputs, outputs, attrs, consts)
class Node(object):
    """Base node of the Kaldi-converter graph.

    Holds the op name/type, input/output tensor names, attributes and
    constant tensors, and implements the generic inference passes (dim,
    shape, index, dependency and range propagation) that the specialized
    subclasses override where needed.
    """

    def __init__(self, name, type, inputs, outputs, attrs=None, consts=None):
        # BUG FIX: ``attrs`` previously defaulted to a shared mutable ``{}``.
        # ``update_attrs`` mutates ``self.attrs`` in place, so nodes created
        # without explicit attrs could leak attribute values into each other.
        # ``None`` keeps the call sites compatible and removes the sharing.
        self.name = name
        self.type = type
        self.nexts = []
        self.output_shape = None
        self._inputs = inputs
        self._outputs = outputs
        self._input_dim = 0
        self._output_dim = 0
        self._dependencies = []
        self._input_indexes = []
        self._output_indexes = []
        # Effectively unbounded frame-index range until inference narrows it.
        self.input_range = [-100000, 100000]
        self.output_range = [-100000, 100000]
        if attrs is None:
            self.attrs = {}
        else:
            self.attrs = attrs
        self.update_attrs()
        if consts is None:
            self.consts = {}
        else:
            self.consts = consts

    def update_attrs(self):
        """Normalize attribute value types and fill in op-specific defaults."""
        if self.attrs is None:
            return
        if self.type == KaldiOpType.Gemm.name:
            self.attrs['transB'] = 1
        elif self.type == KaldiOpType.Append.name:
            self.attrs['axis'] = -1
        for key, value in self.attrs.items():
            # Coerce known integer-, float- and list-valued attributes so the
            # rest of the converter can rely on their types.
            if key in ['input_dim',
                       'dim',
                       'output_dim',
                       'const_component_dim',
                       'offset',
                       'mod',
                       'left_context', 'right_context',
                       'p'] and not isinstance(value, int):
                self.attrs[key] = int(value)
            elif key in ['target_rms', 'epsilon',
                         'count',
                         'scale',
                         'variance_floor'] and not isinstance(value, float):
                self.attrs[key] = float(value)
            elif key in ['context'] and not isinstance(value, list):
                self.attrs[key] = value.tolist()
        # BUG FIX: this Splice handling used to be an ``elif`` nested inside the
        # loop above (keyed on ``self.type`` instead of ``key``) and it read the
        # nonexistent ``self._attrs``, raising AttributeError whenever it was
        # actually reached. Derive the context after normalization instead.
        if self.type == KaldiOpType.Splice.name:
            if 'context' not in self.attrs and \
                    'left_context' in self.attrs and \
                    'right_context' in self.attrs:
                left_context = self.read_attribute('left_context')
                right_context = self.read_attribute('right_context')
                context = [
                    t for t in range(-left_context, right_context + 1)]
                self.attrs['context'] = context

    @property
    def inputs(self):
        return self._inputs

    @inputs.setter
    def inputs(self, inputs):
        # Accept a single name or a list of names.
        if isinstance(inputs, six.string_types):
            self._inputs = [inputs]
        elif isinstance(inputs, list):
            self._inputs = inputs
        else:
            kaldi_check(False,
                        "inputs(%s) should be a list or a string!" % inputs)

    @property
    def outputs(self):
        return self._outputs

    @outputs.setter
    def outputs(self, outputs):
        # Accept a single name or a list of names.
        if isinstance(outputs, six.string_types):
            self._outputs = [outputs]
        elif isinstance(outputs, list):
            self._outputs = outputs
        else:
            kaldi_check(False,
                        "outputs(%s) should be a list or a string!" % outputs)

    @property
    def dependencies(self):
        return self._dependencies

    @dependencies.setter
    def dependencies(self, dependencies):
        self._dependencies = dependencies

    @property
    def input_indexes(self):
        return self._input_indexes

    @input_indexes.setter
    def input_indexes(self, input_indexes):
        self._input_indexes = input_indexes

    @property
    def output_indexes(self):
        return self._output_indexes

    @output_indexes.setter
    def output_indexes(self, output_indexes):
        self._output_indexes = output_indexes

    @property
    def output_dim(self):
        return self._output_dim

    @output_dim.setter
    def output_dim(self, output_dim):
        # Mirror the value into attrs so it is serialized with the node.
        self._output_dim = output_dim
        self.attrs['output_dim'] = output_dim

    @property
    def input_dim(self):
        return self._input_dim

    @input_dim.setter
    def input_dim(self, input_dim):
        # Mirror the value into attrs so it is serialized with the node.
        self.attrs['input_dim'] = input_dim
        self._input_dim = input_dim

    def set_attribute(self, attr_name, attr_value):
        """Set a single attribute value."""
        self.attrs[attr_name] = attr_value

    def read_attribute(self, attr_name):
        """Return an attribute value, failing loudly when it is missing."""
        kaldi_check(attr_name in self.attrs, "cannot find")
        return self.attrs[attr_name]

    def info(self):
        """Log a one-line summary of this node."""
        _LOG.info("name:%s type: %s"
                  " inputs: %s,"
                  " outputs: %s,"
                  " attrs: %s,"
                  " shape: %s," %
                  (self.name,
                   self.type,
                   self.inputs,
                   self.outputs,
                   self.attrs,
                   self.output_shape))

    def inference_dim(self, dims_by_name, nodes_by_name):
        """Resolve this node's output dim, recursing into the first input if needed."""
        if self.name in dims_by_name:
            output_dim = dims_by_name[self.name]
            self.input_dim = output_dim
        elif 'output_dim' in self.attrs:
            output_dim = self.read_attribute('output_dim')
            self.input_dim = output_dim
        elif 'dim' in self.attrs:
            output_dim = self.read_attribute('dim')
            self.input_dim = output_dim
        elif 'input_dim' in self.attrs:
            output_dim = self.read_attribute('input_dim')
        else:
            # No explicit dim: inherit from the (possibly not yet inferred)
            # first input node.
            if self.inputs[0] in dims_by_name:
                output_dim = dims_by_name[self.inputs[0]]
                self.input_dim = output_dim
            else:
                kaldi_check(self.inputs[0] in nodes_by_name,
                            "Cannot find node: %s" % self.inputs[0])
                input_node = nodes_by_name[self.inputs[0]]
                input_node.inference_dim(dims_by_name, nodes_by_name)
                self.input_dim = input_node.output_dim
                output_dim = self.input_dim
        self.output_dim = output_dim
        dims_by_name[self.name] = output_dim

    def is_simple(self):
        """Generic nodes are 'simple' (one input stream, dim-preserving)."""
        return True

    def inference_shape(self, batch, shapes, nodes_by_name):
        """Record the [batch, chunk, dim] output shape for this node."""
        if self.name in shapes:
            return
        output_chunk = len(self.output_indexes)
        output_shape = [batch, output_chunk, self.output_dim]
        shapes[self.name] = output_shape
        self.output_shape = output_shape

    def precompute(self):
        """Hook for subclasses that need work done before conversion."""
        pass

    def inference_index(self, indexes_by_name, nodes_by_name):
        """Propagate frame indexes from the first input to this node."""
        input_name = self.inputs[0]
        if input_name in indexes_by_name:
            input_indexes = indexes_by_name[input_name]
            self.input_indexes = input_indexes
        else:
            kaldi_check(input_name in nodes_by_name,
                        "Cannot find node: %s" % input_name)
            input_node = nodes_by_name[input_name]
            input_node.inference_index(indexes_by_name, nodes_by_name)
            input_indexes = indexes_by_name[input_name]
            self.input_indexes = input_indexes
        indexes_by_name[self.name] = self.output_indexes
        kaldi_check(set(self.dependencies) <= set(self.input_indexes),
                    "input indexes is sufficient for computation")

    def inference_dependencies(self,
                               output_indexes,
                               dependencies_by_name,
                               nodes_by_name,
                               subsample_factor):
        """Derive which input frame indexes this node needs for *output_indexes*."""
        kaldi_check(len(output_indexes) > 0, "invalid output indexes values.")
        dependencies = list()
        [start, end] = self.input_range
        current_output_indexes = list()
        for index in output_indexes:
            # Only indexes inside the node's valid input range count.
            if index in range(start, int(end + 1)):
                dependencies.append(index)
                current_output_indexes.append(index)
        if self.name in dependencies_by_name:
            dependencies.extend(dependencies_by_name[self.name])
        # De-duplicate while keeping the lists sorted.
        dependencies = list(set(dependencies))
        dependencies.sort()
        self.dependencies = dependencies
        current_output_indexes.extend(self.output_indexes)
        current_output_indexes = list(set(current_output_indexes))
        current_output_indexes.sort()
        self.output_indexes = current_output_indexes
        dependencies_by_name[self.name] = dependencies

    def inference_range(self, ranges_by_name, nodes_by_name):
        """Propagate the valid frame range from the first input, memoized."""
        if self.name not in ranges_by_name:
            input_name = self.inputs[0]
            if input_name in ranges_by_name:
                [start, end] = ranges_by_name[input_name]
            else:
                kaldi_check(input_name in nodes_by_name,
                            "Cannot find node: %s" % input_name)
                input_node = nodes_by_name[input_name]
                input_node.inference_range(ranges_by_name, nodes_by_name)
                [start, end] = input_node.output_range
            ranges_by_name[self.name] = [start, end]
            self.input_range = [start, end]
            self.output_range = [start, end]
class GemmNode(Node):
    """Matrix-multiply node: output dim comes from the weight matrix rows."""

    def inference_dim(self, dims_by_name, nodes_by_name):
        num_repeats = self.attrs['num_repeats'] if 'num_repeats' in self.attrs else 1
        weights_name = self.inputs[1]
        kaldi_check(weights_name in self.consts,
                    "%s is not found in const." % weights_name)
        weights_shape = self.consts[weights_name].shape
        self.output_dim = weights_shape[0] * num_repeats
        dims_by_name[self.name] = self.output_dim
class AppendNode(Node):
    """Concatenation node: dim is the sum of input dims, range the intersection."""

    def is_simple(self):
        # Append merges several input streams, so it is not a simple node.
        return False

    def inference_dim(self, dims_by_name, nodes_by_name):
        total = 0
        for input_name in self.inputs:
            if input_name in dims_by_name:
                total += dims_by_name[input_name]
            else:
                kaldi_check(input_name in nodes_by_name,
                            "Cannot find %s'." % input_name)
                input_node = nodes_by_name[input_name]
                input_node.inference_dim(dims_by_name, nodes_by_name)
                total += input_node.output_dim
        self.output_dim = total
        dims_by_name[self.name] = total

    def inference_index(self, indexes_by_name, nodes_by_name):
        # Union of the already-known index sets of all inputs.
        gathered = list()
        for input_name in self.inputs:
            if input_name in indexes_by_name:
                gathered.extend(indexes_by_name[input_name])
        self.input_indexes = sorted(set(gathered))
        indexes_by_name[self.name] = self.output_indexes
        kaldi_check(set(self.dependencies) <= set(self.input_indexes),
                    "input indexes is sufficient for computation")

    def inference_range(self, ranges_by_name, nodes_by_name):
        if self.name not in ranges_by_name:
            # Intersect every input's valid range with our own.
            [start, end] = self.input_range
            for input in self.inputs:
                if input in ranges_by_name:
                    [input_start, input_end] = ranges_by_name[input]
                else:
                    kaldi_check(input in nodes_by_name,
                                "Cannot find node: %s" % input)
                    input_node = nodes_by_name[input]
                    input_node.inference_range(ranges_by_name, nodes_by_name)
                    [input_start, input_end] = input_node.output_range
                start = max(start, input_start)
                end = min(end, input_end)
            ranges_by_name[self.name] = [start, end]
            self.input_range = [start, end]
            self.output_range = [start, end]
class BiasNode(Node):
    """Bias-add node: dim from attrs, or from the trailing weight's last axis."""

    def inference_dim(self, dims_by_name, nodes_by_name):
        if 'output_dim' in self.attrs:
            dim = self.attrs['output_dim']
        else:
            weight_name = self.inputs[-1]
            kaldi_check(weight_name in self.consts,
                        "Cannot find %s in %s's consts." %
                        (weight_name, self.name))
            dim = self.consts[weight_name].shape[-1]
        self.output_dim = dim
        dims_by_name[self.name] = dim
class ConstantNode(Node):
def inference_dim(self, dims_by_name, nodes_by_name):
if 'output_dim' in self.attrs:
output_dim = self.attrs['output_dim']
else:
weight_name = self.inputs[-1]
kaldi_check(weight_name in self.consts,
"Cannot find %s in %s's consts." %
(weight_name, | |
"""
mcpython - a minecraft clone written in python licenced under the MIT-licence
(https://github.com/mcpython4-coding/core)
Contributors: uuk, xkcdjerry (inactive)
Based on the game of fogleman (https://github.com/fogleman/Minecraft), licenced under the MIT-licence
Original game "minecraft" by Mojang Studios (www.minecraft.net), licenced under the EULA
(https://account.mojang.com/documents/minecraft_eula)
Mod loader inspired by "Minecraft Forge" (https://github.com/MinecraftForge/MinecraftForge) and similar
This project is not official by mojang and does not relate to it.
"""
import asyncio
import datetime
import typing
import weakref
import mcpython.common.block.AbstractBlock as Block
import mcpython.engine.world.AbstractInterface
import mcpython.server.worldgen.map.AbstractChunkInfoMap
import mcpython.util.enums
import mcpython.util.math
from mcpython import shared
from mcpython.common.container.ResourceStack import ItemStack
from mcpython.common.entity.ItemEntity import ItemEntity
from mcpython.engine import logger
class Chunk(mcpython.engine.world.AbstractInterface.IChunk):
    """
    Default representation of a chunk in the world
    Defines the default behaviour
    """

    # Shared block registry, resolved once when this class object is created
    BLOCK_REGISTRY = shared.registry.get_by_name("minecraft:block")

    # Timestamp captured at class-creation (import) time, not per chunk
    now = datetime.datetime.now()  # when is now?
    def __init__(
        self,
        dimension: mcpython.engine.world.AbstractInterface.IDimension,
        position: typing.Tuple[int, int],
    ):
        """
        Will create a new chunk instance
        :param dimension: the world.Dimension.Dimension object used to store this chunk
        :param position: the position of the chunk
        WARNING: use Dimension.get_chunk() where possible [saver variant, will do some work in the background]
        """
        super().__init__()
        self.dimension = dimension
        # The position of the chunk; coerced to a tuple of ints
        self.position = tuple(int(e) for e in position)
        # Used when the chunks gets invalid or is loaded at the moment
        self.is_ready = False
        # Indicated that the chunk is shown to the player
        # todo: client-only
        self.visible = False
        # Indicated that the chunk is loaded
        self.loaded = False
        # Indicates that the chunk is generated
        self.generated = False
        # Indicated that the chunk was modified
        self.dirty = False
        # May be None during early bootstrap, hence the guard
        if shared.world_generation_handler is not None:
            # Creates the needed chunk maps as defined in the world generation handler
            shared.world_generation_handler.setup_chunk_maps(self)
        # For all default chunks, we add such ticket. todo: remove & set only when needed
        self.add_chunk_load_ticket(
            mcpython.engine.world.AbstractInterface.ChunkLoadTicketType.SPAWN_CHUNKS
        )
def entity_iterator(self) -> typing.Iterable:
return tuple(self.entities)
def tick(self):
"""
General chunk tick
todo: move random ticks & entity ticks here
"""
self.check_for_unload()
def save(self):
shared.world.save_file.dump(
self, "minecraft:chunk", dimension=self.get_dimension(), chunk=self.position
)
    def as_shareable(self) -> mcpython.engine.world.AbstractInterface.IChunk:
        # Local chunks are directly shareable; no proxy object is needed
        return self
    def mark_dirty(self):
        """Flag this chunk as modified so it gets persisted; returns self for chaining."""
        self.dirty = True
        return self
    def get_dimension(self) -> mcpython.engine.world.AbstractInterface.IDimension:
        """Return the dimension this chunk belongs to."""
        return self.dimension
    def get_position(self) -> typing.Tuple[int, int]:
        """Return the (x, z) chunk coordinate of this chunk."""
        return self.position
    def get_maximum_y_coordinate_from_generation(
        self, x: int, z: int, default=None
    ) -> int:
        """
        Helper function for getting the y height at the given xz generation based on the generation code, by looking
        up the internal map
        :param x: the x coord
        :param z: the y coord
        :param default: the default value when no value is set
        :return: the y value at that position
        """
        height_map = self.get_map("minecraft:height_map")
        # NOTE(review): relies on the height map supporting ``(x, z) in map`` and
        # on get_at_xz returning a list of (min, max) ranges — [0][1] takes the
        # upper bound of the first range; confirm against the map implementation.
        return height_map.get_at_xz(x, z)[0][1] if (x, z) in height_map else default
    def draw(self):
        """
        Will draw the chunk with the content for it
        Draws all entities
        todo: for this, add a batch
        Will schedule a chunk load from saves when needed
        """
        # Invisible or not-yet-ready chunks are skipped entirely
        if not self.is_ready or not self.visible:
            return
        # todo: add a list of blocks which want an draw() call
        # load if needed
        if not self.loaded:
            # Deferred, so the read happens on the tick handler, not mid-draw
            shared.tick_handler.schedule_once(
                shared.world.save_file.read,
                "minecraft:chunk",
                dimension=self.dimension.get_dimension_id(),
                chunk=self.position,
            )
        # todo: can we also use batches & manipulate vertex data?
        #   [WIP, see rendering/entities/EntityBoxRenderingHelper.py]
        for entity in self.entities:
            entity.draw()
ALL_FACES_EXPOSED = {x: True for x in mcpython.util.enums.EnumSide.iterate()}
    def exposed_faces(
        self, position: typing.Tuple[int, int, int]
    ) -> typing.Dict[str, bool]:
        """
        Returns an dict of the exposed status of every face of the given block

        Faces whose neighbour chunk is not generated (and could not be fetched)
        are omitted from the result entirely.

        :param position: the position to check
        :return: the dict mapping face normal name -> exposed flag
        """
        instance = self.get_block(position)
        # Unresolvable blocks (missing or placeholder string) count as fully exposed
        if instance is None or type(instance) == str:
            return self.ALL_FACES_EXPOSED.copy()
        faces = {}
        for face in mcpython.util.enums.FACE_ORDER:
            pos = face.relative_offset(position)
            chunk_position = mcpython.util.math.position_to_chunk(pos)
            # Resolve the chunk containing the neighbour position
            if chunk_position != self.position:
                chunk = self.dimension.get_chunk(chunk_position, generate=False)
                if chunk is None:
                    # Neighbour chunk missing -> face left out of the result
                    continue
            else:
                chunk = self
            # Optionally hide faces that border not-yet-generated chunks
            if (
                not chunk.is_loaded()
                and shared.world.hide_faces_to_not_generated_chunks
            ):
                faces[face.normal_name] = False
            else:
                block = chunk.get_block(pos)
                # Exposed when the neighbour is absent, or when either of the two
                # touching faces is not solid
                faces[face.normal_name] = block is None or (
                    not isinstance(block, str)
                    and (
                        not block.face_solid & face.invert().bitflag
                        or not instance.face_solid & face.bitflag
                    )
                )
        return faces
    def exposed_faces_list(
        self, position: typing.Tuple[int, int, int]
    ) -> typing.List[bool]:
        """
        List variant of exposed_faces(): returns a 6-entry list of exposure
        flags indexed by face.index.

        :param position: the position to check
        :return: list of six bools, one per face
        """
        instance = self.get_block(position)
        # Unresolvable blocks (missing or placeholder string) count as fully exposed
        if instance is None or type(instance) == str:
            return [True] * 6
        faces = [False] * 6
        for face in mcpython.util.enums.FACE_ORDER:
            pos = face.relative_offset(position)
            chunk_position = mcpython.util.math.position_to_chunk(pos)
            # Resolve the chunk containing the neighbour position
            if chunk_position != self.position:
                chunk = self.dimension.get_chunk(chunk_position, generate=False)
                if chunk is None:
                    # Neighbour chunk missing -> entry stays at its initial False
                    continue
            else:
                chunk = self
            # Optionally hide faces that border not-yet-generated chunks
            if (
                not chunk.is_loaded()
                and shared.world.hide_faces_to_not_generated_chunks
            ):
                faces[face.index] = False
            else:
                block = chunk.get_block(pos)
                # Exposed when the neighbour is absent, or when either of the two
                # touching faces is not solid
                faces[face.index] = block is None or (
                    not isinstance(block, str)
                    and (
                        not block.face_solid & face.invert().bitflag
                        or not instance.face_solid & face.bitflag
                    )
                )
        return faces
def exposed_faces_flag(self, block) -> int:
if block is None or type(block) == str:
return 0b111111
faces = 0
for face in mcpython.util.enums.FACE_ORDER:
pos = face.relative_offset(block.position)
chunk_position = mcpython.util.math.position_to_chunk(pos)
if chunk_position != self.position:
chunk = self.dimension.get_chunk(chunk_position, generate=False)
if chunk is None:
continue
else:
chunk = self
if not (
not chunk.is_loaded()
and shared.world.hide_faces_to_not_generated_chunks
):
new_block = chunk.get_block(pos)
if new_block is None or (
not isinstance(new_block, str)
and (
not new_block.face_solid & face.invert().bitflag
or not new_block.face_solid & face.bitflag
)
):
faces ^= face.bitflag
return faces
def exposed_faces_iterator(
self, position: typing.Tuple[int, int, int]
) -> typing.Iterator[mcpython.util.enums.EnumSide]:
instance = self.get_block(position)
if instance is None or type(instance) == str:
yield from mcpython.util.enums.EnumSide.iterate()
for face in mcpython.util.enums.FACE_ORDER:
pos = face.relative_offset(position)
chunk_position = mcpython.util.math.position_to_chunk(pos)
if chunk_position != self.position:
chunk = self.dimension.get_chunk(chunk_position, generate=False)
if chunk is None:
continue
else:
chunk = self
if not (
not chunk.is_loaded()
and shared.world.hide_faces_to_not_generated_chunks
):
block = chunk.get_block(pos)
if not (
block is None
or (
not isinstance(block, str)
and (
not block.face_solid & face.invert().bitflag
or not instance.face_solid & face.bitflag
)
)
):
yield face
def is_position_blocked(self, position: typing.Tuple[float, float, float]) -> bool:
"""
Will return if at a given position is a block or a block is scheduled [e.g. by world generation]
:param position: the position to check
:return: if there is an block
"""
return position in self._world or (
shared.world_generation_handler is not None
and shared.world_generation_handler.task_handler.get_block(position, self)
is not None
)
    def add_block_unsafe(self, *args, **kwargs):
        """
        Blocking convenience wrapper around the async add_block().

        Accepts the same arguments and returns the same result; must not be
        called while the event loop is already running.
        """
        # NOTE(review): asyncio.get_event_loop() is deprecated outside a running
        # loop since Python 3.10 -- confirm how the engine manages its loop
        # before migrating to asyncio.run()/an explicit loop
        return asyncio.get_event_loop().run_until_complete(
            self.add_block(*args, **kwargs)
        )
    async def add_block(
        self,
        position: tuple,
        block_name: typing.Union[str, Block.AbstractBlock],
        immediate=True,
        block_update=True,
        block_update_self=True,
        lazy_setup: typing.Callable[[Block.AbstractBlock], None] = None,
        check_build_range=True,
        block_state=None,
        replace_existing=True,
        network_sync=True,
    ):
        """
        Adds a block to the given position

        :param position: the position to add; must already be normalized
        :param block_name: the name of the block or an instance of it
        :param immediate: if the block should be shown if needed
        :param block_update: if an block-update should be sent to neighbor blocks
        :param block_update_self: if the block should get an block-update
        :param lazy_setup: a callable for setting up the block instance; may
            return an awaitable, which is then awaited
        :param check_build_range: if the build limits should be checked
        :param block_state: the block state to create in, or None if not set
        :param replace_existing: if existing blocks should be replaced
        :param network_sync: do network sync or not
        :return: the block instance or None if it could not be created
        :raises ValueError: when position is not a valid (normalized) block position
        """
        # check if it is in build range
        r = self.dimension.get_world_height_range()
        if check_build_range and (position[1] < r[0] or position[1] > r[1]):
            return
        if position != mcpython.util.math.normalize(position):
            raise ValueError(
                "position '{}' is no valid block position".format(position)
            )
        # An occupied position is either kept (replace_existing=False) or cleared
        if position in self._world:
            if not replace_existing:
                return
            await self.remove_block(
                position,
                immediate=immediate,
                block_update=block_update,
                block_update_self=block_update_self,
                network_sync=network_sync,
            )
        # "air" placements are no-ops
        if block_name in [None, "air", "minecraft:air"]:
            return
        # Caller handed us a ready block instance: adopt it in place
        if issubclass(type(block_name), Block.AbstractBlock):
            block = block_name
            block.position = position
            block.dimension = self.dimension.get_name()
            if lazy_setup is not None:
                result = lazy_setup(block)
                # lazy_setup may be sync or async
                if isinstance(result, typing.Awaitable):
                    await result
            if shared.IS_CLIENT:
                block.face_info.update()
        # Create the block instance from the registry
        else:
            if not self.BLOCK_REGISTRY.is_valid_key(block_name):
                return
            block_cls = self.BLOCK_REGISTRY.get(block_name)
            block = block_cls()
            block.position = position
            block.dimension = self.dimension.get_name()
            if lazy_setup is not None:
                result = lazy_setup(block)
                # lazy_setup may be sync or async
                if isinstance(result, typing.Awaitable):
                    await result
        # store the block instance in the local world
        self._world[position] = block
        await block.on_block_added()
        if block_state is not None:
            await block.set_model_state(block_state)
        # Flag chunk & position for saving
        self.mark_dirty()
        self.mark_position_dirty(position)
        # Client-side: refresh the rendering of the new block
        if immediate and shared.IS_CLIENT:
            block.face_info.update()
        # Propagate block updates to neighbours (and optionally the block itself)
        if block_update:
            await self.on_block_updated(position, include_itself=block_update_self)
            self.check_neighbors(position)
        return block
async def on_block_updated(
self, position: typing.Tuple[int, int, int], include_itself=True
):
"""
Will call to the neighbor blocks an block update
:param position: the position in the center
:param include_itself: if the block itself | |
"""
The :mod:`scikitplot.metrics` module includes plots for machine learning
evaluation metrics e.g. confusion matrix, silhouette scores, etc.
"""
from __future__ import absolute_import, division, print_function, \
unicode_literals
import itertools
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import label_binarize
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import roc_curve
from sklearn.metrics import auc
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
from sklearn.utils.multiclass import unique_labels
from sklearn.metrics import silhouette_score
from sklearn.metrics import silhouette_samples
from sklearn.calibration import calibration_curve
from scipy import interp
from scikitplot.helpers import binary_ks_curve, validate_labels
def plot_confusion_matrix(y_true, y_pred, labels=None, true_labels=None,
                          pred_labels=None, title=None, normalize=False,
                          hide_zeros=False, x_tick_rotation=0, ax=None,
                          figsize=None, cmap='Blues', title_fontsize="large",
                          text_fontsize="medium"):
    """Generates confusion matrix plot from predictions and true labels

    Args:
        y_true (array-like, shape (n_samples)):
            Ground truth (correct) target values.

        y_pred (array-like, shape (n_samples)):
            Estimated targets as returned by a classifier.

        labels (array-like, shape (n_classes), optional): List of labels to
            index the matrix. This may be used to reorder or select a subset
            of labels. If none is given, those that appear at least once in
            ``y_true`` or ``y_pred`` are used in sorted order. (new in v0.2.5)

        true_labels (array-like, optional): The true labels to display.
            If none is given, then all of the labels are used.

        pred_labels (array-like, optional): The predicted labels to display.
            If none is given, then all of the labels are used.

        title (string, optional): Title of the generated plot. Defaults to
            "Confusion Matrix" if `normalize` is False. Else, defaults to
            "Normalized Confusion Matrix".

        normalize (bool, optional): If True, normalizes the confusion matrix
            before plotting. Defaults to False.

        hide_zeros (bool, optional): If True, does not plot cells containing a
            value of zero. Defaults to False.

        x_tick_rotation (int, optional): Rotates x-axis tick labels by the
            specified angle. This is useful in cases where there are numerous
            categories and the labels overlap each other.

        ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to
            plot the curve. If None, the plot is drawn on a new set of axes.

        figsize (2-tuple, optional): Tuple denoting figure size of the plot
            e.g. (6, 6). Defaults to ``None``.

        cmap (string or :class:`matplotlib.colors.Colormap` instance, optional):
            Colormap used for plotting the projection. View Matplotlib Colormap
            documentation for available options.
            https://matplotlib.org/users/colormaps.html

        title_fontsize (string or int, optional): Matplotlib-style fontsizes.
            Use e.g. "small", "medium", "large" or integer-values. Defaults to
            "large".

        text_fontsize (string or int, optional): Matplotlib-style fontsizes.
            Use e.g. "small", "medium", "large" or integer-values. Defaults to
            "medium".

    Returns:
        ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was
            drawn.

    Example:
        >>> import scikitplot as skplt
        >>> rf = RandomForestClassifier()
        >>> rf = rf.fit(X_train, y_train)
        >>> y_pred = rf.predict(X_test)
        >>> skplt.metrics.plot_confusion_matrix(y_test, y_pred, normalize=True)
        <matplotlib.axes._subplots.AxesSubplot object at 0x7fe967d64490>
        >>> plt.show()

        .. image:: _static/examples/plot_confusion_matrix.png
           :align: center
           :alt: Confusion matrix
    """
    y_true = np.asarray(y_true)
    y_pred = np.asarray(y_pred)

    if ax is None:
        fig, ax = plt.subplots(1, 1, figsize=figsize)

    cm = confusion_matrix(y_true, y_pred, labels=labels)
    if labels is None:
        classes = unique_labels(y_true, y_pred)
    else:
        classes = np.asarray(labels)

    if normalize:
        # Row-normalize; rows with no samples would give NaN, so zero them out
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        cm = np.around(cm, decimals=2)
        cm[np.isnan(cm)] = 0.0

    if true_labels is None:
        true_classes = classes
    else:
        validate_labels(classes, true_labels, "true_labels")

        true_label_indexes = np.in1d(classes, true_labels)

        true_classes = classes[true_label_indexes]
        cm = cm[true_label_indexes]

    if pred_labels is None:
        pred_classes = classes
    else:
        validate_labels(classes, pred_labels, "pred_labels")

        pred_label_indexes = np.in1d(classes, pred_labels)

        pred_classes = classes[pred_label_indexes]
        cm = cm[:, pred_label_indexes]

    if title:
        ax.set_title(title, fontsize=title_fontsize)
    elif normalize:
        ax.set_title('Normalized Confusion Matrix', fontsize=title_fontsize)
    else:
        ax.set_title('Confusion Matrix', fontsize=title_fontsize)

    image = ax.imshow(cm, interpolation='nearest', cmap=plt.cm.get_cmap(cmap))
    plt.colorbar(mappable=image)
    x_tick_marks = np.arange(len(pred_classes))
    y_tick_marks = np.arange(len(true_classes))
    ax.set_xticks(x_tick_marks)
    ax.set_xticklabels(pred_classes, fontsize=text_fontsize,
                       rotation=x_tick_rotation)
    ax.set_yticks(y_tick_marks)
    ax.set_yticklabels(true_classes, fontsize=text_fontsize)

    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        if not (hide_zeros and cm[i, j] == 0):
            ax.text(j, i, cm[i, j],
                    horizontalalignment="center",
                    verticalalignment="center",
                    fontsize=text_fontsize,
                    color="white" if cm[i, j] > thresh else "black")

    ax.set_ylabel('True label', fontsize=text_fontsize)
    ax.set_xlabel('Predicted label', fontsize=text_fontsize)
    # Bugfix: grid('off') passes a truthy string, which *enables* the grid in
    # matplotlib >= 2.0; pass False to actually hide it
    ax.grid(False)

    return ax
def plot_roc_curve(y_true, y_probas, title='ROC Curves',
                   curves=('micro', 'macro', 'each_class'),
                   ax=None, figsize=None, cmap='nipy_spectral',
                   title_fontsize="large", text_fontsize="medium"):
    """Generates the ROC curves from labels and predicted scores/probabilities

    Args:
        y_true (array-like, shape (n_samples)):
            Ground truth (correct) target values.

        y_probas (array-like, shape (n_samples, n_classes)):
            Prediction probabilities for each class returned by a classifier.

        title (string, optional): Title of the generated plot. Defaults to
            "ROC Curves".

        curves (array-like): A listing of which curves should be plotted on the
            resulting plot. Defaults to `("micro", "macro", "each_class")`
            i.e. "micro" for micro-averaged curve, "macro" for macro-averaged
            curve

        ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to
            plot the curve. If None, the plot is drawn on a new set of axes.

        figsize (2-tuple, optional): Tuple denoting figure size of the plot
            e.g. (6, 6). Defaults to ``None``.

        cmap (string or :class:`matplotlib.colors.Colormap` instance, optional):
            Colormap used for plotting the projection. View Matplotlib Colormap
            documentation for available options.
            https://matplotlib.org/users/colormaps.html

        title_fontsize (string or int, optional): Matplotlib-style fontsizes.
            Use e.g. "small", "medium", "large" or integer-values. Defaults to
            "large".

        text_fontsize (string or int, optional): Matplotlib-style fontsizes.
            Use e.g. "small", "medium", "large" or integer-values. Defaults to
            "medium".

    Returns:
        ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was
            drawn.

    Example:
        >>> import scikitplot as skplt
        >>> nb = GaussianNB()
        >>> nb = nb.fit(X_train, y_train)
        >>> y_probas = nb.predict_proba(X_test)
        >>> skplt.metrics.plot_roc_curve(y_test, y_probas)
        <matplotlib.axes._subplots.AxesSubplot object at 0x7fe967d64490>
        >>> plt.show()

        .. image:: _static/examples/plot_roc_curve.png
           :align: center
           :alt: ROC Curves
    """
    y_true = np.array(y_true)
    y_probas = np.array(y_probas)

    if 'micro' not in curves and 'macro' not in curves and \
            'each_class' not in curves:
        raise ValueError('Invalid argument for curves as it '
                         'only takes "micro", "macro", or "each_class"')

    classes = np.unique(y_true)
    probas = y_probas

    fpr = dict()
    tpr = dict()
    roc_auc = dict()
    # Per-class one-vs-rest ROC curves
    for i in range(len(classes)):
        fpr[i], tpr[i], _ = roc_curve(y_true, probas[:, i],
                                      pos_label=classes[i])
        roc_auc[i] = auc(fpr[i], tpr[i])

    # Compute micro-average ROC curve and ROC area
    # (key is suffixed with a counter to avoid clashing with a class label)
    micro_key = 'micro'
    i = 0
    while micro_key in fpr:
        i += 1
        micro_key += str(i)

    y_true = label_binarize(y_true, classes=classes)
    if len(classes) == 2:
        y_true = np.hstack((1 - y_true, y_true))

    fpr[micro_key], tpr[micro_key], _ = roc_curve(y_true.ravel(),
                                                  probas.ravel())
    roc_auc[micro_key] = auc(fpr[micro_key], tpr[micro_key])

    # Compute macro-average ROC curve and ROC area
    # First aggregate all false positive rates
    all_fpr = np.unique(np.concatenate([fpr[x] for x in range(len(classes))]))

    # Then interpolate all ROC curves at this points
    # (scipy.interp was removed in SciPy >= 1.6; np.interp is its drop-in
    # replacement with the identical signature)
    mean_tpr = np.zeros_like(all_fpr)
    for i in range(len(classes)):
        mean_tpr += np.interp(all_fpr, fpr[i], tpr[i])

    # Finally average it and compute AUC
    mean_tpr /= len(classes)

    macro_key = 'macro'
    i = 0
    while macro_key in fpr:
        i += 1
        macro_key += str(i)
    fpr[macro_key] = all_fpr
    tpr[macro_key] = mean_tpr
    roc_auc[macro_key] = auc(fpr[macro_key], tpr[macro_key])

    if ax is None:
        fig, ax = plt.subplots(1, 1, figsize=figsize)

    ax.set_title(title, fontsize=title_fontsize)

    if 'each_class' in curves:
        for i in range(len(classes)):
            color = plt.cm.get_cmap(cmap)(float(i) / len(classes))
            ax.plot(fpr[i], tpr[i], lw=2, color=color,
                    label='ROC curve of class {0} (area = {1:0.2f})'
                          ''.format(classes[i], roc_auc[i]))

    if 'micro' in curves:
        ax.plot(fpr[micro_key], tpr[micro_key],
                label='micro-average ROC curve '
                      '(area = {0:0.2f})'.format(roc_auc[micro_key]),
                color='deeppink', linestyle=':', linewidth=4)

    if 'macro' in curves:
        ax.plot(fpr[macro_key], tpr[macro_key],
                label='macro-average ROC curve '
                      '(area = {0:0.2f})'.format(roc_auc[macro_key]),
                color='navy', linestyle=':', linewidth=4)

    ax.plot([0, 1], [0, 1], 'k--', lw=2)
    ax.set_xlim([0.0, 1.0])
    ax.set_ylim([0.0, 1.05])
    ax.set_xlabel('False Positive Rate', fontsize=text_fontsize)
    ax.set_ylabel('True Positive Rate', fontsize=text_fontsize)
    ax.tick_params(labelsize=text_fontsize)
    ax.legend(loc='lower right', fontsize=text_fontsize)
    return ax
def plot_ks_statistic(y_true, y_probas, title='KS Statistic Plot',
ax=None, figsize=None, title_fontsize="large",
text_fontsize="medium"):
"""Generates the KS Statistic plot from labels and scores/probabilities
Args:
y_true (array-like, shape (n_samples)):
Ground truth (correct) target values.
y_probas (array-like, shape (n_samples, n_classes)):
Prediction probabilities for each class returned by a classifier.
title (string, optional): Title of the generated plot. Defaults to
"KS Statistic Plot".
ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to
plot the learning curve. If None, the plot is drawn on a new set of
axes.
figsize (2-tuple, optional): Tuple denoting figure size of the plot
e.g. (6, 6). Defaults to ``None``.
title_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"large".
text_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"medium".
Returns:
ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was
drawn.
Example:
>>> import scikitplot as skplt
>>> lr = LogisticRegression()
>>> lr = lr.fit(X_train, y_train)
>>> y_probas = lr.predict_proba(X_test)
>>> skplt.metrics.plot_ks_statistic(y_test, y_probas)
<matplotlib.axes._subplots.AxesSubplot object at 0x7fe967d64490>
>>> plt.show()
.. image:: _static/examples/plot_ks_statistic.png
:align: center
:alt: KS Statistic
"""
y_true = | |
= instance
# if we do have the key only set it if source is not None
elif source is not None:
self.sources[key] = source
self.instances[key] = instance
# if setting in cached settings add
if key in SETTINGS_CACHE_KEYS:
SETTINGS_CACHE[key] = copy.deepcopy(value)
# then do the normal dictionary setting
super(ParamDict, self).__setitem__(key, value)
def __contains__(self, key: str) -> bool:
"""
Method to find whether ParamDict instance has key="key"
used with the "in" operator
if key exists in ParamDict True is returned else False is returned
:param key: string, "key" to look for in ParamDict instance
:return bool: True if ParamDict instance has a key "key", else False
"""
# set function name
_ = display_func(None, '__contains__', __NAME__, 'ParamDict')
# run contains command from super
return super(ParamDict, self).__contains__(key)
def __delitem__(self, key: str):
"""
Deletes the "key" from ParamDict instance, case insensitive
:param key: string, the key to delete from ParamDict instance,
case insensitive
:return None:
"""
# set function name
_ = display_func(None, '__delitem__', __NAME__, 'ParamDict')
# delete item using super
super(ParamDict, self).__delitem__(key)
def __repr__(self):
"""
Get the offical string representation for this instance
:return: return the string representation
:rtype: str
"""
# set function name
_ = display_func(None, '__repr__', __NAME__, 'ParamDict')
# get string from string print
return self._string_print()
def __str__(self) -> str:
"""
Get the informal string representation for this instance
:return: return the string representation
:rtype: str
"""
# set function name
_ = display_func(None, '__repr__', __NAME__, 'ParamDict')
# get string from string print
return self._string_print()
def set(self, key: str, value: object,
source: Union[None, str] = None,
instance: Union[None, object] = None):
"""
Set an item even if params is locked
:param key: str, the key to set
:param value: object, the value of the key to set
:param source: str, the source of the value/key to set
:param instance: object, the instance of the value/key to set
:type key: str
:type source: str
:type instance: object
:return: None
"""
# set function name
_ = display_func(None, 'set', __NAME__, 'ParamDict')
# if we dont have the key in sources set it regardless
if key not in self.sources:
self.sources[key] = source
self.instances[key] = instance
# if we do have the key only set it if source is not None
elif source is not None:
self.sources[key] = source
self.instances[key] = instance
# then do the normal dictionary setting
super(ParamDict, self).__setitem__(key, value)
def lock(self):
"""
Locks the parameter dictionary
:return:
"""
# set function name
_ = display_func(None, 'lock', __NAME__, 'ParamDict')
# set locked to True
self.locked = True
def unlock(self):
"""
Unlocks the parameter dictionary
:return:
"""
# set function name
_ = display_func(None, 'unlock', __NAME__, 'ParamDict')
# set locked to False
self.locked = False
def get(self, key: str, default: Union[None, object] = None) -> object:
"""
Overrides the dictionary get function
If "key" is in ParamDict instance then returns this value, else
returns "default" (if default returned source is set to None)
key is case insensitive
:param key: string, the key to search for in ParamDict instance
case insensitive
:param default: object or None, if key not in ParamDict instance this
object is returned
:type key: str
:return value: if key in ParamDict instance this value is returned else
the default value is returned (None if undefined)
"""
# set function name
_ = display_func(None, 'get', __NAME__, 'ParamDict')
# if we have the key return the value
if key in self.keys():
return self.__getitem__(key)
# else return the default key (None if not defined)
else:
self.sources[key] = None
return default
def set_source(self, key: str, source: str):
"""
Set a key to have sources[key] = source
raises a ConfigError if key not found
:param key: string, the main dictionary string
:param source: string, the source to set
:type key: str
:type source: str
:return None:
:raises ConfigError: if key not found
"""
# set function name
_ = display_func(None, 'set_source', __NAME__, 'ParamDict')
# capitalise
key = _capitalise_key(key)
# don't put full path for sources in package
source = _check_mod_source(source)
# only add if key is in main dictionary
if key in self.keys():
self.sources[key] = source
# add to history
if key in self.source_history:
self.source_history[key].append(source)
else:
self.source_history[key] = [source]
else:
# log error: source cannot be added for key
emsg = self.textentry('00-003-00026', args=[key])
raise ConfigError(emsg, level='error')
def set_instance(self, key: str, instance: object):
"""
Set a key to have instance[key] = instance
raise a Config Error if key not found
:param key: str, the key to add
:param instance: object, the instance to store (normally Const/Keyword)
:type key: str
:return None:
:raises ConfigError: if key not found
"""
# set function name
_ = display_func(None, 'set_instance', __NAME__, 'ParamDict')
# capitalise
key = _capitalise_key(key)
# only add if key is in main dictionary
if key in self.keys():
self.instances[key] = instance
else:
# log error: instance cannot be added for key
emsg = self.textentry('00-003-00027', args=[key])
raise ConfigError(emsg, level='error')
def append_source(self, key: str, source: str):
"""
Adds source to the source of key (appends if exists)
i.e. sources[key] = oldsource + source
:param key: string, the main dictionary string
:param source: string, the source to set
:type key: str
:type source: str
:return None:
"""
# set function name
_ = display_func(None, 'append_source', __NAME__, 'ParamDict')
# capitalise
key = _capitalise_key(key)
# if key exists append source to it
if key in self.keys() and key in list(self.sources.keys()):
self.sources[key] += ' {0}'.format(source)
else:
self.set_source(key, source)
def set_sources(self, keys: List[str],
sources: Union[str, List[str], dict]):
"""
Set a list of keys sources
raises a ConfigError if key not found
:param keys: list of strings, the list of keys to add sources for
:param sources: string or list of strings or dictionary of strings,
the source or sources to add,
if a dictionary source = sources[key] for key = keys[i]
if list source = sources[i] for keys[i]
if string all sources with these keys will = source
:type keys: list
:type sources: Union[str, list, dict]
:return None:
"""
# set function name
_ = display_func(None, 'set_sources', __NAME__, 'ParamDict')
# loop around each key in keys
for k_it in range(len(keys)):
# assign the key from k_it
key = keys[k_it]
# capitalise
key = _capitalise_key(key)
# Get source for this iteration
if type(sources) == list:
source = sources[k_it]
elif type(sources) == dict:
source = sources[key]
else:
source = str(sources)
# set source
self.set_source(key, source)
def set_instances(self, keys: List[str],
instances: Union[object, list, dict]):
"""
Set a list of keys sources
raises a ConfigError if key not found
:param keys: list of strings, the list of keys to add sources for
:param instances: object or list of objects or dictionary of objects,
the source or sources to add,
if a dictionary source = sources[key] for key = keys[i]
if list source = sources[i] for keys[i]
if object all sources with these keys will = source
:type keys: list
:type instances: Union[object, list, dict]
:return None:
"""
# set function name
_ = display_func(None, 'set_instances', __NAME__, 'ParamDict')
# loop around each key in keys
for k_it in range(len(keys)):
# assign the key from k_it
key = keys[k_it]
# capitalise
key = _capitalise_key(key)
# Get source for this iteration
if type(instances) == list:
instance = instances[k_it]
elif type(instances) == dict:
instance = instances[key]
else:
instance = instances
# set source
self.set_instance(key, instance)
def append_sources(self, keys: str, sources: Union[str, List[str], dict]):
"""
Adds list of keys sources (appends if exists)
raises a ConfigError if key not found
:param keys: list of strings, the list of keys to add sources for
:param sources: string or list of strings or dictionary of strings,
the source or sources to add,
if a dictionary source = sources[key] for key = keys[i]
if list source = sources[i] for keys[i]
if string all sources with these keys will = source
:type keys: list
:type sources: | |
PhoneA.
Swap active call on PhoneA.
Merge calls to conference on PhoneA.
Hangup on PhoneC, check call continues between AB.
Hangup on PhoneB, check A ends.
"""
ads = self.android_devices
tasks = [(phone_setup_iwlan,
(self.log, ads[0], False, WFC_MODE_WIFI_ONLY,
self.wifi_network_ssid, self.wifi_network_pass)),
(phone_setup_voice_3g, (self.log, ads[1])), (phone_setup_voice_3g,
(self.log, ads[2]))]
if not multithread_func(self.log, tasks):
self.log.error("Phone Failed to Set Up Properly.")
return False
call_ab_id, call_ac_id = self._test_epdg_mo_mt_add_1x_swap_x(2)
if call_ab_id is None or call_ac_id is None:
return False
return self._test_epdg_conference_merge_drop(call_ab_id, call_ac_id)
@TelephonyBaseTest.tel_test_wrap
def test_epdg_mo_mt_add_1x_swap_twice_merge_drop_wfc_wifi_preferred(self):
"""Test swap and merge feature in epdg call.
PhoneA (epdg) call PhoneB (1x), accept on PhoneB.
PhoneC (1x) call PhoneA (epdg), accept on PhoneA.
Swap active call on PhoneA.
Swap active call on PhoneA.
Merge calls to conference on PhoneA.
Hangup on PhoneC, check call continues between AB.
Hangup on PhoneB, check A ends.
"""
ads = self.android_devices
tasks = [(phone_setup_iwlan,
(self.log, ads[0], False, WFC_MODE_WIFI_PREFERRED,
self.wifi_network_ssid, self.wifi_network_pass)),
(phone_setup_voice_3g, (self.log, ads[1])), (phone_setup_voice_3g,
(self.log, ads[2]))]
if not multithread_func(self.log, tasks):
self.log.error("Phone Failed to Set Up Properly.")
return False
call_ab_id, call_ac_id = self._test_epdg_mo_mt_add_1x_swap_x(2)
if call_ab_id is None or call_ac_id is None:
return False
return self._test_epdg_conference_merge_drop(call_ab_id, call_ac_id)
@TelephonyBaseTest.tel_test_wrap
def test_epdg_mo_mo_add_epdg_swap_twice_drop_held_wfc_wifi_only(self):
"""Test swap feature in epdg call.
PhoneA (epdg) call PhoneB (epdg), accept on PhoneB.
PhoneA (epdg) call PhoneC (epdg), accept on PhoneC.
Swap active call on PhoneA.
Swap active call on PhoneA.
Hangup call from PhoneB, check if call continues between AC.
"""
ads = self.android_devices
tasks = [(phone_setup_iwlan,
(self.log, ads[0], False, WFC_MODE_WIFI_ONLY,
self.wifi_network_ssid, self.wifi_network_pass)),
(phone_setup_iwlan,
(self.log, ads[1], False, WFC_MODE_WIFI_ONLY,
self.wifi_network_ssid, self.wifi_network_pass)),
(phone_setup_iwlan,
(self.log, ads[2], False, WFC_MODE_WIFI_ONLY,
self.wifi_network_ssid, self.wifi_network_pass))]
if not multithread_func(self.log, tasks):
self.log.error("Phone Failed to Set Up Properly.")
return False
call_ab_id, call_ac_id = self._test_epdg_mo_mo_add_epdg_swap_x(2)
if call_ab_id is None or call_ac_id is None:
return False
return self._three_phone_hangup_call_verify_call_state(
ad_hangup=ads[1],
ad_verify=ads[0],
call_id=call_ac_id,
call_state=CALL_STATE_ACTIVE,
ads_active=[ads[0], ads[2]])
@TelephonyBaseTest.tel_test_wrap
def test_epdg_mo_mo_add_epdg_swap_twice_drop_held_wfc_wifi_preferred(self):
"""Test swap feature in epdg call.
PhoneA (epdg) call PhoneB (epdg), accept on PhoneB.
PhoneA (epdg) call PhoneC (epdg), accept on PhoneC.
Swap active call on PhoneA.
Swap active call on PhoneA.
Hangup call from PhoneB, check if call continues between AC.
"""
ads = self.android_devices
tasks = [(phone_setup_iwlan,
(self.log, ads[0], False, WFC_MODE_WIFI_PREFERRED,
self.wifi_network_ssid, self.wifi_network_pass)),
(phone_setup_iwlan,
(self.log, ads[1], False, WFC_MODE_WIFI_PREFERRED,
self.wifi_network_ssid, self.wifi_network_pass)),
(phone_setup_iwlan,
(self.log, ads[2], False, WFC_MODE_WIFI_PREFERRED,
self.wifi_network_ssid, self.wifi_network_pass))]
if not multithread_func(self.log, tasks):
self.log.error("Phone Failed to Set Up Properly.")
return False
call_ab_id, call_ac_id = self._test_epdg_mo_mo_add_epdg_swap_x(2)
if call_ab_id is None or call_ac_id is None:
return False
return self._three_phone_hangup_call_verify_call_state(
ad_hangup=ads[1],
ad_verify=ads[0],
call_id=call_ac_id,
call_state=CALL_STATE_ACTIVE,
ads_active=[ads[0], ads[2]])
@TelephonyBaseTest.tel_test_wrap
def test_epdg_mo_mo_add_epdg_swap_twice_drop_active_wfc_wifi_only(self):
"""Test swap feature in epdg call.
PhoneA (epdg) call PhoneB (epdg), accept on PhoneB.
PhoneA (epdg) call PhoneC (epdg), accept on PhoneC.
Swap active call on PhoneA.
Swap active call on PhoneA.
Hangup call from PhoneC, check if call continues between AB.
"""
ads = self.android_devices
tasks = [(phone_setup_iwlan,
(self.log, ads[0], False, WFC_MODE_WIFI_ONLY,
self.wifi_network_ssid, self.wifi_network_pass)),
(phone_setup_iwlan,
(self.log, ads[1], False, WFC_MODE_WIFI_ONLY,
self.wifi_network_ssid, self.wifi_network_pass)),
(phone_setup_iwlan,
(self.log, ads[2], False, WFC_MODE_WIFI_ONLY,
self.wifi_network_ssid, self.wifi_network_pass))]
if not multithread_func(self.log, tasks):
self.log.error("Phone Failed to Set Up Properly.")
return False
call_ab_id, call_ac_id = self._test_epdg_mo_mo_add_epdg_swap_x(2)
if call_ab_id is None or call_ac_id is None:
return False
return self._three_phone_hangup_call_verify_call_state(
ad_hangup=ads[2],
ad_verify=ads[0],
call_id=call_ab_id,
call_state=CALL_STATE_HOLDING,
ads_active=[ads[0], ads[1]])
@TelephonyBaseTest.tel_test_wrap
def test_epdg_mo_mo_add_epdg_swap_twice_drop_active_wfc_wifi_preferred(
self):
"""Test swap feature in epdg call.
PhoneA (epdg) call PhoneB (epdg), accept on PhoneB.
PhoneA (epdg) call PhoneC (epdg), accept on PhoneC.
Swap active call on PhoneA.
Swap active call on PhoneA.
Hangup call from PhoneC, check if call continues between AB.
"""
ads = self.android_devices
tasks = [(phone_setup_iwlan,
(self.log, ads[0], False, WFC_MODE_WIFI_PREFERRED,
self.wifi_network_ssid, self.wifi_network_pass)),
(phone_setup_iwlan,
(self.log, ads[1], False, WFC_MODE_WIFI_PREFERRED,
self.wifi_network_ssid, self.wifi_network_pass)),
(phone_setup_iwlan,
(self.log, ads[2], False, WFC_MODE_WIFI_PREFERRED,
self.wifi_network_ssid, self.wifi_network_pass))]
if not multithread_func(self.log, tasks):
self.log.error("Phone Failed to Set Up Properly.")
return False
call_ab_id, call_ac_id = self._test_epdg_mo_mo_add_epdg_swap_x(2)
if call_ab_id is None or call_ac_id is None:
return False
return self._three_phone_hangup_call_verify_call_state(
ad_hangup=ads[2],
ad_verify=ads[0],
call_id=call_ab_id,
call_state=CALL_STATE_HOLDING,
ads_active=[ads[0], ads[1]])
@TelephonyBaseTest.tel_test_wrap
def test_epdg_mo_mo_add_epdg_swap_twice_drop_active_apm_wifi_preferred(
self):
"""Test swap feature in epdg call.
PhoneA (epdg) call PhoneB (epdg), accept on PhoneB.
PhoneA (epdg) call PhoneC (epdg), accept on PhoneC.
Swap active call on PhoneA.
Swap active call on PhoneA.
Hangup call from PhoneC, check if call continues between AB.
"""
ads = self.android_devices
tasks = [(phone_setup_iwlan,
(self.log, ads[0], True, WFC_MODE_WIFI_PREFERRED,
self.wifi_network_ssid, self.wifi_network_pass)),
(phone_setup_iwlan,
(self.log, ads[1], True, WFC_MODE_WIFI_PREFERRED,
self.wifi_network_ssid, self.wifi_network_pass)),
(phone_setup_iwlan,
(self.log, ads[2], True, WFC_MODE_WIFI_PREFERRED,
self.wifi_network_ssid, self.wifi_network_pass))]
if not multithread_func(self.log, tasks):
self.log.error("Phone Failed to Set Up Properly.")
return False
call_ab_id, call_ac_id = self._test_epdg_mo_mo_add_epdg_swap_x(2)
if call_ab_id is None or call_ac_id is None:
return False
return self._three_phone_hangup_call_verify_call_state(
ad_hangup=ads[2],
ad_verify=ads[0],
call_id=call_ab_id,
call_state=CALL_STATE_HOLDING,
ads_active=[ads[0], ads[1]])
@TelephonyBaseTest.tel_test_wrap
def test_epdg_mo_mt_add_epdg_swap_twice_drop_held_wfc_wifi_only(self):
"""Test swap feature in epdg call.
PhoneA (epdg) call PhoneB (epdg), accept on PhoneB.
PhoneC (epdg) call PhoneA (epdg), accept on PhoneA.
Swap active call on PhoneA.
Swap active call on PhoneA.
Hangup call from PhoneB, check if call continues between AC.
"""
ads = self.android_devices
tasks = [(phone_setup_iwlan,
(self.log, ads[0], False, WFC_MODE_WIFI_ONLY,
self.wifi_network_ssid, self.wifi_network_pass)),
(phone_setup_iwlan,
(self.log, ads[1], False, WFC_MODE_WIFI_ONLY,
self.wifi_network_ssid, self.wifi_network_pass)),
(phone_setup_iwlan,
(self.log, ads[2], False, WFC_MODE_WIFI_ONLY,
self.wifi_network_ssid, self.wifi_network_pass))]
if not multithread_func(self.log, tasks):
self.log.error("Phone Failed to Set Up Properly.")
return False
call_ab_id, call_ac_id = self._test_epdg_mo_mt_add_epdg_swap_x(2)
if call_ab_id is None or call_ac_id is None:
return False
return self._three_phone_hangup_call_verify_call_state(
ad_hangup=ads[1],
ad_verify=ads[0],
call_id=call_ac_id,
call_state=CALL_STATE_ACTIVE,
ads_active=[ads[0], ads[2]])
@TelephonyBaseTest.tel_test_wrap
def test_epdg_mo_mt_add_epdg_swap_twice_drop_held_wfc_wifi_preferred(self):
"""Test swap feature in epdg call.
PhoneA (epdg) call PhoneB (epdg), accept on PhoneB.
PhoneC (epdg) call PhoneA (epdg), accept on PhoneA.
Swap active call on PhoneA.
Swap active call on PhoneA.
Hangup call from PhoneB, check if call continues between AC.
"""
ads = self.android_devices
tasks = [(phone_setup_iwlan,
(self.log, ads[0], False, WFC_MODE_WIFI_PREFERRED,
self.wifi_network_ssid, self.wifi_network_pass)),
(phone_setup_iwlan,
(self.log, ads[1], False, WFC_MODE_WIFI_PREFERRED,
self.wifi_network_ssid, self.wifi_network_pass)),
(phone_setup_iwlan,
(self.log, ads[2], False, WFC_MODE_WIFI_PREFERRED,
self.wifi_network_ssid, self.wifi_network_pass))]
if not multithread_func(self.log, tasks):
self.log.error("Phone Failed to Set Up Properly.")
return False
call_ab_id, call_ac_id = self._test_epdg_mo_mt_add_epdg_swap_x(2)
if call_ab_id is None or call_ac_id is None:
return False
return self._three_phone_hangup_call_verify_call_state(
ad_hangup=ads[1],
ad_verify=ads[0],
call_id=call_ac_id,
call_state=CALL_STATE_ACTIVE,
ads_active=[ads[0], ads[2]])
@TelephonyBaseTest.tel_test_wrap
def test_epdg_mo_mt_add_epdg_swap_twice_drop_active_wfc_wifi_only(self):
"""Test swap feature in epdg call.
PhoneA (epdg) call PhoneB (epdg), accept on PhoneB.
PhoneC (epdg) call PhoneA (epdg), accept on PhoneA.
Swap active call on PhoneA.
Swap active call on PhoneA.
Hangup call from PhoneC, check if call continues between AB.
"""
ads = self.android_devices
tasks = [(phone_setup_iwlan,
(self.log, ads[0], False, WFC_MODE_WIFI_ONLY,
self.wifi_network_ssid, self.wifi_network_pass)),
(phone_setup_iwlan,
(self.log, ads[1], False, WFC_MODE_WIFI_ONLY,
self.wifi_network_ssid, self.wifi_network_pass)),
(phone_setup_iwlan,
(self.log, ads[2], False, WFC_MODE_WIFI_ONLY,
self.wifi_network_ssid, self.wifi_network_pass))]
if not multithread_func(self.log, tasks):
self.log.error("Phone Failed to Set Up Properly.")
return False
call_ab_id, call_ac_id = self._test_epdg_mo_mt_add_epdg_swap_x(2)
if call_ab_id is None or call_ac_id is None:
return False
return self._three_phone_hangup_call_verify_call_state(
ad_hangup=ads[2],
ad_verify=ads[0],
call_id=call_ab_id,
call_state=CALL_STATE_HOLDING,
ads_active=[ads[0], ads[1]])
@TelephonyBaseTest.tel_test_wrap
def test_epdg_mo_mt_add_epdg_swap_twice_drop_active_wfc_wifi_preferred(
self):
"""Test swap feature in epdg call.
PhoneA (epdg) call PhoneB (epdg), accept on PhoneB.
PhoneC (epdg) call PhoneA (epdg), accept on PhoneA.
Swap active call on PhoneA.
Swap active call on PhoneA.
Hangup call from PhoneC, check if call continues between AB.
"""
ads = self.android_devices
tasks = [(phone_setup_iwlan,
(self.log, ads[0], False, WFC_MODE_WIFI_PREFERRED,
self.wifi_network_ssid, self.wifi_network_pass)),
(phone_setup_iwlan,
(self.log, ads[1], False, WFC_MODE_WIFI_PREFERRED,
self.wifi_network_ssid, self.wifi_network_pass)),
(phone_setup_iwlan,
(self.log, ads[2], False, WFC_MODE_WIFI_PREFERRED,
self.wifi_network_ssid, self.wifi_network_pass))]
if not multithread_func(self.log, tasks):
self.log.error("Phone Failed to Set Up Properly.")
return False
call_ab_id, call_ac_id = self._test_epdg_mo_mt_add_epdg_swap_x(2)
if call_ab_id is None or call_ac_id is None:
return False
return self._three_phone_hangup_call_verify_call_state(
ad_hangup=ads[2],
ad_verify=ads[0],
call_id=call_ab_id,
call_state=CALL_STATE_HOLDING,
ads_active=[ads[0], ads[1]])
@TelephonyBaseTest.tel_test_wrap
def test_epdg_mo_mo_add_epdg_swap_once_drop_held_wfc_wifi_only(self):
"""Test swap feature in epdg call.
PhoneA (epdg) call PhoneB (epdg), accept on PhoneB.
PhoneA (epdg) call PhoneC (epdg), accept on PhoneC.
Swap active call on PhoneA.
Hangup call from PhoneC, check if call continues between AB.
"""
ads = self.android_devices
tasks = [(phone_setup_iwlan,
(self.log, ads[0], False, WFC_MODE_WIFI_ONLY,
self.wifi_network_ssid, self.wifi_network_pass)),
(phone_setup_iwlan,
(self.log, ads[1], False, WFC_MODE_WIFI_ONLY,
self.wifi_network_ssid, self.wifi_network_pass)),
(phone_setup_iwlan,
(self.log, ads[2], False, WFC_MODE_WIFI_ONLY,
self.wifi_network_ssid, self.wifi_network_pass))]
if not multithread_func(self.log, tasks):
self.log.error("Phone Failed to Set Up Properly.")
return False
call_ab_id, call_ac_id = self._test_epdg_mo_mo_add_epdg_swap_x(1)
if call_ab_id is None or call_ac_id is None:
return False
return self._three_phone_hangup_call_verify_call_state(
ad_hangup=ads[2],
ad_verify=ads[0],
call_id=call_ab_id,
call_state=CALL_STATE_ACTIVE,
ads_active=[ads[0], ads[1]])
@TelephonyBaseTest.tel_test_wrap
def test_epdg_mo_mo_add_epdg_swap_once_drop_held_wfc_wifi_preferred(self):
"""Test swap feature in epdg call.
PhoneA (epdg) call PhoneB (epdg), accept on PhoneB.
PhoneA (epdg) call PhoneC (epdg), accept on PhoneC.
Swap active call on PhoneA.
Hangup call from PhoneC, check if call continues between AB.
"""
ads = | |
u'man bouncing ball medium skin tone': u'\U000026f9\U0001f3fd\U0000200d\U00002642\U0000fe0f',
u'man bouncing ball mediumdark skin tone': u'\U000026f9\U0001f3fe\U0000200d\U00002642\U0000fe0f',
u'man bouncing ball dark skin tone': u'\U000026f9\U0001f3ff\U0000200d\U00002642\U0000fe0f',
u'woman bouncing ball': u'\U000026f9\U0000fe0f\U0000200d\U00002640\U0000fe0f',
u'woman bouncing ball light skin tone': u'\U000026f9\U0001f3fb\U0000200d\U00002640\U0000fe0f',
u'woman bouncing ball mediumlight skin tone': u'\U000026f9\U0001f3fc\U0000200d\U00002640\U0000fe0f',
u'woman bouncing ball medium skin tone': u'\U000026f9\U0001f3fd\U0000200d\U00002640\U0000fe0f',
u'woman bouncing ball mediumdark skin tone': u'\U000026f9\U0001f3fe\U0000200d\U00002640\U0000fe0f',
u'woman bouncing ball dark skin tone': u'\U000026f9\U0001f3ff\U0000200d\U00002640\U0000fe0f',
u'person lifting weights': u'\U0001f3cb\U0000fe0f',
u'person lifting weights light skin tone': u'\U0001f3cb\U0001f3fb',
u'person lifting weights mediumlight skin tone': u'\U0001f3cb\U0001f3fc',
u'person lifting weights medium skin tone': u'\U0001f3cb\U0001f3fd',
u'person lifting weights mediumdark skin tone': u'\U0001f3cb\U0001f3fe',
u'person lifting weights dark skin tone': u'\U0001f3cb\U0001f3ff',
u'man lifting weights': u'\U0001f3cb\U0000fe0f\U0000200d\U00002642\U0000fe0f',
u'man lifting weights light skin tone': u'\U0001f3cb\U0001f3fb\U0000200d\U00002642\U0000fe0f',
u'man lifting weights mediumlight skin tone': u'\U0001f3cb\U0001f3fc\U0000200d\U00002642\U0000fe0f',
u'man lifting weights medium skin tone': u'\U0001f3cb\U0001f3fd\U0000200d\U00002642\U0000fe0f',
u'man lifting weights mediumdark skin tone': u'\U0001f3cb\U0001f3fe\U0000200d\U00002642\U0000fe0f',
u'man lifting weights dark skin tone': u'\U0001f3cb\U0001f3ff\U0000200d\U00002642\U0000fe0f',
u'woman lifting weights': u'\U0001f3cb\U0000fe0f\U0000200d\U00002640\U0000fe0f',
u'woman lifting weights light skin tone': u'\U0001f3cb\U0001f3fb\U0000200d\U00002640\U0000fe0f',
u'woman lifting weights mediumlight skin tone': u'\U0001f3cb\U0001f3fc\U0000200d\U00002640\U0000fe0f',
u'woman lifting weights medium skin tone': u'\U0001f3cb\U0001f3fd\U0000200d\U00002640\U0000fe0f',
u'woman lifting weights mediumdark skin tone': u'\U0001f3cb\U0001f3fe\U0000200d\U00002640\U0000fe0f',
u'woman lifting weights dark skin tone': u'\U0001f3cb\U0001f3ff\U0000200d\U00002640\U0000fe0f',
u'person biking': u'\U0001f6b4',
u'person biking light skin tone': u'\U0001f6b4\U0001f3fb',
u'person biking mediumlight skin tone': u'\U0001f6b4\U0001f3fc',
u'person biking medium skin tone': u'\U0001f6b4\U0001f3fd',
u'person biking mediumdark skin tone': u'\U0001f6b4\U0001f3fe',
u'person biking dark skin tone': u'\U0001f6b4\U0001f3ff',
u'man biking': u'\U0001f6b4\U0000200d\U00002642\U0000fe0f',
u'man biking light skin tone': u'\U0001f6b4\U0001f3fb\U0000200d\U00002642\U0000fe0f',
u'man biking mediumlight skin tone': u'\U0001f6b4\U0001f3fc\U0000200d\U00002642\U0000fe0f',
u'man biking medium skin tone': u'\U0001f6b4\U0001f3fd\U0000200d\U00002642\U0000fe0f',
u'man biking mediumdark skin tone': u'\U0001f6b4\U0001f3fe\U0000200d\U00002642\U0000fe0f',
u'man biking dark skin tone': u'\U0001f6b4\U0001f3ff\U0000200d\U00002642\U0000fe0f',
u'woman biking': u'\U0001f6b4\U0000200d\U00002640\U0000fe0f',
u'woman biking light skin tone': u'\U0001f6b4\U0001f3fb\U0000200d\U00002640\U0000fe0f',
u'woman biking mediumlight skin tone': u'\U0001f6b4\U0001f3fc\U0000200d\U00002640\U0000fe0f',
u'woman biking medium skin tone': u'\U0001f6b4\U0001f3fd\U0000200d\U00002640\U0000fe0f',
u'woman biking mediumdark skin tone': u'\U0001f6b4\U0001f3fe\U0000200d\U00002640\U0000fe0f',
u'woman biking dark skin tone': u'\U0001f6b4\U0001f3ff\U0000200d\U00002640\U0000fe0f',
u'person mountain biking': u'\U0001f6b5',
u'person mountain biking light skin tone': u'\U0001f6b5\U0001f3fb',
u'person mountain biking mediumlight skin tone': u'\U0001f6b5\U0001f3fc',
u'person mountain biking medium skin tone': u'\U0001f6b5\U0001f3fd',
u'person mountain biking mediumdark skin tone': u'\U0001f6b5\U0001f3fe',
u'person mountain biking dark skin tone': u'\U0001f6b5\U0001f3ff',
u'man mountain biking': u'\U0001f6b5\U0000200d\U00002642\U0000fe0f',
u'man mountain biking light skin tone': u'\U0001f6b5\U0001f3fb\U0000200d\U00002642\U0000fe0f',
u'man mountain biking mediumlight skin tone': u'\U0001f6b5\U0001f3fc\U0000200d\U00002642\U0000fe0f',
u'man mountain biking medium skin tone': u'\U0001f6b5\U0001f3fd\U0000200d\U00002642\U0000fe0f',
u'man mountain biking mediumdark skin tone': u'\U0001f6b5\U0001f3fe\U0000200d\U00002642\U0000fe0f',
u'man mountain biking dark skin tone': u'\U0001f6b5\U0001f3ff\U0000200d\U00002642\U0000fe0f',
u'woman mountain biking': u'\U0001f6b5\U0000200d\U00002640\U0000fe0f',
u'woman mountain biking light skin tone': u'\U0001f6b5\U0001f3fb\U0000200d\U00002640\U0000fe0f',
u'woman mountain biking mediumlight skin tone': u'\U0001f6b5\U0001f3fc\U0000200d\U00002640\U0000fe0f',
u'woman mountain biking medium skin tone': u'\U0001f6b5\U0001f3fd\U0000200d\U00002640\U0000fe0f',
u'woman mountain biking mediumdark skin tone': u'\U0001f6b5\U0001f3fe\U0000200d\U00002640\U0000fe0f',
u'woman mountain biking dark skin tone': u'\U0001f6b5\U0001f3ff\U0000200d\U00002640\U0000fe0f',
u'racing car': u'\U0001f3ce\U0000fe0f',
u'motorcycle': u'\U0001f3cd\U0000fe0f',
u'person cartwheeling': u'\U0001f938',
u'person cartwheeling light skin tone': u'\U0001f938\U0001f3fb',
u'person cartwheeling mediumlight skin tone': u'\U0001f938\U0001f3fc',
u'person cartwheeling medium skin tone': u'\U0001f938\U0001f3fd',
u'person cartwheeling mediumdark skin tone': u'\U0001f938\U0001f3fe',
u'person cartwheeling dark skin tone': u'\U0001f938\U0001f3ff',
u'man cartwheeling': u'\U0001f938\U0000200d\U00002642\U0000fe0f',
u'man cartwheeling light skin tone': u'\U0001f938\U0001f3fb\U0000200d\U00002642\U0000fe0f',
u'man cartwheeling mediumlight skin tone': u'\U0001f938\U0001f3fc\U0000200d\U00002642\U0000fe0f',
u'man cartwheeling medium skin tone': u'\U0001f938\U0001f3fd\U0000200d\U00002642\U0000fe0f',
u'man cartwheeling mediumdark skin tone': u'\U0001f938\U0001f3fe\U0000200d\U00002642\U0000fe0f',
u'man cartwheeling dark skin tone': u'\U0001f938\U0001f3ff\U0000200d\U00002642\U0000fe0f',
u'woman cartwheeling': u'\U0001f938\U0000200d\U00002640\U0000fe0f',
u'woman cartwheeling light skin tone': u'\U0001f938\U0001f3fb\U0000200d\U00002640\U0000fe0f',
u'woman cartwheeling mediumlight skin tone': u'\U0001f938\U0001f3fc\U0000200d\U00002640\U0000fe0f',
u'woman cartwheeling medium skin tone': u'\U0001f938\U0001f3fd\U0000200d\U00002640\U0000fe0f',
u'woman cartwheeling mediumdark skin tone': u'\U0001f938\U0001f3fe\U0000200d\U00002640\U0000fe0f',
u'woman cartwheeling dark skin tone': u'\U0001f938\U0001f3ff\U0000200d\U00002640\U0000fe0f',
u'people wrestling': u'\U0001f93c',
u'men wrestling': u'\U0001f93c\U0000200d\U00002642\U0000fe0f',
u'women wrestling': u'\U0001f93c\U0000200d\U00002640\U0000fe0f',
u'person playing water polo': u'\U0001f93d',
u'person playing water polo light skin tone': u'\U0001f93d\U0001f3fb',
u'person playing water polo mediumlight skin tone': u'\U0001f93d\U0001f3fc',
u'person playing water polo medium skin tone': u'\U0001f93d\U0001f3fd',
u'person playing water polo mediumdark skin tone': u'\U0001f93d\U0001f3fe',
u'person playing water polo dark skin tone': u'\U0001f93d\U0001f3ff',
u'man playing water polo': u'\U0001f93d\U0000200d\U00002642\U0000fe0f',
u'man playing water polo light skin tone': u'\U0001f93d\U0001f3fb\U0000200d\U00002642\U0000fe0f',
u'man playing water polo mediumlight skin tone': u'\U0001f93d\U0001f3fc\U0000200d\U00002642\U0000fe0f',
u'man playing water polo medium skin tone': u'\U0001f93d\U0001f3fd\U0000200d\U00002642\U0000fe0f',
u'man playing water polo mediumdark skin tone': u'\U0001f93d\U0001f3fe\U0000200d\U00002642\U0000fe0f',
u'man playing water polo dark skin tone': u'\U0001f93d\U0001f3ff\U0000200d\U00002642\U0000fe0f',
u'woman playing water polo': u'\U0001f93d\U0000200d\U00002640\U0000fe0f',
u'woman playing water polo light skin tone': u'\U0001f93d\U0001f3fb\U0000200d\U00002640\U0000fe0f',
u'woman playing water polo mediumlight skin tone': u'\U0001f93d\U0001f3fc\U0000200d\U00002640\U0000fe0f',
u'woman playing water polo medium skin tone': u'\U0001f93d\U0001f3fd\U0000200d\U00002640\U0000fe0f',
u'woman playing water polo mediumdark skin tone': u'\U0001f93d\U0001f3fe\U0000200d\U00002640\U0000fe0f',
u'woman playing water polo dark skin tone': u'\U0001f93d\U0001f3ff\U0000200d\U00002640\U0000fe0f',
u'person playing handball': u'\U0001f93e',
u'person playing handball light skin tone': u'\U0001f93e\U0001f3fb',
u'person playing handball mediumlight skin tone': u'\U0001f93e\U0001f3fc',
u'person playing handball medium skin tone': u'\U0001f93e\U0001f3fd',
u'person playing handball mediumdark skin tone': u'\U0001f93e\U0001f3fe',
u'person playing handball dark skin tone': u'\U0001f93e\U0001f3ff',
u'man playing handball': u'\U0001f93e\U0000200d\U00002642\U0000fe0f',
u'man playing handball light skin tone': u'\U0001f93e\U0001f3fb\U0000200d\U00002642\U0000fe0f',
u'man playing handball mediumlight skin tone': u'\U0001f93e\U0001f3fc\U0000200d\U00002642\U0000fe0f',
u'man playing handball medium skin tone': u'\U0001f93e\U0001f3fd\U0000200d\U00002642\U0000fe0f',
u'man playing handball mediumdark skin tone': u'\U0001f93e\U0001f3fe\U0000200d\U00002642\U0000fe0f',
u'man playing handball dark skin tone': u'\U0001f93e\U0001f3ff\U0000200d\U00002642\U0000fe0f',
u'woman playing handball': u'\U0001f93e\U0000200d\U00002640\U0000fe0f',
u'woman playing handball light skin tone': u'\U0001f93e\U0001f3fb\U0000200d\U00002640\U0000fe0f',
u'woman playing handball mediumlight skin tone': u'\U0001f93e\U0001f3fc\U0000200d\U00002640\U0000fe0f',
u'woman playing handball medium skin tone': u'\U0001f93e\U0001f3fd\U0000200d\U00002640\U0000fe0f',
u'woman playing handball mediumdark skin tone': u'\U0001f93e\U0001f3fe\U0000200d\U00002640\U0000fe0f',
u'woman playing handball dark skin tone': u'\U0001f93e\U0001f3ff\U0000200d\U00002640\U0000fe0f',
u'person juggling': u'\U0001f939',
u'person juggling light skin tone': u'\U0001f939\U0001f3fb',
u'person juggling mediumlight skin tone': u'\U0001f939\U0001f3fc',
u'person juggling medium skin tone': u'\U0001f939\U0001f3fd',
u'person juggling mediumdark skin tone': u'\U0001f939\U0001f3fe',
u'person juggling dark skin tone': u'\U0001f939\U0001f3ff',
u'man juggling': u'\U0001f939\U0000200d\U00002642\U0000fe0f',
u'man juggling light skin tone': u'\U0001f939\U0001f3fb\U0000200d\U00002642\U0000fe0f',
u'man juggling mediumlight skin tone': u'\U0001f939\U0001f3fc\U0000200d\U00002642\U0000fe0f',
u'man juggling medium skin tone': u'\U0001f939\U0001f3fd\U0000200d\U00002642\U0000fe0f',
u'man juggling mediumdark skin tone': u'\U0001f939\U0001f3fe\U0000200d\U00002642\U0000fe0f',
u'man juggling dark skin tone': u'\U0001f939\U0001f3ff\U0000200d\U00002642\U0000fe0f',
u'woman juggling': u'\U0001f939\U0000200d\U00002640\U0000fe0f',
u'woman juggling light skin tone': u'\U0001f939\U0001f3fb\U0000200d\U00002640\U0000fe0f',
u'woman juggling mediumlight skin tone': u'\U0001f939\U0001f3fc\U0000200d\U00002640\U0000fe0f',
u'woman juggling medium skin tone': u'\U0001f939\U0001f3fd\U0000200d\U00002640\U0000fe0f',
u'woman juggling mediumdark skin tone': u'\U0001f939\U0001f3fe\U0000200d\U00002640\U0000fe0f',
u'woman juggling dark skin tone': u'\U0001f939\U0001f3ff\U0000200d\U00002640\U0000fe0f',
},
u'family': {
u'man and woman holding hands': u'\U0001f46b',
u'two men holding hands': u'\U0001f46c',
u'two women holding hands': u'\U0001f46d',
u'kiss': u'\U0001f48f',
u'kiss woman man': u'\U0001f469\U0000200d\U00002764\U0000fe0f\U0000200d\U0001f48b\U0000200d\U0001f468',
u'kiss man man': u'\U0001f468\U0000200d\U00002764\U0000fe0f\U0000200d\U0001f48b\U0000200d\U0001f468',
u'kiss woman woman': u'\U0001f469\U0000200d\U00002764\U0000fe0f\U0000200d\U0001f48b\U0000200d\U0001f469',
u'couple with heart': u'\U0001f491',
u'couple with heart woman man': u'\U0001f469\U0000200d\U00002764\U0000fe0f\U0000200d\U0001f468',
u'couple with heart man man': u'\U0001f468\U0000200d\U00002764\U0000fe0f\U0000200d\U0001f468',
u'couple with heart woman woman': u'\U0001f469\U0000200d\U00002764\U0000fe0f\U0000200d\U0001f469',
u'family': u'\U0001f46a',
u'family man woman boy': u'\U0001f468\U0000200d\U0001f469\U0000200d\U0001f466',
u'family man woman girl': u'\U0001f468\U0000200d\U0001f469\U0000200d\U0001f467',
u'family man woman girl boy': u'\U0001f468\U0000200d\U0001f469\U0000200d\U0001f467\U0000200d\U0001f466',
u'family man woman boy boy': u'\U0001f468\U0000200d\U0001f469\U0000200d\U0001f466\U0000200d\U0001f466',
u'family man woman girl girl': u'\U0001f468\U0000200d\U0001f469\U0000200d\U0001f467\U0000200d\U0001f467',
u'family man man boy': u'\U0001f468\U0000200d\U0001f468\U0000200d\U0001f466',
u'family man man girl': u'\U0001f468\U0000200d\U0001f468\U0000200d\U0001f467',
u'family man man girl boy': u'\U0001f468\U0000200d\U0001f468\U0000200d\U0001f467\U0000200d\U0001f466',
u'family man man boy boy': u'\U0001f468\U0000200d\U0001f468\U0000200d\U0001f466\U0000200d\U0001f466',
u'family man man girl girl': u'\U0001f468\U0000200d\U0001f468\U0000200d\U0001f467\U0000200d\U0001f467',
u'family woman woman boy': u'\U0001f469\U0000200d\U0001f469\U0000200d\U0001f466',
u'family woman woman girl': u'\U0001f469\U0000200d\U0001f469\U0000200d\U0001f467',
u'family woman woman girl boy': u'\U0001f469\U0000200d\U0001f469\U0000200d\U0001f467\U0000200d\U0001f466',
u'family woman woman boy boy': u'\U0001f469\U0000200d\U0001f469\U0000200d\U0001f466\U0000200d\U0001f466',
u'family woman woman girl girl': u'\U0001f469\U0000200d\U0001f469\U0000200d\U0001f467\U0000200d\U0001f467',
u'family man boy': u'\U0001f468\U0000200d\U0001f466',
u'family man boy boy': u'\U0001f468\U0000200d\U0001f466\U0000200d\U0001f466',
u'family man girl': u'\U0001f468\U0000200d\U0001f467',
u'family man girl boy': u'\U0001f468\U0000200d\U0001f467\U0000200d\U0001f466',
u'family man girl girl': u'\U0001f468\U0000200d\U0001f467\U0000200d\U0001f467',
u'family woman boy': u'\U0001f469\U0000200d\U0001f466',
u'family woman boy boy': u'\U0001f469\U0000200d\U0001f466\U0000200d\U0001f466',
u'family woman girl': u'\U0001f469\U0000200d\U0001f467',
u'family woman girl boy': u'\U0001f469\U0000200d\U0001f467\U0000200d\U0001f466',
u'family woman girl girl': u'\U0001f469\U0000200d\U0001f467\U0000200d\U0001f467',
},
u'body': {
u'selfie': u'\U0001f933',
u'selfie light skin tone': u'\U0001f933\U0001f3fb',
u'selfie mediumlight skin tone': u'\U0001f933\U0001f3fc',
u'selfie medium skin tone': u'\U0001f933\U0001f3fd',
u'selfie mediumdark skin tone': u'\U0001f933\U0001f3fe',
u'selfie dark skin tone': u'\U0001f933\U0001f3ff',
u'flexed biceps': u'\U0001f4aa',
u'flexed biceps light skin tone': u'\U0001f4aa\U0001f3fb',
u'flexed biceps mediumlight skin tone': u'\U0001f4aa\U0001f3fc',
u'flexed biceps medium skin tone': u'\U0001f4aa\U0001f3fd',
u'flexed biceps mediumdark skin tone': u'\U0001f4aa\U0001f3fe',
u'flexed biceps dark skin tone': u'\U0001f4aa\U0001f3ff',
u'leg': u'\U0001f9b5',
u'leg light skin tone': u'\U0001f9b5\U0001f3fb',
u'leg mediumlight skin tone': u'\U0001f9b5\U0001f3fc',
u'leg medium skin tone': u'\U0001f9b5\U0001f3fd',
u'leg mediumdark skin tone': u'\U0001f9b5\U0001f3fe',
u'leg dark skin tone': u'\U0001f9b5\U0001f3ff',
u'foot': u'\U0001f9b6',
u'foot light skin tone': u'\U0001f9b6\U0001f3fb',
u'foot mediumlight skin tone': u'\U0001f9b6\U0001f3fc',
u'foot medium skin tone': u'\U0001f9b6\U0001f3fd',
u'foot mediumdark skin tone': u'\U0001f9b6\U0001f3fe',
u'foot dark skin tone': u'\U0001f9b6\U0001f3ff',
u'backhand index pointing left': u'\U0001f448',
u'backhand index pointing left light skin tone': u'\U0001f448\U0001f3fb',
u'backhand index pointing left mediumlight skin tone': u'\U0001f448\U0001f3fc',
u'backhand index pointing left medium skin tone': u'\U0001f448\U0001f3fd',
u'backhand index pointing left mediumdark skin tone': u'\U0001f448\U0001f3fe',
u'backhand index pointing left dark skin tone': u'\U0001f448\U0001f3ff',
u'backhand index pointing right': u'\U0001f449',
u'backhand index pointing right light skin tone': u'\U0001f449\U0001f3fb',
u'backhand index pointing right mediumlight skin tone': u'\U0001f449\U0001f3fc',
u'backhand index pointing right medium skin tone': u'\U0001f449\U0001f3fd',
u'backhand index pointing right mediumdark skin tone': u'\U0001f449\U0001f3fe',
u'backhand index pointing right dark skin tone': u'\U0001f449\U0001f3ff',
u'index pointing up': u'\U0000261d\U0000fe0f',
u'index pointing up light skin tone': u'\U0000261d\U0001f3fb',
u'index pointing up mediumlight skin tone': u'\U0000261d\U0001f3fc',
u'index pointing up medium skin tone': u'\U0000261d\U0001f3fd',
u'index pointing up mediumdark | |
resonance['FSMothersNumbers'])
# Choose the offshellness
special_mass = (1.0 + options['offshellness'])*mass
# Discard impossible kinematics
if special_mass<final_state_energy:
raise InvalidCmd('The offshellness specified (%s) is such'\
%options['offshellness']+' that the resulting kinematic is '+\
'impossible for resonance %s %s.'%(evaluator.full_model.
get_particle(resonance['ParticlePDG']).get_name(),
str(list(resonance['FSMothersNumbers']))))
continue
# Add it to the list of accepted resonances
kept_resonances.append(resonance)
for resonance in kept_resonances:
# Chose the PS point for the resonance
set_PSpoint(resonance, force_other_res_offshell=kept_resonances)
# misc.sprint(kept_resonances)
# misc.sprint(len(kept_resonances))
return tuple(kept_resonances)
def set_PSpoint(resonance, force_other_res_offshell=[],
allow_energy_increase=1.5, isolation_cuts=True):
""" Starting from the specified resonance, construct a phase space point
for it and possibly also enforce other resonances to be onshell. Possibly
allow to progressively increase enregy by steps of the integer specified
(negative float to forbid it) and possible enforce default isolation cuts
as well."""
def invmass(momenta):
""" Computes the invariant mass of a list of momenta."""
ptot = [sum(p[i] for p in momenta) for i in range(4)]
return math.sqrt(ptot[0]**2-ptot[1]**2-ptot[2]**2-ptot[3]**2)
model = evaluator.full_model
def getmass(pdg):
""" Returns the mass of a particle given the current model and its
pdg given in argument."""
return model.get('parameter_dict')[
model.get_particle(pdg).get('mass')].real
N_trials = 0
max_trial = 1e4
nstep_for_energy_increase = 1e3
PS_point_found = None
if options['offshellness'] > 0.0:
offshellness = options['offshellness']
else:
# We must undershoot the offshellness since one needs more
# energy than the target mass to have a valid PS point. So we
# start with an offshellness 4 times larger, and progressively reduce
# it later
offshellness = (0.25*(options['offshellness']+1.0))-1.0
# When offshellness is negative, it is progressively decreased every
# nstep_for_energy_increase attempts (not increased!), so it is more
# dangerous, and we therefore want the steps to be smaller
if options['offshellness'] < 0.0:
energy_increase = math.sqrt(allow_energy_increase)
else:
energy_increase = allow_energy_increase
# Make sure to remove the resonance itself from force_other_res_offshell
other_res_offshell = [res for res in force_other_res_offshell if
res!=resonance]
# Now play it smart on finding starting energy and offshellness and
# register all resonance masses
all_other_res_masses = [getmass(res['ParticlePDG'])
for res in other_res_offshell]
resonance_mass = getmass(resonance['ParticlePDG'])
str_res = '%s %s'%(model.get_particle(
resonance['ParticlePDG']).get_name(),
str(list(resonance['FSMothersNumbers'])))
leg_number_to_leg = dict((l.get('number'),l) for l in process.get('legs'))
# Find what is the minimum possible offshellness given
# the mass of the daughters of this resonance.
# This will only be relevant when options['offshellness'] is negative
daughter_masses = sum(getmass(leg_number_to_leg[\
number].get('id')) for number in resonance['FSMothersNumbers'])
min_offshellnes = 4.0*((daughter_masses*1.2)/resonance_mass)-1.0
# Compute the minimal energy given the external states, add 20% to leave
# enough phase-space
min_energy = max(sum(getmass(l.get('id')) for l in \
process.get('legs') if l.get('state')==True),
sum(getmass(l.get('id')) for l in \
process.get('legs') if l.get('state')==False))
# List all other offshellnesses of the potential daughters of this
# resonance
daughter_offshellnesses = [(1.0+options['offshellness'])*mass
for i, mass in enumerate(all_other_res_masses) if
other_res_offshell[i]['FSMothersNumbers'].issubset(
resonance['FSMothersNumbers'])]
if options['offshellness'] >= 0.0:
if len(daughter_offshellnesses)>0:
max_mass = max(daughter_offshellnesses)
# A factor two to have enough phase-space
offshellness = max(2.0*(max_mass/resonance_mass)-1.0,
options['offshellness'])
max_mass = max([(1.0+options['offshellness'])*mass for mass in \
all_other_res_masses]+[(1.0+offshellness)*resonance_mass])
# Account for external_masses too
# A factor two to have enough phase-space open
target = max(min_energy*1.2,max_mass*2.0)
if target > options['energy']:
logger.warning("The user-defined energy %f seems "%options['energy']+
" insufficient to reach the minimum propagator invariant mass "+
"%f required for the chosen offshellness %f."%(max_mass,
options['offshellness']) + " Energy reset to %f."%target)
options['energy'] = target
else:
if len(daughter_offshellnesses) > 0:
min_mass = min(daughter_offshellnesses)
# A factor one half to have enough phase-space
offshellness = min(0.25*(min_mass/resonance_mass)-1.0,
options['offshellness'])
# Make sure the chosen offshellness leaves enough energy to produce
# the daughter masses
if (1.0+offshellness)*resonance_mass < daughter_masses*1.2:
msg = 'The resonance %s cannot accomodate'%str_res+\
' an offshellness of %f because the daughter'%options['offshellness']+\
' masses are %f.'%daughter_masses
if options['offshellness']<min_offshellnes:
msg += ' Try again with an offshellness'+\
' smaller (in absolute value) of at least %f.'%min_offshellnes
else:
msg += ' Try again with a smalled offshellness (in absolute value).'
raise InvalidCmd(msg)
min_mass = min([(1.0+options['offshellness'])*mass for mass in \
all_other_res_masses]+[(1.0+offshellness)*resonance_mass])
# Account for external_masses too
# A factor two to have enough phase-space open
if 2.0*min_mass < options['energy']:
new_energy = max(min_energy*1.2, 2.0*min_mass)
logger.warning("The user-defined energy %f seems "%options['energy']+
" too large to not overshoot the maximum propagator invariant mass "+
"%f required for the chosen offshellness %f."%(min_mass,
options['offshellness']) + " Energy reset to %f."%new_energy)
options['energy'] = new_energy
if options['offshellness'] < 0.0 and options['energy'] >= min_mass:
logger.debug("The target energy is not compatible with the mass"+
" of the external states for this process (%f). It is "%min_mass+
"unlikely that a valid kinematic configuration will be found.")
if options['offshellness']<0.0 and offshellness<options['offshellness'] or \
options['offshellness']>0.0 and offshellness>options['offshellness']:
logger.debug("Offshellness increased to %f"%offshellness+
" so as to try to find a kinematical configuration with"+
" offshellness at least equal to %f"%options['offshellness']+
" for all resonances.")
start_energy = options['energy']
while N_trials<max_trial:
N_trials += 1
if N_trials%nstep_for_energy_increase==0:
if allow_energy_increase > 0.0:
old_offshellness = offshellness
if offshellness > 0.0:
options['energy'] *= energy_increase
offshellness *= energy_increase
else:
options['energy'] = max(options['energy']/energy_increase,
min_energy*1.2)
offshellness = max(min_offshellnes,
((offshellness+1.0)/energy_increase)-1.0)
if old_offshellness!=offshellness:
logger.debug('Trying to find a valid kinematic'+\
" configuration for resonance '%s'"%str_res+\
' with increased offshellness %f'%offshellness)
candidate = get_PSpoint_for_resonance(resonance, offshellness)
pass_offshell_test = True
for i, res in enumerate(other_res_offshell):
# Make sure other resonances are sufficiently offshell too
if offshellness > 0.0:
if invmass([candidate[j-1] for j in res['FSMothersNumbers']]) <\
((1.0+options['offshellness'])*all_other_res_masses[i]):
pass_offshell_test = False
break
else:
if invmass([candidate[j-1] for j in res['FSMothersNumbers']]) >\
((1.0+options['offshellness'])*all_other_res_masses[i]):
pass_offshell_test = False
break
if not pass_offshell_test:
continue
# Make sure it is isolated
if isolation_cuts:
# Set ptcut to 5% of total energy
if not evaluator.pass_isolation_cuts(candidate,
ptcut=0.05*invmass([candidate[0],candidate[1]]), drcut=0.4):
continue
PS_point_found = candidate
break
# Restore the initial energy setup
options['energy'] = start_energy
if PS_point_found is None:
err_msg = 'Could not find a valid PS point in %d'%max_trial+\
' trials. Try increasing the energy, modify the offshellness '+\
'or relax some constraints.'
if options['offshellness']<0.0:
err_msg +='Try with a positive offshellness instead (or a '+\
'negative one of smaller absolute value)'
raise InvalidCmd, err_msg
else:
# misc.sprint('PS point found in %s trials.'%N_trials)
# misc.sprint(PS_point_found)
resonance['offshellnesses'] = []
all_other_res_masses = [resonance_mass] + all_other_res_masses
other_res_offshell = [resonance] + other_res_offshell
for i, res in enumerate(other_res_offshell):
if i==0:
res_str = 'self'
else:
res_str = '%s %s'%(model.get_particle(
res['ParticlePDG']).get_name(),
str(list(res['FSMothersNumbers'])))
resonance['offshellnesses'].append((res_str,(
(invmass([PS_point_found[j-1] for j in
res['FSMothersNumbers']])/all_other_res_masses[i])-1.0)))
resonance['PS_point_used'] = PS_point_found
        def get_PSpoint_for_resonance(resonance, offshellness = options['offshellness']):
            """ Assigns a kinematic configuration to the resonance dictionary
            given in argument.

            Builds a fake production process (all legs except the resonance's
            decay products, plus one stand-in leg for the resonance itself)
            and a fake decay process, generates momenta for both with the
            resonance forced to mass (1 + offshellness) * M, glues the two
            kinematic configurations together and returns the momenta
            reordered to match the original process definition.
            """
            # Get the particle mass
            mass_string = evaluator.full_model.get_particle(
                resonance['ParticlePDG']).get('mass')
            mass = evaluator.full_model.get('parameter_dict')[mass_string].real
            # Choose the offshellness: the forced invariant mass of the resonance.
            special_mass = (1.0 + offshellness)*mass
            # Create a fake production and decay process
            prod_proc = base_objects.Process({'legs':base_objects.LegList(
                copy.copy(leg) for leg in process.get('legs') if
                leg.get('number') not in resonance['FSMothersNumbers'])})
            # Add the resonant particle as a final state
            # ID set to 0 since its mass will be forced
            # Number set so as to be first in the list in get_momenta
            prod_proc.get('legs').append(base_objects.Leg({
                'number':max(l.get('number') for l in process.get('legs'))+1,
                'state':True,
                'id':0}))
            # now the decay process (final-state mothers of the resonance only)
            decay_proc = base_objects.Process({'legs':base_objects.LegList(
                copy.copy(leg) for leg in process.get('legs') if leg.get('number')
                in resonance['FSMothersNumbers'] and not leg.get('state')==False)})
            # Add the resonant particle as an initial state
            # ID set to 0 since its mass will be forced
            # Number set to -1 as well so as to be sure it appears first in
            # get_momenta
            decay_proc.get('legs').insert(0,base_objects.Leg({
                'number':-1,
                'state':False,
                'id':0}))
            prod_kinematic = evaluator.get_momenta(prod_proc, options=options,
                special_mass=special_mass)[0]
            decay_kinematic = evaluator.get_momenta(decay_proc, options=options,
                special_mass=special_mass)[0]
            # Substitute the decay kinematics into the production kinematics
            # at the position of the stand-in resonance leg.
            momenta = glue_momenta(prod_kinematic,decay_kinematic)
            # Reshuffle the momentum so as to put it back in the order specified
            # in the process definition.
            # First the production momenta, without the special decayed particle
            ordered_momenta = [(prod_proc.get('legs')[i].get('number'),momenta[i])
                for i in range(len(prod_proc.get('legs'))-1)]
            # And then the decay ones (walked from the end of the glued list).
            ordered_momenta += [(decay_proc.get('legs')[-i].get('number'),
                momenta[-i]) for i in range(1,len(decay_proc.get('legs')))]
            # Return the PSpoint found in the right order
            return [m[1] for m in sorted(ordered_momenta, key = lambda el: el[0])]
# misc.sprint(resonance['PS_point_used'])
@misc.mute_logger()
def get_width(PDG, lambdaCMS, param_card):
""" Returns the width to use | |
Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[ResourceService]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
return self.call_with_http_info(**kwargs)
self.iam_project_service_list = _Endpoint(
settings={
'response_type': ([ResourceService],),
'auth': [
'BearerAuth'
],
'endpoint_path': '/iam/project/{projectId}/service',
'operation_id': 'iam_project_service_list',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'project_id',
],
'required': [
'project_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
},
'location_map': {
'project_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__iam_project_service_list
)
def __iam_project_tag_create(
self,
project_id,
tag,
**kwargs
):
"""Create iam/project.tag # noqa: E501
Create iam/project.tag # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.iam_project_tag_create(project_id, tag, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
tag (Tag):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Tag
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['tag'] = \
tag
return self.call_with_http_info(**kwargs)
self.iam_project_tag_create = _Endpoint(
settings={
'response_type': (Tag,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/iam/project/{projectId}/tag',
'operation_id': 'iam_project_tag_create',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'project_id',
'tag',
],
'required': [
'project_id',
'tag',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'tag':
(Tag,),
},
'attribute_map': {
'project_id': 'projectId',
},
'location_map': {
'project_id': 'path',
'tag': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__iam_project_tag_create
)
def __iam_project_tag_delete(
self,
project_id,
tag_id,
**kwargs
):
"""Delete iam/project.tag # noqa: E501
Delete iam/project.tag # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.iam_project_tag_delete(project_id, tag_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
tag_id (str): tagId
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['tag_id'] = \
tag_id
return self.call_with_http_info(**kwargs)
self.iam_project_tag_delete = _Endpoint(
settings={
'response_type': None,
'auth': [
'BearerAuth'
],
'endpoint_path': '/iam/project/{projectId}/tag/{tagId}',
'operation_id': 'iam_project_tag_delete',
'http_method': 'DELETE',
'servers': None,
},
params_map={
'all': [
'project_id',
'tag_id',
],
'required': [
'project_id',
'tag_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'tag_id':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'tag_id': 'tagId',
},
'location_map': {
'project_id': 'path',
'tag_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__iam_project_tag_delete
)
def __iam_project_tag_get(
self,
project_id,
tag_id,
**kwargs
):
"""Get iam/project.tag # noqa: E501
Get iam/project.tag # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.iam_project_tag_get(project_id, tag_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
tag_id (str): tagId
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Tag
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['tag_id'] = \
tag_id
return self.call_with_http_info(**kwargs)
self.iam_project_tag_get = _Endpoint(
settings={
'response_type': (Tag,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/iam/project/{projectId}/tag/{tagId}',
'operation_id': 'iam_project_tag_get',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'project_id',
'tag_id',
],
'required': [
'project_id',
'tag_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'tag_id':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'tag_id': 'tagId',
},
'location_map': {
'project_id': 'path',
'tag_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__iam_project_tag_get
)
def __iam_project_tag_list(
self,
project_id,
**kwargs
):
"""List iam/project.tag # noqa: E501
List iam/project.tag # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.iam_project_tag_list(project_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if | |
"""
This class controls the textbox GUI for any shop state.
A Gui object is created and updated by the shop state.
"""
import pygame as pg
from harren.data import setup, observer
from harren.data.components import textbox
from harren.data import constants as c
from harren.py_compat import pickle
class Gui(object):
"""Class that controls the GUI of the shop state."""
    def __init__(self, level):
        """Build all GUI state for *level* (a shop state object).

        Reads the level's items and dialogue plus the shared game_data
        dict, then pre-renders the dialogue, gold and selection boxes.
        """
        self.level = level
        self.game_data = self.level.game_data
        # Player always faces down when (re)entering a shop.
        self.level.game_data['last direction'] = 'down'
        self.SFX_observer = observer.SoundEffects()
        self.observers = [self.SFX_observer]
        self.sellable_items = level.sell_items
        self.player_inventory = level.game_data['player inventory']
        self.name = level.name
        # Entry state of the GUI state machine (see make_state_dict).
        self.state = 'dialogue'
        # Shops where the player can only buy, never sell.
        self.no_selling = ['Inn', 'magic shop']
        self.weapon_list = ['Long Sword', 'Rapier']
        self.armor_list = ['Chain Mail', 'Wooden Shield']
        self.font = pg.font.Font(setup.FONTS[c.MAIN_FONT], 22)
        # Index of the current dialogue page.
        self.index = 0
        self.timer = 0.0
        # Debounce flag: set when a key press has been consumed.
        self.allow_input = False
        self.items = level.items
        self.item_to_be_sold = None
        self.item_to_be_purchased = None
        self.dialogue = level.dialogue
        self.accept_dialogue = level.accept_dialogue
        self.accept_sale_dialogue = level.accept_sale_dialogue
        self.arrow = textbox.NextArrow()
        self.selection_arrow = textbox.NextArrow()
        # Fixed screen positions for the selection arrow: pos1-3 are used by
        # three-option menus, pos4-5 by two-option (Yes/No) menus.
        self.arrow_pos1 = (50, 475)
        self.arrow_pos2 = (50, 515)
        self.arrow_pos3 = (50, 555)
        self.arrow_pos4 = (50, 495)
        self.arrow_pos5 = (50, 535)
        self.arrow_pos_list = [self.arrow_pos1, self.arrow_pos2, self.arrow_pos3]
        self.two_arrow_pos_list = [self.arrow_pos4, self.arrow_pos5]
        self.arrow_index = 0
        self.selection_arrow.rect.topleft = self.arrow_pos1
        self.dialogue_box = self.make_dialogue_box(self.dialogue, self.index)
        self.gold_box = self.make_gold_box()
        # Inns/magic shops list their single item directly; other shops show
        # a generic Buy/Sell/Leave menu first.
        if self.name in self.no_selling:
            choices = self.items[0]['dialogue']
        else:
            choices = ['Buy', 'Sell', 'Leave']
        self.selection_box = self.make_selection_box(choices)
        self.state_dict = self.make_state_dict()
def notify(self, event):
"""Notify all observers of event."""
for x in self.observers:
x.on_notify(event)
def make_dialogue_box(self, dialogue_list, index):
"""
Make the sprite that controls the dialogue.
"""
image = setup.GFX['dialoguebox']
rect = image.get_rect()
surface = pg.Surface(rect.size)
surface.set_colorkey(c.BLACK)
surface.blit(image, rect)
dialogue = self.font.render(dialogue_list[index],
True,
c.NEAR_BLACK)
dialogue_rect = dialogue.get_rect(left=50, top=50)
surface.blit(dialogue, dialogue_rect)
sprite = pg.sprite.Sprite()
sprite.image = surface
sprite.rect = rect
self.check_to_draw_arrow(sprite)
return sprite
def check_to_draw_arrow(self, sprite):
"""
Blink arrow if more text needs to be read.
"""
if self.index < len(self.dialogue) - 1:
sprite.image.blit(self.arrow.image, self.arrow.rect)
def make_gold_box(self):
"""Make the box to display total gold"""
image = setup.GFX['goldbox']
rect = image.get_rect(bottom=608, right=800)
surface = pg.Surface(rect.size)
surface.set_colorkey(c.BLACK)
surface.blit(image, (0, 0))
gold = self.player_inventory['GOLD']['quantity']
text = 'Gold: ' + str(gold)
text_render = self.font.render(text, True, c.NEAR_BLACK)
text_rect = text_render.get_rect(x=80, y=60)
surface.blit(text_render, text_rect)
sprite = pg.sprite.Sprite()
sprite.image = surface
sprite.rect = rect
return sprite
def make_selection_box(self, choices):
"""Make the box for the player to select options"""
image = setup.GFX['shopbox']
rect = image.get_rect(bottom=608)
surface = pg.Surface(rect.size)
surface.set_colorkey(c.BLACK)
surface.blit(image, (0, 0))
if len(choices) == 2:
choice1 = self.font.render(choices[0], True, c.NEAR_BLACK)
choice1_rect = choice1.get_rect(x=200, y=35)
choice2 = self.font.render(choices[1], True, c.NEAR_BLACK)
choice2_rect = choice2.get_rect(x=200, y=75)
surface.blit(choice1, choice1_rect)
surface.blit(choice2, choice2_rect)
elif len(choices) == 3:
choice1 = self.font.render(choices[0], True, c.NEAR_BLACK)
choice1_rect = choice1.get_rect(x=200, y=15)
choice2 = self.font.render(choices[1], True, c.NEAR_BLACK)
choice2_rect = choice2.get_rect(x=200, y=55)
choice3 = self.font.render(choices[2], True, c.NEAR_BLACK)
choice3_rect = choice3.get_rect(x=200, y=95)
surface.blit(choice1, choice1_rect)
surface.blit(choice2, choice2_rect)
surface.blit(choice3, choice3_rect)
sprite = pg.sprite.Sprite()
sprite.image = surface
sprite.rect = rect
return sprite
def make_state_dict(self):
"""Make the state dictionary for the GUI behavior"""
state_dict = {'dialogue': self.control_dialogue,
'select': self.make_selection,
'confirmpurchase': self.confirm_purchase,
'confirmsell': self.confirm_sell,
'reject': self.reject_insufficient_gold,
'accept': self.accept_purchase,
'acceptsell': self.accept_sale,
'hasitem': self.has_item,
'buysell': self.buy_sell,
'sell': self.sell_items,
'cantsell': self.cant_sell,
'cantsellequippedweapon': self.cant_sell_equipped_weapon,
'cantsellequippedarmor': self.cant_sell_equipped_armor}
return state_dict
    def control_dialogue(self, keys, current_time):
        """Advance through the shop dialogue one page per SPACE press."""
        # Re-render every frame so the blinking "more" arrow stays current.
        self.dialogue_box = self.make_dialogue_box(self.dialogue, self.index)
        if self.index < (len(self.dialogue) - 1) and self.allow_input:
            if keys[pg.K_SPACE]:
                self.index += 1
                self.allow_input = False
                # Last page reached: hand control to the buy/sell (or select) menu.
                if self.index == (len(self.dialogue) - 1):
                    self.state = self.begin_new_transaction()
                self.notify(c.CLICK2)
        # Debounce: require SPACE to be released before the next press counts.
        if not keys[pg.K_SPACE]:
            self.allow_input = True
def begin_new_transaction(self):
"""Set state to buysell or select, depending if the shop
is a Inn/Magic shop or not"""
if self.level.name in self.no_selling:
state = 'select'
else:
state = 'buysell'
return state
def make_selection(self, keys, current_time):
"""Control the selection"""
choices = []
for item in self.items:
choices.append(item['dialogue'])
if self.name in self.no_selling:
choices.append('Leave')
else:
choices.append('Cancel')
self.dialogue_box = self.make_dialogue_box(self.dialogue, self.index)
self.selection_box = self.make_selection_box(choices)
self.gold_box = self.make_gold_box()
if len(choices) == 2:
arrow_list = self.two_arrow_pos_list
elif len(choices) == 3:
arrow_list = self.arrow_pos_list
else:
arrow_list = None
AssertionError('Only two items supported')
self.selection_arrow.rect.topleft = arrow_list[self.arrow_index]
if keys[pg.K_DOWN] and self.allow_input:
if self.arrow_index < (len(choices) - 1):
self.arrow_index += 1
self.allow_input = False
self.notify(c.CLICK)
elif keys[pg.K_UP] and self.allow_input:
if self.arrow_index > 0:
self.arrow_index -= 1
self.allow_input = False
self.notify(c.CLICK)
elif keys[pg.K_SPACE] and self.allow_input:
if self.arrow_index == 0:
self.state = 'confirmpurchase'
self.item_to_be_purchased = self.items[0]
elif self.arrow_index == 1 and len(choices) == 3:
self.state = 'confirmpurchase'
self.item_to_be_purchased = self.items[1]
else:
if self.level.name in self.no_selling:
self.level.state = 'transition out'
self.game_data['last state'] = self.level.name
else:
self.state = 'buysell'
self.notify(c.CLICK2)
self.arrow_index = 0
self.allow_input = False
if not keys[pg.K_SPACE] and not keys[pg.K_UP] and not keys[pg.K_DOWN]:
self.allow_input = True
    def confirm_purchase(self, keys, current_time):
        """Show a Yes/No menu confirming a pending purchase.

        Yes attempts the purchase via buy_item(); No returns to the
        previous buy/sell (or select) state.
        """
        dialogue = ['Are you sure?']
        choices = ['Yes', 'No']
        self.selection_box = self.make_selection_box(choices)
        self.gold_box = self.make_gold_box()
        self.dialogue_box = self.make_dialogue_box(dialogue, 0)
        self.selection_arrow.rect.topleft = self.two_arrow_pos_list[self.arrow_index]
        # Move the arrow between Yes (index 0) and No (index 1).
        if keys[pg.K_DOWN] and self.allow_input:
            if self.arrow_index < (len(choices) - 1):
                self.arrow_index += 1
                self.allow_input = False
                self.notify(c.CLICK)
        elif keys[pg.K_UP] and self.allow_input:
            if self.arrow_index > 0:
                self.arrow_index -= 1
                self.allow_input = False
                self.notify(c.CLICK)
        elif keys[pg.K_SPACE] and self.allow_input:
            if self.arrow_index == 0:
                self.buy_item()
            elif self.arrow_index == 1:
                self.state = self.begin_new_transaction()
            self.notify(c.CLICK2)
            self.arrow_index = 0
            self.allow_input = False
        # Debounce: wait for all keys to be released before re-enabling input.
        if not keys[pg.K_SPACE] and not keys[pg.K_DOWN] and not keys[pg.K_UP]:
            self.allow_input = True
def buy_item(self):
"""Attempt to allow player to purchase item"""
item = self.item_to_be_purchased
self.player_inventory['GOLD']['quantity'] -= item['price']
if self.player_inventory['GOLD']['quantity'] < 0:
self.player_inventory['GOLD']['quantity'] += item['price']
self.state = 'reject'
else:
if item['type'] in self.player_inventory and not self.name == c.POTION_SHOP:
self.state = 'hasitem'
self.player_inventory['GOLD']['quantity'] += item['price']
else:
self.notify(c.CLOTH_BELT)
self.state = 'accept'
self.add_player_item(item)
def add_player_item(self, item):
"""Add item to player's inventory."""
item_type = item['type']
quantity = item['quantity']
value = item['price']
power = item['power']
magic_list = ['Cure', 'Fire Blast']
player_armor = ['Chain Mail', 'Wooden Shield']
player_weapons = ['Rapier', 'Long Sword']
player_items = self.level.game_data['player inventory']
player_health = self.level.game_data['player stats']['health']
player_magic = self.level.game_data['player stats']['magic']
equipped_armor = self.level.game_data['player inventory']['equipped armor']
item_to_add = {'quantity': quantity,
'value': value,
'power': power}
if item_type in magic_list:
item_to_add = {'magic points': item['magic points'],
'power': item['power']}
player_items[item_type] = item_to_add
if item_type in player_armor:
equipped_armor.append(item_type)
if item_type in player_weapons:
player_items['equipped weapon'] = item_type
if item_type in player_items and item_type not in magic_list:
player_items[item_type]['quantity'] += quantity
elif quantity > 0:
player_items[item_type] = item_to_add
elif item_type == 'room':
player_health['current'] = player_health['maximum']
player_magic['current'] = player_magic['maximum']
pickle.dump(self.game_data, open('save.p', 'wb'))
def confirm_sell(self, keys, current_time):
"""Confirm player wants to sell item."""
dialogue = ['Are you sure?']
choices = ['Yes', 'No']
self.dialogue_box = self.make_dialogue_box(dialogue, 0)
self.selection_box = self.make_selection_box(choices)
self.selection_arrow.rect.topleft = self.two_arrow_pos_list[self.arrow_index]
if keys[pg.K_DOWN] and self.allow_input:
if self.arrow_index < (len(choices) - 1):
self.arrow_index += 1
self.allow_input = False
self.notify(c.CLICK)
elif keys[pg.K_UP] and self.allow_input:
if self.arrow_index > 0:
self.arrow_index -= 1
self.allow_input = False
self.notify(c.CLICK)
elif keys[pg.K_SPACE] and self.allow_input:
if self.arrow_index == 0:
self.sell_item_from_inventory()
elif self.arrow_index == 1:
self.state = self.begin_new_transaction()
self.notify(c.CLICK2)
self.allow_input = False
self.arrow_index = 0
if not keys[pg.K_SPACE] and not keys[pg.K_UP] and not keys[pg.K_DOWN]:
self.allow_input = True
def sell_item_from_inventory(self):
"""Allow player to sell item to shop."""
item_price = self.item_to_be_sold['price']
item_name = self.item_to_be_sold['type']
if item_name in self.weapon_list:
if item_name == self.game_data['player inventory']['equipped weapon']:
self.state = 'cantsellequippedweapon'
else:
self.notify(c.CLOTH_BELT)
self.sell_inventory_data_adjust(item_price, item_name)
elif item_name in self.armor_list:
if item_name in self.game_data['player inventory']['equipped armor']:
self.state = 'cantsellequippedarmor'
else:
self.notify(c.CLOTH_BELT)
self.sell_inventory_data_adjust(item_price, item_name)
else:
self.notify(c.CLOTH_BELT)
self.sell_inventory_data_adjust(item_price, item_name)
def sell_inventory_data_adjust(self, item_price, item_name):
"""Add gold and subtract item during sale."""
self.player_inventory['GOLD']['quantity'] += (item_price / 2)
self.state = 'acceptsell'
if self.player_inventory[item_name]['quantity'] > 1:
self.player_inventory[item_name]['quantity'] -= 1
else:
del self.player_inventory[self.item_to_be_sold['type']]
def reject_insufficient_gold(self, keys, current_time):
"""Reject player selection if they do not have enough gold"""
dialogue = ["You don't have enough gold!"]
self.dialogue_box = self.make_dialogue_box(dialogue, 0)
if keys[pg.K_SPACE] and self.allow_input:
self.notify(c.CLICK2)
self.state = self.begin_new_transaction()
self.selection_arrow.rect.topleft = self.arrow_pos1
self.allow_input = False
if not keys[pg.K_SPACE]:
self.allow_input = True
def accept_purchase(self, keys, current_time):
"""Accept purchase and confirm with message"""
self.dialogue_box = self.make_dialogue_box(self.accept_dialogue, 0)
self.gold_box = self.make_gold_box()
if keys[pg.K_SPACE] and self.allow_input:
self.notify(c.CLICK2)
self.state = self.begin_new_transaction()
self.selection_arrow.rect.topleft = self.arrow_pos1
self.allow_input = False
if not keys[pg.K_SPACE]:
self.allow_input = True
def accept_sale(self, keys, current_time):
"""Confirm to player that item was sold"""
self.dialogue_box = self.make_dialogue_box(
self.accept_sale_dialogue,
0
)
self.gold_box = self.make_gold_box()
if keys[pg.K_SPACE] and self.allow_input:
self.notify(c.CLICK2)
self.state = self.begin_new_transaction()
self.selection_arrow.rect.topleft = self.arrow_pos1
self.allow_input = False
if not keys[pg.K_SPACE]:
self.allow_input = True
def has_item(self, keys, current_time):
"""Tell player he has item already"""
dialogue = ['You have that item | |
SolveMethod.NUMPY_SOLVE,
_delete_truss_after: bool = False, _override_res: Optional[tuple[dict]] = None):
self.truss = truss
self.sig_figs = sig_figs
warnings.filterwarnings('ignore')
if _override_res is None:
self.results = truss.calculate(solution_method=solution_method)
self.tensions, self.reactions, self.stresses, self.strains = {}, {}, {}, {}
self.buckling_ratios = {}
# populate the tensions, reactions, etc. dictionaries from the results
self.get_data(truss)
else:
self.tensions, self.reactions, self.stresses, self.strains, self.buckling_ratios = \
(*_override_res,)
# set the truss's results before rounding but after zeroing small numbers
self.truss.results = {'internal_forces': self.tensions.copy(),
'reaction_forces': self.reactions.copy(),
'stresses': self.stresses.copy(), 'strains': self.strains.copy(),
'buckling_ratios': self.buckling_ratios.copy()}
# round these results to the required precision
self.round_data()
# HACK: clear the truss registry to avoid issues if building another truss
if _delete_truss_after:
truss._delete_truss()
def __repr__(self):
repr_str = f'\n Axial forces are: '\
f'(positive = tension; negative = compression) \n \t {str(self.tensions)}'
repr_str += f'\n Axial stresses are: \n \t {str(self.stresses)}'
repr_str += f'\n Reaction forces are (horizontal, vertical) components (signs '\
f'consistent with coordinate system): \n \t {str(self.reactions)}'
repr_str += f'\n Buckling ratios are: \n \t {str(self.buckling_ratios)}'
repr_str += f'\n Strains are: \n \t {str(self.strains)}'
repr_str += f'\n\n Units are {self.truss.units.split(",")[0]}, values '\
f'{f"not rounded" if self.sig_figs is None else f"rounded to {self.sig_figs} s.f."}'
return repr_str
    def round_data(self) -> None:
        """
        Replaces the calculated data with rounded values, to precision given by Truss.Result.sig_figs.
        """
        # Round every per-bar result set in place.  A missing key in any of
        # the later dicts raises KeyError and skips the rest of that bar's
        # entries — note: entries may be left partially rounded.
        for item in list(self.tensions.keys()):
            try:
                self.tensions[item] = sigfig.round(self.tensions[item], self.sig_figs)
                self.stresses[item] = sigfig.round(self.stresses[item], self.sig_figs)
                self.strains[item] = sigfig.round(self.strains[item], self.sig_figs)
                self.buckling_ratios[item] = sigfig.round(self.buckling_ratios[item], self.sig_figs)
            except KeyError:
                continue
        # Reactions are (horizontal, vertical) tuples; round both components.
        for item in list(self.reactions.keys()):
            try:
                self.reactions[item] = (sigfig.round(self.reactions[item][0], self.sig_figs),
                                        sigfig.round(self.reactions[item][1], self.sig_figs))
            except KeyError:
                continue
def get_data(self, truss: object) -> None:
"""
Calculate tensions, stresses, strains, reaction forces and buckling ratios
from the calculate() function.
"""
# any forces smaller than `SMALL_NUM` will be set to zero (assumed to be due to rounding
# errors in the solver function). Currently set to 10 times smaller than the least
# significant digit of the smallest internal force value.
# NOTE: maybe move this functionality into `round_data()`.
# SMALL_NUM = 1e-8
SMALL_NUM = 0.1 * 10 ** (-1 * self.sig_figs) * min(
[abs(f) for f in self.results.values()
if type(f) is not tuple and f > (0.1 * 10 ** (-1 * self.sig_figs))])
print(SMALL_NUM)
for item in self.results:
if isinstance(self.results[item], float):
if abs(self.results[item]) < SMALL_NUM:
self.tensions.update({item: 0})
else:
self.tensions.update({item: self.results[item]})
self.stresses.update({
item: self.tensions[item] / truss.get_bar_by_name(item).effective_area})
self.strains.update({item: self.stresses[item] / truss.get_bar_by_name(item).E})
self.buckling_ratios.update({item: truss.get_bar_by_name(item).buckling_ratio})
# NOTE: could check if the bar is in compression using: if self.results[item] < 0:
elif isinstance(self.results[item], tuple):
self.reactions.update({item: (
self.results[item][0] if abs(self.results[item][0]) > SMALL_NUM else 0,
self.results[item][1] if abs(self.results[item][1]) > SMALL_NUM else 0)})
else:
warnings.warn(f'''A result appears to have been formed incorrectly. This is an internal
error. Bad value ignored: {self.results[item]}''', RuntimeWarning)
continue
# TRUSS METHODS
def calculate(self, solution_method: SolveMethod = SolveMethod.SCIPY) -> dict[str, Union[float, tuple]]:
"""
The main part of the program. Calculates the forces in the truss's bars and supports
in order to maintain force equilibrium with the given loads. Outputs as a dictionary in the form
`{bar_name: axial_force_value} + {support_name: (reaction_force_value_x, reaction_force_value_y)}`
"""
# List of dictionaries for unknowns, given default zero values
wanted_vars = []
for bar in self.get_all_bars():
wanted_vars.append('Tension in ' + bar.name)
for support in self.get_all_supports():
if support.support_type in {'pin', 'encastre'}:
wanted_vars.append('Horizontal reaction at ' + support.joint.name)
wanted_vars.append('Vertical reaction at ' + support.joint.name)
elif support.support_type == 'roller':
wanted_vars.append('Magnitude of reaction at ' + support.joint.name)
else:
continue
all_directions = {}
for joint in self.get_all_joints():
# Reset the directions dictionary for this joint
directions = {}
connected_bars = self.get_all_bars_connected_to_joint(joint)
# Get the anticlockwise (polar) angle of each connected joint relative to this joint which have bars
for bar in connected_bars:
angle = bar.get_direction(joint)
directions['Tension in ' + bar.name] = angle
# If there are reactions at this joint, store their directions too
if any([s.joint.name == joint.name for s in self.get_all_supports()]):
if self.get_support_by_joint(joint).support_type == 'roller':
directions['Magnitude of reaction at ' + joint.name] = math.atan2(
*reversed(list(self.get_support_by_joint(joint).roller_normal)))
else:
directions['Horizontal reaction at ' + joint.name] = 0
directions['Vertical reaction at ' + joint.name] = math.pi / 2
# If there are external loads at this joint, store their directions too
for load in self.get_all_loads_at_joint(joint):
directions['Horizontal component of {} at {}'.format(load.name, joint.name)] = 0
directions['Vertical component of {} at {}'.format(load.name, joint.name)] = math.pi / 2
all_directions[joint.name] = directions
# Populate the coefficients and constants matrices (initially lists of lists)
# in preparation to solve the matrix equation M * x = B
coefficients, constants = [], []
for joint_name in self.get_all_joints(str_names_only=True):
# get the coefficients (matrix M), representing the unknown internal/reaction forces
current_line = [round(math.cos(all_directions[joint_name].get(var, math.pi / 2)), 10)
for var in wanted_vars]
coefficients.append(current_line)
current_line = [round(math.sin(all_directions[joint_name].get(var, 0)), 10) for var in wanted_vars]
coefficients.append(current_line)
# get the constants (vector B), representing the external loads, -ve since on other side of eqn
loads_here = self.get_all_loads_at_joint_by_name(joint_name)
constants.append([-1 * sum([load.x for load in loads_here])])
constants.append([-1 * sum([load.y for load in loads_here])])
# Sanitise load data
for i in range(len(constants)):
if constants[i] == [] or constants[i] == [None]:
constants[i] = [0]
# Solve the system - both coefficient and constant matrices are sparse (for most practical cases)
# so ideally the SCIPY method is faster. NOTE: However testing showed that the difference is not huge,
# possibly because the solution itself is not sparse.
if solution_method is SolveMethod.NUMPY_STD:
m, b = np.matrix(coefficients), np.matrix(constants)
x = np.linalg.inv(m) * b
elif solution_method is SolveMethod.NUMPY_SOLVE:
m, b = np.matrix(coefficients), np.matrix(constants)
x = np.linalg.solve(m, b)
elif solution_method is SolveMethod.SCIPY:
m, b = csr_matrix(coefficients), csr_matrix(constants)
x = linsolver.spsolve(m, b)
else:
raise SyntaxError(f"The solution method {solution_method} is not supported. \n"
f"The allowed methods are (either using constants or string literals): \n"
f"{get_constants(SolveMethod)}\n"
f"For example: \t solution_method=SolveMethod.NUMPY_SOLVE \t or \t"
f"solution_method='numpy_solve'")
# Match values back to variable names
output_dict = {}
for i, bar in enumerate(self.get_all_bars()):
output_dict[bar.name] = float(x[i])
else:
_i = i
for support in self.get_all_supports():
output_dict[support.name] = (float(x[_i]), float(x[_i + 1]))
_i += 2
# HACK: For whatever reason, sometimes the pin jointed reaction forces are wrong.
# Couldn't be bothered fixing the root cause so correct them here by resolving at the supports.
for support in self.get_all_supports():
reaction_corrected = [0, 0]
for bar in self.get_all_bars_connected_to_joint(support.joint):
angle = bar.get_direction(support.joint)
reaction_corrected[0] -= output_dict[bar.name] * math.cos(angle)
reaction_corrected[1] -= output_dict[bar.name] * math.sin(angle)
output_dict[support.name] = tuple(reaction_corrected)
# Return the values in dict form
return output_dict
def is_statically_determinate(self) -> bool:
"""
Does a simple arithmetic check to estimate if the truss
is statically determinate (b + F = 2j). Also stores attributes for later quick use.
"""
# b: number of bars in the truss
# F: number of degrees of freedom for the reactions at the supports
# j: number of joints in the truss
# if b + F > 2j, the truss is overconstrained, while if b + F < 2j, the truss is a mechanism
self.b = len(self.get_all_bars(str_names_only=True))
self.F = sum([2 if support.support_type in {'encastre', 'pin'}
else 1 if support.support_type == 'roller'
else 0 for support in Truss.Support])
self.j = len(self.get_all_joints(str_names_only=True))
return self.b + self.F == 2 * self.j
def classify_error_in_truss(self, e: np.linalg.LinAlgError) -> None:
"""
If there was an exception raised when solving, attempt to find the cause and raise
a more user-friendly exception message.
"""
valid = self.is_statically_determinate()
if not valid:
raise ArithmeticError(f'''The truss is not statically determinate.
It cannot be solved. \nBars: {self.b} \t Reactions: {self.F} \t Joints: {self.j}.
\n b + F = {self.b + self.F}, 2j = {2 * self.j}''')
elif str(e) == "Singular matrix":
raise TypeError('''
The truss contains mechanistic and/or overconstrained components despite
being globally statically determinate. It cannot be solved.''')
else:
raise TypeError("Something else went wrong. Requires attention.")
def dump_truss_to_json(self, filedir: Optional[str] = None, filename: Optional[str] = None) -> None:
"""
Writes the details of the truss, with the results if available, to
a JSON file which can be read using `load_truss_from_json()`.
NOTE: If this truss is deleted before this function is called, only the results
| |
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.12
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
# Locate and load the compiled SWIG extension `_mag_meter`, using whichever
# import machinery the running Python version provides.
from sys import version_info as _swig_python_version_info
if _swig_python_version_info >= (2, 7, 0):
    def swig_import_helper():
        # Prefer a package-relative import of the binary, falling back to top-level.
        import importlib
        pkg = __name__.rpartition('.')[0]
        mname = '.'.join((pkg, '_mag_meter')).lstrip('.')
        try:
            return importlib.import_module(mname)
        except ImportError:
            return importlib.import_module('_mag_meter')
    _mag_meter = swig_import_helper()
    del swig_import_helper
elif _swig_python_version_info >= (2, 6, 0):
    def swig_import_helper():
        # Python 2.6 path: use the (long-deprecated) `imp` module to find the binary.
        from os.path import dirname
        import imp
        fp = None
        try:
            fp, pathname, description = imp.find_module('_mag_meter', [dirname(__file__)])
        except ImportError:
            import _mag_meter
            return _mag_meter
        try:
            _mod = imp.load_module('_mag_meter', fp, pathname, description)
        finally:
            # Always close the file handle returned by find_module.
            if fp is not None:
                fp.close()
        return _mod
    _mag_meter = swig_import_helper()
    del swig_import_helper
else:
    import _mag_meter
del _swig_python_version_info
try:
    _swig_property = property
except NameError:
    pass  # Python < 2.2 doesn't have 'property'.
try:
    # Python 2/3 compatible alias for the builtins module.
    import builtins as __builtin__
except ImportError:
    import __builtin__
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
    """SWIG setattr helper: route writes through __swig_setmethods__.

    With static=1 (the default), creating NEW attributes raises AttributeError.
    """
    if (name == "thisown"):
        # The ownership flag lives on the underlying SwigPyObject.
        return self.this.own(value)
    if (name == "this"):
        if type(value).__name__ == 'SwigPyObject':
            self.__dict__[name] = value
            return
    method = class_type.__swig_setmethods__.get(name, None)
    if method:
        return method(self, value)
    if (not static):
        # _newclass is the module-level new-style-class flag set during init.
        if _newclass:
            object.__setattr__(self, name, value)
        else:
            self.__dict__[name] = value
    else:
        raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
    """Non-static variant of _swig_setattr_nondynamic (new attributes allowed)."""
    return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr(self, class_type, name):
if (name == "thisown"):
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
raise AttributeError("'%s' object has no attribute '%s'" % (class_type.__name__, name))
def _swig_repr(self):
    """repr() for SWIG proxies, showing the wrapped C pointer when available."""
    try:
        strthis = "proxy of " + self.this.__repr__()
    except __builtin__.Exception:
        # No underlying C object yet (e.g. partially constructed proxy).
        strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
# Old-/new-style class compatibility shim: on ancient Pythons without
# new-style classes, fall back to a plain empty base class.
try:
    _object = object
    _newclass = 1
except __builtin__.Exception:
    class _object:
        pass
    _newclass = 0
class SwigPyIterator(_object):
    """Abstract proxy for SWIG's C++ iterator; every method delegates to _mag_meter."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, SwigPyIterator, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, SwigPyIterator, name)
    def __init__(self, *args, **kwargs):
        raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr
    __swig_destroy__ = _mag_meter.delete_SwigPyIterator
    __del__ = lambda self: None
    def value(self):
        return _mag_meter.SwigPyIterator_value(self)
    def incr(self, n=1):
        return _mag_meter.SwigPyIterator_incr(self, n)
    def decr(self, n=1):
        return _mag_meter.SwigPyIterator_decr(self, n)
    def distance(self, x):
        return _mag_meter.SwigPyIterator_distance(self, x)
    def equal(self, x):
        return _mag_meter.SwigPyIterator_equal(self, x)
    def copy(self):
        return _mag_meter.SwigPyIterator_copy(self)
    def next(self):
        return _mag_meter.SwigPyIterator_next(self)
    def __next__(self):
        return _mag_meter.SwigPyIterator___next__(self)
    def previous(self):
        return _mag_meter.SwigPyIterator_previous(self)
    def advance(self, n):
        return _mag_meter.SwigPyIterator_advance(self, n)
    def __eq__(self, x):
        return _mag_meter.SwigPyIterator___eq__(self, x)
    def __ne__(self, x):
        return _mag_meter.SwigPyIterator___ne__(self, x)
    def __iadd__(self, n):
        return _mag_meter.SwigPyIterator___iadd__(self, n)
    def __isub__(self, n):
        return _mag_meter.SwigPyIterator___isub__(self, n)
    def __add__(self, n):
        return _mag_meter.SwigPyIterator___add__(self, n)
    def __sub__(self, *args):
        return _mag_meter.SwigPyIterator___sub__(self, *args)
    def __iter__(self):
        return self
# Register the proxy class with the SWIG runtime.
SwigPyIterator_swigregister = _mag_meter.SwigPyIterator_swigregister
SwigPyIterator_swigregister(SwigPyIterator)
# C-array helpers generated by SWIG's carrays.i: new_<T>Array/delete_<T>Array
# manage a raw C array of the element type; <T>Array_getitem/_setitem index
# into it. Each Python wrapper is immediately rebound to the raw C function.
def new_doubleArray(nelements):
    return _mag_meter.new_doubleArray(nelements)
new_doubleArray = _mag_meter.new_doubleArray
def delete_doubleArray(ary):
    return _mag_meter.delete_doubleArray(ary)
delete_doubleArray = _mag_meter.delete_doubleArray
def doubleArray_getitem(ary, index):
    return _mag_meter.doubleArray_getitem(ary, index)
doubleArray_getitem = _mag_meter.doubleArray_getitem
def doubleArray_setitem(ary, index, value):
    return _mag_meter.doubleArray_setitem(ary, index, value)
doubleArray_setitem = _mag_meter.doubleArray_setitem
def new_longArray(nelements):
    return _mag_meter.new_longArray(nelements)
new_longArray = _mag_meter.new_longArray
def delete_longArray(ary):
    return _mag_meter.delete_longArray(ary)
delete_longArray = _mag_meter.delete_longArray
def longArray_getitem(ary, index):
    return _mag_meter.longArray_getitem(ary, index)
longArray_getitem = _mag_meter.longArray_getitem
def longArray_setitem(ary, index, value):
    return _mag_meter.longArray_setitem(ary, index, value)
longArray_setitem = _mag_meter.longArray_setitem
def new_intArray(nelements):
    return _mag_meter.new_intArray(nelements)
new_intArray = _mag_meter.new_intArray
def delete_intArray(ary):
    return _mag_meter.delete_intArray(ary)
delete_intArray = _mag_meter.delete_intArray
def intArray_getitem(ary, index):
    return _mag_meter.intArray_getitem(ary, index)
intArray_getitem = _mag_meter.intArray_getitem
def intArray_setitem(ary, index, value):
    return _mag_meter.intArray_setitem(ary, index, value)
intArray_setitem = _mag_meter.intArray_setitem
def new_shortArray(nelements):
    return _mag_meter.new_shortArray(nelements)
new_shortArray = _mag_meter.new_shortArray
def delete_shortArray(ary):
    return _mag_meter.delete_shortArray(ary)
delete_shortArray = _mag_meter.delete_shortArray
def shortArray_getitem(ary, index):
    return _mag_meter.shortArray_getitem(ary, index)
shortArray_getitem = _mag_meter.shortArray_getitem
def shortArray_setitem(ary, index, value):
    return _mag_meter.shortArray_setitem(ary, index, value)
shortArray_setitem = _mag_meter.shortArray_setitem
def getStructSize(self):
    """Return the C sizeof of the wrapped struct/class.

    Looks up the `sizeof_<TypeName>` constant generated by the SWIG GEN_SIZEOF
    macro; raises NameError with guidance if the macro was not run for this type.
    """
    typeString = 'sizeof_' + repr(self).split(';')[0].split('.')[-1]
    try:
        # NOTE: eval() of a derived name is inherited from the SWIG template;
        # the name comes from repr(), not from external input.
        return eval(typeString)
    except NameError as e:
        # BUG FIX: Python 3 exceptions have no `.message` attribute (the old
        # code raised AttributeError here, masking the real error); use str(e).
        raise NameError(str(e) + '\nYou tried to get this size macro: ' + typeString +
                        '\n It appears to be undefined. \nYou need to run the SWIG GEN_SIZEOF' +
                        ' SWIG macro against the class/struct in your SWIG file if you want to ' +
                        ' make this call.\n')
def protectSetAttr(self, name, value):
    """Setattr guard: allow writes only to pre-existing attributes (or 'this')."""
    allowed = hasattr(self, name) or name == 'this'
    if not allowed:
        raise ValueError('You tried to add this variable: ' + name + '\n' +
                         'To this class: ' + str(self))
    object.__setattr__(self, name, value)
def protectAllClasses(moduleType):
    """Attach the protectSetAttr guard and getStructSize helper to every class
    defined in this module.

    :param moduleType: unused; kept for backward compatibility with callers.
    """
    import inspect
    import sys  # BUG FIX: `sys` was used below but never imported in this module.
    clsmembers = inspect.getmembers(sys.modules[__name__], inspect.isclass)
    for name, cls in clsmembers:
        try:
            # Assign directly on the class object instead of exec()-ing source
            # built from the class name (same effect, no string evaluation).
            cls.__setattr__ = protectSetAttr
            cls.getStructSize = getStructSize
        except (AttributeError, TypeError):
            # Some classes (e.g. builtins re-exported here) reject the assignment.
            pass
# SWIG carrays.i helpers for a raw C array of bool; wrappers are immediately
# rebound to the raw extension functions.
def new_boolArray(nelements):
    return _mag_meter.new_boolArray(nelements)
new_boolArray = _mag_meter.new_boolArray
def delete_boolArray(ary):
    return _mag_meter.delete_boolArray(ary)
delete_boolArray = _mag_meter.delete_boolArray
def boolArray_getitem(ary, index):
    return _mag_meter.boolArray_getitem(ary, index)
boolArray_getitem = _mag_meter.boolArray_getitem
def boolArray_setitem(ary, index, value):
    return _mag_meter.boolArray_setitem(ary, index, value)
boolArray_setitem = _mag_meter.boolArray_setitem
class IntVector(_object):
    """Proxy of C++ std::vector<int>; every operation delegates to _mag_meter."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, IntVector, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, IntVector, name)
    __repr__ = _swig_repr
    def iterator(self):
        return _mag_meter.IntVector_iterator(self)
    def __iter__(self):
        return self.iterator()
    def __nonzero__(self):
        return _mag_meter.IntVector___nonzero__(self)
    def __bool__(self):
        return _mag_meter.IntVector___bool__(self)
    def __len__(self):
        return _mag_meter.IntVector___len__(self)
    def __getslice__(self, i, j):
        return _mag_meter.IntVector___getslice__(self, i, j)
    def __setslice__(self, *args):
        return _mag_meter.IntVector___setslice__(self, *args)
    def __delslice__(self, i, j):
        return _mag_meter.IntVector___delslice__(self, i, j)
    def __delitem__(self, *args):
        return _mag_meter.IntVector___delitem__(self, *args)
    def __getitem__(self, *args):
        return _mag_meter.IntVector___getitem__(self, *args)
    def __setitem__(self, *args):
        return _mag_meter.IntVector___setitem__(self, *args)
    def pop(self):
        return _mag_meter.IntVector_pop(self)
    def append(self, x):
        return _mag_meter.IntVector_append(self, x)
    def empty(self):
        return _mag_meter.IntVector_empty(self)
    def size(self):
        return _mag_meter.IntVector_size(self)
    def swap(self, v):
        return _mag_meter.IntVector_swap(self, v)
    def begin(self):
        return _mag_meter.IntVector_begin(self)
    def end(self):
        return _mag_meter.IntVector_end(self)
    def rbegin(self):
        return _mag_meter.IntVector_rbegin(self)
    def rend(self):
        return _mag_meter.IntVector_rend(self)
    def clear(self):
        return _mag_meter.IntVector_clear(self)
    def get_allocator(self):
        return _mag_meter.IntVector_get_allocator(self)
    def pop_back(self):
        return _mag_meter.IntVector_pop_back(self)
    def erase(self, *args):
        return _mag_meter.IntVector_erase(self, *args)
    def __init__(self, *args):
        # Wrap the newly constructed C++ vector; fall back for old SWIG runtimes.
        this = _mag_meter.new_IntVector(*args)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    def push_back(self, x):
        return _mag_meter.IntVector_push_back(self, x)
    def front(self):
        return _mag_meter.IntVector_front(self)
    def back(self):
        return _mag_meter.IntVector_back(self)
    def assign(self, n, x):
        return _mag_meter.IntVector_assign(self, n, x)
    def resize(self, *args):
        return _mag_meter.IntVector_resize(self, *args)
    def insert(self, *args):
        return _mag_meter.IntVector_insert(self, *args)
    def reserve(self, n):
        return _mag_meter.IntVector_reserve(self, n)
    def capacity(self):
        return _mag_meter.IntVector_capacity(self)
    __swig_destroy__ = _mag_meter.delete_IntVector
    __del__ = lambda self: None
# Register the proxy class with the SWIG runtime.
IntVector_swigregister = _mag_meter.IntVector_swigregister
IntVector_swigregister(IntVector)
class DoubleVector(_object):
    """Proxy of C++ std::vector<double>; every operation delegates to _mag_meter."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, DoubleVector, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, DoubleVector, name)
    __repr__ = _swig_repr
    def iterator(self):
        return _mag_meter.DoubleVector_iterator(self)
    def __iter__(self):
        return self.iterator()
    def __nonzero__(self):
        return _mag_meter.DoubleVector___nonzero__(self)
    def __bool__(self):
        return _mag_meter.DoubleVector___bool__(self)
    def __len__(self):
        return _mag_meter.DoubleVector___len__(self)
    def __getslice__(self, i, j):
        return _mag_meter.DoubleVector___getslice__(self, i, j)
    def __setslice__(self, *args):
        return _mag_meter.DoubleVector___setslice__(self, *args)
    def __delslice__(self, i, j):
        return _mag_meter.DoubleVector___delslice__(self, i, j)
    def __delitem__(self, *args):
        return _mag_meter.DoubleVector___delitem__(self, *args)
    def __getitem__(self, *args):
        return _mag_meter.DoubleVector___getitem__(self, *args)
    def __setitem__(self, *args):
        return _mag_meter.DoubleVector___setitem__(self, *args)
    def pop(self):
        return _mag_meter.DoubleVector_pop(self)
    def append(self, x):
        return _mag_meter.DoubleVector_append(self, x)
    def empty(self):
        return _mag_meter.DoubleVector_empty(self)
    def size(self):
        return _mag_meter.DoubleVector_size(self)
    def swap(self, v):
        return _mag_meter.DoubleVector_swap(self, v)
    def begin(self):
        return _mag_meter.DoubleVector_begin(self)
    def end(self):
        return _mag_meter.DoubleVector_end(self)
    def rbegin(self):
        return _mag_meter.DoubleVector_rbegin(self)
    def rend(self):
        return _mag_meter.DoubleVector_rend(self)
    def clear(self):
        return _mag_meter.DoubleVector_clear(self)
    def get_allocator(self):
        return _mag_meter.DoubleVector_get_allocator(self)
    def pop_back(self):
        return _mag_meter.DoubleVector_pop_back(self)
    def erase(self, *args):
        return _mag_meter.DoubleVector_erase(self, *args)
    def __init__(self, *args):
        # Wrap the newly constructed C++ vector; fall back for old SWIG runtimes.
        this = _mag_meter.new_DoubleVector(*args)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    def push_back(self, x):
        return _mag_meter.DoubleVector_push_back(self, x)
    def front(self):
        return _mag_meter.DoubleVector_front(self)
    def back(self):
        return _mag_meter.DoubleVector_back(self)
    def assign(self, n, x):
        return _mag_meter.DoubleVector_assign(self, n, x)
    def resize(self, *args):
        return _mag_meter.DoubleVector_resize(self, *args)
    def insert(self, *args):
        return _mag_meter.DoubleVector_insert(self, *args)
    def reserve(self, n):
        return _mag_meter.DoubleVector_reserve(self, n)
    def capacity(self):
        return _mag_meter.DoubleVector_capacity(self)
    __swig_destroy__ = _mag_meter.delete_DoubleVector
    __del__ = lambda self: None
# Register the proxy class with the SWIG runtime.
DoubleVector_swigregister = _mag_meter.DoubleVector_swigregister
DoubleVector_swigregister(DoubleVector)
class StringVector(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, StringVector, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, StringVector, name)
__repr__ = _swig_repr
def iterator(self):
return _mag_meter.StringVector_iterator(self)
def __iter__(self):
return self.iterator()
def __nonzero__(self):
return _mag_meter.StringVector___nonzero__(self)
def __bool__(self):
return _mag_meter.StringVector___bool__(self)
def __len__(self):
return _mag_meter.StringVector___len__(self)
def __getslice__(self, i, j):
return _mag_meter.StringVector___getslice__(self, i, j)
def __setslice__(self, *args):
return _mag_meter.StringVector___setslice__(self, *args)
def __delslice__(self, i, j):
return _mag_meter.StringVector___delslice__(self, i, j)
def __delitem__(self, *args):
return _mag_meter.StringVector___delitem__(self, *args)
def __getitem__(self, *args):
return _mag_meter.StringVector___getitem__(self, *args)
def __setitem__(self, *args):
return _mag_meter.StringVector___setitem__(self, *args)
def pop(self):
return _mag_meter.StringVector_pop(self)
def append(self, x):
return _mag_meter.StringVector_append(self, x)
def empty(self):
return _mag_meter.StringVector_empty(self)
def size(self):
return _mag_meter.StringVector_size(self)
def swap(self, v):
return _mag_meter.StringVector_swap(self, v)
def begin(self):
return _mag_meter.StringVector_begin(self)
def end(self):
return _mag_meter.StringVector_end(self)
def rbegin(self):
return _mag_meter.StringVector_rbegin(self)
def rend(self):
return _mag_meter.StringVector_rend(self)
def clear(self):
return _mag_meter.StringVector_clear(self)
def get_allocator(self):
return _mag_meter.StringVector_get_allocator(self)
def pop_back(self):
return _mag_meter.StringVector_pop_back(self)
def erase(self, *args):
return _mag_meter.StringVector_erase(self, *args)
def __init__(self, *args):
this = _mag_meter.new_StringVector(*args)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
def push_back(self, x):
return _mag_meter.StringVector_push_back(self, x)
def front(self):
return _mag_meter.StringVector_front(self)
def back(self):
return _mag_meter.StringVector_back(self)
def assign(self, n, x):
return _mag_meter.StringVector_assign(self, n, x)
def resize(self, *args):
return _mag_meter.StringVector_resize(self, *args)
def insert(self, *args):
return _mag_meter.StringVector_insert(self, *args)
def reserve(self, | |
from collections import namedtuple, OrderedDict
from deepnp.functions import *
import deepnp.initializers as init
class RNNCell:
    """A single-timestep vanilla RNN cell: h_next = tanh(x @ Wx + h_prev @ Wh + bias)."""

    def __init__(self, batch_size, input_dim, hidden_dim, Wx=None, Wh=None, bias=None):
        """Create the cell, drawing random parameters when none are supplied.

        :param batch_size: N, samples per batch
        :param input_dim: D, size of each input vector
        :param hidden_dim: H, size of the hidden state
        :param Wx: optional (D, H) input-to-hidden weights
        :param Wh: optional (H, H) hidden-to-hidden weights
        :param bias: optional (H,) bias vector
        """
        N, D, H = batch_size, input_dim, hidden_dim
        Wx = init.normal(D, H) if Wx is None else Wx
        Wh = init.normal(H, H) if Wh is None else Wh
        bias = init.normal(H) if bias is None else bias
        self.batch_size = N
        self.params = [Wx, Wh, bias]
        # Gradient buffers, overwritten in place by backward().
        self.grads = {'Wx': np.zeros_like(Wx),
                      'Wh': np.zeros_like(Wh),
                      'bias': np.zeros_like(bias),
                      'h_prev': np.zeros(shape=(N, H))}
        # Forward-pass values needed by backward().
        self.cache = {'x_t': None,
                      'h_prev': None,
                      'h_next': None}

    def forward(self, x_t, h_prev):
        """Run one timestep on (N, D) input and (N, H) state; returns (N, H) h_next."""
        Wx, Wh, bias = self.params
        linear = np.matmul(x_t, Wx) + np.matmul(h_prev, Wh) + bias
        h_next = np.tanh(linear)
        self.cache = {'x_t': x_t,
                      'h_prev': h_prev,
                      'h_next': h_next}
        return h_next

    def backward(self, d_h_next=1, optimize=True):
        """Backpropagate through the cell; returns the updated grads dict.

        :param d_h_next: upstream gradient w.r.t. h_next (scalar or (N, H))
        :param optimize: kept for interface compatibility. The original code
            computed the input gradient d_x_t when False but discarded it
            (dead store, now removed); the input gradient is never returned.
        """
        Wx, Wh, bias = self.params
        x_t, h_prev, h_next = self.cache['x_t'], self.cache['h_prev'], self.cache['h_next']
        # d/dz tanh(z) = 1 - tanh(z)^2, applied element-wise.
        d_linear = d_h_next * (1 - np.square(h_next))
        self.grads['Wx'][...] = np.matmul(x_t.T, d_linear)
        self.grads['Wh'][...] = np.matmul(h_prev.T, d_linear)
        self.grads['bias'][...] = np.sum(d_linear, axis=0, keepdims=False)
        self.grads['h_prev'][...] = np.matmul(d_linear, Wh.T)
        return self.grads
class RNNLayer:
    """A full-sequence RNN layer that unrolls RNNCell over T timesteps."""

    def __init__(self, input_dim, hidden_dim, Wx=None, Wh=None, bias=None):
        """Create the layer; random parameters are drawn when not supplied."""
        D, H = input_dim, hidden_dim
        self.input_dim, self.hidden_dim = D, H
        Wx = init.normal(D, H) if Wx is None else Wx
        Wh = init.normal(H, H) if Wh is None else Wh
        bias = init.normal(H) if bias is None else bias
        self.params = [Wx, Wh, bias]
        self.grads = {'Wx': np.zeros_like(Wx),
                      'Wh': np.zeros_like(Wh),
                      'bias': np.zeros_like(bias)}
        # One RNNCell per timestep, appended during forward().
        self.timestep_cells = []

    def update(self, parameters):
        """Replace the parameters and discard the previously unrolled cells."""
        self.params = parameters
        self.timestep_cells = list()

    def forward(self, x_sequence, h_init=None):
        """Run the layer over a batch-major (N, T, D) input.

        :param x_sequence: (N, T, D) input batch
        :param h_init: optional (N, H) initial hidden state (defaults to zeros)
        :return: (h_last, h_stack) — final (N, H) state and all (N, T, H) states
        """
        batch_size, timesteps, input_dim = x_sequence.shape
        N, T, D, H = batch_size, timesteps, input_dim, self.hidden_dim
        Wx, Wh, bias = self.params
        h_prev = init.zeros(N, H) if h_init is None else h_init
        # ROBUSTNESS FIX: with T == 0 the loop never ran and `h_next` was
        # unbound (NameError); fall back to the initial state instead.
        h_next = h_prev
        h_stack = init.empty(N, T, H)
        for t in range(T):
            timestep_cell = RNNCell(N, D, H, Wx=Wx, Wh=Wh, bias=bias)
            # (N, D) slice of the sequence for this timestep.
            h_next = timestep_cell.forward(x_sequence[:, t, :], h_prev)
            self.timestep_cells.append(timestep_cell)
            h_stack[:, t, :] = h_next  # (N, H) output of this timestep
            h_prev = h_next
        h_last = h_next
        return h_last, h_stack

    def backward(self, d_h_next=1, optimize=True):
        """Backpropagate through time, accumulating and clipping parameter grads."""
        Wx, Wh, bias = self.params
        d_Wx = np.zeros_like(Wx)
        d_Wh = np.zeros_like(Wh)
        d_bias = np.zeros_like(bias)
        for cell in reversed(self.timestep_cells):
            grad = cell.backward(d_h_next=d_h_next, optimize=optimize)
            d_Wx += grad['Wx']
            d_Wh += grad['Wh']
            d_bias += grad['bias']
            # The hidden-state gradient flows into the previous timestep.
            d_h_next = grad['h_prev']
        # Clip in place to [-1, 1] to limit exploding gradients.
        for d in [d_Wx, d_Wh, d_bias]:
            np.clip(d, -1, 1, out=d)
        self.grads['Wx'][...] = d_Wx
        self.grads['Wh'][...] = d_Wh
        self.grads['bias'][...] = d_bias
        return self.grads
class RNNLayerWithTimesteps:
    """RNN layer whose backward() consumes a full per-timestep (N, T, H)
    gradient stack, unlike RNNLayer which takes only the final gradient."""
    def __init__(self, input_dim, hidden_dim, Wx=None, Wh=None, bias=None):
        # Draw random parameters when none are supplied.
        D, H = input_dim, hidden_dim
        self.input_dim, self.hidden_dim = D, H
        Wx = init.normal(D, H) if Wx is None else Wx
        Wh = init.normal(H, H) if Wh is None else Wh
        bias = init.normal(H) if bias is None else bias
        self.params = [Wx, Wh, bias]
        # Gradient buffers, overwritten in place by backward().
        self.grads = {'Wx': np.zeros_like(Wx),
                      'Wh': np.zeros_like(Wh),
                      'bias': np.zeros_like(bias)}
        # One RNNCell per timestep, appended during forward().
        self.timestep_cells = []
    def update(self, *parameters):
        # Replace the parameters and discard the previously unrolled cells.
        self.params = parameters
        self.timestep_cells = list()
    def forward(self, x_sequence, h_init=None):
        # x_sequence: batch-major (N, T, D); returns (h_last, h_stack).
        batch_size, timesteps, input_dim = x_sequence.shape
        N, T, D, H = batch_size, timesteps, input_dim, self.hidden_dim
        Wx, Wh, bias = self.params
        h_prev = init.zeros(N, H) if h_init is None else h_init
        # N*T*D Style
        h_stack = init.empty(N, T, H)
        for t in range(T):
            timestep_cell = RNNCell(N, D, H, Wx=Wx, Wh=Wh, bias=bias)
            h_next = timestep_cell.forward(x_sequence[:, t, :], h_prev)  # (N, D) slice for this timestep
            self.timestep_cells.append(timestep_cell)
            h_stack[:, t, :] = h_next  # h_next is the (N, H) output of one RNN timestep
            h_prev = h_next
        h_last = h_next
        return h_last, h_stack
    def backward(self, d_h_stack, optimize=True):
        # BPTT over the stored cells using per-timestep upstream gradients.
        Wx, Wh, bias = self.params
        N, T, H = d_h_stack.shape
        D, H = Wx.shape  # NOTE(review): re-binds H to Wx's column count — assumed equal to hidden_dim
        d_Wx = np.zeros_like(Wx)
        d_Wh = np.zeros_like(Wh)
        d_bias = np.zeros_like(bias)
        d_h_prev = init.zeros(N, H)
        for t in reversed(range(T)):
            # NOTE(review): timestep 0 is skipped entirely, so its contribution
            # is never accumulated into the parameter gradients — confirm this
            # is intentional (see the commented-out damping line below).
            if t == 0: continue
            # d_h_next = 0.2 * d_h_stack[:, t, :] + d_h_prev
            cell = self.timestep_cells[t]
            # Upstream gradient for this step: direct output grad + carried state grad.
            d_h_next = d_h_stack[:, t, :] + d_h_prev
            grad = cell.backward(d_h_next=d_h_next, optimize=optimize)
            d_Wx += grad['Wx']
            d_Wh += grad['Wh']
            d_bias += grad['bias']
            d_h_prev[...] = grad['h_prev']
        # Clip gradients in place to [-1, 1].
        for d in [d_Wx, d_Wh, d_bias]:
            np.clip(d, -1, 1, out=d)
        self.grads['Wx'][...] = d_Wx
        self.grads['Wh'][...] = d_Wh
        self.grads['bias'][...] = d_bias
        return self.grads
class LSTMCell:
    """A single-timestep LSTM cell with fused gate weights.

    Wx is (D, 4H), Wh is (H, 4H) and bias is (4H,), with the four gates
    packed column-wise in the order [f, n, i, o].
    """
    def __init__(self, batch_size, input_dim, hidden_dim, Wx=None, Wh=None, bias=None):
        # Draw Xavier-initialised parameters when none are supplied.
        N, D, H = batch_size, input_dim, hidden_dim
        Wx = init.simplexavier(D, 4 * H) if Wx is None else Wx
        Wh = init.simplexavier(H, 4 * H) if Wh is None else Wh
        bias = init.simplexavier(4 * H) if bias is None else bias
        self.hidden_dim = hidden_dim
        self.batch_size = batch_size
        self.params = [Wx, Wh, bias]
        # Gradient buffers, overwritten in place by backward().
        self.grads = {'Wx': np.zeros_like(Wx),
                      'Wh': np.zeros_like(Wh),
                      'bias': np.zeros_like(bias)}
        self.cache = {}
    def forward(self, x, c_prev, h_prev):
        # x_t : (N, D) c_prev, h_prev : (N, H)
        # f : forget gate, n : new info, i : input gate, o : output gate
        H = self.hidden_dim
        Wx, Wh, bias = self.params
        fc = x @ Wx + h_prev @ Wh + bias  # N * 4H
        f = sigmoid(fc[:, :H])
        n = np.tanh(fc[:, H:2 * H])
        i = sigmoid(fc[:, 2 * H:3 * H])
        o = sigmoid(fc[:, 3 * H:])
        c_next = (c_prev * f) + (n * i)
        h_next = np.tanh(c_next) * o
        # Cache everything backward() needs.
        self.cache = (x, c_prev, h_prev, c_next, h_next, f, n, i, o,)
        return c_next, h_next
    def backward(self, d_c_next, d_h_next):
        # All incoming grads are (N, H); we need d_Wx, d_Wh and d_bias, plus
        # d_h_prev / d_c_prev to pass back to the previous timestep.
        # Per-gate grads are computed in f, n, i, o order, then stacked with np.hstack().
        Wx, Wh, bias = self.params
        x, c_prev, h_prev, c_next, h_next, f, n, i, o = self.cache
        tanh_c_next = np.tanh(c_next)
        # Total cell-state gradient: direct path plus the path through h_next = tanh(c) * o.
        d_c_next_agg = d_c_next + (d_h_next * o) * (1 - tanh_c_next ** 2)
        d_f = d_c_next_agg * c_prev
        d_n = d_c_next_agg * i
        d_i = d_c_next_agg * n
        d_o = d_h_next * tanh_c_next
        # Chain through the gate nonlinearities (sigmoid' and tanh').
        d_f *= f * (1 - f)
        d_n *= 1 - n ** 2
        d_i *= i * (1 - i)
        d_o *= o * (1 - o)
        d_fc = np.hstack((d_f, d_n, d_i, d_o))
        d_Wx = x.T @ d_fc
        d_Wh = h_prev.T @ d_fc
        d_bias = d_fc.sum(axis=0)
        self.grads['Wx'][...] = d_Wx
        self.grads['Wh'][...] = d_Wh
        self.grads['bias'][...] = d_bias
        d_c_prev = d_c_next_agg * f
        d_h_prev = d_fc @ Wh.T
        d_x = d_fc @ Wx.T
        return d_x, d_c_prev, d_h_prev
class LSTMLayerTimesteps:
def __init__(self, input_dim, hidden_dim, Wx=None, Wh=None, bias=None, stateful=False):
self.input_dim, self.hidden_dim = input_dim, hidden_dim
D, H = input_dim, hidden_dim
Wx = init.simplexavier(D, 4 * H) if Wx is None else Wx
Wh = init.simplexavier(H, 4 * H) if Wh is None else Wh
bias = init.simplexavier(4 * H) if bias is None else bias
self.cell_state, self.hidden_state, self.d_h_0 = None, None, None
self.stateful = stateful
self.params = [Wx, Wh, bias]
self.grads = [np.zeros_like(Wx),
np.zeros_like(Wh),
np.zeros_like(bias)]
# self.grads = {'Wx': np.zeros_like(Wx),
# 'Wh': np.zeros_like(Wh),
# 'bias': np.zeros_like(bias)}
self.timestep_cells = list()
def update(self, *parameters):
self.params = parameters
self.timestep_cells = list()
def set_state(self, h, c=None):
self.cell_state, self.hidden_state = c, h
def reset_state(self):
self.cell_state, self.hidden_state = None, None
def reset_timesteps(self):
self.timestep_cells = list()
def forward(self, x_seq):
N, T, D = x_seq.shape
H = self.hidden_dim
Wx, Wh, bias = self.params
c_prev = init.zeros(N, H) if not self.stateful or self.cell_state is None else self.cell_state
h_prev = init.zeros(N, H) if not self.stateful or self.hidden_state is None else self.hidden_state
# x_seq : N, T, D
h_stack = init.empty(N, T, H)
for t in range(T):
timestep_cell = LSTMCell(N, D, H, Wx, Wh, bias)
c_next, h_next = timestep_cell.forward(x_seq[:, t, :], c_prev, h_prev)
self.timestep_cells.append(timestep_cell)
h_stack[:, t, :] = h_next
c_prev, h_prev = c_next, h_next
self.cell_state, self.hidden_state = c_next, h_next
h_last = h_next
return h_last, h_stack
def backward(self, d_h_stack, optimize=True):
Wx, Wh, bias = self.params
N, T, H = d_h_stack.shape
D, _ = Wx.shape
d_Wx = np.zeros_like(Wx)
d_Wh = np.zeros_like(Wh)
d_bias = np.zeros_like(bias)
d_h_prev = init.zeros(N, H)
d_c_next = init.zeros(N, H)
d_x_stack = init.empty(N, T, | |
= thread.get()
:param async_req bool
:param str id: (required)
:param str id2: id
:param str image: image
:param str user: user
:return: ScreeningMode
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'id2', 'image', 'user'] # noqa: E501
all_params.append('omit')
all_params.append('fields')
all_params.append('expand')
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method retrieve_screening_mode" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `retrieve_screening_mode`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
if 'id2' in params:
query_params.append(('id', params['id2'])) # noqa: E501
if 'image' in params:
query_params.append(('image', params['image'])) # noqa: E501
if 'user' in params:
query_params.append(('user', params['user'])) # noqa: E501
if 'omit' in params:
query_params.append(('omit', params['omit'])) # noqa: E501
if 'fields' in params:
query_params.append(('fields', params['fields'])) # noqa: E50
if 'expand' in params:
query_params.append(('expand', params['expand'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/api/v1/images/screening_modes/{id}/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ScreeningMode', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_screening_mode(self, id, **kwargs): # noqa: E501
"""update_screening_mode # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_screening_mode(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param Body57 body:
:param str id2: id
:param str image2: image
:param str user2: user
:return: ScreeningMode
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_screening_mode_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.update_screening_mode_with_http_info(id, **kwargs) # noqa: E501
return data
def update_screening_mode_with_http_info(self, id, **kwargs): # noqa: E501
"""update_screening_mode # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_screening_mode_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param Body57 body:
:param str id2: id
:param str image2: image
:param str user2: user
:return: ScreeningMode
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'body', 'id2', 'image2', 'user2'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_screening_mode" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `update_screening_mode`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
if 'id2' in params:
query_params.append(('id', params['id2'])) # noqa: E501
if 'image2' in params:
query_params.append(('image', params['image2'])) # noqa: E501
if 'user2' in params:
query_params.append(('user', params['user2'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
if 'image' in params:
form_params.append(('image', params['image'])) # noqa: E501
if 'user' in params:
form_params.append(('user', params['user'])) # noqa: E501
if 'screening_tiles' in params:
form_params.append(('screening_tiles', params['screening_tiles'])) # noqa: E501
if 'x_steps' in params:
form_params.append(('x_steps', params['x_steps'])) # noqa: E501
if 'y_steps' in params:
form_params.append(('y_steps', params['y_steps'])) # noqa: E501
if 'x_resolution' in params:
form_params.append(('x_resolution', params['x_resolution'])) # noqa: E501
if 'y_resolution' in params:
form_params.append(('y_resolution', params['y_resolution'])) # noqa: E501
if 'current_index' in params:
form_params.append(('current_index', params['current_index'])) # noqa: E501
if 'image' in params:
form_params.append(('image', params['image'])) # noqa: E501
if 'user' in params:
form_params.append(('user', params['user'])) # noqa: E501
if 'screening_tiles' in params:
form_params.append(('screening_tiles', params['screening_tiles'])) # noqa: E501
if 'x_steps' in params:
form_params.append(('x_steps', params['x_steps'])) # noqa: E501
if 'y_steps' in params:
form_params.append(('y_steps', params['y_steps'])) # noqa: E501
if 'x_resolution' in params:
form_params.append(('x_resolution', params['x_resolution'])) # noqa: E501
if 'y_resolution' in params:
form_params.append(('y_resolution', params['y_resolution'])) # noqa: E501
if 'current_index' in params:
form_params.append(('current_index', params['current_index'])) # noqa: E501
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/x-www-form-urlencoded', 'multipart/form-data']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/api/v1/images/screening_modes/{id}/', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ScreeningMode', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_screening_mode(self, id, **kwargs): # noqa: E501
"""update_screening_mode # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_screening_mode(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param int image:
:param int user:
:param object screening_tiles:
:param int x_steps:
:param int y_steps:
:param int x_resolution:
:param int y_resolution:
:param int current_index:
:param str id2: id
:param str image2: image
:param str user2: user
:return: ScreeningMode
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_screening_mode_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.update_screening_mode_with_http_info(id, **kwargs) # noqa: E501
return data
def update_screening_mode_with_http_info(self, id, **kwargs): # noqa: E501
"""update_screening_mode # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_screening_mode_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param int image:
:param int user:
:param object screening_tiles:
:param int x_steps:
:param int y_steps:
:param int x_resolution:
:param int y_resolution:
:param int current_index:
:param str id2: id
:param str image2: image
:param str user2: user
:return: ScreeningMode
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'image', 'user', 'screening_tiles', 'x_steps', 'y_steps', 'x_resolution', 'y_resolution', 'current_index', 'id2', 'image2', 'user2'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_screening_mode" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `update_screening_mode`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
if 'id2' in params:
query_params.append(('id', params['id2'])) # noqa: E501
if 'image2' in params:
query_params.append(('image', params['image2'])) # noqa: E501
if 'user2' in params:
query_params.append(('user', params['user2'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
if 'image' in params:
form_params.append(('image', params['image'])) # noqa: E501
if 'user' in params:
form_params.append(('user', params['user'])) # noqa: E501
if 'screening_tiles' in params:
form_params.append(('screening_tiles', params['screening_tiles'])) # noqa: E501
if 'x_steps' in params:
form_params.append(('x_steps', params['x_steps'])) # noqa: E501
if 'y_steps' in params:
form_params.append(('y_steps', params['y_steps'])) # noqa: E501
if 'x_resolution' in params:
form_params.append(('x_resolution', params['x_resolution'])) # noqa: E501
if 'y_resolution' in params:
form_params.append(('y_resolution', params['y_resolution'])) # noqa: E501
if 'current_index' in params:
form_params.append(('current_index', params['current_index'])) # noqa: E501
if 'image' in params:
form_params.append(('image', params['image'])) # noqa: E501
if 'user' in params:
form_params.append(('user', params['user'])) # noqa: E501
if 'screening_tiles' in params:
form_params.append(('screening_tiles', params['screening_tiles'])) # noqa: E501
if 'x_steps' in params:
form_params.append(('x_steps', params['x_steps'])) # noqa: E501
if 'y_steps' in params:
form_params.append(('y_steps', params['y_steps'])) # noqa: E501
if 'x_resolution' in params:
form_params.append(('x_resolution', params['x_resolution'])) # noqa: E501
if 'y_resolution' in params:
form_params.append(('y_resolution', params['y_resolution'])) # noqa: E501
if 'current_index' in params:
form_params.append(('current_index', params['current_index'])) # noqa: E501
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/x-www-form-urlencoded', 'multipart/form-data']) # noqa: E501
# Authentication setting
auth_settings | |
import discord
from discord.ext import commands
from math import ceil
import random
class RpsButtons(discord.ui.View):
    """
    Contains the RPS Game Buttons.

    One button per choice; every button records the clicking player's choice
    and stops the view once both players have chosen.
    """

    def __init__(self, author, member):
        super().__init__(timeout=60)
        self.author = author
        self.member = member
        self.authorchoice = None
        self.memberchoice = None
        # Set by the command that sends the view, used for timeout replies.
        self.message = None

    async def _register_choice(self, interaction: discord.Interaction, choice: str):
        """Record `choice` for whichever player clicked; stop once both chose."""
        await interaction.response.send_message(f"You chose {choice}!", ephemeral=True)
        if interaction.user.id == self.author.id:
            self.authorchoice = choice
        if interaction.user.id == self.member.id:
            self.memberchoice = choice
        if self.authorchoice is not None and self.memberchoice is not None:
            self.stop()

    # 'Rock'/'Paper'/'Scissors' handlers were three identical bodies;
    # they now share _register_choice.
    @discord.ui.button(label="Rock", emoji="🪨", style=discord.ButtonStyle.gray)
    async def rock(self, button: discord.ui.Button, interaction: discord.Interaction):
        """
        Registers the author and user input for Rock.
        Stops if both parties made a choice.
        """
        await self._register_choice(interaction, "Rock")

    @discord.ui.button(label="Paper", emoji="📝", style=discord.ButtonStyle.gray)
    async def paper(self, button: discord.ui.Button, interaction: discord.Interaction):
        """
        Registers the author and user input for Paper.
        Stops if both parties made a choice.
        """
        await self._register_choice(interaction, "Paper")

    @discord.ui.button(label="Scissors", emoji="✂️", style=discord.ButtonStyle.gray)
    async def scissors(
        self, button: discord.ui.Button, interaction: discord.Interaction
    ):
        """
        Registers the author and user input for Scissors.
        Stops if both parties made a choice.
        """
        await self._register_choice(interaction, "Scissors")

    async def interaction_check(self, interaction: discord.Interaction):
        # basically ignores every other member except the author and mentioned member
        return interaction.user in (self.member, self.author)

    async def on_timeout(self):
        # if the match didnt go through as planned, tell whoever stalled
        if self.authorchoice is None and self.memberchoice is None:
            await self.message.reply(
                "Match timed out! Both parties took too long to pick a choice!"
            )
        elif self.authorchoice is None:
            await self.message.reply(
                f"Match timed out! {self.author.mention} took too long to pick a choice!"
            )
        elif self.memberchoice is None:
            await self.message.reply(
                f"Match timed out! {self.member.mention} took too long to pick a choice!"
            )
class TicTacToeButtons(discord.ui.Button["TicTacToeGame"]):
    """
    A single cell of the tic-tac-toe board.

    The button starts blank (zero-width-space label) and is placed on a
    three-wide grid row derived from its position; clicking it delegates
    to the parent view's handle_turn.
    """

    def __init__(self, button_pos: int):
        # Rows are 1-based with three cells each: positions 0-2 -> row 1,
        # 3-5 -> row 2, 6-8 -> row 3 (same mapping as ceil((pos + 1) / 3)).
        super().__init__(
            style=discord.ButtonStyle.gray,
            label="\u200b",
            row=button_pos // 3 + 1,
        )
        self.button_pos = button_pos

    async def callback(self, interaction: discord.Interaction):
        # The parent TicTacToeGame view owns all of the game state.
        await self.view.handle_turn(self, self.button_pos, interaction)
class TicTacToeGame(discord.ui.View):
    """
    Contains the TicTacToe Game Logic.

    Board encoding: a flat list of 9 cells where 0 = free, 1 = author (X),
    2 = member (O).  The author always moves first.
    """
    def __init__(self, author, member):
        super().__init__(timeout=60)
        self.author = author
        self.member = member
        # The author gets the first move.
        self.turn = author
        # Set by the command that sends the view, used for timeout replies.
        self.message = None
        # initialises the board
        self.board = [0, 0, 0, 0, 0, 0, 0, 0, 0]
        # adds all of the buttons (one per board cell)
        for i in range(0, 9):
            self.add_item(TicTacToeButtons(i))
    def check_for_winner(self, board):
        """
        Checks if there is a winner, a tie or if the game is still going on.

        Returns the winning user, False on a tie (board full, no winner),
        or None while the game is still in progress.
        """
        # all rows, columns and both diagonals
        winning_combinations = [
            (0, 1, 2),
            (3, 4, 5),
            (6, 7, 8),
            (0, 3, 6),
            (1, 4, 7),
            (2, 5, 8),
            (0, 4, 8),
            (2, 4, 6),
        ]
        # checks for the winner and returns it, if found
        for combination in winning_combinations:
            winning_list = []
            for position in combination:
                winning_list.append(board[position])
            if winning_list == [1, 1, 1]:
                return self.author
            if winning_list == [2, 2, 2]:
                return self.member
        # if theres a tie we return false
        if not 0 in board:
            return False
        return None
    async def game_ending(self, interaction: discord.Interaction):
        """
        Handles the game ending for us.

        Stops the view and announces the result.  NOTE(review): the board
        buttons are not disabled here, and handle_turn still edits the
        "next turn" message after a win — confirm that is intended.
        """
        winner = self.check_for_winner(self.board)
        # winner is a user on a win, False on a tie, None if still running
        if winner is not None:
            self.stop()
            if winner is False:
                await interaction.message.reply(content="The game is tied!")
            else:
                await interaction.message.reply(
                    content=f"The winner is: {winner.mention}!"
                )
    async def handle_turn(
        self,
        button: discord.ui.Button,
        button_id: int,
        interaction: discord.Interaction,
    ):
        """
        The logic for one turn.

        Marks the clicked cell for the current player, checks for a game
        end, disables the clicked button and refreshes the board message.
        Note: interaction_check has already flipped self.turn for a valid
        click; the assignments below set the same value again, so the two
        methods must stay consistent with each other.
        """
        if interaction.user.id == self.author.id:
            self.board[button_id] = 1
            button.emoji = "❌"
            button.style = discord.ButtonStyle.red
            self.turn = self.member
        if interaction.user.id == self.member.id:
            self.board[button_id] = 2
            button.emoji = "⭕"
            button.style = discord.ButtonStyle.blurple
            self.turn = self.author
        await self.game_ending(interaction)
        # a used cell can not be clicked again
        button.disabled = True
        await interaction.response.edit_message(
            content=f"{self.author.mention}: ❌\n{self.member.mention}: ⭕\n\n{self.turn.mention}'s Turn:",
            view=self,
        )
    async def interaction_check(self, interaction: discord.Interaction):
        # checks if the user is in the game
        if not interaction.user in (self.member, self.author):
            return False
        # checks if its your turn; flips the turn here, handle_turn then
        # assigns the same value again (net effect: one flip per valid click)
        if interaction.user == self.author and self.turn == self.author:
            self.turn = self.member
            return True
        if interaction.user == self.member and self.turn == self.member:
            self.turn = self.author
            return True
        return False
    async def on_timeout(self):
        # whoever holds the turn when the view times out gets the blame
        await self.message.reply(
            f"Match timed out! {self.turn.mention} took too long to pick something!"
        )
class BlackJackButtons(discord.ui.View):
    """
    Two-player blackjack view.

    Each hand is stored as ``[list_of_card_names, total_value]``.  The author
    acts first; ``self.folded`` collects players who stopped drawing.  The
    game ends when both players folded or someone reaches 21 or more.
    """
    def __init__(self, author, member):
        super().__init__(timeout=60)
        self.author = author
        self.member = member
        # hand = [card names drawn, running point total]
        self.author_hand = [[], 0]
        self.member_hand = [[], 0]
        self.folded = []
        self.turn = author
        # Set by the command that sends the view, used for timeout replies.
        self.message = None
    # all of the possible cards (class-level constants shared by all games)
    card_faces = [
        "Ace",
        "2",
        "3",
        "4",
        "5",
        "6",
        "7",
        "8",
        "9",
        "10",
        "Jack",
        "Queen",
        "King",
    ]
    card_suites = ["♠️", "♦️", "♣️", "♥️"]
    def draw_card(self):
        """Draw one random card for whoever's turn it is and add it to their hand."""
        card_deck = []
        for i, f in enumerate(self.card_faces):
            for s in self.card_suites:
                # face cards (indexes 10-12) are re-indexed to 9 so that
                # their value (i + 1) below becomes 10
                if i in (10, 11, 12):
                    i = 9
                card_deck.append([f"{f} of {s}", i + 1])
        card = random.choice(card_deck)
        # checks if the card is already present in one hand, if so repeats the process
        # i read that in real life blackjack is played with like 8 decks at once
        # so this really isnt even needed
        if card[0] in self.author_hand[0] or card[0] in self.member_hand[0]:
            try:
                self.draw_card()
                return
            except RecursionError:
                # NOTE(review): draws nothing when recursion is exhausted;
                # practically unreachable with only two hands in play.
                return
        if self.turn == self.author:
            # if the card is an ace, checks if it should be worth 11 or 1
            if card[1] == 1:
                if self.author_hand[1] + 11 <= 21:
                    card[1] = 11
            self.author_hand[0].append(card[0])
            self.author_hand[1] += card[1]
        else:
            if card[1] == 1:
                if self.member_hand[1] + 11 <= 21:
                    card[1] = 11
            self.member_hand[0].append(card[0])
            self.member_hand[1] += card[1]
    def get_winner(self):
        """Return the winning user, or None on a draw (equal totals or both bust)."""
        # checks for values greater than 21 (bust loses if the other fits)
        if self.author_hand[1] > 21 >= self.member_hand[1]:
            return self.member
        if self.member_hand[1] > 21 >= self.author_hand[1]:
            return self.author
        # checks for draws
        if self.member_hand[1] == self.author_hand[1] or (
            self.author_hand[1] > 21 and self.member_hand[1] > 21
        ):
            return None
        if self.author_hand[1] > self.member_hand[1]:
            return self.author
        return self.member
    async def end_game(self):
        # stops the view and announces the result (None means a tie)
        self.stop()
        if self.get_winner():
            await self.message.reply(f"The winner is {self.get_winner().mention}!")
        else:
            await self.message.reply("The game is tied!")
    @discord.ui.button(
        label="Draw a Card", emoji="🃏", style=discord.ButtonStyle.blurple
    )
    async def draw(self, button: discord.ui.Button, interaction: discord.Interaction):
        """
        Draws another card and checks if the players turn is over.

        A total above 20 ends the game immediately; otherwise the turn
        passes to the other player unless they have already folded.
        NOTE(review): the hand message is still edited after end_game().
        """
        self.draw_card()
        if self.turn == self.author:
            if self.author_hand[1] > 20:
                await self.end_game()
            if self.member not in self.folded:
                self.turn = self.member
        else:
            if self.member_hand[1] > 20:
                await self.end_game()
            if self.author not in self.folded:
                self.turn = self.author
        await interaction.response.edit_message(
            content=f"{self.author.mention}'s Hand: {', '.join(self.author_hand[0])} ({self.author_hand[1]})\n"
            f"{self.member.mention}'s Hand: {', '.join(self.member_hand[0])} ({self.member_hand[1]})\n\n"
            f"It is {self.turn.mention}'s Turn.",
            view=self,
        )
    @discord.ui.button(label="Fold", emoji="❌", style=discord.ButtonStyle.red)
    async def fold(self, button: discord.ui.Button, interaction: discord.Interaction):
        """
        Folds and switches the turn, or exits the game.

        When both players have folded the game is scored and ended.
        """
        if self.turn == self.author:
            self.folded.append(self.author)
            self.turn = self.member
        else:
            self.folded.append(self.member)
            self.turn = self.author
        if all(x in self.folded for x in [self.member, self.author]):
            await self.end_game()
        await interaction.response.edit_message(
            content=f"{self.author.mention}'s Hand: {', '.join(self.author_hand[0])} ({self.author_hand[1]})\n"
            f"{self.member.mention}'s Hand: {', '.join(self.member_hand[0])} ({self.member_hand[1]})\n\n"
            f"It is {self.turn.mention}'s Turn.",
            view=self,
        )
    async def interaction_check(self, interaction: discord.Interaction):
        # only the two players may interact, and only on their own turn
        if not interaction.user in (self.author, self.member):
            return False
        if interaction.user == self.author and self.turn == self.author:
            return True
        if interaction.user == self.member and self.turn == self.member:
            return True
        return False
    async def on_timeout(self):
        # whoever holds the turn when the view times out gets the blame
        await self.message.reply(
            f"The match timed out! {self.turn.mention} took too long to pick a choice!"
        )
class Games(commands.Cog):
"""
Contains the commands to execute the Games.
"""
    def __init__(self, bot):
        # Keep a reference to the bot so commands can compare against bot.user.
        self.bot = bot
# never heard of those alternate names but wikipedia says they exist so might as well add them
@commands.command(aliases=["rockpaperscissors", "rochambeau", "roshambo"])
async def rps(self, ctx, member: discord.Member = None):
"""
Plays a Game of Rock, Paper, Scissors with either a mentioned user,
or the bot, if you do not mention a user.
"""
if member is None:
member = self.bot.user
# basic checks for users
if member == ctx.author:
await ctx.send("Please don't play matches with yourself.")
return
if member.bot and member.id != self.bot.user.id:
await ctx.send(
"Please do not play with other bots, they are too predictable."
)
return
view = RpsButtons(ctx.author, member)
view.message = await ctx.send(
f"Rock, Paper, Scissors: \n{ctx.author.mention} vs {member.mention}\nChoose wisely:",
view=view,
)
# if the bot plays | |
import csv
import psycopg2
import psycopg2.extras
import sys
import pprint
from datetime import date
import holidays
import traceback
# Column indexes into each row of the scraped NUFORC sightings csv.
REPORTED_DATETIME_INDEX = 0
CITY_INDEX = 1
STATE_INDEX = 2
SHAPE_INDEX = 3
DURATION_INDEX = 4
SUMMARY_INDEX = 5
POSTED_DATE_INDEX = 6
# Local Postgres connection used by every query helper below.
# NOTE(review): credentials are hard-coded; acceptable only for a local scratch DB.
CONNECTION_STRING = "host='localhost' dbname='postgres' user='postgres' password='password'"
# Reused US-holiday calendar for flagging dates in the date dimensions.
us_holidays = holidays.UnitedStates()
# Input csv produced by the NUFORC scraper.
CSV_FILE_LOCATION = "nuforcScrape.csv"
class MissingDimensionValueException(Exception):
    """Signals a row whose dimension value is missing.

    NOTE(review): not raised anywhere in the visible part of this script.
    """
    pass
class DuplicateRowException(Exception):
    """Signals a duplicate row during loading.

    NOTE(review): not raised anywhere in the visible part of this script.
    """
    pass
# enum containing the color codes for coloring the console output
class bcolors:
    """ANSI terminal escape codes used to colorize console output.

    Not a real enum, just named constants: print a code, then the text,
    then ENDC to reset the terminal back to normal.
    """
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
def date_to_string(date):
    """Format a date object as MM-DD-YYYY."""
    return date.strftime("%m-%d-%Y")
def create_summary_dimension(csvLocation):
    """(Re)create an empty summary_dimension table, dropping the dependent fact table first.

    :param csvLocation: kept for signature compatibility with the other
        create_* helpers; the csv is not read here (summaries are inserted
        later while populating the fact table).
    """
    # The original opened the csv and built a reader it never used; that
    # dead file handling was removed.
    create_summary_dimension_query = """
    DROP TABLE IF EXISTS ufo_fact;
    DROP TABLE IF EXISTS summary_dimension;
    CREATE TABLE summary_dimension
    (
      summary_key SERIAL,
      summary VARCHAR(200),
      PRIMARY KEY (summary_key)
    );
    """
    execute_query(create_summary_dimension_query)
def correct_country_in_location_dimension():
    """Fix the default 'united states' country for rows whose city text names another country.

    The scraper stuffs foreign locations into the city column, so we match
    the country name inside the city text and overwrite the default.
    """
    # (country value, LIKE pattern on the city column) — previously eight
    # copy-pasted query blocks, now data-driven.
    corrections = [
        ('canada', '%canada%'),
        ('greece', '%greece%'),
        ('bulgaria', '%bulgaria%'),
        ('uk/england', '%united kingdom%'),
        ('mexico', '%mexico%'),
        ('germany', '%germany%'),
        ('japan', '%japan%'),
        ('india', '%india%'),
    ]
    for country, city_pattern in corrections:
        correct_country_query = """
        UPDATE public.location_dimension
        SET country = '%s'
        WHERE city LIKE '%s';
        """ % (country, city_pattern)
        execute_query(correct_country_query)
def create_location_dimension(csvLocation):
with open(csvLocation, "rb") as csvFile:
csvReader = csv.reader(csvFile)
# Create empty location table
create_location_dimension_query = """
DROP TABLE IF EXISTS location_dimension;
CREATE TABLE location_dimension
(
location_key SERIAL,
city VARCHAR(70),
state VARCHAR(5),
country VARCHAR(20) DEFAULT 'united states',
region VARCHAR(20) DEFAULT 'north america',
PRIMARY KEY (location_key)
);
"""
execute_query(create_location_dimension_query)
locationList = []
for row in csvReader:
tmp_location_tuple = (row[CITY_INDEX].lower().replace("'", "''"), row[STATE_INDEX].lower().replace("'", "''"))
if tmp_location_tuple not in locationList:
locationList.append(tmp_location_tuple)
print "Added (%s, %s) to location dimension" % (row[CITY_INDEX], row[STATE_INDEX])
if len(locationList) > 0:
insert_location_data_string = "("
location_string_list = []
for location_tuple in locationList:
if location_tuple[0] == '' and location_tuple[1] != '':
location_string_list.append("NULL, '%s'" % location_tuple[1])
if location_tuple[0] != '' and location_tuple[1] == '':
location_string_list.append("'%s', NULL" % location_tuple[0])
if location_tuple[0] == '' and location_tuple[1] == '':
location_string_list.append("NULL, NULL")
else:
location_string_list.append("'%s', '%s'" % location_tuple)
insert_location_data_string += "),\n(".join(location_string_list)
insert_location_data_string += ")\n"
# Fill up location table with data from the csv
insert_into_location_dim_query = """
INSERT INTO public.location_dimension(city, state)
VALUES %s;
""" % insert_location_data_string
execute_query(insert_into_location_dim_query)
correct_country_in_location_dimension()
def get_all_distinct_shapes(csvLocation):
    """Return the distinct lower-cased shape names found in the csv.

    Placeholder values ('unknown', 'others', 'other', '') are skipped.

    :param csvLocation: path to the scraped NUFORC csv.
    :return: list of distinct shape names, in first-seen order.
    """
    with open(csvLocation, "rb") as csvFile:
        csvReader = csv.reader(csvFile)
        shape_name_list = []
        for row in csvReader:
            # csv.reader always yields strings, so the old `is not None`
            # guard was dead — and it would have crashed on the None it was
            # meant to protect against (None.lower() at the append).  The
            # second .lower() on append was redundant as well.
            shape_name = row[SHAPE_INDEX].lower()
            if shape_name not in shape_name_list \
                    and shape_name not in ['unknown', 'others', 'other', '']:
                shape_name_list.append(shape_name)
        return shape_name_list
def create_shape_dimension():
print "Connecting to database with the following connection string\n->%s" % CONNECTION_STRING
connection = psycopg2.connect(CONNECTION_STRING)
print "Connected"
# Configure cursor
cursor = connection.cursor(cursor_factory=psycopg2.extras.DictCursor)
create_shape_dimension_query = """
DROP TABLE IF EXISTS shape_dimension;
CREATE TABLE shape_dimension
(
shape_key SERIAL,
shape_name VARCHAR(15),
PRIMARY KEY (shape_key)
);
"""
try:
cursor.execute(create_shape_dimension_query)
shape_name_list = get_all_distinct_shapes(CSV_FILE_LOCATION)
if len(shape_name_list) > 0:
added_rows_script = "('"
added_rows_script += "'),\n('".join(shape_name_list)
added_rows_script += "')\n"
# Query to create shape dimension
# We add a null shape at the end for missing values
insert_query = """
INSERT INTO shape_dimension(shape_name)
VALUES %s, (NULL);
""" % added_rows_script
cursor.execute(insert_query)
print "shape dimension created"
finally:
cursor.close()
connection.commit()
connection.close()
print 'Connection closed'
def populate_reported_date_dimension_holidays():
# gets all rows in the reported date dimension
get_reported_date_rows_query = """
SELECT reported_date_key,
date_actual
FROM public.reported_date_dimension;
"""
holiday_dates_list = []
results = execute_query(get_reported_date_rows_query)
for row in results:
date_dimension_id = row[0]
is_holiday = row[1] in us_holidays
holiday_text = us_holidays.get(row[1])
if is_holiday:
holiday_text = holiday_text.replace("'", "''")
holiday_dates_list.append(date_dimension_id)
update_query = """
UPDATE public.reported_date_dimension
SET is_holiday = TRUE,
holiday_text = '%s'
WHERE reported_date_key = '%s';
""" % (holiday_text, date_dimension_id)
execute_query(update_query)
print 'Updated %d dates out of %d' % (len(holiday_dates_list), len(results))
def populate_posted_date_dimension_holidays():
# gets all rows in the reported date dimension
get_posted_date_rows_query = """
SELECT posted_date_key,
date_actual
FROM public.posted_date_dimension;
"""
holiday_dates_list = []
results = execute_query(get_posted_date_rows_query)
for row in results:
date_dimension_id = row[0]
is_holiday = row[1] in us_holidays
holiday_text = us_holidays.get(row[1])
if is_holiday:
holiday_text = holiday_text.replace("'", "''")
holiday_dates_list.append(date_dimension_id)
update_query = """
UPDATE public.posted_date_dimension
SET is_holiday = TRUE,
holiday_text = '%s'
WHERE posted_date_key = '%s';
""" % (holiday_text, date_dimension_id)
execute_query(update_query)
print 'Updated %d dates out of %d' % (len(holiday_dates_list), len(results))
# Read a csv file to another
def readCSVToOtherCSV(csvLocation):
with open(csvLocation, "rb") as csvFile:
with open("new_" + csvLocation, "wb") as newCsvFile:
csvReader = csv.reader(csvFile)
csvWriter = csv.writer(newCsvFile)
for row in csvReader:
print ', '.join(row)
csvWriter.writerow(row)
# Read from postgres database
def createTableAndReadTable():
    """Smoke test: create the throwaway public.test2 table, then select from it."""
    createTableQuery = """
    CREATE TABLE public.test2
    (
      id VARCHAR(10) NOT NULL
        CONSTRAINT test2_pkey
        PRIMARY KEY,
      content2 VARCHAR
    );
    """
    execute_query(createTableQuery)
    # Perform SELECT query (result intentionally unused — connectivity check)
    records = execute_query("SELECT * FROM public.test2;")
def execute_query(query):
print "Connecting to database with the following connection string\n->%s" % CONNECTION_STRING
connection = psycopg2.connect(CONNECTION_STRING)
print "Connected"
# Configure cursor
cursor = connection.cursor(cursor_factory=psycopg2.extras.DictCursor)
results = []
try:
cursor.execute(query)
if "SELECT" in cursor.statusmessage and "INTO" not in cursor.statusmessage:
results = cursor.fetchall()
print "Query successful"
except:
print bcolors.FAIL
traceback.print_exc()
print bcolors.ENDC
finally:
cursor.close()
connection.commit()
connection.close()
print 'Connection closed'
return results
def populate_fact_table_and_summary(csv_location):
with open(csv_location, "rb") as csvFile:
csvReader = csv.reader(csvFile)
print "Connecting to database with the following connection string\n->%s" % CONNECTION_STRING
connection = psycopg2.connect(CONNECTION_STRING)
print "Connected"
# Configure cursor
cursor = connection.cursor(cursor_factory=psycopg2.extras.DictCursor)
results = []
try:
#All the code to populate fact table starts here
create_fact_table_query = """
DROP TABLE IF EXISTS UFO_Fact;
CREATE TABLE UFO_Fact
(
summary_key INT REFERENCES summary_dimension(summary_key),
shape_key INT REFERENCES shape_dimension(shape_key),
reported_date_key INT REFERENCES reported_date_dimension(reported_date_key),
posted_date_key INT REFERENCES posted_date_dimension(posted_date_key),
location_key INT REFERENCES location_dimension(location_key),
duration_text text,
duration_sec DECIMAL,
duration_minute DECIMAL,
-- days_between_sighting_and_posting INT,
PRIMARY KEY (summary_key, shape_key, reported_date_key, posted_date_key, location_key)
);
"""
cursor.execute(create_fact_table_query)
print bcolors.OKGREEN + 'Created UFO_Fact table' + bcolors.ENDC
# Iterate over each row in the csv file,
# add summary to summary dimension if not there,
# Find correct dimension keys and use them to add row to fact table
# Extract numerical values out of duration_text
for csv_row in csvReader:
# Extract interesting values out of csv row
summary_text = csv_row[SUMMARY_INDEX].replace("'", "''")
duration_text = csv_row[DURATION_INDEX]
posted_date = csv_row[POSTED_DATE_INDEX]
reported_datetime = csv_row[REPORTED_DATETIME_INDEX]
shape_text = csv_row[SHAPE_INDEX].lower()
city_text = csv_row[CITY_INDEX].lower().replace("'", "''")
state_text = csv_row[STATE_INDEX].lower().replace("'", "''")
# Keys to use to link fact to dimensions
# To be populated as the code goes on
summary_key = None
reported_date_key = None
location_key = None
shape_key = None
# Get summary_key
get_summary_key_query = ""
if summary_text == '' or summary_text is None:
get_summary_key_query = """
SELECT summary_key
FROM summary_dimension
WHERE summary IS NULL;
"""
else:
get_summary_key_query = """
SELECT summary_key
FROM summary_dimension
WHERE summary = '%s';
""" % summary_text
cursor.execute(get_summary_key_query)
summary_key_set = cursor.fetchall()
if len(summary_key_set) == 0:
# No key found, create row
create_summary_row_query = ""
if summary_text is None:
create_summary_row_query = """
INSERT INTO summary_dimension(summary)
VALUES (NULL);
"""
else:
create_summary_row_query = """
INSERT INTO summary_dimension(summary)
VALUES ('%s');
""" % summary_text
cursor.execute(create_summary_row_query)
connection.commit()
# Repeat prior select query to get the key of the new summary row
cursor.execute(get_summary_key_query)
summary_key_set = cursor.fetchall()
print get_summary_key_query
summary_key = summary_key_set[0][0]
elif len(summary_key_set) == 1:
# Exactly one key found, get the key out and use it
summary_key = summary_key_set[0][0]
else:
# More than 1 key was found, meaning duplicate data was inserted in the database,
# throw exception and fix code so that this can't happen
continue
# raise DuplicateRowException("More than one summary row with the same text found")
# Get reported date key
get_reported_date_key_query = """
SELECT reported_date_key
FROM reported_date_dimension
WHERE date_actual = to_date('%s', 'YYYY-MM-DD');
""" % reported_datetime[:10]
cursor.execute(get_reported_date_key_query)
reported_date_key = cursor.fetchone()
if reported_date_key is None:
print "Summary of row causing an issue: " + summary_text
continue
# raise MissingDimensionValueException("reported_date not found in reported_date_dimension")
else:
reported_date_key = reported_date_key[0]
# Get posted date key
get_posted_date_key_query = """
SELECT posted_date_key
FROM posted_date_dimension
WHERE date_actual = to_date('%s', 'YYYY-MM-DD');
""" | |
"""
============
Polarization
============
Provides functions for:
- Creating initial belief configurations for various scenarios.
- Creating influence graphs for various scenarios.
- The Esteban-Ray polarization measure.
- Discretizing a belief state into a distribution.
- Updating the belief state of an agent, see the `Update` enum for all types.
"""
import math
from enum import Enum
from functools import partial
import numpy as np
######################################################
## Parameters for the Belief states
######################################################
## number of agents
NUM_AGENTS = 100
## for consensus belief-function: belief value for the consensus belief state
CONSENSUS_VALUE = 0.5
## Values for the old belief configurations:
## -----------------------------------------
## for mildly-polarized belief-function: belief value for the upper end of the low pole of a mildly polarized belief state
LOW_POLE = 0.25
## for mildly-polarized belief-function: belief value for the lower end of the high pole of a mildly polarized belief state
HIGH_POLE = 0.75
## for mildly-polarized belief-function: step of belief change from agent to agent in mildly polarized belief state
BELIEF_STEP = 0.01
######################################################
## Parameters for the Esteban-Ray polarization measure
######################################################
## number of bins when discretizing the belief state
NUM_BINS = 5
## parameter alpha set to what Esteban and Ray suggest
ALPHA = 1.6
## scaling factor for polarization measure
K = 1000
##################################
## Parameters for update functions
##################################
## for conf_bias update-function: confirmation bias discount
CONFBIAS_DISCOUNT = 0.5
## for backfire_effect update-function: minimum difference of beliefs to trigger the backfire effect
BACKFIRE_BELIEF_THRESHOLD = 0.4
## for backfire_effect update-function: minimum influence to trigger the backfire effect
BACKFIRE_INFLUENCE_THRESHOLD = 0.2
#######################################
## Parameters for influence graphs
#######################################
## for clique influence-graph: belief value of all agents on a clique influence graph
CLIQUE_INF_VALUE = 0.5
## for 2_groups_disconnected influence-graph: belief value of all agents that can communicate in a 2 groups_disconnected influence graph
GROUPS_DISCONNECTED_INF_VALUE = 0.5
## for 2_groups_faint influence-graph: belief value of all agents that can strongly communicate in a 2 groups faintly connected influence graph
GROUPS_FAINTLY_INF_VALUE_STRONG = 0.5
## for 2_groups_faint influence-graph: belief value of all agents that can weakly communicate in a 2 groups faintly connected influence graph
GROUPS_FAINTLY_INF_VALUE_WEAK = 0.1
## for 2_influencers_balanced influence-graph: level of influence both influencers exert on all others
INFLUENCERS_BALANCED_OUTGOING_BOTH = 0.6
## for 2_influencers_balanced influence-graph: level of influence both influencers receive from all others
INFLUENCERS_BALANCED_INCOMING_BOTH = 0.0
## for 2_influencers_balanced influence-graph: level of influence all other agents exert on all others
INFLUENCERS_BALANCED_OTHERS = 0.1
## for 2_influencers_unbalanced influence-graph: level of influence agent 0 exerts on all others
INFLUENCERS_UNBALANCED_OUTGOING_FIRST = 0.8
## for 2_influencers_unbalanced influence-graph: level of influence agent n-1 exerts on all others
INFLUENCERS_UNBALANCED_OUTGOING_SECOND = 0.5
## for 2_influencers_unbalanced influence-graph: level of influence agent 0 receives from all others
INFLUENCERS_UNBALANCED_INCOMING_FIRST = 0.1
## for 2_influencers_unbalanced influence-graph: level of influence agent n-1 receives from all others
INFLUENCERS_UNBALANCED_INCOMING_SECOND = 0.1
## for 2_influencers_unbalanced influence-graph: level of influence all other agents exert on all others
INFLUENCERS_UNBALANCED_OTHERS = 0.2
## for circular influence-graph: belief value of all agents on a circular influence graph
CIRCULAR_INF_VALUE = 0.5
############################################
## Representing belief states implementation
############################################
class Belief(Enum):
    """Initial belief-configuration scenarios accepted by `build_belief`
    and `build_old_belief`."""
    UNIFORM = 0
    MILD = 1
    EXTREME = 2
    TRIPLE = 3
    CONSENSUS = 4
## Current representation
def build_belief(belief_type: Belief, num_agents=NUM_AGENTS, **kwargs):
    """Evenly distributes the agents beliefs into subgroups.

    Same inputs as `build_old_belief`; any belief type not handled here is
    delegated to it.  The default values are the constants defined at the
    beginning of the polarization module.
    """
    if belief_type is Belief.MILD:
        half = math.ceil(num_agents / 2)
        beliefs = []
        for agent in range(num_agents):
            if agent < half:
                # Low pole: [0.2, 0.4)
                beliefs.append(0.2 + 0.2 * agent / half)
            else:
                # High pole: [0.6, 0.8)
                beliefs.append(0.6 + 0.2 * (agent - half) / (num_agents - half))
        return beliefs
    if belief_type is Belief.EXTREME:
        half = math.ceil(num_agents / 2)
        beliefs = []
        for agent in range(num_agents):
            if agent < half:
                # Low pole: [0.0, 0.2)
                beliefs.append(0.2 * agent / half)
            else:
                # High pole: [0.8, 1.0)
                beliefs.append(0.8 + 0.2 * (agent - half) / (num_agents - half))
        return beliefs
    if belief_type is Belief.TRIPLE:
        # Split the population into three near-equal segments, each spanning
        # a 0.2-wide band starting at 0.0, 0.4 and 0.8 respectively.
        first = num_agents // 3
        second = math.ceil(num_agents * 2 / 3) - first
        third = num_agents - second - first
        beliefs = []
        for group, size in enumerate((first, second, third)):
            for j in range(size):
                beliefs.append(0.2 * j / size + (0.4 * group))
        return beliefs
    return build_old_belief(belief_type, num_agents, **kwargs)
## Old representation
def build_uniform_beliefs(num_agents):
    """Build a uniform belief state: agent i holds belief i/(num_agents - 1),
    so beliefs span [0, 1] evenly.

    The single-agent case previously raised ZeroDivisionError; it now
    returns [0.0].
    """
    if num_agents == 1:
        return [0.0]
    return [i / (num_agents - 1) for i in range(num_agents)]
def build_old_mild_beliefs(num_agents, low_pole, high_pole, step):
    """Builds a mildly polarized belief state: the lower half of the agents
    increases in `step` increments up to `low_pole` (clamped at 0), and the
    upper half increases from `high_pole` upward (clamped at 1).
    """
    half = math.ceil(num_agents / 2)
    beliefs = []
    for agent in range(num_agents):
        if agent < half:
            beliefs.append(max(low_pole - step * (half - agent - 1), 0))
        else:
            beliefs.append(min(high_pole - step * (half - agent), 1))
    return beliefs
def build_old_extreme_beliefs(num_agents):
    """Builds an extremely polarized belief state: the first half of the
    agents (rounding up) holds belief 0 and the remainder holds belief 1.
    """
    half = math.ceil(num_agents / 2)
    return [int(agent >= half) for agent in range(num_agents)]
def build_old_triple_beliefs(num_agents):
    """Builds a three-pole belief state: roughly one third of the agents
    holds belief 0, one third holds belief 0.5, and the last third holds
    belief 1.
    """
    first_cut = num_agents // 3
    second_cut = math.ceil(2 * num_agents / 3)
    beliefs = []
    for agent in range(num_agents):
        if agent < first_cut:
            beliefs.append(0.0)
        elif agent < second_cut:
            beliefs.append(0.5)
        else:
            beliefs.append(1.0)
    return beliefs
def build_consensus_beliefs(num_agents, belief):
    """Builds a consensus belief state in which every agent holds the same
    `belief` value.
    """
    return [belief for _ in range(num_agents)]
def build_old_belief(
        belief_type,
        num_agents=NUM_AGENTS,
        *,
        low_pole=LOW_POLE,
        high_pole=HIGH_POLE,
        step=BELIEF_STEP,
        consensus_value=CONSENSUS_VALUE):
    """Builds the initial belief state according to the `belief_type`.

    Helper function when iterating over the `Belief` enum. The default values
    are the constants defined at the beginning of the polarization module.

    Raises:
        ValueError: if `belief_type` is not a recognized `Belief` member.
    """
    if belief_type is Belief.UNIFORM:
        return build_uniform_beliefs(num_agents)
    if belief_type is Belief.MILD:
        return build_old_mild_beliefs(num_agents, low_pole, high_pole, step)
    if belief_type is Belief.EXTREME:
        return build_old_extreme_beliefs(num_agents)
    if belief_type is Belief.TRIPLE:
        return build_old_triple_beliefs(num_agents)
    if belief_type is Belief.CONSENSUS:
        return build_consensus_beliefs(num_agents, consensus_value)
    # ValueError (a subclass of Exception) replaces the bare Exception so
    # callers can catch this failure precisely; existing `except Exception`
    # handlers keep working.
    raise ValueError('belief_type not recognized. Expected a `Belief`')
######################################################
## The Esteban-Ray polarization measure implementation
######################################################
def belief_2_dist(belief_vec, num_bins=NUM_BINS):
    """Discretize the belief state `belief_vec` into `num_bins` bins.

    Returns a 2-row numpy array: row 0 holds the bin labels (each bin's
    midpoint) and row 1 the fraction of agents whose belief falls into
    that bin.
    """
    # Each bin is labelled with its midpoint.
    labels = [(i + 0.5) / num_bins for i in range(num_bins)]
    counts = [0] * num_bins
    total = len(belief_vec)
    for belief in belief_vec:
        index = math.floor(belief * num_bins)
        # A belief of exactly 1 lands one past the last bin; clamp it back.
        if index == num_bins:
            index = num_bins - 1
        counts[index] += 1 / total
    # Bundle labels and probabilities into one matrix.
    return np.array([labels, counts])
def make_belief_2_dist_func(num_bins=NUM_BINS):
    """Return a `belief_2_dist` variant with `num_bins` frozen in."""
    frozen = partial(belief_2_dist, num_bins=num_bins)
    # Copy identity metadata so the partial reports like the original.
    frozen.__name__ = belief_2_dist.__name__
    frozen.__doc__ = belief_2_dist.__doc__
    return frozen
def pol_ER(dist, alpha=ALPHA, K=K):
    """Computes the Esteban-Ray polarization of a distribution.

    `dist` is a 2-row array: bin labels on row 0, bin probabilities on row 1.
    """
    labels, probs = dist[0], dist[1]
    # Pairwise absolute distances |y_i - y_j| between every pair of labels.
    pairwise = np.ones((len(labels), 1)) @ labels[np.newaxis]
    pairwise = np.abs(pairwise - np.transpose(pairwise))
    # ER measure: sum_i sum_j p_i^(1+alpha) * |y_i - y_j| * p_j, scaled by K.
    return K * ((probs ** (1 + alpha)) @ pairwise @ probs)
def make_pol_er_func(alpha=ALPHA, K=K):
    """Return a `pol_ER` variant with `alpha` and `K` frozen in."""
    frozen = partial(pol_ER, alpha=alpha, K=K)
    # Copy identity metadata so the partial reports like the original.
    frozen.__name__ = pol_ER.__name__
    frozen.__doc__ = pol_ER.__doc__
    return frozen
def pol_ER_discretized(belief_state, alpha=ALPHA, K=K, num_bins=NUM_BINS):
    """Discretize `belief_state` into `num_bins` bins and return its
    Esteban-Ray polarization.
    """
    dist = belief_2_dist(belief_state, num_bins)
    return pol_ER(dist, alpha, K)
def make_pol_er_discretized_func(alpha=ALPHA, K=K, num_bins=NUM_BINS):
    """Return a `pol_ER_discretized` variant with `alpha`, `K` and
    `num_bins` frozen in.
    """
    frozen = partial(pol_ER_discretized, alpha=alpha, K=K, num_bins=num_bins)
    # Copy identity metadata so the partial reports like the original.
    frozen.__name__ = pol_ER_discretized.__name__
    frozen.__doc__ = pol_ER_discretized.__doc__
    return frozen
#####################################
## Influence graphs implementation
#####################################
def build_inf_graph_clique(num_agents, belief_value):
    """Returns the influence graph for the "clique" scenario: every agent
    influences every other agent (and itself) with `belief_value`.
    """
    graph = np.empty((num_agents, num_agents))
    graph.fill(belief_value)
    return graph
def build_inf_graph_2_groups_disconnected(num_agents, belief_value):
    """Returns the influence graph for the "disconnected" scenario: two
    halves whose agents only influence agents within their own half.
    """
    half = math.ceil(num_agents / 2)
    graph = np.zeros((num_agents, num_agents))
    # Fill the two diagonal blocks; off-diagonal blocks stay zero.
    for group in (slice(None, half), slice(half, None)):
        graph[group, group] = belief_value
    return graph
def build_inf_graph_2_groups_faint(num_agents, weak_belief_value, | |
seqfeature_dbxref
# table as seqfeature_id, dbxref_id, and rank tuples
self._load_seqfeature_dbxref(qualifiers[qualifier_key],
seqfeature_id)
def _load_seqfeature_dbxref(self, dbxrefs, seqfeature_id):
"""Add database crossreferences of a SeqFeature to the database (PRIVATE).
o dbxrefs List, dbxref data from the source file in the
format <database>:<accession>
o seqfeature_id Int, the identifier for the seqfeature in the
seqfeature table
Insert dbxref qualifier data for a seqfeature into the
seqfeature_dbxref and, if required, dbxref tables.
The dbxref_id qualifier/value sets go into the dbxref table
as dbname, accession, version tuples, with dbxref.dbxref_id
being automatically assigned, and into the seqfeature_dbxref
table as seqfeature_id, dbxref_id, and rank tuples
"""
# NOTE - In older versions of Biopython, we would map the GenBank
# db_xref "name", for example "GI" to "GeneIndex", and give a warning
# for any unknown terms. This was a long term maintainance problem,
# and differed from BioPerl and BioJava's implementation. See bug 2405
for rank, value in enumerate(dbxrefs):
# Split the DB:accession format string at colons. We have to
# account for multiple-line and multiple-accession entries
try:
dbxref_data = value.replace(' ','').replace('\n','').split(':')
db = dbxref_data[0]
accessions = dbxref_data[1:]
except:
raise ValueError("Parsing of db_xref failed: '%s'" % value)
# Loop over all the grabbed accessions, and attempt to fill the
# table
for accession in accessions:
# Get the dbxref_id value for the dbxref data
dbxref_id = self.handler._get_dbxref_id(db, accession)
# Insert the seqfeature_dbxref data
self._get_seqfeature_dbxref(seqfeature_id, dbxref_id, rank+1)
def _get_seqfeature_dbxref(self, seqfeature_id, dbxref_id, rank):
""" Check for a pre-existing seqfeature_dbxref entry with the passed
seqfeature_id and dbxref_id. If one does not exist, insert new
data
"""
# Check for an existing record
result = self.adaptor((self.adaptor.seqfeature_dbxref.seqfeature_id == seqfeature_id) &
(self.adaptor.seqfeature_dbxref.dbxref_id == dbxref_id)).select(self.adaptor.seqfeature_dbxref.seqfeature_id,
self.adaptor.seqfeature_dbxref.dbxref_id)
# If there was a record, return without executing anything, else create
# the record and return
if result:
return (result[0].seqfeature_id, result[0].dbxref_id)
return self._add_seqfeature_dbxref(seqfeature_id, dbxref_id, rank)
def _add_seqfeature_dbxref(self, seqfeature_id, dbxref_id, rank):
""" Insert a seqfeature_dbxref row and return the seqfeature_id and
dbxref_id
"""
self.adaptor.seqfeature_dbxref.insert(seqfeature_id = seqfeature_id,
dbxref_id = dbxref_id,
rank = rank)
return (seqfeature_id, dbxref_id)
class BioSQLBioentryFeatures(BaseBioSQL):
    ''' TO DO
    handles feature comparisons and avoid duplications
    handler is an initiated BioSQLHandler object
    bioentry_id and key (qualifier term name) are required to identify the annotation
    this handles a single feature add/update/delete
    if rank is specified an available annotation is searched,
    otherwise a new one will be created
    usage:
    to download an existing feature use
    >>> feat = BioSQLFeature(handler, bioentry_id=123341, rank=1)
    >>> feat.feature
    SeqFeature object
    to create a new feature for a given bioentry
    >>> feat = BioSQLFeature(handler, bioentry_id=123341)
    >>> feat.get(SeqFeature object)
    or directly
    >>> feat = BioSQLFeature(handler, bioentry_id=123341, feature = SeqFeature object)
    then
    >>> feat.feature
    SeqFeature object
    modify the feat.feature
    if needed and then use
    >>> feat.sync()
    for DB persistence
    to delete a given feature form the db use
    >>> feat.delete()
    '''
    def __init__(self, handler, bioentry_id,):
        BaseBioSQL.__init__(self, handler = handler)
        self.bioentry_id = bioentry_id
        self.features = dict()  # rank -> BioSQLFeature dictionary
        self._get()
    def _get(self):
        '''Load all available features for this bioentry, keyed by rank.'''
        rows = self.adaptor((self.adaptor.seqfeature.bioentry_id == self.bioentry_id)).select()
        if rows:
            for row in rows:
                if row.rank in self.features:
                    raise ValueError('multiple seqfeatures present with the same rank')
                else:
                    self.features[row.rank] = BioSQLFeature(handler = self.handler, bioentry_id = self.bioentry_id, rank= row.rank)
    def get_ordered_seqfeatures(self):
        '''Return the SeqFeature objects of this bioentry sorted by rank.

        Fixed: previously iterated `self.feature`, an attribute that does not
        exist (always raised AttributeError); the dict is `self.features`.
        '''
        ordered = []
        for key in sorted(self.features):
            ordered.append(self.features[key].feature)
        return ordered
class BioSQLBioentryDBXrefs(BaseBioSQL):
    '''Collect the "DBNAME:ACCESSION[.VERSION]" crossreference strings of a
    bioentry into `self.dbxrefs`, ordered by bioentry_dbxref.rank.'''
    def __init__(self, handler, bioentry_id, ):
        BaseBioSQL.__init__(self, handler = handler)
        self.bioentry_id = bioentry_id
        self.dbxrefs = []
        self._get()
    def _get(self,):
        # Pull (dbname, accession, version) triples for this bioentry in rank order.
        triples = ((dbxref.dbname, dbxref.accession, dbxref.version) for dbxref in self.adaptor((self.adaptor.bioentry.bioentry_id == self.bioentry_id) & \
                   (self.adaptor.bioentry.bioentry_id == self.adaptor.bioentry_dbxref.bioentry_id) & \
                   (self.adaptor.bioentry_dbxref.dbxref_id == self.adaptor.dbxref.dbxref_id)).select(
                   self.adaptor.dbxref.dbname, self.adaptor.dbxref.accession, self.adaptor.dbxref.version, orderby=self.adaptor.bioentry_dbxref.rank))
        for dbname, accession, version in triples:
            if version and version != "0":
                # A meaningful version is appended to the accession.
                self.dbxrefs.append("%s:%s.%s" % (dbname, accession, version))
            else:
                self.dbxrefs.append("%s:%s" % (dbname, accession))
class BioSQLSeq(BaseBioSQL):
    def __init__(self, handler, bioentry_id):
        '''handler is an initiated BioSQLHandler object
        bioentry_id is required to identify the bioentry sequence. only one
        sequence per bioentry is allowed by the biosql schema.
        usage:
        >>> seq = BioSQLSeq(handler, bioentry_id=123341, )
        >>> seq.seq
        Seq('PRLNMLKAWHYTCVNGH', ProteinAlphabet)
        >>> seq.sync()
        if the sequence does not exist, a new one is created
        please note that by setting
        >>> seq.seq = '' or [] or None
        >>> seq.delete()
        '''
        self.handler = handler
        self.adaptor = handler.adaptor
        self.bioentry_id = bioentry_id
        self.seq = None       # Seq/UnknownSeq once loaded by _get()
        self.length = None    # sequence length as stored/derived
        self.alphabet = None  # molecule type string: dna/rna/protein/unknown
        self._get()
    def _get(self):
        '''modified from biopython, now returns a Seq object and not a DBSeq object '''
        #The database schema ensures there will be only one matching
        #row in the table.
        #If an UnknownSeq was recorded, seq will be NULL,
        #but length will be populated. This means length(seq)
        #will return None.
        seqs = [(row.alphabet,row.length,len(row.seq),row.seq) for row in \
                self.adaptor(self.adaptor.biosequence.bioentry_id == self.bioentry_id \
                ).select(self.adaptor.biosequence.bioentry_id,
                         self.adaptor.biosequence.alphabet,
                         self.adaptor.biosequence.length,
                         self.adaptor.biosequence.seq,)]
        if not seqs : return
        assert len(seqs) == 1
        moltype, given_length, length, seq = seqs[0]
        try:
            length = int(length)
            # Fixed: previously `given_length = int(length)`, which compared
            # `length` against itself and could never detect a mismatch
            # between len(seq) and the stored biosequence.length column.
            given_length = int(given_length)
            assert length == given_length
            have_seq = True
        except TypeError:
            # seq column was NULL: only the declared length is available.
            assert length is None
            assert len(seqs) == 1
            assert seq is None or seq==""
            length = int(given_length)
            have_seq = False
        del given_length
        moltype = moltype.lower() #might be upper case in database
        #We have no way of knowing if these sequences will use IUPAC
        #alphabets, and we certainly can't assume they are unambiguous!
        if moltype == "dna":
            alphabet = Alphabet.generic_dna
        elif moltype == "rna":
            alphabet = Alphabet.generic_rna
        elif moltype == "protein":
            alphabet = Alphabet.generic_protein
        elif moltype == "unknown":
            #This is used in BioSQL/Loader.py and would happen
            #for any generic or nucleotide alphabets.
            alphabet = Alphabet.single_letter_alphabet
        else:
            raise AssertionError("Unknown moltype: %s" % moltype)
        if have_seq:
            self.seq = Seq(seq, alphabet,)
        else:
            self.seq = UnknownSeq(length, alphabet)
        self.length = length
        self.alphabet = moltype
    def _insert(self, value):
        '''PRIVATE - not implemented yet.'''
        pass
    def get(self, annotation):
        '''Not implemented yet.'''
        pass
    def sync(self, clean = False):
        '''Not implemented yet: persist self.seq back to the database.'''
        pass
    def delete(self):
        '''Not implemented yet: remove this bioentry's biosequence row.
        (Docstring fixed: the previous text described bioentry_qualifier
        rows and was copied from another class.)'''
        pass
class BioSQLRelation(BaseBioSQL):
    '''TO DO -- placeholder, not implemented yet.'''
    pass
class BioSQLComment(BaseBioSQL):
    '''TO DO. needed? -- placeholder, not implemented yet.'''
    pass
class BioSQLReference(BaseBioSQL):
    def __init__(self, handler, bioentry_id, ):
        # handler is an initiated BioSQLHandler object; bioentry_id selects
        # which bioentry's literature references to load.
        BaseBioSQL.__init__(self, handler = handler)
        self.bioentry_id = bioentry_id
        self.references = []  # filled by _get() with SeqFeature.Reference objects
        self._get()
    def _get(self,):
        '''Load this bioentry's literature references into self.references.

        web2py has a bug with left joins putting left joins before joins in
        the generated SQL. this is allowed (even if incorrect) by any
        db-backend except postgres.  As a workaround, two queries are issued
        and the dbxref data is merged in where available.
        single query working code should be:
        refs=self.adaptor((self.adaptor.bioentry_reference.bioentry_id==bioentry_id) &
        (self.adaptor.bioentry_reference.reference_id==self.adaptor.reference.reference_id)
        )._select(orderby=self.adaptor.bioentry_reference.rank, left=self.adaptor.reference.on(self.adaptor.reference.dbxref_id==self.adaptor.dbxref.dbxref_id))
        '''
        # Query 1: the reference rows for this bioentry, in rank order.
        refs=((row.bioentry_reference.start_pos,
               row.bioentry_reference.end_pos,
               row.reference.location,
               row.reference.title,
               row.reference.authors,
               row.reference.dbxref_id,) for row in self.adaptor((self.adaptor.bioentry_reference.bioentry_id == self.bioentry_id) &
                                            (self.adaptor.bioentry_reference.reference_id == self.adaptor.reference.reference_id)
                                            ).select(self.adaptor.reference.reference_id,
                                                     self.adaptor.reference.location,
                                                     self.adaptor.reference.title,
                                                     self.adaptor.reference.authors,
                                                     self.adaptor.reference.dbxref_id,
                                                     self.adaptor.bioentry_reference.start_pos,
                                                     self.adaptor.bioentry_reference.end_pos,
                                                     orderby=self.adaptor.bioentry_reference.rank,))
        # Query 2: dbxref (dbname, accession) per reference dbxref_id; only
        # references that actually have a dbxref appear in this mapping.
        refs_dbxrefs=dict(((row.reference.dbxref_id,dict(dbname=row.dbxref.dbname,accession=row.dbxref.accession)) for row in self.adaptor((self.adaptor.bioentry_reference.bioentry_id == self.bioentry_id) &
                       (self.adaptor.bioentry_reference.reference_id==self.adaptor.reference.reference_id) &
                       (self.adaptor.reference.dbxref_id==self.adaptor.dbxref.dbxref_id)
                       ).select(self.adaptor.reference.dbxref_id,
                                self.adaptor.dbxref.dbname,
                                self.adaptor.dbxref.accession,
                                orderby=self.adaptor.bioentry_reference.rank,)))
        # Merge the two result sets into SeqFeature.Reference objects.
        for start, end, location, title, authors, dbxref_id in refs:
            if dbxref_id in refs_dbxrefs:
                dbname = refs_dbxrefs[dbxref_id]['dbname']
                accession = refs_dbxrefs[dbxref_id]['accession']
            else:
                dbname,accession=None,None
            reference = SeqFeature.Reference()
            #If the start/end are missing, reference.location is an empty list
            if (start is not None) or (end is not None):
                if start is not None: start -= 1 #python counting
                reference.location = [SeqFeature.FeatureLocation(start, end)]
            #Don't replace the default "" with None.
            if authors : reference.authors = authors
            if title : reference.title = title
            reference.journal = location
            # Map well-known crossreference databases onto the dedicated
            # Reference id fields.
            if dbname == 'PUBMED':
                reference.pubmed_id = accession
            elif dbname == 'MEDLINE':
                reference.medline_id = accession
            self.references.append(reference)
def _insert(self, reference):
#def _load_reference(self, reference, rank, bioentry_id):
"""Record a SeqRecord's annotated references in the database (PRIVATE).
record - a SeqRecord object with annotated references
bioentry_id - corresponding database identifier
"""
ranks = self.adaptor(self.adaptor.bioentry_reference.bioentry_id ==self.bioentry_id).select(self.adaptor.bioentry_reference.rank)
if ranks:
rank = max([row.rank for row in ranks]) +1
else:
rank = 1
refs | |
x in range(13): #If they are supposed to give Item, write text
ROM.write(Item1text[x])
Pointer+=1
if GiveJet == y:
for x in range(13):
ROM.write(Item2text[x])
Pointer+=1
if GiveI3 == y:
for x in range(13):
ROM.write(Item3text[x])
Pointer+=1
if y == 7:
ROM.write(b'\x00') #If this is the last one, write the terminator at the end
End2.append(Pointer) #Used to recalculate offsets for text
if randomweapons == True: #!
for y in range(8):
if weapons[y] == Airbyte:
Seek = ROM.seek(Pointer,0)
for x in range(19): #Writes first part
ROM.write(GetEquippedtext[x])
Pointer+=1
z = len(AirShooterReceived)
AirShooterReceived.remove(b'\x20')
z -= 1
for x in range(z):#Writes second part
if x == 3:
ROM.write(b'\x0A')
Pointer+=1
ROM.write(AirShooterReceived[x])
Pointer+=1
ROM.write(b'\x0B')
Pointer+=1
if y == 0: #Checks to see what palette value should be written based on position
Value = (b'\x30')
elif y == 1:
Value = (b'\x31')
elif y == 2:
Value = (b'\x32')
elif y == 3:
Value = (b'\x33')
elif y == 4:
Value = (b'\x34')
elif y == 5:
Value = (b'\x35')
elif y == 6:
Value = (b'\x36')
elif y == 7:
Value = (b'\x37')
ROM.write(Value)
Pointer+=1
if GiveMarine == y:
for x in range(13): #If they are supposed to give Item, write text
ROM.write(Item1text[x])
Pointer+=1
if GiveJet == y:
for x in range(13):
ROM.write(Item2text[x])
Pointer+=1
if GiveI3 == y:
for x in range(13):
ROM.write(Item3text[x])
Pointer+=1
if y == 7:
ROM.write(b'\x00') #If this is the last one, write the terminator at the end
End2.append(Pointer) #Used to recalculate offsets for text
elif weapons[y] == Bubblebyte:
Seek = ROM.seek(Pointer,0)
for x in range(19): #Writes first part
ROM.write(GetEquippedtext[x])
Pointer+=1
z = len(BubbleLeadReceived)
BubbleLeadReceived.remove(b'\x20')
z -= 1
for x in range(z):#Writes second part
if x == 6:
ROM.write(b'\x0A')
Pointer+=1
ROM.write(BubbleLeadReceived[x])
Pointer+=1
ROM.write(b'\x0B')
Pointer+=1
if y == 0: #Checks to see what palette value should be written based on position
Value = (b'\x30')
elif y == 1:
Value = (b'\x31')
elif y == 2:
Value = (b'\x32')
elif y == 3:
Value = (b'\x33')
elif y == 4:
Value = (b'\x34')
elif y == 5:
Value = (b'\x35')
elif y == 6:
Value = (b'\x36')
elif y == 7:
Value = (b'\x37')
ROM.write(Value)
Pointer+=1
if GiveMarine == y:
for x in range(13): #If they are supposed to give Item, write text
ROM.write(Item1text[x])
Pointer+=1
if GiveJet == y:
for x in range(13):
ROM.write(Item2text[x])
Pointer+=1
if GiveI3 == y:
for x in range(13):
ROM.write(Item3text[x])
Pointer+=1
if y == 7:
ROM.write(b'\x00') #If this is the last one, write the terminator at the end
End2.append(Pointer) #Used to recalculate offsets for text
elif weapons[y] == Quickbyte:
Seek = ROM.seek(Pointer,0)
for x in range(19): #Writes first part
ROM.write(GetEquippedtext[x])
Pointer+=1
z = len(QuickBoomerangReceived)
QuickBoomerangReceived.remove(b'\x20')
z -= 1
for x in range(z):#Writes second part
if x == 5:
ROM.write(b'\x0A')
Pointer+=1
ROM.write(QuickBoomerangReceived[x])
Pointer+=1
ROM.write(b'\x0B')
Pointer+=1
if y == 0: #Checks to see what palette value should be written based on position
Value = (b'\x30')
elif y == 1:
Value = (b'\x31')
elif y == 2:
Value = (b'\x32')
elif y == 3:
Value = (b'\x33')
elif y == 4:
Value = (b'\x34')
elif y == 5:
Value = (b'\x35')
elif y == 6:
Value = (b'\x36')
elif y == 7:
Value = (b'\x37')
ROM.write(Value)
Pointer+=1
if GiveMarine == y:
for x in range(13): #If they are supposed to give Item, write text
ROM.write(Item1text[x])
Pointer+=1
if GiveJet == y:
for x in range(13):
ROM.write(Item2text[x])
Pointer+=1
if GiveI3 == y:
for x in range(13):
ROM.write(Item3text[x])
Pointer+=1
if y == 7:
ROM.write(b'\x00') #If this is the last one, write the terminator at the end
End2.append(Pointer) #Used to recalculate offsets for text
elif weapons[y] == Heatbyte:
Seek = ROM.seek(Pointer,0)
for x in range(19): #Writes first part
ROM.write(GetEquippedtext[x])
Pointer+=1
z = len(AtomicFireReceived)
AtomicFireReceived.remove(b'\x20')
z -= 1
for x in range(z):#Writes second part
if x == 6:
ROM.write(b'\x0A')
Pointer+=1
ROM.write(AtomicFireReceived[x])
Pointer+=1
ROM.write(b'\x0B')
Pointer+=1
if y == 0: #Checks to see what palette value should be written based on position
Value = (b'\x30')
elif y == 1:
Value = (b'\x31')
elif y == 2:
Value = (b'\x32')
elif y == 3:
Value = (b'\x33')
elif y == 4:
Value = (b'\x34')
elif y == 5:
Value = (b'\x35')
elif y == 6:
Value = (b'\x36')
elif y == 7:
Value = (b'\x37')
ROM.write(Value)
Pointer+=1
if GiveMarine == y:
for x in range(13): #If they are supposed to give Item, write text
ROM.write(Item1text[x])
Pointer+=1
if GiveJet == y:
ROM.write(b'\x0A')
Pointer+=1
for x in range(13):
ROM.write(Item2text[x])
Pointer+=1
if GiveI3 == y:
ROM.write(b'\x0A')
Pointer+=1
for x in range(13):
ROM.write(Item3text[x])
Pointer+=1
if y == 7:
ROM.write(b'\x00') #If this is the last one, write the terminator at the end
End2.append(Pointer) #Used to recalculate offsets for text
elif weapons[y] == Woodbyte:
Seek = ROM.seek(Pointer,0)
for x in range(19): #Writes first part
ROM.write(GetEquippedtext[x])
Pointer+=1
z = len(LeafShieldReceived)
LeafShieldReceived.remove(b'\x20')
z -= 1
for x in range(z):#Writes second part
if x == 4:
ROM.write(b'\x0A')
Pointer+=1
ROM.write(LeafShieldReceived[x])
Pointer+=1
ROM.write(b'\x0B')
Pointer+=1
if y == 0: #Checks to see what palette value should be written based on position
Value = (b'\x30')
elif y == 1:
Value = (b'\x31')
elif y == 2:
Value = (b'\x32')
elif y == 3:
Value = (b'\x33')
elif y == 4:
Value = (b'\x34')
elif y == 5:
Value = (b'\x35')
elif y == 6:
Value = (b'\x36')
elif y == 7:
Value = (b'\x37')
ROM.write(Value)
Pointer+=1
if GiveMarine == y:
for x in range(13): #If they are supposed to give Item, write text
ROM.write(Item1text[x])
Pointer+=1
if GiveJet == y:
for x in range(13):
ROM.write(Item2text[x])
Pointer+=1
if GiveI3 == y:
for x in range(13):
ROM.write(Item3text[x])
Pointer+=1
if y == 7:
ROM.write(b'\x00') #If this is the last one, write the terminator at the end
End2.append(Pointer) #Used to recalculate offsets for text
elif weapons[y] == Metalbyte:
Seek = ROM.seek(Pointer,0)
for x in range(19): #Writes first part
ROM.write(GetEquippedtext[x])
Pointer+=1
z = len(MetalBladeReceived)
MetalBladeReceived.remove(b'\x20')
z -= 1
for x in range(z):#Writes second part
if x == 5:
ROM.write(b'\x0A')
Pointer+=1
ROM.write(MetalBladeReceived[x])
Pointer+=1
ROM.write(b'\x0B')
Pointer+=1
if y == 0: #Checks to see what palette value should be written based on position
Value = (b'\x30')
elif y == 1:
Value = (b'\x31')
elif y == 2:
Value = (b'\x32')
elif y == 3:
Value = (b'\x33')
elif y == 4:
Value = (b'\x34')
elif y == 5:
Value = (b'\x35')
elif y == 6:
Value = (b'\x36')
elif y == 7:
Value = (b'\x37')
ROM.write(Value)
Pointer+=1
if GiveMarine == y:
for x in range(13): #If they are supposed to give Item, write text
ROM.write(Item1text[x])
Pointer+=1
if GiveJet == y:
for x in range(13):
ROM.write(Item2text[x])
Pointer+=1
if GiveI3 == y:
for x in range(13):
ROM.write(Item3text[x])
Pointer+=1
if y == 7:
ROM.write(b'\x00') #If this is the last one, write the terminator at the end
End2.append(Pointer) #Used to recalculate offsets for text
elif weapons[y] == Flashbyte:
Seek = ROM.seek(Pointer,0)
for x in range(19): #Writes first part
ROM.write(GetEquippedtext[x])
Pointer+=1
z = len(TimeStopperReceived)
TimeStopperReceived.remove(b'\x20')
z -= 1
for x in range(z):#Writes second part
if x == 4:
ROM.write(b'\x0A')
Pointer+=1
ROM.write(TimeStopperReceived[x])
Pointer+=1
ROM.write(b'\x0B')
Pointer+=1
if y == 0: #Checks to see what palette value should be written based on position
Value = (b'\x30')
elif y == 1:
Value = (b'\x31')
elif y == 2:
Value = (b'\x32')
elif y == 3:
Value = (b'\x33')
elif y == 4:
Value = (b'\x34')
elif y == 5:
Value = (b'\x35')
elif y == 6:
Value = (b'\x36')
elif y == 7:
Value = (b'\x37')
ROM.write(Value)
Pointer+=1
if GiveMarine == y:
for x in range(13): #If they are supposed to give Item, write text
ROM.write(Item1text[x])
Pointer+=1
if GiveJet == y:
for x in range(13):
ROM.write(Item2text[x])
Pointer+=1
if GiveI3 == y:
| |
<filename>src/cbapi/connection.py
#!/usr/bin/env python
"""Manages the CBAPI connection to the server."""
from __future__ import absolute_import
import requests
import sys
from requests.adapters import HTTPAdapter, DEFAULT_POOLBLOCK, DEFAULT_RETRIES, DEFAULT_POOLSIZE, DEFAULT_POOL_TIMEOUT
try:
from requests.packages.urllib3.util.ssl_ import create_urllib3_context
REQUESTS_HAS_URLLIB_SSL_CONTEXT = True
except ImportError:
REQUESTS_HAS_URLLIB_SSL_CONTEXT = False
import ssl
# Older versions of requests (such as the one packaged with Splunk) do not have a Retry object
# in the packaged version of urllib3. Fall back gracefully.
try:
from requests.packages.urllib3 import Retry
MAX_RETRIES = Retry(total=5, status_forcelist=[502, 504], backoff_factor=0.5)
except ImportError:
MAX_RETRIES = 5
import logging
import json
from cbapi.six import iteritems
from cbapi.six.moves import urllib
from .auth import CredentialStoreFactory, Credentials
from .errors import ClientError, QuerySyntaxError, ServerError, TimeoutError, ApiError, ObjectNotFoundError, \
UnauthorizedError, ConnectionError
from . import __version__
from .cache.lru import lru_cache_function
from .models import CreatableModelMixin
from .utils import calculate_elapsed_time, convert_query_params
log = logging.getLogger(__name__)
def try_json(resp):
    """
    Parse the body of a response object as JSON.

    Args:
        resp (object): Response-like object exposing a ``json()`` method.

    Returns:
        object: The decoded JSON payload, or an empty dict when the body is
            not valid JSON.
    """
    try:
        parsed = resp.json()
    except ValueError:
        parsed = {}
    return parsed
def check_python_tls_compatibility():
    """
    Verify which level of TLS/SSL that this version of the code is compatible with.

    Probes by attempting to build a TLS1.2-forcing session adapter; if that
    fails, the best available protocol is inferred from which OP_NO_* flags
    the ssl module exposes.

    Returns:
        str: The maximum level of TLS/SSL that this version is compatible with.
    """
    try:
        CbAPISessionAdapter(force_tls_1_2=True)
    except Exception:
        # Could not force TLS 1.2 -- work out what the ssl module supports.
        if "OP_NO_TLSv1_1" not in ssl.__dict__:
            return "TLSv1.0"
        if "OP_NO_TLSv1" not in ssl.__dict__:
            return "SSLv3"
        if "OP_NO_SSLv3" not in ssl.__dict__:
            return "SSLv2"
        return "Unknown"
    return "TLSv1.2"
class CbAPISessionAdapter(HTTPAdapter):
    """Adapter object used to handle TLS connections to the CB server."""
    def __init__(self, verify_hostname=True, force_tls_1_2=False, max_retries=DEFAULT_RETRIES, **pool_kwargs):
        """
        Initialize the CbAPISessionManager.
        Args:
            verify_hostname (boolean): True if we want to verify the hostname.
            force_tls_1_2 (boolean): True to force the use of TLS 1.2.
            max_retries (int): Maximum number of retries.
            **pool_kwargs: Additional arguments.
        Raises:
            ApiError: If the library versions are too old to force the use of TLS 1.2.
        """
        # Stash the TLS options first: they are consumed by init_poolmanager(),
        # which requests invokes from HTTPAdapter.__init__ (the super() call below).
        self._cbapi_verify_hostname = verify_hostname
        self._cbapi_force_tls_1_2 = force_tls_1_2
        if force_tls_1_2 and not REQUESTS_HAS_URLLIB_SSL_CONTEXT:
            # create_urllib3_context() could not be imported at module load time,
            # so a TLS1.2-only context cannot be built -- fail fast.
            raise ApiError("Cannot force the use of TLS1.2: Python, urllib3, and requests versions are too old.")
        super(CbAPISessionAdapter, self).__init__(max_retries=max_retries, **pool_kwargs)
    def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs):
        """
        Initialize the connection pool manager.
        Args:
            connections (int): Initial number of connections to be used.
            maxsize (int): Maximum size of the connection pool.
            block (object): Blocking policy.
            **pool_kwargs: Additional arguments for the connection pool.
        Returns:
            object: The pool manager created by HTTPAdapter.init_poolmanager.
        """
        if self._cbapi_force_tls_1_2 and REQUESTS_HAS_URLLIB_SSL_CONTEXT:
            # Force the use of TLS v1.2 when talking to this Cb Response server.
            # Disabling every protocol below 1.2 leaves TLS 1.2 as the only option.
            context = create_urllib3_context(ciphers=('TLSv1.2:!aNULL:!eNULL:!MD5'))
            context.options |= ssl.OP_NO_SSLv2
            context.options |= ssl.OP_NO_SSLv3
            context.options |= ssl.OP_NO_TLSv1
            context.options |= ssl.OP_NO_TLSv1_1
            pool_kwargs['ssl_context'] = context
        if not self._cbapi_verify_hostname:
            # Provide the ability to validate a Carbon Black server's SSL certificate without validating the hostname
            # (by default Carbon Black certificates are "issued" as CN=Self-signed Carbon Black Enterprise Server
            # HTTPS Certificate)
            pool_kwargs["assert_hostname"] = False
        return super(CbAPISessionAdapter, self).init_poolmanager(connections, maxsize, block, **pool_kwargs)
class Connection(object):
    """Object that encapsulates the HTTP connection to the CB server."""
    def __init__(self, credentials, integration_name=None, timeout=None, max_retries=None, **pool_kwargs):
        """
        Initialize the Connection object.
        Args:
            credentials (object): The credentials to use for the connection.
            integration_name (str): The integration name being used.
            timeout (int): The timeout value to use for HTTP requests on this connection.
            max_retries (int): The maximum number of times to retry a request.
            **pool_kwargs: Additional arguments to be used to initialize connection pooling.
        Raises:
            ApiError: If there's an internal error initializing the connection.
            ConnectionError: If there's a problem with the credentials.
        """
        if not credentials.url or not credentials.url.startswith("https://"):
            raise ConnectionError("Server URL must be a URL: eg. https://localhost")
        if not credentials.token:
            raise ConnectionError("No API token provided")
        # Normalize the base URL so request paths can be appended verbatim.
        self.server = credentials.url.rstrip("/")
        self.ssl_verify = credentials.ssl_verify
        if not self.ssl_verify:
            # Verification disabled: silence urllib3's InsecureRequestWarning.
            # Best-effort only -- older requests bundles may not expose the class.
            try:
                from requests.packages.urllib3.exceptions import InsecureRequestWarning
                requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
            except Exception:
                pass
        else:
            # A custom CA bundle path doubles as requests' 'verify' argument.
            if credentials.ssl_cert_file:
                self.ssl_verify = credentials.ssl_cert_file
        user_agent = "cbapi/{0:s} Python/{1:d}.{2:d}.{3:d}" \
            .format(__version__, sys.version_info[0], sys.version_info[1], sys.version_info[2])
        if integration_name:
            user_agent += " {}".format(integration_name)
        self.token = credentials.token
        self.token_header = {'X-Auth-Token': self.token, 'User-Agent': user_agent}
        self.session = requests.Session()
        self._timeout = timeout
        if max_retries is None:
            # MAX_RETRIES is a urllib3 Retry policy when available, else a plain int.
            max_retries = MAX_RETRIES
        try:
            tls_adapter = CbAPISessionAdapter(max_retries=max_retries, force_tls_1_2=credentials.ssl_force_tls_1_2,
                                              verify_hostname=credentials.ssl_verify_hostname, **pool_kwargs)
        except ssl.SSLError as e:
            raise ApiError("This version of Python and OpenSSL do not support TLSv1.2: {}".format(e),
                           original_exception=e)
        except Exception as e:
            raise ApiError("Unknown error establishing cbapi session: {0}: {1}".format(e.__class__.__name__, e),
                           original_exception=e)
        self.session.mount(self.server, tls_adapter)
        self.proxies = {}
        if credentials.ignore_system_proxy:  # see https://github.com/kennethreitz/requests/issues/879
            # Unfortunately, requests will look for any proxy-related environment variables and use those anyway. The
            # only way to solve this without side effects, is passing in empty strings for 'http' and 'https':
            self.proxies = {
                'http': '',
                'https': '',
                'no': 'pass'
            }
        else:
            if credentials.proxy:
                self.proxies['http'] = credentials.proxy
                self.proxies['https'] = credentials.proxy
    def http_request(self, method, url, **kwargs):
        """
        Submit a HTTP request to the server.
        Args:
            method (str): The method name to use for the HTTP request.
            url (str): The URL to submit the request to.
            **kwargs: Additional arguments for the request.
        Returns:
            object: Result of the HTTP request.
        Raises:
            ApiError: An unknown problem was detected.
            ClientError: The server returned an error code in the 4xx range, indicating a problem with the request.
            ConnectionError: A problem was seen with the HTTP connection.
            ObjectNotFoundError: The specified object was not found on the server.
            QuerySyntaxError: The query passed in had invalid syntax.
            ServerError: The server returned an error code in the 5xx range, indicating a problem on the server side.
            TimeoutError: The HTTP request timed out.
            UnauthorizedError: The stored credentials do not permit access to the specified request.
        """
        method = method.upper()
        # NOTE(review): because 'or' treats False as falsy, an explicit
        # verify=False passed by a caller is replaced by self.ssl_verify here.
        verify_ssl = kwargs.pop('verify', None) or self.ssl_verify
        proxies = kwargs.pop('proxies', None) or self.proxies
        new_headers = kwargs.pop('headers', None)
        if new_headers:
            # Copy so per-request headers never mutate the shared token header dict.
            headers = self.token_header.copy()
            headers.update(new_headers)
        else:
            headers = self.token_header
        uri = self.server + url
        try:
            raw_data = kwargs.get("data", None)
            if raw_data:
                log.debug("Sending HTTP {0} {1} with {2}".format(method, url, raw_data))
            r = self.session.request(method, uri, headers=headers, verify=verify_ssl, proxies=proxies,
                                     timeout=self._timeout, **kwargs)
            log.debug('HTTP {0:s} {1:s} took {2:.3f}s (response {3:d})'.format(method, url,
                                                                               calculate_elapsed_time(r.elapsed),
                                                                               r.status_code))
        except requests.Timeout as timeout_error:
            raise TimeoutError(uri=uri, original_exception=timeout_error)
        except requests.ConnectionError as connection_error:
            raise ConnectionError("Received a network connection error from {0:s}: {1:s}"
                                  .format(self.server, str(connection_error)),
                                  original_exception=connection_error)
        except Exception as e:
            raise ApiError("Unknown exception when connecting to server: {0:s}".format(str(e)),
                           original_exception=e)
        else:
            # Map HTTP status codes onto the cbapi exception hierarchy; anything
            # below 400 falls through and the response is returned to the caller.
            if r.status_code >= 500:
                raise ServerError(error_code=r.status_code, message=r.text)
            elif r.status_code == 404:
                raise ObjectNotFoundError(uri=uri, message=r.text)
            elif r.status_code == 401:
                raise UnauthorizedError(uri=uri, action=method, message=r.text)
            elif r.status_code == 400 and try_json(r).get('reason') == 'query_malformed_syntax':
                raise QuerySyntaxError(uri=uri, message=r.text)
            elif r.status_code >= 400:
                raise ClientError(error_code=r.status_code, message=r.text)
            return r
    def get(self, url, **kwargs):
        """
        Submit a GET request on this connection.
        Args:
            url (str): The URL to submit the request to.
            **kwargs: Additional arguments for the request.
        Returns:
            object: Result of the HTTP request.
        """
        return self.http_request("GET", url, **kwargs)
    def post(self, url, **kwargs):
        """
        Submit a POST request on this connection.
        Args:
            url (str): The URL to submit the request to.
            **kwargs: Additional arguments for the request.
        Returns:
            object: Result of the HTTP request.
        """
        return self.http_request("POST", url, **kwargs)
    def put(self, url, **kwargs):
        """
        Submit a PUT request on this connection.
        Args:
            url (str): The URL to submit the request to.
            **kwargs: Additional arguments for the request.
        Returns:
            object: Result of the HTTP request.
        """
        return self.http_request("PUT", url, **kwargs)
    def delete(self, url, **kwargs):
        """
        Submit a DELETE request on this connection.
        Args:
            url (str): The URL to submit the request to.
            **kwargs: Additional arguments for the request.
        Returns:
            object: Result of the HTTP request.
        """
        return self.http_request("DELETE", url, **kwargs)
class BaseAPI(object):
"""The base API object used by all CBAPI objects to communicate with the server."""
def __init__(self, *args, **kwargs):
"""
Initialize the base API information.
Args:
*args: TBD
**kwargs: Additional arguments.
"""
product_name = kwargs.pop("product_name", None)
credential_file = kwargs.pop("credential_file", None)
integration_name = kwargs.pop("integration_name", None)
self.credential_store = CredentialStoreFactory.getCredentialStore(product_name, credential_file)
url, token, org_key = kwargs.pop("url", None), kwargs.pop("token", None), kwargs.pop("org_key", None)
if url and token:
if org_key:
credentials = {"url": url, "token": token, "org_key": org_key}
else:
credentials = {"url": url, "token": token}
for k in ("ssl_verify", "proxy", "ssl_cert_file"):
if k in kwargs:
credentials[k] = kwargs.pop(k)
self.credentials = Credentials(credentials)
self.credential_profile_name = None
else:
self.credential_profile_name = kwargs.pop("profile", None)
self.credentials = self.credential_store.get_credentials(self.credential_profile_name)
timeout = kwargs.pop("timeout", DEFAULT_POOL_TIMEOUT)
max_retries = kwargs.pop("max_retries", DEFAULT_RETRIES)
| |
<gh_stars>100-1000
# Copyright 2017-2019 typed_python Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Convert python AST objects into a more explicit set of
Algebraic types. These are easier to work with than the
Python ast directly.
"""
import sys
import ast
import typed_python.compiler.python_ast_util as python_ast_util
import types
import traceback
from typed_python._types import Forward, Alternative, TupleOf, OneOf
# forward declarations.
# The Alternative types below are mutually recursive (e.g. a Statement
# contains Exprs and vice versa), so each one is first declared as a
# typed_python Forward and later closed with .define(...).
Module = Forward("Module")
Statement = Forward("Statement")
Expr = Forward("Expr")
Arg = Forward("Arg")
NumericConstant = Forward("NumericConstant")
ExprContext = Forward("ExprContext")
Slice = Forward("Slice")
BooleanOp = Forward("BooleanOp")
BinaryOp = Forward("BinaryOp")
UnaryOp = Forward("UnaryOp")
ComparisonOp = Forward("ComparisonOp")
Comprehension = Forward("Comprehension")
ExceptionHandler = Forward("ExceptionHandler")
Arguments = Forward("Arguments")
Keyword = Forward("Keyword")
Alias = Forward("Alias")
WithItem = Forward("WithItem")
TypeIgnore = Forward("TypeIgnore")
Module = Module.define(Alternative(
    "Module",
    Module={
        "body": TupleOf(Statement),
        # ast.Module grew a 'type_ignores' field in Python 3.8; only include
        # it when running on 3.8+ so this Alternative mirrors the host ast.
        **({"type_ignores": TupleOf(TypeIgnore)} if sys.version_info.minor >= 8 else {})
    },
    Expression={'body': Expr},
    Interactive={'body': TupleOf(Statement)},
    Suite={"body": TupleOf(Statement)}
))
TypeIgnore = TypeIgnore.define(Alternative(
    "TypeIgnore",
    Item={'lineno': int, 'tag': str}
))
def statementStrLines(self):
    """
    Yield lines of approximate python source text rendering a Statement.

    Best-effort pretty-printer used for debugging: nested bodies are rendered
    recursively with each nested line prefixed by the indent string, and any
    alternative without an explicit rendering falls back to a generic
    "<type>..." line.
    """
    if self.matches.FunctionDef:
        yield f"def {self.name}(...):"
        for s in self.body:
            for line in statementStrLines(s):
                yield " " + line
        return
    elif self.matches.Expr:
        yield str(self.value)
    elif self.matches.If:
        yield f"if {self.test}:"
        for s in self.body:
            for line in statementStrLines(s):
                yield " " + line
        if self.orelse:
            yield "else:"
            for s in self.orelse:
                for line in statementStrLines(s):
                    yield " " + line
    elif self.matches.While:
        yield f"while {self.test}:"
        for s in self.body:
            for line in statementStrLines(s):
                yield " " + line
        if self.orelse:
            yield "else:"
            for s in self.orelse:
                for line in statementStrLines(s):
                    yield " " + line
    elif self.matches.Try:
        yield "try:"
        for s in self.body:
            for line in statementStrLines(s):
                yield " " + line
        for eh in self.handlers:
            # Bug fix: this previously yielded f"except {eh.type}" + f" as {eh.name}"
            # unconditionally, producing "except X as None" for handlers without a
            # bound name, and omitted the trailing ':' that every other block
            # header produced here carries. (Assumes eh.name is None/falsy when
            # there is no 'as' clause -- TODO confirm against ExceptionHandler.)
            yield f"except {eh.type}" + (f" as {eh.name}" if eh.name else "") + ":"
            for s in eh.body:
                for line in statementStrLines(s):
                    yield " " + line
        if self.orelse:
            yield "else:"
            for s in self.orelse:
                for line in statementStrLines(s):
                    yield " " + line
        if self.finalbody:
            yield "finally:"
            for s in self.finalbody:
                for line in statementStrLines(s):
                    yield " " + line
    elif self.matches.With:
        yield f"with {self.items}:"
        for s in self.body:
            for line in statementStrLines(s):
                yield " " + line
    elif self.matches.Assign:
        yield f"{', '.join(str(x) for x in self.targets)} = {self.value}"
    elif self.matches.AugAssign:
        yield f"{self.target} {self.op}= {self.value}"
    elif self.matches.Raise:
        res = "raise"
        if self.exc is not None:
            res += " " + str(self.exc)
        if self.cause is not None:
            res += " from " + str(self.cause)
        yield res
    elif self.matches.Break:
        yield "break"
    elif self.matches.Continue:
        yield "continue"
    elif self.matches.Pass:
        yield "pass"
    elif self.matches.Return:
        if self.value is not None:
            yield f"return {self.value}"
        else:
            yield "return"
    else:
        yield str(type(self)) + "..."
def StatementStr(self):
    """Render a Statement as a newline-joined block of pseudo-source text."""
    rendered_lines = statementStrLines(self)
    return "\n".join(rendered_lines)
# Close the Statement forward with one Alternative member per python
# statement kind. Every member carries 'line_number', 'col_offset' and
# 'filename' fields so consumers can report source locations.
Statement = Statement.define(Alternative(
    "Statement",
    FunctionDef={
        "name": str,
        "args": Arguments,
        "body": TupleOf(Statement),
        "decorator_list": TupleOf(Expr),
        "returns": OneOf(Expr, None),
        'line_number': int,
        'col_offset': int,
        'filename': str
    },
    ClassDef={
        "name": str,
        "bases": TupleOf(Expr),
        "keywords": TupleOf(Keyword),
        "body": TupleOf(Statement),
        "decorator_list": TupleOf(Expr),
        'line_number': int,
        'col_offset': int,
        'filename': str
    },
    Return={
        "value": OneOf(Expr, None),
        'line_number': int,
        'col_offset': int,
        'filename': str
    },
    Delete={
        "targets": TupleOf(Expr),
        'line_number': int,
        'col_offset': int,
        'filename': str
    },
    Assign={
        "targets": TupleOf(Expr),
        "value": Expr,
        'line_number': int,
        'col_offset': int,
        'filename': str
    },
    AugAssign={
        "target": Expr,
        "op": BinaryOp,
        "value": Expr,
        'line_number': int,
        'col_offset': int,
        'filename': str
    },
    Print={
        "expr": OneOf(Expr, None),
        "values": TupleOf(Expr),
        "nl": int,
        'line_number': int,
        'col_offset': int,
        'filename': str
    },
    For={
        "target": Expr,
        "iter": Expr,
        "body": TupleOf(Statement),
        "orelse": TupleOf(Statement),
        'line_number': int,
        'col_offset': int,
        'filename': str
    },
    While={
        "test": Expr,
        "body": TupleOf(Statement),
        "orelse": TupleOf(Statement),
        'line_number': int,
        'col_offset': int,
        'filename': str
    },
    If={
        "test": Expr,
        "body": TupleOf(Statement),
        "orelse": TupleOf(Statement),
        'line_number': int,
        'col_offset': int,
        'filename': str
    },
    With={
        "items": TupleOf(WithItem),
        "body": TupleOf(Statement),
        'line_number': int,
        'col_offset': int,
        'filename': str
    },
    Raise={
        "exc": OneOf(Expr, None),
        "cause": OneOf(Expr, None),
        'line_number': int,
        'col_offset': int,
        'filename': str
    },
    Try={
        "body": TupleOf(Statement),
        "handlers": TupleOf(ExceptionHandler),
        "orelse": TupleOf(Statement),
        "finalbody": TupleOf(Statement),
        'line_number': int,
        'col_offset': int,
        'filename': str
    },
    Assert={
        "test": Expr,
        "msg": OneOf(Expr, None),
        'line_number': int,
        'col_offset': int,
        'filename': str
    },
    Import={
        "names": TupleOf(Alias),
        'line_number': int,
        'col_offset': int,
        'filename': str
    },
    ImportFrom={
        "module": OneOf(str, TupleOf(str)),
        "names": OneOf(Alias, TupleOf(Alias)),
        "level": OneOf(int, None),
        'line_number': int,
        'col_offset': int,
        'filename': str
    },
    Global={
        "names": TupleOf(str),
        'line_number': int,
        'col_offset': int,
        'filename': str
    },
    Expr={
        "value": Expr,
        'line_number': int,
        'col_offset': int,
        'filename': str
    },
    Pass={
        'line_number': int,
        'col_offset': int,
        'filename': str
    },
    Break={
        'line_number': int,
        'col_offset': int,
        'filename': str
    },
    Continue={
        'line_number': int,
        'col_offset': int,
        'filename': str
    },
    AsyncFunctionDef={
        "name": str,
        "args": Arguments,
        "body": TupleOf(Statement),
        "decorator_list": TupleOf(Expr),
        "returns": OneOf(Expr, None),
        'line_number': int,
        'col_offset': int,
        'filename': str
    },
    AnnAssign={
        "target": Expr,
        "annotation": Expr,
        'simple': int,
        "value": OneOf(Expr, None),
        'line_number': int,
        'col_offset': int,
        'filename': str
    },
    AsyncWith={
        "items": TupleOf(WithItem),
        "body": TupleOf(Statement),
        'line_number': int,
        'col_offset': int,
        'filename': str
    },
    AsyncFor={
        'target': Expr,
        'iter': Expr,
        'body': TupleOf(Statement),
        'orelse': TupleOf(Statement),
        'line_number': int,
        'col_offset': int,
        'filename': str
    },
    NonLocal={
        "names": TupleOf(str),
        'line_number': int,
        'col_offset': int,
        'filename': str
    },
    # str() of a Statement delegates to the module-level pretty-printer above.
    __str__=StatementStr
))
def ExpressionStr(self):
    """
    Render an Expr alternative as approximate python source text.

    Best-effort pretty-printer used for debugging: alternatives without an
    explicit rendering below fall back to the repr of the type object.
    """
    if self.matches.ListComp:
        res = "[" + str(self.elt)
        for gen in self.generators:
            res += " for " + str(gen.target) + " in " + str(gen.iter)
            for ifS in gen.ifs:
                res += " if " + str(ifS)
        return res + "]"
    if self.matches.Lambda:
        return "(lambda ...: " + str(self.body) + ")"
    if self.matches.Subscript:
        return str(self.value) + "[" + str(self.slice) + "]"
    if self.matches.Num:
        return str(self.n)
    if self.matches.Call:
        return (
            f"({self.func})(" +
            ", ".join([str(x) for x in self.args] + [f"{kwd.arg}={kwd.value}" for kwd in self.keywords])
            + ")"
        )
    if self.matches.Str:
        return repr(self.s)
    if self.matches.Compare:
        # Map each comparison-operator alternative onto its surface syntax.
        res = str(self.left)
        for i in range(len(self.ops)):
            if self.ops[i].matches.Eq:
                sep = "=="
            if self.ops[i].matches.NotEq:
                sep = "!="
            if self.ops[i].matches.Lt:
                sep = "<"
            if self.ops[i].matches.LtE:
                sep = "<="
            if self.ops[i].matches.Gt:
                sep = ">"
            if self.ops[i].matches.GtE:
                sep = ">="
            if self.ops[i].matches.Is:
                sep = "is"
            if self.ops[i].matches.IsNot:
                sep = "is not"
            if self.ops[i].matches.In:
                sep = "in"
            if self.ops[i].matches.NotIn:
                sep = "not in"
            res += f" {sep} {self.comparators[i]}"
        return res
    if self.matches.BoolOp:
        sep = " and " if self.op.matches.And else " or "
        return sep.join([f"({x})" for x in self.values])
    if self.matches.BinOp:
        # NOTE(review): if none of the operators below match, 'sep' is unbound
        # and this raises NameError -- confirm all BinaryOp alternatives are
        # covered here (same applies to the UnaryOp branch).
        if self.op.matches.Add:
            sep = "+"
        if self.op.matches.Sub:
            sep = "-"
        if self.op.matches.Mult:
            sep = "*"
        if self.op.matches.Div:
            sep = "/"
        if self.op.matches.Mod:
            sep = "%"
        if self.op.matches.Pow:
            sep = "**"
        if self.op.matches.LShift:
            sep = "<<"
        if self.op.matches.RShift:
            sep = ">>"
        if self.op.matches.BitOr:
            sep = "|"
        if self.op.matches.BitXor:
            sep = "^"
        if self.op.matches.BitAnd:
            sep = "&"
        if self.op.matches.FloorDiv:
            sep = "//"
        if self.op.matches.MatMult:
            sep = "@"
        return f"({self.left}) {sep} ({self.right})"
    if self.matches.UnaryOp:
        if self.op.matches.Invert:
            sep = "~"
        if self.op.matches.Not:
            sep = "not "
        if self.op.matches.UAdd:
            sep = "+"
        if self.op.matches.USub:
            sep = "-"
        return f"{sep} ({self.operand})"
    if self.matches.Attribute:
        return f"({self.value}).{self.attr}"
    if self.matches.Yield:
        if self.value is None:
            return "yield"
        else:
            return f"yield {self.value}"
    if self.matches.Name:
        return self.id
    return str(type(self))
Expr = Expr.define(Alternative(
"Expr",
BoolOp={
"op": BooleanOp,
"values": TupleOf(Expr),
'line_number': int,
'col_offset': int,
'filename': str
},
BinOp={
"left": Expr,
"op": BinaryOp,
"right": Expr,
'line_number': int,
'col_offset': int,
'filename': str
},
UnaryOp={
"op": UnaryOp,
"operand": Expr,
'line_number': int,
'col_offset': int,
'filename': str
},
Lambda={
"args": Arguments,
"body": Expr,
'line_number': int,
'col_offset': int,
'filename': str
},
IfExp={
"test": Expr,
"body": Expr,
"orelse": Expr,
'line_number': int,
'col_offset': int,
'filename': str
},
Dict={
"keys": TupleOf(OneOf(None, Expr)),
"values": TupleOf(Expr),
'line_number': int,
'col_offset': int,
'filename': str
},
Set={
"elts": TupleOf(Expr),
'line_number': int,
'col_offset': int,
'filename': str
},
ListComp={
"elt": Expr,
"generators": TupleOf(Comprehension),
'line_number': int,
'col_offset': int,
'filename': str
},
SetComp={
"elt": Expr,
"generators": TupleOf(Comprehension),
'line_number': int,
'col_offset': int,
'filename': str
},
DictComp={
"key": Expr,
"value": Expr,
"generators": TupleOf(Comprehension),
'line_number': int,
'col_offset': int,
'filename': str
},
GeneratorExp={
"elt": Expr,
"generators": TupleOf(Comprehension),
'line_number': int,
'col_offset': int,
'filename': str
},
Yield={
"value": OneOf(Expr, None),
'line_number': int,
'col_offset': int,
'filename': str
},
Compare={
"left": Expr,
"ops": TupleOf(ComparisonOp),
"comparators": TupleOf(Expr),
'line_number': int,
'col_offset': int,
'filename': str
},
Call={
"func": Expr,
"args": TupleOf(Expr),
"keywords": TupleOf(Keyword),
'line_number': int,
'col_offset': int,
'filename': str
},
Num={
"n": NumericConstant,
'line_number': int,
'col_offset': int,
'filename': str
},
| |
<filename>core/people/person.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from sqlalchemy import create_engine, Table
from sqlalchemy import Column, Integer, String, DateTime, Date
from sqlalchemy import MetaData, UnicodeText, text, Index
from sqlalchemy.orm import mapper, sessionmaker, scoped_session
#from sqlalchemy.sql import select
import uuid
import hashlib
from core.config.settings import logger
from core.config.settings import DB
import datetime
# Module-level database wiring shared by the model code below.
# pool_recycle=3600 recycles pooled connections after an hour (presumably to
# avoid MySQL closing idle connections via wait_timeout -- TODO confirm).
engine = create_engine(DB, echo=False, pool_recycle=3600)
metadata = MetaData()
# Thread-local session factory; autocommit/autoflush are disabled so callers
# control flush and transaction boundaries explicitly.
db_session = scoped_session(sessionmaker(autocommit=False,
                                         autoflush=False,
                                         bind=engine))
#metadata = MetaData(bind=engine, reflect=True)
account_table = Table(
'account',
metadata,
Column('id', Integer, primary_key=True),
Column('uuid', String(36)),
Column('password', String(200)),
mysql_engine='InnoDB',
mysql_charset='utf8')
profile_table = Table(
'account_profile',
metadata,
Column('id', Integer, primary_key=True),
Column('account_id', String(36), primary_key=True),
Column('uuid', String(255)),
Column('email', String(255), nullable=True),
Column('first_name', String(255), nullable=True),
Column('last_name', String(255), nullable=True),
Column('birthdate', Date, nullable=True),
Column('birthplace', String(255), nullable=True),
Column('nationality', String(255)),
Column('nickname', String(24), nullable=True),
Column('gender', String(255), nullable=True),
Column(
'priority', String(11),
nullable=True,
default=text(u"'0'"),
server_default=text('NULL')),
Column('type', String(8), nullable=True),
Column('age', Integer(), nullable=True, default=text(u"'0'")),
Column('sign', String(11), nullable=True),
Column('first_login', Integer(), nullable=True, default=text(u"'0'")),
Column('last_login', Integer(), nullable=True, default=text(u"'0'")),
Column('registered', Integer(), nullable=True, default=text(u"'0'")),
Column('updated', Integer(), nullable=True, default=text(u"'0'")),
Column('homepage', String(255), nullable=True),
Column('home_country', String(255), nullable=True),
Column('home_city', String(255), nullable=True),
Column('home_state', String(255), nullable=True),
Column('home_street', String(255), nullable=True),
Column('home_house', String(255), nullable=True),
Column('home_apartment', String(255), nullable=True),
Column('home_postcode', String(255), nullable=True),
Column('home_phone', String(255), nullable=True),
Column('work_country', String(255), nullable=True),
Column('work_city', String(255), nullable=True),
Column('work_street', String(255), nullable=True),
Column('work_house', String(255), nullable=True),
Column('work_postcode', String(255), nullable=True),
Column('work_phone', String(255), nullable=True),
Column('mobile_phone', String(255), nullable=True),
Column('music', String(16), nullable=True),
Column('food', String(16), nullable=True),
Column('drink', String(16), nullable=True),
Column('location_id', Integer(), nullable=True),
Column('status', String(255), nullable=True),
Column('online', Integer(), nullable=True),
mysql_engine='MyISAM',
mysql_charset='utf8'
)
Index('id', profile_table.c.id, unique=True)
Index('uuid', profile_table.c.uuid, unique=True)
Index('email', profile_table.c.email, unique=True)
Index('first_name', profile_table.c.first_name)
Index('last_name', profile_table.c.last_name)
Index('nickname', profile_table.c.nickname)
Index('homepage', profile_table.c.homepage)
Index('mobile_phone', profile_table.c.mobile_phone, unique=False)
Index('location_id', profile_table.c.location_id)
profile_role_table = Table(
'account_profile_role',
metadata,
Column('id', Integer, primary_key=True),
Column('uuid', String(255)),
Column('role', String(255)),
mysql_engine='MyISAM',
mysql_charset='utf8')
Index('id', profile_role_table.c.id)
Index('uuid', profile_role_table.c.uuid)
Index('role', profile_role_table.c.role)
profile_social_table = Table(
'account_profile_social',
metadata,
Column('id', Integer, primary_key=True),
Column('uuid', String(255)),
Column('service_id', UnicodeText()),
Column('service_name', String(255)),
Column('service_url', UnicodeText()),
Column('service_consumer_key', UnicodeText()),
Column('service_consumer_secret', UnicodeText()),
Column('service_access_token', UnicodeText()),
Column('service_scope', UnicodeText()),
Column('service_login', UnicodeText()),
Column('service_email', String(255)),
Column('service_password', UnicodeText()),
Column('notes', UnicodeText()),
mysql_engine='MyISAM',
mysql_charset='utf8')
Index('id', profile_social_table.c.id)
Index('uuid', profile_social_table.c.uuid)
# Free-form interests attached to a profile, keyed by the profile's uuid.
profile_interest_table = Table(
    # NOTE(review): the table name 'account_interest_email' looks like a
    # copy/paste from the email table; 'account_profile_interest' would match
    # the naming of the sibling tables. Confirm against the live schema before
    # renaming, since changing it alters the mapped DB table.
    'account_interest_email',
    metadata,
    Column('id', Integer, primary_key=True),
    Column('uuid', String(255)),
    Column('interest', UnicodeText()),
    mysql_engine='MyISAM',
    mysql_charset='utf8'
)
Index('id', profile_interest_table.c.id)
Index('uuid', profile_interest_table.c.uuid)
profile_request_table = Table(
'account_profile_request',
metadata,
Column('id', Integer, primary_key=True),
Column('uuid', String(255)),
Column('request', String(255)),
mysql_engine='MyISAM',
mysql_charset='utf8'
)
Index('id', profile_request_table.c.id)
Index('uuid', profile_request_table.c.uuid)
profile_comment_table = Table(
'account_profile_comment',
metadata,
Column('id', Integer, primary_key=True),
Column('uuid', String(255)),
Column('comment', UnicodeText()),
mysql_engine='MyISAM',
mysql_charset='utf8'
)
Index('id', profile_comment_table.c.id)
Index('uuid', profile_comment_table.c.uuid)
profile_relation_table = Table(
'account_profile_relation',
metadata,
Column('id', Integer, primary_key=True),
Column('uuid', String(255)),
Column('type', String(255)),
Column('related_account', UnicodeText()),
Column('related_account_type', String(255)),
mysql_engine='MyISAM',
mysql_charset='utf8')
Index('id', profile_relation_table.c.id)
Index('uuid', profile_relation_table.c.uuid)
profile_other_name_table = Table(
'account_profile_other_name',
metadata,
Column('id', Integer, primary_key=True),
Column('uuid', String(255)),
Column('name', String(255)),
mysql_engine='MyISAM',
mysql_charset='utf8'
)
Index('id', profile_other_name_table.c.id)
Index('uuid', profile_other_name_table.c.uuid)
Index('name', profile_other_name_table.c.name)
profile_email_table = Table(
'account_profile_email',
metadata,
Column('id', Integer, primary_key=True),
Column('uuid', String(255)),
Column('email', String(255)),
mysql_engine='MyISAM',
mysql_charset='utf8'
)
Index('id', profile_email_table.c.id)
Index('uuid', profile_email_table.c.uuid)
Index('email', profile_email_table.c.email)
profile_picture_table = Table(
'account_profile_picture',
metadata,
Column('id', Integer, primary_key=True),
Column('uuid', String(255)),
Column('picture', String(255)),
mysql_engine='MyISAM',
mysql_charset='utf8'
)
Index('id', profile_picture_table.c.id)
Index('uuid', profile_picture_table.c.uuid)
profile_phone_table = Table(
'account_profile_phone',
metadata,
Column('id', Integer, primary_key=True),
Column('uuid', String(255)),
Column('phone', String(255)),
mysql_engine='MyISAM',
mysql_charset='utf8')
Index('id', profile_phone_table.c.id)
Index('uuid', profile_phone_table.c.uuid)
profile_cronjob_table = Table(
'account_profile_cronjob',
metadata,
Column('id', Integer, primary_key=True),
Column('uuid', String(255)),
Column('job', String(255)),
mysql_engine='MyISAM',
mysql_charset='utf8')
Index('id', profile_cronjob_table.c.id)
Index('uuid', profile_cronjob_table.c.uuid)
profile_device_table = Table(
'account_profile_device',
metadata,
Column('id', Integer, primary_key=True),
Column('device_id', Integer()),
mysql_engine='MyISAM',
mysql_charset='utf8'
)
Index('id', profile_device_table.c.id, unique=True)
Index('device_id', profile_device_table.c.device_id, unique=True)
device_table = Table(
'device',
metadata,
Column('id', Integer, primary_key=True),
Column('serial', String(255), nullable=True),
Column('name', String(255)),
Column('type', String(255), nullable=True),
Column('model', String(255), nullable=True),
Column('make', String(255), nullable=True),
Column('built', String(255), nullable=True),
Column('family', String(255), nullable=True),
Column('desc', UnicodeText(), nullable=True),
Column('params', UnicodeText(), nullable=True),
Column('network_name', String(255), nullable=True),
Column('mac_address', String(255), nullable=True),
Column('location', String(255), nullable=True),
mysql_engine='MyISAM',
mysql_charset='utf8')
Index('id', device_table.c.id, unique=True)
Index('serial', device_table.c.serial, unique=True)
Index('name', device_table.c.name, unique=True)
Index('model', device_table.c.model, unique=True)
Index('mac_address', device_table.c.mac_address, unique=True)
country_table = Table(
'country',
metadata,
Column('id', Integer(), primary_key=True, nullable=False),
Column('iso', String(2), nullable=False),
Column('name', String(80), nullable=False),
Column('title', String(80), nullable=False),
Column('iso3', String(3)),
Column('numcode', Integer()),
mysql_engine='MyISAM',
mysql_charset='utf8')
profile_link_table = Table(
'account_profile_link',
metadata,
Column('id', Integer, primary_key=True),
Column('uuid', String(255)),
Column('type', String(255), nullable=True),
Column('url', UnicodeText()),
mysql_engine='MyISAM',
mysql_charset='utf8')
Index('id', profile_link_table.c.id, unique=True)
#Index('uuid', profile_link_table.c.uuid, unique=True)
#needed for registering working hours
# One row per recorded working-hours entry for a profile (uuid).
profile_timesheet_table = Table(
    'account_profile_timesheet',
    metadata,
    Column('id', Integer, primary_key=True),
    Column('uuid', String(255)),
    Column('type', String(255), nullable=True),
    Column('created', DateTime, default=datetime.datetime.now),
    Column('spent', UnicodeText()),
    mysql_engine='MyISAM',
    mysql_charset='utf8')
Index('id', profile_timesheet_table.c.id, unique=True)
# fixed: this index was declared on c.id (duplicating the index above)
# although its name says it covers the uuid column.
# NOTE(review): unique=True limits a profile to one timesheet row --
# drop the flag if several entries per uuid are expected.
Index('uuid', profile_timesheet_table.c.uuid, unique=True)
# Company master record with postal address and contact details.
company_table = Table(
    'company_profile',
    metadata,
    Column('id', Integer, primary_key=True),
    Column('name', String(255), nullable=False),
    Column('domain', String(255), nullable=True),
    Column('type', String(255), nullable=True),
    Column('registered', DateTime, default=datetime.datetime.now),
    Column('updated', DateTime, default=datetime.datetime.now),
    # stored as an integer year; server-side default is the literal '0'
    Column('founded', Integer(), nullable=True, default=text(u"'0'")),
    Column('homepage', String(255), nullable=True),
    Column('country', String(255), nullable=True),
    Column('city', String(255), nullable=True),
    Column('state', String(255), nullable=True),
    Column('street', String(255), nullable=True),
    Column('house', String(255), nullable=True),
    Column('postcode', String(255), nullable=True),
    Column('phone', String(255), nullable=True),
    Column('fax', String(255), nullable=True),
    Column('location_id', Integer(), nullable=True),
    Column('info', UnicodeText(), nullable=True),
    Column('status', String(255), nullable=True),
    mysql_engine='MyISAM',
    mysql_charset='utf8'
)
Index('id', company_table.c.id, unique=True)
Index('name', company_table.c.name)
Index('domain', company_table.c.domain, unique=False)
# A person belonging to a company; uuid references the account profile.
# NOTE(review): company_id is declared String(255) while company_profile.id
# is an Integer -- confirm the intended join type.
company_member_table = Table(
    'company_member_profile',
    metadata,
    Column('id', Integer, primary_key=True),
    Column('company_id', String(255)),
    Column('uuid', String(255)),
    Column('email1', String(255), nullable=True),
    Column('email2', String(255), nullable=True),
    Column('registered', DateTime, default=datetime.datetime.now),
    Column('updated', DateTime, default=datetime.datetime.now),
    Column('homepage', String(255), nullable=True),
    Column('country', String(255), nullable=True),
    Column('city', String(255), nullable=True),
    Column('state', String(255), nullable=True),
    Column('street', String(255), nullable=True),
    Column('office', String(255), nullable=True),
    Column('postcode', String(255), nullable=True),
    Column('phone1', String(255), nullable=True),
    Column('phone2', String(255), nullable=True),
    Column('phone3', String(255), nullable=True),
    Column('cellphone1', String(255), nullable=True),
    Column('cellphone2', String(255), nullable=True),
    Column('cellphone3', String(255), nullable=True),
    Column('fax1', String(255), nullable=True),
    Column('fax2', String(255), nullable=True),
    Column('profession', UnicodeText(), nullable=True),
    Column('info', UnicodeText(), nullable=True),
    Column('location_id', Integer(), nullable=True),
    Column('status', String(255), nullable=True),
    mysql_engine='MyISAM',
    mysql_charset='utf8'
)
Index('id', company_member_table.c.id, unique=True)
# NOTE(review): unique=True on company_id allows only ONE member row per
# company -- almost certainly unintended for a membership table; confirm.
Index('company_id', company_member_table.c.company_id, unique=True)
Index('email1', company_member_table.c.email1, unique=False)
# History of interactions between two profiles (uuid1/uuid2) or email
# addresses; 'data' carries a free-form payload.
profile_interaction_table = Table(
    'profile_interaction_history',
    metadata,
    Column('id', Integer, primary_key=True),
    Column('uuid1', String(255)),
    Column('uuid2', String(255)),
    Column('email1', String(255), nullable=True),
    Column('email2', String(255), nullable=True),
    Column('registered', DateTime, default=datetime.datetime.now),
    Column('updated', DateTime, default=datetime.datetime.now),
    Column('type', String(255), nullable=True),
    Column('data', UnicodeText(), nullable=True),
    Column('status', String(255), nullable=True),
    mysql_engine='MyISAM',
    mysql_charset='utf8'
)
Index('id', profile_interaction_table.c.id, unique=True)
# NOTE(review): unique indexes on uuid1/uuid2 restrict each profile to a
# single history row -- unusual for a history table; confirm intended.
Index('uuid1', profile_interaction_table.c.uuid1, unique=True)
Index('uuid2', profile_interaction_table.c.uuid2, unique=True)
Index('email1', profile_interaction_table.c.email1, unique=False)
Index('email2', profile_interaction_table.c.email2, unique=False)
Index('type', profile_interaction_table.c.type, unique=False)
class ProfileTimesheet(object):
    """Mapped object for a single working-hours entry of a profile."""
    def __init__(self, **kwargs):
        # All four keywords are mandatory; a missing one raises KeyError,
        # matching the original pop() contract (same order, too).
        for field in ('uuid', 'type', 'created', 'spent'):
            setattr(self, field, kwargs.pop(field))
    def __repr__(self):
        # self.id is expected from the SQLAlchemy mapper -- TODO confirm
        return "<ProfileTimesheet('%s')>" % (self.id)
class ProfileLink(object):
    """Mapped object for a URL attached to a profile."""
    def __init__(self, **kwargs):
        # uuid, type and url are mandatory keywords (KeyError if absent)
        for field in ('uuid', 'type', 'url'):
            setattr(self, field, kwargs.pop(field))
    def __repr__(self):
        return "<ProfileLink('%s')>" % (self.url)
class Country(object):
    """Mapped object for one ISO country row; all fields are optional."""
    def __init__(self, **kwargs):
        # every field defaults to the empty string when not supplied
        for field in ('id', 'iso', 'name', 'title', 'iso3', 'numcode'):
            setattr(self, field, kwargs.pop(field, ''))
    def __repr__(self):
        return "<Country('%s')>" % (self.iso)
class Account(object):
    """Mapped object holding the login credentials of a profile."""
    def __init__(self, **kwargs):
        # the primary key is assigned by the database, not by callers
        self.uuid = kwargs.pop('uuid', '')
        self.password = kwargs.pop('password', '')
    def __repr__(self):
        # NOTE(review): self.id only exists once the mapper/database has
        # assigned it -- repr on a detached instance may fail; confirm.
        return "<Account('%s', '%s')>" % (self.id, self.uuid)
class Profile(object):
    """Mapped object for the main account-profile record.

    Only ``uuid`` is a mandatory constructor keyword; every other field
    falls back to the default listed in ``_DEFAULTS``.
    """
    query = db_session.query_property()
    # optional constructor keywords and their defaults
    _DEFAULTS = (
        ('email', ''), ('first_name', ''), ('last_name', ''),
        ('birthdate', 0), ('birthplace', ''), ('nationality', ''),
        ('nickname', ''), ('gender', ''), ('priority', ''), ('type', ''),
        ('age', 0), ('sign', ''), ('first_login', ''), ('last_login', ''),
        ('registered', 0), ('updated', 0), ('homepage', ''),
        ('home_country', ''), ('home_city', ''), ('home_state', ''),
        ('home_street', ''), ('home_house', ''), ('home_apartment', ''),
        ('home_postcode', ''), ('home_phone', ''),
        ('work_country', ''), ('work_city', ''), ('work_street', ''),
        ('work_house', ''), ('work_postcode', ''), ('work_phone', ''),
        ('mobile_phone', ''), ('food', ''), ('drink', ''), ('music', ''),
        ('status', ''), ('online', 0),
    )
    def __init__(self, **kwargs):
        self.uuid = kwargs.pop('uuid')
        for attr, default in self._DEFAULTS:
            setattr(self, attr, kwargs.pop(attr, default))
    def __repr__(self):
        # fixed: the format string contained a stray quote ("...'%s''...")
        return "<Profile('%s')>" % (self.email)
class ProfileRole(object):
    """Mapped object associating a profile uuid with a role name."""
    def __init__(self, **kwargs):
        self.uuid = kwargs.pop('uuid')
        self.role = kwargs.pop('role')
    def __repr__(self):
        # fixed: previously referenced self.device_id (a copy/paste from
        # ProfileDevice) which is never set here and raised AttributeError;
        # also removed the stray quote from the format string
        return "<ProfileRole('%s')>" % (self.role)
class ProfileSocial(object):
    """Mapped object holding credentials for an external social service.

    ``uuid``, ``service_id`` and ``service_name`` are mandatory keywords;
    the remaining fields default to the empty string.
    """
    def __init__(self, **kwargs):
        self.uuid = kwargs.pop('uuid')
        self.service_id = kwargs.pop('service_id')
        self.service_name = kwargs.pop('service_name')
        self.service_url = kwargs.pop('service_url', '')
        self.service_consumer_key = kwargs.pop('service_consumer_key', '')
        self.service_consumer_secret = kwargs.pop(
            'service_consumer_secret', ''
        )
        self.service_access_token = kwargs.pop('service_access_token', '')
        self.service_scope = kwargs.pop('service_scope', '')
        self.service_login = kwargs.pop('service_login', '')
        self.service_email = kwargs.pop('service_email', '')
        self.service_password = kwargs.pop('service_password', '')
        self.notes = kwargs.pop('notes', '')
    def __repr__(self):
        # fixed: stray quote in the original format string
        return "<ProfileSocial('%s')>" % (self.uuid)
class ProfileOtherName(object):
    """Mapped object for an alternative name of a profile."""
    def __init__(self, **kwargs):
        self.uuid = kwargs.pop('uuid')
        self.name = kwargs.pop('name')
    def __repr__(self):
        # fixed: stray quote in the original format string
        return "<ProfileOtherName('%s')>" % (self.name)
class ProfileRelation(object):
    """Mapped object describing a relation between two accounts."""
    def __init__(self, **kwargs):
        self.uuid = kwargs.pop('uuid')
        self.type = kwargs.pop('type')
        self.related_account = kwargs.pop('related_account')
        self.related_account_type = kwargs.pop('related_account_type')
    def __repr__(self):
        # fixed: repr previously claimed the wrong class ("ProfileSocial",
        # a copy/paste artefact) and contained a stray quote
        return "<ProfileRelation('%s')>" % (self.uuid)
class ProfileEmail(object):
    """Mapped object for an e-mail address of a profile."""
    def __init__(self, **kwargs):
        self.uuid = kwargs.pop('uuid')
        self.email = kwargs.pop('email')
    def __repr__(self):
        # fixed: stray quote in the original format string
        return "<ProfileEmail('%s')>" % (self.email)
class ProfilePicture(object):
    """Mapped object for a picture attached to a profile."""
    def __init__(self, **kwargs):
        self.uuid = kwargs.pop('uuid')
        self.picture = kwargs.pop('picture')
    def __repr__(self):
        # fixed: stray quote in the original format string
        return "<ProfilePicture('%s')>" % (self.uuid)
class ProfilePhone(object):
    """Mapped object for a phone number of a profile."""
    def __init__(self, **kwargs):
        self.uuid = kwargs.pop('uuid')
        self.phone = kwargs.pop('phone')
    def __repr__(self):
        # fixed: stray quote in the original format string
        return "<ProfilePhone('%s')>" % (self.uuid)
class ProfileInterest(object):
    """Mapped object for an interest tag of a profile."""
    def __init__(self, **kwargs):
        self.uuid = kwargs.pop('uuid')
        self.interest = kwargs.pop('interest')
    def __repr__(self):
        # fixed: stray quote in the original format string
        return "<ProfileInterest('%s')>" % (self.uuid)
class ProfileRequest(object):
    """Mapped object for a pending request issued by/for a profile."""
    def __init__(self, **kwargs):
        self.uuid = kwargs.pop('uuid')
        self.type = kwargs.pop('type')
        self.request = kwargs.pop('request')
    def __repr__(self):
        # fixed: stray quote in the original format string
        return "<ProfileRequest('%s')>" % (self.uuid)
class ProfileDevice(object):
    """Mapped object linking a profile uuid to a device id."""
    def __init__(self, **kwargs):
        self.uuid = kwargs.pop('uuid')
        self.device_id = kwargs.pop('device_id')
    def __repr__(self):
        # fixed: stray quote in the original format string
        return "<ProfileDevice('%s')>" % (self.device_id)
class Device(object):
    """Mapped object describing a physical device; all keywords mandatory."""
    def __init__(self, **kwargs):
        self.id = kwargs.pop('id')
        self.device_name = kwargs.pop('device_name')
        self.device_desc = kwargs.pop('device_desc')
        self.device_type = kwargs.pop('device_type')
        self.device_family = kwargs.pop('device_family')
        self.device_model = kwargs.pop('device_model')
        self.device_serial = kwargs.pop('device_serial')
        self.device_make = kwargs.pop('device_make')
        # NOTE(review): the keyword is 'device_build' while the attribute is
        # 'device_built'; kept as-is because callers already pass
        # 'device_build' -- confirm which spelling is intended before renaming.
        self.device_built = kwargs.pop('device_build')
        self.device_params = kwargs.pop('device_params')
        self.device_location = kwargs.pop('device_location')
    def __repr__(self):
        # fixed: stray quote in the original format string
        return "<Device('%s')>" % (self.device_name)
class ProfileComment(object):
def __init__(self, **kwargs):
self.uuid = kwargs.pop('uuid')
self.comment = kwargs.pop('comment')
self.area = kwargs.pop('area')
def __repr__(self):
return | |
"""
Classes to perform actions on simulation trajectories
"""
from __future__ import division, print_function, absolute_import
import os
from collections import namedtuple
import MDAnalysis as md
import MDAnalysis.core.AtomGroup as AtomGroup
import MDAnalysis.analysis.align as align
import MDAnalysis.lib.util as mdutil
import MDAnalysis.lib.mdamath as mdmath
import numpy as np
# pyvoro (Voronoi tessellation) is an optional dependency; analyses that
# need it are expected to check for its availability at use time.
try:
    import pyvoro
except ImportError:
    # fixed: only swallow a missing module -- the original bare except
    # also hid syntax errors inside pyvoro and KeyboardInterrupt
    pass
from scipy.spatial.distance import cdist
import sklearn.mixture as mixture
from simanalysis.pbc import make_whole_xyz, unwrap_vector
from simanalysis.groups import read_groups
from simanalysis.utils import resanallib, AnalysisGrid
# (time, value) sample emitted by an analysis at each processed snapshot
MDRecord = namedtuple("MDRecord",["time","value"])
# first/last atom indices (inclusive) of one residue in the universe
ResidueAtoms = namedtuple("ResidueAtoms",["first","last"])
class TrajectoryAction(object):
    """
    Base class for actions that are called by a TrajectoryProcessor
    object. Classes that wants to implement a particular action
    should inherit from this.
    """
    def __init__(self, processor):
        # Register with the processor so the hooks below are invoked
        # for every snapshot of the trajectory
        self.processor = processor
        self.dosubsample = False
        processor.append_action(self)
    @staticmethod
    def descr():
        # One-line help text for the command-line driver; subclasses
        # are expected to override this
        return "This is generic description"
    @classmethod
    def command_name(cls):
        # The command name is the lower-cased class name with any
        # trailing "analysis" suffix stripped off
        name = cls.__name__.lower()
        return name[:-8] if name.endswith("analysis") else name
    def add_arguments(self, parser):
        """
        Function that is called to add action-specific arguments

        Arguments
        ---------
        parser : argparse.ArgumentParser
            the parser to add command-line arguments to
        """
        pass
    def setup(self, args):
        """
        Function that is called after the processor has parsed the
        command-line arguments.

        Arguments
        ---------
        args : argparse.Namespace
            the parsed arguments
        """
    def process(self):
        """
        Main processing function called at each trajectory timestep
        """
        pass
    def subsample(self):
        """
        Function called occasionally if subsampling is turned on
        """
        pass
    def finalize(self):
        """
        Function that is called after all timesteps has been processed
        """
        pass
    def _write_records(self, postfix='', headers=None):
        """
        Helper routine to write out the accumulated list of MDRecord
        objects to disc, one tab-separated line per record.
        """
        if not self.records:
            return
        with open(self.out + postfix, 'w') as outfile:
            if headers is not None:
                outfile.write("#" + "\t".join(headers) + "\n")
            for record in self.records:
                value = record.value
                # scalar floats and ints get dedicated formats; anything
                # else is treated as an iterable of floats
                if isinstance(value, (float, np.float32)):
                    outfile.write("%.0f\t%.3f\n" % (record.time, value))
                elif isinstance(value, (int, np.int32)):
                    outfile.write("%.0f\t%d\n" % (record.time, value))
                else:
                    joined = "\t".join("%.3f" % v for v in value)
                    outfile.write("%.0f\t%s\n" % (record.time, joined))
class CenterWholeAlign(TrajectoryAction):
    """
    Class to make MD snapshots whole over periodic boxes and to centre and
    align proteins.

    Attributes
    ----------
    protsel : MDAnalysis.AtomGroup
        the protein selection
    refuni : MDAnalyis.Universe
        the reference universe used for alignment
    residue_atoms : list of integer tuples
        the atom numbers of all residues excluding proteins
    residues : list of MDAnalysis.AtomGroup
        the residues in the universe excluding proteins
    records : list of MDRecords
        the RMSD record at each processes snapshot
    writer : MDAnalysis.Writer
        the output trajectory writer
    """
    @staticmethod
    def descr() :
        return "Make snapshots whole and centre and align proteins"
    def add_arguments(self, parser):
        # fixed: help strings previously read "selectiom"
        parser.add_argument('--bbmask',help="the selection mask for backbone",default="name CA")
        parser.add_argument('--pmask',help="the selection mask for protein",default="protein")
        parser.add_argument('-o','--out',help="the output",default="centerwhole")
        parser.add_argument('--noalign',action="store_true",help="turns off alignment",default=False)
        parser.add_argument('--nocenter',action="store_true",help="turns off centering",default=False)
        parser.add_argument('--nowhole',action="store_true",help="turns off making whole",default=False)
    def setup(self, args):
        self.refuni = md.Universe(self.processor.args.struct)
        self.protsel = self.processor.universe.select_atoms(args.pmask)
        # without a protein selection there is nothing to centre/align on
        if len(self.protsel) == 0 :
            self.nocenter = True
            self.noalign = True
        else:
            self.nocenter = args.nocenter
            self.noalign = args.noalign
        self.nowhole = args.nowhole
        self.residues = []
        self.residue_atoms = []
        for res in self.processor.universe.select_atoms("not "+args.pmask).residues:
            # single-atom residues cannot be broken over the periodic
            # boundary, so only multi-atom residues are tracked
            if len(res.atoms) > 1 :
                self.residues.append(res)
                self.residue_atoms.append(ResidueAtoms(res.atoms[0].index,res.atoms[-1].index))
        self.records = []
        self.writer = md.Writer(args.out,
                                self.processor.universe.trajectory.n_atoms)
        self.out = args.out
        self.bbmask = args.bbmask
    def process(self):
        if not self.nowhole :
            if len(self.protsel) > 0:
                # fixed: make_whole_xyz is imported directly from
                # simanalysis.pbc; the original called it through an
                # undefined "pbc" name, raising NameError at runtime
                xyz = make_whole_xyz(self.protsel.positions,self.processor.currbox)
                self.protsel.positions = xyz
            for res in self.residues :
                xyz = make_whole_xyz(res.atoms.positions,self.processor.currbox)
                res.atoms.positions = xyz
        if not self.nocenter :
            self._center()
        if not self.noalign :
            rmsd = align.alignto(self.processor.universe, self.refuni,
                                select=self.bbmask)[1]
            self.records.append(MDRecord(self.processor.currtime,rmsd))
        self.writer.write(self.processor.currsnap)
    def finalize(self):
        """
        Write out the RMSDs to disc and close the output trajectory
        """
        self._write_records(postfix="_rmsd.txt")
        try :
            self.writer.close_trajectory()
        except Exception:
            # fixed: the bare except also swallowed KeyboardInterrupt and
            # SystemExit; closing remains best-effort for real errors only
            pass
    def _center(self) :
        xyz = self.processor.currsnap._pos
        com1 = self.protsel.center_of_geometry()
        # move every non-protein residue to the periodic image closest
        # to the protein centre
        for residue in self.residue_atoms :
            com2 = xyz[residue.first:residue.last+1].mean(axis=0)
            # fixed: unwrap_vector is imported directly (was pbc.unwrap_vector)
            dr = unwrap_vector(com1 - com2, self.processor.currbox)
            xyz[residue.first:residue.last+1] = xyz[residue.first:residue.last+1] + dr
        # finally shift so the protein centre sits at the box middle
        delta = com1 - self.processor.currbox/2.0
        self.processor.currsnap._pos = xyz - delta
MDGroupSelection = namedtuple("MDGroupSelection",["atomgroup", "indices", "transmat"])
class ChainOrderAnalysis(TrajectoryAction):
"""
Class to analyse chain order parameters during a trajectory
Attributes:
-----------
normal : numpy ndarray
the normal of the membrane, assumed to be z-axis
out : string
the output filename
selections : list of MDAnalysis.AtomGroup
the selections
records : list of MDRecord
the chain orders at each timestep
"""
    @staticmethod
    def descr() :
        # one-line help text shown by the command-line driver
        return "Analyze chain order parameters"
def add_arguments(self, parser):
parser.add_argument('--selections',nargs="+", help="the chains")
parser.add_argument('--analysis',choices=["CC","CH"], help="the type of analysis C-C or C-H", default="CC")
parser.add_argument('--groups', help="group definitions for pseudo-atom calculation")
parser.add_argument('--gridout', help="the prefix for the filename of a 2D grid")
parser.add_argument('--protmask',help="the selectiom mask for lipid residues")
parser.add_argument('--pmask',help="the selectiom mask for phosphor atoms",default="name P")
parser.add_argument('-o', '--out', help="the output prefix", default="order")
    def setup(self, args):
        """
        Build the carbon (and optionally hydrogen) atom selections from the
        parsed command-line arguments, and prepare output/grid state.

        Selections are given as "RESNAME:chainspec" where chainspec is either
        a '-'-separated atom list, an "(a..b)" expansion, or "@name1" to
        auto-enumerate numbered atoms.
        """
        def _get_h(atomgrp):
            # find the first light (mass < 5) atom bonded to the carbon and
            # select the equivalent atom in every residue of that resname
            for atom2 in atomgrp[0].bonded_atoms :
                if atom2.mass < 5.0 :
                    return self.processor.universe.select_atoms("resname %s and name %s"%(atomgrp[0].resname,atom2.name))
            raise Exception("Could not find any H atom bonded to %s in %s"%(atomgrp[0].name,atomgrp[0].resname))
        def _enumerateatoms(resname, atomstr) :
            # expand e.g. "C1" into C1, C2, ... for as long as the atom
            # exists in the first residue of this residue name
            lipid = self.processor.universe.select_atoms("resname %s"%resname)[0].residue
            base = atomstr[:-1]
            atomi = int(atomstr[-1])
            lst = []
            while True :
                try :
                    name = "%s%d"%(base,atomi)
                    dummy = lipid[name]
                    lst.append(name)
                    atomi += 1
                except :
                    # NOTE(review): bare except used as a stop condition for
                    # "atom does not exist" -- exception type unverified here
                    break
            return lst
        def _expandlist(liststr):
            # expand "C(2..9)" style specifications into C2 ... C9
            l, r = liststr.split("..")
            i = l.find("(")
            start = int(l[i+1:])
            l = l[:i]
            i = r.find(")")
            end = int(r[:i])
            r = r[i+1:]
            return ["%s%d%s"%(l,i,r) for i in range(start,end+1)]
        self.headers = ["Time"]
        self.selheaders = []
        self.selections = []
        self.analtype = args.analysis
        if self.analtype == "CH" :
            self.hselections = []
        self.resgroups = None
        if args.groups is not None:
            if self.analtype == "CH" :
                raise Exception("Cannot perform C-H analysis on pseudo-atoms")
            self.resgroups = read_groups(args.groups)
        for selin in args.selections:
            resname, chainlist = selin.split(":")
            if self.resgroups is not None:
                if resname not in self.resgroups:
                    raise Exception("Cannot find %s in groups spec."%resname)
                pseudoatoms = [group.name for group in self.resgroups[resname].groups]
            if chainlist.find("-") > -1:
                atomlist = chainlist.split("-")
            elif chainlist.find("..") > -1:
                atomlist = _expandlist(chainlist)
            elif chainlist.startswith("@") :
                atomlist = _enumerateatoms(resname, chainlist[1:])
            else:
                raise Exception("Atom list need be specified with '-' or with expansion '(..)'")
            if self.resgroups is None:
                # plain atomic selections: one AtomGroup per chain atom
                atomsels = [self.processor.universe.select_atoms("resname %s and name %s"%(resname,atom))
                            for atom in atomlist]
                print("%s (%s) - %d atoms and %d atoms in first selection"% \
                    (resname, ",".join(atomlist), len(atomlist), len(atomsels[0])))
                for atomgrp, atom in zip(atomsels[1:], atomlist[1:]):
                    if len(atomgrp) != len(atomsels[0]):
                        raise Exception("Selection for %s is different in length than the first selection"%atom)
                self.selections.append(atomsels)
            else:
                for atom in atomlist:
                    if atom not in pseudoatoms :
                        raise Exception("Could not find selected atom %s in the group spec."%atom)
                # Select all atoms for the selected residue, the coordinates
                # will be transformed to pseudo-atoms
                atomsel = self.processor.universe.select_atoms("resname %s"%resname)
                atomnames = [atom.name for atom in atomsel.residues[0].atoms]
                ngroups = len(self.resgroups[resname].groups)
                natoms = len(atomnames)
                nres = len(atomsel.residues)
                # Create the pseudo atom indices
                indices0 = [self.resgroups[resname].indices(atom) for atom in atomlist]
                indices = [[i0[0]+ngroups*i for i in range(nres)] for i0 in indices0]
                # Create the transformation matrix by replicating the one
                # for the first residue along the block diagonal
                transmat0 = self.resgroups[resname].transmat(atomnames)
                transmat = np.zeros([ngroups*nres,natoms*nres])
                for i in range(nres):
                    transmat[i*ngroups:(i+1)*ngroups,i*natoms:(i+1)*natoms] = transmat0
                self.selections.append(MDGroupSelection(atomsel, indices, transmat))
                print("%s (%s) - %d atoms and %d atoms in first selection"% \
                    (resname, ",".join(atomlist), len(atomlist), len(indices[0])))
            self.headers.extend(["%s/%s"%(resname, atom) for atom in atomlist])
            self.selheaders.append(["%s/%s"%(resname, atom) for atom in atomlist])
            if self.analtype == "CH":
                hatomsels = [_get_h(atomgrp) for atomgrp in atomsels]
                self.hselections.append(hatomsels)
                for atomgrp, atom in zip(hatomsels[1:], atomlist[1:]):
                    if len(atomgrp) != len(hatomsels[0]):
                        raise Exception("H-selection for %s is different in length than the first selection"%atom)
        self.out = args.out
        # Assumes that the normal is along the z-axis
        self.normal = np.array([0.0,0.0,1.0])
        self.records = []
        self.gridout = args.gridout
        self.phosphorsel = None
        if self.gridout is not None :
            # optional 2D grids for spatially resolved order parameters,
            # centred on the phosphate mid-plane
            bounds = np.asarray([[0.0, 0.0, 0.0],self.processor.universe.dimensions[:3]])
            self.grid_low = AnalysisGrid(bounds)
            self.grid_upp = AnalysisGrid(bounds)
            self.phosphorsel = self.processor.universe.select_atoms(args.pmask)
            if args.protmask is not None :
                self.protsel = self.processor.universe.select_atoms(args.protmask)
                self.grid_prot = AnalysisGrid(bounds)
                self.protone = np.ones(len(self.protsel))
            else :
                self.protsel = None
    def process(self):
        """
        Compute order parameters for every configured selection at the
        current snapshot and append them as one MDRecord.
        """
        mid = None
        if self.gridout is not None :
            # membrane mid-point from the phosphate atoms; used to assign
            # values to the upper/lower leaflet grids
            mid = self.phosphorsel.center_of_geometry()
            if self.protsel is not None :
                self.grid_prot.accumulate(self.protsel.positions-mid, self.protone)
        orders = []
        if self.analtype == "CC":
            if self.resgroups is None:
                # order parameter of each consecutive C-C vector
                for selection in self.selections :
                    for a1, a2 in zip(selection[:-1],selection[1:]):
                        orders.append(self._calc_order(a1.positions,
                                        a2.positions, self.normal, mid))
            else:
                # pseudo-atom route: transform atomic coordinates to group
                # coordinates first; the very first snapshot is also dumped
                # as an xyz file for visual inspection
                if self.processor.nprocessed == 1:
                    f = open(self.out+"_first_pseudo.xyz", "w")
                for selection in self.selections :
                    xyz = np.dot(selection.transmat, selection.atomgroup.positions)
                    if self.processor.nprocessed == 1:
                        for pos in xyz:
                            f.write("c %.3f %.3f %.3f\n"%(pos[0], pos[1], pos[2]))
                    for i1, i2 in zip(selection.indices[:-1], selection.indices[1:]):
                        orders.append(self._calc_order(xyz[i1,:],
                                        xyz[i2,:], self.normal, mid))
                if self.processor.nprocessed == 1:
                    f.close()
        elif self.analtype == "CH":
            # order parameter of each C-H vector
            for cselection, hselection in zip(self.selections, self.hselections):
                for a1, a2 in zip(cselection, hselection):
                    orders.append(self._calc_order(a1.positions,
                                    a2.positions, self.normal, mid))
        self.records.append(MDRecord(self.processor.currtime, orders))
def | |
self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 1
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return functools.reduce(lambda x,y: x+y, packed)
    @staticmethod
    def unpack(reader):
        # Peek at the leading 16-bit type field and dispatch to a
        # registered subclass decoder when one exists for this subtype.
        subtype, = reader.peek('!H', 0)
        subclass = bundle_features_prop.subtypes.get(subtype)
        if subclass:
            return subclass.unpack(reader)
        # Unknown subtype: decode the generic TLV header only.
        obj = bundle_features_prop()
        obj.type = reader.read("!H")[0]
        _length = reader.read("!H")[0]
        orig_reader = reader
        # Constrain reads to this property and advance the parent reader
        # past it (_length includes the 4 header bytes already consumed).
        reader = orig_reader.slice(_length, 4)
        return obj
    def __eq__(self, other):
        # Structural equality: identical concrete class and type field.
        if type(self) != type(other): return False
        if self.type != other.type: return False
        return True
    def pretty_print(self, q):
        # Render this (payload-less) property through the pretty-printer 'q'.
        q.text("bundle_features_prop {")
        with q.group():
            with q.indent(2):
                q.breakable()
            q.breakable()
        q.text('}')
class time(loxi.OFObject):
    """OpenFlow of_time wire struct: 64-bit seconds, 32-bit nanoseconds
    and 4 bytes of padding (network byte order).

    NOTE: machine-generated protocol code; the class name mirrors the wire
    type and intentionally shadows the stdlib ``time`` module name here.
    """
    def __init__(self, seconds=None, nanoseconds=None):
        if seconds != None:
            self.seconds = seconds
        else:
            self.seconds = 0
        if nanoseconds != None:
            self.nanoseconds = nanoseconds
        else:
            self.nanoseconds = 0
        return
    def pack(self):
        # Serialize to the fixed 16-byte wire layout.
        packed = []
        packed.append(struct.pack("!Q", self.seconds))
        packed.append(struct.pack("!L", self.nanoseconds))
        packed.append(b'\x00' * 4)
        return functools.reduce(lambda x,y: x+y, packed)
    @staticmethod
    def unpack(reader):
        obj = time()
        obj.seconds = reader.read("!Q")[0]
        obj.nanoseconds = reader.read("!L")[0]
        reader.skip(4)
        return obj
    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.seconds != other.seconds: return False
        if self.nanoseconds != other.nanoseconds: return False
        return True
    def pretty_print(self, q):
        q.text("time {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("seconds = ");
                q.text("%#x" % self.seconds)
                q.text(","); q.breakable()
                q.text("nanoseconds = ");
                q.text("%#x" % self.nanoseconds)
            q.breakable()
        q.text('}')
class bundle_features_prop_time(bundle_features_prop):
    """Bundle-features property carrying scheduling time bounds (subtype 1).

    NOTE: machine-generated protocol code; of_time_t (de)serialization is
    not implemented by the generator, hence the loxi.unimplemented() stubs.
    """
    type = 1
    def __init__(self, sched_accuracy=None, sched_max_future=None, sched_max_past=None, timestamp=None):
        if sched_accuracy != None:
            self.sched_accuracy = sched_accuracy
        else:
            self.sched_accuracy = loxi.unimplemented('init of_time_t')
        if sched_max_future != None:
            self.sched_max_future = sched_max_future
        else:
            self.sched_max_future = loxi.unimplemented('init of_time_t')
        if sched_max_past != None:
            self.sched_max_past = sched_max_past
        else:
            self.sched_max_past = loxi.unimplemented('init of_time_t')
        if timestamp != None:
            self.timestamp = timestamp
        else:
            self.timestamp = loxi.unimplemented('init of_time_t')
        return
    def pack(self):
        # TLV header first; the length placeholder at index 1 is patched
        # once the total size is known.
        packed = []
        packed.append(struct.pack("!H", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 1
        packed.append(b'\x00' * 4)
        packed.append(loxi.unimplemented('pack of_time_t'))
        packed.append(loxi.unimplemented('pack of_time_t'))
        packed.append(loxi.unimplemented('pack of_time_t'))
        packed.append(loxi.unimplemented('pack of_time_t'))
        length = sum([len(x) for x in packed])
        packed[1] = struct.pack("!H", length)
        return functools.reduce(lambda x,y: x+y, packed)
    @staticmethod
    def unpack(reader):
        obj = bundle_features_prop_time()
        _type = reader.read("!H")[0]
        assert(_type == 1)
        _length = reader.read("!H")[0]
        orig_reader = reader
        # restrict reads to this property's payload
        reader = orig_reader.slice(_length, 4)
        reader.skip(4)
        obj.sched_accuracy = loxi.unimplemented('unpack of_time_t')
        obj.sched_max_future = loxi.unimplemented('unpack of_time_t')
        obj.sched_max_past = loxi.unimplemented('unpack of_time_t')
        obj.timestamp = loxi.unimplemented('unpack of_time_t')
        return obj
    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.sched_accuracy != other.sched_accuracy: return False
        if self.sched_max_future != other.sched_max_future: return False
        if self.sched_max_past != other.sched_max_past: return False
        if self.timestamp != other.timestamp: return False
        return True
    def pretty_print(self, q):
        q.text("bundle_features_prop_time {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("sched_accuracy = ");
                q.pp(self.sched_accuracy)
                q.text(","); q.breakable()
                q.text("sched_max_future = ");
                q.pp(self.sched_max_future)
                q.text(","); q.breakable()
                q.text("sched_max_past = ");
                q.pp(self.sched_max_past)
                q.text(","); q.breakable()
                q.text("timestamp = ");
                q.pp(self.timestamp)
            q.breakable()
        q.text('}')
# register this subtype so bundle_features_prop.unpack() dispatches here
bundle_features_prop.subtypes[1] = bundle_features_prop_time
class controller_status_entry(loxi.OFObject):
    """Length-prefixed controller-status entry of an OpenFlow
    controller-status message (machine-generated protocol code)."""
    def __init__(self, short_id=None, role=None, reason=None, channel_status=None, properties=None):
        if short_id != None:
            self.short_id = short_id
        else:
            self.short_id = 0
        if role != None:
            self.role = role
        else:
            self.role = 0
        if reason != None:
            self.reason = reason
        else:
            self.reason = 0
        if channel_status != None:
            self.channel_status = channel_status
        else:
            self.channel_status = 0
        if properties != None:
            self.properties = properties
        else:
            self.properties = []
        return
    def pack(self):
        # The leading 16-bit length (placeholder at index 0) is patched
        # after the variable-size property list has been serialized.
        packed = []
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 0
        packed.append(struct.pack("!H", self.short_id))
        packed.append(struct.pack("!L", self.role))
        packed.append(struct.pack("!B", self.reason))
        packed.append(struct.pack("!B", self.channel_status))
        packed.append(b'\x00' * 6)
        packed.append(loxi.generic_util.pack_list(self.properties))
        length = sum([len(x) for x in packed])
        packed[0] = struct.pack("!H", length)
        return functools.reduce(lambda x,y: x+y, packed)
    @staticmethod
    def unpack(reader):
        obj = controller_status_entry()
        _length = reader.read("!H")[0]
        orig_reader = reader
        # restrict further reads to this entry's bytes
        reader = orig_reader.slice(_length, 2)
        obj.short_id = reader.read("!H")[0]
        obj.role = reader.read("!L")[0]
        obj.reason = reader.read("!B")[0]
        obj.channel_status = reader.read("!B")[0]
        reader.skip(6)
        obj.properties = loxi.generic_util.unpack_list(reader, ofp.common.controller_status_prop.unpack)
        return obj
    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.short_id != other.short_id: return False
        if self.role != other.role: return False
        if self.reason != other.reason: return False
        if self.channel_status != other.channel_status: return False
        if self.properties != other.properties: return False
        return True
    def pretty_print(self, q):
        q.text("controller_status_entry {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("short_id = ");
                q.text("%#x" % self.short_id)
                q.text(","); q.breakable()
                q.text("role = ");
                # show the symbolic enum name when the value is known
                value_name_map = {0: 'OFPCR_ROLE_NOCHANGE', 1: 'OFPCR_ROLE_EQUAL', 2: 'OFPCR_ROLE_MASTER', 3: 'OFPCR_ROLE_SLAVE'}
                if self.role in value_name_map:
                    q.text("%s(%d)" % (value_name_map[self.role], self.role))
                else:
                    q.text("%#x" % self.role)
                q.text(","); q.breakable()
                q.text("reason = ");
                value_name_map = {0: 'OFPCSR_REQUEST', 1: 'OFPCSR_CHANNEL_STATUS', 2: 'OFPCSR_ROLE', 3: 'OFPCSR_CONTROLLER_ADDED', 4: 'OFPCSR_CONTROLLER_REMOVED', 5: 'OFPCSR_SHORT_ID', 6: 'OFPCSR_EXPERIMENTER'}
                if self.reason in value_name_map:
                    q.text("%s(%d)" % (value_name_map[self.reason], self.reason))
                else:
                    q.text("%#x" % self.reason)
                q.text(","); q.breakable()
                q.text("channel_status = ");
                value_name_map = {0: 'OFPCT_STATUS_UP', 1: 'OFPCT_STATUS_DOWN'}
                if self.channel_status in value_name_map:
                    q.text("%s(%d)" % (value_name_map[self.channel_status], self.channel_status))
                else:
                    q.text("%#x" % self.channel_status)
                q.text(","); q.breakable()
                q.text("properties = ");
                q.pp(self.properties)
            q.breakable()
        q.text('}')
class controller_status_prop(loxi.OFObject):
    """Generic TLV base for controller-status properties; concrete
    subtypes register themselves in ``subtypes`` for unpack dispatch
    (machine-generated protocol code)."""
    subtypes = {}
    def __init__(self, type=None):
        if type != None:
            self.type = type
        else:
            self.type = 0
        return
    def pack(self):
        packed = []
        packed.append(struct.pack("!H", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 1
        length = sum([len(x) for x in packed])
        packed[1] = struct.pack("!H", length)
        return functools.reduce(lambda x,y: x+y, packed)
    @staticmethod
    def unpack(reader):
        # Dispatch on the peeked subtype when a specific decoder exists.
        subtype, = reader.peek('!H', 0)
        subclass = controller_status_prop.subtypes.get(subtype)
        if subclass:
            return subclass.unpack(reader)
        obj = controller_status_prop()
        obj.type = reader.read("!H")[0]
        _length = reader.read("!H")[0]
        orig_reader = reader
        # skip the remainder of an unknown property
        reader = orig_reader.slice(_length, 4)
        return obj
    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.type != other.type: return False
        return True
    def pretty_print(self, q):
        q.text("controller_status_prop {")
        with q.group():
            with q.indent(2):
                q.breakable()
            q.breakable()
        q.text('}')
class flow_lightweight_stats_entry(loxi.OFObject):
    """Length-prefixed lightweight flow-stats entry: table id, reason,
    priority, a match and a stats block (machine-generated protocol code)."""
    def __init__(self, table_id=None, reason=None, priority=None, match=None, stats=None):
        if table_id != None:
            self.table_id = table_id
        else:
            self.table_id = 0
        if reason != None:
            self.reason = reason
        else:
            self.reason = 0
        if priority != None:
            self.priority = priority
        else:
            self.priority = 0
        if match != None:
            self.match = match
        else:
            self.match = ofp.match()
        if stats != None:
            self.stats = stats
        else:
            self.stats = ofp.stat()
        return
    def pack(self):
        # The leading 16-bit length (placeholder at index 0) is patched
        # after match and stats have been serialized.
        packed = []
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 0
        packed.append(b'\x00' * 2)
        packed.append(struct.pack("!B", self.table_id))
        packed.append(struct.pack("!B", self.reason))
        packed.append(struct.pack("!H", self.priority))
        packed.append(self.match.pack())
        packed.append(self.stats.pack())
        length = sum([len(x) for x in packed])
        packed[0] = struct.pack("!H", length)
        return functools.reduce(lambda x,y: x+y, packed)
    @staticmethod
    def unpack(reader):
        obj = flow_lightweight_stats_entry()
        _length = reader.read("!H")[0]
        orig_reader = reader
        # restrict further reads to this entry's bytes
        reader = orig_reader.slice(_length, 2)
        reader.skip(2)
        obj.table_id = reader.read("!B")[0]
        obj.reason = reader.read("!B")[0]
        obj.priority = reader.read("!H")[0]
        obj.match = ofp.match.unpack(reader)
        obj.stats = ofp.stat.unpack(reader)
        return obj
    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.table_id != other.table_id: return False
        if self.reason != other.reason: return False
        if self.priority != other.priority: return False
        if self.match != other.match: return False
        if self.stats != other.stats: return False
        return True
    def pretty_print(self, q):
        q.text("flow_lightweight_stats_entry {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("table_id = ");
                q.text("%#x" % self.table_id)
                q.text(","); q.breakable()
                q.text("reason = ");
                # show the symbolic enum name when the value is known
                value_name_map = {0: 'OFPFSR_STATS_REQUEST', 1: 'OFPFSR_STAT_TRIGGER'}
                if self.reason in value_name_map:
                    q.text("%s(%d)" % (value_name_map[self.reason], self.reason))
                else:
                    q.text("%#x" % self.reason)
                q.text(","); q.breakable()
                q.text("priority = ");
                q.text("%#x" % self.priority)
                q.text(","); q.breakable()
                q.text("match = ");
                q.pp(self.match)
                q.text(","); q.breakable()
                q.text("stats = ");
                q.pp(self.stats)
            q.breakable()
        q.text('}')
class flow_monitor_entry(loxi.OFObject):
def __init__(self, monitor_id=None, out_port=None, out_group=None, flags=None, table_id=None, command=None, match=None):
if monitor_id != None:
self.monitor_id = monitor_id
else:
self.monitor_id = 0
if out_port != None:
self.out_port = out_port
else:
self.out_port = 0
if out_group != None:
self.out_group = out_group
else:
self.out_group = 0
if flags != None:
self.flags = flags
else:
self.flags = 0
if table_id != None:
self.table_id = table_id
else:
self.table_id = 0
if command != None:
self.command = command
else:
self.command = 0
if match != None:
self.match = match
else:
self.match = ofp.match()
return
def pack(self):
packed = []
packed.append(struct.pack("!L", self.monitor_id))
packed.append(struct.pack("!L", self.out_port))
packed.append(struct.pack("!L", self.out_group))
packed.append(struct.pack("!H", self.flags))
packed.append(struct.pack("!B", self.table_id))
packed.append(struct.pack("!B", self.command))
packed.append(self.match.pack())
return functools.reduce(lambda x,y: x+y, packed)
@staticmethod
def unpack(reader):
obj = flow_monitor_entry()
obj.monitor_id = reader.read("!L")[0]
obj.out_port = reader.read("!L")[0]
obj.out_group = reader.read("!L")[0]
obj.flags = reader.read("!H")[0]
obj.table_id = reader.read("!B")[0]
obj.command = reader.read("!B")[0]
obj.match = ofp.match.unpack(reader)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.monitor_id != other.monitor_id: return False
if self.out_port != other.out_port: return False
if self.out_group != other.out_group: return False
if self.flags != other.flags: return False
if self.table_id != other.table_id: return False
if self.command != other.command: return False
if self.match != other.match: return False
return True
def pretty_print(self, q):
q.text("flow_monitor_entry {")
with q.group():
with q.indent(2):
q.breakable()
q.text("monitor_id = ");
q.text("%#x" % self.monitor_id)
q.text(","); q.breakable()
q.text("out_port = ");
q.text("%#x" % self.out_port)
q.text(","); q.breakable()
q.text("out_group = ");
q.text("%#x" % self.out_group)
q.text(","); q.breakable()
q.text("flags | |
which_mc = self.vBucketMap[vBucketId]
for server in self.memcacheds:
if server != which_mc:
return self.memcacheds[server]
def set(self, key, exp, flags, value):
vb_error = 0
while True:
try:
return self._send_op(self.memcached(key).set, key, exp, flags, value)
except MemcachedError as error:
if error.status == ERR_NOT_MY_VBUCKET and vb_error < 3:
self.reset_vbucket(self.rest, key)
vb_error += 1
else:
raise error
except EOFError as error:
if "Got empty data (remote died?)" in error.message or \
"Timeout waiting for socket send." in error.message \
and vb_error < 3:
self.reset_vbucket(self.rest, key)
vb_error += 1
else:
raise error
except BaseException as error:
if vb_error < 3:
self.reset_vbucket(self.rest, key)
vb_error += 1
else:
raise error
def get(self, key):
vb_error = 0
while True:
try:
return self._send_op(self.memcached(key).get, key)
except MemcachedError as error:
if error.status == ERR_NOT_MY_VBUCKET and vb_error < 3:
self.reset_vbucket(self.rest, key)
vb_error += 1
else:
raise error
except EOFError as error:
if "Got empty data (remote died?)" in error.message or \
"Timeout waiting for socket send." in error.message \
and vb_error < 3:
self.reset_vbucket(self.rest, key)
vb_error += 1
else:
raise error
except BaseException as error:
if vb_error < 3:
self.reset_vbucket(self.rest, key)
vb_error += 1
else:
raise error
def delete(self, key):
vb_error = 0
while True:
try:
return self._send_op(self.memcached(key).delete, key)
except MemcachedError as error:
if error.status in [ERR_NOT_MY_VBUCKET, ERR_EINVAL] and vb_error < 3:
self.reset_vbucket(self.rest, key)
vb_error += 1
else:
raise error
except EOFError as error:
if "Got empty data (remote died?)" in error.message or \
"Timeout waiting for socket send." in error.message \
and vb_error < 3:
self.reset_vbucket(self.rest, key)
vb_error += 1
else:
raise error
except BaseException as error:
if vb_error < 3:
self.reset_vbucket(self.rest, key)
vb_error += 1
else:
raise error
def _send_op(self, func, *args):
backoff = .001
while True:
try:
return func(*args)
except MemcachedError as error:
if error.status == ERR_ETMPFAIL and backoff < .5:
time.sleep(backoff)
backoff *= 2
else:
raise error
except EOFError, IOError:
raise MemcachedError(ERR_NOT_MY_VBUCKET, "Connection reset")
def done(self):
[self.memcacheds[ip].close() for ip in self.memcacheds]
class KVStoreAwareSmartClient(VBucketAwareMemcached):
    """VBucket-aware memcached client that mirrors mutations into a local
    ClientKeyValueStore so tests can later verify cluster contents.

    Mutating/reading helpers serialise on `_rlock` so the kv_store and
    memcached stay consistent with each other.
    """
    def __init__(self, rest, bucket, kv_store = None, info=None, store_enabled = True):
        VBucketAwareMemcached.__init__(self, rest, bucket, info)
        self.kv_store = kv_store or ClientKeyValueStore()
        self.store_enabled = store_enabled
        self._rlock = threading.Lock()

    def set(self, key, value, ttl = -1):
        """Store `value` under `key` (optionally with a ttl) in memcached
        and, when store_enabled, record the value's md5 digest in kv_store.
        """
        self._rlock.acquire()
        try:
            if ttl >= 0:
                self.memcached(key).set(key, ttl, 0, value)
            else:
                self.memcached(key).set(key, 0, 0, value)
            if self.store_enabled:
                self.kv_store.write(key, hashlib.md5(value).digest(), ttl)
        except MemcachedError as e:
            raise MemcachedError(e.status, e.msg)
        except AssertionError:
            raise AssertionError
        except:
            raise Exception("General Exception from KVStoreAwareSmartClient.set()")
        finally:
            # finally guarantees exactly one release on every path; the
            # original released manually in each branch.
            self._rlock.release()

    def get_doc_metadata(self, num_vbuckets, key):
        """Retrieve meta data of document from disk via a `vkey` stats call.

        Returns the stats dict, or None if the key is missing.
        """
        vid = crc32.crc32_hash(key) & (num_vbuckets - 1)
        mc = self.memcached(key)
        metadatastats = None
        try:
            metadatastats = mc.stats("vkey {0} {1}".format(key, vid))
        except MemcachedError:
            msg = "key {0} doesn't exist in memcached".format(key)
            # NOTE(review): self.log is not assigned in this class; presumably
            # the VBucketAwareMemcached base provides it — confirm.
            self.log.info(msg)
        return metadatastats

    def delete(self, key):
        """Delete `key` from memcached and mirror the delete into kv_store.

        Raises MemcachedError(7, ...) when the delete fails or returns a
        non-zero cas.  Fixes two defects in the original: the lock was
        released twice on the invalid-cas path (the in-branch release was
        followed by the handler's release), and with store_enabled=False
        every successful delete was misreported as an invalid cas.
        """
        self._rlock.acquire()
        try:
            opaque, cas, data = self.memcached(key).delete(key)
            if cas != 0:
                raise MemcachedError(7, "Invalid cas value")
            if self.store_enabled:
                self.kv_store.delete(key)
        except MemcachedError:
            raise
        except Exception as e:
            raise MemcachedError(7, e)
        finally:
            self._rlock.release()

    def get_valid_key(self, key):
        # kv_store entry only if its status is "valid".
        return self.get_key_check_status(key, "valid")

    def get_deleted_key(self, key):
        # kv_store entry only if its status is "deleted".
        return self.get_key_check_status(key, "deleted")

    def get_expired_key(self, key):
        # kv_store entry only if its status is "expired".
        return self.get_key_check_status(key, "expired")

    def get_all_keys(self):
        return self.kv_store.keys()

    def get_all_valid_items(self):
        return self.kv_store.valid_items()

    def get_all_deleted_items(self):
        return self.kv_store.deleted_items()

    def get_all_expired_items(self):
        return self.kv_store.expired_items()

    def get_key_check_status(self, key, status):
        """Return the kv_store entry for `key` if its status matches, else None."""
        item = self.kv_get(key)
        if item is not None and item["status"] == status:
            return item
        msg = "key {0} is not valid".format(key)
        # NOTE(review): self.log presumably comes from the base class — confirm.
        self.log.info(msg)
        return None

    # safe kvstore retrieval
    # return dict of {key,status,value,ttl}
    # or None if not found
    def kv_get(self, key):
        try:
            return self.kv_store.read(key)
        except KeyError:
            # key doesn't exist in the local store
            return None

    # safe memcached retrieval
    # return dict of {key, flags, seq, value}
    # or None if not found
    def mc_get(self, key):
        """Like mc_get_full(), but the value is replaced by its md5 digest so
        it can be compared against kv_store entries."""
        item = self.mc_get_full(key)
        if item is not None:
            item["value"] = hashlib.md5(item["value"]).digest()
        return item

    # unhashed value
    def mc_get_full(self, key):
        try:
            flags, seq, value = self.memcached(key).get(key)
        except MemcachedError:
            # key doesn't exist in memcached
            return None
        return {"key": key, "flags": flags, "seq": seq, "value": value}

    def kv_mc_sync_get(self, key, status):
        """Atomically (w.r.t. this client's lock) fetch the kv_store entry
        with the given status and the memcached entry for `key`."""
        self._rlock.acquire()
        try:
            kv_item = self.get_key_check_status(key, status)
            mc_item = self.mc_get(key)
        finally:
            self._rlock.release()
        return kv_item, mc_item
class KVStoreSmartClientHelper(object):
    """Static verification helpers that cross-check a KVStoreAwareSmartClient's
    local kv_store against what memcached actually holds."""

    @staticmethod
    def do_verification(client):
        """Verify every key known to the client; return {key: failure_message}."""
        failures = {}
        for key in client.get_all_keys():
            message, ok = KVStoreSmartClientHelper.verify_key(client, key)
            if not ok:
                failures[key] = message
        return failures

    @staticmethod
    def verify_key(client, key):
        """Dispatch to the matching checker based on the kv_store status.

        Returns (message, status); ("", False) when the key is unknown or
        carries an unrecognised status.
        """
        checkers = {
            "deleted": KVStoreSmartClientHelper.verify_delete,
            "expired": KVStoreSmartClientHelper.verify_expired,
            "valid": KVStoreSmartClientHelper.verify_set,
        }
        msg, ok = "", False
        entry = client.kv_get(key)
        if entry is not None and entry["status"] in checkers:
            msg, ok = checkers[entry["status"]](client, key)
        return msg, ok

    # verify kvstore contains key with valid status
    # and that key also exists in memcached with
    # expected value
    @staticmethod
    def verify_set(client, key):
        kv_entry = client.get_valid_key(key)
        mc_entry = client.mc_get(key)
        if kv_entry is None:
            return "valid status not set in kv_store", False
        if mc_entry is None:
            return "key missing from memcached", False
        if kv_entry["value"] == mc_entry["value"]:
            return "", True
        return "kvstore and memcached values mismatch", False

    # verify kvstore contains key with deleted status
    # and that it does not exist in memcached
    @staticmethod
    def verify_delete(client, key):
        kv_entry = client.get_deleted_key(key)
        mc_entry = client.mc_get(key)
        if kv_entry is None:
            return "delete status not set in kv_store", False
        if mc_entry is not None:
            return "key still exists in memcached", False
        return "", True

    # verify kvstore contains key with expired status
    # and that key has also expired in memcached
    @staticmethod
    def verify_expired(client, key):
        kv_entry = client.get_expired_key(key)
        mc_entry = client.mc_get(key)
        if kv_entry is None:
            return "exp. status not set in kv_store", False
        if mc_entry is not None:
            return "key still exists in memcached", False
        return "", True
def start_reader_process(info, keyset, queue):
    # Fire-and-forget helper: spawn a ReaderThread that reads `keyset` and
    # reports results through `queue`; the thread is not joined here.
    ReaderThread(info, keyset, queue).start()
class GeneratedDocuments(object):
    """Lazy iterator producing `items` JSON document strings from
    `kv_template`, substituting ${prefix} (document index), ${padding} and
    ${seed} placeholders in string values.

    Supports both the Python 2 (`next`) and Python 3 (`__next__`) iterator
    protocols; the original only implemented the former.
    """
    def __init__(self, items, kv_template, options=None):
        # `options` defaults to a fresh dict per instance; the original used
        # a shared mutable default argument.
        if options is None:
            options = dict(size=1024)
        self._items = items
        self._kv_template = kv_template
        self._options = options
        self._pointer = 0
        if "padding" in options:
            self._pad = options["padding"]
        else:
            self._pad = DocumentGenerator._random_string(options["size"])

    # Required for the for-in syntax
    def __iter__(self):
        return self

    def __len__(self):
        return self._items

    def reset(self):
        """Rewind the iterator to the first document."""
        self._pointer = 0

    def has_next(self):
        """True while more documents remain."""
        return self._pointer != self._items

    # Returns the next value of the iterator
    def next(self):
        if self._pointer == self._items:
            raise StopIteration
        i = self._pointer
        doc = {"meta": {"id": "{0}-{1}".format(i, self._options["seed"])}, "json": {}}
        for k in self._kv_template:
            v = self._kv_template[k]
            if isinstance(v, str) and v.find("${prefix}") != -1:
                v = v.replace("${prefix}", "{0}".format(i))
            # the value size is controlled by the ${padding} substitution
            if isinstance(v, str) and v.find("${padding}") != -1:
                v = v.replace("${padding}", self._pad)
            if isinstance(v, str) and v.find("${seed}") != -1:
                v = v.replace("${seed}", "{0}".format(self._options["seed"]))
            doc["json"][k] = v
        self._pointer += 1
        return json.dumps(doc)

    # Python 3 iterator protocol: alias to the py2-style next().
    __next__ = next
class DocumentGenerator(object):
#will loop over all values in props and replace ${prefix} with ${i}
@staticmethod
def make_docs(items, kv_template, options=dict(size=1024, seed=str(uuid.uuid4()))):
return GeneratedDocuments(items, kv_template, options)
@staticmethod
def _random_string(length):
return (("%%0%dX" % (length * 2)) % random.getrandbits(length * 8)).encode("ascii")
@staticmethod
def create_value(pattern, size):
return (pattern * (size / len(pattern))) + pattern[0:(size % len(pattern))]
@staticmethod
def get_doc_generators(count, kv_template = None, seed = None, sizes = None):
seed = seed or str(uuid.uuid4())[0:7]
sizes = sizes or [128]
doc_gen_iterators = []
if kv_template is None:
kv_template = {"name": "doc-${prefix}-${seed}",
"sequence": "${seed}",
"email": <EMAIL>"}
for size in sizes:
options = {"size": size, "seed": seed}
docs = DocumentGenerator.make_docs(count / len(sizes),
kv_template, options)
doc_gen_iterators.append(docs)
return doc_gen_iterators
@staticmethod
def get_doc_generators_by_load_ratio(rest,
bucket='default',
ram_load_ratio = 1,
value_size_distribution=None,
seed = None):
log = logger.Logger.get_logger()
if ram_load_ratio < 0 :
raise MemcachedClientHelperExcetion(errorcode='invalid_argument',
message="ram_load_ratio")
if not value_size_distribution:
value_size_distribution = {16: 0.25, 128: 0.25, 512: 0.25, 1024: 0.25}
list = []
info = rest.get_bucket(bucket)
emptySpace = info.stats.ram - info.stats.memUsed
space_to_fill = (int((emptySpace * ram_load_ratio) / 100.0))
log.info('space_to_fill : {0}, emptySpace : {1}'.format(space_to_fill, emptySpace))
for size, probability in value_size_distribution.items():
how_many = int(space_to_fill / (size + 250) * probability)
doc_seed = | |
import re
import itertools
import os
import pandas as pd
import numpy as np
from prettytable import PrettyTable
from tqdm import tqdm
def get_char(seq):
    """Split a string into a pandas.Series of its individual characters."""
    return pd.Series(list(seq))
class SeqProcessConfig(object):
    """Holds sequence/editable-window coordinates and converts them from the
    user-facing offset numbering into 0-based indices."""

    def __init__(self, seq_len, seq_stend, ewindow_stend, offset_val):
        self.seq_len = seq_len
        # start/end pairs are expressed relative to offset_val
        self.seq_stend = seq_stend
        self.ewindow_stend = ewindow_stend
        self.offset_val = offset_val
        self._determine_offset_range()
        self._translate_to_0based_indexing()

    def _determine_offset_range(self):
        """Compute the printed numbering range implied by offset_val."""
        start = self.offset_val
        if start <= 0:
            # e.g. -4..25 spans 30 positions (0 is included in the numbering)
            end = self.seq_len - abs(start) - 1
        else:
            # e.g. 1..30 spans 30 positions
            end = self.seq_len + start - 1
        self.offset_st = start
        self.offset_end = end

    def _translate_to_0based_indexing(self):
        """Shift the user-supplied windows into 0-based coordinates."""
        shift = self.offset_val
        self.ewindow_st, self.ewindow_end = (v - shift for v in self.ewindow_stend)
        self.seq_st, self.seq_end = (v - shift for v in self.seq_stend)

    def __str__(self):
        tb = PrettyTable()
        tb.field_names = ['Sequence processing Config', 'Value']
        for label, value in (
                ('sequence length', self.seq_len),
                ('sequence start index (0-based indexing)', self.seq_st),
                ('sequence end index (0-based indexing)', self.seq_end),
                ('editable window start index (0-based indexing)', self.ewindow_st),
                ('editable window end index (0-based indexing)', self.ewindow_end),
                ('offset start numbering', self.offset_st),
                ('offset end numbering', self.offset_end)):
            tb.add_row([label, value])
        return tb.get_string()
class HaplotypeSeqProcessor(object):
    """Prepares base-editing (CRISPR) sequence data: removes violating
    sequences, expands input/outcome sequences into per-position feature
    columns, and enumerates combinatorial editing outcomes for a base editor.
    """
    def __init__(self, base_editor, conversion_nucl, seqconfig, max_num_targets=12):
        self.base_editor = base_editor
        # (target nucleotide, conversion nucleotide), e.g. ('A', 'G')
        self.conversion_nucl = conversion_nucl
        self.seqconfig = seqconfig
        # cap on edited positions considered per sequence (bounds 2^n blow-up)
        self.max_num_targets = max_num_targets
        self.describe()

    def describe(self):
        """Print a summary table of this processor's configuration."""
        tb = PrettyTable()
        tb.field_names = ['Description', 'Value']
        tb.add_row(['Base editor', self.base_editor])
        tb.add_row(['Target nucleotide', self.conversion_nucl[0]])
        tb.add_row(['Conversion nucleotide', self.conversion_nucl[1]])
        tb.add_row(['Maximum number of targets considered', self.max_num_targets])
        print(tb)
        print(self.seqconfig)

    def _determine_target_complem_nucl(self):
        """Return (target nucleotide, conversion nucleotide)."""
        tb_nucl, cb_nucl = self.conversion_nucl
        return tb_nucl, cb_nucl

    def remove_viol_seqs(self, df, inpseq_col):
        """Drop rows whose input sequence lacks the target nucleotide or is
        not exactly seq_len characters long.

        Args:
            df: dataframe
            inpseq_col: string, column name of input sequence such as "Inp_seq"
        """
        print('--- checking for violating seqs ---')
        seq_df = df.copy()
        tb_nucl, __ = self.conversion_nucl
        seqlen = self.seqconfig.seq_len
        cond_letter = ~seq_df[inpseq_col].str.contains(tb_nucl)
        # Fixed: the original `~seq_df[...].str.len() == seqlen` applied `~`
        # to the lengths before comparing (operator precedence), so the
        # length check never flagged anything.
        cond_len = seq_df[inpseq_col].str.len() != seqlen
        df_clean = seq_df
        if cond_len.any() or cond_letter.any():
            cond = cond_letter | cond_len
            print(seq_df.loc[cond, inpseq_col])
            df_clean = seq_df.loc[~cond].copy()
            df_clean.reset_index(inplace=True, drop=True)
        return df_clean

    def _check_duplicates(self, gdf, outcomeseq_colname, pbar, prg_counter):
        """Drop duplicate outcome sequences within one input-sequence group.

        `prg_counter` is kept for signature compatibility but unused: integers
        are passed by value, so the original's increment never propagated.
        """
        gdf_clean = gdf.copy()
        gdf_clean.drop_duplicates(subset=[outcomeseq_colname], inplace=True, ignore_index=True)
        # Fixed: tqdm's update() takes an increment; the original passed a
        # growing counter, overshooting the displayed progress.
        pbar.update(1)
        return gdf_clean

    def preprocess_df(self, df, inpseq_colnames, outcomeseq_colname):
        """Remove duplicated outcome sequences per input-sequence group.

        Args:
            df: dataframe
            inpseq_colnames: list of column names such as ['seq_id', 'Inp_seq']
            outcomeseq_colname: string, column name of observed outcome sequences
        """
        print('--- removing duplicates (if found!) ---')
        prg_counter = 0
        dfg = df.groupby(by=inpseq_colnames)
        pbar = tqdm(total=dfg.ngroups)
        df_clean = dfg.apply(self._check_duplicates, outcomeseq_colname, pbar, prg_counter)
        pbar.close()
        df_clean.reset_index(inplace=True, drop=True)
        return df_clean

    def renormalize_outcome_prop(self, df, by_cols, prop_col):
        """ renormalize the outcome sequence probability (optional, in case it is not normalized!)
        Args:
            df:pd.DataFrame, read data frame
            by_cols: list, input sequence column name/s such as ['seq_id', 'Inp_seq']
            prop_col: string, outcome propotion (i.e. probability) column name
        .. Note:
            this method is run after using :func:`preprocess_df`
        """
        print('--- renormalizing outcome proportion ---')
        group_sum = df.groupby(by=by_cols, as_index=False)[prop_col].sum()
        group_sum['denom'] = group_sum[prop_col]
        merged = df.copy()
        merged = merged.merge(group_sum, on=by_cols, how='left')
        validate_df(merged)
        # after the merge, prop_col is suffixed `_x` (left) / `_y` (group sum)
        merged['prob'] = merged[f'{prop_col}_x'] / merged['denom']
        merged[prop_col] = merged['prob']
        return merged

    def _generate_combinatorial_conversion(self, tbase_indices, conv_nl):
        """Return an iterator over every target/converted-base assignment for
        the given positions (|conv_nl| ** len(tbase_indices) combinations)."""
        num_pos = len(tbase_indices)
        comb_nucl_lst = []
        conv_nl_lst = list(conv_nl)
        for __ in range(num_pos):
            comb_nucl_lst.append(conv_nl_lst)
        return itertools.product(*comb_nucl_lst)

    def generate_combinatorial_outcome(self, df):
        """ Generates combinatorial outcome sequences based on identified canonical bases
        Args:
            df:pd.DataFrame, processed dataframe using :func:`process_inp_outp_df` function
        """
        print('--- generating edit combinations ---')
        seqconfig = self.seqconfig
        conv_nl = self.conversion_nucl
        tb_nucl, cb_nucl = conv_nl
        e_st = seqconfig.ewindow_st
        e_end = seqconfig.ewindow_end
        seqlen = seqconfig.seq_len
        max_num_targets = self.max_num_targets
        res_df_lst = []
        target_cols = ['seq_id', 'Inp_seq', 'Outp_seq']
        for row in tqdm(df.iterrows()):
            indx, record = row
            # letters within the editable window (column labels are 1-based)
            rec_nucl = record[[f'Inp_L{i}' for i in range(e_st+1, e_end+2)]]
            tbase_indices = np.where(rec_nucl == tb_nucl)[0]
            # cap the number of edited positions considered per sequence
            if len(tbase_indices) > max_num_targets:
                tbase_indices = tbase_indices[:max_num_targets]
            comb_nucl_opt = self._generate_combinatorial_conversion(tbase_indices, conv_nl)
            comb_nucl_opt = list(comb_nucl_opt)
            num_options = len(comb_nucl_opt)
            # one row per combination, seeded with the original window letters
            comb_nucl_arr = np.repeat(rec_nucl.values.reshape(1, -1), num_options, axis=0)
            for i_arr, opt in enumerate(comb_nucl_opt):
                comb_nucl_arr[i_arr, tbase_indices] = opt
            comb_nucl_df = pd.DataFrame(comb_nucl_arr)
            comb_nucl_df.columns = [f'Inp_L{i}' for i in range(e_st+1, e_end+2)]
            # the flanks outside the editable window are copied unchanged
            pre_ew_col = record[[f'Inp_L{i}' for i in range(1, e_st+1)]]
            post_ew_col = record[[f'Inp_L{i}' for i in range(e_end+2, seqlen+1)]]
            a = pd.DataFrame(np.repeat(pre_ew_col.values.reshape(1, -1), num_options, axis=0))
            a.columns = [f'Inp_L{i}' for i in range(1, e_st+1)]
            b = pd.DataFrame(np.repeat(post_ew_col.values.reshape(1, -1), num_options, axis=0))
            b.columns = [f'Inp_L{i}' for i in range(e_end+2, seqlen+1)]
            inpseq_df = pd.DataFrame([record['Inp_seq']] * num_options)
            inpseq_df.columns = ['Inp_seq']
            seqid_df = pd.DataFrame([record['seq_id']] * num_options)
            seqid_df.columns = ['seq_id']
            res_df = pd.concat([seqid_df, inpseq_df, a, comb_nucl_df, b], axis=1)
            # stitch the per-position letters back into full outcome strings
            res_df['Outp_seq'] = res_df[[f'Inp_L{i}' for i in range(1, seqlen+1)]].astype(str).sum(axis=1)
            res_df_lst.append(res_df[target_cols])
        comb_final_df = pd.concat(res_df_lst, axis=0)
        return comb_final_df

    def process_inp_outp_df(self, df, seqid_col, t_inp_col, t_outp_col, outcome_prop_col):
        """Expand input (and optionally outcome) sequences into per-position
        mask/letter/number columns plus a conversion mask.

        Args:
            df:pd.DataFrame, read data frame
            seqid_col: string, sequence id column name
            t_inp_col: string, input sequence column name
            t_outp_col: string, output sequence column name
                None, when performing inference
            outcome_prop_col: string, outcome propotion (i.e. probability of outcome sequence) column name
                None, when performing inference
        """
        pbar = tqdm(total=100)
        seq_len = self.seqconfig.seq_len
        tb_nucl, cb_nucl = self._determine_target_complem_nucl()
        inp_df = self._process_df(df, seqid_col, t_inp_col, tb_nucl, 'Inp')
        if t_outp_col is not None:
            pbar.update(25)
            outp_df = self._process_df(df, seqid_col, t_outp_col, cb_nucl, 'Outp')
            pbar.update(25)
            # a position counts as converted when it held the target base in
            # the input AND the conversion base in the outcome
            conv_mat = inp_df[[f'Inp_M{i}' for i in range(1,seq_len+1)]].values & \
                       outp_df[[f'Outp_M{i}' for i in range(1,seq_len+1)]].values
            conv_df = pd.DataFrame(conv_mat)
            conv_df.columns = [f'conv{tb_nucl}{cb_nucl}_{i}' for i in range(1,seq_len+1)]
            pbar.update(25)
            if outcome_prop_col is not None:
                proc_df = pd.concat([inp_df, outp_df, conv_df, pd.DataFrame(df[outcome_prop_col])], axis=1)
            else:
                proc_df = pd.concat([inp_df, outp_df, conv_df], axis=1)
        else:
            pbar.update(50)
            proc_df = inp_df
            pbar.update(25)
        # remove double seq_id columns
        proc_df = proc_df.loc[:, ~proc_df.columns.duplicated()]
        # tqdm's update() takes increments; the original passed absolute
        # positions (25/50/75/100), overshooting the total.
        pbar.update(25)
        pbar.close()
        validate_df(proc_df)
        return proc_df

    def _get_char(self, seq):
        """split string int sequence of chars returned in pandas.Series"""
        return pd.Series(list(seq))

    def _process_df(self, df, seqid_col, tcol, target_base, suffix):
        """cleans a data frame representing sequences and their edit info obtained from crispr experiment
        Args:
            df: pandas.DataFrame
            tcol: string, column holding the sequences to expand
            target_base: string, nucleotide flagged in the `{suffix}_M*` mask
            suffix: string, column prefix ('Inp' or 'Outp')
        Returns:
            dataframe with seq_id, per-position mask (`{suffix}_M*`), the raw
            sequence, letter (`{suffix}_L*`) and numeric (`{suffix}_B*`) columns.
        """
        seqid_df = pd.DataFrame(df[seqid_col].copy())
        seqid_df.columns = ['seq_id']
        df = pd.DataFrame(df[tcol].copy())
        seq_colname = f'{suffix}_seq'
        df.columns = [seq_colname]
        # harmonize sequence string representation to capitalized form
        df[seq_colname] = df[seq_colname].str.upper()
        baseseq_df = df[seq_colname].apply(self._get_char)
        num_nucl = len(baseseq_df.columns) + 1
        baseseq_df.columns = [f'{suffix}_B{i}' for i in range(1, num_nucl)]
        base_mask = (baseseq_df == target_base) * 1
        base_mask.columns = [f'{suffix}_M{i}' for i in range(1, num_nucl)]
        baseseq_letters_df = baseseq_df.copy()
        baseseq_letters_df.columns = [f'{suffix}_L{i}' for i in range(1, num_nucl)]
        # replace base letters with numbers
        baseseq_df.replace(['A', 'C', 'T', 'G'], [0, 1, 2, 3], inplace=True)
        base_df = pd.concat([seqid_df,
                             base_mask,
                             df,
                             baseseq_letters_df,
                             baseseq_df], axis=1)
        base_df.reset_index(inplace=True, drop=True)
        return base_df
def validate_df(df):
    """Print the total count of missing (NA) cells in `df`; returns None.

    The original `.isna().any().sum()` counted columns containing at least
    one NA, contradicting the printed label; `.sum().sum()` counts NA cells.
    """
    print('number of NA:', df.isna().sum().sum())
class VizInpOutp_Haplotype(object):
    """Rendering helpers for displaying input/outcome (haplotype) sequence
    alignments; all functionality lives in classmethods."""
    # Background colors (HTML hex codes) used when rendering cells.
    html_colors = {'blue':' #aed6f1',
                   'red':' #f5b7b1',
                   'green':' #a3e4d7',
                   'yellow':' #f9e79f',
                   'violet':'#d7bde2'}
    # Marker codes prepended to cell text for each symbol kind.
    codes = {'A':'@', 'C':'!', 'T':'#', 'G':'_', 'conv':'~', 'prob':'%'}
    # Maps each nucleotide (and probability cells) to a color name above.
    nucl_colrmap = {'A':'red',
                    'C':'yellow',
                    'T':'blue',
                    'G':'green',
                    'prob':'violet'}
    def __init__(self):
        # Stateless: instances carry no data of their own.
        pass
@classmethod
def viz_align_haplotype(clss, df, seqid, outcome_colname, seqconfig, conv_nl, predscore_thr=0., return_type='html'):
"""
Args:
df: processed dataframe using HaplotypeSeqProcessor.process_inp_outp_df
seqid: string, sequence id
outcome_colname: string or None, the ground truth outcome proportion
seqconfig: instance of SeqProcessConfig class
conv_nl: tuple of (target nucleotide, transition nucleotide)
predscore_thr: float, probability threshold
return_type: string, default `html`
"""
seq_len = seqconfig.seq_len
seq_st, seq_end = seqconfig.seq_st, seqconfig.seq_end
ewindow_st, ewindow_end = seqconfig.ewindow_st, seqconfig.ewindow_end
offset_st, offset_end = seqconfig.offset_st, seqconfig.offset_end
tb_nucl, cb_nucl = conv_nl
codes = clss.codes
tb = PrettyTable()
tb.field_names = ['Desc.'] + [f'{i}' for i in range(1, seq_len+1)]
cond = df['seq_id'] == seqid
cond_thr = df['pred_score'] >= predscore_thr
df = df.loc[(cond) & (cond_thr)].copy()
# sort df by outcome probability
if outcome_colname is not None:
sortcol = outcome_colname
else:
sortcol = 'pred_score'
df.sort_values(by=[sortcol], ascending=False, inplace=True)
# get the input sequence
inp_nucl = df.iloc[0][[f'Inp_L{i}' for i in range(1,seq_len+1)]].values
inp_str_lst = ['Input sequence'] + [f'{codes[nucl]}{nucl}' for nucl in inp_nucl]
tb.add_row(inp_str_lst)
n_rows = df.shape[0]
# generate outcome (haplotype) rows
for rcounter in range(n_rows):
row = df.iloc[rcounter]
outp_nucl = row[[f'Outp_L{i}' for i in range(1,seq_len+1)]].values
if outcome_colname is not None:
outp_str_lst = ['{}Output | |
2a552bb">sources</a>', output_text
)
# Add a new commit on the repo from
newpath = tempfile.mkdtemp(prefix="pagure-fork-test")
gitrepo = os.path.join(self.path, "repos", "test.git")
repopath = os.path.join(newpath, "test")
clone_repo = pygit2.clone_repository(
gitrepo, repopath, checkout_branch="feature"
)
        def compatible_signature(name, email):
            # pygit2 under Python 2 expects byte strings for signature
            # fields, so encode only there; Python 3 pygit2 takes text.
            if six.PY2:
                name = name.encode("utf-8")
                email = email.encode("utf-8")
            return pygit2.Signature(name, email)
with open(os.path.join(repopath, ".gitignore"), "w") as stream:
stream.write("*~")
clone_repo.index.add(".gitignore")
clone_repo.index.write()
com = clone_repo.revparse_single("HEAD")
prev_commit = [com.oid.hex]
# Commits the files added
tree = clone_repo.index.write_tree()
author = compatible_signature("<NAME>", "<EMAIL>")
comitter = compatible_signature(
"<NAME>", "<EMAIL>"
)
clone_repo.create_commit(
"refs/heads/feature",
author,
comitter,
"Add .gitignore file for testing",
# binary string representing the tree object ID
tree,
# list of binary strings representing parents of the new commit
prev_commit,
)
refname = "refs/heads/feature:refs/heads/feature"
ori_remote = clone_repo.remotes[0]
PagureRepo.push(ori_remote, refname)
shutil.rmtree(newpath)
pagure.lib.tasks.update_pull_request(request.uid)
self.session = pagure.lib.query.create_session(self.dbpath)
project = pagure.lib.query.get_authorized_project(self.session, "test")
self.assertEqual(len(project.requests), 1)
request = project.requests[0]
self.assertEqual(len(request.comments), 1)
self.assertIsNotNone(request.commit_start)
self.assertIsNotNone(request.commit_stop)
self.assertNotEqual(start_commit, request.commit_start)
self.assertNotEqual(stop_commit, request.commit_stop)
    @patch("pagure.lib.notify.send_email")
    def test_request_pull_ci_dropdown(self, send_email):
        """ Test presence of the "Rerun CI" dropdown with various settings. """
        send_email.return_value = True
        # Create the test project plus its requests git repo, and open
        # pull-request #1 from the "feature" branch.
        tests.create_projects(self.session)
        tests.create_projects_git(
            os.path.join(self.path, "requests"), bare=True
        )
        set_up_git_repo(
            self.session, self.path, new_project=None, branch_from="feature"
        )
        user = tests.FakeUser()
        user.username = "pingou"
        with tests.user_set(self.app.application, user):
            # old-style TRIGGER_CI list - test backwards compatibility
            with patch.dict(
                "pagure.config.config",
                {"TRIGGER_CI": ["old-style-trigger-ci"]},
            ):
                output = self.app.get("/test/pull-request/1")
                self.assertEqual(output.status_code, 200)
                output_text = output.get_data(as_text=True)
                self.assertNotIn("Rerun CI", output_text)
            # new-style TRIGGER_CI, but no button to show
            with patch.dict(
                "pagure.config.config", {"TRIGGER_CI": {"no-button": None}}
            ):
                output = self.app.get("/test/pull-request/1")
                self.assertEqual(output.status_code, 200)
                output_text = output.get_data(as_text=True)
                self.assertNotIn("Rerun CI", output_text)
            trigger_ci = {
                "foobar-ci": {
                    "name": "foobar-ci-name",
                    "description": "barfoo",
                },
                "spam-ci": {
                    "name": "spam-ci-name",
                    "description": "with beans and eggs",
                },
                "no-button-for-me-ci": None,
            }
            # new-style TRIGGER_CI, several buttons to show
            with patch.dict(
                "pagure.config.config", {"TRIGGER_CI": trigger_ci}
            ):
                output = self.app.get("/test/pull-request/1")
                self.assertEqual(output.status_code, 200)
                output_text = output.get_data(as_text=True)
                self.assertIn("Rerun CI", output_text)
                self.assertIn("foobar-ci-name", output_text)
                self.assertIn("spam-ci-name", output_text)
                # entries with a None config must not produce a button
                self.assertNotIn("no-button-for-me-ci", output_text)
            trigger_ci = {
                "foobar-ci": {
                    "name": "foobar-ci-name",
                    "description": "barfoo",
                    "requires_project_hook_attr": (
                        "ci_hook",
                        "active_pr",
                        True,
                    ),
                }
            }
            # new-style TRIGGER_CI with requires_project_hook_attr that is
            # not fulfilled by the project
            with patch.dict(
                "pagure.config.config", {"TRIGGER_CI": trigger_ci}
            ):
                output = self.app.get("/test/pull-request/1")
                self.assertEqual(output.status_code, 200)
                output_text = output.get_data(as_text=True)
                self.assertNotIn("Rerun CI", output_text)
            # now activate the hook and try again
            data = {
                "active_pr": "y",
                "ci_url": "https://jenkins.fedoraproject.org",
                "ci_job": "ci_job",
                "ci_type": "jenkins",
                "csrf_token": self.get_csrf(),
            }
            output = self.app.post(
                "/test/settings/Pagure CI", data=data, follow_redirects=True
            )
            self.assertEqual(output.status_code, 200)
            # with the ci_hook now active the requirement is met, so the
            # dropdown shows up
            with patch.dict(
                "pagure.config.config", {"TRIGGER_CI": trigger_ci}
            ):
                output = self.app.get("/test/pull-request/1")
                self.assertEqual(output.status_code, 200)
                output_text = output.get_data(as_text=True)
                self.assertIn("Rerun CI", output_text)
                self.assertIn("foobar-ci-name", output_text)
        # shouldn't show up if user is not logged in
        # (trigger_ci still holds the last dict assigned above)
        with patch.dict("pagure.config.config", {"TRIGGER_CI": trigger_ci}):
            output = self.app.get("/test/pull-request/1")
            self.assertEqual(output.status_code, 200)
            output_text = output.get_data(as_text=True)
            self.assertNotIn("Rerun CI", output_text)
    @patch("pagure.lib.notify.send_email")
    @patch.dict(
        "pagure.config.config",
        {"TRIGGER_CI": {"CI1": {"name": "CI1", "description": "CI1!"}}},
    )
    def test_request_pull_ci_rerun(self, send_email):
        """ Test rerunning CI using button from the "Rerun CI" dropdown.

        Covers: POST without a CSRF token, POST against a non-existent PR,
        POST with a comment that matches no configured CI, and finally a
        successful trigger that must leave a "CI1" notification comment
        on the pull-request.
        """
        send_email.return_value = True
        tests.create_projects(self.session)
        tests.create_projects_git(
            os.path.join(self.path, "requests"), bare=True
        )
        set_up_git_repo(
            self.session, self.path, new_project=None, branch_from="feature"
        )
        user = tests.FakeUser()
        user.username = "pingou"
        # Keep a handle on the PR ORM object now; the final assertions read
        # its comments after the trigger-ci POST has added one.
        project = pagure.lib.query.get_authorized_project(self.session, "test")
        request = project.requests[0]
        with tests.user_set(self.app.application, user):
            # no csrf token
            output = self.app.get("/test/pull-request/1")
            self.assertEqual(output.status_code, 200)
            output = self.app.post(
                "/test/pull-request/1/trigger-ci", follow_redirects=True
            )
            self.assertEqual(output.status_code, 200)
            self.assertIn("Invalid input", output.get_data(as_text=True))

            # no such PR
            output = self.app.get("/test/pull-request/1")
            self.assertEqual(output.status_code, 200)
            output = self.app.post(
                "/test/pull-request/2/trigger-ci", follow_redirects=True
            )
            self.assertEqual(output.status_code, 404)

            # wrong comment: "this doesnt exist" is not a key of TRIGGER_CI
            output = self.app.get("/test/pull-request/1")
            self.assertEqual(output.status_code, 200)
            csrf_token = self.get_csrf(output=output)
            data = {"csrf_token": csrf_token, "comment": "this doesnt exist"}
            output = self.app.post(
                "/test/pull-request/1/trigger-ci",
                data=data,
                follow_redirects=True,
            )
            self.assertEqual(output.status_code, 200)
            self.assertIn("Invalid input", output.get_data(as_text=True))

            # everything ok: "CI1" matches the patched TRIGGER_CI entry
            output = self.app.get("/test/pull-request/1")
            self.assertEqual(output.status_code, 200)
            csrf_token = self.get_csrf(output=output)
            data = {"csrf_token": csrf_token, "comment": "CI1"}
            output = self.app.post(
                "/test/pull-request/1/trigger-ci",
                data=data,
                follow_redirects=True,
            )
            output_text = output.get_data(as_text=True)
            self.assertEqual(output.status_code, 200)
            self.assertIn("<p>CI1</p>", output_text)
            # The trigger must have been recorded as a notification comment.
            comment = request.comments[0]
            self.assertTrue(comment.notification)
            self.assertEqual(comment.comment, "CI1")
    @patch("pagure.lib.notify.send_email")
    def test_merge_request_pull_FF(self, send_email):
        """ Test the merge_request_pull endpoint with a FF PR.

        Walks through every guard of the merge endpoint (CSRF, project and
        request existence, permissions, per-project merge settings) before
        finally fast-forward merging the PR and checking the result.
        """
        send_email.return_value = True

        # Reuse the PR fixture created by test_request_pull().
        self.test_request_pull()

        user = tests.FakeUser()
        with tests.user_set(self.app.application, user):
            output = self.app.get("/test/pull-request/1")
            self.assertEqual(output.status_code, 200)
            csrf_token = self.get_csrf(output=output)

            # No CSRF
            output = self.app.post(
                "/test/pull-request/1/merge", data={}, follow_redirects=True
            )
            self.assertEqual(output.status_code, 200)
            output_text = output.get_data(as_text=True)
            self.assertIn(
                "<title>PR#1: PR from the feature branch - test\n - "
                "Pagure</title>",
                output_text,
            )
            # self.assertIn(
            #'<h3><span class="label label-default">PR#1</span>\n'
            #' PR from the feature branch\n</h3>',
            # output_text)
            self.assertIn(
                'title="View file as of 2a552bb">sources</a>', output_text
            )

            # Wrong project
            data = {"csrf_token": csrf_token}
            output = self.app.post(
                "/foobar/pull-request/100/merge",
                data=data,
                follow_redirects=True,
            )
            self.assertEqual(output.status_code, 404)

            # User without commit access cannot merge -> 403
            data = {"csrf_token": csrf_token}
            output = self.app.post(
                "/test/pull-request/1/merge", data=data, follow_redirects=True
            )
            self.assertEqual(output.status_code, 403)

        user.username = "pingou"
        with tests.user_set(self.app.application, user):
            # Wrong request id
            data = {"csrf_token": csrf_token}
            output = self.app.post(
                "/test/pull-request/100/merge",
                data=data,
                follow_redirects=True,
            )
            self.assertEqual(output.status_code, 404)

            # Project w/o pull-request
            self.session.commit()
            repo = pagure.lib.query.get_authorized_project(
                self.session, "test"
            )
            settings = repo.settings
            settings["pull_requests"] = False
            repo.settings = settings
            self.session.add(repo)
            self.session.commit()

            # Pull-request disabled
            output = self.app.post(
                "/test/pull-request/1/merge", data=data, follow_redirects=True
            )
            self.assertEqual(output.status_code, 404)

            # Project w pull-request but only assignee can merge
            self.session.commit()
            repo = pagure.lib.query.get_authorized_project(
                self.session, "test"
            )
            settings["pull_requests"] = True
            settings["Only_assignee_can_merge_pull-request"] = True
            repo.settings = settings
            self.session.add(repo)
            self.session.commit()

            output = self.app.post(
                "/test/pull-request/1/merge", data=data, follow_redirects=True
            )
            self.assertEqual(output.status_code, 200)
            output_text = output.get_data(as_text=True)
            self.assertIn(
                "<title>PR#1: PR from the feature branch - test\n - "
                "Pagure</title>",
                output_text,
            )
            self.assertIn(
                '<h4 class="ml-1">\n          <div>\n            '
                '<span class="fa fa-fw text-success fa-arrow-circle-down pt-1"></span>\n            '
                '<span class="text-success '
                'font-weight-bold">#1</span>\n            '
                '<span class="font-weight-bold">\n                  '
                "PR from the feature branch\n",
                output_text,
            )
            self.assertIn(
                "This request must be " "assigned to be merged", output_text
            )

            # PR assigned but not to this user
            self.session.commit()
            repo = pagure.lib.query.get_authorized_project(
                self.session, "test"
            )
            req = repo.requests[0]
            req.assignee_id = 2
            self.session.add(req)
            self.session.commit()

            output = self.app.post(
                "/test/pull-request/1/merge", data=data, follow_redirects=True
            )
            self.assertEqual(output.status_code, 200)
            output_text = output.get_data(as_text=True)
            self.assertIn(
                '<h4 class="ml-1">\n          <div>\n            '
                '<span class="fa fa-fw text-success fa-arrow-circle-down pt-1"></span>\n            '
                '<span class="text-success '
                'font-weight-bold">#1</span>\n            '
                '<span class="font-weight-bold">\n                  '
                "PR from the feature branch\n",
                output_text,
            )
            self.assertIn(
                "Only the assignee can merge this request", output_text
            )

            # Project w/ minimal PR score
            self.session.commit()
            repo = pagure.lib.query.get_authorized_project(
                self.session, "test"
            )
            settings["Only_assignee_can_merge_pull-request"] = False
            settings["Minimum_score_to_merge_pull-request"] = 2
            repo.settings = settings
            self.session.add(repo)
            self.session.commit()

            output = self.app.post(
                "/test/pull-request/1/merge", data=data, follow_redirects=True
            )
            self.assertEqual(output.status_code, 200)
            output_text = output.get_data(as_text=True)
            self.assertIn(
                '<h4 class="ml-1">\n          <div>\n            '
                '<span class="fa fa-fw text-success fa-arrow-circle-down pt-1"></span>\n            '
                '<span class="text-success '
                'font-weight-bold">#1</span>\n            '
                '<span class="font-weight-bold">\n                  '
                "PR from the feature branch\n",
                output_text,
            )
            self.assertIn(
                "This request does not "
                "have the minimum review score necessary to be merged",
                output_text,
            )

            # Merge: disable the score requirement, then the FF merge succeeds
            self.session.commit()
            repo = pagure.lib.query.get_authorized_project(
                self.session, "test"
            )
            settings["Minimum_score_to_merge_pull-request"] = -1
            repo.settings = settings
            self.session.add(repo)
            self.session.commit()

            output = self.app.post(
                "/test/pull-request/1/merge", data=data, follow_redirects=True
            )
            self.assertEqual(output.status_code, 200)
            output = self.app.get("/test/commits")
            output_text = output.get_data(as_text=True)
            self.assertIn(
                "<title>Commits - test - Pagure</title>", output_text
            )
            self.assertIn("A commit on branch feature", output_text)
            # Fast-forward merge: no merge commit must appear in history.
            self.assertNotIn(
                "Merge #1 `PR from the feature branch`", output_text
            )
            # Check if the closing notification was added
            output = self.app.get("/test/pull-request/1")
            self.assertIsNotNone(
                re.search(MERGED_PATTERN, output.get_data(as_text=True))
            )
    @patch("pagure.lib.notify.send_email")
    def test_merge_request_pull_merge(self, send_email):
        """ Test the merge_request_pull endpoint with a merge PR.

        Uses mtype="merge" so the branches have diverged and merging
        produces a real merge commit (not a fast-forward).
        """
        send_email.return_value = True

        tests.create_projects(self.session)
        tests.create_projects_git(
            os.path.join(self.path, "requests"), bare=True
        )
        set_up_git_repo(
            self.session,
            self.path,
            new_project=None,
            branch_from="feature",
            mtype="merge",
        )

        user = tests.FakeUser()
        user.username = "pingou"
        with tests.user_set(self.app.application, user):
            output = self.app.get("/test/pull-request/1")
            self.assertEqual(output.status_code, 200)
            csrf_token = self.get_csrf(output=output)

            data = {"csrf_token": csrf_token}

            # Merge
            output = self.app.post(
                "/test/pull-request/1/merge", data=data, follow_redirects=True
            )
            self.assertEqual(output.status_code, 200)
            self.assertIn(
                "<title>PR#1: PR from the feature branch - test\n - Pagure</title>",
                output.get_data(as_text=True),
            )

            # Check if the closing notification was added
            output = self.app.get("/test/pull-request/1")
            self.assertIsNotNone(
                re.search(MERGED_PATTERN, output.get_data(as_text=True))
            )
    @patch("pagure.lib.notify.send_email")
    def test_merge_request_pull_merge_with_comment(self, send_email):
        """ Test the merge_request_pull endpoint with a merge PR.

        Same as test_merge_request_pull_merge but also posts a comment with
        the merge; afterwards the PR must carry two comments (the user
        comment plus the merge notification).
        """
        send_email.return_value = True

        tests.create_projects(self.session)
        tests.create_projects_git(
            os.path.join(self.path, "requests"), bare=True
        )
        set_up_git_repo(
            self.session,
            self.path,
            new_project=None,
            branch_from="feature",
            mtype="merge",
        )

        # Fresh session: confirm the PR starts without comments.
        self.session = pagure.lib.query.create_session(self.dbpath)
        request = pagure.lib.query.search_pull_requests(
            self.session, project_id=1, requestid=1
        )
        self.assertEqual(len(request.comments), 0)

        user = tests.FakeUser()
        user.username = "pingou"
        with tests.user_set(self.app.application, user):
            output = self.app.get("/test/pull-request/1")
            self.assertEqual(output.status_code, 200)
            csrf_token = self.get_csrf(output=output)

            data = {
                "csrf_token": csrf_token,
                "comment": "Thanks for the review and the suggestions!",
            }

            # Merge
            output = self.app.post(
                "/test/pull-request/1/merge", data=data, follow_redirects=True
            )
            self.assertEqual(output.status_code, 200)
            self.assertIn(
                "<title>PR#1: PR from the feature branch - test\n - Pagure</title>",
                output.get_data(as_text=True),
            )

            # Check if the closing notification was added
            output = self.app.get("/test/pull-request/1")
            output_text = output.get_data(as_text=True)
            self.assertIsNotNone(re.search(MERGED_PATTERN, output_text))
            self.assertIn(
                "Thanks for the review and the suggestions!", output_text
            )

        # Re-query with a fresh session: comment + merge notification = 2.
        self.session = pagure.lib.query.create_session(self.dbpath)
        request = pagure.lib.query.search_pull_requests(
            self.session, project_id=1, requestid=1
        )
        self.assertEqual(len(request.comments), 2)
@patch("pagure.lib.notify.send_email")
def | |
<filename>src/software/dAMP/triage.py
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: <NAME>
# *****************************************************************************/
# @package triage
from __future__ import absolute_import, division, print_function, unicode_literals # , nested_scopes, generators, generator_stop, with_statement, annotations
"""
Process Developed by <NAME>
Triage
-> Rationality:
We do triage work to ensure that:
The process of reporting test failures is quick.
Assess and classify new JIRA sightings as FW, hardware, drive, test/test environment, or ‘other’ failures.
Coordinate "treatment" of these failures with the appropriate teams/individuals.
Work with the Program Manager & SI to prioritize new JIRA.
Ensure new JIRA sighting resolutions start off in the right direction.
-> Suggested Workflow:
I. BVT Status Check:
Go to https://nsg-bamb.intel.com/browse/GEN3-G3TCI.
Check if the last BVT tests done are successful.
Indicate it in the ‘BVT Notes/Summary’ section in the NIT report (see section Report Outline below).
II. Main Triage:
Create a working directory in ‘Y:/Users’ to save your repo clone + debug artifacts (logs, asserts, etc).
Go to the Conval analysis page of PDR from <http://tidbits.lm.intel.com/contvaldashboard/detail.php?program=cd> and select the graph corresponding to '15-xx-x'. You'll see all the machines that were tested and test results for that day.
Update your local repository to the firmware version used by Conval.
Create a folder in Outlook specifically for Conval reports and create a ‘rule’ that will ensure that all the NIT email reports are saved in this folder. (This makes it really simple to search if the current failure string that you’re observing has been previously reported, or not).
Go to <http://tidbits.lm.intel.com/tidbits/waitQueueManager.php?tidbit=cd> and clear all the queued tests.
First Pass:
Fill out an excel page with basic details like name of machine, test name, basic comment that you feel should be made, etc.
Remote login to the machine. Look for the details in the FAST interface. Note down pertinent details in your excel sheet.
Open Twidl and see the drive status. This is being done because it has been observed that the status reported on TIDBITS and the actual drive status can be sometimes different. Note this down in your excel sheet as well. Look for the FW version of the drive, also look at the maxLBA field. All this will indicate whether the drive is truly healthy.
Write notes like information regarding this drive’s history. Example scenario, a drive has been failing the basic precommit test and you know that this particular drive hasn’t had a good defect map loaded……
Why excel sheet? Note that you don’t have to write something redundant that is already present in FAST. The reason for having these excel notes is so you can write, based on your experience, what might be the issue or what needs to be looked at by the developer who will be assigned the issue. Also, you will have proper documentation of what you actually did to a certain drive on a certain day after it encountered a certain failure. Remember, the goal is to have fast triaging work, not slow it down by writing redundant info. You can replace this excel with your own Onenote/notebook/sticky notes, etc.
Use VNC for logging in, this will ensure that you’re not blocking someone from looking into the machine at the same time.
Check the drives physically:
As the drives are local, you should go to the lab and physically check if any of them has something like an assert (indicated by a red solid/blinking LED). Why do this? TIDBITS reporting anomaly, as explained above.
Do miscellaneous checks, like whether the drive is properly mounted (look out for quarch and L06x drives especially).
Final pass:
Now you have a comprehensive overview of the actual state of the drive. So you should get more details now.
Retrieve and inspect test logs (to understand what the failure looks like from a host’s perspective), and if appropriate, do the test implementation to understand the stimulus it provides to the drive. Also, note that the availability of logs depends on the test environment and the test failure.
Scenarios: (Most scenarios can be found in this page if you scroll up, assert response is given here just as an example). If the drive status shows:
ASSERT:
The assert is usually of the form: ASSERT_X (where X is the address).
Open the file AssertTable-<firmware_version>.csv, located at: Y://fast-data//Releases//Nightly//PDRShortBranch//2015WWXX.X//PleasantdaleRefresh//
Do a quick search for the assert address. You will get a corresponding assert name. Note it down
Open TWIDL and type in the following commands: unlock() setTestCommandRouting(1) rdump(“<Path-to-your-repo>/ ASSERT_<assert_name>_<firmware_version>_<machine#>_CORE_0.bin”) unlock() setTestCommandRouting(2) rdump(“<Path-to-your-repo>/ ASSERT_<assert_name>_<firmware_version>_<machine#>_CORE_1.bin”)
XASSERT:
DO NOT powercycle the drive. A developer can look into the live assert, if you’re not doing it already.
Ensure the drive is preserved. Do not allow the drive to be LLF'd.
De-enum without ASSERT: Recover using the tool ‘RecoverDrive’ (shortcut on Desktop) and hitting RTC Powercycle
ASSERT dump unavailable:
You can get the ASSERT table by using the 'makeasserttable' flag with buildgen.
Command: #To select the appropriate target: buildgen3.py --listtargets #To create the assert table buildgen3.py --target <selected target> --makeasserttable
BAD CONTEXT
If you observe a BAD_CONTEXT or INITIALIZED0 or any such string in the 'drive status' field after starting twidl and suspect an assert, then it is advisable to do a PLI sidetrace process to capture the assert.
Note that this is important to understand and debug ASSERTS that neither show up in the 'Drive Status' field, nor do they result in the red LED blinking.
To perform this, enter the following (DO NOT use the edump() command):
mkdir C://plisidetrace
cd /d C://plisidetrace
mkdir "core 0" "core 1" "sidetrace"
cd "core 0"
C://fast-tools//TwidlTVE//applications//parsePliDump.py --core 0 -d 1
cd ..
cd "core 1"
C://fast-tools//TwidlTVE//applications//parsePliDump.py --core 1 -d 1
cd ..
cd "sidetrace"
C://fast-tools//TwidlTVE//applications//parseSideTrace.py -d 1 --details
mkdir C://plisidetrace cd /d C://plisidetrace mkdir "core 0" "core 1" "sidetrace" cd "core 0" C://fast-tools//TwidlTVE//applications//parsePliDump.py --core 0 -d 1 cd .. cd "core 1" C:\fast-tools\TwidlTVE\applications\parsePliDump.py --core 1 -d 1 cd .. cd "sidetrace" C:\fast-tools\TwidlTVE\applications\parseSideTrace.py -d 1 --details
Report a JIRA issue:
Search for the corresponding failure string in the previous NIT reports folder in your email. If you can find it, then simply copy the same in your report and follow the steps from (d)
If you weren’t able to find the issue on your previous NIT reports, then, do the following:
Determine the team (FWOPS, SWT, NSGSE, etc.) that the failure corresponds to.
Go to the page (https://nsg-jira.intel.com), select the appropriate dashboard and look at the active/open JIRAs.
If you still can’t find it, then use the search box at the top right of the page.
If you are able to find the issue, then follow the steps from (d).
Quick tip: If you see an issue that is exactly what you’ve seen but it has been closed recently, then contact the assigned developer before reporting; it could be that this current failure might have occurred under different circumstances. If the issue you found is exactly the same & has been fixed (but is quite older) then it is better to just create a new JIRA sighting.
If you’ve not been able to find the corresponding JIRA in the above steps, then you must create one. To do this:
Go to the page (https://nsg-jira.intel.com/secure/CreateIssue!default.jspa) to create a sighting.
The various fields to be filled up are:
Summary: Short brief description of the failure. Avoid writing the exact NIT Work Week number.
Priority: Select ‘P2 High’. If the assigned developer then thinks it needs to be changed, he can do it.
Exposure: ‘Low’ is okay as a default value if you’re not sure.
Components: Not mandatory to be filled out.
Security Level: NSG SE
Program:
PDR - if the issue was found on a PDR mule / PDR drive
Note: When a developer investigates the issue and sees that this also affects another product like DV, then that product must be added to | |
<filename>alphad3m/alphad3m/grpc_api/grpc_server.py<gh_stars>0
"""GRPC server code, exposing AlphaD3M over the TA3-TA2 protocol.
Those adapters wrap the D3mTa2 object and handle all the GRPC and protobuf
logic, converting to/from protobuf messages. No GRPC or protobuf objects should
leave this module.
"""
import grpc
import json
import calendar
import datetime
import logging
from uuid import UUID
from os.path import join
from alphad3m import __version__
import d3m_automl_rpc.core_pb2 as pb_core
import d3m_automl_rpc.core_pb2_grpc as pb_core_grpc
import d3m_automl_rpc.problem_pb2 as pb_problem
import d3m_automl_rpc.value_pb2 as pb_value
import d3m_automl_rpc.pipeline_pb2 as pb_pipeline
import d3m_automl_rpc.primitive_pb2 as pb_primitive
from d3m_automl_rpc.utils import decode_pipeline_description, decode_problem_description, decode_performance_metric, \
encode_pipeline_description, decode_value
from d3m.metadata import pipeline as pipeline_module
from d3m.metadata.problem import Problem
from google.protobuf.timestamp_pb2 import Timestamp
from alphad3m.grpc_api.grpc_logger import log_service
from alphad3m.primitive_loader import load_primitives_list
from alphad3m.utils import PersistentQueue
logger = logging.getLogger(__name__)
def to_timestamp(dt):
    """Convert a UTC datetime into a protobuf ``Timestamp``.

    :param dt: Time to convert; ``None`` means "now" (UTC).
    :type dt: datetime.datetime | None
    """
    moment = datetime.datetime.utcnow() if dt is None else dt
    # timetuple() only carries whole seconds; microseconds go into nanos.
    whole_seconds = calendar.timegm(moment.timetuple())
    return Timestamp(seconds=whole_seconds,
                     nanos=moment.microsecond * 1000)
def error(context, code, format, *args):
    """Record an error on the gRPC *context* and return a matching exception.

    The exception is returned rather than raised so call sites can write
    ``raise error(...)``.  NOT_FOUND maps to KeyError, anything else to
    ValueError.
    """
    message = format % args
    context.set_code(code)
    context.set_details(message)
    exc_type = KeyError if code == grpc.StatusCode.NOT_FOUND else ValueError
    return exc_type(message)
@log_service(logger)
class CoreService(pb_core_grpc.CoreServicer):
def __init__(self, ta2):
self._ta2 = ta2
self._ta2.add_observer(self._ta2_event)
self._requests = {}
def _ta2_event(self, event, **kwargs):
if 'job_id' in kwargs and kwargs['job_id'] in self._requests:
job_id = kwargs['job_id']
self._requests[job_id].put((event, kwargs))
if event in ('scoring_success', 'scoring_error',
'training_success', 'training_error',
'test_success', 'test_error'):
self._requests[job_id].close()
def Hello(self, request, context):
version = pb_core.DESCRIPTOR.GetOptions().Extensions[pb_core.protocol_version]
user_agent = "alphad3m %s" % __version__
return pb_core.HelloResponse(
user_agent=user_agent,
version=version,
allowed_value_types=['RAW', 'DATASET_URI', 'CSV_URI'],
supported_extensions=[],
supported_task_keywords=[], # TODO: Add supported_task_keywords using core package enums
supported_performance_metrics=[], # TODO: Add supported_performance_metrics using core package enums
supported_evaluation_methods=['K_FOLD', 'HOLDOUT', 'RANKING'],
supported_search_features=[]
)
    def SearchSolutions(self, request, context):
        """Create a `Session` and start generating & scoring pipelines.

        Two modes, depending on ``request.template``:
        - fully defined template (no placeholder): build exactly that
          pipeline and return immediately;
        - template with a placeholder (or no template): start a full
          AlphaD3M search constrained by the template, problem, metrics
          and time bounds.
        """
        if len(request.inputs) > 1:
            raise error(context, grpc.StatusCode.UNIMPLEMENTED,
                        "Search with more than 1 input is not supported")
        # Version mismatch is logged but tolerated: the protocol is
        # expected to stay compatible across minor revisions.
        expected_version = pb_core.DESCRIPTOR.GetOptions().Extensions[
            pb_core.protocol_version]
        if request.version != expected_version:
            logger.error("TA3 is using a different protocol version: %r "
                         "(us: %r)", request.version, expected_version)
        template = request.template
        if template is not None and len(template.steps) > 0:  # isinstance(template, pb_pipeline.PipelineDescription)
            pipeline = decode_pipeline_description(template, pipeline_module.Resolver())
            if pipeline.has_placeholder():
                # Placeholder present: keep the template (as JSON) to
                # constrain the search below.
                template = pipeline.to_json_structure()
            else:  # Pipeline template fully defined
                problem = None
                if request.problem:
                    problem = decode_problem_description(request.problem)
                search_id = self._ta2.new_session(problem)
                dataset = request.inputs[0].dataset_uri
                if not dataset.startswith('file://'):
                    dataset = 'file://' + dataset
                # No search needed: materialize the fixed pipeline directly.
                self._ta2.build_fixed_pipeline(search_id, pipeline.to_json_structure(), dataset)
                return pb_core.SearchSolutionsResponse(search_id=str(search_id),)
        else:
            template = None
        dataset = request.inputs[0].dataset_uri
        if not dataset.endswith('datasetDoc.json'):
            raise error(context, grpc.StatusCode.INVALID_ARGUMENT,
                        "Dataset is not in D3M format: %s", dataset)
        if not dataset.startswith('file://'):
            dataset = 'file://' + dataset
        problem = decode_problem_description(request.problem)
        timeout_search = request.time_bound_search
        timeout_run = request.time_bound_run
        report_rank = True if request.rank_solutions_limit > 0 else False
        # Protobuf default (0.0) or negative bounds mean "no limit".
        if timeout_search <= 0.0: timeout_search = None
        if timeout_run <= 0.0: timeout_run = None
        search_id = self._ta2.new_session(problem)
        session = self._ta2.sessions[search_id]
        task_keywords = session.problem['problem']['task_keywords']
        metrics = session.metrics
        # Optional AutoML hyperparameters come in as protobuf Values.
        automl_hyperparameters = {}
        if request.automl_hyperparameters:
            for hp_name, hp_value in request.automl_hyperparameters.items():
                automl_hyperparameters[hp_name] = decode_value(hp_value)['value']
        self._ta2.build_pipelines(search_id, dataset, task_keywords, metrics, timeout_search, timeout_run,
                                  automl_hyperparameters, template, report_rank=report_rank)
        return pb_core.SearchSolutionsResponse(
            search_id=str(search_id),
        )
    def GetSearchSolutionsResults(self, request, context):
        """Get the created pipelines and scores.

        Streaming RPC: first replays solutions already found for the
        session, then yields progress/solution messages as search events
        arrive, until the session finishes or the client disconnects.
        """
        session_id = UUID(hex=request.search_id)
        if session_id not in self._ta2.sessions:
            raise error(context, grpc.StatusCode.NOT_FOUND,
                        "Unknown search ID %r", session_id)

        session = self._ta2.sessions[session_id]

        def msg_solution(pipeline_id):
            # Build a response for a scored pipeline; returns None (implicitly)
            # when no scores are available yet.
            scores = self._ta2.get_pipeline_scores(pipeline_id)
            progress = session.progress
            if scores:
                # internal_score is the session's first metric, normalized
                # when possible; NaN when that metric wasn't scored.
                if session.metrics and session.metrics[0]['metric'].name in scores:
                    metric = session.metrics[0]['metric']
                    try:
                        internal_score = metric.normalize(scores[metric.name])
                    except:
                        internal_score = scores[metric.name]
                        logger.warning('Problems normalizing metric, using the raw value: %.2f' % scores[metric.name])
                else:
                    internal_score = float('nan')
                scores = [
                    pb_core.Score(
                        metric=pb_problem.ProblemPerformanceMetric(
                            metric=m,
                            k=0,
                            pos_label=''),
                        value=pb_value.Value(
                            raw=pb_value.ValueRaw(double=s)
                        ),
                    )
                    for m, s in scores.items()
                ]
                scores = [pb_core.SolutionSearchScore(scores=scores)]
                return pb_core.GetSearchSolutionsResultsResponse(
                    done_ticks=progress.current,
                    all_ticks=progress.total,
                    progress=pb_core.Progress(
                        state=pb_core.RUNNING,
                        status="Solution scored",
                        start=to_timestamp(session.start),
                    ),
                    solution_id=str(pipeline_id),
                    internal_score=internal_score,
                    scores=scores,
                )

        def msg_progress(status, state=pb_core.RUNNING):
            # Progress-only message (no solution attached).
            progress = session.progress
            return pb_core.GetSearchSolutionsResultsResponse(
                done_ticks=progress.current,
                all_ticks=progress.total,
                progress=pb_core.Progress(
                    state=state,
                    status=status,
                    start=to_timestamp(session.start),
                ),
                internal_score=float('nan'),
            )

        def msg_fixed_solution(pipeline_id, state=pb_core.RUNNING):
            # Message for a fixed (fully templated) pipeline: created but
            # carrying no score.
            progress = session.progress
            return pb_core.GetSearchSolutionsResultsResponse(
                done_ticks=progress.current,
                all_ticks=progress.total,
                progress=pb_core.Progress(
                    state=state,
                    status="Solution Created",
                    start=to_timestamp(session.start),
                ),
                solution_id=str(pipeline_id),
                internal_score=float('nan'),
            )

        with session.with_observer_queue() as queue:
            # Send the solutions that already exist
            for pipeline_id in session.pipelines:
                msg = msg_solution(pipeline_id)
                if msg is not None:
                    yield msg

            # Send updates by listening to notifications on session
            while session.working or not queue.empty():
                if not context.is_active():
                    logger.info(
                        "Client closed GetSearchSolutionsResults stream")
                    break
                event, kwargs = queue.get()
                if event == 'finish_session' or event == 'done_searching':
                    break
                elif event == 'new_pipeline':
                    yield msg_progress("Trying new solution")
                elif event == 'new_fixed_pipeline':
                    pipeline_id = kwargs['pipeline_id']
                    yield msg_fixed_solution(pipeline_id)
                elif event == 'scoring_success':
                    pipeline_id = kwargs['pipeline_id']
                    msg = msg_solution(pipeline_id)
                    if msg is not None:
                        yield msg
                    else:
                        yield msg_progress("No appropriate score")
                elif event == 'scoring_error':
                    yield msg_progress("Solution doesn't work")

            yield msg_progress("End of search", pb_core.COMPLETED)
def ScoreSolution(self, request, context):
"""Request scores for a pipeline.
"""
pipeline_id = UUID(hex=request.solution_id)
dataset = request.inputs[0].dataset_uri
if not dataset.endswith('datasetDoc.json'):
raise error(context, grpc.StatusCode.INVALID_ARGUMENT,
"Dataset is not in D3M format: %s", dataset)
if dataset.startswith('/'):
logger.warning("Dataset is a path, turning it into a file:// URL")
dataset = 'file://' + dataset
metrics = []
for metric in request.performance_metrics:
metrics.append(decode_performance_metric(metric))
logger.info("Got ScoreSolution request, dataset=%s, "
"metrics=%s",
dataset, metrics)
problem = None
timeout_run = None
for session_id in self._ta2.sessions.keys():
if pipeline_id in self._ta2.sessions[session_id].pipelines:
problem = self._ta2.sessions[session_id].problem
timeout_run = self._ta2.sessions[session_id].timeout_run
break
scoring_config = {
'method': request.configuration.method,
'train_score_ratio': str(request.configuration.train_test_ratio),
'random_seed': request.configuration.random_seed,
'shuffle': str(request.configuration.shuffle).lower(),
'stratified': str(request.configuration.stratified).lower()
}
if scoring_config['method'] == 'K_FOLD':
scoring_config['number_of_folds'] = str(request.configuration.folds)
job_id = self._ta2.score_pipeline(pipeline_id, metrics, dataset, problem, scoring_config, timeout_run)
self._requests[job_id] = PersistentQueue()
return pb_core.ScoreSolutionResponse(
request_id='%x' % job_id,
)
    def GetScoreSolutionResults(self, request, context):
        """Wait for a scoring job to be done.

        Streaming RPC keyed by the hex job id returned by ScoreSolution;
        yields RUNNING updates, then one COMPLETED (with scores) or
        ERRORED message.
        """
        try:
            # request_id is the hex-encoded job id from ScoreSolution.
            job_id = int(request.request_id, 16)
            queue = self._requests[job_id]
        except (ValueError, KeyError):
            raise error(context, grpc.StatusCode.NOT_FOUND,
                        "Unknown ID %r", request.request_id)

        # queue.read() ends when _ta2_event closes the queue on a terminal event.
        for event, kwargs in queue.read():
            if not context.is_active():
                logger.info("Client closed GetScoreSolutionResults stream")
                break
            if event == 'scoring_start':
                yield pb_core.GetScoreSolutionResultsResponse(
                    progress=pb_core.Progress(
                        state=pb_core.RUNNING,
                        status="Scoring in progress",
                    ),
                )
            elif event == 'scoring_success':
                pipeline_id = kwargs['pipeline_id']
                scores = self._ta2.get_pipeline_scores(pipeline_id)
                scores = [
                    pb_core.Score(
                        metric=pb_problem.ProblemPerformanceMetric(
                            metric=m,
                            k=0,
                            pos_label=''),
                        value=pb_value.Value(
                            raw=pb_value.ValueRaw(double=s)
                        ),
                    )
                    for m, s in scores.items()
                ]
                yield pb_core.GetScoreSolutionResultsResponse(
                    progress=pb_core.Progress(
                        state=pb_core.COMPLETED,
                        status="Scoring completed",
                    ),
                    scores=scores,
                )
                break
            elif event == 'scoring_error':
                status = kwargs['error_msg']
                yield pb_core.GetScoreSolutionResultsResponse(
                    progress=pb_core.Progress(
                        state=pb_core.ERRORED,
                        status=status,
                    ),
                )
                break
def FitSolution(self, request, context):
"""Train a pipeline on a dataset.
This will make it available for testing and exporting.
"""
pipeline_id = UUID(hex=request.solution_id)
dataset = request.inputs[0].dataset_uri
steps_to_expose = list(request.expose_outputs)
if dataset.startswith('/'):
logger.warning("Dataset is a path, turning it into a file:// URL")
dataset = 'file://' + dataset
problem = None
for session_id in self._ta2.sessions.keys():
if pipeline_id in self._ta2.sessions[session_id].pipelines:
problem = self._ta2.sessions[session_id].problem
break
job_id = self._ta2.train_pipeline(pipeline_id, dataset, problem, steps_to_expose)
self._requests[job_id] = PersistentQueue()
return pb_core.FitSolutionResponse(
request_id='%x' % job_id,
)
    def GetFitSolutionResults(self, request, context):
        """Wait for a training job to be done.

        Streaming RPC keyed by the hex job id returned by FitSolution;
        yields RUNNING updates, then COMPLETED (with exposed-output CSV
        URIs and the fitted solution id) or ERRORED.
        """
        try:
            # request_id is the hex-encoded job id from FitSolution.
            job_id = int(request.request_id, 16)
            queue = self._requests[job_id]
        except (ValueError, KeyError):
            raise error(context, grpc.StatusCode.NOT_FOUND,
                        "Unknown ID %r", request.request_id)

        for event, kwargs in queue.read():
            if not context.is_active():
                logger.info("Client closed GetFitSolutionsResults stream")
                break
            if event == 'training_start':
                yield pb_core.GetFitSolutionResultsResponse(
                    progress=pb_core.Progress(
                        state=pb_core.RUNNING,
                        status="Training in progress",
                    ),
                )
            elif event == 'training_success':
                pipeline_id = kwargs['pipeline_id']
                storage_dir = kwargs['storage_dir']
                steps_to_expose = kwargs['steps_to_expose']
                yield pb_core.GetFitSolutionResultsResponse(
                    progress=pb_core.Progress(
                        state=pb_core.COMPLETED,
                        status="Training completed",
                    ),
                    # Exposed step outputs are written as CSV files by the
                    # training job; point the client at them.
                    exposed_outputs={step_id: pb_value.Value(csv_uri='file://%s/fit_%s_%s.csv' %
                                                                     (storage_dir, pipeline_id, step_id))
                                     for step_id in steps_to_expose},
                    fitted_solution_id=str(pipeline_id),
                )
                break
            elif event == 'training_error':
                status = kwargs['error_msg']
                yield pb_core.GetFitSolutionResultsResponse(
                    progress=pb_core.Progress(
                        state=pb_core.ERRORED,
                        status=status,
                    ),
                )
                break
            elif event == 'done_searching':
                break
def ProduceSolution(self, request, context):
"""Run testing from a trained pipeline.
"""
pipeline_id = UUID(hex=request.fitted_solution_id)
dataset = request.inputs[0].dataset_uri
steps_to_expose = list(request.expose_outputs)
if dataset.startswith('/'):
logger.warning("Dataset is a path, turning it into a file:// URL")
dataset = 'file://' + dataset
job_id = self._ta2.test_pipeline(pipeline_id, dataset, steps_to_expose)
self._requests[job_id] = PersistentQueue()
return pb_core.ProduceSolutionResponse(
request_id='%x' % job_id,
)
    def GetProduceSolutionResults(self, request, context):
        """Wait for the requested test run to be done.

        Server-streaming RPC: replays events from the job's PersistentQueue;
        only success and error events terminate the stream.

        :param request: carries the hex-encoded job id issued by ProduceSolution.
        :param context: gRPC servicer context.
        """
        try:
            # The request id was issued as '%x' % job_id
            job_id = int(request.request_id, 16)
            queue = self._requests[job_id]
        except (ValueError, KeyError):
            raise error(context, grpc.StatusCode.NOT_FOUND,
                        "Unknown ID %r", request.request_id)
        for event, kwargs in queue.read():
            # Stop streaming as soon as the client disconnects
            if not context.is_active():
                logger.info("Client closed GetProduceSolutionResults "
                            "stream")
                break
            if event == 'testing_success':
                pipeline_id = kwargs['pipeline_id']
                storage_dir = kwargs['storage_dir']
                steps_to_expose = kwargs['steps_to_expose']
                # Exposed outputs were written as produce_<pipeline>_<step>.csv
                yield pb_core.GetProduceSolutionResultsResponse(
                    progress=pb_core.Progress(
                        state=pb_core.COMPLETED,
                        status="Execution completed",
                    ),
                    exposed_outputs={step_id: pb_value.Value(csv_uri='file://%s/produce_%s_%s.csv' %
                                                                     (storage_dir, pipeline_id, step_id))
                                     for step_id in steps_to_expose},
                )
                break
            elif event == 'testing_error':
                status = kwargs['error_msg']
                yield pb_core.GetProduceSolutionResultsResponse(
                    progress=pb_core.Progress(
                        state=pb_core.ERRORED,
                        status=status,
                    ),
                )
                break
def SaveSolution(self, request, context):
"""Save a pipeline.
"""
pipeline_id = UUID(hex=request.solution_id)
session_id = None
problem = None
for session_id in self._ta2.sessions.keys():
if | |
import random
import numpy
import simpy
from file_manager import SharedFile
def new_inter_session_time():
    """Draw a random "inter-session time": seconds the device stays offline."""
    mu, sigma = 7.971, 1.308
    return numpy.random.lognormal(mean=mu, sigma=sigma)
def new_session_duration():
    """Draw a random "session time": seconds the device stays online."""
    mu, sigma = 8.492, 1.545
    return numpy.random.lognormal(mean=mu, sigma=sigma)
def new_inter_upload_time():
    """Draw a random "inter-upload time": seconds between two uploads."""
    mu, sigma = 3.748, 2.286
    return numpy.random.lognormal(mean=mu, sigma=sigma)
def new_download_time(s, download_rate):
    """Time needed to transfer a file of size *s* at *download_rate*.

    An empty file (size 0) takes no time at all.
    """
    return 0 if s == 0 else s / download_rate
def new_download_rate(f):
    """Draw a randomly perturbed download rate for file *f*.

    NOTE(review): (random.random() - 0.25) * 2 gives a jitter factor in
    [-0.5, 1.5), so the result spans [0.5*r, 2.5*r) — confirm the
    asymmetry around the base throughput is intended.
    """
    base = f.get_throughput()
    jitter = (random.random() - 0.25) * 2 * base
    return base + jitter
def new_upload_time(file_size, upload_rate):
    """
    Time needed to upload a file of size *file_size* at *upload_rate*.
    Uploads use the same transfer-time model as downloads.
    """
    return new_download_time(file_size, upload_rate)
def new_upload_rate(f):
    """
    Draw an upload rate for file *f*; uploads use the same randomly
    perturbed rate model as downloads.
    """
    return new_download_rate(f)
class Device(object):
# constructor
    def __init__(self, device_id, env, fm, cs, cenv):
        """
        :param device_id: device id
        :param env: simpy simulation environment
        :param fm: file manager
        :param cs: cloud stats collector
        :param cenv: cloud environment (server flag, peer lookup)
        """
        # Device ID
        self.id = device_id
        # Shared folders this device participates in
        self.my_shared_folders = []
        # Simulation environment
        self.env = env
        # Cloud environment
        self.cloud_env = cenv
        # File manager
        self.fm = fm
        # Statistics handler
        self.stats = cs
        # Shared folder the device works on during the current session
        self.current_sf = None
        # Timestamp at which the current session ends
        self.end_session = -1
        # Login flag
        self.logged_in = False
        # Files that are obsolete/missing locally and must be downloaded
        self.missing_files = set([])
        # Files whose upload did not complete
        self.missed_uploads = set([])
        # Files to download on the fly (triggered downloads)
        self.triggered_list = set([])
        # Shared resource for triggered downloads: used to fetch files on the fly
        self.trigger_lock = simpy.Container(self.env, init=0)
        # Flag: when True, the device is notified in real time about new files uploaded to the cloud
        self.triggerable = False
        # Contribution to P2P file transfers
        self.p2p_contribution = 0.0
        # Set up the simulation processes
        self.prepare()
# fancy printing as string
def __str__(self):
sf_str = ", ".join([str(i) for i in self.my_shared_folders])
return "Device: " + str(self.id) + ", Shared Folders [" + sf_str + "]"
# add a shared folder to this device
def add_shared_folder(self, sf):
self.my_shared_folders.append(sf)
def is_working_in_sf(self, sf):
"""
Verifica che il dispositivo stia lavorando nella cartella condivisa specificata
"""
return self.current_sf == sf
def is_on(self):
"""
Verifica che il dispositivo sia loggato
"""
return self.logged_in
def has_file(self, f):
"""
Verifica che il dispositivo abbia gia' scaricato il file "f"
"""
return f not in self.missing_files
def has_shared_folder(self, sf):
"""
Verifica che il dispositivo abbia i privilegi per lavorare in una certa cartella condivisa
"""
return sf in self.my_shared_folders
    def get_id(self):
        """Return the device id."""
        return self.id
    def random_sf(self):
        """
        Return one of this device's shared folders, chosen uniformly at random.
        """
        return random.choice(self.my_shared_folders)
def residual_session_duration(self):
"""
Ritorna il numero di secondi rimanenti prima del logout
"""
return self.end_session - int(self.env.now)
    def prepare(self):
        """
        Register this device's simulation processes with the simpy environment.
        """
        self.env.process(self.run())
    def run(self):
        """
        Main simpy process: alternates the device between offline and online
        states; while online it downloads missing files (P2P when possible,
        otherwise from the server) and then uploads for the rest of the session.

        NOTE(review): this is Python-2 style code — the ``map()`` results
        below are indexed, and ``len()`` is called on ``self.downloads()``;
        under Python 3 both would need to be materialised as lists.
        """
        while True:
            # Wait offline before the next session starts
            inter_session_time = new_inter_session_time()
            yield self.env.timeout(inter_session_time)
            # Pick the shared folder to work on during this session
            self.current_sf = self.random_sf()
            # The device logs in
            session_duration = new_session_duration()
            # The session has a maximum duration within which operations can
            # run: when the residual time becomes negative, the current
            # upload/download operation is truncated
            self.session_start(session_duration)
            # DOWNLOADS
            residual_time = session_duration
            # Files this device should download
            if len(self.downloads()) == 0:
                self.fm.log('Device %d has no file to download from the server' % self.id)
            else:
                while len(self.downloads()) > 0:
                    # Next file to download
                    f = self.downloads().pop()
                    file_size_to_download = f.get_size()
                    server_download = True
                    # Check whether a P2P download is possible
                    if not self.cloud_env.server:
                        server_download = False
                        # Logged-in devices that already hold the file
                        peers = self.cloud_env.look_for_peers(f)
                        if len(peers) > 0:
                            # Usable residual session time of each peer
                            residual_times = map(lambda p: min(p.residual_session_duration(), residual_time), peers)
                            # Throughput achievable towards each peer
                            rates = map(lambda p: new_download_rate(f), peers)
                            # Local helper: is there still time to pull data from some peer?
                            def p2p_check():
                                for t in residual_times:
                                    if t > 0:
                                        return True
                                return False
                            # Decide how long to stay connected to each peer
                            durations = [0] * len(peers)
                            downloaded_data = 0.0
                            file_size = f.get_size()
                            downloaded = False
                            while (not downloaded) and p2p_check():
                                for i in range(len(peers)):
                                    if residual_times[i] > 0:
                                        # Pull one second worth of data from peer i
                                        residual_times[i] -= 1
                                        durations[i] += 1
                                        downloaded_data += rates[i]
                                        # Stop as soon as the whole file fits
                                        if downloaded_data >= file_size:
                                            downloaded = True
                                            break
                            # Download from all the chosen peers in parallel
                            events = []
                            for i in range(len(peers)):
                                if durations[i] > 0:
                                    events.append(self.env.process(peers[i].p2p_upload(f, durations[i], rates[i])))
                                    self.env.process(self.p2p_download(f, durations[i], rates[i], peers[i].get_id()))
                            if len(events) > 0:
                                yield simpy.events.AllOf(self.env, events)
                            # Once the peer downloads finish, any residual part
                            # of a too-large file is fetched from the server
                            if not downloaded:
                                file_size_to_download -= downloaded_data
                                server_download = True
                            else:
                                self.missing_files.remove(f)
                        else:
                            # No peer holds the file we are looking for
                            server_download = True
                    if server_download:
                        # Fetch (the rest of) the file from the server
                        server_download_rate = new_download_rate(f)
                        server_download_time = new_download_time(file_size_to_download, server_download_rate)
                        residual_time -= server_download_time
                        # Is there enough session time left to complete the download?
                        if residual_time >= 0:
                            # The file downloads completely
                            yield self.env.process(self.download(f, server_download_time, server_download_rate, True))
                        else:
                            # The download is truncated by the end of the session
                            # NOTE(review): this 'return' ends the whole process
                            # without calling session_end() — confirm intended
                            self.missing_files.add(f)
                            self.stats.download_start(self, f)
                            yield self.env.timeout(residual_time + server_download_time)
                            self.stats.download_end(self, f, server_download_rate)
                            self.fm.log('Device %s fails to download on fly file "%d" at %d' % (self.id, f.get_id(),
                                                                                                int(self.env.now)))
                            return
                self.fm.log('Device %s finishes its downloads at %d' % (self.id, int(self.env.now)))
            # In the remaining part of the session, if any, the device uploads
            # files and fetches new changes
            if residual_time > 0:
                # TRIGGERED DOWNLOADS
                # In parallel with the uploads, the device listens for files
                # uploaded by others to the current shared folder
                self.triggerable = True
                # UPLOADS
                # Downloads finished in time: upload as many files as possible
                yield self.env.process(self.uploads(residual_time))
                '''tdw_proc = self.env.process(self.triggered_downloads(residual_time))
                yield up_proc or tdw_proc'''
                self.triggerable = False
                # up_proc.interrupt('')
                # tdw_proc.interrupt('')
            self.session_end()
            self.fm.log('Device %d logs out at %d: session lasts for %d' % (self.id, int(self.env.now),
                                                                            int(session_duration)))
def downloads(self):
"""
La funzione restituisce l'elenco di file da scaricare (perche' nuovi o modificati) da una specifica cartella
condivisa
"""
return filter(lambda x: x.get_shared_folder() == self.current_sf, self.missing_files)
    def download(self, f, download_time, download_rate, on_fly=False):
        """
        simpy process that simulates the download of file *f* (from the
        server or a peer): waits *download_time*, records the statistics
        and marks the file as up to date.

        :param f: file being downloaded
        :param download_time: transfer duration in seconds
        :param download_rate: transfer rate (bits/s)
        :param on_fly: True for "on the fly" (triggered) downloads; only
            affects the log message
        """
        self.stats.download_start(self, f)
        yield self.env.timeout(download_time)
        self.stats.download_end(self, f, download_rate)
        self.stats.download_successful(self, f, download_time)
        # The file is now fully downloaded, so it is no longer missing
        self.missing_files.remove(f)
        self.fm.log(
            'Device %d downloads %sfile "%d" from the server at %d: download lasts for %.2f' %
            (self.id, 'on fly ' if on_fly else '', f.get_id(), int(self.env.now), download_time)
        )
def p2p_download(self, f, download_time, download_rate, peer_id):
"""
La funzione simula il download di una porzione di file da un peer
:param f: file scaricato
:param download_time: tempo impiegato per scaricare la porzione di file (s)
:param download_rate: velcoita' di download (bit/s)
:param peer_id: id del peer che effettua l'upload dei dati
"""
size = download_time * download_rate
self.stats.p2p_download_start()
yield self.env.timeout(download_time)
self.stats.p2p_download_end(size)
if f.get_size() == size:
# Sto scaricando l'intero file da un unico peer
tmp = 'the entire'
else:
tmp = 'a portion of'
self.fm.log('Device %d downloads %s file "%d" (size: %.2f bits) from | |
# Source: pmp-p/wapy-pack — wapy-lib/readline/pyreadline.py
# Incremental readline compatible with micropython/lib/readline.c, credits https://github.com/dhylands
import sys
try:
import ulogging as logging
except:
import logging
try:
import signal
except:
signal = None
DEBUG = 0
# Control-character byte codes recognised by the line editor
CTRL_A = b"\x01"
CTRL_C = b"\x03"
CTRL_D = b"\x04"
CTRL_E = b"\x05"
CTRL_U = b"\x15"
TAB = b"\x09"
CR = b"\x0d"
LF = b"\x0a"
ESC = b"\x1b"
DEL = b"\x7f"
BS = b"\x08"
# The following escape sequence is used to query the size of the window:
#
# ESC 7 - Save cursor position
# ESC [r - Enable scrolling for entire display
# ESC [row;colH - Move to cursor position
# ESC [6n - Device Status Report - send ESC [row;colR
# ESC 8 - Restore cursor position
RWS1 = b"\x1b7\x1b[r\x1b[999;999H"  # save cursor, enable scrolling, jump to bottom-right
RWS2 = b"\x1b8"  # restore the saved cursor position
RCL = b"\x1b[6n"  # request a cursor-position report
# When running the test suite, we're only checking the line buffer, so we
# disable output
FBO = True
def printable(ch):
    """Return *ch* if it is a printable ASCII character, else '.'."""
    code = ord(ch)
    if ord(" ") <= code <= ord("~"):
        return chr(code)
    return "."
class CmdBase(object):
    """Base class for queued terminal commands; holds the shared logger."""
    def __init__(self, log=None):
        # Fall back to the module logger when none is supplied
        self.log = log or logging.getLogger(__name__)
class CmdWrite(CmdBase):
    """Queued command that writes *string* (text or bytes) to stdout."""

    def __init__(self, string, log=None):
        CmdBase.__init__(self, log)
        self.string = string

    def is_input(self):
        """Write commands never consume input."""
        return False

    def process(self):
        """Emit the stored string, unless output is disabled for tests."""
        if FBO:
            # Test mode: only the line buffer is checked, skip real output
            return
        self.log.debug("CmdWrite(" + repr(self.string) + ")")
        # Text goes through the text layer, bytes through the raw buffer
        if isinstance(self.string, str):
            stream = sys.stdout
        else:
            stream = sys.stdout.buffer
        stream.write(self.string)
        stream.flush()
class CmdInput(CmdBase):
    """Queued command that reads a cursor-position report from stdin and
    passes the parsed (rows, cols) to a callback."""
    def __init__(self, func, log=None):
        CmdBase.__init__(self, log)
        # Callback invoked as func(rows, cols) once the report is parsed
        self._func = func
    def is_input(self):
        return True
    def process(self):
        """Read and parse one 'ESC [ rows ; cols R' report, then call the callback."""
        # For some reason, the ESC [ 999;999 R sequence doesn't cause
        # select to trigger. So we do a read here.
        # This is what ncurses does as well.
        data = ""
        while True:
            char = sys.stdin.read(1)
            self.log.debug("CmdInput: got char '%c' 0x%02x" % (printable(char), ord(char)))
            # The report is terminated by 'R'
            if char == "R":
                break
            data += char
        # Validate the 'ESC [' prefix before parsing the numbers
        if data[0] != chr(ord(ESC)) or data[1] != "[":
            self.log.error("Invalid cursor position received")
            self.log.error("data[0] = " + repr(data[0]))
            self.log.error("data[1] = " + repr(data[1]))
            return
        num_str = data[2:].split(";")
        try:
            rows = int(num_str[0])
            cols = int(num_str[1])
        except:
            self.log.error("Unknown ESC [ '%s' R" % data[2:])
            self.log.error("num_str = " + repr(num_str))
            return
        self.log.debug("CmdInput: %s rows: %d cols: %d" % (self._func.__name__, rows, cols))
        self._func(rows, cols)
class CmdWriteQueue(object):
    """FIFO of CmdWrite/CmdInput commands, processed in order."""
    def __init__(self, log=None):
        self.log = log or logging.getLogger(__name__)
        self.queue = []
    def write(self, string):
        """Queue a write of *string*; it is emitted later by process()."""
        self.queue.append(CmdWrite(string, log=self.log))
    def queue_input(self, func):
        """Queue an input command; *func* receives the parsed (rows, cols)."""
        self.queue.append(CmdInput(func, log=self.log))
    def wait_for_input(self, func):
        # NOTE(review): identical to queue_input — possibly meant to block
        # until input arrives; confirm the intended semantics
        self.queue.append(CmdInput(func, log=self.log))
    def process(self):
        """Drain the queue, processing commands in FIFO order."""
        while len(self.queue) > 0:
            cmd = self.queue.pop(0)
            cmd.process()
    def process_input(self, *args, **kwargs):
        """Process the pending input command at the head of the queue, then
        drain the rest. *args*/*kwargs* are accepted but ignored."""
        assert len(self.queue) > 0
        assert self.queue[0].is_input()
        cmd = self.queue.pop(0)
        cmd.process()
        self.process()
#
#
# """
# Mouse Tracking
# The VT widget can be set to send the mouse position and other information on button presses.
# These modes are typically used by editors and other full-screen applications that want to make use of the mouse.
#
# There are six mutually exclusive modes. One is DEC Locator mode,
# enabled by the DECELR CSI P s ; P s ´z control sequence, and is not described here (control sequences are summarized above).
# The remaining five modes are each enabled (or disabled) by a different parameter in:
# DECSET CSI ? P m h or DECRST CSI ? P m l control sequence.
#
# Manifest constants for the parameter values are defined in xcharmouse.h as follows:
#
##define SET_X10_MOUSE 9
##define SET_VT200_MOUSE 1000
##define SET_VT200_HIGHLIGHT_MOUSE 1001
##define SET_BTN_EVENT_MOUSE 1002
##define SET_ANY_EVENT_MOUSE 1003
# The motion reporting modes are strictly xterm extensions, and are not part of any standard, though they are analogous to the DEC VT200 DECELR locator reports.
#
# Parameters (such as pointer position and button number) for all mouse tracking escape sequences generated by xterm encode numeric parameters in a single character as value+32. For example, ! specifies the value 1. The upper left character position on the terminal is denoted as 1,1.
#
# X10 compatibility mode sends an escape sequence only on button press, encoding the location and the mouse button pressed. It is enabled by specifying parameter 9 to DECSET. On button press, xterm sends CSI M C b C x C y (6 characters). C b is button−1. C x and C y are the x and y coordinates of the mouse when the button was pressed.
#
# Normal tracking mode sends an escape sequence on both button press and release. Modifier key (shift, ctrl, meta) information is also sent. It is enabled by specifying parameter 1000 to DECSET. On button press or release, xterm sends CSI M C b C x C y . The low two bits of C b encode button information: 0=MB1 pressed, 1=MB2 pressed, 2=MB3 pressed, 3=release. The next three bits encode the modifiers which were down when the button was pressed and are added together: 4=Shift, 8=Meta, 16=Control. Note however that the shift and control bits are normally unavailable because xterm uses the control modifier with mouse for popup menus, and the shift modifier is used in the default translations for button events. The Meta modifier recognized by xterm is the mod1 mask, and is not necessarily the "Meta" key (see xmodmap). C x and C y are the x and y coordinates of the mouse event, encoded as in X10 mode.
#
# Wheel mice may return buttons 4 and 5. Those buttons are represented by the same event codes as buttons 1 and 2 respectively, except that 64 is added to the event code. Release events for the wheel buttons are not reported.
#
# Mouse hilite tracking notifies a program of a button press, receives a range of lines from the program,
# highlights the region covered by the mouse within that range until button release,
# and then sends the program the release coordinates.
# It is enabled by specifying parameter 1001 to DECSET.
#
# Highlighting is performed only for button 1, though other button events can be received.
# Warning: use of this mode requires a cooperating program or it will hang xterm.
# On button press, the same information as for normal tracking is generated;
# xterm then waits for the program to send mouse tracking information.
#
# All X events are ignored until the proper escape sequence is received from the pty: CSI P s ; P s ; P s ; P s ; P s T .
# The parameters are func, startx, starty, firstrow, and lastrow. func is non-zero to initiate hilite tracking and zero to abort. startx and starty give the starting x and y location for the highlighted region. The ending location tracks the mouse, but will never be above row firstrow and will always be above row lastrow. (The top of the screen is row 1.) When the button is released, xterm reports the ending position one of two ways: if the start and end coordinates are valid text locations: CSI t C x C y . If either coordinate is past the end of the line: CSI T C x C y C x C y C x C y . The parameters are startx, starty, endx, endy, mousex, and mousey. startx, starty, endx, and endy give the starting and ending character positions of the region. mousex and mousey give the location of the mouse at button up, which may not be over a character.
#
# Button-event tracking is essentially the same as normal tracking, but xterm also reports button-motion events.
# Motion events are reported only if the mouse pointer has moved to a different character cell.
# It is enabled by specifying parameter 1002 to DECSET.
# On button press or release, xterm sends the same codes used by normal tracking mode.
# On button-motion events, xterm adds 32 to the event code (the third character, C b ).
# The other bits of the event code specify button and modifier keys as in normal mode.
#
# For example,
# motion into cell x,y with button 1 down is reported as:
# CSI M @ C x C y . ( @ = 32 + 0 (button 1) + 32 (motion indicator) ).
#
# Similarly,
# motion with button 3 down is reported as:
# CSI M B C x C y . ( B = 32 + 2 (button 3) + 32 (motion indicator) ).
#
# Any-event mode is the same as button-event mode, except that all | |
# extract_haplotype_read_counts.py
#!/bin/env python
#
# Copyright 2013 <NAME> and <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
#
"""
usage: extract_haplotype_read_counts.py [-h] [--assembly ASSEMBLY]
[--target_region_size TARGET_REGION_SIZE]
[--sample_file SAMPLE_FILE]
[--homozygous_as_counts {zero,rand_hap,rand_allele}]
track_prefix pop individual input_file
positional arguments:
track_prefix prefix of tracks to extract reads from (e.g.
10_IND/PolII/read_counts/PolII_18505)
pop population prefix for genotype tracks (YRI or CEU)
individual individual to extract read counts for (e.g. 18505)
input_file bed-like file to read coordinates of test SNP and
target region from
optional arguments:
-h, --help show this help message and exit
--assembly ASSEMBLY genome assembly that reads were mapped to (e.g. hg18)
--target_region_size TARGET_REGION_SIZE
override target region size that is specified by input
file
--sample_file SAMPLE_FILE
path to file containing ordered list of genotyped
individuals
--homozygous_as_counts {zero,rand_hap,rand_allele}
how to report AS counts at linked het SNPs when test
SNP genotype is homozygous or unknown. zero (default):
set allele-specific counts to 0; rand_hap: randomly
choose one of the haplotypes to be 'reference';
rand_allele: choose random allele at each SNP to be
reference
This script is used to generate input files for the combined haplotype
test script. It depends on a number of datafiles, which may make it
difficult for other people to use. More specifically this script reads
data from HDF5 files (a.k.a. tracks) and uses code from the 'genome'
library (https://github.com/gmcvicker/genome) to access them.
The script reads from the following HDF5 tracks. <PREFIX> and <POP> are specified by
positional command line arguments "track_prefix", "pop":
<PREFIX>_AS_ref_count - number of allele-specific reads that match ref allele at each SNP
<PREFIX>_AS_alt_count - number of allele-specific reads that match alt allele at each SNP
<PREFIX>_AS_other_count - number of reads that match neither ref nor alt allele at each SNP
<PREFIX>_read_start_count - number of aligned reads that start at each position
<PREFIX>_impute2/snps - table with info about each SNP including alleles, and position
<PREFIX>impute2/snp_index - mapping from genomic position to index in snps table
impute2/<POP>_geno_probs - genotype probabilites for each individual
impute2/<POP>_haplotypes - phasing information for alleles
"""
import argparse
import numpy as np
import sys
import gzip
import genome.db
import genome.coord
SNP_UNDEF = -1
HAP_UNDEF = -1
class SNP(object):
    """Plain record describing one SNP: position plus the two alleles."""

    def __init__(self, chrom, pos, name, ref_allele, alt_allele):
        self.chrom, self.pos, self.name = chrom, pos, name
        self.ref_allele, self.alt_allele = ref_allele, alt_allele
class DataTracks(object):
    """Opens and holds all of the HDF5 tracks needed to extract read
    counts, SNP metadata, genotype probabilities and haplotypes."""
    def __init__(self, gdb, track_prefix, pop):
        """
        :param gdb: genome.db database handle used to open tracks
        :param track_prefix: prefix of the read-count tracks
        :param pop: population prefix for genotype tracks (YRI or CEU)
        """
        # open tracks that read counts will be pulled from
        ref_as_name = "%s_AS_ref_count" % track_prefix
        alt_as_name = "%s_AS_alt_count" % track_prefix
        oth_as_name = "%s_AS_other_count" % track_prefix
        read_count_name = "%s_read_start_count" % track_prefix
        self.ref_count_track = gdb.open_track(ref_as_name)
        self.alt_count_track = gdb.open_track(alt_as_name)
        self.other_count_track = gdb.open_track(oth_as_name)
        self.read_count_track = gdb.open_track(read_count_name)
        # open tracks where SNP information can be extracted
        self.snp_track = gdb.open_track("impute2/snps")
        self.snp_index_track = gdb.open_track("impute2/snp_index")
        self.geno_track = gdb.open_track("impute2/%s_geno_probs" %
                                         pop.lower())
        self.hap_track = gdb.open_track("impute2/%s_haplotypes" %
                                        pop.lower())
    def close(self):
        """closes all of the data tracks"""
        self.ref_count_track.close()
        self.alt_count_track.close()
        self.other_count_track.close()
        self.read_count_track.close()
        self.snp_track.close()
        self.snp_index_track.close()
        self.geno_track.close()
        self.hap_track.close()
def parse_args():
    """Build the argparse parser and return the parsed command-line args."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--assembly", help="genome assembly that reads "
                        "were mapped to (e.g. hg18)", default=None)
    parser.add_argument('--target_region_size',
                        help='override target region size that is '
                        'specified by input file',
                        type=int, default=None)
    parser.add_argument("--sample_file",
                        help="path to file containing ordered list of "
                        "genotyped individuals",
                        default=None)
    parser.add_argument("--homozygous_as_counts",
                        help="how to report AS counts at linked het SNPs when "
                        "test SNP genotype is homozygous or unknown. "
                        "zero (default): set allele-specific counts to 0; "
                        "rand_hap: randomly choose one of the haplotypes "
                        "to be 'reference'; "
                        "rand_allele: choose random allele at each SNP to "
                        "be reference", default="zero",
                        choices=("zero", "rand_hap", "rand_allele"))
    parser.add_argument("track_prefix",
                        help="prefix of tracks to extract reads from "
                        "(e.g. 10_IND/PolII/read_counts/PolII_18505)")
    parser.add_argument("pop",
                        help="population prefix for genotype tracks "
                        "(YRI or CEU)")
    parser.add_argument("individual",
                        help="individual to extract read counts "
                        "for (e.g. 18505)")
    parser.add_argument("input_file",
                        help="bed-like file to read coordinates of "
                        "test SNP and target region from")
    args = parser.parse_args()
    return args
def get_region_snps(dt, region_list, ind_idx):
    """Retrieve all of the SNPs in the requested regions, annotated with
    genotype and haplotype information for individual *ind_idx*.

    All regions must be on the same chromosome.

    :param dt: DataTracks holding the open HDF5 tracks
    :param region_list: coordinate regions to scan for SNPs
    :param ind_idx: column index of the individual in the genotype tables
    :returns: list of SNP objects
    :raises genome.coord.CoordError: on an empty region list or regions
        spanning more than one chromosome
    """
    if len(region_list) == 0:
        raise genome.coord.CoordError("expected at least one coordinate, got 0")
    chrom = region_list[0].chrom
    snp_tab = dt.snp_track.h5f.getNode("/%s" % chrom.name)
    hap_tab = dt.hap_track.h5f.getNode("/%s" % chrom.name)
    geno_tab = dt.geno_track.h5f.getNode("/%s" % chrom.name)
    region_snps = []
    for region in region_list:
        if region.chrom.name != chrom.name:
            # BUGFIX: was a bare 'CoordError', which is not a name defined in
            # this module and would have raised NameError instead
            raise genome.coord.CoordError("only regions on same chromosome are supported")
        snp_idx = dt.snp_index_track.get_nparray(chrom, region.start, region.end)
        # positions where a SNP is actually present
        offsets = np.where(snp_idx != SNP_UNDEF)[0]
        for offset in offsets:
            i = snp_idx[offset]
            snp_row = snp_tab[i]
            # extract geno probs and haplotypes for this individual
            geno_probs = geno_tab[i, (ind_idx*3):(ind_idx*3 + 3)]
            haps = hap_tab[i, (ind_idx*2):(ind_idx*2 + 2)]
            snp = SNP(region.chrom, snp_row['pos'],
                      snp_row['name'],
                      snp_row['allele1'],
                      snp_row['allele2'])
            # heterozygote probability for this SNP
            snp.het_prob = geno_probs[1]
            # linear combination of genotype probs:
            # 0*homo_ref + 1*het + 2*homo_alt
            snp.geno_sum = geno_probs[1] + 2.0*geno_probs[2]
            snp.haps = haps
            # TODO: set linkage probabilty properly
            snp.linkage_prob = 1.0
            region_snps.append(snp)
    return region_snps
def get_het_snps(snp_list):
    """Return only the SNPs whose two haplotype alleles differ (heterozygous)."""
    return [snp for snp in snp_list if snp.haps[0] != snp.haps[1]]
def lookup_individual_index(options, ind_name):
    """Return the row index of individual *ind_name* in the samples file.

    The index is used to look up information in the genotype and
    haplotype tables.

    :param options: parsed arguments providing ``sample_file`` and ``pop``
    :param ind_name: individual identifier without the "NA" prefix
    :raises ValueError: if the individual is not listed in the file
    """
    if options.sample_file is None:
        sample_file = "/data/share/10_IND/IMPUTE/%s_samples.txt" % options.pop
    else:
        sample_file = options.sample_file
    sys.stderr.write("reading list of individuals from %s\n" % sample_file)
    # 'with' guarantees the file is closed even on the not-found path
    # (the original leaked the handle when raising ValueError)
    with open(sample_file) as f:
        for idx, line in enumerate(f):
            words = line.rstrip().split()
            # sample names are stored with a leading "NA" prefix
            name = words[0].replace("NA", "")
            if name == ind_name:
                return idx
    # report the resolved path (the original reported options.sample_file,
    # which is None whenever the default path was used)
    raise ValueError("individual %s is not in samples file %s" %
                     (ind_name, sample_file))
def set_snp_counts(dt, region_list, snps, test_snp, options):
    """Sets counts of reference and alternate haplotype matching reads
    for each of the provided SNPs. Labeling of 'reference' or 'alternate'
    is with respect to the test SNP.

    :param dt: DataTracks holding the open read-count tracks
    :param region_list: regions whose read counts should be scanned
    :param snps: SNPs to annotate (ref_hap_count/alt_hap_count/other_count
        attributes are set in place)
    :param test_snp: SNP whose phase defines the ref/alt haplotype labels
        (may be None or homozygous)
    :param options: parsed arguments providing ``homozygous_as_counts``
    """
    if test_snp and (test_snp.haps[0] != test_snp.haps[1]) and \
       (test_snp.haps[0] != HAP_UNDEF):
        # test SNP is heterozygous: use this to phase counts that are
        # retrieved at linked het SNPs
        if test_snp.haps[0] == 0:
            # reference allele is first haplotype at test SNP
            ref_idx = 0
            alt_idx = 1
        else:
            # alt allele is first haplotype at test SNP
            ref_idx = 1
            alt_idx = 0
    else:
        # test SNP is homozygous or is undefined
        # so we have no way to tell which haplotype it is on
        if options.homozygous_as_counts == "rand_hap":
            # choose haplotype randomly
            if np.random.randint(2) == 0:
                ref_idx = 0
                alt_idx = 1
            else:
                ref_idx = 1
                alt_idx = 0
        else:
            # "zero" and "rand_allele" modes are resolved per-SNP below
            ref_idx = None
            alt_idx = None
    for region in region_list:
        ref_counts = dt.ref_count_track.get_nparray(region.chrom, region.start,
                                                    region.end)
        alt_counts = dt.alt_count_track.get_nparray(region.chrom, region.start,
                                                    region.end)
        other_counts = dt.other_count_track.get_nparray(region.chrom,
                                                        region.start,
                                                        region.end)
        for snp in snps:
            # we have het SNPs from several regions, but only want to consider
            # ones in current region
            if snp.pos >= region.start and snp.pos <= region.end:
                offset = snp.pos - region.start
                ref_count = ref_counts[offset]
                alt_count = alt_counts[offset]
                snp.other_count = other_counts[offset]
                if ref_idx is None:
                    if options.homozygous_as_counts == "zero":
                        snp.ref_hap_count = 0
                        snp.alt_hap_count = 0
                    elif options.homozygous_as_counts == "rand_allele":
                        # choose allele randomly to be reference
                        if np.random.randint(2) == 0:
                            snp.ref_hap_count = ref_count
                            snp.alt_hap_count = alt_count
                        else:
                            snp.ref_hap_count = alt_count
                            snp.alt_hap_count = ref_count
                    else:
                        raise ValueError("unknown homozygous_as_counts option %s" %
                                         options.homozygous_as_counts)
                else:
                    if snp.haps[ref_idx] == 0:
                        # reference allele is on "reference" haplotype
                        snp.ref_hap_count = ref_count
                        snp.alt_hap_count = alt_count
                    elif snp.haps[ref_idx] == 1:
                        # reference allele is on "alternate" haplotype
                        snp.ref_hap_count = alt_count
                        snp.alt_hap_count = ref_count
                    else:
                        raise ValueError("expected haplotype to be defined")
def write_header(f):
f.write("CHROM "
"TEST.SNP.POS "
"TEST.SNP.ID "
"TEST.SNP.REF.ALLELE "
"TEST.SNP.ALT.ALLELE "
"TEST.SNP.GENOTYPE "
"TEST.SNP.HAPLOTYPE "
"REGION.START "
"REGION.END "
"REGION.SNP.POS "
"REGION.SNP.HET.PROB "
"REGION.SNP.LINKAGE.PROB "
"REGION.SNP.REF.HAP.COUNT "
"REGION.SNP.ALT.HAP.COUNT "
"REGION.SNP.OTHER.HAP.COUNT "
"REGION.READ.COUNT | |
array is full
if i_dt_csa >= nb_dt_csa:
break
# Calculate the new time
t_csa += self.history.timesteps[i_dt_csa]
i_dt_csa += 1
# Exit the loop if the array is full
if (i_dt_csa + 1) >= nb_dt_csa:
break
# If the array has been read completely, but the sfr_input array is
# not full, fil the rest of the array with the last read value
if self.sfh_array[-1][1] == 0.0:
sfr_temp = 0.0
else:
sfr_temp = self.sfr_input[i_dt_csa-1]
while i_dt_csa < nb_dt_csa - 1:
self.sfr_input[i_dt_csa] = sfr_temp
m_stel_sfr_in += self.sfr_input[i_dt_csa] * \
self.history.timesteps[i_dt_csa]
t_csa += self.history.timesteps[i_dt_csa]
i_dt_csa += 1
# Normalise the SFR in order to be consistent with the integrated
# input star formation array (no mass loss considered!)
if self.sfh_array_norm > 0.0:
norm_sfr_in = self.sfh_array_norm / m_stel_sfr_in
for i_csa in range(0, nb_dt_csa):
self.sfr_input[i_csa] = self.sfr_input[i_csa] * norm_sfr_in
# Fill the missing last entry (extention of the last timestep, for tend)
# Since we don't know dt starting at tend, it is not part of m_stel_sfr_in
self.sfr_input[-1] = self.sfr_input[-2]
##############################################
## Calculate SFE Cl. ##
##############################################
    def __calculate_sfe_cl(self):
        '''
        Calculate the star formation efficiency and the initial mass of gas
        for a closed box model, given the final gas mass and the current
        stellar mass.

        Side effects
        ============
        Sets self.m_gas_f and self.sfe_gcs, may set self.not_enough_gas,
        and rescales self.ymgal[0] so its total equals the initial gas mass.
        '''
        # Get the average return gas fraction of SSPs.
        # -1.0 is the "not provided" sentinel; fall back to a typical 0.35.
        if self.mass_frac_SSP == -1.0:
            f_ej = 0.35
        else:
            f_ej = self.mass_frac_SSP
        # If the gas-to-stellar mass ratio is the selected input ...
        if self.r_gas_star > 0.0:
            # Calculate the final mass of gas
            self.m_gas_f = self.r_gas_star * self.stellar_mass_0
            # Calculate the initial mass of gas
            m_gas_ini = self.m_gas_f + self.stellar_mass_0
        # If the final mass of gas is the selected input ...
        elif self.m_gas_f > 0.0:
            # Calculate the initial mass of gas
            m_gas_ini = self.m_gas_f + self.stellar_mass_0
        # If the initial mass of gas is the selected input ...
        else:
            # Use the input value for the initial mass of gas
            m_gas_ini = self.mgal
            # Calculate the final mass of gas
            self.m_gas_f = m_gas_ini - self.stellar_mass_0
        # Verify if the final mass of gas is negative
        if self.m_gas_f < 0.0:
            self.not_enough_gas = True
            sfe_gcs = 1.0e-10
            print ('!!Error - Try to have a negative final gas mass!!')
        if not self.not_enough_gas:
            # Scale the initial mass of all isotopes so that the gas
            # reservoir sums to m_gas_ini
            scale_m_tot = m_gas_ini / np.sum(self.ymgal[0])
            for k_cm in range(len(self.ymgal[0])):
                self.ymgal[0][k_cm] = self.ymgal[0][k_cm] * scale_m_tot
            # Initialization for finding the right SFE (bisection search,
            # doubling/halving until both interval ends are bracketed)
            sfe_gcs = 1.8e-10
            sfe_max = 1.0
            sfe_min = 0.0
            m_gas_f_try = self.__get_m_gas_f(m_gas_ini, sfe_gcs, f_ej)
            # While the SFE is not the right one (0.01 Msun tolerance) ...
            while abs(m_gas_f_try - self.m_gas_f) > 0.01:
                # If the SFE needs to be increased ...
                if (m_gas_f_try / self.m_gas_f) > 1.0:
                    # Set the lower limit of the SFE interval
                    sfe_min = sfe_gcs
                    # If an upper limit is already defined ...
                    if sfe_max < 1.0:
                        # Set the SFE to the middle point of the interval
                        sfe_gcs = (sfe_max + sfe_gcs) * 0.5
                    # If an upper limit is not already defined ...
                    else:
                        # Try a factor of 2
                        sfe_gcs = sfe_gcs * 2.0
                # If the SFE needs to be decreased ...
                else:
                    # Set the upper limit of the SFE interval
                    sfe_max = sfe_gcs
                    # If a lower limit is already defined ...
                    if sfe_min > 0.0:
                        # Set the SFE to the middle point of the interval
                        sfe_gcs = (sfe_min + sfe_gcs) * 0.5
                    # If a lower limit is not already defined ...
                    else:
                        # Try a factor of 2
                        sfe_gcs = sfe_gcs * 0.5
                # Get the approximated final mass of gas
                m_gas_f_try = self.__get_m_gas_f(m_gas_ini, sfe_gcs, f_ej)
            # Keep the SFE in memory
            self.sfe_gcs = sfe_gcs
##############################################
## Get M_gas_f ##
##############################################
def __get_m_gas_f(self, m_gas_ini, sfe_gcs, f_ej):
'''
Return the final mass of gas, given the initial mass of the gas
reservoir and the star formation efficiency. The function uses
a simple star formation law in the form of SFR(t) = sfe * M_gas(t)
'''
# Initialisation of the integration
m_gas_loop = m_gas_ini
t_gmgf = 0.0
# For every timestep ...
for i_gmgf in range(0,self.nb_timesteps):
# Calculate the new mass of gass
t_gmgf += self.history.timesteps[i_gmgf]
#self.sfr_input[i_gmgf] = sfe_gcs * m_gas_loop
m_gas_loop -= sfe_gcs * (1-f_ej) * m_gas_loop * \
self.history.timesteps[i_gmgf]
# Return the final mass of gas
return m_gas_loop
##############################################
# Copy SFR Input #
##############################################
def __copy_sfr_input(self, path_sfh_in):
'''
This function reads a SFH input file and interpolates its values so it
can be inserted in the array "sfr_input", which contains the SFR for each
OMEGA timestep.
Note
====
The input file does not need to have constant time step lengths, and
does not need to have the same number of timesteps as the number of
OMEGA timesteps.
Important
=========
In OMEGA and SYGMA, t += timestep[i] is the first thing done in the main
loop. The loop calculates what happened between the previous t and the
new t. This means the mass of stars formed must be SFR(previous t) *
timestep[i]. Therefore, sfr_input[i] IS NOT the SFR at time t +=
timestep[i], but rather the SFR at previous time which is used for the
current step i.
Argument
========
path_sfh_in : Path of the input SFH file.
'''
# Variable to keep track of the OMEGA timestep
nb_dt_csi = self.nb_timesteps + 1
i_dt_csi = 0
t_csi = 0.0 # Not timesteps[0] because sfr_input[0] must be
# used from t = 0 to t = timesteps[0]
# Variable to keep track of the total stellar mass from the input SFH
m_stel_sfr_in = 0.0
# Open the file containing the SFR vs time
with open(os.path.join(nupy_path, path_sfh_in), 'r') as sfr_file:
# Read the first line (col 0 : t, col 1 : SFR)
line_1_str = sfr_file.readline()
parts_1 = [float(x) for x in line_1_str.split()]
# For every remaining line ...
for line_2_str in sfr_file:
# Extract data
parts_2 = [float(x) for x in line_2_str.split()]
# Calculate the interpolation coefficients (SFR = a*t + b)
a_csi = (parts_2[1] - parts_1[1]) / (parts_2[0] - parts_1[0])
b_csi = parts_1[1] - a_csi * parts_1[0]
# While we stay in the same time bin ...
while t_csi <= parts_2[0]:
# Calculate the right SFR for the specific OMEGA timestep
#self.sfr_input[i_dt_csi] = a_csi * t_csi + b_csi
# Calculate the average SFR for the specific OMEGA timestep
if i_dt_csi < self.nb_timesteps:
self.sfr_input[i_dt_csi] = a_csi * (t_csi + \
self.history.timesteps[i_dt_csi] * 0.5) + b_csi
else:
self.sfr_input[i_dt_csi] = a_csi * t_csi + b_csi
# Cumulate the mass of stars formed
if i_dt_csi < nb_dt_csi - 1:
m_stel_sfr_in += self.sfr_input[i_dt_csi] * \
self.history.timesteps[i_dt_csi]
# Calculate the new time
t_csi += self.history.timesteps[i_dt_csi]
# Go to the next time step
i_dt_csi += 1
# Exit the loop if the array is full
if i_dt_csi >= nb_dt_csi:
break
# Exit the loop if the array is full
if i_dt_csi >= nb_dt_csi:
break
# Copie the last read line
parts_1 = copy.copy(parts_2)
# Close the file
sfr_file.close()
# If the file has been read completely, but the sfr_input array is
# not full, fill the rest of the array with the last read value
while i_dt_csi < nb_dt_csi:
self.sfr_input[i_dt_csi] = self.sfr_input[i_dt_csi-1]
if i_dt_csi < nb_dt_csi - 1:
m_stel_sfr_in += self.sfr_input[i_dt_csi] * \
self.history.timesteps[i_dt_csi]
i_dt_csi += 1
# Normalise the SFR in order to be consistent with the input current
# stellar mass (if the stellar mass is known)
if self.stellar_mass_0 > 0.0:
norm_sfr_in = self.stellar_mass_0 / ((1-self.mass_frac_SSP) * m_stel_sfr_in)
for i_csi in range(0, nb_dt_csi):
self.sfr_input[i_csi] = self.sfr_input[i_csi] * norm_sfr_in
##############################################
# Generate Rand SFH #
##############################################
def __generate_rand_sfh(self):
'''
This function generates a random SFH. This should only be used for
testing purpose in order to look at how the uncertainty associated to the
SFH can affects the results.
The self.rand_sfh sets the maximum ratio between the maximum and the
minimum values for | |
<gh_stars>0
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
from astropy.utils.data import download_file
import os
import random
class FakeStars():
    """
    A class object that generates fake sources or fake images.

    NOTE - The generated sources use statistical information from existing
    data but are completely pseudo and do not represent a real science
    image.

    In order for this class to run, you need the necessary Python modules
    installed (numpy, matplotlib, astropy). It is recommended you run this
    on LINUX; Windows compatibility is not guaranteed.
    """

    def __init__(self, image_name, x_len, y_len):
        """
        Initializes a new FakeStars object which represents either a fake
        space image or a fake singular point source.

        @type self: FakeStars
        @type image_name: String (The name of the image)
        @type x_len: Int (Width of Image)
        @type y_len: Int (Height of Image)
        @rtype: None
        """
        self.name = image_name
        self.width = x_len
        self.height = y_len
        # Accumulated image and the current noise realization, both
        # indexed [x][y] with shape (x_len, y_len).
        self._image_array = np.zeros((x_len, y_len))
        self.noise = np.zeros((x_len, y_len))

    def generate_white_noise(self, std=29, mean=0):
        """
        Generates a 2D array of random gaussian white noise shaped like the
        image. The default std of 29 is SuperBIT's estimated noise level.

        @type self: FakeStars
        @type std: Number (standard deviation of the noise)
        @type mean: Number (mean of the noise)
        @rtype: Numpy Array of shape (width, height)
        """
        num_samples = self.width * self.height
        white_noise = np.round(np.random.normal(mean, std, size=num_samples))
        return white_noise.reshape(self.width, self.height)

    def generate_background(self, mean=1250, std=0):
        """
        Generates a fake background representing the dark sky of the image
        (median value 1250 by default, added per pixel).

        NOTE(review): the returned array is 1D (width*height,) and is never
        reshaped by this class -- reshape it before adding to the image.

        @type self: FakeStars
        @rtype: Numpy Array
        """
        num_samples = self.width * self.height
        background = np.round(np.random.normal(mean, std, size=num_samples))
        return background

    def create_2d_gaussian(self, size, xfwhm=2, yfwhm=2, center=None, btness=400):
        """ Make a square gaussian kernel.

        size is the length of a side of the square; xfwhm/yfwhm are the
        full-width-half-maximum along each axis, which can be thought of as
        an effective radius.

        @type self: FakeStars
        @type size: Int
        @type xfwhm: Int
        @type yfwhm: Int
        @type center: Tuple (Coordinates of center of star)
        @type btness: Int (Brightest point of star)
        @rtype: Numpy Array of shape (size, size), peak value btness
        """
        x = np.arange(0, size, 1, float)
        y = x[:, np.newaxis]
        if center is None:
            x0 = y0 = size // 2
        else:
            x0 = center[0]
            y0 = center[1]
        # The -4*ln(2) factor makes xfwhm/yfwhm true full-widths at half max.
        return btness * np.exp(-4*np.log(2) * ((x-x0)**2 / xfwhm**2 + \
                               (y-y0)**2 / yfwhm**2))

    def create_point_stars(self, num_stars):
        """
        Adds num_stars single hot pixels at random positions, each with a
        random intensity between 6000 and 60000 counts.

        @type self: FakeStars
        @type num_stars: Integer
        @rtype: None
        """
        for _ in range(num_stars):
            point_star = random.randint(6000, 60000)
            x_pos = random.randint(0, self.width - 1)
            y_pos = random.randint(0, self.height - 1)
            self._image_array[x_pos][y_pos] += point_star

    def create_stars(self, generator=100, sz=20, xf=10, yf=10, amp=200):
        """
        Adds `generator` gaussian stars at random positions.

        @type self: FakeStars
        @type generator: Int (Number of stars to add)
        @type sz: Int (side length of each star stamp)
        @type xf: Int (FWHM in x direction)
        @type yf: Int (FWHM in y direction)
        @type amp: Int (The Amplitude of the gaussian - how bright a star is)
        @rtype: None
        """
        while generator != 0:
            # 100-pixel margin keeps the sz x sz stamp inside the frame
            # (assumes sz <= 100 -- true for all in-class callers).
            x = random.randint(100, self.width - 100)
            y = random.randint(100, self.height - 100)
            source = self.create_2d_gaussian(size=sz, xfwhm=xf, yfwhm=yf, btness=amp)
            self._image_array[x:x+sz, y:y+sz] += source
            generator -= 1

    def create_cosmic_rays(self, amount):
        """
        Adds `amount` cosmic-ray streaks at random positions. Each streak is
        up to 40 pixels long, fades linearly from 8000 counts, and runs
        diagonally (down, up) or horizontally depending on a random choice.

        @type self: FakeStars
        @type amount: Int (Number of sources)
        @rtype: None
        """
        while amount != 0:
            size = 40
            chance = random.randint(0, 1)  # kept for RNG-sequence parity (unused)
            rotation = random.randint(0, 2)
            x_pos = random.randint(0, self.width - 1)
            y_pos = random.randint(0, self.height - 1)
            brightness = 8000
            while size != 0:
                # BUG FIX: bounds were hard-coded to a 4400x6650 frame, which
                # raised IndexError for any other image size; also stop when
                # y goes negative instead of wrapping to the far edge.
                if x_pos >= self.width or y_pos >= self.height or y_pos < 0:
                    break
                self._image_array[x_pos][y_pos] += brightness
                x_pos += 1
                if rotation == 0:
                    y_pos -= 1
                elif rotation == 1:
                    y_pos += 1
                # rotation == 2: horizontal streak (y unchanged)
                size -= 1
                brightness -= 8000/500
            amount -= 1

    def create_single_source(self, bt=250, xf=10, yf=10, rand_pos=False, std=29, sz=100):
        """
        Creates a single fake source with its own noise realization on a
        sz x sz stamp; replaces self._image_array and self.noise.

        @type self: FakeStars
        @type bt: Int (Brightest point in the star)
        @type xf: Int (FWHM in x direction)
        @type yf: Int (FWHM in y direction)
        @type rand_pos: Boolean (jitters the star by up to half a pixel)
        @type std: Int (Standard Deviation)
        @type sz: Size of Image (height by width)
        @rtype: None
        """
        x = y = sz // 2
        if rand_pos:
            x += random.uniform(-0.5, 0.5)
            y += random.uniform(-0.5, 0.5)
        star = self.create_2d_gaussian(size=sz, xfwhm=xf, yfwhm=yf, center=(x, y), btness=bt)
        num_samples = sz * sz
        # NOTE(review): the noise mean is hard-coded to 29 here (not 0 as in
        # generate_white_noise) -- confirm whether this offset is intended.
        white_noise = np.round(np.random.normal(29, std, size=num_samples))
        white_noise = np.reshape(white_noise, (sz, sz))
        self.noise = white_noise
        self._image_array = star + white_noise

    def create_image(self, signal=1, btness= 400):
        """
        Creates a fake map with different sources: point stars, gaussian
        stars and cosmic rays, plus white noise.

        signal scales how many sources of each kind are created. btness may
        be a single brightness or a list; with a list, each brightness
        occasionally (1 in 26) yields an elongated source.

        @type self: Fake_Star
        @type signal: Int
        @type btness: Int or list (Brightest point of a source)
        @rtype: None
        """
        self.noise = self.generate_white_noise()
        self.create_point_stars(signal*100)
        if type(btness) == list:
            for amp in btness:
                if random.randint(0, 25) == 1:
                    self.create_stars(generator=signal * 5, sz=50*2, xf=5, yf=2, amp=amp)
                else:
                    self.create_stars(generator=signal * 5, sz=50*2, xf=amp/40, yf=amp/40, amp=amp)
        else:
            # BUG FIX: this branch previously referenced an undefined name
            # ('amp') and a nonexistent keyword ('bt'), so it always raised.
            self.create_stars(generator=signal*5, sz=30, xf=btness/40, yf=btness/40, amp=btness)
        self.create_cosmic_rays(signal*3)
        self._image_array += self.noise

    def new_noise(self):
        """
        Creates a new generated white noise background with the same sources
        in the same positions.

        @type self: Fake_stars
        @rtype: None
        """
        self._image_array -= self.noise
        self.noise = self.generate_white_noise()
        self._image_array += self.noise

    def cap_pixel_value(self, bit_limit=64):
        """
        Limits the maximal pixel value to 65535 (the 16-bit maximum).

        NOTE(review): the bit_limit parameter was never used by the original
        implementation; it is kept for backward compatibility but the cap is
        always 16-bit.

        @type self: Fake_Stars
        @rtype: None
        """
        # Vectorized in-place clip (replaces the per-pixel Python loop).
        np.minimum(self._image_array, 65535, out=self._image_array)

    def show_image(self, together=True):
        """
        Displays the scaled and original version of the source.
        The together parameter puts both panels into one figure.

        @type self: FakeStars
        @type together: Boolean
        @rtype: None
        """
        if together:
            fig = plt.figure(figsize=(10, 10))
            columns = 2
            rows = 1
            # Display range: mean +/- 3 sigma (renamed to avoid shadowing
            # the built-in max/min names).
            vmax_val = np.mean(self._image_array) + np.std(self._image_array) * 3
            vmin_val = np.mean(self._image_array) - np.std(self._image_array) * 3
            fig.add_subplot(rows, columns, 1)
            plt.title("Normalized")
            plt.imshow(self._image_array, vmax=vmax_val, vmin=vmin_val)
            plt.colorbar()
            fig.add_subplot(rows, columns, 2)
            plt.title("Original")
            plt.imshow(self._image_array)
            plt.colorbar()
            plt.show()
        else:
            plt.figure("Scaled")
            plt.title("Scaled")
            # Display range: mean +/- 1 sigma
            vmax_val = np.mean(self._image_array) + np.std(self._image_array) * 1
            vmin_val = np.mean(self._image_array) - np.std(self._image_array) * 1
            plt.imshow(self._image_array, vmax=vmax_val, vmin=vmin_val)
            plt.colorbar()
            plt.figure("Original")
            plt.title("Original")
            plt.imshow(self._image_array)
            plt.colorbar()
            plt.show()

    def create_fits_image(self):
        """
        Creates a fake fits image and saves it onto the current working
        directory. Use os.chdir() to change to the desired save location.
        An existing file of the same name is overwritten.

        @type self: FakeStars
        @rtype: None
        """
        hdu = fits.PrimaryHDU(data=self._image_array)
        hdu.writeto(self.name + "large.fits", overwrite=True)

    def show_statistics(self):
        """
        Runs basic statistics on the fake image, stores them as attributes
        (median, mean, min, std) and prints the values.

        @type self: FakeStars
        @rtype: None
        """
        # Basic Statistics
        self.median = np.median(self._image_array)
        self.mean = np.mean(self._image_array)
        self.min = np.min(self._image_array)
        self.std = np.std(self._image_array)
        print("Mean: ", self.mean, "\n")
        print("Min: ", self.min, "\n")
        print("Median: ", self.median, "\n")
        print("Standard Deviation: ", self.std, "\n")

    def return_image(self):
        """
        Returns the image array data.

        @type self: Fakestars
        @rtype: Numpy Array
        """
        return self._image_array
## (Example)
if __name__ == '__main__':
    # Example driver: generate a full-frame fake image and save it as FITS.
    # NOTE(review): hard-coded user path -- change before running elsewhere.
    os.chdir("/Users/a16472/desktop/")
    # Full SuperBIT-sized frame with several source brightness levels
    fakestar1 = FakeStars("fakestar1", 4400, 6650)
    fakestar1.create_image(signal = 150, btness=[1200, 1000, 800, 200])
    # Alternative: a single small source on a 100x100 stamp
    # fakestar1 = FakeStars("fakestar1", 100, 100)
    # fakestar1.create_single_source(bt=250, xf=2, yf=2, sz=100)
    fakestar1.cap_pixel_value()
    fakestar1.create_fits_image()
    fakestar1.show_image(together=False)
    fakestar1.new_noise()
| |
<reponame>gargrohin/sngan.pytorch
import torch.nn as nn
from .gen_resblock import GenBlock
import numpy as np
import torch
class Generator(nn.Module):
    """SNGAN ResNet generator: linear projection followed by five
    upsampling GenBlocks, batch norm and a tanh RGB head.

    On construction this also loads pretrained Chainer weights from
    'ResNetGenerator_850000.npz' (read from the current working directory)
    and copies them into the matching PyTorch parameters.
    """

    def __init__(self, args, activation=nn.ReLU(), n_classes=0):
        super(Generator, self).__init__()
        # NOTE(review): hard-coded weight file loaded at construction time;
        # fails if the npz is not in the working directory.
        b = np.load('ResNetGenerator_850000.npz')
        lst = b.files
        for item in lst:
            print(item, b[item].shape)
        self.bottom_width = args.bottom_width
        self.activation = activation
        self.n_classes = n_classes
        self.ch = args.gf_dim
        print("XXXX", args.latent_dim, self.bottom_width, self.ch)
        # Project the latent vector to a (ch*16, bottom_width, bottom_width) map
        self.l1 = nn.Linear(args.latent_dim, (self.bottom_width ** 2) * self.ch * 16)
        self.block2 = GenBlock(self.ch*16, self.ch*16, activation=activation, upsample=True, n_classes=n_classes)
        self.block3 = GenBlock(self.ch*16, self.ch*8, activation=activation, upsample=True, n_classes=n_classes)
        self.block4 = GenBlock(self.ch*8, self.ch*4, activation=activation, upsample=True, n_classes=n_classes)
        self.block5 = GenBlock(self.ch*4, self.ch*2, activation=activation, upsample=True, n_classes=n_classes)
        self.block6 = GenBlock(self.ch*2, self.ch, activation=activation, upsample=True, n_classes=n_classes)
        self.b7 = nn.BatchNorm2d(self.ch)
        self.l7 = nn.Conv2d(self.ch, 3, kernel_size=3, stride=1, padding=1)
        # Map from PyTorch parameter names to Chainer npz entry names.
        mapping = {
            "l1.weight" : "l1/W", #torch.Size([128]) (1024,)
            "l1.bias" : "l1/b", #torch.Size([128]) (1024,)
            "block2.c1.bias" : "block2/c1/b", #torch.Size([128]) (1024,)
            "block2.c1.weight" : "block2/c1/W", #torch.Size([128, 128, 3, 3]) (1024, 1024, 3, 3)
            "block2.c2.bias" : "block2/c2/b", #torch.Size([256]) (1024,)
            "block2.c2.weight" : "block2/c2/W", #torch.Size([256, 128, 3, 3]) (1024, 1024, 3, 3)
            "block2.b1.bias" : "block2/b1/betas/W", #torch.Size([128]) (1024,)
            "block2.b1.weight" : "block2/b1/gammas/W", #torch.Size([128, 128, 3, 3]) (1024, 1024, 3, 3)
            "block2.b2.bias" : "block2/b2/betas/W", #torch.Size([256]) (1024,)
            "block2.b2.weight" : "block2/b2/gammas/W",
            "block2.c_sc.bias" : "block2/c_sc/b", #torch.Size([256]) (1024,)
            "block2.c_sc.weight" : "block2/c_sc/W", #torch.Size([256, 128, 1, 1]) (1024, 1024, 1, 1)
            "block3.c1.bias" : "block3/c1/b", #torch.Size([128]) (1024,)
            "block3.c1.weight" : "block3/c1/W", #torch.Size([128, 128, 3, 3]) (1024, 1024, 3, 3)
            "block3.c2.bias" : "block3/c2/b", #torch.Size([256]) (1024,)
            "block3.c2.weight" : "block3/c2/W", #torch.Size([256, 128, 3, 3]) (1024, 1024, 3, 3)
            "block3.b1.bias" : "block3/b1/betas/W", #torch.Size([128]) (1024,)
            "block3.b1.weight" : "block3/b1/gammas/W", #torch.Size([128, 128, 3, 3]) (1024, 1024, 3, 3)
            "block3.b2.bias" : "block3/b2/betas/W", #torch.Size([256]) (1024,)
            "block3.b2.weight" : "block3/b2/gammas/W",
            "block3.c_sc.bias" : "block3/c_sc/b", #torch.Size([256]) (1024,)
            "block3.c_sc.weight" : "block3/c_sc/W", #torch.Size([256, 128, 1, 1]) (1024, 1024, 1, 1)
            "block4.c1.bias" : "block4/c1/b", #torch.Size([128]) (1024,)
            "block4.c1.weight" : "block4/c1/W", #torch.Size([128, 128, 3, 3]) (1024, 1024, 3, 3)
            "block4.c2.bias" : "block4/c2/b", #torch.Size([256]) (1024,)
            "block4.c2.weight" : "block4/c2/W", #torch.Size([256, 128, 3, 3]) (1024, 1024, 3, 3)
            "block4.b1.bias" : "block4/b1/betas/W", #torch.Size([128]) (1024,)
            "block4.b1.weight" : "block4/b1/gammas/W", #torch.Size([128, 128, 3, 3]) (1024, 1024, 3, 3)
            "block4.b2.bias" : "block4/b2/betas/W", #torch.Size([256]) (1024,)
            "block4.b2.weight" : "block4/b2/gammas/W",
            "block4.c_sc.bias" : "block4/c_sc/b", #torch.Size([256]) (1024,)
            "block4.c_sc.weight" : "block4/c_sc/W", #torch.Size([256, 128, 1, 1]) (1024, 1024, 1, 1)
            "block5.c1.bias" : "block5/c1/b", #torch.Size([128]) (1024,)
            "block5.c1.weight" : "block5/c1/W", #torch.Size([128, 128, 3, 3]) (1024, 1024, 3, 3)
            "block5.c2.bias" : "block5/c2/b", #torch.Size([256]) (1024,)
            "block5.c2.weight" : "block5/c2/W", #torch.Size([256, 128, 3, 3]) (1024, 1024, 3, 3)
            "block5.b1.bias" : "block5/b1/betas/W", #torch.Size([128]) (1024,)
            "block5.b1.weight" : "block5/b1/gammas/W", #torch.Size([128, 128, 3, 3]) (1024, 1024, 3, 3)
            "block5.b2.bias" : "block5/b2/betas/W", #torch.Size([256]) (1024,)
            "block5.b2.weight" : "block5/b2/gammas/W",
            "block5.c_sc.bias" : "block5/c_sc/b", #torch.Size([256]) (1024,)
            "block5.c_sc.weight" : "block5/c_sc/W", #torch.Size([256, 128, 1, 1]) (1024, 1024, 1, 1)
            "block6.c1.bias" : "block6/c1/b", #torch.Size([128]) (1024,)
            "block6.c1.weight" : "block6/c1/W", #torch.Size([128, 128, 3, 3]) (1024, 1024, 3, 3)
            "block6.c2.bias" : "block6/c2/b", #torch.Size([256]) (1024,)
            "block6.c2.weight" : "block6/c2/W", #torch.Size([256, 128, 3, 3]) (1024, 1024, 3, 3)
            "block6.b1.bias" : "block6/b1/betas/W", #torch.Size([128]) (1024,)
            "block6.b1.weight" : "block6/b1/gammas/W", #torch.Size([128, 128, 3, 3]) (1024, 1024, 3, 3)
            "block6.b2.bias" : "block6/b2/betas/W", #torch.Size([256]) (1024,)
            "block6.b2.weight" : "block6/b2/gammas/W",
            "block6.c_sc.bias" : "block6/c_sc/b", #torch.Size([256]) (1024,)
            "block6.c_sc.weight" : "block6/c_sc/W", #torch.Size([256, 128, 1, 1]) (1024, 1024, 1, 1)
            "b7.weight" : "b7/gamma", #torch.Size([256, 128, 1, 1]) (1024, 1024, 1, 1)
            "b7.bias" : "b7/beta", #torch.Size([256, 128, 1, 1]) (1024, 1024, 1, 1)
            "l7.weight" : "l7/W", #torch.Size([256, 128, 1, 1]) (1024, 1024, 1, 1)
            "l7.bias" : "l7/b", #torch.Size([256, 128, 1, 1]) (1024, 1024, 1, 1)
        }
        # Copy each pretrained array into the matching parameter.
        for n,p in self.named_parameters():
            try:
                source = mapping[n]
                src = b[source]
                if("betas" in source or "gammas" in source):
                    # Chainer's conditional BN stores one row per class;
                    # averaging collapses them into unconditional BN params.
                    src = src.mean(0)
                    print(n, source, p.size(), src.shape, type(b[source]))
                    p.data = torch.from_numpy(src).cuda()
                else:
                    print(n, source, p.size(), src.shape, type(b[source]))
                    p.data = torch.from_numpy(src).cuda()
            except:
                # NOTE(review): bare except silently skips any parameter that
                # fails to load -- missing mapping key, shape mismatch, or
                # even an unavailable CUDA device. Consider narrowing, but
                # beware that .cuda() errors are currently swallowed too.
                pass

    def forward(self, z):
        # Latent vector -> 4D feature map -> five upsampling residual
        # blocks -> BN + activation -> tanh-squashed RGB image.
        h = z
        h = self.l1(h).view(-1, self.ch*16, self.bottom_width, self.bottom_width)
        h = self.block2(h)
        h = self.block3(h)
        h = self.block4(h)
        h = self.block5(h)
        h = self.block6(h)
        h = self.b7(h)
        h = self.activation(h)
        h = nn.Tanh()(self.l7(h))
        return h
"""Discriminator"""
def _downsample(x):
    """Halve the spatial resolution of *x* with 2x2 mean (average) pooling."""
    pool = nn.AvgPool2d(kernel_size=2)
    return pool(x)
class OptimizedDisBlock(nn.Module):
    """First discriminator block: conv -> activation -> conv -> avg-pool
    residual path plus a pooled 1x1-conv shortcut (no pre-activation)."""

    def __init__(self, args, in_channels, out_channels, ksize=3, pad=1, activation=nn.ReLU()):
        super(OptimizedDisBlock, self).__init__()
        self.activation = activation
        conv_a = nn.Conv2d(in_channels, out_channels, kernel_size=ksize, padding=pad)
        conv_b = nn.Conv2d(out_channels, out_channels, kernel_size=ksize, padding=pad)
        conv_skip = nn.Conv2d(in_channels, out_channels, kernel_size=1, padding=0)
        # Optionally wrap every conv in spectral normalization.
        if args.d_spectral_norm:
            conv_a = nn.utils.spectral_norm(conv_a)
            conv_b = nn.utils.spectral_norm(conv_b)
            conv_skip = nn.utils.spectral_norm(conv_skip)
        self.c1 = conv_a
        self.c2 = conv_b
        self.c_sc = conv_skip

    def residual(self, x):
        out = self.c2(self.activation(self.c1(x)))
        return _downsample(out)

    def shortcut(self, x):
        return self.c_sc(_downsample(x))

    def forward(self, x):
        return self.residual(x) + self.shortcut(x)
class DisBlock(nn.Module):
    """Pre-activation residual discriminator block with optional
    downsampling and an optional learnable 1x1 shortcut."""

    def __init__(self, args, in_channels, out_channels, hidden_channels=None, ksize=3, pad=1,
                 activation=nn.ReLU(), downsample=False):
        super(DisBlock, self).__init__()
        self.activation = activation
        self.downsample = downsample
        # A learnable shortcut is needed whenever the channel count changes
        # or the spatial size shrinks.
        self.learnable_sc = (in_channels != out_channels) or downsample
        if hidden_channels is None:
            hidden_channels = in_channels
        conv_a = nn.Conv2d(in_channels, hidden_channels, kernel_size=ksize, padding=pad)
        conv_b = nn.Conv2d(hidden_channels, out_channels, kernel_size=ksize, padding=pad)
        if args.d_spectral_norm:
            conv_a = nn.utils.spectral_norm(conv_a)
            conv_b = nn.utils.spectral_norm(conv_b)
        self.c1 = conv_a
        self.c2 = conv_b
        if self.learnable_sc:
            conv_skip = nn.Conv2d(in_channels, out_channels, kernel_size=1, padding=0)
            if args.d_spectral_norm:
                conv_skip = nn.utils.spectral_norm(conv_skip)
            self.c_sc = conv_skip

    def residual(self, x):
        out = self.c1(self.activation(x))
        out = self.c2(self.activation(out))
        return _downsample(out) if self.downsample else out

    def shortcut(self, x):
        if not self.learnable_sc:
            return x
        out = self.c_sc(x)
        return _downsample(out) if self.downsample else out

    def forward(self, x):
        return self.residual(x) + self.shortcut(x)
class Discriminator(nn.Module):
def __init__(self, args, activation=nn.ReLU()):
super(Discriminator, self).__init__()
self.ch = 64 #int(args.df_dim)
b = np.load('SNResNetProjectionDiscriminator_850000.npz')
# lst = b.files
# for item in lst:
# print(item, b[item].shape)
self.activation = activation
self.block1 = OptimizedDisBlock(args, 3, self.ch)
self.block2 = DisBlock(args, self.ch, self.ch*2, activation=activation, downsample=True)
self.block3 = DisBlock(args, self.ch*2, self.ch*4, activation=activation, downsample=True)
self.block4 = DisBlock(args, self.ch*4, self.ch*8, activation=activation, downsample=True)
self.block5 = DisBlock(args, self.ch*8, self.ch*16, activation=activation, downsample=True)
self.block6 = DisBlock(args, self.ch*16, self.ch*16, activation=activation, downsample=False)
self.l7 = nn.Linear(self.ch*16, 1, bias=True)
if args.d_spectral_norm:
self.l7 = nn.utils.spectral_norm(self.l7)
mapping = {
"block1.c1.bias" : "block1/c1/b", #torch.Size([128]) (1024,)
"block1.c1.weight_orig" : "block1/c1/W", #torch.Size([128, 128, 3, 3]) (1024, 1024, 3, 3)
"block1.c2.bias" : "block1/c2/b", #torch.Size([256]) (1024,)
"block1.c2.weight_orig" : "block1/c2/W", #torch.Size([256, 128, 3, 3]) (1024, 1024, 3, 3)
"block1.c_sc.bias" : "block1/c_sc/b", #torch.Size([256]) (1024,)
"block1.c_sc.weight_orig" : "block1/c_sc/W", #torch.Size([256, 128, 1, 1]) (1024, 1024, 1, 1)
"block2.c1.bias" : "block2/c1/b", #torch.Size([128]) (1024,)
"block2.c1.weight_orig" : "block2/c1/W", #torch.Size([128, 128, 3, 3]) (1024, 1024, 3, 3)
"block2.c2.bias" : "block2/c2/b", #torch.Size([256]) (1024,)
"block2.c2.weight_orig" : "block2/c2/W", #torch.Size([256, 128, 3, 3]) (1024, 1024, 3, 3)
"block2.c_sc.bias" : "block2/c_sc/b", #torch.Size([256]) (1024,)
"block2.c_sc.weight_orig" : "block2/c_sc/W", #torch.Size([256, 128, 1, 1]) (1024, 1024, 1, 1)
"block3.c1.bias" : "block3/c1/b", #torch.Size([128]) (1024,)
"block3.c1.weight_orig" : "block3/c1/W", #torch.Size([128, 128, 3, 3]) (1024, 1024, 3, 3)
"block3.c2.bias" : "block3/c2/b", #torch.Size([256]) (1024,)
"block3.c2.weight_orig" : "block3/c2/W", #torch.Size([256, 128, 3, 3]) (1024, 1024, 3, 3)
"block3.c_sc.bias" : "block3/c_sc/b", #torch.Size([256]) (1024,)
"block3.c_sc.weight_orig" : "block3/c_sc/W", #torch.Size([256, 128, 1, 1]) (1024, 1024, 1, 1)
"block4.c1.bias" : "block4/c1/b", #torch.Size([128]) (1024,)
"block4.c1.weight_orig" : "block4/c1/W", #torch.Size([128, 128, 3, 3]) (1024, 1024, 3, 3)
"block4.c2.bias" : "block4/c2/b", #torch.Size([256]) (1024,)
"block4.c2.weight_orig" : "block4/c2/W", #torch.Size([256, 128, 3, 3]) (1024, 1024, 3, 3)
"block4.c_sc.bias" : "block4/c_sc/b", #torch.Size([256]) (1024,)
"block4.c_sc.weight_orig" : "block4/c_sc/W", #torch.Size([256, 128, 1, 1]) (1024, 1024, 1, 1)
"block5.c1.bias" : "block5/c1/b", #torch.Size([128]) (1024,)
"block5.c1.weight_orig" : "block5/c1/W", #torch.Size([128, 128, 3, 3]) (1024, 1024, 3, 3)
"block5.c2.bias" : "block5/c2/b", #torch.Size([256]) (1024,)
"block5.c2.weight_orig" : "block5/c2/W", #torch.Size([256, 128, 3, 3]) (1024, 1024, 3, 3)
"block5.c_sc.bias" : "block5/c_sc/b", #torch.Size([256]) (1024,)
"block5.c_sc.weight_orig" : "block5/c_sc/W", #torch.Size([256, 128, 1, 1]) (1024, 1024, 1, 1)
"block6.c1.bias" : "block6/c1/b", #torch.Size([128]) (1024,)
"block6.c1.weight_orig" : "block6/c1/W", #torch.Size([128, 128, 3, 3]) (1024, 1024, 3, 3)
"block6.c2.bias" : "block6/c2/b", #torch.Size([256]) (1024,)
"block6.c2.weight_orig" : "block6/c2/W", #torch.Size([256, 128, 3, 3]) (1024, 1024, 3, 3)
"block6.c_sc.bias" : "block6/c_sc/b", #torch.Size([256]) (1024,)
"block6.c_sc.weight_orig" : "block6/c_sc/W", #torch.Size([256, 128, 1, 1]) (1024, 1024, 1, 1)
"l7.weight_orig" : "l7/W", #torch.Size([256, 128, 1, 1]) (1024, 1024, 1, 1)
"l7.bias" : "l7/b", #torch.Size([256, 128, 1, 1]) (1024, 1024, 1, 1)
}
for n,p in self.named_parameters():
# | |
not self.fs.exists(path):
raise AssertionError(
'Input path %s does not exist!' % (path,))
def _check_output_not_exists(self):
"""Verify the output path does not already exist. This avoids
provisioning a cluster only to have Hadoop refuse to launch.
"""
if self.fs.exists(self._output_dir):
raise IOError(
'Output path %s already exists!' % (self._output_dir,))
    def _add_bootstrap_files_for_upload(self):
        """Add files needed by the bootstrap script to self._upload_mgr.

        Tar up mrjob if bootstrap_mrjob is True.

        Create the master bootstrap script if necessary.
        """
        # lazily create mrjob.tar.gz
        if self._bootstrap_mrjob():
            self._create_mrjob_tar_gz()
            self._bootstrap_dir_mgr.add('file', self._mrjob_tar_gz_path)
        # all other files needed by the script are already in
        # _bootstrap_dir_mgr
        for path in self._bootstrap_dir_mgr.paths():
            self._upload_mgr.add(path)
        # now that we know where the above files live, we can create
        # the master bootstrap script
        self._create_master_bootstrap_script_if_needed()
        if self._master_bootstrap_script_path:
            self._upload_mgr.add(self._master_bootstrap_script_path)
        # the idle-shutdown bootstrap action is always uploaded
        self._upload_mgr.add(_MAX_HOURS_IDLE_BOOTSTRAP_ACTION_PATH)
def _add_job_files_for_upload(self):
"""Add files needed for running the job (setup and input)
to self._upload_mgr."""
for path in self._get_input_paths():
self._upload_mgr.add(path)
for path in self._working_dir_mgr.paths():
self._upload_mgr.add(path)
# TODO - mtai @ davidmarin - hadoop_streaming_jar is currently ignored,
# see _HADOOP_STREAMING_JAR_URI
# if self._opts['hadoop_streaming_jar']:
# self._upload_mgr.add(self._opts['hadoop_streaming_jar'])
for step in self._get_steps():
if step.get('jar'):
self._upload_mgr.add(step['jar'])
    def _upload_local_files_to_fs(self):
        """Copy local files tracked by self._upload_mgr to FS.

        Creates the job's temp bucket if needed, uploads each tracked file,
        then waits out the filesystem's eventual consistency.
        """
        bucket_name, _ = parse_gcs_uri(self._job_tmpdir)
        self._create_fs_tmp_bucket(bucket_name)
        log.info('Copying non-input files into %s' % self._upload_mgr.prefix)
        for path, gcs_uri in self._upload_mgr.path_to_uri().items():
            log.debug('uploading %s -> %s' % (path, gcs_uri))
            # TODO - mtai @ davidmarin - Implement put function for other FSs
            self.fs.put(path, gcs_uri)
        self._wait_for_fs_sync()
    def _create_fs_tmp_bucket(self, bucket_name, location=None):
        """Create a temp bucket if missing

        Tie the temporary bucket to the same region as the GCE job and set a
        28-day TTL
        """
        # Return early if our bucket already exists
        try:
            self.fs.get_bucket(bucket_name)
            return
        except google_errors.HttpError as e:
            # 404 means the bucket is missing (create it below);
            # any other HTTP error is unexpected and re-raised.
            if not e.resp.status == 404:
                raise
        log.info('creating FS bucket %r' % bucket_name)
        location = location or self._gce_region
        # NOTE - By default, we create a bucket in the same GCE region as our
        # job (tmp buckets ONLY)
        # https://cloud.google.com/storage/docs/bucket-locations
        self.fs.create_bucket(
            self._gcp_project, bucket_name, location=location,
            object_ttl_days=_DEFAULT_CLOUD_TMP_DIR_OBJECT_TTL_DAYS)
        # give the new bucket time to become visible (eventual consistency)
        self._wait_for_fs_sync()
### Running the job ###
    def cleanup(self, mode=None):
        """Run the standard runner cleanup, then terminate our cluster --
        but only when this runner created it (no explicit cluster_id opt).
        """
        super(DataprocJobRunner, self).cleanup(mode=mode)
        # stop the cluster if it belongs to us (it may have stopped on its
        # own already, but that's fine)
        if self._cluster_id and not self._opts['cluster_id']:
            self._cleanup_cluster()
    def _cleanup_cloud_tmp(self):
        """Delete this job's temp files on cloud storage.

        Best-effort: any failure is logged rather than raised.
        """
        # delete all the files we created
        if not self._job_tmpdir:
            return
        try:
            log.info('Removing all files in %s' % self._job_tmpdir)
            self.fs.rm(self._job_tmpdir)
            # clear the path so a second call is a no-op
            self._job_tmpdir = None
        except Exception as e:
            log.exception(e)
    # TODO - mtai @ davidmarin - Re-enable log support and supporting cleanup
    def _cleanup_logs(self):
        """No Dataproc-specific log cleanup yet; defer to the superclass."""
        super(DataprocJobRunner, self)._cleanup_logs()
    def _cleanup_job(self):
        """Cancel every still-active Dataproc job on our cluster whose ID
        starts with this runner's job prefix.
        """
        job_prefix = self._dataproc_job_prefix()
        for current_job in self._api_job_list(
                cluster_name=self._cluster_id, state_matcher='ACTIVE'):
            # Kill all active jobs with the same job_prefix as this job
            current_job_id = current_job['reference']['jobId']
            if not current_job_id.startswith(job_prefix):
                continue
            self._api_job_cancel(current_job_id)
            # give the API time to register the cancellation
            self._wait_for_api('job cancellation')
def _cleanup_cluster(self):
if not self._cluster_id:
# If we don't have a cluster, then we can't terminate it.
return
try:
log.info("Attempting to terminate cluster")
self._api_cluster_delete(self._cluster_id)
except Exception as e:
log.exception(e)
return
log.info('cluster %s successfully terminated' % self._cluster_id)
def _wait_for_api(self, msg):
_wait_for(msg, self._opts['check_cluster_every'])
def _wait_for_fs_sync(self):
"""Sleep for a little while, to give FS a chance to sync up.
"""
_wait_for('GCS sync (eventual consistency)',
self._opts['cloud_fs_sync_secs'])
def _build_dataproc_hadoop_job(self, step_num):
"""This function creates a "HadoopJob" to be passed to
self._api_job_submit_hadoop
:param step_num:
:return: output_hadoop_job
"""
# Reference: https://cloud.google.com/dataproc/reference/rest/v1/projects.regions.jobs#HadoopJob # noqa
args = list()
file_uris = list()
archive_uris = list()
properties = dict()
step = self._get_step(step_num)
assert step['type'] in ('streaming', 'jar'), (
'Bad step type: %r' % (step['type'],))
# TODO - mtai @ davidmarin - Might be trivial to support jar running,
# see "mainJarFileUri" of variable "output_hadoop_job" in this function
# https://cloud.google.com/dataproc/reference/rest/v1/projects.regions.jobs#HadoopJob # noqa
assert step['type'] == 'streaming', 'Jar not implemented'
main_jar_uri = _HADOOP_STREAMING_JAR_URI
# TODO - mtai @ davidmarin - Not clear if we should move _upload_args
# to file_uris, currently works fine as-is
# TODO - dmarin @ mtai - Probably a little safer to do the API's way,
# assuming the API supports distributed cache syntax (so we can pick
# the names of the uploaded files).
args.extend(self._upload_args(self._upload_mgr))
args.extend(self._hadoop_args_for_step(step_num))
mapper, combiner, reducer = (self._hadoop_streaming_commands(step_num))
if mapper:
args += ['-mapper', mapper]
if combiner:
args += ['-combiner', combiner]
if reducer:
args += ['-reducer', reducer]
for current_input_uri in self._step_input_uris(step_num):
args += ['-input', current_input_uri]
args += ['-output', self._step_output_uri(step_num)]
# TODO - mtai @ davidmarin - Add back support to specify a different
# mainJarFileURI
output_hadoop_job = dict(
args=args,
fileUris=file_uris,
archiveUris=archive_uris,
properties=properties,
mainJarFileUri=main_jar_uri
)
return output_hadoop_job
    def _launch_cluster(self):
        """Create an empty cluster on Dataproc, and set self._cluster_id to
        its ID.

        Ensures the job's temp bucket exists, generates a cluster name if one
        wasn't supplied, then either joins an existing cluster with that name
        or creates a new one and waits for it to become ready.

        :return: the cluster ID
        """
        bucket_name, _ = parse_gcs_uri(self._job_tmpdir)
        self._create_fs_tmp_bucket(bucket_name)
        # clusterName must be a match of
        # regex '(?:[a-z](?:[-a-z0-9]{0,53}[a-z0-9])?).'
        # as documented in an API error message
        # (not currently documented in the Dataproc docs)
        if not self._cluster_id:
            self._cluster_id = '-'.join(
                ['mrjob', self._gce_zone.lower(), random_identifier()])
        # Create the cluster if it's missing, otherwise join an existing one
        try:
            self._api_cluster_get(self._cluster_id)
            log.info('Adding job to existing cluster - %s' % self._cluster_id)
        except google_errors.HttpError as e:
            # 404 means no such cluster yet; anything else is a real error
            if not e.resp.status == 404:
                raise
            log.info(
                'Creating Dataproc Hadoop cluster - %s' % self._cluster_id)
            cluster_data = self._cluster_create_args()
            self._api_cluster_create(cluster_data)
            # block until the new cluster can accept jobs
            self._wait_for_cluster_ready(self._cluster_id)
        # keep track of when we launched our job
        self._dataproc_job_start = time.time()
        return self._cluster_id
    def _wait_for_cluster_ready(self, cluster_id):
        """Poll the Dataproc API until the cluster can accept jobs.

        See https://cloud.google.com/dataproc/reference/rest/v1/projects.regions.clusters#State  # noqa

        :param cluster_id: name of the cluster to poll
        :return: *cluster_id*, once the cluster reaches a ready state
        :raises DataprocException: if the cluster enters an error state
        """
        cluster_state = None
        # Poll until cluster is ready
        while cluster_state not in _DATAPROC_CLUSTER_STATES_READY:
            result_describe = self.api_client.clusters().get(
                projectId=self._gcp_project,
                region=_DATAPROC_API_REGION,
                clusterName=cluster_id).execute()
            cluster_state = result_describe['status']['state']
            if cluster_state in _DATAPROC_CLUSTER_STATES_ERROR:
                raise DataprocException(result_describe)
            # sleep before re-checking (note: this also runs once on the
            # iteration where the ready state is first observed)
            self._wait_for_api('cluster to accept jobs')
        assert cluster_state in _DATAPROC_CLUSTER_STATES_READY
        log.info("Cluster %s ready", cluster_id)
        return cluster_id
def _dataproc_job_prefix(self):
return _cleanse_gcp_job_id(self._job_key)
def _run_steps(self):
"""Wait for every step of the job to complete, one by one."""
total_steps = self._num_steps()
# define out steps
for step_num in range(total_steps):
job_id = self._launch_step(step_num)
self._wait_for_step_to_complete(
job_id, step_num=step_num, num_steps=total_steps)
log.info('Completed Dataproc Hadoop Job - %s', job_id)
# After all steps completed, wait for the last output (which is
# usually written to GCS) to sync
self._wait_for_fs_sync()
def _launch_step(self, step_num):
# Build each step
hadoop_job = self._build_dataproc_hadoop_job(step_num)
# Clean-up step name
step_name = '%s---step-%05d-of-%05d' % (
self._dataproc_job_prefix(), step_num + 1, self._num_steps())
# Submit it
log.info('Submitting Dataproc Hadoop Job - %s', step_name)
result = self._api_job_submit_hadoop(step_name, hadoop_job)
log.info('Submitted Dataproc Hadoop Job - %s', step_name)
job_id = result['reference']['jobId']
assert job_id == step_name
return job_id
    def _wait_for_step_to_complete(
            self, job_id, step_num=None, num_steps=None):
        """Wait for the Dataproc job with the given ID to complete.

        Polls the job state until it leaves the active states.  Raises
        :py:class:`StepFailedException` for any terminal state other than
        ``DONE``.

        This also adds an item to self._log_interpretations

        :param job_id: Dataproc job ID to poll
        :param step_num: 0-based step index (used in error reporting)
        :param num_steps: total number of steps (used in error reporting)
        """
        log_interpretation = dict(job_id=job_id)
        self._log_interpretations.append(log_interpretation)
        while True:
            # https://cloud.google.com/dataproc/reference/rest/v1/projects.regions.jobs#JobStatus # noqa
            job_result = self._api_job_get(job_id)
            job_state = job_result['status']['state']
            log.info('%s => %s' % (job_id, job_state))
            # https://cloud.google.com/dataproc/reference/rest/v1/projects.regions.jobs#State # noqa
            if job_state in _DATAPROC_JOB_STATES_ACTIVE:
                # still running; sleep and poll again
                self._wait_for_api('job completion')
                continue
            # we're done, will return at the end of this
            elif job_state == 'DONE':
                break
            # any other terminal state (e.g. ERROR, CANCELLED) is a failure
            raise StepFailedException(step_num=step_num, num_steps=num_steps)
def _step_input_uris(self, step_num):
"""Get the gs:// URIs for input for the given step."""
if step_num == 0:
return [self._upload_mgr.uri(path)
for path in self._get_input_paths()]
else:
# put intermediate data in HDFS
return ['hdfs:///tmp/mrjob/%s/step-output/%05d/' % (
self._job_key, step_num)]
def _step_output_uri(self, step_num):
if step_num == len(self._get_steps()) - 1:
return self._output_dir
else:
# put intermediate data in HDFS
return 'hdfs:///tmp/mrjob/%s/step-output/%05d/' % (
self._job_key, step_num + 1)
def counters(self):
# TODO - mtai @ davidmarin - Counters are currently always empty as we
# are not processing task logs
return [_pick_counters(log_interpretation)
for log_interpretation in self._log_interpretations]
    ### Cluster info ###
def get_hadoop_version(self):
if self._hadoop_version is None:
self._store_cluster_info()
return self._hadoop_version
def get_image_version(self):
"""Get the version that our cluster is running.
"""
if self._image_version is None:
self._store_cluster_info()
return self._image_version
    def _store_cluster_info(self):
        """Set self._image_version and self._hadoop_version from the
        cluster's software config.

        :raises AssertionError: if no cluster has been created yet
        """
        if not self._cluster_id:
            raise AssertionError('cluster has not yet been created')
        cluster = self._api_cluster_get(self._cluster_id)
        self._image_version = (
            cluster['config']['softwareConfig']['imageVersion'])
        # map the Dataproc image version to its bundled Hadoop version
        self._hadoop_version = (
            _DATAPROC_IMAGE_TO_HADOOP_VERSION[self._image_version])
### Bootstrapping ###
def _create_master_bootstrap_script_if_needed(self):
"""Helper for :py:meth:`_add_bootstrap_files_for_upload`.
Create the master bootstrap script and write it into our local
temp directory. Set self._master_bootstrap_script_path.
This will do nothing if there are no bootstrap scripts or commands,
or if it has already been called."""
if self._master_bootstrap_script_path:
return
# don't bother if we're not starting a cluster
if self._cluster_id:
return
# Also don't bother | |
import numpy as np
import librosa
from scipy import interpolate
import pywt
from matplotlib.image import imsave
from scipy.signal import butter, lfilter, freqz
from matplotlib import pyplot as plt
from imageProcessingUtil import ImageProcessing
import SimpleITK as sitk
class AudioProcessing(object):
def __init__(self):
pass
@staticmethod
def read(absFilePath,sr=None):
"""
Reading audio
:param absFilePath: Absolute File Path
:param sr: Sampling rate of audio to be read (If None, original sampling rate is considered)
:return: audio samples,
"""
data,fs = librosa.load(absFilePath,sr=sr)
return data,fs
@staticmethod
def writeAsWav(data,sr,filename):
"""
Write .wav files
:param data: audio data
:param sr: sampling rate
:param filename: filename to be saved
:return: None
"""
if filename is None or sr is None or data is None :
return "Please provid arguements as writeAsWav(data,sr,filename)"
if "wav" not in filename:
return "Only wav files!"
filename_split = filename.rsplit(".",1)
filename = filename_split[0]
filetype = filename_split[1].lower()
data = AudioProcessing.rescaleAmplitude(data)
librosa.output.write_wav("{}.{}".format(filename,filetype),data,sr)
@staticmethod
def generateSineWave(amp,f,phi,fs):
"""
Generating a simple sine wave
:param amp: Amplitude
:param f: Frequency
:param phi: Phase
:param fs: Frequency sampling rate
:return: Sine wave signal
"""
# considering 5 time periodics
t = np.arange(0,10.0/f,1.0/fs)
x = amp*np.cos(2*np.pi*f*t + phi)
return(t,x)
@staticmethod
def convert_to_mono(x):
"""
Convert multi channel sounds to mono channel
:param x: audio data
:return: mono channel (audio data)
"""
if x.ndim > 1:
return librosa.to_mono(x)
return x
@staticmethod
def DFT(data,N,fs,start_time = 0.0):
"""
calculating N point DFT
:param data: audio data
:param N: N point DFT
:param fs: sampling frequency
:return:
"""
data = AudioProcessing.convert_to_mono(data)
size = data.size
new_data = np.zeros(N)
if size < N:
diff = N - size
new_data[:size] = data
else:
new_data = data[start_time*fs:start_time*fs+N]
hanning = np.hanning(N)
new_data = new_data*hanning
print("Calculating DFT for {} ms window with start time {} sec".format(N*1000/float(fs),start_time))
nv = np.arange(N)
kv = np.arange(N)
nv = np.arange(-N/2.0,N/2.0)
kv = np.arange(-N/2.0,N/2.0)
X = np.array([])
# Calculating the DFT of the cropped signal
for k in kv:
s = np.exp(1j*2*np.pi*k/N*nv)
X = np.append(X,sum(new_data*np.conjugate(s)))
X = np.abs(X)
frequency_axis = kv*fs/N
return (frequency_axis,X)
@staticmethod
def resampleAudio(data,fs,new_fs):
"""
Resampling audio to a different sampling rate
:param data: audio data
:param fs: old sampling rate
:param new_fs: new sampling rate
:return: resampled audio
"""
print("Resampling from {} to {} hz".format(fs,new_fs))
fs = float(fs)
new_fs = float(new_fs)
data = AudioProcessing.convert_to_mono(data)
size = data.size
old_time_axis = np.arange(size)/fs
total_time = old_time_axis[-1]
total_samples = round(total_time*new_fs)
new_time_axis = np.arange(total_samples)/new_fs
f = interpolate.interp1d(old_time_axis,data)
new_data = f(new_time_axis)
return new_data
@staticmethod
def rescaleAmplitude(data,scale_range = (-1,1)):
"""
rescaling an array to a particlar range
:param data: Any array
:param scale_range: The range to which rescaling has to be done
:return: rescaled array
"""
mini = np.min(data)
maxi = np.max(data)
new_min = scale_range[0]
new_max = scale_range[1]
new_data = ((new_max - new_min)*(data - mini)/(maxi - mini)) + new_min
return new_data
@staticmethod
def get_entropy(X):
"""
:param X: Input array
:return: Entropy of the input array
"""
probs = [np.mean(X == c) for c in set(X)]
return np.sum(-p * np.log2(p) for p in probs)
    @staticmethod
    def denoise_by_wavelets(audio,wavelet = 'dmey',threshold = 9):
        """
        Audio denoising by using wavelet packet decomposition

        Steps 1) Wavelet Packet decomposition 2) Thresholding 3) Reconstruction of wavelet packet decomposition.

        :param audio: audio samples to denoise
        :param wavelet: wavelet name understood by pywt (default 'dmey')
        :param threshold: Threshold used to remove noise (Actual threshold = threshold*std of
        lowest level detail coefficients of the tree of wavelet packet decomposition)
        :return: Denoised audio
        """
        wp = pywt.WaveletPacket(data=audio, wavelet=wavelet, mode='symmetric')
        # empty packet tree that will receive the thresholded coefficients
        new_wp = pywt.WaveletPacket(data=None, wavelet=wavelet, mode='symmetric')
        # scale the threshold by the std of the level-1 detail coefficients
        ld = wp['d'].data
        threshold = threshold*np.std(ld)
        print("Denoising using wavelets for {} levels ... This may take a while".format(wp.maxlevel))
        for i in range(wp.maxlevel):
            paths = [node.path for node in wp.get_level(i+1, 'natural')]
            for path in paths:
                # NOTE(review): pywt.threshold is called with its default
                # mode here ('soft' in current pywt) -- confirm intended
                new_wp[path] = pywt.threshold(wp[path].data,threshold)
        new_wp.reconstruct(update=True)
        return new_wp.data
@staticmethod
def get_stft(data,n_fft,win_length,hop_length):
"""
Compute Short Time Fourier Transform of the audio
:param data: audio data
:param n_fft: FFT length
:param win_length: Time frame or the window length
:param hop_length: Hop length between the time frames. (Determines overlapping between frames)
:return: STFT of the audio signal
"""
stft = librosa.stft(y = data,n_fft=n_fft,hop_length=hop_length,win_length=win_length)
return stft
@staticmethod
def get_energy(data,frame_length,hop_length):
"""
Compute the Root mean square energy of the signal
:param data: audio data
:param frame_length: window or frame legth
:param hop_length: overlapping factor
:return: Energy of the audio signal.
"""
energy = librosa.feature.rmse(y=data,n_fft=frame_length,hop_length=hop_length)
energy = energy[0,:]
return energy
    @staticmethod
    def get_spectrogram(data,n_fft = 512,win_length = 480,hop_length = 120,range = (0,255),pixel_type = np.uint8,log_amplitude = True):
        """
        Return a spectrogram image rescaled to the given intensity range.

        :param data: audio data
        :param n_fft: FFT length
        :param win_length: analysis window length in samples
        :param hop_length: hop between frames in samples
        :param range: (min, max) intensity range of the output
            (NOTE: shadows the ``range`` builtin; left as-is so keyword
            callers keep working)
        :param pixel_type: numpy dtype of the output pixels
        :param log_amplitude: if True, returns spectrogram in logamplitude, or returns linear amplitude.
        :return: Spectrogram image
        """
        # calculating stft for window length = 480 and overlap = 360 samples
        stft = AudioProcessing.get_stft(data,n_fft,win_length,hop_length)
        # magnitude spectrogram
        db = np.absolute(stft)
        if log_amplitude:
            # NOTE(review): librosa.logamplitude was removed in librosa 0.6
            # (replaced by amplitude_to_db) -- confirm pinned librosa version
            db = librosa.logamplitude(db)
        # converting to log amplitude and rescaling it between the given range
        db = AudioProcessing.rescaleAmplitude(db,range)
        db = db.astype(pixel_type)
        return db
    @staticmethod
    def get_spectrogram_label(data,n_fft = 512,win_length = 480,hop_length = 120,
                              range = (0,255),pixel_type = np.uint8,log_amplitude = True,
                              initial_labels = [25,50,75,100,125,150,175,200,225,250], no_labels = 2 ):
        """
        Performs preprocessing and clustering on the spectrogram to retrieve the most prominent parts as labels.

        :param data: audio data
        :param n_fft: FFT length
        :param win_length: Window length
        :param hop_length: Hop length (overlapping factor)
        :param range: range of the intensity values of spectrogram
            (NOTE: shadows the ``range`` builtin; kept for interface stability)
        :param pixel_type: Pixel type for intensity values of spectrogram
        :param log_amplitude: Whether to consider log amplitude of spectrogram or not
        :param initial_labels: Initial Labels for clustering the spectrogram using Kmeans
            (NOTE: mutable default argument; benign here only because it is
            never mutated)
        :param no_labels: Maximum number of labels to be retained.
        :return: Labels extracted from spectrogram.
        """
        # obtaining the spectrogram of the audio
        spectrogram = AudioProcessing.get_spectrogram(data,n_fft=n_fft,win_length=win_length,hop_length=hop_length,range=range,pixel_type=pixel_type,log_amplitude = log_amplitude)
        # converting to sitk image and smoothing with a median filter
        db_sitk = sitk.GetImageFromArray(spectrogram)
        db_sitk = sitk.GetImageFromArray(ImageProcessing.median_image_filter(db_sitk,radius=(3,3,3)))
        # kmeans clustering the image according to the initial labels
        labels = sitk.ScalarImageKmeans(db_sitk,initial_labels,True)
        # considering only the last n labels given by no_labels
        lables_arr = sitk.GetArrayFromImage(labels)
        max_label = np.max(lables_arr)
        # binarize: keep the top no_labels classes, zero out the rest
        lables_arr[lables_arr < (max_label-(no_labels - 1))] = 0
        lables_arr[lables_arr >= (max_label-(no_labels - 1))] = 1
        labels = sitk.GetImageFromArray(lables_arr)
        # performing binary closing and dilating with certain parameters
        closed = sitk.BinaryMorphologicalClosing(labels,1,sitk.sitkBall)
        dilated = sitk.BinaryDilate(closed,3,sitk.sitkBall)
        # filling holes
        holesfilled = sitk.BinaryFillhole(dilated,fullyConnected=True)
        # getting the connected components and relabelling it according to size
        connected = sitk.ConnectedComponent(holesfilled,fullyConnected=True)
        relabelled = sitk.RelabelComponent(connected,minimumObjectSize=200)
        relabelled_arr = sitk.GetArrayFromImage(relabelled)
        # returning the label array (sorted largest-component-first)
        return relabelled_arr
@staticmethod
def segmentAudioByEnergyApproximation(data,fs,threshold = 5 ,short_energy_time = 64,max_segments = 5):
"""
Segmenting the audio based on approximation using signal energy. Modelling the noise
by considering certain amount of low energy level frames.
:param data:
:param fs:
:param threshold:
:param short_energy_time:
:param max_segments:
:return:
"""
total_samples = 0.2*fs
min_energy_samples = np.sort(abs(data))[:int(total_samples)]
min_energy_samples = np.array(min_energy_samples)
mean = np.mean(abs(min_energy_samples))
std = np.std(abs(min_energy_samples))
if std == 0.0:
std = 0.01
# Approximating a frame with the maximum value of the frame to eliminate the high frequency content
approximate = np.copy(abs(data))
i = 0
hop_size = 2048
while(i < data.size):
if(i+hop_size < data.size):
# approximate my maximum
approximate[i:i+hop_size] = np.max(approximate[i:i+hop_size])
else:
approximate[i:] = np.max(approximate[i:])
i = i+hop_size
check_array = (abs(approximate) - mean)/float(std)
if 0:
import pdb
pdb.set_trace()
plt.plot(check_array)
plt.show()
if np.min(check_array )> threshold:
threshold = np.min(check_array) + 3
ind_p = np.where(check_array > threshold)
ind_n = np.where(check_array <= threshold)
check_array[ind_p] = 1
check_array[ind_n] = 0
diff = np.ediff1d(check_array)
ones = np.where(diff == 1)[0]
minus_ones = np.where(diff == -1)[0]
if ones.size == 0:
ones = np.array([0])
if minus_ones.size == 0:
minus_ones = np.array([check_array.size - 1])
if ones[0] >= minus_ones[0]:
ones = np.append(0,ones)
if ones[-1] >= minus_ones[-1]:
minus_ones = np.append(minus_ones,[check_array.size - 1])
segments = []
if 0:
import pdb
pdb.set_trace()
for i in range(ones.size):
if(minus_ones[i] - ones[i] >= 6144):
# print(minus_ones[i] - ones[i],i)
segments.append((ones[i],minus_ones[i],minus_ones[i]-ones[i]))
def seg_size(x):
return (x[2])
segments = sorted(segments,key=seg_size,reverse=True)
if len(segments) > max_segments :
segments =segments[:5]
return segments
@staticmethod
def segmentAudioBySpectrograms(data,spec_label,win_len,hop_len,max_segments = 5):
"""
Segmentation audio by using labels generated by spectrogram.
First compute spectrogram labels using get_spectrogram_label method and
:param data: audio data to be segmented
:param spec_label: Spectrogram labels
| |
"""
Auxiliary drawables for 2D game support.
This module provides support for non-rectangular objects such as triangles, polygons,
and paths (e.g. lines with width).
Author: <NAME> (wmw2)
Date: August 1, 2017 (Python 3 version)
"""
# Lower-level kivy modules to support animation
from kivy.graphics import *
from kivy.graphics.instructions import *
from .gobject import GObject
def same_side(p1, p2, a, b):
    """
    Checks whether two points are on the same side of a line segment.

    :param p1: A point represented as a 2-element sequence of numbers
    :type p1: ``list`` or ``tuple``

    :param p2: A point represented as a 2-element sequence of numbers
    :type p2: ``list`` or ``tuple``

    :param a: One end of a line segment, represented as a 2-element sequence of numbers
    :type a: ``list`` or ``tuple``

    :param b: Another end of a line segment, represented as a 2-element sequence of numbers
    :type b: ``list`` or ``tuple``

    :return: True if ``p1``, ``p2`` are on the same side of segment ``ba``; False otherwise
    :rtype: ``bool``
    """
    import numpy as np
    # lift the segment direction into 3D so the cross products are well-defined
    direction = np.append(np.subtract(b, a), [0])
    cross1 = np.cross(direction, np.subtract(p1, a))
    cross2 = np.cross(direction, np.subtract(p2, a))
    # a non-negative dot product means the cross products point the same way
    return np.dot(cross1, cross2) >= 0
def in_triangle(p, t):
    """
    Checks whether a point is inside of a triangle

    :param p: A point in 2 dimensions
    :type p: 2-element list of ``int`` or ``float``

    :param t: A triangle defined by 3 points
    :type t: 6-element list of ``int`` or ``float``

    :return: True if ``p`` is in triangle ``t``; False otherwise
    :rtype: ``bool``
    """
    # p is inside iff, for every edge, it lies on the same side as the
    # opposite vertex
    v0, v1, v2 = t[0:2], t[2:4], t[4:6]
    return (same_side(p, v0, v1, v2) and
            same_side(p, v1, v0, v2) and
            same_side(p, v2, v0, v1))
def is_point_tuple(t, minsize):
    """
    Checks whether a value is an EVEN, non-empty sequence of numbers.

    The number of points must be at least ``minsize``, or the function
    returns False.  As a point is a pair of numbers, this means the length
    of ``t`` must be at least **twice** ``minsize``.

    :param t: The value to test
    :type t: any

    :param minsize: The minimum number of points in the sequence
    :type minsize: ``int`` >= 0

    :return: True if t is a point sequence (even sequence of numbers); False otherwise
    :rtype: ``bool``
    """
    try:
        # empty sequences are rejected (matches the original reduce()
        # behavior, which raised on an empty sequence)
        if len(t) == 0 or len(t) % 2 != 0 or len(t) < 2 * minsize:
            return False
        # exact type check (not isinstance) so bools are rejected
        return all(type(z) in (int, float) for z in t)
    except:
        # values without len() are not point sequences
        return False
# #mark -
class GPath(GObject):
    """
    A class representing a sequence of line segments

    The path is defined by the ``points`` attribute which is an (even) sequence of
    alternating x and y values. When drawn in a :class:`GView` object, the line starts
    from one x-y pair in ``points`` and goes to the next x-y pair.  If ``points`` has
    length 2n, then the result is n-1 line segments.

    The object uses the attribute ``linecolor`` to determine the color of the line and the
    attribute ``linewidth`` to determine the width.  The attribute ``fillcolor`` is unused
    (even though it is inherited from :class:`GObject`).

    The attributes ``width`` and ``height`` are present in this object, but they are now
    read-only.  These values are computed from the list of points.

    On the other hand, the attributes ``x`` and ``y`` are used.  By default, these values
    are 0.  However, if they are nonzero, then Python will add them to all of the points
    in the path, shifting the path accordingly.
    """
    # MUTABLE PROPERTIES
    @property
    def points(self):
        """
        The sequence of points that make up this line.

        **Invariant**: Must be a sequence (list or tuple) of int or float.
        The length of this sequence must be even with length at least 4.
        """
        return self._points

    @points.setter
    def points(self, value):
        assert is_point_tuple(value, 2), 'value %s is not a valid list of points' % repr(value)
        self._points = tuple(value)
        if self._defined:
            self._reset()

    @property
    def linewidth(self):
        """
        The width of this path.

        Setting this value to 0 means that the path is invisible.

        **Invariant**: Must be an int or float >= 0.
        """
        return self._linewidth

    @linewidth.setter
    def linewidth(self, value):
        assert type(value) in [int, float], 'value %s is not a number' % repr(value)
        assert value >= 0, 'value %s is negative' % repr(value)
        self._linewidth = value
        if self._defined:
            self._reset()

    # IMMUTABLE PROPERTIES
    @property
    def width(self):
        """
        The horizontal width of this path.

        The value is the width of the smallest bounding box that contains all of the
        points in the line AND the origin (0,0).

        **Invariant**: Must be an int or float > 0.
        """
        px = self.points[::2] + (0, 0)
        return 2 * max(max(px), -min(px))

    @property
    def height(self):
        """
        The vertical height of this path.

        The value is the height of the smallest bounding box that contains all of the
        points in the line AND the origin (0,0).

        **Invariant**: Must be an int or float > 0.
        """
        py = self.points[1::2] + (0, 0)
        return 2 * max(max(py), -min(py))

    # BUILT-IN METHODS
    def __init__(self, **keywords):
        """
        Creates a new sequence of line segments.

        To use the constructor for this class, you should provide it with a list of
        keyword arguments that initialize various attributes. For example, to create a
        path from (0,0) to (2,3) with width 2, use the constructor call

            GPath(points=[0,0,2,3],linewidth=2)

        This class supports the same keywords as :class:`GObject`, though some of them
        are unused, as the ``width`` and ``height`` attributes are now immutable. The
        primary keywords for this class are ``points``, ``linecolor``, and ``linewidth``.

        :param keywords: dictionary of keyword arguments
        :type keywords: keys are attribute names
        """
        # suppress cache resets until the object is fully built
        self._defined = False
        self.linewidth = keywords['linewidth'] if 'linewidth' in keywords else 1.0
        self.points = keywords['points'] if 'points' in keywords else (0, 0, 10, 10)
        if not 'linecolor' in keywords:
            keywords['linecolor'] = (1, 1, 1, 1)
        GObject.__init__(self, **keywords)
        self._reset()
        self._defined = True

    # PUBLIC METHODS
    def contains(self, point):
        """
        Checks whether this shape contains the point

        This method always returns `False` as a ``GPath`` has no interior.

        :param point: the point to check
        :type point: :class:`Point2`` or a pair of numbers

        :return: True if the shape contains this point
        :rtype: ``bool``
        """
        return False

    def near(self, point):
        """
        Checks whether this path is near the given point

        To determine if (x,y) is near the path, we compute the minimum distances
        from (x,y) to the path.  If this distance is less than e-6, we return True.

        :param point: the point to check
        :type point: :class:`Point2`` or a pair of numbers

        :return: True if this path is near the give point; False otherwise.
        :rtype: ``bool``
        """
        import numpy as np  # BUG FIX: np was referenced but never imported
        # NOTE(review): Point2 is not imported in this module -- presumably
        # supplied by the geometry package used with this library; confirm.
        if isinstance(point, Point2):
            point = (point.x, point.y)
        assert is_point_tuple(point, 1), 'value %s is not a valid point' % repr(point)

        x = point[0]
        y = point[1]
        # BUG FIX: integer division; len(...)/2 is a float in Python 3 and
        # range() below would raise a TypeError
        size = len(self.points) // 2
        epsilon = 1e-6
        for ii in range(size - 1):
            p = self.points[2 * ii:2 * ii + 2]
            q = self.points[2 * ii + 2:2 * ii + 4]
            if p == q:
                # degenerate segment; test the distance to the single point
                test = np.sqrt((q[0] - x) * (q[0] - x) + (q[1] - y) * (q[1] - y)) < epsilon
            else:
                # perpendicular distance from (x,y) to the line through p, q:
                # |dy*x - dx*y + qx*py - qy*px| / |q-p|
                # BUG FIX: the original swapped dx and dy in the numerator
                num = abs((q[1] - p[1]) * x - (q[0] - p[0]) * y + q[0] * p[1] - p[0] * q[1])
                den = np.sqrt((q[0] - p[0]) * (q[0] - p[0]) + (q[1] - p[1]) * (q[1] - p[1]))
                # BUG FIX: compare the distance to epsilon; previously any
                # nonzero distance was truthy and counted as 'near'
                test = num / den < epsilon
            if test:
                return True
        return self.contains(point)

    # HIDDEN METHODS
    def _reset(self):
        """
        Resets the drawing cache
        """
        GObject._reset(self)
        if not self._linecolor is None:
            self._cache.add(self._linecolor)
            line = Line(points=self.points, cap='round', joint='round', width=self.linewidth)
            self._cache.add(line)
        self._cache.add(PopMatrix())
# #mark -
class GTriangle(GPath):
"""
A class representing a solid triangle.
The triangle is defined as a sequence of three point. Just as with the `GPath` class
(which is the parent of this class), it has an attribute `point` which represents
this points as an even-length sequence of ints or floats.
The interior (fill) color of this triangle is `fillcolor`, while `linecolor`
is the color of the border. If `linewidth` is set to 0, then the border is
not visible.
As with `GPath`, the attributes `x` and `y` may be used to shift the triangle
position. By default, these values are 0. However, if they are nonzero, then Python
will add them to the triangle | |
your ID. Please try logging in again.')
return response
except Exception as e:
log.error("%s type exception raised setting response following ID "
"Approval: %s", e.__class__.__name__,
traceback.format_exc())
response = self._render.errorPage(self.environ,
self.start_response,
'An error occurred setting additional parameters required by '
'the site requesting your ID. Please report this fault to '
'your site administrator.')
return response
try:
return self._render.decidePage(environ,
start_response,
oid_request,
self.oid_response)
except AuthNInterfaceError as e:
log.error("%s type exception raised calling decide page "
"rendering - an OpenID identifier look-up error? "
"message is: %s", e.__class__.__name__,
traceback.format_exc())
response = self._render.errorPage(environ, start_response,
'An error has occurred displaying an options page '
'which checks whether you want to return to the site '
'requesting your ID. Please report this fault to '
'your site administrator.')
return response
def _identityIsAuthenticated(self, oid_request):
'''Check that a user is authenticated i.e. does a session exist for their
username
@type oid_request: openid.server.server.CheckIDRequest
@param oid_request: OpenID Request object
@rtype: bool
@return: True/False is user authenticated
'''
username = self.session.get(
OpenIDProviderMiddleware.USERNAME_SESSION_KEYNAME)
if username is None:
return False
identityURI = self.session.get(
OpenIDProviderMiddleware.IDENTITY_URI_SESSION_KEYNAME)
if identityURI is None:
return False
log.debug("OpenIDProviderMiddleware._identityIsAuthenticated - "
"user is logged in")
return True
def _requestedIdMatchesAuthenticatedId(self, oid_request):
"""Check requested Identity matches identity of already logged in user.
Note also returns positive response if identity select mode used
@type oid_request: openid.server.server.CheckIDRequest
@param oid_request: OpenID Request object
@rtype: bool
@return: True/False authenticated user satisfy request ID
"""
if oid_request.idSelect():
log.debug(
"OpenIDProviderMiddleware._requestedIdMatchesAuthenticatedId - "
"ID Select mode set but user is already logged in")
return True
identityURI = self.session.get(
OpenIDProviderMiddleware.IDENTITY_URI_SESSION_KEYNAME)
if identityURI is None:
return False
if oid_request.identity != identityURI:
log.debug(
"OpenIDProviderMiddleware._requestedIdMatchesAuthenticatedId - "
"requested identity is %r but user is already logged in with "
"ID %r" % (oid_request.identity, identityURI))
return False
log.debug(
"OpenIDProviderMiddleware._requestedIdMatchesAuthenticatedId - "
"user is logged in with ID matching ID URI")
return True
def _trust_root_is_authorized(self, trust_root):
'''Return True/False for the given trust root (Relying Party)
previously been approved by the user
@type trust_root: dict
@param trust_root: keyed by trusted root (Relying Party) URL and
containing string item 'always' if approved
@rtype: bool
@return: True - trust has already been approved, False - trust root is
not approved'''
approvedRoots = self.session.get(
OpenIDProviderMiddleware.APPROVED_FLAG_SESSION_KEYNAME,
{})
return approvedRoots.get(trust_root) is not None
    def _is_configured_trusted_root(self, trust_root):
        """Check whether the Relying Party is one of a
        list of RPs set in the start up configuration as not needing
        approval.  This could be for example sites within the same
        organisation as this Provider

        @type trust_root: basestring
        @param trust_root: trust root (Relying Party) URL to check

        @rtype: bool
        @return: True if the trust root is in the configured white-list of
        trusted Relying Parties; False otherwise
        """
        return trust_root in self.trustedRelyingParties
def _add_sreg_response(self, oid_request, oid_response):
'''Add Simple Registration attributes to response to Relying Party
@type oid_request: openid.server.server.CheckIDRequest
@param oid_request: OpenID Check ID Request object'''
if self.sregResponse is None:
# No Simple Registration response object was set
return
sreg_req = sreg.SRegRequest.fromOpenIDRequest(oid_request)
# Callout to external callable sets additional user attributes to be
# returned in response to Relying Party
sreg_data = self.sregResponse(self.session.get(
OpenIDProviderMiddleware.USERNAME_SESSION_KEYNAME))
sreg_resp = sreg.SRegResponse.extractResponse(sreg_req, sreg_data)
oid_response.addExtension(sreg_resp)
def _add_ax_response(self, oid_request, oid_response):
    '''Add attributes to response based on the OpenID Attribute Exchange
    interface

    @type oid_request: openid.server.server.CheckIDRequest
    @param oid_request: OpenID Check ID Request object
    @type oid_response: openid.server.server.OpenIDResponse
    @param oid_response: response object the AX extension is added to
    @raise OpenIDProviderMissingAXResponseHandler: the Relying Party
    requires attributes but no 'axResponse' handler is configured
    '''
    ax_req = ax.FetchRequest.fromOpenIDRequest(oid_request)
    if ax_req is None:
        log.debug("No Attribute Exchange extension set in request")
        return

    ax_resp = ax.FetchResponse(request=ax_req)

    if self.axResponse is None:
        requiredAttr = ax_req.getRequiredAttrs()
        if len(requiredAttr) > 0:
            # Fixed: the original literals concatenated to
            # "...but NoAttribute exchange handler..." (missing space)
            msg = ("Relying party requires these attributes: %s; but no "
                   "Attribute exchange handler 'axResponse' has "
                   "been set" % requiredAttr)
            log.error(msg)
            raise OpenIDProviderMissingAXResponseHandler(msg)

        return

    log.debug("Calling AX plugin: %s ...",
              self.axResponse.__class__.__name__)

    # Set requested values - need user intervention here to confirm
    # release of attributes + assignment based on required attributes -
    # possibly via FetchRequest.getRequiredAttrs()
    try:
        self.axResponse(ax_req, ax_resp, self._authN, self.session)

    except OpenIDProviderMissingRequiredAXAttrs as e:
        # Lazy %-style args: message only formatted if the record is
        # actually emitted
        log.error("OpenID Provider is unable to set the AX attributes "
                  "required by the Relying Party's request: %s", e)
        raise

    except OpenIDProviderReloginRequired as e:
        log.exception(e)
        raise

    except Exception as e:
        log.error("%s exception raised setting requested Attribute "
                  "Exchange values: %s",
                  e.__class__.__name__,
                  traceback.format_exc())
        raise

    log.debug("Adding AX parameters to response: %s ...", ax_resp)
    oid_response.addExtension(ax_resp)
    log.debug("Added AX parameters to response")
def _create_response(self, oid_request, identifier=None):
    """Build an OpenID response for the input request, attaching Simple
    Registration and/or Attribute Exchange parameters when the
    corresponding handlers are configured and the Relying Party asked
    for them - see _add_sreg_response and _add_ax_response.

    @type oid_request: openid.server.server.CheckIDRequest
    @param oid_request: OpenID Check ID Request object
    @type identifier: basestring
    @param identifier: OpenID selected by user - for ID Select mode only
    @rtype: openid.server.server.OpenIDResponse
    @return: positive assertion response with extensions added
    """
    response = oid_request.answer(True, identity=identifier)

    # SReg first, then AX - mirrors the extension handler configuration
    for add_extension in (self._add_sreg_response, self._add_ax_response):
        add_extension(oid_request, response)

    return response
def _handleCheckIDRequest(self, oid_request):
    """Handle "checkid_immediate" and "checkid_setup" type requests from
    Relying Party

    @type oid_request: openid.server.server.CheckIDRequest
    @param oid_request: OpenID Check ID request
    @rtype: basestring
    @return: WSGI response
    """
    log.debug("OpenIDProviderMiddleware._handleCheckIDRequest ...")

    # Save request in the session so it can be picked up again after the
    # login / decide interaction completes
    self.session[
        OpenIDProviderMiddleware.LAST_CHECKID_REQUEST_SESSION_KEYNAME
    ] = oid_request

    self.session.save()

    # Check for authenticated session already present
    if self._identityIsAuthenticated(oid_request):

        # Check requested ID matches ID for an existing authenticated user
        # if one has already been set
        if not self._requestedIdMatchesAuthenticatedId(oid_request):
            response = self._render.errorPage(self.environ,
                self.start_response,
                'An existing user is already logged in with a different '
                'identity to the one you provided (%s). Log out from this '
                'site using the link below and then navigate back to the '
                'site which first requested your OpenID.' %
                oid_request.identity)
            return response

        # User is logged in - check for ID Select type request i.e. the
        # user entered their IdP address at the Relying Party and not their
        # full OpenID URL. In this case, the identity they wish to use must
        # be confirmed.
        is_configured_trusted_root = self._is_configured_trusted_root(
            oid_request.trust_root)
        if oid_request.idSelect() and not is_configured_trusted_root:
            # OpenID identifier must be confirmed
            return self.do_decide(self.environ, self.start_response)

        elif (self._trust_root_is_authorized(oid_request.trust_root) or
              is_configured_trusted_root):
            # User entered their full OpenID URL and they have previously
            # approved this Relying Party OR the Relying Party is one of a
            # list of RPs set in the start up configuration as not needing
            # approval. This could be for example sites within the same
            # organisation as this Provider
            try:
                identityURI = self.session[
                    OpenIDProviderMiddleware.IDENTITY_URI_SESSION_KEYNAME
                ]
                # Response stored on self so _displayResponse below can
                # serialise it
                self.oid_response = self._create_response(oid_request,
                                                    identifier=identityURI)

            except (OpenIDProviderMissingRequiredAXAttrs,
                    OpenIDProviderMissingAXResponseHandler):
                response = self._render.errorPage(self.environ,
                    self.start_response,
                    'The site where you wish to signin requires '
                    'additional information which this site isn\'t '
                    'configured to provide. Please report this fault to '
                    'your site administrator.')
                return response

            except OpenIDProviderReloginRequired as e:
                # NOTE(review): 'e' is unused here - the user is simply
                # asked to log in again
                response = self._render.errorPage(self.environ,
                    self.start_response,
                    'An error occurred setting additional parameters '
                    'required by the site requesting your ID. Please '
                    'try logging in again.')
                return response

            except Exception as e:
                log.error("%s type exception raised setting response "
                          "following ID Approval: %s",
                          e.__class__.__name__,
                          traceback.format_exc())
                response = self._render.errorPage(self.environ,
                    self.start_response,
                    'An error occurred setting additional parameters '
                    'required by the site requesting your ID. Please '
                    'report this fault to your site administrator.')
                return response

            return self._displayResponse(self.oid_response)
        else:
            # This OpenID is being used for a login for the first time.
            # Check with the user whether they want to approve the Relying
            # Party's request
            return self.do_decide(self.environ, self.start_response)

    elif oid_request.immediate:
        # checkid_immediate with no authenticated session: refuse -
        # answer(False) tells the RP to retry in setup mode
        oid_response = oid_request.answer(False)
        return self._displayResponse(oid_response)
    else:
        # User is not logged in
        # Call login and if successful then call decide page to confirm
        # user wishes to trust the Relying Party.
        response = self.do_login(self.environ,
                                 self.start_response,
                                 success_to=self.urls['url_decide'])
        return response
def _displayResponse(self, oid_response):
"""Serialize an OpenID Response object, set headers and return WSGI
response.
If the URL length for a GET request exceeds a maximum, then convert the
response into a HTML form and use POST method.
@type oid_response: openid.server.server.OpenIDResponse
@param oid_response: OpenID response object
@rtype: basestring
@return: WSGI response'''
"""
if not isinstance(oid_response, server.OpenIDResponse):
log.error("OpenID Response is %r type, expecting %r",
type(oid_response), server.OpenIDResponse)
return self._render.errorPage(self.environ, self.start_response,
"Error setting a response. Please "
"report this fault to your site "
"administrator.",
code=500)
try:
webresponse = self.oidserver.encodeResponse(oid_response)
except server.EncodingError as | |
),
"DR12a_5" : ( 33218, 33219 ),
"DR12a1_5" : ( 33219, 33220 ),
"DR12a2_5" : ( 33220, 33221 ),
"DR12b_5" : ( 33221, 33222 ),
"DR12b1_5" : ( 33222, 33223 ),
"DR12c_5" : ( 33223, 33224 ),
"DR12cSpecify_5" : ( 33224, 33449 ),
"DR12c1_5" : ( 33449, 33450 ),
"DR13a_" : ( 33450, 33451 ),
"DR13b_" : ( 33451, 33452 ),
"DR13c_" : ( 33452, 33453 ),
"DR13d_" : ( 33453, 33454 ),
"DR13a_2" : ( 33454, 33455 ),
"DR13b_2" : ( 33455, 33456 ),
"DR13c_2" : ( 33456, 33457 ),
"DR13d_2" : ( 33457, 33458 ),
"DR13a_3" : ( 33458, 33459 ),
"DR13b_3" : ( 33459, 33460 ),
"DR13c_3" : ( 33460, 33461 ),
"DR13d_3" : ( 33461, 33462 ),
"DR13a_4" : ( 33462, 33463 ),
"DR13b_4" : ( 33463, 33464 ),
"DR13c_4" : ( 33464, 33465 ),
"DR13d_4" : ( 33465, 33466 ),
"DR13a_5" : ( 33466, 33467 ),
"DR13b_5" : ( 33467, 33468 ),
"DR13c_5" : ( 33468, 33469 ),
"DR13d_5" : ( 33469, 33470 ),
"DR14_" : ( 33470, 33471 ),
"DR14a_" : ( 33471, 33472 ),
"DR14b_" : ( 33472, 33473 ),
"DR14_2" : ( 33473, 33474 ),
"DR14a_2" : ( 33474, 33475 ),
"DR14b_2" : ( 33475, 33476 ),
"DR14_3" : ( 33476, 33477 ),
"DR14a_3" : ( 33477, 33478 ),
"DR14b_3" : ( 33478, 33479 ),
"DR14_4" : ( 33479, 33480 ),
"DR14a_4" : ( 33480, 33481 ),
"DR14b_4" : ( 33481, 33482 ),
"DR14_5" : ( 33482, 33483 ),
"DR14a_5" : ( 33483, 33484 ),
"DR14b_5" : ( 33484, 33485 ),
"DR15_" : ( 33485, 33486 ),
"DR15a_" : ( 33486, 33487 ),
"DR15b_" : ( 33487, 33488 ),
"DR15_2" : ( 33488, 33489 ),
"DR15a_2" : ( 33489, 33490 ),
"DR15b_2" : ( 33490, 33491 ),
"DR15_3" : ( 33491, 33492 ),
"DR15a_3" : ( 33492, 33493 ),
"DR15b_3" : ( 33493, 33494 ),
"DR15_4" : ( 33494, 33495 ),
"DR15a_4" : ( 33495, 33496 ),
"DR15b_4" : ( 33496, 33497 ),
"DR15_5" : ( 33497, 33498 ),
"DR15a_5" : ( 33498, 33499 ),
"DR15b_5" : ( 33499, 33500 ),
"DR16_" : ( 33500, 33501 ),
"DR16a_" : ( 33501, 33502 ),
"DR16_2" : ( 33502, 33503 ),
"DR16a_2" : ( 33503, 33504 ),
"DR16_3" : ( 33504, 33505 ),
"DR16a_3" : ( 33505, 33506 ),
"DR16_4" : ( 33506, 33507 ),
"DR16a_4" : ( 33507, 33508 ),
"DR16_5" : ( 33508, 33509 ),
"DR16a_5" : ( 33509, 33510 ),
"DR17_" : ( 33510, 33511 ),
"DR17a_" : ( 33511, 33512 ),
"DR17_2" : ( 33512, 33513 ),
"DR17a_2" : ( 33513, 33514 ),
"DR17_3" : ( 33514, 33515 ),
"DR17a_3" : ( 33515, 33516 ),
"DR17_4" : ( 33516, 33517 ),
"DR17a_4" : ( 33517, 33518 ),
"DR17_5" : ( 33518, 33519 ),
"DR17a_5" : ( 33519, 33520 ),
"DR18_1_" : ( 33520, 33521 ),
"DR18_2_" : ( 33521, 33522 ),
"DR18_3_" : ( 33522, 33523 ),
"DR18_4_" : ( 33523, 33524 ),
"DR18_5_" : ( 33524, 33525 ),
"DR18a_" : ( 33525, 33526 ),
"DR18_1_2" : ( 33526, 33527 ),
"DR18_2_2" : ( 33527, 33528 ),
"DR18_3_2" : ( 33528, 33529 ),
"DR18_4_2" : ( 33529, 33530 ),
"DR18_5_2" : ( 33530, 33531 ),
"DR18a_2" : ( 33531, 33532 ),
"DR18_1_3" : ( 33532, 33533 ),
"DR18_2_3" : ( 33533, 33534 ),
"DR18_3_3" : ( 33534, 33535 ),
"DR18_4_3" : ( 33535, 33536 ),
"DR18_5_3" : ( 33536, 33537 ),
"DR18a_3" : ( 33537, 33538 ),
"DR18_1_4" : ( 33538, 33539 ),
"DR18_2_4" : ( 33539, 33540 ),
"DR18_3_4" : ( 33540, 33541 ),
"DR18_4_4" : ( 33541, 33542 ),
"DR18_5_4" : ( 33542, 33543 ),
"DR18a_4" : ( 33543, 33544 ),
"DR18_1_5" : ( 33544, 33545 ),
"DR18_2_5" : ( 33545, 33546 ),
"DR18_3_5" : ( 33546, 33547 ),
"DR18_4_5" : ( 33547, 33548 ),
"DR18_5_5" : ( 33548, 33549 ),
"DR18a_5" : ( 33549, 33550 ),
"DR19Qsx" : ( 33550, 33551 ),
"DR19Qsx2" : ( 33551, 33552 ),
"DR19Qsx3" : ( 33552, 33553 ),
"DR19Qsx4" : ( 33553, 33554 ),
"DR19Qsx5" : ( 33554, 33555 ),
"DR19Qsx6" : ( 33555, 33556 ),
"DR19Qsx7" : ( 33556, 33557 ),
"DR19Qsx8" : ( 33557, 33558 ),
"DR19Qsx9" : ( 33558, 33559 ),
"DR19Qsx10" : ( 33559, 33560 ),
"DR19Qsx11" : ( 33560, 33561 ),
"DR19Qsx12" : ( 33561, 33562 ),
"DR19Qsx13" : ( 33562, 33563 ),
"DR19Qsx14" : ( 33563, 33564 ),
"DR19Qsx15" : ( 33564, 33565 ),
"DR19Qsx16" : ( 33565, 33566 ),
"DR19Qsx17" : ( 33566, 33567 ),
"DR19Qsx18" : ( 33567, 33568 ),
"DR19Qsx19" : ( 33568, 33569 ),
"DR19aQsx" : ( 33569, 33570 ),
"DR19aQsx2" : ( 33570, 33571 ),
"DR19aQsx3" : ( 33571, 33572 ),
"DR19aQsx4" : ( 33572, 33573 ),
"DR19aQsx5" : ( 33573, 33574 ),
"DR19aQsx6" : ( 33574, 33575 ),
"DR19aQsx7" : ( 33575, 33576 ),
"DR19aQsx8" : ( 33576, 33577 ),
"DR19aQsx9" : ( 33577, 33578 ),
"DR19aQsx10" : ( 33578, 33579 ),
"DR19aQsx11" : ( 33579, 33580 ),
"DR19aQsx12" : ( 33580, 33581 ),
"DR19aQsx13" : ( 33581, 33582 ),
"DR19aQsx14" : ( 33582, 33583 ),
"DR19aQsx15" : ( 33583, 33584 ),
"DR19aQsx16" : ( 33584, 33585 ),
"DR19aQsx17" : ( 33585, 33586 ),
"DR19aQsx18" : ( 33586, 33587 ),
"DR19aQsx19" : ( 33587, 33588 ),
"DR19SxAgeOns_" : ( 33588, 33590 ),
"DR19SxOns_" : ( 33590, 33591 ),
"DR19SxAgeRec_" : ( 33591, 33593 ),
"DR19SxRec_" : ( 33593, 33594 ),
"DR5YrCl_" : ( 33594, 33595 ),
"DR6YrCl_" : ( 33595, 33596 ),
"DR7YrCl_" : ( 33596, 33597 ),
"DR7aYrCl_" : ( 33597, 33598 ),
"DR7bYrCl_" : ( 33598, 33599 ),
"DR8YrCl_" : ( 33599, 33600 ),
"DR9aYrCl_" : ( 33600, 33601 ),
"DR10YrCl_" : ( 33601, 33602 ),
"DR11bYrCl_" : ( 33602, 33603 ),
"DR11e2YrCl_" : ( 33603, 33604 ),
"DR12a2YrCl_" : ( 33604, 33605 ),
"DR12b1YrCl_" : ( 33605, 33606 ),
"DR12c1YrCl_" : ( 33606, 33607 ),
"DR13dYrCl_" : ( 33607, 33608 ),
"DR14bYrCl_" : ( 33608, 33609 ),
"DR18aYrCl_" : ( 33609, 33610 ),
"DR15aYrCl_" : ( 33610, 33611 ),
"DR16YrCl_" : ( 33611, 33612 ),
"DR17YrCl_" : ( 33612, 33613 ),
"DR5MnthCl_" : ( 33613, 33614 ),
"DR6MnthCl_" : ( 33614, 33615 ),
"DR7MnthCl_" : ( 33615, 33616 ),
"DR7aMnthCl_" : ( 33616, 33617 ),
"DR7bMnthCl_" : ( 33617, 33618 ),
"DR8MnthCl_" : ( 33618, 33619 ),
"DR9aMnthCl_" : ( 33619, 33620 ),
"DR10MnthCl_" : ( 33620, 33621 ),
"DR11bMnthCl_" : ( 33621, 33622 ),
"DR11e2MnthCl_" : ( 33622, 33623 ),
"DR12a2MnthCl_" : ( 33623, 33624 ),
"DR12b1MnthCl_" : ( 33624, 33625 ),
"DR12c1MnthCl_" : ( 33625, 33626 ),
"DR13dMnthCl_" : ( 33626, 33627 ),
"DR14bMnthCl_" : ( 33627, 33628 ),
"DR18aMnthCl_" : ( 33628, 33629 ),
"DR15aMnthCl_" : ( 33629, 33630 ),
"DR16MnthCl_" : ( 33630, 33631 ),
"DR17MnthCl_" : ( 33631, 33632 ),
"DR19AgeOns_" : ( 33632, 33634 ),
"DR19Ons_" : ( 33634, 33635 ),
"DR19AgeRec_" : ( 33635, 33637 ),
"DR19Rec_" : ( 33637, 33638 ),
"DRSxCount" : ( 33638, 33640 ),
"DRYrClCount" : ( 33640, 33642 ),
"DRMnthClCount" : ( 33642, 33644 ),
"DR19Qsx20" : ( 33644, 33645 ),
"DR19Qsx21" : ( 33645, 33646 ),
"DR19Qsx22" : ( 33646, 33647 ),
"DR19Qsx23" : ( 33647, 33648 ),
"DR19Qsx24" : ( 33648, 33649 ),
"DR19Qsx25" : ( 33649, 33650 ),
"DR19Qsx26" : ( 33650, 33651 ),
"DR19Qsx27" : ( 33651, 33652 ),
"DR19Qsx28" : ( 33652, 33653 ),
"DR19Qsx29" : ( 33653, 33654 ),
"DR19Qsx30" : ( 33654, 33655 ),
"DR19Qsx31" : ( 33655, 33656 ),
"DR19Qsx32" : ( 33656, 33657 ),
"DR19Qsx33" : ( 33657, 33658 ),
"DR19Qsx34" : ( 33658, 33659 ),
"DR19Qsx35" : ( 33659, 33660 ),
"DR19Qsx36" : ( 33660, 33661 ),
"DR19Qsx37" : ( 33661, 33662 ),
"DR19Qsx38" | |
<reponame>saucec0de/sifu
#!/usr/bin/env python3
#
# Copyright (c) <NAME>, 2020
# <EMAIL>
#
# SPDX-License-Identifier: MIT
#
# This file implements analysis of stderr
#
import results
import json
import yaml
import sys
import re
injectFileName = "inject.yaml"
def call_analyse(identifier = None, fname = "func"):
# Note: findings shall not repeat themselves, if found more than once
findings = {}
# function that adds a potential new finding to the findings database
def addFinding(finding, tag=""):
    # Record a finding exactly once; a duplicate keeps its first tag.
    # ('findings' dict is supplied by the enclosing scope.)
    findings.setdefault(finding, tag)
def loadYamlFile(inFile):
    # Best-effort YAML parse: return {} when the file is missing or
    # unparseable (errors are deliberately swallowed).
    try:
        with open(inFile) as stream:
            return yaml.load(stream, Loader=yaml.FullLoader)
    except Exception:
        return {}
nLinesInjected = {}
I = loadYamlFile(injectFileName)
for fName in I:
nLinesInjected[fName] = len(I[fName])
chalConfig = loadYamlFile("config.yaml")
##########################################################################
# Main #
##########################################################################
#fNameR = r"func(?:_\d+)?\.cp?p?"
challengeFiles = chalConfig["files"]
if type(challengeFiles) is str:
challengeFiles = [challengeFiles]
for fname in challengeFiles:
# NOTE: we need to split this for loop - it is now very big...!
fNameR = fname
# +------------------------------------------+
# | Process compilaton errors |
# +------------------------------------------+
print("Search file:", fNameR)
try:
with open("compile.txt") as f:
errLines = f.read().split("\n")
lineNum = 0
while (lineNum<len(errLines)):
errLine = errLines[lineNum]
if re.search("redirected_",errLine): errLine = re.sub("redirected_","",errLine)
# search for messages related only to the user file
# Errors that can be bypassed
if re.match(r"^collect2: error: ld returned 1 exit status$",errLine):
finding = "Linking failed!"
addFinding(finding)
#if re.search(r"\.o: No such file or directory$",errLine):
# finding = "File or directory missing!"
# addFinding(finding)
# deal with redirected funtions
# redirect.h:17:14: error: too few arguments to function ‘redirected_time’
m = re.match(r"^"+fNameR+r":(\d+):(\d+): error: (.*)",errLine)
if m:
lineNumber = int(m.group(1)) - nLinesInjected.get(fNameR,0)
colNumber = int(m.group(2))
errMsg = m.group(3)
m = re.search("redirected_",errMsg)
if m:
errMsg = re.sub("redirected_","",errMsg)
# continue searching for the line number
# func_7453459449.c:21:19: note: in expansion of macro ‘time’
while (lineNum<len(errLines)):
errLine = errLines[lineNum]
lineNum = lineNum + 1
m = re.search(fNameR+ r":(\d+):(\d+): note: in expansion of macro (.*)",errLine)
if m:
lineNumber = int(m.group(1)) - nLinesInjected.get(fNameR,0)
finding = "ERROR ({fileName},{lineNumber}): {errMsg}".format(fileName=fNameR,lineNumber=lineNumber,errMsg=errMsg)
addFinding(finding)
break
#
# TODO: capture name of file and compare to our files that we are searching for...
# if not in our dictionary, then this is a "Yikes" error
#
#### # Compiler error in a file not func*
#### # This needs to be improved here...
#### m = re.match(r"^((?!" + fNameR + r")).*error:.*",errLine)
#### if m:
#### print("Yikes: ("+str(m.groups()))+")",errLine)
#### finding = "ERROR in project! Help, Hurry ... call someone!?!?! Yikes!"
#### addFinding(finding)
# Compiler error in func.c or func.cpp - Type 1 (line number + column number)
# func.c:12:5: error: implicit declaration of function ‘xstrcpy’; did you mean ‘strcpy’? [-Werror=implicit-function-declaration]
m = re.search(fNameR+ r":(\d+):(\d+): error: (.*)",errLine)
if m:
lineNumber = int(m.group(1)) - nLinesInjected.get(fNameR,0)
colnNumber = int(m.group(2))
errorMsg = m.group(3)
finding = "ERROR ({fileName},{lineNumber}): {errMsg}".format(fileName=fNameR,lineNumber=lineNumber,errMsg=errorMsg)
addFinding(finding)
# Compiler error in func.c or func.cpp - Type 2 (only line number)
m = re.search(fNameR+ r":(\d+): error: (.*)",errLine)
if m:
#print("BBB",errLine)
if m:
lineNumber = int(m.group(1)) - nLinesInjected.get(fNameR,0)
colnNumber = int(m.group(2))
errorMsg = m.group(3)
finding = "ERROR ({fileName},{lineNumber}): {errMsg}".format(fileName=fNameR,lineNumber=lineNumber,errMsg=errorMsg)
addFinding(finding)
# Compiler error in func.c or func.cpp - Type 3 (fatal error + line number + column number)
m = re.search( fNameR + r":(\d+):(\d+): fatal error: (.*)",errLine)
if m:
if m:
lineNumber = int(m.group(1)) - nLinesInjected.get(fNameR,0)
colnNumber = int(m.group(2))
errorMsg = m.group(3)
finding = "ERROR ({fileName},{lineNumber}): {errMsg}".format(fileName=fNameR,lineNumber=lineNumber,errMsg=errorMsg)
addFinding(finding)
# Usage of deprecated functions
m = re.search( fNameR+ r":(\d+):(\d+): warning: ‘(.*)’ * is deprecated \[-Wdeprecated-declarations\]", errLine)
if m:
lineNumber = int(m.group(1)) - nLinesInjected.get(fNameR,0)
colnNumber = int(m.group(2))
funcName = m.group(3)
finding = "WARNING ({fileName},{lineNumber}): {errMsg}".format(fileName=fNameR,lineNumber=lineNumber,errMsg=errorMsg)
addFinding(finding)
# func.c:28:9: warning: format not a string literal and no format arguments [-Wformat-security]
if 'format not a string literal and no format arguments [-Wformat-security]' in errLine:
# func.c:22:14: runtime error: signed integer overflow: 244140625 * 25 cannot be represented in type 'int'
m = re.search( fNameR + r":(\d+):(\d+):", errLine)
if m:
lineNumber = int(m.group(1)) - nLinesInjected.get(fNameR,0)
colNumber = int(m.group(2))
finding = "WARNING ({fileName},{lineNumber}): A format string attack is possible".format(fileName=fNameR,lineNumber=lineNumber)
addFinding(finding)
lineNum = lineNum + 1
except Exception as e:
print("Exception: "+str(e))
# +------------------------------------------+
# | Process findings from stderr |
# +------------------------------------------+
try:
with open("stderr.txt") as f:
errLines = f.read().split("\n")
lineNum = 0
found_asan = False
added_asan_finding = False
while (lineNum<len(errLines)):
errLine = errLines[lineNum]
if "runtime error: signed integer overflow" in errLine:
# func.c:22:14: runtime error: signed integer overflow: 244140625 * 25 cannot be represented in type 'int'
m = re.search(fNameR+ r":(\d+):(\d+): runtime error: signed integer overflow", errLine)
if m:
lineNumber = int(m.group(1)) - nLinesInjected.get(fNameR,0)
colNumber = int(m.group(2))
finding = "ERROR ({fileName},{lineNumber}): There is a signed integer overflow vulnerability".format(fileName=fNameR,lineNumber=lineNumber)
addFinding(finding)
if "runtime error: division by zero" in errLine:
# func.c:25:17: runtime error: division by zero
m = re.search(fNameR+ r":(\d+):(\d+): runtime error: division by zero", errLine)
if m:
lineNumber = int(m.group(1)) - nLinesInjected.get(fNameR,0)
colNumber = int(m.group(2))
finding = "ERROR ({fileNamer},{lineNumber}): There is a division-by-zero vulnerability".format(fileName=fNameR,lineNumber=lineNumber)
addFinding(finding)
if "runtime error: reference binding to null pointer of type" in errLine:
m = re.search(fNameR+ r":(\d+):(\d+): runtime error: member call on null pointer of type \.*", errLine)
if m:
lineNumber = int(m.group(1)) - nLinesInjected.get(fNameR,0)
colNumber = int(m.group(2))
finding = "ERROR ({fileName},{lineNumber}): Null pointer access".format(fileName=fNameR,lineNumber=lineNumber)
addFinding(finding)
# findings by AddressSanitizer and LeakSanitizer
# ==============================================
if re.search(r"AddressSanitizer: ",errLine):
found_asan = True
# search for AddressSanitizer: buffer overflow
if re.search(r"AddressSanitizer: stack-buffer-overflow on address",errLine):
lineNum = lineNum + 1
while (lineNum<len(errLines)):
errLine = errLines[lineNum]
m = re.search(r"'(.*?)'.*<== Memory access at offset \d+ overflows this variable",errLine)
if m:
varName = m.group(1)
finding = "Stack overflow on variable '{varName}'".format(varName=varName)
addFinding(finding)
added_asan_finding = True
break
m = re.search(r"'(.*?)'.*<== Memory access at offset \d+ underflows this variable",errLine)
if m:
varName = m.group(1)
finding = "Stack underflow on variable '{varName}'".format(varName=varName)
addFinding(finding)
added_asan_finding = True
break
lineNum = lineNum + 1
# search for AddressSanitizer: buffer overflow
elif re.search(r"^\*\*\* stack smashing detected \*\*\*",errLine):
finding = "Possible stack smashing was detected"
addFinding(finding)
# Example: ==4==ERROR: AddressSanitizer: SEGV on unknown address 0x5566f04f9933 (pc 0x5566f04db4d6 bp 0x7ffe1f0c2eb0 sp 0x7ffe1f0c2df0 T0)
elif re.search(r"^==\d+==ERROR: AddressSanitizer: SEGV on unknown address",errLine):
lineNum = lineNum + 1
while (lineNum<len(errLines)):
errLine = errLines[lineNum]
# #0 0x557a3f99c4d5 in func /home/gasiba/Git/sifu/upload/edbd33d4-6ece-4cec-9da9-4b66084db79e/func.c:13
m = re.search(r"^.*in (.*) .*\/" + fNameR + r":(.*?)$",errLine)
if m:
functionName = m.group(1)
lineNumber = int(m.group(2)) - nLinesInjected.get(fNameR,0)
finding = "ERROR ({fileName},{lineNumber}): Segmentation fault".format(fileName=fNameR,lineNumber=lineNumber)
addFinding(finding)
added_asan_finding = True
break
lineNum = lineNum + 1
# search for AddressSanitizer: heap-buffer-overflow
# Example: ==2==ERROR: AddressSanitizer: heap-buffer-overflow on address 0x602000000015 at pc 0x55b9ad0e93fd bp 0x7ffce65329b0 sp 0x7ffce65329a0
elif re.search(r"^==\d+==ERROR: AddressSanitizer: heap-buffer-overflow",errLine):
lineNum = lineNum + 1
while (lineNum<len(errLines)):
errLine = errLines[lineNum]
# #0 0x55b9ad0e93fc in func /home/gasiba/Git/sifu/upload/51b30a8b-acde-4bf3-8c64-8d2f88fd932c/func.c:14
m = re.search(r"^in (.*) .*\/" + fNameR + r".cp?p?:(.*?)$",errLine)
if m:
functionName = m.group(1)
lineNumber = int(m.group(2)) - nLinesInjected.get(fNameR,0)
finding = "ERROR ({fileName},{lineNumber}): Heap Buffer Overflow/Underflow".format(fileName=fNameR,lineNumber=lineNumber)
addFinding(finding)
added_asan_finding = True
break
lineNum = lineNum + 1
# search for memory leaks
# Example: ==2==ERROR: LeakSanitizer: detected memory leaks
elif re.search(r"==\d+==ERROR: LeakSanitizer: detected memory leaks",errLine):
lineNum = lineNum + 1
while (lineNum<len(errLines)):
errLine = errLines[lineNum]
# #1 0x5602ed79db34 in get_filled_buffer /home/gasiba/Git/sifu/Challenges/test/chal_0007/func.c:25
m = re.search(r"^.*in (.*) .*\/"+fNameR+":(.*?)$",errLine)
if m:
functionName = m.group(1)
lineNumber = int(m.group(2)) - nLinesInjected.get(fNameR,0)
finding = "ERROR ({fileName},{lineNumber}): Memory leak".format(fileName=fNameR,lineNumber=lineNumber)
addFinding(finding)
break
# SUMMARY: AddressSanitizer: 120 byte(s) leaked in 1 allocation(s).
m = re.search(r"SUMMARY: AddressSanitizer: \d+ byte\(s\) leaked in \d+ allocation\(s\).$",errLine)
if m:
addFinding("Detected memory leak")
break
lineNum = lineNum + 1
# search for free memory that was not malloc'ed
# Example: AddressSanitizer: attempting free on address which was not malloc()-ed: 0x7ffffa10fcd0 in thread T0
# #0 0x560d6e9f491f in __interceptor_free (/home/gasiba/Git/sifu/Challenges/test/chal_0007/main+0x10591f)
# #1 0x560d6ea42783 in get_y_no /home/gasiba/Git/sifu/Challenges/test/chal_0007/func.c:17
# #2 0x560d6ea4191b in Test_Main /home/gasiba/Git/sifu/Challenges/test/chal_0007/main.c:49
# #3 0x560d6ea41ae1 in main /home/gasiba/Git/sifu/Challenges/test/chal_0007/main.c:73
# #4 0x7fe368b1cb96 in __libc_start_main (/lib/x86_64-linux-gnu/libc.so.6+0x21b96)
# #5 0x560d6e90b449 in _start (/home/gasiba/Git/sifu/Challenges/test/chal_0007/main+0x1c449)
elif re.search(r"AddressSanitizer: attempting free on address which was not malloc",errLine):
lineNum = lineNum + 1
while (lineNum<len(errLines)):
errLine = errLines[lineNum]
# #1 0x560d6ea42783 in get_y_no /home/gasiba/Git/sifu/Challenges/test/chal_0007/func.c:17
m = re.search(r"^.*in (.*) .*\/"+fNameR+":(.*?)$",errLine)
if m:
functionName = m.group(1)
lineNumber = int(m.group(2)) - nLinesInjected.get(fNameR,0)
| |
<gh_stars>0
"""IEM Cow (NWS Storm Based Warning Verification) API
See [IEM Cow](https://mesonet.agron.iastate.edu/cow/) webpage for the user
frontend to this API and for more discussion about what this does.
While this service only emits JSON, the JSON response embeds to GeoJSON objects
providing the storm reports and warnings.
Changed on 2 Sep 2021 to count LSRs valid at warning expiration time as
verifying as per NWS Verification Branch guidance.
"""
from typing import List
from datetime import datetime
import json
import geopandas as gpd
import pandas as pd
from pandas.io.sql import read_sql
from fastapi import Query, APIRouter
from shapely.ops import unary_union
from sqlalchemy import text
from ..util import get_dbconn
# Timestamp format used when serialising datetimes for the JSON output.
# NOTE(review): the name suggests ISO-9660 (the CD-ROM filesystem), but
# the format itself is ISO-8601 style - confirm before renaming.
ISO9660 = "%Y-%m-%dT%H:%M:%SZ"
# Map single-character LSR (Local Storm Report) type codes to the
# two-character VTEC phenomena codes used by the warning tables.
LSRTYPE2PHENOM = {
    "T": "TO",  # tornado report
    "H": "SV",  # hail (magnitude is hail size - see load_stormreports)
    "G": "SV",  # wind gust (magnitude threshold applied in the query)
    "D": "SV",  # presumably thunderstorm wind damage - TODO confirm
    "F": "FF",  # flash flood
    "x": "FF",  # presumably a flood-related code - TODO confirm
    "M": "MA",  # marine wind (query requires magnitude >= 34)
    "W": "MA",  # presumably waterspout - TODO confirm
    "2": "DS",  # dust storm
}
# FastAPI router the endpoint handlers in this module attach to
router = APIRouter()
class COWSession:
"""Things that we could do while generating Cow stats"""
def __init__(
    self,
    wfo,
    begints,
    endts,
    phenomena,
    lsrtype,
    hailsize,
    lsrbuffer,
    warningbuffer,
    wind,
    windhailtag,
    limitwarns,
    fcster,
):
    """Build out our session based on provided fields."""
    self.wfo = wfo
    # Requested verification window
    self.begints = begints
    self.endts = endts
    # Storage of data loaded/derived later in the pipeline
    self.events = gpd.GeoDataFrame()
    self.events_buffered = None
    self.stormreports = gpd.GeoDataFrame()
    self.stormreports_buffered = None
    self.stats = {}
    # Query parameters; an empty selection means "all phenomena" /
    # "all LSR types"
    self.phenomena = phenomena or ["TO", "SV", "FF", "MA", "DS"]
    self.lsrtype = lsrtype or ["TO", "SV", "FF", "MA", "DS"]
    self.hailsize = hailsize
    self.lsrbuffer = lsrbuffer
    self.warningbuffer = warningbuffer
    self.wind = wind
    # Y/N style flags arrive as strings
    self.windhailtag = windhailtag.upper() == "Y"
    self.limitwarns = limitwarns.upper() == "Y"
    self.fcster = fcster
    # our database connection
    self.dbconn = get_dbconn("postgis")
def milk(self):
    """Milk the Cow: run the full verification pipeline in order.

    Order matters - the stats depend on the verification steps, which
    in turn need the loaded events and storm reports.
    """
    for step in (
        self.load_events,
        self.load_stormreports,
        self.compute_shared_border,
        self.sbw_verify,
        self.area_verify,
        self.compute_stats,
    ):
        step()
def compute_stats(self):
    """Fill out the ``stats`` attribute from the loaded events (warnings)
    and storm reports.

    Computes area/border verification percentages, lead times, event and
    report counts, and the POD/FAR/CSI skill scores.  NaN results are
    normalised to None for JSON friendliness.
    """
    events = self.events
    reports = self.stormreports
    stats = self.stats

    # Area / shared-border verification percentages
    if events.empty:
        stats["area_verify[%]"] = 0
        stats["shared_border[%]"] = 0
    else:
        stats["area_verify[%]"] = (
            events["areaverify"].sum() / events["parea"].sum() * 100.0
        )
        stats["shared_border[%]"] = (
            events["sharedborder"].sum() / events["perimeter"].sum() * 100.0
        )

    # Lead time summaries over verifying storm reports
    if reports.empty:
        stats["max_leadtime[min]"] = None
        stats["min_leadtime[min]"] = None
        stats["avg_leadtime[min]"] = None
    else:
        leadtimes = reports["leadtime"]
        stats["max_leadtime[min]"] = leadtimes.max()
        stats["min_leadtime[min]"] = leadtimes.min()
        stats["avg_leadtime[min]"] = leadtimes.mean()
    stats["avg_leadtime_firstreport[min]"] = (
        None if events.empty else events["lead0"].mean()
    )

    # Report counts
    if reports.empty:
        stats["tdq_stormreports"] = 0
        stats["unwarned_reports"] = 0
        stats["warned_reports"] = 0
    else:
        stats["tdq_stormreports"] = len(reports[reports["tdq"]].index)
        stats["unwarned_reports"] = len(reports[~reports["warned"]].index)
        stats["warned_reports"] = len(reports[reports["warned"]].index)
    stats["events_verified"] = (
        0 if events.empty else len(events[events["verify"]].index)
    )
    stats["events_total"] = len(events.index)
    stats["reports_total"] = len(reports.index)

    # Probability of Detection: warned fraction of all reports
    if stats["reports_total"] > 0:
        pod = stats["warned_reports"] / float(stats["reports_total"])
    else:
        pod = 0
    stats["POD[1]"] = pod

    # False Alarm Ratio: unverified fraction of all events
    total_events = stats["events_total"]
    if total_events > 0:
        far = (total_events - stats["events_verified"]) / total_events
    else:
        far = 0
    stats["FAR[1]"] = far

    # Critical Success Index, derived from POD and FAR
    if pod > 0:
        stats["CSI[1]"] = ((pod ** -1 + (1 - far) ** -1) - 1) ** -1
    else:
        stats["CSI[1]"] = 0.0

    stats["avg_size[sq km]"] = 0 if events.empty else events["parea"].mean()
    stats["size_poly_vs_county[%]"] = (
        0
        if events.empty
        else events["parea"].sum() / events["carea"].sum() * 100.0
    )

    # Prevent NaN values from above (not JSON serialisable)
    for key, val in stats.items():
        if pd.isnull(val):
            stats[key] = None
def sql_lsr_limiter(self):
    """Return a SQL fragment limiting LSR ``type`` to the request.

    Each requested two-character category also matches one or more
    single-character LSR type codes stored in the database; harmless
    extra codes the database ignores may be included.
    """
    alias_codes = {
        "TO": ["T"],
        "SV": ["H", "G", "D"],
        "FF": ["F", "x"],
        "MA": ["M", "W"],
        "DS": ["2"],
    }
    ltypes = self.lsrtype.copy()
    for category in ("TO", "SV", "FF", "MA", "DS"):
        if category in self.lsrtype:
            ltypes.extend(alias_codes[category])
    if len(ltypes) == 1:
        return f" and type = '{ltypes[0]}'"
    return f" and type in {tuple(ltypes)} "
def sql_fcster_limiter(self):
    """Return a SQL fragment limiting results to one forecaster, if set.

    NOTE(review): the value is interpolated directly into the SQL -
    assumes it was validated upstream (injection risk otherwise).
    """
    if self.fcster is not None:
        return f" and fcster ILIKE '{self.fcster}' "
    return " "
def sql_wfo_limiter(self):
    """Return a SQL fragment restricting warnings to the requested WFOs."""
    wfos = self.wfo
    # "_ALL" sentinel or an empty selection means no WFO filtering
    if "_ALL" in wfos or not wfos:
        return " "
    if len(wfos) == 1:
        return f" and w.wfo = '{wfos[0]}' "
    return f" and w.wfo in {tuple(wfos)} "
def sql_tag_limiter(self):
    """Return a SQL fragment limiting events by wind/hail tag thresholds.

    Warnings without either tag (both NULL) are always kept.
    """
    if self.limitwarns:
        return (
            f" and ((w.windtag >= {self.wind} or "
            f"w.hailtag >= {self.hailsize}) or "
            " (w.windtag is null and w.hailtag is null)) "
        )
    return " "
def load_events(self):
    """Build out the listing of events (warnings) based on the request.

    Populates ``self.events`` with storm-based warning polygons joined
    to their county-based counterparts, one row per event keyed by
    year+wfo+eventid+phenomena+significance (+row_number to break
    duplicate-key ties).  Also initialises bookkeeping columns used by
    the later verification steps and builds ``self.events_buffered``,
    the warning polygons buffered by ``warningbuffer`` km in EPSG:2163
    (assumes metre units for that CRS - TODO confirm).
    """
    self.events = gpd.read_postgis(
        text(
            f"""
    WITH stormbased as (
        SELECT wfo, phenomena, eventid, hailtag, windtag,
        geom, significance,
        ST_area(ST_transform(geom,2163)) / 1000000.0 as parea,
        ST_perimeter(ST_transform(geom,2163)) as perimeter,
        ST_xmax(geom) as lon0, ST_ymax(geom) as lat0,
        extract(year from issue at time zone 'UTC') as year
        from sbw w WHERE status = 'NEW' {self.sql_wfo_limiter()}
        and issue >= :begints and issue < :endts and expire < :endts
        and significance = 'W'
        and phenomena in :phenomena {self.sql_tag_limiter()}
    ),
    countybased as (
        SELECT w.wfo, phenomena, eventid, significance,
        max(w.status) as statuses,
        array_agg(u.ugc) as ar_ugc,
        array_agg(u.name ||' '||u.state) as ar_ugcname,
        sum(ST_area(ST_transform(u.geom,2163)) / 1000000.0) as carea,
        min(issue at time zone 'UTC') as missue,
        max(expire at time zone 'UTC') as mexpire,
        extract(year from issue at time zone 'UTC') as year, w.fcster
        from warnings w JOIN ugcs u on (u.gid = w.gid) WHERE
        w.gid is not null {self.sql_wfo_limiter()} and
        issue >= :begints and issue < :endts and expire < :endts
        and significance = 'W'
        and phenomena in :phenomena
        {self.sql_fcster_limiter()}
        GROUP by w.wfo, phenomena, eventid, significance, year, fcster
    )
    SELECT s.year::int, s.wfo, s.phenomena, s.eventid, s.geom,
    c.missue as issue,
    c.mexpire as expire, c.statuses, c.fcster,
    s.significance, s.hailtag, s.windtag, c.carea, c.ar_ugc,
    s.lat0, s.lon0, s.perimeter, s.parea, c.ar_ugcname,
    s.year || s.wfo || s.eventid || s.phenomena || s.significance ||
    row_number() OVER (PARTITION by s.year, s.wfo, s.eventid, s.phenomena,
    s.significance ORDER by c.missue ASC) as key
    from stormbased s JOIN countybased c on
    (c.eventid = s.eventid and c.wfo = s.wfo and c.year = s.year
    and c.phenomena = s.phenomena and c.significance = s.significance)
    ORDER by issue ASC
    """
        ),
        self.dbconn,
        params={
            "begints": self.begints,
            "endts": self.endts,
            "phenomena": tuple(self.phenomena),
        },
        crs={"init": "epsg:4326"},
        index_col="key",
    )
    # Bookkeeping columns filled in by the verification passes
    self.events = self.events.assign(
        status=lambda df_: df_["statuses"],  # le sigh
        stormreports=lambda df_: [[] for _ in range(len(df_.index))],
        stormreports_all=lambda df_: [[] for _ in range(len(df_.index))],
    )
    self.events["verify"] = False
    self.events["lead0"] = None
    self.events["areaverify"] = 0
    self.events["sharedborder"] = 0
    if self.events.empty:
        return
    # Buffer warning polygons (projected to EPSG:2163) for report matching
    s2163 = self.events["geom"].to_crs(epsg=2163)
    self.events_buffered = s2163.buffer(self.warningbuffer * 1000.0)
def load_stormreports(self):
    """Build out the listing of storm reports based on the request.

    Queries LSRs (local storm reports) over the request window into
    ``self.stormreports`` and, when reports exist, sets
    ``self.stormreports_buffered`` (EPSG:2163 geometries buffered by
    ``self.lsrbuffer`` * 1000 -- presumably km to meters, TODO confirm).

    NOTE(review): unlike load_events(), this query uses pyformat ``%s``
    placeholders instead of named binds with ``text()``.
    """
    self.stormreports = gpd.read_postgis(
        f"""
    SELECT distinct valid at time zone 'UTC' as valid,
    type, magnitude, city, county, state,
    source, remark, wfo, typetext, ST_x(geom) as lon0, ST_y(geom) as lat0,
    geom
    from lsrs w WHERE valid >= %s and valid <= %s
    {self.sql_wfo_limiter()} {self.sql_lsr_limiter()}
    and ((type = 'M' and magnitude >= 34) or type = '2' or
    (type = 'H' and magnitude >= %s) or type = 'W' or
    type = 'T' or (type = 'G' and magnitude >= %s) or type = 'D'
    or type = 'F' or type = 'x') ORDER by valid ASC
    """,
        self.dbconn,
        params=(self.begints, self.endts, self.hailsize, self.wind),
        geom_col="geom",
        crs={"init": "epsg:4326"},
    )
    # Bookkeeping columns filled in by the verification pass later.
    self.stormreports["events"] = [
        [] for _ in range(len(self.stormreports.index))
    ]
    self.stormreports["tdq"] = False
    self.stormreports["warned"] = False
    self.stormreports["leadtime"] = None
    # Map raw LSR type codes onto phenomena codes.
    self.stormreports["lsrtype"] = self.stormreports["type"].map(
        LSRTYPE2PHENOM
    )
    if self.stormreports.empty:
        return
    # Buffer in an equal-area projection (EPSG:2163, meters).
    s2163 = self.stormreports["geom"].to_crs(epsg=2163)
    self.stormreports_buffered = s2163.buffer(self.lsrbuffer * 1000.0)
def compute_shared_border(self):
"""Compute a stat"""
# re ST_Buffer(simple_geom) see akrherz/iem#163
df = read_sql(
text(
f"""
WITH stormbased as (
SELECT geom, wfo, eventid, phenomena, significance,
extract(year from issue at time zone 'UTC') as year
from sbw w WHERE status = 'NEW' {self.sql_wfo_limiter()}
and issue >= :begints and issue < :endts and expire < :endts
and significance = 'W'
and phenomena in :phenomena {self.sql_tag_limiter()}),
countybased as (
SELECT ST_Union(ST_Buffer(u.simple_geom, 0)) as geom,
w.wfo, phenomena, eventid, significance,
extract(year from issue at time zone 'UTC') as year, w.fcster
from warnings w JOIN ugcs u on (u.gid = w.gid) WHERE
w.gid is not null {self.sql_wfo_limiter()} and
issue >= :begints and issue < :endts and expire < :endts
and significance = 'W'
and phenomena in :phenomena
{self.sql_fcster_limiter()}
GROUP by w.wfo, phenomena, eventid, significance, year,
fcster),
agg as (
SELECT ST_SetSRID(ST_intersection(
ST_buffer(ST_exteriorring(
ST_geometryn(ST_multi(c.geom),1)),0.02),
ST_exteriorring(ST_geometryn(
ST_multi(s.geom),1))), 4326) as geo,
c.year, c.wfo, c.phenomena, c.significance, c.eventid
from stormbased s, countybased c WHERE
s.wfo = c.wfo and s.eventid = c.eventid and
s.phenomena = c.phenomena and | |
# TODO: remove commented code once the new function has been tested
# if len(mesh.materials) == 0:
# if mesh.name in bpy.data.materials:
# mat = bpy.data.materials[mesh.name]
# else:
# mat = bpy.data.materials.new(name=mesh.name)
# mat.use_nodes=True
# mesh.materials.append(mat)
# else:
# i = 0
# for m in mesh.materials:
# if m == None:
# if mesh.name in bpy.data.materials:
# mat = bpy.data.materials[mesh.name]
# else:
# mat = bpy.data.materials.new(name=mesh.name)
# mat.use_nodes=True
# mesh.materials[i] = mat
# i = i+1
# NOTE: It looks like there may be a bug here in that all empty material slots will be assigned and if there are no empty slots, nothing will be assigned
def make_opaque():
    """Force every material in the .blend file to the 'OPAQUE' blend mode.

    Also enables backface culling on each material.
    """
    for material in bpy.data.materials:
        material.use_backface_culling = True
        material.blend_method = 'OPAQUE'
def new_image_texture(material_name, image_texture_name, color_array = (0, 0, 0, 0), resolution = 4096):
    """Create (or just select) an image texture node on the named material.

    When no node named ``image_texture_name`` exists, a square image of
    the given resolution filled with ``color_array`` (RGBA) is generated
    and attached to a fresh, active ShaderNodeTexImage node; otherwise
    the existing node is selected.
    """
    # TODO: fold this and new_image_texture_float into one helper.
    existing = bpy.data.materials[material_name].node_tree.nodes
    if image_texture_name in existing:
        existing[image_texture_name].select = True
        return
    tree = bpy.data.materials[str(material_name)].node_tree
    tex_node = tree.nodes.new("ShaderNodeTexImage")
    tex_node.select = True
    tree.nodes.active = tex_node
    bpy.ops.image.new(
        name=image_texture_name,
        width=resolution,
        height=resolution,
        color=(color_array),
        alpha=True,
    )
    tex_node.image = bpy.data.images[image_texture_name]
    tex_node.name = image_texture_name
def new_image_texture_float(material_name, image_texture_name, color_float = 0.0, resolution = 4096):
    """Create (or just select) a grayscale image texture node.

    Like new_image_texture(), but the fill color is a single float
    replicated across RGB with alpha fixed at 1.
    """
    nodes_present = bpy.data.materials[material_name].node_tree.nodes
    if image_texture_name in nodes_present:
        nodes_present[image_texture_name].select = True
        return
    tree = bpy.data.materials[str(material_name)].node_tree
    image_node = tree.nodes.new("ShaderNodeTexImage")
    image_node.select = True
    tree.nodes.active = image_node
    gray = (color_float, color_float, color_float, 1)
    bpy.ops.image.new(
        name=image_texture_name,
        width=resolution,
        height=resolution,
        color=gray,
        alpha=True,
    )
    image_node.image = bpy.data.images[image_texture_name]
    image_node.name = image_texture_name
def procedural_nodes_link_check():
    """Check that the procedural material nodes are linked correctly.

    Verifies the Principled BSDF -> Material Output "Surface" link and
    the custom group node -> BSDF "Base Color" link, writing each
    outcome to the validation report.

    Returns:
        bool: True when every checked link is present.
    """
    valid = True
    # Check for a valid product name in the .blend file.  Without it the
    # material lookup below cannot run, so bail out immediately (the
    # original fell through and raised a NameError on the unbound name).
    try:
        name = bpy.context.scene.xr_studio.product_name
    except AttributeError:
        xrs.log.error("Unable to find the product name in the .blend file")
        return False
    nodes = bpy.data.materials[name].node_tree.nodes
    # Check that BSDF and Material Output are linked
    if xrs.material.link_output_to_slot_named(bpy.data.materials[name],
                                              nodes['Principled BSDF'].outputs[0],
                                              nodes['Material Output'],
                                              "Surface"):
        xrs.validation_report.write_ok("BSDF and Material Output are connected")
    else:
        xrs.validation_report.write_error("BSDF and Material Output are not connected")
        valid = False
    # Check if group node and BSDF are linked
    if xrs.material.link_output_to_slot_named(bpy.data.materials[name],
                                              nodes[name].outputs[0],
                                              nodes['Principled BSDF'],
                                              "Base Color"):
        xrs.validation_report.write_ok("Custom Group node and BSDF are connected")
    else:
        xrs.validation_report.write_error("Custom Group node and BSDF are not connected")
        valid = False
    # TODO: Add checks for Roughness, Metallic, and Normal outputs on the custom group node
    return valid
def procedural_nodes_name_check():
    """Check that the procedural material nodes are named correctly.

    Validates, in order: the material matches the product name, the
    custom group node exists, the node group exists, and the group
    node's minimum inputs/outputs carry the expected names and socket
    types.  Each result is written to the validation report.

    Returns:
        bool: True when every naming check passes.
    """
    valid = True
    # Without the product name none of the lookups below can run (the
    # original fell through and raised a NameError on the unbound name).
    try:
        name = bpy.context.scene.xr_studio.product_name
    except AttributeError:
        xrs.log.error("Unable to find the product name in the .blend file")
        return False
    # Check if the name of the material matches the material name from the website
    try:
        bpy.data.materials[name]
    except KeyError:
        xrs.validation_report.write_error("The " + name + " material was not found in the .blend file. Make sure this material is named correctly.")
        return False
    xrs.validation_report.write_ok("The " + name + " material was found in the .blend file")
    # Check if the custom group node name matches the material name from the website
    nodes = bpy.data.materials[name].node_tree.nodes
    try:
        nodes[name]
    except KeyError:
        xrs.validation_report.write_error("The " + name + " node was not found in the .blend file. Make sure this custom group node name is named correctly.")
        return False
    xrs.validation_report.write_ok("The " + name + " node was found in the .blend file")
    # Check if node group name matches the material name from the website
    try:
        bpy.data.node_groups[name]
    except KeyError:
        xrs.validation_report.write_error("The " + name + " node group was not found in the .blend file. Make sure this is named correctly inside the custom group node.")
        return False
    # Expected (socket name, socket type) for the group node's minimum
    # inputs and outputs, in positional order.
    expected_inputs = [
        ("Overall Color", "RGBA"),
        ("Texture Scale", "VALUE"),
        ("Roughness", "VALUE"),
        ("Metallic", "VALUE"),
        ("Normal", "VALUE"),
    ]
    expected_outputs = [
        ("Color", "RGBA"),
        ("Roughness", "VALUE"),
        ("Metallic", "VALUE"),
        ("Normal", "VECTOR"),
    ]
    ordinal = ("1st", "2nd", "3rd", "4th", "5th")
    # Check if the custom group node has the correct minimum inputs
    for i, (sock_name, sock_type) in enumerate(expected_inputs):
        if nodes[name].inputs[i].name != sock_name:
            xrs.validation_report.write_error("The " + name + " node should have '" + sock_name + "' as its " + ordinal[i] + " input name")
            valid = False
        if nodes[name].inputs[i].type != sock_type:
            xrs.validation_report.write_error("The " + name + " node should have '" + sock_name + "' as a '" + sock_type + "' input type")
            valid = False
    # Check if the custom group node has the correct minimum outputs.
    # BUG FIX: the original omitted `valid = False` for the Metallic
    # output-name mismatch; mismatches now fail validation uniformly.
    for i, (sock_name, sock_type) in enumerate(expected_outputs):
        if nodes[name].outputs[i].name != sock_name:
            xrs.validation_report.write_error("The " + name + " node should have '" + sock_name + "' as its " + ordinal[i] + " output name")
            valid = False
        if nodes[name].outputs[i].type != sock_type:
            xrs.validation_report.write_error("The " + name + " node should have '" + sock_name + "' as a '" + sock_type + "' output type")
            valid = False
    return valid
def build_for_amazon(mat, textures_dir, amazon_dir):
""" Create a clean Principled BSDF material node with 4k textures (TODO: where does the ai file link go? """
xrs.log.info("Rebuilding Material named " + mat.name + " for Amazon")
# TODO: refactor and combine this with rebuild_from_textures
mat.use_nodes = True
# Default Values
alpha_value = 1.0 # opaque
# TODO: allow some color values here too (diffuse_value)
metallic_value = 0.0 # non-metal
roughness_value = 0.9 # very rough
# User set values in place of the defaults, if available
bsdf = get_one_node_of_type(mat.node_tree.nodes, "BSDF_PRINCIPLED")
if bsdf:
alpha_value = get_node_default_value_named(bsdf, "Alpha")
metallic_value = get_node_default_value_named(bsdf, "Metallic")
roughness_value = get_node_default_value_named(bsdf, "Roughness")
xrs.log.verbose("Principled BSDF has alpha: " + str(alpha_value) + ", metallic: " + str(metallic_value) +", roughness: " + str(roughness_value))
# value used above all, if available
if (os.path.isfile(textures_dir + mat.name + "_alpha.value")):
alpha_value = float(Path(textures_dir + mat.name + "_alpha.value").read_text()) / 100
xrs.log.verbose("Alpha loaded from file " + str(alpha_value))
if (os.path.isfile(textures_dir + mat.name + "_metallic.value")):
metallic_value = float(Path(textures_dir + mat.name + "_metallic.value").read_text()) / 100
xrs.log.verbose("Metallic loaded from file " + str(metallic_value))
if (os.path.isfile(textures_dir + mat.name + "_roughness.value")):
roughness_value = float(Path(textures_dir + mat.name + "_roughness.value").read_text()) / 100
xrs.log.verbose("Roughness loaded from file " + str(roughness_value))
# Remove all nodes (except output and P. BSDF) to get a clean start
nodes = mat.node_tree.nodes
for node in nodes:
if node.type != "OUTPUT_MATERIAL" and node.type != "BSDF_PRINCIPLED":
# Remove node
xrs.log.verbose("Removing material node named " + node.name)
mat.node_tree.nodes.remove(node)
# Add shader and output nodes, if missing
output_node = get_one_node_of_type(mat.node_tree.nodes, "OUTPUT_MATERIAL")
if output_node == None:
output_node = nodes.new("ShaderNodeOutputMaterial")
bsdf_node = get_one_node_of_type(mat.node_tree.nodes, "BSDF_PRINCIPLED")
if bsdf_node == None:
bsdf_node = nodes.new("ShaderNodeBsdfPrincipled")
| |
# noinspection PyPackageRequirements
import datawrangler as dw
import os
import sys
import numpy as np
import pandas as pd
import ast
import json
import datetime
import quail
import nltk
import warnings
import pickle
import datetime as dt
# noinspection PyPackageRequirements
from spellchecker import SpellChecker
from glob import glob as lsdir
from nltk.stem import WordNetLemmatizer
from nltk.corpus import wordnet
from scipy.spatial.distance import cdist, pdist
from scipy.stats import wasserstein_distance, pearsonr, zscore
from sklearn.linear_model import LinearRegression
from flair.models import TextClassifier
from flair.data import Sentence
import brainiak.eventseg.event as event
BASE_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
DATA_DIR = os.path.join(BASE_DIR, 'data')
def load_raw():
    """Load the raw formatted per-participant CSV files.

    Reads every CSV under data/raw_formatted except the descriptor/key
    files, pivots each long-format table to one column per variable
    (indexed by datetime), and collects the successfully pivoted frames
    together with synthetic participant labels ('P0', 'P1', ...), where
    the label index tracks the source file position.

    Returns:
        tuple: (list of pivoted DataFrames, list of participant labels).
    """
    datadir = os.path.join(DATA_DIR, 'raw_formatted')
    files = lsdir(os.path.join(datadir, '*.csv'))
    skip = ['data_descriptors.csv', 'event_descriptors.csv', 'id_filename_key.csv']
    files = [f for f in files if os.path.split(f)[-1] not in skip]
    raw_data = [pd.read_csv(f) for f in files]
    loaded = []
    subjects = []
    for i, x in enumerate(raw_data):
        try:
            # aggfunc is the identity: pivot without aggregating.
            y = x.pivot_table(index=['datetime'], columns='variable', aggfunc=lambda a: a)
            # pivot_table yields a (value, variable) column MultiIndex;
            # keep only the variable level.
            y.columns = [c[1] for c in y.columns]
            loaded.append(y)
            subjects.append(f'P{i}')
        except Exception:
            # Narrowed from a bare `except:` so SystemExit and
            # KeyboardInterrupt are no longer swallowed.
            print(f'error loading data: {files[i]}')
    return loaded, subjects
def parse_data(d):
    """Split one participant's wide DataFrame into category sub-frames.

    Each column of ``d`` is classified as 'fitbit', 'survey',
    'experiment', 'meta', or 'untagged' using the descriptor CSVs plus
    keyword heuristics, then merged (outer join on the datetime index)
    into one DataFrame per category.

    Returns:
        dict: category name -> DataFrame of that category's columns.
    """
    datadir = os.path.join(DATA_DIR, 'raw_formatted')
    non_exp_descriptors = pd.read_csv(os.path.join(datadir, 'data_descriptors.csv'))
    exp_descriptors = pd.read_csv(os.path.join(datadir, 'event_descriptors.csv'))
    def variable_type(v):
        # Classify one variable name; special cases first, then the
        # non-experiment descriptors, then the experiment descriptors,
        # then a name-only keyword fallback.
        def helper(descriptors):
            # Look up v's free-text description in a descriptor table
            # and classify by keyword; ordering of the branches matters.
            if 'variable name' in descriptors.columns:
                field = 'variable name'
            else:
                field = 'exp_event'
            # noinspection PyShadowingNames
            inds = np.where([x.strip() == v.strip() for x in descriptors[field].values])[0]
            if len(inds) > 0:
                description = descriptors.iloc[inds[0]]['description']
            else:
                raise Exception('description not found')
            # NOTE(review): assumes 'description' is always a string; a
            # NaN description would raise TypeError here -- TODO confirm
            # against the descriptor CSVs.
            if any([keyword in description for keyword in ['fitbit', 'sleep', 'activ', 'sedent', 'elevation',
                                                           'floors', 'step', 'battery', 'cardio']]):
                return 'fitbit'
            elif any([keyword in v.lower() for keyword in ['fb_', 'cal', 'bodyfat', 'water', 'peak', 'weight', 'hr',
                                                           'oor', 'sync', 'device', 'bmi']]):
                return 'fitbit'
            elif any([keyword in description for keyword in ['clear', 'instruction', 'difficult', 'language', 'gender',
                                                             'coffee', 'color', 'today', 'plan', 'motiv', 'year',
                                                             'current', 'degree', 'freq', 'feedback', 'race',
                                                             'stress', 'impair']]):
                return 'survey'
            elif any([keyword in v.lower() for keyword in ['freq', 'setting']]):
                return 'survey'
            elif any([((keyword in description) or (keyword in v.lower())) for keyword in ['pres', 'rec', 'task',
                                                                                           'word', 'position', 'delay',
                                                                                           'movie', 'experiment']]):
                return 'experiment'
            else:
                return 'meta'
        # Hard-coded special cases, checked before the descriptor tables.
        if v.lower() == 'utc':
            return 'meta'
        elif v.lower() == 'tracker_features':
            return 'fitbit'
        elif v.lower() in ['recent_meds_injuries', 'job_activity', 'tracker_sync_today', 'typical_stress']:
            return 'survey'
        elif v.lower() in ['movie_sent_recall', 'movie_sent_recall_delay']:
            return 'experiment'
        # noinspection PyBroadException
        try:
            return helper(non_exp_descriptors)
        except:
            # noinspection PyBroadException
            try:
                return helper(exp_descriptors)
            except:
                # Not found in either descriptor table: classify from
                # the variable name alone.
                if any([keyword in v.lower() for keyword in ['pres', 'rec', 'task', 'word', 'position', 'delay',
                                                             'resp']]):
                    return 'experiment'
                else:
                    return 'untagged'
    parsed = {}
    for c in d.columns:
        x = variable_type(c)
        if x in parsed.keys():
            # Outer-join on the index so each category keeps all rows.
            parsed[x] = parsed[x].merge(pd.DataFrame(d[c]), how='outer', right_index=True, left_index=True)
        else:
            parsed[x] = pd.DataFrame(d[c])
    return parsed
def simplify_dict_list(x, subjs):
    """Collapse a list of per-participant dicts into one dict of lists.

    The result starts from {'participants': subjs}; every key seen in
    any element of ``x`` maps to the list of its values, in encounter
    order.
    """
    merged = {'participants': subjs}
    for record in x:
        for key, value in record.items():
            merged.setdefault(key, []).append(value)
    return merged
def get_stats(parsed, stat_dict):
    """Compute one row of summary statistics per participant.

    Args:
        parsed: dict mapping category -> list of per-participant frames;
            must contain a 'participants' key.  NOTE: that key is popped,
            mutating the caller's dict (unchanged from the original).
        stat_dict: mapping of statistic name -> callable that takes the
            stacked-data dict and returns one value per participant.

    Returns:
        pandas.DataFrame indexed by participant, one column per stat.
    """
    stacked = {}
    subjs = parsed.pop('participants', None)
    for k in parsed.keys():
        stacked[k] = dw.stack(parsed[k], keys=subjs)
    stats = pd.DataFrame(columns=list(stat_dict.keys()), index=subjs)
    for s in stat_dict.keys():
        # BUG FIX: the original wrote stat_dict(s), calling the dict
        # (TypeError); indexing the dict for the stat function is
        # what's intended.
        stats[s] = stat_dict[s](stacked)
    return stats
def compute_recent_and_change(x, index, name, ref_days, base_days, today=None):
    """Average a logged field over a recent window and vs. a baseline.

    Args:
        x: list of per-participant DataFrames.
        index: index for the returned Series.
        name: column to average.
        ref_days: length (days) of the 'recent' window ending at `today`.
        base_days: length (days) of the baseline window, which ends
            ref_days before `today`.
        today: per-participant reference dates.  NOTE(review): the None
            default is never usable -- `today` is iterated below, so
            callers must always pass it.

    Returns:
        dict with 'recent' (Series of recent-window means) and
        'recent / baseline' (elementwise ratio to the baseline mean).
    """
    # noinspection PyShadowingNames
    def average(d, f):
        # Mean of column f in each participant's window; values are
        # stored as strings and parsed with eval (NOTE(review): eval on
        # data read from disk -- acceptable for trusted local files only).
        warnings.simplefilter('ignore')
        return pd.Series(index=index,
                         data=[np.nanmean([eval(j) for j in i[f] if type(j) is str])
                               if (type(i) is pd.DataFrame and i.shape[0] > 0 and f in i.columns)
                               else np.nan for i in d])
    results = {'recent': average(extract_days_prior(x, ref_days, today=today), name)}
    # The baseline window ends where the recent window begins.
    # noinspection PyShadowingNames, PyTypeChecker
    baseline = average(extract_days_prior(x, base_days, today=[t - dt.timedelta(days=ref_days) for t in today]), name)
    results['recent / baseline'] = results['recent'] / baseline
    return results
def dict_diff(a, b):
    """Return {k: a[k] - b[k]} over the union of both key sets.

    The two dicts are expected to share every key; a key present in
    only one of them raises KeyError (unchanged from the original
    contract).
    """
    return {k: a[k] - b[k] for k in set(a.keys()) | set(b.keys())}
def fitness_stats(parsed, reference_days=7, baseline_days=180):
    """Summarize fitbit-derived fitness statistics per participant.

    Static body stats (BMI, body fat, weight) are parsed directly
    (values are stored as strings and parsed with eval -- trusted local
    data only); each dynamic stat is a dict with 'recent' (mean over the
    last `reference_days`) and 'recent / baseline' (ratio against the
    preceding `baseline_days`), via compute_recent_and_change.

    Returns:
        pandas.DataFrame indexed by participant.
    """
    stats = {}
    index = np.arange(len(parsed['fitbit']))
    # static body stats
    # bmi (zero readings are treated as missing)
    stats['BMI'] = pd.Series(index=index,
                             data=[eval(x) if type(x) is str and not np.isclose(eval(x), 0.0) else np.nan for x in
                                   get_raw_feature(parsed['fitbit'], 'bmi')])
    # bodyfat
    stats['body fat'] = pd.Series(index=index,
                                  data=[eval(x) if type(x) is str and not np.isclose(eval(x), 0.0) else np.nan for x in
                                        get_raw_feature(parsed['fitbit'], 'bodyfat')])
    # weight
    stats['weight'] = pd.Series(index=index,
                                data=[eval(x) if type(x) is str and not np.isclose(eval(x), 0.0) else np.nan for x in
                                      get_raw_feature(parsed['fitbit'], 'weight')])
    # dynamic body stats (for each, compute most recent + change in reference vs. baseline)
    # resting heart rate
    stats['resting heart rate'] = compute_recent_and_change(parsed['fitbit'], index, 'resting_HR', reference_days,
                                                            baseline_days, today=get_test_day(parsed['experiment']))
    # sleep hours
    stats['sleep duration'] = compute_recent_and_change(parsed['fitbit'], index, 'sleep_duration', reference_days,
                                                        baseline_days, today=get_test_day(parsed['experiment']))
    # sleep efficiency
    # NOTE(review): this reuses the 'sleep_duration' column, same as the
    # stat above -- looks like a copy-paste; confirm whether a dedicated
    # sleep-efficiency field was intended.
    stats['sleep efficiency'] = compute_recent_and_change(parsed['fitbit'], index, 'sleep_duration', reference_days,
                                                          baseline_days, today=get_test_day(parsed['experiment']))
    # activity summary (recent + change in reference vs. baseline)
    # steps
    stats['steps'] = compute_recent_and_change(parsed['fitbit'], index, 'steps', reference_days, baseline_days,
                                               today=get_test_day(parsed['experiment']))
    # distance
    stats['distance'] = compute_recent_and_change(parsed['fitbit'], index, 'distance', reference_days, baseline_days,
                                                  today=get_test_day(parsed['experiment']))
    # elevation
    stats['elevation'] = compute_recent_and_change(parsed['fitbit'], index, 'elevation', reference_days, baseline_days,
                                                   today=get_test_day(parsed['experiment']))
    # floors
    stats['floors climbed'] = compute_recent_and_change(parsed['fitbit'], index, 'floors', reference_days,
                                                        baseline_days, today=get_test_day(parsed['experiment']))
    # activity details (recent + change in reference vs. baseline)
    # light activity minutes
    stats['light activity'] = compute_recent_and_change(parsed['fitbit'], index, 'light_act_mins', reference_days,
                                                        baseline_days, today=get_test_day(parsed['experiment']))
    # fairly active minutes
    stats['fair activity'] = compute_recent_and_change(parsed['fitbit'], index, 'fair_act_mins', reference_days,
                                                       baseline_days, today=get_test_day(parsed['experiment']))
    # very active minutes
    stats['high intensity activity'] = compute_recent_and_change(parsed['fitbit'], index, 'very_act_mins',
                                                                 reference_days, baseline_days,
                                                                 today=get_test_day(parsed['experiment']))
    # cal - cal_bmr: calories burned beyond the basal metabolic rate
    cal = compute_recent_and_change(parsed['fitbit'], index, 'cal', reference_days, baseline_days,
                                    today=get_test_day(parsed['experiment']))
    cal_bmr = compute_recent_and_change(parsed['fitbit'], index, 'cal_bmr', reference_days, baseline_days,
                                        today=get_test_day(parsed['experiment']))
    stats['excess calories'] = dict_diff(cal, cal_bmr)
    # heart-specific activity details (recent + change in reference vs. baseline)
    # out of range minutes
    stats['out-of-range HR'] = compute_recent_and_change(parsed['fitbit'], index, 'oor_mins', reference_days,
                                                         baseline_days, today=get_test_day(parsed['experiment']))
    # fat burn minutes
    stats['fat burn HR'] = compute_recent_and_change(parsed['fitbit'], index, 'fb_mins', reference_days, baseline_days,
                                                     today=get_test_day(parsed['experiment']))
    # cardio minutes
    stats['cardio HR'] = compute_recent_and_change(parsed['fitbit'], index, 'cardio_mins', reference_days,
                                                   baseline_days, today=get_test_day(parsed['experiment']))
    # peak minutes
    stats['peak HR'] = compute_recent_and_change(parsed['fitbit'], index, 'peak_mins', reference_days, baseline_days,
                                                 today=get_test_day(parsed['experiment']))
    # today's heart rate variability (average) -- cannot compute change
    test_day = extract_days_prior(parsed['fitbit'], 1, today=get_test_day(parsed['experiment']))
    hrv = pd.Series(index=index)
    for i, x in enumerate(test_day):
        if 'todayHRval' in x.columns:
            # std of the day's HR samples (values stored as strings).
            hrv.loc[index[i]] = np.nanstd([eval(h) if type(h) is str else np.nan for h in x['todayHRval']])
    stats['HR variability'] = hrv
    # not including the following-- almost no one logged them:
    # - food and water intake (recent + change in reference vs. baseline)
    # - water logged
    # - food calories logged
    x = alt_dict2df(stats)
    return pd.DataFrame(index=parsed['participants'], data=x.values, columns=x.columns)
def lemmatize(word, lemmatizer=None):
    """Lemmatize a word (or list of words) using its part-of-speech tag.

    Args:
        word: a single word, or a list of words (handled recursively).
        lemmatizer: optional WordNetLemmatizer to reuse across calls.

    Returns:
        The lemmatized word, or the list of lemmatized words.
    """
    if lemmatizer is None:
        lemmatizer = WordNetLemmatizer()
    if type(word) == list:
        return [lemmatize(w, lemmatizer=lemmatizer) for w in word]
    # BUG FIX: nltk.pos_tag returns Penn Treebank tags ('JJ', 'VBD',
    # 'RB', 'NN', ...), so comparing the full tag against 'J'/'V'/'R'
    # never matched and every word was lemmatized as a noun.  Compare
    # the tag's first letter instead.
    tag = nltk.pos_tag([word])[0][1][:1]
    if tag == 'J':
        pos = wordnet.ADJ
    elif tag == 'V':
        pos = wordnet.VERB
    elif tag == 'R':
        pos = wordnet.ADV
    else:
        pos = wordnet.NOUN
    return lemmatizer.lemmatize(word, pos)
# noinspection PyShadowingNames
def get_list_items(data, lists=None, pres_prefix='', rec_prefix='', aggregate_presentations=False, debug=False):
    """Build a quail.Egg of presented and recalled free-recall words.

    Args:
        data: iterable of per-subject tables keyed by
            f'{pres_prefix}{n}' / f'{rec_prefix}{n}' columns.
        lists: list numbers to extract; defaults to [1, 2, 3, 4].
        pres_prefix: column prefix for presented words.
        rec_prefix: column prefix for recalled words.
        aggregate_presentations: if True, collapse all lists into a
            single presentation list (items tagged with their list
            number) and read recalls once from f'{rec_prefix}'.
        debug: print words that cannot be matched to the word pool.

    Returns:
        quail.Egg built from the presented/recalled word features.
    """
    if lists is None:
        lists = [1, 2, 3, 4]
    spell = SpellChecker(language='en')
    wordpool = pd.read_csv(os.path.join(DATA_DIR, 'task', 'wordpool.csv'))
    known_mistakes = pd.read_csv(os.path.join(DATA_DIR, 'task', 'spellcheck.csv'))
    def get_features(word):
        # Normalize one response (or comma-separated responses) and look
        # up word-pool features; falls back to minimal features when the
        # word is not in the pool even after lemmatizing.
        if ', ' in word:
            return [get_features(w) for w in word.split(', ')]
        # remove extraneous characters
        extras = [',', '.', '!', '?', ' ']
        word = ''.join([c for c in word if c not in extras])
        # basic spelling correction
        if type(word) is str:
            word = spell.correction(word.capitalize())
        else:
            raise ValueError(f'cannot process words of type {type(word)}')
        # known mistakes (manually curated correction table)
        mistake = known_mistakes.query(f'misspelled == "{word.upper()}"')
        if len(mistake) > 0:
            word = mistake['corrected'].values[0]
        w = wordpool.query(f'WORD == "{word.upper()}"')
        if len(w) == 0:
            # try lemmatizing the word
            lemmatized_word = lemmatize(word.lower())
            lw = wordpool.query(f'WORD == "{lemmatized_word.upper()}"')
            if len(lw) > 0:
                w = lw
                word = lemmatized_word
        if len(w) == 0:
            if debug:
                print(f'unrecognized word: {word.upper()}')
            return {'item': word.upper(),
                    'word_length': len(word),
                    'starting_letter': word[0].upper()}
        else:
            return {'item': word.upper(),
                    'word_length': len(word),
                    'starting_letter': word[0].upper(),
                    'category': w['CATEGORY'].values[0].upper(),
                    'size': w['SIZE'].values[0].upper()}
    pres_words = []
    rec_words = []
    for subj_data in data:
        list_presentations = []
        list_recalls = []
        # noinspection PyBroadException
        try:
            for i, x in enumerate(lists):
                # float entries are NaN cells left by the pivot; skip them
                presented_items = [get_features(w) for w in subj_data[f'{pres_prefix}{x}'] if type(w) is not float]
                if aggregate_presentations:
                    # The comprehension's i is local to the comprehension
                    # (Python 3 scoping) and does not clobber the loop index.
                    list_presentations.extend([dw.core.update_dict(i, {'list': x}) for i in presented_items])
                    if i == 0:
                        try:
                            list_recalls.extend([get_features(w) for w in subj_data[f'{rec_prefix}'] if type(w) is not float])
                        except KeyError:
                            list_recalls.extend([])
                else:
                    list_presentations.append(presented_items)
                    try:
                        next_recalls = []
                        for w in subj_data[f'{rec_prefix}{x}']:
                            if type(w) is str:
                                next_features = get_features(w)
                                if type(next_features) is dict:
                                    next_recalls.append(next_features)
                                elif type(next_features) is list:
                                    next_recalls.extend(next_features)
                        list_recalls.append(next_recalls)
                    except KeyError:
                        list_recalls.append([])
            if aggregate_presentations:
                pres_words.append([list_presentations])
                rec_words.append([list_recalls])
            else:
                pres_words.append(list_presentations)
                rec_words.append(list_recalls)
        except:
            # NOTE(review): this swallows the real exception and raises a
            # generic one, discarding the original traceback context.
            raise Exception('throwing this error to help with debugging...')
    return quail.Egg(pres=pres_words, rec=rec_words)
def sliding_windows(text, width=10, end='.'):
punctuation = ['.', ',', '-', '?', '!']
if len(text) == 0:
return | |
"""
Module containing the three basic classes: Parameters, Particles, Species.
"""
from copy import deepcopy
from numpy import array, cross, ndarray, pi, sqrt, tanh, zeros
from scipy.constants import physical_constants
from scipy.linalg import norm
from .plasma import Species
from .utilities.exceptions import ParticlesError
class Parameters:
"""
Class containing all the constants and physical constants of the simulation.
Parameters
----------
dic : dict, optional
Dictionary to be copied.
Attributes
----------
a_ws : float
Wigner-Seitz radius. Calculated from the ``total_num_density`` .
equilibration_steps : int
Total number of equilibration timesteps.
eq_dump_step : int
Equilibration dump interval.
magnetization_steps : int
Total number of magnetization timesteps.
mag_dump_step : int
Magnetization dump interval.
production_steps : int
Total number of production timesteps.
prod_dump_step : int
Production dump interval.
box_volume : float
Volume of simulation box.
pbox_volume : float
Volume of initial particle box.
dimensions : int
Number of non-zero dimensions. Default = 3.
fourpie0: float
Electrostatic constant :math:`4\\pi \\epsilon_0`.
num_species : int
Number of species.
kB : float
Boltzmann constant obtained from ``scipy.constants``.
hbar : float
Reduced Planck's constant.
hbar2 : float
Square of reduced Planck's constant.
a0 : float
Bohr Radius.
c0 : float
Speed of light.
qe : float
Elementary charge.
me : float
Electron mass.
eps0 : float
Vacuum electrical permittivity.
eV2K : float
Conversion factor from eV to Kelvin obtained from ``scipy.constants``.
J2erg : float
Conversion factor from Joules to erg. Needed for cgs units.
QFactor : float
Charge Factor defined as :math:`\mathcal Q = \sum_{i}^{N} q_{i}^2` .
Lx : float
Box length in the :math:`x` direction.
Ly : float
Box length in the :math:`y` direction.
Lz : float
Box length in the :math:`z` direction.
e1 : float
Unit vector in the :math:`x` direction.
e2 : float
Unit vector in the :math:`y` direction.
e3 : float
Unit vector in the :math:`z` direction.
LPx : float
Initial particle box length in the :math:`x` direction.
LPy : float
Initial particle box length in the :math:`y` direction.
LPz : float
Initial particle box length in the :math:`z` direction.
ep1 : float
Unit vector of the initial particle box in the :math:`x` direction.
ep2 : float
Unit vector of the initial particle box in the :math:`y` direction.
ep3 : float
Unit vector of the initial particle box in the :math:`z` direction.
input_file : str
YAML Input file with all the simulation's parameters.
T_desired : float
Target temperature for the equilibration phase.
species_num : numpy.ndarray
Number of particles of each species. Shape = (``num_species``)
species_concentrations : numpy.ndarray
Concentration of each species. Shape = (``num_species``)
species_temperatures : numpy.ndarray
Initial temperature of each species. Shape = (``num_species``)
species_masses : numpy.ndarray
Mass of each species. Shape = (``num_species``)
species_charges : numpy.ndarray
Charge of each species. Shape = (``num_species``)
species_names : list
Name of each species. Len = (``num_species``)
species_plasma_frequencies : numpy.ndarray
Plasma Frequency of each species. Shape = (``num_species``)
species_num_dens : numpy.ndarray
Number density of each species. Shape = (``num_species``)
total_ion_temperature : float
Total initial ion temperature calculated as `` = species_concentration @ species_temperatures``.
total_net_charge : float
Total charge in the system.
total_num_density : float
Total number density. Calculated from the sum of :attr:`Species.number_density`.
total_num_ptcls : int
Total number of particles. Calculated from the sum of :attr:`Species.num`.
measure : bool
Flag for production phase.
verbose : bool
Flag for screen output.
simulations_dir : str
Name of directory where to store simulations.
job_dir : str
Directory name of the current job/run
production_dir : str
Directory name where to store simulation's files of the production phase. Default = 'Production'.
equilibration_dir : str
Directory name where to store simulation's file of the equilibration phase. Default = 'Equilibration'.
preprocessing_dir : str
Directory name where to store preprocessing files. Default = "PreProcessing".
postprocessing_dir : str
Directory name where to store postprocessing files. Default = "PostProcessing".
prod_dump_dir : str
Directory name where to store production phase's simulation's checkpoints. Default = 'dumps'.
eq_dump_dir : str
Directory name where to store equilibration phase's simulation's checkpoints. Default = 'dumps'.
job_id : str
Appendix of all simulation's files.
log_file : str
Filename of the simulation's log.
np_per_side : numpy.ndarray
Number of particles per simulation's box side.
The product of its components should be equal to ``total_num_ptcls``.
pre_run : bool
Flag for preprocessing phase.
"""
def __init__(self, dic: dict = None) -> None:
    """
    Initialize every simulation parameter to its documented default.

    Parameters
    ----------
    dic : dict, optional
        If provided, forwarded to :meth:`from_dict` to overwrite the
        defaults with user-supplied values.
    """
    # Particle loading / initial configuration
    self.particles_input_file = None
    self.load_perturb = 0.0
    self.initial_lattice_config = "simple_cubic"
    self.load_rejection_radius = None
    self.load_halton_bases = None
    self.load_method = None
    self.potential_type = None
    self.units = None
    self.electron_magnetic_energy = None
    self.input_file = None
    # Sim box geometry
    self.Lx = 0.0
    self.Ly = 0.0
    self.Lz = 0.0
    # Particle-box (initialization region) side lengths
    self.LPx = 0.0
    self.LPy = 0.0
    self.LPz = 0.0
    # Basis vectors of the simulation box and of the particle box
    self.e1 = None
    self.e2 = None
    self.e3 = None
    self.ep1 = None
    self.ep2 = None
    self.ep3 = None
    self.box_lengths = None
    self.pbox_lengths = None
    self.box_volume = 0.0
    self.pbox_volume = 0.0
    self.dimensions = 3
    # Physical Constants and conversion units
    self.J2erg = 1.0e7  # erg/J
    self.eps0 = physical_constants["vacuum electric permittivity"][0]
    self.fourpie0 = 4.0 * pi * self.eps0
    self.mp = physical_constants["proton mass"][0]
    self.me = physical_constants["electron mass"][0]
    self.qe = physical_constants["elementary charge"][0]
    self.hbar = physical_constants["reduced Planck constant"][0]
    self.hbar2 = self.hbar**2
    self.c0 = physical_constants["speed of light in vacuum"][0]
    self.eV2K = physical_constants["electron volt-kelvin relationship"][0]
    self.eV2J = physical_constants["electron volt-joule relationship"][0]
    self.a0 = physical_constants["Bohr radius"][0]
    self.kB = physical_constants["Boltzmann constant"][0]
    self.kB_eV = physical_constants["Boltzmann constant in eV/K"][0]
    # Wigner-Seitz radius; computed later from the density
    self.a_ws = 0.0
    # Phases
    self.equilibration_phase = True
    self.electrostatic_equilibration = True
    self.magnetization_phase = False
    self.production_phase = True
    # Timing
    self.equilibration_steps = 0
    self.production_steps = 0
    self.magnetization_steps = 0
    self.eq_dump_step = 1
    self.prod_dump_step = 1
    self.mag_dump_step = 1
    # Control
    self.job_id = None
    self.job_dir = None
    self.log_file = None
    self.measure = False
    self.magnetized = False
    self.plot_style = None
    self.pre_run = False
    # Directory layout for simulation output
    self.simulations_dir = "Simulations"
    self.production_dir = "Production"
    self.magnetization_dir = "Magnetization"
    self.equilibration_dir = "Equilibration"
    self.preprocessing_dir = "PreProcessing"
    self.postprocessing_dir = "PostProcessing"
    self.prod_dump_dir = "dumps"
    self.eq_dump_dir = "dumps"
    self.mag_dump_dir = "dumps"
    self.verbose = True
    self.restart_step = None
    self.np_per_side = None
    # Per-species data (arrays of length num_species, filled later)
    self.num_species = 1
    self.magnetic_field = None
    self.species_lj_sigmas = None
    self.species_names = None
    self.species_num = None
    self.species_num_dens = None
    self.species_concentrations = None
    self.species_temperatures = None
    self.species_temperatures_eV = None
    self.species_masses = None
    self.species_charges = None
    self.species_plasma_frequencies = None
    self.species_cyclotron_frequencies = None
    self.species_couplings = None
    # System-wide totals (accumulated from the species data)
    self.coupling_constant = 0.0
    self.total_num_density = 0.0
    self.total_num_ptcls = 0
    self.total_plasma_frequency = 0.0
    self.total_debye_length = 0.0
    self.total_mass_density = 0.0
    self.total_ion_temperature = 0.0
    self.T_desired = 0.0
    self.total_net_charge = 0.0
    self.QFactor = 0.0
    self.average_charge = None
    self.average_mass = None
    self.hydrodynamic_frequency = None
    if dic:
        self.from_dict(dic)
def __repr__(self):
    """Return a multi-line listing of all attributes, sorted case-insensitively by name."""
    ordered = sorted(self.__dict__.items(), key=lambda kv: kv[0].lower())
    body = "".join("\t{} : {}\n".format(name, val) for name, val in ordered)
    return "Parameters( \n" + body + ")"
def __copy__(self):
    """Shallow copy: build a fresh instance of the same class fed with this object's ``__dict__``."""
    cls = type(self)
    return cls(dic=self.__dict__)
def __deepcopy__(self, memodict=None):
    """
    Make a deepcopy of the object.

    Parameters
    ----------
    memodict : dict, optional
        Maps ``id()`` of already-copied objects to their copies; used to
        break reference cycles during recursion. A fresh dict is created
        when omitted (never use a mutable default argument: it would be
        shared across unrelated calls).

    Returns
    -------
    _copy : :class:`sarkas.core.Parameters`
        A new Parameters class.
    """
    if memodict is None:
        memodict = {}
    id_self = id(self)  # memoization avoids unnecessary recursion
    _copy = memodict.get(id_self)
    if _copy is None:
        _copy = type(self)()
        # Register the copy *before* recursing so that any cyclic reference
        # back to ``self`` resolves to ``_copy`` instead of recursing forever.
        memodict[id_self] = _copy
        # Make a deepcopy of the mutable attributes using copy.deepcopy
        for k, v in self.__dict__.items():
            _copy.__dict__[k] = deepcopy(v, memodict)
    return _copy
def calc_coupling_constant(self, species: list):
    """
    Compute each species' coupling parameter and accumulate the total
    (concentration-weighted) coupling constant. For more information see
    the theory pages.

    Parameters
    ----------
    species: list
        List of ``sarkas.plasma.Species`` objects.
    """
    # Concentration-weighted mean charge of the mixture.
    z_avg = (self.species_charges.transpose()) @ self.species_concentrations
    # Loop-invariant conversion factor.
    const = self.fourpie0 * self.kB
    for idx, spec in enumerate(species):
        spec.calc_coupling(self.a_ws, z_avg, const)
        self.species_couplings[idx] = spec.coupling
        self.coupling_constant += spec.concentration * spec.coupling
def calc_electron_properties(self, species: list):
"""Check whether the electrons are a dynamical species or not."""
# Check for electrons as dynamical species
if "e" not in self.species_names:
electrons = {
"name": "electron_background",
"number_density": (
self.species_charges.transpose() @ self.species_concentrations * self.total_num_density / self.qe
),
}
if hasattr(self, "electron_temperature_eV"):
electrons["temperature_eV"] = self.electron_temperature_eV
electrons["temperature"] = self.eV2K * self.electron_temperature_eV
elif hasattr(self, "electron_temperature"):
electrons["temperature"] = self.electron_temperature
electrons["temperature_eV"] = self.electron_temperature | |
# -*- encoding:utf-8 -*-
"""
边裁基础实现模块
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import logging
import os
from abc import abstractmethod
import numpy as np
import sklearn.preprocessing as preprocessing
from enum import Enum
from sklearn.metrics.pairwise import pairwise_distances
from ..CoreBu import ABuEnv
from ..UtilBu import ABuFileUtil
from ..SimilarBu.ABuCorrcoef import ECoreCorrType, corr_xy
from .ABuUmpBase import AbuUmpBase
# noinspection PyUnresolvedReferences
from ..CoreBu.ABuFixes import filter
__author__ = '阿布'
__weixin__ = 'abu_quant'
"""在predict中度量输入的x和矩阵中其它矢量的pairwise_distances后,通过if distances_cx.min() > K_DISTANCE_THRESHOLD过滤"""
K_DISTANCE_THRESHOLD = 0.668
"""从第一轮pairwise_distances的结果使用argsort后取K_N_TOP_SEED个做为第二轮相似匹配的种子"""
K_N_TOP_SEED = 100
"""完成第二轮相似度匹配后使用K_SIMILAR_THRESHOLD做为阀值过滤后得到有投票权的向量"""
K_SIMILAR_THRESHOLD = 0.91
"""
K_CG_TOP_RATE做为计算win_top和loss_top
win_top = len(self.fiter.df['profit_cg']) - len(self.fiter.df['profit_cg']) * K_CG_TOP_RATE
eg:
len(self.fiter.df['profit_cg']) == 100
-> win_top = 100 - 100 * 0.236
-> win_top = 100 - 23.6
-> win_top = 76.4
loss_top = len(self.fiter.df['profit_cg']) * K_CG_TOP_RATE
eg:
len(self.fiter.df['profit_cg']) == 100
-> loss_top = 100 * 0.236
-> loss_top = 23.6
"""
K_CG_TOP_RATE = 0.236
"""在predict中最后的投票结果需要大于一定比例才被认可, 即对有争议的投票需要一方拥有相对优势才认可"""
K_EDGE_JUDGE_RATE = 0.618
class EEdgeType(Enum):
    """Classification of trades after ranking their profit/loss."""

    # Bottom tier: the trades with the largest losses.
    E_EEdge_TOP_LOSS = -1
    # Middle tier: ordinary winning/losing trades; the majority of the training set.
    E_EEdge_NORMAL = 0
    # Top tier: the trades with the largest profits.
    E_STORE_TOP_WIN = 1
"""在第二轮的相似度匹配中使用的方法,传递给ABuCorrcoef.corr_xy函数"""
g_similar_type = ECoreCorrType.E_CORE_TYPE_PEARS
class AbuUmpEdgeBase(AbuUmpBase):
"""边裁基类"""
@classmethod
def ump_edge_clf_dump(cls, orders_pd_train, show_info=False, market_name=None):
    """
    Class method: build an edge-judge instance from the training trade set,
    collect its features via fit(), then serialize it locally via dump_clf().

    :param orders_pd_train: training trade set, a pd.DataFrame
    :param show_info: whether to print edge.fiter.df.head(); default False
    :param market_name: unique storage name for the judge; when None the
                        name is derived from the current market set in env
    :return: the fitted AbuUmpEdgeBase subclass instance
    """
    edge_judge = cls(orders_pd_train, market_name=market_name)
    edge_judge.fit()
    edge_judge.dump_clf()
    if show_info:
        print('edge.fiter.df.head():\n', edge_judge.fiter.df.head())
    return edge_judge
@abstractmethod
def get_fiter_class(self):
    """Abstract: subclasses must return the class used to select/build the judge's feature set."""
    pass
@abstractmethod
def get_predict_col(self):
    """Abstract: subclasses must return the feature keys this judge needs for prediction."""
    pass
@classmethod
@abstractmethod
def class_unique_id(cls):
    """
    Unique keyword name of the concrete ump class; class method, subclasses must implement.

    Mainly used when an external user registers a custom ump: the user must
    guarantee the uniqueness of class_unique_id, no internal check is made.
    See extend_ump_block in ABuUmpManager for usage.
    """
    pass
def __init__(self, orders_pd=None, predict=False, market_name=None, **kwarg):
    """
    :param orders_pd: pd.DataFrame of trade orders produced by a backtest;
                      ideally already measured by AbuMetricsBase.fit_metrics
    :param predict: whether this judge is built for prediction rather than training
    :param market_name: unique storage name for training or loading the judge;
                        default None -> derived from the current market set in env
    :param kwarg: passed through to the fiter_cls constructor:
                  self.fiter = self.fiter_cls(orders_pd=orders_pd, **kwarg)
    """
    # Feature-selection class fiter_cls provided by the concrete subclass.
    self.fiter_cls = self.get_fiter_class()
    # Scaler used to standardize the trade features uniformly.
    self.scaler = preprocessing.StandardScaler()
    if isinstance(market_name, ABuEnv.EMarketTargetType):
        market_name = market_name.value
    # Both prediction and training need the judge's unique name;
    # defaults to the current market string, e.g. 'us', 'cn'.
    self.market_name = ABuEnv.g_market_target.value if market_name is None else market_name
    if not predict:
        # TODO: untangle the predict and training data logic, they should not be interleaved.
        if orders_pd is not None and 'profit_cg' not in orders_pd.columns:
            # Metric columns such as profit_cg only exist after AbuMetricsBase has run.
            logging.info('you do better AbuMetricsBase.fit_metrics in orders_pd!!!!')
            from ..MetricsBu.ABuMetricsBase import AbuMetricsBase
            # Only fit_metrics_order is used (not fit_metrics) because e.g. futures or
            # bitcoin have their own metrics classes; fit_metrics_order is generic.
            AbuMetricsBase(orders_pd, None, None, None).fit_metrics_order()
        # Instantiate the feature-building object self.fiter.
        self.fiter = self.fiter_cls(orders_pd=orders_pd, **kwarg)
        """
        通过self.fiter_cls构造形成self.fiter后self.fiter.df中以存在特征
        eg:self.fiter.df
        profit  profit_cg  buy_deg_ang42  buy_deg_ang252  buy_deg_ang60  buy_deg_ang21
        2014-09-24  -22618.04  -0.0566  3.378  3.458  3.458  1.818
        2014-10-24  -29690.28  -0.0742  0.191  2.889  2.809  -1.089
        2014-10-29  18959.19  0.0542  -2.026  16.689  -0.761  1.980
        2014-10-29  148209.36  0.5022  -3.427  -11.956  -8.296  6.507
        2014-10-29  24867.60  0.0952  -2.915  39.469  -6.043  7.046
        """
        # svm is used by default; TODO: make the estimator configurable via parameters.
        self.fiter().estimator.svc()
def fit(self):
    """
    Fit the edge judge on the training set. Much simpler than the main
    judge's training: two columns are added to ``self.fiter.df``:

    * ``p_rk_cg`` -- rank of each trade's ``profit_cg`` value.
    * ``rk`` -- three-way label derived from that rank:
      ``EEdgeType.E_EEdge_TOP_LOSS`` (-1) for the bottom ``K_CG_TOP_RATE``
      fraction, ``EEdgeType.E_STORE_TOP_WIN`` (1) for the top fraction and
      ``EEdgeType.E_EEdge_NORMAL`` (0) for everything in between.

    The cut points are deliberately unbalanced (K_CG_TOP_RATE = 0.236):
    repeatedly biasing the final probabilities is a core design idea here,
    since the whole point of quant trading is an unbalanced outcome
    (winning more than losing).
    """
    # Rank the training trades by their profit ratio; the rank column is
    # appended to the feature frame.
    # TODO: only profit_cg is used for now; profit should eventually be
    # folded into a combined rank weight.
    ranks = self.fiter.df['profit_cg'].rank()
    self.fiter.df['p_rk_cg'] = ranks
    n_trades = len(self.fiter.df['profit_cg'])
    # Unbalanced cut points, e.g. with 100 trades:
    #   win_top  = 100 - 100 * 0.236 = 76.4
    #   loss_top = 100 * 0.236       = 23.6
    win_top = n_trades - n_trades * K_CG_TOP_RATE
    loss_top = n_trades * K_CG_TOP_RATE
    # Split the ranks into three segments rk: -1, 0, 1. The loss condition
    # is listed first so it wins if the segments ever overlapped, matching
    # the original assignment order.
    # noinspection PyTypeChecker
    self.fiter.df['rk'] = np.select(
        [ranks < loss_top, ranks > win_top],
        [EEdgeType.E_EEdge_TOP_LOSS.value, EEdgeType.E_STORE_TOP_WIN.value],
        default=EEdgeType.E_EEdge_NORMAL.value,
    )
def dump_file_fn(self):
    """
    Storage-path rule for the edge judge's local cache:
    ABuEnv.g_project_data_dir + 'ump/ump_edge_' + market_name + '_' + class_unique_id()
    """
    # TODO: if an existing judge would be overwritten, keep a backup and notify.
    cache_name = 'ump/ump_edge_{}_{}'.format(self.market_name, self.class_unique_id())
    return os.path.join(ABuEnv.g_project_data_dir, cache_name)
def dump_clf(self):
    """
    Serialize the edge judge locally. Much simpler than the main judge's
    dump_clf: pack self.fiter.df and self.fiter.x into a single dict and
    hand it to ABuFileUtil.dump_pickle.
    """
    payload = {'fiter_df': self.fiter.df, 'fiter_x': self.fiter.x}
    ABuFileUtil.dump_pickle(payload, self.dump_file_fn(), how='zero')
def predict(self, **kwargs):
"""
边裁交易决策函数,从CachedUmpManager中获取缓存df_x_dict,对kwargs关键字参数所描述的交易特征进行拦截决策
边裁的predict()实现相对主裁来说比较复杂,大致思路如下:
1. 从输入的新交易中挑选需要的特征组成x
2. 将x和之前保存的训练集数据组合concatenate(),一起做数据标准化scaler
3. 使用sklearn.metrics.pairwise.pairwise_distances()度量输入特征和训练集矩阵中的距离序列
4. 取pairwise_distances() TOP个作为种子,继续匹配相似度
5. 相似度由大到小排序,保留大于保留阀值的相似度交易数据做为最终有投票权利的
6. 保留的交易认为是与新交易最相似的交易,保留的交易使用之前非均衡的rk对新交易进行投票
7. 最后的判断需要大于一定比例才被结果认可,即再次启动非均衡
:param kwargs: 需要和子类对象实现的虚方法get_predict_col中获取特征列对应的
关键字参数,eg: buy_deg_ang42=3.378, buy_deg_ang60=3.458
buy_deg_ang21=3.191, buy_deg_ang252=1.818
:return: 是否对kwargs关键字参数所描述的交易特征进行拦截,
EEdgeType: 不拦截: EEdgeType.E_EEdge_NORMAL or EEdgeType.E_STORE_TOP_WIN
拦截: EEdgeType.E_EEdge_TOP_LOSS
"""
# 统一从CachedUmpManager中获取缓存ump,没有缓存的情况下load_pickle
df_x_dict = AbuUmpBase.dump_clf_manager.get_ump(self)
# 从df_x_dict['fiter_df'].columns中筛选特征列
feature_columns = df_x_dict['fiter_df'].columns.drop(['profit', 'profit_cg', 'p_rk_cg', 'rk'])
"""
eg: df_x_dict['fiter_df'].columns
Index(['profit', 'profit_cg', 'buy_deg_ang42', 'buy_deg_ang252',
'buy_deg_ang60', 'buy_deg_ang21', 'p_rk_cg', 'rk'], dtype='object')
drop(['profit', 'profit_cg', 'p_rk_cg', 'rk']
-> ['buy_deg_ang42', 'buy_deg_ang252', 'buy_deg_ang60', 'buy_deg_ang21']
"""
# eg, x: array([ 3.378, 3.458, 3.458, 1.818])
x = np.array([kwargs[col] for col in feature_columns])
x = x.reshape(1, -1)
# 把新的x concatenate到之前保存的矩阵中
con_x = np.concatenate((x, df_x_dict['fiter_x']), axis=0)
# 将输入的x和原始矩阵组装好的新矩阵con_x一起标准化
con_x = self.scaler.fit_transform(con_x)
# 使用输入的x即con_x[0]和矩阵中其它的进行pairwise_distances比较
distances_cx = pairwise_distances(con_x[0].reshape(1, -1), con_x[1:],
metric='euclidean')
distances_cx = distances_cx[0]
"""
eg: distances_cx
array([[ 0. , 0.8432, 1.4371, 2.4178, 3.1302, 1.4371, 3.1774,
2.5422, 1.7465, 3.0011, 0.7233, 2.264 , 0.8279, 0.8279,
2.309 , 1.4878, 1.9396, 0.7438, 0.9731, 0.4494, 2.0755,
2.9762, 4.5869, 5.2029, 0.7362, 0.7362, 3.623 , 0.6105,
0.6105, 1.2288, 2.0991, 2.0991, 3.2272, 0.8599, 0.7419,
0.7419, 0.7804, 2.5241, 1.8116, 2.5373, 2.2742, 2.1726,
3.2738, 1.293 , 2.4555, 2.4555, 2.3358, 2.1673, 2.0187,
2.8637, 2.5066, 1.052 , 1.1481, 1.1481, 1.1175, 1.1175]])
"""
# 如果最小距离大于阀值,认为无效,K_DISTANCE_THRESHOLD = 0.668
if distances_cx.min() > K_DISTANCE_THRESHOLD:
return EEdgeType.E_EEdge_NORMAL
distances_sort = distances_cx.argsort()
"""
eg: distances_sort
array([ 0, 19, 28, 27, 10, 24, 25, 35, 34, 17, 36, 13, 12, 1, 33, 18, 51,
54, 55, 52, 53, 29, 43, 5, 2, 15, 8, 38, 16, 48, 20, 30, 31, 47,
41, 11, 40, 14, 46, 3, 45, 44, 50, 37, 39, 7, 49, 21, 9, 4, 6,
32, 42, 26, 22, 23])
"""
n_top = K_N_TOP_SEED if len(distances_cx) > K_N_TOP_SEED else len(distances_cx)
# 取前100个作为种子继续匹配相似度做数据准备
distances_sort = distances_sort[:n_top]
# 进行第二轮的相似度匹配,使用输入的x即con_x[0]和distances_sort中记录的其它矩阵矢量进行corr_xy
similar_cx = {arg: corr_xy(con_x[0], con_x[arg + 1], g_similar_type) for arg in distances_sort}
"""
eg: similar_cx
{0: 1.0, 19: 0.9197507467964976, 28: 0.57289288329659238, 27: 0.57289288329659238,
10: 0.44603792013583493, 24: 0.4103293780402798, 25: 0.4103293780402798,
35: 0.22026514236282496, 34: 0.22026514236282496, 17: -0.24170074544552811,
36: 0.43863838382081699, 13: 0.16234971594751921, 12: 0.16234971594751921, 1: 0.92424298737490296,
33: 0.47818723914034433, 18: -0.17734957863273493, 51: 0.63704694680797502, 54: 0.75395818997353681,
55: 0.75395818997353681, 52: 0.6485413094804453, 53: 0.6485413094804453,
29: 0.89796883127042837, 43: 0.86342390437553329, 5: 0.12738173851484677,
2: 0.12738173851484677, 15: 0.53496775815355813, 8: -0.92624283913287053,
38: -0.52046967255944876, 16: -0.65837858483393186, 48: 0.26241267262766549,
20: 0.45007515315947716, 30: -0.78037071039800843, 31: -0.78037071039800843,
47: -0.99196576241088685, 41: 0.71286817166895511, 11: -0.57565781272205685,
40: -0.089683927257343574, 14: -0.49743962329463148, 46: -0.84622925585859421, 3: -0.82066914234853283,
45: 0.30735926720691314, 44: 0.30735926720691314, 50: 0.010871213734502339, 37: -0.65150765047066517,
39: -0.38809703338219459, 7: -0.57947244493007666, 49: -0.33103296960584466, 21: 0.69444344588208717,
9: -0.3435188573004419, 4: -0.39204446380766983, 6: -0.54996919528831723, 32: -0.9481034251744791,
42: 0.20829094732022327, 26: 0.9936229414412624, 22: -0.35972456962349542, 23: -0.085747705364200594}
"""
# 相似度大到小排序
similar_sorted = sorted(zip(similar_cx.values(), similar_cx.keys()))[::-1]
"""
eg: similar_sorted
[(1.0, 0), (0.9936229414412624, 26), (0.92424298737490296, 1), (0.9197507467964976, 19), (
0.89796883127042837, 29), (0.86342390437553329, 43), (0.75395818997353681, 55), (0.75395818997353681, 54),
(0.71286817166895511, 41), (0.69444344588208717, 21), (0.6485413094804453, 53), (0.6485413094804453, 52),
(0.63704694680797502, 51), (0.57289288329659238, 28), (0.57289288329659238, 27), (0.53496775815355813, 15),
(0.47818723914034433, 33), | |
<gh_stars>0
from .fhirbase import fhirbase
class Sequence(fhirbase):
    """
    Raw data describing a biological sequence.

    Attributes:
        resourceType: This is a Sequence resource
        identifier: A unique identifier for this particular sequence instance.
            This is a FHIR-defined id.
        type: Amino Acid Sequence/ DNA Sequence / RNA Sequence.
        coordinateSystem: Whether the sequence is numbered starting at 0
            (0-based numbering, inclusive start, exclusive end) or starting
            at 1 (1-based numbering, inclusive start and inclusive end).
        patient: The patient whose sequencing results are described by this
            resource.
        specimen: Specimen used for sequencing.
        device: The method for sequencing, for example, chip information.
        performer: The organization or lab responsible for this result.
        quantity: The number of copies of the seqeunce of interest. (RNASeq).
        referenceSeq: A sequence used as a reference to describe variants
            present in a sequence analyzed.
        variant: Amino acid / nucleic sequence changes (insertion, deletion,
            SNP, etc.); complex mutations may use a CIGAR string.
        observedSeq: Sequence that was observed, from
            referenceSeq.windowStart to referenceSeq.windowEnd.
        quality: Quality of the feature in a quantitative way, such as a
            phred quality score.
        readCoverage: Average number of reads representing a given
            nucleotide in the reconstructed sequence.
        repository: Configurations of the external repository storing the
            observedSeq or related records.
        pointer: Pointer to next atomic sequence which at most contains one
            variant.
    """

    __name__ = 'Sequence'

    def __init__(self, dict_values=None):
        # This resource is always of type 'Sequence'.
        self.resourceType = 'Sequence'
        # type: str
        # possible values: Sequence

        self.type = None
        # type: str
        # possible values: aa, dna, rna

        self.coordinateSystem = None
        # type: int

        self.patient = None
        # reference to Reference: identifier

        self.specimen = None
        # reference to Reference: identifier

        self.device = None
        # reference to Reference: identifier

        self.performer = None
        # reference to Reference: identifier

        self.quantity = None
        # reference to Quantity

        self.referenceSeq = None
        # reference to Sequence_ReferenceSeq

        self.variant = None
        # type: list
        # reference to Sequence_Variant

        self.observedSeq = None
        # type: str

        self.quality = None
        # type: list
        # reference to Sequence_Quality

        self.readCoverage = None
        # type: int

        self.repository = None
        # type: list
        # reference to Sequence_Repository

        self.pointer = None
        # type: list
        # reference to Reference: identifier

        self.identifier = None
        # type: list
        # reference to Identifier

        if dict_values:
            self.set_attributes(dict_values)
            self.assert_type()

    def assert_type(self):
        """Validate that ``type`` is one of the allowed codes ('aa', 'dna', 'rna').

        Raises:
            ValueError: if ``type`` is set and is not an allowed code.
        """
        # BUG FIX: the original iterated ``self.type`` (a str) character by
        # character, so every valid value such as 'dna' raised a spurious
        # ValueError. Validate the whole value instead.
        if self.type is not None and self.type.lower() not in ('aa', 'dna', 'rna'):
            raise ValueError('"{}" does not match possible values: {}'.format(
                self.type, 'aa, dna, rna'))

    def get_relationships(self):
        """Return the parent/child relationship descriptors for this resource."""
        return [
            {'parent_entity': 'Reference',
             'parent_variable': 'identifier',
             'child_entity': 'Sequence',
             'child_variable': 'patient'},

            {'parent_entity': 'Reference',
             'parent_variable': 'identifier',
             'child_entity': 'Sequence',
             'child_variable': 'pointer'},

            {'parent_entity': 'Sequence_Quality',
             'parent_variable': 'object_id',
             'child_entity': 'Sequence',
             'child_variable': 'quality'},

            {'parent_entity': 'Sequence_Repository',
             'parent_variable': 'object_id',
             'child_entity': 'Sequence',
             'child_variable': 'repository'},

            {'parent_entity': 'Reference',
             'parent_variable': 'identifier',
             'child_entity': 'Sequence',
             'child_variable': 'specimen'},

            {'parent_entity': 'Sequence_ReferenceSeq',
             'parent_variable': 'object_id',
             'child_entity': 'Sequence',
             'child_variable': 'referenceSeq'},

            {'parent_entity': 'Reference',
             'parent_variable': 'identifier',
             'child_entity': 'Sequence',
             'child_variable': 'performer'},

            {'parent_entity': 'Identifier',
             'parent_variable': 'object_id',
             'child_entity': 'Sequence',
             'child_variable': 'identifier'},

            {'parent_entity': 'Sequence_Variant',
             'parent_variable': 'object_id',
             'child_entity': 'Sequence',
             'child_variable': 'variant'},

            {'parent_entity': 'Quantity',
             'parent_variable': 'object_id',
             'child_entity': 'Sequence',
             'child_variable': 'quantity'},

            {'parent_entity': 'Reference',
             'parent_variable': 'identifier',
             'child_entity': 'Sequence',
             'child_variable': 'device'},
        ]
class Sequence_ReferenceSeq(fhirbase):
    """
    A reference sequence against which variants in an analyzed sequence
    are described.

    Attributes:
        chromosome: Structural unit composed of a nucleic acid molecule
            which controls its own replication (SO:0000340).
        genomeBuild: The Genome Build used for reference, e.g. 'GRCh 37';
            version number must be included for a versioned release.
        referenceSeqId: Reference identifier of the sequence submitted to
            NCBI; must match Sequence.type (NG_ genes, NM_ mRNA, NP_ amino
            acid sequences).
        referenceSeqPointer: A pointer to another Sequence entity used as
            the reference sequence.
        referenceSeqString: A string like "ACGT".
        strand: Directionality of the DNA sequence: "1" for the plus strand
            (5' to 3')/Watson/Sense/positive, "-1" for the minus strand
            (3' to 5')/Crick/Antisense/negative.
        windowStart: Start of the window on the reference sequence
            (inclusive in both 0-based and 1-based systems).
        windowEnd: End of the window on the reference sequence (exclusive
            if 0-based, inclusive if 1-based).
    """

    __name__ = 'Sequence_ReferenceSeq'

    def __init__(self, dict_values=None):
        self.chromosome = None           # reference to CodeableConcept
        self.genomeBuild = None          # str
        self.referenceSeqId = None       # reference to CodeableConcept
        self.referenceSeqPointer = None  # reference to Reference: identifier
        self.referenceSeqString = None   # str
        self.strand = None               # int
        self.windowStart = None          # int
        self.windowEnd = None            # int
        self.object_id = None            # unique identifier for object class
        if dict_values:
            self.set_attributes(dict_values)

    def get_relationships(self):
        """Return the parent/child relationship descriptors for this backbone element."""
        return [
            dict(parent_entity='CodeableConcept',
                 parent_variable='object_id',
                 child_entity='Sequence_ReferenceSeq',
                 child_variable='chromosome'),

            dict(parent_entity='CodeableConcept',
                 parent_variable='object_id',
                 child_entity='Sequence_ReferenceSeq',
                 child_variable='referenceSeqId'),

            dict(parent_entity='Reference',
                 parent_variable='identifier',
                 child_entity='Sequence_ReferenceSeq',
                 child_variable='referenceSeqPointer'),
        ]
class Sequence_Variant(fhirbase):
    """
    A variant (sequence change) relative to the reference sequence.

    Attributes:
        start: Start position of the variant on the reference sequence
            (inclusive in both 0-based and 1-based systems).
        end: End position of the variant on the reference sequence
            (exclusive if 0-based, inclusive if 1-based).
        observedAllele: Nucleotide(s)/amino acids from start to stop on the
            positive (+) strand of the observed sequence (SO:0001023);
            lies within [variant.start, variant.end].
        referenceAllele: Nucleotide(s)/amino acids from start to stop on the
            positive (+) strand of the reference sequence (SO:0001023);
            lies within [variant.start, variant.end].
        cigar: Extended CIGAR string for aligning the sequence with
            reference bases.
        variantPointer: A pointer to an Observation containing variant
            information.
    """

    __name__ = 'Sequence_Variant'

    def __init__(self, dict_values=None):
        self.start = None           # int
        self.end = None             # int
        self.observedAllele = None  # str
        self.referenceAllele = None # str
        self.cigar = None           # str
        self.variantPointer = None  # reference to Reference: identifier
        self.object_id = None       # unique identifier for object class
        if dict_values:
            self.set_attributes(dict_values)

    def get_relationships(self):
        """Return the parent/child relationship descriptors for this backbone element."""
        return [
            dict(parent_entity='Reference',
                 parent_variable='identifier',
                 child_entity='Sequence_Variant',
                 child_variable='variantPointer'),
        ]
class Sequence_Quality(fhirbase):
"""
Raw data describing a biological sequence.
Attributes:
type: INDEL / SNP / Undefined variant.
standardSequence: Gold standard sequence used for comparing against.
start: Start position of the sequence. If the coordinate system is
either 0-based or 1-based, then start position is inclusive.
end: End position of the sequence.If the coordinate system is 0-based
then end is is exclusive and does not include the last position. If
the coordinate system is 1-base, then end is inclusive and includes
the last position.
score: The score of an experimentally derived feature such as a
p-value
([SO:0001685](http://www.sequenceontology.org/browser/current_svn/term/SO:0001685)).
method: Which method is used to get sequence quality.
truthTP: True positives, from the perspective of the truth data, i.e.
the number of sites in the Truth Call Set | |
# synchronize() handles things in this case
return
# it is actually _get_dcvalue_from_file that guarantees that
# referenced nodes actually exist in the file...
# if xmlpath=="dc:summary/dc:dest":
# import pdb as pythondb
# pythondb.set_trace()
# pass
newval=self._get_dcvalue_from_file(xmldocobj,xmlpath,ETxmlpath)
# print xmldocobj.extensions
# print xmlpath
# if "units" in xmlel.attrib:
# units=xmlel.attrib["{http://limatix.org/dcvalue}units"]
#
# if self.controlparam.defunits is not None:
# newval=self.controlparam.paramtype(newvalue,units=units,defunits=self.controlparam.defunits)
# pass
# else :
# # print type(self.controlparam.paramtype)
# newval=self.controlparam.paramtype(newvalue,units=units)
# pass
# pass
#
# else :
# if self.controlparam.defunits is not None:
# newval=self.controlparam.paramtype(newvalue,defunits=self.controlparam.defunits)
# pass
# else :
# # print type(self.controlparam.paramtype)
# newval=self.controlparam.paramtype(newvalue)
# pass
# pass
if newval != self.controlparam.dcvalue:
if initialload:
self.synchronizeinitialload(initialloadvalue=newval,initialloadparams=(xmldocobj,xmlpath,ETxmlpath,logfunc))
pass
else:
self.synchronize()
pass
pass
pass
def synchronizeinitialload(self,initialloadvalue,initialloadparams):
    """
    Merge a value just read from a file with the current in-memory value
    during the initial load of that file, then propagate the merged result.

    Parameters
    ----------
    initialloadvalue :
        The dc_value just parsed from the file.
    initialloadparams : tuple
        ``(xmldocobj, xmlpath, ETxmlpath, logfunc)`` describing where the
        value came from; also used as an extra context source.
    """
    # if we are performing the initial synchronization of an existing file use the no-parent merge semantics (any non-blank)
    #sys.stderr.write("no-parent merge for %s: %s and %s\n" % (self.controlparam.xmlname,str(newval),str(self.controlparam.dcvalue)))
    (xmldocobj,xmlpath,ETxmlpath,logfunc)=initialloadparams
    #import pdb as pythondb
    #if self.controlparam.xmlname=="specimen" and len(str(newval))==0 and len(str(self.controlparam.dcvalue))==0:
    #    pythondb.set_trace()
    #    pass
    # for synced accumulating date support:
    #initialloadvalue=self.createvalueobj(initialloadvalue)
    contexthref=self.find_a_context_href(initialloadparams)
    # Human-readable path for error messages; fall back to the ElementTree
    # xpath rendering when no plain xpath is available.
    humanpath=xmlpath
    if xmlpath is None:
        humanpath=etxpath2human(ETxmlpath,xmldocobj.nsmap)
        pass
    try:
        # sys.stderr.write("initialloadvalue=%s %s; self.controlparam.dcvalue=%s %s\n" % (initialloadvalue.__class__.__name__,str(initialloadvalue),self.controlparam.dcvalue.__class__.__name__,str(self.controlparam.dcvalue)))
        # domerge enforces the correct value class by using that class to do the merge
        mergedval=self.domerge(humanpath,None,"None",[ initialloadvalue, self.controlparam.dcvalue ],[xmldocobj._filename,"in memory"],contexthref=contexthref,manualmerge=True,**self.mergekwargs)
        #sys.stderr.write("mergedval=%s\n\n" % (str(mergedval)))
        pass
    except ValueError as e:
        # Re-raise with the parameter name and file URL for diagnosis.
        raise ValueError("Error performing initial merge of information for parameter %s from URL %s: %s" % (self.controlparam.xmlname, str(xmldocobj.filehref),str(e)))
    pass
    if mergedval != initialloadvalue:
        # need to update file we just loaded
        self.update_file(xmldocobj,xmlpath,ETxmlpath,mergedval,logfunc,"Text Field %s Updated on file initial load" % (self.controlparam.xmlname))
        pass
    if mergedval != self.controlparam.dcvalue:
        # need to update everything else
        # Pass initialloadparams so we can get a good contexthref for synchronization if one doesn't already exist
        self.synchronize(mergedval,initialloadparams)
        pass
    pass
def synchronize(self,requestedvalue=None,requestedvalueparams=None):
    """Merge the in-memory parameter value with the values stored in every
    file in self.doclist (optionally including a newly requested value),
    write the merged result back to any file that differs, and update the
    in-memory parameter if it changed.

    requestedvalue       -- optional new value being requested for this
                            parameter; included as an extra merge input.
    requestedvalueparams -- optional (xmldocobj,xmlpath,ETxmlpath,logfunc)
                            tuple used as an extra context source for
                            find_a_context_href().

    Raises ValueError (re-wrapped with parameter/file context) on merge
    failure. All file locks taken here are released in the finally clause.
    """
    # prevent nested synchronization attempts
    # as we lock files
    #
    # requestedvalueparams is a (xmldocobj,xmlpath,ETxmlpath,logfunc)
    # tuple that can be used as an extra context source for
    # find_a_context_href()
    self.in_synchronize=True
    xmldocobj=None   # tracked so the except clause can report which file failed
    locklist=[]      # docs successfully locked so far; unlocked in finally
    try :
        # attempt a real merge, with our in-memory value as the old value
        # lock all files
        # ***BUG*** Opportunity for deadlock with another process
        # working on the same files because our doclists aren't
        # necessarily in the same order!!!
        for (xmldocobj,xmlpath,ETxmlpath,logfunc) in self.doclist:
            xmldocobj.lock_rw()
            locklist.append(xmldocobj)
            pass
        oldvalue=self.controlparam.dcvalue
        mergevalues=[]
        mergesources=[]
        if requestedvalue is not None:
            # The requested change participates in the merge like any
            # other source.
            mergevalues.append(requestedvalue)
            mergesources.append("requested change")
            pass
        humanpath=None
        # read in values of all copies
        for (xmldocobj,xmlpath,ETxmlpath,logfunc) in self.doclist:
            if humanpath is None:
                # First entry determines the human-readable path used in
                # merge diagnostics.
                humanpath=xmlpath
                if xmlpath is None:
                    humanpath=etxpath2human(ETxmlpath,xmldocobj.nsmap)
                    pass
                pass
            mergevalues.append(self._get_dcvalue_from_file(xmldocobj,xmlpath,ETxmlpath))
            mergesources.append(xmldocobj.get_filehref().absurl())
            pass
        #sys.stderr.write("mergevalues=%s\n" % (str(mergevalues)))
        # sys.stderr.write("parent merge for %s: %s and %s\n" % (self.controlparam.xmlname,str(newval),str(self.controlparam.dcvalue)))
        # Perform merge
        #sys.stderr.write("oldvalue=%s %s\n" % (oldvalue.__class__.__name__,str(oldvalue)))
        #for mv in mergevalues:
        #    sys.stderr.write("mv=%s %s\n" % (mv.__class__.__name__,str(mv)))
        # If we are requesting a non-blank value that can provide a context URL, always use that for the merge context
        if requestedvalue is not None and hasattr(requestedvalue,"getcontexthref") and not(requestedvalue.getcontexthref().isblank()):
            contexthref=requestedvalue.getcontexthref()
            pass
        else:
            contexthref=self.find_a_context_href(requestedvalueparams)
            pass
        # domerge enforces the correct value class by using that class to do the merge
        mergedval=self.domerge(humanpath,oldvalue,"in memory",mergevalues,mergesources,contexthref=contexthref,**self.mergekwargs)
        # sys.stderr.write("mergedval=%s\n\n" % (str(mergedval)))
        # createvalueobj can be overridden by derived class (used for current implementation of expanding date -- probably should be redone with some kind of merge override instead)
        #mergedval=self.createvalueobj(mergedval)
        if requestedvalue is None:
            logfuncmsg="Text Field %s Updated Due to File Resync" % (self.controlparam.xmlname)
            pass
        else :
            logfuncmsg="Text Field %s Updated" % (self.controlparam.xmlname)
            pass
        # Write to files if necessary
        for (xmldocobj,xmlpath,ETxmlpath,logfunc) in self.doclist:
            self.update_file(xmldocobj,xmlpath,ETxmlpath,mergedval,logfunc=logfunc,logfuncmsg=logfuncmsg)
            pass
        # Update param if necessary
        if mergedval != oldvalue:
            # if self.controlparam.paramtype is dc_value.xmltreevalue:
            #     sys.stderr.write("Assigning merged value to %s (contexthref=%s; specified contexthref=%s): %s\n" % (self.controlparam.xmlname,mergedval._xmltreevalue__xmldoc.getcontexthref().absurl(),contexthref.absurl(),str(mergedval)))
            #     pass
            self.controlparam.assignval(mergedval,self.id)
            pass
        #import pdb as pythondb
        #if self.controlparam.xmlname=="specimen" and len(str(newval))==0:
        #    pythondb.set_trace()
        #    pass
        pass
    except ValueError as e:
        URL="None"
        if xmldocobj is not None:
            URL=xmldocobj.filehref.absurl()
            pass
        raise ValueError("Error merging information for parameter %s from file %s: %s" % (self.controlparam.xmlname, str(URL),str(e)))
    finally:
        # unlock all files
        for xmldocobj in locklist:
            xmldocobj.unlock_rw()
            pass
        self.in_synchronize=False
        pass
    # update this file, if necessary
    #self.update_file(xmldocobj,xmlpath,ETxmlpath,mergedval,logfunc)
    #sys.stderr.write("update_file: %s %s -> %s\n" % (xmldocobj.filename,self.controlparam.xmlname,str(mergedval)))
    #if mergedval != self.controlparam.dcvalue:
    # controller value has changed... may need to update
    # everything else
    #    self.controlparam.assignval(mergedval,self.id)
    #    # update everything else
    #    for (listxmldocobj,listxmlpath,listETxmlpath,listlogfunc) in self.doclist:
    #        if listxmldocobj is not xmldocobj:
    #            if xmldocobj.autoresync:
    #                xmldocobj._resync()
    #                pass
    #            listxmldocobj.lock_rw()
    #            # print "Updating file..."
    #            try:
    #                self.update_file(listxmldocobj,listxmlpath,listETxmlpath,mergedval,logfunc=listlogfunc)
    #                pass
    #            finally:
    #                listxmldocobj.unlock_rw()
    #                pass
    #            pass
    #        pass
    #
    #
    #    pass
    pass
def update_file(self,xmldocobj,xmlpath,ETxmlpath,valueobj,logfunc,logfuncmsg):
    """Write valueobj into the element addressed by xmlpath/ETxmlpath in
    xmldocobj, but only if the value currently stored there differs.

    xmldocobj MUST already be locked (rw) when making this call.

    xmlpath    -- plain XPath to the target element, or None to use
                  ETxmlpath instead.
    ETxmlpath  -- ElementTree-style (Clark notation) path, used when
                  xmlpath is None; must match exactly one element.
    valueobj   -- value object to serialize into the element.
    logfunc    -- optional logging callback invoked when an update occurs.
    logfuncmsg -- message passed to logfunc.

    Raises ValueError if ETxmlpath does not match exactly one element.
    """
    # xmldocobj MUST be locked when making this call
    # sys.stderr.write("Updating file: %s %s %s\n" % (xmlpath,ETxmlpath,str(valueobj)))
    # sys.stderr.write("lock count: %d %d\n" % (xmldocobj.ro_lockcount,xmldocobj.rw_lockcount))
    if xmlpath is None:
        # No plain XPath: evaluate the ElementTree-style path and require
        # a unique match.
        ETXobj=etree.ETXPath(ETxmlpath)
        foundelement=ETXobj(xmldocobj.doc)
        if len(foundelement) != 1:
            raise ValueError("Non-unique result in update_file for %s (len=%d)" % (ETxmlpath,len(foundelement)))
        xmltag=foundelement[0]
        pass
    else:
        xmltag=xmldocobj.xpathsingle(xmlpath)
        pass
    # contexthref=self.find_a_context_href()
    #filevalue=self.controlparam.paramtype.fromxml(xmldocobj,xmltag,defunits=self.controlparam.defunits,xml_attribute=self.controlparam.xml_attribute,contextdir=contextdir)
    # Read back the value currently in the file so we only write (and log,
    # and mark the doc modified) when something actually changes.
    filevalue=self.controlparam.paramtype.fromxml(xmldocobj,xmltag,defunits=self.controlparam.defunits)
    if filevalue != valueobj: # update needed
        # sys.stderr.write("Upddate needed: %s != %s\n" % (str(filevalue),str(valueobj)))
        if logfunc is not None:
            logfunc(logfuncmsg,item=self.controlparam.xmlname,action="updatetext",value=str(valueobj))
            pass
        if self.controlparam.defunits is not None:
            # write representation into XML element
            valueobj.xmlrepr(xmldocobj,xmltag,defunits=self.controlparam.defunits) # ,xml_attribute=self.controlparam.xml_attribute)
            pass
        else :
            # print type(self.controlparam.paramtype)
            # print "id=%x autoflush=%s" % (id(xmldocobj),str(xmldocobj.autoflush))
            valueobj.xmlrepr(xmldocobj,xmltag) # xml_attribute=self.controlparam.xml_attribute)
            pass
        # Record provenance for the regenerated element and flag the
        # document as needing a flush.
        provenance.elementgenerated(xmldocobj,xmltag)
        xmldocobj.modified=True
        pass
    pass
    # if xmldocobj.autoflush:
    #     xmldocobj.flush()
    #     pass
    pass
def requestvalcallback(self,newvalue,requestid,*cbargs):
    """Main-loop callback (scheduled by requestval() via gobject.timeout_add)
    that actually applies a requested value by synchronizing it.

    newvalue  -- the value that was requested.
    requestid -- the request identifier list handed out by requestval().
    cbargs    -- optional client callback as cbargs[0], plus extra
                 arguments passed through to it. The callback signature is
                 callback(param, requestid, errorstr, newvalue, *extra).

    Returns False so the glib timeout source fires exactly once.
    """
    self.numpending -= 1
    if (self.numpending==0):
        self.state=self.controlparam.CONTROLLER_STATE_QUIESCENT
        pass
    # print "doclist=%s" % (str(self.doclist))
    try:
        self.synchronize(requestedvalue=newvalue)
        pass
    except:
        # Give error client callback, then raise exception
        # NOTE: bare except is deliberate here -- the client callback must
        # be informed of any failure before the exception propagates.
        if len(cbargs) > 0:
            (exctype,excvalue)=sys.exc_info()[:2]
            clientcallback=cbargs[0]
            clientcallback(self.controlparam,requestid,str(excvalue),None,*cbargs[1:])
            pass
        raise
    #oldvalue=self.controlparam.dcvalue
    ## valueobj=self.controlparam.paramtype(newvalue,defunits=self.controlparam.defunits)
    # valueobj=self.createvalueobj(newvalue)
    #mergevalues=[ valueobj ] # list of values to merge
    ## lock each file smultaneously. Normally we would try to avoid this to eliminate the risk of
    ## deadlock, but since we are in the mainloop, nobody should have anything locked (hopefully!)
    #for (xmldocobj,xmlpath,ETxmlpath,logfunc) in self.doclist:
    ## xmldocobj.shouldbeunlocked()
    # xmldocobj.lock_rw() # the lock_rw triggers an xmlresync that can call assignval with a new value
    # # update mergevalues with this entry
    # mergevalues.append(self.controlparam.dcvalue)
    # pass
    #
    #try:
    ## attempt to merge
    #mergedval=self.domerge(oldvalue,mergevalues,**self.mergekwargs)
    ## attempt to update file
    # for (xmldocobj,xmlpath,ETxmlpath,logfunc) in self.doclist:
    #     self.update_file(xmldocobj,xmlpath,ETxmlpath,mergedval,logfunc=logfunc)
    #     pass
    #
    # pass
    #finally:
    # for (xmldocobj,xmlpath,ETxmlpath,logfunc) in self.doclist:
    #     xmldocobj.unlock_rw()
    #     pass
    # pass
    ## sys.stderr.write("assign %s=%s\n" % (self.controlparam.xmlname,str(valueobj)))
    #self.controlparam.assignval(valueobj,self.id)
    #for (xmldocobj,xmlpath,ETxmlpath,logfunc) in self.doclist:
    # if logfunc is not None:
    #     logfunc("Text Field %s Updated" % (self.controlparam.xmlname),item=self.controlparam.xmlname,action="updatetext",value=str(self.controlparam.dcvalue)) #
    #     pass
    # pass
    # Success: report the (possibly merged) final value back to the client.
    if len(cbargs) > 0:
        clientcallback=cbargs[0]
        clientcallback(self.controlparam,requestid,None,self.controlparam.dcvalue,*cbargs[1:])
        pass
    return False
# requestval is a request that this parameter take on the requested value
# (This is an asynchronous request. Will get callback when complete)
# returns request identifier that can be used to cancel request
# callback(param,requestid,errorstr,newvalue,*cbargs)
def requestval(self, param, newvalue, *cbargs):
    """Asynchronously request that this parameter take on newvalue.

    The work is deferred to the glib main loop via a zero-delay timeout;
    the optional client callback in cbargs[0] is invoked on completion as
    callback(param, requestid, errorstr, newvalue, *cbargs[1:]).

    Returns a request identifier (usable with cancelrequest()): a
    single-element list containing the gobject source id.
    """
    request_handle = []
    source_id = gobject.timeout_add(0, self.requestvalcallback, newvalue,
                                    request_handle, *cbargs)
    request_handle.append(source_id)
    # Bookkeeping: mark the controller busy until the callback runs.
    self.numpending += 1
    self.state = self.controlparam.CONTROLLER_STATE_REQUEST_PENDING
    return request_handle
def cancelrequest(self, param, requestid):
    """Attempt to cancel a pending requestval() request.

    requestid is the handle returned by requestval() (a one-element list
    holding the gobject source id). Returns True if the request was
    successfully canceled before it ran, False otherwise.
    """
    if not gobject.source_remove(requestid[0]):
        return False
    # The scheduled callback will never fire, so undo its bookkeeping.
    self.numpending -= 1
    if self.numpending == 0:
        self.state = self.controlparam.CONTROLLER_STATE_QUIESCENT
    return True
pass
#class synced_accumulating_dates(synced):
# # This class is a paramdb2 controller for sync'd date elements
# # that accumulate instead of replace as you change them.
# # It is compatible with dc_value.datesetvalue.
#
# def __init__(self,controlparam):
# synced.__init__(self,controlparam)
#
# pass
#
#
# def valueobjfromxml(self,xmldocobj,xmlel):
# # this is a separate method so it can be overridden by derived
# # class for implementing expanding date class
# #!!!*** BUG!! This should really be moved into a domerge() function !!!***
#
# newval=self.controlparam.paramtype.fromxml(xmldocobj,xmlel,self.controlparam.defunits)
# oldval=self.controlparam.dcvalue
#
# # Form union of desired new value with previous value
# return newval.union(oldval)
#
# def createvalueobj(self,newvalue):
# # this is a separate method so | |
<filename>libtaxii/taxii_default_query.py
# Copyright (c) 2017, The MITRE Corporation
# For license information, see the LICENSE.txt file
"""
Creating, handling, and parsing TAXII Default Queries.
"""
import numbers
import datetime
from operator import attrgetter
import os
import dateutil.parser
from lxml import etree
import libtaxii.messages_11 as tm11
from .common import TAXIIBase
from .validation import (do_check, uri_regex, targeting_expression_regex)
from .constants import *
import six
class CapabilityModule(object):
    """A TAXII Default Query capability module.

    A capability module is identified by an ID (a string) and defines the
    set of relationships it supports; each relationship in turn has 0-n
    valid parameters. Relationships are stored indexed by their name.
    """

    def __init__(self, capability_module_id, relationships):
        self.capability_module_id = capability_module_id
        self.relationships = relationships

    @property
    def capability_module_id(self):
        return self._capability_module_id

    @capability_module_id.setter
    def capability_module_id(self, value):
        do_check(value, 'capability_module_id', type=six.string_types)
        self._capability_module_id = value

    @property
    def relationships(self):
        return self._relationships

    @relationships.setter
    def relationships(self, value):
        # Validate every entry is a Relationship, then index by name.
        do_check(value, 'relationships', type=Relationship)
        self._relationships = {rel.name: rel for rel in value}
class Relationship(object):
    """A named relationship supported by a capability module, together
    with the parameters that may accompany it (indexed by parameter name).
    """

    def __init__(self, name, parameters=None):
        self.name = name
        self.parameters = parameters or []

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, value):
        do_check(value, 'name', type=six.string_types)
        self._name = value

    @property
    def parameters(self):
        return self._parameters

    @parameters.setter
    def parameters(self, value):
        # Validate every entry is a Parameter, then index by name.
        do_check(value, 'parameters', type=Parameter)
        self._parameters = {param.name: param for param in value}
class Parameter(object):
    """A parameter accepted by a relationship.

    ``type`` is the expected Python type of the parameter's value (the
    name intentionally shadows the builtin to keep the public keyword
    argument unchanged); ``value_tuple`` optionally restricts the value
    to a fixed set of allowed values.
    """

    def __init__(self, name, type, value_tuple=None):
        self.name = name
        self.type = type
        self.value_tuple = value_tuple

    def verify(self, value):
        """Validate ``value`` against this parameter's type/value rules.

        do_check raises on failure, so reaching the return statement
        means the value is acceptable.
        """
        do_check(value, 'value', type=self.type, value_tuple=self.value_tuple)
        return True, 'OK'
# params - Define parameters for the Core/Regex/Timestamp capability modules
# (shared Parameter instances referenced by multiple relationships below)
param_str_value = Parameter(P_VALUE, six.string_types)
param_float_value = Parameter(P_VALUE, float)
param_ts_value = Parameter(P_VALUE, datetime.datetime)
param_match_type = Parameter(P_MATCH_TYPE, six.string_types, ('case_sensitive_string', 'case_insensitive_string', 'number'))
param_case_sensitive = Parameter(P_CASE_SENSITIVE, bool, (True, False))
# CORE Relationships - Define relationships for the core capability module
rel_equals = Relationship(R_EQUALS, [param_str_value, param_match_type])
rel_not_equals = Relationship(R_NOT_EQUALS, [param_str_value, param_match_type])
rel_greater_than = Relationship(R_GREATER_THAN, [param_float_value])
rel_greater_than_or_equal = Relationship(R_GREATER_THAN_OR_EQUAL, [param_float_value])
rel_less_than = Relationship(R_LESS_THAN, [param_float_value])
rel_less_than_or_equal = Relationship(R_LESS_THAN_OR_EQUAL, [param_float_value])
# Existence tests take no parameters.
rel_dne = Relationship(R_DOES_NOT_EXIST)
rel_ex = Relationship(R_EXISTS)
rel_begins_with = Relationship(R_BEGINS_WITH, [param_case_sensitive, param_str_value])
rel_ends_with = Relationship(R_ENDS_WITH, [param_case_sensitive, param_str_value])
rel_contains = Relationship(R_CONTAINS, [param_case_sensitive, param_str_value])
# REGEX relationships
rel_matches = Relationship(R_MATCHES, [param_case_sensitive, param_str_value])
# TIMESTAMP relationships (same relationship names as core, but the value
# parameter is a datetime rather than a string/float)
rel_ts_eq = Relationship(R_EQUALS, [param_ts_value])
rel_ts_gt = Relationship(R_GREATER_THAN, [param_ts_value])
rel_ts_gte = Relationship(R_GREATER_THAN_OR_EQUAL, [param_ts_value])
rel_ts_lt = Relationship(R_LESS_THAN, [param_ts_value])
rel_ts_lte = Relationship(R_LESS_THAN_OR_EQUAL, [param_ts_value])
# CORE - Define the Core Capability Module
cm_core = CapabilityModule(CM_CORE,
                           [rel_equals, rel_not_equals, rel_greater_than,
                            rel_greater_than_or_equal, rel_less_than,
                            rel_less_than_or_equal, rel_dne, rel_ex,
                            rel_begins_with, rel_contains, rel_ends_with]
                           )
# REGEX - Define the RegEx Capability Module
cm_regex = CapabilityModule(CM_REGEX, [rel_matches])
# TIMESTAMP - Define the timestamp Capability Module
cm_timestamp = CapabilityModule(CM_TIMESTAMP, [rel_ts_eq, rel_ts_gt, rel_ts_gte, rel_ts_lt, rel_ts_lte])
# Registry of all known capability modules, keyed by module ID.
capability_modules = {CM_CORE: cm_core, CM_REGEX: cm_regex, CM_TIMESTAMP: cm_timestamp}
class DefaultQueryInfo(tm11.SupportedQuery):
    """ Used to describe the TAXII Default Queries that are supported.
    :param targeting_expression_infos: Describe the supported targeting expressions
    :type targeting_expression_infos: :class:`list` of :class:`TargetingExpressionInfo` objects
    :param capability_modules: Indicate the supported capability modules
    :type capability_modules: :class:`list` of :class:`str`
    """

    def __init__(self, targeting_expression_infos, capability_modules):
        super(DefaultQueryInfo, self).__init__(FID_TAXII_DEFAULT_QUERY_10)
        self.targeting_expression_infos = targeting_expression_infos
        self.capability_modules = capability_modules

    @property
    def targeting_expression_infos(self):
        return self._targeting_expression_infos

    @targeting_expression_infos.setter
    def targeting_expression_infos(self, value):
        # Every entry must be a TargetingExpressionInfo instance.
        do_check(value, 'targeting_expression_infos', type=DefaultQueryInfo.TargetingExpressionInfo)
        self._targeting_expression_infos = value

    @property
    def capability_modules(self):
        return self._capability_modules

    @capability_modules.setter
    def capability_modules(self, value):
        # Capability module IDs are URIs; only the format is checked here.
        do_check(value, 'capability_modules', regex_tuple=uri_regex)
        self._capability_modules = value

    def to_etree(self):
        """Serialize to lxml etree: the base SupportedQuery element gains a
        tdq:Default_Query_Info child carrying the expression infos and the
        capability module IDs."""
        q = super(DefaultQueryInfo, self).to_etree()
        dqi = etree.SubElement(q, '{%s}Default_Query_Info' % ns_map['tdq'])
        for expression_info in self.targeting_expression_infos:
            dqi.append(expression_info.to_etree())
        for cmod in self.capability_modules:
            cm = etree.SubElement(dqi, '{%s}Capability_Module' % ns_map['tdq'], nsmap=ns_map)
            cm.text = cmod
        return q

    def to_dict(self):
        """Serialize to a plain dict (expression infos serialized per-item)."""
        d = super(DefaultQueryInfo, self).to_dict()
        d['targeting_expression_infos'] = []
        for expression_info in self.targeting_expression_infos:
            d['targeting_expression_infos'].append(expression_info.to_dict())
        # TODO: This looks like a serialization bug
        # NOTE(review): the capability_modules list is shared by reference
        # rather than copied -- confirm whether callers may mutate it.
        d['capability_modules'] = self.capability_modules
        return d

    def to_text(self, line_prepend=''):
        """Human-readable rendering; nested items are indented by STD_INDENT."""
        s = super(DefaultQueryInfo, self).to_text(line_prepend)
        for expression_info in self.targeting_expression_infos:
            s += expression_info.to_text(line_prepend + STD_INDENT)
        for capability_module in self.capability_modules:
            s += line_prepend + " Capability Module: %s\n" % capability_module
        return s

    def __hash__(self):
        # Hash of the dict serialization's string form; pairs with the
        # dict-based equality used by the TAXII base classes.
        return hash(str(self.to_dict()))

    @staticmethod
    def from_etree(etree_xml):
        """Deserialize from an lxml element (inverse of to_etree)."""
        texpr_infos = etree_xml.xpath('./tdq:Default_Query_Info/tdq:Targeting_Expression_Info', namespaces=ns_map)
        texpr_info_list = []
        for texpr_info in texpr_infos:
            texpr_info_list.append(DefaultQueryInfo.TargetingExpressionInfo.from_etree(texpr_info))
        cms = etree_xml.xpath('./tdq:Default_Query_Info/tdq:Capability_Module', namespaces=ns_map)
        cms_list = []
        for cm in cms:
            cms_list.append(cm.text)
        return DefaultQueryInfo(texpr_info_list, cms_list)

    @staticmethod
    def from_dict(d):
        """Deserialize from a dict (inverse of to_dict)."""
        kwargs = {}
        kwargs['targeting_expression_infos'] = []
        for expression_info in d['targeting_expression_infos']:
            kwargs['targeting_expression_infos'].append(DefaultQueryInfo.TargetingExpressionInfo.from_dict(expression_info))
        kwargs['capability_modules'] = d['capability_modules']
        return DefaultQueryInfo(**kwargs)

    class TargetingExpressionInfo(TAXIIBase):
        """This class describes supported Targeting Expressions
        :param string targeting_expression_id: The supported targeting expression ID
        :param preferred_scope: Indicates the preferred scope of queries
        :type preferred_scope: :class:`list` of :class:`string`
        :param allowed_scope: Indicates the allowed scope of queries
        :type allowed_scope: :class:`list` of :class:`string`
        """

        def __init__(self, targeting_expression_id, preferred_scope=None, allowed_scope=None):
            self.targeting_expression_id = targeting_expression_id
            self.preferred_scope = preferred_scope or []
            self.allowed_scope = allowed_scope or []

        @property
        def sort_key(self):
            # Used by the TAXII base classes for order-independent comparison.
            return self.targeting_expression_id

        @property
        def targeting_expression_id(self):
            return self._targeting_expression_id

        @targeting_expression_id.setter
        def targeting_expression_id(self, value):
            do_check(value, 'targeting_expression_id', regex_tuple=uri_regex)
            self._targeting_expression_id = value

        @property
        def preferred_scope(self):
            return self._preferred_scope

        @preferred_scope.setter
        def preferred_scope(self, value):
            # Each scope string must match the targeting expression grammar.
            do_check(value, 'preferred_scope', type=six.string_types, regex_tuple=targeting_expression_regex)
            self._preferred_scope = value

        @property
        def allowed_scope(self):
            return self._allowed_scope

        @allowed_scope.setter
        def allowed_scope(self, value):
            do_check(value, 'allowed_scope', type=six.string_types, regex_tuple=targeting_expression_regex)
            self._allowed_scope = value

        def to_etree(self):
            """Serialize as tdq:Targeting_Expression_Info with one child per scope."""
            tei = etree.Element('{%s}Targeting_Expression_Info' % ns_map['tdq'])
            tei.attrib['targeting_expression_id'] = self.targeting_expression_id
            for scope in self.preferred_scope:
                preferred = etree.SubElement(tei, '{%s}Preferred_Scope' % ns_map['tdq'])
                preferred.text = scope
            for scope in self.allowed_scope:
                allowed = etree.SubElement(tei, '{%s}Allowed_Scope' % ns_map['tdq'])
                allowed.text = scope
            return tei

        def to_dict(self):
            d = {}
            d['targeting_expression_id'] = self.targeting_expression_id
            # TODO: Preferred / Allowed scope look like serialization bugs
            # NOTE(review): lists are shared by reference rather than copied.
            d['preferred_scope'] = self.preferred_scope
            d['allowed_scope'] = self.allowed_scope
            return d

        def to_text(self, line_prepend=''):
            """Human-readable rendering of this expression info."""
            s = line_prepend + "=== Targeting Expression Info ===\n"
            s += line_prepend + " Targeting Expression ID: %s\n" % self.targeting_expression_id
            for scope in self.preferred_scope:
                s += line_prepend + " Preferred Scope: %s\n" % scope
            for scope in self.allowed_scope:
                s += line_prepend + " Allowed Scope: %s\n" % scope
            return s

        def __hash__(self):
            return hash(str(self.to_dict()))

        @staticmethod
        def from_etree(etree_xml):
            """Deserialize from an lxml element (inverse of to_etree)."""
            kwargs = {}
            kwargs['targeting_expression_id'] = etree_xml.xpath('./@targeting_expression_id', namespaces=ns_map)[0]
            kwargs['preferred_scope'] = []
            preferred_scope_set = etree_xml.xpath('./tdq:Preferred_Scope', namespaces=ns_map)
            for preferred in preferred_scope_set:
                kwargs['preferred_scope'].append(preferred.text)
            kwargs['allowed_scope'] = []
            allowed_scope_set = etree_xml.xpath('./tdq:Allowed_Scope', namespaces=ns_map)
            for allowed in allowed_scope_set:
                kwargs['allowed_scope'].append(allowed.text)
            return DefaultQueryInfo.TargetingExpressionInfo(**kwargs)

        @staticmethod
        def from_dict(d):
            return DefaultQueryInfo.TargetingExpressionInfo(**d)
class DefaultQuery(tm11.Query):
"""Conveys a TAXII Default Query.
:param string targeting_expression_id: The targeting_expression used in the query
:param criteria: The criteria of the query
:type criteria: :class:`DefaultQuery.Criteria`
"""
def __init__(self, targeting_expression_id, criteria):
    """Initialize with the targeting expression ID (a URI string) and the
    root Criteria of the query."""
    super(DefaultQuery, self).__init__(FID_TAXII_DEFAULT_QUERY_10)
    self.targeting_expression_id = targeting_expression_id
    self.criteria = criteria
@property
def targeting_expression_id(self):
    # ID (a URI) of the targeting expression used in this query.
    return self._targeting_expression_id

@targeting_expression_id.setter
def targeting_expression_id(self, value):
    # Format-checked against the URI regex; existence is not verified.
    do_check(value, 'targeting_expression_id', regex_tuple=uri_regex)
    self._targeting_expression_id = value
@property
def criteria(self):
    # Root Criteria of the query.
    return self._criteria

@criteria.setter
def criteria(self, value):
    # Must be a DefaultQuery.Criteria instance.
    do_check(value, 'criteria', type=DefaultQuery.Criteria)
    self._criteria = value
def to_etree(self):
    """Serialize: the base Query element gains a tdq:Default_Query child
    carrying the targeting expression ID and the serialized criteria."""
    q = super(DefaultQuery, self).to_etree()
    dq = etree.SubElement(q, '{%s}Default_Query' % ns_map['tdq'], nsmap=ns_map)
    dq.attrib['targeting_expression_id'] = self.targeting_expression_id
    dq.append(self.criteria.to_etree())
    return q
def to_dict(self):
    """Serialize to a plain dict (criteria serialized recursively)."""
    d = super(DefaultQuery, self).to_dict()
    d['targeting_expression_id'] = self.targeting_expression_id
    d['criteria'] = self.criteria.to_dict()
    return d
def to_text(self, line_prepend=''):
    """Human-readable rendering of the query and its criteria tree."""
    s = super(DefaultQuery, self).to_text(line_prepend)
    s += line_prepend + " Targeting Expression ID: %s\n" % self.targeting_expression_id
    s += self.criteria.to_text(line_prepend)
    return s
@staticmethod
def from_etree(etree_xml):
    """Deserialize from an lxml element (inverse of to_etree)."""
    tei = etree_xml.xpath('./tdq:Default_Query/@targeting_expression_id', namespaces=ns_map)[0]  # attrib['targeting_expression_id']
    criteria = DefaultQuery.Criteria.from_etree(etree_xml.xpath('./tdq:Default_Query/tdq:Criteria', namespaces=ns_map)[0])
    return DefaultQuery(tei, criteria)
@staticmethod
def from_dict(d):
    """Deserialize from a dict (inverse of to_dict)."""
    tei = d['targeting_expression_id']
    criteria = DefaultQuery.Criteria.from_dict(d['criteria'])
    return DefaultQuery(tei, criteria)
class Criteria(TAXIIBase):
    """Represents criteria for a :class:`DefaultQuery`. **Note**: At least one criterion OR criteria MUST be present

    :param str operator: The logical operator (should be one of `OP_AND` or `OP_OR`)
    :param criteria: The criteria for the query
    :type criteria: :class:`DefaultQuery.Criteria`
    :param criterion: The criterion for the query
    :type criterion: :class:`DefaultQuery.Criterion`
    """

    def __init__(self, operator, criteria=None, criterion=None):
        self.operator = operator
        self.criteria = criteria or []
        self.criterion = criterion or []

    @property
    def sort_key(self):
        # Order-independent key: concatenation of the children's sort keys,
        # with both child lists sorted first. Used by the TAXII base
        # classes for comparison.
        key_list = []
        ia = sorted(self.criteria, key=attrgetter('sort_key'))
        ion = sorted(self.criterion, key=attrgetter('sort_key'))
        for i in ia:
            key_list.append(i.sort_key)
        for i in ion:
            key_list.append(i.sort_key)
        return ''.join(key_list)

    @property
    def operator(self):
        return self._operator

    @operator.setter
    def operator(self, value):
        # Must be one of the defined logical operators (OP_TYPES).
        do_check(value, 'operator', value_tuple=OP_TYPES)
        self._operator = value

    @property
    def criteria(self):
        return self._criteria

    @criteria.setter
    def criteria(self, value):
        # BUG FIX: the do_check label previously read 'critiera', which
        # produced a misspelled field name in validation error messages.
        do_check(value, 'criteria', type=DefaultQuery.Criteria)
        self._criteria = value

    @property
    def criterion(self):
        return self._criterion

    @criterion.setter
    def criterion(self, value):
        do_check(value, 'criterion', type=DefaultQuery.Criterion)
        self._criterion = value

    def to_etree(self):
        """Serialize as <tdq:Criteria operator="..."> with nested
        Criteria/Criterion children appended in order."""
        cr = etree.Element('{%s}Criteria' % ns_map['tdq'], nsmap=ns_map)
        cr.attrib['operator'] = self.operator
        for criteria in self.criteria:
            cr.append(criteria.to_etree())
        for criterion in self.criterion:
            cr.append(criterion.to_etree())
        return cr

    def to_dict(self):
        """Serialize to a plain dict; children are serialized recursively."""
        d = {}
        d['operator'] = self.operator
        d['criteria'] = []
        for criteria in self.criteria:
            d['criteria'].append(criteria.to_dict())
        d['criterion'] = []
        for criterion in self.criterion:
            d['criterion'].append(criterion.to_dict())
        return d

    def to_text(self, line_prepend=''):
        """Human-readable rendering of this criteria subtree."""
        s = line_prepend + "=== Criteria ===\n"
        s += line_prepend + " Operator: %s\n" % self.operator
        for criteria in self.criteria:
            s += criteria.to_text(line_prepend + STD_INDENT)
        for criterion in self.criterion:
            s += criterion.to_text(line_prepend + STD_INDENT)
        return s

    @staticmethod
    def from_etree(etree_xml):
        """Deserialize from an lxml element, recursively rebuilding nested
        Criteria and leaf Criterion objects."""
        kwargs = {}
        kwargs['operator'] = etree_xml.attrib['operator']
        kwargs['criteria'] = []
        criteria_set = etree_xml.xpath('./tdq:Criteria', namespaces=ns_map)
        for criteria in criteria_set:
            kwargs['criteria'].append(DefaultQuery.Criteria.from_etree(criteria))
        kwargs['criterion'] = []
        criterion_set = etree_xml.xpath('./tdq:Criterion', namespaces=ns_map)
        for criterion in criterion_set:
            kwargs['criterion'].append(DefaultQuery.Criterion.from_etree(criterion))
        return DefaultQuery.Criteria(**kwargs)

    @staticmethod
    def from_dict(d):
        """Deserialize from a dict; the 'criteria'/'criterion' keys are
        optional and default to empty lists."""
        kwargs = {}
        kwargs['operator'] = d['operator']
        kwargs['criteria'] = []
        criteria_set = d.get('criteria', [])
        for criteria in criteria_set:
            kwargs['criteria'].append(DefaultQuery.Criteria.from_dict(criteria))
        kwargs['criterion'] = []
        criterion_set = d.get('criterion', [])
        for criterion in criterion_set:
            kwargs['criterion'].append(DefaultQuery.Criterion.from_dict(criterion))
        return DefaultQuery.Criteria(**kwargs)
class | |
<gh_stars>1-10
"""Abstract class to define the API for an SPH scheme. The idea is that
one can define a scheme and thereafter one simply instantiates a suitable
scheme, gives it a bunch of particles and runs the application.
"""
class Scheme(object):
    """An API for an SPH scheme.

    Concrete schemes subclass this, implement the abstract methods, and are
    handed a set of particle arrays by the application.
    """

    def __init__(self, fluids, solids, dim):
        """
        Parameters
        ----------
        fluids: list
            List of names of fluid particle arrays.
        solids: list
            List of names of solid particle arrays (or boundaries).
        dim: int
            Dimensionality of the problem.
        """
        self.fluids = fluids
        self.solids = solids
        self.dim = dim
        self.solver = None
        # Give derived classes a chance to compute dependent attributes.
        self.attributes_changed()

    # Public protocol ###################################################

    def add_user_options(self, group):
        """Hook for adding scheme-specific command line options; the default
        adds none."""
        pass

    def attributes_changed(self):
        """Overload this to compute any properties that depend on others.

        This is automatically called when configure is called.
        """
        pass

    def configure(self, **kw):
        """Configure the scheme with given parameters.

        Overload this to do any scheme specific stuff.

        Raises RuntimeError if a keyword does not name an existing
        attribute of the scheme.
        """
        for name, value in kw.items():
            if not hasattr(self, name):
                raise RuntimeError(
                    'Parameter {param} not defined for {scheme}.'.format(
                        param=name, scheme=self.__class__.__name__
                    )
                )
            setattr(self, name, value)
        self.attributes_changed()

    def consume_user_options(self, options):
        """Hook for reading back parsed command line options; default is a
        no-op."""
        pass

    def configure_solver(self, kernel=None, integrator_cls=None,
                         extra_steppers=None, **kw):
        """Configure the solver to be generated.

        Parameters
        ----------
        kernel : Kernel instance.
            Kernel to use, if none is passed a default one is used.
        integrator_cls : pysph.sph.integrator.Integrator
            Integrator class to use, use sensible default if none is
            passed.
        extra_steppers : dict
            Additional integration stepper instances as a dict.
        **kw : extra arguments
            Any additional keyword args are passed to the solver instance.
        """
        raise NotImplementedError()

    def get_equations(self):
        """Return the SPH equations for this scheme; must be implemented by
        subclasses."""
        raise NotImplementedError()

    def get_solver(self):
        """Return the solver built by configure_solver (None until then)."""
        return self.solver

    def setup_properties(self, particles, clean=True):
        """Setup the particle arrays so they have the right set of properties
        for this scheme.

        Parameters
        ----------
        particles : list
            List of particle arrays.
        clean : bool
            If True, removes any unnecessary properties.
        """
        raise NotImplementedError()

    # Private protocol ###################################################

    def _ensure_properties(self, pa, desired_props, clean=True):
        """Given a particle array and a set of properties desired,
        this removes unnecessary properties (if `clean=True`), and
        adds the desired properties.

        Parameters
        ----------
        pa : ParticleArray
            Desired particle array.
        desired_props : sequence
            Desired properties to have in the array.
        clean : bool
            Remove undesirable properties.
        """
        desired = set(desired_props)
        if clean:
            for prop in set(pa.properties.keys()) - desired:
                pa.remove_property(prop)
        # Recompute the current keys: removal above may have changed them.
        for prop in desired - set(pa.properties.keys()):
            pa.add_property(prop)

    def _smart_getattr(self, obj, var):
        """Return obj.var, falling back to self.var when obj.var is None."""
        value = getattr(obj, var)
        return getattr(self, var) if value is None else value
class SchemeChooser(Scheme):
    """A delegating Scheme that lets the user pick one of several schemes
    via a ``--scheme`` command line option; all other calls are forwarded
    to the currently selected scheme.
    """

    def __init__(self, default, **schemes):
        """
        Parameters
        ----------
        default: str
            Name of the default scheme to use.
        **schemes: kwargs
            The schemes to choose between.
        """
        self.default = default
        self.schemes = dict(schemes)
        self.scheme = schemes[default]

    def add_user_options(self, group):
        """Add the options of every candidate scheme, plus the --scheme
        switch itself."""
        for scheme in self.schemes.values():
            scheme.add_user_options(group)
        choices = list(self.schemes.keys())
        group.add_argument(
            "--scheme", action="store", dest="scheme",
            default=self.default, choices=choices,
            help="Specify scheme to use (one of %s)." % choices
        )

    def attributes_changed(self):
        # Forward to the currently selected scheme.
        self.scheme.attributes_changed()

    def configure(self, **kw):
        # Forward to the currently selected scheme.
        self.scheme.configure(**kw)

    def consume_user_options(self, options):
        """Select the scheme chosen on the command line, then let it
        consume its own options."""
        self.scheme = self.schemes[options.scheme]
        self.scheme.consume_user_options(options)

    def configure_solver(self, kernel=None, integrator_cls=None,
                         extra_steppers=None, **kw):
        """Configure the solver of every candidate scheme.

        BUG FIX: the original called ``self.scheme.configure_solver`` inside
        the loop, configuring the currently-selected scheme once per
        candidate (and never configuring the others); the loop variable is
        now used as intended.
        """
        for scheme in self.schemes.values():
            scheme.configure_solver(
                kernel=kernel, integrator_cls=integrator_cls,
                extra_steppers=extra_steppers, **kw
            )

    def get_equations(self):
        # Forward to the currently selected scheme.
        return self.scheme.get_equations()

    def get_solver(self):
        # Forward to the currently selected scheme.
        return self.scheme.get_solver()

    def setup_properties(self, particles, clean=True):
        """Setup the particle arrays so they have the right set of properties
        for this scheme.

        Parameters
        ----------
        particles : list
            List of particle arrays.
        clean : bool
            If True, removes any unnecessary properties.
        """
        self.scheme.setup_properties(particles, clean)
############################################################################
def add_bool_argument(group, arg, dest, help, default):
    """Register a boolean flag pair (``--arg`` / ``--no-arg``) on an
    argparse group, storing the result in ``dest`` with the given default.

    The negated option's help text is derived from ``help`` by prefixing
    'Do not ' and lower-casing the first letter.
    """
    positive_flag = '--%s' % arg
    negative_flag = '--no-%s' % arg
    negated_help = 'Do not ' + help[0].lower() + help[1:]
    group.add_argument(positive_flag, action="store_true", dest=dest,
                       help=help)
    group.add_argument(negative_flag, action="store_false", dest=dest,
                       help=negated_help)
    group.set_defaults(**{dest: default})
class WCSPHScheme(Scheme):
def __init__(self, fluids, solids, dim, rho0, c0, h0, hdx, gamma=7.0,
gx=0.0, gy=0.0, gz=0.0, alpha=0.1, beta=0.0, delta=0.1,
nu=0.0, tensile_correction=False, hg_correction=False,
update_h=False, delta_sph=False, summation_density=False):
"""Parameters
----------
fluids: list
List of names of fluid particle arrays.
solids: list
List of names of solid particle arrays (or boundaries).
dim: int
Dimensionality of the problem.
rho0: float
Reference density.
c0: float
Reference speed of sound.
gamma: float
Gamma for the equation of state.
h0: float
Reference smoothing length.
hdx: float
Ratio of h/dx.
gx, gy, gz: float
Body force acceleration components.
alpha: float
Coefficient for artificial viscosity.
beta: float
Coefficient for artificial viscosity.
delta: float
Coefficient used to control the intensity of diffusion of density
nu: float
Real viscosity of the fluid, defaults to no viscosity.
tensile_correction: bool
Use tensile correction.
hg_correction: bool
Use the Hughes-Graham correction.
update_h: bool
Update the smoothing length as per Ferrari et al.
delta_sph: bool
Use the delta-SPH correction terms.
summation_density: bool
Use summation density instead of continuity.
References
----------
.. [Hughes2010] <NAME> and <NAME>, "Comparison of
incompressible and weakly-compressible SPH models for free-surface
water flows", Journal of Hydraulic Research, 48 (2010), pp. 105-117.
.. [Marrone2011] S. Marrone et al., "delta-SPH model for simulating
violent impact flows", Computer Methods in Applied Mechanics and
Engineering, 200 (2011), pp 1526--1542.
.. [Cherfils2012] <NAME> et al., "JOSEPHINE: A parallel SPH
code for free-surface flows", Computer Physics Communications, 183
(2012), pp 1468--1480.
"""
self.fluids = fluids
self.solids = solids
self.solver = None
self.rho0 = rho0
self.c0 = c0
self.gamma = gamma
self.dim = dim
self.h0 = h0
self.hdx = hdx
self.gx = gx
self.gy = gy
self.gz = gz
self.alpha = alpha
self.beta = beta
self.delta = delta
self.nu = nu
self.tensile_correction = tensile_correction
self.hg_correction = hg_correction
self.update_h = update_h
self.delta_sph = delta_sph
self.summation_density = summation_density
def add_user_options(self, group):
group.add_argument(
"--alpha", action="store", type=float, dest="alpha",
default=None,
help="Alpha for the artificial viscosity."
)
group.add_argument(
"--beta", action="store", type=float, dest="beta",
default=None,
help="Beta for the artificial viscosity."
)
group.add_argument(
"--delta", action="store", type=float, dest="delta",
default=None,
help="Delta for the delta-SPH."
)
group.add_argument(
"--gamma", action="store", type=float, dest="gamma",
default=None,
help="Gamma for the state equation."
)
add_bool_argument(
group, 'tensile-correction', dest='tensile_correction',
help="Use tensile instability correction.",
default=None
)
add_bool_argument(
group, "hg-correction", dest="hg_correction",
help="Use the Hughes Graham correction.",
default=None
)
add_bool_argument(
group, "update-h", dest="update_h",
help="Update the smoothing length as per Ferrari et al.",
default=None
)
add_bool_argument(
group, "delta-sph", dest="delta_sph",
help="Use the delta-SPH correction terms.",
default=None
)
add_bool_argument(
group, "summation-density", dest="summation_density",
help="Use summation density instead of continuity.",
default=None
)
def consume_user_options(self, options):
vars = ['gamma', 'tensile_correction', 'hg_correction',
'update_h', 'delta_sph', 'alpha', 'beta',
'summation_density']
data = dict((var, self._smart_getattr(options, var))
for var in vars)
self.configure(**data)
def get_timestep(self, cfl=0.5):
return cfl*self.h0/self.c0
def configure_solver(self, kernel=None, integrator_cls=None,
extra_steppers=None, **kw):
from pysph.base.kernels import CubicSpline
if kernel is None:
kernel = CubicSpline(dim=self.dim)
steppers = {}
if extra_steppers is not None:
steppers.update(extra_steppers)
from pysph.sph.integrator import PECIntegrator, TVDRK3Integrator
from pysph.sph.integrator_step import WCSPHStep, WCSPHTVDRK3Step
cls = integrator_cls if integrator_cls is not None else PECIntegrator
step_cls = WCSPHTVDRK3Step if cls is TVDRK3Integrator else WCSPHStep
for name in self.fluids + self.solids:
if name not in steppers:
steppers[name] = step_cls()
integrator = cls(**steppers)
from pysph.solver.solver import Solver
if 'dt' not in kw:
kw['dt'] = self.get_timestep()
self.solver = Solver(
dim=self.dim, integrator=integrator, kernel=kernel, **kw
)
def get_equations(self):
from pysph.sph.equation import Group
from pysph.sph.wc.basic import (
MomentumEquation, TaitEOS, TaitEOSHGCorrection,
UpdateSmoothingLengthFerrari
)
from pysph.sph.wc.basic import (ContinuityEquationDeltaSPH,
MomentumEquationDeltaSPH)
from pysph.sph.basic_equations import \
(ContinuityEquation, SummationDensity, XSPHCorrection)
from pysph.sph.wc.viscosity import LaminarViscosity
equations = []
g1 = []
all = self.fluids + self.solids
if self.summation_density:
g0 = []
for name in self.fluids:
g0.append(SummationDensity(dest=name, sources=all))
equations.append(Group(equations=g0, real=False))
for name in self.fluids:
g1.append(TaitEOS(
dest=name, sources=None, rho0=self.rho0, c0=self.c0,
gamma=self.gamma
))
# This correction applies only to solids.
for name in self.solids:
if self.hg_correction:
g1.append(TaitEOSHGCorrection(
dest=name, sources=None, rho0=self.rho0, c0=self.c0,
gamma=self.gamma
))
else:
g1.append(TaitEOS(
dest=name, sources=None, rho0=self.rho0, c0=self.c0,
gamma=self.gamma
))
equations.append(Group(equations=g1, real=False))
g2 = []
for name in self.solids:
g2.append(ContinuityEquation(dest=name, sources=self.fluids))
for name in self.fluids:
if self.delta_sph:
other = all[:]
other.remove(name)
g2.append(
ContinuityEquationDeltaSPH(
dest=name, sources=[name], c0=self.c0,
delta=self.delta
)
)
if len(other) > 0:
g2.append(ContinuityEquation(dest=name, sources=other))
g2.append(
MomentumEquationDeltaSPH(
dest=name, sources=[name], rho0=self.rho0, c0=self.c0,
alpha=self.alpha, gx=self.gx, gy=self.gy, gz=self.gz,
)
)
if len(other) > 0:
g2.append(
MomentumEquation(
dest=name, sources=other, c0=self.c0,
alpha=self.alpha, beta=self.beta,
gx=self.gx, gy=self.gy, gz=self.gz,
tensile_correction=self.tensile_correction
)
)
g2.append(XSPHCorrection(dest=name, sources=[name]))
else:
if not self.summation_density:
g2.append(ContinuityEquation(dest=name, sources=all))
g2.extend([
MomentumEquation(
dest=name, sources=all, alpha=self.alpha,
beta=self.beta, gx=self.gx, gy=self.gy, gz=self.gz,
c0=self.c0, tensile_correction=self.tensile_correction
),
XSPHCorrection(dest=name, sources=[name])
])
if abs(self.nu) > 1e-14:
eq = LaminarViscosity(
dest=name, sources=self.fluids, nu=self.nu
)
g2.insert(-1, eq)
equations.append(Group(equations=g2))
if self.update_h:
g3 = [
UpdateSmoothingLengthFerrari(
| |
<gh_stars>0
from typing import Optional, Union
import discord
import time
import importlib
import asyncio
import aiohttp
from discord.ext import commands
from fcts import args, checks
importlib.reload(args)
importlib.reload(checks)
from libs.classes import Zbot, MyContext
class Partners(commands.Cog):
def __init__(self, bot: Zbot):
self.bot = bot
self.file = 'partners'
self.table = 'partners_beta' if bot.beta else 'partners'
@commands.Cog.listener()
async def on_ready(self):
self.table = 'partners_beta' if self.bot.beta else 'partners'
async def generate_id(self):
return round(time.time()/2)
async def bdd_get_partner(self, partnerID: int, guildID: int):
"""Return a partner based on its ID"""
try:
query = ("SELECT * FROM `{}` WHERE `ID`={} AND `guild`={}".format(self.table,partnerID,guildID))
async with self.bot.db_query(query) as query_results:
liste = list(query_results)
return liste
except Exception as err:
await self.bot.get_cog('Errors').on_error(err,None)
async def bdd_get_guild(self, guildID: int):
"""Return every partners of a guild"""
try:
query = ("SELECT * FROM `{}` WHERE `guild`={}".format(self.table,guildID))
async with self.bot.db_query(query) as query_results:
liste = list(query_results)
return liste
except Exception as err:
await self.bot.get_cog('Errors').on_error(err,None)
async def bdd_get_partnered(self, invites: list):
"""Return every guilds which has this one as partner"""
try:
if len(invites) == 0:
return list()
query = ("SELECT * FROM `{}` WHERE `type`='guild' AND ({})".format(self.table," OR ".join([f"`target`='{x.code}'" for x in invites])))
async with self.bot.db_query(query) as query_results:
liste = list(query_results)
return liste
except Exception as err:
await self.bot.get_cog('Errors').on_error(err,None)
async def bdd_set_partner(self,guildID:int,partnerID:str,partnerType:str,desc:str):
"""Add a partner into a server"""
try:
ID = await self.generate_id()
query = "INSERT INTO `{}` (`ID`,`guild`,`target`,`type`,`description`) VALUES (%(i)s,%(g)s,%(ta)s,%(ty)s,%(d)s);".format(self.table)
async with self.bot.db_query(query, { 'i': ID, 'g': guildID, 'ta': partnerID, 'ty': partnerType, 'd': desc }):
pass
return True
except Exception as err:
await self.bot.get_cog('Errors').on_error(err,None)
return False
async def bdd_edit_partner(self,partnerID:int,target:str=None,desc:str=None,msg:int=None):
"""Modify a partner"""
try:
query = ""
if target is not None:
query += ("UPDATE `{table}` SET `target` = \"{target}\" WHERE `ID` = {id};".format(table=self.table,target=target,id=partnerID))
if desc is not None:
query += ("UPDATE `{table}` SET `description` = \"{desc}\" WHERE `ID` = {id};".format(table=self.table,desc=desc.replace('"','\"'),id=partnerID))
if msg is not None:
query += ("UPDATE `{table}` SET `messageID` = \"{msg}\" WHERE `ID` = {id};".format(table=self.table,msg=msg,id=partnerID))
async with self.bot.db_query(query):
pass
return True
except Exception as err:
await self.bot.get_cog('Errors').on_error(err,None)
return False
async def bdd_del_partner(self,ID:int):
"""Delete a partner from a guild list"""
try:
query = ("DELETE FROM `{}` WHERE `ID` = {}".format(self.table,ID))
async with self.bot.db_query(query):
pass
return True
except Exception as e:
await self.bot.get_cog('Errors').on_error(e,None)
return False
async def get_bot_guilds(self, bot:int, session:aiohttp.ClientSession) -> Optional[int]:
"""Get the guilds count of a bot
None if unknown bot/count not provided"""
async with session.get('https://top.gg/api/bots/{}/stats'.format(bot), headers={'Authorization':str(self.bot.dbl_token)}) as resp:
ans: dict = await resp.json()
if 'server_count' in ans:
return ans['server_count']
return None
async def get_bot_owners(self, bot:int, session:aiohttp.ClientSession) -> list[Union[discord.User, int]]:
"""Get the owners list of a bot
Empty list if unknown bot/owners not provided"""
async with session.get('https://top.gg/api/bots/{}'.format(bot), headers={'Authorization':str(self.bot.dbl_token)}) as resp:
ans: dict = await resp.json()
owners = list()
if 'owners' in ans:
for o in ans['owners']:
try:
owners.append(await self.bot.fetch_user(o))
except discord.NotFound:
owners.append(o)
return owners
async def update_partners(self, channel: discord.TextChannel, color: int =None) -> int:
"""Update every partners of a channel"""
if not channel.permissions_for(channel.guild.me).embed_links:
return 0
partners = await self.bdd_get_guild(channel.guild.id)
if len(partners) == 0:
return 0
tr_unknown = await self.bot._(channel.guild.id, "misc.unknown")
tr_guild = await self.bot._(channel.guild.id, "misc.server")
tr_bot = await self.bot._(channel.guild.id, "misc.bot")
tr_members = await self.bot._(channel.guild.id, 'info.info.role-3')
tr_guilds = await self.bot._(channel.guild.id, "misc.servers")
tr_invite = await self.bot._(channel.guild.id, 'info.info.inv-4')
tr_click = await self.bot._(channel.guild.id, "misc.click_here")
tr_owner = await self.bot._(channel.guild.id, 'info.info.guild-1')
count = 0
if color is None:
color = await self.bot.get_config(channel.guild.id,'partner_color')
if color is None:
color = self.bot.get_cog('Servers').default_opt['partner_color']
session = aiohttp.ClientSession(loop=self.bot.loop)
for partner in partners:
target_desc = partner['description']
if partner['type'] == 'bot':
title, fields, image = await self.update_partner_bot(tr_bot, tr_guilds, tr_invite, tr_owner, session, partner)
else:
try:
title, fields, image, target_desc = await self.update_partner_guild(tr_guild, tr_members, tr_unknown, tr_invite, tr_click, channel, partner, target_desc)
except discord.NotFound:
continue
emb = discord.Embed(title=title, description=target_desc, color=color, timestamp=self.bot.utcnow())
emb.set_footer(text=partner['ID'])
if image:
emb.set_thumbnail(url=image)
for field in fields:
if field:
emb.add_field(**field)
if self.bot.zombie_mode:
return
try:
msg = await channel.fetch_message(partner['messageID'])
await msg.edit(embed=emb)
except discord.errors.NotFound:
msg = await channel.send(embed=emb)
await self.bdd_edit_partner(partnerID=partner['ID'],msg=msg.id)
except Exception as e:
msg = await channel.send(embed=emb)
await self.bdd_edit_partner(partnerID=partner['ID'],msg=msg.id)
await self.bot.get_cog('Errors').on_error(e,None)
count += 1
await session.close()
return count
    async def update_partner_bot(self, tr_bot: str, tr_guilds: str, tr_invite: str, tr_owner: str, session: aiohttp.ClientSession, partner: dict):
        """Update a bot partner embed

        Builds the title, embed fields and thumbnail image for a partner of
        type 'bot'. The ``tr_*`` arguments are pre-translated labels; the
        bot's stats are fetched from the top.gg API through *session*.
        Returns a ``(title, fields, image)`` tuple."""
        image = ""
        title = "**{}** ".format(tr_bot.capitalize())
        fields = list()
        try:
            # partner['target'] holds the bot's user ID as a string.
            title += str(await self.bot.fetch_user(int(partner['target'])))
            # guild count field
            guild_nbr = await self.get_bot_guilds(partner['target'], session)
            if guild_nbr is not None:
                fields.append({'name': tr_guilds.capitalize(),
                               'value': str(guild_nbr)})
            # owners field
            owners = await self.get_bot_owners(partner['target'], session)
            if owners:
                fields.append({'name': tr_owner.capitalize(),
                               'value': ", ".join([str(u) for u in owners])})
            usr = await self.bot.fetch_user(int(partner['target']))
            image = usr.display_avatar.with_static_format("png") if usr else ""
        except discord.NotFound:
            # Unknown bot account: fall back to showing the raw ID.
            title += "ID: "+partner['target']
        except Exception as e:
            # Any other failure (e.g. top.gg API error): keep a best-effort
            # avatar and report the error.
            usr = await self.bot.fetch_user(int(partner['target']))
            image = usr.display_avatar.url if usr else ""
            await self.bot.get_cog("Errors").on_error(e, None)
        # Invite link granting every permission except administrator.
        perm = discord.Permissions.all()
        perm.update(administrator=False)
        oauth_url = discord.utils.oauth_url(partner['target'], permissions=perm)
        fields.append({'name': tr_invite.capitalize(),
                       'value': f'[Click here]({oauth_url})'})
        return title, fields, image
    async def update_partner_guild(self, tr_guild: str, tr_members: str, tr_unknown: str, tr_invite: str, tr_click: str, channel: discord.TextChannel, partner: dict, target_desc: str):
        """Update a guild partner embed

        Builds the title, embed fields, thumbnail image and description for a
        partner of type 'guild' (``partner['target']`` is the invite code).
        Raises discord.NotFound when the invite no longer exists.
        Returns a ``(title, (field1, field2), image, target_desc)`` tuple."""
        title = "**{}** ".format(tr_guild.capitalize())
        try:
            inv = await self.bot.fetch_invite(partner['target'])
        except discord.NotFound as err:
            # Propagate so the caller can skip this partner.
            raise err
        image = str(inv.guild.icon) if inv.guild.icon else None
        if isinstance(inv, discord.Invite) and not inv.revoked:
            title += inv.guild.name
            field1 = {'name': tr_members.capitalize(), 'value': str(
                inv.approximate_member_count)}
            # Grant the configured partner role(s) to that guild's admins.
            await self.give_roles(inv, channel.guild)
        else:
            title += tr_unknown
            field1 = None
        field2 = {'name': tr_invite.capitalize(
        ), 'value': '[{}](https://discord.gg/{})'.format(tr_click.capitalize(), partner['target'])}
        if len(target_desc) == 0:
            # NOTE(review): assumes inv.guild always has a usable .id here,
            # even for revoked invites — confirm against discord.py's
            # PartialInviteGuild behaviour.
            target_desc = await self.bot.get_config(inv.guild.id, 'description')
        return title, (field1, field2), image, target_desc
async def give_roles(self,invite:discord.Invite,guild:discord.Guild):
"""Give a role to admins of partners"""
if not isinstance(invite.guild,discord.Guild):
return
if guild.id == 356067272730607628 and self.bot.beta:
return
roles = await self.bot.get_config(guild.id,'partner_role')
roles = (guild.get_role(int(x)) for x in roles.split(';') if len(x) > 0 and x.isnumeric())
roles = (x for x in roles if x is not None)
admins = [x for x in invite.guild.members if not x.bot and x.guild_permissions.administrator]
for admin in admins:
if admin in guild.members:
member = guild.get_member(admin.id)
for role in roles:
if role not in member.roles:
try:
await member.add_roles(role)
except (discord.HTTPException, discord.Forbidden):
pass
@commands.group(name="partner",aliases=['partners'])
@commands.guild_only()
@commands.check(checks.database_connected)
@commands.check(checks.has_manage_guild)
async def partner_main(self, ctx: MyContext):
"""Manage the partners of your server
..Doc server.html#partners-system"""
if ctx.subcommand_passed is None:
await self.bot.get_cog('Help').help_command(ctx,['partner'])
@partner_main.command(name='add')
@commands.check(checks.database_connected)
async def partner_add(self, ctx: MyContext, invite:args.Invite, *, description=''):
"""Add a partner in your list
..Example partners add https://discord.com/oauth2/authorize?client_id=486896267788812288&scope=bot
..Example partners add discord.gg/mee6
..Doc server.html#add-a-partner"""
if isinstance(invite,int):
try:
item = await self.bot.fetch_user(invite)
if not item.bot:
raise Exception
except discord.NotFound:
return await ctx.send(await self.bot._(ctx.guild.id, "partners.invalid-bot"))
partner_type = 'bot'
elif isinstance(invite,str):
try:
item = await self.bot.fetch_invite(invite)
except discord.errors.NotFound:
return await ctx.send(await self.bot._(ctx.guild.id, "partners.invalid-invite"))
partner_type = 'guild'
else:
return
current_list = [x['target'] for x in await self.bdd_get_guild(ctx.guild.id)]
if str(item.id) in current_list:
return await ctx.send(await self.bot._(ctx.guild, "partners.already-added"))
if len(description) > 0:
description = await self.bot.get_cog('Emojis').anti_code(description)
await self.bdd_set_partner(guildID=ctx.guild.id,partnerID=item.id,partnerType=partner_type,desc=description)
await ctx.send(await self.bot._(ctx.guild.id, "partners.added-partner"))
# logs
emb = discord.Embed(description=f"New partner added: {partner_type} {item.id}", color=10949630, timestamp=self.bot.utcnow())
emb.set_footer(text=ctx.guild.name)
emb.set_author(name=self.bot.user, icon_url=self.bot.user.display_avatar)
await self.bot.send_embed([emb])
@partner_main.command(name='description',aliases=['desc'])
@commands.check(checks.database_connected)
async def partner_desc(self, ctx: MyContext, ID:int, *, description:str):
"""Add or modify a description for a partner
..Example partner desc 779713982 Very cool bot with tons of features, costs a lot
..Doc server.html#add-a-partner"""
l = await self.bdd_get_partner(ID,ctx.guild.id)
if len(l) == 0:
return await ctx.send(await self.bot._(ctx.guild.id, "partners.invalid-partner"))
l = l[0]
description = await self.bot.get_cog('Emojis').anti_code(description)
if await self.bdd_edit_partner(l['ID'],desc=description):
await ctx.send(await self.bot._(ctx.guild.id, "partners.changed-desc"))
else:
await ctx.send(await self.bot._(ctx.guild.id, "partners.unknown-error"))
@partner_main.command(name='invite')
async def partner_invite(self, ctx: MyContext, ID:int, new_invite:discord.Invite=None):
"""Get the invite of a guild partner.
If you specify an invite, the partner will be updated with this new invite
..Example partner invite 795897339 discord.gg/ruyvNYQ
..Doc server.html#change-a-server-invite"""
l = await self.bdd_get_partner(ID,ctx.guild.id)
if len(l) == 0 or l[0]['type']!='guild':
return await ctx.send(await self.bot._(ctx.guild.id, "partners.unknown-server"))
l = l[0]
if new_invite is None:
return await ctx.send('{}: discord.gg/{}'.format(await self.bot._(ctx.guild.id,'info.info.inv-4'),l['target']))
if not await checks.has_admin(ctx):
return
if await self.bdd_edit_partner(l['ID'],target=new_invite.code):
await ctx.send(await self.bot._(ctx.guild.id, "partners.changed-invite"))
else:
await ctx.send(await self.bot._(ctx.guild.id, "partners.unknown-error"))
@partner_main.command(name='remove')
@commands.check(checks.has_admin)
async def partner_remove(self, ctx: MyContext, ID:int):
"""Remove a partner from the partners list
..Example partner remove 800697342
..Doc server.html#remove-a-partner"""
if not ctx.channel.permissions_for(ctx.guild.me).add_reactions:
return await ctx.send(await self.bot._(ctx.guild.id, "partners.missing-reactions"))
l = await self.bdd_get_partner(ID,ctx.guild.id)
if len(l) == 0:
return await ctx.send(await self.bot._(ctx.guild.id, "partners.invalid-partner"))
l = l[0]
if l['type']=='bot':
try:
bot = await self.bot.fetch_user(l['target'])
except discord.NotFound:
bot = l['target']
msg = await ctx.send(await self.bot._(ctx.guild.id, "partners.confirm-bot", | |
import random
import math
import sc2
import time
import argparse
from MapAnalyzer.MapData import MapData
from sc2 import Difficulty
from sc2.player import Bot, Computer
from sc2.constants import *
from sc2.ids.unit_typeid import UnitTypeId
from sc2.ids.ability_id import AbilityId
from sc2.position import Point2, Point3
from sc2.unit import Unit
from sc2.units import Units
from ANI_base_bot import ANI_base_bot
from sc2 import Race
from trainingdata import TrainingData as trainingData
from chat_messages import ChatData as ChatData
from unit_micros.Flanking_groups import FlankingController
from unit_micros.marines import MarineController
from unit_micros.ravens import RavenController
from unit_micros.banshees import BansheeController
from unit_micros.vikings import VikingController
from unit_micros.mines import MineController
from unit_trainer import UnitTrainer
# TODO don't try to expand in same location twice
# TODO Parasitic Bomb needs to be added in code.
async def find_potential_minral_line_turret_locations(location) -> [Point2]:
    """Return 8 candidate turret positions around *location*.

    Candidates are the four diagonal corners and the four axis-aligned
    offsets at ``offset`` distance from the location's position.
    (Name typo kept for backward compatibility with callers.)
    """
    p = location.position
    offset = 3.5
    # BUGFIX: the original listed ``(p.x + offset, p.y)`` twice and omitted
    # the left-hand candidate ``(p.x - offset, p.y)``.
    return [
        Point2((p.x - offset, p.y + offset)),
        Point2((p.x - offset, p.y - offset)),
        Point2((p.x + offset, p.y - offset)),
        Point2((p.x + offset, p.y + offset)),
        Point2((p.x, p.y + offset)),
        Point2((p.x, p.y - offset)),
        Point2((p.x - offset, p.y)),
        Point2((p.x + offset, p.y)),
    ]
class ANIbot(ANI_base_bot):
raw_affects_selection = True # True = fast play
debug = False
debug_vikings = False
debug_vikings_escape_grid = False
show_off = False
chat = True
upgrade_liberator = False
first_base_saturation = -2
refineries_in_first_base = 1 # note: refineries slow down first expansion!
refineries_in_second_base = 4
scv_limit = 80
scv_build_speed = 2
greedy_scv_consrtuction = False
BuildReapers = False
reapers_left = 3
MaxGhost = 2
NukesLeft = 5 # max 10. If used 11 or more changes many variables
raven_left = 3
mines_left = 4
aggressive_mines = False
leapfrog_mines = False
cyclone_left = 0
dual_liberator = False
liberator_left = 0
liberator_priority = False
hellion_left = 0
research_blue_flame = False # upgrades infernaligniter.
research_servos = False
banshee_left = 4
upgrade_banshee_cloak = False
upgrade_banshee_speed = False
min_marine = 8 # try keep this amount of marines
max_marine = 36
marine_drop = False
marines_last_resort = False
max_thor = 4
flanking_thors = False
thor_use_route_a = True
max_BC = 4
max_viking = 5
react_to_enemy_air = True
max_siege = 5
faster_tanks = False
max_barracks = 2 # maxamount of barracks
delay_barracs = False # makes only one barracks until starport ready
build_barracks_addons = True
barracks_reactor_first = False
super_fast_barracks = False
maxfactory = 2
max_starports = 3
build_extra_factories = True
build_extra_starports = True
build_starportreactor = 0
max_engineeringbays = 1
fast_engineeringbay = True
fast_armory = False
build_armory = True
maxmarauder = 6
build_extra_marauders = True
assault_enemy_home = True
careful_marines = False
marauder_push_limit = 0
build_missile_turrets = True
mineral_field_turret = False
mech_build = False
min_thors_to_attack = 2
expand_for_vespene = True
expand_fast_for_vespene = False
fast_vespene = False
fast_orbital = True # slow orbital makes first OC after first expansion is pending
upgrade_marine = True
upgrade_marine_defence_and_mech_attack = False
upgrade_mech = True
upgrade_vehicle_weapons = True
maxmedivacs = 3
build_cc_home = False
priority_tank = False
siege_behind_wall = False
priority_tank_pos = None
build_priority_cyclone = False
limit_vespene = 0
minimum_repairgroup = 1
nuke_enemy_home = False
activate_all_mines = False
scan_cloaked_enemies = False
more_depots = False
delay_expansion = False
delay_third = False
priority_factoty_reactor = False
nuke_rush = False
build_barracks_reactors = True
send_scout = True
delay_factory = False
debug_next_building = None
bunker_in_natural = 0
mine_mineral_wall = True
wait_until_4_orbital_ready = False
agressive_tanks = False
scan_enemy_at_4_min = False
send_flanking_units = 0
create_flanking_group_1 = True
# game state variables
target_of_assault = None
can_surrender = False
last_phase = False
last_iteration = 0
iteraatio = 0
last_turn = 0
start = 0
enemy_natural = None
enemy_third = None
scout_sent = False
chat_once_1 = True
chat_once_mine = True
chat_once_scv_kamikaze = True
chat_first_base = True
chat_second_base = True
canSiege = True
load_dropship = False
dropship_sent = False
viking_target_location = None
viking_priority = False
enemy_air_unit_location = None
midle_depo_position = None
home_in_danger = False
remembered_fired_mines_by_tag = {}
enemy_structures_at_start_by_tag = {}
gatekeeper = None
pick_fight = False
realtime_buffer = 0
unsiegetimer = 0
build_extra_factory_and_starport = True
scan_timer = 0
supply_limit_for_third = 120
emp_timer = 0
liberator_timer = 0
can_gg = 1
sergeant = None
reaper_harass = True
training_scv = False
lift_cc_once = True
locations_need_to_be_scanned = []
next_location_to_be_scanned = None
scan_enemy_base = True
proxy_defence = False
priority_raven = False
kamikaze_target = None
random_kamikaze_target = None
delay_starport = False
muster_home_defence = True
nuke_target = None
nuke_spotter_tag = None
nuke_spotter_last_alive_spot = None
nuke_spotter_last_died_spot = None
sweep_zones = []
sweep_timer = 0
debug_timer = 0
morph_to_hellbats = False
    def __init__(self):
        """Initialise per-instance bot state: pathing grids, strategy data,
        unit micro controllers and assorted per-game bookkeeping."""
        super().__init__("ANIbot")
        # Influence/pathing grids, rebuilt every step in execute().
        self.viking_grid = None
        self.viking_escape_grid = None
        self.air_grid = None
        self.unit_command_uses_self_do = True
        self.strategy = None
        self.new_strategy = False
        # Training data and canned chat responses.
        self._training_data = trainingData()
        self._chat_data = ChatData()
        self.opp_id = self.findOppId()
        self.enemy_start_location = None
        self.clear_result = True
        self.nuke_timer = 0
        # Hazard zones to avoid (nuke fallout, psionic storms, biles, ...).
        self.fallout_zone = []
        self.storm_zone = []
        self.bile_positions = []
        self.enemy_liberation_zone = []
        self.last_iter = 0
        # Opening/economy plan switches.
        self.delay_first_expansion = False
        self.super_greed = False
        self.greedy_third = False
        self.cc_first = False
        self.real_time = False
        self.last_game_loop = -10
        self.doner_location = None
        # Infantry research toggles.
        self.research_stimpack = True
        self.research_stimpack_rush = False
        self.research_combatshield = True
        self.research_concussiveshels = True
        self.defence_radius = 0
        # Map features, filled in once the game starts.
        self.natural = None
        self.ramps = None
        self.last_jump_target = None
        self.all_ramp_top_centers = []
        self.all_ramp_bottom_centers = []
        self.natural_ramp_top_center = None
        # Per-unit-type micro controllers.
        self.flanking_controller = FlankingController(self)
        # NOTE(review): unlike the others, this stores the *class*, not an
        # instance — TODO confirm intentional.
        self.marinecontroller = MarineController
        self.ravencontroller = RavenController(self)
        self.banshee_controller = BansheeController(self)
        self.viking_controller = VikingController(self)
        self.mine_controller = MineController(self)
        self.unit_trainer = UnitTrainer(self)
        self.can_do_worker_rush_defence = True
        self.kill_scout = True
        # Per-step cache, reset in execute().
        self.cached_we_should_expand = None
        self.step_timer = 0
        self.attack_route_a = []
        self.attack_route_b = []
        self.chat_warning = True
        self.harass_reaper_tag = None
async def on_start(self):
await super().on_start()
self.map_data = MapData(self, loglevel="INFO")
# map_at_start = MapData(self)
# map_at_start.plot_map(fontdict = {"family": "serif", "weight": "bold", "size": 6})
# map_at_start.show()
async def execute(self):
iteration = self.knowledge.iteration
if self.realtime:
if self.chat:
self.chat = False
await self._client.chat_send("Hello meatbag.", team_only=False)
print("Playing against meatbag")
pass
else:
response = self._chat_data.find_response(opponent_chat_data=self.state.chat,
my_id_from_proto=self.player_id)
if response:
await self._client.chat_send(response, team_only=False)
self.remember_enemy_units()
if self.debug:
for unit in self.enemy_units_in_memory:
if unit.timer > (self.debug_timer * 5):
self.debug_timer = unit.timer / 5
p = unit.position
h2 = self.get_terrain_z_height(p)
pos = Point3((p.x, p.y, h2))
size = 0.2
p0 = Point3((pos.x - size, pos.y - size, pos.z + (unit.timer / self.debug_timer)))
p1 = Point3((pos.x + size, pos.y + size, pos.z - 0))
# print(f"Drawing {p0} to {p1}")
c = Point3((0, 255, 255))
self._client.debug_box_out(p0, p1, color=c)
self.remember_detectors()
self.remember_snapshots()
self.cached_we_should_expand = None
my_custom_air_grid = self.map_data.get_clean_air_grid(default_weight=50)
enemy_ga_structures = self.enemy_structures.filter(lambda x: x.can_attack_air)
enemy_ga_units = self.enemy_units_in_memory.filter(lambda x: not x.is_flying and x.can_attack_air)
enemy_aa_units = self.enemy_units_in_memory.filter(lambda x: x.is_flying and x.can_attack_air)
enemy_ag_types = [UnitTypeId.DRONE, UnitTypeId.SCV, UnitTypeId.PROBE]
enemy_ag_targets = self.enemy_units_in_memory.of_type(enemy_ag_types)
for enemy_unit in enemy_ga_units:
if enemy_unit.type_id == UnitTypeId.CYCLONE:
enemy_total_range = enemy_unit.radius + 8
else:
enemy_total_range = enemy_unit.radius + enemy_unit.air_range + 3
my_custom_air_grid = self.map_data.add_cost(
position=enemy_unit.position, radius=enemy_total_range, grid=my_custom_air_grid)
for enemy_unit in self.enemy_structures.of_type(UnitTypeId.BUNKER):
enemy_total_range = enemy_unit.radius + 8
my_custom_air_grid = self.map_data.add_cost(
position=enemy_unit.position, radius=enemy_total_range, grid=my_custom_air_grid)
for enemy_unit in enemy_aa_units:
enemy_total_range = enemy_unit.radius + enemy_unit.air_range + 4
my_custom_air_grid = self.map_data.add_cost(
position=enemy_unit.position, radius=(enemy_total_range), grid=my_custom_air_grid)
if self.supply_used < 190:
for enemy_unit in enemy_ga_structures:
enemy_total_range = enemy_unit.radius + enemy_unit.air_range + 3
my_custom_air_grid = self.map_data.add_cost(
position=enemy_unit.position, radius=(enemy_total_range), grid=my_custom_air_grid)
viking_target_types = [UnitTypeId.OBSERVER, UnitTypeId.OVERSEER, UnitTypeId.OVERLORDTRANSPORT,
UnitTypeId.MEDIVAC, UnitTypeId.OVERLORD, UnitTypeId.RAVEN, UnitTypeId.BANSHEE]
viking_targets = self.enemy_units.of_type(viking_target_types)
self.viking_escape_grid = my_custom_air_grid
for enemy_unit in viking_targets:
self.viking_escape_grid = self.map_data.add_cost(position=enemy_unit.position,
radius=5, grid=self.viking_escape_grid, weight=-1)
if self.debug_vikings_escape_grid:
self.map_data.draw_influence_in_game(grid=self.viking_escape_grid, lower_threshold=51)
self.map_data.draw_influence_in_game(grid=self.viking_escape_grid, lower_threshold=0,
upper_threshold=50)
for enemy_unit in enemy_ag_targets:
my_custom_air_grid = self.map_data.add_cost(
position=enemy_unit.position, radius=4, grid=my_custom_air_grid, weight=-1)
if self.debug:
self.map_data.draw_influence_in_game(grid=my_custom_air_grid, lower_threshold=51)
self.map_data.draw_influence_in_game(grid=my_custom_air_grid, lower_threshold=0, upper_threshold=49)
self.air_grid = my_custom_air_grid
viking_grid = self.map_data.get_clean_air_grid(default_weight=10)
enemy_ga_structures = self.enemy_structures.ready.filter(lambda x: x.can_attack_air)
enemy_ga_units = self.enemy_units_in_memory.filter(lambda x: not x.is_flying and x.can_attack_air)
enemy_aa_units = self.enemy_units_in_memory.filter(lambda x: x.is_flying and x.can_attack_air)
for enemy_unit in enemy_ga_units:
enemy_total_range = enemy_unit.radius + enemy_unit.air_range + 3
viking_grid = self.map_data.add_cost(
position=enemy_unit.position, radius=enemy_total_range, grid=viking_grid)
for enemy_unit in enemy_aa_units:
enemy_total_range = enemy_unit.radius + enemy_unit.air_range + 1
viking_grid = self.map_data.add_cost(
position=enemy_unit.position, radius=enemy_total_range, grid=viking_grid, weight=-1)
for enemy_unit in enemy_ga_structures:
enemy_total_range = enemy_unit.radius + enemy_unit.air_range + 3
viking_grid = self.map_data.add_cost(
position=enemy_unit.position, radius=(enemy_total_range), grid=viking_grid)
if self.debug_vikings:
self.map_data.draw_influence_in_game(grid=viking_grid, lower_threshold=11)
self.map_data.draw_influence_in_game(grid=viking_grid, lower_threshold=0, upper_threshold=10)
self.viking_grid = viking_grid
if self.debug:
for cc in self.expansion_locations_list:
p = Point2((cc.position))
h2 = self.get_terrain_z_height(p)
h2 = 12
pos = Point3((p.x, p.y, h2))
size = 1.5
p0 = Point3((pos.x - size, pos.y - size, pos.z + 10)) # + Point2((0.5, 0.5))
p1 = Point3((pos.x + size, pos.y + size, pos.z - 10)) # + Point2((0.5, 0.5))
# print(f"Drawing {p0} to {p1}")
c = Point3((255, 0, 0))
self._client.debug_box_out(p0, p1, color=c)
for snapshot_in_memory in self.remembered_snapshots_by_tag.keys():
snapshot_info = self.remembered_snapshots_by_tag[snapshot_in_memory]
p | |
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import warnings
class grapher(object):
"""
A simple class covering typical use of generating graphs.
    Example of usage:
0) import class by using "from matgrapher import grapher"
1) create new object i.e. "gr = grapher.grapher()"
2) load labels "gr.loadLabels(label1, label2)"
3) load data "gr.loadData(x_data1, y_data1, x_data2, y_data2)"
4) generate graph "gr.generateGraph()"
5) remove loaded data "gr.destroyGraphTable()"
"""
def __init__(self):
self.x_table = []
self.y_table = []
self.contour_plots = []
self.point_table = [[], []]# x, y
self.point_colors = [[], []]# color (in hex or matplotlib), alpha
self.point_sizes = []
self.point_alpha_change = []
self.text_table = [[], []]
self.labels = []
self.xlim = []
self.ylim = []
self.linestyle = []
self.colors = []
self.show_label = []
self.graphTitle = "Graph"
self.axisNames = ["X Values", "Y Values"]
self.outputFilename = "output/file.png"
self.dpi = 300
self.plotSize = [15*1.5/2.54, 15*1.5/2.54]
self.showGrid = True
self.saveFile = True
self.showFigure = False
self.logscale = 'none'
def destroyGraphTable(self):
'''
Clean all data provided.
'''
for i in range(len(self.x_table)):
del self.x_table[0]
for i in range(len(self.y_table)):
del self.y_table[0]
for i in range(len(self.labels)):
del self.labels[0]
for i in range(len(self.linestyle)):
del self.linestyle[0]
for i in range(len(self.colors)):
del self.colors[0]
for i in range(len(self.point_table[0])):
del self.point_table[0][0]
del self.point_table[1][0]
for i in range(len(self.point_colors[0])):
del self.point_colors[0][0]
del self.point_colors[1][0]
for i in range(len(self.point_sizes)):
del self.point_sizes[0]
for i in range(len(self.point_alpha_change)):
del self.point_alpha_change[0]
for i in range(len(self.contour_plots)):
del self.contour_plots[0]
for i in range(len(self.text_table[0])):
del self.text_table[0][0]
del self.text_table[1][0]
return None
def loadLabels(self, label, *args):
'''
Load labels to internal array. Please provide them in order as x, y arguments were provided.
Arguments:
-> label (string) - label used in legend to describe a dataset,
-> *args (string, ...) - following labels if needed to load more than one in one step
'''
if(len(self.labels)+len(args)+1<len(self.x_table)):
warnings.warn(f"Not all data sets ({len(self.x_table)}) are labeled.")
self.labels.append(label)
self.show_label.append(True)
if(len(args)>0):
for i in range(len(args)):
self.labels.append(args[i])
self.show_label.append(True)
def loadData(self, x_argument, y_argument, *args):
'''
Load data to internal tables. Please provide it in pairs [x1, y1, x2, y2, ...]
Arguments:
-> x_argument ([float, ...]) - one dimensional array of x axis values,
-> y_argument ([float, ...]) - one dimensional array of y axis values.
'''
if(len(args)%2!=0):
warnings.warn(f"Expected equal ammount of x and y data sets. Got ({int((len(args)+1)/2)}) and ({int((len(args)-1)/2)}).")
self.x_table.append(x_argument.copy())
self.y_table.append(y_argument.copy())
if(len(args)>0):
for i in range(int(len(args)/2)):
self.x_table.append(args[2*i])
self.y_table.append(args[2*i+1])
def createContourPlot(self, fn, xlist, ylist):
X, Y = np.meshgrid(xlist, ylist)
Z = fn(X, Y)
contour = [X, Y, Z]
self.contour_plots.append(contour)
    def loadPoints(self, point, *args):
        '''
        Load points to internal table.
        If you wish to enable autocoloring of points, add "autocolor:[color of the point set],[opacity level]" string at the end of arguments.
        If opacity level is not provided, it will be assumed as 1.0.
        Arguments:
        -> point ([float, float]) - list containing point coordinates.
        '''
        # NOTE(review): point_sizes entries are only appended inside the
        # "len(args) > 0" branch below, so a call with a single point and no
        # extra arguments leaves point_sizes shorter than point_table --
        # confirm whether callers always pass extra args / a command string.
        autocolor_flag = False
        size_flag = False
        command_line = None #a command line at the end of arguments
        # Track the previously used alpha so alpha transitions can be
        # recorded in point_alpha_change.
        last_alpha = 0.0
        if(len(self.point_colors[1])>0):
            last_alpha = self.point_colors[1][-1]
        # `point` is either one [x, y] pair or a pair of coordinate arrays.
        if(not isinstance(point[0], list) and not isinstance(point[0], np.ndarray)):
            self.point_table[0].append(point[0])
            self.point_table[1].append(point[1])
        else:
            if(len(point[0])!=len(point[1])):
                warnings.warn("Warning! Point data columns not equal in length! Not all points may be included.")
            for i in range(min([len(point[0]), len(point[1])])):
                self.point_table[0].append(point[0][i])
                self.point_table[1].append(point[1][i])
        if(len(args)>0):
            #check for command line at the end of arguments
            if(type(args[-1])==str):
                # Commands are ';'-separated, e.g. "size:30;autocolor:#ff0000,0.5".
                command_line = args[-1].split(";")
                #check for autocolor at the end of args
                for cmd in command_line:
                    #if 'size' option is enabled
                    if('size' in cmd):
                        size = cmd.split(":")[-1]
                        try:
                            size = float(size)
                        except:
                            size = 20.0# autosizing, if user fails to provide correct size
                        # One size entry per point loaded from `point`.
                        if(isinstance(point[0], list) or isinstance(point[0], np.ndarray)):
                            for i in range(min([len(point[0]), len(point[1])])):
                                self.point_sizes.append(size)
                        else:
                            self.point_sizes.append(size)
                        size_flag = True
                    #if 'autocolor' option is enabled
                    if('autocolor' in cmd):
                        color = cmd.split(":")[-1]#remove 'autocolor' from the argument
                        # fixing autocolor string
                        if(color == '' or color == 'autocolor'):# if user forgot to add color after autocolor
                            color = '#4e4e4e,1.0'
                        elif('autocolor' in color):#if 'autcolor' could not be removed (user forgot to add ':')
                            color = color.split("autocolor")[-1]# remove 'autocolor' forcefully
                        else:
                            # checking if user stated opacity correctly
                            try:
                                float(color.split(',')[-1])
                            except:
                                color = color.split(',')[0]+',1.0'
                        autocolor_flag = True
                        #colouring if the basic argument were list
                        if(isinstance(point[0], list) or isinstance(point[0], np.ndarray)):
                            for i in range(min([len(point[0]), len(point[1])])):
                                self.point_colors[0].append(color.split(',')[0])
                                self.point_colors[1].append(float(color.split(',')[1]))
                                # Record the index at which the alpha value changed.
                                if(last_alpha!=self.point_colors[1][-1]):
                                    self.point_alpha_change.append(len(self.point_colors[1])-1)
                                    last_alpha = self.point_colors[1][-1]
                        else:
                            self.point_colors[0].append(color.split(',')[0])
                            self.point_colors[1].append(float(color.split(',')[1]))
                            if(last_alpha!=self.point_colors[1][-1]):
                                self.point_alpha_change.append(len(self.point_colors[1])-1)
                                last_alpha = self.point_colors[1][-1]
            if(size_flag == False):
                # No explicit size provided: fall back to the default 20.0.
                if(isinstance(point[0], list) or isinstance(point[0], np.ndarray)):
                    for i in range(min([len(point[0]), len(point[1])])):
                        self.point_sizes.append(20.0)
                else:
                    self.point_sizes.append(20.0)
            #add additional points defined in the arguments
            for i in range(len(args)):
                if(autocolor_flag == True):
                    # The trailing command string itself is not a point.
                    if(i == len(args)-1):
                        break
                    self.point_colors[0].append(color.split(',')[0])
                    self.point_colors[1].append(float(color.split(',')[1]))
                if(size_flag==True):
                    self.point_sizes.append(size)
                if(size_flag==False):
                    self.point_sizes.append(20.0)
                self.point_table[0].append(args[i][0])
                self.point_table[1].append(args[i][1])
def changePointColor(self, color, point_index, end_point_index = None):
'''
Change color of a point or set of points in internal tables. If the point index is uknown, provide its position in form of a list.
Arguments:
-> color (string) - color of the point,
-> point_index (int or [float, float]) - index number or position of the point,
-> end_point_index (int or [float, float]) - index number or position of the last point of the set.
'''
if(isinstance(point_index, list)):
pos = [var for var, val in enumerate(zip(self.point_table[0], self.point_table[1])) if val[0]==point_index[0] and val[1]==point_index[1]]
for p in pos:
self.point_colors[p] = color
if(end_point_index != None):
if(isinstance(end_point_index, list)):
end_pos = [var for var, val in enumerate(zip(self.point_table[0], self.point_table[1])) if val[0]==end_point_index[0] and val[1]==end_point_index[1]]
for i in range(min(pos), max(end_pos)):
self.point_colors[i] = color
else:
for i in range(min(pos), end_point_index):
self.point_colors[i] = color
else:
self.point_colors[point_index] = color
if(end_point_index != None):
if(isinstance(end_point_index, list)):
end_pos = [var for var, val in enumerate(zip(self.point_table[0], self.point_table[1])) if val[0]==end_point_index[0] and val[1]==end_point_index[1]]
for i in range(point_index, max(end_pos)):
self.point_colors[i] = color
else:
for i in range(point_index, end_point_index):
self.point_colors[i] = color
def setPointColor(self, color, alpha = 1.0):
'''
Set color and transparency of a point.
Arguments:
-> color (string) - color of the point,
-> alpha (float) - level of point transparency (between 0.0 and 1.0).
'''
self.point_colors[0].append(color)
self.point_colors[1].append(alpha)
def loadText(self, text, position, *args):
'''
Load positioned text into internal table. Please provide arguments in pairs of text1, position1, text2, position2, ...
Arguments:
-> text (string) - string to be displayed
-> point ([float, float]) - list containing text position
'''
if(len(args)%2==1):
warnings.warn(f"Warning! Data pairing (text, position) not complete. Expected even ammount of arguments, got odd. Last point will be set to [0.0, 0.0]")
self.text_table[0].append(text)
self.text_table[1].append(position)
if(len(args)>0):
for i in range(int(len(args)/2)):
if(not isinstance(args[2*i+1], list)):
warnings.warn("Warning! Wrong data type provided! Expected position as list, got "+str(type(args[2*i+1]))+". Aborting loading text.")
return None
self.text_table[0].append(args[2*i])
self.text_table[1].append(args[2*i+1])
if(i*2<len(args)-1 and i!=0):
self.text_table[0].append(args[-1])
self.text_table[1].append([0.0, 0.0])
def hideLabel(self, label_index=False, label=''):
'''
Hide label from graph legend.
Arguments:
-> label_index (int or bool) - index of label to be hidden. Assign 'False' if more than one label is to be hidden.
-> label (string) - label or labels to be hidden if share the same text.
'''
if(label!=''):
if(label in self.labels):
for i in range(len(self.labels)):
if(self.labels[i]==label):
self.show_label[i]=False
else:
warning.warn(f"Warning: No label {label} found!")
else:
if(label_index!=False):
try:
self.show_label[label_index] = False
except:
warning.warn(f"Warning: Could not hide label {label_index}. Size of label array is {len(self.show_label)}")
def loadLineStyles(self, linestyle, *args):
self.linestyle.append(linestyle)
if(len(args)>0):
for i in range(len(args)):
self.linestyle.append(args[i])
def changeLineStyle(self, index, newlinestyle, linestyle = '-'):
if(index == 'u'):
if(linestyle in self.linestyle):
for i in range(len(self.linestyle)):
if(self.linestyle==linestyle):
self.linestyle = newlinestyle
self.linestyle[index] = newlinestyle
def loadColor(self, color, *args):
self.colors.append(color)
if(len(args)>0):
for cl in args:
self.colors.append(cl)
def setAxisNames(self, X_axis, Y_axis):
self.axisNames = [X_axis, Y_axis]
def setGraphTitle(self, graph_title):
self.graphTitle = graph_title
def setFilename(self, filename):
self.outputFilename = filename
def setExportMethod(self, method):
'''
Sets method of exporting file.
0 - save, don't show
1 - don't save, show
2 - save and show
'''
if(method!=0 and method!=1 and method!=2):
warnings.warn(f"Warning: wrong export method provided: {method}. Falling back to method 1 (don't save, show)")
if(method==0):
self.saveFile = True
self.showFigure = False
if(method==1):
self.saveFile = False
self.showFigure = True
if(method==2):
self.saveFile = True
self.showFigure = True
def setGridVisibility(self, grid_visible):
self.showGrid = grid_visible
def setLogscaleMethod(self, logscale_method):
self.logscale = logscale_method
def generateGraph(self, data_x=None, data_y=None, axis_names=None, x_lim = None, y_lim = None, graph_title=None, line_styles=None, colors | |
layer names that will be visible
# group_off = a list of layer names that will not be visible
def layer_visibility(self, group_on, groups_off):
for fc in groups_off:
iface.setActiveLayer(fc)
node = self.tc_root.findLayer(iface.activeLayer().id())
node.setItemVisibilityChecked(False)
for fc in group_on:
iface.setActiveLayer(fc)
node = self.tc_root.findLayer(iface.activeLayer().id())
node.setItemVisibilityChecked(True)
# EXTENT_RECTANGLE METHOD: Identifies the extent of a QGIS vector layer
# INPUTS: data1 = a single feature from a vector layer
def extent_rectangle(self, data1):
#rectangle = QgsVectorLayer.extent(data1)
rectangle = data1.geometry().boundingBox()
rect = [rectangle.xMinimum(), rectangle.yMinimum(), rectangle.xMaximum(), rectangle.yMaximum()]
exnt_str = "{},{},{},{}".format(rect[0], rect[1], rect[2], rect[3])
return exnt_str
def add_field(self, layer, field_name, field_type, att_value):
layer.startEditing()
myField = QgsField(field_name, getattr(QVariant, field_type))
layer.addAttribute( myField )
idx = layer.fields().indexFromName( field_name )
for f in layer.getFeatures():
f[idx] = str(att_value)
layer.updateFeature( f )
layer.commitChanges()
def delete_field(self, layer, field_name):
idx = layer.fields().indexFromName( field_name )
if idx != (-1):
res = layer.dataProvider().deleteAttributes([idx])
layer.updateFields()
class FNBLine:
"""QGIS Plugin Implementation."""
def __init__(self, iface):
"""Constructor.
:param iface: An interface instance that will be passed to this class
which provides the hook by which you can manipulate the QGIS
application at run time.
:type iface: QgsInterface
"""
# Save reference to the QGIS interface
self.iface = iface
# initialize plugin directory
self.plugin_dir = os.path.dirname(__file__)
# initialize locale
locale = QSettings().value('locale/userLocale')[0:2]
locale_path = os.path.join(
self.plugin_dir,
'i18n',
'FNBLine_{}.qm'.format(locale))
if os.path.exists(locale_path):
self.translator = QTranslator()
self.translator.load(locale_path)
QCoreApplication.installTranslator(self.translator)
# Declare instance attributes
self.actions = []
self.menu = self.tr(u'&FNBLine')
# Check if plugin was started the first time in current QGIS session
# Must be set in initGui() to survive plugin reloads
self.first_start = None
self.dlg = FNBLineDialog()
# noinspection PyMethodMayBeStatic
def tr(self, message):
"""Get the translation for a string using Qt translation API.
We implement this ourselves since we do not inherit QObject.
:param message: String for translation.
:type message: str, QString
:returns: Translated version of message.
:rtype: QString
"""
# noinspection PyTypeChecker,PyArgumentList,PyCallByClass
return QCoreApplication.translate('FNBLine', message)
def add_action(
self,
icon_path,
text,
callback,
enabled_flag=True,
add_to_menu=True,
add_to_toolbar=True,
status_tip=None,
whats_this=None,
parent=None):
"""Add a toolbar icon to the toolbar.
:param icon_path: Path to the icon for this action. Can be a resource
path (e.g. ':/plugins/foo/bar.png') or a normal file system path.
:type icon_path: str
:param text: Text that should be shown in menu items for this action.
:type text: str
:param callback: Function to be called when the action is triggered.
:type callback: function
:param enabled_flag: A flag indicating if the action should be enabled
by default. Defaults to True.
:type enabled_flag: bool
:param add_to_menu: Flag indicating whether the action should also
be added to the menu. Defaults to True.
:type add_to_menu: bool
:param add_to_toolbar: Flag indicating whether the action should also
be added to the toolbar. Defaults to True.
:type add_to_toolbar: bool
:param status_tip: Optional text to show in a popup when mouse pointer
hovers over the action.
:type status_tip: str
:param parent: Parent widget for the new action. Defaults None.
:type parent: QWidget
:param whats_this: Optional text to show in the status bar when the
mouse pointer hovers over the action.
:returns: The action that was created. Note that the action is also
added to self.actions list.
:rtype: QAction
"""
icon = QIcon(icon_path)
action = QAction(icon, text, parent)
action.triggered.connect(callback)
action.setEnabled(enabled_flag)
if status_tip is not None:
action.setStatusTip(status_tip)
if whats_this is not None:
action.setWhatsThis(whats_this)
if add_to_toolbar:
# Adds plugin icon to Plugins toolbar
self.iface.addToolBarIcon(action)
if add_to_menu:
self.iface.addPluginToMenu(
self.menu,
action)
self.actions.append(action)
return action
def initGui(self):
"""Create the menu entries and toolbar icons inside the QGIS GUI."""
icon_path = ':/plugins/FN_BLine/icon.png' #must reload resource file if icon is changed using OSGeo4WShell
self.add_action(
icon_path,
text=self.tr(u'New2Q-Reports'),
callback=self.run,
parent=self.iface.mainWindow())
# will be set False in run()
self.first_start = True
def unload(self):
"""Removes the plugin menu item and icon from QGIS GUI."""
for action in self.actions:
self.iface.removePluginMenu(
self.tr(u'&FNBLine'),
action)
self.iface.removeToolBarIcon(action)
def select_output_file(self):
filename = QFileDialog.getSaveFileName(self.dlg, "Select output file ","", '*.txt')
self.dlg.path_input.setText(filename)
def run(self):
"""Run method that performs all the real work"""
# Create the dialog with elements (after translation) and keep reference
# Only create GUI ONCE in callback, so that it will only load when the plugin is started
if self.first_start == True:
self.first_start = False
self.dlg = FNBLineDialog()
con_filename = #~~~~~~~~~~~~~INSERT config.ini FILE PATH AND FILE NAME HERE ~~~~~~~~~~~~~~~~~~~~~
con_section = #~~~~~~~~~~~~~INSERT config.ini SECTION NAME HERE ~~~~~~~~~~~~~~~~~~~~~
db_config = config(con_filename, con_section)
#set database connection information in plugin gui from congif.ini file
self.dlg.database_input.setPlaceholderText(db_config['database'])
self.dlg.host_input.setPlaceholderText(db_config['host'])
self.dlg.port_input.setPlaceholderText(db_config['port'])
# show the dialogdb_config['host']
self.dlg.show()
# Run the dialog event loop
result = self.dlg.exec_()
# See if OK was pressed
if result:
try:
progressMessageBar = iface.messageBar().createMessage("If software appears frozen, do not touch -tool is still working! Progress Status:")
progress = QProgressBar()
progress.setMaximum(20)
progress.setAlignment(Qt.AlignLeft|Qt.AlignVCenter)
progressMessageBar.layout().addWidget(progress)
iface.messageBar().pushWidget(progressMessageBar, level=0)
# get parameters
iface.mainWindow().blockSignals(True) #turns off CRS dialog box when creating layers
aoi = self.dlg.path_input.text()
aoi_wc = self.dlg.query_input.text()
aoi_name = aoi
if aoi == '': #grabs loaded qgs vector layer if no path input is given
aoi_name = str(self.dlg.vector_input.currentText())
for child in QgsProject.instance().layerTreeRoot().children():
if child.layer().name() == aoi_name:
aoi = child.layer()
if isinstance(aoi, QgsVectorLayer) is True: # Ends process if AOI is a point or line
if aoi.wkbType() == QgsWkbTypes.Point:
raise TypeError('Area of Interest cannot be a point geometry')
if aoi.wkbType() == QgsWkbTypes.LineString:
raise TypeError('Area of Interest cannot be a line geometry')
if isinstance(aoi, QgsVectorLayer) is False:# Ends process if AOI is raster or is non existent
if isinstance(aoi, QgsRasterLayer) is True:
raise TypeError('Area of Interest cannot be a raster layer')
if aoi == '':
raise NameError('Area of Interest does not exist')
database = self.dlg.database_input.text()
host = self.dlg.host_input.text()
port = self.dlg.port_input.text()
user = self.dlg.username_input.text()
password = self.dlg.password_input.text()
progress.setValue(1)
#check the database connection and if there is no input by user, default to config.ini file
input_list= {'database':database, 'host': host, 'port': port}
for item in input_list:
if input_list[item] == '':
input_list[item] = db_config[item]
database = input_list['database']
host = input_list['host']
port = input_list['port']
pdf_output = self.dlg.pdf_input.text()
if pdf_output == '':
try:
username = os.getenv('username')
pdf_output = os.path.join(r'C:\Users\{}\AppData\Local\Temp'.format(username), aoi_name)
if not os.path.exists(pdf_output):
os.mkdir(pdf_output)
else:
raise NameError("output name already exists")
except:
pdf_output = r'C:\Users\{}\AppData\Local\Temp\PDF_Output'.format(username)
if not os.path.exists(pdf_output):
os.mkdir(pdf_output)
#test the database connection by building a BCGW vector layer if it fails script breaks
test_Oracle_data = 'WHSE_ADMIN_BOUNDARIES.EBC_PROV_ELECTORAL_DIST_SVW'
test_Oracle_data_geom_col = 'SHAPE'
test_Oracle_data_unique_col = 'OBJECTID'
schema,table = test_Oracle_data.split('.')
uri = QgsDataSourceUri()
uri.setConnection(host, port, database, user, password)
uri.setDataSource(schema, table, test_Oracle_data_geom_col)
uri.setKeyColumn(test_Oracle_data_unique_col)
test_connection = QgsVectorLayer(uri.uri(), "LAYER_NAME", 'oracle')
if test_connection.isValid() is False:
QgsMessageLog.logMessage('Database connection error - confirm database, host and port are accurate', 'Messages')
sys.exit(1)
#create o2q object
test = o2q(database, host, user, port, password)
progress.setValue(2)
#change project coordinate system to bc albers
QgsProject.instance().setCrs(QgsCoordinateReferenceSystem('EPSG:3005'))
#build aoi
source, aoi_base = test.format_checker(aoi, aoi_wc)
symbol = QgsFillSymbol.createSimple({'line_style': 'solid', 'color': '255,0,0,0', 'color_border':'#ff0000', 'width_border':'1'})
aoi_base.renderer().setSymbol(symbol)
aoi_base.triggerRepaint()
iface.layerTreeView().refreshLayerSymbology(aoi_base.id())
#create buffer for arch sites 50m outside of aoi
buf = test.buffer(aoi_base, 50)
dif = test.difference(buf, aoi_base)
progress.setValue(3)
# creates dictionary of all fc to compare aoi too
csv_path #~~~~~~~~~~~~~~~~~~~~~~~INSERT CSV PATH HERE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
fc_dic = parse_csv(csv_path)
#build objects for html table
html_counter = 0
html_dic = {}
group_dic = {}
obj_list = []
group_list = []
for obj in fc_dic: #get all unique group numbers from csv and build it into a list of all numbers
dic = (fc_dic[obj])
if dic['Grouping'] not in group_list:
group_list.append(dic['Grouping'])
for group in group_list: #add each group to the html_dic and group_dic
groupkey = 'g{}'.format(group)
html_dic[groupkey] = {'pg1': 'None'}
group_dic[groupkey] = [aoi_base]
globals()['makepdf_gr%s' % group] = pdf_maker_QGIS()
obj_list.append(globals()['makepdf_gr%s' % group])
#add aoi to html table by adding field
text_table = {}
count = aoi_base.featureCount()
test.add_field(aoi_base, 'layer', 'String', att_value=aoi_base.name())
sum_type, sum_num = summation(aoi_base, aoi_base.name(), special = 'aoi')
for item in obj_list:
group_key = [k for k, v in globals().items() if v is item][0][-1]
(item).html_combiner(html_dic, html_counter, sum_type, sum_num, group_key, ('AOI: ' + aoi_base.name()), count, text_table)
html_counter += 1
progress.setValue(4)
#BEGIN INTERSECTIONS
#intersect AOI with layers of interest and produce a breakdown table that will be added to the pdf
sorted_fc_dic = sorted(fc_dic, key=lambda x: (fc_dic[x]['Grouping']))
progress_count = len(sorted_fc_dic)
i = | |
[{'locale' : 'en', 'text' : 'Validated by a medical structure outside France'},{'locale' : 'es', 'text' : 'Validado por una autoridad francesa'},{'locale' : 'it', 'text' : 'Convalidato da autorità diversa da francese'},{'locale' : 'tr', 'text' : ''},{'locale' : 'de', 'text' : '-'}, {'locale' : 'fr', 'text' : 'Validé par autorité autre que française' }]},
{'name' : u'VALIDE_PAR_AUTORITE_FR', 'choice_title' : [{'locale' : 'en', 'text' : 'Validated by a French medical structure'},{'locale' : 'es', 'text' : 'Validado por una autoridad otra que francesa'},{'locale' : 'it', 'text' : 'Non precisato'},{'locale' : 'tr', 'text' : ''},{'locale' : 'de', 'text' : '-'}, {'locale' : 'fr', 'text' : 'Validé par autorité française' }]},
{'name' : u'NR', 'choice_title' : [{'locale' : 'en', 'text' : 'NR'},{'locale' : 'es', 'text' : 'NR'},{'locale' : 'it', 'text' : 'Non precisato'},{'locale' : 'tr', 'text' : ''},{'locale' : 'de', 'text' : '-'}, {'locale' : 'fr', 'text' : 'Non renseigné' }]},
],
},
],
"app_version" : version,
"method" : "GET",
"help" : u"define the endpoint to get data for : filters in search navbar",
"apiviz_front_uuid" : uuid_models["uuid_covid"],
"is_default" : True
},
### DATA TABLE
{ "field" : "tl_data_API_table",
"is_visible" : True,
"is_disabled" : False,
"data_type" : "data",
"endpoint_type" : "table",
"dataset_uri" : "diy",
"content" : u"apiviz default API endpoint for list results",
"root_url" : "https://solidata-api.co-demos.com/api/dsi/infos/get_one/5e8356f5328ed7125048b0b6", ## V2
"args_options" : [
{ "app_arg" : "dataToken", "arg" : "token", "optional" : True, "in" : ["url","header"], "default" : "", "type": "str" },
{ "app_arg" : "page", "arg" : "page", "optional" : True, "in" : ["url"], "default" : 1, "type": "int" },
{ "app_arg" : "perPage", "arg" : "per_page", "optional" : True, "in" : ["url"], "default" : 25, "type": "int", "authorized" : [10, 25, 50, 100, 200, 300] },
{ "app_arg" : "sortBy", "arg" : "sort_by", "optional" : True, "in" : ["url"], "default" : "NOM_TL", "type": "str" },
{ "app_arg" : "sortIsDescending", "arg" : "descending", "optional" : False, "in" : ["url"], "default" : False, "type": "bool" },
{ "app_arg" : "query", "arg" : "search_for", "optional" : True, "in" : ["url"], "default" : "", "type": "str" },
{ "app_arg" : "filters", "arg" : "search_filters", "optional" : True, "in" : ["url"], "default" : "", "type": "str" },
{ "app_arg" : "shuffleSeed", "arg" : "shuffle_seed", "optional" : True, "in" : ["url"], "default" : None , "type": "int" },
],
"resp_fields" : {
"projects" : { "resp_format" : "list", "path" : "data/data_raw/f_data" },
"total" : { "resp_format" : "int", "path" : "data/data_raw/f_data_count" },
},
"app_version" : version,
"method" : "GET",
"help" : u"define the endpoint to get data for : a view list",
"apiviz_front_uuid" : uuid_models["uuid_covid"],
"is_default" : True
},
### DATA LIST
{ "field" : "tl_data_API_list",
"is_visible" : True,
"is_disabled" : False,
"data_type" : "data",
"endpoint_type" : "list",
"dataset_uri" : "diy",
"content" : u"apiviz default API endpoint for list results",
"root_url" : "https://solidata-api.co-demos.com/api/dsi/infos/get_one/5e8356f5328ed7125048b0b6", ## V2
"args_options" : [
{ "app_arg" : "dataToken", "arg" : "token", "optional" : True, "in" : ["url","header"], "default" : "", "type": "str" },
{ "app_arg" : "page", "arg" : "page", "optional" : True, "in" : ["url"], "default" : 1, "type": "int" },
{ "app_arg" : "perPage", "arg" : "per_page", "optional" : True, "in" : ["url"], "default" : 300, "type": "int", "authorized" : [10, 25, 50, 100, 200, 300] },
{ "app_arg" : "query", "arg" : "search_for", "optional" : True, "in" : ["url"], "default" : "", "type": "str" },
{ "app_arg" : "filters", "arg" : "search_filters", "optional" : True, "in" : ["url"], "default" : "", "type": "str" },
{ "app_arg" : "shuffleSeed","arg" : "shuffle_seed", "optional" : True, "in" : ["url"], "default" : None , "type": "int" },
],
"resp_fields" : {
"projects" : { "resp_format" : "list", "path" : "data/data_raw/f_data" },
"total" : { "resp_format" : "int", "path" : "data/data_raw/f_data_count" },
},
"app_version" : version,
"method" : "GET",
"help" : u"define the endpoint to get data for : a view list",
"apiviz_front_uuid" : uuid_models["uuid_covid"],
"is_default" : True
},
### DATA DETAIL
{ "field" : "tl_data_API_detail",
"is_visible" : True,
"is_disabled" : False,
"data_type" : "data",
"endpoint_type" : "detail",
"dataset_uri" : "diy",
"content" : u"apiviz default API endpoint for detailled results",
"root_url" : "https://solidata-api.co-demos.com/api/dsi/infos/get_one/5e8356f5328ed7125048b0b6", ## V2
"args_options" : [
{ "app_arg" : "dataToken", "arg" : "token", "optional" : True, "in" : ["url","header"], "default" : "", "type": "str" },
{ "app_arg" : "itemId", "arg" : "item_id", "optional" : False, "in" : ["url"], "default" : "", "type": "str" },
],
"resp_fields" : {
"projects" : { "resp_format" : "list", "path" : "data/data_raw/f_data" },
"total" : { "resp_format" : "int", "path" : "data/data_raw/f_data_count" },
},
"app_version" : version,
"method" : "GET",
"help" : u"define the endpoint to get data for : a detailled data",
"apiviz_front_uuid" : uuid_models["uuid_covid"],
"is_default" : True
},
### DATA EXPORT
{ "field" : "tl_data_API_export",
"is_visible" : True,
"is_disabled" : False,
"data_type" : "data",
"endpoint_type" : "export",
"dataset_uri" : "diy",
"content" : u"apiviz default API endpoint for export results",
"root_url" : "https://solidata-api.co-demos.com/api/dsi/exports/as_csv/5e8356f5328ed7125048b0b6", ## V2
"args_options" : [
{ "app_arg" : "dataToken", "arg" : "token", "optional" : True, "in" : ["url","header"], "default" : "", "type": "str" },
# { "app_arg" : "page", "arg" : "page_n", "optional" : True, "in" : ["url"], "default" : 1, "type": "int" },
# { "app_arg" : "perPage", "arg" : "per_page", "optional" : True, "in" : ["url"], "default" : 300, "type": "int" },
# { "app_arg" : "query", "arg" : "search_for", "optional" : True, "in" : ["url"], "default" : "", "type": "str" },
# { "app_arg" : "filters", "arg" : "search_filters", "optional" : True, "in" : ["url"], "default" : "", "type": "str" },
# { "app_arg" : "shuffleSeed","arg" : "shuffle_seed", "optional" : True, "in" : ["url"], "default" : 205 , "type": "int" },
],
"resp_fields" : {
# "projects" : { "resp_format" : "list", "path" : "data/data_raw/f_data" },
# "total" : { "resp_format" : "int", "path" : "data/data_raw/f_data_count" },
},
"app_version" : version,
"method" : "GET",
"help" : u"define the endpoint to get data for : export dataset as csv",
"apiviz_front_uuid" : uuid_models["uuid_covid"],
"is_default" : True
},
####### KIFEKOI #######
### DATA FILTERS
{ "field" : "tl_data_API_filters",
"is_visible" : True,
"is_disabled" : False,
"data_type" : "data",
"endpoint_type" : "filters",
"dataset_uri" : "kifekoi",
"available_views" : ['VIEW_LIST', 'VIEW_MAP'],
"has_shuffle" : False,
"has_pagination" : False,
"has_export" : True,
"pagination_options" : {
"per_page" : [ 5, 10, 25, 100 ],
},
"has_infinite_scroll" : True,
"has_order_by" : False,
"order_by_options" : {
"order_by_list" : [],
},
"placeholder" : [
{"locale" : "en", "text" : "Enter the name of a producer"},{"locale" : "es", "text" : "Entra el nombre de un productor"},{"locale" : "it", "text" : "Digita qui il cognome di un produttore"}, {"locale" : "tr", "text" : "yapılmamış"},{"locale" : "de", "text" : "ungemacht"}, {"locale" : "fr", "text" : "Tapez le nom d'un producteur" }
],
"items_found" : [
{"locale" : "en", "text" : "producers found"},{"locale" : "es", "text" : "productores"},{"locale" : "it", "text" : "produttore"}, {"locale" : "tr", "text" : "yapılmamış"},{"locale" : "de", "text" : "ungemacht"}, {"locale" : "fr", "text" : "producteurs trouvés" }
],
"stats_text" : [
{"locale" : "en", "text" : "experimental"},{"locale" : "es", "text" : "experimental"},{"locale" : "it", "text" : "experimental"}, {"locale" : "tr", "text" : "yapılmamış"},{"locale" : "de", "text" : "ungemacht"}, {"locale" : "fr", "text" : "expérimental" }
],
"reset" : [
{"locale" : "en", "text" : "reset"},{"locale" : "es", "text" : "reiniciar"},{"locale" : "it", "text" : "ripristina"}, {"locale" : "tr", "text" : "yapılmamış"},{"locale" : "de", "text" : "ungemacht"}, {"locale" : "fr", "text" : "effacer" }
],
"content" : u"apiviz default API endpoint for navbar filters",
"root_url" : "https://solidata-api.co-demos.com/api/dsi/infos/get_one/5e90aef2328ed74e1eea498e",
"args_options" : [
{ "app_arg" : "dataToken", "arg" : "token", "optional" : True, "in" : ["url","header"], "default" : "", "type": "str" },
{ "app_arg" : "filtersList", "arg" : "get_filters", "optional" : False, "in" : ["url"], "default" : True, "type": "bool" }, # also working with dsi?
{ "app_arg" : "filterChoices", "arg" | |
Does the standard V1 subnetowrk synthesis.
:param username: The user/users to synthesize for. If None, we group
synthesize across all. If a single user, we sythesize for that user
across all. If it is a list, we synthesize for the group that is that
list of users.
:return: Nothing
'''
# First we need our global priorities
pris = self.global_priority(username)
# Next we need the alternative priorities from each subnetwork
subnets = {}
node:ANPNode
for node in self.node_objs_with_subnet():
p = node.subnetwork.priority(username, ptype)
if node.invert:
p = self.invert_priority(p)
subnets[node.name]=p
rval = self.synthesize_combine(pris, subnets)
if ptype is not None:
rval = ptype.apply(rval)
return rval
def node_objs_with_subnet(self):
"""
:return: List of ANPNode objects in this network that have v1 subnets
"""
return [node for node in self.node_objs() if node.subnetwork is not None]
def invert_priority(self, p):
"""
Makes a copy of the list like element p, and inverts. The current
standard inversion is 1-p. There could be others implemented later.
:param p: The list like to invert
:return: New list-like of same type as p, with inverted priorities
"""
rval = deepcopy(p)
for i in range(len(p)):
rval[i] = 1 - rval[i]
return rval
def synthesize_combine(self, priorities:pd.Series, alt_scores:dict):
"""
Performs the actual sythesis step from anp v1 synthesis.
:param priorities: Priorities of the subnetworks
:param alt_scores: Alt scores as dictionary, keys are subnetwork names
values are Series whose keys are alt names.
:return: Series whose keys are alt names, and whose values are the
synthesized scores.
"""
return self.subnet_formula(priorities, alt_scores)
def cluster_prioritizer(self, wrtcluster=None):
"""
Gets the prioritizer for the clusters wrt a given cluster.
:param wrtcluster: WRT cluster identifier as expected by cluster_obj() function.
If None, then we return a dictionary indexed by cluster names and values
are the prioritizers
:return: THe prioritizer for that cluster, or a dictionary of all cluster
prioritizers
"""
if wrtcluster is not None:
cluster = self.cluster_obj(wrtcluster)
return cluster.prioritizer
else:
rval = {}
for cluster in self.cluster_objs():
rval[cluster.name] = cluster.prioritizer
return rval
    def to_excel(self, fname):
        """
        Exports this network to an Excel workbook with three sheets:
        "struct" (one column of node names per cluster), "connection"
        (the node connection matrix), and "votes" (the comparison names).

        :param fname: The string file name of the Excel file to write.
        :return: Nothing
        """
        struct = pd.DataFrame()
        cluster:ANPCluster
        writer = pd.ExcelWriter(fname, engine='openpyxl')
        for cluster in self.cluster_objs():
            cluster_name = cluster.name
            # The alternatives cluster is flagged with a leading "*" so it
            # can be recognized when the sheet is read back in.
            if cluster == self.alts_cluster:
                cluster_name = "*"+str(cluster_name)
            struct[cluster_name] = cluster.node_names()
        struct.to_excel(writer, sheet_name="struct", index=False)
        # Now the node connections
        mat = self.node_connection_matrix()
        pd.DataFrame(mat).to_excel(writer, sheet_name="connection", index=False, header=False)
        # Lastly let's write just the comparison structure
        cmp = self.data_names()
        pd.DataFrame({"":cmp}).to_excel(writer, sheet_name="votes", index=False, header=True)
        # NOTE(review): ExcelWriter.save() was deprecated and later removed in
        # newer pandas releases where close() alone suffices -- confirm the
        # pandas version this project pins.
        writer.save()
        writer.close()
def cluster_incon_std_df(self, user_infos=None) -> pd.DataFrame:
"""
:param user_infos: A list of users to do this for, if None is a part
of this list, it means group average. If None, it defaults to
None plus all users.
:return: DataFrame whose columns are clusters, rows
are users (as controlled by user_infos params) and the value is
the inconsistency for the given user on the given comparison.
"""
if user_infos is None:
user_infos = list(self.user_names())
user_infos.insert(0, None)
rval = pd.DataFrame()
# We need the name for the group (i.e. None) to be something useful)
for cluster, pw in self.cluster_prioritizer().items():
if isinstance(pw, Pairwise):
incon = [pw.incon_std(user) for user in user_infos]
rval[cluster] = pd.Series(incon, index=user_infos)
if None in rval.index:
rval = rval.rename(
lambda x: x if x is not None else "Group Average")
return rval
def node_incon_std_df(self, user_infos=None)->pd.DataFrame:
"""
:param user_infos: A list of users to do this for, if None is a part
of this list, it means group average. If None, it defaults to
None plus all users.
:return: DataFrame whose columns are (node,cluster) pairs, rows
are users (as controlled by user_infos params) and the value is
the inconsistency for the given user on the given comparison.
"""
if user_infos is None:
user_infos = list(self.user_names())
user_infos.insert(0, None)
rval = pd.DataFrame()
# We need the name for the group (i.e. None) to be something useful)
for info, pw in self.node_prioritizer().items():
if isinstance(pw, Pairwise):
incon = [pw.incon_std(user) for user in user_infos]
rval[info] = pd.Series(incon, index=user_infos)
if None in rval.index:
rval = rval.rename(lambda x: x if x is not None else "Group Average")
return rval
    def set_pairwise_from_supermatrix(self, mat, username="Imported"):
        """
        Sets up all pairwise comparisons from supermatrix
        :param mat: As numpy array
        :param username: The user name the imported votes are recorded under;
            the user is created if unknown.
        :return: Nothing
        """
        node_names = self.node_names()
        nnodes = len(node_names)
        ## Handle node pairwise comparisons first
        for wrtnode_pos in range(nnodes):
            wrtnode = node_names[wrtnode_pos]
            # Column offsets of each cluster's node block; rebuilt on every
            # wrtnode pass (identical each time) and reused below after the
            # loop, where it holds the values from the final pass.
            offset=0
            cluster_offsets = []
            for cluster in self.cluster_names():
                cluster_nodes = self.node_names(cluster)
                npri:Pairwise
                npri = self.node_prioritizer(wrtnode, cluster)
                if npri is not None and isinstance(npri, Pairwise):
                    nclusternodes=len(cluster_nodes)
                    # Vote each unordered pair of nodes in this cluster: the
                    # pairwise value is the ratio of their supermatrix
                    # entries in the wrtnode column.
                    for node_row_pos in range(nclusternodes):
                        for node_col_pos in range(node_row_pos+1, nclusternodes):
                            rownode = cluster_nodes[node_row_pos]
                            colnode = cluster_nodes[node_col_pos]
                            vr = mat[offset+node_row_pos, wrtnode_pos]
                            vc = mat[offset+node_col_pos, wrtnode_pos]
                            #print("wrt="+wrtnode+" "+str(vr)+", "+str(vc)+": "+rownode+", "+colnode)
                            # Zero entries carry no preference information, so
                            # skip pairs where either side is zero.
                            if vr!=0 and vc!= 0:
                                val = vr/vc
                                npri.vote(username, rownode, colnode, val, createUnknownUser=True)
                cluster_offsets.append(range(offset, offset+len(cluster_nodes)))
                offset+=len(cluster_nodes)
        ## Handle cluster pairwise comparisons now
        cluster_names = self.cluster_names()
        nclusters = len(cluster_names)
        for wrt_cluster_pos in range(nclusters):
            # Average the columns belonging to the wrt cluster's nodes, then
            # sum those averages over each other cluster's node rows to get a
            # per-cluster priority estimate.
            node_range = cluster_offsets[wrt_cluster_pos]
            matrix_cols:np.ndarray
            matrix_cols = mat[:,node_range]
            avg_cols = matrix_cols.mean(axis=1)
            cluster_pris = np.array([0.0]*nclusters)
            for other_cluster_pos in range(nclusters):
                cluster_pris[other_cluster_pos]=0
                for node_pos in cluster_offsets[other_cluster_pos]:
                    cluster_pris[other_cluster_pos]+=avg_cols[node_pos]
            #Now we have cluster priorities, now we can compare
            cpri:Pairwise
            cpri = self.cluster_obj(wrt_cluster_pos).prioritizer
            for row_cluster_pos in range(nclusters):
                for col_cluster_pos in range(row_cluster_pos+1, nclusters):
                    rowcluster = cluster_names[row_cluster_pos]
                    colcluster = cluster_names[col_cluster_pos]
                    vr = cluster_pris[row_cluster_pos]
                    vc = cluster_pris[col_cluster_pos]
                    if vr!=0 and vc!=0:
                        val = vr/vc
                        cpri.vote(username, rowcluster, colcluster, val, createUnknownUser=True)
def unscaled_structurematrix(self, username=None, as_df=False, add_self_connections=False):
rval = self.unscaled_supermatrix(username=username)
for row in rval:
for i in range(len(row)):
if row[i] != 0:
row[i] = 1
if add_self_connections:
for i in range(len(rval)):
row = rval[i]
if len(row) > i:
row[i] = 1
return rval
def scaled_structurematrix(self, username=None, as_df=False):
rval = self.unscaled_structurematrix(username=username, as_df=False)
normalize(rval, inplace=True)
return self._node_matrix_as_df(rval, as_df)
def limit_structurematrix(self, username=None, as_df=False):
rval = self.scaled_structurematrix(username=username, as_df=as_df)
rval = self.limitcalc(rval)
return self._node_matrix_as_df(rval, as_df)
def structure_global_priority(self, username=None):
lm = self.limit_structurematrix(username)
rval = priority_from_limit(lm)
node_names = self.node_names()
return pd.Series(data=rval, index=node_names)
def _node_matrix_as_df(self, matrix, as_df=False):
if not as_df:
return matrix
else:
return matrix_as_df(matrix, self.node_names())
def structure_priority(self, username=None, ptype:PriorityType=None, alt_names=None)->pd.Series:
'''
'''
if ptype is None:
# Use the default priority type for this network
ptype = self.default_priority_type
gp = self.structure_global_priority(username)
if alt_names is None:
alt_names = self.alt_names()
rval = gp[alt_names]
if sum(rval) != 0:
rval /= sum(rval)
if ptype is not None:
rval = ptype.apply(rval)
return rval
def structure_cluster_priority(self, username=None, ptype:PriorityType=None, mean=False)->pd.Series:
gp = self.structure_global_priority(username)
cluster_names = self.cluster_names()
nclusters = self.nclusters()
rval = pd.Series(data=[0.0]*nclusters, index=cluster_names)
for cluster in cluster_names:
count=0
for node in self.node_names(cluster):
rval[cluster]+=gp[node]
count+=1
if mean and count > 0:
rval[cluster]/=count
return rval
__PW_COL_REGEX = re.compile('\\s+vs\\s+.+\\s+wrt\\s+')
def is_pw_col_name(col:str)->bool:
    """
    Checks whether a column title follows the pairwise-comparison naming
    convention, i.e. "A vs B wrt C".

    :param col: The title of the column to check
    :return: T/F
    """
    # Non-string junk (None or NaN cells coming from pandas) can never match.
    if col is None:
        return False
    if isinstance(col, (float, int)) and np.isnan(col):
        return False
    return __PW_COL_REGEX.search(col) is not None
__RATING_COL_REGEX = re.compile('\\s+wrt\\s+')
def is_rating_col_name(col:str)->bool:
    """
    Checks whether a column title follows the rating-data naming
    convention, i.e. "A wrt B" (and is not a pairwise "A vs B wrt C").

    :param col: The name of the column
    :return: T/F
    """
    if col is None:
        return False
    if isinstance(col, (float, int)) and np.isnan(col):
        return False
    # Pairwise titles also contain " wrt ", so exclude those first.
    if is_pw_col_name(col):
        return False
    return __RATING_COL_REGEX.search(col) is not None
def anp_manual_scales_from_excel(anp:ANPNetwork, excel_fname):
    """
    Parses manual rating scales from an Excel file
    :param anp: The model to put the scale values in.
    :param excel_fname: The string file name of the excel file with the data
    :return: Nothing
    """
    xl = pd.ExcelFile(excel_fname)
    if "scales" not in xl.sheet_names:
        # We have no scales, do nothing
        return
    # Scales exist, read in
    df = xl.parse(sheet_name="scales")
    for scale_info in df:
        # See if it has a wrt and whatnot
        # Column headers are expected to look like "Cluster wrt Node";
        # anything else is silently skipped.
        pieces = scale_info.split(" wrt ")
        if len(pieces) == 2:
            # Found one
            cluster = pieces[0].strip()
            wrtnode = pieces[1].strip()
            scale_data = {}
            # Each cell is expected to be "word=value"; word is lowercased.
            # NOTE(review): a cell containing more than one '=' raises here
            # -- confirm the sheet format guarantees a single '='.
            for item in df[scale_info]:
                name, val = str(item).split("=")
                name = name.lower().strip()
                val = float(val)
                scale_data[name]=[val]
            rating:Rating
            # presumably the prioritizer here is always a Rating for scale
            # columns; verify against how the model was built.
            rating = anp.node_prioritizer(wrtnode, cluster)
            #print(scale_data)
            rating.set_word_eval(scale_data)
    # We are done!
def anp_from_excel(excel_fname:str)->ANPNetwork:
"""
Parses an excel file to get an ANPNetwork
:param excel_fname: The name of the excel file
:return: The newly created ANPNetwork | |
from __future__ import annotations
import datetime
import os
import shutil
import typing
from typing import Union, Any, IO, Type, Optional, List, Tuple
import dill
class FileCheck:
    """Read-only predicates about paths on disk."""
    @staticmethod
    def exists(full_path: str) -> bool:
        """True when the path exists (file, directory, or resolvable link)."""
        return os.path.exists(full_path)
    @staticmethod
    def has_quarry(full_path: str, quarry: str) -> bool:
        """ prec: file is a valid path, quarry is a string
            postc: returns True if the quarry is in the file, false otherwise"""
        content: Optional[str] = FileIO.read(full_path)
        return bool(content) and quarry in content
    @staticmethod
    def is_directory(full_path: str) -> bool:
        """True when the path is an existing directory."""
        return os.path.isdir(full_path)
    @staticmethod
    def is_file(full_path: str) -> bool:
        """True when the path is an existing regular file."""
        return os.path.isfile(full_path)
    @staticmethod
    def is_link(full_path: str) -> bool:
        """True when the path is a symbolic link."""
        return os.path.islink(full_path)
class FileCount:
    """Counting helpers (characters, lines, words) for text files."""
    @staticmethod
    def characters(full_path: str) -> Optional[int]:
        """Total number of characters, or None when the path is missing."""
        if not FileCheck.exists(full_path):
            return None
        total: int = 0
        with FileIO.open(full_path) as file:
            for line in file:
                total += len(line)
        return total
    @staticmethod
    def lines(full_path: str) -> Optional[int]:
        """Number of lines, or None when the file is missing or empty."""
        file_lines: Optional[List[str]] = FileTransform.lines(full_path)
        return len(file_lines) if file_lines else None
    @staticmethod
    def words(full_path: str) -> Optional[int]:
        """Number of space-separated words, or None when missing or empty."""
        file_words: Optional[List[str]] = FileTransform.words(full_path)
        return len(file_words) if file_words else None
class FileDirectories:
    """Directory-level operations: listing, creating, clearing, deleting."""
    @staticmethod
    def clear_all(directory: str) -> None:
        """Recursively removes everything inside the directory (kept itself)."""
        FileDirectories._raise_error_if_file(directory)
        FileDirectories.clear(directory, recursive=True)
    @staticmethod
    def clear(directory: str, recursive: bool = False) -> None:
        """
        Empties a directory.

        When recursive, removes every immediate child (files, links, and
        whole subtrees); otherwise removes only the directory's files.
        Does nothing when the directory does not exist.
        """
        FileDirectories._raise_error_if_file(directory)
        if not FileCheck.exists(directory):
            return
        if recursive:
            children: typing.List[str] = FileDirectories.children(directory, include_full_path=True)
            for full_path in children:
                if FileCheck.is_link(full_path):
                    FileIO.unlink(full_path)
                elif FileCheck.is_file(full_path):
                    FileIO.delete(full_path)
                else:
                    FileDirectories.delete(full_path, recursive=True)
        else:
            files: List[str] = FileDirectories.files(directory)
            for file_name in files:
                full_path: str = FileTools.concat(directory, file_name)
                FileIO.delete(full_path)
    @staticmethod
    def clear_files(directory: str) -> None:
        """Removes only the directory's immediate files."""
        FileDirectories._raise_error_if_file(directory)
        FileDirectories.clear(directory)
    @staticmethod
    def children(directory: str,
                 recursive: bool = False,
                 include_full_path: bool = False,
                 extension: str = None) -> List[str]:
        """
        Lists subdirectories and files of the directory.

        :param recursive: Walk the whole tree instead of the first level.
        :param include_full_path: Return joined paths instead of bare names.
        :param extension: When given, keep only files with this extension.
        """
        FileDirectories._raise_error_if_file(directory)
        children: List[str] = []
        for (current_directory, directories, file_names) in os.walk(directory):
            if include_full_path:
                directories = [FileTools.concat(current_directory, x) for x in directories]
                file_names = [FileTools.concat(current_directory, x) for x in file_names]
            if extension:
                file_names = FileTransform.filter_by_extension(file_names, extension)
            children.extend(directories)
            children.extend(file_names)
            if not recursive:
                break
        return children
    @staticmethod
    def create(full_path: str) -> Optional[str]:
        """
        Checks for existence of a file path and if it does not exist, creates it.
        Returns the created path
        :param full_path: (str) The path to be checked and/or created. If path is None,
                          Files will check for an internal path to use.
                          If none is found, a ValueError will be raised
        :return: The confirmed path, or None when the path is an existing file
        """
        if not FileCheck.exists(full_path):
            os.makedirs(full_path)
        elif os.path.isfile(full_path):
            return None
        return full_path
    @staticmethod
    def delete(directory: str, recursive: bool = False) -> None:
        """
        Removes the given path: links are unlinked, files removed, and
        directories removed via rmtree (recursive) or rmdir (must be empty).
        """
        if FileCheck.is_link(directory):
            FileIO.unlink(directory)
            # Bug fix: previously fell through and also tried to rmdir/rmtree
            # the just-removed path, raising in the non-recursive case.
            return
        if FileCheck.is_file(directory):
            FileIO.delete(directory)
            return
        if recursive:
            shutil.rmtree(directory, ignore_errors=True)
        else:
            os.rmdir(directory)
    @staticmethod
    def directories(directory: str,
                    recursive: bool = False,
                    include_full_path: bool = False) -> List[str]:
        """Lists subdirectories (optionally recursively / with full paths)."""
        FileDirectories._raise_error_if_file(directory)
        directories: List[str] = []
        for root in os.walk(directory):
            child_directories: List[str] = root[1]
            if include_full_path:
                parent: str = root[0]
                child_directories = [FileTools.concat(parent, x) for x in child_directories]
            directories.extend(child_directories)
            if not recursive:
                break
        return directories
    @staticmethod
    def files(directory: str,
              recursive: bool = False,
              include_full_path: bool = False,
              extension: str = None) -> List[str]:
        """Lists files (optionally recursively, with full paths, filtered)."""
        FileDirectories._raise_error_if_file(directory)
        files: List[str] = []
        for root in os.walk(directory):
            child_files: List[str] = root[2]
            if include_full_path:
                parent: str = root[0]
                child_files = [FileTools.concat(parent, x) for x in child_files]
            if extension:
                child_files = FileTransform.filter_by_extension(child_files, extension)
            files.extend(child_files)
            if not recursive:
                break
        return files
    @staticmethod
    def parent(full_path: str, depth: int = 1) -> Optional[str]:
        """Ancestor of the path `depth` levels up, or None when missing."""
        if not FileCheck.exists(full_path):
            return None
        while depth > 0:
            full_path = os.path.dirname(full_path)
            depth -= 1
        return full_path
    @staticmethod
    def _raise_error_if_file(full_path: str) -> None:
        # Shared guard: these operations only make sense on directories.
        if FileCheck.is_file(full_path):
            raise ValueError('Given path must be a directory, not a file')
class FileFind:
    """Search helpers for text files."""
    @staticmethod
    def line_number(full_path: str, word: str) -> Optional[int]:
        """ prec: file is a valid path, word is a string
            postc: returns the 1-based line number of the first line containing
            word as a whitespace-separated token, -1 when the word is absent,
            or None when the path is not a file"""
        if not FileCheck.is_file(full_path):
            return None
        # Bug fix: the file handle was previously opened and never closed;
        # a with-block guarantees release even on the early return.
        with open(full_path, "r") as in_pipe:
            for line_number, line in enumerate(in_pipe, start=1):
                if word in line.split():
                    return line_number
        return -1
class FileIO:
    """Single-file operations: copy, delete, open, read, save."""
    @staticmethod
    def copy(donor_path: str,
             recipient_path: str,
             overwrite: bool = False) -> bool:
        """Copies a text file's contents; returns True on success."""
        if FileCheck.is_directory(donor_path):
            raise ValueError('The donor path cannot be a folder')
        if FileCheck.is_directory(recipient_path):
            raise ValueError('The recipient path cannot be a folder')
        if not FileCheck.exists(donor_path):
            raise IOError('The given donor path does not exist')
        if not overwrite and FileCheck.exists(recipient_path):
            raise IOError('The given recipient path already exists')
        contents: str = FileIO.read(donor_path)
        FileIO.save(recipient_path, contents, overwrite=overwrite)
        return True
    @staticmethod
    def delete(full_path: str) -> None:
        """Removes the file when it exists; directories are rejected."""
        FileIO._raise_error_if_directory(full_path)
        if FileCheck.exists(full_path):
            os.remove(full_path)
    @staticmethod
    def open(full_path: str,
             write: bool = False,
             append: bool = False,
             binary: bool = False,
             **kwargs: Any) -> Union[IO, None]:
        """Opens the file for read (default), write, or append; optionally binary."""
        FileIO._raise_error_if_directory(full_path)
        if write:
            mode: str = 'wb' if binary else 'w'
        elif append:
            mode = 'ab' if binary else 'a+'
        else:
            mode = 'rb' if binary else 'r'
        # `open` here is the builtin, not this staticmethod.
        return open(full_path, mode, **kwargs)
    @staticmethod
    def read(full_path: str) -> Optional[str]:
        """Whole file contents as one string, or None when not a file."""
        if not FileCheck.is_file(full_path):
            return None
        with FileIO.open(full_path) as file:
            return file.read()
    @staticmethod
    def read_object(full_path: str) -> Union[Any, None]:
        """ prec: filename is a string
            postc: returns the Python object saved in the file"""
        FileIO._raise_error_if_directory(full_path)
        if not os.path.isfile(full_path):
            return None
        # NOTE: dill deserialization can execute arbitrary code; only load
        # trusted files.
        with FileIO.open(full_path, binary=True) as file:
            return dill.load(file)
    @staticmethod
    def save(full_path: str,
             text: str,
             overwrite: bool = False) -> None:
        """Writes text to the path; refuses to clobber unless overwrite."""
        if not overwrite and FileCheck.exists(full_path):
            raise IOError('The given file path already exists')
        with FileIO.open(full_path, write=True) as file:
            file.write(text)
    @staticmethod
    def save_object(full_path: str,
                    obj: Any,
                    overwrite: bool = False) -> None:
        """ prec: obj is any python variable and filename is a string
            postc: saves the object to a file"""
        FileIO._raise_error_if_directory(full_path)
        if not overwrite and FileCheck.exists(full_path):
            raise IOError('The given file path already exists')
        with FileIO.open(full_path, write=True, binary=True) as file:
            dill.dump(obj, file)
    @staticmethod
    def text(full_path: str) -> Optional[str]:
        """Alias for read()."""
        return FileIO.read(full_path)
    @staticmethod
    def touch(full_path: str, overwrite: bool = False) -> None:
        """Creates an empty file."""
        FileIO.save(full_path, '', overwrite=overwrite)
    @staticmethod
    def unlink(full_path: str) -> None:
        """Removes a symbolic link."""
        os.unlink(full_path)
    @staticmethod
    def _raise_error_if_directory(full_path: str) -> None:
        # Shared guard for the file-only operations above.
        if FileCheck.is_directory(full_path):
            raise ValueError('Given path cannot be a directory')
class FileNames:
    """Helpers for deriving new file names."""
    @staticmethod
    def stamp(file_path: str) -> str:
        """Inserts a timestamp between the file's stem and its extension."""
        stem, extension = os.path.splitext(file_path)
        return f'{stem} {FileTools.timestamp()}{extension}'
class FileTransform:
    """Transformations of path lists and file contents into lists."""
    @staticmethod
    def filter_by_extension(file_paths: List[str], extension: str) -> List[str]:
        """Keeps only the paths whose extension matches (leading dot optional)."""
        if not extension.startswith('.'):
            extension = f'.{extension}'
        return [path for path in file_paths
                if os.path.splitext(path)[1] == extension]
    @staticmethod
    def lines(full_path: str) -> Optional[List[str]]:
        """All lines of the file (newlines kept), or None when missing."""
        if not FileCheck.exists(full_path):
            return None
        with FileIO.open(full_path) as file:
            return list(file)
    @staticmethod
    def words(full_path: str) -> Optional[List[str]]:
        """All space-separated tokens of the file, or None when missing."""
        if not FileCheck.exists(full_path):
            return None
        collected: List[str] = []
        with FileIO.open(full_path) as file:
            for line in file:
                collected.extend(line.split(' '))
        return collected
class FileTools:
    """Facade that groups all file helpers behind short attribute names."""
    # Namespaced access to the helper classes, e.g. FileTools.check.exists(p).
    check: Type[FileCheck] = FileCheck
    count: Type[FileCount] = FileCount
    directories: Type[FileDirectories] = FileDirectories
    find: Type[FileFind] = FileFind
    io_: Type[FileIO] = FileIO
    names: Type[FileNames] = FileNames
    transform: Type[FileTransform] = FileTransform
    @staticmethod
    def concat(*paths: str) -> str:
        """Joins path segments with the platform separator."""
        return os.path.join(*paths)
    @staticmethod
    def timestamp() -> str:
        """Current local time formatted to be safe inside file names."""
        return datetime.datetime.today().strftime('%Y-%m-%d %H.%M.%S')
class DirectoryWrapper:
    """Object-oriented view of one directory, delegating to FileDirectories."""
    directory: str  # the wrapped directory path
    def __init__(self, full_path: str):
        """
        :param full_path: Path of the directory to wrap.
        :raises ValueError: When the path exists but is not a directory.
        """
        if FileTools.check.exists(full_path) and not FileTools.check.is_directory(full_path):
            raise ValueError('Given path must be a path to a directory')
        # Bug fix: the path was validated but never stored, so every method
        # below failed with AttributeError on self.directory.
        self.directory = full_path
    def clear_all(self) -> None:
        """Recursively removes everything inside the directory."""
        FileDirectories.clear_all(self.directory)
    def clear(self, recursive: bool = False) -> None:
        """Empties the directory (files only, or everything if recursive)."""
        FileDirectories.clear(self.directory, recursive=recursive)
    def clear_files(self) -> None:
        """Removes only the directory's immediate files."""
        FileDirectories.clear_files(self.directory)
    def children(self,
                 recursive: bool = False,
                 include_full_path: bool = False) -> List[str]:
        """Lists child files and directories."""
        return FileDirectories.children(self.directory,
                                        recursive=recursive,
                                        include_full_path=include_full_path)
    def concat(self, *paths: str) -> Union[FileWrapper, DirectoryWrapper]:
        """Joins paths onto this directory and wraps the result."""
        full_path: str = os.path.join(self.directory, *paths)
        if FileTools.check.is_directory(full_path):
            return DirectoryWrapper(full_path)
        else:
            return FileWrapper(full_path)
    def create(self) -> Optional[str]:
        """
        Checks for existence of a file path and if it does not exist, creates it.
        Returns the created path
        :return: The confirmed path
        """
        return FileDirectories.create(self.directory)
    def delete(self, recursive: bool = False) -> None:
        """Removes the wrapped directory."""
        FileDirectories.delete(self.directory, recursive=recursive)
    def directories(self,
                    recursive: bool = False,
                    include_full_path: bool = False) -> List[str]:
        """Lists child directories."""
        return FileDirectories.directories(self.directory,
                                           recursive=recursive,
                                           include_full_path=include_full_path)
    def files(self,
              recursive: bool = False,
              include_full_path: bool = False) -> List[str]:
        """Lists child files."""
        return FileDirectories.files(self.directory,
                                     recursive=recursive,
                                     include_full_path=include_full_path)
    def parent(self, depth: int = 1) -> Optional[str]:
        """Ancestor of this directory `depth` levels up."""
        return FileDirectories.parent(self.directory, depth=depth)
class FileWrapperContainerBase:
    """Base for helper objects that operate on one wrapped FileWrapper."""
    _wrapper: FileWrapper  # the wrapped file object
    def __init__(self, wrapper: FileWrapper):
        self._wrapper = wrapper
    def full_path(self) -> str:
        """Full path of the wrapped file."""
        return self._wrapper.full_path
    def name(self) -> str:
        """Base name of the wrapped file."""
        return self._wrapper.name
    def path(self) -> str:
        """Directory portion of the wrapped file's path."""
        return self._wrapper.path
class FileWrapperCheck(FileWrapperContainerBase):
    """Predicates about the wrapped file, delegating to FileCheck."""
    def exists(self) -> bool:
        """True when the wrapped path exists on disk."""
        return FileCheck.exists(self.full_path())
    def has_quarry(self, quarry: str) -> bool:
        """ prec: file is a valid path, quarry is a string
        postc: returns True if the quarry is in the file, false otherwise"""
        return FileCheck.has_quarry(self.full_path(), quarry)
class FileWrapperCount(FileWrapperContainerBase):
    """Counts over the wrapped file's contents, delegating to FileCount."""
    def characters(self) -> Optional[int]:
        """Character count of the wrapped file, or None when missing."""
        return FileCount.characters(self.full_path())
    def lines(self) -> Optional[int]:
        """Line count of the wrapped file, or None when missing/empty."""
        return FileCount.lines(self.full_path())
    def words(self) -> Optional[int]:
        """Word count of the wrapped file, or None when missing/empty."""
        return FileCount.words(self.full_path())
class FileWrapperFind(FileWrapperContainerBase):
| |
# from https://github.com/amdegroot/ssd.pytorch
import torch
from torchvision import transforms
import cv2
import numpy as np
import types
from numpy import random
from Archs_2D.BBox import BBoxes
def intersect(box_a, box_b):
    """
    Pairwise intersection areas between a set of boxes and one box.

    Args:
        box_a: Array of boxes, shape [num_boxes, 4] as (x1, y1, x2, y2).
        box_b: A single box, shape [4].
    Return:
        Array of intersection areas, shape [num_boxes].
    """
    upper = np.minimum(box_a[:, 2:], box_b[2:])
    lower = np.maximum(box_a[:, :2], box_b[:2])
    extents = np.clip(upper - lower, a_min=0, a_max=np.inf)
    return extents[:, 0] * extents[:, 1]
def jaccard_numpy(box_a, box_b):
    """Compute the jaccard overlap (IoU) of a set of boxes against one box.
    E.g.:
        A ∩ B / A ∪ B = A ∩ B / (area(A) + area(B) - A ∩ B)
    Args:
        box_a: Multiple bounding bboxes, Shape: [num_bboxes, 4]
        box_b: Single bounding box, Shape: [4]
    Return:
        jaccard overlap per box in box_a, Shape: [num_bboxes]
    """
    inter = intersect(box_a, box_b)
    area_a = (box_a[:, 2] - box_a[:, 0]) * (box_a[:, 3] - box_a[:, 1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    return inter / (area_a + area_b - inter)
class Compose(object):
    """Chains several augmentations, threading (img, targets) through each.
    Args:
        transforms (List[Transform]): list of transforms to compose.
    Example:
        >>> augmentations.Compose([
        >>>     transforms.CenterCrop(10),
        >>>     transforms.ToTensor(),
        >>> ])
    """
    def __init__(self, transforms):
        self.transforms = transforms
    def __call__(self, img, targets=None):
        for transform in self.transforms:
            img, targets = transform(img, targets=targets)
        return img, targets
class Lambda(object):
    """Wraps an arbitrary callable as a transform."""
    def __init__(self, lambd):
        # Only genuine functions/lambdas are accepted.
        assert isinstance(lambd, types.LambdaType)
        self.lambd = lambd
    def __call__(self, img, targets=None):
        return self.lambd(img, targets)
class ConvertFromInts(object):
    """Casts the image to float32 so later arithmetic transforms are safe."""
    def __call__(self, image, targets=None):
        return image.astype(np.float32), targets
class SubtractMeans(object):
    """Subtracts a fixed (per-channel) mean from the image."""
    def __init__(self, mean):
        self.mean = np.array(mean, dtype=np.float32)
    def __call__(self, image, targets=None):
        shifted = image.astype(np.float32) - self.mean
        return shifted.astype(np.float32), targets
class ToAbsoluteCoords(object):
    """
    Converts normalized (0-1) bbox coords and camera matrix K to absolute
    pixel coordinates, in place on the targets' fields.
    """
    def __call__(self, image, targets=None):
        height, width, channels = image.shape
        # Bug fix: mirror ToPercentCoords and tolerate targets=None (the
        # declared default) instead of raising AttributeError.
        if targets is None:
            return image, targets
        if targets.has_field('bbox'):
            bboxes = targets.get_field("bbox")
            bboxes[:, 0::2] *= width
            bboxes[:, 1::2] *= height
        if targets.has_field('K'):
            K = targets.get_field('K')
            K[0, :] *= width
            K[1, :] *= height
        return image, targets
class ToPercentCoords(object):
    """Converts absolute pixel bbox coords and K to normalized (0-1) form."""
    def __call__(self, image, targets=None):
        height, width, channels = image.shape
        if targets is None:
            return image, targets
        if targets.has_field('bbox'):
            boxes = targets.get_field("bbox")
            boxes[:, 0::2] /= width
            boxes[:, 1::2] /= height
        if targets.has_field('K'):
            K = targets.get_field('K')
            K[0, :] /= width
            K[1, :] /= height
        return image, targets
class RandomSaturation(object):
def __init__(self, lower=0.5, upper=1.5):
self.lower = lower
self.upper = upper
assert self.upper >= self.lower, "contrast upper must be >= lower."
assert self.lower >= 0, "contrast lower must be non-negative."
def __call__(self, image, targets=None):
if random.randint(2):
image[:, :, 1] *= random.uniform(self.lower, self.upper)
return image, targets
class RandomHue(object):
def __init__(self, delta=18.0):
assert delta >= 0.0 and delta <= 360.0
self.delta = delta
def __call__(self, image, targets=None):
if random.randint(2):
image[:, :, 0] += random.uniform(-self.delta, self.delta)
image[:, :, 0][image[:, :, 0] > 360.0] -= 360.0
image[:, :, 0][image[:, :, 0] < 0.0] += 360.0
return image, targets
class RandomLightingNoise(object):
    """Randomly permutes the image's color channels (50% chance)."""
    def __init__(self):
        # All six orderings of the three channels.
        self.perms = ((0, 1, 2), (0, 2, 1),
                      (1, 0, 2), (1, 2, 0),
                      (2, 0, 1), (2, 1, 0))
    def __call__(self, image, targets=None):
        if random.randint(2):
            chosen = self.perms[random.randint(len(self.perms))]
            image = SwapChannels(chosen)(image)  # shuffle channels
        return image, targets
class ConvertColor(object):
    """Converts the image between BGR/RGB/HSV color spaces via OpenCV."""
    def __init__(self, current, transform):
        self.transform = transform
        self.current = current
    def __call__(self, image, targets=None):
        # Dispatch table replaces the if/elif chain; unsupported pairs
        # still raise NotImplementedError.
        conversions = {
            ('BGR', 'HSV'): cv2.COLOR_BGR2HSV,
            ('RGB', 'HSV'): cv2.COLOR_RGB2HSV,
            ('BGR', 'RGB'): cv2.COLOR_BGR2RGB,
            ('HSV', 'BGR'): cv2.COLOR_HSV2BGR,
            ('HSV', 'RGB'): cv2.COLOR_HSV2RGB,
        }
        try:
            code = conversions[(self.current, self.transform)]
        except KeyError:
            raise NotImplementedError
        return cv2.cvtColor(image, code), targets
class RandomContrast(object):
def __init__(self, lower=0.5, upper=1.5):
self.lower = lower
self.upper = upper
assert self.upper >= self.lower, "contrast upper must be >= lower."
assert self.lower >= 0, "contrast lower must be non-negative."
# expects float image
def __call__(self, image, targets=None):
if random.randint(2):
alpha = random.uniform(self.lower, self.upper)
image *= alpha
return image, targets
class RandomBrightness(object):
def __init__(self, delta=32):
assert delta >= 0.0
assert delta <= 255.0
self.delta = delta
def __call__(self, image, targets=None):
if random.randint(2):
delta = random.uniform(-self.delta, self.delta)
image += delta
return image, targets
class ToCV2Image(object):
    """Converts a CHW torch tensor back to an HWC float32 numpy image."""
    def __call__(self, tensor, targets=None):
        image = tensor.cpu().numpy().astype(np.float32)
        return image.transpose((1, 2, 0)), targets
# class ToTensor(object):
# def __call__(self, cvimage, bboxes=None, labels=None):
# return torch.from_numpy(cvimage.astype(np.float32)).permute(2, 0, 1), bboxes, labels
class ToTorchTensor(object):
    """Applies torchvision's ToTensor to an image or a list of images."""
    def __init__(self):
        self.toTensor = transforms.ToTensor()
    def __call__(self, image, targets=None):
        if isinstance(image, list):
            return [self.toTensor(item) for item in image], targets
        return self.toTensor(image), targets
class ToTensor(object):
    """Wraps a float32 copy of a numpy image in a torch tensor."""
    def __init__(self):
        self.trans = transforms.ToTensor()
    def __call__(self, cvimage, targets=None):
        # NOTE: unlike torchvision's ToTensor, this keeps the HWC layout and
        # does not rescale values; self.trans is not used here.
        return torch.from_numpy(cvimage.astype(np.float32)), targets
class ToNCHW(object):
    """Reorders an HWC tensor to CHW layout."""
    def __call__(self, image, targets=None):
        return image.permute(2, 0, 1), targets
class ToBBoxes(object):
    """Wraps the raw 'bbox' field of the targets in a BBoxes container."""
    def __call__(self, image, targets=None):
        raw = targets.get_field('bbox')
        targets.update_field('bbox', BBoxes(raw))
        return image, targets
class RandomAffine(object):
    """
    With 50% probability applies a random scale-and-shift affine warp to the
    image, and adjusts the targets' bboxes (and 'mask' validity) to match.
    """
    def __init__(self, mean, range=0.5, offset=0.5):
        self.range = range     # half-width of the random scale interval
        self.offset = offset   # jitter fraction of the centering offset
        self.mean = mean       # border fill value for warpAffine
    def __call__(self, image, targets=None):
        h, w, _ = image.shape
        # Identity-transform defaults.  Bug fix: scale/offset were previously
        # defined only inside the random branch, so the bbox adjustment below
        # raised NameError whenever the warp was skipped.
        scale = 1.0
        offset = np.zeros(2, dtype=np.float32)
        if random.randint(2):
            scale = (2 * random.random() - 1.) * self.range + 1.
            base_offset = (np.array([w, h], dtype=np.float32) - np.array([w, h], dtype=np.float32) * scale) / 2.
            offset = (2 * random.random_sample((2,)) - 1) * self.offset * np.abs(base_offset) + base_offset
            affineMat = np.eye(3)
            affineMat[:2, :2] *= scale
            affineMat[:2, 2] = offset
            image = cv2.warpAffine(image, affineMat[:2, :], dsize=(w, h), borderValue=self.mean)
        if targets is None:
            return image, targets
        # Apply the same scale/shift to the boxes (identity when not warped).
        bboxes = targets.get_field('bbox')
        bboxes *= scale
        bboxes[:, 0::2] += offset[0]
        bboxes[:, 1::2] += offset[1]
        if targets.has_field('mask'):
            bboxes = targets.get_field('bbox')
            # Mark boxes whose center remains inside the image as valid.
            center_x = bboxes[:, 0::2].sum(axis=1) * 0.5
            center_y = bboxes[:, 1::2].sum(axis=1) * 0.5
            index1 = (center_x >= 0) & (center_x < w)
            index2 = (center_y >= 0) & (center_y < h)
            index = (index1) & (index2)
            masks = targets.get_field('mask')
            masks[index] = 1
        return image, targets
class RandomMirror(object):
def __call__(self, image, targets=None):
_, width, _ = image.shape
if random.randint(2):
image = image[:, ::-1]
if targets is None:
return image, targets
bboxes = targets.get_field('bbox')
bboxes[:, 0::2] = width - bboxes[:, [2, 0]]
if targets.has_field('K'):
K = targets.get_field('K')
K[0, 2] = width - K[0, 2] - 1
if targets.has_field('alpha'):
alphas = targets.get_field('alpha')
idx_pos = alphas >= 0
idx_neg = alphas < 0
alphas[idx_pos] = -1. * alphas[idx_pos] + np.pi
alphas[idx_neg] = -1. * alphas[idx_neg] - np.pi
return image, targets
class Resize(object):
    """Resizes an image (or list of images) to a fixed square or (w, h) size."""
    def __init__(self, size=300):
        self.size = size
    def __call__(self, image, targets=None):
        explicit = isinstance(self.size, (tuple, list)) and len(self.size) == 2
        dsize = self.size if explicit else (self.size, self.size)
        if isinstance(image, list):
            image = [cv2.resize(src=item, dsize=dsize, interpolation=cv2.INTER_CUBIC)
                     for item in image]
        elif explicit:
            # NOTE(review): single images use INTER_LINEAR only in the
            # explicit (w, h) case in the original code; preserved as-is.
            image = cv2.resize(src=image, dsize=dsize, interpolation=cv2.INTER_LINEAR)
        else:
            image = cv2.resize(src=image, dsize=dsize, interpolation=cv2.INTER_CUBIC)
        return image, targets
class RandomCrop(object):
    """
    Randomly jitters a single bbox by up to rate * its size (50% chance)
    and crops the image to the (clamped) box.
    """
    def __init__(self, rate=0.3):
        self._rate = rate
    def __call__(self, image, bboxes=None, labels=None):
        height, width, _ = image.shape
        if isinstance(bboxes, (tuple, list)):
            bboxes = np.array(bboxes, dtype=np.float32)
        elif torch.is_tensor(bboxes):
            bboxes = bboxes.numpy().astype(dtype=np.float32)
        assert (bboxes.ndim == 1)
        # Clamp the box to the image before (and again after) jittering.
        bboxes[0::2] = np.clip(bboxes[0::2], a_min=0, a_max=width)
        bboxes[1::2] = np.clip(bboxes[1::2], a_min=0, a_max=height)
        if random.randint(2):
            jitter_x = (bboxes[2] - bboxes[0]) * self._rate
            jitter_y = (bboxes[3] - bboxes[1]) * self._rate
            delta = np.random.random_sample(bboxes.shape)
            delta[0::2] = delta[0::2] * jitter_x * 2 - jitter_x
            delta[1::2] = delta[1::2] * jitter_y * 2 - jitter_y
            bboxes += delta
            bboxes[0::2] = np.clip(bboxes[0::2], a_min=0, a_max=width)
            bboxes[1::2] = np.clip(bboxes[1::2], a_min=0, a_max=height)
        crop = image[int(bboxes[1]):(int(bboxes[3]) + 1), int(bboxes[0]):(int(bboxes[2]) + 1)]
        return crop, bboxes, labels
class Crop(object):
    """Crops the image to each given bbox, returning one crop per box."""
    def __call__(self, image, bboxes=None, labels=None):
        height, width, _ = image.shape
        if isinstance(bboxes, (tuple, list)):
            bboxes = np.array(bboxes, dtype=np.float32)
        elif torch.is_tensor(bboxes):
            bboxes = bboxes.numpy().astype(dtype=np.float32)
        assert (bboxes.ndim == 2)  # (num, 4)
        crops = []
        for box in bboxes:
            # Clamp each box to the image bounds (in place on bboxes).
            box[0::2] = np.clip(box[0::2], a_min=0, a_max=width)
            box[1::2] = np.clip(box[1::2], a_min=0, a_max=height)
            crops.append(image[int(box[1]):(int(box[3]) + 1), int(box[0]):(int(box[2]) + 1)])
        return crops, bboxes, labels
class RandomSampleCrop(object):
"""Crop
Arguments:
img (Image): the image being input during training
bboxes (Tensor): the original bounding bboxes in pt form
labels (Tensor): the class labels for each bbox
mode (float tuple): the min and max jaccard overlaps
Return:
(img, bboxes, labels)
img (Image): the cropped image
bboxes (Tensor): the adjusted bounding bboxes in pt form
labels (Tensor): the class labels for each bbox
"""
def __init__(self):
self.sample_options = (
# using entire original input image
None,
# sample a patch s.t. MIN jaccard w/ obj in .1,.3,.4,.7,.9
(0.1, None),
(0.3, None),
| |
<gh_stars>0
import numpy as np
from numpy.testing._private.utils import _assert_no_gc_cycles_context
class Node():
    def __init__(self, value=None, attribute_name="root", attribute_index=None, branches=None):
        """One node of a multi-branch decision tree.

        A node whose ``branches`` list is empty is a leaf, and ``value`` then
        holds the predicted class; on interior nodes ``value`` is the split
        threshold. The defaults describe a root node.

        Arguments:
            branches (list): child Node objects used to traverse the tree; in
                a binary tree this has length 2 (left, right) or 0 (leaf).
            attribute_name (str): name of the attribute this node splits on
                (used by `DecisionTree.visualize`).
            attribute_index (float): index of the attribute in the feature
                vector; should match attribute_name.
            value (number): value the data is compared against along the
                given attribute.
        """
        # Allocate a fresh list per node when no branches are given, so the
        # default is never a shared mutable object.
        self.value = value
        self.attribute_name = attribute_name
        self.attribute_index = attribute_index
        self.branches = branches if branches is not None else []
class DecisionTree():
    """Binary decision tree learner using the ID3 algorithm.

    The fitted model lives in ``self.tree`` as nested ``Node`` objects; a
    node with no branches is a leaf whose ``value`` is the predicted class
    (0/1). Splits are chosen by maximum information gain (see the
    file-level ``information_gain`` helper).
    """

    def __init__(self, attribute_names):
        """
        Args:
            attribute_names (list): attribute name (str) for each feature
                column (e.g. chocolatey, good_grades, etc.).
        """
        self.attribute_names = attribute_names
        self.tree = None

    def _check_input(self, features):
        # One feature column is expected per attribute name.
        if features.shape[1] != len(self.attribute_names):
            raise ValueError(
                "Number of features and number of attribute names must match!"
            )

    def fit_recursion(self, features, targets, curr_node, attributes):
        """Recursively grow the tree; returns ``curr_node`` or a leaf Node.

        Args:
            features (np.array): remaining examples, one column per name in
                ``attributes`` (columns are deleted as attributes are used).
            targets (np.array): 0/1 labels aligned with ``features`` rows.
            curr_node (Node): node to populate at this level.
            attributes (list): attribute names still available for splitting.
        """
        # Base cases: no data left, a pure node, or no attributes left
        # (fall back to a majority vote).
        if features.size == 0 and targets.size == 0:
            return Node(attribute_name='leaf')
        elif np.count_nonzero(targets == 1) == len(targets):
            return Node(attribute_name='leaf', value=1)
        elif np.count_nonzero(targets == 0) == len(targets):
            return Node(attribute_name='leaf', value=0)
        elif len(attributes) == 0:
            if np.count_nonzero(targets == 1) > np.count_nonzero(targets == 0):
                return Node(attribute_name='leaf', value=1)
            else:
                return Node(attribute_name='leaf', value=0)
        else:
            # Stack targets as the last column so row filtering keeps
            # features and labels aligned.
            values = np.hstack((features, np.vstack(targets)))
            # Split on the attribute with the highest information gain.
            gain = {}
            for i in range(len(attributes)):
                gain[attributes[i]] = information_gain(features, i, targets)
            curr_node.attribute_name = max(gain, key=gain.get)
            # attribute_index is relative to the FULL feature vector (used by
            # predictor); curr_idx is relative to the shrunken `features`.
            curr_node.attribute_index = self.attribute_names.index(curr_node.attribute_name)
            curr_idx = attributes.index(curr_node.attribute_name)
            removed = attributes.copy()
            removed.remove(curr_node.attribute_name)
            # Continuous attribute (values outside {0, 1}): split at the
            # median into S(A < m) / S(A >= m); otherwise binary split on 0/1.
            if np.any(values[values[:, curr_idx] > 1]) or np.any(values[values[:, curr_idx] < 0]):
                median = np.median(values[:, curr_idx])
                curr_node.value = median
                left_values = values[values[:, curr_idx] < median]
                left_feats = left_values[:, :-1]
                left_feats = np.delete(left_feats, curr_idx, 1)
                left_tgts = left_values[:, -1]
            else:
                curr_node.value = 1
                left_values = values[values[:, curr_idx] == 0]
                left_feats = left_values[:, :-1]
                left_feats = np.delete(left_feats, curr_idx, 1)
                left_tgts = left_values[:, -1]
            left = self.fit_recursion(left_feats, left_tgts, Node(), removed)
            if np.any(values[values[:, curr_idx] > 1]) or np.any(values[values[:, curr_idx] < 0]):
                median = np.median(values[:, curr_idx])
                curr_node.value = median
                right_values = values[values[:, curr_idx] >= median]
                right_feats = right_values[:, :-1]
                right_feats = np.delete(right_feats, curr_idx, 1)
                right_tgts = right_values[:, -1]
            else:
                curr_node.value = 1
                right_values = values[values[:, curr_idx] == 1]
                right_feats = right_values[:, :-1]
                right_feats = np.delete(right_feats, curr_idx, 1)
                right_tgts = right_values[:, -1]
            right = self.fit_recursion(right_feats, right_tgts, Node(), removed)
            curr_node.branches.append(left)
            curr_node.branches.append(right)
            return curr_node

    def fit(self, features, targets):
        """Fit a decision tree to the data; updates ``self.tree``.

        Args:
            features (np.array): N x F feature matrix.
            targets (np.array): N class labels.
        """
        self._check_input(features)
        self.tree = self.fit_recursion(features, targets, Node(), self.attribute_names)

    def predictor(self, point, curr_node):
        """Walk the tree for a single example: go left while the split
        attribute's value is below the node's threshold."""
        if curr_node.branches == []:
            return curr_node.value
        else:
            if (point[curr_node.attribute_index] < curr_node.value):
                return self.predictor(point, curr_node.branches[0])
            else:
                return self.predictor(point, curr_node.branches[1])

    def predict(self, features):
        """Predict a class for every row of ``features``.

        Args:
            features (np.array): N x F feature matrix.
        Returns:
            predictions (np.array): N predicted class values.
        """
        self._check_input(features)
        predictions = np.zeros((features.shape[0]))
        # Fixed: the previous version kept a manual counter named `iter`,
        # shadowing the builtin; enumerate is clearer and equivalent.
        for idx, point in enumerate(features):
            predictions[idx] = self.predictor(point, self.tree)
        return predictions

    def _visualize_helper(self, tree, level):
        """
        Helper function for visualize a decision tree at a given level of recursion.
        """
        tab_level = " " * level
        val = tree.value if tree.value is not None else 0
        print("%d: %s%s == %f" % (level, tab_level, tree.attribute_name, val))

    def visualize(self, branch=None, level=0):
        """
        Visualization of a decision tree. Implemented for you to check your work and to
        use as an example of how to use the given classes to implement your decision
        tree.
        """
        if not branch:
            branch = self.tree
        self._visualize_helper(branch, level)
        for branch in branch.branches:
            self.visualize(branch, level+1)
def information_gain(features, attribute_index, targets):
"""
TODO: Implement me!
Information gain is how a decision tree makes decisions on how to create
split points in the tree. Information gain is measured in terms of entropy.
The goal of a decision tree is to decrease entropy at each split point as much as
possible. This function should work perfectly or your decision tree will not work
properly.
Information gain is a central concept in many machine learning algorithms. In
decision trees, it captures how effective splitting the tree on a specific attribute
will be for the goal of classifying the training data correctly. Consider
data points S and an attribute A; we'll split S into two data points.
For binary A: S(A == 0) and S(A == 1)
For continuous A: S(A < m) and S(A >= m), where m is the median of A in S.
Together, the two subsets make up S. If the attribute A were perfectly correlated with
the class of each data point in S, then all points in a given subset will have the
same class. Clearly, in this case, we want something that captures that A is a good
attribute to use in the decision tree. This something is information gain. Formally:
IG(S,A) = H(S) - H(S|A)
where H is information entropy. Recall that entropy captures how orderly or chaotic
a system is. A system that is very chaotic will evenly distribute probabilities to
all outcomes (e.g. 50% chance of class 0, 50% chance of class 1). Machine learning
algorithms work to decrease entropy, as that is the only way to make predictions
that are accurate on testing data. Formally, H is defined as:
H(S) = sum_{c in (groups in S)} -p(c) * log_2 p(c)
To elaborate: for each group in S, you compute its prior probability p(c):
(# of elements of group c in S) / (total # of elements in S)
Then you compute the term for this group:
-p(c) * log_2 p(c)
Then compute the sum across all groups: either classes 0 and 1 for binary data, or
for the above-median and below-median classes for continuous data. The final number
is the entropy. To gain more intuition about entropy, consider the following - what
does H(S) = 0 tell you about S?
Information gain is an extension of entropy. The equation for information gain
involves comparing the entropy of the set and | |
<gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 29 17:03:37 2020
@author: zmg
"""
from pathlib import Path
import sys
from scipy.stats import linregress
from scipy import stats
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from datetime import datetime
import shutil
import json
import matplotlib as mpl
mpl.style.use("seaborn")
mpl.rcParams["figure.dpi"] = 100
from file_py_helper.find_folders import FindExpFolder
from file_py_helper.file_functions import FileOperations
from file_py_helper.PostChar import (
SampleSelection,
Characterization_TypeSetting,
SampleCodesChar,
)
if __name__ == "__main__":
print(f"Package: {__package__}, File: {__file__}")
from elchempy.experiments.ORR import ORR_analyze_scans
from elchempy.PostEC.plotting import eisplot
from elchempy.PostEC.collect_load import Load_from_Indexes
from elchempy.experiments.EIS.models import Model_Collection # EEC_models_index
# logger = start_logger(Path.cwd())
# import plotting
import post_helper
else:
print("\n\n***** run_PAR_DW *****")
print(__file__)
from elchempy.PostEC.plotting import eisplot
# from elchempy.experiments.EIS import plotting
from elchempy.PostEC import post_helper
from elchempy.PostEC.collect_load import Load_from_Indexes
from elchempy.experiments.ORR import ORR_analyze_scans
from elchempy.experiments.EIS.models import EEC_models_index
# Module-level lookups shared by the helpers below.
OriginColor = FindExpFolder().LoadOriginColor()  # Origin color palette table
EvRHE = "E_AppV_RHE"  # canonical column name: applied potential vs RHE
SampleCodes = SampleCodesChar.load()  # sample-code metadata table
def check_file_and_backup(all_fits_df, filepath, destdir, _bakd=None, suffix=".pkl"):
    """Pickle ``all_fits_df`` to ``destdir/filepath``+``suffix``, backing up
    any existing file first.

    An existing pickle is renamed to ``<creation-date>_<stem><suffix>`` before
    the new frame is written.

    Args:
        all_fits_df (pd.DataFrame): frame to pickle.
        filepath: file name (stem) relative to ``destdir``.
        destdir (Path): destination directory (e.g. PDD_eischeck).
        _bakd (dict | None): optional accumulator; updated with
            {backup_path: row count of the replaced frame} and returned.
        suffix (str): file extension for both the pickle and its backup.

    Returns:
        dict: ``_bakd`` with any new backup entry added.
    """
    # Fixed: `_bakd` previously defaulted to a shared mutable dict ({}), which
    # silently accumulated backup entries across calls; create one per call.
    if _bakd is None:
        _bakd = {}
    filepath = Path(destdir.joinpath(filepath).with_suffix(suffix))
    if filepath.is_file():
        # Prefix the backup name with the existing file's creation date.
        date_prefix = datetime.fromtimestamp(filepath.stat().st_ctime).strftime(
            "%Y-%m-%d"
        )
        bak_stem = filepath.parent.joinpath(
            f"{date_prefix}_{filepath.stem}"
        ).with_suffix(suffix)
        _old_df = pd.read_pickle(filepath)
        _bakd.update({bak_stem: len(_old_df)})
        filepath.replace(bak_stem)
    all_fits_df.to_pickle(filepath)
    return _bakd
def unique_destdir(uniq_str):
    """Create and return PostDir/EIS/<uniq_str>; None when uniq_str is falsy."""
    if not uniq_str:
        return None
    dest_dir = FindExpFolder("VERSASTAT").PostDir.joinpath(f"EIS/{uniq_str}")
    dest_dir.mkdir(parents=True, exist_ok=True)
    print(f"Dest dir: {dest_dir}")
    return dest_dir
class EIS_selection:
    """Namespace of constants for selecting and filtering EIS fit results."""

    # pandas query string: sanity limits on fitted circuit-element values
    # (columns referenced without a frame prefix; cf. module-level EIS_filter).
    filter = "(lmfit_MSE < 65E4) & (Rct < 1E3) & (Rct > 2E-2) \
    & (Rs > 0.01) & (Rs < 200) & (Cdlp < 0.075)\
    & (lmfit_redchi < 1E3) & (Aw < 10E3)\
    & (Aw > 10E-2) & (Qad < 1) & (tau < 10)"
    # Equivalent-circuit models used by older fast-check runs (kept for reference).
    _old_fast_checking_EEC_models = [
        "Model(R0-L0-p(R1,CPE1)-p(R2-W2,CPE2))",
        "Model(R0-L0-p(R1-Wo1,CPE1)-C2)",
        "Model(R0-L0-p(R1-Ws1,CPE1)-C2)",
        "Model(R0-L0-W0-p(R1,CPE1)-p(R2,CPE2))",
        "Model(R0-L0-p(R1-W1,CPE1)-C2)",
        "Model(R0-L0-W0-p(R1,CPE1)-p(R2,C2))",
        "Model(R0-L0-p(R1-W1,CPE1)-CPE2)",
        "Model(R0-L0-p(R1-Wo0,C1)-W0)",
        "Model(R0-L0-p(R1-W1,C1)-C2)",
        "Model(R0-L0-p(R1-Wo1,CPE1))",
        "Model(R0-L0-p(R1-Wo0,CPE1)-W0)",
        "Model(R0-L0-p(R1-W1,CPE1))",
        "Model(R0-L0-p(R1-Ws1,CPE1))",
    ]
    # Preferred equivalent-circuit model per measurement gas.
    mod_select = {
        "N2": "Model(RL-TLM(Rct-Qad-W))",
        "O2": "Model(RL-TLM(Rct-p(Qad-W,Rorr)))",
    }
    # Grouping columns when loading, and uniqueness key of an EC experiment.
    loadgrp_cols = ["SampleID", "Electrolyte", "E_RHE", "Gas", "Model_EEC"]
    ECuniq_cols = ["SampleID", "Loading_cm2", "Electrolyte", "E_RHE", "Gas", "RPM_DAC"]
def EIS_filter():
    """Return a pandas eval/query string that filters a frame named
    ``EIS_pars`` down to physically plausible fit parameters."""
    _filter = "(EIS_pars.lmfit_MSE < 65E4) & (EIS_pars.Rct < 2E3) & (EIS_pars.Rct > 2E-4) \
    & (EIS_pars.Rs > 0.01) & (EIS_pars.Rs < 200) & (EIS_pars.Cdlp < 0.075)\
    & (EIS_pars.lmfit_redchi < 1E3) & (EIS_pars.Aw < 10E3) & (EIS_pars.Aw > 10E-2)\
    & (EIS_pars.Qad < 0.5) & (EIS_pars.tau < 3E2)"
    return _filter
# def add_best_model_per_spectrum():
# _grpkeys = ['PAR_file', 'Segment #']
# _tt = EIS_pars.query('SampleID == "JOS4" & Gas == "O2"')
# for (pf,seg),PF_pars in _tt.groupby(_grpkeys):
# (pf,seg),PF_pars
# def find_best_model_per_spectrum(PF_pars, var_lim = 1E5, var_err_lim = 3E3):
# PF_pars = PF_pars.loc[PF_pars.lmfit_message.str.contains('satisfied') == True]
# _lmfitcols = [i for i in PF_pars.columns if i.startswith('lmfit')]
# if PF_pars.Model_EEC.nunique() > 2 and PF_pars.Model_EEC.nunique() == len(PF_pars):
# _res = []
# for n,r in PF_pars.iterrows():
# _vars = r.lmfit_var_names.split(', ')
# _varserr = [i for i in [i+'_stderr' for i in _vars] if i in r.index]
# _vsum,_vmax = r[_vars].sum(),r[_vars].max()
# _bad_vars = set([i for i in _vars if r[i] > var_lim])
# _verrsum,_verrmax = 0, 0
# if _varserr:
# _verrsum,_verrmax = r[_varserr].sum(),r[_varserr].max()
# _bad_varserr = {i:{'val' : r[i],'rel_val' : r[i]/r[(i.split('_stderr')[0])], 'name' : (i.split('_stderr')[0])} for i in _varserr}
# _bad_varserr_val = [val['name'] for k,val in _bad_varserr.items() if val['val'] > var_err_lim]
# _bad_varserr_perc = [val['name'] for k,val in _bad_varserr.items() if val['rel_val'] > var_err_lim]
# _bad_vars_err = set(_bad_varserr_val + _bad_varserr_perc)
# _bad_vars= _bad_vars.union(_bad_vars_err)
# _res.append([n,r.Model_EEC_name, len(_vars), _vsum, _vmax, ', '.join(_bad_vars), len(_bad_vars),_verrsum,_verrmax, r.lmfit_aic,r.lmfit_redchi, r.lmfit_chiqsr])
# var_res = pd.DataFrame(_res,columns=['pf_index','Model_EEC_name','len_vars','varsum','varmax','bad_vars','bad_vars_len','err_varsum','err_varmax',
# 'lmfit_aic','lmfit_redchi','lmfit_chiqsr'])
# var_res = var_res.sort_values(by=['bad_vars_len','lmfit_aic','len_vars'])
# best_mod_row = var_res.head(1)
# _sorted_rank = ', '.join([str(i) for i in var_res.pf_index.values])
# _best_result = [best_mod_row.pf_index.iloc[0],best_mod_row.Model_EEC_name.iloc[0],_sorted_rank]
# var_res.bad_vars_len.unique()
def select_models_EISpars(EIS_pars):
    """Return only the rows fitted with the preferred EEC model for each gas.

    N2 rows must use ``Model(RL-TLM(Rct-Qad-W))`` and O2 rows
    ``Model(RL-TLM-Rct-W-p(Rorr,Qad))``; everything else is dropped.
    """
    preferred = {
        "N2": "Model(RL-TLM(Rct-Qad-W))",
        "O2": "Model(RL-TLM-Rct-W-p(Rorr,Qad))",
    }
    n2_mask = (EIS_pars.Gas == "N2") & (EIS_pars.Model_EEC == preferred["N2"])
    o2_mask = (EIS_pars.Gas == "O2") & (EIS_pars.Model_EEC == preferred["O2"])
    return EIS_pars.loc[n2_mask | o2_mask]
def plot_pars_E(Ekin, E_lower=0.59, E_upper=0.76, Xcol="BET_cat_agg"):
    """Scatter-plot each relevant EC parameter against ``Xcol`` per electrolyte.

    Args:
        Ekin (pd.DataFrame): merged EIS/ORR parameter frame; must contain
            ``E_RHE``, ``Electrolyte`` and the parameter columns.
        E_lower, E_upper (float): potential window (V vs RHE) to include.
        Xcol (str): x-axis column; the function is a no-op when absent.
    """
    if Xcol not in Ekin.columns:
        return
    # Parameter columns present in Ekin, excluding lmfit_redchi-like entries.
    parcols = [
        c
        for c in Ekin.columns
        if c
        in [p for p in SampleSelection.EC_EIS_par_cols if "lmfit_redchi" not in p]
        + SampleSelection.EC_ORR_kin_par_cols
    ]
    # The row selection does not depend on the plotted parameter — hoist it
    # out of the loop (it was recomputed identically each iteration).
    EKinsl = Ekin.query(
        f"E_RHE < {E_upper} & E_RHE > {E_lower} & Rct < 9E05 & Rorr < 1E09 & Qad < 35E-3 & Cdlp < 0.070"
    )
    for par in parcols:
        for Elec, Elgr in EKinsl.groupby("Electrolyte"):
            fig, ax = plt.subplots()
            Elgr.plot(
                x=Xcol, y=par, kind="scatter", c="E_RHE", colormap="rainbow_r", ax=ax
            )
            # BUG FIX: `ax.set_xlim = (0.6, 0.9)` overwrote the Axes method
            # with a tuple instead of setting the limits; call the method.
            ax.set_xlim(0.6, 0.9)
            ax.set_title(Elec)
            # BUG FIX: `plotting` is never bound in this module (both import
            # branches bind `eisplot` directly; `import plotting` is commented
            # out), so `plotting.eisplot` raised NameError.
            ps = eisplot(par)
            ax.set_ylim(ps.ylim)
            ax.set_yscale(ps.logyscale)
            plt.show()
            plt.close()
def EIS_all_check_redchi(EIS_pars, eischeck_plot=False):
    """Classify EIS fits as good or bad per (PAR_file, Model_EEC) group.

    Each group is screened with z-scores of the fitted variables and of the
    ``lmfit_MSE`` error plus absolute limits. Rows failing the screen become
    "bad fits" and receive suggested parameter values averaged from the good
    fits at the nearest potentials. The good/bad/suggestion frames are pickled
    (with dated backups) under ``PostDir/EIS/redchi_check``.

    Args:
        EIS_pars (pd.DataFrame): collected EIS fitting results.
        eischeck_plot (bool): plotting toggle. NOTE(review): it is overwritten
            with 0 inside the loop, so the plot branch never executes —
            confirm whether that is intentional.
    """
    PostDestDir = FindExpFolder("VERSASTAT").PostDir
    PDD_eischeck = PostDestDir.joinpath("EIS/redchi_check")
    PDD_eischeck.mkdir(parents=True, exist_ok=True)
    # Sample series whose sample IDs participate in the check.
    SeriesIDs = [
        SampleSelection.Series_CB_paper,
        SampleSelection.Series_Porhp_SiO2,
        SampleSelection.Series_Co_PANI,
        SampleSelection.Series_ML_SiO2,
    ]
    all_sIDs = [a for i in [i.get("sIDs", []) for i in SeriesIDs] for a in i]
    EvRHE = "E_AppV_RHE"
    # Keep only successful fits for those samples; one row per
    # (file, potential, model).
    EIS_pars_all = EIS_pars.loc[
        EIS_pars.SampleID.isin(all_sIDs)
        & (EIS_pars.Rs > 1)
        & (EIS_pars.lmfit_message.str.contains("Fit succeeded"))
    ].drop_duplicates(subset=["PAR_file", EvRHE, "Model_EEC"])
    EIS_pars_all[EvRHE] = EIS_pars_all[EvRHE].round(3)
    all_good_lst, all_bad_lst, _bad_fit_suggestions = [], [], []
    # NOTE(review): EEC_models_index is imported only in the non-__main__
    # branch of this module — confirm it is in scope when run as a script.
    mod_index = EEC_models_index()
    EIS_pars_all_grp_PF_mod = EIS_pars_all.groupby(["PAR_file", "Model_EEC"])
    for (pf, mod), Mgrp in EIS_pars_all_grp_PF_mod:
        # pf,mod, Mgrp
        PDD_eischeck_figs = PDD_eischeck.joinpath(Path(pf).parent.name)
        PDD_eischeck_figs.mkdir(parents=True, exist_ok=True)
        # egrp = Mgrp.groupby(EvRHE)
        # Need more than two spectra per group for the z-score screen.
        if len(Mgrp) > 2:
            savestem = f'{Path(pf).stem}_{mod.split("Model")[-1]}'
            Mgrp2 = Mgrp
            # Fitted variable names: from the stored lmfit metadata, or from
            # the model index when the metadata is missing.
            var_names_uniq = Mgrp2.lmfit_var_names.unique()[0]
            if pd.isna(var_names_uniq):
                var_names = [i for i in mod_index if mod in i[1].name][0][1].param_names
            else:
                var_names = list(
                    map(
                        str.strip,
                        var_names_uniq.strip(")(").replace("'", "").split(","),
                    )
                )
            # var_names = Mgrp2.lmfit_var_names.unique()[0]
            # Per-variable |z-score| across the group, keyed with the row index.
            var_zscores = {
                var + "_zscore": (np.abs(stats.zscore(Mgrp[var])), Mgrp[var].index)
                for var in var_names
            }
            # Row indexes whose variable z-scores stay below 1.45.
            z_indexes = [
                i
                for key, val in var_zscores.items()
                for z, i in zip(val[0], val[1])
                if z < 1.45
            ]
            # Mgrp[Mgrp[[var+'_zscore' for var in var_names]] < 1.8]
            # rdchi_min_mean = (Mgrp2.lmfit_redchi.nsmallest(int(len(Mgrp2)/2))).mean()+2*Mgrp2.lmfit_redchi.nsmallest(int(np.round(0.7*len(Mgrp2)))).std()
            # Error threshold: mean of the smallest half plus two std devs of
            # the smallest 70% of the MSE values.
            error_col = "lmfit_MSE"
            error_min_mean = (
                Mgrp2[error_col].nsmallest(int(len(Mgrp2) / 2))
            ).mean() + 2 * Mgrp2[error_col].nsmallest(
                int(np.round(0.7 * len(Mgrp2)))
            ).std()
            # [Mgrp[var+'_zscore'] > 1.9 for var in var_names]1
            # lin = linregress(x=Mgrp[EvRHE], y= Mgrp.lmfit_redchi)
            # Good fits: low error z-score, below the error threshold, sane
            # Rs/Rct, and passing the per-variable z-score screen.
            good_fits = Mgrp[
                ((stats.zscore(Mgrp[error_col])) < 1.4)
                & (Mgrp[error_col] < error_min_mean)
                & (Mgrp.Rs > 2)
                & (Mgrp.Rct < 20e3)
                & (Mgrp.index.isin(z_indexes))
            ]
            if good_fits.empty:
                # Fallback: borrow good fits from other files of the same
                # sample fitted with the same model.
                s_ch = EIS_pars_all.loc[
                    (EIS_pars_all.SampleID == Mgrp.SampleID.unique()[0])
                    & (EIS_pars_all.PAR_file != pf)
                    & (EIS_pars_all.Model_EEC == mod)
                ]
                gs = s_ch[
                    ((stats.zscore(s_ch[error_col])) < 1.4)
                    & (s_ch[error_col] < error_min_mean)
                    & (s_ch.Rs > 2)
                    & (s_ch.Rct < 20e3)
                    & (s_ch.nAd > 0.1)
                ]
                if not gs.empty:
                    good_fits = gs
            # Annotate per-variable min/max over the good fits on both frames.
            var_min = {var + "_min": good_fits[var].min() for var in var_names}
            good_fits = good_fits.assign(**var_min)
            var_max = {var + "_max": good_fits[var].max() for var in var_names}
            good_fits = good_fits.assign(**var_max)
            bad_fits = Mgrp.loc[~Mgrp.index.isin(good_fits.index.values)]
            bad_fits = bad_fits.assign(**var_max)
            bad_fits = bad_fits.assign(**var_min)
            if not bad_fits.empty:
                # Suggest replacement values for each bad (potential, RPM)
                # point: mean of the nearest-potential quarter of good fits.
                for (Ev, RPM), Egrp in bad_fits.groupby([EvRHE, "RPM_DAC"]):
                    good_fits["E_diff"] = np.abs(good_fits["E_RHE"] - Ev)
                    top_Ev = int(len(good_fits) / 4 + 3)
                    gf_topE = good_fits.sort_values("E_diff").head(top_Ev)
                    Egrp = Egrp.assign(
                        **{var: gf_topE[var].mean() for var in var_names}
                    )
                    # bad_fit_guesses = good_fits.mean().drop(['E_AppV_RHE','E_RHE','RPM_DAC']).dropna().append(pd.Series({'PAR_file' : str(pf), 'Model_EEC' : mod, EvRHE : Ev, 'RPM_DAC' : RPM}))
                    _bad_fit_suggestions.append(Egrp)
            all_good_lst.append(good_fits)
            all_bad_lst.append(bad_fits)
            # NOTE(review): this forces the plot branch off regardless of the
            # `eischeck_plot` argument.
            eischeck_plot = 0
            if eischeck_plot:
                for par in [error_col]:
                    # par = 'Rct'
                    fig, ax = plt.subplots()
                    good_fits.plot(x=EvRHE, y=par, c="g", ax=ax)
                    bad_fits.plot(
                        x=EvRHE, y=par, c="r", ax=ax, kind="scatter", marker="x", s=80
                    )
                    ax.set_title(savestem)
                    ax.set_yscale("log")
                    ax.autoscale(True)
                    ax.legend()
                    plt.savefig(
                        PDD_eischeck_figs.joinpath(f"{savestem}.png"),
                        bbox_inches="tight",
                    )
                    plt.close()
    # Collect, deduplicate and persist the three result frames (with backups).
    all_good_fits = pd.concat(all_good_lst, sort=False).drop_duplicates()
    all_bad_fits = pd.concat(all_bad_lst, sort=False).drop_duplicates()
    all_bad_fits_suggestions = pd.concat(
        _bad_fit_suggestions, sort=False
    ).drop_duplicates()
    # .dropna(axis=1, how='all')
    # print(f'Good fits: {len(all_good_fits)}, bad fits: {len(all_bad_fits)}, diff {len(all_good_fits) - len(all_bad_fits)}')
    bak_dict = {}
    for (all_fits_df, filepath) in [
        (all_good_fits, "EIS_recheck_good_fits"),
        (all_bad_fits, "EIS_recheck_bad_fits"),
        (all_bad_fits_suggestions, "EIS_recheck_bad_fits_suggestions"),
    ]:
        bak_dict.update(check_file_and_backup(all_fits_df, filepath, PDD_eischeck))
    print(
        f"Good fits: {len(all_good_fits)}, bad fits: {len(all_bad_fits)}, diff {len(all_good_fits) - len(all_bad_fits)}"
    )
    print(f"{bak_dict}")
    # .to_excel(PDD_eischeck.joinpath('EIS_recheck_good_fits.xlsx'))
    # .to_excel(PDD_eischeck.joinpath())
    # all_bad_fits_suggestions.to_excel(PDD_eischeck.joinpath())
# for (pf,mod),Mgrp in EIS_pars_all.groupby(['PAR_file','Model_EEC']):
# Mgrp.lmfit_redchi.min(), Mgrp.lmfit_redchi.std()
def select_DRT_samples(ORR_acid_no_mod):
# Reads DRT fitting results from EIS_pars, loops over SourceFilename and loads json
_DRT_collect = {}
for sf, sfgr in EIS_pars.groupby("SourceFilename"):
# sf,sfgr
_DRT1 = sf.parent.joinpath(
"GP_DRT", sf.stem.split("_pars")[0] + "_GP_DRT_res_params.json"
)
if _DRT1.is_file():
_sfdrt = _DRT1
_drt_json = json.loads(_sfdrt.read_text())
_DRT_collect.update({sf: _drt_json})
else:
print(f"No file: {_DRT1}")
DRT_pars = pd.DataFrame(data=_DRT_collect.values(), index=_DRT_collect.keys())
DRT_pars = DRT_pars.rename(
columns={
i: "GP_DRT_" + i for i in DRT_pars.columns if not i.startswith("GP_DRT")
}
)
DRT_pars_columns = DRT_pars.columns
DRT_pars.index.rename("SourceFilename", inplace=True)
DRT_pars = DRT_pars.reset_index()
EIS_DRT_pars = pd.merge(EIS_pars, DRT_pars, on="SourceFilename", how="left")
EIS_DRT_pars = EIS_DRT_pars.loc[
(EIS_DRT_pars["GP_DRT_success"] == True)
| (EIS_DRT_pars["GP_DRT_success"] == False)
]
#
for (msg, Elec), Elgr in EIS_DRT_pars.groupby(["GP_DRT_success", "Electrolyte"]):
if not Elgr.empty:
for i in ["sigma_n", "sigma_f", "ell"]:
fig, ax = plt.subplots()
# Elgr.plot(x='BET_cat_agg', y= 'GP_DRT_'+i, kind='scatter', c='E_RHE',colormap='rainbow_r',ax=ax)
Elgr.plot(
x="E_RHE",
y="GP_DRT_" + i,
kind="scatter",
c="BET_cat_agg",
colormap="rainbow_r",
ax=ax,
)
ax.set_xlim = (0.6, 0.9)
ax.set_title(f"{Elec}, {msg}")
# ps = plotting.eisplot(i)
# ax.set_ylim(ps.ylim)
# ax.set_yscale(ps.logyscale)
plt.show()
plt.close()
# Prepares Spectra from DRT pars and read pickles from Z_star filenames
_lst_spectra = []
ECexp_cols = [
"PAR_file",
"SampleID",
"postAST",
"Loading_cm2",
"Electrolyte",
"pH",
"Gas",
"RPM_DAC",
"E_RHE",
]
for grnm, sgrp in EIS_DRT_pars.drop_duplicates(
subset=list(DRT_pars.columns) + ECexp_cols
).groupby(ECexp_cols):
if len(sgrp) == 1:
| |
<gh_stars>1-10
import json
from typing import List
from unittest import mock
import boto3
import pandas as pd
import pytest
from moto import mock_s3
from ruamel.yaml import YAML
import great_expectations.exceptions.exceptions as ge_exceptions
from great_expectations import DataContext
from great_expectations.core.batch import (
BatchDefinition,
BatchRequest,
BatchRequestBase,
IDDict,
)
from great_expectations.data_context.util import instantiate_class_from_config
from great_expectations.datasource.data_connector import ConfiguredAssetS3DataConnector
yaml = YAML()
@mock_s3
def test_basic_instantiation():
    """Run ConfiguredAssetS3DataConnector against a mocked bucket and verify
    self_check output, the reference cache, and rejection of a batch request
    for a datasource the connector does not belong to."""
    region = "us-east-1"
    bucket_name = "test_bucket"
    boto3.resource("s3", region_name=region).create_bucket(Bucket=bucket_name)
    s3_client = boto3.client("s3", region_name=region)
    frame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
    csv_body = frame.to_csv(index=False).encode("utf-8")
    for key in ("alpha-1.csv", "alpha-2.csv", "alpha-3.csv"):
        s3_client.put_object(Bucket=bucket_name, Body=csv_body, Key=key)
    connector = ConfiguredAssetS3DataConnector(
        name="my_data_connector",
        datasource_name="FAKE_DATASOURCE_NAME",
        default_regex={
            "pattern": "alpha-(.*)\\.csv",
            "group_names": ["index"],
        },
        bucket=bucket_name,
        prefix="",
        assets={"alpha": {}},
    )
    expected_report = {
        "class_name": "ConfiguredAssetS3DataConnector",
        "data_asset_count": 1,
        "example_data_asset_names": [
            "alpha",
        ],
        "data_assets": {
            "alpha": {
                "example_data_references": [
                    "alpha-1.csv",
                    "alpha-2.csv",
                    "alpha-3.csv",
                ],
                "batch_definition_count": 3,
            },
        },
        "example_unmatched_data_references": [],
        "unmatched_data_reference_count": 0,
        # FIXME: (Sam) example_data_reference removed temporarily in PR #2590:
        # "example_data_reference": {},
    }
    assert connector.self_check() == expected_report
    # noinspection PyProtectedMember
    connector._refresh_data_references_cache()
    assert connector.get_data_reference_list_count() == 3
    assert connector.get_unmatched_data_references() == []
    # A batch request naming a foreign datasource must be rejected.
    with pytest.raises(ValueError):
        print(
            connector.get_batch_definition_list_from_batch_request(
                BatchRequest(
                    datasource_name="something",
                    data_connector_name="my_data_connector",
                    data_asset_name="something",
                )
            )
        )
@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
@mock_s3
def test_instantiation_from_a_config(mock_emit, empty_data_context_stats_enabled):
    """Build the connector from YAML via DataContext.test_yaml_config and
    verify both the returned report and the anonymized usage-stats emit."""
    context: DataContext = empty_data_context_stats_enabled
    region_name: str = "us-east-1"
    bucket: str = "test_bucket"
    # Mocked S3 bucket populated with three CSV objects matching the regex.
    conn = boto3.resource("s3", region_name=region_name)
    conn.create_bucket(Bucket=bucket)
    client = boto3.client("s3", region_name=region_name)
    test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
    keys: List[str] = [
        "alpha-1.csv",
        "alpha-2.csv",
        "alpha-3.csv",
    ]
    for key in keys:
        client.put_object(
            Bucket=bucket, Body=test_df.to_csv(index=False).encode("utf-8"), Key=key
        )
    report_object = context.test_yaml_config(
        f"""
module_name: great_expectations.datasource.data_connector
class_name: ConfiguredAssetS3DataConnector
datasource_name: FAKE_DATASOURCE
name: TEST_DATA_CONNECTOR
default_regex:
    pattern: alpha-(.*)\\.csv
    group_names:
        - index
bucket: {bucket}
prefix: ""
assets:
    alpha:
""",
        return_mode="report_object",
    )
    # All three keys match the regex: one asset with three batch definitions.
    assert report_object == {
        "class_name": "ConfiguredAssetS3DataConnector",
        "data_asset_count": 1,
        "example_data_asset_names": [
            "alpha",
        ],
        "data_assets": {
            "alpha": {
                "example_data_references": [
                    "alpha-1.csv",
                    "alpha-2.csv",
                    "alpha-3.csv",
                ],
                "batch_definition_count": 3,
            },
        },
        "example_unmatched_data_references": [],
        "unmatched_data_reference_count": 0,
        # FIXME: (Sam) example_data_reference removed temporarily in PR #2590:
        # "example_data_reference": {},
    }
    # Exactly one usage-statistics event carrying the anonymized connector name.
    assert mock_emit.call_count == 1
    anonymized_name = mock_emit.call_args_list[0][0][0]["event_payload"][
        "anonymized_name"
    ]
    expected_call_args_list = [
        mock.call(
            {
                "event": "data_context.test_yaml_config",
                "event_payload": {
                    "anonymized_name": anonymized_name,
                    "parent_class": "ConfiguredAssetS3DataConnector",
                },
                "success": True,
            }
        ),
    ]
    assert mock_emit.call_args_list == expected_call_args_list
@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
@mock_s3
def test_instantiation_from_a_config_regex_does_not_match_paths(
    mock_emit, empty_data_context_stats_enabled
):
    """Same YAML-based instantiation, but with a regex (beta-*) that matches
    none of the uploaded keys: everything lands in unmatched references."""
    context: DataContext = empty_data_context_stats_enabled
    region_name: str = "us-east-1"
    bucket: str = "test_bucket"
    # Mocked S3 bucket with three alpha-*.csv objects (regex expects beta-*).
    conn = boto3.resource("s3", region_name=region_name)
    conn.create_bucket(Bucket=bucket)
    client = boto3.client("s3", region_name=region_name)
    test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
    keys: List[str] = [
        "alpha-1.csv",
        "alpha-2.csv",
        "alpha-3.csv",
    ]
    for key in keys:
        client.put_object(
            Bucket=bucket, Body=test_df.to_csv(index=False).encode("utf-8"), Key=key
        )
    report_object = context.test_yaml_config(
        f"""
module_name: great_expectations.datasource.data_connector
class_name: ConfiguredAssetS3DataConnector
datasource_name: FAKE_DATASOURCE
name: TEST_DATA_CONNECTOR
bucket: {bucket}
prefix: ""
default_regex:
    pattern: beta-(.*)\\.csv
    group_names:
        - index
assets:
    alpha:
""",
        return_mode="report_object",
    )
    # No key matches beta-*: zero batch definitions, three unmatched refs.
    assert report_object == {
        "class_name": "ConfiguredAssetS3DataConnector",
        "data_asset_count": 1,
        "example_data_asset_names": [
            "alpha",
        ],
        "data_assets": {
            "alpha": {"example_data_references": [], "batch_definition_count": 0},
        },
        "example_unmatched_data_references": [
            "alpha-1.csv",
            "alpha-2.csv",
            "alpha-3.csv",
        ],
        "unmatched_data_reference_count": 3,
        # FIXME: (Sam) example_data_reference removed temporarily in PR #2590:
        # "example_data_reference": {},
    }
    # Usage-statistics event is still emitted exactly once.
    assert mock_emit.call_count == 1
    anonymized_name = mock_emit.call_args_list[0][0][0]["event_payload"][
        "anonymized_name"
    ]
    expected_call_args_list = [
        mock.call(
            {
                "event": "data_context.test_yaml_config",
                "event_payload": {
                    "anonymized_name": anonymized_name,
                    "parent_class": "ConfiguredAssetS3DataConnector",
                },
                "success": True,
            }
        ),
    ]
    assert mock_emit.call_args_list == expected_call_args_list
@mock_s3
def test_return_all_batch_definitions_unsorted():
    """With no sorters configured, batch definitions come back in the
    lexicographic order in which S3 lists the object keys — both via the
    private API with an anonymous BatchRequestBase and via the public API
    with a named data_asset_name.
    """
    region_name: str = "us-east-1"
    bucket: str = "test_bucket"
    conn = boto3.resource("s3", region_name=region_name)
    conn.create_bucket(Bucket=bucket)
    client = boto3.client("s3", region_name=region_name)

    test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})

    # Deliberately NOT in lexicographic order; S3 listing will reorder them.
    keys: List[str] = [
        "alex_20200809_1000.csv",
        "eugene_20200809_1500.csv",
        "james_20200811_1009.csv",
        "abe_20200809_1040.csv",
        "will_20200809_1002.csv",
        "james_20200713_1567.csv",
        "eugene_20201129_1900.csv",
        "will_20200810_1001.csv",
        "james_20200810_1003.csv",
        "alex_20200819_1300.csv",
    ]
    for key in keys:
        client.put_object(
            Bucket=bucket, Body=test_df.to_csv(index=False).encode("utf-8"), Key=key
        )

    my_data_connector_yaml = yaml.load(
        f"""
        class_name: ConfiguredAssetS3DataConnector
        datasource_name: test_environment
        #execution_engine:
        #    class_name: PandasExecutionEngine
        bucket: {bucket}
        prefix: ""
        assets:
            TestFiles:
        default_regex:
            pattern: (.+)_(.+)_(.+)\\.csv
            group_names:
                - name
                - timestamp
                - price
        """,
    )

    my_data_connector: ConfiguredAssetS3DataConnector = instantiate_class_from_config(
        config=my_data_connector_yaml,
        runtime_environment={
            "name": "general_s3_data_connector",
            "datasource_name": "test_environment",
        },
        config_defaults={"module_name": "great_expectations.datasource.data_connector"},
    )

    # A batch request argument is mandatory.
    with pytest.raises(TypeError):
        my_data_connector.get_batch_definition_list_from_batch_request()

    # The public API rejects a BatchRequest without a data_asset_name.
    with pytest.raises(TypeError):
        my_data_connector.get_batch_definition_list_from_batch_request(
            BatchRequest(
                datasource_name="test_environment",
                data_connector_name="general_s3_data_connector",
                data_asset_name=None,
            )
        )

    # The private API accepts a BatchRequestBase with no data_asset_name.
    unsorted_batch_definition_list = (
        my_data_connector._get_batch_definition_list_from_batch_request(
            BatchRequestBase(
                datasource_name="test_environment",
                data_connector_name="general_s3_data_connector",
                data_asset_name=None,
            )
        )
    )

    # Expected order is lexicographic by S3 key (ascending UTF-8 binary
    # order, the order in which S3 lists objects).
    expected = [
        BatchDefinition(
            datasource_name="test_environment",
            data_connector_name="general_s3_data_connector",
            data_asset_name="TestFiles",
            batch_identifiers=IDDict(
                {"name": name, "timestamp": timestamp, "price": price}
            ),
        )
        for name, timestamp, price in [
            ("abe", "20200809", "1040"),
            ("alex", "20200809", "1000"),
            ("alex", "20200819", "1300"),
            ("eugene", "20200809", "1500"),
            ("eugene", "20201129", "1900"),
            ("james", "20200713", "1567"),
            ("james", "20200810", "1003"),
            ("james", "20200811", "1009"),
            ("will", "20200809", "1002"),
            ("will", "20200810", "1001"),
        ]
    ]
    assert expected == unsorted_batch_definition_list

    # The public API with a named data_asset_name returns the same list.
    unsorted_batch_definition_list = (
        my_data_connector.get_batch_definition_list_from_batch_request(
            BatchRequest(
                datasource_name="test_environment",
                data_connector_name="general_s3_data_connector",
                data_asset_name="TestFiles",
            )
        )
    )
    assert expected == unsorted_batch_definition_list
@mock_s3
def test_return_all_batch_definitions_sorted():
    """With Lexicographic/DateTime/Numeric sorters configured, batch
    definitions come back sorted by name (asc), then timestamp (desc),
    then price (desc); a data_connector_query narrows the result to the
    matching partition.
    """
    region_name: str = "us-east-1"
    bucket: str = "test_bucket"
    conn = boto3.resource("s3", region_name=region_name)
    conn.create_bucket(Bucket=bucket)
    client = boto3.client("s3", region_name=region_name)

    test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})

    keys: List[str] = [
        "alex_20200809_1000.csv",
        "eugene_20200809_1500.csv",
        "james_20200811_1009.csv",
        "abe_20200809_1040.csv",
        "will_20200809_1002.csv",
        "james_20200713_1567.csv",
        "eugene_20201129_1900.csv",
        "will_20200810_1001.csv",
        "james_20200810_1003.csv",
        "alex_20200819_1300.csv",
    ]
    for key in keys:
        client.put_object(
            Bucket=bucket, Body=test_df.to_csv(index=False).encode("utf-8"), Key=key
        )

    my_data_connector_yaml = yaml.load(
        f"""
        class_name: ConfiguredAssetS3DataConnector
        datasource_name: test_environment
        #execution_engine:
        #    class_name: PandasExecutionEngine
        bucket: {bucket}
        prefix: ""
        assets:
            TestFiles:
        default_regex:
            pattern: (.+)_(.+)_(.+)\\.csv
            group_names:
                - name
                - timestamp
                - price
        sorters:
            - orderby: asc
              class_name: LexicographicSorter
              name: name
            - datetime_format: "%Y%m%d"
              orderby: desc
              class_name: DateTimeSorter
              name: timestamp
            - orderby: desc
              class_name: NumericSorter
              name: price
        """,
    )

    my_data_connector: ConfiguredAssetS3DataConnector = instantiate_class_from_config(
        config=my_data_connector_yaml,
        runtime_environment={
            "name": "general_s3_data_connector",
            "datasource_name": "test_environment",
        },
        config_defaults={"module_name": "great_expectations.datasource.data_connector"},
    )

    self_check_report = my_data_connector.self_check()
    assert self_check_report["class_name"] == "ConfiguredAssetS3DataConnector"
    assert self_check_report["data_asset_count"] == 1
    assert self_check_report["data_assets"]["TestFiles"]["batch_definition_count"] == 10
    assert self_check_report["unmatched_data_reference_count"] == 0

    sorted_batch_definition_list = (
        my_data_connector.get_batch_definition_list_from_batch_request(
            BatchRequest(
                datasource_name="test_environment",
                data_connector_name="general_s3_data_connector",
                data_asset_name="TestFiles",
            )
        )
    )

    # Expected order: name ascending, then timestamp descending, then
    # price descending.
    expected = [
        BatchDefinition(
            datasource_name="test_environment",
            data_connector_name="general_s3_data_connector",
            data_asset_name="TestFiles",
            batch_identifiers=IDDict(
                {"name": name, "timestamp": timestamp, "price": price}
            ),
        )
        for name, timestamp, price in [
            ("abe", "20200809", "1040"),
            ("alex", "20200819", "1300"),
            ("alex", "20200809", "1000"),
            ("eugene", "20201129", "1900"),
            ("eugene", "20200809", "1500"),
            ("james", "20200811", "1009"),
            ("james", "20200810", "1003"),
            ("james", "20200713", "1567"),
            ("will", "20200810", "1001"),
            ("will", "20200809", "1002"),
        ]
    ]

    # TEST 1: Sorting works
    assert expected == sorted_batch_definition_list

    my_batch_request: BatchRequest = BatchRequest(
        datasource_name="test_environment",
        data_connector_name="general_s3_data_connector",
        data_asset_name="TestFiles",
        data_connector_query=IDDict(
            **{
                "batch_filter_parameters": {
                    "name": "james",
                    "timestamp": "20200713",
                    "price": "1567",
                }
            }
        ),
    )

    my_batch_definition_list: List[BatchDefinition]
    my_batch_definition: BatchDefinition

    # TEST 2: Should only return the specified partition
    my_batch_definition_list = (
        my_data_connector.get_batch_definition_list_from_batch_request(
            batch_request=my_batch_request
        )
    )
    assert len(my_batch_definition_list) == 1
    my_batch_definition = my_batch_definition_list[0]
    expected_batch_definition: BatchDefinition = BatchDefinition(
        datasource_name="test_environment",
        data_connector_name="general_s3_data_connector",
        data_asset_name="TestFiles",
        batch_identifiers=IDDict(
            **{
                "name": "james",
                "timestamp": "20200713",
                "price": "1567",
            }
        ),
    )
    assert my_batch_definition == expected_batch_definition

    # TEST 3: Without data_connector_query, should return all 10
    my_batch_request = BatchRequest(
        datasource_name="test_environment",
        data_connector_name="general_s3_data_connector",
        data_asset_name="TestFiles",
        data_connector_query=None,
    )
    my_batch_definition_list = (
        my_data_connector.get_batch_definition_list_from_batch_request(
            batch_request=my_batch_request
        )
    )
    assert len(my_batch_definition_list) == 10
@mock_s3
def test_alpha():
    """A connector configured with a single asset ("A") matches all four
    objects under the prefix; requesting an unknown asset ("B") yields an
    empty batch-definition list.
    """
    region_name: str = "us-east-1"
    bucket: str = "test_bucket"
    s3_resource = boto3.resource("s3", region_name=region_name)
    s3_resource.create_bucket(Bucket=bucket)
    s3_client = boto3.client("s3", region_name=region_name)

    frame: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
    object_keys: List[str] = [
        "test_dir_alpha/A.csv",
        "test_dir_alpha/B.csv",
        "test_dir_alpha/C.csv",
        "test_dir_alpha/D.csv",
    ]
    for object_key in object_keys:
        s3_client.put_object(
            Bucket=bucket,
            Body=frame.to_csv(index=False).encode("utf-8"),
            Key=object_key,
        )

    my_data_connector_yaml = yaml.load(
        f"""
        module_name: great_expectations.datasource.data_connector
        class_name: ConfiguredAssetS3DataConnector
        bucket: {bucket}
        prefix: test_dir_alpha
        assets:
            A:
        default_regex:
            pattern: .*(.+)\\.csv
            group_names:
                - part_1
        """,
    )

    my_data_connector: ConfiguredAssetS3DataConnector = instantiate_class_from_config(
        config=my_data_connector_yaml,
        runtime_environment={
            "name": "general_s3_data_connector",
            "datasource_name": "BASE",
        },
        config_defaults={"module_name": "great_expectations.datasource.data_connector"},
    )

    self_check_report = my_data_connector.self_check()
    print(json.dumps(self_check_report, indent=2))

    # The self-check sees exactly one asset ("A") and no unmatched keys.
    assert self_check_report["class_name"] == "ConfiguredAssetS3DataConnector"
    assert self_check_report["data_asset_count"] == 1
    assert set(list(self_check_report["data_assets"].keys())) == {"A"}
    assert self_check_report["unmatched_data_reference_count"] == 0

    my_batch_definition_list: List[BatchDefinition]
    my_batch_definition: BatchDefinition

    # Try to fetch a batch from a nonexistent asset
    request: BatchRequest = BatchRequest(
        datasource_name="BASE",
        data_connector_name="general_s3_data_connector",
        data_asset_name="B",
        data_connector_query=None,
    )
    my_batch_definition_list = (
        my_data_connector.get_batch_definition_list_from_batch_request(
            batch_request=request
        )
    )
    assert len(my_batch_definition_list) == 0
| |
EPIGRAPHIC LETTER REVERSED P': None,
'LATIN LETTER AIN': None,
'LATIN LETTER SMALL CAPITAL A': None,
'LATIN LETTER SMALL CAPITAL AE': None,
'LATIN LETTER SMALL CAPITAL BARRED B': None,
'LATIN LETTER SMALL CAPITAL C': None,
'LATIN LETTER SMALL CAPITAL D': None,
'LATIN LETTER SMALL CAPITAL E': None,
'LATIN LETTER SMALL CAPITAL ETH': None,
'LATIN LETTER SMALL CAPITAL EZH': None,
'LATIN LETTER SMALL CAPITAL F': None,
'LATIN LETTER SMALL CAPITAL J': None,
'LATIN LETTER SMALL CAPITAL K': None,
'LATIN LETTER SMALL CAPITAL L WITH STROKE': None,
'LATIN LETTER SMALL CAPITAL M': None,
'LATIN LETTER SMALL CAPITAL O': None,
'LATIN LETTER SMALL CAPITAL OPEN O': None,
'LATIN LETTER SMALL CAPITAL OU': None,
'LATIN LETTER SMALL CAPITAL P': None,
'LATIN LETTER SMALL CAPITAL REVERSED N': None,
'LATIN LETTER SMALL CAPITAL REVERSED R': None,
'LATIN LETTER SMALL CAPITAL RUM': None,
'LATIN LETTER SMALL CAPITAL S': None,
'LATIN LETTER SMALL CAPITAL T': None,
'LATIN LETTER SMALL CAPITAL TURNED E': None,
'LATIN LETTER SMALL CAPITAL TURNED R': None,
'LATIN LETTER SMALL CAPITAL U': None,
'LATIN LETTER SMALL CAPITAL V': None,
'LATIN LETTER SMALL CAPITAL W': None,
'LATIN LETTER SMALL CAPITAL Z': None,
'LATIN LETTER VOICED LARYNGEAL SPIRANT': None,
'LATIN SMALL CAPITAL LETTER I WITH STROKE': None,
'LATIN SMALL CAPITAL LETTER U WITH STROKE': None,
'LATIN SMALL LETTER A WITH MACRON AND GRAVE': None,
'LATIN SMALL LETTER A WITH OGONEK AND ACUTE': None,
'LATIN SMALL LETTER A WITH OGONEK AND TILDE': None,
'LATIN SMALL LETTER A WITH RETROFLEX HOOK': None,
'LATIN SMALL LETTER A WITH STROKE': None,
'LATIN SMALL LETTER AA': None,
'LATIN SMALL LETTER ALPHA WITH RETROFLEX HOOK': None,
'LATIN SMALL LETTER AO': None,
'LATIN SMALL LETTER AU': None,
'LATIN SMALL LETTER AV': None,
'LATIN SMALL LETTER AV WITH HORIZONTAL BAR': None,
'LATIN SMALL LETTER AY': None,
'LATIN SMALL LETTER B WITH MIDDLE TILDE': None,
'LATIN SMALL LETTER B WITH PALATAL HOOK': None,
'LATIN SMALL LETTER BOTTOM HALF O': None,
'LATIN SMALL LETTER BROKEN L': None,
'LATIN SMALL LETTER C WITH STROKE': None,
'LATIN SMALL LETTER CON': None,
'LATIN SMALL LETTER CUATRILLO': None,
'LATIN SMALL LETTER CUATRILLO WITH COMMA': None,
'LATIN SMALL LETTER D WITH CURL': None,
'LATIN SMALL LETTER D WITH HOOK AND TAIL': None,
'LATIN SMALL LETTER D WITH MIDDLE TILDE': None,
'LATIN SMALL LETTER D WITH PALATAL HOOK': None,
'LATIN SMALL LETTER DB DIGRAPH': None,
'LATIN SMALL LETTER DELTA': None,
'LATIN SMALL LETTER DOTLESS J': None,
'LATIN SMALL LETTER DUM': None,
'LATIN SMALL LETTER E WITH CIRCUMFLEX AND CARON': None,
'LATIN SMALL LETTER E WITH CIRCUMFLEX AND MACRON': None,
'LATIN SMALL LETTER E WITH DOT ABOVE AND ACUTE': None,
'LATIN SMALL LETTER E WITH DOT ABOVE AND TILDE': None,
'LATIN SMALL LETTER E WITH NOTCH': None,
'LATIN SMALL LETTER E WITH OGONEK AND ACUTE': None,
'LATIN SMALL LETTER E WITH OGONEK AND TILDE': None,
'LATIN SMALL LETTER E WITH RETROFLEX HOOK': None,
'LATIN SMALL LETTER E WITH STROKE': None,
'LATIN SMALL LETTER E WITH VERTICAL LINE BELOW': None,
'LATIN SMALL LETTER E WITH VERTICAL LINE BELOW AND ACUTE': None,
'LATIN SMALL LETTER E WITH VERTICAL LINE BELOW AND GRAVE': None,
'LATIN SMALL LETTER EGYPTOLOGICAL AIN': None,
'LATIN SMALL LETTER EGYPTOLOGICAL ALEF': None,
'LATIN SMALL LETTER ESH WITH PALATAL HOOK': None,
'LATIN SMALL LETTER ESH WITH RETROFLEX HOOK': None,
'LATIN SMALL LETTER ET': None,
'LATIN SMALL LETTER EZH WITH RETROFLEX HOOK': None,
'LATIN SMALL LETTER F WITH MIDDLE TILDE': None,
'LATIN SMALL LETTER F WITH PALATAL HOOK': None,
'LATIN SMALL LETTER G WITH PALATAL HOOK': None,
'LATIN SMALL LETTER GHA': None,
'LATIN SMALL LETTER GLOTTAL STOP': None,
'LATIN SMALL LETTER H WITH DESCENDER': None,
'LATIN SMALL LETTER HALF H': None,
'LATIN SMALL LETTER HENG': None,
'LATIN SMALL LETTER I WITH DOT ABOVE AND ACUTE': None,
'LATIN SMALL LETTER I WITH DOT ABOVE AND GRAVE': None,
'LATIN SMALL LETTER I WITH DOT ABOVE AND TILDE': None,
'LATIN SMALL LETTER I WITH MACRON AND GRAVE': None,
'LATIN SMALL LETTER I WITH OGONEK AND DOT ABOVE AND ACUTE': None,
'LATIN SMALL LETTER I WITH OGONEK AND DOT ABOVE AND TILDE': None,
'LATIN SMALL LETTER I WITH RETROFLEX HOOK': None,
'LATIN SMALL LETTER INSULAR D': None,
'LATIN SMALL LETTER INSULAR F': None,
'LATIN SMALL LETTER INSULAR G': None,
'LATIN SMALL LETTER INSULAR R': None,
'LATIN SMALL LETTER INSULAR S': None,
'LATIN SMALL LETTER INSULAR T': None,
'LATIN SMALL LETTER IOTA WITH STROKE': None,
'LATIN SMALL LETTER IS': None,
'LATIN SMALL LETTER J WITH DOT ABOVE AND TILDE': None,
'LATIN SMALL LETTER J WITH STROKE': None,
'LATIN SMALL LETTER K WITH DESCENDER': None,
'LATIN SMALL LETTER K WITH DIAGONAL STROKE': None,
'LATIN SMALL LETTER K WITH PALATAL HOOK': None,
'LATIN SMALL LETTER K WITH STROKE': None,
'LATIN SMALL LETTER K WITH STROKE AND DIAGONAL STROKE': None,
'LATIN SMALL LETTER L WITH CURL': None,
'LATIN SMALL LETTER L WITH DOUBLE BAR': None,
'LATIN SMALL LETTER L WITH HIGH STROKE': None,
'LATIN SMALL LETTER L WITH PALATAL HOOK': None,
'LATIN SMALL LETTER L WITH TILDE': None,
'LATIN SMALL LETTER LONG S WITH DIAGONAL STROKE': None,
'LATIN SMALL LETTER LONG S WITH HIGH STROKE': None,
'LATIN SMALL LETTER LUM': None,
'LATIN SMALL LETTER M WITH MIDDLE TILDE': None,
'LATIN SMALL LETTER M WITH PALATAL HOOK': None,
'LATIN SMALL LETTER M WITH TILDE': None,
'LATIN SMALL LETTER MIDDLE-WELSH LL': None,
'LATIN SMALL LETTER MIDDLE-WELSH V': None,
'LATIN SMALL LETTER MUM': None,
'LATIN SMALL LETTER N WITH CURL': None,
'LATIN SMALL LETTER N WITH MIDDLE TILDE': None,
'LATIN SMALL LETTER N WITH PALATAL HOOK': None,
'LATIN SMALL LETTER NG WITH TILDE ABOVE': None,
'LATIN SMALL LETTER NUM': None,
'LATIN SMALL LETTER O WITH LONG STROKE OVERLAY': None,
'LATIN SMALL LETTER O WITH LOOP': None,
'LATIN SMALL LETTER O WITH LOW RING INSIDE': None,
'LATIN SMALL LETTER O WITH VERTICAL LINE BELOW': None,
'LATIN SMALL LETTER O WITH VERTICAL LINE BELOW AND ACUTE': None,
'LATIN SMALL LETTER O WITH VERTICAL LINE BELOW AND GRAVE': None,
'LATIN SMALL LETTER OO': None,
'LATIN SMALL LETTER OPEN E WITH RETROFLEX HOOK': None,
'LATIN SMALL LETTER OPEN O WITH RETROFLEX HOOK': None,
'LATIN SMALL LETTER P WITH FLOURISH': None,
'LATIN SMALL LETTER P WITH MIDDLE TILDE': None,
'LATIN SMALL LETTER P WITH PALATAL HOOK': None,
'LATIN SMALL LETTER P WITH SQUIRREL TAIL': None,
'LATIN SMALL LETTER P WITH STROKE': None,
'LATIN SMALL LETTER P WITH STROKE THROUGH DESCENDER': None,
'LATIN SMALL LETTER Q WITH DIAGONAL STROKE': None,
'LATIN SMALL LETTER Q WITH HOOK TAIL': None,
'LATIN SMALL LETTER Q WITH STROKE THROUGH DESCENDER': None,
'LATIN SMALL LETTER QP DIGRAPH': None,
'LATIN SMALL LETTER R ROTUNDA': None,
'LATIN SMALL LETTER R WITH FISHHOOK AND MIDDLE TILDE': None,
'LATIN SMALL LETTER R WITH MIDDLE TILDE': None,
'LATIN SMALL LETTER R WITH PALATAL HOOK': None,
'LATIN SMALL LETTER R WITH STROKE': None,
'LATIN SMALL LETTER R WITH TILDE': None,
'LATIN SMALL LETTER REVERSED C': None,
'LATIN SMALL LETTER REVERSED C WITH DOT': None,
'LATIN SMALL LETTER REVERSED OPEN E WITH RETROFLEX HOOK': None,
'LATIN SMALL LETTER RUM': None,
'LATIN SMALL LETTER RUM ROTUNDA': None,
'LATIN SMALL LETTER S WITH MIDDLE TILDE': None,
'LATIN SMALL LETTER S WITH PALATAL HOOK': None,
'LATIN SMALL LETTER S WITH SWASH TAIL': None,
'LATIN SMALL LETTER S WITH VERTICAL LINE BELOW': None,
'LATIN SMALL LETTER SALTILLO': None,
'LATIN SMALL LETTER SCHWA WITH RETROFLEX HOOK': None,
'LATIN SMALL LETTER SIDEWAYS DIAERESIZED U': None,
'LATIN SMALL LETTER SIDEWAYS O': None,
'LATIN SMALL LETTER SIDEWAYS O WITH STROKE': None,
'LATIN SMALL LETTER SIDEWAYS OPEN O': None,
'LATIN SMALL LETTER SIDEWAYS TURNED M': None,
'LATIN SMALL LETTER SIDEWAYS U': None,
'LATIN SMALL LETTER T WITH CURL': None,
'LATIN SMALL LETTER T WITH DIAGONAL STROKE': None,
'LATIN SMALL LETTER T WITH MIDDLE TILDE': None,
'LATIN SMALL LETTER TAILLESS PHI': None,
'LATIN SMALL LETTER TH WITH STRIKETHROUGH': None,
'LATIN SMALL LETTER THORN WITH STROKE': None,
'LATIN SMALL LETTER THORN WITH STROKE THROUGH DESCENDER': None,
'LATIN SMALL LETTER TOP HALF O': None,
'LATIN SMALL LETTER TRESILLO': None,
'LATIN SMALL LETTER TUM': None,
'LATIN SMALL LETTER TURNED AE': None,
'LATIN SMALL LETTER TURNED G': None,
'LATIN SMALL LETTER TURNED H WITH FISHHOOK': None,
'LATIN SMALL LETTER TURNED H WITH FISHHOOK AND TAIL': None,
'LATIN SMALL LETTER TURNED I': None,
'LATIN SMALL LETTER TURNED INSULAR G': None,
'LATIN SMALL LETTER TURNED L': None,
'LATIN SMALL LETTER TURNED OE': None,
'LATIN SMALL LETTER TURNED OPEN E': None,
'LATIN SMALL LETTER TURNED R WITH TAIL': None,
'LATIN SMALL LETTER TZ': None,
'LATIN SMALL LETTER U WITH MACRON AND ACUTE': None,
'LATIN SMALL LETTER U WITH MACRON AND GRAVE': None,
'LATIN SMALL LETTER U WITH MACRON AND TILDE': None,
'LATIN SMALL LETTER U WITH OGONEK AND ACUTE': None,
'LATIN SMALL LETTER U WITH OGONEK AND TILDE': None,
'LATIN SMALL LETTER U WITH RETROFLEX HOOK': None,
'LATIN SMALL LETTER UE': None,
'LATIN SMALL LETTER UM': None,
'LATIN SMALL LETTER UPSILON WITH STROKE': None,
'LATIN SMALL LETTER V WITH CURL': None,
'LATIN SMALL LETTER V WITH DIAGONAL STROKE': None,
'LATIN SMALL LETTER V WITH PALATAL HOOK': None,
'LATIN SMALL LETTER V WITH RIGHT HOOK': None,
'LATIN | |
= 0
self._createAllTables()
self._loadServers()
def _createAllTables(self):
    """Helper: check for the existence of all the tables and indices
       we plan to use, and create the missing ones.

       Acquires self._lock for the duration; also (re)binds the
       insert-or-update helper functions that depend on the schema.
    """
    self._lock.acquire()
    try:
        # FFFF There are still a few sucky bits of this DB design.
        # FFFF First, we depend on SQLite's behavior when inserting null
        # FFFF into an integer primary key column.  (It picks a new integer
        # FFFF for us, without our having to give a sequence.)
        # FFFF Second, paths probably want to have their own table.

        #### Tables holding raw data.

        # Holds intervals over which this server was running.  A
        # row in myLifespan means: "This server started running at
        # 'startup', and was still running at 'stillup'.  If
        # shutdown is provided, that's when the server shut down."
        self._db.createTable("myLifespan",
                             [("startup",  "timestamp", "not null"),
                              ("stillup",  "timestamp", "not null"),
                              ("shutdown", "timestamp")])

        # Holds information about probe packets sent into the network.  A
        # row in ping means: We send a probe packet along the path 'path'
        # at the time 'sentat'.  The payload of the message we receive
        # will hash to 'hash' (base-64 encoded).  If 'received' is not
        # null, we received the packet again at 'received'.
        self._db.createTable("ping",
                             [("hash",     "char(28)",     "primary key"),
                              ("path",     "varchar(200)", "not null"),
                              ("sentat",   "timestamp",    "not null"),
                              ("received", "timestamp")])

        # Holds identity digests for all the servers we know about.
        self._db.createTable("server",
                             [("id",       "integer",     "primary key"),
                              ("identity", "varchar(40)", "unique not null")])

        # Holds information about our attempts to launch MMTP connections
        # to other servers.  A row in connectionAttempt means: We tried to
        # connect to 'server' at the time 'at'.  If 'success', we
        # successfully connected and negotiated a protocol version.
        # Otherwise, we failed before we could negotiate a protocol version.
        self._db.createTable(
            "connectionAttempt",
            [("at",      "timestamp", "not null"),
             ("server",  "integer",   "not null REFERENCES server(id)"),
             ("success", "bool",      "not null")])

        #### Tables holding results.

        # Maps spans of time (currently, days) to identifiers.
        self._db.createTable("statsInterval",
                             [("id",      "integer",   "primary key"),
                              ("startAt", "timestamp", "not null"),
                              ("endAt",   "timestamp", "not null")])

        # Holds estimated server uptimes.  Each row in uptime means:
        # during 'interval', our successful and failed connections to
        # 'server' make us believe that it was running and on the network
        # about 'uptime' fraction of the time.
        self._db.createTable(
            "uptime",
            [("interval", "integer", "not null REFERENCES statsinterval(id)"),
             ("server",   "integer", "not null REFERENCES server(id)"),
             ("uptime",   "float",   "not null")],
            ["PRIMARY KEY (interval, server)"])

        # Holds estimates for server latency and reliability for a given
        # interval.  Each row in echolotOneHopResult means: during
        # 'interval', we sent 'nSent' one-hop probe messages to 'server'.
        # We eventually received 'nReceived' of them.  The median latency
        # (rounded off) of those we received was 'latency' seconds.
        # Weighting unreceived probe messages by the fraction we would
        # have expected to see by the time we computed the results times
        # 0.8, and weighting received probes by 1.0, the weighted number
        # of sent and received pings are in 'wsent' and 'wreceived', and
        # the weighted fraction received is 'reliability'.
        # (Yes, Echolot is dark magic.)
        self._db.createTable(
            "echolotOneHopResult",
            [("server",   "integer", "not null REFERENCES server(id)"),
             ("interval", "integer", "not null REFERENCES statsInterval(id)"),
             ("nSent",    "integer", "not null"),
             ("nReceived","integer", "not null"),
             ("latency",  "integer", "not null"),
             ("wsent",    "float",   "not null"),
             ("wreceived","float",   "not null"),
             ("reliability", "float", "not null")],
            ["PRIMARY KEY (server, interval)"])

        # Holds estimates for server latency and reliability over the last
        # several (12) days.  A row in echolotCurrentOneHopResults means:
        # We most recently computed single-hop probe statistics for server
        # at 'at'.  Over the last several days, its median latency
        # (rounded) has been 'latency' seconds, and its reliability,
        # weighted by relevance of day, has been 'reliability'.
        self._db.createTable(
            "echolotCurrentOneHopResult",
            [("server",      "integer",
              "primary key REFERENCES server(id)"),
             ("at",          "timestamp", "not null"),
             ("latency",     "integer",   "not null"),
             ("reliability", "float",     "not null")])

        # Holds estimates for two-hop chain reliability.  Each row means:
        # We most recently calculated the reliability for the two-hop chain
        # 'server1,server2' at 'at'.  Over the last several (12) days, we
        # sent nSent probes, and have received nReceived of them.  Iff
        # 'broken', the fraction received is so much lower than what we'd
        # expect that we have concluded that the chain is probably broken.
        # Iff 'interesting', then there is not enough data to be sure, so
        # we're going to probe this chain a bit more frequently for a
        # while.
        self._db.createTable(
            "echolotCurrentTwoHopResult",
            [("server1",     "integer",   "not null REFERENCES server(id)"),
             ("server2",     "integer",   "not null REFERENCES server(id)"),
             ("at",          "timestamp", "not null"),
             ("nSent",       "integer",   "not null"),
             ("nReceived",   "integer",   "not null"),
             ("broken",      "bool",      "not null"),
             ("interesting", "bool",      "not null")],
            ["PRIMARY KEY (server1, server2)"])

        #### Indices.

        self._db.createIndex("serverIdentity", "server",
                             ["identity"], unique=1)
        self._db.createIndex("statsIntervalSE", "statsInterval",
                             ["startAt", "endAt"], unique=1)
        # Fixed: the column is declared as "startup" above; use the same
        # spelling here.  (The old "startUp" only worked because SQLite
        # resolves identifiers case-insensitively.)
        self._db.createIndex("myLifespanStartup", "myLifespan", ["startup"])
        self._db.createIndex("pingHash",   "ping", ["hash"], unique=1)
        self._db.createIndex("pingPathSR", "ping",
                             ["path", "sentat", "received"])
        self._db.createIndex("connectionAttemptServerAt",
                             "connectionAttempt", ["server","at"])
        self._db.createIndex("echolotOneHopResultSI",
                             "echolotOneHopResult",
                             ["server", "interval"])

        # XXXX008 We should maybe have indices on echolot*results,
        # uptimes.

        # Insert-or-update helpers keyed on each table's primary key.
        self._setUptime = self._db.getInsertOrUpdateFn(
            "uptime", ["interval", "server"], ["uptime"])
        self._setOneHop = self._db.getInsertOrUpdateFn(
            "echolotOneHopResult",
            ["server", "interval"],
            ["nSent", "nReceived", "latency", "wsent", "wreceived",
             "reliability"])
        self._setCurOneHop = self._db.getInsertOrUpdateFn(
            "echolotCurrentOneHopResult",
            ["server"],
            ["at", "latency", "reliability"])
        self._setTwoHop = self._db.getInsertOrUpdateFn(
            "echolotCurrentTwoHopResult",
            ["server1", "server2"],
            ["at", "nSent", "nReceived", "broken", "interesting"])
    finally:
        self._lock.release()
def _loadServers(self):
    """Helper function; callers must hold lock.  Load _serverIDs,
       _serverReliability, _brokenChains, and _interestingChains from
       the database.
    """
    cur = self._db.getCursor()

    # Map each server's identity digest to its database row id.
    cur.execute("SELECT id, identity FROM server")
    idMap = {}
    for rowID, hexIdentity in cur.fetchall():
        idMap[self._db.decodeIdentity(hexIdentity)] = rowID

    # Map each identity digest to its most recent one-hop reliability.
    relMap = {}
    cur.execute("SELECT identity, reliability FROM "
                "echolotCurrentOneHopResult, server WHERE "
                "echolotCurrentOneHopResult.server = server.id")
    for hexIdentity, reliability in cur.fetchall():
        relMap[self._db.decodeIdentity(hexIdentity)] = reliability

    # Collect the two-hop chains currently flagged broken or interesting,
    # keyed by "identity1,identity2".  Chains involving '<self>' are
    # skipped.
    cur.execute("SELECT S1.identity, S2.identity,broken,interesting FROM"
                " echolotCurrentTwoHopResult,server AS S1,server AS S2 "
                "WHERE (interesting = 1 OR broken = 1) "
                " AND S1.id = server1 AND S2.id = server2")
    brokenMap = {}
    interestingMap = {}
    for ident1, ident2, isBroken, isInteresting in cur.fetchall():
        if '<self>' in (ident1, ident2):
            continue
        chain = "%s,%s" % (ident1, ident2)
        assert chain == chain.lower()
        if isBroken:
            brokenMap[chain] = 1
        if isInteresting:
            interestingMap[chain] = 1

    self._serverIDs = idMap
    self._serverReliability = relMap
    self._brokenChains = brokenMap
    self._interestingChains = interestingMap
def updateServers(self, descriptorSource):
    """Ensure that every server named by 'descriptorSource' has an entry
       in the database, adding rows as needed, then commit.
    """
    for descriptor in descriptorSource.getServerList():
        self._getServerID(descriptor.getIdentityDigest())
    self._db.getConnection().commit()
def _getServerID(self, identity):
    """Helper: Return the database ID for the server whose
       identity digest is 'identity'.  If the database doesn't
       know about the server yet, add it.  Does not commit the
       current transaction.
    """
    # Fast path: return the cached ID under the lock.
    self._lock.acquire()
    try:
        try:
            return self._serverIDs[identity]
        except KeyError:
            # Cache miss: mark the identity with a placeholder before we
            # release the lock and touch the database.
            # NOTE(review): the placeholder value 1 is indistinguishable
            # from a real row id; a concurrent caller hitting the fast
            # path between here and the overwrite below would get 1 —
            # presumably callers are serialized elsewhere; confirm.
            self._serverIDs[identity] = 1
    finally:
        self._lock.release()

    # Slow path (outside the lock): insert the server row, then read back
    # the id SQLite assigned, and replace the placeholder with it.
    cur = self._db.getCursor()
    hexid = self._db.encodeIdentity(identity)

    cur.execute("INSERT INTO server (identity) VALUES (?)", [hexid])
    cur.execute("SELECT id FROM server WHERE identity = ?", [hexid])
    #XXXX catch errors!
    ident, = cur.fetchone()
    self._serverIDs[identity]=ident
    return ident
def _getIntervalID(self, start, end):
    """Helper: Return the database ID for the interval spanning from
       'start' to 'end'.  If the database doesn't know about the interval
       yet, add it.  Does not commit the current transaction.
    """
    # CACHE THESE? FFFF
    startTime = self._db.time(start)
    endTime = self._db.time(end)
    cur = self._db.getCursor()

    # Already recorded?  Return the existing id.
    cur.execute("SELECT id FROM statsInterval WHERE startAt = ? AND endAt = ?",
                (startTime, endTime))
    rows = cur.fetchall()
    if len(rows) == 1:
        return rows[0][0]

    # Not present yet: insert it, then read back the id SQLite assigned.
    cur.execute("INSERT INTO statsInterval (startAt, endAt) VALUES (?, ?)",
                (startTime, endTime))
    cur.execute("SELECT id FROM statsInterval WHERE startAt = ? AND endAt = ?",
                (startTime, endTime))
    rows = cur.fetchall()
    assert len(rows) == 1
    return rows[0][0]
def rotate(self, dataCutoff, resultsCutoff):
"""Remove expired entries from the database. Remove any raw data from
before 'dataCutoff', and any computed statistics from before
'resultsCutoff'.
"""
#if now is None: now = time.time()
#sec = config['Pinging']
#dataCutoff = self._db.time(now - sec['RetainPingData'])
#resultsCutoff = self._db.time(now - sec['RetainPingResults'])
cur = self._db.getCursor()
cur.execute("DELETE FROM myLifespan WHERE stillup < ?", [dataCutoff])
cur.execute("DELETE FROM ping WHERE sentat < ?", [dataCutoff])
cur.execute("DELETE FROM connectionAttempt WHERE at < ?", [dataCutoff])
cur.execute("DELETE FROM uptime WHERE interval IN "
"( SELECT id FROM statsInterval WHERE endAt < ? )",
[resultsCutoff])
cur.execute("DELETE FROM echolotOneHopResult WHERE interval IN "
"( SELECT id FROM statsInterval WHERE endAt < | |
# ignorelongline
'1f69c': {'canonical_name': 'tractor', 'aliases': []},
# kick_scooter and scooter seem better for Places/14 and Places /16 than
# scooter and motor_scooter.
'1f6f4': {'canonical_name': 'kick_scooter', 'aliases': []},
'1f6b2': {'canonical_name': 'bike', 'aliases': ['bicycle']},
# see Places/14. Called motor_bike (or bike) in India
'1f6f5': {'canonical_name': 'scooter', 'aliases': ['motor_bike']},
'1f3cd': {'canonical_name': 'motorcycle', 'aliases': []},
# siren seems more useful. alert seems like a reasonable addition
'1f6a8': {'canonical_name': 'siren', 'aliases': ['rotating_light', 'alert']},
'1f694': {'canonical_name': 'oncoming_police_car', 'aliases': []},
'1f68d': {'canonical_name': 'oncoming_bus', 'aliases': []},
# car to parallel e.g. Places/1
'1f698': {'canonical_name': 'oncoming_car', 'aliases': ['oncoming_automobile']},
'1f696': {'canonical_name': 'oncoming_taxi', 'aliases': []},
# ski_lift seems like a good addition
'1f6a1': {'canonical_name': 'aerial_tramway', 'aliases': ['ski_lift']},
# gondola seems more useful
'1f6a0': {'canonical_name': 'gondola', 'aliases': ['mountain_cableway']},
'1f69f': {'canonical_name': 'suspension_railway', 'aliases': []},
# train_car seems like a reasonable addition
'1f683': {'canonical_name': 'railway_car', 'aliases': ['train_car']},
# this does not seem like a good emoji for train, especially compared to
# Places/33. streetcar seems like a good addition.
'1f68b': {'canonical_name': 'tram', 'aliases': ['streetcar']},
'1f69e': {'canonical_name': 'mountain_railway', 'aliases': []},
# elevated_train seems like a reasonable addition
'1f69d': {'canonical_name': 'monorail', 'aliases': ['elevated_train']},
    # from gemoji/unicode. Also, we don't think we need two bullettrains
'1f684': {'canonical_name': 'high_speed_train', 'aliases': []},
# google, wikipedia, etc prefer bullet train to bullettrain
'1f685': {'canonical_name': 'bullet_train', 'aliases': []},
'1f688': {'canonical_name': 'light_rail', 'aliases': []},
'1f682': {'canonical_name': 'train', 'aliases': ['steam_locomotive']},
# oncoming_train seems better than train2
'1f686': {'canonical_name': 'oncoming_train', 'aliases': []},
# saving metro for Symbols/108. The tunnel makes subway more appropriate
# anyway.
'1f687': {'canonical_name': 'subway', 'aliases': []},
# all the glyphs of oncoming vehicles have names like oncoming_*. The
# alternate names are to parallel the alternates to Places/27.
'1f68a': {'canonical_name': 'oncoming_tram', 'aliases': ['oncoming_streetcar', 'oncoming_trolley']},
'1f689': {'canonical_name': 'station', 'aliases': []},
'1f681': {'canonical_name': 'helicopter', 'aliases': []},
'1f6e9': {'canonical_name': 'small_airplane', 'aliases': []},
'2708': {'canonical_name': 'airplane', 'aliases': []},
# take_off seems more useful than airplane_departure. departure also seems
# more useful than airplane_departure. Arguably departure should be the
# primary, since arrival is probably more useful than landing in Places/42,
# but going with this for now.
'1f6eb': {'canonical_name': 'take_off', 'aliases': ['departure', 'airplane_departure']},
# parallel to Places/41
'1f6ec': {'canonical_name': 'landing', 'aliases': ['arrival', 'airplane_arrival']},
'1f680': {'canonical_name': 'rocket', 'aliases': []},
'1f6f0': {'canonical_name': 'satellite', 'aliases': []},
'1f4ba': {'canonical_name': 'seat', 'aliases': []},
'1f6f6': {'canonical_name': 'canoe', 'aliases': []},
'26f5': {'canonical_name': 'boat', 'aliases': ['sailboat']},
'1f6e5': {'canonical_name': 'motor_boat', 'aliases': []},
'1f6a4': {'canonical_name': 'speedboat', 'aliases': []},
# yacht and cruise seem like reasonable additions
'1f6f3': {'canonical_name': 'passenger_ship', 'aliases': ['yacht', 'cruise']},
'26f4': {'canonical_name': 'ferry', 'aliases': []},
'1f6a2': {'canonical_name': 'ship', 'aliases': []},
'2693': {'canonical_name': 'anchor', 'aliases': []},
# there already is a construction in Places/82, and work_in_progress seems
# like a useful thing to have. Construction_zone seems better than the
# unicode construction_sign, and is there partly so this autocompletes for
# construction.
'1f6a7': {'canonical_name': 'work_in_progress', 'aliases': ['construction_zone']},
# alternates from https://emojipedia.org/fuel-pump/. unicode is fuel_pump,
# not fuelpump
'26fd': {'canonical_name': 'fuel_pump', 'aliases': ['gas_pump', 'petrol_pump']},
# not sure why iamcal removed the space
'1f68f': {'canonical_name': 'bus_stop', 'aliases': []},
# https://emojipedia.org/vertical-traffic-light/ thinks this is the more
# common of the two traffic lights, so putting traffic_light on this one
'1f6a6': {'canonical_name': 'traffic_light', 'aliases': ['vertical_traffic_light']},
# see Places/57
'1f6a5': {'canonical_name': 'horizontal_traffic_light', 'aliases': []},
# road_trip from https://mashable.com/2015/10/23/ios-9-1-emoji-guide/
'1f5fa': {'canonical_name': 'map', 'aliases': ['world_map', 'road_trip']},
# rock_carving, statue, and tower seem more general and less culturally
# specific, for Places/60, 61, and 63.
'1f5ff': {'canonical_name': 'rock_carving', 'aliases': ['moyai']},
# new_york from https://emojipedia.org/statue-of-liberty/. see Places/60
# for statue
'1f5fd': {'canonical_name': 'statue', 'aliases': ['new_york', 'statue_of_liberty']},
'26f2': {'canonical_name': 'fountain', 'aliases': []},
# see Places/60
'1f5fc': {'canonical_name': 'tower', 'aliases': ['tokyo_tower']},
# choosing this as the castle since castles are a way bigger thing in
# europe than japan, and shiro is a pretty reasonable name for Places/65
'1f3f0': {'canonical_name': 'castle', 'aliases': []},
# see Places/64
'1f3ef': {'canonical_name': 'shiro', 'aliases': []},
'1f3df': {'canonical_name': 'stadium', 'aliases': []},
'1f3a1': {'canonical_name': 'ferris_wheel', 'aliases': []},
'1f3a2': {'canonical_name': 'roller_coaster', 'aliases': []},
# merry_go_round seems like a good addition
'1f3a0': {'canonical_name': 'carousel', 'aliases': ['merry_go_round']},
# beach_umbrella seems more useful
'26f1': {'canonical_name': 'beach_umbrella', 'aliases': []},
'1f3d6': {'canonical_name': 'beach', 'aliases': []},
'1f3dd': {'canonical_name': 'island', 'aliases': []},
'26f0': {'canonical_name': 'mountain', 'aliases': []},
'1f3d4': {'canonical_name': 'snowy_mountain', 'aliases': []},
# already lots of other mountains, otherwise would rename this like
# Places/60
'1f5fb': {'canonical_name': 'mount_fuji', 'aliases': []},
'1f30b': {'canonical_name': 'volcano', 'aliases': []},
'1f3dc': {'canonical_name': 'desert', 'aliases': []},
# campsite from https://emojipedia.org/camping/, I think Places/79 is a
# better camping
'1f3d5': {'canonical_name': 'campsite', 'aliases': []},
'26fa': {'canonical_name': 'tent', 'aliases': ['camping']},
'1f6e4': {'canonical_name': 'railway_track', 'aliases': ['train_tracks']},
# road is used much more frequently at
# https://trends.google.com/trends/explore?q=road,motorway
'1f6e3': {'canonical_name': 'road', 'aliases': ['motorway']},
'1f3d7': {'canonical_name': 'construction', 'aliases': []},
'1f3ed': {'canonical_name': 'factory', 'aliases': []},
'1f3e0': {'canonical_name': 'house', 'aliases': []},
# suburb seems more useful
'1f3e1': {'canonical_name': 'suburb', 'aliases': []},
'1f3d8': {'canonical_name': 'houses', 'aliases': []},
# condemned seemed like a good addition
'1f3da': {'canonical_name': 'derelict_house', 'aliases': ['condemned']},
'1f3e2': {'canonical_name': 'office', 'aliases': []},
'1f3ec': {'canonical_name': 'department_store', 'aliases': []},
'1f3e3': {'canonical_name': 'japan_post', 'aliases': []},
'1f3e4': {'canonical_name': 'post_office', 'aliases': []},
'1f3e5': {'canonical_name': 'hospital', 'aliases': []},
'1f3e6': {'canonical_name': 'bank', 'aliases': []},
'1f3e8': {'canonical_name': 'hotel', 'aliases': []},
'1f3ea': {'canonical_name': 'convenience_store', 'aliases': []},
'1f3eb': {'canonical_name': 'school', 'aliases': []},
'1f3e9': {'canonical_name': 'love_hotel', 'aliases': []},
'1f492': {'canonical_name': 'wedding', 'aliases': []},
'1f3db': {'canonical_name': 'classical_building', 'aliases': []},
'26ea': {'canonical_name': 'church', 'aliases': []},
'1f54c': {'canonical_name': 'mosque', 'aliases': []},
'1f54d': {'canonical_name': 'synagogue', 'aliases': []},
'1f54b': {'canonical_name': 'kaaba', 'aliases': []},
'26e9': {'canonical_name': 'shinto_shrine', 'aliases': []},
'1f5fe': {'canonical_name': 'japan', 'aliases': []},
# rice_scene seems like a strange name to have. gemoji alternate is
# moon_ceremony
'1f391': {'canonical_name': 'moon_ceremony', 'aliases': []},
'1f3de': {'canonical_name': 'national_park', 'aliases': []},
# ocean_sunrise to parallel Places/109
'1f305': {'canonical_name': 'sunrise', 'aliases': ['ocean_sunrise']},
'1f304': {'canonical_name': 'mountain_sunrise', 'aliases': []},
# shooting_star and wish seem like way better descriptions. gemoji/unicode
# is shooting_star
'1f320': {'canonical_name': 'shooting_star', 'aliases': ['wish']},
'1f387': {'canonical_name': 'sparkler', 'aliases': []},
'1f386': {'canonical_name': 'fireworks', 'aliases': []},
'1f307': {'canonical_name': 'city_sunrise', 'aliases': []},
'1f306': {'canonical_name': 'sunset', 'aliases': []},
# city and skyline seem more useful than cityscape
'1f3d9': {'canonical_name': 'city', 'aliases': ['skyline']},
'1f303': {'canonical_name': 'night', 'aliases': []},
# night_sky seems like a good addition
'1f30c': {'canonical_name': 'milky_way', 'aliases': ['night_sky']},
'1f309': {'canonical_name': 'bridge', 'aliases': []},
'1f301': {'canonical_name': 'foggy', 'aliases': []},
'231a': {'canonical_name': 'watch', 'aliases': []},
# unicode/gemoji is mobile_phone. The rest seem like good additions
'1f4f1': {'canonical_name': 'mobile_phone', 'aliases': ['smartphone', 'iphone', 'android']},
'1f4f2': {'canonical_name': 'calling', 'aliases': []},
# gemoji has laptop, even though the google emoji for this does not look
# like a laptop
'1f4bb': {'canonical_name': 'computer', 'aliases': ['laptop']},
'2328': {'canonical_name': 'keyboard', 'aliases': []},
'1f5a5': {'canonical_name': 'desktop_computer', 'aliases': []},
'1f5a8': {'canonical_name': 'printer', 'aliases': []},
# gemoji/unicode is computer_mouse
'1f5b1': {'canonical_name': 'computer_mouse', 'aliases': []},
'1f5b2': {'canonical_name': 'trackball', 'aliases': []},
# arcade seems like a reasonable addition
'1f579': {'canonical_name': 'joystick', 'aliases': ['arcade']},
# vise seems like a reasonable addition
'1f5dc': {'canonical_name': 'compression', 'aliases': ['vise']},
# gold record seems more useful, idea came from
# https://11points.com/11-emoji-different-meanings-think/
'1f4bd': {'canonical_name': 'gold_record', 'aliases': ['minidisc']},
'1f4be': {'canonical_name': 'floppy_disk', 'aliases': []},
'1f4bf': {'canonical_name': 'cd', 'aliases': []},
'1f4c0': {'canonical_name': 'dvd', 'aliases': []},
# videocassette from gemoji/unicode
'1f4fc': {'canonical_name': 'vhs', 'aliases': ['videocassette']},
'1f4f7': {'canonical_name': 'camera', 'aliases': []},
# both of these seem more useful than camera_with_flash
'1f4f8': {'canonical_name': 'taking_a_picture', 'aliases': ['say_cheese']},
# video_recorder seems like a reasonable addition
'1f4f9': {'canonical_name': 'video_camera', 'aliases': ['video_recorder']},
'1f3a5': {'canonical_name': 'movie_camera', 'aliases': []},
# seems like the best emoji for movie
'1f4fd': {'canonical_name': 'projector', 'aliases': ['movie']},
'1f39e': {'canonical_name': 'film', 'aliases': []},
# both of these seem more useful than telephone_receiver
'1f4de': {'canonical_name': 'landline', 'aliases': ['home_phone']},
'260e': {'canonical_name': 'phone', 'aliases': ['telephone']},
'1f4df': {'canonical_name': 'pager', 'aliases': []},
'1f4e0': {'canonical_name': 'fax', 'aliases': []},
'1f4fa': {'canonical_name': 'tv', 'aliases': ['television']},
'1f4fb': {'canonical_name': 'radio', 'aliases': []},
'1f399': {'canonical_name': 'studio_microphone', 'aliases': []},
# volume seems more useful
'1f39a': {'canonical_name': 'volume', 'aliases': ['level_slider']},
'1f39b': {'canonical_name': 'control_knobs', 'aliases': []},
'23f1': {'canonical_name': 'stopwatch', 'aliases': []},
'23f2': {'canonical_name': 'timer', 'aliases': []},
'23f0': {'canonical_name': 'alarm_clock', 'aliases': []},
'1f570': {'canonical_name': 'mantelpiece_clock', 'aliases': []},
# times_up and time_ticking seem more useful than the hourglass | |
= "Rollout"
deployment_or_rollout = await Rollout.read(*read_args)
init_args = dict(rollout_config = deployment_or_rollout_config, rollout = deployment_or_rollout)
else:
raise NotImplementedError(f"Unknown configuration type '{type(deployment_or_rollout_config).__name__}'")
if not deployment_or_rollout:
raise ValueError(
f'cannot create CanaryOptimization: target {controller_type} "{deployment_or_rollout_config.name}"'
f' does not exist in Namespace "{deployment_or_rollout_config.namespace}"'
)
# NOTE: Currently only supporting one container
assert len(deployment_or_rollout_config.containers) == 1, "CanaryOptimization currently only supports a single container"
container_config = deployment_or_rollout_config.containers[0]
main_container = await deployment_or_rollout.get_target_container(container_config)
name = (
deployment_or_rollout_config.strategy.alias
if isinstance(deployment_or_rollout_config.strategy, CanaryOptimizationStrategyConfiguration)
and deployment_or_rollout_config.strategy.alias
else f"{deployment_or_rollout.name}/{main_container.name}-tuning"
)
optimization = cls(
name=name,
**init_args,
container_config=container_config,
main_container=main_container,
**kwargs,
)
await optimization._load_tuning_state()
return optimization
async def _load_tuning_state(self) -> None:
# Find an existing tuning Pod/Container if available
try:
tuning_pod = await Pod.read(self.tuning_pod_name, cast(str, self.namespace))
tuning_container = tuning_pod.get_container(self.container_config.name)
except kubernetes_asyncio.client.exceptions.ApiException as e:
if e.status != 404 or e.reason != "Not Found":
servo.logger.trace(f"Failed reading tuning pod: {e}")
raise
else:
tuning_pod = None
tuning_container = None
# TODO: Factor into a new class?
self.tuning_pod = tuning_pod
self.tuning_container = tuning_container
await self._configure_tuning_pod_template_spec()
@property
def pod_template_spec_container(self) -> Container:
container_obj = next(filter(lambda c: c.name == self.container_config.name, self._tuning_pod_template_spec.spec.containers))
return Container(container_obj, None)
def adjust(self, adjustment: servo.Adjustment, control: servo.Control = servo.Control()) -> None:
assert self.tuning_pod, "Tuning Pod not loaded"
assert self.tuning_container, "Tuning Container not loaded"
self.adjustments.append(adjustment)
setting_name, value = _normalize_adjustment(adjustment)
self.logger.info(f"adjusting {setting_name} to {value}")
if setting_name in ("cpu", "memory"):
# NOTE: use copy + update to apply values that may be outside of the range
servo.logger.debug(f"Adjusting {setting_name}={value}")
setting = getattr(self.container_config, setting_name).copy(update={"value": value})
# Set only the requirements defined in the config
requirements: Dict[ResourceRequirement, Optional[str]] = {}
for requirement in setting.set:
requirements[requirement] = value
servo.logger.debug(f"Assigning {setting_name}.{requirement}={value}")
servo.logger.debug(f"Setting resource requirements for {setting_name} to {requirements} on PodTemplateSpec")
self.pod_template_spec_container.set_resource_requirements(setting_name, requirements)
elif setting_name == "replicas":
if value != 1:
servo.logger.warning(
f'ignored attempt to set replicas to "{value}"'
)
else:
raise servo.AdjustmentFailedError(
f"failed adjustment of unsupported Kubernetes setting '{setting_name}'"
)
    async def apply(self) -> None:
        """Apply the adjustments to the target."""
        assert self.tuning_pod, "Tuning Pod not loaded"
        assert self.tuning_container, "Tuning Container not loaded"
        servo.logger.info("Applying adjustments to Tuning Pod")
        # Run the recreation in a task so that cancellation of apply() can be
        # forwarded to the (potentially long-running) pod rollout.
        task = asyncio.create_task(self.create_or_recreate_tuning_pod())
        try:
            await task
        except asyncio.CancelledError:
            # Propagate cancellation to the inner task and wait for it to
            # unwind before re-raising, so no orphaned work is left behind.
            task.cancel()
            with contextlib.suppress(asyncio.CancelledError):
                await task
            raise
        # TODO: logging the wrong values -- should be coming from the podtemplatespec?
        servo.logger.success(f"Built new tuning pod with container resources: {self.tuning_container.resources}")
@property
def namespace(self) -> str:
return self.target_controller_config.namespace
@property
def tuning_pod_name(self) -> str:
"""
Return the name of tuning Pod for this optimization.
"""
return f"{self.target_controller_config.name}-tuning"
async def delete_tuning_pod(self, *, raise_if_not_found: bool = True) -> Optional[Pod]:
"""
Delete the tuning Pod.
"""
try:
# TODO: Provide context manager or standard read option that handle not found? Lots of duplication on not found/conflict handling...
tuning_pod = await Pod.read(self.tuning_pod_name, self.namespace)
self.logger.info(
f"Deleting tuning Pod '{tuning_pod.name}' from namespace '{tuning_pod.namespace}'..."
)
await tuning_pod.delete()
await tuning_pod.wait_until_deleted()
self.logger.info(
f"Deleted tuning Pod '{tuning_pod.name}' from namespace '{tuning_pod.namespace}'."
)
self.tuning_pod = None
self.tuning_container = None
return tuning_pod
except kubernetes_asyncio.client.exceptions.ApiException as e:
if e.status != 404 or e.reason != "Not Found" and raise_if_not_found:
raise
self.tuning_pod = None
self.tuning_container = None
return None
@property
def target_controller_name(self) -> str:
return self.target_controller_config.name
@property
def container_name(self) -> str:
return self.container_config.name
    # TODO: Factor into another class?
    async def _configure_tuning_pod_template_spec(self) -> None:
        """Build and cache the PodTemplateSpec used to create tuning Pods.

        Copies the target controller's pod template, renames and labels it for
        tuning, applies static environment variables and resource settings, and
        (when the servo itself runs in-cluster) sets an owner reference so the
        tuning Pod is garbage-collected along with the servo Deployment.
        """
        # Configure a PodSpecTemplate for the tuning Pod state
        pod_template_spec: kubernetes_asyncio.client.models.V1PodTemplateSpec = await self.target_controller.get_pod_template_spec_copy()
        pod_template_spec.metadata.name = self.tuning_pod_name
        # Annotate/label the template so the tuning Pod is identifiable.
        if pod_template_spec.metadata.annotations is None:
            pod_template_spec.metadata.annotations = {}
        pod_template_spec.metadata.annotations["opsani.com/opsani_tuning_for"] = self.name
        if pod_template_spec.metadata.labels is None:
            pod_template_spec.metadata.labels = {}
        pod_template_spec.metadata.labels["opsani_role"] = "tuning"
        # Build a container from the raw podspec
        container_obj = next(filter(lambda c: c.name == self.container_config.name, pod_template_spec.spec.containers))
        container = Container(container_obj, None)
        servo.logger.debug(f"Initialized new tuning container from Pod spec template: {container.name}")
        if self.container_config.static_environment_variables:
            if container.obj.env is None:
                container.obj.env = []
            # Filter out vars with the same name as the ones we are setting
            container.obj.env = list(filter(
                lambda e: e.name not in self.container_config.static_environment_variables,
                container.obj.env
            ))
            env_list = [
                kubernetes_asyncio.client.V1EnvVar(name=k, value=v)
                for k, v in self.container_config.static_environment_variables.items()
            ]
            container.obj.env.extend(env_list)
        if self.tuning_container:
            # Carry forward resource values already applied to a live tuning Pod.
            servo.logger.debug(f"Copying resource requirements from existing tuning pod container '{self.tuning_pod.name}/{self.tuning_container.name}'")
            resource_requirements = self.tuning_container.resources
            container.resources = resource_requirements
        else:
            servo.logger.debug(f"No existing tuning pod container found, initializing resource requirement defaults")
            set_container_resource_defaults_from_config(container, self.container_config)
        # If the servo is running inside Kubernetes, register self as the controller for the Pod and ReplicaSet
        servo_pod_name = os.environ.get("POD_NAME")
        servo_pod_namespace = os.environ.get("POD_NAMESPACE")
        if servo_pod_name is not None and servo_pod_namespace is not None:
            self.logger.debug(
                f"running within Kubernetes, registering as Pod controller... (pod={servo_pod_name}, namespace={servo_pod_namespace})"
            )
            servo_pod = await Pod.read(servo_pod_name, servo_pod_namespace)
            # Walk the owner chain Pod -> ReplicaSet -> Deployment to find the
            # servo's root controller.
            pod_controller = next(
                iter(
                    ow
                    for ow in servo_pod.obj.metadata.owner_references
                    if ow.controller
                )
            )
            # TODO: Create a ReplicaSet class...
            async with kubernetes_asyncio.client.api_client.ApiClient() as api:
                api_client = kubernetes_asyncio.client.AppsV1Api(api)
                servo_rs: kubernetes_asyncio.client.V1ReplicaSet = (
                    await api_client.read_namespaced_replica_set(
                        name=pod_controller.name, namespace=servo_pod_namespace
                    )
                ) # still ephemeral
                rs_controller = next(
                    iter(
                        ow for ow in servo_rs.metadata.owner_references if ow.controller
                    )
                )
                servo_dep: kubernetes_asyncio.client.V1Deployment = (
                    await api_client.read_namespaced_deployment(
                        name=rs_controller.name, namespace=servo_pod_namespace
                    )
                )
                pod_template_spec.metadata.owner_references = [
                    kubernetes_asyncio.client.V1OwnerReference(
                        api_version=servo_dep.api_version,
                        block_owner_deletion=True,
                        controller=True, # Ensures the pod will not be adopted by another controller
                        kind="Deployment",
                        name=servo_dep.metadata.name,
                        uid=servo_dep.metadata.uid,
                    )
                ]
        # Cache for create_tuning_pod() to consume.
        self._tuning_pod_template_spec = pod_template_spec
async def create_or_recreate_tuning_pod(self) -> Pod:
"""
Creates a new Tuning Pod or deletes and recreates one from the current optimization state.
"""
servo.logger.info("Deleting existing tuning pod (if any)")
await self.delete_tuning_pod(raise_if_not_found=False)
return await self.create_tuning_pod()
    async def create_tuning_pod(self) -> Pod:
        """
        Creates a new Tuning Pod from the current optimization state.

        Requires a configured PodTemplateSpec and no already-loaded tuning
        Pod/Container. Waits (bounded by self.timeout) for the new Pod to
        become ready before hydrating local state.
        """
        assert self._tuning_pod_template_spec, "Must have tuning pod template spec"
        assert self.tuning_pod is None, "Tuning Pod already exists"
        assert self.tuning_container is None, "Tuning Pod Container already exists"
        self.logger.debug(
            f"creating tuning pod '{self.tuning_pod_name}' based on {self.target_controller_type} '{self.target_controller_name}' in namespace '{self.namespace}'"
        )
        # Setup the tuning Pod -- our settings are updated on the underlying PodSpec template
        self.logger.trace(f"building new tuning pod")
        pod_obj = kubernetes_asyncio.client.V1Pod(
            metadata=self._tuning_pod_template_spec.metadata, spec=self._tuning_pod_template_spec.spec
        )
        # Update pod with latest controller state
        pod_obj = self.target_controller.update_pod(pod_obj)
        tuning_pod = Pod(obj=pod_obj)
        # Create the Pod and wait for it to get ready
        self.logger.info(
            f"Creating tuning Pod '{self.tuning_pod_name}' in namespace '{self.namespace}'"
        )
        await tuning_pod.create(self.namespace)
        servo.logger.success(f"Created Tuning Pod '{self.tuning_pod_name}' in namespace '{self.namespace}'")
        servo.logger.info(f"waiting up to {self.timeout} for Tuning Pod to become ready...")
        # Surface progress to the logger while the readiness wait runs in parallel.
        progress = servo.EventProgress(self.timeout)
        progress_logger = lambda p: self.logger.info(
            p.annotate(f"waiting for '{self.tuning_pod_name}' to become ready...", prefix=False)
        )
        progress.start()
        task = asyncio.create_task(tuning_pod.wait_until_ready())
        task.add_done_callback(lambda _: progress.complete())
        gather_task = asyncio.gather(
            task,
            progress.watch(progress_logger),
        )
        try:
            await asyncio.wait_for(
                gather_task,
                timeout=self.timeout.total_seconds()
            )
        except asyncio.TimeoutError:
            servo.logger.error(f"Timed out waiting for Tuning Pod to become ready...")
            servo.logger.debug(f"Cancelling Task: {task}, progress: {progress}")
            # Cancel both the readiness waiter and the gather wrapper, then
            # surface the Pod's terminal status (with adjustment context).
            for t in {task, gather_task}:
                t.cancel()
                with contextlib.suppress(asyncio.CancelledError):
                    await t
                    servo.logger.debug(f"Cancelled Task: {t}, progress: {progress}")
            await tuning_pod.raise_for_status(adjustments=self.adjustments)
        # Load the in memory model for various convenience accessors
        await tuning_pod.refresh()
        await tuning_pod.get_containers()
        # Hydrate local state
        self.tuning_pod = tuning_pod
        self.tuning_container = tuning_pod.get_container(self.container_config.name)
        servo.logger.info(f"Tuning Pod successfully created")
        return tuning_pod
    @property
    def tuning_cpu(self) -> Optional[CPU]:
        """
        Return the current CPU setting for the target container of the tuning Pod (if any).
        """
        if not self.tuning_pod:
            return None
        cpu = self.container_config.cpu.copy()
        # Determine the value in priority order from the config
        resource_requirements = self.tuning_container.get_resource_requirements('cpu')
        cpu.request = resource_requirements.get(ResourceRequirement.request)
        cpu.limit = resource_requirements.get(ResourceRequirement.limit)
        # Pick the first requirement (in config priority order) that has a value.
        # NOTE(review): if no requirement has a value, next() yields None and
        # Millicore.parse(None) presumably fails -- confirm upstream guarantees
        # at least one requirement is always set on the tuning container.
        value = resource_requirements.get(
            next(filter(lambda r: resource_requirements[r] is not None, self.container_config.cpu.get), None)
        )
        value = Millicore.parse(value)
        # NOTE: use copy + update to apply values that may be outside of the range
        cpu = cpu.copy(update={"value": value})
        return cpu
    @property
    def tuning_memory(self) -> Optional[Memory]:
        """
        Return the current Memory setting for the target container of the tuning Pod (if any).
        """
        if not self.tuning_pod:
            return None
        memory = self.container_config.memory.copy()
        # Determine the value in priority order from the config
        resource_requirements = self.tuning_container.get_resource_requirements('memory')
        memory.request = resource_requirements.get(ResourceRequirement.request)
        memory.limit = resource_requirements.get(ResourceRequirement.limit)
        # Pick the first requirement (in config priority order) that has a value.
        # NOTE(review): mirrors tuning_cpu -- if no requirement has a value,
        # ShortByteSize.validate(None) presumably fails; confirm upstream.
        value = resource_requirements.get(
            next(filter(lambda r: resource_requirements[r] is not None, self.container_config.memory.get), None)
        )
        value = ShortByteSize.validate(value)
        # NOTE: use copy + update to apply values that may be outside of the range
        memory = memory.copy(update={"value": value})
        return memory
@property
def tuning_replicas(self) -> servo.Replicas:
"""
Return the current Replicas setting for the optimization.
"""
value = 1 if self.tuning_pod else 0
return servo.Replicas(
min=0,
max=1,
value=value,
pinned=True,
)
@property
def on_failure(self) -> FailureMode:
"""
Return the configured failure behavior. If not set explicitly, this will be cascaded
from the base kubernetes configuration (or its default)
"""
return self.target_controller_config.on_failure
@property
def main_cpu(self) -> CPU:
"""
| |
#! /usr/bin/env python3
# coding=utf-8
# Copyright 2018 The Uber AI Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Example command with bag of words:
python examples/run_pplm.py -B space --cond_text "The president" --length 100 --gamma 1.5 --num_iterations 3 --num_samples 10 --stepsize 0.01 --window_length 5 --kl_scale 0.01 --gm_scale 0.95
Example command with discriminator:
python examples/run_pplm.py -D sentiment --class_label 3 --cond_text "The lake" --length 10 --gamma 1.0 --num_iterations 30 --num_samples 10 --stepsize 0.01 --kl_scale 0.01 --gm_scale 0.95
"""
import argparse
import json
from operator import add
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from tqdm import trange
from transformers import GPT2Tokenizer
from transformers.file_utils import cached_path
from transformers.modeling_gpt2 import GPT2LMHeadModel
from pplm_classification_head import ClassificationHead
# Loss types selecting which attribute model(s) steer generation.
PPLM_BOW = 1
PPLM_DISCRIM = 2
PPLM_BOW_DISCRIM = 3
# Numeric guards: SMALL_CONST avoids log(0)/division-by-zero; BIG_CONST serves
# as "infinity" when masking logits.
SMALL_CONST = 1e-15
BIG_CONST = 1e10
# Verbosity levels, ordered; compared with >= throughout.
QUIET = 0
REGULAR = 1
VERBOSE = 2
VERY_VERBOSE = 3
VERBOSITY_LEVELS = {
    'quiet': QUIET,
    'regular': REGULAR,
    'verbose': VERBOSE,
    'very_verbose': VERY_VERBOSE,
}
# Per-discriminator metadata: where to fetch the classification-head weights,
# its dimensions, the label vocabulary, and the fallback class id.
DISCRIMINATOR_MODELS_PARAMS = {
    "clickbait": {
        "url": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/discriminators/clickbait_classifier_head.pt",
        "class_size": 2,
        "embed_size": 1024,
        "class_vocab": {"non_clickbait": 0, "clickbait": 1},
        "default_class": 1,
        "pretrained_model": "gpt2-medium",
    },
    "sentiment": {
        "url": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/discriminators/SST_classifier_head.pt",
        "class_size": 5,
        "embed_size": 1024,
        "class_vocab": {"very_positive": 2, "very_negative": 3},
        "default_class": 3,
        "pretrained_model": "gpt2-medium",
    },
}
def to_var(x, requires_grad=False, volatile=False, device='cuda'):
    """Move tensor `x` onto `device` (CUDA when requested and available)
    and wrap it in an autograd Variable."""
    use_cuda = torch.cuda.is_available() and device == 'cuda'
    if use_cuda:
        x = x.cuda()
    elif device != 'cuda':
        x = x.to(device)
    return Variable(x, requires_grad=requires_grad, volatile=volatile)
def top_k_filter(logits, k, probs=False):
    """
    Masks everything but the k top entries as -infinity (-BIG_CONST).
    Used to mask logits such that e^-infinity -> 0 won't contribute to the
    sum of the denominator. When `probs` is True (inputs are probabilities),
    masked entries become 0 instead.
    """
    if k == 0:
        return logits
    # Per-row threshold: the k-th largest value, broadcast to the full shape.
    kth_best = torch.topk(logits, k)[0][:, -1].view(-1, 1).expand_as(logits)
    if probs:
        fill = torch.zeros_like(logits)
    else:
        fill = torch.ones_like(logits) * -BIG_CONST
    return torch.where(logits < kth_best, fill, logits)
def perturb_past(
        past,
        model,
        last,
        unpert_past=None,
        unpert_logits=None,
        accumulated_hidden=None,
        grad_norms=None,
        stepsize=0.01,
        one_hot_bows_vectors=None,
        classifier=None,
        class_label=None,
        loss_type=0,
        num_iterations=3,
        horizon_length=1,
        window_length=0,
        decay=False,
        gamma=1.5,
        kl_scale=0.01,
        device='cuda',
        verbosity_level=REGULAR
):
    """Perturb the model's `past` key/value cache by gradient ascent so that
    the next-token distribution moves toward `class_label` under `classifier`,
    regularized by a KL term against the unperturbed distribution.

    Returns (pert_past, new_accumulated_hidden, grad_norms, loss_per_iter).
    NOTE(review): this variant only supports discriminator losses (see the
    assert below); BOW-only loss requires the original implementation.
    """
    # Generate inital perturbed past
    # each layer gets a grad_accumulator with the shape of (2, d_batch, num_heads, seq_len, embed_size_per_head)
    grad_accumulator = [
        (np.zeros(p.shape).astype("float32"))
        for p in past
    ]
    if accumulated_hidden is None:
        accumulated_hidden = 0
    # Optional linear decay of the perturbation weight across the window.
    if decay:
        decay_mask = torch.arange(
            0.,
            1.0 + SMALL_CONST,
            1.0 / (window_length)
        )[1:]
    else:
        decay_mask = 1.0
    # Build a mask so gradients are applied only to the most recent
    # `window_length` positions of the past (when a window is configured).
    # curr_length: current seq_len (generated so far)
    _, _, _, curr_length, _ = past[0].shape
    if curr_length > window_length and window_length > 0:
        ones_key_val_shape = (
            tuple(past[0].shape[:-2])
            + tuple([window_length])
            + tuple(past[0].shape[-1:])
        )
        zeros_key_val_shape = (
            tuple(past[0].shape[:-2])
            + tuple([curr_length - window_length])
            + tuple(past[0].shape[-1:])
        )
        ones_mask = torch.ones(ones_key_val_shape)
        # Apply the decay along the sequence axis (permute so broadcasting
        # lines up, then permute back).
        ones_mask = decay_mask * ones_mask.permute(0, 1, 2, 4, 3)
        ones_mask = ones_mask.permute(0, 1, 2, 4, 3)
        window_mask = torch.cat(
            (ones_mask, torch.zeros(zeros_key_val_shape)),
            dim=-2
        ).to(device)
    else:
        # No window: perturb every position.
        window_mask = torch.ones_like(past[0]).to(device)
    # accumulate perturbations for num_iterations
    loss_per_iter = []
    new_accumulated_hidden = None
    for i in range(num_iterations):
        if verbosity_level >= VERBOSE:
            print("Iteration ", i + 1)
        curr_perturbation = [
            to_var(torch.from_numpy(p_), requires_grad=True, device=device)
            for p_ in grad_accumulator
        ] # grad -> a list of variables
        # Compute hidden using perturbed past
        perturbed_past = list(map(add, past, curr_perturbation))
        _, _, _, curr_length, _ = curr_perturbation[0].shape
        all_logits, _, all_hidden = model(last, past=perturbed_past)
        hidden = all_hidden[-1] # last hidden layer
        new_accumulated_hidden = accumulated_hidden + torch.sum(
            hidden,
            dim=1
        ).detach() # TODO: double check this line
        # TODO: Check the layer-norm consistency of this with trained discriminator (Sumanth)
        logits = all_logits[:, -1, :]
        probs = F.softmax(logits, dim=-1)
        loss = 0.0
        loss_list = []
        assert loss_type == PPLM_DISCRIM or loss_type == PPLM_BOW_DISCRIM, 'Use the original code for BOW discriminator'
        if loss_type == PPLM_DISCRIM or loss_type == PPLM_BOW_DISCRIM: # perturb again for the future
            ce_loss = torch.nn.CrossEntropyLoss()
            # TODO why we need to do this assignment and not just using unpert_past? (Sumanth)
            curr_unpert_past = unpert_past
            curr_probs = torch.unsqueeze(probs, dim=1) # the perturbed prob at the current position
            wte = model.resize_token_embeddings()
            for _ in range(horizon_length): # horizon_length is set to 1 so this loop only runs once
                inputs_embeds = torch.matmul(curr_probs, wte.weight.data) # as input, use the last expected embedding (intead of actual embedding)
                _, curr_unpert_past, curr_all_hidden = model(
                    past=curr_unpert_past,
                    inputs_embeds=inputs_embeds
                )
                curr_hidden = curr_all_hidden[-1]
                new_accumulated_hidden = new_accumulated_hidden + torch.sum(
                    curr_hidden, dim=1)
            # Classify the mean hidden state over past + present + horizon.
            prediction = classifier(new_accumulated_hidden /
                                    (curr_length + 1 + horizon_length)) # 1 is the perturbation for the present, horizon_length is for the future
            label = torch.tensor(prediction.shape[0] * [class_label],
                                 device=device,
                                 dtype=torch.long)
            discrim_loss = ce_loss(prediction, label)
            if verbosity_level >= VERY_VERBOSE:
                print(" pplm_discrim_loss:", discrim_loss.data.cpu().numpy())
            loss += discrim_loss
            loss_list.append(discrim_loss)
        # KL regularizer keeps the perturbed distribution close to the
        # unperturbed one; SMALL_CONST guards against log(0)/division by zero.
        kl_loss = 0.0
        if kl_scale > 0.0:
            unpert_probs = F.softmax(unpert_logits[:, -1, :], dim=-1)
            unpert_probs = (
                unpert_probs + SMALL_CONST *
                (unpert_probs <= SMALL_CONST).float().to(device).detach()
            )
            correction = SMALL_CONST * (probs <= SMALL_CONST).float().to(
                device).detach()
            corrected_probs = probs + correction.detach()
            kl_loss = kl_scale * (
                (corrected_probs * (corrected_probs / unpert_probs).log()).sum()
            )
            if verbosity_level >= VERY_VERBOSE:
                print(' kl_loss', kl_loss.data.cpu().numpy())
            loss += kl_loss
        loss_per_iter.append(loss.data.cpu().numpy())
        if verbosity_level >= VERBOSE:
            print(' pplm_loss', (loss - kl_loss).data.cpu().numpy())
        # compute gradients
        loss.backward()
        # calculate gradient norms
        if grad_norms is not None and loss_type == PPLM_BOW:
            grad_norms = [
                torch.max(grad_norms[index], torch.norm(p_.grad * window_mask))
                for index, p_ in enumerate(curr_perturbation)
            ]
        else:
            grad_norms = [
                (torch.norm(p_.grad * window_mask) + SMALL_CONST)
                for index, p_ in enumerate(curr_perturbation)
            ]
        # normalize gradients
        grad = [
            -stepsize *
            (p_.grad * window_mask / grad_norms[
                index] ** gamma).data.cpu().numpy()
            for index, p_ in enumerate(curr_perturbation)
        ]
        # accumulate gradient
        grad_accumulator = list(map(add, grad, grad_accumulator))
        # reset gradients, just to make sure
        for p_ in curr_perturbation:
            p_.grad.data.zero_()
        # removing past from the graph
        new_past = []
        for p_ in past:
            new_past.append(p_.detach())
        past = new_past
    # apply the accumulated perturbations to the past
    grad_accumulator = [
        to_var(torch.from_numpy(p_), requires_grad=True, device=device)
        for p_ in grad_accumulator
    ]
    pert_past = list(map(add, past, grad_accumulator))
    return pert_past, new_accumulated_hidden, grad_norms, loss_per_iter
def _resolve_label_id(class_label, params, verbosity_level):
    """
    Map `class_label` (either a class name or a numeric class id) to a valid
    label id for the discriminator described by `params`.

    Falls back to ``params["default_class"]`` when the label is unknown.
    The fallback warning is printed only for str/int labels at REGULAR
    verbosity or above; any other label type (e.g. None) falls through
    silently, mirroring the original per-type branches.
    """
    if isinstance(class_label, str) and class_label in params["class_vocab"]:
        return params["class_vocab"][class_label]
    if isinstance(class_label, int) and class_label in set(params["class_vocab"].values()):
        return class_label
    label_id = params["default_class"]
    if isinstance(class_label, (str, int)) and verbosity_level >= REGULAR:
        print("class_label {} not in class_vocab".format(class_label))
        print("available values are: {}".format(params["class_vocab"]))
        print("using default class {}".format(label_id))
    return label_id


def get_classifier(
        name: Optional[str],
        class_label: Union[str, int],
        device: str,
        verbosity_level: int = REGULAR
) -> Tuple[Optional[ClassificationHead], Optional[int]]:
    """
    Load a pretrained discriminator head and resolve the requested class label.

    :param name: key into DISCRIMINATOR_MODELS_PARAMS, or None for "no classifier"
    :param class_label: class name or class id; unknown values fall back to the
        model's default class (with a warning at REGULAR verbosity)
    :param device: torch device string the weights are mapped/moved to
    :param verbosity_level: controls warning output on label fallback
    :return: (classifier in eval mode, resolved label id), or (None, None)
        when `name` is None
    :raises ValueError: when the model parameters contain neither "url" nor "path"
    """
    if name is None:
        return None, None

    params = DISCRIMINATOR_MODELS_PARAMS[name]
    classifier = ClassificationHead(
        class_size=params['class_size'],
        embed_size=params['embed_size']
    ).to(device)

    # Weights come either from a downloadable URL or a local path.
    if "url" in params:
        resolved_archive_file = cached_path(params["url"])
    elif "path" in params:
        resolved_archive_file = params["path"]
    else:
        raise ValueError("Either url or path have to be specified "
                         "in the discriminator model parameters")
    classifier.load_state_dict(
        torch.load(resolved_archive_file, map_location=device))
    classifier.eval()

    # De-duplicated label resolution (previously two identical str/int branches).
    label_id = _resolve_label_id(class_label, params, verbosity_level)

    return classifier, label_id
def build_bows_one_hot_vectors(bow_indices, tokenizer, device='cuda'):
    """
    Turn bag-of-words token-id lists into one-hot matrices.

    For every bag in `bow_indices`, words that do not encode to a single
    token (len > 1) are dropped; the surviving ids become the rows of a
    (num_words, vocab_size) one-hot tensor on `device`.

    Returns None when `bow_indices` is None.
    """
    if bow_indices is None:
        return None

    one_hot_bows_vectors = []
    for bag in bow_indices:
        # Keep only single-token words; multi-token words cannot be
        # represented as a single one-hot row.
        single_token_ids = [ids for ids in bag if len(ids) <= 1]
        id_tensor = torch.tensor(single_token_ids).to(device)
        one_hot = torch.zeros(id_tensor.shape[0], tokenizer.vocab_size).to(device)
        one_hot.scatter_(1, id_tensor, 1)
        one_hot_bows_vectors.append(one_hot)
    return one_hot_bows_vectors
def full_text_generation(
        model,
        tokenizer,
        context=None,
        num_samples=1,
        device="cuda",
        discrim=None,
        class_label=None,
        length=100,
        stepsize=0.02,
        temperature=1.0,
        top_k=10,
        sample=True,
        num_iterations=3,
        grad_length=10000,
        horizon_length=1,
        window_length=0,
        decay=False,
        gamma=1.5,
        gm_scale=0.9,
        kl_scale=0.01,
        verbosity_level=REGULAR,
        **kwargs
):
    """
    Generate one unperturbed reference sequence plus `num_samples`
    discriminator-steered (PPLM) sequences from the same context.

    :return: (unpert_gen_tok_text, pert_gen_tok_texts, discrim_losses,
        losses_in_time)
    :raises Exception: when no discriminator could be loaded (`discrim` is
        None or unknown) — this variant supports only PPLM-Discrim steering.
    """
    classifier, class_id = get_classifier(
        discrim,
        class_label,
        device
    )

    if classifier is not None:
        loss_type = PPLM_DISCRIM
        if verbosity_level >= REGULAR:
            print("Using PPLM-Discrim")
    else:
        # BUG FIX: message previously read "Specify either a discriminator",
        # a leftover from a removed bag-of-words alternative.
        raise Exception("Specify a discriminator")

    # BUG FIX: `bow_indices` was passed to generate_text_pplm below without
    # ever being defined in this function, raising NameError on every call.
    # Bag-of-words control is not built here, so default to None while still
    # allowing a caller to supply precomputed indices through **kwargs.
    bow_indices = kwargs.get("bow_indices")

    # Unperturbed reference generation.
    unpert_gen_tok_text, _, _ = generate_text_pplm(
        model=model,
        tokenizer=tokenizer,
        context=context,
        device=device,
        length=length,
        sample=sample,
        perturb=False,
        verbosity_level=verbosity_level
    )
    if device == 'cuda':
        torch.cuda.empty_cache()

    pert_gen_tok_texts = []
    discrim_losses = []
    losses_in_time = []

    for i in range(num_samples):
        pert_gen_tok_text, discrim_loss, loss_in_time = generate_text_pplm(
            model=model,
            tokenizer=tokenizer,
            context=context,
            device=device,
            perturb=True,
            bow_indices=bow_indices,
            classifier=classifier,
            class_label=class_id,
            loss_type=loss_type,
            length=length,
            stepsize=stepsize,
            temperature=temperature,
            top_k=top_k,
            sample=sample,
            num_iterations=num_iterations,
            grad_length=grad_length,
            horizon_length=horizon_length,
            window_length=window_length,
            decay=decay,
            gamma=gamma,
            gm_scale=gm_scale,
            kl_scale=kl_scale,
            verbosity_level=verbosity_level
        )
        pert_gen_tok_texts.append(pert_gen_tok_text)
        if classifier is not None:
            discrim_losses.append(discrim_loss.data.cpu().numpy())
        losses_in_time.append(loss_in_time)

    if device == 'cuda':
        torch.cuda.empty_cache()

    return unpert_gen_tok_text, pert_gen_tok_texts, discrim_losses, losses_in_time
def generate_text_pplm(
model,
tokenizer,
context=None,
| |
d = Encodable.parents_encode(self, __class__)
return d
class FactorTypesRelationUnidirectionalLinearTransformObservation(FactorTypesRelationObservation, Encodable):
    """
    Unidirectional linear transform from an origin FactorType to a
    destination FactorType.

    The weight may be a plain number or an expression containing parameters.
    The relation is applied to Factors that are instances of the origin
    FactorType in order to obtain values for the destination FactorType.
    Optional origin/destination Processor contexts and units narrow where
    and how it applies.
    """

    def __init__(self, origin: FactorType, destination: FactorType, weight: Union[float, str],
                 origin_context: Processor = None, destination_context: Processor = None, origin_unit=None,
                 destination_unit=None, observer: Observer = None, tags=None, attributes=None):
        Taggable.__init__(self, tags)
        Qualifiable.__init__(self, attributes)
        Automatable.__init__(self)
        # Endpoints, scaling expression and authorship
        self._origin = origin
        self._destination = destination
        self._weight = weight
        self._observer = observer
        # Optional scoping information
        self._origin_context = origin_context
        self._destination_context = destination_context
        self._origin_unit = origin_unit
        self._destination_unit = destination_unit

    def encode(self):
        """Serialize parents' fields plus endpoints, weight, contexts and units."""
        encoded = Encodable.parents_encode(self, __class__)
        encoded.update(
            origin=name_and_id_dict(self.origin),
            destination=name_and_id_dict(self.destination),
            weight=self._weight,
            observer=name_and_id_dict(self.observer),
            origin_context=self._origin_context,
            destination_context=self._destination_context,
            origin_unit=self._origin_unit,
            destination_unit=self._destination_unit,
        )
        return encoded

    def __str__(self):
        return f"ScaleChange: from {self._origin.name} to {self._destination.name}, origin ctx {self._origin_context.name if self._origin_context else '-'}, dst ctx {self._destination_context.name if self._destination_context else '-'}"

    @staticmethod
    def create_and_append(origin: FactorType, destination: FactorType, weight, origin_context: Processor = None,
                          destination_context: Processor = None, origin_unit=None, destination_unit=None,
                          observer: Observer = None, tags=None, attributes=None):
        """Build the relation and register both endpoints with the observer."""
        rel = FactorTypesRelationUnidirectionalLinearTransformObservation(
            origin, destination, weight, origin_context, destination_context,
            origin_unit, destination_unit, observer, tags, attributes)
        # NOTE(review): unlike the sibling relation classes, the relation is
        # deliberately NOT appended to origin/destination observation lists.
        if observer:
            observer.observables_append(origin)
            observer.observables_append(destination)
        return rel

    @property
    def origin(self):
        return self._origin

    @property
    def destination(self):
        return self._destination

    @property
    def scaled_weight(self):
        # Weight adjusted for any mismatch between the declared origin /
        # destination units and the endpoints' own units.
        return UnitConversion.get_scaled_weight(self._weight,
                                                self.origin.unit, self._origin_unit,
                                                self._destination_unit, self.destination.unit)

    @property
    def observer(self):
        return self._observer

    @staticmethod
    def partial_key(origin: FactorType = None, destination: FactorType = None,
                    origin_context: Processor = None, destination_context: Processor = None, observer: Observer = None):
        """Lookup key covering whichever endpoints are provided."""
        key = {"_t": RelationClassType.ftft_directed_linear_transform.name}
        if origin:
            key["__o"] = origin.ident
        if destination:
            key["__d"] = destination.ident
        if observer:
            key["__oer"] = observer.ident
        if origin_context:
            key["__oc"] = origin_context.ident
        if destination_context:
            key["__dc"] = destination_context.ident
        return key

    def key(self):
        """Full lookup key of this relation instance."""
        key = {"_t": RelationClassType.ftft_directed_linear_transform.name,
               "__o": self._origin.ident, "__d": self._destination.ident}
        if self._origin_context:
            key["__oc"] = self._origin_context.ident
        if self._destination_context:
            key["__dc"] = self._destination_context.ident
        if self._observer:
            key["__oer"] = self._observer.ident
        return key
class ProcessorsRelationIsAObservation(ProcessorsRelationObservation, Encodable):
    """Taxonomic "is-a" relation between a parent Processor and a child Processor."""

    def __init__(self, parent: Processor, child: Processor, observer: Observer=None, tags=None, attributes=None):
        Taggable.__init__(self, tags)
        Qualifiable.__init__(self, attributes)
        Automatable.__init__(self)
        self._parent = parent
        self._child = child
        self._observer = observer

    def encode(self):
        """Serialize parents' fields plus the two endpoints and the observer."""
        encoded = Encodable.parents_encode(self, __class__)
        encoded["origin"] = name_and_id_dict(self._parent)
        encoded["destination"] = name_and_id_dict(self._child)
        encoded["observer"] = name_and_id_dict(self.observer)
        return encoded

    @staticmethod
    def create_and_append(parent: Processor, child: Processor, observer: Observer, tags=None, attributes=None):
        """Build the relation and register it on both processors and the observer."""
        rel = ProcessorsRelationIsAObservation(parent, child, observer, tags, attributes)
        for processor in (parent, child):
            if processor:
                processor.observations_append(rel)
        if observer:
            observer.observables_append(parent)
            observer.observables_append(child)
        return rel

    @property
    def parent_processor(self):
        return self._parent

    @property
    def child_processor(self):
        return self._child

    @property
    def observer(self):
        return self._observer

    @staticmethod
    def partial_key(parent: Processor=None, child: Processor=None, observer: Observer=None):
        """Lookup key covering whichever endpoints are provided."""
        key = {"_t": RelationClassType.pp_isa.name}
        if child:
            key["__c"] = child.ident
        if parent:
            key["__p"] = parent.ident
        if observer:
            key["__oer"] = observer.ident
        return key

    def key(self):
        """Full lookup key of this relation instance."""
        key = {"_t": RelationClassType.pp_isa.name, "__p": self._parent.ident, "__c": self._child.ident}
        if self._observer:
            key["__oer"] = self._observer.ident
        return key
class ProcessorsRelationPartOfObservation(ProcessorsRelationObservation, Encodable):
    """
    Part-of (composition) relation from a parent Processor to a child Processor,
    with an optional aggregation weight and an optional "behave as" Processor.
    """

    def __init__(self, parent: Processor, child: Processor, observer: Observer=None, behave_as: Processor=None, weight=None, tags=None, attributes=None):
        Taggable.__init__(self, tags)
        Qualifiable.__init__(self, attributes)
        Automatable.__init__(self)
        self._parent = parent
        self._child = child
        self._observer = observer
        # Default weight is the neutral "1" (kept as a string expression)
        self._weight = "1" if weight is None else weight
        self._behave_as = behave_as

    def encode(self):
        """Serialize endpoints and observer (weight/behave_as are not serialized)."""
        encoded = Encodable.parents_encode(self, __class__)
        encoded["origin"] = name_and_id_dict(self._parent)
        encoded["destination"] = name_and_id_dict(self._child)
        encoded["observer"] = name_and_id_dict(self.observer)
        return encoded

    @staticmethod
    def create_and_append(parent: Processor, child: Processor, observer: Optional[Observer] = None, behave_as: Processor=None, weight=None, tags=None, attributes=None):
        """Build the relation and register it on both processors and the observer."""
        rel = ProcessorsRelationPartOfObservation(parent, child, observer, behave_as, weight, tags, attributes)
        for processor in (parent, child):
            if processor:
                processor.observations_append(rel)
        if observer:
            observer.observables_append(parent)
            observer.observables_append(child)
        return rel

    @property
    def parent_processor(self):
        return self._parent

    @property
    def child_processor(self):
        return self._child

    @property
    def observer(self):
        return self._observer

    @property
    def weight(self):
        return self._weight

    @property
    def behave_as(self):
        return self._behave_as

    @staticmethod
    def partial_key(parent: Processor=None, child: Processor=None, observer: Observer=None):
        """Lookup key covering whichever endpoints are provided."""
        key = {"_t": RelationClassType.pp_part_of.name}
        if child:
            key["__c"] = child.ident
        if parent:
            key["__p"] = parent.ident
        if observer:
            key["__oer"] = observer.ident
        return key

    def key(self):
        """Full lookup key of this relation instance."""
        key = {"_t": RelationClassType.pp_part_of.name, "__p": self._parent.ident, "__c": self._child.ident}
        if self._observer:
            key["__oer"] = self._observer.ident
        return key
class ProcessorsRelationUndirectedFlowObservation(ProcessorsRelationObservation):
    """
    Represents an undirected Flow, from a source to a target Processor

    Undirected flow is DYNAMICALLY converted to Directed flow, for each factor:
    * If the factor of the source (parent) is "Incoming", the flow is from the parent to the child
    * If the factor of the source (parent) is "Outgoing", the flow is from the child to the parent

    from nexinfosys.models.musiasem_concepts import Processor, Observer, ProcessorsRelationUndirectedFlowObservation
    pr = ProcessorsRelationFlowObservation.create_and_append(Processor("A"), Processor("B"), Observer("S"))
    """

    def __init__(self, source: Processor, target: Processor, observer: Observer=None, tags=None, attributes=None):
        Taggable.__init__(self, tags)
        Qualifiable.__init__(self, attributes)
        Automatable.__init__(self)
        self._source = source
        self._target = target
        self._observer = observer

    @staticmethod
    def create_and_append(source: Processor, target: Processor, observer: Observer, tags=None, attributes=None):
        """Build the relation and register it on both processors and the observer."""
        o = ProcessorsRelationUndirectedFlowObservation(source, target, observer, tags, attributes)
        if source:
            source.observations_append(o)
        if target:
            target.observations_append(o)
        if observer:
            observer.observables_append(source)
            observer.observables_append(target)
        return o

    @property
    def source_processor(self):
        return self._source

    @property
    def target_processor(self):
        return self._target

    @property
    def observer(self):
        return self._observer

    @staticmethod
    def partial_key(source: Processor=None, target: Processor=None, observer: Observer=None):
        """Lookup key covering whichever endpoints are provided."""
        d = {"_t": RelationClassType.pp_undirected_flow.name}
        if target:
            d["__t"] = target.ident
        if source:
            d["__s"] = source.ident
        if observer:
            d["__oer"] = observer.ident
        return d

    def key(self):
        """Full lookup key of this relation instance."""
        # BUG FIX: previously used `self._target.parent` for "__t", which does
        # not match partial_key() (and every sibling relation class uses
        # `.ident`), so keyed lookups of this relation could never match.
        d = {"_t": RelationClassType.pp_undirected_flow.name, "__s": self._source.ident, "__t": self._target.ident}
        if self._observer:
            d["__oer"] = self._observer.ident
        return d
class ProcessorsRelationUpscaleObservation(ProcessorsRelationObservation):
    """
    Upscale relation from a parent Processor to a child Processor, tied to a
    named interface (factor) and a quantity expression.
    """

    def __init__(self, parent: Processor, child: Processor, observer: Observer, factor_name: str, quantity: str=None, tags=None, attributes=None):
        Taggable.__init__(self, tags)
        Qualifiable.__init__(self, attributes)
        Automatable.__init__(self)
        self._parent = parent
        self._child = child
        self._observer = observer
        self._factor_name = factor_name
        self._quantity = quantity

    def encode(self):
        """Serialize endpoints, observer, interface name and quantity."""
        encoded = Encodable.parents_encode(self, __class__)
        encoded["origin"] = name_and_id_dict(self._parent)
        encoded["destination"] = name_and_id_dict(self._child)
        encoded["observer"] = name_and_id_dict(self.observer)
        encoded["interface"] = self.factor_name
        encoded["quantity"] = self.quantity
        return encoded

    @staticmethod
    def create_and_append(parent: Processor, child: Processor, observer: Optional[Observer], factor_name: str, quantity: str, tags=None, attributes=None):
        """Build the relation and register it on both processors and the observer."""
        rel = ProcessorsRelationUpscaleObservation(parent, child, observer, factor_name, quantity, tags, attributes)
        for processor in (parent, child):
            if processor:
                processor.observations_append(rel)
        if observer:
            observer.observables_append(parent)
            observer.observables_append(child)
        return rel

    @property
    def parent_processor(self):
        return self._parent

    @property
    def child_processor(self):
        return self._child

    @property
    def factor_name(self):
        return self._factor_name

    @property
    def quantity(self):
        return self._quantity

    @property
    def observer(self):
        return self._observer

    @staticmethod
    def partial_key(parent: Processor=None, child: Processor=None, observer: Observer=None):
        """Lookup key covering whichever endpoints are provided."""
        key = {"_t": RelationClassType.pp_upscale.name}
        if child:
            key["__c"] = child.ident
        if parent:
            key["__p"] = parent.ident
        if observer:
            key["__oer"] = observer.ident
        return key

    def key(self):
        """Full lookup key of this relation instance."""
        key = {"_t": RelationClassType.pp_upscale.name, "__p": self._parent.ident, "__c": self._child.ident}
        if self._observer:
            key["__oer"] = self._observer.ident
        return key
class FactorsRelationDirectedFlowObservation(FactorsRelationObservation, Encodable):
    """
    Represents a directed Flow, from a source to a target Factor, with an
    optional weight (possibly an expression) and an optional `back` Factor.

    from nexinfosys.models.musiasem_concepts import Processor, Factor, Observer, FactorsRelationDirectedFlowObservation
    pr = ProcessorsRelationFlowObservation.create_and_append(Processor("A"), Processor("B"), Observer("S"))
    """

    def __init__(self, source: Factor, target: Factor, observer: Observer = None, weight: str=None, tags=None,
                 attributes=None, back: Factor = None):
        Taggable.__init__(self, tags)
        Qualifiable.__init__(self, attributes)
        Automatable.__init__(self)
        self._source = source
        self._target = target
        self._back = back  # optional "return" factor of the flow
        self._weight = weight
        self._observer = observer

    def encode(self):
        """Serialize endpoints, observer and weight; `back` only when present."""
        d = Encodable.parents_encode(self, __class__)
        d.update({
            "origin": name_and_id_dict(self.source_factor),
            "destination": name_and_id_dict(self.target_factor),
            "observer": name_and_id_dict(self.observer),
            "weight": self.weight
        })
        if self.back_factor:
            d.update({
                "back": name_and_id_dict(self.back_factor)
            })
        return d

    @staticmethod
    def create_and_append(source: Factor, target: Factor, observer: Optional[Observer], weight: str=None, tags=None,
                          attributes=None, back: Factor = None):
        """Build the relation and register it on every involved Factor and the Observer."""
        o = FactorsRelationDirectedFlowObservation(source, target, observer, weight, tags, attributes, back=back)
        if source:
            source.observations_append(o)
        if target:
            target.observations_append(o)
        if back:
            back.observations_append(o)
        if observer:
            observer.observables_append(source)
            observer.observables_append(target)
            # BUG FIX: `back` was previously appended unconditionally, which
            # registered None as an observable whenever no back factor was
            # given (the observations_append call above is guarded).
            if back:
                observer.observables_append(back)
        return o

    @property
    def source_factor(self):
        return self._source

    @property
    def target_factor(self):
        return self._target

    @property
    def back_factor(self):
        return self._back

    @property
    def weight(self):
        return self._weight

    @property
    def scale_change_weight(self):
        # Stored in the Qualifiable attributes, not as a dedicated field.
        return self.attributes.get("scale_change_weight")

    @property
    def observer(self):
        return self._observer

    @staticmethod
    def partial_key(source: Factor = None, target: Factor = None, observer: Observer = None, back: Factor = None):
        """Lookup key covering whichever endpoints are provided."""
        d = {"_t": RelationClassType.ff_directed_flow.name}
        if target:
            d["__t"] = target.ident
        if source:
            d["__s"] = source.ident
        if back:
            d["__b"] = back.ident
        if observer:
            d["__oer"] = observer.ident
        return d

    def key(self):
        """Full lookup key of this relation instance ("__b" is None when no back factor)."""
        d = {"_t": RelationClassType.ff_directed_flow.name,
             "__s": self._source.ident,
             "__t": self._target.ident,
             "__b": self._back.ident if self._back else None}
        if self._observer:
            d["__oer"] = self._observer.ident
        return d
class FactorsRelationScaleObservation(FactorsRelationObservation):
def __init__(self, origin: Factor, destination: Factor, observer: Observer, quantity: str=None, tags=None, attributes=None):
Taggable.__init__(self, tags)
Qualifiable.__init__(self, attributes)
Automatable.__init__(self)
self._origin = origin
self._destination = destination
self._observer = observer
self._quantity = quantity
def encode(self):
d = Encodable.parents_encode(self, __class__)
d.update({
"origin": name_and_id_dict(self._origin),
"destination": name_and_id_dict(self._destination),
"observer": name_and_id_dict(self._observer),
"quantity": self._quantity
})
return d
@staticmethod
def create_and_append(origin: Factor, destination: Factor, observer: Observer, quantity: str, tags=None, attributes=None):
o = FactorsRelationScaleObservation(origin, destination, observer, quantity, tags, attributes)
if origin:
origin.observations_append(o)
if destination:
destination.observations_append(o)
if observer:
observer.observables_append(origin)
observer.observables_append(destination)
return o
@property
def origin(self):
return self._origin
@property
def destination(self):
return self._destination
@property
def quantity(self):
return self._quantity
@property
def observer(self):
return self._observer
| |
struct.calcsize(pattern)
val1.fx = struct.unpack(pattern, str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sf'%length
start = end
end += struct.calcsize(pattern)
val1.fy = struct.unpack(pattern, str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sf'%length
start = end
end += struct.calcsize(pattern)
val1.cx = struct.unpack(pattern, str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sf'%length
start = end
end += struct.calcsize(pattern)
val1.cy = struct.unpack(pattern, str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sf'%length
start = end
end += struct.calcsize(pattern)
val1.width = struct.unpack(pattern, str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sf'%length
start = end
end += struct.calcsize(pattern)
val1.height = struct.unpack(pattern, str[start:end])
start = end
end += 4
(val1.baseline,) = _get_struct_f().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
val1.localTransform = []
for i in range(0, length):
val2 = geometry_msgs.msg.Transform()
_v35 = val2.translation
_x = _v35
start = end
end += 24
(_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])
_v36 = val2.rotation
_x = _v36
start = end
end += 32
(_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])
val1.localTransform.append(val2)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
val1.laserScan = str[start:end]
_x = val1
start = end
end += 12
(_x.laserScanMaxPts, _x.laserScanMaxRange, _x.laserScanFormat,) = _get_struct_ifi().unpack(str[start:end])
_v37 = val1.laserScanLocalTransform
_v38 = _v37.translation
_x = _v38
start = end
end += 24
(_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])
_v39 = _v37.rotation
_x = _v39
start = end
end += 32
(_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
val1.userData = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
val1.grid_ground = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
val1.grid_obstacles = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
val1.grid_empty_cells = str[start:end]
start = end
end += 4
(val1.grid_cell_size,) = _get_struct_f().unpack(str[start:end])
_v40 = val1.grid_view_point
_x = _v40
start = end
end += 12
(_x.x, _x.y, _x.z,) = _get_struct_3f().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%si'%length
start = end
end += struct.calcsize(pattern)
val1.wordIds = struct.unpack(pattern, str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
val1.wordKpts = []
for i in range(0, length):
val2 = rtabmap_ros.msg.KeyPoint()
_v41 = val2.pt
_x = _v41
start = end
end += 8
(_x.x, _x.y,) = _get_struct_2f().unpack(str[start:end])
_x = val2
start = end
end += 20
(_x.size, _x.angle, _x.response, _x.octave, _x.class_id,) = _get_struct_3f2i().unpack(str[start:end])
val1.wordKpts.append(val2)
_v42 = val1.wordPts
_v43 = _v42.header
start = end
end += 4
(_v43.seq,) = _get_struct_I().unpack(str[start:end])
_v44 = _v43.stamp
_x = _v44
start = end
end += 8
(_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
_v43.frame_id = str[start:end].decode('utf-8')
else:
_v43.frame_id = str[start:end]
_x = _v42
start = end
end += 8
(_x.height, _x.width,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
_v42.fields = []
for i in range(0, length):
val3 = sensor_msgs.msg.PointField()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val3.name = str[start:end].decode('utf-8')
else:
val3.name = str[start:end]
_x = val3
start = end
end += 9
(_x.offset, _x.datatype, _x.count,) = _get_struct_IBI().unpack(str[start:end])
_v42.fields.append(val3)
_x = _v42
start = end
end += 9
(_x.is_bigendian, _x.point_step, _x.row_step,) = _get_struct_B2I().unpack(str[start:end])
_v42.is_bigendian = bool(_v42.is_bigendian)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
_v42.data = str[start:end]
start = end
end += 1
(_v42.is_dense,) = _get_struct_B().unpack(str[start:end])
_v42.is_dense = bool(_v42.is_dense)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
val1.descriptors = str[start:end]
self.data.nodes.append(val1)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.data.header.seq, _x.data.header.stamp.secs, _x.data.header.stamp.nsecs))
_x = self.data.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_3I().pack(_x.data.graph.header.seq, _x.data.graph.header.stamp.secs, _x.data.graph.header.stamp.nsecs))
_x = self.data.graph.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_7d().pack(_x.data.graph.mapToOdom.translation.x, _x.data.graph.mapToOdom.translation.y, _x.data.graph.mapToOdom.translation.z, _x.data.graph.mapToOdom.rotation.x, _x.data.graph.mapToOdom.rotation.y, _x.data.graph.mapToOdom.rotation.z, _x.data.graph.mapToOdom.rotation.w))
length = len(self.data.graph.posesId)
buff.write(_struct_I.pack(length))
pattern = '<%si'%length
buff.write(self.data.graph.posesId.tostring())
length = len(self.data.graph.poses)
buff.write(_struct_I.pack(length))
for val1 in self.data.graph.poses:
_v45 = val1.position
_x = _v45
buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))
_v46 = val1.orientation
_x = _v46
buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w))
length = len(self.data.graph.links)
buff.write(_struct_I.pack(length))
for val1 in self.data.graph.links:
_x = val1
buff.write(_get_struct_3i().pack(_x.fromId, _x.toId, _x.type))
_v47 = val1.transform
_v48 = _v47.translation
_x = _v48
buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))
_v49 = _v47.rotation
_x = _v49
buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w))
buff.write(val1.information.tostring())
length = len(self.data.nodes)
buff.write(_struct_I.pack(length))
for val1 in self.data.nodes:
_x = val1
buff.write(_get_struct_3id().pack(_x.id, _x.mapId, _x.weight, _x.stamp))
_x = val1.label
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_v50 = val1.pose
_v51 = _v50.position
_x = _v51
buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))
_v52 = _v50.orientation
_x = _v52
buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w))
_v53 = val1.groundTruthPose
_v54 = _v53.position
_x = _v54
buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))
_v55 = _v53.orientation
_x = _v55
buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w))
_v56 = val1.gps
_x = _v56
buff.write(_get_struct_6d().pack(_x.stamp, _x.longitude, _x.latitude, _x.altitude, _x.error, _x.bearing))
_x = val1.image
length = len(_x)
# - if encoded as a list instead, serialize as bytes instead of string
if type(_x) in [list, tuple]:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val1.depth
length = len(_x)
# - if encoded as a list instead, serialize as bytes instead of string
if type(_x) in [list, tuple]:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
length = len(val1.fx)
buff.write(_struct_I.pack(length))
pattern = '<%sf'%length
buff.write(val1.fx.tostring())
length = len(val1.fy)
buff.write(_struct_I.pack(length))
pattern = '<%sf'%length
buff.write(val1.fy.tostring())
length = len(val1.cx)
buff.write(_struct_I.pack(length))
pattern = '<%sf'%length
buff.write(val1.cx.tostring())
length = len(val1.cy)
buff.write(_struct_I.pack(length))
pattern = '<%sf'%length
buff.write(val1.cy.tostring())
length = len(val1.width)
buff.write(_struct_I.pack(length))
pattern = '<%sf'%length
buff.write(val1.width.tostring())
length = len(val1.height)
buff.write(_struct_I.pack(length))
pattern = '<%sf'%length
buff.write(val1.height.tostring())
buff.write(_get_struct_f().pack(val1.baseline))
length = len(val1.localTransform)
buff.write(_struct_I.pack(length))
for val2 in val1.localTransform:
_v57 = val2.translation
_x = _v57
buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))
_v58 = val2.rotation
_x = _v58
buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w))
_x = val1.laserScan
length = len(_x)
# - if encoded as a list instead, serialize as bytes instead of string
if type(_x) in [list, tuple]:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val1
buff.write(_get_struct_ifi().pack(_x.laserScanMaxPts, _x.laserScanMaxRange, _x.laserScanFormat))
_v59 = val1.laserScanLocalTransform
_v60 = _v59.translation
_x = _v60
buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))
_v61 = _v59.rotation
_x = _v61
buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w))
_x = val1.userData
length = len(_x)
# - if encoded as a list instead, serialize as bytes instead of string
if type(_x) in [list, tuple]:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val1.grid_ground
length = len(_x)
# - if encoded as a list instead, serialize as bytes instead of string
if type(_x) in [list, tuple]:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val1.grid_obstacles
length = len(_x)
# - if encoded as a list instead, serialize as bytes instead of string
if type(_x) in [list, tuple]:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val1.grid_empty_cells
length = len(_x)
# - if encoded as a list instead, serialize as bytes instead of string
if type(_x) in [list, tuple]:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
buff.write(_get_struct_f().pack(val1.grid_cell_size))
_v62 = val1.grid_view_point
_x = _v62
buff.write(_get_struct_3f().pack(_x.x, _x.y, _x.z))
length = len(val1.wordIds)
buff.write(_struct_I.pack(length))
pattern = '<%si'%length
buff.write(val1.wordIds.tostring())
length = len(val1.wordKpts)
buff.write(_struct_I.pack(length))
for val2 in val1.wordKpts:
_v63 = val2.pt
_x = _v63
buff.write(_get_struct_2f().pack(_x.x, _x.y))
_x = val2
buff.write(_get_struct_3f2i().pack(_x.size, _x.angle, _x.response, _x.octave, _x.class_id))
_v64 = val1.wordPts
_v65 = _v64.header
buff.write(_get_struct_I().pack(_v65.seq))
_v66 = _v65.stamp
_x = _v66
buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))
_x = _v65.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
| |
from __future__ import division #brings in Python 3.0 mixed type calculation rules
import logging
import numpy as np
import pandas as pd
class ScreenipFunctions(object):
"""
Function class for screenip.
"""
def __init__(self):
"""Class representing the functions for screenip"""
super(ScreenipFunctions, self).__init__()
def fw_bird(self):
"""
For birds, the daily water intake rate is calculated using the equation below.
This equation is representative of passerine birds, which represent the majority
of bird species visiting agricultural areas and which have higher daily water flux
requirements than other birds. As a result, the equations represent the most
conservative estimate of pesticide concentrations in water. The resulting daily
water intake rate for the 20 g bird is 0.0162 L.
Flux(water) = (1.180 * BW^0.874) / 1000
where: BW = 20 g
"""
"""
Using fixed value to correctly handle floating point decimals as compared to spreadsheet implementation
"""
fw_bird = 0.0162
self.out_fw_bird = pd.Series([fw_bird for x in range(self.no_of_runs)])
return self.out_fw_bird
# Daily water intake rate for mammals
def fw_mamm(self):
"""
For mammals, the daily water intake rate is calculated using the equation below.
This equation is representative of eutherian herbivore mammals, which have higher
daily water flux requirements compared to other mammals that visit agricultural areas.
The only equation that would generate higher estimates of daily water flux corresponds
to marsupial carnivores, which are not considered to be representative of the majority
of mammals that visit agricultural areas. The resulting daily water intake rate for a
1000 g mammal is 0.172 L.
Flux(water) = (0.708 * BW^0.795) / 1000
where: BW = 1000 g
"""
"""
Using fixed value to correctly handle floating point decimals as compared to spreadsheet implementation
"""
fw_mamm = 0.172
self.out_fw_mamm = pd.Series([fw_mamm for x in range(self.no_of_runs)])
return self.out_fw_mamm
# Upper bound estimate of exposure for birds
def dose_bird(self):
"""
The model calculates the upper bound estimate of exposure in drinking water
(dose-based; units in mg/kg-bw) by multiplying the daily water intake rate (L)
by the chemical solubility (mg/L) and then dividing by the body weight (in kg)
of the assessed animal (See equation below). In cases where water characteristics
(e.g., pH) influence the solubility of a chemical in water, the user should select
the highest available water solubility for use in SIP.
Dose = (Flux(water) * solubility) / BW
where: BW = body weight (kg) of the assessed bird (e.g. mallard duck, bobtail quail, other)
"""
conv = 1000.0
self.out_dose_bird = (self.out_fw_bird * self.solubility) / (self.bodyweight_assessed_bird / conv)
return self.out_dose_bird
# Upper bound estimate of exposure for mammals
def dose_mamm(self):
"""
The model calculates the upper bound estimate of exposure in drinking water
(dose-based; units in mg/kg-bw) by multiplying the daily water intake rate (L)
by the chemical solubility (mg/L) and then dividing by the body weight (in kg)
of the assessed animal (See equation below). In cases where water characteristics
(e.g., pH) influence the solubility of a chemical in water, the user should select
the highest available water solubility for use in SIP.
Dose = (Flux(water) * solubility) / BW
where: BW = body weight (kg) of the assessed animal (e.g. laboratory rat, other)
"""
conv = 1000.0
self.out_dose_mamm = (self.out_fw_mamm * self.solubility) / (self.bodyweight_assessed_mammal / conv)
return self.out_dose_mamm
# Acute adjusted toxicity value for birds
def at_bird(self):
"""
LD50 values for mammals and birds are adjusted using the same approach employed
by T-REX (USEPA 2008). These equations are provided below. In these equations,
AT = adjusted toxicity value (mg/kg-bw); LD50 = endpoint reported by toxicity study
(mg/kg-bw); TW = body weight of tested animal (350g rat, 1580g mallard duck, 178 g
Northern bobwhite quail or weight defined by the model user for an alternative species);
AT = LD50* (AW / TW)^(x-1)
where:
AW = body weight of assessed animal (g)
x = Mineau scaling factor. Chemical specific values for x may be located in the
worksheet titled "Mineau scaling factors." If no chemical specific data are available,
the default value of 1.15 should be used for this parameter.
"""
self.out_at_bird = self.ld50_avian_water * (
(self.bodyweight_assessed_bird / self.ld50_bodyweight_tested_bird) ** (self.mineau_scaling_factor - 1.))
return self.out_at_bird
# Acute adjusted toxicity value for mammals
def at_mamm(self):
"""
LD50 values for mammals and birds are adjusted using the same approach employed
by T-REX (USEPA 2008). These equations are provided below. In these equations,
AT = adjusted toxicity value (mg/kg-bw); LD50 = endpoint reported by toxicity study
(mg/kg-bw); TW = body weight of tested animal (350g rat, 1580g mallard duck, 178 g
Northern bobwhite quail or weight defined by the model user for an alternative species);
AT = LD50* (TW / AW)^0.25
where:
AW = body weight of assessed animal (g)
x = Mineau scaling factor. Chemical specific values for x may be located in the
worksheet titled "Mineau scaling factors." If no chemical specific data are available,
the default value of 1.15 should be used for this parameter.
"""
self.out_at_mamm = self.ld50_mammal_water * (
(self.ld50_bodyweight_tested_mammal / self.bodyweight_assessed_mammal) ** 0.25)
return self.out_at_mamm
# Adjusted chronic toxicity values for birds
# FI = Food Intake Rate
def fi_bird(self, bw_grams):
"""
Daily Food Intake Rate:
Chronic avian toxicity studies produce endpoints based on concentration in food, not dose.
The endpoint is a No Observed Adverse Effects Concentration (NOAEC) that is assumed to be
relevant to all birds, regardless of body weight. In order to convert a reported avian
NOAEC (mg/kg-diet) value to a dose equivalent toxicity value for the assessed animal,
the daily food (dry) intake of the test bird is considered. The daily food intake rate
(FI; units in kg-food) of the test bird is calculated using the equation below.
FI = 0.0582 * BW^0.651
where:
BW = body weight in kg (USEPA 1993). This equation corresponds to a daily food intake
rate for all birds, which generates a lower food intake rate compared to passerines.
The equation is more conservative because it results in a lower dose-equivalent toxicity value.
"""
#bw_grams is the bodyweight of test bird (it's a series with a value per model simulation run)
fi_bird = 0.0582 * ((bw_grams / 1000.) ** 0.651)
return fi_bird
# Dose-equivalent chronic toxicity value for birds
def det(self):
"""
Dose Equiv. Toxicity:
The FI value (kg-diet) is multiplied by the reported NOAEC (mg/kg-diet) and then divided by
the test animal's body weight to derive the dose-equivalent chronic toxicity value (mg/kg-bw):
Dose Equiv. Toxicity = (NOAEC * FI) / BW
NOTE: The user enters the lowest available NOAEC for the mallard duck, for the bobwhite quail,
and for any other test species. The model calculates the dose equivalent toxicity values for
all of the modeled values (Cells F20-24 and results worksheet) and then selects the lowest dose
equivalent toxicity value to represent the chronic toxicity of the chemical to birds.
"""
try:
# Body weight of bobtail quail is 178 g (assigned across all model simulation runs)
bw_quail_series = pd.Series([self.bodyweight_bobwhite_quail for x in range(self.no_of_runs)])
self.out_det_quail = (self.noaec_quail * self.fi_bird(bw_quail_series)) / (bw_quail_series / 1000.)
except Exception:
pass
try:
# Body weight of mallard duck is 1580 g (assigned across all model simulation runs)
bw_duck_series = pd.Series([self.bodyweight_mallard_duck for x in range(self.no_of_runs)])
self.out_det_duck = (self.noaec_duck * self.fi_bird(bw_duck_series)) / (bw_duck_series / 1000.)
except Exception:
pass
try:
self.out_det_other_1 = (self.noaec_bird_other_1 * self.fi_bird(self.noaec_bodyweight_bird_other_1)) / (
self.noaec_bodyweight_bird_other_1 / 1000.)
except Exception:
pass
# self.out_det_other_1 = pd.Series(None, list(range(self.chemical_name.size)))
try:
self.out_det_other_2 = (self.noaec_bird_other_2 * self.fi_bird(self.noaec_bodyweight_bird_other_2)) / (
self.noaec_bodyweight_bird_other_2 / 1000.)
except Exception:
pass
# self.out_det_other_2 = pd.Series(None, list(range(self.chemical_name.size)))
# Create DataFrame containing method Series created above
df_noaec = pd.DataFrame({
'out_det_quail': self.out_det_quail,
'out_det_duck': self.out_det_duck,
'out_det_other_1': self.out_det_other_1,
'out_det_other_2': self.out_det_other_2
})
# Create a Series of the minimum values for each row/model run of the above DataFrame
self.out_det = df_noaec.min(axis=1, numeric_only=True)
return self.out_det
    # Adjusted chronic toxicity value for mammals
def act(self):
"""
SIP relies upon the No Observed Adverse Effects Level (NOAEL; | |
ck, cache_name):
nlst = nls.copy()
def fun(at):
vec = nm.r_[ufun(at), vfun(at), at]
aux = nls.fun(vec)
i3 = len(at)
rt = aux[:i3] + aux[i3:2*i3] + aux[2*i3:]
return rt
@_cache(self, cache_name, self.conf.is_linear)
def fun_grad(at):
vec = None if self.conf.is_linear else nm.r_[ufun(at), vfun(at), at]
M, C, K = self.get_matrices(nls, vec)
Kt = M + cc * C + ck * K
return Kt
nlst.fun = fun
nlst.fun_grad = fun_grad
nlst.u = ufun
nlst.v = vfun
return nlst
    def _create_nlst_u(self, nls, dt, vfun, afun, cm, cc, cache_name):
        """Create a nonlinear-solver wrapper with displacement as unknown.

        ``vfun`` maps displacement to velocity and ``afun`` maps velocity
        to acceleration; ``cm``, ``cc`` are the scalar coefficients of the
        mass and damping matrices in the effective tangent matrix
        ``cm * M + cc * C + K``. ``dt`` is unused in the body itself —
        presumably kept for interface symmetry with the sibling
        ``_create_nlst_*`` factories (TODO confirm).
        """
        nlst = nls.copy()
        def fun(ut):
            # Recover velocity and acceleration from the displacement
            # iterate, then evaluate the full stacked residual.
            vt = vfun(ut)
            at = afun(vt)
            vec = nm.r_[ut, vt, at]
            aux = nls.fun(vec)
            # The residual is stacked in three equal-length thirds; sum
            # them to obtain the residual of the reduced (u-only) problem.
            i3 = len(at)
            rt = aux[:i3] + aux[i3:2*i3] + aux[2*i3:]
            return rt
        @_cache(self, cache_name, self.conf.is_linear)
        def fun_grad(ut):
            if self.conf.is_linear:
                # Linear problem: matrices do not depend on the state.
                vec = None
            else:
                vt = vfun(ut)
                at = afun(vt)
                vec = nm.r_[ut, vt, at]
            M, C, K = self.get_matrices(nls, vec)
            # Effective tangent matrix of the time-discretized system.
            Kt = cm * M + cc * C + K
            return Kt
        nlst.fun = fun
        nlst.fun_grad = fun_grad
        # Expose the update maps so callers can recover v, a from u.
        nlst.v = vfun
        nlst.a = afun
        return nlst
class VelocityVerletTS(ElastodynamicsBaseTS):
    """
    Solve elastodynamics problems by the velocity-Verlet method.
    The algorithm can be found in [1].
    [1] <NAME>.; <NAME>; <NAME>; <NAME> (1
    January 1982). "A computer simulation method for the calculation of
    equilibrium constants for the formation of physical clusters of molecules:
    Application to small water clusters". The Journal of Chemical Physics. 76
    (1): 648 (Appendix). doi:10.1063/1.442716
    """
    name = 'ts.velocity_verlet'
    _parameters = [
        ('t0', 'float', 0.0, False,
         'The initial time.'),
        ('t1', 'float', 1.0, False,
         'The final time.'),
        ('dt', 'float', None, False,
         'The time step. Used if `n_step` is not given.'),
        ('n_step', 'int', 10, False,
         'The number of time steps. Has precedence over `dt`.'),
        ('is_linear', 'bool', False, False,
         'If True, the problem is considered to be linear.'),
    ]
    def create_nlst(self, nls, dt, u0, v0, a0):
        """Create the nonlinear solver wrapper for one velocity-Verlet
        step; the unknown is the new acceleration.
        """
        # Half-step velocity and the full displacement update, both
        # computable before the new acceleration is known.
        vm = v0 + 0.5 * dt * a0
        u1 = u0 + dt * vm
        def v1(a):
            # Second velocity half-step, once the new acceleration is known.
            return vm + 0.5 * dt * a
        nlst = nls.copy()
        def fun(at):
            # Residual of the reduced problem: sum the three equal-length
            # thirds of the full stacked residual at (u1, vm, at).
            vec = nm.r_[u1, vm, at]
            aux = nls.fun(vec)
            i3 = len(at)
            rt = aux[:i3] + aux[i3:2*i3] + aux[2*i3:]
            return rt
        @_cache(self, 'matrix', self.conf.is_linear)
        def fun_grad(at):
            # Tangent w.r.t. acceleration is the mass matrix only.
            vec = None if self.conf.is_linear else nm.r_[u1, vm, at]
            M = self.get_matrices(nls, vec)[0]
            return M
        nlst.fun = fun
        nlst.fun_grad = fun_grad
        nlst.v1 = v1
        nlst.u1 = u1
        return nlst
    @standard_ts_call
    def __call__(self, vec0=None, nls=None, init_fun=None, prestep_fun=None,
                 poststep_fun=None, status=None, **kwargs):
        """
        Solve elastodynamics problems by the velocity-Verlet method.
        """
        nls = get_default(nls, self.nls)
        vec, unpack, pack = self.get_initial_vec(
            nls, vec0, init_fun, prestep_fun, poststep_fun)
        ts = self.ts
        for step, time in ts.iter_from(ts.step):
            output(self.format % (time, step + 1, ts.n_step),
                   verbose=self.verbose)
            dt = ts.dt
            prestep_fun(ts, vec)
            # Solve for the new acceleration, then complete the velocity
            # update and pack the new state.
            ut, vt, at = unpack(vec)
            nlst = self.create_nlst(nls, dt, ut, vt, at)
            atp = nlst(at)
            vtp = nlst.v1(atp)
            utp = nlst.u1
            vect = pack(utp, vtp, atp)
            poststep_fun(ts, vect)
            vec = vect
        return vec
class NewmarkTS(ElastodynamicsBaseTS):
    r"""
    Solve elastodynamics problems by the Newmark method.
    The method was introduced in [1]. Common settings [2]:
    ==================== ======== ==== ===== ==========
    name kind beta gamma Omega_crit
    ==================== ======== ==== ===== ==========
    trapezoidal rule: implicit 1/4 1/2 unconditional
    linear acceleration: implicit 1/6 1/2 :math:`2\sqrt{3}`
    Fox-Goodwin: implicit 1/12 1/2 :math:`\sqrt{6}`
    central difference: explicit 0 1/2 2
    ==================== ======== ==== ===== ==========
    All of these methods are 2-order of accuracy.
    [1] <NAME>. (1959) A method of computation for structural dynamics.
    Journal of Engineering Mechanics, ASCE, 85 (EM3) 67-94.
    [2] <NAME>, <NAME>: Solvers for Computational Mechanics
    """
    # NOTE: the docstring is now a raw string — it contains ``\sqrt``,
    # which is an invalid escape sequence in a normal string literal
    # (SyntaxWarning in recent Python versions).
    name = 'ts.newmark'
    _parameters = [
        ('t0', 'float', 0.0, False,
         'The initial time.'),
        ('t1', 'float', 1.0, False,
         'The final time.'),
        ('dt', 'float', None, False,
         'The time step. Used if `n_step` is not given.'),
        ('n_step', 'int', 10, False,
         'The number of time steps. Has precedence over `dt`.'),
        ('is_linear', 'bool', False, False,
         'If True, the problem is considered to be linear.'),
        ('beta', 'float', 0.25, False, 'The Newmark method parameter beta.'),
        ('gamma', 'float', 0.5, False, 'The Newmark method parameter gamma.'),
    ]
    def create_nlst(self, nls, dt, gamma, beta, u0, v0, a0):
        """Create the nonlinear solver wrapper for one Newmark step; the
        unknown is the new acceleration.
        """
        dt2 = dt**2
        def v(a):
            # Newmark velocity update.
            return v0 + dt * ((1.0 - gamma) * a0 + gamma * a)
        def u(a):
            # Newmark displacement update.
            return u0 + dt * v0 + dt2 * ((0.5 - beta) * a0 + beta * a)
        nlst = self._create_nlst_a(nls, dt, u, v, gamma * dt, beta * dt2,
                                   'matrix')
        return nlst
    @standard_ts_call
    def __call__(self, vec0=None, nls=None, init_fun=None, prestep_fun=None,
                 poststep_fun=None, status=None, **kwargs):
        """
        Solve elastodynamics problems by the Newmark method.
        """
        conf = self.conf
        nls = get_default(nls, self.nls)
        vec, unpack, pack = self.get_initial_vec(
            nls, vec0, init_fun, prestep_fun, poststep_fun)
        ts = self.ts
        for step, time in ts.iter_from(ts.step):
            output(self.format % (time, step + 1, ts.n_step),
                   verbose=self.verbose)
            dt = ts.dt
            prestep_fun(ts, vec)
            # Solve for the new acceleration, then evaluate the Newmark
            # velocity/displacement updates and pack the new state.
            ut, vt, at = unpack(vec)
            nlst = self.create_nlst(nls, dt, conf.gamma, conf.beta, ut, vt, at)
            atp = nlst(at)
            vtp = nlst.v(atp)
            utp = nlst.u(atp)
            vect = pack(utp, vtp, atp)
            poststep_fun(ts, vect)
            vec = vect
        return vec
class GeneralizedAlphaTS(ElastodynamicsBaseTS):
r"""
Solve elastodynamics problems by the generalized :math:`\alpha` method.
- The method was introduced in [1].
- The method is unconditionally stable provided :math:`\alpha_m \leq
\alpha_f \leq \frac{1}{2}`, :math:`\beta >= \frac{1}{4} +
\frac{1}{2}(\alpha_f - \alpha_m)`.
- The method is second-order accurate provided :math:`\gamma = \frac{1}{2} -
\alpha_m + \alpha_f`. This is used when `gamma` is ``None``.
- High frequency dissipation is maximized for :math:`\beta = \frac{1}{4}(1
- \alpha_m + \alpha_f)^2`. This is used when `beta` is ``None``.
- The default values of :math:`\alpha_m`, :math:`\alpha_f` (if `alpha_m` or
`alpha_f` are ``None``) are based on the user specified high-frequency
dissipation parameter `rho_inf`.
Special settings:
- :math:`\alpha_m = 0` corresponds to the HHT-:math:`\alpha` method.
- :math:`\alpha_f = 0` corresponds to the WBZ-:math:`\alpha` method.
- :math:`\alpha_m = 0`, :math:`\alpha_f = 0` produces the Newmark method.
[1] <NAME>, G.M.Hubert. "A Time Integration Algorithm for Structural
Dynamics with Improved Numerical Dissipation: The
Generalized-:math:`\alpha` Method" ASME Journal of Applied Mechanics, 60,
371:375, 1993.
"""
name = 'ts.generalized_alpha'
_parameters = [
('t0', 'float', 0.0, False,
'The initial time.'),
('t1', 'float', 1.0, False,
'The final time.'),
('dt', 'float', None, False,
'The time step. Used if `n_step` is not given.'),
('n_step', 'int', 10, False,
'The number of time steps. Has precedence over `dt`.'),
('is_linear', 'bool', False, False,
'If True, the problem is considered to be linear.'),
('rho_inf', 'float', 0.5, False,
"""The spectral radius in the high frequency limit (user specified
high-frequency dissipation) in [0, 1]:
1 = no dissipation, 0 = asymptotic annihilation."""),
('alpha_m', 'float', None, False,
r'The parameter :math:`\alpha_m`.'),
('alpha_f', 'float', None, False,
r'The parameter :math:`\alpha_f`.'),
('beta', 'float', None, False,
r'The Newmark-like parameter :math:`\beta`.'),
('gamma', 'float', None, False,
r'The Newmark-like parameter :math:`\gamma`.'),
]
    def create_nlst(self, nls, dt, alpha_m, alpha_f, gamma, beta, u0, v0, a0):
        """Create the nonlinear solver wrapper for one generalized-alpha
        step.

        ``u1``/``v1`` are the Newmark-style end-of-step updates, ``u``/``v``
        are their :math:`\\alpha_f`-weighted combinations with the old
        state, and ``a1`` inverts the :math:`\\alpha_m` weighting to
        recover the end-of-step acceleration from the solved one.
        """
        dt2 = dt**2
        def v1(a):
            # Newmark velocity update (end of step).
            return v0 + dt * ((1.0 - gamma) * a0 + gamma * a)
        def u1(a):
            # Newmark displacement update (end of step).
            return u0 + dt * v0 + dt2 * ((0.5 - beta) * a0 + beta * a)
        def v(a):
            # alpha_f-weighted combination of new and old velocities.
            return (1.0 - alpha_f) * v1(a) + alpha_f * v0
        def u(a):
            # alpha_f-weighted combination of new and old displacements.
            return (1.0 - alpha_f) * u1(a) + alpha_f * u0
        def a1(am):
            # Recover the end-of-step acceleration from the weighted one.
            return (am - alpha_m * a0) / (1.0 - alpha_m)
        nlst = self._create_nlst_a(nls, dt, u, v,
                                   (1.0 - alpha_f) * gamma * dt,
                                   (1.0 - alpha_f) * beta * dt2,
                                   'matrix')
        # Expose the end-of-step maps for use after the solve.
        nlst.u1 = u1
        nlst.v1 = v1
        nlst.a1 = a1
        return nlst
@standard_ts_call
def __call__(self, vec0=None, nls=None, init_fun=None, prestep_fun=None,
poststep_fun=None, status=None, **kwargs):
"""
Solve elastodynamics problems by the generalized :math:`\alpha` method.
"""
conf = self.conf
nls = get_default(nls, self.nls)
rho_inf = conf.rho_inf
alpha_m = get_default(conf.alpha_m,
(2.0 * rho_inf - 1.0) / (rho_inf + 1.0))
alpha_f = get_default(conf.alpha_f, rho_inf / (rho_inf + 1.0))
beta = get_default(conf.beta, 0.25 * (1.0 - alpha_m + alpha_f)**2)
gamma = get_default(conf.gamma, 0.5 - alpha_m + alpha_f)
output('parameters rho_inf, alpha_m, alpha_f, beta, gamma:',
verbose=self.verbose)
output(rho_inf, alpha_m, alpha_f, beta, gamma,
verbose=self.verbose)
vec, unpack, pack = self.get_initial_vec(
nls, vec0, init_fun, prestep_fun, poststep_fun)
ts = | |
int_proto_state,
'interface-mac': None,
'ip-address': ip_address}
x = next((x for x in result if int_type == x['interface-type'] and
int_name == x['interface-name']), None)
if x is not None:
results.update(x)
ip_result.append(results)
return ip_result
@staticmethod
def get_interface_detail_request(last_interface_name,
last_interface_type):
""" Creates a new Netconf request based on the last received
interface name and type when the hasMore flag is true
"""
request_interface = ET.Element(
'get-interface-detail',
xmlns="urn:brocade.com:mgmt:brocade-interface-ext"
)
if last_interface_name != '':
last_received_int = ET.SubElement(request_interface,
"last-rcvd-interface")
last_int_type_el = ET.SubElement(last_received_int,
"interface-type")
last_int_type_el.text = last_interface_type
last_int_name_el = ET.SubElement(last_received_int,
"interface-name")
last_int_name_el.text = last_interface_name
return request_interface
    @property
    def interface_detail(self):
        """list[dict]: A list of dictionary items describing the
        interface type, name, role, mac, admin and operational
        state of interfaces of all rbridges.
        This method currently only lists the Physical Interfaces (
        Gigabitethernet, tengigabitethernet, fortygigabitethernet,
        hundredgigabitethernet) and port-channel
        """
        urn = "{urn:brocade.com:mgmt:brocade-interface-ext}"
        result = []
        # Pagination state: keep requesting pages until the device stops
        # reporting has-more == 'true'.
        has_more = ''
        last_interface_name = ''
        last_interface_type = ''
        while (has_more == '') or (has_more == 'true'):
            request_interface = self.get_interface_detail_request(
                last_interface_name, last_interface_type)
            interface_result = self._callback(request_interface, 'get')
            has_more = interface_result.find('%shas-more' % urn).text
            for item in interface_result.findall('%sinterface' % urn):
                interface_type = item.find('%sinterface-type' % urn).text
                interface_name = item.find('%sinterface-name' % urn).text
                # Remember the last interface seen for the next page request.
                last_interface_type = interface_type
                last_interface_name = interface_name
                # Only physical ethernet interfaces and port-channels are
                # reported; other interface types in the reply are skipped.
                if "gigabitethernet" in interface_type or\
                        "port-channel" in interface_type:
                    if "gigabitethernet" in interface_type:
                        interface_role = item.find('%sport-role' % urn).text
                    else:
                        # port-channels carry no port-role element.
                        interface_role = "None"
                    if_name = item.find('%sif-name' % urn).text
                    interface_state = item.find('%sif-state' % urn).text
                    interface_proto_state = item.find('%sline-protocol-state' %
                                                      urn).text
                    interface_mac = item.find(
                        '%scurrent-hardware-address' % urn).text
                    item_results = {'interface-type': interface_type,
                                    'interface-name': interface_name,
                                    'interface-role': interface_role,
                                    'if-name': if_name,
                                    'interface-state': interface_state,
                                    'interface-proto-state':
                                        interface_proto_state,
                                    'interface-mac': interface_mac}
                    result.append(item_results)
        return result
@property
def switchport_list(self):
"""list[dict]:A list of dictionary items describing the details
of list of dictionary items describing the details of switch port"""
urn = "{urn:brocade.com:mgmt:brocade-interface-ext}"
result = []
request_interface = self.get_interface_switchport_request()
interface_result = self._callback(request_interface, 'get')
for interface in interface_result.findall('%sswitchport' % urn):
vlans = []
interface_type = self.get_node_value(interface, '%sinterface-type',
urn)
interface_name = self.get_node_value(interface, '%sinterface-name',
urn)
mode = self.get_node_value(interface, '%smode', urn)
intf = interface.find('%sactive-vlans' % urn)
for vlan_node in intf.findall('%svlanid' % urn):
vlan = vlan_node.text
vlans.append(vlan)
results = {'vlan-id': vlans,
'mode': mode,
'interface-name': interface_name,
'interface_type': interface_type}
result.append(results)
return result
    @property
    def vlans(self):
        """list[dict]: A list of dictionary items describing the details of
        vlan interfaces.
        This method fetches the VLAN interfaces
        Examples:
            >>> import pynos.device
            >>> switch = '10.24.39.202'
            >>> auth = ('<PASSWORD>', 'password')
            >>> conn = (switch, '22')
            >>> with pynos.device.Device(conn=conn, auth=auth) as dev:
            ...     output = dev.interface.add_vlan_int('736')
            ...     interfaces = dev.interface.vlans
            ...     is_vlan_interface_present = False
            ...     for interface in interfaces:
            ...         if interface['vlan-id'] == '736':
            ...             is_vlan_interface_present = True
            ...             break
            ...     dev.interface.del_vlan_int('736')
            ...     assert is_vlan_interface_present
            True
        """
        urn = "{urn:brocade.com:mgmt:brocade-interface-ext}"
        result = []
        # Pagination state: keep requesting pages until the device stops
        # reporting has-more == 'true'.
        has_more = ''
        last_vlan_id = ''
        while (has_more == '') or (has_more == 'true'):
            request_interface = self.get_vlan_brief_request(last_vlan_id)
            interface_result = self._callback(request_interface, 'get')
            has_more = self.get_node_value(interface_result, '%shas-more', urn)
            # The device reports the resume point for the next page.
            last_vlan_id = self.get_node_value(
                interface_result, '%slast-vlan-id', urn)
            for interface in interface_result.findall('%svlan' % urn):
                vlan_id = self.get_node_value(interface, '%svlan-id', urn)
                vlan_type = self.get_node_value(interface, '%svlan-type', urn)
                vlan_name = self.get_node_value(interface, '%svlan-name', urn)
                vlan_state = self.get_node_value(
                    interface, '%svlan-state', urn)
                ports = []
                # Collect the member interfaces of this VLAN.
                for intf in interface.findall('%sinterface' % urn):
                    interface_type = self.get_node_value(
                        intf, '%sinterface-type', urn)
                    interface_name = self.get_node_value(
                        intf, '%sinterface-name', urn)
                    tag = self.get_node_value(intf, '%stag', urn)
                    port_results = {'interface-type': interface_type,
                                    'interface-name': interface_name,
                                    'tag': tag}
                    ports.append(port_results)
                results = {'interface-name': vlan_name,
                           'vlan-state': vlan_state,
                           'vlan-id': vlan_id,
                           'vlan-type': vlan_type,
                           'interface': ports}
                result.append(results)
        return result
@staticmethod
def get_interface_switchport_request():
"""Creates a new Netconf request"""
request_interface = ET.Element(
'get-interface-switchport',
xmlns="urn:brocade.com:mgmt:brocade-interface-ext"
)
return request_interface
@staticmethod
def get_vlan_brief_request(last_vlan_id):
""" Creates a new Netconf request based on the last received
vlan id when the hasMore flag is true
"""
request_interface = ET.Element(
'get-vlan-brief',
xmlns="urn:brocade.com:mgmt:brocade-interface-ext"
)
if last_vlan_id != '':
last_received_int_el = ET.SubElement(request_interface,
"last-rcvd-vlan-id")
last_received_int_el.text = last_vlan_id
return request_interface
    @property
    def port_channels(self):
        """list[dict]: A list of dictionary items of port channels.
        Examples:
            >>> import pynos.device
            >>> switches = ['10.24.39.202']
            >>> auth = ('admin', 'password')
            >>> for switch in switches:
            ...     conn = (switch, '22')
            ...     with pynos.device.Device(conn=conn, auth=auth) as dev:
            ...         output = dev.interface.channel_group(name='226/0/1',
            ...                         int_type='tengigabitethernet',
            ...                         port_int='1', channel_type='standard', mode='active')
            ...         result = dev.interface.port_channels
            ...         is_port_channel_exist = False
            ...         for port_chann in result:
            ...             if port_chann['interface-name']=='port-channel-1':
            ...                 for interfaces in port_chann['interfaces']:
            ...                     for keys, values in interfaces.items():
            ...                         if '226/0/1' in values:
            ...                             is_port_channel_exist = True
            ...                             break
            ...         output = dev.interface.remove_port_channel(
            ...                         port_int='1')
            ...         assert is_port_channel_exist
        """
        pc_urn = "{urn:brocade.com:mgmt:brocade-lag}"
        result = []
        # Pagination state: keep requesting pages until the device stops
        # reporting has-more == 'true'.
        has_more = ''
        last_aggregator_id = ''
        while (has_more == '') or (has_more == 'true'):
            request_port_channel = self.get_port_chann_detail_request(
                last_aggregator_id)
            port_channel_result = self._callback(request_port_channel, 'get')
            has_more = self.get_node_value(port_channel_result,
                                           '%shas-more', pc_urn)
            if has_more == 'true':
                # Scan all lacp entries; the loop leaves the last entry's
                # aggregator id as the resume point for the next page.
                for x in port_channel_result.findall('%slacp' % pc_urn):
                    last_aggregator_id = self.get_node_value(x,
                                                             '%saggregator-id',
                                                             pc_urn)
            for item in port_channel_result.findall('%slacp' % pc_urn):
                interface_list = []
                # LACP aggregator-level attributes.
                aggregator_id = self.get_node_value(
                    item, '%saggregator-id', pc_urn)
                aggregator_type = self.get_node_value(
                    item, '%saggregator-type', pc_urn)
                is_vlag = self.get_node_value(item, '%sisvlag', pc_urn)
                aggregator_mode = self.get_node_value(
                    item, '%saggregator-mode', pc_urn)
                system_priority = self.get_node_value(
                    item, '%ssystem-priority', pc_urn)
                actor_system_id = self.get_node_value(
                    item, '%sactor-system-id', pc_urn)
                partner_oper_priority = self.get_node_value(
                    item, '%spartner-oper-priority', pc_urn)
                partner_system_id = self.get_node_value(
                    item, '%spartner-system-id', pc_urn)
                admin_key = self.get_node_value(
                    item, '%sadmin-key', pc_urn)
                oper_key = self.get_node_value(item, '%soper-key', pc_urn)
                partner_oper_key = self.get_node_value(
                    item, '%spartner-oper-key', pc_urn)
                rx_link_count = self.get_node_value(
                    item, '%srx-link-count', pc_urn)
                tx_link_count = self.get_node_value(
                    item, '%stx-link-count', pc_urn)
                individual_agg = self.get_node_value(
                    item, '%sindividual-agg', pc_urn)
                ready_agg = self.get_node_value(
                    item, '%sready-agg', pc_urn)
                # Per-member interface attributes of this aggregator.
                for item1 in item.findall('%saggr-member' % pc_urn):
                    rbridge_id = self.get_node_value(
                        item1, '%srbridge-id', pc_urn)
                    int_type = self.get_node_value(
                        item1, '%sinterface-type', pc_urn)
                    int_name = self.get_node_value(
                        item1, '%sinterface-name', pc_urn)
                    actor_port = self.get_node_value(
                        item1, '%sactor-port', pc_urn)
                    sync = self.get_node_value(item1, '%ssync', pc_urn)
                    port_channel_interface = {'rbridge-id': rbridge_id,
                                              'interface-type': int_type,
                                              'interface-name': int_name,
                                              'actor_port': actor_port,
                                              'sync': sync}
                    interface_list.append(port_channel_interface)
                results = {'interface-name': 'port-channel-' + aggregator_id,
                           'interfaces': interface_list,
                           'aggregator_id': aggregator_id,
                           'aggregator_type': aggregator_type,
                           'is_vlag': is_vlag,
                           'aggregator_mode': aggregator_mode,
                           'system_priority': system_priority,
                           'actor_system_id': actor_system_id,
                           'partner-oper-priority': partner_oper_priority,
                           'partner-system-id': partner_system_id,
                           'admin-key': admin_key,
                           'oper-key': oper_key,
                           'partner-oper-key': partner_oper_key,
                           'rx-link-count': rx_link_count,
                           'tx-link-count': tx_link_count,
                           'individual-agg': individual_agg,
                           'ready-agg': ready_agg}
                result.append(results)
        return result
@staticmethod
def get_node_value(node, node_name, urn):
value = node.find(node_name % urn)
if value is not None:
return value.text
else:
return ''
@staticmethod
def get_port_chann_detail_request(last_aggregator_id):
""" Creates a new Netconf request based on the last received
aggregator id when the hasMore flag is true
"""
port_channel_ns = 'urn:brocade.com:mgmt:brocade-lag'
request_port_channel = ET.Element('get-port-channel-detail',
xmlns=port_channel_ns)
if last_aggregator_id != '':
last_received_port_chann_el = ET.SubElement(request_port_channel,
"last-aggregator-id")
last_received_port_chann_el.text = last_aggregator_id
return request_port_channel
    def bfd(self, **kwargs):
        """Bidirectional Forwarding Detection configuration.

        Not implemented in this driver; always raises
        NotImplementedError.
        """
        raise NotImplementedError
def vrrpe_spf_basic(self, **kwargs):
"""Set vrrpe short path forwarding to default.
Args:
int_type (str): Type of interface. (gigabitethernet,
tengigabitethernet, etc).
name (str): Name of interface. (1/0/5, 1/0/10, etc).
enable (bool): If vrrpe short path fowarding should be enabled
or disabled.Default:``True``.
get (bool) : Get config instead of editing config. (True, False)
vrid (str): vrrpe router ID.
rbridge_id (str): rbridge-id for device. Only required when type is
`ve`.
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
KeyError: if `int_type`, `name`, `vrid` is not passed.
ValueError: if `int_type`, `name`, `vrid` is invalid.
Examples:
>>> import pynos.device
>>> switches = ['10.24.39.211', '10.24.39.203']
>>> auth = ('admin', 'password')
>>> for switch in switches:
... conn = (switch, '22')
... with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.services.vrrpe(ip_version='6',
... enable=True, rbridge_id='225')
... output = dev.interface.vrrpe_vip(int_type='ve',
... name='89', vrid='1',
... vip='2002:4818:f000:1ab:cafe:beef:1000:1/64',
... output = dev.interface.vrrpe_vip(int_type='ve',
... name='89',
... vrid='1', vip='fc00:e968:6179::de52:7100/64',
... rbridge_id='225')
... output = dev.services.vrrpe(enable=False,
... rbridge_id='225')
... output = dev.interface.vrrpe_spf_basic(int_type='ve',
... name='89', vrid='1', rbridge_id='1')
"""
int_type = kwargs.pop('int_type').lower()
name = kwargs.pop('name')
vrid = kwargs.pop('vrid')
enable = kwargs.pop('enable', True)
get = kwargs.pop('get', False)
rbridge_id = kwargs.pop('rbridge_id', '1')
callback = kwargs.pop('callback', self._callback)
valid_int_types = ['gigabitethernet', 'tengigabitethernet',
'fortygigabitethernet', 'hundredgigabitethernet',
'port_channel', 've']
vrrpe_args = dict(name=name, vrid=vrid)
method_class = self._interface
if get:
enable = None
if int_type not in valid_int_types:
raise ValueError('`int_type` must be one of: %s' %
repr(valid_int_types))
method_name = 'interface_%s_vrrpe_short_path_forwarding_basic' % \
int_type
if int_type == 've':
method_name = 'rbridge_id_%s' % method_name
method_class = self._rbridge
vrrpe_args['rbridge_id'] = rbridge_id
if not pynos.utilities.valid_vlan_id(name):
raise InvalidVlanId("`name` must be between `1` and `8191`")
elif not pynos.utilities.valid_interface(int_type, name):
raise ValueError('`name` must be in the format of x/y/z for '
'physical interfaces or x for port | |
self.vtType in id_scope.remap:
id_remap[(id_scope.remap[self.vtType], self.db_id)] = new_id
else:
id_remap[(self.vtType, self.db_id)] = new_id
cp.db_id = new_id
if hasattr(self, 'db_objectId') and (self._db_what, self._db_objectId) in id_remap:
cp._db_objectId = id_remap[(self._db_what, self._db_objectId)]
if hasattr(self, 'db_parentObjId') and (self._db_parentObjType, self._db_parentObjId) in id_remap:
cp._db_parentObjId = id_remap[(self._db_parentObjType, self._db_parentObjId)]
# recreate indices and set flags
if not new_ids:
cp.is_dirty = self.is_dirty
cp.is_new = self.is_new
return cp
@staticmethod
def update_version(old_obj, trans_dict, new_obj=None):
if new_obj is None:
new_obj = DBDelete()
class_dict = {}
if new_obj.__class__.__name__ in trans_dict:
class_dict = trans_dict[new_obj.__class__.__name__]
if 'id' in class_dict:
res = class_dict['id'](old_obj, trans_dict)
new_obj.db_id = res
elif hasattr(old_obj, 'db_id') and old_obj.db_id is not None:
new_obj.db_id = old_obj.db_id
if 'what' in class_dict:
res = class_dict['what'](old_obj, trans_dict)
new_obj.db_what = res
elif hasattr(old_obj, 'db_what') and old_obj.db_what is not None:
new_obj.db_what = old_obj.db_what
if 'objectId' in class_dict:
res = class_dict['objectId'](old_obj, trans_dict)
new_obj.db_objectId = res
elif hasattr(old_obj, 'db_objectId') and old_obj.db_objectId is not None:
new_obj.db_objectId = old_obj.db_objectId
if 'parentObjId' in class_dict:
res = class_dict['parentObjId'](old_obj, trans_dict)
new_obj.db_parentObjId = res
elif hasattr(old_obj, 'db_parentObjId') and old_obj.db_parentObjId is not None:
new_obj.db_parentObjId = old_obj.db_parentObjId
if 'parentObjType' in class_dict:
res = class_dict['parentObjType'](old_obj, trans_dict)
new_obj.db_parentObjType = res
elif hasattr(old_obj, 'db_parentObjType') and old_obj.db_parentObjType is not None:
new_obj.db_parentObjType = old_obj.db_parentObjType
new_obj.is_new = old_obj.is_new
new_obj.is_dirty = old_obj.is_dirty
return new_obj
def db_children(self, parent=(None,None), orphan=False):
return [(self, parent[0], parent[1])]
def db_deleted_children(self, remove=False):
children = []
return children
def has_changes(self):
if self.is_dirty:
return True
return False
    def __get_db_id(self):
        """Getter backing the ``db_id`` property."""
        return self._db_id
    def __set_db_id(self, id):
        # Any assignment through the property marks the object dirty.
        self._db_id = id
        self.is_dirty = True
    db_id = property(__get_db_id, __set_db_id)
    def db_add_id(self, id):
        # Generated scalar-field API: add/change both overwrite directly
        # (without touching is_dirty, unlike the property setter).
        self._db_id = id
    def db_change_id(self, id):
        self._db_id = id
    def db_delete_id(self, id):
        # The argument is ignored; deletion just clears the field.
        self._db_id = None
    def __get_db_what(self):
        """Getter backing the ``db_what`` property."""
        return self._db_what
    def __set_db_what(self, what):
        # Any assignment through the property marks the object dirty.
        self._db_what = what
        self.is_dirty = True
    db_what = property(__get_db_what, __set_db_what)
    def db_add_what(self, what):
        # Generated scalar-field API: add/change both overwrite directly.
        self._db_what = what
    def db_change_what(self, what):
        self._db_what = what
    def db_delete_what(self, what):
        # The argument is ignored; deletion just clears the field.
        self._db_what = None
    def __get_db_objectId(self):
        """Getter backing the ``db_objectId`` property."""
        return self._db_objectId
    def __set_db_objectId(self, objectId):
        # Any assignment through the property marks the object dirty.
        self._db_objectId = objectId
        self.is_dirty = True
    db_objectId = property(__get_db_objectId, __set_db_objectId)
    def db_add_objectId(self, objectId):
        # Generated scalar-field API: add/change both overwrite directly.
        self._db_objectId = objectId
    def db_change_objectId(self, objectId):
        self._db_objectId = objectId
    def db_delete_objectId(self, objectId):
        # The argument is ignored; deletion just clears the field.
        self._db_objectId = None
def __get_db_parentObjId(self):
return self._db_parentObjId
def __set_db_parentObjId(self, parentObjId):
self._db_parentObjId = parentObjId
self.is_dirty = True
db_parentObjId = property(__get_db_parentObjId, __set_db_parentObjId)
def db_add_parentObjId(self, parentObjId):
self._db_parentObjId = parentObjId
def db_change_parentObjId(self, parentObjId):
self._db_parentObjId = parentObjId
def db_delete_parentObjId(self, parentObjId):
self._db_parentObjId = None
def __get_db_parentObjType(self):
return self._db_parentObjType
def __set_db_parentObjType(self, parentObjType):
self._db_parentObjType = parentObjType
self.is_dirty = True
db_parentObjType = property(__get_db_parentObjType, __set_db_parentObjType)
def db_add_parentObjType(self, parentObjType):
self._db_parentObjType = parentObjType
def db_change_parentObjType(self, parentObjType):
self._db_parentObjType = parentObjType
def db_delete_parentObjType(self, parentObjType):
self._db_parentObjType = None
def getPrimaryKey(self):
return self._db_id
class DBVistrail(object):
vtType = 'vistrail'
def __init__(self, id=None, entity_type=None, version=None, name=None, last_modified=None, actions=None, tags=None, annotations=None):
self._db_id = id
self._db_entity_type = entity_type
self._db_version = version
self._db_name = name
self._db_last_modified = last_modified
self.db_deleted_actions = []
self.db_actions_id_index = {}
if actions is None:
self._db_actions = []
else:
self._db_actions = actions
for v in self._db_actions:
self.db_actions_id_index[v.db_id] = v
self.db_deleted_tags = []
self.db_tags_id_index = {}
self.db_tags_name_index = {}
if tags is None:
self._db_tags = []
else:
self._db_tags = tags
for v in self._db_tags:
self.db_tags_id_index[v.db_id] = v
self.db_tags_name_index[v.db_name] = v
self.db_deleted_annotations = []
self.db_annotations_id_index = {}
self.db_annotations_key_index = {}
if annotations is None:
self._db_annotations = []
else:
self._db_annotations = annotations
for v in self._db_annotations:
self.db_annotations_id_index[v.db_id] = v
self.db_annotations_key_index[v.db_key] = v
self.is_dirty = True
self.is_new = True
    def __copy__(self):
        # copy.copy() hook: delegate to do_copy() with no id remapping.  The
        # explicit unbound call keeps DBVistrail's own implementation.
        return DBVistrail.do_copy(self)
    def do_copy(self, new_ids=False, id_scope=None, id_remap=None):
        """Return a deep copy of this vistrail.

        Child actions/tags/annotations are copied recursively via their own
        do_copy.  When new_ids is True, a fresh id is drawn from id_scope and
        the mapping (type, old_id) -> new_id is recorded in id_remap so callers
        can fix up cross-references; the copy then keeps the dirty/new flags
        set by __init__.  Otherwise the flags are carried over from self.
        """
        cp = DBVistrail(id=self._db_id,
                        entity_type=self._db_entity_type,
                        version=self._db_version,
                        name=self._db_name,
                        last_modified=self._db_last_modified)
        # Deep-copy child collections.  The None guards are defensive;
        # __init__ normalizes these attributes to lists.
        if self._db_actions is None:
            cp._db_actions = []
        else:
            cp._db_actions = [v.do_copy(new_ids, id_scope, id_remap) for v in self._db_actions]
        if self._db_tags is None:
            cp._db_tags = []
        else:
            cp._db_tags = [v.do_copy(new_ids, id_scope, id_remap) for v in self._db_tags]
        if self._db_annotations is None:
            cp._db_annotations = []
        else:
            cp._db_annotations = [v.do_copy(new_ids, id_scope, id_remap) for v in self._db_annotations]
        # set new ids
        if new_ids:
            new_id = id_scope.getNewId(self.vtType)
            # NOTE(review): id_scope.remap appears to alias this vtType to a
            # different key space for the remap table -- confirm against the
            # id_scope implementation.
            if self.vtType in id_scope.remap:
                id_remap[(id_scope.remap[self.vtType], self.db_id)] = new_id
            else:
                id_remap[(self.vtType, self.db_id)] = new_id
            cp.db_id = new_id
        # recreate indices and set flags
        cp.db_actions_id_index = dict((v.db_id, v) for v in cp._db_actions)
        cp.db_tags_id_index = dict((v.db_id, v) for v in cp._db_tags)
        cp.db_tags_name_index = dict((v.db_name, v) for v in cp._db_tags)
        cp.db_annotations_id_index = dict((v.db_id, v) for v in cp._db_annotations)
        cp.db_annotations_key_index = dict((v.db_key, v) for v in cp._db_annotations)
        if not new_ids:
            cp.is_dirty = self.is_dirty
            cp.is_new = self.is_new
        return cp
    @staticmethod
    def update_version(old_obj, trans_dict, new_obj=None):
        """Translate old_obj into the current schema, returning new_obj.

        trans_dict optionally maps class names to a dict of per-field
        translation callables; any field without an override is copied
        directly.  Child collections are translated element by element via the
        child classes' own update_version, and pending deletions
        (db_deleted_*) are carried over so they survive the version bump.
        """
        if new_obj is None:
            new_obj = DBVistrail()
        class_dict = {}
        if new_obj.__class__.__name__ in trans_dict:
            class_dict = trans_dict[new_obj.__class__.__name__]
        # Scalar fields: prefer the translation override, else copy when set.
        if 'id' in class_dict:
            res = class_dict['id'](old_obj, trans_dict)
            new_obj.db_id = res
        elif hasattr(old_obj, 'db_id') and old_obj.db_id is not None:
            new_obj.db_id = old_obj.db_id
        if 'entity_type' in class_dict:
            res = class_dict['entity_type'](old_obj, trans_dict)
            new_obj.db_entity_type = res
        elif hasattr(old_obj, 'db_entity_type') and old_obj.db_entity_type is not None:
            new_obj.db_entity_type = old_obj.db_entity_type
        if 'version' in class_dict:
            res = class_dict['version'](old_obj, trans_dict)
            new_obj.db_version = res
        elif hasattr(old_obj, 'db_version') and old_obj.db_version is not None:
            new_obj.db_version = old_obj.db_version
        if 'name' in class_dict:
            res = class_dict['name'](old_obj, trans_dict)
            new_obj.db_name = res
        elif hasattr(old_obj, 'db_name') and old_obj.db_name is not None:
            new_obj.db_name = old_obj.db_name
        if 'last_modified' in class_dict:
            res = class_dict['last_modified'](old_obj, trans_dict)
            new_obj.db_last_modified = res
        elif hasattr(old_obj, 'db_last_modified') and old_obj.db_last_modified is not None:
            new_obj.db_last_modified = old_obj.db_last_modified
        # Child collections: translate each element (db_add_* also maintains
        # the id/name/key indices), then carry over deleted children.
        if 'actions' in class_dict:
            res = class_dict['actions'](old_obj, trans_dict)
            for obj in res:
                new_obj.db_add_action(obj)
        elif hasattr(old_obj, 'db_actions') and old_obj.db_actions is not None:
            for obj in old_obj.db_actions:
                new_obj.db_add_action(DBAction.update_version(obj, trans_dict))
        if hasattr(old_obj, 'db_deleted_actions') and hasattr(new_obj, 'db_deleted_actions'):
            for obj in old_obj.db_deleted_actions:
                n_obj = DBAction.update_version(obj, trans_dict)
                new_obj.db_deleted_actions.append(n_obj)
        if 'tags' in class_dict:
            res = class_dict['tags'](old_obj, trans_dict)
            for obj in res:
                new_obj.db_add_tag(obj)
        elif hasattr(old_obj, 'db_tags') and old_obj.db_tags is not None:
            for obj in old_obj.db_tags:
                new_obj.db_add_tag(DBTag.update_version(obj, trans_dict))
        if hasattr(old_obj, 'db_deleted_tags') and hasattr(new_obj, 'db_deleted_tags'):
            for obj in old_obj.db_deleted_tags:
                n_obj = DBTag.update_version(obj, trans_dict)
                new_obj.db_deleted_tags.append(n_obj)
        if 'annotations' in class_dict:
            res = class_dict['annotations'](old_obj, trans_dict)
            for obj in res:
                new_obj.db_add_annotation(obj)
        elif hasattr(old_obj, 'db_annotations') and old_obj.db_annotations is not None:
            for obj in old_obj.db_annotations:
                new_obj.db_add_annotation(DBAnnotation.update_version(obj, trans_dict))
        if hasattr(old_obj, 'db_deleted_annotations') and hasattr(new_obj, 'db_deleted_annotations'):
            for obj in old_obj.db_deleted_annotations:
                n_obj = DBAnnotation.update_version(obj, trans_dict)
                new_obj.db_deleted_annotations.append(n_obj)
        # Dirty/new flags mirror the source object.
        new_obj.is_new = old_obj.is_new
        new_obj.is_dirty = old_obj.is_dirty
        return new_obj
def db_children(self, parent=(None,None), orphan=False):
children = []
to_del = []
for child in self.db_actions:
children.extend(child.db_children((self.vtType, self.db_id), orphan))
if orphan:
to_del.append(child)
for child in to_del:
self.db_delete_action(child)
to_del = []
for child in self.db_tags:
children.extend(child.db_children((self.vtType, self.db_id), orphan))
if orphan:
to_del.append(child)
for child in to_del:
self.db_delete_tag(child)
to_del = []
for child in self.db_annotations:
children.extend(child.db_children((self.vtType, self.db_id), orphan))
if orphan:
to_del.append(child)
for child in to_del:
self.db_delete_annotation(child)
children.append((self, parent[0], parent[1]))
return children
def db_deleted_children(self, remove=False):
children = []
children.extend(self.db_deleted_actions)
children.extend(self.db_deleted_tags)
children.extend(self.db_deleted_annotations)
if remove:
self.db_deleted_actions = []
self.db_deleted_tags = []
self.db_deleted_annotations = []
return children
def has_changes(self):
if self.is_dirty:
return True
for child in self._db_actions:
if child.has_changes():
return True
for child in self._db_tags:
if child.has_changes():
return True
for child in self._db_annotations:
if child.has_changes():
return True
return False
def __get_db_id(self):
return self._db_id
def __set_db_id(self, id):
self._db_id = id
self.is_dirty = True
db_id = property(__get_db_id, __set_db_id)
def db_add_id(self, id):
self._db_id = id
def db_change_id(self, id):
self._db_id = id
def db_delete_id(self, id):
self._db_id = None
def __get_db_entity_type(self):
return self._db_entity_type
def __set_db_entity_type(self, entity_type):
self._db_entity_type = entity_type
self.is_dirty = True
db_entity_type = property(__get_db_entity_type, __set_db_entity_type)
def db_add_entity_type(self, entity_type):
self._db_entity_type = entity_type
def db_change_entity_type(self, entity_type):
self._db_entity_type = entity_type
def db_delete_entity_type(self, entity_type):
self._db_entity_type = None
def __get_db_version(self):
return self._db_version
def __set_db_version(self, version):
self._db_version = version
self.is_dirty = True
db_version = property(__get_db_version, __set_db_version)
def db_add_version(self, version):
self._db_version = version
def db_change_version(self, version):
self._db_version = version
def db_delete_version(self, version):
self._db_version = None
def __get_db_name(self):
return self._db_name
def __set_db_name(self, name):
self._db_name = name
self.is_dirty = True
db_name = property(__get_db_name, __set_db_name)
def db_add_name(self, name):
self._db_name = name
def db_change_name(self, name):
self._db_name = name
def db_delete_name(self, name):
self._db_name = None
def __get_db_last_modified(self):
return self._db_last_modified
def __set_db_last_modified(self, last_modified):
self._db_last_modified = last_modified
self.is_dirty = True
db_last_modified = property(__get_db_last_modified, __set_db_last_modified)
def db_add_last_modified(self, last_modified):
self._db_last_modified = last_modified
def db_change_last_modified(self, last_modified):
self._db_last_modified = last_modified
def db_delete_last_modified(self, last_modified):
self._db_last_modified = None
def __get_db_actions(self):
return self._db_actions
def __set_db_actions(self, actions):
self._db_actions = actions
self.is_dirty = True
db_actions = property(__get_db_actions, __set_db_actions)
def db_get_actions(self):
return self._db_actions
def db_add_action(self, action):
self.is_dirty = True
self._db_actions.append(action)
self.db_actions_id_index[action.db_id] = action
def db_change_action(self, action):
self.is_dirty = True
found |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.