input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
O0o00oOOOO00 . unpack_address ( packet )
if ( packet == None ) : return ( [ None , None ] )
if 84 - 84: I1IiiI . I1IiiI
return ( [ packet , O0o00oOOOO00 ] )
if 82 - 82: OoO0O00 - iIii1I11I1II1 . iIii1I11I1II1 + I1ii11iIi11i
if 45 - 45: iII111i . oO0o * iII111i
def lcaf_decode_eid ( self , packet ) :
O00oO00oOO00O = "BBB"
ooOoooOoo0oO = struct . calcsize ( O00oO00oOO00O )
if ( len ( packet ) < ooOoooOoo0oO ) : return ( [ None , None ] )
if 3 - 3: OoOoOO00 / Oo0Ooo - Oo0Ooo
if 54 - 54: Oo0Ooo . OoO0O00 * I1IiiI % IiII
if 97 - 97: o0oOOo0O0Ooo + Ii1I
if 77 - 77: I11i - oO0o . Ii1I
if 75 - 75: I11i * OoooooooOO % OoOoOO00 . i1IIi - Ii1I + iIii1I11I1II1
O0Ooo000OO00 , ooOOooooo0Oo , O000oo0O0OO0 = struct . unpack ( O00oO00oOO00O ,
packet [ : ooOoooOoo0oO ] )
if 74 - 74: ooOoO0o
if ( O000oo0O0OO0 == LISP_LCAF_INSTANCE_ID_TYPE ) :
return ( [ self . lcaf_decode_iid ( packet ) , None ] )
elif ( O000oo0O0OO0 == LISP_LCAF_MCAST_INFO_TYPE ) :
packet , O0o00oOOOO00 = self . lcaf_decode_sg ( packet )
return ( [ packet , O0o00oOOOO00 ] )
elif ( O000oo0O0OO0 == LISP_LCAF_GEO_COORD_TYPE ) :
O00oO00oOO00O = "BBBBH"
ooOoooOoo0oO = struct . calcsize ( O00oO00oOO00O )
if ( len ( packet ) < ooOoooOoo0oO ) : return ( None )
if 18 - 18: iIii1I11I1II1 - I11i - oO0o
i111IiI1III1 , ooOOooooo0Oo , O000oo0O0OO0 , I1iii1IiI11I11I , iiii1 = struct . unpack ( O00oO00oOO00O , packet [ : ooOoooOoo0oO ] )
if 12 - 12: O0 + O0 + ooOoO0o . I1IiiI * II111iiii
if 47 - 47: i11iIiiIii % OOooOOo / ooOoO0o . IiII - I1IiiI
if ( O000oo0O0OO0 != LISP_LCAF_GEO_COORD_TYPE ) : return ( None )
if 10 - 10: Oo0Ooo / ooOoO0o / I1ii11iIi11i
iiii1 = socket . ntohs ( iiii1 )
packet = packet [ ooOoooOoo0oO : : ]
if ( iiii1 > len ( packet ) ) : return ( None )
if 98 - 98: O0 - I1Ii111 - i11iIiiIii
iiIi1ii1IiI = lisp_geo ( "" )
self . instance_id = 0
self . afi = LISP_AFI_GEO_COORD
self . address = iiIi1ii1IiI
packet = iiIi1ii1IiI . decode_geo ( packet , iiii1 , I1iii1IiI11I11I )
self . mask_len = self . host_mask_len ( )
if 85 - 85: II111iiii - I1ii11iIi11i % I1IiiI . I1IiiI - OoooooooOO - I11i
return ( [ packet , None ] )
if 38 - 38: i1IIi + oO0o * ooOoO0o % Ii1I % ooOoO0o
if 80 - 80: OoO0O00 + OoOoOO00 % iII111i % OoooooooOO - ooOoO0o
if 25 - 25: OoOoOO00 % i11iIiiIii - I1IiiI * iIii1I11I1II1 - Oo0Ooo . O0
if 48 - 48: I1IiiI + oO0o % i11iIiiIii % iIii1I11I1II1
if 14 - 14: iIii1I11I1II1
if 78 - 78: I1Ii111 / Oo0Ooo - I1Ii111
class lisp_elp_node ( ) :
def __init__ ( self ) :
self . address = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . probe = False
self . strict = False
self . eid = False
self . we_are_last = False
if 1 - 1: OoO0O00 - I1IiiI * o0oOOo0O0Ooo
if 84 - 84: OoO0O00 % OoooooooOO
def copy_elp_node ( self ) :
Ooo0o0OoOO = lisp_elp_node ( )
Ooo0o0OoOO . copy_address ( self . address )
Ooo0o0OoOO . probe = self . probe
Ooo0o0OoOO . strict = self . strict
Ooo0o0OoOO . eid = self . eid
Ooo0o0OoOO . we_are_last = self . we_are_last
return ( Ooo0o0OoOO )
if 66 - 66: OoOoOO00 . iII111i
if 1 - 1: iII111i * i1IIi . iIii1I11I1II1 % O0 - OoooooooOO
if 87 - 87: iII111i . Oo0Ooo * i11iIiiIii % o0oOOo0O0Ooo + Ii1I
class lisp_elp ( ) :
def __init__ ( self , name ) :
self . elp_name = name
self . elp_nodes = [ ]
self . use_elp_node = None
self . we_are_last = False
if 72 - 72: Ii1I / II111iiii + o0oOOo0O0Ooo
if 33 - 33: I1Ii111 * OoOoOO00 - OoooooooOO
def copy_elp ( self ) :
Ii1111i = lisp_elp ( self . elp_name )
Ii1111i . use_elp_node = self . use_elp_node
Ii1111i . we_are_last = self . we_are_last
for Ooo0o0OoOO in self . elp_nodes :
Ii1111i . elp_nodes . append ( Ooo0o0OoOO . copy_elp_node ( ) )
if 11 - 11: I1Ii111 - Oo0Ooo / iIii1I11I1II1 - OoooooooOO
return ( Ii1111i )
if 71 - 71: Oo0Ooo + Ii1I - OoooooooOO + I11i - iIii1I11I1II1 / O0
if 76 - 76: i11iIiiIii % o0oOOo0O0Ooo . O0 * I11i
def print_elp ( self , want_marker ) :
OoOo0Oo0 = ""
for Ooo0o0OoOO in self . elp_nodes :
Oo0000 = ""
if ( want_marker ) :
if ( Ooo0o0OoOO == self . use_elp_node ) :
Oo0000 = "*"
elif ( Ooo0o0OoOO . we_are_last ) :
Oo0000 = "x"
if 76 - 76: iIii1I11I1II1
if 55 - 55: II111iiii % O0 * O0 - II111iiii * I1IiiI % Oo0Ooo
OoOo0Oo0 += "{}{}({}{}{}), " . format ( Oo0000 ,
Ooo0o0OoOO . address . print_address_no_iid ( ) ,
"r" if Ooo0o0OoOO . eid else "R" , "P" if Ooo0o0OoOO . probe else "p" ,
"S" if Ooo0o0OoOO . strict else "s" )
if 48 - 48: I1ii11iIi11i + OoooooooOO % i1IIi
return ( OoOo0Oo0 [ 0 : - 2 ] if OoOo0Oo0 != "" else "" )
if 46 - 46: OoOoOO00
if 75 - 75: I1IiiI
def select_elp_node ( self ) :
Ii1II111i1 , iii1i , OoO0o0OOOO = lisp_myrlocs
ooo = None
if 100 - 100: OOooOOo * OoooooooOO
for Ooo0o0OoOO in self . elp_nodes :
if ( Ii1II111i1 and Ooo0o0OoOO . address . is_exact_match ( Ii1II111i1 ) ) :
ooo = self . elp_nodes . index ( Ooo0o0OoOO )
break
if 80 - 80: O0 + oO0o - OoooooooOO - O0 . ooOoO0o . OoooooooOO
if ( iii1i and Ooo0o0OoOO . address . is_exact_match ( iii1i ) ) :
ooo = self . elp_nodes . index ( Ooo0o0OoOO )
break
if 76 - 76: Ii1I
if 62 - 62: O0 / OoO0O00 % i11iIiiIii / OOooOOo * iIii1I11I1II1
if 78 - 78: OOooOOo % O0 * O0
if 62 - 62: ooOoO0o
if 77 - 77: I1IiiI . i11iIiiIii - I1ii11iIi11i
if 83 - 83: OoO0O00 - i11iIiiIii + I1ii11iIi11i - OOooOOo / OoOoOO00 / I11i
if 53 - 53: I11i * I1IiiI . I1IiiI / o0oOOo0O0Ooo - I1Ii111
if ( ooo == None ) :
self . use_elp_node = self . elp_nodes [ 0 ]
Ooo0o0OoOO . we_are_last = False
return
if 50 - 50: I11i - OoOoOO00 + I1IiiI % Oo0Ooo / OoooooooOO - I1ii11iIi11i
if 26 - 26: IiII . Ii1I
if 35 - 35: I1ii11iIi11i + OOooOOo
if 88 - 88: O0
if 4 - 4: OoOoOO00 % iIii1I11I1II1 % OoooooooOO . oO0o
if 27 - 27: II111iiii - OoOoOO00
if ( self . elp_nodes [ - 1 ] == self . elp_nodes [ ooo ] ) :
self . use_elp_node = None
Ooo0o0OoOO . we_are_last = True
return
if 81 - 81: o0oOOo0O0Ooo - Oo0Ooo % IiII - ooOoO0o / O0
if 27 - 27: Oo0Ooo
if 15 - 15: iIii1I11I1II1 . OoOoOO00 % Ii1I / i1IIi . o0oOOo0O0Ooo
if 45 - 45: iIii1I11I1II1 - i1IIi % I1IiiI - I1Ii111 + oO0o
if 15 - 15: iIii1I11I1II1 - OoooooooOO / ooOoO0o
self . use_elp_node = self . elp_nodes [ ooo + 1 ]
return
if 83 - 83: IiII + I1Ii111 / OoOoOO00 * IiII . oO0o
if 22 - 22: O0 + ooOoO0o + I1Ii111
if 57 - 57: OOooOOo . | |
self.count += 1
class PLTIntermittentPlotter(AccumulatedValuePlotter):
# Class/Static variables.
def __init__(self, saveDir, name, av, avNameList, dispTypeList = None, semiLog = False):
super(PLTIntermittentPlotter, self).__init__(name, av, avNameList, dispTypeList)
self.count = 0
self.minPlotPoints = 2
self.saveDir = saveDir
if ( True == semiLog ):
self.plotType = "log"
else:
self.plotType = "linear"
def initialize(self):
if ( not os.path.isdir(self.saveDir) ):
os.makedirs( self.saveDir )
print("PLTIntermittentPlotter initialized.")
def update(self, prefix="", suffix=""):
# Gather the data.
# nMaxPoints = 0
# for name in self.avNameList:
# # Find the AccumulatedVariable object.
# av = self.AV[name]
# nPoints = av.get_num_values()
# if ( nPoints > nMaxPoints ):
# nMaxPoints = nPoints
# if ( nMaxPoints < self.minPlotPoints ):
# # Not enough points to plot, do nothing.
# return
# # Enough points to plot.
# # Get the points to be ploted.
# nameList = []
# for name in self.avNameList:
# av = self.AV[name]
# lastIdx = self.plotIndexDict[name]
# pointsInAv = av.get_num_values()
# if ( pointsInAv - 1 > lastIdx and 0 != pointsInAv ):
# nameList.append(name)
# if ( 0 == len( nameList ) ):
# # No update actions should be performed.
# return
# # === Need to plot new figure. ===
# # Create matplotlib figure.
# fig, ax = plt.subplots(1)
# legend = []
# for i in range( len(nameList) ):
# name = nameList[i]
# av = self.AV[name]
# lastIdx = self.plotIndexDict[name]
# # x = np.array( av.get_stamps()[ lastIdx + 1 : ] )
# # y = np.array( av.get_values()[ lastIdx + 1 : ] )
# x = np.array( av.get_stamps() )
# y = np.array( av.get_values() )
# ax.plot( x, y )
# legend.append( name )
# for i in range( len(nameList) ):
# name = nameList[i]
# av = self.AV[name]
# lastIdx = self.plotIndexDict[name]
# # x = np.array( av.get_stamps()[ lastIdx + 1 : ] )
# # y = np.array( self.AV[name].get_avg()[ self.plotIndexDict[name] + 1 : ] )
# x = np.array( av.get_stamps() )
# y = np.array( self.AV[name].get_avg() )
# ax.plot( x, y )
# legend.append( name )
# # Update the self.plotIndexDict.
# self.plotIndexDict[name] = self.AV[name].get_num_values() - 1
# if ( self.plotType == "log" ):
# ax.set_yscale("log")
# ax.legend(legend)
# ax.grid()
# ax.set_title( self.title )
# ax.set_xlabel( self.xlabel )
# ax.set_ylabel( self.ylabel )
# # Save to an image.
# fn = "%s/%04d%s%s%s.png" % (self.saveDir, self.count, prefix, self.title, suffix)
# fig.savefig(fn)
# plt.close(fig)
self.count += 1
class WorkFlow(object):
SIG_INT = False # If Ctrl-C is sent to this instance, this will be set to be True.
IS_FINALISING = False
TERMINATION_FILE = ".wf_terminate"
def __init__(self, workingDir, prefix = "", suffix = "", logFilename = None, disableStreamLogger = False):
# Add the current path to system path
self.workingDir = workingDir # The working directory.
self.prefix = prefix
self.suffix = suffix
self.logdir = os.path.join(self.workingDir, 'logdata')
self.imgdir = os.path.join(self.workingDir, 'resimg')
self.modeldir = os.path.join(self.workingDir, 'models')
if ( not os.path.isdir(self.workingDir) ):
os.makedirs( self.workingDir )
if ( not os.path.isdir(self.logdir) ):
os.makedirs( self.logdir)
if ( not os.path.isdir(self.imgdir) ):
os.makedirs( self.imgdir)
if ( not os.path.isdir(self.modeldir) ):
os.makedirs( self.modeldir)
self.terminationFile = self.workingDir + "/" + WorkFlow.TERMINATION_FILE
self.isInitialized = False
# Accumulated value dictionary.
self.AV = {"loss": AccumulatedValue("loss")}
# Accumulated value Plotter.
# self.AVP should be an object of class AccumulatedValuePlotter.
self.AVP = [] # The child class is responsible to populate this member.
self.verbose = False
if ( logFilename is not None ):
self.logFilename = logFilename
else:
self.logFilename = self.prefix + "wf" + self.suffix +".log"
# Logger.
# logging.basicConfig(datefmt = '%m/%d/%Y %I:%M:%S')
self.logger = logging.getLogger(__name__)
self.logger.setLevel(logging.DEBUG)
streamHandler = logging.StreamHandler()
streamHandler.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(levelname)s: %(message)s')
streamHandler.setFormatter(formatter)
if ( False == disableStreamLogger ):
self.logger.addHandler(streamHandler)
logFilePathPlusName = os.path.join(self.logdir, self.logFilename)
fileHandler = logging.FileHandler(filename = logFilePathPlusName, mode = "w")
fileHandler.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
fileHandler.setFormatter(formatter)
self.logger.addHandler(fileHandler)
self.logger.info("WorkFlow created.")
def add_accumulated_value(self, name, avgWidth = 2):
# Check if there is alread an ojbect which has the same name.
if ( name in self.AV.keys() ):
# This is an error.
desc = "There is already an object registered as \"%s\"." % (name)
exp = WFException(desc, "add_accumulated_value")
raise(exp)
# Name is new. Create a new AccumulatedValue object.
self.AV[name] = AccumulatedValue(name, avgWidth)
def have_accumulated_value(self, name):
return ( name in self.AV.keys() )
def push_to_av(self, name, value, stamp = None):
# Check if the name exists.
if ( False == (name in self.AV.keys()) ):
# This is an error.
desc = "No object is registered as %s." % (name)
exp = WFException(desc, "push_to_av")
raise(exp)
# Retrieve the AccumulatedValue object.
av = self.AV[name]
av.push_back(value, stamp)
def initialize(self):
# Check the system-wide signal.
self.check_signal()
# Check whether the working directory exists.
if ( False == os.path.isdir(self.workingDir) ):
# Directory does not exist, create the directory.
os.mkdir(self.workingDir)
if ( True == self.isInitialized ):
# This should be an error.
desc = "The work flow is already initialized."
exp = WFException(desc, "initialize")
raise(exp)
# Delete termination file.
if ( os.path.isfile( self.terminationFile ) ):
os.remove( self.terminationFile )
self.debug_print("initialize() get called.")
def post_initialize(self):
# Initialize AVP.
if ( len(self.AVP) > 0 ):
self.AVP[0].initialize()
self.logger.info("AVP initialized.")
# add prefix to AVP
for avp in self.AVP:
avp.title = self.prefix + avp.title
self.isInitialized = True
self.debug_print("post_initialize() get called.")
def train(self):
# Check the system-wide signal.
self.check_signal()
# Check the termination file.
self.check_termination_file()
if ( False == self.isInitialized ):
# This should be an error.
desc = "The work flow is not initialized yet."
exp = WFException(desc, "tain")
raise(exp)
self.debug_print("train() get called.")
def test(self):
# Check the system-wide signal.
self.check_signal()
# Check the termination file.
self.check_termination_file()
if ( False == self.isInitialized ):
# This should be an error.
desc = "The work flow is not initialized yet."
exp = WFException(desc, "test")
raise(exp)
self.debug_print("test() get called.")
def finalize(self):
WorkFlow.IS_FINALISING = True
if ( False == self.isInitialized ):
# This should be an error.
desc = "The work flow is not initialized yet."
exp = WFException(desc, "finalize")
raise(exp)
# Write the accumulated values.
self.write_accumulated_values()
self.draw_accumulated_values()
self.logger.info("Accumulated values are written.")
self.isInitialized = False
self.debug_print("finalize() get called.")
WorkFlow.IS_FINALISING = False
def plot_accumulated_values(self, prefix="", suffix=""):
if ( 0 == len(self.AVP) ):
return
for avp in self.AVP:
avp.update(prefix, suffix)
def write_accumulated_values(self, outDir = None):
if ( outDir is None ):
outDir = self.logdir
if ( False == os.path.isdir( outDir ) ):
os.makedirs( outDir )
if ( sys.version_info[0] < 3 ):
for av in self.AV.itervalues():
av.dump(outDir, self.prefix, self.suffix)
else:
for av in self.AV.values():
av.dump(outDir, self.prefix, self.suffix)
def draw_accumulated_values(self, outDir = None):
if ( outDir is None ):
outDir = self.imgdir
if ( False == os.path.isdir( outDir ) ):
os.makedirs( outDir )
for avp in self.AVP:
avp.write_image(outDir, self.prefix, self.suffix)
def is_initialized(self):
return self.isInitialized
def debug_print(self, msg):
if ( True == self.verbose ):
print(msg)
def compose_file_name(self, fn, ext = "", subFolder=None):
if ( subFolder is None ):
return self.workingDir + "/" + self.prefix + fn + self.suffix + "." + ext
else:
return self.workingDir + "/" + subFolder + "/" + self.prefix + fn + self.suffix + "." + ext
def make_subfolder(self, subFolder):
path = self.workingDir + "/" + subFolder
if ( not os.path.isdir( path ) ):
os.makedirs( path )
def check_termination_file(self):
if ( os.path.isfile( self.terminationFile ) ):
s = "Find termination file %s." % ( self.terminationFile )
self.logger.info(s)
raise SigIntException(s, "TermFile")
def check_signal(self):
if ( True == WorkFlow.SIG_INT ):
s = "SIGINT received."
self.logger.info(s)
raise SigIntException(s, "SigIntExp")
def print_delimeter(self, title = "", c = "=", n = 10, leading = "\n", ending = "\n"):
d = [c for i in range(int(n))]
if ( 0 == len(title) ):
s = "".join(d) + "".join(d)
else:
s = "".join(d) + " " + title + " " + "".join(d)
print("%s%s%s" % (leading, s, ending))
def get_log_str(self, ignore=[]):
logstr = ''
for key in self.AV.keys():
valid = True
for ingstr in ignore:
if ingstr in key:
valid = | |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""\
"""
import select
import socket
import sys
import errno
import random
import time
import os
import logging
import guild
from guild.actor import *
debug = False
for actor_class_name in ["Selector", "TCPServer", "RawConnectionHandler","EchoServer"]:
logger = logging.getLogger(__name__ +"." + actor_class_name)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
def Print(*args):
if debug:
sys.stderr.write(" ".join([str(x) for x in args]))
sys.stderr.write("\n")
sys.stderr.flush()
#NOTE: Initially the aim here is convenience. Things could be improved.
class Selector(Actor):
"""
Purpose:
Handle notifications regarding activity on non-blocking sockets.
Constructor call:
Selector() # No arguments
Primary Thread behaviour:
* Wait for activity on any of the sockets
* Notify socket handler via appropriate callback
* Remove caller - (Prevents mutiple notifications for same event)
Shutdown Behaviour:
Just exits.
Does not attempt to shutdown anything else, or notify anything else
Actor methods:
selector.add_reader(sock, read_ready_notification_callback)
selector.add_writer(sock, write_ready_notification_callback)
selector.add_exceptional(sock, exceptional_notification_callback)
Actor Functions:
These are actor functions to prevent a race hazard.
selector.remove_reader(sock)
selector.remove_writer(sock)
selector.remove_exceptional(sock)
You may want to do these when your protocol shuts down.
These are safe to call if the selector is not managing your socket
Config options:
Selector.SELECT_TIMEOUT = 0.05 (Effectively responsiveness to actor
methods/functions is dictated by this)
"""
SELECT_TIMEOUT = 0.05
def __init__(self):
super(Selector,self).__init__()
self.readers = {}
self.writers = {}
self.exceptionals = {}
@actor_function
def remove_reader(self, sock):
try:
del self.readers[sock]
except KeyError:
Print("WARNING - attempt to remove same socket twice", sock)
@actor_function
def remove_writer(self, sock):
try:
del self.writers[sock]
except KeyError:
Print("WARNING - attempt to remove same socket twice", sock)
@actor_function
def remove_exceptional(self, sock):
try:
del self.exceptionals[sock]
except KeyError:
Print("WARNING - attempt to remove same socket twice", sock)
@actor_method
def add_reader(self, sock, callback):
self.readers[sock] = callback
@actor_method
def add_writer(self, sock, callback):
self.writers[sock] = callback
@actor_method
def add_exceptional(self, sock, callback):
self.exceptionals[sock] = callback
def gen_process(self):
self.readers_busy = {}
while True:
yield 1
# NOTE: This could be improved.
readers = self.readers.keys()
writers = self.writers.keys()
exceptionals = self.exceptionals.keys()
readers_ready, writers_ready, exceptionals_ready = select.select(readers, writers, exceptionals, self.SELECT_TIMEOUT)
for sock in readers_ready:
callback = self.readers[sock]
del self.readers[sock] # Always delete after notifying - means this no longer requires an actor function
callback()
yield 1
for sock in writers_ready:
callback = self.writers[sock]
del self.writers[sock] # Always delete after notifying - means this no longer requires an actor function
callback()
yield 1
for sock in exceptionals_ready :
callback = self.exceptionals[sock]
del self.exceptionals[sock] # Always delete after notifying - means this no longer requires an actor function
callback()
yield 1
yield 1
def onStop(self):
# Print("Selector STOPPING")
pass
# FIXME: This should really be a service
selector = None
def get_selector():
global selector
if selector:
return selector
selector = Selector()
start(selector)
return selector
def stop_selector():
stop(selector)
def waitfor_selector():
wait_for(selector)
#
# Kinda similar to the old Kamaelia.Internet.ConnectedSocketAdapter
#
class RawConnectionHandler(Actor):
"""
Purpose:
Directly manage talking an active socket
Constructor call:
RawConnectionHandler(client_sock, addr, on_exit_cb [, selector] )
client_sock - a socket that has "someone" connected to the other end.
addr - address connection info
on_exit_cb - callback to call when this handler stops
selector - selector to use [optional]
Note for a server:
client_sock, addr = server_socket.accept()
Primary Thread behaviour:
At start up, asks the selector to notify it if there's data on
the input, and is then event driven by actor methods and the
public bindable API
Shutdown Behaviour:
Notifies the server (our creator) that the raw connection has exitted.
It does this as follows:
self.on_exit_cb(self)
Actor methods:
raw_connection_handler.read_from_socket()
raw_connection_handler.handle_socket_read_ready()
NOTE: Perhaps should be called "read_from_socket"
NOTE: Perhaps should be called "handle_socket_read_ready"
NOTE: This is really about the socket being ready for reading
- Or perhaps split into two names?
- One that handles the message, one that does the work
- That would be nice, and probably make most sense
Triggered by the selector when there is data on the socket for
us to handle. Effectively does this:
data = self.client_sock.recv(SIZE)
if success:
# Emit via "from_connection" :
self.from_connection(data)
Add ourselves to the selector again
if retryable failure:
Add ourselves to the selector again
if fatal failure:
Shutdown / Stop
raw_connection_handler.write_to_socket()
raw_connection_handler.handle_socket_write_ready()
NOTE: The reason the client doesn't bind to this is because this
is really about the socket being ready to send.
- Or perhaps split into two names?
- One that handles the message, one that does the work
- That would be nice, and probably make most sense
Triggered whenever there is data to send, specifically::
- when there's an external API call for "to_connection"
- when we've tried sending but could not empty the buffer
Logic: (NOTE: isn't actually this but probably should be(!))
If outbuffer is empty:
return
while chunks in self.outbuffer:
length_sent = self.client_sock.send(chunk)
if fatal error:
shutdown/stop
if retryable error:
requeue (at start) the chunk (or part of chunk) to send
break out of Loop
if outbuffer is not empty: # This should be the case
Add ourselves to the selector again to resend
Other methods:
self.add_reader_to_selector()
Used internally to ask the selector to nofify the RCH when there is data to
be read
self.add_writer_to_selector()
selector.add_writer(self.client_sock, self.data_to_client)
External BINDable API:
raw_connection_handler.from_connection(chunk) [ intended to late bound FROM ]
Called whenever there is any data from the connection.
If unbound, then the data is lost.
raw_connection_handler.to_connection(chunk) [ intended to late bound TO ]
When called, data is buffer in an outbuffer
The selector is then asked to notify us when the socket is ready to write to
given rch ISA RawConnectionHandler (from somewhere)
ph ISA protocol_handler
Bind data from the connection to our protocol handler
rch.bind("from_connection", ph, "from_connection")
Bind data from the protocol handler to our connection
ph.bind("to_connection", rch, "to_connection")
"""
def __init__(self, client_sock, addr, on_exit_cb, selector=None):
super(RawConnectionHandler, self).__init__()
self.client_sock = client_sock
self.addr = addr
self.selector = selector
self.buffer = [] # FIXME: TO BE DELETED
self.outbuffer = []
self.on_exit_cb = on_exit_cb
self.send_open = True
self.recv_open = True
def onStop(self):
# Print("Notify the Server that Raw Connection has exitted.")
cb = self.on_exit_cb
cb(self)
@actor_method
def handle_socket_read_ready(self):
self.read_from_socket()
def read_from_socket(self):
try:
data = self.client_sock.recv(100)
while len(data) != 0: # while data
# Otherwise OK - pass data on
self.from_connection(data)
data = self.client_sock.recv(100)
except socket.error as e:
if e.errno == errno.EAGAIN:
# FAIL, wait again
self.add_reader_to_selector()
return
if e.errno == errno.EWOULDBLOCK:
# FAIL, wait again
self.add_reader_to_selector()
return
if len(data) == 0:
Print("CLIENT HAS GONE AWAY")
Print("Exitting")
self.client_sock.close()
self.client_sock = None
self.stop()
return
# All data passed on, or error, or disconnect, so done
# Re-add self to selector to say we've read all the data
self.add_reader_to_selector()
def add_reader_to_selector(self):
selector.add_reader(self.client_sock, self.handle_socket_read_ready)
def add_writer_to_selector(self):
selector.add_writer(self.client_sock, self.handle_socket_write_ready)
def process_start(self):
if self.selector is None:
self.selector = get_selector()
self.add_reader_to_selector()
@actor_method
def handle_socket_write_ready(self):
self.write_to_socket()
def write_to_socket(self):
if len(self.outbuffer) == 0: # Shouldn't be able to get here, but there are odd edge cases
return
while len(self.outbuffer) > 0:
chunk = self.outbuffer.pop(0)
Print("Chunk to send", len(chunk))
try:
bytes_sent = self.client_sock.send(chunk)
except socket.error as e: # e.errno ; e.errmsg:
if e.errno == errno.EAGAIN:
# FAIL, wait again
x.insert(0, chunk)
self.add_writer_to_selector()
return
if e.errno == errno.EWOULDBLOCK:
# FAIL, wait again
x.insert(0, chunk)
self.add_writer_to_selector()
return
Print("Unexpected error sending. Disconnect client")
self.stop()
if len(chunk) == bytes_sent:
Print("Chunk send success")
# if len(self.outbuffer) > 0: # Guaranteed by the loop we're in
# self.add_writer_to_selector()
# return
else:
Print("Chunk send almost success, actually sent", bytes_sent, "vs", len(chunk))
# Did not manage to send all the data from this chunk. Try again in a moment with the rest
still_to_send = chunk[bytes_sent:]
x.insert(0, chunk)
self.add_writer_to_selector()
Print("All chunks sent")
# Public Interface
@late_bind_safe
def from_connection(self, chunk):
pass
# Public Interface
@actor_method
def to_connection(self, chunk):
Print("Data to connection", repr(chunk))
self.outbuffer.append(chunk)
self.add_writer_to_selector()
# Kinda similar to Kamaelia.Internet.TCPServer
class TCPServer(Actor):
maxlisten = 5
socketOptions = [socket.SOL_SOCKET, socket.SO_REUSEADDR, 1]
HOST=''
PORT = 54321
def __init__(self, selector=None, nonblocking=False,
host=None, port=None, socketOptions=None, maxlisten=None):
super(TCPServer, self).__init__()
self.handlers = []
self.nonblocking = nonblocking
self.selector = selector
self.server_socket = None
# Local overrides
if host:
self.HOST = host
if port:
self.PORT = port
if socketOptions:
self.socketOptions = socketOptions
| |
_(u'邮箱容量'), _(u'网盘容量'),
_(u'排序权重'), _(u'QQ号码'), _(u'出生日期'), _(u'密码')]]
name = 'mailbox-list_{}'.format(time.strftime('%Y%m%d%H%M%S'))
lists = get_mailbox_list(request)
all_data_size = {}
all_data_user = {}
all_data_position = {}
all_data_depts = {}
all_data_depts2 = {}
all_data_depts_parent = {}
# 不预先把所有值取出来的话,会非常,非常,非常卡
for d in MailboxUser.objects.all().values("mailbox_id","showorder","realname","eenumber","tel_mobile","tel_work","im_qq","birthday","gender","last_login"):
all_data_user[d["mailbox_id"]] = d
for d in MailboxSize.objects.all().values("mailbox_id","size"):
all_data_size[d["mailbox_id"]] = d
for d in Department.objects.values("id","parent_id","title"):
all_data_depts[d["id"]] = d["title"] if d["title"] else u""
all_data_depts_parent[d["id"]] = d["parent_id"]
for d in DepartmentMember.objects.values("mailbox_id","dept_id","position"):
mailbox_id = d["mailbox_id"]
all_data_depts2.setdefault(mailbox_id, [])
all_data_position.setdefault(mailbox_id, {})
if d["position"]:
all_data_position[mailbox_id][d["dept_id"]] = unicode(d["position"])
if not d["dept_id"] in all_data_depts:
continue
dept_name = all_data_depts.get(d["dept_id"], "")
all_data_depts2[d["mailbox_id"]].append( (dept_name,d["dept_id"]) )
for l in lists:
if not l.id in all_data_user:
data_user = {
u"realname" : "",
u"tel_mobile" : "",
u"tel_work" : "",
u"im_qq" : "",
u"birthday" : "",
u"gender" : "",
u"last_login" : "",
u"eenumber" : "",
u"showorder" : "",
}
else:
data_user = all_data_user[l.id]
#获取部门名称, 用'-'分隔
dept_id_lst = all_data_depts2.get(l.id, [])
dept_name_lst = []
position = ""
if dept_id_lst:
parent_id = int(dept_id_lst[0][1])
position = all_data_position.get(l.id, {}).get(parent_id, "")
while parent_id > 0:
if not parent_id in all_data_depts:
break
title = all_data_depts[parent_id]
dept_name_lst.append( u'{}'.format(title) )
parent_id = int(all_data_depts_parent[parent_id])
dept_name_lst.reverse()
depts = u'-'.join(dept_name_lst)
#获取部门名称 完毕
if data_user["last_login"]:
last_login = data_user["last_login"].strftime("%Y-%m-%d %H:%M:%S")
else:
last_login = ""
used = all_data_size.get(l.id, {}).get("size", 0)
used = int(used) if used else 0
realname = "" if not data_user["realname"] else data_user["realname"]
tel_mobile = "" if not data_user["tel_mobile"] else data_user["tel_mobile"]
tel_work = "" if not data_user["tel_work"] else data_user["tel_work"]
im_qq = "" if not data_user["im_qq"] else data_user["im_qq"]
birthday = "" if not data_user["birthday"] else data_user["birthday"]
gender = "" if not data_user["gender"] else data_user["gender"]
# None 竟然已经作为unicode 存在于数据库了!
tel_mobile = "" if tel_mobile=="None" else tel_mobile
tel_work = "" if tel_work=="None" else tel_work
im_qq = "" if im_qq=="None" else im_qq
birthday = "" if birthday=="None" else birthday
gender = "" if gender=="None" else gender
eenumber = "" if not data_user["eenumber"] else data_user["eenumber"]
showorder = "0" if not data_user["showorder"] else data_user["showorder"]
status = _(u"启用") if str(l.disabled)!="1" else _(u"禁用")
list.append(
[l.name, realname, depts, position, eenumber, tel_mobile, tel_work,
l.quota_mailbox, l.quota_netdisk, showorder, im_qq, birthday, u""])
return ExcelResponse(list, name, encoding='gbk')
@licence_required
def mailbox_reset_pwd(request):
from passlib.hash import md5_crypt
data = {
"status" : "OK",
"message" : "Success",
}
if request.method == 'POST':
password = request.POST.get("password","")
password1 = request.POST.get("password1","")
password2 = request.POST.get("password2","")
if not md5_crypt.verify(password, request.user.password):
data["status"] = "Failure"
data["message"] = _(u"密码验证失败")
elif password1!=<PASSWORD>:
data["status"] = "Failure"
data["message"] = _(u"两次密码输入不正确")
else:
domain_id = get_domainid_bysession(request)
ret, reason = CheckMailboxPassword(domain_id=domain_id, mailbox_id=request.user.id, password=<PASSWORD>)
if ret != 0:
reason = _(reason)
data["status"] = "Failure"
data["message"] = reason
else:
Mailbox.objects.filter(username=request.user.username).update(change_pwd=-1, password=<PASSWORD>(<PASSWORD>))
objAttr, created = MailboxUserAttr.objects.get_or_create(mailbox_id=request.user.id,domain_id=request.user.domain_id,type=u"system",item=u"password")
raw_password = u"<PASSWORD>:::"+password1+u":::<PASSWORD>"
raw_password = <PASSWORD>string(<PASSWORD>_password)
objAttr.value = raw_password
objAttr.save()
return HttpResponse(json.dumps(data), content_type="application/json")
@licence_required
def ajax_check_change_pwd(request):
user = request.user
ret = 1 if str(user.change_pwd)=="1" else 0
domain_id = get_domainid_bysession(request)
domain = Domain.objects.get(id=domain_id)
#demo用户不用修改密码
if domain.domain in ("comingchina.com","fenbu.comingchina.com") and unicode(request.user).startswith(u"demo_admin@"):
ret = 0
data = {
"result" : ret,
"reason" : "",
}
if ret == 1:
_, reason = CheckMailboxPassword(domain_id=user.domain_id, mailbox_id=user.id)
if not reason:
reason = _(u"被系统强制设置为需要修改密码"),
else:
reason = _(reason)
data["reason"] = reason
return HttpResponse(json.dumps(data), content_type="application/json")
@licence_required
def add_account(request, template_name='mailbox/add_account.html'):
domain_id = get_domainid_bysession(request)
domain = Domain.objects.get(id=domain_id)
form = MailboxForm(domain)
user_form = MailboxUserForm(domain)
if request.method == 'POST':
data = request.POST.copy()
disabled = data.get('disabled', '')
data['disabled'] = '-1' if disabled == 'on' else '1'
change_pwd = data.get('change_pwd', '')
data['change_pwd'] = '1' if change_pwd == 'on' else '-1'
enable_share = data.get('enable_share', '')
data['enable_share'] = 1 if enable_share == 'on' else -1
oabshow = data.get('oabshow', '')
data['oabshow'] = '1' if oabshow == 'on' else '-1'
form = MailboxForm(domain, data)
user_form = MailboxUserForm(domain, data)
if form.is_valid() and user_form.is_valid():
checker = MailboxLimitChecker()
if form.cleaned_data['disabled'] == '-1':
check_count = 1
else:
check_count = 0
try:
checker.simple_check(domain_id, form.cleaned_data['quota_mailbox'], form.cleaned_data['quota_netdisk'],
count=check_count)
except Exception, e:
#msg = '{}{}'.format(_(u'添加失败。'), get_exception_info())
msg = '{}{}'.format(_(u'添加失败。'), e.message)
messages.add_message(request, messages.ERROR, msg)
else:
obj = form.save()
user_form.save(obj.id)
deptlist = request.POST.getlist('deptlist[]')
maillist = request.POST.getlist('maillist[]')
if deptlist:
for dept in deptlist:
dept_id, position = dept.split('::', 1)
if not DepartmentMember.objects.filter(domain=domain, dept_id=dept_id, mailbox_id=obj.id):
DepartmentMember.objects.create(domain=domain, dept_id=dept_id, mailbox_id=obj.id,
position=position)
if maillist:
for l in maillist:
list_id, permit = l.split('::', 1)
if not ExtListMember.objects.filter(domain_id=domain_id, extlist_id=list_id,
address=obj.username):
ExtListMember.objects.create(domain_id=domain_id, extlist_id=list_id, address=obj.username,
permit=permit, name=obj.name, update_time=int(time.time()))
task_queue = TaskQueue()
task_queue.create_trigger('userinit')
messages.add_message(request, messages.SUCCESS, _(u'添加成功'))
return HttpResponseRedirect(reverse('mailbox_account'))
else:
messages.add_message(request, messages.ERROR, _(u'添加失败: {}-{}').format(form.errors, user_form.errors))
return HttpResponseRedirect(reverse('mailbox_account'))
mail_list = ExtList.objects.filter(domain_id=domain_id, dept_id=0).order_by('-id')
return render(request, template_name=template_name, context={
'form': form,
'domain': domain,
'user_form': user_form,
"dept_list": json.dumps(get_dept_list_sort(get_user_child_departments_kv(request, domain_id))),
'mail_list': mail_list,
})
def decode_upload_line(code, charset="gbk"):
if isinstance(code, unicode):
return code
try:
code = code.strip()
return code.decode(charset)
except Exception,err:
print 'decode_upload_line error: ',err
return code.decode("utf-8")
@licence_required
def batchadd_account(request, template_name='mailbox/batchadd_account.html'):
domain_id = get_domainid_bysession(request)
domain = Domain.objects.get(id=domain_id)
# 取得邮件域默认邮箱、网络硬盘大小
mb_quota_def = DomainAttr.getAttrObjValue(domain.id, 'system', 'cf_def_mailbox_size')
nd_quota_def = DomainAttr.getAttrObjValue(domain.id, 'system', 'cf_def_netdisk_size')
# 当前域是否开通强密码
server_pass = DomainAttr.getAttrObjValue(domain.id, 'webmail', 'sw_pass_severe_new')
# 失败统计
failures = []
success = 0
if request.method == 'POST':
mb_quota = request.POST.get('quota_mailbox', mb_quota_def)
nd_quota = request.POST.get('quota_netdisk', nd_quota_def)
compatible_id = request.POST.get('compatible_id', )
dept_id = request.POST.get('option_value_dpt', '')
dept_id = -1 if not dept_id else int(dept_id)
checker = MailboxLimitChecker()
fobj = request.FILES.get("txtfile")
#print "request.files ",fobj,type(fobj)
file_name = fobj.name
fext = file_name.split('.')[-1]
if fext not in ('xls', 'xlsx', 'csv', 'txt'):
messages.add_message(request, messages.ERROR, _(u"只支持excel、txt、csv文件导入。"))
return render(request, template_name=template_name, context={
'mb_quota_def': mb_quota_def,
'nd_quota_def': nd_quota_def,
'server_pass': server_pass,
"dept_list": json.dumps(get_dept_list_sort(get_user_child_departments_kv(request, domain_id))),
})
lines = []
if fext == "txt":
#第一行是标题
fobj.readline()
for line in fobj.readlines():
line = decode_upload_line(line)
if not line:
continue
elem = line.split('\t')
lines.append( elem )
elif fext == "csv":
import csv
lines = list(csv.reader(fobj))
#第一行是标题
if lines:
lines.pop(0)
elif fext in ('xls', 'xlsx'):
import xlrd
content = fobj.read()
workbook = xlrd.open_workbook(filename=None, file_contents=content)
table = workbook.sheets()[0]
for line in xrange(table.nrows):
#前x行跳过
if line in (0,):
continue
lines.append( table.row_values(line) )
if len(lines)>500:
messages.add_message(request, messages.ERROR, _(u"单次只能导入500行数据,请分批次导入。"))
return render(request, template_name=template_name, context={
'mb_quota_def': mb_quota_def,
'nd_quota_def': nd_quota_def,
'server_pass': server_pass,
"dept_list": json.dumps(get_dept_list_sort(get_user_child_departments_kv(request, domain_id))),
})
for elem in lines:
# 用户名 真实名称 所属部门 职位 工号 手机号码 电话号码 密码 邮箱容量 网盘容量 排序权重 QQ号码 出生日期 密码
data = {'limit_send': '-1', 'limit_login': '-1', 'disabled': '-1',
'limit_recv': '-1', 'pwd_days': '365', 'change_pwd': <PASSWORD>', 'enable_share': '-1', 'showorder': '0',
'gender': 'male', 'oabshow': '1'}
if compatible_id == '2':
fields_list = ['name', '_tmp', 'password1', 'quota_mailbox', 'quota_netdisk', 'realname', 'dept',
'eenumber', 'position', 'tel_mobile', 'tel_work', 'im_qq', 'birthday']
else:
fields_list = ['name', 'realname', 'dept', 'position', 'eenumber', 'tel_mobile', 'tel_work',
'quota_mailbox', 'quota_netdisk', 'showorder', 'im_qq', 'birthday', '<PASSWORD>']
for i, k in enumerate(fields_list):
try:
v = elem[i]
if isinstance(v, str) or isinstance(v, unicode):
v = v.strip()
else:
v = v
#excel保存日期为数字的
if fext in ('xls', 'xlsx') and k == "birthday" and v:
excel_date = int(v)
dt = datetime.datetime.fromordinal(datetime.datetime(1900, 1, 1).toordinal() + excel_date - 2)
v = dt.strftime('%Y-%m-%d')
if k == "im_qq" and v:
v = int(float(v))
data[k] = v
except:
data[k] = ''
data['password2'] = data['<PASSWORD>']
form = MailboxForm(domain, data)
user_form = MailboxUserForm(domain, data)
if form.is_valid() and user_form.is_valid():
quota_mailbox = data.get('quota_mailbox', '')
quota_netdisk = data.get('quota_netdisk', '')
try:
quota_mailbox = int(quota_mailbox)
except:
quota_mailbox = mb_quota
try:
quota_netdisk = int(quota_netdisk)
except:
quota_netdisk = nd_quota
try:
checker.simple_check(domain_id, quota_mailbox, quota_netdisk)
except Exception, e:
failures.append([e.message, elem])
else:
obj = form.save()
user_form.save(obj.id)
# 部门处理
dept = data.get('dept', '')
if dept:
parent_id = -1
for d in dept.split('-'):
dept_obj, __ = Department.objects.get_or_create(domain=domain, parent_id=parent_id, title=d)
parent_id = dept_obj.id
_dept_id = parent_id
else:
_dept_id = dept_id
if _dept_id > 0:
DepartmentMember.objects.create(domain=domain, dept_id=_dept_id, mailbox_id=obj.id,
position=data['position'])
success += 1
else:
failures.append([u'{}{}'.format(form.errors, user_form.errors), elem])
return render(request, template_name=template_name, context={
'mb_quota_def': mb_quota_def,
'nd_quota_def': nd_quota_def,
'server_pass': server_pass,
'success': success,
"dept_list": json.dumps(get_dept_list_sort(get_user_child_departments_kv(request, domain_id))),
'failures': failures
})
@licence_required
def batchedit_account(request, template_name='mailbox/batchedit_account.html'):
domain_id = get_domainid_bysession(request)
domain = Domain.objects.get(id=domain_id)
# 取得邮件域默认邮箱、网络硬盘大小
mb_quota_def = DomainAttr.getAttrObjValue(domain.id, 'system', 'cf_def_mailbox_size')
nd_quota_def = DomainAttr.getAttrObjValue(domain.id, 'system', 'cf_def_netdisk_size')
# 当前域是否开通强密码
server_pass = DomainAttr.getAttrObjValue(domain.id, 'webmail', 'sw_pass_severe_new')
# 失败统计
failures = []
success = 0
if request.method == 'POST':
fobj = request.FILES.get("txtfile")
#print "request.files ",fobj,type(fobj)
file_name = fobj.name
fext = file_name.split('.')[-1]
if fext not in ('xls', 'xlsx', 'csv', 'txt'):
messages.add_message(request, messages.ERROR, _(u"只支持excel、txt、csv文件导入。"))
return render(request, template_name=template_name, context={
'domain': domain,
'failures': failures,
'success': success
})
# 初始化邮箱限制检查器
checker = MailboxLimitChecker()
lines = []
if fext == "txt":
#第一行是标题
fobj.readline()
for line in fobj.readlines():
line = decode_upload_line(line)
if not line:
continue
elem = line.split('\t')
lines.append( elem )
elif fext == "csv":
import csv
lines = list(csv.reader(fobj))
#第一行是标题
if lines:
lines.pop(0)
elif fext in ('xls', | |
db.session.commit()
continue
for page in response:
url = page['url'] if 'url' in page.keys() else None
uniq_visitors = page['nb_uniq_visitors'] if 'nb_uniq_visitors' in page.keys() else None
page_summary = MatomoDailyGetPageUrlsSummary()
page_summary.date = date
page_summary.url = url
page_summary.label = page['label']
page_summary.nb_hits = page['nb_hits']
page_summary.nb_visits = page['nb_visits']
page_summary.nb_uniq_visitors = uniq_visitors
page_summary.sum_time_spent = page['sum_time_spent']
page_summary.avg_time_on_page = page['avg_time_on_page']
db.session.merge(page_summary)
db.session.commit()
print(f'[INFO ] Inserted Matomo visits per page URL for {date}')
def _update_analytics_matomo_get_daily_dataset_views_summary(app, matomo_api_baseurl):
"""
Function to update specifically the Matomo daily dataset views summary
queried from the Matomo API endpoint Actions.getPageUrl for each dataset_id.
Note: gather stats only until the day before the current
day since stats are still being gathered by Matomo for the
current day.
"""
from app import db
from app.models import MatomoDailyGetDatasetPageViewsSummary
from app.models import Dataset as DBDataset
import requests
# grep the dates already inserted into the database
date_field = MatomoDailyGetDatasetPageViewsSummary.date
db_results = db.session.query(date_field).distinct(date_field).all()
dates_in_database = [row[0] for row in db_results]
# determines which dates are missing from the database and could be queried on Matomo
dates_to_process = determine_dates_to_query_on_matomo(dates_in_database)
# get the list of dataset_id_list to process
dataset_id_list = [row[0] for row in db.session.query(DBDataset.dataset_id).all()]
# for each date and each dataset, query Matomo for the view stats
for date in dates_to_process:
date_inserted = False
for dataset_id in dataset_id_list:
page_url = f"https://portal.conp.ca/dataset?id={dataset_id}"
matomo_query = f"{matomo_api_baseurl}" \
f"&method=Actions.getPageUrl" \
f"&period=day" \
f"&date={date}" \
f"&pageUrl={page_url}"
response = requests.get(matomo_query).json()
if not response:
continue
views_summary = MatomoDailyGetDatasetPageViewsSummary()
views_summary.date = date
views_summary.dataset_id = dataset_id
views_summary.url = response[0]['url']
views_summary.label = response[0]['label']
views_summary.nb_hits = response[0]['nb_hits']
views_summary.nb_visits = response[0]['nb_visits']
views_summary.nb_uniq_visitors = response[0]['nb_uniq_visitors']
views_summary.sum_time_spent = response[0]['sum_time_spent']
views_summary.avg_time_on_page = response[0]['avg_time_on_page']
db.session.merge(views_summary)
db.session.commit()
date_inserted = True
print(f'[INFO ] Inserted Matomo number of views for {dataset_id} on {date}')
# if no stats existed for that date, then add a row to the table
# with empty values so that the script does not reprocess that date
if not date_inserted:
views_summary = MatomoDailyGetDatasetPageViewsSummary()
views_summary.date = date
db.session.merge(views_summary)
db.session.commit()
def _update_analytics_matomo_get_daily_portal_download_summary(app, matomo_api_baseurl):
"""
Function to update specifically the Matomo daily download summary
queried from the Matomo API endpoint Actions.getDownloads.
Note: gather stats only until the day before the current
day since stats are still being gathered by Matomo for the
current day.
"""
from app import db
from app.models import MatomoDailyGetPortalDownloadSummary
import requests
# grep the dates already inserted into the database
date_field = MatomoDailyGetPortalDownloadSummary.date
db_results = db.session.query(date_field).distinct(date_field).all()
dates_in_database = [row[0] for row in db_results]
# determines which dates are missing from the database and could be queried on Matomo
dates_to_process = determine_dates_to_query_on_matomo(dates_in_database)
# for each date query Matomo for the download stats
for date in dates_to_process:
matomo_query = f"{matomo_api_baseurl}" \
f"&method=Actions.getDownloads" \
f"&period=day" \
f"&date={date}" \
f"&expanded=1"
response = requests.get(matomo_query).json()
if not response:
download_summary = MatomoDailyGetPortalDownloadSummary()
download_summary.date = date
db.session.merge(download_summary)
db.session.commit()
continue
for category in response:
for downloaded_item in category['subtable']:
download_summary = MatomoDailyGetPortalDownloadSummary()
download_summary.date = date
download_summary.url = downloaded_item['url']
download_summary.label = downloaded_item['label']
download_summary.nb_hits = downloaded_item['nb_hits']
download_summary.nb_visits = downloaded_item['nb_visits']
download_summary.nb_uniq_visitors = downloaded_item['nb_uniq_visitors']
download_summary.sum_time_spent = downloaded_item['sum_time_spent']
download_summary.segment = downloaded_item['segment']
db.session.merge(download_summary)
db.session.commit()
label = downloaded_item['label']
print(f'[INFO ] Inserted Matomo number of portal downloads for {label} on {date}')
def _update_analytics_matomo_get_daily_keyword_searches_summary(app, matomo_api_baseurl):
"""
Function to update specifically the Matomo daily keyword search summary
queried from the Matomo API endpoint Actions.getSiteSearchKeywords.
Note: gather stats only until the day before the current
day since stats are still being gathered by Matomo for the
current day.
"""
from app import db
from app.models import MatomoDailyGetSiteSearchKeywords
import requests
# grep the dates already inserted into the database
date_field = MatomoDailyGetSiteSearchKeywords.date
db_results = db.session.query(date_field).distinct(date_field).all()
dates_in_database = [row[0] for row in db_results]
# determines which dates are missing from the database and could be queried on Matomo
dates_to_process = determine_dates_to_query_on_matomo(dates_in_database)
# for each date to process, query Matomo API and insert response into the database
for date in dates_to_process:
matomo_query = f"{matomo_api_baseurl}" \
f"&method=Actions.getSiteSearchKeywords" \
f"&period=day" \
f"&date={date}"
response = requests.get(matomo_query).json()
if not response:
# if no response, then there are no stats for that date.
# enter the date in the table so that this date is not
# reprocessed at the next run of analytics updates
keyword_summary = MatomoDailyGetSiteSearchKeywords()
keyword_summary.date = date
db.session.merge(keyword_summary)
db.session.commit()
for keyword in response:
exit_nb_visits = keyword['exit_nb_visits'] \
if 'exit_nb_visits' in keyword.keys() else None
keyword_summary = MatomoDailyGetSiteSearchKeywords()
keyword_summary.date = date
keyword_summary.avg_time_on_page = keyword['avg_time_on_page']
keyword_summary.bounce_rate = keyword['bounce_rate']
keyword_summary.exit_nb_visits = exit_nb_visits
keyword_summary.exit_rate = keyword['exit_rate']
keyword_summary.label = keyword['label']
keyword_summary.nb_hits = keyword['nb_hits']
keyword_summary.nb_pages_per_search = keyword['nb_pages_per_search']
keyword_summary.nb_visits = keyword['nb_visits']
keyword_summary.segment = keyword['segment']
keyword_summary.sum_time_spent = keyword['sum_time_spent']
db.session.merge(keyword_summary)
db.session.commit()
print(f'[INFO ] Inserted Matomo search keywords summary for {date}')
def determine_dates_to_query_on_matomo(dates_in_database):
"""
Determines which dates need to be queried on Matomo to update the dataset.
"""
from datetime import datetime, timedelta
# determines which dates are missing from the database and could be queried on Matomo
# NOTE: start date was set to 2020-05-01 as May is when the portal started to be live
start_date = datetime.strptime('2020-05-01', '%Y-%m-%d').date()
end_date = (datetime.today() - timedelta(1)).date()
delta = timedelta(days=1)
dates_to_process = []
while start_date <= end_date:
if str(start_date) not in dates_in_database:
dates_to_process.append(str(start_date))
start_date += delta
return dates_to_process
def _generate_missing_ark_ids(app):
"""
Generates ARK identifiers for datasets that do not have yet an ARK ID.
"""
from app import db
from app.models import ArkId
from app.models import Dataset as DBDataset
from app.pipelines.pipelines import get_pipelines_from_cache
pipelines = get_pipelines_from_cache()
dataset_id_list = [row[0] for row in db.session.query(DBDataset.dataset_id).all()]
dataset_with_ark_id_list = [row[0] for row in db.session.query(ArkId.dataset_id).all()]
pipeline_id_list = [row['ID'] for row in pipelines]
pipeline_with_ark_id_list = [row[0] for row in db.session.query(ArkId.pipeline_id).all()]
for dataset_id in dataset_id_list:
if dataset_id not in dataset_with_ark_id_list:
new_ark_id = ark_id_minter(app, 'dataset')
save_ark_id_in_database(app, 'dataset', new_ark_id, dataset_id)
for pipeline_id in pipeline_id_list:
if pipeline_id not in pipeline_with_ark_id_list:
new_ark_id = ark_id_minter(app, 'pipeline')
save_ark_id_in_database(app, 'pipeline', new_ark_id, pipeline_id)
def ark_id_minter(app, ark_id_type):
"""
Generates ARK identifiers for datasets and pipelines that do not have yet an ARK ID.
:param ark_id_type: "dataset" or "pipeline"
:type ark_id_type: str
:return: a new minted ARK identifier
"""
from app import db
from app.models import ArkId
from app.services.pynoid import mint
# arkid shoulder will be d7 for datasets and p7 for pipelines
template = 'd7.reeeeeeedeeedeeek' if ark_id_type == 'dataset' else 'p7.reeeeeeedeeedeeek'
new_ark_id = mint(
template=template,
scheme='ark:/',
naa=app.config["ARK_CONP_NAAN"]
)
# remint ARK ID until we get an ARK ID not already present in `ark_id` table
# get the list of existing ARK IDs
already_used_ark_id_list = [row[0] for row in db.session.query(ArkId.ark_id).all()]
while new_ark_id in already_used_ark_id_list:
new_ark_id = ark_id_minter(app, 'dataset')
return new_ark_id
def save_ark_id_in_database(app, ark_id_type, new_ark_id, source_id):
from app import db
from app.models import ArkId
# get the list of existing ARK IDs
already_used_ark_id_list = [row[0] for row in db.session.query(ArkId.ark_id).all()]
# if the new ARK identifier does not already exist, add an entry in the ark_id table
if new_ark_id not in already_used_ark_id_list:
ark_id_summary = ArkId()
ark_id_summary.ark_id = new_ark_id
ark_id_summary.dataset_id = source_id if ark_id_type == "dataset" else None
ark_id_summary.pipeline_id = source_id if ark_id_type == "pipeline" else None
db.session.merge(ark_id_summary)
db.session.commit()
print(f'[INFO ] Created ARK ID {new_ark_id} for {ark_id_type} {source_id}')
def _update_github_traffic_counts(app):
"""
Logic to update the GitHub traffic count tables of the portal to save
traffic information in our local database (clone and view counts).
Updates the following two tables based on GitHub API calls:
- github_daily_views_count
- github_daily_clones_count (a.k.a. DataLad download count estimation)
Warnings on major confounding factors in the number of dataset clones:
- the daily tests run on Circle CI create multiple clones per day (which
explains why there is always at least 1 unique clone counted per dataset
every day)
- the archiver and other automated script run by CONP contributes to the
number of clones every time a dataset is updated (including DATS and
README updates)
- CONP developers also contribute to the number of clones when testing,
updating or downloading datasets
"""
from app import db
from app.models import GithubDailyClonesCount, GithubDailyViewsCount
from pathlib import Path
import git
datasetsdir = Path(app.config['DATA_PATH']) / 'conp-dataset'
try:
repo = git.Repo(datasetsdir)
except git.exc.InvalidGitRepositoryError:
repo = git.Repo.clone_from(
'https://github.com/CONP-PCNO/conp-dataset',
datasetsdir,
branch='master'
)
# loop through the list of submodules present in CONP-PCNO/conp-dataset.git
for | |
#!/usr/bin/env python
"""
ftguess.py
ftguess is a Python module to determine the type of a file based on its contents.
It can be used as a Python library or a command-line tool.
Usage: ftguess <file>
ftguess is part of the python-oletools package:
http://www.decalage.info/python/oletools
"""
#=== LICENSE =================================================================
# ftguess is copyright (c) 2018-2022, <NAME> (http://www.decalage.info)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
#------------------------------------------------------------------------------
# CHANGELOG:
# 2018-07-04 v0.54 PL: - first version
# 2021-05-09 v0.60 PL: -
__version__ = '0.60.1'
# ------------------------------------------------------------------------------
# TODO:
# === IMPORTS =================================================================
import sys
import io
import zipfile
import os
import olefile
import logging
import optparse
# import lxml or ElementTree for XML parsing:
try:
# lxml: best performance for XML processing
import lxml.etree as ET
except ImportError:
try:
# Python 2.5+: batteries included
import xml.etree.ElementTree as ET
except ImportError:
try:
# Python <2.5: standalone ElementTree install
import elementtree.cElementTree as ET
except ImportError:
raise ImportError("lxml or ElementTree are not installed, " \
+ "see http://codespeak.net/lxml " \
+ "or http://effbot.org/zone/element-index.htm")
# IMPORTANT: it should be possible to run oletools directly as scripts
# in any directory without installing them with pip or setup.py.
# In that case, relative imports are NOT usable.
# And to enable Python 2+3 compatibility, we need to use absolute imports,
# so we add the oletools parent folder to sys.path (absolute+normalized path):
_thismodule_dir = os.path.normpath(os.path.abspath(os.path.dirname(__file__)))
# print('_thismodule_dir = %r' % _thismodule_dir)
_parent_dir = os.path.normpath(os.path.join(_thismodule_dir, '..'))
# print('_parent_dir = %r' % _thirdparty_dir)
if _parent_dir not in sys.path:
sys.path.insert(0, _parent_dir)
from oletools.common import clsid
from oletools.thirdparty.xglob import xglob
# === LOGGING =================================================================
class NullHandler(logging.Handler):
"""
Log Handler without output, to avoid printing messages if logging is not
configured by the main application.
Python 2.7 has logging.NullHandler, but this is necessary for 2.6:
see https://docs.python.org/2.6/library/logging.html#configuring-logging-for-a-library
"""
def emit(self, record):
pass
def get_logger(name, level=logging.CRITICAL+1):
"""
Create a suitable logger object for this module.
The goal is not to change settings of the root logger, to avoid getting
other modules' logs on the screen.
If a logger exists with same name, reuse it. (Else it would have duplicate
handlers and messages would be doubled.)
The level is set to CRITICAL+1 by default, to avoid any logging.
"""
# First, test if there is already a logger with the same name, else it
# will generate duplicate messages (due to duplicate handlers):
if name in logging.Logger.manager.loggerDict:
#NOTE: another less intrusive but more "hackish" solution would be to
# use getLogger then test if its effective level is not default.
logger = logging.getLogger(name)
# make sure level is OK:
logger.setLevel(level)
return logger
# get a new logger:
logger = logging.getLogger(name)
# only add a NullHandler for this logger, it is up to the application
# to configure its own logging:
logger.addHandler(NullHandler())
logger.setLevel(level)
return logger
# a global logger object used for debugging:
log = get_logger('ftguess')
def enable_logging():
"""
Enable logging for this module (disabled by default).
This will set the module-specific logger level to NOTSET, which
means the main application controls the actual logging level.
"""
log.setLevel(logging.NOTSET)
# === CONSTANTS ===============================================================
# file types for FileTypeGuesser:
class FTYPE(object):
"""
Constants for file types
"""
ZIP = 'Zip'
WORD = 'Word'
WORD6 = 'Word6'
WORD97 = 'Word97'
WORD2007 = 'Word2007'
WORD2007_DOCX = 'Word2007_DOCX'
WORD2007_DOTX = 'Word2007_DOTX'
WORD2007_DOCM = 'Word2007_DOCM'
WORD2007_DOTM = 'Word2007_DOTM'
EXCEL = 'Excel'
EXCEL5 = 'Excel5'
EXCEL97 = 'Excel97'
EXCEL2007 = 'Excel2007'
EXCEL2007_XLSX = 'Excel2007_XLSX'
EXCEL2007_XLSM = 'Excel2007_XLSM'
EXCEL2007_XLTX = 'Excel2007_XLTX'
EXCEL2007_XLTM = 'Excel2007_XLTM'
EXCEL2007_XLSB = 'Excel2007_XLSB'
EXCEL2007_XLAM = 'Excel2007_XLAM'
POWERPOINT97 = 'Powerpoint97'
POWERPOINT2007 = 'Powerpoint2007'
POWERPOINT2007_PPTX = 'Powerpoint2007_PPTX'
POWERPOINT2007_PPSX = 'Powerpoint2007_PPSX'
POWERPOINT2007_PPTM = 'Powerpoint2007_PPTM'
POWERPOINT2007_PPSM = 'Powerpoint2007_PPSM'
# TODO: DOCM, PPTM, PPSX, PPSM, ...
XPS = 'XPS'
RTF = 'RTF'
HTML = 'HTML'
PDF = 'PDF'
MHTML = 'MHTML'
TEXT = 'TEXT'
EXE_PE = 'EXE_PE'
GENERIC_OLE = 'OLE' # Generic OLE file
GENERIC_XML = 'XML' # Generic XML file
GENERIC_OPENXML = 'OpenXML' # Generic OpenXML file
UNKNOWN = 'Unknown File Type'
class CONTAINER(object):
"""
Constants for file container types
"""
RTF = 'RTF'
ZIP = 'Zip'
OLE = 'OLE'
OpenXML = 'OpenXML'
FlatOPC = 'FlatOPC'
OpenDocument = 'OpenDocument'
MIME = 'MIME'
BINARY = 'Binary' # Generic binary file without container
UNKNOWN = 'Unknown Container'
class APP(object):
"""
    Constants for applications
"""
MSWORD = 'MS Word'
MSEXCEL = 'MS Excel'
MSPOWERPOINT = 'MS PowerPoint'
MSACCESS = 'MS Access'
MSVISIO = 'MS Visio'
MSPROJECT = 'MS Project'
MSOFFICE = 'MS Office' # when the exact app is unknown
ZIP_ARCHIVER = 'Any Zip Archiver'
WINDOWS = 'Windows' # for Windows executables and XPS
UNKNOWN = 'Unknown Application'
# FTYPE_NAME = {
# FTYPE_ZIP: 'Zip archive',
# FTYPE_WORD97: 'MS Word 97-2000 Document',
# }
# Namespaces and tags for OpenXML parsing - RELS files:
# root: <Relationships xmlns="http://schemas.openxmlformats.org/package/2006/relationships">
NS_RELS = '{http://schemas.openxmlformats.org/package/2006/relationships}'
TAG_RELS = NS_RELS + 'Relationships'
# <Relationship Id="rId1" Type="http://schemas.openxmlformats.org/officeDocument/2006/relationships/officeDocument" Target="xl/workbook.bin"/>
TAG_REL = NS_RELS + 'Relationship'
ATTR_REL_TYPE = 'Type'
ATTR_REL_TARGET = 'Target'
URL_REL_OFFICEDOC = "http://schemas.openxmlformats.org/officeDocument/2006/relationships/officeDocument"
# For "strict" OpenXML formats, the URL is different:
URL_REL_OFFICEDOC_STRICT = 'http://purl.oclc.org/ooxml/officeDocument/relationships/officeDocument'
# Url for xps files
URL_REL_XPS = 'http://schemas.microsoft.com/xps/2005/06/fixedrepresentation'
# Namespaces and tags for OpenXML parsing - Content-types file:
NS_CONTENT_TYPES = '{http://schemas.openxmlformats.org/package/2006/content-types}'
TAG_CTYPES_DEFAULT = NS_CONTENT_TYPES + 'Default'
TAG_CTYPES_OVERRIDE = NS_CONTENT_TYPES + 'Override'
# Namespaces and tags for Word/PowerPoint 2007+ XML parsing:
# root: <pkg:package xmlns:pkg="http://schemas.microsoft.com/office/2006/xmlPackage">
NS_XMLPACKAGE = '{http://schemas.microsoft.com/office/2006/xmlPackage}'
TAG_PACKAGE = NS_XMLPACKAGE + 'package'
# the tag <pkg:part> includes <pkg:binaryData> that contains the VBA macro code in Base64:
# <pkg:part pkg:name="/word/vbaProject.bin" pkg:contentType="application/vnd.ms-office.vbaProject"><pkg:binaryData>
TAG_PKGPART = NS_XMLPACKAGE + 'part'
ATTR_PKG_NAME = NS_XMLPACKAGE + 'name'
ATTR_PKG_CONTENTTYPE = NS_XMLPACKAGE + 'contentType'
CTYPE_VBAPROJECT = "application/vnd.ms-office.vbaProject"
TAG_PKGBINDATA = NS_XMLPACKAGE + 'binaryData'
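# Illustrative helper (not part of the original module): the constants above
# can be used to locate and decode a VBA project embedded in a Flat OPC XML
# document. This is a minimal sketch assuming `root` is an ElementTree/lxml
# root element parsed from such a document; error handling is omitted.
import base64

def _example_extract_vba_project(root):
    """Return the vbaProject.bin bytes from a parsed Flat OPC package, or None."""
    for part in root.iter(TAG_PKGPART):
        if part.get(ATTR_PKG_CONTENTTYPE) == CTYPE_VBAPROJECT:
            bindata = part.find(TAG_PKGBINDATA)
            if bindata is not None and bindata.text:
                return base64.b64decode(bindata.text)
    return None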
# === CLASSES ================================================================
class FType_Base (object):
container = CONTAINER.UNKNOWN
application = APP.UNKNOWN
filetype = FTYPE.UNKNOWN
name = "Unknown file type"
longname = "Unknown file type"
extensions = [] # list of common file extensions used for the format
content_types = [] # list of MIME content-types (can be several)
PUID = None # PRONOM Unique ID - see https://www.nationalarchives.gov.uk/PRONOM/Default.aspx
may_contain_vba = False
may_contain_xlm = False
may_contain_ole = False
@classmethod
def recognize(cls, ftg):
"""
return True if the provided file matches the type of this class
:param ftg: FileTypeGuesser object
:return: bool
"""
return False
class FType_Unknown(FType_Base):
pass
class FType_RTF(FType_Base):
container = CONTAINER.RTF
application = APP.MSWORD
filetype = FTYPE.RTF
name = 'RTF'
longname = 'Rich Text Format'
extensions = ['rtf', 'doc']
content_types = ('application/rtf', 'text/rtf')
PUID = 'fmt/355' # RTF 1.9 (from Word 2007)
@classmethod
def recognize(cls, ftg):
# print('checking RTF')
# print(repr(data[0:4]))
        return ftg.data.startswith(b'{\\rt')
class FType_Generic_OLE(FType_Base):
container = CONTAINER.OLE
application = APP.UNKNOWN
filetype = FTYPE.GENERIC_OLE
name = 'Generic OLE/CFB file'
longname = 'Generic OLE file / Compound File (unknown format)'
@classmethod
def recognize(cls, ftg):
# Here there's an issue with non-OLE files smaller than 1536 bytes
# see https://github.com/decalage2/olefile/issues/142
# Workaround: pad data when it's smaller than 1536 bytes
# TODO: use the new data parameter of isOleFile when it's implemented
if len(ftg.data)<1536:
data = ftg.data + (b'\x00'*1536)
else:
data = ftg.data
if olefile.isOleFile(data):
# open the OLE file
try:
# Open and parse the OLE file:
ftg.olefile = olefile.OleFileIO(ftg.data)
# Extract the CLSID of the root storage
ftg.root_clsid = ftg.olefile.root.clsid
ftg.root_clsid_name = clsid.KNOWN_CLSIDS.get(ftg.root_clsid, None)
            except Exception as exc:
                log.debug('Error while parsing OLE file: %s' % exc)
return False
return True
else:
return False
<filename>multidomain_visual_ssl/main.py
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from pathlib import Path
import argparse
import json
import math
import os
import signal
import subprocess
import sys
import time
from torch import nn, optim
import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn.functional as F
import torch.distributed as dist
import utils
parser = argparse.ArgumentParser(description='Barlow Twins Training')
parser.add_argument('--data', type=Path, metavar='DIR',
help='path to dataset')
parser.add_argument('--workers', default=8, type=int, metavar='N',
help='number of data loader workers')
parser.add_argument('--epochs', default=300, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--batch-size', default=4096, type=int, metavar='N',
help='mini-batch size')
parser.add_argument('--learning-rate', default=0.2, type=float, metavar='LR',
help='base learning rate')
parser.add_argument('--weight-decay', default=1e-6, type=float, metavar='W',
help='weight decay')
# for barlow twins
parser.add_argument('--lambd', default=3.9e-3, type=float, metavar='L',
help='weight on off-diagonal terms')
# for simclr
parser.add_argument('--temperature', default=0.1, type=float, metavar='T',
help='temperature in infonce loss')
parser.add_argument('--projector', default='8192-8192-8192', type=str,
metavar='MLP', help='projector MLP')
parser.add_argument('--scale-loss', default=1 / 32, type=float,
metavar='S', help='scale the loss')
parser.add_argument('--print-freq', default=100, type=int, metavar='N',
help='print frequency')
parser.add_argument('--checkpoint-dir', default='./checkpoint/', type=Path,
metavar='DIR', help='path to checkpoint directory')
parser.add_argument('--dataset', default='imagenet', type=str, metavar='D',
help='dataset: cifar10 or imagenet or tiny_imagenet or stl10')
parser.add_argument('--method', default='barlow_twins', type=str, metavar='M',
help='method: barlow_twins or simclr or hsic')
parser.add_argument('--dist-address', default='58472', type=str, metavar='N',
                    help='port for distributed training')
def main():
args = parser.parse_args()
args.ngpus_per_node = torch.cuda.device_count()
if 'SLURM_JOB_ID' in os.environ:
# single-node and multi-node distributed training on SLURM cluster
# requeue job on SLURM preemption
signal.signal(signal.SIGUSR1, handle_sigusr1)
signal.signal(signal.SIGTERM, handle_sigterm)
# find a common host name on all nodes
# assume scontrol returns hosts in the same order on all nodes
cmd = 'scontrol show hostnames ' + os.getenv('SLURM_JOB_NODELIST')
stdout = subprocess.check_output(cmd.split())
host_name = stdout.decode().splitlines()[0]
args.rank = int(os.getenv('SLURM_NODEID')) * args.ngpus_per_node
args.world_size = int(os.getenv('SLURM_NNODES')) * args.ngpus_per_node
args.dist_url = 'tcp://{}:'.format(host_name) + args.dist_address
else:
# single-node distributed training
args.rank = 0
args.dist_url = 'tcp://localhost:' + args.dist_address
args.world_size = args.ngpus_per_node
torch.multiprocessing.spawn(main_worker, (args,), args.ngpus_per_node)
def main_worker(gpu, args):
args.rank += gpu
torch.distributed.init_process_group(
backend='nccl', init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
save_name = args.method + '_' + args.dataset + '_bsz_' + str(args.batch_size) + \
'_lr_' + str(args.learning_rate) + '_featdim_' + args.projector.split('-')[-1] + \
'_temp_' + str(args.temperature) + '_epoch_' + str(args.epochs)
save_name_stats = save_name + '_stats.txt'
save_name_ckpt = save_name + '_checkpoint.pth'
save_name_final = save_name + '_resnet50.pth'
if args.rank == 0:
args.checkpoint_dir.mkdir(parents=True, exist_ok=True)
stats_file = open(args.checkpoint_dir / save_name_stats, 'a', buffering=1)
print(' '.join(sys.argv))
print(' '.join(sys.argv), file=stats_file)
torch.cuda.set_device(gpu)
torch.backends.cudnn.benchmark = True
model = Model(args).cuda(gpu)
model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[gpu])
optimizer = LARS(model.parameters(), lr=0, weight_decay=args.weight_decay,
weight_decay_filter=exclude_bias_and_norm,
lars_adaptation_filter=exclude_bias_and_norm)
# automatically resume from checkpoint if it exists
if (args.checkpoint_dir / save_name_ckpt).is_file():
ckpt = torch.load(args.checkpoint_dir / save_name_ckpt,
map_location='cpu')
start_epoch = ckpt['epoch']
model.load_state_dict(ckpt['model'])
optimizer.load_state_dict(ckpt['optimizer'])
else:
start_epoch = 0
if args.dataset == 'imagenet':
dataset = torchvision.datasets.ImageFolder(args.data / 'train', utils.Transform())
elif args.dataset == 'tiny_imagenet':
dataset = torchvision.datasets.ImageFolder(args.data / 'tiny-imagenet-200/train',\
utils.TinyImageNetPairTransform(train_transform=True, pair_transform=True))
elif args.dataset == 'cifar10':
dataset = torchvision.datasets.CIFAR10(root=args.data, train=True,\
transform=utils.CifarPairTransform(train_transform=True, pair_transform=True),\
download=True)
elif args.dataset == 'stl10':
dataset = torchvision.datasets.STL10(root=args.data, split="train+unlabeled", \
transform=utils.StlPairTransform(train_transform=True, pair_transform=True),\
download=True)
sampler = torch.utils.data.distributed.DistributedSampler(dataset)
assert args.batch_size % args.world_size == 0
per_device_batch_size = args.batch_size // args.world_size
loader = torch.utils.data.DataLoader(
dataset, batch_size=per_device_batch_size, num_workers=args.workers,
pin_memory=True, sampler=sampler)
start_time = time.time()
scaler = torch.cuda.amp.GradScaler()
for epoch in range(start_epoch, args.epochs):
sampler.set_epoch(epoch)
for step, ((y1, y2), _) in enumerate(loader, start=epoch * len(loader)):
y1 = y1.cuda(gpu, non_blocking=True)
y2 = y2.cuda(gpu, non_blocking=True)
lr = adjust_learning_rate(args, optimizer, loader, step)
optimizer.zero_grad()
with torch.cuda.amp.autocast():
loss = model.forward(y1, y2, temperature=args.temperature)
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
if step % args.print_freq == 0:
torch.distributed.reduce(loss.div_(args.world_size), 0)
if args.rank == 0:
stats = dict(epoch=epoch, step=step, learning_rate=lr,
loss=loss.item(),
time=int(time.time() - start_time),
dataset=args.dataset,
method=args.method,
batch_size=args.batch_size,
proj_feat_dim=args.projector.split('-')[-1])
print(json.dumps(stats))
print(json.dumps(stats), file=stats_file)
if args.rank == 0:
# save checkpoint
state = dict(epoch=epoch + 1, model=model.state_dict(),
optimizer=optimizer.state_dict())
torch.save(state, args.checkpoint_dir / save_name_ckpt)
if args.rank == 0:
# save final model
torch.save(model.module.backbone.state_dict(),
args.checkpoint_dir / save_name_final)
def adjust_learning_rate(args, optimizer, loader, step):
max_steps = args.epochs * len(loader)
warmup_steps = 10 * len(loader)
base_lr = args.learning_rate * args.batch_size / 256
if step < warmup_steps:
lr = base_lr * step / warmup_steps
else:
step -= warmup_steps
max_steps -= warmup_steps
q = 0.5 * (1 + math.cos(math.pi * step / max_steps))
end_lr = base_lr * 0.001
lr = base_lr * q + end_lr * (1 - q)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr
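# Note: the schedule above is a 10-epoch linear warmup to
# base_lr = learning_rate * batch_size / 256, followed by a cosine decay
# down to 0.001 * base_lr over the remaining steps.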
def handle_sigusr1(signum, frame):
os.system(f'scontrol requeue {os.getenv("SLURM_JOB_ID")}')
exit()
def handle_sigterm(signum, frame):
pass
def off_diagonal(x):
# return a flattened view of the off-diagonal elements of a square matrix
n, m = x.shape
assert n == m
return x.flatten()[:-1].view(n - 1, n + 1)[:, 1:].flatten()
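# Worked example (illustrative): for x = [[0, 1, 2], [3, 4, 5], [6, 7, 8]],
# flatten()[:-1] gives [0..7] and view(n - 1, n + 1) yields
# [[0, 1, 2, 3], [4, 5, 6, 7]], aligning every diagonal element in column 0,
# so [:, 1:] keeps exactly the off-diagonal entries [1, 2, 3, 5, 6, 7].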
def all_gather(tensor, expand_dim=0, num_replicas=None):
"""Gathers a tensor from other replicas, concat on expand_dim and return."""
num_replicas = dist.get_world_size() if num_replicas is None else num_replicas
other_replica_tensors = [torch.zeros_like(tensor) for _ in range(num_replicas)]
dist.all_gather(other_replica_tensors, tensor)
return torch.cat([o.unsqueeze(expand_dim) for o in other_replica_tensors], expand_dim)
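# Note: dist.all_gather does not propagate gradients to the gathered tensors,
# so in the SimCLR loss below gradients flow only through the local
# (un-gathered) embeddings on the left-hand side of each matmul.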
class Model(nn.Module):
def __init__(self, args):
super().__init__()
self.args = args
if args.dataset == 'cifar10' or args.dataset == 'tiny_imagenet' or args.dataset == 'stl10':
resnet50 = torchvision.models.resnet50(zero_init_residual=True)
self.backbone = []
for name, module in resnet50.named_children():
if name == 'conv1':
module = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
if args.dataset == 'cifar10':
if not isinstance(module, nn.Linear) and not isinstance(module, nn.MaxPool2d):
self.backbone.append(module)
elif args.dataset == 'tiny_imagenet' or args.dataset == 'stl10':
if not isinstance(module, nn.Linear):
self.backbone.append(module)
self.backbone = nn.Sequential(*self.backbone)
elif args.dataset == 'imagenet':
self.backbone = torchvision.models.resnet50(zero_init_residual=True)
self.backbone.fc = nn.Identity()
self.dataset = args.dataset
# projector
sizes = [2048] + list(map(int, args.projector.split('-')))
layers = []
for i in range(len(sizes) - 2):
layers.append(nn.Linear(sizes[i], sizes[i + 1], bias=False))
layers.append(nn.BatchNorm1d(sizes[i + 1]))
layers.append(nn.ReLU(inplace=True))
layers.append(nn.Linear(sizes[-2], sizes[-1], bias=False))
self.projector = nn.Sequential(*layers)
# normalization layer for the representations z1 and z2
self.bn = nn.BatchNorm1d(sizes[-1], affine=False)
self.method = args.method
# default: forward with Barlow Twins
def forward(self, y1, y2, temperature=0.1, num_replicas=None):
if self.dataset == 'cifar10' or self.dataset == 'tiny_imagenet' or self.dataset == 'stl10':
embedding1 = self.projector(torch.flatten(self.backbone(y1), start_dim=1))
embedding2 = self.projector(torch.flatten(self.backbone(y2), start_dim=1))
elif self.dataset == 'imagenet':
embedding1 = self.projector(self.backbone(y1))
embedding2 = self.projector(self.backbone(y2))
if self.method == 'barlow_twins' or self.method == 'hsic':
# empirical cross-correlation matrix
c = self.bn(embedding1).T @ self.bn(embedding2)
# sum the cross-correlation matrix between all gpus
c.div_(self.args.batch_size)
torch.distributed.all_reduce(c)
# use --scale-loss to multiply the loss by a constant factor
# see the Issues section of the readme
on_diag = torch.diagonal(c).add_(-1).pow_(2).sum().mul(self.args.scale_loss)
if self.method == 'barlow_twins':
off_diag = off_diagonal(c).pow_(2).sum().mul(self.args.scale_loss)
elif self.method == 'hsic':
off_diag = off_diagonal(c).add_(1).pow_(2).sum().mul(self.args.scale_loss)
loss = on_diag + self.args.lambd * off_diag
return loss
elif self.method == 'simclr':
"""NT-XENT Loss from SimCLR
:param embedding1: embedding of augmentation1
:param embedding2: embedding of augmentation2
:param temperature: nce normalization temp
:param num_replicas: number of compute devices
:returns: scalar loss
:rtype: float32
"""
batch_size = embedding1.shape[0]
feature_size = embedding1.shape[-1]
num_replicas = dist.get_world_size() if num_replicas is None else num_replicas
LARGE_NUM = 1e9
# normalize both embeddings
embedding1 = F.normalize(embedding1, dim=-1)
embedding2 = F.normalize(embedding2, dim=-1)
if num_replicas > 1 and self.training:
# First grab the tensor from all other embeddings
embedding1_full = all_gather(embedding1, num_replicas=num_replicas)
embedding2_full = all_gather(embedding2, num_replicas=num_replicas)
# fold the tensor in to create [B, F]
embedding1_full = embedding1_full.reshape(-1, feature_size)
embedding2_full = embedding2_full.reshape(-1, feature_size)
            # Create pseudo-labels using the current replica id & one-hotting
replica_id = dist.get_rank()
labels = torch.arange(batch_size, device=embedding1.device) + replica_id * batch_size
labels = labels.type(torch.int64)
full_batch_size = embedding1_full.shape[0]
masks = F.one_hot(labels, full_batch_size).to(embedding1_full.device)
labels = F.one_hot(labels, full_batch_size * 2).to(embedding1_full.device)
else: # no replicas or we are in test mode; test set is same size on all replicas for now
embedding1_full = embedding1
embedding2_full = embedding2
masks = F.one_hot(torch.arange(batch_size), batch_size).to(embedding1.device)
labels = F.one_hot(torch.arange(batch_size), batch_size * 2).to(embedding1.device)
# Matmul-to-mask
logits_aa = torch.matmul(embedding1, embedding1_full.T) / temperature
logits_aa = logits_aa - masks * LARGE_NUM
logits_bb = torch.matmul(embedding2, embedding2_full.T) / temperature
logits_bb = logits_bb - masks * LARGE_NUM
logits_ab = torch.matmul(embedding1, embedding2_full.T) / temperature
logits_ba = torch.matmul(embedding2, embedding1_full.T) / temperature
# Use our standard cross-entropy loss which uses log-softmax internally.
# Concat on the feature dimension to provide all features for standard softmax-xent
loss_a = F.cross_entropy(input=torch.cat([logits_ab, logits_aa], 1),
target=torch.argmax(labels, -1),
reduction="none")
loss_b = F.cross_entropy(input=torch.cat([logits_ba, logits_bb], 1),
target=torch.argmax(labels, -1),
reduction="none")
loss = loss_a + loss_b
return torch.mean(loss)
class LARS(optim.Optimizer):
def __init__(self, params, lr, weight_decay=0, momentum=0.9, eta=0.001,
weight_decay_filter=None, lars_adaptation_filter=None):
defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum,
eta=eta, weight_decay_filter=weight_decay_filter,
lars_adaptation_filter=lars_adaptation_filter)
super().__init__(params, defaults)
@torch.no_grad()
def step(self):
for g in self.param_groups:
for p in g['params']:
dp = p.grad
if dp is None:
continue
if g['weight_decay_filter'] is None or not g['weight_decay_filter'](p):
dp = dp.add(p, alpha=g['weight_decay'])
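                # NOTE: the remainder of step() below is a reconstruction based
                # on the reference Barlow Twins LARS implementation (trust-ratio
                # scaling followed by a momentum update); treat it as a sketch.
                if g['lars_adaptation_filter'] is None or not g['lars_adaptation_filter'](p):
                    param_norm = torch.norm(p)
                    update_norm = torch.norm(dp)
                    one = torch.ones_like(param_norm)
                    # LARS trust ratio: eta * ||p|| / ||dp||, guarded against zeros
                    q = torch.where(param_norm > 0.,
                                    torch.where(update_norm > 0,
                                                (g['eta'] * param_norm / update_norm),
                                                one), one)
                    dp = dp.mul(q)
                param_state = self.state[p]
                if 'mu' not in param_state:
                    param_state['mu'] = torch.zeros_like(p)
                mu = param_state['mu']
                mu.mul_(g['momentum']).add_(dp)
                p.add_(mu, alpha=-g['lr'])


def exclude_bias_and_norm(p):
    # Reconstruction of the filter referenced in main_worker(): biases and
    # normalization parameters (1-D tensors) skip weight decay and LARS
    # adaptation.
    return p.ndim == 1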
<gh_stars>0
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test that FakeFilesystem calls work identically to a real filesystem."""
# pylint: disable-all
import os
import shutil
import sys
import tempfile
import time
import unittest
from pyfakefs import fake_filesystem
def sep(path):
"""Converts slashes in the path to the architecture's path seperator."""
if isinstance(path, str):
return path.replace('/', os.sep)
return path
def _get_errno(raised_error):
if raised_error is not None:
try:
return raised_error.errno
except AttributeError:
pass
class TestCase(unittest.TestCase):
is_windows = sys.platform.startswith('win')
_FAKE_FS_BASE = sep('/fakefs')
class FakeFilesystemVsRealTest(TestCase):
def _paths(self, path):
"""For a given path, return paths in the real and fake filesystems."""
if not path:
return None, None
return (os.path.join(self.real_base, path),
os.path.join(self.fake_base, path))
def _create_test_file(self, file_type, path, contents=None):
"""Create a dir, file, or link in both the real fs and the fake."""
path = sep(path)
self._created_files.append([file_type, path, contents])
real_path, fake_path = self._paths(path)
if file_type == 'd':
os.mkdir(real_path)
self.fake_os.mkdir(fake_path)
if file_type == 'f':
fh = open(real_path, 'w')
fh.write(contents or '')
fh.close()
fh = self.fake_open(fake_path, 'w')
fh.write(contents or '')
fh.close()
# b for binary file
if file_type == 'b':
fh = open(real_path, 'wb')
fh.write(contents or '')
fh.close()
fh = self.fake_open(fake_path, 'wb')
fh.write(contents or '')
fh.close()
# l for symlink, h for hard link
if file_type in ('l', 'h'):
real_target, fake_target = (contents, contents)
# If it begins with '/', make it relative to the base. You can't go
# creating files in / for the real file system.
if contents.startswith(os.sep):
real_target, fake_target = self._paths(contents[1:])
if file_type == 'l':
os.symlink(real_target, real_path)
self.fake_os.symlink(fake_target, fake_path)
elif file_type == 'h':
os.link(real_target, real_path)
self.fake_os.link(fake_target, fake_path)
def setUp(self):
# Base paths in the real and test file systems. We keep them different
# so that missing features in the fake don't fall through to the base
# operations and magically succeed.
tsname = 'fakefs.%s' % time.time()
self.cwd = os.getcwd()
# Fully expand the base_path - required on OS X.
self.real_base = os.path.realpath(
os.path.join(tempfile.gettempdir(), tsname))
os.chdir(tempfile.gettempdir())
if os.path.isdir(self.real_base):
shutil.rmtree(self.real_base)
os.mkdir(self.real_base)
self.fake_base = self._FAKE_FS_BASE
# Make sure we can write to the physical testing temp directory.
self.assertTrue(os.access(self.real_base, os.W_OK))
self.fake_filesystem = fake_filesystem.FakeFilesystem()
self.fake_filesystem.create_dir(self.fake_base)
self.fake_os = fake_filesystem.FakeOsModule(self.fake_filesystem)
self.fake_open = fake_filesystem.FakeFileOpen(self.fake_filesystem)
self._created_files = []
os.chdir(self.real_base)
self.fake_os.chdir(self.fake_base)
def tearDown(self):
# We have to remove all the files from the real FS. Doing the same for
# the fake FS is optional, but doing it is an extra sanity check.
os.chdir(tempfile.gettempdir())
try:
rev_files = self._created_files[:]
rev_files.reverse()
for info in rev_files:
real_path, fake_path = self._paths(info[1])
if info[0] == 'd':
try:
os.rmdir(real_path)
except OSError as e:
                        if 'Directory not empty' in str(e):
self.fail('Real path %s not empty: %s : %s' % (
real_path, e, os.listdir(real_path)))
else:
raise
self.fake_os.rmdir(fake_path)
if info[0] == 'f' or info[0] == 'l':
os.remove(real_path)
self.fake_os.remove(fake_path)
finally:
shutil.rmtree(self.real_base)
os.chdir(self.cwd)
def _compare_behaviors(self, method_name, path, real, fake,
method_returns_path=False):
"""Invoke an os method in both real and fake contexts and compare
results.
Invoke a real filesystem method with a path to a real file and invoke
a fake filesystem method with a path to a fake file and compare the
results. We expect some calls to throw Exceptions, so we catch those
and compare them.
Args:
method_name: Name of method being tested, for use in
error messages.
path: potential path to a file in the real and fake file systems,
passing an empty tuple indicates that no arguments to pass
to method.
real: built-in system library or method from the built-in system
library which takes a path as an arg and returns some value.
            fake: fake_filesystem object or method from a fake_filesystem class
which takes a path as an arg and returns some value.
method_returns_path: True if the method returns a path, and thus we
must compensate for expected difference between real and fake.
Returns:
A description of the difference in behavior, or None.
"""
# pylint: disable=C6403
def _error_class(exc):
return (exc and exc.__class__.__name__) or 'None'
real_err, real_value = self._get_real_value(method_name, path, real)
fake_err, fake_value = self._get_fake_value(method_name, path, fake)
method_call = '%s' % method_name
method_call += '()' if path == () else '(%s)' % path
        # We only compare on the error class because the actual error contents
        # almost always differ because of the file paths.
if _error_class(real_err) != _error_class(fake_err):
if real_err is None:
return '%s: real version returned %s, fake raised %s' % (
method_call, real_value, _error_class(fake_err))
if fake_err is None:
return '%s: real version raised %s, fake returned %s' % (
method_call, _error_class(real_err), fake_value)
return '%s: real version raised %s, fake raised %s' % (
method_call, _error_class(real_err), _error_class(fake_err))
real_errno = _get_errno(real_err)
fake_errno = _get_errno(fake_err)
if real_errno != fake_errno:
return '%s(%s): both raised %s, real errno %s, fake errno %s' % (
method_name, path, _error_class(real_err),
real_errno, fake_errno)
# If the method is supposed to return a full path AND both values
# begin with the expected full path, then trim it off.
if method_returns_path:
if (real_value and fake_value
and real_value.startswith(self.real_base)
and fake_value.startswith(self.fake_base)):
real_value = real_value[len(self.real_base):]
fake_value = fake_value[len(self.fake_base):]
if real_value != fake_value:
            return '%s: real returned %s, fake returned %s' % (
method_call, real_value, fake_value)
return None
def _get_fake_value(self, method_name, path, fake):
fake_value = None
fake_err = None
try:
fake_method = fake
if not callable(fake):
fake_method = getattr(fake, method_name)
args = [] if path == () else [path]
fake_value = str(fake_method(*args))
except Exception as e: # pylint: disable-msg=W0703
fake_err = e
return fake_err, fake_value
def _get_real_value(self, method_name, path, real):
real_value = None
real_err = None
# Catching Exception below gives a lint warning, but it's what we need.
try:
args = [] if path == () else [path]
real_method = real
if not callable(real):
real_method = getattr(real, method_name)
real_value = str(real_method(*args))
except Exception as e: # pylint: disable-msg=W0703
real_err = e
return real_err, real_value
def assertOsMethodBehaviorMatches(self, method_name, path,
method_returns_path=False):
"""Invoke an os method in both real and fake contexts and compare.
For a given method name (from the os module) and a path, compare the
behavior of the system provided module against the fake_filesystem
module.
We expect results and/or Exceptions raised to be identical.
Args:
method_name: Name of method being tested.
path: potential path to a file in the real and fake file systems.
method_returns_path: True if the method returns a path, and thus we
must compensate for expected difference between real and fake.
Returns:
A description of the difference in behavior, or None.
"""
path = sep(path)
return self._compare_behaviors(method_name, path, os, self.fake_os,
method_returns_path)
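    # Example (hypothetical usage): a test method could assert that os.listdir
    # behaves identically on both filesystems with
    #     diff = self.assertOsMethodBehaviorMatches('listdir', 'dir')
    #     self.assertIsNone(diff, diff)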
def diff_open_method_behavior(self, method_name, path, mode, data,
method_returns_data=True):
"""Invoke an open method in both real and fkae contexts and compare.
Args:
method_name: Name of method being tested.
path: potential path to a file in the real and fake file systems.
mode: how to open the file.
data: any data to pass to the method.
method_returns_data: True if a method returns some sort of data.
For a given method name (from builtin open) and a path, compare the
behavior of the system provided module against the fake_filesystem
module.
We expect results and/or Exceptions raised to be identical.
Returns:
A description of the difference in behavior, or None.
"""
with open(path, mode) as real_fh:
with self.fake_open(path, mode) as fake_fh:
return self._compare_behaviors(
method_name, data, real_fh, fake_fh, method_returns_data)
def diff_os_path_method_behavior(self, method_name, path,
method_returns_path=False):
"""Invoke an os.path method in both real and fake contexts and compare.
For a given method name (from the os.path module) and a path, compare
the behavior of the system provided module against the
        fake_filesystem module.
We expect results and/or Exceptions raised to be identical.
Args:
method_name: Name of method being tested.
            path: potential path to a file in the real and fake file systems.
<gh_stars>0
"""
Here is the implementation of the OLD peeler, used for tridesclous <= 1.2.2.
This implementation is kept for comparison purposes.
In some situations this peeler engine made mistakes, although it was a bit faster.
Please do not use it anymore.
"""
import time
import numpy as np
from .peeler_engine_classic import PeelerEngineClassic
from .peeler_tools import *
from .peeler_tools import _dtype_spike
from .cltools import HAVE_PYOPENCL, OpenCL_Helper
if HAVE_PYOPENCL:
import pyopencl
mf = pyopencl.mem_flags
class PeelerEngineOldClassic(PeelerEngineClassic):
def process_one_chunk(self, pos, sigs_chunk):
#~ print('*'*5)
#~ print('chunksize', self.chunksize, '=', self.chunksize/self.sample_rate*1000, 'ms')
t1 = time.perf_counter()
abs_head_index, preprocessed_chunk = self.signalpreprocessor.process_data(pos, sigs_chunk)
#~ t2 = time.perf_counter()
#~ print('process_data', (t2-t1)*1000)
        # note: abs_head_index is smaller than pos because the preprocessed chunk
        # is late due to the local filtfilt in signalpreprocessor
        # shift the residuals buffer and put the new one on the right side
t1 = time.perf_counter()
fifo_roll_size = self.fifo_residuals.shape[0]-preprocessed_chunk.shape[0]
if fifo_roll_size>0 and fifo_roll_size!=self.fifo_residuals.shape[0]:
self.fifo_residuals[:fifo_roll_size,:] = self.fifo_residuals[-fifo_roll_size:,:]
self.fifo_residuals[fifo_roll_size:,:] = preprocessed_chunk
#~ t2 = time.perf_counter()
#~ print('fifo move', (t2-t1)*1000.)
# relation between inside chunk index and abs index
shift = abs_head_index - self.fifo_residuals.shape[0]
        # TODO remove from peak the very beginning of the signal because of border filtering effects
good_spikes = []
#~ already_tested = []
# negative mask 1: not tested 0: already tested
mask_already_tested = np.ones(self.fifo_residuals.shape[0] - 2 * self.n_span, dtype='bool')
local_peaks_mask = self.peakdetector.get_mask_peaks_in_chunk(self.fifo_residuals)
#~ print('sum(local_peaks_mask)', np.sum(local_peaks_mask))
n_loop = 0
t3 = time.perf_counter()
while True:
#detect peaks
#~ t3 = time.perf_counter()
#~ local_peaks_mask = self.peakdetector.get_mask_peaks_in_chunk(self.fifo_residuals)
local_peaks_indexes, = np.nonzero(local_peaks_mask & mask_already_tested)
#~ print(local_peaks_indexes)
local_peaks_indexes += self.n_span
#~ exit()
#~ t4 = time.perf_counter()
            #~ print(' detect peaks', (t4-t3)*1000.)
#~ if len(already_tested)>0:
#~ local_peaks_to_check = local_peaks_indexes[~np.in1d(local_peaks_indexes, already_tested)]
#~ else:
#~ local_peaks_to_check = local_peaks_indexes
n_ok = 0
for local_ind in local_peaks_indexes:
#~ print(' local_peak', local_peak, 'i', i)
t1 = time.perf_counter()
spike = self.classify_and_align_one_spike(local_ind, self.fifo_residuals, self.catalogue)
t2 = time.perf_counter()
#~ print(' classify_and_align_one_spike', (t2-t1)*1000., spike.cluster_label)
if spike.cluster_label>=0:
#~ print(' >>spike.index', spike.index, spike.cluster_label, 'abs index', spike.index+shift)
#~ spikes = np.array([spike], dtype=_dtype_spike)
#~ prediction = make_prediction_signals(spikes, self.fifo_residuals.dtype, self.fifo_residuals.shape, self.catalogue, safe=False)
#~ self.fifo_residuals -= prediction
#~ spikes['index'] += shift
#~ good_spikes.append(spikes)
#~ n_ok += 1
# substract one spike
pos, pred = make_prediction_on_spike_with_label(spike.index, spike.cluster_label, spike.jitter, self.fifo_residuals.dtype, self.catalogue)
self.fifo_residuals[pos:pos+self.peak_width, :] -= pred
# append
spikes = np.array([spike], dtype=_dtype_spike)
spikes['index'] += shift
good_spikes.append(spikes)
n_ok += 1
                    # recompute peaks in the neighborhood
                    # here the indexing is tricky
                    # sl1: we need n_span more on each side
                    # sl2: we need a shift of n_span because of the smaller shape
sl1 = slice(local_ind + self.n_left - 1 - self.n_span, local_ind + self.n_right + 1 + self.n_span)
sl2 = slice(local_ind + self.n_left - 1 - self.n_span, local_ind + self.n_right + 1- self.n_span)
local_peaks_mask[sl2] = self.peakdetector.get_mask_peaks_in_chunk(self.fifo_residuals[sl1, :])
#~ print(' already_tested before', already_tested)
#~ already_tested = [ind for ind in already_tested if np.abs(spike.index-ind)>self.peak_width]
# set neighboor untested
mask_already_tested[local_ind - self.peak_width - self.n_span:local_ind + self.peak_width - self.n_span] = True
#~ print(' already_tested new deal', already_tested)
else:
# set peak tested
#~ print(mask_already_tested.shape)
#~ print(self.fifo_residuals.shape)
#~ print(self.n_span)
mask_already_tested[local_ind - self.n_span] = False
#~ print('already tested', local_ind)
#~ already_tested.append(local_peak)
n_loop += 1
if n_ok==0:
# no peak can be labeled
# reserve bad spikes on the right limit for next time
#~ local_peaks_indexes = local_peaks_indexes[local_peaks_indexes<(self.chunksize+self.n_span)]
#~ bad_spikes = np.zeros(local_peaks_indexes.shape[0], dtype=_dtype_spike)
#~ bad_spikes['index'] = local_peaks_indexes + shift
#~ bad_spikes['cluster_label'] = LABEL_UNCLASSIFIED
nolabel_indexes, = np.nonzero(~mask_already_tested)
nolabel_indexes += self.n_span
nolabel_indexes = nolabel_indexes[nolabel_indexes<(self.chunksize+self.n_span)]
bad_spikes = np.zeros(nolabel_indexes.shape[0], dtype=_dtype_spike)
bad_spikes['index'] = nolabel_indexes + shift
bad_spikes['cluster_label'] = LABEL_UNCLASSIFIED
break
#~ t4 = time.perf_counter()
#~ print('LOOP classify_and_align_one_spike', (t4-t3)*1000)
#~ print('n_good', len(good_spikes), 'n_loop', n_loop)
#concatenate, sort and count
        # here the trick is to keep spikes at the right border
        # and keep them until the next loop; this avoids unordered spikes
if len(good_spikes)>0:
good_spikes = np.concatenate(good_spikes)
near_border = (good_spikes['index'] - shift)>=(self.chunksize+self.n_span)
near_border_good_spikes = good_spikes[near_border].copy()
good_spikes = good_spikes[~near_border]
all_spikes = np.concatenate([good_spikes] + [bad_spikes] + self.near_border_good_spikes)
self.near_border_good_spikes = [near_border_good_spikes] # for next chunk
else:
all_spikes = np.concatenate([bad_spikes] + self.near_border_good_spikes)
self.near_border_good_spikes = []
# all_spikes = all_spikes[np.argsort(all_spikes['index'])]
all_spikes = all_spikes.take(np.argsort(all_spikes['index']))
self.total_spike += all_spikes.size
return abs_head_index, preprocessed_chunk, self.total_spike, all_spikes
def classify_and_align_one_spike(self, local_index, residual, catalogue):
# local_index is index of peaks inside residual and not
# the absolute peak_pos. So time scaling must be done outside.
peak_width = catalogue['peak_width']
n_left = catalogue['n_left']
#~ alien_value_threshold = catalogue['clean_waveforms_params']['alien_value_threshold']
        # ind is the window border!
left_ind = local_index + n_left
if left_ind+peak_width+self.maximum_jitter_shift+1>=residual.shape[0]:
# too near right limits no label
label = LABEL_RIGHT_LIMIT
jitter = 0
elif left_ind<=self.maximum_jitter_shift:
# too near left limits no label
#~ print(' LABEL_LEFT_LIMIT', left_ind)
label = LABEL_LEFT_LIMIT
jitter = 0
elif catalogue['centers0'].shape[0]==0:
# empty catalogue
label = LABEL_UNCLASSIFIED
jitter = 0
else:
waveform = residual[left_ind:left_ind+peak_width,:]
if self.alien_value_threshold is not None and \
np.any((waveform>self.alien_value_threshold) | (waveform<-self.alien_value_threshold)) :
label = LABEL_ALIEN
jitter = 0
else:
#~ t1 = time.perf_counter()
label, jitter = self.estimate_one_jitter(waveform)
#~ t2 = time.perf_counter()
#~ print(' estimate_one_jitter', (t2-t1)*1000.)
#~ jitter = -jitter
        #TODO debug: jitter sign is positive on the right and negative on the left
#~ print('label, jitter', label, jitter)
# if more than one sample of jitter
# then we try a peak shift
# take it if better
#TODO debug peak shift
if np.abs(jitter) > 0.5 and label >=0:
prev_ind, prev_label, prev_jitter =left_ind, label, jitter
shift = -int(np.round(jitter))
#~ print('classify and align shift', shift)
if np.abs(shift) >self.maximum_jitter_shift:
#~ print(' LABEL_MAXIMUM_SHIFT avec shift')
label = LABEL_MAXIMUM_SHIFT
else:
left_ind = left_ind + shift
if left_ind+peak_width>=residual.shape[0]:
#~ print(' LABEL_RIGHT_LIMIT avec shift')
label = LABEL_RIGHT_LIMIT
elif left_ind < 0:
#~ print(' LABEL_LEFT_LIMIT avec shift')
label = LABEL_LEFT_LIMIT
#TODO: force to label anyway the spike if spike is at the left of FIFO
else:
waveform = residual[left_ind:left_ind+peak_width,:]
#~ print(' second estimate jitter')
new_label, new_jitter = self.estimate_one_jitter(waveform, label=label)
#~ new_label, new_jitter = self.estimate_one_jitter(waveform, label=None)
if np.abs(new_jitter)<np.abs(prev_jitter):
#~ print('keep shift')
label, jitter = new_label, new_jitter
local_index += shift
else:
#~ print('no keep shift worst jitter')
pass
#security if with jitter the index is out
if label>=0:
local_pos = local_index - np.round(jitter).astype('int64') + n_left
if local_pos<0:
label = LABEL_LEFT_LIMIT
elif (local_pos+peak_width) >=residual.shape[0]:
label = LABEL_RIGHT_LIMIT
return Spike(local_index, label, jitter)
def estimate_one_jitter(self, waveform, label=None):
"""
Estimate the jitter for one peak given its waveform
label=None general case
        label is not None when estimate_one_jitter is called a second time,
        which frequently happens when abs(jitter) > 0.5
Method proposed by <NAME> see:
https://hal.archives-ouvertes.fr/hal-01111654v1
http://christophe-pouzat.github.io/LASCON2016/SpikeSortingTheElementaryWay.html
for best reading (at least for me SG):
        * wf = the waveform of the peak
* k = cluster label of the peak
* wf0, wf1, wf2 : center of catalogue[k] + first + second derivative
* jitter0 : jitter estimation at order 0
* jitter1 : jitter estimation at order 1
* h0_norm2: error at order0
* h1_norm2: error at order1
* h2_norm2: error at order2
"""
# This line is the slower part !!!!!!
# cluster_idx = np.argmin(np.sum(np.sum((catalogue['centers0']-waveform)**2, axis = 1), axis = 1))
catalogue = self.catalogue
if label is None:
#~ if self.use_opencl_with_sparse:
if self.argmin_method == 'opencl':
t1 = time.perf_counter()
rms_waveform_channel = np.sum(waveform**2, axis=0).astype('float32')
pyopencl.enqueue_copy(self.queue, self.one_waveform_cl, waveform)
pyopencl.enqueue_copy(self.queue, self.rms_waveform_channel_cl, rms_waveform_channel)
event = self.kern_waveform_distance(self.queue, self.cl_global_size, self.cl_local_size,
self.one_waveform_cl, self.catalogue_center_cl, self.sparse_mask_cl,
self.rms_waveform_channel_cl, self.waveform_distance_cl)
pyopencl.enqueue_copy(self.queue, self.waveform_distance, self.waveform_distance_cl)
cluster_idx = np.argmin(self.waveform_distance)
t2 = time.perf_counter()
#~ print(' np.argmin opencl_with_sparse', (t2-t1)*1000., cluster_idx)
#~ elif self.use_pythran_with_sparse:
elif self.argmin_method == 'pythran':
s = pythran_tools.pythran_loop_sparse_dist(waveform,
catalogue['centers0'], self.sparse_mask)
cluster_idx = np.argmin(s)
elif self.argmin_method == 'numba':
s = numba_loop_sparse_dist(waveform, catalogue['centers0'], self.sparse_mask)
cluster_idx = np.argmin(s)
elif self.argmin_method == 'numpy':
                # replaced by this (identical but faster, a bit)
#~ t1 = time.perf_counter()
d = catalogue['centers0']-waveform[None, :, :]
d *= d
#s = d.sum(axis=1).sum(axis=1) # intuitive
#s = d.reshape(d.shape[0], -1).sum(axis=1) # a bit faster
s = np.einsum('ijk->i', d) # a bit faster
cluster_idx = np.argmin(s)
#~ t2 = time.perf_counter()
#~ print(' np.argmin V2', (t2-t1)*1000., cluster_idx)
else:
raise(NotImplementedError())
        k = catalogue['cluster_labels'][cluster_idx]
<reponame>Smear-Lab/Olfactory_Search
#Misc
import os, time, argparse
import h5py, json
import glob, fnmatch,pdb
from tqdm import tqdm
import multiprocessing
#Base
import numpy as np
import pandas as pd
import scipy.stats as st
from sklearn.model_selection import StratifiedKFold
#Plotting
import matplotlib
matplotlib.use('Agg')
import seaborn as sns
from matplotlib import pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.gridspec as gridspec
#State-Space Modeling
#S.Linderman
import ssm
#M.Johnson
from pyhsmm.util.text import progprint_xrange
from pybasicbayes.distributions import Gaussian, AutoRegression
import autoregressive.models as pyhmm
#User Modules
import utilities as util
import plotting_YAA as plots_YAA
##===== Run Command =====##
# OMP_NUM_THREADS=1 python olfactory_search_xval.py --model_type "ARHMM_MJ" --Kmin 12 --Kmax 20
##===== ============================ =====##
##===== Parse Command Line Arguments =====##
parser = argparse.ArgumentParser(description='ARHMM Mouse')
parser.add_argument('--save',type=bool, default=1,
help='Save Results?')
parser.add_argument('--json_dir', type=str,
help='Directory Path of model parameter json file; not required if using other arguments')
##===== Data Options =====##
parser.add_argument('--mID',type=str, default='all_mice',
help='mouse to fit model to')
parser.add_argument('--condition', type=str, default='all_conds',
help='trial condition type')
parser.add_argument('--data_type', type=str, default='BHNx',
help='BHNx vs BHNxv vs EgoAllo_xv')
parser.add_argument('--HMM_inputs', type=str, default='BHNx',
help='BHNx vs BHNxv')
parser.add_argument('--x_units', type=str, default='pixels',
help='pixels or arena_length')
##===== Model Type =====##
parser.add_argument('--model_type', type=str, default='ARHMM_MJ',
help='ARHMM_SL or ARHMM_MJ')
parser.add_argument('--robust', type=bool, default=0,
help='autoregressive(0) or robust_autoregressive(1)')
parser.add_argument('--sticky', type=bool, default=0,
help='standard(0) or sticky(1) ARHMM')
parser.add_argument('--inputdriven', type=bool, default=0,
help='HMM transitions dependent on some input in addition to previous HMM state')
##===== Model Parameters =====##
parser.add_argument('--kappa', type=float, default=1e5,
help='sticky arhmm kappa')
parser.add_argument('--AR_lags', type=str, default=1,
help='Autoregressive lags')
parser.add_argument('--l2_penalty_A', type=float, default=0,
help='AR l2_penalty_A')
parser.add_argument('--l2_penalty_b', type=float, default=0,
help='AR l2_penalty_b')
parser.add_argument('--l2_penalty_V', type=float, default=0,
help='AR l2_penalty_V')
parser.add_argument('--MAP_threshold', type=float, default=0.80,
help='MAP threshold')
parser.add_argument('--nGibbs', type=int, default=200,
help='number of iterations to run the Gibbs sampler')
parser.add_argument('--burn_fraction', type=float, default=0.66,
                    help='fraction of Gibbs samples discarded as burn-in; e.g. with nGibbs = 200 and burn_fraction = 0.66, the first 132 samples are discarded and the MAP sequence is computed from the remaining 68')
##===== Run Options =====##
parser.add_argument('--Kmin', type=int, default=80,
help='minimum number of HMM states')
parser.add_argument('--Kmax', type=int, default=100,
help='maximum number of HMM states')
parser.add_argument('--kXval', type=int, default=5,
help='number of kfold')
parser.add_argument('--EM_tolerance', type=float, default=1e-5,
help='SSM EM algorithm tolerance')
parser.add_argument('--EM_iters', type=int, default=200,
help='EM Iterations')
parser.add_argument('--max_processes', type=int, default=18,
help='max # of parallel processes to run')
args = parser.parse_args()
def set_arhmm_hyperparams(opt,K):
D_obs = opt['D_obs']
Mobs = 0
#Autoregressive keyword arguments
ar_kwargs = dict(
# l2_penalty_A= args_dic['l2_penalty_A'],
# l2_penalty_b= args_dic['l2_penalty_b'],
# l2_penalty_V= args_dic['l2_penalty_V'],
lags = opt['AR_lags']
)
#HMM Transition parameters
trans_kwargs = dict(
# alpha= args_dic['alpha'],
)
#Gaussian or t-distribution
if not opt['robust']:
observation_type = "autoregressive"
else:
observation_type = "robust_autoregressive"
#What model are we going to run?
if not opt['inputdriven']:
M = 0
if not opt['sticky']:
if opt['model_type'] == 'ARHMM_MJ':
print('Bayesian ARHMM')
else:
print('Vanilla ARHMM')
transition_type = "standard"
else:
print('sticky ARHMM')
transition_type = "sticky"
trans_kwargs['kappa'] = opt['kappa']
else:
M = D_obs
# trans_kwargs['l2_penalty'] = args_dic['l2_penalty_W'] #coeff of l2-regul penalty on W (weights of logistic regression)
transition_type = "inputdriven"
if not opt['sticky']:
print('input-driven ARHMM')
else:
print('input-driven sticky ARHMM')
trans_kwargs['kappa'] = opt['kappa']
#If we're using matt Johnsons code, most of the above parameters don't matter
#Initialize Observation distribution and set it to ar_kwargs
if opt['model_type'] == 'ARHMM_MJ':
affine = True
dynamics_hypparams = \
dict(nu_0=D_obs + 2,
S_0=np.eye(D_obs),
M_0=np.hstack((np.eye(D_obs), np.zeros((D_obs,int(affine))))),
K_0=np.eye(D_obs + affine),
affine=affine)
# Initialize a list of autorgressive objects given the size of the
# observations and number of max discrete states
ar_kwargs = [AutoRegression(A=np.column_stack((0.99 * np.eye(D_obs),\
np.zeros((D_obs, int(affine))))),sigma=np.eye(D_obs),\
**dynamics_hypparams) for _ in range(K)]
return D_obs, M, Mobs, observation_type, ar_kwargs, transition_type, trans_kwargs
def make_hyperparams_dic(opt, K, M, trans_kwargs, ar_kwargs):
hyperparams = opt.copy()
del hyperparams['Kmin'], hyperparams['Kmax']
hyperparams['K'] = K
hyperparams['M'] = M
# hyperparams['Mobs'] = Mobs
hyperparams['trans_kwargs'] = trans_kwargs
if opt['model_type'] == 'ARHMM_SL':
hyperparams['ar_kwargs'] = ar_kwargs
return hyperparams
def arhmm_bayesian_fit(arhmm, data_train, data_test, opt, i_fold):
# Add test data to ARHMM
for data in data_train:
# Add data per trial
arhmm.add_data(data)
    #Create data structures to contain Gibbs samples
nGibbs = opt['nGibbs']
nTrials = len(data_train)
K = arhmm.num_states; D_obs = arhmm.D;
stateseq_smpls = [[] for i in range(nTrials)]
AB_smpls = np.zeros((nGibbs,K,D_obs,D_obs+1))
sqrt_sigmas_smpls = np.zeros((nGibbs,K,D_obs,D_obs))
trans_matrix_smpls = np.zeros((nGibbs,K,K))
GibbsLLs = np.zeros((nGibbs))
# Loop over samples
for iSample in tqdm(range(nGibbs)):
# Sample Model
arhmm.resample_model()
#keep track of model log_likelihood's as a check for "convergence"
GibbsLLs[iSample] = arhmm.log_likelihood()
# Append each Gibbs sample for each trial
for iTrial in range(len(arhmm.states_list)):
stateseq_smpls[iTrial].append(arhmm.states_list[iTrial].stateseq.copy())
# Append the ARHMM matrix A and transition matrix for this sample
for state in range(K):
AB_smpls[iSample,state] = arhmm.obs_distns[state].A.copy()
sqrt_sigmas_smpls[iSample,state] = np.linalg.cholesky(arhmm.obs_distns[state].sigma)
trans_matrix_smpls[iSample] = arhmm.trans_distn.trans_matrix.copy()
    # Calculate the mean A, B, and transition matrix over the post-burn-in samples
burn = opt['burn_fraction']
ABs_mean = np.mean(AB_smpls[int(burn*nGibbs):],axis=0)
As = ABs_mean[:,:,:D_obs]; Bs = ABs_mean[:,:,D_obs]
sqrt_Sigmas = np.mean(sqrt_sigmas_smpls[int(burn*nGibbs):],axis=0)
obs = {'ABs': ABs_mean, 'As': As,'Bs': Bs, 'sqrt_Sigmas': sqrt_Sigmas}
log_mean_transition_matrix = np.log(np.mean(trans_matrix_smpls[int(burn*nGibbs):,:,:],axis=0))
trans = {'log_Ps': log_mean_transition_matrix}
init = {'P0': arhmm.init_state_distn.pi_0}
param_dict = {}
param_dict['transitions'] = trans
param_dict['observations'] = obs
param_dict['init_state_distn'] = init
#llhood of heldout
ll_heldout = arhmm.log_likelihood(data=data_test)
state_usage = arhmm.state_usages
    #Lists to contain the important results
trMAPs = []
trPosteriors = []
trMasks = []
#Plot convergence here
SaveDir, fname_sffx = util.make_sub_dir(K, opt, i_fold)
plots_YAA.plot_model_convergence(stateseq_smpls, AB_smpls, trans_matrix_smpls, GibbsLLs, sorted(arhmm.used_states), SaveDir, fname='-'.join(('Model_convergence',fname_sffx))+'.pdf')
#All of the data has been used to fit the model
#All of the data is contained with the ARHMM object already
if i_fold == -1:
#Calculate the MAP estimate
for iTrial in range(nTrials):
# Take the gibbs samples after the burn fraction to construct MAP
z_smpls = np.array(stateseq_smpls[iTrial][int(burn*nGibbs):])
state_probs_trial = []
for state in range(K):
state_occurances = np.isin(z_smpls,state)
state_probs_trial.append(np.sum(state_occurances,axis=0)/z_smpls.shape[0])
#Save the maximum posterior probability for each time step
pprob = np.vstack((np.zeros((1,K)),np.array(state_probs_trial).T))
trPosteriors.append(pprob)
mask = np.max(pprob,axis=1) < opt['MAP_threshold']
trMasks.append(mask)
#Use the maximum posterior probability to determine a robust MAP State sequence
MAP = np.hstack(([-1],np.ndarray.flatten(st.mode(z_smpls)[0])))
#Add MAP to list
trMAPs.append(MAP)
ll_heldout
#Else this is a fold of the x-validation
else:
#Get the state sequences and state marginal distributions of the heldout data
for data in data_test:
#Get state marginals
state_marginals = arhmm.heldout_state_marginals(data)
trPosteriors.append(state_marginals)
#Create mask
mask = np.max(state_marginals,axis=1) < opt['MAP_threshold']
trMasks.append(mask)
#Get the state sequence with the max probability
stateseq = np.argmax(state_marginals,axis=1)
trMAPs.append(stateseq)
return trMAPs, trPosteriors, trMasks, state_usage, ll_heldout, param_dict, GibbsLLs
def map_seq_n_usage(arhmm, data_test, opt, inputs=None):
"""
Compute the local MAP state (arg-max of marginal state probabilities at each time step)
and overall state usages.
    thresh: if the marginal probability of the MAP state is below the threshold,
    replace it with np.nan (or rather, output a mask array flagging those time steps)
Also output average state usages and the marginal state probabilities
"""
T = 0; ll_heldout = 0
state_usage = np.zeros(arhmm.K)
trMAPs = []
trPosteriors = []
trMasks = []
#Loop over data to obtain MAP sequence for each trial
for index, data in enumerate(data_test):
#Get state probabilities and log-likelihood
if opt['inputdriven']:
inputdata = inputs[index]
Ez, _, ll = arhmm.expected_states(data,input=inputdata)
else:
Ez, _, ll = arhmm.expected_states(data)
#Update number of data points, state usage, and llood of data
T += Ez.shape[0]
state_usage += Ez.sum(axis=0)
ll_heldout += ll
#maximum a posteriori probability estimate of states
map_seq = np.argmax(Ez,axis=1)
max_prob = Ez[np.r_[0:Ez.shape[0]],map_seq]
#Save sequences
trMAPs.append(map_seq)
trPosteriors.append(Ez)
trMasks.append(max_prob < opt['MAP_threshold'])
#Normalize
state_usage /= T
#Get parameters from ARHMM object
param_dict = util.params_to_dict(arhmm.params, HMM_INPUTS = opt['inputdriven'], ROBUST = opt['robust'])
return trMAPs, trPosteriors, trMasks, state_usage, ll_heldout, param_dict
def fit_arhmm_get_llhood(data_list, trsum, K, opt, train_inds=None, test_inds=None, i_fold=-1):
#Go!
startTime = time.time()
#Separate the data into a training and test set based on the indices given
if train_inds is not None and test_inds is not None:
data_train = [data_list[ii] for ii in train_inds]
data_test = [data_list[ii] for ii in test_inds]
trsum_test = trsum.iloc[test_inds]
else:
#fit model on all data
data_train = data_list
data_test = data_list
trsum_test = trsum
#adding 10 so i_fold == -1 case doesn't give error
np.random.seed(10+i_fold)
# set hyperparameters
D_obs, M, Mobs, observation_type, ar_kwargs, transition_type, trans_kwargs = set_arhmm_hyperparams(opt,K)
##===== Create the ARHMM object either from Scott's package =====##
if opt['model_type'] == 'ARHMM_SL':
arhmm = ssm.HMM(K, D_obs, M=M,
observations=observation_type, observation_kwargs=ar_kwargs,
transitions=transition_type, transition_kwargs=trans_kwargs)
if opt['inputdriven']:
#Separate inputs from the data_list into training and test sets
raise Exception('TODO: Separate inputs from the data_list into training and test sets')
else:
inputs_train = None
inputs_test = None
##===== Fit on training data =====##
model_convergence = arhmm.fit(data_train, inputs=inputs_train, method="em", num_em_iters=opt['EM_iters'], tolerance=opt['EM_tolerance'])
    #Get MAP sequences for heldout data (or all of the data if this isn't part of the cross-validation)
np.sort(self.Z,axis=None).reshape([self.M,1])
self.Z = self.Z.reshape([self.Z.shape[0],1])
# Run the selected test and get likelihoods for all genes
def run_test(self,lik_name,models_number,genes_index,branching = False):
genes_results = {}
genes_state = {}
self.Y = self.Y_copy
self.models_number = models_number
self.lik_name = lik_name
self.optimize = True
#column names for likelihood dataframe
if self.models_number == 1:
column_name = ['Dynamic_model_log_likelihood']
elif self.models_number == 2:
column_name = ['Dynamic_model_log_likelihood','Constant_model_log_likelihood','log_likelihood_ratio']
else:
column_name = ['Shared_log_likelihood','model_1_log_likelihood','model_2_log_likelihood','log_likelihood_ratio']
for self.index in tqdm(genes_index):
self.y = self.Y[self.index].astype(float)
self.y = self.y.reshape([-1,1])
results = self.fit_single_gene(column_name)
genes_results[self.genes_name[self.index]] = results
return pd.DataFrame.from_dict(genes_results, orient='index', columns= column_name)
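    # Example (hypothetical usage): with `gpc` an instance of this class whose
    # X and Y have been set, a one-sample vs constant-model test over all genes
    # could be run as
    #     results = gpc.run_test('Negative_binomial', models_number=2,
    #                            genes_index=range(gpc.Y.shape[0]))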
    # fit models_number GPs to run the selected test
    def fit_single_gene(self,column_name,reset=False):
if self.models_number == 1:
col_name = 0
else:
col_name = 2
self.model_index = 1
model_1_log_likelihood = self.fit_model()
results = [model_1_log_likelihood]
if self.models_number == 2:
if not(np.isnan(model_1_log_likelihood)):
if self.lik_name == 'Negative_binomial':
self.lik_alpha = self.model.likelihood.alpha.numpy()
if self.lik_name == 'Zero_inflated_negative_binomial':
self.lik_km = self.model.likelihood.km.numpy()
self.lik_alpha = self.model.likelihood.alpha.numpy()
self.model_index = 2
model_2_log_likelihood= self.fit_model()
if not(np.isnan(model_2_log_likelihood)):
ll_ratio = model_1_log_likelihood - model_2_log_likelihood
if np.isnan(model_1_log_likelihood) or np.isnan(model_2_log_likelihood):
model_2_log_likelihood = np.nan
ll_ratio = np.nan
results = [model_1_log_likelihood,model_2_log_likelihood,ll_ratio]
if self.models_number == 3:
X_df = pd.DataFrame(data=self.X,index= self.cells_name,columns= ['times'])
Y_df = pd.DataFrame(data=self.Y_copy,index= self.genes_name,columns= self.cells_name)
# initialize X and Y with first time series
self.set_X_Y(X_df[0 : int(self.N/2)],Y_df.iloc[:,0:int(self.N/2)])
self.y = self.Y[self.index].astype(float)
self.y = self.y.reshape([self.N,1])
self.model_index = 2
model_2_log_likelihood = self.fit_model()
# initialize X and Y with second time series
self.set_X_Y(X_df[self.N : :],Y_df.iloc[:,int(self.N) : :])
self.y = self.Y[self.index].astype(float)
self.y = self.y.reshape([self.N,1])
self.model_index = 3
model_3_log_likelihood = self.fit_model()
self.set_X_Y(X_df,Y_df)
if np.isnan(model_1_log_likelihood) or np.isnan(model_2_log_likelihood) or np.isnan(model_3_log_likelihood):
ll_ratio = np.nan
else:
ll_ratio = ((model_2_log_likelihood+model_3_log_likelihood)-model_1_log_likelihood)
results = [model_1_log_likelihood,model_2_log_likelihood,model_3_log_likelihood,ll_ratio]
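            # The two-sample statistic compares the sum of the two
            # separate-model log-likelihoods against the shared model: a large
            # positive ratio favors fitting the two time series separately.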
return results
    #Save and get the log-likelihood of a successful fit; set the likelihood to NaN in case of failure
def fit_model(self,reset = False):
fit = self.fit_GP(reset)
        if fit: # save the model in case of a successful fit
            if self.sparse and self.lik_name != 'Gaussian':
log_likelihood = self.model.log_posterior_density((self.X,self.y)).numpy()
else:
log_likelihood = self.model.log_posterior_density().numpy()
# fix positive likelihood by random restart
            if log_likelihood > 0 and self.count_fix < 10 and self.safe_mode and self.lik_name != 'Gaussian':
self.count_fix = self.count_fix + 1
log_likelihood = self.fit_model(True)
if not np.isnan(log_likelihood):
filename = self.get_file_name()
ckpt = tf.train.Checkpoint(model=self.model, step=tf.Variable(1))
ckpt.write(filename)
        else: # set log likelihood to NaN in case of Cholesky decomposition or optimization failure
log_likelihood = np.nan
self.model = np.nan
return log_likelihood
def fit_GP(self,reset = False):
self.init_hyper_parameters(reset=reset)
fit = True
try:
fit = self.fit_GP_with_likelihood()
except tf.errors.InvalidArgumentError as e:
if self.count_fix < 10: # fix failure by random restart
fit = self.fit_GP(True)
else:
print('Can not fit a Gaussian process, Cholesky decomposition was not successful.')
fit = False
if fit and self.optimize and self.count_fix < 5 and not self.branching and self.safe_mode:
self.test_local_optima_case1()
return fit
# Fit a GP with selected kernel,likelihood,run it as sparse or full GP
def fit_GP_with_likelihood(self):
fit = True
#select kernel RBF,constant or branching kernel
if self.hyper_parameters['ls'] == -1.: # flag to fit constant kernel
kern = gpflow.kernels.Constant(variance= self.hyper_parameters['var'])
elif self.kernel:
if 'linear' in self.kernel:
kern = gpflow.kernels.Linear(variance = self.hyper_parameters['var'])
print('Fitting GP with Linear Kernel')
self.K = 3
elif 'periodic' in self.kernel:
kern = gpflow.kernels.Periodic((gpflow.kernels.SquaredExponential(variance = self.hyper_parameters['var'],lengthscales = self.hyper_parameters['ls'])))
print('Fitting GP with Periodic Kernel')
self.K = 4
else:
kern = gpflow.kernels.RBF(variance= self.hyper_parameters['var'],
lengthscales = self.hyper_parameters['ls'])
print('Fitting GP with RBF Kernel')
self.K = 4
else:
kern = gpflow.kernels.RBF(variance= self.hyper_parameters['var'],
lengthscales = self.hyper_parameters['ls'])
if self.branching:
del kern
if self.fix:
kern = gpflow.kernels.RBF(variance=self.branching_kernel_var,
lengthscales=self.branching_kernel_ls)
set_trainable(kern.lengthscales,False)
set_trainable(kern.variance,False)
else:
kern = gpflow.kernels.RBF()
kernel = branchingKernel.BranchKernel(kern,self.xp)
else:
kernel = kern
#select likelihood
if self.lik_name == 'Poisson':
likelihood = gpflow.likelihoods.Poisson()
if self.lik_name == 'Negative_binomial':
# library size scaling
if self.nb_scaled:
scale=pd.DataFrame(self.scale)
self.Scale = self.scale.iloc[:,self.index]
self.Scale=np.array(self.Scale)
self.Scale=np.transpose([self.Scale] * 20)
likelihood = NegativeBinomialLikelihood.NegativeBinomial(self.hyper_parameters['alpha'],scale=self.Scale,nb_scaled=self.nb_scaled)
else:
likelihood = NegativeBinomialLikelihood.NegativeBinomial(self.hyper_parameters['alpha'],nb_scaled=self.nb_scaled)
if self.lik_name == 'Zero_inflated_negative_binomial':
likelihood = NegativeBinomialLikelihood.ZeroInflatedNegativeBinomial(self.hyper_parameters['alpha'],self.hyper_parameters['km'])
# Run model with selected kernel and likelihood
if self.lik_name == 'Gaussian':
if self.transform: # use log(count+1) in case of Gaussian likelihood and transform
self.y = np.log(self.y+1)
if self.sparse:
if self.ConditionalVariance:
init_method = ConditionalVariance()
self.Z = init_method.compute_initialisation(self.X, self.M, kernel)[0]
self.model = gpflow.models.SGPR((self.X,self.y), kernel=kernel,inducing_variable=self.Z)
if self.model_index == 2 and self.models_number == 2:
set_trainable(self.model.inducing_variable.Z,False)
else:
self.model = gpflow.models.GPR((self.X,self.y), kernel)
training_loss = self.model.training_loss
else:
if self.sparse:
if self.ConditionalVariance:
init_method = ConditionalVariance()
self.Z = init_method.compute_initialisation(self.X, self.M, kernel)[0]
self.model = gpflow.models.SVGP( kernel ,likelihood,self.Z)
training_loss = self.model.training_loss_closure((self.X, self.y))
if self.model_index == 2 and self.models_number == 2:
set_trainable(self.model.inducing_variable.Z,False)
else:
self.model = gpflow.models.VGP((self.X, self.y) , kernel , likelihood)
training_loss = self.model.training_loss
if self.optimize:
if self.ConditionalVariance:
set_trainable(self.model.inducing_variable.Z,False)
o = gpflow.optimizers.Scipy()
res = o.minimize(training_loss, variables=self.model.trainable_variables,options=dict(maxiter=5000))
if not(res.success): # test if optimization fail
if self.count_fix < 10: # fix failure by random restart
#print('Optimization fail.')
fit = self.fit_GP(True)
else:
print('Can not Optimaize a Gaussian process, Optimization fail.')
fit = False
return fit
def get_file_name(self):
if not os.path.exists(self.folder_name):
os.mkdir(self.folder_name)
filename = self.folder_name+self.lik_name+'_'
if self.sparse:
filename += 'sparse_'
if self.models_number == 3:
filename += 'tst_'
filename += self.genes_name[self.index]+'_model_'+str(self.model_index)
return filename
# user assign the default values for hyper_parameters
def initialize_hyper_parameters(self,length_scale = None,variance = None,alpha = None,km = None):
if length_scale is None:
self.hyper_parameters['ls'] = (5*(np.max(self.X)-np.min(self.X)))/100
else:
self.hyper_parameters['ls'] = length_scale
if variance is None:
if self.lik_name == 'Gaussian' and not self.transform:
self.hyper_parameters['var'] = np.mean(self.y+1**2)
else:
self.hyper_parameters['var'] = np.mean(np.log(self.y+1)**2)
else:
self.hyper_parameters['var'] = variance
if alpha is None:
self.hyper_parameters['alpha'] = 1.
else:
self.hyper_parameters['alpha'] = alpha
if km is None:
self.hyper_parameters['km'] = 35.
else:
self.hyper_parameters['km'] = km
self.user_hyper_parameters = [length_scale,variance,alpha,km]
# Hyper-parameters initialization or restting in case of failure
def init_hyper_parameters(self,reset = False):
if not reset:
self.seed_value = 0
self.count_fix = 0
np.random.seed(self.seed_value)
self.initialize_hyper_parameters(self.user_hyper_parameters[0],self.user_hyper_parameters[1],
self.user_hyper_parameters[2],self.user_hyper_parameters[3])
# in case of failure change the seed and sample hyper-parameters from uniform distributions
if reset:
self.count_fix = self.count_fix +1
self.seed_value = self.seed_value + 1
np.random.seed(self.seed_value)
self.hyper_parameters['ls'] = np.random.uniform((.25*(np.max(self.X)-np.min(self.X)))/100 ,(30.*(np.max(self.X)-np.min(self.X)))/100)
self.hyper_parameters['var'] = np.random.uniform(0. ,10.)
self.hyper_parameters['alpha'] = np.random.uniform(0., 10.)
self.hyper_parameters['km'] = np.random.uniform(0., 100.)
# set ls to 1000 in case of one sample test when fit the constant model
if self.model_index == 2 and self.models_number == 2:
self.hyper_parameters['ls'] = -1.
if self.optimize and self.count_fix == 0:
if self.lik_name == 'Negative_binomial':
self.hyper_parameters['alpha'] = self.lik_alpha
else:
#save likelihood parameters to initialize constant model
self.lik_alpha = None
self.lik_km = None
if not self.branching:
self.fix = False # fix kernel hyper-parameters
# reset gpflow graph
tf.compat.v1.get_default_graph()
tf.compat.v1.set_random_seed(self.seed_value)
tf.random.set_seed(self.seed_value)
gpflow.config.set_default_float(np.float64)
self.y = self.Y[self.index].astype(float)
self.y = self.y.reshape([-1,1])
self.model = None
self.var = None
self.mean = None
def generate_Samples_from_distribution(self,mean):
y = []
if self.lik_name == 'Poisson':
for i in range(mean.shape[0]):
y.append(ss.poisson.rvs(mean[i], size = 500))
if self.lik_name == 'Negative_binomial':
if self.model.likelihood.alpha.numpy() == 0:
for i in range(mean.shape[0]):
y.append(ss.poisson.rvs(mean[i], size = 500))
else:
r = 1./self.model.likelihood.alpha.numpy() # r number of failures
prob = r / (mean+ r) # p probability of success
for i in range(mean.shape[0]):
y.append(ss.nbinom.rvs(r, prob[i], size = 500))
if self.lik_name == 'Zero_inflated_negative_binomial':
r = 1./self.model.likelihood.alpha.numpy() # r number of failures
prob = r / (mean+ r) # p probability of success
km = self.model.likelihood.km.numpy() # Michaelin-Menten (MM) constant
psi = 1.- (mean/(km+mean)) # psi probability of zeros
for i in range(mean.shape[0]):
B = ss.bernoulli.rvs(size=1,p = 1-psi[i])
if B == 0:
y.append(np.zeros(500))
else:
y.append(ss.nbinom.rvs(r, prob[i], size = 500))
y = np.vstack(y)
return y
def samples_posterior_predictive_distribution(self,xtest):
var = []
f_samples = []
for i in range(20):
f_samples.append(self.model.predict_f_samples(xtest, 5))
f = np.vstack(f_samples)
link_f = np.exp(f[:, :, 0])
var.append(self.generate_Samples_from_distribution(np.mean(link_f, 0)).T)
var = np.vstack(var)
if self.branching:
mean = np.mean(link_f, axis=0)
else:
mean = np.mean(var,axis = 0)
mean = savgol_filter(np.mean(var,axis = 0), int(xtest.shape[0]/2)+1, 3)
mean = [(i > 0) * i for i in mean]
return mean,var
| |
(isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
def delete_folder(self, catalog_id, data_asset_key, folder_key, **kwargs):
"""
Deletes a specific folder of a data asset identified by it's key.
:param str catalog_id: (required)
Unique catalog identifier.
:param str data_asset_key: (required)
Unique data asset key.
:param str folder_key: (required)
Unique folder key.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call
for a resource, set the `if-match` parameter to the value of the
etag from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the etag you
provide matches the resource's current etag value.
:param str opc_request_id: (optional)
The client request ID for tracing.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
"""
resource_path = "/catalogs/{catalogId}/dataAssets/{dataAssetKey}/folders/{folderKey}"
method = "DELETE"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"delete_folder got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"catalogId": catalog_id,
"dataAssetKey": data_asset_key,
"folderKey": folder_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
def delete_folder_tag(self, catalog_id, data_asset_key, folder_key, tag_key, **kwargs):
"""
Deletes a specific folder tag.
:param str catalog_id: (required)
Unique catalog identifier.
:param str data_asset_key: (required)
Unique data asset key.
:param str folder_key: (required)
Unique folder key.
:param str tag_key: (required)
Unique tag key.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call
for a resource, set the `if-match` parameter to the value of the
etag from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the etag you
provide matches the resource's current etag value.
:param str opc_request_id: (optional)
The client request ID for tracing.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
"""
resource_path = "/catalogs/{catalogId}/dataAssets/{dataAssetKey}/folders/{folderKey}/tags/{tagKey}"
method = "DELETE"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"delete_folder_tag got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"catalogId": catalog_id,
"dataAssetKey": data_asset_key,
"folderKey": folder_key,
"tagKey": tag_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
def delete_glossary(self, catalog_id, glossary_key, **kwargs):
"""
Deletes a specific glossary identified by it's key.
:param str catalog_id: (required)
Unique catalog identifier.
:param str glossary_key: (required)
Unique glossary key.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call
for a resource, set the `if-match` parameter to the value of the
etag from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the etag you
provide matches the resource's current etag value.
:param str opc_request_id: (optional)
The client request ID for tracing.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
"""
resource_path = "/catalogs/{catalogId}/glossaries/{glossaryKey}"
method = "DELETE"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"delete_glossary got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"catalogId": catalog_id,
"glossaryKey": glossary_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
def delete_job(self, catalog_id, job_key, **kwargs):
"""
Deletes a specific job identified by it's key.
:param str catalog_id: (required)
Unique catalog identifier.
:param str job_key: (required)
Unique job key.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call
for a resource, set the `if-match` parameter to the value of the
etag from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the etag you
provide matches the resource's current etag value.
:param str opc_request_id: (optional)
The client request ID for tracing.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
"""
resource_path = "/catalogs/{catalogId}/jobs/{jobKey}"
method = "DELETE"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"delete_job got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"catalogId": catalog_id,
"jobKey": job_key
}
path_params = {k: v for (k, v) | |
<reponame>dagmartin/ldpush<gh_stars>1-10
#!/usr/bin/python
#
# Copyright 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Connections via pexpect to SSH and Telnet endpoints.
By deliberate side-effect, this module overwrites pexpect.spawn.__select
with an implementation based on poll(), to support use with higher file
descriptors than supported by select().
"""
import errno
import os
import re
import select
import socket
import time
import paramiko
import pexpect
import gflags
import logging
import sshclient
import push_exceptions as exceptions
FLAGS = gflags.FLAGS
TIMEOUT_DEFAULT = 20.0
class Error(Exception):
pass
class ConnectionError(Error):
"""The connection failed due to an error."""
class TimeoutError(Error):
"""The operation timed-out."""
class OperationFailedError(Error):
"""The sub-process had a non-zero exit status."""
class ScpError(Error):
"""An error occurred during an SCP operation."""
def _SelectViaPoll(_, rfds, wfds, efds, timeout):
"""poll() based replacement for pexpect.spawn.__select().
As mentioned in the module docstring, this is required since Python's select
is unable to wait for events on high-numbered file descriptors. The API is
as per select.select(), however if we are interrupted by a signal, we wait
again for the remaining time.
Args:
_: An object, self, unused.
rfds: A list, file descriptors to check for read.
wfds: A list, file descriptors to check for write.
efds: A list, file descriptors to check for exceptions.
timeout: A float, timeout (seconds).
Returns:
A tuple of three lists, being the descriptors in each of the incoming lists
which are ready for read, write or have an exception, respectively.
"""
if wfds or efds:
logging.fatal('Unexpected code change in pexpect: __select '
'called with wfds=%s efds=%s', wfds, efds)
p = select.poll()
for fd in rfds:
p.register(fd, select.POLLIN)
# See pexpect.spawn.__select for timeout handling logic; this is the same
# in select() and poll(), except that the timeout argument to poll() is
# in milliseconds. poll() raises the same exception on timeout as select().
if timeout is not None:
end_time = time.time() + timeout
while True:
try:
fdstate = p.poll(int(timeout * 1000) if timeout is not None else None)
# Build a list of descriptors which select() would return as 'available
# for read' (which includes EOF conditions which may be indicated as
# POLLIN, POLLHUP or POLLIN|POLLHUP, depending on the type of file
# descriptor).
rrfds = []
for fd, state in fdstate:
if state & select.POLLIN or state & select.POLLHUP:
rrfds.append(fd)
return (rrfds, [], [])
except select.error as e:
if e[0] == errno.EINTR:
if timeout is not None:
timeout = end_time - time.time()
if timeout < 0:
return ([], [], [])
else:
raise
# Override pexpect.spawn.__select as mentioned in module docstring.
pexpect.spawn._spawn__select = _SelectViaPoll
class Connection(object):
"""The base class for pexpect connections."""
def __init__(self, host, username, password=<PASSWORD>, success=None,
connect_command=None, timeout=None, find_prompt=False,
enable_password=None, find_prompt_prefix=None,
find_prompt_suffix=''):
"""Initializer.
Args:
host: A string, the hostname or IP address to connect to.
username: A string, the username to use on the connection.
password: A string, the password to use on the connection.
success: A string, the string to expect to trigger successful completion.
connect_command: A string, the command to connect (minus the host suffix).
timeout: A float, the number of seconds before a connection times out.
find_prompt: A bool, if true then success is a regexp and it's group(1)
should be used to build self._prompt.
enable_password: A string, the enable password to optionally use.
find_prompt_prefix: A string, the prefix to put before group(1) from the
success regexp to build self._prompt, if find_prompt is true.
"""
self._connect_timeout = timeout or TIMEOUT_DEFAULT
self._host = host
self._username = username
self._password = password
self._success = success
self._find_prompt = find_prompt
self._connect_command = connect_command
self._enable_password = <PASSWORD>
self._find_prompt_prefix = (
r'(?:^|\n)' if find_prompt_prefix is None else find_prompt_prefix)
self._find_prompt_suffix = find_prompt_suffix
self.child = None
def _MaybeFindPrompt(self):
"""Enable if necessary, then perform prompt discovery if required."""
if self._enable_password:
# Enable before prompt discovery. Set a broad prompt expression.
password_sent = False
logging.debug('Enabling on %r', self._host)
self.child.sendline('enable')
while True:
i = self.child.expect(
[self._success, 'Password:', '<PASSWORD>',
'Password: <PASSWORD>!'], timeout=10)
if i == 0:
# Found the prompt, we're enabled.
logging.debug('We are enabled')
break
elif i == 1 and not password_sent:
self.child.sendline(self._enable_password)
logging.debug('Sent enable password to %r', self._host)
password_sent = True
else:
logging.debug('Got index %d back from expect', i)
# Sleep momentarily before expecting again, break buffer swap races.
time.sleep(0.05)
if self._find_prompt:
host = re.escape(self.child.match.group(1))
if len(self.child.match.groups()) > 1:
mode = re.escape(self.child.match.group(2))
else:
mode = ''
try:
self._prompt = (
self._find_prompt_prefix + host + self._find_prompt_suffix + mode)
self.re_prompt = re.compile(self._prompt)
logging.debug('%s: prompt set to %r', self._host, self._prompt)
except IndexError:
logging.debug('%s: find_prompt set but no capture group - skipping',
self._host)
else:
self.re_prompt = re.compile(self._success)
class SocketSpawn(pexpect.spawn):
"""Wrapper around pexpect.spawn to use a supplied socket.
This class does not close the file; it assumes it is a Python socket
which will be held/destroyed by the caller.
"""
# pylint: disable=g-bad-name
def __init__(self, sock, *args, **kwargs):
pexpect.spawn.__init__(self, None, *args, **kwargs)
self.child_fd = sock.fileno()
self.closed = False
self.name = '<file descriptor %d>' % self.child_fd
def isalive(self):
if self.child_fd == -1:
return False
try:
os.fstat(self.child_fd)
return True
except OSError:
return False
def __del__(self):
return
def close(self):
return
def terminate(self, force=False):
_ = force
return
def kill(self, sig):
_ = sig
return
class SocketConnection(Connection):
"""IPv4 TCP socket connection class."""
def __init__(self, host, port, username, password=None, success=None,
timeout=None, initial_chat=None, find_prompt=False,
find_prompt_prefix=None):
"""Creates an IPv4 TCP socket connection.
Args:
host: As per parent.
port: An int, the port number to connect to.
username: As per parent.
password: <PASSWORD> parent.
success: As per parent.
timeout: As per parent.
initial_chat: A tuple of tuples, each tuple in this list is a string
to expect from the socket and a response; the chat must occur in the
exact order specified. Intended only for telnet option negotiation.
find_prompt: As per parent.
find_prompt_prefix: As per parent.
"""
super(SocketConnection, self).__init__(
host, username=username, password=password, success=success,
timeout=timeout, find_prompt=find_prompt,
find_prompt_prefix=find_prompt_prefix)
self._port = port
self._initial_chat = initial_chat
self._connect_timeout = timeout or TIMEOUT_DEFAULT
if success is None:
self._success = self._username+r'.*> '
def Connect(self):
"""Makes the connection."""
self._sock = socket.socket()
self._sock.settimeout(self._connect_timeout)
try:
self._sock.connect((self._host, self._port))
except socket.timeout:
raise TimeoutError(self._connect_timeout)
except socket.gaierror as e:
raise ConnectionError('Lookup failure for %r: %s' % (self._host, e[1]))
except socket.error as e:
raise ConnectionError('Connect failure for %r: %s' % (self._host, e[1]))
if self._initial_chat is not None:
try:
for expected_recv, to_send in self._initial_chat:
actual_recv = self._sock.recv(len(expected_recv))
if actual_recv == expected_recv:
self._sock.send(to_send)
else:
raise ConnectionError('Initial chat failure for %r: expected %r, '
'got %r' % (self._host, expected_recv,
actual_recv))
except socket.timeout:
logging.debug('Initial chat timeout for %r', self._host)
raise TimeoutError(self._connect_timeout)
self._sock.settimeout(None)
self.child = SocketSpawn(self._sock, maxread=8192)
self.child.timeout = self._connect_timeout
logging.debug('Socket connected to %r:%s', self._host, self._port)
responses = self.child.compile_pattern_list([
self._success,
r'[Ll]ogin|[Uu]ser[Nn]ame',
r'[Pp]assword:',
r'Permission denied|Authentication failed'])
self.exit_list = self.child.compile_pattern_list(pexpect.EOF)
while True:
try:
timeout = max(1, self._connect_timeout)
pattern = self.child.expect_list(responses, timeout=timeout)
logging.debug('Connect() matched responses[%d]', pattern)
if pattern == 0:
self._MaybeFindPrompt()
break
elif pattern == 1:
self.child.send(self._username+'\r')
elif pattern == 2:
self.child.send(self._password+'\r')
elif pattern == 3:
raise ConnectionError('Permission denied for %r' % self._host)
else:
raise ConnectionError('Unexpected pattern %d' % pattern)
except pexpect.TIMEOUT:
raise TimeoutError(timeout)
except pexpect.EOF as e:
raise ConnectionError(str(e))
return None
class SshSpawn(pexpect.spawn):
"""Wrapper around pexpect.spawn to use a Paramiko channel."""
# pylint: disable=g-bad-name
def __init__(self, channel, *args, **kwargs):
pexpect.spawn.__init__(self, None, *args, **kwargs)
self.channel = channel
self.child_fd = None
self.closed = False
self.name = '<ssh channel %s>' % channel.get_id()
def isalive(self):
try:
return self.channel.get_transport().is_active()
except AttributeError:
return False
def read_nonblocking(self, size=1, timeout=None):
"""See parent. This actually may or may not block based on timeout."""
if not self.isalive():
raise pexpect.EOF('End Of File (EOF) in read() - Not alive.')
if timeout == -1:
timeout = self.timeout
self.channel.settimeout(timeout)
try:
s = self.channel.recv(size)
except socket.timeout:
raise pexpect.TIMEOUT('Timeout (%s) exceeded in read().' % timeout)
except paramiko.SSHException as e:
raise pexpect.EOF('Paramiko exception: %s' % e)
except | |
not in self.config.latex_elements):
# use Sonny style if any language specified (except English)
self.elements['fncychap'] = ('\\usepackage[Sonny]{fncychap}\n'
'\\ChNameVar{\\Large\\normalfont'
'\\sffamily}\n\\ChTitleVar{\\Large'
'\\normalfont\\sffamily}')
self.babel = self.builder.babel
if self.config.language and not self.babel.is_supported_language():
# emit warning if specified language is invalid
# (only emitting, nothing changed to processing)
logger.warning(__('no Babel option known for language %r'),
self.config.language)
minsecnumdepth = self.secnumdepth # 2 from legacy sphinx manual/howto
if self.document.get('tocdepth'):
# reduce tocdepth if `part` or `chapter` is used for top_sectionlevel
# tocdepth = -1: show only parts
# tocdepth = 0: show parts and chapters
# tocdepth = 1: show parts, chapters and sections
# tocdepth = 2: show parts, chapters, sections and subsections
# ...
tocdepth = self.document.get('tocdepth', 999) + self.top_sectionlevel - 2
if len(self.sectionnames) < len(LATEXSECTIONNAMES) and \
self.top_sectionlevel > 0:
tocdepth += 1 # because top_sectionlevel is shifted by -1
if tocdepth > len(LATEXSECTIONNAMES) - 2: # default is 5 <-> subparagraph
logger.warning(__('too large :maxdepth:, ignored.'))
tocdepth = len(LATEXSECTIONNAMES) - 2
self.elements['tocdepth'] = '\\setcounter{tocdepth}{%d}' % tocdepth
minsecnumdepth = max(minsecnumdepth, tocdepth)
if self.config.numfig and (self.config.numfig_secnum_depth > 0):
minsecnumdepth = max(minsecnumdepth, self.numfig_secnum_depth - 1)
if minsecnumdepth > self.secnumdepth:
self.elements['secnumdepth'] = '\\setcounter{secnumdepth}{%d}' %\
minsecnumdepth
contentsname = document.get('contentsname')
if contentsname:
self.elements['contentsname'] = self.babel_renewcommand('\\contentsname',
contentsname)
if self.elements['maxlistdepth']:
sphinxpkgoptions.append('maxlistdepth=%s' % self.elements['maxlistdepth'])
if sphinxpkgoptions:
self.elements['sphinxpkgoptions'] = '[,%s]' % ','.join(sphinxpkgoptions)
if self.elements['sphinxsetup']:
self.elements['sphinxsetup'] = ('\\sphinxsetup{%s}' %
self.elements['sphinxsetup'])
if self.elements['extraclassoptions']:
self.elements['classoptions'] += ',' + \
self.elements['extraclassoptions']
self.highlighter = highlighting.PygmentsBridge('latex', self.config.pygments_style,
latex_engine=self.config.latex_engine)
self.context = [] # type: List[Any]
self.descstack = [] # type: List[str]
self.tables = [] # type: List[Table]
self.next_table_colspec = None # type: str
self.bodystack = [] # type: List[List[str]]
self.footnote_restricted = None # type: nodes.Element
self.pending_footnotes = [] # type: List[nodes.footnote_reference]
self.curfilestack = [] # type: List[str]
self.handled_abbrs = set() # type: Set[str]
def pushbody(self, newbody: List[str]) -> None:
self.bodystack.append(self.body)
self.body = newbody
def popbody(self) -> List[str]:
body = self.body
self.body = self.bodystack.pop()
return body
def format_docclass(self, docclass: str) -> str:
""" prepends prefix to sphinx document classes
"""
warnings.warn('LaTeXWriter.format_docclass() is deprecated.',
RemovedInSphinx50Warning, stacklevel=2)
if docclass in self.docclasses:
docclass = 'sphinx' + docclass
return docclass
def astext(self) -> str:
self.elements.update({
'body': ''.join(self.body),
'indices': self.generate_indices()
})
return self.render('latex.tex_t', self.elements)
def hypertarget(self, id: str, withdoc: bool = True, anchor: bool = True) -> str:
if withdoc:
id = self.curfilestack[-1] + ':' + id
return ('\\phantomsection' if anchor else '') + \
'\\label{%s}' % self.idescape(id)
def hypertarget_to(self, node: Element, anchor: bool = False) -> str:
labels = ''.join(self.hypertarget(node_id, anchor=False) for node_id in node['ids'])
if anchor:
return r'\phantomsection' + labels
else:
return labels
def hyperlink(self, id: str) -> str:
return '{\\hyperref[%s]{' % self.idescape(id)
def hyperpageref(self, id: str) -> str:
return '\\autopageref*{%s}' % self.idescape(id)
def escape(self, s: str) -> str:
return texescape.escape(s, self.config.latex_engine)
def idescape(self, id: str) -> str:
return '\\detokenize{%s}' % str(id).translate(tex_replace_map).\
encode('ascii', 'backslashreplace').decode('ascii').\
replace('\\', '_')
def babel_renewcommand(self, command: str, definition: str) -> str:
if self.elements['multilingual']:
prefix = '\\addto\\captions%s{' % self.babel.get_language()
suffix = '}'
else: # babel is disabled (mainly for Japanese environment)
prefix = ''
suffix = ''
return ('%s\\renewcommand{%s}{%s}%s\n' % (prefix, command, definition, suffix))
def generate_indices(self) -> str:
def generate(content: List[Tuple[str, List[IndexEntry]]], collapsed: bool) -> None:
ret.append('\\begin{sphinxtheindex}\n')
ret.append('\\let\\bigletter\\sphinxstyleindexlettergroup\n')
for i, (letter, entries) in enumerate(content):
if i > 0:
ret.append('\\indexspace\n')
ret.append('\\bigletter{%s}\n' % self.escape(letter))
for entry in entries:
if not entry[3]:
continue
ret.append('\\item\\relax\\sphinxstyleindexentry{%s}' %
self.encode(entry[0]))
if entry[4]:
# add "extra" info
ret.append('\\sphinxstyleindexextra{%s}' % self.encode(entry[4]))
ret.append('\\sphinxstyleindexpageref{%s:%s}\n' %
(entry[2], self.idescape(entry[3])))
ret.append('\\end{sphinxtheindex}\n')
ret = []
# latex_domain_indices can be False/True or a list of index names
indices_config = self.config.latex_domain_indices
if indices_config:
for domain in self.builder.env.domains.values():
for indexcls in domain.indices:
indexname = '%s-%s' % (domain.name, indexcls.name)
if isinstance(indices_config, list):
if indexname not in indices_config:
continue
content, collapsed = indexcls(domain).generate(
self.builder.docnames)
if not content:
continue
ret.append('\\renewcommand{\\indexname}{%s}\n' %
indexcls.localname)
generate(content, collapsed)
return ''.join(ret)
def render(self, template_name: str, variables: Dict) -> str:
renderer = LaTeXRenderer(latex_engine=self.config.latex_engine)
for template_dir in self.config.templates_path:
template = path.join(self.builder.confdir, template_dir,
template_name)
if path.exists(template):
return renderer.render(template, variables)
return renderer.render(template_name, variables)
@property
def table(self) -> Table:
"""Get current table."""
if self.tables:
return self.tables[-1]
else:
return None
def visit_document(self, node: Element) -> None:
self.curfilestack.append(node.get('docname', ''))
if self.first_document == 1:
# the first document is all the regular content ...
self.first_document = 0
elif self.first_document == 0:
# ... and all others are the appendices
self.body.append('\n\\appendix\n')
self.first_document = -1
if 'docname' in node:
self.body.append(self.hypertarget(':doc'))
# "- 1" because the level is increased before the title is visited
self.sectionlevel = self.top_sectionlevel - 1
def depart_document(self, node: Element) -> None:
pass
def visit_start_of_file(self, node: Element) -> None:
self.curfilestack.append(node['docname'])
def depart_start_of_file(self, node: Element) -> None:
self.curfilestack.pop()
def visit_section(self, node: Element) -> None:
if not self.this_is_the_title:
self.sectionlevel += 1
self.body.append('\n\n')
def depart_section(self, node: Element) -> None:
self.sectionlevel = max(self.sectionlevel - 1,
self.top_sectionlevel - 1)
def visit_problematic(self, node: Element) -> None:
self.body.append(r'{\color{red}\bfseries{}')
def depart_problematic(self, node: Element) -> None:
self.body.append('}')
def visit_topic(self, node: Element) -> None:
self.in_minipage = 1
self.body.append('\n\\begin{sphinxShadowBox}\n')
def depart_topic(self, node: Element) -> None:
self.in_minipage = 0
self.body.append('\\end{sphinxShadowBox}\n')
visit_sidebar = visit_topic
depart_sidebar = depart_topic
def visit_glossary(self, node: Element) -> None:
pass
def depart_glossary(self, node: Element) -> None:
pass
def visit_productionlist(self, node: Element) -> None:
self.body.append('\n\n\\begin{productionlist}\n')
self.in_production_list = 1
def depart_productionlist(self, node: Element) -> None:
self.body.append('\\end{productionlist}\n\n')
self.in_production_list = 0
def visit_production(self, node: Element) -> None:
if node['tokenname']:
tn = node['tokenname']
self.body.append(self.hypertarget('grammar-token-' + tn))
self.body.append('\\production{%s}{' % self.encode(tn))
else:
self.body.append('\\productioncont{')
def depart_production(self, node: Element) -> None:
self.body.append('}\n')
def visit_transition(self, node: Element) -> None:
self.body.append(self.elements['transition'])
def depart_transition(self, node: Element) -> None:
pass
def visit_title(self, node: Element) -> None:
parent = node.parent
if isinstance(parent, addnodes.seealso):
# the environment already handles this
raise nodes.SkipNode
elif isinstance(parent, nodes.section):
if self.this_is_the_title:
if len(node.children) != 1 and not isinstance(node.children[0],
nodes.Text):
logger.warning(__('document title is not a single Text node'),
location=node)
if not self.elements['title']:
# text needs to be escaped since it is inserted into
# the output literally
self.elements['title'] = self.escape(node.astext())
self.this_is_the_title = 0
raise nodes.SkipNode
else:
short = ''
if node.traverse(nodes.image):
short = ('[%s]' % self.escape(' '.join(clean_astext(node).split())))
try:
self.body.append(r'\%s%s{' % (self.sectionnames[self.sectionlevel], short))
except IndexError:
# just use "subparagraph", it's not numbered anyway
self.body.append(r'\%s%s{' % (self.sectionnames[-1], short))
self.context.append('}\n' + self.hypertarget_to(node.parent))
elif isinstance(parent, nodes.topic):
self.body.append(r'\sphinxstyletopictitle{')
self.context.append('}\n')
elif isinstance(parent, nodes.sidebar):
self.body.append(r'\sphinxstylesidebartitle{')
self.context.append('}\n')
elif isinstance(parent, nodes.Admonition):
self.body.append('{')
self.context.append('}\n')
elif isinstance(parent, nodes.table):
# Redirect body output until title is finished.
self.pushbody([])
else:
logger.warning(__('encountered title node not in section, topic, table, '
'admonition or sidebar'),
location=node)
self.body.append('\\sphinxstyleothertitle{')
self.context.append('}\n')
self.in_title = 1
def depart_title(self, node: Element) -> None:
self.in_title = 0
if isinstance(node.parent, nodes.table):
self.table.caption = self.popbody()
else:
self.body.append(self.context.pop())
def visit_subtitle(self, node: Element) -> None:
if isinstance(node.parent, nodes.sidebar):
self.body.append('\\sphinxstylesidebarsubtitle{')
self.context.append('}\n')
else:
self.context.append('')
def depart_subtitle(self, node: Element) -> None:
self.body.append(self.context.pop())
def visit_desc(self, node: Element) -> None:
self.body.append('\n\n\\begin{fulllineitems}\n')
if self.table:
self.table.has_problematic = True
def depart_desc(self, node: Element) -> None:
self.body.append('\n\\end{fulllineitems}\n\n')
def _visit_signature_line(self, node: Element) -> None:
for child in node:
if isinstance(child, addnodes.desc_parameterlist):
self.body.append(r'\pysiglinewithargsret{')
break
else:
self.body.append(r'\pysigline{')
def _depart_signature_line(self, node: Element) -> None:
self.body.append('}')
def visit_desc_signature(self, node: Element) -> None:
if node.parent['objtype'] != 'describe' and node['ids']:
hyper = self.hypertarget(node['ids'][0])
else:
hyper = ''
self.body.append(hyper)
if not node.get('is_multiline'):
self._visit_signature_line(node)
else:
self.body.append('%\n\\pysigstartmultiline\n')
def depart_desc_signature(self, node: Element) -> None:
if not node.get('is_multiline'):
self._depart_signature_line(node)
else:
self.body.append('%\n\\pysigstopmultiline')
def visit_desc_signature_line(self, node: Element) -> None:
self._visit_signature_line(node)
def depart_desc_signature_line(self, node: Element) -> None:
self._depart_signature_line(node)
def visit_desc_addname(self, node: Element) -> None:
self.body.append(r'\sphinxcode{\sphinxupquote{')
self.literal_whitespace += 1
def depart_desc_addname(self, node: Element) -> None:
self.body.append('}}')
self.literal_whitespace -= 1
def visit_desc_type(self, node: Element) -> None:
pass
def depart_desc_type(self, node: Element) -> None:
pass
def visit_desc_returns(self, node: Element) -> None:
self.body.append(r'{ $\rightarrow$ ')
def depart_desc_returns(self, node: Element) -> None:
self.body.append(r'}')
def visit_desc_name(self, node: Element) -> None:
self.body.append(r'\sphinxbfcode{\sphinxupquote{')
self.literal_whitespace += 1
def depart_desc_name(self, node: Element) -> None:
self.body.append('}}')
self.literal_whitespace -= 1
def visit_desc_parameterlist(self, node: Element) -> None:
# close name, open parameterlist
self.body.append('}{')
self.first_param = 1
def depart_desc_parameterlist(self, node: Element) -> None:
# close parameterlist, open return annotation
self.body.append('}{')
def visit_desc_parameter(self, node: Element) -> None:
if not self.first_param:
self.body.append(', ')
else:
self.first_param = 0
if not node.hasattr('noemph'):
self.body.append(r'\emph{')
def depart_desc_parameter(self, node: Element) -> None:
if not node.hasattr('noemph'):
self.body.append('}')
def visit_desc_optional(self, node: | |
class NurbsCurve(Curve,IDisposable,ISerializable,IEpsilonComparable[NurbsCurve]):
"""
Represents a Non Uniform Rational B-Splines (NURBS) curve.
NurbsCurve(other: NurbsCurve)
NurbsCurve(degree: int,pointCount: int)
NurbsCurve(dimension: int,rational: bool,order: int,pointCount: int)
"""
def ConstructConstObject(self,*args):
"""
ConstructConstObject(self: CommonObject,parentObject: object,subobject_index: int)
Assigns a parent object and a subobject index to this.
parentObject: The parent object.
subobject_index: The subobject index.
"""
pass
@staticmethod
def Create(periodic,degree,points):
""" Create(periodic: bool,degree: int,points: IEnumerable[Point3d]) -> NurbsCurve """
pass
@staticmethod
def CreateFromArc(arc):
"""
CreateFromArc(arc: Arc) -> NurbsCurve
Gets a rational degree 2 NURBS curve representation
of the arc. Note that the
parameterization of NURBS curve
does not match arc's transcendental
paramaterization.
Returns: Curve on success,null on failure.
"""
pass
@staticmethod
def CreateFromCircle(circle):
"""
CreateFromCircle(circle: Circle) -> NurbsCurve
Gets a rational degree 2 NURBS curve representation
of the circle. Note that the
parameterization of NURBS curve
does not match circle's transcendental
paramaterization.
Use GetRadianFromNurbFormParameter() and
GetParameterFromRadian() to convert between the NURBS curve
parameter and the
transcendental parameter.
Returns: Curve on success,null on failure.
"""
pass
@staticmethod
def CreateFromEllipse(ellipse):
"""
CreateFromEllipse(ellipse: Ellipse) -> NurbsCurve
Gets a rational degree 2 NURBS curve representation of the ellipse.
Note that the
parameterization of the NURBS curve does not match
with the transcendental
paramaterization of the ellipsis.
Returns: A nurbs curve representation of this ellipse or null if no such representation could be made.
"""
pass
@staticmethod
def CreateFromLine(line):
"""
CreateFromLine(line: Line) -> NurbsCurve
Gets a non-rational,degree 1 Nurbs curve representation of the line.
Returns: Curve on success,null on failure.
"""
pass
@staticmethod
def CreateSpiral(*__args):
"""
CreateSpiral(railCurve: Curve,t0: float,t1: float,radiusPoint: Point3d,pitch: float,turnCount: float,radius0: float,radius1: float,pointsPerTurn: int) -> NurbsCurve
Create a C2 non-rational uniform cubic NURBS approximation of a swept helix or spiral.
railCurve: The rail curve.
t0: Starting portion of rail curve's domain to sweep along.
t1: Ending portion of rail curve's domain to sweep along.
radiusPoint: Point used only to get a vector that is perpedicular to the axis. In
particular,
this vector must not be (anti)parallel to the axis vector.
pitch: The pitch. Positive values produce counter-clockwise orientation,
negative values
produce clockwise orientation.
turnCount: The turn count. If != 0,then the resulting helix will have this many
turns. If=
0,then pitch must be != 0 and the approximate distance
between turns will be set
to pitch. Positive values produce counter-clockwise
orientation,negitive values
produce clockwise orientation.
radius0: The starting radius. At least one radii must benonzero. Negative values
are allowed.
radius1: The ending radius. At least ont radii must be nonzero. Negative values
are allowed.
pointsPerTurn: Number of points to intepolate per turn. Must be greater than 4.
When in doubt,use
12.
Returns: NurbsCurve on success,null on failure.
CreateSpiral(axisStart: Point3d,axisDir: Vector3d,radiusPoint: Point3d,pitch: float,turnCount: float,radius0: float,radius1: float) -> NurbsCurve
Creates a C1 cubic NURBS approximation of a helix or spiral. For a helix,
you may
have radius0 == radius1. For a spiral radius0 == radius0 produces
a circle. Zero
and negative radii are permissible.
axisStart: Helix's axis starting point or center of spiral.
axisDir: Helix's axis vector or normal to spiral's plane.
radiusPoint: Point used only to get a vector that is perpedicular to the axis. In
particular,
this vector must not be (anti)parallel to the axis vector.
pitch: The pitch,where a spiral has a pitch=0,and pitch > 0 is the distance
between
the helix's "threads".
turnCount: The number of turns in spiral or helix. Positive
values produce counter-clockwise
orientation,negitive values produce
clockwise orientation. Note,for a helix,
turnCount * pitch=length of
the helix's axis.
radius0: The starting radius.
radius1: The ending radius.
Returns: NurbsCurve on success,null on failure.
"""
pass
def Dispose(self):
"""
Dispose(self: Curve,disposing: bool)
For derived class implementers.
This method is called with argument true when class
user calls Dispose(),while with argument false when
the Garbage Collector invokes
the finalizer,or Finalize() method.You must reclaim all used unmanaged resources in both cases,
and can use this chance to call Dispose on disposable fields if the argument is true.Also,you
must call the base virtual method within your overriding method.
disposing: true if the call comes from the Dispose() method; false if it comes from the Garbage Collector
finalizer.
"""
pass
def EpsilonEquals(self,other,epsilon):
"""
EpsilonEquals(self: NurbsCurve,other: NurbsCurve,epsilon: float) -> bool
Check that all values in other are within epsilon of the values in this
"""
pass
def GrevilleParameter(self,index):
"""
GrevilleParameter(self: NurbsCurve,index: int) -> float
Gets the greville (edit point) parameter that belongs
to the control point at the
specified index.
index: Index of Greville (Edit) point.
"""
pass
def GrevilleParameters(self):
"""
GrevilleParameters(self: NurbsCurve) -> Array[float]
Gets all Greville (Edit point) parameters for this curve.
"""
pass
def GrevillePoint(self,index):
"""
GrevillePoint(self: NurbsCurve,index: int) -> Point3d
Gets the greville (edit point) parameter that belongs
to the control point at the
specified index.
index: Index of Greville (Edit) point.
"""
pass
def GrevillePoints(self):
"""
GrevillePoints(self: NurbsCurve) -> Point3dList
Gets all Greville (Edit) points for this curve.
"""
pass
def IncreaseDegree(self,desiredDegree):
"""
IncreaseDegree(self: NurbsCurve,desiredDegree: int) -> bool
Increase the degree of this curve.
desiredDegree: The desired degree.
Degrees should be number between and including 1 and 11.
Returns: true on success,false on failure.
"""
pass
@staticmethod
def IsDuplicate(curveA,curveB,ignoreParameterization,tolerance):
"""
IsDuplicate(curveA: NurbsCurve,curveB: NurbsCurve,ignoreParameterization: bool,tolerance: float) -> bool
Determines if two curves are similar.
curveA: First curve used in comparison.
curveB: Second curve used in comparison.
ignoreParameterization: if true,parameterization and orientaion are ignored.
tolerance: tolerance to use when comparing control points.
Returns: true if curves are similar within tolerance.
"""
pass
def MakePiecewiseBezier(self,setEndWeightsToOne):
"""
MakePiecewiseBezier(self: NurbsCurve,setEndWeightsToOne: bool) -> bool
Clamps ends and adds knots so the NURBS curve has bezier spans
(all distinct knots
have multiplitity=degree).
setEndWeightsToOne: If true and the first or last weight is not one,then the first and
last spans are
reparameterized so that the end weights are one.
Returns: true on success,false on failure.
"""
pass
def NonConstOperation(self,*args):
"""
NonConstOperation(self: Curve)
For derived classes implementers.
Defines the necessary implementation to free the
instance from being const.
"""
pass
def OnSwitchToNonConst(self,*args):
"""
OnSwitchToNonConst(self: GeometryBase)
Is called when a non-const operation occurs.
"""
pass
def Reparameterize(self,c):
"""
Reparameterize(self: NurbsCurve,c: float) -> bool
Use a linear fractional transformation to reparameterize the NURBS curve.
This does
not change the curve's domain.
c: reparameterization constant (generally speaking,c should be > 0). The
control
points and knots are adjusted so that
output_nurbs(t)=input_nurbs(lambda(t)),
where lambda(t)=c*t/( (c-1)*t + 1 ).
Note that lambda(0)=0,lambda(1)=1,
lambda'(t) > 0,
lambda'(0)=c and lambda'(1)=1/c.
Returns: true if successful.
"""
pass
def __enter__(self,*args):
"""
__enter__(self: IDisposable) -> object
Provides the implementation of __enter__ for objects which implement IDisposable.
"""
pass
def __exit__(self,*args):
"""
__exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object)
Provides the implementation of __exit__ for objects which implement IDisposable.
"""
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,*__args):
"""
__new__(cls: type,other: NurbsCurve)
__new__(cls: type,info: SerializationInfo,context: StreamingContext)
__new__(cls: type,degree: int,pointCount: int)
__new__(cls: type,dimension: int,rational: bool,order: int,pointCount: int)
"""
pass
def __reduce_ex__(self,*args):
pass
HasBezierSpans=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns true if the NURBS curve has bezier spans (all distinct knots have multiplitity=degree)
Get: HasBezierSpans(self: NurbsCurve) -> bool
"""
IsRational=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value indicating whether or not the curve is rational.
Rational curves have control-points with custom weights.
Get: IsRational(self: NurbsCurve) -> bool
"""
Knots=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets access to the knots (or "knot vector") of this nurbs | |
import asyncio
import datetime
import unittest
import unittest.mock
from unittest.mock import Mock
from typing import Optional, List
import shc.base
from shc import timer, base, datatypes
from ._helper import ClockMock, async_test, ExampleSubscribable, AsyncMock, ExampleWritable, ExampleReadable
class LogarithmicSleepTest(unittest.TestCase):
@async_test
async def test_long_sleep(self) -> None:
with ClockMock(datetime.datetime(2020, 1, 1, 15, 0, 0)) as clock:
await timer._logarithmic_sleep(datetime.datetime(2020, 1, 17, 18, 5, 13).astimezone())
self.assertEqual(datetime.datetime(2020, 1, 17, 18, 5, 13), clock.now())
@async_test
async def test_short_sleep(self) -> None:
with ClockMock(datetime.datetime(2020, 1, 1, 15, 0, 0)) as clock:
await timer._logarithmic_sleep(datetime.datetime(2020, 1, 1, 15, 0, 1, 17600).astimezone())
self.assertEqual(datetime.datetime(2020, 1, 1, 15, 0, 1, 17600), clock.now())
@async_test
async def test_sleep_overshoot(self) -> None:
with ClockMock(datetime.datetime(2020, 1, 1, 15, 0, 0), datetime.timedelta(microseconds=27600)) as clock:
await timer._logarithmic_sleep(datetime.datetime(2020, 1, 1, 18, 37, 10).astimezone())
miss_by = clock.now() - datetime.datetime(2020, 1, 1, 18, 37, 10)
self.assertGreaterEqual(miss_by, datetime.timedelta(0))
self.assertLess(miss_by, 2 * datetime.timedelta(microseconds=27600))
@async_test
async def test_negative_delay(self) -> None:
with ClockMock(datetime.datetime(2020, 1, 1, 15, 0, 0)) as clock:
await timer._logarithmic_sleep(datetime.datetime(2020, 1, 1, 14, 59, 25).astimezone())
self.assertEqual(datetime.datetime(2020, 1, 1, 15, 0, 0), clock.now())
class AbstractTimerTest(unittest.TestCase):
class TestTimer(timer._AbstractScheduleTimer):
def __init__(self, times: List[datetime.datetime]):
super().__init__()
self.times = times
self.index = -1
def _next_execution(self) -> Optional[datetime.datetime]:
self.index += 1
if self.index >= len(self.times):
return None
return self.times[self.index]
@async_test
async def test_run(self) -> None:
clock_mock = ClockMock(datetime.datetime(2020, 8, 20, 21, 8, 0), actual_sleep=0.01)
expected_events = [datetime.datetime(2020, 8, 20, 21, 8, 2).astimezone(),
datetime.datetime(2020, 8, 20, 22, 0, 0).astimezone()]
events = []
def store_time(*args, **kwargs):
events.append(clock_mock.now().astimezone())
t = self.TestTimer(expected_events)
with unittest.mock.patch('shc.timer._AbstractScheduleTimer._publish', new=store_time):
with clock_mock:
await t.run()
await asyncio.sleep(0.01) # Allow async tasks to run to make sure all _publish tasks have been executed
self.assertListEqual(expected_events, events)
assert(t.last_execution is not None)
self.assertAlmostEqual(t.last_execution,
expected_events[-1],
delta=datetime.timedelta(seconds=1))
class TimerDecoratorText(unittest.TestCase):
def test_decorators(self) -> None:
for decorator, timer_class, attr in ((timer.every, timer.Every, {'delta': datetime.timedelta(seconds=5)}),
(timer.once, timer.Once, {'offset': datetime.timedelta(seconds=7)}),
(timer.at, timer.At, {'random': datetime.timedelta(seconds=20)}),):
with self.subTest(decorator=decorator.__name__):
async def my_function(_val, _origin):
return
with unittest.mock.patch('shc.base.Subscribable.trigger',
autospec=True, side_effect=lambda s, t: t) as trigger_mock:
returned = decorator(**attr)(my_function)
trigger_mock.assert_called_once()
self.assertIsInstance(trigger_mock.call_args[0][0], timer_class)
for attr_name, attr_value in attr.items():
self.assertEqual(getattr(trigger_mock.call_args[0][0], attr_name), attr_value)
self.assertIs(trigger_mock.call_args[0][1], my_function)
self.assertIs(trigger_mock.call_args[0][1], returned)
class EveryTimerTest(unittest.TestCase):
def test_unaligned(self) -> None:
with ClockMock(datetime.datetime(2020, 1, 1, 15, 7, 17)) as clock:
every_timer = timer.Every(datetime.timedelta(minutes=5), align=False)
next_execution = every_timer._next_execution()
assert(next_execution is not None)
self.assertAlmostEqual(clock.now().astimezone(), next_execution,
delta=datetime.timedelta(seconds=1))
every_timer.last_execution = clock.now().astimezone()
next_execution = every_timer._next_execution()
assert(next_execution is not None)
self.assertAlmostEqual(clock.now().astimezone() + datetime.timedelta(minutes=5), next_execution,
delta=datetime.timedelta(seconds=1))
clock.sleep(5 * 60)
every_timer.last_execution = clock.now().astimezone()
next_execution = every_timer._next_execution()
assert(next_execution is not None)
self.assertAlmostEqual(clock.now().astimezone() + datetime.timedelta(minutes=5), next_execution,
delta=datetime.timedelta(seconds=1))
def test_unaligned_random(self) -> None:
with ClockMock(datetime.datetime(2020, 1, 1, 15, 7, 17)) as clock:
every_timer = timer.Every(datetime.timedelta(minutes=5), align=False, random=datetime.timedelta(seconds=20))
self.assertGreaterEqual(every_timer._next_execution(),
clock.now().astimezone() - datetime.timedelta(seconds=20))
self.assertLessEqual(every_timer._next_execution(),
clock.now().astimezone() + datetime.timedelta(seconds=20))
every_timer.last_execution = clock.now().astimezone()
self.assertGreaterEqual(every_timer._next_execution(),
clock.now().astimezone() + datetime.timedelta(minutes=5, seconds=-20))
self.assertLessEqual(every_timer._next_execution(),
clock.now().astimezone() + datetime.timedelta(minutes=5, seconds=20))
clock.sleep(5 * 60 + 20)
every_timer.last_execution = clock.now().astimezone()
self.assertGreaterEqual(every_timer._next_execution(),
clock.now().astimezone() + datetime.timedelta(minutes=5, seconds=-20))
self.assertLessEqual(every_timer._next_execution(),
clock.now().astimezone() + datetime.timedelta(minutes=5, seconds=20))
def test_aligned(self) -> None:
with ClockMock(datetime.datetime(2020, 1, 1, 15, 7, 17)) as clock:
every_timer = timer.Every(datetime.timedelta(minutes=5), align=True)
base = every_timer._next_execution()
assert(base is not None)
self.assertGreaterEqual(base - clock.now().astimezone(), datetime.timedelta(0))
self.assertLessEqual(base - clock.now().astimezone(), datetime.timedelta(minutes=5))
clock.current_time = base + datetime.timedelta(microseconds=1) # We slept till the first execution
next_execution = every_timer._next_execution()
assert(next_execution is not None)
self.assertAlmostEqual(clock.now().astimezone() + datetime.timedelta(minutes=5),
next_execution,
delta=datetime.timedelta(seconds=1))
# A new timer (after a restart 5 minutes later) should give us exactly base + 5 minutes as the first execution
with ClockMock(datetime.datetime(2020, 1, 1, 15, 7, 17) + datetime.timedelta(minutes=5)) as clock:
every_timer = timer.Every(datetime.timedelta(minutes=5), align=True)
next_execution = every_timer._next_execution()
assert(next_execution is not None)
self.assertAlmostEqual(base + datetime.timedelta(minutes=5),
next_execution,
delta=datetime.timedelta(seconds=1))
class OnceTimerTest(unittest.TestCase):
def test_immediate(self) -> None:
with ClockMock(datetime.datetime(2020, 1, 1, 15, 7, 17)) as clock:
once_timer = timer.Once()
next_execution = once_timer._next_execution()
assert(next_execution is not None)
self.assertAlmostEqual(clock.now().astimezone(), next_execution,
delta=datetime.timedelta(seconds=1))
clock.sleep(1)
once_timer.last_execution = clock.now().astimezone()
self.assertIsNone(once_timer._next_execution())
def test_offset(self) -> None:
with ClockMock(datetime.datetime(2020, 1, 1, 15, 7, 17)) as clock:
once_timer = timer.Once(datetime.timedelta(hours=1))
next_execution = once_timer._next_execution()
assert(next_execution is not None)
self.assertAlmostEqual(clock.now().astimezone() + datetime.timedelta(hours=1), next_execution,
delta=datetime.timedelta(seconds=1))
clock.sleep(60*60)
once_timer.last_execution = clock.now().astimezone()
self.assertIsNone(once_timer._next_execution())
def test_offset_random(self) -> None:
with ClockMock(datetime.datetime(2020, 1, 1, 15, 7, 17)) as clock:
once_timer = timer.Once(datetime.timedelta(hours=1), random=datetime.timedelta(seconds=20))
next_execution = once_timer._next_execution()
self.assertGreaterEqual(next_execution,
clock.now().astimezone() + datetime.timedelta(hours=1, seconds=-20))
self.assertLessEqual(next_execution,
clock.now().astimezone() + datetime.timedelta(hours=1, seconds=20))
clock.sleep(60*60)
once_timer.last_execution = clock.now().astimezone()
self.assertIsNone(once_timer._next_execution())
class AtTimerTest(unittest.TestCase):
def _assert_datetime(self, expected: datetime.datetime, actual: Optional[datetime.datetime]) -> None:
assert(actual is not None)
self.assertAlmostEqual(expected.astimezone(), actual, delta=datetime.timedelta(seconds=.1))
def test_simple_next(self) -> None:
with ClockMock(datetime.datetime(2020, 1, 1, 15, 7, 17)) as clock:
once_timer = timer.At(hour=15, minute=7, second=17, millis=200)
self._assert_datetime(datetime.datetime(2020, 1, 1, 15, 7, 17, 200000), once_timer._next_execution())
once_timer = timer.At(hour=15, minute=7, second=25)
self._assert_datetime(datetime.datetime(2020, 1, 1, 15, 7, 25), once_timer._next_execution())
once_timer = timer.At(hour=15, minute=12)
self._assert_datetime(datetime.datetime(2020, 1, 1, 15, 12, 0), once_timer._next_execution())
once_timer = timer.At(hour=16)
self._assert_datetime(datetime.datetime(2020, 1, 1, 16, 0, 0), once_timer._next_execution())
once_timer = timer.At(day=19)
self._assert_datetime(datetime.datetime(2020, 1, 19, 0, 0, 0), once_timer._next_execution())
once_timer = timer.At(month=8)
self._assert_datetime(datetime.datetime(2020, 8, 1, 0, 0, 0), once_timer._next_execution())
once_timer = timer.At(year=2021)
self._assert_datetime(datetime.datetime(2021, 1, 1, 0, 0, 0), once_timer._next_execution())
once_timer = timer.At(weekday=5)
self._assert_datetime(datetime.datetime(2020, 1, 3, 0, 0, 0), once_timer._next_execution())
once_timer = timer.At(weeknum=15)
self._assert_datetime(datetime.datetime(2020, 4, 6, 0, 0, 0), once_timer._next_execution())
def test_spec_forms(self) -> None:
with ClockMock(datetime.datetime(2020, 1, 1, 15, 7, 17)) as clock:
once_timer = timer.At(hour=timer.EveryNth(2))
self._assert_datetime(datetime.datetime(2020, 1, 1, 16, 0, 0), once_timer._next_execution())
once_timer = timer.At(hour=timer.EveryNth(6))
self._assert_datetime(datetime.datetime(2020, 1, 1, 18, 0, 0), once_timer._next_execution())
once_timer = timer.At(hour=[13, 17, 20])
self._assert_datetime(datetime.datetime(2020, 1, 1, 17, 0, 0), once_timer._next_execution())
once_timer = timer.At(hour=None, minute=18)
self._assert_datetime(datetime.datetime(2020, 1, 1, 15, 18, 0), once_timer._next_execution())
once_timer = timer.At(hour=timer.EveryNth(3), minute=None, second=None)
self._assert_datetime(datetime.datetime(2020, 1, 1, 15, 7, 17), once_timer._next_execution())
def test_stepback(self) -> None:
with ClockMock(datetime.datetime(2020, 1, 1, 15, 7, 17)) as clock:
once_timer = timer.At(hour=None, minute=0)
self._assert_datetime(datetime.datetime(2020, 1, 1, 16, 0, 0), once_timer._next_execution())
once_timer = timer.At(hour=15, minute=0)
self._assert_datetime(datetime.datetime(2020, 1, 2, 15, 0, 0), once_timer._next_execution())
once_timer = timer.At(month=1, day=1, hour=timer.EveryNth(15), minute=[5, 7], second=16)
self._assert_datetime(datetime.datetime(2021, 1, 1, 0, 5, 16), once_timer._next_execution())
def test_overflows(self) -> None:
with ClockMock(datetime.datetime(2020, 12, 31, 23, 59, 46)) as clock:
once_timer = timer.At(hour=None, minute=None, second=timer.EveryNth(15))
self._assert_datetime(datetime.datetime(2021, 1, 1, 0, 0, 0), once_timer._next_execution())
with ClockMock(datetime.datetime(2019, 2, 1, 15, 7, 17)) as clock:
once_timer = timer.At(day=29)
self._assert_datetime(datetime.datetime(2019, 3, 29, 0, 0, 0), once_timer._next_execution())
with ClockMock(datetime.datetime(2020, 2, 1, 15, 7, 17)) as clock:
once_timer = timer.At(day=29)
self._assert_datetime(datetime.datetime(2020, 2, 29, 0, 0, 0), once_timer._next_execution())
with ClockMock(datetime.datetime(2020, 4, 1, 15, 7, 17)) as clock:
once_timer = timer.At(day=31)
self._assert_datetime(datetime.datetime(2020, 5, 31, 0, 0, 0), once_timer._next_execution())
with ClockMock(datetime.datetime(2019, 1, 1, 15, 7, 17)) as clock:
once_timer = timer.At(weeknum=53)
self._assert_datetime(datetime.datetime(2020, 12, 28, 0, 0, 0), once_timer._next_execution())
with ClockMock(datetime.datetime(2020, 1, 1, 0, 0, 0)) as clock:
once_timer = timer.At(year=2019)
self.assertIsNone(once_timer._next_execution())
def test_exception(self) -> None:
with self.assertRaises(ValueError):
once_timer = timer.At(day=[1, 5, 15], weeknum=timer.EveryNth(2))
class BoolTimerTest(unittest.TestCase):
@async_test
async def test_ton(self) -> None:
begin = datetime.datetime(2020, 12, 31, 23, 59, 46)
call_times = []
def save_time(*args):
call_times.append(datetime.datetime.now())
base = ExampleSubscribable(bool)
ton = timer.TOn(base, datetime.timedelta(seconds=42))
with unittest.mock.patch.object(ton, "_publish", new=Mock(side_effect=save_time)) as publish_mock:
with ClockMock(begin, actual_sleep=0.05) as clock:
self.assertFalse(await ton.read())
# False should not be forwarded, when value is already False
await base.publish(False, [self])
await asyncio.sleep(0.01)
publish_mock.assert_not_called()
# True should be forwarded with delay
await base.publish(True, [self])
await asyncio.sleep(45)
publish_mock.assert_called_with(True, unittest.mock.ANY)
self.assertAlmostEqual(begin + datetime.timedelta(seconds=42.01), call_times[-1],
delta=datetime.timedelta(seconds=.01))
# False should now be forwarded immediately
publish_mock.reset_mock()
await base.publish(False, [self])
await asyncio.sleep(0.01)
publish_mock.assert_called_with(False, unittest.mock.ANY)
# True delay should be suppressable with False ...
publish_mock.reset_mock()
await base.publish(True, [self])
await asyncio.sleep(20)
await base.publish(False, [self])
await asyncio.sleep(30)
publish_mock.assert_not_called()
# ... and not extendable with True
publish_mock.reset_mock()
tic = datetime.datetime.now()
await base.publish(True, [self])
await asyncio.sleep(20)
await base.publish(True, [self])
await asyncio.sleep(30)
publish_mock.assert_called_with(True, unittest.mock.ANY)
self.assertAlmostEqual(tic + datetime.timedelta(seconds=42), call_times[-1],
delta=datetime.timedelta(seconds=.01))
@async_test
async def test_toff(self) -> None:
begin = datetime.datetime(2020, 12, 31, 23, 59, 46)
call_times = []
def save_time(*args):
call_times.append(datetime.datetime.now())
base = ExampleSubscribable(bool)
toff = timer.TOff(base, datetime.timedelta(seconds=42))
with unittest.mock.patch.object(toff, "_publish", new=Mock(side_effect=save_time)) as publish_mock:
with ClockMock(begin, actual_sleep=0.05) as clock:
self.assertFalse(await toff.read())
# False should not be forwarded, when value is already False
await base.publish(False, [self])
await asyncio.sleep(0.01)
publish_mock.assert_not_called()
# True should be forwarded with immediately
await base.publish(True, [self])
await asyncio.sleep(0.01)
publish_mock.assert_called_with(True, unittest.mock.ANY)
# False should now be forwarded with delay
publish_mock.reset_mock()
await base.publish(False, [self])
await asyncio.sleep(45)
publish_mock.assert_called_with(False, unittest.mock.ANY)
self.assertAlmostEqual(begin + datetime.timedelta(seconds=42.01), call_times[-1],
delta=datetime.timedelta(seconds=.01))
# False delay should be suppressable with True ...
await base.publish(True, [self])
await asyncio.sleep(0.01)
publish_mock.reset_mock()
await base.publish(False, [self])
await asyncio.sleep(20)
await base.publish(True, [self])
await asyncio.sleep(30)
publish_mock.assert_not_called()
# ... and not extendable with False
publish_mock.reset_mock()
tic = datetime.datetime.now()
await base.publish(False, [self])
await asyncio.sleep(20)
| |
make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_snapshot_snapshots(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str type: Only list snapshots matching this type.
:param str schedule: Only list snapshots created by this schedule.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_snapshot_snapshots_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.delete_snapshot_snapshots_with_http_info(**kwargs) # noqa: E501
return data
def delete_snapshot_snapshots_with_http_info(self, **kwargs): # noqa: E501
"""delete_snapshot_snapshots # noqa: E501
Delete all or matching snapshots. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_snapshot_snapshots_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str type: Only list snapshots matching this type.
:param str schedule: Only list snapshots created by this schedule.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['type', 'schedule'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_snapshot_snapshots" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'type' in params:
query_params.append(('type', params['type'])) # noqa: E501
if 'schedule' in params:
query_params.append(('schedule', params['schedule'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/platform/1/snapshot/snapshots', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_snapshot_alias(self, snapshot_alias_id, **kwargs): # noqa: E501
"""get_snapshot_alias # noqa: E501
Retrieve snapshot alias information. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_snapshot_alias(snapshot_alias_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str snapshot_alias_id: Retrieve snapshot alias information. (required)
:return: SnapshotAliases
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_snapshot_alias_with_http_info(snapshot_alias_id, **kwargs) # noqa: E501
else:
(data) = self.get_snapshot_alias_with_http_info(snapshot_alias_id, **kwargs) # noqa: E501
return data
def get_snapshot_alias_with_http_info(self, snapshot_alias_id, **kwargs): # noqa: E501
"""get_snapshot_alias # noqa: E501
Retrieve snapshot alias information. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_snapshot_alias_with_http_info(snapshot_alias_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str snapshot_alias_id: Retrieve snapshot alias information. (required)
:return: SnapshotAliases
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['snapshot_alias_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_snapshot_alias" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'snapshot_alias_id' is set
if ('snapshot_alias_id' not in params or
params['snapshot_alias_id'] is None):
raise ValueError("Missing the required parameter `snapshot_alias_id` when calling `get_snapshot_alias`") # noqa: E501
collection_formats = {}
path_params = {}
if 'snapshot_alias_id' in params:
path_params['SnapshotAliasId'] = params['snapshot_alias_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/platform/1/snapshot/aliases/{SnapshotAliasId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SnapshotAliases', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_snapshot_changelist(self, snapshot_changelist_id, **kwargs): # noqa: E501
"""get_snapshot_changelist # noqa: E501
Retrieve basic information on a changelist. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_snapshot_changelist(snapshot_changelist_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str snapshot_changelist_id: Retrieve basic information on a changelist. (required)
:param int limit: Return no more than this many results at once (see resume).
:param str resume: Continue returning results from previous call using this token (token should come from the previous call, resume cannot be used with other options).
:return: SnapshotChangelists
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_snapshot_changelist_with_http_info(snapshot_changelist_id, **kwargs) # noqa: E501
else:
(data) = self.get_snapshot_changelist_with_http_info(snapshot_changelist_id, **kwargs) # noqa: E501
return data
def get_snapshot_changelist_with_http_info(self, snapshot_changelist_id, **kwargs): # noqa: E501
"""get_snapshot_changelist # noqa: E501
Retrieve basic information on a changelist. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_snapshot_changelist_with_http_info(snapshot_changelist_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str snapshot_changelist_id: Retrieve basic information on a changelist. (required)
:param int limit: Return no more than this many results at once (see resume).
:param str resume: Continue returning results from previous call using this token (token should come from the previous call, resume cannot be used with other options).
:return: SnapshotChangelists
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['snapshot_changelist_id', 'limit', 'resume'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_snapshot_changelist" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'snapshot_changelist_id' is set
if ('snapshot_changelist_id' not in params or
params['snapshot_changelist_id'] is None):
raise ValueError("Missing the required parameter `snapshot_changelist_id` when calling `get_snapshot_changelist`") # noqa: E501
if 'limit' in params and params['limit'] > 4294967295: # noqa: E501
raise ValueError("Invalid value for parameter `limit` when calling `get_snapshot_changelist`, must be a value less than or equal to `4294967295`") # noqa: E501
if 'limit' in params and params['limit'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `limit` when calling `get_snapshot_changelist`, must be a value greater than or equal to `1`") # noqa: E501
if ('resume' in params and
len(params['resume']) > 8192):
raise ValueError("Invalid value for parameter `resume` when calling `get_snapshot_changelist`, length must be less than or equal to `8192`") # noqa: E501
if ('resume' in params and
len(params['resume']) < 0):
raise ValueError("Invalid value for parameter `resume` when calling `get_snapshot_changelist`, length must be greater than or equal to `0`") # noqa: E501
collection_formats = {}
path_params = {}
if 'snapshot_changelist_id' in params:
path_params['SnapshotChangelistId'] = params['snapshot_changelist_id'] # noqa: E501
query_params = []
if 'limit' in params:
query_params.append(('limit', params['limit'])) # noqa: E501
if 'resume' in params:
query_params.append(('resume', params['resume'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/platform/1/snapshot/changelists/{SnapshotChangelistId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SnapshotChangelists', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_snapshot_license(self, **kwargs): # noqa: E501
"""get_snapshot_license # noqa: E501
Retrieve license information. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_snapshot_license(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: LicenseLicense
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_snapshot_license_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_snapshot_license_with_http_info(**kwargs) # noqa: E501
return data
def get_snapshot_license_with_http_info(self, **kwargs): # noqa: E501
"""get_snapshot_license # noqa: E501
Retrieve license information. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_snapshot_license_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: LicenseLicense
If the method | |
# license: Copyright (C) 2018 NVIDIA Corporation. All rights reserved.
# Licensed under the CC BY-NC-SA 4.0 license
# (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
# this code simulate the approximate motion required
# all time unit are picoseconds (1 picosec = 1e-12 sec)
import sys
sys.path.insert(0,'../pipe/')
import numpy as np
import os, json, glob
import imageio
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from utils import *
from tof_class import *
import pdb
import pickle
import time
import scipy.misc
from scipy import sparse
import scipy.interpolate
from copy import deepcopy
import multiprocessing
from kinect_spec import *
import cv2
from numpy import linalg as LA
from tensorflow.contrib import learn
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
tf.logging.set_verbosity(tf.logging.INFO)
from vis_flow import *
from kinect_init import *
PI = 3.14159265358979323846
raw_depth_new = 0
flg = False
dtype = tf.float32
def gen_approx_motion(scene_ns, array_dir, tof_cam, text_flg = False, do_vis = True):
global flg
# first loading each scene, and we will combine them then
meass = []
depths = []
msks = []
vs = []
v_flg = False
while (v_flg == False):
v_flg = True
# first loading each scene, and we will combine them then
meass = []
depths = []
msks = []
vs = []
Ps = []
for scene_n in scene_ns:
print('Augmenting scene', scene_n)
## load all data
# if the raw file does not exist, just find one and use
if not os.path.exists(array_dir+scene_n[-16:]+'.pickle'):
scenes = glob.glob(array_dir+'*.pickle')
with open(scenes[0],'rb') as f:
data = pickle.load(f)
cam = data['cam']
# separately read the true depth and true rendering
with open(scene_n[0:-16]+'gt/'+scene_n[-16::],'rb') as f:
gt=np.fromfile(f, dtype=np.float32)
depth_true = np.reshape(gt,(cam['dimy']*4,cam['dimx']*4))
with open(scene_n[0:-16]+'ideal/'+scene_n[-16::],'rb') as f:
meas_gt=np.fromfile(f, dtype=np.int32)
meas_gt = np.reshape(meas_gt,(cam['dimy'],cam['dimx'],9)).astype(np.float32)
else:
with open(array_dir+scene_n[-16::]+'.pickle','rb') as f:
data = pickle.load(f)
program = data['program']
cam = data['cam']
cam_t = data['cam_t']
scene = data['scene']
depth_true = data['depth_true']
prop_idx = data['prop_idx']
prop_s = data['prop_s']
res_gt = tof_cam.process_gt_delay_vig_dist_surf_mapmax(cam, prop_idx, prop_s, scene, depth_true)
meas_gt = res_gt['meas']
# directly read pregenerate raw measurement
with open(scene_n[0:-16]+'full/'+scene_n[-16::],'rb') as f:
meas=np.fromfile(f, dtype=np.int32)
meas = np.reshape(meas,(cam['dimy'],cam['dimx'],9)).astype(np.float32)
msk = kinect_mask().astype(np.float32)
meas = [meas[:,:,i]*msk for i in range(meas.shape[2])]
meas = np.stack(meas,-1)
meas = meas / tof_cam.cam['map_max']
# meas = meas[::-1,:,:]
meas_gt = [meas_gt[:,:,i]*msk for i in range(meas_gt.shape[2])]
meas_gt = np.stack(meas_gt,-1)
meas_gt = meas_gt / tof_cam.cam['map_max']
# reduce the resolution of the depth
depth_true[np.where(depth_true==0)] = np.nan # deal with the mix problem at edge
depth_true_s = scipy.misc.imresize(\
depth_true,\
meas.shape[0:2],\
mode='F'\
)
depth_true_s = tof_cam.dist_to_depth(depth_true_s)
depth_true_s[np.where(np.isnan(depth_true_s))] = 0
# load the mask and classification
with open(scene_n[0:-16]+'msk'+'/'+scene_n[-16:],'rb') as f:
msk_array=np.fromfile(f, dtype=np.float32)
msk_array = np.reshape(msk_array,(cam['dimy'],cam['dimx'],4))
msk = {}
msk['background'] = msk_array[:,:,0]
msk['edge'] = msk_array[:,:,1]
msk['noise'] = msk_array[:,:,2]
msk['reflection'] = msk_array[:,:,3]
# compute mask
msk_true_s = msk['background'] * msk['edge']
true = np.stack([depth_true_s,msk_true_s],2)
true = np.concatenate([true, meas_gt], 2)
msk = msk_true_s
if text_flg == True:
# add textures (simply multiply a ratio)
# WARNING: IF YOU WANT TO USE TEXTURES
# CREATE A DIRECTORY:
# ../FLAT/kinect/list/textures-curet/
# PUT THE TEXTURE IMAGES (.png format) INTO IT
# add textures (simply multiply a ratio)
texts = glob.glob('../FLAT/kinect/list/textures-curet/'+'*.png')
idx = np.random.choice(len(texts),1,replace=False)[0]
im_text = cv2.imread(texts[idx],0).astype(np.float32)
im_text /= 255.
lo = np.random.uniform(0,1) # random range
hi = np.random.uniform(lo,1)
im_text = im_text * (hi-lo) + lo
im_text = scipy.misc.imresize(im_text,meas.shape[0:2],mode='F')
im_text = np.expand_dims(im_text,-1)
# apply the texture
meas = meas * im_text
meas_gt = meas_gt * im_text
# compute the camera matrix
xx,yy = np.meshgrid(np.arange(depth_true_s.shape[1]), np.arange(depth_true_s.shape[0]))
ratio = depth_true_s.shape[1]
fov = 0.7
xx = (xx.flatten() - (xx.shape[1]-1)/2)/ratio
yy = (yy.flatten() - (yy.shape[0]-1)/2)/ratio
xx = xx * fov
yy = yy * fov
depth_f = depth_true_s.flatten()
idx = np.where(depth_f != 0)
xx = xx[idx]
yy = yy[idx]
depth_f = depth_f[idx]
idx = np.random.choice(len(depth_f),2000,replace=False)
xx = xx[idx]
yy = yy[idx]
depth_f = depth_f[idx]
pts_3d = np.stack([xx*depth_f, yy*depth_f, depth_f, np.ones(depth_f.shape)],-1)
pts_2d = np.stack([xx, yy, np.ones(depth_f.shape)],-1)
# use the DLT algorithm
a00 = np.zeros(pts_3d.shape)
a01 = -pts_2d[:,2:3]*pts_3d
a02 = pts_2d[:,1:2]*pts_3d
a10 = -a01
a11 = np.zeros(pts_3d.shape)
a12 = -pts_2d[:,0:1]*pts_3d
a20 = -a02
a21 = -a12
a22 = np.zeros(pts_3d.shape)
a0 = np.concatenate([a00, a01, a02],1)
a1 = np.concatenate([a10, a11, a12],1)
a2 = np.concatenate([a20, a21, a22],1)
A = np.concatenate([a0, a1, a2], 0)
U,s,vh=np.linalg.svd(A, full_matrices =False)
v = vh.T
P = np.reshape(v[:,-1],[3,4])
pts_2d_reproj = np.matmul(pts_3d,P.T)
pts_2d_reproj /= pts_2d_reproj[:,-1::]
reproj_err = np.sum(np.abs(pts_2d_reproj - pts_2d))
print('Reprojection error:',reproj_err)
# randomly generating the 6 affine transform parameters
max_pix = 5
max_mov_m = 0.03
mov = 10
while (np.abs(mov).max() >= max_mov_m):
th1 = np.random.normal(0.0,0.01,[3,3])
th1[0,0]+=1
th1[1,1]+=1
th1[2,2]+=1
th2 = np.random.normal(0.0,.01,[3,1])
th3 = np.array([[0,0,0,1]])
th = np.concatenate([th1,th2],1)
th = np.concatenate([th,th3],0)
Y = pts_3d[:,0]
X = pts_3d[:,1]
Z = pts_3d[:,2]
pts_3d_new = np.matmul(pts_3d, th.T)
mov = np.sqrt(np.sum((pts_3d_new - pts_3d)**2,1))
# append the data
meass.append(meas)
depths.append(depth_true_s)
msks.append(msk)
vs.append(th)
Ps.append(P)
# move the object and combine them by channel
y = np.arange(meass[0].shape[0])
x = np.arange(meass[0].shape[1])
xx, yy = np.meshgrid(x,y)
meass_new = []
meass_old = []
vys_new = []
vxs_new = []
vys_inv = []
vxs_inv = []
msks_new = []
depths_new = []
mid = 4
for i in range(9):
meas_v = []
meas_old_v = []
depth_v = []
msk_v = []
depth_old_v = []
vy_v = []
vx_v = []
vy_inv = []
vx_inv = []
for j in range(len(meass)):
# constant transformation
# notice that the velocity is inversed here
th = vs[j]
th = LA.matrix_power(th, i-mid)
#
xx_p = (xx - (xx.shape[1]-1)/2)/ratio
yy_p = (yy - (yy.shape[0]-1)/2)/ratio
zz_p = depths[j]
xx_p = xx_p * fov * zz_p
yy_p = yy_p * fov * zz_p
xx_p = xx_p.flatten()
yy_p = yy_p.flatten()
zz_p = zz_p.flatten()
idx = np.where(zz_p != 0)
yy_p = yy_p[idx]
xx_p = xx_p[idx]
zz_p = zz_p[idx]
# prepare teh data
meas_f = meass[j][:,:,i].flatten()
meas_f = meas_f[idx]
depth_f = depths[j].flatten()
depth_f = depth_f[idx]
msk_f = msks[j].flatten()
msk_f = msk_f[idx]
# do the transformation
pts_3d = np.stack([yy_p, xx_p, zz_p, np.ones(xx_p.shape)],-1)
pts_2d_raw = np.stack([(yy.flatten())[idx], (xx.flatten())[idx]],-1)
pts_2d = np.stack([yy_p / zz_p, xx_p / zz_p],-1)
pts_3d_new = np.matmul(pts_3d, th.T)
P = Ps[j]
pts_2d_new = np.matmul(pts_3d_new,P.T)
pts_2d_new = pts_2d_new[:,0:2]/pts_2d_new[:,2:3]
y_p = pts_2d_new[:,0] / fov * ratio + (xx.shape[0]-1)/2
x_p = pts_2d_new[:,1] / fov * ratio + (xx.shape[1]-1)/2
pts_2d_new_raw = np.stack([y_p, x_p],-1)
pts = np.stack([yy.flatten(), xx.flatten()],-1)
# cut off the regions outside
idx = np.where((y_p<(yy.shape[0]-1))*(y_p>0)*(x_p<(xx.shape[1]-1))*(x_p>0))
y_pc = y_p[idx]
x_pc = x_p[idx]
# add a map of zeros
zero_map = np.zeros(xx.shape)
zero_map[(np.floor(y_pc).astype(np.int32),np.floor(x_pc).astype(np.int32))] = 1
zero_map[(np.ceil(y_pc).astype(np.int32),np.floor(x_pc).astype(np.int32))] = 1
zero_map[(np.floor(y_pc).astype(np.int32),np.ceil(x_pc).astype(np.int32))] = 1
zero_map[(np.ceil(y_pc).astype(np.int32),np.ceil(x_pc).astype(np.int32))] = 1
y_zero = yy[np.where(zero_map==0)]
x_zero = xx[np.where(zero_map==0)]
val_nan = np.nan*x_zero
pts_2d_zero = np.stack([y_zero, x_zero],-1)
pts_2d_new_full = np.concatenate([pts_2d_new_raw, pts_2d_zero],0)
meas_f = np.concatenate([meas_f, val_nan],0)
depth_f = np.concatenate([depth_f, val_nan],0)
msk_f = np.concatenate([msk_f, val_nan],0)
f1 = scipy.interpolate.griddata(pts_2d_new_full,meas_f,pts)
meas_v.append(np.reshape(f1, xx.shape))
meas_old_v.append(meass[j][:,:,i])
f2 = scipy.interpolate.griddata(pts_2d_new_full,depth_f,pts)
depth_v.append(np.reshape(f2, xx.shape))
depth_old_v.append(depths[j])
f3 = scipy.interpolate.griddata(pts_2d_new_full,msk_f,pts)
msk_v.append(np.reshape(f3, xx.shape))
# add the velocity
vy_v.append(np.zeros(yy.shape))
vy_v[-1][(pts_2d_raw[:,0],pts_2d_raw[:,1])] = pts_2d_new_raw[:,0] - pts_2d_raw[:,0]
vx_v.append(np.ones(xx.shape))
vx_v[-1][(pts_2d_raw[:,0],pts_2d_raw[:,1])] = pts_2d_new_raw[:,1] - pts_2d_raw[:,1]
# mask out those regions that interpolates with the background
msk_v[-1][np.where(msk_v[-1]<0.999)] = 0
# combine the raw measurement based on depth
msk_v = np.stack(msk_v, -1)
meas_v = np.stack(meas_v, -1)
meas_old_v = np.stack(meas_old_v, -1)
depth_v = np.stack(depth_v, -1)
depth_old_v = np.stack(depth_old_v, -1)
vy_v = np.stack(vy_v, -1)
vx_v = np.stack(vx_v, -1)
# combine
depth_v[np.where(np.isnan(depth_v))] = 999999999
idx = np.argmin(depth_v, -1)
pts = [yy.flatten(), xx.flatten(), idx.flatten()]
meas_new = np.reshape(meas_v[pts], xx.shape)
vy_new = np.reshape(vy_v[pts], xx.shape)
vx_new = np.reshape(vx_v[pts], xx.shape)
msk_new = np.reshape(msk_v[pts], xx.shape)
depth_new = np.reshape(depth_v[pts], xx.shape)
# remove the
msk_new[np.where(np.isnan(msk_new))] = 0
meas_new[np.where(np.isnan(meas_new))] = 0
depth_old_v[np.where(depth_old_v == 0)] = 999999999
idx = np.nanargmin(depth_old_v, -1)
pts = [yy.flatten(), xx.flatten(), idx.flatten()]
vy_inv = np.reshape(vy_v[pts], xx.shape)
vx_inv = np.reshape(vx_v[pts], xx.shape)
meas_old = np.reshape(meas_old_v[pts], xx.shape)
meass_new.append(meas_new)
vys_new.append(vy_new)
vxs_new.append(vx_new)
msks_new.append(msk_new)
depths_new.append(depth_new)
vys_inv.append(vy_inv)
vxs_inv.append(vx_inv)
meass_old.append(meas_old)
meas_all = np.stack(meass_new, -1)
meas_all = meas_all[20:-20,:,:]
meas_old_all = np.stack(meass_old, -1)
meas_old_all = meas_old_all[20:-20,:,:]
meas_gt = meas_gt[20:-20,:,:]
vys = np.stack(vys_inv, -1)
vxs = np.stack(vxs_inv, -1)
vys = -vys
vxs = -vxs
vys = vys[20:-20,:,:]
vxs = vxs[20:-20,:,:]
meas = meas_all
true = meas_old_all
depth_true = depth_true_s[20:-20,:]
v = np.stack([vys, vxs], -2)
if do_vis:
# visualization
fig = plt.figure()
ax = fig.add_subplot(1,3,1)
plt.imshow(np.mean(np.abs(meas),-1))
plt.title('scene')
plt.axis('off')
ax = fig.add_subplot(1,3,2)
plt.imshow(depth_true)
plt.title('depth')
plt.axis('off')
ax = fig.add_subplot(1,3,3)
v_max = np.max(np.sqrt((v[:,:,0,0]**2 + v[:,:,1,0]**2)))
plt.imshow(viz_flow(v[:,:,0,0],v[:,:,1,0], scaledown=v_max))
plt.title('flow')
plt.axis('off')
plt.show()
# the input of the network
return meas, depth_true, v
if __name__ == '__main__':
# load the images
| |
<gh_stars>10-100
"""
Example for classification on Cifar10 [1]
.. code-block:: python
[1] <NAME>.
Learning Multiple Layers of Features from Tiny Images.
2009.
**Note: the LMDBs can also be found in the data repository, see README.**
Use ``caffe/data/cifar10/get_cifar10.sh`` to download Cifar10 and
``caffe/examples/create_cifar10.sh`` to create the corresponding LMDBs.
Copy them over into ``examples/cifar10`` for the following data structure:
.. code-block:: python
examples/cifar10
|- train_lmdb/
|- test_lmdb/
.. argparse::
:ref: examples.cifar10.get_parser
:prog: cifar10
"""
import os
import cv2
import glob
import numpy
import argparse
from matplotlib import pyplot
# To silence Caffe! Must be added before importing Caffe or modules which
# are importing Caffe.
os.environ['GLOG_minloglevel'] = '3'
import caffe
import tools.solvers
import tools.lmdb_io
import tools.pre_processing
import tools.prototxt
caffe.set_mode_gpu()
def get_parser():
"""
Get the parser.
:return: parser
:rtype: argparse.ArgumentParser
"""
parser = argparse.ArgumentParser(description = 'Caffe example on Cifar-10.')
parser.add_argument('mode', default = 'train',
help = 'mode to run: "train" or "resume"')
parser.add_argument('--train_lmdb', dest = 'train_lmdb', type = str,
help = 'path to train LMDB',
default = 'examples/cifar10/train_lmdb')
parser.add_argument('--test_lmdb', dest = 'test_lmdb', type = str,
help = 'path to test LMDB',
default = 'examples/cifar10/test_lmdb')
parser.add_argument('--working_directory', dest = 'working_directory', type = str,
help = 'path to a directory (created if not existent) where to store the created .prototxt and snapshot files',
default = 'examples/cifar10')
parser.add_argument('--iterations', dest = 'iterations', type = int,
help = 'number of iterations to train or resume',
default = 10000)
parser.add_argument('--image', dest = 'image', type = str,
help = 'path to image for testing',
default = 'examples/cifar10/test_dog.png')
return parser
def main_train():
"""
Train a network on Cifar10 on scratch.
"""
def network(lmdb_path, batch_size):
"""
The network definition given the LMDB path and the used batch size.
:param lmdb_path: path to LMDB to use (train or test LMDB)
:type lmdb_path: string
:param batch_size: batch size to use
:type batch_size: int
:return: the network definition as string to write to the prototxt file
:rtype: string
"""
net = caffe.NetSpec()
net.data, net.labels = caffe.layers.Data(batch_size = batch_size,
backend = caffe.params.Data.LMDB,
source = lmdb_path,
transform_param = dict(scale = 1./255),
ntop = 2)
net.conv1 = caffe.layers.Convolution(net.data, kernel_size = 5, num_output = 32, pad = 2,
stride = 1, weight_filler = dict(type = 'xavier'))
net.pool1 = caffe.layers.Pooling(net.conv1, kernel_size = 3, stride = 2,
pool = caffe.params.Pooling.MAX)
net.relu1 = caffe.layers.ReLU(net.pool1, in_place = True)
net.norm1 = caffe.layers.LRN(net.relu1, local_size = 3, alpha = 5e-05,
beta = 0.75, norm_region = caffe.params.LRN.WITHIN_CHANNEL)
net.conv2 = caffe.layers.Convolution(net.relu1, kernel_size = 5, num_output = 32, pad = 2,
stride = 1, weight_filler = dict(type = 'xavier'))
net.relu2 = caffe.layers.ReLU(net.conv2, in_place = True)
net.pool2 = caffe.layers.Pooling(net.relu2, kernel_size = 3, stride = 2,
pool = caffe.params.Pooling.AVE)
net.norm2 = caffe.layers.LRN(net.pool2, local_size = 3, alpha = 5e-05, beta = 0.75,
norm_region = caffe.params.LRN.WITHIN_CHANNEL)
net.conv3 = caffe.layers.Convolution(net.norm2, kernel_size = 5, num_output = 64, pad = 2,
stride = 1, weight_filler = dict(type = 'xavier'))
net.relu3 = caffe.layers.ReLU(net.conv3, in_place = True)
net.pool3 = caffe.layers.Pooling(net.relu3, kernel_size = 3, stride = 2,
pool = caffe.params.Pooling.AVE)
net.score = caffe.layers.InnerProduct(net.pool3, num_output = 10)
net.loss = caffe.layers.SoftmaxWithLoss(net.score, net.labels)
return net.to_proto()
def count_errors(scores, labels):
"""
Utility method to count the errors given the ouput of the
"score" layer and the labels.
:param score: output of score layer
:type score: numpy.ndarray
:param labels: labels
:type labels: numpy.ndarray
:return: count of errors
:rtype: int
"""
return numpy.sum(numpy.argmax(scores, axis = 1) != labels)
train_prototxt_path = args.working_directory + '/train.prototxt'
test_prototxt_path = args.working_directory + '/test.prototxt'
deploy_prototxt_path = args.working_directory + '/deploy.prototxt'
with open(train_prototxt_path, 'w') as f:
f.write(str(network(args.train_lmdb, 128)))
with open(test_prototxt_path, 'w') as f:
f.write(str(network(args.test_lmdb, 1000)))
tools.prototxt.train2deploy(train_prototxt_path, (1, 3, 32, 32), deploy_prototxt_path)
solver_prototxt_path = args.working_directory + '/solver.prototxt'
solver_prototxt = tools.solvers.SolverProtoTXT({
'train_net': train_prototxt_path,
'test_net': test_prototxt_path,
'test_initialization': 'false', # no testing
'test_iter': 0, # no testing
'test_interval': 1000,
'base_lr': 0.01,
'lr_policy': 'inv',
'gamma': 0.0001,
'power': 0.75,
'stepsize': 1000,
'display': 100,
'max_iter': 1000,
'momentum': 0.95,
'weight_decay': 0.0005,
'snapshot': 0, # only at the end
'snapshot_prefix': args.working_directory + '/snapshot',
'solver_mode': 'CPU'
})
solver_prototxt.write(solver_prototxt_path)
solver = caffe.SGDSolver(solver_prototxt_path)
callbacks = []
# Callback to report loss in console. Also automatically plots the loss
# and writes it to the given file. In order to silence the console,
# use plot_loss instead of report_loss.
report_loss = tools.solvers.PlotLossCallback(100, args.working_directory + '/loss.png')
callbacks.append({
'callback': tools.solvers.PlotLossCallback.report_loss,
'object': report_loss,
'interval': 1,
})
# Callback to report error in console.
report_error = tools.solvers.PlotErrorCallback(count_errors, 60000, 10000,
solver_prototxt.get_parameters()['snapshot_prefix'],
args.working_directory + '/error.png')
callbacks.append({
'callback': tools.solvers.PlotErrorCallback.report_error,
'object': report_error,
'interval': 500,
})
# Callback to save an "early stopping" model.
callbacks.append({
'callback': tools.solvers.PlotErrorCallback.stop_early,
'object': report_error,
'interval': 500,
})
# Callback for reporting the gradients for all layers in the console.
report_gradient = tools.solvers.PlotGradientCallback(100, args.working_directory + '/gradient.png')
callbacks.append({
'callback': tools.solvers.PlotGradientCallback.report_gradient,
'object': report_gradient,
'interval': 1,
})
# Callback for saving regular snapshots using the snapshot_prefix in the
# solver prototxt file.
# Is added after the "early stopping" callback to avoid problems.
callbacks.append({
'callback': tools.solvers.SnapshotCallback.write_snapshot,
'object': tools.solvers.SnapshotCallback(),
'interval': 500,
})
monitoring_solver = tools.solvers.MonitoringSolver(solver)
monitoring_solver.register_callback(callbacks)
monitoring_solver.solve(args.iterations)
def main_resume():
"""
Resume training; assumes training has been started using :func:`examples.cifar10.main_train`.
"""
def count_errors(scores, labels):
"""
Utility method to count the errors given the ouput of the
"score" layer and the labels.
:param score: output of score layer
:type score: numpy.ndarray
:param labels: labels
:type labels: numpy.ndarray
:return: count of errors
:rtype: int
"""
return numpy.sum(numpy.argmax(scores, axis = 1) != labels)
max_iteration = 0
files = glob.glob(args.working_directory + '/*.solverstate')
for filename in files:
filenames = filename.split('_')
iteration = filenames[-1][:-12]
try:
iteration = int(iteration)
if iteration > max_iteration:
max_iteration = iteration
except:
pass
caffemodel = args.working_directory + '/snapshot_iter_' + str(max_iteration) + '.caffemodel'
solverstate = args.working_directory + '/snapshot_iter_' + str(max_iteration) + '.solverstate'
train_prototxt_path = args.working_directory + '/train.prototxt'
test_prototxt_path = args.working_directory + '/test.prototxt'
deploy_prototxt_path = args.working_directory + '/deploy.prototxt'
solver_prototxt_path = args.working_directory + '/solver.prototxt'
assert max_iteration > 0, "could not find a solverstate or snaphot file to resume"
assert os.path.exists(caffemodel), "caffemodel %s not found" % caffemodel
assert os.path.exists(solverstate), "solverstate %s not found" % solverstate
assert os.path.exists(train_prototxt_path), "prototxt %s not found" % train_prototxt_path
assert os.path.exists(test_prototxt_path), "prototxt %s not found" % test_prototxt_path
assert os.path.exists(deploy_prototxt_path), "prototxt %s not found" % deploy_prototxt_path
assert os.path.exists(solver_prototxt_path), "prototxt %s not found" % solver_prototxt_path
solver = caffe.SGDSolver(solver_prototxt_path)
solver.restore(solverstate)
solver.net.copy_from(caffemodel)
solver_prototxt = tools.solvers.SolverProtoTXT()
solver_prototxt.read(solver_prototxt_path)
callbacks = []
# Callback to report loss in console.
report_loss = tools.solvers.PlotLossCallback(100, args.working_directory + '/loss.png')
callbacks.append({
'callback': tools.solvers.PlotLossCallback.report_loss,
'object': report_loss,
'interval': 1,
})
# Callback to report error in console.
report_error = tools.solvers.PlotErrorCallback(count_errors, 60000, 10000,
solver_prototxt.get_parameters()['snapshot_prefix'],
args.working_directory + '/error.png')
callbacks.append({
'callback': tools.solvers.PlotErrorCallback.report_error,
'object': report_error,
'interval': 500,
})
# Callback to save an "early stopping" model.
callbacks.append({
'callback': tools.solvers.PlotErrorCallback.stop_early,
'object': report_error,
'interval': 500,
})
# Callback for reporting the gradients for all layers in the console.
report_gradient = tools.solvers.PlotGradientCallback(100, args.working_directory + '/gradient.png')
callbacks.append({
'callback': tools.solvers.PlotGradientCallback.report_gradient,
'object': report_gradient,
'interval': 1,
})
# Callback for saving regular snapshots using the snapshot_prefix in the
# solver prototxt file.
# Is added after the "early stopping" callback to avoid problems.
callbacks.append({
'callback': tools.solvers.SnapshotCallback.write_snapshot,
'object': tools.solvers.SnapshotCallback(),
'interval': 500,
})
monitoring_solver = tools.solvers.MonitoringSolver(solver, max_iteration)
monitoring_solver.register_callback(callbacks)
monitoring_solver.solve(args.iterations)
def main_test():
"""
Test the latest model obtained by :func:`examples.cifar10.main_train`
or :func:`examples.cifar10.main_resume` on the given input image.
"""
max_iteration = 0
files = glob.glob(args.working_directory + '/*.solverstate')
for filename in files:
filenames = filename.split('_')
iteration = filenames[-1][:-12]
try:
iteration = int(iteration)
if iteration > max_iteration:
max_iteration = iteration
except:
pass
caffemodel = args.working_directory + '/snapshot_iter_' + str(max_iteration) + '.caffemodel'
deploy_prototxt_path = args.working_directory + '/deploy.prototxt'
assert max_iteration > 0, "could not find a solverstate or snaphot file to resume"
assert os.path.exists(caffemodel), "caffemodel %s not found" % caffemodel
assert os.path.exists(deploy_prototxt_path), "prototxt %s not found" % deploy_prototxt_path
net = caffe.Net(deploy_prototxt_path, caffemodel, caffe.TEST)
transformer = caffe.io.Transformer({'data': (1, 3, 32, 32)})
transformer.set_transpose('data', (2, 0, 1))
transformer.set_raw_scale('data', 1/255.)
assert os.path.exists(args.image), "image %s not found" % args.image
image = cv2.imread(args.image)
cv2.imshow('image', | |
<reponame>KOLANICH-libs/websockets
"""
:mod:`websockets.client` defines the WebSocket client APIs.
"""
import asyncio
import collections.abc
import functools
import logging
import warnings
from types import TracebackType
from typing import Any, Generator, List, Optional, Sequence, Tuple, Type, cast
from .exceptions import (
InvalidHandshake,
InvalidHeader,
InvalidMessage,
InvalidStatusCode,
NegotiationError,
RedirectHandshake,
SecurityError,
)
from .extensions.base import ClientExtensionFactory, Extension
from .extensions.permessage_deflate import ClientPerMessageDeflateFactory
from .handshake import build_request, check_response
from .headers import (
build_authorization_basic,
build_extension,
build_subprotocol,
parse_extension,
parse_subprotocol,
)
from .http import USER_AGENT, Headers, HeadersLike, read_response
from .protocol import WebSocketCommonProtocol
from .typing import ExtensionHeader, Origin, Subprotocol
from .uri import WebSocketURI, parse_uri
__all__ = ["connect", "unix_connect", "WebSocketClientProtocol"]
logger = logging.getLogger(__name__)
class WebSocketClientProtocol(WebSocketCommonProtocol):
"""
:class:`~asyncio.Protocol` subclass implementing a WebSocket client.
This class inherits most of its methods from
:class:`~websockets.protocol.WebSocketCommonProtocol`.
"""
is_client = True
side = "client"
def __init__(
self,
*,
origin: Optional[Origin] = None,
extensions: Optional[Sequence[ClientExtensionFactory]] = None,
subprotocols: Optional[Sequence[Subprotocol]] = None,
extra_headers: Optional[HeadersLike] = None,
**kwargs: Any,
) -> None:
self.origin = origin
self.available_extensions = extensions
self.available_subprotocols = subprotocols
self.extra_headers = extra_headers
super().__init__(**kwargs)
def write_http_request(self, path: str, headers: Headers) -> None:
"""
Write request line and headers to the HTTP request.
"""
self.path = path
self.request_headers = headers
logger.debug("%s > GET %s HTTP/1.1", self.side, path)
logger.debug("%s > %r", self.side, headers)
# Since the path and headers only contain ASCII characters,
# we can keep this simple.
request = f"GET {path} HTTP/1.1\r\n"
request += str(headers)
self.transport.write(request.encode())
async def read_http_response(self) -> Tuple[int, Headers]:
"""
Read status line and headers from the HTTP response.
If the response contains a body, it may be read from ``self.reader``
after this coroutine returns.
:raises ~websockets.exceptions.InvalidMessage: if the HTTP message is
malformed or isn't an HTTP/1.1 GET response
"""
try:
status_code, reason, headers = await read_response(self.reader)
except asyncio.CancelledError: # pragma: no cover
raise
except Exception as exc:
raise InvalidMessage("did not receive a valid HTTP response") from exc
logger.debug("%s < HTTP/1.1 %d %s", self.side, status_code, reason)
logger.debug("%s < %r", self.side, headers)
self.response_headers = headers
return status_code, self.response_headers
@staticmethod
def process_extensions(
headers: Headers,
available_extensions: Optional[Sequence[ClientExtensionFactory]],
) -> List[Extension]:
"""
Handle the Sec-WebSocket-Extensions HTTP response header.
Check that each extension is supported, as well as its parameters.
Return the list of accepted extensions.
Raise :exc:`~websockets.exceptions.InvalidHandshake` to abort the
connection.
:rfc:`6455` leaves the rules up to the specification of each
:extension.
To provide this level of flexibility, for each extension accepted by
the server, we check for a match with each extension available in the
client configuration. If no match is found, an exception is raised.
If several variants of the same extension are accepted by the server,
it may be configured several times, which won't make sense in general.
Extensions must implement their own requirements. For this purpose,
the list of previously accepted extensions is provided.
Other requirements, for example related to mandatory extensions or the
order of extensions, may be implemented by overriding this method.
"""
accepted_extensions: List[Extension] = []
header_values = headers.get_all("Sec-WebSocket-Extensions")
if header_values:
if available_extensions is None:
raise InvalidHandshake("no extensions supported")
parsed_header_values: List[ExtensionHeader] = sum(
[parse_extension(header_value) for header_value in header_values], []
)
for name, response_params in parsed_header_values:
for extension_factory in available_extensions:
# Skip non-matching extensions based on their name.
if extension_factory.name != name:
continue
# Skip non-matching extensions based on their params.
try:
extension = extension_factory.process_response_params(
response_params, accepted_extensions
)
except NegotiationError:
continue
# Add matching extension to the final list.
accepted_extensions.append(extension)
# Break out of the loop once we have a match.
break
# If we didn't break from the loop, no extension in our list
# matched what the server sent. Fail the connection.
else:
raise NegotiationError(
f"Unsupported extension: "
f"name = {name}, params = {response_params}"
)
return accepted_extensions
@staticmethod
def process_subprotocol(
headers: Headers, available_subprotocols: Optional[Sequence[Subprotocol]]
) -> Optional[Subprotocol]:
"""
Handle the Sec-WebSocket-Protocol HTTP response header.
Check that it contains exactly one supported subprotocol.
Return the selected subprotocol.
"""
subprotocol: Optional[Subprotocol] = None
header_values = headers.get_all("Sec-WebSocket-Protocol")
if header_values:
if available_subprotocols is None:
raise InvalidHandshake("no subprotocols supported")
parsed_header_values: Sequence[Subprotocol] = sum(
[parse_subprotocol(header_value) for header_value in header_values], []
)
if len(parsed_header_values) > 1:
subprotocols = ", ".join(parsed_header_values)
raise InvalidHandshake(f"multiple subprotocols: {subprotocols}")
subprotocol = parsed_header_values[0]
if subprotocol not in available_subprotocols:
raise NegotiationError(f"unsupported subprotocol: {subprotocol}")
return subprotocol
async def handshake(
self,
wsuri: WebSocketURI,
origin: Optional[Origin] = None,
available_extensions: Optional[Sequence[ClientExtensionFactory]] = None,
available_subprotocols: Optional[Sequence[Subprotocol]] = None,
extra_headers: Optional[HeadersLike] = None,
) -> None:
"""
Perform the client side of the opening handshake.
:param origin: sets the Origin HTTP header
:param available_extensions: list of supported extensions in the order
in which they should be used
:param available_subprotocols: list of supported subprotocols in order
of decreasing preference
:param extra_headers: sets additional HTTP request headers; it must be
a :class:`~websockets.http.Headers` instance, a
:class:`~collections.abc.Mapping`, or an iterable of ``(name,
value)`` pairs
:raises ~websockets.exceptions.InvalidHandshake: if the handshake
fails
"""
request_headers = Headers()
if wsuri.port == (443 if wsuri.secure else 80): # pragma: no cover
request_headers["Host"] = wsuri.host
else:
request_headers["Host"] = f"{wsuri.host}:{wsuri.port}"
if wsuri.user_info:
request_headers["Authorization"] = build_authorization_basic(
*wsuri.user_info
)
if origin is not None:
request_headers["Origin"] = origin
key = build_request(request_headers)
if available_extensions is not None:
extensions_header = build_extension(
[
(extension_factory.name, extension_factory.get_request_params())
for extension_factory in available_extensions
]
)
request_headers["Sec-WebSocket-Extensions"] = extensions_header
if available_subprotocols is not None:
protocol_header = build_subprotocol(available_subprotocols)
request_headers["Sec-WebSocket-Protocol"] = protocol_header
if extra_headers is not None:
if isinstance(extra_headers, Headers):
extra_headers = extra_headers.raw_items()
elif isinstance(extra_headers, collections.abc.Mapping):
extra_headers = extra_headers.items()
for name, value in extra_headers:
request_headers[name] = value
request_headers.setdefault("User-Agent", USER_AGENT)
self.write_http_request(wsuri.resource_name, request_headers)
status_code, response_headers = await self.read_http_response()
if status_code in (301, 302, 303, 307, 308):
if "Location" not in response_headers:
raise InvalidHeader("Location")
raise RedirectHandshake(response_headers["Location"])
elif status_code != 101:
raise InvalidStatusCode(status_code)
check_response(response_headers, key)
self.extensions = self.process_extensions(
response_headers, available_extensions
)
self.subprotocol = self.process_subprotocol(
response_headers, available_subprotocols
)
self.connection_open()
class Connect:
"""
Connect to the WebSocket server at the given ``uri``.
Awaiting :func:`connect` yields a :class:`WebSocketClientProtocol` which
can then be used to send and receive messages.
:func:`connect` can also be used as a asynchronous context manager. In
that case, the connection is closed when exiting the context.
:func:`connect` is a wrapper around the event loop's
:meth:`~asyncio.loop.create_connection` method. Unknown keyword arguments
are passed to :meth:`~asyncio.loop.create_connection`.
For example, you can set the ``ssl`` keyword argument to a
:class:`~ssl.SSLContext` to enforce some TLS settings. When connecting to
a ``wss://`` URI, if this argument isn't provided explicitly,
:func:`ssl.create_default_context` is called to create a context.
You can connect to a different host and port from those found in ``uri``
by setting ``host`` and ``port`` keyword arguments. This only changes the
destination of the TCP connection. The host name from ``uri`` is still
used in the TLS handshake for secure connections and in the ``Host`` HTTP
header.
The ``create_protocol`` parameter allows customizing the
:class:`~asyncio.Protocol` that manages the connection. It should be a
callable or class accepting the same arguments as
:class:`WebSocketClientProtocol` and returning an instance of
:class:`WebSocketClientProtocol` or a subclass. It defaults to
:class:`WebSocketClientProtocol`.
The behavior of ``ping_interval``, ``ping_timeout``, ``close_timeout``,
``max_size``, ``max_queue``, ``read_limit``, and ``write_limit`` is
described in :class:`~websockets.protocol.WebSocketCommonProtocol`.
:func:`connect` also accepts the following optional arguments:
* ``compression`` is a shortcut to configure compression extensions;
by default it enables the "permessage-deflate" extension; set it to
``None`` to disable compression
* ``origin`` sets the Origin HTTP header
* ``extensions`` is a list of supported extensions in order of
decreasing preference
* ``subprotocols`` is a list of supported subprotocols in order of
decreasing preference
* ``extra_headers`` sets additional HTTP request headers; it can be a
:class:`~websockets.http.Headers` instance, a
:class:`~collections.abc.Mapping`, or an iterable of ``(name, value)``
pairs
:raises ~websockets.uri.InvalidURI: if ``uri`` is invalid
:raises ~websockets.handshake.InvalidHandshake: if the opening handshake
fails
"""
MAX_REDIRECTS_ALLOWED = 10
def __init__(
self,
uri: str,
*,
path: Optional[str] = None,
create_protocol: Optional[Type[WebSocketClientProtocol]] = None,
ping_interval: Optional[float] = 20,
ping_timeout: Optional[float] = 20,
close_timeout: Optional[float] = None,
max_size: Optional[int] = 2 ** 20,
max_queue: Optional[int] = 2 ** 5,
read_limit: int = 2 ** 16,
write_limit: int = 2 ** 16,
loop: Optional[asyncio.AbstractEventLoop] = None,
legacy_recv: bool = False,
klass: Optional[Type[WebSocketClientProtocol]] = None,
timeout: Optional[float] = None,
compression: Optional[str] = "deflate",
origin: Optional[Origin] = None,
extensions: Optional[Sequence[ClientExtensionFactory]] = None,
subprotocols: Optional[Sequence[Subprotocol]] = None,
extra_headers: Optional[HeadersLike] = None,
self.speed_calc_cycles = speed_calc_cycles
self.keep_new_result_in_memory = keep_new_result_in_memory
# final result as list, other types can be achieved by subclassing
self.final_result = []
# NOTE: it only works using multiprocessing.Queue()
# the Queue class from the module queue does NOT work
self.manager_server = None
self.hostname = socket.gethostname()
self.job_q_on_disk = job_q_on_disk
self.job_q_on_disk_path = job_q_on_disk_path
if self.job_q_on_disk:
fname = _new_rand_file_name(path = self.job_q_on_disk_path, pre='.',end='_jobqdb')
else:
fname = None
self.job_q = ArgsContainer(fname)
self.result_q = ClosableQueue()
self.fail_q = ClosableQueue()
@staticmethod
def _check_bind(host, port):
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((host, port))
except:
log.critical("test bind to %s:%s failed", host, port)
raise
finally:
s.close()
def _start_manager_thread(self):
self._check_bind(self.hostname, self.port)
# make job_q, result_q, fail_q, const_arg available via network
JobManager_Manager.register('get_job_q', callable=lambda: self.job_q, exposed=['get', 'put', 'qsize'])
JobManager_Manager.register('get_result_q', callable=lambda: self.result_q, exposed=['get', 'put', 'qsize'])
JobManager_Manager.register('get_fail_q', callable=lambda: self.fail_q, exposed=['get', 'put', 'qsize'])
JobManager_Manager.register('get_const_arg', callable=lambda: self.const_arg)
address = ('', self.port)  # ip '' means bind to all local interfaces
authkey = self.authkey
self.manager_server = JobManager_Manager(address, authkey).get_server()
server_thr = threading.Thread(target=self.manager_server.serve_forever)
server_thr.daemon = True
server_thr.start()
m_test = BaseManager(('localhost', self.port), authkey)
try:
m_test.connect()
except:
raise ConnectionError("test connect to JobManager_Manager failed")
log.info("JobManager_Manager thread started on %s:%s (%s)", self.hostname, self.port, authkey)
def __enter__(self):
return self
def __exit__(self, err, val, trb):
# KeyboardInterrupt via SIGINT will be mapped to SystemExit
# SystemExit is considered non-erroneous
if (err is None) or (err is SystemExit):
log.debug("normal shutdown")
# bring everything down, dump status to file
self.shutdown()
# no exception traceback will be printed
return True
else:
log.debug("shutdown due to exception '%s'", err.__name__)
# bring everything down, dump status to file
self.shutdown()
# causes exception traceback to be printed
return False
def shutdown(self):
""""stop all spawned processes and clean up
- call process_final_result to handle all collected result
- if job_q is not empty dump remaining job_q
"""
# will only be False when _shutdown was started in subprocess
if sys.version_info[0] == 3:
if self.manager_server is not None:
self.manager_server.stop_event.set()
log.debug("set stopEvent for JobManager_Manager server")
self.manager_server = None
# self.job_q.close()
# self.result_q.close()
# self.fail_q.close()
# log.debug("queues closed!")
# do user defined final processing
self.process_final_result()
log.debug("process_final_result done!")
# print(self.fname_dump)
if self.fname_dump is not None:
if self.fname_dump == 'auto':
fname = "{}_{}.dump".format(self.authkey.decode('utf8'), getDateForFileName(includePID=False))
else:
fname = self.fname_dump
log.info("dump current state to '%s'", fname)
with open(fname, 'wb') as f:
self.__dump(f)
log.debug("dump state done!")
else:
log.info("fname_dump == None, ignore dumping current state!")
# start also makes sure that it was not started as a subprocess,
# so with default behavior this assertion will always be True
assert self._pid == os.getpid()
self.show_statistics()
log.info("JobManager_Server was successfully shut down")
def show_statistics(self):
if self.show_stat:
all_jobs = self.job_q.put_items()
succeeded = self.job_q.marked_items()
failed = self.fail_q.qsize()
all_processed = succeeded + failed
id1 = self.__class__.__name__+" "
l = len(id1)
id2 = ' '*l + "| "
print("{}total number of jobs : {}".format(id1, all_jobs))
print("{} processed : {}".format(id2, all_processed))
print("{} succeeded : {}".format(id2, succeeded))
print("{} failed : {}".format(id2, failed))
all_not_processed = all_jobs - all_processed
not_queried = self.job_q.qsize()
queried_but_not_processed = all_not_processed - not_queried
print("{} not processed : {}".format(id2, all_not_processed))
print("{} queried : {}".format(id2, queried_but_not_processed))
print("{} not queried yet : {}".format(id2, not_queried))
def all_successfully_processed(self):
return self.job_q.qsize() == 0
@staticmethod
def static_load(f):
data = {}
data['final_result'] = pickle.load(f)
data['job_q'] = pickle.load(f)
data['fail_list'] = pickle.load(f)
return data
def __load(self, f):
data = JobManager_Server.static_load(f)
self.final_result = data['final_result']
self.job_q = data['job_q']
for fail_item in data['fail_list']:
self.fail_q.put(fail_item)
def __dump(self, f):
pickle.dump(self.final_result, f, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(self.job_q, f, protocol=pickle.HIGHEST_PROTOCOL)
fail_list = []
try:
while True:
fail_list.append(self.fail_q.get(timeout = 0))
except queue.Empty:
pass
pickle.dump(fail_list, f, protocol=pickle.HIGHEST_PROTOCOL)
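# Note (added): the pickle order written here - final_result, job_q, then the
# fail list - must stay in sync with the read order in static_load() above;
# if one changes, the other must follow.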
def read_old_state(self, fname_dump=None):
if fname_dump is None:
fname_dump = self.fname_dump
if fname_dump == 'auto':
log.critical("fname_dump must not be 'auto' when reading old state")
raise RuntimeError("fname_dump must not be 'auto' when reading old state")
if not os.path.isfile(fname_dump):
log.critical("file '%s' to read old state from not found", fname_dump)
raise RuntimeError("file '{}' to read old state from not found".format(fname_dump))
log.info("load state from file '%s'", fname_dump)
with open(fname_dump, 'rb') as f:
self.__load(f)
self.show_statistics()
def put_arg(self, a):
"""add argument a to the job_q
"""
#hash_bfa = hashlib.sha256(bf.dump(a)).digest()
# if hash_bfa in self.args_dict:
# msg = ("do not add the same argument twice! If you are sure, they are not the same, "+
# "there might be an error with the binfootprint mehtods or a hash collision!")
# log.critical(msg)
# raise ValueError(msg)
# this dict associates a unique index with each argument 'a'
# or better with its binary footprint
#self.args_dict[hash_bfa] = len(self.args_list)
#self.args_list.append(a)
# the actual shared queue
self.job_q.put(a)
#with self._numjobs.get_lock():
# self._numjobs.value += 1
def args_from_list(self, args):
"""serialize a list of arguments to the job_q
"""
for a in args:
self.put_arg(a)
def process_new_result(self, arg, result):
"""Will be called when the result_q has data available.
result is the computed result to the argument arg.
Should be overwritten by subclassing!
"""
self.final_result.append((arg, result))
def process_final_result(self):
"""to implement user defined final processing"""
pass
def print_jm_ready(self):
# please overwrite for individual hooks to notify that the server process runs
print("{} awaits client results".format(self.__class__.__name__))
def bring_him_up(self):
self._start_manager_thread()
if self._pid != os.getpid():
log.critical("do not run JobManager_Server.start() in a subprocess")
raise RuntimeError("do not run JobManager_Server.start() in a subprocess")
# if (self.numjobs - self.numresults) != len(self.args_dict):
# log.debug("numjobs: %s\n" +
# "numresults: %s\n" +
# "len(self.args_dict): %s", self.numjobs, self.numresults, len(self.args_dict))
#
# log.critical(
# "inconsistency detected! (self.numjobs - self.numresults) != len(self.args_dict)! use JobManager_Server.put_arg to put arguments to the job_q")
# raise RuntimeError(
# "inconsistency detected! (self.numjobs - self.numresults) != len(self.args_dict)! use JobManager_Server.put_arg to put arguments to the job_q")
jobqsize = self.job_q.qsize()
if jobqsize == 0:
log.info("no jobs to process! use JobManager_Server.put_arg to put arguments to the job_q")
return
else:
log.info("started (host:%s authkey:%s port:%s jobs:%s)", self.hostname, self.authkey.decode(), self.port,
jobqsize)
print("{} started (host:{} authkey:{} port:{} jobs:{})".format(self.__class__.__name__,
self.hostname,
self.authkey.decode(),
self.port,
jobqsize))
Signal_to_sys_exit(signals=[signal.SIGTERM, signal.SIGINT])
log.debug("ready for processing incoming results")
self.print_jm_ready()
def join(self):
"""
starts to loop over incoming results
When finished, or on exception call stop() afterwards to shut down gracefully.
"""
info_line = progress.StringValue(num_of_bytes=100)
numresults = progress.UnsignedIntValue(0)
numjobs = progress.UnsignedIntValue(self.job_q.put_items())
with progress.ProgressBarFancy(count = numresults,
max_count = numjobs,
interval = self.msg_interval,
speed_calc_cycles = self.speed_calc_cycles,
sigint = 'ign',
sigterm = 'ign',
info_line=info_line) as stat:
if not self.hide_progress:
stat.start()
while numresults.value < numjobs.value:
failqsize = self.fail_q.qsize()
jobqsize = self.job_q.qsize()
markeditems = self.job_q.marked_items()
numresults.value = failqsize + markeditems
info_line.value = ("result_q size:{}, jobs: remaining:{}, "+
"done:{}, failed:{}, in progress:{}").format(self.result_q.qsize(),
jobqsize,
markeditems,
failqsize,
numjobs.value - numresults.value - jobqsize).encode('utf-8')
log.info("infoline {}".format(info_line.value))
# allows for update of the info line
try:
arg, result = self.result_q.get(timeout=self.msg_interval)
except queue.Empty:
continue
self.job_q.mark(arg)
self.process_new_result(arg, result)
if not self.keep_new_result_in_memory:
del arg
del result
log.debug("wait %ss before trigger clean up", self.__wait_before_stop)
time.sleep(self.__wait_before_stop)
def start(self):
self.bring_him_up()
self.join()
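# Usage sketch (added for illustration; the authkey and port values are
# assumptions, the method names follow this class's API):
#
#     class MyServer(JobManager_Server):
#         def process_new_result(self, arg, result):
#             self.final_result.append((arg, result))
#
#     with MyServer(authkey='example_jm', port=42524) as server:
#         for a in range(10):
#             server.put_arg(a)
#         server.start()   # blocks until all jobs are processed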
class JobManager_Local(JobManager_Server):
def __init__(self,
client_class,
authkey = 'local_jobmanager',
nproc = -1,
delay = 1,
const_arg = None,
port = 42524,
verbose = None,
verbose_client = None,
show_statusbar_for_jobs = False,
show_counter_only = False,
niceness_clients = 19,
msg_interval = 1,
fname_dump = 'auto',
speed_calc_cycles = 50):
JobManager_Server.__init__(self,
authkey = authkey,
const_arg = const_arg,
port = port,
verbose = verbose,
msg_interval = msg_interval,
fname_dump = fname_dump,
speed_calc_cycles = speed_calc_cycles)
self.client_class = client_class
self.port = port
self.nproc = nproc
self.delay = delay
self.verbose_client=verbose_client
self.show_statusbar_for_jobs = show_statusbar_for_jobs
self.show_counter_only = show_counter_only
self.niceness_clients = niceness_clients
@staticmethod
def _start_client(authkey,
port,
client_class,
nproc = 0,
nice = 19,
verbose = None,
show_statusbar_for_jobs = False,
show_counter_only = False):
client = client_class(server='localhost',
authkey = authkey,
port = port,
nproc = nproc,
nice = nice,
verbose = verbose,
show_statusbar_for_jobs = show_statusbar_for_jobs,
show_counter_only = show_counter_only,
use_special_SIG_INT_handler = False)
Signal_to_sys_exit(signals=[signal.SIGINT, signal.SIGTERM])
client.start()
def start(self):
p_client = mp.Process(target=JobManager_Local._start_client,
args=(self.authkey,
self.port,
self.client_class,
self.nproc,
self.niceness_clients,
self.verbose_client,
self.show_statusbar_for_jobs,
self.show_counter_only))
JobManager_Local.bring_him_up(self)
p_client.start()
JobManager_Local.join(self)
progress.check_process_termination(p_client,
prefix = 'local_client',
timeout = 2)
class RemoteKeyError(RemoteError):
pass
class RemoteValueError(RemoteError):
pass
class Signal_handler_for_Jobmanager_client(object):
def __init__(self, client_object, exit_handler, signals=[signal.SIGINT]):
self.client_object = client_object
self.exit_handler = exit_handler
for s in signals:
log.debug("setup Signal_handler_for_Jobmanager_client for signal %s", progress.signal_dict[s])
signal.signal(s, self._handler)
def _handler(self, sig, frame):
log.info("received | |
# Backgammon.py
import numpy as np
import matplotlib.pyplot as plt
# Using this guy's BG board implementation, to learn
# https://github.com/weekend37/Backgammon/blob/master/Backgammon.py
# Then going to build my own computer AI, or alter the game slightly
def init_board():
# initializes the game board
# negative numbers represent player -1's count on a given board index,
# positive numbers represent player 1's count on a given board index
board = np.zeros(29)
board[1] = -2
board[12] = -5
board[17] = -3
board[19] = -5
board[6] = 5
board[8] = 3
board[13] = 5
board[24] = 2
return board
print(init_board())
def roll_dice():
# rolls the dice
# between 1,6, 2 dice
dice = np.random.randint(1,7,2)
return dice
print(roll_dice())
def check_for_error(board):
# checks for obvious errors
errorInProgram = False
#sum of all positive not equal to 15, player 1's pieces,
#sum of all negative not equal to -15, player -1's pieces
if (sum(board[board>0]) != 15 or sum(board[board<0]) != -15):
# too many or too few pieces on board
errorInProgram = True
print("Too many or too few pieces on board!")
return errorInProgram
def game_over(board):
# returns True if the game is over
return board[27]==15 or board[28]==-15
def pretty_print(board):
#prints board, as BG Board would actually look
string = str(np.array2string(board[1:13])+'\n'+
np.array2string(board[24:12:-1])+'\n'+
np.array2string(board[25:29]))
print("board: \n", string)
board = init_board()
# pretty_print(board)
def legal_move(board, die, player):
# finds legal moves for a board and one dice
# inputs are some BG-board, the number on the die and which player is up
# outputs all the moves (just for the one die)
possible_moves = []
if player == 1:
######You're stuck on rail for this if condition
if board[25] > 0:
#25-die is the index that you would come in on, if you are on the rail as player 1
# checking to see if there is not a block there, and if so, add [25, spot where you end up]
start_pip = 25-die
if board[start_pip] > -2:
possible_moves.append(np.array([25,start_pip]))
##### not stuck on the rail: either bearing off (if all pieces are home) or regular moves
else:
print(f"you have {sum(board[7:25]>0)} spots with pieces not in your zone")
print(np.max(np.where(board[1:7]>0)[0]+1))
print(np.min(np.where(board[19:25]<0)[0]))
# adding options if player is bearing off
possible_start_pips_list = []
#This is checking for the actual pieces, with amounts > 0, on the board in spots 7:24,
#If this is 0, that means all of player 1's pieces are in his zone, and he can start
#taking them off
if sum(board[7:25]>0) == 0:
#Once all pieces are in your zone, this checks if the spot represented by the
# die has pips on it
# in player 1's case, this is a positive number, so say there were 3 pips on the 6 spot,
#you append [6,27] to possible moves, 27 representing player 1's borne-off pile
if (board[die] > 0):
possible_moves.append(np.array([die,27]))
elif not game_over(board):
#checks if all pieces are off, for player 1, and if pips can be removed:
# everybody's past the dice throw?
#He is checking whether the die can bear off the furthest-back piece: s is the
# furthest occupied point, so a 6 could take a piece off the 5 point but a 4 could not.
# I changed it to first check s < 6, otherwise the condition can't be fulfilled
s = np.max(np.where(board[1:7]>0)[0]+1)
if s<6:
if s<die:
possible_moves.append(np.array([s,27]))
possible_start_pips = np.where(board[0:25]>0)[0]
#Possible_start_pips for player 1 represents all positions on the board, where you have a piece,
# as the count of pieces on each tile for player 1, is represented by a positive integer, 2 for 2, 1 for 1,
#etc...
possible_start_pips_list.append(possible_start_pips)
# Not stuck on rail, not ready to take your pieces off, this represents all other moves
for s in possible_start_pips:
#He is checking to see if position - die > 0, meaning if it is possible move on board the amount
# of spots represented by the die
end_pip = s-die
if end_pip > 0:
#And if it is, checking to see if opponent does not have block there, block is
#indicated by -2 or less for player 1, as opponents pieces are represented by negative integer
if board[end_pip] > -2:
#Appends starting pip, and then where you could move, represented by end pip
possible_moves.append(np.array([s,end_pip]))
#This represents other player, where all of his values are represented in negative integer counts
elif player == -1:
# dead piece, needs to be brought back to life
# if player == -1, his rail is represented by index 26 on the board, and his count on the rail is
#the negative number on that rail, so if it is < 0 he needs to get his pieces off the rail first
# debug prints commented out: they raise ValueError on empty selections
# print(np.max(np.where(board[1:7]>0)[0]+1))
# print(np.min(np.where(board[19:25]<0)[0]))
if board[26] < 0:
start_pip = die
#in this case, he is coming in from the 0 spot, meaning if he rolls a 6, and opponent has value <2,
#opponent does not have a block, and he can come in
if board[start_pip] < 2:
#youre taking off the rail, and moving to the index of the die, for player -1
possible_moves.append(np.array([26,start_pip]))
# no dead pieces
else:
# adding options if player is bearing off
if sum(board[1:19]<0) == 0:
#again, this represents player -1 having all pieces in spots 19-24, meaning he can start taking them off
if (board[25-die] < 0):
#if the index in his zone is negative, he has spots there, he can take that position off
#in this case, when player -1 takes pieces off, he's going to index 28, that is his spot
possible_moves.append(np.array([25-die,28]))
elif not game_over(board):
# everybody's past the dice throw?
#represents smallest count in player -1's ending zone, where you can take pieces off
s = np.min(np.where(board[19:25]<0)[0])
#this checks if you can start taking pips off from the -1 player's zone;
# condition is it has to be on index 20 or less, otherwise normal rolls apply
if s > 0:
if (6-s)<die:
possible_moves.append(np.array([19+s,28]))
# if player -1 can not take pieces off, or is not stuck, this represents all other rolls
# looking for negative numbers at all indexes, then since he's going the other way, adding the die
# to that index, and if other player's count is not 2 or greater, player -1 can move to that spot
possible_start_pips = np.where(board[0:25]<0)[0]
for s in possible_start_pips:
end_pip = s+die
if end_pip < 25:
if board[end_pip] < 2:
possible_moves.append(np.array([s,end_pip]))
return possible_moves
print(legal_move(board, 6, -1))
print(board[24])
#########
# #####################
# ######################
# TODO, left off here:
# #MAYBE START YOUR OWN CODING HERE
#
#
def update_board(board, move, player):
# updates the board
# inputs are some board, one move and the player
# outputs the updated board
board_to_update = np.copy(board)
# if the move is there
if len(move) > 0:
startPip = move[0]
endPip = move[1]
# moving the dead piece if the move kills a piece
kill = board_to_update[endPip]==(-1*player)
if kill:
board_to_update[endPip] = 0
jail = 25+(player==1)
board_to_update[jail] = board_to_update[jail] - player
board_to_update[startPip] = board_to_update[startPip]-1*player
board_to_update[endPip] = board_to_update[endPip]+player
return board_to_update
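# Illustration (added; not from the original file): one random-play half-turn
# built from the helpers above, using legal_moves() defined below.
#
#     board = init_board()
#     dice = roll_dice()
#     moves, boards = legal_moves(board, dice, player=1)
#     if boards:
#         board = boards[np.random.randint(len(boards))]
#     assert not check_for_error(board)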
def legal_moves(board, dice, player):
# finds all possible moves and the possible board after-states
# inputs are the BG-board, the dice rolled and which player is up
# outputs the possible pairs of moves (if they exist) and their after-states
moves = []
boards = []
# try using the first dice, then the second dice
possible_first_moves = legal_move(board, dice[0], player)
for m1 in possible_first_moves:
temp_board = update_board(board,m1,player)
possible_second_moves = legal_move(temp_board,dice[1], player)
for m2 in possible_second_moves:
moves.append(np.array([m1,m2]))
boards.append(update_board(temp_board,m2,player))
if dice[0] != dice[1]:
# try using the second dice, then the first one
possible_first_moves = legal_move(board, dice[1], player)
for m1 in possible_first_moves:
temp_board = update_board(board,m1,player)
possible_second_moves = legal_move(temp_board,dice[0], player)
for m2 in possible_second_moves:
moves.append(np.array([m1,m2]))
boards.append(update_board(temp_board,m2,player))
# if there's no pair of moves available, allow one move:
if len(moves)==0:
# first
"""
Tests for collaboration.
This includes tests for shares, and uses the API provided by genesets. Ultimately it might be good
to extract each of the API components into their respective reusable modules.
"""
from django.contrib.auth.models import User
from django.test import TestCase
from fixtureless import Factory
from tastypie.test import ResourceTestCaseMixin, TestApiClient
from organisms.models import Organism
from genes.models import Gene
from genesets.models import Geneset
from collaborations.models import Share, Collaboration
from collaborations.utils import get_collaborators, get_invites, get_inviteds
factory = Factory()
class ShareFromTestCase(ResourceTestCaseMixin, TestCase):
"""
Test that collaborators can share.
Test that collaborators can share via the API. These test cases are for two users, u1 and u2
who collaborate and need to share genesets.
"""
def setUp(self):
"""
Set up test environment.
Make two users, log in one of the users, create some genes and an organism, a geneset,
and a collaboration.
"""
super(ShareFromTestCase, self).setUp()
# Make Two Users
self.u1 = "asdf"
self.e1 = "<EMAIL>"
self.p1 = "qwerty"
self.owner = User.objects.create_user(self.u1, self.e1, self.p1)
self.u2 = "asdf2"
self.e2 = "<EMAIL>"
self.p2 = "<PASSWORD>"
self.other = User.objects.create_user(self.u2, self.e2, self.p2)
# log in the owner
self.api_client.client.login(username=self.u1, password=self.p1)
# Make Organism and Genes
self.org = factory.create(Organism)
initial = {'organism': self.org} # Make sure these are for the right organism.
self.g1 = factory.create(Gene, initial)
self.g2 = factory.create(Gene, initial)
# Make our geneset.
self.geneset = Geneset.objects.create(creator=self.owner, title='Test Geneset 1',
organism=self.org, deleted=False,
abstract='Collaboration Invitations.', public=False)
self.geneset_url = '/api/v1/geneset/%s/%s/invite' % (self.geneset.creator.username, self.geneset.slug)
# Make Collaboration, requires reciprocal
c1 = Collaboration(from_user=self.owner, to_user=self.other)
c1.save()
c2 = Collaboration(from_user=self.other, to_user=self.owner)
c2.save()
def testNoEmailPassed(self):
"""
Test invite without passed user.
If the user has permissions to invite, but no user e-mail address was passed,
we should just return the detail view.
"""
resp = self.api_client.post(self.geneset_url, format="json")
self.assertValidJSONResponse(resp)
self.assertEqual(self.deserialize(resp)['title'], 'Test Geneset 1')
def testEmailPassed(self):
"""
Test invite with a collaborator's e-mail passed.
If the user has permissions to invite and a collaborator's e-mail address is
passed, the detail view should be returned and a share created.
"""
resp = self.api_client.post(self.geneset_url, data={'email': self.e2}, format="json")
self.assertValidJSONResponse(resp)
self.assertEqual(self.deserialize(resp)['title'], 'Test Geneset 1')
result = Geneset.objects.get(pk=self.deserialize(resp)['id'])
shares = Share.objects.filter(geneset=result)
self.assertEqual(len(shares), 1)
def tearDown(self):
"""
Remove setup components.
tearDown is called by django at the end to clean up any changes to the database that occurred
in setUp.
"""
User.objects.all().delete()
Organism.objects.all().delete()
Gene.objects.all().delete()
Geneset.objects.all().delete()
Collaboration.objects.all().delete()
class NoCollaborationShareFromTestCase(ResourceTestCaseMixin, TestCase):
"""
Test that non-collaborating users can't share genesets.
Only collaborators are eligible to share genesets. These tests ensure that users that don't
collaborate can't share.
"""
def setUp(self):
super(NoCollaborationShareFromTestCase, self).setUp()
# Make Two Users
self.u1 = "asdf"
self.e1 = "<EMAIL>"
self.p1 = "qwerty"
self.owner = User.objects.create_user(self.u1, self.e1, self.p1)
self.u2 = "asdf2"
self.e2 = "<EMAIL>"
self.p2 = "<PASSWORD>"
self.other = User.objects.create_user(self.u2, self.e2, self.p2)
# log in the owner
self.api_client.client.login(username=self.u1, password=<PASSWORD>)
# Make Organism and Genes
self.org = factory.create(Organism)
initial = {'organism': self.org} # Make sure these are for the right organism.
self.g1 = factory.create(Gene, initial)
self.g2 = factory.create(Gene, initial)
# Make our geneset.
self.geneset = Geneset.objects.create(creator=self.owner, title='Test Geneset 1',
organism=self.org, deleted=False,
abstract='Collaboration Invitations.', public=False)
self.geneset_url = '/api/v1/geneset/%s/%s/invite' % (self.geneset.creator.username, self.geneset.slug)
def testNoEmailPassed(self):
"""
Test invite without passed user.
If the user has permissions to invite, but no user e-mail address was passed,
we should just return the detail view.
"""
resp = self.api_client.post(self.geneset_url, format="json")
self.assertValidJSONResponse(resp)
self.assertEqual(self.deserialize(resp)['title'], 'Test Geneset 1')
def testEmailPassed(self):
"""
Test invite with a non-collaborator's e-mail passed.
Since the two users do not collaborate, the detail view is still returned
but no share should be created.
"""
resp = self.api_client.post(self.geneset_url, data={'email': self.e2}, format="json")
self.assertValidJSONResponse(resp)
self.assertEqual(self.deserialize(resp)['title'], 'Test Geneset 1')
result = Geneset.objects.get(pk=self.deserialize(resp)['id'])
shares = Share.objects.filter(geneset=result)
self.assertEqual(len(shares), 0)
def tearDown(self):
"""
Remove setup components.
tearDown is called by django at the end to clean up any changes to the database that occurred
in setUp.
"""
User.objects.all().delete()
Organism.objects.all().delete()
Gene.objects.all().delete()
Geneset.objects.all().delete()
class ShareReverseRequestTestCase(ResourceTestCaseMixin, TestCase):
"""
Test that collaborators cannot add themselves to genesets they don't own yet.
Test that collaborators cannot force a share via the API. These test cases are for two users, u1
and u2 who collaborate, and the request to share comes from a non-creator w/o permissions u2.
"""
def setUp(self):
"""
Set up test environment.
Make two users, log in one of the users, create some genes and an organism, a geneset,
and a collaboration.
"""
super(ShareReverseRequestTestCase, self).setUp()
# Make Two Users
self.u1 = "asdf"
self.e1 = "<EMAIL>"
self.p1 = "qwerty"
self.owner = User.objects.create_user(self.u1, self.e1, self.p1)
self.u2 = "asdf2"
self.e2 = "<EMAIL>"
self.p2 = "qwerty2"
self.other = User.objects.create_user(self.u2, self.e2, self.p2)
# log in the owner
self.api_client.client.login(username=self.u2, password=self.p2)
# Make Organism and Genes
self.org = factory.create(Organism)
initial = {'organism': self.org} # Make sure these are for the right organism.
self.g1 = factory.create(Gene, initial)
self.g2 = factory.create(Gene, initial)
# Make our geneset.
self.geneset = Geneset.objects.create(creator=self.owner, title='Test Geneset 1',
organism=self.org, deleted=False,
abstract='Collaboration Invitations.', public=False)
self.geneset_url = '/api/v1/geneset/%s/%s/invite' % (self.geneset.creator.username, self.geneset.slug)
# Make Collaboration, requires reciprocal
c1 = Collaboration(from_user=self.owner, to_user=self.other)
c1.save()
c2 = Collaboration(from_user=self.other, to_user=self.owner)
c2.save()
def testNoEmailPassed(self):
"""
Test invite without passed user.
Since the request comes from a non-creator without permissions, an
unauthorized response should be returned.
"""
resp = self.api_client.post(self.geneset_url, format="json")
self.assertHttpUnauthorized(resp)
def testEmailPassed(self):
"""
Test invite with a passed e-mail.
Even with an e-mail address passed, a non-creator without permissions
should receive an unauthorized response.
"""
resp = self.api_client.post(self.geneset_url, data={'email': self.e2}, format="json")
self.assertHttpUnauthorized(resp)
def tearDown(self):
"""
Remove setup components.
tearDown is called by django at the end to clean up any changes to the database that occurred
in setUp.
"""
User.objects.all().delete()
Organism.objects.all().delete()
Gene.objects.all().delete()
Geneset.objects.all().delete()
Collaboration.objects.all().delete()
class CollaborationTestCase(ResourceTestCaseMixin, TestCase):
"""
Test that users can create collaborations.
Test that users can create collaborations via the API.
"""
def setUp(self):
"""
Set up test environment.
Make two users.
"""
super(CollaborationTestCase, self).setUp()
# Make Two Users
self.u1 = "asdf"
self.e1 = "<EMAIL>"
self.p1 = "qwerty"
self.owner = User.objects.create_user(self.u1, self.e1, self.p1)
self.expected_owner = {u'email': u'<EMAIL>', u'resource_uri': u'', u'username': u'asdf'}
self.u2 = "asdf2"
self.e2 = "<EMAIL>"
self.p2 = "qwerty2"
self.other = User.objects.create_user(self.u2, self.e2, self.p2)
self.expected_other = {u'email': u'<EMAIL>', u'resource_uri': u'', u'username': u'asdf2'}
def testInviteNotLoggedIn(self):
"""
Test invite without a logged in user.
Without anyone logged in, 404 should be returned.
"""
resp = self.api_client.post('/api/v1/user/invite', format="json")
self.assertHttpNotFound(resp)
def testRejectNotLoggedIn(self):
"""
Test reject without a logged in user.
Without anyone logged in, 404 should be returned.
"""
resp = self.api_client.post('/api/v1/user/reject', format="json")
self.assertHttpNotFound(resp)
def testInviteNoContent(self):
"""
Test invite with a logged in user but no content.
The user object should be returned without any invites.
"""
client = TestApiClient()
client.client.login(username=self.u1, password=<PASSWORD>)
resp = client.post('/api/v1/user/invite', format="json")
self.assertValidJSONResponse(resp)
self.assertEqual(self.deserialize(resp)['invites'], [])
def testPOSTInvite(self):
"""
Test POST invite with a logged in user.
The user object should be returned with u2 in invites.
"""
client = TestApiClient()
client.client.login(username=self.u1, password=<PASSWORD>)
resp = client.post('/api/v1/user/invite', format="json", data={'email': self.e2})
self.assertValidJSONResponse(resp)
self.assertEqual(self.deserialize(resp)['invites'], [self.expected_other])
self.assertListEqual(list(get_invites(self.owner)), [self.other])
self.assertListEqual(list(get_inviteds(self.other)), [self.owner])
self.assertListEqual(list(get_collaborators(self.owner)), [])
self.assertListEqual(list(get_collaborators(self.other)), [])
def testCollaboration(self):
"""
Test that both users confirming results in a collaboration.
The user objects returned should each be collaborators with each other.
"""
c1 = TestApiClient()
c1.client.login(username=self.u1, password=<PASSWORD>)
r1 = c1.post('/api/v1/user/invite', format="json", data={'email': self.e2})
self.assertValidJSONResponse(r1)
c2 = TestApiClient()
c2.client.login(username=self.u2, password=<PASSWORD>)
r2 = c2.post('/api/v1/user/invite', format="json", data={'email': self.e1})
self.assertValidJSONResponse(r2)
self.assertListEqual(list(get_collaborators(self.owner)), [self.other])
self.assertListEqual(list(get_collaborators(self.other)), [self.owner])
self.assertListEqual(list(get_invites(self.owner)), [])
self.assertListEqual(list(get_inviteds(self.other)), [])
def testRejectInvite(self):
"""
Test that one user rejecting eliminates the invite.
The user objects returned should each have no relationship with each other after an
invite + reject.
"""
c1 = TestApiClient()
c1.client.login(username=self.u1, password=<PASSWORD>)
r1 = c1.post('/api/v1/user/invite', format="json", data={'email': self.e2})
self.assertValidJSONResponse(r1)
c2 = TestApiClient()
c2.client.login(username=self.u2, password=<PASSWORD>)
r2 = c2.post('/api/v1/user/reject', format="json", data={'email': self.e1})
self.assertValidJSONResponse(r2)
self.assertListEqual(list(get_collaborators(self.owner)), [])
self.assertListEqual(list(get_collaborators(self.other)), [])
self.assertListEqual(list(get_invites(self.owner)), [])
self.assertListEqual(list(get_inviteds(self.other)), [])
def testRejectCollaboration(self):
"""
Test that one user rejecting an existing collaboration destroys the collaboration.
The user objects returned should each have no relationship with each other after a
collaboration + reject.
"""
c1 = TestApiClient()
c1.client.login(username=self.u1, password=<PASSWORD>)
r1 = c1.post('/api/v1/user/invite', format="json", data={'email': self.e2})
self.assertValidJSONResponse(r1)
c2 = TestApiClient()
c2.client.login(username=self.u2, password=<PASSWORD>)
r2 = c2.post('/api/v1/user/invite', format="json", data={'email': self.e1})
self.assertValidJSONResponse(r2)
# Collaboration Exists
self.assertListEqual(list(get_collaborators(self.owner)), [self.other])
self.assertListEqual(list(get_collaborators(self.other)), [self.owner])
c2 = TestApiClient()
c2.client.login(username=self.u2, password=<PASSWORD>)
r2 = c2.post('/api/v1/user/reject', format="json", data={'email': self.e1})
self.assertValidJSONResponse(r2)
# Collaboration
or AS#3.)
indexAndAsNumber: all|a list of indexes with as# -> [[1, 100], [3, 300], ...]
Example:
protocolObj.configBgpAsPathSegmentListNumber(routerId='192.168.127.12', asNumber=3,
indexAndAsNumber=[[0, 28], [3, 298], [4, 828]])
Requirements:
getDeviceGroupByRouterId()
getMultivalues()
configMultivalues()
"""
deviceGroupObj = self.getDeviceGroupByRouterId(routerId=routerId)
if not deviceGroupObj:
raise IxNetRestApiException('No Device Group found for router ID: %s' % routerId)
asIndex = asNumber - 1
asObj = deviceGroupObj.NetworkGroup.find().Ipv4PrefixPools.find() \
.BgpIPRouteProperty.find().BgpAsPathSegmentList.find().BgpAsNumberList.find()[asIndex]
if asObj:
asNumberValueList = asObj.AsNumber.Values
for eachIndexAsNumber in indexAndAsNumber:
index = eachIndexAsNumber[0]
asNumber = eachIndexAsNumber[1]
asNumberValueList[index] = str(asNumber)
self.configMultivalue(asObj.AsNumber, 'valueList', {'values': asNumberValueList})
else:
raise IxNetRestApiException('No ipv4PrefixPools bgpIPRouteProperty object found.')
def configBgpAsSetMode(self, routerId, asSetMode):
"""
Description
Configure BGP Route Range AS Path: AS # Set Mode. This API will change all
indexes to the specified asSetMode
Note: In GUI, under Route Range, BGP IP Route Range.
Parameters
asSetMode:
Options: "dontincludelocalas",
"includelocalasasasseq",
"includelocalasasasset",
"includelocalasasasseqconfederation",
"includelocalasasassetconfederation",
"prependlocalastofirstsegment"
"""
deviceGroupObj = self.getDeviceGroupByRouterId(routerId=routerId)
if not deviceGroupObj:
raise IxNetRestApiException('No Device Group found for router ID: %s' % routerId)
if deviceGroupObj.NetworkGroup.find().Ipv4PrefixPools.find().BgpIPRouteProperty.find():
bgpObj = deviceGroupObj.NetworkGroup.find().Ipv4PrefixPools.find() \
.BgpIPRouteProperty.find()
asModeList = []
for i in range(len(bgpObj.AsSetMode.Values)):
asModeList.append(asSetMode)
self.configMultivalue(bgpObj.AsSetMode, 'valueList', {'values': asModeList})
else:
raise IxNetRestApiException('No BGP config found for this router ID: %s' % routerId)
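# Usage sketch (added for illustration; the router ID is an example value):
#
#     protocolObj.configBgpAsSetMode(routerId='192.0.0.1',
#                                    asSetMode='includelocalasasasseq')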
def getObject(self, keys, ngpfEndpointName=None):
"""
Description
This is an internal helper used by getNgpfObjectHandleByName() only.
"""
pass
def getNgpfObjectHandleByName(self, ngpfEndpointObject=None, ngpfEndpointName=None):
"""
Description
Get the NGPF object handle filtering by the NGPF component name.
The NGPF object name is something that you could configure for each NGPF stack.
Stack meaning: topology, deviceGroup, ethernet, ipv4, bgpIpv4Peer, etc
Parameters
ngpfEndpointObject: See below ngpfL2ObjectList and ngpfL3ObjectList.
ngpfEndpointName: The name of the NGPF component object.
Examples:
protocolObj.getNgpfObjectHandleByName(ngpfEndpointObject='topology',
ngpfEndpointName='Topo2')
protocolObj.getNgpfObjectHandleByName(ngpfEndpointObject='ipv4',
ngpfEndpointName='IPv4 1')
protocolObj.getNgpfObjectHandleByName(ngpfEndpointObject='bgpIpv4Peer',
ngpfEndpointName='bgp_2')
protocolObj.getNgpfObjectHandleByName(ngpfEndpointObject='networkGroup',
ngpfEndpointName='networkGroup1')
protocolObj.getNgpfObjectHandleByName(ngpfEndpointObject='ipv4PrefixPools',
ngpfEndpointName='Basic IPv4 Addresses 1')
"""
ngpfMainObjectList = ['topology', 'deviceGroup', 'ethernet', 'ipv4', 'ipv6',
'networkGroup', 'ipv4PrefixPools', 'ipv6PrefixPools']
ngpfL2ObjectList = ['isisL3', 'lacp', 'mpls']
ngpfL3ObjectList = ['ancp', 'bfdv4Interface', 'bgpIpv4Peer', 'bgpIpv6Peer',
'dhcpv4relayAgent', 'dhcpv6relayAgent', 'geneve', 'greoipv4',
'greoipv6', 'igmpHost', 'igmpQuerier', 'lac', 'ldpBasicRouter',
'ldpBasicRouterV6', 'ldpConnectedInterface', 'ldpv6ConnectedInterface',
'ldpTargetedRouter', 'ldpTargetedRouterV6', 'lns', 'mldHost',
'mldQuerier', 'ptp', 'ipv6sr', 'openFlowController', 'openFlowSwitch',
'ospfv2', 'ospfv3', 'ovsdbcontroller', 'ovsdbserver', 'pcc', 'pce',
'pcepBackupPCEs', 'pimV4Interface', 'pimV6Interface', 'ptp', 'rsvpteIf',
'rsvpteLsps', 'tag', 'vxlan'
]
if ngpfEndpointObject not in ngpfL2ObjectList + ngpfL3ObjectList + ngpfMainObjectList:
raise IxNetRestApiException('\nError: No such ngpfEndpointObject: %s' %
ngpfEndpointObject)
if ngpfEndpointObject in ngpfL2ObjectList:
ngpfEndpointObject = ngpfEndpointObject[0].capitalize() + ngpfEndpointObject[1:]
nodesObjList = self.ixNetwork.Topology.find().DeviceGroup.find().Ethernet.find()
ngpfEndpointResponse = getattr(nodesObjList, ngpfEndpointObject)
Obj = ngpfEndpointResponse.find(Name=ngpfEndpointName)
self.ixnObj.logInfo('getNgpfObjectHandleByName: %s' % Obj)
return Obj
elif ngpfEndpointObject in ngpfL3ObjectList:
ngpfEndpointObject = ngpfEndpointObject[0].capitalize() + ngpfEndpointObject[1:]
nodesIpv4ObjList = self.ixNetwork.Topology.find().DeviceGroup.find().Ethernet.find() \
.Ipv4.find()
nodesIpv6ObjList = self.ixNetwork.Topology.find().DeviceGroup.find().Ethernet.find() \
.Ipv6.find()
try:
ngpfEndpointResponse = getattr(nodesIpv4ObjList, ngpfEndpointObject)
Obj = ngpfEndpointResponse.find(Name=ngpfEndpointName)
self.ixnObj.logInfo('getNgpfObjectHandleByName: %s' % Obj)
return Obj
except Exception as e:
print(e)
ngpfEndpointResponse = getattr(nodesIpv6ObjList, ngpfEndpointObject)
Obj = ngpfEndpointResponse.find(Name=ngpfEndpointName)
self.ixnObj.logInfo('getNgpfObjectHandleByName: %s' % Obj)
return Obj
else:
obj = self.ixNetwork
ngpfEndpointIndex = ngpfMainObjectList.index(ngpfEndpointObject)
for eachNgpfEndpoint in ngpfMainObjectList[:ngpfEndpointIndex + 1]:
if eachNgpfEndpoint != ngpfEndpointObject:
eachNgpfEndpoint = eachNgpfEndpoint[0].capitalize() + eachNgpfEndpoint[1:]
eachNgpfEndpointResponse = getattr(obj, eachNgpfEndpoint)
obj = eachNgpfEndpointResponse.find()
else:
eachNgpfEndpoint = eachNgpfEndpoint[0].capitalize() + eachNgpfEndpoint[1:]
eachNgpfEndpointResponse = getattr(obj, eachNgpfEndpoint)
obj = eachNgpfEndpointResponse.find(Name=ngpfEndpointName)
return obj
def getNgpfObjectHandleByRouterId(self, ngpfEndpointObject, routerId):
"""
Description
Get the NGPF object handle filtering by the routerId.
Every host interface has a router ID by default and the router ID is
located in the Device Group in the IxNetwork GUI.
Note: Router ID exists only if there are protocols configured.
Parameters
ngpfEndpointObject: <str>: The NGPF endpoint. Example: deviceGroup,
ethernet, ipv4, ipv6, bgpIpv4Peer, ospfv2, etc.
routerId: <str>: The router ID IP address.
Example:
protocolObj.getNgpfObjectHandleByRouterId(ngpfEndpointObject='ipv4', routerId='192.0.0.1')
protocolObj.getNgpfObjectHandleByRouterId(ngpfEndpointObject='bgpIpv4Peer', routerId='172.16.58.3')
protocolObj.getNgpfObjectHandleByRouterId(ngpfEndpointObject='networkGroup', routerId='172.16.58.3')
protocolObj.getNgpfObjectHandleByRouterId(ngpfEndpointObject='ipv4PrefixPools', routerId='172.16.58.3')
"""
ngpfMainObjectList = ['topology', 'deviceGroup', 'ethernet', 'networkGroup',
'ipv4PrefixPools', 'ipv6PrefixPools']
ngpfL2ObjectList = ['isisL3', 'lacp', 'mpls', 'ipv4', 'ipv6', ]
ngpfL3ObjectList = ['ancp', 'bfdv4Interface', 'bgpIpv4Peer', 'bgpIpv6Peer',
'dhcpv4relayAgent', 'dhcpv6relayAgent', 'geneve', 'greoipv4',
'greoipv6', 'igmpHost', 'igmpQuerier', 'lac', 'ldpBasicRouter',
'ldpBasicRouterV6', 'ldpConnectedInterface', 'ldpv6ConnectedInterface',
'ldpTargetedRouter', 'ldpTargetedRouterV6', 'lns', 'mldHost',
'mldQuerier', 'ptp', 'ipv6sr', 'openFlowController',
'openFlowSwitch', 'ospfv2', 'ospfv3', 'ovsdbcontroller', 'ovsdbserver',
'pcc', 'pce', 'pcepBackupPCEs', 'pimV4Interface', 'pimV6Interface',
'ptp', 'rsvpteIf', 'rsvpteLsps', 'tag', 'vxlan'
]
if ngpfEndpointObject not in ngpfL2ObjectList + ngpfL3ObjectList + ngpfMainObjectList:
raise IxNetRestApiException('\nError: No such ngpfEndpointObject: %s' %
ngpfEndpointObject)
deviceGroupObjByRouterId = self.getDeviceGroupByRouterId(routerId=routerId)
for topology in self.ixNetwork.Topology.find():
deviceGroupList = []
for deviceGroupObj in topology.DeviceGroup.find():
deviceGroupList.append(deviceGroupObj)
for deviceGroupObj in deviceGroupList:
if deviceGroupObj == deviceGroupObjByRouterId:
if ngpfEndpointObject == 'topology':
return topology
if ngpfEndpointObject == 'deviceGroup':
return deviceGroupObj
ethernetList = deviceGroupObj.Ethernet.find()
if not ethernetList:
continue
if ngpfEndpointObject == 'ethernet':
for eachEthernetObj in ethernetList:
match = re.match('(/api.*)', eachEthernetObj.href)
if match:
return eachEthernetObj
if ngpfEndpointObject == 'networkGroup':
networkGroupList = deviceGroupObj.NetworkGroup.find()
for eachNetworkGroupObj in networkGroupList:
match = re.match('(/api.*)', eachNetworkGroupObj.href)
if match:
return eachNetworkGroupObj
for ethernet in ethernetList:
# Dynamically get all Ethernet child endpoints
if ngpfEndpointObject in ngpfL2ObjectList:
endpointObject = ngpfEndpointObject[0].capitalize() + \
ngpfEndpointObject[1:]
endpointObjectResponse = getattr(ethernet, endpointObject)
Obj = endpointObjectResponse.find()
return Obj
elif ngpfEndpointObject in ngpfL3ObjectList:
endpointObject = ngpfEndpointObject[0].capitalize() + \
ngpfEndpointObject[1:]
nodesIpv4ObjList = ethernet.Ipv4.find()
nodesIpv6ObjList = ethernet.Ipv6.find()
try:
endpointObjectResponse = getattr(nodesIpv4ObjList, endpointObject)
Obj = endpointObjectResponse.find()
return Obj
except Exception as e:
print(e)
endpointObjectResponse = getattr(nodesIpv6ObjList, endpointObject)
Obj = endpointObjectResponse.find()
return Obj
else:
return None
def getDeviceGroupByRouterId(self, routerId=None, queryDict=None, runQuery=True):
"""
Description
Get the Device Group object handle for the routerId.
Note:
A Device Group could have many IP hosts (sessions). This is configured as multipliers
in a Device Group. If multiplier = 5, there will be 5 IP hosts. Each host will
have a unique router ID.
To get the Device Group that has a specific router ID, pass in the router ID for the
parameter routerId.
Parameter
routerId: <str>: The router ID in the format of 192.0.0.1.
queryDict: <dict>: Ignore this parameter. This parameter is only used internally.
runQuery: Ignore this parameter. <bool>: This parameter is only used internally.
Example:
obj = mainObj.getDeviceGroupByRouterId(routerId='192.0.0.3')
How to getMac:
Step 1> Get the Device Group that has routerId
deviceGroupObjHandle = self.getDeviceGroupByRouterId(routerId=routerId)
Step 2> Append the /ethernet/1 endpoint object to the Device Group object.
ethernetObjHandle = deviceGroupObjHandle + '/ethernet/1'
Step 3> Get the mac address using the ethernetObjHandle
return self.getObjAttributeValue(ethernetObjHandle, 'mac')
Return
- deviceGroup object handle
- None if routerid is not found
"""
deviceGroupObj = None
routerDataObj = self.ixNetwork.Topology.find().DeviceGroup.find().RouterData.find()
for eachRouterDataObj in routerDataObj:
routerIdValues = self.getMultivalueValues(eachRouterDataObj.RouterId)
if routerId in routerIdValues:
match = re.match('(/api.*)/routerData', eachRouterDataObj.href)
deviceGroupObj = match.group(1)
deviceGroupObjectList = self.ixNetwork.Topology.find().DeviceGroup.find()
for eachDeviceGroupObject in deviceGroupObjectList:
if eachDeviceGroupObject.href == deviceGroupObj:
return eachDeviceGroupObject
return deviceGroupObj
def getEthernetPropertyValue(self, routerId=None, ngpfEndpointName=None, property=None):
"""
Description
Get any NGPF Ethernet property value based on the router ID or by the NGPF component
name.
Parameters
routerId: <str>: The router ID IP address.
ngpfEndpointName: <str>: The NGPF endpoint name.
property: <str>: The NGPF Ethernet property.
Choices: name, mac, mtu, status, vlanCount, enableVlans
"""
ethernetObj = None
ethernetProperties = ['name', 'mac', 'mtu', 'status', 'vlanCount', 'enableVlans']
if property not in ethernetProperties:
raise IxNetRestApiException('\nError: No such Ethernet property: %s.\n\nAvailable NGPF '
'Ethernet properties: %s' % (property, ethernetProperties))
if routerId is None and ngpfEndpointName is None:
raise IxNetRestApiException('\nError: Provide either routerId or ngpfEndpointName.')
if routerId:
ethernetObj = self.getNgpfObjectHandleByRouterId(routerId=routerId,
ngpfEndpointObject='ethernet')
if ngpfEndpointName:
ethernetObj = self.getNgpfObjectHandleByName(ngpfEndpointName=ngpfEndpointName,
ngpfEndpointObject='ethernet')
attribute = property[0].capitalize() + property[1:]
return self.ixnObj.getObjAttributeValue(ethernetObj, attribute)
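# Usage sketch (added for illustration; the inputs are example values):
#
#     mac = protocolObj.getEthernetPropertyValue(routerId='192.0.0.1',
#                                                property='mac')
#     name = protocolObj.getEthernetPropertyValue(ngpfEndpointName='Ethernet 1',
#                                                 property='name')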
def sendNsNgpf(self, ipv6ObjList):
"""
Description
Send NS out of all the IPv6 objects that you provide in a list.
Parameter
ipv6ObjList: <str>: Provide a list of one or more IPv6 object handles to send arp.
"""
if type(ipv6ObjList) != list:
raise IxNetRestApiException(
'sendNsNgpf error: The parameter ipv6ObjList must be a list of objects.')
self.ixNetwork.Topology.DeviceGroup.Ethernet.Ipv6.SendNs(ipv6ObjList)
def configIpv6Ngpf(self, obj=None, port=None, portName=None,
ngpfEndpointName=None, **kwargs):
"""
Description
Create or modify NGPF IPv6.
To create a new IPv6 stack in NGPF, pass in the Ethernet object.
If modifying, there are four options. 2-4 will query for the IP object handle.
1> Provide the IPv6 object handle using the obj parameter.
2> Set port: The physical port.
3> Set portName: The vport port name.
4> Set NGPF IP name that you configured.
Parameters
obj: <str>: None or Ethernet obj or IPv6 obj
port: <list>: Format: [ixChassisIp, str(cardNumber), str(portNumber)]
portName: <str>: The virtual port name.
ngpfEndpointName: <str>: The name that you configured for the NGPF IPv6.
kwargs:
ipv6Address: <dict>: {'start': 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b', 'direction': 'increment',
'step': '0:0:0:0:0:0:0:1'},
ipv6AddressPortStep: <str>|<dict>: disable|0:0:0:0:0:0:0:1
Incrementing the IP address on each port based on your input.
0:0:0:0:0:0:0:1 means to increment the last octet on each port.
gateway: <dict>: {'start': 'fdf8:f53e:61e4::18', 'direction': 'increment',
'step': '0:0:0:0:0:0:0:1'},
gatewayPortStep: <str>|<dict>: disable|0:0:0:0:0:0:0:1
Incrementing the IP address on each port based on your input.
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class VIEW3D_MT_select_edit_text(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, _context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class VIEW3D_MT_select_gpencil(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class VIEW3D_MT_select_object(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, _context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class VIEW3D_MT_select_object_more_less(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, _context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class VIEW3D_MT_select_paint_mask(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, _context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class VIEW3D_MT_select_paint_mask_vertex(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, _context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
| |
# coding: utf-8
import numpy as np
import random
import tensorflow as tf
import logging
import imageio
import read_data
# from data_generator import DataGenerator
from origin_mil_pick import MIL
# from evaluation.eval_reach import evaluate_vision_reach
# from evaluation.eval_push import evaluate_push
from tensorflow.python.platform import flags
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
FLAGS = flags.FLAGS
LOGGER = logging.getLogger(__name__)
## Dataset/method options
flags.DEFINE_string('experiment', 'pick_place', 'sim_vision_reach or sim_push')
flags.DEFINE_string('data_path', './pick_dataset_origin/human_robot_dataset/',
'path to the directory where demo files that containing robot states and actions are stored')
flags.DEFINE_string('demo_gif_dir', 'data', 'path to the videos of demonstrations')
flags.DEFINE_string('gif_prefix', 'object', 'prefix of the video directory for each task, e.g. object_0 for task 0')
flags.DEFINE_integer('im_width', 264,
'width of the images in the demo videos, 125 for sim_push, and 80 for sim_vision_reach')
flags.DEFINE_integer('im_height', 196,
'height of the images in the demo videos, 125 for sim_push, and 64 for sim_vision_reach')
flags.DEFINE_integer('num_channels', 3, 'number of channels of the images in the demo videos')
flags.DEFINE_integer('T', 3, 'time horizon of the demo videos, 50 for reach, 100 for push')
flags.DEFINE_bool('hsv', False, 'convert the image to HSV format')
flags.DEFINE_bool('use_noisy_demos', False, 'use noisy demonstrations or not (for domain shift)')
flags.DEFINE_string('noisy_demo_gif_dir', None, 'path to the videos of noisy demonstrations')
flags.DEFINE_string('noisy_demo_file', None,
'path to the directory where noisy demo files that containing robot states and actions are stored')
flags.DEFINE_bool('no_action', True, 'do not include actions in the demonstrations for inner update')
flags.DEFINE_bool('no_state', False, 'do not include states in the demonstrations during training')
flags.DEFINE_bool('no_final_eept', False, 'do not include final ee pos in the demonstrations for inner update')
flags.DEFINE_bool('zero_state', True,
'zero-out states (meta-learn state) in the demonstrations for inner update (used in the paper with video-only demos)')
flags.DEFINE_bool('two_arms', False, 'use two-arm structure when state is zeroed-out')
flags.DEFINE_integer('training_set_size', -1, 'size of the training set, 1500 for sim_reach, 693 for sim push, anzero_stated \
-1 for all data except those in validation set')
flags.DEFINE_integer('val_set_size', 150, 'size of the training set, 150 for sim_reach and 76 for sim push')
## Training options
flags.DEFINE_integer('metatrain_iterations', 50000, 'number of metatraining iterations.') # 30k for pushing, 50k for reaching and placing
flags.DEFINE_integer('meta_batch_size', 16, 'number of tasks sampled per meta-update') # 15 for reaching, 15 for pushing, 12 for placing
flags.DEFINE_integer('meta_test_batch_size', 1, 'number of tasks sampled per meta-update') # 15 for reaching, 15 for pushing, 12 for placing
flags.DEFINE_float('meta_lr', 1e-4, 'the base learning rate of the generator')
flags.DEFINE_integer('update_batch_size', 1,
'number of examples used for inner gradient update (K for K-shot learning).')
flags.DEFINE_float('train_update_lr', 1e-4,
'step size alpha for inner gradient update.') # 0.001 for reaching, 0.01 for pushing and placing
flags.DEFINE_integer('num_updates', 1, 'number of inner gradient updates during training.') # 5 for placing
flags.DEFINE_bool('clip', True, 'use gradient clipping for fast gradient')
flags.DEFINE_float('clip_max', 100.0, 'maximum clipping value for fast gradient')
flags.DEFINE_float('clip_min', -100.0, 'minimum clipping value for fast gradient')
# flags.DEFINE_float('clip_max', 20.0, 'maximum clipping value for fast gradient')
# flags.DEFINE_float('clip_min', -20.0, 'minimum clipping value for fast gradient')
flags.DEFINE_bool('fc_bt', True, 'use bias transformation for the first fc layer')
flags.DEFINE_bool('all_fc_bt', False, 'use bias transformation for all fc layers')
flags.DEFINE_bool('conv_bt', False, 'use bias transformation for the first conv layer, N/A for using pretraining')
flags.DEFINE_integer('bt_dim', 10, 'the dimension of bias transformation for FC layers')
flags.DEFINE_string('pretrain_weight_path', 'N/A', 'path to pretrained weights')
flags.DEFINE_bool('train_pretrain_conv1', False, 'whether to finetune the pretrained weights')
flags.DEFINE_bool('two_head', True, 'use two-head architecture')
flags.DEFINE_bool('learn_final_eept', False, 'learn an auxiliary loss for predicting final end-effector pose')
flags.DEFINE_bool('learn_final_eept_whole_traj', False, 'learn an auxiliary loss for predicting final end-effector pose \
by passing the whole trajectory of eepts (used for video-only models)')
flags.DEFINE_bool('stopgrad_final_eept', True,
'stop the gradient when concatenate the predicted final eept with the feature points')
flags.DEFINE_integer('final_eept_min', 6, 'first index of the final eept in the action array')
flags.DEFINE_integer('final_eept_max', 8, 'last index of the final eept in the action array')
flags.DEFINE_float('final_eept_loss_eps', 0.1, 'the coefficient of the auxiliary loss')
flags.DEFINE_float('act_loss_eps', 1.0, 'the coefficient of the action loss')
flags.DEFINE_float('loss_multiplier', 100.0,
'the constant multiplied with the loss value, 100 for reach and 50 for push')
flags.DEFINE_bool('use_l1_l2_loss', False, 'use a loss with combination of l1 and l2')
flags.DEFINE_float('l2_eps', 0.01, 'coeffcient of l2 loss')
flags.DEFINE_bool('shuffle_val', False, 'whether to choose the validation set via shuffling or not')
## Model options
flags.DEFINE_integer('random_seed', 0, 'random seed for training')
flags.DEFINE_bool('fp', True, 'use spatial soft-argmax or not')
flags.DEFINE_string('norm', 'layer_norm', 'batch_norm, layer_norm, or None')
flags.DEFINE_bool('dropout', False, 'use dropout for fc layers or not')
flags.DEFINE_float('keep_prob', 0.5, 'keep probability for dropout')
flags.DEFINE_integer('num_filters', 64,
'number of filters for conv nets -- 64 for placing, 16 for pushing, 40 for reaching.')
flags.DEFINE_integer('filter_size', 3, 'filter size for conv nets -- 3 for placing, 5 for pushing, 3 for reaching.')
flags.DEFINE_integer('num_conv_layers', 5, 'number of conv layers -- 5 for placing, 4 for pushing, 3 for reaching.')
flags.DEFINE_integer('num_strides', 3,
'number of conv layers with strided filters -- 3 for placing, 4 for pushing, 3 for reaching.')
flags.DEFINE_bool('conv', True, 'whether or not to use a convolutional network, only applicable in some cases')
flags.DEFINE_integer('num_fc_layers', 3, 'number of fully-connected layers')
flags.DEFINE_integer('layer_size', 200, 'hidden dimension of fully-connected layers')
flags.DEFINE_bool('temporal_conv_2_head', True,
'whether or not to use temporal convolutions for the two-head architecture in video-only setting.')
flags.DEFINE_bool('temporal_conv_2_head_ee', False, 'whether or not to use temporal convolutions for the two-head architecture in video-only setting \
for predicting the ee pose.')
flags.DEFINE_integer('temporal_filter_size', 10, 'filter size for temporal convolution')
flags.DEFINE_integer('temporal_num_filters', 32, 'number of filters for temporal convolution')
flags.DEFINE_integer('temporal_num_filters_ee', 32, 'number of filters for temporal convolution for ee pose prediction')
flags.DEFINE_integer('temporal_num_layers', 3, 'number of layers for temporal convolution for ee pose prediction')
flags.DEFINE_integer('temporal_num_layers_ee', 3, 'number of layers for temporal convolution for ee pose prediction')
flags.DEFINE_string('init', 'xavier', 'initializer for conv weights. Choose among random, xavier, and he')
flags.DEFINE_bool('max_pool', False, 'Whether or not to use max pooling rather than strided convolutions')
flags.DEFINE_bool('stop_grad', False, 'if True, do not use second derivatives in meta-optimization (for axis_angle)')
## Logging, saving, and testing options
flags.DEFINE_bool('log', True, 'if false, do not log summaries, for debugging code.')
flags.DEFINE_string('save_dir', './daml_pick_logs', 'directory for summaries and checkpoints.')
# flags.DEFINE_string('save_dir', './daml_human_pick_logs', 'directory for summaries and checkpoints.')
flags.DEFINE_bool('resume', True, 'resume training if there is a model available')
flags.DEFINE_bool('train', True, 'True to train, False to test.')
flags.DEFINE_integer('restore_iter', -1, 'iteration to load model (-1 for latest model)')
flags.DEFINE_integer('begin_restore_iter', 41000, 'iteration to load model (-1 for latest model)')
flags.DEFINE_integer('train_update_batch_size', -1, 'number of examples used for gradient update during training \
(use if you want to test with a different number).')
flags.DEFINE_integer('test_update_batch_size', 1, 'number of demos used during test time')
flags.DEFINE_float('gpu_memory_fraction', 0.9, 'fraction of memory used in gpu')
flags.DEFINE_bool('record_gifs', True, 'record gifs during evaluation')
flags.DEFINE_integer('output_data', 6, '')
flags.DEFINE_integer('color_num', 3, '')
flags.DEFINE_integer('object_num', 4, '')
flags.DEFINE_integer('train_task_num', 6, '')
flags.DEFINE_integer('task_num', 8, '')
flags.DEFINE_integer('demo_num', 5, '')
# flags.DEFINE_integer('index_num', 1, '')
flags.DEFINE_integer('index_range', 20, '')
flags.DEFINE_integer('index_train_range', 20, '')
flags.DEFINE_string('demo_type', 'robot', 'robot or human')
# flags.DEFINE_string('demo_type', 'human', 'robot or human')
flags.DEFINE_string('target_type', 'robot', '')
# flags.DEFINE_float('weight_xy', 0.999, '')
# flags.DEFINE_float('weight_z', 0.001, '')
# flags.DEFINE_float('weight_rxyz', 0.001, '')
flags.DEFINE_float('weight_xy', 1.0, '')
flags.DEFINE_float('weight_z', 0, '')
flags.DEFINE_float('weight_rxyz', 0, '')
flags.DEFINE_string('test_data_color', 'color_yellow', '')
def generate_data(if_train=True):
if if_train:
batch_size = FLAGS.meta_batch_size
else:
batch_size = FLAGS.meta_test_batch_size
color_list = (np.random.randint(0, 100, size=batch_size) + 1) % FLAGS.color_num
print('color_list', color_list)
object_list = (np.random.randint(0, 100, size=batch_size) + 1) % FLAGS.object_num
print('object_list', object_list)
if if_train:
task_list = (np.random.randint(0, 100, size=batch_size) + 1) % FLAGS.train_task_num
else:
task_list = np.random.randint(FLAGS.train_task_num, FLAGS.task_num, size=batch_size)
print('task_list', task_list)
demo_list = (np.random.randint(0, 100, size=batch_size) + 1) % FLAGS.demo_num
print('demo_list', demo_list)
target_list = (np.random.randint(0, 100, size=batch_size) + 1) % FLAGS.demo_num
print('target_list', target_list)
obsas = []
obsbs = []
stateas = []
statebs = []
actionas = []
actionbs = []
color_num = ['color_blue', 'color_green', 'color_orange', 'color_yellow']
# color_num = ['color_blue', 'color_green', 'color_orange']
object_num = ['object_type_animal', 'object_type_car', 'object_type_dinosaur', 'object_type_tool']
for element in range(0, batch_size):
if if_train:
demo_path = '%s/%s/%s/%s/task_%d/demo_%d' % (
FLAGS.data_path, color_num[color_list[element]], object_num[object_list[element]], FLAGS.demo_type,
task_list[element], demo_list[element])
target_path = '%s/%s/%s/%s/task_%d/demo_%d' % (
FLAGS.data_path, color_num[color_list[element]], object_num[object_list[element]], FLAGS.target_type,
task_list[element], target_list[element])
else:
demo_path = '%s/%s/%s/%s/task_%d/demo_%d' % (
FLAGS.data_path, color_num[-1], object_num[object_list[element]], FLAGS.demo_type,
task_list[element], demo_list[element])
target_path = '%s/%s/%s/%s/task_%d/demo_%d' % (
FLAGS.data_path, color_num[-1], object_num[object_list[element]], FLAGS.target_type,
task_list[element], target_list[element])
# print('demo_path', demo_path)
# print('target_path', target_path)
index = np.random.randint(0, 20)
if FLAGS.demo_type == 'robot':
obsa, statea, actiona = read_data.Read_Robot_Data2(demo_path, FLAGS.T, index)
elif FLAGS.demo_type == 'human':
obsa, statea, actiona = read_data.Read_Human_Data2(demo_path, FLAGS.T, index)
obsb, stateb, actionb = read_data.Read_Robot_Data2(target_path, FLAGS.T, index)
obsas.append(obsa)
obsbs.append(obsb)
stateas.append(statea)
statebs.append(stateb)
actionas.append(actiona)
actionbs.append(actionb)
obsas = np.reshape(obsas, [batch_size, FLAGS.T, FLAGS.im_width * FLAGS.im_height * FLAGS.num_channels])
obsbs = np.reshape(obsbs, [batch_size, FLAGS.T, FLAGS.im_width * FLAGS.im_height * FLAGS.num_channels])
actionas = np.reshape(actionas, [batch_size, FLAGS.T, FLAGS.output_data])
actionbs = np.reshape(actionbs, [batch_size, FLAGS.T, FLAGS.output_data])
stateas = np.zeros([batch_size, FLAGS.T, FLAGS.output_data])
statebs = np.zeros([batch_size, FLAGS.T, FLAGS.output_data])
return obsas, obsbs, actionas, actionbs, stateas, statebs
def generate_place_test_data(demo_path, target_path, batch_size=1, index=0):
# color_num = ['color_blue', 'color_green', 'color_orange', 'color_yellow']
# object_num = ['object_type_animal', 'object_type_car', 'object_type_dinosaur', 'object_type_tool']
# print('demo_path', demo_path)
# print('target_path', target_path)
if FLAGS.demo_type == 'robot':
obsa, statea, actiona = read_data.Read_Robot_Data2(demo_path, FLAGS.T, index)
elif FLAGS.demo_type == 'human':
obsa, statea, actiona = read_data.Read_Human_Data2(demo_path, FLAGS.T, | |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Spectrum energy bin grouping.
There are three classes:
* SpectrumEnergyGroup - one group
* SpectrumEnergyGroups - one grouping, i.e. collection of groups
* SpectrumEnergyGroupMaker - algorithms to compute groupings.
Algorithms to compute groupings are both on SpectrumEnergyGroups and SpectrumEnergyGroupMaker.
The difference is that SpectrumEnergyGroups contains the algorithms and book-keeping that
just have to do with the groups, whereas SpectrumEnergyGroupMaker also accesses
information from SpectrumObservation (e.g. safe energy range or counts data) and
implements higher-level algorithms.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from collections import OrderedDict
from copy import deepcopy
import numpy as np
from ..extern.six.moves import UserList
from astropy.units import Quantity
from astropy.table import Table
from astropy.table import vstack as table_vstack
from ..utils.table import table_from_row_data, table_row_to_dict
from ..data import ObservationStats
__all__ = [
'SpectrumEnergyGroup',
'SpectrumEnergyGroups',
'SpectrumEnergyGroupMaker',
]
# TODO: improve the code so that this isn't needed!
INVALID_GROUP_INDEX = -99
# TODO: this is used for input at the moment,
# but for output the `bin_type` field is used.
# Make up your mind!
UNDERFLOW_BIN_INDEX = -1
OVERFLOW_BIN_INDEX = -2
class SpectrumEnergyGroup(object):
"""Spectrum energy group.
Represents a consecutive range of bin indices (both ends inclusive).
"""
fields = [
'energy_group_idx', 'bin_idx_min', 'bin_idx_max',
'bin_type', 'energy_min', 'energy_max',
]
"""List of data members of this class."""
valid_bin_types = ['normal', 'underflow', 'overflow']
"""Valid values for ``bin_types`` attribute."""
def __init__(self, energy_group_idx, bin_idx_min, bin_idx_max, bin_type,
energy_min, energy_max):
self.energy_group_idx = energy_group_idx
self.bin_idx_min = bin_idx_min
self.bin_idx_max = bin_idx_max
if bin_type not in self.valid_bin_types:
raise ValueError('Invalid bin type: {}'.format(bin_type))
self.bin_type = bin_type
self.energy_min = energy_min
self.energy_max = energy_max
@classmethod
def from_dict(cls, data):
data = dict((_, data[_]) for _ in cls.fields)
return cls(**data)
@property
def _data(self):
return [(_, getattr(self, _)) for _ in self.fields]
def __repr__(self):
txt = ['{}={!r}'.format(k, v) for k, v in self._data]
return '{}({})'.format(self.__class__.__name__, ', '.join(txt))
def __eq__(self, other):
return self.to_dict() == other.to_dict()
def to_dict(self):
return OrderedDict(self._data)
@property
def bin_idx_array(self):
"""Numpy array of bin indices in the group."""
return np.arange(self.bin_idx_min, self.bin_idx_max + 1)
@property
def bin_table(self):
"""Create `~astropy.table.Table` with bins in the group.
Columns are: ``energy_group_idx``, ``bin_idx``, ``bin_type``
"""
table = Table()
table['bin_idx'] = self.bin_idx_array
table['energy_group_idx'] = self.energy_group_idx
table['bin_type'] = self.bin_type
return table
def contains_energy(self, energy):
"""Does this group contain a given energy?"""
return (self.energy_min <= energy) & (energy < self.energy_max)
class SpectrumEnergyGroups(UserList):
"""List of `~gammapy.spectrum.SpectrumEnergyGroup` objects.
A helper class used by the `gammapy.spectrum.SpectrumEnergyMaker`.
"""
def __repr__(self):
return '{}(len={})'.format(self.__class__.__name__, len(self))
def __str__(self):
ss = '{}:\n'.format(self.__class__.__name__)
lines = self.to_group_table().pformat(max_width=-1, max_lines=-1)
ss += '\n'.join(lines)
return ss + '\n'
def copy(self):
"""Deep copy"""
return deepcopy(self)
@classmethod
def from_total_table(cls, table):
"""Create list of SpectrumEnergyGroup objects from table."""
groups = cls()
for energy_group_idx in np.unique(table['energy_group_idx']):
mask = table['energy_group_idx'] == energy_group_idx
group_table = table[mask]
bin_idx_min = group_table['bin_idx'][0]
bin_idx_max = group_table['bin_idx'][-1]
# bin_type = group_table['bin_type']
if energy_group_idx == UNDERFLOW_BIN_INDEX:
bin_type = 'underflow'
elif energy_group_idx == OVERFLOW_BIN_INDEX:
bin_type = 'overflow'
else:
bin_type = 'normal'
energy_min = group_table['energy_min'].quantity[0]
energy_max = group_table['energy_max'].quantity[-1]
group = SpectrumEnergyGroup(
energy_group_idx=energy_group_idx,
bin_idx_min=bin_idx_min,
bin_idx_max=bin_idx_max,
bin_type=bin_type,
energy_min=energy_min,
energy_max=energy_max,
)
groups.append(group)
return groups
@classmethod
def from_group_table(cls, table):
"""Create from energy groups in `~astropy.table.Table` format."""
return cls([
SpectrumEnergyGroup.from_dict(table_row_to_dict(row))
for row in table
])
def to_total_table(self):
"""Table with one energy bin per row (`~astropy.table.Table`).
Columns:
* ``energy_group_idx`` - Energy group index (int)
* ``bin_idx`` - Energy bin index (int)
* ``bin_type`` - Bin type {'normal', 'underflow', 'overflow'} (str)
There are no energy columns, because the per-bin energy info
was lost during grouping.
"""
tables = [group.bin_table for group in self]
return table_vstack(tables)
def to_group_table(self):
"""Table with one energy group per row (`~astropy.table.Table`).
Columns:
* ``energy_group_idx`` - Energy group index (int)
* ``energy_group_n_bins`` - Number of bins in the energy group (int)
* ``bin_idx_min`` - First bin index in the energy group (int)
* ``bin_idx_max`` - Last bin index in the energy group (int)
* ``bin_type`` - Bin type {'normal', 'underflow', 'overflow'} (str)
* ``energy_min`` - Energy group start energy (Quantity)
* ``energy_max`` - Energy group end energy (Quantity)
"""
rows = [group.to_dict() for group in self]
table = table_from_row_data(rows)
return table
@property
def bin_idx_range(self):
"""Tuple (left, right) with range of bin indices (both edges inclusive)."""
left = self[0].bin_idx_min
right = self[-1].bin_idx_max
return left, right
@property
def energy_range(self):
"""Total energy range (`~astropy.units.Quantity` of length 2)."""
return Quantity([self[0].energy_min, self[-1].energy_max])
@property
def energy_bounds(self):
"""Energy group bounds (`~astropy.units.Quantity`)."""
energy = [_.energy_min for _ in self]
energy.append(self[-1].energy_max)
return Quantity(energy)
def find_list_idx(self, energy):
"""Find the list index corresponding to a given energy."""
for idx, group in enumerate(self):
if group.contains_energy(energy):
return idx
# TODO: do we need / want this behaviour?
# If yes, could add via a kwarg `last_bin_right_edge_inclusive=False`
# For last energy group
# if idx == len(self) - 1 and energy == group.energy_max:
# return idx
raise IndexError('No group found with energy: {}'.format(energy))
def find_list_idx_range(self, energy_min, energy_max):
"""TODO: document.
* Min index is the bin that contains ``energy_range.min``
* Max index is the bin that is below the one that contains ``energy_range.max``
* This way we don't loose any bins or count them twice.
* Containment is checked for each bin as [min, max)
"""
idx_min = self.find_list_idx(energy=energy_min)
idx_max = self.find_list_idx(energy=energy_max) - 1
return idx_min, idx_max
def make_and_replace_merged_group(self, list_idx_min, list_idx_max, bin_type):
"""Merge energy groups and update indexes"""
# Create a merged group object
group = self.make_merged_group(
list_idx_min=list_idx_min,
list_idx_max=list_idx_max,
bin_type=bin_type,
)
# Delete previous groups
[self.pop(list_idx_min) for _ in range(list_idx_max - list_idx_min + 1)]
# Insert the merged group
self.insert(list_idx_min, group)
self.reindex_groups()
return self
def reindex_groups(self):
"""Re-index groups"""
for energy_group_idx, group in enumerate(self):
group.energy_group_idx = energy_group_idx
def make_merged_group(self, list_idx_min, list_idx_max, bin_type):
"""Merge group according to indexes"""
left_group = self[list_idx_min]
right_group = self[list_idx_max]
return SpectrumEnergyGroup(
energy_group_idx=INVALID_GROUP_INDEX,
bin_idx_min=left_group.bin_idx_min,
bin_idx_max=right_group.bin_idx_max,
bin_type=bin_type,
energy_min=left_group.energy_min,
energy_max=right_group.energy_max,
)
# TODO: choose one of the apply energy min / max methods!
def apply_energy_min_old(self, energy):
"""Modify list in-place to apply a min energy cut."""
idx_max = self.find_list_idx(energy)
self.make_and_replace_merged_group(0, idx_max, 'underflow')
def apply_energy_min(self, energy):
t = self.to_group_table()
idx_max = np.where(t['energy_min'].quantity < energy)[0][-1]
self.make_and_replace_merged_group(0, idx_max, 'underflow')
def apply_energy_max_old(self, energy):
"""Modify list in-place to apply a max energy cut."""
idx_min = self.find_list_idx(energy)
idx_max = len(self) - 1
self.make_and_replace_merged_group(idx_min, idx_max, 'overflow')
def apply_energy_max(self, energy):
t = self.to_group_table()
idx_min = np.where(t['energy_max'].quantity > energy)[0][0]
self.make_and_replace_merged_group(idx_min, len(self) - 1, 'overflow')
def clip_to_valid_range(self, list_idx):
"""TODO: document"""
if self[list_idx].bin_type == 'underflow':
list_idx += 1
if self[list_idx].bin_type == 'overflow':
list_idx -= 1
if list_idx < 0:
raise IndexError('list_idx {} < 0'.format(list_idx))
if list_idx >= len(self):
raise IndexError('list_idx {} > len(self)'.format(list_idx))
return list_idx
def apply_energy_binning(self, ebounds):
"""Apply an energy binning."""
for idx in range(len(ebounds) - 1):
energy_min = ebounds[idx]
energy_max = ebounds[idx + 1]
list_idx_min, list_idx_max = self.find_list_idx_range(energy_min, energy_max)
# Be sure to leave underflow and overflow bins alone
# TODO: this is pretty ugly ... make it better somehow!
list_idx_min = self.clip_to_valid_range(list_idx_min)
list_idx_max = self.clip_to_valid_range(list_idx_max)
self.make_and_replace_merged_group(
list_idx_min=list_idx_min,
list_idx_max=list_idx_max,
bin_type='normal',
)
class SpectrumEnergyGroupMaker(object):
"""Energy bin groups for spectral analysis.
This class contains both methods that run algorithms
that compute groupings as well as the results as data members
and methods to debug and assess the results.
The input ``obs`` is used read-only, to access the counts energy
binning, as well as some other info that is used for energy bin grouping.
This class creates the ``groups`` attribute on construction,
with exactly one group per energy bin. It is then modified by calling
methods on this class, usually to declare some bins as under- and
overflow (i.e. not to be used in spectral analysis), and to group
bins (e.g. for flux point computation).
See :ref:`spectrum_energy_group` for examples.
Parameters
----------
obs : `~gammapy.spectrum.SpectrumObservation`
Spectrum observation
Attributes
----------
obs : `~gammapy.spectrum.SpectrumObservation`
Spectrum observation data
groups : `~gammapy.spectrum.SpectrumEnergyGroups`
List of energy groups
See also
--------
SpectrumEnergyGroups, SpectrumEnergyGroup, FluxPointEstimator
"""
def __init__(self, obs):
self.obs = obs
self.groups = self._groups_from_obs(obs)
@staticmethod
def _groups_from_obs(obs):
"""Compute energy groups list with one group per energy bin.
Parameters
----------
obs : `~gammapy.spectrum.SpectrumObservation`
Spectrum observation data
Returns
-------
groups : `~gammapy.spectrum.SpectrumEnergyGroups`
List of energy groups
"""
# Start with a table with the obs energy binning
table = obs.stats_table()
# Make one group per bin
table['bin_idx'] = np.arange(len(table))
table['energy_group_idx'] = np.arange(len(table))
return SpectrumEnergyGroups.from_total_table(table)
| |
<reponame>Takishima/mindquantum<filename>mindquantum/simulator/simulator.py
# -*- coding: utf-8 -*-
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Simulator."""
import numpy as np
from mindquantum import mqbackend as mb
from mindquantum.core.circuit import Circuit
from mindquantum.core.gates import BarrierGate, Measure, MeasureResult
from mindquantum.core.gates.basic import BasicGate
from mindquantum.core.operators import Hamiltonian
from mindquantum.core.operators.hamiltonian import MODE
from mindquantum.core.parameterresolver import ParameterResolver
from mindquantum.utils import ket_string
from mindquantum.utils.type_value_check import (
_check_and_generate_pr_type,
_check_input_type,
_check_int_type,
_check_seed,
_check_value_should_not_less,
)
SUPPORTED_SIMULATOR = ['projectq']
def get_supported_simulator():
"""
Get simulator name that supported by MindQuantum.
Returns:
list, The supported simulator list.
"""
return SUPPORTED_SIMULATOR
class Simulator:
"""
Quantum simulator that simulate quantum circuit.
Args:
backend (str): which backend you want. The supported backend can be found
in SUPPORTED_SIMULATOR
n_qubits (int): number of quantum simulator.
seed (int): the random seed for this simulator, if None, seed will generate
by `numpy.random.randint`. Default: None.
Raises:
TypeError: if `backend` is not str.
TypeError: if `n_qubits` is not int.
TypeError: if `seed` is not int.
ValueError: if `backend` is not supported.
ValueError: if `n_qubits` is negative.
ValueError: if `seed` is less than 0 or great than 2**23 - 1.
Examples:
>>> from mindquantum import Simulator
>>> from mindquantum import qft
>>> sim = Simulator('projectq', 2)
>>> sim.apply_circuit(qft(range(2)))
>>> sim.get_qs()
array([0.5+0.j, 0.5+0.j, 0.5+0.j, 0.5+0.j])
"""
def __init__(self, backend, n_qubits, seed=None):
"""Initialize a Simulator object."""
_check_input_type('backend', str, backend)
_check_int_type('n_qubits', n_qubits)
_check_value_should_not_less('n_qubits', 0, n_qubits)
if seed is None:
seed = np.random.randint(1, 2**23)
_check_seed(seed)
if backend not in SUPPORTED_SIMULATOR:
raise ValueError(f"backend {backend} not supported, now we support {SUPPORTED_SIMULATOR}!")
self.backend = backend
self.seed = seed
self.n_qubits = n_qubits
if backend == 'projectq':
self.sim = mb.projectq(seed, n_qubits)
def copy(self):
"""
Copy this simulator.
Returns:
Simulator, a copy version of this simulator.
Examples:
>>> from mindquantum import RX, Simulator
>>> sim = Simulator('projectq', 1)
>>> sim.apply_gate(RX(1).on(0))
>>> sim.flush()
>>> sim2 = sim.copy()
>>> sim2.apply_gate(RX(-1).on(0))
>>> sim2
projectq simulator with 1 qubit (little endian).
Current quantum state:
1¦0⟩
"""
sim = Simulator(self.backend, self.n_qubits, self.seed)
sim.sim = self.sim.copy()
return sim
def __str__(self):
"""Return a string representation of the object."""
state = self.get_qs()
s = f"{self.backend} simulator with {self.n_qubits} qubit{'s' if self.n_qubits > 1 else ''} (little endian)."
s += "\nCurrent quantum state:\n"
if self.n_qubits < 4:
s += '\n'.join(ket_string(state))
else:
s += state.__str__()
return s
def __repr__(self):
"""Return a string representation of the object."""
return self.__str__()
def reset(self):
"""
Reset simulator to zero state.
Examples:
>>> from mindquantum import Simulator
>>> from mindquantum import qft
>>> sim = Simulator('projectq', 2)
>>> sim.apply_circuit(qft(range(2)))
>>> sim.reset()
>>> sim.get_qs()
array([1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j])
"""
self.sim.reset()
def flush(self):
"""
Flush gate that works for projectq simulator.
The projectq simulator will cache several gate and fushion these gate into a bigger gate, and than act on the
quantum state. The flush command will ask the simulator to fushion currently stored gate and act on the quantum
state.
Examples:
>>> from mindquantum import Simulator
>>> from mindquantum import H
>>> sim = Simulator('projectq', 1)
>>> sim.apply_gate(H.on(0))
>>> sim.flush()
"""
if self.backend == 'projectq':
self.sim.run()
def apply_gate(self, gate, pr=None, diff=False):
"""
Apply a gate on this simulator, can be a quantum gate or a measurement operator.
Args:
gate (BasicGate): The gate you want to apply.
pr (Union[numbers.Number, numpy.ndarray, ParameterResolver, list]): The
parameter for parameterized gate. Default: None.
diff (bool): Whether to apply the derivative gate on this simulator. Default: False.
Returns:
int or None, if the gate if a measure gate, then return a collapsed state, Otherwise
return None.
Raises:
TypeError: if `gate` is not a BasicGate.
ValueError: if any qubit of `gate` is higher than simulator qubits.
ValueError: if `gate` is parameterized, but no parameter supplied.
TypeError: the `pr` is not a ParameterResolver if `gate` is parameterized.
Examples:
>>> import numpy as np
>>> from mindquantum import Simulator
>>> from mindquantum import RY, Measure
>>> sim = Simulator('projectq', 1)
>>> sim.apply_gate(RY('a').on(0), np.pi/2)
>>> sim.get_qs()
array([0.70710678+0.j, 0.70710678+0.j])
>>> sim.apply_gate(Measure().on(0))
1
>>> sim.get_qs()
array([0.+0.j, 1.+0.j])
"""
_check_input_type('gate', BasicGate, gate)
if not isinstance(gate, BarrierGate):
gate_max = max(max(gate.obj_qubits, gate.ctrl_qubits))
if self.n_qubits < gate_max:
raise ValueError(f"qubits of gate {gate} is higher than simulator qubits.")
if isinstance(gate, Measure):
return self.sim.apply_measure(gate.get_cpp_obj())
if pr is None:
if gate.parameterized:
raise ValueError("apply a parameterized gate needs a parameter_resolver")
self.sim.apply_gate(gate.get_cpp_obj())
else:
pr = _check_and_generate_pr_type(pr, gate.coeff.params_name)
self.sim.apply_gate(gate.get_cpp_obj(), pr.get_cpp_obj(), diff)
return None
def apply_circuit(self, circuit, pr=None):
"""
Apply a circuit on this simulator.
Args:
circuit (Circuit): The quantum circuit you want to apply on this simulator.
pr (Union[ParameterResolver, dict, numpy.ndarray, list, numbers.Number]): The
parameter resolver for this circuit. If the circuit is not parameterized,
this arg should be None. Default: None.
Returns:
MeasureResult or None, if the circuit has measure gate, then return a MeasureResult,
otherwise return None.
Examples:
>>> import numpy as np
>>> from mindquantum import Circuit, H
>>> from mindquantum import Simulator
>>> sim = Simulator('projectq', 2)
>>> sim.apply_circuit(Circuit().un(H, 2))
>>> sim.apply_circuit(Circuit().ry('a', 0).ry('b', 1), np.array([1.1, 2.2]))
>>> sim
projectq simulator with 2 qubits (little endian).
Current quantum state:
-0.0721702531972066¦00⟩
-0.30090405886869676¦01⟩
0.22178317006196263¦10⟩
0.9246947752567126¦11⟩
>>> sim.apply_circuit(Circuit().measure(0).measure(1))
shots: 1
Keys: q1 q0│0.00 0.2 0.4 0.6 0.8 1.0
───────────┼───────────┴───────────┴───────────┴───────────┴───────────┴
11│▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
│
{'11': 1}
"""
_check_input_type('circuit', Circuit, circuit)
if self.n_qubits < circuit.n_qubits:
raise ValueError(f"Circuit has {circuit.n_qubits} qubits, which is more than simulator qubits.")
if circuit.has_measure_gate:
res = MeasureResult()
res.add_measure(circuit.all_measures.keys())
if circuit.params_name:
if pr is None:
raise ValueError("Applying a parameterized circuit needs a parameter_resolver")
pr = _check_and_generate_pr_type(pr, circuit.params_name)
else:
pr = ParameterResolver()
if circuit.has_measure_gate:
samples = np.array(
self.sim.apply_circuit_with_measure(circuit.get_cpp_obj(), pr.get_cpp_obj(), res.keys_map)
)
samples = samples.reshape((1, -1))
res.collect_data(samples)
return res
if circuit.params_name:
self.sim.apply_circuit(circuit.get_cpp_obj(), pr.get_cpp_obj())
else:
self.sim.apply_circuit(circuit.get_cpp_obj())
return None
def sampling(self, circuit, pr=None, shots=1, seed=None):
"""
Samping the measure qubit in circuit. Sampling do not change the origin quantum state of this simulator.
Args:
circuit (Circuit): The circuit that you want to evolution and do sampling.
pr (Union[None, dict, ParameterResolver]): The parameter
resolver for this circuit, if this circuit is a parameterized circuit.
Default: None.
shots (int): How many shots you want to sampling this circuit. Default: 1
seed (int): Random seed for random sampling. If None, seed will be a random
int number. Default: None.
Returns:
MeasureResult, the measure result of sampling.
Examples:
>>> from mindquantum import Circuit, Measure
>>> from mindquantum import Simulator
>>> circ = Circuit().ry('a', 0).ry('b', 1)
>>> circ += Measure('q0_0').on(0)
>>> circ += Measure('q0_1').on(0)
>>> circ += Measure('q1').on(1)
>>> sim = Simulator('projectq', circ.n_qubits)
>>> res = sim.sampling(circ, {'a': 1.1, 'b': 2.2}, shots=100, seed=42)
>>> res
shots: 100
Keys: q1 q0_1 q0_0│0.00 0.122 0.245 0.367 0.49 0.612
──────────────────┼───────────┴───────────┴───────────┴───────────┴───────────┴
000│▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒
│
011│▒▒▒▒▒▒▒▒▒
│
100│▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
│
111│▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒
│
{'000': 18, '011': 9, '100': 49, '111': 24}
"""
if not circuit.all_measures.map:
raise ValueError("circuit must have at least one measurement gate.")
_check_input_type("circuit", Circuit, circuit)
if self.n_qubits < circuit.n_qubits:
raise ValueError(f"Circuit has {circuit.n_qubits} qubits, which is more than simulator qubits.")
_check_int_type("sampling shots", shots)
_check_value_should_not_less("sampling shots", 1, shots)
if circuit.parameterized:
if pr is None:
raise ValueError("Sampling a parameterized circuit need a ParameterResolver")
if not isinstance(pr, (dict, ParameterResolver)):
raise TypeError("pr requires a dict or a ParameterResolver, but get {}!".format(type(pr)))
pr = ParameterResolver(pr)
else:
pr = ParameterResolver()
if seed is None:
seed = int(np.random.randint(1, 2 << 20))
else:
_check_seed(seed)
res = MeasureResult()
res.add_measure(circuit.all_measures.keys())
sim = self
if circuit.is_measure_end and not circuit.is_noise_circuit:
sim = Simulator(self.backend, self.n_qubits, self.seed)
sim.set_qs(self.get_qs())
sim.apply_circuit(circuit.remove_measure(), pr)
circuit = Circuit(circuit.all_measures.keys())
samples = np.array(
sim.sim.sampling(circuit.get_cpp_obj(), pr.get_cpp_obj(), shots, res.keys_map, seed)
).reshape((shots, -1))
res.collect_data(samples)
return res
def apply_hamiltonian(self, hamiltonian: Hamiltonian):
"""
Apply hamiltonian to a simulator, this hamiltonian can be hermitian or non hermitian.
Note:
The quantum state may be not a normalized quantum state after apply hamiltonian.
Args:
hamiltonian | |
# Sample Code For Assignment #3
# Printing Formatted Data in Python
# Copyright (C) 2021 <NAME>
# MIT Open Source Initiative Approved License
# hw_assignment_3.py
# CIS-135 Python
# Resources:
# https://www.w3schools.com/python/ref_string_format.asp
# https://www.w3schools.com/python/python_string_formatting.asp
# https://www.w3schools.com/python/python_strings.asp
# http://stackoverflow.com/questions/21208376/ddg#21208495
# https://stackoverflow.com/questions/21208376/converting-float-to-dollars-and-cents
# RAW DATA
# Category Type Count Total Value
# Adapter Bluetooth Mobile 284 7,097.16
# Network Switch Cisco 103 10,298.97
# Laptop computer Chromebook 7 2,100.00
# SSD External 83 9,877.00
# What is a Header?
# What is table data?
# Header Data
# Category Type Count Total Value
# Table Data
# Adapter Bluetooth Mobile 284 7,097.16
# Network Switch Cisco 103 10,298.97
# Laptop computer Chromebook 7 2,100.00
# SSD External 83 9,877.00
# When we think of a table, we think of rows and columns that are
# all the same width:
#|____|____|____|____|____|
#|____|____|____|____|____|
#|____|____|____|____|____|
# Sometimes, though, we need more/less width to accommodate data
# of different lengths
#|__________|____|_________|_|__|
#|__________|____|_________|_|__|
#|__________|____|_________|_|__|
#|__________|____|_________|_|__|
# # As we can see, not all data is the same length, and columns do not
# # always naturally align:
# print("\nCategory Type Count Total Value")
# print("Adapter Bluetooth Mobile 284 7,097.16")
# # Even using tabs between data items does not always do the trick.
# print("\nCategory\t\t\tType\t\t\tCount\t\t\tTotal\t\t\tValue")
# print("Adapter Bluetooth\t\t\tMobile\t\t\t284\t\t\t7,097.16")
# # A more robust solution is needed.
# # We can turn to a formatted print statement to help us out:
# # A formatted print statement is a string which we can use
# # to insert data into, when we want to.
# simple_format = "\nData is inserted where the curly braces appear: {}"
# # To use a formatted print statement we use the print statement and a
# # call to the .format() method. Inside the (parenthesis) of the formatted
# # print statement we insert the variable or data.
# print(simple_format)
# print(simple_format.format("Hello World"))
# # We can also do this with a variable
# hs = "Hello Student"
# print(simple_format.format(hs))
# # The {} can be anywhere in the statement:
# what_to_do = "\nLearning {} is fun."
# language_name = "Python"
# print(what_to_do.format(language_name))
# # We can also insert and format numbers using the Print statement
#pi_is = "\nThe first ten digits of pi are: {}"
# pi_to_ten = 3.1415926535
#print(pi_is.format(pi_to_ten))
# # We can reuse statements as many times as we like.
# pi_to_four = "\nThe first four digits of pi are: {:.5}"
# print(pi_to_four.format(pi_to_ten))
# # We can also add padding to our formatted print statements.
# print()
# pi_dec_five = "{:6.5} are the first five decimals of pi."
# pi_dec_four = "{:6.4} are the first four decimals of pi."
# pi_dec_three = "{:6.3} are the first three decimals of pi."
# pi_dec_two = "{:6.2} are the first two decimals of pi."
#
# print(pi_dec_five.format(pi_to_ten))
# print(pi_dec_four.format(pi_to_ten))
# print(pi_dec_three.format(pi_to_ten))
# print(pi_dec_two.format(pi_to_ten))
# # And can also add alignment to our formatted print statements.
# print()
# pi_dec_five = "{:<6.5} are the first five decimals of pi."
# pi_dec_four = "{:<6.4} are the first four decimals of pi."
# pi_dec_three = "{:<6.3} are the first three decimals of pi."
# pi_dec_two = "{:<6.2} are the first two decimals of pi."
# print(pi_dec_five.format(pi_to_ten))
# print(pi_dec_four.format(pi_to_ten))
# print(pi_dec_three.format(pi_to_ten))
# print(pi_dec_two.format(pi_to_ten))
# # Raw Data as Tuples
# title = ("Category", "Type", "Count", "Total Value")
# row1 = ("Adapter","Bluetooth","Mobile","284","7097.16")
# row2 = ("Network Switch","Cisco","103","10,298.97")
# row3 = ("Laptop computer","Chromebook","7","2100")
# row4 = ("SSD","External","83","9877")
#
# # # RAW DATA as List
# header = ["Category", "Type", "Count", "Total Value"]
# r1 = ["Adapter","Bluetooth Mobile","284","7,097.16"]
# r2 = ["Network Switch","Cisco","103","10,298.97"]
# r3 = ["Laptop computer","Chromebook","7","2,100.00"]
# r4 = ["SSD","External","83","9,877.00"]
# # Given the raw data, we look for the longest of the strings:
# # Laptop Computer = 17 characters so 20 characters would be a good length for column 1
# # Bluetooth Mobile = 16 characters so 20 would be a good length for col 2
# # Count = 5 characters so 10 would be a good length for col 3
# # Total Value = 11 characters so 15 would be a good length for col 4
# # Setup the placeholders: {:20}{:20}{:10}{:15}
# # Create the placeholder string: {:20}{:15}{:9}{:15}
# # When we print from a tuple or a list (data is in this format above),
# # we need to adjust the placeholder by adding an integer to the left
# # of the colon in each placeholder.
# # for example to print the first piece of data we use {0:20}
# # to print the second piece of data we use {1:15} etc.
#
# print_string = "{0:20}{1:20}{2:>10}{3:>15}"
#
# # # Call (use) the print string to print out a sample of the data:
# print()
# print(print_string.format(header[0],header[1],header[2],header[3]))
# print(print_string.format(r1[0],r1[1],r1[2],r1[3]))
# print(print_string.format(r2[0],r2[1],r2[2],r2[3]))
# print(print_string.format(r3[0],r3[1],r3[2],r3[3]))
# print(print_string.format(r4[0],r4[1],r4[2],r4[3]))
#
#
# Notes on Homework Assignment #3
myInt = 10
myString = "Clinton"
chevy = "Cheverolet Silverado 586,675 $ 28,595.00"
chevy = "Cheverolet Equinox 1,270,994 $ 25,000.00"
# # Variable = one named space in memory
# # Variable that can hold mulipiple piece of information.
# my_variable = 10 # 'string' double/flaot # First container
# my_tuple = 10,20,30,40,50,60,70 # Second type of container
# my_tuple = (10,10,10,20,30,40,50,60,70,80) # Cannot update a tuple, using same name = new
# my_list = [40,20,35,"twenty",10.2,10.11,["clinton","garwood",[1,2,3,4]]] # Third type of data continer
# my_set = {30,100,20,30,30,30,30} # fourth data container
# my_dictionary = {'first_name': 'Clinton', 'last_name': 'Garwood', 'id': 36643.00 } # fifth type
# print(my_list)
# # RAW DATA:
# Chevrolet Silverado 586675 28595
# Chevrolet Equinox 270994 25000
# Ford F-Series 787422 30635
# GMC Sierra 253016 29695
# Honda CR-V 333502 26525
# Honda Civic 261225 22000
# Lamborghini Huracan 1095 208571
# Toyota RAV4 430387 27325
# Toyota Camry 29434825965
# r1 = ["Chevrolet", "Silverado", 586675, 28595]
# r2 = ["Chevrolet", "Equinox", 270994, 25000]
# r3 = ["Ford", "F-Series", 787422, 30635]
# r4 = ["GMC", "Sierra", 253016,29695]
# r5 = ["Honda", "CR-V", 333502,26525]
# r6 = ["Honda", "Civic", 261225,22000]
# r7 = ["Lamborghini", "Huracan",1095,208571]
# r8 = ["Toyota", "RAV4", 430387,27325]
# r9 = ["Toyota", "Camry", 294348,25965]
# # # Example of printing raw data as a string literal in a print statement:
# print("\nChevrolet Silverado 586675 28595")
# print("Chevrolet Equinox 270994 25000")
# print("Ford F-Series 787422 30635")
# print("GMC Sierra 253016 29695")
# print("Problems here as rows are not lined up and numbers are not formatted\n")
# # # Printing raw data using some basic formatting a print statement :
# # # This shows using a tab between each value
# # # Chevrolet Silverado 586675 28595
# print("\n\tChevrolet\tSilverado\t586675\t28595")
# print("\tChevrolet\tEquinox\t270994\t25000")
# print("\tFord\tF-Series\t787422\t30635")
# print("\tGMC\tSierra\t253016\t29695")
# print("Problems continue with row alignment and numbers formatting\n")
#
# print(r1)
# print(r2)
# print(r3)
# print(r4)
# print(r5)
# print(r6)
# print(r7)
# print(r8)
# print(r9)
# # More on printing simple strings:
# print("Working with raw data creates problems, both with")
# print("handling the data initially, and more importantly when")
# print("we want to update or change data based on program input")
# # Using data buckets to control the data.
# # Python has many differnt kinds of buckets for data.
# # You have already seen variables
# number_10 = 10
#
# # What if we want to store two pieces of data in a single container?
# # A tuple can be used to store more than one piece of data.
# # https://www.w3schools.com/python/python_tuples.asp
# # https://docs.python.org/3/library/stdtypes.html?highlight=tuple#tuple
# my_tuple = (10, 20)
# one_to_ten_tuple = (1,2,3,4,5,6,7,8,9,10)
# # We can store as many items (in an ordered fashion) as we want in a tuple.
# # Printing tuples work just like variables
# print(my_tuple)
# print(one_to_ten_tuple)
#
# # Python Lists:
# # https://www.w3schools.com/python/python_lists.asp
# # https://docs.python.org/3/library/stdtypes.html?#list
# # Another bucket we can use to store multiple pieces of (ordered data) in
# # Python is called a List.
# my_list = [10, 20]
# one_to_ten_list = [1,2,3,4,5,6,7,8,9,10]
# # We can store as many items (in an ordered fashion) as we want in a list.
# # and printing lists works just like variables
# print(my_list)
# print(one_to_ten_list)
# #
# print("\n\tIt is better, down the road, to use Lists that tuples")
# print("\tLists are mutable -- because can be changed and updated.\n")
#
# # How would our raw data look if it were formatted as a tuple:
# # Can format Data as Tuples:
ti = ("Car Make", "Car Model", "Units Sold", "Starting Price")
sep = ["--------", "---------", "----------", "--------------"]
#
# # How would our raw data look if it were formatted as a list:
# # Can Format data as Lists
cs = ["Chevrolet", "Silverado", 586675, 28595]
ce = ["Chevrolet", "Equinox", 270994, 25000.00]
fo = ["Ford", "F-Series", 787422, 30635]
gm = ["GMC", "Sierra", 253016, 29695]
hv = ["Honda", "CR-V", 333502, 26525]
hc = ["Honda", "Civic", 261225, 22000]
lh = ["Lamborghini", "Huracan", 1095, 208571]
tr = ["Toyota", "RAV4", 430387, 27325]
tc = ["Toyota", "Camry", 294348, 25965]
# # Once our raw data is in a proper bucket, we can use a formatted print
# # string to handle the formatting fine-tuning, and also to add symbols like
# # decimal points, and row padding and alignment.
#
# # A formatted print string literal, allows us to define how | |
import os, sys, pickle
import requests
import random
import time
import operator
import math
import progressbar
import numpy as np
import pandas as pd
import multiprocessing as mp
import difflib
import matplotlib.pyplot as plt
import inspect
from decimal import Decimal
from rdkit import Chem, DataStructs, RDConfig
from rdkit.Chem import AllChem, rdmolops
from sklearn.metrics import pairwise_distances, pairwise_distances_chunked, roc_curve, roc_auc_score, average_precision_score, ndcg_score
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from sklearn.ensemble import RandomForestClassifier
from sklearn import svm
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from scipy.spatial.distance import squareform, cdist
from scipy import stats
class Protein(object):
"""!
An object to represent a protein
"""
def __init__(self, id_, sig):
## @var id_
# PDB or UniProt ID for the given protein
self.id_ = id_
## @var alt_id
# Used when a second identifier mapping is available (such as SIFTs project)
self.alt_id = ''
## @var sig
# List of scores representing each drug interaction with the given protein
self.sig = sig
## @var pathways
# List of Pathway objects in which the given protein is involved
self.pathways = []
## @var indications
# List of Indication objects to which the protein is associated
self.indications = []
## @var name
# str: the common name of the protein (not currently used)
self.name = ''
## @var gene
# str: the gene name from which the protein is produced
self.gene = ''
class Compound(object):
"""!
An object to represent a compound/drug
"""
def __init__(self, name, id_, index, status='N/A'):
## @var name
# str: Name of the Compound (e.g., 'caffeine')
self.name = name
## @var id_
# int: CANDO id from mapping file (e.g., 1, 10, 100, ...)
self.id_ = id_
## @var index
# int: The order in which the Compound appears in the mapping file (e.g, 1, 2, 3, ...)
self.index = index
## @var status
# str: The clinical trial status of the compound from DrugBank ('approved' or 'other')
self.status = status
## @var sig
# list: Signature is essentially a column of the Matrix
self.sig = []
## @var aux_sig
# list: Potentially temporary signature for things like pathways, where "c.sig" needs to be preserved
self.aux_sig = []
## @var indications
# list: This is every indication the Compound is associated with from the
# mapping file
self.indications = []
## @var similar
# list: This is the ranked list of compounds with the most similar interaction signatures
self.similar = []
## @var similar_computed
# bool: Have the distances of all Compounds to the given Compound been computed?
self.similar_computed = False
## @var similar_sorted
# bool: Have the most similar Compounds to the given Compound been sorted?
self.similar_sorted = False
## @var cluster_id
# int: The cluster id this Compound was assigned from clustering method
self.cluster_id = []
## @var adrs
# list: List of ADRs associated with this Compound
self.adrs = []
## @var alt_ids
# dict: dict of other ids inputted with compound mapping
self.alt_ids = {}
## @var metabolites
# list: List of all metabolites from the compound
self.metabolites = []
## @var is_metabolite
# bool: bool if the drug is a metabolite itself
self.is_metabolite = False
## @var parent
# Compound: Compound object to which this compound is a metabolite
self.parent = None
## @var compounds
# List Compound: Compound objects to which this compound is associated
self.compounds = []
def add_indication(self, ind):
"""!
Add an Indication to the list of Indications associated to this Compound
@param ind object: Indication object to add
"""
self.indications.append(ind)
class Compound_pair(object):
"""!
An object to represent a compound/drug-pair
"""
def __init__(self, name, id_, index):
## @var name
# str: Name of the Compound (e.g., 'caffeine')
self.name = name
## @var id_
# int: CANDO id from mapping file (e.g., 1, 10, 100, ...)
self.id_ = id_
## @var index
# int: The order in which the Compound appears in the mapping file (e.g, 1, 2, 3, ...)
self.index = index
## @var sig
# list: Signature is essentially a column of the Matrix
self.sig = []
## @var aux_sig
# list: Potentially temporary signature for things like pathways, where "c.sig" needs to be preserved
self.aux_sig = []
## @var similar
# list: This is the ranked list of compounds with the most similar interaction signatures
self.similar = []
## @var similar_computed
# bool: Have the distances of all Compounds to the given Compound been computed?
self.similar_computed = False
## @var similar_sorted
# bool: Have the most similar Compounds to the given Compound been sorted?
self.similar_sorted = False
## @var adrs
# list: List of ADRs associated with this Compound
self.adrs = []
def add_adr(self, adr):
"""!
Add an ADR to the list of Indications associated to this Compound
@param ind object: Indication object to add
"""
self.adrs.append(adr)
class Indication(object):
"""!
An object to represent an indication (disease)
"""
def __init__(self, ind_id, name):
## @var id_
# str: MeSH or OMIM ID for the indication from the mapping file
self.id_ = ind_id
## @var name
# str: Name for the indication from the mapping file
self.name = name
## @var compounds
# list: Every associated compound object from the mapping file
self.compounds = []
## @var pathways
# list: Every pathway associated to the indication from the mapping file
self.pathways = []
## @var proteins
# list: Every protein associated to the indication form the mapping file
self.proteins = []
## @var pathogen
# bool: Whether or not this indication is caused by a pathogen
self.pathogen = None
class Pathway(object):
"""!
An object to represent a pathway
"""
def __init__(self, id_):
## @var proteins
# list: Protein objects associated with the given Pathway
self.proteins = []
## @var id_
# str: Identification for the given Pathway
self.id_ = id_
## @var indications
# list: Indication objects associated with the given Pathway
self.indications = []
class ADR(object):
"""!
An object to represent an adverse reaction
"""
def __init__(self, id_, name):
## @var id_
# str: Identification for the given ADR
self.id_ = id_
## @var name
# str: Name of the given ADR
self.name = name
## @var compounds
# list: Compound objects associated with the given ADR
self.compounds = []
## @var compounds
# List: Compound object pairs (tuples) associated with the given ADR
self.compound_pairs = []
class CANDO(object):
"""!
An object to represent all aspects of CANDO (compounds, indications, matrix, etc.)
To instantiate you need the compound mapping (c_map), an
indication mapping file (i_map), and typically and a compound-protein matrix (matrix=) or
or precomputed compound-compound distance matrix (read_rmsds=), but those are optional.
"""
def __init__(self, c_map, i_map, matrix='', compound_set='all', compute_distance=False, save_dists='',
read_dists='', pathways='', pathway_quantifier='max', indication_pathways='', indication_proteins='',
similarity=False, dist_metric='rmsd', protein_set='', rm_zeros=False, rm_compounds='',
ddi_compounds='', ddi_adrs='', adr_map='', protein_distance=False, protein_map='', ncpus=1):
## @var c_map
# str: File path to the compound mapping file (relative or absolute)
self.c_map = c_map
## @var i_map
# str: File path to the indication mapping file (relative or absolute)
self.i_map = i_map
## @var matrix
# str: File path to the cando matrix file (relative or absolute)
self.matrix = matrix
## @var compound_set
# str or List str: what compounds to use, such as all, approved, experimental, etc
self.compound_set = compound_set
## @var protein_set
# str: File path to protein subset file (relative or absolute)
self.protein_set = protein_set
## @var pathways
# str: File path to pathway file
self.pathways = []
self.accuracies = {}
## @var compute_distance
# bool: Calculate the distance for each Compound against all other Compounds using chosen distance metric
self.compute_distance = compute_distance
## @var protein_distance
# bool: Calculate the distance for each Protein against all other Proteins using chosen distance metric
self.protein_distance = protein_distance
self.clusters = {}
## @var rm_zeros
# bool: Remove Compounds with all-zero signatures from CANDO object
self.rm_zeros = rm_zeros
## @var rm_compounds
# list: Compounds to remove from the CANDO object
self.rm_compounds = rm_compounds
self.rm_cmpds = []
## @var save_dists
# bool: Write the calculated | |
# --
# Copyright (c) 2008-2021 Net-ng.
# All rights reserved.
#
# This software is licensed under the BSD License, as described in
# the file LICENSE.txt, which you should have received as part of
# this distribution.
# --
"""The XHTML renderer
This renderer only depends on the ``nagare.renderers.xml`` module.
Having not dependencies to the Nagare framework make it suitable to be used in
others frameworks.
"""
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
from os import path
from collections import OrderedDict
from lxml import etree as ET
from nagare.renderers import xml
from nagare.renderers.xml import TagProp
# ---------------------------------------------------------------------------
# Common attributes
# -----------------
componentattrs = {'id', 'class', 'style', 'title'}
i18nattrs = {'lang', 'dir'}
eventattrs = {
'onclick', 'ondblclick', 'onmousedown', 'onmouseup', 'onmousemove',
'onmouseover', 'onmouseout', 'onkeypress', 'onkeydown', 'onkeyup'
}
allattrs = componentattrs | i18nattrs | eventattrs
focusattrs = {'accesskey', 'tabindex', 'onfocus', 'onblur'}
# ---------------------------------------------------------------------------
class Url(object):
def __init__(self, url):
self.url = url
self.parts = list(urlparse.urlparse(url))
def is_url(self):
return bool(self.parts[0])
def is_absolute(self):
return self.parts[2].startswith('/')
def absolute(self, url_prefix, always_relative=False, **params):
"""Convert a relative URL of a static content to an absolute one
In:
- ``url_prefix`` -- URL prefix of the static contents
- ``always_relative`` -- if True, prefix the path even when it is already absolute
- ``params`` -- query parameters to append to the URL
Return:
- an absolute URL
"""
if not self.is_url():
if always_relative or not self.is_absolute():
self.parts[2] = path.join(url_prefix or '/', self.parts[2].lstrip('/'))
if params:
params = ['%s=%s' % param for param in reversed(list(params.items()))]
self.parts[4] = (self.parts[4] + '&' + '&'.join(params)).lstrip('&')
return urlparse.urlunparse(self.parts)
def absolute_url(url, url_prefix, always_relative=False, **params):
return Url(url).absolute(url_prefix, always_relative, **params)
absolute_asset_url = absolute_url # noqa: E305
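# A few illustrative calls (values are examples, not tests):
#
#   absolute_url('css/site.css', '/static')           # -> '/static/css/site.css'
#   absolute_url('/img/a.png', '/static')             # absolute path kept as-is
#   absolute_url('css/site.css', '/static', ver='2')  # -> '/static/css/site.css?ver=2'
#   absolute_url('http://cdn.example/x.js', '/static')  # full URLs left untouched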
class Tag(xml.Tag):
"""A html tag
"""
def tostring(self, method='html', encoding='utf-8', pipeline=True, **kw):
"""Serialize in HTML the tree beginning at this tag
In:
- ``encoding`` -- encoding of the XML
- ``pipeline`` -- if False, the ``meld:id`` attributes are deleted
Return:
- the HTML
"""
return super(Tag, self).tostring(method, encoding, pipeline, **kw)
def error(self, msg):
"""Mark this tag as erroneous
In:
- ``msg`` -- the error message
Return:
- ``self``
"""
return self.renderer.decorate_error(self, msg)
class HrefAttribute(Tag):
ASSET_ATTR = 'href'
def absolute_url(self, url):
return self.renderer.absolute_asset_url(url)
def on_change(self):
super(HrefAttribute, self).on_change()
url = self.get(self.ASSET_ATTR, None)
if url is not None:
self.set(self.ASSET_ATTR, self.absolute_url(url))
class Link(HrefAttribute):
def on_change(self):
if self.get('rel', '') == 'stylesheet':
super(Link, self).on_change()
class SrcAttribute(HrefAttribute):
ASSET_ATTR = 'src'
Embed = Input = Script = SrcAttribute # noqa: E305
class Img(SrcAttribute):
def on_change(self):
super(Img, self).on_change()
url = self.get('lowsrc', None)
if url is not None:
self.set('lowsrc', self.renderer.absolute_asset_url(url))
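# The classes above rewrite asset URLs whenever the tag changes. A rough
# sketch of the intended effect (the renderer setup is hypothetical; assumes
# a renderer created with static_url='/static'):
#
#   img = h.img(src='img/logo.png')
#   img.get('src')   # -> '/static/img/logo.png'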
class HeadRenderer(xml.XmlRenderer):
"""The HTML head Renderer
This renderer knows about the possible tags of a html ``<head>``
"""
# Tag factories
# -------------
base = TagProp('base', {'id', 'href', 'target'})
head = TagProp('head', i18nattrs | {'id', 'profile'})
link = TagProp('link', allattrs | {'charset', 'href', 'hreflang', 'type', 'rel', 'rev', 'media', 'target'}, Link)
meta = TagProp('meta', i18nattrs | {'id', 'http_equiv', 'name', 'content', 'scheme'})
title = TagProp('title', i18nattrs | {'id'})
style = TagProp('style', i18nattrs | {'id', 'media', 'type'})
script = TagProp('script', i18nattrs | {'id', 'async', 'charset', 'defer', 'src', 'type'}, Script)
_parser = ET.HTMLParser()
_parser.set_element_class_lookup(ET.ElementDefaultClassLookup(element=Tag))
def __init__(self, static_url=None, assets_version=None):
"""Renderer initialisation
The ``HeadRenderer`` keeps track of the javascript and css used by every view,
to be able to concatenate them into the ``<head>`` section.
"""
super(HeadRenderer, self).__init__()
# Directory where the static contents of the application are located
self.static_url = static_url
self.assets_version = assets_version
self._named_css = OrderedDict() # CSS code
self._css_url = OrderedDict() # CSS URLs
self._named_javascript = OrderedDict() # Javascript code
self._javascript_url = OrderedDict() # Javascript URLs
def fromfile(self, source, tags_factory=Tag, fragment=False, no_leading_text=False, **kw):
return super(HeadRenderer, self).fromfile(source, tags_factory, fragment, no_leading_text, **kw)
def fromstring(self, text, tags_factory=Tag, fragment=False, no_leading_text=False, **kw):
return super(HeadRenderer, self).fromstring(text, tags_factory, fragment, no_leading_text, **kw)
@staticmethod
def absolute_url(url, url_prefix, always_relative=False, **params):
return absolute_url(url, url_prefix, always_relative, **params)
def absolute_asset_url(self, url, static_prefix=None, always_relative=False, **params):
url = Url(url)
if self.assets_version and not url.is_absolute():
params.setdefault('ver', self.assets_version)
return url.absolute(static_prefix if static_prefix is not None else self.static_url, always_relative, **params)
def css(self, id_, style, bottom=False, **attributes):
"""Memorize an in-line named css style
In:
- ``id_`` -- unique id of this css style (to prevent double definition)
- ``style`` -- the css style
- ``attributes`` -- attributes of the generated ``<style>`` tag
"""
self._named_css.setdefault(id_, (style, attributes, bottom))
def css_url(self, url, bottom=False, url_params=None, **attributes):
"""Memorize a css style URL
In:
- ``url`` -- the css style URL
- ``attributes`` -- attributes of the generated ``<link>`` tag
"""
self._css_url.setdefault(self.absolute_asset_url(url, **(url_params or {})), (attributes, bottom))
def javascript(self, id_, script, bottom=False, **attributes):
"""Memorize an in-line named javascript code
In:
- ``id_`` -- unique id of this javascript code (to prevent double definition)
- ``script`` -- the javascript code
- ``attributes`` -- attributes of the generated ``<script>`` tag
"""
self._named_javascript.setdefault(id_, (script, attributes, bottom))
def javascript_url(self, url, bottom=False, url_params=None, **attributes):
"""Memorize a javascript URL
In:
- ``url`` -- the javascript URL
- ``attributes`` -- attributes of the generated ``<script>`` tag
"""
self._javascript_url.setdefault(self.absolute_asset_url(url, **(url_params or {})), (attributes, bottom))
def render_top(self):
# Create the tags to include the CSS styles and the javascript codes
head = self.root
if isinstance(head, ET.ElementBase) and (head.tag == 'head'):
# If a ``<head>`` tag already exists, take its content
head = self.head(head[:], dict(head.attrib))
else:
head = self.head(head)
head.extend(
self.link(rel='stylesheet', type='text/css', href=url, **attributes)
for url, (attributes, bottom)
in self._css_url.items()
if not bottom
)
head.extend(
self.script(type='text/javascript', src=url, **attributes)
for url, (attributes, bottom)
in self._javascript_url.items()
if not bottom
)
head.extend(
self.style(css, type='text/css', data_nagare_css=name, **attributes)
for name, (css, attributes, bottom)
in self._named_css.items()
if not bottom
)
head.extend(
self.script(js, type='text/javascript', data_nagare_js=name, **attributes)
for name, (js, attributes, bottom)
in self._named_javascript.items()
if not bottom
)
return head
def render_bottom(self):
return [
self.link(rel='stylesheet', type='text/css', href=url, **attributes)
for url, (attributes, bottom)
in self._css_url.items()
if bottom
] + [
self.script(type='text/javascript', src=url, **attributes)
for url, (attributes, bottom)
in self._javascript_url.items()
if bottom
] + [
self.style(css, type='text/css', data_nagare_css=name, **attributes)
for name, (css, attributes, bottom)
in self._named_css.items()
if bottom
] + [
self.script(js, type='text/javascript', data_nagare_js=name, **attributes)
for name, (js, attributes, bottom)
in self._named_javascript.items()
if bottom
]
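# A minimal usage sketch of the head renderer (illustrative names; assumes the
# <head> content was built through this renderer so that ``self.root`` exists):
#
#   h = HeadRenderer(static_url='/static', assets_version='1.0')
#   h.css_url('css/site.css')        # -> <link href="/static/css/site.css?ver=1.0">
#   h.javascript('boot', 'init();')  # in-line code, deduplicated by its id
#   head_tag = h.render_top()        # <head> with the memorized <link>/<script>/<style>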
class Renderer(xml.XmlRenderer):
doctype = '<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">'
content_type = 'text/html'
head_renderer_factory = HeadRenderer
componentattrs = {'id', 'class', 'style', 'title'}
i18nattrs = {'lang', 'dir'}
eventattrs = {
'onclick', 'ondblclick', 'onmousedown', 'onmouseup', 'onmousemove',
'onmouseover', 'onmouseout', 'onkeypress', 'onkeydown', 'onkeyup'
}
focusattrs = {'accesskey', 'tabindex', 'onfocus', 'onblur'}
cellhalignattrs = {'align', 'char', 'charoff'}
cellvalignattrs = {'valign'}
allattrs = componentattrs | i18nattrs | eventattrs
# The HTML tags
# -------------
a = TagProp('a', allattrs | focusattrs | {
'charset', 'type', 'name', 'href', 'hreflang', 'rel',
'rev', 'shape', 'coords', 'target', 'oncontextmenu'
})
abbr = TagProp('abbr', allattrs)
acronym = TagProp('acronym', allattrs)
address = TagProp('address', allattrs)
applet = TagProp('applet', componentattrs | {
'codebase', 'archive', 'code', 'object', 'alt', 'name', 'width',
'height', 'align', 'hspace', 'vspace'
})
area = TagProp('area', allattrs | focusattrs | {'shape', 'coords', 'href', 'nohref', 'alt', 'target'})
b = TagProp('b', allattrs)
basefont = TagProp('basefont', componentattrs | i18nattrs | {'id', 'size', 'color', 'face'})
bdo = TagProp('bdo', componentattrs | eventattrs | {'lang', 'dir'})
big = TagProp('big', allattrs)
blockquote = TagProp('blockquote', allattrs | {'cite'})
body = TagProp('body', allattrs | {
'onload', 'onunload', 'onfocus', 'background', 'bgcolor', 'text',
'link', 'vlink', 'alink', 'leftmargin', 'topmargin', 'marginwidth', 'marginheight'
})
br = TagProp('br', componentattrs | {'clear'})
button = TagProp('button', allattrs | focusattrs | {'name', 'value', 'type', 'disabled'})
caption = TagProp('caption', allattrs | {'align'})
center = TagProp('center', allattrs)
cite = TagProp('cite', allattrs)
code = TagProp('code', allattrs)
col = TagProp('col', allattrs | cellhalignattrs | cellvalignattrs | {'span', 'width'})
colgroup = TagProp('colgroup', allattrs | cellhalignattrs | cellvalignattrs | {'span', 'width'})
dd = TagProp('dd', allattrs)
del_ = TagProp('del', allattrs | {'cite', 'datetime'})
dfn = TagProp('dfn', allattrs)
dir = TagProp('dir', allattrs | {'compact'})
div = TagProp('div', allattrs | {'align'})
dl = TagProp('dl', allattrs | {'compact'})
dt = TagProp('dt', allattrs)
em = TagProp('em', allattrs)
embed = TagProp('embed', {
'width', 'height', 'src', 'controller', 'target',
'border', 'pluginspage', 'quality', 'type', 'bgcolor', 'menu'
}, Embed)
fieldset = TagProp('fieldset', allattrs)
font = TagProp('font', componentattrs | i18nattrs | {'face', 'size', 'color'})
form = TagProp('form', allattrs | {
'action', 'method', 'name', 'enctype',
'onsubmit', 'onreset', 'accept_charset', 'target'
})
frame = TagProp('frame', set())
frameset = TagProp('frameset', componentattrs | {
'rows', 'cols', 'onload', 'onunload', 'framespacing', 'border',
'marginwidth', 'marginheight', 'frameborder', 'noresize', 'scrolling'
})
h1 = TagProp('h1', allattrs | {'align'})
h2 = TagProp('h2', allattrs | {'align'})
h3 = TagProp('h3', allattrs | {'align'})
# horizomer/benchmark/reformat_input.py
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright (c) 2015--, The Horizomer Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
"""
Reformat input files to format accepted by given HGT tool
=========================================================
"""
import click
from os.path import join
from skbio import TreeNode, TabularMSA, Sequence, Protein, DNA
from collections import OrderedDict
def join_trees(gene_tree,
species_tree,
output_tree_fp):
""" Concatenate Newick trees into one file (species followed by gene).
Parameters
----------
gene_tree: skbio.TreeNode
TreeNode instance for gene tree
species_tree: skbio.TreeNode
TreeNode instance for species tree
output_tree_fp: string
file path to output species and gene tree
See Also
--------
skbio.TreeNode
"""
with open(output_tree_fp, 'w') as output_tree_f:
output_tree_f.write(
"%s\n%s\n" % (str(species_tree)[:-1], str(gene_tree)[:-1]))
def trim_gene_tree_leaves(gene_tree):
""" Keep only string before first '_' delimiter in node ID.
Parameters
----------
gene_tree: skbio.TreeNode
TreeNode instance
See Also
--------
skbio.TreeNode
Notes
-----
This function will keep only the word before the first '_' in the
complete node ID. In ALF simulated sequences, the genes are labeled
as "SPECIES_GENE". Most phylogenetic reconciliation tools
require the associations between species leaves and gene leaves to
be equal, therefore needing to remove the _GENENAME part in the gene
tree.
"""
for node in gene_tree.tips():
node.name = node.name.split('_')[0]
def species_gene_mapping(gene_tree,
species_tree):
""" Find the association between the leaves in species and gene trees.
Parameters
----------
gene_tree: skbio.TreeNode
TreeNode instance for gene tree
species_tree: skbio.TreeNode
TreeNode instance for species tree
Returns
-------
mapping_leaves_t: OrderedDict
Mapping between the species tree leaves and the gene tree leaves;
species tips are the keys and gene tips are the values
See Also
--------
skbio.TreeNode
Notes
-----
Given the label format "SPECIES" for the species leaves and
"SPECIES_GENE" in the gene leaves, report the associations between all
species and gene leaves. Only one instance of the '_' delimiter is
allowed in the gene leaves and this is used as a separator between the
species name and the gene name.
Ex.
mapping = {"SE001":["SE001_1", "SE001_2"],
"SE002":["SE002_1"]}
"""
mapping_leaves = {}
for node in species_tree.tips():
if node.name not in mapping_leaves:
mapping_leaves[node.name] = []
else:
raise ValueError(
"Species tree leaves must be uniquely labeled: %s" % node.name)
for node in gene_tree.tips():
species, gene = node.name.split('_')
if species in mapping_leaves:
mapping_leaves[species].append("%s_%s" % (species, gene))
else:
raise ValueError(
"Species %s does not exist in the species tree" % species)
return OrderedDict(sorted(mapping_leaves.items(),
key=lambda x: x[1], reverse=True))
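# A quick sketch of the expected mapping (hypothetical trees; the output
# ordering is by the value lists, descending):
#
#   from io import StringIO
#   species = TreeNode.read(StringIO(u'(SE001,SE002);'))
#   genes = TreeNode.read(StringIO(u'((SE001_1,SE001_2),SE002_1);'))
#   species_gene_mapping(genes, species)
#   # -> OrderedDict([('SE002', ['SE002_1']), ('SE001', ['SE001_1', 'SE001_2'])])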
def remove_branch_lengths(tree):
""" Set branch lengths to None.
Parameters
----------
tree: skbio.TreeNode
TreeNode instance
See Also
--------
skbio.TreeNode
"""
for node in tree.postorder():
node.length = None
def id_mapper(ids):
""" Keep only the string before the first '/' delimiter in each ID.
"""
mapping = {}
for _id in ids:
mapping[_id] = _id.split('/')[0]
return mapping
def reformat_rangerdtl(gene_tree,
species_tree,
output_tree_fp):
""" Reformat input trees to the format accepted by RANGER-DTL.
Parameters
----------
gene_tree: skbio.TreeNode
TreeNode instance for gene tree
species_tree: skbio.TreeNode
TreeNode instance for species tree
output_tree_fp: string
file path to output trees (species followed by gene)
See Also
--------
skbio.TreeNode
Notes
-----
The species name in the leaves of species and gene trees must be equal.
For multiple genes from the same species, the format "SPECIES_GENE" is
acceptable in the gene trees.
"""
remove_branch_lengths(tree=gene_tree)
remove_branch_lengths(tree=species_tree)
join_trees(gene_tree,
species_tree,
output_tree_fp)
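# Example of the two-line output this produces (hypothetical trees and path,
# reusing StringIO as in the sketch above; branch lengths are stripped and the
# species tree comes first):
#
#   gene = TreeNode.read(StringIO(u'((SE001_1:0.1,SE002_1:0.2):0.1,SE003_1:0.3);'))
#   species = TreeNode.read(StringIO(u'((SE001:0.1,SE002:0.2):0.1,SE003:0.3);'))
#   reformat_rangerdtl(gene, species, 'ranger_input.nwk')
#   # ranger_input.nwk:
#   #   ((SE001,SE002),SE003);
#   #   ((SE001_1,SE002_1),SE003_1);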
def reformat_trex(gene_tree,
species_tree,
output_tree_fp):
""" Reformat input trees to the format accepted by T-REX.
Parameters
----------
gene_tree: skbio.TreeNode
TreeNode instance for gene tree
species_tree: skbio.TreeNode
TreeNode instance for species tree
output_tree_fp: string
file path to output trees (species followed by gene)
See Also
--------
skbio.TreeNode
Notes
-----
Binary trees only, leaves of species and gene trees must have equal
names.
"""
# trim gene tree leaves to exclude '_GENENAME' (if exists)
trim_gene_tree_leaves(gene_tree)
# join species and gene tree into one file
join_trees(gene_tree,
species_tree,
output_tree_fp)
def reformat_riatahgt(gene_tree,
species_tree,
output_tree_fp):
""" Reformat input trees to the format accepted by RIATA-HGT (PhyloNet).
Parameters
----------
gene_tree: skbio.TreeNode
TreeNode instance for gene tree
species_tree: skbio.TreeNode
TreeNode instance for species tree
output_tree_fp: string
file path to output trees (Nexus format)
See Also
--------
skbio.TreeNode
Notes
-----
Input to RIATA-HGT is a Nexus file. The number of leaves in the species
and gene tree must be equal with the same naming.
"""
nexus_file = """#NEXUS
BEGIN TREES;
Tree speciesTree = %s
Tree geneTree = %s
END;
BEGIN PHYLONET;
RIATAHGT speciesTree {geneTree};
END;
"""
# trim gene tree leaves to exclude '_GENENAME' (if exists)
trim_gene_tree_leaves(gene_tree)
with open(output_tree_fp, 'w') as output_tree_f:
output_tree_f.write(nexus_file % (str(species_tree)[:-1],
str(gene_tree)[:-1]))
def reformat_jane4(gene_tree,
species_tree,
output_tree_fp):
""" Reformat input trees to the format accepted by Jane4.
Parameters
----------
gene_tree: skbio.TreeNode
TreeNode instance for gene tree
species_tree: skbio.TreeNode
TreeNode instance for species tree
output_tree_fp: string
file path to output trees (Nexus format)
See Also
--------
skbio.TreeNode
Notes
-----
Input to Jane4 is a Nexus file; the trees must not contain branch
lengths and the species/gene leaves mapping is required.
"""
nexus_file = """#NEXUS
begin host;
tree host = %s
endblock;
begin parasite;
tree parasite = %s
endblock;
begin distribution;
Range %s;
endblock;
"""
# create a mapping between the species and gene tree leaves
mapping_dict = species_gene_mapping(gene_tree=gene_tree,
species_tree=species_tree)
remove_branch_lengths(tree=gene_tree)
remove_branch_lengths(tree=species_tree)
mapping_str = ""
for species in mapping_dict:
for gene in mapping_dict[species]:
mapping_str = "%s%s:%s, " % (mapping_str, gene, species)
with open(output_tree_fp, 'w') as output_tree_f:
output_tree_f.write(nexus_file % (str(species_tree),
str(gene_tree),
mapping_str[:-2]))
def reformat_treepuzzle(gene_tree,
species_tree,
gene_msa_fa_fp,
output_tree_fp,
output_msa_phy_fp):
""" Reformat input trees to the format accepted by Tree-Puzzle.
Parameters
----------
gene_tree: skbio.TreeNode
TreeNode instance for gene tree
species_tree: skbio.TreeNode
TreeNode instance for species tree
gene_msa_fa_fp: string
file path to gene alignments in FASTA format
output_tree_fp: string
file path to output trees (Nexus format)
output_msa_phy_fp: string
file path to output MSA in PHYLIP format
See Also
--------
skbio.TreeNode
"""
# remove the root branch length (output with ALF)
for node in gene_tree.postorder():
if node.is_root():
node.length = None
for node in species_tree.postorder():
if node.is_root():
node.length = None
# trim gene tree leaves to exclude '_GENENAME' (if exists)
trim_gene_tree_leaves(gene_tree)
join_trees(gene_tree,
species_tree,
output_tree_fp)
# trim FASTA sequence labels to exclude '/GENENAME' (if exists)
msa_fa = TabularMSA.read(gene_msa_fa_fp, constructor=Protein)
msa_fa.reassign_index(minter='id')
mapping = id_mapper(msa_fa.index)
msa_fa.reassign_index(mapping=mapping)
msa_fa.write(output_msa_phy_fp, format='phylip')
def _merge_genbank_seqs(genbank_fp):
""" Merge one to multiple sequences in a GenBank file into one.
Parameters
----------
genbank_fp: string
file path to genome in GenBank format
Returns
-------
tuple of (
skbio.Sequence,
Genome sequence, genes and metadata
dict of { list of [ string, int, int, string ] }
Gene name : translation, start, end, and strand
)
"""
loci = []
nucl_seq = ''
genes = {}
nseq = 0 # number of nucleotide sequences
with open(genbank_fp, 'r') as input_f:
for line in input_f:
if line.startswith('//'):
nseq += 1
abs_pos = 0 # absolute position in concatenated nucleotide sequence
for i in range(nseq):
gb = Sequence.read(genbank_fp, seq_num=i+1, format='genbank')
locus_name = gb.metadata['LOCUS']['locus_name']
size = gb.metadata['LOCUS']['size']
loci.append([locus_name, size])
nucl_seq += str(gb)
for feature in gb.interval_metadata.query(metadata={'type': 'CDS'}):
m = feature.metadata
if 'protein_id' in m:
protein_id = m['protein_id'].replace('\"', '')
if protein_id not in genes:
translation = m['translation'].replace(' ', '') \
.replace('\"', '')
strand = m['strand']
start = feature.bounds[0][0] + abs_pos + 1
end = feature.bounds[0][1] + abs_pos
genes[protein_id] = [translation, start, end, strand]
abs_pos += int(size)
gb = DNA(nucl_seq)
# generate mock metadata for the merged sequence
gb.metadata['LOCUS'] = {'locus_name': 'locus001', 'size': len(nucl_seq),
'unit': 'bp', 'shape': 'circular',
'division': 'CON', 'mol_type': 'DNA',
'date': '01-JAN-1900'}
gb.metadata['id'] = 'locus001'
gid = 1 # assign an incremental integer to the current gene
gb.interval_metadata._intervals = []
for (gene, l) in sorted(genes.items(), key=lambda x: x[1][1]):
# generate "gene" and "CDS" records for each protein-coding gene
location = str(l[1]) + '..' + str(l[2]) # start and end coordinates
if l[3] == '-': # negative strand
location = 'complement(' + location + ')'
feature = {'type': 'gene', 'locus_tag': 'gene' + str(gid),
'__location': location}
gb.interval_metadata.add([(l[1] - 1, l[2])], metadata=feature)
feature = {'type': 'CDS', 'locus_tag': 'gene' + str(gid),
'__location': location, 'protein_id': gene,
'translation': l[0]}
gb.interval_metadata.add([(l[1] - 1, l[2])], metadata=feature)
gid += 1
return (gb, genes)
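# Usage sketch (hypothetical GenBank file; the protein id shown illustrates
# the key format only, it is not real data):
#
#   gb, genes = _merge_genbank_seqs('genome.gbk')
#   # gb: a single skbio.DNA sequence with mock LOCUS metadata and
#   #     regenerated gene/CDS interval metadata
#   # genes: {'WP_000001.1': [translation, start, end, strand], ...}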
def reformat_egid(genbank_fp,
output_dir):
""" Reformat input genome to the formats accepted by EGID.
Parameters
----------
genbank_fp: string
file path to genome in GenBank format
output_dir: string
output directory path
Notes
-----
Input to EGID are five obsolete NCBI standard files: gbk, fna, faa, ffn, and ptt.
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections.abc import Iterable
import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import explained_variance_score
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_squared_log_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import r2_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from arch.api.utils import log_utils
from federatedml.util import consts
LOGGER = log_utils.getLogger()
class Evaluation(object):
def __init__(self, eval_type='binary'):
self.eval_type = eval_type
self.thresholds = None
self.normalize = False
self.eval_func = {
consts.AUC: self.auc,
consts.KS: self.ks,
consts.LIFT: self.lift,
consts.PRECISION: self.precision,
consts.RECALL: self.recall,
consts.ACCURACY: self.accuracy,
consts.EXPLAINED_VARIANCE: self.explain_variance,
consts.MEAN_ABSOLUTE_ERROR: self.mean_absolute_error,
consts.MEAN_SQUARED_ERROR: self.mean_squared_error,
consts.MEAN_SQUARED_LOG_ERROR: self.mean_squared_log_error,
consts.MEDIAN_ABSOLUTE_ERROR: self.median_absolute_error,
consts.R2_SCORE: self.r2_score,
consts.ROOT_MEAN_SQUARED_ERROR: self.root_mean_squared_error
}
self.regression_support_func = [
consts.EXPLAINED_VARIANCE,
consts.MEAN_ABSOLUTE_ERROR,
consts.MEAN_SQUARED_ERROR,
consts.MEAN_SQUARED_LOG_ERROR,
consts.MEDIAN_ABSOLUTE_ERROR,
consts.R2_SCORE,
consts.ROOT_MEAN_SQUARED_ERROR,
]
def report(self, labels, pred_scores, metrics, thresholds=None, pos_label=None):
"""
Define the report of each evaluation method in metrics.
Parameters
----------
labels : value list. The labels of the data set.
pred_scores : value list. The model's predicted results; each entry corresponds to a label.
metrics: str list. One or several evaluation methods, among "auc", "ks", "lift",
"precision", "recall", "accuracy", "explained_variance", "mean_absolute_error",
"mean_squared_error", "mean_squared_log_error", "median_absolute_error", "r2_score"
and "root_mean_squared_error".
thresholds: value list. Effective only for 'binary'. A predicted score is mapped to 1
if it is larger than the threshold, otherwise 0. If several thresholds are given,
a result is returned for each. Default None.
pos_label: The same as the DataIO label type. Effective only for 'binary'. Labels equal
to pos_label are set to 1, all others to 0. If pos_label is None, labels are left
unchanged. Default None.
Returns
----------
dict
The key of return is element in metrics and the value is evaluation result. For instance, if metrics is ["auc", "precision"], thresholds is [0.5, 0.7],
the return is { 'auc': 0.81, 'precision': [ (0.5, 0.77), (0.7, 0.66) ] }. (0.81, 0.77, 0.66 are examples)
"""
if metrics is None:
LOGGER.warning("Not metrics can be found in evaluation, return None")
return None
if self.eval_type == consts.REGRESSION:
new_metrics = []
for metric in metrics:
if metric in self.regression_support_func:
new_metrics.append(metric)
metrics = new_metrics
if len(metrics) == 0:
LOGGER.warning("Not metrics can be found in evaluation of regression, return None")
return None
self.thresholds = thresholds
if self.thresholds is None and self.eval_type == consts.BINARY:
self.thresholds = [0.5]
elif self.eval_type == consts.MULTY:
self.thresholds = None
new_labels = []
new_pred_scores = []
for i in range(labels.shape[0]):
if labels[i] is not None:
if self.eval_type == consts.BINARY and pos_label is not None:
if pos_label == labels[i]:
new_labels.append(1)
else:
new_labels.append(0)
else:
new_labels.append(labels[i])
new_pred_scores.append(pred_scores[i])
eval_res = {}
if len(new_labels) == 0:
LOGGER.warning("Each of labels is None, can not evaluation!")
for metric in metrics:
eval_res[metric] = None
return eval_res
labels = np.array(new_labels)
pred_scores = np.array(new_pred_scores)
for metric in metrics:
if metric in self.eval_func:
res = self.eval_func[metric](labels, pred_scores)
eval_res[metric] = self.__evaluation_format_translate(res, self.thresholds, self.eval_type)
else:
LOGGER.warning("can not find evaluation of {}".format(metric))
self.thresholds = None
return eval_res
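# A minimal usage sketch (hypothetical data; ``report`` indexes ``labels``
# with ``labels.shape[0]``, so numpy arrays are assumed):
#
#   ev = Evaluation(eval_type=consts.BINARY)
#   labels = np.array([1, 0, 1, 1, 0])
#   scores = np.array([0.9, 0.2, 0.65, 0.4, 0.1])
#   ev.report(labels, scores, [consts.AUC, consts.PRECISION], thresholds=[0.5])
#   # -> {'auc': ..., 'precision': [(0.5, ...)]}   (values depend on the data)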
def __evaluation_format_translate(self, results, thresholds, eval_type):
"""
Transform evaluation result's format for output
"""
if isinstance(results, float):
return np.around(results, 4)
else:
evaluation_format = []
if eval_type == consts.BINARY:
for i in range(len(thresholds)):
if isinstance(results[i], Iterable):
score = results[i][-1]
else:
score = results[i]
if isinstance(score, float):
score = np.around(score, 4)
res = (thresholds[i], score)
evaluation_format.append(res)
else:
if isinstance(results, float):
results = np.around(results, 4)
elif isinstance(results, dict):
for key in results:
if isinstance(results[key], float):
results[key] = np.around(results[key], 4)
evaluation_format = results
return evaluation_format
def auc(self, labels, pred_scores):
"""
Compute AUC for binary classification.
Parameters
----------
labels: value list. The labels of the data set.
pred_scores: value list. The model's predicted results; each entry corresponds to a label.
Returns
----------
float
The AUC
"""
if self.eval_type == consts.BINARY:
return roc_auc_score(labels, pred_scores)
else:
LOGGER.warning("auc is just suppose Binary Classification! return None as results")
return None
def explain_variance(self, labels, pred_scores):
"""
Compute explained variance
Parameters
----------
labels: value list. The labels of the data set.
pred_scores: value list. The model's predicted results; each entry corresponds to a label.
Returns
----------
float
The explained variance
"""
return explained_variance_score(labels, pred_scores)
def mean_absolute_error(self, labels, pred_scores):
"""
Compute mean absolute error
Parameters
----------
labels: value list. The labels of the data set.
pred_scores: value list. The model's predicted results; each entry corresponds to a label.
Returns
----------
float
A non-negative floating point.
"""
return mean_absolute_error(labels, pred_scores)
def mean_squared_error(self, labels, pred_scores):
"""
Compute mean square error
Parameters
----------
labels: value list. The labels of the data set.
pred_scores: value list. The model's predicted results; each entry corresponds to a label.
Returns
----------
float
A non-negative floating point value
"""
return mean_squared_error(labels, pred_scores)
def mean_squared_log_error(self, labels, pred_scores):
"""
Compute mean squared logarithmic error
Parameters
----------
labels: value list. The labels of the data set.
pred_scores: value list. The model's predicted results; each entry corresponds to a label.
Returns
----------
float
A non-negative floating point value
"""
return mean_squared_log_error(labels, pred_scores)
def median_absolute_error(self, labels, pred_scores):
"""
Compute median absolute error
Parameters
----------
labels: value list. The labels of the data set.
pred_scores: value list. The model's predicted results; each entry corresponds to a label.
Returns
----------
float
A positive floating point value
"""
return median_absolute_error(labels, pred_scores)
def r2_score(self, labels, pred_scores):
"""
Compute R^2 (coefficient of determination) score
Parameters
----------
labels: value list. The labels of the data set.
pred_scores: value list. The model's predicted results; each entry corresponds to a label.
Returns
----------
float
The R^2 score
"""
return r2_score(labels, pred_scores)
def root_mean_squared_error(self, labels, pred_scores):
"""
Compute the root mean squared error
Parameters
----------
labels: value list. The labels of the data set.
pred_scores: value list. The model's predicted results; each entry corresponds to a label.
Return
----------
float
A positive floating point value
"""
return np.sqrt(mean_squared_error(labels, pred_scores))
def ks(self, labels, pred_scores):
"""
Compute the Kolmogorov-Smirnov statistic
Parameters
----------
labels: value list. The labels of the data set.
pred_scores: value list. The model's predicted results; each entry corresponds to a label.
Returns
----------
float
A positive floating point value
"""
if self.eval_type == consts.BINARY:
fpr, tpr, thresholds = roc_curve(np.array(labels), np.array(pred_scores), drop_intermediate=0)
return max(tpr - fpr)
else:
LOGGER.warning("ks is just suppose Binary Classification! return None as results")
return None
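# Illustration: KS is the largest gap between the TPR and FPR curves. For
# labels [0, 0, 1, 1] and scores [0.1, 0.4, 0.35, 0.8], the ROC points give
# max(tpr - fpr) = 0.5 (worked by hand; a sanity check, not a test).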
def lift(self, labels, pred_scores, thresholds=None):
"""
Compute lift of binary classification.
Parameters
----------
labels: value list. The labels of the data set.
pred_scores: value list. The model's predicted results; each entry corresponds to a label.
thresholds: value list. Effective only for 'binary'. A predicted score is mapped to 1
if it is larger than the threshold, otherwise 0. If several thresholds are given,
a result is returned for each. Default None.
Returns
----------
float
The lift
"""
if thresholds is None:
thresholds = self.thresholds
if thresholds is None and self.eval_type == consts.BINARY:
thresholds = [0.5]
if self.eval_type == consts.BINARY:
lift_operator = Lift()
return lift_operator.compute(labels, pred_scores, thresholds=thresholds)
else:
LOGGER.warning("lift is just suppose Binary Classification! return None as results")
return None
def precision(self, labels, pred_scores, thresholds=None, result_filter=None):
"""
Compute the precision
Parameters
----------
labels: value list. The labels of the data set.
pred_scores: value list. The model's predicted results; each entry corresponds to a label.
thresholds: value list. Effective only for 'binary'. A predicted score is mapped to 1
if it is larger than the threshold, otherwise 0. If several thresholds are given,
a result is returned for each.
and $0^{\circ}$ (p-value < 0.025)."
sw.append_new_tag(ps_1_1, "OriginalTestsPassedAndNormalAngleSummary", tex_tag_file_name)
ps_1_2 = "To test whether trial-to-trial excitability fluctuations also modulate the remaining " + str(num_tests_not_passed) + " non-stationary Stage 2 clusters, "
sw.append_new_tag(ps_1_2, "NumNonStationary", tex_tag_file_name)
if (analysis_dict_key in ["SelectivelyDifferenced", "SelectivelyDifferencedBoxJenkins"]):
true_where_undifferenced = np.asarray(sdds[analysis_dict_key + '_selective_differences_undifferenced'])
where_undifferenced = np.where(true_where_undifferenced)
num_undifferenced = np.sum(true_where_undifferenced)
true_where_differenced = np.asarray(sdds[analysis_dict_key + '_selective_differences_differenced'])
where_differenced = np.where(true_where_differenced)
num_differenced = np.sum(true_where_differenced)
sw.draw_neighbouring_bar_chart([[num_undifferenced], [num_differenced]], ('Clusters'), specific_nonstationarity_dir + "ClustersDifferenced.pdf", '', ('Undifferenced', 'Differenced'), "")
true_where_differenced_and_undifferenced = np.logical_or(true_where_undifferenced, true_where_differenced)
number_of_differences = np.hstack((np.asarray(sdds[analysis_dict_key + '_index_to_use_0'])[np.where(true_where_differenced_and_undifferenced)], np.asarray(sdds[analysis_dict_key + '_index_to_use_1'])[np.where(true_where_differenced_and_undifferenced)]))
hist_number_of_selective_differences, _ = np.histogram(number_of_differences, range=[0,2], bins=3)
if (analysis_dict_key in ["SelectivelyDifferenced"]):
data_part = sw.percent_and_frac_string(num_differenced, num_tests_not_passed_ORIGINAL)
ps_2 = data_part + " of non-stationary clusters had differencing applied to at least one neuron. "
sw.append_new_tag(ps_2, "SelectivelyDifferencedSummaryFullString", tex_tag_file_name)
sw.append_new_tag(data_part, "SelectivelyDifferencedSummaryNum", tex_tag_file_name)
ps_3 = "In total, " + str(number_correlated) + " of the " + str(num_for_type) + " criteria fulfilling clusters were linearly correlated following differencing (p-value < 0.05) (Fig. 6h). "
ps_3 += str(num_correlated_and_different_from_45_and_different_from_0_tests_passed) + " of the correlated criteria fulfilling clusters had $\\theta_{45}$ angles significantly different from $0^{\circ}$ (p-value < 0.025) and significantly less than $45^{\circ}$ (p-value < 0.025). "
ps_3 += "Of these, normality was not rejected for " + str(num_correlated_and_different_from_45_tests_passed_and_normal) + " clusters (Henze-Zirkler p > 0.05). "
ps_3 += "Fig. 6j shows that the distribution of estimated angles for the successfully differenced clusters. Fig. 8? shows the difference between original and differenced estimated clusters?"
sw.append_new_tag(ps_3, "s", tex_tag_file_name)
data_part = sw.percent_and_frac_string(num_tests_passed_and_correlated, num_differenced)
ps_5 = data_part + " of these clusters fulfilled the stationarity and correlation criteria (see Methods), of which "
sw.append_new_tag(ps_5, "SelectivelyDifferencedTestsPassedNewSummaryFullString1", tex_tag_file_name)
sw.append_new_tag(data_part, "SelectivelyDifferencedTestsPassedNewSummaryNum1", tex_tag_file_name)
if (analysis_dict_key in ["SelectivelyDifferencedBoxJenkins"]):
true_where_was_box_jenkinsed = np.asarray(sdds[analysis_dict_key + '_was_box_jeckinsed'])
true_where_differenced_and_boxed = np.logical_and(true_where_differenced, true_where_was_box_jenkinsed)
num_true_where_differenced_and_boxed = np.sum(true_where_differenced_and_boxed)
true_where_tests_passed_differenced_and_boxed = np.logical_and(true_where_tests_passed, true_where_differenced_and_boxed)
true_where_tests_passed_and_normal_differenced_and_boxed = np.logical_and(true_where_tests_passed_and_normal, true_where_differenced_and_boxed)
num_true_where_tests_passed_differenced_and_boxed = np.sum(true_where_tests_passed_differenced_and_boxed)
num_true_where_tests_passed_and_normal_differenced_and_boxed = np.sum(true_where_tests_passed_and_normal_differenced_and_boxed)
data_part = sw.percent_and_frac_string(num_true_where_differenced_and_boxed, num_differenced)
ps_8 = data_part + " of these differenced clusters had an AR and/or MA model applied to at least one neuron. "
sw.append_new_tag(ps_8, "SelectivelyDifferencedBoxJenkinsDifferencedSummary", tex_tag_file_name)
sw.append_new_tag(data_part, "SelectivelyDifferencedBoxJenkinsDifferencedSummaryNum", tex_tag_file_name)
data_part_1 = sw.percent_and_frac_string(num_tests_passed, num_true_where_differenced_and_boxed)
ps_9 = data_part_1 + " of ARIMA modelled clusters fulfilled the criteria, of which "
sw.append_new_tag(ps_9, "SelectivelyDifferencedBoxJenkinsCorrelatedSummary1", tex_tag_file_name)
sw.append_new_tag(data_part_1, "SelectivelyDifferencedBoxJenkinsCorrelatedSummaryNum1", tex_tag_file_name)
differenced_boxed_ARs = np.hstack((np.asarray(sdds[analysis_dict_key + '_AR_p_0'])[np.where(true_where_differenced_and_boxed)], np.asarray(sdds[analysis_dict_key + '_AR_p_1'])[np.where(true_where_differenced_and_boxed)]))
differenced_boxed_MAs = np.hstack((np.asarray(sdds[analysis_dict_key + '_MA_q_0'])[np.where(true_where_differenced_and_boxed)], np.asarray(sdds[analysis_dict_key + '_MA_q_1'])[np.where(true_where_differenced_and_boxed)]))
differenced_boxed_ARs_histo_counts = np.histogram(differenced_boxed_ARs, range=[0, 5], bins=5)[0].tolist()
differenced_boxed_MAs_histo_counts = np.histogram(differenced_boxed_MAs, range=[0, 5], bins=5)[0].tolist()
sw.draw_neighbouring_bar_chart([differenced_boxed_ARs_histo_counts, differenced_boxed_MAs_histo_counts], ('0', '1', '2', '3', '4'), specific_nonstationarity_dir + "Differenced_ARIMA.pdf", '', ('AR', 'MA'), 'Order', custom_y_tick_locators=[220, 20])
# Angles
sw.plot_angle_vs_reliability_plots(sdds[analysis_dict_key + '_BS_PCA_mean_angle_up_to_45'], sdds['PCA_ellipse_overall_reliability'], sdds['PCA_ellipse_conj_reliability'], specific_angle_analysis_dir, "Angle_Vs_Reliability_BS_PCA")
sw.normal_histo_plot([sdds[analysis_dict_key + '_BS_PCA_different_from_45_sd_method']], specific_angle_analysis_dir + "Diff45_BS_Sd_PVal_Hist", bins=20, x_axis_label="p-value", y_axis_label="Frequency", alpha=0.78, add_chi_squared_text=True)
sw.normal_histo_plot([sdds[analysis_dict_key + '_PCA_BS_empirical_pvalue_different_from_45']], specific_angle_analysis_dir + "Diff45_BS_PCA_empirical_pvalue_hist", bins=40, x_axis_label="p-value", y_axis_label="Frequency", alpha=0.78, add_chi_squared_text=True)
sw.normal_histo_plot([sdds[analysis_dict_key + '_PCA_BS_empirical_pvalue_different_from_0']], specific_angle_analysis_dir + "Diff0_BS_PCA_empirical_pvalue_hist", bins=40, x_axis_label="p-value", y_axis_label="Frequency", alpha=0.78, add_chi_squared_text=True)
sw.plot_angle_confidence_bound_plots(sdds[analysis_dict_key + '_BS_PCA_mean_angle_up_to_45'], sdds[analysis_dict_key + '_PCA_BS_empirical_CI_lower'], sdds[analysis_dict_key + '_PCA_BS_empirical_CI_upper'], sdds[analysis_dict_key + '_is_still_correlated'], sdds[analysis_dict_key + '_is_PCA_BS_empirical_pvalue_different_from_45'], specific_angle_analysis_dir, "AngleCI_BS_PCA")
sw.basic_x_y_plot([sdds["Original" + '_BS_PCA_mean_angle']], [sdds[analysis_dict_key + '_BS_PCA_mean_angle']], specific_angle_analysis_dir + "OriginalAngle_Vs_Angle_BS_PCA", draw_y_equals_x=True, y_equals_x_max=90, x_axis_label='Degrees', y_axis_label='Degrees', s=4, scatter_point_color_groups=['g'], custom_x_tick_locators=[90, 10])
sw.basic_x_y_plot([sdds[analysis_dict_key + '_BS_PCA_mean_angle']], [sdds[analysis_dict_key + '_FA_angle_BS_mean']], specific_angle_analysis_dir + "Angle_BS_PCA_Vs_FA", draw_y_equals_x=True, y_equals_x_max=90, x_axis_label='Degrees', y_axis_label='Degrees', s=4, scatter_point_color_groups=['g'], custom_x_tick_locators=[90, 10])
# Non Stationarity
acf_rvalues = np.vstack((np.asarray(sdds[analysis_dict_key + '_STs_acf_rvalues_0']), np.asarray(sdds[analysis_dict_key + '_STs_acf_rvalues_1'])))
acf_pvalues = np.vstack((np.asarray(sdds[analysis_dict_key + '_STs_acf_pvalues_0']), np.asarray(sdds[analysis_dict_key + '_STs_acf_pvalues_1'])))
acf_positive_pvalues = np.vstack((np.asarray(sdds[analysis_dict_key + '_STs_acf_positive_pvalues_0']), np.asarray(sdds[analysis_dict_key + '_STs_acf_positive_pvalues_1'])))
acf_negative_pvalues = np.vstack((np.asarray(sdds[analysis_dict_key + '_STs_acf_negative_pvalues_0']), np.asarray(sdds[analysis_dict_key + '_STs_acf_negative_pvalues_1'])))
ccf_pvalues = np.vstack((np.asarray(sdds[analysis_dict_key + '_STs_ccf_pvalues_0']), np.asarray(sdds[analysis_dict_key + '_STs_ccf_pvalues_1'])))
ccf_positive_pvalues = np.vstack((np.asarray(sdds[analysis_dict_key + '_STs_ccf_positive_pvalues_0']), np.asarray(sdds[analysis_dict_key + '_STs_ccf_positive_pvalues_1'])))
ccf_negative_pvalues = np.vstack((np.asarray(sdds[analysis_dict_key + '_STs_ccf_negative_pvalues_0']), np.asarray(sdds[analysis_dict_key + '_STs_ccf_negative_pvalues_1'])))
pccf_pvalues = np.vstack((np.asarray(sdds[analysis_dict_key + '_STs_pccf_pvalues_0']), np.asarray(sdds[analysis_dict_key + '_STs_pccf_pvalues_1'])))
pccf_positive_pvalues = np.vstack((np.asarray(sdds[analysis_dict_key + '_STs_pccf_positive_pvalues_0']), np.asarray(sdds[analysis_dict_key + '_STs_pccf_positive_pvalues_1'])))
pccf_negative_pvalues = np.vstack((np.asarray(sdds[analysis_dict_key + '_STs_pccf_negative_pvalues_0']), np.asarray(sdds[analysis_dict_key + '_STs_pccf_negative_pvalues_1'])))
pacf_pvalues = np.vstack((np.asarray(sdds[analysis_dict_key + '_STs_pacf_pvalues_0']), np.asarray(sdds[analysis_dict_key + '_STs_pacf_pvalues_1'])))
pacf_positive_pvalues = np.vstack((np.asarray(sdds[analysis_dict_key + '_STs_pacf_positive_pvalues_0']), np.asarray(sdds[analysis_dict_key + '_STs_pacf_positive_pvalues_1'])))
pacf_negative_pvalues = np.vstack((np.asarray(sdds[analysis_dict_key + '_STs_pacf_negative_pvalues_0']), np.asarray(sdds[analysis_dict_key + '_STs_pacf_negative_pvalues_1'])))
sharipo_normality_pvalues = np.hstack((np.asarray(sdds[analysis_dict_key + '_sharipo_normality_p_0']), np.asarray(sdds[analysis_dict_key + '_sharipo_normality_p_1'])))
kpss_stationarity_pvalues = np.hstack((np.asarray(sdds[analysis_dict_key + '_KPSS_STs_0_pvalue']), np.asarray(sdds[analysis_dict_key + '_KPSS_STs_1_pvalue'])))
ADFuller_STs_0_and_1_pvalue = np.hstack((sdds[analysis_dict_key + '_ADFuller_STs_0_pvalue'], sdds[analysis_dict_key + '_ADFuller_STs_1_pvalue']))
TI_Vs_STs_LR_0_and_1_pvalue = np.hstack((sdds[analysis_dict_key + '_TI_Vs_STs_LR_0_pvalue'], sdds[analysis_dict_key + '_TI_Vs_STs_LR_1_pvalue']))
(number_of_examples, number_of_lags) = acf_pvalues.shape
for number_of_lags_for_cumulative_plot in [5, number_of_lags]:
particular_number_of_lags_specific_lag_pvals_nonstationary_dir = specific_lag_pvals_nonstationary_dir + str(number_of_lags_for_cumulative_plot) + "Lags/"
sw.mkdir(particular_number_of_lags_specific_lag_pvals_nonstationary_dir)
sw.cumulative_histo_plot([acf_pvalues[:, lag_index_zeroed] for lag_index_zeroed in range(number_of_lags_for_cumulative_plot)], particular_number_of_lags_specific_lag_pvals_nonstationary_dir + analysis_dict_key + "_ACF_PVal_CumHist" + suf, bins=200, x_axis_label="p-value", y_axis_label="Normalised\ncumulative sum", custom_x_tick_locators=[1.0, 0.2], add_chi_squared_text=True)
sw.cumulative_histo_plot([acf_positive_pvalues[:, lag_index_zeroed] for lag_index_zeroed in range(number_of_lags_for_cumulative_plot)], particular_number_of_lags_specific_lag_pvals_nonstationary_dir + analysis_dict_key + "_ACF_PVal_positive_CumHist" + suf, bins=200, x_axis_label="p-value", y_axis_label="Normalised\ncumulative sum", custom_x_tick_locators=[1.0, 0.2], add_chi_squared_text=True)
sw.cumulative_histo_plot([acf_negative_pvalues[:, lag_index_zeroed] for lag_index_zeroed in range(number_of_lags_for_cumulative_plot)], particular_number_of_lags_specific_lag_pvals_nonstationary_dir + analysis_dict_key + "_ACF_PVal_negative_CumHist" + suf, bins=200, x_axis_label="p-value", y_axis_label="Normalised\ncumulative sum", custom_x_tick_locators=[1.0, 0.2], add_chi_squared_text=True)
sw.cumulative_histo_plot([ccf_pvalues[:, lag_index_zeroed] for lag_index_zeroed in range(number_of_lags_for_cumulative_plot)], particular_number_of_lags_specific_lag_pvals_nonstationary_dir + analysis_dict_key + "_CCF_PVal_CumHist" + suf, bins=200, x_axis_label="p-value", y_axis_label="Normalised\ncumulative sum", custom_x_tick_locators=[1.0, 0.2], add_chi_squared_text=True)
sw.cumulative_histo_plot([ccf_positive_pvalues[:, lag_index_zeroed] for lag_index_zeroed in range(number_of_lags_for_cumulative_plot)], particular_number_of_lags_specific_lag_pvals_nonstationary_dir + analysis_dict_key + "_CCF_PVal_positive_CumHist" + suf, bins=200, x_axis_label="p-value", y_axis_label="Normalised\ncumulative sum", custom_x_tick_locators=[1.0, 0.2], add_chi_squared_text=True)
sw.cumulative_histo_plot([ccf_negative_pvalues[:, lag_index_zeroed] for lag_index_zeroed in range(number_of_lags_for_cumulative_plot)], particular_number_of_lags_specific_lag_pvals_nonstationary_dir + analysis_dict_key + "_CCF_PVal_negative_CumHist" + suf, bins=200, x_axis_label="p-value", y_axis_label="Normalised\ncumulative sum", custom_x_tick_locators=[1.0, 0.2], add_chi_squared_text=True)
sw.cumulative_histo_plot([pacf_pvalues[:, lag_index_zeroed] for lag_index_zeroed in range(number_of_lags_for_cumulative_plot)], particular_number_of_lags_specific_lag_pvals_nonstationary_dir + analysis_dict_key + "_PACF_PVal_CumHist" + suf, bins=200, x_axis_label="p-value", y_axis_label="Normalised\ncumulative sum", custom_x_tick_locators=[1.0, 0.2], add_chi_squared_text=True)
sw.cumulative_histo_plot([pacf_positive_pvalues[:, lag_index_zeroed] for lag_index_zeroed in range(number_of_lags_for_cumulative_plot)], particular_number_of_lags_specific_lag_pvals_nonstationary_dir + analysis_dict_key + "_PACF_PVal_positive_CumHist" + suf, bins=200, x_axis_label="p-value", y_axis_label="Normalised\ncumulative sum", custom_x_tick_locators=[1.0, 0.2], add_chi_squared_text=True)
sw.cumulative_histo_plot([pacf_negative_pvalues[:, lag_index_zeroed] for lag_index_zeroed in range(number_of_lags_for_cumulative_plot)], particular_number_of_lags_specific_lag_pvals_nonstationary_dir + analysis_dict_key + "_PACF_PVal_negative_CumHist" + suf, bins=200, x_axis_label="p-value", y_axis_label="Normalised\ncumulative sum", custom_x_tick_locators=[1.0, 0.2], add_chi_squared_text=True)
sw.cumulative_histo_plot([pccf_pvalues[:, lag_index_zeroed] for lag_index_zeroed in range(number_of_lags_for_cumulative_plot)], particular_number_of_lags_specific_lag_pvals_nonstationary_dir + analysis_dict_key + "_PCCF_PVal_CumHist" + suf, bins=200, x_axis_label="p-value", y_axis_label="Normalised\ncumulative sum", custom_x_tick_locators=[1.0, 0.2], add_chi_squared_text=True)
sw.cumulative_histo_plot([pccf_positive_pvalues[:, lag_index_zeroed] for lag_index_zeroed in range(number_of_lags_for_cumulative_plot)], particular_number_of_lags_specific_lag_pvals_nonstationary_dir + analysis_dict_key + "_PCCF_PVal_positive_CumHist" + suf, bins=200, x_axis_label="p-value", y_axis_label="Normalised\ncumulative sum", custom_x_tick_locators=[1.0, 0.2], add_chi_squared_text=True)
sw.cumulative_histo_plot([pccf_negative_pvalues[:, lag_index_zeroed] for lag_index_zeroed in range(number_of_lags_for_cumulative_plot)], particular_number_of_lags_specific_lag_pvals_nonstationary_dir + analysis_dict_key + "_PCCF_PVal_negative_CumHist" + suf, bins=200, x_axis_label="p-value", y_axis_label="Normalised\ncumulative sum", custom_x_tick_locators=[1.0, 0.2], add_chi_squared_text=True)
if (plot_all_lag_histograms):
acf_dir = specific_lag_pvals_nonstationary_dir + "ACFs/"; sw.mkdir(acf_dir)
acf_positive_dir = specific_lag_pvals_nonstationary_dir + "ACFs_Positive/"; sw.mkdir(acf_positive_dir)
acf_negative_dir = specific_lag_pvals_nonstationary_dir + "ACFs_Negative/"; sw.mkdir(acf_negative_dir)
ccf_dir = specific_lag_pvals_nonstationary_dir + "CCFs/"; sw.mkdir(ccf_dir)
ccf_positive_dir = specific_lag_pvals_nonstationary_dir + "CCFs_Positive/"; sw.mkdir(ccf_positive_dir)
ccf_negative_dir = specific_lag_pvals_nonstationary_dir + "CCFs_Negative/"; sw.mkdir(ccf_negative_dir)
pccf_dir = specific_lag_pvals_nonstationary_dir + "PCCFs/"; sw.mkdir(pccf_dir)
pccf_positive_dir = specific_lag_pvals_nonstationary_dir + "PCCFs_Positive/"; sw.mkdir(pccf_positive_dir)
pccf_negative_dir = specific_lag_pvals_nonstationary_dir + "PCCFs_Negative/"; sw.mkdir(pccf_negative_dir)
pacf_dir = specific_lag_pvals_nonstationary_dir + "PACFs/"; sw.mkdir(pacf_dir)
pacf_positive_dir = specific_lag_pvals_nonstationary_dir + "PACFs_Positive/"; sw.mkdir(pacf_positive_dir)
pacf_negative_dir = specific_lag_pvals_nonstationary_dir + "PACFs_Negative/"; sw.mkdir(pacf_negative_dir)
for lag_index_zeroed in range(number_of_lags):
sw.normal_histo_plot([acf_rvalues[:, lag_index_zeroed]], acf_dir + "RValues_ACFLag" + str(lag_index_zeroed + 1) + suf, bins=40, histo_range=[-1.0, 1.0], x_axis_label="r-value", y_axis_label="Frequency", custom_x_tick_locators=[1.0, 0.2], custom_y_tick_locators=[20, 20], alpha=0.78)
for number_of_bins, histo_range in zip([20, 20], [[0.0, 1.0], [0.0, 0.1]]):
for lag_index_zeroed in range(number_of_lags):
sw.normal_histo_plot([acf_pvalues[:, lag_index_zeroed]], acf_dir + str(histo_range[1]) + "_ACFLag" + str(lag_index_zeroed + 1) + suf, bins=number_of_bins, histo_range=histo_range, x_axis_label="p-value", y_axis_label="Frequency", custom_x_tick_locators=[histo_range[1], 0.2], custom_y_tick_locators=[20, 20], alpha=0.78, add_chi_squared_text=True)
sw.normal_histo_plot([acf_positive_pvalues[:, lag_index_zeroed]], acf_positive_dir + str(histo_range[1]) + "_ACFPositiveLag" + str(lag_index_zeroed + 1) + suf, bins=number_of_bins, histo_range=histo_range, x_axis_label="p-value", y_axis_label="Frequency", custom_x_tick_locators=[histo_range[1], 0.2], custom_y_tick_locators=[20, 20], alpha=0.78, add_chi_squared_text=True)
sw.normal_histo_plot([acf_negative_pvalues[:, lag_index_zeroed]], acf_negative_dir + str(histo_range[1]) + "_ACFNegativeLag" + str(lag_index_zeroed + 1) + suf, bins=number_of_bins, histo_range=histo_range, x_axis_label="p-value", y_axis_label="Frequency", custom_x_tick_locators=[histo_range[1], 0.2], custom_y_tick_locators=[20, 20], alpha=0.78, add_chi_squared_text=True)
sw.normal_histo_plot([ccf_pvalues[:, lag_index_zeroed]], ccf_dir + str(histo_range[1]) + "_CCFLag" + str(lag_index_zeroed + 1) + suf, bins=number_of_bins, histo_range=histo_range, x_axis_label="p-value", y_axis_label="Frequency", custom_x_tick_locators=[histo_range[1], 0.2], custom_y_tick_locators=[20, 20], alpha=0.78, add_chi_squared_text=True)
sw.normal_histo_plot([ccf_positive_pvalues[:, lag_index_zeroed]], ccf_positive_dir + str(histo_range[1]) + "_CCFPositiveLag" + str(lag_index_zeroed + 1) + suf, bins=number_of_bins, histo_range=histo_range, x_axis_label="p-value", y_axis_label="Frequency", custom_x_tick_locators=[histo_range[1], 0.2], custom_y_tick_locators=[20, 20], alpha=0.78, add_chi_squared_text=True)
sw.normal_histo_plot([ccf_negative_pvalues[:, lag_index_zeroed]], ccf_negative_dir + str(histo_range[1]) + "_CCFNegativeLag" + str(lag_index_zeroed + 1) + suf, bins=number_of_bins, histo_range=histo_range, x_axis_label="p-value", y_axis_label="Frequency", custom_x_tick_locators=[histo_range[1], 0.2], custom_y_tick_locators=[20, 20], alpha=0.78, add_chi_squared_text=True)
sw.normal_histo_plot([pacf_pvalues[:, lag_index_zeroed]], pacf_dir + str(histo_range[1]) + "_PACFLag" + str(lag_index_zeroed + 1) + suf, bins=number_of_bins, histo_range=histo_range, x_axis_label="p-value", y_axis_label="Frequency", custom_x_tick_locators=[histo_range[1], 0.2], custom_y_tick_locators=[20, 20], alpha=0.78, add_chi_squared_text=True)
sw.normal_histo_plot([pacf_positive_pvalues[:, lag_index_zeroed]], pacf_positive_dir + str(histo_range[1]) + "_PACFPositiveLag" + str(lag_index_zeroed + 1) + suf, bins=number_of_bins, histo_range=histo_range, x_axis_label="p-value", y_axis_label="Frequency", custom_x_tick_locators=[histo_range[1], 0.2], custom_y_tick_locators=[20, 20], alpha=0.78, add_chi_squared_text=True)
sw.normal_histo_plot([pacf_negative_pvalues[:, lag_index_zeroed]], pacf_negative_dir + str(histo_range[1]) + "_PACFNegativeLag" + str(lag_index_zeroed + 1) + suf, bins=number_of_bins, histo_range=histo_range, x_axis_label="p-value", y_axis_label="Frequency", custom_x_tick_locators=[histo_range[1], 0.2], custom_y_tick_locators=[20, 20], alpha=0.78, add_chi_squared_text=True)
sw.normal_histo_plot([pccf_pvalues[:, lag_index_zeroed]], pccf_dir + str(histo_range[1]) + "_PCCFLag" + str(lag_index_zeroed + 1) + suf, bins=number_of_bins, histo_range=histo_range, x_axis_label="p-value", y_axis_label="Frequency", custom_x_tick_locators=[histo_range[1], 0.2], custom_y_tick_locators=[20, 20], alpha=0.78, add_chi_squared_text=True)
sw.normal_histo_plot([pccf_positive_pvalues[:, lag_index_zeroed]], pccf_positive_dir + str(histo_range[1]) + "_PCCFPositiveLag" + str(lag_index_zeroed + 1) + suf, bins=number_of_bins, histo_range=histo_range, x_axis_label="p-value", y_axis_label="Frequency", custom_x_tick_locators=[histo_range[1], 0.2], custom_y_tick_locators=[20, 20], alpha=0.78, add_chi_squared_text=True)
sw.normal_histo_plot([pccf_negative_pvalues[:, lag_index_zeroed]], pccf_negative_dir + str(histo_range[1]) + "_PCCFNegativeLag" + str(lag_index_zeroed + 1) + suf, bins=number_of_bins, histo_range=histo_range, x_axis_label="p-value", y_axis_label="Frequency", custom_x_tick_locators=[histo_range[1], 0.2], custom_y_tick_locators=[20, 20], alpha=0.78, add_chi_squared_text=True)
# Bartlett's Sphericity
sw.normal_histo_plot([sdds[analysis_dict_key + '_bartlett_spherecity_p_value']], bartlett_specific_nonstationarity_dir + "BartlettsSphericity_pvalues" + suf, bins=20, histo_range=[0.0, 1.0], x_axis_label="p-value", y_axis_label="Frequency", custom_x_tick_locators=[1.0, 0.2], custom_y_tick_locators=[20, 20], alpha=0.78, add_chi_squared_text=True)
# Henze-Zirkler
sw.normal_histo_plot([sdds[analysis_dict_key + '_henze-zirkler_multivariate_normality_p']], HZ_specific_nonstationarity_dir + "Henze-Zirkler_pvalues" + suf, bins=20, histo_range=[0.0, 1.0], x_axis_label="p-value", y_axis_label="Frequency", custom_x_tick_locators=[1.0, 0.2], custom_y_tick_locators=[20, 20], alpha=0.78, add_chi_squared_text=True)
# Sharipo
sw.normal_histo_plot([sharipo_normality_pvalues], sharipo_normality_specific_nonstationarity_dir + "Sharipo_pvalues" + suf, bins=20, histo_range=[0.0, 1.0], x_axis_label="p-value", y_axis_label="Frequency", custom_x_tick_locators=[1.0, 0.2], custom_y_tick_locators=[20, 20], alpha=0.78, add_chi_squared_text=True)
# KPSS
sw.normal_histo_plot([kpss_stationarity_pvalues], KPSS_stationarity_specific_nonstationarity_dir + "KPSS_pvalues" + suf, bins=6, histo_range=[0.0, 0.12], x_axis_label="p-value", y_axis_label="Frequency", custom_x_tick_locators=[0.12, 0.02], custom_y_tick_locators=[20, 20], alpha=0.78, add_chi_squared_text=True)
if (analysis_dict_key == "Original"):
kpss_num_less_than_pvalue = np.sum(kpss_stationarity_pvalues < 0.05)
kpss_string = "For the " + str(num_for_type) + " angled clusters, " + str(kpss_num_less_than_pvalue) + " of the corresponding " + str(2 * num_for_type) + " ($=2*" + str(num_for_type) + "$) single neuron cluster first spike sequences were determined as KPSS non-stationary (p < 0.05; Fig. 4d)."
sw.append_new_tag(kpss_string, "ClusterSingleUnitKPSSStationaritySummary", tex_tag_file_name)
# pincer/commands.py
# Copyright Pincer 2021-Present
# Full MIT License can be found in `LICENSE` at the project root.
from __future__ import annotations
import logging
import re
from asyncio import iscoroutinefunction, gather
from copy import deepcopy
from inspect import Signature, isasyncgenfunction
from typing import (
Optional, Dict, List, Any, Tuple, get_origin, get_args, Union,
ForwardRef, _eval_type
)
from . import __package__
from .exceptions import (
CommandIsNotCoroutine, CommandAlreadyRegistered, TooManyArguments,
InvalidAnnotation, CommandDescriptionTooLong, InvalidCommandGuild,
InvalidCommandName
)
from .objects import ThrottleScope, AppCommand, Role, User, Channel, Guild
from .objects.app import (
AppCommandOptionType, AppCommandOption, AppCommandOptionChoice,
ClientCommandStructure, AppCommandType
)
from .utils import (
get_signature_and_params, get_index, should_pass_ctx, Coro, Snowflake,
MISSING, choice_value_types, Choices
)
from .utils.types import Singleton, TypeCache, Descripted
COMMAND_NAME_REGEX = re.compile(r"^[\w-]{1,32}$")
_log = logging.getLogger(__package__)
_options_type_link = {
# TODO: Implement mentionable:
Signature.empty: AppCommandOptionType.STRING,
str: AppCommandOptionType.STRING,
int: AppCommandOptionType.INTEGER,
bool: AppCommandOptionType.BOOLEAN,
float: AppCommandOptionType.NUMBER,
User: AppCommandOptionType.USER,
Channel: AppCommandOptionType.CHANNEL,
Role: AppCommandOptionType.ROLE,
}
def command(
name: Optional[str] = None,
description: Optional[str] = "Description not set",
enable_default: Optional[bool] = True,
guild: Union[Snowflake, int, str] = None,
cooldown: Optional[int] = 0,
cooldown_scale: Optional[float] = 60,
cooldown_scope: Optional[ThrottleScope] = ThrottleScope.USER
):
"""
Command option types are designated by using type hints.
str - String
int - Integer
bool - Boolean
float - Number
pincer.objects.User - User
pincer.objects.Channel - Channel
pincer.objects.Role - Role
Mentionable is not implemented
"""
# TODO: Fix docs
# TODO: Fix docs w guild
# TODO: Fix docs w cooldown
# TODO: Fix docs w context
# TODO: Fix docs w argument descriptions
# TODO: Fix docs w argument choices
def decorator(func: Coro):
if not iscoroutinefunction(func) and not isasyncgenfunction(func):
raise CommandIsNotCoroutine(
f"Command with call `{func.__name__}` is not a coroutine, "
"which is required for commands."
)
cmd = name or func.__name__
if not re.match(COMMAND_NAME_REGEX, cmd):
raise InvalidCommandName(
f"Command `{cmd}` doesn't follow the name requirements."
"Ensure to match the following regex:"
f" {COMMAND_NAME_REGEX.pattern}"
)
try:
guild_id = int(guild) if guild else MISSING
except ValueError:
raise InvalidCommandGuild(
f"Command with call `{func.__name__}` its `guilds` parameter "
"contains a non valid guild id."
)
if len(description) > 100:
raise CommandDescriptionTooLong(
f"Command `{cmd}` (`{func.__name__}`) its description exceeds "
"the 100 character limit."
)
if reg := ChatCommandHandler.register.get(cmd):
raise CommandAlreadyRegistered(
f"Command `{cmd}` (`{func.__name__}`) has already been "
f"registered by `{reg.call.__name__}`."
)
sig, params = get_signature_and_params(func)
pass_context = should_pass_ctx(sig, params)
if len(params) > (25 + pass_context):
raise TooManyArguments(
f"Command `{cmd}` (`{func.__name__}`) can only have 25 "
f"arguments (excluding the context and self) yet {len(params)} "
"were provided!"
)
options: List[AppCommandOption] = []
for idx, param in enumerate(params):
if idx == 0 and pass_context:
continue
annotation, required = sig[param].annotation, True
argument_description: Optional[str] = None
choices: List[AppCommandOptionChoice] = []
if isinstance(annotation, str):
TypeCache()
annotation = eval(annotation, TypeCache.cache, globals())
if isinstance(annotation, Descripted):
argument_description = annotation.description
annotation = annotation.key
if len(argument_description) > 100:
raise CommandDescriptionTooLong(
f"Tuple annotation `{annotation}` on parameter "
f"`{param}` in command `{cmd}` (`{func.__name__}`), "
"argument description too long. (maximum length is 100 "
"characters)"
)
if get_origin(annotation) is Union:
args = get_args(annotation)
if type(None) in args:
required = False
# Do NOT use isinstance as this is a comparison between
# two values of the type type and isinstance does NOT
# work here.
union_args = [t for t in args if t is not type(None)]
annotation = (
get_index(union_args, 0)
if len(union_args) == 1
else Union[Tuple[List]]
)
if get_origin(annotation) is Choices:
args = get_args(annotation)
if len(args) > 25:
raise InvalidAnnotation(
f"Choices/Literal annotation `{annotation}` on "
f"parameter `{param}` in command `{cmd}` "
f"(`{func.__name__}`) amount exceeds limit of 25 items!"
)
choice_type = type(args[0])
if choice_type is Descripted:
choice_type = type(args[0].key)
for choice in args:
choice_description = choice
if isinstance(choice, Descripted):
choice_description = choice.description
choice = choice.key
if choice_type is tuple:
choice_type = type(choice)
if type(choice) not in choice_value_types:
# Properly get all the names of the types
valid_types = list(map(
lambda x: x.__name__,
choice_value_types
))
raise InvalidAnnotation(
f"Choices/Literal annotation `{annotation}` on "
f"parameter `{param}` in command `{cmd}` "
f"(`{func.__name__}`), invalid type received. "
"Value must be a member of "
f"{', '.join(valid_types)} but "
f"{type(choice).__name__} was given!"
)
elif not isinstance(choice, choice_type):
raise InvalidAnnotation(
f"Choices/Literal annotation `{annotation}` on "
f"parameter `{param}` in command `{cmd}` "
f"(`{func.__name__}`), all values must be of the "
"same type!"
)
choices.append(AppCommandOptionChoice(
name=choice_description,
value=choice
))
annotation = choice_type
param_type = _options_type_link.get(annotation)
if not param_type:
raise InvalidAnnotation(
f"Annotation `{annotation}` on parameter "
f"`{param}` in command `{cmd}` (`{func.__name__}`) is not "
"a valid type."
)
options.append(
AppCommandOption(
type=param_type,
name=param,
description=argument_description or "Description not set",
required=required,
choices=choices or MISSING
)
)
ChatCommandHandler.register[cmd] = ClientCommandStructure(
call=func,
cooldown=cooldown,
cooldown_scale=cooldown_scale,
cooldown_scope=cooldown_scope,
app=AppCommand(
name=cmd,
description=description,
type=AppCommandType.CHAT_INPUT,
default_permission=enable_default,
options=options,
guild_id=guild_id
)
)
_log.info(f"Registered command `{cmd}` to `{func.__name__}`.")
return func
return decorator
class ChatCommandHandler(metaclass=Singleton):
"""
Class containing methods used to handle various commands
"""
managers: Dict[str, Any] = {}
register: Dict[str, ClientCommandStructure] = {}
# Endpoints:
__get = "/commands"
__delete = "/commands/{command.id}"
__update = "/commands/{command.id}"
__add = "/commands"
__add_guild = "/guilds/{command.guild_id}/commands"
__get_guild = "/guilds/{guild_id}/commands"
__update_guild = "/guilds/{command.guild_id}/commands/{command.id}"
__delete_guild = "/guilds/{command.guild_id}/commands/{command.id}"
# TODO: Fix docs
def __init__(self, client):
# TODO: Fix docs
self.client = client
self._api_commands: List[AppCommand] = []
logging.debug(
"%i commands registered.",
len(ChatCommandHandler.register.items())
)
self.client.throttler.throttle = dict(map(
lambda cmd: (cmd.call, {}),
ChatCommandHandler.register.values()
))
self.__prefix = f"applications/{self.client.bot.id}"
async def get_commands(self) -> List[AppCommand]:
# TODO: Fix docs
# TODO: Update if discord adds bulk get guild commands
guild_commands = await gather(*map(
lambda guild: self.client.http.get(
self.__prefix + self.__get_guild.format(
guild_id=guild.id if isinstance(guild, Guild) else guild
)
),
self.client.guilds
))
return list(map(
AppCommand.from_dict,
await self.client.http.get(self.__prefix + self.__get)
+ [cmd for guild in guild_commands for cmd in guild]
))
async def remove_command(self, cmd: AppCommand, keep=False):
# TODO: Fix docs
# TODO: Update if discord adds bulk delete commands
remove_endpoint = self.__delete_guild if cmd.guild_id else self.__delete
await self.client.http.delete(
self.__prefix + remove_endpoint.format(command=cmd)
)
if not keep and ChatCommandHandler.register.get(cmd.name):
del ChatCommandHandler.register[cmd.name]
async def remove_commands(
self,
commands: List[AppCommand],
/,
keep: List[AppCommand] = None
):
# TODO: Fix docs
await gather(*list(map(
lambda cmd: self.remove_command(cmd, cmd in (keep or [])),
commands
)))
async def update_command(self, cmd: AppCommand, changes: Dict[str, Any]):
# TODO: Fix docs
# TODO: Update if discord adds bulk update commands
update_endpoint = self.__update_guild if cmd.guild_id else self.__update
await self.client.http.patch(
self.__prefix + update_endpoint.format(command=cmd),
data=changes
)
for key, value in changes.items():
setattr(ChatCommandHandler.register[cmd.name], key, value)
async def update_commands(
self,
to_update: Dict[AppCommand, Dict[str, Any]]
):
# TODO: Fix docs
await gather(*list(map(
lambda cmd: self.update_command(cmd[0], cmd[1]),
to_update.items()
)))
async def add_command(self, cmd: AppCommand):
# TODO: Fix docs
add_endpoint = self.__add
if cmd.guild_id:
add_endpoint = self.__add_guild.format(command=cmd)
res = await self.client.http.post(
self.__prefix + add_endpoint,
data=cmd.to_dict()
)
ChatCommandHandler.register[cmd.name].app.id = Snowflake(res['id'])
async def add_commands(self, commands: List[AppCommand]):
# TODO: Fix docs
await gather(*list(map(
lambda cmd: self.add_command(cmd),
commands
)))
async def __init_existing_commands(self):
# TODO: Fix docs
self._api_commands = await self.get_commands()
for api_cmd in self._api_commands:
cmd = ChatCommandHandler.register.get(api_cmd.name)
if cmd and cmd.app == api_cmd:
cmd.app = api_cmd
async def __remove_unused_commands(self):
"""
Remove commands that are registered by discord but not in use
by the current client!
"""
registered_commands = list(map(
lambda registered_cmd: registered_cmd.app,
ChatCommandHandler.register.values()
))
keep = []
def predicate(target: AppCommand) -> bool:
for reg_cmd in registered_commands:
reg_cmd: AppCommand = reg_cmd
if target == reg_cmd:
return False
elif target.name == reg_cmd.name:
keep.append(target)
return True
to_remove = list(filter(predicate, self._api_commands))
await self.remove_commands(to_remove, keep=keep)
self._api_commands = list(filter(
lambda cmd: cmd not in to_remove,
self._api_commands
))
async def __update_existing_commands(self):
"""
Update all commands where its structure doesn't match the
structure that discord has registered.
"""
to_update: Dict[AppCommand, Dict[str, Any]] = {}
def get_changes(
api: AppCommand,
local: AppCommand
) -> Dict[str, Any]:
update: Dict[str, Any] = {}
if api.description != local.description:
update["description"] = local.description
if api.default_permission != local.default_permission:
update["default_permission"] = local.default_permission
options: List[Dict[str, Any]] = []
if api.options is not MISSING:
if len(api.options) == len(local.options):
def get_option(args: Tuple[int, Any]) \
-> Optional[Dict[str, Any]]:
index, api_option = args
if opt := get_index(local.options, index):
return opt.to_dict()
options = list(filter(
lambda opt: opt is not None,
map(get_option, enumerate(api.options))
))
else:
options = local.options
if api.options is not MISSING and list(
map(AppCommandOption.from_dict, options)) != api.options:
update["options"] = options
return update
for idx, api_cmd in enumerate(self._api_commands):
for loc_cmd in ChatCommandHandler.register.values():
if api_cmd.name != | |
.5*( np.cumsum(u, axis = 1) + np.cumsum(v, axis = 0) )
# Caculate stramelines function
def _streamlines(u, v):
return .5*( np.cumsum(u, axis = 0) - np.cumsum(v, axis = 1) )
# Calculate vorticity approximating hte veloctiy gradient by numerical diffenciation
def _vorticity(u, v):
return np.gradient(u)[1] - np.gradient(v)[0]
# Calculate divergence approximating hte veloctiy gradient by numerical diffenciation
def _divergence(u, v):
return np.gradient(u)[1] + np.gradient(v)[0]
# Calculate the magnitude of a vector
def _magnitude(u, v):
return np.sqrt(u**2 + v**2)
# Finding index on a field by thresholding magnitude
def _index_velocity_vectors(u, v, tau):
return _magnitude(u, v) > tau
# Bayesian approach for selecting the most likely vector a posteriori
# def _vector_selection(X_, u_, v_, n_tr, n_ts):
# # Bayesian Sample Selection with Full, Spheric or Diagonal Covariance Function options
# def __bayesian_sample_selection(u_, v_, n_tr, n_ts, _cov = 'full'):
# # Calculating the prior data convariance matrix
# def ___prior(Y):
# Sp = np.zeros((Y.shape[1], Y.shape[1]))
# # Loop removing each sample from the dataset
# # and calculating the covariance matrix for the entire set
# for i in range(Y.shape[0]):
# Z = np.delete(Y, i, axis = 0) - Y[i, :]
# Sp += np.matmul(Z.T, Z)/Y.shape[0]
# return Sp/Y.shape[0]
# # Calculating the posterior probabilities
# def ___posterior(Y, pl, Sp):
# # Variable Initialization
# pp_ = np.zeros((Y.shape[0]))
# # Loop centering likelihood in each sample
# for y, i in zip(Y, range(Y.shape[0])):
# # Posteriot probabilities
# pp_[i] = (pl + multivariate_normal(y, Sp).logpdf(Y)).sum()
# return pp_
# Y_ = np.concatenate((u_[..., np.newaxis], v_[..., np.newaxis]), axis = 1)
# dim = Y_.shape[1]
# # Defining Gaussian function parameters likelihood
# print(Y_)
# m_ = np.mean(Y_, axis = 0)
# S_ = np.cov(Y_.T) + np.eye(dim)*1e-5
# print(S_)
# pl_ = multivariate_normal(m_, S_).logpdf(Y_) # Defining Gaussian function parameters for the prior
# if 'full': Sp = ___prior(Y_) # Full Covariance Matrix
# if 'spheric': Sp = np.eye(dim) # Spheric Covariance Matrix
# if 'diag': Sp = np.eye(dim) * np.var(Y_, axis = 0) # Diagonal Covariance Matrix
# # Finding for each sample as prior mean the posterior likelihoods
# pp_ = ___posterior(Y_, pl_, Sp)
# pp_ += abs(pp_.min()) + 1e-10
# # Select test index randomly from the index remaining..
# idx_ = np.random.choice(np.argsort(pp_)[::-1], size = n_tr + n_ts, p = pp_/pp_.sum(), replace = False)
# idx_ = idx_[np.random.permutation(idx_.shape[0])]
# # Finding maximum likihoods for training
# return idx_[:n_tr], idx_[-n_ts:]
# Do Bayesian Selection on the velocity vectors per each row
def __select_vector_per_distance(X_, u_, v_):
# Index of vectors
idx_ = np.arange(u_.shape[0], dtype = int)
# Variables Initialization
index_tr_, index_ts_ = [], []
# Loop over distance data
for idx_dist_, n_tr, n_ts in zip(X_[0], X_[1], X_[2]):
# Select Velocity Vectors
idx_tr_, idx_ts_ = __bayesian_sample_selection(u_[idx_dist_], v_[idx_dist_], n_tr, n_ts)
# Save selected Vectors
index_tr_.append( idx_[idx_dist_][idx_tr_] )
index_ts_.append( idx_[idx_dist_][idx_ts_] )
# Stack all selected velocity vectors in an array
return np.concatenate(index_tr_, axis = 0), np.concatenate(index_ts_, axis = 0)
# Adapt Sample number selected to the actual number of samples available
def __adaptative_sample_number(X_, n_tr, n_ts, n_samples, per = 3):
# No. of maximum vector per row
n_tot = n_tr + n_ts
n_row = n_tot//6
# Variables Initialization
idx_, n_tr_, n_ts_ = [], [], []
# Check if there is enough Vectors
if n_samples > n_tot//per:
# loop over distancs available
for y in np.unique(X_[:, 1]):
idx_.append(X_[:, 1] == y)
# How many vectors there are with that distance?
n = idx_[-1].sum()//2
# Get as much vectors as there are availables
if n < n_row:
n_tr_.append( n )
n_ts_.append( n )
else:
n_tr_.append( n_row )
n_ts_.append( n_row )
# Flag - There is enough data
return [idx_, n_tr_, n_ts_], True
# Flag - There is not enough data
else: return [idx_, n_tr_, n_ts_], False
# Adapt Sample number selected to the actual number of samples available
n_samples = u_.shape[0]
D_, flag = __adaptative_sample_number(X_, n_tr, n_ts, n_samples)
# Sufficient amount of vectors so select vectors
if flag: idx_tr_, idx_ts_ = __select_vector_per_distance(D_, u_, v_)
# Do not select any vector
else: idx_tr_, idx_ts_ = [], []
# Return Only training and test Selected Samples
return X_[idx_tr_, :], u_[idx_tr_], v_[idx_tr_], X_[idx_ts_, :], u_[idx_ts_], v_[idx_ts_]
# Redece the dimensions of the clouds velocity field
def _cloud_velocity_field_processing(F_, M_, X_, Y_, U_, V_, x_, y_, step_size, lag, tau = 1e-2):
# Applying an average windown over a vector field
def __mean_velocity_field(F_, M_, tau, step_size):
# Average only between velotiy vectors
def __find_velocity_field_(g_, m_):
if m_.sum() != 0: return np.mean(g_[m_])
else: return 0.
D, N, K = F_.shape
# Variable Initialization
M_ = M_ > tau
f_ = np.zeros((D//step_size, N//step_size, K))
k = step_size//2
# Loop over field compoenent
for i in np.arange(K):
# Loop over step window throughout y-axis
for ii, d in zip(np.arange(k, D + k, step_size), np.arange(D//step_size)):
# Loop over step window throughout X-axis
for iii, n in zip(np.arange(k, N + k, step_size), np.arange(N//step_size)):
# Mean window pixel
f_[d, n, i] = __find_velocity_field_(g_ = F_[(ii - k):(ii + k), (iii - k):(iii + k), i], m_ = M_[(ii - k):(ii + k), (iii - k):(iii + k)])
# Return Average Velocity in vector form
return f_[..., 0].flatten(), f_[..., 1].flatten()
# Lagged list of consecutive vectors
def __lag_data(X_lag, Y_lag, u_lag, v_lag, xy_, uv_, lag):
# Keep the desire number of lags on the list by removing the last and aadding at the bigging
if len(X_lag) == lag:
X_lag.pop(0)
Y_lag.pop(0)
u_lag.pop(0)
v_lag.pop(0)
# Keep adding until we have the desired number of lag time stamps
X_lag.append(xy_[0])
Y_lag.append(xy_[1])
u_lag.append(uv_[0])
v_lag.append(uv_[1])
return X_lag, Y_lag, u_lag, v_lag
# Applying mean window to reduce velocity field dimensions
u_, v_ = __mean_velocity_field(F_, M_, tau, step_size)
# Index of thresholding velocity vectors to remove noisy vectors
idx_ = _magnitude(u_, v_) > tau
# Lagging data for wind velocity field estimation
return __lag_data(X_, Y_, U_, V_, xy_ = [x_[idx_], y_[idx_]], uv_ = [u_[idx_], v_[idx_]], lag = lag)
# Finding index of pixels expecting to intercept the Sun for each horizon (k)
def _pixels_selection(XYZ_, U_, V_, Phi_, Psi_, x_sun_, A_sun_, N_y, N_x, G_, X_, Y_, radius_1):
# Estimate circule center for selection according to estimate arrivale time of a pixel
def __estimate_time(XYZ_, U_, V_, idx_2):
# Initialize variables, identify pixels on the streamline, and proximity sorting index definition
idx_3 = idx_2 > 0
i_ = np.argsort(idx_2[idx_3] - 1)
# Space distance on the x and y axis on a non-linear-metric
z_ = XYZ_[idx_3, 2][i_]
y_ = XYZ_[idx_3, 1][i_]
# Numerical differenciation on a non-linear frame-metric
dz_ = np.gradient(z_)
dy_ = np.gradient(y_)
# Calculate the velocity components for each streamline pixel
w_ = np.sqrt(U_[idx_3][i_]**2 + (dy_*V_[idx_3][i_])**2)
# Integrating and solving for time for each component
t_ = np.cumsum(dz_/(w_ + 1e-25))
# Organizing time instants on the matrix
idx_ = np.argsort(idx_2.flatten())
t_hat_ = np.zeros(idx_.shape)
t_hat_[idx_[-t_.shape[0]:]] = t_
return t_hat_.reshape(idx_2.shape)
# Selecting pixels by streamlines and potential lines that intercept the Sun
def __select_intercepting_potential_line(Psi_mean):
return Psi_ > Psi_mean
# Connected components form the Sun streamline alon equipotential streamlines
def __select_intercepting_streamline(Phi_, Psi_, x_sun, y_sun, Phi_mean, idx_1_):
# Finding connected pixel
def ___next_pixel_coordiantes(Phi_, i, j, idx_sun, idx_):
# Defining errors matrix
E_ = np.zeros((3, 3))
# loop over nerbouring pixels
for k in [-1, 0, 1]:
for m in [-1, 0, 1]:
c = idx_[i + k - 1: i + k + 2, j + m - 1: j + m + 2].sum()
if idx_[i + k, j + m] or (k == 0 and m == 0) or c > 2:
E_[1 + k, 1 + m] = np.inf
else:
E_[1 + k, 1 + m] = ( Phi_mean - Phi_[i + k, j + m])**2
# Unravel error matrix coordiantes of min value error
k_, m_ = np.where(E_ == E_.min())
# Updating new streamline pixel coordiantes
i_new, j_new = i + k_[0] - 1, j + m_[0] - 1
return i_new, j_new
# Variables initialization
i, j, idx_2_ = y_sun, x_sun, np.zeros((N_y, N_x), dtype = int)
# Position Initialization
count = 1
idx_1_[i, j], idx_2_[i, j] = True, count
# Loop to the edge of the frame
while True:
count += 1
# Finding next pixel on the streamline
i, j = | |
flops: %.3f G, params: %.3f M' % (flops / 1000 / 1000 / 1000, params / 1000 / 1000))
@classmethod
def demo_mscale(cls):
from thop import profile
'''
320 : flops: 15.5 G, params: 0.2 M
160 : flops: 3.9 G, params: 0.2 M
80 : flops: 1.0 G, params: 0.2 M
40 : flops: 0.2 G, params: 0.2 M
20 : flops: 0.1 G, params: 0.2 M
10 : flops: 0.0 G, params: 0.2 M
5 : flops: 0.0 G, params: 0.2 M
'''
a = _Upsample_flow_v2()
for i in [320, 160, 80, 40, 20, 10, 5]:
feature = np.zeros((1, 32, i, i))
feature_ = torch.from_numpy(feature).float()
flops, params = profile(a, inputs=(feature_,), verbose=False)
print('%s : flops: %.3f G, params: %.3f M' % (i, flops / 1000 / 1000 / 1000, params / 1000 / 1000))
feature = np.zeros((1, 3, 320, 320))
feature_ = torch.from_numpy(feature).float()
flops, params = profile(a, inputs=(feature_, True), verbose=False)
print('%s : flops: %.3f G, params: %.3f M' % ('output level', flops / 1000 / 1000 / 1000, params / 1000 / 1000))
class _Upsample_flow_v3(tools.abstract_model):
def __init__(self):
super(_Upsample_flow_v3, self).__init__()
class FlowEstimatorDense_temp(tools.abstract_model):
def __init__(self, ch_in, f_channels=(128, 128, 96, 64, 32)):
super(FlowEstimatorDense_temp, self).__init__()
N = 0
ind = 0
N += ch_in
self.conv1 = conv(N, f_channels[ind])
N += f_channels[ind]
ind += 1
self.conv2 = conv(N, f_channels[ind])
N += f_channels[ind]
ind += 1
self.conv3 = conv(N, f_channels[ind])
N += f_channels[ind]
ind += 1
self.conv4 = conv(N, f_channels[ind])
N += f_channels[ind]
ind += 1
self.conv5 = conv(N, f_channels[ind])
N += f_channels[ind]
self.num_feature_channel = N
ind += 1
self.conv_last = conv(N, 2, isReLU=False)
def forward(self, x):
x1 = torch.cat([self.conv1(x), x], dim=1)
x2 = torch.cat([self.conv2(x1), x1], dim=1)
x3 = torch.cat([self.conv3(x2), x2], dim=1)
x4 = torch.cat([self.conv4(x3), x3], dim=1)
x5 = torch.cat([self.conv5(x4), x4], dim=1)
x_out = self.conv_last(x5)
return x5, x_out
class ContextNetwork_temp(nn.Module):
def __init__(self, num_ls=(3, 128, 128, 128, 96, 64, 32, 16)):
super(ContextNetwork_temp, self).__init__()
self.num_ls = num_ls
cnt = 0
cnt_in = num_ls[0]
self.cov1 = conv(num_ls[0], num_ls[1], 3, 1, 1)
cnt += 1 # 1
cnt_in += num_ls[cnt]
self.cov2 = conv(cnt_in, num_ls[cnt + 1], 3, 1, 2)
cnt += 1 # 2
cnt_in += num_ls[cnt]
self.cov3 = conv(cnt_in, num_ls[cnt + 1], 3, 1, 4)
cnt += 1 # 3
cnt_in += num_ls[cnt]
self.cov4 = conv(cnt_in, num_ls[cnt + 1], 3, 1, 8)
cnt += 1 # 4
cnt_in += num_ls[cnt]
self.cov5 = conv(cnt_in, num_ls[cnt + 1], 3, 1, 16)
cnt += 1 # 5
cnt_in += num_ls[cnt]
self.cov6 = conv(cnt_in, num_ls[cnt + 1], 3, 1, 1)
cnt += 1
cnt_in += num_ls[cnt]
self.final = conv(cnt_in, num_ls[cnt + 1], isReLU=False)
def forward(self, x):
x = torch.cat((self.cov1(x), x), dim=1)
x = torch.cat((self.cov2(x), x), dim=1)
x = torch.cat((self.cov3(x), x), dim=1)
x = torch.cat((self.cov4(x), x), dim=1)
x = torch.cat((self.cov5(x), x), dim=1)
x = torch.cat((self.cov6(x), x), dim=1)
x = self.final(x)
return x
class ContextNetwork_temp_2(nn.Module):
def __init__(self, num_ls=(3, 128, 128, 128, 96, 64, 32, 16)):
super(ContextNetwork_temp_2, self).__init__()
self.convs = nn.Sequential(
conv(num_ls[0], num_ls[1], 3, 1, 1),
conv(num_ls[1], num_ls[2], 3, 1, 2),
conv(num_ls[2], num_ls[3], 3, 1, 4),
conv(num_ls[3], num_ls[4], 3, 1, 8),
conv(num_ls[4], num_ls[5], 3, 1, 16),
conv(num_ls[5], num_ls[6], 3, 1, 1),
conv(num_ls[6], num_ls[7], isReLU=False)
)
def forward(self, x):
return self.convs(x)
self.dense_estimator = FlowEstimatorDense_temp(32, (64, 64, 64, 32, 16))
self.context_estimator = ContextNetwork_temp_2(num_ls=(self.dense_estimator.num_feature_channel + 2, 64, 64, 64, 32, 32, 16, 2))
# self.dense_estimator = FlowEstimatorDense_temp(32, (128, 128, 96, 64, 32))
# self.context_estimator = ContextNetwork_temp_2(num_ls=(self.dense_estimator.num_feature_channel + 2, 128, 128, 128, 96, 64, 32, 2))
self.upsample_output_conv = nn.Sequential(conv(3, 16, kernel_size=3, stride=1, dilation=1),
conv(16, 16, stride=2),
conv(16, 32, kernel_size=3, stride=1, dilation=1),
conv(32, 32, stride=2), )
def forward(self, flow_pre, x_raw, if_output_level=False):
if if_output_level:
x = self.upsample_output_conv(x_raw)
else:
x = x_raw
feature, x_out = self.dense_estimator(x)
flow = flow_pre + x_out
flow_fine_f = self.context_estimator(torch.cat([feature, flow], dim=1))
x_out = flow + flow_fine_f
if if_output_level:
x_out = upsample2d_flow_as(x_out, x_raw, mode="bilinear", if_rate=True)
return x_out
@classmethod
def demo_mscale(cls):
from thop import profile
'''
320 : flops: 55.018 G, params: 0.537 M
160 : flops: 13.754 G, params: 0.537 M
80 : flops: 3.439 G, params: 0.537 M
40 : flops: 0.860 G, params: 0.537 M
20 : flops: 0.215 G, params: 0.537 M
10 : flops: 0.054 G, params: 0.537 M
5 : flops: 0.013 G, params: 0.537 M
output level : flops: 3.725 G, params: 0.553 M
'''
a = _Upsample_flow_v3()
for i in [320, 160, 80, 40, 20, 10, 5]:
feature = np.zeros((1, 32, i, i))
flow_pre = np.zeros((1, 2, i, i))
feature_ = torch.from_numpy(feature).float()
flow_pre_ = torch.from_numpy(flow_pre).float()
flops, params = profile(a, inputs=(flow_pre_, feature_,), verbose=False)
print('%s : flops: %.3f G, params: %.3f M' % (i, flops / 1000 / 1000 / 1000, params / 1000 / 1000))
feature = np.zeros((1, 3, 320, 320))
feature_ = torch.from_numpy(feature).float()
flow_pre = np.zeros((1, 2, 80, 80))
flow_pre_ = torch.from_numpy(flow_pre).float()
flops, params = profile(a, inputs=(flow_pre_, feature_, True), verbose=False)
print('%s : flops: %.3f G, params: %.3f M' % ('output level', flops / 1000 / 1000 / 1000, params / 1000 / 1000))
class _Upsample_flow_v4(tools.abstract_model):
def __init__(self, if_mask, if_small=False):
super(_Upsample_flow_v4, self).__init__()
class FlowEstimatorDense_temp(tools.abstract_model):
def __init__(self, ch_in, f_channels=(128, 128, 96, 64, 32), ch_out=2):
super(FlowEstimatorDense_temp, self).__init__()
N = 0
ind = 0
N += ch_in
self.conv1 = conv(N, f_channels[ind])
N += f_channels[ind]
ind += 1
self.conv2 = conv(N, f_channels[ind])
N += f_channels[ind]
ind += 1
self.conv3 = conv(N, f_channels[ind])
N += f_channels[ind]
ind += 1
self.conv4 = conv(N, f_channels[ind])
N += f_channels[ind]
ind += 1
self.conv5 = conv(N, f_channels[ind])
N += f_channels[ind]
self.num_feature_channel = N
ind += 1
self.conv_last = conv(N, ch_out, isReLU=False)
def forward(self, x):
x1 = torch.cat([self.conv1(x), x], dim=1)
x2 = torch.cat([self.conv2(x1), x1], dim=1)
x3 = torch.cat([self.conv3(x2), x2], dim=1)
x4 = torch.cat([self.conv4(x3), x3], dim=1)
x5 = torch.cat([self.conv5(x4), x4], dim=1)
x_out = self.conv_last(x5)
return x5, x_out
class ContextNetwork_temp(nn.Module):
def __init__(self, num_ls=(3, 128, 128, 128, 96, 64, 32, 16)):
super(ContextNetwork_temp, self).__init__()
self.num_ls = num_ls
cnt = 0
cnt_in = num_ls[0]
self.cov1 = conv(num_ls[0], num_ls[1], 3, 1, 1)
cnt += 1 # 1
cnt_in += num_ls[cnt]
self.cov2 = conv(cnt_in, num_ls[cnt + 1], 3, 1, 2)
cnt += 1 # 2
cnt_in += num_ls[cnt]
self.cov3 = conv(cnt_in, num_ls[cnt + 1], 3, 1, 4)
cnt += 1 # 3
cnt_in += num_ls[cnt]
self.cov4 = conv(cnt_in, num_ls[cnt + 1], 3, 1, 8)
cnt += 1 # 4
cnt_in += num_ls[cnt]
self.cov5 = conv(cnt_in, num_ls[cnt + 1], 3, 1, 16)
cnt += 1 # 5
cnt_in += num_ls[cnt]
self.cov6 = conv(cnt_in, num_ls[cnt + 1], 3, 1, 1)
cnt += 1
cnt_in += num_ls[cnt]
self.final = conv(cnt_in, num_ls[cnt + 1], isReLU=False)
def forward(self, x):
x = torch.cat((self.cov1(x), x), dim=1)
x = torch.cat((self.cov2(x), x), dim=1)
x = torch.cat((self.cov3(x), x), dim=1)
x = torch.cat((self.cov4(x), x), dim=1)
x = torch.cat((self.cov5(x), x), dim=1)
x = torch.cat((self.cov6(x), x), dim=1)
x = self.final(x)
return x
class ContextNetwork_temp_2(nn.Module):
def __init__(self, num_ls=(3, 128, 128, 128, 96, 64, 32, 16)):
super(ContextNetwork_temp_2, self).__init__()
self.convs = nn.Sequential(
conv(num_ls[0], num_ls[1], 3, 1, 1),
conv(num_ls[1], num_ls[2], 3, 1, 2),
conv(num_ls[2], num_ls[3], 3, 1, 4),
conv(num_ls[3], num_ls[4], 3, 1, 8),
conv(num_ls[4], num_ls[5], 3, 1, 16),
conv(num_ls[5], num_ls[6], 3, 1, 1),
conv(num_ls[6], num_ls[7], isReLU=False)
)
def forward(self, x):
return self.convs(x)
self.if_mask = if_mask
self.if_small = if_small
if self.if_small:
f_channels_es = (32, 32, 32, 16, 8)
f_channels_ct = (32, 32, 32, 16, 16, 8)
else:
f_channels_es = (64, 64, 64, 32, 16)
f_channels_ct = (64, 64, 64, 32, 32, 16)
if if_mask:
self.dense_estimator_mask = FlowEstimatorDense_temp(32, f_channels=f_channels_es, ch_out=3)
num_ls = (self.dense_estimator_mask.num_feature_channel + 3,) + f_channels_ct + (3,)
self.context_estimator_mask = ContextNetwork_temp_2(num_ls=num_ls)
# self.dense_estimator = FlowEstimatorDense_temp(32, (128, 128, 96, 64, 32))
# self.context_estimator = ContextNetwork_temp_2(num_ls=(self.dense_estimator.num_feature_channel + 2, 128, 128, 128, 96, 64, 32, 2))
self.upsample_output_conv = nn.Sequential(conv(3, 16, kernel_size=3, stride=1, dilation=1),
conv(16, 16, stride=2),
conv(16, 32, kernel_size=3, stride=1, dilation=1),
conv(32, 32, stride=2), )
else:
self.dense_estimator = FlowEstimatorDense_temp(32, f_channels=f_channels_es, ch_out=2)
num_ls = (self.dense_estimator.num_feature_channel + 2,) + f_channels_ct + (2,)
self.context_estimator = ContextNetwork_temp_2(num_ls=num_ls)
# self.dense_estimator = FlowEstimatorDense_temp(32, (128, 128, 96, 64, 32))
# self.context_estimator = ContextNetwork_temp_2(num_ls=(self.dense_estimator.num_feature_channel + 2, 128, 128, 128, | |
utc_fake)
self.convert_between_tz_and_utc(Pacific, utc_fake)
# The next is really dancing near the edge. It works because
# Pacific and Eastern are far enough apart that their "problem
# hours" don't overlap.
self.convert_between_tz_and_utc(Eastern, Pacific)
self.convert_between_tz_and_utc(Pacific, Eastern)
# OTOH, these fail! Don't enable them. The difficulty is that
# the edge case tests assume that every hour is representable in
# the "utc" class. This is always true for a fixed-offset tzinfo
# class (lke utc_real and utc_fake), but not for Eastern or Central.
# For these adjacent DST-aware time zones, the range of time offsets
# tested ends up creating hours in the one that aren't representable
# in the other. For the same reason, we would see failures in the
# Eastern vs Pacific tests too if we added 3*HOUR to the list of
# offset deltas in convert_between_tz_and_utc().
#
# self.convert_between_tz_and_utc(Eastern, Central) # can't work
# self.convert_between_tz_and_utc(Central, Eastern) # can't work
def test_tricky(self):
# 22:00 on day before daylight starts.
fourback = self.dston - timedelta(hours=4)
ninewest = FixedOffset(-9*60, "-0900", 0)
fourback = fourback.replace(tzinfo=ninewest)
# 22:00-0900 is 7:00 UTC == 2:00 EST == 3:00 DST. Since it's "after
# 2", we should get the 3 spelling.
# If we plug 22:00 the day before into Eastern, it "looks like std
# time", so its offset is returned as -5, and -5 - -9 = 4. Adding 4
# to 22:00 lands on 2:00, which makes no sense in local time (the
# local clock jumps from 1 to 3). The point here is to make sure we
# get the 3 spelling.
expected = self.dston.replace(hour=3)
got = fourback.astimezone(Eastern).replace(tzinfo=None)
self.assertEqual(expected, got)
# Similar, but map to 6:00 UTC == 1:00 EST == 2:00 DST. In that
# case we want the 1:00 spelling.
sixutc = self.dston.replace(hour=6, tzinfo=utc_real)
# Now 6:00 "looks like daylight", so the offset wrt Eastern is -4,
# and adding -4-0 == -4 gives the 2:00 spelling. We want the 1:00 EST
# spelling.
expected = self.dston.replace(hour=1)
got = sixutc.astimezone(Eastern).replace(tzinfo=None)
self.assertEqual(expected, got)
# Now on the day DST ends, we want "repeat an hour" behavior.
# UTC 4:MM 5:MM 6:MM 7:MM checking these
# EST 23:MM 0:MM 1:MM 2:MM
# EDT 0:MM 1:MM 2:MM 3:MM
# wall 0:MM 1:MM 1:MM 2:MM against these
for utc in utc_real, utc_fake:
for tz in Eastern, Pacific:
first_std_hour = self.dstoff - timedelta(hours=2) # 23:MM
# Convert that to UTC.
first_std_hour -= tz.utcoffset(None)
# Adjust for possibly fake UTC.
asutc = first_std_hour + utc.utcoffset(None)
# First UTC hour to convert; this is 4:00 when utc=utc_real &
# tz=Eastern.
asutcbase = asutc.replace(tzinfo=utc)
for tzhour in (0, 1, 1, 2):
expectedbase = self.dstoff.replace(hour=tzhour)
for minute in 0, 30, 59:
expected = expectedbase.replace(minute=minute)
asutc = asutcbase.replace(minute=minute)
astz = asutc.astimezone(tz)
self.assertEqual(astz.replace(tzinfo=None), expected)
asutcbase += HOUR
def test_bogus_dst(self):
class ok(tzinfo):
def utcoffset(self, dt): return HOUR
def dst(self, dt): return HOUR
now = self.theclass.now().replace(tzinfo=utc_real)
# Doesn't blow up.
now.astimezone(ok())
# Does blow up.
class notok(ok):
def dst(self, dt): return None
self.assertRaises(ValueError, now.astimezone, notok())
# Sometimes blow up. In the following, tzinfo.dst()
# implementation may return None or not None depending on
# whether DST is assumed to be in effect. In this situation,
# a ValueError should be raised by astimezone().
class tricky_notok(ok):
def dst(self, dt):
if dt.year == 2000:
return None
else:
return 10*HOUR
dt = self.theclass(2001, 1, 1).replace(tzinfo=utc_real)
self.assertRaises(ValueError, dt.astimezone, tricky_notok())
def test_fromutc(self):
self.assertRaises(TypeError, Eastern.fromutc) # not enough args
now = datetime.utcnow().replace(tzinfo=utc_real)
self.assertRaises(ValueError, Eastern.fromutc, now) # wrong tzinfo
now = now.replace(tzinfo=Eastern) # insert correct tzinfo
enow = Eastern.fromutc(now) # doesn't blow up
self.assertEqual(enow.tzinfo, Eastern) # has right tzinfo member
self.assertRaises(TypeError, Eastern.fromutc, now, now) # too many args
self.assertRaises(TypeError, Eastern.fromutc, date.today()) # wrong type
# Always converts UTC to standard time.
class FauxUSTimeZone(USTimeZone):
def fromutc(self, dt):
return dt + self.stdoffset
FEastern = FauxUSTimeZone(-5, "FEastern", "FEST", "FEDT")
# UTC 4:MM 5:MM 6:MM 7:MM 8:MM 9:MM
# EST 23:MM 0:MM 1:MM 2:MM 3:MM 4:MM
# EDT 0:MM 1:MM 2:MM 3:MM 4:MM 5:MM
# Check around DST start.
start = self.dston.replace(hour=4, tzinfo=Eastern)
fstart = start.replace(tzinfo=FEastern)
for wall in 23, 0, 1, 3, 4, 5:
expected = start.replace(hour=wall)
if wall == 23:
expected -= timedelta(days=1)
got = Eastern.fromutc(start)
self.assertEqual(expected, got)
expected = fstart + FEastern.stdoffset
got = FEastern.fromutc(fstart)
self.assertEqual(expected, got)
# Ensure astimezone() calls fromutc() too.
got = fstart.replace(tzinfo=utc_real).astimezone(FEastern)
self.assertEqual(expected, got)
start += HOUR
fstart += HOUR
# Check around DST end.
start = self.dstoff.replace(hour=4, tzinfo=Eastern)
fstart = start.replace(tzinfo=FEastern)
for wall in 0, 1, 1, 2, 3, 4:
expected = start.replace(hour=wall)
got = Eastern.fromutc(start)
self.assertEqual(expected, got)
expected = fstart + FEastern.stdoffset
got = FEastern.fromutc(fstart)
self.assertEqual(expected, got)
# Ensure astimezone() calls fromutc() too.
got = fstart.replace(tzinfo=utc_real).astimezone(FEastern)
self.assertEqual(expected, got)
start += HOUR
fstart += HOUR
#############################################################################
# oddballs
class Oddballs(unittest.TestCase):
def test_bug_1028306(self):
# Trying to compare a date to a datetime should act like a mixed-
# type comparison, despite that datetime is a subclass of date.
as_date = date.today()
as_datetime = datetime.combine(as_date, time())
self.assertTrue(as_date != as_datetime)
self.assertTrue(as_datetime != as_date)
self.assertFalse(as_date == as_datetime)
self.assertFalse(as_datetime == as_date)
self.assertRaises(TypeError, lambda: as_date < as_datetime)
self.assertRaises(TypeError, lambda: as_datetime < as_date)
self.assertRaises(TypeError, lambda: as_date <= as_datetime)
self.assertRaises(TypeError, lambda: as_datetime <= as_date)
self.assertRaises(TypeError, lambda: as_date > as_datetime)
self.assertRaises(TypeError, lambda: as_datetime > as_date)
self.assertRaises(TypeError, lambda: as_date >= as_datetime)
self.assertRaises(TypeError, lambda: as_datetime >= as_date)
# Nevertheless, comparison should work with the base-class (date)
# projection if use of a date method is forced.
self.assertEqual(as_date.__eq__(as_datetime), True)
different_day = (as_date.day + 1) % 20 + 1
as_different = as_datetime.replace(day= different_day)
self.assertEqual(as_date.__eq__(as_different), False)
# And date should compare with other subclasses of date. If a
# subclass wants to stop this, it's up to the subclass to do so.
date_sc = SubclassDate(as_date.year, as_date.month, as_date.day)
self.assertEqual(as_date, date_sc)
self.assertEqual(date_sc, as_date)
# Ditto for datetimes.
datetime_sc = SubclassDatetime(as_datetime.year, as_datetime.month,
as_date.day, 0, 0, 0)
self.assertEqual(as_datetime, datetime_sc)
self.assertEqual(datetime_sc, as_datetime)
def test_extra_attributes(self):
for x in [date.today(),
time(),
datetime.utcnow(),
timedelta(),
tzinfo(),
timezone(timedelta())]:
with self.assertRaises(AttributeError):
x.abc = 1
def test_check_arg_types(self):
class Number:
def __init__(self, value):
self.value = value
def __int__(self):
return self.value
for xx in [decimal.Decimal(10),
decimal.Decimal('10.9'),
Number(10)]:
with self.assertWarns(DeprecationWarning):
self.assertEqual(datetime(10, 10, 10, 10, 10, 10, 10),
datetime(xx, xx, xx, xx, xx, xx, xx))
with self.assertRaisesRegex(TypeError, '^an integer is required '
r'\(got type str\)$'):
datetime(10, 10, '10')
f10 = Number(10.9)
with self.assertRaisesRegex(TypeError, '^__int__ returned non-int '
r'\(type float\)$'):
datetime(10, 10, f10)
class Float(float):
pass
s10 = Float(10.9)
with self.assertRaisesRegex(TypeError, '^integer argument expected, '
'got float$'):
datetime(10, 10, s10)
with self.assertRaises(TypeError):
datetime(10., 10, 10)
with self.assertRaises(TypeError):
datetime(10, 10., 10)
with self.assertRaises(TypeError):
datetime(10, 10, 10.)
with self.assertRaises(TypeError):
datetime(10, 10, 10, 10.)
with self.assertRaises(TypeError):
datetime(10, 10, 10, 10, 10.)
with self.assertRaises(TypeError):
datetime(10, 10, 10, 10, 10, 10.)
with self.assertRaises(TypeError):
datetime(10, 10, 10, 10, 10, 10, 10.)
#############################################################################
# Local Time Disambiguation
# An experimental reimplementation of fromutc that respects the "fold" flag.
class tzinfo2(tzinfo):
def fromutc(self, dt):
"datetime in UTC -> datetime in local time."
if not isinstance(dt, datetime):
raise TypeError("fromutc() requires a datetime argument")
if dt.tzinfo is not self:
raise ValueError("dt.tzinfo is not self")
# Returned value satisfies
# dt + ldt.utcoffset() = ldt
off0 = dt.replace(fold=0).utcoffset()
off1 = dt.replace(fold=1).utcoffset()
if off0 is None or off1 is None or dt.dst() is None:
raise ValueError
if off0 == off1:
ldt = dt + off0
off1 = ldt.utcoffset()
if off0 == off1:
return ldt
# Now, we discovered both possible offsets, so
# we can just try four possible solutions:
for off in [off0, off1]:
ldt = dt + off
if ldt.utcoffset() == off:
return ldt
ldt = ldt.replace(fold=1)
if ldt.utcoffset() == off:
return ldt
raise ValueError("No suitable local time found")
# Reimplementing simplified US timezones to respect the "fold" flag:
class USTimeZone2(tzinfo2):
def __init__(self, hours, reprname, stdname, dstname):
self.stdoffset = timedelta(hours=hours)
self.reprname = reprname
self.stdname = stdname
self.dstname = dstname
def __repr__(self):
return self.reprname
def tzname(self, dt):
if self.dst(dt):
return self.dstname
else:
return self.stdname
def utcoffset(self, dt):
return self.stdoffset + self.dst(dt)
def dst(self, dt):
if dt | |
= {'type': key, 'parsed_data': value}
handle[len(handle):] = [handleitem]
handle_json = simplejson.dumps(handle)
self._debugmsg('modifyHandle', "JSON: " + str(handle_json))
response, _ = self.http.request(uri, method='PUT', headers=hdrs,
body=handle_json)
output = self._checkresponsecode("modifyHandle", response.status)
if output is None or False:
return False
else:
return True
def deleteHandle(self, prefix, key, suffix=''):
"""Delete a handle from the server.
Parameters:
prefix: URI to the resource, or the prefix if suffix is not ''.
suffix: The suffix of the handle. Default: ''.
Returns True if deleted, False otherwise.
"""
uri = self._geturi(prefix, '', '', suffix)
if not key or key is "":
hdrs = self._getheader("DELETE")
self._debugmsg('deleteHandle', "DELETE Handle " + prefix + "/"
+ suffix + " of URI " + uri)
response, _ = self.http.request(uri, method='DELETE',
headers=hdrs)
else:
self._debugmsg('deleteHandle', "DELETE field " + key + " of URI"
+ uri)
handle_json = self.retrieveHandle(prefix, suffix)
if not handle_json:
self._debugmsg('deleteHandle',
"Cannot modify an unexisting handle: " + uri)
return False
keyfound = False
handle = simplejson.loads(handle_json)
for item in handle:
if 'type' in item and item['type'] == key:
keyfound = True
self._debugmsg('deleteHandle', "Found key " + key +
" value=" + str(item['parsed_data']))
self._debugmsg('deleteHandle', "Remove Key's Field")
del handle[handle.index(item)]
break
if keyfound is False:
self._debugmsg('deleteHandle', "No Value of key is found. "
"Quiting....")
return False
else:
hdrs = self._getheader("UPDATE")
handle_json = simplejson.dumps(handle)
self._debugmsg('deleteHandle', "JSON: " + str(handle_json))
response, _ = self.http.request(uri, method='PUT', headers=hdrs,
body=handle_json)
output = self._checkresponsecode("deleteHandle", response.status)
self._debugmsg('deleteHandle', "OUTPUT = " + str(output))
if (output is None) or (output is False):
return False
else:
return True
def updateHandleWithLocation(self, prefix, value, suffix=''):
"""Update the 10320/LOC handle type field of the handle record.
Parameters:
prefix: URI to the resource, or the prefix if suffix is not ''.
value: New value to store in "10320/LOC"
suffix: The suffix of the handle. Default: ''.
Returns True if updated, False otherwise.
"""
uri = self._geturi(prefix, '', value, suffix)
loc10320 = self.getValueFromHandle(prefix, "10320/LOC", suffix)
self._debugmsg('updateHandleWithLocation', "found 10320/LOC: " +
str(loc10320))
if loc10320 is None:
loc10320 = ('<locations><location id="0" href="' + value +
'" /></locations>')
response = self.modifyHandle(prefix, "10320/LOC", loc10320, suffix)
if not response:
self._debugmsg('updateHandleWithLocation',
"Cannot update handle: " + uri +
" with location: " + value)
return False
else:
lt = LocationType(loc10320, self.debug)
response = lt.checkInclusion(value)
if response:
self._debugmsg('updateHandleWithLocation',
"the location " + value +
" is already included!")
else:
resp, content = lt.addLocation(value)
if not resp:
self._debugmsg('updateHandleWithLocation',
"the location " + value +
" cannot be added")
else:
if not self.modifyHandle(prefix, "10320/LOC",
content, suffix):
self._debugmsg('updateHandleWithLocation',
"Cannot update handle: " + uri +
" with location: " + value)
else:
self._debugmsg('updateHandleWithLocation',
"location added")
return True
return False
return True
def removeLocationFromHandle(self, prefix, value, suffix=''):
"""Remove one of the 10320/LOC handle type values
from the handle record.
Parameters:
prefix: URI to the resource, or the prefix if suffix is not ''.
value: Value to be deleted from the "10320/LOC".
suffix: The suffix of the handle. Default: ''.
Returns True if removed, False otherwise.
"""
uri = self._geturi(prefix, '', '', suffix)
loc10320 = self.getValueFromHandle(prefix, "10320/LOC", suffix)
if loc10320 is None:
self._debugmsg('removeLocationFromHandle',
"Cannot remove location: " + value +
" from handle: " + uri +
", the field 10320/LOC does not exists")
return False
else:
lt = LocationType(loc10320, self.debug)
if not lt.checkInclusion(value):
self._debugmsg('removeLocationFromHandle', "the location " +
value + " is not included!")
return False
else:
response, content = lt.removeLocation(value)
if response:
if self.modifyHandle(prefix, "10320/LOC", content, suffix):
return True
self._debugmsg('removeLocationFromHandle', "the location " +
value + " cannot be removed")
return False
def updateLocationInHandle(self, prefix, oldvalue, newvalue, suffix=''):
"""Update one of the 10320/LOC handle type values
in the handle record.
Parameters:
prefix: URI to the resource, or the prefix if suffix is not ''.
oldvalue: Value to be updated/replaced in the "10320/LOC".
newvalue: Value to be updated/put in the "10320/LOC".
suffix: The suffix of the handle. Default: ''.
Returns True if removed, False otherwise.
"""
uri = self._geturi(prefix, '', '', suffix)
loc10320 = self.getValueFromHandle(prefix, "10320/LOC", suffix)
if loc10320 is None:
self._debugmsg('updateLocationInHandle',
"Cannot update location: " + oldvalue +
" from handle: " + uri +
", the field 10320/LOC does not exists")
return False
else:
lt = LocationType(loc10320, self.debug)
if not lt.checkInclusion(oldvalue):
self._debugmsg('updateLocationInHandle', "the location " +
oldvalue + " is not included!")
return False
else:
response, content = lt.updateLocation(oldvalue, newvalue)
if response:
if self.modifyHandle(prefix, "10320/LOC", content, suffix):
return True
self._debugmsg('removeLocationFromHandle', "the location " +
value + " cannot be updated")
return False
################################################################################
# EPIC Client Location Type Class #
################################################################################
class LocationType(object):
"""Class implementing a 10320/LOC handle type.
Expected format for 10320/LOC handle type:
<locations>
<location id="0" href="location" country="xx" weight="0" />
</locations>
"""
def __init__(self, field, debug=False):
self.domfield = minidom.parseString(field)
self.debug = debug
def _debugmsg(self, method, msg):
"""Internal: Print a debug message if debug is enabled."""
if self.debug:
print "[", method, "]", msg
def isEmpty(self):
"""Check if the 10320/LOC handle type field is empty.
Parameters:
Returns True and 0 if empty,
False and the number of locations otherwise.
"""
locations = self.domfield.getElementsByTagName("location")
if locations.length == 0:
self._debugmsg('isEmpty', "the 10320/LOC field is empty")
return True, 0
self._debugmsg('isEmpty', "the 10320/LOC field contains " +
str(locations.length) + " locations")
return False, str(locations.length)
def checkInclusion(self, loc):
"""Check if a 10320/LOC handle type value is included.
Parameters:
loc: The replica location PID value.
Returns True if it is included, False otherwise.
"""
locations = self.domfield.getElementsByTagName("location")
for url in locations:
if url.getAttribute('href') == loc:
self._debugmsg('checkInclusion',
"the location (" + loc + ") is included")
return True
self._debugmsg('checkInclusion',
"the location (" + loc + ") is not included")
return False
def removeLocation(self, loc):
"""Remove a replica PID from the 10320/LOC handle type field.
Parameters:
loc: The replica location PID value.
Returns True and the 10320/LOC handle type field itself
if the value is removed, False and None otherwise.
"""
main = self.domfield.childNodes[0]
locations = self.domfield.getElementsByTagName("location")
for url in locations:
if url.getAttribute('href') == loc:
main.removeChild(url)
self._debugmsg('removeLocation', "removed location: " + loc)
return True, main.toxml()
self._debugmsg('removeLocation', "cannot remove location: " + loc)
return False, None
def addLocation(self, loc):
"""Add a replica PID to the 10320/LOC handle type field.
Parameters:
loc: The replica location PID value.
Returns True and the 10320/LOC handle type field itself
if the value is added, False and None otherwise.
"""
try:
newurl = self.domfield.createElement("location")
_, content = self.isEmpty()
newurl.setAttribute('id', content)
newurl.setAttribute('href', loc)
self.domfield.childNodes[0].appendChild(newurl)
main = self.domfield.childNodes[0]
self._debugmsg('addLocation', "added new location: " + loc)
return True, main.toxml()
except TypeError:
self._debugmsg('addLocation', "an XML TypeError occurred, "
"adding the new location: " + loc)
return False, None
except AttributeError:
self._debugmsg('addLocation', "an XML AttributeError occurred, "
"adding the new location: " + loc)
return False, None
def updateLocation(self, oldloc, newloc):
"""Update a entry from the 10320/LOC handle type field.
Parameters:
oldloc: The value to replace in the 10320/LOC field.
newloc: The new value for the 10320/LOC field.
Returns True and the 10320/LOC handle type field itself
if the value is updated, False and None otherwise.
"""
main = self.domfield.childNodes[0]
locations = self.domfield.getElementsByTagName("location")
for url in locations:
if url.getAttribute('href') == oldloc:
newurl = self.domfield.createElement("location")
_, content = self.isEmpty()
newurl.setAttribute('id', url.getAttribute('id'))
newurl.setAttribute('href', newloc)
main.replaceChild(newurl, url)
self._debugmsg('updateLocation', "updated location: " + oldloc
+ "with: " + newloc)
return True, main.toxml()
self._debugmsg('updateLocation', "cannot update location: " + loc)
return False, None
###############################################################################
# EPIC Client Credentials Class #
###############################################################################
class Credentials(object):
"""
get credentials from different storages, right now
irods or filesystem. please store credentials in the
following format, otherwise there are problems...
{
"baseuri": "https://epic_api_endpoint here",
"username": "USER",
"prefix": "YYY",
"password": "ZZZZZZZ",
"accept_format": "application/json",
"debug": "False"
}
"""
def __init__(self, store, filename):
"""initialize member variables"""
self.store = store
self.filename = filename
self.debug = False
self.baseuri = None
self.username = None
self.prefix = None
self.password = None
self.accept_format = 'application/json'
def parse(self):
"""parse credentials from json file on filespace/irods.
if you want to use irods you need embedded python!
This method terminates the program on error
"""
if self.store == "os":
try:
filehandle = open(self.filename, "r")
except IOError | |
import matplotlib.pyplot as plt
import numpy as np
from numpy import cross, eye
from scipy.linalg import expm, norm
import pandas as pd
from scipy.spatial.transform import Rotation as R
from pyts.decomposition import SingularSpectrumAnalysis
def modeshape_sync_lstsq(mode_shape_vec):
"""
Creates a straight line fit in the complex plane and alligns the mode shape with the real-axis.
:param mode_shape_vec: Mode shape vector
:type mode_shape_vec: array(float)
:return _n: Alligned mode shape vector
"""
_n = np.zeros_like(mode_shape_vec)
for i in range(np.shape(mode_shape_vec)[1]):
_mode = mode_shape_vec[:,i]
z = np.arctan(np.average(np.imag(_mode)/np.real(_mode),weights = np.abs(_mode)**1e4))
_n[:,i] = _mode*(np.cos(-1*z)+1j*np.sin(-1*z))
return _n
def modeshape_scaling_DP(mode_shape_vec, driving_point,sync = True):
"""
Scales mode shapes according to the driving point measurement.
:param mode_shape_vec: Mode shape vector
:type mode_shape_vec: array(float)
:param driving_point: Driving point location
:type driving_point: int
:param sync: Allign mode shape with the real-axis
:type sync: bool, optional
:return: Scalled mode shape
"""
_mode = mode_shape_vec
for i in range(np.shape(mode_shape_vec)[1]):
_mode[:,i] = _mode[:,i]/np.sqrt(mode_shape_vec[driving_point,i])
if sync:
_mode = modeshape_sync_lstsq(_mode)
return _mode
def MCF(mod):
"""
Calculate Mode Complexity Factor (MCF)
:param mod: Mode shape
:type mod: array(float)
:return: Mode complexity factor
"""
sxx = np.real(mod).T@np.real(mod)
syy = np.imag(mod).T@np.imag(mod)
sxy = np.real(mod).T@np.imag(mod)
mcf = (1 - ((sxx-syy)**2+4*sxy**2)/((sxx+syy)**2))
return mcf
def flatten_FRFs(Y):
"""
Flattens input FRF matrix Y from shape (out,in,freq) in (out x in,freq)
:param Y: Matrix of FRFs [out,in,f]
:type Y: array(float)
:return: Matrix of FRFs [out x in,f]
"""
new = np.zeros((Y.shape[0] * Y.shape[1], Y.shape[2]), dtype=complex)
_len = Y.shape[1]
for i in range(Y.shape[0]):
new[_len * i:_len * (i + 1), :] = Y[i, :, :]
return new
def unflatten_modes(_modes_acc,Y):
"""
Unflattens mode shapes based on the shape of the input FRF matrix [out x in] in [out, in]
:param _modes_acc: Mode shape [out x in]
:type _modes_acc: array(float)
:param Y:
:return: Unflattened mode shape [out, in]
"""
new_mode = np.zeros((Y.shape[0],Y.shape[1],_modes_acc.shape[1]),dtype = complex)
_len = Y.shape[1]
for i in range(Y.shape[0]):
new_mode[i,:,:] = _modes_acc[i*_len:(i+1)*_len,:]
return new_mode
def complex_plot(mode_shape,color = "k"):
"""
Plots a mode shape on a radial plot.
:param mode_shape: mode shape
:type mode_shape: array(float)
:param color: Color of the plot
:type color: str
"""
plt.figure(figsize = (3,3))
ax1 = plt.subplot(111,projection = "polar")
for x in mode_shape:
ax1.plot([0,np.angle(x)],[0,np.abs(x)],marker='.',color = color,alpha = 0.5)
plt.yticks([])
def complex_plot_3D(mode_shape):
"""
Plots a 3D mode shape on a radial plot.
:param mode_shape: 3D mode shape
:type mode_shape: array(float)
"""
plt.figure(figsize = (3,3))
ax1 = plt.subplot(111,projection = "polar")
for i,color in enumerate(["tab:red","tab:green","tab:blue"]):
for x in mode_shape[:,i]:
ax1.plot([0,np.angle(x)],[0,np.abs(x)],marker='.',color = color,alpha = 0.5)
plt.yticks([])
def mode_animation(mode_shape, scale, no_points=60,abs_scale = True):
"""
Creates an animation sequence from the mode shape and scales the displacemetns.
:param mode_shape: mode shape
:type mode_shape: array(float)
:param scale: mode shape
:type scale: float
:param no_points: Number of points in the animation sequence
:type no_points: int, optional
:return: Animation sequence
"""
ann = np.zeros((mode_shape.shape[0], mode_shape.shape[1], no_points))
for g, _t in enumerate(np.linspace(0, 2, no_points)):
ann[:, :, g] = (np.real(mode_shape) * np.cos(2 * np.pi * _t) - np.imag(mode_shape) * np.sin(
2 * np.pi * _t))
if abs_scale:
ann = ann / np.max(ann) * scale
else:
ann = ann * scale
return ann
def coh_frf(y_1, y_2):
"""
Calculates values of coherence between two FRFs.
:param y_1: FRF 1
:type y_1: array(float)
:param y_2: FRF 2
:type y_2: array(float)
:return: coherence criterion
"""
y_1_k = np.conjugate(y_1)
y_2_k = np.conjugate(y_2)
def vector(h_, h_K):
"""
:param h_: complex vector
:param h_K: conjugated complex vector
:return: vector product
"""
vec = np.dot(h_, h_K)
return vec
coh = np.abs(vector((y_1 + y_2), (y_1_k + y_2_k))) / 2 / (vector(y_1_k, y_1) + vector(y_2_k, y_2))
coh_abs = np.abs(coh)
return coh_abs
def dict_animation(_modeshape,a_type,mesh= None,pts = None,fps = 30,r_scale = 10,no_points = 60, object_list = None,abs_scale = True):
"""
Creates a predefined dictionary for animation sequency in the 3D display.
:param _modeshape: A mode shape or response to be animated
:type _modeshape: array(float)
:param a_type: Animation type ("modeshape" or "object")
:type a_type: str
:param mesh: Mesh to be animated
:type mesh: array(float), optional
:param pts: Points to be animated
:type pts: array(float), optional
:param fps: Frames per second of the animation
:type fps: int, optional
:param r_scale: Relative scale of the displacement
:type r_scale: float, optional
:param no_points: Number of points in the animation sequence
:type no_points: int, optional
:param object_list: A list containing objects to be animated
:type object_list: list, optional
:return:
"""
mode_dict = dict()
mode_dict["animation_pts"] = mode_animation(_modeshape, r_scale, no_points=no_points,abs_scale = abs_scale)
mode_dict["fps"] = fps
if a_type == "modeshape":
mode_dict["or_pts"] = pts
mode_dict["mesh"] = mesh
mode_dict["scalars"] = True
elif a_type == "object":
mode_dict["objects_list"] = object_list
return mode_dict
def CMIF(FRF, return_svector=False):
"""
Calculates a CMIF parameter of input FRF matrix
:param FRF: Input FRF matrix
:type FRF: array(float)
:param singular_vector: Return corresponding singular vectors
:type singular_vector: bool, optional
:return: CMIF parameters (singular values with or without left and right singular vectors)
"""
_f = FRF.shape[0]
val = np.min([FRF.shape[1], FRF.shape[2]])
_S = np.zeros((_f, val))
if return_svector:
_U = np.zeros((_f, FRF.shape[1], FRF.shape[1]), dtype="complex")
_V = np.zeros((_f, FRF.shape[2], FRF.shape[2]), dtype="complex")
for i in range(_f):
if return_svector:
U, S, VH = np.linalg.svd(FRF[i, :, :], full_matrices=True, compute_uv=True)
V = np.conj(VH).T
_S[i, :] = S
_U[i, :, :] = U
_V[i, :, :] = V
else:
S = np.linalg.svd(FRF[i, :, :], full_matrices=True, compute_uv=False)
_S[i, :] = S
if return_svector:
return _U, _S, _V
else:
return _S
def TSVD(matrix,reduction = 0):
"""
Filters a FRF matrix with a truncated singular value decomposition (TSVD) by removing the smallest singular values.
:param matrix: Matrix to be filtered by singular value decomposition
:type matrix: array(float)
:param reduction: Number of singular values not taken into account by reconstruction of the matrix
:type reduction: int, optional
:return: Filtered matrix
:rtype: array(float)
"""
U, s, VH = np.linalg.svd(matrix)
kk = s.shape[1] - reduction
Uk = U[:, :, :kk]
Sk = np.zeros((matrix.shape[0], kk, kk))
for i in range(matrix.shape[0]):
Sk[i] = np.diag(s[i, :kk])
Vk = VH[:, :kk, :]
return Uk @ Sk @ Vk
def M(axis, theta):
"""
Calculates rotational matrix based on the Euler-Rodrigues formula.
:param axis: Axis of rotation
:type axis: array(float)
:param theta: Angle of rotation
:type theta: float
:return: Rotational matrix
"""
t = expm(cross(eye(3), axis / norm(axis) * (theta)))
return t
def angle(vector1, vector2):
"""
Calculates angle of rotation between two 3D vectors.
:param vector1: 3D vector
:type vector1: array(float)
:param vector2: 3D vector
:type vector2: array(float)
:return: angle
"""
v1_u = unit_vector(vector1)
v2_u = unit_vector(vector2)
minor = np.linalg.det(np.stack((v1_u[-2:], v2_u[-2:])))
if minor == 0:
sign = 1
else:
sign = -np.sign(minor)
dot_p = np.dot(v1_u, v2_u)
dot_p = min(max(dot_p, -1.0), 1.0)
return sign * np.arccos(dot_p)
def rotation_matrix_from_vectors(vec1, vec2):
"""
Find the rotation matrix that aligns vec1 to vec2
:param vec1: A 3D "source" vector
:type vec1: array(float)
:param vec2: A 3D "destination" vector
:type vec2: array(float)
:return: Rotational matrix which when applied to vec1, aligns it with vec2.
"""
vec1 += np.random.random(3) / 1e10
vec2 += np.random.random(3) / 1e10
a, b = (vec1 / np.linalg.norm(vec1)).reshape(3), (vec2 / np.linalg.norm(vec2)).reshape(3)
if (np.abs(a) == np.abs(b)).all():
return np.diag([1, 1, 1])
else:
v = np.cross(a, b)
c = np.dot(a, b)
s = np.linalg.norm(v)
kmat = np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])
rotation_matrix = np.eye(3) + kmat + kmat.dot(kmat) * ((1 - c) / (s ** 2))
return rotation_matrix
def unit_vector(vector):
"""
Returns the unit vector of input vector.
:param vector: A 3D "source" vector
:type vector: array(float)
:return unit vector:
"""
return vector / np.linalg.norm(vector)
def angle_between(v1, v2):
"""
Calculates angle of rotation between two 3D vectors.
:param vector1: 3D vector
:type vector1: array(float)
:param vector2: 3D vector
:type vector2: array(float)
:return: angle
"""
v1_u = unit_vector(v1)
v2_u = unit_vector(v2)
return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))
def generate_channels_from_sensors(df):
"""
Generates a set of channels based on the orientation of sensors. CUrrent implementation assumes that each sensor has
three channels (i.e. tri-axial sensors).
:param df: A DataFrame containing information on sensors
:type df: pd.DataFrame
:return: A DataFrame containing information on channels
"""
columns_chann = ["Name", "Description", "Quantity", "Grouping",
"Position_1", "Position_2", "Position_3", "Direction_1", "Direction_2", "Direction_3"]
df_ch = pd.DataFrame(columns=columns_chann)
axes = ["x", "y", "z"]
| |
cmake-optional">[</span>')
elif self.output_format == "latex":
self._output(r"\sphinxoptional{")
else:
self._output("[")
def depart_desc_cmake_optional(self, node):
"""Depart function for :cls:`desc_cmake_optional`."""
del self._param_seperator_stack[-1]
if self.output_format == "html":
self._output('<span class="optional cmake-optional">]</span>')
elif self.output_format == "latex":
self._output("}")
else:
self._output("]")
def visit_desc_cmake_group(self, node):
"""Visitor function for :cls:`desc_cmake_group`."""
self._handle_basic_parameter_visit(node)
self._param_seperator_stack.append(node.child_text_separator)
self._first_param = True
if self.output_format == "html":
self._output('<span class="sig-cmake-paramgrp">(</span>')
elif self.output_format == "latex":
self._output(r"\sphinxcmakeparamgrp{")
else:
self._output("(")
def depart_desc_cmake_group(self, node):
"""Depart function for :cls:`desc_cmake_group`."""
del self._param_seperator_stack[-1]
if self.output_format == "html":
self._output('<span class="sig-cmake-paramgrp">)</span>')
elif self.output_format == "latex":
self._output("}")
else:
self._output(")")
def visit_desc_cmake_choice(self, node):
"""Visitor function for :cls:`desc_cmake_choice`."""
self._handle_basic_parameter_visit(node)
self._first_param = True
if self.output_format == "html":
self._param_seperator_stack.append(
'<span class="sig-cmake-choice">|</span>')
elif self.output_format == "latex":
self._param_seperator_stack.append("}{")
self._output(r"\sphinxcmakechoice{")
else:
self._param_seperator_stack.append("|")
def depart_desc_cmake_choice(self, node):
"""Depart function for :cls:`desc_cmake_choice`."""
del self._param_seperator_stack[-1]
if self.output_format == "latex":
self._output("}")
def visit_node(translator, node, output_format):
"""
Generic node visitor function.
This makes sure that *translator* has an instance of :cls:`TranslatorState`
as its :attr:`!cmake_state` attribute and calls the respective
:func:`!visit_XXX` function on that.
"""
if not hasattr(translator, "cmake_state"):
translator.cmake_state = TranslatorState(translator, output_format)
method = getattr(translator.cmake_state, "visit_" + type(node).__name__)
method(node)
def depart_node(translator, node):
"""
Generic node depart function.
This calls the respective :func:`!depart_XXX` function on
``translator.cmake_state``.
"""
if not hasattr(translator, "cmake_state"):
_logger.error(
__("depart_node() called on this node without previous call to "
"visit_node() with the same translator"),
location = node)
raise nodes.SkipNode()
method = getattr(translator.cmake_state, "depart_" + type(node).__name__)
method(node)
# Directives
# -------------------------------------------------------------
class CMakeObjectDescription(ObjectDescription):
"""Base class for directives documenting CMake objects"""
has_content = True
required_arguments = 1
allow_nesting = False
option_spec = {
"noindex": directives.flag,
"noindexentry": directives.flag
}
@property
def object_type(self):
raise NotImplementedError()
def handle_signature(self, sig, signode):
domain = self.env.get_domain(self.domain)
# By default, just use the entire signature as object name
name = sig
dispname = domain.make_object_display_name(name, self.object_type)
signode += desc_name(text = dispname)
return name
def add_target_and_index(self, name, sig, signode):
domain = self.env.get_domain(self.domain)
# Set the node ID that is used for referencing the node
node_id = make_id(self.env, self.state.document, "cmake", name)
signode["ids"].append(node_id)
self.state.document.note_explicit_target(signode)
# Register the node at the domain, so it can be cross-referenced and
# appears in the CMake index
add_to_index = "noindexentry" not in self.options
domain.register_object(name, self.object_type, node_id, add_to_index,
signode)
# Add an entry in the global index
if add_to_index:
type_str = domain.get_type_name(
domain.object_types[self.object_type])
dispname = domain.make_object_display_name(name, self.object_type)
index_text = "{} ({})".format(dispname, type_str)
key = _get_index_sort_str(self.env, dispname)[0].upper()
self.indexnode["entries"].append(
("single", index_text, node_id, "", key))
class CMakeVariableDescription(CMakeObjectDescription):
"""Directive describing a CMake variable."""
doc_field_types = [
Field("type", names = ("type",), label = _("Type"), has_arg = False),
Field("default", names = ("default",), label = _("Default value"),
has_arg = False),
GroupedField("value", names = ("value",), label = _("Possible values"))
]
object_type = "variable"
# Regex used to parse variable description signatures
_sig_regex = re.compile(r'(?P<name>\w+)(?:\s+(?P<value>.+))?')
def handle_signature(self, sig, signode):
domain = self.env.get_domain(self.domain)
sig_match = self._sig_regex.fullmatch(sig)
if sig_match is None:
_logger.error(
__("Invalid variable signature: {sig}").format(sig = sig),
location = signode)
raise ValueError
name = sig_match["name"]
value = sig_match["value"]
dispname = domain.make_object_display_name(name, "variable")
signode += desc_name(text = dispname)
if value is not None:
signode += desc_annotation(text = " = " + value)
return name
class CMakeFunctionDescription(CMakeObjectDescription):
"""Directive describing a CMake macro/function"""
doc_field_types = [
GroupedField("parameter",
names =["param", "parameter", "arg", "argument", "keyword",
"option"],
label = _("Parameters"))
]
object_type = "function"
# Basic regex used for parsing macro/function signatures
_base_sig_regex = re.compile("\s*(?P<name>\w+)\s*\((?P<params>.*)\)\s*")
# Regexes used for tokenizing a macro/function parameter list
# token_type => regex, has_argument
_param_token_regexes = {
"argument": (re.compile("\s*<(\w+)>\s*"), True),
"keyword": (re.compile("\s*(\w+)\s*"), True),
"ellipsis": (re.compile("\s*\.\.\.\s*"), False),
"group_start": (re.compile("\s*\(\s*"), False),
"group_end": (re.compile("\s*\)\s*"), False),
"optional_start": (re.compile("\s*\[\s*"), False),
"optional_end": (re.compile("\s*\]\s*"), False),
"choice": (re.compile("\s*\|\s*"), False)
}
class _ParamListParseError(Exception):
"""
Exception thrown during signature parsing if a macro/function
parameter list has invalid syntax.
"""
pass
class _ParamListTokenizationError(_ParamListParseError):
"""Exception thrown by :meth:`_tokenize_parameter_list`."""
def __init__(self, unexcpected_text, pos):
super().__init__(
__("Unexpected text at column {pos}: {unexpected}").format(
pos = pos, unexpected = unexcpected_text))
class _ParamListUnexpectedTokenError(_ParamListParseError):
"""Exception thrown on unexpected parameter list tokens."""
def __init__(self, unexpected_token, pos):
super().__init__(
__("Unexpected token at position {pos}: "
"{raw} ({token_type})").format(
pos = pos, raw = unexpected_token[1],
token_type = unexpected_token[0]))
@classmethod
def _tokenize_parameter_list(cls, params):
"""
Tokenizes a CMake macro/function parameter description using the token
descriptions in _param_token_regexes.
Returns a list of tuples of:
* (token_type, raw, argument) for tokens that have an argument
* (token_type, raw) for tokens that do not have an argument
"""
pos = 0
while pos < len(params):
token_recognized = False
for token_type, (regex, has_argument) in (
cls._param_token_regexes.items()):
match = regex.match(params, pos)
if match is None:
continue
if has_argument:
yield token_type, match[0].strip(), match[1]
else:
yield token_type, match[0].strip()
token_recognized = True
pos = match.end()
break
if not token_recognized:
raise cls._ParamListTokenizationError(params[pos:], pos)
@classmethod
def _parse_parameters(cls, tokenized_params, root_node):
"""
Parses a tokenized parameters as returned by
:meth:`_tokenize_parameter_list` and appends corresponding doctree nodes
to the given root node.
"""
# Temporary storage for nested nodes
stack = [root_node]
for i, token in enumerate(tokenized_params):
new_stack_frame = False
if token[0] == "argument":
stack[-1] += desc_cmake_parameter(
text = "<{}>".format(token[2]))
elif token[0] == "keyword":
stack[-1] += desc_cmake_keyword(text = token[2])
elif token[0] == "ellipsis":
if len(stack[-1].children) == 0:
raise cls._ParamListUnexpectedTokenError(token, i)
stack[-1] += desc_annotation(text = "\u2026")
elif token[0] == "group_start":
stack.append(desc_cmake_group())
stack[-2] += stack[-1]
new_stack_frame = True
elif token[0] == "group_end":
if type(stack[-1]) != desc_cmake_group:
raise cls._ParamListUnexpectedTokenError(token, i)
child_cnt = len(stack[-1].children)
if child_cnt == 1:
# Replace the group with the single contained element
stack[-1].parent.replace(stack[-1], stack[-1].children[0])
elif child_cnt == 0:
# Remove the group
stack[-1].parent -= stack[-1]
del stack[-1]
elif token[0] == "optional_start":
stack.append(desc_cmake_optional())
stack[-2] += stack[-1]
new_stack_frame = True
elif token[0] == "optional_end":
if type(stack[-1]) != desc_cmake_optional:
raise cls._ParamListUnexpectedTokenError(token, i)
child_cnt = len(stack[-1].children)
if child_cnt == 0:
# Remove empty optional
stack[-1].parent -= stack[-1]
elif (child_cnt == 1
and type(stack[-1].children[0]) == desc_cmake_group):
# If the only child is a group, we can place the content of
# that group directly into the optional node
group = stack[-1].children[0]
stack[-1] += group.children
stack[-1] -= group
del stack[-1]
elif token[0] == "choice":
if (type(stack[-1]) == desc_cmake_choice
or len(stack[-1].children) == 0):
raise cls._ParamListUnexpectedTokenError(token, i)
prev = stack[-1].children[-1] # Previous node
if type(prev) == desc_cmake_choice:
# Put the choice node back on top of the stack, so the next
# node gets added to it
stack.append(prev)
else:
# Wrap prev in a desc_cmake_choice
stack.append(desc_cmake_choice())
prev.parent.replace(prev, stack[-1])
stack[-1] += prev
new_stack_frame = True
else:
assert False, "Unknown token type: " + token[0]
# desc_cmake_choice is different from other grouping nodes in the
# sense that we do not add child nodes to it until an ending token
# is encountered, but only add it to the to of the stack if a choice
# token is encountered and then add a single node defined by the
# following token. After this token we remove the desc_cmake_choice
# from the stack again.
frame_index = -2 if new_stack_frame else -1
if type(stack[frame_index]) == desc_cmake_choice:
del stack[frame_index]
if len(stack) > 1:
raise cls._ParamListParseError(
__("Unexpected end of parameter list"))
def handle_signature(self, sig, signode):
base_match = self._base_sig_regex.fullmatch(sig)
if base_match is None:
_logger.error(
__("Invalid macro/function signature: {sig}".format(sig = sig)),
location = signode)
raise ValueError
name = base_match["name"]
params = base_match["params"]
paramlist = desc_cmake_parameterlist()
paramlist.child_text_separator = " "
try:
tokenized_params = self._tokenize_parameter_list(params)
self._parse_parameters(tokenized_params, paramlist)
except self._ParamListParseError as ex:
_logger.error(
__("Failed to parse parameters for macro/function {name}: "
"{msg}").format(name = name, msg = str(ex)),
location = signode)
raise ValueError
signode += desc_name(text = name)
signode += paramlist
return name
class CMakeModuleDescription(CMakeObjectDescription):
"""Directive describing a CMake module."""
| |
# -----------------------------------------------------------
# Copyright (C) 2020 NVIDIA Corporation. All rights reserved.
# Nvidia Source Code License-NC
# Code written by <NAME>.
# -----------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import pdb
import copy
import math
import numpy as np
import os.path as osp
from absl import app, flags
import torch
import torchvision
import torch.nn as nn
from ..utils import mesh
from . import geom_utils
from . import net_blocks as nb
#-------------- flags -------------#
#----------------------------------#
flags.DEFINE_boolean('symmetric', True, 'Use symmetric mesh or not')
flags.DEFINE_boolean('multiple_cam_hypo', True, 'If true use multiple camera hypotheses')
flags.DEFINE_integer('nz_feat', 200, 'Encoded image feature size')
flags.DEFINE_integer('z_dim', 350, 'noise dimension of VAE')
flags.DEFINE_integer('gpu_num', 1, 'gpu number')
flags.DEFINE_integer('num_hypo_cams', 8, 'number of hypo cams')
flags.DEFINE_boolean('az_ele_quat', False, 'Predict camera as azi elev')
flags.DEFINE_float('scale_lr_decay', 0.05, 'Scale multiplicative factor')
flags.DEFINE_float('scale_bias', 1.0, 'Scale bias factor')
flags.DEFINE_boolean('use_texture', True, 'if true uses texture!')
flags.DEFINE_boolean('symmetric_texture', True, 'if true texture is symmetric!')
flags.DEFINE_integer('tex_size', 6, 'Texture resolution per face')
flags.DEFINE_integer('subdivide', 3, '# to subdivide icosahedron, 3=642verts, 4=2562 verts')
flags.DEFINE_boolean('use_deconv', False, 'If true uses Deconv')
flags.DEFINE_string('upconv_mode', 'bilinear', 'upsample mode')
flags.DEFINE_boolean('only_mean_sym', False, 'If true, only the meanshape is symmetric')
#------------- Modules ------------#
#----------------------------------#
class ResNetConv(nn.Module):
def __init__(self, n_blocks=4):
super(ResNetConv, self).__init__()
self.resnet = torchvision.models.resnet18(pretrained=True)
self.n_blocks = n_blocks
def forward(self, x):
n_blocks = self.n_blocks
x = self.resnet.conv1(x)
x = self.resnet.bn1(x)
x = self.resnet.relu(x)
x = self.resnet.maxpool(x)
if n_blocks >= 1:
x = self.resnet.layer1(x)
if n_blocks >= 2:
x = self.resnet.layer2(x)
if n_blocks >= 3:
x = self.resnet.layer3(x)
if n_blocks >= 4:
x = self.resnet.layer4(x)
return x
class Encoder(nn.Module):
"""
Current:
Resnet with 4 blocks (x32 spatial dim reduction)
Another conv with stride 2 (x64)
This is sent to 2 fc layers with final output nz_feat.
"""
def __init__(self, input_shape, n_blocks=4, nz_feat=100, batch_norm=True, z_dim=200):
super(Encoder, self).__init__()
self.resnet_conv = ResNetConv(n_blocks=4)
self.enc_conv1 = nb.conv2d(batch_norm, 512, 256, stride=2, kernel_size=4)
nc_input = 256 * (input_shape[0] // 64) * (input_shape[1] // 64)
self.enc_fc = nb.fc_stack(nc_input, nz_feat, 2)
self.mean_fc = nn.Sequential(nn.Linear(nz_feat, nz_feat),
nn.LeakyReLU(),
nn.Linear(nz_feat, z_dim))
self.logvar_fc = nn.Sequential(nn.Linear(nz_feat, nz_feat),
nn.LeakyReLU(),
nn.Linear(nz_feat, z_dim))
nb.net_init(self.enc_conv1)
def sampling(self, mu, logvar):
var = logvar.mul(0.5).exp_()
eps = torch.FloatTensor(var.size()).normal_()
eps = eps.cuda()
return eps.mul(var).add_(mu)
def forward(self, img):
resnet_feat = self.resnet_conv(img)
out_enc_conv1 = self.enc_conv1(resnet_feat)
out_enc_conv1 = out_enc_conv1.view(img.size(0), -1)
feat = self.enc_fc(out_enc_conv1)
mean = self.mean_fc(feat)
logvar = self.logvar_fc(feat)
return feat, self.sampling(mean, logvar), mean, logvar
class TexturePredictorUV(nn.Module):
"""
Outputs mesh texture
"""
def __init__(self, nz_feat, F, T, opts, img_H=64, img_W=128, n_upconv=5, nc_init=256, predict_flow=False, symmetric=False, num_sym_faces=624):
super(TexturePredictorUV, self).__init__()
self.feat_H = img_H // (2 ** n_upconv)
self.feat_W = img_W // (2 ** n_upconv)
self.nc_init = nc_init
self.symmetric = symmetric
self.num_sym_faces = num_sym_faces
self.F = F
self.T = T
self.predict_flow = predict_flow
self.enc = nb.fc_stack(nz_feat, self.nc_init*self.feat_H*self.feat_W, 2)
if predict_flow:
nc_final=2
else:
nc_final=3
self.decoder = nb.decoder2d(n_upconv, None, nc_init, init_fc=False, nc_final=nc_final, use_deconv=opts.use_deconv, upconv_mode=opts.upconv_mode)
def forward(self, feat, uv_sampler):
bs = feat.size(0)
uvimage_pred = self.enc(feat)
uvimage_pred = uvimage_pred.view(uvimage_pred.size(0), self.nc_init, self.feat_H, self.feat_W)
# B x 2 or 3 x H x W
self.uvimage_pred = self.decoder(uvimage_pred)
if torch.sum(self.uvimage_pred != self.uvimage_pred) > 0:
print('Texture branch got Nan!!')
pdb.set_trace()
self.uvimage_pred = torch.tanh(self.uvimage_pred)
tex_pred = torch.nn.functional.grid_sample(self.uvimage_pred, uv_sampler)
tex_pred = tex_pred.view(tex_pred.size(0), -1, self.F, self.T, self.T).permute(0, 2, 3, 4, 1)
if self.symmetric:
# Symmetrize.
tex_left = tex_pred[:, -self.num_sym_faces:]
return torch.cat([tex_pred, tex_left], 1), self.uvimage_pred
else:
# Contiguous Needed after the permute..
return tex_pred.contiguous(), self.uvimage_pred
class ShapePredictor(nn.Module):
"""
Outputs mesh deformations
"""
def __init__(self, nz_feat, num_verts):
super(ShapePredictor, self).__init__()
self.pred_layer = nn.Linear(nz_feat, num_verts * 3)
self.pred_layer.weight.data.normal_(0, 0.0001)
def forward(self, feat):
delta_v = self.pred_layer(feat)
# Make it B x num_verts x 3
delta_v = delta_v.view(delta_v.size(0), -1, 3)
return delta_v
class QuatPredictor(nn.Module):
def __init__(self, nz_feat, nz_rot=4, classify_rot=False):
super(QuatPredictor, self).__init__()
self.pred_layer = nn.Linear(nz_feat, nz_rot)
self.classify_rot = classify_rot
def forward(self, feat):
quat = self.pred_layer(feat)
if self.classify_rot:
quat = torch.nn.functional.log_softmax(quat)
else:
quat = torch.nn.functional.normalize(quat)
return quat
def initialize_to_zero_rotation(self,):
nb.net_init(self.pred_layer)
self.pred_layer.bias = nn.Parameter(torch.FloatTensor([1,0,0,0]).type(self.pred_layer.bias.data.type()))
return
class ScalePredictor(nn.Module):
def __init__(self, nz, bias=1.0, lr=1.0):
super(ScalePredictor, self).__init__()
self.pred_layer = nn.Linear(nz, 1)
self.lr = lr
self.bias = bias
def forward(self, feat):
scale = self.lr * self.pred_layer.forward(feat) + self.bias # b
scale = torch.nn.functional.relu(scale) + 1E-12 # minimum scale is 0.0
return scale
class TransPredictor(nn.Module):
"""
Outputs [tx, ty] or [tx, ty, tz]
"""
def __init__(self, nz, orth=True):
super(TransPredictor, self).__init__()
if orth:
self.pred_layer = nn.Linear(nz, 2)
else:
self.pred_layer = nn.Linear(nz, 3)
def forward(self, feat):
trans = self.pred_layer(feat)
# print('trans: ( Mean = {}, Var = {} )'.format(trans.mean().data[0], trans.var().data[0]))
return trans
class QuatPredictorAzEle(nn.Module):
def __init__(self, nz_feat, dataset='others'):
super(QuatPredictorAzEle, self).__init__()
self.pred_layer = nn.Linear(nz_feat, 3)
self.register_buffer('axis', torch.eye(3).float())
self.dataset = dataset
def forward(self, feat):
angles = 0.1*self.pred_layer.forward(feat)
angles = torch.tanh(feat)
azimuth = math.pi/6 * angles[...,0]
# # Birds
if self.dataset == 'cub':
elev = math.pi/2 * (angles[...,1])
cyc_rot = math.pi/3 * (angles[...,2])
else:
# cars # Horse & Sheep
elev = math.pi/9 * (angles[...,1])
cyc_rot = math.pi/9 * (angles[...,2])
q_az = self.convert_ax_angle_to_quat(self.axis[1], azimuth)
q_el = self.convert_ax_angle_to_quat(self.axis[0], elev)
q_cr = self.convert_ax_angle_to_quat(self.axis[2], cyc_rot)
quat = geom_utils.hamilton_product(q_el.unsqueeze(1), q_az.unsqueeze(1))
quat = geom_utils.hamilton_product(q_cr.unsqueeze(1), quat)
return quat.squeeze(1)
def convert_ax_angle_to_quat(self, ax, ang):
qw = torch.cos(ang/2)
qx = ax[0] * torch.sin(ang/2)
qy = ax[1] * torch.sin(ang/2)
qz = ax[2] * torch.sin(ang/2)
quat = torch.stack([qw, qx, qy, qz], dim=1)
return quat
def initialize_to_zero_rotation(self,):
nb.net_init(self.pred_layer)
return
class Camera(nn.Module):
def __init__(self, nz_input, az_ele_quat=False, scale_lr=0.05, scale_bias=1.0, dataset='others'):
super(Camera, self).__init__()
self.fc_layer = nb.fc_stack(nz_input, nz_input, 2)
if az_ele_quat:
self.quat_predictor = QuatPredictorAzEle(nz_input, dataset)
else:
self.quat_predictor = QuatPredictor(nz_input)
self.prob_predictor = nn.Linear(nz_input, 1)
self.scale_predictor = ScalePredictor(nz_input)
self.trans_predictor = TransPredictor(nz_input)
def forward(self, feat):
feat = self.fc_layer(feat)
quat_pred = self.quat_predictor.forward(feat)
prob = self.prob_predictor(feat)
scale = self.scale_predictor.forward(feat)
trans = self.trans_predictor.forward(feat)
return torch.cat([quat_pred, prob, scale, trans], dim=1)
def init_quat_module(self,):
self.quat_predictor.initialize_to_zero_rotation()
class MultiCamPredictor(nn.Module):
def __init__(self, nc_input, ns_input, nz_channels, nz_feat=100, num_cams=8,
aze_ele_quat=False, scale_lr=0.05, scale_bias=1.0, dataset='others'):
super(MultiCamPredictor, self).__init__()
self.fc = nb.fc_stack(nz_feat, nz_feat, 2, use_bn=False)
self.scale_predictor = ScalePredictor(nz_feat)
nb.net_init(self.scale_predictor)
self.trans_predictor = TransPredictor(nz_feat)
nb.net_init(self.trans_predictor)
self.prob_predictor = nn.Linear(nz_feat, num_cams)
self.camera_predictor = nn.ModuleList([Camera(nz_feat,aze_ele_quat, scale_lr=scale_lr,
scale_bias=scale_bias, dataset=dataset) for i in range(num_cams)])
nb.net_init(self)
for cx in range(num_cams):
self.camera_predictor[cx].init_quat_module()
self.quat_predictor = QuatPredictor(nz_feat)
self.quat_predictor.initialize_to_zero_rotation()
self.num_cams = num_cams
base_rotation = torch.FloatTensor([0.9239, 0, 0.3827 , 0]).unsqueeze(0).unsqueeze(0) ##pi/4
base_bias = torch.FloatTensor([ 0.7071, 0.7071, 0, 0]).unsqueeze(0).unsqueeze(0)
cam_biases = [base_bias]
for i in range(1,self.num_cams):
cam_biases.append(geom_utils.hamilton_product(base_rotation, cam_biases[i-1]))
cam_biases = torch.stack(cam_biases).squeeze()
self.register_buffer("cam_biases", cam_biases)
return
def forward(self, feat):
feat = self.fc(feat)
cameras = []
for cx in range(self.num_cams):
cameras.append(self.camera_predictor[cx].forward(feat))
cameras = torch.stack(cameras, dim=1)
quats = cameras[:, :, 0:4]
prob_logits = cameras[:, :, 4]
camera_probs = nn.functional.softmax(prob_logits, dim=1)
scale = self.scale_predictor.forward(feat).unsqueeze(1).repeat(1, self.num_cams, 1)
trans = self.trans_predictor.forward(feat).unsqueeze(1).repeat(1, self.num_cams, 1)
scale = cameras[:,:,5:6]
trans = cameras[:,:,6:8]
new_quats = quats
cam = torch.cat([scale, trans, new_quats, camera_probs.unsqueeze(-1)], dim=2)
return self.sample(cam) + (quats,)
def sample(self, cam):
'''
cams : B x num_cams x 8 Vector. Last column is probs.
'''
dist = torch.distributions.multinomial.Multinomial(probs=cam[:, :, 7])
sample = dist.sample()
sample_inds = torch.nonzero(sample)[:, None, 1]
sampled_cam = torch.gather(cam, dim=1, index=sample_inds.unsqueeze(-1).repeat(1, 1, 8)).squeeze()[:, 0:7]
return sampled_cam, sample_inds, cam[:, :, 7], cam[:, :, 0:7]
#------------ Mesh Net ------------#
#----------------------------------#
class MeshNet(nn.Module):
def __init__(self, input_shape, opts, nz_feat = 100, axis = 0, temp_path = None):
# Input shape is H x W of the image.
super(MeshNet, self).__init__()
self.opts = opts
self.pred_texture = opts.use_texture
self.symmetric = opts.symmetric
self.symmetric_texture = opts.symmetric_texture
self.pred_cam = opts.pred_cam
self.nz_feat = nz_feat
self.z_dim = opts.z_dim
self.batch_size = opts.batch_size
verts, faces = mesh.create_sphere(opts.subdivide)
num_verts = verts.shape[0]
if self.symmetric:
verts, faces, num_indept, num_sym, num_indept_faces, num_sym_faces = mesh.make_symmetric(verts, faces, axis=axis)
num_sym_output = num_indept + num_sym
self.num_output = num_sym_output
self.num_sym = num_sym
self.num_indept = num_indept
self.num_indept_faces = num_indept_faces
self.num_sym_faces = num_sym_faces
# mean shape is only half.
mean_v = nn.Parameter(torch.Tensor(verts[:num_sym_output]).cuda())
if(temp_path is not None):
mean_v = torch.load(osp.join(temp_path, "mean_v.pth"))
self.register_buffer('mean_v', mean_v)
# Needed for symmetrizing..
self.flip = torch.ones(1, 3).cuda()
self.flip[0, axis] = -1
else:
self.mean_v = nn.Parameter(torch.Tensor(verts), requires_grad=False)
self.num_output = num_verts
verts_np = verts
faces_np = faces
self.verts_np = verts_np
self.faces_np = faces_np
self.faces = torch.LongTensor(faces).cuda()
self.encoder = Encoder(input_shape, n_blocks=4, nz_feat=nz_feat, z_dim=opts.z_dim)
self.shape_predictor = ShapePredictor(opts.z_dim, num_verts=self.num_output)
if(self.pred_cam):
if opts.multiple_cam_hypo:
self.cam_predictor = MultiCamPredictor(512, 8, 128, nz_feat=opts.nz_feat,
num_cams=opts.num_hypo_cams, aze_ele_quat=opts.az_ele_quat,
scale_lr=opts.scale_lr_decay, scale_bias=opts.scale_bias,
dataset = 'cub')
else:
self.cam_predictor = Camera(opts.nz_feat,)
if self.pred_texture:
if self.symmetric_texture:
num_faces = self.num_indept_faces + self.num_sym_faces
else:
num_faces = faces.shape[0]
uv_sampler = mesh.compute_uvsampler(verts_np, faces_np[:num_faces], tex_size=opts.tex_size)
uv_sampler = torch.FloatTensor(uv_sampler).cuda()
uv_sampler = uv_sampler.unsqueeze(0).repeat(int(self.opts.batch_size/self.opts.gpu_num), 1, 1, 1, 1)
self.F = uv_sampler.size(1)
self.T = uv_sampler.size(2)
uv_sampler = uv_sampler.view(-1, self.F, self.T*self.T, 2)
self.register_buffer('uv_sampler', uv_sampler)
img_H = int(2**np.floor(np.log2(np.sqrt(num_faces) * opts.tex_size)))
img_W = 2 * img_H
self.num_faces = num_faces
if(self.symmetric_texture):
self.texture_predictor = TexturePredictorUV(
nz_feat, self.F, self.T, opts, img_H=img_H, img_W=img_W, predict_flow=True, symmetric=opts.symmetric_texture, num_sym_faces=self.num_sym_faces)
else:
self.texture_predictor = TexturePredictorUV(
nz_feat, self.F, self.T, opts, img_H=img_H, | |
- 18: iII111i * I11i - Ii1I
if 31 - 31: Oo0Ooo - O0 % OoOoOO00 % oO0o
if 45 - 45: I1ii11iIi11i + II111iiii * i11iIiiIii
if 13 - 13: OoooooooOO * oO0o - Ii1I / OOooOOo + I11i + IiII
O0OO0O = IIi11I1 . find ( "password=" )
if ( O0OO0O == - 1 ) : return ( False )
if 39 - 39: iIii1I11I1II1 - OoooooooOO
if 81 - 81: I1ii11iIi11i - O0 * OoooooooOO
if 23 - 23: II111iiii / oO0o
if 28 - 28: Oo0Ooo * ooOoO0o - OoO0O00
IIi11I1 = IIi11I1 [ O0OO0O : : ]
o0o0O0O00oOOo = IIi11I1 . find ( "\n" )
if ( o0o0O0O00oOOo == - 1 ) : return ( False )
if 19 - 19: I11i
if 67 - 67: O0 % iIii1I11I1II1 / IiII . i11iIiiIii - Ii1I + O0
if 27 - 27: OOooOOo
if 89 - 89: II111iiii / oO0o
if 14 - 14: OOooOOo . I1IiiI * ooOoO0o + II111iiii - ooOoO0o + OOooOOo
if 18 - 18: oO0o - o0oOOo0O0Ooo - I1IiiI - I1IiiI
if 54 - 54: Oo0Ooo + I1IiiI / iII111i . I1IiiI * OoOoOO00
if 1 - 1: OoOoOO00 * OoO0O00 . i1IIi / Oo0Ooo . I1ii11iIi11i + Oo0Ooo
if 17 - 17: Oo0Ooo + OoO0O00 / Ii1I / iII111i * OOooOOo
II1iiIIiIii = IIi11I1 [ : o0o0O0O00oOOo ] . split ( "=" )
if ( II1iiIIiIii [ 1 ] == "" ) :
oO0oOo0 = lisp_hash_password ( password )
if ( II1iiIIiIii [ 2 ] != oO0oOo0 ) : return ( False )
else :
if ( II1iiIIiIii [ 1 ] != password ) : return ( False )
if 5 - 5: iIii1I11I1II1 / I11i / i1IIi % OoooooooOO
if 50 - 50: Ii1I / OoOoOO00 * Ii1I
if 34 - 34: O0 * O0 % OoooooooOO + iII111i * iIii1I11I1II1 % Ii1I
if 25 - 25: I11i + OoOoOO00 . o0oOOo0O0Ooo % OoOoOO00 * OOooOOo
if 32 - 32: i11iIiiIii - I1Ii111
if 53 - 53: OoooooooOO - IiII
return ( True )
if 87 - 87: oO0o . I1IiiI
if 17 - 17: Ii1I . i11iIiiIii
if 5 - 5: I1ii11iIi11i + O0 + O0 . I1Ii111 - ooOoO0o
if 63 - 63: oO0o
if 71 - 71: i1IIi . Ii1I * iII111i % OoooooooOO + OOooOOo
if 36 - 36: IiII
if 49 - 49: OOooOOo / OoooooooOO / I1IiiI
if 74 - 74: I1Ii111 % I1ii11iIi11i
def lisp_validate_user ( ) :
iiIiI = bottle . request . forms . get ( 'username' )
if 38 - 38: IiII . Ii1I
if ( iiIiI == None ) :
IIIIIIIiI = bottle . request . get_cookie ( "lisp-login" )
if ( IIIIIIIiI ) : return ( True )
if 12 - 12: iII111i . IiII . OoOoOO00 / O0
if 58 - 58: o0oOOo0O0Ooo - II111iiii % oO0o + I1Ii111 . OoOoOO00 / IiII
IIo00ooo = bottle . request . forms . get ( 'password' )
if ( iiIiI == None or IIo00ooo == None ) : return ( False )
if 31 - 31: O0 * o0oOOo0O0Ooo % o0oOOo0O0Ooo / oO0o / I11i / OoO0O00
if ( lisp_find_user_account ( iiIiI , IIo00ooo ) == False ) : return ( False )
if 11 - 11: OoOoOO00 + IiII - OoooooooOO / OoO0O00
if 34 - 34: ooOoO0o
if 45 - 45: ooOoO0o / Oo0Ooo / Ii1I
if 44 - 44: I1ii11iIi11i - Ii1I / II111iiii * OoO0O00 * Oo0Ooo
OO0ooo0o0 = None if os . getenv ( "LISP_NO_USER_TIMEOUT" ) == "" else LISP_USER_TIMEOUT
if 69 - 69: I1ii11iIi11i - I1Ii111
bottle . response . set_cookie ( "lisp-login" , iiIiI , max_age = OO0ooo0o0 )
return ( True )
if 16 - 16: Oo0Ooo
if 14 - 14: i1IIi - O0 % Oo0Ooo
if 92 - 92: Ii1I % iII111i / I1ii11iIi11i % I1ii11iIi11i * I1IiiI
if 74 - 74: O0 . I1IiiI % OoO0O00 % IiII
if 87 - 87: oO0o - i11iIiiIii
if 78 - 78: i11iIiiIii / iIii1I11I1II1 - o0oOOo0O0Ooo
if 23 - 23: I11i
def lisp_get_user ( ) :
iiIiI = bottle . request . forms . get ( 'username' )
if ( iiIiI ) : return ( iiIiI )
if 40 - 40: o0oOOo0O0Ooo - II111iiii / Oo0Ooo
return ( bottle . request . get_cookie ( "lisp-login" ) )
if 14 - 14: I1ii11iIi11i
if 5 - 5: o0oOOo0O0Ooo . iIii1I11I1II1 % iIii1I11I1II1
if 56 - 56: OoooooooOO - I11i - i1IIi
if 8 - 8: I1Ii111 / OOooOOo . I1IiiI + I1ii11iIi11i / i11iIiiIii
if 31 - 31: ooOoO0o - iIii1I11I1II1 + iII111i . Oo0Ooo / IiII % iIii1I11I1II1
if 6 - 6: IiII * i11iIiiIii % iIii1I11I1II1 % i11iIiiIii + o0oOOo0O0Ooo / i1IIi
if 53 - 53: I11i + iIii1I11I1II1
def lisp_login_page ( ) :
I1i1iii = '''
<center><br><br>
<form action="/lisp/login" method="post">
<font size="3"><i>
Username:<input type="text" name="username" />
{}Password:<input type="password" name="password" />
{}<input style="background-color:transparent;border-radius:10px;" type="submit" value="Login" />
</i></font></form>
</center>
''' . format ( lisp . lisp_space ( 4 ) , lisp . lisp_space ( 2 ) )
return ( lisp_banner_top ( True ) + I1i1iii + "<br><hr>" )
if 70 - 70: I1ii11iIi11i
if 67 - 67: OoooooooOO
if 29 - 29: O0 - i11iIiiIii - II111iiii + OOooOOo * IiII
if 2 - 2: i1IIi - ooOoO0o + I1IiiI . o0oOOo0O0Ooo * o0oOOo0O0Ooo / OoOoOO00
if 93 - 93: i1IIi
if 53 - 53: OoooooooOO + Oo0Ooo + oO0o
if 24 - 24: iII111i - IiII - iII111i * I1ii11iIi11i . OoooooooOO / IiII
if 66 - 66: Oo0Ooo
def lisp_is_any_xtr_logging_on ( log_type ) :
OO0OO00oo0 = "egrep '" + log_type + " = '" + " ./lisp.config"
OO0OO00oo0 = getoutput ( OO0OO00oo0 )
if ( OO0OO00oo0 == "" ) : return ( False )
if 97 - 97: i1IIi - OoooooooOO / I1Ii111 * I1IiiI
if 55 - 55: o0oOOo0O0Ooo . iII111i
if 87 - 87: o0oOOo0O0Ooo % iIii1I11I1II1
if 100 - 100: I1Ii111 . I1IiiI * I1Ii111 - I1IiiI . I11i * Ii1I
OO0OO00oo0 = OO0OO00oo0 . split ( "\n" )
for oO000o in OO0OO00oo0 :
o0Oo = oO000o . find ( " {} = " . format ( log_type ) ) != - 1
if ( oO000o [ 0 ] == " " and o0Oo ) :
OO0OO00oo0 = oO000o . replace ( " " , "" )
OO0OO00oo0 = OO0OO00oo0 . split ( "=" )
if ( OO0OO00oo0 [ 1 ] == "yes" ) : return ( True )
if 57 - 57: OOooOOo / Oo0Ooo
if 69 - 69: oO0o - Oo0Ooo % IiII
return ( False )
if 50 - 50: OoooooooOO
if 4 - 4: II111iiii . I11i + Ii1I * I1Ii111 . ooOoO0o
if 87 - 87: OoOoOO00 / OoO0O00 / i11iIiiIii
if 74 - 74: oO0o / I1ii11iIi11i % o0oOOo0O0Ooo
if 88 - 88: OoOoOO00 - i11iIiiIii % o0oOOo0O0Ooo * I11i + I1ii11iIi11i
if 52 - 52: II111iiii . I1IiiI + OoOoOO00 % OoO0O00
if 62 - 62: o0oOOo0O0Ooo
if 15 - 15: I11i + Ii1I . OOooOOo * OoO0O00 . OoOoOO00
def lisp_landing_page ( ) :
IiIi1111ii = lisp . green ( "yes" , True )
iI1I1II1 = lisp . red ( "no" , True )
if 92 - 92: OoooooooOO - OoooooooOO * OoO0O00 % I1IiiI
ooooOoO0O = IiIi1111ii if lisp . lisp_is_running ( "lisp-itr" ) else iI1I1II1
IIII = IiIi1111ii if lisp . lisp_is_running ( "lisp-etr" ) else iI1I1II1
IIIIoOo = IiIi1111ii if lisp . lisp_is_running ( "lisp-rtr" ) else iI1I1II1
Oo0oOo0O0O0o = IiIi1111ii if lisp . lisp_is_running ( "lisp-mr" ) else iI1I1II1
IiiIIiIIii1iI = IiIi1111ii if lisp . lisp_is_running ( "lisp-ms" ) else iI1I1II1
Oo0O0O000 = IiIi1111ii if lisp . lisp_is_running | |
(341, 'L', 'K'), (342, 'F', 'A'), (343, 'S', 'E'), (344, 'K', 'L'), (345, 'A', 'H'), (346, 'E', 'M'), (347, 'L', 'S'), (348, 'H', 'T'), (349, 'M', 'L'), (350, 'S', 'N'), (351, 'T', 'L'), (352, 'L', 'I'), (353, 'N', 'M'), (354, 'L', 'I'), (355, 'I', 'S'), (356, 'M', 'V'), (357, 'I', 'L'), (358, 'S', 'T')]
"""
output=[]
laststart=-1
compacting=None
for (start1,original1,change1),(start2,original2,change2) in get_kmer_list(listformat,k=2):
if original1 is None: original1=""
if original2 is None: original2=""
if change1 is None: change1=""
if change2 is None: change2=""
if start2==start1+1:
if not compacting:
compacting=[start1,original1+original2,change1+change2]
else:
compacting[1]+=original2
compacting[2]+=change2
else:
if not compacting:
output.append([start1,original1,change1])
else:
output.append(compacting)
compacting=None
return output
def revcomp(strseq):
"""
cuts out sequence objects: input and output are strings.
"""
seqob=Bio.Seq.Seq(str(strseq),Bio.Alphabet.IUPAC.extended_dna)
return str(seqob.reverse_complement())
def extract_subsequence(sequence,start,end=None,strand=1,modifications=[]):
"""
Modifications, e.g. [[1224,1223,'G','A'],[1336,1337,'T','C'],[1488,1489,'TC','TCC']]
"""
#assert 0<=start<=len(sequence)
#assert 0<=end<=len(sequence)
originalstart=start
originalend=end
if type(sequence)==str:
sequence=Bio.Seq.Seq(str(sequence),Bio.Alphabet.IUPAC.extended_dna)
subseq=sequence.seq[start:end]
if modifications: #check modifications are coherent
#print "CHECKING MODIFICATIONS",modifications
#RngS=set(range(start,end))
#assert RngS
checkedmodifications=[]
nochangemodifications=[]
for Ms,Me,orig,mod in modifications:
OST=overlap_status(start,end,"subsequence",
Ms,Me,"modification",
report=False)
if OST=="doesn't overlap":
#print "NO OVERLAP",Ms,Me,orig,mod
continue
#print ("CHANGE {}-{} {}>{} ({})"
# .format(Ms,Me,orig,mod,OST))
#if len(orig)>1:
# print "#.",Ms,Me,orig,mod,strand
if OST=="overlaps":
#check if modifications overlap end of sequence
#and alter start and end accordingly
if Ms<start:
#print ">",subseq
warning=("GIVEN FEATURE {}-{}({}), CHANGING "
"START {} TO {} BECAUSE OF "
"OVERLAPPING MODIFICATION {}-{}"
.format(start,end,strand,start,Ms,Ms,Me))
#self.problems["start change"]=warning
start=Ms
if Me>end:
warning=("GIVEN FEATURE {}-{}({}), CHANGING "
"END {} TO {} BECAUSE OF "
"OVERLAPPING MODIFICATION {}-{}"
.format(start,end,strand,end,Me,Ms,Me))
#self.problems["start change"]=warning
end=Me
if orig==mod:
#print "NO CHANGE",Ms,Me,orig,mod
nochangemodifications.append([Ms,Me,orig,mod])
continue
#if overlaps or contains...
checkedmodifications.append((Ms,Me,orig,mod))
#Adjust range if modifications extend beyond it
if start!=originalstart or end!=originalend:
subseq=sequence.seq[start:end]
#ALSO DONT FORGET TO FLAG THAT IT HAS CHANGED, so return start,end
#Just check no modifications
for Ms,Me,orig,mod in nochangemodifications:
Ls=Ms-start-1 # CORE CONVERSION
Le=Ls+len(orig) # MATHEMATICS
#Check modification sequences to avoid errors
if subseq[Ls:Le]!=orig:
print ("PROBLEM WITH ({},{},{}) "
"(ORIGINAL START {} ORIGINAL END {})"
"MODIFICATION ({},{},{},{}) "
"READING {} at {} {}, NOT {}"
.format(start,end,strand,
originalstart,originalend,
Ms,Me,orig,mod,
subseq[Ls:Le],Ms,Me,orig))
#sys.exit()
#else:
# print ("CHECKING NOMOD (GLOBAL {}-{},{}) "
# " {}-{}={},{} "
# "(LOCAL {}-{}) "
# "FOUND {}"
# .format(start,end,strand,
# Ms,Me,orig,mod,
# Ls,Le,
# subseq[Ls:Le]))
#Now apply modifications
localmodifications=[]
for Ms,Me,orig,mod in checkedmodifications:
#if strand==1: #
Ls=Ms-start-1 # CORE CONVERSION
Le=Ls+len(orig) # MATHEMATICS
#EXTRA CHECK
if Ls<0:
#Sometimes the modification coordinates are off
#and need correcting
#print "SHIFTING FEATURE START"
start+=Ls
end+=Ls
Ms+=Ls
Me+=Ls
subseq=sequence.seq[start:end]
Ls=0
Le=Ls+len(orig)
#print "newLs {} newMs {} newsubseq {}".format(Ls,Ms,subseq)
#Correct
#elif strand==-1: #
# Ls=Me-end+1 #
# Le=Ls+len(orig) #
# RCRefSeq=revcomp(orig)
#Check modification sequences to avoid errors
#print "LOCAL",Ls,Le
localmodifications.append([Ls,Le,orig,mod])
#if subseq[Ls:Le]==orig and len(orig)>1:
# print "looked for {} found {}".format(orig,subseq[Ls:Le])
if subseq[Ls:Le]!=orig:
print ">",Ls,Le,subseq
print ("PROBLEM WITH ({},{},{}) "
"(ORIGINAL START {} ORIGINAL END {})"
"MODIFICATION ({},{},{},{}) "
"READING {} at {} {}, NOT {}"
.format(start,end,strand,
originalstart,originalend,
Ms,Me,orig,mod,
subseq[Ls:Le],Ms,Me,orig))
print ">>",sequence.seq[start-1:end+1]
#sys.exit()
#else:
# print "NO PROBLEM!"
if localmodifications:
#print ">SEQUENCE_CHANGE",localmodifications
subseq=sequence_change(subseq,localmodifications)
#if strand==-1:
# print "STRAND -1",revcomp(subseq)
#else:
# print "STRAND 1",subseq
if strand==-1:
return revcomp(subseq),start,end
else:
return str(subseq),start,end
#
class GFF_reader(object):
geneswithintrons=0
nintrons=0
"""
N.B. SeqIO converts locations to be list-index based
rather than the genetic style.
E.g. a GFF file lists a location as 7235-9016
When entered as a SeqFeature this becomes [7234:9016]
geneticpositions = 1234|56|789 i.e. 5-6
listindexpositions = 0123[45]678 i.e. 4:6
allftypes=['snRNA_gene', 'telomeric_repeat',
'Y_prime_element', 'transposable_element_gene',
'origin_of_replication', 'mating_type_region',
'LTR_retrotransposon', 'chromosome', 'centromere',
'matrix_attachment_site', 'pseudogene', 'ncRNA_gene',
'telomerase_RNA_gene', 'telomere', 'long_terminal_repeat',
'X_element', 'rRNA_gene', 'ARS_consensus_sequence', 'ARS',
'region', 'non_transcribed_region', 'tRNA_gene',
'silent_mating_type_cassette_array', 'blocked_reading_frame',
'snoRNA_gene', 'gene', 'X_element_combinatorial_repeat']
"""
def __init__(self,filepath,
ftypes=["gene","intron","cds",
"chromosome","rRNA_gene",
"tRNA_gene","snoRNA_gene",
"snRNA_gene"],
checks=True,#False,
allelesbystrainfilename="",
alleledetailsfilename=""):
self.ftypes=ftypes
with open(filepath,"rb") as fileob:
#GEX=GFF.GFFExaminer()
#GAL=GEX.available_limits(fileob)
#pprint(GAL)
if ftypes:
LI={"gff_type":self.ftypes}
else:
LI=None
self.seqD=Bio.SeqIO.to_dict(GFF.parse(fileob))#,
# limit_info=LI))
fileob.close()
#self.list_by_order=[]
self.index_by_type=defaultdict(list)
self.index_by_location={}
self.allfeatures=[]
self.allregionwrappers=[]
self.chrnames=[]
"""
NB index_by_location uses genetic positions, not
list indices like SeqIO converts them to.
"""
self.index_by_name={}
self.sequences={}
self.problems={}
self.RW=""
for chrname,seq in self.seqD.items():
self.chrnames.append(chrname)
CN=convert_chrName_to_number(chrname)
self.index_by_location[chrname]=defaultdict(list)
self.sequences[chrname]=seq
for i,feat in enumerate(seq.features):
#xfeats=[feat]
feat.chrname=chrname
feat.CN=CN
#unpack(feat)
#sys.exit()
if "gene" in feat.type:
if checks:
self._check_chromosome_is_right(CN,feat)
self._check_introns(feat)
if feat.type!="chromosome":
self.allfeatures.append(feat)
self.index_by_name[feat.id]=feat
#self.list_by_order.append(feat)
self.index_by_type[feat.type].append(feat)
ST=int(feat.location._start)+1
EN=int(feat.location._end)
vals=range(ST,EN+1)
for l in vals:
self.index_by_location[chrname][l].append(feat)
else:
self.index_by_type[feat.type].append(feat)
if checks:
self._check_chromosome_lengths_match_sequence()
self._check_genes()
#print self.index_by_type.keys()
def __iter__(self):
for feat in self.allfeatures:
yield feat
def Xget_subsequence(self,chromosome,start,end=None,strand=1):
"""
start and end are GENETIC POSITIONS not list-index
"""
if chromosome in getattr(self,"_CAD",{}):
chromosome=self._CAD[chromosome]
if chromosome not in self.sequences:
return None
if end is None: end=start
seq=self.sequences[chromosome][start-1:end]
if strand==1: return seq
else: return seq.reverse_complement()
def get_feature(self,lookup=None):
if lookup is None:
return sample(self.index_by_name.values(),1)[0]
elif lookup in self.index_by_type:
return sample(self.index_by_type[lookup],1)[0]
else:
return self.index_by_name.get(lookup,None)
def get_features_in_range(self,
chrname,
geneticstart,
geneticend,
ftype=["gene","intergenic"]):
overlapping_feats=set([])
for loc in range(geneticstart,geneticend+1):
feats=[f for f in self.index_by_location[chrname][loc]
if f.type in ftype]
overlapping_feats.update(set(feats))
return list(overlapping_feats)
def _check_chromosome_lengths_match_sequence(self):
if "chromosome" in self.ftypes:
for chr_feat in self.index_by_type["chromosome"]:
seq=self.sequences[chr_feat.id]
fL,sL=len(chr_feat),len(seq)
if fL!=sL:
k="chromosome length doesn't match"
v=("feat length = {} but seq length = {}"
.format(fL,sL))
self.problems[k]=v
def _check_genes(self):
for i,gene in enumerate(self.index_by_type["gene"]):
STRT,END=gene.location._start,gene.location._end
#NB END seems to be base after last base so..
LEN=(END-STRT)
STRAND=gene.location._strand
#gene.sequence=gene.extract(chr)
gl=self._check_gene_length(gene)
#if not gl:
# return
ss=self._check_start_stop(gene)
#gene.prot=gene.sequence.seq.translate()
#self._check_codons(gene)
if gene.problems:
if "geneproblems" not in self.problems:
self.problems["geneproblems"]={}
self.problems["geneproblems"][gene.id]=gene.problems
self._report(gene)
def _check_chromosome_is_right(self,CN,feat):
if feat.id[0]=="Q":
if CN!=100:
feat.problems["chromosome not right?"]="Gene name={} but chromosome #={}".format(feat.id,CN)
return False
else:
if feat.type=="gene":
featchrletter=feat.id[1]
if ord(featchrletter)-64!=CN:
feat.problems["chromosome not right?"]="Gene name={} but chromosome #={}".format(feat.id,CN)
return False
return True
def _check_introns(self,gene):
gene.codingsequence=[]
gene.intronseqs=[]
gene.introns=[]
gene.exons=[]
gene.problems={}
gene.warnings={}
CHRSEQ=self.sequences[gene.chrname]
STRAND=gene.location._strand
if getattr(gene,"sub_features",[]):
#First, for convenience, repartition introns and exons
#into FEAT.exons and FEAT.introns variable lists (and
#self.subsections) rather than FEAT.sub_features[0].subfeatures
mRNAs=gene.sub_features
if not mRNAs:
self.problems["gff problem"]=">NO mRNAs in gene feature"
return False
if len(mRNAs)!=1:
self.problems["gff problem"]=">TOO MANY ({}) mRNAs".format(len(mRNAs))
return False
tempintronseqs=[]
exoncounter=0
introncounter=0
for subfeat in mRNAs[0].sub_features:
START=subfeat.location._start
END=subfeat.location._end
subfeat.id=""
assert subfeat.location._strand==STRAND
#print "GETTING SUBFEAT IN _check_introns",gene.id
SUBSEQ,start,end=extract_subsequence(CHRSEQ,START,END,STRAND)
if subfeat.type=="CDS":
exoncounter+=1
gene.codingsequence.append(SUBSEQ)
subfeat.id="{}_exon_{}".format(gene.id,exoncounter)
gene.exons.append(subfeat)
elif "intron" in subfeat.type:
introncounter+=1
gene.intronseqs.append(SUBSEQ)
subfeat.id="{}_intron_{}".format(gene.id,introncounter)
#print subfeat.id,SUBSEQ
gene.introns.append(subfeat)
if gene.CN==100:
gene.warnings["mitochondrial intron"]=SUBSEQ
continue #ignore mitochondrial genes
if SUBSEQ[:2] not in intronstarts:
gene.problems["intron {} start problem".format(subfeat.id)]=SUBSEQ[:2]
if SUBSEQ[-2:] not in intronends:
gene.problems["intron {} end problem".format(subfeat.id)]=SUBSEQ[-2:]
elif "plus_1_translational_frameshift" in subfeat.type:
gene.warnings["unusual"]="plus_1_translational_frameshift"
else:
gene.problems["unresolved subfeat type"]=subfeat
#print "^^^THAT WAS ",subfeat.id
if introncounter>0:
gene.warnings["introns"]=introncounter
if STRAND==-1:
gene.codingsequence.reverse()
gene.intronseqs.reverse()
gene.codingsequence="".join(gene.codingsequence)
def _check_gene_length(self,gene):
if not hasattr(gene,"codingsequence"):
print "NO CODING SEQUENCE"
print gene
sys.exit()
x,y=divmod(len(gene.codingsequence),3)
if len(gene.codingsequence)%3!=0:
cod=("{} codons + {}"
.format(x,gene.codingsequence[-y]))
gene.problems["bad gene length"]=cod
return False
return True
def _check_start_stop(self,gene):
if gene.CN==100:
gene.warnings["mitochondrial gene"]=True
if gene.codingsequence[:3] not in startcodons:
gene.problems["bad start codon"]=gene.codingsequence[:3]
if gene.codingsequence[-3:] not in stopcodons:
gene.problems["bad stop codon"]=gene.codingsequence[-3:]
def _report(self,gene):
print ">PROBLEM:",gene.id,gene.problems
print
def make_chraliasdict(self,chraliaspath):
if chraliaspath:
with open(chraliaspath,"rU") as fileob:
reader=csv.reader(fileob,delimiter="\t")
self._CAD={}
self._CO=[]
for row in reader:
self._CAD[row[1]]=row[0]
self._CO.append(row[0])
fileob.close()
else:
self._CAD={k:k for k in seqD.keys()}
self._CO=self._CAD.keys()[:]
self._CO.sort()
def make_strainaliasdict(self,strainaliaspath):
if strainaliaspath:
with open(strainaliaspath,"rU") as fileob:
reader=csv.reader(fileob,delimiter="\t")
self._SAD={row[2]:"AESW12fc"+row[4] for row in list(reader)[1:]}
fileob.close()
def _turn_indices_into_regions(self,indices):
"""
e.g. [1,2,3,6,7,8,9] becomes [(1,3),(6,9)]
Solution from https://stackoverflow.com/questions/10987777/python-converting-a-list-of-indices-to-slices
"""
sind=sorted(indices)
return [[next(v)]+list(v)[-1:]
for k,v in
groupby(sind,lambda x,c=count():x-next(c))]
def make_intergenic_features(self,
types=['rRNA_gene',
'snRNA_gene',
'tRNA_gene',
'snoRNA_gene',
'gene']):
"""
Again, must be careful with different indexing systems
genes 1234 56 789
inds 0123 45 678
slices = [0:4],[4:6],[6:9]
NB
This function also attaches introns to the genes containing them
"""
for chrname,seq in self.sequences.items():
CN=convert_chrName_to_number(chrname)
allbases=set(range(1,len(seq)+1)) #genetic positions
flankinglookup=defaultdict(list)
for feat in seq.features:
if feat.type in types:
ST=int(feat.location._start)+1
EN=int(feat.location._end)
flankinglookup[ST].append(feat.id)
flankinglookup[EN].append(feat.id)
featbases=range(ST,EN+1)
allbases-=set(featbases)
for region in self._turn_indices_into_regions(allbases):
if len(region)==2:
a,b=region
elif len(region)==1:
a=b=region[0]
else:
print ">>",region
continue
leftgeneid=flankinglookup.get(a-1,[""])[0] #
rightgeneid=flankinglookup.get(b+1,[""])[0] #
rnm="{}><{}".format(leftgeneid,rightgeneid)
if rnm.startswith(">"): rnm=rnm[1:]
elif rnm.endswith("<"): rnm=rnm[:-1]
sf=Bio.SeqFeature.SeqFeature(Bio.SeqFeature.FeatureLocation(a-1,b,strand=1),
type="intergenic",
id=rnm)
sf.chrname=seq.id
sf.CN=CN
#sf.sequence=sf.extract(seq)
seq.features.append(sf)
self.allfeatures.append(sf)
#print sf
for bs in range(a,b+1):
self.index_by_location[chrname][bs].append(sf)
self.index_by_type["intergenic"].append(sf)
self.index_by_name[rnm]=sf
self.allfeatures.sort(key=lambda f: (f.CN,f.location._start))
def add_vcfs(self,
vcfpath,
ftype=['rRNA_gene',
'snRNA_gene',
'tRNA_gene',
'snoRNA_gene',
"gene",
"intergenic"]):
self.vcfpath=vcfpath
with open(vcfpath,'rb') as vcfFO:
vcf_reader=vcf.Reader(vcfFO)
#self.vcfeats=[]
openfeatures=[]
self.allelelookup={}
self.alleledetails={}
for i,VR in enumerate(vcf_reader):
#vcfpath chromosome labels take form of e.g. ref|NC_001133|
#so must be converted, using data from chraliaspath, to
#e.g. chrI to match sequence names used in gff file
VR.chrname=getattr(self,"_CAD",
{VR.CHROM:
VR.CHROM}).get(VR.CHROM,
VR.CHROM)
chrsequence=self.sequences[VR.chrname]
#NB---------------------------------------------------
#pyvcf also uses the zero-based, half-open coordinate system
#like list indices rather than genetic coordinates.
#https://pyvcf.readthedocs.io/en/latest/API.html#vcf-reader
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
geneticstart,geneticend=VR.POS,VR.POS+len(VR.REF)-1
slicestart,sliceend=geneticstart-1,geneticend
#THIS ACTED AS A | |
<gh_stars>0
from models import Entry
from models import User
from models import Project
from models import Vendor
from models import Objective
from models import EvaluationCriteria
from models import DEFAULT_PROJECT_NAME
from models import project_db_key
from functools import wraps
import json
import time
import datetime
from flask import request, session, Response, url_for, redirect
from google.appengine.api import mail
from google.appengine.api import app_identity
import math
import urllib2
firebase_server_key = "<KEY>"
fcm_server = "https://fcm.googleapis.com/fcm/send"
fcm_headers = {'Content-type': 'application/json', 'Accept': 'text/plain', 'Authorization' : firebase_server_key}
sender_address = "DAR Admin <<EMAIL>> "
total_max_limit = 1000
gae_environments = {'daranalysis-200000' : 'blue',
'daranalysis-160000' : 'red',
'daranalysis-201000' : 'amber',
'daranalysis-202000' : 'yellow',
'daranalysis-203000' : 'green'}
super_user_name = "Superuser"
CREATE_MODE = "__CREATE__"
ENTRY_SAVED_TITLE = "DAR Entry Saved"
ENTRY_SAVED_MESSAGE = "Hello {toUser}, {aboutUser} has just saved DAR entry"
DAR_TITLE = 'This is my {string} formatted with {args} arguments'
PROJECT_REMINDER_TITLE = "DAR Project Reminder ({env}) : Your DAR needs to completed"
PROJECT_REMINDER_MESSAGE = "As an admin your DAR {projectId} in {env} environment, \
it needs to be attended to, please remind users using Manage button"
def get_project_db_name(rname=DEFAULT_PROJECT_NAME):
return rname
#Gets evaluation_criteria from db - this needs to implement evaluation_criteria-lifecycle - right now it is a singleton
def get_projects_from_db(userId):
if userId:
project_query = Project.query(Project.userIds.IN([userId]))
else:
project_query = Project.query()
return project_query.fetch(total_max_limit)
#Gets evaluation_criteria from db - this needs to implement evaluation_criteria-lifecycle - right now it is a singleton
def get_project_from_db(projectId):
project_query = Project.query(Project.projectId == projectId)
if project_query.count() < 1:
return None
else:
return project_query.fetch(1)[-1]
def get_entry_from_db(projectId, userId):
entrys_query = Entry.query(Entry.user.identity == userId, Entry.project.projectId == projectId)
if entrys_query.count() < 1:
return None
else:
return entrys_query.fetch(1)[-1]
def get_entrys_from_given_project_db(projectId):
entrys_query = Entry.query(Entry.project.projectId == projectId)
return entrys_query.fetch(total_max_limit)
def get_entrys_from_given_user_db(projectId, userId):
entrys_query = Entry.query(Entry.user.identity == userId, Entry.project.projectId == projectId)
return entrys_query.fetch(total_max_limit)
def get_users_from_db(projectId=None):
if projectId and projectId != "":
project = get_project_from_db(projectId)
if project is not None:
userIds = project.userIds
users = []
for userId in userIds:
user = get_user_from_db(userId)
users.append(user)
return users
else:
users_q = User.query(User.type != super_user_name)
users = users_q.fetch(total_max_limit)
return users
return None
def get_user_from_db(userId):
if "@" in userId:
users_q = User.query(User.email == userId)
else:
users_q = User.query(User.identity == userId)
if users_q.count() < 1:
return None
else:
return users_q.fetch(1)[-1]
def update_users_project(projectId, userIds):
project = get_project_from_db(projectId)
project.userIds = userIds
project.put()
return project
def update_user(userId, email, type, password, projectIds):
user = get_user_from_db(userId)
if user is None:
project_name = get_project_db_name()
user = User(parent=project_db_key(project_name))
user.identity = userId
user.projectIds = []
user.email = email
user.type = type
user.password = password
if projectIds:
for projId in projectIds:
if projId and projId != CREATE_MODE and projId not in user.projectIds:
user.projectIds.append(projId)
project = get_project_from_db(projId)
if project:
project.userIds.append(userId)
project.put()
user.put()
time.sleep(1)
#repeat to create empty entrys by default
if projectIds:
for projId in projectIds:
project = get_project_from_db(projId)
if project:
entry = get_entry_from_db(projId, userId)
if entry is None:
update_entry(projId, userId, None, None, None, None)
return user
def getArrayOfDict(bos):
# this is not good
bot = bos[0]
# this is even worse
bot = '[' + bot + ']'
bol = json.loads(bot)
return bol
def update_project(projectId, department, group, description, defaultPassword, userIds, vendorIds, due_date, bos):
project_name = get_project_db_name()
project = get_project_from_db(projectId)
if project is None:
project = Project(parent=project_db_key(project_name))
project.projectId = projectId
project.objectiveIds = []
project.department = department
project.description = description
project.group = group
project.defaultPassword = <PASSWORD>
project.userIds = userIds
project.vendorIds = vendorIds
for ui in userIds:
user = get_user_from_db(ui)
if user and projectId not in user.projectIds:
user.projectIds.append(projectId)
user.put()
for vi in vendorIds:
vendor = get_user_from_db(vi)
if vendor and projectId not in vendor.projectIds:
vendor.projectIds.append(projectId)
vendor.put()
if due_date is None or due_date == "":
project.due_date = datetime.datetime.now()
else:
project.due_date = datetime.datetime.strptime(due_date.split(" ")[0], "%Y-%m-%d")
bol = getArrayOfDict(bos)
if len(project.objectiveIds) > 0:
nnbos = []
for bo in bol:
nnbos.append(bo["objectiveId"])
nnecs = []
if "evaluation_criteria" in bo:
for ec in bo["evaluation_criteria"]:
nnecs.append(ec["evaluation_criterionId"])
rbo = get_objective_from_db(projectId, bo["objectiveId"])
if rbo:
for pec in rbo.evaluation_criteriaIds:
if pec not in nnecs:
print "deleting " + pec
delete_evaluation_criterion_from_db(projectId, rbo.objectiveId, pec)
for pbo in project.objectiveIds:
if pbo not in nnbos:
print "deleting " + pbo
project.objectiveIds.remove(pbo)
delete_objective_from_db(projectId, pbo)
# get basic in
project.put()
#get more stuff
for bo in bol:
#print bo["objectiveId"] + ", " + bo["description"] + ", " + bo["weight"]
boid = bo["objectiveId"]
nbo = get_objective_from_db(projectId, boid)
if nbo is None:
nbo = Objective(parent=project_db_key(project_name))
nbo.objectiveId = boid
nbo.projectId = projectId
nbo.evaluation_criteriaIds = []
nbo.description = bo["description"]
nbo.weight = int(bo["weight"])
if "evaluation_criteria" in bo:
for ec in bo["evaluation_criteria"]:
ecid = ec["evaluation_criterionId"]
nec = get_evaluation_criteria_from_db(projectId, boid, ecid)
#print "\t" + projectId + ", " + ec["evaluation_criterionId"] + ", " + ec["evaluation_criterion"] + "\n\t" + str(nec)
if nec is None:
nec = EvaluationCriteria(parent=project_db_key(project_name))
nec.evaluation_criterionId = ecid
nec.objectiveId = boid
nec.projectId = projectId
nec.evaluation_criterion = ec["evaluation_criterion"]
nec.put()
if ecid in nbo.evaluation_criteriaIds:
iiidx = nbo.evaluation_criteriaIds.index(ecid)
nbo.evaluation_criteriaIds[iiidx] = ecid
else:
nbo.evaluation_criteriaIds.append(ecid)
nbo.put()
if nbo.objectiveId in project.objectiveIds:
iidx = project.objectiveIds.index(nbo.objectiveId)
project.objectiveIds[iidx] = nbo.objectiveId
else:
project.objectiveIds.append(nbo.objectiveId)
project.put()
return project
def get_objective_from_db(projectId, objectiveId):
objectives_query = Objective.query(Objective.objectiveId == objectiveId,
Objective.projectId == projectId)
if objectives_query.count() < 1:
return None
else:
return objectives_query.fetch(1)[-1]
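# A minimal alternative sketch: ndb's Query.get() returns the first match or
# None in a single datastore call, so the count()-then-fetch() pattern above
# (two round trips) could be collapsed to:
#
#   return Objective.query(Objective.objectiveId == objectiveId,
#                          Objective.projectId == projectId).get()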
def get_evaluation_criteria_from_db(projectId, objectiveId, evaluation_criterionId):
evaluation_criteria_query = EvaluationCriteria.query(EvaluationCriteria.evaluation_criterionId == evaluation_criterionId,
EvaluationCriteria.objectiveId == objectiveId,
EvaluationCriteria.projectId == projectId)
if evaluation_criteria_query.count() < 1:
return None
else:
return evaluation_criteria_query.fetch(1)[-1]
def get_entry_status(projectId, userId):
project = get_project_from_db(projectId)
entry = get_entry_from_db(projectId, userId)
lenv = len(project.vendorIds)
lene = 0
for objectiveId in project.objectiveIds:
objective = get_objective_from_db(projectId, objectiveId)
if objective:
evaluation_criteriaIds = objective.evaluation_criteriaIds
if evaluation_criteriaIds:
lene += len(evaluation_criteriaIds)
tlenv = lene * lenv
elenv = 0
if entry and entry.vendor_output:
    vsplits = json.loads(entry.vendor_output)
    elenv = len(vsplits.keys())
if entry is None:
    return "Incomplete"
else:
    # entry exists here, so the repeated "entry and" guards are unnecessary;
    # the entry is incomplete when either output is missing or shorter than expected
    if (entry.evaluation_criteria_output is None or
            len(entry.evaluation_criteria_output) == 0 or
            len(entry.evaluation_criteria_output) < lene or
            entry.vendor_output is None or
            elenv == 0 or
            elenv < tlenv):
cur_date = datetime.datetime.now()
if project.due_date < cur_date:
return "Late"
else:
return "Incomplete"
else:
return "OK"
def get_project_status(projectId):
entrys = get_entrys_from_given_project_db(projectId)
status = "OK"
total = len(entrys)
if total > 0:
current = 0
for entry in entrys:
    # note: as written, status ends up reflecting the last entry examined
    status = get_entry_status(projectId, entry.user.identity)
    if status == "OK":
        current += 1
percentage = current * 100.0 / total  # float division; plain / on ints truncates in Python 2
else:
percentage = 0
status = "Incomplete"
return status, percentage
def delete_evaluation_criterion_from_db(projectId, objectiveId, ecid):
    eval_criterion = get_evaluation_criteria_from_db(projectId, objectiveId, ecid)
    objective = get_objective_from_db(projectId, objectiveId)
    # guard: the objective may be missing, and list.remove() raises ValueError on a missing id
    if objective and ecid in objective.evaluation_criteriaIds:
        objective.evaluation_criteriaIds.remove(ecid)
        objective.put()
if eval_criterion:
key = eval_criterion.key
if key:
print '\tdeleting ' + eval_criterion.evaluation_criterionId
key.delete()
def delete_objective_from_db(projectId, objectiveId):
objective = get_objective_from_db(projectId, objectiveId)
if objective:
for ecid in objective.evaluation_criteriaIds:
# print objective
# print " *** looking for : " + objectiveId + ", " + ecid
evaluation_criterion = get_evaluation_criteria_from_db(projectId, objectiveId, ecid)
if evaluation_criterion:
key = evaluation_criterion.key
if key:
print '\tdeleting ' + evaluation_criterion.evaluation_criterion
key.delete()
key = objective.key
if key:
print '\tdeleting ' + objective.objectiveId
key.delete()
def delete_project_from_db(projectId):
print 'deleting ' + projectId
project = get_project_from_db(projectId)
entrys = get_entrys_from_given_project_db(projectId)
vendorIds = project.vendorIds
for vendorId in vendorIds:
vendor = get_vendor_from_db(vendorId)
if vendor:
if projectId in vendor.projectIds:
vendor.projectIds.remove(projectId)
vendor.put()
userIds = project.userIds
for userId in userIds:
user = get_user_from_db(userId)
if user:
if projectId in user.projectIds:
user.projectIds.remove(projectId)
user.put()
for objectiveId in project.objectiveIds:
delete_objective_from_db(projectId, objectiveId)
for entry in entrys:
key = entry.key
if key:
key.delete()
key = project.key
if key:
key.delete()
def delete_entry_from_db(entry):
key = entry.key
if key:
key.delete()
def delete_users_from_db():
users = get_users_from_db(None)
if users:
for user in users:
delete_user_from_db(user.identity)
def delete_user_from_db(userId):
user = get_user_from_db(userId)
if user:
projectIds = user.projectIds
for projectId in projectIds:
entry = get_entry_from_db(projectId, userId)  # returns a single entry or None, not a list
if entry:
    delete_entry_from_db(entry)
key = user.key
if key:
key.delete()
def update_entry(projectId, userId, evaluation_criteria, evaluation_criteria_output, vendor_output, weights):
entry = get_entry_from_db(projectId, userId)
if entry is None:
project_name = DEFAULT_PROJECT_NAME
entry = Entry(parent=project_db_key(project_name))
entry.user = get_user_from_db(userId)
entry.project = get_project_from_db(projectId)
if evaluation_criteria:
entry.evaluation_criteria = evaluation_criteria.split(",")
if evaluation_criteria_output:
entry.evaluation_criteria_output = evaluation_criteria_output.split(",")
if weights:
sweights = json.loads(weights)
for weight in sweights:
entry.weights.append(weight + ":" + str(sweights[weight]))
if vendor_output:
entry.vendor_output = vendor_output
entry.put()
return entry
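# A hedged sketch of the weights serialization above: a JSON object of
# name -> weight pairs is flattened to "name:weight" strings (the names here
# are illustrative only):
#
#   >>> sweights = json.loads('{"cost": 3, "quality": 5}')
#   >>> sorted(w + ":" + str(sweights[w]) for w in sweights)
#   [u'cost:3', u'quality:5']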
def get_vendors_from_db(projectId=None):
if projectId and projectId != "":
project = get_project_from_db(projectId)
if project is not None:
vendorIds = project.vendorIds
vendors = []
for vendorId in vendorIds:
    vendor = get_vendor_from_db(vendorId)
    if vendor:  # skip ids that no longer resolve to a vendor
        vendors.append(vendor)
return vendors
else:
vendors_q = Vendor.query()
vendors = vendors_q.fetch(total_max_limit)
return vendors
return None
def get_vendor_from_db(vendorId):
vendors_q = Vendor.query(Vendor.identity == vendorId)
if vendors_q.count() < 1:
return None
else:
return vendors_q.fetch(1)[-1]
def update_vendor(vendorId, email, projectIds):
vendor = get_vendor_from_db(vendorId)
if vendor is None:
project_name = get_project_db_name()
# source repo: timgates42/bpython
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import contextlib
import errno
import greenlet
import logging
import os
import re
import signal
import subprocess
import sys
import tempfile
import threading
import time
import unicodedata
from six.moves import range
from pygments import format
from bpython._py3compat import PythonLexer
from pygments.formatters import TerminalFormatter
import blessings
import curtsies
from curtsies import FSArray, fmtstr, FmtStr, Termmode
from curtsies import fmtfuncs
from curtsies import events
import bpython
from bpython.repl import Repl as BpythonRepl, SourceNotFound
from bpython.config import (
Struct,
loadini,
default_config_path,
getpreferredencoding,
)
from bpython.formatter import BPythonFormatter
from bpython import autocomplete
from bpython.translations import _
from bpython._py3compat import py3, is_main_thread
from bpython.pager import get_pager_command
from bpython.curtsiesfrontend import replpainter as paint
from bpython.curtsiesfrontend import sitefix
from bpython.curtsiesfrontend.coderunner import CodeRunner, FakeOutput
from bpython.curtsiesfrontend.filewatch import ModuleChangedEventHandler
from bpython.curtsiesfrontend.interaction import StatusBar
from bpython.curtsiesfrontend.manual_readline import edit_keys
from bpython.curtsiesfrontend import events as bpythonevents
from bpython.curtsiesfrontend.parse import parse as bpythonparse
from bpython.curtsiesfrontend.parse import func_for_letter, color_for_letter
from bpython.curtsiesfrontend.preprocess import preprocess
from bpython.curtsiesfrontend.interpreter import (
Interp,
code_finished_will_parse,
)
from curtsies.configfile_keynames import keymap as key_dispatch
if not py3:
import imp
import pkgutil
logger = logging.getLogger(__name__)
INCONSISTENT_HISTORY_MSG = "#<---History inconsistent with output shown--->"
CONTIGUITY_BROKEN_MSG = "#<---History contiguity broken by rewind--->"
HELP_MESSAGE = """
Thanks for using bpython!
See http://bpython-interpreter.org/ for more information and
http://docs.bpython-interpreter.org/ for docs.
Please report issues at https://github.com/bpython/bpython/issues
Features:
Try using undo ({config.undo_key})!
Edit the current line ({config.edit_current_block_key}) or the entire session ({config.external_editor_key}) in an external editor. (currently {config.editor})
Save sessions ({config.save_key}) or post them to pastebins ({config.pastebin_key})! Current pastebin helper: {config.pastebin_helper}
Reload all modules and rerun session ({config.reimport_key}) to test out changes to a module.
Toggle auto-reload mode ({config.toggle_file_watch_key}) to re-execute the current session when a module you've imported is modified.
bpython -i your_script.py runs a file in interactive mode
bpython -t your_script.py pastes the contents of a file into the session
A config file at {config_file_location} customizes keys and behavior of bpython.
You can also set which pastebin helper and which external editor to use.
See {example_config_url} for an example config file.
Press {config.edit_config_key} to edit this config file.
"""
EXAMPLE_CONFIG_URL = "https://raw.githubusercontent.com/bpython/bpython/master/bpython/sample-config"
EDIT_SESSION_HEADER = """### current bpython session - make changes and save to reevaluate session.
### lines beginning with ### will be ignored.
### To return to bpython without reevaluating make no changes to this file
### or save an empty file.
"""
# more than this many events will be assumed to be a true paste event,
# i.e. control characters like '<Ctrl-a>' will be stripped
MAX_EVENTS_POSSIBLY_NOT_PASTE = 20
# This is needed for is_nop and should be removed once is_nop is fixed.
if py3:
unicode = str
class FakeStdin(object):
"""The stdin object user code will reference
In user code, sys.stdin.read() asks the user for interactive input,
so this class returns control to the UI to get that input."""
def __init__(self, coderunner, repl, configured_edit_keys=None):
self.coderunner = coderunner
self.repl = repl
self.has_focus = False # whether FakeStdin receives keypress events
self.current_line = ""
self.cursor_offset = 0
self.old_num_lines = 0
self.readline_results = []
if configured_edit_keys:
self.rl_char_sequences = configured_edit_keys
else:
self.rl_char_sequences = edit_keys
def process_event(self, e):
assert self.has_focus
logger.debug("fake input processing event %r", e)
if isinstance(e, events.PasteEvent):
for ee in e.events:
if ee not in self.rl_char_sequences:
self.add_input_character(ee)
elif e in self.rl_char_sequences:
self.cursor_offset, self.current_line = self.rl_char_sequences[e](
self.cursor_offset, self.current_line
)
elif isinstance(e, events.SigIntEvent):
self.coderunner.sigint_happened_in_main_context = True
self.has_focus = False
self.current_line = ""
self.cursor_offset = 0
self.repl.run_code_and_maybe_finish()
elif e in ("<Esc+.>",):
self.get_last_word()
elif e in ["<ESC>"]:
pass
elif e in ["<Ctrl-d>"]:
if self.current_line == "":
self.repl.send_to_stdin("\n")
self.has_focus = False
self.current_line = ""
self.cursor_offset = 0
self.repl.run_code_and_maybe_finish(for_code="")
else:
pass
elif e in ["\n", "\r", "<Ctrl-j>", "<Ctrl-m>"]:
line = self.current_line
self.repl.send_to_stdin(line + "\n")
self.has_focus = False
self.current_line = ""
self.cursor_offset = 0
self.repl.run_code_and_maybe_finish(for_code=line + "\n")
else: # add normal character
self.add_input_character(e)
if not self.current_line.endswith(("\n", "\r")):
    self.repl.send_to_stdin(self.current_line)
def add_input_character(self, e):
if e == "<SPACE>":
e = " "
if e.startswith("<") and e.endswith(">"):
return
assert len(e) == 1, "added multiple characters: %r" % e
logger.debug("adding normal char %r to current line", e)
c = e if py3 else e.encode("utf8")
self.current_line = (
self.current_line[: self.cursor_offset]
+ c
+ self.current_line[self.cursor_offset :]
)
self.cursor_offset += 1
def readline(self):
self.has_focus = True
self.repl.send_to_stdin(self.current_line)
value = self.coderunner.request_from_main_context()
self.readline_results.append(value)
return value
def readlines(self, size=-1):
return list(iter(self.readline, ""))
def __iter__(self):
return iter(self.readlines())
def isatty(self):
return True
def flush(self):
"""Flush the internal buffer. This is a no-op. Flushing stdin
doesn't make any sense anyway."""
def write(self, value):
# XXX IPython expects sys.stdin.write to exist, there will no doubt be
# others, so here's a hack to keep them happy
raise IOError(errno.EBADF, "sys.stdin is read-only")
def close(self):
# hack to make closing stdin a nop
# This is useful for multiprocessing.Process, which does work
# for the most part, although output from other processes is
# discarded.
pass
@property
def encoding(self):
return "UTF8"
# TODO write a read() method?
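# A hedged sketch of the FakeStdin control flow (greenlet details simplified):
# user code calls sys.stdin.readline(), which sets has_focus and suspends the
# code greenlet via coderunner.request_from_main_context(); the UI then feeds
# keypresses to process_event() until Enter arrives, at which point
# run_code_and_maybe_finish() resumes user code with the collected line:
#
#   name = sys.stdin.readline()   # in user code: the UI takes over, returns e.g. "Ada\n"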
class ReevaluateFakeStdin(object):
"""Stdin mock used during reevaluation (undo) so raw_inputs don't have to
be reentered"""
def __init__(self, fakestdin, repl):
self.fakestdin = fakestdin
self.repl = repl
self.readline_results = fakestdin.readline_results[:]
def readline(self):
if self.readline_results:
value = self.readline_results.pop(0)
else:
value = "no saved input available"
self.repl.send_to_stdouterr(value)
return value
class ImportLoader(object):
def __init__(self, watcher, loader):
self.watcher = watcher
self.loader = loader
def load_module(self, name):
module = self.loader.load_module(name)
if hasattr(module, "__file__"):
self.watcher.track_module(module.__file__)
return module
if not py3:
# Remember that pkgutil.ImpLoader is an old style class.
class ImpImportLoader(pkgutil.ImpLoader):
def __init__(self, watcher, *args):
self.watcher = watcher
pkgutil.ImpLoader.__init__(self, *args)
def load_module(self, name):
module = pkgutil.ImpLoader.load_module(self, name)
if hasattr(module, "__file__"):
self.watcher.track_module(module.__file__)
return module
class ImportFinder(object):
def __init__(self, watcher, old_meta_path):
self.watcher = watcher
self.old_meta_path = old_meta_path
def find_module(self, fullname, path=None):
for finder in self.old_meta_path:
loader = finder.find_module(fullname, path)
if loader is not None:
return ImportLoader(self.watcher, loader)
if not py3:
# Python 2 does not have the default finders stored in
# sys.meta_path. Use imp to perform the actual importing.
try:
result = imp.find_module(fullname, path)
return ImpImportLoader(self.watcher, fullname, *result)
except ImportError:
return None
return None
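# A minimal installation sketch (an assumption -- the wiring is not shown in
# this file): the finder wraps the meta_path entries that existed before it,
# so every successful module load is reported to the watcher.
#
#   old_meta_path = sys.meta_path[:]
#   sys.meta_path.insert(0, ImportFinder(watcher, old_meta_path))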
class BaseRepl(BpythonRepl):
"""Python Repl
Reacts to events like
- terminal dimensions and change events
- keystrokes
Behavior altered by
- number of scroll downs that were necessary to render array after each
display
- initial cursor position
outputs:
- 2D array to be rendered
BaseRepl is mostly view-independent state of Repl - but self.width and
self.height are important for figuring out how to wrap lines for example.
Usually self.width and self.height should be set by receiving a window
resize event, not manually set to anything - as long as the first event
received is a window resize event, this works fine.
Subclasses are responsible for implementing several methods.
"""
def __init__(
self,
locals_=None,
config=None,
banner=None,
interp=None,
orig_tcattrs=None,
):
"""
locals_ is a mapping of locals to pass into the interpreter
config is a bpython config.Struct with config attributes
banner is a string to display briefly in the status bar
interp is an interpreter instance to use
original terminal state, useful for shelling out with normal terminal
"""
logger.debug("starting init")
if config is None:
config = Struct()
loadini(config, default_config_path())
# If creating a new interpreter on undo would be unsafe because initial
# state was passed in
self.weak_rewind = bool(locals_ or interp)
if interp is None:
interp = Interp(locals=locals_)
interp.write = self.send_to_stdouterr
if banner is None:
if config.help_key:
banner = (
_("Welcome to bpython!")
+ " "
+ _("Press <%s> for help.") % config.help_key
)
else:
banner = None
# only one implemented currently
config.autocomplete_mode = autocomplete.SIMPLE
if config.cli_suggestion_width <= 0 or config.cli_suggestion_width > 1:
config.cli_suggestion_width = 1
self.reevaluating = False
self.fake_refresh_requested = False
self.status_bar = StatusBar(
config,
"",
request_refresh=self.request_refresh,
schedule_refresh=self.schedule_refresh,
)
self.edit_keys = edit_keys.mapping_with_config(config, key_dispatch)
logger.debug("starting parent init")
super(BaseRepl, self).__init__(interp, config)
self.formatter = BPythonFormatter(config.color_scheme)
# overwriting what bpython.Repl put there
# interact is called to interact with the status bar,
# so we're just using the same object
self.interact = self.status_bar
# line currently being edited, without ps1 (usually '>>> ')
self._current_line = ""
# current line of output - stdout and stdin go here
self.current_stdouterr_line = ""
# lines separated whenever logical line
# length goes over what the terminal width
# was at the time of original output
self.display_lines = []
# this is every line that's been executed; it gets smaller on rewind
self.history = []
# formatted version of lines in the buffer kept around so we can
# unhighlight parens
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import Dict, AsyncIterable, Awaitable, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core.client_options import ClientOptions
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.cloud.bigtable_v2.types import bigtable
from google.cloud.bigtable_v2.types import data
from .transports.base import BigtableTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import BigtableGrpcAsyncIOTransport
from .client import BigtableClient
class BigtableAsyncClient:
"""Service for reading from and writing to existing Bigtable
tables.
"""
_client: BigtableClient
DEFAULT_ENDPOINT = BigtableClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = BigtableClient.DEFAULT_MTLS_ENDPOINT
table_path = staticmethod(BigtableClient.table_path)
parse_table_path = staticmethod(BigtableClient.parse_table_path)
common_billing_account_path = staticmethod(
BigtableClient.common_billing_account_path
)
parse_common_billing_account_path = staticmethod(
BigtableClient.parse_common_billing_account_path
)
common_folder_path = staticmethod(BigtableClient.common_folder_path)
parse_common_folder_path = staticmethod(BigtableClient.parse_common_folder_path)
common_organization_path = staticmethod(BigtableClient.common_organization_path)
parse_common_organization_path = staticmethod(
BigtableClient.parse_common_organization_path
)
common_project_path = staticmethod(BigtableClient.common_project_path)
parse_common_project_path = staticmethod(BigtableClient.parse_common_project_path)
common_location_path = staticmethod(BigtableClient.common_location_path)
parse_common_location_path = staticmethod(BigtableClient.parse_common_location_path)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
BigtableAsyncClient: The constructed client.
"""
return BigtableClient.from_service_account_info.__func__(BigtableAsyncClient, info, *args, **kwargs) # type: ignore
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
BigtableAsyncClient: The constructed client.
"""
return BigtableClient.from_service_account_file.__func__(BigtableAsyncClient, filename, *args, **kwargs) # type: ignore
from_service_account_json = from_service_account_file
@property
def transport(self) -> BigtableTransport:
"""Returns the transport used by the client instance.
Returns:
BigtableTransport: The transport used by the client instance.
"""
return self._client.transport
get_transport_class = functools.partial(
type(BigtableClient).get_transport_class, type(BigtableClient)
)
def __init__(
self,
*,
credentials: ga_credentials.Credentials = None,
transport: Union[str, BigtableTransport] = "grpc_asyncio",
client_options: ClientOptions = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the bigtable client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.BigtableTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._client = BigtableClient(
credentials=credentials,
transport=transport,
client_options=client_options,
client_info=client_info,
)
def read_rows(
self,
request: Union[bigtable.ReadRowsRequest, dict] = None,
*,
table_name: str = None,
app_profile_id: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> Awaitable[AsyncIterable[bigtable.ReadRowsResponse]]:
r"""Streams back the contents of all requested rows in
key order, optionally applying the same Reader filter to
each. Depending on their size, rows and cells may be
broken up across multiple responses, but atomicity of
each row will still be preserved. See the
ReadRowsResponse documentation for details.
Args:
request (Union[google.cloud.bigtable_v2.types.ReadRowsRequest, dict]):
The request object. Request message for
Bigtable.ReadRows.
table_name (:class:`str`):
Required. The unique name of the table from which to
read. Values are of the form
``projects/<project>/instances/<instance>/tables/<table>``.
This corresponds to the ``table_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
app_profile_id (:class:`str`):
This value specifies routing for
replication. If not specified, the
"default" application profile will be
used.
This corresponds to the ``app_profile_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
AsyncIterable[google.cloud.bigtable_v2.types.ReadRowsResponse]:
Response message for
Bigtable.ReadRows.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([table_name, app_profile_id])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = bigtable.ReadRowsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if table_name is not None:
request.table_name = table_name
if app_profile_id is not None:
request.app_profile_id = app_profile_id
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.read_rows,
default_retry=retries.Retry(
initial=0.01,
maximum=60.0,
multiplier=2,
predicate=retries.if_exception_type(),
deadline=43200.0,
),
default_timeout=43200.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("table_name", request.table_name),)
),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
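# A hedged usage sketch (the table path and row handling are illustrative):
#
#   import asyncio
#
#   async def main():
#       client = BigtableAsyncClient()
#       stream = await client.read_rows(
#           table_name="projects/my-project/instances/my-instance/tables/my-table"
#       )
#       async for response in stream:
#           for chunk in response.chunks:
#               ...  # assemble cells from the CellChunks
#
#   asyncio.run(main())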
def sample_row_keys(
self,
request: Union[bigtable.SampleRowKeysRequest, dict] = None,
*,
table_name: str = None,
app_profile_id: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> Awaitable[AsyncIterable[bigtable.SampleRowKeysResponse]]:
r"""Returns a sample of row keys in the table. The
returned row keys will delimit contiguous sections of
the table of approximately equal size, which can be used
to break up the data for distributed tasks like
mapreduces.
Args:
request (Union[google.cloud.bigtable_v2.types.SampleRowKeysRequest, dict]):
The request object. Request message for
Bigtable.SampleRowKeys.
table_name (:class:`str`):
Required. The unique name of the table from which to
sample row keys. Values are of the form
``projects/<project>/instances/<instance>/tables/<table>``.
This corresponds to the ``table_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
app_profile_id (:class:`str`):
This value specifies routing for
replication. If not specified, the
"default" application profile will be
used.
This corresponds to the ``app_profile_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
AsyncIterable[google.cloud.bigtable_v2.types.SampleRowKeysResponse]:
Response message for
Bigtable.SampleRowKeys.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([table_name, app_profile_id])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = bigtable.SampleRowKeysRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if table_name is not None:
request.table_name = table_name
if app_profile_id is not None:
request.app_profile_id = app_profile_id
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
#! /usr/bin/env python3
import random
import time
class Remote:
_remote_type_alias_map = {
'fut089': 'rgbcct'
}
_remote_type_parameters_map = {
'rgbw': {
'retries': 10,
'delay': 0.1,
'channels': [9, 40, 71],
'syncword': [0x258B, 0x147A],
'zones': [1, 2, 3, 4],
'features': [
'can_set_brightness',
'has_brightness',
'has_white',
'has_night',
'has_color'
],
'brightness_range': [0, 25],
'button_map': {
'slider': 0x00,
'on': 0x01,
'white': 0x11,
'off': 0x02,
'night': 0x12,
'zone_on:1': 0x03,
'zone_on:2': 0x05,
'zone_on:3': 0x07,
'zone_on:4': 0x09,
'zone_white:1': 0x13,
'zone_white:2': 0x15,
'zone_white:3': 0x17,
'zone_white:4': 0x19,
'zone_off:1': 0x04,
'zone_off:2': 0x06,
'zone_off:3': 0x08,
'zone_off:4': 0x0A,
'zone_night:1': 0x14,
'zone_night:2': 0x16,
'zone_night:3': 0x18,
'zone_night:4': 0x1A,
'speed_up': 0x0B,
'speed_down': 0x0C,
'change_color_mode': 0x0D,
'zone_set_brightness': 0x0E,
'zone_set_color': 0x0F
}
},
'cct': {
'retries': 3,
'delay': 0.11,
'channels': [4, 39, 74],
'syncword': [0x55AA, 0x050A],
'brightness_range': [0, 9],
'temperature_output_range': [0, 9],
'temperature_input_range': [6500, 3000],
'zones': [1, 2, 3, 4],
'features': [
'has_max_brightness',
'has_brightness',
'has_temperature',
'has_night',
'is_white'
],
'button_map': {
'on': 0x05,
'off': 0x09,
'max': 0x15,
'night': 0x19,
'zone_on:1': 0x08,
'zone_on:2': 0x0D,
'zone_on:3': 0x07,
'zone_on:4': 0x02,
'zone_max:1': 0x18,
'zone_max:2': 0x1D,
'zone_max:3': 0x17,
'zone_max:4': 0x12,
'zone_off:1': 0x0B,
'zone_off:2': 0x03,
'zone_off:3': 0x0A,
'zone_off:4': 0x06,
'zone_night:1': 0x1B,
'zone_night:2': 0x13,
'zone_night:3': 0x1A,
'zone_night:4': 0x16,
'brightness_up': 0x0C,
'brightness_down': 0x04,
'temperature_up': 0x0E,
'temperature_down': 0x0F
}
},
'lyh_cct': {
'retries': 70,
'delay': 0.2,
'channels': [24],
'syncword': [0x6F67, 0xA118],
'message_length': 13,
'format_config': {
'crc_enabled': 0,
'packet_length_encoded': 0,
'auto_ack': 1,
'auto_term_tx': 0
},
'brightness_range': [0, 9],
'temperature_output_range': [0, 9],
'temperature_input_range': [6500, 3000],
'zones': [1, 2, 3],
'features': [
'has_brightness',
'has_temperature',
'is_white'
],
'button_map': {
'on': 0x05,
'off': 0x09,
'max': 0x15,
'night': 0x19,
'zone_on:1': 0x08,
'zone_on:2': 0x0D,
'zone_on:3': 0x07,
'zone_on:4': 0x02,
'zone_max:1': 0x18,
'zone_max:2': 0x1D,
'zone_max:3': 0x17,
'zone_max:4': 0x12,
'zone_off:1': 0x0B,
'zone_off:2': 0x03,
'zone_off:3': 0x0A,
'zone_off:4': 0x06,
'zone_night:1': 0x1B,
'zone_night:2': 0x13,
'zone_night:3': 0x1A,
'zone_night:4': 0x16,
'brightness_up': 0x0C,
'brightness_down': 0x04,
'temperature_up': 0x0E,
'temperature_down': 0x0F
}
}
}
_remote_type_parameters_map_unimplemented = {
'rgbcct': {
'channels': [8, 39, 70],
'syncword': [0x1809, 0x7236]
},
'rgb': {
'channels': [3, 38, 73],
'syncword': [0xBCCD, 0x9AAB]
},
'fut020': {
'channels': [6, 41, 76],
'syncword': [0xAA55, 0x50A0]
}
}
def __init__(self, radio, remote_type, remote_id, message_id = None, config = None):
# Pull in the config for this remote type
self._config = self._get_type_parameters(remote_type)
self._config['radio_queue'] = '__DEFAULT__'
# Allow the user to specify some more parameters
if config is not None:
self._config.update(config)
# Store parameters
self._radio = radio
self._type = remote_type
self._id = remote_id
# Initialize the message ID for this remote
if message_id is None:
self._message_id = random.randint(0, 255)
else:
self._message_id = message_id
return None
def _scale_int(self, input_value, input_range_low, input_range_high, output_range_low, output_range_high):
input_range = input_range_high - input_range_low
output_range = output_range_high - output_range_low
input_value = input_value - input_range_low
output = input_value * (output_range / input_range)
output = output + output_range_low
output = int(output + 0.5)
return output
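# Worked examples for _scale_int, computed from the formula above (the
# instance is created with __new__ only because _scale_int uses no state):
#
#   >>> r = Remote.__new__(Remote)
#   >>> r._scale_int(5, 0, 10, 0, 100)
#   50
#   >>> r._scale_int(6500, 6500, 3000, 0, 9)   # 6500 K (cool) -> step 0
#   0
#   >>> r._scale_int(3000, 6500, 3000, 0, 9)   # 3000 K (warm) -> step 9
#   9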
def _debug(self, message):
if 'debug_log_command' in self._config:
self._config['debug_log_command'](message)
return None
def _get_type_parameters(self, remote_type):
config = {}
config.update(self._remote_type_parameters_map[remote_type])
# Supply default config values
if 'retries' not in config:
config['retries'] = 3
if 'delay' not in config:
config['delay'] = 0.1
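# Bind the type-specific implementations onto this instance, so that e.g.
# self._compute_button_message dispatches to _compute_button_message_cct
# for a 'cct' remote.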
setattr(self, '_compute_button_message', getattr(self, '_compute_button_message_' + remote_type))
setattr(self, '_parse_button_message', getattr(self, '_parse_button_message_' + remote_type))
setattr(self, 'pair', getattr(self, '_pair_' + remote_type))
setattr(self, 'unpair', getattr(self, '_unpair_' + remote_type))
return config
def _compute_button_and_zone_from_button_id(self, button_id):
button_info = {}
button_info['button'] = 'unknown=' + str(button_id)
for button_name, button_value in self._config['button_map'].items():
if button_value == button_id:
button_info['button'] = button_name
break
# If the button name has a zone, split it out into its own parameter
if button_info['button'].find(':') != -1:
button_name_zone = button_info['button'].split(':')
button_info['button'] = button_name_zone[0]
button_info['zone'] = int(button_name_zone[1])
return button_info
def _compute_button_message_lyh_cct(self, button_info):
# XXX: This protocol has not been completely reversed yet
if 'zone' in button_info:
if button_info['zone'] is None:
del button_info['zone']
message_id = button_info['message_id']
retval = None
if button_info['button'] == 'on':
    if 'zone' not in button_info:
retval = [0x85, 0xb7, 0x80, 0x88, 0x91, 0xb1, 0x6f, 0x00, 0x66, 0x01, 0x59, 0xad, 0x07]
elif button_info['zone'] == 1:
retval = [0x85, 0xb7, 0x80, 0x88, 0x91, 0xb1, 0x68, 0x00, 0x67, 0x01, 0xd3, 0xff, 0x46]
elif button_info['zone'] == 2:
retval = [0x85, 0xb7, 0x80, 0x88, 0x91, 0x31, 0x69, 0x80, 0x67, 0x01, 0xd4, 0xc8, 0x11]
elif button_info['zone'] == 3:
retval = [0x85, 0xb7, 0x80, 0x88, 0x91, 0x31, 0x6a, 0x00, 0x68, 0x81, 0x55, 0xe0, 0x72]
if button_info['button'] == 'off':
if 'zone' not in button_info:
retval = [0x05, 0xb0, 0x80, 0x88, 0x91, 0xb1, 0x6f, 0x80, 0x66, 0x01, 0xd2, 0xf6, 0x46]
elif button_info['zone'] == 1:
retval = [0x05, 0xb0, 0x80, 0x88, 0x91, 0xb1, 0x68, 0x80, 0x68, 0x01, 0x4d, 0x4f, 0x0a]
elif button_info['zone'] == 2:
retval = [0x05, 0xb0, 0x80, 0x88, 0x91, 0x31, 0x69, 0x00, 0x69, 0x01, 0x4e, 0x80, 0x41]
elif button_info['zone'] == 3:
retval = [0x05, 0xb0, 0x80, 0x88, 0x91, 0x31, 0x6a, 0x80, 0x69, 0x81, 0xcf, 0x6f, 0x68]
if button_info['button'] == 'brightness_up':
if message_id % 2 == 0:
retval = [0x05, 0xb3, 0x80, 0x88, 0x91, 0xb1, 0x6f, 0x00, 0x39, 0x81, 0xa7, 0x33, 0x7e]
else:
retval = [0x05, 0xb3, 0x80, 0x88, 0x91, 0xb1, 0x6f, 0x00, 0x3c, 0x81, 0x2a, 0x63, 0x18]
if button_info['button'] == 'brightness_down':
if message_id % 2 == 0:
retval = [0x85, 0xb2, 0x80, 0x88, 0x91, 0xb1, 0x6f, 0x00, 0x3d, 0x01, 0x2b, 0xc6, 0x61]
else:
retval = [0x85, 0xb2, 0x80, 0x88, 0x91, 0xb1, 0x6f, 0x00, 0x45, 0x01, 0xb3, 0x1d, 0x3f]
if button_info['button'] == 'temperature_up':
if message_id % 2 == 0:
retval = [0x85, 0xb4, 0x80, 0x88, 0x91, 0xb1, 0x6f, 0x00, 0x4b, 0x01, 0xbb, 0x9c, 0x4b]
else:
retval = [0x85, 0xb4, 0x80, 0x88, 0x91, 0xb1, 0x6f, 0x80, 0x4e, 0x81, 0x3e, 0x26, 0x00]
if button_info['button'] == 'temperature_down':
if message_id % 2 == 0:
retval = [0x05, 0xb5, 0x80, 0x88, 0x91, 0xb1, 0x6f, 0x80, 0x46, 0x01, 0x37, 0xd5, 0x69]
else:
retval = [0x05, 0xb5, 0x80, 0x88, 0x91, 0xb1, 0x6f, 0x80, 0x4a, 0x01, 0x3b, 0x1a, 0x06]
if button_info['button'] == 'max':
retval = [0x85, 0xb7, 0x80, 0x88, 0x91, 0xb1, 0x6f, 0x80, 0x66, 0x81, 0xd9, 0x07, 0x22]
if retval is None:
self._debug("Unsupported button: {}".format(button_info))
return None
# XXX: This probably breaks the CRC :-(
if 'zone' in button_info:
retval[6] = (retval[6] & 0xf0) | (0x07 + button_info['zone'])
else:
retval[6] = (retval[6] & 0xf0) | 0x0f
retval.append(0x00)
retval.append(0x0F)
return retval
def _parse_button_message_lyh_cct(self, button_message):
    return {'raw': button_message}
def _pair_lyh_cct(self, zone):
# XXX
return None
def _unpair_lyh_cct(self, zone):
# XXX
return None
def _compute_button_message_cct(self, button_info):
remote_id = button_info['remote_id']
message_id = button_info['message_id']
# Header consists of magic (0x5A), follow by 16-bit remote ID
header = [0x5A, (remote_id >> 8) & 0xff, remote_id & 0xff]
# Determine zone, default to all
zone = button_info.get('zone', 0)
# Some buttons need to be converted to zones
button_name = button_info['button']
if button_name in ['zone_on', 'zone_off', 'zone_max', 'zone_night']:
button_name = "{}:{}".format(button_name, zone)
# Look up the button
button_id = self._config['button_map'][button_name]
# Compute message body
body = [zone, button_id, message_id]
# Compute the whole message so far
message = header + body
# Compute message trailer
## Include a CRC, for good measure
crc = len(message) + 1
for byte in message:
crc = crc + byte
crc = crc & 0xff
trailer = [crc]
message = message + trailer
return message
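# A worked example of the CCT message layout (remote_id and message_id are
# made up for illustration):
#
#   remote_id=0x1234, button='zone_on', zone=1, message_id=0x2A
#   header  = [0x5A, 0x12, 0x34]
#   body    = [0x01, 0x08, 0x2A]   # zone, button_map['zone_on:1'], message_id
#   crc     = (6 + 1 + 0x5A + 0x12 + 0x34 + 0x01 + 0x08 + 0x2A) & 0xff = 0xDA
#   message = [0x5A, 0x12, 0x34, 0x01, 0x08, 0x2A, 0xDA]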
def _parse_button_message_cct(self, button_message):
button_info = {}
# Verify the header -- if it is not valid, return None
if button_message[0] != 0x5A:
return None
# Parse out common parts of the message
button_info['remote_id'] = (button_message[1] << 8) | button_message[2]
button_info['zone'] = button_message[3]
button_info['message_id'] = button_message[5]
# Remove the all zone
if button_info['zone'] == 0:
del button_info['zone']
# Map the button ID to a button name
button_id = button_message[4]
button_info.update(self._compute_button_and_zone_from_button_id(button_id))
return button_info
def _pair_cct(self, zone):
self._send_button({
'button': 'zone_on',
'zone': zone
})
# Ensure that the "on" button cannot be hit soon after
# because it might trigger the unpair flow
time.sleep(5)
return True
def _unpair_cct(self, zone):
for retry in range(7):
self._send_button({
'button': 'zone_on',
'zone': zone
})
return True
def _compute_button_message_rgbw(self, button_info):
remote_id = button_info['remote_id']
message_id = button_info['message_id']
# Allow setting color for all zones
if button_info['button'] == 'set_color':
button_info['button'] = 'zone_set_color'
if 'zone' in button_info:
del button_info['zone']
# Allow setting brightness for all zones
if button_info['button'] == 'set_brightness':
button_info['button'] = 'zone_set_brightness'
if 'zone' in button_info:
del button_info['zone']
# Header consists of magic (0xB0), follow by 16-bit remote ID
header = [0xB0, (remote_id >> 8) & 0xff, remote_id & 0xff]
# Default value for most buttons, since they do not need it
brightness = 0
color = 0
# Some buttons need to be converted to zones
button_name = button_info['button']
if button_name in ['zone_on', 'zone_off', 'zone_white', 'zone_night']:
button_name = button_name + ':' + str(button_info['zone'])
button_id = self._config['button_map'][button_name]
# Brightness and Color buttons should also set the appropriate
# parameters
if button_info['button'] == 'zone_set_brightness':
## Brightness is a range of [0..25] (26 steps),
## shifted 3 bits left (the low 3 bits carry the zone, see below)
brightness = button_info['brightness']
if brightness < 0:
brightness = 0
elif brightness > 25:
brightness = 25
brightness = 31 - ((brightness + 15) % 32)
brightness = brightness << 3
elif button_info['button'] == 'zone_set_color':
color = button_info['color']
# The zone number is also encoded into the brightness byte
if 'zone' not in button_info:
zone_value = 0
else:
zone_value = button_info['zone']
brightness |= zone_value & 0b111
# Compute message
body = [color, brightness, button_id, message_id]
# Compute whole message
message = header + body
return message
def _parse_button_message_rgbw(self, button_message):
button_info = {}
# Verify the header -- if it is not valid, return None
if button_message[0] != 0xB0:
return None
# Parse out common parts of the message
button_info['remote_id'] = (button_message[1] << 8) | button_message[2]
button_info['color'] = button_message[3]
button_info['brightness'] = button_message[4]
button_info['message_id'] = button_message[6]
# Map the button ID to a button name
button_id = button_message[5]
button_info.update(self._compute_button_and_zone_from_button_id(button_id))
if button_info['button'] == 'zone_set_brightness':
brightness = button_info['brightness']
zone = brightness & 0b111
if zone != 0:
button_info['zone'] = zone
else:
button_info['button'] = 'set_brightness'
# Decode brightness: 26 steps on the wire; raw 16..0 maps to 0..16 and raw 31..23 maps to 17..25
brightness = brightness >> 3
brightness = 31 - ((brightness + 15) % 32)
button_info['brightness'] = brightness
return button_info
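# A round-trip sketch of the rgbw brightness encoding (zone 0 assumed so the
# low three bits stay clear):
#
#   encode: b = 25 -> 31 - ((25 + 15) % 32) = 23 -> 23 << 3 = 184
#   decode: 184 >> 3 = 23 -> 31 - ((23 + 15) % 32) = 25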
def _pair_rgbw(self, zone):
self._send_button({
'button': 'zone_on',
'zone': zone
})
return False
def _unpair_rgbw(self, zone):
self._send_button({
'button': 'zone_on',
'zone': zone
})
self._send_button({
'button': 'zone_white',
'zone': zone
})
return False
def _get_next_message_id(self):
# Determine next message ID
self._message_id = (self._message_id + 1) & 0xff
return self._message_id
def _send_button(self, button_info, post_delay = None):
# Include the remote ID unless one was supplied
button_info = button_info.copy()
if 'remote_id' not in button_info:
button_info['remote_id'] = self._id
# Get the next message ID for this remote
if 'message_id' not in button_info:
message_id = self._get_next_message_id()
button_info['message_id'] = message_id
else:
self._message_id = button_info['message_id']
# Compute message
message = self._compute_button_message(button_info)
# Transmit
if 'delay' in button_info:
delay = button_info['delay']
else:
delay = self._config['delay']
if 'retries' in button_info:
retries = button_info['retries']
else:
retries = self._config['retries']
format_config = self._config.get('format_config', None)
if post_delay is not None:
delay = post_delay
self._debug("Sending {}={} n={} times | |
# tt4 = 'O2_ORR_DW16_data_PDX_Ch1'
# 'N2_20cls_300_100_10_DW28_898'
self.EC_run_slice = self.EC_run_slice.loc[
self.EC_run_slice.basename.str.contains(tt4)
]
self.EC_run_slice = self.EC_run_slice.loc[
self.EC_run_slice.PAR_file.isin(ttpfs)
]
# EC_run_slice = EC_run_slice.iloc[-10:-7]
if "ORR" in self.EC_run_slice.PAR_exp.unique():
self.EC_run_slice = self.EC_index.loc[
self.EC_index.PAR_date_min.isin(self.EC_run_slice.PAR_date_min.unique())
]
def prepare_ovv_EC_run(self):
results, out = [], []
EC_run_groupby = ["PAR_exp", "EXP_date", "EXP_dir"]
run_group_template = namedtuple("Run_groups", "PAR_exp EXP_date EXP_dir grp")
EC_run_slice_grp_date_dir = self.EC_run_slice.groupby(by=EC_run_groupby)
# EC_run_slice_grp_date_dir = EC_run_slice.loc[EC_run_slice.PAR_file.isin(_n2faillst)].groupby(by=EC_run_groupby)
run_group_lst = [
run_group_template(n[0], n[1], n[2], gr)
for n, gr in EC_run_slice_grp_date_dir
]
ovv_all_date_dirs = pd.concat(
[ECRunOVV.MakeOVVperExpDir(self, run_group) for run_group in run_group_lst]
)
ovv = ovv_all_date_dirs.loc[
ovv_all_date_dirs.SampleID.isin(self.EC_run_slice.SampleID.unique())
]
self.run_ovv = ovv
ovv_exp_grp = ovv.groupby("PAR_exp")
ovv_lst_exps = ovv.PAR_exp.unique()
# PAR_exp_grp_lst = [ovv.groupby(by='PAR_exp' ]
_logger.info(
f"=== PAR DW starting: groups {len(EC_run_slice_grp_date_dir)},\nLength slice {len(self.EC_run_slice)}, index {len(self.EC_index)} ===="
)
self.ovv_exp_grp = ovv_exp_grp
###### === EC Analysis Run over test1 in multiprocess ==== #######
def update_type_run_kwargs(self):
# if 'N2_act' in _exp_type:
# elif 'ORR' in _exp_type or 'O2_nan' in _exp_type:
if "EIS" in self.ovv_exp_grp.groups.keys():
# ==== UPDATING EIS KWARGS =====
# EIS_kwargs = {'EIS_skip_set' : False, 'EIS_use_prepars_set' : True, 'FreqLim_set' : 30E3}
EIS_kwargs = dict(
EIS_skip_set=False,
EIS_use_prepars_set=True,
FreqLim=15e3,
EIS_plot_combined=True,
EIS_single_output="Text,Plot",
perform_prefit=True,
TrimData=False,
FitOnlyTrimmedData=False,
linKK_trimming_factor=3,
export_raw_data=True,
DP_DRT_fit=False,
GP_DRT_fit=False,
)
EIS_kwargs.update(self.run_kwargs)
print(
f'Skip EIS: {EIS_kwargs["EIS_skip_set"]}. Use prePars: {EIS_kwargs["EIS_use_prepars_set"]}, Frequency limit: {EIS_kwargs["FreqLim"]:.0f}'
)
EIS_kwargs.update(run_EIS.get_eis_suggestions(), compare=self.EC_run_slice)
self.run_kwargs.update(**EIS_kwargs)
# self.run_kwargs.update(**dict(ovv_exp_grp = self.ovv_exp_grp))
# self.run_kwargs.update({'EIS' : {**EIS_kwargs}})
# elif 'HPRR' in _exp_type:
# elif 'OER' in _exp_type:
# elif 'HER' in _exp_type:
# elif 'RHE' in _exp_type:
def start_N2(self, run_kwargs, _exp_type="N2_act"):
try:
index_fls = {
"N2_CVs": {"path": self.POST_DIR.joinpath("N2_CVs_all.pkl")},
"N2_actions": {
"path": self.POST_DIR("VERSASTAT").PostDir.joinpath(
"N2_CVs_actions.pkl"
)
},
}
run_N2.N2_act(self.ovv_exp_grp, index_fls=index_fls, **self.run_kwargs)
pass
except Exception as e:
_logger.error(
f'Run N2_act {run_kwargs["input_run"]} failed for {_exp_type}, because {e}'
)
def EC_Analysis_run(self):
#%% EC_Analysis_run
for _exp_type, _exp_grp in self.ovv_exp_grp:
_logger.warning(
f"Starting {_exp_type} len({len(_exp_grp)}) {self.__class__.__name__} from {self.input_run}. =========="
)
if "N2_act" in _exp_type:
self.start_N2()
elif "ORR" in _exp_type or "O2_nan" in _exp_type:
try:
# run_ORR.ORR(self.ovv_exp_grp, **self.run_kwargs)
run_ORR.ORR_run_loop(self)
except Exception as e:
_logger.error(
f'Run ORR {run_kwargs["input_run"]} failed for {_exp_type}, because {e}'
)
elif "EIS" in _exp_type and not "HER" in _exp_type:
# EIS_kwargs = {'EIS_skip_set' : False, 'EIS_use_prepars_set' : True, 'FreqLim_set' : 30E3}
# EIS_kwargs = dict(EIS_skip_set = False,EIS_use_prepars_set = True, FreqLim = 25E3,
# EIS_plot_combined=True, EIS_single_output = 'Text,Plot',
# perform_prefit = True, TrimData = False, FitOnlyTrimmedData = False, linKK_trimming_factor = 3,
# export_raw_data = True, DP_DRT_fit = False, GP_DRT_fit = False)
# EIS_kwargs.update(run_kwargs)
# print(f'Skip EIS: {EIS_kwargs["EIS_skip_set"]}. Use prePars: {EIS_kwargs["EIS_use_prepars_set"]}, Frequency limit: {EIS_kwargs["FreqLim"]:.0f}')
# # exp, gr = 'EIS', ExpTypes_gr.get_group('EIS')
if self.run_kwargs.get("EIS_skip_set", False) == False:
try:
_logger.warning(
f"=====Starting EIS {self.__class__.__name__} from {self.input_run}. =========="
)
# eis_run_ovv.eis_run_group_ovv(self.ovv_exp_grp,self.run_kwargs)
# eis_run_ovv.EIS_Preparator(self.ovv_exp_grp, **self.run_kwargs)
self.run_kwargs.update({})
run.EIS_run_loop(self)
# eis_run_ovv.EIS_Preparator(self)
except Exception as e:
_logger.error(
f'EIS run command "{self.run_kwargs["input_run"]}" failed for {_exp_type}, because {e}'
)
# TODO Fix run EIS_HER files, only 2 with 0 rpm...
# ovv_Dest_dir.joinpath('{})
# index_info.append(index)
elif "HPRR" in _exp_type:
pass
elif "OER" in _exp_type:
try:
run_OER.OER(self.ovv_exp_grp, **self.run_kwargs)
except Exception as e:
_logger.info(
f'Run {run_kwargs["input_run"]} failed for {_exp_type}, because {e}'
)
elif "HER" in _exp_type and not "EIS" in _exp_type:
try:
run_HER.HER(self.ovv_exp_grp, **self.run_kwargs)
except Exception as e:
_logger.info(
f'Run {run_kwargs["input_run"]} failed for {_exp_type}, because {e}'
)
elif "RHE" in _exp_type:
pass
else:
# print('No run, unknown experiment type:', exp)
_logger.info("No run, unknown experiment type: {0}".format(_exp_type))
###### === EC Analysis Run over test1 grouped by Date ==== #######
#%% Testing functions
def gas_filter(test1, run):
gas_search = re.search("(O2|N2)", run)
if gas_search:
gas_select = gas_search.group()
test1 = test1.loc[test1.Gas == gas_select]
return test1
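# A small usage sketch for gas_filter (the DataFrame is illustrative):
#
#   >>> gas_filter(EC_index, "yrecent O2")   # keeps rows where Gas == "O2"
#   >>> gas_filter(EC_index, "yrecent")      # no gas token -> frame returned unchanged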
def PF_select(key):
selector = {
"eispASTsHA": [
"O2_EIS-range_1500rpm_JOS1_pAST-sHA_285",
"N2_EIS-range_1500rpm_pAST-sHA_JOS1_288",
"N2_EIS-range_1500rpm_JOS3_288",
]
}
return selector.get(key, [])
def check_idx(EC_index, test_obj):
# EIS_EC_index = EC_run_slice.reset_index()
# loc[EC_run_slice.PAR_exp.str.contains('EIS')].reset_index()
test_obj = "N2_EIS-range_0rpm_JOS2_272"
test_obj = "O2_EIS-range_1500rpm_JOS9_r1_283"
test_obj = "O2_EIS-range_1500rpm_DW51_280"
test_obj = "O2_EIS-range_1500rpm_JOS9_899"
test_obj = "N2_EIS-range_0rpm_JOS7_264"
tj = "N2_EIS-range_1500rpm_JOS4_268"
tj = "O2_EIS-range_1500rpm_JOS3_270"
tj = "O2_EIS-range_1500rpm_JOS3_285"
tj = "O2_EIS-range_1500rpm_JOS3_pAST-sHA_285"
tj = "O2_EIS-range_1500rpm_JOS1_pAST-sHA_285"
tj = "O2_EIS-range_1500rpm_JOS1_pAST-sHA_285"
tj = "N2_EIS-range_1500rpm_pAST-sHA_JOS1"
tj = "O2_ORR_JOS4_257"
test_idx = EC_index.loc[EC_index.basename.str.contains(tj)].index[0]
# test_idx = EIS_EC_index.loc[EIS_EC_index.basename.str.contains(test_obj)].index[0]
print(
f"index: {test_idx}, other {100*[n for n,i in enumerate(EC_index.index) if i == test_idx][0]/len(EC_index):.0f}"
)
return
#%% === Index Run Selection ===
def index_run_selection(EC_index, **run_kwargs):
run = run_kwargs.get("input_run", "n empty")
print(f"Run command: {run}")
if run.startswith("y"):
if "ya" in run or run == "yes all" or run == "y std":
test1 = EC_index
# test3 = OnlyRecentMissingOVV.query('(EXP_date >= 20190301) & ((SampleID >= "DW01") | (SampleID == "JOS12") | (SampleID == "JOS13") | (SampleID == "JOS14") | (SampleID == "JOS15")) & (PAR_exp == "EIS")')
# test1 = OnlyRecentMissingOVV.query('(EXP_date >= 20190103) & (SampleID == "JOS15") & (PAR_exp == "EIS")')
elif run == "ys":
test1 = EC_index[
EC_index.EXP_date.isin(EC_index.query('PAR_exp == "HPRR" ').EXP_date)
]
elif "yrecent" in run:
test1 = EC_index.query("(PAR_date >= 20190506)").loc[
EC_index.SampleID.str.contains("JOS4|DW")
]
# .head(20)
if "slicedate" in run:
test1 = test1.loc[
(test1.Date_PAR_EXP > pd.Timedelta("-2 days 0 hours")), :
]
elif "testN2EIS" in run:
# test1 = OnlyRecentMissingOVV.query('(EXP_date >= 20191020)')
# test1 = OnlyRecentMissingOVV.query('(EXP_date == 20190325)')
# test1 = EC_index.loc[EC_index.PAR_file.str.contains('H2O2 Daten_BA')]
_ll = [
"/mnt/DATA/EKTS_CloudStation/CloudStation/Experimental data/Raw_data/VERSASTAT/2019-01-Jan/25.01.2019_0.1MH2SO4_cell2/N2_EIS-range_0rpm_HER_JOS4_257.par",
"/mnt/DATA/EKTS_CloudStation/CloudStation/Experimental data/Raw_data/VERSASTAT/2019-01-Jan/25.01.2019_0.1MH2SO4_cell3/N2_EIS-range_0rpm_JOS4_postAST-LC_257.par",
]
test1 = EC_index.loc[EC_index.PAR_file.isin(_ll)]
# str.contains('/mnt/DATA/EKTS_CloudStation/CloudStation/Experimental data/Raw_data/VERSASTAT/2019-03-Mrt/25.03.2019_0.1MH2SO4_cell2/O2_ORR_JOS3_270_2_#2_Disc_Parstat.par')]
# test1 = OnlyRecentMissingOVV.query('(EXP_date == 20190912) & (pH < 7) & (SampleID == "JOS2")')
elif "highload" in run:
test1 = EC_index.loc[EC_index.PAR_file.str.contains("high-load_267")].query(
'PAR_exp != "EIS" & EXP_date == 20190912'
)
# 'O2_ORR_JOS12_3rpm_258_#2_Disc_Parstat'
# test1 = OnlyRecentMissingOVV.query('(EXP_date >= 20190101)')
elif "serie" in run:
# test1 = OnlyRecentMissingOVV.query('(EXP_date >= 20191020)')
# test1 = OnlyRecentMissingOVV.query('(EXP_date == 20190325)')
serm = [
i
for i in run.split()
if i in PostChar.SampleSelection.Series.index.values
]
test1 = EC_index.query(
'(EXP_date >= 20190101) & (pH < 7) & (SampleID == "DW28") & (PAR_file.str.contains("3rpms"))'
)
# EC_run_slice.loc[EC_run_slice.PAR_file.str.contains('3rpms')]
# test1 = OnlyRecentMissingOVV.query('(EXP_date <= 20190228) & (EXP_date >= 20190101)')
# test1 = OnlyRecentMissingOVV.query('(EXP_date >= 20180101) & (EXP_date <= 20180131)')
# test1 = OnlyRecentMissingOVV.query('(EXP_date == 20180123) & SampleID_folder == "DW28"')
# test1 = OnlyRecentMissingOVV.query('(EXP_date == 20190125)')
# PAR_date < pd.to_datetime('20190901') and PAR_date > pd.to_datetime('20190827')
elif "missing" in run:
# EIS_missing = FileHelper.FindExpFolder('VERSASTAT').EISmissing
# EIS_missing = pd.read_excel(FileHelper.FindExpFolder('VERSASTAT').PostDir.joinpath('OVV_EIS_missing.xlsx'))
# test1 = OnlyRecentMissingOVV.loc[OnlyRecentMissingOVV.EXP_date.isin(EIS_missing.EXP_date.unique())]
# refits = 'DW16','DW19','DW17','DW28'
# test1 = OnlyRecentMissingOVV.loc[OnlyRecentMissingOVV.SampleID.isin(refits) & OnlyRecentMissingOVV.pH == 1]
test1 = EC_index.loc[
EC_index.basename.str.contains("SDFe2AL", case=False)
].query("pH < 7")
# test1 = OnlyRecentMissingOVV.loc[OnlyRecentMissingOVV.SampleID.str.contains('SD')]
elif "orrmiss" in run:
PostDestDir = FindExpFolder("VERSASTAT").PostDir
test1 = pd.read_pickle(PostDestDir.joinpath("ORR_missing.pkl.compress"))
elif "eismiss" in run:
PostDestDir = FindExpFolder("VERSASTAT").PostDir
eis_refit = pd.read_pickle(
PostDestDir.joinpath("EIS_ORR_refit_pars.pkl.compress")
).PAR_file.unique()
test1 = EC_index.loc[EC_index.PAR_file.isin(eis_refit)].tail(1)
elif "all eis" in run:
EIS_EC_index = EC_index.loc[
EC_index.PAR_exp.str.contains("EIS")
].reset_index()
test1 = EIS_EC_index
if "JOS" in run:
test1 = EIS_EC_index.loc[
EIS_EC_index.SampleID.isin(["JOS1", "JOS2", "JOS3", "JOS4", "JOS5"])
& EIS_EC_index.basename.str.contains("1500")
]
if "JOSn" in run:
test1 = EIS_EC_index.iloc[~EIS_EC_index.index.isin(test1.index)]
if "JOS4" in run:
# pass
test1 = EIS_EC_index.loc[
(EIS_EC_index.SampleID.str.contains("JOS4"))
& EIS_EC_index.basename.str.contains("1500")
& (EIS_EC_index.Gas == "O2")
].tail(1)
if "continue" in run:
test1 = test1.loc[
test1.PAR_date
>= test1.loc[
test1.PAR_file.str.contains("O2_EIS-range_0rpm_HER_JOS5_257"),
"PAR_date",
].iloc[0]
]
test1 = test1.loc[test1.PAR_date <= test1.loc[1734, "PAR_date"]]
elif "rpm" in run:
# print("RPM")
eisrpm_miss = pd.read_pickle(
FindExpFolder("VERSASTAT").PostDir.joinpath(
"EIS_RPM_series.pkl.compress"
)
)
# print('eisrpm',len(eisrpm_miss))
test1 = EC_index.loc[
EC_index.PAR_file.isin(
[Path(i) for i in eisrpm_miss.PAR_file.unique()]
)
]
# print('test',len(test1))
elif "recent" in run:
eis_miss_recent = pd.read_pickle(
FindExpFolder("VERSASTAT").PostDir.joinpath("EIS_pars_nonrecent")
)
test1 = EC_index.loc[
EC_index.PAR_file.isin(
[Path(i) for i in eis_miss_recent.PAR_file.unique()]
)
]
elif "rest" in run:
eis_metaf = pd.read_excel(
list(
FindExpFolder("PorphSiO2").compare.parent.rglob(
"2020-24-03_EIS_Porph_SiO2/meta_data*EIS*origin.xlsx"
)
)[0],
index_col=[0],
)
porhp_refit = EIS_EC_index.loc[
EIS_EC_index.basename.isin(eis_metaf.basename.unique())
].index
tt_inf = "2019-02-Feb/12.02.2019_0.1MKOH_cell2/O2_EIS-range_1500rpm_JOS6_postAST_899"
idx_dbg = EIS_EC_index.loc[
EIS_EC_index.PAR_file.str.contains(tt_inf)
].index[0]
not_idx = list(porhp_refit) + [idx_dbg + i for i in range(-7, 7)]
test1 = EIS_EC_index.loc[
(~EIS_EC_index.index.isin(not_idx)) & (EIS_EC_index.index > 215)
]
elif "porph" and "refit" in run:
eis_metaf = pd.read_excel(
list(
FindExpFolder("PorphSiO2").compare.parent.rglob(
"2020-24-03_EIS_Porph_SiO2/meta_data*EIS*origin.xlsx"
)
)[0],
index_col=[0],
)
test1 = EC_index.loc[EC_index.basename.isin(eis_metaf.basename.unique())]
if "not" in run:
_not_porph = EC_index.loc[
~EC_index.SampleID.isin(eis_metaf.SampleID.unique())
& EC_index.PAR_exp.str.contains("EIS")
]
test1 = _not_porph.loc[
_not_porph.SampleID.str.contains("DW|JOS12|JOS14|JOS15")
]
elif "samples" in run:
test1 = EC_index.loc[
EC_index.SampleID.isin(eis_metaf.SampleID.unique())
& ~EC_index.PAR_exp.str.contains("EIS")
]
if "postAST" in run:
test1 = EC_index.loc[
EC_index.SampleID.isin(eis_metaf.SampleID.unique())
& EC_index.PAR_exp.str.contains("EIS")
& EC_index.basename.str.contains("AST")
]
# & (~EC_index.PAR_file.isin(eis_metaf.PAR_file.unique()))]
elif "JOS eismetaf" in run:
eis_metaf = pd.read_excel(
list(
FindExpFolder("PorphSiO2").compare.parent.rglob(
"2020-24-03_EIS_Porph_SiO2/meta_data*EIS*origin.xlsx"
)
)[0],
index_col=[0],
)
JOS2 = pd.DataFrame()
if "ttN2" in run:
JOS2 = eis_metaf.loc[
eis_metaf.basename.str.contains("N2_EIS-range_1500rpm_JOS2_288")
]
JOS2 = eis_metaf.loc[
eis_metaf.basename.str.contains("N2_EIS-range_1500rpm_JOS4_288")
]
JOS2 = eis_metaf.loc[
eis_metaf.basename.str.contains("N2_EIS-range_1500rpm_JOS1_288")
]
elif "ttO2" in run:
JOS4 = eis_metaf.loc[
eis_metaf.basename.str.contains("O2_EIS-range_1500rpm_JOS5_285")
]
JOS2 = eis_metaf.loc[
eis_metaf.basename.str.contains("O2_EIS-range_1500rpm_JOS2_288")
]
JOS2 = eis_metaf.loc[
eis_metaf.basename.str.contains("O2_EIS-range_1500rpm_JOS1_285")
]
else:
JOS4 = eis_metaf.loc[
eis_metaf.basename.str.contains("EIS-range_1500rpm_JOS2")
]
# JOS4 = eis_metaf.loc[eis_metaf.basename.str.contains('JOS5')]
# eis_metaf.loc[eis_metaf.basename.str.contains('O2_EIS-range_1500rpm_JOS5_285|N2_EIS-range_1500rpm_JOS5_285')]
if not JOS2.empty:
eis_metaf = JOS2
test1 = EC_index.loc[EC_index.basename.isin(eis_metaf.basename.unique())]
elif "pAST-sHA_JOS7" | |
from flask import Flask, render_template, request, session, redirect, url_for, flash, jsonify, g, json
from flask_babel import Babel
from flask_uploads import UploadSet, IMAGES, configure_uploads
from flaskext.mysql import MySQL
from werkzeug import generate_password_hash, check_password_hash
from datetime import datetime
from forms import LoginForm, RetrievalForm, AddUserForm, CreateNewItem,AddNewLocation,ExistingItemsLocation, RemoveItem, TransferItem
from apscheduler.schedulers.background import BackgroundScheduler
# from exceptions import InsufficientQtyError, ContainsItemsError
# from apscheduler.jobstores.mongodb import MongoDBJobStore
# from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
import os, copy, re, csv, json_decode, imaging, pytz
# from flask.ext.cache import Cache
# pip2 install flask
# pip2 install mysql-python
# pip2 install mysqlclient
# pip2 install flask-SQLAlchemy
# pip2 install flask-babel
# pip2 install flask-wtf
# pip2 install flask-mysql
# pip2 install flask-uploads
# pip2 install pytz
# pip2 install numpy
# pip2 install scipy
# pip2 install statsmodels
# pip2 install pandas
# pip2 install Pillow
# eb init -p python2.7 aim
# eb init
# eb create flask-env
# eb open
# eb terminate flask-env
##########################
## CONFIG ##
##########################
# Note: We don't need to call run() since our application is embedded within
# the App Engine WSGI application server.
application = Flask(__name__, instance_relative_config=True)
application.config.from_object('config.DevConfig') # default configurations
application.config.from_pyfile('config.py') # override with instanced configuration (in "/instance"), if any
#application.config.from_pyfile('myConfig1.py')
#application.config.from_pyfile('myConfig2.py')
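# Hedged sketch: for local development outside the App Engine WSGI server, the
# app could still be started manually at the bottom of this module, e.g.:
# if __name__ == '__main__':
#     application.run(host='0.0.0.0', port=5000, debug=True)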
# Babel init
babel = Babel(application)
# mysql init
mysql = MySQL()
mysql.init_app(application)
# global vars
adminmode = False
role = ""
# Configure the image uploading via Flask-Uploads
photos = UploadSet('images', IMAGES)
configure_uploads(application, photos)
sched = BackgroundScheduler()
# jobstores = {
# 'mongo': MongoDBJobStore(),
# 'default': SQLAlchemyJobStore(url='sqlite:///jobs.sqlite')
# }
# Exception classes, feel free to use.
# Called in admin()
class InsufficientQtyError(Exception):
pass
class ContainsItemsError(Exception):
pass
class InvalidPasswordError(Exception):
pass
###########################
## METHODS ##
###########################
# TODO: encapsulate all methods in separate classes and .py files
# Query for form select fields.
# Currently called by admin()
def choices(table, column, *args):
choices = []
conn = mysql.connect()
cursor = conn.cursor()
query = "SELECT {} FROM {}".format(column, table)
cursor.execute(query)
data1 = cursor.fetchall()
data2 = sorted(set(list(data1)))
for i in data2:
y=str(i[0])
x=(y,y)
choices.append(x)
return choices
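# Hedged usage sketch (table/column names are illustrative; `form` is assumed
# to be a WTForms form with a SelectField):
# form.location.choices = choices('TagInfo', 'storeroom')
# # -> e.g. [('Level 1', 'Level 1'), ('Level 2', 'Level 2')]
# The duplicated (value, label) tuples are the shape SelectField.choices expects.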
# For populating select fields in admin forms with tags.
# Called by admin()
def storeTags():
choices = []
cursor = mysql.connect().cursor()
cursor.execute("SELECT tid, tname, storeroom FROM TagInfo;")
data = sorted(set(list(cursor.fetchall())))
for d in data:
value = d[0]
text = str(d[2]) + " - " + str(d[1])
pair = (value, text)
choices.append(pair)
return choices
def getAllTags():
cursor = mysql.connect().cursor()
cursor.execute("SELECT tid, tname, storeroom, remarks FROM TagInfo;")
data = sorted(set(list(cursor.fetchall())))
allTags = []
for i in data:
allTags.append({
'tid': i[0],
'tname': i[1].encode('ascii'),
'storeroom': i[2].encode('ascii'),
'remarks': i[3].encode('ascii')
})
return allTags
# Returns all the items based on category and amount in or out within the last month for each item
# Called by category()
def getAllInventory(category):
conn = mysql.connect()
cursor = conn.cursor()
cursor.execute(
"SELECT iid, name, qty_left, reorder_pt, out_by, picture, category, ROUND(price,2) FROM Ascott_InvMgmt.view_item_locations WHERE category = '{}';".format(category))
data = cursor.fetchall()
print(data)
# cursor.execute(
# "SELECT DISTINCT iid FROM Ascott_InvMgmt.Items WHERE category = '{}';".format(category))
# unique_iid = cursor.fetchall()
# print(unique_iid)
items = []
counter = 0
for item in data:
if item[0] == counter:
pass
else:
cursor.execute(
"SELECT action, qty_moved FROM Ascott_InvMgmt.Logs WHERE month(date_time) = month(now()) AND year(date_time) = year(now()) AND item={};".format(item[0]))
in_out_data = cursor.fetchall()
delivered_out = 0
received = 0
for i in in_out_data:
if i[0].encode('ascii') == 'out':
delivered_out = delivered_out + (-1*int(i[1]))
elif i[0].encode('ascii') == "in":
received = received + int(i[1])
value_in = received*item[7]
value_out = delivered_out*item[7]
cursor.execute(
"SELECT qty_left FROM Ascott_InvMgmt.view_item_locations WHERE iid={};".format(item[0]))
location_qty = cursor.fetchall()
remaining_quantity = 0
for i in location_qty:
remaining_quantity += i[0]
initial_quantity = remaining_quantity + delivered_out - received
items.append(
{"iid":item[0],
"name": item[1],
"remaining": remaining_quantity,
"reorder": item[3],
"unit": item[4],
"starting": initial_quantity,
"received": received,
"demand": delivered_out,
"picture": item[5].encode('ascii'),
"category": item[6].encode('ascii'),
"value_in": value_in,
"value_out": value_out,
"price": item[7]
})
counter = item[0]
return items
# Quick query for inventory for mobile and web Inventory views.
# Called by inventory() and shelf()
# If location is None, we can infer that user has admin rights, and can therefore see the qty left.
def inventoryQuick(location):
items = []
conn = mysql.connect()
cursor = conn.cursor()
if location == None:
cursor.execute("""SELECT iid, name, category, picture, SUM(qty_left), reorder_pt, out_by FROM view_item_locations
GROUP BY iid;""")
data = cursor.fetchall()
for d in data:
items.append(
{"iid":d[0],
"name": d[1].encode('ascii'),
"category": d[2].encode('ascii'),
"picture": d[3].encode('ascii'),
"remaining": d[4],
"reorder": d[5],
"unit": d[6].encode('ascii')
})
else:
cursor.execute("""SELECT iid, name, category, picture, out_by FROM view_item_locations
WHERE tag='{}' AND reorder_pt >= 0;""".format(location))
data = cursor.fetchall()
conn.commit()
for d in data:
items.append(
{"iid":d[0],
"name": d[1].encode('ascii'),
"category": d[2].encode('ascii'),
"picture":d[3].encode('ascii'),
"unit":d[4].encode('ascii')
})
return items
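# Hedged usage sketch: inventoryQuick(None) yields the admin view, aggregating
# SUM(qty_left) per item across all locations; inventoryQuick(3) (a tag id,
# value illustrative) yields only the items shelved under that tag, with no
# quantities exposed.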
# Stock Update Function for RA, Runner and Supervisors.
# Called by item() and shelf().
# Returns a [message, category] pair: ['Success!', 'success'] on success, an error message paired with 'danger' otherwise.
def stockUpdate(iid, tagId, inputQty, user, action, time):
try:
conn = mysql.connect()
cursor = conn.cursor()
print(iid, tagId)
cursor.execute("SELECT qty_left, in_out_ratio FROM view_item_locations WHERE iid='{}' AND tag='{}';".format(iid, tagId))
data = cursor.fetchall()
print("data" ,data)
old_qty = data[0][0]
if action == 'out':
qty_left = old_qty - inputQty
qty_diff = inputQty * (-1) # make qty_input negative to reflect taking qty OUT of store.
if qty_left < 0:
raise InsufficientQtyError("Not enough in store!")
elif action == 'in':
print inputQty
print(inputQty*data[0][1])
qty_left = old_qty + inputQty*data[0][1]
qty_diff = qty_left - old_qty
else:
qty_left = inputQty
qty_diff = qty_left - old_qty # change the value of qty to the difference
conn = mysql.connect()
cursor = conn.cursor()
update_items_query = "UPDATE TagItems SET qty_left={} WHERE iid={} AND tag={};".format(qty_left, iid, tagId)
# general query for all actions
# print(update_items_query)
cursor.execute(update_items_query)
conn.commit()
conn = mysql.connect()
cursor = conn.cursor()
cursor.execute("SELECT storeroom FROM TagInfo WHERE tid={};".format(tagId))
location = cursor.fetchall()[0][0]
# Log action
# conn = mysql.connect()
# cursor = conn.cursor()
update_logs_query = """INSERT INTO Logs (user, date_time, action, qty_moved, qty_left, item, location)
VALUES ('{}', '{}', '{}', {}, {}, {}, '{}');""".format(user, time, action, qty_diff, qty_left, iid, location)
# print(update_logs_query)
cursor.execute(update_logs_query)
conn.commit()
return ['Success!', "success"]
except InsufficientQtyError as e:
return [e.args[0], "danger"]
except Exception as e:
return ["STOCK UPDATE ERROR: %s" % e, "danger"]
# Returns all the items based on location. KIV for possible supervisor view filtering.
def getFromLevels(location):
conn = mysql.connect()
cursor = conn.cursor()
cursor.execute("SELECT name, category, tag FROM Ascott_InvMgmt.view_item_locations WHERE tag={};".format(location))
data=cursor.fetchall()
things = []
for item in data:
things.append(
{"name": item[0],
"category": item[1],
"location":item[2]})
return things
# Returns the logs that occurred within the current month.
# Called by logs()
def getAllLogs():
conn = mysql.connect()
cursor = conn.cursor()
cursor.execute(
"SELECT user, date_time, action, qty_moved, qty_left, item, location FROM Ascott_InvMgmt.Logs WHERE month(date_time) = month(now()) AND year(date_time) = year(now());")
data=cursor.fetchall()
print(data)
things = []
if data != None:
for row in data:
print(row[5])
cursor.execute("SELECT name, category FROM Items WHERE iid={};".format(row[5]))
info = cursor.fetchall()[0]
item_name = info[0].encode('ascii')
category = info[1].encode('ascii')
things.append({"name": row[0].encode('ascii'),
"dateTime": row[1],
"action":row[2],
"move":row[3],
"remaining":row[4],
"category":category,
"item":item_name,
"location":row[6]})
# print(things)
return things
# Returns inventory items that are below threshold levels
# Called by dashboard()
def getInventoryLow():
THRESHOLD = 1.2
cursor = mysql.connect().cursor()
cursor.execute("""SELECT iid, name, qty_left, reorder_pt, picture, category, out_by FROM Ascott_InvMgmt.view_item_locations
WHERE qty_left <= '"""+str(THRESHOLD)+"""'*reorder_pt AND
qty_left > 0
ORDER BY name ASC;""")
data = cursor.fetchall()
r = []
for i in data:
r.append({"iid": i[0],
"name": i[1].encode('ascii'),
"qty_left": i[2],
"reorder_pt": i[3],
"picture": i[4].encode('ascii'),
"category": i[5].encode('ascii'),
"unit":i[6].encode('ascii')})
return r
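# Worked example of the threshold above: with THRESHOLD = 1.2, an item whose
# reorder_pt is 10 is flagged as soon as qty_left drops to 12 (1.2 * 10) or
# below, giving a 20% early-warning margin before the actual reorder point.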
# Called by dashboard()
def getDailyLogs():
conn = mysql.connect()
cursor = conn.cursor()
cursor.execute(
"SELECT user, date_time, action, qty_moved, qty_left, item, location FROM Ascott_InvMgmt.Logs WHERE day(date_time) = day(now());")
conn.commit()
data=cursor.fetchall()
things = []
for row in data:
cursor = mysql.connect().cursor()
cursor.execute("SELECT name FROM Items WHERE iid={};".format(row[5]))
item_name = cursor.fetchall()[0][0]
things.append({"name": row[0].encode('ascii'),
"dateTime": row[1],
"action":row[2].encode('ascii'),
"move":row[3],
"remaining":row[4],
"item":item_name.encode('ascii'),
"location":row[6].encode('ascii')})
return things
# POST for getting chart data
@application.route('/api/getChartData', methods=["POST"])
def getChartData():
print "CHART: content_type - ", request.content_type
print "CHART: request.json - ", request.json
if not request.json:
print "CHART: Bad JSON format, aborting chart creation..."
page_not_found(400)
else:
items = request.get_json()
iid = items[0]["iid"]
r = []
conn = mysql.connect()
cursor = conn.cursor()
for i in items:
# get transaction logs per tag
tag = i["tag"]
query = "SELECT date_time, qty_left FROM Ascott_InvMgmt.Logs WHERE item = {} AND location = '{}'".format(iid, tag)
cursor.execute(query)
data = cursor.fetchall()
r.append({
"loc": i["location"],
"val": data})
return jsonify(r)
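# Hedged sketch of the JSON this endpoint expects (field names taken from the
# handler above; values illustrative):
# POST /api/getChartData
# [{"iid": 7, "tag": 3, "location": "Level 3 - Store A"},
#  {"iid": 7, "tag": 4, "location": "Level 4 - Store B"}]
# The response holds one {"loc": ..., "val": [(date_time, qty_left), ...]}
# entry per requested tag.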
# POST for getting chart data
@application.route('/api/editReorder', methods=["POST"])
def editReorder():
print "REORDER: content_type - ", request.content_type
print "REORDER: request.json - ", request.json
if not request.json:
print "REORDER: Bad JSON format, aborting reorder modification..."
page_not_found(400)
else:
data = request.get_json()
name = data["name"].encode('ascii')
reorder = data["reorder"]
conn = mysql.connect()
cursor = conn.cursor()
cursor.execute(
"UPDATE Ascott_InvMgmt.Items SET reorder_pt={} WHERE (name='{}' | |
side='right') - 1
monthtime = startyear - self._yearday_splits[startmonth_idx]
# fix up the leapyears with a different yearday split table
leapmask = (year % 4) == 0
startmonth_idx_leap = self._yearday_splits_leap.searchsorted(startyear[leapmask], side='right') - 1
monthtime[leapmask] = startyear[leapmask] - self._yearday_splits_leap[startmonth_idx_leap]
# unlike month, weekday, hour, etc. day of month starts at 1
monthday = monthtime + 1
return _apply_inv_mask(self, monthday)
# ------------------------------------------------------------
@property
def day_of_week(self):
'''
Returns array of integers from Monday (0) -> Sunday (6)
>>> d = Date(['2019-02-11', '2019-02-12', '2019-02-13', '2019-02-14', '2019-02-15', '2019-02-16', '2019-02-17'])
>>> d.day_of_week
FastArray([0, 1, 2, 3, 4, 5, 6])
'''
arr = (self._fa + EPOCH_DAY_OF_WEEK) % 7
return _apply_inv_mask(self, arr)
# ------------------------------------------------------------
@property
def is_weekend(self):
'''
Returns boolean array, True when day of week is Saturday or Sunday
>>> d = Date(['2019-02-11', '2019-02-12', '2019-02-13', '2019-02-14', '2019-02-15', '2019-02-16', '2019-02-17'])
>>> d.is_weekend
FastArray([False, False, False, False, False, True, True])
'''
return _apply_inv_mask(self, self.day_of_week > 4)
# ------------------------------------------------------------
@property
def is_weekday(self):
'''
Returns boolean array, True when day of week is Monday-Friday
>>> d = Date(['2019-02-11', '2019-02-12', '2019-02-13', '2019-02-14', '2019-02-15', '2019-02-16', '2019-02-17'])
>>> d.is_weekday
FastArray([ True, True, True, True, True, False, False])
'''
return _apply_inv_mask(self, self.day_of_week < 5)
# ------------------------------------------------------------
@property
def seconds_since_epoch(self):
'''
Many existing python datetime routines expect seconds since epoch.
This call is to eliminate "magic numbers" like 3600 from code.
'''
return _apply_inv_mask(self, self._fa * SECONDS_PER_DAY)
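# Hedged example: a Date stores whole days since the UNIX epoch, so
# Date(['1970-01-02']).seconds_since_epoch should yield a FastArray holding
# 86400, i.e. 1 day * SECONDS_PER_DAY.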
# ------------------------------------------------------------
@classmethod
def hstack(cls, dates):
'''
hstacks Date objects and returns a new Date object.
Will be called by riptable.hstack() if the first item in the sequence is a Date object.
Parameters:
-----------
dates : list or tuple of Date objects
>>> d1 = Date('2015-02-01')
>>> d2 = Date(['2016-02-01', '2017-02-01', '2018-02-01'])
>>> hstack([d1, d2])
Date([2015-02-01, 2016-02-01, 2017-02-01, 2018-02-01])
'''
# pass the subclass to the parent class routine
return hstack_any(dates, cls, Date)
# ------------------------------------------------------------
@classmethod
def range(cls, start, end=None, days=None, step=1, format=None, closed=None):
"""
Returns a Date object of dates from start date to end date.
Parameters
----------
start : str or int
Start date in int format YYYYMMDD, or string in ``format``.
end : str or int, optional
End date in int format YYYYMMDD, or string in ``format``.
If not specified, days is required.
days : int, optional (required if ``end`` is None)
Number of days to generate.
step : int, optional, default 1
Spacing between date values.
format : str, optional
Format to convert start/end values if they are string
closed : `left`, `right`, or None (default)
If `left`, omit the end date.
If `right`, omit the start date.
If None, include both.
Only applies when constructing from start, end date with step of 1.
Examples
--------
>>> Date.range('2019-02-01', '2019-02-07')
Date([2019-02-01, 2019-02-02, 2019-02-03, 2019-02-04, 2019-02-05, 2019-02-06, 2019-02-07])
>>> Date.range('2019-02-01', '2019-02-07', step=2)
Date([2019-02-01, 2019-02-03, 2019-02-05])
>>> Date.range('2019-02-01', '2019-02-07', closed='right')
Date([2019-02-02, 2019-02-03, 2019-02-04, 2019-02-05, 2019-02-06, 2019-02-07])
Returns
-------
`Date`
Range of dates in given interval spaced by `step`.
"""
if isinstance(start, (int, np.integer)):
start = str(start)
# convert separately for more accurate error
if isinstance(start, (str, bytes)):
start = cls(start, format=format)._fa[0]
else:
raise TypeError(f'Start date must be string or integer. Got {type(start)}')
if end is None:
if days is None:
raise ValueError(f'Must set either ``end`` or ``days`` keyword.')
# compensate for step
end = start + (days * step)
end = cls(end)._fa[0]
else:
if isinstance(end, (int, np.integer)):
end = str(end)
if not isinstance(end, (str, bytes)):
raise TypeError(f'End date must be string or integer. Got {type(end)}')
end = cls(end, format=format)._fa[0]
if days is None and step == 1:
# include one or both ends
if closed is None:
end += 1
elif closed == 'right':
end += 1
start += 1
elif closed == 'left':
pass
else:
raise ValueError(f'Closed has to be either "left", "right" or None. Got {closed}')
arr = arange(start, end, step, dtype=np.int32)
return cls(arr)
# ------------------------------------------------------------
def _date_compare_check(self, funcname, other):
'''
Funnel for all comparison operations.
Helps Date interact with DateTimeNano, TimeSpan.
'''
caller = self._fa
if isinstance(other, (DateSpan, TimeSpan, DateSpanScalar, TimeSpanScalar)):
raise TypeError(f'Cannot perform {funcname} comparison operation between {type(self)} and {type(other)}.')
elif isinstance(other, DateTimeNano):
to_tz = other._timezone._to_tz
# fix the timezone to match the display of the DateTimeNano
caller = DateTimeNano(self._fa * NANOS_PER_DAY, from_tz=to_tz, to_tz=to_tz)
# looks weird now, keeping explicit branches in case any forbidden types appear
elif isinstance(other, Date):
other = other._fa
elif isinstance(other, (str, bytes)):
other = Date(other)
# Categorical will fall through to constructor too
elif isinstance(other, np.ndarray):
other = Date(other)
# let everything else fall through for FastArray to catch
# restore invalids
return self._preserve_invalid_comparison(caller, other, funcname)
# -------------------COMPARISONS------------------------------
# ------------------------------------------------------------
def __ne__(self, other):
return self._date_compare_check('__ne__', other)
def __eq__(self, other):
return self._date_compare_check('__eq__', other)
def __ge__(self, other):
return self._date_compare_check('__ge__', other)
def __gt__(self, other):
return self._date_compare_check('__gt__', other)
def __le__(self, other):
return self._date_compare_check('__le__', other)
def __lt__(self, other):
return self._date_compare_check('__lt__', other)
# ------------------------------------------------------------
def __add__(self, value):
'''
Addition rules:
------------------
Date + Date = TypeError
Date + DateTimeNano = TypeError
Date + DateSpan = Date
Date + TimeSpan = DateTimeNano
All other operands will be treated as DateSpan and return Date.
'''
return self._funnel_mathops('__add__', value)
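# Hedged example of the rules above: Date(['2019-01-01']) + 1 yields
# Date(['2019-01-02']) (a plain integer is treated as a DateSpan of days),
# while adding a TimeSpan promotes the result to DateTimeNano.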
def __iadd__(self, value):
return self._funnel_mathops('__iadd__', value)
def __radd__(self, value):
return self._funnel_mathops('__add__', value)
# ------------------------------------------------------------
def __sub__(self, value):
'''
Subtraction rules:
------------------
Date - Date = DateSpan
Date - DateSpan = Date
Date - DateTimeNano = TimeSpan
Date - TimeSpan = DateTimeNano
All other operands will be treated as DateSpan and return Date.
'''
if isinstance(value, Date):
func = TypeRegister.MathLedger._BASICMATH_TWO_INPUTS
# need routine for int32 - int32 => int32 (operands have 0 as invalid, result has sentinel as invalid)
# right now, using the double return, gets recasted in the constructor
op = MATH_OPERATION.SUBDATETIMES
functup = (self, value)
result = func(functup, op, 0)
return DateSpan(result)
elif isinstance(value, DateTimeNano):
caller = DateTimeNano(self._fa * NANOS_PER_DAY, from_tz=value._timezone._from_tz)
return caller - value
else:
return self._funnel_mathops('__sub__', value)
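# Hedged example of the rules above: Date(['2019-01-03']) - Date(['2019-01-01'])
# yields DateSpan([2]), whereas Date(['2019-01-03']) - 2 stays a Date.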
def __isub__(self, value):
return self._funnel_mathops('__isub__', value)
def __rsub__(self, value):
if isinstance(value, (Date, DateTimeNano)):
return value.__sub__(self)
else:
raise NotImplementedError
def __mul__(self, other): raise NotImplementedError
def __matmul__(self, other): raise NotImplementedError
# need to check properties to see if division is happening
#def __truediv__(self, other): raise NotImplementedError
#def __floordiv__(self, other): raise NotImplementedError
#def __mod__(self, other): raise NotImplementedError
#def __divmod__(self, other): raise NotImplementedError
def __pow__(self, other, modulo=None): raise NotImplementedError
def __lshift__(self, other): raise NotImplementedError
def __rshift__(self, other): raise NotImplementedError
def __and__(self, other): raise NotImplementedError
def __xor__(self, other): raise NotImplementedError
def __or__(self, other): raise NotImplementedError
def __rmul__(self, other): raise NotImplementedError
def __rmatmul__(self, other): raise NotImplementedError
def __rtruediv__(self, other): raise NotImplementedError
def __rfloordiv__(self, other): raise NotImplementedError
def __rmod__(self, other): raise NotImplementedError
def __rdivmod__(self, other): raise NotImplementedError
def __rpow__(self, other): raise NotImplementedError
def __rlshift__(self, other): raise NotImplementedError
def __rrshift__(self, other): raise NotImplementedError
def __rand__(self, other): raise NotImplementedError
def __rxor__(self, other): raise NotImplementedError
def __ror__(self, other): raise NotImplementedError
def __imul__(self, other): raise NotImplementedError
def __imatmul__(self, other): raise NotImplementedError
def __itruediv__(self, other): raise NotImplementedError
def __ifloordiv__(self, other): raise NotImplementedError
def __imod__(self, other): raise NotImplementedError
def __ipow__(self, other, modulo=None): raise NotImplementedError
def __ilshift__(self, other): raise NotImplementedError
def __irshift__(self, other): raise NotImplementedError
def __iand__(self, other): raise NotImplementedError
def __ixor__(self, other): raise NotImplementedError
def __ior__(self, other): raise NotImplementedError
def __neg__(self): raise NotImplementedError
def __pos__(self): raise NotImplementedError
def __abs__(self): raise NotImplementedError
def __invert__(self): raise NotImplementedError
def __complex__(self): raise NotImplementedError
def __int__(self): raise NotImplementedError
def __float__(self): raise NotImplementedError
def __round__(self, ndigits=0): raise NotImplementedError
def __trunc__(self): raise NotImplementedError
def __floor__(self): raise NotImplementedError
def __ceil__(self): raise NotImplementedError
# ------------------------------------------------------------
def _check_mathops(self, funcname, value):
'''
This gets called after a math operation has been performed on the Date's FastArray.
Return type may differ based on operation. Preserves invalids from original input.
Parameters:
-----------
funcname : name of ufunc
value : original operand in math operation
returns return_type, other_inv_mask
'''
# for now, make Date the default return type
return_type = Date
other_inv_mask = None
if isinstance(value, Date):
if funcname in ('__add__', '__iadd__', '__isub__'):
raise TypeError(f'Cannot perform {funcname} operation between Date and Date')
# -*- coding:utf-8 -*-
"""
* Copyright@2016 Jingtum Inc. or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
from config import Config
from account import FinGate
from logger import logger
from server import APIServer
from serialize import JingtumBaseDecoder
class JingtumOperException(Exception):
pass
class Operation(FinGate):
def __init__(self, src_address):
super(Operation, self).__init__()
self.src_address = src_address
self.src_secret = ""
self.is_sync = False
self.api_helper = APIServer()
from server import g_test_evn
if g_test_evn:
self.api_helper.setTest(True)
self.validateAddress(src_address)
def validateAddress(self, address):
if not JingtumBaseDecoder.verify_checksum(JingtumBaseDecoder.decode_base(address, 25)):
raise JingtumOperException("Invalid address: %s" % str(address))
def submit(self):
#print self.oper()
from server import g_test_evn
if g_test_evn:
self.api_helper.setTest(True)
return self.api_helper.post(*self.oper())
def addSrcSecret(self, src_secret):
self.src_secret = src_secret
def addSync(self, is_sync):
self.is_sync = is_sync
class PaymentOperation(Operation):
def __init__(self, src_address):
super(PaymentOperation, self).__init__(src_address)
self.amt = {}
self.dest_address = ""
self.path = ""
def para_required(func):
def _func(*args, **args2):
if len(args[0].amt) == 0:
#logger.error("addAmount first:" + func.__name__)
raise JingtumOperException("addAmount first before oper.")
elif args[0].dest_address == "":
#logger.error("addDestAddress first:" + func.__name__)
raise JingtumOperException("addDestAddress first before oper.")
elif args[0].src_secret == "":
#logger.error("addDestAddress first:" + func.__name__)
raise JingtumOperException("addSrcSecret first before oper.")
else:
back = func(*args, **args2)
return back
return _func
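# para_required is a guard decorator: it verifies the caller has supplied the
# mandatory fields (amount, destination address, source secret) before oper()
# may build the request payload, raising JingtumOperException otherwise.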
def addAmount(self, currency_type, currency_value, issuer=""):
self.amt["value"] = str(currency_value)
self.amt["currency"] = str(currency_type)
self.amt["issuer"] = str(issuer)
def addDestAddress(self, dest_address):
self.dest_address = dest_address
def addPath(self, path):
self.path = path
@para_required
def oper(self):
_payment = {}
_payment["destination_amount"] = self.amt
_payment["source_account"] = self.src_address
_payment["destination_account"] = self.dest_address
_payment["payment_path"] = self.path
_para = {}
_para["secret"] = self.src_secret
_para["payment"] = _payment
_para["client_resource_id"] = self.getNextUUID()
if self.is_sync:
url = 'accounts/{address}/payments?validated=true'
else:
url = 'accounts/{address}/payments'
url = url.format(address=self.src_address)
return url, _para
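# Hedged usage sketch (addresses and secret are placeholders):
# oper = PaymentOperation('jSOURCE...')
# oper.addSrcSecret('sSECRET...')
# oper.addDestAddress('jDEST...')
# oper.addAmount('SWT', 1)
# result = oper.submit() # POSTs to accounts/{address}/payments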
class OrderOperation(Operation):
def __init__(self, src_address):
super(OrderOperation, self).__init__(src_address)
self.order_type = "buy"
self.takerpays = {}
self.takergets = {}
def para_required(func):
def _func(*args, **args2):
if len(args[0].takerpays) == 0:
#logger.error("setTakePays first:" + func.__name__)
raise JingtumOperException("setTakePays first before oper.")
elif len(args[0].takergets) == 0:
#logger.error("setTakeGets first:" + func.__name__)
raise JingtumOperException("setTakeGets first before oper.")
elif args[0].src_secret == "":
#logger.error("addDestAddress first:" + func.__name__)
raise JingtumOperException("addSrcSecret first before oper.")
else:
back = func(*args, **args2)
return back
return _func
def setOrderType(self, is_sell):
self.order_type = "sell" if is_sell else "buy"
def setTakePays(self, currency_type, currency_value, counterparty=""):
self.takerpays["value"] = str(currency_value)
self.takerpays["currency"] = str(currency_type)
self.takerpays["counterparty"] = str(counterparty)
def setTakeGets(self, currency_type, currency_value, counterparty=""):
self.takergets["value"] = str(currency_value)
self.takergets["currency"] = str(currency_type)
self.takergets["counterparty"] = str(counterparty)
@para_required
def oper(self):
_order = {}
_order["type"] = self.order_type
_order["taker_pays"] = self.takerpays
_order["taker_gets"] = self.takergets
_para = {}
_para["secret"] = self.src_secret
_para["order"] = _order
if self.is_sync:
url = 'accounts/{address}/orders?validated=true'
else:
url = 'accounts/{address}/orders'
url = url.format(address=self.src_address)
return url, _para
class CancelOrderOperation(Operation):
"""docstring for CancelOrder"""
def __init__(self, src_address):
super(CancelOrderOperation, self).__init__(src_address)
self.order_num = 0
def para_required(func):
def _func(*args, **args2):
if args[0].order_num == 0:
#logger.error("setOrderNum first:" + func.__name__)
raise JingtumOperException("setOrderNum first before oper.")
elif args[0].src_secret == "":
#logger.error("addDestAddress first:" + func.__name__)
raise JingtumOperException("addSrcSecret first before oper.")
else:
back = func(*args, **args2)
return back
return _func
def setOrderNum(self, order_num):
self.order_num = order_num
@para_required
def oper(self):
_para = {}
_para["secret"] = self.src_secret
if self.is_sync:
url = 'accounts/{address}/orders/{order}?validated=true'
else:
url = 'accounts/{address}/orders/{order}'
url = url.format(address=self.src_address, order=self.order_num)
return url, _para, "DELETE"
class AddRelation(Operation):
def __init__(self, src_address):
super(AddRelation, self).__init__(src_address)
self.amt = {}
self.counterparty = ""
self.relation_type = ""
def para_required(func):
def _func(*args, **args2):
if len(args[0].amt) == 0:
#logger.error("addAmount first:" + func.__name__)
raise JingtumOperException("addAmount first before oper.")
elif args[0].relation_type == "":
#logger.error("setRelationType first:" + func.__name__)
raise JingtumOperException("setRelationType first before oper.")
elif args[0].counterparty == "":
#logger.error("setCounterparty first:" + func.__name__)
raise JingtumOperException("setCounterparty first before oper.")
elif args[0].src_secret == "":
#logger.error("addDestAddress first:" + func.__name__)
raise JingtumOperException("addSrcSecret first before oper.")
else:
back = func(*args, **args2)
return back
return _func
def addAmount(self, currency_type, currency_value, issuer=""):
self.amt["limit"] = str(currency_value)
self.amt["currency"] = str(currency_type)
self.amt["issuer"] = str(issuer)
def setCounterparty(self, counterparty):
self.counterparty = counterparty
def setRelationType(self, relation_type):
self.relation_type = relation_type
@para_required
def oper(self):
_para = {}
_para["secret"] = self.src_secret
_para["type"] = self.relation_type
_para["counterparty"] = self.counterparty
_para["amount"] = self.amt
if self.is_sync:
url = 'accounts/{address}/relations?validated=true'
else:
url = 'accounts/{address}/relations'
url = url.format(address=self.src_address)
return url, _para
class RemoveRelation(Operation):
def __init__(self, src_address):
super(RemoveRelation, self).__init__(src_address)
self.amt = {}
self.counterparty = ""
self.relation_type = ""
def para_required(func):
def _func(*args, **args2):
if len(args[0].amt) == 0:
#logger.error("addAmount first:" + func.__name__)
raise JingtumOperException("addAmount first before oper.")
elif args[0].relation_type == "":
#logger.error("setRelationType first:" + func.__name__)
raise JingtumOperException("setRelationType first before oper.")
elif args[0].counterparty == "":
#logger.error("setCounterparty first:" + func.__name__)
raise JingtumOperException("setCounterparty first before oper.")
elif args[0].src_secret == "":
#logger.error("addDestAddress first:" + func.__name__)
raise JingtumOperException("addSrcSecret first before oper.")
else:
back = func(*args, **args2)
return back
return _func
def addAmount(self, currency_type, currency_value, issuer=""):
self.amt["limit"] = str(currency_value)
self.amt["currency"] = str(currency_type)
self.amt["issuer"] = str(issuer)
def setCounterparty(self, counterparty):
self.counterparty = counterparty
def setRelationType(self, relation_type):
self.relation_type = relation_type
@para_required
def oper(self):
_para = {}
_para["secret"] = self.src_secret
_para["type"] = self.relation_type
_para["counterparty"] = self.counterparty
_para["amount"] = self.amt
url = 'accounts/{address}/relations'
url = url.format(address=self.src_address)
return url, _para, "DELETE"
class WalletSettings(Operation):
def __init__(self, src_address):
super(WalletSettings, self).__init__(src_address)
def oper(self):
_para = {}
_para["secret"] = self.src_secret
url = 'accounts/{address}/settings'
url = url.format(address=self.src_address)
return url, _para
class AddTrustLine(Operation):
def __init__(self, src_address):
super(AddTrustLine, self).__init__(src_address)
self.counterparty = ""
self.currency = ""
self.trust_limit = 0 # default; set via setLimit()
self.frozen = False
def para_required(func):
def _func(*args, **args2):
if len(args[0].counterparty) == 0:
#logger.error("setCounterparty first:" + func.__name__)
raise JingtumOperException("setCounterparty first before oper.")
elif args[0].currency == "":
#logger.error("setCurrency first:" + func.__name__)
raise JingtumOperException("setCurrency first before oper.")
elif args[0].src_secret == "":
#logger.error("addDestAddress first:" + func.__name__)
raise JingtumOperException("addSrcSecret first before oper.")
else:
back = func(*args, **args2)
return back
return _func
def setCounterparty(self, counterparty):
self.counterparty = counterparty
def setLimit(self, limit):
self.trust_limit = limit
def setCurrency(self, currency):
self.currency = currency
def setTrustlineFrozen(self, frozen):
self.frozen = frozen
@para_required
def oper(self):
_trust = {}
_trust["limit"] = self.trust_limit
_trust["currency"] = self.currency
_trust["counterparty"] = self.counterparty
_trust["account_trustline_frozen"] = self.frozen
_para = {}
_para["secret"] = self.src_secret
_para["trustline"] = _trust
if self.is_sync:
url = 'accounts/{address}/trustlines?validated=true'
else:
url = 'accounts/{address}/trustlines'
url = url.format(address=self.src_address)
return url, _para
class RemoveTrustLine(Operation):
def __init__(self, src_address):
super(RemoveTrustLine, self).__init__(src_address)
self.counterparty = ""
self.currency = ""
self.trust_limit = 0 # default; set via setLimit()
self.frozen = False
def para_required(func):
def _func(*args, **args2):
if len(args[0].counterparty) == 0:
#logger.error("setCounterparty first:" + func.__name__)
raise JingtumOperException("setCounterparty first before oper.")
elif args[0].currency == "":
#logger.error("setCurrency first:" + func.__name__)
raise JingtumOperException("setCurrency first before oper.")
elif args[0].src_secret == "":
#logger.error("addDestAddress first:" + func.__name__)
raise JingtumOperException("addSrcSecret first before oper.")
else:
back = func(*args, **args2)
return back
return _func
def setCounterparty(self, counterparty):
self.counterparty = counterparty
def setLimit(self, limit):
self.trust_limit = limit
def setCurrency(self, currency):
self.currency = currency
def setTrustlineFrozen(self, frozen):
self.frozen = frozen
@para_required
def oper(self):
_trust = {}
_trust["limit"] = 0
_trust["currency"] = self.currency
_trust["counterparty"] = self.counterparty
_trust["account_trustline_frozen"] = self.frozen
_para = {}
_para["secret"] = self.src_secret
_para["trustline"] = _trust
if self.is_sync:
url = 'accounts/{address}/trustlines?validated=true'
else:
url = 'accounts/{address}/trustlines'
url = url.format(address=self.src_address)
return url, _para
class SubmitMessage(Operation):
def __init__(self, src_address):
super(SubmitMessage, self).__init__(src_address)
self.destination_account = ""
self.message = ""
def para_required(func):
def _func(*args, **args2):
if len(args[0].destination_account) == 0:
#logger.error("setDestAddress first:" + func.__name__)
raise JingtumOperException("setDestAddress first before oper.")
elif len(args[0].message) == 0:
#logger.error("setMessage first:" + func.__name__)
raise JingtumOperException("setMessage first before oper.")
elif args[0].src_secret == "":
#logger.error("addDestAddress first:" + func.__name__)
raise JingtumOperException("addSrcSecret first before oper.")
else:
back = func(*args, **args2)
return back
return _func
def setDestAddress(self, destination_account):
self.destination_account = destination_account
def setMessage(self, message):
self.message = message
@para_required
def oper(self):
_para = {}
_para["secret"] = self.src_secret
_para["destination_account"] = self.destination_account
_para["message_hash"] = self.message
if self.is_sync:
url = 'accounts/{address}/messages?validated=true'
else:
url = 'accounts/{address}/messages'
url = url.format(address=self.src_address)
return url, _para
"D", "exdir_E")])
#----------------------------------------------------------------------
def update_lose_external(sbox):
"update to lose an external module"
external_url_for = externals_test_setup(sbox)
wc_dir = sbox.wc_dir
other_wc_dir = sbox.add_wc_path('other')
repo_url = sbox.repo_url
# Checkout two working copies.
svntest.actions.run_and_verify_svn(None, None, [],
'checkout',
repo_url, wc_dir)
svntest.actions.run_and_verify_svn(None, None, [],
'checkout',
repo_url, other_wc_dir)
# Lose one new external item from A/D. The lost item is
# "exdir_A", chosen because there are two other externals underneath
# it (G and H) which are not being removed. We expect them to
# remain -- in other words:
#
# BEFORE AFTER
# ------------ ------------
# A/D/exdir_A A/D/exdir_A
# A/D/exdir_A/.svn/... <GONE>
# A/D/exdir_A/mu <GONE>
# A/D/exdir_A/B/... <GONE>
# A/D/exdir_A/C/... <GONE>
# A/D/exdir_A/D/... <GONE>
# A/D/exdir_A/G/... A/D/exdir_A/G/...
# A/D/exdir_A/H/... A/D/exdir_A/H/...
new_externals_desc = \
external_url_for["A/D/exdir_A/G/"] + " exdir_A/G" + \
"\n" + \
"exdir_A/H -r 1 " + external_url_for["A/D/exdir_A/H"] + \
"\n" + \
external_url_for["A/D/x/y/z/blah"] + " x/y/z/blah" + \
"\n"
# Set and commit the property
change_external(sbox.ospath('A/D'), new_externals_desc)
# The code should handle a missing local externals item
svntest.main.safe_rmtree(os.path.join(other_wc_dir, "A", "D", "exdir_A", \
"D"))
# Update other working copy, see if lose & preserve things appropriately
expected_output = svntest.wc.State(other_wc_dir, {
'A/D' : Item(status=' U'),
'A/D/exdir_A' : Item(verb='Removed external'),
})
svntest.actions.run_and_verify_update(other_wc_dir,
expected_output, None, None)
expected_existing_paths = [
os.path.join(other_wc_dir, "A", "D", "exdir_A"),
os.path.join(other_wc_dir, "A", "D", "exdir_A", "G"),
os.path.join(other_wc_dir, "A", "D", "exdir_A", "H"),
]
probe_paths_exist(expected_existing_paths)
expected_missing_paths = [
os.path.join(other_wc_dir, "A", "D", "exdir_A", "mu"),
os.path.join(other_wc_dir, "A", "D", "exdir_A", "B"),
os.path.join(other_wc_dir, "A", "D", "exdir_A", "C"),
os.path.join(other_wc_dir, "A", "D", "exdir_A", "D"),
]
probe_paths_missing(expected_missing_paths)
#----------------------------------------------------------------------
def update_change_pristine_external(sbox):
"update change to an unmodified external module"
external_url_for = externals_test_setup(sbox)
wc_dir = sbox.wc_dir
other_wc_dir = sbox.add_wc_path('other')
repo_url = sbox.repo_url
other_repo_url = repo_url + ".other"
# Checkout two working copies.
svntest.actions.run_and_verify_svn(None, None, [],
'checkout',
repo_url, wc_dir)
svntest.actions.run_and_verify_svn(None, None, [],
'checkout',
repo_url, other_wc_dir)
# Change the "x/y/z/blah" external on A/D to point to a different
# URL. Since no changes were made to the old checked-out external,
# we should get a clean replace.
new_externals_desc = \
external_url_for["A/D/exdir_A"] + " exdir_A" + \
"\n" + \
external_url_for["A/D/exdir_A/G/"] + " exdir_A/G" + \
"\n" + \
"exdir_A/H -r 1 " + external_url_for["A/D/exdir_A/H"] + \
"\n" + \
"x/y/z/blah " + other_repo_url + "/A/B/F" + \
"\n"
# Set and commit the property
change_external(sbox.ospath('A/D'), new_externals_desc)
# Update other working copy, see if get the right change.
expected_output = svntest.wc.State(other_wc_dir, {
'A/D' : Item(status=' U'),
'A/D/x/y/z/blah/F' : Item(status='D '),
'A/D/x/y/z/blah/E' : Item(status='D '),
'A/D/x/y/z/blah/lambda': Item(status='D '),
})
svntest.actions.run_and_verify_update(other_wc_dir,
expected_output, None, None)
xyzb_path = os.path.join(other_wc_dir, "x", "y", "z", "blah")
expected_missing_paths = [
os.path.join(xyzb_path, "alpha"),
os.path.join(xyzb_path, "beta"),
]
probe_paths_missing(expected_missing_paths)
def update_change_modified_external(sbox):
"update changes to a modified external module"
external_url_for = externals_test_setup(sbox)
wc_dir = sbox.wc_dir
other_wc_dir = sbox.add_wc_path('other')
repo_url = sbox.repo_url
other_repo_url = repo_url + ".other"
# Checkout two working copies.
svntest.actions.run_and_verify_svn(None, None, [],
'checkout',
repo_url, wc_dir)
svntest.actions.run_and_verify_svn(None, None, [],
'checkout',
repo_url, other_wc_dir)
# Make a couple of mods in the "x/y/z/blah/" external.
alpha_path = os.path.join(other_wc_dir, "A", "D",
"x", "y", "z", "blah", "alpha")
svntest.main.file_append(alpha_path, "Some new text in alpha.\n")
new_file = os.path.join(other_wc_dir, "A", "D",
"x", "y", "z", "blah", "fish.txt")
svntest.main.file_append(new_file, "This is an unversioned file.\n")
# Change the "x/y/z/blah" external on A/D to point to a different
# URL. There are some local mods under the old checked-out external,
# so the old dir should be saved under a new name.
new_externals_desc = \
external_url_for["A/D/exdir_A"] + " exdir_A" + \
"\n" + \
external_url_for["A/D/exdir_A/G/"] + " exdir_A/G/" + \
"\n" + \
"exdir_A/H -r 1 " + external_url_for["A/D/exdir_A/H"] + \
"\n" + \
"x/y/z/blah " + other_repo_url + "/A/B/F" + \
"\n"
# Set and commit the property
change_external(sbox.ospath('A/D'), new_externals_desc)
# Update other working copy, see if get the right change.
expected_output = svntest.wc.State(other_wc_dir, {
'A/D' : Item(status=' U'),
'A/D/x/y/z/blah/F' : Item(status='D '),
'A/D/x/y/z/blah/lambda': Item(status='D '),
'A/D/x/y/z/blah/E' : Item(status='D '),
})
svntest.actions.run_and_verify_update(other_wc_dir,
expected_output, None, None)
xyzb_path = os.path.join(other_wc_dir, "x", "y", "z", "blah")
expected_missing_paths = [
os.path.join(xyzb_path, "alpha"),
os.path.join(xyzb_path, "beta"),
]
probe_paths_missing(expected_missing_paths)
def update_receive_change_under_external(sbox):
"update changes under an external module"
externals_test_setup(sbox)
wc_dir = sbox.wc_dir
other_wc_dir = sbox.add_wc_path('other')
repo_url = sbox.repo_url
other_repo_url = repo_url + ".other"
# Checkout two working copies.
svntest.actions.run_and_verify_svn(None, None, [],
'checkout',
repo_url, wc_dir)
svntest.actions.run_and_verify_svn(None, None, [],
'checkout',
other_repo_url, other_wc_dir)
# Commit some modifications from the other_wc.
other_gamma_path = os.path.join(other_wc_dir, 'A', 'D', 'gamma')
svntest.main.file_append(other_gamma_path, "New text in other gamma.\n")
expected_output = svntest.wc.State(other_wc_dir, {
'A/D/gamma' : Item(verb='Sending'),
})
expected_status = svntest.actions.get_virginal_state(other_wc_dir, 5)
expected_status.tweak('A/D/gamma', wc_rev=6)
svntest.actions.run_and_verify_commit(other_wc_dir,
expected_output,
expected_status,
None, other_wc_dir)
# Now update the regular wc to see if we get the change. Note that
# none of the module *properties* in this wc have been changed; only
# the source repository of the modules has received a change, and
# we're verifying that an update here pulls that change.
# The output's going to be all screwy because of the module
# notifications, so don't bother parsing it, just run update
# directly.
expected_output = svntest.wc.State(wc_dir, {
'A/D/exdir_A/D/gamma': Item(status='U '),
})
svntest.actions.run_and_verify_update(wc_dir,
expected_output, None, None)
external_gamma_path = sbox.ospath('A/D/exdir_A/D/gamma')
contents = open(external_gamma_path).read()
if contents != ("This is the file 'gamma'.\n"
"New text in other gamma.\n"):
raise svntest.Failure("Unexpected contents for externally modified " +
external_gamma_path)
# Commit more modifications
other_rho_path = os.path.join(other_wc_dir, 'A', 'D', 'G', 'rho')
svntest.main.file_append(other_rho_path, "New text in other rho.\n")
expected_output = svntest.wc.State(other_wc_dir, {
'A/D/G/rho' : Item(verb='Sending'),
})
expected_status = svntest.actions.get_virginal_state(other_wc_dir, 5)
expected_status.tweak('A/D/gamma', wc_rev=6)
expected_status.tweak('A/D/G/rho', wc_rev=7)
svntest.actions.run_and_verify_commit(other_wc_dir,
expected_output,
expected_status,
None, other_wc_dir)
expected_output = svntest.wc.State(sbox.ospath('A/C'), {
'exdir_G/rho' : Item(status='U '),
})
svntest.actions.run_and_verify_update(sbox.ospath('A/C'),
expected_output, None, None)
external_rho_path = sbox.ospath('A/C/exdir_G/rho')
contents = open(external_rho_path).read()
if contents != ("This is the file 'rho'.\n"
"New text in other rho.\n"):
raise svntest.Failure("Unexpected contents for externally modified " +
external_rho_path)
#----------------------------------------------------------------------
def modify_and_update_receive_new_external(sbox):
"commit and update additional externals"
external_url_for = externals_test_setup(sbox)
wc_dir = sbox.wc_dir
repo_url = sbox.repo_url
# Checkout a working copy
svntest.actions.run_and_verify_svn(None, None, [],
'checkout',
repo_url, wc_dir)
# Add one more external item
B_path = sbox.ospath('A/B')
externals_desc = \
external_url_for["A/D/exdir_A/G/"] + " exdir_G" + \
"\n" + \
"exdir_H -r 1 " + external_url_for["A/D/exdir_A/H"] + \
"\n" + \
"exdir_Z " + external_url_for["A/D/exdir_A/H"] + \
"\n"
change_external(B_path, externals_desc)
# Now cd into A/B and try updating
was_cwd = os.getcwd()
os.chdir(B_path)
# Once upon a time there was a core-dump here
svntest.actions.run_and_verify_svn("update failed",
svntest.verify.AnyOutput, [], 'up' )
os.chdir(was_cwd)
probe_paths_exist([os.path.join(B_path, "exdir_Z")])
#----------------------------------------------------------------------
def disallow_dot_or_dotdot_directory_reference(sbox):
"error if external target dir involves '.' or '..'"
external_url_for = externals_test_setup(sbox)
wc_dir = sbox.wc_dir
repo_url = sbox.repo_url
# Checkout a working copy
svntest.actions.run_and_verify_svn(None, None, [],
'checkout',
repo_url, wc_dir)
# Try to set illegal externals in the original WC.
def set_externals_for_path_expect_error(path, val):
expected_err = ".*Invalid svn:externals property on '.*': target " + \
"'.*' is an absolute path or involves '..'.*"
change_external_expect_error(path, val, expected_err)
B_path = sbox.ospath('A/B')
G_path = sbox.ospath('A/D/G')
H_path = sbox.ospath('A/D/H')
C_path = sbox.ospath('A/C')
F_path = sbox.ospath('A/B/F')
external_urls = list(external_url_for.values())
# The external_urls contains some examples of relative urls that are
# ambiguous with these local test paths, so we have to use the
# <url> <path> ordering here to check the local path validator.
externals_value_1 = external_urls.pop() + " ../foo\n"
if not external_urls: external_urls = list(external_url_for.values())
externals_value_2 = external_urls.pop() + " foo/bar/../baz\n"
if not external_urls: external_urls = list(external_url_for.values())
externals_value_3 = external_urls.pop() + " foo/..\n"
if not external_urls: external_urls = list(external_url_for.values())
externals_value_4 = external_urls.pop() + " .\n"
if not external_urls: external_urls = list(external_url_for.values())
externals_value_5 = external_urls.pop() + " ./\n"
if not external_urls: external_urls = list(external_url_for.values())
externals_value_6 = external_urls.pop() + " ..\n"
if not external_urls: external_urls = list(external_url_for.values())
externals_value_7 = external_urls.pop() + " ././/.///. \n"
if not external_urls: external_urls = list(external_url_for.values())
externals_value_8 = external_urls.pop() + " /foo \n"
if not external_urls: external_urls = list(external_url_for.values())
if svntest.main.is_os_windows():
externals_value_9 = external_urls.pop() + " D:/foo\n"
if not external_urls: external_urls = list(external_url_for.values())
externals_value_10 = external_urls.pop() + " D:\\foo\n"
if not external_urls: external_urls = list(external_url_for.values())
externals_value_11 = external_urls.pop() + " D:foo\n"
if not external_urls: external_urls = list(external_url_for.values())
set_externals_for_path_expect_error(B_path, externals_value_1)
set_externals_for_path_expect_error(G_path, externals_value_2)
set_externals_for_path_expect_error(H_path, externals_value_3)
set_externals_for_path_expect_error(C_path, externals_value_4)
set_externals_for_path_expect_error(F_path, externals_value_5)
set_externals_for_path_expect_error(B_path, externals_value_6)
set_externals_for_path_expect_error(G_path, externals_value_7)
set_externals_for_path_expect_error(H_path, externals_value_8)
if svntest.main.is_os_windows():
set_externals_for_path_expect_error(B_path, externals_value_9)
set_externals_for_path_expect_error(B_path, externals_value_10)
set_externals_for_path_expect_error(B_path, externals_value_11)
#----------------------------------------------------------------------
def export_with_externals(sbox):
"test exports | |
from __future__ import absolute_import
from compas_fab.backends.ros.messages.geometry_msgs import Point
from compas_fab.backends.ros.messages.geometry_msgs import Pose
from compas_fab.backends.ros.messages.geometry_msgs import PoseStamped
from compas_fab.backends.ros.messages.geometry_msgs import Quaternion
from compas_fab.backends.ros.messages.geometry_msgs import Vector3
from compas_fab.backends.ros.messages.object_recognition_msgs import ObjectType
from compas_fab.backends.ros.messages.octomap_msgs import OctomapWithPose
from compas_fab.backends.ros.messages.sensor_msgs import JointState
from compas_fab.backends.ros.messages.sensor_msgs import MultiDOFJointState
from compas_fab.backends.ros.messages.shape_msgs import Mesh
from compas_fab.backends.ros.messages.shape_msgs import Plane
from compas_fab.backends.ros.messages.shape_msgs import SolidPrimitive
from compas_fab.backends.ros.messages.std_msgs import Header
from compas_fab.backends.ros.messages.std_msgs import ROSmsg
from compas_fab.backends.ros.messages.trajectory_msgs import JointTrajectory
from compas_fab.backends.ros.messages.trajectory_msgs import MultiDOFJointTrajectory
class CollisionObject(ROSmsg):
"""http://docs.ros.org/kinetic/api/moveit_msgs/html/msg/CollisionObject.html
"""
ADD = 0
REMOVE = 1
APPEND = 2
MOVE = 3
def __init__(self, header=None, id="collision_obj", type=None,
primitives=None, primitive_poses=None, meshes=None, mesh_poses=None,
planes=None, plane_poses=None,
subframe_names=None, subframe_poses=None, operation=0):
self.header = header or Header() # a header, used for interpreting the poses
self.id = id # the id of the object (name used in MoveIt)
self.type = type or ObjectType() # The object type in a database of known objects
# solid geometric primitives
self.primitives = primitives or []
self.primitive_poses = primitive_poses or []
# meshes
self.meshes = meshes or []
self.mesh_poses = mesh_poses or []
# bounding planes
self.planes = planes or []
self.plane_poses = plane_poses or []
# keep the subframe arguments so they are not silently dropped
self.subframe_names = subframe_names or []
self.subframe_poses = subframe_poses or []
self.operation = operation # ADD or REMOVE or APPEND or MOVE
@classmethod
def from_collision_mesh(cls, collision_mesh):
"""Creates a collision object from a :class:`compas_fab.robots.CollisionMesh`
"""
kwargs = {}
kwargs['header'] = Header(frame_id=collision_mesh.root_name)
kwargs['id'] = collision_mesh.id
kwargs['meshes'] = [Mesh.from_mesh(collision_mesh.mesh)]
kwargs['mesh_poses'] = [Pose.from_frame(collision_mesh.frame)]
return cls(**kwargs)
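# Hedged usage sketch (file name is illustrative; CollisionMesh comes from
# compas_fab.robots):
# from compas.datastructures import Mesh as CompasMesh
# from compas_fab.robots import CollisionMesh
# cm = CollisionMesh(CompasMesh.from_stl('floor.stl'), 'floor')
# co = CollisionObject.from_collision_mesh(cm)
# co.operation = CollisionObject.ADD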
@classmethod
def from_msg(cls, msg):
kwargs = {}
kwargs['header'] = Header.from_msg(msg['header'])
kwargs['id'] = msg['id']
kwargs['type'] = ObjectType.from_msg(msg['type'])
kwargs['primitives'] = [SolidPrimitive.from_msg(i) for i in msg['primitives']]
kwargs['primitive_poses'] = [Pose.from_msg(i) for i in msg['primitive_poses']]
kwargs['meshes'] = [Mesh.from_msg(i) for i in msg['meshes']]
kwargs['mesh_poses'] = [Pose.from_msg(i) for i in msg['mesh_poses']]
kwargs['planes'] = [Plane.from_msg(i) for i in msg['planes']]
kwargs['plane_poses'] = [Pose.from_msg(i) for i in msg['plane_poses']]
kwargs['operation'] = msg['operation']
return cls(**kwargs)
class AttachedCollisionObject(ROSmsg):
"""http://docs.ros.org/kinetic/api/moveit_msgs/html/msg/AttachedCollisionObject.html
"""
def __init__(self, link_name=None, object=None, touch_links=None,
detach_posture=None, weight=0):
self.link_name = link_name or ''
self.object = object or CollisionObject()
self.touch_links = touch_links or []
self.detach_posture = detach_posture or JointTrajectory()
self.weight = weight
@classmethod
def from_attached_collision_mesh(cls, attached_collision_mesh):
"""Creates an attached collision object from a :class:`compas_fab.robots.AttachedCollisionMesh`
"""
kwargs = {}
kwargs['link_name'] = attached_collision_mesh.link_name
kwargs['object'] = CollisionObject.from_collision_mesh(attached_collision_mesh.collision_mesh)
kwargs['touch_links'] = [str(s) for s in attached_collision_mesh.touch_links]
kwargs['weight'] = attached_collision_mesh.weight
return cls(**kwargs)
class Constraints(ROSmsg):
"""http://docs.ros.org/kinetic/api/moveit_msgs/html/msg/Constraints.html
"""
def __init__(self, name='', joint_constraints=None, position_constraints=None,
orientation_constraints=None, visibility_constraints=None):
self.name = name
self.joint_constraints = joint_constraints if joint_constraints else []
self.position_constraints = position_constraints if position_constraints else []
self.orientation_constraints = orientation_constraints if orientation_constraints else []
self.visibility_constraints = visibility_constraints if visibility_constraints else []
class RobotState(ROSmsg):
"""http://docs.ros.org/kinetic/api/moveit_msgs/html/msg/RobotState.html
"""
def __init__(self, joint_state=None, multi_dof_joint_state=None,
attached_collision_objects=None, is_diff=False):
self.joint_state = joint_state if joint_state else JointState()
self.multi_dof_joint_state = multi_dof_joint_state if multi_dof_joint_state else MultiDOFJointState()
self.attached_collision_objects = attached_collision_objects if attached_collision_objects else []
self.is_diff = is_diff
@classmethod
def from_msg(cls, msg):
joint_state = JointState.from_msg(msg['joint_state'])
multi_dof_joint_state = MultiDOFJointState.from_msg(
msg['multi_dof_joint_state'])
attached_collision_objects = [AttachedCollisionObject.from_msg(
item) for item in msg['attached_collision_objects']]
return cls(joint_state, multi_dof_joint_state, attached_collision_objects, msg['is_diff'])
class PositionIKRequest(ROSmsg):
"""http://docs.ros.org/kinetic/api/moveit_msgs/html/msg/PositionIKRequest.html
"""
def __init__(self, group_name="robot", robot_state=None, constraints=None,
pose_stamped=None, timeout=1.0, attempts=8,
avoid_collisions=True):
self.group_name = group_name
self.robot_state = robot_state if robot_state else RobotState()
self.constraints = constraints if constraints else Constraints()
self.avoid_collisions = avoid_collisions
self.pose_stamped = pose_stamped if pose_stamped else PoseStamped()
self.timeout = timeout
self.attempts = attempts
class RobotTrajectory(ROSmsg):
"""http://docs.ros.org/kinetic/api/moveit_msgs/html/msg/RobotTrajectory.html
"""
def __init__(self, joint_trajectory=JointTrajectory(),
multi_dof_joint_trajectory=MultiDOFJointTrajectory()):
self.joint_trajectory = joint_trajectory
self.multi_dof_joint_trajectory = multi_dof_joint_trajectory
@classmethod
def from_msg(cls, msg):
joint_trajectory = JointTrajectory.from_msg(msg['joint_trajectory'])
multi_dof_joint_trajectory = MultiDOFJointTrajectory.from_msg(
msg['multi_dof_joint_trajectory'])
return cls(joint_trajectory, multi_dof_joint_trajectory)
class MoveItErrorCodes(ROSmsg):
"""http://docs.ros.org/kinetic/api/moveit_msgs/html/msg/MoveItErrorCodes.html
"""
# overall behavior
SUCCESS = 1
FAILURE = 99999
PLANNING_FAILED = -1
INVALID_MOTION_PLAN = -2
MOTION_PLAN_INVALIDATED_BY_ENVIRONMENT_CHANGE = -3
CONTROL_FAILED = -4
UNABLE_TO_AQUIRE_SENSOR_DATA = -5
TIMED_OUT = -6
PREEMPTED = -7
# planning & kinematics request errors
START_STATE_IN_COLLISION = -10
START_STATE_VIOLATES_PATH_CONSTRAINTS = -11
GOAL_IN_COLLISION = -12
GOAL_VIOLATES_PATH_CONSTRAINTS = -13
GOAL_CONSTRAINTS_VIOLATED = -14
INVALID_GROUP_NAME = -15
INVALID_GOAL_CONSTRAINTS = -16
INVALID_ROBOT_STATE = -17
INVALID_LINK_NAME = -18
INVALID_OBJECT_NAME = -19
# system errors
FRAME_TRANSFORM_FAILURE = -21
COLLISION_CHECKING_UNAVAILABLE = -22
ROBOT_STATE_STALE = -23
SENSOR_INFO_STALE = -24
# kinematics errors
NO_IK_SOLUTION = -31
def __init__(self, val=-31):
self.val = val
def __int__(self):
return self.val
def __eq__(self, other):
return self.val == other
def __ne__(self, other):
return self.val != other
@property
def human_readable(self):
cls = type(self)
for k, v in cls.__dict__.items():
if v == self.val:
return k
return ''
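# Example from the constants above: int(MoveItErrorCodes(-31)) == -31 and
# MoveItErrorCodes(-31).human_readable == 'NO_IK_SOLUTION'.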
class PlannerParams(ROSmsg):
"""http://docs.ros.org/melodic/api/moveit_msgs/html/msg/PlannerParams.html
"""
def __init__(self, keys=None, values=None, descriptions=None):
self.keys = keys or [] # parameter names (same size as values)
self.values = values or [] # parameter values (same size as keys)
self.descriptions = descriptions or [] # parameter description (can be empty)
class WorkspaceParameters(ROSmsg):
"""http://docs.ros.org/kinetic/api/moveit_msgs/html/msg/WorkspaceParameters.html
"""
def __init__(self, header=None, min_corner=None, max_corner=None):
self.header = header or Header()
self.min_corner = min_corner or Vector3(-1000, -1000, -1000)
self.max_corner = max_corner or Vector3(1000, 1000, 1000)
class TrajectoryConstraints(ROSmsg):
"""http://docs.ros.org/kinetic/api/moveit_msgs/html/msg/TrajectoryConstraints.html
"""
def __init__(self, constraints=None):
self.constraints = constraints or [] # Constraints[]
class JointConstraint(ROSmsg):
"""http://docs.ros.org/kinetic/api/moveit_msgs/html/msg/JointConstraint.html
"""
def __init__(self, joint_name="", position=0, tolerance_above=0, tolerance_below=0, weight=1.):
self.joint_name = joint_name
self.position = float(position)
self.tolerance_above = float(tolerance_above)
self.tolerance_below = float(tolerance_below)
self.weight = float(weight)
@classmethod
def from_joint_constraint(cls, joint_constraint):
"""Creates a `JointConstraint` from a :class:`compas_fab.robots.JointConstraint`.
"""
c = joint_constraint
return cls(c.joint_name, c.value, c.tolerance_above, c.tolerance_below, c.weight)
class VisibilityConstraint(ROSmsg):
"""http://docs.ros.org/kinetic/api/moveit_msgs/html/msg/VisibilityConstraint.html
"""
def __init__(self):
raise NotImplementedError
class BoundingVolume(ROSmsg):
"""http://docs.ros.org/kinetic/api/moveit_msgs/html/msg/BoundingVolume.html
"""
def __init__(self, primitives=None, primitive_poses=None, meshes=None,
mesh_poses=None):
self.primitives = primitives or [] # shape_msgs/SolidPrimitive[]
self.primitive_poses = primitive_poses or [] # geometry_msgs/Pose[]
self.meshes = meshes or [] # shape_msgs/Mesh[]
self.mesh_poses = mesh_poses or [] # geometry_msgs/Pose[]
@classmethod
def from_box(cls, box):
"""Creates a `BoundingVolume` from a :class:`compas.geometry.Box`.
Parameters
----------
box: `compas.geometry.Box`
"""
primitive = SolidPrimitive.from_box(box)
pose = Pose.from_frame(box.frame)
return cls(primitives=[primitive], primitive_poses=[pose])
@classmethod
def from_sphere(cls, sphere):
"""Creates a `BoundingVolume` from a :class:`compas.geometry.Sphere`.
Parameters
----------
sphere: `compas.geometry.Sphere`
"""
primitive = SolidPrimitive.from_sphere(sphere)
pose = Pose(Point(*sphere.point), Quaternion(0, 0, 0, 1))
return cls(primitives=[primitive], primitive_poses=[pose])
@classmethod
def from_mesh(cls, mesh):
"""Creates a `BoundingVolume` from a :class:`compas.datastructures.Mesh`.
Parameters
----------
mesh: `compas.datastructures.Mesh`
"""
mesh = Mesh.from_mesh(mesh)
pose = Pose()
return cls(meshes=[mesh], mesh_poses=[pose])
@classmethod
def from_bounding_volume(cls, bounding_volume):
"""Creates a `BoundingVolume` from a :class:`compas_fab.robots.BoundingVolume`.
Parameters
----------
bounding_volume: `compas_fab.robots.BoundingVolume`
"""
if bounding_volume.type == bounding_volume.BOX:
return cls.from_box(bounding_volume.volume)
elif bounding_volume.type == bounding_volume.SPHERE:
return cls.from_sphere(bounding_volume.volume)
elif bounding_volume.type == bounding_volume.MESH:
return cls.from_mesh(bounding_volume.volume)
else:
raise NotImplementedError
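# Hedged usage sketch (compas API assumed; constructor signatures can vary
# between compas versions):
#
#     >>> from compas.geometry import Box, Frame
#     >>> box = Box(Frame.worldXY(), 1, 1, 1)   # assumed signature
#     >>> bv = BoundingVolume.from_box(box)
#     >>> len(bv.primitives), len(bv.primitive_poses)
#     (1, 1)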
class PositionConstraint(ROSmsg):
"""http://docs.ros.org/kinetic/api/moveit_msgs/html/msg/PositionConstraint.html
"""
def __init__(self, header=None, link_name=None, target_point_offset=None,
constraint_region=None, weight=None):
self.header = header or Header()
self.link_name = link_name or ""
self.target_point_offset = target_point_offset or Vector3(0., 0., 0.) # geometry_msgs/Vector3
self.constraint_region = constraint_region or BoundingVolume() # moveit_msgs/BoundingVolume
self.weight = float(weight) if weight is not None else 1.
@classmethod
def from_position_constraint(cls, header, position_constraint):
"""Creates a `PositionConstraint` from a :class:`compas_fab.robots.PositionConstraint`.
"""
constraint_region = BoundingVolume.from_bounding_volume(position_constraint.bounding_volume)
return cls(header, position_constraint.link_name, None, constraint_region, position_constraint.weight)
class OrientationConstraint(ROSmsg):
"""http://docs.ros.org/kinetic/api/moveit_msgs/html/msg/OrientationConstraint.html
"""
def __init__(self, header=None, orientation=None, link_name=None,
absolute_x_axis_tolerance=0.0, absolute_y_axis_tolerance=0.0,
absolute_z_axis_tolerance=0.0, weight=1):
"""
Notes
-----
The naming of the absolute_x/y/z_axis_tolerances can be misleading:
if you specify the absolute_x/y/z_axis_tolerances as [0.01, 0.01, 6.3],
it means that the frame's x-axis and y-axis are allowed to rotate about
the z-axis by an angle of up to 6.3 radians, whereas the z-axis itself
may only deviate by 0.01 radians.
"""
self.header = header or Header()
self.orientation = orientation or Quaternion() # geometry_msgs/Quaternion
self.link_name = link_name or ""
self.absolute_x_axis_tolerance = float(absolute_x_axis_tolerance)
self.absolute_y_axis_tolerance = float(absolute_y_axis_tolerance)
self.absolute_z_axis_tolerance = float(absolute_z_axis_tolerance)
self.weight = float(weight)
@classmethod
def from_orientation_constraint(cls, header, orientation_constraint):
"""Creates a ``OrientationConstraint`` from a :class:`compas_fab.robots.OrientationConstraint`.
"""
qw, qx, qy, qz = orientation_constraint.quaternion
ax, ay, az = orientation_constraint.tolerances
kwargs = {}
kwargs['header'] = header
kwargs['orientation'] = Quaternion(qx, qy, qz, qw)
kwargs['link_name'] = orientation_constraint.link_name
kwargs['absolute_x_axis_tolerance'] = ax
kwargs['absolute_y_axis_tolerance'] = ay
kwargs['absolute_z_axis_tolerance'] = az
kwargs['weight'] = orientation_constraint.weight
return cls(**kwargs)
class PlanningSceneComponents(ROSmsg):
"""http://docs.ros.org/kinetic/api/moveit_msgs/html/msg/PlanningSceneComponents.html
"""
SCENE_SETTINGS = 1
ROBOT_STATE = 2
ROBOT_STATE_ATTACHED_OBJECTS = 4
WORLD_OBJECT_NAMES = 8
WORLD_OBJECT_GEOMETRY = 16
OCTOMAP = 32
TRANSFORMS = 64
ALLOWED_COLLISION_MATRIX = 128
LINK_PADDING_AND_SCALING = 256
OBJECT_COLORS = 512
def __init__(self, components=None):
self.components = components or self.SCENE_SETTINGS
def __eq__(self, other):
return self.components == other
@property
def human_readable(self):
cls = type(self)
for k, v in cls.__dict__.items():
if v == self.components:
return k
return ''
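# The component constants above are powers of two, so they can be OR-ed into
# a bitmask to request several parts of the planning scene at once
# (illustrative sketch, not part of the original module):
#
#     >>> comp = PlanningSceneComponents(
#     ...     PlanningSceneComponents.SCENE_SETTINGS
#     ...     | PlanningSceneComponents.WORLD_OBJECT_GEOMETRY)
#     >>> comp == (1 | 16)
#     True
#
# Note that human_readable only matches the single named constants, so a
# combined bitmask falls through and returns ''.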
class AllowedCollisionMatrix(ROSmsg):
"""http://docs.ros.org/melodic/api/moveit_msgs/html/msg/AllowedCollisionMatrix.html
"""
def __init__(self, entry_names=None, entry_values=None, default_entry_names=None, default_entry_values=None):
self.entry_names = entry_names or [] # string[]
self.entry_values = entry_values or [] # moveit_msgs/AllowedCollisionEntry[]
self.default_entry_names = default_entry_names or [] # string[]
self.default_entry_values = default_entry_values or [] # bool[]
class PlanningSceneWorld(ROSmsg):
"""http://docs.ros.org/melodic/api/moveit_msgs/html/msg/PlanningSceneWorld.html
"""
def __init__(self, collision_objects=None, octomap=None):
self.collision_objects = collision_objects or [] # collision objects # CollisionObject[]
self.octomap = octomap or OctomapWithPose() # octomap_msgs/OctomapWithPose
@classmethod
def from_msg(cls, msg):
collision_objects = [CollisionObject.from_msg(i) for i in msg['collision_objects']]
octomap = msg['octomap'] # TODO: Add OctomapWithPose.from_msg(msg['octomap'])
return cls(collision_objects, octomap)
class PlanningScene(ROSmsg):
"""http://docs.ros.org/melodic/api/moveit_msgs/html/msg/PlanningScene.html
"""
def __init__(self, name='', robot_state=None, robot_model_name='',
# Repo: michaeldeistler/sbibm-1
"""
Module containing data structures for representing datasets.
"""
from __future__ import division, print_function
from builtins import object, range
from future.utils import with_metaclass
from past.utils import old_div
__author__ = "wittawat"
from abc import ABCMeta, abstractmethod
import autograd.numpy as np
import scipy.stats as stats
import sbibm.third_party.kgof.util as util
class Data(object):
"""
Class representing a dataset, i.e., an encapsulation of a data matrix
whose rows are vectors drawn from a distribution.
"""
def __init__(self, X):
"""
:param X: n x d numpy array for dataset X
"""
self.X = X
if not np.all(np.isfinite(X)):
print("X:")
print(util.fullprint(X))
raise ValueError("Not all elements in X are finite.")
def __str__(self):
mean_x = np.mean(self.X, 0)
std_x = np.std(self.X, 0)
prec = 4
desc = ""
desc += "E[x] = %s \n" % (np.array_str(mean_x, precision=prec))
desc += "Std[x] = %s \n" % (np.array_str(std_x, precision=prec))
return desc
def dim(self):
"""Return the dimension of the data."""
dx = self.X.shape[1]
return dx
def sample_size(self):
return self.X.shape[0]
def n(self):
return self.sample_size()
def data(self):
"""Return the data matrix."""
return self.X
def split_tr_te(self, tr_proportion=0.5, seed=820, return_tr_ind=False):
"""Split the dataset into training and test sets.
Return (Data for tr, Data for te)"""
X = self.X
nx, dx = X.shape
Itr, Ite = util.tr_te_indices(nx, tr_proportion, seed)
tr_data = Data(X[Itr, :])
te_data = Data(X[Ite, :])
if return_tr_ind:
return (tr_data, te_data, Itr)
else:
return (tr_data, te_data)
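# Illustrative sketch of a train/test split (not part of the original
# module; assumes a 10 x 2 data matrix):
#
#     >>> dat = Data(np.random.randn(10, 2))
#     >>> tr, te = dat.split_tr_te(tr_proportion=0.5, seed=820)
#     >>> tr.sample_size(), te.sample_size()
#     (5, 5)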
def subsample(self, n, seed=87, return_ind=False):
"""Subsample without replacement. Return a new Data."""
if n > self.X.shape[0]:
raise ValueError("n should not be larger than sizes of X")
ind_x = util.subsample_ind(self.X.shape[0], n, seed)
if return_ind:
return Data(self.X[ind_x, :]), ind_x
else:
return Data(self.X[ind_x, :])
def clone(self):
"""
Return a new Data object with a separate copy of each internal
variable, and with the same content.
"""
nX = np.copy(self.X)
return Data(nX)
def __add__(self, data2):
"""
Merge the current Data with another one.
Create a new Data and create a new copy for all internal variables.
"""
copy = self.clone()
copy2 = data2.clone()
nX = np.vstack((copy.X, copy2.X))
return Data(nX)
### end Data class
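# Quick sketch of merging two datasets with __add__, which vstacks deep
# copies of both data matrices (illustrative, not part of the original
# module):
#
#     >>> d1 = Data(np.zeros((3, 2)))
#     >>> d2 = Data(np.ones((4, 2)))
#     >>> (d1 + d2).sample_size()
#     7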
class DataSource(with_metaclass(ABCMeta, object)):
"""
A source of data allowing resampling. Subclasses may prefix
class names with DS.
"""
@abstractmethod
def sample(self, n, seed):
"""Return a Data. Returned result should be deterministic given
the input (n, seed)."""
raise NotImplementedError()
def dim(self):
"""
Return the dimension of the data. If possible, subclasses should
override this. Determining the dimension by sampling may not be
efficient, especially if the sampling relies on MCMC.
"""
dat = self.sample(n=1, seed=3)
return dat.dim()
# end DataSource
class DSIsotropicNormal(DataSource):
"""
A DataSource providing samples from a multivariate isotropic normal
distribution.
"""
def __init__(self, mean, variance):
"""
mean: a numpy array of length d for the mean
variance: a positive floating-point number for the variance.
"""
assert len(mean.shape) == 1
self.mean = mean
self.variance = variance
def sample(self, n, seed=2):
with util.NumpySeedContext(seed=seed):
d = len(self.mean)
mean = self.mean
variance = self.variance
X = np.random.randn(n, d) * np.sqrt(variance) + mean
return Data(X)
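# Illustrative sketch: draw 100 samples from a 2-d isotropic normal with
# mean (0, 0) and unit variance (not part of the original module):
#
#     >>> ds = DSIsotropicNormal(np.zeros(2), 1.0)
#     >>> dat = ds.sample(n=100, seed=7)
#     >>> dat.data().shape
#     (100, 2)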
class DSNormal(DataSource):
"""
A DataSource implementing a multivariate Gaussian.
"""
def __init__(self, mean, cov):
"""
mean: a numpy array of length d.
cov: d x d numpy array for the covariance.
"""
self.mean = mean
self.cov = cov
assert mean.shape[0] == cov.shape[0]
assert cov.shape[0] == cov.shape[1]
def sample(self, n, seed=3):
with util.NumpySeedContext(seed=seed):
mvn = stats.multivariate_normal(self.mean, self.cov)
X = mvn.rvs(size=n)
if len(X.shape) == 1:
# This can happen if d=1
X = X[:, np.newaxis]
return Data(X)
class DSIsoGaussianMixture(DataSource):
"""
A DataSource implementing a Gaussian mixture in R^d where each component
is an isotropic multivariate normal distribution.
Let k be the number of mixture components.
"""
def __init__(self, means, variances, pmix=None):
"""
means: a k x d 2d array specifying the means.
variances: a one-dimensional length-k array of variances
pmix: a one-dimensional length-k array of mixture weights. Sum to one.
"""
k, d = means.shape
if k != len(variances):
raise ValueError(
"Number of components in means and variances do not match."
)
if pmix is None:
pmix = old_div(np.ones(k), float(k))
if np.abs(np.sum(pmix) - 1) > 1e-8:
raise ValueError("Mixture weights do not sum to 1.")
self.pmix = pmix
self.means = means
self.variances = variances
def sample(self, n, seed=29):
pmix = self.pmix
means = self.means
variances = self.variances
k, d = self.means.shape
sam_list = []
with util.NumpySeedContext(seed=seed):
# counts for each mixture component
counts = np.random.multinomial(n, pmix, size=1)
# counts is a 2d array
counts = counts[0]
# For each component, draw from its corresponding mixture component.
for i, nc in enumerate(counts):
# Sample from ith component
sam_i = np.random.randn(nc, d) * np.sqrt(variances[i]) + means[i]
sam_list.append(sam_i)
sample = np.vstack(sam_list)
assert sample.shape[0] == n
np.random.shuffle(sample)
return Data(sample)
# end of class DSIsoGaussianMixture
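# Illustrative sketch: a two-component isotropic Gaussian mixture in 1-d
# with equal (default) weights (not part of the original module):
#
#     >>> means = np.array([[-3.0], [3.0]])
#     >>> variances = np.array([0.5, 0.5])
#     >>> ds = DSIsoGaussianMixture(means, variances)
#     >>> ds.sample(n=200, seed=1).data().shape
#     (200, 1)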
class DSGaussianMixture(DataSource):
"""
A DataSource implementing a Gaussian mixture in R^d where each component
is an arbitrary Gaussian distribution.
Let k be the number of mixture components.
"""
def __init__(self, means, variances, pmix=None):
"""
means: a k x d 2d array specifying the means.
variances: a k x d x d numpy array containing k covariance matrices,
one for each component.
pmix: a one-dimensional length-k array of mixture weights. Sum to one.
"""
k, d = means.shape
if k != variances.shape[0]:
raise ValueError(
"Number of components in means and variances do not match."
)
if pmix is None:
pmix = old_div(np.ones(k), float(k))
if np.abs(np.sum(pmix) - 1) > 1e-8:
raise ValueError("Mixture weights do not sum to 1.")
self.pmix = pmix
self.means = means
self.variances = variances
def sample(self, n, seed=29):
pmix = self.pmix
means = self.means
variances = self.variances
k, d = self.means.shape
sam_list = []
with util.NumpySeedContext(seed=seed):
# counts for each mixture component
counts = np.random.multinomial(n, pmix, size=1)
# counts is a 2d array
counts = counts[0]
# For each component, draw from its corresponding mixture component.
for i, nc in enumerate(counts):
# construct the component
# https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.stats.multivariate_normal.html
cov = variances[i]
mnorm = stats.multivariate_normal(means[i], cov)
# Sample from ith component
sam_i = mnorm.rvs(size=nc)
sam_list.append(sam_i)
sample = np.vstack(sam_list)
assert sample.shape[0] == n
np.random.shuffle(sample)
return Data(sample)
# end of DSGaussianMixture
class DSLaplace(DataSource):
"""
A DataSource for a multivariate Laplace distribution.
"""
def __init__(self, d, loc=0, scale=1):
"""
loc: location
scale: scale parameter.
Described in https://docs.scipy.org/doc/numpy/reference/generated/numpy.random.laplace.html#numpy.random.laplace
"""
assert d > 0
self.d = d
self.loc = loc
self.scale = scale
def sample(self, n, seed=4):
with util.NumpySeedContext(seed=seed):
X = np.random.laplace(loc=self.loc, scale=self.scale, size=(n, self.d))
return Data(X)
class DSTDistribution(DataSource):
"""
A DataSource for a univariate T-distribution.
"""
def __init__(self, df):
"""
df: degrees of freedom
"""
assert df > 0
self.df = df
def sample(self, n, seed=5):
with util.NumpySeedContext(seed=seed):
X = stats.t.rvs(df=self.df, size=n)
X = X[:, np.newaxis]
return Data(X)
# end class DSTDistribution
class DSGaussBernRBM(DataSource):
"""
A DataSource implementing a Gaussian-Bernoulli Restricted Boltzmann Machine.
The probability of the latent vector h is controlled by the vector c.
The parameterization of the Gaussian-Bernoulli RBM is given in
density.GaussBernRBM.
- It turns out that this is equivalent to drawing a vector of {-1, 1} for h
according to h ~ Discrete(sigmoid(2c)).
- Draw x | h ~ N(B*h+b, I)
"""
def __init__(self, B, b, c, burnin=2000):
"""
B: a dx x dh matrix
b: a numpy array of length dx
c: a numpy array of length dh
burnin: burn-in iterations when doing Gibbs sampling
"""
assert burnin >= 0
dh = len(c)
dx = len(b)
assert B.shape[0] == dx
assert B.shape[1] == dh
assert dx > 0
assert dh > 0
self.B = B
self.b = b
self.c = c
self.burnin = burnin
@staticmethod
def sigmoid(x):
"""
x: a numpy array.
"""
return old_div(1.0, (1 + np.exp(-x)))
def _blocked_gibbs_next(self, X, H):
"""
Sample from the mutual conditional distributions.
"""
dh = H.shape[1]
n, dx = X.shape
B = self.B
b = self.b
# Draw H.
XB2C = np.dot(X, self.B) + 2.0 * self.c
# Ph: n x dh matrix
Ph = DSGaussBernRBM.sigmoid(XB2C)
# H: n x dh
H = (np.random.rand(n, dh) <= Ph) * 2 - 1.0
assert np.all(np.abs(H) - 1 <= 1e-6)
# Draw X.
# mean: n x dx
mean = old_div(np.dot(H, B.T), 2.0) + b
# File: vfo/internal_plotting_functions.py
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
import matplotlib.pyplot as plt
import vfo.internal_database_functions as idbf
ele_style = {'color':'black', 'linewidth':1, 'linestyle':'-'} # elements
ele_lim_a_style = {'color':'blue', 'linewidth':1.5, 'linestyle':'-'} # elements
ele_lim_b_style = {'color':'green', 'linewidth':1.5, 'linestyle':'-'} # elements
ele_lim_c_style = {'color':'orange', 'linewidth':1.5, 'linestyle':'-'} # elements
ele_lim_d_style = {'color':'red', 'linewidth':1.5, 'linestyle':'-'} # elements
node_style = {'color':'black', 'marker':'o', 'facecolor':'black','linewidth':0.}
node_text_style = {'fontsize':6, 'fontweight':'regular', 'color':'green'}
ele_text_style = {'fontsize':6, 'fontweight':'bold', 'color':'darkred'}
WireEle_style = {'color':'black', 'linewidth':1, 'linestyle':':'} # elements
Eig_style = {'color':'red', 'linewidth':1, 'linestyle':'-'} # elements
limStateColors = ["blue","green","orange","red"]
def _plotCubeSurf(nodeCords, ax, fillSurface, eleStyle):
## This procedure is called by the _plotCubeVol() command
aNode = nodeCords[0]
bNode = nodeCords[1]
cNode = nodeCords[2]
dNode = nodeCords[3]
## Use arrays for less memory and fast code
surfXarray = np.array([[aNode[0], dNode[0]], [bNode[0], cNode[0]]])
surfYarray = np.array([[aNode[1], dNode[1]], [bNode[1], cNode[1]]])
surfZarray = np.array([[aNode[2], dNode[2]], [bNode[2], cNode[2]]])
## Initialize variables for matplotlib objects
tempSurface = [None]
if fillSurface == 'yes':
tempSurface = ax.plot_surface(surfXarray, surfYarray, surfZarray, edgecolor='k', color='g', alpha=.5)
del aNode, bNode, cNode, dNode, surfXarray, surfYarray, surfZarray
return tempSurface
def _plotCubeVol(iNode, jNode, kNode, lNode, iiNode, jjNode, kkNode, llNode, ax, show_element_tags, element, eleStyle, fillSurface):
## procedure to render a cubic element, use eleStyle = "wire" for a wire frame, and "solid" for solid element lines.
## Use fillSurface = "yes" for color fill in the elements. fillSurface="no" for wireframe.
tempSurfaces = 6*[None]
tempTag = [None]
# Eight-node brick element: render each of its six faces
# [iNode, jNode, kNode, lNode,iiNode, jjNode, kkNode, llNode] = [*nodesCords]
tempSurfaces[0] = _plotCubeSurf([iNode, jNode, kNode, lNode], ax, fillSurface, eleStyle)
tempSurfaces[1] = _plotCubeSurf([iNode, jNode, jjNode, iiNode], ax, fillSurface, eleStyle)
tempSurfaces[2] = _plotCubeSurf([iiNode, jjNode, kkNode, llNode], ax, fillSurface, eleStyle)
tempSurfaces[3] = _plotCubeSurf([lNode, kNode, kkNode, llNode], ax, fillSurface, eleStyle)
tempSurfaces[4] = _plotCubeSurf([jNode, kNode, kkNode, jjNode], ax, fillSurface, eleStyle)
tempSurfaces[5] = _plotCubeSurf([iNode, lNode, llNode, iiNode], ax, fillSurface, eleStyle)
if show_element_tags == 'yes':
tempTag = ax.text((iNode[0]+jNode[0]+kNode[0]+lNode[0]+iiNode[0]+jjNode[0]+kkNode[0]+llNode[0])/8,
(iNode[1]+jNode[1]+kNode[1]+lNode[1]+iiNode[1]+jjNode[1]+kkNode[1]+llNode[1])/8,
(iNode[2]+jNode[2]+kNode[2]+lNode[2]+iiNode[2]+jjNode[2]+kkNode[2]+llNode[2])/8,
str(element), **ele_text_style) #label elements
return tempSurfaces, tempTag
def _plotTri2D(iNode, jNode, kNode, ax, show_element_tags, element, eleStyle, fillSurface):
## procedure to render a 2D three node shell element. use eleStyle = "wire" for a wire frame, and "solid" for solid element lines.
## Use fillSurface = "yes" for color fill in the elements. fillSurface="no" for wireframe.
## Initialize variables for matplotlib objects
tempLines = [None]
tempSurface = [None]
tempTag = [None]
tempLines, = plt.plot((iNode[0], jNode[0], kNode[0], iNode[0]),
(iNode[1], jNode[1], kNode[1], iNode[1]), marker='')
# update style
if eleStyle == "wire":
plt.setp(tempLines,**WireEle_style)
else:
plt.setp(tempLines,**ele_style)
if fillSurface == 'yes':
tempSurface = ax.fill(np.array([iNode[0], jNode[0], kNode[0]]),
np.array([iNode[1], jNode[1], kNode[1]]), color='g', alpha=.6)
if show_element_tags == 'yes':
tempTag = ax.text((iNode[0] + jNode[0] + kNode[0])*1.0/3, (iNode[1]+jNode[1]+kNode[1])*1.0/3,
str(element), **ele_text_style) #label elements
return tempLines, tempSurface, tempTag
def _plotQuad2D(iNode, jNode, kNode, lNode, ax, show_element_tags, element, eleStyle, fillSurface):
## procedure to render a 2D four node shell element. use eleStyle = "wire" for a wire frame, and "solid" for solid element lines.
## Use fillSurface = "yes" for color fill in the elements. fillSurface="no" for wireframe.
tempLines = [None]
tempSurface = [None]
tempTag = [None]
tempLines, = plt.plot((iNode[0], jNode[0], kNode[0], lNode[0], iNode[0]),
(iNode[1], jNode[1], kNode[1], lNode[1], iNode[1]), marker='')
# update style
if eleStyle == "wire":
plt.setp(tempLines,**WireEle_style)
else:
plt.setp(tempLines, **ele_style)
if fillSurface == 'yes':
tempSurface = ax.fill(np.array([iNode[0], jNode[0], kNode[0], lNode[0]]),
np.array([iNode[1], jNode[1], kNode[1], lNode[1]]), color='g', alpha=.6)
tempTag = []
if show_element_tags == 'yes':
tempTag = ax.text((iNode[0]+jNode[0]+kNode[0]+lNode[0])*1.0/4, (iNode[1]+jNode[1]+kNode[1]+lNode[1])*1.0/4,
str(element), **ele_text_style) #label elements
return tempLines, tempSurface, tempTag
def _plotQuad3D(iNode, jNode, kNode, lNode, ax, show_element_tags, element, eleStyle, fillSurface):
## procedure to render a 3D four node shell element. use eleStyle = "wire" for a wire frame, and "solid" for solid element lines.
## Use fillSurface = "yes" for color fill in the elements. fillSurface="no" for wireframe.
tempLines = [None]
tempSurface = [None]
tempTag = [None]
# Create Lines
tempLines, = plt.plot((iNode[0], jNode[0], kNode[0], lNode[0], iNode[0]),
(iNode[1], jNode[1], kNode[1], lNode[1], iNode[1]),
(iNode[2], jNode[2], kNode[2], lNode[2], iNode[2]), marker='')
# update style
if eleStyle == "wire":
plt.setp(tempLines,**WireEle_style)
else:
plt.setp(tempLines,**ele_style)
# Get Surface
if fillSurface == 'yes':
tempSurface = ax.plot_surface(np.array([[iNode[0], lNode[0]], [jNode[0], kNode[0]]]),
np.array([[iNode[1], lNode[1]], [jNode[1], kNode[1]]]),
np.array([[iNode[2], lNode[2]], [jNode[2], kNode[2]]]), color='g', alpha=.6)
tempTag = []
# Get Tag
if show_element_tags == 'yes':
tempTag = ax.text((iNode[0]+jNode[0]+kNode[0]+lNode[0])*1.05/4, (iNode[1]+jNode[1]+kNode[1]+lNode[1])*1.05/4,
(iNode[2]+jNode[2]+kNode[2]+lNode[2])*1.05/4, str(element), **ele_text_style) #label elements
return tempLines, tempSurface, tempTag
def _plotTri3D(iNode, jNode, kNode, ax, show_element_tags, element, eleStyle, fillSurface):
## procedure to render a 3D three-node shell element. use eleStyle = "wire" for a wire frame, and "solid" for solid element lines.
## Use fillSurface = "yes" for color fill in the elements. fillSurface="no" for wireframe.
## Initialize variables for matplotlib objects
tempLines = [None]
tempSurface = [None]
tempTag = [None]
tempLines, = plt.plot((iNode[0], jNode[0], kNode[0], iNode[0]),
(iNode[1], jNode[1], kNode[1], iNode[1]),
(iNode[2], jNode[2], kNode[2], iNode[2]), marker='')
# update style
if eleStyle == "wire":
plt.setp(tempLines,**WireEle_style)
else:
plt.setp(tempLines,**ele_style)
# x = [iNode[0],jNode[0],kNode[0]]
# y = [iNode[1],jNode[1],kNode[1]]
# z = [iNode[2],jNode[2],kNode[2]]
# verts = [list(zip(x,y,z))]
# ax.add_collection3d(Poly3DCollection(verts, facecolor='g', alpha=.25))
if fillSurface == 'yes':
tempSurface = ax.plot_surface(np.array([[iNode[0], kNode[0]], [jNode[0], kNode[0]]]),
np.array([[iNode[1], kNode[1]], [jNode[1], kNode[1]]]),
np.array([[iNode[2], kNode[2]], [jNode[2], kNode[2]]]), color='g', alpha=.6)
if show_element_tags == 'yes':
tempTag = ax.text((iNode[0] + jNode[0] + kNode[0])*1.0/3, (iNode[1]+jNode[1]+kNode[1])*1.0/3,
(iNode[2]+jNode[2]+kNode[2])*1.0/3, str(element), **ele_text_style) #label elements
return tempLines, tempSurface, tempTag
def _plotTetSurf(nodeCords, ax, fillSurface, eleStyle):
## This procedure is called by the _plotTetVol() command
aNode = nodeCords[0]
bNode = nodeCords[1]
cNode = nodeCords[2]
## Use arrays for less memory and fast code
surfXarray = np.array([[aNode[0], cNode[0]], [bNode[0], cNode[0]]])
surfYarray = np.array([[aNode[1], cNode[1]], [bNode[1], cNode[1]]])
surfZarray = np.array([[aNode[2], cNode[2]], [bNode[2], cNode[2]]])
## Initialize variables for matplotlib objects
tempSurface = [None]
if fillSurface == 'yes':
tempSurface = ax.plot_surface(surfXarray, surfYarray, surfZarray, edgecolor='k', color='g', alpha=.5)
del aNode, bNode, cNode, surfXarray, surfYarray, surfZarray
return tempSurface
def _plotTetVol(iNode, jNode, kNode, lNode, ax, show_element_tags, element, eleStyle, fillSurface):
## procedure to render a tetrahedral element, use eleStyle = "wire" for a wire frame, and "solid" for solid element lines.
## Use fillSurface = "yes" for color fill in the elements. fillSurface="no" for wireframe.
tempSurfaces = 4*[None]
tempTag = [None]
# Four triangular faces of the tetrahedron
tempSurfaces[0] = _plotTetSurf([iNode, jNode, kNode], ax, fillSurface, eleStyle)
tempSurfaces[1] = _plotTetSurf([iNode, jNode, lNode], ax, fillSurface, eleStyle)
tempSurfaces[2] = _plotTetSurf([iNode, kNode, lNode], ax, fillSurface, eleStyle)
tempSurfaces[3] = _plotTetSurf([jNode, kNode, lNode], ax, fillSurface, eleStyle)
if show_element_tags == 'yes':
tempTag = ax.text((iNode[0]+jNode[0]+kNode[0]+lNode[0])/4,
(iNode[1]+jNode[1]+kNode[1]+lNode[1])/4,
(iNode[2]+jNode[2]+kNode[2]+lNode[2])/4,
str(element), **ele_text_style) #label elements
return tempSurfaces, tempTag
def _checkEleLength2D(iNode,jNode):
eleLengthCheck = "ELE"
if abs(jNode[0]-iNode[0])>0.01 or abs(jNode[1]-iNode[1])>0.01 :
pass
else:
eleLengthCheck = "ZLE"
return eleLengthCheck
def _checkEleLength3D(iNode,jNode):
eleLengthCheck = "ELE"
if abs(jNode[0]-iNode[0])>0.001 or abs(jNode[1]-iNode[1])>0.001 or abs(jNode[2]-iNode[2])>0.001 :
pass
else:
eleLengthCheck = "ZLE"
return eleLengthCheck
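# Quick illustration of the zero-length-element ("ZLE") checks above: nodes
# that coincide within the hard-coded tolerance in every coordinate are
# flagged so they can be drawn with markers instead of an invisible line
# (sketch only, not part of the original module):
#
#     >>> _checkEleLength2D([0.0, 0.0], [1.0, 0.0])
#     'ELE'
#     >>> _checkEleLength3D([0.0, 0.0, 0.0], [0.0, 0.0, 0.0])
#     'ZLE'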
def _plotBeam2D(iNode, jNode, ax, show_element_tags, element, eleStyle):
##procedure to render a 2D two-node element. use eleStyle = "wire" for a wire frame, and "solid" for solid element lines.
# tempLines, = plt.plot((iNode[0], jNode[0]), (iNode[1], jNode[1]), marker='')
tempLines, = plt.plot((iNode[0], jNode[0]), (iNode[1], jNode[1]))
if eleStyle in limStateColors:
if _checkEleLength2D(iNode,jNode) == "ZLE":
ele_lim_style = {'color':eleStyle, 'linewidth':1.0, 'linestyle':'-', 'marker':'o', 'mfc':eleStyle, 'markersize':2} # elements
else:
ele_lim_style = {'color':eleStyle, 'linewidth':5, 'linestyle':'-', 'marker':''} # elements
plt.setp(tempLines,**ele_lim_style)
elif eleStyle == "wire":
plt.setp(tempLines,**WireEle_style)
else:
plt.setp(tempLines,**ele_style)
tempTag = []
if show_element_tags == 'yes':
tempTag = ax.text((iNode[0]+jNode[0])/2, (iNode[1]+jNode[1])*1.02/2,
str(element), **ele_text_style) #label elements
return tempLines, tempTag
def _plotBeam3D(iNode, jNode, ax, show_element_tags, element, eleStyle):
##procedure to render a 3D two-node element. use eleStyle = "wire" for a wire frame, and "solid" for solid element lines.
tempLines, = plt.plot((iNode[0], jNode[0]), (iNode[1], jNode[1]),(iNode[2], jNode[2]), marker='')
if eleStyle in limStateColors:
if _checkEleLength3D(iNode,jNode) == "ZLE":
ele_lim_style = {'color':eleStyle, 'linewidth':1.0, 'linestyle':'-', 'marker':'o', 'mfc':eleStyle, 'markersize':2} # elements
else:
ele_lim_style = {'color':eleStyle, 'linewidth':5, 'linestyle':'-', 'marker':'', 'mfc':eleStyle, 'markersize':1} # elements
plt.setp(tempLines,**ele_lim_style)
elif eleStyle == "wire":
plt.setp(tempLines,**WireEle_style)
else:
plt.setp(tempLines,**ele_style)
tempTag = []
if show_element_tags == 'yes':
tempTag = ax.text((iNode[0]+jNode[0])/2, (iNode[1]+jNode[1])*1.02/2,
(iNode[2]+jNode[2])*1.02/2, str(element), **ele_text_style) #label elements
return tempLines, tempTag
def _plotEle_2D(nodes, elements, DispNodeCoordArray, fig, ax, show_element_tags):
nodeList = nodes[:,0]
Nnode = len(nodeList)
# Find the number of surfaces
Nele = len(elements)
Nsurf = len([ele for ele in elements if len(ele) >= 4])
figSurfaces = [None]*(Nsurf)
figLines = [None]*(Nele)
figTags = [None]*Nele
# xyz label default table for the current displacement
xyz_labels = {}
for jj in range(Nnode):
xyz_labels[int(nodeList[jj])] = [*DispNodeCoordArray[jj,:]]
SurfCounter = 0
for jj, element in enumerate(elements):
# Get element Nodes
eletag = int(element[0])
tempNodes = element[1:]
if len(tempNodes) == 2:
# 2D Beam-Column Elements
iNode = xyz_labels[tempNodes[0]]
jNode = xyz_labels[tempNodes[1]]
figLines[jj], = plt.plot((iNode[0], jNode[0]), (iNode[1], jNode[1]),marker='', **ele_style)
if show_element_tags == 'yes':
figTags[jj] = ax.text((iNode[0]+jNode[0])/2, (iNode[1]+jNode[1])/2, str(eletag), **ele_text_style) #label elements
if len(tempNodes) == 3:
# 2D Planer three-node shell elements
iNode = xyz_labels[tempNodes[0]]
jNode = xyz_labels[tempNodes[1]]
kNode = xyz_labels[tempNodes[2]]
outputs = _plotTri2D(iNode,jNode,kNode, ax, show_element_tags, eletag, ele_style, fillSurface='yes')
[figLines[jj], figSurfaces[SurfCounter], figTags[jj]] = [*outputs]
SurfCounter += 1
if len(tempNodes) == 4:
i_key = self._instance_key(instance)
now = time.time()
return now - self.cache_times[i_key][entity][LAST] > self.cache_times[i_key][entity][INTERVAL]
def _get_server_instance(self, instance):
i_key = self._instance_key(instance)
service_check_tags = [
'vcenter_server:{0}'.format(instance.get('name')),
'vcenter_host:{0}'.format(instance.get('host')),
]
# Check for ssl configs and generate an appropriate ssl context object
ssl_verify = instance.get('ssl_verify', True)
ssl_capath = instance.get('ssl_capath', None)
if not ssl_verify:
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_NONE
elif ssl_capath:
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(capath=ssl_capath)
# If both configs are used, log a message explaining the default
if not ssl_verify and ssl_capath:
self.log.debug("Your configuration is incorrectly attempting to "
"specify both a CA path, and to disable SSL "
"verification. You cannot do both. Proceeding with "
"disabling ssl verification.")
if i_key not in self.server_instances:
try:
server_instance = connect.SmartConnect(
host = instance.get('host'),
user = instance.get('username'),
pwd = instance.get('password'),
sslContext = context if not ssl_verify or ssl_capath else None
)
except Exception as e:
err_msg = "Connection to %s failed: %s" % (instance.get('host'), e)
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,
tags=service_check_tags, message=err_msg)
raise Exception(err_msg)
self.server_instances[i_key] = server_instance
# Test if the connection is working
try:
self.server_instances[i_key].RetrieveContent()
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK,
tags=service_check_tags)
except Exception as e:
err_msg = "Connection to %s died unexpectedly: %s" % (instance.get('host'), e)
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,
tags=service_check_tags, message=err_msg)
raise Exception(err_msg)
return self.server_instances[i_key]
def _compute_needed_metrics(self, instance, available_metrics):
""" Compare the available metrics for one MOR we have computed and intersect them
with the set of metrics we want to report
"""
if instance.get('all_metrics', False):
return available_metrics
i_key = self._instance_key(instance)
wanted_metrics = []
# Get only the basic metrics
for metric in available_metrics:
# No cache yet, skip it for now
if (i_key not in self.metrics_metadata
or metric.counterId not in self.metrics_metadata[i_key]):
continue
if self.metrics_metadata[i_key][metric.counterId]['name'] in BASIC_METRICS:
wanted_metrics.append(metric)
return wanted_metrics
def get_external_host_tags(self):
""" Returns a list of tags for every host that is detected by the vSphere
integration.
List of pairs (hostname, list_of_tags)
"""
self.log.info("Sending external_host_tags now")
external_host_tags = []
for instance in self.instances:
i_key = self._instance_key(instance)
mor_list = self.morlist[i_key].items()
for mor_name, mor in mor_list:
external_host_tags.append((mor['hostname'], {SOURCE_TYPE: mor['tags']}))
return external_host_tags
@atomic_method
def _cache_morlist_raw_atomic(self, i_key, obj_type, obj, tags, regexes=None):
""" Compute tags for a single node in the vCenter rootFolder
and queue other such jobs for children nodes.
Usual hierarchy:
rootFolder
- datacenter1
- compute_resource1 == cluster
- host1
- host2
- host3
- compute_resource2
- host5
- vm1
- vm2
If it's a node we want to query metric for, queue it in self.morlist_raw
that will be processed by another job.
"""
### <TEST-INSTRUMENTATION>
t = Timer()
self.log.debug("job_atomic: Exploring MOR {0} (type={1})".format(obj, obj_type))
### </TEST-INSTRUMENTATION>
tags_copy = deepcopy(tags)
if obj_type == 'rootFolder':
for datacenter in obj.childEntity:
# Skip non-datacenter
if not hasattr(datacenter, 'hostFolder'):
continue
self.pool.apply_async(
self._cache_morlist_raw_atomic,
args=(i_key, 'datacenter', datacenter, tags_copy, regexes)
)
elif obj_type == 'datacenter':
dc_tag = "vsphere_datacenter:%s" % obj.name
tags_copy.append(dc_tag)
for compute_resource in obj.hostFolder.childEntity:
# Skip non-compute resource
if not hasattr(compute_resource, 'host'):
continue
self.pool.apply_async(
self._cache_morlist_raw_atomic,
args=(i_key, 'compute_resource', compute_resource, tags_copy, regexes)
)
elif obj_type == 'compute_resource':
if obj.__class__ == vim.ClusterComputeResource:
cluster_tag = "vsphere_cluster:%s" % obj.name
tags_copy.append(cluster_tag)
for host in obj.host:
# Skip non-host
if not hasattr(host, 'vm'):
continue
self.pool.apply_async(
self._cache_morlist_raw_atomic,
args=(i_key, 'host', host, tags_copy, regexes)
)
elif obj_type == 'host':
if regexes and regexes.get('host_include') is not None:
match = re.search(regexes['host_include'], obj.name)
if not match:
self.log.debug(u"Filtered out VM {0} because of host_include_only_regex".format(obj.name))
return
watched_mor = dict(mor_type='host', mor=obj, hostname=obj.name, tags=tags_copy+['vsphere_type:host'])
self.morlist_raw[i_key].append(watched_mor)
host_tag = "vsphere_host:%s" % obj.name
tags_copy.append(host_tag)
for vm in obj.vm:
if vm.runtime.powerState != 'poweredOn':
continue
self.pool.apply_async(
self._cache_morlist_raw_atomic,
args=(i_key, 'vm', vm, tags_copy, regexes)
)
elif obj_type == 'vm':
if regexes and regexes.get('vm_include') is not None:
match = re.search(regexes['vm_include'], obj.name)
if not match:
self.log.debug(u"Filtered out VM {0} because of vm_include_only_regex".format(obj.name))
return
watched_mor = dict(mor_type='vm', mor=obj, hostname=obj.name, tags=tags_copy+['vsphere_type:vm'])
self.morlist_raw[i_key].append(watched_mor)
### <TEST-INSTRUMENTATION>
self.histogram('datadog.agent.vsphere.morlist_raw_atomic.time', t.total())
### </TEST-INSTRUMENTATION>
def _cache_morlist_raw(self, instance):
""" Initiate the first layer to refresh self.morlist by queueing
_cache_morlist_raw_atomic on the rootFolder in a recursive/async approach
"""
i_key = self._instance_key(instance)
self.log.debug("Caching the morlist for vcenter instance %s" % i_key)
if i_key in self.morlist_raw and len(self.morlist_raw[i_key]) > 0:
self.log.debug(
"Skipping morlist collection now, RAW results "
"processing not over (latest refresh was {0}s ago)".format(
time.time() - self.cache_times[i_key][MORLIST][LAST])
)
return
self.morlist_raw[i_key] = []
server_instance = self._get_server_instance(instance)
root_folder = server_instance.content.rootFolder
instance_tag = "vcenter_server:%s" % instance.get('name')
regexes = {
'host_include': instance.get('host_include_only_regex'),
'vm_include': instance.get('vm_include_only_regex')
}
self.pool.apply_async(
self._cache_morlist_raw_atomic,
args=(i_key, 'rootFolder', root_folder, [instance_tag], regexes)
)
self.cache_times[i_key][MORLIST][LAST] = time.time()
@atomic_method
def _cache_morlist_process_atomic(self, instance, mor):
""" Process one item of the self.morlist_raw list by querying the available
metrics for this MOR and then putting it in self.morlist
"""
### <TEST-INSTRUMENTATION>
t = Timer()
### </TEST-INSTRUMENTATION>
i_key = self._instance_key(instance)
server_instance = self._get_server_instance(instance)
perfManager = server_instance.content.perfManager
self.log.debug(
"job_atomic: Querying available metrics"
" for MOR {0} (type={1})".format(mor['mor'], mor['mor_type'])
)
available_metrics = perfManager.QueryAvailablePerfMetric(
mor['mor'], intervalId=REAL_TIME_INTERVAL)
mor['metrics'] = self._compute_needed_metrics(instance, available_metrics)
mor_name = str(mor['mor'])
if mor_name in self.morlist[i_key]:
# Was already here last iteration
self.morlist[i_key][mor_name]['metrics'] = mor['metrics']
else:
self.morlist[i_key][mor_name] = mor
self.morlist[i_key][mor_name]['last_seen'] = time.time()
### <TEST-INSTRUMENTATION>
self.histogram('datadog.agent.vsphere.morlist_process_atomic.time', t.total())
### </TEST-INSTRUMENTATION>
def _cache_morlist_process(self, instance):
""" Empties the self.morlist_raw by popping items and running asynchronously
the _cache_morlist_process_atomic operation that will get the available
metrics for this MOR and put it in self.morlist
"""
i_key = self._instance_key(instance)
if i_key not in self.morlist:
self.morlist[i_key] = {}
batch_size = self.init_config.get('batch_morlist_size', BATCH_MORLIST_SIZE)
for i in xrange(batch_size):
try:
mor = self.morlist_raw[i_key].pop()
self.pool.apply_async(self._cache_morlist_process_atomic, args=(instance, mor))
except (IndexError, KeyError):
self.log.debug("No more work to process in morlist_raw")
return
def _vacuum_morlist(self, instance):
""" Check if self.morlist doesn't have some old MORs that are gone, ie
we cannot get any metrics from them anyway (or =0)
"""
i_key = self._instance_key(instance)
morlist = self.morlist[i_key].items()
for mor_name, mor in morlist:
last_seen = mor['last_seen']
if (time.time() - last_seen) > 2 * REFRESH_MORLIST_INTERVAL:
del self.morlist[i_key][mor_name]
def _cache_metrics_metadata(self, instance):
""" Get from the server instance, all the performance counters metadata
meaning name/group/description... attached with the corresponding ID
"""
### <TEST-INSTRUMENTATION>
t = Timer()
### </TEST-INSTRUMENTATION>
i_key = self._instance_key(instance)
self.log.info("Warming metrics metadata cache for instance {0}".format(i_key))
server_instance = self._get_server_instance(instance)
perfManager = server_instance.content.perfManager
new_metadata = {}
for counter in perfManager.perfCounter:
d = dict(
name = "%s.%s" % (counter.groupInfo.key, counter.nameInfo.key),
unit = counter.unitInfo.key,
instance_tag = 'instance' # FIXME: replace by what we want to tag!
)
new_metadata[counter.key] = d
self.cache_times[i_key][METRICS_METADATA][LAST] = time.time()
self.log.info("Finished metadata collection for instance {0}".format(i_key))
# Reset metadata
self.metrics_metadata[i_key] = new_metadata
### <TEST-INSTRUMENTATION>
self.histogram('datadog.agent.vsphere.metric_metadata_collection.time', t.total())
### </TEST-INSTRUMENTATION>
def _transform_value(self, instance, counter_id, value):
""" Given the counter_id, look up for the metrics metadata to check the vsphere
type of the counter and apply pre-reporting transformation if needed.
"""
i_key = self._instance_key(instance)
if counter_id in self.metrics_metadata[i_key]:
unit = self.metrics_metadata[i_key][counter_id]['unit']
if unit == 'percent':
return float(value) / 100
# Defaults to return the value without transformation
return value
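# Sketch of the transformation above for a hypothetical counter id whose
# cached metadata unit is 'percent' (names are illustrative only):
#
#     >>> self._transform_value(instance, some_percent_counter_id, 55.0)
#     0.55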
@atomic_method
def _collect_metrics_atomic(self, instance, mor):
""" Task that collects the metrics listed in the morlist for one MOR
"""
### <TEST-INSTRUMENTATION>
t = Timer()
### </TEST-INSTRUMENTATION>
i_key = self._instance_key(instance)
server_instance = self._get_server_instance(instance)
perfManager = server_instance.content.perfManager
query = vim.PerformanceManager.QuerySpec(maxSample=1,
entity=mor['mor'],
metricId=mor['metrics'],
intervalId=20,
format='normal')
results = perfManager.QueryPerf(querySpec=[query])
if results:
for result in results[0].value:
if result.id.counterId not in self.metrics_metadata[i_key]:
self.log.debug("Skipping this metric value, because there is no metadata about it")
continue
instance_name = result.id.instance or "none"
value = self._transform_value(instance, result.id.counterId, result.value[0])
# Metric types are absolute, delta, and rate
if ALL_METRICS[self.metrics_metadata[i_key][result.id.counterId]['name']]['s_type'] == 'rate':
record_metric = self.rate
else:
record_metric = self.gauge
record_metric(
"vsphere.%s" % self.metrics_metadata[i_key][result.id.counterId]['name'],
value,
hostname=mor['hostname'],
tags=['instance:%s' % instance_name]
)
### <TEST-INSTRUMENTATION>
self.histogram('datadog.agent.vsphere.metric_colection.time', t.total())  # (sic) keep the historical metric name
### </TEST-INSTRUMENTATION>
def collect_metrics(self, instance):
""" Calls asynchronously _collect_metrics_atomic on all MORs, as the
job queue is processed the Aggregator will receive the metrics.
"""
i_key = self._instance_key(instance)
if i_key not in self.morlist:
self.log.debug("Not collecting metrics for this instance, nothing to do yet: {0}".format(i_key))
return
mors = self.morlist[i_key].items()
self.log.debug("Collecting metrics of %d mors" % len(mors))
vm_count = 0
for mor_name, mor in mors:
if mor['mor_type'] == 'vm':
vm_count += 1
if 'metrics' not in mor:
# self.log.debug("Skipping entity %s collection because we didn't cache its metrics yet" % mor['hostname'])
continue
self.pool.apply_async(self._collect_metrics_atomic, args=(instance, mor))
self.gauge('vsphere.vm.count', vm_count, tags=["vcenter_server:%s" % instance.get('name')])
def check(self, instance):
if not self.pool_started:
self.start_pool()
### <TEST-INSTRUMENTATION>
self.gauge('datadog.agent.vsphere.queue_size', self.pool._workq.qsize(), tags=['instant:initial'])
# Repo: BrunoKM/station-b-libraries
# -------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# -------------------------------------------------------------------------------------------
from collections import OrderedDict
from pathlib import Path
from typing import TYPE_CHECKING, Callable, Iterable, List, Optional, Tuple, Union
# Avoid spurious X windows errors, see:
# https://stackoverflow.com/questions/37604289/tkinter-tclerror-no-display-name-and-no-display-environment-variable
import GPy
import matplotlib.pyplot as plt # noqa: E402
import numpy as np
import pandas as pd
import scipy.stats
import seaborn as sns
from abex.constants import FILE
from abex.dataset import Dataset
from abex.plotting.composite_core import plot_multidimensional_function_slices
from abex.plotting.core import (
PLOT_SCALE,
calc_2d_slice,
make_lower_triangular_axis_grid_with_colorbar_axes,
plot_2d_slice_from_arrays,
)
from azureml.core import Run
from matplotlib.axes import Axes
from psbutils.type_annotations import PathOrString
if TYPE_CHECKING:
# Imports BayesOptModel for static type-checks only to get around circular import problem
from abex.bayesopt import BayesOptModel # pragma: no cover
RELATIVE_VALUE = "Relative value"
# noinspection PyUnresolvedReferences
colors = plt.rcParams["axes.prop_cycle"].by_key()["color"] # type: ignore
def loss_convergence(losses: List[List[float]], fname: Optional[str] = None) -> None: # pragma: no cover
f = plt.figure(figsize=(6, 4))
for i, loss in enumerate(losses):
iterations = len(loss)
plt.scatter(list(range(iterations)), loss, label=f"Fold {i}", s=3)
plt.legend()
plt.xlabel("Iteration")
plt.ylabel("Marginal log-likelihood")
sns.despine()
if fname is not None:
f.savefig(fname, bbox_inches="tight")
# noinspection PyArgumentList
plt.close()
def opt_distance(X, optx, j): # pragma: no cover
# noinspection PyUnresolvedReferences
Nd, n_inputs = np.shape(X)
Ij = np.eye(n_inputs)
Ij[j, j] = 0
d = np.zeros(Nd)
for i in range(Nd):
xd = X[i, :] - optx
d[i] = xd @ Ij @ xd
return d
def simulation_panel1d(
ax: Axes,
predict_func: Callable[[np.ndarray], Tuple[np.ndarray, np.ndarray]],
slice_dim: int,
bounds: Tuple[float, float],
slice_x: np.ndarray,
slice_y: Optional[float] = None,
resolution: int = 101,
color="b",
) -> Axes: # pragma: no cover
"""Make a plot of the predicted output (and +- one standard deviation) against one input (defined by slice_dim)
that is a slice through input space at a location defined by slice_x.
Optionally, mark the point [slice_x, slice_y], which, if the slice is plotted at a maximum of the model's
predictions, would be the maximum of the model predictions.
"""
# Get a grid of inputs for the continuous variable being varied across this slice
x_grid = np.linspace(bounds[0], bounds[1], resolution)
xs = np.tile(slice_x, (len(x_grid), 1))
xs[:, slice_dim] = x_grid
y_pred, y_var = predict_func(xs)
sigma = np.sqrt(y_var)
ax.plot(x_grid, y_pred, "-", label="Prediction", c=color)
ax.fill_between(
x_grid,
y_pred - sigma,
y_pred + sigma,
alpha=0.25,
fc=color,
ec="None",
label="68% confidence interval",
)
ax.set_xlim(bounds[0], bounds[1])
if slice_y is not None:
ax.plot(slice_x[slice_dim], slice_y, "o", markeredgecolor=color, markerfacecolor="w", label="Optimum")
else:
ax.axvline(slice_x[slice_dim], alpha=0.2, linestyle="--")
return ax
def get_logged_img_title(title: Optional[str] = None, fname: Optional[PathOrString] = None) -> str:
"""
Creates a title for logging plots on AML. If title is provided, that forms the base, otherwise use default.
If filename is provided, append the filename, which contains information about iteration and seed number.
Args:
title: Optional base title; defaults to "plot" when not provided.
fname: Optional file path whose stem (iteration and seed info) is appended.
Returns:
The composed title string used when logging the image to AML.
"""
title = title or "plot"
if fname is not None:
assert isinstance(fname, Path)
title += f"_{fname.stem}"
return title
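# Illustrative behaviour sketch (not part of the original module):
#
#     >>> get_logged_img_title()
#     'plot'
#     >>> get_logged_img_title(title="slices", fname=Path("run3_seed7.png"))
#     'slices_run3_seed7'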
def plot_prediction_slices1d(
predict_func: Callable[[np.ndarray], Tuple[np.ndarray, np.ndarray]],
parameter_space: "OrderedDict[str, Tuple[float, float]]",
slice_loc: np.ndarray,
slice_y: Optional[float] = None,
scatter_x: Optional[np.ndarray] = None,
scatter_y: Optional[np.ndarray] = None,
output_label: str = "Objective",
resolution: int = 100,
size: int = 3,
num_cols: Optional[int] = None,
title: Optional[str] = None,
fname: Optional[PathOrString] = None,
) -> Tuple[plt.Figure, np.ndarray]: # pragma: no cover
"""
Plot slices of the predictions from the model crossing a given location.
Args:
predict_func (Callable[[np.ndarray], Tuple[np.ndarray, np.ndarray]]): A function taking an input and returning
mean and variance of the predictive distribution at those points.
parameter_space (OrderedDict[str, Tuple[float, float]]): An ordered dictionary mapping input names to bounds.
slice_loc (np.ndarray): The point through which to plot the slices of the predictive distribution.
slice_y (Optional[float], optional): The output value at the slice location. Defaults to None.
scatter_x (Optional[np.ndarray], optional): Points to scatter on the plot (projected onto the slices).
If given, scatter_y must be specified as well. Defaults to None.
scatter_y (Optional[np.ndarray], optional): Output values corresponding to scatter_x. Defaults to None.
output_label (str, optional): Label for the output axis. Defaults to "Objective".
resolution (int, optional): Resolution (num. points) for the grid of input points along each slice.
Defaults to 100.
size (int, optional): Size of each axis with one slice in inches. Defaults to 3.
num_cols (Optional[int], optional): Maximum number of columns. If more slices, the axes will wrap.
Defaults to num_cols = ceil(sqrt(num_input_dims)).
title (Optional[str], optional): Title for the plot. Defaults to None.
fname (Optional[PathOrString], optional): File-name where to save the plot. Defaults to None.
"""
parameters = list(parameter_space.items())
n_inputs = len(parameter_space)
num_cols = num_cols if num_cols else int(np.ceil(np.sqrt(n_inputs)))
num_rows = int(np.ceil(n_inputs / num_cols))
# noinspection PyTypeChecker
fig, axs = plt.subplots(
nrows=num_rows,
ncols=num_cols,
sharey=True,
figsize=(size * num_cols, size * num_rows),
)
axs = np.atleast_2d(axs) # type: ignore
for i in range(num_rows):
for j in range(num_cols):
ax = axs[i, j]
slice_dim = i * num_cols + j
if slice_dim < n_inputs:
param_name, bounds = parameters[slice_dim]
simulation_panel1d(
ax=ax,
predict_func=predict_func,
slice_x=slice_loc,
slice_y=slice_y,
bounds=bounds,
slice_dim=slice_dim,
color=colors[0],
resolution=resolution,
)
# Scatter-plot data points if points to scatter given
if scatter_x is not None and scatter_y is not None:
ax.scatter(scatter_x[:, slice_dim], scatter_y, s=3, c=colors[1])
ax.set_xlabel(param_name)
else:
ax.set_visible(False)
axs[i, 0].set_ylabel(output_label)
if title is not None:
fig.suptitle(title)
# noinspection PyUnresolvedReferences
plt.tight_layout()
sns.despine()
run = Run.get_context()
logged_img_title = get_logged_img_title(title="plot1d", fname=fname)
run.log_image(name=logged_img_title, plot=plt)
# If filename given, save
if fname is not None:
fig.savefig(fname, bbox_inches="tight")
# noinspection PyArgumentList
plt.close()
return fig, axs
def plot_prediction_slices2d(
predict_func: Callable[[np.ndarray], Tuple[np.ndarray, np.ndarray]],
parameter_space: "OrderedDict[str, Tuple[float, float]]",
slice_loc: np.ndarray,
scatter_x: Optional[np.ndarray] = None,
scatter_y: Optional[np.ndarray] = None,
output_label: Optional[str] = None,
resolution: int = 100,
size: int = 3,
title: Optional[str] = None,
fname: Optional[PathOrString] = None,
) -> Tuple[plt.Figure, np.ndarray]: # pragma: no cover
parameters = list(parameter_space.items())
n_inputs = len(parameters)
assert n_inputs >= 2, "At least two input dimensions are required to plots 2d slice"
# Keep a running minimum and maximum of function values in 2D slices
func_values_min, func_values_max = np.inf, -np.inf
# Keep track of contour sets returned for each axis
contour_sets = []
num_cols = n_inputs - 1 # Number of rows of axes equals number of columns
# Construct axes
# noinspection PyTypeChecker
fig = plt.figure(figsize=(size * num_cols, size * num_cols))
axes, cbar_axes = make_lower_triangular_axis_grid_with_colorbar_axes(fig=fig, num_cols=num_cols, num_colorbars=1)
for i in range(num_cols): # i iterates over the rows of the plots
y_param_dim = i + 1
y_param_name, y_bounds = parameters[y_param_dim]
for j in range(num_cols): # j iterates over the columns of the plots
ax = axes[i, j]
if j <= i:
# Indices of the inputs to plot
x_param_dim = j
x_param_name, x_bounds = parameters[x_param_dim]
# Compute the data for the 2D slice plot
xx, yy, func_values_slice = calc_2d_slice(
func=lambda x: predict_func(x)[0], # Only interested in the mean of the prediction
dim_x=x_param_dim,
dim_y=y_param_dim,
slice_loc=slice_loc,
slice_bounds_x=x_bounds,
slice_bounds_y=y_bounds,
resolution=resolution,
)
# Plot the 2D slice
_, contour_set = plot_2d_slice_from_arrays(xx, yy, func_values_slice, ax=ax, plot_type="contourf")
contour_sets.append(contour_set)
# Keep a running minimum and maximum of function values in slices
func_values_min = min(func_values_min, func_values_slice.min()) # type: ignore
func_values_max = max(func_values_max, func_values_slice.max()) # type: ignore
# Scatter-plot the data
if scatter_x is not None and scatter_y is not None:
if len(scatter_y) > 0:
s = (scatter_y - np.min(scatter_y)) / np.max(scatter_y) + 1
ax.scatter(scatter_x[:, x_param_dim], scatter_x[:, y_param_dim], s=5 * s, c="yellow")
ax.set_xlim(x_bounds[0], x_bounds[1])
ax.set_ylim(y_bounds[0], y_bounds[1])
if i == num_cols - 1:
ax.set_xlabel(x_param_name)
else:
# Remove redundant ticks on inner plots
ax.xaxis.set_visible(False)
if j > 0:
ax.yaxis.set_visible(False)
axes[i, 0].set_ylabel(y_param_name)
# Update norm limits for colour scaling for each axis:
for im in contour_sets:
im.set_clim(vmin=func_values_min, vmax=func_values_max)
# noinspection PyUnresolvedReferences,PyUnboundLocalVariable
cb = fig.colorbar(contour_sets[-1], cax=cbar_axes[0])
cb.set_label(output_label)
cbar_axes[0].yaxis.set_ticks_position("left")
if title is not None:
fig.suptitle(title)
run = Run.get_context()
logged_img_title = get_logged_img_title(title="plot2d", fname=fname)
run.log_image(name=logged_img_title, plot=plt)
if fname is not None:
fig.savefig(fname, bbox_inches="tight")
# noinspection PyArgumentList
plt.close()
return fig, axes
def plot_calibration_curve(
predict_func: Callable, datasets: List[Dataset], labels: List[str]
) -> Tuple[plt.Figure, plt.Axes]: # pragma: no cover
"""Plot a calibration curve - the curve showing the percentage of points within each confidence interval around
the mean prediction from the model. This is useful for gauging how reliable uncertainty estimates from the model
are.
Args:
predict_func (Callable): A function taking an array of inputs and returning a tuple of predictive means and variances.
lower is not None:
lower_names = [param_name + '__lower'
for param_name in self._get_tokeniser_names()]
params_dict = self._add_to_params_dict(params_dict, lower_names, lower)
if C is not None:
params_dict = self._add_to_params_dict(params_dict, ['svm__C'], C)
if random_state is not None:
params_dict = self._add_to_params_dict(params_dict,
['svm__random_state'], random_state)
if scale:
params_dict = self._add_to_params_dict(params_dict, ['scale'],
MinMaxScaler())
else:
params_dict = self._add_to_params_dict(params_dict, ['scale'], None)
return params_dict
@staticmethod
def _add_to_params(params_list, to_add, to_add_names):
'''
Used to add parameters that appear multiple times in the same pipeline
and must share the same value. To add them, the current parameter list
is copied N times, where N is the length of the to_add list. Returns
the updated parameter list.
:param params_list: A list of dicts where each dict contains parameters and \
corresponding values that are to be searched for. All dict are part of \
the search space.
:param to_add: List of values that are to be added to the search space.
:param to_add_names: List of names that are associated to the values.
:type params_list: list
:type to_add: list
:type to_add_names: list
:returns: The updated params_list
:rtype: list
'''
num_params = len(params_list)
num_to_add = len(to_add)
new_param_list = []
# Catch the case that params_list was originally empty
if num_params == 0:
for _ in range(num_to_add):
new_param_list.append([defaultdict(list)])
else:
for _ in range(num_to_add):
new_param_list.append(copy.deepcopy(params_list))
for index, param in enumerate(to_add):
for param_name in to_add_names:
for sub_list in new_param_list[index]:
sub_list[param_name].append(param)
params_list = [param_dict for sub_list in new_param_list
for param_dict in sub_list]
return params_list
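# Worked sketch of the expansion performed above (wv_a, wv_b and the
# parameter names are illustrative only; the enclosing class name is
# elided): starting from an empty search space, adding two options for a
# parameter that appears under two names yields two dicts, each holding the
# same value under both names:
#
#     >>> _add_to_params([], [wv_a, wv_b],
#     ...                ['union__wv__vectors', 'context__wv__vectors'])
#     [{'union__wv__vectors': [wv_a], 'context__wv__vectors': [wv_a]},
#      {'union__wv__vectors': [wv_b], 'context__wv__vectors': [wv_b]}]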
@staticmethod
def _add_to_all_params(params_list, param_name, param_value):
'''
Used to add param_name and its values to each dictionary of parameters
in the params_list. Returns the updated params_list.
:param params_list: A list of dicts where each dict contains parameters and \
corresponding values that are to be searched for. All dict are part of \
the search space.
:param param_name: The name associated to the parameter value to be added \
to the params_list.
:param param_value: The list of values associated to the param_name that are \
added to the params_list linked to the associated name.
:type params_list: list
:type param_name: String
:type param_value: list
:returns: The updated params_list
:rtype: list
'''
for param_dict in params_list:
param_dict[param_name] = param_value
return params_list
def get_cv_params(self, word_vectors, tokenisers=None, lowers=None, C=None,
scale=None, random_state=None):
'''
Each attribute has to be a list which contains parameters that are to be
tuned.
This method is to be overridden when more values than those listed in the
attributes are required for the model. E.g. a lexicon.
:param word_vectors: A list of a list of `bella.word_vectors.WordVectors` \
instances e.g. [[WordVectors()], [WordVectors(), AnotherWordVector()]]
:param tokenisers: A list of tokenisers methods from `bella.tokenisers` \
or a list of methods that conform to the same output as `bella.tokenisers`
:param lowers: A list of bool values which indicate whether to lower case \
the input words.
:param C: A list of floats which indicate the C value on the SVM classifier.
:param random_state: An int which seeds the random number generator used \
to shuffle the data. Used to ensure reproducibility.
:param scale: A list of at most two bool values indicating whether to \
scale the data with MinMaxScaler (True) or not to scale it (False).
:type word_vectors: list
:type tokenisers: list
:type lowers: list
:type C: list
:type random_state: int
:type scale: list
:return: A list of dicts where each dict represents a different \
parameter space to search. Used as the params attribute to grid_search \
function.
:rtype: list
'''
params_list = []
params_list = self._add_to_params(params_list, word_vectors,
self._get_word_vector_names())
if tokenisers is not None:
tokenisers_names = [param_name + '__tokeniser'
for param_name in self._get_tokeniser_names()]
params_list = self._add_to_params(params_list, tokenisers,
tokenisers_names)
if lowers is not None:
lower_names = [param_name + '__lower'
for param_name in self._get_tokeniser_names()]
params_list = self._add_to_params(params_list, lowers, lower_names)
if C is not None:
params_list = self._add_to_all_params(params_list, 'svm__C', C)
if random_state is not None:
if not isinstance(random_state, int):
raise TypeError('random_state should be of type int and not {}'\
.format(type(random_state)))
random_state = [random_state]
params_list = self._add_to_all_params(params_list, 'svm__random_state',
random_state)
if scale is not None:
scale_params = []
if len(scale) > 2:
raise ValueError('Scale has to be a list that can contain '\
'at most two values: False to not scale and '\
'True to scale. Your list contains more than '\
'two values: {}'.format(scale))
for value in scale:
if value:
scale_params.append(MinMaxScaler())
else:
scale_params.append(None)
params_list = self._add_to_all_params(params_list, 'scale',
scale_params)
return params_list
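# Hypothetical usage sketch (not in the original source); word_vector_a
# and word_vector_b stand in for bella.word_vectors.WordVectors instances:
#
#   cv_params = model.get_cv_params(
#       word_vectors=[[word_vector_a], [word_vector_b]],
#       C=[0.01, 0.1, 1.0],
#       scale=[True, False])
#   # cv_params is a list of dicts, one per region of the search space,
#   # suitable for the params argument of grid_search below.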
def fit(self, train_data, train_y, params):
temp_pipeline = copy.deepcopy(self.pipeline)
temp_pipeline.set_params(**params)
temp_pipeline.fit(train_data, train_y)
self.model = temp_pipeline
def grid_search(self, train_data, train_y, params, **kwargs):
grid_search = GridSearchCV(self.pipeline, param_grid=params, **kwargs)
self.model = grid_search.fit(train_data, train_y)
cross_val_results = pd.DataFrame(grid_search.cv_results_)
return cross_val_results
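# Hypothetical usage sketch (not in the original source): extra kwargs
# are forwarded to sklearn's GridSearchCV, so e.g. cv and n_jobs work:
#
#   results = model.grid_search(train_data, train_y, cv_params,
#                               cv=5, n_jobs=-1)
#   # results is a pandas DataFrame view of GridSearchCV.cv_results_.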
def save_grid_search(self, train_data, train_y, grid_params, save_param,
dataset_name, file_name, re_write=False,
**kwargs):
'''
Saves the results of the grid search to a json file where the result is
stored per dataset and then per parameter being searched. Note that
only one parameter type can be searched and saved at a time e.g.
you can search over different tokenisers, but you cannot search over
different tokenisers and word vectors at the same time. If you would
like those results without saving and caching please use the
grid_search function.
'''
if save_param not in grid_params:
raise ValueError('save_param {} has to be a key in the grid_params'\
' dict {}'.format(save_param, grid_params))
param_values = grid_params[save_param]
param_name = self.param_name_function(save_param)(param_values)
name_score = {}
name_param = {}
if not re_write:
name_score = get_json_data(file_name, dataset_name)
temp_grid_params = copy.deepcopy(grid_params)
for param in param_values:
if isinstance(param, list):
param = tuple(param)
name = param_name[param]
name_param[name] = param
if not re_write:
if name in name_score:
continue
temp_grid_params[save_param] = [param]
cv_params = self.get_cv_params(**temp_grid_params)
grid_res = self.grid_search(train_data, train_y, cv_params, **kwargs)
if grid_res.shape[0] != 1:
raise ValueError('Searching over more than one parameter is '\
'not allowed as only one value can be '\
'associated to a search parameter at a '\
'time. Grid results {} parameter being searched'\
' {}'.format(grid_res, save_param))
name_score[name] = grid_res['mean_test_score'][0]
write_json_data(file_name, dataset_name, name_score)
sorted_name_score = sorted(name_score.items(), key=lambda x: x[1],
reverse=True)
best_param = None
for name, score in sorted_name_score:
if name not in name_param:
continue
best_param = name_param[name]
break
if best_param is None:
raise ValueError('best_param cannot be None; this should only happen'\
' if no parameters are being searched for. Param'\
' names that have been searched: {}'\
.format(name_param))
if isinstance(best_param, tuple):
return list(best_param)
return best_param
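# Hypothetical usage sketch (not in the original source); file path,
# dataset name and parameter values are made up. Searches over a single
# parameter type (tokenisers) and caches the scores to a json file:
#
#   best_tokeniser = model.save_grid_search(
#       train_data, train_y,
#       grid_params={'word_vectors': [[word_vector_a]],
#                    'tokenisers': [tokeniser_a, tokeniser_b]},
#       save_param='tokenisers', dataset_name='restaurants',
#       file_name='results/tokenisers.json', cv=5)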
def predict(self, test_data):
if self.model is not None:
return self.model.predict(test_data)
else:
raise ValueError('self.model is not fitted please fit the model '\
'using the fit function')
@staticmethod
def score(true_values, pred_values, scorer, *args, **kwargs):
'''
Scores the predicted target values against the true target values using
the scorer function. Returns the output of the scorer function.
:param true_values: Correct Target values
:param pred_values: Predicted Target values
:param scorer: Scoring function. The function must take the true \
targets as the first parameter and predicted targets as the second \
parameter. e.g sklearn.metrics.f1_score
:param args: Additional arguments to the scorer function
:param kwargs: Additional key word arguments to the scorer function
:type true_values: array
:type pred_values: array
:type scorer: function
:returns: The output from the scorer based on the true and predicted \
values normally a float.
:rtype: scorer output
'''
return scorer(true_values, pred_values, *args, **kwargs)
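# Illustrative usage (not in the original source):
#
#   from sklearn.metrics import f1_score
#   macro_f1 = TargetInd.score(y_true, y_pred, f1_score, average='macro')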
def __repr__(self):
return 'Target Independent'
class TargetDepC(TargetInd):
def __init__(self):
super().__init__()
self.pipeline = Pipeline([
('union', FeatureUnion([
('left', Pipeline([
('contexts', Context('left')),
('tokens', ContextTokeniser(ark_twokenize, True)),
('word_vectors', ContextWordVectors()),
('pool_funcs', FeatureUnion([
('max_pipe', Pipeline([
('max', NeuralPooling(matrix_max)),
('join', JoinContextVectors(matrix_median))
])),
('min_pipe', Pipeline([
('min', NeuralPooling(matrix_min)),
('join', JoinContextVectors(matrix_median))
])),
('avg_pipe', Pipeline([
('avg', NeuralPooling(matrix_avg)),
('join', JoinContextVectors(matrix_median))
])),
('prod_pipe', Pipeline([
('prod', NeuralPooling(matrix_prod)),
('join', JoinContextVectors(matrix_median))
])),
('std_pipe', Pipeline([
('std', NeuralPooling(matrix_std)),
('join', JoinContextVectors(matrix_median))
]))
]))
])),
('right', Pipeline([
('contexts', Context('right')),
('tokens', ContextTokeniser(ark_twokenize, True)),
('word_vectors', ContextWordVectors()),
('pool_funcs', FeatureUnion([
('max_pipe', Pipeline([
('max', NeuralPooling(matrix_max)),
('join', JoinContextVectors(matrix_median))
])),
('min_pipe', Pipeline([
('min', NeuralPooling(matrix_min)),
('join', JoinContextVectors(matrix_median))
])),
('avg_pipe', Pipeline([
('avg', NeuralPooling(matrix_avg)),
('join', JoinContextVectors(matrix_median))
])),
('prod_pipe', Pipeline([
('prod', NeuralPooling(matrix_prod)),
('join', JoinContextVectors(matrix_median))
])),
('std_pipe', Pipeline([
('min', | |
0, 0, 0, 0],
[1264, 41.970665, 0, 9999, -9999, 1.0, 100, 1, 82.035361, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1265, 2.779034, 0, 9999, -9999, 1.0, 100, 1, 6.654727, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1266, 49.700969, 0, 9999, -9999, 1.0, 100, 1, 119.710849, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1267, 29.046942, 0, 9999, -9999, 1.0, 100, 1, 39.469006, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1268, 0.146581, 0, 9999, -9999, 1.0, 100, 1, 3.4295, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1269, 0.264653, 0, 9999, -9999, 1.0, 100, 1, 5.105829, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1270, 4.177621, 0, 9999, -9999, 1.0, 100, 1, 38.950511, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1271, 3.846135, 0, 9999, -9999, 1.0, 100, 1, 47.371792, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1272, 0.11849, 0, 9999, -9999, 1.0, 100, 1, 1.23166, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1273, 0.544584, 0, 9999, -9999, 1.0, 100, 1, 2.169201, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1274, 35.069699, 0, 9999, -9999, 1.0, 100, 1, 53.095629, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1275, 37.897575, 0, 9999, -9999, 1.0, 100, 1, 99.0753, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1276, 11.349089, 0, 9999, -9999, 1.0, 100, 1, 25.655641, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1277, 26.420938, 0, 9999, -9999, 1.0, 100, 1, 65.611252, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1278, 81.376624, 0, 9999, -9999, 1.0, 100, 1, 170.437781, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1280, 0.002362, 0, 9999, -9999, 1.0, 100, 1, 0.626494, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1281, 0.065773, 0, 9999, -9999, 1.0, 100, 1, 2.51246, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1282, 0.127453, 0, 9999, -9999, 1.0, 100, 1, 4.363037, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1283, 502.62361, 0, 9999, -9999, 1.0, 100, 1, 1297.764428, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1284, 10.863971, 0, 9999, -9999, 1.0, 100, 1, 28.426322, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1285, 0.022069, 0, 9999, -9999, 1.0, 100, 1, 2.937048, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1286, 11.000676, 0, 9999, -9999, 1.0, 100, 1, 17.872201, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1287, 69.513725, 0, 9999, -9999, 1.0, 100, 1, 93.199628, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1288, 110.38476, 0, 9999, -9999, 1.0, 100, 1, 148.402692, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1289, 158.425906, 0, 9999, -9999, 1.0, 100, 1, 184.149235, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1290, 2.359275, 0, 9999, -9999, 1.0, 100, 1, 4.901974, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1291, 57.797899, 0, 9999, -9999, 1.0, 100, 1, 98.293351, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1292, 17.767413, 0, 9999, -9999, 1.0, 100, 1, 41.682074, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1293, 1.111468, 0, 9999, -9999, 1.0, 100, 1, 2.402107, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1294, 1.522106, 0, 9999, -9999, 1.0, 100, 1, 5.39743, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1295, 1.588649, 0, 9999, -9999, 1.0, 100, 1, 5.873666, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1296, 2.263518, 0, 9999, -9999, 1.0, 100, 1, 27.356489, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1297, 21.430997, 0, 9999, -9999, 1.0, 100, 1, 177.778742, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1298, 0.164184, 0, 9999, -9999, 1.0, 100, 1, 4.014603, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1299, 0.114819, 0, 9999, -9999, 1.0, 100, 1, 2.158207, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1300, 23.594023, 0, 9999, -9999, 1.0, 100, 1, 23.74405, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1301, 60.226747, 0, 9999, -9999, 1.0, 100, 1, 60.863304, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1302, 4.60652, 0, 9999, -9999, 1.0, 100, 1, 4.877299, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1303, 4.06039, 0, 9999, -9999, 1.0, 100, 1, 4.335516, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1304, 7.966663, 0, 9999, -9999, 1.0, 100, 1, 9.594319, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1305, 0.004235, 0, 9999, -9999, 1.0, 100, 1, 0.004567, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1306, 1.726445, 0, 9999, -9999, 1.0, 100, 1, 1.827014, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1307, 0.248113, 0, 9999, -9999, 1.0, 100, 1, 0.29894, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1308, 1.551409, 0, 9999, -9999, 1.0, 100, 1, 3.278321, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1309, 2.292167, 0, 9999, -9999, 1.0, 100, 1, 3.34909, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1310, 1.128691, 0, 9999, -9999, 1.0, 100, 1, 1.64589, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1311, 1.830323, 0, 9999, -9999, 1.0, 100, 1, 11.854004, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1312, 248.538591, 0, 9999, -9999, 1.0, 100, 1, 262.264924, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1313, 26.351484, 0, 9999, -9999, 1.0, 100, 1, 30.836748, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1314, 10.615304, 0, 9999, -9999, 1.0, 100, 1, 12.003987, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1315, 7.63472, 0, 9999, -9999, 1.0, 100, 1, 7.879027, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1316, 0.191185, 0, 9999, -9999, 1.0, 100, 1, 2.757497, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1317, 23.32137, 0, 9999, -9999, 1.0, 100, 1, 23.958574, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1318, 1.333933, 0, 9999, -9999, 1.0, 100, 1, 1.956332, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1319, 15.295998, 0, 9999, -9999, 1.0, 100, 1, 17.708276, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1320, 2.617242, 0, 9999, -9999, 1.0, 100, 1, 20.75859, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1321, 0.01604, 0, 9999, -9999, 1.0, 100, 1, 0.161123, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1322, 0.216372, 0, 9999, -9999, 1.0, 100, 1, 0.929763, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1323, 174.232927, 0, 9999, -9999, 1.0, 100, 1, 199.111909, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1324, 7.189955, 0, 9999, -9999, 1.0, 100, 1, 13.063258, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1325, 39.976999, 0, 9999, -9999, 1.0, 100, 1, 90.497559, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1326, 47.909355, 0, 9999, -9999, 1.0, 100, 1, 56.928865, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1327, 43.502601, 0, 9999, -9999, 1.0, 100, 1, 50.796895, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1328, 13.978435, 0, 9999, -9999, 1.0, 100, 1, 16.063343, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1329, 111.189314, 0, 9999, -9999, 1.0, 100, 1, 218.675424, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1330, 12.253092, 0, 9999, -9999, 1.0, 100, 1, 30.131028, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1331, 0.282683, 0, 9999, -9999, 1.0, 100, 1, 0.289238, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1332, 1.800257, 0, 9999, -9999, 1.0, 100, 1, 26.293088, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1333, 9.160829, 0, 9999, -9999, 1.0, 100, 1, 45.650254, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1334, 0.061013, 0, 9999, -9999, 1.0, 100, 1, 1.215341, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1335, 0.229534, 0, 9999, -9999, 1.0, 100, 1, 3.306939, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1336, 25.961183, 0, 9999, -9999, 1.0, 100, 1, 29.773035, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1337, 103.967271, 0, 9999, -9999, 1.0, 100, 1, 121.31241, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1338, 0.167403, 0, 9999, -9999, 1.0, 100, 1, 0.832524, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1339, 8.833687, 0, 9999, -9999, 1.0, 100, 1, 10.086482, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1340, 58.254656, 0, 9999, -9999, 1.0, 100, 1, 70.098327, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1341, 144.714194, 0, 9999, -9999, 1.0, 100, 1, 205.513321, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1342, 0.042981, 0, 9999, -9999, 1.0, 100, 1, 0.734589, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1343, 0.022656, 0, 9999, -9999, 1.0, 100, 1, 1.102108, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1344, 0.045361, 0, 9999, -9999, 1.0, 100, 1, 0.226057, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1345, 0.352881, 0, 9999, -9999, 1.0, 100, 1, 3.971188, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1346, 169.647487, 0, 9999, -9999, 1.0, 100, 1, 214.719215, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1347, 216.481124, 0, 9999, -9999, 1.0, 100, 1, 414.115976, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1348, 17.819558, 0, 9999, -9999, 1.0, 100, 1, 22.707927, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1349, 36.440157, 0, 9999, -9999, 1.0, 100, 1, 42.352342, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1350, 0.021701, 0, 9999, -9999, 1.0, 100, 1, 0.094971, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1351, 9.1e-05, 0, 9999, -9999, 1.0, 100, 1, 0.015958, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1352, 0.009481, 0, 9999, -9999, 1.0, 100, 1, 0.83726, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1355, 1.149723, 0, 9999, -9999, 1.0, 100, 1, 1.688324, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1356, 60.176003, 0, 9999, -9999, 1.0, 100, 1, 73.486231, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1357, 45.824982, 0, 9999, -9999, 1.0, 100, 1, 56.459913, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1358, 0.168272, 0, 9999, -9999, 1.0, 100, 1, 0.247293, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1359, 29.560394, 0, 9999, -9999, 1.0, 100, 1, 70.633589, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1360, 17.052155, 0, 9999, -9999, 1.0, 100, 1, 17.135983, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1361, 63.073197, 0, 9999, -9999, 1.0, 100, 1, 63.207173, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1362, 78.657262, 0, 9999, -9999, 1.0, 100, 1, 79.107216, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1363, 0.002698, 0, 9999, -9999, 1.0, 100, 1, 0.036158, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1364, 0.004108, 0, 9999, -9999, 1.0, 100, 1, 0.061068, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1365, 3.9e-05, 0, 9999, -9999, 1.0, 100, 1, 0.000456, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1366, 0.332872, 0, 9999, -9999, 1.0, 100, 1, 1.229992, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1367, 12.205606, 0, 9999, -9999, 1.0, 100, 1, 43.863891, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1368, 0.245203, 0, 9999, -9999, 1.0, 100, 1, 3.298243, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1369, 5.73475, 0, 9999, -9999, 1.0, 100, 1, 7.968859, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1370, 0.220662, 0, 9999, -9999, 1.0, 100, 1, 0.343308, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1371, 72.186745, 0, 9999, -9999, 1.0, 100, 1, 81.767208, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1372, 146.367831, 0, 9999, -9999, 1.0, 100, 1, 192.966588, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1373, 28.198506, 0, 9999, -9999, 1.0, 100, 1, 35.200257, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1374, 108.182893, 0, 9999, -9999, 1.0, 100, 1, 108.220146, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1375, 61.126882, 0, 9999, -9999, 1.0, 100, 1, 61.223816, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1376, 144.232786, 0, 9999, -9999, 1.0, 100, 1, 176.213655, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1377, 128.501752, 0, 9999, -9999, 1.0, 100, 1, 234.376272, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1378, 129.503951, 0, 9999, -9999, 1.0, 100, 1, 246.029906, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1379, 0.736562, 0, 9999, -9999, 1.0, 100, 1, 0.805984, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1380, 1.185856, 0, 9999, -9999, 1.0, 100, 1, 1.213356, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1381, 0.930916, 0, 9999, -9999, 1.0, 100, 1, 1.01257, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1382, 138.558982, 0, 9999, -9999, 1.0, 100, 1, 138.839906, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1383, 109.233959, 0, 9999, -9999, 1.0, 100, 1, 109.821439, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1384, 4.514125, 0, 9999, -9999, 1.0, 100, 1, 4.669135, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1385, 0.113568, 0, 9999, -9999, 1.0, 100, 1, 0.124455, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1386, 0.658322, 0, 9999, -9999, 1.0, 100, 1, 0.673858, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1387, 3.444813, 0, 9999, -9999, 1.0, 100, 1, 3.493561, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1388, 0.90715, 0, 9999, -9999, 1.0, 100, 1, 0.928188, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1389, 0.208696, 0, 9999, -9999, 1.0, 100, 1, 0.213536, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1390, 3.68073, 0, 9999, -9999, 1.0, 100, 1, 3.732816, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1391, 0.504414, 0, 9999, -9999, 1.0, 100, 1, 0.521719, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1392, 18.957564, 0, 9999, -9999, 1.0, 100, 1, 19.306386, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1393, 0.904858, 0, 9999, -9999, 1.0, 100, 1, 1.376509, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1394, 0.743091, 0, 9999, -9999, 1.0, 100, 1, 1.077886, 0.0, 0, 0, 0, 0, 0, 0, 0, | |
``set`` of key usage flags, as :py:obj:`~constants.KeyFlags`.
This keyword is ignored for non-self-certifications.
:type usage: ``set``
:keyword ciphers: A list of preferred symmetric ciphers, as :py:obj:`~constants.SymmetricKeyAlgorithm`.
This keyword is ignored for non-self-certifications.
:type ciphers: ``list``
:keyword hashes: A list of preferred hash algorithms, as :py:obj:`~constants.HashAlgorithm`.
This keyword is ignored for non-self-certifications.
:type hashes: ``list``
:keyword compression: A list of preferred compression algorithms, as :py:obj:`~constants.CompressionAlgorithm`.
This keyword is ignored for non-self-certifications.
:type compression: ``list``
:keyword key_expiration: Specify a key expiration date for when this key should expire, or a
:py:obj:`~datetime.timedelta` of how long after the key was created it should expire.
This keyword is ignored for non-self-certifications.
:type key_expiration: :py:obj:`datetime.datetime`, :py:obj:`datetime.timedelta`
:keyword attested_certifications: A list of third-party certifications, as :py:obj:`PGPSignature`, that
the certificate holder wants to attest to for redistribution with the certificate.
Alternatively, any element in the list can be a ``bytes`` or ``bytearray`` object
of the appropriate length (the length of this certification's digest).
This keyword is only used for signatures of type Attestation.
:type attested_certifications: ``list``
:keyword keyserver: Specify the URI of the preferred key server of the user.
This keyword is ignored for non-self-certifications.
:type keyserver: ``str``, ``unicode``, ``bytes``
:keyword keyserver_flags: A set of Key Server Preferences, as :py:obj:`~constants.KeyServerPreferences`.
:type keyserver_flags: ``set``
:keyword primary: Whether or not to consider the certified User ID as the primary one.
This keyword is ignored for non-self-certifications, and any certifications directly on keys.
:type primary: ``bool``
These optional keywords only make sense, and thus only have an effect, when signing another key or User ID:
:keyword trust: Specify the level and amount of trust to assert when certifying a public key. Should be a tuple
of two ``int`` s, specifying the trust level and trust amount. See
`RFC 4880 Section 5.2.3.13. Trust Signature <https://tools.ietf.org/html/rfc4880#section-5.2.3.13>`_
for more on what these values mean.
:type trust: ``tuple`` of two ``int`` s
:keyword regex: Specify a regular expression to constrain the specified trust signature in the resulting signature.
Symbolically signifies that the specified trust signature only applies to User IDs which match
this regular expression.
This is meaningless without also specifying trust level and amount.
:type regex: ``str``
:keyword exportable: Whether this certification is exportable or not.
:type exportable: ``bool``
"""
hash_algo = prefs.pop('hash', None)
sig_type = level
if isinstance(subject, PGPKey):
sig_type = SignatureType.DirectlyOnKey
sig = PGPSignature.new(sig_type, self.key_algorithm, hash_algo, self.fingerprint.keyid, created=prefs.pop('created', None))
# signature options that only make sense in certifications
usage = prefs.pop('usage', None)
exportable = prefs.pop('exportable', None)
if usage is not None:
sig._signature.subpackets.addnew('KeyFlags', hashed=True, flags=usage)
if exportable is not None:
sig._signature.subpackets.addnew('ExportableCertification', hashed=True, bflag=exportable)
keyfp = self.fingerprint
if isinstance(subject, PGPKey):
keyfp = subject.fingerprint
if isinstance(subject, PGPUID) and subject._parent is not None:
keyfp = subject._parent.fingerprint
if keyfp == self.fingerprint:
# signature options that only make sense in self-certifications
cipher_prefs = prefs.pop('ciphers', None)
hash_prefs = prefs.pop('hashes', None)
compression_prefs = prefs.pop('compression', None)
key_expires = prefs.pop('key_expiration', None)
keyserver_flags = prefs.pop('keyserver_flags', None)
keyserver = prefs.pop('keyserver', None)
primary_uid = prefs.pop('primary', None)
attested_certifications = prefs.pop('attested_certifications', [])
if key_expires is not None:
# key expires should be a timedelta, so if it's a datetime, turn it into a timedelta
if isinstance(key_expires, datetime):
key_expires = key_expires - self.created
sig._signature.subpackets.addnew('KeyExpirationTime', hashed=True, expires=key_expires)
if cipher_prefs is not None:
sig._signature.subpackets.addnew('PreferredSymmetricAlgorithms', hashed=True, flags=cipher_prefs)
if hash_prefs:
sig._signature.subpackets.addnew('PreferredHashAlgorithms', hashed=True, flags=hash_prefs)
if sig.hash_algorithm is None:
sig._signature.halg = hash_prefs[0]
if sig.hash_algorithm is None:
sig._signature.halg = HashAlgorithm.SHA256
if compression_prefs is not None:
sig._signature.subpackets.addnew('PreferredCompressionAlgorithms', hashed=True, flags=compression_prefs)
if keyserver_flags is not None:
sig._signature.subpackets.addnew('KeyServerPreferences', hashed=True, flags=keyserver_flags)
if keyserver is not None:
sig._signature.subpackets.addnew('PreferredKeyServer', hashed=True, uri=keyserver)
if primary_uid is not None:
sig._signature.subpackets.addnew('PrimaryUserID', hashed=True, primary=primary_uid)
cert_sigtypes = {SignatureType.Generic_Cert, SignatureType.Persona_Cert,
SignatureType.Casual_Cert, SignatureType.Positive_Cert,
SignatureType.CertRevocation}
# Features is always set on certifications:
if sig._signature.sigtype in cert_sigtypes:
sig._signature.subpackets.addnew('Features', hashed=True, flags=Features.pgpy_features)
# If this is an attestation, then we must include an Attested Certifications subpacket:
if sig._signature.sigtype == SignatureType.Attestation:
attestations = set()
for attestation in attested_certifications:
if isinstance(attestation, PGPSignature) and attestation.type in cert_sigtypes:
h = sig.hash_algorithm.hasher
h.update(attestation._signature.canonical_bytes())
attestations.add(h.digest())
elif isinstance(attestation, (bytes, bytearray)) and len(attestation) == sig.hash_algorithm.digest_size:
attestations.add(attestation)
else:
warnings.warn("Attested Certification element is neither a PGPSignature certification nor " +
"a bytes object of size %d, ignoring"%(sig.hash_algorithm.digest_size))
sig._signature.subpackets.addnew('AttestedCertifications', hashed=True, attested_certifications=b''.join(sorted(attestations)))
else:
# signature options that only make sense in non-self-certifications
trust = prefs.pop('trust', None)
regex = prefs.pop('regex', None)
if trust is not None:
sig._signature.subpackets.addnew('TrustSignature', hashed=True, level=trust[0], amount=trust[1])
if regex is not None:
sig._signature.subpackets.addnew('RegularExpression', hashed=True, regex=regex)
return self._sign(subject, sig, **prefs)
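# Hypothetical usage sketch (not part of the original source); the key
# and User ID objects are made up:
#
#   with my_key.unlock('passphrase'):
#       # self-certification with preferences
#       sig = my_key.certify(my_key.userids[0],
#                            usage={KeyFlags.Sign, KeyFlags.Certify},
#                            hashes=[HashAlgorithm.SHA256],
#                            key_expiration=timedelta(days=365))
#       # certifying someone else's User ID with a trust signature
#       tsig = my_key.certify(other_key.userids[0], trust=(1, 60))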
@KeyAction(KeyFlags.Certify, is_unlocked=True, is_public=False)
def revoke(self, target, **prefs):
"""
Revoke a key, a subkey, or all current certification signatures of a User ID that were generated by this key so far.
:param target: The key to revoke
:type target: :py:obj:`PGPKey`, :py:obj:`PGPUID`
:raises: :py:exc:`~pgpy.errors.PGPError` if the key is passphrase-protected and has not been unlocked
:raises: :py:exc:`~pgpy.errors.PGPError` if the key is public
:returns: :py:obj:`PGPSignature`
In addition to the optional keyword arguments accepted by :py:meth:`PGPKey.sign`, the following optional
keyword arguments can be used with :py:meth:`PGPKey.revoke`.
:keyword reason: Defaults to :py:obj:`constants.RevocationReason.NotSpecified`
:type reason: One of :py:obj:`constants.RevocationReason`.
:keyword comment: Defaults to an empty string.
:type comment: ``str``
"""
hash_algo = prefs.pop('hash', None)
if isinstance(target, PGPUID):
sig_type = SignatureType.CertRevocation
elif isinstance(target, PGPKey):
##TODO: check to make sure that the key that is being revoked:
# - is this key
# - is one of this key's subkeys
# - specifies this key as its revocation key
if target.is_primary:
sig_type = SignatureType.KeyRevocation
else:
sig_type = SignatureType.SubkeyRevocation
else: # pragma: no cover
raise TypeError
sig = PGPSignature.new(sig_type, self.key_algorithm, hash_algo, self.fingerprint.keyid, created=prefs.pop('created', None))
# signature options that only make sense when revoking
reason = prefs.pop('reason', RevocationReason.NotSpecified)
comment = prefs.pop('comment', "")
sig._signature.subpackets.addnew('ReasonForRevocation', hashed=True, code=reason, string=comment)
return self._sign(target, sig, **prefs)
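# Hypothetical usage sketch (not part of the original source):
#
#   with my_key.unlock('passphrase'):
#       rsig = my_key.revoke(my_key, reason=RevocationReason.Superseded,
#                            comment='replaced by a new key')
#   my_key |= rsig  # attach the revocation signature to the key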
@KeyAction(is_unlocked=True, is_public=False)
def revoker(self, revoker, **prefs):
"""
Generate a signature that specifies another key as being valid for revoking this key.
:param revoker: The :py:obj:`PGPKey` to specify as a valid revocation key.
:type revoker: :py:obj:`PGPKey`
:raises: :py:exc:`~pgpy.errors.PGPError` if the key is passphrase-protected and has not been unlocked
:raises: :py:exc:`~pgpy.errors.PGPError` if the key is public
:returns: :py:obj:`PGPSignature`
In addition to the optional keyword arguments accepted by :py:meth:`PGPKey.sign`, the following optional
keyword arguments can be used with :py:meth:`PGPKey.revoker`.
:keyword sensitive: If ``True``, this sets the sensitive flag on the RevocationKey subpacket. Currently,
this has no other effect.
:type sensitive: ``bool``
"""
hash_algo = prefs.pop('hash', None)
sig = PGPSignature.new(SignatureType.DirectlyOnKey, self.key_algorithm, hash_algo, self.fingerprint.keyid, created=prefs.pop('created', None))
# signature options that only make sense when adding a revocation key
sensitive = prefs.pop('sensitive', False)
keyclass = RevocationKeyClass.Normal | (RevocationKeyClass.Sensitive if sensitive else 0x00)
sig._signature.subpackets.addnew('RevocationKey',
hashed=True,
algorithm=revoker.key_algorithm,
fingerprint=revoker.fingerprint,
keyclass=keyclass)
# revocation keys should really not be revocable themselves
prefs['revocable'] = False
return self._sign(self, sig, **prefs)
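# Hypothetical usage sketch (not part of the original source): designate
# a second key as an authorized revoker for this one:
#
#   with my_key.unlock('passphrase'):
#       rsig = my_key.revoker(backup_key, sensitive=True)
#   my_key |= rsig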
@KeyAction(is_unlocked=True, is_public=False)
def bind(self, key, **prefs):
"""
Bind a subkey to this key.
In addition to the optional keyword arguments accepted for self-signatures by :py:meth:`PGPKey.certify`,
the following optional keyword arguments can be used with :py:meth:`PGPKey.bind`.
:keyword crosssign: If ``False``, do not attempt a cross-signature (defaults to ``True``). Subkeys
which are not capable of signing will not produce a cross-signature in any case.
Setting ``crosssign`` to ``False`` is likely to produce subkeys that will be rejected
by some other OpenPGP implementations.
:type crosssign: ``bool``
"""
hash_algo = prefs.pop('hash', None)
if self.is_primary and not key.is_primary:
sig_type = SignatureType.Subkey_Binding
elif key.is_primary and not self.is_primary:
sig_type = SignatureType.PrimaryKey_Binding
else: # pragma: no cover
raise PGPError
sig = PGPSignature.new(sig_type, self.key_algorithm, hash_algo, self.fingerprint.keyid, created=prefs.pop('created', None))
if sig_type == SignatureType.Subkey_Binding:
# signature options that only make sense in subkey binding signatures
usage = prefs.pop('usage', None)
if usage is not None:
sig._signature.subpackets.addnew('KeyFlags', hashed=True, flags=usage)
crosssig = None
# if possible, have the subkey create a primary key binding signature
if key.key_algorithm.can_sign and prefs.pop('crosssign', True):
subkeyid = key.fingerprint.keyid
if not key.is_public:
crosssig = key.bind(self)
elif subkeyid in self.subkeys: # pragma: no cover
crosssig = self.subkeys[subkeyid].bind(self)
if crosssig is None:
if usage is None:
raise PGPError('subkey with no key usage flags (may be used for any purpose, including signing) requires a cross-signature')
if KeyFlags.Sign in usage:
raise PGPError('subkey marked for | |
# mistertrade/exchanges/apis/bittrex.py
import time
import math
import requests
import datetime
import logging
import hmac
import hashlib
try:
from urllib.parse import urlparse, urlencode
except ImportError:
from urllib import urlencode
from urlparse import urlparse
import json
from decimal import *
import random
from ..abstract import Exchange, ExchangeAPI, ExchangeCLI, exchange_name
from ..errors import *
LOGGER = logging.getLogger(__name__)
__all__ = ['Bittrex']
@exchange_name('bittrex')
class Bittrex(Exchange):
def __init__(self):
super(Bittrex, self).__init__(
BittrexAPI,
BittrexCLI
)
class BittrexCLI(ExchangeCLI):
def __init__(self, *args, **kwargs):
super(BittrexCLI, self).__init__(*args, **kwargs)
class BittrexAPI(ExchangeAPI):
def __init__(self, *args, **kwargs):
super(BittrexAPI, self).__init__(*args, **kwargs)
self._session = requests.Session()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self._session.close()
def market_url(self, market):
return "https://bittrex.com/Market/Index?MarketName={}".format(market)
def markets(self):
result = self.request('https://bittrex.com/api/v2.0/pub/Markets/GetMarketSummaries')
result = [self.__parse_markets_item(x['Market']) for x in result]
self.validate_markets(result)
return result
def __parse_markets_item(self, item):
return {
'exchange': self.name,
'base_coin': item['BaseCurrency'],
'market_coin': item['MarketCurrency'],
'market': item['MarketName'],
'minimum_trade_size': item['MinTradeSize']
}
def fee(self, quantity, base_coin):
return quantity * 0.0025
def buy_stop(self, market, quantity, rate, distance):
return self.buy_or_sell_stop('buy', market, quantity, rate, distance)
def sell_stop(self, market, quantity, rate, distance):
return self.buy_or_sell_stop('sell', market, quantity, rate, distance)
def buy_or_sell_stop(self, buy_or_sell, market, quantity, rate, distance):
base_coin, market_coin = market.split('-')
LOGGER.debug("{exchange_name}: Stop-{buy_or_sell} {quantity:.8f} {market_coin} at {rate:.8f} with distance {distance:.8f}...".format(
buy_or_sell=("Selling" if buy_or_sell == 'sell' else "Buying"),
exchange_name=self.name,
quantity=quantity,
market_coin=market_coin,
base_coin=base_coin,
rate=rate,
distance=distance
))
if buy_or_sell == 'sell':
return self.__bittrex_ask(
market=market,
order_type='LIMIT',
quantity=quantity,
rate=rate,
time_in_effect='GOOD_TIL_CANCELLED',
condition_type='STOP_LOSS_FIXED',
target=distance
)
elif buy_or_sell == 'buy':
return self.__bittrex_bid(
market=market,
order_type='LIMIT',
quantity=quantity,
rate=rate,
time_in_effect='GOOD_TIL_CANCELLED',
condition_type='STOP_LOSS_FIXED',
target=distance
)
else:
raise ValueError("Parameter buy_or_sell should be 'buy' or 'sell'.")
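# Hypothetical usage sketch (not part of the original source); market,
# quantity, rate and distance values are made up:
#
#   api.sell_stop('BTC-XMR', quantity=0.05, rate=0.0210, distance=0.0005)
#   # -> a GOOD_TIL_CANCELLED limit sell with a STOP_LOSS_FIXED condition.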
def ask(self, market, quantity, rate):
base_coin, market_coin = market.split('-')
LOGGER.debug("{exchange_name}: {buy_or_sell} {quantity:.8f} {market_coin} at {rate:.8f} ...".format(
buy_or_sell="Selling",
exchange_name=self.name,
quantity=quantity,
market_coin=market_coin,
base_coin=base_coin,
rate=rate
))
return self.__bittrex_ask(
market=market,
order_type='LIMIT',
quantity=quantity,
rate=rate,
time_in_effect='GOOD_TIL_CANCELLED',
condition_type='NONE',
target=None
)
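# Hypothetical usage sketch (not part of the original source):
#
#   order = api.ask('BTC-XMR', quantity=0.05, rate=0.0245)
#   # order is the parsed dict with 'order_id', 'rate', 'price',
#   # 'is_open', etc., as produced by __parse_order below.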
def ask_when_less_than(self, market, quantity, rate, target_rate):
base_coin, market_coin = market.split('-')
LOGGER.debug("{exchange_name}: {buy_or_sell} {quantity:.8f} {market_coin} at {rate:.8f} when less than {target_rate:.8f}...".format(
buy_or_sell="Selling",
exchange_name=self.name,
quantity=quantity,
market_coin=market_coin,
base_coin=base_coin,
rate=rate,
target_rate=target_rate
))
return self.__bittrex_ask(
market=market,
order_type='LIMIT',
quantity=quantity,
rate=rate,
time_in_effect='GOOD_TIL_CANCELLED',
condition_type='LESS_THAN',
target=target_rate
)
def bid(self, market, quantity, rate):
base_coin, market_coin = market.split('-')
LOGGER.debug("{exchange_name}: {buy_or_sell} {quantity:.8f} {market_coin} at {rate:.8f} ...".format(
buy_or_sell="Buying",
exchange_name=self.name,
quantity=quantity,
market_coin=market_coin,
base_coin=base_coin,
rate=rate
))
return self.__bittrex_bid(
market=market,
order_type='LIMIT',
quantity=quantity,
rate=rate,
time_in_effect='GOOD_TIL_CANCELLED',
condition_type='NONE',
target=None)
def bid_when_greater_than(self, market, quantity, rate, target_rate):
base_coin, market_coin = market.split('-')
LOGGER.debug("{exchange_name}: {buy_or_sell} {quantity:.8f} {market_coin} at {rate:.8f} when greater than {target_rate:.8f} ...".format(
buy_or_sell="Buying",
exchange_name=self.name,
quantity=quantity,
market_coin=market_coin,
base_coin=base_coin,
rate=rate,
target_rate=target_rate
))
return self.__bittrex_bid(
market=market,
order_type='LIMIT',
quantity=quantity,
rate=rate,
time_in_effect='GOOD_TIL_CANCELLED',
condition_type='GREATER_THAN',
target=target_rate)
def cancel_order(self, market, order_id):
LOGGER.debug("{exchange_name}: Cancelling order {order_id}.".format(exchange_name=self.name, order_id=order_id))
self.request_private('https://bittrex.com/api/v2.0/key/market/tradecancel', method='POST', json={
'orderId': order_id,
'MarketName': market
})
return
def candlesticks(self, market=None, interval='hour'):
result = self.request('https://bittrex.com/api/v2.0/pub/market/GetTicks' + '?' + urlencode({
'marketName': self.__parse_market_param(market),
'tickInterval': self.__parse_interval_param(interval)
}))
if result is None: return None
result = list(map(self.__parse_candlestick, result))
return result
def candlesticks_since(self, market=None, interval='minute', since=None):
result = self.request('https://bittrex.com/api/v2.0/pub/market/GetLatestTick' + '?' + urlencode({
'marketName': market,
'tickInterval': self.__parse_interval_param(interval),
'_': since if since is not None else int(time.time())
}))
result = list(map(self.__parse_candlestick, result))
return result
def __parse_candlestick(self, candlestick):
candlestick['high'] = candlestick.pop('H')
candlestick['low'] = candlestick.pop('L')
candlestick['close'] = candlestick.pop('C')
candlestick['open'] = candlestick.pop('O')
candlestick['volume'] = candlestick.pop('V')
candlestick['time'] = candlestick.pop('T')  # TODO parse time properly
candlestick.pop('BV')
return candlestick
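# Illustrative note (not part of the original source): the raw Bittrex
# tick keys are renamed in place, e.g.
#   {'H': 0.021, 'L': 0.020, 'C': 0.0205, 'O': 0.0201, 'V': 12.3,
#    'T': '2017-12-25T00:00:00', 'BV': 0.25}
# becomes
#   {'high': 0.021, 'low': 0.020, 'close': 0.0205, 'open': 0.0201,
#    'volume': 12.3, 'time': '2017-12-25T00:00:00'}
# with the base-volume field ('BV') dropped.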
def open_orders(self, market):
result = self.request_private('https://bittrex.com/api/v2.0/key/market/getopenorders' + '?' + urlencode({
'marketName': market
}))
return list(map(self.__parse_open_order, result))
def __parse_open_order(self, order):
return self.__parse_order(order)
def order(self, market, order_id):
result = self.request_private('https://bittrex.com/api/v2.0/key/orders/getorder' + '?' + urlencode({
'orderid': order_id,
}))
if not result: return None
return self.__parse_order(result)
def __parse_order(self, order):
if order is None: return None
raw_order_type = order.get('Type', order.get('OrderType'))
if 'BuyOrSell' in order:
buy_or_sell = order['BuyOrSell'].lower()
else:
buy_or_sell = 'sell' if 'SELL' in raw_order_type else ('buy' if 'BUY' in raw_order_type else None)
if buy_or_sell is None:
raise ValueError("Can't parse order. Can't determine if it's a buy or sell order: {}.".format(raw_order_type))
order_type = 'limit' if 'LIMIT' in raw_order_type else ('market' if 'MARKET' in raw_order_type else None)
if order_type is None:
raise ValueError("Can't parse order. Order type is unknown: {}.".format(raw_order_type))
order_id = order.get('OrderUuid', order.get('OrderId'))
quantity = order['Quantity']
market = order.get('Exchange', order.get('MarketName'))
base_coin, market_coin = market.split('-')
if 'PricePerUnit' in order and order['PricePerUnit']:
rate = order['PricePerUnit']
elif 'Rate' in order and order['Rate']:
rate = order['Rate']
elif 'Limit' in order and order['Limit']:
rate = order['Limit']
elif 'Price' in order and order['Price']:
rate = order['Price'] / order['Quantity']
else:
raise ValueError("Can't parse order. Order rate is unknown")
if 'Price' in order and order['Price'] > 0:
price = order['Price']
else:
fee = self.fee(quantity * rate, base_coin)
if buy_or_sell == 'sell':
fee = fee * -1
price = math.ceil((quantity * rate + fee) * 10000000)/10000000
result = {
'order_id': order_id,
'buy_or_sell': buy_or_sell,
'order_type': order_type,
'exchange': self.name,
'market': market,
'base_coin': base_coin,
'market_coin': market_coin,
'quantity': quantity,
'quantity_remaining': order.get('QuantityRemaining', quantity),
'rate': rate,
'price': price,
'is_open': order.get('IsOpen', False),
'time': order.get('TimeStamp', order.get('Opened')),# TODO properly parse time
'meta': {
'data': order
}
}
result['is_filled'] = result['quantity_remaining'] == 0
result['is_partially_filled'] = result['quantity_remaining'] > 0 and result['quantity_remaining'] < result['quantity']
# result of place bid
# {
# "OrderId": "6efcf720-27c0-4bb1-b8cf-5bccdbb01adc",
# "MarketName": "USDT-BTC",
# "MarketCurrency": "BTC",
# "BuyOrSell": "Buy",
# "OrderType": "LIMIT",
# "Quantity": 0.00074074,
# "Rate": 11000.0
# }
# open order:
# {
# "Uuid": "e2152d28-597a-4b43-8868-e30e5d342c8b",
# "Id": 4953015642,
# "OrderUuid": "97872ac7-a793-4ef1-bd95-72c8d1ddcac6",
# "Exchange": "BTC-XMR",
# "OrderType": "LIMIT_BUY",
# "Quantity": 0.05,
# "QuantityRemaining": 0.05,
# "Limit": 0.02136587,
# "CommissionPaid": 0.0,
# "Price": 0.0,
# "PricePerUnit": null,
# "Opened": "2017-12-25T20:17:59.803",
# "Closed": null,
# "IsOpen": true,
# "CancelInitiated": false,
# "ImmediateOrCancel": false,
# "IsConditional": false,
# "Condition": "NONE",
# "ConditionTarget": null,
# "Updated": null
# }
# order by id:
# {
# "AccountId": null,
# "OrderUuid": "14bee319-d509-42c8-9a1f-0bad9cd62dbd",
# "Exchange": "BTC-XMR",
# "Type": "LIMIT_BUY",
# "Quantity": 0.04,
# "QuantityRemaining": 0.0,
# "Limit": 0.02008424,
# "Reserved": 0.00080336,
# "ReserveRemaining": 0.00080336,
# "CommissionReserved": 2e-06,
# "CommissionReserveRemaining": 0.0,
# "CommissionPaid": 2e-06,
# "Price": 0.00080336,
# "PricePerUnit": 0.020084,
# "Opened": "2017-12-19T17:33:14.517",
# "Closed": "2017-12-19T21:35:47.423",
# "IsOpen": false,
# "Sentinel": "42d2510d-8192-4c8c-8515-91707e2fbbfd",
# "CancelInitiated": false,
# "ImmediateOrCancel": false,
# "IsConditional": false,
# "Condition": "NONE",
# "ConditionTarget": null
# }
# order history:
# {
# "OrderUuid": "84305ba0-6992-443a-89ef-735017852532",
# "Exchange": "BTC-XMR",
# "TimeStamp": "2017-12-25T02:50:21.12",
# "OrderType": "LIMIT_SELL",
# "Limit": 0.0245528,
# "Quantity": 0.05,
# "QuantityRemaining": 0.0,
# "Commission": 3.06e-06,
# "Price": 0.00122764,
# "PricePerUnit": 0.0245528,
# "IsConditional": false,
# "Condition": "NONE",
# "ConditionTarget": null,
# "ImmediateOrCancel": false,
# "Closed": "2017-12-25T02:50:35.86"
# }
return result
def order_history(self, market):
result = self.request_private('https://bittrex.com/api/v2.0/key/orders/getorderhistory' + '?' + urlencode({'marketname': market}))
#result = self.request_private('/api/v1.1/account/getorderhistory' + '?' + urlencode({'market': market}))
result = [self.__parse_order(order) for order in result if order['Exchange'] == market]
return result
def orderbook (self, market):
result = self.request('https://bittrex.com/api/v1.1/public/getorderbook?type=both&market={}'.format(market))
for item in result['buy']:
item['quantity'] = item.pop('Quantity')
item['rate'] = item.pop('Rate')
for item in result['sell']:
item['quantity'] = item.pop('Quantity')
item['rate'] = item.pop('Rate')
#result['buy'] = result['buy'][::-1]
#result['sell'] = result['sell'][::-1]
self.validate_orderbook(result)
return result
def price(self, market):
#result = self.request('/api/v1.1/public/getticker' + '?' + urlencode({'market': market}))
orderbook = self.orderbook(market)
return {
'time': format(time.time()),
'highest_bid': self.get_highest_bid(orderbook),
'lowest_ask': self.get_lowest_ask(orderbook)
}
def wallet(self, currency=None):
if currency is None:
result = self.request_private('https://bittrex.com/api/v2.0/key/balance/getbalances')
return [self.__parse_wallet_item(x['Balance']) for x in result if x['Balance']['Balance'] > 0]
else:
result = self.request_private('https://bittrex.com/api/v2.0/key/balance/getbalance', params={'currencyname': currency})
return [self.__parse_wallet_item(result)]
def __parse_wallet_item(self, item):
return {
'name': item['Currency'],
'balance': item['Balance'],
'pending': item['Pending'],
'available': item['Available']
}
def request(self, url, headers=None, method='GET', params=None, json=None):
response = self._session.request(method, url, params=params, json=json, headers=headers)
response.raise_for_status()
json = response.json()
if not json.get('success'):
msg = json.get('message', "Unknown {} response error.".format(self.name))
msg = msg + ' (' + method + ' ' + url + ')'
LOGGER.error(msg)
raise ExchangeError(msg)
return json.get('result')
def request_private(self, url, headers=None, method='GET', params=None, json=None):
if not params: params = {}
params['apikey'] = self.apikey
params['nonce'] = str(int(time.time() * 1000))
# Bittrex expects the HMAC-SHA512 signature to cover the complete URI,
# including the apikey and nonce query parameters, not the bare URL.
full_url = url + ('&' if urlparse(url).query else '?') + urlencode(params)
headers = {
'apisign': hmac.new(self.apisecret.encode(), full_url.encode(), hashlib.sha512).hexdigest()
}
return self.request(full_url, headers=headers, method=method, json=json)
def __bittrex_ask(self, market, quantity, rate, order_type, time_in_effect, condition_type, target):
params = self.__bittrex_validate_trade_params(market, quantity, rate, order_type, time_in_effect, condition_type, target)
url = 'https://bittrex.com/api/v2.0/key/market/tradesell'
order = self.request_private(url=url, method='POST', json=params)
order = self.__parse_order(order)
order['is_open'] = True
return order
def __bittrex_bid(self, market, quantity, rate, order_type, time_in_effect, condition_type, target):
params = self.__bittrex_validate_trade_params(market, quantity, rate, order_type, time_in_effect, condition_type, target)
url = 'https://bittrex.com/api/v2.0/key/market/tradebuy'
order = self.request_private(url=url, method='POST', json=params)
order = self.__parse_order(order)
order['is_open'] = True
return order
def __bittrex_validate_trade_params(self, market, quantity, rate, order_type='LIMIT', time_in_effect='GOOD_TIL_CANCELLED', condition_type=None, target=None):
market = | |
# smallmedia/iod-ckan
import json
from nose import tools as nosetools
import ckan.plugins.toolkit as toolkit
try:
import ckan.tests.factories as factories
except ImportError: # for ckan <= 2.3
import ckan.new_tests.factories as factories
try:
import ckan.tests.helpers as helpers
except ImportError: # for ckan <= 2.3
import ckan.new_tests.helpers as helpers
from ckanext.datagathering.tests import DatagatheringFunctionalTestBase
class TestDatagatheringAuthIndex(DatagatheringFunctionalTestBase):
def test_auth_anon_user_can_view_datagathering_index(self):
'''An anon (not logged in) user can view the Datagatherings index.'''
app = self._get_test_app()
app.get("/datagathering", status=200)
def test_auth_logged_in_user_can_view_datagathering_index(self):
'''
A logged in user can view the Datagathering index.
'''
app = self._get_test_app()
user = factories.User()
app.get("/datagathering", status=200,
extra_environ={'REMOTE_USER': str(user["name"])})
def test_auth_anon_user_cant_see_add_datagathering_button(self):
'''
An anon (not logged in) user can't see the Add Datagathering button on the
datagathering index page.
'''
app = self._get_test_app()
response = app.get("/datagathering", status=200)
# test for new datagathering link in response
response.mustcontain(no="/datagathering/new")
def test_auth_logged_in_user_cant_see_add_datagathering_button(self):
'''
A logged in user can't see the Add Datagathering button on the datagathering
index page.
'''
app = self._get_test_app()
user = factories.User()
response = app.get("/datagathering", status=200,
extra_environ={'REMOTE_USER': str(user['name'])})
# test for new datagathering link in response
response.mustcontain(no="/datagathering/new")
def test_auth_sysadmin_can_see_add_datagathering_button(self):
'''
A sysadmin can see the Add Datagathering button on the datagathering index
page.
'''
app = self._get_test_app()
user = factories.Sysadmin()
response = app.get("/datagathering", status=200,
extra_environ={'REMOTE_USER': str(user['name'])})
# test for new datagathering link in response
response.mustcontain("/datagathering/new")
class TestDatagatheringAuthDetails(DatagatheringFunctionalTestBase):
def test_auth_anon_user_can_view_datagathering_details(self):
'''
An anon (not logged in) user can view an individual Datagathering details page.
'''
app = self._get_test_app()
factories.Dataset(type='datagathering', name='my-datagathering')
app.get('/datagathering/my-datagathering', status=200)
def test_auth_logged_in_user_can_view_datagathering_details(self):
'''
A logged in user can view an individual Datagathering details page.
'''
app = self._get_test_app()
user = factories.User()
factories.Dataset(type='datagathering', name='my-datagathering')
app.get('/datagathering/my-datagathering', status=200,
extra_environ={'REMOTE_USER': str(user['name'])})
def test_auth_anon_user_cant_see_manage_button(self):
'''
An anon (not logged in) user can't see the Manage button on an individual
datagathering details page.
'''
app = self._get_test_app()
factories.Dataset(type='datagathering', name='my-datagathering')
response = app.get('/datagathering/my-datagathering', status=200)
# test for url to edit page
response.mustcontain(no="/datagathering/edit/my-datagathering")
def test_auth_logged_in_user_cant_see_manage_button(self):
'''
A logged in user can't see the Manage button on an individual datagathering
details page.
'''
app = self._get_test_app()
user = factories.User()
factories.Dataset(type='datagathering', name='my-datagathering')
response = app.get('/datagathering/my-datagathering', status=200,
extra_environ={'REMOTE_USER': str(user['name'])})
# test for url to edit page
response.mustcontain(no="/datagathering/edit/my-datagathering")
def test_auth_sysadmin_can_see_manage_button(self):
'''
A sysadmin can see the Manage button on an individual datagathering details
page.
'''
app = self._get_test_app()
user = factories.Sysadmin()
factories.Dataset(type='datagathering', name='my-datagathering')
response = app.get('/datagathering/my-datagathering', status=200,
extra_environ={'REMOTE_USER': str(user['name'])})
# test for url to edit page
response.mustcontain("/datagathering/edit/my-datagathering")
def test_auth_datagathering_show_anon_can_access(self):
'''
Anon user can request datagathering show.
'''
app = self._get_test_app()
factories.Dataset(type='datagathering', name='my-datagathering')
response = app.get('/api/3/action/ckanext_datagathering_show?id=my-datagathering',
status=200)
json_response = json.loads(response.body)
nosetools.assert_true(json_response['success'])
def test_auth_datagathering_show_normal_user_can_access(self):
'''
Normal logged in user can request datagathering show.
'''
user = factories.User()
app = self._get_test_app()
factories.Dataset(type='datagathering', name='my-datagathering')
response = app.get('/api/3/action/ckanext_datagathering_show?id=my-datagathering',
status=200, extra_environ={'REMOTE_USER': str(user['name'])})
json_response = json.loads(response.body)
nosetools.assert_true(json_response['success'])
def test_auth_datagathering_show_sysadmin_can_access(self):
'''
A sysadmin can request datagathering show.
'''
user = factories.Sysadmin()
app = self._get_test_app()
factories.Dataset(type='datagathering', name='my-datagathering')
response = app.get('/api/3/action/ckanext_datagathering_show?id=my-datagathering',
status=200, extra_environ={'REMOTE_USER': str(user['name'])})
json_response = json.loads(response.body)
nosetools.assert_true(json_response['success'])
class TestDatagatheringAuthCreate(DatagatheringFunctionalTestBase):
def test_auth_anon_user_cant_view_create_datagathering(self):
'''
An anon (not logged in) user can't access the create datagathering page.
'''
app = self._get_test_app()
app.get("/datagathering/new", status=302)
def test_auth_logged_in_user_cant_view_create_datagathering_page(self):
'''
A logged in user can't access the create datagathering page.
'''
app = self._get_test_app()
user = factories.User()
app.get("/datagathering/new", status=401,
extra_environ={'REMOTE_USER': str(user['name'])})
def test_auth_sysadmin_can_view_create_datagathering_page(self):
'''
A sysadmin can access the create datagathering page.
'''
app = self._get_test_app()
user = factories.Sysadmin()
app.get("/datagathering/new", status=200,
extra_environ={'REMOTE_USER': str(user['name'])})
class TestDatagatheringAuthList(DatagatheringFunctionalTestBase):
def test_auth_datagathering_list_anon_can_access(self):
'''
Anon user can request datagathering list.
'''
app = self._get_test_app()
factories.Dataset(type='datagathering', name='my-datagathering')
response = app.get('/api/3/action/ckanext_datagathering_list',
status=200)
json_response = json.loads(response.body)
nosetools.assert_true(json_response['success'])
def test_auth_datagathering_list_normal_user_can_access(self):
'''
Normal logged in user can request datagathering list.
'''
user = factories.User()
app = self._get_test_app()
factories.Dataset(type='datagathering', name='my-datagathering')
response = app.get('/api/3/action/ckanext_datagathering_list',
status=200, extra_environ={'REMOTE_USER': str(user['name'])})
json_response = json.loads(response.body)
nosetools.assert_true(json_response['success'])
def test_auth_datagathering_list_sysadmin_can_access(self):
'''
A sysadmin can request datagathering list.
'''
user = factories.Sysadmin()
app = self._get_test_app()
factories.Dataset(type='datagathering', name='my-datagathering')
response = app.get('/api/3/action/ckanext_datagathering_list',
status=200, extra_environ={'REMOTE_USER': str(user['name'])})
json_response = json.loads(response.body)
nosetools.assert_true(json_response['success'])
class TestDatagatheringAuthEdit(DatagatheringFunctionalTestBase):
def test_auth_anon_user_cant_view_edit_datagathering_page(self):
'''
An anon (not logged in) user can't access the datagathering edit page.
'''
app = self._get_test_app()
factories.Dataset(type='datagathering', name='my-datagathering')
app.get('/datagathering/edit/my-datagathering', status=302)
def test_auth_logged_in_user_cant_view_edit_datagathering_page(self):
'''
A logged in user can't access the datagathering edit page.
'''
app = self._get_test_app()
user = factories.User()
factories.Dataset(type='datagathering', name='my-datagathering')
app.get('/datagathering/edit/my-datagathering', status=401,
extra_environ={'REMOTE_USER': str(user['name'])})
def test_auth_sysadmin_can_view_edit_datagathering_page(self):
'''
A sysadmin can access the datagathering edit page.
'''
app = self._get_test_app()
user = factories.Sysadmin()
factories.Dataset(type='datagathering', name='my-datagathering')
app.get('/datagathering/edit/my-datagathering', status=200,
extra_environ={'REMOTE_USER': str(user['name'])})
def test_auth_datagathering_admin_can_view_edit_datagathering_page(self):
'''
A datagathering admin can access the datagathering edit page.
'''
app = self._get_test_app()
user = factories.User()
# Make user a datagathering admin
helpers.call_action('ckanext_datagathering_admin_add', context={},
username=user['name'])
factories.Dataset(type='datagathering', name='my-datagathering')
app.get('/datagathering/edit/my-datagathering', status=200,
extra_environ={'REMOTE_USER': str(user['name'])})
def test_auth_anon_user_cant_view_manage_datasets(self):
'''
An anon (not logged in) user can't access the datagathering manage datasets page.
'''
app = self._get_test_app()
factories.Dataset(type='datagathering', name='my-datagathering')
app.get('/datagathering/manage_datasets/my-datagathering', status=302)
def test_auth_logged_in_user_cant_view_manage_datasets(self):
'''
A logged in user (not sysadmin) can't access the datagathering manage datasets page.
'''
app = self._get_test_app()
user = factories.User()
factories.Dataset(type='datagathering', name='my-datagathering')
app.get('/datagathering/manage_datasets/my-datagathering', status=401,
extra_environ={'REMOTE_USER': str(user['name'])})
def test_auth_sysadmin_can_view_manage_datasets(self):
'''
A sysadmin can access the datagathering manage datasets page.
'''
app = self._get_test_app()
user = factories.Sysadmin()
factories.Dataset(type='datagathering', name='my-datagathering')
app.get('/datagathering/manage_datasets/my-datagathering', status=200,
extra_environ={'REMOTE_USER': str(user['name'])})
def test_auth_datagathering_admin_can_view_manage_datasets(self):
'''
A datagathering admin can access the datagathering manage datasets page.
'''
app = self._get_test_app()
user = factories.User()
# Make user a datagathering admin
helpers.call_action('ckanext_datagathering_admin_add', context={},
username=user['name'])
factories.Dataset(type='datagathering', name='my-datagathering')
app.get('/datagathering/manage_datasets/my-datagathering', status=200,
extra_environ={'REMOTE_USER': str(user['name'])})
def test_auth_anon_user_cant_view_delete_datagathering_page(self):
'''
An anon (not logged in) user can't access the datagathering delete page.
'''
app = self._get_test_app()
factories.Dataset(type='datagathering', name='my-datagathering')
app.get('/datagathering/delete/my-datagathering', status=302)
def test_auth_logged_in_user_cant_view_delete_datagathering_page(self):
'''
A logged in user can't access the datagathering delete page.
'''
app = self._get_test_app()
user = factories.User()
factories.Dataset(type='datagathering', name='my-datagathering')
app.get('/datagathering/delete/my-datagathering', status=401,
extra_environ={'REMOTE_USER': str(user['name'])})
def test_auth_sysadmin_can_view_delete_datagathering_page(self):
'''
A sysadmin can access the datagathering delete page.
'''
app = self._get_test_app()
user = factories.Sysadmin()
factories.Dataset(type='datagathering', name='my-datagathering')
app.get('/datagathering/delete/my-datagathering', status=200,
extra_environ={'REMOTE_USER': str(user['name'])})
def test_auth_datagathering_admin_can_view_delete_datagathering_page(self):
'''
A datagathering admin can access the datagathering delete page.
'''
app = self._get_test_app()
user = factories.User()
# Make user a datagathering admin
helpers.call_action('ckanext_datagathering_admin_add', context={},
username=user['name'])
factories.Dataset(type='datagathering', name='my-datagathering')
app.get('/datagathering/delete/my-datagathering', status=200,
extra_environ={'REMOTE_USER': str(user['name'])})
def test_auth_anon_user_cant_view_addtodatagathering_dropdown_dataset_datagathering_list(self):
'''
An anonymous user can't view the 'Add to datagathering' dropdown selector
from a dataset's datagathering list page.
'''
app = self._get_test_app()
factories.Dataset(name='my-datagathering', type='datagathering')
factories.Dataset(name='my-dataset')
datagathering_list_response = app.get('/dataset/datagatherings/my-dataset', status=200)
nosetools.assert_false('datagathering-add' in datagathering_list_response.forms)
def test_auth_normal_user_cant_view_addtodatagathering_dropdown_dataset_datagathering_list(self):
'''
A normal (logged in) user can't view the 'Add to datagathering' dropdown
selector from a dataset's datagathering list page.
'''
user = factories.User()
app = self._get_test_app()
factories.Dataset(name='my-datagathering', type='datagathering')
factories.Dataset(name='my-dataset')
datagathering_list_response = app.get('/dataset/datagatherings/my-dataset', status=200,
extra_environ={'REMOTE_USER': str(user['name'])})
nosetools.assert_false('datagathering-add' in datagathering_list_response.forms)
def test_auth_sysadmin_can_view_addtodatagathering_dropdown_dataset_datagathering_list(self):
'''
A sysadmin can view the 'Add to datagathering' dropdown selector from a
dataset's datagathering list page.
'''
user = factories.Sysadmin()
app = self._get_test_app()
factories.Dataset(name='my-datagathering', type='datagathering')
factories.Dataset(name='my-dataset')
datagathering_list_response = app.get('/dataset/datagatherings/my-dataset', status=200,
extra_environ={'REMOTE_USER': str(user['name'])})
nosetools.assert_true('datagathering-add' in datagathering_list_response.forms)
def test_auth_datagathering_admin_can_view_addtodatagathering_dropdown_dataset_datagathering_list(self):
'''
A datagathering admin can view the 'Add to datagathering' dropdown selector from
a dataset's datagathering list page.
'''
app = self._get_test_app()
user = factories.User()
# Make user a datagathering admin
helpers.call_action('ckanext_datagathering_admin_add', context={},
username=user['name'])
factories.Dataset(name='my-datagathering', type='datagathering')
factories.Dataset(name='my-dataset')
datagathering_list_response = app.get('/dataset/datagatherings/my-dataset', status=200,
extra_environ={'REMOTE_USER': str(user['name'])})
nosetools.assert_true('datagathering-add' in datagathering_list_response.forms)
class TestDatagatheringPackageAssociationCreate(DatagatheringFunctionalTestBase):
def test_datagathering_package_association_create_no_user(self):
'''
Calling datagathering package association create with no user raises
NotAuthorized.
'''
context = {'user': None, 'model': None}
nosetools.assert_raises(toolkit.NotAuthorized, helpers.call_auth,
'ckanext_datagathering_package_association_create',
context=context)
def test_datagathering_package_association_create_sysadmin(self):
'''
Calling datagathering package association create by a sysadmin doesn't
raise NotAuthorized.
'''
a_sysadmin = factories.Sysadmin()
context = {'user': a_sysadmin['name'], 'model': None}
helpers.call_auth('ckanext_datagathering_package_association_create',
context=context)
def test_datagathering_package_association_create_datagathering_admin(self):
'''
Calling datagathering package association create by a datagathering admin
doesn't raise NotAuthorized.
'''
datagathering_admin = factories.User()
# Make user a datagathering admin
helpers.call_action('ckanext_datagathering_admin_add', context={},
username=datagathering_admin['name'])
context = {'user': datagathering_admin['name'], 'model': None}
helpers.call_auth('ckanext_datagathering_package_association_create',
context=context)
def test_datagathering_package_association_create_unauthorized_creds(self):
'''
Calling datagathering package association create with unauthorized user
raises NotAuthorized.
'''
not_a_sysadmin = factories.User()
context = {'user': not_a_sysadmin['name'], 'model': None}
nosetools.assert_raises(toolkit.NotAuthorized, helpers.call_auth,
'ckanext_datagathering_package_association_create',
context=context)
class TestDatagatheringPackageAssociationDelete(DatagatheringFunctionalTestBase):
def test_datagathering_package_association_delete_no_user(self):
'''
Calling datagathering package association delete with no user raises
NotAuthorized.
'''
context = {'user': None, 'model': None}
nosetools.assert_raises(toolkit.NotAuthorized, helpers.call_auth,
'ckanext_datagathering_package_association_delete',
context=context)
def test_datagathering_package_association_delete_sysadmin(self):
'''
Calling datagathering package association delete by a sysadmin doesn't
raise NotAuthorized.
'''
a_sysadmin = factories.Sysadmin()
context = {'user': a_sysadmin['name'], 'model': None}
helpers.call_auth('ckanext_datagathering_package_association_delete',
context=context)
def test_datagathering_package_association_delete_datagathering_admin(self):
'''
Calling datagathering package association delete by a datagathering admin
doesn't raise NotAuthorized.
'''
datagathering_admin = factories.User()
# Make user a datagathering admin (completed from the identical pattern in the Create tests above)
helpers.call_action('ckanext_datagathering_admin_add', context={},
username=datagathering_admin['name'])
context = {'user': datagathering_admin['name'], 'model': None}
helpers.call_auth('ckanext_datagathering_package_association_delete',
context=context)
<filename>resume/views.py
import logging
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import resolve, reverse_lazy
from django.shortcuts import get_object_or_404, redirect, render
from django.utils.translation import ugettext_lazy as _
from django.views.generic import ListView, DetailView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from core.mixins import WriterOnlyMixin
from .models import (Basic,
Contact,
Letter,
Career,
Education,
Award,
Certificate,
Language,
Skill)
from .forms import (BasicForm,
ContactForm,
LetterForm,
CareerForm,
EducationForm,
AwardForm,
CertificateForm,
LanguageForm,
SkillForm)
logger = logging.getLogger(__name__)
def index(request):
context = {}
try:
basic = Basic.objects.get(writer=request.user)
contact = Contact.objects.get(writer=request.user)
letter = Letter.objects.get(writer=request.user)
career_list = Career.objects.filter(writer=request.user)
education_list = Education.objects.filter(writer=request.user)
award_list = Award.objects.filter(writer=request.user)
certificate_list = Certificate.objects.filter(writer=request.user)
language_list = Language.objects.filter(writer=request.user)
skill_list = Skill.objects.filter(writer=request.user)
context['title'] = _(resolve(request.path).app_name.capitalize())
context['basic'] = basic
context['contact'] = contact
context['letter'] = letter
context['career_list'] = career_list
context['education_list'] = education_list
context['award_list'] = award_list
context['certificate_list'] = certificate_list
context['language_list'] = language_list
context['skill_list'] = skill_list
except TypeError:
return redirect('accounts:login')
return render(request, 'resume/index.html', context)
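# Illustrative only: a minimal resume/urls.py sketch that would satisfy the
# 'resume:index' and 'accounts:login' names these views reverse. The exact
# paths and route names below are assumptions, not taken from this project.
#
# from django.urls import path
# from . import views
#
# app_name = 'resume'
# urlpatterns = [
#     path('', views.index, name='index'),
#     path('basic/create/', views.BasicCreate.as_view(), name='basic_create'),
#     path('basic/<int:pk>/', views.BasicDetail.as_view(), name='basic_detail'),
#     path('basic/<int:pk>/update/', views.BasicUpdate.as_view(), name='basic_update'),
#     path('basic/<int:pk>/delete/', views.BasicDelete.as_view(), name='basic_delete'),
# ]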
class BasicList(LoginRequiredMixin, ListView):
"""
Retrieve Basic List
"""
model = Basic
context_object_name = 'basic_list'
http_method_names = ['get']
paginate_by = 9
def get_queryset(self):
basic_list = Basic.objects.all()
return basic_list
def get_context_data(self, **kwargs):
context = super(BasicList, self).get_context_data(**kwargs)
context['title'] = _(resolve(self.request.path).app_name.capitalize())
return context
class BasicCreate(LoginRequiredMixin, CreateView):
"""
Create Basic
"""
model = Basic
form_class = BasicForm
http_method_names = ['get', 'post', ]
success_url = reverse_lazy('resume:index')
def form_valid(self, form):
basic = form.save(commit=False)
basic.writer = self.request.user
basic.save()
messages.success(self.request, _('Your basic was created successfully.'))
return super(BasicCreate, self).form_valid(form)
def form_invalid(self, form):
return super(BasicCreate, self).form_invalid(form)
class BasicDetail(LoginRequiredMixin, DetailView):
"""
Detail Basic
"""
model = Basic
context_object_name = 'basic'
http_method_names = ['get']
def get_context_data(self, **kwargs):
context = super(BasicDetail, self).get_context_data(**kwargs)
return context
class BasicUpdate(WriterOnlyMixin, UpdateView):
"""
Update Basic
"""
model = Basic
form_class = BasicForm
context_object_name = 'basic'
http_method_names = ['get', 'post', ]
def form_valid(self, form):
basic = form.save(commit=False)
basic.writer = self.request.user
basic.save()
messages.success(self.request, _('Your basic was updated successfully.'))
return super(BasicUpdate, self).form_valid(form)
def form_invalid(self, form):
messages.error(self.request, _('Your basic was not updated!'))
return super(BasicUpdate, self).form_invalid(form)
class BasicDelete(WriterOnlyMixin, DeleteView):
"""
Delete Basic
"""
model = Basic
context_object_name = 'basic'
http_method_names = ['get', 'post']
success_url = reverse_lazy('resume:index')
class ContactList(LoginRequiredMixin, ListView):
"""
Retrieve Contact List
"""
model = Contact
context_object_name = 'contact_list'
http_method_names = ['get']
paginate_by = 9
def get_queryset(self):
contact_list = Contact.objects.all()
return contact_list
def get_context_data(self, **kwargs):
context = super(ContactList, self).get_context_data(**kwargs)
context['title'] = _(resolve(self.request.path).app_name.capitalize())
return context
class ContactCreate(LoginRequiredMixin, CreateView):
"""
Create Contact
"""
model = Contact
form_class = ContactForm
http_method_names = ['get', 'post', ]
success_url = reverse_lazy('resume:index')
def form_valid(self, form):
contact = form.save(commit=False)
contact.writer = self.request.user
contact.save()
messages.success(self.request, _('Your contact was created successfully.'))
return super(ContactCreate, self).form_valid(form)
def form_invalid(self, form):
return super(ContactCreate, self).form_invalid(form)
class ContactDetail(LoginRequiredMixin, DetailView):
"""
Detail Contact
"""
model = Contact
context_object_name = 'contact'
http_method_names = ['get']
def get_context_data(self, **kwargs):
context = super(ContactDetail, self).get_context_data(**kwargs)
return context
class ContactUpdate(WriterOnlyMixin, UpdateView):
"""
Update Contact
"""
model = Contact
form_class = ContactForm
context_object_name = 'contact'
http_method_names = ['get', 'post', ]
def form_valid(self, form):
contact = form.save(commit=False)
contact.writer = self.request.user
contact.save()
messages.success(self.request, _('Your contact was updated successfully.'))
return super(ContactUpdate, self).form_valid(form)
def form_invalid(self, form):
messages.error(self.request, _('Your contact was not updated!'))
return super(ContactUpdate, self).form_invalid(form)
class ContactDelete(WriterOnlyMixin, DeleteView):
"""
Delete Contact
"""
model = Contact
context_object_name = 'contact'
http_method_names = ['get', 'post']
success_url = reverse_lazy('resume:index')
class LetterList(LoginRequiredMixin, ListView):
"""
Retrieve Letter List
"""
model = Letter
context_object_name = 'letter_list'
http_method_names = ['get']
paginate_by = 9
def get_queryset(self):
letter_list = Letter.objects.all()
return letter_list
def get_context_data(self, **kwargs):
context = super(LetterList, self).get_context_data(**kwargs)
context['title'] = _(resolve(self.request.path).app_name.capitalize())
return context
class LetterCreate(LoginRequiredMixin, CreateView):
"""
Create Letter
"""
model = Letter
form_class = LetterForm
http_method_names = ['get', 'post', ]
success_url = reverse_lazy('resume:index')
def form_valid(self, form):
letter = form.save(commit=False)
letter.writer = self.request.user
letter.save()
messages.success(self.request, _('Your letter was created successfully.'))
return super(LetterCreate, self).form_valid(form)
def form_invalid(self, form):
return super(LetterCreate, self).form_invalid(form)
class LetterDetail(LoginRequiredMixin, DetailView):
"""
Detail Letter
"""
model = Letter
context_object_name = 'letter'
http_method_names = ['get']
def get_context_data(self, **kwargs):
context = super(LetterDetail, self).get_context_data(**kwargs)
return context
class LetterUpdate(WriterOnlyMixin, UpdateView):
"""
Update Letter
"""
model = Letter
form_class = LetterForm
context_object_name = 'letter'
http_method_names = ['get', 'post', ]
def form_valid(self, form):
letter = form.save(commit=False)
letter.writer = self.request.user
letter.save()
messages.success(self.request, _('Your letter was updated successfully.'))
return super(LetterUpdate, self).form_valid(form)
def form_invalid(self, form):
messages.error(self.request, _('Your letter was not updated!'))
return super(LetterUpdate, self).form_invalid(form)
class LetterDelete(WriterOnlyMixin, DeleteView):
"""
Delete Letter
"""
model = Letter
context_object_name = 'letter'
http_method_names = ['get', 'post']
success_url = reverse_lazy('resume:index')
class CareerList(LoginRequiredMixin, ListView):
"""
Retrieve Career List
"""
model = Career
context_object_name = 'career_list'
http_method_names = ['get']
paginate_by = 9
def get_queryset(self):
career_list = Career.objects.all()
return career_list
def get_context_data(self, **kwargs):
context = super(CareerList, self).get_context_data(**kwargs)
context['title'] = _(resolve(self.request.path).app_name.capitalize())
return context
class CareerCreate(LoginRequiredMixin, CreateView):
"""
Create Career
"""
model = Career
form_class = CareerForm
http_method_names = ['get', 'post', ]
success_url = reverse_lazy('resume:index')
def form_valid(self, form):
career = form.save(commit=False)
career.writer = self.request.user
career.save()
messages.success(self.request, _('Your career was created successfully.'))
return super(CareerCreate, self).form_valid(form)
def form_invalid(self, form):
return super(CareerCreate, self).form_invalid(form)
class CareerDetail(LoginRequiredMixin, DetailView):
"""
Detail Career
"""
model = Career
context_object_name = 'career'
http_method_names = ['get']
def get_context_data(self, **kwargs):
context = super(CareerDetail, self).get_context_data(**kwargs)
return context
class CareerUpdate(WriterOnlyMixin, UpdateView):
"""
Update Career
"""
model = Career
form_class = CareerForm
context_object_name = 'career'
http_method_names = ['get', 'post', ]
def form_valid(self, form):
career = form.save(commit=False)
career.writer = self.request.user
career.save()
messages.success(self.request, _('Your career was updated successfully.'))
return super(CareerUpdate, self).form_valid(form)
def form_invalid(self, form):
messages.error(self.request, _('Your career was not updated!'))
return super(CareerUpdate, self).form_invalid(form)
class CareerDelete(WriterOnlyMixin, DeleteView):
"""
Delete Career
"""
model = Career
context_object_name = 'career'
http_method_names = ['get', 'post']
success_url = reverse_lazy('resume:index')
class EducationList(LoginRequiredMixin, ListView):
"""
Retrieve Education List
"""
model = Education
context_object_name = 'education_list'
http_method_names = ['get']
paginate_by = 9
def get_queryset(self):
education_list = Education.objects.all()
return education_list
def get_context_data(self, **kwargs):
context = super(EducationList, self).get_context_data(**kwargs)
context['title'] = _(resolve(self.request.path).app_name.capitalize())
return context
class EducationCreate(LoginRequiredMixin, CreateView):
"""
Create Education
"""
model = Education
form_class = EducationForm
http_method_names = ['get', 'post', ]
success_url = reverse_lazy('resume:index')
def form_valid(self, form):
education = form.save(commit=False)
education.writer = self.request.user
education.save()
messages.success(self.request, _('Your education was created successfully.'))
return super(EducationCreate, self).form_valid(form)
def form_invalid(self, form):
return super(EducationCreate, self).form_invalid(form)
class EducationDetail(LoginRequiredMixin, DetailView):
"""
Detail Education
"""
model = Education
context_object_name = 'education'
http_method_names = ['get']
def get_context_data(self, **kwargs):
context = super(EducationDetail, self).get_context_data(**kwargs)
return context
class EducationUpdate(WriterOnlyMixin, UpdateView):
"""
Update Education
"""
model = Education
form_class = EducationForm
context_object_name = 'education'
http_method_names = ['get', 'post', ]
def form_valid(self, form):
education = form.save(commit=False)
education.writer = self.request.user
education.save()
messages.success(self.request, _('Your education was updated successfully.'))
return super(EducationUpdate, self).form_valid(form)
def form_invalid(self, form):
messages.error(self.request, _('Your education was not updated!'))
return super(EducationUpdate, self).form_invalid(form)
class EducationDelete(WriterOnlyMixin, DeleteView):
"""
Delete Education
"""
model = Education
context_object_name = 'education'
http_method_names = ['get', 'post']
success_url = reverse_lazy('resume:index')
class AwardList(LoginRequiredMixin, ListView):
"""
Retrieve Award List
"""
model = Award
context_object_name = 'award_list'
http_method_names = ['get']
paginate_by = 9
def get_queryset(self):
award_list = Award.objects.all()
return award_list
def get_context_data(self, **kwargs):
context = super(AwardList, self).get_context_data(**kwargs)
context['title'] = _(resolve(self.request.path).app_name.capitalize())
return context
class AwardCreate(LoginRequiredMixin, CreateView):
"""
Create Award
"""
model = Award
form_class = AwardForm
http_method_names = ['get', 'post', ]
success_url = reverse_lazy('resume:index')
def form_valid(self, form):
award = form.save(commit=False)
award.writer = self.request.user
award.save()
messages.success(self.request, _('Your award was created successfully.'))
return super(AwardCreate, self).form_valid(form)
def form_invalid(self, form):
return super(AwardCreate, self).form_invalid(form)
class AwardDetail(LoginRequiredMixin, DetailView):
"""
Detail Award
"""
model = Award
context_object_name = 'award'
http_method_names = ['get']
def get_context_data(self, **kwargs):
context = super(AwardDetail, self).get_context_data(**kwargs)
return context
class AwardUpdate(WriterOnlyMixin, UpdateView):
"""
Update Award
"""
model = Award
form_class = AwardForm
context_object_name = 'award'
http_method_names = ['get', 'post', ]
def form_valid(self, form):
award = form.save(commit=False)
award.writer = self.request.user
award.save()
messages.success(self.request, _('Your award was updated successfully.'))
return super(AwardUpdate, self).form_valid(form)
def form_invalid(self, form):
messages.error(self.request, _('Your award was not updated!'))
return super(AwardUpdate, self).form_invalid(form)
class AwardDelete(WriterOnlyMixin, DeleteView):
"""
Delete Award
"""
model = Award
context_object_name = 'award'
http_method_names = ['get', 'post']
success_url = reverse_lazy('resume:index')
class CertificateList(LoginRequiredMixin, ListView):
"""
Retrieve Certificate List
"""
model = Certificate
context_object_name = 'certificate_list'
http_method_names = ['get']
paginate_by = 9
def get_queryset(self):
certificate_list = Certificate.objects.all()
return certificate_list
def get_context_data(self, **kwargs):
context = super(CertificateList, self).get_context_data(**kwargs)
context['title'] = _(resolve(self.request.path).app_name.capitalize())
return context
class CertificateCreate(LoginRequiredMixin, CreateView):
"""
Create Certificate
"""
model = Certificate
form_class = CertificateForm
http_method_names = ['get', 'post', ]
success_url = reverse_lazy('resume:index')
def form_valid(self, form):
certificate = form.save(commit=False)
certificate.writer = self.request.user
certificate.save()
messages.success(self.request, _('Your certificate was created successfully.'))
return super(CertificateCreate, self).form_valid(form)
def form_invalid(self, form):
return super(CertificateCreate, self).form_invalid(form)
class CertificateDetail(LoginRequiredMixin, DetailView):
"""
Detail Certificate
"""
model = Certificate
context_object_name = 'certificate'
http_method_names = ['get']
def get_context_data(self, **kwargs):
context = super(CertificateDetail, self).get_context_data(**kwargs)
return context
weightsdir=config["paths"]["weights_dir"],
overwrite=True,
spcdb_dir=spcdb_dir,
)
# --------------------------------------------------------------
# GCHP vs GCC column AOD plots: Seasonal
# --------------------------------------------------------------
for t in range(bmk_n_months):
print("\nCreating plots for {}".format(bmk_mon_strs[t]))
# Create plots
mon_ind = bmk_mon_inds[t]
bmk.make_benchmark_aod_plots(
ref[mon_ind],
gchp_vs_gcc_refstr,
dev[mon_ind],
gchp_vs_gcc_devstr,
dst=gchp_vs_gcc_resultsdir,
subdst=bmk_mon_yr_strs_dev[t],
weightsdir=config["paths"]["weights_dir"],
overwrite=True,
spcdb_dir=spcdb_dir,
)
# ==================================================================
# GCHP vs GCC global mass tables
# ==================================================================
if config["options"]["outputs"]["mass_table"]:
print("\n%%% Creating GCHP vs. GCC mass tables %%%")
def gchp_vs_gcc_mass_table(m):
"""
Create mass table for each benchmark month in parallel
"""
# Filepaths
refpath = get_filepath(
gchp_vs_gcc_refrstdir, "Restart", bmk_mons_dev[m]
)
devpath = get_filepath(
gchp_vs_gcc_devrstdir,
"Restart",
bmk_mons_dev[m],
is_gchp=True,
gchp_format_is_legacy=config["data"]["dev"]["gchp"]["is_legacy"],
)
# Use the initial restart file if no checkpoint is present (intended
# for the first month). In this case we also need to pass the path of
# a meteorology file that contains the grid-box area variable.
dev_extra = ""
if not os.path.isfile(devpath):
devpath = join(
gchp_vs_gcc_devrstdir,
"initial_GEOSChem_rst."
+ config["data"]["dev"]["gchp"]["resolution"]
+ "_benchmark.nc",
)
dev_extra = get_filepath(
gchp_vs_gcc_devrstdir,
"Restart",
bmk_mons_dev[m + 1],
is_gchp=True,
gchp_format_is_legacy=config["data"]["dev"]["gchp"][
"is_legacy"
],
)
# Create tables
bmk.make_benchmark_mass_tables(
refpath,
gchp_vs_gcc_refstr,
devpath,
gchp_vs_gcc_devstr,
dst=gchp_vs_gcc_tablesdir,
subdst=bmk_mon_yr_strs_dev[m],
label="at 01{}".format(bmk_mon_yr_strs_dev[m]),
overwrite=True,
spcdb_dir=spcdb_dir,
dev_met_extra=dev_extra,
)
results = Parallel(n_jobs=-1)(
delayed(gchp_vs_gcc_mass_table)(t) for t in range(bmk_n_months)
)
# ==================================================================
# GCHP vs GCC operations budgets tables
# ==================================================================
if config["options"]["outputs"]["ops_budget_table"]:
print("\n%%% Creating GCHP vs. GCC operations budget tables %%%")
def gchp_vs_gcc_ops_budg(m):
"""
Create operations budgets for each benchmark month m in parallel
"""
# Filepaths
refpath = get_filepath(gchp_vs_gcc_refdir, "Budget", bmk_mons_dev[m])
devpath = get_filepath(
gchp_vs_gcc_devdir,
"Budget",
bmk_mons_gchp_dev[m],
is_gchp=True,
gchp_format_is_legacy=config["data"]["dev"]["gchp"]["is_legacy"],
)
# Create tables
bmk.make_benchmark_operations_budget(
config["data"]["dev"]["gcc"]["version"],
refpath,
config["data"]["dev"]["gchp"]["version"],
devpath,
bmk_sec_per_month_dev[m],
bmk_sec_per_month_dev[m],
benchmark_type=bmk_type,
label="at 01{}".format(bmk_mon_yr_strs_dev[m]),
operations=[
"Chemistry",
"Convection",
"EmisDryDep",
"Mixing",
"WetDep",
],
compute_accum=False,
dst=gchp_vs_gcc_tablesdir,
)
results = Parallel(n_jobs=-1)(
delayed(gchp_vs_gcc_ops_budg)(t) for t in range(bmk_n_months)
)
# ==================================================================
# GCHP vs GCC aerosol budgets and burdens tables
# ==================================================================
if config["options"]["outputs"]["aer_budget_table"]:
print("\n%%% Creating GCHP vs. GCC aerosol budget tables %%%")
# Filepaths
devaero = get_filepaths(
gchp_vs_gcc_devdir,
"Aerosols",
all_months_gchp_dev,
is_gchp=True,
gchp_format_is_legacy=config["data"]["dev"]["gchp"]["is_legacy"],
)[0]
devspc = get_filepaths(
gchp_vs_gcc_devdir,
"SpeciesConc",
all_months_gchp_dev,
is_gchp=True,
gchp_format_is_legacy=config["data"]["dev"]["gchp"]["is_legacy"],
)[0]
# Create tables
bmk.make_benchmark_aerosol_tables(
gchp_vs_gcc_devdir,
devaero,
devspc,
devmet,
config["data"]["dev"]["gchp"]["version"],
bmk_year_dev,
days_per_month_dev,
dst=gchp_vs_gcc_tablesdir,
overwrite=True,
spcdb_dir=spcdb_dir,
is_gchp=True,
)
# ==================================================================
# GCHP vs GCC Ox budget tables
# ==================================================================
if config["options"]["outputs"]["Ox_budget_table"]:
print("\n%%% Creating GCHP vs. GCC Ox budget tables %%%")
# Compute Ox budget table for GCC
ox.global_ox_budget(
config["data"]["dev"]["gcc"]["version"],
gcc_vs_gcc_devdir,
gcc_vs_gcc_devrstdir,
bmk_year_dev,
dst=gcc_vs_gcc_tablesdir,
overwrite=True,
spcdb_dir=spcdb_dir
)
# Compute Ox budget table for GCHP
ox.global_ox_budget(
config["data"]["dev"]["gchp"]["version"],
gchp_vs_gcc_devdir,
gchp_vs_gcc_devrstdir,
bmk_year_dev,
dst=gchp_vs_gcc_tablesdir,
overwrite=True,
is_gchp=True,
spcdb_dir=spcdb_dir
)
# ==================================================================
# GCHP vs. GCC global mean OH, MCF Lifetime, CH4 Lifetime
# ==================================================================
if config["options"]["outputs"]["OH_metrics"]:
print("\n%%% Creating GCHP vs. GCC OH metrics table %%%")
# Filepaths
ref = get_filepaths(gchp_vs_gcc_refdir, "Metrics", all_months_dev)[0]
dev = get_filepaths(
gchp_vs_gcc_devdir,
"Metrics",
all_months_gchp_dev,
is_gchp=True,
gchp_format_is_legacy=config["data"]["dev"]["gchp"]["is_legacy"],
)[0]
# Create table
oh.make_benchmark_oh_metrics(
ref,
config["data"]["dev"]["gcc"]["version"],
dev,
config["data"]["dev"]["gchp"]["version"],
dst=gchp_vs_gcc_tablesdir,
overwrite=True,
spcdb_dir=spcdb_dir,
)
# ==================================================================
# GCHP Strat-Trop Exchange
# ==================================================================
if config["options"]["outputs"]["ste_table"]:
print("\n%%% Skipping GCHP vs. GCC Strat-Trop Exchange table %%%")
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Create GCHP vs GCHP benchmark plots and tables
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
if config["options"]["comparisons"]["gchp_vs_gchp"]["run"]:
# ==================================================================
# GCHP vs GCHP filepaths for StateMet collection data
# ==================================================================
refmet = get_filepaths(
gchp_vs_gchp_refdir,
gchp_metname(config["data"]["ref"]["gchp"]["prior_to_13"]),
all_months_gchp_ref,
is_gchp=True,
gchp_format_is_legacy=config["data"]["ref"]["gchp"]["is_legacy"],
)[0]
devmet = get_filepaths(
gchp_vs_gcc_devdir,
gchp_metname(config["data"]["dev"]["gchp"]["prior_to_13"]),
all_months_gchp_dev,
is_gchp=True,
gchp_format_is_legacy=config["data"]["dev"]["gchp"]["is_legacy"],
)[0]
# ==================================================================
# GCHP vs GCHP species concentration plots
# ==================================================================
if config["options"]["outputs"]["plot_conc"]:
print("\n%%% Creating GCHP vs. GCHP concentration plots %%%")
# --------------------------------------------------------------
# GCHP vs GCHP species concentration plots: Annual Mean
# --------------------------------------------------------------
# Filepaths
ref = get_filepaths(
gchp_vs_gchp_refdir,
"SpeciesConc",
all_months_gchp_ref,
is_gchp=True,
gchp_format_is_legacy=config["data"]["ref"]["gchp"]["is_legacy"],
)[0]
dev = get_filepaths(
gchp_vs_gchp_devdir,
"SpeciesConc",
all_months_gchp_dev,
is_gchp=True,
gchp_format_is_legacy=config["data"]["dev"]["gchp"]["is_legacy"],
)[0]
# Create plots
print("\nCreating plots for annual mean")
bmk.make_benchmark_conc_plots(
ref,
gchp_vs_gchp_refstr,
dev,
gchp_vs_gchp_devstr,
refmet=refmet,
devmet=devmet,
dst=gchp_vs_gchp_resultsdir,
subdst="AnnualMean",
time_mean=True,
weightsdir=config["paths"]["weights_dir"],
benchmark_type=bmk_type,
plot_by_spc_cat=config["options"]["outputs"]["plot_options"][
"by_spc_cat"
],
overwrite=True,
spcdb_dir=spcdb_dir,
)
# --------------------------------------------------------------
# GCHP vs GCHP species concentration plots: Seasonal
# --------------------------------------------------------------
for t in range(bmk_n_months):
print("\nCreating plots for {}".format(bmk_mon_strs[t]))
# Create plots
mon_ind = bmk_mon_inds[t]
bmk.make_benchmark_conc_plots(
ref[mon_ind],
gchp_vs_gchp_refstr,
dev[mon_ind],
gchp_vs_gchp_devstr,
refmet=refmet[mon_ind],
devmet=devmet[mon_ind],
dst=gchp_vs_gchp_resultsdir,
subdst=bmk_mon_yr_strs_dev[t],
weightsdir=config["paths"]["weights_dir"],
benchmark_type=bmk_type,
plot_by_spc_cat=config["options"]["outputs"]["plot_options"][
"by_spc_cat"
],
overwrite=True,
spcdb_dir=spcdb_dir,
)
# ==================================================================
# GCHP vs. GCHP Emissions plots
# ==================================================================
if config["options"]["outputs"]["plot_emis"]:
print("\n%%% Creating GCHP vs. GCHP emissions plots %%%")
# --------------------------------------------------------------
# GCHP vs GCHP emissions plots: Annual Mean
# --------------------------------------------------------------
# Filepaths
ref = get_filepaths(
gchp_vs_gchp_refdir,
"Emissions",
all_months_gchp_ref,
is_gchp=True,
gchp_format_is_legacy=config["data"]["ref"]["gchp"]["is_legacy"],
)[0]
dev = get_filepaths(
gchp_vs_gchp_devdir,
"Emissions",
all_months_gchp_dev,
is_gchp=True,
gchp_format_is_legacy=config["data"]["dev"]["gchp"]["is_legacy"],
)[0]
# Create plots
print("\nCreating plots for annual mean")
bmk.make_benchmark_emis_plots(
ref,
gchp_vs_gchp_refstr,
dev,
gchp_vs_gchp_devstr,
dst=gchp_vs_gchp_resultsdir,
subdst="AnnualMean",
time_mean=True,
weightsdir=config["paths"]["weights_dir"],
plot_by_spc_cat=config["options"]["outputs"]["plot_options"][
"by_spc_cat"
],
plot_by_hco_cat=config["options"]["outputs"]["plot_options"][
"by_hco_cat"
],
overwrite=True,
spcdb_dir=spcdb_dir,
)
# --------------------------------------------------------------
# GCHP vs GCHP emissions plots: Seasonal
# --------------------------------------------------------------
for t in range(bmk_n_months):
print("\nCreating plots for {}".format(bmk_mon_strs[t]))
# Create plots
mon_ind = bmk_mon_inds[t]
bmk.make_benchmark_emis_plots(
ref[mon_ind],
gchp_vs_gchp_refstr,
dev[mon_ind],
gchp_vs_gchp_devstr,
dst=gchp_vs_gchp_resultsdir,
subdst=bmk_mon_yr_strs_dev[t],
weightsdir=config["paths"]["weights_dir"],
plot_by_spc_cat=config["options"]["outputs"]["plot_options"][
"by_spc_cat"
],
plot_by_hco_cat=config["options"]["outputs"]["plot_options"][
"by_hco_cat"
],
overwrite=True,
spcdb_dir=spcdb_dir,
)
# ==================================================================
# GCHP vs. GCHP tables of emission and inventory totals
# ==================================================================
if config["options"]["outputs"]["emis_table"]:
print("\n%%% Creating GCHP vs. GCHP emissions tables %%%")
# Filepaths
ref = get_filepaths(
gchp_vs_gchp_refdir,
"Emissions",
all_months_gchp_ref,
is_gchp=True,
gchp_format_is_legacy=config["data"]["ref"]["gchp"]["is_legacy"],
)[0]
dev = get_filepaths(
gchp_vs_gchp_devdir,
"Emissions",
all_months_gchp_dev,
is_gchp=True,
gchp_format_is_legacy=config["data"]["dev"]["gchp"]["is_legacy"],
)[0]
# Create table
bmk.make_benchmark_emis_tables(
ref,
gchp_vs_gchp_refstr,
dev,
gchp_vs_gchp_devstr,
refmet=refmet,
devmet=devmet,
dst=gchp_vs_gchp_resultsdir,
ref_interval=sec_per_month_ref,
dev_interval=sec_per_month_dev,
overwrite=True,
spcdb_dir=spcdb_dir,
)
# ==================================================================
# GCHP vs. GCHP J-values plots
# ==================================================================
if config["options"]["outputs"]["plot_jvalues"]:
print("\n%%% Creating GCHP vs. GCHP J-values plots %%%")
# --------------------------------------------------------------
# GCHP vs GCHP J-values plots: Annual Mean
# --------------------------------------------------------------
# Filepaths
ref = get_filepaths(
gchp_vs_gchp_refdir,
"JValues",
all_months_gchp_ref,
is_gchp=True,
gchp_format_is_legacy=config["data"]["ref"]["gchp"]["is_legacy"],
)[0]
dev = get_filepaths(
gchp_vs_gchp_devdir,
"JValues",
all_months_gchp_dev,
is_gchp=True,
gchp_format_is_legacy=config["data"]["dev"]["gchp"]["is_legacy"],
)[0]
# Create plots
print("\nCreating plots for annual mean")
bmk.make_benchmark_jvalue_plots(
ref,
gchp_vs_gchp_refstr,
dev,
gchp_vs_gchp_devstr,
dst=gchp_vs_gchp_resultsdir,
subdst='AnnualMean',
time_mean=True,
weightsdir=config["paths"]["weights_dir"],
overwrite=True,
spcdb_dir=spcdb_dir,
)
# --------------------------------------------------------------
# GCHP vs GCHP J-values plots: Seasonal
# --------------------------------------------------------------
for t in range(bmk_n_months):
print("\nCreating plots for {}".format(bmk_mon_strs[t]))
# Create plots
mon_ind = bmk_mon_inds[t]
bmk.make_benchmark_jvalue_plots(
ref[mon_ind],
gchp_vs_gchp_refstr,
dev[mon_ind],
gchp_vs_gchp_devstr,
dst=gchp_vs_gchp_resultsdir,
subdst=bmk_mon_yr_strs_dev[t],
weightsdir=config["paths"]["weights_dir"],
overwrite=True,
spcdb_dir=spcdb_dir,
)
# ==================================================================
# GCHP vs GCHP column AOD plots
# ==================================================================
if config["options"]["outputs"]["plot_aod"]:
print("\n%%% Creating GCHP vs. GCHP AOD plots %%%")
# --------------------------------------------------------------
# GCHP vs GCHP column AOD plots: Annual Mean
# --------------------------------------------------------------
# Filepaths
ref = get_filepaths(
gchp_vs_gchp_refdir,
"Aerosols",
all_months_gchp_ref,
is_gchp=True,
gchp_format_is_legacy=config["data"]["ref"]["gchp"]["is_legacy"],
)[0]
dev = get_filepaths(
gchp_vs_gchp_devdir,
"Aerosols",
all_months_gchp_dev,
is_gchp=True,
gchp_format_is_legacy=config["data"]["dev"]["gchp"]["is_legacy"],
)[0]
# Create plots
print("\nCreating plots for annual mean")
bmk.make_benchmark_aod_plots(
ref,
gchp_vs_gchp_refstr,
dev,
gchp_vs_gchp_devstr,
dst=gchp_vs_gchp_resultsdir,
subdst="AnnualMean",
time_mean=True,
weightsdir=config["paths"]["weights_dir"],
overwrite=True,
spcdb_dir=spcdb_dir,
)
# --------------------------------------------------------------
# GCHP vs GCHP column AOD plots: Seasonal
# --------------------------------------------------------------
for t in range(bmk_n_months):
print("\nCreating plots for {}".format(bmk_mon_strs[t]))
# Create plots
mon_ind = bmk_mon_inds[t]
bmk.make_benchmark_aod_plots(
ref[mon_ind],
gchp_vs_gchp_refstr,
dev[mon_ind],
gchp_vs_gchp_devstr,
dst=gchp_vs_gchp_resultsdir,
subdst=bmk_mon_yr_strs_dev[t],
weightsdir=config["paths"]["weights_dir"],
overwrite=True,
spcdb_dir=spcdb_dir,
)
# ==================================================================
# GCHP vs GCHP global mass tables
# ==================================================================
if config["options"]["outputs"]["mass_table"]:
print("\n%%% Creating GCHP vs. GCHP mass tables %%%")
def gchp_vs_gchp_mass_table(m):
"""
Create mass table for each benchmark month m in parallel
"""
# Ref filepaths
refpath = get_filepath(
gchp_vs_gchp_refrstdir,
"Restart",
bmk_mons_ref[m],
is_gchp=True,
gchp_format_is_legacy=config["data"]["ref"]["gchp"]["is_legacy"],
)
# Use initial checkpoint if Ref restart is not present
ref_extra = ""
if not os.path.isfile(refpath):
refpath = join(
gchp_vs_gchp_refrstdir,
"initial_GEOSChem_rst."
+ config["data"]["ref"]["gchp"]["resolution"]
+ "_benchmark.nc",
)
ref_extra = get_filepath(
gchp_vs_gchp_refrstdir,
"Restart",
bmk_mons_ref[m + 1],
is_gchp=True,
gchp_format_is_legacy=config["data"]["ref"]["gchp"][
"is_legacy"
],
)
# Dev filepaths
devpath = get_filepath(
gchp_vs_gchp_devrstdir,
"Restart",
bmk_mons_dev[m],
is_gchp=True,
gchp_format_is_legacy=config["data"]["dev"]["gchp"]["is_legacy"],
)
# Use initial checkpoint if Dev restart is not present
dev_extra = ""
if not os.path.isfile(devpath):
devpath = join(
gchp_vs_gchp_devrstdir,
"initial_GEOSChem_rst."
+ config["data"]["dev"]["gchp"]["resolution"]
+ "_benchmark.nc",
)
dev_extra = get_filepath(
gchp_vs_gchp_devrstdir,
"Restart",
bmk_mons_dev[m + 1],
is_gchp=True,
gchp_format_is_legacy=config["data"]["dev"]["gchp"][
"is_legacy"
],
)
# Create tables
bmk.make_benchmark_mass_tables(
refpath,
gchp_vs_gchp_refstr,
devpath,
gchp_vs_gchp_devstr,
dst=gchp_vs_gchp_tablesdir,
subdst=bmk_mon_yr_strs_dev[m],
label="at 01{}".format(bmk_mon_yr_strs_dev[m]),
overwrite=True,
spcdb_dir=spcdb_dir,
ref_met_extra=ref_extra,
dev_met_extra=dev_extra,
)
# Run in parallel
results = Parallel(n_jobs=-1)(
delayed(gchp_vs_gchp_mass_table)(t) for t in range(bmk_n_months)
)
# ==================================================================
# GCHP vs GCHP operations budgets tables
# ==================================================================
if config["options"]["outputs"]["ops_budget_table"]:
print("\n%%% Creating GCHP vs. GCHP operations budget tables %%%")
def gchp_vs_gchp_ops_budg(m):
"""
Creates operations budgets for each benchmark month m in parallel
"""
# Filepaths
refpath = get_filepath(
gchp_vs_gchp_refdir,
"Budget",
bmk_mons_gchp_ref[m],
is_gchp=True,
gchp_format_is_legacy=config["data"]["ref"]["gchp"]["is_legacy"],
)
devpath = get_filepath(
gchp_vs_gchp_devdir,
"Budget",
bmk_mons_gchp_dev[m],
is_gchp=True,
gchp_format_is_legacy=config["data"]["dev"]["gchp"]["is_legacy"],
)
# Compute tables
bmk.make_benchmark_operations_budget(
config["data"]["ref"]["gchp"]["version"],
refpath,
config["data"]["dev"]["gchp"]["version"],
devpath,
bmk_sec_per_month_ref[m],
bmk_sec_per_month_dev[m],
benchmark_type=bmk_type,
label="at 01{}".format(bmk_mon_yr_strs_dev[m]),
operations=[
"Chemistry",
"Convection",
"EmisDryDep",
"Mixing",
"WetDep",
],
compute_accum=False,
dst=gchp_vs_gchp_tablesdir,
)
# Run in parallel
results = Parallel(n_jobs=-1)(
delayed(gchp_vs_gchp_ops_budg)(t) for t in range(bmk_n_months)
)
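# Illustrative only: the shape of the YAML configuration this script reads,
# shown as a Python dict. The keys come from the config lookups above; the
# values are placeholder assumptions.
#
# config = {
#     "paths": {"weights_dir": "/path/to/weights"},
#     "data": {
#         "ref": {"gchp": {"version": "GCHP_ref", "resolution": "c24",
#                          "is_legacy": False, "prior_to_13": False}},
#         "dev": {"gcc": {"version": "GCC_dev"},
#                 "gchp": {"version": "GCHP_dev", "resolution": "c24",
#                          "is_legacy": False, "prior_to_13": False}},
#     },
#     "options": {
#         "comparisons": {"gchp_vs_gchp": {"run": True}},
#         "outputs": {
#             "plot_conc": True, "plot_emis": True, "emis_table": True,
#             "plot_jvalues": True, "plot_aod": True, "mass_table": True,
#             "ops_budget_table": True, "aer_budget_table": True,
#             "Ox_budget_table": True, "OH_metrics": True, "ste_table": True,
#             "plot_options": {"by_spc_cat": True, "by_hco_cat": True},
#         },
#     },
# }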
if m.get('atUnionIds') is not None:
self.at_union_ids = m.get('atUnionIds')
if m.get('receiverMobiles') is not None:
self.receiver_mobiles = m.get('receiverMobiles')
if m.get('receiverDingtalkIds') is not None:
self.receiver_dingtalk_ids = m.get('receiverDingtalkIds')
if m.get('receiverUnionIds') is not None:
self.receiver_union_ids = m.get('receiverUnionIds')
if m.get('messageType') is not None:
self.message_type = m.get('messageType')
if m.get('btnOrientation') is not None:
self.btn_orientation = m.get('btnOrientation')
self.btns = []
if m.get('btns') is not None:
for k in m.get('btns'):
temp_model = SendServiceGroupMessageRequestBtns()
self.btns.append(temp_model.from_map(k))
return self
class SendServiceGroupMessageResponseBody(TeaModel):
def __init__(
self,
open_msg_task_id: str = None,
):
# Open message task ID
self.open_msg_task_id = open_msg_task_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.open_msg_task_id is not None:
result['openMsgTaskId'] = self.open_msg_task_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('openMsgTaskId') is not None:
self.open_msg_task_id = m.get('openMsgTaskId')
return self
class SendServiceGroupMessageResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: SendServiceGroupMessageResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = SendServiceGroupMessageResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class GetStoragePolicyHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class GetStoragePolicyRequest(TeaModel):
def __init__(
self,
ding_isv_org_id: int = None,
ding_org_id: int = None,
ding_suite_key: str = None,
ding_token_grant_type: int = None,
open_team_id: str = None,
biz_type: str = None,
file_size: int = None,
file_name: str = None,
):
self.ding_isv_org_id = ding_isv_org_id
self.ding_org_id = ding_org_id
self.ding_suite_key = ding_suite_key
self.ding_token_grant_type = ding_token_grant_type
# Team ID
self.open_team_id = open_team_id
# Business type
self.biz_type = biz_type
# File size, in bytes
self.file_size = file_size
# File name
self.file_name = file_name
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.ding_isv_org_id is not None:
result['dingIsvOrgId'] = self.ding_isv_org_id
if self.ding_org_id is not None:
result['dingOrgId'] = self.ding_org_id
if self.ding_suite_key is not None:
result['dingSuiteKey'] = self.ding_suite_key
if self.ding_token_grant_type is not None:
result['dingTokenGrantType'] = self.ding_token_grant_type
if self.open_team_id is not None:
result['openTeamId'] = self.open_team_id
if self.biz_type is not None:
result['bizType'] = self.biz_type
if self.file_size is not None:
result['fileSize'] = self.file_size
if self.file_name is not None:
result['fileName'] = self.file_name
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('dingIsvOrgId') is not None:
self.ding_isv_org_id = m.get('dingIsvOrgId')
if m.get('dingOrgId') is not None:
self.ding_org_id = m.get('dingOrgId')
if m.get('dingSuiteKey') is not None:
self.ding_suite_key = m.get('dingSuiteKey')
if m.get('dingTokenGrantType') is not None:
self.ding_token_grant_type = m.get('dingTokenGrantType')
if m.get('openTeamId') is not None:
self.open_team_id = m.get('openTeamId')
if m.get('bizType') is not None:
self.biz_type = m.get('bizType')
if m.get('fileSize') is not None:
self.file_size = m.get('fileSize')
if m.get('fileName') is not None:
self.file_name = m.get('fileName')
return self
class GetStoragePolicyResponseBody(TeaModel):
def __init__(
self,
key: str = None,
policy: str = None,
access_key_id: str = None,
endpoint: str = None,
signature: str = None,
):
# Id of the request
self.key = key
self.policy = policy
self.access_key_id = access_key_id
self.endpoint = endpoint
self.signature = signature
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.key is not None:
result['key'] = self.key
if self.policy is not None:
result['policy'] = self.policy
if self.access_key_id is not None:
result['accessKeyId'] = self.access_key_id
if self.endpoint is not None:
result['endpoint'] = self.endpoint
if self.signature is not None:
result['signature'] = self.signature
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('key') is not None:
self.key = m.get('key')
if m.get('policy') is not None:
self.policy = m.get('policy')
if m.get('accessKeyId') is not None:
self.access_key_id = m.get('accessKeyId')
if m.get('endpoint') is not None:
self.endpoint = m.get('endpoint')
if m.get('signature') is not None:
self.signature = m.get('signature')
return self
class GetStoragePolicyResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: GetStoragePolicyResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = GetStoragePolicyResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class ListUserTeamsHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class ListUserTeamsResponseBodyTeams(TeaModel):
def __init__(
self,
open_team_id: str = None,
team_name: str = None,
):
# Open team ID
self.open_team_id = open_team_id
# Team name
self.team_name = team_name
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.open_team_id is not None:
result['openTeamId'] = self.open_team_id
if self.team_name is not None:
result['teamName'] = self.team_name
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('openTeamId') is not None:
self.open_team_id = m.get('openTeamId')
if m.get('teamName') is not None:
self.team_name = m.get('teamName')
return self
class ListUserTeamsResponseBody(TeaModel):
def __init__(
self,
teams: List[ListUserTeamsResponseBodyTeams] = None,
):
# teams
self.teams = teams
def validate(self):
if self.teams:
for k in self.teams:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
result['teams'] = []
if self.teams is not None:
for k in self.teams:
result['teams'].append(k.to_map() if k else None)
return result
def from_map(self, m: dict = None):
m = m or dict()
self.teams = []
if m.get('teams') is not None:
for k in m.get('teams'):
temp_model = ListUserTeamsResponseBodyTeams()
self.teams.append(temp_model.from_map(k))
return self
class ListUserTeamsResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: ListUserTeamsResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = ListUserTeamsResponseBody()
self.body = temp_model.from_map(m['body'])
return self
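# Illustrative only: round-tripping one of these generated models through
# from_map()/to_map(). The payload below is a made-up example.
#
# body = ListUserTeamsResponseBody().from_map(
#     {'teams': [{'openTeamId': 'team-123', 'teamName': 'Support'}]}
# )
# assert body.teams[0].team_name == 'Support'
# assert body.to_map() == {'teams': [{'openTeamId': 'team-123',
#                                     'teamName': 'Support'}]}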
class AddTicketMemoHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class AddTicketMemoRequestTicketMemoAttachments(TeaModel):
def __init__(
self,
file_name: str = None,
key: str = None,
):
# File name
self.file_name = file_name
# File key
self.key = key
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.file_name is not None:
result['fileName'] = self.file_name
if self.key is not None:
result['key'] = self.key
return result
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This script defines some useful functions to use in data analysis and visualization
@ <NAME> (<EMAIL>)
"""
def dl_ia_utils_change_directory(path):
""" change the working directory to `path` and add it to sys.path
path = 'path/to/app/'
"""
import os
import sys
os.chdir(path)
sys.path.insert(1, path)
def dl_ia_utils_set_up_logger(path):
""" Set up logger
:arg path: path where to store logs example: 'logs\\dl-ia-cla-predictive'
"""
import logging
logger = logging.getLogger(path)
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler('{}.log'.format(path))
fh.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)  # add the file handler once (adding it twice duplicates every record)
logging.getLogger().addHandler(logging.StreamHandler())  # also echo messages to the console
# logger.debug('debug message')
# logger.info('info message')
# logger.warning('warning message')
# logger.error('error message')
# logger.critical('critical message')
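# Illustrative usage (the log path is an assumption); note the function does
# not return the logger, so retrieve it again by name:
# import logging
# dl_ia_utils_set_up_logger('logs\\my-app')
# logger = logging.getLogger('logs\\my-app')
# logger.info('hello')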
def dl_ia_utils_systems_info():
""" Function that shows the system properties
"""
import sys
from platform import python_version
print('Python version:{}'.format(python_version()))
print('Python system version:{}'.format(sys.version))
print('Path:{}'.format(sys.executable))
print('Python version info:{}'.format(sys.version_info))
def dl_ia_utils_config_plotly():
""" this function configures the plotly visualization
:return:
"""
import plotly.io as pio
import plotly.graph_objects as go
import plotly.express as px
pio.renderers.default = "browser"
def dl_ia_utils_config_matplotlib():
""" this function configures the matplotlib style
:return:
"""
from matplotlib import rc
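# Note: the second rc('font', ...) call below overrides the first, so the
# serif/Times configuration is what actually takes effect (with usetex on).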
rc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']})
rc('font', **{'family': 'serif', 'serif': ['Times']})
rc('text', usetex=True)
def dl_ia_utils_config_pandas():
"""
Allows to show all the columns of a dataframe in the console
Limit pandas warnings
"""
import pandas as pd
import numpy as np
pd.options.mode.chained_assignment = None # default='warn'
desired_width = 350
np.set_printoptions(linewidth=desired_width) # show dataframes in console
pd.set_option('display.max_columns', 10)
def dl_ia_utils_check_folder(path_folder):
""" check that exists a folder, and if not, create it
:param path_folder: string with the path
:return error: error code (0:good, 1:bad)
"""
import os
error = 0
try:
if not os.path.isdir(path_folder):
print('Creating folder: {} '.format(path_folder))
os.mkdir(path_folder)
except Exception as exception_msg:
print('(!) Error in dl_ia_utils_check_folder: ' + str(exception_msg))
error = 1
return error
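# Illustrative usage (the folder name is an assumption):
# error = dl_ia_utils_check_folder('outputs/figures')
# if error == 1:
#     print('could not create folder')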
#############################################################
# ---------- DATA ANALYSIS ---------------------------------#
#############################################################
def dl_ia_utils_memory_usage(df):
""" Calculate and print the memory usage and shape by the dataframe
:param df:
:return:
"""
error = 0
try:
print('{} Data Frame Memory usage: {:2.2f} GB'.format('-' * 20, df.memory_usage(deep=True).sum() / 1000000000))
print('{} Data Frame Shape: {} '.format('-' * 20, df.shape))
except Exception as exception_msg:
error = 1
print('(!) Error in dl_ia_utils_memory_usage: ' + str(exception_msg))
return error
def dl_ia_utils_filter_by_std(df, variable, option):
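""" keep only the rows of df whose `variable` lies within +/- `option`
standard deviations of its mean (option must be 1 or 2) """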
if option == 2:
df_aux = df[(df[variable] < (df[variable].mean() + 2 * df[variable].std()))
& (df[variable] > (df[variable].mean() - 2 * df[variable].std()))]
elif option == 1:
df_aux = df[(df[variable] < (df[variable].mean() + df[variable].std()))
& (df[variable] > (df[variable].mean() - df[variable].std()))]
print('Rows dropped:{} %'.format(round(100 * (1 - (len(df_aux) / len(df))), 3)))
return df_aux
def dl_ia_utils_subs_zeros_values(y):
""" subs zero values from from an array by values close to zeros 1e-10
e.g.: y = np.array([1,4,2,3,7,8,0,0,8,7,0,0,9,8])
:param y:
:return:
"""
import pandas as pd
df = pd.DataFrame({'y': y})
df.loc[df['y'] == 0, ['y']] = 1e-9
return df['y'].values
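# Illustrative usage:
# import numpy as np
# y = np.array([1, 0, 3])
# dl_ia_utils_subs_zeros_values(y)  # -> array with the zero replaced by 1e-9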
def dl_ia_utils_create_datestring(row):
"""
df['date'] = df.apply(lambda row: create_date(row), axis=1)
"""
try:
return row['TIMESTAMP'].strftime('%Y-%m-%d')
except Exception as exception_msg:
print('(!) Error in dl_ia_utils_create_datestring: ' + str(exception_msg))
def dl_ia_utils_create_time_array(start, end, freq):
""" function that creates an array of times
:param start: string with the initial time (e.g.: 00:00:00)
:param end: string with the end time (e.g.: 23:59:59)
:param freq: string indicating the frequency (e.g.: 15min)
:return: array of time
"""
import pandas as pd
t = pd.DataFrame({'t': pd.date_range(start=start, end=end, freq=freq)}).t.dt.time
return t
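# Illustrative usage:
# t = dl_ia_utils_create_time_array('00:00:00', '23:59:59', '15min')  # 96 quarter-hour slots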
def dl_ia_utils_create_date(row):
""" create date with year, month and day
:param row: lambda variable regarding columns of the dataframe
:return datetime:
"""
import pandas as pd
try:
return pd.Timestamp(int(row['YEAR']), int(row['MONTH']), int(row['DAY']))
except Exception as exception_msg:
print('(!) Error in dl_ia_utils_create_date: ' + str(exception_msg))
def dl_ia_utils_create_time(row):
""" convert values of HOUR and MINUTE to datetime
:param row: lambda variable regarding columns of the dataframe
:return datetime:
"""
import datetime
try:
return datetime.time(int(row['HOUR']), int(row['MINUTE']), int(row['SECOND']))
except Exception as exception_msg:
print('(!) Error in dl_ia_utils_create_time: ' + str(exception_msg))
def dl_ia_utils_create_timestamp(row):
""" create date with year, month and day
:param row: lambda variable regarding columns of the dataframe
:return datetime:
"""
import pandas as pd
try:
return pd.Timestamp(int(row['YEAR']), int(row['MONTH']), int(row['DAY']), int(row['HOUR']), int(row['MINUTE']))
except Exception as exception_msg:
print('(!) Error in dl_ia_utils_create_timestamp: ' + str(exception_msg))
def dl_ia_utils_create_datetime(row):
""" create datetime with hour and minute
:param row: lambda variable regarding columns of the dataframe
:return datetime:
"""
import datetime
try:
return datetime.time(int(row['HOUR']), int(row['MINUTE']))
except Exception as exception_msg:
print('(!) Error in dl_ia_conn_utils_create_datetime: ' + str(exception_msg))
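# Illustrative usage of the row-wise helpers above (assumes the dataframe
# has YEAR/MONTH/DAY/HOUR/MINUTE columns):
# df['TIMESTAMP'] = df.apply(dl_ia_utils_create_timestamp, axis=1)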
def dl_ia_utils_read_csv_per_chunks(path):
""" This function read a large csv file into a dataframe per chunks
:param path:
:return df: dataframe
"""
import pandas as pd
chunksize_ = 1000
error = 0
try:
TextFileReader = pd.read_csv(path, sep=";", chunksize=chunksize_)
dfList = []
for df in TextFileReader:
dfList.append(df)
df = pd.concat(dfList, sort=False)
return error, df
except Exception as exception_msg:
print("Error in read_csv_per_chunks {}".format(exception_msg))
# raise
error = 1
df = []
return error, df
def dl_ia_utils_vertical_translation(y):
""" detects in exist a zero value and translate the time series with the minimum value
:param y:
:return:
"""
import numpy as np
if np.isin(0, y):
# exists a zero value, find the minimum distinct from zero
delta = np.min(y[y > 0])
# vertical translation
# ym = y + delta
ym = y + 1
return ym
return y
def dl_ia_utils_get_unique_values(df_in):
""" this function calculate the unique values of the column of a data frame
:param df_in: dataframe with the columns of interest
:return dict_out: dictionary with unique values of the columns
"""
import numpy as np
dict_out = dict()
for column in df_in.columns:
dict_out[column] = np.sort(df_in[column].unique())
return dict_out
def dl_ia_utils_quarter_classify(x):
""" classify a variabel x into four cuadrants
:param x: value with values in (0,60)
:return y: values with values in (1,2,3,4)
"""
if x <= 15:
y = 0
if 30 >= x > 15:
y = 15
if 45 >= x > 30:
y = 30
if x > 45:
y = 45
return y
def dl_ia_utils_quarter_groups(x):
""" classify a variabel x into four cuadrants
:param x: value with values in (0,60)
:return y: values with values in (1,2,3,4)
"""
if x <= 15:
y = 1
if 30 >= x > 15:
y = 2
if 45 >= x > 30:
y = 3
if x > 45:
y = 4
return y
def dl_ia_utils_check_null_values(df):
"""
:param df:
:return df:
"""
# check nans
if df.isna().sum().sum() > 0:
print('(!) NAN Values detected')
print(df.isna().sum())
df.dropna(inplace=True)
return df
elif df.isnull().sum().sum() > 0:
print('(!) NULLs Values detected')
print(df.isnull().sum())
df.dropna(inplace=True)
return df
else:
print('Everything ok')
return df
def dl_ia_utils_comm(msg):
""" Funtion to show mesages in terminal
:parm msg: meassge (str)
:return:
"""
print('{} {}'.format('-' * 20, msg))
def dl_ia_utils_quarter_classify(x):
""" classify a variabel x into four cuadrants
:param x: value with values in (0,60)
:return y: values with values in (1,2,3,4)
"""
if x <= 15:
y = 0
if 30 >= x > 15:
y = 15
if 45 >= x > 30:
y = 30
if x > 45:
y = 45
return y
#############################################################
# ------------------- EDA ---------------------------------#
#############################################################
def dl_ia_utils_check_descriptive_statistics(df):
""" calculate descriptive statiscs of a dataframe columns
:param df: dataframe with columns of interest
:return error: error code (0:ok, 1: something wrong)
"""
error = 0
try:
for variable in df.columns:
print('variable:{}{}'.format(' ' * 2, variable))
print('---------------')
print('Mean Value:{}{}'.format(' ' * 2, round(df[variable].mean(), 2)))
print('std Value:{}{}'.format(' ' * 3, round(df[variable].std(), 2)))
print('Q3.:{}{}'.format(' ' * 9, round(df[variable].quantile(0.75), 2)))
print('Max.:{}{}'.format(' ' * 8, round(df[variable].max(), 2)))
print('Q2 :{}{}'.format(' ' * 2, round(df[variable].median(), 2)))
print('Min.:{}{}'.format(' ' * 8, round(df[variable].min(), 2)))
print('Q1.:{}{}'.format(' ' * 9, round(df[variable].quantile(0.25), 2)))
print('IQR.:{}{}'.format(' ' * 8, round(df[variable].quantile(0.75) - df0[variable].quantile(0.25), 2)))
return error
except Exception as exception_msg:
print('{} (!) Error in dl_ia_utils_check_descriptive_statistics: '.format('-' * 20) + str(exception_msg))
error = 1
return error
#############################################################
# ------------------ PLOTS ---------------------------------#
#############################################################
def dl_ia_utils_plot_timeseries(df, var_x, var_y):
"""
:param df:
:param var_x:
:param var_y:
:return:
"""
import plotly.graph_objects as go
show = True
print_ = True
fig = go.Figure()
fig.add_trace(go.Scatter(x=df[var_x],
y=df[var_y],
marker=dict(color='red'),
mode='markers+lines',
name=var_y))
fig.update_layout(font=dict(family="Courier New, monospace", size=18, color="#7f7f7f"))
fig.update_layout(title='Time series',
xaxis_title=var_x,
yaxis_title=var_y,
showlegend=True
)
if show:
fig.show()
if print_:
fig.write_html("figures\\timeseries_{}.html".format(var_y))
def dl_ia_utils_plot_line(df, var_x, var_y, var_group):
"""
:param df:
:param var_x:
:param var_y:
:param var_group:
:return:
"""
import plotly.express as px
show = True
print_ = True
fig = px.line(df,
x=var_x,
y=var_y,
color=var_group,
)
fig.update_layout(font=dict(family="Courier New, monospace", size=18, color="#7f7f7f"))
if show:
fig.show()
if print_:
fig.write_html("figures\\line_plot_simple_{}_{}.html".format(var_x, | |
stg - STG_COMPRESSED)
for d in range(bla_dim):
M_bla_new[new_index + d * new_len] = M_bla[index, d]
r_bla_new[new_index] = r_bla[index]
print("BLA tree compressed with coeff:", k_comp)
return M_bla_new, r_bla_new, new_len
@numba.njit
def BLA_index(i, stg):
"""
Return the indices in BVA table for this iteration and stage
this is the jump from i to j = i + (1 << stg)
"""
return (2 * i) + ((1 << stg) - 1)
@numba.njit
def ref_BLA_get(M_bla, r_bla, bla_len, stages_bla, zn, n_iter,
first_invalid_index, M_out, holomorphic):
"""
Paramters:
----------
A_bla, B_bla, r_bla: arrays
Bilinear approx tree
zn :
The current value of dz
n_iter :
The current iteration for ref pt
M_out :
Container for the Bla coefficient
holomorphic: boolean
True if the base function is holomorphic
Returns:
--------
step
The interation "jump" provided by this linear interpolation
"""
k_comp = (1 << STG_COMPRESSED)
_iter = (n_iter >> STG_COMPRESSED)
for stages in range(STG_COMPRESSED, stages_bla):
if _iter & 1:
break
_iter = _iter >> 1
# The first invalid step /!\
invalid_step = first_invalid_index - n_iter
# numba version of reversed(range(stages_bla)):
for stg in range(stages, STG_COMPRESSED - 1, -1):
step = (1 << stg)
if step >= invalid_step:
continue
index_bla = BLA_index(n_iter // k_comp, stg - STG_COMPRESSED)
r = r_bla[index_bla]
# /!\ Use strict comparisons here: to rule out underflow
if (abs(zn) < r):
if holomorphic:
M_out[0] = M_bla[index_bla]
M_out[1] = M_bla[index_bla + bla_len]
return step
else:
for i in range(8):
M_out[i] = M_bla[index_bla + i * bla_len]
return step
return 0 # No BLA applicable
@numba.njit
def need_xr(x_std):
"""
True if norm L-inf of std is lower than xrange_zoom_level
"""
return (
(abs(np.real(x_std)) < fs.settings.xrange_zoom_level)
and (abs(np.imag(x_std)) < fs.settings.xrange_zoom_level)
)
@numba.njit
def ensure_xr(val_std, val_xr, is_xr):
"""
Return a valid Xrange. if not(Z_xr_trigger) we return x_std
converted
val_xr : complex128_Xrange_scalar or float64_Xrange_scalar
"""
if is_xr:
return fsxn.to_Xrange_scalar(val_xr)
else:
return fsxn.to_Xrange_scalar(val_std)
@numba.njit
def ensure_xr_BS(val_std, valx_xr, valy_xr, is_xr):
"""
Return a valid Xrange. if not(Z_xr_trigger) we return x_std
converted
val_xr : complex128_Xrange_scalar or float64_Xrange_scalar
"""
if is_xr:
return (
fsxn.to_Xrange_scalar(valx_xr),
fsxn.to_Xrange_scalar(valy_xr),
)
else:
return (
fsxn.to_Xrange_scalar(np.real(val_std)),
fsxn.to_Xrange_scalar(np.imag(val_std))
)
@numba.njit
def ref_path_c_from_pix(pix, dx, drift):
"""
Returns the true c (coords from ref point) from the pixel coords
Parameters
----------
pix : complex
pixel location in fraction of dx
Returns
-------
c, c_xr : c value as complex and as Xrange
"""
c_xr = (pix * dx[0]) + drift[0]
return fsxn.to_standard(c_xr), c_xr
@numba.njit
def ref_path_c_from_pix_BS(pix, dx, driftx_xr, drifty_xr):
"""
Returns the true a + i b (coords from ref point) from the pixel coords
Parameters
----------
pix : complex
pixel location in fraction of dx
dx : Xrange float
width of the image
Returns
-------
a, b, a_xr, b_xr : c value as complex and as Xrange
"""
a_xr = (pix.real * dx[0]) + driftx_xr[0]
b_xr = (pix.imag * dx[0]) + drifty_xr[0]
return fsxn.to_standard(a_xr), fsxn.to_standard(b_xr), a_xr, b_xr
@numba.njit
def numba_dZndc_path(Zn_path, has_xr, ref_index_xr, ref_xr,
ref_div_iter, ref_order, dfdz, dx_xr, xr_detect_activated):
"""
Compute dZndc in Xr, or std precision, depending on xr_detect_activated
"""
ref_orbit_len = Zn_path.shape[0]
valid_pts = min(ref_orbit_len, ref_div_iter)
xr_act = xr_detect_activated
dx = fsxn.to_standard(dx_xr[0])
if xr_act:
dZndc_path = np.zeros((1,), dtype=numba.complex128) # dummy
dZndc_xr_path = Xr_template.repeat(ref_orbit_len)
refpath_ptr = np.zeros((2,), dtype=numba.int32)
out_is_xr = np.zeros((1,), dtype=numba.bool_)
out_xr = Xr_template.repeat(1)
for i in range(1, valid_pts):
ref_zn = ref_path_get(
Zn_path, i - 1,
has_xr, ref_index_xr, ref_xr, refpath_ptr,
out_is_xr, out_xr, 0
)
ref_zn_xr = ensure_xr(ref_zn, out_xr[0], out_is_xr[0])
dZndc_xr_path[i] = dfdz(ref_zn_xr) * dZndc_xr_path[i - 1] + dx_xr[0]
if (i == ref_order - 1):
# /!\ We have a cycle, use the "wrapped" value at 0
# Note that this value will be used... a lot !
ref_zn = ref_path_get(
Zn_path, i,
has_xr, ref_index_xr, ref_xr, refpath_ptr,
out_is_xr, out_xr, 0
)
ref_zn_xr = ensure_xr(ref_zn, out_xr[0], out_is_xr[0])
dZndc_xr_path[0] = dfdz(Zn_path[i]) * dZndc_xr_path[i] + dx_xr[0]
else:
dZndc_path = np.zeros((ref_orbit_len,), dtype=numba.complex128)
dZndc_xr_path = Xr_template.repeat(1) # dummy
for i in range(1, valid_pts):
dZndc_path[i] = dfdz(Zn_path[i - 1]) * dZndc_path[i - 1] + dx
if (i == ref_order - 1):
# /!\ We have a cycle, use the "wrapped" value at 0
# Note that this value will be used... a lot !
dZndc_path[0] = dfdz(Zn_path[i]) * dZndc_path[i] + dx
return dZndc_path, dZndc_xr_path
@numba.njit
def numba_dZndc_path_BS(Zn_path, has_xr, ref_index_xr, refx_xr, refy_xr,
ref_div_iter, ref_order, dfxdx, dfxdy, dfydx, dfydy,
dx_xr, xr_detect_activated):
"""
Compute dXndb, dXnda, dYnda , dYndb in Xr, or std precision, depending on
xr_detect_activated
"""
ref_orbit_len = Zn_path.shape[0]
valid_pts = min(ref_orbit_len, ref_div_iter)
dx = fsxn.to_standard(dx_xr[0])
if xr_detect_activated:
dXnda_path = np.zeros((1,), dtype=numba.float64) # dummy
dXndb_path = np.zeros((1,), dtype=numba.float64) # dummy
dYnda_path = np.zeros((1,), dtype=numba.float64) # dummy
dYndb_path = np.zeros((1,), dtype=numba.float64) # dummy
dXnda_xr_path = Xr_float_template.repeat(ref_orbit_len)
dXndb_xr_path = Xr_float_template.repeat(ref_orbit_len)
dYnda_xr_path = Xr_float_template.repeat(ref_orbit_len)
dYndb_xr_path = Xr_float_template.repeat(ref_orbit_len)
refpath_ptr = np.zeros((2,), dtype=numba.int32)
out_is_xr = np.zeros((1,), dtype=numba.bool_)
out_xr = Xr_float_template.repeat(2) # coord X, coord Y
for i in range(1, valid_pts):
from_i = i - 1
to_i = i
ref_zn = ref_path_get_BS(
Zn_path,from_i,
has_xr, ref_index_xr, refx_xr, refy_xr, refpath_ptr,
out_is_xr, out_xr, 0, 1
)
ref_xn_xr, ref_yn_xr = ensure_xr_BS(
ref_zn, out_xr[0], out_xr[1], out_is_xr[0]
)
incr_deriv_ref_BS(
dXnda_xr_path, dXndb_xr_path, dYnda_xr_path, dYndb_xr_path,
from_i, to_i, dx_xr[0],
ref_xn_xr, ref_yn_xr, dfxdx, dfxdy, dfydx, dfydy
)
if (i == ref_order - 1):
# /!\ We have a cycle, use the "wrapped" value at 0
# Note that this value will be used... a lot !
from_i = i
to_i = 0
ref_zn = ref_path_get_BS(
Zn_path, from_i,
has_xr, ref_index_xr, refx_xr, refy_xr, refpath_ptr,
out_is_xr, out_xr, 0, 1
)
ref_xn_xr, ref_yn_xr = ensure_xr_BS(
ref_zn, out_xr[0], out_xr[1], out_is_xr[0]
)
incr_deriv_ref_BS(
dXnda_xr_path, dXndb_xr_path, dYnda_xr_path, dYndb_xr_path,
from_i, to_i, dx_xr[0],
ref_xn_xr, ref_yn_xr, dfxdx, dfxdy, dfydx, dfydy
)
else:
dXnda_path = np.zeros((ref_orbit_len,), dtype=numba.float64)
dXndb_path = np.zeros((ref_orbit_len,), dtype=numba.float64)
dYnda_path = np.zeros((ref_orbit_len,), dtype=numba.float64)
dYndb_path = np.zeros((ref_orbit_len,), dtype=numba.float64)
dXnda_xr_path = Xr_float_template.repeat(1) # dummy
dXndb_xr_path = Xr_float_template.repeat(1) # dummy
dYnda_xr_path = Xr_float_template.repeat(1) # dummy
dYndb_xr_path = Xr_float_template.repeat(1) # dummy
for i in range(1, valid_pts):
from_i = i - 1
to_i = i
Xn = np.real(Zn_path[from_i]) #.real
Yn = np.imag(Zn_path[from_i]) #.imag
incr_deriv_ref_BS(
dXnda_path, dXndb_path, dYnda_path, dYndb_path,
from_i, to_i, dx,
Xn, Yn, dfxdx, dfxdy, dfydx, dfydy
)
if (i == ref_order - 1):
# /!\ We have a cycle, use the "wrapped" value at 0
# Note that this value will be used... a lot !
from_i = i
to_i = 0
Xn = np.real(Zn_path[from_i]) #.real
Yn = np.imag(Zn_path[from_i]) #.imag
incr_deriv_ref_BS(
dXnda_path, dXndb_path, dYnda_path, dYndb_path,
from_i, to_i, dx,
Xn, Yn, dfxdx, dfxdy, dfydx, dfydy
)
return (
dXnda_path, dXndb_path, dYnda_path, dYndb_path,
dXnda_xr_path, dXndb_xr_path, dYnda_xr_path, dYndb_xr_path
)
@numba.njit
def incr_deriv_ref_BS(
dXnda_path, dXndb_path, dYnda_path, dYndb_path,
from_i, to_i, dx,
Xn, Yn, dfxdx, dfxdy, dfydx, dfydy,
):
"""
H = [dfxdx dfxdy] [dfx] = H x [dx]
[dfydx dfydy] [dfy] [dy]
"""
dfxdx = dfxdx(Xn, Yn)
dfxdy = dfxdy(Xn, Yn)
dfydx = dfydx(Xn, Yn)
dfydy = dfydy(Xn, Yn)
dXnda = dXnda_path[from_i]
dXndb = dXndb_path[from_i]
dYnda = dYnda_path[from_i]
dYndb = dYndb_path[from_i]
dXnda_path[to_i] = dfxdx * dXnda + dfxdy * dYnda + dx
dXndb_path[to_i] = dfxdx * dXndb + dfxdy * dYndb
dYnda_path[to_i] = dfydx * dXnda + dfydy * dYnda
dYndb_path[to_i] = dfydx * dXndb + dfydy * dYndb - dx
@numba.njit
def ref_path_get(ref_path, idx, has_xr, ref_index_xr, ref_xr, refpath_ptr,
out_is_xr, out_xr, out_index):
"""
Alternative to getitem which also takes as input prev_idx, curr_xr :
allows to optimize the look-up of Xrange values in case of successive calls
with increasing idx.
idx :
index requested
(prev_idx, curr_xr) :
couple returned from last call, last index requested + next xr target
Contract : curr_xr the smallest integer that verify :
prev_idx <= ref_index_xr[curr_xr]
or curr_xr = ref_index_xr.size (No more xr)
Returns
-------
(val, xr_val, is_xr, prev_idx, curr_xr)
val : np.complex128
Modify in place:
xr_val : complex128_Xrange_scalar -> pushed to out_xr[out_index]
is_xr : bool -> pushed to out_is_xr[out_index]
prev_idx == refpath_ptr[0] : int
curr_xr == refpath_ptr[1] : int (index in path ref_xr)
"""
if not(has_xr):
return ref_path[idx]
# Not an increasing sequence, reset to restart a new sequence
if idx < refpath_ptr[0]:
# Rewind to 0
refpath_ptr[0] = 0 # prev_idx = 0
refpath_ptr[1] = 0 # | |
"""
Multilayer Perceptron
"""
__authors__ = "<NAME>"
__copyright__ = "Copyright 2012-2013, Universite de Montreal"
__credits__ = ["<NAME>", "<NAME>"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
import logging
import math
import operator
import sys
import warnings
import numpy as np
from theano.compat import six
from six.moves import reduce, xrange
from theano import config
from theano.gof.op import get_debug_values
from theano.sandbox.cuda import cuda_enabled
from theano.sandbox.cuda.dnn import dnn_available, dnn_pool
from theano.sandbox.rng_mrg import MRG_RandomStreams
from theano.tensor.signal.pool import pool_2d
import theano.tensor as T
from pylearn2.compat import OrderedDict
from pylearn2.costs.mlp import Default
from pylearn2.expr.probabilistic_max_pooling import max_pool_channels
# Try to import the fast cudnn library, else fallback to conv2d
if cuda_enabled and dnn_available():
try:
from pylearn2.linear import cudnn2d as conv2d
except ImportError:
from pylearn2.linear import conv2d
else:
from pylearn2.linear import conv2d
from pylearn2.linear.matrixmul import MatrixMul
from pylearn2.model_extensions.norm_constraint import MaxL2FilterNorm
from pylearn2.models.model import Model
from pylearn2.monitor import get_monitor_doc
from pylearn2.expr.nnet import arg_of_softmax
from pylearn2.expr.nnet import pseudoinverse_softmax_numpy
from pylearn2.space import CompositeSpace
from pylearn2.space import Conv2DSpace
from pylearn2.space import Space
from pylearn2.space import VectorSpace, IndexSpace
from pylearn2.utils import function
from pylearn2.utils import is_iterable
from pylearn2.utils import py_float_types
from pylearn2.utils import py_integer_types
from pylearn2.utils import safe_union
from pylearn2.utils import safe_zip
from pylearn2.utils import safe_izip
from pylearn2.utils import sharedX
from pylearn2.utils import wraps
from pylearn2.utils import contains_inf
from pylearn2.utils import isfinite
from pylearn2.utils.data_specs import DataSpecsMapping
from pylearn2.expr.nnet import (elemwise_kl, kl, compute_precision,
compute_recall, compute_f1)
# Only to be used by the deprecation warning wrapper functions
from pylearn2.costs.mlp import L1WeightDecay as _L1WD
from pylearn2.costs.mlp import WeightDecay as _WD
logger = logging.getLogger(__name__)
logger.debug("MLP changing the recursion limit.")
# We need this to be high enough that the big theano graphs we make
# when doing max pooling via subtensors don't cause python to complain.
# python intentionally declares stack overflow well before the stack
# segment is actually exceeded. But we can't make this value too big
# either, or we'll get seg faults when the python interpreter really
# does go over the stack segment.
# IG encountered seg faults on eos3 (a machine at LISA labo) when using
# 50000 so for now it is set to 40000.
# I think the actual safe recursion limit can't be predicted in advance
# because you don't know how big of a stack frame each function will
# make, so there is not really a "correct" way to do this. Really the
# python interpreter should provide an option to raise the error
# precisely when you're going to exceed the stack segment.
sys.setrecursionlimit(40000)
class Layer(Model):
"""
Abstract class. A Layer of an MLP.
May only belong to one MLP.
Parameters
----------
kwargs : dict
Passed on to the superclass.
Notes
-----
This is not currently a Block because as far as I know the Block interface
assumes every input is a single matrix. It doesn't support using Spaces to
work with composite inputs, stacked multichannel image inputs, etc. If the
Block interface were upgraded to be that flexible, then we could make this
a block.
"""
# When applying dropout to a layer's input, use this for masked values.
# Usually this will be 0, but certain kinds of layers may want to override
# this behaviour.
dropout_input_mask_value = 0.
def get_mlp(self):
"""
Returns the MLP that this layer belongs to.
Returns
-------
mlp : MLP
The MLP that this layer belongs to, or None if it has not been
assigned to an MLP yet.
"""
if hasattr(self, 'mlp'):
return self.mlp
return None
def set_mlp(self, mlp):
"""
Assigns this layer to an MLP. This layer will then use the MLP's
random number generator, batch size, etc. This layer's name must
be unique within the MLP.
Parameters
----------
mlp : MLP
"""
assert self.get_mlp() is None
self.mlp = mlp
def get_layer_monitoring_channels(self, state_below=None,
state=None, targets=None):
"""
Returns monitoring channels.
Parameters
----------
state_below : member of self.input_space
A minibatch of states that this Layer took as input.
Most of the time providing state_blow is unnecessary when
state is given.
state : member of self.output_space
A minibatch of states that this Layer took on during fprop.
Provided externally so that we don't need to make a second
expression for it. This helps keep the Theano graph smaller
so that function compilation runs faster.
targets : member of self.output_space
Should be None unless this is the last layer.
If specified, it should be a minibatch of targets for the
last layer.
Returns
-------
channels : OrderedDict
A dictionary mapping channel names to monitoring channels of
interest for this layer.
"""
return OrderedDict()
def fprop(self, state_below):
"""
Does the forward prop transformation for this layer.
Parameters
----------
state_below : member of self.input_space
A minibatch of states of the layer below.
Returns
-------
state : member of self.output_space
A minibatch of states of this layer.
"""
raise NotImplementedError(
str(type(self)) + " does not implement fprop.")
def cost(self, Y, Y_hat):
"""
The cost of outputting Y_hat when the true output is Y.
Parameters
----------
Y : theano.gof.Variable
The targets
Y_hat : theano.gof.Variable
The predictions.
Assumed to be the output of the layer's `fprop` method.
The implmentation is permitted to do things like look at the
ancestors of `Y_hat` in the theano graph. This is useful for
e.g. computing numerically stable *log* probabilities when
`Y_hat` is the *probability*.
Returns
-------
cost : theano.gof.Variable
A Theano scalar describing the cost.
"""
raise NotImplementedError(
str(type(self)) + " does not implement mlp.Layer.cost.")
def cost_from_cost_matrix(self, cost_matrix):
"""
The cost final scalar cost computed from the cost matrix
Parameters
----------
cost_matrix : WRITEME
Examples
--------
>>> # C = model.cost_matrix(Y, Y_hat)
>>> # Do something with C like setting some values to 0
>>> # cost = model.cost_from_cost_matrix(C)
"""
raise NotImplementedError(
str(type(self)) + " does not implement "
"mlp.Layer.cost_from_cost_matrix.")
def cost_matrix(self, Y, Y_hat):
"""
The element wise cost of outputting Y_hat when the true output is Y.
Parameters
----------
Y : WRITEME
Y_hat : WRITEME
Returns
-------
WRITEME
"""
raise NotImplementedError(
str(type(self)) + " does not implement mlp.Layer.cost_matrix")
def set_weights(self, weights):
"""
Sets the weights of the layer.
Parameters
----------
weights : ndarray
A numpy ndarray containing the desired weights of the layer. This
docstring is provided by the Layer base class. Layer subclasses
should add their own docstring explaining the subclass-specific
format of the ndarray.
"""
raise NotImplementedError(
str(type(self)) + " does not implement set_weights.")
def get_biases(self):
"""
Returns the value of the biases of the layer.
Returns
-------
biases : ndarray
A numpy ndarray containing the biases of the layer. This docstring
is provided by the Layer base class. Layer subclasses should add
their own docstring explaining the subclass-specific format of the
ndarray.
"""
raise NotImplementedError(
str(type(self)) + " does not implement "
"get_biases (perhaps because the class has no biases).")
def set_biases(self, biases):
"""
Sets the biases of the layer.
Parameters
----------
biases : ndarray
A numpy ndarray containing the desired biases of the layer. This
docstring is provided by the Layer base class. Layer subclasses
should add their own docstring explaining the subclass-specific
format of the ndarray.
"""
raise NotImplementedError(
str(type(self)) + " does not implement "
"set_biases (perhaps because the class has no biases).")
def get_weights_format(self):
"""
Returns a description of how to interpret the weights of the layer.
Returns
-------
format: tuple
Either ('v', 'h') or ('h', 'v').
('v', 'h') means a weight matrix of shape
(num visible units, num hidden units),
while ('h', 'v') means the transpose of it.
"""
raise NotImplementedError
def get_weight_decay(self, coeff):
"""
Provides an expression for a squared L2 penalty on the weights.
Parameters
----------
coeff : float or tuple
The coefficient on the weight decay penalty for this layer.
This docstring is provided by the Layer base class. Individual
Layer subclasses should add their own docstring explaining the
format of `coeff` for that particular layer. For most ordinary
layers, `coeff` is a single float to multiply by the weight
decay term. Layers containing many pieces may take a tuple or
nested tuple of floats, and should explain the semantics of
the different elements of the tuple.
Returns
-------
weight_decay : theano.gof.Variable
An expression for the weight decay penalty term for this
layer.
"""
raise NotImplementedError(
str(type(self)) + " does not implement get_weight_decay.")
def get_l1_weight_decay(self, coeff):
"""
Provides | |
pass
if len(tweet_text) > 116:
try:
tweet_text = 'A {d} {n} will be {p} until {e}.'.format(
d=self.description, n=self.name,
p=place_string, e=self.expire_time)
except AttributeError:
tweet_text = (
"A {d} {n} appeared {p}! It'll expire between {e1} & {e2}."
).format(d=self.description, n=self.name, p=place_string,
e1=self.min_expire_time, e2=self.max_expire_time)
else:
return tweet_text
if len(tweet_text) > 116:
try:
tweet_text = 'A {d} {n} will expire at {e}.'.format(
n=self.name, e=self.expire_time)
except AttributeError:
tweet_text = (
'A {d} {n} will expire between {e1} & {e2}.').format(
d=self.description, n=self.name, e1=self.min_expire_time,
e2=self.max_expire_time)
else:
return tweet_text
async def tweet(self):
""" Create message, reduce it until it fits in a tweet, and then tweet
it with a link to Google maps and tweet location included.
"""
tag_string = ''
try:
for hashtag in self.hashtags:
tag_string += ' #{}'.format(hashtag)
except TypeError:
pass
try:
tweet_text = (
'A {d} {n} appeared! It will be {p} until {e}. {t}').format(
d=self.description, n=self.name, p=self.place,
e=self.expire_time, t=tag_string)
except AttributeError:
tweet_text = (
'A {d} {n} appeared {p}! It will expire sometime between '
'{e1} and {e2}. {t}').format(
d=self.description, n=self.name, p=self.place,
e1=self.min_expire_time, e2=self.max_expire_time,
t=tag_string)
if len(tweet_text) > 116:
tweet_text = self.shorten_tweet(tweet_text)
tweet_text += ' ' + self.map_link
media_id = None
client = self.get_twitter_client()
if conf.TWEET_IMAGES:
try:
image = PokeImage(self.pokemon, self.move1, self.move2, self.time_of_day).create()
except Exception:
self.log.exception('Failed to create a Tweet image.')
else:
try:
media = await client.upload_media(image,
media_type='image/png',
media_category='tweet_image',
chunked=True)
media_id = media['media_id']
except Exception:
self.log.exception('Failed to upload Tweet image.')
try:
await client.api.statuses.update.post(
status=tweet_text,
media_ids=media_id,
lat=str(self.coordinates[0]),
long=str(self.coordinates[1]),
display_coordinates=True)
except Exception:
self.log.exception('Failed to tweet about {}.', self.name)
return False
else:
self.log.info('Sent a tweet about {}.', self.name)
return True
finally:
try:
image.close()
except AttributeError:
pass
@staticmethod
def generic_place_string():
""" Create a place string with area name (if available)"""
# no landmarks defined, just use area name
place = 'in {}'.format(conf.AREA_NAME)
return place
@classmethod
def get_pushbullet_client(cls):
try:
return cls._pushbullet_client
except AttributeError:
cls._pushbullet_client = AsyncPushbullet(
api_key=conf.PB_API_KEY,
loop=LOOP)
return cls._pushbullet_client
@classmethod
def get_twitter_client(cls):
try:
return cls._twitter_client
except AttributeError:
cls._twitter_client = PeonyClient(
consumer_key=conf.TWITTER_CONSUMER_KEY,
consumer_secret=conf.TWITTER_CONSUMER_SECRET,
access_token=conf.TWITTER_ACCESS_KEY,
access_token_secret=conf.TWITTER_ACCESS_SECRET,
session=SessionManager.get(),
loop=LOOP)
return cls._twitter_client
class Notifier:
def __init__(self):
self.cache = NotificationCache()
self.notify_ranking = conf.NOTIFY_RANKING
self.initial_score = conf.INITIAL_SCORE
self.minimum_score = conf.MINIMUM_SCORE
self.last_notification = monotonic() - (conf.FULL_TIME / 2)
self.always_notify = []
self.log = get_logger('notifier')
self.never_notify = conf.NEVER_NOTIFY_IDS
self.rarity_override = conf.RARITY_OVERRIDE
self.sent = 0
if self.notify_ranking:
self.initialize_ranking()
LOOP.call_later(3600, self.set_notify_ids)
elif conf.NOTIFY_IDS or conf.ALWAYS_NOTIFY_IDS:
self.notify_ids = conf.NOTIFY_IDS or conf.ALWAYS_NOTIFY_IDS
self.always_notify = conf.ALWAYS_NOTIFY_IDS
self.notify_ranking = len(self.notify_ids)
def set_notify_ids(self):
LOOP.create_task(self._set_notify_ids())
LOOP.call_later(3600, self.set_notify_ids)
async def _set_notify_ids(self):
await run_threaded(self.set_ranking)
self.notify_ids = self.pokemon_ranking[0:self.notify_ranking]
self.always_notify = set(self.pokemon_ranking[0:conf.ALWAYS_NOTIFY])
self.always_notify |= set(conf.ALWAYS_NOTIFY_IDS)
self.log.info('Updated Pokemon rankings.')
def initialize_ranking(self):
self.pokemon_ranking = load_pickle('ranking')
if self.pokemon_ranking:
self.notify_ids = self.pokemon_ranking[0:self.notify_ranking]
self.always_notify = set(self.pokemon_ranking[0:conf.ALWAYS_NOTIFY])
self.always_notify |= set(conf.ALWAYS_NOTIFY_IDS)
else:
LOOP.run_until_complete(self._set_notify_ids())
def set_ranking(self):
try:
with session_scope() as session:
self.pokemon_ranking = get_pokemon_ranking(session)
except Exception:
self.log.exception('An exception occurred while trying to update rankings.')
else:
dump_pickle('ranking', self.pokemon_ranking)
def get_rareness_score(self, pokemon_id):
if pokemon_id in self.rarity_override:
return self.rarity_override[pokemon_id]
exclude = len(self.always_notify)
total = self.notify_ranking - exclude
ranking = self.notify_ids.index(pokemon_id) - exclude
percentile = 1 - (ranking / total)
return percentile
def get_required_score(self, now=None):
if self.initial_score == self.minimum_score or conf.FULL_TIME == 0:
return self.initial_score
now = now or monotonic()
time_passed = now - self.last_notification
subtract = self.initial_score - self.minimum_score
if time_passed < conf.FULL_TIME:
subtract *= (time_passed / conf.FULL_TIME)
return self.initial_score - subtract
def eligible(self, pokemon):
pokemon_id = pokemon['pokemon_id']
encounter_id = pokemon['encounter_id']
if pokemon_id in self.never_notify:
return False
if pokemon_id in self.always_notify:
return encounter_id not in self.cache
if (pokemon_id not in self.notify_ids
and pokemon_id not in self.rarity_override):
return False
if conf.IGNORE_RARITY:
return encounter_id not in self.cache
try:
if pokemon['time_till_hidden'] < conf.TIME_REQUIRED:
return False
except KeyError:
pass
if encounter_id in self.cache:
return False
rareness = self.get_rareness_score(pokemon_id)
highest_score = (rareness + 1) / 2
score_required = self.get_required_score()
return highest_score > score_required
def cleanup(self, encounter_id, handle):
self.cache.remove(encounter_id)
handle.cancel()
return False
async def notify(self, pokemon, time_of_day):
"""Send a PushBullet notification and/or a Tweet, depending on if their
respective API keys have been set in config.
"""
whpushed = False
notified = False
pokemon_id = pokemon['pokemon_id']
name = POKEMON[pokemon_id]
encounter_id = pokemon['encounter_id']
if encounter_id in self.cache:
self.log.info("{} was already notified about.", name)
return False
now = monotonic()
if pokemon_id in self.always_notify:
score_required = 0
else:
score_required = self.get_required_score(now)
try:
iv_score = (pokemon['individual_attack'] + pokemon['individual_defense'] + pokemon['individual_stamina']) / 45
except KeyError:
if conf.IGNORE_IVS:
iv_score = None
else:
self.log.warning('IVs are supposed to be considered but were not found.')
return False
if score_required:
if conf.IGNORE_RARITY:
score = iv_score
elif conf.IGNORE_IVS:
score = self.get_rareness_score(pokemon_id)
else:
rareness = self.get_rareness_score(pokemon_id)
score = (iv_score + rareness) / 2
else:
score = 1
if score < score_required:
try:
self.log.info("{}'s score was {:.3f} (iv: {:.3f}),"
" but {:.3f} was required.",
name, score, iv_score if iv_score is not None else -1, score_required)
except TypeError:
pass
return False
if 'time_till_hidden' not in pokemon:
seen = pokemon['seen'] % 3600
cache_handle = self.cache.store.add(pokemon['encounter_id'])
try:
with session_scope() as session:
tth = await run_threaded(estimate_remaining_time, session, pokemon['spawn_id'], seen)
except Exception:
self.log.exception('An exception occurred while trying to estimate remaining time.')
now_epoch = time()
tth = (pokemon['seen'] + 90 - now_epoch, pokemon['seen'] + 3600 - now_epoch)
LOOP.call_later(tth[1], self.cache.remove, pokemon['encounter_id'])
if pokemon_id not in self.always_notify:
mean = sum(tth) / 2
if mean < conf.TIME_REQUIRED:
self.log.info('{} has only around {} seconds remaining.', name, mean)
return False
pokemon['earliest_tth'], pokemon['latest_tth'] = tth
else:
cache_handle = self.cache.add(pokemon['encounter_id'], pokemon['time_till_hidden'])
if WEBHOOK and NATIVE:
notified, whpushed = await gather(
Notification(pokemon, iv_score, time_of_day).notify(),
self.webhook(pokemon),
loop=LOOP)
elif NATIVE:
notified = await Notification(pokemon, iv_score, time_of_day).notify()
elif WEBHOOK:
whpushed = await self.webhook(pokemon)
if notified or whpushed:
self.last_notification = monotonic()
self.sent += 1
return True
else:
return self.cleanup(encounter_id, cache_handle)
async def notify_raid(self, fort):
discord = False
telegram = False
if conf.RAIDS_DISCORD_URL:
discord = await self.notify_raid_to_discord(fort)
if conf.TELEGRAM_BOT_TOKEN and conf.TELEGRAM_RAIDS_CHAT_ID:
telegram = await self.notify_raid_to_telegram(fort)
if discord or telegram:
self.last_notification = monotonic()
self.sent += 1
async def notify_raid_to_discord(self, fort):
raid = fort.raid_info
if raid.raid_pokemon.pokemon_id not in conf.RAIDS_IDS:
if raid.raid_level < conf.RAIDS_LVL_MIN:
return
tth = raid.raid_battle_ms // 1000 if raid.raid_pokemon.pokemon_id == 0 else raid.raid_end_ms // 1000
timer_end = datetime.fromtimestamp(tth, None)
time_left = timedelta(seconds=tth - time())
payload = {
'username': 'Egg' if raid.raid_pokemon.pokemon_id == 0 else POKEMON[raid.raid_pokemon.pokemon_id],
'avatar_url': conf.ICONS_URL.format(raid.raid_pokemon.pokemon_id),
'embeds': [{
'title': 'Raid{}'.format(raid.raid_level),
'url': self.get_gmaps_link(fort.latitude, fort.longitude),
'description': '{} ({}h {}mn {}s)'.format(timer_end.strftime("%H:%M:%S"), time_left.seconds // 3600, (time_left.seconds // 60) % 60, time_left.seconds % 60),
'thumbnail': {'url': conf.ICONS_URL.format(raid.raid_pokemon.pokemon_id)},
'image': {'url': self.get_static_map_url(fort.latitude, fort.longitude)}
}]
}
session = SessionManager.get()
return await self.hook_post(conf.RAIDS_DISCORD_URL, session, payload)
async def notify_raid_to_telegram(self, fort):
raid = fort.raid_info
if raid.raid_pokemon.pokemon_id not in conf.RAIDS_IDS:
if raid.raid_level < conf.RAIDS_LVL_MIN:
return
title = '[Raid lvl.{}] {}'.format(raid.raid_level, 'Egg' if raid.raid_pokemon.pokemon_id == 0 else POKEMON[raid.raid_pokemon.pokemon_id])
tth = raid.raid_battle_ms // 1000 if raid.raid_pokemon.pokemon_id == 0 else raid.raid_end_ms // 1000
timer_end = datetime.fromtimestamp(tth, None)
time_left = timedelta(seconds=tth - time())
description = '{} ({}h {}mn {}s)'.format(timer_end.strftime("%H:%M:%S"), time_left.seconds // 3600, (time_left.seconds // 60) % 60, time_left.seconds % 60)
if conf.TELEGRAM_MESSAGE_TYPE == 0:
TELEGRAM_BASE_URL = "https://api.telegram.org/bot{token}/sendVenue".format(token=conf.TELEGRAM_BOT_TOKEN)
payload = {
'chat_id': conf.TELEGRAM_RAIDS_CHAT_ID,
'latitude': fort.latitude,
'longitude': fort.longitude,
'title' : title,
'address' : description,
}
else:
TELEGRAM_BASE_URL = "https://api.telegram.org/bot{token}/sendMessage".format(token=conf.TELEGRAM_BOT_TOKEN)
map_link = '<a href="{}">Open GMaps</a>'.format(self.get_gmaps_link(fort.latitude, fort.longitude))
payload = {
'chat_id': conf.TELEGRAM_RAIDS_CHAT_ID,
'parse_mode': 'HTML',
'text' : title + '\n' + description + '\n\n' + map_link
}
session = SessionManager.get()
return await self.hook_post(TELEGRAM_BASE_URL, session, payload, timeout=8)
def get_gmaps_link(self, lat, lng):
return 'http://maps.google.com/maps?q={},{}'.format(repr(lat), repr(lng))
def get_static_map_url(self, lat, lng):
center = '{},{}'.format(lat, lng)
query_center = 'center={}'.format(center)
query_markers = 'markers=color:red%7C{}'.format(center)
query_size = 'size={}x{}'.format('250', '125')
query_zoom = 'zoom={}'.format('15')
query_maptype = 'maptype={}'.format('roadmap')
url = ('https://maps.googleapis.com/maps/api/staticmap?' +
query_center + '&' + query_markers + '&' +
query_maptype + '&' + query_size + '&' + query_zoom)
return url
async def webhook(self, pokemon):
""" Send a notification via webhook
"""
try:
tth = pokemon['time_till_hidden']
ts = pokemon['expire_timestamp']
except KeyError:
tth = pokemon['earliest_tth']
ts = pokemon['seen'] + tth
data = {
'type': "pokemon",
'message': {
"encounter_id": pokemon['encounter_id'],
"pokemon_id": pokemon['pokemon_id'],
"last_modified_time": pokemon['seen'] * 1000,
"spawnpoint_id": pokemon['spawn_id'],
"latitude": pokemon['lat'],
"longitude": pokemon['lon'],
"disappear_time": ts,
"time_until_hidden_ms": tth * 1000
}
}
try:
data['message']['individual_attack'] = pokemon['individual_attack']
data['message']['individual_defense'] = pokemon['individual_defense']
data['message']['individual_stamina'] = pokemon['individual_stamina']
data['message']['move_1'] = pokemon['move_1']
data['message']['move_2'] = pokemon['move_2']
data['message']['height'] = pokemon['height']
data['message']['weight'] = pokemon['weight']
data['message']['gender'] = pokemon['gender']
except KeyError:
pass
session | |
deepspeed must be called on every step regardless of the value of gradient_accumulation_steps
if self.deepspeed:
self.deepspeed.step()
if (step + 1) % self.args.gradient_accumulation_steps == 0 or (
# last step in epoch but step is always smaller than gradient_accumulation_steps
steps_in_epoch <= self.args.gradient_accumulation_steps
and (step + 1) == steps_in_epoch
):
# Gradient clipping
if (
self.args.max_grad_norm is not None
and self.args.max_grad_norm > 0
and not self.deepspeed
):
# deepspeed does its own clipping
if self.use_amp:
# AMP: gradients need unscaling
self.scaler.unscale_(self.optimizer)
if hasattr(self.optimizer, "clip_grad_norm"):
# Some optimizers (like the sharded optimizer) have a specific way to do gradient clipping
self.optimizer.clip_grad_norm(self.args.max_grad_norm)
elif hasattr(model, "clip_grad_norm_"):
# Some models (like FullyShardedDDP) have a specific way to do gradient clipping
model.clip_grad_norm_(self.args.max_grad_norm)
else:
# Revert to normal clipping otherwise, handling Apex or full precision
torch.nn.utils.clip_grad_norm_(
amp.master_params(self.optimizer)
if self.use_apex
else model.parameters(),
self.args.max_grad_norm,
)
# Optimizer step
if self.deepspeed:
pass # called outside the loop
elif is_torch_tpu_available():
xm.optimizer_step(self.optimizer)
elif self.use_amp:
self.scaler.step(self.optimizer)
self.scaler.update()
else:
self.optimizer.step()
if not self.deepspeed:
self.lr_scheduler.step()
model.zero_grad()
self.state.global_step += 1
self.state.epoch = epoch + (step + 1) / steps_in_epoch
self.control = self.callback_handler.on_step_end(
self.args, self.state, self.control
)
self._maybe_log_save_evaluate(
tr_loss, tr_task_loss, tr_retrieval_loss, model, trial, epoch
)
if self.control.should_epoch_stop or self.control.should_training_stop:
break
self.control = self.callback_handler.on_epoch_end(
self.args, self.state, self.control
)
self._maybe_log_save_evaluate(
tr_loss, tr_task_loss, tr_retrieval_loss, model, trial, epoch
)
if self.args.tpu_metrics_debug or self.args.debug:
if is_torch_tpu_available():
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
else:
logger.warning(
"You enabled PyTorch/XLA debug metrics but you don't have a TPU "
"configured. Check your training configuration if this is unexpected."
)
if self.control.should_training_stop:
break
if self.args.past_index and hasattr(self, "_past"):
# Clean the state at the end of training
delattr(self, "_past")
logger.info(
"\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n"
)
if (
self.args.load_best_model_at_end
and self.state.best_model_checkpoint is not None
):
# Wait for everyone to get here so we are sur the model has been saved by process 0.
if is_torch_tpu_available():
xm.rendezvous("load_best_model_at_end")
elif self.args.local_rank != -1:
dist.barrier()
logger.info(
f"Loading best model from {self.state.best_model_checkpoint} (score: {self.state.best_metric})."
)
if isinstance(self.model, PreTrainedModel):
self.model = self.model.from_pretrained(
self.state.best_model_checkpoint
)
if self.place_model_on_device:
self.model = self.model.to(self.args.device)
else:
state_dict = torch.load(
os.path.join(self.state.best_model_checkpoint, WEIGHTS_NAME)
)
self.model.load_state_dict(state_dict)
if self.deepspeed:
self.deepspeed.load_checkpoint(
self.state.best_model_checkpoint,
load_optimizer_states=False,
load_lr_scheduler_states=False,
)
metrics = speed_metrics("train", start_time, self.state.max_steps)
if self._total_flos is not None:
self.store_flos()
metrics["total_flos"] = self.state.total_flos
self.log(metrics)
self.control = self.callback_handler.on_train_end(
self.args, self.state, self.control
)
# add remaining tr_loss
self._total_loss_scalar += tr_loss.item()
if self.deepspeed:
# free up any memory that might be useful for eval
self.deepspeed = None
self.optimizer = None
self.lr_scheduler = None
self.model_wrapped = self.model
gc.collect() # force memory release
# to restore normal behavior outside of train replay the place_model_on_device logic w/o deepspeed
self.place_model_on_device = self.args.place_model_on_device
if self.is_model_parallel:
self.place_model_on_device = False
self.is_in_train = False
self._memory_tracker.stop_and_update_metrics(metrics)
return TrainOutput(
self.state.global_step,
self._total_loss_scalar / self.state.global_step,
metrics,
)
def _maybe_log_save_evaluate(
self, tr_loss, task_loss, retrieval_loss, model, trial, epoch
):
if self.control.should_log:
logs: Dict[str, float] = {}
tr_loss_scalar = tr_loss.item()
task_loss_scalar = task_loss.item() if task_loss is not None else None
retrieval_loss_scalar = (
retrieval_loss.item() if retrieval_loss is not None else None
)
# reset tr_loss to zero
tr_loss -= tr_loss
task_loss -= task_loss
retrieval_loss -= retrieval_loss
logs["loss"] = round(
tr_loss_scalar
/ (self.state.global_step - self._globalstep_last_logged),
4,
)
if task_loss_scalar is not None:
logs["task_loss"] = round(
task_loss_scalar
/ (self.state.global_step - self._globalstep_last_logged),
4,
)
if retrieval_loss_scalar is not None:
logs["retrieval_loss"] = round(
retrieval_loss_scalar
/ (self.state.global_step - self._globalstep_last_logged),
4,
)
logs["learning_rate"] = self._get_learning_rate()
self._total_loss_scalar += tr_loss_scalar
self._globalstep_last_logged = self.state.global_step
self.log(logs)
metrics = None
if self.control.should_evaluate:
metrics = self.evaluate()
self._report_to_hp_search(trial, epoch, metrics)
if self.control.should_save:
self._save_checkpoint(model, trial, metrics=metrics)
self.control = self.callback_handler.on_save(
self.args, self.state, self.control
)
def training_step(
self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]
) -> torch.Tensor:
"""
Perform a training step on a batch of inputs.
Subclass and override to inject custom behavior.
Args:
model (:obj:`nn.Module`):
The model to train.
inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
argument :obj:`labels`. Check your model's documentation for all accepted arguments.
Return:
:obj:`torch.Tensor`: The tensor with training loss on this batch.
"""
model.train()
inputs = self._prepare_inputs(inputs)
if self.use_amp:
with autocast():
(
loss,
task_loss,
retrieval_loss,
retrieval_logits,
retrieval_instance_labels,
) = self.compute_loss(model, inputs)
else:
(
loss,
task_loss,
retrieval_loss,
retrieval_logits,
retrieval_instance_labels,
) = self.compute_loss(model, inputs)
if self.args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
task_loss = task_loss.mean() if task_loss is not None else None
retrieval_loss = (
retrieval_loss.mean() if retrieval_loss is not None else None
)
if self.args.gradient_accumulation_steps > 1 and not self.deepspeed:
# deepspeed handles loss scaling by gradient_accumulation_steps in its `backward`
loss = loss / self.args.gradient_accumulation_steps
task_loss = (
task_loss / self.args.gradient_accumulation_steps
if task_loss is not None
else None
)
retrieval_loss = (
retrieval_loss / self.args.gradient_accumulation_steps
if retrieval_loss is not None
else None
)
if self.use_amp:
self.scaler.scale(loss).backward()
elif self.use_apex:
with amp.scale_loss(loss, self.optimizer) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
# loss gets scaled under gradient_accumulation_steps in deepspeed
loss = self.deepspeed.backward(loss)
else:
loss.backward()
task_loss = task_loss.detach() if task_loss is not None else None
retrieval_loss = retrieval_loss.detach() if retrieval_loss is not None else None
return loss.detach(), task_loss, retrieval_loss
def compute_loss(self, model, inputs, return_outputs=False):
"""
How the loss is computed by Trainer. By default, all models return the loss in the first element.
Subclass and override for custom behavior.
"""
if self.label_smoother is not None and "labels" in inputs:
labels = inputs.pop("labels")
else:
labels = None
outputs = model(**inputs)
# Save past state if it exists
# TODO: this needs to be fixed and made cleaner later.
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index]
task_loss = None
retrieval_loss = None
if labels is not None:
loss = self.label_smoother(outputs, labels)
else:
# We don't use .loss here since the model may return tuples instead of ModelOutput.
loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0]
task_loss = outputs["task_loss"] if "task_loss" in outputs else None
retrieval_loss = (
outputs["retrieval_loss"] if "retrieval_loss" in outputs else None
)
retrieval_logits = (
outputs["retrieval_predictions"]
if "retrieval_predictions" in outputs
else None
)
retrieval_instance_labels = (
outputs["retrieval_instance_labels"]
if "retrieval_instance_labels" in outputs
else None
)
return (
(
loss,
task_loss,
retrieval_loss,
retrieval_logits,
retrieval_instance_labels,
outputs,
)
if return_outputs
else (
loss,
task_loss,
retrieval_loss,
retrieval_logits,
retrieval_instance_labels,
)
)
def evaluate(
self,
eval_dataset: Optional[Dataset] = None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
speed_metrics=False
) -> Dict[str, float]:
"""
Run evaluation and returns metrics.
The calling script will be responsible for providing a method to compute metrics, as they are task-dependent
(pass it to the init :obj:`compute_metrics` argument).
You can also subclass and override this method to inject custom behavior.
Args:
eval_dataset (:obj:`Dataset`, `optional`):
Pass a dataset if you wish to override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`,
columns not accepted by the ``model.forward()`` method are automatically removed. It must implement the
:obj:`__len__` method.
ignore_keys (:obj:`Lst[str]`, `optional`):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`"eval"`):
An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named
"eval_bleu" if the prefix is "eval" (default)
Returns:
A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The
dictionary also contains the epoch number which comes from the training state.
"""
# memory metrics - must set up as early as possible
self._memory_tracker.start()
if eval_dataset is not None and not isinstance(
eval_dataset, collections.abc.Sized
):
raise ValueError("eval_dataset must implement __len__")
eval_dataloader = self.get_eval_dataloader(eval_dataset)
start_time = time.time()
output = self.prediction_loop(
eval_dataloader,
description="Evaluation",
# No point gathering the predictions if there are no metrics, otherwise we defer to
# self.args.prediction_loss_only
prediction_loss_only=True if self.compute_metrics is None else None,
ignore_keys=ignore_keys,
metric_key_prefix=metric_key_prefix,
)
n_samples = len(eval_dataset if eval_dataset is not None else self.eval_dataset)
# output.metrics.update(speed_metrics(metric_key_prefix, start_time, n_samples))
self.log(output.metrics, use_global_step=False)
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics | |
the player a chance of getting a powerup or score
if self.hp <= 0:
self.kill()
player.score += 2
# A 5% chance of the player getting HP powerup
if random.random() > 0.95:
hpPowerup = hpPow(self.rect.centerx, self.rect.bottom)
all_sprites.add(hpPowerup)
hpPowerups.add(hpPowerup)
# A 15% chance of the player getting an AMMO powerup
if random.random() > 0.85:
ammoPowerup = ammoPow(self.rect.centerx, self.rect.bottom)
all_sprites.add(ammoPowerup)
ammoPowerups.add(ammoPowerup)
# A 2% chance of the player getting a TURRET powerup
if random.random() >0.95:
turretPowerup = turretPow(self.rect.centerx, self.rect.bottom)
all_sprites.add(turretPowerup)
turretPowerups.add(turretPowerup)
# Method creates the Mob's lazers
def pew(self):
antilazer = Antilazer(self.rect.centerx, self.rect.bottom)
all_sprites.add(antilazer)
antilazers.add(antilazer)
# Lazer Class
class Lazer(Sprite):
def __init__(self, x, y):
Sprite.__init__(self)
# Creates the lazer and applies a speed to it
self.image = pg.Surface((5,25))
self.image.fill(BLUE)
self.rect = self.image.get_rect()
self.rect.bottom = y
self.rect.centerx = x
self.speedy = -10
def update(self):
# Destroys the lazer so that the game wont lag
self.rect.y += self.speedy
if self.rect.bottom <= 0:
self.kill()
# Enemy Lazer Class
class Antilazer(Sprite):
def __init__(self, x, y):
Sprite.__init__(self)
# Gives the lazer a size and speed
self.image = pg.Surface((5,25))
self.image.fill(RED)
self.rect = self.image.get_rect()
self.rect.bottom = y
self.rect.centerx = x
self.speedy = 5
def update(self):
# Kills the lazers, so lag will not occur
self.rect.y += self.speedy
if self.rect.bottom < 0:
self.kill()
def update(self):
# Kills the lazers, so lag will not occur
self.rect.y += self.speedy
if self.rect.bottom < 0:
self.kill()
# Health powerup Class
class hpPow(Sprite):
def __init__(self, x, y):
Sprite.__init__(self)
#Gives it its dimensions
self.image = pg.transform.scale(health_image, (20, 20))
# Eliminates these colors
self.image.set_colorkey(BLACK)
#Gives the powerup speed
self.rect = self.image.get_rect()
self.rect.bottom = y
self.rect.centerx = x
self.speedy = 2.5
def update(self):
# Destroys the powerups so lag will not occur
self.rect.y += self.speedy
if self.rect.top > HEIGHT:
self.kill()
class ammoPow(Sprite):
def __init__(self, x, y):
Sprite.__init__(self)
# Gives the powerup an image
self.image = pg.transform.scale(ammo_image, (20, 20))
# Eliminates these colors
self.image.set_colorkey(BLACK)
#Gives the powerup a size and speed
self.rect = self.image.get_rect()
self.rect.bottom = y
self.rect.centerx = x
self.speedy = 2.5
def update(self):
# Destroys the powerup, so lag will not occur
self.rect.y += self.speedy
if self.rect.top > HEIGHT:
self.kill()
# Class for the turret powerup
class turretPow(Sprite):
def __init__(self, x, y):
Sprite.__init__(self)
# Gives the powerup an image
self.image = pg.transform.scale(turret_image, (20, 20))
# Eliminates these colors
self.image.set_colorkey(BLACK)
#Gives the powerup a size and speed
self.rect = self.image.get_rect()
self.rect.bottom = y
self.rect.centerx = x
self.speedy = 2.5
# Method updates to destroy all powerups that go off the screen
def update(self):
# Destroys powerups so lag will not occur
self.rect.y += self.speedy
if self.rect.top > HEIGHT:
self.kill()
# Boss class
class Boss(Sprite):
def __init__(self):
Sprite.__init__(self)
# Gives it dimensions and a random speed and spawn point
self.image = pg.transform.scale(Boss_image, (80,80))
self.image.set_colorkey(BLACK)
self.rect = self.image.get_rect()
self.rect.x = random.randrange(0, WIDTH-self.rect.width)
self.rect.y = random.randrange(0, 240)
self.speedx = random.randrange(1,8)
self.speedy = random.randrange(1,8)
# Gives it hp
self.hp = 500
def update(self):
# Updates various compotents
self.rect.x += self.speedx
if self.rect.x > WIDTH or self.rect.x < 0:
self.speedx*=-1
self.rect.y += 25
if self.rect.y > HEIGHT:
self.rect.y = -25
self.rect.x = random.randrange(0, WIDTH-self.rect.width)
# Allows the boss to randomly shoot
self.shoot = random.randrange(1,1000)
if self.shoot % 150 == 0:
self.pew()
# When the bosses health reach 0 multiple things will happen
if self.hp <= 0:
# Kills the boss
self.kill()
# Gives points
player.score += 10
# 25% chance of getting a HP powerup
if random.random() > 0.75:
hpPowerup = hpPow(self.rect.centerx, self.rect.bottom)
all_sprites.add(hpPowerup)
hpPowerups.add(hpPowerup)
# 30% chance of getting an AMMO powerup
if random.random() > 0.70:
ammoPowerup = ammoPow(self.rect.centerx, self.rect.bottom)
all_sprites.add(ammoPowerup)
ammoPowerups.add(ammoPowerup)
# 22% Chance of getting a TURRET powerup
if random.random() >0.78:
turretPowerup = turretPow(self.rect.centerx, self.rect.bottom)
all_sprites.add(turretPowerup)
turretPowerups.add(turretPowerup)
# Method creates the bosses' lazers
def pew(self):
# Creates two lazers rather than one
antilazer1 = Antilazer(self.rect.left, self.rect.centery)
antilazer2 = Antilazer(self.rect.right, self.rect.centery)
all_sprites.add(antilazer1)
all_sprites.add(antilazer2)
antilazers.add(antilazer1)
antilazers.add(antilazer2)
# The game loop
game_over = True
running = True
while running:
# Allows for the end screen and starting screen to not have sprites until the player begins to play
if game_over:
# Where all things are created and grouped
show_go_screen()
game_over = False
all_sprites = pg.sprite.Group()
hpPowerups = pg.sprite.Group()
ammoPowerups = pg.sprite.Group()
turretPowerups = pg.sprite.Group()
bosses = pg.sprite.Group()
mobs = pg.sprite.Group()
lazers = pg.sprite.Group()
antilazers = pg.sprite.Group()
player = pg.sprite.Group()
player = Player()
all_sprites.add(player)
# Spawns more mobs when they reach 0
for i in range(0,8):
mob = Mob()
all_sprites.add(mob)
mobs.add(mob)
# Keeps the game running at 60 frames per second
clock.tick(FPS)
# Checks for these ceirtain events if they are occuring
for event in pg.event.get():
# Window x button
if event.type == pg.QUIT:
running = False
# If the spacebar is pressed a lazer is shot by the player
elif event.type == pg.KEYDOWN:
if event.key == pg.K_SPACE:
player.pew()
# Updates all sprites while the game is running
all_sprites.update()
# Checks all the mobs if they have been hit by a lazer
for mob in mobs:
# Checks if a lazer has hit a mob and if does it makes the lazer disappear
shot = pg.sprite.spritecollide(mob, lazers, True)
# Everytime the mob is hit its hp lowers
if shot:
mob.hp-= 25
# print(mob.hp)
# Checks all the bosses if they have been hit by a lazer
for boss in bosses:
# Checks if a lazer has hit a boss and if does it makes the lazer disappear
shot1 = pg.sprite.spritecollide(boss,lazers,True)
# If the boss is shot it lower its hp
if shot1:
boss.hp-= 50
# print(boss.hp)
# Checks if the player has been hit by an antilazer, and if its true then the lazer disappears
damaged = pg.sprite.spritecollide(player, antilazers, True)
# Lowers the player's health everytime its hit
if damaged:
player.hp -= 10
# If the player's lives reach 0 the ending screen is shown
if player.lives == 0:
game_over = True
# Checks if the player picks up the HP powerup and if it does it makes the powerup disappear
hpBack = pg.sprite.spritecollide(player,hpPowerups, True)
# If the player picks up the HP powerup the health is restored
if hpBack:
player.hp = 100
# Checks if the player picks up an AMMO powerup and if it does it makes the powerup disappear
ammoBack= pg.sprite.spritecollide(player,ammoPowerups, True)
# If the player picks up the AMMO powerup it gets 50 ammo back
if ammoBack:
player.ammo += 50
# Checks if the player picks up a TURRET powerup and if it does it makes the powerup disappear
turretBack = pg.sprite.spritecollide(player,turretPowerups, True)
# If the player picks up the TURRET powerup, a turret is added
if turretBack:
player.powerup()
# Checks if the player collides with any of the enemies
hits = pg.sprite.spritecollide(player, bosses, True)
hits = pg.sprite.spritecollide(player, mobs, True)
# If the player collides it will lose a life and if it reaches 0 lives the game will end
if hits:
player.lives -=1
if player.lives == 0:
game_over = True
# Creates more mobs if there are 0
if len(mobs) == 0:
for i in range(0,8):
mob = Mob()
all_sprites.add(mob)
mobs.add(mob)
# Creates more bosses if there are 0
if len(bosses) == 0:
# Specifies when the boss can spawn im
if player.score % 20 == 0:
boss = Boss()
all_sprites.add(boss)
bosses.add(boss)
# Creates the moving background
background_rect2.y = background_rect.y - 600
background_rect.y+= player.speedy
background_rect2.y+= player.speedy
if background_rect2.y >- 0:
background_rect.y = background_rect.y - 600
# Draw's text and the bars, among other things
screen.fill(DARKBLUE)
screen.blit(background_image, background_rect)
screen.blit(background_image, background_rect2)
draw_text(screen, str(player.score), 24, WIDTH / 2, | |
<reponame>uguryagmur/RealTimeObjectDetection<gh_stars>0
"""Utility functions for Object Detection Networks"""
from __future__ import division
import cv2
import torch
import numpy as np
from PIL import Image, ImageDraw
def xyxy2xywh(box: torch.Tensor) -> torch.Tensor:
"""
Returns the xywh format of the input bounding box tensor
Arguments:
box (torch.Tensor): Bounding box tensor in xyxy format
Output:
output (torch.Tensor): Bounding box tensor in xywh format
"""
output = torch.zeros(box.size())
output[..., 0] = (box[..., 2] + box[..., 0])/2
output[..., 1] = (box[..., 3] + box[..., 1])/2
output[..., 2] = box[..., 2] - box[..., 0]
output[..., 3] = box[..., 3] - box[..., 1]
output[..., 4:] = box[..., 4:]
return output
def xywh2xyxy(box: torch.Tensor) -> torch.Tensor:
"""
Returns the xyxy format of the input bounding box tensor
Arguments:
box (torch.Tensor): Bounding box tensor in xywh format
Output:
output (torch.Tensor): Bounding box tensor in xyxy format
"""
output = torch.zeros(box.size())
output[..., 0] = (box[..., 0] - box[..., 2]/2)
output[..., 1] = (box[..., 1] - box[..., 3]/2)
output[..., 2] = (box[..., 0] + box[..., 2]/2)
output[..., 3] = (box[..., 1] + box[..., 3]/2)
output[..., 4:] = box[..., 4:]
return output
def xywh2YOLO(box: torch.Tensor, stride: float,
anchor: tuple):
"""
Returns the bounding box in a format similar to the last output
layer of the YOLO
Arguments:
box (torch.Tensor): Boudning box tensor in xywh format
stride (float): stride of the current layer to decrease the size
anchor (list): it is a list of tuples of anchor boxes
Outputs:
x_coor (int): x coordinate of the detection grid for the given box
y_coor (int): y coordinate of the detection grid for the given box
x (float): x output of the YOLO layer for the corresponding box
y (float): y output of the YOLO layer for the corresponding box
w (float): w output of the YOLO layer for the corresponding box
h (float): h output of the YOLO layer for the corresponding box
"""
x = box[..., 0].item()/stride
x_coor = int(box[..., 0].item()/stride)
y = box[..., 1].item()/stride
y_coor = int(box[..., 1].item()/stride)
x -= x_coor
y -= y_coor
w = torch.log(box[..., 2] / anchor[0] + 1e-16).item()
h = torch.log(box[..., 3] / anchor[1] + 1e-16).item()
return y_coor, x_coor, y, x, w, h
def draw_boxes(img: torch.Tensor, bbox: torch.Tensor,
from_tensor=False):
"""
Draws the given bounding boxes on the sample image and show it to
user to check whether the comming data is correct or not
Arguments:
img (torch.Tensor, PIL.Image): Image sample
bbox (torch.Tensor): Bounding box for corresponding image
from_tensor (bool): Flag for whether the input is tensor or PIL image
"""
if from_tensor:
img = img.transpose(0, 1).transpose(1, 2).numpy()*255
img = Image.fromarray(np.uint8(img))
draw = ImageDraw.Draw(img)
for b in bbox:
if b[5] != 1:
continue
box = b[:4].numpy()
bbox = [0, 0, 0, 0]
bbox[0] = int(box[0] - box[2]/2)
bbox[1] = int(box[1] - box[3]/2)
bbox[2] = int(box[0] + box[2]/2)
bbox[3] = int(box[1] + box[3]/2)
draw.rectangle(bbox, outline='red')
img.show()
def confidence_mask(tensor: torch.Tensor, confidence: float) -> torch.Tensor:
"""
Returns the masked form of the input tensor with respect to given
confidence
Arguments:
tensor (torch.Tensor): tensor to be masked by confidence
confidence (float): confidence score to use in masking
"""
# confidence masking for direct output of the YOLO layer
conf_mask = (tensor[:, :, 4] > confidence).float().unsqueeze(2)
return tensor*conf_mask
def bbox_iou(box1: torch.Tensor, box2: torch.Tensor) -> torch.Tensor:
"""
Returns the IoU of two bounding boxes
Arguments:
box1 (torch.Tensor) : coor tensor of the first box to calculate IoU
box2 (torch.Tensor) : coor tensor of the first box to calculate IoU
Returns:
iou (float): intersection/union ratio for the given bounding boxes
"""
# get the coordinates of bounding boxes
b1_x1, b1_y1 = box1[..., 0], box1[..., 1]
b1_x2, b1_y2 = box1[..., 2], box1[..., 3]
b2_x1, b2_y1 = box2[..., 0], box2[..., 1]
b2_x2, b2_y2 = box2[..., 2], box2[..., 3]
# get the corrdinates of the intersection rectangle
inter_rect_x1 = torch.max(b1_x1, b2_x1)
inter_rect_y1 = torch.max(b1_y1, b2_y1)
inter_rect_x2 = torch.min(b1_x2, b2_x2)
inter_rect_y2 = torch.min(b1_y2, b2_y2)
# intersection area
inter_area = torch.clamp(inter_rect_x2 - inter_rect_x1 + 1, min=0) * \
torch.clamp(inter_rect_y2 - inter_rect_y1 + 1, min=0)
# union Area
b1_area = (b1_x2 - b1_x1 + 1)*(b1_y2 - b1_y1 + 1)
b2_area = (b2_x2 - b2_x1 + 1)*(b2_y2 - b2_y1 + 1)
iou = inter_area / (b1_area + b2_area - inter_area)
return iou
def bbox_iou_wh(wh1: tuple, wh2: tuple) -> float:
"""
Returns the IoU value between an anchor box and bounding
box width and height
Arguments:
wh1 (tuple, list): width and height pair for first box
wh2 (tuple, list): width and height pair for second box
Returns:
iou (float): intersection/union for given w-h pairs
"""
w1, h1 = wh1[0], wh1[1]
w2, h2 = wh2[0], wh2[0]
intersect_area = min(w1, w2) * min(h1, h2)
union_area = w1*h1 + w2*h2 - intersect_area
return intersect_area/union_area
def predict_transform(prediction, inp_dim, anchors, num_class,
CUDA, TRAIN=False) -> torch.Tensor:
"""
Returns the prediction tensor with respect to the output of the YOLO
Detection Layers outputs
Arguements:
prediction (torch.Tensor) : output tensor of the Detection Layer
inp_dim (torch.Tensor) : input image dimensions
anchors (torch.Tensor) : anchors of the Darknet
num_class (int) : number of classes can be detected by Darknet
CUDA (bool): CUDA
TRAIN (bool):
Returns:
detection (torch.Tensor): masked and reorganized form of the detection
"""
batch_size = prediction.size(0)
stride = inp_dim // prediction.size(2)
grid_size = inp_dim // stride
bbox_attrs = 5 + num_class
num_anchors = len(anchors)
prediction = prediction.view(
batch_size, bbox_attrs*num_anchors, grid_size*grid_size)
prediction = prediction.transpose(1, 2).contiguous()
prediction = prediction.view(
batch_size, grid_size*grid_size*num_anchors, bbox_attrs)
# sigmoid the centre_x, centre_Y. and object confidencce
prediction[:, :, 0] = torch.sigmoid(prediction[:, :, 0])
prediction[:, :, 1] = torch.sigmoid(prediction[:, :, 1])
prediction[:, :, 4:] = torch.sigmoid(prediction[:, :, 4:])
# for trianing no offset addition
if not TRAIN:
# decreasing the size of the anchor boxes to match with prediction
anchors = [(a[0]/stride, a[1]/stride) for a in anchors]
# log space transform height and the width
anchors = torch.FloatTensor(anchors)
# add the center offsets
grid = torch.arange(grid_size)
b, a = torch.meshgrid(grid, grid)
x_offset = a.reshape(-1, 1)
y_offset = b.reshape(-1, 1)
if CUDA:
anchors = anchors.cuda()
x_offset = x_offset.cuda()
y_offset = y_offset.cuda()
x_y_offset = torch.cat((x_offset, y_offset), 1).repeat(
1, num_anchors).view(-1, 2).unsqueeze(0)
prediction[:, :, :2] += x_y_offset
anchors = anchors.repeat(grid_size*grid_size, 1).unsqueeze(0)
prediction[:, :, 2:4] = torch.exp(prediction[:, :, 2:4])*anchors
prediction[:, :, :4] *= stride
return prediction # -> shape = [batch, #_of_boxes, 5 + #_of_classes]
def write_results(prediction, num_class, confidence=0.6,
nms_conf=0.4) -> torch.Tensor:
"""
Returns the results of the predictions of the Darknet
as bounding boxes and class of the object
Arguments:
prediction (torch.Tensor) : output of the Darknet network
num_class (int) : number of classes can be detected by Darknet
confidence (float) : confidence of the detection
nms_conf (float) : non-max supression threshold (default=0.4)
Returns:
output (torch.Tensor, int): Returns 0 if there is no detection and
returns the bounding boxes attributes of darknet for each object
"""
# confidence masking
prediction = confidence_mask(prediction, confidence)
# transforming box attributes to corner coordinates
box_corner = prediction.new(prediction.shape)
box_corner[:, :, 0] = (prediction[:, :, 0] - prediction[:, :, 2]/2)
box_corner[:, :, 1] = (prediction[:, :, 1] - prediction[:, :, 3]/2)
box_corner[:, :, 2] = (prediction[:, :, 0] + prediction[:, :, 2]/2)
box_corner[:, :, 3] = (prediction[:, :, 1] + prediction[:, :, 3]/2)
prediction[:, :, :4] = box_corner[:, :, :4]
# obtaining the batch_size of the input tensor
batch_size = prediction.size(0)
write = False
for ind in range(batch_size):
image_pred = prediction[ind]
# generating the max confidence sequence
max_conf, max_conf_score = torch.max(image_pred[:, 5:5 +
num_class], 1)
max_conf = max_conf.float().unsqueeze(1)
max_conf_score = max_conf_score.float().unsqueeze(1)
seq = (image_pred[:, :5], max_conf, max_conf_score)
image_pred = torch.cat(seq, 1)
non_zero_ind = (torch.nonzero(image_pred[:, 4]))
try:
image_pred_ = image_pred[non_zero_ind.squeeze(), :].view(-1, 7)
except IndexError:
continue
# if there is no detection for this batch image
if image_pred_.shape[0] == 0:
continue
# get the various classes detected in the image
# -1 index holds the class index
img_classes = torch.unique(image_pred_[:, -1])
for cls in img_classes:
# perform NMS
# get the detections with one particular class
cls_mask = image_pred_ * \
(image_pred_[:, -1] == cls).float().unsqueeze(1)
class_mask_ind = torch.nonzero(cls_mask[:, -2]).squeeze()
image_pred_class | |
when an entity is created.
indexed (bool): Indicates if the value should be indexed.
repeated (bool): Indicates if this property is repeated, i.e. contains
multiple values.
required (bool): Indicates if this property is required on the given
model type.
default (~datetime.datetime): The default value for this property.
choices (Iterable[~datetime.datetime]): A container of allowed values
for this property.
validator (Callable[[~google.cloud.ndb.model.Property, Any], bool]): A
validator to be used to check values.
verbose_name (str): A longer, user-friendly name for this property.
write_empty_list (bool): Indicates if an empty list should be written
to the datastore.
Raises:
ValueError: If ``repeated=True`` and ``auto_now=True``.
ValueError: If ``repeated=True`` and ``auto_now_add=True``.
"""
_auto_now = False
_auto_now_add = False
def __init__(
self,
name=None,
*,
auto_now=None,
auto_now_add=None,
indexed=None,
repeated=None,
required=None,
default=None,
choices=None,
validator=None,
verbose_name=None,
write_empty_list=None
):
super(DateTimeProperty, self).__init__(
name=name,
indexed=indexed,
repeated=repeated,
required=required,
default=default,
choices=choices,
validator=validator,
verbose_name=verbose_name,
write_empty_list=write_empty_list,
)
if self._repeated:
if auto_now:
raise ValueError(
"DateTimeProperty {} could use auto_now and be "
"repeated, but there would be no point.".format(self._name)
)
elif auto_now_add:
raise ValueError(
"DateTimeProperty {} could use auto_now_add and be "
"repeated, but there would be no point.".format(self._name)
)
if auto_now is not None:
self._auto_now = auto_now
if auto_now_add is not None:
self._auto_now_add = auto_now_add
def _validate(self, value):
"""Validate a ``value`` before setting it.
Args:
value (~datetime.datetime): The value to check.
Raises:
.BadValueError: If ``value`` is not a :class:`~datetime.datetime`.
"""
if not isinstance(value, datetime.datetime):
raise exceptions.BadValueError(
"Expected datetime, got {!r}".format(value)
)
@staticmethod
def _now():
"""datetime.datetime: Return current datetime.
Subclasses will override this to return different forms of "now".
"""
return datetime.datetime.utcnow()
def _prepare_for_put(self, entity):
"""Sets the current timestamp when "auto" is set.
If one of the following scenarios occur
* ``auto_now=True``
* ``auto_now_add=True`` and the ``entity`` doesn't have a value set
then this hook will run before the ``entity`` is ``put()`` into
the datastore.
Args:
entity (Model): An entity with values.
"""
if self._auto_now or (
self._auto_now_add and not self._has_value(entity)
):
value = self._now()
self._store_value(entity, value)
def _db_set_value(self, v, p, value):
"""Helper for :meth:`_serialize`.
Raises:
NotImplementedError: Always. This method is virtual.
"""
raise NotImplementedError
def _db_get_value(self, v, unused_p):
"""Helper for :meth:`_deserialize`.
Raises:
NotImplementedError: Always. This method is deprecated.
"""
raise exceptions.NoLongerImplementedError()
class DateProperty(DateTimeProperty):
"""A property that contains :class:`~datetime.date` values.
.. automethod:: _to_base_type
.. automethod:: _from_base_type
.. automethod:: _validate
"""
__slots__ = ()
def _validate(self, value):
"""Validate a ``value`` before setting it.
Args:
value (~datetime.date): The value to check.
Raises:
.BadValueError: If ``value`` is not a :class:`~datetime.date`.
"""
if not isinstance(value, datetime.date):
raise exceptions.BadValueError(
"Expected date, got {!r}".format(value)
)
def _to_base_type(self, value):
"""Convert a value to the "base" value type for this property.
Args:
value (~datetime.date): The value to be converted.
Returns:
~datetime.datetime: The converted value: a datetime object with the
time set to ``00:00``.
Raises:
TypeError: If ``value`` is not a :class:`~datetime.date`.
"""
if not isinstance(value, datetime.date):
raise TypeError(
"Cannot convert to datetime expected date value; "
"received {}".format(value)
)
return datetime.datetime(value.year, value.month, value.day)
def _from_base_type(self, value):
"""Convert a value from the "base" value type for this property.
Args:
value (~datetime.datetime): The value to be converted.
Returns:
~datetime.date: The converted value: the date that ``value``
occurs on.
"""
return value.date()
@staticmethod
def _now():
"""datetime.datetime: Return current date."""
return datetime.datetime.utcnow().date()
class TimeProperty(DateTimeProperty):
"""A property that contains :class:`~datetime.time` values.
.. automethod:: _to_base_type
.. automethod:: _from_base_type
.. automethod:: _validate
"""
__slots__ = ()
def _validate(self, value):
"""Validate a ``value`` before setting it.
Args:
value (~datetime.time): The value to check.
Raises:
.BadValueError: If ``value`` is not a :class:`~datetime.time`.
"""
if not isinstance(value, datetime.time):
raise exceptions.BadValueError(
"Expected time, got {!r}".format(value)
)
def _to_base_type(self, value):
"""Convert a value to the "base" value type for this property.
Args:
value (~datetime.time): The value to be converted.
Returns:
~datetime.datetime: The converted value: a datetime object with the
date set to ``1970-01-01``.
Raises:
TypeError: If ``value`` is not a :class:`~datetime.time`.
"""
if not isinstance(value, datetime.time):
raise TypeError(
"Cannot convert to datetime expected time value; "
"received {}".format(value)
)
return datetime.datetime(
1970,
1,
1,
value.hour,
value.minute,
value.second,
value.microsecond,
)
def _from_base_type(self, value):
"""Convert a value from the "base" value type for this property.
Args:
value (~datetime.datetime): The value to be converted.
Returns:
~datetime.time: The converted value: the time that ``value``
occurs at.
"""
return value.time()
@staticmethod
def _now():
"""datetime.datetime: Return current time."""
return datetime.datetime.utcnow().time()
class StructuredProperty(Property):
__slots__ = ()
def __init__(self, *args, **kwargs):
raise NotImplementedError
class LocalStructuredProperty(BlobProperty):
__slots__ = ()
def __init__(self, *args, **kwargs):
raise NotImplementedError
class GenericProperty(Property):
__slots__ = ()
def __init__(self, *args, **kwargs):
raise NotImplementedError
class ComputedProperty(GenericProperty):
__slots__ = ()
def __init__(self, *args, **kwargs):
raise NotImplementedError
class MetaModel(type):
"""Metaclass for Model.
This exists to fix up the properties -- they need to know their name. For
example, defining a model:
.. code-block:: python
class Book(ndb.Model):
pages = ndb.IntegerProperty()
the ``Book.pages`` property doesn't have the name ``pages`` assigned.
This is accomplished by calling the ``_fix_up_properties()`` method on the
class itself.
"""
def __init__(cls, name, bases, classdict):
super(MetaModel, cls).__init__(name, bases, classdict)
cls._fix_up_properties()
def __repr__(cls):
props = []
for _, prop in sorted(cls._properties.items()):
props.append("{}={!r}".format(prop._code_name, prop))
return "{}<{}>".format(cls.__name__, ", ".join(props))
class Model(metaclass=MetaModel):
"""A class describing Cloud Datastore entities.
Model instances are usually called entities. All model classes
inheriting from :class:`Model` automatically have :class:`MetaModel` as
their metaclass, so that the properties are fixed up properly after the
class is defined.
Because of this, you cannot use the same :class:`Property` object to
describe multiple properties -- you must create separate :class:`Property`
objects for each property. For example, this does not work:
.. code-block:: python
reuse_prop = ndb.StringProperty()
class Wrong(ndb.Model):
first = reuse_prop
second = reuse_prop
instead each class attribute needs to be distinct:
.. code-block:: python
class NotWrong(ndb.Model):
first = ndb.StringProperty()
second = ndb.StringProperty()
The "kind" for a given :class:`Model` subclass is normally equal to the
class name (exclusive of the module name or any other parent scope). To
override the kind, define :meth:`_get_kind`, as follows:
.. code-block:: python
class MyModel(ndb.Model):
@classmethod
def _get_kind(cls):
return "AnotherKind"
A newly constructed entity will not be persisted to Cloud Datastore without
an explicit call to :meth:`put`.
User-defined properties can be passed to the constructor via keyword
arguments:
.. doctest:: model-keywords
>>> class MyModel(ndb.Model):
... value = ndb.FloatProperty()
... description = ndb.StringProperty()
...
>>> MyModel(value=7.34e22, description="Mass of the moon")
MyModel(description='Mass of the moon', value=7.34e+22)
In addition to user-defined properties, there are six accepted keyword
arguments:
* ``key``
* ``id``
* ``app``
* ``namespace``
* ``parent``
* ``projection``
Of these, ``key`` is a public attribute on :class:`Model` instances:
.. testsetup:: model-key
from google.cloud import ndb
class MyModel(ndb.Model):
value = ndb.FloatProperty()
description = ndb.StringProperty()
.. doctest:: model-key
>>> entity1 = MyModel(id=11)
>>> entity1.key
Key('MyModel', 11)
>>> entity2 = MyModel(parent=entity1.key)
>>> entity2.key
Key('MyModel', 11, 'MyModel', None)
>>> entity3 = MyModel(key=ndb.Key(MyModel, "e-three"))
>>> entity3.key
Key('MyModel', 'e-three')
However, a user-defined property can be defined on the model with the
same name as one of those keyword arguments. In this case, the user-defined
property "wins":
.. doctest:: model-keyword-id-collision
>>> class IDCollide(ndb.Model):
... id = ndb.FloatProperty()
...
>>> entity = IDCollide(id=17)
>>> entity
IDCollide(id=17.0)
>>> entity.key is None
True
In such cases of argument "collision", an underscore can be used as a
keyword argument prefix:
.. doctest:: model-keyword-id-collision
>>> entity = IDCollide(id=17, _id=2009)
>>> entity
IDCollide(key=Key('IDCollide', 2009), id=17.0)
For the **very** special case of a property named ``key``, the ``key``
attribute will no longer be the entity's key but instead will be the
property value. Instead, the entity's key is accessible via ``_key``:
.. doctest:: model-keyword-key-collision
>>> class KeyCollide(ndb.Model):
... key = ndb.StringProperty()
...
>>> entity1 = KeyCollide(key="Take fork in road", id=987)
>>> entity1
KeyCollide(_key=Key('KeyCollide', 987), key='Take fork in road')
>>> entity1.key
'Take fork in road'
>>> entity1._key
Key('KeyCollide', 987)
>>>
>>> entity2 = KeyCollide(key="Go slow", _key=ndb.Key(KeyCollide, 1))
>>> entity2
KeyCollide(_key=Key('KeyCollide', 1), key='Go slow')
The constructor accepts keyword arguments based on the properties
defined on model subclass. However, using keywords for nonexistent
or non-:class:`Property` class attributes will cause a failure:
.. doctest:: model-keywords-fail
>>> | |
int),
('FULL ELECTROSTATIC EVALUATION FREQUENCY', 'fullElectFrequency', 1,
int),
('RANDOM NUMBER SEED', 'seed', 1, int),
# Langevin dynamics
('LANGEVIN DYNAMICS ACTIVE', 'langevin', -1, True),
('LANGEVIN TEMPERATURE', 'langevinTemp', 1, float),
('LANGEVIN DAMPING COEFFICIENT IS', 'langevinDamping', 1, float),
('LANGEVIN DYNAMICS APPLIED TO HYDROGENS', 'langevinHydrogen', -1,
True),
('LANGEVIN DYNAMICS NOT APPLIED TO HYDROGENS', 'langevinHydrogen', -1,
False),
# Lowe-Andersen thermostat
('LOWE-ANDERSEN DYNAMICS ACTIVE', 'loweAndersen', -1, True),
('LOWE-ANDERSEN TEMPERATURE', 'loweAndersenTemp', 1, float),
('LOWE-ANDERSEN RATE', 'loweAndersenRate', 1, float),
('LOWE-ANDERSEN CUTOFF', 'loweAndersenCutoff', 1, float),
# temperature coupling
('TEMPERATURE COUPLING ACTIVE', 'tCouple', -1, True),
('COUPLING TEMPERATURE', 'tCoupleTemp', 1, float),
# velocity rescaling
('VELOCITY RESCALE FREQ', 'rescaleFreq', 1, int),
('VELOCITY RESCALE TEMP', 'rescaleTemp', 1, float),
# velocity reassignment
('VELOCITY REASSIGNMENT FREQ', 'reassignFreq', 1, int),
('VELOCITY REASSIGNMENT TEMP', 'reassignTemp', 1, float),
#
# barostats
#
('PRESSURE CONTROL IS GROUP-BASE', 'useGroupPressure', -1, True),
('PRESSURE CONTROL IS ATOM-BASE', 'useGroupPressure', -1, False),
('CELL FLUCTUATION IS ANISOTROPIC', 'useFlexibleCell', -1, True),
('CELL FLUCTUATION IS ISOTROPIC', 'useFlexibleCell', -1, False),
('SHAPE OF CELL IS CONSTRAINED IN X-Y PLANE', 'useConstantRatio', -1,
True),
('CONSTANT AREA PRESSURE CONTROL ACTIVE', 'useConstantArea', -1, True),
('TARGET SURFACE TENSION IS', 'surfaceTensionTarget', 1, float),
# Langevin piston
('LANGEVIN PISTON PRESSURE CONTROL ACTIVE', 'langevinPiston', -1,
True),
('TARGET PRESSURE IS', 'langevinPistonTarget', 1, float),
('OSCILLATION PERIOD IS', 'langevinPistonPeriod', 1, float),
('DECAY TIME IS', 'langevinPistonDecay', 1, float),
('PISTON TEMPERATURE IS', 'langevinPistonTemp', 1, float),
#
# Electrostatics
#
('SWITCHING ACTIVE', 'switching', -1, True),
('VDW FORCE SWITCHING ACTIVE', 'vdwForceSwitching', -1, True),
('LONG-RANGE LJ:', 'LJcorrection', -1, True),
('SWITCHING ON', 'switchdist', 1, float),
('SWITCHING OFF', 'cutoff', 1, float),
('CUTOFF', 'cutoff', 1, float),
('PAIRLIST DISTANCE', 'pairlistdist', 1, float),
('PAIRLISTS PER CYCLE', 'pairlistsPerCycle', 1, int),
('EXCLUDE', 'exclude', 1, _exclude2str),
('1-4 ELECTROSTATICS SCALED BY', '1-4scaling', 1, float),
# PME
('PARTICLE MESH EWALD (PME) ACTIVE', 'PME', -1, True),
('PME TOLERANCE', 'PMETolerance', 1, float),
('PME INTERPOLATION ORDER', 'PMEInterpOrder', 1, int),
('PME GRID DIMENSIONS', ('PMEGridSizeX', 'PMEGridSizeY',
'PMEGridSizeZ'), 3, _vec2list(int)),
('PME MAXIMUM GRID SPACING', 'PMEGridSpacing', 1, float),
# GB
('GBIS GENERALIZED BORN IMPLICIT SOLVENT ACTIVE', 'GBIS', -1, True),
('GBIS BORN RADIUS CUTOFF:', 'alphaCutoff', 1, float),
('GBIS ION CONCENTRATION:', 'ionConcentration', 1, float),
#
# Alchemy (Note that these have parallel/redundant output)
#
# FEP
('ALCHEMICAL', ('alch', 'alchType'), 1, _alchtype),
('FEP CURRENT LAMBDA VALUE', 'alchLambda', 1, float),
('FEP COMPARISON LAMBDA VALUE', 'alchLambda2', 1, float),
('FEP CURRENT LAMBDA VALUE SET TO INCREASE IN EVERY', 'alchLambdaFreq',
1, float),
('FEP INTRA-ALCHEMICAL NON-BONDED INTERACTIONS WILL BE DECOUPLED',
'alchDecouple', -1, False),
('FEP INTRA-ALCHEMICAL NON-BONDED INTERACTIONS WILL BE RETAINED',
'alchDecouple', -1, True),
('FEP INTRA-ALCHEMICAL BONDED INTERACTIONS WILL BE DECOUPLED',
'alchBondDecouple', -1, True),
('FEP INTRA-ALCHEMICAL BONDED INTERACTIONS WILL BE RETAINED',
'alchBondDecouple', -1, False),
('FEP VDW SHIFTING COEFFICIENT', 'alchVdwShiftCoeff', 1, float),
('FEP ELEC. ACTIVE FOR EXNIHILATED PARTICLES BETWEEN LAMBDA =',
'alchElecLambdaStart', 1, float),
(('FEP VDW ACTIVE FOR EXNIHILATED PARTICLES BETWEEN LAMBDA = 0 AND '
'LAMBDA ='), 'alchVdwLambdaEnd', 1, float),
(('FEP BOND ACTIVE FOR EXNIHILATED PARTICLES BETWEEN LAMBDA = 0 AND '
'LAMBDA ='), 'alchBondLambdaEnd', 1, float),
# TI
('THERMODYNAMIC INTEGRATION', ('alch', 'alchType'), 1, _alchtype),
('TI LAMBDA VALUE', 'alchLambda', 1, float),
('TI COMPARISON LAMBDA VALUE', 'alchLambda2', 1, float),
('TI CURRENT LAMBDA VALUE SET TO INCREASE IN EVERY', 'alchLambdaFreq',
1, float),
('TI INTRA-ALCHEMICAL NON-BONDED INTERACTIONS WILL BE DECOUPLED',
'alchDecouple', -1, False),
('TI INTRA-ALCHEMICAL NON-BONDED INTERACTIONS WILL BE RETAINED',
'alchDecouple', -1, True),
('TI INTRA-ALCHEMICAL BONDED INTERACTIONS WILL BE DECOUPLED',
'alchBondDecouple', -1, True),
('TI INTRA-ALCHEMICAL BONDED INTERACTIONS WILL BE RETAINED',
'alchBondDecouple', -1, False),
('TI VDW SHIFTING COEFFICIENT', 'alchVdwShiftCoeff', 1, float),
('TI ELEC. ACTIVE FOR ANNIHILATED PARTICLES BETWEEN LAMBDA =',
'alchElecLambdaStart', 1, float),
(('TI VDW ACTIVE FOR EXNIHILATED PARTICLES BETWEEN LAMBDA = 0 AND '
'LAMBDA ='), 'alchVdwLambdaEnd', 1, float),
(('TI BOND ACTIVE FOR EXNIHILATED PARTICLES BETWEEN LAMBDA = 0 AND '
'LAMBDA ='), 'alchBondLambdaEnd', 1, float),
#
# Enhanced Sampling
#
('ACCELERATED MD ACTIVE', 'accelMD', -1, True),
('BOOSTING DIHEDRAL POTENTIAL', 'accelMDDihe', -1, True),
('accelMDE:', ('accelMDE', None, None, 'accelMDAlpha'), 4,
_vec2list(float)),
('accelMD WILL BE DONE FROM STEP', 'accelMDFirstStep', 1, int),
('accelMD OUTPUT FREQUENCY', 'accelMDOutFreq', 1, int)
)
# Any given NamdLog may or may not have the following attributes
# depending on the simulation settings.
_attr_names = ['energy', 'ti']
# _attr_names = ['energy', 'amd_energy', 'ti']
def __init__(self, *filenames, **kwargs):
# Assign default values to optional keyword arguments. This can be
# done more elegantly at the expense of Python 2.x support.
#
info = (bool(kwargs['info']) if 'info' in kwargs else True)
energy = (bool(kwargs['energy']) if 'energy' in kwargs else True)
xgs = (bool(kwargs['xgs']) if 'xgs' in kwargs else False)
self._nostep0 = False
self.filenames = [str(filename) for filename in filenames]
basefile = self.filenames[0]
# Read INFO: lines from the header or not.
if info:
self.info = NamdLog.read_info(basefile)
else:
self.info = None
# Read ENERGY: lines or not.
if energy:
energies = NamdLog.read_energy(basefile)
for kw, value in energies.iteritems():
self.__dict__[kw] = value
else:
self.energy = None
# Read XGS specific output - EXPERIMENTAL!
if xgs:
self.xgs = {}
xgstype, ladder, weights, state = NamdLog.read_xgs(basefile)
self.xgs['type'] = xgstype
self.xgs['ladder'] = ladder
self.xgs['weights'] = weights
self.xgs['state'] = state
# Add additional files by recursive in-place addition.
for filename in self.filenames[1:]:
self += NamdLog(filename, **kwargs)
def __repr__(self):
args = ', '.join(["'%s'"%f for f in self.filenames])
return '%s(%s)'%(self.__class__, args)
@staticmethod
def read_info(filename):
"""Return a dict of keyword/value pairs after parsing the 'Info:'
lines of a NAMD output log. Dict queries are case-insensitive.
"""
info = {}
def add_info(key, value):
"""Helper function for handling list values versus other types."""
if key in NamdLog._multivals: # info[key] is a list of values
try:
info[key].append(value)
except KeyError:
info[key] = info.get(key, [value])
else: # info[key] is a single value
info[key] = value
def parse(tokens, key, configkey, nvalfields, convert):
"""Helper function for handling different keyword/value formats
NAMD does not have a strict standard on this output format.
"""
nkeyfields = len(key.split())
test_key = ' '.join(tokens[:nkeyfields])
if test_key == key:
if nvalfields < 1: # this is a bool
val = convert
elif nvalfields == 1: # this is a scalar
val = convert(tokens[nkeyfields])
else: # this is a vector or tensor
val = convert(tokens[nkeyfields:(nkeyfields+nvalfields)])
if hasattr(configkey, '__iter__'):
# In case multiple values are on a single line...
for ck, v in zip(configkey, val):
if ck is not None:
add_info(ck, v)
else:
add_info(configkey, val)
return True
else:
return False
INFO = 'Info:'
infile = open(filename, 'r')
for line in infile:
# The Info section is over at the first blank line after it
# starts; stop reading when that is reached. Until then, inspect
# all lines starting with "Info:". See the class definition of
# _kvpairs above to see how this is done.
#
if not line.strip(): break
if line.startswith(INFO):
tokens = line.lstrip(INFO).strip().split()
for key, confkey, nfields, dtype in NamdLog._kvpairs:
if parse(tokens, key, confkey, nfields, dtype):
break
infile.close()
if not info:
raise IOError('Bad NAMD output. Check for errors!')
return CaseInsensitiveDict(info)
@staticmethod
def read_energy(filename):
"""Read the energy entries from a NAMD log file. All types of energy
outputs are made into a dict containing numpy arrays. These are in turn
accessible as keywords of a single dict.
"""
energies = {}
etag = 'energy'
# amdtag = 'amd_energy'
ttag = 'ti'
TITLE = 'ETITLE:'
TITITLE = 'TITITLE:'
FORMAT = 'ENERGY:'
# AMDFORMAT = 'ACCELERATED MD:'
TIFORMAT = 'TI:'
# standard MD energy log
#
energies[etag] = OrderedDict()
term_indices = {}
# non-standard energy logs (may or may not exist)
#
# energies[amdtag] = OrderedDict()
# amd_term_indices = {}
#
energies[ttag] = OrderedDict()
ti_term_indices = {}
terms_are_defined = False
amd_terms_are_defined = False
ti_terms_are_defined = False
for line in open(filename, 'r'):
# standard MD energy log
#
if line.startswith(TITLE) and not terms_are_defined:
terms_are_defined = True
terms = line.lstrip(TITLE).strip().split()
for i, term in enumerate(terms):
term_indices[i] = term
energies[etag][term] = []
elif line.startswith(FORMAT):
values = line.lstrip(FORMAT).strip().split()
for i, value in enumerate(values):
energies[etag][term_indices[i]].append(float(value))
# accelerated MD energy log
#
# elif line.startswith(AMDFORMAT):
# terms = line.strip().split()[2::2]
# values = line.strip().split()[3::2]
# if not amd_terms_are_defined:
# amd_terms_are_defined = True
# for | |
"""
Monkey patching for pandas
"""
import copy
import os
import pathlib
import sklearn.pipeline
from mlinspect.to_sql.py_to_sql_mapping import TableInfo, OpTree, sql_obj_prefix
from mlinspect.to_sql.py_to_sql_mapping import OpTree, ColumnTransformerInfo, ColumnTransformerLevel
from mlinspect.backends._pandas_backend import PandasBackend
from mlinspect.monkeypatching._monkey_patching_utils import get_dag_node_for_id
from mlinspect.monkeypatching._patch_sklearn import call_info_singleton
from mlinspect.to_sql._mode import SQLMode, SQLObjRep
from sklearn import pipeline
import gorilla
import numpy
import pandas
from sklearn import preprocessing, compose, tree, impute, linear_model, model_selection
from tensorflow.keras.wrappers import scikit_learn as keras_sklearn_external # pylint: disable=no-name-in-module
from tensorflow.python.keras.wrappers import scikit_learn as keras_sklearn_internal # pylint: disable=no-name-in-module
from mlinspect.inspections._histogram_for_columns import HistogramForColumns
from mlinspect.backends._backend import BackendResult
from mlinspect.backends._sklearn_backend import SklearnBackend
from mlinspect.inspections._inspection_input import OperatorContext, FunctionInfo, OperatorType
from mlinspect.instrumentation import _pipeline_executor
from mlinspect.instrumentation._dag_node import DagNode, BasicCodeLocation, DagNodeDetails, CodeReference
from mlinspect.instrumentation._pipeline_executor import singleton
from mlinspect.monkeypatching._monkey_patching_utils import execute_patched_func, add_dag_node, \
execute_patched_func_indirect_allowed, get_input_info, execute_patched_func_no_op_id, get_optional_code_info_or_none
from mlinspect.monkeypatching._patch_numpy import MlinspectNdarray
from dataclasses import dataclass
from typing import Dict
from mlinspect.utils import store_timestamp
import time
pandas.options.mode.chained_assignment = None # default='warn'
# SKLEARN:
class SklearnCallInfo:
""" Contains info like lineno from the current Transformer so indirect utility function calls can access it """
# pylint: disable=too-few-public-methods
transformer_filename: str or None = None
transformer_lineno: int or None = None
transformer_function_info: FunctionInfo or None = None
transformer_optional_code_reference: CodeReference or None = None
transformer_optional_source_code: str or None = None
column_transformer_active: bool = False
@dataclass
class FitDataCollection:
"""
Data Container for the fitted variables of SimpleImpute.
"""
col_to_fit_block_name: Dict[str, str]
fully_set: bool
extra_info = {} # For the KBin
call_info_singleton = SklearnCallInfo()
column_transformer_share = None
just_transform_run = {} # whether we are fitting or transforming. | Adapt behaviour!
last_name_for_concat_fit = {}
@gorilla.patches(compose.ColumnTransformer)
class SklearnComposePatching:
""" Patches for sklearn ColumnTransformer"""
# pylint: disable=too-few-public-methods
@gorilla.name('__init__')
@gorilla.settings(allow_hit=True)
def patched__init__(self,
transformers, *,
remainder='drop',
sparse_threshold=0.3,
n_jobs=None,
transformer_weights=None,
verbose=False):
""" Patch for ('sklearn.compose._column_transformer', 'ColumnTransformer') """
# pylint: disable=no-method-argument
original = gorilla.get_original_attribute(compose.ColumnTransformer, '__init__')
def execute_inspections(_, caller_filename, lineno, optional_code_reference, optional_source_code):
""" Execute inspections, add DAG node """
# pylint: disable=attribute-defined-outside-init
original(self, transformers, remainder=remainder, sparse_threshold=sparse_threshold, n_jobs=n_jobs,
transformer_weights=transformer_weights, verbose=verbose)
self.mlinspect_filename = caller_filename
self.mlinspect_lineno = lineno
self.mlinspect_optional_code_reference = optional_code_reference
self.mlinspect_optional_source_code = optional_source_code
return execute_patched_func_indirect_allowed(execute_inspections)
@gorilla.name('fit_transform')
@gorilla.settings(allow_hit=True)
def patched_fit_transform(self, *args, **kwargs):
""" Patch for ('sklearn.compose._column_transformer', 'ColumnTransformer') """
# pylint: disable=no-method-argument
# TO_SQL: ###############################################################################################
fit_data, just_transform = differentiate_fit_transform(self, args[0], set_attributes=False)
# TO_SQL DONE! ##########################################################################################
if not just_transform:
call_info_singleton.transformer_filename = self.mlinspect_filename
call_info_singleton.transformer_lineno = self.mlinspect_lineno
call_info_singleton.transformer_function_info = FunctionInfo('sklearn.compose._column_transformer',
'ColumnTransformer')
call_info_singleton.transformer_optional_code_reference = self.mlinspect_optional_code_reference
call_info_singleton.transformer_optional_source_code = self.mlinspect_optional_source_code
call_info_singleton.column_transformer_active = True
original = gorilla.get_original_attribute(compose.ColumnTransformer, 'fit_transform')
op_id = singleton.get_next_op_id()
else:
original = gorilla.get_original_attribute(compose.ColumnTransformer, 'transform')
op_id = singleton.sql_logic.get_unique_id()
# TO_SQL: ###############################################################################################
# When calling original(self, *args, **kwargs) the overwritten Pipeline-functions (like SimpleImpute)
# will be called with the relevant slice of the table.
name, ti = singleton.mapping.get_name_and_ti(args[0])
# Materialize the source if desired: -> better performance f.e. for Postgres
materialize_query_with_name(name, ti.non_tracking_cols)
cr_to_col_map = {} # code_reference to column mapping
cr_to_level_map = {} # code_reference to level mapping
cols_to_keep = []
for _, (_, op_obj, target_cols) in enumerate(self.transformers):
target_cols = [f"\"{x}\"" for x in target_cols]
cols_to_keep += target_cols
if isinstance(op_obj, pipeline.Pipeline):
for level_s, (_, step) in enumerate(op_obj.steps): # need to take pipeline apart
cr = step.mlinspect_optional_code_reference
cr_to_col_map[cr] = target_cols
cr_to_level_map[cr] = level_s
else:
cr = op_obj.mlinspect_optional_code_reference
cr_to_col_map[cr] = target_cols
cr_to_level_map[cr] = 0
levels_list = [ColumnTransformerLevel({}, set(), set(), []) for _ in
range(max(cr_to_level_map.values()) + 1)]
# HANDLE "drop" case:
cols_to_drop = []
cols_to_keep = list(dict.fromkeys(cols_to_keep)) # keeps order
if self.remainder == "drop":
cols_to_drop = list(set(ti.non_tracking_cols) - set(cols_to_keep))
else:
cols_to_keep = ti.non_tracking_cols # here we keep all columns
# We will need pass the input of this function to the subclass, to be able to achieve the mapping.
global column_transformer_share
column_transformer_share = ColumnTransformerInfo(self,
levels=levels_list,
levels_map=cr_to_level_map,
cr_to_col_map=cr_to_col_map,
target_obj=args[0], cols_to_drop=cols_to_drop)
if len(args) == 2 and len(kwargs) == 0:
result = original(self, args[0], **kwargs)
else:
result = original(self, *args, **kwargs)
query_levels = column_transformer_share.levels
# Query optimization for further executions:
final_sql_code = ""
last_sql_name = name
for i, level in enumerate(query_levels):
select_block = []
column_map = level.column_map
for col in cols_to_keep:
if col in column_map.keys():
select_block.append(column_map[col])
else:
select_block.append(f"\t{col}")
select_block_s = ',\n'.join(select_block) + ",\n\t" + ", ".join(ti.tracking_cols)
from_block_s = ", ".join(level.from_block | {last_sql_name})
where_block_s = " AND \n".join(level.where_block)
sql_code = f"SELECT \n{select_block_s} \n" \
f"FROM {from_block_s}"
if where_block_s != "":
sql_code += "\nWHERE\n" + where_block_s
if i < len(query_levels) - 1:
sql_name, sql_code = singleton.sql_logic.wrap_in_sql_obj(sql_code, op_id,
f"lvl{i}_column_transformer",
force_cte=True)
else:
# Last level reached:
final_sql_code = final_sql_code[:-2] + "\n"
sql_name = f"block_column_transformer_lvl{i}"
# substitute old data sources with new ones:
for old_s in level.sql_source:
sql_code = sql_code.replace(old_s, last_sql_name)
final_sql_code += sql_code + ",\n"
last_sql_name = sql_name
if len(query_levels) > 1:
final_sql_code = "WITH " + final_sql_code
final_sql_code = final_sql_code[:-2]
cte_name, sql_code = singleton.sql_logic.finish_sql_call(final_sql_code, op_id, result,
tracking_cols=ti.tracking_cols,
non_tracking_cols=cols_to_keep,
operation_type=OperatorType.TRANSFORMER,
cte_name=last_sql_name)
singleton.pipeline_container.add_statement_to_pipe(cte_name, sql_code, cols_to_keep=cols_to_keep)
global last_name_for_concat_fit
last_name_for_concat_fit = {}
fit_data.fully_set = True
# TO_SQL DONE! ##########################################################################################
if not just_transform:
call_info_singleton.column_transformer_active = False
return result
@gorilla.name('transform')
@gorilla.settings(allow_hit=True)
def patched_transform(self, *args, **kwargs):
# pylint: disable=no-method-argument
original = gorilla.get_original_attribute(compose.ColumnTransformer, 'transform')
return transform_logic(original, self, *args, **kwargs) # fit_transform knows only to execute transform
# @gorilla.name('fit')
# @gorilla.settings(allow_hit=True)
# def patched_fit(self, *args, **kwargs):
# # pylint: disable=no-method-argument
# original = gorilla.get_original_attribute(compose.ColumnTransformer, 'fit')
# return original(self, *args, **kwargs)
@gorilla.name('_hstack')
@gorilla.settings(allow_hit=True)
def patched_hstack(self, *args, **kwargs):
""" Patch for ('sklearn.compose._column_transformer', 'ColumnTransformer') """
# pylint: disable=no-method-argument, unused-argument, too-many-locals
original = gorilla.get_original_attribute(compose.ColumnTransformer, '_hstack')
op_id = singleton.get_next_op_id()
if not call_info_singleton.column_transformer_active:
return original(self, *args, **kwargs)
input_tuple = args[0]
function_info = FunctionInfo('sklearn.compose._column_transformer', 'ColumnTransformer')
input_infos = []
for input_df_obj in input_tuple:
input_info = get_input_info(input_df_obj, self.mlinspect_filename, self.mlinspect_lineno, function_info,
self.mlinspect_optional_code_reference, self.mlinspect_optional_source_code)
input_infos.append(input_info)
operator_context = OperatorContext(OperatorType.CONCATENATION, function_info)
input_annotated_dfs = [input_info.annotated_dfobject for input_info in input_infos]
backend_input_infos = SklearnBackend.before_call(operator_context, input_annotated_dfs)
# No input_infos copy needed because it's only a selection and the rows not being removed don't change
result = original(self, *args, **kwargs)
backend_result = SklearnBackend.after_call(operator_context,
backend_input_infos,
result)
result = backend_result.annotated_dfobject.result_data
# Concat doesn't contain ratios: -> Empty
old_dag_node_annotations = backend_result.dag_node_annotation
to_check_annotations = [a for a in old_dag_node_annotations.keys() if isinstance(a, HistogramForColumns)]
if len(to_check_annotations) > 0:
assert len(to_check_annotations) == 1
annotation = to_check_annotations[0]
old_dag_node_annotations[to_check_annotations[0]] = {x: {} for x in
[f"\"{x}\"" for x in annotation.sensitive_columns]}
dag_node = DagNode(op_id,
BasicCodeLocation(self.mlinspect_filename, self.mlinspect_lineno),
operator_context,
DagNodeDetails(None, ['array']),
get_optional_code_info_or_none(self.mlinspect_optional_code_reference,
self.mlinspect_optional_source_code))
input_dag_nodes = [input_info.dag_node for input_info in input_infos]
add_dag_node(dag_node, input_dag_nodes, backend_result)
return result
@gorilla.patches(impute.SimpleImputer)
class SklearnSimpleImputerPatching:
""" Patches for sklearn SimpleImputer"""
# pylint: disable=too-few-public-methods
@gorilla.name('__init__')
@gorilla.settings(allow_hit=True)
def patched__init__(self, *, missing_values=numpy.nan, strategy="mean",
fill_value=None, verbose=0, copy=True, add_indicator=False,
mlinspect_caller_filename=None, mlinspect_lineno=None,
mlinspect_optional_code_reference=None, mlinspect_optional_source_code=None):
""" Patch for ('sklearn.impute._base', 'SimpleImputer') """
# pylint: disable=no-method-argument, attribute-defined-outside-init
original = gorilla.get_original_attribute(impute.SimpleImputer, '__init__')
self.mlinspect_caller_filename = mlinspect_caller_filename
self.mlinspect_lineno = mlinspect_lineno
self.mlinspect_optional_code_reference = mlinspect_optional_code_reference
self.mlinspect_optional_source_code = mlinspect_optional_source_code
def execute_inspections(_, caller_filename, lineno, optional_code_reference, optional_source_code):
""" Execute inspections, add DAG node """
original(self, missing_values=missing_values, strategy=strategy, fill_value=fill_value, verbose=verbose,
copy=copy, add_indicator=add_indicator)
self.mlinspect_caller_filename = caller_filename
self.mlinspect_lineno = lineno
self.mlinspect_optional_code_reference = optional_code_reference
self.mlinspect_optional_source_code = optional_source_code
return execute_patched_func_no_op_id(original, execute_inspections, self, missing_values=missing_values,
strategy=strategy, fill_value=fill_value, verbose=verbose, copy=copy,
add_indicator=add_indicator)
@gorilla.name('fit_transform')
@gorilla.settings(allow_hit=True)
def patched_fit_transform(self, *args, **kwargs):
""" Patch for ('sklearn.preprocessing._encoders.OneHotEncoder', 'fit_transform') """
# pylint: disable=no-method-argument
# TO_SQL: ###############################################################################################
fit_data, just_transform = differentiate_fit_transform(self, args[0])
# TO_SQL DONE! ##########################################################################################
if not just_transform:
original = gorilla.get_original_attribute(impute.SimpleImputer, 'fit_transform')
function_info = FunctionInfo('sklearn.impute._base', 'SimpleImputer')
input_info = get_input_info(args[0], self.mlinspect_caller_filename, self.mlinspect_lineno, function_info,
self.mlinspect_optional_code_reference, self.mlinspect_optional_source_code)
operator_context = OperatorContext(OperatorType.TRANSFORMER, function_info)
input_infos = SklearnBackend.before_call(operator_context, [input_info.annotated_dfobject])
result = original(self, input_infos[0].result_data, *args[1:], **kwargs)
backend_result = SklearnBackend.after_call(operator_context, input_infos, result)
new_return_value = backend_result.annotated_dfobject.result_data
if isinstance(input_infos[0].result_data, pandas.DataFrame):
columns = list(input_infos[0].result_data.columns)
else:
columns = ['array']
op_id = singleton.get_next_op_id()
else:
original = gorilla.get_original_attribute(impute.SimpleImputer, 'transform')
result = original(self, *args, **kwargs)
op_id = singleton.sql_logic.get_unique_id()
# TO_SQL: ###############################################################################################
code_ref = self.mlinspect_optional_code_reference
name, ti, target_cols, res_for_map, cols_to_drop = find_target(code_ref, args[0], result)
tracking_cols = ti.tracking_cols
all_cols = [x for x in ti.non_tracking_cols if not x in set(cols_to_drop)]
selection_map = {}
select_block = []
# STRATEGY 1: ###
if self.strategy == "most_frequent":
for col in all_cols:
if col in target_cols:
# Access fitted data if possible:
if not fit_data.fully_set:
fit_lookup_table, fit_lookup_code = singleton.sql_logic.column_max_count(name, col)
fit_data.col_to_fit_block_name[col] = fit_lookup_table
singleton.pipeline_container.add_statement_to_pipe(fit_lookup_table, fit_lookup_code)
if singleton.sql_obj.mode == SQLObjRep.VIEW:
singleton.dbms_connector.run(fit_lookup_code)
else:
fit_lookup_table = fit_data.col_to_fit_block_name[col]
select_block.append(f"\tCOALESCE({col}, (SELECT * FROM {fit_lookup_table})) AS {col}")
selection_map[col] = select_block[-1]
else:
select_block.append(f"\t{col}")
# STRATEGY 2: ###
elif self.strategy == "mean":
for col in all_cols:
if col in target_cols:
# Access fitted data if possible:
if not fit_data.fully_set:
fit_lookup_table, fit_lookup_code = singleton.sql_logic.column_mean(name, col)
fit_data.col_to_fit_block_name[col] = fit_lookup_table
singleton.pipeline_container.add_statement_to_pipe(fit_lookup_table, fit_lookup_code)
if singleton.sql_obj.mode == SQLObjRep.VIEW:
singleton.dbms_connector.run(fit_lookup_code)
else:
fit_lookup_table = fit_data.col_to_fit_block_name[col]
select_block.append(f"\tCOALESCE({col}, (SELECT * FROM {fit_lookup_table})) AS {col}")
selection_map[col] = select_block[-1]
else:
select_block.append(f"\t{col}")
# STRATEGY X: ###
else:
raise NotImplementedError
select_block_s = ',\n'.join(select_block) + ",\n\t" | |
# <NAME> (<EMAIL>)
from __future__ import absolute_import, division, print_function
from builtins import range
import numpy as np
import pandas as pd
import scipy.stats as ss
from joblib import Memory
from mlpaper.constants import METHOD, METRIC
from mlpaper.mlpaper import PAIRWISE_DEFAULT, loss_summary_table
MOMENT = "moment" # Don't put in constants since only needed for regression
def shape_and_validate(y, mu, std):
"""Validate shapes and types of predictive distribution against data and
return the shape information.
Parameters
----------
y : ndarray, shape (n_samples,)
True targets for each regression data point. Typically of type `float`.
mu : ndarray, shape (n_samples,)
Predictive mean for each regression data point. Typically of type
`float`. Must be of same shape as `y`.
std : ndarray, shape (n_samples,)
Predictive standard deviation for each regression data point. Typically
of type `float`. Must be positive and of same shape as `y`.
Returns
-------
n_samples : int
Number of data points (length of `y`)
"""
n_samples, = y.shape
assert n_samples >= 1
assert np.all(np.isfinite(y))
assert mu.shape == (n_samples,) and std.shape == (n_samples,)
assert np.all(np.isfinite(mu)) and np.all(np.isfinite(std))
assert np.all(std > 0.0)
return n_samples
# ============================================================================
# Loss functions
# ============================================================================
def square_loss(y, mu, std):
"""Compute MSE of predictions vs true targets.
Parameters
----------
y : ndarray, shape (n_samples,)
True targets for each regression data point. Typically of type `float`.
mu : ndarray, shape (n_samples,)
Predictive mean for each regression data point. Typically of type
`float`. Must be of same shape as `y`.
std : ndarray, shape (n_samples,)
Predictive standard deviation for each regression data point. Typically
of type `float`. Must be positive and of same shape as `y`. Ignored in
this function.
Returns
-------
loss : ndarray, shape (n_samples,)
Square error of target vs prediction. Same shape as `y`.
"""
shape_and_validate(y, mu, std)
loss = (y - mu) ** 2
return loss
def abs_loss(y, mu, std):
"""Compute MAE of predictions vs true targets.
Parameters
----------
y : ndarray, shape (n_samples,)
True targets for each regression data point. Typically of type `float`.
mu : ndarray, shape (n_samples,)
Predictive mean for each regression data point. Typically of type
`float`. Must be of same shape as `y`.
std : ndarray, shape (n_samples,)
Predictive standard deviation for each regression data point. Typically
of type `float`. Must be positive and of same shape as `y`. Ignored in
this function.
Returns
-------
loss : ndarray, shape (n_samples,)
Absolute error of target vs prediction. Same shape as `y`.
"""
shape_and_validate(y, mu, std)
loss = np.abs(y - mu)
return loss
def log_loss(y, mu, std):
"""Compute log loss of Gaussian predictive distribution on target `y`.
Parameters
----------
y : ndarray, shape (n_samples,)
True targets for each regression data point. Typically of type `float`.
mu : ndarray, shape (n_samples,)
Predictive mean for each regression data point. Typically of type
`float`. Must be of same shape as `y`.
std : ndarray, shape (n_samples,)
Predictive standard deviation for each regression data point. Typically
of type `float`. Must be positive and of same shape as `y`.
Returns
-------
loss : ndarray, shape (n_samples,)
Log loss of Gaussian predictive distribution on target `y`. Same shape
as `y`.
"""
shape_and_validate(y, mu, std)
loss = -ss.norm.logpdf(y, loc=mu, scale=std)
return loss
# ============================================================================
# Use and summarize loss functions
# ============================================================================
def loss_table(pred_tbl, y, metrics_dict):
"""Compute loss table from table of Gaussian predictions.
Parameters
----------
pred_tbl : DataFrame, shape (n_samples, n_methods * 2)
DataFrame with predictive distributions. Each row is a data point.
The columns should be hierarchical index that is the cartesian product
of methods x moments. For exampe, ``log_pred_prob_table.loc[5, 'foo']``
is a pandas series with (mean, std deviation) prediction that method
foo places on ``y[5]``. Cannot be empty.
y : ndarray, shape (n_samples,)
True targets for each regression data point. Typically of type `float`.
metrics_dict : dict of str to callable
Dictionary mapping loss function name to function that computes loss,
e.g., `log_loss`, `square_loss`, ...
Returns
-------
loss_tbl : DataFrame, shape (n_samples, n_metrics * n_methods)
DataFrame with loss of each method according to each loss function on
each data point. The rows are the data points in `y` (that is the index
matches `pred_tbl`). The columns are a hierarchical index that is the
cartesian product of loss x method. That is, the loss of method foo's
prediction of ``y[5]`` according to loss function bar is stored in
``loss_tbl.loc[5, ('bar', 'foo')]``.
"""
methods, moments = pred_tbl.columns.levels
assert "mu" in moments and "std" in moments
n_samples = len(pred_tbl)
assert y.shape == (n_samples,)
assert n_samples >= 1 and len(methods) >= 1
col_names = pd.MultiIndex.from_product([metrics_dict.keys(), methods], names=[METRIC, METHOD])
loss_tbl = pd.DataFrame(index=pred_tbl.index, columns=col_names, dtype=float)
for method in methods:
# These get validated inside loss function
mu = pred_tbl[(method, "mu")].values
std = pred_tbl[(method, "std")].values
for metric, metric_f in metrics_dict.items():
loss_tbl.loc[:, (metric, method)] = metric_f(y, mu, std)
return loss_tbl
# ============================================================================
# Variables and functions to make getting results from sklearn objects easy
# ============================================================================
# Pre-build some standard metric dicts for the user
STD_REGR_LOSS = {"NLL": log_loss, "MSE": square_loss, "MAE": abs_loss}
class JustNoise:
"""Class version of iid predictor compatible with sklearn interface. Same
as ``sklearn.dummy.DummyRegressor(strategy='mean')`` but also keeps track
of std to be able to accept ``return_std=True``."""
def __init__(self):
self.mu = np.nan
self.std = np.nan
def fit(self, X_train, y_train):
assert y_train.ndim == 1
assert len(y_train) >= 2 # Require N >= 2 for std
self.mu = np.mean(y_train)
self.std = np.std(y_train, ddof=0)
def predict(self, X_test, return_std=True):
assert return_std
N = X_test.shape[0]
mu = np.repeat([self.mu], N, axis=0)
std = np.repeat([self.std], N, axis=0)
return mu, std
def get_gauss_pred(X_train, y_train, X_test, methods, min_std=0.0, verbose=False, checkpointdir=None):
"""Get the Gaussian prediction tables for each test point on a collection
of regression methods.
Parameters
----------
X_train : ndarray, shape (n_train, n_features)
Training set 2d feature array for classifiers. Each row is an
indepedent data point and each column is a feature.
y_train : ndarray, shape (n_train,)
True training targets for each regression data point. Typically of type
`float`. Must be of same length as `X_train`.
X_test : ndarray, shape (n_test, n_features)
Test set 2d feature array for classifiers. Each row is an indepedent
data point and each column is a feature.
methods : dict of str to sklearn estimator
Dictionary mapping method name (`str`) to object that performs training
and test. Object must follow the interface of sklearn estimators, that
is, it has a ``fit()`` method and a ``predict()`` method that accepts
the argument ``return_std=True``.
min_std : float
Minimum value to floor the predictive standard deviation. Must be >= 0.
Useful to prevent inf log loss penalties.
verbose : bool
If True, display which method being trained.
checkpointdir : str (directory)
If provided, stores checkpoint results using joblib for the train/test
in case process interrupted. If None, no checkpointing is done.
Returns
-------
pred_tbl : DataFrame, shape (n_samples, n_methods * 2)
DataFrame with predictive distributions. Each row is a data point.
The columns should be hierarchical index that is the cartesian product
of methods x moments. For exampe, ``log_pred_prob_table.loc[5, 'foo']``
is a pandas series with (mean, std deviation) prediction that method
foo places on ``y[5]``.
Notes
-----
If a train/test operation is loaded from a checkpoint file, the estimator
object in methods will not be in a fit state.
"""
n_test = X_test.shape[0]
assert n_test > 0
assert X_train.ndim == 2
assert y_train.shape == (X_train.shape[0],)
assert X_test.ndim == 2 and X_test.shape[1] == X_train.shape[1]
assert X_train.dtype.kind == X_test.dtype.kind # Would be weird otherwise
assert min_std >= 0.0
memory = Memory(cachedir=checkpointdir, verbose=0)
@memory.cache
def train_predict(method_obj, X_train, y_train, X_test):
method_obj.fit(X_train, y_train)
try:
mu, std = method_obj.predict(X_test, return_std=True)
except TypeError:
mu = method_obj.predict(X_test)
std = np.ones_like(mu)
return mu, std
col_names = pd.MultiIndex.from_product([methods.keys(), ("mu", "std")], names=[METHOD, MOMENT])
pred_tbl = pd.DataFrame(index=range(n_test), columns=col_names, dtype=float)
for method_name, method_obj in methods.items():
if verbose:
print("Running fit/predict for %s" % method_name)
mu, std = train_predict(method_obj, X_train, y_train, X_test)
assert mu.shape == (n_test,) and std.shape == (n_test,)
std = np.maximum(min_std, std)
pred_tbl.loc[:, (method_name, "mu")] = mu
pred_tbl.loc[:, (method_name, "std")] = std
return pred_tbl
def just_benchmark(
X_train,
y_train,
X_test,
y_test,
methods,
loss_dict,
ref_method,
min_std=0.0,
pairwise_CI=PAIRWISE_DEFAULT,
| |
from ai_ct_scans import data_loading
import torch
from ai_ct_scans import models
from tqdm import tqdm
import matplotlib.pyplot as plt
import numpy as np
from cv2 import blur
from ai_ct_scans import phase_correlation_image_processing
from ai_ct_scans.data_writing import ndarray_to_memmap
plt.ion()
if torch.cuda.is_available():
dev = torch.device("cuda")
else:
    dev = torch.device("cpu")
def det(tensor):
"""Detach a torch Tensor to a cpu numpy version
Args:
tensor (torch.Tensor): A tensor to be detached and turned into an ndarray
Returns:
(ndarray): The same data as an ndarray
"""
return tensor.cpu().detach().numpy()
def debug_plot(model_out, batch, index=0):
"""Plot the original image, the masked image, and the infilled version. Useful during debugging.
Args:
model_out: The infilled image stack from an Infiller model
batch (dict of tensors): A dictionary with torch Tensors 'labels' and 'input_images'
index (int): The index of the model's output to compare to the original image and masked version, in the range [0, batch_size)
"""
out = det(model_out)
ims = det(batch["labels"])
inputs = det(batch["input_images"])
f, axes = plt.subplots(1, 3)
axes = np.ravel(axes)
axes[0].imshow(ims[index, 0, :, :])
axes[0].set_title("original image")
axes[1].imshow(inputs[index, 0, :, :])
axes[1].set_title("masked inputs")
axes[2].imshow(out[index, 0, :, :])
axes[2].set_title("output")
class InfillTrainer:
"""A class for training an ai_ct_scans.models.Infiller network"""
def __init__(
self,
axial_width=256,
coronal_width=256,
sagittal_width=256,
batch_size=8,
batch_width=256,
batch_height=256,
blank_width=64,
num_encoder_convs=3,
encoder_filts_per_layer=10,
neurons_per_dense=512,
num_dense_layers=3,
decoder_filts_per_layer=10,
num_decoder_convs=3,
kernel_size=3,
learning_rate=1e-5,
save_dir=None,
clear_previous_memmaps=False,
save_freq=200,
blur_kernel=None,
show_outline=False,
):
"""Initialises the network and dataset handling, gets the trainer ready for run self.train_for_iterations()
Args:
axial_width (int): How wide the model will expect views taken from the axial plane to be in pixels
coronal_width (int): How wide the model will expect views taken from the coronal plane to be in pixels
sagittal_width (int): How wide the model will expect views taken from the sagittal plane to be in pixels
batch_size (int): How many random views to take for a single training iteration (typically 1-8 trialled)
batch_width (int): How wide the views should be at the point of input to the model in pixels
batch_height (int): How high the views should be at the point of input to the model in pixels
blank_width (int): Square size of the centre masked region to be applied in the middle of each view before
input to network
num_encoder_convs (int): How many convolution-maxpool steps to build into the model in the encoder
encoder_filts_per_layer (int): How many filters to include in the first convolution layer (to be doubled at
each subsequent layer Unet style)
neurons_per_dense (int): (currently disconnected) How many neurons in each dense layer that connects the
convolutional layers in the encoder to the convolutional layers in the decoder
num_dense_layers (int): (currently disconnected) How many layers of dense neurons to use to connect the
convolutional encoder and decoder layers
decoder_filts_per_layer (int): (currently must be same as encoder filts_per_layer)
num_decoder_convs (int): How many upsample-convolutional layers to include in the decoder, currently
throws an error if not equal to num_encoder_convs to fit Unet style of the network
kernel_size (int or tuple of two ints): 2D size of kernels used in Conv2D layers
learning_rate (float): parameter to control the rate at which the model learns, typically <1e-4
save_dir (pathlib Path): A directory in which to save the model during training
clear_previous_memmaps (bool): Whether to re-initialise the dataset (i.e. rebuild memmaps off of original
DICOM data)
save_freq (int): How often to save the model, every save_freq iterations
blur_kernel (None or tuple of ints): If not None, apply a blur to the input views before masking and feeding
into the network. This is theorised to prevent the model getting stuck due to attempting to recreate high
frequency random noise
show_outline (bool): Whether to perform an edge detection and expose these edges in the masked region. This
helps the model to get the correct shapes at output, without showing it much about the intensity/texture it
should aim for.
"""
self.multi_patient_loader = data_loading.MultiPatientLoader()
# Included just abdo due to simplicity of focusing on one body part
# Mentioned twice in the array to preserve the index for testing for multiple body parts
# self.body_parts = ['abdo', 'thorax']
self.body_parts = ["abdo", "abdo"]
self.scan_nums = ["scan_1", "scan_2"]
for patient in self.multi_patient_loader.patients:
for body_part in self.body_parts:
for scan_num in self.scan_nums:
                    scan = getattr(getattr(patient, body_part), scan_num)
                    if clear_previous_memmaps:
                        scan.delete_memmap()
                    scan.load_memmap_and_clear_scan()
self.blur_kernel = blur_kernel
self.show_outline = show_outline
self.axial_width = axial_width
self.coronal_width = coronal_width
self.sagittal_width = sagittal_width
self.batch_width = batch_width
self.batch_height = batch_height
self.batch_size = batch_size
self.blank_width = blank_width
self.loss_weighting_width = int(self.blank_width * 1.5)
self.slicers = [
self.random_axial_slicer,
self.random_coronal_slicer,
self.random_sagittal_slicer,
]
self.plane_masks = self.plane_mask_builder()
self.edge_detection_pad = 3
self.edge_window_width = 2 * self.edge_detection_pad + self.blank_width
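        # The edge-detection window spans the blank square plus
        # edge_detection_pad pixels on each side, so edges touching the mask
        # border are still captured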
self.edge_detection_mask = np.logical_not(
self.plane_mask_builder(blank_width=self.edge_window_width)[0]
)
self.inv_plane_mask = np.logical_not(self.plane_masks[0])
self.loss_masks = self.plane_mask_builder(self.loss_weighting_width)
self.loss_masks = self._convert_loss_masks_to_tensor()
self.label_masks = [
np.logical_not(plane_mask) for plane_mask in self.plane_masks
]
self.patient_indices = list(range(len(self.multi_patient_loader.patients)))
self.model = models.Infiller(
input_height=self.batch_height,
input_width=self.batch_width,
output_height=self.blank_width,
output_width=self.blank_width,
num_encoder_convs=num_encoder_convs,
encoder_filts_per_layer=encoder_filts_per_layer,
neurons_per_dense=neurons_per_dense,
num_dense_layers=num_dense_layers,
decoder_filts_per_layer=decoder_filts_per_layer,
num_decoder_convs=num_decoder_convs,
kernel_size=kernel_size,
)
self.optimiser = torch.optim.AdamW(self.model.parameters(), lr=learning_rate)
if save_dir is None:
save_dir = data_loading.data_root_directory().parent / "infiller"
save_dir.mkdir(exist_ok=True, parents=True)
self.save_dir = save_dir
self.iteration = 0
self.last_n_losses = []
self.loss_num_to_ave_over = 100
self.latest_loss = np.inf
self.save_freq = save_freq
self.input_stack = np.zeros(
[self.batch_size, 1, self.batch_height, self.batch_width], dtype="float64"
)
self.plane_mask_stack = np.zeros_like(self.input_stack)
self.error_weighting = (
self.loss_weighting_width ** 2 / self.axial_width ** 2 + 1
)
self.best_loss = np.inf
def _convert_loss_masks_to_tensor(self):
"""Convert existing loss masks, used to reweight the central masked region in the loss function, to tensors
Returns:
(tensor): A stack of tensors that are 1 in the central and border regions around the mask, and 0 elsewhere
"""
return torch.Tensor(self.loss_masks).to(dev)
def loss(self, model_out, batch):
"""Defines a custom loss function for the network. Weights the loss such that reproduction of the masked region
(and a small border area around it) contributes to the overall loss on the same order of magnitude as all other
pixels that were predicted
Args:
model_out (torch Tensor): Stack of images that the model has predicted
batch (dict as built by self.build_batch): The batch that was used for the iteration, which should include
at least a 'labels' stack of Tensor images of the same shape as model_out
Returns:
(torch Tensor): the MSE error in the output prediction after reweighting masked region of prediction
"""
error = model_out - batch["labels"]
squared_error = error ** 2
weighted_error = squared_error * (self.error_weighting - self.loss_masks[0])
mse = torch.mean(weighted_error)
return mse
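    # Worked example of the weighting constant, assuming the default
    # constructor arguments: blank_width=64 gives loss_weighting_width=96, so
    # error_weighting = 96 ** 2 / 256 ** 2 + 1 ~= 1.141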
def train_step(self):
"""Build a single batch, do a single forward and backward pass."""
batch = self.build_batch()
self.optimiser.zero_grad()
out = self.model(batch)
loss = self.loss(out, batch)
loss.backward()
self.optimiser.step()
detached_loss = loss.cpu().detach().numpy()
        if len(self.last_n_losses) == self.loss_num_to_ave_over:
            self.last_n_losses.pop(0)
        self.last_n_losses.append(detached_loss)
self.iteration += 1
if self.iteration % 5000 == 0:
print(f"{self.iteration} iterations complete")
def train_for_iterations(self, iterations):
"""Train the model for a set number of iterations
Args:
iterations (int): Number of iterations to train for
"""
self.model.train()
progress_bar = tqdm(range(iterations))
for _ in progress_bar:
self.train_step()
progress_bar.set_description(f"Average loss {np.mean(self.last_n_losses)}")
if (self.iteration % self.save_freq) == 0:
self.save_model(self.save_dir)
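    # Illustrative usage sketch (iteration count and keyword values are
    # arbitrary, not recommendations):
    #
    #   trainer = InfillTrainer(batch_size=4, blur_kernel=(3, 3))
    #   trainer.train_for_iterations(10000)
    #   trainer.save_model(trainer.save_dir, bypass_loss_check=True)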
    def save_model(self, directory, bypass_loss_check=False):
        """Save the model. If it has achieved the best loss (or bypass_loss_check is True), save to 'model.pth'
        within directory, otherwise save to 'latest_model.pth'
        Args:
            directory (pathlib Path): A directory in which to save the model
            bypass_loss_check (bool): If True, save to 'model.pth' even if the running loss has not improved
        """
        directory.mkdir(exist_ok=True, parents=True)
        curr_loss = np.mean(self.last_n_losses)
        checkpoint = {
            "iteration": self.iteration,
            "model_state_dict": self.model.state_dict(),
            "optimiser_state_dict": self.optimiser.state_dict(),
            "loss": curr_loss,
            "running_loss": self.last_n_losses,
        }
        if curr_loss < self.best_loss or bypass_loss_check:
            torch.save(checkpoint, str(directory / "model.pth"))
            self.best_loss = curr_loss
        else:
            # A non-improving checkpoint is still saved, but best_loss is left
            # unchanged so 'model.pth' always holds the best-scoring weights
            torch.save(checkpoint, str(directory / "latest_model.pth"))
def load_model(self, directory, model="model.pth"):
"""Load a pretrained model, optimiser state, loss at time of saving, iteration at time of saving
Args:
directory (pathlib Path): Directory in which the model is saved
model (str): Model filename, defaults to 'model.pth'
"""
checkpoint = torch.load(str(directory / model))
self.model.load_state_dict(checkpoint["model_state_dict"])
self.optimiser.load_state_dict(checkpoint["optimiser_state_dict"])
self.latest_loss = checkpoint["loss"]
self.iteration = checkpoint["iteration"]
self.last_n_losses = checkpoint["running_loss"]
self.best_loss = checkpoint["loss"]
    def plane_mask_builder(self, blank_width=None):
        """Get a list of logical ndarrays that can be used to mask out the central region of an input image, and
        extract that local region for a 'label' array at output of the model
        Args:
            blank_width (int or None): Square width of the central masked region; defaults to self.blank_width
        Returns:
            (list of 2D ndarrays): A set of masks to apply to the axial, coronal and sagittal views
in group.keys(): # Not looking for attr keys here (Potentially 'none' stored in attr, but that's not helpful here)
# tfit_group = group.get('tfit_info')
# assert isinstance(tfit_group, h5py.Group)
# fits = list()
# for k in ['v0_0', 'vp', 'v0_1', 'vm']:
# fit_group = tfit_group.get(k, None)
# if fit_group is not None:
# fits.append(DA.FitInfo.from_hdf(tfit_group, k))
# else:
# logger.warning(f'Expected to find {k} group in {tfit_group.name} but did not.')
# value.tfit_info = fits
# else:
# value.tfit_info = None
# return value
#
#
# @dataclass
# class EA_values:
# sfs: List[float] = field(default_factory=list)
# tcs: List[float] = field(default_factory=list)
# ths: List[float] = field(default_factory=list)
# dTs: List[float] = field(default_factory=list)
# amps: List[float] = field(default_factory=list)
# mids: List[float] = field(default_factory=list)
# gs: List[float] = field(default_factory=list)
# dxs: List[float] = field(default_factory=list)
# int_dSs: List[float] = field(default_factory=list)
# fit_dSs: List[float] = field(default_factory=list) # TODO: populate this properly
# efit_infos: List[DA.FitInfo] = field(default_factory=list)
# tfit_infos: List[List[DA.FitInfo]] = field(default_factory=list)
# data_minus_fits: List[float] = field(default_factory=list)
#
# def __len__(self):
# lens = [len(getattr(self, k, [])) for k in self.__annotations__.keys()]
# return int(np.nanmax(lens))
#
# def __getitem__(self, item):
# if item >= len(self):
# raise IndexError
# value = EA_value()
# for k in self.__annotations__.keys():
# vs = getattr(self, k, [])
# if item < len(vs):
# setattr(value, k[:-1], vs[item])
#
# return value
#
# def append(self, value: EA_value):
# for k in self.__annotations__.keys():
# list_ = getattr(self, k)
# list_.append(getattr(value, k[:-1]))
#
# @property
# def dSs(self):
# if self.efit_infos:
# dSs = [CU.get_nested_attr_default(f, 'best_values.dS', None) for f in self.efit_infos]
# return dSs
# else:
# return None
#
# @classmethod
# def from_dats(cls, dats: List[DatHDF], uncertainty=False):
# """
# Gets EA_value from each dat and combines into EA_values class
# Args:
# dats (List[DatHDF]):
# uncertainty (bool): Whether to look for uncertainties instead of values (stored in similar way)
#
# Returns:
# EA_values: All values from dats combined
# """
# values = cls()
# if uncertainty:
# dat_key = 'EA_uncertainties'
# else:
# dat_key = 'EA_values'
# for dat in dats:
# if dat.Logs.part_of is not None and dat.Logs.part_of[0] == 2: # By default use part1 of dats
# continue
# value = getattr(dat.Other, dat_key, None)
# if value is None:
# logger.warning(f'Dat{dat.datnum} had no {dat_key} in dat.Other')
# continue
# for k in cls.__annotations__.keys():
# list_ = getattr(values, k)
# list_.append(getattr(value, k[:-1], None))
# return values
#
# @classmethod
# def from_dat(cls, dat: DatHDF):
# values = getattr(dat.Other, 'EA_valuess', None)
# class_name = values.__class__.__name__
# if values is None:
# logger.warning(f'No dat.Other.EA_values found for dat{dat.datnum}')
# return None
# elif class_name == 'EA_value':
# logger.warning(f'Dat{dat.datnum} has single set of EA_value only, not EA_values')
# return None
# elif class_name == 'EA_values':
# efits = list()
# i = 0
# while True:
# name = f'efit_info_{i}'
# if hasattr(dat.Other, name):
# efits.append(getattr(dat.Other, name))
# i += 1
# else:
# break
# values.efit_infos = efits # Should also set it in the dat I think?
# return values
#
#
# @dataclass
# class DataCTvalues:
# tcs: List[float] = field(default_factory=list)
# ths: List[float] = field(default_factory=list)
# amps: List[float] = field(default_factory=list)
# mids: List[float] = field(default_factory=list)
# gs: List[float] = field(default_factory=list)
#
#
# def _get_func(params: EA_params):
# if params.CT_fit_func == 'i_sense':
# fit_func = T.i_sense
# elif params.CT_fit_func == 'i_sense_digamma':
# fit_func = T.i_sense_digamma
# elif params.CT_fit_func == 'i_sense_digamma_quad':
# fit_func = T.i_sense_digamma_quad
# else:
# raise ValueError(f'{params.CT_fit_func} is not understood as a CT fit_function')
# return fit_func
#
#
# def _get_CT_param_estimate(x: np.ndarray, data: np.ndarray, params: EA_params):
# """
# Get param estimates from data, then overwrite/change other variables using info in EA_params
# Args:
# x (np.ndarray): X array for data
# data (np.ndarray): Single transition data
# params (EA_params): Analysis params including CT_fit_param_edit_kwargs
#
# Returns:
# lm.Parameters: Parameters to use for fitting
# """
# pars = T.get_param_estimates(x, data)[0]
# if params.CT_fit_func == 'i_sense':
# pass
# elif params.CT_fit_func == 'i_sense_digamma':
# T._append_param_estimate_1d(pars, ['g'])
# elif params.CT_fit_func == 'i_sense_digamma_quad':
# T._append_param_estimate_1d(pars, ['g', 'quad'])
# else:
# raise ValueError(f'{params.CT_fit_func} is not understood as a CT fit_function')
#
# if params.CT_fit_param_edit_kwargs != {}:
# if 'param_name' in params.CT_fit_param_edit_kwargs:
# pars = CU.edit_params(pars, **params.CT_fit_param_edit_kwargs)
# else:
# raise ValueError(
# f'{params.CT_fit_param_edit_kwargs} is not empty, but does not include "param_name" which is required')
# return pars
#
#
# def calculate_CT_values(data: EA_data, values: EA_value, params: EA_params) -> DataCTvalues:
# """
# Calculates the charge transition fit stuff, stores the interesting final values in 'values', but
# also returns ct_values which has each individual fit value used
# Args:
# data (EA_data): 1D EA data class
# values (EA_value): 1D values class
# params (EA_params): Params for analysis
#
# Returns:
# (DataCTvalues): Each fit value used to fill 'values', mostly for debugging
# """
# fit_func = _get_func(params)  # Get i_sense, i_sense_digamma, etc.
# indexs = CU.get_data_index(data.x, params.CT_fit_range)
# if indexs[1] < indexs[0]:
# indexs = indexs[1], indexs[0]
# t_fit_data = data.trans_data[:, indexs[0]:indexs[1]]
# x = data.x[indexs[0]:indexs[1]]
#
# # # This was just a check,
# # # but this doesn't work as well if Vp and Vm aren't already corrected to be in line with each other
# # t_cold = np.nanmean(t_fit_data[(0, 2), ], axis=0)
# # t_hot = np.nanmean(t_fit_data[(1, 3), ], axis=0)
# #
# # t_pars = _get_CT_param_estimate(x, t_cold, params)
# # fit = transition_fits(x, t_cold, func=fit_func, params=t_pars)[0]
# # values.tc = fit.best_values['theta']
# # mid = fit.best_values['mid']
# # amp = fit.best_values['amp']
# # if 'g' in fit.best_values.keys():
# # g = fit.best_values['g']
# # else:
# # g = np.nan
# #
# # t_pars = _get_CT_param_estimate(x, t_hot, params)
# # fit = transition_fits(x, t_hot, func=fit_func, params=t_pars)[0]
# # values.th = fit.best_values['theta']
# # dT = values.th - values.tc
#
# ct_values = DataCTvalues()
# for data in t_fit_data[0::2]:
# t_pars = _get_CT_param_estimate(x, data, params)
# fit = transition_fits(x, data, func=fit_func, params=t_pars)[0]
# ct_values.tcs.append(fit.best_values['theta'])
# ct_values.amps.append(fit.best_values['amp'])
# ct_values.mids.append(fit.best_values['mid'])
# if 'g' in fit.best_values.keys():
# ct_values.gs.append(fit.best_values['g'])
# for data in t_fit_data[1::2]:
# t_pars = _get_CT_param_estimate(x, data, params)
# fit = transition_fits(x, data, func=fit_func, params=t_pars)[0]
# ct_values.ths.append(fit.best_values['theta'])
#
# values.tc = np.nanmean(ct_values.tcs)
# values.th = np.nanmean(ct_values.ths)
#
# mid = np.nanmean(ct_values.mids)
# amp = np.nanmean(ct_values.amps)
# g = np.nanmean(ct_values.gs)
# dT = values.th - values.tc
#
# if -1000 < mid < 1000:
# values.mid = mid
# else:
# logger.warning(f'Mid = {mid:.2f}: Using 0 instead')
# values.mid = 0
#
# if params.allowed_amp_range[0] < amp < params.allowed_amp_range[1]:
# values.amp = amp
# else:
# logger.info(f'Amp={amp:.2f}: Default of {params.default_amp:.2f} Used instead')
# values.amp = params.default_amp
#
# if params.allowed_dT_range[0] < dT < params.allowed_dT_range[1]:
# values.dT = dT
# else:
# logger.info(f'dT={dT:.2f}: Default of {params.default_dT:.2f} Used instead')
# values.dT = params.default_dT
#
# values.g = g
# return ct_values
#
#
# def calculate_integrated(data: EA_data, values: EA_value, params: EA_params):
# """
# Integrates data.entropy_data and stores in data.integrated_data. Will sub line or not based on params
# Args:
# data (EA_data): 1D EA data class
# values (EA_value): 1D values class
# params (EA_params): Params for analysis
#
# Returns:
# None: Just adds data.integrated_data, and values.[dx, sf, int_dS]
# """
# values.dx = float(np.mean(np.diff(data.x)))
# values.sf = scaling(dt=values.dT, amplitude=values.amp, dx=values.dx)
# integrated_data = integrate_entropy(data.entropy_data, values.sf)
#
# if params.sub_const:
# indexs = CU.get_data_index(data.x, [values.mid+params.sub_const_range[0], values.mid + params.sub_const_range[1]])
# mean = np.nanmean(integrated_data[indexs[0]:indexs[1]])
# integrated_data = integrated_data - mean
#
# if params.sub_line:
# line = lm.models.LinearModel()
# indexs = CU.get_data_index(data.x,
# [values.mid + params.sub_line_range[0], values.mid + params.sub_line_range[1]])
# if indexs[0] == indexs[1]:
# indexs = [0, int(round(data.x.shape[-1] / 20))]
# logger.warning(
# 'Indices for subtracting line from Integrated entropy were equal so defaulting to first 5% of data')
# i_data = integrated_data[indexs[0]:indexs[1]]
# line_fit = line.fit(i_data, x=data.x[indexs[0]:indexs[1]], nan_policy='omit')
# integrated_data = integrated_data - line_fit.eval(x=data.x)
#
# data.integrated_data = integrated_data
#
# indexs = CU.get_data_index(data.x,
# [values.mid + params.int_entropy_range[0], values.mid + params.int_entropy_range[1]])
# if indexs[0] == indexs[1]:
# indexs = [-int(round(data.x.shape[-1] / 20)), None]
# logger.warning('Indices for averaging integrated data were the same so defaulting to last 5% of data')
# values.int_dS = np.nanmean(integrated_data[indexs[0]:indexs[1]])
#
#
# def calculate_fit(data: EA_data, values: EA_value, params: EA_params, **edit_param_kwargs):
# """
# Calculates entropy fit to data.entropy_data, will use any fit parameters passed in by analysis params,
# and then will overwrite with any params passed in manually
# Args:
# data (EA_data): 1D EA data class
# values (EA_value): 1D values class
# params (EA_params): Params for analysis
# **edit_param_kwargs (dict): Kwargs for editing params before fitting. i.e.
# (param_name, value, vary, min_val, max_val)
#
# Returns:
# None: Just adds values.efit_info
# """
# indexs = CU.get_data_index(data.x, params.E_fit_range)
# if indexs[1] < indexs[0]:
# indexs = indexs[1], indexs[0]
#
# x = data.x[indexs[0]:indexs[1]]
# e_fit_data = data.entropy_data[indexs[0]:indexs[1]]
# e_pars = E.get_param_estimates(x, e_fit_data)[0]
# if 'param_name' in params.fit_param_edit_kwargs:
# e_pars = CU.edit_params(e_pars, **params.fit_param_edit_kwargs)
# if 'param_name' in edit_param_kwargs:
# e_pars = CU.edit_params(e_pars, **edit_param_kwargs)
# efit = E.entropy_fits(x, e_fit_data, params=e_pars)[0]
# efit_info = DA.FitInfo.from_fit(efit)
# values.efit_info = efit_info
# data.data_minus_fit = data.entropy_data - efit_info.eval_fit(data.x)
# values.data_minus_fit = np.nansum(abs(data.data_minus_fit) * np.nanmean(np.diff(data.x)))
#
#
# def bin_datas(data: EA_data, target_num_per_row: int):
# bin_size = np.ceil(data.x.shape[-1] / target_num_per_row)
# data.trans_data = bin_data(data.trans_data, bin_size)
# data.entropy_data = bin_data(data.entropy_data, bin_size)
# data.integrated_data = bin_data(data.integrated_data, bin_size)
# data.x = np.linspace(data.x[0], data.x[-1], int(data.x.shape[-1] / bin_size))
# return data
#
#
# def _set_data(dat: DatHDF, data: EA_data):
# """Saves 1D datasets properly in HDF, more efficient for loading etc"""
# for k in data.__annotations__:
# if isinstance(getattr(data, k), h5py.Dataset):
# setattr(data, k, getattr(data, k)[:])
# for k, v in asdict(data).items():
# if v is not None and v != []:
# dat.Other.set_data(k, v)
#
#
# def get_data(dat) -> EA_data:
# """Gets 1D datasets from Dat.Other.Data, does it efficiently"""
# data = EA_data()
# for k in data.__annotations__.keys():
# if k in dat.Other.Data.keys():
# setattr(data, k, dat.Other.Data.get(k)) # Might
# Princeton University licenses this file to You under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy of the License at:
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
# ********************************************* ComparatorMechanism ***************************************************
"""
Contents
--------
* `ComparatorMechanism_Overview`
* `ComparatorMechanism_Creation`
* `ComparatorMechanism_Structure`
* `ComparatorMechanism_Execution`
* `ComparatorMechanism_Example`
* `ComparatorMechanism_Class_Reference`
.. _ComparatorMechanism_Overview:
Overview
--------
A ComparatorMechanism is a subclass of `ObjectiveMechanism` that receives two inputs (a sample and a target), compares
them using its `function <ComparatorMechanism.function>`, and places the calculated discrepancy between the two in its
*OUTCOME* `OutputPort <ComparatorMechanism.output_port>`.
.. _ComparatorMechanism_Creation:
Creating a ComparatorMechanism
------------------------------
ComparatorMechanisms are generally created automatically when other PsyNeuLink components are created (such as
`LearningMechanisms <LearningMechanism_Creation>`). A ComparatorMechanism can also be created directly by calling
its constructor. Its **sample** and **target** arguments are used to specify the OutputPorts that provide the
sample and target inputs, respectively (see `ObjectiveMechanism_Monitored_ports` for details concerning their
specification, which are special versions of an ObjectiveMechanism's **monitor** argument). When the
ComparatorMechanism is created, two InputPorts are created, one each for its sample and target inputs (and named,
by default, *SAMPLE* and *TARGET*). Each is assigned a MappingProjection from the corresponding OutputPort specified
in the **sample** and **target** arguments.
It is important to recognize that the value of the *SAMPLE* and *TARGET* InputPorts must have the same length and type,
so that they can be compared using the ComparatorMechanism's `function <ComparatorMechanism.function>`. By default,
they use the format of the OutputPorts specified in the **sample** and **target** arguments, respectively,
and the `MappingProjection` to each uses an `IDENTITY_MATRIX`. Therefore, for the default configuration, the
OutputPorts specified in the **sample** and **target** arguments must have values of the same length and type.
If these differ, the **input_ports** argument can be used to explicitly specify the format of the ComparatorMechanism's
*SAMPLE* and *TARGET* InputPorts, to ensure they are compatible with one another (as well as to customize their
names, if desired). If the **input_ports** argument is used, *both* the sample and target InputPorts must be
specified. Any of the formats for `specifying InputPorts <InputPort_Specification>` can be used in the argument.
If values are assigned for the InputPorts, they must be of equal length and type. Their types must
also be compatible with the value of the OutputPorts specified in the **sample** and **target** arguments. However,
the length specified for an InputPort can differ from its corresponding OutputPort; in that case, by default, the
MappingProjection created uses a `FULL_CONNECTIVITY` matrix. Thus, OutputPorts of differing lengths can be mapped
to the sample and target InputPorts of a ComparatorMechanism (see the `example <ComparatorMechanism_Example>` below),
so long as the latter are of the same length. If a projection other than a `FULL_CONNECTIVITY` matrix is needed, this
can be specified using the *PROJECTION* entry of a `Port specification dictionary <Port_Specification>` for the
InputPort in the **input_ports** argument.
.. _ComparatorMechanism_Structure:
Structure
---------
A ComparatorMechanism has two `input_ports <ComparatorMechanism.input_ports>`, each of which receives a
`MappingProjection` from a corresponding OutputPort specified in the **sample** and **target** arguments of its
constructor. The InputPorts are listed in the Mechanism's `input_ports <ComparatorMechanism.input_ports>` attribute
and named, respectively, *SAMPLE* and *TARGET*. The OutputPorts from which they receive their projections (specified
in the **sample** and **target** arguments) are listed in the Mechanism's `sample <ComparatorMechanism.sample>` and
`target <ComparatorMechanism.target>` attributes as well as in its `monitor <ComparatorMechanism.monitor>` attribute.
The ComparatorMechanism's `function <ComparatorMechanism.function>` compares the value of the sample and target
InputPorts. By default, it uses a `LinearCombination` function, assigning the sample InputPort a `weight
<LinearCombination.weight>` of *-1* and the target a `weight <LinearCombination.weight>` of *1*, so that the sample
is subtracted from the target. However, the `function <ComparatorMechanism.function>` can be customized, so long as
it is replaced with one that takes two arrays with the same format as its inputs and generates a similar array as its
result. The result is assigned as the value of the Comparator Mechanism's *OUTCOME* (`primary <OutputPort_Primary>`)
OutputPort.
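For instance, with the default `function <ComparatorMechanism.function>` and its default weights, a sample of
``[1, 1]`` and a target of ``[3, 5]`` yield an *OUTCOME* of ``[2, 4]``, that is, the element-wise difference
``target - sample``.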
.. _ComparatorMechanism_Execution:
Execution
---------
When a ComparatorMechanism is executed, it updates its input_ports with the values of the OutputPorts specified
in its **sample** and **target** arguments, and then uses its `function <ComparatorMechanism.function>` to
compare these. By default, the result is assigned to the `value <Mechanism_Base.value>` of its *OUTCOME*
`output_port <ComparatorMechanism.output_port>`, and as the first item of the Mechanism's
`output_values <ComparatorMechanism.output_values>` attribute.
.. _ComparatorMechanism_Example:
Example
-------
.. _ComparatorMechanism_Default_Input_Value_Example:
*Formatting InputPort values*
The **default_variable** argument can be used to specify a particular format for the SAMPLE and/or TARGET InputPorts
of a ComparatorMechanism. This can be useful when one or both of these differ from the format of the
OutputPort(s) specified in the **sample** and **target** arguments. For example, for `Reinforcement Learning
<Reinforcement>`, a ComparatorMechanism is used to monitor an action selection Mechanism (the sample), and compare
this with a reinforcement signal (the target). In the example below, the action selection Mechanism is a
`TransferMechanism` that uses the `SoftMax` function (and the `PROB <Softmax.PROB>` as its output format) to select
an action. This generates a vector with a single non-zero value (the selected action). Because the output is a vector,
specifying it as the ComparatorMechanism's **sample** argument will generate a corresponding InputPort with a vector
as its value. This will not match the reward signal specified in the ComparatorMechanism's **target** argument, the
value of which is a single scalar. This can be dealt with by explicitly specifying the format for the SAMPLE and
TARGET InputPorts in the **default_variable** argument of the ComparatorMechanism's constructor, as follows::
>>> import psyneulink as pnl
>>> my_action_selection_mech = pnl.TransferMechanism(size=5,
... function=pnl.SoftMax(output=pnl.PROB))
>>> my_reward_mech = pnl.TransferMechanism()
>>> my_comparator_mech = pnl.ComparatorMechanism(default_variable = [[0],[0]],
... sample=my_action_selection_mech,
... target=my_reward_mech)
Note that ``my_action_selection_mech`` is specified to take an array of length 5 as its input, and therefore
generate one of the same length as its `primary output <OutputPort_Primary>`. Since it is assigned as the **sample**
of the ComparatorMechanism, by default this will create a *SAMPLE* InputPort of length 5, that will not match the
length of the *TARGET* InputPort (the default for which is length 1). This is taken care of, by specifying the
**default_variable** argument as an array with two single-value arrays (corresponding to the *SAMPLE* and *TARGET*
InputPorts). (In this example, the **sample** and **target** arguments are specified as Mechanisms since,
by default, each has only a single (`primary <OutputPort_Primary>`) OutputPort, that will be used; if either had
more than one OutputPort, and one of those was desired, it would have had to be specified explicitly in the
**sample** or **target** argument).
.. _ComparatorMechanism_Class_Reference:
Class Reference
---------------
"""
from collections.abc import Iterable
import numpy as np
import typecheck as tc
from psyneulink.core.components.functions.nonstateful.combinationfunctions import LinearCombination
from psyneulink.core.components.mechanisms.mechanism import Mechanism_Base
from psyneulink.core.components.mechanisms.processing.objectivemechanism import ObjectiveMechanism
from psyneulink.core.components.shellclasses import Mechanism
from psyneulink.core.components.ports.inputport import InputPort
from psyneulink.core.components.ports.outputport import OutputPort
from psyneulink.core.components.ports.port import _parse_port_spec
from psyneulink.core.globals.keywords import \
COMPARATOR_MECHANISM, FUNCTION, INPUT_PORTS, NAME, OUTCOME, SAMPLE, TARGET, VARIABLE, PREFERENCE_SET_NAME, MSE, SSE
from psyneulink.core.globals.parameters import Parameter
from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set, REPORT_OUTPUT_PREF
from psyneulink.core.globals.preferences.preferenceset import PreferenceEntry, PreferenceLevel
from psyneulink.core.globals.utilities import \
is_numeric, is_value_spec, iscompatible, kwCompatibilityLength, kwCompatibilityNumeric, recursive_update
from psyneulink.core.globals.utilities import safe_len
__all__ = [
'ComparatorMechanism', 'ComparatorMechanismError'
]
class ComparatorMechanismError(Exception):
def __init__(self, error_value):
self.error_value = error_value
def __str__(self):
return repr(self.error_value)
class ComparatorMechanism(ObjectiveMechanism):
"""
ComparatorMechanism( \
sample, \
target, \
input_ports=[SAMPLE,TARGET] \
function=LinearCombination(weights=[[-1],[1]]), \
output_ports=OUTCOME)
Subclass of `ObjectiveMechanism` that compares the values of two `OutputPorts <OutputPort>`.
See `ObjectiveMechanism <ObjectiveMechanism_Class_Reference>` for additional arguments and attributes.
Arguments
---------
sample : OutputPort, Mechanism, value, or string
specifies the value to compare with the `target` by the `function <ComparatorMechanism.function>`.
target : OutputPort, Mechanism, value, or string
specifies the value with which the `sample` is compared by the `function <ComparatorMechanism.function>`.
input_ports : List[InputPort, value, str or dict] or Dict[] : default [SAMPLE, TARGET]
specifies the names and/or formats to use for the values of the sample and target InputPorts;
by default they are named *SAMPLE* and *TARGET*, and their formats match the value of the OutputPorts
specified in the **sample** and **target** arguments, respectively (see `ComparatorMechanism_Structure`
for additional details).
function : Function, function or method : default LinearCombination(weights=[[-1],[1]])
specifies the `function <ComparatorMechanism.function>` used to compare the `sample` with the `target`.
Attributes
----------
COMMENT:
default_variable : Optional[List[array] or 2d np.array]
COMMENT
sample : OutputPort
determines the value compared with the `target` by the `function <ComparatorMechanism.function>`.
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
import os
import copy
import numpy as np
from scipy import linalg
from .fiff.constants import FIFF
from .fiff.tag import find_tag
from .fiff.tree import dir_tree_find
from .fiff.proj import read_proj
from .fiff.channels import _read_bad_channels
from .fiff.write import start_block, end_block, write_int, write_name_list, \
write_double, write_float_matrix, start_file, end_file
from .fiff.proj import write_proj
from .fiff import fiff_open
from .fiff.pick import pick_types, pick_channels_forward
class Covariance(object):
"""Noise covariance matrix"""
_kind_to_id = dict(full=1, sparse=2, diagonal=3) # XXX : check
_id_to_kind = {1: 'full', 2: 'sparse', 3: 'diagonal'} # XXX : check
def __init__(self, kind='full'):
self.kind = kind
def load(self, fname):
"""load covariance matrix from FIF file"""
if self.kind in Covariance._kind_to_id:
cov_kind = Covariance._kind_to_id[self.kind]
else:
            raise ValueError('Unknown type of covariance. '
                             'Choose between full, sparse or diagonal.')
# Reading
fid, tree, _ = fiff_open(fname)
cov = read_cov(fid, tree, cov_kind)
fid.close()
self._cov = cov
self.data = cov['data']
def save(self, fname):
"""save covariance matrix in a FIF file"""
write_cov_file(fname, self._cov)
def estimate_from_raw(self, raw, picks=None, quantum_sec=10):
"""Estimate noise covariance matrix from a raw FIF file
"""
# Set up the reading parameters
start = raw['first_samp']
stop = raw['last_samp'] + 1
quantum = int(quantum_sec * raw['info']['sfreq'])
cov = 0
n_samples = 0
# Read data
for first in range(start, stop, quantum):
last = first + quantum
if last >= stop:
last = stop
data, times = raw[picks, first:last]
            if self.kind == 'full':
                cov += np.dot(data, data.T)
            elif self.kind == 'diagonal':
                cov += np.diag(np.sum(data ** 2, axis=1))
            else:
                raise ValueError("Unsupported covariance kind")
n_samples += data.shape[1]
self.data = cov / n_samples # XXX : check
        print('[done]')
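    # The loop above accumulates the (un-demeaned) sample covariance
    # C = (1/N) * sum_t x_t x_t.T in fixed-size chunks; a minimal numpy
    # equivalent for a single in-memory array ``data`` of shape
    # (n_channels, n_samples):
    #
    #   cov = np.dot(data, data.T) / data.shape[1]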
def _regularize(self, data, variances, ch_names, eps):
"""Operates inplace in data
"""
if len(ch_names) > 0:
ind = [self._cov['names'].index(name) for name in ch_names]
reg = eps * np.mean(variances[ind])
for ii in ind:
                data[ii, ii] += reg  # regularize each selected channel's variance once
def whiten_evoked(self, ave, eps=0.2):
"""Whiten an evoked data file
The whitening matrix is estimated and then multiplied to data.
It makes the additive white noise assumption of MNE
realistic.
Parameters
----------
ave : evoked data
        An evoked data set read with fiff.read_evoked
eps : float
The regularization factor used.
Returns
-------
ave : evoked data
Evoked data set after whitening.
W : array of shape [n_channels, n_channels]
The whitening matrix
"""
data = self.data.copy() # will be the regularized covariance
variances = np.diag(data)
# Add (eps x identity matrix) to magnetometers only.
        # This is based on the mean magnetometer variance, as the MNE C code does it.
mag_ind = pick_types(ave['info'], meg='mag', eeg=False, stim=False)
mag_names = [ave['info']['chs'][k]['ch_name'] for k in mag_ind]
self._regularize(data, variances, mag_names, eps)
# Add (eps x identity matrix) to gradiometers only.
grad_ind = pick_types(ave['info'], meg='grad', eeg=False, stim=False)
grad_names = [ave['info']['chs'][k]['ch_name'] for k in grad_ind]
self._regularize(data, variances, grad_names, eps)
# Add (eps x identity matrix) to eeg only.
eeg_ind = pick_types(ave['info'], meg=False, eeg=True, stim=False)
eeg_names = [ave['info']['chs'][k]['ch_name'] for k in eeg_ind]
self._regularize(data, variances, eeg_names, eps)
d, V = linalg.eigh(data) # Compute eigen value decomposition.
# Compute the unique square root inverse, which is a whitening matrix.
# This matrix can be multiplied with data and leadfield matrix to get
# whitened inverse solutions.
d = 1.0 / np.sqrt(d)
W = np.dot(V, d[:,None] * V.T)
# Get all channel indices
n_channels = len(ave['info']['chs'])
ave_ch_names = [ave['info']['chs'][k]['ch_name']
for k in range(n_channels)]
ind = [ave_ch_names.index(name) for name in self._cov['names']]
ave_whiten = copy.copy(ave)
ave_whiten['evoked']['epochs'][ind] = np.dot(W,
ave['evoked']['epochs'][ind])
return ave_whiten, W
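    # Sanity-check sketch (not part of the class): the inverse matrix square
    # root computed above whitens a symmetric positive-definite covariance C,
    # i.e. W C W.T ~= I. Assuming numpy/scipy imported as in this module:
    #
    #   d, V = linalg.eigh(C)
    #   W = np.dot(V, (1.0 / np.sqrt(d))[:, None] * V.T)
    #   np.allclose(np.dot(W, np.dot(C, W.T)), np.eye(C.shape[0]))  # -> True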
def whiten_evoked_and_forward(self, ave, fwd, eps=0.2):
"""Whiten an evoked data set and a forward solution
The whitening matrix is estimated and then multiplied to
forward solution a.k.a. the leadfield matrix.
It makes the additive white noise assumption of MNE
realistic.
Parameters
----------
ave : evoked data
        An evoked data set read with fiff.read_evoked
fwd : forward data
A forward solution read with mne.read_forward
eps : float
The regularization factor used.
Returns
-------
    ave : evoked data
        Evoked data set after whitening.
    fwd : forward solution
        Forward solution after whitening.
W : array of shape [n_channels, n_channels]
The whitening matrix
"""
# handle evoked
ave_whiten, W = self.whiten_evoked(ave, eps=eps)
ave_ch_names = [ch['ch_name'] for ch in ave_whiten['info']['chs']]
# handle forward (keep channels in covariance matrix)
fwd_whiten = copy.copy(fwd)
ind = [fwd_whiten['sol']['row_names'].index(name)
for name in self._cov['names']]
fwd_whiten['sol']['data'][ind] = np.dot(W,
fwd_whiten['sol']['data'][ind])
fwd_whiten['sol']['row_names'] = [fwd_whiten['sol']['row_names'][k]
for k in ind]
fwd_whiten['chs'] = [fwd_whiten['chs'][k] for k in ind]
# keep in forward the channels in the evoked dataset
fwd_whiten = pick_channels_forward(fwd, include=ave_ch_names,
exclude=ave['info']['bads'])
return ave_whiten, fwd_whiten, W
def __repr__(self):
s = "kind : %s" % self.kind
s += ", size : %s x %s" % self.data.shape
s += ", data : %s" % self.data
return "Covariance (%s)" % s
def read_cov(fid, node, cov_kind):
"""Read a noise covariance matrix
Parameters
----------
fid: file
The file descriptor
node: dict
The node in the FIF tree
cov_kind: int
The type of covariance. XXX : clarify
Returns
-------
data: dict
The noise covariance
"""
# Find all covariance matrices
covs = dir_tree_find(node, FIFF.FIFFB_MNE_COV)
if len(covs) == 0:
        raise ValueError('No covariance matrices found')
# Is any of the covariance matrices a noise covariance
for p in range(len(covs)):
tag = find_tag(fid, covs[p], FIFF.FIFF_MNE_COV_KIND)
if tag is not None and tag.data == cov_kind:
this = covs[p]
# Find all the necessary data
tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_DIM)
if tag is None:
                raise ValueError('Covariance matrix dimension not found')
dim = tag.data
tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_NFREE)
if tag is None:
nfree = -1
else:
nfree = tag.data
tag = find_tag(fid, this, FIFF.FIFF_MNE_ROW_NAMES)
if tag is None:
names = []
else:
names = tag.data.split(':')
if len(names) != dim:
                    raise ValueError('Number of names does not match '
                                     'covariance matrix dimension')
tag = find_tag(fid, this, FIFF.FIFF_MNE_COV)
if tag is None:
tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_DIAG)
if tag is None:
                    raise ValueError('No covariance matrix data found')
else:
# Diagonal is stored
data = tag.data
diagmat = True
                    print('\t%d x %d diagonal covariance (kind = %d) found.'
                          % (dim, dim, cov_kind))
else:
from scipy import sparse
if not sparse.issparse(tag.data):
# Lower diagonal is stored
vals = tag.data
data = np.zeros((dim, dim))
data[np.tril(np.ones((dim, dim))) > 0] = vals
data = data + data.T
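                        # data + data.T double-counts the diagonal, so halve it back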
data.flat[::dim+1] /= 2.0
diagmat = False
                        print('\t%d x %d full covariance (kind = %d) found.'
                              % (dim, dim, cov_kind))
else:
diagmat = False
data = tag.data
                        print('\t%d x %d sparse covariance (kind = %d) found.'
                              % (dim, dim, cov_kind))
# Read the possibly precomputed decomposition
tag1 = find_tag(fid, this, FIFF.FIFF_MNE_COV_EIGENVALUES)
tag2 = find_tag(fid, this, FIFF.FIFF_MNE_COV_EIGENVECTORS)
if tag1 is not None and tag2 is not None:
eig = tag1.data
eigvec = tag2.data
else:
eig = None
eigvec = None
# Read the projection operator
projs = read_proj(fid, this)
# Read the bad channel list
bads = _read_bad_channels(fid, this)
# Put it together
cov = dict(kind=cov_kind, diag=diagmat, dim=dim, names=names,
data=data, projs=projs, bads=bads, nfree=nfree, eig=eig,
eigvec=eigvec)
return cov
    raise ValueError('Did not find the desired covariance matrix')
###############################################################################
# Writing
def write_cov(fid, cov):
"""Write a noise covariance matrix
Parameters
----------
fid: file
The file descriptor
cov: dict
The noise covariance matrix to write
"""
start_block(fid, FIFF.FIFFB_MNE_COV)
# Dimensions etc.
write_int(fid, FIFF.FIFF_MNE_COV_KIND, cov['kind'])
write_int(fid, FIFF.FIFF_MNE_COV_DIM, cov['dim'])
if cov['nfree'] > 0:
write_int(fid, FIFF.FIFF_MNE_COV_NFREE, cov['nfree'])
# Channel names
if cov['names'] is not None:
write_name_list(fid, FIFF.FIFF_MNE_ROW_NAMES, cov['names'])
# Data
if cov['diag']:
write_double(fid, FIFF.FIFF_MNE_COV_DIAG, cov['data'])
else:
# Store only lower part of covariance matrix
dim = cov['dim']
        mask = np.tril(np.ones((dim, dim), dtype=bool)) > 0  # np.bool was removed from modern numpy
vals = cov['data'][mask].ravel()
write_double(fid, FIFF.FIFF_MNE_COV, vals)
# Eigenvalues and vectors if present
if cov['eig'] is not None and cov['eigvec'] is not None:
write_float_matrix(fid, FIFF.FIFF_MNE_COV_EIGENVECTORS, cov['eigvec'])
write_double(fid, FIFF.FIFF_MNE_COV_EIGENVALUES, cov['eig'])
# Projection operator
write_proj(fid, cov['projs'])
# Bad channels
if cov['bads'] is not None:
start_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)
write_name_list(fid, FIFF.FIFF_MNE_CH_NAME_LIST, cov['bads'])
end_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)
# Done!
end_block(fid, FIFF.FIFFB_MNE_COV)
def write_cov_file(fname, cov):
"""Write a noise covariance matrix
Parameters
----------
fname: string
The name of the file
cov: dict
The noise covariance
"""
fid = start_file(fname)
try:
write_cov(fid, cov)
except Exception as inst:
input = """
xsddbi (61)
nqtowev (11)
xwohr (82)
flejt (36)
idwpug (54)
uoxzkp (51)
choeijs (54)
gmsjkn (65)
txszqu (687) -> mvjqmad, lwqlyjq, jlgnsu
zhlfdac (15)
htouwcr (74)
vlbsr (56)
titbn (9)
bvrpb (86)
wuwjp (54)
umnqkb (160) -> nbrvl, bcmbao, vfimqtl
uwnml (29)
cdvhmy (42)
xghhu (306) -> molth, atqewc
vcvayah (71)
fujwb (187) -> tyjyc, xyemll
zqnjd (91) -> jxsmuey, uelnii, vcwezm, uxnwtp
wphtnvm (72)
xgihtsx (15)
fwvvidu (80)
lonwb (1656) -> rydrp, mgyixhn, cjhtxo
qyasuw (41)
vbvug (6)
mkrjamh (154) -> ecbog, owaebx
ghvpg (98)
eazvkxv (66)
jguufio (61)
udpcyt (71)
xerbuu (38)
sxzpils (70)
looigzn (10)
znmumx (77)
uxzwwe (67)
wuegrv (99)
owttcz (91)
vkhazkn (37) -> oactn, ftxif, rxlbou, bkfav
rddnfg (10) -> mvgmbru, zlazoqs
zzsqfm (99)
fhjzpjw (19)
zwxwfin (50)
ocdzc (26)
cohps (52)
dxhrpq (46)
yeheld (55)
ekxczvo (81)
pxlzcx (589) -> vjvit, cnblx, bwiqe, pjsbxvk, ipqjxa, zkikz
offkzc (95) -> wymlvix, felrwte, bzublv
bavyg (22) -> szszezd, lurhq, ltncvl, fmwvok, frikrv, tumwln, xjtujzp
awjwff (18)
sjhwgq (8316) -> ydqgvnf, oztasey, qkmkwgl
vmyrdkl (53)
mzrwaa (48)
njqemt (30)
objssr (91) -> xujmi, oqihtt
bcpbvya (75)
fqayb (69)
vwksn (19) -> zwxxii, owttcz
czmzol (145) -> qzayss, ufrrrzi
inija (25)
eppufdw (555) -> brwsjee, laoouso, jgfcyze
mmtzk (39) -> utispep, onkhx
zoofu (129) -> cyczt, qargs
msryw (260)
etvpko (18)
wxudgdd (84)
ymkax (45) -> znmumx, hnjuqgw
uxbjym (87)
msskm (69)
nmtia (75)
jawbtmo (46)
bpelccj (187) -> ctzhawr, qhldpg
dbexmj (365) -> bijprk, iotkxfd
bxcdldt (16)
fhdqcbx (25) -> gefut, mulybo
iunkpe (15)
yzvqo (80)
fbgguv (57) -> bkomu, tynxlau, sfruur, zxvbb, khupkt, xkntkvz
yaripcu (8)
acfmyv (79)
sohuv (18)
aahhxau (233) -> rbauxvx, neliq
yuhmnjs (98)
xpzpv (206) -> mxtxm, kgzyk, yzpaxlz, vfxqcbq, lcgtwj
ovhhkoc (52)
stwubkv (70) -> uzzeydh, zknpmnc
otbxl (54)
bfroq (39)
nkbmoin (341) -> vgqmfj, weygson, knbems, gdmlk, ecfrqpl, tbaai, sfohocq
twazofk (258) -> dntwqr, vugavk
venvbai (32)
pcqpqjk (38)
apqby (37) -> xkzlyvh, vlsjsgg
tgsicj (59)
rsvixyg (32)
whjcp (245)
rmnkgss (22)
zbmwego (55)
lptmne (17) -> fkjlcz, jinwsas, qnevjto, wfjks
rtyegs (76) -> nhbbd, yqqun
cavlg (1148) -> lsaroxx, evkau, gldgrng
kqswas (504) -> ntcddy, bwqmns, vtvsd, oxoqy
tmvfp (46)
dlhyzjw (79)
xsfwcgq (657) -> inknun, seihhya, qrlhp, bchtcht, rswqvx
ekleund (68)
rihlj (72)
oldrtya (66)
yiqnfd (62)
sgpiiir (14)
vbojbp (66)
qnscqms (197) -> mernsee, ubjuhxp, mlfxnna
vfxzkq (660) -> ukrqfm, pnsvthy, qdcos
kkeuh (273) -> urfhlf, hjtkb
sakhif (30)
yrdhq (71)
ckcjr (50) -> owgsk, vdnphf
ndvdj (10)
zzcqptv (87)
aemyrqj (23)
vbauj (49)
rcumpgx (18)
ijyhvsj (25)
gazaoty (83)
lugwl (228)
msrrlv (90)
enxbya (19)
wlyzbz (99)
gmrqpdk (40)
gefut (75)
bafnaij (57)
jgfcyze (166) -> ziqyhi, wrwgm, hdikur, bmqhrtg
rcmyfr (281)
pohdy (87) -> frcqooy, idhvksi
wkfjzs (81)
dcgphc (16)
bgqvm (38)
ltgnnr (87)
xjtujzp (1565) -> soryrk, zkvopb, xufneyr
idhvksi (69)
yjnrjde (65)
lfykdub (61)
dnjydir (41)
ifgfg (197) -> jcbgta, aemyrqj
grlofut (23)
tjaqrw (96)
gjgcvyp (85)
kwavcf (254) -> vmpmig, djeee
mdnqs (101) -> tpmhs, essvr
gqlyhql (159) -> uwrhuw, ypanow
vjzfc (25)
guauivo (42) -> puvhc, bjcsjrr, ccjtpv, ibwve, evbvdgl, stwubkv, kwxpnrs
pbohgz (74) -> sbfiv, igbhyk, rhxcyd
bkomu (42437) -> krbpgmk, ekgbf, yqlxeb, ogyiesm, poypmax
yjoyyvl (55) -> sjioze, nigkvxl, itjxq
efobdsd (218) -> yqnso, glqspl
zxvbb (56366) -> fvkxwt, xlanm, gxitq
cvrga (47) -> vagjk, ghvpg
mwrnosj (52)
dxffr (15)
dcsxekv (41)
kbmse (68)
tqyldbt (59)
nvtyg (31)
jnzqvia (67) -> divak, cmgvs
rhxcyd (44)
hnjuqgw (77)
vdnphf (89)
qqnywdg (160) -> lrjnzf, luhxcq, whgpim
sfizdf (1053) -> aahhxau, shiiodm, cvnfak, whjcp
zdawjkr (34)
tracdgp (27)
tcvgl (305) -> ljfvbjd, bafnaij
iuwwa (42)
iekwfia (52)
odoxr (72)
uravet (102) -> hzzxjxa, mvdzfo
jvnuwap (66)
kjdjdr (76) -> kpdmptf, hsqhwef
vycgaoz (82) -> jqulm, ziwlgdb
ifdkle (202)
gtpyffj (812) -> kcpwmyz, emkzxk, hwuabde, efgqua
oxoqy (17) -> lktbqq, zukio, bcpbvya
kfngejn (49)
anhjp (75)
xwwzp (14358) -> lonfrp, eppufdw, nmxqs
wcpyi (205)
wzxqipp (88)
ekgbf (6702) -> bckbud, fzzlnwi, cvprip
ziwlgdb (63)
bxqjg (70)
bverp (49)
rlgbvvj (13)
sqvxf (37)
ltncvl (977) -> sktewq, hydeeqe, ibdjxl
fxqng (90)
jcdmhvd (157) -> twgrfm, dxhrpq
kpgatn (41)
znpvlac (97) -> vmyrdkl, aideelj
vifbi (634) -> rkwkwd, onqax, cfrcco, onamlmy, zoofu, vwosnfn
gezqw (12)
hagrq (97) -> fhecgzf, mefajk, uyrxc, gdclwzu
gdclwzu (60)
sczyp (19)
cvlkltp (151) -> guiej, vffqp
fimqsn (125) -> ehvxt, xyoiv, mikast
lktbqq (75)
tyjyc (55)
xoqvvx (18) -> xawfk, ojrjqv
bcmbao (89)
qntpdh (93)
iieico (12)
modms (37)
xwwfi (82) -> cpjkea, hsexbak, dxaemlq, wlyzbz
xxoil (23)
uelnii (82)
shiiodm (53) -> hclea, nrduy
hqwkq (42)
twgrfm (46)
yrozpm (246) -> btukw, fbyju
onqax (301) -> vtlvs, ccnpjdf
klhfca (81)
lvcsn (95)
jpcseag (92)
tgwgno (202) -> yyyub, ofcyw
ztjquwi (545) -> ptawnqq, ziprwty, yzfzbw, hpqux, baaqr, bwugxhi
ealyedu (71)
debir (48)
ppossv (51)
kzxyyd (62)
bchtcht (185) -> rsvixyg, cppdkv
ndeop (9)
htujh (695) -> nzhnurq, btbnj, ghsxqq
lkwogh (53)
ofyuao (159) -> llhiw, nbjpcc
vxlypal (9)
dvipr (67) -> udnmqte, osnjzpt, swujbg
ybnehoi (50) -> hatmr, alkizae
xkntkvz (44502) -> lwugjj, cstmu, rsjffj, pxlzcx, jtmuv
fhvyhk (252) -> boqgjn, rzzezfx
krmbsc (47)
jkymeqw (26)
xjypx (70)
jytapdu (15) -> vqvwkm, jpcseag, kgkpfg
btrepl (15)
umnkslz (13)
lxbsif (72)
adunt (71)
cupeghk (67)
xcckt (7)
mutwmaa (39)
qmpncds (39)
ypanow (88)
zhhhntj (51)
tcqnso (85)
uakcbox (68)
sckgsn (38)
bgpjb (136) -> aqaems, jkymeqw
gxitq (83) -> wshlljj, krpda, aasxby, ywhjz, stnnf, nfkmi
ulehitr (187) -> knrtys, mnfep
galojia (25)
pjxauhw (61)
wvhvw (229) -> xcckt, duzgl
cpjkea (99)
jqulm (63)
gpqhjv (20) -> wzxqipp, ykxnd, dhsopiv
ipqjxa (51) -> gsmcgor, inlcow, hdmcpq, xhmornc, lisfhnn
ofwtjhw (38)
pkceuqw (62) -> isqxwp, evkrnfo, pcqpqjk
hmnvh (97)
cxcah (75)
fkjof (90)
iinxvw (14)
ckypedd (61)
vxwobup (543) -> sejkvk, olrdlw, hfxsib
hpqux (196) -> ocdzc, xlvjz
gyuyu (173) -> xfurlrr, etlce
rffwau (96)
bfulyh (154) -> tjuqz, tzqdwe, ayeqoz
llhiw (88)
vmsmdqx (72)
aasxby (832) -> lvcdsgp, dhejgd, nzvpgv
qlkjhsi (25)
chhhge (34)
rveuz (18)
cmbnwse (112) -> jphmpr, tlkhxww
ppqdnd (175)
szvty (18)
crbcsqt (382) -> oakbrw, dswjpxm
xqxgd (76)
dxddn (12)
usitmiz (81)
vqrtnp (44)
axkhzf (94)
tzszs (299) -> jieijuo, ndeop
frjtwx (94)
tzqdwe (32)
sjioze (46)
yzpaxlz (216) -> heqbpd, dphmg
lewbxa (43)
knbems (173) -> looigzn, qdkrr
bwqmns (210) -> dcgphc, wwzli
vtwdqkd (61) -> vcvayah, wyhnmbs
owgsk (89)
dpfvy (54)
qongfb (48)
qilhudr (125) -> qbcar, wuwjp, umxurvd
aahqtsf (1755) -> hfhswyd, znpvlac, bruqu
dxaemlq (99)
gybikf (185) -> uwnml, noqjuo
vjvit (194) -> jdhewio, wwhlo, crbcsqt
zkikz (194) -> ttqpi, ifdkle, qdyzunw, wkcrce, jcezw, qeiijk
wphgd (7)
nmxqs (180) -> sftqzaf, gyuyu, tkvmtol, vwksn, doppnr
bpgdb (190) -> jxpixqi, kygcziz
tmigcw (151) -> gmsjkn, njmlk
lodcj (54)
loxjzpo (59)
fxfznc (32)
hwuabde (207) -> mvamo, bverp
ixroc (98)
wdzhfmw (126) -> galojia, ibyrsdo
bznst (20)
dkoike (123) -> wuckr, mfbnys, gmrqpdk
atyaqz (112) -> asmysiq, paoitb, okpdcz
bsfsayz (13)
ffhxi (15)
rgkrl (85)
fhecgzf (60)
lwugjj (361) -> hykxm, hechfb, edtkfvv, lonwb
qtraszq (13)
qargs (95)
adwir (18)
bnmshvm (145) -> vfmlyg, ychjmik
isqxwp (38)
yblsbf (83)
azzysl (36) -> anhjp, ixnlvyo, mmndzs, alaug
ysdqkw (87)
olevfy (158) -> wuipwl, etvpko, bavaf
tvcxl (25) -> vlmzodk, vbojbp, oldrtya
fhyvz (40)
hzfwtzf (75)
gdmlk (5) -> bgavnd, iqpcu
utojr (82) -> nuytv, frjtwx, guurpo
kcpwmyz (97) -> oykqvif, ukrjos, mwrnosj, bdjlzf
qvndh (115) -> fyeuhqh, kmbrqv, kgqgct, qtraszq
fvjcbib (98)
ziiijv (85)
lbnenyf (75) -> tdhxy, ozlqcn
nbwkld (54)
blpjnzf (123) -> gapzarg, syxkp
jhruwih (84)
ecbog (27)
ccqldu (46)
izyxppl (58)
lfwru (177) -> smiucuf, libtrl
cvnfak (123) -> lfykdub, yxqaa
jwjrj (89)
ytbyct (73) -> yaehsu, cxcah
oqihtt (68)
mvjqmad (359) -> swaxyxe, perbvgz
vahnoue (42)
egrimlf (42)
uewchd (1998) -> oqqme, vaeeqvi
hbtur (295) -> mgqkhic, jkvgvz
qekpce (78)
anehvd (75) -> frezf, wvdoa
laeaf (38)
hipfszh (149) -> dxavaum, qkapxbc, adwir
parvx (15)
ayeqoz (32)
weunegg (85)
cbbtbz (85)
fkjwn (67)
takxgz (49)
heqbpd (6)
yxpnzkn (212) -> dxavw, nxpozme, wdjcb
vkwsodp (38)
asmysiq (51)
ilvblzm (85)
laohhtn (86)
onoyb (86)
sfwaimh (24)
grtrt (96)
oekhu (40)
jxqzmuv (7)
vtlvs (9)
jjdmu (84)
qzobjfo (158) -> vetsll, utsfqn, dcsxekv
uwrfay (60)
guurpo (94)
kpnxkyy (32)
aqugisv (965) -> rddnfg, qozmho, pbohgz, lupyc
dswjpxm (11)
smiucuf (53)
mfamq (74)
agfnncm (95)
qshoazt (1737) -> ockzlpp, yeheld, judfhye
wngzh (9094) -> guauivo, aiwvi, vxruziw, ikqijv
hawmd (40)
kxkwxoj (32) -> pyvwl, akdffhk
qdpoyl (4734) -> aahqtsf, lnontej, lfiteup, oaopxo
ftkflbo (27)
ukrjos (52)
fdzqz (70)
dvncmy (1046) -> hzfwtzf, nmtia, ygiyze, zuquy
gsfnr (96)
cprbzvp (28)
tyywgp (57)
gsmocx (118) -> krmbsc, ywmif
laoouso (122) -> xvdkt, eogzpga
yzfzbw (178) -> vjevs, paqihg
cxhuwiw (46)
vfimqtl (89)
dkcqdx (132) -> wemjk, kyuneoo
dszip (73) -> vzbpn, xqxgd
chljtta (157) -> dpfvy, nbwkld
zzcagx (165) -> wuvmf, nrvsqf
xfglmz (740) -> nrlhen, rfkcylj, ymkax
pnsvthy (103) -> cadasac, rpxmv, azkmt
xokmont (84)
divak (77)
lonfrp (60) -> pohdy, bnmshvm, oheank, qwlze, dszip
yngqiv (236) -> cayigxg, ixomyeb
lfiteup (1188) -> qnsqwm, qdpnnex, cmbnwse, onaatvy
mefajk (60)
eqbwx (23)
zuoczei (64) -> ismrc, dbexmj, oeqqj
kygcziz (14)
nwxyrtn (87) -> kpgatn, egcjfjo
loljfo (10)
bruqu (17) -> uwylbft, qntpdh
wohvpbn (67)
qgvqg (315) -> iekwfia, qhrxvzj
wwhlo (36) -> hcyjfzz, dbuqsj, latmzh, geztlsi
oheank (225)
ujpgmwm (50)
ndgzf (37)
rgzrnnl (52)
vgjgz (24)
fvwll (147) -> axkhzf, mqpbpgq
iqtemv (69)
yfrmup (6)
rzzezfx (56)
qkpaxtt (76) -> wohvpbn, fkjwn
tjuqz (32)
ztfhz (8)
wayftw (17)
qbcar (54)
neliq (6)
yxqaa (61)
lupyc (114) -> rsqihar, moivx, xxoil, vggstn
dlkhb (56)
uctlk (36)
ssyfvrj (56)
ibwve (36) -> zfmas, sddvn
brwsjee (46) -> uqpprye, ejovcx
wgcgdvt (13)
ndhitxf (141) -> uoxzkp, ppossv
seblnmz (31)
osnjzpt (36)
cihzh (85)
dtkdwp (42)
xmogef (10)
btbnj (186) -> jxqzmuv, tycgm, xewbn, rjayavy
gcrbrn (90)
jiaho (7)
czuwoe (325) -> hxzjg, cfiyqh
hxzjg (6)
zfshwt (80)
frycrnj (44)
owcwvug (85)
cnsbonb (64)
gwudp (77)
xsjbc (89)
fvkxwt (7466) -> zuoczei, gjwzw, qsyocuq
whgpim (68)
tzhoqw (84)
bbbms (56)
ubhlb (350)
zzbzwdm (34)
cwixiq (35) -> jvnuwap, hkweu
ecytwe (249)
ckombz (80)
bzbfpkb (156) -> zhrsp, rgzrnnl
vfkjif (42)
oinyx (42)
necqpdx (48)
qpden (7)
ruabtb (88) -> xvwio, lhbwbik
btukw (52)
rkofvt (1150) -> gkqgru, xoqvvx, iqler, dhaxwd
utyrttu (89) -> wjaocmd, xwohr, rxflxr
nnnkplx (23)
kgzyk (90) -> iqymm, asrwh
ukrqfm (96) -> yuiicxp, qjnhwpq
aulgwyb (852) -> rhjxbx, weguko, ppqdnd, csmul, srscbv, kriwb
jppsaym (95)
vfmlyg (40)
weygson (129) -> kpnxkyy, wdcltyl
vzbpn (76)
foibc (12)
hsqhwef (48)
hpgwq (18)
crothf (108) -> pwfdn, hawmd
nfmicyi (65)
cjeauo (97)
ptawnqq (65) -> kcjwb, bvknwgq, xsddbi
oicysoz (33) -> awgjkiv, ndszsz
rdvtzr (11)
swqym (72)
cppdkv (32)
uuapl (38)
yriibfp (27)
nfklqi (22)
xhmornc (148) -> dnjydir, icnav, qyasuw
lhbwbik (72)
wshlljj (1198) -> ojqqw, olevfy, iwiaa
mjtuzfr (23) -> ukaflr, zzsqfm
zxavxe (34) -> swqym, yreenth, ubvheu
pxhwr (7)
wyhnmbs (71)
nigkvxl (46)
perbvgz (23)
kyuneoo (64)
djeee (15)
lkfdr (85)
evkau (63)
tridq (59)
eepdcje (37) -> cxhuwiw, damyp, tmvfp
nzdbs (50)
ejnqcb (74)
dfzwcc (74)
efgqua (62) -> klhfca, usitmiz, aceqrlu
mqpbpgq (94)
boqgjn (56)
bxkxp (81)
vbzwk (48)
gldgrng (63)
kgkpfg (92)
jutgp (84)
zouxmz (48)
uktpcar (155) -> ebwgi, pozpmyh
dhaxwd (167) -> pxhwr, qxyramq, wphgd
yreenth | |
import copy
import time
import re
from datetime import timedelta
from gi.repository import Gio, Gtk
from zope.interface import Interface
from mxdc import Object, Signal, IBeamline, Property
from mxdc import Registry
from mxdc.conf import load_cache, save_cache
from mxdc.engines.automation import Automator
from mxdc.engines.diffraction import DataCollector
from mxdc.utils import converter, datatools, misc
from mxdc.utils.log import get_module_logger
from mxdc.widgets import datawidget, dialogs, arrowframe
from mxdc.widgets.imageviewer import ImageViewer, IImageViewer
from . import common
from .microscope import IMicroscope
from .samplestore import ISampleStore, SampleQueue, SampleStore
logger = get_module_logger(__name__)
(
RESPONSE_REPLACE_ALL,
RESPONSE_REPLACE_BAD,
RESPONSE_SKIP,
RESPONSE_CANCEL,
) = list(range(4))
class IDatasets(Interface):
"""Sample information database."""
pass
class ConfigDisplay(object):
Formats = {
'resolution': '{:0.3g} Å',
'delta': '{:0.3g}°',
'range': '{:0.1f}°',
'start': '{:0.1f}°',
'wedge': '{:0.1f}°',
'energy': '{:0.3f} keV',
'distance': '{:0.1f} mm',
'exposure': '{:0.3g} s',
'attenuation': '{:0.1f} %',
'strategy_desc': '{}',
'first': '{}',
'name': '{}',
'strategy': '{}',
'helical': '{}',
'inverse': '{}',
}
def __init__(self, item, widget, label_prefix='run'):
self.item = item
self.widget = widget
self.prefix = label_prefix
self.item.connect('notify::info', self.on_item_changed)
def on_item_changed(self, item, param):
for name, format in list(self.Formats.items()):
field_name = '{}_{}_lbl'.format(self.prefix, name)
field = getattr(self.widget, field_name, None)
if field and name in self.item.props.info:
field.set_text(format.format(self.item.props.info[name]))
class AutomationController(Object):
class StateType:
STOPPED, PAUSED, ACTIVE, PENDING = list(range(4))
state = Property(type=int, default=0)
def __init__(self, widget):
super().__init__()
self.widget = widget
self.beamline = Registry.get_utility(IBeamline)
self.image_viewer = Registry.get_utility(IImageViewer)
self.run_dialog = datawidget.DataDialog()
self.widget.auto_edit_acq_btn.set_popover(self.run_dialog.popover)
self.automation_queue = SampleQueue(self.widget.auto_queue)
self.automator = Automator()
self.config = datawidget.RunItem()
self.config_display = ConfigDisplay(self.config, self.widget, 'auto')
self.start_time = 0
self.pause_info = None
self.automator.connect('done', self.on_done)
self.automator.connect('paused', self.on_pause)
self.automator.connect('stopped', self.on_stopped)
self.automator.connect('progress', self.on_progress)
self.automator.connect('sample-done', self.on_sample_done)
self.automator.connect('sample-started', self.on_sample_started)
self.automator.connect('started', self.on_started)
self.automator.connect('error', self.on_error)
self.connect('notify::state', self.on_state_changed)
# default
params = self.run_dialog.get_default(strategy_type=datawidget.StrategyType.SINGLE)
params.update({
'resolution': converter.dist_to_resol(
250, self.beamline.detector.mm_size, self.beamline.energy.get_position()
),
'energy': self.beamline.energy.get_position(),
})
self.run_dialog.configure(params)
self.config.props.info = self.run_dialog.get_parameters()
# btn, type, options method
self.tasks = {
'mount': self.widget.mount_task_btn,
'center': self.widget.center_task_btn,
'pause1': self.widget.pause1_task_btn,
'acquire': self.widget.acquire_task_btn,
'analyse': self.widget.analyse_task_btn,
'pause2': self.widget.pause2_task_btn,
}
self.task_templates = [
(self.widget.mount_task_btn, Automator.Task.MOUNT),
(self.widget.center_task_btn, Automator.Task.CENTER),
(self.widget.pause1_task_btn, Automator.Task.PAUSE),
(self.widget.acquire_task_btn, Automator.Task.ACQUIRE),
(self.widget.analyse_task_btn, Automator.Task.ANALYSE),
(self.widget.pause2_task_btn, Automator.Task.PAUSE)
]
self.options = {
'capillary': self.widget.center_cap_option,
'loop': self.widget.center_loop_option,
'screen': self.widget.analyse_screen_option,
'process': self.widget.analyse_process_option,
'anomalous': self.widget.analyse_anom_option,
'powder': self.widget.analyse_powder_option,
'analyse': self.widget.analyse_task_btn
}
self.setup()
def import_from_cache(self):
config = load_cache('auto')
if config:
self.config.info = config['info']
for name, btn in list(self.tasks.items()):
if name in config:
btn.set_active(config[name])
for name, option in list(self.options.items()):
if name in config:
option.set_active(config[name])
def get_options(self, task_type):
if task_type == Automator.Task.CENTER:
for name in ['loop', 'crystal', 'raster', 'capillary']:
if name in self.options and self.options[name].get_active():
return {'method': name}
elif task_type == Automator.Task.ACQUIRE:
options = {}
if self.options['analyse'].get_active():
for name in ['screen', 'process', 'powder']:
if self.options[name].get_active():
options = {'analysis': name, 'anomalous': self.options['anomalous'].get_active()}
break
options.update(self.config.props.info)
options['energy'] = self.beamline.energy.get_position() # use current beamline energy
return options
return {}
def get_task_list(self):
return [
{'type': kind, 'options': self.get_options(kind)}
for btn, kind in self.task_templates if btn.get_active()
]
def get_sample_list(self):
return self.automation_queue.get_samples()
def on_state_changed(self, obj, param):
if self.props.state == self.StateType.ACTIVE:
self.widget.auto_collect_icon.set_from_icon_name(
"media-playback-pause-symbolic", Gtk.IconSize.SMALL_TOOLBAR
)
self.widget.auto_stop_btn.set_sensitive(True)
self.widget.auto_collect_btn.set_sensitive(True)
self.widget.auto_sequence_box.set_sensitive(False)
self.widget.auto_groups_btn.set_sensitive(False)
elif self.props.state == self.StateType.PAUSED:
self.widget.auto_progress_lbl.set_text("Automation paused!")
self.widget.auto_collect_icon.set_from_icon_name(
"media-playback-start-symbolic", Gtk.IconSize.SMALL_TOOLBAR
)
self.widget.auto_stop_btn.set_sensitive(True)
self.widget.auto_collect_btn.set_sensitive(True)
self.widget.auto_sequence_box.set_sensitive(False)
self.widget.auto_groups_btn.set_sensitive(False)
elif self.props.state == self.StateType.PENDING:
self.widget.auto_collect_icon.set_from_icon_name(
"media-playback-start-symbolic", Gtk.IconSize.SMALL_TOOLBAR
)
self.widget.auto_sequence_box.set_sensitive(False)
self.widget.auto_collect_btn.set_sensitive(False)
self.widget.auto_stop_btn.set_sensitive(False)
self.widget.auto_groups_btn.set_sensitive(False)
else:
self.widget.auto_collect_icon.set_from_icon_name(
"media-playback-start-symbolic", Gtk.IconSize.SMALL_TOOLBAR
)
self.widget.auto_stop_btn.set_sensitive(False)
self.widget.auto_collect_btn.set_sensitive(True)
self.widget.auto_sequence_box.set_sensitive(True)
self.widget.auto_groups_btn.set_sensitive(True)
def setup(self):
self.import_from_cache()
self.widget.auto_edit_acq_btn.connect('clicked', self.on_edit_acquisition)
self.run_dialog.data_save_btn.connect('clicked', self.on_save_acquisition)
for btn in list(self.tasks.values()):
btn.connect('toggled', self.on_save_acquisition)
self.widget.auto_collect_btn.connect('clicked', self.on_start_automation)
self.widget.auto_stop_btn.connect('clicked', self.on_stop_automation)
self.widget.auto_clean_btn.connect('clicked', self.on_clean_automation)
self.widget.auto_groups_btn.set_popover(self.widget.auto_groups_pop)
self.widget.center_task_btn.bind_property('active', self.widget.center_options_box, 'sensitive')
self.widget.analyse_task_btn.bind_property('active', self.widget.analyse_options_box, 'sensitive')
self.widget.acquire_task_btn.bind_property('active', self.widget.acquire_options_box, 'sensitive')
def on_edit_acquisition(self, obj):
info = self.config.info
info['energy'] = self.beamline.energy.get_position()
self.run_dialog.configure(info)
def on_save_acquisition(self, obj):
self.config.props.info = self.run_dialog.get_parameters()
cache = {
'info': self.config.info,
}
for name, btn in list(self.tasks.items()):
cache[name] = btn.get_active()
for name, option in list(self.options.items()):
cache[name] = option.get_active()
save_cache(cache, 'auto')
def on_progress(self, obj, fraction, message):
used_time = time.time() - self.start_time
remaining_time = 0 if not fraction else int((1 - fraction) * used_time / fraction)
eta_time = timedelta(seconds=remaining_time)
self.widget.auto_eta.set_markup(f'<small><tt>{eta_time}</tt></small>')
self.widget.auto_pbar.set_fraction(fraction)
self.widget.auto_progress_lbl.set_text(message)
def on_sample_done(self, obj, uuid):
self.automation_queue.mark_progress(uuid, SampleStore.Progress.DONE)
def on_sample_started(self, obj, uuid):
self.automation_queue.mark_progress(uuid, SampleStore.Progress.ACTIVE)
def on_done(self, obj, data):
self.props.state = self.StateType.STOPPED
self.widget.auto_progress_lbl.set_text("Automation Completed.")
eta_time = timedelta(seconds=0)
self.widget.auto_eta.set_markup(f'<small><tt>{eta_time}</tt></small>')
self.widget.auto_pbar.set_fraction(1.0)
def on_stopped(self, obj, data):
self.props.state = self.StateType.STOPPED
self.widget.auto_progress_lbl.set_text("Automation Stopped.")
def on_pause(self, obj, paused, reason):
if paused:
self.props.state = self.StateType.PAUSED
if reason:
# Build the dialog message
self.pause_info = dialogs.make_dialog(
Gtk.MessageType.WARNING, 'Automation Paused', reason,
buttons=(('OK', Gtk.ResponseType.OK),)
)
self.pause_info.run()
if self.pause_info:
self.pause_info.destroy()
self.pause_info = None
else:
self.props.state = self.StateType.ACTIVE
if self.pause_info:
self.pause_info.destroy()
self.pause_info = None
def on_error(self, obj, reason):
# Build the dialog message
error_dialog = dialogs.make_dialog(
Gtk.MessageType.WARNING, 'Automation Error!', reason,
buttons=(('OK', Gtk.ResponseType.OK),)
)
error_dialog.run()
error_dialog.destroy()
def on_started(self, obj, data):
self.start_time = time.time()
self.props.state = self.StateType.ACTIVE
logger.info("Automation Started.")
def on_stop_automation(self, obj):
self.automator.stop()
def on_clean_automation(self, obj):
self.automation_queue.clean()
def on_start_automation(self, obj):
if self.props.state == self.StateType.ACTIVE:
self.widget.auto_progress_lbl.set_text("Pausing automation ...")
self.automator.pause()
elif self.props.state == self.StateType.PAUSED:
self.widget.auto_progress_lbl.set_text("Resuming automation ...")
self.automator.resume()
elif self.props.state == self.StateType.STOPPED:
tasks = self.get_task_list()
samples = self.get_sample_list()
if not samples:
msg1 = 'Queue is empty!'
msg2 = 'Please add samples and try again.'
dialogs.warning(msg1, msg2)
else:
self.widget.auto_progress_lbl.set_text("Starting automation ...")
self.props.state = self.StateType.PENDING
self.automator.configure(samples, tasks)
self.widget.auto_pbar.set_fraction(0)
self.automator.start()
self.image_viewer.set_collect_mode(True)
class DatasetsController(Object):
class Signals:
changed = Signal('samples-changed', arg_types=(object,))
active = Signal('active-sample', arg_types=(object,))
selected = Signal('sample-selected', arg_types=(object,))
def __init__(self, widget):
super().__init__()
self.widget = widget
self.beamline = Registry.get_utility(IBeamline)
self.collector = DataCollector()
self.collecting = False
self.stopping = False
self.pause_info = False
self.start_time = 0
self.frame_monitor = None
self.starting = True
self.names = datatools.NameManager()
self.monitors = {}
self.image_viewer = ImageViewer()
self.microscope = Registry.get_utility(IMicroscope)
self.run_editor = datawidget.RunEditor()
self.editor_frame = arrowframe.ArrowFrame()
self.editor_frame.add(self.run_editor.data_form)
self.widget.datasets_overlay.add_overlay(self.editor_frame)
self.run_store = Gio.ListStore(item_type=datawidget.RunItem)
self.run_store.connect('items-changed', self.on_runs_changed)
self.collector.connect('done', self.on_done)
self.collector.connect('paused', self.on_pause)
self.collector.connect('stopped', self.on_stopped)
self.collector.connect('progress', self.on_progress)
self.collector.connect('started', self.on_started)
Registry.add_utility(IDatasets, self)
self.setup()
def import_from_cache(self):
runs = load_cache('runs')
if runs:
self.run_editor.set_points(self.microscope.props.points)
for run in runs:
new_item = datawidget.RunItem(
run['info'], state=run['state'], created=run['created'], uid=run['uuid']
)
self.run_store.insert_sorted(new_item, datawidget.RunItem.sorter)
self.widget.datasets_collect_btn.set_sensitive(True)
def update_positions(self):
pos = 0
item = self.run_store.get_item(pos)
while item:
item.props.position = pos
pos += 1
item = self.run_store.get_item(pos)
def setup(self):
self.widget.datasets_list.bind_model(self.run_store, self.create_run_config)
self.widget.datasets_viewer_box.add(self.image_viewer)
self.widget.datasets_clean_btn.connect('clicked', self.on_clean_runs)
self.widget.datasets_list.connect('row-activated', self.on_row_activated)
self.widget.dsets_dir_btn.connect('clicked', self.open_terminal)
self.run_editor.data_delete_btn.connect('clicked', self.on_delete_run)
self.run_editor.data_copy_btn.connect('clicked', self.on_copy_run)
self.run_editor.data_recycle_btn.connect('clicked', self.on_recycle_run)
self.run_editor.data_save_btn.connect('clicked', self.on_save_run)
self.sample_store = Registry.get_utility(ISampleStore)
self.sample_store.connect('updated', self.on_sample_updated)
self.import_from_cache()
new_item = datawidget.RunItem(state=datawidget.RunItem.StateType.ADD)
pos = self.run_store.insert_sorted(new_item, datawidget.RunItem.sorter)
self.run_editor.set_item(new_item)
first_row = self.widget.datasets_list.get_row_at_index(pos)
self.editor_frame.set_row(first_row)
labels = {
'omega': (self.beamline.goniometer.omega, self.widget.dsets_omega_fbk, '{:0.1f}°'),
'energy': (self.beamline.energy, self.widget.dsets_energy_fbk, '{:0.3f} keV'),
'attenuation': (self.beamline.attenuator, self.widget.dsets_attenuation_fbk, '{:0.0f} %'),
'maxres': (self.beamline.maxres, self.widget.dsets_maxres_fbk, '{:0.2f} Å'),
'aperture': (self.beamline.aperture, self.widget.dsets_aperture_fbk, '{:0.0f} µm'),
'two_theta': (self.beamline.two_theta, self.widget.dsets_2theta_fbk, '{:0.0f}°'),
}
self.group_selectors = []
self.monitors = {
name: common.DeviceMonitor(dev, lbl, fmt)
for name, (dev, lbl, fmt) in list(labels.items())
}
self.widget.datasets_collect_btn.connect('clicked', self.on_collect_btn)
self.microscope.connect('notify::points', self.on_points)
self.frame_monitor = self.beamline.detector.connect('new-image', self.on_new_image)
def create_run_config(self, item):
config = datawidget.RunConfig()
config.set_item(item)
return config.get_widget()
def auto_save_run(self):
item = self.run_editor.item
# auto save current parameters
if item and item.props.state not in [datawidget.RunItem.StateType.ADD, datawidget.RunItem.StateType.COMPLETE]:
info = self.run_editor.get_parameters()
item.props.info = info
item.props.state = datawidget.RunItem.StateType.DRAFT
self.check_run_store()
def on_row_activated(self, list, row):
self.auto_save_run()
self.editor_frame.set_row(row)
position = row.get_index()
item = self.run_store.get_item(position)
num_items = self.run_store.get_n_items()
# add a new run
if item.state == item.StateType.ADD and num_items < 8:
sample = self.sample_store.get_current()
name = sample.get('name')
if position > 0:
prev = self.run_store.get_item(position - 1)
config = prev.info.copy()
name = config['name']
else:
config = self.run_editor.get_default(datawidget.StrategyType.FULL)
energy = self.beamline.bragg_energy.get_position()
distance = self.beamline.distance.get_position()
resolution = converter.dist_to_resol(
distance, self.beamline.detector.mm_size, energy
)
config.update({
'resolution': round(resolution, 4),
'strategy': datawidget.StrategyType.FULL,
'energy': energy,
'distance': distance,
})
config['name'] = self.names.get(name)
item.props.info = config
item.props.state = datawidget.RunItem.StateType.DRAFT
new_item = datawidget.RunItem({}, state=datawidget.RunItem.StateType.ADD)
self.run_store.insert_sorted(new_item, datawidget.RunItem.sorter)
self.check_run_store()
# switch focus
self.run_editor.set_item(item)
def on_runs_changed(self, model, position, removed, added):
self.update_positions()
if self.run_store.get_n_items() < 2:
self.widget.datasets_collect_btn.set_sensitive(False)
def on_points(self, *args, **kwargs):
self.run_editor.set_points(self.microscope.props.points)
def generate_run_list(self):
runs = []
pos = 0
item = self.run_store.get_item(pos)
sample = self.sample_store.get_current()
while item:
if item.state in [item.StateType.DRAFT, item.StateType.ACTIVE]:
run = {'uuid': item.uuid}
run.update(item.info)
# convert points to coordinates and then
# make sure point is not empty if end_point is set
for name in ['p0', 'p1']:
if run.get(name) not in [-1, 0, None]:
run[name] = self.run_editor.get_point(run[name])
elif name in run:
del run[name]
if 'p1' in run and 'p0' not in run:
run['p0'] = run.pop('p1')
run = datatools.update_for_sample(run, sample)
runs.append(run)
pos += 1
item = self.run_store.get_item(pos)
return runs
def check_runlist(self, runs):
existing = {
run['name']: self.beamline.detector.check(run['directory'], run['name'], first=run['first'])
for run in runs
}
config_data = copy.deepcopy(runs)
success = True
collected = 0
# check for existing files
if any(pair[0] for pair in existing.values()):
details = '\n'.join([
'{}: {}'.format(k, datatools.summarize_list(v[0]))
for k, v in existing.items()
if v[0]
])
header = 'Frames from this sequence already exist!\n'
sub_header = details + (
'\n\n<b>What would you like to? </b>\n'
'NOTE: Starting over will delete existing data!\n'
)
buttons = (
('Cancel', RESPONSE_CANCEL),
| |
from .mcmcposteriorsamplernorm import fit
from scipy.stats import norm
import pandas as pd
import numpy as np
import pickle as pk
from sklearn.cluster import KMeans
from ..shared_functions import *
class mcmcsamplernorm:
"""
Class for the mcmc sampler of the deconvolution gaussian model
"""
def __init__(self, K=1, Kc=1):
"""
Constructor of the class
Parameters
-------------
K: int, Number of components of the noise distribution
Kc: int, Number of components of the convolved distribution
**kwargs:
alpha: float, parameter to determine the hyperprior of the noise weight components
alphac: float, parameter to determine the hyperprior of the target weight components
"""
self.K = K
self.Kc = Kc
self.fitted = False
return
def fit(self, dataNoise, dataConvolution, iterations = 1000, ignored_iterations = 1000, chains = 1, priors = None, method_initialisation = "kmeans", initial_conditions = [], show_progress = True, seed = 0):
"""
Fit the model to the posterior distribution
Parameters
-------------
dataNoise: list/npArray, 1D array witht he data of the noise
dataConvolution: list/npArray, 1D array witht he data of the convolution
iterations: int, number of samples to be drawn and stored for each chain during the sampling
ignored_iterations: int, number of samples to be drawn and ignored for each chain during the sampling
chains: int, number of independently initialised realisations of the markov chain
priors: array, parameter of the priors gamma distribution acording to the definition of the wikipedia
kconst: float, parameter k of the prior gamma distribution
initialConditions: list, 1D array with all the parameters required to initialise manually all the components of all the chains the chains
show_progress: bool, indicate if the method should show the progress in the generation of the new data
seed: int, value to initialise the random generator and obtain reproducible results
Returns
---------------
Nothing
"""
self.data = dataNoise
self.datac = dataConvolution
self.iterations = iterations
self.ignored_iterations = ignored_iterations
self.chains = chains
if priors == None:
self.priors = np.zeros(10)
self.priors[0] = 1/self.K
self.priors[1] = (np.max(dataNoise)+np.min(dataNoise))/2
self.priors[2] = 3*(np.max(dataNoise)-np.min(dataNoise))
self.priors[3] = 10*(np.max(dataNoise)-np.min(dataNoise))
self.priors[4] = 1.1
self.priors[5] = 1/self.Kc
self.priors[6] = (np.max(dataConvolution)+np.min(dataConvolution))/2
self.priors[7] = 3*(np.max(dataConvolution)-np.min(dataConvolution))
self.priors[8] = 10*(np.max(dataConvolution)-np.min(dataConvolution))
self.priors[9] = 1.1
else:
self.priors = priors
if initial_conditions != []:
self.initial_conditions = initial_conditions
elif method_initialisation == "kmeans":
K =self.K
Kc = self.Kc
y = np.zeros([chains,(K+Kc)*3])
model = KMeans(n_clusters=K)
model.fit(dataNoise.reshape(-1,1))
ids = model.predict(dataNoise.reshape(-1,1))
#Add weights autofluorescence
for i in range(K):
for j in range(chains):
y[j,i] = np.sum(ids==i)/len(ids)
#Add means autofluorescence
for i in range(K):
for j in range(chains):
y[j,K+i] = np.mean(dataNoise[ids==i])
#Add std autofluorescence
for i in range(K):
for j in range(chains):
y[j,2*K+i] = np.std(dataNoise[ids==i])
model = KMeans(n_clusters=Kc)
model.fit(dataConvolution.reshape(-1,1))
ids = model.predict(dataConvolution.reshape(-1,1))
#Add weights autofluorescence
for i in range(Kc):
for j in range(chains):
y[j,3*K+i] = np.sum(ids==i)/len(ids)
#Add means autofluorescence
for i in range(Kc):
for j in range(chains):
y[j,3*K+Kc+i] = np.mean(dataConvolution[ids==i])
#Add std autofluorescence
for i in range(Kc):
for j in range(chains):
y[j,3*K+2*Kc+i] = np.std(dataConvolution[ids==i])
self.initial_conditions = y
elif method_initialisation == "random":
self.initial_conditions = []
else:
self.initial_conditions = []
self.samples = np.array(fit(dataNoise, dataConvolution, ignored_iterations, iterations, chains, self.K, self.Kc, self.priors, self.initial_conditions, show_progress, seed))
self.fitted = True
return
def save(self, name):
"""
Pickle save the model.
Parameters
----------------
name: string, name in which to store the model
Return:
nothing
"""
if self.fitted:
pickling_on = open(name+".pickle","wb")
pk.dump({"K":self.K, "Kc":self.Kc, "priors": self.priors, "iterations": self.iterations,
"ignored_iterations": self.ignored_iterations,
"chains":self.chains, "samples":self.samples}, pickling_on)
pickling_on.close()
else:
print("The model has not been fitted so there is nothing to save.")
return
def load(self, name):
"""
Pickle load the model.
Parameters
----------------
name: string, name from which to recover the model
Return:
nothing
"""
pickle_off = open(name+".pickle","rb")
aux = pk.load(pickle_off)
pickle_off.close()
self.K = aux["K"]
self.Kc = aux ["Kc"]
self.priors = aux["priors"]
self.iterations = aux["iterations"]
self.ignored_iterations = aux["ignored_iterations"]
self.chains = aux["chains"]
self.samples = aux["samples"]
self.fitted = True
return
def sample_autofluorescence(self, size = 1, style = "full", pos = None):
"""
Generate samples from the fitted posterior distribution according to the noise distribution
Parameters
--------------
size: int, number of samples to be drawn
style: string ("full" or "single"), sample from the posterior and then sample, or all the samples from the same posterior draw
pos: if style = "single", draw from the posterior from which to choose
Returns:
list: list, 1D array with *size* samples from the model
"""
if style=="full":
return np.array(sample_autofluorescence(self.samples,self.K,self.Kc,size=size))
elif style=="single":
if pos == None:
pos = np.random.choice(range(len(self.samples)))
return np.array(sample_autofluorescence(self.samples,self.K,self.Kc,size=size,pos=pos))
else:
return np.array(sample_autofluorescence(self.samples,self.K,self.Kc,size=size,pos=pos))
return
def sample_deconvolution(self, size = 1, style = "full", pos = None):
"""
Generate samples from the fitted posterior distribution according to the deconvolved distribution
Parameters
--------------
size: int, number of samples to be drawn
style: string ("full" or "single"), sample from the posterior and then sample, or all the samples from the same posterior draw
pos: if style = "single", draw from the posterior from which to choose
Returns:
list: list, 1D array with *size* samples from the model
"""
if style=="full":
return np.array(sample_deconvolution(self.samples,self.K,self.Kc,size=size))
elif style=="single":
if pos == None:
pos = np.random.choice(range(len(self.samples)))
return np.array(sample_deconvolution(self.samples,self.K,self.Kc,size=size,pos=pos))
else:
return np.array(sample_deconvolution(self.samples,self.K,self.Kc,size=size,pos=pos))
return
def sample_convolution(self, size = 1, style = "full", pos = None):
"""
Generate samples from the fitted posterior distribution according to the convolved distribution
Parameters
--------------
size: int, number of samples to be drawn
style: string ("full" or "single"), sample from the posterior and then sample, or all the samples from the same posterior draw
pos: if style = "single", draw from the posterior from which to choose
Returns:
list: list, 1D array with *size* samples from the model
"""
if style=="full":
return np.array(sample_convolution(self.samples,self.K,self.Kc,size=size))
elif style=="single":
if pos == None:
pos = np.random.choice(range(len(self.samples)))
return np.array(sample_convolution(self.samples,self.K,self.Kc,size=size,pos=pos))
else:
return np.array(sample_convolution(self.samples,self.K,self.Kc,size=size,pos=pos))
return
def score_autofluorescence(self, x, percentiles = [5, 95], size = 100):
"""
Evaluate the mean and percentiles of the the pdf at certain position acording to the convolved distribution
Parameters
-------------
x: list/array, positions where to evaluate the distribution
percentiles: list/array, percentiles to be evaluated
size: int, number of samples to draw from the posterior to make the statistics, bigger numbers give more stability
Returns
-------------
list: list, 2D array with the mean and all the percentile evaluations at all points in x
"""
yT = []
for l in range(size):
i = np.random.choice(self.iterations)
y = np.zeros(len(x))
for k in range(self.K):
mu = self.samples[i,self.K+k]
sigma = self.samples[i,2*self.K+k]
y += self.samples[i,k]*norm.pdf(x,loc=mu,scale=sigma)
yT.append(y)
return np.mean(yT,axis=0),np.percentile(yT,percentiles,axis=0)
def score_deconvolution(self, x, percentiles = [5, 95], size = 100):
"""
Evaluate the mean and percentiles of the the pdf at certain position acording to the deconvolved distribution
Parameters
-------------
x: list/array, positions where to evaluate the distribution
percentiles: list/array, percentiles to be evaluated
size: int, number of samples to draw from the posterior to make the statistics, bigger numbers give more stability
Returns
-------------
list: list, 2D array with the mean and all the percentile evaluations at all points in x
"""
yT = []
for l in range(size):
i = np.random.choice(self.iterations)
y = np.zeros(len(x))
for j in range(self.Kc):
mu = self.samples[i,3*self.K+self.Kc+j]
sigma = self.samples[i,3*self.K+2*self.Kc+j]
y += self.samples[i,3*self.K+j]*norm.pdf(x,loc=mu,scale=sigma)
yT.append(y)
return np.mean(yT,axis=0),np.percentile(yT,percentiles,axis=0)
def score_convolution(self, x, percentiles = [5, 95], size = 100):
"""
Evaluate the mean and percentiles of the the pdf at certain position acording to the convolved distribution
Parameters
-------------
x: list/array, positions where to evaluate the distribution
percentiles: list/array, percentiles to be evaluated
size: int, number of samples to draw from the posterior to make the statistics, bigger numbers give more stability
Returns
-------------
list: list, 2D array with the mean and all the percentile evaluations at all points in x
"""
yT = []
for l in range(size):
i = np.random.choice(self.iterations)
y = np.zeros(len(x))
for j in range(self.Kc):
for k in range(self.K):
mu1 = self.samples[i,self.K+k]
mu2 = self.samples[i,3*self.K+self.Kc+j]
sigma1 = self.samples[i,2*self.K+k]
sigma2 = self.samples[i,3*self.K+2*self.Kc+j]
mu = mu1
s = np.sqrt(sigma1**2+sigma2**2)
y += self.samples[i,k]*self.samples[i,3*self.K+j]*norm.pdf(x,loc=mu,scale=s)
yT.append(y)
return np.mean(yT,axis=0),np.percentile(yT,percentiles,axis=0)
def sampler_statistics(self, sort="weight"):
"""
Show statistics of correct mixing of the mcmc sampler
Args:
sort: ["weight", "none", "means"], method for sorting the samples from the different chains
Returns
-------------
| |
1
self.curSubTableIndex = subtableIndex
handler = self.classHandlers.get( (lookup.LookupType, subtable.Format), None)
if handler:
handler(subtable, self)
classNameLists = self.classesByNameList.keys()
classNameLists.sort()
for nameList in classNameLists:
classRecList = self.classesByNameList[nameList]
lenList = len(nameList)
if lenList == 0:
className = "@empty"
elif lenList == 1:
className = "[%s]" % (nameList[0])
elif lenList == 2:
className = "@%s_%s" % (nameList[0], nameList[1])
else:
className = "@%s_%d_%s" % (nameList[0], len(nameList), nameList[-1])
i = 1
while( (i < lenList) and self.classesByClassName.has_key(className)):
className = "@%s_%s_%d_%s" % (nameList[0], nameList[i], len(nameList), nameList[-1])
self.classesByClassName[className]= nameList
for classRec in classRecList:
key = (classRec.lookupIndex, classRec.subtableIndex, classRec.classIndex, classRec.type)
self.classesByLookup[key] = className
classDefLists = self.markClassesByDefList.keys()
classDefLists.sort()
for defList in classDefLists:
classRecList = self.markClassesByDefList[defList]
defNameList = []
for nameList,anchor in defList:
defNameList.extend(list(nameList))
lenList = len(defNameList)
if lenList == 0:
className = "@empty_mark"
elif lenList == 1:
className = "%s_mark" % (defNameList[0])
elif lenList == 2:
className = "@%s_%s_mark" % (defNameList[0], defNameList[1])
else:
className = "@%s_%d_%s_mark" % (defNameList[0], len(defNameList), defNameList[-1])
i = 1
while( (i < lenList) and self.classesByClassName.has_key(className)):
className = "@%s_%s_%d_%s_mark" % (defNameList[0], defNameList[i], len(defNameList), defNameList[-1])
self.markClassesByClassName[className] = defList
for classRec in classRecList:
key = (classRec.lookupIndex, classRec.subtableIndex, classRec.classIndex, classRec.type)
self.markClassesByLookup[key] = className
return
def writeClasses(self, writer):
classNames = self.classesByClassName.keys()
if classNames:
writer.newline()
writer.write("# Class definitions *********************************" )
writer.newline()
classNames.sort()
for className in classNames:
if className[0] == "[":
continue # we don't write out the single glyph class names, as they are ued in -line as class lists.
writer.write("%s = [" % (className) )
nameList = self.classesByClassName[className]
defString = " ".join(nameList[:10])
writer.write( " %s" % (defString) )
nameList = nameList[10:]
writer.indent()
while nameList:
writer.newline()
defString = " ".join(nameList[:10])
writer.write( " %s" % (defString) )
nameList = nameList[10:]
writer.dedent()
writer.write( " ];")
writer.newline()
writer.newline()
def writeMarkClasses(self, writer):
classNames = self.markClassesByClassName.keys()
if classNames:
writer.newline()
writer.write("# Mark Class definitions *********************************" )
writer.newline()
classNames.sort()
for className in classNames:
classDef = self.markClassesByClassName[className]
for nameList, anchor in classDef:
nameTxt = " ".join(nameList[:10])
writer._writeraw("mark [%s" % (nameTxt))
writer.indent()
nameList = nameList[10:]
while nameList:
writer.newline()
defString = " ".join(nameList[:10])
writer._writeraw( " %s" % (defString) )
nameList = nameList[10:]
writer.dedent()
writer._writeraw( "] %s %s;" % (anchor, className))
writer.newline()
writer.newline()
def doFeatures(self, writer, featDictByLangSys):
featTagList = featDictByLangSys.keys()
featTagList.sort()
for featTag in featTagList:
writer.write( "feature %s {" % (featTag))
writer.newline()
writer.indent()
langSysDict = featDictByLangSys[featTag]
dfltLangSysKey = ("DFLT", "dflt")
langSysTagList = langSysDict.keys()
langSysTagList.sort()
try:
dfltFeatIndexList = langSysDict[dfltLangSysKey]
langSysTagList.remove(dfltLangSysKey)
except KeyError:
dfltFeatIndexList = None
if dfltFeatIndexList:
dfltLookupIndexDict = self.writeDfltLangSysFeat(writer, dfltLangSysKey, dfltFeatIndexList)
else:
dfltLookupIndexDict = None
prevLangSysKey = dfltLangSysKey
for langSysKey in langSysTagList:
self.writeLangSysFeat(writer, langSysKey, prevLangSysKey, langSysDict[langSysKey], dfltLookupIndexDict)
prevLangSysKey = langSysKey
writer.dedent()
writer.write( "} %s;" % (featTag))
writer.newline()
writer.newline()
def writeDfltLangSysFeat(self, writer, langSysKey, featIndexList):
pfeatList = self.table.FeatureList.FeatureRecord
pLookupList = self.table.LookupList.Lookup
lookupIndexDict = {}
for featIndex in featIndexList:
featRecord = pfeatList[featIndex]
for lookupIndex in featRecord.Feature.LookupListIndex:
lookupIndexDict[lookupIndex] = pLookupList[lookupIndex]
liList = lookupIndexDict.keys()
liList.sort()
excludeDLFTtxt = ""
nameIndex = 0
for li in liList:
self.curLookupIndex = li
lookup = pLookupList[li]
lookupFlagTxt = getLookupFlagTag(lookup.LookupFlag)
if self.showExtension and lookup.LookupType == self.ExtensionIndex:
useExtension = " useExtension"
else:
useExtension = ""
if self.seenLookup.has_key(li):
lookupName = self.seenLookup[li]
writer.write("lookup %s%s%s;" % (lookupName, lookupFlagTxt, excludeDLFTtxt) )
writer.newline()
else:
nameIndex +=1
lookupName = "%s_%s_%s_%s" % (featRecord.FeatureTag, langSysKey[0], langSysKey[1], nameIndex)
self.seenLookup[li] = lookupName
writer.write("lookup %s%s%s {" % (lookupName, excludeDLFTtxt, useExtension) )
excludeDLFTtxt = "" # Only need to write it once.
writer.newline()
writer.indent()
self.curLookupIndex = li
self.writeLookup(writer, lookup)
writer.dedent()
writer.write("} %s;" % (lookupName) )
writer.newline()
writer.newline()
return lookupIndexDict
def writeLangSysFeat(self, writer, langSysKey, prevLangSysKey, featIndexList, dfltLookupIndexDict):
pfeatList = self.table.FeatureList.FeatureRecord
pLookupList = self.table.LookupList.Lookup
lookupIndexDict = {}
for featIndex in featIndexList:
featRecord = pfeatList[featIndex]
for lookupIndex in featRecord.Feature.LookupListIndex:
lookupIndexDict[lookupIndex] = pLookupList[lookupIndex]
# Remove all lookups shared with the DFLT dflt script.
# Note if there were any; if not, then we need to use the exclude keyword with the lookup.
haveAnyDflt = 0
excludeDLFT = 0
if dfltLookupIndexDict:
lookupIndexList = lookupIndexDict.keys()
lookupIndexList.sort()
for lookupIndex in lookupIndexList:
if dfltLookupIndexDict.has_key(lookupIndex):
del lookupIndexDict[lookupIndex]
haveAnyDflt =1
if not haveAnyDflt:
excludeDLFT = 1
liList = lookupIndexDict.keys()
liList.sort()
if excludeDLFT:
excludeDLFTtxt = " excludeDLFT"
else:
excludeDLFTtxt = ""
if liList: # If all the lookups were shared with DFLt dflt, no need to write anything.
nameIndex = 0
if prevLangSysKey[0] != langSysKey[0]:
writer.write("script %s;" % (langSysKey[0]) )
writer.newline()
writer.write("language %s;" % (langSysKey[1]) )
writer.newline()
elif prevLangSysKey[1] != langSysKey[1]:
writer.write("language %s;" % (langSysKey[1]) )
writer.newline()
for li in liList:
self.curLookupIndex = li
lookup = pLookupList[li]
lookupFlagTxt = getLookupFlagTag(lookup.LookupFlag)
if self.showExtension and lookup.LookupType == self.ExtensionIndex:
useExtension = " useExtension"
else:
useExtension = ""
if self.seenLookup.has_key(li):
lookupName = self.seenLookup[li]
writer.write("lookup %s%s;" % (lookupName, excludeDLFTtxt) )
excludeDLFTtxt = "" # Only need to write it once.
writer.newline()
else:
nameIndex +=1
lookupName = "%s_%s_%s_%s" % (featRecord.FeatureTag, langSysKey[0], langSysKey[1], nameIndex)
self.seenLookup[li] = lookupName
writer.write("lookup %s%s%s;" % (lookupName, lookupFlagTxt, excludeDLFTtxt) )
excludeDLFTtxt = "" # Only need to write it once.
writer.newline()
writer.indent()
self.curLookupIndex = li
self.writeLookup(writer, lookup)
writer.dedent()
writer.write("} %s;" % (lookupName) )
writer.newline()
writer.newline()
def writeLookup(self, writer, lookup):
lookupType = lookup.LookupType
try:
handler = self.ruleHandlers[(lookupType)]
except KeyError:
msg = "Error. Unknown lookup type %s. Skipping lookup." % (lookupType)
print msg
writer._writeraw(msg)
writer.newline()
return
for si in range(lookup.SubTableCount):
self.curSubTableIndex = si
rules = handler(lookup.SubTable[si], self)
for rule in rules:
if len(rule) > kMaxLineLength:
# I had to use a named group because re didn't work with the "\1" when the escape included just a single quote.
m = re.search(r"(^\s+)", rule)
if m:
indent = m.group(1) + INDENT
else:
indent = INDENT
rule1 = re.sub(r"(?P<endBracket>]'*)\s+(?!;)", "\g<endBracket>\n" + indent, rule)
ruleList2 = rule1.split("\n")
for rule2 in ruleList2:
ruleList3 = gtextWrapper.wrap(rule2)
for rule3 in ruleList3:
writer._writeraw(rule3)
writer.newline()
else:
writer._writeraw(rule)
writer.newline()
def dumpOTLAsFeatureFile(tableTag, writer, ttf):
try:
table = ttf[tableTag]
except:
print "Font does not have %s. Skipping." % (tableTag)
return
otlConverter = OTLConverter(table, ttf)
otlConverter.otlFeatureFormat(writer)
def debugmsg(msg):
import time
print(msg + time.strftime(" (%H:%M:%S)", time.localtime(time.time())))
class TTXNWriter(XMLWriter):
def __init__(self, fileOrPath, indentwhite=INDENT, idlefunc=None, encoding="ISO-8859-1", indentLevel=0):
if not hasattr(fileOrPath, "write"):
self.file = open(fileOrPath, "w")
if os.name == "mac":
import macfs
macfs.FSSpec(fileOrPath).SetCreatorType('R*ch', 'TEXT')
else:
# assume writable file object
self.file = fileOrPath
self.indentwhite = indentwhite
self.indentlevel = indentLevel
self.stack = []
self.needindent = 1
self.idlefunc = idlefunc
self.idlecounter = 0
self.newline()
class TTXNTTFont(TTFont):
def __init__(self, file=None, res_name_or_index=None,
sfntVersion="\000\001\000\000", checkChecksums=0,
verbose=0, recalcBBoxes=1, allowVID=0, ignoreDecompileErrors=False,
fontNumber=-1, supressHints = 0, lazy=False, quiet=False):
self.filePath = file
self.supressHints = supressHints
TTFont. __init__(self, file, res_name_or_index,sfntVersion, checkChecksums,
verbose, recalcBBoxes, allowVID, ignoreDecompileErrors,
fontNumber)
def _tableToXML(self, writer, tag, progress, quiet):
if self.has_key(tag):
table = self[tag]
report = "Dumping '%s' table..." % tag
else:
report = "No '%s' table found." % tag
if progress:
progress.setLabel(report)
elif self.verbose:
debugmsg(report)
else:
if not quiet:
print report
if not self.has_key(tag):
return
xmlTag = tagToXML(tag)
if hasattr(table, "ERROR"):
writer.begintag(xmlTag, ERROR="decompilation error")
else:
writer.begintag(xmlTag)
writer.newline()
if tag in ("glyf", "CFF "):
ttxnWriter = TTXNWriter(writer.file, indentLevel=writer.indentlevel)
print "Dumping '%s' table ..." % (tag)
dumpFont(ttxnWriter, self.filePath, self.supressHints)
ttxnWriter.newline()
elif tag in ("GSUB", "GPOS"):
ttxnWriter = TTXNWriter(writer.file, indentLevel=writer.indentlevel)
print "Dumping '%s' table ..." % (tag)
dumpOTLAsFeatureFile(tag, ttxnWriter, self)
ttxnWriter.newline()
else:
table.toXML(writer, self)
writer.endtag(xmlTag)
writer.newline()
writer.newline()
def tagToXML(tag):
"""Similarly to tagToIdentifier(), this converts a TT tag
to a valid XML element name. Since XML element names are
case sensitive, this is a fairly simple/readable translation.
"""
import re
if tag == "OS/2":
return "OS_2"
elif tag == "GlyphOrder":
return tag
if re.match("[A-Za-z_][A-Za-z_0-9]* *$", tag):
return string.strip(tag)
else:
return tagToIdentifier(tag)
def xmlToTag(tag):
"""The opposite of tagToXML()"""
if tag == "OS_2":
return "OS/2"
if len(tag) == 8:
return identifierToTag(tag)
else:
return tag + " " * (4 - len(tag))
return tag
def shellcmd(cmdList):
tempFile = os.tmpfile() # I use this because tx -dump -6 can be very large.
p = subprocess.Popen(cmdList,stdout=tempFile, stderr=subprocess.STDOUT)
retCode = p.poll()
while None == retCode:
retCode = p.poll()
tempFile.seek(0)
log = tempFile.read()
tempFile.close()
return log
def dumpFont(writer, fontPath, supressHints=0):
dictTxt = shellcmd(["tx", "-dump", "-0", fontPath])
if curSystem == "Windows":
dictTxt = re.sub(r"[\r\n]+", "\n", dictTxt)
dictTxt = re.sub(r"##[^\r\n]*Filename[^\r\n]+", "", dictTxt, 1)
lines = dictTxt.splitlines()
dictTxt = "\n".join(lines)
writer._writeraw("<FontTopDict>")
writer.newline()
writer.indent()
writer._writeraw(dictTxt)
writer.newline()
writer.dedent()
writer._writeraw("</FontTopDict>")
writer.newline()
if supressHints:
charData= shellcmd(["tx", "-dump", "-6", "-n", fontPath])
else:
charData= shellcmd(["tx", "-dump", "-6", fontPath])
if curSystem == "Windows":
charData = re.sub(r"[\r\n]+", "\n", charData)
charList = re.findall(r"[^ ]glyph\[[^]]+\] \{([^,]+),[^\r\n]+,([^}]+)", charData)
if "cid.CIDFontName" in dictTxt:
# fix glyph names to sort
charList = map(lambda entry: ("cid%s" % (entry[0]).zfill(5), entry[1]) , charList)
charList = map(lambda entry: entry[0] + entry[1], charList)
charList.sort()
charTxt = "\n".join(charList)
writer._writeraw("<FontOutlines>")
writer.newline()
writer.indent()
writer._writeraw(charTxt)
writer.newline()
writer.dedent()
writer._writeraw("</FontOutlines>")
writer.newline()
return
def ttnDump(input, output, options, showExtensionFlag, supressHints = 0, supressVersions = 0, supressTTFDiffs = 0):
print 'Dumping "%s" to "%s"...' % (input, output)
ttf = TTXNTTFont(input, 0, verbose=options.verbose, allowVID=options.allowVID,
ignoreDecompileErrors=options.ignoreDecompileErrors, supressHints = supressHints)
splitTables=options.splitTables
ttf.showExtensionFlag = showExtensionFlag
kDoNotDumpTagList = ["GlyphOrder", "DSIG"]
if options.onlyTables:
onlyTables = copy.copy(options.onlyTables)
else:
onlyTables = ttf.keys()
if options.skipTables:
for tag in options.skipTables:
if tag in onlyTables:
onlyTables.remove(tag)
for tag in kDoNotDumpTagList:
if tag in onlyTables:
onlyTables.remove(tag)
# Zero values that always differ.
if 'head' in onlyTables:
head = ttf["head"]
temp = head.created
head.created = 0
head.modified = 0
head.magicNumber = 0
head.checkSumAdjustment = 0
if supressVersions:
head.fontRevision = 0
if 'hmtx' in onlyTables:
hmtx = ttf["hmtx"]
temp = hmtx.metrics # hmtx must be decompiled *before* we zero the hhea.numberOfHMetrics value
if supressTTFDiffs:
try:
del temp["CR"]
except KeyError:
pass
try:
del temp["NULL"]
except KeyError:
pass
if 'hhea' in onlyTables:
hhea = ttf["hhea"]
temp = hhea.numberOfHMetrics
hhea.numberOfHMetrics = 0
if 'vmtx' in onlyTables:
vmtx = ttf["vmtx"]
temp = vmtx.metrics # vmtx must be decompiled *before* we zero the vhea.numberOfHMetrics value
if supressTTFDiffs:
try:
del temp["CR"]
except KeyError:
pass
try:
del temp["NULL"]
except KeyError:
pass
if 'vhea' in onlyTables:
vhea = ttf["vhea"]
temp = vhea.numberOfVMetrics
vhea.numberOfVMetrics = 0
if supressVersions:
if 'name' in onlyTables:
name_table = ttf["name"]
for namerecord in name_table.names:
if namerecord.nameID in [3,5]:
namerecord.string = "VERSION SUPPRSESSED"
if 'GDEF' in onlyTables:
GDEF = ttf["GDEF"]
gt = GDEF.table
if gt.GlyphClassDef:
gtc = gt.GlyphClassDef.classDefs
gt.GlyphClassDef.Format = 0
if gt.MarkAttachClassDef:
gtc = gt.MarkAttachClassDef.classDefs
gt.MarkAttachClassDef.Format = 0
if gt.AttachList:
if not gt.AttachList.Coverage.glyphs:
gt.AttachList = None
if gt.LigCaretList:
if not gt.LigCaretList.Coverage.glyphs:
gt.LigCaretList = None
onlyTables.sort() # this is why I always use the onlyTables option - it allows me to sort the table list.
if 'cmap' in onlyTables:
# remove mappings to notdef.
cmapTable = ttf["cmap"]
""" Force shared cmap tables to get separately decompiled.
The _c_m_a_p.py logic will decompile a cmap from source data if an attempt is made to
access a field which is not (yet) defined. Unicode ( format 4) subtables
are usually identical, and thus shared. When intially decompiled, the first gets
fully decompiled, and then the second gets a reference to the 'cmap' dict of the first.
WHen entries are removed from the cmap dict of the first subtable, that is the same as
removing them from the cmap dict of the second. However, when later an attempt is made
to reference the 'nGroups' field - which doesn't exist in format 4 - the second table
gets fully decompiled, and its cmap dict is rebuilt from the original data.
"""
for cmapSubtable in cmapTable.tables:
#if cmapSubtable.format != | |
<gh_stars>0
import os
import glob
import re
import sys # access system routines, including writing console output to file
import math
import scipy
import numpy as np
import matplotlib.pyplot as plt
import Common
import Plotting
# Make plots of the results obtained for the design of the DBR grating for the CORNERSTONE fab run April 2018
# See notebook 426, pg 7 for details
# <NAME> 3 - 4 - 2018
MOD_NAME_STR = "DBR_Analysis" # use this in exception handling messages
def DBR_Sim_Plots():
# Run the functions to plot the DBR simulation Data
# <NAME> 3 - 4 - 2018
FUNC_NAME = ".DBR_Sim_Plots()" # use this in exception handling messages
ERR_STATEMENT = "Error: " + MOD_NAME_STR + FUNC_NAME
try:
DATA_HOME = "C:/Users/Robert/Research/CAPPA/Simulations/DBR_Design_Apr_2018/"
if os.path.isdir(DATA_HOME):
os.chdir(DATA_HOME)
print os.getcwd()
#plot_RI_versus_WL()
#Wh=2000; Nl=20;
#plot_reflectivity_versus_WL_layers(Wh)
#Wh_list = range(500, 2500, 500)
#Nl_list = range(5, 25, 5)
#for Nl in Nl_list:
#for Wh in Wh_list:
# #plot_neff_versus_WL(Wh)
# #plot_period_versus_WL(Wh)
# plot_reflectivity_versus_WL_layers(Wh)
#plot_reflectivity_versus_width_ratio('Ex')
#plot_device_dims_versus_width_ratio('Ey')
#plot_index_contrast_versus_width_ratio()
plot_layer_lengths_versus_width_ratio()
else:
raise EnvironmentError
except EnvironmentError:
print ERR_STATEMENT
print "Cannot find",DATA_HOME
except Exception:
print ERR_STATEMENT
def read_DBR_data_file(Wl, Wh, Polar, Nlayers):
# read a file containing DBR simulation data
# data is contained in files of the form "DBR_params_Wl_*_Wh_*_Pol_*_Nl_*.txt"
# Wl is the lower waveguide width expressed in nm
# Wh is the higher waveguide width expressed in nm
# Polar is either Ex or Ey
# Nlayers is the assumed number of layers
# contents of the file are as follows
# col[0] = wavelength (nm)
# col[1] = core refractive index (RI)
# col[2] = substrate RI, cladding is assumed to be air
# col[3] = effective RI in waveguide with width W1
# col[4] = layer length of waveguide with width W1 units (nm)
# col[5] = effective RI in waveguide with width W2
# col[6] = layer length of waveguide with width W2 units (nm)
# col[7] = period of grating formed from waveguides with width W1 and W2 units (nm)
# col[8] = total length of grating structure formed from waveguides with width W1 and W2 units (um)
# col[9] = reflectivity of grating structure formed from waveguides with width W1 and W2 units (um)
# <NAME> 3 - 4 - 2018
FUNC_NAME = ".read_DBR_data_file()" # use this in exception handling messages
ERR_STATEMENT = "Error: " + MOD_NAME_STR + FUNC_NAME
try:
filename = "DBR_params_Wl_%(v1)d_Wh_%(v2)d_Pol_%(v3)s_Nl_%(v4)d.txt"%{"v1":Wl, "v2":Wh, "v3":Polar, "v4":Nlayers}
if glob.glob(filename):
data = Common.read_matrix(filename, '\t', True)
data = Common.transpose_multi_col(data)
return data
else:
raise Exception
except Exception:
print ERR_STATEMENT
def plot_RI_versus_WL():
# make a plot of RI versus wavelength
# <NAME> 3 - 4 - 2018
FUNC_NAME = ".plot_RI_versus_WL()" # use this in exception handling messages
ERR_STATEMENT = "Error: " + MOD_NAME_STR + FUNC_NAME
try:
Wl = 250; Wh = 1000; pol = 'Ex'; Nl = 15;
data = read_DBR_data_file(Wl, Wh, pol, Nl)
if data is not None:
hv_data = []; labels = []; mark_list = []
hv_data.append([data[0], data[1]]); hv_data.append([data[0], data[2]]);
labels.append('$n_{core}$ Si'); labels.append('$n_{sub}$ SiO$_{2}$');
mark_list.append(Plotting.labs[0]); mark_list.append(Plotting.labs[1]);
args = Plotting.plot_arg_multiple()
args.loud = True
args.x_label = "Wavelength (nm)"
args.y_label = "Refractive Index"
args.mrk_list = mark_list
args.crv_lab_list = labels
args.plt_range = [1500, 1660, 1.0, 4.0]
args.fig_name = 'RI_Dispersion_Si_SiO2'
Plotting.plot_multiple_curves(hv_data, args)
del data; del hv_data; del labels; del mark_list; del args;
else:
raise Exception
except Exception:
print ERR_STATEMENT
def plot_neff_versus_WL(Wh):
# make a plot of a quantity versus wavelength
# <NAME> 3 - 4 - 2018
FUNC_NAME = ".plot_neff_versus_WL()" # use this in exception handling messages
ERR_STATEMENT = "Error: " + MOD_NAME_STR + FUNC_NAME
try:
Wl = 250; pol1 = 'Ex'; pol2 = 'Ey'; Nl = 15;
data1 = read_DBR_data_file(Wl, Wh, pol1, Nl)
data2 = read_DBR_data_file(Wl, Wh, pol2, Nl)
if data1 is not None and data2 is not None:
hv_data = []; labels = []; mark_list = []
hv_data.append([data2[0], data2[3]]); hv_data.append([data2[0], data2[5]]);
labels.append('$E_{y}$ Wl'); labels.append('$E_{y}$ Wh');
mark_list.append(Plotting.labs[0]); mark_list.append(Plotting.labs[1]);
hv_data.append([data1[0], data1[3]]); hv_data.append([data1[0], data1[5]]);
labels.append('$E_{x}$ Wl'); labels.append('$E_{x}$ Wh');
mark_list.append(Plotting.labs_dashed[0]); mark_list.append(Plotting.labs_dashed[1]);
args = Plotting.plot_arg_multiple()
args.loud = True
args.x_label = 'Wavelength (nm)'
args.y_label = 'Effective Refractive Index $n_{eff}$'
args.mrk_list = mark_list
args.crv_lab_list = labels
args.plt_range = [1500, 1660, 1.4, 3.0]
args.fig_name = 'Neff_Dispersion_Si_SiO2_Wh_%(v1)d'%{"v1":Wh}
Plotting.plot_multiple_curves(hv_data, args)
del data1; del data2; del hv_data; del labels; del mark_list; del args;
except Exception:
print ERR_STATEMENT
def plot_period_versus_WL(Wh):
# make a plot of a quantity versus wavelength
# <NAME> 3 - 4 - 2018
FUNC_NAME = ".plot_period_versus_WL()" # use this in exception handling messages
ERR_STATEMENT = "Error: " + MOD_NAME_STR + FUNC_NAME
try:
Wl = 250; pol1 = 'Ex'; pol2 = 'Ey'; Nl = 15;
data1 = read_DBR_data_file(Wl, Wh, pol1, Nl)
data2 = read_DBR_data_file(Wl, Wh, pol2, Nl)
if data1 is not None and data2 is not None:
hv_data = []; labels = []; mark_list = []
hv_data.append([data2[0], data2[4]]); hv_data.append([data2[0], data2[6]]); hv_data.append([data2[0], data2[7]]);
labels.append('$E_{y}$ $L_{Wl}$'); labels.append('$E_{y}$ $L_{Wh}$'); labels.append('$E_{y}$ $\Lambda$');
mark_list.append(Plotting.labs[0]); mark_list.append(Plotting.labs[1]); mark_list.append(Plotting.labs[2]);
hv_data.append([data1[0], data1[4]]); hv_data.append([data1[0], data1[6]]); hv_data.append([data1[0], data1[7]]);
labels.append('$E_{x}$ $L_{Wl}$'); labels.append('$E_{x}$ $L_{Wh}$'); labels.append('$E_{x}$ $\Lambda$');
mark_list.append(Plotting.labs_dashed[0]); mark_list.append(Plotting.labs_dashed[1]); mark_list.append(Plotting.labs_dashed[2]);
args = Plotting.plot_arg_multiple()
args.loud = True
args.x_label = 'Wavelength (nm)'
args.y_label = 'Grating Structure Lengths (nm)'
args.mrk_list = mark_list
args.crv_lab_list = labels
args.plt_range = [1500, 1660, 120, 550]
args.fig_name = 'Grating_Lengths_Dispersion_Si_SiO2_Wh_%(v1)d'%{"v1":Wh}
Plotting.plot_multiple_curves(hv_data, args)
del data1; del data2; del hv_data; del labels; del mark_list; del args;
except Exception:
print ERR_STATEMENT
def plot_reflectivity_versus_WL(Wh, Nl):
# make a plot of a quantity versus wavelength
# <NAME> 3 - 4 - 2018
FUNC_NAME = ".plot_reflectivity_versus_WL()" # use this in exception handling messages
ERR_STATEMENT = "Error: " + MOD_NAME_STR + FUNC_NAME
try:
Wl = 250; pol1 = 'Ex'; pol2 = 'Ey';
data1 = read_DBR_data_file(Wl, Wh, pol1, Nl)
data2 = read_DBR_data_file(Wl, Wh, pol2, Nl)
if data1 is not None and data2 is not None:
hv_data = []; labels = []; mark_list = []
hv_data.append([data2[0], data2[9]]);
labels.append('$E_{y}$');
mark_list.append(Plotting.labs[0]);
hv_data.append([data1[0], data1[9]]);
labels.append('$E_{x}$');
mark_list.append(Plotting.labs_dashed[0]);
args = Plotting.plot_arg_multiple()
args.loud = False
args.x_label = 'Wavelength (nm)'
args.y_label = 'Grating Reflectivity'
args.mrk_list = mark_list
args.crv_lab_list = labels
args.plt_range = [1500, 1660, 0, 1]
args.fig_name = 'Reflectivity_Dispersion_Si_SiO2_Wh_%(v1)d_Nl_%(v2)d'%{"v1":Wh,"v2":Nl}
Plotting.plot_multiple_curves(hv_data, args)
del data1; del data2; del hv_data; del labels; del mark_list; del args;
except Exception:
print ERR_STATEMENT
def plot_reflectivity_versus_WL_layers(Wh):
# make a plot of a quantity versus wavelength
# <NAME> 3 - 4 - 2018
FUNC_NAME = ".plot_reflectivity_versus_WL()" # use this in exception handling messages
ERR_STATEMENT = "Error: " + MOD_NAME_STR + FUNC_NAME
try:
Wl = 250; pol1 = 'Ex'; pol2 = 'Ey';
Nl = 5
data1 = read_DBR_data_file(Wl, Wh, pol1, Nl)
data2 = read_DBR_data_file(Wl, Wh, pol2, Nl)
Nl = 10
data1a = read_DBR_data_file(Wl, Wh, pol1, Nl)
data2a = read_DBR_data_file(Wl, Wh, pol2, Nl)
Nl = 15
data1b = read_DBR_data_file(Wl, Wh, pol1, Nl)
data2b = read_DBR_data_file(Wl, Wh, pol2, Nl)
Nl = 20
data1c = read_DBR_data_file(Wl, Wh, pol1, Nl)
data2c = read_DBR_data_file(Wl, Wh, pol2, Nl)
if data1 is not None and data2 is not None:
hv_data = []; labels = []; mark_list = []
# Ey polarisation
#hv_data.append([data2[0], data2[9]]); hv_data.append([data2a[0], data2a[9]]); hv_data.append([data2b[0], data2b[9]]); hv_data.append([data2c[0], data2c[9]]);
#labels.append('$E_{y}$ $N_{l} = 5$'); labels.append('$E_{y}$ $N_{l} = 10$'); labels.append('$E_{y}$ $N_{l} = 15$'); labels.append('$E_{y}$ $N_{l} = 20$');
#mark_list.append(Plotting.labs[0]); mark_list.append(Plotting.labs[1]); mark_list.append(Plotting.labs[2]); mark_list.append(Plotting.labs[3]);
# Ex polarisation
hv_data.append([data1[0], data1[9]]); hv_data.append([data1a[0], data1a[9]]); hv_data.append([data1b[0], data1b[9]]); hv_data.append([data1c[0], data1c[9]]);
labels.append('$E_{x}$ $N_{l} = 5$'); labels.append('$E_{x}$ $N_{l} = 10$'); labels.append('$E_{x}$ $N_{l} = 15$'); labels.append('$E_{x}$ $N_{l} = 20$');
#mark_list.append(Plotting.labs_dashed[0]); mark_list.append(Plotting.labs_dashed[1]); mark_list.append(Plotting.labs_dashed[2]); mark_list.append(Plotting.labs_dashed[3]);
mark_list.append(Plotting.labs[0]); mark_list.append(Plotting.labs[1]); mark_list.append(Plotting.labs[2]); mark_list.append(Plotting.labs[3]);
args = Plotting.plot_arg_multiple()
args.loud = False
args.x_label = 'Wavelength (nm)'
args.y_label = 'Grating Reflectivity'
args.mrk_list = mark_list
args.crv_lab_list = labels
args.plt_range = [1500, 1660, 0, 1]
args.fig_name = 'Ex_Reflectivity_Dispersion_Si_SiO2_Wh_%(v1)d'%{"v1":Wh}
Plotting.plot_multiple_curves(hv_data, args)
del data1; del data2; del hv_data; del labels; del mark_list; del args;
except Exception:
print(ERR_STATEMENT)
def plot_reflectivity_versus_width_ratio(pol):
# make a plot of reflectivity versus waveguide width ratio
# choose \lambda = 1590 nm
# <NAME> 3 - 4 - 2018
FUNC_NAME = ".plot_reflectivity_versus_width_ratio()" # use this in exception handling messages
ERR_STATEMENT = "Error: " + MOD_NAME_STR + FUNC_NAME
try:
Wh_list = range(500, 2500, | |
<filename>mapping_v2.py
from utils.dict_utils import *
import logging
log = logging.getLogger(__name__)
class V2Mapping:
"""
See documentation/VDUMapping.md
Mapping is strictly {DataLocationPath : LocationToPlaceDataPath},
which should almost always be {TOSCAPath : SOL6Path}
"""
KEY_TOSCA = "dict_tosca"
KEY_SOL6 = "dict_sol6"
def __init__(self, dict_tosca, dict_sol6):
self.dict_tosca = dict_tosca
self.dict_sol6 = dict_sol6
@staticmethod
def parent_match(map1_list, start_num=0, **kwargs):
"""
Given a list, like ['a', 'b', 'c'], then a value dict of
{'a': 'apple', 'b': 'banana', 'c': 'carrot'}
A parent map of
[apple -> 0, banana -> 1, carrot -> 2]
Return a mapping:
[a -> apple, parent=(apple -> 0),
b -> banana, parent=(banana-> 1),
c -> carrot, parent=(carrot -> 2)]
"""
if "parent_map" not in kwargs:
raise KeyError("parent_map not included in kwargs")
if "value_dict" not in kwargs:
raise KeyError("value_dict not included in kwargs")
value_dict = kwargs["value_dict"]
parent_map = kwargs["parent_map"]
result = []
for key in map1_list:
value = value_dict[key]
final_parent_map = None
# Find the element in the parent map that the cur element is mapped to
# For example [c1_nic0 -> c1] and [c1 -> 0]
for p_map in parent_map:
if p_map.name == value:
final_parent_map = p_map
break
map_elem = MapElem(key, value, final_parent_map)
result.append(map_elem)
return result
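# Illustrative usage (inputs assumed, mirroring the docstring example):
#   parents = [MapElem('apple', 0), MapElem('banana', 1)]
#   V2Mapping.parent_match(['a', 'b'],
#                          value_dict={'a': 'apple', 'b': 'banana'},
#                          parent_map=parents)
#   -> [MapElem('a', 'apple', parent=(apple -> 0)),
#       MapElem('b', 'banana', parent=(banana -> 1))]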
@staticmethod
def map_ints(map1_list, start_num=0, **kwargs):
"""
Example input: ["c1", "c2", "s3", "s4"], 0
Example output: {"c1": 0, "c2": 1, "s3": 2, "s4": 3}
Options: parent_map, value_map, none_value, none_key
:param map1_list: A list of strirngs
:param start_num: The number to start mapping values to
:optional parent_map:
:optional value_map:
:optional none_value:
:optional none_key:
:return: A dict of the mappings
"""
parent_map = None
if "parent_map" in kwargs:
parent_map = kwargs["parent_map"]
value_map = None
if "value_map" in kwargs:
value_map = kwargs["value_map"]
none_value = kwargs["none_value"] if "none_value" in kwargs else False
none_key = kwargs["none_key"] if "none_key" in kwargs else False
result = []
cur_num = start_num
for item_1 in map1_list:
try:
existing = next((elem for elem in result if elem.name == item_1), None)
if existing is not None:
    log.info("Dict slot {} is already full with {}".format(item_1, existing.cur_map))
# We need to find the parent mapping so we can include it in the map definition
final_parent_map = None
if parent_map:
# Find the element in the parent map that the cur element is mapped to
# For example [c1_nic0 -> c1] and [c1 -> 0]
for p_map in parent_map:
if p_map.name == item_1:
final_parent_map = p_map
break
elif value_map:
for v_map in value_map:
if v_map.name == cur_num:
final_parent_map = v_map
break
cur_num_val = None if none_value else cur_num
cur_item_1 = None if none_key else item_1
map_elem = MapElem(cur_item_1, cur_num_val, final_parent_map)
result.append(map_elem)
cur_num += 1
except KeyError:
log.error("Key error")
return result
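# Illustrative behaviour (assumed): parent_map entries are matched against each
# item's name, value_map entries against the assigned number; none_key and
# none_value blank out the MapElem name or number respectively, e.g.
# V2Mapping.map_ints(["c1", "c2"], 0) -> [MapElem("c1", 0), MapElem("c2", 1)].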
def generate_map(self, path, field_conditions, map_type="int", map_start=0,
map_function=None, map_args=None, cur_dict=None, parent=None,
field_filter=None):
"""
If map_function is not defined, look at map_type to determine what predefined mapping
function to be used.
Else, use the provided mapping function
:param path:
:param field_conditions:
:param map_type:
:param map_start:
:param map_function:
:param map_args:
:param cur_dict: If we are only specifying the path to generate a mapping, specify the dict
to read from
:param parent: Parent MapElem to assign, if it doesn't exist
:param field_filter: To pass in a conditional function somewhere other than the 3rd element
of the field_conditions array
:return:
"""
field = None
field_value = None
if field_conditions:
field = field_conditions[0]
field_value = field_conditions[1]
if not field_filter:
field_filter = None if len(field_conditions) < 3 else field_conditions[2]
# Get the value at path
if path:
try:
p_val = get_path_value(path, self.dict_tosca, ensure_dict=True)
except KeyError:
# The given path doesn't exist
return []
else:
# If there is no path, search the entire dict
p_val = self.dict_tosca
# Get the relevant nodes based on field and field_value
filtered = None
if field and field_value:
filtered = get_roots_from_filter(p_val, field, field_value, user_filter=field_filter)
elif path:
# If we forgot to pass in a dict, use tosca
if not cur_dict:
cur_dict = self.dict_tosca
# If we do not have field & field_value, but we do have path
filtered = get_path_value(path, cur_dict)
# Check if we have a dict that should be split into a list
if isinstance(filtered, dict) and len(filtered.keys()) > 1:
filtered = listify(filtered)
elif not isinstance(filtered, list):
filtered = [filtered]
result = self.generate_map_from_list(filtered, map_type, map_start, map_function, map_args)
if parent:
# We can't overwrite parent mappings, but there might be some, so just don't do anything
# if that is the case
MapElem.add_parent_mapping(result, parent, fail_silent=True)
return result
def generate_map_from_list(self, to_map, map_type="int", map_start=0,
map_function=None, map_args=None):
if not isinstance(to_map, list):
raise TypeError("Expected type to be list, was {}".format(type(to_map)))
# We now have a list of dicts
# Get the names of each element in the lists
names = []
for elem in to_map:
if isinstance(elem, dict):
names.append(get_dict_key(elem))
elif isinstance(elem, str):
names.append(elem)
elif isinstance(elem, tuple):
names.append(elem[0])
else:
raise TypeError("Unhandled type {}".format(type(elem)))
mapped = None
kwargs = {self.KEY_TOSCA: self.dict_tosca, self.KEY_SOL6: self.dict_sol6,
"filtered": to_map}
if map_args:
kwargs = merge_two_dicts(kwargs, map_args)
# If there is a custom map function defined, use that instead of our defaults
if map_function:
mapped = map_function(names, map_start, **kwargs)
else:
if map_type == "int":
mapped = V2Mapping.map_ints(names, map_start, **kwargs)
elif map_type == "parent_match":
mapped = V2Mapping.parent_match(names, map_start, **kwargs)
return mapped
@staticmethod
def get_items_from_map(path, mapping, cur_dict, link_list=False, must_exist=True):
if not path or not mapping:
return None
res = [get_path_value(path.format(c_map.name), cur_dict, must_exist=must_exist) for c_map in mapping]
# We need the VDU names that are linked with the flavors, so get them this way
if link_list:
temp = []
for i, c_map in enumerate(mapping):
temp.append([c_map.name, res[i]])
res = temp
return res
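# Illustrative usage (path and dict assumed): with mapping [c1 -> 0, c2 -> 1]
# and path "vnfd.{}.flavor", this reads vnfd.c1.flavor and vnfd.c2.flavor;
# with link_list=True each result becomes [name, value], keeping the
# association between VDU names and the values read.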
@staticmethod
def get_input_values(in_list, tosca_inputs, dict_tosca):
"""
:param in_list: List of { "get_input": "VAR_NAME" }, also might be a value list
:param tosca_inputs: Dict mapping TOSCA input names to their values
:param dict_tosca: The full TOSCA dict (not used by this method)
:return: A list of the values of the inputs
"""
res = []
if tosca_inputs:
for item in in_list:
if V2Mapping.is_tosca_input(item):
cur_item = {item["get_input"]:
V2Mapping.get_input_value(item, tosca_inputs=tosca_inputs)}
else:
cur_item = item
res.append(cur_item)
return res
@staticmethod
def get_input_value(item, input_path=None, dict_tosca=None, tosca_inputs=None):
if input_path and dict_tosca and not tosca_inputs:
tosca_inputs = get_path_value(input_path, dict_tosca)
if V2Mapping.is_tosca_input(item):
# Check to see if there is a value from the config that can be used
# Return the value inside of the get_input key
return tosca_inputs[item["get_input"]]
@staticmethod
def tosca_get_input_key(input_name):
if V2Mapping.is_tosca_input(input_name):
return input_name["get_input"]
@staticmethod
def is_tosca_input(val):
try:
return "get_input" in val
except TypeError:
return False
@staticmethod
def get_object_keys(obj, exclude=None):
return [attr for attr in dir(obj) if not callable(getattr(obj, attr)) and
        not (attr.startswith("_") or (exclude and exclude in attr))]
class MapElem:
    """
    A single mapping element: ``name`` is mapped to ``cur_map``, optionally
    chained to a ``parent_map`` (another MapElem).
    """
def __init__(self, name, cur_map, parent_map=None):
self.name = name
self.cur_map = cur_map
self.parent_map = parent_map
def copy(self):
par = self.parent_map.copy() if self.parent_map else None
return MapElem(self.name, self.cur_map, par)
@staticmethod
def ensure_map_values(mapping, start_val=None):
"""
Given a mapping, ensure that the values are properly incrementing, if not fix them.
Only applies to top-level mapping, does not check parent maps
"""
# If it's valid, don't do anything
if MapElem.validate_map_values(mapping) and start_val is None:
return
# Take the first value and increment from there
if start_val is None:
cur_val = mapping[0].cur_map
else:
cur_val = start_val
for c_map in mapping:
c_map.cur_map = cur_val
cur_val += 1
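# Illustrative behaviour (assumed): for maps valued [7, 3, 9] and start_val=0,
# ensure_map_values rewrites cur_map in place to [0, 1, 2].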
@staticmethod
def validate_map_values(mapping):
"""
Ensure the values are incrementing by 1 each time, if not return False
"""
if not isinstance(mapping, list):
return True
last_value = None
for c_map in mapping:
    if last_value is None:
        last_value = c_map.cur_map
    else:
        if c_map.cur_map != last_value + 1:
            return False
        last_value = c_map.cur_map
return True
@staticmethod
def add_parent_mapping(mapping_list, parent_mapping, fail_silent=False):
if not isinstance(mapping_list, list):
mapping_list = [mapping_list]
for c_map in mapping_list:
if c_map.parent_map:
if fail_silent:
log.debug("SILENT: Expected an empty parent map, instead found {}".
format(c_map.parent_map))
continue
raise KeyError("Expected an empty parent map, instead found {}".
format(c_map.parent_map))
if not isinstance(parent_mapping, MapElem):
raise ValueError("Expected a MapElem, instead {} was given".
format(type(parent_mapping)))
c_map.parent_map = parent_mapping
@staticmethod
def get_mapping_name(mapping_list, req_name):
if isinstance(mapping_list, list):
for c_map in mapping_list:
if not isinstance(c_map, MapElem):
continue
if c_map.name == req_name:
return c_map
return None
@staticmethod
def basic_map(num):
return MapElem(num, num)
@staticmethod
def basic_map_list(num):
return [MapElem.basic_map(n) for n in range(num)]
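# Illustrative: MapElem.basic_map_list(3) -> [MapElem(0, 0), MapElem(1, 1),
# MapElem(2, 2)], an identity mapping with no parents.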
@staticmethod
def get_parent_list(map_list):
"""
Returns a list that contains the parent mappings of each | |
(1 + w * 1j * t_values[20]))
        + sum(R_values[i] / (1 + w * 1j * t_values[i]) for i in range(21, 34))
    )
def KK_RC35(w, Rs, R_values, t_values):
    """
    Kramers-Kronig Function: -RC-
    <NAME> (<EMAIL> / <EMAIL>)
    """
    return Rs + sum(R_values[i] / (1 + w * 1j * t_values[i]) for i in range(35))
def KK_RC36(w, Rs, R_values, t_values):
    """
    Kramers-Kronig Function: -RC-
    <NAME> (<EMAIL> / <EMAIL>)
    """
    return Rs + sum(R_values[i] / (1 + w * 1j * t_values[i]) for i in range(36))
def KK_RC37(w, Rs, R_values, t_values):
    """
    Kramers-Kronig Function: -RC-
    <NAME> (<EMAIL> / <EMAIL>)
    """
    return Rs + sum(R_values[i] / (1 + w * 1j * t_values[i]) for i in range(37))
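# All KK_RCn variants share the closed form R_s + sum_k R_k / (1 + i*w*tau_k).
# A minimal generic sketch (an assumption, not part of the original module's
# API) that reproduces any of them for a given number of RC elements:
def KK_RC(w, Rs, R_values, t_values, n_elements):
    """Hypothetical generic -RC- Kramers-Kronig circuit with n_elements RC pairs."""
    return Rs + sum(R_values[i] / (1 + w * 1j * t_values[i]) for i in range(n_elements))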
def KK_RC38(w, Rs, R_values, t_values):
"""
Kramers-Kronig Function: -RC-
<NAME> (<EMAIL> / | |
supported, but check or reset analyze/power_spectrum/type' %(spec_type))
exit()
else:
log_info.log_error('Input error: no type, please set analyze/power_spectrum/type')
exit()
if ( 'traj_vel_file' in spectrum_dic.keys() ):
traj_vel_file = spectrum_dic['traj_vel_file']
if ( os.path.exists(os.path.abspath(traj_vel_file)) ):
spectrum_dic['traj_vel_file'] = os.path.abspath(traj_vel_file)
atoms_num, pre_base_block, end_base_block, pre_base, frames_num, each, start_frame_id, end_frame_id, time_step = \
traj_info.get_traj_info(os.path.abspath(traj_vel_file), 'vel')
else:
log_info.log_error('Input error: %s file does not exist' %(traj_vel_file))
exit()
else:
log_info.log_error('Input error: no traj_vel_file, please set analyze/power_spectrum/traj_vel_file')
exit()
spec_type = spectrum_dic['type']
if ( spec_type == 'water_mode' or spec_type == 'hydration_mode' ):
if ( 'traj_coord_file' in spectrum_dic.keys() ):
traj_coord_file = spectrum_dic['traj_coord_file']
if ( os.path.exists(os.path.abspath(traj_coord_file)) ):
spectrum_dic['traj_coord_file'] = os.path.abspath(traj_coord_file)
else:
log_info.log_error('Input error: %s file does not exist' %(traj_coord_file))
exit()
else:
log_info.log_error('Input error: no traj_coord_file, please set analyze/power_spectrum/traj_coord_file')
exit()
if ( 'init_step' in spectrum_dic.keys() ):
init_step = spectrum_dic['init_step']
if ( data_op.eval_str(init_step) == 1 ):
spectrum_dic['init_step'] = int(init_step)
else:
log_info.log_error('Input error: init_step should be integer, please check or reset analyze/power_spectrum/init_step')
exit()
else:
spectrum_dic['init_step'] = start_frame_id
if ( 'end_step' in spectrum_dic.keys() ):
end_step = spectrum_dic['end_step']
if ( data_op.eval_str(end_step) == 1 ):
spectrum_dic['end_step'] = int(end_step)
else:
log_info.log_error('Input error: end_step should be integer, please check or reset analyze/power_spectrum/end_step')
exit()
else:
spectrum_dic['end_step'] = end_frame_id
init_step = spectrum_dic['init_step']
end_step = spectrum_dic['end_step']
check_step(init_step, end_step, start_frame_id, end_frame_id)
if ( 'max_frame_corr' in spectrum_dic.keys() ):
max_frame_corr = spectrum_dic['max_frame_corr']
if ( data_op.eval_str(max_frame_corr) == 1 ):
spectrum_dic['max_frame_corr'] = int(max_frame_corr)
else:
log_info.log_error('Input error: max_frame_corr should be integer, please check or reset analyze/power_spectrum/max_frame_corr')
exit()
else:
spectrum_dic['max_frame_corr'] = int(frames_num/3)
if ( 'start_wave' in spectrum_dic.keys() ):
start_wave = spectrum_dic['start_wave']
if ( data_op.eval_str(start_wave) == 1 or data_op.eval_str(start_wave) == 2 ):
spectrum_dic['start_wave'] = float(start_wave)
else:
log_info.log_error('Input error: start_wave should be float, please check or reset analyze/power_spectrum/start_wave')
exit()
else:
spectrum_dic['start_wave'] = 0
if ( 'end_wave' in spectrum_dic.keys() ):
end_wave = spectrum_dic['end_wave']
if ( data_op.eval_str(end_wave) == 1 or data_op.eval_str(end_wave) == 2 ):
spectrum_dic['end_wave'] = float(end_wave)
else:
log_info.log_error('Input error: end_wave should be float, please check or reset analyze/power_spectrum/end_wave')
exit()
else:
spectrum_dic['end_wave'] = 0
if ( 'normalize' in spectrum_dic.keys() ):
normalize = spectrum_dic['normalize']
if ( data_op.eval_str(normalize) == 1 ):
if ( int(normalize) == 0 or int(normalize) == 1 ):
spectrum_dic['normalize'] = int(normalize)
else:
log_info.log_error('Input error: normalize should be 0 or 1, please check or reset analyze/power_spectrum/normalize')
exit()
else:
log_info.log_error('Input error: normalize should be 0 or 1, please check or reset analyze/power_spectrum/normalize')
exit()
else:
spectrum_dic['normalize'] = 1
if ( spec_type == 'general' or spec_type == 'water_mode' ):
if ( 'atom_id' in spectrum_dic.keys() ):
atom_id_list = data_op.get_id_list(spectrum_dic['atom_id'])
spectrum_dic['atom_id'] = atom_id_list
else:
log_info.log_error('Input error: no atom_id, please set analyze/power_spectrum/atom_id')
exit()
else:
if ( 'box' in spectrum_dic.keys() ):
A_exist = 'A' in spectrum_dic['box'].keys()
B_exist = 'B' in spectrum_dic['box'].keys()
C_exist = 'C' in spectrum_dic['box'].keys()
else:
log_info.log_error('Input error: no box, please set analyze/power_spectrum/box')
exit()
if ( A_exist and B_exist and C_exist ):
box_A = spectrum_dic['box']['A']
box_B = spectrum_dic['box']['B']
box_C = spectrum_dic['box']['C']
else:
log_info.log_error('Input error: box setting error, please check analyze/power_spectrum/box')
exit()
if ( len(box_A) == 3 and all(data_op.eval_str(i) == 1 or data_op.eval_str(i) == 2 for i in box_A) ):
spectrum_dic['box']['A'] = [float(x) for x in box_A]
else:
log_info.log_error('Input error: A vector of box wrong, please check analyze/power_spectrum/box/A')
exit()
if ( len(box_B) == 3 and all(data_op.eval_str(i) == 1 or data_op.eval_str(i) == 2 for i in box_B) ):
spectrum_dic['box']['B'] = [float(x) for x in box_B]
else:
log_info.log_error('Input error: B vector of box wrong, please check analyze/power_spectrum/box/B')
exit()
if ( len(box_C) == 3 and all(data_op.eval_str(i) == 1 or data_op.eval_str(i) == 2 for i in box_C) ):
spectrum_dic['box']['C'] = [float(x) for x in box_C]
else:
log_info.log_error('Input error: C vector of box wrong, please check analyze/power_spectrum/box/C')
exit()
if ( spec_type == 'hydration_mode' ):
if ( 'hyd_shell_dist' in spectrum_dic.keys() ):
hyd_shell_dist = spectrum_dic['hyd_shell_dist']
if ( data_op.eval_str(hyd_shell_dist) == 1 or data_op.eval_str(hyd_shell_dist) == 2 ):
spectrum_dic['hyd_shell_dist'] = float(hyd_shell_dist)
else:
log_info.log_error('Input error: hyd_shell_dist should be float, please check or reset analyze/power_spectrum/hyd_shell_dist')
exit()
else:
log_info.log_error('Input error: no hyd_shell_dist, please set analyze/power_spectrum/hyd_shell_dist')
exit()
if ( 'dist_conv' in spectrum_dic.keys() ):
dist_conv = spectrum_dic['dist_conv']
if ( data_op.eval_str(dist_conv) == 1 or data_op.eval_str(dist_conv) == 2 ):
spectrum_dic['dist_conv'] = float(dist_conv)
else:
log_info.log_error('Input error: dist_conv should be float, please check or reset analyze/power_spectrum/dist_conv')
exit()
else:
spectrum_dic['dist_conv'] = 0.3
if ( 'atom_type_pair' in spectrum_dic.keys() ):
atom_type_pair = spectrum_dic['atom_type_pair']
if ( len(atom_type_pair) == 2 and all(data_op.eval_str(x) == 0 for x in atom_type_pair) ):
pass
else:
log_info.log_error('Input error: atom_type_pair should be 2 strings, please check or reset analyze/power_spectrum/atom_type_pair')
exit()
else:
log_info.log_error('Input error: no atom_type_pair, please set analyze/power_spectrum/atom_type_pair')
exit()
return spectrum_dic
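# The blocks above repeat one validation pattern: read a key, check its type
# code via data_op.eval_str, coerce it, or fall back to a default. A minimal
# helper sketch (an assumption, not part of the original module) expressing
# that pattern once:
def _check_numeric_key(dic, key, default, caster, valid_codes, path):
    """Hypothetical helper: validate and coerce dic[key], or apply a default."""
    if key not in dic.keys():
        dic[key] = default
    elif data_op.eval_str(dic[key]) in valid_codes:
        dic[key] = caster(dic[key])
    else:
        log_info.log_error('Input error: %s should be %s, please check %s'
                           % (key, caster.__name__, path))
        exit()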
def check_v_hartree_inp(v_hartree_dic):
'''
check_v_hartree_inp: check the input of v_hartree.
Args:
v_hartree_dic: dictionary
v_hartree_dic contains parameters for v_hartree.
Returns:
v_hartree_dic: dictionary
v_hartree_dic is the revised v_hartree_dic.
'''
v_hartree_dic = copy.deepcopy(v_hartree_dic)
if ( 'cube_file' in v_hartree_dic.keys() ):
cube_file = v_hartree_dic['cube_file']
if ( os.path.exists(os.path.abspath(cube_file)) ):
v_hartree_dic['cube_file'] = os.path.abspath(cube_file)
else:
log_info.log_error('Input error: %s file does not exist' %(cube_file))
exit()
else:
log_info.log_error('Input error: no charge cube file, please set analyze/v_hartree/cube_file')
exit()
if ( 'surface' in v_hartree_dic.keys() ):
surface = v_hartree_dic['surface']
if ( len(surface) == 3 and all(data_op.eval_str(x) == 1 for x in surface) ):
v_hartree_dic['surface'] = [int(x) for x in surface]
else:
log_info.log_error('Input error: surface should be 3 integers, please check or set analyze/v_hartree/surface')
exit()
else:
log_info.log_error('Input error: no surface, please set analyze/v_hartree/surface')
exit()
return v_hartree_dic
def check_arrange_data_inp(arrange_data_dic):
'''
check_arrange_data_inp: check the input of arrange_data.
Args:
arrange_data_dic: dictionary
arrange_data_dic contains parameters for arrange_data.
Returns:
arrange_data_dic: dictionary
arrange_data_dic is the revised arrange_data_dic.
'''
if ( 'temperature' in arrange_data_dic):
temp_dic = arrange_data_dic['temperature']
if ( 'traj_ener_file' in temp_dic.keys() ):
traj_ener_file = temp_dic['traj_ener_file']
if ( os.path.exists(os.path.abspath(traj_ener_file)) ):
arrange_data_dic['temperature']['traj_ener_file'] = os.path.abspath(traj_ener_file)
else:
log_info.log_error('Input error: %s file does not exist' %(traj_ener_file))
exit()
else:
log_info.log_error('Input error: no energy trajectory file, please set analyze/arrange_data/temperature/traj_ener_file')
exit()
#arrange potential energy
elif ( 'potential' in arrange_data_dic ):
pot_dic = arrange_data_dic['potential']
if ( 'traj_ener_file' in pot_dic.keys() ):
traj_ener_file = pot_dic['traj_ener_file']
if ( os.path.exists(os.path.abspath(traj_ener_file)) ):
arrange_data_dic['potential']['traj_ener_file'] = os.path.abspath(traj_ener_file)
else:
log_info.log_error('Input error: %s file does not exist' %(traj_ener_file))
exit()
else:
log_info.log_error('Input error: no energy trajectory file, please set analyze/arrange_data/potential/traj_ener_file')
exit()
#arrange mulliken charge
elif ( 'mulliken' in arrange_data_dic ):
mulliken_dic = arrange_data_dic['mulliken']
if ( 'traj_mul_file' in mulliken_dic.keys() ):
traj_mul_file = mulliken_dic['traj_mul_file']
if ( os.path.exists(os.path.abspath(traj_mul_file)) ):
arrange_data_dic['mulliken']['traj_mul_file'] = os.path.abspath(traj_mul_file)
else:
log_info.log_error('Input error: %s file does not exist' %(traj_mul_file))
exit()
else:
log_info.log_error('Input error: no mulliken trajectory file, please set analyze/arrange_data/mulliken/traj_mul_file')
exit()
if ( 'atom_id' in mulliken_dic.keys() ):
arrange_data_dic['mulliken']['atom_id'] = data_op.get_id_list(mulliken_dic['atom_id'])
else:
log_info.log_error('Input error: no atom id, please set analyze/arrange_data/mulliken/atom_id')
exit()
if ( 'time_step' in mulliken_dic.keys() ):
time_step = mulliken_dic['time_step']
if ( data_op.eval_str(time_step) == 2 ):
arrange_data_dic['mulliken']['time_step'] = float(time_step)
else:
log_info.log_error('Input error: time step should be float, please check or set analyze/arrange_data/mulliken/time_step')
exit()
else:
arrange_data_dic['mulliken']['time_step'] = 0.5
if ( 'each' in mulliken_dic.keys() ):
each = mulliken_dic['each']
if ( data_op.eval_str(each) == 1 ):
arrange_data_dic['mulliken']['each'] = int(each)
else:
log_info.log_error('Input error: each should be integer, please check or set analyze/arrange_data/mulliken/each')
exit()
else:
arrange_data_dic['mulliken']['each'] = 1
#arrange vertical energy
elif ( 'vertical_energy' in arrange_data_dic ):
vert_ene_dic = arrange_data_dic['vertical_energy']
if ( 'traj_mix_ener_file' in vert_ene_dic.keys() ):
traj_mix_ener_file = vert_ene_dic['traj_mix_ener_file']
if ( os.path.exists(os.path.abspath(traj_mix_ener_file)) ):
arrange_data_dic['vertical_energy']['traj_mix_ener_file'] = os.path.abspath(traj_mix_ener_file)
blocks_num, pre_base_block, end_base_block, pre_base, frames_num, each, start_frame_id, end_frame_id, time_step = \
traj_info.get_traj_info(os.path.abspath(traj_mix_ener_file), 'mix_ener')
else:
log_info.log_error('Input error: %s file does not exist' %(traj_mix_ener_file))
exit()
else:
log_info.log_error('Input error: no mix energy trajectory file, please set analyze/arrange_data/vertical_energy/traj_mix_ener_file')
exit()
if ( 'row_ox' in vert_ene_dic.keys() ):
row_ox = vert_ene_dic['row_ox']
if ( data_op.eval_str(row_ox) == 1 ):
arrange_data_dic['vertical_energy']['row_ox'] = int(row_ox)
else:
log_info.log_error('Input error: row_ox should be integer, please check or reset analyze/arrange_data/vertical_energy/row_ox')
exit()
else:
log_info.log_error('Input error: no row_ox, please set analyze/arrange_data/vertical_energy/row_ox')
exit()
if ( 'row_red' in vert_ene_dic.keys() ):
row_red = vert_ene_dic['row_red']
if ( data_op.eval_str(row_red) == 1 ):
arrange_data_dic['vertical_energy']['row_red'] = int(row_red)
else:
log_info.log_error('Input error: row_red should be integer, please check or reset analyze/arrange_data/vertical_energy/row_red')
exit()
else:
log_info.log_error('Input error: no row_red, please set analyze/arrange_data/vertical_energy/row_red')
exit()
if ( 'redox_type' in vert_ene_dic.keys() ):
redox_type = vert_ene_dic['redox_type']
if ( redox_type == 'oxidation' or redox_type == 'reduction' ):
pass
else:
log_info.log_error('Input error: only oxidation and reduction are supported for redox_type')
exit()
else:
log_info.log_error('Input error: no redox_type, please set analyze/arrange_data/vertical_energy/redox_type')
exit()
if ( 'slow_growth' in vert_ene_dic.keys() ):
slow_growth = vert_ene_dic['slow_growth']
if ( data_op.eval_str(slow_growth) == 1 | |
# ------------------------------------------------------------------------------
# BSD 2-Clause License
#
# Copyright (c) 2019, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ------------------------------------------------------------------------------
"""
Abstract
Tool for loading bvh files onto the MHX rig in Blender 2.5x
Place the script in the .blender/scripts/addons dir
Activate the script in the "Add-Ons" tab (user preferences).
Access from UI panel (N-key) when MHX rig is active.
Alternatively, run the script in the script editor (Alt-P), and access from UI panel.
"""
bl_info = {
"name": "Retarget BVH",
"author": "<NAME>",
"version": (2,0),
"blender": (2,80,0),
"location": "View3D > Tools > Retarget BVH",
"description": "Mocap retargeting tool",
"warning": "",
'wiki_url': "http://diffeomorphic.blogspot.com/retarget-bvh/",
"category": "Animation"}
# To support reload properly, try to access a package var, if it's there, reload everything
if "bpy" in locals():
print("Reloading BVH Retargeter")
import imp
imp.reload(utils)
if bpy.app.version < (2,80,0):
imp.reload(buttons27)
else:
imp.reload(buttons28)
imp.reload(io_json)
imp.reload(props)
imp.reload(t_pose)
imp.reload(armature)
imp.reload(source)
imp.reload(target)
imp.reload(load)
imp.reload(retarget)
imp.reload(fkik)
imp.reload(simplify)
imp.reload(action)
imp.reload(loop)
imp.reload(edit)
imp.reload(floor)
else:
print("Loading BVH Retargeter")
import bpy
from . import utils
if bpy.app.version < (2,80,0):
from . import buttons27
else:
from . import buttons28
from . import io_json
from . import props
from . import t_pose
from . import armature
from . import source
from . import target
from . import load
from . import retarget
from . import fkik
from . import simplify
from . import action
from . import loop
from . import edit
from . import floor
if bpy.app.version < (2,80,0):
Region = "TOOLS"
else:
Region = "UI"
def inset(layout):
split = utils.splitLayout(layout, 0.05)
split.label(text="")
return split.column()
########################################################################
#
# class Main(bpy.types.Panel):
#
class MCP_PT_Main(bpy.types.Panel):
bl_category = "Retarget BVH"
bl_label = "Retarget BVH v %d.%d: Main" % bl_info["version"]
bl_space_type = "VIEW_3D"
bl_region_type = Region
def draw(self, context):
layout = self.layout
ob = context.object
scn = context.scene
layout.operator("mcp.load_and_retarget")
layout.separator()
layout.prop(scn, "McpStartFrame")
layout.prop(scn, "McpEndFrame")
layout.separator()
layout.prop(scn, "McpShowDetailSteps")
if scn.McpShowDetailSteps:
ins = inset(layout)
ins.operator("mcp.load_bvh")
if ob and ob.type == 'ARMATURE':
ins.operator("mcp.rename_bvh")
ins.operator("mcp.load_and_rename_bvh")
ins.separator()
ins.operator("mcp.retarget_mhx")
ins.separator()
ins.operator("mcp.simplify_fcurves")
ins.operator("mcp.rescale_fcurves")
########################################################################
#
# class MCP_PT_Options(bpy.types.Panel):
#
class MCP_PT_Options(bpy.types.Panel):
bl_category = "Retarget BVH"
bl_label = "Options"
bl_space_type = "VIEW_3D"
bl_region_type = Region
bl_options = {'DEFAULT_CLOSED'}
@classmethod
def poll(cls, context):
if context.object and context.object.type == 'ARMATURE':
return True
def draw(self, context):
layout = self.layout
scn = context.scene
rig = context.object
layout.prop(scn, "McpAutoScale")
layout.prop(scn, "McpBvhScale")
layout.prop(scn, "McpUseLimits")
layout.prop(scn, "McpClearLocks")
layout.prop(scn, 'McpAutoSourceRig')
layout.prop(scn, 'McpAutoTargetRig')
layout.prop(scn, "McpIgnoreHiddenLayers")
layout.prop(scn, "McpDoBendPositive")
layout.separator()
layout.label(text="SubSample and Rescale")
ins = inset(layout)
ins.prop(scn, "McpDefaultSS")
if not scn.McpDefaultSS:
ins.prop(scn, "McpSubsample")
ins.prop(scn, "McpSSFactor")
ins.prop(scn, "McpRescale")
ins.prop(scn, "McpRescaleFactor")
ins.operator("mcp.rescale_fcurves")
layout.separator()
layout.label(text="Simplification")
ins = inset(layout)
ins.prop(scn, "McpDoSimplify")
ins.prop(scn, "McpErrorLoc")
ins.prop(scn, "McpErrorRot")
ins.prop(scn, "McpSimplifyVisible")
ins.prop(scn, "McpSimplifyMarkers")
########################################################################
#
# class MCP_PT_Edit(bpy.types.Panel):
#
class MCP_PT_Edit(bpy.types.Panel):
bl_category = "Retarget BVH"
bl_label = "Edit Actions"
bl_space_type = "VIEW_3D"
bl_region_type = Region
bl_options = {'DEFAULT_CLOSED'}
@classmethod
def poll(cls, context):
if context.object and context.object.type == 'ARMATURE':
return True
def draw(self, context):
layout = self.layout
scn = context.scene
rig = context.object
layout.prop(scn, "McpShowIK")
if scn.McpShowIK:
ins = inset(layout)
row = ins.row()
row.prop(scn, "McpFkIkArms")
row.prop(scn, "McpFkIkLegs")
ins.operator("mcp.transfer_to_ik")
ins.operator("mcp.transfer_to_fk")
ins.operator("mcp.clear_animation", text="Clear IK Animation").type = "IK"
ins.operator("mcp.clear_animation", text="Clear FK Animation").type = "FK"
layout.separator()
layout.prop(scn, "McpShowGlobal")
if scn.McpShowGlobal:
ins = inset(layout)
ins.operator("mcp.shift_bone")
#ins.separator()
#row = ins.row()
#row.prop(scn, "McpBendElbows")
#row.prop(scn, "McpBendKnees")
#ins.operator("mcp.limbs_bend_positive")
ins.separator()
row = ins.row()
row.prop(scn, "McpFixX")
row.prop(scn, "McpFixY")
row.prop(scn, "McpFixZ")
ins.operator("mcp.fixate_bone")
ins.separator()
ins.prop(scn, "McpRescaleFactor")
ins.operator("mcp.rescale_fcurves")
layout.separator()
layout.prop(scn, "McpShowDisplace")
if scn.McpShowDisplace:
ins = inset(layout)
ins.operator("mcp.start_edit")
ins.operator("mcp.undo_edit")
row = ins.row()
props = row.operator("mcp.insert_key", text="Loc")
props.loc = True
props.rot = False
props.delete = False
props = row.operator("mcp.insert_key", text="Rot")
props.loc = False
props.rot = True
props.delete = False
row = ins.row()
props = row.operator("mcp.insert_key", text="LocRot")
props.loc = True
props.rot = True
props.delete = False
props = row.operator("mcp.insert_key", text="Delete")
props.loc = True
props.rot = True
props.delete = True
row = ins.row()
props = row.operator("mcp.move_to_marker", text="|<")
props.left = True
props.last = True
props = row.operator("mcp.move_to_marker", text="<")
props.left = True
props.last = False
props = row.operator("mcp.move_to_marker", text=">")
props.left = False
props.last = False
props = row.operator("mcp.move_to_marker", text=">|")
props.left = False
props.last = True
ins.operator("mcp.confirm_edit")
layout.separator()
layout.prop(scn, "McpShowFeet")
if scn.McpShowFeet:
ins = inset(layout)
row = ins.row()
row.prop(scn, "McpFloorLeft")
row.prop(scn, "McpFloorRight")
row.prop(scn, "McpFloorHips")
ins.operator("mcp.offset_toe")
ins.operator("mcp.floor_foot")
layout.separator()
layout.prop(scn, "McpShowLoop")
if scn.McpShowLoop:
ins = inset(layout)
ins.prop(scn, "McpLoopBlendRange")
ins.prop(scn, "McpLoopInPlace")
ins.operator("mcp.loop_fcurves")
ins.separator()
ins.prop(scn, "McpRepeatNumber")
ins.operator("mcp.repeat_fcurves")
layout.separator()
layout.prop(scn, "McpShowStitch")
if scn.McpShowStitch:
ins = inset(layout)
ins.operator("mcp.update_action_list")
ins.separator()
ins.prop(scn, "McpFirstAction")
split = ins.split(0.75)
split.prop(scn, "McpFirstEndFrame")
split.operator("mcp.set_current_action").prop = "McpFirstAction"
ins.separator()
ins.prop(scn, "McpSecondAction")
split = ins.split(0.75)
split.prop(scn, "McpSecondStartFrame")
split.operator("mcp.set_current_action").prop = "McpSecondAction"
ins.separator()
ins.prop(scn, "McpLoopBlendRange")
ins.prop(scn, "McpOutputActionName")
ins.operator("mcp.stitch_actions")
########################################################################
#
# class MCP_PT_MhxSourceBones(bpy.types.Panel):
#
class MCP_PT_MhxSourceBones(bpy.types.Panel):
bl_category = "Retarget BVH"
bl_label = "Source armature"
bl_space_type = "VIEW_3D"
bl_region_type = Region
bl_options = {'DEFAULT_CLOSED'}
@classmethod
def poll(cls, context):
return (context.object and context.object.type == 'ARMATURE')
def draw(self, context):
layout = self.layout
scn = context.scene
rig = context.object
if not source.isSourceInited(scn):
layout.operator("mcp.init_sources", text="Init Source Panel")
return
layout.operator("mcp.init_sources", text="Reinit Source Panel")
layout.prop(scn, 'McpAutoSourceRig')
layout.prop(scn, "McpSourceRig")
if scn.McpSourceRig:
from .source import getSourceArmature
amt = getSourceArmature(scn.McpSourceRig)
if amt:
bones = amt.boneNames
box = layout.box()
for boneText in target.TargetBoneNames:
if not boneText:
box.separator()
continue
(mhx, text) = boneText
bone = source.findSourceKey(mhx, bones)
if bone:
row = box.row()
row.label(text=text)
row.label(text=bone)
########################################################################
#
# class MCP_PT_MhxTargetBones(bpy.types.Panel):
#
class MCP_PT_MhxTargetBones(bpy.types.Panel):
bl_category = "Retarget BVH"
bl_label = "Target armature"
bl_space_type = "VIEW_3D"
bl_region_type = Region
bl_options = {'DEFAULT_CLOSED'}
@classmethod
def poll(cls, context):
return (context.object and context.object.type == 'ARMATURE')
def draw(self, context):
layout = self.layout
rig = context.object
scn = context.scene
if not target.isTargetInited(scn):
layout.operator("mcp.init_targets", text="Init Target Panel")
return
layout.operator("mcp.init_targets", text="Reinit Target Panel")
layout.separator()
layout.prop(scn, "McpTargetRig")
layout.prop(scn, 'McpAutoTargetRig')
layout.separator()
layout.prop(scn, "McpIgnoreHiddenLayers")
layout.prop(rig, "MhReverseHip")
layout.operator("mcp.get_target_rig")
layout.separator()
layout.prop(scn, "McpSaveTargetTPose")
layout.operator("mcp.save_target_file")
layout.separator()
if scn.McpTargetRig:
from .target import getTargetInfo, TargetBoneNames, findTargetKeys
(bones, ikBones, tpose, bendTwist) = getTargetInfo(scn.McpTargetRig)
layout.label(text="FK bones")
box = layout.box()
for boneText in TargetBoneNames:
if not boneText:
box.separator()
continue
(mhx, text) = boneText
bnames = findTargetKeys(mhx, bones)
if bnames:
for bname in bnames:
row = box.row()
row.label(text=text)
row.label(text=bname)
else:
row = box.row()
row.label(text=text)
row.label(text="-")
if ikBones:
row = layout.row()
row.label(text="IK bone")
row.label(text="FK bone")
box = layout.box()
for (ikBone, fkBone) in ikBones:
row = box.row()
row.label(text=ikBone)
row.label(text=fkBone)
if bendTwist:
row = layout.row()
row.label(text="Bend bone")
row.label(text="Twist bone")
box = layout.box()
for (bendBone, twistBone) in bendTwist:
row = box.row()
row.label(text=bendBone)
row.label(text=twistBone)
########################################################################
#
# class MCP_PT_Utility(bpy.types.Panel):
#
class MCP_PT_Utility(bpy.types.Panel):
bl_category = "Retarget BVH"
bl_label = "Utilities"
bl_space_type = "VIEW_3D"
bl_region_type = Region
bl_options = {'DEFAULT_CLOSED'}
@classmethod
def poll(cls, context):
if context.object and context.object.type == 'ARMATURE':
return True
def draw(self, context):
layout = self.layout
scn = context.scene
rig = context.object
layout.prop(scn, "McpShowDefaultSettings")
if scn.McpShowDefaultSettings:
ins = inset(layout)
ins.operator("mcp.save_defaults")
ins.operator("mcp.load_defaults")
layout.separator()
layout.prop(scn, "McpShowActions")
if scn.McpShowActions:
ins = inset(layout)
ins.prop_menu_enum(context.scene, "McpActions")
ins.prop(scn, 'McpFilterActions')
ins.operator("mcp.update_action_list")
ins.operator("mcp.set_current_action").prop = 'McpActions'
ins.operator("mcp.delete")
ins.operator("mcp.delete_hash")
layout.separator()
layout.prop(scn, "McpShowPosing")
if scn.McpShowPosing:
ins = inset(layout)
if not rig.McpTPoseDefined:
ins.prop(scn, "McpMakeHumanTPose")
ins.operator("mcp.set_t_pose")
ins.separator()
ins.operator("mcp.define_t_pose")
ins.operator("mcp.undefine_t_pose")
ins.separator()
ins.operator("mcp.load_pose")
ins.operator("mcp.save_pose")
ins.separator()
ins.operator("mcp.rest_current_pose")
layout.separator()
layout.operator("mcp.clear_temp_props")
return
layout.operator("mcp.copy_angles_fk_ik")
layout.separator()
layout.label(text="Batch conversion")
layout.prop(scn, "McpDirectory")
layout.prop(scn, "McpPrefix")
layout.operator("mcp.batch")
#----------------------------------------------------------
# Initialize
#----------------------------------------------------------
classes = [
MCP_PT_Main,
MCP_PT_Options,
MCP_PT_Edit,
MCP_PT_MhxSourceBones,
MCP_PT_MhxTargetBones,
MCP_PT_Utility,
utils.ErrorOperator
]
def register():
action.initialize()
edit.initialize()
fkik.initialize()
floor.initialize()
load.initialize()
loop.initialize()
props.initialize()
retarget.initialize()
simplify.initialize()
source.initialize()
t_pose.initialize()
target.initialize()
for cls in classes:
bpy.utils.register_class(cls)
def unregister():
action.uninitialize()
edit.uninitialize()
fkik.uninitialize()
floor.uninitialize()
load.uninitialize()
loop.uninitialize()
props.uninitialize()
retarget.uninitialize()
simplify.uninitialize()
source.uninitialize()
t_pose.uninitialize()
target.uninitialize()
for cls in classes:
    bpy.utils.unregister_class(cls)
#! /usr/bin/env python
"""
Full-frame PCA algorithm for ADI, ADI+RDI and ADI+mSDI (IFS data) cubes.
"""
from __future__ import division, print_function
__author__ = '<NAME>'
__all__ = ['pca']
import numpy as np
from multiprocessing import cpu_count
from .svd import svd_wrapper
from ..preproc.derotation import _find_indices_adi, _compute_pa_thresh
from ..preproc import cube_rescaling_wavelengths as scwave
from ..preproc import (cube_derotate, cube_collapse, check_pa_vector,
check_scal_vector, cube_crop_frames)
from ..conf import timing, time_ini, check_enough_memory, Progressbar
from ..conf.utils_conf import pool_map, fixed
from ..var import frame_center, dist, prepare_matrix, reshape_matrix
from ..stats import descriptive_stats
def pca(cube, angle_list, cube_ref=None, scale_list=None, ncomp=1, ncomp2=1,
svd_mode='lapack', scaling=None, adimsdi='double', mask_center_px=None,
source_xy=None, delta_rot=1, fwhm=4, imlib='opencv',
interpolation='lanczos4', collapse='median', check_mem=True,
crop_ifs=True, nproc=1, full_output=False, verbose=True, debug=False):
""" Algorithm where the reference PSF and the quasi-static speckle pattern
are modeled using Principal Component Analysis. Depending on the input
parameters this PCA function can work in ADI, RDI or SDI (IFS data) mode.
ADI: If neither a reference cube nor a scaling vector are provided, the
target cube itself is used to learn the PCs and to obtain a low-rank
approximation model PSF (star + speckles).
ADI + RDI: if a reference cube is provided (``cube_ref``), its PCs are used
to reconstruct the target frames to obtain the model PSF (star + speckles).
ADI + SDI (IFS data): if a scaling vector is provided (``scale_list``) and
the cube is a 4d array [# channels, # adi-frames, Y, X], its assumed it
contains several multi-spectral ADI frames. A single or two stages PCA can
be performed, depending on ``adimsdi``.
Parameters
----------
cube : array_like, 3d or 4d
Input cube (ADI or ADI+mSDI).
angle_list : array_like, 1d
Corresponding parallactic angle for each frame.
cube_ref : array_like, 3d, optional
Reference library cube. For Reference Star Differential Imaging.
scale_list : array_like, 1d, optional
Scaling factors in case of IFS data (ADI+mSDI cube). Usually, the
scaling factors are the central channel wavelength divided by the
shortest wavelength in the cube (more thorough approaches can be used
to get the scaling factors). These scaling factors are used to re-scale
the spectral channels and align the speckles.
ncomp : int, optional
How many PCs are used as a lower-dimensional subspace to project the
target frames. For an ADI cube, ``ncomp`` is the number of PCs extracted
from ``cube``. For the RDI case, when ``cube`` and ``cube_ref`` are
provided, ``ncomp`` is the number of PCs obtained from ``cube_ref``.
For an ADI+mSDI cube (e.g. SPHERE/IFS), if ``adimsdi`` is ``double``
then ``ncomp`` is the number of PCs obtained from each multi-spectral
frame (if ``ncomp`` is None then this stage will be skipped and the
spectral channels will be combined without subtraction). If ``adimsdi``
is ``single``, then ``ncomp`` is the number of PCs obtained from the
whole set of frames (n_channels * n_adiframes).
ncomp2 : int, optional
Only used for ADI+mSDI cubes, when ``adimsdi`` is set to ``double``.
``ncomp2`` sets the number of PCs used in the second PCA stage (ADI
fashion, using the residuals of the first stage). If None then the
second PCA stage is skipped and the residuals are de-rotated and
combined.
svd_mode : {'lapack', 'arpack', 'eigen', 'randsvd', 'cupy', 'eigencupy',
'randcupy', 'pytorch', 'eigenpytorch', 'randpytorch'}, str optional
Switch for the SVD method/library to be used. ``lapack`` uses the LAPACK
linear algebra library through Numpy and it is the most conventional way
of computing the SVD (deterministic result computed on CPU). ``arpack``
uses the ARPACK Fortran libraries accessible through Scipy (computation
on CPU). ``eigen`` computes the singular vectors through the
eigendecomposition of the covariance M.M' (computation on CPU).
``randsvd`` uses the randomized_svd algorithm implemented in Sklearn
(computation on CPU). ``cupy`` uses the Cupy library for GPU computation
of the SVD as in the LAPACK version. ``eigencupy`` offers the same
method as with the ``eigen`` option but on GPU (through Cupy).
``randcupy`` is an adaptation of the randomized_svd algorithm, where all
the computations are done on a GPU (through Cupy). ``pytorch`` uses the
Pytorch library for GPU computation of the SVD. ``eigenpytorch`` offers
the same method as with the ``eigen`` option but on GPU (through
Pytorch). ``randpytorch`` is an adaptation of the randomized_svd
algorithm, where all the linear algebra computations are done on a GPU
(through Pytorch).
scaling : {None, 'temp-mean', 'spat-mean', 'temp-standard', 'spat-standard'}
With None, no scaling is performed on the input data before SVD. With
"temp-mean" then temporal px-wise mean subtraction is done, with
"spat-mean" then the spatial mean is subtracted, with "temp-standard"
temporal mean centering plus scaling to unit variance is done and with
"spat-standard" spatial mean centering plus scaling to unit variance is
performed.
adimsdi : {'double', 'single'}, str optional
In the case ``cube`` is a 4d array, ``adimsdi`` determines whether a
single or double pass PCA is going to be computed. In the ``single``
case, the multi-spectral frames are rescaled wrt the largest wavelength
to align the speckles and all the frames are processed with a single
PCA low-rank approximation. In the ``double`` case, a firt stage is run
on the rescaled spectral frames, and a second PCA frame is run on the
residuals in an ADI fashion.
mask_center_px : None or int
If None, no masking is done. If an integer > 1 then this value is the
radius of the circular mask.
source_xy : tuple of int, optional
For ADI PCA, this triggers a frame rejection in the PCA library.
source_xy are the coordinates X,Y of the center of the annulus where the
PA criterion will be used to reject frames from the library.
delta_rot : int, optional
Factor for tuning the parallactic angle threshold, expressed in FWHM.
Default is 1 (excludes 1xFWHM on each side of the considered frame).
fwhm : float, optional
Known size of the FWHM in pixels to be used. Default value is 4.
imlib : str, optional
See the documentation of the ``vip_hci.preproc.frame_rotate`` function.
interpolation : str, optional
See the documentation of the ``vip_hci.preproc.frame_rotate`` function.
collapse : {'median', 'mean', 'sum', 'trimmean'}, str optional
Sets the way of collapsing the frames for producing a final image.
check_mem : bool, optional
If True, it checks that the input cube(s) are smaller than the available
system memory.
crop_ifs: bool, optional
If True and the data are to be reduced with ADI+SDI(IFS) in a single step,
this will crop the cube at the moment of frame rescaling in wavelength.
This is recommended for large FOVs such as the one of SPHERE, but can
remove a significant amount of information close to the edge of small FOVs
(e.g. SINFONI).
nproc : None or int, optional
Number of processes for parallel computing. If None the number of
processes will be set to (cpu_count()/2). Defaults to ``nproc=1``.
full_output: bool, optional
Whether to return the final median combined image only or with other
intermediate arrays.
verbose : bool, optional
If True prints intermediate info and timing.
debug : bool, optional
Whether to print debug information or not.
Returns
-------
frame : array_like, 2d
Median combination of the de-rotated/re-scaled residuals cube.
If full_output is True, and depending on the type of cube (ADI or ADI+mSDI),
then several arrays will be returned, such as the residuals, de-rotated
residuals, principal components
References
----------
The full-frame ADI-PCA implementation is based on Soummer et al. 2012
(http://arxiv.org/abs/1207.4197) and Amara & Quanz 2012
(http://arxiv.org/abs/1207.6637).
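Examples
--------
Illustrative ADI usage (array shapes assumed, not taken from the original
documentation):

>>> import numpy as np
>>> cube = np.random.rand(30, 101, 101)      # 30 ADI frames of 101x101 px
>>> angles = np.linspace(0, 40, 30)          # parallactic angles [deg]
>>> frame = pca(cube, angles, ncomp=5, verbose=False)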
"""
if not cube.ndim > 2:
raise TypeError('Input array is not a 3d or 4d array')
if check_mem:
input_bytes = cube.nbytes
if cube_ref is not None:
input_bytes += cube_ref.nbytes
if not check_enough_memory(input_bytes, 1.5, False):
msgerr = 'Input cubes are larger than available system memory. '
msgerr += 'Set check_mem=False to override this memory check or '
msgerr += 'use the incremental PCA (for ADI)'
raise RuntimeError(msgerr)
start_time = time_ini(verbose)
angle_list = check_pa_vector(angle_list)
if nproc is None:
nproc = cpu_count() // 2 # Hyper-threading doubles the # of cores
# ADI + | |
<filename>ridt/equation/eddy_diffusion.py
import warnings
import numpy
from typing import List
from typing import Tuple
from typing import Union
from itertools import product
from tqdm import tqdm
from numpy import ndarray
from numpy import array
from numpy import zeros
from numpy import exp
from numpy import log
from numpy import power
from numpy import pi
from numpy import square
from numpy import nanmean
from scipy.integrate import cumtrapz
from scipy.integrate import romberg
from ridt.config import RIDTConfig
from ridt.config import InstantaneousSource
from ridt.config import InfiniteDurationSource
from ridt.config import FixedDurationSource
from ridt import bar_args
numpy.seterr(divide='ignore')
numpy.seterr(invalid='ignore')
Source = Union[InstantaneousSource, InfiniteDurationSource, FixedDurationSource]
Value = Union[ndarray, float]
FloatList = List[float]
MAX_IMAGE = 20
class EddyDiffusion:
"""The core Eddy Diffusion model class.
This class provides all equation implementation of the Eddy Diffusion Model.
For details of the mathematics implemented here, please see the user guide.
Attributes
----------
settings : :class:`~.RIDTConfig`
The settings for the run in question.
dim : :class:`~.ridt.config.ridtconfig.Dimensions`
The dimensions settings object that define the bounds of the system.
disc : :class:`~.SpatialSamples`
The spatial samples settings object that define how each axis is
discretised.
volume : :obj:`float`
The total volume of the system.
shape : :obj:`Tuple`[:obj:`int`]
The shape of the current grid.
cumtrapz_kwargs : :obj:`dict`
A dictionary of keyword args for the cumulative trapezoidal integration
function.
diff_coeff : :obj:`float`
The diffusion coefficient.
modes : :obj:`List`[:obj:`str`]
The different string ids for the source modes.
x : :class:`~numpy.ndarray`
The current x-axis meshgrid.
y : :class:`~numpy.ndarray`
The current y-axis meshgrid.
z : :class:`~numpy.ndarray`
The current z-axis meshgrid.
t : :obj:`List`[:obj:`float`]
The current time domain array.
rv : :class:`~numpy.ndarray`
The calculated concentration values.
"""
def __init__(self, settings: RIDTConfig):
"""The :class:`EddyDiffusion` constructor.
Parameters
----------
settings : :class:`~.RIDTConfig`
The settings for the run in question.
"""
self.settings = settings
self.dim = self.settings.dimensions
self.disc = self.settings.spatial_samples
self.volume = self.dim.x * self.dim.y * self.dim.z
self.cumtrapz_kwargs = {
"axis": 0,
"initial": 0,
"dx": self.settings.total_time / self.settings.time_samples
}
self.diff_coeff = self.diffusion_coefficient()
self.modes = ["instantaneous", "infinite_duration", "fixed_duration"]
def __call__(self, x: ndarray, y: ndarray, z: ndarray, t: FloatList):
"""This call method is used to evaluate the model.
Parameters
----------
x : :class:`~numpy.ndarray`
The current x-axis meshgrid.
y : :class:`~numpy.ndarray`
The current y-axis meshgrid.
z : :class:`~numpy.ndarray`
The current z-axis meshgrid.
t : :obj:`List`[:obj:`float`]
The current time domain array.
Returns
-------
:class:`~numpy.ndarray`
The calculated concentration values.
"""
self.get_grid_shape(x)
self.assign_grids(x, y, z, t)
self.rv = array(self.zero_arrays())
for mode in self.modes:
self.sources = getattr(self.settings.modes, mode).sources
with warnings.catch_warnings():
warnings.simplefilter("ignore")
getattr(self, f"{mode}")()
return self.rv
def assign_grids(self, x: ndarray, y: ndarray, z: ndarray, t: FloatList) -> None:
"""Assign the meshgrids to the relevant attributes.
Parameters
----------
x : :class:`~numpy.ndarray`
The current x-axis meshgrid.
y : :class:`~numpy.ndarray`
The current y-axis meshgrid.
z : :class:`~numpy.ndarray`
The current z-axis meshgrid.
t : :obj:`List`[:obj:`float`]
The current time domain array.
Returns
-------
None
"""
self.x = x
self.y = y
self.z = z
self.t = t
def get_grid_shape(self, grid: ndarray) -> Tuple[int]:
"""Returns the shape of the passed grid.
Parameters
----------
grid : :class:`~numpy.ndarray`
The grid to be assessed.
Returns
-------
:class:`Tuple`[:obj:`int`]
The shape of the grid.
"""
self.shape = grid.shape
def get_cartesian_index_space(self):
"""Generate a cartesian product set of all grid indices.
Returns
-------
:class:`Iterable`[:class:`Tuple`[:obj:`int`]]
An iterable of tuples of indices.
"""
return product(*[range(d) for d in self.shape])
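        # Illustrative note: for shape == (2, 2, 1) the product above yields
        # (0, 0, 0), (0, 1, 0), (1, 0, 0), (1, 1, 0) -- one index tuple per
        # grid point, which is what the Romberg branch iterates over.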
def get_raw_values(self, idx: int, idy: int, idz: int) -> Tuple[float]:
"""For a given set of grid indices, return raw values from the meshes.
Parameters
----------
idx : :obj:`int`
The x index.
idy : :obj:`int`
The y index.
idz : :obj:`int`
The z index.
Returns
-------
:class:`Tuple`[:obj:`float`]
The x, y, and z values corresponding to the provided indices.
"""
index = (idx, idy, idz)
return self.x[index], self.y[index], self.z[index]
@property
def time(self):
"""Returns a :mod:`tqdm` iterable over the :attr:`time` iterable.
Returns
-------
        :obj:`Iterable`[:class:`Tuple`[:obj:`int`, :obj:`float`]]
            The :mod:`tqdm` iterable over the :attr:`t` time iterable.
"""
return tqdm(enumerate(self.t), total=len(self.t), **bar_args)
def romberg(self, time: float, source: Source, idx: int, idy: int, idz: int) -> float:
""" Performs romberg integration at the given grid location.
This is performed between zero and the provided time.
Parameters
----------
time : :obj:`float`
The time to integrate to.
source : :class:`~.Source`
The source term in question.
idx : :obj:`int`
The x index.
idy : :obj:`int`
The y index.
idz : :obj:`int`
The z index.
Returns
-------
:obj:`float`
The computed concentration.
"""
x, y, z = self.get_raw_values(idx, idy, idz)
integrand = lambda tin: self.conc(source, x, y, z, tin)
return romberg(integrand, 1e-100, time, tol=1e-100)
def pointwise(self, source: Source, time: float) -> ndarray:
"""Evaluates the equation pointwise at every location in the meshgrids.
Parameters
----------
time : :obj:`float`
The time to integrate to.
source : :class:`~.Source`
The source term in question.
Returns
-------
:class:`~numpy.ndarray`
The array containing the computed concentrations.
"""
return self.conc(source, self.x, self.y, self.z, time)
def evaluate(self, rtime: float, idt: int, conc: List[ndarray], source: Source):
"""Calls the relevant method for evaluating the equations.
Different methods are called depending on the integration method
selected.
Parameters
----------
rtime : :obj:`float`
The time relative to the source release/start time.
idt : :obj:`int`
The absolute time index.
conc : :class:`List`[:class:`~numpy.ndarray`]
The list of grids to store the computed values in.
source : :class:`~.Source`
The source being evaluated.
Returns
-------
None
"""
if self.settings.integration_method == "romberg":
for item in self.get_cartesian_index_space():
conc[idt][item] += source.rate * self.romberg(rtime, source, *item)
else:
conc[idt] += source.rate * self.pointwise(source, rtime)
def process(self, conc: List[ndarray]) -> ndarray:
"""Perform the integration method dependent post processing.
Parameters
----------
conc : :class:`List`[:class:`~numpy.ndarray`]
The list of grids to store the computed values in.
Returns
-------
:class:`~numpy.ndarray`
The final grids to be added to the total.
"""
if self.settings.integration_method == "romberg":
return array(conc)
else:
return cumtrapz(array(conc), **self.cumtrapz_kwargs)
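    # Minimal sketch of the cumtrapz semantics relied on above (values are
    # hypothetical): with initial=0 and a uniform dx, a stack of per-step
    # rates f(t_i) becomes cumulative totals F(t_i) along axis 0.
    #
    #   from scipy.integrate import cumtrapz
    #   rates = array([0.0, 1.0, 1.0, 1.0])
    #   cumtrapz(rates, dx=0.5, initial=0)   # -> array([0., 0.25, 0.75, 1.25])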
def log_start(self, name: str, id: str) -> None:
"""Print a log message the evaluation of a grid has started.
Parameters
----------
name : :obj:`str`
The type of source.
id : :obj:`str`
The source id string.
Returns
-------
None
"""
print(f"Evaluating {name} source (id: {id}) for each time...")
def instantaneous(self):
"""Evaluate all instanteneous sources.
Returns
-------
None
"""
for id, source in self.sources.items():
conc = self.zero_arrays()
self.log_start("instanteneous", id)
for idt, time in self.time:
stime = time - source.time
if stime > 0:
conc[idt] += source.mass * self.pointwise(source, stime)
self.rv += array(conc)
def infinite_duration(self):
"""Evaluate all infinite duration sources.
Returns
-------
None
"""
for id, source in self.sources.items():
conc = self.zero_arrays()
self.log_start("infinite duration", id)
for idt, time in self.time:
stime = time - source.time
if stime > 0:
self.evaluate(stime, idt, conc, source)
self.rv += self.process(conc)
def fixed_duration(self):
"""Evaluate all fixed duration sources.
Returns
-------
None
"""
for id, source in self.sources.items():
conc = self.zero_arrays()
conc_decay = self.zero_arrays()
self.log_start("fixed duration", id)
for idt, time in self.time:
stime = time - source.start_time
etime = time - source.start_time - source.end_time
if stime > 0:
self.evaluate(stime, idt, conc, source)
if etime > 0:
self.evaluate(etime, idt, conc_decay, source)
self.rv += self.process(conc) - self.process(conc_decay)
def conc(self, source: Source, x: Value, y: Value, z: Value, t: float) -> Value:
"""Evaluate various the model at a given location, time and source.
Parameters
----------
source : :class:`~.Source`
The source being evaluated.
x : :class:`~.Value`
The x value.
y : :class:`~.Value`
The y value.
z : :class:`~.Value`
The z value.
t : :obj:`float`
            The time value.
Returns
-------
:class:`~.Value`
The calculated concentration.
"""
r_x = self.exp(x, t, self.dim.x, source.x)
r_y = self.exp(y, t, self.dim.y, source.y)
r_z = self.exp(z, t, self.dim.z, source.z)
return self.coefficient(t) * r_x * r_y * r_z
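    # In symbols (per the user guide the class docstring points to):
    #   C(x, y, z, t) = A(t) * X(x, t) * Y(y, t) * Z(z, t)
    # i.e. the separable image-source solution of the diffusion equation in a
    # box, with one exponential-sum factor per axis.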
def exp(self, pos: Value, time: float, bound: float, spos: float) -> Value:
"""The sum of exponentials in the Eddy diffusion model.
Parameters
----------
pos : :class:`~.Value`
The position.
        time : :obj:`float`
            The time.
bound : :obj:`float`
The upper spatial bound.
spos : :obj:`float`
The source position.
Returns
-------
:class:`~.Value`
The calculated value.
"""
image_setting = self.settings.models.eddy_diffusion.images
        # assumed completion: the source is cut off mid-expression here; the
        # denominator 4*D*t below is the standard Gaussian diffusion kernel.
        term = lambda x: exp(-power(x, 2) / (4 * self.diff_coeff * time))
high requires "
"full CPU reservation"
)
self.logger.info("Reserving CPU for VM {0}".format(vm_obj.name))
tasks.append(
ConfigVM(vm_obj).cpu_reservation(host_cpu_mhz, reser=1)
)
if tasks:
GetWait().wait_for_tasks(
tasks, task_name="Configure memory/CPU reservation"
)
def _get_latency_task(self, vm_cfg):
"""Set Latency Sensitivity level and get task
Args:
vm_cfg (dict): a dict contains VM config info
Returns:
Task
"""
vm_obj = self.objs.get_vm(vm_cfg["vm"])
if Check().check_kv(vm_cfg, "check"):
self.logger.info(
"Latency sensitivity level is {0} for "
"VM {1}".format(GetVM(vm_obj).latency(), vm_cfg["vm"])
)
if Check().check_kv(vm_cfg, "level"):
task = ConfigVM(vm_obj).latency(vm_cfg["level"])
return task
def passthru_cli(self):
"""Configure devices in Passthrough mode for VM(s)
Returns:
None
"""
vm_cfgs = self._extract_file(self.cfg)
if self.cfg["query"]:
for vm_cfg in vm_cfgs:
self._query_passthru(vm_cfg)
if self.cfg["add"]:
tasks = []
for vm_cfg in vm_cfgs:
tasks.extend(self._get_add_passthru_task(vm_cfg))
if tasks:
GetWait().wait_for_tasks(tasks, task_name="Add Passthrough device(s)")
if self.cfg["remove"]:
tasks = []
for vm_cfg in vm_cfgs:
tasks.extend(self._get_remove_passthru_task(vm_cfg))
if tasks:
GetWait().wait_for_tasks(
tasks, task_name="Remove Passthrough device(s)"
)
def _passthru_cluster(self, vm_cfgs, *keys):
"""Configure devices in Passthrough mode for VM(s)
(defined in a cluster conf file)
Args:
vm_cfgs (list): a list of dicts contains VM config info
*keys: a keyword array that can trigger this configuration
Returns:
None
"""
tasks = []
for vm_cfg in vm_cfgs:
if all(k in vm_cfg for k in keys):
tasks.extend(self._get_add_passthru_task(vm_cfg))
if tasks:
GetWait().wait_for_tasks(tasks, task_name="Add Passthrough device(s)")
def _query_passthru(self, vm_cfg):
"""Query Passthrough device(s) for a VM
Args:
vm_cfg (dict): a dict contains VM config info
Returns:
None
"""
vm_obj = self.objs.get_vm(vm_cfg["vm"])
vm_status = GetVM(vm_obj)
devices = vm_status.avail_pci_info()
if devices:
print("Available Passthrough device(s) for VM {0}:".format(vm_obj.name))
print("------------------------------------------------")
for index, pci_device in enumerate(devices):
device_name, vendor_name, device_id, system_id = pci_device
print("device # {0}".format(index))
wrapper = TextWrapper(initial_indent=" " * 4)
print(wrapper.fill("device name = {0}".format(device_name)))
print(wrapper.fill("vendor name = {0}".format(vendor_name)))
print(wrapper.fill("device id = {0}".format(device_id)))
print(wrapper.fill("system id = {0}\n".format(system_id)))
else:
print("No available Passthrough " "devices for VM {0}".format(vm_obj.name))
def _get_add_passthru_task(self, vm_cfg):
"""Add device(s) in Passthrough mode for a VM and get Task
Args:
vm_cfg (dict): a dict contains VM config info
Returns:
list: a list of Tasks
"""
tasks = []
vm_obj = self.objs.get_vm(vm_cfg["vm"])
vm_update = ConfigVM(vm_obj)
vm_status = GetVM(vm_obj)
host_obj = self.objs.get_host_by_vm(vm_obj)
Check().check_kv(vm_cfg, "device", required=True)
mmio_size = None
if Check().check_kv(vm_cfg, "mmio_size"):
if not self.is_valid_mmio_size(vm_cfg["mmio_size"]):
self.logger.warning(
"mmio_size has to be power of 2 and "
"larger than 0. "
"This value will be ignored. "
"The default mmio_size (256) will be used"
)
else:
mmio_size = vm_cfg["mmio_size"]
for device in vm_cfg["device"]:
device = device.lower()
if device in vm_status.configurable_pci_ids():
self.logger.debug(
"Device {0} is available for " "VM {1}".format(device, vm_obj.name)
)
tasks.extend(
vm_update.add_pci(device, host_obj, vm_update, vm_status, mmio_size)
)
else:
if device in vm_status.existing_pci_ids():
self.logger.error(
"Device {0} is already configured. " "Skip.".format(device)
)
elif device not in vm_status.avail_pci_ids():
self.logger.error(
"Device {0} is not available "
"for VM {1}. Skip.".format(device, vm_obj.name)
)
self._query_passthru(vm_cfg)
if tasks:
if not vm_status.is_memory_reser_full():
self.logger.warning(
"Adding a PCI device or shared PCI device "
"in passthrough mode needs to reserve memory. "
"Reserving memory."
)
tasks.append(vm_update.memory_reservation(reser=1))
else:
self.logger.debug("Good. Memory is already reserved.")
return tasks
@staticmethod
def is_valid_mmio_size(num):
"""check MMIO size setting is valid
"""
return num > 0 and ((num & (num - 1)) == 0)
def _get_remove_passthru_task(self, vm_cfg):
"""Remove Passthrough device from a VM and get Task
Args:
vm_cfg (dict): a dict contains VM config info
Returns:
list: a list of Tasks
"""
tasks = []
vm_obj = self.objs.get_vm(vm_cfg["vm"])
vm_status = GetVM(vm_obj)
vm_update = ConfigVM(vm_obj)
Check().check_kv(vm_cfg, "device", required=True)
for device in vm_cfg["device"]:
device = device.lower()
if device in vm_status.existing_pci_ids():
self.logger.info(
"Device {0} to be removed " "on VM {1}".format(device, vm_cfg["vm"])
)
tasks.append(vm_update.remove_pci(device, vm_status))
else:
self.logger.error(
"Couldn't find device {0} "
"on VM {1}. "
"Skip.".format(device, vm_cfg["vm"])
)
return tasks
def sriov_cli(self):
""" Configure device in SR-IOV mode for VM
Returns:
None
"""
vm_cfgs = self._extract_file(self.cfg)
if self.cfg["query"]:
for vm_cfg in vm_cfgs:
self._query_sriov(vm_cfg)
if self.cfg["add"]:
tasks = []
for vm_cfg in vm_cfgs:
tasks.extend(self._get_add_sriov_tasks(vm_cfg))
if tasks:
GetWait().wait_for_tasks(tasks, task_name="Add SR-IOV device(s)")
if self.cfg["remove"]:
tasks = []
for vm_cfg in vm_cfgs:
tasks.append(self._get_remove_sriov_tasks(vm_cfg))
if tasks:
GetWait().wait_for_tasks(tasks, task_name="Remove SR-IOV device(s)")
def _sriov_cluster(self, vm_cfgs, *keys):
""" Configure device in SR-IOV mode for VM(s)
(defined in cluster conf file)
Args:
vm_cfgs (list): a list of dicts contains VM config info
*keys: a keyword array that can trigger this configuration
Returns:
None
"""
tasks = []
for vm_cfg in vm_cfgs:
if all(k in vm_cfg for k in keys):
tasks.extend(self._get_add_sriov_tasks(vm_cfg))
if tasks:
GetWait().wait_for_tasks(tasks, task_name="Add SR-IOV device(s)")
def _query_sriov(self, vm_cfg):
""" query available SR-IOV devices for a VM
Args:
vm_cfg (dict): a dict contains VM config info
Returns:
None
"""
vm_obj = self.objs.get_vm(vm_cfg["vm"])
vm_status = GetVM(vm_obj)
devices = vm_status.avail_sriov_info()
if devices:
print("Available Passthrough device(s) " "for VM {0}:".format(vm_obj.name))
print("------------------------------------------------")
for index, pci_device in enumerate(devices):
pnic, vf, device_name, vendor_name, device_id, system_id = pci_device
print("device # {0}".format(index))
wrapper = TextWrapper(initial_indent=" " * 4)
print(wrapper.fill("PNIC = {0}".format(pnic)))
print(wrapper.fill("Virtual Function = {0}".format(vf)))
print(wrapper.fill("device name = {0}".format(device_name)))
print(wrapper.fill("vendor name = {0}".format(vendor_name)))
print(wrapper.fill("device id = {0}".format(device_id)))
print(wrapper.fill("system id = {0}\n".format(system_id)))
else:
print("No available SR-IOV devices for VM {0}".format(vm_obj.name))
def _get_add_sriov_tasks(self, vm_cfg):
"""Add device in SR-IOV mode for a VM and get Tasks
Args:
vm_cfg (dict): a dict contains VM config info
Returns:
list: a list of Tasks
"""
tasks = []
dvs_obj = []
vm_obj = self.objs.get_vm(vm_cfg["vm"])
host_obj = self.objs.get_host_by_vm(vm_obj)
vm_update = ConfigVM(vm_obj)
Check().check_kv(vm_cfg, "pf", required=True)
Check().check_kv(vm_cfg, "sriov_port_group", required=True)
if (Check().check_kv(vm_cfg, "dvs_name")) :
dvs_name = vm_cfg["dvs_name"]
dvs_obj = self.objs.get_dvs(dvs_name)
pf = vm_cfg["pf"]
pf_obj = GetHost(host_obj).pci_obj(pf)
pg = vm_cfg["sriov_port_group"]
pg_obj = self.objs.get_network(pg)
# verify whether this PF has SR-IOV capability
device_ids = GetVM(vm_obj).avail_sriov_ids()
if pf in device_ids:
self.logger.info(
"Find physical function {0} for VM {1} "
"and this physical function is SR-IOV capable".format(pf, vm_obj.name)
)
tasks.append(vm_update.add_sriov_adapter(pg_obj, pf_obj, dvs_obj))
else:
self.logger.error(
"This physical function is not SR-IOV capable. " "Skipping"
)
if tasks:
if not GetVM(vm_obj).is_memory_reser_full():
self.logger.warning(
"Add a SR-IOV device needs to reserve memory. " "Reserving memory."
)
tasks.append(vm_update.memory_reservation(reser=1))
else:
self.logger.debug("Good. Memory is already reserved.")
return tasks
def _get_remove_sriov_tasks(self, vm_cfg):
"""Remove SR-IOV device from a VM
Args:
vm_cfg (dict): a dict contains vm config info
Returns:
Task
"""
vm_obj = self.objs.get_vm(vm_cfg["vm"])
if Check().check_kv(vm_cfg, "pf"):
pf = vm_cfg["pf"]
pf_obj = GetVM(vm_obj).sriov_obj(pf)
task = ConfigVM(vm_obj).remove_sriov_adapter(pf_obj)
return task
elif Check().check_kv(vm_cfg, "sriov_port_group"):
pg = vm_cfg["sriov_port_group"]
pg_obj = GetVM(vm_obj).network_obj(
network_name=pg, device_type=vim.vm.device.VirtualSriovEthernetCard
)
if pg_obj:
self.logger.debug(
"Found port group {0} for VM {1}".format(pg, vm_obj.name)
)
task = ConfigVM(vm_obj).remove_network_adapter(pg_obj)
return task
else:
self.logger.error(
"Couldn't find SR-IOV port group {0} "
"on VM {1} to remove".format(pg, vm_obj.name)
)
return None
else:
self.logger.error(
"Please specify either the name of SR-IOV port "
"group or the Physical Function backs the "
"SR-IOV port group for device removal."
)
return None
def vgpu_cli(self):
"""Configure vGPU profile for VM(s)
Returns:
None
"""
vm_cfgs = self._extract_file(self.cfg)
if self.cfg["query"]:
for vm_cfg in vm_cfgs:
self._query_vgpu(vm_cfg)
if self.cfg["add"]:
tasks = []
for vm_cfg in vm_cfgs:
tasks.extend(self._get_add_vgpu_tasks(vm_cfg))
if tasks:
GetWait().wait_for_tasks(tasks, task_name="Add a vGPU profile")
if self.cfg["remove"]:
tasks = []
for vm_cfg in vm_cfgs:
tasks.extend(self._get_remove_vgpu_tasks(vm_cfg))
if tasks:
GetWait().wait_for_tasks(tasks, task_name="Remove vGPU profile")
def _vgpu_cluster(self, vm_cfgs, *keys):
"""Configure vGPU profile for VM(s) (defined in cluster conf file)
Args:
vm_cfgs (list): a list of dicts contains VM config info
*keys: a keyword array that can trigger this configuration
Returns:
None
"""
tasks = []
for vm_cfg in vm_cfgs:
if all(k in vm_cfg for k in keys):
vm_cfg["profile"] = vm_cfg["vgpu"]
tasks.extend(self._get_add_vgpu_tasks(vm_cfg))
if tasks:
GetWait().wait_for_tasks(tasks, task_name="Add vGPU profile")
def _query_vgpu(self, vm_cfg):
"""Query available vGPU profiles for a VM
Args:
vm_cfg (dict): a dict contains VM config info
Returns:
None
"""
vm_obj = self.objs.get_vm(vm_cfg["vm"])
host_obj = self.objs.get_host_by_vm(vm_obj)
vgpu_profiles = GetHost(host_obj).shared_passthru_vgpu_types()
if vgpu_profiles:
print("Available vGPU profiles for VM {0}:".format(vm_obj.name))
for vgpu_profile in vgpu_profiles:
wrapper = TextWrapper(initial_indent=" " * 4)
print(wrapper.fill(vgpu_profile))
else:
print("No available vGPU profiles for VM {0}".format(vm_obj.name))
def _get_add_vgpu_tasks(self, vm_cfg):
"""Add vGPU profile for a VM and get Task
Args:
vm_cfg (dict): a dict contains VM config info
Returns:
list: a list of Tasks
"""
tasks = []
vm_obj = self.objs.get_vm(vm_cfg["vm"])
vm_update = ConfigVM(vm_obj)
vm_status = GetVM(vm_obj)
host_obj = self.objs.get_host_by_vm(vm_obj)
Check().check_kv(vm_cfg, "profile", required=True)
if vm_cfg["profile"] in GetHost(host_obj).shared_passthru_vgpu_types():
self.logger.debug(
"vGPU profile {0} is available "
"for VM {1} ".format(vm_cfg["profile"], vm_cfg["vm"])
)
if vm_cfg["profile"] == vm_status.existing_vgpu_profile():
self.logger.error(
                # assumed completion (truncated in the source): mirrors the
                # "already configured ... Skip." messages used elsewhere here
                "vGPU profile {0} is already configured "
                "for VM {1}. Skip.".format(vm_cfg["profile"], vm_cfg["vm"])
            )
<filename>fabricator/fabricator.py
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from .__version__ import __version__
import functools
import json
import re
from builtins import *
import requests
import six
from future.utils import raise_from
from requests.auth import AuthBase
if six.PY2:
import aenum as enum
elif six.PY3:
import enum
# Utilities
def extract_parameters(kwargs, names, remove=False):
"""
In a few cases, because of the dual modes of Fabricator, we need to allow
any set of parameters to be passed into a method in one mode, while requiring
parameters in another. This makes dealing with that situation a bit easier.
TODO(blevenson): This has one downside - None is used as the initializer,
which will make it impossible to set the value of one of the parameters to
None intentionally. Need to think about how to handle this situation.
"""
out = [None] * len(names)
for i, n in enumerate(names):
if n in kwargs:
out[i] = kwargs[n]
if remove:
del kwargs[n]
return tuple(out)
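# Illustrative usage (values are hypothetical): pull optional parameters out
# of a kwargs dict, removing them so they are not forwarded further.
#
#   kwargs = {'name': 'users', 'path': '/users', 'retries': 3}
#   name, path = extract_parameters(kwargs, ('name', 'path'), remove=True)
#   # name == 'users', path == '/users', kwargs == {'retries': 3}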
def destructure_dict(d, *args, **kwargs):
    # Keyword-only arguments after *args are Python 3 syntax; pulling "default"
    # from **kwargs keeps this module importable on Python 2 as well.
    default = kwargs.pop('default', None)
    return tuple(d.get(a, default) for a in args)
def check_valid_methods(methods):
for i, m in enumerate(methods):
# Skip check if already an instance of HTTPMethods
if isinstance(m, HTTPMethods):
continue
try:
m = HTTPMethods(m)
methods[i] = m
except ValueError as exc:
raise_from(FabricatorNotImplementedError('method "{}" is not valid'.format(m)), exc)
def add_if_set(kw, **kwargs):
for n, v in kwargs.items():
if v is not None:
kw[n] = v
return kw
def check_required_params(missing_values=(None,), **kwargs):
for n, v in kwargs.items():
if v in missing_values:
raise ValueError('missing required keyword parameter "{}"'.format(n))
# Custom Exceptions and Error Types
quote_and_escape = lambda s: "'{}'".format(s.replace("'", r"\'")) if s is not None else 'None'
class FabricatorException(Exception):
pass
class FabricatorNotImplementedError(FabricatorException):
pass
class FabricatorRequestError(FabricatorException):
def __init__(self, message=None, code=None, content=None):
FabricatorException.__init__(self, message)
self.message = message
self.code = code
self.content = content
@property
def json(self):
try:
return json.loads(self.content)
        except (TypeError, ValueError):  # content is not a (valid) JSON string
return self.content
def __repr__(self):
# Make sure the "code" is properly formatted as either an int or 'None'
code_value = 'None' if self.code is None else self.code
return 'FabricatorRequestError({}, code={}, content={})'.format(quote_and_escape(self.message), code_value, quote_and_escape(self.content))
def __str__(self):
return 'FabricatorRequestError: {} - Code: {} - Content: {}'.format(self.message, self.code, self.json)
class FabricatorRequestAuthError(FabricatorRequestError):
def __init__(self, code=None, content=None):
FabricatorRequestError.__init__(self, code=code, content=content)
self.message = 'Authentication failed'
class FabricatorUsageError(FabricatorException):
pass
class FabricatorParamValidationError(FabricatorUsageError):
def __init__(self, param=None, *args, **kwargs):
super(FabricatorParamValidationError, self).__init__(*args, **kwargs)
self.param = param
def __repr__(self):
return 'FabricatorParamValidationError(param={})'.format(quote_and_escape(self.param))
def __str__(self):
return 'required parameter {} is missing'.format(self.param)
# HTTPMethods is used to ensure correct method names are being used when registering request methods and calling them
class HTTPMethods(enum.Enum):
"""The set of valid HTTP methods"""
# Most commonly used for REST APIs
GET = 'GET'
POST = 'POST'
PUT = 'PUT'
PATCH = 'PATCH'
DELETE = 'DELETE'
# Less frequently used in REST APIs
OPTIONS = 'OPTIONS'
HEAD = 'HEAD'
CONNECT = 'CONNECT'
TRACE = 'TRACE'
def __eq__(self, other):
if self is other:
return True
if self.value == other:
return True
return False
    @staticmethod
    def all():
        # __members__ holds only the enum member names; dir() can also pick up
        # methods (including this one) on newer Python versions.
        return tuple(HTTPMethods.__members__)
noop_auth_handler = lambda r: r
def make_auth_handler(f):
"""
Creates an AuthBase class
:param callable or AuthBase f: The function that will process the request and add auth details
:return AuthBase: The AuthBase class
"""
class AuthReady(AuthBase):
def __call__(self, req):
return f(req)
return AuthReady
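# Minimal sketch of an auth function (the token value is hypothetical):
# anything that mutates and returns the prepared request can be wrapped by
# make_auth_handler, then instantiated the way _get_auth_handler does below.
#
#   def bearer(req):
#       req.headers['Authorization'] = 'Bearer <token>'
#       return req
#
#   requests.get('https://example.com', auth=make_auth_handler(bearer)())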
# The FabricatorEndpoint type represents a particular 'path' and HTTP method (or route in ReST terms) that requests can be sent to.
# This class actually makes the requests that occur in this library.
class FabricatorEndpoint:
def __init__(self,
parent,
name=None,
path=None,
handler=None,
methods=None,
auth_handler=None,
headers=None,
required_params=()):
"""
This creates a "Route" (really a known operation) within the Fabricator where it lives.
"""
check_required_params(name=name, path=path)
self.parent = parent
self.name = name
self.path = path
self.handler = handler
self.methods = methods
self.auth_handler = auth_handler
self.headers = headers
self.required_params = required_params
def __getattr__(self, item):
self._check_method(item)
return functools.partial(self._make_request, method=item)
def __call__(self, *args, **kwargs):
"""
If a class instance is called, then a request is made with the "default" method (first in list)
"""
return self._make_request(method=self.methods[0], **kwargs)
def _check_method(self, m):
"""
Checks to make sure that the provided method is valid for this route
"""
if isinstance(m, str):
m = m.upper()
if self.methods and m not in self.methods:
raise FabricatorNotImplementedError('{} is not a valid method for the {} route'.format(m, self.path))
def _get_response_handler(self):
if self.handler is not None:
return self.handler
# No handler set on the route itself, so we need to look at the Fabricator parent, etc
current = self.parent
handler = None
while handler is None and current is not None:
handler = current._default_handler
current = current._parent
return handler or noop_response_handler # If no handler found in tree, default to noop response handler
def _get_auth_handler(self):
if self.auth_handler is not None:
return make_auth_handler(self.auth_handler)
current = self.parent
auth_handler = None
while auth_handler is None and current is not None:
auth_handler = current._auth_handler
current = current._parent
ah = auth_handler or noop_auth_handler
return make_auth_handler(ah)
def _get_headers(self):
if self.headers is not None:
return self.headers
current = self.parent
headers = None
while headers is None and current is not None:
headers = current._headers
current = current._parent
h = headers or {}
return h
def _construct_url(self, url_params=None):
# Handle URL params as necessary
path = self.path
for k, v in url_params.items():
            try:
                v = str(v)
            except Exception:
                raise FabricatorUsageError('URL parameter could not be converted to str')
path = path.replace(k, v)
# Construct the full URL base by climbing through the Fabricator instance chain and adding together the base_url values
current = self.parent
base_url = ''
while current is not None:
base_url = current._base_url + base_url
current = current._parent
return '{}{}'.format(base_url, path)
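    # Illustrative trace (assumed values): a route with path='/users/:user_id'
    # under a parent with _base_url='https://api.example.com/v1' resolves as
    #   _construct_url({':user_id': '42'})
    #   -> 'https://api.example.com/v1' + '/users/42'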
def _make_request(self, method, **kwargs):
# Check to ensure the method is valid
self._check_method(method)
if isinstance(method, HTTPMethods):
# method is actually a HTTPMethod enum, so get the string representation
method = method.value
elif isinstance(method, six.string_types):
# method is already a string, make sure its uppercased the way that requests likes it
method = method.upper()
else:
# Something went very very wrong if we're here. Most likely the user called _make_request directly. Bad user!
raise FabricatorException('"method" ({}) is an invalid type: {}'.format(method, type(method)))
# Check that all required arguments are present and accounted for
for p in self.required_params:
if p not in kwargs:
raise FabricatorParamValidationError(param=p)
# Before we continue, we need to check the path and see if it has any URL params in it, If so, we need to try to find those params in kwargs
url_params = {}
        matches = re.findall(r'(:[A-Za-z_]+)', self.path)  # [A-Za-z_], not [A-z], which also matches "[", "]", "^" and backtick
        for m in matches:
            if m[1:] not in kwargs:
                raise FabricatorParamValidationError(param=m[1:])  # report the name without the leading colon
# Add to url_params and delete from kwargs
url_params[m] = kwargs[m[1:]]
del kwargs[m[1:]]
options = {}
if kwargs:
if method in (HTTPMethods.POST, HTTPMethods.PUT, HTTPMethods.PATCH):
options['json'] = kwargs
else:
# TODO: When passing query string params, need to make sure all values in kwargs are ok in terms of type
options['params'] = kwargs
# Now we want to set up headers and auth
options['auth'] = self._get_auth_handler()()
options['headers'] = self._get_headers()
# Get session and request base from the Fabricator this route belongs to
resp = requests.request(method, self._construct_url(url_params=url_params), **options)
return self._get_response_handler()(resp)
# The noop handler is the default if no other handler is provided. It is just a passthrough.
noop_response_handler = lambda r: r
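# Illustrative alternative handler (a sketch, not part of the library): decode
# JSON bodies and surface non-2xx responses via the error type defined above.
#
#   def json_handler(resp):
#       if not resp.ok:
#           raise FabricatorRequestError('request failed', code=resp.status_code,
#                                        content=resp.text)
#       return resp.json()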
# The Fabricator class is a logical construct. It "contains" 1 or more "FabricatorEndpoint" instances. It also provides a gateway to
# using those instances for the end user. When the end user says something like 'api.authenticateUser' this class
# proxies the request to the correct FabricatorEndpoint instance automatically.
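# Illustrative usage sketch (endpoint names are hypothetical; registration via
# HTTP-method attributes is implied by __getattr_builder below):
#
#   api = Fabricator('https://api.example.com/v1')
#   api.get('list_users', path='/users')     # builder mode: register a GET route
#   api.start()
#   resp = api.list_users(active=True)       # request mode: GET /v1/users?active=True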
class Fabricator:
"""
The Fabricator class serves 2 functions:
1) In its initial mode, it allows endpoints to be registered to the client. Then, once .start() is called,
2) It allows an end user to use those endpoints to contact an API
"""
def __init__(self,
base_url,
auth_handler=None,
headers=None,
handler=None,
parent=None):
self._parent = parent
self._base_url = base_url
self._auth_handler = auth_handler
self._headers = headers
self._default_handler = handler
# Initialize the routes dict
self._endpoints = {}
self._started = False
def __getattr_builder(self, name):
"""
Looks up the correct attribute in "builder" mode. That is, finds the
correct HTTP method and returns a proxy to the register method.
"""
if name.upper() not in HTTPMethods.all():
            # assumed completion (truncated in the source): names that are not
            # valid HTTP methods cannot be registration proxies
            raise FabricatorNotImplementedError('"{}" is not a valid HTTP method'.format(name))
def _create_schema(schema_name, database):
    """Create a schema in the given database.
    (Signature and opening lines are an assumed reconstruction, mirrored from
    the parallel _drop_schema helper below; the source is truncated here.)
    Args:
        schema_name: (:obj:`str`) schema name.
database: a :obj:`streamsets.testframework.environment.Database` object.
"""
if isinstance(database, OracleDatabase):
database.engine.execute('CREATE USER {user} IDENTIFIED BY {pwd}'.format(user=schema_name, pwd=schema_name))
database.engine.execute('GRANT UNLIMITED TABLESPACE TO {user}'.format(user=schema_name))
else:
schema = sqlalchemy.schema.CreateSchema(schema_name)
database.engine.execute(schema)
def _drop_schema(schema_name, database):
"""Remove a schema from the given database.
Args:
schema_name: (:obj:`str`) name of the schema to remove.
database: a :obj:`streamsets.testframework.environment.Database` object.
"""
if isinstance(database, OracleDatabase):
database.engine.execute('DROP USER {user}'.format(user=schema_name))
else:
        # the DDL object must be executed, as in _create_schema above
        database.engine.execute(sqlalchemy.schema.DropSchema(schema_name))
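# Illustrative usage (a sketch; the database fixture comes from the test
# framework): create a scratch schema for a test and drop it afterwards.
#
#   schema = get_random_string(string.ascii_lowercase, 8)
#   _create_schema(schema, database)
#   try:
#       ...  # run the pipeline against the schema
#   finally:
#       _drop_schema(schema, database)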
@credentialstore
@database
def test_jdbc_lookup_processor(sdc_builder, sdc_executor, database):
"""Simple JDBC Lookup processor test.
Pipeline will enrich records with the 'name' by adding a field as 'FirstName'.
The pipeline looks like:
dev_raw_data_source >> jdbc_lookup >> trash
"""
table_name = get_random_string(string.ascii_lowercase, 20)
table = _create_table(table_name, database)
logger.info('Adding %s rows into %s database ...', len(ROWS_IN_DATABASE), database.type)
connection = database.engine.connect()
connection.execute(table.insert(), ROWS_IN_DATABASE)
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='DELIMITED',
header_line='WITH_HEADER',
raw_data='\n'.join(LOOKUP_RAW_DATA))
jdbc_lookup = pipeline_builder.add_stage('JDBC Lookup')
query_str = f"SELECT name FROM {table_name} WHERE id = '${{record:value('/id')}}'"
column_mappings = [dict(dataType='USE_COLUMN_TYPE',
columnName='name',
field='/FirstName')]
jdbc_lookup.set_attributes(sql_query=query_str,
column_mappings=column_mappings)
trash = pipeline_builder.add_stage('Trash')
dev_raw_data_source >> jdbc_lookup >> trash
pipeline = pipeline_builder.build(title='JDBC Lookup').configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
LOOKUP_EXPECTED_DATA = copy.deepcopy(ROWS_IN_DATABASE)
for record in LOOKUP_EXPECTED_DATA:
record.pop('id')
record['FirstName'] = record.pop('name')
try:
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline,
start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
rows_from_snapshot = [{list(record.field.keys())[1]: list(record.field.values())[1].value}
for record in snapshot[jdbc_lookup].output]
assert rows_from_snapshot == LOOKUP_EXPECTED_DATA
finally:
logger.info('Dropping table %s in %s database...', table_name, database.type)
table.drop(database.engine)
@database
def test_jdbc_tee_processor(sdc_builder, sdc_executor, database):
"""Simple JDBC Tee processor test.
Pipeline will insert records into database and then pass generated database column 'id' to fields.
The pipeline looks like:
dev_raw_data_source >> jdbc_tee >> trash
"""
if isinstance(database, OracleDatabase):
pytest.skip('JDBC Tee Processor does not support Oracle')
elif type(database) == SQLServerDatabase:
pytest.skip('JDBC Tee Processor does not support multi row op on SQL Server')
table_name = get_random_string(string.ascii_lowercase, 20)
table = _create_table(table_name, database)
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='DELIMITED',
header_line='WITH_HEADER',
raw_data='\n'.join(RAW_DATA))
jdbc_tee = pipeline_builder.add_stage('JDBC Tee')
# Note that here ids are not inserted. Database generates them automatically.
field_to_column_mapping = [dict(columnName='name',
dataType='USE_COLUMN_TYPE',
field='/name',
paramValue='?')]
generated_column_mappings = [dict(columnName='id',
dataType='USE_COLUMN_TYPE',
field='/id')]
jdbc_tee.set_attributes(default_operation='INSERT',
field_to_column_mapping=field_to_column_mapping,
generated_column_mappings=generated_column_mappings,
table_name=table_name)
trash = pipeline_builder.add_stage('Trash')
dev_raw_data_source >> jdbc_tee >> trash
pipeline = pipeline_builder.build(title='JDBC Tee').configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
try:
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline,
start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
# Verify the JDBC Tee processor has got new ids which were generated by database.
rows_from_snapshot = [{list(item.field.keys())[0]: list(item.field.values())[0].value,
list(item.field.keys())[1]: int(list(item.field.values())[1].value)}
for item in snapshot[jdbc_tee].output]
assert rows_from_snapshot == ROWS_IN_DATABASE
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
@pytest.mark.parametrize('use_multi_row', [True, False])
@sdc_min_version('3.0.0.0') # stop_after_first_batch
def test_jdbc_tee_processor_multi_ops(sdc_builder, sdc_executor, database, use_multi_row):
"""JDBC Tee processor with multiple operations
Pipeline will delete/update/insert records into database with one batch and then update 'id'
field if it is inserted. The 'operation' field is used for the record header sdc.operation.type
which defines the CRUD operation (1: Insert, 2: Delete, 3: Update). The pipeline looks like:
dev_raw_data_source >> expression evaluator >> jdbc_tee >> trash
"""
if isinstance(database, OracleDatabase):
pytest.skip('JDBC Tee Processor does not support Oracle')
    if use_multi_row and type(database) == SQLServerDatabase:
pytest.skip('JDBC Tee Processor does not support multi row on SQL Server')
table_name = get_random_string(string.ascii_lowercase, 20)
pipeline_builder = sdc_builder.get_pipeline_builder()
DATA = [
{'operation': 2, 'name': 'Jarcec', 'id': 2}, # delete
{'operation': 3, 'name': 'Hari', 'id': 3}, # update
{'operation': 1, 'name': 'Eddie'} # insert, id will be added by JDBC Tee
]
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='JSON',
raw_data='\n'.join(json.dumps(rec) for rec in DATA),
stop_after_first_batch=True)
HEADER_EXPRESSIONS = [dict(attributeToSet='sdc.operation.type',
headerAttributeExpression="${record:value('/operation')}")]
expression_evaluator = pipeline_builder.add_stage('Expression Evaluator')
expression_evaluator.header_attribute_expressions = HEADER_EXPRESSIONS
FIELD_TO_COLUMN = [dict(columnName='name', field='/name', paramValue='?')]
jdbc_tee = pipeline_builder.add_stage('JDBC Tee')
jdbc_tee.set_attributes(default_operation='INSERT',
field_to_column_mapping=FIELD_TO_COLUMN,
generated_column_mappings=[dict(columnName='id', field='/id')],
table_name=table_name,
use_multi_row_operation=use_multi_row)
trash = pipeline_builder.add_stage('Trash')
dev_raw_data_source >> expression_evaluator >> jdbc_tee >> trash
pipeline_title = 'JDBC Tee MultiOps MultiRow' if use_multi_row else 'JDBC Tee MultiOps SingleRow'
pipeline = pipeline_builder.build(title=pipeline_title).configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
table = _create_table(table_name, database)
try:
logger.info('Adding %s rows into %s database ...', len(ROWS_IN_DATABASE), database.type)
connection = database.engine.connect()
        # Insert only names so the database assigns the sequence-generated ids (esp. PostgreSQL)
if type(database) == SQLServerDatabase:
connection.execute(table.insert(), [{'id': row['id'], 'name': row['name']} for row in ROWS_IN_DATABASE])
else:
connection.execute(table.insert(), [{'name': row['name']} for row in ROWS_IN_DATABASE])
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline,
start_pipeline=True).snapshot
sequence_id = len(ROWS_IN_DATABASE)
# Verify the database is updated.
result = database.engine.execute(table.select())
data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by id
result.close()
expected_data = [(row['name'], row['id']) for row in ROWS_IN_DATABASE]
for record in DATA:
if record['operation'] == 1: # insert
sequence_id += 1
expected_data.append((record['name'], sequence_id))
elif record['operation'] == 2: # delete
expected_data = [row for row in expected_data if row[1] != record['id']]
elif record['operation'] == 3: # update
expected_data = [row if row[1] != record['id'] else (record['name'], row[1]) for row in expected_data]
assert data_from_database == expected_data
# Verify the JDBC Tee processor has the new ID which were generated by database.
jdbc_tee_output = snapshot[jdbc_tee].output
name_id_from_output = [(record.field['name'], record.field['id']) for record in jdbc_tee_output]
assert name_id_from_output == [('Jarcec', 2), ('Hari', 3), ('Eddie', sequence_id)]
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
def test_jdbc_query_executor(sdc_builder, sdc_executor, database):
"""Simple JDBC Query Executor test.
Pipeline will insert records into database and then using sqlalchemy, the verification will happen
that correct data is inserted into database.
This is achieved by using a deduplicator which assures us that there is only one ingest to database.
The pipeline looks like:
dev_raw_data_source >> record_deduplicator >> jdbc_query_executor
record_deduplicator >> trash
"""
table_name = get_random_string(string.ascii_lowercase, 20)
table = _create_table(table_name, database)
DATA = ['id,name'] + [','.join(str(item) for item in rec.values()) for rec in ROWS_IN_DATABASE]
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='DELIMITED',
header_line='WITH_HEADER',
raw_data='\n'.join(DATA))
record_deduplicator = pipeline_builder.add_stage('Record Deduplicator')
jdbc_query_executor = pipeline_builder.add_stage('JDBC Query', type='executor')
query_str = f"INSERT INTO {table_name} (name, id) VALUES ('${{record:value('/name')}}', '${{record:value('/id')}}')"
jdbc_query_executor.set_attributes(sql_query=query_str)
trash = pipeline_builder.add_stage('Trash')
dev_raw_data_source >> record_deduplicator >> jdbc_query_executor
record_deduplicator >> trash
pipeline = pipeline_builder.build(title='JDBC Query Executor').configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
try:
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(RAW_DATA) - 1)
sdc_executor.stop_pipeline(pipeline)
result = database.engine.execute(table.select())
data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by id
result.close()
assert data_from_database == [(record['name'], record['id']) for record in ROWS_IN_DATABASE]
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
@sdc_min_version('3.11.0')
def test_jdbc_query_executor_successful_query_event(sdc_builder, sdc_executor, database):
"""Simple JDBC Query Executor test for successful-query event type.
Pipeline will insert records into database and then using sqlalchemy, the verification will happen
that correct data is inserted into database. Event records are verified for successful-query event type.
This is achieved by using a deduplicator which assures us that there is only one ingest to database.
The pipeline looks like:
dev_raw_data_source >> record_deduplicator >> jdbc_query_executor >= trash1
record_deduplicator >> trash2
"""
table_name = get_random_string(string.ascii_lowercase, 20)
table = _create_table(table_name, database)
DATA = ['id,name'] + [','.join(str(item) for item in rec.values()) for rec in ROWS_IN_DATABASE]
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='DELIMITED',
header_line='WITH_HEADER',
raw_data='\n'.join(DATA))
query_str = f"INSERT INTO {table_name} (name, id) VALUES ('${{record:value('/name')}}', '${{record:value('/id')}}')"
jdbc_query_executor = pipeline_builder.add_stage('JDBC Query', type='executor')
jdbc_query_executor.set_attributes(sql_query=query_str)
record_deduplicator = pipeline_builder.add_stage('Record Deduplicator')
trash1 = pipeline_builder.add_stage('Trash')
trash2 = pipeline_builder.add_stage('Trash')
dev_raw_data_source >> record_deduplicator >> jdbc_query_executor >= trash1
record_deduplicator >> trash2
pipeline = pipeline_builder.build(title='JDBC Query Executor').configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
try:
snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
event_records = snapshot[jdbc_query_executor.instance_name].event_records
assert len(event_records) == 3
assert 'successful-query' == event_records[0].header['values']['sdc.event.type']
assert 'successful-query' == event_records[1].header['values']['sdc.event.type']
assert 'successful-query' == event_records[2].header['values']['sdc.event.type']
result = database.engine.execute(table.select())
data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by id
result.close()
assert data_from_database == [(record['name'], record['id']) for record in ROWS_IN_DATABASE]
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
@sdc_min_version('3.11.0')
def test_jdbc_query_executor_insert_query_result_count(sdc_builder, sdc_executor, database):
"""Simple JDBC Query Executor test for successful-query event type and query result count enabled.
Pipeline will insert records into database and then using sqlalchemy, the verification will happen
that correct data is inserted into database. Event records are verified for successful-query event type
and query-result field for the insert query.
This is achieved by using a deduplicator which assures us that there is only one ingest to database.
The pipeline looks like:
dev_raw_data_source >> record_deduplicator >> jdbc_query_executor >= trash1
record_deduplicator >> trash2
"""
table_name = get_random_string(string.ascii_lowercase, 20)
table = _create_table(table_name, database)
DATA = ['id,name'] + [','.join(str(item) for item in rec.values()) for rec in ROWS_IN_DATABASE]
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='DELIMITED',
header_line='WITH_HEADER',
raw_data='\n'.join(DATA))
query_str = f"INSERT INTO {table_name} (name, id) VALUES ('${{record:value('/name')}}', '${{record:value('/id')}}')"
jdbc_query_executor = pipeline_builder.add_stage('JDBC Query', type='executor')
jdbc_query_executor.set_attributes(sql_query=query_str, include_query_result_count_in_events=True)
record_deduplicator = pipeline_builder.add_stage('Record Deduplicator')
trash1 = pipeline_builder.add_stage('Trash')
trash2 = pipeline_builder.add_stage('Trash')
    # assumed completion (truncated in the source): wiring per the docstring above
    dev_raw_data_source >> record_deduplicator >> jdbc_query_executor >= trash1
    record_deduplicator >> trash2
import os
import sqlite3
import traceback
from sqlite3 import Error
from F_Experiments_Helper.run_instance import RunInstance
from a_Common.constants import *
DB_FILE = r"__disertation_experiments\karypy_runs.db"
# metadata columns
ID_COLUMN_NAME = "id"
START_TIME_COLUMN_NAME = "start_time"
END_TIME_COLUMN_NAME = "end_time"
# input columns
CLASSIFIER_TYPE_COLUMN_NAME = "classifier_type" # SOM or FSOM atm
INPUT_IMAGE_PATH_COLUMN_NAME = "input_image_path"
FEATURES_FILE_PATH_COLUMN_NAME = "features_file_path"
USED_FEATURES_COLUMN_NAME = "used_features" # 0x01 len, 0x02 - centromere, 0x04 banding, 0x08 area
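# Illustrative note: used_features is a bitmask, so length + banding is
# 0x01 | 0x04 == 5, and all four features together is 0x0F == 15 (the value
# used by test_get_best_similar_run below).
#
#   used = 0x01 | 0x04
#   uses_banding = bool(used & 0x04)   # True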
DISTANCE_COLUMN_NAME = "distance" # euclidean, weighted - euclidean, manhattan
LEN_FEATURE_WEIGHT_COLUMN_NAME = "len_feature_weight" # weight for len feature
SHORT_CHROMATID_RATIO_FEATURE_WEIGHT_COLUMN_NAME = "short_chromatid_ratio_feature_weight" # weight centromere feature
BANDING_PATTERN_FEATURE_WEIGHTS_COLUMN_NAME = "banding_pattern_feature_weights" # weights banding pattern feature
AREA_FEATURE_WEIGHT_COLUMN_NAME = "area_feature_weight" # weight for area feature
INITIAL_NEURONS_FILE_COLUMN_NAME = "initial_neurons_file"  # initial model configuration: either an existing file,
# meaning the model starts from that config, or the file to which the model's initial neurons will be saved
EPOCHS_COLUMN_NAME = "epochs" # number of training epochs for the model
ROWS_COLUMN_NAME = "rows"
COLS_COLUMN_NAME = "cols"
# outs
MODEL_OUTPUT_FILE_PATH_COLUMN_NAME = "model_output_file_path" # som output file
DIST_MATRIX_FILE_PATH_COLUMN_NAME = "dist_matrix_file_path"
GENERATED_KARYOTYPE_IMAGE_PATH_COLUMN_NAME = "generated_karyotype_image_path"
PRECKAR_COLUMN_NAME = "preckar"
# metadata columns
ID_COLUMN_INDEX = 0
START_TIME_COLUMN_INDEX = 1
END_TIME_COLUMN_INDEX = 2
# input columns
CLASSIFIER_TYPE_COLUMN_INDEX = 3
INPUT_IMAGE_PATH_COLUMN_INDEX = 4
FEATURES_FILE_PATH_COLUMN_INDEX = 5
USED_FEATURES_COLUMN_INDEX = 6
DISTANCE_COLUMN_INDEX = 7
LEN_FEATURE_WEIGHT_COLUMN_INDEX = 8
SHORT_CHROMATID_RATIO_FEATURE_WEIGHT_COLUMN_INDEX = 9
BANDING_PATTERN_FEATURE_WEIGHTS_COLUMN_INDEX = 10
AREA_FEATURE_WEIGHT_COLUMN_INDEX = 11
INITIAL_NEURONS_FILE_COLUMN_INDEX = 12
EPOCHS_COLUMN_INDEX = 13
ROWS_COLUMN_INDEX = 14
COLS_COLUMN_INDEX = 15
# outs
MODEL_OUTPUT_FILE_PATH_COLUMN_INDEX = 16
DIST_MATRIX_FILE_PATH_COLUMN_INDEX = 17
GENERATED_KARYOTYPE_IMAGE_PATH_COLUMN_INDEX = 18
PRECKAR_COLUMN_INDEX = 19
def create_connection(db_file):
""" create a database connection to the SQLite database
specified by the db_file
:param db_file: database file
:return: Connection object or None
"""
conn = None
try:
conn = sqlite3.connect(db_file)
except Error as e:
print(e)
return conn
def create_runs_table():
"""
table run:
# meta info
id - autoincrement
start_time - text
end_time - text
# inputs
classifier_type - text (SOM or FSOM atm)
input_image_path - text
features_file_path - text
used_features - integer (0x01 len, 0x02-centromere, 0x04 banding, 0x08 area)
distance - text (euclidean, weighted-euclidean, manhattan)
1_weight - real # weights for len feature
2_weight - real # weights centromere feature
4_weight - real # weights banding pattern feature
8_weight - real # weights for area feature
    initial_neurons_file - text (initial model configuration, if loaded from a file)
epochs - integer (number of training epochs for the model)
rows - integer
cols - integer
# outs
model_output_file - text (som output file)
dist_matrix_file_path - text
generated_karyotype_image_path - text
preckar - real
:return:
"""
conn = create_connection(DB_FILE)
with conn:
conn.execute('''
CREATE TABLE run ({} INTEGER PRIMARY KEY, {} TEXT , {} TEXT, {} TEXT, {} TEXT, {} TEXT, {} INTEGER, {} TEXT,
{} REAL, {} REAL, {} TEXT, {} REAL, {} TEXT, {} INTEGER, {} INTEGER, {} INTEGER, {} TEXT, {} TEXT, {} TEXT,
{} REAL)
'''.format(ID_COLUMN_NAME, START_TIME_COLUMN_NAME, END_TIME_COLUMN_NAME, CLASSIFIER_TYPE_COLUMN_NAME,
INPUT_IMAGE_PATH_COLUMN_NAME, FEATURES_FILE_PATH_COLUMN_NAME, USED_FEATURES_COLUMN_NAME,
DISTANCE_COLUMN_NAME, LEN_FEATURE_WEIGHT_COLUMN_NAME,
SHORT_CHROMATID_RATIO_FEATURE_WEIGHT_COLUMN_NAME, BANDING_PATTERN_FEATURE_WEIGHTS_COLUMN_NAME,
AREA_FEATURE_WEIGHT_COLUMN_NAME, INITIAL_NEURONS_FILE_COLUMN_NAME, EPOCHS_COLUMN_NAME,
ROWS_COLUMN_NAME, COLS_COLUMN_NAME, MODEL_OUTPUT_FILE_PATH_COLUMN_NAME,
DIST_MATRIX_FILE_PATH_COLUMN_NAME, GENERATED_KARYOTYPE_IMAGE_PATH_COLUMN_NAME, PRECKAR_COLUMN_NAME))
def insert_new_run(run_instance: RunInstance):
conn = create_connection(DB_FILE)
with conn:
sql = ''' INSERT INTO run({},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{})
VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?) ''' \
.format(START_TIME_COLUMN_NAME, END_TIME_COLUMN_NAME, CLASSIFIER_TYPE_COLUMN_NAME,
INPUT_IMAGE_PATH_COLUMN_NAME, FEATURES_FILE_PATH_COLUMN_NAME, USED_FEATURES_COLUMN_NAME,
DISTANCE_COLUMN_NAME, LEN_FEATURE_WEIGHT_COLUMN_NAME,
SHORT_CHROMATID_RATIO_FEATURE_WEIGHT_COLUMN_NAME, BANDING_PATTERN_FEATURE_WEIGHTS_COLUMN_NAME,
AREA_FEATURE_WEIGHT_COLUMN_NAME, INITIAL_NEURONS_FILE_COLUMN_NAME, EPOCHS_COLUMN_NAME,
ROWS_COLUMN_NAME, COLS_COLUMN_NAME, MODEL_OUTPUT_FILE_PATH_COLUMN_NAME,
DIST_MATRIX_FILE_PATH_COLUMN_NAME, GENERATED_KARYOTYPE_IMAGE_PATH_COLUMN_NAME, PRECKAR_COLUMN_NAME)
cur = conn.cursor()
cur.execute(sql, run_instance.get_insert_tuple())
inserted_row_id = cur.lastrowid
if inserted_row_id is None:
raise ValueError("Insert Failed. Query: {}. Values: {}".format(sql, run_instance.get_insert_tuple()))
return inserted_row_id
def delete_run(run_instance: RunInstance):
    conn = create_connection(DB_FILE)
    with conn:
        sql = ''' DELETE FROM run WHERE {} = {}'''.format(ID_COLUMN_NAME, run_instance.id)
        cur = conn.cursor()
        cur.execute(sql)
        # rowcount reports the number of rows deleted; lastrowid is only
        # meaningful after an INSERT and cannot signal a failed delete.
        if cur.rowcount == 0:
            raise ValueError("Delete failed. Query: {}".format(sql))
        return cur.rowcount
def update_run_entry(run_instance: RunInstance):
conn = create_connection(DB_FILE)
with conn:
sql = '''UPDATE run SET {} = ?, {} = ? , {} = ? , {} = ? ,{} = ? ,{} = ? ,{} = ? ,{} = ? ,{} = ? ,{} = ? ,
{} = ? ,{} = ?, {} = ? ,{} = ? ,{} = ? ,{} = ? ,{} = ? ,{} = ? ,{} = ? WHERE {} = ?''' \
.format(START_TIME_COLUMN_NAME, END_TIME_COLUMN_NAME, CLASSIFIER_TYPE_COLUMN_NAME,
INPUT_IMAGE_PATH_COLUMN_NAME, FEATURES_FILE_PATH_COLUMN_NAME,
USED_FEATURES_COLUMN_NAME, DISTANCE_COLUMN_NAME,
LEN_FEATURE_WEIGHT_COLUMN_NAME,
SHORT_CHROMATID_RATIO_FEATURE_WEIGHT_COLUMN_NAME,
BANDING_PATTERN_FEATURE_WEIGHTS_COLUMN_NAME,
AREA_FEATURE_WEIGHT_COLUMN_NAME, INITIAL_NEURONS_FILE_COLUMN_NAME,
EPOCHS_COLUMN_NAME, ROWS_COLUMN_NAME, COLS_COLUMN_NAME,
MODEL_OUTPUT_FILE_PATH_COLUMN_NAME, DIST_MATRIX_FILE_PATH_COLUMN_NAME,
GENERATED_KARYOTYPE_IMAGE_PATH_COLUMN_NAME, PRECKAR_COLUMN_NAME,
ID_COLUMN_NAME)
conn.execute(sql, run_instance.get_update_tuple())
conn.commit()
def get_all_runs(what="*", criteria=""):
conn = create_connection(DB_FILE)
runs = list()
with conn:
cur = conn.cursor()
sql = "SELECT {} from run {};".format(what, criteria)
cur.execute(sql)
for i in cur.fetchall():
if what == "*":
runs.append(get_run_instance_obj_from_db_entry(i))
else:
runs.append(i)
return runs
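# Illustrative usage (assumed data): fetch full RunInstance objects, or a
# single column with an extra criteria clause. Note that `what` and `criteria`
# are interpolated directly into the SQL, so they must be trusted strings.
#
#   best_first = get_all_runs(criteria="WHERE {0} IS NOT NULL ORDER BY {0} DESC".format(PRECKAR_COLUMN_NAME))
#   ids = get_all_runs(what=ID_COLUMN_NAME)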
def get_run_instance_obj_from_db_entry(i):
return RunInstance(
identifier=i[ID_COLUMN_INDEX], start_time=i[START_TIME_COLUMN_INDEX], end_time=i[END_TIME_COLUMN_INDEX],
classifier_type=i[CLASSIFIER_TYPE_COLUMN_INDEX], input_image_path=i[INPUT_IMAGE_PATH_COLUMN_INDEX],
features_file_path=i[FEATURES_FILE_PATH_COLUMN_INDEX], used_features=i[USED_FEATURES_COLUMN_INDEX],
distance=i[DISTANCE_COLUMN_INDEX], len_feature_weight=i[LEN_FEATURE_WEIGHT_COLUMN_INDEX],
short_chromatid_ratio_feature_weight=i[SHORT_CHROMATID_RATIO_FEATURE_WEIGHT_COLUMN_INDEX],
banding_pattern_feature_weights=float(i[BANDING_PATTERN_FEATURE_WEIGHTS_COLUMN_INDEX]) if i[
BANDING_PATTERN_FEATURE_WEIGHTS_COLUMN_INDEX] else None,
area_feature_weight=i[AREA_FEATURE_WEIGHT_COLUMN_INDEX],
initial_neurons_file=i[INITIAL_NEURONS_FILE_COLUMN_INDEX], epochs=i[EPOCHS_COLUMN_INDEX],
rows=i[ROWS_COLUMN_INDEX],
cols=i[COLS_COLUMN_INDEX], model_output_file_path=i[MODEL_OUTPUT_FILE_PATH_COLUMN_INDEX],
dist_matrix_file_path=i[DIST_MATRIX_FILE_PATH_COLUMN_INDEX],
generated_karyotype_image_path=i[GENERATED_KARYOTYPE_IMAGE_PATH_COLUMN_INDEX], preckar=i[PRECKAR_COLUMN_INDEX])
def get_best_run_so_far_with_similar_input(run_instance: RunInstance, logger=None):
conn = create_connection(DB_FILE)
run = None
with conn:
try:
cur = conn.cursor()
sql = "SELECT * FROM run WHERE {} = ? and {} = ? and {} = ? and {} = ? and {} = ? and {} = ? and {} = ? " \
.format(CLASSIFIER_TYPE_COLUMN_NAME, INPUT_IMAGE_PATH_COLUMN_NAME,
FEATURES_FILE_PATH_COLUMN_NAME, USED_FEATURES_COLUMN_NAME, DISTANCE_COLUMN_NAME,
ROWS_COLUMN_NAME,
COLS_COLUMN_NAME)
bindings = [run_instance.classifier_type, run_instance.input_image_path,
run_instance.features_file_path,
run_instance.used_features, run_instance.distance, run_instance.rows, run_instance.cols]
if run_instance.len_feature_weight:
sql += " AND {} = ? ".format(LEN_FEATURE_WEIGHT_COLUMN_NAME)
bindings.append(run_instance.len_feature_weight)
else:
sql += " AND {} IS NULL ".format(LEN_FEATURE_WEIGHT_COLUMN_NAME)
if run_instance.short_chromatid_ratio_feature_weight:
sql += " AND {} = ? ".format(SHORT_CHROMATID_RATIO_FEATURE_WEIGHT_COLUMN_NAME)
bindings.append(run_instance.short_chromatid_ratio_feature_weight)
else:
sql += " AND {} IS NULL ".format(SHORT_CHROMATID_RATIO_FEATURE_WEIGHT_COLUMN_NAME)
if run_instance.banding_pattern_feature_weights:
sql += " AND {} = ? ".format(BANDING_PATTERN_FEATURE_WEIGHTS_COLUMN_NAME)
bindings.append(run_instance.banding_pattern_feature_weights)
else:
sql += " AND {} IS NULL ".format(BANDING_PATTERN_FEATURE_WEIGHTS_COLUMN_NAME)
if run_instance.area_feature_weight:
sql += " AND {} = ? ".format(AREA_FEATURE_WEIGHT_COLUMN_NAME)
bindings.append(run_instance.area_feature_weight)
else:
sql += " AND {} IS NULL ".format(AREA_FEATURE_WEIGHT_COLUMN_NAME)
sql += " AND {} is not NULL ORDER BY {} DESC LIMIT 1;".format(PRECKAR_COLUMN_NAME, PRECKAR_COLUMN_NAME)
cur.execute(sql, tuple(bindings))
i = cur.fetchone()
if i:
run = get_run_instance_obj_from_db_entry(i)
        except Exception:
msg = "Exception while searching for best_run_so_far_with_similar_input. Traceback: {}".format(
traceback.format_exc())
print(msg)
if logger:
logger.warning(msg)
return run
def get_similar_runs(run_instance: RunInstance, over_id=None):
conn = create_connection(DB_FILE)
runs = list()
with conn:
try:
cur = conn.cursor()
sql = "SELECT * FROM run WHERE {} = ? and {} = ? and {} = ? and {} = ? and {} = ? and {} = ? and {} = ? " \
.format(CLASSIFIER_TYPE_COLUMN_NAME, INPUT_IMAGE_PATH_COLUMN_NAME, FEATURES_FILE_PATH_COLUMN_NAME,
USED_FEATURES_COLUMN_NAME, DISTANCE_COLUMN_NAME, ROWS_COLUMN_NAME, COLS_COLUMN_NAME)
bindings = [run_instance.classifier_type, run_instance.input_image_path, run_instance.features_file_path,
run_instance.used_features, run_instance.distance, run_instance.rows, run_instance.cols]
if over_id:
sql += " AND {} >= ? ".format(ID_COLUMN_NAME)
bindings.append(over_id)
if run_instance.len_feature_weight:
sql += " AND {} = ? ".format(LEN_FEATURE_WEIGHT_COLUMN_NAME)
bindings.append(run_instance.len_feature_weight)
else:
sql += " AND {} IS NULL ".format(LEN_FEATURE_WEIGHT_COLUMN_NAME)
if run_instance.short_chromatid_ratio_feature_weight:
sql += " AND {} = ? ".format(SHORT_CHROMATID_RATIO_FEATURE_WEIGHT_COLUMN_NAME)
bindings.append(run_instance.short_chromatid_ratio_feature_weight)
else:
sql += " AND {} IS NULL ".format(SHORT_CHROMATID_RATIO_FEATURE_WEIGHT_COLUMN_NAME)
if run_instance.banding_pattern_feature_weights:
sql += " AND {} = ? ".format(BANDING_PATTERN_FEATURE_WEIGHTS_COLUMN_NAME)
bindings.append(run_instance.banding_pattern_feature_weights)
else:
sql += " AND {} IS NULL ".format(BANDING_PATTERN_FEATURE_WEIGHTS_COLUMN_NAME)
if run_instance.area_feature_weight:
sql += " AND {} = ? ".format(AREA_FEATURE_WEIGHT_COLUMN_NAME)
bindings.append(run_instance.area_feature_weight)
else:
sql += " AND {} IS NULL ".format(AREA_FEATURE_WEIGHT_COLUMN_NAME)
sql += " AND {} is not NULL ORDER BY {} desc;".format(PRECKAR_COLUMN_NAME, PRECKAR_COLUMN_NAME)
cur.execute(sql, tuple(bindings))
for i in cur.fetchall():
runs.append(get_run_instance_obj_from_db_entry(i))
        except Exception:
            msg = "Exception while searching for similar runs. Traceback: {}".format(
                traceback.format_exc())
            print(msg)
return runs
def how_many_similar_input_runs_so_far(run_instance: RunInstance, logger=None, over_id=None):
conn = create_connection(DB_FILE)
with conn:
try:
cur = conn.cursor()
sql = "SELECT count(*) FROM run WHERE {} = ? and {} = ? and {} = ? and {} = ? and {} = ? and {} = ? " \
"and {} = ? ".format(CLASSIFIER_TYPE_COLUMN_NAME, INPUT_IMAGE_PATH_COLUMN_NAME,
FEATURES_FILE_PATH_COLUMN_NAME, USED_FEATURES_COLUMN_NAME, DISTANCE_COLUMN_NAME,
ROWS_COLUMN_NAME, COLS_COLUMN_NAME)
bindings = [run_instance.classifier_type, run_instance.input_image_path,
run_instance.features_file_path, run_instance.used_features, run_instance.distance,
run_instance.rows, run_instance.cols]
if over_id:
sql += " AND {} >= ?".format(ID_COLUMN_NAME)
bindings.append(over_id)
if run_instance.len_feature_weight:
sql += " AND {} = ? ".format(LEN_FEATURE_WEIGHT_COLUMN_NAME)
bindings.append(run_instance.len_feature_weight)
else:
sql += " AND {} IS NULL ".format(LEN_FEATURE_WEIGHT_COLUMN_NAME)
if run_instance.short_chromatid_ratio_feature_weight:
sql += " AND {} = ? ".format(SHORT_CHROMATID_RATIO_FEATURE_WEIGHT_COLUMN_NAME)
bindings.append(run_instance.short_chromatid_ratio_feature_weight)
else:
sql += " AND {} IS NULL ".format(SHORT_CHROMATID_RATIO_FEATURE_WEIGHT_COLUMN_NAME)
if run_instance.banding_pattern_feature_weights:
sql += " AND {} = ? ".format(BANDING_PATTERN_FEATURE_WEIGHTS_COLUMN_NAME)
bindings.append(run_instance.banding_pattern_feature_weights)
else:
sql += " AND {} IS NULL ".format(BANDING_PATTERN_FEATURE_WEIGHTS_COLUMN_NAME)
if run_instance.area_feature_weight:
sql += " AND {} = ? ".format(AREA_FEATURE_WEIGHT_COLUMN_NAME)
bindings.append(run_instance.area_feature_weight)
else:
sql += " AND {} IS NULL ".format(AREA_FEATURE_WEIGHT_COLUMN_NAME)
sql += " AND {} is not NULL ORDER BY {} DESC LIMIT 1;".format(PRECKAR_COLUMN_NAME, PRECKAR_COLUMN_NAME)
cur.execute(sql, tuple(bindings))
i = cur.fetchone()[0]
return i
        except Exception:
            msg = "Exception while counting similar runs in how_many_similar_input_runs_so_far. Traceback: {}".format(
                traceback.format_exc())
print(msg)
if logger:
logger.warning(msg)
return 0
def test_get_best_similar_run():
rr = RunInstance(
classifier_type=FSOM_CLASSIFIER_TYPE,
input_image_path=r"__disertation_experiments\dataset\5\5.bmp",
features_file_path=r"__disertation_experiments\dataset\5\inputs\features_15.txt",
used_features=15,
distance=WEIGHTED_EUCLIDEAN_DISTANCE,
len_feature_weight=0.3,
short_chromatid_ratio_feature_weight=0.1,
banding_pattern_feature_weights=0.3,
area_feature_weight=0.3,
rows=20,
cols=20, epochs=200000
)
print(rr)
print("====================================")
print(get_best_run_so_far_with_similar_input(rr, None))
print(how_many_similar_input_runs_so_far(rr, None, 2937))
def test_main_flow():
global | |
- 2*m.b10*m.b96 - 2*m.b10*m.b97 - 2*
m.b10*m.b98 - 2*m.b10*m.b100 - 2*m.b10*m.b103 - 2*m.b10*m.b104 - 2*m.b10*m.b106 - 2*m.b10*
m.b108 - 2*m.b10*m.b111 - 2*m.b10*m.b112 - 2*m.b10*m.b114 + 2*m.b10*m.b115 - 2*m.b10*m.b117 -
2*m.b10*m.b118 - 2*m.b10*m.b121 - 2*m.b10*m.b125 + 2*m.b10*m.b127 + 2*m.b10*m.b129 + 2*m.b10*
m.b132 + 2*m.b10*m.b133 + 2*m.b10*m.b136 + 2*m.b10*m.b137 + 2*m.b10*m.b138 + 2*m.b10*m.b141 +
2*m.b10*m.b142 + 2*m.b10*m.b145 + 2*m.b10*m.b146 - 2*m.b10*m.b148 - 2*m.b10*m.b152 + 2*m.b10*
m.b154 + 2*m.b10*m.b162 + 2*m.b10*m.b163 + 2*m.b10*m.b166 + 2*m.b10*m.b167 - 2*m.b10*m.b170 +
2*m.b10*m.b172 - 2*m.b10*m.b174 + 2*m.b10*m.b176 + 2*m.b10*m.b180 + 2*m.b10*m.b181 + 2*m.b10*
m.b182 + 2*m.b11*m.b92 - 5*m.b11 - 2*m.b11*m.b93 + 2*m.b11*m.b94 - 2*m.b11*m.b96 - 2*m.b11*
m.b98 + 2*m.b11*m.b99 - 2*m.b11*m.b100 + 2*m.b11*m.b101 - 2*m.b11*m.b104 - 2*m.b11*m.b106 + 2*
m.b11*m.b107 - 2*m.b11*m.b108 + 2*m.b11*m.b111 - 2*m.b11*m.b112 + 2*m.b11*m.b113 - 2*m.b11*
m.b114 - 2*m.b11*m.b117 - 2*m.b11*m.b118 + 2*m.b11*m.b120 - 2*m.b11*m.b121 - 2*m.b11*m.b122 +
2*m.b11*m.b129 + 2*m.b11*m.b130 + 2*m.b11*m.b133 + 2*m.b11*m.b135 + 2*m.b11*m.b136 + 2*m.b11*
m.b138 + 2*m.b11*m.b139 + 2*m.b11*m.b142 + 2*m.b11*m.b144 + 2*m.b11*m.b145 + 2*m.b11*m.b147 -
2*m.b11*m.b148 - 2*m.b11*m.b149 - 2*m.b11*m.b155 - 2*m.b11*m.b156 - 2*m.b11*m.b157 - 2*m.b11*
m.b159 - 2*m.b11*m.b160 + 2*m.b11*m.b163 + 2*m.b11*m.b165 + 2*m.b11*m.b166 + 2*m.b11*m.b168 +
2*m.b11*m.b170 + 2*m.b11*m.b171 + 2*m.b12*m.b93 + 3*m.b12 - 2*m.b12*m.b94 - 2*m.b12*m.b99 - 2*
m.b12*m.b101 - 2*m.b12*m.b102 - 2*m.b12*m.b105 + 2*m.b12*m.b108 - 2*m.b12*m.b109 - 2*m.b12*
m.b110 - 2*m.b12*m.b111 - 2*m.b12*m.b112 + 2*m.b12*m.b114 + 2*m.b12*m.b116 - 2*m.b12*m.b119 -
2*m.b12*m.b120 - 2*m.b12*m.b122 - 2*m.b12*m.b124 - 2*m.b12*m.b125 - 2*m.b12*m.b147 + 2*m.b12*
m.b150 + 5*m.b150 - 2*m.b12*m.b151 - 2*m.b12*m.b152 + 2*m.b12*m.b153 + 2*m.b12*m.b154 + 2*
m.b12*m.b156 + 2*m.b12*m.b157 + 2*m.b12*m.b160 + 2*m.b12*m.b161 + 2*m.b12*m.b168 - 2*m.b12*
m.b169 - 2*m.b12*m.b170 + 2*m.b12*m.b171 + 2*m.b12*m.b172 - 2*m.b12*m.b173 - 2*m.b12*m.b174 +
2*m.b12*m.b178 + 2*m.b12*m.b179 + 2*m.b12*m.b180 + 2*m.b12*m.b181 + 2*m.b13*m.b93 - 2*m.b13*
m.b106 + 2*m.b13*m.b108 - 2*m.b13*m.b110 + 2*m.b13*m.b114 - 2*m.b13*m.b119 - 2*m.b13*m.b120 -
2*m.b13*m.b122 - 2*m.b13*m.b124 - 2*m.b13*m.b125 - 2*m.b13*m.b149 + 2*m.b13*m.b150 - 2*m.b13*
m.b151 + 2*m.b13*m.b153 - 2*m.b13*m.b156 + 2*m.b13*m.b157 - 2*m.b13*m.b158 + 2*m.b13*m.b160 +
2*m.b13*m.b168 + 2*m.b13*m.b170 + 2*m.b13*m.b171 - 2*m.b13*m.b173 - 2*m.b13*m.b174 + 2*m.b13*
m.b177 + 2*m.b13*m.b178 + 2*m.b13*m.b180 + 2*m.b14*m.b92 + 8*m.b14 + 2*m.b14*m.b94 - 2*m.b14*
m.b95 - 2*m.b14*m.b96 - 2*m.b14*m.b97 - 2*m.b14*m.b98 - 2*m.b14*m.b100 - 2*m.b14*m.b103 - 2*
m.b14*m.b104 - 2*m.b14*m.b106 - 2*m.b14*m.b108 - 2*m.b14*m.b109 - 2*m.b14*m.b110 - 2*m.b14*
m.b114 - 2*m.b14*m.b116 - 2*m.b14*m.b117 - 2*m.b14*m.b118 - 2*m.b14*m.b119 - 2*m.b14*m.b121 -
2*m.b14*m.b122 - 2*m.b14*m.b123 - 2*m.b14*m.b124 - 2*m.b14*m.b126 - 2*m.b14*m.b127 + 2*m.b14*
m.b129 + 2*m.b14*m.b132 + 2*m.b14*m.b133 + 2*m.b14*m.b136 + 2*m.b14*m.b137 + 2*m.b14*m.b138 +
2*m.b14*m.b141 + 2*m.b14*m.b142 + 2*m.b14*m.b145 + 2*m.b14*m.b146 - 2*m.b14*m.b148 - 2*m.b14*
m.b151 + 2*m.b14*m.b162 + 2*m.b14*m.b163 + 2*m.b14*m.b166 + 2*m.b14*m.b167 - 2*m.b14*m.b169 -
2*m.b14*m.b173 + 2*m.b14*m.b178 + 2*m.b14*m.b179 + 2*m.b15*m.b92 + 5*m.b15 + 2*m.b15*m.b94 - 2
*m.b15*m.b96 - 2*m.b15*m.b98 + 2*m.b15*m.b99 - 2*m.b15*m.b100 + 2*m.b15*m.b101 - 2*m.b15*
m.b104 - 2*m.b15*m.b106 + 2*m.b15*m.b107 - 2*m.b15*m.b108 - 2*m.b15*m.b110 + 2*m.b15*m.b111 +
2*m.b15*m.b113 - 2*m.b15*m.b114 - 2*m.b15*m.b116 - 2*m.b15*m.b117 - 2*m.b15*m.b118 - 2*m.b15*
m.b119 - 2*m.b15*m.b121 - 2*m.b15*m.b122 - 2*m.b15*m.b123 - 2*m.b15*m.b124 - 2*m.b15*m.b126 -
2*m.b15*m.b127 + 2*m.b15*m.b129 + 2*m.b15*m.b130 + 2*m.b15*m.b133 + 2*m.b15*m.b135 + 2*m.b15*
m.b136 + 2*m.b15*m.b138 + 2*m.b15*m.b139 + 2*m.b15*m.b142 + 2*m.b15*m.b144 + 2*m.b15*m.b145 +
2*m.b15*m.b147 - 2*m.b15*m.b148 - 2*m.b15*m.b149 - 2*m.b15*m.b151 + 2*m.b15*m.b152 - 2*m.b15*
m.b154 - 2*m.b15*m.b155 - 2*m.b15*m.b156 - 2*m.b15*m.b157 - 2*m.b15*m.b158 - 2*m.b15*m.b160 -
2*m.b15*m.b161 + 2*m.b15*m.b163 + 2*m.b15*m.b165 + 2*m.b15*m.b166 + 2*m.b15*m.b168 + 2*m.b15*
m.b170 + 2*m.b15*m.b171 - 2*m.b15*m.b173 + 2*m.b15*m.b174 - 2*m.b15*m.b176 + 2*m.b15*m.b177 +
2*m.b15*m.b178 - 2*m.b15*m.b180 - 2*m.b15*m.b181 - 2*m.b15*m.b182 - 2*m.b16*m.b92 + 11*m.b16
- 2*m.b16*m.b94 - 2*m.b16*m.b99 - 2*m.b16*m.b101 - 2*m.b16*m.b102 - 2*m.b16*m.b103 - 2*m.b16*
m.b104 + 2*m.b16*m.b106 + 2*m.b16*m.b108 - 2*m.b16*m.b109 - 2*m.b16*m.b110 - 2*m.b16*m.b111 -
2*m.b16*m.b112 - 2*m.b16*m.b113 - 2*m.b16*m.b115 - 2*m.b16*m.b120 - 2*m.b16*m.b121 + 2*m.b16*
m.b122 + 2*m.b16*m.b123 - 2*m.b16*m.b124 - 2*m.b16*m.b125 - 2*m.b16*m.b147 - 2*m.b16*m.b148 +
2*m.b16*m.b149 + 2*m.b16*m.b150 - 2*m.b16*m.b151 - 2*m.b16*m.b152 + 2*m.b16*m.b156 + 2*m.b16*
m.b157 + 2*m.b16*m.b160 + 2*m.b16*m.b161 + 2*m.b16*m.b162 + 2*m.b16*m.b163 + 2*m.b16*m.b166 +
2*m.b16*m.b167 - 2*m.b16*m.b169 - 2*m.b16*m.b170 - 2*m.b16*m.b171 - 2*m.b16*m.b172 - 2*m.b16*
m.b173 - 2*m.b16*m.b174 - 2*m.b16*m.b175 - 2*m.b16*m.b176 + 2*m.b16*m.b178 + 2*m.b16*m.b179 +
2*m.b16*m.b180 + 2*m.b16*m.b181 - 2*m.b17*m.b104 + 9*m.b17 + 2*m.b17*m.b108 - 2*m.b17*m.b110
- 2*m.b17*m.b116 - 2*m.b17*m.b121 + 2*m.b17*m.b123 - 2*m.b17*m.b124 - 2*m.b17*m.b127 - 2*
m.b17*m.b148 + 2*m.b17*m.b150 - 2*m.b17*m.b151 - 2*m.b17*m.b154 - 2*m.b17*m.b155 + 2*m.b17*
m.b157 - 2*m.b17*m.b158 - 2*m.b17*m.b161 + 2*m.b17*m.b163 + 2*m.b17*m.b165 + 2*m.b17*m.b166 -
2*m.b17*m.b173 - 2*m.b17*m.b174 - 2*m.b17*m.b175 - 2*m.b17*m.b176 + 2*m.b17*m.b177 + 2*m.b17*
m.b178 - 2*m.b17*m.b181 - 2*m.b17*m.b182 + 2*m.b18*m.b92 + 7*m.b18 + 2*m.b18*m.b93 - 2*m.b18*
m.b95 - 2*m.b18*m.b96 - 2*m.b18*m.b97 - 2*m.b18*m.b98 - 2*m.b18*m.b101 - 2*m.b18*m.b102 - 2*
m.b18*m.b109 - 2*m.b18*m.b110 - 2*m.b18*m.b111 - 2*m.b18*m.b112 - 2*m.b18*m.b117 - 2*m.b18*
m.b118 - 2*m.b18*m.b119 - 2*m.b18*m.b120 - 2*m.b18*m.b122 - 2*m.b18*m.b123 - 2*m.b18*m.b124 -
2*m.b18*m.b125 - 2*m.b18*m.b126 - 2*m.b18*m.b127 + 2*m.b18*m.b129 + 2*m.b18*m.b132 + 2*m.b18*
m.b133 + 2*m.b18*m.b136 + 2*m.b18*m.b137 + 2*m.b18*m.b138 + 2*m.b18*m.b141 + 2*m.b18*m.b142 +
2*m.b18*m.b145 + 2*m.b18*m.b146 - 2*m.b18*m.b147 - 2*m.b18*m.b151 - 2*m.b18*m.b152 + 2*m.b18*
m.b156 + 2*m.b18*m.b157 + 2*m.b18*m.b160 + 2*m.b18*m.b161 - 2*m.b18*m.b169 - 2*m.b18*m.b170 -
2*m.b18*m.b173 - 2*m.b18*m.b174 + 2*m.b18*m.b178 + 2*m.b18*m.b179 + 2*m.b18*m.b180 + 2*m.b18*
m.b181 + 2*m.b19*m.b92 + 2*m.b19 + 2*m.b19*m.b93 + 2*m.b19*m.b94 - 2*m.b19*m.b96 - 2*m.b19*
m.b98 + 2*m.b19*m.b99 + 2*m.b19*m.b101 - 2*m.b19*m.b106 + 2*m.b19*m.b107 - 2*m.b19*m.b110 + 2*
m.b19*m.b111 + 2*m.b19*m.b113 - 2*m.b19*m.b116 - 2*m.b19*m.b117 - 2*m.b19*m.b118 - 2*m.b19*
m.b119 - 2*m.b19*m.b120 - 2*m.b19*m.b122 - 2*m.b19*m.b123 - 2*m.b19*m.b124 - 2*m.b19*m.b125 -
2*m.b19*m.b126 - 2*m.b19*m.b127 + 2*m.b19*m.b129 + 2*m.b19*m.b130 + 2*m.b19*m.b133 + 2*m.b19*
m.b135 + 2*m.b19*m.b136 + 2*m.b19*m.b138 + 2*m.b19*m.b139 + 2*m.b19*m.b142 + 2*m.b19*m.b144 +
2*m.b19*m.b145 - 2*m.b19*m.b149 - 2*m.b19*m.b151 - 2*m.b19*m.b154 - 2*m.b19*m.b156 - 2*m.b19*
m.b158 - 2*m.b19*m.b161 + 2*m.b19*m.b168 + 2*m.b19*m.b170 + 2*m.b19*m.b171 - 2*m.b19*m.b173 -
2*m.b19*m.b176 + 2*m.b19*m.b177 + 2*m.b19*m.b178 - 2*m.b19*m.b181 - 2*m.b19*m.b182 - 2*m.b20*
m.b92 + 11*m.b20 - 2*m.b20*m.b94 - 2*m.b20*m.b95 - 2*m.b20*m.b96 - 2*m.b20*m.b97 - 2*m.b20*
m.b98 - 2*m.b20*m.b99 - 2*m.b20*m.b101 - 2*m.b20*m.b102 - 2*m.b20*m.b103 - 2*m.b20*m.b104 - 2*
m.b20*m.b105 - 2*m.b20*m.b107 - 2*m.b20*m.b109 - 2*m.b20*m.b110 - 2*m.b20*m.b111 - 2*m.b20*
m.b112 - 2*m.b20*m.b113 - 2*m.b20*m.b115 - 2*m.b20*m.b117 - 2*m.b20*m.b118 - 2*m.b20*m.b120 -
2*m.b20*m.b121 - 2*m.b20*m.b124 - 2*m.b20*m.b125 + 2*m.b20*m.b129 + 2*m.b20*m.b132 + 2*m.b20*
m.b133 + 2*m.b20*m.b136 + 2*m.b20*m.b137 + 2*m.b20*m.b138 + 2*m.b20*m.b141 + 2*m.b20*m.b142 +
2*m.b20*m.b145 + 2*m.b20*m.b146 - 2*m.b20*m.b147 - 2*m.b20*m.b148 - 2*m.b20*m.b151 - 2*m.b20*
m.b152 + 2*m.b20*m.b156 + 2*m.b20*m.b157 + 2*m.b20*m.b160 + 2*m.b20*m.b161 + 2*m.b20*m.b162 +
2*m.b20*m.b163 + 2*m.b20*m.b166 + 2*m.b20*m.b167 - 2*m.b20*m.b169 - 2*m.b20*m.b170 - 2*m.b20*
m.b173 - 2*m.b20*m.b174 + 2*m.b20*m.b178 + 2*m.b20*m.b179 + 2*m.b20*m.b180 + 2*m.b20*m.b181 -
2*m.b21*m.b96 + 6*m.b21 - 2*m.b21*m.b98 - 2*m.b21*m.b104 - 2*m.b21*m.b106 - 2*m.b21*m.b110 - 2
*m.b21*m.b116 - 2*m.b21*m.b117 - 2*m.b21*m.b118 - 2*m.b21*m.b121 - 2*m.b21*m.b122 - 2*m.b21*
m.b124 - 2*m.b21*m.b127 + 2*m.b21*m.b129 + 2*m.b21*m.b130 + 2*m.b21*m.b133 + 2*m.b21*m.b135 +
2*m.b21*m.b136 + 2*m.b21*m.b138 + 2*m.b21*m.b139 + 2*m.b21*m.b142 + 2*m.b21*m.b144 + 2*m.b21*
m.b145 - 2*m.b21*m.b148 - 2*m.b21*m.b149 - 2*m.b21*m.b151 - 2*m.b21*m.b154 - 2*m.b21*m.b155 -
2*m.b21*m.b156 - 2*m.b21*m.b158 - 2*m.b21*m.b161 + 2*m.b21*m.b163 + 2*m.b21*m.b165 + 2*m.b21*
m.b166 + 2*m.b21*m.b168 + 2*m.b21*m.b170 + 2*m.b21*m.b171 - 2*m.b21*m.b173 - 2*m.b21*m.b176 +
2*m.b21*m.b177 + 2*m.b21*m.b178 - 2*m.b21*m.b181 - 2*m.b21*m.b182 - 2*m.b22*m.b92 + 15*m.b22
- 2*m.b22*m.b94 - 2*m.b22*m.b99 - 2*m.b22*m.b103 - 2*m.b22*m.b104 - 2*m.b22*m.b105 - 2*m.b22*
m.b107 - 2*m.b22*m.b109 - 2*m.b22*m.b110 - 2*m.b22*m.b111 - 2*m.b22*m.b112 - 2*m.b22*m.b113 -
2*m.b22*m.b115 - 2*m.b22*m.b121 - 2*m.b22*m.b124 - 2*m.b22*m.b125 - 2*m.b22*m.b148 - 2*m.b22*
m.b151 - 2*m.b22*m.b152 + 2*m.b22*m.b162 + 2*m.b22*m.b163 + 2*m.b22*m.b166 + 2*m.b22*m.b167 -
2*m.b22*m.b169 - 2*m.b22*m.b170 - 2*m.b22*m.b173 - 2*m.b22*m.b174 + 2*m.b22*m.b178 + 2*m.b22*
m.b179 + 2*m.b22*m.b180 + 2*m.b22*m.b181 + 2*m.b23*m.b102 + 12*m.b23 - 2*m.b23*m.b104 - 2*
m.b23*m.b106 - 2*m.b23*m.b110 - 2*m.b23*m.b116 + 2*m.b23*m.b120 - 2*m.b23*m.b121 - 2*m.b23*
m.b122 - 2*m.b23*m.b124 - 2*m.b23*m.b127 + 2*m.b23*m.b147 - 2*m.b23*m.b148 - 2*m.b23*m.b149 -
2*m.b23*m.b151 - 2*m.b23*m.b154 - 2*m.b23*m.b155 - 2*m.b23*m.b156 - 2*m.b23*m.b157 - 2*m.b23*
m.b158 - 2*m.b23*m.b159 - 2*m.b23*m.b160 - 2*m.b23*m.b161 + | |
"""
Asynchronous Advantage Actor Critic, A3C + RNN in continuous action space (https://arxiv.org/abs/1602.01783)
with Generalized Advantage Estimation, GAE (https://arxiv.org/abs/1506.02438)
Actor and Critic share similarities with the DDPG architecture (https://arxiv.org/abs/1509.02971)
Special thanks to the following GitHub users for their blogs & examples on A3C:
<NAME> (morvanzhou), <NAME> (awjuliani), <NAME> (andrewliao11), Jaromír (jaara),
<NAME> (dennybritz), <NAME> (coreylynch), NVlabs, OpenAI
"""
import multiprocessing
import threading
import tensorflow as tf
import numpy as np
import gym
import os
import scipy.signal
from gym import wrappers
from datetime import datetime
OUTPUT_RESULTS_DIR = "./"
ENVIRONMENT = 'Pendulum-v0'
# ENVIRONMENT = 'MountainCarContinuous-v0'
# ENVIRONMENT = 'BipedalWalker-v2'
# ENVIRONMENT = 'BipedalWalkerHardcore-v2'
# ENVIRONMENT = 'LunarLanderContinuous-v2'
RENDER = False
RESTORE_DATE = None
if RESTORE_DATE is not None:
SUMMARY_DIR = os.path.join(OUTPUT_RESULTS_DIR, 'A3C_LSTM', "gym", ENVIRONMENT, RESTORE_DATE)
else:
TIMESTAMP = datetime.now().strftime("%Y%m%d-%H%M%S")
SUMMARY_DIR = os.path.join(OUTPUT_RESULTS_DIR, "A3C_LSTM", "gym", ENVIRONMENT, TIMESTAMP)
N_WORKERS = multiprocessing.cpu_count() * 2
# N_WORKERS = 1
MAX_GLOBAL_EP = 10000 * N_WORKERS
GLOBAL_NET_SCOPE = 'Global_Net'
UPDATE_GLOBAL_ITER = 5 # 5 for Pendulum
GAMMA = 0.99 # Paper uses 0.99
LAMBDA = 1.00 # 1.00 for Pendulum
ENTROPY_BETA = 0.0001 # Paper uses 0.0001
LR_A = 0.0001 # learning rate for actor - 0.000001 for Pendulum
LR_C = 0.0001 # learning rate for critic - 0.0001 for Pendulum
CELL_SIZE = 128
GLOBAL_EP = 0
RANDOM_SEED = 12345
env = gym.make(ENVIRONMENT)
N_S = env.observation_space.shape[0]
N_A = env.action_space.shape[0]
with tf.name_scope('env_bounds'):
S_UPPER = tf.Variable(env.observation_space.high, dtype=tf.float32, name="state_upper")
S_LOWER = tf.Variable(env.observation_space.low, dtype=tf.float32, name="state_lower")
A_UPPER = tf.Variable(env.action_space.high, dtype=tf.float32, name="action_upper")
A_LOWER = tf.Variable(env.action_space.low, dtype=tf.float32, name="action_lower")
class ACNet(object):
def __init__(self, scope, global_net=None):
if scope == GLOBAL_NET_SCOPE: # Create global network. This isn't used for prediction, only parameter updates
with tf.variable_scope(scope):
self.s = tf.placeholder(tf.float32, [None, N_S], 'S_global')
self.is_training = False
self._build_net()
self.a_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/actor')
self.c_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')
else: # Create local net and loss ops
with tf.variable_scope(scope):
with tf.name_scope('input_norm'):
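                    # linearly rescale raw observations into [-1, 1] using the environment's state bounds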
self.s = 2 * (tf.placeholder(tf.float32, [None, N_S], 'S') - S_LOWER) / (S_UPPER - S_LOWER) - 1
self.a_history = tf.placeholder(tf.float32, [None, N_A], 'A_history')
self.advantage = tf.placeholder(tf.float32, [None, 1], 'Advantage')
self.R_discounted = tf.placeholder(tf.float32, [None, 1], 'R_discounted')
                self.is_training = tf.placeholder(tf.bool)
mu, sigma, self.v = self._build_net()
with tf.name_scope('action_prep'):
with tf.name_scope('wrap_a_out'):
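                        # scale the mean into the action range and floor sigma for numerical stability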
mu, sigma = mu * A_UPPER, sigma + 1e-4
normal_dist = tf.contrib.distributions.Normal(mu, sigma)
with tf.name_scope('choose_a'): # use local params to choose action
self.A = tf.clip_by_value(tf.squeeze(normal_dist.sample(1, seed=RANDOM_SEED), axis=0),
A_LOWER, A_UPPER, name="a_chosen")
with tf.name_scope('loss'):
with tf.name_scope('a_loss'):
self.log_prob = normal_dist.log_prob(self.a_history)
self.entropy = tf.reduce_sum(normal_dist.entropy())
self.a_loss = -tf.reduce_sum(self.log_prob * self.advantage) - self.entropy * ENTROPY_BETA
with tf.name_scope('c_loss'):
self.c_loss = 0.5 * tf.reduce_sum(tf.square(self.R_discounted - self.v))
with tf.name_scope('local_grad'):
self.a_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/actor')
self.c_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')
a_grads = tf.gradients(self.a_loss, self.a_params)
c_grads = tf.gradients(self.c_loss, self.c_params)
# Gradient clipping
self.a_grads, _ = tf.clip_by_global_norm(a_grads, 10.0)
self.c_grads, _ = tf.clip_by_global_norm(c_grads, 10.0)
for grad, var in list(zip(self.a_grads, self.a_params)):
tf.summary.histogram(var.name + '/gradient', grad)
for grad, var in list(zip(self.c_grads, self.c_params)):
tf.summary.histogram(var.name + '/gradient', grad)
with tf.name_scope('sync'):
with tf.name_scope('pull'):
self.pull_a_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.a_params, global_net.a_params)]
self.pull_c_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.c_params, global_net.c_params)]
with tf.name_scope('push'):
# Use a globally shared pair of Adam optimisers
self.update_a_op = OPT_A.apply_gradients(zip(self.a_grads, global_net.a_params))
self.update_c_op = OPT_C.apply_gradients(zip(self.c_grads, global_net.c_params))
# Each worker has an independent set of Adam optimiser parameters
# opt_a = tf.train.AdamOptimizer(LR_A, name='AdamA')
# opt_b = tf.train.AdamOptimizer(LR_C, name='AdamC')
# self.update_a_op = opt_a.apply_gradients(zip(self.a_grads, global_net.a_params))
# self.update_c_op = opt_b.apply_gradients(zip(self.c_grads, global_net.c_params))
tf.summary.scalar("Actor/Loss/" + scope, self.a_loss)
tf.summary.scalar("Actor/Advantage/" + scope, tf.reduce_sum(self.advantage))
tf.summary.scalar("Actor/Entropy/" + scope, tf.reduce_sum(self.entropy * ENTROPY_BETA))
tf.summary.scalar("Critic/Loss/" + scope, self.c_loss)
tf.summary.scalar("Critic/Value/" + scope, tf.reduce_sum(self.v))
tf.summary.scalar("Critic/Discounted_Reward/" + scope, tf.reduce_sum(self.R_discounted))
summary_list = [s for s in tf.get_collection(tf.GraphKeys.SUMMARIES) if scope in s.name]
self.summarise = tf.summary.merge(summary_list)
def _build_net(self):
w_init = tf.contrib.layers.variance_scaling_initializer(seed=RANDOM_SEED)
w_init_final = tf.random_uniform_initializer(minval=-0.003, maxval=0.003, seed=RANDOM_SEED)
w_reg = tf.contrib.layers.l2_regularizer(0.01)
        # Actor and critic are separate networks with 2 dense layers as per DDPG; an RNN cell is added after the final dense layer
with tf.variable_scope('actor'):
a_hidden1 = tf.layers.dense(self.s, 400, tf.nn.relu6, kernel_initializer=w_init,
kernel_regularizer=w_reg, name='hidden_1')
# a_hidden1 = tf.layers.batch_normalization(a_hidden1, training=self.is_training)
# a_hidden1 = tf.layers.dropout(a_hidden1, training=self.is_training)
a_hidden2 = tf.layers.dense(a_hidden1, 300, tf.nn.relu6, kernel_initializer=w_init,
kernel_regularizer=w_reg, name='hidden_2')
# a_hidden2 = tf.layers.batch_normalization(a_hidden2, training=self.is_training)
# a_hidden2 = tf.layers.dropout(a_hidden2, training=self.is_training)
# [time_step, feature] => [time_step, batch, feature]
a_cell_in = tf.expand_dims(a_hidden2, axis=1, name='timely_input')
a_rnn_cell = tf.contrib.rnn.BasicRNNCell(CELL_SIZE)
self.a_init_state = a_rnn_cell.zero_state(batch_size=1, dtype=tf.float32)
a_outputs, self.a_final_state = tf.nn.dynamic_rnn(
cell=a_rnn_cell, inputs=a_cell_in, initial_state=self.a_init_state, time_major=True)
a_cell_out = tf.reshape(a_outputs, [-1, CELL_SIZE], name='flatten_rnn_outputs')
# mu = tf.layers.dense(a_cell_out, N_A, tf.nn.tanh, kernel_initializer=w_init_final,
# kernel_regularizer=w_reg, name='mu')
mu = tf.layers.dense(a_cell_out, N_A, kernel_initializer=w_init_final, kernel_regularizer=w_reg,
use_bias=False, name='mu')
sigma = tf.layers.dense(a_cell_out, N_A, tf.nn.softplus, kernel_initializer=w_init_final,
kernel_regularizer=w_reg, name='sigma')
with tf.variable_scope('critic'):
c_hidden1 = tf.layers.dense(self.s, 400, tf.nn.relu6, kernel_initializer=w_init,
kernel_regularizer=w_reg, name='hidden_1')
# c_hidden1 = tf.layers.batch_normalization(c_hidden1, training=self.is_training)
# c_hidden1 = tf.layers.dropout(c_hidden1, training=self.is_training)
c_hidden2 = tf.layers.dense(c_hidden1, 300, tf.nn.relu6, kernel_initializer=w_init,
kernel_regularizer=w_reg, name='hidden_2')
# c_hidden2 = tf.layers.batch_normalization(c_hidden2, training=self.is_training)
# c_hidden2 = tf.layers.dropout(c_hidden2, training=self.is_training)
# [time_step, feature] => [time_step, batch, feature]
c_cell_in = tf.expand_dims(c_hidden2, axis=1, name='timely_input')
c_rnn_cell = tf.contrib.rnn.BasicRNNCell(CELL_SIZE)
self.c_init_state = c_rnn_cell.zero_state(batch_size=1, dtype=tf.float32)
c_outputs, self.c_final_state = tf.nn.dynamic_rnn(
cell=c_rnn_cell, inputs=c_cell_in, initial_state=self.c_init_state, time_major=True)
c_cell_out = tf.reshape(c_outputs, [-1, CELL_SIZE], name='flatten_rnn_outputs')
v = tf.layers.dense(c_cell_out, 1, kernel_initializer=w_init, name='v') # state value
return mu, sigma, v
def update_global(self, feed_dict):
return SESS.run([self.update_a_op, self.update_c_op, self.summarise], feed_dict)[2]
def pull_global(self):
SESS.run([self.pull_a_params_op, self.pull_c_params_op])
def eval_state(self, s, a_state, c_state):
a, v, a_cell_state, c_cell_state = SESS.run([self.A, self.v, self.a_final_state, self.c_final_state],
{self.s: s[np.newaxis, :], self.a_init_state: a_state,
self.c_init_state: c_state, self.is_training: False})
return a[0], v[0], a_cell_state, c_cell_state
def get_value(self, s, cell_state):
return SESS.run(self.v, {self.s: s[np.newaxis, :], self.c_init_state: cell_state, self.is_training: False})[0]
def add_histogram(writer, tag, values, step, bins=1000):
"""
Logs the histogram of a list/vector of values.
From: https://gist.github.com/gyglim/1f8dfb1b5c82627ae3efcfbbadb9f514
"""
# Create histogram using numpy
counts, bin_edges = np.histogram(values, bins=bins)
# Fill fields of histogram proto
hist = tf.HistogramProto()
hist.min = float(np.min(values))
hist.max = float(np.max(values))
hist.num = int(np.prod(values.shape))
hist.sum = float(np.sum(values))
hist.sum_squares = float(np.sum(values ** 2))
# Requires equal number as bins, where the first goes from -DBL_MAX to bin_edges[1]
# See https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/framework/summary.proto#L30
# Thus, we drop the start of the first bin
bin_edges = bin_edges[1:]
# Add bin edges and counts
for edge in bin_edges:
hist.bucket_limit.append(edge)
for c in counts:
hist.bucket.append(c)
# Create and write Summary
summary = tf.Summary(value=[tf.Summary.Value(tag=tag, histo=hist)])
writer.add_summary(summary, step)
class Worker(object):
def __init__(self, name, global_net):
self.env = gym.make(ENVIRONMENT)
self.name = name
self.ep_count = 0
self.AC = ACNet(name, global_net)
# if self.name == 'Worker_0':
# self.env = wrappers.Monitor(self.env, os.path.join(SUMMARY_DIR, ENVIRONMENT+'_'+self.name))
@staticmethod
def discount(x, gamma):
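        # lfilter([1], [1, -gamma]) over the reversed sequence computes the discounted
        # cumulative sum y[t] = x[t] + gamma * y[t+1],
        # e.g. discount([1, 1, 1], 0.99) -> [2.9701, 1.99, 1.0]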
return scipy.signal.lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1]
def work(self):
global GLOBAL_EP
buffer_s, buffer_a, buffer_r, buffer_v = [], [], [], []
while not COORD.should_stop() and GLOBAL_EP < MAX_GLOBAL_EP:
self.env.seed(RANDOM_SEED + GLOBAL_EP)
s = self.env.reset()
ep_r, ep_t = 0, 0
ep_a, ep_v = [], []
a_rnn_state = SESS.run(self.AC.a_init_state) # Zero RNN state at beginning
c_rnn_state = SESS.run(self.AC.c_init_state)
a_keep_state = a_rnn_state.copy()
c_keep_state = c_rnn_state.copy()
while True:
if (self.name == 'Worker_1' or N_WORKERS == 1) and RENDER:
self.env.render()
a, v, a_rnn_state_, c_rnn_state_ = self.AC.eval_state(s, a_rnn_state, c_rnn_state)
s2, r, terminal, info = self.env.step(a)
ep_r += r
ep_t += 1
# r = np.clip(r, -1, 1) # clip reward
buffer_s.append(s)
buffer_a.append(a)
buffer_r.append(r)
buffer_v.append(v)
ep_a.append(a)
ep_v.append(v)
if ep_t % UPDATE_GLOBAL_ITER == 0 or terminal:
if terminal:
                        r_next = 0  # bootstrap value V(s_terminal) is zero at a terminal state
else:
r_next = self.AC.get_value(s2, c_rnn_state_)[0]
# Generalized Advantage Estimation - https://arxiv.org/abs/1506.02438
rewards = np.array(buffer_r)
discounted_rewards = self.discount(np.append(rewards, r_next), GAMMA)[:-1]
values = np.array(buffer_v + [r_next])
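                    # TD residuals delta_t = r_t + GAMMA * V(s_{t+1}) - V(s_t); the GAE
                    # advantage is their (GAMMA * LAMBDA)-discounted cumulative sum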
advantages = self.discount(rewards + GAMMA * values[1:] - values[:-1], GAMMA * LAMBDA)
feed_dict = {
self.AC.s: np.asarray(buffer_s),
self.AC.a_history: np.asarray(buffer_a),
self.AC.advantage: np.vstack(advantages),
self.AC.R_discounted: np.vstack(discounted_rewards),
self.AC.a_init_state: a_keep_state,
self.AC.c_init_state: c_keep_state,
self.AC.is_training: False # For small windows I doubt BN will work. Dropout may though
}
graph_summary = self.AC.update_global(feed_dict)
buffer_s, buffer_a, buffer_r, buffer_v = [], [], [], []
self.AC.pull_global()
# Replace the keep_state as the new initial rnn state_
a_keep_state = a_rnn_state_.copy()
c_keep_state = c_rnn_state_.copy()
s = s2
# Renew RNN states
a_rnn_state = a_rnn_state_
c_rnn_state = c_rnn_state_
if terminal:
print(self.name, "| Local Ep:", self.ep_count,
"| Global Ep:", GLOBAL_EP,
"| Reward: %.2f" % ep_r,
"| Reward/step: %.3f" % (ep_r / ep_t),
"| Steps:", ep_t)
# Add summaries for TensorBoard
if self.name == 'Worker_0' and self.ep_count % 5 == 0:
worker_summary = tf.Summary()
worker_summary.value.add(tag="Reward/" + self.name, simple_value=ep_r)
# worker_summary.value.add(tag="Steps/" + self.name, simple_value=ep_t)
add_histogram(WRITER, "Critic/Value/" + self.name, np.ravel(ep_v), self.ep_count)
# Create Action histograms for each dimension
actions = np.array(ep_a)
for a in range(N_A):
add_histogram(WRITER, "Action/Dim"+str(a)+"/" + self.name, actions[:, a], self.ep_count)
WRITER.add_summary(worker_summary, self.ep_count)
WRITER.add_summary(graph_summary, self.ep_count)
WRITER.flush()
GLOBAL_EP += 1
self.ep_count += 1
break
if __name__ == "__main__":
config = tf.ConfigProto()
# config.gpu_options.per_process_gpu_memory_fraction = 0.01
SESS = tf.Session(config=config)
| |
<reponame>akliouev/LXD-Cloud
#!/usr/bin/python
from __future__ import with_statement
import inspect, os, shutil
import subprocess
import datetime
import time
from time import sleep
import socket
import string
import sys
import logging
import logging.handlers
from tendo import singleton
import argparse
import paramiko
import json
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"-x",
"--fqdn",
help = "FQDN of the server",
required = True,
type = str
)
parser.add_argument(
"-f",
"--hostname",
help = "Specify the hostname",
required = True,
type = str
)
return parser.parse_args()
# display time as HMS
def duration_format(sec_elapsed):
h = int(sec_elapsed / (60 * 60))
m = int((sec_elapsed % (60 * 60)) / 60)
s = sec_elapsed % 60.
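    # e.g. duration_format(3723.5) -> '1:02:03.50'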
return "{}:{:>02}:{:>05.2f}".format(h, m, s)
# log results
def create_logger():
# create logger for "Sample App"
log_runtime = logging.getLogger('automated_runtime')
log_runtime.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
fh = logging.FileHandler('results.log', mode='w')
fh.setLevel(logging.DEBUG)
# create formatter and add it to the handlers
formatter = logging.Formatter('[%(asctime)s] %(message)s ',datefmt='%Y-%m-%d %H:%M:%S')
fh.setFormatter(formatter)
#ch.setFormatter(formatter)
# add the handlers to the logger
log_runtime.addHandler(fh)
return log_runtime
#
# db = MySQLdb.connect(
# host = conf['logger']['mysql']['host'],
# user = conf['logger']['mysql']['user'],
# passwd = conf['logger']['mysql']['password'],
# db = conf['logger']['mysql']['database']
# )
# cursor = db.cursor()
# try:
# if log_type == 'backup':
# output('Log backup', False)
# cursor.execute("INSERT INTO `backupschedule` (`host`, `start`, `end`, `startgroup`, `backupserver`) values (%s,%s,%s,%s,%s)", (
# lxc_host['name'],
# start_datetime,
# end_datetime,
# conf['runtime'],
# conf['local_info']['hostname']))
# elif log_type == 'rotate':
# output('Log rotate',False)
# cursor.execute("INSERT INTO `rotateschedule` (`host`, `start`, `end`, `startgroup`, `backupserver`) values (%s,%s,%s,%s,%s)", (
# lxc_host['name'],
# start_datetime,
# end_datetime,
# conf['runtime'],
# conf['local_info']['hostname']))
# db.commit()
# provide status output
def status(var):
output('[' + var + ']',True,False)
return
# provide output
def output(string, clear = True, timestamp = True):
# global print_cache
# global log_runtime
if string is None or string.isspace():
return
if(timestamp):
print '[' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '] ',
if(clear):
print string
else:
print string,
log_runtime.log(logging.INFO, string)
# connect to lxc_host via SSH key
def ssh_connect(fqdn):
global ssh
global containerization
output(' - Login ', False)
try:
# create paramiko ssh client
ssh = paramiko.SSHClient()
ssh._policy = paramiko.WarningPolicy()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# select key
ssh_key = paramiko.RSAKey.from_private_key_file('/root/.ssh/lxd_hosts')
# create a log file for SSH because stdout may be large during rsync
try:
os.remove('transfer.log')
except OSError:
pass
os.mknod('transfer.log')
paramiko.util.log_to_file('transfer.log')
# try connecting
ssh.connect(fqdn,username='root',pkey=ssh_key)
except paramiko.SSHException:
status('FAIL')
return False
# show user logged in as
user = ssh_exec('whoami').strip()
output(' as ' + user, False, False)
status('PASS')
return True
# ssh exec and output return
def ssh_exec(command):
# check if still connected
    if not ssh.get_transport().is_active():
output(' Disconnected cannot run command: ', False)
status('FAIL')
output(command, True)
return False
    # check again whether the transport is alive, since is_active() can return false positives
try:
transport = ssh.get_transport()
transport.send_ignore()
except EOFError, e:
# connection is closed
output(' Disconnected cannot run command: ', False)
status('FAIL')
output(command, True)
return False
# standard input - where process reads to get information from user
# standard output - where process writes normal information to this file handle
# standard error - where process writes error information
stdin, stdout, stderr = ssh.exec_command(command)
# wait until a response hasn't been seen for timeout period
    # in case the command does not return EOF
timeout = 30
endtime = time.time() + timeout
while not stdout.channel.eof_received:
sleep(1)
if time.time() > endtime:
stdout.channel.close()
break
    result = ''
    for line in stdout.readlines():
        result += line
    return result.strip()
# run a command
def shell_exec(command):
try:
proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, errors = proc.communicate()
if errors:
return False
return output.strip()
except OSError as e:
if e.errno == os.errno.ENOENT:
# handle file not found error.
return False
else:
return False
# backup host
def backup_host(fqdn, hostname):
global path
global verbose
output('[ {host} ]'.format(**{'host':fqdn}))
try:
# make backup folder for rsync if missing
if not os.path.exists(path['backup']):
output('Make ' + path['backup'],False)
os.makedirs(path['backup'])
if not os.path.exists(path['backup']):
status('FAIL')
else:
status('PASS')
output(' - Copy {host_address}:/ {host_dir}/'.format(**{
'host_address' : fqdn,
'host_dir' : path['backup']
}), False)
# rsync lxc host to backup host using inode / hard linking
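        # --link-dest hard-links files unchanged since the previous rotation, so each
        # snapshot looks complete while only new or changed files consume space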
cmd = 'rsync -aAX --progress --delete --stats --link-dest={hard_link_dir} --exclude={exclude_paths} {host_address}:/ {host_dir}/'.format(**{
'hard_link_dir' : path['rotate'],
'exclude_paths' : ' --exclude='.join(path['exclude']),
'host_address' : fqdn,
'host_dir' : path['backup']
})
if verbose:
output(cmd)
os.system(cmd)
else:
shell_exec(cmd)
# touch host backup to indicate backup processed
shell_exec('touch ' + path['backup'])
status('N/A')
except:
status('FAIL')
# get array of containers on machine
def get_containers():
global containerization
try:
if containerization == 'lxd':
output('- Get LXD containers', False)
raw = ssh_exec('lxc list -cn --format json')
containers = json.loads(raw)
status('PASS')
elif containerization == 'lxc':
output('- Get LXC containers', False)
containers = []
raw = ssh_exec('ls /var/lib/lxc').split()
for container in raw:
state = ssh_exec('lxc-info -s -n ' + str(container)).split(":")[-1].strip()
containers.append({'name':str(container),'status':str(state)})
status('PASS')
else:
output('- No containers found', False)
containers = []
status('PASS')
except:
containers = []
status('FAIL')
return containers
# get containerization method of virtual machine
def containerization_get():
output(' - Determine containerization ',False)
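    # probe the remote filesystem: LXD keeps containers under /var/lib/lxd, LXC under /var/lib/lxc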
vm_type = ssh_exec('if test -d /var/lib/lxd/containers; then echo \'lxd\'; elif test -d /var/lib/lxc/; then echo \'lxc\'; else echo \'none\'; fi;')
if vm_type == 'lxd':
output('(LXD found)', False, False)
elif vm_type == 'lxc':
output('(LXC found)', False, False)
else:
output('(none found)', False, False)
vm_type = ''
status('PASS')
return vm_type
# unmount an lxc container's rootfs
def lxc_rootfs_umount(container):
rootfs = '/var/lib/lxc/' + container['name'] + '/rootfs'
    #if(not os.path.isdir(rootfs)):
    #    output('ERROR: rootfs for ' + container['name'] + ' does not exist')
    #    return False
try:
ssh_exec('umount ' + rootfs)
except Exception as error:
output('ERROR: Failed to unmount lvm for ' + container['name'] + ', you may need to do this manually')
output(error)
        return False
return True
# mount volume group
def lxc_rootfs_mount(container):
rootfs = '/var/lib/lxc/' + container['name'] + '/rootfs'
config = '/var/lib/lxc/' + container['name'] + '/config'
if lxc_rootfs_umount(container) == False:
return False
config = ssh_exec('cat ' + config)
for line in config.splitlines():
if 'lxc.rootfs' in line:
volume = line.split("=")[-1].strip()
ssh_exec('mount ' + volume + ' ' + rootfs)
return True
# backup a host container
def backup_host_container(fqdn, name, container):
global path
global verbose
global containerization
output(' - Process "' + container['name'] + '" ...')
# find container's storage pool
output(' - Find storage pool ',False)
try:
if containerization == 'lxd':
            # get the location of the container's storage pool
container_storage_pool_dir = ssh_exec('readlink -f /var/lib/lxd/containers/' + container['name'])
elif containerization == 'lxc':
container_storage_pool_dir = ssh_exec('readlink -f /var/lib/lxc/' + container['name'])
else:
output('- Skipping')
return
output(container_storage_pool_dir,False,False)
status('PASS')
    except Exception:
        status('FAIL')
        return
container_backup_dir = path['backup'] + container_storage_pool_dir
container_rotate_dir = path['rotate'] + container_storage_pool_dir
    # make the container folder on the backup host if it does not exist
if not os.path.exists(container_backup_dir):
output('- Make ' + container_backup_dir,False)
os.makedirs(container_backup_dir)
status('PASS')
# if container is running stop it unless it's ansible
if container['status'].lower() == "Running".lower() and container['name'].lower() != 'ansible':
output(' - Stop container', False)
# start downtime timer
start_timer = time.time()
# issue different commands based on containerization method
if containerization == 'lxd':
            # stop container, allowing 30 seconds for shutdown to accommodate a hang
ssh_exec('lxc stop --timeout 30 ' + container['name'])
            # after the up-to-30-second wait, issue a force stop to be sure it has stopped
ssh_exec('lxc stop --force ' + container['name'])
        # TODO: switch to pause if no database is detected in the container
elif containerization == 'lxc':
ssh_exec('lxc-freeze -n ' + container['name'])
status('N/A')
# mount rootfs if needed
if containerization == 'lxc':
lxc_rootfs_mount(container)
try:
output(' - Copy container (may take a while)... ',False)
# backup the containers storage pool
exclude_paths = ["/rootfs" + exclude for exclude in path['exclude']]
# backup container
cmd = 'rsync -aAX --progress --delete --stats --link-dest={hard_link_dir} --exclude={exclude_paths} {host_address}:{dir_from}/ {dir_to}'.format(
exclude_paths = ' --exclude='.join(exclude_paths),
host_address = fqdn,
hard_link_dir = container_rotate_dir,
dir_from = container_storage_pool_dir,
dir_to = container_backup_dir
)
if verbose:
output(cmd)
os.system(cmd)
else:
shell_exec(cmd)
status('PASS')
    except Exception:
        output(cmd)
status('FAIL')
# if container was running start it again unless it is ansible
if container['status'].lower() == "Running".lower() and container['name'].lower() != 'ansible':
output(' - Restart container', False)
# issue different commands depending on containerization method
if containerization == 'lxd':
ssh_exec('lxc start ' + container['name'])
elif containerization == 'lxc':
ssh_exec('lxc-unfreeze -n ' + container['name'])
status('N/A')
# output downtime
end_timer = time.time()
output(' - Restored after {} of downtime'.format(duration_format(end_timer - start_timer)))
# umount rootfs if needed
if containerization == 'lxc':
lxc_rootfs_umount(container)
if __name__ == "__main__":
# only allow one instance at a time
me = singleton.SingleInstance()
global log_runtime
global args
global path
global ssh
global containers
global verbose
    global containerization
# debug turn on to see Rsync info
verbose = False
# get args passed
args = get_args()
path = {}
path['base'] = '/vault/{folder}'
path['arg'] = path['base'] + '/{host_folder}/daily/{iteration}'
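    # format placeholders ({folder}, {host_folder}, {iteration}) are presumably filled in per host at runtime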
path['exclude'] = {
"/var/spool/apt-mirror/*",
#"/dev/*",
|