input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
#
# Copyright (c) 2017 Ensoft Ltd, 2008-2010 <NAME>, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
#
# This file defines the external API of the backends, and splits out
# to the right one depending on the Project.
#
# It also defines a few helper functions first.
#
from tiqit import *
import fields, sys, re, utils
from StringIO import StringIO
_backends = {}
def initialise():
    """Load the field definitions and import every configured backend.

    Side effects: sets the module-level ``allFields`` mapping and populates
    the module-level ``_backends`` dict, keyed by bug-ID prefix, with the
    imported backend modules.
    """
    global allFields
    allFields = fields.load()
    # Import all the configured backends: each entry in the [backends]
    # config section maps a bug-ID prefix to a backend module name.
    for prefix, mod in Config().section('backends').items():
        # Should probably check prefix is valid here
        __import__(mod)
        # __import__ returns the top-level package, so fetch the actual
        # (possibly dotted) module from sys.modules instead.
        _backends[prefix] = sys.modules[mod]
#
# Helper functions (not part of external API)
#
# The closed set of note ("enclosure") type strings recognised by the
# backends, in alphabetical order.
noteTypes = ("A-comments",
             "C-comments",
             "Code-review",
             "Configuration",
             "Crash-decode",
             "D-comments",
             "Debug-output",
             "Deferral-Info",
             "Dev-escape-comments",
             "Documentation",
             "EDP-resolver-comments",
             "Eng-notes",
             "Evaluation",
             "F-comments",
             "Field-Notice-Info",
             "H-comments",
             "I-comments",
             "J-comments",
             "M-comments",
             "N-comments",
             "O-comments",
             "Other",
             "P-comments",
             "PSIRT-evaluation",
             "R-comments",
             "Release-note",
             "SS-Eval",
             "SS-Review",
             "SS-Test",
             "Static-analysis",
             "TS-Eval",
             "TS-Info",
             "Test",
             "Testplan",
             "U-comments",
             "Unit-test",
             "Unknown-type",
             "V-comments",
             "W-comments",
             "Workaround");
class DataCommon(object):
    """
    Common superclass for bug data from any source.

    Wraps the raw data so that Field -> Value access is uniform across
    backends.  Concrete subclasses must supply a ``_getRawValue`` method
    returning the raw value stored under a given view name.

    Provided accessors:
      getSanitisedValue(name, pretty): look up the field's view name(s),
          fetch the raw values via ``_getRawValue`` and run them through
          the field's view filter (or HTML filter when ``pretty``).
      __getitem__: dictionary-style shorthand for the standard-format
          lookup; supplied for code neatness as it is the common case.
    """

    def __getitem__(self, key):
        # Equivalent to getSanitisedValue(key) with standard formatting.
        return self.getSanitisedValue(key, False)

    @utils.memoize
    def getSanitisedValue(self, name, pretty=False):
        """Return the transformed value of the named TiqitField.

        ``name`` is the field's unique name; ``pretty`` selects HTML
        output instead of the standard view format.
        """
        field = allFields[name]
        # The unique field name maps onto one or more view names under
        # which the value is actually stored in the data.
        rawValues = [self._getRawValue(viewName) for viewName in field.viewnames]
        # Convert according to the requested output format.
        if pretty:
            return field.filterHtml(self, *[encodeHTML(val) for val in rawValues])
        return field.filterView(self, *rawValues)

    def getHistory(self):
        """
        Returns a list of dicts containing the following fields:
         - Date: a string representing the date in some random format
         - Operation: the operation that the event is tracking
         - NewValue: the new value of the field
         - OldValue: the previous value of the field
         - Field: the name of the field that is changing (not converted?)
         - User: the user who performed the change
         - DateVal: the date in a useful format (from data.mktime())

        This base implementation has no history.
        """
        return []

    def getEnclosures(self):
        """
        Returns a list of dicts containing the following fields:
         - Date: a string representing the date the note was last changed
         - Creator: user who created the note
         - Updater: user who last updated the note
         - Type: the type string of the note
         - Title: title of the note
         - Note: the note content
         - Size: the number of characters in the note
         - Identifier: the bugid the note belongs to (i.e. of this bugdata)

        This base implementation has no enclosures.
        """
        return []

    def getAttachments(self):
        """
        Returns a list of dicts containing the following fields:
         - Date: a string representing the date the attachment changed
         - Creator: user who created the attachment
         - Updater: user who last updated the attachment
         - Type: the type string of the attachment
         - Title: title of the attachment
         - Size: the number of characters in the attachment
         - Identifier: the bugid the attachment belongs to
         - Name: a second name/title field (see backend for semantics)
         - Ext: a file extension (or similar) field
         - LinkName: the name to use when trying to locate the attachment

        This base implementation has no attachments.
        """
        return []

    def getRelates(self):
        """
        Returns a list of 3-tuples containing:
         - Bug ID of the related bug
         - Relationship (a user-friendly string)
         - Boolean indicating whether the relationship is deleteable

        This base implementation has no related bugs.
        """
        return []
class OverriddenData(DataCommon):
    """
    A DataCommon wrapper combining base data with a set of overrides.

    Constructed from two pieces: the base data (in DataCommon format) and
    a dict mapping field names to view-format values.  Lookups consult the
    overrides first and fall back to the base data.

    Only the standard output format is supported; pretty (HTML)
    formatting is not.
    """

    def __init__(self, data, overrides):
        self._data = data
        self._overrides = overrides

    def getSanitisedValue(self, name, pretty):
        # Overrides only ever hold standard-format values.
        assert not pretty, "Overridden data only supports standard output format"
        # Prefer the override when present, otherwise defer to the base.
        return self._overrides[name] if name in self._overrides else self._data[name]

    def __repr__(self):
        return "<OverriddenData(data=%s,overrides=%s)>" % (str(self._data), str(self._overrides))
def isValidField(field, data):
    """Return False if this field is currently banned, True otherwise.

    A field's ``_bannedIf`` maps parent field names to lists of parent
    values that ban this field.  The parent values are read through the
    data object, which consults pending changes first (in case the parent
    is also being updated) and falls back to the stored bug data.
    """
    for parentName, banningValues in field._bannedIf.items():
        if data[parentName] in banningValues:
            return False
    return True
def extractFieldsFromFormat(cls, formats):
    """Return the unique fields referenced by the given format specs.

    Each entry of ``formats`` is a (title, titleDetail, format) tuple.
    Every ``%(Name)s`` / ``%(NameRaw)s`` substitution in the expanded
    format is mapped to its TiqitField; ``...Label`` substitutions are
    skipped as they are labels, not field values.  Order of first
    appearance is preserved and duplicates are removed.
    """
    substitution = re.compile(r'%\((.*?)(?:Raw)?\)s')
    referenced = []
    for title, titleDetail, fmt in formats:
        for fieldName in substitution.findall(cls.getFormat(fmt)):
            if not fieldName.endswith('Label'):
                referenced.append(allFields[fieldName])
    # dict.fromkeys de-duplicates while keeping first-seen order.
    return list(dict.fromkeys(referenced))
def getFormatArguments(data, inFields=None):
    """Build the substitution dict used when expanding a format string.

    For every field (all fields when ``inFields`` is falsy) three keys are
    produced:
      - ``<Name>Raw``: the raw (view-format) value from ``data``
      - ``<Name>Label``: the HTML <label> element, after plugins have had
        a chance to amend it
      - ``<Name>``: the editable-HTML rendering of the value
    """
    if not inFields:
        inFields = allFields.values()
    args = {}
    for field in inFields:
        val = data[field.name]
        args['%sRaw' % field.name] = val
        # Plugins can add information to the Label; use a StringIO so
        # updateLabel can write into the buffer.
        # NOTE(review): with StringIO the write position starts at 0, so
        # plugin writes appear to overwrite the initial markup unless the
        # plugin seeks to the end first — confirm against plugins.updateLabel.
        label = StringIO("<label for='%s' id='%sLabel'>%s:</label>" % (field.name, field.name, field.longname))
        plugins.updateLabel(field, label)
        args['%sLabel' % field.name] = label.getvalue()
        label.close()
        args[field.name] = field.filterEditableHtml(data, val)
    # Now return the lovely dict
    return args
#
# The first section deals with loading bugs
#
def loadBugs(bugids):
    """
    Loads the given bugids and returns an array of bug data objects.

    Bug IDs are grouped into consecutive runs sharing the same 3-character
    backend prefix, and each run is fetched from its backend in a single
    call.  Returns an empty list for empty input.
    """
    bck = None
    curr = []
    data = []
    for bugid in bugids:
        prefix = bugid[:3]
        if prefix != bck:
            # Prefix changed: flush the run accumulated for the previous
            # backend before starting a new one.
            if curr:
                data.extend(_backends[bck].loadBugs(curr))
            curr = []
            bck = prefix
        curr.append(bugid)
    # Flush the final run.  The guard fixes a crash in the original code,
    # which unconditionally did _backends[bck].loadBugs(curr) and so raised
    # KeyError(None) when bugids was empty.
    if curr:
        data.extend(_backends[bck].loadBugs(curr))
    return data
def performQuery(*args):
    """
    Performs the given query. (Laziness omits the arguments here)
    Every backend is asked to perform it and the per-backend result
    lists are concatenated into one.
    """
    results = []
    for backend in _backends.values():
        partial = backend.performQuery(*args)
        # A backend may return None/empty to indicate no matches.
        if partial:
            results.extend(partial)
    return results
#
# This section deals with actually updating the database
#
def createBug(fields):
    """Create a new bug via the backend owning the Project's prefix."""
    backend = _backends[fields['Project'][:3]]
    return backend.createBug(fields)
def updateBug(bugid, changes):
    """Apply the given changes to the bug via its owning backend."""
    backend = _backends[bugid[:3]]
    return backend.updateBug(bugid, changes)
def addNote(bugid, noteType, noteTitle, noteContent, isUpdate=False):
    """
    Add a new note to the given bug, via the bug's owning backend.

    Arguments:
      - bugid: the ID of the bug to add the note to
      - noteType: type of note to add (str)
      - noteTitle: the title to give the note
      - noteContent: the content of the new note
      - isUpdate: is this an update to an existing note
    """
    backend = _backends[bugid[:3]]
    return backend.addNote(bugid, noteType, noteTitle, noteContent, isUpdate)
def deleteNote(bugid, noteTitle):
    """Delete the note with the given title from the given bug."""
    backend = _backends[bugid[:3]]
    return backend.deleteNote(bugid, noteTitle)
def renameNote(bugid, noteType, noteTitle, newTitle):
return _backends[bugid[:3]].renameNote(bugid, noteType, | |
# Copyright (c) 2020-2021 Matematyka dla Ciekawych Świata (http://ciekawi.icm.edu.pl/)
# Copyright (c) 2020-2021 <NAME> <<EMAIL>>
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
try: clipData
except NameError: clipData = []
clipData += [
{ 'title': [ "#09.3", "Transmisja", "danych", "" ] },
{ 'comment': 'transmisja - intro' },
{
'image': [
[0.0, eduMovie.convertFile('enkoder.svg', negate=True)],
["dekoder", eduMovie.convertFile('dekoder.svg', negate=True)],
["matrix", eduMovie.circuitjs("matrix", 1, 36, [
("longClick", 430, 100, 1.5), ("wait", 1), ("longClick", 560, 100, 1.5), ("wait", 1), ("longClick", 690, 100, 1.5), ("wait", 1), # pierwszy rząd
("longClick", 430, 235, 1.5), ("wait", 1), ("longClick", 560, 235, 1.5), ("wait", 1), # trzeci rząd
("click", 160, 330), ("wait", 1), ("click", 190, 330), ("wait", 1), ("click", 160, 330), ("wait", 1), # akywacja różnych rzędów ... kończymy na trzecim
("longClick", 430, 235, 1.5), ("wait", 1), ("longClick", 560, 235, 1.5), ("wait", 1), ("longClick", 690, 235, 1.5)
])],
],
'text' : [
'Rejestry szeregowe nie są jedynymi układami, <m> które pozwalają na redukcję ilości linii. <m>'
'Innym przykładem tego typu układów są enkodery i dekodery. <m>'
'Stosowane są one w sytuacji kiedy z jakiś względów zakładamy, że spośród n <m> jakiś linii tylko jedna w danym momencie ma prawo, powinna być aktywna. <m>'
'Enkoder <n>[eN] do <m>[eM] posiada <n>[eN] wejść i m-bitowe wyjście. <m>'
'Na wyjściu tym wystawia zakodowany binarnie numer wejścia które jest aktywne, <m> czyli w enkoderach aktywowanych stanem wysokim ma stan wysoki, <m> a w aktywowanych stanem niskim, z wejściami zanegowanymi, ma stan niski. <m>'
'Typowo są to układy priorytetowe, czyli jeżeli mimo wszystko kilka wejść <m> będzie aktywnych to wystawiony zostanie numer jednego z nich <m> – w zależności od użytego układu tego z najniższym lub najwyższym numerem. <m>'
'Układ taki często posiada dodatkowe wyjście sygnalizujące, <m> że żadne z wejść nie jest w stanie aktywnym lub reprezentowane jest to <m> jakąś wartością w wystawianym kodzie binarnym. <m>'
'Numer aktywnego wejścia na ogół podawany jest w NKB, <m> w niektórych układach mogą jednak być użyte inne kodowania. <mark name="dekoder" /> <m>'
'Odwrotne działanie realizuje dekoder <m>[eM] do <n>[eN]. <m>'
'Posiada on m-bitowe wejście i <n>[eN] wyjść. <m>'
'Aktywuje tylko jedno z tych wyjść - to którego numer znajduje się na wejściu. <m>'
'Aktywacja może oznaczać podanie na tym wyjściu stanu wysokiego, <m> podczas gdy pozostałe mają stan niski <m>'
'lub (w przypadku dekoderów z zanegowanym wyjściem) podanie stanu niskiego, <m> gdy wszystkie pozostałe są w stanie wysokim. <m>'
'Dekodery często posiadają dodatkowy sygnał wejściowy <m> zezwalający na aktywację wyjść. <mark name="matrix" />'
'Jednym z zastosowań dla takich układów może być obsługa matrycy przycisków. <m>'
'Przykład takiej matrycy opartej na dekoderze i enkoderze widoczny jest na symulacji. <m>'
'Pozwala ona na odczyt 12 przycisków z użyciem jedynie 4 linii <m>'
'– dwóch wyjściowych (wybierających który rząd przycisków jest czytany) <m> oraz dwóch wejściowych (informujących o tym który z przycisków <m> w wybranym rzędzie został wciśnięty). <m>'
'Wadą takiej matrycy jest brak możliwości detekcji <m> równoczesnego naciśnięcia kilku przycisków. <m>'
'W przedstawionym schemacie na wyjściu dekodera dodane zostały rezystory <m> zabezpieczające go przed uszkodzeniem w przypadku <m> równoczesnego wciśnięcia przycisków z kilku linii. <m>'
]
},
{
'image': [
[0.0, eduMovie.convertFile("mux.sch", negate=True)],
["mux", eduMovie.circuitjs("mux", 1, 10, [
("switch", 460, 70), ("wait", 0.5), ("switch", 460, 70), ("wait", 0.5), ("switch", 460, 70), # przełączanie linii 1
("wait", 1), ("switch", 255, 460), ("wait", 1), # przełączenie na 3 linię
("switch", 460, 70), ("wait", 0.5), ("switch", 460, 70) # przełączanie linii 1
])],
["demux", eduMovie.convertFile("mux.sch", negate=True)],
["analog_mux", eduMovie.convertFile("mux_analog.sch", negate=True)],
],
'text' : [
'Trochę podobne działanie mają multipleksery i demultipleksery cyfrowe. <m>'
'Multiplekser posiada <n>[eN] wejść, m-bitowe wejście sterujące i jedno wyjście. <m>'
'Wejście sterujące pełni funkcję analogiczną jak w dekoderze <m> - podajemy na nim numer linii, tyle że nie jest to linia która ma być aktywowana, <m> a linia której stan ma być odzwierciedlony na wyjściu. <mark name="mux" />'
'Tak jak pokazano na ekranie multiplekser można by skonstruować <m> podłączając do każdego wyjścia dekodera bramkę AND z sygnałem wejściowym <m> i łącząc wyjścia tych bramek do jednej bramki OR. <m>'
'Dekoder wybiera która z bramek jest aktywna i ta bramka (zawsze tylko jedna) <m> przepuszcza swój sygnał na wyjście. <mark name="demux" />'
'Demultiplekser ma działanie odwrotne do multipleksera, <m> czyli stan pojedynczej linii wystawia na wyjście o wskazanym numerze. <m>'
'Można zauważyć że to prawie to samo co dekoder <m> – on aktywował wyjście o danym numerze, zatem jakby dekoderowi zabronić aktywacji <m> wyjścia gdy linia jest w stanie niskim to uzyskalibyśmy demultiplekser. <m>'
'I tak się właśnie robi podłączając linie danych do wejścia zezwalającego <m> na aktywację wyjść, które posiada większość dekoderów. <m>'
'Gdy chcemy używać ich jako dekoderów a nie demultiplekserów wejście to <m> podłączamy do stanu wysokiego. <m>'
'Z tego względu nie warto szukać osobno układów <m> demultiplekserów cyfrowych i dekoderów. <mark name="analog_mux" />'
'Mamy również multipleksery analogowe, których działanie polega na <m> elektrycznym zwarciu odpowiedniego wejścia z wyjściem, <m>'
'z wykorzystaniem odpowiedniego układu tranzystorów, <m> w postaci bramek transmisyjnych, o których jeszcze powiemy. <m>'
'W związku z tym ten sam układ może być używany w roli <m> multipleksera jak i demultipleksera, <m> ponieważ przewodzenie sygnału w jego wypadku jest dwukierunkowe. <m>'
'W odróżnieniu od multipleksera cyfrowego, <m> który zawsze wymusza silny poziom logiczny swojego wyjścia, <m> multiplekser analogowy jest też w stanie przekazać poziom wysokiej impedancji. <m>'
'Generalnie na multiplekser analogowy należy patrzeć jak na <m> zespół przełączników przełączających nam kable naszej magistrali. <m>'
]
},
{
'image': [
[0.0, eduMovie.convertFile("bufory.sch", negate=True)],
["tristate", eduMovie.convertFile("bramka_transmisyjna.sch", negate=True)],
],
'text' : [
'Oprócz układów kompresujących i dekompresujących ilość linii mamy też <m> bardziej standardowe układy służące sterowaniu linii nazywane buforami. <m>'
'Bufory oznaczamy w postaci trójkąta, takiego jak pokazano na ekranie. <m>'
'Trójkąt ten wskazuje kierunek przepływu sygnału przez bufor. <m>'
'Bufory pełnią różne funkcje – mogą służyć do zwiększenia wydajności prądowej <m> wyjścia, do regeneracji sygnału lub wymuszenia kierunku jego podawania. <m>'
'Regeneracja sygnału może być potrzebna w związku z występowaniem <m> zjawisk pasożytniczych nawet na zwykłym kawałku przewodnika <m>'
'– na przykład kablu (czyli głównie rezystancją i pojemnością tego kabla), <m> które prowadzą do degradacji sygnału zarówno pod względem swojej wielkości, <m> jak i ostrości narastania zboczy. <m>'
'Warto tutaj zwrócić uwagę iż nawet zwykła bramka NOT będzie regenerować sygnał, gdyż sygnał z wejścia nie przepływa bezpośrednio na wyjście, <m> tylko jest odtwarzany z poziomu zasilania i masy, czyli regenerowany. <m>'
'Ponadto taka bramka zapewnia też brak możliwości przeniesienia sygnału <m> z wyjścia na wejście ze względu na izolowaną bramkę. <m>'
'I najprostsze bufory nie odwracające to właśnie <m> dwie połączone szeregowo bramki NOT. <m>'
'Bufory mogą być także stosowane w celu zamiany linii <m> na linię typu open collector lub 3 stanową. <mark name="tristate" />'
'Bufor trójstanowy decyduje o przepuszczeniu lub nie sygnału, <m> podstawą jego działania jest bramka transmisyjna, <m> której schemat widoczny teraz na ekranie. <m>'
'Jest to zasadniczo jedyna bramka która ma charakter symetryczny <m> – przewodzi sygnał w obie strony i możemy zamienić wejście z wyjściem. <m>'
'Samodzielnie używana jest w multiplekserach analogowych, <m> a w buforach trójstanowych występuje w kombinacji <m> (np. z jednym lub dwoma bramkami not), <m>'
'aby zapewnić regenerację sygnału i uniemożliwć przekazywanie go w drugą stronę. <m>'
'Warto zwrócić uwagę że użyte do jej konstrukcji tranzystory mają 4 wyprowadzenia. <m>'
'Tak naprawdę w technologii MOSFET przewodzenie jest regulowane <m> napięciem pomiędzy bramką tranzystora a podłożem. <m>'
'Jednak w większości | |
OpenGL libraries. The parameters are taken from the current analysis parameters in the current scenario. The preview image used is the one chosen in the Camera menu.\n'
try:
extent = analysis[geoparams[0]]
extent_proj = analysis[geoparams[1]]
res = float(analysis[geoparams[2]])
dem = analysis[geoparams[3]]
C = analysis[geoparams[4]]
C_proj = analysis[geoparams[5]]
Cz = float(analysis[geoparams[6]])
hd = float(analysis[geoparams[7]])
td = float(analysis[geoparams[8]])
vd = float(analysis[geoparams[9]])
f = float(analysis[geoparams[10]])*0.001
s = float(analysis[geoparams[11]])
interpolate = analysis[geoparams[12]]
flat = analysis[geoparams[13]]
extent_proj = geoopts[1][int(extent_proj)]
dem = geoopts[3][int(dem)]
C_proj = geoopts[5][int(C_proj)]
origin = analysis[corrparams[0]]
ax = analysis[corrparams[1]]
ay = analysis[corrparams[2]]
except:
message += '\nGeorectification parameters are not found in the current analysis or an unexpected error has occured. Check the analysis type and parameters and try again.\n'
tkMessageBox.showwarning('Georectification preview',message)
return 0
extent = map(float,extent.split(';'))
C = map(float,C.split(';'))
C = transSingle(C,C_proj)
if extent != [0,0,0,0]:
if extent_proj == "ETRS-TM35FIN(EPSG:3067) GEOID with Camera at Origin":
extent[0] += C[0]
extent[2] += C[0]
extent[1] += C[1]
extent[3] += C[1]
extent_proj = "ETRS-TM35FIN(EPSG:3067)"
extent = transExtent(extent,extent_proj)
[x1,y1,x2,y2] = extent
size = int(((y2-y1)/res)*((x2-x1)/res))
goodsize = 1000000.
res = res*size/goodsize
size = goodsize
analysis[geoparams[2]] = res
georectificationTool(self,self.Message,self.PictureFileName.get(),analysis,geoparams,geoopts,corrparams,self.memorylimit.get())
# message += '\nThere are over ' + str(size) + ' points in the real world to be simulated for the preview. It is adviced to keep the number of points under half million, otherwise the process may take too long.\n'
# message += '\nThe spatial resolution or the spatial extent can be decreased to decrease number of points.\n'
# message += '\nDo you wish the continue?'
#
#
# if tkMessageBox.askyesno('Georectification preview',message):
# georectificationTool(self,self.Message,self.PictureFileName.get(),analysis,geoparams,geoopts,corrparams,self.memorylimit.get())
def Menu_Base(self):
    """Lay out the static chrome of the window: background panels, the
    scenario banner with prev/next buttons, the parchment areas, the menu
    header and the log bar.  Also records the menu origin offsets
    (MenuOSX/MenuOSY) used by the per-menu builders.

    NOTE(review): widget placement order matters for stacking; do not
    reorder the Label creations below.
    """
    greentexture = Tkinter.PhotoImage(file=os.path.join(ResourcesDir,'green_grad_inv.gif'))
    bluetexture = Tkinter.PhotoImage(file=os.path.join(ResourcesDir,'blue_grad_vert.gif'))
    #Previous and next analysis shadow
    Label = Tkinter.Label(self,wraplength=self.MenuX*0.8,text="",anchor='w',bg="RoyalBlue4",relief=Tkinter.GROOVE)
    # Keep a reference to the image so it is not garbage-collected.
    Label.photo = bluetexture
    Label.place(x=self.TableX+self.FolderX,y=0,height=self.PasAnaY,width=self.WindowX-2*self.TableX-2*self.FolderX)
    Label = Tkinter.Label(self,wraplength=self.MenuX*0.8,text="",anchor='w',bg="RoyalBlue4",relief=Tkinter.GROOVE)
    Label.place(x=self.TableX+self.FolderX,y=2*self.TableY+3*self.FolderY+self.BannerY-self.PasAnaY+self.MenuY+self.LogY,height=self.PasAnaY,width=self.WindowX-2*self.TableX-2*self.FolderX)
    #Folder
    Label = Tkinter.Label(self,wraplength=self.MenuX*0.8,text="",bg='DarkOrange4',anchor='w',relief=Tkinter.GROOVE)
    #Label.photo = greentexture
    #Label.place(width=1000,height=3000)
    Label.place(x=self.TableX,y=self.TableY,width=self.WindowX-2*self.TableX,height=self.WindowY-2*self.TableY-self.LogY)
    #Banner: previous / next analysis arrows and the scenario name entry.
    Label = Tkinter.Button(self,wraplength=self.MenuX*0.8,text="<",command=self.AnalysisNoMinus,relief=Tkinter.GROOVE)
    Label.place(x=self.TableX+self.FolderX,y=self.TableY+self.FolderY,width=self.BannerX,height=self.BannerY)
    Label = Tkinter.Button(self,wraplength=self.MenuX*0.8,text=">",command=self.AnalysisNoPlus,relief=Tkinter.GROOVE)
    Label.place(x=self.WindowX-self.TableX-self.FolderX-self.BannerX,y=self.TableY+self.FolderY,width=self.BannerX,height=self.BannerY)
    Label = Tkinter.Entry(self,textvariable=self.ScenarioNameVariable,fg="white",bg="RoyalBlue4",relief=Tkinter.GROOVE,justify='center')
    Label.place(x=self.TableX+self.FolderX+self.BannerX,y=self.TableY+self.FolderY,width=self.WindowX-2*self.TableX-2*self.FolderX-2*self.BannerX,height=self.BannerY) #Passive Parchment
    #Passive Parchment
    #Label = Tkinter.Label(self,wraplength=self.MenuX*0.8,text="",anchor='w',bg="white",image=greentexture,relief=Tkinter.GROOVE)
    #Label.photo = greentexture
    #Label.place(x=self.TableX+self.FolderX,y=self.TableY+2*self.FolderY+self.BannerY+self.PasParchIn,width=self.PasParchX,height=self.WindowY-2*self.TableY-3*self.FolderY-self.BannerY-2*self.PasParchIn-self.LogY)
    #Active Parchment
    Label = Tkinter.Label(self,wraplength=self.MenuX*0.8,text="",anchor='w',bg="white",image=greentexture,relief=Tkinter.GROOVE)
    Label.photo = greentexture
    Label.place(x=self.TableX+self.FolderX+self.PasParchX,y=self.TableY+2*self.FolderY+self.BannerY,width=self.MenuX+self.MenuHeaderX,height=self.WindowY-2*self.TableY-3*self.FolderY-self.BannerY-self.LogY)
    #MenuHeader
    Label = Tkinter.Label(self,textvariable=self.ActiveMenu,anchor='c',bg="RoyalBlue4",fg="white",wraplength=1,relief=Tkinter.GROOVE)
    Label.place(x=self.TableX+self.FolderX+self.PasParchX,y=self.TableY+2*self.FolderY+self.BannerY,height=self.MenuY,width=self.MenuHeaderX)
    #Log
    Label = Tkinter.Label(self,textvariable=self.LogLL,wraplength=self.WindowX-2*self.TableX,relief=Tkinter.GROOVE,fg="white",bg="RoyalBlue4",anchor="c")
    Label.place(x=self.TableX,y=2*self.TableY+3*self.FolderY+self.BannerY-self.PasAnaY+self.MenuY,height=self.LogY,width=self.WindowX-2*self.TableX)
    #Menu origin: top-left corner where per-menu items are placed.
    self.MenuOSX = self.TableX+self.FolderX+self.PasParchX+self.MenuHeaderX
    self.MenuOSY = self.TableY+2*self.FolderY+self.BannerY
def Menu_Main(self):
    """Build the main menu: eight equally spaced navigation buttons."""
    self.ClearMenu()
    self.ActiveMenu.set("Main Menu")
    self.Menu_Prev("Main Menu","self.Menu_Main")
    self.MaskingPolygonPen.set(0)
    NItems = 8
    space = 0.02
    # (attribute name, button text, command) in display order.  The
    # attribute names are kept exactly as before (note the non-sequential
    # numbering) because other code references self.MenuItemN.
    entries = [
        ('MenuItem1',  "Camera",        self.Menu_Main_Camera),
        ('MenuItem2',  "Temporal",      self.Menu_Main_Temporal),
        ('MenuItem3',  "Thresholds",    self.Menu_Main_Thresholds),
        ('MenuItem4',  "Masking/ROIs",  self.Menu_Main_Masking_Polygonic),
        ('MenuItem6',  "Analyses",      self.Menu_Main_Calculations),
        ('MenuItem10', 'Results',       self.Menu_Main_Output),
        ('MenuItem9',  "Run All",       self.RunAnalyses),
        ('MenuItem8',  'Result Viewer', self.Menu_Main_Results),
    ]
    itemHeight = self.MenuY*(1.0-(NItems+1)*space)/NItems
    for Item, (attr, caption, command) in enumerate(entries, 1):
        button = Tkinter.Button(self, wraplength=self.MenuX*0.8, text=caption,
                                anchor="c", command=command,
                                activebackground='RoyalBlue4',
                                activeforeground='white')
        button.place(x=self.MenuOSX+self.MenuX*0.1,
                     y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*itemHeight,
                     width=self.MenuX*0.8, height=itemHeight)
        setattr(self, attr, button)
def Menu_Main_Camera(self):
    """Build the Camera menu: network/camera pickers, the preview toggle
    and the picture / directory / metadata actions."""
    self.ClearMenu()
    self.ActiveMenu.set("Camera")
    self.Menu_Prev("Main Menu","self.Menu_Main")
    self.callbackCameraName(0,0)
    NItems = 10
    space = 0.02
    Item = 2
    self.MenuItem11 = Tkinter.Label(self,wraplength=self.MenuX*0.4,text="Camera Network",anchor='c',bg=self.MenuTitleBgColor,fg=self.MenuTitleTextColor)
    self.MenuItem11.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
    Item = 3
    # Dropdown of the configured camera networks.
    self.MenuItem10 = Tkinter.OptionMenu(self,self.NetworkNameVariable,*sources.listNetworks(self.Message,self.networklist))
    self.MenuItem10.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
    Item = 4
    self.MenuItem12 = Tkinter.Label(self,wraplength=self.MenuX*0.4,text="Camera",anchor='c',bg=self.MenuTitleBgColor,fg=self.MenuTitleTextColor)
    self.MenuItem12.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
    Item = 5
    # Dropdown of cameras restricted to the currently selected network.
    self.MenuItem1 = Tkinter.OptionMenu(self,self.CameraNameVariable,*sources.listSources(self.Message,self.sourcelist,network=self.NetworkNameVariable.get()))
    self.MenuItem1.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
    Item = 6
    self.MenuItem2 = Tkinter.Checkbutton(self,variable=self.MenuItem2Switch,wraplength=self.MenuX*0.7,height=self.CheckButtonY,width=self.CheckButtonX,text="Preview")
    self.MenuItem2.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
    Item = 7
    self.MenuItem3 = Tkinter.Button(self,wraplength=self.MenuX*0.8,text="Choose Picture for Preview",anchor="c",command=self.Menu_Main_Camera_Picture)
    self.MenuItem3.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
    Item = 8
    self.MenuItem5 = Tkinter.Button(self,wraplength=self.MenuX*0.8,text="Open local image directory",anchor="c",command=self.Menu_Main_Camera_Open)
    self.MenuItem5.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
    Item = 9
    self.MenuItem4 = Tkinter.Button(self,wraplength=self.MenuX*0.8,text="Camera metadata...",anchor="c",command=self.Menu_Main_Camera_Metadata)
    self.MenuItem4.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
    # HACK: MenuEnabler wiring is stored as source text and exec'd; this
    # is exec of an internally generated string (not user input), but it
    # defeats static analysis — consider replacing with a direct call.
    self.MenuEnablerFunc.set("self.MenuEnabler([2,[3],['self.PreviewCanvasSwitch'],['0'],['self.PreviewCanvasSwitch.set(self.MenuItem2Switch.get())']])")
    exec(self.MenuEnablerFunc.get())
def Menu_Main_Camera_Open(self):
    """Open the image directory of the current analysis' camera in the
    system file browser.

    Returns False (after a warning dialog) when the camera is a
    temporarily added LOCAL source whose directory does not exist.
    """
    source_ = self.setup[self.AnalysisNoVariable.get()-1]['source']
    source = sources.getProxySource(self.Message,source_,self.proxylist)
    self.makeDirStorage()
    if source['protocol'] == 'LOCAL':
        if 'temporary' in source and source['temporary']:
            tkMessageBox.showwarning('No directory','Directory does not exist. This camera was temporarily added and the images of the camera refer to local directories and they do not exist. It probably means that the setup file loaded is saved in another computer with a camera network or camera which is not defined identically in this computer. To fix it, load the setup file again, confirm the permanent save of the camera/network and the open camera network manager and set up directories accordingly.')
            return False
        else:
            webbrowser.open('file:///'+source['path'])
    else:
        if 'temporary' in source and source['temporary']:
            # Temporary remote source: images live under the tmp download dir.
            # NOTE(review): this branch uses bare validateName(...) while the
            # permanent branch uses parsers.validateName(...) — confirm both
            # names resolve to the same function at module scope.
            webbrowser.open('file:///'+os.path.join(os.path.join(TmpDir,'tmp_images'),validateName(source['network'])+'-'+source['protocol']+'-'+source['host']+'-'+validateName(source['username'])+'-'+validateName(source['path'])))
        else:
            webbrowser.open('file:///'+os.path.join(self.imagespath.get(),source['networkid']+'-'+parsers.validateName(source['network']),parsers.validateName(source['name'])))
def Menu_Main_Camera_Metadata(self):
string = ''
source = sources.getSource(self.Message,self.sourcelist,self.CameraNameVariable.get())
for key in source:
if key not in source_metadata_hidden:
if key in source_metadata_names:
if 'time' in key:
string += source_metadata_names[key] + ': ' + str(parsers.strptime2(source[key])[0]) +'\n'
else:
string += source_metadata_names[key] + ': ' + str(source[key]) +'\n'
else:
string += key + ': ' + str(source[key]) +'\n'
source_ = sources.getSource(self.Message,self.sourcelist,self.CameraNameVariable.get())
source = sources.getProxySource(self.Message,source_,self.proxylist)
if source == source_:
string += 'Proxy metadata\n'
for key in source:
if key not in source_metadata_hidden:
if key in source_metadata_names:
if 'time' in key:
string += source_metadata_names[key] + ': ' + str(parsers.strptime2(source[key])[0]) +'\n'
else:
string += source_metadata_names[key] + ': ' + str(source[key]) +'\n'
else:
string += key + ': ' + str(source[key]) +'\n'
tkMessageBox.showwarning('Camera Metadata',string)
def Menu_Main_Camera_Picture(self):
source_ = self.setup[self.AnalysisNoVariable.get()-1]['source']
source = sources.getProxySource(self.Message,source_,self.proxylist)
if source['protocol'] == 'LOCAL' and 'temporary' in source and source['temporary']:
tkMessageBox.showwarning('No directory','Directory does not exist. This camera was temporarily added and the images of the camera refer to local directories and they do not exist. It probably means that the setup file loaded is saved in another computer with a camera network or camera which is not defined identically in this computer. To fix it, load the setup file again, confirm the permanent save of the camera/network and the open camera network manager and set up directories accordingly.')
return False
self.ClearMenu()
self.ActiveMenu.set("Choose Picture for Preview")
self.Menu_Prev("Camera","self.Menu_Main_Camera")
self.Menu_Choose_Picture()
def Menu_Choose_Picture(self):
NItems = 10
space = 0.02
Item = 1
self.MenuItem2 = Tkinter.Label(self,wraplength=self.MenuX*0.8,text="Double click to choose:",anchor='c',bg=self.MenuTitleBgColor,fg=self.MenuTitleTextColor)
self.MenuItem2.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
Item = 2
self.MenuItem8 = Tkinter.Scrollbar(self)
self.MenuItem8.place(x=self.MenuX*0.8-self.ScrollbarX+self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.ScrollbarX,height=space*6+7*self.MenuY*(1.0-(NItems+1)*space)/NItems)
self.MenuItem1 = Tkinter.Listbox(self,yscrollcommand=self.MenuItem8.set)
self.MenuItem8.config(command=self.MenuItem1.yview)
self.ChoosePictureKeywords = Tkinter.StringVar()
self.ChoosePictureKeywords.set("Keywords")
self.Menu_Main_Camera_Picture_Filter()
self.MenuItem1.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8-self.ScrollbarX,height=space*6+7*self.MenuY*(1.0-(NItems+1)*space)/NItems)
self.MenuItem1.bind("<Double-Button-1>", self.ChangePictureFileName)
Item = 8
self.MenuItem4 = Tkinter.Entry(self,textvariable=self.ChoosePictureKeywords,justify="center")
self.MenuItem4.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
Item = 9
self.MenuItem5 = Tkinter.Button(self,wraplength=self.MenuX*0.8,text="Filter by keywords",anchor="c",command=self.Menu_Main_Camera_Picture_Filter)
self.MenuItem5.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
Item = 10
self.MenuItem3 = Tkinter.Button(self,wraplength=self.MenuX*0.8,text="Download pictures...",anchor="c",command=self.FetchCurrentImages)
self.MenuItem3.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
    def Menu_Main_Camera_Picture_Filter(self):
        """Refill the picture listbox, keeping only names that contain every
        keyword from the filter entry.

        The placeholder text "Keywords" counts as 'no filter'.  Returns False
        when the camera is a temporarily added LOCAL source (no directory).
        """
        if self.ChoosePictureKeywords.get() == "Keywords":
            # Placeholder still in the entry -> empty-filter sentinel.
            keys = ""
        else:
            keys = self.ChoosePictureKeywords.get().split()
        # NOTE(review): the bare `except` below silently swallows ALL errors
        # (missing widget, listing failure, network error) — consider
        # narrowing it or at least logging.
        try:
            self.MenuItem1.delete(0,"end")
            source_ = self.setup[self.AnalysisNoVariable.get()-1]['source']
            source = sources.getProxySource(self.Message,source_,self.proxylist)
            if source['protocol'] == 'LOCAL':
                if 'temporary' in source and source['temporary']:
                    tkMessageBox.showwarning('No directory','Directory does not exist. This camera was temporarily added and the images of the camera refer to local directories and they do not exist. It probably means that the setup file loaded is saved in another computer with a camera network or camera which is not defined identically in this computer. To fix it, load the setup file again, confirm the permanent save of the camera/network and the open camera network manager and set up directories accordingly.')
                    return False
                # Query the image list (online); element [0] is the file list.
                imglist = fetchers.fetchImages(self, self.Message, source, self.proxy, self.connection, self.imagespath.get(), [0,0,0,0, "All"], online=True, care_tz = self.TimeZoneConversion.get())[0]
                for i,v in enumerate(imglist):
                    # Keep only the file names, not full paths.
                    imglist[i] = os.path.split(v)[-1]
            else:
                if 'temporary' in source and source['temporary']:
                    # NOTE(review): bare validateName/TmpDir here but
                    # parsers.validateName below — confirm both are in scope.
                    imglist = os.listdir(os.path.join(os.path.join(TmpDir,'tmp_images'),validateName(source['network'])+'-'+source['protocol']+'-'+source['host']+'-'+validateName(source['username'])+'-'+validateName(source['path'])))
                else:
                    imglist = os.listdir(os.path.join(self.imagespath.get(),source['networkid']+'-'+parsers.validateName(source['network']),parsers.validateName(source['name'])))
            imglist.sort()
            for item in imglist:
                if keys == [] or keys=="":
                    # No filter: show everything.
                    self.MenuItem1.insert("end",item)
                else:
                    # Keep the item only if it contains every keyword.
                    inc = True
                    for key in keys:
                        if key not in item:
                            inc = False
                            break
                    if inc:
                        self.MenuItem1.insert("end",item)
        except:
            pass
def Menu_Main_Temporal(self):
self.ClearMenu()
self.ActiveMenu.set("Temporal")
self.Menu_Prev("Main Menu","self.Menu_Main")
NItems = 10
space = 0.02
Item = 4
self.MenuItem1 = Tkinter.Label(self,wraplength=self.MenuX*0.8,text="Temporal selection:",anchor='c',bg=self.MenuTitleBgColor,fg=self.MenuTitleTextColor)
self.MenuItem1.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
Item = 5
self.MenuItem2 = Tkinter.OptionMenu(self,self.TemporalModeVariable,*temporal_modes)
self.MenuItem2.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
Item = 6
self.MenuItem3 = Tkinter.Button(self,wraplength=self.MenuX*0.8,text="Dates",anchor="c",command=self.Menu_Main_Temporal_Dates)
self.MenuItem3.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
Item = 7
self.MenuItem4 = Tkinter.Button(self,wraplength=self.MenuX*0.8,text="Time of the day",anchor="c",command=self.Menu_Main_Temporal_Times)
self.MenuItem4.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
self.callbackTemporalMode()
    def callbackTemporalMode(self,*args):
        """React to a change of the temporal-selection mode.

        Resets the date/time variables the chosen mode does not use back to
        the scenario defaults, and enables/disables the 'Dates' and 'Time of
        the day' buttons when the Temporal menu is visible.  Re-renders the
        Dates / Time submenus when their inputs become irrelevant.
        """
        # Fully implicit ranges: reset all four bounds, disable both buttons.
        if self.TemporalModeVariable.get() in ['All','Latest 1 hour','Latest image only','Latest 48 hours','Latest 24 hours','Last 48 hours','Last 24 hours']:
            self.DateStartVariable.set(scenario_def['temporal'][0])
            self.DateEndVariable.set(scenario_def['temporal'][1])
            self.TimeStartVariable.set(scenario_def['temporal'][2])
            self.TimeEndVariable.set(scenario_def['temporal'][3])
            if self.ActiveMenu.get() == "Temporal":
                self.MenuItem3.config(state='disabled')
                self.MenuItem4.config(state='disabled')
            # Leave a now-irrelevant submenu by going back to the Temporal menu.
            if (self.ActiveMenu.get() == "Dates" or self.ActiveMenu.get() == "Time of the day"):
                self.Menu_Main_Temporal()
        # Modes with implicit dates but user-chosen time of day.
        if self.TemporalModeVariable.get() in ['Time of day','Yesterday only','Today only','Last one year','Last one week','Last one month','Latest one year','Latest one week','Latest one month']:
            self.DateStartVariable.set(scenario_def['temporal'][0])
            self.DateEndVariable.set(scenario_def['temporal'][1])
            if self.ActiveMenu.get() == "Temporal":
                self.MenuItem3.config(state='disabled')
                self.MenuItem4.config(state='normal')
            if self.ActiveMenu.get() == "Dates":
                self.Menu_Main_Temporal()
        # Fully explicit mode: both submenus available, nothing reset.
        if self.TemporalModeVariable.get() in ['Date and time intervals']:
            if self.ActiveMenu.get() == "Temporal":
                self.MenuItem3.config(state='normal')
                self.MenuItem4.config(state='normal')
        # Open-ended end: only the end date is implicit.
        if self.TemporalModeVariable.get() in ['Earliest date and time intervals']:
            self.DateEndVariable.set(scenario_def['temporal'][1])
            if self.ActiveMenu.get() == "Temporal":
                self.MenuItem3.config(state='normal')
                self.MenuItem4.config(state='normal')
        # Open-ended start: only the start date is implicit.
        if self.TemporalModeVariable.get() in ['Latest date and time intervals']:
            self.DateStartVariable.set(scenario_def['temporal'][0])
            if self.ActiveMenu.get() == "Temporal":
                self.MenuItem3.config(state='normal')
                self.MenuItem4.config(state='normal')
def Menu_Main_Temporal_Dates(self):
self.ClearMenu()
self.ActiveMenu.set("Dates")
self.Menu_Prev("Temporal","self.Menu_Main_Temporal")
NItems = 10
space = 0.02
Item = 3
if self.TemporalModeVariable.get() != 'Latest date and time intervals':
Item += 1
self.MenuItem1 = Tkinter.Label(self,wraplength=self.MenuX*0.8,text="Start:",anchor='c',bg=self.MenuTitleBgColor,fg=self.MenuTitleTextColor)
self.MenuItem1.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
Item += 1
self.MenuItem3 = Tkinter.Entry(self,justify="center",width=10,textvariable=self.DateStartVariable)
self.MenuItem3.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
if self.TemporalModeVariable.get() != 'Earliest date and time intervals':
Item += 1
self.MenuItem4 = Tkinter.Label(self,wraplength=self.MenuX*0.8,text="End:",anchor='c',bg=self.MenuTitleBgColor,fg=self.MenuTitleTextColor)
self.MenuItem4.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
Item += 1
self.MenuItem6 = Tkinter.Entry(self,justify="center",width=10,textvariable=self.DateEndVariable)
self.MenuItem6.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
def Menu_Main_Temporal_Times(self):
self.ClearMenu()
self.ActiveMenu.set("Time of the day")
self.Menu_Prev("Temporal","self.Menu_Main_Temporal")
NItems = 10
space = 0.02
Item = 4
self.MenuItem1 = Tkinter.Label(self,wraplength=self.MenuX*0.8,text="Start:",anchor='c',bg=self.MenuTitleBgColor,fg=self.MenuTitleTextColor)
self.MenuItem1.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
Item = 5
self.MenuItem3 = Tkinter.Entry(self,justify="center",width=10,textvariable=self.TimeStartVariable)
self.MenuItem3.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
Item = 6
self.MenuItem4 = Tkinter.Label(self,wraplength=self.MenuX*0.8,text="End:",anchor='c',bg=self.MenuTitleBgColor,fg=self.MenuTitleTextColor)
self.MenuItem4.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
Item = 7
self.MenuItem6 = Tkinter.Entry(self,justify="center",width=10,textvariable=self.TimeEndVariable)
self.MenuItem6.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
def Menu_Main_Thresholds(self):
self.ClearMenu()
self.ActiveMenu.set("Thresholds")
self.Menu_Prev("Main Menu","self.Menu_Main")
NItems = 15
space = 0.02
Item = 1
self.MenuItem8 = Tkinter.Label(self,wraplength=self.MenuX*0.8,text="Image thresholds",anchor='c',bg=self.MenuTitleBgColor,fg=self.MenuTitleTextColor)
self.MenuItem8.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
Item += 1
self.MenuItem1 = Tkinter.Button(self,wraplength=self.MenuX*0.8,text="Brightness",anchor="c",command=self.Menu_Main_Thresholds_Brightness,activebackground='seashell',activeforeground='black')
self.MenuItem1.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
Item += 1
self.MenuItem2 = Tkinter.Button(self,wraplength=self.MenuX*0.8,text="Luminance",anchor="c",command=self.Menu_Main_Thresholds_Luminance,activebackground='beige',activeforeground='black')
self.MenuItem2.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
Item += 1
self.MenuItem13 = Tkinter.Button(self,wraplength=self.MenuX*0.8,text="Red Fraction",anchor="c",command=self.Menu_Main_Thresholds_RedFI,activebackground='red3',activeforeground='white')
self.MenuItem13.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
Item += 1
self.MenuItem14 = Tkinter.Button(self,wraplength=self.MenuX*0.8,text="Green Fraction",anchor="c",command=self.Menu_Main_Thresholds_GreenFI,activebackground='green4',activeforeground='white')
self.MenuItem14.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
Item += 1
self.MenuItem15 = Tkinter.Button(self,wraplength=self.MenuX*0.8,text="Blue Fraction",anchor="c",command=self.Menu_Main_Thresholds_BlueFI,activebackground='blue2',activeforeground='white')
self.MenuItem15.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
Item += 1
self.MenuItem9 = Tkinter.Label(self,wraplength=self.MenuX*0.8,text="ROI thresholds",anchor='c',bg=self.MenuTitleBgColor,fg=self.MenuTitleTextColor)
self.MenuItem9.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
Item += 1
self.MenuItem11 = Tkinter.Button(self,wraplength=self.MenuX*0.8,text="Red Fraction",anchor="c",command=self.Menu_Main_Thresholds_RedF,activebackground='red3',activeforeground='white')
self.MenuItem11.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
Item += 1
self.MenuItem3 = Tkinter.Button(self,wraplength=self.MenuX*0.8,text="Green Fraction",anchor="c",command=self.Menu_Main_Thresholds_GreenF,activebackground='green4',activeforeground='white')
self.MenuItem3.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
Item += 1
self.MenuItem4 = Tkinter.Button(self,wraplength=self.MenuX*0.8,text="Blue Fraction",anchor="c",command=self.Menu_Main_Thresholds_BlueF,activebackground='blue2',activeforeground='white')
self.MenuItem4.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
Item += 1
self.MenuItem10 = Tkinter.Label(self,wraplength=self.MenuX*0.8,text="Pixel thresholds",anchor='c',bg=self.MenuTitleBgColor,fg=self.MenuTitleTextColor)
self.MenuItem10.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
Item += 1
self.MenuItem5 = Tkinter.Button(self,wraplength=self.MenuX*0.8,text="Red Channel",anchor="c",command=self.Menu_Main_Thresholds_Red,activebackground='red3',activeforeground='white')
self.MenuItem5.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
Item += 1
self.MenuItem6 = Tkinter.Button(self,wraplength=self.MenuX*0.8,text="Green Channel",anchor="c",command=self.Menu_Main_Thresholds_Green,activebackground='green4',activeforeground='white')
self.MenuItem6.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
Item += 1
self.MenuItem7 = Tkinter.Button(self,wraplength=self.MenuX*0.8,text="Blue Channel",anchor="c",command=self.Menu_Main_Thresholds_Blue,activebackground='blue2',activeforeground='white')
self.MenuItem7.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
Item += 1
self.MenuItem12 = Tkinter.Button(self,wraplength=self.MenuX*0.8,text="Grey Composite",anchor="c",command=self.Menu_Main_Thresholds_Grey,activebackground='seashell',activeforeground='black')
self.MenuItem12.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
def Menu_Main_Thresholds_Brightness(self):
self.ClearMenu()
self.ActiveMenu.set("Brightness")
self.Menu_Prev("Thresholds","self.Menu_Main_Thresholds")
NItems = 10
space = 0.02
Item = 3
self.MenuItem1 = Tkinter.Label(self,wraplength=self.MenuX*0.8,text="Minimum:",anchor='c',bg=self.MenuTitleBgColor,fg=self.MenuTitleTextColor)
self.MenuItem1.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
Item = 4
self.MenuItem3 = Tkinter.Scale(self, from_=0, to=1,orient="horizontal",resolution=0.005, width=self.CheckButtonY,variable=self.BrightnessLTVariable)
self.MenuItem3.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
Item = 6
self.MenuItem4 = Tkinter.Label(self,wraplength=self.MenuX*0.8,text="Maximum:",anchor='c',bg=self.MenuTitleBgColor,fg=self.MenuTitleTextColor)
self.MenuItem4.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
Item = 7
self.MenuItem6 = Tkinter.Scale(self, from_=0, to=1,orient="horizontal",resolution=0.005, width=self.CheckButtonY,variable=self.BrightnessUTVariable)
self.MenuItem6.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
def Menu_Main_Thresholds_Luminance(self):
self.ClearMenu()
self.ActiveMenu.set("Luminance")
self.Menu_Prev("Thresholds","self.Menu_Main_Thresholds")
NItems = 10
space = 0.02
Item = 3
self.MenuItem1 = Tkinter.Label(self,wraplength=self.MenuX*0.8,text="Minimum:",anchor='c',bg=self.MenuTitleBgColor,fg=self.MenuTitleTextColor)
self.MenuItem1.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
Item = 4
self.MenuItem3 = Tkinter.Scale(self, from_=0, to=1,orient="horizontal",resolution=0.005, width=self.CheckButtonY,variable=self.LuminanceLTVariable)
self.MenuItem3.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
Item = 6
self.MenuItem4 = Tkinter.Label(self,wraplength=self.MenuX*0.8,text="Maximum:",anchor='c',bg=self.MenuTitleBgColor,fg=self.MenuTitleTextColor)
self.MenuItem4.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
Item = 7
self.MenuItem6 = Tkinter.Scale(self, from_=0, to=1,orient="horizontal",resolution=0.005, width=self.CheckButtonY,variable=self.LuminanceUTVariable)
self.MenuItem6.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
def Menu_Main_Thresholds_Red(self):
self.ClearMenu()
self.ActiveMenu.set("Red Channel")
self.Menu_Prev("Thresholds","self.Menu_Main_Thresholds")
NItems = 10
space = 0.02
Item = 3
self.MenuItem1 = Tkinter.Label(self,wraplength=self.MenuX*0.8,text="Minimum:",anchor='c',bg=self.MenuTitleBgColor,fg=self.MenuTitleTextColor)
self.MenuItem1.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
Item = 4
self.MenuItem3 = Tkinter.Scale(self, from_=0, to=255,orient="horizontal",resolution=1, width=self.CheckButtonY, variable=self.RedLTVariable)
self.MenuItem3.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
Item = 6
self.MenuItem4 = Tkinter.Label(self,wraplength=self.MenuX*0.8,text="Maximum:",anchor='c',bg=self.MenuTitleBgColor,fg=self.MenuTitleTextColor)
self.MenuItem4.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
Item = 7
self.MenuItem6 = Tkinter.Scale(self, from_=0, to=255,orient="horizontal",resolution=1, width=self.CheckButtonY, variable=self.RedUTVariable)
self.MenuItem6.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
def Menu_Main_Thresholds_Green(self):
self.ClearMenu()
self.ActiveMenu.set("Green Channel")
self.Menu_Prev("Thresholds","self.Menu_Main_Thresholds")
NItems = 10
space = 0.02
Item = 3
self.MenuItem1 = Tkinter.Label(self,wraplength=self.MenuX*0.8,text="Minimum:",anchor='c',bg=self.MenuTitleBgColor,fg=self.MenuTitleTextColor)
self.MenuItem1.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
Item = 4
self.MenuItem3 = Tkinter.Scale(self, from_=0, to=255,orient="horizontal",resolution=1, width=self.CheckButtonY, variable=self.GreenLTVariable)
self.MenuItem3.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
Item = 6
self.MenuItem4 = Tkinter.Label(self,wraplength=self.MenuX*0.8,text="Maximum:",anchor='c',bg=self.MenuTitleBgColor,fg=self.MenuTitleTextColor)
self.MenuItem4.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
Item = 7
self.MenuItem6 = Tkinter.Scale(self, from_=0, to=255,orient="horizontal",resolution=1, width=self.CheckButtonY, variable=self.GreenUTVariable)
self.MenuItem6.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
def Menu_Main_Thresholds_Blue(self):
self.ClearMenu()
self.ActiveMenu.set("Blue Channel")
self.Menu_Prev("Thresholds","self.Menu_Main_Thresholds")
NItems = 10
space = 0.02
Item = 3
self.MenuItem1 = Tkinter.Label(self,wraplength=self.MenuX*0.8,text="Minimum:",anchor='c',bg=self.MenuTitleBgColor,fg=self.MenuTitleTextColor)
self.MenuItem1.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
Item = 4
self.MenuItem3 = Tkinter.Scale(self, from_=0, to=255,orient="horizontal",resolution=1, width=self.CheckButtonY, variable=self.BlueLTVariable)
self.MenuItem3.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
Item = 6
self.MenuItem4 = Tkinter.Label(self,wraplength=self.MenuX*0.8,text="Maximum:",anchor='c',bg=self.MenuTitleBgColor,fg=self.MenuTitleTextColor)
self.MenuItem4.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
Item = 7
self.MenuItem6 = Tkinter.Scale(self, from_=0, to=255,orient="horizontal",resolution=1, width=self.CheckButtonY, variable=self.BlueUTVariable)
self.MenuItem6.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
def Menu_Main_Thresholds_Grey(self):
self.ClearMenu()
self.ActiveMenu.set("Grey Composite")
self.Menu_Prev("Thresholds","self.Menu_Main_Thresholds")
NItems = 10
space = 0.02
Item = 3
self.MenuItem1 = Tkinter.Label(self,wraplength=self.MenuX*0.8,text="Minimum:",anchor='c',bg=self.MenuTitleBgColor,fg=self.MenuTitleTextColor)
self.MenuItem1.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
Item = 4
self.MenuItem3 = Tkinter.Scale(self, from_=0, to=255,orient="horizontal",resolution=1, width=self.CheckButtonY, variable=self.GreyLTVariable)
self.MenuItem3.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
Item = 6
self.MenuItem4 = Tkinter.Label(self,wraplength=self.MenuX*0.8,text="Maximum:",anchor='c',bg=self.MenuTitleBgColor,fg=self.MenuTitleTextColor)
self.MenuItem4.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
Item = 7
self.MenuItem6 = Tkinter.Scale(self, from_=0, to=255,orient="horizontal",resolution=1, width=self.CheckButtonY, variable=self.GreyUTVariable)
self.MenuItem6.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
def Menu_Main_Thresholds_RedF(self):
self.ClearMenu()
self.ActiveMenu.set("Red Fraction")
self.Menu_Prev("Thresholds","self.Menu_Main_Thresholds")
NItems = 10
space = 0.02
Item = 3
self.MenuItem1 = Tkinter.Label(self,wraplength=self.MenuX*0.8,text="Minimum:",anchor='c',bg=self.MenuTitleBgColor,fg=self.MenuTitleTextColor)
self.MenuItem1.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
Item = 4
self.MenuItem3 = Tkinter.Scale(self, from_=0, to=1,orient="horizontal",resolution=0.005, width=self.CheckButtonY,variable=self.RedFLTVariable)
self.MenuItem3.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
Item = 6
self.MenuItem4 = Tkinter.Label(self,wraplength=self.MenuX*0.8,text="Maximum:",anchor='c',bg=self.MenuTitleBgColor,fg=self.MenuTitleTextColor)
self.MenuItem4.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
Item = 7
self.MenuItem6 = Tkinter.Scale(self, from_=0, to=1,orient="horizontal",resolution=0.005, width=self.CheckButtonY,variable=self.RedFUTVariable)
self.MenuItem6.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
def Menu_Main_Thresholds_GreenF(self):
self.ClearMenu()
self.ActiveMenu.set("Green Fraction")
self.Menu_Prev("Thresholds","self.Menu_Main_Thresholds")
NItems = 10
space = 0.02
Item = 3
self.MenuItem1 = Tkinter.Label(self,wraplength=self.MenuX*0.8,text="Minimum:",anchor='c',bg=self.MenuTitleBgColor,fg=self.MenuTitleTextColor)
self.MenuItem1.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
Item = 4
self.MenuItem3 = Tkinter.Scale(self, from_=0, to=1,orient="horizontal",resolution=0.005, width=self.CheckButtonY,variable=self.GreenFLTVariable)
self.MenuItem3.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
Item = 6
self.MenuItem4 = Tkinter.Label(self,wraplength=self.MenuX*0.8,text="Maximum:",anchor='c',bg=self.MenuTitleBgColor,fg=self.MenuTitleTextColor)
self.MenuItem4.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
Item = 7
self.MenuItem6 = Tkinter.Scale(self, from_=0, to=1,orient="horizontal",resolution=0.005, width=self.CheckButtonY,variable=self.GreenFUTVariable)
self.MenuItem6.place(x=self.MenuOSX+self.MenuX*0.1,y=self.MenuOSY+Item*space*self.MenuY+(Item-1)*self.MenuY*(1.0-(NItems+1)*space)/NItems,width=self.MenuX*0.8,height=self.MenuY*(1.0-(NItems+1)*space)/NItems)
def Menu_Main_Thresholds_BlueF(self):
self.ClearMenu()
self.ActiveMenu.set("Blue Fraction")
self.Menu_Prev("Thresholds","self.Menu_Main_Thresholds")
NItems = 10
space = | |
# Repository: BoyanZhou/LongStrain
# construct microbiome haplotype
import numpy as np
import sys
class Haplotype:
    """
    Each haplotype represents a strain; by default 3 Haplotypes per individual.

    Tracks, per strain: which reads support it (per sample), which sample each
    read came from, which positions each read covers, per-sample read counts,
    and the inferred genotype at each covered position.
    """

    def __init__(self, index, sample_size, reads_count_by_sample=None):
        """
        :param index: index of the haplotype (0, 1, or 2)
        :param sample_size: number of samples contained in this haplotype
        :param reads_count_by_sample: optional per-sample read-count vector
            (length == sample_size); defaults to a vector of ones when omitted
            or empty (pseudo-count so proportions are never zero)
        """
        self.sample_size = sample_size
        self.number = index  # index of the haplotype (0, 1, or 2)
        # One INDEPENDENT list per sample. The original `[[]] * sample_size`
        # made every sample alias a single shared list, so a read appended for
        # one sample appeared in all of them.
        self.sample_read_names = [[] for _ in range(sample_size)]
        self.read_names_sample_index = {}  # read name -> sample index it belongs to
        self.read_names_positions = {}  # read name -> positions covered, e.g. {"read1": [12, 38, 59]}
        if reads_count_by_sample is None or reads_count_by_sample.shape == (0, ):
            self.reads_count_by_sample = np.ones(sample_size, dtype=int)
        else:
            self.reads_count_by_sample = reads_count_by_sample  # vector, length is sample size
        self.pos_genotype = {}  # position -> genotype, e.g. {183742: 0}

    def haplotype_extend(self, geno_index, data_dict, pos, geno):
        """
        Add the reads carrying genotype `geno` at `pos` to this haplotype.

        :param geno_index: boolean array selecting reads with this genotype
        :param data_dict: block data, {pos: [read_names, genotypes, sample_indices]}
        :param pos: the target position
        :param geno: 0 or 1
        """
        data_list = data_dict[pos]
        # Record the genotype this haplotype carries at the new locus.
        self.pos_genotype.update({pos: geno})
        for index in np.where(geno_index)[0]:
            read_name = data_list[0][index]
            read_sample_index = data_list[2][index]
            self.read_names_sample_index.update({read_name: read_sample_index})
            if read_name in self.read_names_positions:
                self.read_names_positions[read_name].append(pos)
            else:
                self.read_names_positions.update({read_name: [pos]})
            if read_name not in self.sample_read_names[read_sample_index]:
                # Count each distinct read once per sample.
                self.sample_read_names[read_sample_index].append(read_name)
                self.reads_count_by_sample[read_sample_index] += 1

    def haplotype_subtract(self, geno_index, data_dict, pos):
        """
        Remove one read from the haplotype.

        :param geno_index: integer index of the read within data_dict[pos]
        :return: (positions the read covered, their genotypes in this haplotype)
        """
        data_list = data_dict[pos]
        read_name = data_list[0][geno_index]
        read_sample_index = data_list[2][geno_index]
        if read_name in self.read_names_sample_index:
            del self.read_names_sample_index[read_name]
        if read_name in self.sample_read_names[read_sample_index]:
            self.sample_read_names[read_sample_index].remove(read_name)
            self.reads_count_by_sample[read_sample_index] -= 1  # update counts by sample
        read_positions = self.read_names_positions.pop(read_name)
        read_positions_geno = [self.pos_genotype[i] for i in read_positions]
        # NOTE: the positions stay in pos_genotype even after the read leaves.
        return read_positions, read_positions_geno

    def haplotype_add(self, geno_index, data_dict, pos, read_positions, read_positions_geno):
        """
        Add one (previously subtracted) read to the haplotype.

        `pos` is not yet included in `read_positions`; data_dict[pos] has three
        parallel arrays: read names, 0/1 genotypes, sample indices.
        """
        data_list = data_dict[pos]
        read_name = data_list[0][geno_index]
        read_sample_index = data_list[2][geno_index]
        # NOTE(review): mutates the caller's lists in place — original behavior.
        read_positions.append(pos)
        read_positions_geno.append(data_list[1][geno_index])
        self.read_names_sample_index.update({read_name: read_sample_index})
        if read_name not in self.sample_read_names[read_sample_index]:
            self.sample_read_names[read_sample_index].append(read_name)
            self.reads_count_by_sample[read_sample_index] += 1  # update counts by sample
        if read_name in self.read_names_positions:
            self.read_names_positions[read_name].extend(read_positions)
        else:
            self.read_names_positions.update({read_name: read_positions})
        # Only claim genotypes at positions this haplotype has not yet decided.
        for p, geno in zip(read_positions, read_positions_geno):
            if p not in self.pos_genotype:
                self.pos_genotype.update({p: geno})

    def get_pos_depth(self):
        """
        Get the coverage depth at each site (all samples pooled).

        :return: {pos: total read depth of this haplotype at pos}
        """
        pos_depth_dict = {}
        for pos_list in self.read_names_positions.values():
            for pos in pos_list:
                if pos in pos_depth_dict:
                    pos_depth_dict[pos] += 1
                else:
                    pos_depth_dict[pos] = 1
        return pos_depth_dict

    def get_samples_pos_depth(self):
        """
        Get the per-sample coverage depth at each site.

        :return: {pos: [dp_sample0, dp_sample1, ...]}, list length == sample_size
        """
        pos_sample_depth_dict = {}
        for read_name in self.read_names_positions.keys():
            read_sample_index = self.read_names_sample_index[read_name]
            for pos in self.read_names_positions[read_name]:
                if pos in pos_sample_depth_dict:
                    pos_sample_depth_dict[pos][read_sample_index] += 1
                else:
                    # First time this position is recorded.
                    pos_depth_list = [0] * self.sample_size
                    pos_depth_list[read_sample_index] += 1
                    pos_sample_depth_dict.update({pos: pos_depth_list})
        return pos_sample_depth_dict
def whether_skip(likelihood_array, skip_threshold):
    """Decide whether a locus should be skipped as too ambiguous.

    Ranks the six strain/genotype combinations by likelihood. When the two
    best combinations agree on two of the three strains their likelihoods are
    pooled. Skip (return True) when the resulting best likelihood is below
    `skip_threshold`.
    """
    ranked = np.argsort(likelihood_array)
    combos = np.array([[0, 1, 1], [1, 0, 0], [1, 0, 1],
                       [0, 1, 0], [1, 1, 0], [0, 0, 1]])
    best, runner_up = ranked[-1], ranked[-2]
    agreement = combos[best] - combos[runner_up]
    if np.sum(agreement == 0) == 2:
        # The two top combinations share two genotypes: pool their mass.
        top_likelihood = likelihood_array[best] + likelihood_array[runner_up]
    else:
        top_likelihood = likelihood_array[best]
    return bool(top_likelihood < skip_threshold)
def calculate_maximum_likelihood_matrix(value_matrix, bool_matrix):
    """Sum the selected per-row likelihoods, dropping each multi-entry row's minimum.

    `value_matrix` holds per-strain log-likelihoods (one row per read);
    `bool_matrix` marks which entries count for each row. Rows with more than
    one selected entry contribute only their larger selected value.
    NOTE(review): the row minimum is taken over the masked row (False entries
    become 0); with negative log-likelihoods the minimum is always a selected
    entry, as in the original.
    """
    masked = value_matrix * bool_matrix
    rows_with_multiple = np.sum(bool_matrix, axis=1) > 1
    total = np.sum(masked)
    total -= np.sum(np.min(masked[rows_with_multiple], axis=1))
    return total
def haplotype_identification(pos_reads_mutation_dict, sample_size, reads_count_by_strains=np.array([]),
skip_threshold=0.99):
"""
Main function of identifying haplotypes
:param pos_reads_mutation_dict: is like {pos: [read_names, "0-1" genotype, sample index]}
:param sample_size: number of samples in this individual
:param reads_count_by_strains:
:param skip_threshold:
:return: haplotypes=[Haplotype0, Haplotype1, Haplotype2], reads_count_by_strains=M by 3 matrix storing reads counts,
posterior_probability_dict= {pos: probability_six_combination}
"""
genotype_to_haplotype = np.array([[0, 1], [0, 2], [1, 0], [1, 2], [2, 0], [2, 1]])
genotype_to_haplotype3 = np.array([[0, 1, 1], [1, 0, 0], [1, 0, 1], [0, 1, 0], [1, 1, 0], [0, 0, 1]])
error_rate = np.log(0.005)
# print("haplotype identification start ... ...")
posterior_probability_dict = {}
# initial bin
if reads_count_by_strains.shape == (0, ):
bin_initial = True
else:
bin_initial = False
# matrix for storing counts of each strains in each sample, samples * strains
if bin_initial:
# if this is haplotype initialization, assign 0 to the count
reads_count_by_strains = np.ones([sample_size, 3], dtype=int)
# initialize the list storing haplotypes
haplotypes = [Haplotype(i, sample_size) for i in range(3)]
else:
# if haplotypes have been initialized, assign cumulative counts to haplotypes
# print(reads_count_by_strains)
haplotypes = [Haplotype(i, sample_size, reads_count_by_strains[:, i]) for i in range(3)]
pos_last = -1000 # update after each position, give it a negative value as start
haplotype_last = np.array([], dtype=int) # strain index of reads (0, 1 or 2 ) at last position
for pos in sorted(pos_reads_mutation_dict.keys()):
# print(pos)
# need to estimate which strains these new reads belong to, update for each position
strains_proportions = reads_count_by_strains / np.tile(np.sum(reads_count_by_strains, axis=1), (3, 1)).T
if pos == -2:
print(strains_proportions)
strains_proportions = np.log(strains_proportions)
""" first pos in dict; start of the chain """
if bin_initial:
index_0 = pos_reads_mutation_dict[pos][1] == 0 # which genotype is 0
index_1 = pos_reads_mutation_dict[pos][1] == 1
haplotype_last = np.full(len(index_0), 0)
if np.sum(index_0) >= np.sum(index_1):
haplotypes[0].haplotype_extend(index_0, pos_reads_mutation_dict, pos, 0)
haplotypes[1].haplotype_extend(index_1, pos_reads_mutation_dict, pos, 1)
haplotype_last[index_1] = 1
else:
haplotypes[1].haplotype_extend(index_0, pos_reads_mutation_dict, pos, 0)
haplotypes[0].haplotype_extend(index_1, pos_reads_mutation_dict, pos, 0)
haplotype_last[index_0] = 1
pos_last = pos
# update reads count matrix
reads_count_by_strains[:, 0] = haplotypes[0].reads_count_by_sample.copy()
reads_count_by_strains[:, 1] = haplotypes[1].reads_count_by_sample.copy()
else:
""" not the first pos, the chain has started """
# compare read names of last position with present position to find shared reads
# three outcomes: shared names, index in pos_last, index in pos_current
if pos_last != -1000:
read_shared = np.intersect1d(pos_reads_mutation_dict[pos_last][0], pos_reads_mutation_dict[pos][0],
assume_unique=False, return_indices=True)
else:
read_shared = ([],)
if len(read_shared[0]) == 0:
# there is no shared read between two positions
# get sample index, row of reads_strains_proportions = reads number
reads_strains_proportions = strains_proportions[pos_reads_mutation_dict[pos][2]]
# reads_genotype_matrix stores genotype for each reads
reads_genotype_matrix = np.tile(pos_reads_mutation_dict[pos][1], (2, 1))
reads_genotype_matrix[0, :] = 1 - reads_genotype_matrix[0, :]
likelihood_six = []
for reads_to_strains in np.dot(genotype_to_haplotype, reads_genotype_matrix):
likelihood_six.append(np.sum(reads_strains_proportions * np.eye(3, dtype=int)[reads_to_strains]))
likelihood_max_index = np.array(likelihood_six).argmax()
# corresponding relationship between geno and strain; e.g. [0, 2]
geno_strain = genotype_to_haplotype[likelihood_max_index]
probability_six = np.exp(likelihood_six)
probability_six = probability_six/np.sum(probability_six)
# if the maximum probability < 0.99, skip this position and not record this position
if probability_six[likelihood_max_index] < skip_threshold and not bin_initial:
# print(pos)
# print(probability_six)
continue
posterior_probability_dict.update({pos: probability_six})
if pos == -2:
print(read_shared)
print("posterior probability is:")
print(probability_six)
print("pos is:")
print(pos)
print(pos_reads_mutation_dict[pos])
print("last pos is:")
print(pos_last)
print(pos_reads_mutation_dict[pos_last])
print("haplotype last is:")
print(haplotype_last)
sys.exit()
# update pos information
haplotypes[geno_strain[0]].haplotype_extend(pos_reads_mutation_dict[pos][1] == 0,
pos_reads_mutation_dict, pos, 0)
haplotypes[geno_strain[1]].haplotype_extend(pos_reads_mutation_dict[pos][1] == 1,
pos_reads_mutation_dict, pos, 1)
pos_last = pos
reads_count_by_strains[:, geno_strain[0]] = haplotypes[geno_strain[0]].reads_count_by_sample.copy()
reads_count_by_strains[:, geno_strain[1]] = haplotypes[geno_strain[1]].reads_count_by_sample.copy()
haplotype_last = pos_reads_mutation_dict[pos][1].copy()
haplotype_last[pos_reads_mutation_dict[pos][1] == 0] = geno_strain[0]
haplotype_last[pos_reads_mutation_dict[pos][1] == 1] = geno_strain[1]
else:
# there are shared reads
""" for six strains-genotype combinations, get the one with max likelihood """
likelihood_six_combination = []
geno_actual = pos_reads_mutation_dict[pos][1][read_shared[2]].copy()
for combination in genotype_to_haplotype3:
if pos == -2:
print(combination)
# calculate the likelihood of two part
likelihood_reads_consistency = 0
likelihood_strain_in_sample = 0
# haplotype_last = np.array([1, 1, 0, 0, 2])
# combination = genotype_to_haplotype3[0], e.g. [0, 1, 1]
# geno_actual = np.array([1, 1, 0, 0, 0])
geno_predicted = combination[haplotype_last[read_shared[1]]]
if np.sum(geno_predicted != geno_actual) > 0:
# reads inconsistency exists
inconsistent_index = read_shared[2][geno_actual != | |
#!/usr/bin/env python3
import socket
import threading
import logging
# Log everything to a local file; DEBUG level captures all driver traffic.
logging.basicConfig(filename='meca.log', level=logging.DEBUG)
# File that receives a copy of every motion command sent to the robot
# (truncated on Connect, appended by SendCommand via _writeProgram).
PROGRAM_FILE = 'program_output.txt'
# Dictionary of status indexes in robot status message
statusDict = {'activated': 0,
              'homed': 1,
              'simulating': 2,
              'error': 3,
              'paused': 4,
              'EOB': 5,
              'EOM': 6}
# Ease of use cartesian index labeling
cartDict = {'x': 0,
            'y': 1,
            'z': 2,
            'rx': 3,
            'ry': 4,
            'rz': 5}
# Dictionary of command responses
# Maps each query/command to the numeric reply codes that indicate success.
responseDict = {'ActivateRobot': [2000, 2001],
                'DeactivateRobot': [2004],
                'BrakesOn': [2010],
                'BrakesOff': [2008],
                'Home': [2002, 2003],
                'GetJoints': [2026],
                'GetPose': [2027],
                'ClearMotion': [2044],
                'PauseMotion': [2042],
                'ResumeMotion': [2043],
                'ResetError': [2005],
                'GetStatusRobot': [2007],
                'GetFwVersion': [2081],
                'GetProductType': [2084]}
# Combined control and feedback class for Mecademic
class Robot:
    """Driver for a Mecademic robot arm.

    Speaks the plain-text command protocol: TCP port 10000 for control and
    port 10001 for feedback/monitoring. Replies look like "[code][message]".
    """

    def __init__(self, ip):
        """Store the controller address; sockets are opened in Connect()."""
        self.ip = ip
        self.connected = False
        # Named cartesian poses ([x, y, z, rx, ry, rz]) and joint positions.
        self.pose = {'stow': [75,0,240,0,90,0], 'home': [110,-150,130,-180,0,-180]}
        self.joints = {'stow': [0,-60,60,0,0,0]}
        # Named tool and work reference frames.
        self.toolFrame = {'flange': [0,0,0,0,0,0]}
        self.workFrame = {'base': [0,0,0,0,0,0]}

    # Connect to both control and feedback servers
    def Connect(self):
        """Open control (10000) and feedback (10001) sockets; True on success."""
        self.connected = True
        self.controlClient = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.controlClient.settimeout(10)  # seconds (comment previously said 100ms)
        self.controlClient.connect((self.ip, 10000))
        # Strip the surrounding brackets and split "[code][message]".
        code, response = self.controlClient.recv(1024).decode('ascii')[1:-2].split('][')
        if int(code) != 3000:
            if int(code) == 3001:
                print('Another user is already connected!')
                # NOTE(review): hard process exit on a busy robot — consider raising.
                exit()
            logging.warning('Unable to connect to port 10000')
            self.connected = False
        # Clear initial errors
        if self.GetStatus('error'):
            logging.info('Error on initialization')
            self.ResetError()
        self.firmware = self.ReadResponse('GetFwVersion')
        self.product = self.ReadResponse('GetProductType')
        self.feedbackClient = socket.socket()
        self.feedbackClient.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY,1)
        self.feedbackClient.settimeout(10)  # seconds
        self.feedbackClient.connect((self.ip, 10001))
        code = int(self.feedbackClient.recv(1024).decode('ascii')[1:-2].split('][')[0])
        if int(code) != 2079:
            logging.warning('Unable to connect to port 10001')
            self.connected = False
        # Truncate the mirrored-program file for this session
        # (redundant f.close() inside the with-block removed).
        with open(PROGRAM_FILE,'w') as f:
            f.write('')
        return self.connected

    # Easy setup routine
    def Startup(self):
        """Activate then home the robot. Returns False if activation fails."""
        if self.Activate(): return self.Home()
        return False  # fixed: previously fell through and returned None

    # Ease of use 0-100% global speed adjustment
    def SetSpeed(self, percentage):
        """Scale all cartesian/joint velocity and acceleration limits together."""
        # If speed is provided as fractional change to percentage
        if percentage < 1: percentage *= 100
        self.SetCartAcc(percentage)
        self.SetCartAngVel(3*percentage)
        self.SetCartLinVel(10*percentage)
        self.SetJointAcc(1.5*percentage)
        self.SetJointVel(percentage)

    # Move robot in +Z of tool frame
    def Push(self, mm): self.MoveToolRel([0,0,mm,0,0,0])

    # Move robot in -Z of tool frame
    def Pull(self, mm): self.MoveToolRel([0,0,-mm,0,0,0])

    def Wiggle(self):
        """Rock the tool +/-4 degrees about its X axis."""
        self.MoveToolRel([0,0,0,4,0,0])
        self.MoveToolRel([0,0,0,-4,0,0])

    # Move robot to Z-offset above target pose, joint-interpolated
    def Approach(self, pose, zOffset):
        approachPose = pose.copy()
        approachPose[2] += zOffset
        self.MovePose(approachPose)  # fixed: was self.MoveP (not defined)

    # Move robot linearly to Z-offset above target pose
    def Depart(self, pose, zOffset):
        departPose = pose.copy()
        departPose[2] += zOffset
        self.MoveLinear(departPose)  # fixed: was self.MoveL (not defined)

    # Power-up robot motors
    def Activate(self):
        if self.GetStatus('activated'): return True
        else: return self.SendCommand('ActivateRobot')

    # Power-down robot motors
    def Deactivate(self):
        if not self.GetStatus('activated'): return True
        else: return self.SendCommand('DeactivateRobot')

    # De-activate robot and engage brakes
    def BrakesOn(self):
        # NOTE(review): returns None on the deactivate path — original behavior.
        if self.GetStatus('activated'): self.Deactivate()
        else: return self.SendCommand('BrakesOn')

    # Activate robot and disengage brakes
    def BrakesOff(self):
        if not self.GetStatus('activated'): self.Activate()
        else: return self.SendCommand('BrakesOff')

    # Home robot motors
    def Home(self):
        if self.GetStatus('homed'): return True
        else: return self.SendCommand('Home')

    # Move robot to target "pose" list relative to work plane
    def MovePose(self, pose):
        """Joint-interpolated move to a named pose or explicit 6-element list."""
        if self.GetStatus('paused'): self.ResumeMove()
        sentPose = _returnList(self.pose, pose)
        if sentPose is not None: return self.SendCommand(f'MovePose{tuple(sentPose)}')
        else: return False

    # Move robot to target "joints" list
    def MoveJoints(self, joints):
        if not self._checkJointLimits(joints):
            logging.warning("Target position outside joint limits!")
            return False
        if self.GetStatus('paused'): self.ResumeMove()
        sentJoints = _returnList(self.joints, joints)  # fixed: was self.joint
        if sentJoints is not None: return self.SendCommand(f'MoveJoints{tuple(sentJoints)}')
        else: return False

    # Jog robot at target "joints" speed
    def MoveJV(self, joints):
        if not self._checkJointSpeedLimits(joints):
            logging.warning("Target speed outside joint limits!")
            return False
        else:
            if self.GetStatus('paused'): self.ResumeMove()
            return self.SendCommand(f'MoveJointsVel{tuple(joints)}')

    # Move robot linearly to target "pose" list relative to work frame
    def MoveLinear(self, pose):
        if self.GetStatus('paused'): self.ResumeMove()
        sentPose = _returnList(self.pose, pose)
        if sentPose is not None: return self.SendCommand(f'MoveLin{tuple(sentPose)}')
        else: return False

    # Move robot in by "pose" list relative to tool frame
    def MoveToolRel(self, pose):
        return self.SendCommand(f'MoveLinRelTRF{tuple(pose)}')

    # Move robot in by "pose" list relative to work frame
    def MoveWorkRel(self, pose):
        return self.SendCommand(f'MoveLinRelWRF{tuple(pose)}')

    # Jog at target "pose" speed relative to tool frame
    def MoveToolVel(self, pose):
        return self.SendCommand(f'MoveLinVelTRF{tuple(pose)}')

    # Jog tool at target "pose" speed relative to work plane
    def MoveWorkVel(self, pose):
        return self.SendCommand(f'MoveLinVelWRF{tuple(pose)}')

    # Set blend radius from 0-100%
    def SetBlending(self, percentage):
        assert percentage >= 0 and percentage <= 100
        return self.SendCommand(f'SetBlending({percentage})')

    # Set cartesian acceleration from 0.001-600%
    def SetCartAcc(self, percentage):
        assert percentage >= .001 and percentage <= 600
        return self.SendCommand(f'SetCartAcc({percentage})')

    # Set cartesian angular velocity from 0.001-300deg/s
    def SetCartAngVel(self, degrees):
        assert degrees >= 0.001 and degrees <= 300
        return self.SendCommand(f'SetCartAngVel({degrees})')

    # Set cartesian linear velocity from 0.001-1,000mm/s
    def SetCartLinVel(self, mms):
        assert mms >= 0.001 and mms <= 1000
        return self.SendCommand(f'SetCartLinVel({mms})')

    # Set joint acceleration from 0.001-150%
    def SetJointAcc(self, percentage):
        return self.SendCommand(f'SetJointAcc({percentage})')

    # Set joint velocity from 0.001-100%
    def SetJointVel(self, percentage):
        return self.SendCommand(f'SetJointVel({percentage})')

    # Add a new robot pose
    def AddPose(self, poseName, pose):
        self.pose[poseName] = pose

    # Add a new robot joint position
    def AddJoints(self, jointsName, joint):
        self.joints[jointsName] = joint

    # Set tool frame to existing tool or arbitrary offset
    def SetTool(self, toolOffset):
        sentTool = _returnList(self.toolFrame, toolOffset)  # fixed: was self.tool
        # NOTE(review): list is formatted with [] brackets, unlike the tuple()
        # formatting used by the Move commands — confirm the firmware accepts it.
        self.SendCommand(f'SetTRF({sentTool})')

    # Add a new tool frame to robot tools
    def AddTool(self, toolName, toolOffset):
        # Pad a 3-element [x, y, z] offset with zero rotations.
        if len(toolOffset) == 3:
            for vector in range(3):
                toolOffset.append(0)
        self.toolFrame[toolName] = toolOffset

    # Set work plane to existing plane or arbitrary offset
    def SetWork(self, workPlane):
        sentWork = _returnList(self.workFrame, workPlane)  # fixed: was self.work
        self.SendCommand(f'SetWRF({sentWork})')

    # Add a new work plane to robot workFrame dict
    def AddWork(self, workName, workPlane):
        # Pad a 3-element [x, y, z] offset with zero rotations.
        if len(workPlane) == 3:
            for vector in range(3):
                workPlane.append(0)
        self.workFrame[workName] = workPlane

    # Get list of current joint positions in degrees
    def GetJoints(self):
        return self.ReadResponse('GetJoints')

    # Get list of current cartesian position in millimeters
    def GetPose(self):
        return self.ReadResponse('GetPose')

    # Delete current planned move
    def ClearMove(self):
        return self.SendCommand('ClearMotion')

    # Pause current move
    def PauseMove(self):
        return self.SendCommand('PauseMotion')

    # Resume current move
    def ResumeMove(self):
        return self.SendCommand('ResumeMotion')

    # Reset error
    def ResetError(self):
        return self.SendCommand('ResetError')

    def SetCheckpoint(self, step=1):
        """Insert a motion checkpoint and wait for its acknowledgement."""
        self.controlClient.send(bytes(f'SetCheckpoint({step})\0','ascii'))
        # NOTE(review): _GetMessage is not defined on this class — confirm it
        # exists elsewhere or replace with the recv/split pattern used above.
        code, response = self._GetMessage()
        if code in [2000, 2001]: return True
        else: return False

    # Set position update rate in ms
    def SetMonitoringInterval(self, ms):
        assert ms >= 0.001 and ms <= 1
        return self.SendCommand(f'SetMonitoringInterval({ms})', client='feedback')

    # Get robot status as list of booleans
    def GetStatus(self, status='all'):
        """Return the full status list, or one flag named in statusDict.

        Returns None (after printing the valid keys) for an unknown name.
        """
        responseList = self.ReadResponse('GetStatusRobot').split(',')
        responseBool = [bool(int(response)) for response in responseList]
        if status != 'all':
            if status in statusDict.keys():
                return responseBool[statusDict[status]]
            else:
                print(f'Use an available value:\n{statusDict.keys()}')
        else:
            return responseBool

    # Send command and receive confirmation
    def SendCommand(self, cmd, client='command'):
        """Send `cmd`; True when the reply code matches the expected set."""
        if self.connected is False: self.Connect()
        if client == 'command':
            _writeProgram(cmd)  # mirror the command into PROGRAM_FILE
            self.controlClient.send(bytes(f'{cmd}\0','ascii'))
            code, response = self.controlClient.recv(1024).decode('ascii')[1:-2].split('][')
            if int(code) in self._getCodes(cmd): return True
            else:
                print(f'Error: {response}')
                self.ResetError()
                return False
        else:
            self.feedbackClient.send(bytes(f'{cmd}\0','ascii'))
            code, response = self.feedbackClient.recv(1024).decode('ascii')[1:-2].split('][')
            print(code, response)
            return True

    # Send command and receive message
    def ReadResponse(self, cmd):
        """Send `cmd` and return the reply payload, or None on error."""
        if self.connected is False: self.Connect()
        self.controlClient.send(bytes(f'{cmd}\0','ascii'))
        code, response = self.controlClient.recv(1024).decode('ascii')[1:-2].split('][')
        if int(code) in self._getCodes(cmd): return response
        else:
            logging.warning(f'Error: {response}')
            return None

    # Receive current joint or cartesian positions
    def ReadPosition(self, cmd):
        """Read one joint + pose pair from the feedback stream as floats."""
        if self.connected is False: self.Connect()
        # The feedback socket delivers two NUL-terminated messages per cycle.
        jointResponse, poseResponse = self.feedbackClient.recv(1024).decode('ascii').split('\x00')[:2]
        print(jointResponse, poseResponse)
        if cmd == 'GetJoints': msg = jointResponse
        elif cmd == 'GetPose': msg = poseResponse
        code, responseString = msg[1:-2].split('][')
        if not int(code) in self._getCodes(cmd):
            logging.warning(f'Error: {responseString}')
            return None
        responseList = responseString.split(',')
        responseFloat = [float(response) for response in responseList]
        return responseFloat

    # Look up corresponding error code in dictionary
    def _getCodes(self, cmd):
        if cmd.startswith('Move'):
            return [3004,3012]
        elif cmd.startswith('Set'):
            return [3012]
        else:
            return responseDict[cmd]

    # Move limit checks (degrees); raise AssertionError when out of range
    def _checkJointLimits(self, joints):
        assert abs(joints[0]) <= 175
        assert joints[1] >= -70 and joints[1] <= 90
        assert joints[2] >= -135 and joints[2] <= 70
        assert abs(joints[3]) <= 170
        assert abs(joints[4]) <= 115
        assert abs(joints[5]) <= 180
        return True

    def _checkJointSpeedLimits(self, joints):
        assert abs(joints[0]) <= 150
        assert abs(joints[1]) <= 150
        assert abs(joints[2]) <= 180
        assert abs(joints[3]) <= 300
        assert abs(joints[4]) <= 300
        assert abs(joints[5]) <= 500
        return True

    def _checkPoseSpeedLimits(self, pose):
        assert pose[0] >= 0.001 and pose[0] <= 1000
        assert pose[1] >= 0.001 and pose[1] <= 1000
        assert pose[2] >= 0.001 and pose[2] <= 1000
        assert pose[3] >= 0.001 and pose[3] <= 300
        assert pose[4] >= 0.001 and pose[4] <= 300
        assert pose[5] >= 0.001 and pose[5] <= 500
        return True

    def _checkPoseRotLimits(self, pose):
        for vector in pose:
            assert vector >= 0.001 and vector <= 300
# Pose object
class Pose():
def __init__(self, pose, coords='pose'):
self.coords = coords
self.pose = pose
# Ease of use 0-100% global speed adjustment
| |
"""
x, b, y_idx = input_storage
if b.shape[0] != x.shape[1]:
raise ValueError('b must have same number of columns as x')
if y_idx.shape[0] != x.shape[0]:
raise ValueError('y_idx must have same number of rows as x')
sm = numpy.zeros_like(x) # softmax
nll = numpy.zeros(x.shape[0], dtype=node.outputs[0].type.
dtype) # nll(y | softmax(x))
am = numpy.zeros_like(y_idx)
for i in xrange(sm.shape[0]):
#add the bias vector to the i'th row of x
row = x[i] + b
#get the maximum value of i'th row for numerically safe
#softmax / nll
am[i] = numpy.argmax(row)
m = row[am[i]]
#compute the unnormalized softmax, and normalization constant
sm[i] = numpy.exp(row - m)
sum_j = numpy.sum(sm[i]) # sum_j(exp(x[j] - m))
#normalized our softmax
sm[i] *= 1.0 / sum_j
# store the nll
nll[i] = -row[y_idx[i]] + m + numpy.log(sum_j)
output_storage[0][0] = nll
output_storage[1][0] = sm
output_storage[2][0] = am
def infer_shape(self, node, shapes):
x_shp, b_shp, idx_shp = shapes
nll_shp = (x_shp[0],)
sm_shp = x_shp
am_shp = idx_shp
return [nll_shp, sm_shp, am_shp]
def connection_pattern(self, node):
return [[True, True, True], # x
[True, True, True], # b
[False, False, True]] # y_idx
    def grad(self, inp, grads):
        """Gradients of (nll, softmax, argmax) outputs w.r.t. (x, b, y_idx).

        Each connected output gradient contributes a term; the terms are
        summed per input, and an input with no terms is disconnected.
        """
        x, b, y_idx = inp
        g_nll, g_sm, g_am = grads
        dx_terms = []
        db_terms = []
        d_idx_terms = []
        if not isinstance(g_nll.type, DisconnectedType):
            # Contribution through the nll output (dedicated dx Op; db is its column sum).
            nll, sm = crossentropy_softmax_1hot_with_bias(x, b, y_idx)
            dx = crossentropy_softmax_1hot_with_bias_dx(g_nll, sm, y_idx)
            db = tensor.sum(dx, axis=[0])
            dx_terms.append(dx)
            db_terms.append(db)
        if not isinstance(g_sm.type, DisconnectedType):
            # Contribution through the softmax output, delegated to softmax_with_bias.
            dx, db = softmax_with_bias.grad((x, b), (g_sm, ))
            dx_terms.append(dx)
            db_terms.append(db)
        if not isinstance(g_am.type, DisconnectedType):
            # argmax is piecewise constant, so its gradient terms are all zero.
            dx_terms.append(x.zeros_like())
            db_terms.append(b.zeros_like())
            d_idx_terms.append(y_idx.zeros_like())
        def fancy_sum(terms):
            # No contributing output means the input is disconnected.
            if len(terms) == 0:
                return DisconnectedType()()
            rval = terms[0]
            for term in terms[1:]:
                rval = rval + term
            return rval
        return [fancy_sum(terms) for terms in
                [dx_terms, db_terms, d_idx_terms]]
def c_headers(self):
return ['<iostream>', '<cmath>']
    @staticmethod
    def c_code_template():
        # Returns the C implementation as an ordered tuple of fragments that
        # c_code() concatenates: SoftmaxWithBias provides the row-loop
        # skeleton, and the fragments below splice the nll/argmax bookkeeping
        # (allocation, per-row pointers, final nll computation) around it.
        # this implementation was lifted from
        # /u/bergstrj/cvs/bergstrj/src/feb07/nn.cxx
        #TODO: put this into a templated function, in the support code
        #TODO: declare the max of each row as an Op output
        #TODO: set error messages for failures in this code
        #TODO: use this to accept float32 and int32: node.inputs[0].type.dtype_specs()[1]
        (init_decl, begin_row_loop, inside_row_loop, end_row_loop) = \
            SoftmaxWithBias.c_code_template()
        return (init_decl,
                """
        if (PyArray_NDIM(%(y_idx)s) != 1)
        {
            PyErr_SetString(PyExc_ValueError, "y_idx not 1d tensor");
            %(fail)s;
        }
        if (PyArray_DIMS(%(x)s)[0] != PyArray_DIMS(%(y_idx)s)[0])
        {
            PyErr_Format(PyExc_ValueError,
                "number of rows in x (%%ld) does not match length of y (%%ld)",
                (long int)PyArray_DIMS(%(x)s)[0],
                (long int)PyArray_DIMS(%(y_idx)s)[0]);
            %(fail)s;
        }

        if ((NULL == %(nll)s) //initial condition
            || (PyArray_DIMS(%(nll)s)[0] != PyArray_DIMS(%(y_idx)s)[0]))
        {
            if (NULL != %(nll)s) Py_XDECREF(%(nll)s);
            %(nll)s = (PyArrayObject*)PyArray_SimpleNew(1,
                PyArray_DIMS(%(y_idx)s), type_num_%(x)s);
            if(!%(nll)s)
            {
                PyErr_SetString(PyExc_MemoryError,
                     "failed to alloc nll output");
                %(fail)s;
            }
        }
        if ((NULL == %(am)s)
            || (PyArray_DIMS(%(am)s)[0] != PyArray_DIMS(%(y_idx)s)[0]))
        {
            Py_XDECREF(%(am)s);
            %(am)s = (PyArrayObject*) PyArray_SimpleNew(1,
                PyArray_DIMS(%(y_idx)s), type_num_%(y_idx)s);
            if(!%(am)s)
            {
                PyErr_SetString(PyExc_MemoryError,
                     "failed to alloc am output");
                %(fail)s;
            }
        }
                """,
                begin_row_loop,
                """
            const %(y_idx_type) s y_i = ((%(y_idx_type)s*)(PyArray_BYTES(%(y_idx)s) + PyArray_STRIDES(%(y_idx)s)[0] * i))[0];
            dtype_%(nll) s* __restrict__ nll_i = (dtype_%(nll)s*)(PyArray_BYTES(%(nll)s) + PyArray_STRIDES(%(nll)s)[0] * i);
            %(am_type)s* __restrict__ am_i = (%(am_type)s*) (PyArray_BYTES(%(am)s) + PyArray_STRIDES(%(am)s)[0] * i);
                """,
                inside_row_loop,
                """
            if ((y_i >= PyArray_DIMS(%(x)s)[1]) || (y_i < 0))
            {
                PyErr_SetString(PyExc_ValueError, "y_i value out of bounds");
                %(fail)s;
            }
            nll_i[0] = - x_i[y_i*Sx]
                       - b_i[y_i*Sb]
                       + row_max
                       + log(sum);
            am_i[0] = row_max_j;
                """,
                end_row_loop)
def c_code_cache_version(self):
return (5,) + SoftmaxWithBias.c_code_cache_version()
    def c_code(self, node, name, inp, out, sub):
        """Assemble the C implementation by %-substituting into the template."""
        x, b, y_idx = inp
        nll, sm, am = out
        y_idx_type = node.inputs[2].type.dtype_specs()[1]
        am_type = y_idx_type
        # NOTE: the template references the local names above (%(x)s,
        # %(y_idx_type)s, %(am_type)s, ...), so dict(locals(), **sub) must see
        # them verbatim — do not rename these locals.
        code_template = ''.join(self.c_code_template())
        return code_template % dict(locals(), **sub)
class CrossentropySoftmax1HotWithBiasDx (gof.Op):
    # Three inputs (dy, sm, y_idx), one output (dx).
    nin = 3
    nout = 1
    """Gradient wrt x of the CrossentropySoftmaxArgmax1HotWithBias Op"""
    def __init__(self, **kwargs):
        gof.Op.__init__(self, **kwargs)
    def __eq__(self, other):
        # Stateless Op: all instances of this type are equivalent.
        return type(self) == type(other)
    def __hash__(self):
        return tensor.hashtype(self)
    def __str__(self):
        return self.__class__.__name__
    def make_node(self, dy, sm, y_idx, **kwargs):
        """Validate ranks/dtypes and build the Apply node.

        dy: 1-d float gradient of the nll; sm: 2-d float softmax output;
        y_idx: 1-d integer target indices. Output dx has sm's type.
        """
        dy = tensor.as_tensor_variable(dy)
        sm = tensor.as_tensor_variable(sm)
        y_idx = tensor.as_tensor_variable(y_idx)
        if (dy.type.ndim != 1 or
            dy.type.dtype not in tensor.float_dtypes):
            raise ValueError('dy must be 1-d tensor of floats', dy.type)
        if (sm.type.ndim != 2 or
            sm.type.dtype not in tensor.float_dtypes):
            raise ValueError('sm must be 2-d tensor of floats', sm.type)
        if (y_idx.type.ndim != 1 or
            y_idx.type.dtype not in tensor.discrete_dtypes):
            raise ValueError('y_idx must be 1-d tensor of [u]ints', y_idx.type)
        return Apply(self, [dy, sm, y_idx], [sm.type.make_variable()])
    def perform(self, node, input_storage, output_storage):
        # dx[i, j] = dy[i] * sm[i, j], with dy[i] subtracted at the target column.
        dy, sm, y_idx = input_storage
        dx = numpy.zeros_like(sm)
        for i in xrange(sm.shape[0]):
            dx[i] = dy[i] * sm[i]  # vector scale
            dx[i, y_idx[i]] -= dy[i]  # scalar decrement
        output_storage[0][0] = dx
    def infer_shape(self, node, shapes):
        # dx has the same shape as sm.
        return [shapes[1]]
    def grad(self, inp, grads):
        """Second-order gradient w.r.t. (dy, sm); y_idx gets grad_not_implemented."""
        dy, sm, y_idx = inp
        g_dx, = grads
        # TODO: currently we do not compute the gradient w.r.t. dy, because
        # advanced indexing is not working yet. When it works, do it to avoid
        # potentially misleading behavior in gradient computations! (although
        # typically we should not need the gradient w.r.t. dy).
        y_idx_range = tensor.arange(y_idx.shape[0])
        g_dy = tensor.sum(
            g_dx * tensor.AdvancedIncSubtensor()(
                sm, tensor.fill(dy, -1), y_idx_range, y_idx),
            axis=1)
        g_sm = dy.dimshuffle(0, 'x') * g_dx
        g_y_idx = grad_not_implemented(self, 2, y_idx)
        return [g_dy, g_sm, g_y_idx]
    def c_code_cache_version(self):
        return (3,)
    def c_code(self, node, name, inp, out, sub):
        # C implementation of perform(): validates dtypes/shapes, (re)allocates
        # dx when needed, then fills it row by row.
        dnll, sm, y_idx = inp
        dx, = out
        y_idx_type = node.inputs[2].type.dtype_specs()[1]
        return """
        if ((PyArray_DESCR(%(dnll)s)->type_num != NPY_DOUBLE) &&
            (PyArray_DESCR(%(dnll)s)->type_num != NPY_FLOAT))
        {
            PyErr_SetString(PyExc_TypeError,
                 "dnll type should be float32 or float64");
            %(fail)s;
        }
        if ((PyArray_DESCR(%(sm)s)->type_num != NPY_DOUBLE) &&
            (PyArray_DESCR(%(sm)s)->type_num != NPY_FLOAT))
        {
            PyErr_SetString(PyExc_TypeError,
                 "sm type should be float32 or float64");
            %(fail)s;
        }
        if ((PyArray_NDIM(%(dnll)s) != 1)
            || (PyArray_NDIM(%(sm)s) != 2)
            || (PyArray_NDIM(%(y_idx)s) != 1))
        {
            PyErr_SetString(PyExc_ValueError, "rank error");
            %(fail)s;
        }
        if (PyArray_DIMS(%(dnll)s)[0] != PyArray_DIMS(%(sm)s)[0])
        {
            PyErr_Format(PyExc_ValueError,
                "dnll.shape[0] (%%ld) != sm.shape[0] (%%ld)",
                (long int)PyArray_DIMS(%(dnll)s)[0],
                (long int)PyArray_DIMS(%(sm)s)[0]);
            %(fail)s;
        }
        if (PyArray_DIMS(%(dnll)s)[0] != PyArray_DIMS(%(y_idx)s)[0])
        {
            PyErr_Format(PyExc_ValueError,
                "dnll.shape[0] (%%ld) != y_idx.shape[0] (%%ld)",
                (long int)PyArray_DIMS(%(dnll)s)[0],
                (long int)PyArray_DIMS(%(y_idx)s)[0]);
            %(fail)s;
        }
        if ((NULL == %(dx)s)
            || (PyArray_DIMS(%(dx)s)[0] != PyArray_DIMS(%(sm)s)[0])
            || (PyArray_DIMS(%(dx)s)[1] != PyArray_DIMS(%(sm)s)[1]))
        {
            if (NULL != %(dx)s) Py_XDECREF(%(dx)s);
            %(dx)s = (PyArrayObject*) PyArray_SimpleNew(2,
                                                        PyArray_DIMS(%(sm)s),
                                                        type_num_%(sm)s);
            if(!%(dx)s) {
                PyErr_SetString(PyExc_MemoryError,
                     "failed to alloc dx output");
                %(fail)s
            }
        }

        for (size_t i = 0; i < PyArray_DIMS(%(dx)s)[0]; ++i)
        {
            const dtype_%(dnll)s dnll_i = ((dtype_%(dnll)s*)(PyArray_BYTES(%(dnll)s) + PyArray_STRIDES(%(dnll)s)[0] * i))[0];

            const %(y_idx_type) s y_i = ((%(y_idx_type)s*)(PyArray_BYTES(%(y_idx)s) + PyArray_STRIDES(%(y_idx)s)[0] * i))[0];

            const dtype_%(sm)s* __restrict__ sm_i = (dtype_%(sm)s*)(PyArray_BYTES(%(sm)s) + PyArray_STRIDES(%(sm)s)[0] * i);
            npy_intp Ssm = PyArray_STRIDES(%(sm)s)[1]/sizeof(dtype_%(sm)s);

            dtype_%(dx) s* __restrict__ dx_i = (dtype_%(dx)s*)(PyArray_BYTES(%(dx)s) + PyArray_STRIDES(%(dx)s)[0] * i);
            npy_intp Sdx = PyArray_STRIDES(%(dx)s)[1]/sizeof(dtype_%(dx)s);

            for (size_t j = 0; j < PyArray_DIMS(%(dx)s)[1]; ++j)
            {
                dx_i[j * Sdx] = dnll_i * sm_i[j * Ssm];
            }
            if (y_i >= PyArray_DIMS(%(dx)s)[1])
            {
                PyErr_SetString(PyExc_ValueError, "y_i >= dx dimensions[1]");
                %(fail)s;
            }
            dx_i[y_i * Sdx] -= dnll_i;
        }
        """ % dict(locals(), **sub)
# Module-level singleton instances of the Ops above; these are the callable
# entry points used by the convenience wrapper functions that follow.
crossentropy_softmax_argmax_1hot_with_bias = \
    CrossentropySoftmaxArgmax1HotWithBias()
crossentropy_softmax_1hot_with_bias_dx = \
    CrossentropySoftmax1HotWithBiasDx()
def crossentropy_softmax_1hot_with_bias(x, b, y_idx, **kwargs):
    """Return (cross-entropy, softmax) for ``x`` with bias ``b``.

    Thin wrapper that drops the argmax output of
    ``crossentropy_softmax_argmax_1hot_with_bias``.
    """
    outputs = crossentropy_softmax_argmax_1hot_with_bias(x, b, y_idx, **kwargs)
    return outputs[:2]
def crossentropy_softmax_1hot(x, y_idx, **kwargs):
    """Bias-free variant: delegates with an all-zero bias vector."""
    zero_bias = tensor.zeros_like(x[0, :])
    return crossentropy_softmax_1hot_with_bias(x, zero_bias, y_idx, **kwargs)
def crossentropy_softmax_max_and_argmax_1hot_with_bias(x, b, y_idx, **kwargs):
    """
    @return: The cross-entropy, the softmax output, the max probability,
        and the argmax index
    @todo: Since we are recomputing the argmax,
           we might as well assert that it is correct.
    @todo: Make this entire function is
    unnecessary? e.g. CrossentropySoftmaxArgmax1HotWithBias should return
    the appropriate information (i.e. the max probability)?
    """
    xent, softmax = crossentropy_softmax_1hot_with_bias(x, b, y_idx, **kwargs)
    # Recompute the winning class and its probability from the softmax.
    max_pr, argmax = tensor.max_and_argmax(softmax, axis=-1)
    return (xent, softmax, max_pr, argmax)
def crossentropy_softmax_max_and_argmax_1hot(x, y_idx, **kwargs):
    """Bias-free variant: delegates with an all-zero bias vector."""
    zero_bias = tensor.zeros_like(x[0, :])
    return crossentropy_softmax_max_and_argmax_1hot_with_bias(
        x, zero_bias, y_idx, **kwargs)
class CrossentropyCategorical1HotGrad(gof.Op):
    """Gradient Op for CrossentropyCategorical1Hot.

    Produces d(cost)/d(coding_dist): zero everywhere except at the true
    class column of each row, where it is -g_y[i] / coding_dist[i, t_i].
    """
    def __eq__(self, other):
        # Stateless Op: all instances of this class are interchangeable.
        return type(self) == type(other)
    def __hash__(self):
        return tensor.hashtype(self)
    def __str__(self):
        return self.__class__.__name__
    def make_node(self, g_y, coding_dist, true_one_of_n):
        # Output has the same type (dtype/broadcast pattern) as coding_dist.
        return Apply(self, [g_y, coding_dist, true_one_of_n],
                     [coding_dist.type()])
    def perform(self, node, inp, out):
        g_y, coding_dist, true_one_of_n = inp
        g_coding_strg, = out
        g_coding = numpy.zeros_like(coding_dist)
        # Only the entry at the true class index of each row is non-zero:
        # derivative of -log(p) wrt p is -1/p, scaled by the incoming grad.
        for i in xrange(len(g_y)):
            g_coding[i, true_one_of_n[i]] = -g_y[i] / coding_dist[i,
                                                          true_one_of_n[i]]
        g_coding_strg[0] = g_coding
    def infer_shape(self, node, in_shapes):
        # Gradient has the shape of coding_dist (input 1).
        return [in_shapes[1]]
crossentropy_categorical_1hot_grad = CrossentropyCategorical1HotGrad()
class CrossentropyCategorical1Hot(gof.Op):
"""Compute the cross entropy between a coding distribution and
a true distribution of the form [0, 0, ... 0, 1, 0, ..., 0]
.. math::
y[i] = - \log(coding_dist[i, one_of_n[i])
:note: In the case that the coding distribution is the output of a
softmax, an application of this Op will probably be optimized
away in favour of one with a C implementation.
"""
def __eq__(self, other):
return type(self) == type(other)
def __hash__(self):
return tensor.hashtype(self)
def __str__(self):
return self.__class__.__name__
def make_node(self, coding_dist, true_one_of_n):
# Repository: anooptp/maml
"""
Selectors
"""
import inspect
from collections import defaultdict
from itertools import combinations
from typing import List, Optional, Union, Dict, Callable
import numpy as np
from scipy.linalg import lstsq
from scipy.optimize import minimize, NonlinearConstraint
from sklearn.linear_model import LinearRegression
from sklearn.metrics import get_scorer
from joblib import Parallel, delayed
# pylint: disable=R0201
class BaseSelector:
    """
    Feature selector. This is meant to work on relatively smaller
    number of features

    Subclasses define the objective via ``construct_loss`` (required) and
    may supply ``construct_jac`` / ``construct_constraints``; ``select``
    then runs scipy.optimize.minimize and thresholds the coefficients.
    """
    def __init__(self, coef_thres: float = 1e-6, method: str = "SLSQP"):
        """
        Base selector
        Args:
            coef_thres (float): threshold to discard certain coefficents
            method (str): optimization methods in scipy.optmize.minimize
        """
        self.coef_thres = coef_thres
        self.is_fitted = False
        # Full-length coefficient vector after select(); entries below
        # coef_thres are zeroed out.
        self.coef_: Optional[np.ndarray] = None
        self.method = method
        # Indices of features whose |coef| survived the threshold.
        self.indices: Optional[np.ndarray] = None
    def select(self, x: np.ndarray, y: np.ndarray, options: Optional[Dict] = None) -> Optional[np.ndarray]:
        """
        Select feature indices from x
        Args:
            x (np.ndarray): MxN input data array
            y (np.ndarray): M output targets
            options (dict): options in the optimizations provided
                to scipy.optimize.minimize
        Returns: list of int indices

        Raises:
            RuntimeError: if the optimizer does not report success
                (res.status != 0).
        """
        # n_data is unused; kept for readability of the unpacking.
        n_data, n_dim = x.shape
        options = options or {"maxiter": 1e4, "ftol": 1e-12}
        # Start from the all-zero coefficient vector.
        res = minimize(
            lambda beta: self.construct_loss(x=x, y=y, beta=beta),
            [0] * n_dim,
            jac=self.construct_jac(x=x, y=y),
            method=self.method,
            constraints=self.construct_constraints(x=x, y=y),
            options=options,
        )
        if res.status != 0:
            raise RuntimeError(f"Not converged, status {res.status}")
        self.is_fitted = True
        self.coef_ = res.x
        # output coefficient indices that are above certain thresholds
        self.indices = np.where(np.abs(self.coef_) > self.coef_thres)[0]  # type: ignore
        self.coef_[np.where(np.abs(self.coef_) <= self.coef_thres)[0]] = 0.0  # type: ignore
        return self.indices
    def construct_loss(self, x: np.ndarray, y: np.ndarray, beta: np.ndarray) -> float:
        """
        Get loss function from data and tentative coefficients beta
        Args:
            x (np.ndarray): MxN input data array
            y (np.ndarray): M output targets
            beta (np.ndarray): N coefficients
        Returns: loss value
        """
        raise NotImplementedError
    def construct_constraints(
        self, x: np.ndarray, y: np.ndarray, beta: Optional[np.ndarray] = None
    ) -> Optional[Union[Dict, List, NonlinearConstraint]]:
        """
        Get constraints dictionary from data, e.g.,
        {"func": lambda beta: fun(x, y, beta), "type": "ineq"}
        Args:
            x (np.ndarray): MxN input data array
            y (np.ndarray): M output targets
            beta (np.ndarray): parameter to optimize
        Returns: dict of constraints (None here; subclasses override)
        """
        return None
    def construct_jac(self, x: np.ndarray, y: np.ndarray) -> Optional[Callable]:
        """
        Jacobian of cost function
        Args:
            x (np.ndarray): MxN input data array
            y (np.ndarray): M output targets
        Returns: Jacobian function (None here; subclasses override)
        """
        return None
    def evaluate(self, x: np.ndarray, y: np.ndarray, metric: str = "neg_mean_absolute_error") -> float:
        """
        Evaluate the linear models using x, and y test data
        Args:
            x (np.ndarray): MxN input data array
            y (np.ndarray): M output targets
            metric (str): scorer function, used with
                sklearn.metrics.get_scorer
        Returns: the scorer value on the selected-feature submodel
        """
        metric_func = get_scorer(metric)
        # Build a LinearRegression shell whose coefficients are the
        # selected (non-zero) entries; no fitting is performed.
        lr = LinearRegression(fit_intercept=False)
        lr.coef_ = self.coef_[self.indices]  # type: ignore
        lr.intercept_ = 0
        return metric_func(lr, x[:, self.indices], y)
    def get_coef(self) -> Optional[np.ndarray]:
        """
        Get coefficients
        Returns: the coefficients array (None before select() is run)
        """
        return self.coef_
    def get_feature_indices(self) -> Optional[np.ndarray]:
        """
        Get selected feature indices
        Returns: indices array (None before select() is run)
        """
        return self.indices
    def predict(self, x: np.ndarray) -> np.ndarray:
        """
        Predict the results using sparsified coefficients
        Args:
            x (np.ndarray): design matrix
        Returns: predicted target vector
        """
        return x[:, self.indices].dot(self.coef_[self.indices])  # type: ignore
    def compute_residual(self, x: np.ndarray, y: np.ndarray) -> np.ndarray:
        """
        Compute the prediction residual y - x @ coef
        Args:
            x (np.ndarray): design matrix
            y (np.ndarray): target vector
        Returns: residual vector
        """
        return y - self.predict(x)
    @classmethod
    def _get_param_names(cls):
        # Mirrors sklearn.base.BaseEstimator: introspect __init__ to find
        # settable constructor parameters.
        init = getattr(cls.__init__, "deprecated_original", cls.__init__)
        if init is object.__init__:
            return []
        init_signature = inspect.signature(init)
        parameters = [p for p in init_signature.parameters.values() if p.name != "self" and p.kind != p.VAR_KEYWORD]
        # NOTE(review): VAR_KEYWORD params were already filtered out above,
        # so this defensive check can never fire as written.
        for p in parameters:
            if p.kind == p.VAR_KEYWORD:
                raise RuntimeError(
                    "scikit-learn estimators should always "
                    "specify their parameters in the signature"
                    " of their __init__ (no varargs)."
                    " %s with constructor %s doesn't "
                    " follow this convention." % (cls, init_signature)
                )
        return sorted([p.name for p in parameters])
    def get_params(self):
        """
        Get params for this selector
        Returns: mapping of string to any
            parameter names mapped to their values
        """
        out = {}
        for key in self._get_param_names():
            value = getattr(self, key, None)
            out[key] = value
        return out
    def set_params(self, **params):
        """
        Set the parameters of this selector
        Args:
            **params: dict
                Selector parametrs; ``component__param`` keys are routed
                to nested sub-selectors via their own set_params.
        Returns:
            self: selector instance
        """
        if not params:
            # Simple optimization to gain speed (inspect is slow)
            return self
        valid_params = self.get_params()
        nested_params = defaultdict(dict)  # grouped by prefix
        for key, value in params.items():
            key, delim, sub_key = key.partition("__")
            if key not in valid_params:
                raise ValueError(
                    "Invalid parameter %s for selector %s. "
                    "Check the list of available parameters "
                    "with `estimator.get_params().keys()`." % (key, self)
                )
            if delim:
                nested_params[key][sub_key] = value
            else:
                setattr(self, key, value)
                valid_params[key] = value
        for key, sub_params in nested_params.items():
            valid_params[key].set_params(**sub_params)
        return self
class DantzigSelector(BaseSelector):
    """
    Equation 11 in
    https://orfe.princeton.edu/~jqfan/papers/06/SIS.pdf
    and reference in https://projecteuclid.org/download/pdfview_1/euclid.aos/1201012958

    Minimizes ||beta||_1 subject to ||X^T (y - X beta)||_inf <= lambd * sigma.
    """
    def __init__(self, lambd, sigma=1.0, **kwargs):
        """
        Dantzig selector
        Args:
            lambd (float): tunable regularization parameter bounding the
                correlated residual constraint
            sigma (float): standard deviation of the error
            **kwargs: forwarded to BaseSelector (coef_thres, method)
        """
        self.lambd = lambd
        self.sigma = sigma
        super().__init__(**kwargs)
    def construct_loss(self, x, y, beta) -> float:
        """
        L1 objective: sum of absolute coefficients.
        Args:
            x (np.ndarray): MxN input data array (unused in the objective)
            y (np.ndarray): M output targets (unused in the objective)
            beta (np.ndarray): N coefficients
        Returns: loss value
        """
        return np.sum(np.abs(beta)).item()
    def construct_jac(self, x: np.ndarray, y: np.ndarray) -> Callable:
        """
        Jacobian of the L1 objective (subgradient).
        Args:
            x: unused
            y: unused
        Returns: callable mapping beta -> gradient vector
        """
        def _jac(beta):
            sign = np.sign(beta)
            # At beta_j == 0 the subgradient is ambiguous; pick +1.
            sign[np.abs(sign) < 0.1] = 1.0
            sign *= 30.0  # multiply the gradients to get better convergence
            return sign
        return _jac
    def construct_constraints(
        self, x: np.ndarray, y: np.ndarray, beta: Optional[np.ndarray] = None
    ) -> NonlinearConstraint:
        """
        Dantzig constraint ||X^T (y - X beta)||_inf <= lambd * sigma.
        Args:
            x (np.ndarray): MxN input data array
            y (np.ndarray): M output targets
            beta (np.ndarray): placeholder
        Returns: scipy NonlinearConstraint
        """
        def _constraint(beta):
            # FIX: np.infty was a deprecated alias removed in NumPy 2.0;
            # np.inf is the supported spelling.
            return np.linalg.norm(x.T @ (y - x @ beta), np.inf)
        def _jac(beta):
            # Gradient of the max-abs component of X^T(y - X beta):
            # only the argmax entry contributes.
            vec = x.T @ (y - x @ beta)
            max_ind = np.argmax(np.abs(vec))
            der = np.zeros_like(vec.ravel())
            der[max_ind] = np.sign(vec[max_ind])
            return -x.T.dot(x).dot(der)
        return NonlinearConstraint(_constraint, -np.inf, self.lambd * self.sigma, jac=_jac)
class PenalizedLeastSquares(BaseSelector):
    """
    Penalized least squares. In addition to minimizing the sum of squares loss,
    it adds an additional penalty to the coefficients
    """
    def construct_loss(self, x: np.ndarray, y: np.ndarray, beta: np.ndarray) -> float:
        """
        Construct the loss function. An extra penalty term is added
        Args:
            x (np.ndarray): MxN input data array
            y (np.ndarray): M output targets
            beta (np.ndarray): N coefficients
        Returns: sum of errors
        """
        n_samples = x.shape[0]
        residual = y - x.dot(beta)
        # Half mean-squared error plus the subclass-defined penalty.
        return 1.0 / (2 * n_samples) * np.sum(residual ** 2) + self.penalty(beta, x=x, y=y)
    def _sse_jac(self, x, y, beta):
        # Gradient of the half-MSE term with respect to beta.
        n_samples = x.shape[0]
        residual = y - x.dot(beta)
        return 1.0 / n_samples * residual.T.dot(-x)
    def _penalty_jac(self, x, y, beta):
        # Base class has no penalty, hence a zero gradient contribution.
        return 0.0
    def construct_jac(self, x: np.ndarray, y: np.ndarray):
        """
        Construct the jacobian of loss function
        Args:
            x (np.ndarray): MxN input data array
            y (np.ndarray): M output targets
        Returns: jacobian vector
        """
        def _jac(beta):
            return self._sse_jac(x, y, beta) + self._penalty_jac(x, y, beta)
        return _jac
    def construct_constraints(
        self, x: np.ndarray, y: np.ndarray, beta: Optional[np.ndarray] = None
    ) -> List[Optional[Dict]]:
        """
        No constraints
        Args:
            x (np.ndarray): MxN input data array
            y (np.ndarray): M output targets
            beta (np.ndarray): placeholder only
        Returns: a list of dictionary constraints
        """
        return []
    def penalty(self, beta: np.ndarray, x: Optional[np.ndarray] = None, y: Optional[np.ndarray] = None) -> float:
        """
        Calculate the penalty from input x, output y and coefficient beta
        Args:
            x (np.ndarray): MxN input data array
            y (np.ndarray): M output targets
            beta (np.ndarray): N coefficients
        Returns: penalty value (zero in this base implementation)
        """
        return 0.0
class SCAD(PenalizedLeastSquares):
"""
Smoothly clipped absolute deviation (SCAD),
equation 12 and 13 in https://orfe.princeton.edu/~jqfan/papers/06/SIS.pdf
"""
def __init__(self, lambd: Union[float, np.ndarray], a: float = 3.7, **kwargs):
"""
Smoothly clipped absolute deviation.
Args:
lambd (float or list of floats): The weights for the penalty
a (float): hyperparameter in SCAD penalty
"""
self.lambd = lambd
self.a = a
super().__init__(**kwargs)
def penalty(self, beta: np.ndarray, x: Optional[np.ndarray] = None, y: Optional[np.ndarray] = None) -> float:
"""
Calculate the SCAD penalty from input x, output y
and coefficient beta
Args:
beta (np.ndarray): N coefficients
x (np.ndarray): MxN input data array
y (np.ndarray): M output targets
Returns: penalty value
"""
beta_abs = np.abs(beta)
penalty = (
self.lambd * beta_abs * (beta_abs <= self.lambd)
+ -(beta_abs ** 2 - 2 * self.a * self.lambd | |
# encoding: utf-8
import math
import time
import numpy as np
import random
import torch
from torch import nn
from torch.utils.data import DataLoader
from utils.loss import euclidean_dist, hard_example_mining
from utils.meters import AverageMeter
# from tensorboardX import SummaryWriter
import torchvision.utils as vutils
import cv2
class cls_tripletTrainer_gumble:
    """Training loop driver for the 'gumble' model variant.

    Runs one optimization step per batch and, after each epoch, logs the
    model's positive/negative attention masks (self.pmask / self.nmask,
    produced by the 'gumble' branch of _forward) as heatmap images.
    """
    def __init__(self, opt, model, optimzier, criterion, summary_writer):
        # opt: parsed options; expected to provide at least print_freq,
        # random_crop and block_choice (inferred from usage below).
        self.opt = opt
        self.model = model
        self.optimizer= optimzier
        self.criterion = criterion
        self.summary_writer = summary_writer
    def train(self, epoch, data_loader):
        """Run one training epoch over data_loader."""
        self.model.train()
        batch_time = AverageMeter()
        data_time = AverageMeter()
        losses = AverageMeter()
        start = time.time()
        for i, inputs in enumerate(data_loader):
            data_time.update(time.time() - start)
            # model optimizer
            self._parse_data(inputs)
            self._forward(epoch)
            self.optimizer.zero_grad()
            self._backward()
            self.optimizer.step()
            batch_time.update(time.time() - start)
            losses.update(self.loss.item())
            # tensorboard
            global_step = epoch * len(data_loader) + i
            self.summary_writer.add_scalar('loss', self.loss.item(), global_step)
            self.summary_writer.add_scalar('lr', self.optimizer.param_groups[0]['lr'], global_step)
            start = time.time()
            if (i + 1) % self.opt.print_freq == 0:
                print('Epoch: [{}][{}/{}]\t'
                      'Batch Time {:.3f} ({:.3f})\t'
                      'Data Time {:.3f} ({:.3f})\t'
                      'Loss {:.3f} ({:.3f})\t'
                      .format(epoch, i + 1, len(data_loader),
                              batch_time.val, batch_time.mean,
                              data_time.val, data_time.mean,
                              losses.val, losses.mean))
        param_group = self.optimizer.param_groups
        print('Epoch: [{}]\tEpoch Time {:.3f} s\tLoss {:.3f}\t'
              'Lr {:.2e}'
              .format(epoch, batch_time.sum, losses.mean, param_group[0]['lr']))
        print()
        # Normalize the positive mask to [0, 255] and render as a JET
        # heatmap (HWC -> CHW) for tensorboard.
        x = self.pmask.detach().cpu().numpy()[0][0]
        x = x - np.min(x)
        x = x / np.max(x)
        x = np.uint8(x*255)
        x = np.expand_dims(x, -1)
        x = cv2.applyColorMap(x, cv2.COLORMAP_JET)
        x = x.transpose((2, 0, 1))
        self.summary_writer.add_image('pmask', np.uint8(x), epoch)
        # self.summary_writer.add_image('Mask1', x[1], epoch)
        # Same treatment for the negative mask.
        x_ = self.nmask.detach().cpu().numpy()[0][0]
        x_ = x_ - np.min(x_)
        x_ = x_ / np.max(x_)
        x_ = np.uint8(x_ * 255)
        x_ = np.expand_dims(x_, -1)
        x_ = cv2.applyColorMap(x_, cv2.COLORMAP_JET)
        x_ = x_.transpose((2, 0, 1))
        self.summary_writer.add_image('nmask', np.uint8(x_), epoch)
    def _parse_data(self, inputs):
        """Unpack a batch and move images/labels to the GPU.

        Optionally zeroes everything outside a random 2w-tall horizontal
        band (a crude random crop) with probability 0.7 when
        opt.random_crop is set.
        """
        imgs, pids, _ = inputs
        if self.opt.random_crop and random.random() > 0.3:
            h, w = imgs.size()[-2:]
            start = int((h-2*w)*random.random())
            mask = imgs.new_zeros(imgs.size())
            mask[:, :, start:start+2*w, :] = 1
            imgs = imgs * mask
        '''
        if random.random() > 0.5:
            h, w = imgs.size()[-2:]
            for attempt in range(100):
                area = h * w
                target_area = random.uniform(0.02, 0.4) * area
                aspect_ratio = random.uniform(0.3, 3.33)
                ch = int(round(math.sqrt(target_area * aspect_ratio)))
                cw = int(round(math.sqrt(target_area / aspect_ratio)))
                if cw < w and ch < h:
                    x1 = random.randint(0, h - ch)
                    y1 = random.randint(0, w - cw)
                    imgs[:, :, x1:x1+h, y1:y1+w] = 0
                    break
        '''
        self.data = imgs.cuda()
        self.target = pids.cuda()
    def _forward(self, epoch):
        """Run the model and compute self.loss per opt.block_choice."""
        if self.opt.block_choice=='position':
            predicted_mask, label_mask = self.model(self.data, self.target)
            self.loss = self.criterion(predicted_mask, label_mask)
        elif self.opt.block_choice=='position_reg':
            score, feat, predicted_mask, label_mask = self.model(self.data, self.target)
            self.loss = self.criterion(score, feat, self.target, predicted_mask, label_mask)
        elif self.opt.block_choice=='prior_posterior':
            cls_score, posterior_mu, posterior_sigma, prior_mu, prior_sigma = self.model(self.data, self.target)
            self.loss = self.criterion(cls_score, posterior_mu, posterior_sigma, prior_mu, prior_sigma, self.target)
        elif self.opt.block_choice=='cross_attention':
            score, feat, score_tensor = self.model(self.data, self.target)
            self.loss = self.criterion(score, feat, score_tensor, self.target)
        elif self.opt.block_choice=='gumble':
            # Saves the masks so train() can visualize them after the epoch.
            score, feat, self.pmask, self.nmask = self.model(self.data, epoch, self.target)
            self.loss = self.criterion(score, feat, self.target)
        else:
            score, feat = self.model(self.data, self.target)
            self.loss = self.criterion(score, feat, self.target)
    def _backward(self):
        """Backpropagate the current loss."""
        self.loss.backward()
class cls_tripletTrainer:
    """Generic classification + triplet-loss training loop driver.

    Same structure as cls_tripletTrainer_gumble but without the
    gumble/mask-visualization branch.
    """
    def __init__(self, opt, model, optimzier, criterion, summary_writer):
        self.opt = opt
        self.model = model
        self.optimizer= optimzier
        self.criterion = criterion
        self.summary_writer = summary_writer
    def train(self, epoch, data_loader):
        """Run one training epoch over data_loader."""
        self.model.train()
        batch_time = AverageMeter()
        data_time = AverageMeter()
        losses = AverageMeter()
        start = time.time()
        for i, inputs in enumerate(data_loader):
            data_time.update(time.time() - start)
            # model optimizer
            self._parse_data(inputs)
            self._forward()
            self.optimizer.zero_grad()
            self._backward()
            self.optimizer.step()
            batch_time.update(time.time() - start)
            losses.update(self.loss.item())
            # tensorboard
            global_step = epoch * len(data_loader) + i
            self.summary_writer.add_scalar('loss', self.loss.item(), global_step)
            self.summary_writer.add_scalar('lr', self.optimizer.param_groups[0]['lr'], global_step)
            start = time.time()
            if (i + 1) % self.opt.print_freq == 0:
                print('Epoch: [{}][{}/{}]\t'
                      'Batch Time {:.3f} ({:.3f})\t'
                      'Data Time {:.3f} ({:.3f})\t'
                      'Loss {:.3f} ({:.3f})\t'
                      .format(epoch, i + 1, len(data_loader),
                              batch_time.val, batch_time.mean,
                              data_time.val, data_time.mean,
                              losses.val, losses.mean))
        param_group = self.optimizer.param_groups
        print('Epoch: [{}]\tEpoch Time {:.3f} s\tLoss {:.3f}\t'
              'Lr {:.2e}'
              .format(epoch, batch_time.sum, losses.mean, param_group[0]['lr']))
        print()
    def _parse_data(self, inputs):
        """Unpack a batch and move images/labels to the GPU.

        Optionally zeroes everything outside a random 2w-tall horizontal
        band with probability 0.7 when opt.random_crop is set.
        """
        imgs, pids, _ = inputs
        if self.opt.random_crop and random.random() > 0.3:
            h, w = imgs.size()[-2:]
            start = int((h-2*w)*random.random())
            mask = imgs.new_zeros(imgs.size())
            mask[:, :, start:start+2*w, :] = 1
            imgs = imgs * mask
        '''
        if random.random() > 0.5:
            h, w = imgs.size()[-2:]
            for attempt in range(100):
                area = h * w
                target_area = random.uniform(0.02, 0.4) * area
                aspect_ratio = random.uniform(0.3, 3.33)
                ch = int(round(math.sqrt(target_area * aspect_ratio)))
                cw = int(round(math.sqrt(target_area / aspect_ratio)))
                if cw < w and ch < h:
                    x1 = random.randint(0, h - ch)
                    y1 = random.randint(0, w - cw)
                    imgs[:, :, x1:x1+h, y1:y1+w] = 0
                    break
        '''
        self.data = imgs.cuda()
        self.target = pids.cuda()
    def _forward(self):
        """Run the model and compute self.loss per opt.block_choice."""
        if self.opt.block_choice=='position':
            predicted_mask, label_mask = self.model(self.data, self.target)
            self.loss = self.criterion(predicted_mask, label_mask)
        elif self.opt.block_choice=='position_reg':
            score, feat, predicted_mask, label_mask = self.model(self.data, self.target)
            self.loss = self.criterion(score, feat, self.target, predicted_mask, label_mask)
        elif self.opt.block_choice=='prior_posterior':
            cls_score, posterior_mu, posterior_sigma, prior_mu, prior_sigma = self.model(self.data, self.target)
            self.loss = self.criterion(cls_score, posterior_mu, posterior_sigma, prior_mu, prior_sigma, self.target)
        elif self.opt.block_choice=='cross_attention':
            score, feat, score_tensor = self.model(self.data, self.target)
            self.loss = self.criterion(score, feat, score_tensor, self.target)
        else:
            # NOTE(review): unlike the other trainers in this file, the
            # default branch calls the model WITHOUT the target — confirm
            # this asymmetry is intentional for this model variant.
            score, feat = self.model(self.data)
            self.loss = self.criterion(score, feat, self.target)
    def _backward(self):
        """Backpropagate the current loss."""
        self.loss.backward()
class cls_tripletTrainer_for_prior_posterior:
    """Training loop driver for the prior/posterior (variational) model.

    In the 'prior_posterior' branch the criterion returns the individual
    loss terms and the distribution statistics, which are logged to
    tensorboard each step.
    """
    def __init__(self, opt, model, optimzier, criterion, summary_writer):
        self.opt = opt
        self.model = model
        self.optimizer = optimzier
        self.criterion = criterion
        self.summary_writer = summary_writer
    def train(self, epoch, data_loader):
        """Run one training epoch over data_loader."""
        self.model.train()
        batch_time = AverageMeter()
        data_time = AverageMeter()
        losses = AverageMeter()
        start = time.time()
        for i, inputs in enumerate(data_loader):
            data_time.update(time.time() - start)
            # model optimizer
            self._parse_data(inputs)
            self._forward()
            self.optimizer.zero_grad()
            self._backward()
            self.optimizer.step()
            batch_time.update(time.time() - start)
            losses.update(self.loss.item())
            # tensorboard
            # Log every loss component and the latent statistics set by
            # the 'prior_posterior' branch of _forward.
            global_step = epoch * len(data_loader) + i
            self.summary_writer.add_scalar('loss', self.loss.item(), global_step)
            self.summary_writer.add_scalar('lkd_loss', self.lkd_loss.item(), global_step)
            self.summary_writer.add_scalar('xent_loss', self.xent_loss.item(), global_step)
            self.summary_writer.add_scalar('triplet_loss', self.triplet_loss.item(), global_step)
            self.summary_writer.add_scalar('posterior_mu', self.posterior_mu.mean().item(), global_step)
            self.summary_writer.add_scalar('posterior_sigma', self.posterior_sigma.mean().item(), global_step)
            self.summary_writer.add_scalar('prior_mu', self.prior_mu.mean().item(), global_step)
            self.summary_writer.add_scalar('prior_sigma', self.prior_sigma.mean().item(), global_step)
            self.summary_writer.add_scalar('lr', self.optimizer.param_groups[0]['lr'], global_step)
            start = time.time()
            if (i + 1) % self.opt.print_freq == 0:
                print('Epoch: [{}][{}/{}]\t'
                      'Batch Time {:.3f} ({:.3f})\t'
                      'Data Time {:.3f} ({:.3f})\t'
                      'Loss {:.3f} ({:.3f})\t'
                      .format(epoch, i + 1, len(data_loader),
                              batch_time.val, batch_time.mean,
                              data_time.val, data_time.mean,
                              losses.val, losses.mean))
                # print('Loss {:.3f}\txent {:.3f}\tlkd {:.3f}\ttriplet {:.3f}'
                #       .format(losses.mean, self.xent_loss.item(), self.lkd_loss.item(),
                #               self.triplet_loss.item()))
                # print('z_mu {:.3f}\tz_var {:.3f}\tc_mu {:.3f}\tc_var {:.6f}'
                #       .format(self.posterior_mu.mean().item(), self.posterior_sigma.mean().item(),
                #               self.prior_mu.mean().item(), self.prior_sigma.mean().item()))
        param_group = self.optimizer.param_groups
        print('Epoch: [{}]\tEpoch Time {:.3f} s\tLoss {:.3f}\txent {:.3f}\tlkd {:.3f}\ttriplet {:.3f}\r'
              'z_mu {:.3f}\tz_var {:.3f}\tc_mu {:.3f}\tc_var {:.6f}\t'
              'Lr {:.2e}'
              .format(epoch, batch_time.sum, losses.mean, self.xent_loss.item(), self.lkd_loss.item(), self.triplet_loss.item(),
                      self.posterior_mu.mean().item(), self.posterior_sigma.mean().item(),
                      self.prior_mu.mean().item(), self.prior_sigma.mean().item(),
                      param_group[0]['lr']))
        print()
    def _parse_data(self, inputs):
        """Unpack a batch and move images/labels to the GPU.

        Optionally zeroes everything outside a random 2w-tall horizontal
        band with probability 0.7 when opt.random_crop is set.
        """
        imgs, pids, _ = inputs
        if self.opt.random_crop and random.random() > 0.3:
            h, w = imgs.size()[-2:]
            start = int((h-2*w)*random.random())
            mask = imgs.new_zeros(imgs.size())
            mask[:, :, start:start+2*w, :] = 1
            imgs = imgs * mask
        '''
        if random.random() > 0.5:
            h, w = imgs.size()[-2:]
            for attempt in range(100):
                area = h * w
                target_area = random.uniform(0.02, 0.4) * area
                aspect_ratio = random.uniform(0.3, 3.33)
                ch = int(round(math.sqrt(target_area * aspect_ratio)))
                cw = int(round(math.sqrt(target_area / aspect_ratio)))
                if cw < w and ch < h:
                    x1 = random.randint(0, h - ch)
                    y1 = random.randint(0, w - cw)
                    imgs[:, :, x1:x1+h, y1:y1+w] = 0
                    break
        '''
        self.data = imgs.cuda()
        self.target = pids.cuda()
    def _forward(self):
        """Run the model and compute self.loss (plus components) per opt.block_choice."""
        if self.opt.block_choice=='position':
            predicted_mask, label_mask = self.model(self.data, self.target)
            self.loss = self.criterion(predicted_mask, label_mask)
        elif self.opt.block_choice=='position_reg':
            score, feat, predicted_mask, label_mask = self.model(self.data, self.target)
            self.loss = self.criterion(score, feat, self.target, predicted_mask, label_mask)
        elif self.opt.block_choice=='prior_posterior':
            # Criterion returns total loss, its three components and the
            # posterior/prior statistics; all are stashed for logging.
            cls_score, posterior_mu, posterior_sigma, prior_mu, prior_sigma = self.model(self.data)
            self.loss, self.lkd_loss, self.xent_loss, self.triplet_loss, self.posterior_mu, self.posterior_sigma, self.prior_mu, self.prior_sigma = \
                self.criterion(cls_score, posterior_mu, posterior_sigma, prior_mu, prior_sigma, self.target)
        else:
            score, feat = self.model(self.data, self.target)
            self.loss = self.criterion(score, feat, self.target)
    def _backward(self):
        """Backpropagate the current loss."""
        self.loss.backward()
class cls_tripletTrainer_for_lgm:
def __init__(self, opt, model, optimzier, criterion, summary_writer):
self.opt = opt
self.model = model
self.optimizer = optimzier
self.criterion = criterion
self.summary_writer = summary_writer
def train(self, epoch, data_loader):
self.model.train()
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
start = time.time()
for i, inputs in enumerate(data_loader):
data_time.update(time.time() - start)
# model optimizer
self._parse_data(inputs)
self._forward()
self.optimizer.zero_grad()
self._backward()
self.optimizer.step()
batch_time.update(time.time() - start)
losses.update(self.loss.item())
# tensorboard
global_step = epoch * len(data_loader) + i
self.summary_writer.add_scalar('loss', self.loss.item(), global_step)
self.summary_writer.add_scalar('lkd_loss', self.lkd_loss.item(), global_step)
self.summary_writer.add_scalar('xent_loss', self.xent_loss.item(), global_step)
self.summary_writer.add_scalar('triplet_loss', self.triplet_loss.item(), global_step)
self.summary_writer.add_scalar('posterior_mu', self.posterior_mu.mean().item(), global_step)
self.summary_writer.add_scalar('prior_mu', self.prior_mu.mean().item(), global_step)
self.summary_writer.add_scalar('prior_sigma', self.prior_sigma.mean().item(), global_step)
self.summary_writer.add_scalar('lr', self.optimizer.param_groups[0]['lr'], global_step)
start = time.time()
if (i + 1) % self.opt.print_freq == 0:
print('Epoch: [{}][{}/{}]\t'
'Batch Time {:.3f} ({:.3f})\t'
'Data Time {:.3f} ({:.3f})\t'
'Loss {:.3f} ({:.3f})\t'
.format(epoch, i + 1, len(data_loader),
batch_time.val, batch_time.mean,
data_time.val, data_time.mean,
losses.val, losses.mean))
param_group = self.optimizer.param_groups
print('Epoch: [{}]\tEpoch Time {:.3f} s\tLoss {:.3f}\txent {:.3f}\tlkd {:.3f}\ttriplet {:.3f}\r'
'z_mu {:.3f}\tc_mu {:.3f}\tc_var {:.3f}\t'
'Lr {:.2e}'
.format(epoch, batch_time.sum, losses.mean, self.xent_loss.item(), self.lkd_loss.item(), self.triplet_loss.item(),
self.posterior_mu.mean().item(),
self.prior_mu.mean().item(), self.prior_sigma.mean().item(),
param_group[0]['lr']))
print()
def _parse_data(self, inputs):
imgs, pids, _ = inputs
if self.opt.random_crop and random.random() > 0.3:
h, w = imgs.size()[-2:]
start = int((h-2*w)*random.random())
mask = imgs.new_zeros(imgs.size())
mask[:, :, start:start+2*w, :] = 1
imgs = | |
""" IBM based speech recognition service """
import time
import json
import collections
import os
import os.path
import asyncio
import base64
import websockets
import pyaudio
import webrtcvad
from dotenv import load_dotenv
from MqttService import MqttService
from io_buffer import BytesLoop
# ibm
# Number of audio frames read per buffer from the microphone stream.
CHUNK = 1024
# 16-bit signed integer samples.
FORMAT = pyaudio.paInt16
# Even if your default input is multi channel (like a webcam mic),
# it's really important to only record 1 channel, as the STT service
# does not do anything useful with stereo. You get a lot of "hmmm"
# back.
CHANNELS = 1
# Rate is important, nothing works without it. This is a pretty
# standard default. If you have an audio device that requires
# something different, change this.
RATE = 44100
# Maximum length (seconds) of a single recording window.
RECORD_SECONDS = 10
# Accumulator for finalized transcription results.
FINALS = []
# Pull IBM credentials/region settings from a local .env file.
load_dotenv()
def get_region_map():
    """Map IBM Cloud region codes to their Watson STT gateway hosts."""
    region_hosts = {
        'us-east': 'gateway-wdc.watsonplatform.net',
        'us-south': 'stream.watsonplatform.net',
        'eu-gb': 'stream.watsonplatform.net',
        'eu-de': 'stream-fra.watsonplatform.net',
        'au-syd': 'gateway-syd.watsonplatform.net',
        'jp-tok': 'gateway-syd.watsonplatform.net',
    }
    return region_hosts
def get_url():
    """Build the websocket URL for the transcription service from env vars.

    Resolution order:
      1. IBM_SPEECH_TO_TEXT_REGION -> host looked up in get_region_map()
      2. IBM_SPEECH_TO_TEXT_URL    -> used verbatim (downloaded credentials)
      3. fallback                  -> the us-east gateway host
    """
    url_template = ("wss://{}/speech-to-text/api/v1/recognize"
                    + "?model=en-US_BroadbandModel")
    # if region is set, use lookups
    # https://console.bluemix.net/docs/services/speech-to-text/websockets.html#websockets
    if os.environ.get('IBM_SPEECH_TO_TEXT_REGION', False):
        host = get_region_map().get(os.environ.get('IBM_SPEECH_TO_TEXT_REGION'))
        return url_template.format(host)
    # if url from downloaded creds
    elif os.environ.get('IBM_SPEECH_TO_TEXT_URL', False):
        return os.environ.get('IBM_SPEECH_TO_TEXT_URL')
    # fallback to us-east
    else:
        # BUG FIX: the original formatted the literal region code
        # 'us-east' into the URL as the hostname ("wss://us-east/...");
        # resolve it through the region map like branch 1 does.
        return url_template.format(get_region_map().get('us-east'))
def get_auth():
    """Return the (user, password) pair for HTTP Basic auth.

    Watson API-key auth uses the literal user name "apikey" with the key
    from the IBM_SPEECH_TO_TEXT_APIKEY environment variable as password.
    """
    api_key = str(os.environ.get('IBM_SPEECH_TO_TEXT_APIKEY'))
    return ("apikey", api_key)
def get_headers():
    """Build the HTTP Basic Authorization header for the STT websocket."""
    credentials = ":".join(get_auth())
    encoded = base64.b64encode(credentials.encode()).decode()
    return {"Authorization": "Basic " + encoded}
def get_init_params():
    """Parameters for the Watson 'start' action opening a recognition session."""
    # Audio is sent as 16 kHz mono linear PCM; interim (partial) results
    # and per-word confidences are disabled.
    session_params = {
        "word_confidence": False,
        "content_type": "audio/l16;rate=16000;channels=1",
        "action": "start",
        "interim_results": False,
        "speech_detector_sensitivity": 0.5,
        "background_audio_suppression": 0.5,
    }
    return session_params
class IbmAsrService(MqttService):
"""
This class listens for mqtt audio packets and publishes asr/text messages
It integrates silence detection to slice up audio and detect the end of a spoken message
It is designed to be run as a thread by calling run(run_event) (implemented in MqttService)
To activate the service for a site send a message - hermod/<site>/asr/activate
Once activated, the service will start listening for audio packets when you send
- hermod/<site>/asr/start
The service will continue to listen and emit hermod/<site>/asr/text messages every time the
deepspeech engine can recognise some non empty text
A hermod/<site>/asr/stop message will disable recognition while still leaving a loaded
deepspeech transcription instance for the site so it can be reenabled instantly
A hermod/<site>/asr/deactivate message will garbage collect any resources related to the site.
"""
FORMAT = pyaudio.paInt16
# Network/VAD rate-space
RATE_PROCESS = 16000
CHANNELS = 1
BLOCKS_PER_SECOND = 50
    def __init__(
            self,
            config,
            loop
    ):
        """Constructor.

        Args:
            config: service configuration mapping; reads
                config['services']['IbmAsrService'] for vad_sensitivity.
            loop: asyncio event loop used to schedule tasks/timeouts.
        """
        self.config = config
        self.loop = loop
        super(IbmAsrService, self).__init__(config, loop)
        # self.thread_targets.append(self.startASR)
        # Derived audio framing: 16 kHz sample rate split into 50
        # blocks/second -> 320-sample blocks of 20 ms each.
        self.sample_rate = self.RATE_PROCESS
        self.block_size = int(self.RATE_PROCESS /float(self.BLOCKS_PER_SECOND))
        self.frame_duration_ms = 1000 * self.block_size // self.sample_rate
        # Voice-activity detector; sensitivity comes from config (default 1).
        self.vad = webrtcvad.Vad(config['services']['IbmAsrService'].get('vad_sensitivity', 1))
        # Per-site state, each dict keyed by site id.
        self.last_start_id = {}
        self.audio_stream = {}  # BytesLoop()
        self.started = {}  # False
        self.active = {}  # False
        self.models = {}
        self.empty_count = {}
        self.restart_count = {}
        self.stream_contexts = {}
        self.ring_buffer = {}
        self.last_audio = {}
        self.ibmlistening = {}
        self.connections = {}
        self.no_packet_timeouts = {}
        self.total_time_timeouts = {}
        self.last_dialog_id = {}
        # MQTT topic filters handled by on_message.
        self.subscribe_to = 'hermod/+/asr/activate,hermod/+/asr/deactivate,hermod/+/asr/start' \
            + ',hermod/+/asr/stop,hermod/+/hotword/detected'
        self.audio_count = 0
        # this_folder = os.path.dirname(os.path.realpath(__file__))
        # wav_file = os.path.join(this_folder, 'turn_off.wav')
        # f = open(wav_file, "rb")
        # self.turn_off_wav = f.read();
        # self.eventloop = asyncio.new_event_loop()
        # asyncio.set_event_loop(self.eventloop)
        # self.log('START ibm ASR')
        # self.log(this_folder)
        # self.startASR()
async def on_message(self, message):
"""handle mqtt message"""
topic = "{}".format(message.topic)
# self.log("ASR MESSAGE {}".format(topic))
parts = topic.split("/")
site = parts[1]
if topic == 'hermod/' + site + '/asr/activate':
self.log('activate ASR ' + site)
await self.activate(site)
elif topic == 'hermod/' + site + '/asr/deactivate':
self.log('deactivate ASR ' + site)
await self.deactivate(site)
elif topic == 'hermod/' + site + '/asr/start':
# self.log('start ASR '+site)
if not self.active.get(site, False):
await self.activate(site)
self.log('start ASR ' + site)
# timeout if no packets
if site in self.no_packet_timeouts:
self.no_packet_timeouts[site].cancel()
self.no_packet_timeouts[site] = self.loop.create_task(
self.no_packet_timeout(site))
# total time since start
if site in self.total_time_timeouts:
self.total_time_timeouts[site].cancel()
self.total_time_timeouts[site] = self.loop.create_task(
self.total_time_timeout(site))
payload = {}
payload_text = message.payload
try:
payload = json.loads(payload_text)
except json.JSONDecodeError:
pass
self.last_dialog_id[site] = payload.get('id', '')
self.started[site] = True
self.last_audio[site] = time.time()
payload = {}
try:
payload = json.loads(message.payload)
except json.JSONDecodeError:
pass
self.last_start_id[site] = payload.get('id', '')
self.loop.create_task(self.start_asr_vad(site))
# await self.startASR(site)
elif topic == 'hermod/' + site + '/asr/stop':
self.log('stop ASR ' + site)
# clear timeouts
if site in self.no_packet_timeouts:
self.no_packet_timeouts[site].cancel()
# total time since start
if site in self.total_time_timeouts:
self.total_time_timeouts[site].cancel()
# should be finish_stream ?
if site in self.connections:
try:
await self.connections[site].close()
except Exception:
pass
self.started[site] = False
# self.client.publish('hermod/'+site+'/speaker/play',self.turn_off_wav)
elif topic == 'hermod/' + site + '/hotword/detected':
self.log('clear buffer ' + site)
if site in self.ring_buffer:
self.ring_buffer[site].clear()
# self.client.publish('hermod/'+site+'/speaker/play',self.turn_off_wav)
elif topic == 'hermod/' + site + '/microphone/audio':
if self.started.get(site, False):
self.audio_count = self.audio_count + 1
self.audio_stream[site].write(message)
async def activate(self, site):
"""activate asr service"""
self.audio_stream[site] = BytesLoop()
self.active[site] = True
self.started[site] = False
await self.client.subscribe('hermod/' + site + '/microphone/audio')
async def deactivate(self, site):
"""deactivate asr service"""
await self.client.unsubscribe('hermod/' + site + '/microphone/audio')
self.audio_stream.pop(site, '')
self.active[site] = False
self.started[site] = False
async def total_time_timeout(self, site):
"""total timeout callback"""
await asyncio.sleep(12)
if site in self.no_packet_timeouts:
self.no_packet_timeouts[site].cancel()
await self.finish_stream(site)
async def no_packet_timeout(self, site):
"""no packets timeout callback"""
await asyncio.sleep(3.5)
print('SILENCE TIMEOUT')
if site in self.total_time_timeouts:
self.total_time_timeouts[site].cancel()
await self.finish_stream(site)
async def timeout(self, site, conn):
"""send timeout messages"""
await self.client.publish('hermod/' + site + '/asr/timeout', json.dumps({
"id": self.last_start_id.get(site, '')
}))
await self.client.publish('hermod/' + site + '/dialog/end', json.dumps({
"id": self.last_start_id.get(site, '')
}))
self.started[site] = False
await conn.close()
async def finish_stream(self, site):
"""finish transcription stream"""
try:
self.ibmlistening[site] = False
if site in self.connections:
self.log('FINISH STREAM send stop')
await self.connections[site].send(json.dumps({'action': 'stop'}))
# self.started[site] = False
else:
self.log('FINISH STREAM no connection')
self.started[site] = False
except Exception:
self.log('FINISH STREAM error')
# self.log(type(e))
# self.log(e)
self.started[site] = False
# pass
async def start_asr_vad(self, site=''):
"""start transcription stream"""
self.log('ASRVAD start')
# await self.send_sound('on',site)
# await self.client.publish('hermod/'+site+'/speaker/play',json.dumps({"sound":"on"}))
# return
text = ''
sender = None
# reconnect on error while started and no text heard
self.empty_count[site] = 0
# while site in self.started and self.started[site] \
# and not len(text) > 0 and self.empty_count[site] < 4:
# self.empty_count[site] = 0
# clear stream buffer
self.audio_stream[site] = BytesLoop()
# NEW
self.log('ASRVAD CONNECT')
async with websockets.connect(get_url(), extra_headers=get_headers()) as conn:
# CONFIGURE SOCKET SESSION
self.connections[site] = conn
await conn.send(json.dumps(get_init_params()))
await conn.recv()
# print(rec)
self.ibmlistening[site] = True
# SEND AUDIO PACKETS
# clear task from previous loop
if sender:
self.log('AUDIO SENDER CLEAR ')
sender.cancel()
sender = asyncio.create_task(self.send_audio(conn, site))
# self.log('ASRVAD start sound')
# self.log('ASRVAD start sound DONE')
# Keeps receiving transcript until we have the final transcript
while True:
self.log('ASRVAD LOOP')
# if self.empty_count[site] >= 4:
# await
# self.client.publish('hermod/'+site+'/aser/timeout',json.dumps({
# "id": self.last_start_id.get(site, '')
# }))
# await self.client.publish('hermod/'+site+'/dialog/end',json.dumps({
# "id":self.last_start_id.get(site,'')
# }))
# self.started[site] = False
# break
try:
rec = await conn.recv()
parsed = json.loads(rec)
print('=============================')
print(parsed)
print('=============================')
if parsed.get("error", False):
self.log('ASRVAD ERROR FROM IBM')
self.log(parsed.get('error'))
# self.empty_count[site] = self.empty_count[site] + 1
# self.ibmlistening[site] = False
# try:
# #await self.client.publish('hermod/'+site+'/dialog/end',
# json.dumps({"id":self.last_start_id.get(site,'')}))
# await conn.close()
# except Exception:
# pass
await self.timeout(site, conn)
break
if parsed.get('state', False) and parsed.get('state') == 'listening':
self.log('ASRVAD SET LISTENING '+site)
self.ibmlistening[site] = True
# have_results = False
if "results" in parsed:
self.log('RESULTS')
self.log(parsed["results"])
if parsed["results"]:
if "final" in parsed["results"][0]:
if parsed["results"][0]["final"]:
if parsed["results"][0]['alternatives']:
text = str(parsed["results"][0]["alternatives"][0].get(\
"transcript", ""))
self.log('ASRVAD got text [{}]'.format(text))
if text:
# self.log('send content '+site)
# self.log(self.client)
# self.log('hermod/'+site+'/asr/text')
# self.log(json.dumps({'text':text}))
# have_results = True
self.empty_count[site] = 0
await self.client.publish('hermod/'+site+'/asr/text', \
json.dumps({
'text':text,
"id":self.last_start_id.get(site, '')
}))
# self.log('sent content '+text)
self.started[site] = False
await conn.close()
break
else:
if self.empty_count[site] < 3:
self.empty_count[site] = self.empty_count[site] + 1
else:
self.timeout(site)
# await self.timeout(site,conn)
# break
# if not have_results:
# self.log('ASRVAD incc emtpy f'+ str(self.empty_count[site]))
# self.empty_count[site] = self.empty_count[site] + 1
# self.ibmlistening[site] = False
# conn.close()
# return False
# pass
except KeyError:
await self.timeout(site, conn)
break
except Exception:
await self.timeout(site, conn)
break
# cleanup
self.started[site] = False
self.ibmlistening[site] = False
if sender:
sender.cancel()
try:
await conn.close()
except Exception:
pass
async def send_audio(self, websocket_service, site):
"""send audio to transcription service"""
# Starts recording of | |
#
# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
# its licensors.
#
# For complete copyright and license terms please see the LICENSE at the root of this
# distribution (the "License"). All use of this software is governed by the License,
# or, if provided, by the license below or the license accompanying this file. Do not
# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
import pytest, sys
from waflib import Logs, Errors, Utils
from waflib.Configure import ConfigurationContext
from waflib.Build import BuildContext
import settings_manager
import utils
import json
import os
import copy
class MockSettingsManagerConfigureContext(ConfigurationContext):
    """ Mock context class based on ConfigurationContext"""
    # Kept for backward compatibility with code that reads the class
    # attribute; instances get their own dict below.
    result_by_attribute = {}

    def __init__(self, **kw):
        # NOTE(review): deliberately calls ConfigurationContext's *parent*
        # __init__ (super(ConfigurationContext, ...)) — presumably to skip
        # heavy configure-context setup; confirm before "fixing".
        super(ConfigurationContext, self).__init__(**kw)
        # FIX: a class-level mutable dict is shared by every instance and
        # leaks recorded results between tests; shadow it per instance.
        self.result_by_attribute = {}
class MockSettingsManagerBuildContext(BuildContext):
    """ Mock context class based on BuildContext"""
    # Kept for backward compatibility with code that reads the class
    # attribute; instances get their own dict below.
    result_by_attribute = {}

    def __init__(self, **kw):
        # NOTE(review): deliberately calls BuildContext's *parent* __init__
        # (super(BuildContext, ...)) — presumably to skip heavy build-context
        # setup; confirm before "fixing".
        super(BuildContext, self).__init__(**kw)
        # FIX: a class-level mutable dict is shared by every instance and
        # leaks recorded results between tests; shadow it per instance.
        self.result_by_attribute = {}
@pytest.fixture()
def test_settings_manager_context_for_override_report(is_configure_context, is_option_name, is_option_value, override_settings_attributes):
    """Build a mock configure/build context wired with a stubbed
    ``is_option_true`` and stub ``report_settings_<attr>`` reporters that
    record (is_configure, is_build, is_overridden) per attribute."""
    if is_configure_context:
        ctx = MockSettingsManagerConfigureContext(run_dir=sys.executable)
    else:
        ctx = MockSettingsManagerBuildContext(run_dir=sys.executable)
    setattr(ctx, 'cmd', 'build_unit_test')

    def _stub_check_is_option_true(option_name):
        # Only the configured option name reports the configured value.
        return True if not is_option_name else is_option_value

    def _stub_override_settings_report(is_configure, is_build, attribute, default_value, settings_value):
        ctx.result_by_attribute[attribute] = (
            is_configure, is_build, default_value != settings_value)

    setattr(ctx, 'is_option_true', _stub_check_is_option_true)
    if override_settings_attributes:
        for attr_name in override_settings_attributes:
            setattr(ctx,
                    'report_settings_{}'.format(attr_name),
                    _stub_override_settings_report)
    return ctx
@pytest.fixture()
def mocked_lumberyard_settings(test_default_settings_map, test_settings_map):
    """Temporarily replace LUMBERYARD_SETTINGS' settings/default maps with
    the test maps, restoring the originals on teardown.

    FIX: restoration is wrapped in try/finally so the global singleton is
    restored even if teardown is entered via an exception.
    """
    original_default_settings_map = settings_manager.LUMBERYARD_SETTINGS.default_settings_map
    original_settings_map = settings_manager.LUMBERYARD_SETTINGS.settings_map
    settings_manager.LUMBERYARD_SETTINGS.settings_map = test_settings_map
    settings_manager.LUMBERYARD_SETTINGS.default_settings_map = test_default_settings_map
    try:
        yield
    finally:
        settings_manager.LUMBERYARD_SETTINGS.settings_map = original_settings_map
        settings_manager.LUMBERYARD_SETTINGS.default_settings_map = original_default_settings_map
# Option name used by the "skip recursive execution" test scenario below;
# when truthy, report_settings_overrides appears to skip reporting entirely
# (see the ReportSettingsSkipRecursiveExecution param) — TODO confirm.
RECURSIVE_OPT = 'internal_dont_check_recursive_execution'
@pytest.mark.parametrize(
    "is_configure_context, is_option_name, is_option_value, override_settings_attributes, test_default_settings_map, test_settings_map, expected_result_map", [
        pytest.param(True, RECURSIVE_OPT, True, [], {}, {}, {}, id="ReportSettingsSkipRecursiveExecution"),
        pytest.param(True, RECURSIVE_OPT, False,
                     [],
                     {'attr': 'default'},
                     {'attr': 'default'},
                     {},
                     id="ReportSettingsConfigureContextNoOverrideNoReporter"),
        pytest.param(True, RECURSIVE_OPT, False,
                     ['attr1'],
                     {'attr1': 'default', 'attr2': 'default2'},
                     {'attr1': 'default', 'attr2': 'default2'},
                     {'attr1': (True, False, False)},
                     id="ReportSettingsConfigureContextNoOverrideWithSomeReporters"),
        pytest.param(True, RECURSIVE_OPT, False,
                     ['attr1', 'attr2'],
                     {'attr1': 'default', 'attr2': 'default2'},
                     {'attr1': 'default', 'attr2': 'default2'},
                     {'attr1': (True, False, False),
                      'attr2': (True, False, False)},
                     id="ReportSettingsConfigureContextNoOverrideWithAllReporter"),
        pytest.param(True, RECURSIVE_OPT, False,
                     ['attr1', 'attr2'],
                     {'attr1': 'default', 'attr2': 'default2'},
                     {'attr1': 'override', 'attr2': 'override2'},
                     {'attr1': (True, False, True),
                      'attr2': (True, False, True)},
                     id="ReportSettingsConfigureContextOverrideWithAllReporter"),
        pytest.param(False, RECURSIVE_OPT, False,
                     ['attr1', 'attr2'],
                     {'attr1': 'default', 'attr2': 'default2'},
                     {'attr1': 'default', 'attr2': 'default2'},
                     {'attr1': (False, True, False),
                      'attr2': (False, True, False)},
                     id="ReportSettingsBuildContextNoOverrideWithAllReporter"),
    ])
def test_ReportSettingsOverrides_ValidSettingsScenarios_Success(test_settings_manager_context_for_override_report, mocked_lumberyard_settings, expected_result_map):
    """report_settings_overrides only reports attributes that have a
    report_settings_<attr> reporter on the context; each expected_result_map
    value is the (is_configure, is_build, is_overridden) tuple recorded by
    the fixture's stub reporter."""
    settings_manager.report_settings_overrides(test_settings_manager_context_for_override_report)
    # Exactly the expected attributes were reported, with the expected tuples.
    assert len(test_settings_manager_context_for_override_report.result_by_attribute) == len(expected_result_map)
    for expected_key in expected_result_map.keys():
        assert expected_key in test_settings_manager_context_for_override_report.result_by_attribute
        assert expected_result_map[expected_key] == test_settings_manager_context_for_override_report.result_by_attribute[expected_key]
@pytest.mark.parametrize(
    "input_messages", [
        pytest.param(["messageA"], id="PrintSettingsOverrideSingleMessage"),
        pytest.param(["messageA", "messageB"], id="PrintSettingsOverrideMultipleMessagesUnique"),
        pytest.param(["messageA", "messageB", "messageA", "messageB"], id="PrintSettingsOverrideMultipleMessagesDuplicate")
    ])
def test_PrintSettingsOverrideMessage_PrintVariousMessages_UniqueMessagesPrinted(input_messages):
    """print_settings_override_message de-duplicates: each distinct message
    is printed (via Logs.pprint) exactly once, prefixed with [SETTINGS]."""
    # Arrange
    printed_messages = []

    def _stub_log_pprint(color, msg):
        printed_messages.append(msg)

    old_pprint = Logs.pprint
    Logs.pprint = _stub_log_pprint
    # FIX: restore the monkey-patched Logs.pprint in a finally block so a
    # failure inside the act phase cannot leak the stub into other tests.
    try:
        test_context = MockSettingsManagerConfigureContext(run_dir=sys.executable)
        # Act
        for input_message in input_messages:
            settings_manager.print_settings_override_message(test_context, input_message)
    finally:
        Logs.pprint = old_pprint
    # Assert
    unique_messages = set(['[SETTINGS] {}'.format(input_message) for input_message in input_messages])
    assert len(printed_messages) == len(unique_messages)
    for printed_message in printed_messages:
        assert printed_message in unique_messages
@pytest.mark.parametrize(
    "attribute, default_value, settings_value, expected_printed", [
        pytest.param('attr', 'default', 'default', False, id="ReportSettingsValueUnchanged"),
        pytest.param('attr', 'default', 'override', True, id="ReportSettingsValueChanged"),
    ])
def test_DefaultReportSettingsOverride_VariousValues_Success(attribute, default_value, settings_value, expected_printed):
    """default_report_settings_override prints a message exactly when the
    settings value differs from the default."""
    # Arrange: capture messages routed through the context's printer.
    captured = []
    ctx = MockSettingsManagerConfigureContext(run_dir=sys.executable)
    setattr(ctx, 'print_settings_override_message', captured.append)
    # Act
    settings_manager.default_report_settings_override(ctx, attribute, default_value, settings_value)
    # Assert: one message when overridden, none otherwise
    # (True == 1 / False == 0 comparison against the count).
    assert expected_printed == len(captured)
@pytest.mark.parametrize(
    "long_form, short_form, cmd_line, expected", [
        pytest.param('--use-incredibuild', '-i', 'lmbr_waf build -i True', 'True', id="UseShortFormValid"),
        pytest.param('--use-incredibuild', '-i', 'lmbr_waf build --use-incredibuild=True', 'True', id="UseLongFormValid"),
        pytest.param('--use-incredibuild', '-i', 'lmbr_waf build -i=True', type(Errors.WafError), id="UseShortFormErrorWithValue"),
        pytest.param('--use-incredibuild', '-i', 'lmbr_waf build -i=', type(Errors.WafError), id="UseShortFormErrorWithOutValue"),
        pytest.param('--use-incredibuild', '-i', 'lmbr_waf build -i True', 'True', id="UseShortFormValid"),
        pytest.param('--use-incredibuild', '-i', 'lmbr_waf build --use-incredibuild=True --foo-arg=False', 'True', id="UseLongFormValidWithTrailingArgs"),
        pytest.param('--use-incredibuild', '-i', 'lmbr_waf build -i=True --foo-arg=False', type(Errors.WafError), id="UseShortFormErrorWithValueWithTrailingArgs"),
        pytest.param('--use-incredibuild', '-i', 'lmbr_waf build -i= --foo-arg=False', type(Errors.WafError), id="UseShortFormErrorWithOutValueWithTrailingArgs"),
        pytest.param('--use-incredibuild', '-i', 'lmbr_waf build --use-incredibuild=', '', id="UseLongFormValidSetToEmpty"),
        pytest.param('--use-incredibuild', '-i', 'lmbr_waf build --use-incredibuild= --foo-arg=False', '', id="UseLongFormValidSetToEmptyWithTrailingArgs"),
    ])
def test_SettingsApplyOptionalOverride_Success(long_form, short_form, cmd_line, expected):
    """apply_optional_override returns the parsed option value for valid
    command lines and raises WafError for the invalid short-form spellings.

    NOTE(review): ``type(Errors.WafError)`` evaluates to the metaclass
    ``type``, so ``isinstance(expected, type(...))`` is really
    ``isinstance(expected, type)`` and only works by accident; passing
    ``Errors.WafError`` itself would be clearer — confirm before changing.
    NOTE(review): the param id "UseShortFormValid" is used twice.
    """
    arguments = cmd_line.split()
    if isinstance(expected, str):
        result = settings_manager.Settings.apply_optional_override(long_form=long_form,
                                                                   short_form=short_form,
                                                                   arguments=arguments)
        assert expected == result
    elif isinstance(expected, type(Errors.WafError)):
        with pytest.raises(Errors.WafError):
            settings_manager.Settings.apply_optional_override(long_form=long_form,
                                                              short_form=short_form,
                                                              arguments=arguments)
@pytest.mark.parametrize(
    "name, is_monolithic, is_test, is_server, third_party_config, has_base_config, test_default_settings_map, test_settings_map, expected_name", [
        pytest.param('test_a', False, False, False, 'debug', False, {},{'output_folder_ext_test_a': 'ext_test_a'}, 'test_a', id="ConfigNoMonolithicNoTestNoServerNoBaseConfig"),
        pytest.param('test_b', True, False, False, 'debug', False, {},{'output_folder_ext_test_b': 'ext_test_b'}, 'test_b', id="ConfigMonolithicNoTestNoServerNoBaseConfig"),
        pytest.param('test_c', False, True, False, 'release', False, {}, {'output_folder_ext_test_c': 'ext_test_c'}, 'test_c_test', id="ConfigNoMonolithicTestNoServerNoBaseConfig"),
        pytest.param('test_d', False, False, True, 'release', False, {}, {'output_folder_ext_test_d': 'ext_test_d'}, 'test_d_dedicated', id="ConfigNoMonolithicNoTestServerNoBaseConfig"),
        pytest.param('test_e', False, True, True, 'debug', False, {}, {'output_folder_ext_test_e': 'ext_test_e'}, 'test_e_test_dedicated', id="ConfigNotMonolithicTestServerNoBaseConfig"),
        pytest.param('test_f', False, False, False, 'release', True, {}, {'output_folder_ext_test_f': 'ext_test_f'}, 'test_f', id="ConfigNoMonolithicNoTestNoServerBaseConfig")
    ]
)
def test_ConfigurationSettings_ValidPermutations_Success(mocked_lumberyard_settings, name, is_monolithic, is_test, is_server, third_party_config, has_base_config, test_default_settings_map, test_settings_map, expected_name):
    """Exercise ConfigurationSettings: derived config name (test/dedicated
    suffixes), output-folder extension lookup, and base-config matching."""
    base_config_name = 'base_{}'.format(name)
    # Optional parent configuration, used to exercise the recursive-match path.
    base_config = settings_manager.ConfigurationSettings(base_config_name, is_monolithic, is_test, third_party_config,None) if has_base_config else None
    test_config = settings_manager.ConfigurationSettings(name, is_monolithic, is_test, third_party_config, base_config)
    # The mocked settings_map provides output_folder_ext_<name> -> ext_<name>.
    expected_folder_ext = 'ext_{}'.format(name)
    assert test_config.name == name
    assert test_config.is_monolithic == is_monolithic
    assert test_config.third_party_config == third_party_config
    config_name = test_config.build_config_name(is_test, is_server)
    assert config_name == expected_name
    output_ext = test_config.get_output_folder_ext()
    assert output_ext == expected_folder_ext
    assert not test_config.does_configuration_match("__foo__")
    assert test_config.does_configuration_match(name)
    if has_base_config:
        # Base name matches only when recursive matching is enabled (default).
        assert test_config.does_configuration_match(base_config_name)
        assert not test_config.does_configuration_match(base_config_name, False)
@pytest.mark.parametrize(
    "src_dict, configuration, expected", [
        pytest.param( {
                          "INCLUDES": [
                              "include_a"
                          ],
                          "DEFINES": [
                              "define_a"
                          ]
                      },
                      "debug",
                      {
                          'debug': {
                              'INCLUDES': [
                                  'include_a'
                              ],
                              'DEFINES': [
                                  'define_a'
                              ]
                          }
                      },
                      id="SimpleDebugListEnvironments"),
        pytest.param( {
                          "SPECIAL_NAME_A": "include_a",
                          "DEFINES": [
                              "define_a"
                          ]
                      },
                      "profile",
                      {
                          'profile': {
                              "SPECIAL_NAME_A": "include_a",
                              'DEFINES': [
                                  'define_a'
                              ]
                          }
                      },
                      id="SimpleDebugListEnvironments"),
        pytest.param( {
                          "?CONDITION1?:INCLUDES": [
                              "include_a"
                          ],
                          "@CONDITION2@:DEFINES": [
                              "define_a"
                          ]
                      },
                      "debug",
                      {
                          'debug': {
                              '@CONDITION2@:DEFINES': [
                                  'define_a'
                              ],
                              '?CONDITION1?:INCLUDES': [
                                  'include_a'
                              ]
                          }
                      },
                      id="ConditionalDebugListEnvironments"),
        pytest.param( {
                          "?CONDITION1?:SPECIAL_NAME_A": "include_a",
                          "DEFINES": [
                              "define_a"
                          ]
                      },
                      "profile",
                      {
                          'profile': {
                              '?CONDITION1?:SPECIAL_NAME_A': 'include_a',
                              'DEFINES': [
                                  'define_a'
                              ]
                          }
                      },
                      id="ConditionalSimpleDebugListEnvironments"),
    ]
)
def test_ProcessEnvDictValues_ValidInputs_Success(src_dict, configuration, expected):
    """process_env_dict_values nests the source env dict (including
    ?COND?:/@COND@:-prefixed keys, kept verbatim) under the configuration
    name in the result dict.

    NOTE(review): the first two params share the id
    "SimpleDebugListEnvironments" — likely a copy-paste slip in the ids.
    """
    env_result = {}
    settings_manager.process_env_dict_values(src_dict, configuration, env_result)
    assert env_result == expected
@pytest.mark.parametrize(
    "src_dict, expected_configurations", [
        pytest.param( {
                          "env": {
                              "name": "all"
                          }
                      },
                      ['_'],
                      id="SimpleAllEnv"),
        pytest.param( {
                          "env": {
                              "name": "all"
                          },
                          "env/debug": {
                              "name": "debug"
                          }
                      },
                      ['_', 'debug'],
                      id="AllAndDebugEnv"),
        pytest.param( {
                          "env": {
                              "name": "all"
                          },
                          "env/debug": {
                              "name": "debug"
                          },
                          "settings": {
                              "name": "dont_include"
                          }
                      },
                      ['_', 'debug'],
                      id="AllAndDebugSkipNonEnv"),
    ]
)
def test_ProcessEnvDict_ValidInputs_Success(src_dict, expected_configurations):
    """process_env_dict forwards each "env"/"env/<config>" section (and only
    those) to process_env_dict_values; plain "env" maps to the '_'
    configuration. Assertions happen inside the mock; the returned result
    dict itself is not inspected."""
    def _mock_process_env_dict(env_dict, configuration, processed_env_dict):
        # Only expected configurations may be forwarded, with the matching
        # source sub-dict.
        assert configuration in expected_configurations
        if configuration=='_':
            check_key = 'env'
        else:
            check_key = 'env/{}'.format(configuration)
        assert src_dict[check_key] == env_dict
    old_process_env_dict = settings_manager.process_env_dict_values
    settings_manager.process_env_dict_values = _mock_process_env_dict
    try:
        result = {}
        settings_manager.process_env_dict(src_dict, result)
    finally:
        # Always restore the monkey-patched module function.
        settings_manager.process_env_dict_values = old_process_env_dict
@pytest.mark.parametrize(
    "source_attr_dict, merged_dict, expected_result, expected_warning", [
        pytest.param( {
                          'NEW_SETTING': 'new'
                      },
                      {
                          'OLD_SETTING': 'old'
                      },
                      {
                          'NEW_SETTING': 'new',
                          'OLD_SETTING': 'old'
                      },
                      False,
                      id="MergeNoOverwrite"),
        pytest.param( {
                          'NEW_SETTING': 'new'
                      },
                      {
                          'NEW_SETTING': 'new'
                      },
                      {
                          'NEW_SETTING': 'new'
                      },
                      False,
                      id="MergeOverwriteNoChange"),
        pytest.param( {
                          'NEW_SETTING': 'new'
                      },
                      {
                          'NEW_SETTING': 'old'
                      },
                      {
                          'NEW_SETTING': 'new'
                      },
                      True,
                      id="MergeOverwrite")
    ]
)
def test_MergeAttributesGroup_ValidInputs_Success(source_attr_dict, merged_dict, expected_result, expected_warning):
    """merge_attributes_group merges source attributes into merged_dict
    in place; overwriting an existing key with a *different* value warns
    via Logs.warn (stubbed here to assert expected_warning)."""
    def _mock_log_warn(msg):
        # Reaching this stub is only legal when a warning is expected.
        assert expected_warning
    old_log_warn = Logs.warn
    Logs.warn = _mock_log_warn
    try:
        settings_manager.merge_attributes_group("includes_file", source_attr_dict, merged_dict)
    finally:
        # Always restore the monkey-patched Logs.warn.
        Logs.warn = old_log_warn
    assert merged_dict == expected_result
@pytest.mark.parametrize(
    "merge_settings_dict, setting_group_name, settings_value_dicts, expected_result", [
        pytest.param( {},
                      'group',
                      [],
                      {
                          'group': []
                      },
                      id="EmptyTest"),
        pytest.param( {
                          'group': []
                      },
                      'group',
                      [],
                      {
                          'group': []
                      },
                      id="EmptyExistingGroupTest"),
        pytest.param( {
                      },
                      'group',
                      [{
                          "short_form": "-f",
                          "long_form": "--foo",
                          "attribute": "foo",
                          "default_value": "foo",
                          "description": "Use Foo"
                      }],
                      {
                          'group': [
                              {
                                  "short_form": "-f",
                                  "long_form": "--foo",
                                  "attribute": "foo",
                                  "default_value": "foo",
                                  "description": "Use Foo"
                              },
                          ]
                      },
                      id="NewGroupItem"),
        pytest.param( {
                          'group': [
                              {
                                  "short_form": "-n",
                                  "long_form": "--not-foo",
                                  "attribute": "not_foo",
                                  "default_value": "not_foo",
                                  "description": "Use Not Foo"
                              },
                          ]
                      },
                      'group',
                      [{
                          "short_form": "-f",
                          "long_form": "--foo",
                          "attribute": "foo",
                          "default_value": "foo",
                          "description": "Use Foo"
                      }],
                      {
                          'group': [
                              {
                                  "short_form": "-n",
                                  "long_form": "--not-foo",
                                  "attribute": "not_foo",
                                  "default_value": "not_foo",
                                  "description": "Use Not Foo"
                              },
                              {
                                  "short_form": "-f",
                                  "long_form": "--foo",
                                  "attribute": "foo",
                                  "default_value": "foo",
                                  "description": "Use Foo"
                              },
                          ]
                      },
                      id="NewGroupItemExistingGroup"),
        pytest.param( {
                          'group': [
                              {
                                  "short_form": "-f",
                                  "long_form": "--foo",
                                  "attribute": "foo",
                                  "default_value": "old_foo",
                                  "description": "Use Old Foo"
                              },
                          ]
                      },
                      'group',
                      [{
                          "short_form": "-f",
                          "long_form": "--foo",
                          "attribute": "foo",
                          "default_value": "new_foo",
                          "description": "Use New Foo"
                      }],
                      {
                          'group': [
                              {
                                  "short_form": "-f",
                                  "long_form": "--foo",
                                  "attribute": "foo",
                                  "default_value": "new_foo",
                                  "description": "Use New Foo"
                              },
                          ]
                      },
                      id="ReplaceExistingGroupItem")
    ])
def test_MergeSettingsGroup_ValidInputs_Success(merge_settings_dict, setting_group_name, settings_value_dicts, expected_result):
    """merge_settings_group merges option descriptors into the named group
    of merge_settings_dict in place: the group is created when missing, new
    items are appended, and an item with a matching attribute is replaced."""
    settings_manager.merge_settings_group('includes_file', merge_settings_dict, setting_group_name, settings_value_dicts)
    assert merge_settings_dict == expected_result
@pytest.mark.parametrize(
"settings_include_file, include_to_settings_dict, expected_env_dict, expected_settings_dict, expected_attributes_dict", [
pytest.param( 'testpath/test_settings.json',
{
'testpath/test_settings.json': {}
},
{},
{},
{},
id="EmptySettings"),
pytest.param( 'testpath/test_settings.json',
{
'testpath/test_settings.json': {
'env': {
'MYKEY': 'MYVALUE'
}
}
},
{
'env': {
'MYKEY': 'MYVALUE'
}
},
{},
{},
id="ProcessEnv"),
pytest.param( 'testpath/test_settings.json',
{
| |
# -*- coding: UTF8 -*-
from database.utils.connector import BasicDatabaseConnector
class VMServerDBConnector(BasicDatabaseConnector):
'''
Esta clase permite gestionar las diferentes características de las imágenes
accesibles en el servidor de máquinas virtuales actual.
'''
    def __init__(self, sqlUser, sqlPass, databaseName):
        '''
        Class constructor: connect to the database with the given
        credentials and (re)build the in-memory tables of free
        (UUID, MAC) pairs and free VNC ports.
        '''
        BasicDatabaseConnector.__init__(self, sqlUser, sqlPass, databaseName)
        self.connect()
        # Populate the transient resource pools used when creating VMs.
        self.generateMACsAndUUIDs()
        self.generateVNCPorts()
def getImages(self):
'''
Devuelve una lista con todos los identificadores de imágenes que se
encuentran registradas en el servidor de máquinas virtuales.
'''
# Creamos la consulta encargada de extraer los datos
sql = "SELECT VMId FROM VirtualMachine"
# Recogemos los resultado
results = self._executeQuery(sql, False)
# Guardamos en una lista los ids resultantes
imageIds = []
for r in results:
imageIds.append(r[0])
# Devolvemos la lista resultado
return imageIds
def getName(self, imageId):
'''
Devuelve el nombre de la imagen cuyo identificador se pasa como argumento.
'''
# Creamos la consulta encargada de extraer los datos
sql = "SELECT name FROM VirtualMachine WHERE VMId = " + str(imageId)
# Recogemos los resultado
result = self._executeQuery(sql, True)
if (result == None) :
return None
# Devolvemos el resultado
return result[0]
def getImagePath(self, imageId):
'''
Devuelve la ruta donde se encuentra físicamente la imagen cuyo identificador
de imagen se pasa como argumento.
'''
# Creamos la consulta encargada de extraer los datos
sql = "SELECT dataImagePath FROM VirtualMachine WHERE VMId = " + str(imageId)
# Recogemos los resultado
result = self._executeQuery(sql, True)
if (result == None) :
return None
# Devolvemos el resultado
return result[0]
def getOsImagePath(self, imageId):
'''
Devuelve la ruta donde se encuentra físicamente la imagen del SO cuyo identificador
de imagen se pasa como argumento.
'''
# Creamos la consulta encargada de extraer los datos
sql = "SELECT osImagePath FROM VirtualMachine WHERE VMId = " + str(imageId)
# Recogemos los resultado
result = self._executeQuery(sql, True)
if (result == None) :
return None
# Devolvemos el resultado
return result[0]
def getFileConfigPath(self, imageId):
'''
Devuelve la ruta donde se encuentra el fichero de configuración asociado a
la imagen cuyo identificador se pasa como argumento
'''
# Creamos la consulta encargada de extraer los datos
sql = "SELECT definitionFilePath FROM VirtualMachine WHERE VMId = " + str(imageId)
# Recogemos los resultado
result = self._executeQuery(sql)
if (result == None) :
return None
# Devolvemos el resultado
return result[0][0] # BUG aquí
def setImageDataPath(self, imageId, path):
'''
Permite cambiar la ruta de la imagen cuyo identificador se pasa como argumento.
'''
# Creamos la consulta encargada de realizar la actualización
sql = "UPDATE VirtualMachine SET dataImagePath = '" + path + "' WHERE VMId = " + str(imageId)
# Ejecutamos el comando
self._executeUpdate(sql)
def createImage(self, imageId, name, dataImagePath, osImagePath, definitionFilePath):
'''
Permite registrar en la base de datos una nueva imagen de máquina virtual.
'''
# Introducimos los datos en la base de datos
sql = "INSERT INTO VirtualMachine(VMId,name,dataImagePath,osImagePath,definitionFilePath) VALUES("
sql += str(imageId) + ",'" + name + "','" + dataImagePath + "','" + osImagePath + "','" + definitionFilePath + "') "
# Ejecutamos el comando
self._executeUpdate(sql)
# Devolvemos el id
return imageId
    def deleteImage(self, imageId):
        '''
        Delete the virtual-machine image with the given id.
        '''
        # Delete the VM row itself
        sql = "DELETE FROM VirtualMachine WHERE VMId =" + str(imageId)
        # Run the statement
        self._executeUpdate(sql)
        # Thanks to ON DELETE CASCADE its references in the remaining
        # tables should be removed as well
        # (database updated)
def doesImageExist(self, VMId):
'''
Comprueba si una imagen existe
'''
# Contamos el número de MV con el id dado
sql = "SELECT COUNT(*) FROM VirtualMachine WHERE VMId =" + str(VMId)
# Recogemos los resultado
result = self._executeQuery(sql, True)
if (result == None) :
return None
# Si el resultado es 1, la MV existe
return (result[0] == 1)
def generateMACsAndUUIDs(self):
'''
Función encargada de crear la tabla inicial de pares (UUID,MAC) libres
'''
sql = "DROP TABLE IF EXISTS FreeMACs"
# Ejecutamos el comando
self._executeUpdate(sql)
# Creamos la tabla necesaria
sql = "CREATE TABLE IF NOT EXISTS FreeMACs(UUID VARCHAR(40) ,MAC VARCHAR(20),PRIMARY KEY(UUID,MAC)) ENGINE=MEMORY;"
# Ejecutamos el comando
self._executeUpdate(sql)
# Generamos el relleno
v = 0
# Generamos el bucle
while v < 256 :
x = str(hex(v))[2:].upper()
# Ajustamos al formato de 2 digitos cuando proceda
if v < 16:
x = '0' + x
# Creamos la consulta
sql = "INSERT INTO FreeMACs VALUES (UUID(),'" + '2C:00:00:00:00:' + x + "');"
# Ejecutamos el comando
self._executeUpdate(sql)
# incrementamos el contador
v = v + 1
def generateVNCPorts(self):
'''
Función encargada de crear la tabla inicial de pares (UUID,MAC) libres
'''
sql = "DROP TABLE IF EXISTS FreeVNCPorts;"
# Ejecutamos el comando
self._executeUpdate(sql)
# Creamos la tabla necesaria
sql = "CREATE TABLE IF NOT EXISTS FreeVNCPorts(VNCPort INTEGER PRIMARY KEY) ENGINE=MEMORY;"
# Ejecutamos el comando
self._executeUpdate(sql)
# Generamos el relleno
p = 15000
v = 0
# Generamos el bucle
while v < 256 :
# Creamos la consulta
sql = "INSERT INTO FreeVNCPorts VALUES ('" + str(p) + "');"
# Ejecutamos el comando
self._executeUpdate(sql)
# incrementamos el contador
p = p + 2
v = v + 1
def extractFreeMACAndUUID(self):
'''
Función que devuelve la primera ocurrencia de la tabla de macs libres y
la elimina de la tabla
'''
# Creamos la cosulta
sql = "SELECT * FROM FreeMACs"
# Nos quedamos con la primera ocurrencia
result = self._executeQuery(sql, True)
if (result == None) :
return None
# Eliminamos este resultado de la tabla
sql = "DELETE FROM FreeMACs WHERE UUID = '" + result[0] + "' AND MAC ='" + result[1] + "'"
# Ejecutamos el comando
self._executeUpdate(sql)
# Gracias al ON DELETE CASCADE se borrarán las imagenes registradas para este servidor
# Actualizamos la base de datos
# Devolvemos una tupla con la UUID y la MAC
return (result[0], result[1])
def insertFreeMACAndUUID(self, UUID, MAC):
'''
Añade un nuevo par del tipo UUID , MAC a la tabla freeMAC
'''
# Creamso la consulta
sql = "INSERT INTO FreeMACs VALUES ('" + UUID + "','" + MAC + "')"
# Ejecutamos el comando
self._executeUpdate(sql)
def extractFreeVNCPort(self):
'''
Función que devuelve la primera ocurrencia de la tabla de macs libres y
la elimina de la tabla
'''
# Creamos la cosulta
sql = "SELECT * FROM FreeVNCPorts"
# Nos quedamos con la primera ocurrencia
result = self._executeQuery(sql, True)
if (result == None) :
return None
# Eliminamos este resultado de la tabla
sql = "DELETE FROM FreeVNCPorts WHERE VNCPort = '" + str(result[0]) + "'"
# Ejecutamos el comando
self._executeUpdate(sql)
# Gracias al ON DELETE CASCADE se borrarán las imagenes registradas para este servidor
# Actualizamos la base de datos
# Devolvemos el puerto
return result[0]
    def insertFreeVNCPort(self, VNCPort):
        '''
        Return a VNC port to the FreeVNCPorts pool.
        (The previous docstring was copied from insertFreeMACAndUUID.)
        '''
        # Build the insert statement
        sql = "INSERT INTO FreeVNCPorts VALUES ('" + str(VNCPort) + "')"
        # Run it
        self._executeUpdate(sql)
def getRunningPorts(self):
'''
Devuelve una lista con los puertos VNC correspondientes a las máquinas
virtuales que se encuentran actualmente en ejecución.
'''
# Creamos la consulta encargada de extraer los datos
sql = "SELECT VNCPort FROM ActualVM"
# Recogemos los resultado
results = self._executeQuery(sql, False)
# Guardamos en una lista los ids resultantes
ports = []
for r in results:
ports.append(int(r[0]))
# Devolvemos la lista resultado
return ports
def getUsers(self):
'''
Devuelve una lista con los identificadores de todos los usuarios que actualmente
se encuentran ejecutando una determinada máquina virtual en este servidor de
máquinas virtuales.
'''
# Creamos la consulta encargada | |
<gh_stars>0
import json
import sys
import os
import datetime
import time
import argparse
from git import Repo, GitCommandError
import docker
import requests
import re
from subprocess import call
def retrieve_pr(repository_name, pr):
    """Fetch pull request *pr* of GitHub repository *repository_name*.

    Returns [title, issue_refs], where issue_refs is the list of
    "#NNN" / "dojot/dojot#NNN" references found in the PR body, "" when the
    body is empty, or ["PR not found", "none"] when the API response carries
    no body. Requires the GITHUB_API_TOKEN environment variable.
    """
    github_api_token = os.environ["GITHUB_API_TOKEN"]
    r = requests.get("https://api.github.com/repos/" + repository_name + "/pulls/" + pr, headers={
        'Authorization': 'token ' + github_api_token, 'User-Agent': 'dojot-baseline-builder'})
    # Parse the response once instead of re-parsing it on every access.
    payload = r.json()
    if "body" in payload:
        pr_comment = payload["body"]
        title = payload["title"]
        if pr_comment is not None:
            # Normalize full issue URLs to the short "dojot/dojot#NNN" form.
            pr_comment = pr_comment.replace(
                "https://github.com/dojot/dojot/issues/", "dojot/dojot#")
            pr_comment = pr_comment.replace(
                "http://github.com/dojot/dojot/issues/", "dojot/dojot#")
            # Raw string for the regex; pattern kept identical to the
            # original (the "." matches one arbitrary char before the digits).
            reg = re.compile(r"(dojot/dojot#.[0-9]+|#.[0-9]+)")
            return [title, reg.findall(pr_comment)]
        return [title, ""]
    else:
        return ["PR not found", "none"]
def get_repository_id(repository_name, owner="dojot"):
    """Return the GraphQL node id of GitHub repository *owner*/*repository_name*.

    Returns '' (after logging the failure) when the repository cannot be
    resolved. Requires the GITHUB_API_TOKEN environment variable.
    """
    github_api_token = os.environ["GITHUB_API_TOKEN"]
    print("Getting Repository id for "+owner+"/"+repository_name)
    # Build the GraphQL variables directly instead of creating placeholder
    # values and overwriting them afterwards.
    requestRepoId = {
        "query": "query ($owner: String!, $repoName: String!) {repository(owner: $owner, name: $repoName) {id}}",
        "variables": {"owner": owner, "repoName": repository_name}
    }
    r = requests.post("https://api.github.com/graphql", json=requestRepoId, headers={
        'Authorization': 'bearer ' + github_api_token, 'Content-Type': 'application/json'})
    # Parse the response once.
    payload = r.json()
    if "data" in payload:
        return payload["data"]["repository"]["id"]
    else:
        print("Cant get repositoryId from "+repository_name)
        return ''
def create_pr(repoId, repository_name, branchTo="master", branchFrom="development", titlePR="new PR"):
    """Open a GitHub pull request from *branchFrom* into *branchTo*.

    repoId is the GraphQL node id (see get_repository_id). Failures are
    logged, not raised. Requires the GITHUB_API_TOKEN environment variable.
    """
    github_api_token = os.environ["GITHUB_API_TOKEN"]
    requestCreatePR = {
        "query": "mutation ($branchTo: String!, $titlePR: String!, $branchFrom: String!, $repoId: ID!, $body:String) { createPullRequest(input: {baseRefName: $branchTo, title: $titlePR, body:$body, repositoryId: $repoId, headRefName: $branchFrom, maintainerCanModify: true}) { pullRequest { title id } }}",
        # Variables filled in directly instead of overwriting placeholders.
        "variables": {
            "branchTo": branchTo,
            "titlePR": titlePR,
            "branchFrom": branchFrom,
            "repoId": repoId,
            "body": "Be careful, see if it is necessary to update the branch! **Most likely it is not to update.**"
        }
    }
    print("Creating PR for " + repository_name +
          "from " + branchFrom + " to " + branchTo)
    r = requests.post("https://api.github.com/graphql", json=requestCreatePR, headers={
        'Authorization': 'bearer ' + github_api_token, 'Content-Type': 'application/json'})
    if "errors" in r.json():
        print("Cant create PR branchTo:"+branchTo+" branchFrom:" +
              branchFrom + " repository:"+repository_name)
        print(r.json())
def create_prs(spec, selected_repo, branch_from, branch_to):
    """Open a merge PR (branch_from -> branch_to) for every component of
    *spec*; when *selected_repo* is not "all", only that component is done."""
    for component in spec["components"]:
        repository_name = component['repository-name']
        github_repository = component['github-repository']
        print("Create PR for "+repository_name)
        if selected_repo != "all" and repository_name != selected_repo:
            print("Skipping " + repository_name + " from merging.")
            continue
        # "owner/name" -> (owner, name)
        owner, repository_name = github_repository.split("/")
        repoID = get_repository_id(repository_name, owner)
        title_pr = ("Merge version "+spec['tag']+" from branch "+branch_from +
                    " to branch "+branch_to)
        create_pr(repoID, repository_name, branch_to, branch_from, title_pr)
        # Throttle consecutive GitHub API mutations.
        time.sleep(5)
def _format_pr_message(repository_name, pr):
    """Build one backlog checklist line for PR *pr* of *repository_name*
    (" - [ ] repo#NNN, linked issues ...: title")."""
    message = " - [ ] "+repository_name + "#" + pr
    print("Retrieving information for PR " + message)
    title, issues = retrieve_pr(repository_name, pr)
    if issues:
        message += ", linked issues"
        for issue in issues:
            # '#99999' is the placeholder issue number and is never listed.
            if issue != '#99999':
                message += " " + issue
    message += ": " + title
    return message


def build_backlog_message(repo, repository_name, last_commit, current_commit):
    """Walk the git history of *repo* from *current_commit* back to
    *last_commit* and build a markdown backlog section listing every merged
    pull request (both merge-commit and squash-merge styles).

    Returns "" when no PR is found in the range.

    NOTE(review): if *last_commit* is not an ancestor of *current_commit*,
    this walk never terminates -- confirm the spec always provides a valid
    commit pair.
    """
    offset = 0
    commit_it = list(repo.iter_commits(
        current_commit, max_count=1, skip=offset))[0]
    messages = []
    print("Building backlog messages for repository " + repository_name)
    while commit_it.hexsha != last_commit:
        commit_it = list(repo.iter_commits(
            current_commit, max_count=1, skip=offset))[0]
        if commit_it.hexsha == last_commit:
            break
        # Merge-commit style: "Merge pull request #123 from ..."
        searchObj = re.match(
            "Merge pull request #(.*) from .*", commit_it.message)
        # Squash-merge style: the PR number is appended as "(#123)".
        matches = re.search(r"\(#[0-9]*\)", commit_it.message)
        if searchObj:
            # Duplicated formatting code factored into _format_pr_message.
            messages.append(_format_pr_message(repository_name,
                                               searchObj.group(1)))
        if matches:
            pr = matches.group().replace("(#", "").replace(")", "")
            messages.append(_format_pr_message(repository_name, pr))
        offset = offset + 1
    if not messages:
        return ""
    # Section header underlined with dashes, then one line per PR.
    message = repository_name + "\n" + "-" * len(repository_name) + "\n\n"
    for m in messages:
        message += m + "\n"
    return message
def build_backlog_messages(spec, selected_repo):
    """Print the combined backlog (per-repository merged-PR lists) for the
    components of *spec*; when *selected_repo* is not "all", only that
    component is included."""
    sections = []
    for component in spec["components"]:
        name = component['repository-name']
        if selected_repo != "all" and name != selected_repo:
            print("Skipping " + name + " from merging.")
            continue
        repo = Repo("./git_repos/" + name)
        section = build_backlog_message(
            repo, component['github-repository'],
            component["last-commit"], component["current-commit"])
        if section:
            sections.append(section + "\n\n")
    print("Backlog is:\n\n")
    print("".join(sections))
def checkout_git_repositories(spec, selected_repo):
    """Clone every selected component of *spec* under ./git_repos and create
    its release/<version> branch at the configured current-commit.

    Requires the GITHUB_USERNAME and GITHUB_TOKEN environment variables.
    """
    print("Checking out repositories...")
    username = os.environ["GITHUB_USERNAME"]
    usertoken = os.environ["GITHUB_TOKEN"]
    branch_name = "release/"+spec['version']
    # Credentials are embedded in the clone URL.
    github_preamble = "https://" + username + ":" + usertoken + "@github.com/"
    print("Creating output directory...")
    # makedirs(exist_ok=True) replaces the old stat-then-mkdir dance whose
    # bare "except:" silently swallowed every error, not just "missing dir".
    os.makedirs("./git_repos", exist_ok=True)
    print("... output repository directory created.")
    for repo_config in spec["components"]:
        repository_name = repo_config['repository-name']
        if selected_repo != "all" and repository_name != selected_repo:
            print("Skipping " + repository_name + " from checkout.")
            continue
        repository_url = github_preamble + repo_config['github-repository']
        repository_dest = "./git_repos/"+repo_config['repository-name']
        commit_id = repo_config['current-commit']
        print("Checking out " + repository_name)
        print("From GitHub repository " + repo_config['github-repository'])
        print("At commit " + commit_id)
        print("Cloning repository...")
        repo = Repo.clone_from(repository_url, repository_dest)
        print("... repository was cloned")
        print("Creating branch " + branch_name + " ...")
        # Point HEAD at a new branch rooted at commit_id, then hard-reset
        # the index and working tree to match it.
        repo.head.reference = repo.create_head(branch_name, commit_id)
        repo.head.reset(index=True, working_tree=True)
        print("... '"+branch_name+"' branch was created")
    print("... repositories were checked out.")
def create_git_tag(spec, selected_repo):
    """Create the baseline tag (spec["tag"]) on the release branch of every
    selected component, skipping repositories that already carry the tag."""
    print("Creating tag for all repositories...")
    baseline_tag_name = spec["tag"]
    branch_name = "release/"+spec['version']
    for component in spec["components"]:
        name = component['repository-name']
        if selected_repo != "all" and name != selected_repo:
            print("Skipping " + name + " from creating tag.")
            continue
        repo = Repo("./git_repos/" + component['repository-name'])
        release_head = repo.heads[branch_name]
        print("Creating tag "+baseline_tag_name +
              " for repository " + name + "...")
        print("Checking whether tag has already been created...")
        if baseline_tag_name in repo.tags:
            # Idempotency guard: never re-create an existing tag.
            print("... tag has been already created.")
            print("... skipping repository " + name + ".")
            continue
        print("... tag is not created yet. Good to go.")
        print("Creating baseline tag...")
        repo.create_tag(baseline_tag_name, ref=release_head,
                        message="Baseline: " + baseline_tag_name)
        print("... baseline tag was created.")
        print("... repository " + name +
              " was properly tagged.")
    print("... all repositories were tagged.")
def push_git_tag(spec, selected_repo):
    """Push the baseline tag of every selected component to its origin.

    Components with "create-tag": false in the spec are skipped (their tag
    was never created locally).
    """
    print("Pushing everything to GitHub...")
    baseline_tag_name = spec["tag"]
    for repo_config in spec["components"]:
        repository_name = repo_config['repository-name']
        # .get() collapses the "key present and equal to False" test; the
        # spec is loaded from JSON, so the value is the real False singleton.
        if repo_config.get('create-tag') is False:
            print("Skipping " + repository_name +
                  " from pushing tag. create-tag=false")
            continue
        if selected_repo != "all" and repository_name != selected_repo:
            print("Skipping " + repository_name + " from pushing tag.")
            continue
        repository_dest = "./git_repos/"+repo_config['repository-name']
        repo = Repo(repository_dest)
        print("Pushing tag to repository " + repository_name + "...")
        print("Pushing baseline tag...")
        baseline_tag = repo.tags[baseline_tag_name]
        repo.remotes.origin.push(baseline_tag)
        print("... baseline tag was pushed.")
        print("... all changes were pushed to " + repository_name + ".")
    print("... everything was pushed to GitHub.")
def push_git_branchs(spec, selected_repo):
    """Push the release/<version> branch of every selected component to its
    origin remote."""
    print("Pushing branchs to GitHub...")
    release_branch = "release/"+spec["version"]
    for component in spec["components"]:
        name = component['repository-name']
        if selected_repo != "all" and name != selected_repo:
            print("Skipping " + name + " from pushing branch.")
            continue
        repo = Repo("./git_repos/" + component['repository-name'])
        print("Pushing branch "+release_branch +
              " to repository " + name + "...")
        repo.remotes.origin.push(release_branch)
        print("... branch was pushed.")
        print("... all changes were pushed to " + name + ".")
    print("... everything was pushed to GitHub.")
def build_docker_baseline(spec, selected_repo):
    """Build the Docker image(s) of every selected component, tagging them
    with the baseline tag from *spec*.

    Components without a "docker-hub-repositories" entry are skipped.
    """
    for repo_config in spec["components"]:
        repository_name = repo_config['repository-name']
        if selected_repo != "all" and repository_name != selected_repo:
            print("Skipping " + repository_name +
                  " from pushing Docker images.")
            continue
        if "docker-hub-repositories" not in repo_config:
            print("No image to generate in " + repository_name)
            continue
        for docker_repo in repo_config["docker-hub-repositories"]:
            docker_name = docker_repo["name"]
            dockerfile = docker_repo["dockerfile"]
            baseline_tag_name = spec["tag"]
            repository_dest = "./git_repos/"+repo_config['repository-name']
            print("Building image for " + docker_name)
            # Argument-list form (already imported subprocess.call, no shell)
            # instead of os.system: immune to spaces and shell metacharacters
            # in spec-provided names and paths.
            call(["docker", "build", "-t",
                  docker_name + ":" + baseline_tag_name,
                  "--no-cache", "-f", repository_dest + "/" + dockerfile,
                  repository_dest])
def tag_docker_baseline(spec, selected_repo):
    """Push the baseline-tagged Docker image(s) of every selected component
    to Docker Hub.

    Requires the DOCKER_USERNAME and DOCKER_TOKEN environment variables.
    """
    client = docker.from_env()
    docker_username = os.environ["DOCKER_USERNAME"]
    docker_password = os.environ["DOCKER_TOKEN"]
    print("Logging into Docker Hub...")
    client.login(docker_username, docker_password)
    print("... logged in.")
    for component in spec["components"]:
        name = component['repository-name']
        if selected_repo != "all" and name != selected_repo:
            print("Skipping " + name +
                  " from pushing Docker images.")
            continue
        if "docker-hub-repositories" not in component:
            print("No image to pushing in " + name)
            continue
        for docker_repo in component["docker-hub-repositories"]:
            image_ref = docker_repo["name"] + ":" + spec["tag"]
            print("Pushing new tag... " + image_ref)
            client.images.push(image_ref)
            print("... pushed.")
def remove_docker_tags(spec, selected_repo):
print("Logging into Docker Hub...")
login_data = requests.post('https://hub.docker.com/v2/users/login/',
json={"username": os.environ["DOCKER_USERNAME"],
"password": os.environ["DOCKER_TOKEN"]})
token = login_data.json().get('token')
print("... logged in.")
for repo_config in spec["components"]:
repository_name = repo_config['repository-name']
if selected_repo != "all" and repository_name != selected_repo:
print("Skipping " + repository_name +
" from untagging Docker images.")
continue
if not "docker-hub-repositories" in repo_config:
print("No image to generate in " + repository_name)
continue
for docker_repo in repo_config["docker-hub-repositories"]:
organization_name, image_name = docker_repo["name"].split("/")
tag_name = | |
<filename>sandbox/riskModelsResultsEval.py
# -*- coding: utf-8 -*-
"""
Created on Wed May 15 10:22:47 2019
@author: lawdfo
Purpose:
Read in the csv results file generated by (e.g.) riskModelsParamSweep.py
and report back some useful statistics.
"""
# Some fairly standard modules
import os, csv, lzma
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import descartes
from itertools import product
from collections import Counter, defaultdict
import datetime
import csv
import random
import time
from copy import deepcopy
import statistics
# The geopandas module does not come standard with anaconda,
# so you'll need to run the anaconda prompt as an administrator
# and install it via "conda install -c conda-forge geopandas".
# That installation will include pyproj and shapely automatically.
# These are useful modules for plotting geospatial data.
import geopandas as gpd
import pyproj
import shapely.geometry
# These modules are useful for tracking where modules are
# imported from, e.g., to check we're using our local edited
# versions of open_cp scripts.
import sys
import inspect
import importlib
# In order to use our local edited versions of open_cp
# scripts, we insert the parent directory of the current
# file ("..") at the start of our sys.path here.
sys.path.insert(0, os.path.abspath(".."))
# Elements from PredictCode's custom "open_cp" package
import open_cp
"""
import open_cp.geometry
import open_cp.plot
import open_cp.sources.chicago as chicago
import open_cp.retrohotspot as retro
import open_cp.prohotspot as phs
import open_cp.knox
"""
# Load custom functions that make dealing with datetime and timedelta easier
from crimeRiskTimeTools import generateDateRange, \
generateLaterDate, \
generateEarlierDate, \
getTimedPointsInTimeRange, \
getSixDigitDate, \
_day
"""
Expected data format of input CSV file, by column:
Header name Type Typical contents
dataset str Chicago
event_types str BURGLARY
cell_width int 100
eval_date np.datetime64 2016-03-01
train_len str 8W
test_len str 1D
coverage_rate float 0.01/0.02/0.05/0.1
test_events int 3/2/5/etc
hit_count int 1/2/0/etc
hit_pct float 0.33333 etc
model str naivecount/phs/etc
rand_seed int
rhs_bandwidth int
phs_time_unit str 1 weeks
phs_time_band str 4 weeks
phs_dist_unit int 100
phs_dist_band int 400
phs_weight str linear
"""
# One Python type per CSV column, in file order (see the format block above):
# dataset, event_types, cell_width, eval_date, train_len, test_len,
# coverage_rate, test_events, hit_count, hit_pct, model, rand_seed,
# rhs_bandwidth, phs_time_unit, phs_time_band, phs_dist_unit, phs_dist_band,
# phs_weight
csv_data_types = [
    str, str, int, np.datetime64, str, str, float, int, int,
    float, str, int, int, str, str, int, int, str,
]
def splitDataByTimespans(datalist, timespan, dateinfoname="eval_date"):
    """Bucket the result rows of *datalist* into consecutive windows of
    length *timespan*, keyed on each row's *dateinfoname* field.

    Returns a dict mapping each window's start date to the rows whose date
    falls inside [start, start + timespan).
    """
    print("Performing splitDataByTimespans")
    all_dates = sorted({row[dateinfoname] for row in datalist})
    # Window starts spanning the earliest through the latest observed date.
    window_starts = generateDateRange(start=all_dates[0],
                                      end=all_dates[-1] + _day,
                                      step=timespan)
    buckets = defaultdict(list)
    for row in datalist:
        row_date = row[dateinfoname]
        for start in window_starts:
            if start <= row_date < generateLaterDate(start, timespan):
                buckets[start].append(row)
                break
    print("Ending splitDataByTimespans")
    return buckets
"""
Each element of output should have this info:
earliest test date of range
time band
dist band
avg hit rate
"""
def getPhsSpanStats(datalist, timespan):
    """Return (window_start, time_band, dist_band, avg_hit_rate) tuples for
    every PHS bandwidth pair, per *timespan* window of *datalist*."""
    print("Performing getPhsSpanStats")
    data_by_daterange = splitDataByTimespans(datalist, timespan)
    summary = []
    for window, rows in data_by_daterange.items():
        hit_rates = getPhsHitRates(rows)
        summary.extend(
            (window, bands[0], bands[1], hit_rates[bands]["avg_hit_rate"])
            for bands in hit_rates)
    print("Ending getPhsSpanStats")
    return summary
def getModelSpanStats(datalist, timespan, model):
    """Per-*timespan* summary statistics for *model* ("random", "naive",
    "ideal", or "phs"); exits the process for unknown models.

    PHS results are delegated to getPhsSpanStats; the other models yield
    (window_start, avg_hit_rates) pairs.
    """
    print("Performing getModelSpanStats")
    if model not in ("random", "naive", "ideal", "phs"):
        print("model required for getModelSpanStats")
        sys.exit(1)
    if model == "phs":
        return getPhsSpanStats(datalist, timespan)
    # NOTE(review): getAvgHitRates is not defined in this module -- confirm
    # it is provided elsewhere.
    stats = [(window, getAvgHitRates(rows))
             for window, rows in splitDataByTimespans(datalist,
                                                      timespan).items()]
    print("Ending getModelSpanStats")
    return stats
"""
Each element of output should have this info:
coverage
earliest test date of range
time band
dist band
avg hit rate
"""
def writeModelSummaryCsv(datalists_by_cov, timespan, model, csvname = "temp.csv"):
    """Write per-coverage, per-window summary rows for *model* to *csvname*.

    NOTE(review): ends the whole process via sys.exit(0) -- apparently a
    deliberate short-circuit for one-off analysis runs; confirm before
    reusing this as a library function.
    """
    print("Performing writeModelSummaryCsv")
    # Compute all summaries first, then write them in one pass.
    summaries = {cov: getModelSpanStats(rows, timespan, model)
                 for cov, rows in datalists_by_cov.items()}
    with open(csvname, "w") as csvf:
        writer = csv.writer(csvf, delimiter=",", lineterminator="\n")
        for cov, summary in summaries.items():
            for entry in summary:
                writer.writerow([cov] + list(entry))
    print("Ending writeModelSummaryCsv")
    sys.exit(0)
def writePhsVariabilityCsv(datalists_by_cov, timespan, csvname = "temp.csv"):
    """Write per-(coverage, time-band, dist-band) hit-rate statistics
    (mean, stdev, variance) across windows to *csvname*, echoing each row.

    Exits with status 1 if the trios do not all have the same number of
    hit rates.

    NOTE(review): ends the whole process via sys.exit(0) -- apparently a
    deliberate short-circuit for one-off analysis runs.
    """
    print("Performing writePhsVariabilityCsv")
    # Collect every window's avg hit rate, keyed by (coverage, time, dist).
    rates_by_trio = defaultdict(list)
    for cov, datalist in datalists_by_cov.items():
        for entry in getPhsSpanStats(datalist, timespan):
            rates_by_trio[(cov, entry[1], entry[2])].append(entry[3])
    trios = sorted(rates_by_trio)
    counts = [len(rates_by_trio[t]) for t in trios]
    num_rates = counts[0]
    if any(c != num_rates for c in counts):
        print("Error! Not all (cov, time, dist) trios have same number of results!")
        print(counts)
        sys.exit(1)
    with open(csvname, "w") as csvf:
        writer = csv.writer(csvf, delimiter=",", lineterminator="\n")
        for trio in trios:
            rates = rates_by_trio[trio]
            stats = (sum(rates)/num_rates,
                     statistics.stdev(rates),
                     statistics.variance(rates))
            writer.writerow(list(trio) + list(stats))
            print(" ".join(str(x) for x in list(trio) + list(stats)))
    print("Ending writePhsVariabilityCsv")
    sys.exit(0)
# datalist = list of results for PHS
# timespan = how frequently to check scores. Do we look at the top n models
# from each day, or averaged over each month, etc
# topnum = how many of the top models we consider successful. Top 10? Top 1?
def checkPhsConsistency(datalist, timespan, topnum):
    """Check how consistently the same PHS bandwidth pairs rank among the
    top *topnum* (by average hit rate) across *timespan* windows.

    Entries tied with the last qualifying rate are also kept, so more than
    *topnum* pairs may be selected per window. Delegates the run analysis
    to findMinimalPhsBandCovering.
    """
    print("Performing checkPhsConsistency")
    data_by_daterange = splitDataByTimespans(datalist, timespan)
    best_avgrate_bps = []
    for daterange in data_by_daterange:
        # BUG FIX: the dict was previously *called* --
        # data_by_daterange(daterange) -- which raises TypeError; it must be
        # indexed instead.
        rate_info = getPhsHitRates(data_by_daterange[daterange])
        d_sort_avgrate = sorted(rate_info.items(),
                                key=lambda ri: ri[1]["avg_hit_rate"],
                                reverse=True)
        best_bp_avgrate = d_sort_avgrate[:topnum]
        # Extend the cut with any entries tied with the last kept rate.
        for d in d_sort_avgrate[topnum:]:
            if d[1]["avg_hit_rate"] < best_bp_avgrate[-1][1]["avg_hit_rate"]:
                break
            best_bp_avgrate.append(d)
        best_avgrate_bps.append(best_bp_avgrate)
    # BUG FIX: pass an indexable list, not a dict_keys view, because the
    # callee indexes daterange_list[i].
    findMinimalPhsBandCovering(best_avgrate_bps,
                               list(data_by_daterange.keys()))
def findMinimalPhsBandCovering(best_bps, daterange_list):
    """Greedily partition the sequence of per-window "best bandwidth pair"
    sets into maximal consecutive runs that share at least one common pair.

    best_bps: one list per window of (bandwidth_pair, info) entries.
    daterange_list: the window start dates, aligned with best_bps.

    Prints the run lengths, the dates in each run, the covering pair sets,
    and the min/max run length; returns None.
    """
    print("Performing findMinimalPhsBandCovering")
    # Accept any iterable (e.g. dict_keys from a careless caller) since we
    # index by position below.
    daterange_list = list(daterange_list)
    covered_span_list = []
    covered_span_dates = [[]]
    covering_bps = []
    bp_set = set(x[0] for x in best_bps[0])
    running_span_count = 0
    for i, bp_info in enumerate(best_bps):
        new_bp_set = bp_set & set(x[0] for x in bp_info)
        if len(new_bp_set) == 0:
            # No common pair also covers this window: close the current run
            # and start a new one here.  (The old "int(count+0)" copy was
            # redundant for an immutable int.)
            covered_span_list.append(running_span_count)
            running_span_count = 1
            covered_span_dates.append([])
            covering_bps.append(deepcopy(bp_set))
            bp_set = set(x[0] for x in bp_info)
        else:
            bp_set = new_bp_set
            running_span_count += 1
        covered_span_dates[-1].append(daterange_list[i])
    # Close the final run.
    covered_span_list.append(running_span_count)
    covering_bps.append(deepcopy(bp_set))
    print(covered_span_list)
    print(covered_span_dates)
    print(covering_bps)
    sorted_covered_span_list = sorted(covered_span_list)
    # Shortest and longest covered run lengths.
    print(sorted_covered_span_list[0])
    print(sorted_covered_span_list[-1])
"""
getPhsHitRates
Input: "datalist" = list where each entry is a dictionary containing the
information from a line of the csv results file (casted
as the appropriate data type) as well as "param_pair"
which is a tuple of the time and dist bandwidths.
Note: Ideally this datalist is a subset of the full csv data, so that
hit rates ar calculated over smaller timespans, e.g. monthly
Output: "info_by_band_pair" = dict that maps bandwidth pairs ("bp") to:
"bands": same as key; can be useful if just grabbing values
"num_tests": Number of experiments/tests/evaluations performed.
All bp's within a datalist fed into this function should end
up with the same number of tests -- I can't think of a reason
why this wouldn't happen. However, note that this number MAY
change across multiple runs of this function with different
data subsets. For example, maybe you calculate over every
month, but months have different numbers of days.
"total_events": Total number of events (i.e. crimes) in the data.
This is calculated by adding the number for the first time
each date is witnessed. So again, it's important that all bp's
are tested on all the same days.
"total_hits": Total number of hits achieved by the bp's model.
"total_rates": Sum of all daily(?) hit rates. This number is
essentially useless on its own, but used for calculating avg.
"avg_hit_rate": Average of all daily hit rates, calculated as
total_rates/num_tests
("overall_hit_rate"): A different average hit rate, being the total
number of hits divided by the total number of events. This
was removed from use (commented out) once we decided this
metric was less useful than avg_hit_rate, since this could be
swayed by a generally poor model that rarely performs extremely
well.
"""
def getPhsHitRates(datalist):
print("Performing getPhsHitRates")
# Obtain set of bandwidths
band_pair_list = sorted(set([d["param_pair"] for d in datalist]))
# Instantiate info to obtain
info_by_band_pair = dict()
for bp in band_pair_list:
info_by_band_pair[bp] = dict([\
("bands", bp),\
("num_tests", 0),\
("total_events", 0),\
("total_hits", 0),\
("total_rates", float(0))\
])
# Update info via running counts for each bandwidth pair
for result in datalist:
bp = result["param_pair"]
info_by_band_pair[bp]["num_tests"] += 1
info_by_band_pair[bp]["total_events"] += result["test_events"]
info_by_band_pair[bp]["total_hits"] += result["hit_count"]
if result["test_events"] > 0:
info_by_band_pair[bp]["total_rates"] += result["hit_count"]/result["test_events"]
# Confirm all bandwidth pairs had the same number of tests
num_tests_per_bp = [info_by_band_pair[bp]["num_tests"] for bp in band_pair_list]
if len(set(num_tests_per_bp)) != 1:
print("Error! Some bandwidth pairs have different numbers of tests!")
print(Counter(num_tests_per_bp))
sys.exit(1)
num_tests = num_tests_per_bp[0]
# Compute the average hit rates for each bandwidth pair
for bp in band_pair_list:
info_by_band_pair[bp]["avg_hit_rate"] = info_by_band_pair[bp]["total_rates"]/num_tests
# The following deprecated code computes | |
# Copyright (c) Microsoft. All rights reserved.
from random import shuffle
import json
import csv
import pandas as pd
from nltk.tokenize import word_tokenize
def load_scitail(file):
    """Load SciTail data: one tab-separated (premise, hypothesis, label)
    triple per line; lines whose premise is '-' are skipped."""
    samples = []
    with open(file, encoding="utf8") as fin:
        for line in fin:
            fields = line.strip().split('\t')
            assert len(fields) > 2
            if fields[0] == '-':
                continue
            # uid is the running count of kept rows.
            samples.append({'uid': str(len(samples)),
                            'premise': fields[0],
                            'hypothesis': fields[1],
                            'label': fields[2]})
    return samples
def load_snli(file, header=True):
    """Load SNLI tsv: uid=col0, premise=col7, hypothesis=col8, label=last
    column; unlabeled rows (label '-') are skipped."""
    samples = []
    with open(file, encoding="utf8") as fin:
        for line in fin:
            if header:
                # Consume only the very first line.
                header = False
                continue
            fields = line.strip().split('\t')
            assert len(fields) > 10
            label = fields[-1]
            if label == '-':
                continue
            samples.append({'uid': fields[0], 'premise': fields[7],
                            'hypothesis': fields[8], 'label': label})
    return samples
def load_winogrande(file, is_train=True):
    """Load Winogrande jsonl: the '_' blank in each sentence is filled with
    option1 (premise) and option2 (hypothesis); the label is answer-1 when
    training, else a fixed 0 placeholder."""
    samples = []
    with open(file, encoding="utf8") as fin:
        for line in fin:
            record = json.loads(line.strip())
            sentence = record['sentence']
            premise = sentence.replace('_', record['option1'])
            hypothesis = sentence.replace('_', record['option2'])
            label = int(record['answer']) - 1 if is_train else 0
            samples.append({'uid': record['qID'], 'premise': premise,
                            'hypothesis': hypothesis, 'label': label})
    return samples
def load_sickr(file, is_train=True):
    """Load SICK relatedness data (tsv via pandas). Training keeps the
    non-TEST SemEval splits; evaluation keeps only TEST rows."""
    samples = []
    frame = pd.read_csv(file, encoding='utf8', sep='\t', header=0)
    for _, row in frame.iterrows():
        # Train -> non-TEST rows; eval -> TEST rows only.
        if (row['SemEval_set'] == "TEST") == is_train:
            continue
        samples.append({'uid': row['pair_ID'],
                        'premise': row['sentence_A'],
                        'hypothesis': row['sentence_B'],
                        'label': row['relatedness_score']})
    return samples
def load_sicke(file, is_train=True):
    """Load SICK entailment data (tsv via pandas). Labels are lower-cased;
    training keeps the non-TEST SemEval splits, evaluation keeps only TEST
    rows."""
    samples = []
    frame = pd.read_csv(file, encoding='utf8', sep='\t', header=0)
    for _, row in frame.iterrows():
        # Train -> non-TEST rows; eval -> TEST rows only.
        if (row['SemEval_set'] == "TEST") == is_train:
            continue
        samples.append({'uid': row['pair_ID'],
                        'premise': row['sentence_A'].replace(" ", " "),
                        'hypothesis': row['sentence_B'].replace(" ", " "),
                        'label': row['entailment_label'].lower()})
    return samples
def load_wikiqa(file, is_train=True):
    """Load WikiQA tsv via pandas (quoting disabled); the uid is
    "QuestionID-SentenceID" and every row is kept regardless of is_train."""
    samples = []
    frame = pd.read_csv(file, encoding='utf8', sep='\t', header=0,
                        quoting=csv.QUOTE_NONE)
    for _, row in frame.iterrows():
        samples.append({'uid': row['QuestionID'] + '-' + row['SentenceID'],
                        'premise': row['Question'],
                        'hypothesis': row['Sentence'],
                        'label': row['Label']})
    return samples
def load_imdb(file, is_train=True):
    """Load the IMDB sentiment csv via pandas. Unsupervised rows are
    dropped; tabs and HTML "<br />" breaks are stripped from the review.
    Training keeps type=="train" rows, evaluation keeps type=="test"."""
    label_map = {'neg': 0, 'pos': 1}
    wanted_type = "train" if is_train else "test"
    samples = []
    frame = pd.read_csv(file, encoding='utf8', sep=',', header=0)
    for _, row in frame.iterrows():
        if row['label'] == 'unsup':
            continue
        if row['type'] != wanted_type:
            continue
        review = row['review'].replace('\t', " ").replace("<br />", "").replace(" ", " ")
        samples.append({'uid': row['id'], 'premise': review,
                        'label': label_map[row['label']]})
    return samples
def load_mnli(file, header=True, multi_snli=False, is_train=True):
    """Load MNLI tsv: uid=col0, premise=col8, hypothesis=col9; the gold
    label (last column) is used when training, otherwise the fixed
    "contradiction" placeholder. '-' rows are skipped."""
    samples = []
    with open(file, encoding="utf8") as fin:
        for line in fin:
            if header:
                header = False
                continue
            fields = line.strip().split('\t')
            assert len(fields) > 9
            if fields[-1] == '-':
                continue
            label = fields[-1] if is_train else "contradiction"
            samples.append({'uid': fields[0], 'premise': fields[8],
                            'hypothesis': fields[9], 'label': label})
    return samples
def load_ax(file, header=True, multi_snli=False, is_train=False):
    """Load the GLUE diagnostic (AX) tsv: exactly (uid, premise, hypothesis)
    per row; the label defaults to "contradiction" since AX is unlabeled."""
    samples = []
    with open(file, encoding="utf8") as fin:
        for line in fin:
            if header:
                header = False
                continue
            fields = line.strip().split('\t')
            assert len(fields) == 3
            if fields[-1] == '-':
                continue
            label = fields[-1] if is_train else "contradiction"
            samples.append({'uid': fields[0], 'premise': fields[1],
                            'hypothesis': fields[2], 'label': label})
    return samples
def load_mrpc(file, header=True, is_train=True):
    """Load MRPC tsv: gold label in column 0 (training only; fixed 0
    otherwise), sentence pair in the last two columns; uid is the running
    row count."""
    samples = []
    with open(file, encoding="utf8") as fin:
        for line in fin:
            if header:
                header = False
                continue
            fields = line.strip().split('\t')
            assert len(fields) > 4
            label = int(fields[0]) if is_train else 0
            samples.append({'uid': len(samples), 'premise': fields[-2],
                            'hypothesis': fields[-1], 'label': label})
    return samples
def load_qnli(file, header=True, is_train=True):
    """Load QNLI tsv for classification: uid, question (premise), sentence
    (hypothesis); the gold label is used only when training, otherwise the
    fixed "not_entailment" placeholder."""
    samples = []
    with open(file, encoding="utf8") as fin:
        for line in fin:
            if header:
                header = False
                continue
            fields = line.strip().split('\t')
            assert len(fields) > 2
            label = fields[-1] if is_train else "not_entailment"
            samples.append({'uid': fields[0], 'premise': fields[1],
                            'hypothesis': fields[2], 'label': label})
    return samples
def load_qqp(file, header=True, is_train=True):
    """Load QQP tsv. Training rows need at least 6 columns (shorter ones
    are silently skipped) and use a running-count uid; test rows are exactly
    (id, q1, q2) with a fixed 0 label and the file-provided uid."""
    samples = []
    cnt = 0
    with open(file, encoding="utf8") as fin:
        for line in fin:
            if header:
                header = False
                continue
            fields = line.strip().split('\t')
            if is_train:
                if len(fields) < 6:
                    continue
                sample = {'uid': cnt, 'premise': fields[-3],
                          'hypothesis': fields[-2], 'label': int(fields[-1])}
            else:
                assert len(fields) == 3
                sample = {'uid': int(fields[0]), 'premise': fields[-2],
                          'hypothesis': fields[-1], 'label': 0}
            samples.append(sample)
            cnt += 1
    return samples
def load_rte(file, header=True, is_train=True):
    """Load RTE tsv: uid is column 0 (int), the sentence pair sits in the
    trailing columns; the gold label is used only when training (test rows
    are exactly 3 columns with a "not_entailment" placeholder)."""
    samples = []
    with open(file, encoding="utf8") as fin:
        for line in fin:
            if header:
                header = False
                continue
            fields = line.strip().split('\t')
            if is_train:
                if len(fields) < 4:
                    continue
                sample = {'uid': int(fields[0]), 'premise': fields[-3],
                          'hypothesis': fields[-2], 'label': fields[-1]}
            else:
                assert len(fields) == 3
                sample = {'uid': int(fields[0]), 'premise': fields[-2],
                          'hypothesis': fields[-1], 'label': "not_entailment"}
            samples.append(sample)
    return samples
def load_wnli(file, header=True, is_train=True):
    """Load WNLI tsv: uid is the running row count; the gold int label is
    used only when training (test rows are exactly 3 columns, label 0)."""
    samples = []
    with open(file, encoding="utf8") as fin:
        for line in fin:
            if header:
                header = False
                continue
            fields = line.strip().split('\t')
            if is_train:
                if len(fields) < 4:
                    continue
                sample = {'uid': len(samples), 'premise': fields[-3],
                          'hypothesis': fields[-2], 'label': int(fields[-1])}
            else:
                assert len(fields) == 3
                sample = {'uid': len(samples), 'premise': fields[-2],
                          'hypothesis': fields[-1], 'label': 0}
            samples.append(sample)
    return samples
def load_diag(file, header=True):
    """Read a GLUE diagnostic TSV file into a list of sample dicts.

    Each line must have more than 3 tab-separated columns; the last three
    are premise, hypothesis and label, and uids are assigned sequentially.
    """
    samples = []
    count = 0
    with open(file, encoding="utf8") as handle:
        for raw in handle:
            if header:
                # Skip the single leading header row.
                header = False
                continue
            fields = raw.strip().split('\t')
            assert len(fields) > 3
            samples.append({'uid': count, 'premise': fields[-3],
                            'hypothesis': fields[-2], 'label': fields[-1]})
            count += 1
    return samples
def load_amazon(file, is_train=True):
    """Read an Amazon-reviews CSV file into a list of sample dicts.

    Each row is (label, title, body); the premise is "title # body" with
    tabs normalised to spaces.  In training mode the 1-based CSV label is
    shifted to 0-based; otherwise the label defaults to 0.
    """
    samples = []
    with open(file, encoding="utf8") as handle:
        for idx, fields in enumerate(csv.reader(handle)):
            text = (fields[1] + ' # ' + fields[2]).replace("\t", " ").replace(" ", " ")
            label = int(fields[0]) - 1 if is_train else 0
            samples.append({'uid': idx, 'premise': text, 'label': label})
    return samples
def load_yelp(file, is_train=True):
    """Read a Yelp-reviews CSV file into a list of sample dicts.

    Each row is (label, review); literal "\\n" and escaped quotes in the
    review text are unescaped and tabs normalised to spaces.  In training
    mode the 1-based CSV label is shifted to 0-based; otherwise 0.
    """
    samples = []
    with open(file, encoding="utf8") as handle:
        for idx, fields in enumerate(csv.reader(handle)):
            text = fields[1].replace("\\n", " ").replace("\\\"", "\"").replace("\t", " ").replace(" ", " ")
            label = int(fields[0]) - 1 if is_train else 0
            samples.append({'uid': idx, 'premise': text, 'label': label})
    return samples
def load_cr(file, is_train=True):
    """Read a CR (customer review) file into a list of sample dicts.

    Each line is "<digit label> <review text>".  The label is always parsed
    from the first character (is_train is accepted for interface parity with
    the other loaders but does not change behaviour); escape sequences are
    unescaped and tabs normalised to spaces.
    """
    samples = []
    with open(file, encoding="utf8") as handle:
        for idx, raw in enumerate(handle):
            label = int(raw[0])
            text = raw[2:].strip().replace("\\n", " ").replace("\\\"", "\"").replace("\t", " ").replace(" ", " ")
            samples.append({'uid': idx, 'premise': text, 'label': label})
    return samples
def load_cola(file, header=True, is_train=True):
rows = []
cnt = 0
with open(file, encoding="utf8") as f:
for line | |
e:
print(e);
def get_concAndConcUnits_experimentIDAndSampleNameShortAndTimePointAndComponentGroupName_dataStage01ReplicatesMI(self, experiment_id_I, sample_name_short_I, time_point_I, component_group_name_I):
"""Query calculated concentrations"""
try:
data = self.session.query(data_stage01_quantification_replicatesMI.calculated_concentration,
data_stage01_quantification_replicatesMI.calculated_concentration_units).filter(
data_stage01_quantification_replicatesMI.experiment_id.like(experiment_id_I),
data_stage01_quantification_replicatesMI.sample_name_short.like(sample_name_short_I),
data_stage01_quantification_replicatesMI.time_point.like(time_point_I),
data_stage01_quantification_replicatesMI.component_group_name.like(component_group_name_I),
data_stage01_quantification_replicatesMI.used_.is_(True)).all();
if len(data)>1:
print('more than 1 calculated_concentration retrieved per component_name')
if data:
conc_O = data[0][0];
conc_units_O = data[0][1];
else:
conc_O = None;
conc_units_O = None;
return conc_O, conc_units_O;
except SQLAlchemyError as e:
print(e);
# Query information from calibrators based on data_stage01_quantification_replicatesMI
def get_lloq_ExperimentIDAndComponentName_dataStage01LLOQAndULOQ(self, experiment_id_I, component_name_I):
'''Query lloq for a given component and experiment
NOTE: intended to be used in a loop'''
try:
calibrators_parameters = self.session.query(data_stage01_quantification_LLOQAndULOQ.lloq,
data_stage01_quantification_LLOQAndULOQ.calculated_concentration_units).filter(
data_stage01_quantification_LLOQAndULOQ.experiment_id.like(experiment_id_I),
data_stage01_quantification_LLOQAndULOQ.component_name.like(component_name_I)).first();
if calibrators_parameters:
lloq_O = calibrators_parameters.lloq;
calculated_concentration_units_O = calibrators_parameters.calculated_concentration_units;
return lloq_O, calculated_concentration_units_O;
else:
return None,None;
except SQLAlchemyError as e:
print(e);
# Query data from data_stage01_quantification_averagesMI:
def get_concentrations_experimentIDAndSampleNameAbbreviationAndTimePoint_dataStage01AveragesMI(self, experiment_id_I, sample_name_abbreviation_I, time_point_I):
"""get data from experiment ID, sample name abbreviation, and time point"""
try:
data = self.session.query(data_stage01_quantification_averagesMI.calculated_concentration_average,
data_stage01_quantification_averagesMI.calculated_concentration_cv,
data_stage01_quantification_averagesMI.calculated_concentration_units,
data_stage01_quantification_averagesMI.component_group_name).filter(
data_stage01_quantification_averagesMI.experiment_id.like(experiment_id_I),
data_stage01_quantification_averagesMI.sample_name_abbreviation.like(sample_name_abbreviation_I),
data_stage01_quantification_averagesMI.time_point.like(time_point_I),
data_stage01_quantification_averagesMI.used_.is_(True)).all();
data_O = {};
for d in data:
data_O[d.component_group_name] = {'concentration':d.calculated_concentration_average,
'concentration_cv':d.calculated_concentration_cv,
'concentration_units':d.calculated_concentration_units};
return data_O;
except SQLAlchemyError as e:
print(e);
# Query sample names from data_stage01_quantification_averagesMIgeo:
def get_sampleNameAbbreviations_experimentIDAndTimePointAndComponentName_dataStage01AveragesMIgeo(self,experiment_id_I,time_point_I,component_name_I):
'''Querry sample names (i.e. unknowns) that are used from
the experiment'''
try:
sample_names = self.session.query(data_stage01_quantification_averagesMIgeo.sample_name_abbreviation).filter(
data_stage01_quantification_averagesMIgeo.experiment_id.like(experiment_id_I),
data_stage01_quantification_averagesMIgeo.time_point.like(time_point_I),
data_stage01_quantification_averagesMIgeo.component_name.like(component_name_I),
data_stage01_quantification_averagesMIgeo.used_.is_(True)).group_by(
data_stage01_quantification_averagesMIgeo.sample_name_abbreviation).order_by(
data_stage01_quantification_averagesMIgeo.sample_name_abbreviation.asc()).all();
sample_names_O = [];
for sn in sample_names: sample_names_O.append(sn.sample_name_abbreviation);
return sample_names_O;
except SQLAlchemyError as e:
print(e);
# Query data from data_stage01_quantification_averagesMIgeo:
def get_concentrations_experimentIDAndSampleNameAbbreviationAndTimePoint_dataStage01AveragesMIgeo(self, experiment_id_I, sample_name_abbreviation_I, time_point_I):
"""get data from experiment ID, sample name abbreviation, and time point"""
try:
data = self.session.query(data_stage01_quantification_averagesMIgeo.calculated_concentration_average,
data_stage01_quantification_averagesMIgeo.calculated_concentration_var,
data_stage01_quantification_averagesMIgeo.calculated_concentration_lb,
data_stage01_quantification_averagesMIgeo.calculated_concentration_ub,
data_stage01_quantification_averagesMIgeo.calculated_concentration_units,
data_stage01_quantification_averagesMIgeo.component_group_name).filter(
data_stage01_quantification_averagesMIgeo.experiment_id.like(experiment_id_I),
data_stage01_quantification_averagesMIgeo.sample_name_abbreviation.like(sample_name_abbreviation_I),
data_stage01_quantification_averagesMIgeo.time_point.like(time_point_I),
data_stage01_quantification_averagesMIgeo.used_.is_(True)).all();
data_O = {};
for d in data:
data_O[d.component_group_name] = {'concentration':d.calculated_concentration_average,
'concentration_var':d.calculated_concentration_var,
'concentration_lb':d.calculated_concentration_lb,
'concentration_ub':d.calculated_concentration_ub,
'concentration_units':d.calculated_concentration_units};
return data_O;
except SQLAlchemyError as e:
print(e);
    def get_data_experimentIDAndSampleNameAbbreviationAndTimePointAndComponentName_dataStage01AveragesMIgeo(self, experiment_id_I, sample_name_abbreviation_I, time_point_I, component_name_I):
        """Return one averaged MIgeo row as a plain dict.

        Matches the used_ rows of data_stage01_quantification_averagesMIgeo on
        experiment id, sample name abbreviation, time point and component
        name.  Returns {} when nothing matches, and None (implicitly) when a
        SQLAlchemyError is caught.  If several rows match, only the first is
        returned (no warning is printed).
        """
        try:
            data = self.session.query(data_stage01_quantification_averagesMIgeo).filter(
                data_stage01_quantification_averagesMIgeo.experiment_id.like(experiment_id_I),
                data_stage01_quantification_averagesMIgeo.sample_name_abbreviation.like(sample_name_abbreviation_I),
                data_stage01_quantification_averagesMIgeo.component_name.like(component_name_I),
                data_stage01_quantification_averagesMIgeo.time_point.like(time_point_I),
                data_stage01_quantification_averagesMIgeo.used_.is_(True)).all();
            data_O = {};
            if data:
                # Flatten the first matching ORM row into a plain dict.
                data_O={"experiment_id":data[0].experiment_id,
                        "sample_name_abbreviation":data[0].sample_name_abbreviation,
                        "time_point":data[0].time_point,
                        "component_group_name":data[0].component_group_name,
                        "component_name":data[0].component_name,
                        "calculated_concentration_average":data[0].calculated_concentration_average,
                        "calculated_concentration_var":data[0].calculated_concentration_var,
                        "calculated_concentration_lb":data[0].calculated_concentration_lb,
                        "calculated_concentration_ub":data[0].calculated_concentration_ub,
                        "calculated_concentration_units":data[0].calculated_concentration_units,
                        "used_":data[0].used_};
            return data_O;
        except SQLAlchemyError as e:
            print(e);
# Query time points from data_stage01_quantification_averagesMIgeo
def get_timePoint_experimentID_dataStage01AveragesMIgeo(self,experiment_id_I):
'''Querry time points that are used from the experiment'''
try:
time_points = self.session.query(data_stage01_quantification_averagesMIgeo.time_point).filter(
data_stage01_quantification_averagesMIgeo.experiment_id.like(experiment_id_I),
data_stage01_quantification_averagesMIgeo.used_.is_(True)).group_by(
data_stage01_quantification_averagesMIgeo.time_point).order_by(
data_stage01_quantification_averagesMIgeo.time_point.asc()).all();
time_points_O = [];
for tp in time_points: time_points_O.append(tp.time_point);
return time_points_O;
except SQLAlchemyError as e:
print(e);
def get_timePoint_experimentIDAndComponentName_dataStage01AveragesMIgeo(self,experiment_id_I,component_name_I):
'''Querry time points that are used from the experiment'''
try:
time_points = self.session.query(data_stage01_quantification_averagesMIgeo.time_point).filter(
data_stage01_quantification_averagesMIgeo.experiment_id.like(experiment_id_I),
data_stage01_quantification_averagesMIgeo.component_name.like(component_name_I),
data_stage01_quantification_averagesMIgeo.used_.is_(True)).group_by(
data_stage01_quantification_averagesMIgeo.time_point).order_by(
data_stage01_quantification_averagesMIgeo.time_point.asc()).all();
time_points_O = [];
for tp in time_points: time_points_O.append(tp.time_point);
return time_points_O;
except SQLAlchemyError as e:
print(e);
# Query component names from data_stage01_quantification_averagesMIgeo:
def get_componentNames_experimentIDAndTimePoint_dataStage01AveragesMIgeo(self,experiment_id_I,time_point_I):
'''Querry component Names that are used from the experiment'''
try:
component_names = self.session.query(data_stage01_quantification_averagesMIgeo.component_name).filter(
data_stage01_quantification_averagesMIgeo.experiment_id.like(experiment_id_I),
data_stage01_quantification_averagesMIgeo.time_point.like(time_point_I),
data_stage01_quantification_averagesMIgeo.used_.is_(True)).group_by(
data_stage01_quantification_averagesMIgeo.component_name).order_by(
data_stage01_quantification_averagesMIgeo.component_name.asc()).all();
component_names_O = [];
for cn in component_names: component_names_O.append(cn.component_name);
return component_names_O;
except SQLAlchemyError as e:
print(e);
def get_componentNames_experimentID_dataStage01AveragesMIgeo(self,experiment_id_I):
'''Querry component Names that are used from the experiment'''
try:
component_names = self.session.query(data_stage01_quantification_averagesMIgeo.component_name).filter(
data_stage01_quantification_averagesMIgeo.experiment_id.like(experiment_id_I),
data_stage01_quantification_averagesMIgeo.used_.is_(True)).group_by(
data_stage01_quantification_averagesMIgeo.component_name).order_by(
data_stage01_quantification_averagesMIgeo.component_name.asc()).all();
component_names_O = [];
for cn in component_names: component_names_O.append(cn.component_name);
return component_names_O;
except SQLAlchemyError as e:
print(e);
# Query MS group information from ms_components
def get_msGroup_componentName_MSComponents(self,component_name_I):
'''Querry component group names from the component name
NOTE: intended to be used within a for loop'''
try:
component_group_name = self.session.query(MS_components.ms_group).filter(
MS_components.component_name.like(component_name_I)).group_by(
MS_components.ms_group).all();
if len(component_group_name)>1:
print('more than 1 component_group_name retrieved per component_name')
component_group_name_O = component_group_name[0];
return component_group_name_O;
except SQLAlchemyError as e:
print(e);
# Query sample names from data_stage01_quantification_physiologicalRatios_replicates
    def get_sampleNameAbbreviations_experimentID_dataStage01PhysiologicalRatiosReplicates(self,experiment_id_I,exp_type_I=4):
        '''Query sample name abbreviations (i.e. unknowns) that are used from
        the experiment.

        The filter conditions implicitly join experiment -> sample ->
        sample_description -> physiologicalRatios replicates.
        NOTE(review): exp_type_I defaults to 4 -- presumably the
        quantification experiment type id; confirm against the experiment
        table.
        '''
        try:
            sample_names = self.session.query(sample_description.sample_name_abbreviation).filter(
                data_stage01_quantification_physiologicalRatios_replicates.experiment_id.like(experiment_id_I),
                data_stage01_quantification_physiologicalRatios_replicates.sample_name_short.like(sample_description.sample_name_short),
                experiment.exp_type_id == exp_type_I,
                experiment.id.like(experiment_id_I),
                experiment.sample_name.like(sample.sample_name),
                sample.sample_id.like(sample_description.sample_id),
                data_stage01_quantification_physiologicalRatios_replicates.used_.is_(True)).group_by(
                sample_description.sample_name_abbreviation).order_by(
                sample_description.sample_name_abbreviation.asc()).all();
            sample_names_O = [];
            for sn in sample_names: sample_names_O.append(sn.sample_name_abbreviation);
            return sample_names_O;
        except SQLAlchemyError as e:
            print(e);
    def get_sampleNameShort_experimentIDAndSampleNameAbbreviationAndRatioIDAndTimePoint_dataStage01PhysiologicalRatiosReplicates(self,experiment_id_I,sample_name_abbreviation_I,physiologicalratio_id_I,time_point_I,exp_type_I=4):
        '''Query short sample names used in the experiment, selected by sample
        name abbreviation, ratio id and time point.

        The filter conditions implicitly join experiment -> sample ->
        sample_description -> physiologicalRatios replicates.
        NOTE(review): exp_type_I defaults to 4 -- presumably the
        quantification experiment type id; confirm against the experiment
        table.
        '''
        try:
            sample_names = self.session.query(data_stage01_quantification_physiologicalRatios_replicates.sample_name_short).filter(
                sample_description.sample_name_abbreviation.like(sample_name_abbreviation_I),
                sample_description.time_point.like(time_point_I),
                experiment.exp_type_id == exp_type_I,
                experiment.id.like(experiment_id_I),
                experiment.sample_name.like(sample.sample_name),
                sample.sample_id.like(sample_description.sample_id),
                data_stage01_quantification_physiologicalRatios_replicates.time_point.like(time_point_I),
                data_stage01_quantification_physiologicalRatios_replicates.experiment_id.like(experiment_id_I),
                data_stage01_quantification_physiologicalRatios_replicates.physiologicalratio_id.like(physiologicalratio_id_I),
                data_stage01_quantification_physiologicalRatios_replicates.sample_name_short.like(sample_description.sample_name_short),
                data_stage01_quantification_physiologicalRatios_replicates.used_.is_(True)).group_by(
                data_stage01_quantification_physiologicalRatios_replicates.sample_name_short).order_by(
                data_stage01_quantification_physiologicalRatios_replicates.sample_name_short.asc()).all();
            sample_names_short_O = [];
            for sn in sample_names: sample_names_short_O.append(sn.sample_name_short);
            return sample_names_short_O;
        except SQLAlchemyError as e:
            print(e);
# Query time points from data_stage01_quantification_physiologicalRatios_replicates
    def get_timePoint_experimentIDAndSampleNameAbbreviation_dataStage01PhysiologicalRatiosReplicates(self,experiment_id_I,sample_name_abbreviation_I,exp_type_I=4):
        '''Query time points that are used from the experiment for one sample
        name abbreviation.

        The filter conditions implicitly join experiment -> sample ->
        sample_description -> physiologicalRatios replicates.
        NOTE(review): exp_type_I defaults to 4 -- presumably the
        quantification experiment type id; confirm against the experiment
        table.
        '''
        try:
            time_points = self.session.query(data_stage01_quantification_physiologicalRatios_replicates.time_point).filter(
                sample_description.sample_name_abbreviation.like(sample_name_abbreviation_I),
                data_stage01_quantification_physiologicalRatios_replicates.experiment_id.like(experiment_id_I),
                experiment.exp_type_id == exp_type_I,
                experiment.id.like(experiment_id_I),
                experiment.sample_name.like(sample.sample_name),
                sample.sample_id.like(sample_description.sample_id),
                sample_description.sample_name_short.like(data_stage01_quantification_physiologicalRatios_replicates.sample_name_short),
                sample_description.time_point.like(data_stage01_quantification_physiologicalRatios_replicates.time_point),
                data_stage01_quantification_physiologicalRatios_replicates.used_.is_(True)).group_by(
                data_stage01_quantification_physiologicalRatios_replicates.time_point).order_by(
                data_stage01_quantification_physiologicalRatios_replicates.time_point.asc()).all();
            time_points_O = [];
            for tp in time_points: time_points_O.append(tp.time_point);
            return time_points_O;
        except SQLAlchemyError as e:
            print(e);
def get_timePoint_experimentID_dataStage01PhysiologicalRatiosReplicates(self,experiment_id_I):
'''Querry time points that are used from the experiment'''
try:
time_points = self.session.query(data_stage01_quantification_physiologicalRatios_replicates.time_point).filter(
data_stage01_quantification_physiologicalRatios_replicates.experiment_id.like(experiment_id_I),
data_stage01_quantification_physiologicalRatios_replicates.used_.is_(True)).group_by(
data_stage01_quantification_physiologicalRatios_replicates.time_point).order_by(
data_stage01_quantification_physiologicalRatios_replicates.time_point.asc()).all();
time_points_O = [];
for tp in time_points: time_points_O.append(tp.time_point);
return time_points_O;
except SQLAlchemyError as e:
print(e);
def get_timePoint_experimentIDAndRatioID_dataStage01PhysiologicalRatiosReplicates(self,experiment_id_I,physiologicalratio_id_I):
'''Querry time points that are used from the experiment'''
try:
time_points = self.session.query(data_stage01_quantification_physiologicalRatios_replicates.time_point).filter(
data_stage01_quantification_physiologicalRatios_replicates.experiment_id.like(experiment_id_I),
data_stage01_quantification_physiologicalRatios_replicates.physiologicalratio_id.like(physiologicalratio_id_I),
data_stage01_quantification_physiologicalRatios_replicates.used_.is_(True)).group_by(
data_stage01_quantification_physiologicalRatios_replicates.time_point).order_by(
data_stage01_quantification_physiologicalRatios_replicates.time_point.asc()).all();
time_points_O = [];
for tp in time_points: time_points_O.append(tp.time_point);
return time_points_O;
except SQLAlchemyError as e:
print(e);
# Query data from data_stage01_quantification_physiologicalRatios_replicates
def get_ratio_experimentIDAndSampleNameShortAndTimePointAndRatioID_dataStage01PhysiologicalRatiosReplicates(self, experiment_id_I, sample_name_short_I, time_point_I, physiologicalratio_id_I):
"""Query calculated ratios"""
try:
data = self.session.query(data_stage01_quantification_physiologicalRatios_replicates.physiologicalratio_value).filter(
data_stage01_quantification_physiologicalRatios_replicates.experiment_id.like(experiment_id_I),
data_stage01_quantification_physiologicalRatios_replicates.sample_name_short.like(sample_name_short_I),
data_stage01_quantification_physiologicalRatios_replicates.time_point.like(time_point_I),
data_stage01_quantification_physiologicalRatios_replicates.physiologicalratio_id.like(physiologicalratio_id_I),
data_stage01_quantification_physiologicalRatios_replicates.used_.is_(True)).all();
if len(data)>1:
print('more than 1 calculated_concentration retrieved per component_name')
if data:
ratio_O = data[0][0];
else:
ratio_O = None;
return ratio_O;
except SQLAlchemyError as e:
print(e);
    def get_ratios_experimentIDAndSampleNameAbbreviationAndTimePointAndRatioID_dataStage01PhysiologicalRatiosReplicates(self, experiment_id_I, sample_name_abbreviation_I, time_point_I, physiologicalratio_id_I,exp_type_I=4):
        """Query the distinct calculated ratio values for one experiment,
        sample name abbreviation, time point and ratio id.

        The filter conditions implicitly join experiment -> sample ->
        sample_description -> physiologicalRatios replicates.
        NOTE(review): exp_type_I defaults to 4 -- presumably the
        quantification experiment type id; confirm against the experiment
        table.
        """
        try:
            data = self.session.query(data_stage01_quantification_physiologicalRatios_replicates.physiologicalratio_value).filter(
                sample_description.sample_name_abbreviation.like(sample_name_abbreviation_I),
                sample_description.time_point.like(time_point_I),
                experiment.exp_type_id == exp_type_I,
                experiment.id.like(experiment_id_I),
                experiment.sample_name.like(sample.sample_name),
                sample.sample_id.like(sample_description.sample_id),
                data_stage01_quantification_physiologicalRatios_replicates.sample_name_short.like(sample_description.sample_name_short),
                data_stage01_quantification_physiologicalRatios_replicates.experiment_id.like(experiment_id_I),
                data_stage01_quantification_physiologicalRatios_replicates.time_point.like(time_point_I),
                data_stage01_quantification_physiologicalRatios_replicates.physiologicalratio_id.like(physiologicalratio_id_I),
                data_stage01_quantification_physiologicalRatios_replicates.used_.is_(True)).group_by(
                data_stage01_quantification_physiologicalRatios_replicates.physiologicalratio_value).all();
            ratios_O = [];
            for d in data:
                ratios_O.append(d[0]);
            return ratios_O;
        except SQLAlchemyError as e:
            print(e);
def get_rows_experimentIDAndSampleNameShortAndTimePoint_dataStage01PhysiologicalRatiosReplicates(self, experiment_id_I, sample_name_short_I, time_point_I):
"""Query calculated ratios"""
try:
data = self.session.query(data_stage01_quantification_physiologicalRatios_replicates).filter(
data_stage01_quantification_physiologicalRatios_replicates.experiment_id.like(experiment_id_I),
data_stage01_quantification_physiologicalRatios_replicates.sample_name_short.like(sample_name_short_I),
data_stage01_quantification_physiologicalRatios_replicates.time_point.like(time_point_I),
data_stage01_quantification_physiologicalRatios_replicates.used_.is_(True)).all();
rows_O = [];
if data:
for d in data:
rows_O.append({'experiment_id':d.experiment_id,
'sample_name_short':d.sample_name_short,
'time_point':d.time_point,
'physiologicalratio_id':d.physiologicalratio_id,
'physiologicalratio_name':d.physiologicalratio_name,
'physiologicalratio_value':d.physiologicalratio_value,
'physiologicalratio_description':d.physiologicalratio_description,
'used_':d.used_,
'comment_':d.comment_});
return rows_O;
except SQLAlchemyError as e:
print(e);
    # Query ratio_id information from data_stage01_quantification_physiologicalRatios_replicates
def get_ratioIDs_experimentIDAndTimePoint_dataStage01PhysiologicalRatiosReplicates(self,experiment_id_I,time_point_I):
'''Query physiologicalRatio_ids that are used from the experiment by time_point'''
try:
ratios = self.session.query(data_stage01_quantification_physiologicalRatios_replicates.physiologicalratio_id,
data_stage01_quantification_physiologicalRatios_replicates.physiologicalratio_name,
data_stage01_quantification_physiologicalRatios_replicates.physiologicalratio_description).filter(
data_stage01_quantification_physiologicalRatios_replicates.time_point.like(time_point_I),
data_stage01_quantification_physiologicalRatios_replicates.experiment_id.like(experiment_id_I),
data_stage01_quantification_physiologicalRatios_replicates.used_.is_(True)).group_by(
data_stage01_quantification_physiologicalRatios_replicates.physiologicalratio_id,
data_stage01_quantification_physiologicalRatios_replicates.physiologicalratio_name,
data_stage01_quantification_physiologicalRatios_replicates.physiologicalratio_description).order_by(
data_stage01_quantification_physiologicalRatios_replicates.physiologicalratio_id.asc()).all();
ratios_O = {};
for r in ratios:
ratios_O[r.physiologicalratio_id] = {'name':r.physiologicalratio_name,
'description':r.physiologicalratio_description};
return ratios_O;
except SQLAlchemyError as e:
print(e);
def get_ratioIDs_experimentID_dataStage01PhysiologicalRatiosReplicates(self,experiment_id_I):
'''Query physiologicalRatio_ids that are used from the experiment'''
try:
ratios = self.session.query(data_stage01_quantification_physiologicalRatios_replicates.physiologicalratio_id,
data_stage01_quantification_physiologicalRatios_replicates.physiologicalratio_name,
data_stage01_quantification_physiologicalRatios_replicates.physiologicalratio_description).filter(
data_stage01_quantification_physiologicalRatios_replicates.experiment_id.like(experiment_id_I),
data_stage01_quantification_physiologicalRatios_replicates.used_.is_(True)).group_by(
data_stage01_quantification_physiologicalRatios_replicates.physiologicalratio_id,
data_stage01_quantification_physiologicalRatios_replicates.physiologicalratio_name,
data_stage01_quantification_physiologicalRatios_replicates.physiologicalratio_description).order_by(
data_stage01_quantification_physiologicalRatios_replicates.physiologicalratio_id.asc()).all();
ratios_O = {};
for r in ratios:
ratios_O[r.physiologicalratio_id] = {'name':r.physiologicalratio_name,
'description':r.physiologicalratio_description};
return ratios_O;
except SQLAlchemyError as e:
print(e);
# Query time points from data_stage01_quantification_physiologicalRatios_averages
def get_timePoint_experimentID_dataStage01PhysiologicalRatiosAverages(self,experiment_id_I):
'''Querry time points that are used from the experiment'''
try:
time_points = self.session.query(data_stage01_quantification_physiologicalRatios_averages.time_point).filter(
data_stage01_quantification_physiologicalRatios_averages.experiment_id.like(experiment_id_I),
data_stage01_quantification_physiologicalRatios_averages.used_.is_(True)).group_by(
data_stage01_quantification_physiologicalRatios_averages.time_point).order_by(
data_stage01_quantification_physiologicalRatios_averages.time_point.asc()).all();
time_points_O = [];
for tp in time_points: time_points_O.append(tp.time_point);
return time_points_O;
except SQLAlchemyError as e:
print(e);
# Query sample names from data_stage01_quantification_physiologicalRatios_averages
def get_sampleNameAbbreviations_experimentIDAndTimePoint_dataStage01PhysiologicalRatiosAverages(self,experiment_id_I,time_point_I):
'''Querry sample names (i.e. unknowns) that are used from
the experiment'''
try:
sample_names = self.session.query(data_stage01_quantification_physiologicalRatios_averages.sample_name_abbreviation).filter(
data_stage01_quantification_physiologicalRatios_averages.experiment_id.like(experiment_id_I),
data_stage01_quantification_physiologicalRatios_averages.time_point.like(time_point_I),
data_stage01_quantification_physiologicalRatios_averages.used_.is_(True)).group_by(
data_stage01_quantification_physiologicalRatios_averages.sample_name_abbreviation).order_by(
data_stage01_quantification_physiologicalRatios_averages.sample_name_abbreviation.asc()).all();
sample_names_O = [];
for sn in sample_names: sample_names_O.append(sn.sample_name_abbreviation);
return sample_names_O;
except SQLAlchemyError as e:
print(e);
def get_sampleNameAbbreviations_experimentIDAndTimePointAndRatioID_dataStage01PhysiologicalRatiosAverages(self,experiment_id_I,time_point_I,physiologicalratio_id_I):
'''Querry sample names (i.e. unknowns) that are used from
the experiment'''
try:
sample_names = self.session.query(data_stage01_quantification_physiologicalRatios_averages.sample_name_abbreviation).filter(
data_stage01_quantification_physiologicalRatios_averages.experiment_id.like(experiment_id_I),
data_stage01_quantification_physiologicalRatios_averages.time_point.like(time_point_I),
data_stage01_quantification_physiologicalRatios_averages.physiologicalratio_id.like(physiologicalratio_id_I),
data_stage01_quantification_physiologicalRatios_averages.used_.is_(True)).group_by(
data_stage01_quantification_physiologicalRatios_averages.sample_name_abbreviation).order_by(
data_stage01_quantification_physiologicalRatios_averages.sample_name_abbreviation.asc()).all();
sample_names_O = [];
for sn in sample_names: sample_names_O.append(sn.sample_name_abbreviation);
return sample_names_O;
except SQLAlchemyError as e:
print(e);
# Query data from data_stage01_quantification_physiologicalRatios_averages:
    def get_data_experimentIDAndTimePointAndSampleNameAbbreviation_dataStage01PhysiologicalRatiosAverages(self, experiment_id_I,time_point_I,sample_name_abbreviation_I):
        """Return all used_ averaged physiological-ratio rows for one
        experiment/time point/sample name abbreviation, each flattened to a
        plain dict.  Returns [] when nothing matches, and None (implicitly)
        when a SQLAlchemyError is caught."""
        try:
            data = self.session.query(data_stage01_quantification_physiologicalRatios_averages).filter(
                data_stage01_quantification_physiologicalRatios_averages.experiment_id.like(experiment_id_I),
                data_stage01_quantification_physiologicalRatios_averages.time_point.like(time_point_I),
                data_stage01_quantification_physiologicalRatios_averages.sample_name_abbreviation.like(sample_name_abbreviation_I),
                data_stage01_quantification_physiologicalRatios_averages.used_.is_(True)).all();
            data_O = [];
            for d in data:
                # Flatten each ORM row into a plain dict.
                data_1 = {'experiment_id':d.experiment_id,
                        'sample_name_abbreviation':d.sample_name_abbreviation,
                        'time_point':d.time_point,
                        'physiologicalratio_id':d.physiologicalratio_id,
                        'physiologicalratio_name':d.physiologicalratio_name,
                        'physiologicalratio_value_ave':d.physiologicalratio_value_ave,
                        'physiologicalratio_value_cv':d.physiologicalratio_value_cv,
                        'physiologicalratio_value_lb':d.physiologicalratio_value_lb,
                        'physiologicalratio_value_ub':d.physiologicalratio_value_ub,
                        'physiologicalratio_description':d.physiologicalratio_description,
                        'used_':d.used_,
                        'comment_':d.comment_};
                data_O.append(data_1);
            return data_O;
        except SQLAlchemyError as e:
            print(e);
    def get_data_experimentIDAndTimePointAndRatioIDAndSampleNameAbbreviation_dataStage01PhysiologicalRatiosAverages(self, experiment_id_I,time_point_I,physiologicalratio_id_I,sample_name_abbreviation_I):
        """Return one averaged physiological-ratio row as a plain dict.

        Matches used_ rows on experiment id, time point, ratio id and sample
        name abbreviation.  Returns {} when nothing matches; when several
        rows match only the first is returned (no warning is printed).
        """
        try:
            data = self.session.query(data_stage01_quantification_physiologicalRatios_averages).filter(
                data_stage01_quantification_physiologicalRatios_averages.experiment_id.like(experiment_id_I),
                data_stage01_quantification_physiologicalRatios_averages.time_point.like(time_point_I),
                data_stage01_quantification_physiologicalRatios_averages.physiologicalratio_id.like(physiologicalratio_id_I),
                data_stage01_quantification_physiologicalRatios_averages.sample_name_abbreviation.like(sample_name_abbreviation_I),
                data_stage01_quantification_physiologicalRatios_averages.used_.is_(True)).all();
            data_O = {};
            if data:
                # Flatten the first matching ORM row into a plain dict.
                data_O = {'experiment_id':data[0].experiment_id,
                        'sample_name_abbreviation':data[0].sample_name_abbreviation,
                        'time_point':data[0].time_point,
                        'physiologicalratio_id':data[0].physiologicalratio_id,
                        'physiologicalratio_name':data[0].physiologicalratio_name,
                        'physiologicalratio_value_ave':data[0].physiologicalratio_value_ave,
                        'physiologicalratio_value_cv':data[0].physiologicalratio_value_cv,
                        'physiologicalratio_value_lb':data[0].physiologicalratio_value_lb,
                        'physiologicalratio_value_ub':data[0].physiologicalratio_value_ub,
                        'physiologicalratio_description':data[0].physiologicalratio_description,
                        'used_':data[0].used_,
                        'comment_':data[0].comment_};
            return data_O;
        except SQLAlchemyError as e:
            print(e);
def get_ratio_experimentIDAndTimePointAndRatioIDAndSampleNameAbbreviation_dataStage01PhysiologicalRatiosAverages(self, experiment_id_I,time_point_I,physiologicalratio_id_I,sample_name_abbreviation_I):
"""get data from experiment ID"""
try:
data = self.session.query(data_stage01_quantification_physiologicalRatios_averages).filter(
data_stage01_quantification_physiologicalRatios_averages.experiment_id.like(experiment_id_I),
data_stage01_quantification_physiologicalRatios_averages.time_point.like(time_point_I),
data_stage01_quantification_physiologicalRatios_averages.physiologicalratio_id.like(physiologicalratio_id_I),
data_stage01_quantification_physiologicalRatios_averages.sample_name_abbreviation.like(sample_name_abbreviation_I),
data_stage01_quantification_physiologicalRatios_averages.used_.is_(True)).all();
ratio_O = None;
if data:
ratio_O = data[0].physiologicalratio_value_ave;
return ratio_O;
except SQLAlchemyError as e:
print(e);
# Query peakInfo_parameter from data_stage01_quantificaton_peakInformation
def get_peakInfoParameter_experimentID_dataStage01PeakInformation(self,experiment_id_I):
    '''Return the peakInfo_parameter values recorded for an experiment.

    Rows are restricted to used_ == True, grouped and ordered by
    component_name; returns None when the query raises.
    '''
    table = data_stage01_quantification_peakInformation
    try:
        rows = self.session.query(table.peakInfo_parameter).filter(
            table.experiment_id.like(experiment_id_I),
            table.used_.is_(True)).group_by(
            table.component_name).order_by(
            table.component_name.asc()).all()
        return [row.peakInfo_parameter for row in rows]
    except SQLAlchemyError as e:
        print(e)
# Query data from data_stage01_quantification_peakInformation
def get_row_experimentIDAndComponentName_dataStage01PeakInformation(self, experiment_id_I, component_name_I):
"""Query rows"""
try:
data = self.session.query(data_stage01_quantification_peakInformation).filter(
data_stage01_quantification_peakInformation.experiment_id.like(experiment_id_I),
data_stage01_quantification_peakInformation.component_name.like(component_name_I),
data_stage01_quantification_peakInformation.used_.is_(True)).all();
data_O = {};
if len(data)>1:
print('more than 1 calculated_concentration retrieved per component_name')
if data:
for d in data:
data_O = {'experiment_id':d.experiment_id,
'component_group_name':d.component_group_name,
| |
config_rest(dut, rest_url = url, http_method=cli_type, json_data=config_data):
st.error('Failed to {} PFC Watch-Dog counter poll'.format(mode))
return False
else:
st.error("Unsupported CLI TYPE {}".format(cli_type))
return False
if command:
response = st.config(dut, command, type=cli_type, skip_error_check=skip_error)
if any(error.lower() in response.lower() for error in errors):
st.error("The response is: {}".format(response))
return False
return True
def show_pfc_wd_config(dut, ports=None, **kwargs):
    """
    To get PFC Watch-Dog configuration
    Author: <NAME> (<EMAIL>)
    :param dut: device under test handle
    :type dut:
    :param ports: interfaces to query (klish/rest paths require explicit ports)
    :type ports: list
    :return: list of parsed entries, or False on unsupported UI type
    """
    cli_type = st.get_ui_type(dut, **kwargs)
    # Without explicit ports only the click CLI can dump the whole config.
    cli_type = cli_type if ports else 'click'
    # Fix: avoid the mutable-default-argument pitfall; normalize to a fresh list.
    ports = make_list(ports or [])
    if cli_type == 'click':
        command = "pfcwd show config"
        output = st.show(dut, command, type=cli_type)
    elif cli_type == 'klish':
        output = list()
        for port in ports:
            intf_data = get_interface_number_from_name(port)
            command = "show qos interface {} {}".format(intf_data['type'], intf_data['number'])
            out = st.show(dut, command, type=cli_type)
            # Tag each parsed row with the interface it came from.
            _ = out[0].update(interface=port) if out and isinstance(out, list) and isinstance(out[0], dict) else out
            output.extend(out)
    elif cli_type in ['rest-patch', 'rest-put']:
        rest_urls = st.get_datastore(dut, 'rest_urls')
        output = list()
        for port in ports:
            url = rest_urls['get_pfc_params'].format(port)
            out = get_rest(dut, rest_url = url)
            if (out and ('output' in out) and out.get('output')):
                out = _get_rest_pfc_params_config(out['output'])
                _ = out[0].update(interface=port) if out and isinstance(out, list) and isinstance(out[0], dict) else out
                output.extend(out)
    else:
        st.error("Unsupported CLI TYPE {}".format(cli_type))
        return False
    return output
def show_pfc_wd_stats(dut, **kwargs):
    """
    To get PFC Watch-Dog statistics
    Author: <NAME> (<EMAIL>)
    :param dut: device under test handle
    :param kwargs: optional 'ports' list restricting the query; UI-type overrides
    :return: list of parsed statistics entries, or False on failure
    """
    ui = st.get_ui_type(dut, **kwargs)
    port_list = make_list(kwargs.get('ports', []))
    if ui == 'click':
        result = st.show(dut, "pfcwd show stats", type=ui)
    elif ui == 'klish':
        tb_vars = st.get_testbed_vars()
        if port_list:
            result = list()
            for intf in port_list:
                parts = get_interface_number_from_name(intf)
                cmd = "show qos interface {} {} priority-flow-control statistics queue".format(parts['type'], parts['number'])
                result.extend(st.show(dut, cmd, type=ui))
        else:
            # No explicit ports: query every interface in one command.
            all_ports = 'Eth all' if tb_vars.config.ifname_type == 'alias' else 'Ethernet all'
            cmd = "show qos interface {} priority-flow-control statistics queue".format(all_ports)
            result = st.show(dut, cmd, type=ui)
    elif ui in ['rest-patch', 'rest-put']:
        urls = st.get_datastore(dut, 'rest_urls')
        if port_list:
            result = list()
            for intf in port_list:
                out = get_rest(dut, rest_url=urls['get_pfcwd_counters'].format(intf), timeout=20)
                if not (out and ('output' in out) and out.get('output')):
                    st.error("No data found in output: {}".format(out))
                    return False
                result.extend(_get_rest_pfc_wd_stats(out['output'], intf))
        else:
            out = get_rest(dut, rest_url=urls['get_pfc_all_counters'], timeout=120)
            if not (out and ('output' in out) and out.get('output')):
                st.error("No data found in output: {}".format(out))
                return False
            result = _get_rest_pfc_wd_stats_all(out['output'])
    else:
        st.error("Unsupported CLI TYPE {}".format(ui))
        return False
    return result
def show_asymmetric_pfc(dut, ports=None, cli_type=''):
    """
    To show asymmetric PFC configuration on ports
    Author: <NAME> (<EMAIL>)
    :param dut: device under test handle
    :type dut:
    :param ports: interfaces to query (klish/rest paths require explicit ports)
    :type ports: list
    :param cli_type: UI type override ('click'/'klish'/'rest-patch'/'rest-put')
    :type cli_type:
    :return: list of parsed entries, or False on unsupported UI type
    """
    cli_type = st.get_ui_type(dut, cli_type=cli_type)
    # Without explicit ports only the click CLI can dump the whole table.
    cli_type = cli_type if ports else 'click'
    # Fix: avoid the mutable-default-argument pitfall; normalize to a fresh list.
    ports = make_list(ports or [])
    if cli_type == 'click':
        command = "pfc show asymmetric"
        output = st.show(dut, command, type=cli_type)
    elif cli_type == 'klish':
        output = list()
        for port in ports:
            intf_data = get_interface_number_from_name(port)
            command = "show qos interface {} {}".format(intf_data['type'], intf_data['number'])
            out = st.show(dut, command, type=cli_type)
            # Tag each parsed row with the interface it came from.
            _ = out[0].update(interface=port) if out and isinstance(out, list) and isinstance(out[0], dict) else out
            output.extend(out)
    elif cli_type in ['rest-patch', 'rest-put']:
        rest_urls = st.get_datastore(dut, 'rest_urls')
        output = list()
        for port in ports:
            url = rest_urls['get_pfc_params'].format(port)
            out = get_rest(dut, rest_url = url)
            if (out and ('output' in out) and out.get('output')):
                out = _get_rest_pfc_params_config(out['output'])
                _ = out[0].update(interface=port) if out and isinstance(out, list) and isinstance(out[0], dict) else out
                output.extend(out)
    else:
        st.error("Unsupported CLI TYPE {}".format(cli_type))
        return False
    return output
def clear_pfc_counters(dut, **kwargs):
    """
    Author: <NAME> (<EMAIL>)
    Clear the PFC counters on the DUT.
    :param dut: device under test handle
    :return: True on success, False on failure/unsupported UI type
    """
    ui = st.get_ui_type(dut, **kwargs)
    # Clear commands use RPC calls for those OC-YANG URLs won't be available
    if ui in ['rest-patch', 'rest-put']:
        ui = 'klish'
    if ui == 'click':
        st.show(dut, "sonic-clear pfccounters", skip_tmpl=True)
        return True
    if ui == 'klish':
        if clear_interface_counters(dut, **kwargs):
            return True
        st.error("Failed to clear PFC counters")
        return False
    st.error("Unsupported CLI TYPE {}".format(ui))
    return False
def show_pfc_counters(dut, **kwargs):
    """
    Author: <NAME> (<EMAIL>)
    Get PFC pause-frame counters.
    :param dut: device under test handle
    :param kwargs: optional 'ports' list restricting the query; UI-type overrides
    :return: list of dicts with normalized 'Port Rx'/'Port Tx' direction labels,
             or False on failure
    """
    cli_type = st.get_ui_type(dut, **kwargs)
    ports = make_list(kwargs.get('ports', []))
    if cli_type == 'click':
        command = "show pfc counters"
        rv = st.show(dut, command, type=cli_type)
    elif cli_type == 'klish':
        temp_vars = st.get_testbed_vars()
        if not ports:
            port = 'Eth all' if temp_vars.config.ifname_type == 'alias' else 'Ethernet all'
            command = "show qos interface {} priority-flow-control statistics".format(port)
            rv = st.show(dut, command, type=cli_type)
        else:
            rv = list()
            for port in ports:
                intf_data = get_interface_number_from_name(port)
                command = "show qos interface {} {} priority-flow-control statistics".format(intf_data['type'], intf_data['number'])
                rv.extend(st.show(dut, command, type=cli_type))
    elif cli_type in ['rest-patch', 'rest-put']:
        rest_urls = st.get_datastore(dut, 'rest_urls')
        if not ports:
            url = rest_urls['get_pfc_all_counters']
            out = get_rest(dut, rest_url=url, timeout=120)
            # Bug fix: guard against a falsy/None response first -- the bare
            # membership test ("'output' in out") raises TypeError when
            # get_rest returns None/False; matches show_pfc_wd_stats().
            if not (out and ('output' in out) and out.get('output')):
                st.error("No data found in output: {}".format(out))
                return False
            rv = _get_rest_pfc_counters_all(out['output'])
        else:
            rv = list()
            for port in ports:
                url = rest_urls['get_pfc_pause_counters'].format(port)
                out = get_rest(dut, rest_url=url, timeout=120)
                if not (out and ('output' in out) and out.get('output')):
                    st.error("No data found in output: {}".format(out))
                    return False
                rv.extend(_get_rest_pfc_counters(out['output'], port))
    else:
        st.error("Unsupported CLI TYPE {}".format(cli_type))
        return False
    # Normalize direction labels and strip thousands separators.
    output = [{k: v.replace('received', 'Port Rx').replace('transmitted', 'Port Tx').replace(',', '') for k, v in each.items()} for each in rv]
    return output
def get_pfc_counters(dut,interface,mode,*argv):
    """
    Author: <NAME> (<EMAIL>)
    Fetch selected PFC counter columns for one interface/direction.
    :param dut: device under test handle
    :param interface: interface name to match
    :param mode: 'rx' or 'tx' (anything other than 'rx' means Tx)
    :param argv: 'pfc0','pfc1','pfc2','pfc3','pfc4','pfc5','pfc6','pfc7'
    :return: matching entries restricted to the requested columns
    """
    counters = show_pfc_counters(dut)
    direction = 'Port Rx' if mode.lower() == 'rx' else 'Port Tx'
    return filter_and_select(counters, argv, {'port': interface, 'port_mode': direction})
def get_pfc_counters_all(dut, interface, mode='tx'):
    """
    Author: <NAME> (<EMAIL>)
    Get all PFC queue counters for one interface/direction with thousands
    separators stripped from the values.
    :param dut: device under test handle
    :param interface: interface name to match
    :param mode: 'rx' or 'tx' (default 'tx')
    :return: (True, {counter: value}) on success, (False, 0) when no entry found
    """
    output = show_pfc_counters(dut)
    port_mode = 'Port Rx' if mode.lower() == 'rx' else 'Port Tx'
    entries = filter_and_select(output, None, {'port': interface, 'port_mode': port_mode})
    if not entries:
        # Fix: corrected "couters" typo in the log message.
        st.log("No queue counters found on {} for {} {}".format(dut, interface, mode))
        return (False, 0)
    # Strip the thousands separators so callers can int()-convert values.
    new_entry = {key: value.replace(",", "") for key, value in entries[0].items()}
    return (True, new_entry)
def verify_pfc_counters(dut,interface,mode='tx',**kwargs):
    """
    Author: <NAME> (<EMAIL>)
    Verify that each keyword-specified counter matches on the given
    interface/direction.
    :param dut: device under test handle
    :param interface: interface name to match
    :param mode: 'rx' or 'tx' (default 'tx')
    :param kwargs: counter-name -> expected-value pairs to verify
    :return: True when every pair matches, False otherwise
    """
    output = show_pfc_counters(dut)
    direction = 'Port Rx' if mode.lower() == 'rx' else 'Port Tx'
    for field, expected in kwargs.items():
        criteria = {'port': interface, 'port_mode': direction, field: expected}
        if not filter_and_select(output, None, criteria):
            st.log("{} and {} is not match ".format(field, expected))
            return False
    return True
def config_pfc_buffer_prameters(dut, hwsku, ports_dict, **kwargs):
"""
Autor: <NAME> (<EMAIL>)
To configure the platform specific buffer constants
:param hwsku:
:type hwsku:
:param dut:
:type dut:
:param ports_dict:
:type ports_dict:
"""
constants = st.get_datastore(dut, "constants")
ports_show = interface_status_show(dut, list(ports_dict.keys()))
port_speed = dict()
core_buffer_config = kwargs.get('core_buffer_config', False)
apply_buffer_config = kwargs.get('apply_buffer_config', True)
for port in ports_dict.keys():
port_speed[port] = filter_and_select(ports_show, ['speed'], {'interface': port})[0]['speed'].replace('G', '000')
native_ports_map_dict = {port:st.get_other_names(dut, [port])[0] if '/' in port else port for port in ports_dict.keys()}
retval = dict()
update_retval = lambda entries: {retval.update(entry) for entry in entries}
if hwsku.lower() in constants['TH_PLATFORMS']:
if core_buffer_config:
buffer_pool = {"BUFFER_POOL": {"egress_lossless_pool": {"mode": "static", "size": "12766208", "type": "egress"},
"egress_lossy_pool": {"mode": "dynamic", "size": "7326924", "type": "egress"},
"ingress_lossless_pool": {"mode": "dynamic", "size": "12766208", "type": "ingress", "xoff": "4625920"}}}
buffer_profile = {"BUFFER_PROFILE": {"egress_lossless_profile": {"pool": "egress_lossless_pool", "size": "0", "static_th": "12766208"}, "egress_lossy_profile": {"dynamic_th": "3", "pool": "egress_lossless_pool", "size": "1518"},
"ingress_lossy_profile": {"dynamic_th": "3", "pool": "ingress_lossless_pool", "size": "0"},
"pg_lossless_10000_300m_profile": {"dynamic_th": "-3", "pool": "ingress_lossless_pool", "size": "56368", "xoff": "55120", "xon": "18432", "xon_offset": "2496"},
"pg_lossless_25000_300m_profile": {"dynamic_th": "-3", "pool": "ingress_lossless_pool", "size": "56368", "xoff": "55120", "xon": "18432", "xon_offset": "2496"},
"pg_lossless_40000_300m_profile": {"dynamic_th": "-3", "pool": "ingress_lossless_pool", "size": "56368", "xoff": "55120", "xon": "18432", "xon_offset": "2496"},
"pg_lossless_100000_300m_profile": {"dynamic_th": "-3", "pool": "ingress_lossless_pool", "size": "56368", "xoff": "55120", "xon": "18432", "xon_offset": "2496"}}}
cable_length_config = {"CABLE_LENGTH": {"AZURE": {native_ports_map_dict[port]: "300m" for port in ports_dict.keys()}}}
update_retval([buffer_pool, buffer_profile, cable_length_config])
if apply_buffer_config:
ingress_profile_mapping = {'100000' : 'pg_lossless_100000_300m_profile', '40000' : 'pg_lossless_40000_300m_profile', '25000' : 'pg_lossless_25000_300m_profile', '10000' : 'pg_lossless_10000_300m_profile', 'lossy_profile': 'ingress_lossy_profile'}
egress_profile_mapping = {'lossy_profile' : 'egress_lossy_profile', 'lossless_profile' : 'egress_lossless_profile'}
buffer_pg = dict()
buffer_queue = dict()
get_profile = lambda profile: {"profile": "{}".format(profile)}
for port, queue_info in ports_dict.items():
native_port = native_ports_map_dict[port]
for queue_type, queues in queue_info.items():
buffer_pg.update({"{}|{}".format(native_port, queue):get_profile(ingress_profile_mapping[port_speed[port]] if queue_type == 'lossless_queues' else ingress_profile_mapping['lossy_profile']) for queue in queues})
buffer_queue.update({"{}|{}".format(native_port, queue):get_profile(egress_profile_mapping['lossless_profile'] if queue_type == 'lossless_queues' else egress_profile_mapping['lossy_profile']) for queue in queues})
buffer_pg = {"BUFFER_PG":buffer_pg}
buffer_queue = {"BUFFER_QUEUE":buffer_queue}
update_retval([buffer_pg, buffer_queue])
st.debug(retval)
elif hwsku.lower() in constants['TH2_PLATFORMS']:
if core_buffer_config:
buffer_pool = {"BUFFER_POOL": {"egress_lossless_pool": {"mode": "static", "size": "12766208", "type": "egress"},
"egress_lossy_pool": {"mode": "dynamic", "size": "7326924", "type": "egress"},
"ingress_lossless_pool": {"mode": "dynamic", "size": "12766208", "type": "ingress", "xoff": | |
# <gh_stars>0  (repository-metadata artifact; kept as a comment so the module parses)
import random
import requests
from lxml import html
from googlesearch import search
from bs4 import BeautifulSoup
import wikipedia as wk
import regex
import pandas as pd
import json
import os
cdeeplearn = __import__("cdeeplearn")
thirukkural = __import__("thirukkural")
tamil_utils = __import__("tamil_utils")
WIKI_DEFAULT_LANG = 'ta'
GOOGLE_SENTENCE_COUNT = 10
WIKI_SENTENCE_COUNT = 10
" Flatten a list of lists "
# NOTE(review): the lambda parameter shadows the builtin ``list``; kept as-is.
flatten_list = lambda list: [item for sublist in list for item in sublist]
# Bot configuration is loaded once, at import time, from a JSON file.
bot_config_file = "./ChatBot.json"
config = {}
with open(bot_config_file, 'r', encoding='utf-8') as f:
    config = json.load(f)
# Path of the word->meaning dictionary (nigandu) consumed by SangamPoems.
sangam_nigandu_file = config['sangam_dictionary_file']
data_ext = ".csv"
headers = []
columns_to_display = []
# Column names of the per-anthology csv files.
headers = ["poem_type", "poet_name", "poem", "translation","notes", "meaning", "poem_id", "poet_name_e","title","title_e"]
columns_to_display= []
# The poem column is always displayed, regardless of the config file setting.
config["show_columns"]['show_poem']="True"
_config_show = config["show_columns"]
# Keep only the columns whose 'show_<col>' flag is "true" in the config.
for col in headers:
    key = 'show_'+col
    if _config_show[key].lower()=="true":
        columns_to_display.append(col)
data_folder = "./sangam_tamil_csv/"
poem_folder = "./sangam_tamil_poems/"
# English base names of the anthology csv files; index-aligned with POEM_TYPES below.
data_files = ['agananuru','purananuru','ainkurunuru','kalithokai', 'kurunthokai', 'natrinai', 'pathitrupathu', 'pattinapaalai',
              'mullaipaattu', 'nedunalvaadai', 'kurinjipaattu','malaipadukadaam','maduraikaanji','porunaraatrupadai',
              'perumpaanaatrupadai', 'sirupaanaatrupadai', 'thirumurugaatrupadai', 'ainthinaiezhupathu', 'ainthinaiaimpathu',
              'kaarnaarpathu','thinaimozhiaimpathu','kainnilai','thinaimaalainootraimbathu']#, 'thirukkural' ]
# Tamil display names of the anthologies, in the same order as data_files.
POEM_TYPES = ['அகநானூறு', 'புறநானூறு', 'ஐங்குறுநூறு', 'கலித்தொகை', 'குறுந்தொகை', 'நற்றிணை', 'பதிற்றுப்பத்து', 'பட்டினப்பாலை',
              'முல்லைப்பாட்டு', 'நெடுநல்வாடை','குறிஞ்சிப்பாட்டு','மலைபடுகடாம்', 'மதுரைக்காஞ்சி','பொருநராற்றுப்படை',
              'பெரும்பாணாற்றுப்படை', 'சிறுபாணாற்றுப்படை','திருமுருகாற்றுப்படை','ஐந்திணை எழுபது','ஐந்திணை ஐம்பது','கார் நாற்பது',
              'திணைமொழி ஐம்பது','கைந்நிலை','திணைமாலை நூற்றைம்பது']#,'திருக்குறள்']
def get_wikipedia_response(input,lang=WIKI_DEFAULT_LANG):
    """Return a WIKI_SENTENCE_COUNT-sentence Wikipedia summary for *input*.

    For a non-default language the wikipedia module's global language is
    switched first (note: it is never switched back afterwards).
    """
    if lang != WIKI_DEFAULT_LANG:
        wk.set_lang(lang)
    return wk.summary(input, sentences=WIKI_SENTENCE_COUNT)
def get_google_search_response(query, index=0,sentence_count=GOOGLE_SENTENCE_COUNT):
    """Google-search *query*, scrape paragraph text from the chosen result
    page and return its first *sentence_count* sentences.

    :param query: search string.
    :param index: which search hit to fetch (0-based).
    :param sentence_count: number of '.'-delimited sentences to keep.
    :return: scraped text, or the configured fallback message on failure.
    """
    result = ''
    try:
        search_result_list = list(search(query, tld="com", num=10, stop=3, pause=1))
        page = requests.get(search_result_list[index])
        soup = BeautifulSoup(page.content, features="lxml")
        article_text = ''
        for element in soup.findAll('p'):
            article_text += '\n' + ''.join(element.findAll(text = True))
        article_text = article_text.replace('\n', '')
        sentences = '.'.join(article_text.split('.')[:sentence_count])
        if len(sentences) > 0:
            result = sentences
        else:
            result = config["FALLBACK_MSG"]
        return result
    except Exception:
        # Bug fix: the original referenced an undefined name ``fallback``
        # here, so any scraping failure raised NameError instead of
        # returning a message; use the configured fallback instead.
        # (Also narrowed the bare ``except:`` to ``except Exception``.)
        if len(result) == 0: result = config["FALLBACK_MSG"]
        return result
class SangamPoems():
    """Chat-bot backend over the classical Tamil (sangam) poem corpus.

    Loads every anthology csv into one pandas DataFrame, builds a
    word->meaning dictionary (nigandu), and answers free-form user requests
    via respond_to_bot_user_input().
    """
    # Column-index constants for rows of the poem DataFrame.
    # NOTE(review): NOTES/MEANING are swapped relative to the module-level
    # ``headers`` list (there 'notes' is index 4) -- confirm which is correct.
    POEM_TYPE = 0
    POET_NAME = 1
    POEM = 2
    TRANSLATION = 3
    NOTES = 5
    MEANING = 4
    POEM_ID = 6
    POET_NAME_ENGLISH = 7
    # Prefix shown when a randomly chosen poem is returned.
    RANDOM_POEM_MSG = 'சீரற்ற தேர்வு (random choice):<br>'
    def __init__(self):
        """Load the poem corpus, the nigandu dictionary and the Thirukkural helper."""
        self.df = self.get_poem_data()
        self.nigandu = self._get_nigandu_from_file()
        print("There are {} words in sangam nigandu".format(len(self.nigandu)))
        self.tk = thirukkural.Thirukural(data_folder+'thirukkural.csv')
    def get_meaning(self, text):
        """Return a dict mapping words of *text* to their nigandu meanings.

        For each word, entries whose key contains the word are collected and
        the entry with the fewest-word key wins; lookup failures are silently
        skipped (bare except).
        """
        meaning = dict()
        for word in text.split():
            try:
                word_matching_dict = dict(filter(lambda item: word in item[0].split(), self.nigandu.items()))
                # Prefer the match whose key has the fewest words.
                word_matching_dict = [{item:word_matching_dict[item]} for item in sorted(word_matching_dict, key=lambda k: len(k.split()), reverse=False)][0]
                meaning.update(word_matching_dict)
            except:
                continue
        return meaning
    def _get_nigandu_from_file(self,dictionary_file=sangam_nigandu_file):
        """Load the word->meanings dictionary from *dictionary_file*.

        If the file does not exist it is built from the poem meanings via
        _collect_nigandu() and written out in "word=meaning,..." format.
        """
        nigandu = dict()
        if not os.path.exists(dictionary_file):
            print('Creating and writing nigandu to',dictionary_file)
            nigandu = self._collect_nigandu()
            f = open(dictionary_file,"w",encoding='utf-8')
            for key in nigandu.keys():
                if key.strip() == '':
                    continue
                word = key.strip()
                meaning = ','.join(nigandu[key])
                f.write(word+"="+meaning+"\n")
            f.close()
            return nigandu
        print('Collecting nigandu from',dictionary_file)
        f = open(dictionary_file,"r",encoding='utf-8')
        for line in f:
            if line.strip() == '':
                continue
            # Split only on the first '=' so meanings may contain '='.
            word, meaning = line.split("=",1)
            word = word.strip()
            meaning = meaning.strip()
            if word == '' or meaning =='':
                continue
            if word in nigandu.keys():
                if meaning not in nigandu[word]:
                    #print('adding>'+meaning+'<to',nigandu_dict[word])
                    nigandu[word].append(meaning)
            else:
                nigandu[word]=list()
                nigandu[word].append(meaning)
        f.close()
        return nigandu
    def _collect_nigandu(self):
        """Build the word->meanings dict from the 'meaning' column of the corpus.

        Each meaning cell holds comma-separated "word – meaning" pairs
        (en-dash separator); malformed pairs are skipped.
        """
        meanings = flatten_list(self.df['meaning'].str.split(","))
        nigandu_dict = dict()
        for word_pair in meanings:
            word = ''
            meaning = ''
            if "–" in word_pair:
                #print('word_pair',word_pair)
                word, meaning = word_pair.split("–",1)
                word = word.strip()
                meaning = meaning.strip()
                #print('word=',word,'meaning=',meaning)
            else:
                #print('"–" not in word_pair',word_pair)
                continue
            if word == '' or meaning =='':
                continue
            if word in nigandu_dict.keys():
                if meaning not in nigandu_dict[word]:
                    #print('adding>'+meaning+'<to',nigandu_dict[word])
                    nigandu_dict[word].append(meaning)
            else:
                nigandu_dict[word]=list()
                nigandu_dict[word].append(meaning)
        return nigandu_dict
    def _format_output(self, matching_row_indices, random_poem=False):
        """Render the configured display columns of the given rows as one
        '<br>'-joined HTML string, optionally prefixed with the random-poem
        banner."""
        result =[]
        df = self.df
        for row_id in matching_row_indices:
            row_id = int(row_id)
            pd_series = df.loc[df.index[row_id]]
            for col in columns_to_display:
                col_str = pd_series[col]
                result.append(col_str)
        random_poem_msg = ""
        if (random_poem):
            random_poem_msg = SangamPoems.RANDOM_POEM_MSG
        return random_poem_msg+'<br>'.join(result)
    def get_poem_data(self):
        """Read every anthology csv into one DataFrame, tagging each row with
        its Tamil poem_type and normalizing poem/poet-name text."""
        df = pd.DataFrame()
        for index, data_file in enumerate(data_files):
            print('reading csv',data_folder+data_file+data_ext)
            dfs = pd.read_csv(data_folder+data_file+data_ext,encoding='utf-8',na_filter=False)
            dfs = dfs.replace({'poem' : {"\n":"<br>"}})
            dfs['poem'] = dfs['poem'].apply(tamil_utils._remove_punctuation_numbers)
            # Strip the Tamil "author:" prefix from poet names.
            dfs = dfs.replace({"poet_name":{"பாடியவர்:":""}})
            dfs['poet_name'] = dfs['poet_name'].str.strip()
            dfs['poet_name_e'] = dfs['poet_name_e'].str.strip()
            #poem_type = data_file.replace(data_ext,"")
            poem_type = POEM_TYPES[index]
            dfs['poem_type']=poem_type
            # NOTE(review): DataFrame.append is deprecated in modern pandas
            # (use pd.concat) -- kept for compatibility with this codebase.
            df = df.append(dfs,ignore_index=True)
        return df
    def _help(self):
        """Return the configured help message."""
        return config["HELP_MSG"]
    def _greet(self):
        """Return the configured greeting message."""
        return config["GREET_MSG"]
    def _quit(self):
        """Return the configured quit message."""
        return config["QUIT_MSG"]
    def _get_poem_line_word_count(self, poem_type):
        """Sample a line count from the anthology's poems and return
        (line_count, min_words_per_line, max_words_per_line)."""
        poems = [poem for poem in self.df.loc[self.df['poem_type']==poem_type]['poem'].tolist()]
        lines = flatten_list([poem.split("\n") for poem in poems])
        # Line count is drawn at random from the anthology's poems.
        line_count = random.choice([len(poem.split("\n")) for poem in poems])
        word_count = sorted([len(line.split()) for line in lines if line.strip() != ''])
        minimum_word_count = word_count[0]
        maximum_word_count = word_count[-1]
        #print('line_count,minimum_word_count,maximum_word_count',line_count,minimum_word_count,maximum_word_count)
        return line_count,minimum_word_count,maximum_word_count
    def _deep_learn(self,poem_type,bot_user_input,value,include_meaning=False,minimum_words_per_sentence=4):
        """Generate a new poem in the style of *poem_type* using the
        pre-trained cdeeplearn model weights; returns the configured
        search-failure message when the weight files are missing."""
        poem = data_files[POEM_TYPES.index(poem_type)]
        #print('calling sangam tamil _deep_learn()',poem_type,poem)
        sentence_count, word_count_min, word_count_max = self._get_poem_line_word_count(poem_type)
        words_per_sentence = random.randrange(max(word_count_min,minimum_words_per_sentence),word_count_max)
        poem_word_count = sentence_count * words_per_sentence # 7 for Kural 76 for sangam aga,/puram
        #print('sentence count',sentence_count,'words per sentence',words_per_sentence)
        response = config["SEARCH_FAIL_MSG"]
        corpus_file=poem+'_poems_corpus.json'
        model_weights_file=poem+'_poems_corpus.h5'
        starting_word_file=poem+'_poems_starting_words.json'
        ending_word_file=poem+'_poems_ending_words.json'
        model_weights_folder = "./model_weights/"
        files_not_found = [model_weights_folder+file for file in [corpus_file,model_weights_file,starting_word_file,ending_word_file] if not os.path.exists(model_weights_folder+file) ]
        if len(files_not_found)>0:
            print("Following files needed for deep learning are missing:",files_not_found)
            return response
        if os.path.exists(model_weights_folder+corpus_file) and os.path.exists(model_weights_folder+model_weights_file) and \
            os.path.exists(model_weights_folder+starting_word_file) and os.path.exists(model_weights_folder+ending_word_file):
            cdeeplearn.set_parameters(corpus_file=corpus_file, model_weights_file=model_weights_file,
                                      starting_word_file=starting_word_file,ending_word_file=ending_word_file)
            response = cdeeplearn.generate_tokens_from_corpus(corpus_files=[poem_folder+poem+"_poems.txt"],
                            length=poem_word_count,perform_training=False,tokens_per_sentence=words_per_sentence)
            response = tamil_utils._cleanup_generated_poem(response)
        if include_meaning:
            # Append "word=meaning" lines for each word of the generated poem.
            meaning = self.get_meaning(response)
            for key,value in meaning.items():
                response += "\n" + key+"="+''.join(value)
        return response
    def _contains(self,poem_type,bot_user_input,value):
        """Return poems of *poem_type* containing *value*; fall back to a
        Google search when nothing matches.

        NOTE(review): *value* is passed as get_google_search_response's
        ``index`` argument here -- confirm that is intended.
        """
        response = self.df.index[ (self.df['poem_type']==poem_type) & (self.df['poem'].str.contains(value))].tolist()#.to_string(index=False)
        #print(poem_type,'key == contains',value,response)
        if not response:
            return get_google_search_response(bot_user_input,value)
            #return bot_user_input + config["SEARCH_FAIL_MSG"]
        return self._format_output(response)
    def _begins_with(self,poem_type,bot_user_input,value):
        """Return poems of *poem_type* starting with *value*; Google fallback."""
        #print('_begins with',poem_type,bot_user_input,value)
        response = self.df.index[ (self.df['poem_type']==poem_type) & (self.df['poem'].str.startswith(value))].tolist()#.to_string(index=False)
        #print(poem_type,'key == begins_with',value,response)
        if not response:
            return get_google_search_response(bot_user_input)
            #return bot_user_input + config["SEARCH_FAIL_MSG"]
        return self._format_output(response)
    def _ends_with(self,poem_type,bot_user_input,value):
        """Return poems of *poem_type* ending with *value*; Google fallback."""
        response = self.df.index[ (self.df['poem_type']==poem_type) & (self.df['poem'].str.endswith(value))].tolist()#.to_string(index=False)
        #print(poem_type,'key == ends_with',value,response)
        if not response:
            return get_google_search_response(bot_user_input)
            #return bot_user_input + config["SEARCH_FAIL_MSG"]
        return self._format_output(response)
    def _poet_count(self,poem_type,bot_user_input):
        """Return the poet roster (with poem counts) for *poem_type*."""
        response = self.list_of_poets(poem_type)
        #print(poem_type,'key == ends_with',value,response)
        if not response:
            return get_google_search_response(bot_user_input)
        return response
    def _poet_poems(self,poem_type,bot_user_input,value):
        """Return all poems of *poem_type* written by poet *value*."""
        #print('_poet_poems value',value)
        response = self.list_poems_by_poet(poem_type, value)
        #print(poem_type,'key == ends_with',value,response)
        if not response:
            return get_google_search_response(bot_user_input)
        return response
    def _get_meaning(self,bot_user_input):
        """Look up nigandu meanings for the word(s) following the 'meaning'
        keyword.

        NOTE(review): implicitly returns None when the parsed input has no
        second token -- confirm callers tolerate that.
        """
        inputs = self._get_key_words(bot_user_input)
        if len(inputs)>1:
            try:
                meaning = self.get_meaning(inputs[1])
                response = ''
                for key,value in meaning.items():
                    response += key+"="+''.join(value) +"\n"
                return response
            except:
                return bot_user_input + config["SEARCH_FAIL_MSG"]
    def _get_poem_from_poem_id(self,poem_type,verse_index):
        """Return the poem of *poem_type* with id *verse_index*, or a
        range-limit message when the id is out of bounds."""
        poem_id_min, poem_id_max = self._get_poem_min_max(poem_type)
        #print(poem_type,poem_id_min, verse_index, poem_id_max)
        if verse_index < int(poem_id_min) or verse_index > int(poem_id_max):
            response = "(" + str(int(poem_id_min)) + " - "+ str(int(poem_id_max)) + ") "+config["NUMBER_LIMIT_MSG"]
            return response
        response = self.df.index[ (self.df['poem_type']==poem_type) & (self.df['poem_id']==verse_index)].tolist()#.to_string(index=False)
        return self._format_output(response)
    def _split_user_input(self,bot_user_input):
        """Parse a user message into (poem_type, key, value, verse_index).

        NOTE(review): when no key words are recognized this returns a single
        string (the Google/fallback response); the caller unpacks 4 values,
        so that path raises and is converted to FALLBACK_MSG by the caller's
        except clause -- confirm this is intended.
        """
        inputs = self._get_key_words(bot_user_input)
        if len(inputs)==0:
            response = get_google_search_response(bot_user_input)
            if len(response)>0:
                return response
            else:
                return config['FALLBACK_MSG']
        poem_type = ''
        key = ''
        value = ''
        response = []
        verse_index = -1
        poem_type = inputs[0]
        if poem_type in POEM_TYPES and len(inputs)>1:
            if str(inputs[1]).isnumeric():
                verse_index = int(inputs[1])
            else:
                key = inputs[1]
                value = ''
                if len(inputs)>2:
                    value = ' '.join(inputs[2:])
        else:
            key = inputs[0]
        return poem_type,key,value,verse_index
    def respond_to_bot_user_input(self, bot_user_input):
        """Main entry point: dispatch a user message to the matching action
        (greet/help/search/generate/meaning/...) and return the reply."""
        #print('user_input',bot_user_input)
        pd.set_option('display.max_colwidth',1000)
        # Thirukkural requests are delegated to the dedicated helper.
        if bot_user_input.split()[0] in config['key_words']["திருக்குறள்"]:
            response = self.tk.respond_to_bot_user_input(' '.join(bot_user_input.split()[1:]))
            return response
        try:
            poem_type,key,value,verse_index = self._split_user_input(bot_user_input)
        except:
            return config["FALLBACK_MSG"]
        #print('poem_type',poem_type,'key',key,'value',value,'verse_index',verse_index)
        if verse_index != -1:
            return self._get_poem_from_poem_id(poem_type, verse_index)
        # Dispatch table: key -> (bound method, positional args, keyword args).
        action_dict={"greet":(self._greet,[],{}),
                     "help":(self._help,[],{}),
                     "quit":(self._quit,[],{}),
                     "contains":(self._contains,[poem_type,bot_user_input,value],{}),
                     "begins_with":(self._begins_with,[poem_type,bot_user_input,value],{}),
                     "ends_with":(self._ends_with,[poem_type,bot_user_input,value],{}),
                     "new":(self._deep_learn,[poem_type,bot_user_input,value],{"include_meaning":False}),
                     "poet_count" : (self._poet_count,[poem_type,bot_user_input],{}),
                     "poet_poems" : (self._poet_poems,[poem_type,bot_user_input,value],{}),
                     "meaning" : (self._get_meaning,[bot_user_input],{}),
                     "introduce": (self._introduce_bot,[bot_user_input],{})
                     }
        # NOTE(review): the lookup uses ``key`` only, so a match on
        # ``poem_type`` alone would raise KeyError -- confirm intended.
        if key in action_dict.keys() or poem_type in action_dict.keys():
            function, args, kwargs = action_dict[key]
            return function(*args,**kwargs)
        else:
            return config['FALLBACK_MSG']
    def _introduce_bot(self, user_message):
        """Return the bot's self-introduction ("my name is <BOT_NAME>")."""
        return "எனது பெயர் " + config["BOT_NAME"]
    def _get_key_words(self, user_message):
        """Tokenize *user_message* into recognized key words, embedded
        integers and the residual free text, in that order."""
        user_message = user_message.lower()
        user_words = user_message.split()
        key_words = []
        dict_keys = config["key_words"]
        for key in dict_keys:
            for value in map(str.lower, dict_keys[key]):
                #print(value,"==",user_message)
                if value in user_message:
                    key_words.append(key)
                    # Remove the word from user_message
                    """TODO remove words that contain value """
                    #user_message = ' '.join(filter(lambda x: value not in x, user_message.split()))
                    #print("removing ",value,"from user input",user_message)
                    user_message = user_message.replace(value,"")
                    break
        # Pick up any literal numbers (e.g. poem ids) left in the message.
        [key_words.append(int(s)) for s in user_message.split() if s.isdigit()]
        if key_words and not str(key_words[-1]).isdigit() and user_message != '':
            key_words.append(user_message.strip())
        #print(key_words)
        return key_words
    def _get_poem_min_max(self, poem_type):
        """Return the (min, max) poem_id for *poem_type*, or the
        invalid-type message for unknown anthologies."""
        if not poem_type in POEM_TYPES:
            return config["INVALID_POEM_TYPE_MSG"]
        poem_id_min = self.df.loc[self.df['poem_type']==poem_type]['poem_id'].min()
        poem_id_max = self.df.loc[self.df['poem_type']==poem_type]['poem_id'].max()
        return poem_id_min, poem_id_max
    def list_poems_by_poet(self,poem_type,poet_name):
        """Return all poems of *poem_type* whose poet name (Tamil or
        English column) contains *poet_name*."""
        poem_type = poem_type.strip()
        poet_name = poet_name.strip()
        if not poem_type in POEM_TYPES:
            return config["INVALID_POEM_TYPE_MSG"]
        #print('list_poems_by_poet',poem_type,poet_name)
        poems_by_poet = self.df.index[(self.df['poem_type']==poem_type) &
                                      (self.df['poet_name'].str.contains(poet_name)) |
                                      (self.df['poet_name_e'].str.contains(poet_name,flags=regex.IGNORECASE, regex=True))].tolist()
        #print('poems_by_poet',poems_by_poet)
        return self._format_output(poems_by_poet)
    def _create_corpus_data(self):
        """One-off helper: build the combined corpus/word files used for
        deep-learning generation."""
        poem_files = [data_folder.replace("csv","poems")+ p + "_poems.txt" for p in data_files]
        cdeeplearn._create_corpus_files(poem_files,corpus_file='sangam_corpus.json',starting_word_file='sangam_starting_words.json',
                                        ending_word_file='sangam_ending_words.json',end_token_boundary=None)
    def _train_corpus_data(self):
        """One-off helper: (re)train / sample the combined sangam corpus model."""
        poem_files = [data_folder.replace("csv","poems")+ p + "_poems.txt" for p in data_files]
        perform_training = False
        poem_token_count = 94 # 7 for Kural 76 for sangam aga,/puram
        tokens_per_sentence = 4
        cdeeplearn.set_parameters(corpus_file='sangam_corpus.json', model_weights_file='sangam_corpus.h5',
                                  starting_word_file='sangam_starting_words.json', ending_word_file='sangam_ending_words.json',
                                  batch_size=15, number_of_epochs=90)
        result = cdeeplearn.generate_tokens_from_corpus(corpus_files=poem_files,
                    length=poem_token_count, save_to_file='sangam_corpus.h5',perform_training=perform_training,
                    tokens_per_sentence=5)
        return result
    def list_of_poets(self, poem_type):
        """Return a summary of the poets of *poem_type*: total poet count
        followed by one "poet: poem-count" line each, sorted by count."""
        #print('List of poets')
        if poem_type in POEM_TYPES:
            #print('getting list of poets',poem_type)
            poet_list = self.df.loc[self.df['poem_type']==poem_type,['poet_name','poem_id']]
            poet_list1 = poet_list.groupby('poet_name')['poem_id'].nunique().reset_index(name='# of Poems').sort_values('# of Poems',ascending=False)
            result1 = "{} எழுதிய புலவர்கள் எண்ணிக்கை: {}".format(poem_type,len(poet_list1))
            poet_list = poet_list1.values.tolist()
            result = '\n'.join([poet+" எழுதிய பாடல்கள்:"+str(count) for poet,count in poet_list])
            return result1 + "\n" + result
        else:
            return config["INVALID_POEM_TYPE_MSG"]
if __name__ == "__main__":
#user_message | |
in each `TRIAL` when the Composition executes; it is assigned as the default value for the
`learning_enabled <LearningMechanism.learning_enabled>` attribute of the `LearningMechanisms
<LearningMechanism>` in the pathway, and their `LearningProjections <LearningProjection>`
(see `learning_enabled <LearningMechanism.learning_enabled>` for meaning of values).
Returns
--------
A dictionary of components that were automatically generated and added to the Composition in order to
implement Backpropagation along the pathway.
{LEARNING_MECHANISM: learning_mechanism,
COMPARATOR_MECHANISM: comparator,
TARGET_MECHANISM: target,
LEARNED_PROJECTION: learned_projection}
"""
return self.add_linear_learning_pathway(pathway,
learning_rate=learning_rate,
learning_function=BackPropagation,
loss_function=loss_function,
error_function=error_function,
learning_update=learning_update)
# NOTES:
# Learning-type-specific creation methods should:
# - create ComparatorMechanism and pass in as error_source (for 1st LearningMechanism in sequence in bp)
# - Determine and pass error_sources (aka previous_learning_mechanism) (for bp)
# - construct and pass in the learning_function
# - do the following for last LearningMechanism in sequence:
# learning_mechanism.output_ports[ERROR_SIGNAL].parameters.require_projection_in_composition._set(False,
# override=True)
#
# Create_backprop... should pass error_function (handled by kwargs below)
# Check for existence of Learning mechanism (or do this in creation method?); if one exists, compare its
# ERROR_SIGNAL input_ports with error_sources and update/add any needed, as well as corresponding
# error_matrices (from their learned_projections) -- do so using LearningMechanism's add_ports method);
# create projections from each
# Move creation of LearningProjections and learning-related projections (MappingProjections) here
# ?Do add_nodes and add_projections here or in Learning-type-specific creation methods
def _unpack_processing_components_of_learning_pathway(self, processing_pathway):
# unpack processing components and add to composition
if len(processing_pathway) == 3:
input_source, learned_projection, output_source = processing_pathway
elif len(processing_pathway) == 2:
input_source, output_source = processing_pathway
learned_projection = MappingProjection(sender=input_source, receiver=output_source)
else:
raise CompositionError(f"Too many components in learning pathway: {pathway}. Only single-layer learning "
f"is supported by this method. See AutodiffComposition for other learning models.")
return input_source, output_source, learned_projection
    # FIX: NOT CURRENTLY USED; IMPLEMENTED FOR FUTURE USE IN GENERALIZATION OF LEARNING METHODS
    def _create_learning_components(self,
                                    sender_activity_source,   # aka input_source
                                    receiver_activity_source, # aka output_source
                                    error_sources,            # aka comparator/previous_learning_mechanism
                                    learning_function,
                                    learned_projection,
                                    learning_rate,
                                    learning_update,
                                    target_mech=None,
                                    **kwargs                  # Use of type-specific learning arguments
                                    ):
        """Construct a generic (learning-type-agnostic) LearningMechanism for learned_projection.

        The LearningMechanism's default_variable is assembled from the first
        OutputPort values of the two activity sources and of error_sources;
        ``learning_update`` is passed through as its ``learning_enabled`` setting.
        NOTE(review): error_sources is treated as a single Mechanism here (its
        ``output_ports[0]`` is read directly) -- confirm before generalizing to
        multiple error sources.  ``target_mech`` is currently unused.
        """
        # ONLY DO THIS IF ONE DOESN'T ALREADY EXIST (?pass in argument determining this?)
        learning_mechanism = LearningMechanism(function=learning_function,
                                               default_variable=[sender_activity_source.output_ports[0].value,
                                                                 receiver_activity_source.output_ports[0].value,
                                                                 error_sources.output_ports[0].value],
                                               error_sources=error_sources,
                                               learning_enabled=learning_update,
                                               in_composition=True,
                                               name="Learning Mechanism for " + learned_projection.name,
                                               **kwargs)
        # Creating any learning component enables learning for the Composition as a whole
        self.enable_learning = True
        return learning_mechanism
    def _create_learning_related_mechanisms(self,
                                            input_source,
                                            output_source,
                                            error_function,
                                            learning_function,
                                            learned_projection,
                                            learning_rate,
                                            learning_update):
        """Creates *TARGET_MECHANISM*, *COMPARATOR_MECHANISM* and *LEARNING_MECHANISM* for RL and TD learning"""
        # Dispatch on learning_function: a LearningFunction subclass selects a
        # type-specific creation method; a plain (learning-compatible) function
        # is wrapped generically below.
        if isinstance(learning_function, type):
            if issubclass(learning_function, TDLearning):
                creation_method = self._create_td_related_mechanisms
            elif issubclass(learning_function, Reinforcement):
                creation_method = self._create_rl_related_mechanisms
            else:
                raise CompositionError(f"'learning_function' argument for add_linear_learning_pathway "
                                       f"({learning_function}) must be a class of {LearningFunction.__name__}")
            target_mechanism, comparator_mechanism, learning_mechanism = creation_method(input_source,
                                                                                         output_source,
                                                                                         error_function,
                                                                                         learned_projection,
                                                                                         learning_rate,
                                                                                         learning_update)
        elif is_function_type(learning_function):
            # Generic path: build Target/Comparator here and instantiate the
            # user-supplied learning function with the activity/error variable.
            target_mechanism = ProcessingMechanism(name='Target')
            comparator_mechanism = ComparatorMechanism(name='Comparator',
                                                       sample={NAME: SAMPLE,
                                                               VARIABLE: [0.], WEIGHT: -1},
                                                       target={NAME: TARGET,
                                                               VARIABLE: [0.]},
                                                       function=error_function,
                                                       output_ports=[OUTCOME, MSE])
            learning_mechanism = LearningMechanism(
                                    function=learning_function(
                                                         default_variable=[input_source.output_ports[0].value,
                                                                           output_source.output_ports[0].value,
                                                                           comparator_mechanism.output_ports[0].value],
                                                         learning_rate=learning_rate),
                                    default_variable=[input_source.output_ports[0].value,
                                                      output_source.output_ports[0].value,
                                                      comparator_mechanism.output_ports[0].value],
                                    error_sources=comparator_mechanism,
                                    learning_enabled=learning_update,
                                    in_composition=True,
                                    name="Learning Mechanism for " + learned_projection.name)
        else:
            raise CompositionError(f"'learning_function' argument of add_linear_learning_pathway "
                                   f"({learning_function}) must be a class of {LearningFunction.__name__} or a "
                                   f"learning-compatible function")
        # Terminal LearningMechanism: its ERROR_SIGNAL OutputPort need not project anywhere
        learning_mechanism.output_ports[ERROR_SIGNAL].parameters.require_projection_in_composition._set(False,
                                                                                                        override=True)
        self.enable_learning = True
        return target_mechanism, comparator_mechanism, learning_mechanism
    def _create_learning_related_projections(self, input_source, output_source, target, comparator, learning_mechanism):
        """Construct MappingProjections among `learning components <Composition_Learning_Components>` for pathway"""
        # FIX 5/29/19 [JDC]: INTEGRATE WITH _get_back_prop_error_sources (RIGHT NOW, ONLY CALLED FOR TERMINAL SEQUENCE)
        # If the sample/target projection already exists (pathway overlap), reuse it.
        # NOTE(review): on the DuplicateProjectionError path a *list* of projections
        # is assigned rather than a single Projection -- confirm callers accept this.
        try:
            sample_projection = MappingProjection(sender=output_source, receiver=comparator.input_ports[SAMPLE])
        except DuplicateProjectionError:
            sample_projection = [p for p in output_source.efferents
                                 if p in comparator.input_ports[SAMPLE].path_afferents]
        try:
            target_projection = MappingProjection(sender=target, receiver=comparator.input_ports[TARGET])
        except DuplicateProjectionError:
            target_projection = [p for p in target.efferents
                                 if p in comparator.input_ports[TARGET].path_afferents]
        # Feed current activities and the Comparator's error signal into the LearningMechanism
        act_in_projection = MappingProjection(sender=input_source.output_ports[0],
                                              receiver=learning_mechanism.input_ports[ACTIVATION_INPUT_INDEX])
        act_out_projection = MappingProjection(sender=output_source.output_ports[0],
                                               receiver=learning_mechanism.input_ports[ACTIVATION_OUTPUT_INDEX])
        # FIX CROSS_PATHWAYS 7/28/19 [JDC]: THIS MAY NEED TO USE add_ports (SINCE ONE MAY EXIST; CONSTRUCT TEST FOR IT)
        error_signal_projection = MappingProjection(sender=comparator.output_ports[OUTCOME],
                                                    receiver=learning_mechanism.input_ports[ERROR_SIGNAL_INDEX])
        return [target_projection, sample_projection, error_signal_projection, act_out_projection, act_in_projection]
def _create_learning_projection(self, learning_mechanism, learned_projection):
"""Construct LearningProjections from LearningMechanisms to learned_projections in processing pathway"""
learning_projection = LearningProjection(name="Learning Projection",
sender=learning_mechanism.learning_signals[0],
receiver=learned_projection.parameter_ports["matrix"])
learned_projection.has_learning_projection = True
return learning_projection
    def _create_rl_related_mechanisms(self,
                                      input_source,
                                      output_source,
                                      error_function,
                                      learned_projection,
                                      learning_rate,
                                      learning_update):
        """Construct *TARGET_MECHANISM*, *COMPARATOR_MECHANISM* and *LEARNING_MECHANISM*
        for a Reinforcement-learning pathway.
        """
        target_mechanism = ProcessingMechanism(name='Target')
        # Comparator computes error_function(sample, target); sample is weighted -1
        comparator_mechanism = ComparatorMechanism(name='Comparator',
                                                   sample={NAME: SAMPLE,
                                                           VARIABLE: [0.], WEIGHT: -1},
                                                   target={NAME: TARGET,
                                                           VARIABLE: [0.]},
                                                   function=error_function,
                                                   output_ports=[OUTCOME, MSE])
        learning_mechanism = \
            LearningMechanism(function=Reinforcement(default_variable=[input_source.output_ports[0].value,
                                                                       output_source.output_ports[0].value,
                                                                       comparator_mechanism.output_ports[0].value],
                                                     learning_rate=learning_rate),
                              default_variable=[input_source.output_ports[0].value,
                                                output_source.output_ports[0].value,
                                                comparator_mechanism.output_ports[0].value],
                              error_sources=comparator_mechanism,
                              learning_enabled=learning_update,
                              in_composition=True,
                              name="Learning Mechanism for " + learned_projection.name)
        return target_mechanism, comparator_mechanism, learning_mechanism
    def _create_td_related_mechanisms(self,
                                      input_source,
                                      output_source,
                                      error_function,
                                      learned_projection,
                                      learning_rate,
                                      learning_update):
        """Construct *TARGET_MECHANISM*, *COMPARATOR_MECHANISM* (a PredictionErrorMechanism)
        and *LEARNING_MECHANISM* for a Temporal-Differences learning pathway.
        NOTE(review): error_function is unused here -- PredictionErrorDeltaFunction
        (gamma=1.0) is always used; confirm this is intentional.
        """
        target_mechanism = ProcessingMechanism(name='Target',
                                               default_variable=output_source.defaults.value)
        comparator_mechanism = PredictionErrorMechanism(name='PredictionError',
                                                        sample={NAME: SAMPLE,
                                                                VARIABLE: output_source.defaults.value},
                                                        target={NAME: TARGET,
                                                                VARIABLE: output_source.defaults.value},
                                                        function=PredictionErrorDeltaFunction(gamma=1.0))
        learning_mechanism = LearningMechanism(function=TDLearning(learning_rate=learning_rate),
                                               default_variable=[input_source.output_ports[0].defaults.value,
                                                                 output_source.output_ports[0].defaults.value,
                                                                 comparator_mechanism.output_ports[0].defaults.value],
                                               error_sources=comparator_mechanism,
                                               learning_enabled=learning_update,
                                               in_composition=True,
                                               name="Learning Mechanism for " + learned_projection.name)
        return target_mechanism, comparator_mechanism, learning_mechanism
def _create_backpropagation_learning_pathway(self, pathway, loss_function, learning_rate=0.05, error_function=None,
learning_update:tc.optional(tc.any(bool, tc.enum(ONLINE, AFTER)))=AFTER):
# FIX: LEARNING CONSOLIDATION - Can get rid of this:
if not error_function:
error_function = LinearCombination()
# Add pathway to graph and get its full specification (includes all ProcessingMechanisms and MappingProjections)
processing_pathway = self.add_linear_processing_pathway(pathway)
path_length = len(processing_pathway)
# Pathway length must be >=3 (Mechanism, Projection, Mechanism
if path_length >= 3:
# get the "terminal_sequence" --
# the last 2 nodes in the back prop pathway and the projection between them
# these components are are processed separately because
# they inform the construction of the Target and Comparator mechs
terminal_sequence = processing_pathway[path_length - 3: path_length]
else:
raise CompositionError(f"Backpropagation pathway specification ({pathway}) must not contain "
f"at least three components "
f"([{Mechanism.__name__}, {Projection.__name__}, {Mechanism.__name__}]).")
# Unpack and process terminal_sequence:
input_source, learned_projection, output_source = terminal_sequence
# If pathway includes existing terminal_sequence for the output_source, use that
if output_source in self._terminal_backprop_sequences:
# FIX CROSSED_PATHWAYS 7/28/19 [JDC]:
# THIS SHOULD BE INTEGRATED WITH CALL TO _create_terminal_backprop_learning_components
# ** NEED TO CHECK WHETHER LAST NODE IN THE SEQUENCE IS TERMINAL AND IF SO:
# ASSIGN USING: self.add_required_node_role(output_source, NodeRole.OUTPUT)
# If learned_projection already has a LearningProjection (due to pathway overlap),
# use those terminal sequence components
if (learned_projection.has_learning_projection
and any([lp for lp in learned_projection.parameter_ports[MATRIX].mod_afferents
if lp in self.projections])):
target = self._terminal_backprop_sequences[output_source][TARGET_MECHANISM]
comparator = self._terminal_backprop_sequences[output_source][COMPARATOR_MECHANISM]
learning_mechanism = self._terminal_backprop_sequences[output_source][LEARNING_MECHANISM]
# Otherwise, create new ones
else:
target, comparator, learning_mechanism = \
self._create_terminal_backprop_learning_components(input_source,
output_source,
error_function,
loss_function,
learned_projection,
learning_rate,
learning_update)
sequence_end = path_length - 3
# # FIX: ALTERNATIVE IS TO TEST WHETHER IT PROJECTIONS TO ANY MECHANISMS WITH LEARNING ROLE
# Otherwise, if output_source already projects to a LearningMechanism, integrate with existing sequence
elif any(isinstance(p.receiver.owner, LearningMechanism) for p in output_source.efferents):
# Set learning_mechanism to the one to which output_source projects
learning_mechanism = next((p.receiver.owner for p in output_source.efferents
if isinstance(p.receiver.owner, LearningMechanism)))
# # Use existing target and comparator to learning_mechanism for Mechanism to which output_source project
# target = self._terminal_backprop_sequences[output_source][TARGET_MECHANISM]
# comparator = self._terminal_backprop_sequences[output_source][COMPARATOR_MECHANISM]
target = None
comparator = None
sequence_end = path_length - 1
# Otherwise create terminal_sequence for the sequence,
# and eliminate existing terminal_sequences previously created for Mechanisms now in the pathway
else:
# Eliminate existing comparators and targets for Mechanisms now in the pathway that were output_sources
# (i.e., ones that belong to previously-created sequences that overlap with the current one)
for pathway_mech in [m for m in pathway if isinstance(m, Mechanism)]:
old_comparator = next((p.receiver.owner for p in pathway_mech.efferents
if (isinstance(p.receiver.owner, ComparatorMechanism)
and p.receiver.owner in self.get_nodes_by_role(NodeRole.LEARNING))),
None)
if old_comparator:
old_target = next((p.sender.owner for p in old_comparator.input_ports[TARGET].path_afferents
if p.sender.owner in self.get_nodes_by_role(NodeRole.TARGET)),
None)
self.remove_nodes([old_comparator, old_target])
# FIX CROSSING_PATHWAYS [JDC]: MAKE THE FOLLOWING A METHOD?
# Collect InputPorts that received error_signal projections from the old_comparator
# and delete after old_comparator has been deleted
# (i.e., after those InputPorts have been vacated)
old_error_signal_input_ports = []
for error_projection in old_comparator.output_port.efferents:
old_error_signal_input_ports.append(error_projection.receiver)
Mechanism_Base._delete_mechanism(old_comparator)
Mechanism_Base._delete_mechanism(old_target)
for input_port in old_error_signal_input_ports:
input_port.owner.remove_ports(input_port)
del self._terminal_backprop_sequences[pathway_mech]
del self.required_node_roles[self.required_node_roles.index((pathway_mech, NodeRole.OUTPUT))]
# Create terminal_sequence
target, comparator, learning_mechanism = \
self._create_terminal_backprop_learning_components(input_source,
output_source,
error_function,
loss_function,
learned_projection,
learning_rate,
learning_update)
self._terminal_backprop_sequences[output_source] = {LEARNING_MECHANISM: learning_mechanism,
TARGET_MECHANISM: target,
COMPARATOR_MECHANISM: comparator}
self.add_required_node_role(pathway[-1], NodeRole.OUTPUT)
sequence_end = path_length - 3
# loop backwards through the rest of the pathway to create and connect
# the remaining learning mechanisms
learning_mechanisms = [learning_mechanism]
learned_projections = [learned_projection]
for i in range(sequence_end, 1, -2):
# set variables for this iteration
input_source = processing_pathway[i - 2]
learned_projection = processing_pathway[i - 1]
output_source = processing_pathway[i]
learning_mechanism = self._create_non_terminal_backprop_learning_components(input_source,
output_source,
learned_projection,
learning_rate,
learning_update)
learning_mechanisms.append(learning_mechanism)
learned_projections.append(learned_projection)
# Add error_signal projections to any learning_mechanisms that are now dependent on the new one
for lm in learning_mechanisms:
if | |
vlan_vn_count + 1):
info = {
'name': vpg1_vmi_names[vmi_id - 1],
'vmi_id': vmi_id,
'parent_obj': proj_obj,
'vn': vn_objs[vn_names[vmi_id - 1]],
'vpg': vpg_objs[vpg_names[0]].uuid,
'fabric': fabric_name,
'pis': [],
'vlan': vlan_ids[vmi_id - 1],
'is_untagged': False}
vmi_infos.append(info)
with ExpectedException(BadRequest):
self._create_vmis(vmi_infos)
# Now ensure no change in AE-IDs
# verify PI-refs are correct
self.assertEqual(len(pi_refs), 2)
# verify all AE-IDs allocated per prouter are unique
self.assertEqual(len(set(ae_ids[vpg_name].keys())), len(pi_refs))
self.assertEqual(len(set(ae_ids[vpg_name].values())), 1)
# verification at Physical Routers
pr_ae_ids = get_zk_ae_ids()
self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 1)
self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [0])
# UT to verify CEM-18881
    def test_for_pi_vpg_association_with_mock(self):
        """CEM-18881: VMI creates that acquire their VPG ref via a mocked
        pre_dbe_create must not change AE-ID allocations -- the single AE-ID
        allocated by the initial 2-PI attach must remain the only one.
        """
        proj_obj, fabric_obj, pr_objs = self._create_prerequisites(
            create_second_pr=True)
        fabric_name = fabric_obj.get_fq_name()
        test_id = self.id()
        VMI_CLASS = self._api_server.get_resource_class(
            'virtual-machine-interface')
        VMI_CLASS_ORG_PRE_DBE_CREATE = self._api_server.get_resource_class(
            'virtual-machine-interface').pre_dbe_create
        vlan_vn_count = 3
        # Inject a VPG ref into any VMI create that lacks one, then delegate
        # to the original pre_dbe_create
        def mock_pre_dbe_create(tenant_name, obj_dict, db_conn):
            if not obj_dict.get('virtual-port-group-refs'):
                obj_dict.update(VPG_REF)
            return VMI_CLASS_ORG_PRE_DBE_CREATE(tenant_name, obj_dict, db_conn)
        def process_ae_ids(x):
            return [int(i) for i in sorted(x)]
        # Read allocated AE-IDs per physical-router directly from ZooKeeper
        def get_zk_ae_ids(prs=None):
            prefix = os.path.join(
                self.__class__.__name__,
                'id', 'aggregated-ethernet')
            zk_client = self._api_server._db_conn._zk_db._zk_client._zk_client
            if not prs:
                prs = [os.path.join(prefix, pr.name) for pr in pr_objs]
            else:
                if not isinstance(prs, list):
                    prs = [prs]
                prs = [os.path.join(prefix, pr) for pr in prs]
            ae_ids = {}
            for pr in prs:
                pr_org = os.path.split(pr)[-1]
                ae_ids[pr_org] = zk_client.get_children(pr)
            return ae_ids
        vlan_ids = range(1, vlan_vn_count + 1)
        vn_names = ['vn_%s_%s' % (test_id, i)
                    for i in range(1, vlan_vn_count + 1)]
        vn_objs = self._create_vns(proj_obj, vn_names)
        pi_per_pr = 2
        pi_objs = {}
        pr1_pi_names = ['%s_pr1_pi%d' % (test_id, i) for
                        i in range(1, pi_per_pr + 1)]
        pr1_pi_objs = self._create_pi_objects(pr_objs[0], pr1_pi_names)
        pi_objs.update(pr1_pi_objs)
        # create one VPG
        vpg_count = 1
        vpg_names = ['vpg_%s_%s' % (test_id, i) for
                     i in range(1, vpg_count + 1)]
        vpg_objs = self._create_vpgs(fabric_obj, vpg_names)
        VPG_REF = {'virtual_port_group_refs': [{
            'to': list(vpg_objs.values())[0].get_fq_name(),
            'uuid': list(vpg_objs.values())[0].uuid
        }]}
        # record AE-IDs in ZK before creating any VPG
        ae_ids = [x for x in get_zk_ae_ids().values() if x]
        self.assertEqual(len(ae_ids), 0)
        # attach PI1/PR1 and PI2/PR1 to VPG-1
        # 1 AE-ID to be allocated
        ae_ids = {}
        vpg_name = vpg_names[0]
        vpg_obj = vpg_objs[vpg_name]
        for pi in range(2):
            vpg_obj.add_physical_interface(pi_objs[pr1_pi_names[pi]])
        self.api.virtual_port_group_update(vpg_obj)
        vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)
        pi_refs = vpg_obj.get_physical_interface_refs()
        ae_ids[vpg_name] = {ref['href'].split('/')[-1]: ref['attr'].ae_num
                            for ref in pi_refs}
        # verify PI-refs are correct
        self.assertEqual(len(pi_refs), 2)
        # verify all AE-IDs allocated per prouter are unique
        self.assertEqual(len(set(ae_ids[vpg_name].keys())), len(pi_refs))
        self.assertEqual(len(set(ae_ids[vpg_name].values())), 1)
        # verification at Physical Routers
        pr_ae_ids = get_zk_ae_ids()
        self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 1)
        self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [0])
        # Now add VMI-1 and VMI-2 info
        vmi_infos = []
        vpg1_vmi_names = ['vmi_vpg1_%s_%s' % (test_id, vmi_id) for vmi_id in
                          range(1, vlan_vn_count + 1)]
        for vmi_id in range(1, vlan_vn_count + 1):
            info = {
                'name': vpg1_vmi_names[vmi_id - 1],
                'vmi_id': vmi_id,
                'parent_obj': proj_obj,
                'vn': vn_objs[vn_names[vmi_id - 1]],
                'vpg': vpg_objs[vpg_names[0]].uuid,
                'fabric': fabric_name,
                'pis': [],
                'vlan': vlan_ids[vmi_id - 1],
                'is_untagged': False}
            vmi_infos.append(info)
        # Create the VMIs with the mocked pre_dbe_create in place
        with mock.patch.object(VMI_CLASS, 'pre_dbe_create',
                               side_effect=mock_pre_dbe_create):
            self._create_vmis(vmi_infos)
        # Now ensure still only 1 AE-ID is allocated
        # verify PI-refs are correct
        self.assertEqual(len(pi_refs), 2)
        # verify all AE-IDs allocated per prouter are unique
        self.assertEqual(len(set(ae_ids[vpg_name].keys())), len(pi_refs))
        self.assertEqual(len(set(ae_ids[vpg_name].values())), 1)
        # verification at Physical Routers
        pr_ae_ids = get_zk_ae_ids()
        self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 1)
        self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [0])
# UT to verify CEM-18881 for update functionality
def test_for_pi_vpg_association_with_pre_dbe_update_mock(self):
proj_obj, fabric_obj, pr_objs = self._create_prerequisites(
create_second_pr=True)
fabric_name = fabric_obj.get_fq_name()
test_id = self.id()
VMI_CLASS = self._api_server.get_resource_class(
'virtual-machine-interface')
VMI_CLASS_ORG_PRE_DBE_CREATE = self._api_server.get_resource_class(
'virtual-machine-interface').pre_dbe_create
vlan_vn_count = 3
def mock_pre_dbe_create(tenant_name, obj_dict, db_conn):
if not obj_dict.get('virtual-port-group-refs'):
obj_dict.update(VPG_REF)
return VMI_CLASS_ORG_PRE_DBE_CREATE(tenant_name, obj_dict, db_conn)
def process_ae_ids(x):
return [int(i) for i in sorted(x)]
def get_zk_ae_ids(prs=None):
prefix = os.path.join(
self.__class__.__name__,
'id', 'aggregated-ethernet')
zk_client = self._api_server._db_conn._zk_db._zk_client._zk_client
if not prs:
prs = [os.path.join(prefix, pr.name) for pr in pr_objs]
else:
if not isinstance(prs, list):
prs = [prs]
prs = [os.path.join(prefix, pr) for pr in prs]
ae_ids = {}
for pr in prs:
pr_org = os.path.split(pr)[-1]
ae_ids[pr_org] = zk_client.get_children(pr)
return ae_ids
vlan_ids = range(1, vlan_vn_count + 1)
vn_names = ['vn_%s_%s' % (test_id, i)
for i in range(1, vlan_vn_count + 1)]
vn_objs = self._create_vns(proj_obj, vn_names)
pi_per_pr = 2
pi_objs = {}
pr1_pi_names = ['%s_pr1_pi%d' % (test_id, i) for
i in range(1, pi_per_pr + 1)]
pr1_pi_objs = self._create_pi_objects(pr_objs[0], pr1_pi_names)
pi_objs.update(pr1_pi_objs)
# create one VPG
vpg_count = 1
vpg_names = ['vpg_%s_%s' % (test_id, i) for
i in range(1, vpg_count + 1)]
vpg_objs = self._create_vpgs(fabric_obj, vpg_names)
VPG_REF = {'virtual_port_group_refs': [{
'to': list(vpg_objs.values())[0].get_fq_name(),
'uuid': list(vpg_objs.values())[0].uuid
}]}
# record AE-IDs in ZK before creating any VPG
ae_ids = [x for x in get_zk_ae_ids().values() if x]
self.assertEqual(len(ae_ids), 0)
# attach PI1/PR1 and PI2/PR1 to VPG-1
# 1 AE-ID to be allocated
ae_ids = {}
vpg_name = vpg_names[0]
vpg_obj = vpg_objs[vpg_name]
for pi in range(2):
vpg_obj.add_physical_interface(pi_objs[pr1_pi_names[pi]])
self.api.virtual_port_group_update(vpg_obj)
vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)
pi_refs = vpg_obj.get_physical_interface_refs()
ae_ids[vpg_name] = {ref['href'].split('/')[-1]: ref['attr'].ae_num
for ref in pi_refs}
# verify PI-refs are correct
self.assertEqual(len(pi_refs), 2)
# verify all AE-IDs allocated per prouter are unique
self.assertEqual(len(set(ae_ids[vpg_name].keys())), len(pi_refs))
self.assertEqual(len(set(ae_ids[vpg_name].values())), 1)
# verification at Physical Routers
pr_ae_ids = get_zk_ae_ids()
self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 1)
self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [0])
# Now add VMI-1 and VMI-2 info
vmi_infos = []
vpg1_vmi_names = ['vmi_vpg1_%s_%s' % (test_id, vmi_id) for vmi_id in
range(1, vlan_vn_count + 1)]
for vmi_id in range(1, vlan_vn_count + 1):
info = {
'name': vpg1_vmi_names[vmi_id - 1],
'vmi_id': vmi_id,
'parent_obj': proj_obj,
'vn': vn_objs[vn_names[vmi_id - 1]],
'vpg': vpg_objs[vpg_names[0]].uuid,
'fabric': fabric_name,
'pis': [],
'vlan': vlan_ids[vmi_id - 1],
'is_untagged': False}
vmi_infos.append(info)
with mock.patch.object(VMI_CLASS, 'pre_dbe_create',
side_effect=mock_pre_dbe_create):
vmi_objs = self._create_vmis(vmi_infos)
# Now ensure still only 1 AE-ID is allocated
# verify PI-refs are correct
self.assertEqual(len(pi_refs), 2)
# verify all AE-IDs allocated per prouter are unique
self.assertEqual(len(set(ae_ids[vpg_name].keys())), len(pi_refs))
self.assertEqual(len(set(ae_ids[vpg_name].values())), 1)
# verification at Physical Routers
pr_ae_ids = get_zk_ae_ids()
self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 1)
self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [0])
# Now update VMI-1 info
# Mock pre_dbe_update and update vmi info
VMI_CLASS = self._api_server.get_resource_class(
'virtual-machine-interface')
VMI_CLASS_ORG_PRE_DBE_UPDATE = self._api_server.get_resource_class(
'virtual-machine-interface').pre_dbe_update
def mock_pre_dbe_update(id, fq_name, obj_dict, db_conn,
prop_collection_updates=None, **kwargs):
if not obj_dict.get('virtual-port-group-refs'):
obj_dict.update(VPG_REF)
return VMI_CLASS_ORG_PRE_DBE_UPDATE(id, fq_name, obj_dict, db_conn,
prop_collection_updates=None,
**kwargs)
vmi_id = 1
vmi_infos = []
vpg1_vmi_names = ['vmi_vpg1_%s_%s' % (test_id, vmi_id) for vmi_id in
range(1, vlan_vn_count + 1)]
vmi_name = vpg1_vmi_names[0]
info = {
'name': vmi_name,
'vmi_uuid': vmi_objs[vmi_name].uuid,
'parent_obj': proj_obj,
'vn': vn_objs[vn_names[vmi_id - 1]],
'vpg': vpg_objs[vpg_names[0]].uuid,
'fabric': fabric_name,
'pis': [],
'vlan': '4094',
'is_untagged': True}
vmi_infos.append(info)
with mock.patch.object(VMI_CLASS, 'pre_dbe_update',
side_effect=mock_pre_dbe_update):
vmi_objs = self._update_vmis(vmi_infos)
# Now ensure no change in AE-IDs
# verify PI-refs are correct
self.assertEqual(len(pi_refs), 2)
# verify all AE-IDs allocated per prouter are unique
self.assertEqual(len(set(ae_ids[vpg_name].keys())), len(pi_refs))
self.assertEqual(len(set(ae_ids[vpg_name].values())), 1)
# verification at Physical Routers
pr_ae_ids = get_zk_ae_ids()
self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 1)
self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [0])
# Test cases to verify CEM-20323 and CEM-19807
def test_add_user_defined_ae_num(self):
"""
UT to validate user-defined ae-num=1.
Assign PI-1/PR-1, PI-2/PR-1 with ae-num=1 to VPG-1
Verify that allocated AE-ID=1
"""
proj_obj, fabric_obj, pr_objs = self._create_prerequisites(
create_second_pr=True)
test_id = self.id()
def process_ae_ids(x):
return [int(i) for i in sorted(x) if i is not None]
def get_zk_ae_ids(prs=None):
prefix = os.path.join(
self.__class__.__name__,
'id', 'aggregated-ethernet')
zk_client = self._api_server._db_conn._zk_db._zk_client._zk_client
if not prs:
prs = [os.path.join(prefix, pr.name) for pr in pr_objs]
else:
if not isinstance(prs, list):
prs = [prs]
prs = [os.path.join(prefix, pr) for pr in prs]
ae_ids = {}
for pr in prs:
pr_org = os.path.split(pr)[-1]
ae_ids[pr_org] = zk_client.get_children(pr)
return ae_ids
pi_per_pr = 2
pi_objs = {}
pr1_pi_names = ['%s_pr1_pi%d' % (test_id, i) for
i in range(1, pi_per_pr + 1)]
pr2_pi_names = ['%s_pr2_pi%d' % (test_id, i) for
i in range(1, pi_per_pr + 1)]
pr1_pi_objs = self._create_pi_objects(pr_objs[0], pr1_pi_names)
pr2_pi_objs = self._create_pi_objects(pr_objs[1], pr2_pi_names)
pi_objs.update(pr1_pi_objs)
pi_objs.update(pr2_pi_objs)
# create a VPG
vpg_count = 1
vpg_names = ['vpg_%s_%s' % (test_id, i) for i in range(
1, vpg_count + 1)]
vpg_objs = self._create_vpgs(fabric_obj, vpg_names)
# record AE-IDs in ZK before creating any VPG
ae_ids = [x for x in get_zk_ae_ids().values() if x]
self.assertEqual(len(ae_ids), 0)
def _attach_pi_to_lr(vpg_obj, pi_uuids):
# Attach PIs from PR1 to VPG-1
vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)
for pi_uuid in pi_uuids:
self.api.ref_update(
"virtual-port-group",
vpg_obj.uuid,
"physical-interface",
pi_uuid,
None,
"ADD",
{"ae_num": 1})
vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)
pi_refs = vpg_obj.get_physical_interface_refs()
return vpg_obj, pi_refs
# Case 1
# Attach 2 PIs from PR1 to VPG-1
ae_ids = {}
vpg_name = vpg_names[0]
vpg_obj = vpg_objs[vpg_name]
vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)
pi_uuids = [pi_objs[pr1_pi_names[pi]].uuid for pi in range(2)]
vpg_obj, pi_refs = _attach_pi_to_lr(vpg_obj, pi_uuids)
# verify PI-refs are correct
self.assertEqual(len(pi_refs), 2)
ae_ids[vpg_name] = {ref['href'].split('/')[-1]: ref['attr'].ae_num
for ref in pi_refs}
# verify all AE-IDs allocated per prouter are unique
self.assertEqual(len(set(ae_ids[vpg_name].keys())), len(pi_refs))
self.assertEqual(len(set(ae_ids[vpg_name].values())), 1)
ae_id_sorted = process_ae_ids(ae_ids[vpg_name].values())
self.assertEqual(ae_id_sorted, [1] * 2)
# verification at Physical Routers
pr_ae_ids = get_zk_ae_ids()
self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 1)
self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 0)
self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [1])
self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[1].name]), | |
"""
This file is part of pynadc
https://github.com/rmvanhees/pynadc
Calibrate Sciamachy Solar Mean Radiance measurements
Copyright (c) 2016-2021 SRON - Netherlands Institute for Space Research
All Rights Reserved
License: BSD-3-Clause
"""
#
# Code layout:
# SECTION VERSION
# - define various versions of the S/W
# SECTION AUXILIARY CKD
# - define functions to read and pre-process CKD required for the calibration
# - defined functions: get_h5_RadSensMoni
# SECTION READ DATA
# - define class SDMFreadSun to read the Sun measurements
# SECTION CALIBRATE DATA
# - define class 'SMRcalib' to calibrate the Sun measurements
# SECTION WRITE DATA
# - define class 'SMRdb' to write calibrated spectra and meta-data to database
# SECTION DISPLAY DATA
# - define class 'SMRshow' to display Sun measurements
# SECTION ARGPARSE
# - define function 'handleCmdParams' to obtain command-line parameters
# SECTION MAIN
# - code to be run as a standalone program
#
from datetime import datetime
from math import cos, pi
import argparse
import re
import sys
import h5py
import numpy as np
from numpy import ma
import matplotlib.pyplot as plt
from pynadc.scia import db, lv1
# -------------------------SECTION VERSION-----------------------------------
# version of this S/W
_swVersion = {'major': 0,
              'minor': 8,
              'revision': 4}
# version of the calibration algorithm applied to the Sun measurements
_calibVersion = {'major': 1,
                 'minor': 1,
                 'revision': 0}
# version of the SMR database layout written by class 'SMRdb'
_dbVersion = {'major': 0,
              'minor': 9,
              'revision': 3}
# -------------------------SECTION ERROR CLASSES-----------------------------
class dbError(Exception):
    """Exception raised on errors while accessing the SMR database."""
    pass
class readSunInfo(Exception):
    """Raised when Sun measurements cannot be read (e.g. no matching orbits)."""
    pass
# -------------------------SECTION AUXILIARY CKD-----------------------------
def get_h5_RadSensMoni(NDF=True, debug=False,
                       key_fname='/SCIA/share/nadc_tools/key_radsens.h5'):
    """Read and pre-process the radiance-sensitivity (monitoring) key data CKD.

    All key data are regridded (per channel of 1024 pixels) to the reference
    wavelength grid of the ABS_RAD dataset and combined into one
    radiance-sensitivity spectrum per (elevation angle, ASM angle) pair.

    Parameters
    ----------
    NDF : bool
        apply the Neutral Density Filter correction (default: True)
    debug : bool
        when True, also write the result to file 'scia_key_rspm.h5'
    key_fname : str
        path of the radiance-sensitivity key-data file; default is the
        original hard-coded location (backward compatible)

    Returns
    -------
    numpy.ndarray
        structured array with one record and fields
        'ang_ele', 'ang_asm', 'wvlen', 'sensitivity'
    """
    fmtRSPM = '%-dfloat32, %-dfloat32, 8192float32, (%-d,%-d,8192)float64'
    nameRSPM = ('ang_ele', 'ang_asm', 'wvlen', 'sensitivity')
    # use a context manager so the file is closed even if an exception occurs
    with h5py.File(key_fname, 'r') as fid:
        ppg0 = fid['PPG0']['PPG0'][:]
        grp = fid['ABS_RAD']
        ref_wl = grp['Axis 1 Wavelength'][:]  # regrid all data to ref_wl instead of Sun spectrum
        abs_rad = grp['ABS_RAD'][:].astype('float64')
        abs_rad /= (5.035e8 * ref_wl * ppg0)
        del ppg0
        # OBM s/p ratio, interpolated per channel to the reference grid
        grp = fid['OBM_s_p']
        obm_s_p_wl = grp['Axis 1 Wavelength'][:]
        tmp = grp['OBM_s_p'][:].astype('float64')
        obm_s_p = np.empty_like(tmp)
        for nc in range(8):
            i_mn = nc * 1024
            i_mx = i_mn + 1024
            obm_s_p[i_mn:i_mx] = np.interp(ref_wl[i_mn:i_mx],
                                           obm_s_p_wl[i_mn:i_mx],
                                           tmp[i_mn:i_mx])
        del tmp
        # elevation-mirror key data for p and s polarization
        # (index 2 along the elevation-angle axis, as in the original code)
        grp = fid['ELEV_p']
        elev_p_wl = grp['Axis 1 wavelength'][:]
        elev_p = grp['ELEV_p'][2, :]
        grp = fid['ELEV_s']
        elev_s_wl = grp['Axis 1 wavelength'][:]
        elev_s = grp['ELEV_s'][2, :]
        elev_p_a0 = np.interp(ref_wl, elev_p_wl, elev_p.astype('float64'))
        elev_s_a0 = np.interp(ref_wl, elev_s_wl, elev_s.astype('float64'))
        elev_a0 = obm_s_p * elev_s_a0 + elev_p_a0
        if NDF:
            # apply Neutral Density Filter transmission and its s/p ratio
            grp = fid['NDF']
            ndf_wl = grp['Axis 1 Wavelength'][:]
            tmp = grp['NDF'][:].astype('float64')
            ndf = np.empty_like(tmp)
            for nc in range(8):
                i_mn = nc * 1024
                i_mx = i_mn + 1024
                ndf[i_mn:i_mx] = np.interp(ref_wl[i_mn:i_mx],
                                           ndf_wl[i_mn:i_mx],
                                           tmp[i_mn:i_mx])
            del tmp
            grp = fid['NDF_s_p']
            ndf_s_p_wl = grp['Axis 1 Wavelength'][:]
            # NOTE: a redundant un-used read of the NDF_s_p dataset
            # (immediately overwritten) has been removed here
            tmp = grp['NDF_s_p'][:].astype('float64')
            ndf_s_p = np.empty_like(tmp)
            for nc in range(8):
                i_mn = nc * 1024
                i_mx = i_mn + 1024
                ndf_s_p[i_mn:i_mx] = np.interp(ref_wl[i_mn:i_mx],
                                               ndf_s_p_wl[i_mn:i_mx],
                                               tmp[i_mn:i_mx])
            del tmp
            abs_rad *= (2 * ndf / (1 + ndf_s_p))
            obm_s_p *= ndf_s_p
        # BRDF key data (wavelength x elevation x ASM) for p and s polarization
        grp = fid['BRDF_p']
        brdf_p_wl = grp['Axis 1 vacuum wavelength'][:]
        brdf_p_ele = grp['Axis 2 elevation angle'][:]
        brdf_p_asm = grp['Axis 3 ASM angle'][:]
        brdf_p = grp['BRDF_p'][:].astype('float64')
        grp = fid['BRDF_s']
        brdf_s_wl = grp['Axis 1 vacuum wavelength'][:]
        brdf_s = grp['BRDF_s'][:].astype('float64')
    # re-arrange data: switch ang_asm and ang_ele and store both increasing
    dimWave = brdf_p_wl.shape[0]
    dimASM = brdf_p_asm.shape[0]
    dimELE = brdf_p_ele.shape[0]
    dimBRDF = dimASM * dimELE
    indx = []
    for ni in range(dimELE):
        indx += list(ni
                     + np.linspace(dimBRDF-dimELE, 0, num=dimASM).astype(int))
    # write data to output structure, interpolated to the ele,asm grid
    rspm = np.empty(1, dtype=fmtRSPM % (dimELE, dimASM, dimELE, dimASM))
    rspm.dtype.names = nameRSPM
    rspm['ang_ele'] = brdf_p_ele
    rspm['ang_asm'] = brdf_p_asm[::-1]
    rspm['wvlen'] = ref_wl
    brdf_p = brdf_p.reshape(dimBRDF, dimWave)
    brdf_s = brdf_s.reshape(dimBRDF, dimWave)
    for ni in range(dimBRDF):
        brdf_p_ni = np.interp(ref_wl, brdf_p_wl, brdf_p[indx[ni], :])
        brdf_s_ni = np.interp(ref_wl, brdf_s_wl, brdf_s[indx[ni], :])
        rspm['sensitivity'][0, ni // dimASM, ni % dimASM, :] = \
            abs_rad * (obm_s_p * brdf_s_ni + brdf_p_ni) / elev_a0
    if debug:
        with h5py.File('scia_key_rspm.h5', 'w') as fid:
            fid.create_dataset('rspm', data=rspm)
    return rspm
# -------------------------SECTION READ DATA---------------------------------
class SDMFextractSun:
    """
    Read Sciamachy Sun/State 62 SDMF (v3.1) data
    """
    def __init__(self, state_id=62, sun_db='/SCIA/SDMF31/sdmf_extract_sun.h5'):
        """Open the SDMF sun database and cache the cluster definition.

        Parameters
        ----------
        state_id : int
            Sciamachy instrument state to read (default: 62)
        sun_db : str
            path of the HDF5 sun-extraction database
        """
        self.sun_db = sun_db
        self.state_id = state_id
        self.numChannels = 8       # add some constants for SCIA
        self.channelSize = 1024    # pixels per detector channel
        self.numPixels = self.numChannels * self.channelSize
        self.smr = None
        self.wvlen = None
        with h5py.File(sun_db, 'r') as fid:
            dset = fid['ClusDef']
            self.clusDef = dset[:]
    def selectOrbits(self, orbitRange):
        """Select database entries by orbit number.

        orbitRange may be a single int (the closest available orbit is
        taken) or a (min, max) pair (inclusive range).  Sets
        self.metaIndx and self.orbitList; raises readSunInfo when the
        selection is empty.
        """
        with h5py.File(self.sun_db, 'r') as fid:
            grp = fid['State_%02d' % self.state_id]
            dset = grp['orbitList']
            orbitList = dset[:]
        if isinstance(orbitRange, int):
            # single orbit requested: pick the closest available one
            self.metaIndx = np.argmin(abs(orbitList - orbitRange))
        else:
            self.metaIndx = np.where((orbitList >= orbitRange[0])
                                     & (orbitList <= orbitRange[1]))[0]
        self.orbitList = orbitList[self.metaIndx]
        if self.orbitList.size == 0:
            print('* Info: no orbits selected from sdmf_extract_sun.h5')
            raise readSunInfo
    def readData(self):
        """Read one selected measurement into the object.

        Consumes the first entry of self.metaIndx (set by selectOrbits)
        and fills mtbl, the pointing arrays, coaddf, pet and the
        hard-masked spectra array.  Raises readSunInfo when no
        selection remains.
        """
        # NOTE(review): assumes selectOrbits() was called first; otherwise
        # self.metaIndx does not exist and this raises AttributeError.
        if self.metaIndx is None or self.metaIndx.size == 0:
            raise readSunInfo
        if isinstance(self.metaIndx, np.ndarray):
            # pop the first selected index; the rest serve later calls
            metaIndx = self.metaIndx[0]
            self.metaIndx = self.metaIndx[1:]
        else:
            metaIndx = self.metaIndx
            self.metaIndx = None
        fid = h5py.File(self.sun_db, 'r')
        grp = fid['State_%02d' % self.state_id]
        dset = grp['metaTable']
        self.mtbl = dset[metaIndx]
        self.absOrbit = self.mtbl['absOrbit']
        self.obmTemp = self.mtbl['obmTemp']
        self.detTemp = self.mtbl['detTemp']
        dset = grp['pointing']
        pointing = dset[metaIndx]
        # split the compound 'pointing' records into separate arrays
        self.julianDay = np.array([x[0] for x in pointing])
        self.asmAngle = np.array([x[1] for x in pointing])
        self.esmAngle = np.array([x[2] for x in pointing])
        self.sunAzim = np.array([x[3] for x in pointing])
        self.sunElev = np.array([x[4] for x in pointing])
        dset = grp['cluster_01']
        self.numSpectra = dset[0].shape[0]
        self.coaddf = np.ones((self.numPixels,), dtype=np.uint8)
        self.pet = np.zeros((self.numPixels,), dtype=float)
        # hard-masked: once a pixel is masked it stays masked
        self.spectra = ma.array(
            np.zeros((self.numSpectra, self.numPixels), dtype='float64'),
            mask=np.zeros((self.numSpectra, self.numPixels), dtype=int),
            hard_mask=True)
        for nc in range(self.clusDef.shape[0]):
            dset = grp['cluster_%02d' % (nc+1)]
            x = self.clusDef[nc]
            # x[2]: first pixel of the cluster, x[3]: cluster length
            self.coaddf[x[2]:x[2]+x[3]] = dset.attrs['coaddf'][0]
            self.pet[x[2]:x[2]+x[3]] = dset.attrs['PET'][0]
            self.spectra[:, x[2]:x[2]+x[3]] = dset[metaIndx].astype('float64')
        fid.close()
# -------------------------SECTION CALIBRATE DATA----------------------------
class SMRcalib:
"""
Listing of implemented calibration IDs:
Mask dead & blinded pixels
Co-addition division correction
1. Memory Effect correction
2. Non-Linearity correction
3. Background Signal correction
4. Stray Light correction
5. Apply fit parameters
6. Apply mirror model
7. Radiance correction
8. Combine scans to Sun Mean Reference (implied for option "db")
"""
def __init__(self):
self.funclist = (
'maskDead', 'coaddDivision', 'memoryEffect', 'nonLinearity',
'backGround', 'strayLight', 'fitParam', 'mirrorModel',
'radiance', 'combineSpectra'
)
self.funcdict = dict(
zip((self.funclist),
(self.maskDead, self.coaddDivision,
self.memoryEffect, self.nonLinearity,
self.backGround, self.strayLight,
self.fitParam, self.mirrorModel,
self.radiance, self.combineSpectra))
)
@staticmethod
def maskDead(smr, verbose=False):
"""
(*) Identifies dead pixels (based on measurements)
and blinded pixels at start and end of detector array.
Parameters
----------
None
Returns
-------
mask where the dead/blinded pixels have boolean value True
Notes
-----
None
"""
if verbose:
print('(*) Perform masking of the dead/bad pixel')
smr.errorType = 'F'
#
# mask blinded pixels
#
smr.blinded = np.empty((smr.numPixels,), dtype=bool)
smr.blinded[:] = False
id_list = np.array(list(range(10)) + list(range(1024-10, 1024)))
smr.blinded[0+id_list] = True # channel 1
smr.blinded[1024+id_list] = True # channel 2
smr.blinded[2048+id_list] = True # channel 3
smr.blinded[3072+id_list] = True # channel 4
smr.blinded[4096+id_list] = True # channel 5
smr.blinded[5120+id_list] = True # channel 6
smr.blinded[6144+id_list] = True # channel 7
smr.blinded[7168+id_list] = True # channel 8
#
# mask dead pixels
#
i_masked = smr.spectra.mask.sum()
smr.spectra = ma.masked_equal(smr.spectra, 0, copy=False)
if verbose:
masked = smr.spectra.mask.sum()
print('* Info: masked %6.1f pixels/spectrum with zero signal'
% ((masked - i_masked) / float(smr.numSpectra)))
i_masked = masked
smr.spectra = ma.masked_where((smr.spectra / smr.coaddf) >= 65535.,
smr.spectra, copy=False)
if verbose:
masked = smr.spectra.mask.sum()
print('* Info: masked %6.1f pixels/spectrum with saturated signal'
% ((masked - i_masked) / float(smr.numSpectra)))
i_masked = masked
    @staticmethod
    def coaddDivision(smr, verbose=False):
        """
        (*) Co-addition division correction divides the data by the number
        of measurements added by the on-board co-adder.
        Parameters
        ----------
        coaddf : number of measurements co-added (C3)
            dimension should be one or equal to the spatial dimension
        Returns
        -------
        Signal S_out(i,j) corrected for coadding [ADC counts]
        Notes
        -----
        * None
        """
        if verbose:
            print('(*) Perform division by co-adding factor')
        # 'M' flags multiplicative error propagation for this step
        smr.errorType = 'M'
        # in-place division preserves the (hard) mask of the masked array
        smr.spectra /= smr.coaddf
@staticmethod
def memoryEffect(smr, verbose=False):
"""
(1) Memory Effect correction, this is the effect that the current
measurement depends on the previous measurement.
Parameters
----------
c_mem : memory correction parameters [i,j] (C1)
Returns
| |
#!/usr/bin/env python3
# coding: utf-8
"""The game logic.
This should be independent of media used to interact with player."""
from typing import Tuple, List, Set, Dict
from const import PLAYER_SHIFT, LAST_ON_PATH, END_PROGRESS
from piece import Piece
from player import Player
from util import progress_to_position
from action import roll_dice
def set_board(num_players: int, num_pieces: int):
    """Create the initial board: one Piece per (player, piece) pair."""
    return [
        Piece(player, piece)
        for player in range(num_players)
        for piece in range(num_pieces)
    ]
def do_move(
    status: List[Piece],
    player: Player,
    piece_to_move: int,
    dice: int,
    player_shift: int = PLAYER_SHIFT,
    last_on_path: int = LAST_ON_PATH,
) -> bool:
    """Check if the move is valid. If it is, perform it. Returns whether it is valid."""
    valid_nums = {p.index() for p in get_valid_moves(player, dice, status)}
    if piece_to_move not in valid_nums:
        return False
    matches = [
        p
        for p in status
        if p.player() == player.number and p.index() == piece_to_move
    ]
    assert len(matches) == 1
    piece = matches[0]
    if piece.progress() != 0:
        piece.move(dice)
    elif dice == 6:
        # leaving home advances the piece onto its first path cell
        piece.move(1)
    else:
        raise ValueError("Home can only be left with a full dice")
    # knock opponents off a shared path cell
    if 0 < piece.progress() <= last_on_path:
        for other in others_on_position(
            status, player.number, piece.position(), player_shift, last_on_path
        ):
            other.send_home()
    return True
def choose_first(players: Set[Player]) -> Player:
    """Roll dice until a single highest roller remains; that player starts.

    A score of 0 means the player has not drawn yet; -1 means the player
    is already out of the drawing.
    """
    best = 0
    score = [0] * len(players)
    while True:
        for idx, val in enumerate(score):
            if val != -1:
                # TODO: Decouple this logic from console interaction
                score[idx] = roll_dice(player_num=idx)
        best = max(score)
        if score.count(best) == 1:
            break
        # tie: top rollers re-draw (reset to 0), everyone else is out (-1)
        score = [0 if val == best else -1 for val in score]
    return Player.get(score.index(best))
def check_endgame(status: List[Piece]) -> bool:
    """Check if any of the players has ended the game.

    A player has ended the game when every one of their pieces is finished.
    >>> check_endgame([Piece(0, 0),Piece(0, 1),Piece(0, 2),Piece(0, 3),\
                       Piece(1, 0),Piece(1, 1),Piece(1, 2),Piece(1, 3),\
                       Piece(2, 0),Piece(2, 1),Piece(2, 2),Piece(2, 3),\
                       Piece(3, 0),Piece(3, 1),Piece(3, 2),Piece(3, 3)])
    False
    >>> check_endgame([Piece(0, 0),Piece(0, 1),Piece(0, 2),Piece(0, 3),\
                       Piece(1, 0),Piece(1, 1),Piece(1, 2),Piece(1, 3),\
                       Piece(2, 0),Piece(2, 1),Piece(2, 2),Piece(2, 3),\
                       Piece(3, 0, 62),Piece(3, 1, 62),Piece(3, 2, 62),Piece(3, 3, 62)])
    True
    >>> check_endgame([Piece(0, 0),Piece(0, 1),Piece(0, 2),Piece(0, 3),\
                       Piece(1, 0, 62),Piece(1, 1, 62),Piece(1, 2, 62),Piece(1, 3, 61),\
                       Piece(2, 0, 60),Piece(2, 1, 60),Piece(2, 2, 60),Piece(2, 3, 60),\
                       Piece(3, 0, 10),Piece(3, 1, 20),Piece(3, 2, 30),Piece(3, 3, 40)])
    False
    A real game we played that had a bug:
    >>> check_endgame([Piece(0,0,62),Piece(0,1,57),Piece(0,2,62),Piece(0,3,21),\
                       Piece(1,0,28),Piece(1,1,62),Piece(1,2,62),Piece(1,3,62),\
                       Piece(2,0,62),Piece(2,1,20),Piece(2,2,58),Piece(2,3,62),\
                       Piece(3,0,62),Piece(3,1,62),Piece(3,2,0),Piece(3,3,62)])
    False
    """
    player_finished: Dict[int, bool] = {}
    for piece in status:
        player = piece.player()
        # a player stays "finished" only while every seen piece is finished;
        # dict.get(..., True) replaces the manual membership check
        player_finished[player] = player_finished.get(player, True) and piece.is_finished()
    return any(player_finished.values())
def __coord_in_home(piece: Piece) -> Tuple[int, int]:
    """Board coordinates of a piece sitting in its home zone.

    Determined solely by player and piece number; progress is always 0.
    >>> __coord_in_home(Piece(0, 0))
    (5, 2)
    >>> __coord_in_home(Piece(1, 1))
    (2, 13)
    >>> __coord_in_home(Piece(2, 2))
    (13, 15)
    >>> __coord_in_home(Piece(3, 3))
    (16, 6)
    """
    assert piece.progress() == 0
    zone_row, zone_col = ((5, 2), (2, 12), (12, 15), (15, 5))[piece.player()]
    off_row, off_col = ((0, 0), (0, 1), (1, 0), (1, 1))[piece.index()]
    return (zone_row + off_row, zone_col + off_col)
def __coord_on_path(piece: Piece) -> Tuple[int, int]:
    """Draws on path: if two or more pieces on same cell, instead of number,
    draws a placeholder, which does not need to show piece number
    Logic split this in 4 different cases, determined by player offset.
    Parameter piece doesn't influence logic.
    Player Progress to Position conversion:
    P0 1..56: (pos)
    P1 1..42: (p_num * shift + pos)
       43..56: (p_num * shift + pos) % end_progress
    P2 1..28: (p_num * shift + pos)
       29..56: (p_num * shift + pos) % end_progress
    P3 1..14: (p_num * shift + pos)
       15..56: (p_num * shift + pos) % end_progress
    Test player 1:
    >>> __coord_on_path(Piece(0, 1, 1))
    (8, 2)
    Test player 2:
    >>> __coord_on_path(Piece(1, 1, 1))
    (2, 10)
    Test player 3:
    >>> __coord_on_path(Piece(2, 1, 1))
    (10, 16)
    Test player 4:
    >>> __coord_on_path(Piece(3, 1, 1))
    (16, 8)
    Test path wrap:
    >>> __coord_on_path(Piece(3, 1, 56))
    (16, 9)
    Test overlap (deliberately disabled doctest, kept for documentation):
    >> __coord_on_path(Piece(2, 1, 17))
    (10, 14)
    """
    assert 1 <= piece.progress() <= LAST_ON_PATH and 0 <= piece.player() <= 3
    # Lookup table mapping the shared-path cell number (1..56) to a board
    # (row, col); index 0 is a dummy entry so positions index directly.
    # The entries trace the path clockwise around the cross-shaped board.
    POSITION_TO_ROWCOL: Tuple[Tuple[int, int], ...] = (
        (0, 0),
        (8, 2),
        (8, 3),
        (8, 4),
        (8, 5),
        (7, 5),
        (6, 5),
        (5, 5),
        (5, 6),
        (5, 7),
        (5, 8),
        (4, 8),
        (3, 8),
        (2, 8),
        (2, 9),
        (2, 10),
        (3, 10),
        (4, 10),
        (5, 10),
        (5, 11),
        (5, 12),
        (5, 13),
        (6, 13),
        (7, 13),
        (8, 13),
        (8, 14),
        (8, 15),
        (8, 16),
        (9, 16),
        (10, 16),
        (10, 15),
        (10, 14),
        (10, 13),
        (11, 13),
        (12, 13),
        (13, 13),
        (13, 12),
        (13, 11),
        (13, 10),
        (14, 10),
        (15, 10),
        (16, 10),
        (16, 9),
        (16, 8),
        (15, 8),
        (14, 8),
        (13, 8),
        (13, 7),
        (13, 6),
        (13, 5),
        (12, 5),
        (11, 5),
        (10, 5),
        (10, 4),
        (10, 3),
        (10, 2),
        (9, 2),
    )
    # piece.position() already folds in the player's path offset
    return POSITION_TO_ROWCOL[piece.position()]
def __coord_on_finish(piece: Piece) -> Tuple[int, int]:
    """Board coordinates of a piece on its finish lane (piece number irrelevant).
    >>> __coord_on_finish(Piece(0, 1, 57))
    (9, 3)
    >>> __coord_on_finish(Piece(0, 1, 61))
    (9, 7)
    >>> __coord_on_finish(Piece(1, 1, 57))
    (3, 9)
    >>> __coord_on_finish(Piece(2, 1, 58))
    (9, 14)
    >>> __coord_on_finish(Piece(3, 1, 59))
    (13, 9)
    >>> __coord_on_finish(Piece(3, 1, 61))
    (11, 9)
    """
    pos = piece.progress() - LAST_ON_PATH
    assert 0 < pos < 6
    player = piece.player()
    # players 0/2 finish along row 9, players 1/3 along column 9;
    # 15 - (pos - 1) simplifies to 16 - pos for the mirrored lanes
    if player == 0:
        return (9, pos + 2)
    if player == 2:
        return (9, 16 - pos)
    if player == 1:
        return (pos + 2, 9)
    if player == 3:
        return (16 - pos, 9)
    raise NotImplementedError()
def __coord_in_target(piece: Piece) -> Tuple[int, int]:
    """Draw in target positions: each piece has its location.
    Progress is always same, thus irrelevant
    >>> __coord_in_target(Piece(0, 0, 62))
    (7, 6)
    >>> __coord_in_target(Piece(1, 1, 62))
    (6, 11)
    >>> __coord_in_target(Piece(2, 2, 62))
    (11, 11)
    >>> __coord_in_target(Piece(3, 3, 62))
    (12, 8)
    """
    # use the shared END_PROGRESS constant instead of a duplicated magic 62
    # (put_piece_on_board routes here exactly when progress == END_PROGRESS)
    assert piece.progress() == END_PROGRESS
    zones = [(7, 6), (6, 10), (10, 11), (11, 7)]
    shift = [(0, 0), (0, 1), (1, 0), (1, 1)]
    return (
        zones[piece.player()][0] + shift[piece.index()][0],
        zones[piece.player()][1] + shift[piece.index()][1],
    )
def put_piece_on_board(piece: Piece) -> Tuple[int, int]:
    """Currently player is in [1..4], piece is in [0..3]. Do we need to change this?
    TODO: Refactor to implement startegy pattern
    """
    progress = piece.progress()
    # dispatch on the phase of the piece's journey
    if progress == 0:
        return __coord_in_home(piece)
    if 0 < progress <= LAST_ON_PATH:
        return __coord_on_path(piece)
    if LAST_ON_PATH < progress < END_PROGRESS:
        return __coord_on_finish(piece)
    if progress == END_PROGRESS:
        return __coord_in_target(piece)
    raise NotImplementedError()
def is_valid_move(
piece: Piece,
dice: int,
status: List[Piece],
player_shift: int = PLAYER_SHIFT,
last_on_path: int = LAST_ON_PATH,
end_progress: int = END_PROGRESS,
) -> bool:
"""
>>> p = Piece(1, 1); is_valid_move(p, 6, [p])
True
>>> p = Piece(1, 1); is_valid_move(p, 1, [p])
False
>>> p = Piece(1, 1, 1); is_valid_move(p, 1, [p])
True
>>> p = Piece(1, 1, 1); is_valid_move(p, 6, [p])
True
>> p = Piece(1, 1); is_valid_move(p, 6, [p, Piece(0, 0, 15)])
True
>>> p = Piece(1, 1); is_valid_move(p, 6, [p, Piece(0, 0, 15), Piece(0, 1, 15)])
False
>>> piece = Piece(0, 0, 58); is_valid_move(piece, 6, [piece])
False
>>> piece = Piece(1, 0, 0); is_valid_move(piece, 5, [piece])
False
>>> piece = Piece(2, 0, 28); is_valid_move(piece, 1, [piece, Piece(0, 0, 1), Piece(0, 1, 1)])
False
>>> p = Piece(0,0,0); is_valid_move(p, 6, [p, Piece(0,1,0), Piece(1,0,29), Piece(1,1,29)],28,56)
False
>>> p = Piece(0,1,0); is_valid_move(p, 6, [Piece(0,0,0), p, Piece(1,0,29), Piece(1,1,29)],28,56)
False
"""
if dice < 1 or dice > 6:
raise ValueError("Invalid dice: {}".format(dice))
# can exit from home?
pos = piece.progress()
if pos == 0:
if dice != 6:
return False
# Do other players block exit from home
expected = progress_to_position(piece.player(), 1, player_shift, last_on_path)
return 2 > len(
others_on_position(
status, piece.player(), expected, player_shift, last_on_path
)
)
if 0 < pos <= last_on_path:
if pos + dice > last_on_path:
return True
expected = progress_to_position(
piece.player(), pos + dice, player_shift, last_on_path
)
return 2 > len(
others_on_position(
status, piece.player(), | |
= win_project.filter() # very important not win_project.Filtering
'''
obj_tree = cloud_win.make_kdtree()
eu_cluster = cloud_win.make_EuclideanClusterExtraction()
eu_cluster.set_ClusterTolerance(width_grid)
eu_cluster.set_MinClusterSize(1)
eu_cluster.set_MaxClusterSize(6000)
eu_cluster.set_SearchMethod(obj_tree)
cluster_indices = eu_cluster.Extract()
if len(cluster_indices) > 1:
max_z = 1.0
for j, indices in enumerate(cluster_indices):
cloud_cluster = cloud_win.extract(indices, negative=False)
tmp_array = np.asarray(cloud_cluster)
height_cluster = np.mean(tmp_array,axis=0)[2]
if height_cluster < max_z:
max_z = 1.0 * height_cluster
cloud_win = cloud_cluster
'''
# print(cloud_win.width, cloud_win.size)
if cloud_win.size < thresh_pt_subgrid:
# near to be the boundary of point cloud, some subgrid is almost empty
# output_pc[2, y, x] = max_dis_project + np.random.random() * self.gripper_radius
output_pc[2, y, x] = 4.0 * self.gripper_radius
cnt_null += 1
# print("pos: ", output_pc[0, y, x], output_pc[1, y, x])
if cnt_null > int(0.9 * (float(resolution_pc) ** 2)):
print("{} number of null points are detected".format(cnt_null))
return False, np.zeros([1, 3], dtype=np.float32)
continue
tmp_array = (np.asarray(cloud_win)).astype(np.float64)
# select the point, which is the nearest one with gripper
# print(tmp_array.shape)
# tmp_array = tmp_array[tmp_array[:, 2].argsort(), :]
# output_pc[2, y, x] = tmp_array[0, 2]
tmp_z = np.mean(tmp_array, axis=0)[2]
output_pc[2, y, x] = tmp_z
output_pc = self.scale_obj * output_pc
rate_filling = 1.0 - float(cnt_null) / float(resolution_pc ** 2)
output_pc[1, 0, 0] = 1.0 * rate_filling
# for PyTorch, only float32 is avaliable for GPU acceleration
return True, output_pc.astype(np.float32)
    def generate_pc_PCL_vis(self, graspable,
                            mesh_gripper=None,
                            resolution=24,
                            flg_disturb_bd=False, noise_bd=0.005,
                            flg_random_rotZ=False):
        """
        FUNCTION: generate and visualize the point cloud of a single grasp
        candidate, based on the Point Cloud Library.
        NOTE(review): the grasp pose is hard-coded below (banana test case);
        this method is debug/visualization scaffolding, not production code.
        :param graspable: obj: `GraspableObject3D` the object to grasp
        :param resolution: int: resolution of point cloud
        :param flg_disturb_bd: bool: add disturbance before desampling
        :param noise_bd: float: noise level (metric: m)
        :param flg_random_rotZ: bool: if rotate the point cloud along with the Z-axis to improve the robustness
        :param mesh_gripper: mesh obj or None: check mesh.py
        :return: True; also writes 'test_pc.npy' and logs success/failure
        """
        '''
        print("####################################")
        print(graspable.mesh.num_vertices)
        tmp = graspable.mesh.normals
        print(graspable.mesh.normals)
        print("####################################")
        '''
        ax_z = np.array([0., 0., 1.])
        g_info = GraspInfo()
        c1 = Contact3D(graspable, g_info.pos_grasp, in_direction=None)
        '''For test'''
        # 002_master_can
        # g_info.pos_grasp = np.array([-0.045040, 0.030736, 0.063758]) # q = 0.94
        # g_info.pos_grasp = np.array([-0.06216502, 0.01049824, 0.10267749]) # q = 0.7770
        # g_info.pos_grasp = np.array([0.02034432, 0.01516858, 0.00304357]) # q = 0.6297
        # g_info.pos_grasp = np.array([0.01878754, -0.04554584, 0.10734783]) # q = 0.3784
        # g_info.pos_grasp = np.array([-0.04192688, -0.05332974, 0.00148679]) # q = 0.6651
        # 011_banana
        # NOTE(review): debug override -- replaces whatever GraspInfo() held,
        # so this method always visualizes the hard-coded banana grasp point
        g_info.pos_grasp = np.array([-0.01628514, 0.042222, 0.00409378]) # q = 0.6691
        c1 = Contact3D(graspable, g_info.pos_grasp, in_direction=None)
        # dir is towards to object (face inward)
        ''''''
        # WARNING: 'dir' shadows the builtin of the same name (kept as-is)
        dir, t1, t2 = c1.tangents()
        g_info.t1_grasp = 1.0 * t1
        g_info.t2_grasp = 1.0 * t2
        g_info.dir_grasp = 1.0 * dir
        '''
        g_info.dir_grasp = np.array([-0.87676113, 0.31158753, -0.36633745])# q = 0.3784
        g_info.t1_grasp = np.array([0.38914601, 0.90722539, -0.1597106])
        g_info.t2_grasp = np.array([0.2825868, -0.2825868, -0.91667301])
        '''
        '''
        g_info.dir_grasp = np.array([0.56594919, 0.666413, 0.48538152]) # q = 0.6651
        g_info.t1_grasp = np.array([0.79813762, -0.59039949, -0.1200199])
        g_info.t2_grasp = np.array([0.20658619, 0.45532642, -0.8660254])
        '''
        '''
        g_info.dir_grasp = np.array([0.86559391, -0.38507159, 0.32010476]) # q = 0.7770
        g_info.t1_grasp = np.array([0.28056871, 0.90244506, 0.32691606])
        g_info.t2_grasp = np.array([-0.41476305, -0.19316517, 0.88918999])
        '''
        '''
        g_info.dir_grasp = np.array([0.86559391, -0.38507159, 0.32010476]) # q = 0.7770
        g_info.t1_grasp = np.array([0.28056871, 0.90244506, 0.32691606])
        g_info.t2_grasp = np.array([-0.41476305, -0.19316517, 0.88918999])
        '''
        ''''''
        # banana
        # NOTE(review): second debug override -- the tangent frame computed by
        # c1.tangents() above is discarded in favor of these constants
        g_info.dir_grasp = np.array([0.66853772, -0.32575243, 0.66853772]) # q = 0.64
        g_info.t1_grasp = np.array([0.74367785, 0.29192584, -0.60143375])
        g_info.t2_grasp = np.array([0.55067649e-04, 8.99257838e-01, 4.37418302e-01])
        """ Test end"""
        #####################
        # crop the object surface around the grasp point, expressed in the
        # grasp coordinate system (GCS)
        _, surface_obj_GCS, _, _ = \
            DexterousVacuumPoint.crop_surface_grasp(contact_pt=c1,
                                                    direction=g_info.dir_grasp,
                                                    u1=g_info.t1_grasp,
                                                    u2=g_info.t2_grasp,
                                                    width_win=2 * self.gripper_radius,
                                                    depth_win=self.gripper_max_depth,
                                                    flg_exclude_opposite=True)
        surface_obj_GCS = surface_obj_GCS[:, 0:3]
        # rotate
        if flg_random_rotZ:
            rotate_z = 2 * np.pi * np.random.random()
            surface_grasp_GCS = math_robot.rot_along_axis(ax_z, rotate_z, surface_obj_GCS)
        else:
            surface_grasp_GCS = surface_obj_GCS
        # resample the cropped surface onto a regular (3, res, res) grid
        flg_success, final_pc = \
            self.desampling_pc(org_pc=surface_grasp_GCS,
                               resolution_pc=resolution,
                               scale_obj=1.0,
                               thresh_pt_subgrid=1,
                               flg_disturb_bd=flg_disturb_bd, noise_bd=noise_bd)
        # re-oder to top view
        min_z = np.min(final_pc[2, :, :])
        final_pc[2, :, :] = final_pc[2, :, :] - min_z
        '''
        max_z = np.max(final_pc[2, :, :])
        final_pc[2, :, :] = max_z - final_pc[2, :, :]
        '''
        gl_vis = GL_Visualizer()
        '''draw x, y, z axis
        ax_width = 5.0
        x_ax = np.array([.0, .0, .0, 1.0, .0, .0])
        gl_vis.draw_lines(x_ax, width=ax_width, num_color=1)
        y_ax = np.array([.0, .0, .0, .0, 1.0, .0])
        gl_vis.draw_lines(y_ax, width=ax_width, num_color=2)
        z_ax = np.array([.0, .0, .0, .0, .0, 1.0])
        gl_vis.draw_lines(z_ax, width=ax_width, num_color=4)
        '''
        # display the object mesh (scaled from metres to centimetres)
        gl_vis.display_mesh(100.0 * graspable.mesh.vertices, graspable.mesh.triangles,
                            graspable.mesh.normals)
        # gl_vis.draw_spheres(100.0 * g_info.pos_grasp, radius=0.4, num_color=1)
        '''
        offset_gripper = np.array([0., 0., -5.0]).reshape([1, 3]) # cm
        gripper_vertices = 0.1 * mesh_gripper.vertices + offset_gripper
        gripper_vertices = math_robot.transfer_CS_reverse(g_info.t1_grasp,
                                                          g_info.t2_grasp,
                                                          g_info.dir_grasp,
                                                          100.0*g_info.pos_grasp,
                                                          gripper_vertices)[:, 0:3]
        gripper_normals = mesh_gripper.normals
        gripper_normals = math_robot.transfer_CS_reverse(g_info.t1_grasp,
                                                         g_info.t2_grasp,
                                                         g_info.dir_grasp,
                                                         100.0*g_info.pos_grasp,
                                                         gripper_normals)[:, 0:3]
        gripper_normals = gripper_normals / np.linalg.norm(gripper_normals, axis=1).\
            reshape([gripper_normals.shape[0], 1])
        gl_vis.display_mesh(vertices=gripper_vertices, triangles=mesh_gripper.triangles,
                            v_normals=gripper_normals)
        '''
        # flatten the (3, H, W) grid into an (H*W, 3) point list for display
        tmp_pc = np.zeros([final_pc.shape[1] * final_pc.shape[2], 3])
        for y in range(final_pc.shape[1]):
            for x in range(final_pc.shape[2]):
                tmp_pc[final_pc.shape[1] * y + x, :] = np.array([final_pc[0, y, x],
                                                                 final_pc[1, y, x],
                                                                 final_pc[2, y, x]])
        # tmp_pc = tmp_pc + -0.005*np.array([0.0, 0.0, 1.0]).reshape([1, 3])
        tmp_pc[:, 2] = tmp_pc[:, 2] + min_z - 0.000
        # transform the sampled points back into the object frame
        tmp_pc = math_robot.transfer_CS_reverse(g_info.t1_grasp,
                                                g_info.t2_grasp,
                                                g_info.dir_grasp,
                                                g_info.pos_grasp,
                                                tmp_pc)[:, 0:3]
        gl_vis.draw_spheres(100.0*tmp_pc, radius=0.1, num_color=7)
        np.save('test_pc.npy', final_pc)
        print("visualization completed!")
        gl_vis.UI_lisener()
        str_time = datetime.datetime.now().strftime('%Y%m%d%H%M%S%f')
        if flg_success:
            logger.info("{}: Point cloud generation -> success.".format(str_time))
        else:
            logger.info("{}: Point cloud generation -> failed.".format(str_time))
        return True
def generate_pc_PCL(self, graspable, lst_grasp,
path_save='',
vis=False,
mesh_gripper=None,
resolution=24,
flg_disturb_bd=False, noise_bd=0.005,
flg_random_rotZ=False):
"""
FUNCTION: generate point cloud for a list of grasp candidate based on Point Cloud Libaray
:param graspable: obj: `GraspableObject3D` the object to grasp
:param lst_grasp: list: list of grasp candidate
:param path_save: str: path to save point cloud
:param resolution: int: resolution of point cloud
:param flg_disturb_bd: bool: add disturbance before desampling
:param noise_bd: float: noise level (metirc: m)
:param flg_random_rotZ: bool: if rotate the point cloud along with the Z-axis to improve the robustness
:param vis: bool:
:param mesh_gripper: mesh obj or None: check mesh.py
:return:
"""
'''
print("####################################")
print(graspable.mesh.num_vertices)
tmp = graspable.mesh.normals
print(graspable.mesh.normals)
print("####################################")
'''
ax_z = np.array([0., 0., 1.])
for cnt, g_info in enumerate(lst_grasp):
self.set_scale_obj(g_info.scale_obj)
c1 = Contact3D(graspable, g_info.pos_grasp, in_direction=None)
'''For test
# 002_master_can
# g_info.pos_grasp = np.array([-0.045040, 0.030736, 0.063758]) # q = 0.94
# g_info.pos_grasp = np.array([-0.06216502, 0.01049824, 0.10267749]) # q = 0.7770
# g_info.pos_grasp = np.array([0.02034432, 0.01516858, 0.00304357]) # q = 0.6297
g_info.pos_grasp = np.array([0.01878754, -0.04554584, 0.10734783]) # q = 0.3784
# g_info.pos_grasp = np.array([-0.04192688, -0.05332974, 0.00148679]) # q = 0.6651
# 011_banana
# g_info.pos_grasp = np.array([-0.00637234, -0.05888856, 0.01202402]) # q = 0.6691
c1 = Contact3D(graspable, g_info.pos_grasp, in_direction=None)
# dir is towards to object (face inward)
''''''
dir, t1, t2 = c1.tangents()
g_info.t1_grasp = 1.0 * t1
g_info.t2_grasp = 1.0 * t2
g_info.dir_grasp = 1.0 * dir
'''
'''
g_info.dir_grasp = np.array([-0.87676113, 0.31158753, -0.36633745])# q = 0.3784
g_info.t1_grasp = np.array([0.38914601, 0.90722539, -0.1597106])
g_info.t2_grasp = np.array([0.2825868, -0.2825868, -0.91667301])
'''
'''
g_info.dir_grasp = np.array([0.56594919, 0.666413, 0.48538152]) # q = 0.6651
g_info.t1_grasp = np.array([0.79813762, -0.59039949, -0.1200199])
g_info.t2_grasp = np.array([0.20658619, 0.45532642, -0.8660254])
'''
'''
g_info.dir_grasp = np.array([0.86559391, -0.38507159, 0.32010476]) # q = 0.7770
g_info.t1_grasp = np.array([0.28056871, 0.90244506, 0.32691606])
g_info.t2_grasp = np.array([-0.41476305, -0.19316517, 0.88918999])
'''
'''
g_info.dir_grasp = np.array([0.86559391, -0.38507159, 0.32010476]) # q = 0.7770
g_info.t1_grasp = np.array([0.28056871, 0.90244506, 0.32691606])
g_info.t2_grasp = np.array([-0.41476305, -0.19316517, 0.88918999])
'''
""" Test end"""
#####################
_, surface_obj_GCS, _, _ = \
DexterousVacuumPoint.crop_surface_grasp(contact_pt=c1,
direction=g_info.dir_grasp,
u1=g_info.t1_grasp,
u2=g_info.t2_grasp,
width_win=2 * self.gripper_radius,
depth_win=self.gripper_max_depth,
flg_exclude_opposite=True)
surface_obj_GCS = surface_obj_GCS[:, 0:3]
# rotate
if flg_random_rotZ:
rotate_z = 2 * np.pi * np.random.random()
surface_grasp_GCS = math_robot.rot_along_axis(ax_z, rotate_z, surface_obj_GCS)
else:
surface_grasp_GCS = surface_obj_GCS
'''
flg_rot_success, surface_grasp_GCS = \
self.rot_pt_xy_axes(org_pc=surface_obj_GCS,
rotate_x=g_info.rot_x, rotate_y=g_info.rot_y)
if not flg_rot_success:
logging.debug('Point cloud generation failed.')
return False
surface_grasp_GCS = surface_grasp_GCS[0:3, :].T
'''
flg_success, final_pc = \
self.desampling_pc(org_pc=surface_grasp_GCS,
resolution_pc=resolution,
scale_obj=self.scale_obj,
thresh_pt_subgrid=1,
flg_disturb_bd=flg_disturb_bd, noise_bd=noise_bd)
# re-oder to top view
min_z = np.min(final_pc[2, :, :])
final_pc[2, :, :] = final_pc[2, :, :] - min_z
'''
max_z = np.max(final_pc[2, :, :])
final_pc[2, :, :] = max_z - final_pc[2, :, :]
'''
if vis:
gl_vis = GL_Visualizer()
'''draw x, y, z axis
ax_width = 5.0
x_ax = np.array([.0, .0, .0, 1.0, .0, .0])
gl_vis.draw_lines(x_ax, width=ax_width, num_color=1)
y_ax = np.array([.0, .0, .0, .0, 1.0, .0])
gl_vis.draw_lines(y_ax, width=ax_width, num_color=2)
z_ax = np.array([.0, .0, .0, .0, .0, 1.0])
gl_vis.draw_lines(z_ax, width=ax_width, num_color=4)
'''
gl_vis.display_mesh(100.0 * graspable.mesh.vertices, graspable.mesh.triangles,
graspable.mesh.normals)
gl_vis.draw_spheres(100.0 * g_info.pos_grasp, radius=0.4, num_color=1)
offset_gripper = np.array([0., 0., -5.0]).reshape([1, 3]) # cm
gripper_vertices = 0.1 * mesh_gripper.vertices + offset_gripper
gripper_vertices = math_robot.transfer_CS_reverse(g_info.t1_grasp,
g_info.t2_grasp,
g_info.dir_grasp,
100.0*g_info.pos_grasp,
gripper_vertices)[:, 0:3]
| |
in content:
self.log.info("X509 cert is enforced in CE")
def check_roles_base_access(self):
""" from Watson, roles base access for admin should not in in CE """
if self.user_add is None:
self.fail("We need to pass user name (user_add) to run this test. ")
if self.user_role is None:
self.fail("We need to pass user roles (user_role) to run this test. ")
api = self.rest.baseUrl + "settings/rbac/users/" + self.user_add
self.log.info("url to run this test: %s" % api)
""" add admin user """
param = "name=%s&roles=%s" % (self.user_add, self.user_role)
try:
status, content, header = self.rest._http_request(api, 'PUT', param)
except Exception as ex:
if ex:
print(ex)
if status:
self.fail("CE should not allow to add admin users")
else:
self.log.info("roles base is enforced in CE! ")
def check_root_certificate(self):
""" from watson, ce should not see root certificate
manual test:
curl -u Administrator:password -X GET
http://localhost:8091/pools/default/certificate """
api = self.rest.baseUrl + "pools/default/certificate"
try:
status, content, header = self.rest._http_request(api, 'GET')
except Exception as ex:
if ex:
print(ex)
if status:
self.fail("CE should not see root certificate!")
elif b'requires enterprise edition' in content:
self.log.info("root certificate is enforced in CE! ")
def check_settings_audit(self):
""" from watson, ce should not set audit
manual test:
curl -u Administrator:password -X GET
http://localhost:8091/settings/audit """
api = self.rest.baseUrl + "settings/audit"
try:
status, content, header = self.rest._http_request(api, 'GET')
except Exception as ex:
if ex:
print(ex)
if status:
self.fail("CE should not allow to set audit !")
elif b'requires enterprise edition' in content:
self.log.info("settings audit is enforced in CE! ")
def check_infer(self):
""" from watson, ce should not see infer
manual test:
curl -H "Content-Type: application/json" -X POST
-d '{"statement":"infer `bucket_name`;"}'
http://localhost:8093/query/service
test params: new_services=kv-index-n1ql,default_bucket=False """
self.rest.force_eject_node()
self.sleep(7, "wait for node reset done")
self.rest.init_node()
bucket = "default"
self.rest.create_bucket(bucket, ramQuotaMB=200)
api = self.rest.query_baseUrl + "query/service"
param = urllib.parse.urlencode({"statement":"infer `%s` ;" % bucket})
try:
status, content, header = self.rest._http_request(api, 'POST', param)
json_parsed = json.loads(content)
except Exception as ex:
if ex:
print(ex)
if json_parsed["status"] == "success":
self.fail("CE should not allow to run INFER !")
elif json_parsed["status"] == "fatal":
self.log.info("INFER is enforced in CE! ")
def check_query_monitoring(self):
self.rest.force_eject_node()
self.sleep(7, "wait for node reset done")
self.rest.init_node()
bucket = "default"
self.rest.create_bucket(bucket, ramQuotaMB=200)
api = self.rest.query_baseUrl + "admin/settings"
param = {'profile': 'phases'}
try:
status, content, header = self.rest._http_request(api, 'POST', json.dumps(param))
except Exception as ex:
if ex:
print(ex)
if status:
self.fail("CE should not be allowed to do query monitoring !")
elif b'Profiling is an EE only feature' in content:
self.log.info("Query monitoring is enforced in CE! ")
def check_flex_index(self):
""" from watson, ce should not see infer
manual test:
curl -H "Content-Type: application/json" -X POST
-d '{"statement":"infer `bucket_name`;"}'
http://localhost:8093/query/service
test params: new_services=kv-index-n1ql,default_bucket=False """
self.rest.force_eject_node()
self.sleep(7, "wait for node reset done")
self.rest.init_node()
bucket = "default"
self.rest.create_bucket(bucket, ramQuotaMB=200)
api = self.rest.query_baseUrl + "query/service"
param = urllib.parse.urlencode({"statement":"SELECT META(d).id FROM `%s` AS d USE INDEX (USING FTS) WHERE d.f2 = 100;" % bucket})
try:
status, content, header = self.rest._http_request(api, 'POST', param)
json_parsed = json.loads(content)
except Exception as ex:
if ex:
print(ex)
if json_parsed["status"] == "success":
self.fail("CE should not allow to run flex index !")
elif json_parsed["status"] == "fatal":
self.log.info("Flex index is enforced in CE! ")
def check_index_partitioning(self):
self.rest.force_eject_node()
self.sleep(7, "wait for node reset done")
self.rest.init_node()
bucket = "default"
self.rest.create_bucket(bucket, ramQuotaMB=200)
api = self.rest.query_baseUrl + "query/service"
param = urllib.parse.urlencode(
{"statement": "CREATE INDEX idx ON `%s`(id) PARTITION BY HASH(META().id)" % bucket})
try:
status, content, header = self.rest._http_request(api, 'POST', param)
json_parsed = json.loads(content)
except Exception as ex:
if ex:
print(ex)
if json_parsed["status"] == "success":
self.fail("CE should not be allowed to run index partitioning !")
elif json_parsed["status"] == "fatal":
self.log.info("Index partitioning is enforced in CE! ")
def check_query_cost_based_optimizer(self):
self.rest.force_eject_node()
self.sleep(7, "wait for node reset done")
self.rest.init_node()
bucket = "default"
self.rest.create_bucket(bucket, ramQuotaMB=200)
api = self.rest.query_baseUrl + "query/service"
param = urllib.parse.urlencode(
{"statement": "UPDATE STATISTICS for `hotel` (type, address, city, country, free_breakfast, id, phone);"})
try:
status, content, header = self.rest._http_request(api, 'POST', param)
json_parsed = json.loads(content)
except Exception as ex:
if ex:
print(ex)
if json_parsed["status"] == "success":
self.fail("CE should not be allowed to run CBO !")
elif json_parsed["status"] == "fatal":
self.log.info("CBO is enforced in CE! ")
def check_query_window_functions(self):
    """Verify window functions (EE-only) are rejected by the CE query service.

    Runs a ``CUME_DIST() OVER (...)`` query; CE must return status
    "fatal", success means the feature leaked into CE.
    """
    self.rest.force_eject_node()
    self.sleep(7, "wait for node reset done")
    self.rest.init_node()
    bucket = "default"
    self.rest.create_bucket(bucket, ramQuotaMB=200)
    api = self.rest.query_baseUrl + "query/service"
    param = urllib.parse.urlencode(
        {"statement": "SELECT d.id, d.destinationairport, CUME_DIST() OVER (PARTITION BY d.destinationairport \
                       ORDER BY d.distance NULLS LAST) AS `rank` \
                       FROM `%s` AS d \
                       WHERE d.type='route' \
                       LIMIT 7;" % bucket})
    # Pre-bind so a failed request cannot cause a NameError below
    # (defect in the original code).
    json_parsed = {}
    try:
        status, content, header = self.rest._http_request(api, 'POST', param)
        json_parsed = json.loads(content)
    except Exception as ex:
        print(ex)
    if json_parsed.get("status") == "success":
        self.fail("CE should not be allowed to use window functions !")
    elif json_parsed.get("status") == "fatal":
        self.log.info("Window functions is enforced in CE! ")
def check_auto_complete(self):
    # Placeholder: blocking of this feature in CE is not implemented yet,
    # so there is nothing to verify here.
    """ this feature has not complete to block in CE """
""" Check new features from spock start here """
def check_cbbackupmgr(self):
    """cbbackupmgr must not ship with CE from the spock release onwards."""
    if self.cb_version[:5] in COUCHBASE_FROM_SPOCK:
        binary_name = "cbbackupmgr" + self.file_extension
        self.log.info("check if cbbackupmgr in bin dir in CE")
        # Presence of the binary means the EE-only tool leaked into CE.
        if self.remote.file_exists(self.bin_path, binary_name):
            self.fail("cbbackupmgr should not in bin dir of CE")
        self.log.info("cbbackupmgr is enforced in CE")
    self.remote.disconnect()
def test_max_ttl_bucket(self):
    """Ensure CE rejects bucket creation with the EE-only --max-ttl option.

    From vulcan, EE buckets have a --max-ttl option which CE must not
    accept, via either the REST API or couchbase-cli.
    This test must pass default_bucket=False.
    """
    if self.cb_version[:5] not in COUCHBASE_FROM_VULCAN:
        self.log.info("This test only for vulcan and later")
        return
    cmd = 'curl -X POST -u Administrator:password \
          http://{0}:8091/pools/default/buckets \
          -d name=bucket0 \
          -d maxTTL=100 \
          -d ramQuotaMB=100 '.format(self.master.ip)
    if self.cli_test:
        cmd = "{0}couchbase-cli bucket-create -c {1}:8091 --username Administrator \
              --password password --bucket bucket0 --bucket-type couchbase \
              --bucket-ramsize 512 --bucket-replica 1 --bucket-priority high \
              --bucket-eviction-policy fullEviction --enable-flush 0 \
              --enable-index-replica 1 --max-ttl 200".format(self.bin_path,
                                                             self.master.ip)
    conn = RemoteMachineShellConnection(self.master)
    try:
        output, error = conn.execute_command(cmd)
        conn.log_command_output(output, error)
        mesg = "Max TTL is supported in enterprise edition only"
        if self.cli_test:
            mesg = "Maximum TTL can only be configured on enterprise edition"
        if output and mesg not in str(output[0]):
            self.fail("max ttl feature should not in Community Edition")
        # The rejected bucket must not exist afterwards.
        buckets = RestConnection(self.master).get_buckets()
        for bucket in buckets:
            self.log.info("bucket in cluster: {0}".format(bucket.name))
            if bucket.name == "bucket0":
                self.fail("Failed to enforce feature max ttl in CE.")
    finally:
        # Original code leaked the shell connection whenever self.fail
        # raised above; always release it.
        conn.disconnect()
def test_setting_audit(self):
    """CE must reject audit configuration (EE-only since vulcan 5.5.0)."""
    if self.cb_version[:5] not in COUCHBASE_FROM_VULCAN:
        self.log.info("This test only for vulcan and later")
        return
    cmd = 'curl -X POST -u Administrator:password \
          http://{0}:8091/settings/audit \
          -d auditdEnabled=true '.format(self.master.ip)
    if self.cli_test:
        cmd = "{0}couchbase-cli setting-audit -c {1}:8091 -u Administrator \
            -p password --audit-enabled 1 --audit-log-rotate-interval 604800 \
            --audit-log-path /opt/couchbase/var/lib/couchbase/logs --set"\
            .format(self.bin_path, self.master.ip)
    conn = RemoteMachineShellConnection(self.master)
    try:
        output, error = conn.execute_command(cmd)
        conn.log_command_output(output, error)
        mesg = "This http API endpoint requires enterprise edition"
        if output and mesg not in str(output[0]):
            self.fail("setting-audit feature should not in Community Edition")
    finally:
        # Original code leaked the shell connection when self.fail raised;
        # always release it.
        conn.disconnect()
def test_setting_autofailover_enterprise_only(self):
"""
CE does not allow set auto failover if disk has issue
and failover group from vulcan 5.5.0
"""
if self.cb_version[:5] not in COUCHBASE_FROM_VULCAN:
self.log.info("This test only for vulcan and later")
return
self.failover_disk_period = self.input.param("failover_disk_period", False)
self.failover_server_group = self.input.param("failover_server_group", False)
failover_disk_period = ""
if self.failover_disk_period:
if self.cli_test:
failover_disk_period = "--failover-data-disk-period 300"
else:
failover_disk_period = "-d failoverOnDataDiskIssues[timePeriod]=300"
failover_server_group = ""
if self.failover_server_group and self.cli_test:
failover_server_group = "--enable-failover-of-server-group 1"
cmd = 'curl -X POST -u Administrator:password \
http://{0}:8091/settings/autoFailover -d enabled=true -d timeout=120 \
-d maxCount=1 \
-d failoverOnDataDiskIssues[enabled]=true {1} \
-d failoverServerGroup={2}'.format(self.master.ip, failover_disk_period,
self.failover_server_group)
if self.cli_test:
cmd = "{0}couchbase-cli setting-autofailover -c {1}:8091 \
-u Administrator -p password \
--enable-failover-on-data-disk-issues 1 {2} {3} "\
.format(self.bin_path, self.master.ip,
failover_disk_period,
failover_server_group)
conn = RemoteMachineShellConnection(self.master)
output, error = conn.execute_command(cmd)
conn.log_command_output(output, error)
mesg = "Auto failover on Data Service disk issues can only be " + \
"configured on enterprise edition"
if not self.cli_test:
if self.failover_disk_period or \
self.failover_server_group:
if output and not error:
self.fail("setting autofailover disk issues feature\
should not in Community Edition")
else:
if self.failover_server_group:
mesg = "--enable-failover-of-server-groups can only be " + \
"configured on enterprise edition"
if output and mesg not | |
cc * (2-cc) # to be removed in future
c1a = c1 * (1 - (1 - hsig**2) * cc * (2 - cc)) # adjust for variance loss
if 11 < 3: # diagnostic data
# self.out['hsigcount'] += 1 - hsig
if not hsig:
self.hsiglist.append(self.countiter)
if 11 < 3: # diagnostic message
if not hsig:
print(str(self.countiter) + ': hsig-stall')
if 11 < 3: # for testing purpose
hsig = 1 # TODO:
# put correction term, but how?
if self.countiter == 1:
print('hsig=1')
self.pc = (1 - cc) * self.pc + hsig * (
(cc * (2 - cc) * self.sp.weights.mueff)**0.5 / self.sigma
/ cmean) * (self.mean - mold) / self.sigma_vec.scaling
# covariance matrix adaptation/udpate
pop_zero = pop - mold
if c1a + cmu > 0:
# TODO: make sure cc is 1 / N**0.5 rather than 1 / N
# TODO: simplify code: split the c1 and cmu update and call self.sm.update twice
# caveat: for this the decay factor ``c1_times_delta_hsigma - sum(weights)`` should be zero in the second update
sampler_weights = [c1a] + [cmu * w for w in sp.weights]
if len(pop_zero) > len(sp.weights):
sampler_weights = (
sampler_weights[:1+sp.weights.mu] +
(len(pop_zero) - len(sp.weights)) * [0] +
sampler_weights[1+sp.weights.mu:])
if 'inc_cmu_pos' in self.opts['vv']:
sampler_weights = np.asarray(sampler_weights)
sampler_weights[sampler_weights > 0] *= 1 + self.opts['vv']['inc_cmu_pos']
# logger = logging.getLogger(__name__) # "global" level needs to be DEBUG
# logger.debug("w[0,1]=%f,%f", sampler_weights[0],
# sampler_weights[1]) if self.countiter < 2 else None
# print(' injected solutions', tuple(self._injected_solutions_archive.values()))
for i, x in enumerate(pop):
try:
self._injected_solutions_archive.pop(x)
# self.gp.repaired_solutions.pop(x)
except KeyError:
pass # print(i)
else:
# print(i + 1, '-th weight set to zero')
sampler_weights[i + 1] = 0 # weight zero is for pc
for s in list(self._injected_solutions_archive):
if self._injected_solutions_archive[s]['iteration'] < self.countiter - 2:
warnings.warn("""orphanated injected solution %s
This could be a bug in the calling order/logics or due to
a too small popsize used in `ask()` or when only using
`ask(1)` repeatedly. Please check carefully.
In case this is desired, the warning can be surpressed with
``warnings.simplefilter("ignore", cma.evolution_strategy.InjectionWarning)``
""" % str(self._injected_solutions_archive.pop(s)),
InjectionWarning)
assert len(sampler_weights) == len(pop_zero) + 1
if flg_diagonal:
self.sigma_vec.update(
[self.sm.transform_inverse(self.pc)] +
list(self.sm.transform_inverse(pop_zero /
(self.sigma * self.sigma_vec.scaling))),
array(sampler_weights) / 2) # TODO: put the 1/2 into update function!?
else:
self.sm.update([(c1 / (c1a + 1e-23))**0.5 * self.pc] + # c1a * pc**2 gets c1 * pc**2
list(pop_zero / (self.sigma * self.sigma_vec.scaling)),
sampler_weights)
if any(np.asarray(self.sm.variances) < 0):
raise RuntimeError("""A sampler variance has become
negative after update, this must be considered as a bug.
Variances `self.sm.variances`=%s""" % str(self.sm.variances))
self._updateBDfromSM(self.sm)
# step-size adaptation, adapt sigma
# in case of TPA, function_values[0] and [1] must reflect samples colinear to xmean - xmean_old
try:
self.sigma *= self.adapt_sigma.update2(self,
function_values=function_values)
except (NotImplementedError, AttributeError):
self.adapt_sigma.update(self, function_values=function_values)
if 11 < 3 and self.opts['vv']:
if self.countiter < 2:
print('constant sigma applied')
print(self.opts['vv']) # N=10,lam=10: 0.8 is optimal
self.sigma = self.opts['vv'] * self.sp.weights.mueff * sum(self.mean**2)**0.5 / N
if any(self.sigma * self.sigma_vec.scaling * self.dC**0.5 <
np.asarray(self.opts['minstd'])):
self.sigma = max(np.asarray(self.opts['minstd']) /
(self.sigma_vec * self.dC**0.5))
assert all(self.sigma * self.sigma_vec * self.dC**0.5 >=
(1-1e-9) * np.asarray(self.opts['minstd']))
elif any(self.sigma * self.sigma_vec.scaling * self.dC**0.5 >
np.asarray(self.opts['maxstd'])):
self.sigma = min(np.asarray(self.opts['maxstd']) /
self.sigma_vec * self.dC**0.5)
# g = self.countiter
# N = self.N
# mindx = eval(self.opts['mindx'])
# if utils.is_str(self.opts['mindx']) else self.opts['mindx']
if self.sigma * min(self.D) < self.opts['mindx']: # TODO: sigma_vec is missing here
self.sigma = self.opts['mindx'] / min(self.D)
if self.sigma > 1e9 * self.sigma0:
alpha = self.sigma / max(self.sm.variances)**0.5
if alpha > 1:
self.sigma /= alpha**0.5 # adjust only half
self.opts['tolupsigma'] /= alpha**0.5 # to be compared with sigma
self.sm *= alpha
self._updateBDfromSM()
# TODO increase sigma in case of a plateau?
# Uncertainty noise measurement is done on an upper level
# move mean into "feasible preimage", leads to weird behavior on
# 40-D tablet with bound 0.1, not quite explained (constant
# dragging is problematic, but why doesn't it settle), still a bug?
if 11 < 3 and isinstance(self.boundary_handler, BoundTransform) \
and not self.boundary_handler.is_in_bounds(self.mean):
self.mean = array(self.boundary_handler.inverse(
self.boundary_handler.repair(self.mean, copy_if_changed=False),
copy_if_changed=False), copy=False)
if _new_injections:
self.pop_injection_directions = self._prepare_injection_directions()
if self.opts['verbose'] > 4 and self.countiter < 3 and type(self.adapt_sigma) is not CMAAdaptSigmaTPA and len(self.pop_injection_directions):
utils.print_message(' %d directions prepared for injection %s' %
(len(self.pop_injection_directions),
"(no more messages will be shown)" if
self.countiter == 2 else ""))
self.number_of_injections_delivered = 0
self.pop = [] # remove this in case pop is still needed
# self.pop_sorted = []
self._flgtelldone = True
try: # shouldn't fail, but let's be nice to code abuse
self.timer.pause()
except AttributeError:
utils.print_warning("""
"timer" attribute not found, probably because `ask` was never called.
Timing is likely to work only until `tell` is called (again), because
`tic` will never be called again afterwards.
""",
'tell', 'CMAEvolutionStrategy',
self.countiter)
self.timer = utils.ElapsedWCTime()
self.more_to_write.check()
# end tell()
def inject(self, solutions, force=None):
    """Inject a list of one or several genotypic candidate solution(s).

    This is the preferable way to pass outside proposal solutions into
    `CMAEvolutionStrategy`; passing (bad) solutions directly via `tell`
    is likely to fail when ``CMA_active is True`` as by default.

    Unless ``force`` is truthy, each solution is stored as a direction
    relative to the current distribution mean and later realized in
    `ask_geno`/`ask`.  With ``force`` truthy the solution is queued to
    be used verbatim (the update in `tell` may still trim it).

    ``None`` entries are skipped; every other entry must have length
    ``self.N``.  To be called before `ask` or after `tell`, repeatedly
    if desired.

    Details: injected solutions are not used in the "active" update
    which would decrease variance in the covariance matrix in this
    direction.
    """
    for candidate in solutions:
        if candidate is None:
            continue
        if len(candidate) != self.N:
            raise ValueError('method `inject` needs a list or array'
                             + (' each el with dimension (`len`) %d' % self.N))
        vec = array(candidate, copy=False, dtype=float)
        if force:
            self.pop_injection_solutions.append(vec)
        else:
            self.pop_injection_directions.append(vec - self.mean)
@property
def result(self):
    """Return a `CMAEvolutionStrategyResult` `namedtuple`.

    :See: `cma.evolution_strategy.CMAEvolutionStrategyResult`
        or try ``help(...result)`` on the ``result`` property
        of an `CMAEvolutionStrategy` instance or on the
        `CMAEvolutionStrategyResult` instance itself.

    Falls back to a plain tuple when the namedtuple type is not
    available in the current namespace.
    """
    # TODO: how about xcurrent?
    # Effective coordinate-wise standard deviations of the distribution.
    stds = (self.gp.scales * self.sigma * self.sigma_vec.scaling *
            self.dC**0.5)
    data = self.best.get() + (  # (x, f, evals) triple
        self.countevals,
        self.countiter,
        self.gp.pheno(self.mean, into_bounds=self.boundary_handler.repair),
        stds,
        self.stop())
    try:
        return CMAEvolutionStrategyResult(*data)
    except NameError:
        return data
def result_pretty(self, number_of_runs=0, time_str=None,
                  fbestever=None):
    """pretty print result.

    Prints the termination condition(s), the final and best-ever
    f-values, and the incumbent solution with its coordinate-wise
    standard deviations (truncated to 8 entries for larger dimensions).

    Returns `result` of ``self``.
    """
    if fbestever is None:
        fbestever = self.best.f
    # e.g. " after 3 restarts (1h20)" appended to each termination line
    s = (' after %i restart' + ('s' if number_of_runs > 1 else '')) \
        % number_of_runs if number_of_runs else ''
    for k, v in self.stop().items():
        print('termination on %s=%s%s' % (k, str(v), s +
              (' (%s)' % time_str if time_str else '')))
    print('final/bestever f-value = %e %e' % (self.best.last.f,
                                              fbestever))
    if self.N < 9:
        # small dimension: print the full vectors
        print('incumbent solution: ' + str(list(self.gp.pheno(self.mean, into_bounds=self.boundary_handler.repair))))
        print('std deviation: ' + str(list(self.sigma * self.sigma_vec.scaling * np.sqrt(self.dC) * self.gp.scales)))
    else:
        # larger dimension: show only the first 8 coordinates
        print('incumbent solution: %s ...]' % (str(self.gp.pheno(self.mean, into_bounds=self.boundary_handler.repair)[:8])[:-1]))
        print('std deviations: %s ...]' % (str((self.sigma * self.sigma_vec.scaling * np.sqrt(self.dC) * self.gp.scales)[:8])[:-1]))
    return self.result
def pickle_dumps(self):
    """return ``pickle.dumps(self)``,

    if necessary remove unpickleable (and also unnecessary) local
    function reference beforehand.

    The resulting `bytes` string-object can be saved to a file like::

        import cma
        es = cma.CMAEvolutionStrategy(3 * [1], 1)
        es.optimize(cma.ff.elli, iterations=22)
        filename = 'es-pickle-test'
        open(filename, 'wb').write(es.pickle_dumps())

    and recovered like::

        import pickle
        es = pickle.loads(open(filename, 'rb').read())
        es.optimize(cma.ff.elli, iterations=22)  # continue optimizing
    """
    import pickle
    try:
        # Usually fine; fails e.g. when a local (unpicklable) objective
        # function was assigned to the instance.
        return pickle.dumps(self)
    except Exception:  # was a bare `except:`, which also swallowed SystemExit etc.
        # Temporarily detach the unpicklable attribute and retry,
        # restoring it afterwards whether or not the retry succeeds.
        self.objective_function, fun = None, self.objective_function
        try:
            return pickle.dumps(self)
        finally:  # reset changed attribute either way
            self.objective_function = fun
def repair_genotype(self, x, copy_if_changed=False):
"""make sure that solutions fit to the sample distribution.
| |
<reponame>jaratma/astro-nex
# -*- coding: utf-8 -*-
import pango
from datetime import datetime
from .. utils import format_latitud, format_longitud
from .. drawing.roundedcharts import RadixChart,HouseChart,NodalChart
from .. drawing.aspects import SimpleAspectManager
from .. countries import cata_reg
from .. boss import boss
# Shared application state: `boss` mediates between the GUI and the
# calculation backend; `curr.curr_chart` is the chart being displayed.
curr = boss.get_state()
# Aspect colour table; filled in by SheetMixin.__init__ from the active
# zodiac configuration.
aspcol = None
# Human-readable (gettext-translated) titles for every sheet/plot kind,
# keyed by the internal sheet identifier.  Relies on a globally
# installed gettext `_` function.
labels = { 'draw_nat': _('Radix'), 'draw_nod': _('Carta Nodal'),
    'draw_house':_('Carta de las Casas'),'draw_radsoul':_('Carta Clics Radix alma'),
    'draw_local': _('Carta Local'), 'draw_soul': _('Carta del Alma'),
    'draw_dharma': _('Carta del Dharma'),
    'draw_prof': _('Carta del Perfil'), 'draw_int': _('Carta de Integracion'),
    'draw_single': _('Carta Clics Individual'), 'dat_nat': _('Datos Radix'),
    'dat_nod': _('Datos C. Nodal'), 'dat_house': _('Datos C. Casas'),
    'prog_nat': _('Progresion E. Radix'), 'prog_nod': _('Progresion E. Nodal'),
    'prog_local': _('Progresion E. Local'), 'prog_soul': _('Progresion E. Alma'),
    'bio_nat': _('Biografia radix'), 'bio_nod': _('Biografia nodal'), 'bio_soul': _('Biografia alma'),
    'dyn_cuad': _('Cuadrantes dinamicos'),'dyn_cuad2': _('Clic Cuadrantes dinamicos'),
    'click_hh': _('Clics Casas-Casas'), 'click_nn': _('Clics Nodal-Nodal'),
    'click_nh': _('Clics Nodal-Casas'), 'click_hn': _('Clics Casas-Nodal'),
    'click_rr': _('Clics Radix-Radix'),
    'click_bridge': _('Clic puente'),'dyn_stars':_('Estrellas dinamicas'),
    'draw_transits':_('Transitos'), 'rad_and_transit': _('Radix con transitos'),
    'subject_click':_('Clic subjetivo'), 'ascent_star' :_('Estrella de ascenso'),
    'compo_one': _('Comparacion pareja 1'), 'compo_two': _('Comparacion pareja 2'),
    'click_counterpanel': _('Contra horoscopos'), 'paarwabe_plot': _('Panal de la pareja'),
    'crown_comp': _('Uniones corona'),
    'wundersensi_star': _('Estrella maravillosa'),'polar_star':_('Analisis de polaridades'),
    'comp_pe': _('PE de la pareja'),
    'solar_rev': _('Revolución Solar'), 'sec_prog': 'Progresión Secundaria' }
# NOTE(review): these appear to partition planet pairs (conj_class) and
# single planets (plan_class) into the three green/blue/red groups used
# by data_aspects's conjunction counting — confirm the classification.
conj_class = [[(0,4),(0,6),(0,9),(3,4),(3,9),(4,6),(4,7),(4,8),(4,9),(6,7),(7,9)],
    [(0,1),(0,3),(0,5),(0,7),(0,8),(1,3),(1,5),(1,6),(2,6),(2,7),(3,5),(3,6),(3,9),(5,7),(5,9),(6,8),(6,9)],
    [(0,2),(1,2),(1,4),(1,7),(1,8),(1,9),(2,3),(2,4),(2,5),(2,8),(2,9),(3,7),(4,5),(5,6),(5,8),(7,8),(8,9)]]
plan_class = [[0,4,9],[3,6,7],[1,2,5,8]]
# Weekday names indexed by datetime.weekday() (Monday == 0).
weekdays = [_('Lunes'),_('Martes'),_('Miercoles'),_('Jueves'),_('Viernes'),_('Sabado'),_('Domingo')]
def get_personal_info():
    """Return (name, datestr, loc, geodat) strings for the current chart.

    datestr is "<weekday> <date> <time>", geodat combines the formatted
    longitude and latitude, loc is "city (country)".
    """
    from .. utils import parsestrtime
    chart = curr.curr_chart
    date, time = parsestrtime(chart.date)
    # Date string comes as day/month/year; reverse for datetime().
    day, month, year = date.split('/')
    born = datetime(int(year), int(month), int(day))
    datestr = " ".join([weekdays[born.weekday()], date, time])
    geodat = format_longitud(chart.longitud) + " " + format_latitud(chart.latitud)
    name = chart.first + " " + chart.last
    loc = chart.city + " (" + t(chart.country) + ")"
    return name, datestr, loc, geodat
class SheetMixin(object):
    """Mixin that renders data sheets (planet tables, aspect grids,
    dynamic calculations, ray charts) onto a pango-cairo context.

    Expects the host class to also provide ``self.opts`` (font options)
    and ``self.goodwill`` (aspect filter flag), which are read by the
    drawing methods below.
    """
    # Glyph codes in the "Astro-Nex" symbol font: presumably zodiac
    # signs (zodlet), planets (planlet) and aspect symbols (asplet) —
    # planlet/asplet usage below confirms the latter two.
    zodlet = ( 'q','w','e','r','t','y','u','i','o','p','a','s' )
    planlet = [ 'd','f','h','j','k','l','g','z','x','c','v' ]
    asplet = ( '1','2','3','4','5','6','7','6','5','4','3','2' )
    def __init__(self,zodiac):
        # Cache the aspect colours module-wide and keep references to
        # the zodiac/planet colour tables used by all drawing helpers.
        global aspcol
        aspcol = zodiac.get_aspcolors()
        self.zod = zodiac.zod
        self.plan = zodiac.plan
def dat_nat(self,cr,w,h,chartob):
    """Draw the 'Datos Radix' sheet: planet/house table, aspect grid,
    dynamic calculations and ray chart.

    cr: pango-cairo context; w, h: page size (unused here);
    chartob: chart object, re-typed to RadixChart for aspect lookup.
    """
    cr.save()
    cr.set_line_width(0.5)
    font = pango.FontDescription(self.opts.font)
    font.set_size(9*pango.SCALE)
    self.main_labels(cr,font)
    cr.set_source_rgb(0,0,0)
    # Horizontal rule under the page header.
    cr.move_to(50,80)
    cr.line_to(540,80)
    cr.stroke()
    self.data_planh(cr,font)
    # Force the chart class so aspect computation uses radix semantics.
    chartob.__class__ = RadixChart
    self.data_aspects(cr,chartob)
    self.data_dyncalc(cr)
    self.data_rays(cr)
    cr.restore()
def dat_house(self,cr,w,h,chartob):
    """Draw the 'Datos C. Casas' sheet: house-planet table and aspect grid.

    cr: pango-cairo context; w, h: page size (unused here);
    chartob: chart object, re-typed to HouseChart for aspect lookup.
    """
    cr.save()
    cr.set_line_width(0.5)
    font = pango.FontDescription(self.opts.font)
    font.set_size(9*pango.SCALE)
    self.main_labels(cr,font)
    cr.set_source_rgb(0,0,0)
    # Horizontal rule under the page header.
    cr.move_to(50,80)
    cr.line_to(540,80)
    cr.stroke()
    self.data_house_planh(cr)
    # Force the chart class so aspect computation uses house semantics.
    chartob.__class__ = HouseChart
    self.data_aspects(cr,chartob,kind='house')
    cr.restore()
def dat_nod(self,cr,w,h,chartob):
    """Draw the 'Datos C. Nodal' sheet: nodal planet table and aspect grid.

    cr: pango-cairo context; w, h: page size (unused here);
    chartob: chart object, re-typed to NodalChart for aspect lookup.
    """
    cr.save()
    cr.set_line_width(0.5)
    font = pango.FontDescription(self.opts.font)
    font.set_size(9*pango.SCALE)
    self.main_labels(cr,font)
    cr.set_source_rgb(0,0,0)
    # Horizontal rule under the page header.
    cr.move_to(50,80)
    cr.line_to(540,80)
    cr.stroke()
    self.data_nodal_planh(cr)
    # Force the chart class/name so aspect computation uses nodal semantics.
    chartob.__class__ = NodalChart
    chartob.name = 'nodal'
    self.data_aspects(cr,chartob,kind='nodal')
    cr.restore()
def data_rays(self,cr):
    """Draw the 'Carta de Rayos' box: the ray values computed by
    ``curr.curr_chart.rays_calc()`` laid out at the bottom-right of the
    sheet (origin hm/vm).
    """
    layout = cr.create_layout()
    font = pango.FontDescription(self.opts.font)
    font.set_size(9*pango.SCALE)
    layout.set_font_description(font)
    rays = curr.curr_chart.rays_calc()
    # Top-left corner of the rays box.
    hm = 426; vm = 620
    cr.set_source_rgb(0,0,0.4)
    layout.set_markup("<u>"+ _("Carta de Rayos")+"</u>")
    cr.move_to(hm+10,vm+16)
    cr.show_layout(layout)
    # rays[0] in bold, rays[1..3] plain, rays[4..7] parenthesized.
    layout.set_markup("<b>%s</b>" % (rays[0]))
    cr.move_to(hm+10,vm+38)
    cr.show_layout(layout)
    layout.set_markup("")
    layout.set_text("%s %s %s" % (rays[1], rays[2], rays[3]))
    cr.move_to(hm+26,vm+38)
    cr.show_layout(layout)
    layout.set_text("(%s %s %s) %s" % (rays[4], rays[5], rays[6], rays[7]))
    cr.move_to(hm+63,vm+38)
    cr.show_layout(layout)
def data_dyncalc(self,cr):
    """Draw the 'Calculos dinamicos' table: a 3-row by 8-column grid of
    totals per quality (Card/Fija/Mut) and element (Fuego/Tierra/Aire/
    Agua), taken from ``curr.curr_chart.dyncalc_list()``.
    """
    layout = cr.create_layout()
    font = pango.FontDescription(self.opts.font)
    font.set_size(9*pango.SCALE)
    layout.set_font_description(font)
    whole = curr.curr_chart.dyncalc_list()
    # Table origin and cell pitch.
    hm = 50; vm = 680
    ho = 44; vo = 20
    # Per-column colours: fixed colours for totals/qualities, then the
    # four element colours from the zodiac configuration.
    cols=[(0,0,0),(0.6,0,0),(0,0,0.5),(0,0.5,0),
          self.zod[0].col,self.zod[1].col,
          self.zod[2].col,self.zod[3].col]
    # NOTE(review): tcols is never used in this method.
    tcols=[self.zod[0].col,self.zod[1].col,
           self.zod[2].col,self.zod[3].col]
    cr.set_source_rgb(0,0,0.4)
    layout.set_markup("<u>"+_("Calculos dinamicos")+"</u>")
    cr.move_to(50,636)
    cr.show_layout(layout)
    layout.set_markup("")
    texts = ("Total"," "+_("Card")," "+_("Fija")," "+_("Mut"), _("Fuego")," "+_("Tierra")," "+_("Aire"),_("Agua"))
    for i in range(len(texts)):
        # Element columns (i > 3) get an extra 18px offset.
        o = [0,18][i>3]
        layout.set_text(texts[i])
        cr.move_to(50+(44*i)+o,660)
        cr.set_source_rgb(*cols[i])
        cr.show_layout(layout)
    font = pango.FontDescription("Monospace")
    font.set_size(9*pango.SCALE)
    layout.set_font_description(font)
    # Cell values: 8 columns x 3 rows, right-justified to 3 characters.
    for i in range(8):
        for j in(0,1,2):
            o = [0,18][i>3]
            cr.move_to(hm+8+o+ho*i,vm+vo*j)
            cr.set_source_rgb(*cols[i])
            text ="%s" % whole[j][i]
            layout.set_text(text.rjust(3,' '))
            cr.show_layout(layout)
    # Grid rules around the table.
    cr.set_source_rgb(0,0,0)
    cr.set_line_width(0.4)
    cr.move_to(50,676)
    cr.line_to(410,676)
    cr.move_to(50,716)
    cr.line_to(410,716)
    cr.move_to(90,658)
    cr.line_to(90,736)
    cr.move_to(222,676)
    cr.line_to(222,716)
    cr.move_to(240,676)
    cr.line_to(240,716)
    cr.stroke()
    # Small 's'/'c' row labels between the two middle rules.
    font = pango.FontDescription(self.opts.font)
    font.set_size(8*pango.SCALE)
    layout.set_font_description(font)
    cr.set_source_rgb(0.2,0.2,0.4)
    cr.move_to(228,681)
    layout.set_text(_('s'))
    cr.show_layout(layout)
    cr.move_to(228,701)
    layout.set_text(_('c'))
    cr.show_layout(layout)
def data_aspects(self,cr,chartob,kind='radix'):
    """Draw the 'Tabla de aspectos' block: a triangular planet x planet
    aspect grid with per-aspect strength digits, followed by the aspect
    counts per colour group and the slope classification summary.

    kind: which aspect set of the current chart to use
    ('radix', 'house' or 'nodal' — see callers).
    """
    cr.save()
    aspects = curr.curr_chart.aspects(kind)
    # Grid origin and cell pitch.
    hm = 50; vm = 325
    ho = 44; vo = 20
    layout = cr.create_layout()
    font = pango.FontDescription(self.opts.font)
    font.set_size(9*pango.SCALE)
    layout.set_font_description(font)
    cr.set_source_rgb(0,0,0.4)
    layout.set_markup("<u>"+_("Tabla de aspectos")+"</u>")
    cr.move_to(50,305)
    cr.show_layout(layout)
    layout.set_markup("")
    # Grid rules: 12 horizontal and 12 vertical lines.
    cr.set_source_rgb(0,0,0)
    cr.set_line_width(0.4)
    for i in range(12):
        cr.move_to(hm,vm+vo*i)
        cr.line_to(hm+484,vm+vo*i)
        cr.stroke()
    for i in range(12):
        cr.move_to(hm+ho*i,vm)
        cr.line_to(hm+ho*i,vm+vo*11)
        cr.stroke()
    # Row headers: planet glyphs in their own colours.
    font = pango.FontDescription("Astro-Nex")
    font.set_size(11*pango.SCALE)
    layout.set_font_description(font)
    for i in range(10):
        cr.move_to(hm+15,vm+2+vo*(i+1))
        colp = self.plan[i].col
        cr.set_source_rgb(*colp)
        text ="%s" % self.planlet[i]
        layout.set_text(text)
        cr.layout_path(layout)
        cr.fill()
        cr.new_path()
    # Column headers (planets 1..10).
    hmm = hm + 16
    for i in range(1,11):
        cr.move_to(hmm+ho*i,vm+2)
        colp = self.plan[i].col
        cr.set_source_rgb(*colp)
        text ="%s" % self.planlet[i]
        layout.set_text(text)
        cr.layout_path(layout)
        cr.fill()
        cr.new_path()
    # Count aspects per type; conjunctions (a == 0) are classified into
    # the three colour groups via conj_class / plan_class.
    asp_count = [0]*12
    conj_count = [0]*3
    layout = cr.create_layout()
    for i in range(len(aspects)):
        asp = aspects[i]
        # Skip "goodwill" aspects unless the goodwill filter is enabled.
        if not self.goodwill and asp['gw']:
            continue
        a = asp['a']
        if a > 0:
            asp_count[a] += 1
        else:
            for ij,cj in enumerate(conj_class):
                if (asp['p1'],asp['p2']) in cj:
                    conj_count[ij] += 1
                    break
            else:
                for il,pl in enumerate(plan_class):
                    if (asp['p1']) in pl:
                        conj_count[il] += 1
                        break
        # Map the two strength factors f1/f2 in (0, 1] to a single digit
        # 1..9 ('' outside range); smaller factor -> larger digit.
        f1 = int(10-asp['f1']*10)
        if f1 > 9: f1 = 9;
        elif f1 < 0: f1 = ' '
        elif f1 == 0: f1 = 1
        f2 = int(10-asp['f2']*10)
        if f2 > 9: f2 = 9
        elif f2 < 0: f2 = ' '
        elif f2 == 0: f2 = 1
        # Aspect glyph in its aspect colour at cell (p2, p1).
        font = pango.FontDescription("Astro-Nex")
        font.set_size(11*pango.SCALE)
        layout.set_font_description(font)
        cr.move_to(hmm+ho*asp['p2'],vm+2+vo*(asp['p1']+1))
        cr.set_source_rgb(*aspcol[asp['a']])
        text ="%s" % self.asplet[asp['a']]
        layout.set_text(text)
        cr.layout_path(layout)
        cr.fill()
        cr.new_path()
        # Strength digits to the left of the glyph.
        font = pango.FontDescription('Monospace')
        font.set_size(8*pango.SCALE)
        layout.set_font_description(font)
        cr.move_to(hmm-9+ho*asp['p2'],vm+4+vo*(asp['p1']+1))
        text = "%s %s" % (f1,f2)
        layout.set_text(text)
        cr.layout_path(layout)
        cr.fill()
        cr.new_path()
    # Summary block: aspect glyphs per colour group with their counts.
    vo = 556
    font = pango.FontDescription("Astro-Nex")
    font.set_size(10*pango.SCALE)
    layout.set_font_description(font)
    cr.move_to(54,vo)
    cr.set_source_rgb(*aspcol[1])
    layout.set_text(self.asplet[1])
    cr.layout_path(layout)
    cr.move_to(100,vo)
    layout.set_text(self.asplet[5])
    cr.layout_path(layout)
    if conj_count[2]:
        cr.move_to(144,vo)
        layout.set_text(self.asplet[0])
        cr.layout_path(layout)
    cr.fill()
    cr.set_source_rgb(*aspcol[2])
    cr.move_to(54,vo+20)
    layout.set_text(self.asplet[2])
    cr.layout_path(layout)
    cr.move_to(100,vo+20)
    layout.set_text(self.asplet[4])
    cr.layout_path(layout)
    if conj_count[1]:
        cr.move_to(144,vo+20)
        layout.set_text(self.asplet[0])
        cr.layout_path(layout)
    cr.fill()
    cr.set_source_rgb(*aspcol[3])
    cr.move_to(54,vo+40)
    layout.set_text(self.asplet[3])
    cr.layout_path(layout)
    cr.move_to(100,vo+40)
    layout.set_text(self.asplet[6])
    cr.layout_path(layout)
    if conj_count[0]:
        cr.move_to(144,vo+40)
        layout.set_text(self.asplet[0])
        cr.layout_path(layout)
    cr.fill()
    # Numeric counts next to the glyphs (complementary aspects are
    # summed pairwise, e.g. 1+11, 2+10, ...).
    font = pango.FontDescription('Monospace')
    font.set_size(8*pango.SCALE)
    layout.set_font_description(font)
    cr.set_source_rgb(0,0,0.4)
    cr.move_to(80,vo)
    layout.set_text(str(asp_count[1]+asp_count[11]))
    cr.layout_path(layout)
    cr.move_to(124,vo)
    layout.set_text(str(asp_count[5]+asp_count[7]))
    cr.layout_path(layout)
    cr.move_to(80,vo+20)
    layout.set_text(str(asp_count[2]+asp_count[10]))
    cr.layout_path(layout)
    cr.move_to(124,vo+20)
    layout.set_text(str(asp_count[4]+asp_count[8]))
    cr.layout_path(layout)
    cr.move_to(80,vo+40)
    layout.set_text(str(asp_count[3]+asp_count[9]))
    cr.layout_path(layout)
    cr.move_to(124,vo+40)
    layout.set_text(str(asp_count[6]))
    cr.layout_path(layout)
    if conj_count[2]:
        cr.move_to(164,vo)
        layout.set_text(str(conj_count[2]))
        cr.layout_path(layout)
    if conj_count[1]:
        cr.move_to(164,vo+20)
        layout.set_text(str(conj_count[1]))
        cr.layout_path(layout)
    if conj_count[0]:
        cr.move_to(164,vo+40)
        layout.set_text(str(conj_count[0]))
        cr.layout_path(layout)
    cr.fill()
    # Per-colour totals and the grand total in a box.
    green = asp_count[1]+asp_count[11]+asp_count[5]+asp_count[7]+conj_count[2]
    blue = asp_count[2]+asp_count[10]+asp_count[4]+asp_count[8]+conj_count[1]
    red = asp_count[3]+asp_count[6]+asp_count[9]+conj_count[0]
    cr.move_to(182,vo)
    layout.set_text('= '+str(green))
    cr.layout_path(layout)
    cr.move_to(182,vo+20)
    layout.set_text('= '+str(blue))
    cr.layout_path(layout)
    cr.move_to(182,vo+40)
    layout.set_text('= '+str(red))
    cr.layout_path(layout)
    cr.fill()
    cr.move_to(218,vo+20)
    layout.set_text(str(red+blue+green))
    cr.layout_path(layout)
    cr.fill()
    cr.rectangle(216,vo+16,16,16)
    cr.stroke()
    # Slope classification: horizontal / diagonal / vertical line counts
    # per colour, derived from the strongest aspect chain.
    pl = chartob.get_planets()
    am = SimpleAspectManager()
    asp_for_slop = am.strong_chain(pl)
    slopeob = chartob.slope_classify(asp_for_slop)
    #print slopeob.__dict__
    #if kind != 'radix':
    #    return
    # Legend: one horizontal, one diagonal and one vertical stroke per
    # colour group.
    cr.set_line_width(0.85)
    cr.set_source_rgb(*aspcol[1])
    cr.move_to(300,vo+5)
    cr.line_to(310,vo+5)
    cr.stroke()
    cr.move_to(300,vo+30)
    cr.line_to(310,vo+20)
    cr.stroke()
    cr.move_to(305,vo+50)
    cr.line_to(305,vo+40)
    cr.stroke()
    cr.set_source_rgb(*aspcol[2])
    cr.move_to(350,vo+5)
    cr.line_to(360,vo+5)
    cr.stroke()
    cr.move_to(350,vo+30)
    cr.line_to(360,vo+20)
    cr.stroke()
    cr.move_to(355,vo+50)
    cr.line_to(355,vo+40)
    cr.stroke()
    cr.set_source_rgb(*aspcol[3])
    cr.move_to(400,vo+5)
    cr.line_to(410,vo+5)
    cr.stroke()
    cr.move_to(400,vo+30)
    cr.line_to(410,vo+20)
    cr.stroke()
    cr.move_to(405,vo+50)
    cr.line_to(405,vo+40)
    cr.stroke()
    # Slope counts and row totals (h*=horizontal, d*=diagonal,
    # v*=vertical; g/b/r = green/blue/red).
    cr.set_line_width(0.5)
    cr.set_source_rgb(0,0,0.4)
    cr.move_to(325,vo)
    layout.set_text(str(slopeob.hg))
    cr.layout_path(layout)
    cr.move_to(375,vo)
    layout.set_text(str(slopeob.hb))
    cr.layout_path(layout)
    cr.move_to(425,vo)
    layout.set_text(str(slopeob.hr))
    cr.layout_path(layout)
    cr.move_to(445,vo)
    layout.set_text('= '+str(slopeob.hr+slopeob.hg+slopeob.hb))
    cr.layout_path(layout)
    cr.move_to(325,vo+20)
    layout.set_text(str(slopeob.dg))
    cr.layout_path(layout)
    cr.move_to(375,vo+20)
    layout.set_text(str(slopeob.db))
    cr.layout_path(layout)
    cr.move_to(425,vo+20)
    layout.set_text(str(slopeob.dr))
    cr.layout_path(layout)
    cr.move_to(445,vo+20)
    layout.set_text('= '+str(slopeob.dr+slopeob.dg+slopeob.db))
    cr.layout_path(layout)
    cr.move_to(325,vo+40)
    layout.set_text(str(slopeob.vg))
    cr.layout_path(layout)
    cr.move_to(375,vo+40)
    layout.set_text(str(slopeob.vb))
    cr.layout_path(layout)
    cr.move_to(425,vo+40)
    layout.set_text(str(slopeob.vr))
    cr.layout_path(layout)
    cr.move_to(445,vo+40)
    layout.set_text('= '+str(slopeob.vr+slopeob.vg+slopeob.vb))
    cr.layout_path(layout)
    cr.fill()
    cr.restore()
def data_nodal_planh(self,cr):
    """Draw the nodal planet table: for each planet, its degree/minute
    within its house (counted from the first house cusp) and the house
    number in roman numerals.
    """
    h = ["I","II","III","IV","V","VI","VII","VIII","IX","X","XI","XII"]
    # Work on a copy; slot 10 is replaced by the first house cusp.
    plan = curr.curr_chart.planets[:]
    cusp = plan[10]
    plan[10] = curr.curr_chart.houses[0]
    cr.set_source_rgb(0,0,0.4)
    layout = cr.create_layout()
    font = pango.FontDescription(self.opts.font)
    font.set_size(9*pango.SCALE)
    layout.set_font_description(font)
    taba = pango.TabArray(1,True)
    taba.set_tab(0,pango.TAB_LEFT,120)
    layout.set_tabs(taba)
    layout.set_markup("<u>"+_("Planetas")+"</u>\t<u>"+_("Casa")+"</u>")
    cr.move_to(50,86)
    cr.show_layout(layout)
    layout.set_markup("")
    taba.set_tab(0,pango.TAB_LEFT,98)
    layout.set_tabs(taba)
    for i in range(len(plan)):
        # Angular distance from the cusp, normalized to [0, 360).
        deg = plan[i] - cusp
        if deg > 0: deg = 360 - deg
        else: deg = abs(deg)
        # 30 degrees per house; remainder is the position inside it.
        house = int(deg/30)
        deg -= house*30
        d = int(deg)
        m = int(60*(deg-d))
        d = str(d).rjust(2,'0')
        m = str(m).rjust(2,'0')
        res = u"%s\u00b0 %s\u00b4\t%s" % (d,m,h[house%12])
        cr.move_to(74,105+i*16)
        layout.set_text(res)
        cr.layout_path(layout)
        cr.fill()
        cr.new_path()
    # Planet glyph column in the symbol font.
    font = pango.FontDescription("Astro-Nex")
    font.set_size(11*pango.SCALE)
    layout.set_font_description(font)
    for i in range(11):
        cr.move_to(50,105+i*16)
        colp = self.plan[i].col
        cr.set_source_rgb(*colp)
        text ="%s" % self.planlet[i]
        layout.set_text(text)
        cr.layout_path(layout)
        cr.fill()
        cr.new_path()
def data_house_planh(self,cr):
    """Draw the house planet table: degree/minute, house (roman numeral)
    and zone for each planet, from
    ``curr.curr_chart.housepos_and_sector()``.
    """
    hh = ["I","II","III","IV","V","VI","VII","VIII","IX","X","XI","XII"]
    # Each entry: (degree, minute, house index, zone).
    pl = curr.curr_chart.housepos_and_sector()
    cr.set_source_rgb(0,0,0.4)
    layout = cr.create_layout()
    font = pango.FontDescription(self.opts.font)
    font.set_size(9*pango.SCALE)
    layout.set_font_description(font)
    taba = pango.TabArray(2,True)
    taba.set_tab(0,pango.TAB_LEFT,100)
    taba.set_tab(1,pango.TAB_LEFT,150)
    layout.set_tabs(taba)
    layout.set_markup("<u>"+_("Planetas")+"</u>\t<u>"+_("Casa")+"</u>\t<u>"+_("Zona")+"</u>")
    cr.move_to(50,86)
    cr.show_layout(layout)
    layout.set_markup("")
    # NOTE(review): `signs` and `text` are computed but never used here.
    signs = curr.curr_chart.which_all_signs()
    text = ""
    # Narrower tab stops for the data rows than for the header.
    taba = pango.TabArray(2,True)
    taba.set_tab(0,pango.TAB_LEFT,90)
    taba.set_tab(1,pango.TAB_LEFT,140)
    layout.set_tabs(taba)
    cr.set_source_rgb(0,0,0)
    for i in range(len(pl)):
        l = str(pl[i][0]).rjust(2,'0')
        m = str(pl[i][1]).rjust(2,'0')
        h = hh[pl[i][2]]
        z = pl[i][3]
        res = u"%s\u00b0 %s\u00b4\t%s\t%s" % (l,m,h,z)
        cr.move_to(70,105+i*16)
        layout.set_text(res)
        cr.layout_path(layout)
        cr.fill()
        cr.new_path()
    # Planet glyph column in the symbol font.
    font = pango.FontDescription("Astro-Nex")
    font.set_size(11*pango.SCALE)
    layout.set_font_description(font)
    for i in range(11):
        cr.move_to(50,105+i*16)
        colp = self.plan[i].col
        cr.set_source_rgb(*colp)
        text ="%s" % self.planlet[i]
        layout.set_text(text)
        cr.layout_path(layout)
        cr.fill()
        cr.new_path()
def data_planh(self,cr,font):
cr.set_source_rgb(0,0,0.4)
layout = cr.create_layout()
taba = pango.TabArray(4,True)
taba.set_tab(0,pango.TAB_LEFT,50)
taba.set_tab(1,pango.TAB_LEFT,200)
taba.set_tab(2,pango.TAB_LEFT,350)
taba.set_tab(3,pango.TAB_LEFT,466)
layout.set_tabs(taba)
layout.set_markup("\t<u>"+_("Planetas")+"</u>\t<u>"+_("Casas")+"</u>\t<u>"+_("P.Inv.")+"</u>\t<u>"+_("P.Rep.")+"</u>")
layout.set_font_description(font)
font.set_size(9*pango.SCALE)
cr.move_to(0,86)
cr.show_layout(layout)
layout.set_markup("")
signs = curr.curr_chart.which_all_signs()
text = ""
font = pango.FontDescription("Astro-Nex")
font.set_size(11*pango.SCALE)
layout.set_font_description(font)
for i in range(11):
cr.move_to(50,105+i*16)
colp = self.plan[i].col
cr.set_source_rgb(*colp)
text ="%s" % self.planlet[i]
layout.set_text(text)
cr.show_layout(layout)
cr.new_path()
cr.set_source_rgb(0,0,0)
font = pango.FontDescription(self.opts.font)
font.set_size(9*pango.SCALE)
layout.set_font_description(font)
for i in range(11):
cr.move_to(70,105+i*16)
text = signs[i]['deg']
layout.set_text(text)
| |
"""file_tools: All tools to load and save data
##################################
2018 01 31 Included Nion Swift files to be opened
major revision 2020 09 to include sidpy and pyNSID data formats
2022 change to ase format for structures: this changed the default unit of length to Angstrom!!!
##################################
"""
import numpy as np
import h5py
import os
import pickle
# For structure files of various flavor for instance POSCAR
import ase.io
import ipyfilechooser
# =============================================
# Include pycroscopy libraries #
# =============================================
import SciFiReaders
import pyNSID
import sidpy
import ipywidgets as widgets
from IPython.display import display
# =============================================
# Include pyTEMlib libraries #
# =============================================
import pyTEMlib.crystal_tools
from .config_dir import config_path
QT_available = False
try:
from pyTEMlib.file_tools_qt import *
QT_available = True
except ImportError:
print('QT Dialogs are not available')
Dimension = sidpy.Dimension
get_slope = sidpy.base.num_utils.get_slope
__version__ = '2022.3.3'
class FileWidget(object):
    """Widget to select directories or files from a list.

    Works in Google Colab.
    The widget converts the name of a nion file to the one used in Nion's
    Swift software, because the on-disk name is otherwise incomprehensible.

    Attributes
    ----------
    dir_name: str
        name of starting directory
    extension: list of str
        extensions of files to be listed in widget

    Methods
    -------
    get_directory
    set_options
    get_file_name

    Example
    -------
    >>from google.colab import drive
    >>drive.mount("/content/drive")
    >>file_list = pyTEMlib.file_tools.FileWidget()
    next code cell:
    >>dataset = pyTEMlib.file_tools.open_file(file_list.file_name)
    """

    def __init__(self, dir_name=None, extension=None):
        self.save_path = False
        self.dir_dictionary = {}
        self.dir_list = ['.', '..']
        self.display_list = ['.', '..']

        self.dir_name = '.'
        if dir_name is None:
            # No start directory given: reuse the last one and remember new ones.
            self.dir_name = get_last_path()
            self.save_path = True
        elif os.path.isdir(dir_name):
            self.dir_name = dir_name

        self.get_directory(self.dir_name)
        self.dir_list = ['.']
        # Avoid a mutable default argument; ['*'] means "list all files".
        self.extensions = ['*'] if extension is None else extension
        self.file_name = ''

        self.select_files = widgets.Select(
            options=self.dir_list,
            value=self.dir_list[0],
            description='Select file:',
            disabled=False,
            rows=10,
            layout=widgets.Layout(width='70%')
        )
        display(self.select_files)
        self.set_options()
        self.select_files.observe(self.get_file_name, names='value')

    def get_directory(self, directory=None):
        """Reset internal state to *directory* and list its raw entries."""
        self.dir_name = directory
        self.dir_dictionary = {}
        self.dir_list = []
        self.dir_list = ['.', '..'] + os.listdir(directory)

    def set_options(self):
        """Rebuild the Select widget for the currently selected entry.

        Directories are listed first (marked with '*'), then files with their
        human readable display strings from update_directory_list().
        Fix: the original had an if/else on ``'--' in dir_list[j]`` whose two
        branches were byte-identical (and indexed the unsorted os.listdir
        result with a sort position from a different list); it is collapsed
        to a single append.
        """
        self.dir_name = os.path.abspath(os.path.join(self.dir_name, self.dir_list[self.select_files.index]))
        file_dict = update_directory_list(self.dir_name)

        # Sub-directories first, alphabetically.
        sort = np.argsort(file_dict['directory_list'])
        self.dir_list = ['.', '..']
        self.display_list = ['.', '..']
        for j in sort:
            self.display_list.append(f" * {file_dict['directory_list'][j]}")
            self.dir_list.append(file_dict['directory_list'][j])

        # Then files, sorted by their display string; keep dir_list and
        # display_list index-aligned so get_file_name can map back.
        sort = np.argsort(file_dict['display_file_list'])
        for i, j in enumerate(sort):
            self.display_list.append(f" {i:3} {file_dict['display_file_list'][j]}")
            self.dir_list.append(file_dict['file_list'][j])

        self.dir_label = os.path.split(self.dir_name)[-1] + ':'
        self.select_files.options = self.display_list

    def get_file_name(self, b):
        """Observer callback: descend into directories, record chosen files."""
        if os.path.isdir(os.path.join(self.dir_name, self.dir_list[self.select_files.index])):
            self.set_options()

        elif os.path.isfile(os.path.join(self.dir_name, self.dir_list[self.select_files.index])):
            self.file_name = os.path.join(self.dir_name, self.dir_list[self.select_files.index])
class ChooseDataset(object):
    """Widget to select a sidpy dataset stored in an hdf5 (pyNSID) file.

    Parameters
    ----------
    input_object: sidpy.Dataset, h5py.Group or h5py.Dataset
        object from which the parent hdf5 channel group is determined
    show_dialog: bool
        whether to display the dropdown widget immediately
    """

    def __init__(self, input_object, show_dialog=True):
        if isinstance(input_object, sidpy.Dataset):
            if isinstance(input_object.h5_dataset, h5py.Dataset):
                self.current_channel = input_object.h5_dataset.parent
        elif isinstance(input_object, h5py.Group):
            self.current_channel = input_object
        elif isinstance(input_object, h5py.Dataset):
            self.current_channel = input_object.parent
        else:
            raise ValueError('Need hdf5 group or sidpy Dataset to determine image choices')
        self.dataset_names = []
        self.dataset_list = []
        self.dataset_type = None   # optional sidpy data-type filter
        self.dataset = None
        self.reader = SciFiReaders.NSIDReader(self.current_channel.file.filename)
        self.get_dataset_list()
        self.select_image = widgets.Dropdown(options=self.dataset_names,
                                             value=self.dataset_names[0],
                                             description='select dataset:',
                                             disabled=False,
                                             button_style='')
        if show_dialog:
            display(self.select_image)

        self.select_image.observe(self.set_dataset, names='value')
        self.set_dataset(0)
        # Default to the most recent (highest Log number) dataset.
        self.select_image.index = (len(self.dataset_names) - 1)

    def get_dataset_list(self):
        """Populate dataset_names/dataset_list, sorted by 'Log_###' number.

        Bug fixes relative to the original:
        - the filter compared against the non-existent attribute
          ``self.data_type`` instead of ``self.dataset_type``, raising
          AttributeError whenever a type filter was set;
        - it indexed the *unfiltered* dataset list with sort positions
          computed from the *filtered* order list, which could pick the
          wrong dataset once any entry was filtered out.
        """
        datasets = self.reader.read()
        selected = []
        order = []
        for dset in datasets:
            if self.dataset_type is None or dset.data_type == self.dataset_type:
                selected.append(dset)
                if 'Log' in dset.title:
                    position = dset.title.find('Log_') + 4
                    order.append(int(dset.title[position:position + 3]) + 1)
                else:
                    order.append(0)  # non-log datasets sort first
        for index in np.argsort(order):
            dset = selected[index]
            self.dataset_names.append('/'.join(dset.title.replace('-', '_').split('/')[-1:]))
            self.dataset_list.append(dset)

    def set_dataset(self, b):
        """Observer callback: make the chosen dataset current, shorten title."""
        index = self.select_image.index
        self.dataset = self.dataset_list[index]
        self.dataset.title = self.dataset.title.split('/')[-1]
def add_to_dict(file_dict, name):
    """Add one file's metadata entry to *file_dict*.

    Stores basename, extension, size (MiB) and a human readable display
    string under ``file_dict[name]``.  For Nion files ('.h5'/'.ndata') the
    display name is taken from the dataset title inside the file, because
    the on-disk name is not human readable.

    Bug fix: the original guarded the extension-specific branches with
    ``extension[0] == 'hf5'``, which is always False (``extension[0]`` is
    '.'), so the '.hf5' and Nion branches were unreachable.
    """
    full_name = os.path.join(file_dict['directory'], name)
    basename, extension = os.path.splitext(name)
    size = os.path.getsize(full_name) * 2 ** -20  # bytes -> MiB
    display_name = name
    if len(extension) == 0:
        display_file_list = f' {name} - {size:.1f} MB'
    elif extension in ['.hf5']:
        display_file_list = f" {name} - {size:.1f} MB"
    elif extension in ['.h5', '.ndata']:
        try:
            # Show the embedded dataset title for Nion files.
            reader = SciFiReaders.NionReader(full_name)
            dataset_nion = reader.read()
            display_name = dataset_nion.title
            display_file_list = f" {display_name}{extension} - {size:.1f} MB"
        except Exception:
            # Unreadable/corrupt Nion file: fall back to the plain file name.
            display_file_list = f" {name} - {size:.1f} MB"
    else:
        display_file_list = f' {name} - {size:.1f} MB'
    file_dict[name] = {'display_string': display_file_list, 'basename': basename, 'extension': extension,
                       'size': size, 'display_name': display_name}
def update_directory_list(directory_name):
    """Return (and cache) a dictionary describing *directory_name*.

    The returned dict contains:
      'directory'        : the directory path
      'file_list'        : file names in the directory
      'display_file_list': human readable strings (see add_to_dict)
      'directory_list'   : sub-directory names
    plus one metadata entry per file, keyed by file name.  The dict is
    cached in '.pyTEMlib.files.pkl' inside the directory so that expensive
    per-file metadata (e.g. Nion dataset titles) is only read once.
    """
    dir_list = os.listdir(directory_name)

    if '.pyTEMlib.files.pkl' in dir_list:
        # Load the cached metadata; tolerate the directory having been moved.
        with open(os.path.join(directory_name, '.pyTEMlib.files.pkl'), 'rb') as f:
            file_dict = pickle.load(f)
        if directory_name != file_dict['directory']:
            print('directory moved since last time read')
            file_dict['directory'] = directory_name
        # Hide the cache file itself from the listing.
        dir_list.remove('.pyTEMlib.files.pkl')
    else:
        file_dict = {'directory': directory_name}

    # add new files
    file_dict['file_list'] = []
    file_dict['display_file_list'] = []
    file_dict['directory_list'] = []

    for name in dir_list:
        if os.path.isfile(os.path.join(file_dict['directory'], name)):
            if name not in file_dict:
                # Only new files are analysed; cached entries are reused.
                add_to_dict(file_dict, name)
            file_dict['file_list'].append(name)
            file_dict['display_file_list'].append(file_dict[name]['display_string'])
        else:
            file_dict['directory_list'].append(name)

    remove_item = []

    # delete items of deleted files
    save_pickle = False
    for name in file_dict.keys():
        if name not in dir_list and name not in ['directory', 'file_list', 'directory_list', 'display_file_list']:
            remove_item.append(name)
        else:
            if 'extension' in file_dict[name]:
                # At least one real file entry exists, so (re)write the cache.
                # NOTE(review): for the bookkeeping keys this tests substring/
                # list membership, which is harmlessly False -- confirm intent.
                save_pickle = True
    for item in remove_item:
        file_dict.pop(item)

    if save_pickle:
        with open(os.path.join(file_dict['directory'], '.pyTEMlib.files.pkl'), 'wb') as f:
            pickle.dump(file_dict, f)
    return file_dict
####
# General Open and Save Methods
####
def get_last_path():
    """Return the directory of the file last opened (or '.' if unknown).

    The path is persisted in <config_path>/path.txt by save_path().
    Bug fix: the file name was previously built with a hard coded Windows
    separator (config_path + '\\path.txt'), which produced a wrong path on
    POSIX systems; os.path.join is portable.  The file is now also opened
    via a context manager so it is closed even on a read error.
    """
    try:
        with open(os.path.join(config_path, 'path.txt'), 'r') as fp:
            path = fp.read()
    except IOError:
        path = ''

    if len(path) < 2:
        path = '.'
    return path
def save_path(filename):
    """Persist the directory of *filename* for the next session.

    Writes the directory part of *filename* to <config_path>/path.txt
    (read back by get_last_path) and returns it; returns '.' without
    writing when *filename* is too short to contain a path.
    Bug fix: portable os.path.join instead of a hard coded Windows '\\'
    separator; the file is handled with a context manager.
    """
    if len(filename) > 1:
        path, fname = os.path.split(filename)
        with open(os.path.join(config_path, 'path.txt'), 'w') as fp:
            fp.write(path)
    else:
        path = '.'
    return path
"""
class open_file_dialog(ipyfilechooser.FileChooser):
def __init__(self, directory=None):
if directory is None:
directory = get_last_path()
super().__init__(directory)
self._use_dir_icons = True
def _apply_selection(self):
super()._apply_selection()
selected = os.path.join(
self._selected_path,
self._selected_filename
)
if os.path.isfile(selected):
self._label.value = self._LBL_TEMPLATE.format(
self._selected_filename,
'blue'
)
else:
self._label.value = self._LBL_TEMPLATE.format(
self._selected_filename,
'green'
)
save_path(selected)
def _set_form_values(self, path: str, filename: str):
""Set the form values.""
# Disable triggers to prevent selecting an entry in the Select
# box from automatically triggering a new event.
self._pathlist.unobserve(
self._on_pathlist_select,
names='value'
)
self._dircontent.unobserve(
self._on_dircontent_select,
names='value'
)
self._filename.unobserve(
self._on_filename_change,
names='value'
)
# In folder only mode zero out the filename
if self._show_only_dirs:
filename = ''
# Set form values
self._pathlist.options = ipyfilechooser.utils.get_subpaths(path)
self._pathlist.value = path
self._filename.value = filename
# file/folder real names
dircontent_real_names = ipyfilechooser.utils.get_dir_contents(
path,
show_hidden=self._show_hidden,
prepend_icons=False,
show_only_dirs=self._show_only_dirs,
filter_pattern=self._filter_pattern
)
# file/folder
names
dircontent_display_names = ipyfilechooser.utils.get_dir_contents(
path,
show_hidden=self._show_hidden,
prepend_icons=self._use_dir_icons,
show_only_dirs=self._show_only_dirs,
filter_pattern=self._filter_pattern
)
dircontent_display_names = self.set_display_names(dircontent_real_names, dircontent_display_names)
# Dict to map real names to display names
self._map_name_to_disp = {
real_name: disp_name
for real_name, disp_name in zip(
dircontent_real_names,
dircontent_display_names
)
}
# Dict to map display names to real names
self._map_disp_to_name = {
disp_name: real_name
for real_name, disp_name in
self._map_name_to_disp.items()
}
# Set _dircontent form value to display names
self._dircontent.options = dircontent_display_names
# If the value in the filename Text box equals a value in the
# Select box and the entry is a file then select the entry.
if ((filename in dircontent_real_names) and
os.path.isfile(os.path.join(path, filename))):
self._dircontent.value = self._map_name_to_disp[filename]
else:
self._dircontent.value = None
# Reenable triggers again
self._pathlist.observe(
self._on_pathlist_select,
names='value'
)
self._dircontent.observe(
self._on_dircontent_select,
names='value'
)
self._filename.observe(
self._on_filename_change,
names='value'
)
# Update the state of the select button
if self._gb.layout.display is None:
# Disable the select button if path and filename
# - equal an existing folder in the current view
# - equal the already selected values
# - don't match the provided filter pattern(s)
check1 = filename in dircontent_real_names
check2 = os.path.isdir(os.path.join(path, filename))
check3 = False
check4 = False
# Only check selected if selected is set
if ((self._selected_path is not None) and
(self._selected_filename is not None)):
selected = os.path.join(
self._selected_path,
self._selected_filename
)
check3 = os.path.join(path, filename) == selected
# Ensure only allowed extensions are used
if self._filter_pattern:
check4 = not ipyfilechooser.utils.match_item(filename, self._filter_pattern)
if (check1 and check2) or check3 or check4:
self._select.disabled = True
else:
self._select.disabled = False
def set_display_names(self, dircontent_real_names, dircontent_display_names):
for i in range(len(dircontent_display_names)):
name = dircontent_display_names[i]
full_name = os.path.join(self._pathlist.value, dircontent_real_names[i])
if os.path.isfile(full_name):
size = os.path.getsize(full_name) * 2 ** -20
basename, extension = os.path.splitext(name)
if extension in ['.hf5']:
dircontent_display_names[i] = f" {dircontent_display_names[i]:50} -- {size:.1f} MB"
elif extension in ['.h5', '.ndata']:
try:
reader = SciFiReaders.NionReader(full_name)
dataset_nion = reader.read()
dircontent_display_names[i] = f" {dataset_nion.title+extension:50} - {size:.1f} MB"
except IOError:
dircontent_display_names[i] = dircontent_display_names[i]
else:
dircontent_display_names[i] = dircontent_display_names[i]
return dircontent_display_names"""
def open_file_dialog_qt(file_types=None): # , multiple_files=False):
"""Opens a File dialog which is used in open_file() function
This function uses pyQt5.
The app of the Gui has to be running for QT. Tkinter does not run on Macs at this point in time.
In jupyter notebooks use %gui Qt early in the notebook.
The file looks first for a path.txt file for the last directory you used.
Parameters
----------
| |
return self.get('Time mark')
@property
def by_user(self) -> str:
return self.get('by user')
# @property
# def (self) -> str:
# return self.dict[''].strip()
def __init__(self, source) -> None:
    """Parse the output of the trace 'display' command.

    *source* is anything tools.read accepts (file name, file object or
    string); it is split into lines and parsed into self.individuals,
    plus self.version and self.market from the 'Version ... , Market:'
    header line.
    """
    self.source = tools.read(source)
    if isinstance(self.source, str):
        self.source = self.source.splitlines()
    parts = [] #List[str]  -- accumulated 'key: value' fragments of one record
    self.individuals = [] # List['ParseDisplayOutput.Individual']
    in_header = True
    for line in self.source:
        line = line.strip()
        ## skip header, until a line starts with Version
        if in_header:
            if line.startswith('Version'):
                in_header = False
            else:
                continue
        if line.startswith('Version'):
            # Line shape: 'Version <ver>, Market: <market>'
            mpos = line.index(', Market:')
            self.version = line[8:mpos].strip()
            self.market = line[mpos+9:].strip()
            continue
        if line.startswith('First'):
            # The 'First: ...   Last: ...' line aligns its two fields with
            # spaces only; insert a comma so the generic split below works.
            last = line.find('Last:')-1
            while last > 0 and line[last] == ' ':
                last=last-1
            if last>0 and line[last] != ',':
                line = line[:last+1] + ',' + line[last+1:]
        if len(line) > 0 :
            parts.extend(map(str.strip, line.split(',')))
        else:
            # A blank line terminates one individual's record.
            # NOTE(review): a trailing record not followed by a blank line
            # is silently discarded -- confirm the output always ends blank.
            individual = self.parse_individual(parts)
            if individual is not None:
                self.individuals.append(individual)
            parts = []
def __str__(self) -> str:
    """One line per parsed individual."""
    lines = map(str, self.individuals)
    return "\n".join(lines)
@property
def is_valid(self) -> bool:
    """Whether parsing produced an individuals list.

    Idiom fix: 'self.individuals is not None' replaces the awkward
    'not self.individuals is None' (same semantics).
    NOTE(review): __init__ always assigns a list, so this appears to be
    always True -- confirm whether an emptiness check was intended.
    """
    return self.individuals is not None
@property
def first_trace(self) -> str:
    """'First' timestamp from the header individual (list index 0)."""
    return self.individuals[0].get('First')
@property
def last_trace(self) -> str:
    """'Last' timestamp from the header individual (list index 0)."""
    return self.individuals[0].get('Last')
def get_individual(self, id) -> 'Individual':
    """Look up an individual by list index, id or unit name.

    An int is treated as a position in self.individuals; any other value
    is matched against each individual's id and unit_name, skipping the
    header entry at position 0.  Returns None when nothing matches.
    """
    if isinstance(id, int):
        if id < len(self.individuals):
            return self.individuals[id]
        return None
    # Skip the header entry at position 0.
    matches = (ind for ind in self.individuals[1:]
               if id in (ind.id, ind.unit_name))
    return next(matches, None)
### convenience method that returns the id of Individual matching unitname, or None
def get_id(self, unitname) -> str:
    """Return the id of the individual matching *unitname*, or None."""
    ind = self.get_individual(unitname)
    if ind is None:
        return None
    return ind.id
## Trace ind: 3, State: setup , Stored: 0, Size per lim: 5000, Type : unit-trace , Rotating: on , Textlevel: all, Lim no : 1, Unit no: 0206, Unit name: CMP , Time mark: 2018-12-13 16:46:11 (CET), by user: mxone_admin
@staticmethod
def parse_individual(parts) -> 'Individual':
    """Build an Individual from 'key: value' fragments, or None if empty.

    Each item in *parts* is split on the first ':' and both halves are
    stripped; the resulting dict backs the Individual accessor object.
    """
    d = dict(map(str.strip, itm.split(':', 1)) for itm in parts)
    return ParseDisplayOutput.Individual(d) if len(d) > 0 else None
##----- End parse_display.py -------------------------------------------------##
return locals()
@modulize('settings')
def _settings(__name__):
    ##----- Begin settings.py ----------------------------------------------------##
    import json
    import io
    import tools
    import os.path
    import re

    class Settings:
        """Typed accessor around the JSON settings file (which may contain
        '//' comments that are stripped before parsing)."""

        def __init__(self, settings_file):
            """Accept a file object, a JSON string, or a path to a JSON file."""
            if isinstance(settings_file, io.TextIOBase):
                self.__init__(settings_file.read())
                return
            elif isinstance(settings_file, str) and (settings_file.count('\n') > 1 or not os.path.exists(settings_file) ):
                # More than one line, or not an existing path: treat as raw JSON.
                self.raw_data = settings_file
                trimmed = Settings.trim_json_comments(settings_file)
                self.data = json.load(io.StringIO(trimmed))
                tools.tracelevel = self.debug_trace_level
            elif isinstance(settings_file, str):
                with tools.open_read_file(settings_file) as f:
                    self.__init__(f.read())
                return

        @staticmethod
        def trim_json_comments(data_string):
            """Strip '//' comment lines and suffixes so the text parses as JSON.

            A trailing comment is only removed when the line does not end in a
            backslash, comma or quote (heuristic so '//' inside trailing string
            values is left alone).
            """
            result = []
            for line in data_string.split("\n"):
                stripped = line.strip()
                if len(stripped) < 1 or stripped[0:2] == "//":
                    line = ""  # blank or whole-line comment: drop content
                elif line[-1] not in r"\,\"\'":
                    line = re.sub(r"\/\/.*?$", "", line)
                result.append(line)
            return "\n".join(result)

        @property
        def gangs(self) -> [{}]:
            """The configured gang definitions (list of dicts with name/members)."""
            return self.data['gangs']

        def get_gang(self, name) -> [str]:
            """Return the member list of gang *name*, or None if unknown."""
            gangs_list = self.gangs
            for g in gangs_list:
                if g['name'] == name:
                    return g['members']
            return None

        def expand_to_ids(self, ids_or_gangs:[str]) -> str:
            """Expand gang names to their members; plain ids pass through."""
            res = []
            ls = ids_or_gangs if isinstance(ids_or_gangs, list) else [ids_or_gangs]
            for id in ls:
                members = self.get_gang(id) or [id]
                res.extend(members)
            return res

        @property
        def default_textlevel(self) -> str:
            return self.data.get('default_textlevel', 'default') # none means "default"

        @property
        def trace_cmd(self) -> str:
            return self.data.get('trace_cmd', 'trace')

        @property
        def trace_args(self) -> [str]:
            """
            If our trace-command requires some extra prefixed arguments.
            :returns: list might be empty, but never none
            """
            return self.data.get('trace_args', [])

        @property
        def file_prefix(self) -> str:
            """
            Prefix for trace output files
            """
            return self.data.get('file_prefix', 'trace_mx_')

        @property
        def file_postfix(self) -> str:
            """
            Postfix for trace output files
            """
            return self.data.get('file_postfix', '.log')

        @property
        def file_separators(self) -> str:
            """
            Which separators are allowed
            """
            return self.data.get('file_separators', '-_/=')

        @property
        def zip_prefix(self) -> str:
            """
            Prefix for zipped trace output files (defaults to file_prefix)
            """
            return self.data.get('zip_prefix', self.file_prefix)

        @property
        def zip_postfix(self) -> str:
            """
            Postfix for zipped trace output files (defaults to file_postfix).

            Bug fix: the original looked up the 'file_postfix' key here, so a
            configured 'zip_postfix' value could never take effect.
            """
            return self.data.get('zip_postfix', self.file_postfix)

        @property
        def debug_trace_level(self) -> int:
            return self.data.get('debug_trace_level', 7)

        @property
        def debug_trace_commands(self) -> int:
            return self.data.get('debug_trace_commands', 7)

        @property
        def debug_trace_output(self) -> int:
            """Trace level for command output.

            Bug fix: the original 'return v is not v is None or
            self.debug_trace_output' recursed into itself unconditionally
            (the chained comparison 'v is not v is None' is always False),
            causing infinite recursion.  Fall back to debug_trace_level
            when the key is absent.
            """
            v = self.data.get('debug_trace_output')
            return v if v is not None else self.debug_trace_level

    ##----- End settings.py ------------------------------------------------------##
    return locals()
@modulize('command_generator')
def _command_generator(__name__):
    ##----- Begin command_generator.py -------------------------------------------##
    from parse_display import ParseDisplayOutput
    from tools import trace
    from settings import Settings
    import sys

    class CommandGenerator:
        """Generates 'trace' command argument lists from parsed display output."""

        def __init__(self, display_output:'ParseDisplayOutput', settings:'Settings'):
            self.display_output = display_output
            self.mx_version = self.display_output.version
            self.settings = settings

        @property
        def trace_cmd(self) -> str:
            return self.settings.trace_cmd

        @property
        def trace_prefix_args(self) -> [str]:
            return self.settings.trace_args

        @staticmethod
        def get_cmd_add_individual(name:str, lim:str) -> [str]:
            lim_switch = [] if lim is None else ['-lim', lim]
            return lim_switch + ['-unit', name]

        @staticmethod
        def get_cmd_remove_individual(name:str) -> [str]:
            return ['-remove', name]

        @staticmethod
        def get_cmd_set_textlevel(n:str, textlevel:str = 'normal') -> [str]:
            return ['-modify', n, '-textlevel', textlevel]

        @staticmethod
        def get_cmd_start(n_list:[str]) -> [str]:
            return ['-start', ",".join(n_list) ]

        @staticmethod
        def get_cmd_stop(n_list:[str]) -> [str]:
            return ['-stop', ",".join(n_list) ]

        @staticmethod
        def get_cmd_clear(n_list:[str]) -> [str]:
            return ['-clear', ",".join(n_list) ]

        @staticmethod
        def get_cmd_print(unit_id:str) -> [str]:
            return ['-print', unit_id ]

        def expand_names(self, name_or_list) -> [str]:
            """Expand gang names and numeric ranges ('1-3') to unit names."""
            def expand_ranges(ls:[str]) -> [str]:
                res = []
                for s in ls:
                    for s2 in s.split(','):
                        dash = s2.find('-')
                        if dash > 0:
                            n1 = s2[:dash]
                            n2 = s2[dash+1:]
                            res += list(map(str, range(int(n1), int(n2)+1)))
                        else:
                            res.append(s2)
                return res
            res = []
            ls = name_or_list if isinstance(name_or_list, list) else [name_or_list]
            ls = expand_ranges(ls)
            for name in ls:
                # if name is not a gang, assume unit-name
                gang = self.settings.get_gang(name)
                if gang is not None:
                    res += self.expand_names(gang)
                else:
                    res.append(name)
            return res

        def get_ids_of(self, name_or_list) -> [str]:
            """Map names/gangs/ranges to trace individual ids, skipping unknowns."""
            res = []
            for name in self.expand_names(name_or_list):
                ind = self.display_output.get_individual(name)
                if ind is None:
                    trace(2, "Unknown unit '" + name + "', ignored")
                    continue
                res.append(ind.id)
            return res

        def add_indv(self, name:[str], lim:str = "1", extra_args:[str] = None) -> [str]:
            """Commands adding each member of *name* that is not yet present.

            Bug fixes: Settings has no 'expand_to_individuals' method -- the
            original raised AttributeError; use expand_to_ids.  The mutable
            default argument [] was replaced by a None sentinel.
            """
            extra_args = [] if extra_args is None else extra_args
            res = []
            for member in self.settings.expand_to_ids(name):
                if self.display_output.get_individual(member) is None:
                    res.append(CommandGenerator.get_cmd_add_individual(member, lim) + extra_args)
            return res

        def remove(self, name:[str]) -> [str]:
            """Commands removing each member of *name* that IS present.

            Bug fixes: expand_to_ids (see add_indv); the original condition
            was inverted ('is None'), generating remove commands only for
            members that were NOT in the display output.
            """
            res = []
            for member in self.settings.expand_to_ids(name):
                if self.display_output.get_individual(member) is not None:
                    res.append(CommandGenerator.get_cmd_remove_individual(member))
            return res

        def set_textlevel(self, name:[str], textlevel:str = 'normal') -> [str]:
            """Commands setting *textlevel* on each known member of *name*."""
            res = []
            for member in self.settings.expand_to_ids(name):
                indv = self.display_output.get_individual(member)
                if indv is None:
                    trace(2, "Unknown gang-member '" + member + "' Textlevel not changed")
                    continue
                res.append(CommandGenerator.get_cmd_set_textlevel(indv.id, textlevel))
            return res

        # Each id is wrapped in a list below: get_cmd_start/stop/clear join
        # their argument with commas, so passing a bare multi-character id
        # string would split it into single characters ('12' -> '1,2').
        def start(self, name:[str]) -> [str]:
            return [self.get_cmd_start([i]) for i in self.get_ids_of(name)]

        def stop(self, name:[str]) -> [str]:
            return [self.get_cmd_stop([i]) for i in self.get_ids_of(name)]

        def clear(self, name:[str]) -> [str]:
            return [self.get_cmd_clear([i]) for i in self.get_ids_of(name)]

        ### Returns a list of tuples(print-cmd, target-filename)
        def save_cmd(self, names:[str], prefix:str = "", postfix:str = ".log", ) -> [(str, str)]:
            """Pairs of (print command, output file name) for *names*."""
            def gen_tuple(unitname, id):
                cmd = CommandGenerator.get_cmd_print(id)
                filename = (prefix+sep+unitname+postfix).strip(sep)
                return (cmd, filename)
            # Derive the separator from the end of prefix / start of postfix.
            # Guard against empty strings: the original indexed prefix[-1]
            # unconditionally, raising IndexError for the default prefix="".
            if prefix and self.settings.file_separators.find(prefix[-1]) >= 0:
                sep = prefix[-1]
                prefix = prefix[:-1]
            elif postfix and self.settings.file_separators.find(postfix[0]) >= 0:
                sep = postfix[0]
                postfix = postfix[1:]
            else:
                sep = '_'
            res = []
            for name in self.expand_names(names):
                id = self.display_output.get_id(name)
                if id is None:
                    trace(2, "print_cmd: unknown unit " + name + ", not printed", file=sys.stderr)
                    continue
                res.append(gen_tuple(name, id))
            return res

        ### Returns a list print-cmd
        def print_cmd(self, names:[str]) -> [str]:
            """Print commands for *names*, skipping unknown units."""
            res = []
            for name in names:
                id = self.display_output.get_id(name)
                if id is None:
                    trace(2, "print_cmd: unknown unit " + name + ", not printed", file=sys.stderr)
                    continue
                res.append(CommandGenerator.get_cmd_print(id))
            return res

    ##----- End command_generator.py ---------------------------------------------##
    return locals()
@modulize('executor')
def _executor(__name__):
##----- Begin executor.py ----------------------------------------------------##
from tools import trace
from tools import tracelevel
from parse_display import ParseDisplayOutput
from command_generator import CommandGenerator
import subprocess
import sys
class Executor:
def __init__(self, program:str, args:[str], trace_cmd_level:int = 7, trace_result_level:int = 8):
self.program = program
self.args = args
prog_args = [self.program]
if not args is None: prog_args += args
trace(trace_cmd_level, 'Executor ' + program + ' ', args)
try:
self.result = subprocess.check_output(prog_args)
if tracelevel >= trace_result_level:
trace(trace_result_level, self.str_result)
except subprocess.CalledProcessError as e:
trace(1, e.cmd, 'failed | |
<gh_stars>0
# buildifier: disable=module-docstring
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe")
load("//rust/private:common.bzl", "rust_common")
load(
"//rust/private:repository_utils.bzl",
"BUILD_for_toolchain",
"DEFAULT_STATIC_RUST_URL_TEMPLATES",
"DEFAULT_TOOLCHAIN_NAME_PREFIX",
"check_version_valid",
"load_llvm_tools",
"load_rust_compiler",
"load_rust_src",
"load_rust_stdlib",
"load_rustc_dev_nightly",
"load_rustfmt",
"should_include_rustc_srcs",
_load_arbitrary_tool = "load_arbitrary_tool",
)
# Reexport `load_arbitrary_tool` as it's currently in use in https://github.com/google/cargo-raze
load_arbitrary_tool = _load_arbitrary_tool
# Note: Code in `.github/workflows/crate_universe.yaml` looks for this line, if you remove it or change its format, you will also need to update that code.
DEFAULT_TOOLCHAIN_TRIPLES = {
"aarch64-apple-darwin": "rust_darwin_aarch64",
"aarch64-unknown-linux-gnu": "rust_linux_aarch64",
"x86_64-apple-darwin": "rust_darwin_x86_64",
"x86_64-pc-windows-msvc": "rust_windows_x86_64",
"x86_64-unknown-freebsd": "rust_freebsd_x86_64",
"x86_64-unknown-linux-gnu": "rust_linux_x86_64",
}
def rules_rust_dependencies():
    """Dependencies used in the implementation of `rules_rust`.

    Each repository is declared with `maybe`, so a workspace that has
    already defined one of these names keeps its own version.
    """

    # Platform/constraint definitions used by toolchain selection.
    maybe(
        http_archive,
        name = "platforms",
        urls = [
            "https://mirror.bazel.build/github.com/bazelbuild/platforms/releases/download/0.0.5/platforms-0.0.5.tar.gz",
            "https://github.com/bazelbuild/platforms/releases/download/0.0.5/platforms-0.0.5.tar.gz",
        ],
        sha256 = "379113459b0feaf6bfbb584a91874c065078aa673222846ac765f86661c27407",
    )

    # C/C++ rules, needed for linking against cc toolchains.
    maybe(
        http_archive,
        name = "rules_cc",
        urls = ["https://github.com/bazelbuild/rules_cc/releases/download/0.0.1/rules_cc-0.0.1.tar.gz"],
        sha256 = "4dccbfd22c0def164c8f47458bd50e0c7148f3d92002cdb459c2a96a68498241",
    )

    # Common Starlark utility library.
    maybe(
        http_archive,
        name = "bazel_skylib",
        urls = [
            "https://github.com/bazelbuild/bazel-skylib/releases/download/1.2.0/bazel-skylib-1.2.0.tar.gz",
            "https://mirror.bazel.build/github.com/bazelbuild/bazel-skylib/releases/download/1.2.0/bazel-skylib-1.2.0.tar.gz",
        ],
        sha256 = "af87959afe497dc8dfd4c6cb66e1279cb98ccc84284619ebfec27d9c09a903de",
    )

    # Make the iOS simulator constraint available, which is referenced in abi_to_constraints()
    # rules_rust does not require this dependency; it is just imported as a convenience for users.
    maybe(
        http_archive,
        name = "build_bazel_apple_support",
        sha256 = "5bbce1b2b9a3d4b03c0697687023ef5471578e76f994363c641c5f50ff0c7268",
        url = "https://github.com/bazelbuild/apple_support/releases/download/0.13.0/apple_support.0.13.0.tar.gz",
    )

    # process_wrapper needs a low-dependency way to process json.
    maybe(
        http_archive,
        name = "rules_rust_tinyjson",
        sha256 = "1a8304da9f9370f6a6f9020b7903b044aa9ce3470f300a1fba5bc77c78145a16",
        url = "https://crates.io/api/v1/crates/tinyjson/2.3.0/download",
        strip_prefix = "tinyjson-2.3.0",
        type = "tar.gz",
        build_file = "@rules_rust//util/process_wrapper:BUILD.tinyjson.bazel",
    )
# buildifier: disable=unnamed-macro
def rust_register_toolchains(
        dev_components = False,
        edition = None,
        include_rustc_srcs = False,
        iso_date = None,
        register_toolchains = True,
        rustfmt_version = None,
        sha256s = None,
        extra_target_triples = ["wasm32-unknown-unknown", "wasm32-wasi"],
        urls = DEFAULT_STATIC_RUST_URL_TEMPLATES,
        version = rust_common.default_version):
    """Emits a default set of toolchains for Linux, MacOS, and Freebsd

    Skip this macro and call the `rust_repository_set` macros directly if you need a compiler for \
    other hosts or for additional target triples.

    The `sha256` attribute represents a dict associating tool subdirectories to sha256 hashes. As an example:
    ```python
    {
        "rust-1.46.0-x86_64-unknown-linux-gnu": "e3b98bc3440fe92817881933f9564389eccb396f5f431f33d48b979fa2fbdcf5",
        "rustfmt-1.4.12-x86_64-unknown-linux-gnu": "1894e76913303d66bf40885a601462844eec15fca9e76a6d13c390d7000d64b0",
        "rust-std-1.46.0-x86_64-unknown-linux-gnu": "ac04aef80423f612c0079829b504902de27a6997214eb58ab0765d02f7ec1dbc",
    }
    ```
    This would match for `exec_triple = "x86_64-unknown-linux-gnu"`. If not specified, rules_rust pulls from a non-exhaustive \
    list of known checksums..

    See `load_arbitrary_tool` in `@rules_rust//rust:repositories.bzl` for more details.

    Args:
        dev_components (bool, optional): Whether to download the rustc-dev components (defaults to False). Requires version to be "nightly".
        edition (str, optional): The rust edition to be used by default (2015, 2018, or 2021). If absent, every rule is required to specify its `edition` attribute.
        include_rustc_srcs (bool, optional): Whether to download rustc's src code. This is required in order to use rust-analyzer support.
            See [rust_toolchain_repository.include_rustc_srcs](#rust_toolchain_repository-include_rustc_srcs). for more details
        iso_date (str, optional): The date of the nightly or beta release (ignored if the version is a specific version).
        register_toolchains (bool): If true, repositories will be generated to produce and register `rust_toolchain` targets.
        rustfmt_version (str, optional): The version of rustfmt. Either "nightly", "beta", or an exact version. Defaults to `version` if not specified.
        sha256s (str, optional): A dict associating tool subdirectories to sha256 hashes.
        extra_target_triples (list, optional): Additional rust-style targets that rust toolchains should support.
        urls (list, optional): A list of mirror urls containing the tools from the Rust-lang static file server. These must contain the '{}' used to substitute the tool being fetched (using .format).
        version (str, optional): The version of Rust. Either "nightly", "beta", or an exact version. Defaults to a modern version.
    """
    # rustc-dev components are only published for nightly toolchains.
    if dev_components and version != "nightly":
        fail("Rust version must be set to \"nightly\" to enable rustc-dev components")

    # rustfmt follows the compiler channel/version unless explicitly pinned.
    if not rustfmt_version:
        rustfmt_version = version

    # One repository set per supported host triple; `maybe` lets users
    # pre-declare any of these repositories to override the defaults.
    for exec_triple, name in DEFAULT_TOOLCHAIN_TRIPLES.items():
        maybe(
            rust_repository_set,
            name = name,
            dev_components = dev_components,
            edition = edition,
            exec_triple = exec_triple,
            extra_target_triples = extra_target_triples,
            include_rustc_srcs = include_rustc_srcs,
            iso_date = iso_date,
            register_toolchain = register_toolchains,
            rustfmt_version = rustfmt_version,
            sha256s = sha256s,
            urls = urls,
            version = version,
        )
# buildifier: disable=unnamed-macro
def rust_repositories(**kwargs):
    """**Deprecated**: Use [rules_rust_dependencies](#rules_rust_dependencies) \
    and [rust_register_toolchains](#rust_register_toolchains) directly.

    Args:
        **kwargs (dict): Keyword arguments for the `rust_register_toolchains` macro.
    """
    # Legacy single-call entry point: forwards to the two replacement macros.
    rules_rust_dependencies()

    rust_register_toolchains(**kwargs)
def _rust_toolchain_repository_impl(ctx):
    """The implementation of the rust toolchain repository rule.

    Downloads the requested compiler, optional rustfmt/llvm-tools/rustc-dev
    components and one stdlib per target triple, then writes a BUILD file
    assembled from the per-component snippets.
    """
    check_version_valid(ctx.attr.version, ctx.attr.iso_date)

    # Conditionally download rustc sources. Generally used for `rust-analyzer`
    if should_include_rustc_srcs(ctx):
        load_rust_src(ctx)

    build_components = [load_rust_compiler(ctx)]

    if ctx.attr.rustfmt_version:
        build_components.append(load_rustfmt(ctx))

    # Rust 1.45.0 and nightly builds after 2020-05-22 need the llvm-tools gzip to get the libLLVM dylib
    # NOTE(review): these are *lexicographic* string comparisons; they hold
    # for the versions/dates in question but would misorder a hypothetical
    # "1.100.0" against "1.45.0" -- confirm before relying on future versions.
    if ctx.attr.version >= "1.45.0" or (ctx.attr.version == "nightly" and ctx.attr.iso_date > "2020-05-22"):
        load_llvm_tools(ctx, ctx.attr.exec_triple)

    # One standard library per requested triple (the exec triple included).
    for target_triple in [ctx.attr.exec_triple] + ctx.attr.extra_target_triples:
        build_components.append(load_rust_stdlib(ctx, target_triple))

        # extra_target_triples contains targets such as wasm, which don't have rustc_dev components
        if ctx.attr.dev_components and target_triple not in ctx.attr.extra_target_triples:
            load_rustc_dev_nightly(ctx, target_triple)

    ctx.file("WORKSPACE.bazel", "")
    ctx.file("BUILD.bazel", "\n".join(build_components))
def _rust_toolchain_repository_proxy_impl(repository_ctx):
    """Implementation of `rust_toolchain_repository_proxy`.

    Declares one `toolchain` target per supported target triple (the exec triple
    plus every extra target triple), pointing back at the rust_toolchain targets
    in the parent workspace, and writes them into a single BUILD.bazel.

    Args:
        repository_ctx (repository_ctx): The repository rule's context object.
    """
    supported_triples = [repository_ctx.attr.exec_triple] + repository_ctx.attr.extra_target_triples
    build_components = [
        BUILD_for_toolchain(
            name = "{}_{}".format(repository_ctx.attr.toolchain_name_prefix, triple),
            exec_triple = repository_ctx.attr.exec_triple,
            parent_workspace_name = repository_ctx.attr.parent_workspace_name,
            target_triple = triple,
        )
        for triple in supported_triples
    ]
    repository_ctx.file("WORKSPACE.bazel", "")
    repository_ctx.file("BUILD.bazel", "\n".join(build_components))
# Repository rule that fetches toolchain components; pair it with
# `rust_toolchain_repository_proxy` below, which declares the toolchains to
# Bazel without forcing the download. Attrs are kept alphabetically sorted.
rust_toolchain_repository = repository_rule(
    doc = (
        "Composes a single workspace containing the toolchain components for compiling on a given " +
        "platform to a series of target platforms.\n" +
        "\n" +
        "A given instance of this rule should be accompanied by a rust_toolchain_repository_proxy " +
        "invocation to declare its toolchains to Bazel; the indirection allows separating toolchain " +
        "selection from toolchain fetching."
    ),
    attrs = {
        "auth": attr.string_dict(
            doc = (
                "Auth object compatible with repository_ctx.download to use when downloading files. " +
                "See [repository_ctx.download](https://docs.bazel.build/versions/main/skylark/lib/repository_ctx.html#download) for more details."
            ),
        ),
        "dev_components": attr.bool(
            doc = "Whether to download the rustc-dev components (defaults to False). Requires version to be \"nightly\".",
            default = False,
        ),
        "edition": attr.string(
            doc = (
                "The rust edition to be used by default (2015, 2018, or 2021). " +
                "If absent, every rule is required to specify its `edition` attribute."
            ),
        ),
        "exec_triple": attr.string(
            doc = "The Rust-style target that this compiler runs on",
            mandatory = True,
        ),
        "extra_target_triples": attr.string_list(
            doc = "Additional rust-style targets that this set of toolchains should support.",
        ),
        "include_rustc_srcs": attr.bool(
            doc = (
                "Whether to download and unpack the rustc source files. These are very large, and " +
                "slow to unpack, but are required to support rust analyzer. An environment variable " +
                "`RULES_RUST_TOOLCHAIN_INCLUDE_RUSTC_SRCS` can also be used to control this attribute. " +
                "This variable will take precedence over the hard coded attribute. Setting it to `true` to " +
                "activates this attribute where all other values deactivate it."
            ),
            default = False,
        ),
        "iso_date": attr.string(
            doc = "The date of the tool (or None, if the version is a specific version).",
        ),
        "rustfmt_version": attr.string(
            doc = "The version of the tool among \"nightly\", \"beta\", or an exact version.",
        ),
        "sha256s": attr.string_dict(
            doc = "A dict associating tool subdirectories to sha256 hashes. See [rust_repositories](#rust_repositories) for more details.",
        ),
        "toolchain_name_prefix": attr.string(
            doc = "The per-target prefix expected for the rust_toolchain declarations in the parent workspace.",
        ),
        "urls": attr.string_list(
            doc = "A list of mirror urls containing the tools from the Rust-lang static file server. These must contain the '{}' used to substitute the tool being fetched (using .format).",
            default = DEFAULT_STATIC_RUST_URL_TEMPLATES,
        ),
        "version": attr.string(
            doc = "The version of the tool among \"nightly\", \"beta\", or an exact version.",
            mandatory = True,
        ),
    },
    implementation = _rust_toolchain_repository_impl,
    # Re-run the rule when this variable changes (it overrides include_rustc_srcs).
    environ = ["RULES_RUST_TOOLCHAIN_INCLUDE_RUSTC_SRCS"],
)
# Lightweight companion to `rust_toolchain_repository`: declares the toolchains
# (with constraints) so they can be registered without downloading anything.
rust_toolchain_repository_proxy = repository_rule(
    doc = (
        "Generates a toolchain-bearing repository that declares the toolchains from some other " +
        "rust_toolchain_repository."
    ),
    attrs = {
        "exec_triple": attr.string(
            doc = "The Rust-style target triple for the compilation platform",
            mandatory = True,
        ),
        "extra_target_triples": attr.string_list(
            doc = "The Rust-style triples for extra compilation targets",
        ),
        "parent_workspace_name": attr.string(
            doc = "The name of the other rust_toolchain_repository",
            mandatory = True,
        ),
        "toolchain_name_prefix": attr.string(
            doc = "The per-target prefix expected for the rust_toolchain declarations in the parent workspace.",
        ),
    },
    implementation = _rust_toolchain_repository_proxy_impl,
)
def rust_repository_set(
name,
version,
exec_triple,
include_rustc_srcs = False,
extra_target_triples = [],
iso_date = None,
rustfmt_version = None,
edition = None,
dev_components = False,
sha256s = None,
urls = DEFAULT_STATIC_RUST_URL_TEMPLATES,
auth = None,
register_toolchain = True):
"""Assembles a remote repository for the given toolchain params, produces a proxy repository \
to contain the toolchain declaration, and registers the toolchains.
N.B. A "proxy repository" is needed to allow for registering the toolchain (with constraints) \
without actually downloading the toolchain.
Args:
name (str): The name of the generated repository
version | |
# TODO: move this to a game mode called "fixed" once we implement a way to randomize roles (and have that game mode be called "random")
# Role assigned to any player not given a special role below.
DEFAULT_ROLE = "villager"
# Player-count thresholds: each entry is the minimum number of players for the
# corresponding column in ROLE_GUIDE below.
ROLE_INDEX = ( 4 , 6 , 7 , 8 , 9 , 10 , 11 , 12 , 13 , 15 , 16 , 18 , 20 , 21 , 23 , 24 )
ROLE_GUIDE = OrderedDict([ # This is order-sensitive - many parts of the code rely on this order!
# wolf roles
("wolf" , ( 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 2 , 2 , 2 , 2 , 3 , 3 , 3 )),
("alpha wolf" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )),
("werecrow" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 )),
("werekitten" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )),
("wolf mystic" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )),
("wolf shaman" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )),
("fallen angel" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )),
("doomsayer" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )),
("wolf cub" , ( 0 , 0 , 0 , 0 , 0 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 )),
("traitor" , ( 0 , 0 , 0 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 )),
("hag" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 1 )),
("sorcerer" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 1 , 1 , 1 , 1 )),
("warlock" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )),
("minion" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )),
("cultist" , ( 0 , 0 , 1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )),
# villager roles
("seer" , ( 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 )),
("oracle" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )),
("harlot" , ( 0 , 0 , 0 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 )),
("shaman" , ( 0 , 0 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 )),
("hunter" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 1 , 1 , 1 , 1 , 1 , 1 , 1 )),
("vigilante" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )),
("augur" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 1 , 1 , 1 , 1 )),
("detective" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 )),
("prophet" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )),
("guardian angel" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )),
("bodyguard" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 1 , 1 , 1 , 1 , 1 )),
("priest" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )),
("doctor" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )),
("mad scientist" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )),
("mystic" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )),
("matchmaker" , ( 0 , 0 , 0 , 0 , 0 , 0 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 )),
("village drunk" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )),
("time lord" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )),
("villager" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , | |
# The MIT License (MIT)
# Copyright © 2021 Opentensor.ai
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software.
# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import argparse
import asyncio
import grpc
import math
import sys
import time
import torch
import pandas as pd
import torch.nn as nn
import traceback
from termcolor import colored
from types import SimpleNamespace
from typing import Tuple, List, Optional
from loguru import logger
from munch import Munch
import bittensor
import bittensor.utils.stats as stat_utils
import bittensor.serialization as serialization
from bittensor.exceptions.handlers import rollbar
class Dendrite(nn.Module):
r"""
Creates Forward and Backward calls to other neurons on the network. It behaves like a normal torch nn.Module and is differentiable.
Messages passed through this module will be sent to other neuron objects, either remote or local, and return responses as torch tensors.
Gradients passing through this module on a .backward() call will trigger Backward rpc calls to the axon terminals of the downstream neurons
called during associated Forward operation.
"""
def __init__(self, config: Munch = None, wallet: 'bittensor.wallet.Wallet' = None, metagraph: 'bittensor.metagraph.Metagraph' = None):
r""" Initializes a new Dendrite entry point.
Args:
config (:obj:`Munch`, `optional`):
dendrite.Dendrite.config()
wallet (:obj:`bittensor.wallet.Wallet`, `optional`):
bittensor wallet with hotkey and coldkeypub.
metagraph (:obj:`bittensor.metagraph.Metagraph`, `optional`):
bittensor network metagraph.
"""
super().__init__()
# Config: Holds all config items for this items and those that are recursively defined. Specifically
# config for you wallet and metagraph.
if config == None:
config = Dendrite.build_config()
self.config = config
# Wallet: Holds you hotkey keypair and coldkey pub, which can be used to sign messages
# and subscribe to the chain.
if wallet == None:
wallet = bittensor.wallet.Wallet(self.config)
self.wallet = wallet
# Metagraph: Maintains a connection to the subtensor chain and can be queried for the latest state.
if metagraph == None:
metagraph = bittensor.metagraph.Metagraph(self.config, self.wallet)
self.metagraph = metagraph
# Receptors: Holds a set map of publickey -> receptor objects. Receptors encapsulate a TCP connection between
# this dendrite and an upstream neuron (i.e. a peer we call for representations)
self._receptors = {}
# Stats: hold statistics for this dendrite.
self.stats = SimpleNamespace(
qps = stat_utils.timed_rolling_avg(0.0, 0.01),
)
def forward_text(self, neurons: List[bittensor.proto.Neuron],
x: List[torch.Tensor]) -> Tuple[List[torch.Tensor], torch.Tensor]:
r""" Forward text inputs to neurons.
Args:
neurons (:obj:`List[bittensor.proto.Neuron]` of shape :obj:`(num_neurons)`, `required`):
List of remote neurons which match length of x. Tensors from x are sent forward to these neurons.
x (:obj:`List[torch.Tensor]` of shape :obj:`(num_neurons * [batch_size, sequence_len])`, `required`):
List of tensors to send to corresponsing neurons. Tensors are text input_ids encoded using the
bittensor tokenizer of shape [batch_size, sequence_len].
Returns:
forwad_output (:obj:`List[torch.FloatTensor]` of shape :obj:`(batch_size, sequence_len, bittensor.__network_dim__)`, `required`):
Output encodings of inputs produced by remote neurons. Non-responses are zeroes of common shape.
return_codes (:obj:`List[torch.LongTensor]` of shape :obj:`[num_neurons]`, `required`):
dendrite call return ops.
"""
if len(x[0].shape) != 2:
error_msg = 'Text inputs should rank 2 with semantic shape: [batch_size, sequence_len]'
raise ValueError(error_msg)
if len(x) != len(neurons):
error_msg = 'List of text inputs x should have the same length as passed destination neurons, got {} and {}'.format(len(x), len(neurons))
raise ValueError(error_msg)
if len(x) < 1:
error_msg = 'Must pass more than 0 input for argument x, got {}'.format(len(x))
raise ValueError(error_msg)
return self.forward(neurons, x, bittensor.proto.Modality.TEXT)
def forward_image(self, neurons: List[bittensor.proto.Neuron],
x: List[torch.Tensor]) -> Tuple[List[torch.Tensor], torch.Tensor]:
r""" Forward image inputs to neurons.
Args:
neurons (:obj:`List[bittensor.proto.Neuron]` of shape :obj:`(num_neurons)`, `required`):
List of remote neurons which match length of x. Tensors from x are sent forward to these neurons.
x (:obj:`List[torch.Tensor]` of shape :obj:`(num_neurons * [batch_size, sequence_len, channels, rows, cols])`, `required`):
List of image-tensors to send to corresponsing neurons. Tensors are images encoded using the
torch.toTensor() or other encoding which produces the shape [batch_size, channels, rows, cols].
Returns:
forwad_output (:obj:`List[torch.FloatTensor]` of shape :obj:`(batch_size, sequence_len, bittensor.network_size)`, `required`):
Output encodings of images produced by remote neurons. Non-responses are zeroes of common shape.
return_codes (:obj:`List[torch.LongTensor]` of shape :obj:`[num_neurons]`, `required`):
dendrite call return ops.
"""
# TODO(const): Checks across all tensors and other shape checks.
if len(x[0].shape) != 5:
error_msg = 'Image inputs should be rank 5 with semantic shape: [batch_size, sequence_dim, channels, rows, cols]'
raise ValueError(error_msg)
if len(x) != len(neurons):
error_msg = 'List of image inputs x should have the same length as passed destination neurons, got {} and {}'.format(len(x), len(neurons))
raise ValueError(error_msg)
if len(x) < 1:
error_msg = 'Must pass more than 0 input for argument x, got {}'.format(len(x))
raise ValueError(error_msg)
return self.forward(neurons, x, bittensor.proto.Modality.IMAGE)
def forward_tensor(self, neurons: List[bittensor.proto.Neuron],
x: List[torch.Tensor]) -> Tuple[List[torch.Tensor], torch.Tensor]:
r""" Forward tensor inputs to neurons.
Args:
neurons (:obj:`List[bittensor.proto.Neuron]` of shape :obj:`(num_neurons)`, `required`):
List of remote neurons which match length of x. Tensors from x are sent forward to these neurons.
x (:obj:`List[torch.Tensor]` of shape :obj:`(num_neurons * [batch_size, sequence_len, bittensor.__network_dim__])`, `required`):
List of tensors to send to corresponsing neurons. Tensors are of arbitrary type and
with shape [batch_size, sequence_len, bittensor.__network_dim__].
Returns:
forwad_output (:obj:`List[torch.FloatTensor]` of shape :obj:`num_neurons * (batch_size, sequence_len, bittensor.__network_dim__)]`, `required`):
Output encodings of tensors produced by remote neurons. Non-responses are zeroes of common shape.
return_codes (:obj:`List[torch.LongTensor]` of shape :obj:`[num_neurons]`, `required`):
dendrite call return ops.
"""
if len(x[0].shape) != 3:
error_msg = 'Tensor inputs should be rank 3 with semantic shape: [batch_size, sequence_len, feature_len]'
raise ValueError(error_msg)
if len(x) != len(neurons):
error_msg = 'List of tensor inputs x should have the same length as passed destination neurons, got {} and {}'.format(len(x), len(neurons))
raise ValueError(error_msg)
if x[0].shape[2] != bittensor.__network_dim__:
error_msg = 'Passed tensor must have last dimension {} got {}'.format(bittensor.__network_dim__, x[0].shape[2])
raise ValueError(error_msg)
if len(x) == 0:
error_msg = 'Must pass more than 0 input for argument x, got {}'.format(len(x))
raise ValueError(error_msg)
return self.forward(neurons, x, bittensor.proto.Modality.TENSOR)
def forward(self, neurons: List[bittensor.proto.Neuron],
x: List[torch.Tensor],
mode: bittensor.proto.Modality) -> Tuple[List[torch.Tensor], torch.LongTensor]:
r""" Forward tensor inputs to neurons.
Args:
neurons (:obj:`List[bittensor.proto.Neuron]` of shape :obj:`(num_neurons)`, `required`):
List of remote neurons which match length of x. Tensors from x are sent forward to these neurons.
x (:obj:`List[torch.Tensor]` of shape :obj:`(num_neurons * [shape])`, `required`):
List of tensors to send to corresponsing neurons. Tensors are of arbitrary type and shape depending on the
modality.
mode (:obj:`bittensor.proto.Modality` of shape :obj:`(1)`, `required`):
Bittensor forward modality type. Enum in [TEXT, IMAGE, TENSOR]
Returns:
forward_outputs (:obj:`List[torch.FloatTensor]` of shape :obj:`num_neurons * (batch_size, sequence_len, bittensor.network_size)]`, `required`):
Output encodings of tensors produced by remote neurons. Non-responses are zeroes of common shape.
return_codes (:obj:`List[torch.LongTensor]` of shape :obj:`[num_neurons]`, `required`):
dendrite call return ops.
"""
if len(x) != len(neurons):
error_msg = 'List of inputs x should have the same length as passed destination neurons, got {} and {}'.format(len(x), len(neurons))
raise ValueError(error_msg)
if len(x) < 1:
error_msg = 'Must pass more than 0 input for argument x, got {}'.format(len(x))
raise ValueError(error_msg)
# ---- Stats ---
self.stats.qps.update(1)
# ---- Run async calls ----
loop = asyncio.new_event_loop()
results = loop.run_until_complete(self._gather(loop, x, neurons, mode))
loop.stop()
# ---- Process results and return ----
tensor_results = [res[0] for res in results]
return_codes = torch.tensor([res[1] for res in results])
return tensor_results, return_codes
async def _gather(self, loop: asyncio.base_events.BaseEventLoop, inputs, neurons, mode) -> List[Tuple[torch.FloatTensor, torch.LongTensor]]:
r""" Creates and returns the results from len(neurons) torch forward requests. Uses asyncio for concurrency.
Args:
loop | |
# <gh_stars>1-10
import os
import sys
import string
from config import *
from util import err
from util import out
from util import warn
from util import process_info
from util import get_result_info
from util import get_suffixes_from_info
from util import get_variant_bases
from util import overwrite_expected
from util import load_dir
from util import prepare_dir
# Basenames of every variant found in each configured target directory
# (the *_DIR / *_FILE constants come from config.py).
microbases = get_variant_bases(MICROBENCHMARK_DIR)
benchbases = get_variant_bases(BENCHMARK_DIR)
exploits = get_variant_bases(EXPLOIT_DIR)
webbases = get_variant_bases(WEBSITE_FILE)
# NOTE(review): webbases is derived from WEBSITE_FILE but keyed under
# WEBSITE_DIR in TARGETDIRS below — confirm this asymmetry is intended.
RESULTSDIR = OUTDIR
# Maps each target directory to its variant basenames and whether those
# variants should be wrapped (presumably JAM-wrapped — verify against the
# consumers in util.py).
TARGETDIRS = {
    BENCHMARK_DIR: {
        'basenames': benchbases,
        'wrap': False,
    },
    MICROBENCHMARK_DIR: {
        'basenames': microbases,
        'wrap': True,
    },
    EXPLOIT_DIR: {
        'basenames': exploits,
        'wrap': False,
    },
    WEBSITE_DIR: {
        'basenames': webbases,
        'wrap': False,
    },
}
# Profiling-related configuration
# Each spec delimits a code region inside one generated JS variant of a
# benchmark: the region starts after 'beginafter' and ends at 'endafter'
# (inclusive) or 'endbefore' (exclusive). Presumably 'matchall' selects every
# occurrence rather than only the first, 'indent' gives the reindentation
# level (-1 for minified single-line output), and 'prefixsemicolonend' /
# 'noquotes' tweak the emitted instrumentation — TODO confirm against the
# consumer in util.py.
octaneProcessedStepSpec = {
    'beginafter': ' suite.NotifySkipped(runner);\n } else {\n',
    'endafter': ' continuation = suite.RunStep(runner);\n',
    'matchall': False,
    'indent': 10,
    'noquotes': True
}
octaneOptimizedStepSpec = {
    'beginafter': 'function CheckCompatibility(){',
    'endafter': '&&Run()',
    'matchall': False,
    'indent': -1,
    'prefixsemicolonend': True,
    'noquotes': True
}
octaneProcessedSpec = {
    'beginafter': 'function CheckCompatibility() {\n',
    'endafter': ' Run()\n }\n',
    'matchall': False,
    'indent': 2
}
octaneOptimizedSpec = {
    'beginafter': 'function CheckCompatibility(){',
    'endafter': '&&Run()',
    'matchall': False,
    'indent': -1,
    'prefixsemicolonend': True
}
octaneOriginalSpec = {
    'beginafter': 'function CheckCompatibility() {\n',
    'endafter': ' Run();\n',
    'matchall': False,
    'indent': 2
}
imageloaderClosureSpec = {
    'beginafter': 'function doLoad() {\n',
    'endafter': 'img$$2.fetch()\n',
    'matchall': False,
    'indent': 6
}
imageloaderProcessedSpec = {
    'beginafter': 'function doLoad() {\n',
    'endafter': 'img$$2.fetch();\n',
    'matchall': False,
    'indent': 4
}
imageloaderCollapsedSpec = {
    'beginafter': 'function doLoad() {\n',
    'endafter': 'JAM.call(img$$2.fetch, img$$2, []);\n',
    'matchall': False,
    'indent': 2
}
imageloaderOptimizedSpec = {
    'beginafter': 'function doLoad(){',
    'endafter': 'img.fetch()',
    'matchall': False,
    'indent': -1,
    'prefixsemicolonend': True
}
imageloaderOriginalSpec = {
    'beginafter': 'function doLoad() {\n',
    'endafter': 'img.fetch()\n',
    'matchall': False,
    'indent': 2
}
jsqrcodeClosureSpec = {
    'beginafter': 'image$$7.onload = function() {',
    'endbefore': 'if (qrcode.callback != null)',
    'matchall': True,
    'indent': 6
}
jsqrcodeProcessedSpec = {
    'beginafter': 'function v144() {',
    'endbefore': ['if (v239) {', 'if (v242) {'],
    'matchall': False,
    'indent': 4
}
jsqrcodeCollapsedSpec = {
    'beginafter': 'function v144() {',
    'endbefore': 'if (qrcode.callback != null) {',
    'matchall': True,
    'indent': 4
}
jsqrcodeOptimizedSpec = {
    'beginafter': 'function v144(){',
    'endbefore': 'null!=qrcode.callback&&',
    'matchall': True,
    'indent': -1
}
jsqrcodeOriginalSpec = {
    'beginafter': 'image.onload=function(){',
    'endbefore': 'if(qrcode.callback!=null)',
    'matchall': True,
    'indent': 6
}
# --- S-note: edit/unlock dialog handlers, one spec per build variant. ---
snoteOriginalEditSpec = {
    'beginafter': 'function handleEditDialogOk() {',
    'endbefore': ' }\n\n function handleEditDialogCancel() {',
    'matchall': False,
    'indent': 4
}
snoteClosureEditSpec = {
    'beginafter': 'function handleEditDialogOk() {',
    'endbefore': ' }\n function handleEditDialogCancel() {',
    'matchall': False,
    'indent': 4
}
snoteProcessedEditSpec = {
    'beginafter': 'function handleEditDialogOk() {',
    'endbefore': ' return;\n }\n function handleEditDialogCancel() {',
    'matchall': False,
    'indent': 4
}
snoteOptimizedEditSpec = {
    'beginafter': 'function handleEditDialogOk(){',
    'endbefore': '}function handleEditDialogCancel(){',
    'matchall': False,
    'indent': -1,
    'prefixsemicolonend': True
}
snoteOriginalReadSpec = {
    'beginafter': 'function handleUnlockDialogOk() {',
    'endbefore': ' }\n\n function handleUnlockDialogCancel() {',
    'matchall': False,
    'indent': 4
}
snoteClosureReadSpec = {
    'beginafter': 'function handleUnlockDialogOk() {',
    'endbefore': ' }\n function handleUnlockDialogCancel() {',
    'matchall': False,
    'indent': 4
}
snoteProcessedReadSpec = {
    'beginafter': 'function handleUnlockDialogOk() {',
    'endbefore': ' return;\n }\n function handleUnlockDialogCancel() {',
    'matchall': False,
    'indent': 4
}
snoteOptimizedReadSpec = {
    'beginafter': 'function handleUnlockDialogOk(){',
    'endbefore': '}function handleUnlockDialogCancel(){',
    'matchall': False,
    'indent': -1,
    'prefixsemicolonend': True
}
# --- MediaWiki widgets: edit/unlock dialog handler factories. ---
mwwidgetsOriginalEditSpec = {
    'beginafter': 'function createEditDialogOkHandler(strServerPath, strNoteId) {\n\treturn function() {',
    'endbefore': ' }\n }\n\n function createEditDialogCancelHandler(strNoteId) {',
    'matchall': False,
    'indent': 12
}
mwwidgetsClosureEditSpec = {
    'beginafter': 'function createEditDialogOkHandler(strServerPath$$5, strNoteId$$43) {\n return function() {',
    'endbefore': ' }\n }\n function createEditDialogCancelHandler(strNoteId$$44) {',
    'matchall': False,
    'indent': 6
}
mwwidgetsProcessedEditSpec = {
    'beginafter': 'function createEditDialogOkHandler(strServerPath$$5, strNoteId$$43) {\n function v8() {',
    'endbefore': ' }\n return v8;\n',
    'matchall': False,
    'indent': 6
}
mwwidgetsOptimizedEditSpec = {
    'beginafter': 'function createEditDialogOkHandler(strServerPath,strNoteId){function v8(){',
    'endbefore': '}return v8}',
    'matchall': False,
    'indent': -1,
    'prefixsemicolonend': True
}
mwwidgetsOriginalReadSpec = {
    'beginafter': 'function createUnlockDialogOkHandler(strNoteId) {\n\treturn function() {',
    'endbefore': '\t}\n }\n\n function createUnlockDialogCancelHandler(strNoteId) {',
    'matchall': False,
    'indent': 6
}
mwwidgetsClosureReadSpec = {
    'beginafter': 'function createUnlockDialogOkHandler(strNoteId$$40) {\n return function() {',
    'endbefore': ' }\n }\n function createUnlockDialogCancelHandler(strNoteId$$41) {',
    'matchall': False,
    'indent': 6
}
mwwidgetsProcessedReadSpec = {
    'beginafter': 'function createUnlockDialogOkHandler(strNoteId$$40) {\n function v5() {',
    'endbefore': ' return;\n }\n return v5;\n',
    'matchall': False,
    'indent': 6
}
mwwidgetsOptimizedReadSpec = {
    'beginafter': 'function createUnlockDialogOkHandler(strNoteId){function v5(){',
    'endbefore': '}return v5}',
    'matchall': False,
    'indent': -1,
    'prefixsemicolonend': True
}
# --- PhyloJive: init/setup region. ---
phylojiveOriginalSetupSpec = {
    'beginafter': ' function init() {',
    'endbefore': '} \n\n\n// SimpleTemplate_webApp.html',
    'matchall': False,
    'indent': 8
}
phylojiveCollapsedSetupSpec = {
    'beginafter': 'function init() {',
    'endafter': '"raw valleybottom":[13.6]}}});',
    'matchall': False,
    'indent': 2
}
phylojiveClosureSetupSpec = {
    'beginafter': 'function init() {',
    'endafter': '"raw valleybottom":[13.6]}}})',
    'matchall': False,
    'indent': 2
}
phylojiveProcessedSetupSpec = {
    'beginafter': 'function init() {',
    'endafter': ' phylogenyExplorer_init(v8922);',
    'matchall': False,
    'indent': 2
}
phylojiveOptimizedSetupSpec = {
    'beginafter': 'function init(){',
    'endafter': 'phylogenyExplorer_init()',
    'matchall': False,
    'indent': -1,
    'prefixsemicolonend': True
}
# --- GoogieSpell: spellCheck submit and parseResult regions. ---
googiespellOriginalSubmitSpec = {
    'beginafter': 'GoogieSpell.prototype.spellCheck = function(ignore) {',
    'endbefore': '}\n\n\n//////\n// Spell checking functions',
    'matchall': False,
    'indent': 4
}
googiespellProcessedSubmitSpec = {
    'beginafter': 'function v182(ignore) {',
    'endbefore': ' return;\n}\nfunction v181(text',
    'matchall': False,
    'indent': 6
}
googiespellClosureSubmitSpec = {
    'beginafter': 'GoogieSpell.prototype.spellCheck = function(ignore) {',
    'endbefore': '};\nGoogieSpell.prototype.parseResult = function(r_text$$1) {',
    'matchall': False,
    'indent': 6
}
googiespellOptimizedSubmitSpec = {
    'beginafter': 'function v182(ignore){',
    'endbefore': '}function v181(text){',
    'matchall': False,
    'indent': -1,
    'prefixsemicolonend': True
}
googiespellOriginalParseSpec = {
    'beginafter': 'GoogieSpell.prototype.parseResult = function(r_text) {',
    'endbefore': ' return results;\n}\n\n//////\n// Counters',
    'matchall': False,
    'indent': 4
}
googiespellProcessedParseSpec = {
    'beginafter': 'function v183(r_text$$1) {',
    'endbefore': ' return results;\n}\nfunction v182(ignore) {',
    'matchall': False,
    'indent': 2
}
googiespellClosureParseSpec = {
    'beginafter': 'GoogieSpell.prototype.parseResult = function(r_text$$1) {',
    'endbefore': ' return results;\n};\nGoogieSpell.prototype.errorFixed = function() {',
    'matchall': False,
    'indent': 2
}
googiespellOptimizedParseSpec = {
    'beginafter': 'function v183(r_text){',
    'endbefore': 'return results}\nfunction v182(ignore){',
    'matchall': False,
    'indent': -1,
}
# --- Sequence Manipulation Suite 2: one spec per tool per build variant. ---
sms2codonplotOriginalSpec = {
    'beginafter': 'function codonPlot (theDocument) {\t',
    'endbefore': '\treturn true;\t\n\n\n\n}\n\nfunction writeCodonPlot(',
    'matchall': False,
    'indent': 2
}
sms2codonplotProcessedSpec = {
    'beginafter': 'function codonPlot(theDocument) {',
    'endbefore': ' return true;\n}\nfunction writeCodonPlot(',
    'matchall': False,
    'indent': 2
}
sms2codonplotOptimizedSpec = {
    'beginafter': 'function v4(){try{',
    'endbefore': '}catch(e){alert',
    'matchall': False,
    'indent': -1,
    'prefixsemicolonend': True
}
sms2codonusageOriginalSpec = {
    'beginafter': 'function codonUsage (theDocument) {\t',
    'endbefore': '\treturn true;\n\n}\n\n\n\n\nfunction writeCodonTable(',
    'matchall': False,
    'indent': 2
}
sms2codonusageProcessedSpec = {
    'beginafter': 'function codonUsage(theDocument) {',
    'endbefore': ' return true;\n}\nfunction writeCodonTable(',
    'matchall': False,
    'indent': 2
}
sms2codonusageOptimizedSpec = {
    'beginafter': 'function v5(){try{',
    'endbefore': '}catch(e){alert',
    'matchall': False,
    'indent': -1,
    'prefixsemicolonend': True
}
sms2cpgislandsOriginalSpec = {
    'beginafter': 'function cpgIslands (theDocument) {\t',
    'endbefore': '\treturn true;\n}\n\nfunction cpgIslandRegions (',
    'matchall': False,
    'indent': 2
}
sms2cpgislandsProcessedSpec = {
    'beginafter': 'function cpgIslands(theDocument) {',
    'endbefore': ' return true;\n}\nfunction cpgIslandRegions(',
    'matchall': False,
    'indent': 2
}
sms2cpgislandsOptimizedSpec = {
    'beginafter': 'function v3(){try{',
    'endbefore': '}catch(e){alert',
    'matchall': False,
    'indent': -1,
    'prefixsemicolonend': True
}
sms2dnamwOriginalSpec = {
    'beginafter': 'function dnaMw (theDocument) {\t',
    'endbefore': '\treturn true;\n}\n\nfunction writeDnaMw (',
    'matchall': False,
    'indent': 2
}
sms2dnamwProcessedSpec = {
    'beginafter': 'function dnaMw(theDocument) {',
    'endbefore': ' return true;\n}\nfunction writeDnaMw(',
    'matchall': False,
    'indent': 2
}
sms2dnamwOptimizedSpec = {
    'beginafter': 'function v3(){try{',
    'endbefore': '}catch(e){alert',
    'matchall': False,
    'indent': -1,
    'prefixsemicolonend': True
}
sms2dnapatternOriginalSpec = {
    'beginafter': 'function dnaPattern (theDocument) {\t',
    'endbefore': '\treturn true;\n}\n\nfunction writeDnaPattern (',
    'matchall': False,
    'indent': 2
}
sms2dnapatternProcessedSpec = {
    'beginafter': 'function dnaPattern(theDocument) {',
    'endbefore': ' return true;\n}\nfunction writeDnaPattern(',
    'matchall': False,
    'indent': 2
}
sms2dnapatternOptimizedSpec = {
    'beginafter': 'function dnaPattern(){',
    'endbefore': '}\nfunction writeDnaPattern(',
    'matchall': False,
    'indent': -1,
    'prefixsemicolonend': True
}
sms2dnastatsOriginalSpec = {
    'beginafter': 'function dnaStats (theDocument) {\t',
    'endbefore': '\treturn true;\n}\n\n\n// dna_stats.html',
    'matchall': False,
    'indent': 2
}
sms2dnastatsClosureSpec = {
    'beginafter': 'function dnaStats(theDocument) {',
    'endbefore': ' return true;\n}\ndocument.onload = ',
    'matchall': False,
    'indent': 2
}
sms2dnastatsProcessedSpec = {
    'beginafter': 'function dnaStats(theDocument) {',
    'endafter': ' closeWindow();\n',
    'matchall': False,
    'indent': 2
}
sms2dnastatsOptimizedSpec = {
    'beginafter': 'function v3(){try{',
    'endbefore': '}catch(e){alert',
    'matchall': False,
    'indent': -1,
    'prefixsemicolonend': True
}
sms2fuzzysearchdnaOriginalSpec = {
    'beginafter': 'function fuzzySearchDna (theDocument) {\t',
    'endbefore': '\treturn true;\n}\t\n\nfunction _fuzzySearchDna (',
    'matchall': False,
    'indent': 2
}
sms2fuzzysearchdnaProcessedSpec = {
    'beginafter': 'function fuzzySearchDna(theDocument) {',
    'endbefore': ' return true;\n}\nfunction _fuzzySearchDna(',
    'matchall': False,
    'indent': 2
}
sms2fuzzysearchdnaOptimizedSpec = {
    'beginafter': 'function v3(){try{',
    'endbefore': '}catch(e){alert',
    'matchall': False,
    'indent': -1,
    'prefixsemicolonend': True
}
sms2fuzzysearchproteinOriginalSpec = {
    'beginafter': 'function fuzzySearchProtein (theDocument) {\t',
    'endbefore': '\treturn true;\n}\t\n\nfunction _fuzzySearchProtein (',
    'matchall': False,
    'indent': 2
}
sms2fuzzysearchproteinProcessedSpec = {
    'beginafter': 'function fuzzySearchProtein(theDocument) {',
    'endbefore': ' return true;\n}\nfunction _fuzzySearchProtein(',
    'matchall': False,
    'indent': 2
}
sms2fuzzysearchproteinOptimizedSpec = {
    'beginafter': 'function v3(){try{',
    'endbefore': '}catch(e){alert',
    'matchall': False,
    'indent': -1,
    'prefixsemicolonend': True
}
sms2identsimOriginalSpec = {
    'beginafter': 'function identSim (theDocument) {',
    'endbefore': '\treturn true;\n}\n\nfunction writeIdentAndSim (',
    'matchall': False,
    'indent': 2
}
sms2identsimProcessedSpec = {
    'beginafter': 'function identSim(theDocument) {',
    'endbefore': ' return true;\n}\nfunction writeIdentAndSim(',
    'matchall': False,
    'indent': 2
}
sms2identsimOptimizedSpec = {
    'beginafter': 'function v3(){try{',
    'endbefore': '}catch(e){alert',
    'matchall': False,
    'indent': -1,
    'prefixsemicolonend': True
}
sms2multirevtransOriginalSpec = {
    'beginafter': 'function multiRevTrans (theDocument) {',
    'endbefore': '\treturn true;\n}\n\nfunction writeConsensusSeq(',
    'matchall': False,
    'indent': 2
}
sms2multirevtransProcessedSpec = {
    'beginafter': 'function multiRevTrans(theDocument) {',
    'endbefore': ' return true;\n}\nfunction writeConsensusSeq(',
    'matchall': False,
    'indent': 2
}
sms2multirevtransOptimizedSpec = {
    'beginafter': 'function v3(){try{',
    'endbefore': '}catch(e){alert',
    'matchall': False,
    'indent': -1,
    'prefixsemicolonend': True
}
sms2mutatefordigestOriginalSpec = {
    'beginafter': 'function mutateForDigest (theDocument) {\t',
    'endbefore': '\treturn true;\n}\t\n\nfunction layoutRestTrans (',
    'matchall': False,
    'indent': 2
}
sms2mutatefordigestProcessedSpec = {
    'beginafter': 'function mutateForDigest(theDocument) {',
    'endbefore': ' return true;\n}\nfunction layoutRestTrans(',
    'matchall': False,
    'indent': 2
}
sms2mutatefordigestOptimizedSpec = {
    'beginafter': 'function v8(){try{',
    'endbefore': '}catch(e){alert',
    'matchall': False,
    'indent': -1,
    'prefixsemicolonend': True
}
sms2orffindOriginalSpec = {
    'beginafter': 'function orfFind (theDocument) {\t',
    'endbefore': '\treturn true;\n}\n\nfunction writeOrfs (',
    'matchall': False,
    'indent': 2
}
sms2orffindProcessedSpec = {
    'beginafter': 'function orfFind(theDocument) {',
    'endbefore': ' return true;\n}\nfunction writeOrfs(',
    'matchall': False,
    'indent': 2
}
sms2orffindOptimizedSpec = {
| |
any(isinstance(cell, str) and cell.startswith('!') for cell in data[0]):
column_heading = data.pop(0)
for i_cell, cell in enumerate(column_heading):
if isinstance(cell, str):
cell = cell.strip()
column_heading[i_cell] = cell
column_headings.append(column_heading)
elif column_headings:
column_headings.insert(0, [None] * len(column_headings[0]))
else:
raise ValueError("Worksheet '{}' must have {} header rows(s)".format(sheet_name, num_row_heading_columns))
# separate header columns
row_headings = []
for i_col in range(num_row_heading_columns):
row_heading = []
row_headings.append(row_heading)
for row in data:
cell = row.pop(0)
if isinstance(cell, str):
cell = cell.strip()
row_heading.append(cell)
for column_heading in column_headings:
column_heading.pop(0) # pragma: no cover # unreachable because row_headings and column_headings cannot both be non-empty
# remove empty rows and columns
def remove_empty_rows(data):
for row in list(data):
empty = True
for cell in row:
if cell not in ['', None]:
empty = False
break
if empty:
data.remove(row)
if ignore_empty_rows:
remove_empty_rows(data)
if ignore_empty_cols:
data = transpose(data)
remove_empty_rows(data)
data = transpose(data)
return (data, row_headings, column_headings, top_comments)
    @classmethod
    def read_worksheet_metadata(cls, sheet_name, rows):
        """ Read worksheet metadata

        Parses — and removes from :obj:`rows`, in place — the leading
        metadata/comment rows of a worksheet: blank rows, ``%/.../%`` comment
        rows, at most one ``!!!ObjTables...`` document-metadata heading, and
        exactly one required ``!!ObjTables...`` model-metadata heading.

        Args:
            sheet_name (:obj:`str`): sheet name
            rows (:obj:`list`): rows; recognized metadata/comment rows are removed in place

        Returns:
            :obj:`tuple`:

                * :obj:`dict`: dictionary of document properties
                * :obj:`dict`: dictionary of model properties
                * :obj:`list` of :obj:`str`: comments
        """
        format = 'ObjTables'
        # version = obj_tables.__version__

        doc_metadata_headings = []
        model_metadata_headings = []
        comments = []
        # Iterate over a copy so rows can be removed in place; stop at the first
        # row that is neither blank, a comment, nor a metadata heading.
        for row in list(rows):
            if not row or all(cell in ['', None] for cell in row):
                # blank row: drop and keep scanning
                rows.remove(row)
            elif row and isinstance(row[0], str) and \
                row[0].startswith('%/') and row[0].endswith('/%') and \
                not any(row[1:]):
                # comment row of the form "%/ ... /%" with no other populated cells
                comment = row[0][2:-2].strip()
                if comment:
                    comments.append(comment)
                rows.remove(row)
            elif row and isinstance(row[0], str) and row[0].startswith('!!!'):
                # document-level metadata heading (only '!!!ObjTables...' is kept)
                if row[0].startswith('!!!' + format):
                    doc_metadata_headings.append(row[0])
                rows.remove(row)
            elif row and isinstance(row[0], str) and row[0].startswith('!!'):
                # model-level metadata heading (only '!!ObjTables...' is kept)
                if row[0].startswith('!!' + format):
                    model_metadata_headings.append(row[0])
                rows.remove(row)
            else:
                break
        assert len(doc_metadata_headings) <= 1, \
            'document metadata in sheet "{}" must consist of a list of key-value pairs.'.format(sheet_name)
        # NOTE(review): the startswith('!!') clause below is always true (the list
        # only collects rows starting with '!!' + format), and the len check is
        # repeated by the assert that follows — consider consolidating.
        assert len(model_metadata_headings) == 1 and model_metadata_headings[0].startswith('!!'), \
            'Sheet "{}" must contain metadata consisting of a list of key-value pairs.'.format(sheet_name)
        assert len(model_metadata_headings) == 1, \
            'Model metadata in sheet "{}" must consist of a list of key-value pairs.'.format(sheet_name)
        if doc_metadata_headings:
            doc_metadata_heading = doc_metadata_headings[0]
            assert re.match(cls.DOC_METADATA_PATTERN, doc_metadata_heading), \
                'Document metadata for sheet "{}" must consist of a list of key-value pairs.'.format(sheet_name)
        else:
            # no document metadata: parse an empty heading (yields an empty dict)
            doc_metadata_heading = ''
        model_metadata_heading = model_metadata_headings[0]
        assert re.match(cls.MODEL_METADATA_PATTERN, model_metadata_heading), \
            'Model metadata for sheet "{}" must consist of a list of key-value pairs.'.format(sheet_name)
        doc_metadata = cls.parse_worksheet_heading_metadata(doc_metadata_heading, sheet_name=sheet_name)
        model_metadata = cls.parse_worksheet_heading_metadata(model_metadata_heading, sheet_name=sheet_name)
        # assert metadata.get(format + 'Version') == version, '{}Version for sheet "{}" must be {}'.format(
        #     format, sheet_name, version)
        return (doc_metadata, model_metadata, comments)
@classmethod
def parse_worksheet_heading_metadata(cls, heading, sheet_name=None):
""" Parse key-value pairs of metadata from heading
Args:
heading (:obj:`str`): heading with key-value pairs of metadata
sheet_name (:obj:`str`, optional): sheet name
Returns:
:obj:`dict`: dictionary of document metadata
Raises:
:obj:`ValueError`: if a key is repeated
"""
results = re.findall(r" +(.*?)=('((?:[^'\\]|\\.)*)'|\"((?:[^\"\\]|\\.)*)\")",
heading)
metadata = {}
for key, val, _, _ in results:
val = val[1:-1]
if key in metadata:
raise ValueError('"{}" metadata {}cannot be repeated.'.format(
key, (f'for sheet "{sheet_name}" ' if sheet_name else '')))
metadata[key] = val
return metadata
def merge_doc_metadata(self, metadata):
""" Merge metadata into document metadata
Args:
metadata (:obj:`dict`): meta data
Raises:
:obj:`ValueError`: if the meta data conflicts with existing document
metadata
"""
for key, val in metadata.items():
if key in self._doc_metadata and self._doc_metadata[key] != val:
raise ValueError('Tables must have consistent document metadata for key "{}"'.format(key))
self._doc_metadata[key] = val
    def link_model(self, model, attributes, data, objects, objects_by_primary_attribute, decoded=None):
        """ Construct object graph

        Deserializes each object's related-attribute cells into references to
        other decoded objects, and deduplicates multi-cell related objects
        that serialize identically.

        Args:
            model (:obj:`Model`): an :obj:`obj_tables.core.Model`
            attributes (:obj:`list` of :obj:`Attribute`): attribute order of :obj:`data`
            data (:obj:`list` of :obj:`list` of :obj:`object`): nested list of object data
            objects (:obj:`list`): list of model objects in order of :obj:`data`
            objects_by_primary_attribute (:obj:`dict`): dictionary of model objects grouped by model
            decoded (:obj:`dict`, optional): dictionary of objects that have already been decoded

        Returns:
            :obj:`list` of :obj:`str`: list of parsing errors
        """
        errors = []
        for obj_data, obj in zip(data, objects):
            for (group_attr, sub_attr), attr_value in zip(attributes, obj_data):
                # ungrouped related attribute: set directly on the object
                if group_attr is None and isinstance(sub_attr, BaseRelatedAttribute):
                    value, error = sub_attr.deserialize(attr_value, objects_by_primary_attribute, decoded=decoded)
                    if error:
                        error.set_location_and_value(utils.source_report(obj, sub_attr.name), attr_value)
                        errors.append(error)
                    else:
                        setattr(obj, sub_attr.name, value)
                # grouped (multi-cell) sub-attribute; empty cells are skipped
                elif group_attr and attr_value not in [None, '']:
                    if isinstance(sub_attr, BaseRelatedAttribute):
                        value, error = sub_attr.deserialize(attr_value, objects_by_primary_attribute, decoded=decoded)
                    else:
                        value, error = sub_attr.deserialize(attr_value)
                    if error:
                        error.set_location_and_value(utils.source_report(obj, group_attr.name + '.' + sub_attr.name), attr_value)
                        errors.append(error)
                    else:
                        # lazily create the grouped sub-object on first use
                        sub_obj = getattr(obj, group_attr.name)
                        if not sub_obj:
                            sub_obj = group_attr.related_class()
                            setattr(obj, group_attr.name, sub_obj)
                        setattr(sub_obj, sub_attr.name, value)
            # Deduplicate multi-cell related objects: if an identically-serialized
            # instance was seen earlier, clear this instance's attributes and point
            # the object at the previously-seen instance instead.
            for attr in model.Meta.attributes.values():
                if isinstance(attr, RelatedAttribute) and attr.related_class.Meta.table_format == TableFormat.multiple_cells:
                    val = getattr(obj, attr.name)
                    if val:
                        if attr.related_class not in objects_by_primary_attribute:
                            objects_by_primary_attribute[attr.related_class] = {}
                        serialized_val = val.serialize()
                        same_val = objects_by_primary_attribute[attr.related_class].get(serialized_val, None)
                        if same_val:
                            # detach the duplicate's attributes before discarding it
                            for sub_attr in attr.related_class.Meta.attributes.values():
                                sub_val = getattr(val, sub_attr.name)
                                if isinstance(sub_val, list):
                                    setattr(val, sub_attr.name, [])
                                else:
                                    setattr(val, sub_attr.name, None)
                            setattr(obj, attr.name, same_val)
                        else:
                            # first occurrence: register it for later duplicates
                            objects_by_primary_attribute[attr.related_class][serialized_val] = val
        return errors
@classmethod
def header_row_col_names(cls, index, file_ext, table_format):
""" Determine row and column names for header entries.
Args:
index (:obj:`int`): index in header sequence
file_ext (:obj:`str`): extension for model file
table_format (:obj:`TableFormat`): orientation of the stored table
Returns:
:obj:`tuple` of row, column, header_entries
"""
if table_format == TableFormat.row:
row, col, hdr_entries = (1, index, 'column')
else:
row, col, hdr_entries = (index, 1, 'row')
if 'xlsx' in file_ext:
col = xlsx_col_name(col)
return (row, col, hdr_entries)
@classmethod
def get_model_sheet_name(cls, sheet_names, model):
""" Get the name of the worksheet/file which corresponds to a model
Args:
sheet_names (:obj:`list` of :obj:`str`): names of the sheets in the workbook/files
model (:obj:`Model`): model
Returns:
:obj:`str`: name of sheet corresponding to the model or :obj:`None` if there is no sheet for the model
Raises:
:obj:`ValueError`: if the model matches more than one sheet
"""
used_sheet_names = []
possible_sheet_names = cls.get_possible_model_sheet_names(model)
for sheet_name in sheet_names:
for possible_sheet_name in possible_sheet_names:
if sheet_name.lower() == possible_sheet_name.lower():
used_sheet_names.append(sheet_name)
break
used_sheet_names = det_dedupe(used_sheet_names)
if len(used_sheet_names) == 1:
return used_sheet_names[0]
if len(used_sheet_names) > 1:
raise ValueError('Model {} matches multiple sheets'.format(model.__name__))
return None
@classmethod
def get_possible_model_sheet_names(cls, model):
""" Return set of possible sheet names for a model
Args:
model (:obj:`Model`): Model
Returns:
:obj:`set`: set of possible sheet names for a model
"""
return set(['!!' + model.__name__,
'!!' + model.Meta.verbose_name,
'!!' + model.Meta.verbose_name_plural])
class MultiSeparatedValuesReader(ReaderBase):
""" Read a list of model objects from a single text file which contains
multiple comma or tab-separated files
"""
def run(self, path, schema_name=None, models=None,
allow_multiple_sheets_per_model=False,
ignore_missing_models=False, ignore_extra_models=False,
ignore_sheet_order=False,
include_all_attributes=True, ignore_missing_attributes=False, ignore_extra_attributes=False,
ignore_attribute_order=False, ignore_empty_rows=True,
group_objects_by_model=True, validate=True):
""" Read a list of model objects from a single text file which contains
multiple comma or tab-separated files
Args:
path (:obj:`str`): path to file(s)
schema_name (:obj:`str`, optional): schema name
models (:obj:`types.TypeType` or :obj:`list` of :obj:`types.TypeType`, optional): type or list
of type of objects to read
allow_multiple_sheets_per_model (:obj:`bool`, optional): if :obj:`True`, allow multiple sheets per model
ignore_missing_models (:obj:`bool`, optional): if :obj:`False`, report an error if a worksheet/
file is missing for one or more models
ignore_extra_models (:obj:`bool`, optional): if :obj:`True` and all :obj:`models` are found, ignore
other worksheets or files
ignore_sheet_order (:obj:`bool`, optional): if :obj:`True`, do not require the sheets to be provided
in the canonical order
include_all_attributes (:obj:`bool`, optional): if :obj:`True`, export all attributes including those
not explictly included in :obj:`Model.Meta.attribute_order`
ignore_missing_attributes (:obj:`bool`, optional): if :obj:`False`, report an error if a
worksheet/file doesn't contain all of attributes in a model in :obj:`models`
ignore_extra_attributes (:obj:`bool`, optional): if :obj:`True`, do not report errors if
attributes in the data are not in the model
ignore_attribute_order (:obj:`bool`, optional): if :obj:`True`, do not require the attributes to be provided
in the canonical order
ignore_empty_rows (:obj:`bool`, optional): if :obj:`True`, ignore empty rows
group_objects_by_model (:obj:`bool`, optional): if :obj:`True`, group decoded objects by their
types
validate (:obj:`bool`, optional): if :obj:`True`, validate the data
Returns:
:obj:`obj`: if :obj:`group_objects_by_model` set returns :obj:`dict`: of model objects grouped by :obj:`Model` class;
else returns :obj:`list`: of all model objects
Raises:
:obj:`ValueError`: if :obj:`path` contains a glob pattern
"""
tmp_dirname = tempfile.mkdtemp()
_, ext = splitext(path)
ext = ext.lower()
reader_cls = wc_utils.workbook.io.get_reader(ext)
reader = reader_cls(path)
i_sheet = 0
i_sheet_start = 0
i_sheet_class = {}
last_sheet_metadata = None
data = reader.read_worksheet('')
if data and data[0] and isinstance(data[0][0], str):
match = re.match(WorkbookReader.DOC_METADATA_PATTERN, data[0][0])
if match:
self._doc_metadata | |
<filename>nautobot_chatops/dispatchers/slack.py
"""Dispatcher implementation for sending content to Slack."""
import json
import logging
import os
import time
from django.conf import settings
from slack_sdk import WebClient
from slack_sdk.webhook.client import WebhookClient
from slack_sdk.errors import SlackApiError, SlackClientError
from nautobot_chatops.metrics import backend_action_sum
from .base import Dispatcher
logger = logging.getLogger("rq.worker")

# pylint: disable=abstract-method

# Create a metric to track time spent and requests made.
# One labeled child of backend_action_sum per Slack backend action; each is
# used as a @<METRIC>.time() decorator on the corresponding dispatcher method.
# (presumably a Prometheus summary/counter, given the .labels()/.time() API —
# see nautobot_chatops.metrics)
BACKEND_ACTION_LOOKUP = backend_action_sum.labels("slack", "platform_lookup")
BACKEND_ACTION_MARKDOWN = backend_action_sum.labels("slack", "send_markdown")
BACKEND_ACTION_BLOCKS = backend_action_sum.labels("slack", "send_blocks")
BACKEND_ACTION_SNIPPET = backend_action_sum.labels("slack", "send_snippet")
class SlackDispatcher(Dispatcher):
"""Dispatch messages and cards to Slack."""
# pylint: disable=too-many-public-methods
platform_name = "Slack"
platform_slug = "slack"
platform_color = "4A154B" # Slack Aubergine
command_prefix = settings.PLUGINS_CONFIG["nautobot_chatops"]["slack_slash_command_prefix"]
"""Prefix prepended to all commands, such as "/" or "!" in some clients."""
    def __init__(self, *args, **kwargs):
        """Init a SlackDispatcher.

        Creates an authenticated Slack WebClient from the plugin's configured
        API token, and caches the per-menu option limit (default 100,
        overridable via the SLACK_MENU_LIMIT environment variable).
        """
        super().__init__(*args, **kwargs)
        self.slack_client = WebClient(token=settings.PLUGINS_CONFIG["nautobot_chatops"]["slack_api_token"])
        self.slack_menu_limit = int(os.getenv("SLACK_MENU_LIMIT", "100"))
# pylint: disable=too-many-branches
@classmethod
@BACKEND_ACTION_LOOKUP.time()
def platform_lookup(cls, item_type, item_name):
"""Call out to the chat platform to look up, e.g., a specific user ID by name.
Args:
item_type (str): One of "organization", "channel", "user"
item_name (str): Uniquely identifying name of the given item.
Returns:
(str, None)
"""
instance = cls(context=None)
cursor = None
if item_type == "organization": # pylint: disable=no-else-raise
# The admin_teams_list API requires admin access and only works under Enterprise
raise NotImplementedError
elif item_type == "channel":
while True:
try:
response = instance.slack_client.conversations_list(cursor=cursor, limit=20, exclude_archived=True)
except SlackApiError as err:
if err.response["error"] == "ratelimited":
delay = int(err.response.headers["Retry-After"])
time.sleep(delay)
continue
raise err
for channel in response["channels"]:
if channel["name"] == item_name:
return channel["id"]
cursor = response["response_metadata"]["next_cursor"]
if not cursor:
break
elif item_type == "user":
while True:
try:
response = instance.slack_client.users_list(cursor=cursor, limit=20)
except SlackApiError as err:
if err.response["error"] == "ratelimited":
delay = int(err.response.headers["Retry-After"])
time.sleep(delay)
continue
raise err
for member in response["members"]:
if member["name"] == item_name:
return member["id"]
cursor = response["response_metadata"]["next_cursor"]
if not cursor:
break
return None
# More complex APIs for presenting structured data - these typically build on the more basic functions below
def command_response_header(self, command, subcommand, args, description="information", image_element=None):
"""Construct a consistently forwarded header including the command that was issued.
Args:
command (str): Primary command string
subcommand (str): Secondary command string
args (list): of tuples, either (arg_name, human_readable_value, literal_value) or (arg_name, literal_value)
description (str): Short description of what information is contained in the response
image_element (dict): As constructed by self.image_element()
"""
fields = []
for name, value, *_ in args:
fields.append(self.markdown_element(self.bold(name)))
fields.append(self.markdown_element(value))
command = f"{self.command_prefix}{command}"
block = {
"type": "section",
"text": self.markdown_element(
f"Hey {self.user_mention()}, here is that {description} you requested\n"
f"Shortcut: `{command} {subcommand} {' '.join(arg[-1] for arg in args)}`"
),
}
# Add to block "accessory" key if image_element exists. Otherwise do not
if image_element:
block["accessory"] = image_element
# Slack doesn't like it if we send an empty fields list, we have to omit it entirely
if fields:
block["fields"] = fields
return [block]
# Send various content to the user or channel
@BACKEND_ACTION_MARKDOWN.time()
def send_markdown(self, message, ephemeral=False):
"""Send a Markdown-formatted text message to the user/channel specified by the context."""
try:
if ephemeral:
self.slack_client.chat_postEphemeral(
channel=self.context.get("channel_id"),
user=self.context.get("user_id"),
text=message,
)
else:
self.slack_client.chat_postMessage(
channel=self.context.get("channel_id"),
user=self.context.get("user_id"),
text=message,
)
except SlackClientError as slack_error:
self.send_exception(slack_error)
# pylint: disable=arguments-differ
@BACKEND_ACTION_BLOCKS.time()
def send_blocks(self, blocks, callback_id=None, modal=False, ephemeral=False, title="Your attention please!"):
"""Send a series of formatting blocks to the user/channel specified by the context.
Slack distinguishes between simple inline interactive elements and modal dialogs. Modals can contain multiple
inputs in a single dialog; more importantly for our purposes, certain inputs (such as textentry) can ONLY
be used in modals and will be rejected if we try to use them inline.
Args:
blocks (list): List of block contents as constructed by other dispatcher functions.
callback_id (str): Callback ID string such as "command subcommand arg1 arg2". Required if `modal` is True.
modal (bool): Whether to send this as a modal dialog rather than an inline block.
ephemeral (bool): Whether to send this as an ephemeral message (only visible to the targeted user).
title (str): Title to include on a modal dialog.
"""
logger.info("Sending blocks: %s", json.dumps(blocks, indent=2))
try:
if modal:
if not callback_id:
self.send_error("Tried to create a modal dialog without specifying a callback_id")
return
self.slack_client.views_open(
trigger_id=self.context.get("trigger_id"),
view={
"type": "modal",
"title": self.text_element(title),
"submit": self.text_element("Submit"),
"blocks": blocks,
# Embed the current channel information into to the modal as modals don't store this otherwise
"private_metadata": json.dumps(
{
"channel_id": self.context.get("channel_id"),
}
),
"callback_id": callback_id,
},
)
elif ephemeral:
self.slack_client.chat_postEphemeral(
channel=self.context.get("channel_id"),
user=self.context.get("user_id"),
blocks=blocks,
)
else:
self.slack_client.chat_postMessage(
channel=self.context.get("channel_id"),
user=self.context.get("user_id"),
blocks=blocks,
)
except SlackClientError as slack_error:
self.send_exception(slack_error)
@BACKEND_ACTION_SNIPPET.time()
def send_snippet(self, text, title=None):
"""Send a longer chunk of text as a file snippet."""
if self.context.get("channel_name") == "directmessage":
channels = [self.context.get("user_id")]
else:
channels = [self.context.get("channel_id")]
channels = ",".join(channels)
logger.info("Sending snippet to %s: %s", channels, text)
try:
self.slack_client.files_upload(channels=channels, content=text, title=title)
except SlackClientError as slack_error:
self.send_exception(slack_error)
def send_image(self, image_path):
"""Send an image as a file upload."""
if self.context.get("channel_name") == "directmessage":
channels = [self.context.get("user_id")]
else:
channels = [self.context.get("channel_id")]
channels = ",".join(channels)
logger.info("Sending image %s to %s", image_path, channels)
self.slack_client.files_upload(channels=channels, file=image_path)
def send_warning(self, message):
"""Send a warning message to the user/channel specified by the context."""
self.send_markdown(f":warning: {self.bold(message)} :warning:", ephemeral=True)
def send_error(self, message):
"""Send an error message to the user/channel specified by the context."""
self.send_markdown(f":warning: {self.bold(message)} :warning:", ephemeral=True)
    # pylint: disable=unnecessary-pass
    def send_busy_indicator(self):
        """Send a "typing" indicator to show that work is in progress.

        Deliberately a no-op for Slack:
        """
        # Currently the Slack Events API does not support the "user_typing" event.
        # We're trying not to use the legacy Slack RTM API as it's deprecated.
        # So for now, we do nothing.
        pass
def send_exception(self, exception):
"""Try to report an exception to the user."""
self.slack_client.chat_postEphemeral(
channel=self.context.get("channel_id"),
user=self.context.get("user_id"),
text=f"Sorry @{self.context.get('user_name')}, an error occurred :sob:\n```{exception}```",
)
# pylint: disable=no-self-use
def delete_message(self, response_url):
"""Delete a message that was previously sent."""
WebhookClient(response_url).send_dict({"delete_original": "true"})
# Prompt the user for various basic inputs
def prompt_for_text(self, action_id, help_text, label, title="Your attention please!"):
"""Prompt the user to enter freeform text into a field.
Args:
action_id (str): Identifier string to attach to the "submit" action.
help_text (str): Markdown string to display as help text.
label (str): Label text to display adjacent to the text field.
title (str): Title to include on the modal dialog.
"""
textentry = {
"type": "input",
"block_id": action_id,
"label": self.text_element(label),
"element": {"type": "plain_text_input", "action_id": action_id, "placeholder": self.text_element(label)},
}
blocks = [self.markdown_block(help_text), textentry]
# In Slack, a textentry element can ONLY be sent in a modal dialog
return self.send_blocks(blocks, callback_id=action_id, ephemeral=True, modal=True, title=title)
def get_prompt_from_menu_choices(self, choices, offset=0):
"""Returns choices list to accommodate for Slack menu limits.
Args:
choices (list): List of (display, value) tuples
offset (int): If set, starts displaying choices at index location from all choices,
as Slack only displays 100 options at a time
Returns:
choices (list): List of (display, value) tuples accommodating for Slack menu limits
"""
choice_length = len(choices)
if choice_length > self.slack_menu_limit:
try:
# Since we are showing "Next..." at the end, this isn't required to show to users anymore
self.send_warning(
f"More than {self.slack_menu_limit} options are available."
f"Slack limits us to only displaying {self.slack_menu_limit} options at a time."
)
except SlackApiError:
pass
new_offset = offset + self.slack_menu_limit - 1
if offset == 0:
choices = choices[: self.slack_menu_limit - 1] # 1 is to leave space for 'next' insert
else:
choices = choices[offset:new_offset]
if choice_length > new_offset:
# Only insert a 'next' offset if we still have more choices left to see
choices.append(("Next...", f"menu_offset-{new_offset}"))
return choices
def prompt_from_menu(
self, action_id, help_text, choices, default=(None, None), confirm=False, offset=0
): # pylint: disable=too-many-arguments
"""Prompt the user for a selection from a menu.
Args:
action_id (str): Identifier string to attach to the "submit" action.
help_text (str): Markdown string to display as help text.
choices (list): List of (display, value) tuples.
default (tuple): Default (display, value) to pre-select.
confirm (bool): If True, prompt the user to confirm their selection (if the platform supports this).
offset (int): If set, starts displaying choices at index location from all choices,
as Slack only displays 100 options at a time.
"""
choices = self.get_prompt_from_menu_choices(choices, offset=offset)
menu = self.select_element(action_id, choices, default=default, confirm=confirm)
cancel_button = {
"type": "button",
"text": self.text_element("Cancel"),
"action_id": "action",
"value": "cancel",
}
blocks | |
'cond_scale_factor': 4,
'cond_encode_depth': 1,
'cond_encode' : {
1 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 64}
},
},
# Encoding the inputs
'main_encode_depth': 4,
'main_encode' : {
1 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 64}
},
2 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 64},
'pool' : {'size' : 2, 'stride' : 2, 'type' : 'max'}},
3 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
},
4 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
'pool' : {'size' : 2, 'stride' : 2, 'type' : 'max'}},
},
# Calculate moments
'combine_moments': 'minus' if method is 'sign' else 'concat',
# ONLY USED IF combine_moments is 'concat'
'combine_moments_encode_depth' : 1,
'combine_moments_encode' : {
1 : {'conv' : {'filter_size': 3, 'stride': 1, 'num_filters' : 128},
}
},
'moments_encode_depth' : 5,
'moments_encode' : {
1 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
},
2 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
},
3 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
},
4 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
},
5 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
}
},
'moments_main_cond_encode_depth': 3,
'moments_main_cond_encode' : {
1 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128}
},
2 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
},
3 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
#'pool': {'size' : 2, 'stride' : 2, 'type' : 'max'}
}
},
# Predict next moments
'delta_moments_encode_depth' : 11,
'delta_moments_encode' : {
1 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
'bypass': 0, 'nonlinearity': nonlin},
2 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
'bypass': 2, 'nonlinearity': nonlin},
3 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
'bypass': 4, 'nonlinearity': nonlin},
4 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
'bypass': {1: 14}, 'nonlinearity': nonlin},
5 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
'bypass': 0, 'nonlinearity': nonlin},
6 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
'bypass': 2, 'nonlinearity': nonlin},
7 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
'bypass': 4, 'nonlinearity': nonlin},
8 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
'bypass': {1: 18}, 'nonlinearity': nonlin},
9 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
'bypass': 0, 'nonlinearity': nonlin},
10 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
'bypass': 2, 'nonlinearity': nonlin},
11 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
'bypass': 4},
},
'combine_delta': 'plus' if method is 'sign' else 'concat',
# ONLY USED IF combine_delta is 'concat'
'combine_delta_encode_depth' : 1,
'combine_delta_encode' : {
1 : {'conv' : {'filter_size': 3, 'stride': 1, 'num_filters' : 128},
}
},
'next_main_encode_depth': 11,
'next_main_encode' : {
1 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
'bypass': 0, 'nonlinearity': nonlin},
2 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
'bypass': 2, 'nonlinearity': nonlin},
3 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
'bypass': 4, 'nonlinearity': nonlin},
4 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
'bypass': {1: 26}, 'nonlinearity': nonlin},
5 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
'bypass': 0, 'nonlinearity': nonlin},
6 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
'bypass': 2, 'nonlinearity': nonlin},
7 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
'bypass': 4, 'nonlinearity': nonlin},
8 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
'bypass': {1: 30}, 'nonlinearity': nonlin},
9 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
'bypass': 0, 'nonlinearity': nonlin},
10 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
'bypass': 2, 'nonlinearity': nonlin},
11 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
'bypass': 4},
},
'deconv_depth': 2,
'deconv' : {
1 : {'deconv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : 128},
'bypass': 4
},
2 : {'deconv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : n_classes},
'bypass': 2
},
}
}
def cfg_mom_flat_bypass(n_classes, use_cond=False, method='sign', nonlin='relu'):
return {
'use_cond': use_cond,
# ONLY USED IF use_cond = True!!!
'cond_scale_factor': 4,
'cond_encode_depth': 1,
'cond_encode' : {
1 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 64}
},
},
# Encoding the inputs
'main_encode_depth': 4,
'main_encode' : {
1 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 64}
},
2 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 64},
'pool' : {'size' : 2, 'stride' : 2, 'type' : 'max'}},
3 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
},
4 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
'pool' : {'size' : 2, 'stride' : 2, 'type' : 'max'}},
},
# ONLY USED IF use_cond = True!!!
'encode_depth': 3,
'encode' : {
1 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128}
},
2 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
},
3 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
#'pool': {'size' : 2, 'stride' : 2, 'type' : 'max'}
}
},
# Calculate moments
'combine_moments': 'minus' if method is 'sign' else 'concat',
# ONLY USED IF combine_moments is 'concat'
'combine_moments_encode_depth' : 1,
'combine_moments_encode' : {
1 : {'conv' : {'filter_size': 3, 'stride': 1, 'num_filters' : 128},
}
},
'moments_encode_depth' : 5,
'moments_encode' : {
1 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
},
2 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
},
3 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
},
4 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
},
5 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
}
},
# Predict next moments
'delta_moments_encode_depth' : 11,
'delta_moments_encode' : {
1 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
'bypass': [0,1], 'nonlinearity': nonlin},
2 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
'nonlinearity': nonlin},
3 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
'bypass': 4, 'nonlinearity': nonlin},
4 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
'nonlinearity': nonlin},
5 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
'bypass': 6, 'nonlinearity': nonlin},
6 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
'nonlinearity': nonlin},
7 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
'bypass': [0,1], 'nonlinearity': nonlin},
8 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
'nonlinearity': nonlin},
9 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
'bypass': 4, 'nonlinearity': nonlin},
10 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
'nonlinearity': nonlin},
11 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
'bypass': 6},
},
'combine_delta': 'plus' if method is 'sign' else 'concat',
# ONLY USED IF combine_delta is 'concat'
'combine_delta_encode_depth' : 1,
'combine_delta_encode' : {
1 : {'conv' : {'filter_size': 3, 'stride': 1, 'num_filters' : 128},
}
},
'deconv_depth': 2,
'deconv' : {
1 : {'deconv' : {'filter_size' : 3, 'stride' : 2, | |
<reponame>hugovk/dbnd
import logging
from datetime import timedelta
from time import sleep
from airflow_monitor.airflow_data_saving import (
save_airflow_monitor_data,
save_airflow_server_info,
)
from airflow_monitor.airflow_instance_details import create_instance_details_list
from airflow_monitor.airflow_monitor_utils import (
dump_unsent_data,
log_fetching_parameters,
log_received_tasks,
save_error_message,
send_metrics,
set_airflow_server_info_started,
wait_interval,
)
from airflow_monitor.airflow_servers_fetching import AirflowServersGetter
from airflow_monitor.errors import AirflowFetchingException
from dbnd._core.errors.friendly_error.tools import logger_format_for_databand_error
from dbnd._core.utils.dotdict import _as_dotted_dict
from dbnd._core.utils.timezone import utcnow
from dbnd._vendor import pendulum
logger = logging.getLogger(__name__)

# Fetch types: tags passed as `fetch_type` to the data fetcher
# (see try_fetching_from_airflow below) to select what is fetched.
COMPLETE = "complete"
INCOMPLETE_TYPE1 = "incomplete_type1"
INCOMPLETE_TYPE2 = "incomplete_type2"
DAGS_ONLY = "dags_only"
def try_fetching_from_airflow(
    airflow_instance_detail, since, fetch_type, incomplete_offset
):
    """
    Try fetching from Airflow and return the data that was fetched.
    If an exception occurred, record it via save_error_message() and
    return None so the caller can skip this iteration.

    Args:
        airflow_instance_detail: per-server state (url, config, data_fetcher).
        since: lower time bound for the data to fetch.
        fetch_type: one of COMPLETE / INCOMPLETE_TYPE1 / INCOMPLETE_TYPE2 /
            DAGS_ONLY.
        incomplete_offset: pagination offset used by the incomplete fetch types.

    Returns:
        The raw exported data, or None on any fetching error.
    """
    try:
        log_fetching_parameters(
            airflow_instance_detail.url,
            since,
            airflow_instance_detail.config.dag_ids,
            airflow_instance_detail.config.fetch_quantity,
            fetch_type,
            incomplete_offset,
            airflow_instance_detail.config.include_logs,
            airflow_instance_detail.config.include_task_args,
            airflow_instance_detail.config.include_xcom,
        )
        data = airflow_instance_detail.data_fetcher.get_data(
            since,
            airflow_instance_detail.config.include_logs,
            airflow_instance_detail.config.include_task_args,
            airflow_instance_detail.config.include_xcom,
            airflow_instance_detail.config.dag_ids,
            airflow_instance_detail.config.fetch_quantity,
            fetch_type,
            incomplete_offset,
        )
        return data
    except AirflowFetchingException as afe:
        # Known fetching errors already carry a user-friendly message.
        message = logger_format_for_databand_error(afe)
        save_error_message(airflow_instance_detail, message)
        return None
    except Exception as e:
        # BUGFIX: the url and the exception were previously swapped in the
        # format() call, producing a message with the values reversed.
        message = "Exception occurred while trying to fetch data from Airflow url {}. Exception: {}".format(
            airflow_instance_detail.url, e
        )
        save_error_message(airflow_instance_detail, message)
        return None
def get_active_dags(export_data):
    """Map each active dag id to the list of its task ids.

    Dags with no "is_active" key are treated as active.
    """
    result = {}
    for dag in export_data.dags:
        if not dag.get("is_active", True):
            continue
        result[dag["dag_id"]] = [task["task_id"] for task in dag["tasks"]]
    return result
def get_max_end_dates(export_data):
    """
    Return the max end date from all task instances.
    If there are no task instances, return max end date from all dag_runs.
    If there are no dag runs as well, return None.
    """
    def _end_dates(rows):
        # Parse every non-null end_date in the given row dicts.
        return [
            pendulum.parse(str(row["end_date"]))
            for row in rows
            if row["end_date"] is not None
        ]

    ti_dates = _end_dates(export_data.task_instances)
    dr_dates = _end_dates(export_data.dag_runs)
    logger.info(
        "Got %d task end dates, the last is %s and got %d dag run end dates, the last is %s",
        len(ti_dates),
        max(ti_dates) if ti_dates else None,
        len(dr_dates),
        max(dr_dates) if dr_dates else None,
    )
    # Task-instance dates take priority; dag-run dates are the fallback.
    candidates = ti_dates or dr_dates
    return max(candidates) if candidates else None
def update_since(airflow_instance_detail, max_end_date, is_incomplete):
    """Advance the sync watermark (since or incomplete_since) to
    max_end_date; a falsy max_end_date leaves the watermark untouched."""
    if is_incomplete:
        if max_end_date:
            logger.info(
                "Updating incomplete since, old value: %s, new value: %s",
                airflow_instance_detail.incomplete_since,
                max_end_date,
            )
            airflow_instance_detail.incomplete_since = max_end_date
        else:
            logger.info(
                "Keeping incomplete since as it was %s",
                airflow_instance_detail.incomplete_since,
            )
    else:
        if max_end_date:
            logger.info(
                "Updating since, old value: %s, new value: %s",
                airflow_instance_detail.since,
                max_end_date,
            )
            airflow_instance_detail.since = max_end_date
        else:
            logger.info("Keeping since as it was %s", airflow_instance_detail.since)
def process_and_update_airflow_server_info(
    airflow_instance_detail, export_data, max_end_date, is_incomplete
):
    """
    Update the per-server info record from the freshly exported data and
    persist it.

    Refreshes the active-dags map and version/path metadata, advances the
    synced_to / incomplete_synced_to watermark (falling back to "now" when
    no end date was seen in this batch), and saves the record via
    save_airflow_server_info().

    Args:
        airflow_instance_detail: per-server state holder.
        export_data: dotted-dict export payload from Airflow.
        max_end_date: newest end date seen in this batch, or None.
        is_incomplete: True when this batch came from an incomplete fetch.
    """
    active_dags = get_active_dags(export_data)
    airflow_instance_detail.update_airflow_server(
        airflow_version=export_data.airflow_version,
        dags_path=export_data.dags_path,
        logs_path=export_data.logs_path,
        airflow_export_version=export_data.airflow_export_version,
        synced_from=airflow_instance_detail.airflow_server_info.synced_from
        or airflow_instance_detail.since,
        active_dags=active_dags,
    )
    if is_incomplete:
        airflow_instance_detail.airflow_server_info.incomplete_synced_to = (
            max_end_date if max_end_date else utcnow()
        )
    else:
        airflow_instance_detail.airflow_server_info.synced_to = (
            max_end_date if max_end_date else utcnow()
        )
    synced_to_name = "incomplete_synced_to" if is_incomplete else "synced_to"
    synced_to = (
        airflow_instance_detail.airflow_server_info.incomplete_synced_to
        if is_incomplete
        else airflow_instance_detail.airflow_server_info.synced_to
    )
    logger.info(
        "Using last end date %s, New %s date is %s",
        max_end_date if max_end_date else None,
        synced_to_name,
        str(synced_to),
    )
    # Fixed: use the module-level logger consistently (was logging.info).
    logger.info(
        "Sending airflow server info: url=%s, synced_from=%s, %s=%s, last_sync_time=%s",
        airflow_instance_detail.airflow_server_info.base_url,
        airflow_instance_detail.airflow_server_info.synced_from,
        synced_to_name,
        str(synced_to),
        airflow_instance_detail.airflow_server_info.last_sync_time,
    )
    save_airflow_server_info(airflow_instance_detail.airflow_server_info)
def do_data_fetching_iteration(
    since, fetch_type, incomplete_offset, airflow_instance_detail
):
    """
    This function performs one fetching iteration from Airflow webserver to Databand.
    This iteration can fetch complete/incomplete according to the parameters given.
    It will fetch from Airflow, send to Databand and return the fetched data.

    Args:
        since: lower time bound for the fetched data.
        fetch_type: COMPLETE / INCOMPLETE_TYPE1 / INCOMPLETE_TYPE2 / DAGS_ONLY.
        incomplete_offset: pagination offset for the incomplete fetch types.
        airflow_instance_detail: per-server state (url, config, server info).

    Returns:
        The fetched payload wrapped as a dotted dict, or None on any failure
        (fetch error, export-plugin error, or save error).
    """
    logger.info(
        "Running sync iteration with since=%s, fetch_type=%s, incomplete_offset=%s",
        since,
        fetch_type,
        incomplete_offset,
    )
    data = try_fetching_from_airflow(
        airflow_instance_detail=airflow_instance_detail,
        since=since,
        fetch_type=fetch_type,
        incomplete_offset=incomplete_offset,
    )
    if data is None:
        logger.warning("Didn't receive any data, probably an error occurred")
        return None
    if "error" in data:
        # The export plugin reports its own failures inside the payload.
        logger.error("Error in Airflow Export Plugin: \n%s", data["error"])
        save_error_message(airflow_instance_detail, data["error"])
        return None
    log_received_tasks(airflow_instance_detail.url, data)
    send_metrics(airflow_instance_detail.airflow_server_info.base_url, data)
    try:
        save_airflow_monitor_data(
            data,
            airflow_instance_detail.url,
            airflow_instance_detail.airflow_server_info.last_sync_time,
        )
        logger.info(
            "Total %s task instances, %s dag runs, %s dags saved to databand web server",
            len(data["task_instances"]),
            len(data["dag_runs"]),
            len(data["dags"]),
        )
        export_data = _as_dotted_dict(**data)
        return export_data
    except Exception as e:
        logger.exception(
            "An error occurred while trying to sync data from airflow to databand: %s",
            e,
        )
        dump_unsent_data(data)
        # NOTE(review): e is an exception object, not a message string —
        # confirm save_error_message stringifies it as intended.
        save_error_message(airflow_instance_detail, e)
        return None
def sync_dags_only(airflow_instance_detail):
    """Fetch only the raw dag definitions (no runs, no task instances)."""
    logger.info(
        "Syncing dags list from %s",
        airflow_instance_detail.airflow_server_info.base_url,
    )
    # DAGS_ONLY needs neither a time bound nor a pagination offset.
    do_data_fetching_iteration(None, DAGS_ONLY, None, airflow_instance_detail)
def sync_all_complete_data(airflow_instance_detail):
    """
    Repeatedly pull completed data (task instances and dag runs) until a
    page comes back smaller than fetch_quantity, which means we caught up.
    """
    logger.info("Starting to sync complete data from %s", airflow_instance_detail.url)
    page_size = airflow_instance_detail.config.fetch_quantity
    while True:
        export_data = do_data_fetching_iteration(
            airflow_instance_detail.since,
            COMPLETE,
            None,
            airflow_instance_detail,
        )
        if export_data is None:
            # Fetch failed; error already recorded downstream.
            return
        last_end = get_max_end_dates(export_data)
        process_and_update_airflow_server_info(
            airflow_instance_detail, export_data, last_end, False
        )
        update_since(airflow_instance_detail, last_end, False)
        got = max(len(export_data.task_instances), len(export_data.dag_runs))
        if got < page_size:
            break
    logger.info(
        "Finished syncing complete data from %s",
        airflow_instance_detail.airflow_server_info.base_url,
    )
def sync_all_incomplete_data_type1(airflow_instance_detail):
    """
    Fetch all incomplete task instances from completed dag runs.
    If the returned amount of data is less than fetch_quantity, we can stop.

    Paging strategy: dag runs are sorted by end date; once a dag run other
    than the newest one has been fully received, incomplete_since is bumped
    to that run's end date and the offset is reset to the number of task
    instances already received for the newest run. While only one (possibly
    partial) run is visible, the since is kept and the offset grows instead.
    """
    logger.info(
        "Starting to sync incomplete data from complete dag runs for %s",
        airflow_instance_detail.url,
    )
    offset = 0
    last_dag_run_in_previous_iteration = None
    while True:
        export_data = do_data_fetching_iteration(
            since=airflow_instance_detail.incomplete_since,
            fetch_type=INCOMPLETE_TYPE1,
            incomplete_offset=offset,
            airflow_instance_detail=airflow_instance_detail,
        )
        if export_data is None:
            return
        fetch_count = len(export_data.task_instances)
        # No more to fetch, we can stop now
        if fetch_count < airflow_instance_detail.config.fetch_quantity:
            if len(export_data.dag_runs) != 0:
                # NOTE(review): dag_runs is unsorted at this point, so [0]
                # is not necessarily the earliest end date — confirm intent.
                new_since = pendulum.parse(str(export_data.dag_runs[0]["end_date"]))
                update_since(airflow_instance_detail, new_since, True)
                process_and_update_airflow_server_info(
                    airflow_instance_detail, export_data, new_since, True
                )
            else:
                if last_dag_run_in_previous_iteration:
                    new_since = pendulum.parse(
                        str(last_dag_run_in_previous_iteration["end_date"])
                    )
                    update_since(airflow_instance_detail, new_since, True)
                    process_and_update_airflow_server_info(
                        airflow_instance_detail, export_data, new_since, True
                    )
            break
        export_data.dag_runs.sort(key=lambda x: pendulum.parse(str(x["end_date"])))
        last_full_dag_run = (
            export_data.dag_runs[-2] if len(export_data.dag_runs) >= 2 else None
        )
        last_dag_run = export_data.dag_runs[-1]
        # Count how many of the fetched task instances belong to the newest run.
        number_of_task_instances_in_last_dag_run = len(
            [
                task_instance
                for task_instance in export_data.task_instances
                if task_instance["dag_id"] == last_dag_run["dag_id"]
                and task_instance["execution_date"] == last_dag_run["execution_date"]
            ]
        )
        # Here we assume that we received all task instances from all dag runs except for the last one.
        # The last run may be full but it may be not.
        if last_full_dag_run:
            # Bump the since according to the last full dag run and change the offset to be the number of task instances
            # that we received in the last dag run
            max_end_date = pendulum.parse(str(last_full_dag_run["end_date"]))
            update_since(airflow_instance_detail, max_end_date, True)
            offset = number_of_task_instances_in_last_dag_run
            process_and_update_airflow_server_info(
                airflow_instance_detail, export_data, max_end_date, True
            )
        else:
            if last_dag_run_in_previous_iteration and (
                last_dag_run_in_previous_iteration["dag_id"] != last_dag_run["dag_id"]
                or last_dag_run_in_previous_iteration["execution_date"]
                != last_dag_run["execution_date"]
            ):
                # We received one run which is different than the previous one, bump the since and change the offset
                max_end_date = pendulum.parse(
                    str(last_dag_run_in_previous_iteration["end_date"])
                )
                update_since(airflow_instance_detail, max_end_date, True)
                offset = number_of_task_instances_in_last_dag_run
                process_and_update_airflow_server_info(
                    airflow_instance_detail, export_data, max_end_date, True
                )
            else:
                # We received only one run and don't know if its full - keep the since and bump the offset
                offset += number_of_task_instances_in_last_dag_run
        last_dag_run_in_previous_iteration = last_dag_run
    logger.info(
        "Finished syncing incomplete data from complete dag runs for %s",
        airflow_instance_detail.url,
    )
def sync_all_incomplete_data_type2(airflow_instance_detail):
    """
    Sync all incomplete task instances (from incomplete dag runs) and
    incomplete dag runs. Advances via a pagination offset instead of the
    since watermark; stops when a page comes back smaller than
    fetch_quantity.
    """
    logger.info(
        "Starting to sync incomplete from incomplete dag runs for %s",
        airflow_instance_detail.url,
    )
    page_size = airflow_instance_detail.config.fetch_quantity
    offset = 0
    # Fixed lower time bound for incomplete data; never updated — pagination
    # via the offset does the advancing.
    since = utcnow() - timedelta(
        days=airflow_instance_detail.config.oldest_incomplete_data_in_days
    )
    while True:
        logger.info(
            "Starting sync iteration of incomplete data for %s with incomplete offset %s-%s",
            airflow_instance_detail.url,
            offset,
            offset + page_size,
        )
        export_data = do_data_fetching_iteration(
            since, INCOMPLETE_TYPE2, offset, airflow_instance_detail
        )
        if export_data is None:
            return
        update_since(airflow_instance_detail, get_max_end_dates(export_data), True)
        got = max(len(export_data.task_instances), len(export_data.dag_runs))
        if got < page_size:
            break
        offset += page_size
        logger.info(
            "Fetched incomplete data from %s, new incomplete offset: %d",
            airflow_instance_detail.airflow_server_info.base_url,
            offset,
        )
    logger.info(
        "Finished syncing incomplete data from %s",
        airflow_instance_detail.airflow_server_info.base_url,
    )
def fetch_one_server_until_synced(airflow_instance_detail):
    """
    Fetch continuously all types of data from one server until we are up to date.
    """
    # Order matters: dags first, then completed data, then the two
    # incomplete passes. The type-1 pass deliberately works from the
    # watermark captured before the complete sync advanced it.
    sync_dags_only(airflow_instance_detail)
    sync_all_complete_data(airflow_instance_detail)
    sync_all_incomplete_data_type1(airflow_instance_detail)
    sync_all_incomplete_data_type2(airflow_instance_detail)
def sync_all_servers(monitor_args, airflow_instance_details):
details_to_remove = []
for airflow_instance_detail in airflow_instance_details:
logger.info("Starting to sync server %s", airflow_instance_detail.url)
set_airflow_server_info_started(airflow_instance_detail.airflow_server_info)
fetch_one_server_until_synced(airflow_instance_detail)
logger.info("Finished syncing server %s", airflow_instance_detail.url)
if monitor_args.history_only:
logger.info(
"Finished syncing history of server %s", airflow_instance_detail.url
)
details_to_remove.append(airflow_instance_detail)
if len(details_to_remove) > 0:
for detail in details_to_remove:
| |
# gh_stars: 100-1000 (dataset-extraction marker converted to a comment so the module parses)
from .cspace import CSpace
from .. import robotsim
from ..model import collide
from .cspaceutils import EmbeddedCSpace
import math
import random
class RobotCSpace(CSpace):
    """A basic robot cspace that allows collision free motion.

    Feasibility tests registered here (in dependency order):

    - "joint limits": configuration lies inside the robot's joint limits.
    - "setconfig": side-effect "test" that loads x into the robot model so
      later tests can read it; always passes.
    - "calcbb" (collider only): recomputes the robot's overall bounding box,
      used as a broad-phase filter by the per-object collision tests.
    - "self collision", plus one "obj collision i" / "terrain collision i"
      test per world object when a collider is given.

    Args:
        robot (RobotModel): the robot that's moving.
        collider (:class:`WorldCollider`, optional): a collide.WorldCollider
            instance instantiated with the world in which the robot lives.
            Any ignored collisions in the collider will be respected in the
            feasibility tests of this CSpace.
            If this is not provided, then only self-collisions will be checked.

    .. warning::
        If your robot has non-standard joints, like a free-
        floating base or continuously rotating (spin) joints, you may need to
        overload the :meth:`sample` method. The default implementation
        assumes that everything with unbounded limits is a rotational joint.
    """
    def __init__(self,robot,collider=None):
        CSpace.__init__(self)
        self.robot = robot
        self.setBounds(list(zip(*robot.getJointLimits())))
        self.collider = collider
        self.addFeasibilityTest((lambda x: self.inJointLimits(x)),"joint limits")
        # Side-effect test: load x into the robot model for downstream tests.
        def setconfig(x):
            self.robot.setConfig(x)
            return True
        if collider:
            # bb0 is an inverted (empty) AABB; bb is the shared mutable box
            # written by calcbb and read by the collide closures below.
            bb0 = ([float('inf')]*3,[float('-inf')]*3)
            bb = [bb0[0],bb0[1]]
            def calcbb(x):
                bb[0] = bb0[0]
                bb[1] = bb0[1]
                for i in range(self.robot.numLinks()):
                    g = self.robot.link(i).geometry()
                    if not g.empty():
                        bbi = g.getBB()
                        # Rebinds bb[0]/bb[1] to fresh lists, so bb0 itself
                        # is never mutated.
                        bb[0] = [min(a,b) for (a,b) in zip(bb[0],bbi[0])]
                        bb[1] = [max(a,b) for (a,b) in zip(bb[1],bbi[1])]
                return True
            def objCollide(o):
                # Broad phase: skip the narrow-phase query when the boxes
                # are disjoint.
                obb = self.collider.world.rigidObject(o).geometry().getBB()
                if not collide.bb_intersect(obb,bb): return False
                return any(True for _ in self.collider.robotObjectCollisions(self.robot.index,o))
            def terrCollide(o):
                obb = self.collider.world.terrain(o).geometry().getBB()
                if not collide.bb_intersect(obb,bb): return False
                return any(True for _ in self.collider.robotTerrainCollisions(self.robot.index,o))
            self.addFeasibilityTest(setconfig,"setconfig")
            self.addFeasibilityTest(calcbb,"calcbb",dependencies="setconfig")
            self.addFeasibilityTest((lambda x: not self.selfCollision()),"self collision",dependencies="setconfig")
            #self.addFeasibilityTest((lambda x: not self.envCollision()),"env collision")
            # o=o binds the loop variable per-lambda (avoids late binding).
            for o in range(self.collider.world.numRigidObjects()):
                self.addFeasibilityTest((lambda x,o=o: not objCollide(o)),"obj collision "+str(o)+" "+self.collider.world.rigidObject(o).getName(),dependencies="calcbb")
            for o in range(self.collider.world.numTerrains()):
                self.addFeasibilityTest((lambda x,o=o: not terrCollide(o)),"terrain collision "+str(o)+" "+self.collider.world.terrain(o).getName(),dependencies="calcbb")
        else:
            self.addFeasibilityTest(setconfig,"setconfig")
            self.addFeasibilityTest((lambda x: not self.selfCollision()),"self collision",dependencies="setconfig")
        # Per-DOF failure counters incremented by inJointLimits.
        self.joint_limit_failures = [0]*len(self.bound)
        self.properties['geodesic'] = 1
    def addConstraint(self,checker,name=None):
        """Adds an extra feasibility test (arbitrary predicate on x)."""
        self.addFeasibilityTest(checker,name)
    def sample(self):
        """Overload this to implement custom sampling strategies or to handle
        non-standard joints. This one will handle spin joints and
        rotational axes of floating bases."""
        res = CSpace.sample(self)
        for i,x in enumerate(res):
            # Unbounded DOFs come back as NaN; replace with a uniform angle.
            if math.isnan(x):
                res[i] = random.uniform(0,math.pi*2.0)
        return res
    def inJointLimits(self,x):
        """Checks joint limits of the configuration x"""
        for i,(xi,bi) in enumerate(zip(x,self.bound)):
            if xi < bi[0] or xi > bi[1]:
                self.joint_limit_failures[i] += 1
                return False
        return True
    def selfCollision(self,x=None):
        """Checks whether the robot at its current configuration is in
        self collision"""
        #This should be faster than going through the collider...
        if x is not None: self.robot.setConfig(x)
        return self.robot.selfCollides()
        #if not self.collider: return False
        #return any(self.collider.robotSelfCollisions(self.robot.index))
    def envCollision(self,x=None):
        """Checks whether the robot at its current configuration is in
        collision with the environment."""
        if not self.collider: return False
        if x is not None: self.robot.setConfig(x)
        for o in range(self.collider.world.numRigidObjects()):
            if any(self.collider.robotObjectCollisions(self.robot.index,o)):
                return True
        for o in range(self.collider.world.numTerrains()):
            if any(self.collider.robotTerrainCollisions(self.robot.index,o)):
                return True
        return False
    def interpolate(self,a,b,u):
        """Interpolates between a and b using the robot's own geodesic."""
        return self.robot.interpolate(a,b,u)
    def distance(self,a,b):
        """Distance between configurations in the robot's own metric."""
        return self.robot.distance(a,b)
    def sendPathToController(self,path,controller):
        """Given a planned CSpace path 'path' and a SimRobotController 'controller',
        sends the path so that it is executed correctly by the controller (this assumes
        a fully actuated robot)."""
        controller.setMilestone(path[0])
        for q in path[1:]:
            controller.appendMilestoneLinear(q)
class ClosedLoopRobotCSpace(RobotCSpace):
    """A closed loop cspace. Allows one or more IK constraints to be
    maintained during the robot's motion.

    Attributes:
        solver (IKSolver): the solver containing all IK constraints
        maxIters (int): the maximum number of iterations for numerical IK
            solver
        tol (float): how closely the IK constraint must be met, in meters and/
            or radians

    To satisfy the IK constraint, the motion planner ensures that configuration
    samples are projected to the manifold of closed-loop IK solutions. To
    create edges between samples a and b, the straight line path a and b is
    projected to the manifold via an IK solve.
    """
    def __init__(self,robot,iks,collider=None):
        """iks may be a single IK objective or an iterable of objectives."""
        RobotCSpace.__init__(self,robot,collider)
        self.solver = robotsim.IKSolver(robot)
        if hasattr(iks,'__iter__'):
            for ik in iks:
                self.solver.add(ik)
        else:
            self.solver.add(iks)
        #root finding iterations
        self.maxIters = 100
        self.tol = 1e-3
        self.addFeasibilityTest((lambda x: self.closedLoop(x)),'closed loop constraint')
    def setIKActiveDofs(self,activeSet):
        """Marks that only a subset of the DOFs of the robot are to be used for
        solving the IK constraint.

        Args:
            activeSet (list of int): the robot DOF indices that should be active.
        """
        self.solver.setActiveDofs(activeSet)
    def sample(self):
        """Samples directly on the contact manifold. The basic method samples
        arbitrarily in the configuration space and then solves IK constraints.
        This may be an ineffective method especially for floating-base robots,
        since the floating joints may be sampled arbitrarily.
        """
        x = RobotCSpace.sample(self)
        return self.solveConstraints(x)
    def sampleneighborhood(self,c,r):
        """Samples a neighborhood in ambient space and then projects onto the
        contact manifold.
        """
        x = RobotCSpace.sampleneighborhood(self,c,r)
        return self.solveConstraints(x)
    def solveConstraints(self,x):
        """Given an initial configuration of the robot x, attempts to solve the
        IK constraints given in this space. Return value is the best
        configuration found via local optimization.
        """
        self.robot.setConfig(x)
        self.solver.setMaxIters(self.maxIters)
        self.solver.setTolerance(self.tol)
        # The solve result flag is intentionally ignored: the best
        # configuration found is returned even if it did not converge.
        res = self.solver.solve()
        return self.robot.getConfig()
    def closedLoop(self,config=None,tol=None):
        """Returns true if the closed loop constraint has been met at config,
        or if config==None, the robot's current configuration."""
        if config is not None: self.robot.setConfig(config)
        e = self.solver.getResidual()
        # Fixed: identity check for the None sentinel (was "tol==None").
        if tol is None: tol = self.tol
        return max(abs(ei) for ei in e) <= tol
    def interpolate(self,a,b,u):
        """Interpolates on the manifold. Used by edge collision checking"""
        x = RobotCSpace.interpolate(self,a,b,u)
        return self.solveConstraints(x)
    def interpolationPath(self,a,b,epsilon=1e-2):
        """Creates a discretized path on the contact manifold between the
        points a and b, with resolution epsilon.
        """
        d = self.distance(a,b)
        nsegs = int(math.ceil(d/epsilon))
        if nsegs <= 1: return [a,b]
        res = [a]
        for i in range(nsegs-1):
            u = float(i+1)/float(nsegs)
            res.append(self.interpolate(a,b,u))
        res.append(b)
        return res
    def discretizePath(self,path,epsilon=1e-2):
        """Given a :class:`CSpace` path ``path``, generates a path that
        satisfies closed-loop constraints up to the given distance between
        milestones.
        """
        if path is None: return None
        if len(path)==0: return []
        respath = [path[0]]
        for a,b in zip(path[:-1],path[1:]):
            respath += self.interpolationPath(a,b,epsilon)[1:]
        return respath
    def sendPathToController(self,path,controller,epsilon=1e-2):
        """Given a :class:`CSpace` path ``path``, sends the path to be executed
        to the :class:`SimRobotController` ``controller``.

        This discretizes the path and sends it as a piecewise linear curve,
        limited in speed by the robot's maximum velocity.

        .. note::
            This isn't the best thing to do for robots with slow acceleration
            limits and/or high inertias because it ignores acceleration. A
            better solution can be found in the MInTOS package or the C++ code
            in Klampt/Cpp/Planning/RobotTimeScaling.h.
        """
        dpath = self.discretizePath(path,epsilon)
        # Robustness fix: a None/empty path previously crashed on dpath[0].
        if not dpath:
            return
        vmax = controller.model().getVelocityLimits()
        assert len(dpath[0]) == len(vmax)
        controller.setMilestone(dpath[0])
        for a,b in zip(dpath[:-1],dpath[1:]):
            # Time each segment so the fastest-moving DOF stays within vmax.
            dt = 0.0
            for i in range(len(a)):
                if vmax[i] == 0:
                    if a[i] != b[i]: print("ClosedLoopRobotCSpace.sendPathToController(): Warning, path moves on DOF %d with maximum velocity 0"%(i,))
                else:
                    dt = max(dt,abs(a[i]-b[i])/vmax[i])
            #this does a piecewise linear interpolation
            controller.appendLinear(dt,b)
class ImplicitManifoldRobotCSpace(RobotCSpace):
    """A closed loop cspace with an arbitrary numerical manifold f(q)=0
    to constrain the robot's motion. The argument implicitConstraint
    should be a function f(q) returning a list of values that should be
    equal to 0 up to the given tolerance. Essentially this is a
    ClosedLoopRobotCSpace except with a user-provided function.

    See :class:`ClosedLoopRobotCSpace`.
    """
    def __init__(self,robot,implicitConstraint,collider=None):
        # BUGFIX: was RobotCSpace.__init__self(robot,collider), which raised
        # AttributeError and never initialized the base class.
        RobotCSpace.__init__(self,robot,collider)
        self.implicitConstraint = implicitConstraint
        #root finding iterations
        self.maxIters = 100
        self.tol = 1e-3
        self.addFeasibilityTest((lambda x: self.onManifold(x)),'implicit manifold constraint')
    def sample(self):
        """Samples directly on the contact manifold"""
        # BUGFIX: was RobotCSpace.sample() without self (TypeError at runtime).
        x = RobotCSpace.sample(self)
        return self.solveManifold(x)
    def onManifold(self,x,tol=None):
        """Returns true if the manifold constraint has been met at x, i.e.
        max_i |f(x)_i| <= tol (default: self.tol)."""
        e = self.implicitConstraint.eval(x)
        if tol is None: tol = self.tol
        return max(abs(ei) for ei in e) <= tol
    def solveManifold(self,x,tol=None,maxIters=None):
        """Solves the manifold constraint starting from x, to the given
        tolerance and with the given maximum iteration count. Default
        uses the values set as attributes of this class.
        """
        if tol is None: tol = self.tol
        if maxIters is None: maxIters = self.maxIters
        import rootfind
        rootfind.setXTolerance(1e-8)
        rootfind.setFTolerance(tol)
        rootfind.setVectorField(self.implicitConstraint)
        # NOTE(review): maxIters is resolved but never passed to rootfind —
        # confirm whether rootfind honors a global iteration setting.
        (res,x,val) = rootfind.findRootsBounded(x,self.bound)
        return x
    def interpolate(self,a,b,u):
        """Interpolates on the manifold. Used by edge collision checking"""
        x = RobotCSpace.interpolate(self,a,b,u)
        return self.solveManifold(x)
class EmbeddedRobotCSpace(EmbeddedCSpace):
"""A basic robot cspace that allows collision free motion of a *subset*
of joints. The subset is given by the indices in the list "subset"
provided to the constructor. The configuration space is R^k where k
is the number of DOFs in the subset.
Args:
ambientspace (RobotCSpace): a RobotCSpace, ClosedLoopRobotCSpace, etc.
subset (list of ints): the indices of moving DOFs
xinit (configuration, optional): the reference configuration, or None
to use the robot's current configuration as the reference.
"""
def __init__(self,ambientspace,subset,xinit=None):
self.robot = ambientspace.robot
if xinit is None:
xinit = | |
front = back.front # NOQA
# import IPython
# IPython.embed()
ut.embed()
@blocking_slot()
def dev_cls(back):
"""Help -> Developer Mode"""
logger.info('[back] dev_cls')
logger.info('\n'.join([''] * 100))
if back.ibs is not None:
back.ibs.reset_table_cache()
back.refresh_state()
from wbia.plottool import draw_func2 as df2
df2.update()
@slot_()
@backreport
def dev_export_annotations(back):
ibs = back.ibs
ibs.export_to_xml()
def start_web_server_parallel(back, browser=True):
import wbia
ibs = back.ibs
if back.web_ibs is None:
logger.info('[guiback] Starting web service')
# back.web_ibs = wbia.opendb_in_background(dbdir=ibs.get_dbdir(), web=True, browser=browser)
back.web_ibs = wbia.opendb_bg_web(
dbdir=ibs.get_dbdir(), web=True, browser=browser, start_job_queue=False
)
logger.info('[guiback] Web service started')
else:
logger.info('[guiback] CANNOT START WEB SERVER: WEB INSTANCE ALREADY RUNNING')
def kill_web_server_parallel(back):
if back.web_ibs is not None:
logger.info('[guiback] Stopping web service')
# back.web_ibs.terminate()
back.web_ibs.terminate2()
back.web_ibs = None
else:
logger.info('[guiback] CANNOT TERMINATE WEB SERVER: WEB INSTANCE NOT RUNNING')
@blocking_slot()
def fix_and_clean_database(back):
"""Help -> Fix/Clean Database"""
logger.info('[back] Fix/Clean Database')
back.ibs.fix_and_clean_database()
back.front.update_tables()
@blocking_slot()
def run_integrity_checks(back):
back.ibs.run_integrity_checks()
# --------------------------------------------------------------------------
# File Slots
# --------------------------------------------------------------------------
    @blocking_slot()
    def new_database(back, new_dbdir=None):
        """File -> New Database

        Args:
            new_dbdir (str, optional): target directory for the new database;
                when None the user is prompted (default = None)

        CommandLine:
            python -m wbia.gui.guiback new_database --show

        Example:
            >>> # DISABLE_DOCTEST
            >>> from wbia.gui.guiback import *  # NOQA
            >>> import wbia
            >>> #back = testdata_guiback(defaultdb='testdb1')
            >>> back = testdata_guiback(defaultdb=None)
            >>> dbdir = None
            >>> result = back.new_database(dbdir)
            >>> ut.quit_if_noshow()
            >>> gt.qtapp_loop(qwin=back.front, freq=10)
        """
        if new_dbdir is None:
            # Legacy text-prompt flow below is kept but permanently disabled;
            # the modal NewDatabaseWidget branch is the live path.
            old = False
            if old:
                new_dbname = back.user_input(
                    msg='What do you want to name the new database?', title='New Database'
                )
                if new_dbname is None or len(new_dbname) == 0:
                    logger.info('Abort new database. new_dbname=%r' % new_dbname)
                    return
                new_dbdir_options = ['Choose Directory', 'My Work Dir']
                reply = back.user_option(
                    msg='Where should I put the new database?',
                    title='Import Images',
                    options=new_dbdir_options,
                    default=new_dbdir_options[1],
                    use_cache=False,
                )
                if reply == 'Choose Directory':
                    logger.info('[back] new_database(): SELECT A DIRECTORY')
                    putdir = gt.select_directory(
                        'Select new database directory',
                        other_sidebar_dpaths=[back.get_work_directory()],
                    )
                elif reply == 'My Work Dir':
                    putdir = back.get_work_directory()
                else:
                    logger.info('Abort new database')
                    return
                new_dbdir = join(putdir, new_dbname)
                if not exists(putdir):
                    raise ValueError('Directory %r does not exist.' % putdir)
                if exists(new_dbdir):
                    raise ValueError('New DB %r already exists.' % new_dbdir)
                ut.ensuredir(new_dbdir)
                logger.info('[back] new_database(new_dbdir=%r)' % new_dbdir)
                back.open_database(dbdir=new_dbdir)
            else:
                from wbia.guitool.__PYQT__.QtCore import Qt  # NOQA
                from wbia.guitool.__PYQT__ import QtGui  # NOQA
                # Modal dialog; the chosen directory is opened via
                # back.open_database through the on_chosen callback.
                dlg = NewDatabaseWidget.as_dialog(
                    back.front, back=back, on_chosen=back.open_database, mode='new'
                )
                dlg.exec_()
    @blocking_slot()
    def open_database(back, dbdir=None):
        """
        File -> Open Database

        Args:
            dbdir (str, optional): database directory to open; when None the
                user is prompted with a directory picker (default = None)

        CommandLine:
            python -m wbia.gui.guiback --test-open_database

        Example:
            >>> # xdoctest: +REQUIRES(--gui)
            >>> from wbia.gui.guiback import * # NOQA
            >>> back = testdata_guiback(defaultdb='testdb1')
            >>> #testdb0 = sysres.db_to_dbdir('testdb0')
            >>> testdb1 = sysres.db_to_dbdir('testdb1')
            >>> print('[TEST] TEST_OPEN_DATABASE testdb1=%r' % testdb1)
            >>> back.open_database(testdb1)
            >>> #print('[TEST] TEST_OPEN_DATABASE testdb0=%r' % testdb0)
            >>> #back.open_database(testdb0)
            >>> import wbia
            >>> #dbdir = join(wbia.sysres.get_workdir(), 'PZ_MTEST', '_ibsdb')
            >>> dbdir = None
            >>> result = back.open_database(dbdir)
            >>> print(result)
        """
        if dbdir is None:
            logger.info('[back] new_database(): SELECT A DIRECTORY')
            # director
            dbdir = gt.select_directory(
                'Open a database directory',
                other_sidebar_dpaths=[back.get_work_directory()],
            )
            # User cancelled the picker.
            if dbdir is None:
                return
        logger.info('[back] open_database(dbdir=%r)' % dbdir)
        with ut.Indenter(lbl=' [opendb]'):
            try:
                # should this use wbia.opendb? probably. at least it should be
                # be request IBEISControl
                # ibs = IBEISControl.IBEISController(dbdir=dbdir)
                ibs = IBEISControl.request_IBEISController(dbdir=dbdir)
                back.connect_wbia_control(ibs)
            except Exception as ex:
                ut.printex(ex, 'caught Exception while opening database')
                raise
            else:
                # Only remember this as the default when opening succeeded.
                sysres.set_default_dbdir(dbdir)
@blocking_slot()
def export_database_as_csv(back):
"""File -> Export Database"""
logger.info('[back] export_database_as_csv')
dump_dir = join(back.ibs.get_dbdir(), 'CSV_DUMP')
ut.ensuredir(dump_dir)
ut.view_directory(dump_dir)
back.ibs.dump_database_csv()
    @blocking_slot()
    def backup_database(back):
        """File -> Backup Database

        Delegates to the controller's backup routine.
        """
        logger.info('[back] backup_database')
        back.ibs.backup_database()
@blocking_slot()
def make_database_duplicate(back):
"""File -> Copy Database"""
logger.info('[back] make_database_duplicate')
def on_chosen(new_dbdir):
back.ibs.copy_database(new_dbdir)
dlg = NewDatabaseWidget.as_dialog(
back.front, back=back, on_chosen=on_chosen, mode='copy'
)
dlg.exec_()
    @blocking_slot()
    def import_images_from_file(
        back, gpath_list=None, refresh=True, as_annots=False, clock_offset=False
    ):
        r"""
        File -> Import Images From File

        Args:
            gpath_list (list, optional): image paths to import; when None the
                user is prompted with a file-selection dialog.
            refresh (bool): passed to _process_new_images (default = True).
            as_annots (bool): forwarded to add_images.
            clock_offset (bool): forwarded to _process_new_images.

        Returns:
            list: gid_list of the newly added image ids.

        Raises:
            ValueError: if no database is open yet (back.ibs is None).

        Example:
            >>> # xdoctest: +REQUIRES(--gui)
            >>> print('[TEST] GET_TEST_IMAGE_PATHS')
            >>> # The test api returns a list of interesting chip indexes
            >>> gpath_list = list(map(utool.unixpath, grabdata.get_test_gpaths()))
            >>> print('[TEST] IMPORT IMAGES FROM FILE\n * gpath_list=%r' % gpath_list)
            >>> gid_list = back.import_images(gpath_list=gpath_list)
            >>> thumbtup_list = ibs.get_image_thumbtup(gid_list)
            >>> imgpath_list = [tup[1] for tup in thumbtup_list]
            >>> gpath_list2 = ibs.get_image_paths(gid_list)
            >>> for path in gpath_list2:
            >>>     assert path in imgpath_list, "Imported Image not in db, path=%r" % path
            >>> print('[TEST] * len(gid_list)=%r' % len(gid_list))
        """
        logger.info('[back] import_images_from_file')
        if back.ibs is None:
            raise ValueError('back.ibs is None! must open IBEIS database first')
        if gpath_list is None:
            gpath_list = gt.select_images('Select image files to import')
        ibs = back.ibs
        gid_list = back.ibs.add_images(
            gpath_list,
            as_annots=as_annots,
            location_for_names=ibs.cfg.other_cfg.location_for_names,
        )
        back._process_new_images(refresh, gid_list, clock_offset=clock_offset)
        return gid_list
@blocking_slot()
def import_button_click(back):
msg = 'How do you want to import images?'
ans = back.user_option(
msg=msg,
title='Import Images',
options=[
'Directory',
'Files',
'Smart XML',
'Encounters (1)',
'Encounters (2)',
],
use_cache=False,
default='Directory',
)
if ans == 'Directory':
back.import_images_from_dir()
elif ans == 'Files':
back.import_images_from_file()
elif ans == 'Smart XML':
back.import_images_from_dir_with_smart()
elif ans == 'Encounters (1)':
back.import_images_from_encounters_1()
elif ans == 'Encounters (2)':
back.import_images_from_encounters_2()
elif ans is None:
pass
else:
raise Exception('Unknown anser=%r' % (ans,))
@blocking_slot()
def import_images_from_dir(
back,
dir_=None,
size_filter=None,
refresh=True,
clock_offset=False,
return_dir=False,
defaultdir=None,
):
"""File -> Import Images From Directory"""
logger.info('[back] import_images_from_dir')
if dir_ is None:
dir_ = gt.select_directory(
'Select directory with images in it', directory=defaultdir
)
if dir_ is None:
return
gpath_list = ut.list_images(dir_, fullpath=True, recursive=True)
if size_filter is not None:
raise NotImplementedError('Can someone implement the size filter?')
ibs = back.ibs
gid_list = back.ibs.add_images(
gpath_list, location_for_names=ibs.cfg.other_cfg.location_for_names
)
back._process_new_images(refresh, gid_list, clock_offset=clock_offset)
if return_dir:
return gid_list, dir_
else:
return gid_list
@blocking_slot()
def import_images_from_dir_with_smart(
back,
dir_=None,
size_filter=None,
refresh=True,
smart_xml_fpath=None,
defaultdir=None,
):
"""File -> Import Images From Directory with smart
Args:
dir_ (None): (default = None)
size_filter (None): (default = None)
refresh (bool): (default = True)
Returns:
list: gid_list
CommandLine:
python -m wbia.gui.guiback --test-import_images_from_dir_with_smart --show
python -m wbia.gui.guiback --test-import_images_from_dir_with_smart --show --auto
Example:
>>> # DEV_GUI_DOCTEST
>>> # xdoctest: +REQUIRES(--gui)
>>> from wbia.gui.guiback import * # NOQA
>>> back = testdata_guiback(defaultdb='freshsmart_test', delete_ibsdir=True, allow_newdir=True)
>>> ibs = back.ibs
>>> defaultdir = ut.truepath('~/lewa-desktop/Desktop/GZ_Foal_Patrol_22_06_2015')
>>> dir_ = None if not ut.get_argflag('--auto') else join(defaultdir, 'Photos')
>>> smart_xml_fpath = None if not ut.get_argflag('--auto') else join(defaultdir, 'Patrols', 'LWC_000526LEWA_GZ_FOAL_PATROL.xml')
>>> size_filter = None
>>> refresh = True
>>> gid_list = back.import_images_from_dir_with_smart(dir_, size_filter, refresh, defaultdir=defaultdir, smart_xml_fpath=smart_xml_fpath)
>>> result = ('gid_list = %s' % (str(gid_list),))
>>> print(result)
>>> ut.quit_if_noshow()
>>> gt.qtapp_loop(back.mainwin, frequency=100)
"""
logger.info('[back] import_images_from_dir_with_smart')
gid_list, add_dir_ = back.import_images_from_dir(
dir_=dir_,
size_filter=size_filter,
refresh=False,
clock_offset=False,
return_dir=True,
defaultdir=defaultdir,
)
back._group_images_with_smartxml(
gid_list,
refresh=refresh,
smart_xml_fpath=smart_xml_fpath,
defaultdir=dirname(add_dir_),
)
    def _group_images_with_smartxml(
        back, gid_list, refresh=True, smart_xml_fpath=None, defaultdir=None
    ):
        """
        Clusters the newly imported images into occurrences using a SMART
        patrol XML file.

        Args:
            gid_list (list): rowids of the just-imported images; if None or
                empty no grouping is attempted.
            refresh (bool): if True, refresh the special imagesets and GUI
                tables afterwards.
            smart_xml_fpath (str): path to the patrol XML; if None the user
                is prompted to select one.
            defaultdir (str): initial directory for the XML file dialog.

        Side effects:
            On a bad XML selection the freshly imported images are DELETED
            from the database (rollback) and the AssertionError is re-raised.
        """
        if gid_list is not None and len(gid_list) > 0:
            if smart_xml_fpath is None:
                name_filter = 'XML Files (*.xml)'
                xml_path_list = gt.select_files(
                    caption='Select Patrol XML File:',
                    directory=defaultdir,
                    name_filter=name_filter,
                    single_file=True,
                )
                # Rollback: if the user picked zero/many files or an empty
                # path, delete the images that were just imported so the
                # database is left unchanged.
                # NOTE(review): the assertion messages contain the typo
                # 'specity' (sic); left untouched as they are runtime strings.
                try:
                    assert len(xml_path_list) == 1, 'Must specity one Patrol XML file'
                    smart_xml_fpath = xml_path_list[0]
                    assert (
                        len(smart_xml_fpath) > 0
                    ), 'Must specity a valid Patrol XML file'
                except AssertionError as e:
                    back.ibs.delete_images(gid_list)
                    logger.info(
                        (
                            '[back] ERROR: Parsing Patrol XML file failed, '
                            'rolling back by deleting %d images...'
                        )
                        % (len(gid_list))
                    )
                    raise e
            back.ibs.compute_occurrences_smart(gid_list, smart_xml_fpath)
        if refresh:
            back.update_special_imagesets_()
            # back.front.update_tables([gh.IMAGESET_TABLE])
            back.front.update_tables()
def _process_new_images(back, refresh, gid_list, clock_offset=False):
if refresh:
back.update_special_imagesets_()
back.front.update_tables([gh.IMAGE_TABLE, gh.IMAGESET_TABLE])
if clock_offset:
co_wgt = clock_offset_gui.ClockOffsetWidget(back.ibs, gid_list)
co_wgt.show()
return gid_list
@blocking_slot()
def import_images_from_encounters(
back,
level=1,
dir_list=None,
size_filter=None,
refresh=True,
clock_offset=False,
return_dir=False,
defaultdir=None,
):
import os
""" File -> Import Images From Encounters"""
logger.info('[back] import_images_from_encounters')
assert level in [1, 2]
if dir_list is None:
if level == 1:
prompt = 'Select folder(s) of encounter(s) (1 level - folders with only images)'
if level == 2:
prompt = 'Select folder(s) of encounter(s) (2 levels - folders of folders with only images)'
dir_list = gt.select_directories(prompt, directory=defaultdir)
if dir_list is None or len(dir_list) == 0:
return
# We need to check that the first directory is not a subdirectory of the others
if len(dir_list) >= 2:
subdir1 = dir_list[0]
subdir2, _ = os.path.split(dir_list[1])
if subdir1 == subdir2:
dir_list = dir_list[1:]
# Check the folders for invalid | |
# Repository: muteria/muteria
from __future__ import print_function
import os
import sys
import re
import shutil
import imp
import logging
import filecmp
from distutils.spawn import find_executable
import muteria.common.mix as common_mix
from muteria.drivers import DriversUtils
ERROR_HANDLER = common_mix.ErrorHandler
class KTestTestFormat(object):
@classmethod
def installed(cls, custom_binary_dir=None):
""" Check that the tool is installed
:return: bool reprenting whether the tool is installed or not
(executable accessible on the path)
- True: the tool is installed and works
- False: the tool is not installed or do not work
"""
for prog in (cls.tool,):
if custom_binary_dir is not None:
prog = os.path.join(custom_binary_dir, prog)
if not DriversUtils.check_tool(prog=prog, args_list=['--version'],\
expected_exit_codes=[1]):
return False
if not DriversUtils.check_tool(prog=cls.stdbuf, args_list=['--version'],\
expected_exit_codes=[0]):
return False
return True
#~ def installed()
@classmethod
def get_test_replay_tool(cls, custom_replay_tool_binary_dir=None):
if custom_replay_tool_binary_dir is None:
kr_file = find_executable(cls.tool)
ERROR_HANDLER.assert_true(kr_file is not None, \
"Could not fine test replay tool on path", __file__)
else:
kr_file = os.path.join(custom_replay_tool_binary_dir, cls.tool)
ERROR_HANDLER.assert_true(os.path.isfile(kr_file), \
"replay tool not found in custom_binary_dir", \
__file__)
return kr_file
#~ def get_test_replay_tool()
@classmethod
def _get_replay_prog_args(cls, executable_file, test_file, \
custom_replay_tool_binary_dir=None):
prog = cls.tool
if custom_replay_tool_binary_dir is not None:
prog = os.path.join(custom_replay_tool_binary_dir, prog)
ERROR_HANDLER.assert_true(os.path.isfile(prog), \
"The tool {} is missing from the specified dir {}"\
.format(cls.tool, custom_replay_tool_binary_dir), \
__file__)
args = [executable_file, test_file]
return prog, args
#~ def _get_replay_prog_args()
    @classmethod
    def execute_test(cls, executable_file, test_file, env_vars, stdin=None, \
                        must_exist_dir_list=None, \
                        timeout=None, collected_output=None, \
                        custom_replay_tool_binary_dir=None):
        """ Replay a single ktest file against an executable via klee-replay.

        :param executable_file: program under test.
        :param test_file: the .ktest file to replay.
        :param env_vars: dict of extra environment variables (or None).
        :param stdin: optional stdin passed to the replay process.
        :param must_exist_dir_list: directories (relative to the per-test
            working dir) created before the replay runs.
        :param timeout: optional timeout in seconds, forwarded to
            klee-replay via the KLEE_REPLAY_TIMEOUT environment variable.
        :param collected_output: when not None, a list extended in place
            with (exit_status, cleaned_output, timed_out_bool).
        :param custom_replay_tool_binary_dir: optional directory containing
            the klee-replay binary.
        :return: PASS_TEST_VERDICT or FAIL_TEST_VERDICT (FAIL on timeout or
            segfault return codes).
        """
        prog, args = cls._get_replay_prog_args(executable_file, test_file, \
                                            custom_replay_tool_binary_dir)
        # klee-replay may create files or dir. in KLEE version with LLVM-3.4,
        # those are created in a temporary dir set as <cwd>.temps
        # XXX XXX. make sure each test has its own
        test_work_dir = test_file+".execdir"
        klee_replay_temps = test_work_dir + '.temps'
        # Remove any leftovers from a previous run of the same test.
        for d in (test_work_dir, klee_replay_temps):
            if os.path.isdir(d):
                try:
                    shutil.rmtree(d)
                except PermissionError:
                    cls._dir_chmod777(d)
                    shutil.rmtree(d)
        if not os.path.isdir(test_work_dir):
            os.mkdir(test_work_dir)
        if must_exist_dir_list is not None:
            for d in must_exist_dir_list:
                td = os.path.join(test_work_dir, d)
                if not os.path.isdir(td):
                    os.makedirs(td)
        # XXX Execution setup
        tmp_env = os.environ.copy()
        if env_vars is not None:
            #for e, v in env_vars.items():
            #    tmp_env[e] = v
            tmp_env.update(env_vars)
        timeout_return_codes = cls.timedout_retcodes + \
                                        DriversUtils.EXEC_TIMED_OUT_RET_CODE
        if timeout is not None:
            tmp_env['KLEE_REPLAY_TIMEOUT'] = str(timeout)
            # Give klee-replay a grace margin beyond its own timeout.
            kt_over = 10 # 1second
            timeout += kt_over
        else:
            # DBG
            logging.warning("@KTEST: calling ktest execution without timeout.")
        # XXX Get the parsing regexes to use
        # The --help text reveals the klee-replay version (see _get_regexes).
        retcode, out, err = DriversUtils.execute_and_get_retcode_out_err(\
                                            prog=prog, args_list=['--help'], \
                                            merge_err_to_out=True)
        clean_regex, status_regex = cls._get_regexes(out, \
                                                    clean_everything=True)
        # XXX Execute the ktest
        #logging.debug("DBG: test_work_dir is {}. its content is {}".format(
        #    test_work_dir, list(os.listdir(test_work_dir))))
        #if collected_output is not None:
        # XXX: Use stdbuf to line buffer the stderr to avoid mixing or
        # err between klee-replay and the executed prog
        use_stdbuf = True
        if use_stdbuf:
            args = ["--output=L", "--error=L", prog] + args
            prog = "stdbuf"
            # TODO: check that stdbuf is installed
        retcode, out, err = DriversUtils.execute_and_get_retcode_out_err(\
                                            prog=prog, args_list=args, env=tmp_env, \
                                            stdin=stdin, \
                                            timeout=timeout, timeout_grace_period=5, \
                                            merge_err_to_out=True, cwd=test_work_dir)
        retcode, out, exit_status = cls._remove_output_noise(retcode, out, \
                                                    clean_regex, status_regex)
        # In klee-replay, when exit_status here is not None, retcode is 0
        # When there is an issue, like timeout, exit_status is None and
        # retcode has the code of the issue
        if exit_status is None:
            exit_status = retcode
        if collected_output is not None:
            collected_output.extend((exit_status, out, \
                                    (retcode in timeout_return_codes or \
                                    retcode in DriversUtils.EXEC_TIMED_OUT_RET_CODE)))
        #else:
        #    retcode, out, err = DriversUtils.execute_and_get_retcode_out_err(\
        #                            prog=prog, args_list=args, env=tmp_env, \
        #                            timeout=timeout, timeout_grace_period=5, \
        #                            out_on=False, err_on=False, \
        #                            cwd=test_work_dir)
        # XXX: Go back to previous CWD
        # Clean up the per-test working directories created above.
        for d in (test_work_dir, klee_replay_temps):
            if os.path.isdir(d):
                try:
                    shutil.rmtree(d)
                except PermissionError:
                    cls._dir_chmod777(d)
                    shutil.rmtree(d)
        #if must_exist_dir_list is not None:
        #    try:
        #        shutil.rmtree(test_work_dir)
        #    except PermissionError:
        #        cls._dir_chmod777(test_work_dir)
        #        shutil.rmtree(test_work_dir)
        if retcode in timeout_return_codes + \
                                    DriversUtils.EXEC_SEGFAULT_OUT_RET_CODE:
            verdict = common_mix.GlobalConstants.FAIL_TEST_VERDICT
        else:
            verdict = common_mix.GlobalConstants.PASS_TEST_VERDICT
        return verdict
    #~ def execute_test()
@staticmethod
def _dir_chmod777(dirpath):
try:
for root_, dirs_, files_ in os.walk(dirpath):
for sub_d in dirs_:
if os.path.isdir(os.path.join(root_, sub_d)):
os.chmod(os.path.join(root_, sub_d), 0o777)
for f_ in files_:
if os.path.isfile(os.path.join(root_, f_)):
os.chmod(os.path.join(root_, f_), 0o777)
except PermissionError:
ret,out,_ = DriversUtils.execute_and_get_retcode_out_err('sudo', \
args_list=['chmod', '777', '-R', dirpath])
ERROR_HANDLER.assert_true(ret == 0, \
"'sudo chmod 777 -R "+dirpath+"' failed (returned "+\
str(ret)+"), error: "+out, __file__)
#~ def _dir_chmod777()
    @classmethod
    def get_replay_test_wrapper_str(cls, exe_env_var, ktest_env_var, \
                                        timeout_env_var, \
                                        custom_replay_tool_binary_dir=None):
        """ Build a bash wrapper script (returned as a string) replaying a
        ktest.

        The wrapper reads the executable and ktest paths from the shell
        variables named by `exe_env_var` / `ktest_env_var`, exports the
        timeout from `timeout_env_var`, pipes klee-replay's output through
        a python snippet that strips the klee-replay noise, and rewrites
        klee-replay timeout exit codes to the shell `timeout` return code.
        """
        # XXX: This is used by shadow to replay generated tests through
        # executing base dev test used to generate them.
        # TODO: Let shadow store the mapping between dev test and gen test
        # TODO: Use the mapping to run tests using the returned wrapper by this function
        prog, args = cls._get_replay_prog_args('${}'.format(exe_env_var), \
                                            '${}'.format(ktest_env_var), \
                                            custom_replay_tool_binary_dir)
        # NOTE(review): _remove_output_noise takes (retcode, out, clean_regex,
        # status_regex) and returns three values, yet this embedded snippet
        # calls it with one argument and unpacks two results -- this looks
        # stale and would raise TypeError when the wrapper runs. Confirm
        # against the current _remove_output_noise signature.
        python_code = ';'.join(['import sys', \
                        'from muteria.drivers.testgeneration' \
                        + '.testcase_formats.ktest.ktest import KTestTestFormat', \
                        'r, e_s = KTestTestFormat._remove_output_noise(sys.stdin.read())',\
                        'print(r)'])
        # NOTE(review): os.system returns the raw wait status, not the
        # command's exit code (which sits in the high byte on POSIX) --
        # presumably intended to capture `timeout`'s code (124); verify.
        bash_timeout_retcode = os.system('timeout 0.1 sleep 1')
        ret_str = "#! /bin/bash\n\n"
        ret_str += "set -u\nset -o pipefail\n\n"
        ret_str += "export KLEE_REPLAY_TIMEOUT={}\n".format(timeout_env_var)
        ret_str += " ".join([prog] + args) + ' 2>&1 | {} -c "{}"\n'.format(\
                                                sys.executable, python_code)
        ret_str += "exit_code=$?\n"
        ret_str += '{} -c "exit(not($exit_code in {}))" && exit_code={}\n'\
                                        .format(sys.executable, \
                                                cls.timedout_retcodes, \
                                                bash_timeout_retcode)
        ret_str += "exit $exit_code\n"
        return ret_str
    #~ def get_replay_test_wrapper_str()
timedout_retcodes = (88,) # taken from klee_replay source code
tool = 'klee-replay'
stdbuf = 'stdbuf'
# Newer version (after klee github commit 88bb205)
# (88bb205e422ee2aaf75594e4e314b21f77f219e3)
clean_everything_regex_new = re.compile("^KLEE-REPLAY: ")
clean_part_regex_new = re.compile("(" + "|".join(["^KLEE-REPLAY: NOTE: ",\
"^KLEE-REPLAY: WARNING: ",\
"^KLEE-REPLAY: klee_warning: ",\
"^KLEE-REPLAY: klee_warning_once: ",\
"^KLEE-REPLAY: klee_assume",\
]) + ")")
status_regex_new = re.compile(\
"^(KLEE-REPLAY: NOTE:\\s+)(EXIT STATUS: .*?)"+\
"(\\s+\\([0-9]+\\s+seconds\\))$")
# the option "--keep-replay-dir" was added on klee github commit 5b1214a,
# right after commit 88bb205
# So we will use that option to decide whether to use old or new regex
# Older version (before klee github commit 88bb205)
clean_everything_regex_old = re.compile("(" + "|".join([\
#"^EXIT STATUS: .* \\([0-9]+\\s+seconds\\)$", \
#""+tool+": EXIT STATUS: .* \\([0-9]+\\s+seconds\\)$",\
""+tool+": received signal [0-9]+\\s+. "+\
"Killing monitored process\\(es\\)$", \
"^note: (pty|pipe) (master|slave): ",\
""+tool+": PTY (MASTER|SLAVE): EXIT STATUS: ", \
"^warning: check_file .*: .* "+\
"mismatch: [0-9]+ [vV][sS] [0-9]+$", \
"^RUNNING GDB: /usr/bin/gdb --pid [0-9]+ -q --batch", \
"^TIMEOUT: ATTEMPTING GDB EXIT$", \
#"^ERROR: ", \
#"^Error: (executable|chroot:) ", \
#"^klee_range(\(|:)", \
#"^make_symbolic mismatch, different sizes: ", \
#"^WARNING: ", \
#"^rootdir: ", \
#""+tool+": error: input file ", \
""+tool+": TEST CASE: ", \
""+tool+": ARGS: ", \
]) + ")")
clean_part_regex_old = re.compile(("(" + "|".join([\
#"^EXIT STATUS: .* \\([0-9]+\\s+seconds\\)$", \
#""+tool+": EXIT STATUS: .* \\([0-9]+\\s+seconds\\)$", \
""+tool+": received signal [0-9]+\\s+. "+\
"Killing monitored process\\(es\\)$", \
"^note: (pty|pipe) (master|slave): ",\
""+tool+": PTY (MASTER|SLAVE): EXIT STATUS: ", \
"^warning: check_file .*: .* "+\
"mismatch: [0-9]+ [vV][sS] [0-9]+$", \
"^RUNNING GDB: /usr/bin/gdb --pid [0-9]+ -q --batch", \
"^TIMEOUT: ATTEMPTING GDB EXIT$", \
]) + ")"))
status_regex_old = re.compile("^("+tool+\
":\\s+)?(EXIT STATUS: .*?)(\\s+\\([0-9]+\\s+seconds\\))?$")
@classmethod
def _get_regexes(cls, out, clean_everything=True):
if '--keep-replay-dir' in out:
clean_regex = cls.clean_everything_regex_new if clean_everything \
else cls.clean_part_regex_new
status_regex = cls.status_regex_new
else:
clean_regex = cls.clean_everything_regex_old if clean_everything \
else cls.clean_part_regex_old
status_regex = cls.status_regex_old
return clean_regex, status_regex
#~ def _get_regexes()
    @classmethod
    def _remove_output_noise(cls, retcode, out, clean_regex, status_regex):
        """ Strip klee-replay chatter from a replay's merged output.

        :param retcode: return code of the klee-replay invocation.
        :param out: merged stdout/stderr text of the replay.
        :param clean_regex: lines matching this regex are dropped.
        :param status_regex: regex extracting the "EXIT STATUS: ..." line.
        :return: tuple (retcode, cleaned_output, exit_status) where
            exit_status is the replayed program's numeric status for an
            ABNORMAL exit and None otherwise; retcode is rewritten to the
            timeout code when a "TIMED OUT" status is seen with retcode 0.
        """
        res = []
        # Remember whether the output ended with a newline so the cleaned
        # text can preserve it.
        if len(out) > 0 and out[-1] == '\n':
            out = out[:-1]
            last_char = "\n"
        else:
            last_char = ""
        # If not None, must be an integer
        exit_status = None
        found_exit_status = False
        # Round-trip through bytes with 'backslashreplace' so undecodable
        # characters survive as escapes instead of raising.
        for line in out.encode('utf-8', 'backslashreplace').splitlines():
            line = line.decode('utf-8', 'backslashreplace')
            if status_regex.search(line) is not None:
                ERROR_HANDLER.assert_true(not found_exit_status,
                                "Exit status found multiple times in output", \
                                __file__)
                found_exit_status = True
                # Keep only the "EXIT STATUS: ..." capture group of the line.
                line = status_regex.sub("\g<2>", line)
                ls = line.split()
                if ls[-2] == 'ABNORMAL':
                    try:
                        exit_status = int(ls[-1])
                    except ValueError:
                        ERROR_HANDLER.error_exit(\
                                    "Invalid exit status {}".format(ls[-1]), \
                                    __file__)
                elif ls[-1] == 'OUT' and ls[-2] == 'TIMED':
                    # Case where klee-replay call to gdb fails to attach process
                    if retcode == 0:
                        retcode = cls.timedout_retcodes[0]
                    # klee-replay may put another exit status afterwards, so
                    # allow the status line to be matched again.
                    found_exit_status = False
                res.append("@MUTERIA.KLEE-REPLAY: "+line)
            elif clean_regex.search(line) is None:
                # None is matched
                res.append(line)
        res = '\n'.join(res) + last_char
        return retcode, res, exit_status
    #~ def _remove_output_noise()
ktest_extension = '.ktest'
STDIN_KTEST_DATA_FILE = "muteria-stdin-ktest-data"
@classmethod
def ktest_fdupes(cls, *args, custom_replay_tool_binary_dir=None):
"""
This function computes the fdupes of the klee | |
0, 0, 0, 0
])
sno_m = np.array(
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
])
snh_m = np.array(
[
36.8501, 34.24807, 32.16088, 30.77985, 29.79539, 29.23881, 28.52221, 27.95867, 28.12564, 27.55166,
27.4647, 26.45589, 26.1776, 26.28891, 26.78984, 27.29076, 27.12379, 27.42295, 28.33436, 28.2787,
27.7082, 26.68548, 26.57068, 26.62286, 27.4647, 28.33436, 29.26664, 30.63027, 30.58853, 28.16043,
29.79539, 29.82322, 30.30328, 32.13305, 33.77149, 34.30373, 35.63953, 36.06393, 35.93174, 35.45168,
35.69519, 35.62561, 35.37863, 34.9438, 34.14371, 32.70355, 31.46515, 30.68245
])
snd_m = np.array([
8.83796, 8.12874, 7.57721, 7.03733, 6.8276, 6.70331, 6.67651, 6.70797, 6.80895, 6.87226, 6.83148,
6.51687, 6.16452, 5.97622, 5.89466, 5.93893, 6.02127, 6.24266, 6.58679, 6.56193, 6.40035, 5.95447,
5.83406, 5.73386, 5.86902, 6.04691, 6.325, 6.23723, 6.22169, 6.20149, 6.64116, 6.71884, 6.84702,
7.18881, 7.56439, 7.74422, 8.00834, 8.26041, 8.38353, 8.42198, 8.3377, 8.19958, 8.01416, 7.7753,
7.56245, 7.18726, 6.94567, 6.74992
])
xnd_m = np.array(
[
14.285, 14.324, 14.253, 14.12885714, 13.86557143, 13.82085714, 13.61228571, 13.56142857,
13.39714286, 13.14242857, 12.781, 12.69185714, 12.45014286, 12.30028571, 12.18071429, 11.84928571,
11.56014286, 11.286, 11.00357143, 10.82, 10.60885714, 10.41957143, 10.21028571, 10.09157143,
10.10342857, 10.096, 10.16971429, 10.31971429, 10.37285714, 10.37185714, 10.397, 10.38342857,
10.517, 10.738, 10.97457143, 11.09728571, 11.08542857, 11.01028571, 11.37128571, 11.45142857,
11.70314286, 11.81, 11.59014286, 11.96185714, 11.94042857, 12.18742857, 12.21185714, 12.12128571
])
salk_m = np.array(
[7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
])
q_m = np.array([
25837, 24434.42857, 23460.57143, 23183.14286, 21537.57143, 23435.57143, 22513.71429, 22524, 21785.71429,
20117.57143, 18841.71429, 21912.14286, 20802.42857, 19755.28571, 20390.71429, 18081.42857, 17573.42857,
18296.71429, 18018.28571, 18088.42857, 17634.85714, 17306.28571, 17140.28571, 17623.71429, 18903.57143,
18585.71429, 18803.71429, 20603.14286, 19821.28571, 18723.71429, 19266, 18838, 20200.71429, 21855,
21706.57143, 20578.14286, 19279.14286, 18634, 24062.85714, 22179.28571, 21564, 21389.71429, 17987.42857,
24012.42857, 22299.85714, 22383.71429, 21308, 19839.42857
])
rnd = np.random.randn(48)
# 각 유입수 성상의 Std. Var. value, assume 10% of mean value
si_s = 0
ss_s = 0.1 * ss_m
xi_s = 0.1 * xi_m
xs_s = 0.1 * xs_m
xbh_s = 0.1 * xbh_m
xba_s = 0
xp_s = 0
so_s = 0
sno_s = 0
snh_s = 0.1 * snh_m
snd_s = 0.1 * snd_m
xnd_s = 0.1 * xnd_m
salk_s = 0
q_s = 0.1 * q_m
rnd = np.random.randn(48)
si = si_m + si_s * rnd
ss = ss_m + ss_s * rnd
xi = xi_m + xi_s * rnd
xs = xs_m + xs_s * rnd
xbh = xbh_m + xbh_s * rnd
xba = xba_m + xba_s * rnd
xp = xp_m + xp_s * rnd
so = so_m + so_s * rnd
sno = sno_m + sno_s * rnd
snh = snh_m + snh_s * rnd
snd = snd_m + snd_s * rnd
xnd = xnd_m + xnd_s * rnd
salk = salk_m + salk_s * rnd
q = q_m + q_s * rnd
si_3 = sum(si * q) / sum(q)
ss_3 = sum(ss * q) / sum(q)
xi_3 = sum(xi * q) / sum(q)
xs_3 = sum(xs * q) / sum(q)
xbh_3 = sum(xbh * q) / sum(q)
xba_3 = sum(xba * q) / sum(q)
xp_3 = sum(xp * q) / sum(q)
so_3 = sum(so * q) / sum(q)
sno_3 = sum(sno * q) / sum(q)
snh_3 = sum(snh * q) / sum(q)
snd_3 = sum(snd * q) / sum(q)
xnd_3 = sum(xnd * q) / sum(q)
salk_3 = sum(salk * q) / sum(q)
"""
List of variables :
0=V, 1=Si, 2=Ss, 3=Xi, 4=Xs, 5=Xbh, 6=Xba, 7=Xp, 8=So, 9=Sno, 10=Snh, 11=Snd, 12=Xnd, 13=Salk
(ref. BSM1 report Tbl. 1)
"""
influent_mixed_instant = [0.66, si_3, ss_3, xi_3, xs_3, xbh_3, xba_3, xp_3, so_3, sno_3, snh_3, snd_3,
xnd_3, salk_3]
influent_var_instant = [si, ss, xi, xs, xbh, xba, xp, so, sno, snh, snd, xnd, salk]
elif switch == 4: # High, morning
# 각 유입수 성상의 Mean value
si_m = 1.5 * np.array(
[30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30,
30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30
])
ss_m = 1.5 * np.array(
[63.63455, 61.67313, 61.71973, 62.16018, 64.57526, 67.84871, 72.14054, 72.47069, 69.7643, 67.49915,
62.83833, 62.27237143, 53.74197, 54.44886, 50.06769, 46.35457, 44.35819, 42.57931, 40.00033, 40.38873,
40.77713, 40.46641, 40.83539, 40.46641, 40.50525, 40.52467, 40.31105, 40.11685, 40.04693, 40.34212,
40.58293, 40.89365, 41.16553, 41.86465, 42.42395, 43.94649, 48.77431, 59.88648, 71.46084, 80.00567,
94.90087, 105.7761, 115.2104, 120.011, 118.9701, 112.1109, 102.4553, 94.81153
])
xi_m = 1.5 * np.array(
[52.28757143, 51.66285714, 46.76185714, 49.95214286, 48.007, 48.50685714, 48.31542857, 39.62985714,
38.09571429, 36.67671429, 34.69028571, 31.66557143, 28.23228571, 26.256, 23.77214286, 21.898,
21.01385714, 19.36242857, 19.47071429, 19.28542857, 18.47757143, 18.88957143, 19.06842857, 18.80642857,
18.04585714, 18.102, 16.91514286, 16.862, 16.75885714, 16.83957143, 16.54742857, 16.87985714,
16.81342857, 16.53628571, 17.664, 17.901, 20.22471429, 22.31228571, 31.09085714, 37.80471429,
42.14871429, 50.65642857, 59.44585714, 70.141, 77.07, 82.70885714, 87.174, 84.82457143
])
xs_m = 1.5* np.array(
[228.8422857, 224.8784286, 213.1182857, 210.2168571, 209.8551429, 209.8774286, 212.3002857, 211.598,
207.301, 198.8494286, 192.2178571, 186.6502857, 181.1207143, 173.3857143, 165.7001429, 157.7538571,
149.8708571, 142.9755714, 137.0042857, 131.949, 128.582, 125.5931429, 123.5375714, 122.3611429,
121.5711429, 120.4541429, 119.0282857, 117.3594286, 115.659, 114.534, 113.6197143, 112.9752857,
112.5301429, 112.2802857, 112.3448571, 112.8771429, 113.3478571, 116.9234286, 122.7555714, 133.1755714,
148.2372857, 162.0105714, 175.9845714, 190.1444286, 206.4641429, 221.929, 235.1714286, 249.2904286
])
xbh_m = 1.5 * np.array(
[31.23642857, 30.72685714, 28.87557143, 28.90757143, 28.65114286, 28.70928571, 28.95714286, 27.91428571,
27.26628571, 26.16942857, 25.212, 24.25714286, 23.26142857, 22.18257143, 21.05257143, 19.96128571,
18.98714286, 18.03742857, 17.38614286, 16.80371429, 16.34, 16.05385714, 15.84528571, 15.68528571,
15.51285714, 15.39485714, 15.10471429, 14.91371429, 14.71328571, 14.597, 14.46285714, 14.42828571,
14.37128571, 14.313, 14.44514286, 14.531, 14.84128571, 15.47042857, 17.09414286, 18.99771429,
21.15414286, 23.62971429, 26.15885714, 28.92071429, 31.50371429, 33.84857143, 35.816, 37.12385714
])
xba_m = np.array(
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
])
xp_m = np.array(
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
])
so_m = np.array(
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
])
sno_m = np.array(
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
])
snh_m = 1.5 * np.array(
[30.24762, 30.21283, 31.04771, 31.67387, 33.15925, 35.11773, 37.86587, 37.86587, 36.8501, 36.47441,
34.38721, 34.67942, 32.06695, 29.89975, 27.06813, 24.70265, 23.38772, 22.19802, 20.31607, 20.54218,
20.71611, 20.7509, 20.66393, 20.61175, 20.54218, 20.43782, 20.40303, 20.15953, 20.02734, 19.99951,
20.10039, 20.40303, 20.82047, 21.16834, 21.6136, 22.1354, 24.68178, 29.65625, 34.24807, 37.93544,
43.39692, 46.61468, 49.24802, 49.99941, 49.24802, 45.74154, 40.85751, 39.03469
])
snd_m = 1.5 * np.array(
[6.36346, 6.16731, 6.17197, 6.21602, 6.45753, 6.78487, 7.21405, 7.24707, 6.97643, 6.74992, 6.28383,
6.227238571, 5.3742, 5.44489, 5.00677, 4.63546, 4.43582, 4.25793, 4.00003, 4.03887, 4.07771, 4.04664,
4.08354, 4.04664, 4.05053, 4.05247, 4.03111, 4.01169, 4.00469, 4.03421, 4.05829, 4.08937, 4.11655,
4.18647, 4.2424, 4.39465, 4.87743, 5.98865, 7.14608, 8.00057, 9.49009, 10.57761, 11.52104, 12.0011,
11.89701, 11.21109, 10.24553, 9.48115
])
xnd_m = 1.5 * np.array(
[11.74314286, 11.55142857, 10.85542857, 10.86742857, 10.77085714, 10.793, 10.88585714, 10.49428571,
10.25071429, 9.838, 9.478428571, 9.119142857, 8.745, 8.339428571, 7.914285714, 7.504285714, 7.138,
6.781142857, 6.536285714, 6.317142857, 6.142714286, 6.035142857, 5.956571429, 5.896714286, 5.832,
5.787285714, 5.678571429, 5.606714286, 5.531142857, 5.487571429, 5.437285714, 5.424285714, 5.402857143,
5.381, 5.430571429, 5.462714286, 5.579571429, | |
= data_dgraph_result['standard_count'][0]['count']
return (
rt_recent_result_list,
rd_recent_result_list,
tdw_recent_result_list,
standard_recent_result_list,
standard_count,
)
def parse_recent_type_result(data_dgraph_result, key_name_list):
    """Collect per-day index counts out of a dgraph aggregation result.

    Each name in ``key_name_list`` is expected to end with
    ``_<created_day>`` and to index a one-element ``[{'count': n}]`` list
    in ``data_dgraph_result``.  Returns a list of
    ``{'created_day', 'index_count'}`` dicts (empty for a falsy key list).
    """
    if not key_name_list:
        return []
    return [
        {
            'created_day': key_name.rsplit('_', 1)[1],
            'index_count': data_dgraph_result[key_name][0]['count'],
        }
        for key_name in key_name_list
    ]
def dgraph_get_project_count(dgraph_query):
    """Rewrite a project-listing dgraph query into a count query.

    The projected ``project_id`` field is swapped for ``count(uid)`` so
    the same query body returns only the number of matching projects.
    """
    projected_field = 'project_id:ProjectInfo.project_id'
    return dgraph_query.replace(projected_field, 'count(uid)')
def gen_zero_data_result():
    """Return the canonical empty search-result payload.

    Counters are zero, the list-valued fields are empty, and the key
    insertion order matches the populated result shape.
    """
    zero_payload = dict(
        count=0,
        project_list=[],
        bk_biz_count=0,
        data_source_count=0,
        results=[],
        dataset_count=0,
        project_count=0,
    )
    return zero_payload
def floating_window_query_dgraph(
params,
conn,
need_detail_type,
need_only_uids=False,
need_all_data_dict_list=False,
need_recent_detail_list=False,
data_source_distribute=False,
data_source_detail_distribute=False,
data_type_distribute=False,
sankey_diagram_distribute=False,
): # 悬浮框查询接口,dgraph实现
# 以下 为输入参数的处理
bk_biz_id = params.get('bk_biz_id')
project_id = params.get('project_id')
tag_codes_list = params.get('tag_ids')
tag_codes_list = parse_tag_codes_list(tag_codes_list)
keyword = params.get('keyword')
keyword = keyword.strip()
if keyword and len(keyword) < 3:
raise ValidationError
single_tag_code = params.get('tag_code')
cal_type_list = params.get('cal_type')
cal_type_list = [] if cal_type_list is None else cal_type_list
has_standard = params.get('has_standard') # 是否是tag节点并且下面有标准的
me_type = params.get('me_type')
only_standard = True if CAL_TYPE_ONLY_STANDARD in cal_type_list else False
only_not_standard = True if CAL_TYPE_ONLY_NOT_STANDARD in cal_type_list else False
platform = params.get('platform', TYPE_ALL)
data_set_type = params.get('data_set_type', TYPE_ALL)
created_by = params.get('created_by', None)
# 其实创建时间和终止创建时间两个参数要同时存在
if params.get('created_at_start') and params.get('created_at_end'):
created_at_start = str(arrow.get(params.get('created_at_start')).format('YYYY-MM-DDTHH:mm:ss+08:00'))
created_at_end = str(arrow.get(params.get('created_at_end')).format('YYYY-MM-DDTHH:mm:ss+08:00'))
else:
created_at_start = None
created_at_end = None
# 按照存储类型过滤
storage_type = params.get('storage_type', None)
# 按照广度、热度排序
range_operate = params.get('range_operate', None)
range_score = params.get('range_score', None)
heat_operate = params.get('heat_operate', None)
heat_score = params.get('heat_score', None)
importance_operate = params.get('importance_operate', None)
importance_score = params.get('importance_score', None)
asset_value_operate = params.get('asset_value_operate', None)
asset_value_score = params.get('asset_value_score', None)
assetvalue_to_cost_operate = params.get('assetvalue_to_cost_operate', None)
assetvalue_to_cost = params.get('assetvalue_to_cost', None)
storage_capacity_operate = params.get('storage_capacity_operate', None)
# 默认单位为byte,前端传递的单位为MB
storage_capacity = params['storage_capacity'] * 1024 * 1024 if params.get('storage_capacity', None) else None
order_time = params.get('order_time', None) if need_only_uids and (not platform == 'tdw') else None
order_heat = params.get('order_heat', None) if need_only_uids and (not platform == 'tdw') else None
order_range = params.get('order_range', None) if need_only_uids and (not platform == 'tdw') else None
order_importance = params.get('order_importance', None) if need_only_uids and (not platform == 'tdw') else None
order_asset_value = params.get('order_asset_value', None) if need_only_uids and (not platform == 'tdw') else None
order_assetvalue_to_cost = (
params.get('order_assetvalue_to_cost', None) if need_only_uids and (not platform == 'tdw') else None
)
order_storage_capacity = (
params.get('order_storage_capacity', None) if need_only_uids and (not platform == 'tdw') else None
)
parent_tag_code = params.get('parent_tag_code', 'all')
page_size = params.get('page_size', None)
page = params.get('page', None)
if platform == 'tdw' and project_id:
project_id = None
if data_set_type == 'raw_data' and project_id:
project_id = None
standard_content_id = params.get('standard_content_id', None)
token_pkey = params.get('token_pkey', None)
token_msg = params.get('token_msg', None)
# 以上 为输入参数的处理
if need_detail_type == NEED_DATA_SET_ID_DETAIL: # 数据字典接口
# 下面过滤条件的组合,搜索结果为空
if (
(platform == TYPE_TDW and data_set_type == TYPE_RAW_DATA)
or (only_standard and platform == TYPE_TDW)
or (only_standard and data_set_type == TYPE_RAW_DATA)
or (only_not_standard and platform == TYPE_TDW)
or (only_not_standard and data_set_type == TYPE_RAW_DATA)
or (storage_type == 'tdw' and platform == 'bk_data')
or (storage_type and data_set_type == TYPE_RAW_DATA)
or (range_operate and range_score and platform == TYPE_TDW)
or (heat_operate and heat_score and platform == TYPE_TDW)
or (importance_operate and importance_score and platform == TYPE_TDW)
or (asset_value_operate and asset_value_score and platform == TYPE_TDW)
or (assetvalue_to_cost_operate and assetvalue_to_cost and platform == TYPE_TDW)
or (storage_capacity_operate and storage_capacity and platform == TYPE_TDW)
):
return gen_zero_data_result()
is_virtual_other_node_pre = False
is_virtual_other_not_standard_node_pre = False
if single_tag_code.startswith(VIRTUAL_OTHER_NODE_PRE):
is_virtual_other_node_pre = True
single_tag_code = single_tag_code.replace(VIRTUAL_OTHER_NODE_PRE, '', 1)
elif single_tag_code.startswith(VIRTUAL_OTHER_NOT_STANDARD_NODE_PRE):
is_virtual_other_not_standard_node_pre = True
single_tag_code = single_tag_code.replace(VIRTUAL_OTHER_NOT_STANDARD_NODE_PRE, '', 1)
business_code_list, system_code_list, desc_code_list = judge_tag_type(conn, tag_codes_list)
result_dict = {} # 返回的结果
# 将过滤条件拼接到GrapgQL语句中,目前看到只用于"选了标准以后的数据字典列表"和"数据地图右侧统计汇总指标"
# 需要过滤的数据集列表
d_query_uids, final_entites_name = dgraph_cond_uids(
bk_biz_id,
project_id,
keyword,
platform,
business_code_list,
system_code_list,
desc_code_list,
only_standard=only_standard,
created_by=created_by,
created_at_start=created_at_start,
created_at_end=created_at_end,
only_not_standard=only_not_standard,
)
if me_type == CAL_TYPE_STANDARD: # 统计数据标准节点的相关指标
if platform == TYPE_TDW: # TDW平台
floating_zero_result(result_dict)
return result_dict
else:
# 在DmTaskDetail中的数据集合
dgraph_standard_query_uids, standard_entitys_name = dgraph_standard_uids2(
bk_biz_id,
project_id,
keyword,
platform,
business_code_list,
system_code_list,
desc_code_list,
final_entites_name,
order_range,
order_heat,
)
dgraph_standard_query_statement = '{' + d_query_uids + dgraph_standard_query_uids
# 按照存储类型过滤
if storage_type:
dgraph_standard_query_statement, standard_entitys_name = filter_dataset_by_storage_type(
dgraph_standard_query_statement, params, standard_entitys_name, storage_type=storage_type
)
# 按照热度、广度过滤
if (
(range_operate is not None and range_score is not None)
or (heat_operate is not None and heat_score is not None)
or (importance_operate is not None and importance_score is not None)
or (asset_value_operate is not None and asset_value_score is not None)
or (assetvalue_to_cost_operate is not None and assetvalue_to_cost is not None)
or (storage_capacity_operate is not None and storage_capacity is not None)
):
dgraph_standard_query_statement, standard_entitys_name = filter_dataset_by_lifecycle(
dgraph_standard_query_statement,
params,
standard_entitys_name,
range_operate=range_operate,
range_score=range_score,
heat_operate=heat_operate,
heat_score=heat_score,
importance_operate=importance_operate,
importance_score=importance_score,
asset_value_operate=asset_value_operate,
asset_value_score=asset_value_score,
assetvalue_to_cost_operate=assetvalue_to_cost_operate,
assetvalue_to_cost=assetvalue_to_cost,
storage_capacity_operate=storage_capacity_operate,
storage_capacity=storage_capacity,
)
# 统计数据量
dgraph_standard_query_statement += get_single_standard_query2(
single_tag_code,
standard_entitys_name,
is_singel_detail=True,
order_range=order_range,
order_heat=order_heat,
range_operate=range_operate,
range_score=range_score,
heat_operate=heat_operate,
heat_score=heat_score,
importance_operate=importance_operate,
importance_score=importance_score,
asset_value_operate=asset_value_operate,
asset_value_score=asset_value_score,
assetvalue_to_cost_operate=assetvalue_to_cost_operate,
assetvalue_to_cost=assetvalue_to_cost,
storage_capacity_operate=storage_capacity_operate,
storage_capacity=storage_capacity,
standard_content_id=standard_content_id,
)
if 'tmp_rt_count as DmTaskDetail.data_set' in dgraph_standard_query_statement:
standard_entitys_name = 'tmp_rt_count'
if (need_detail_type == NEED_DATA_SET_ID_DETAIL and need_only_uids) or need_all_data_dict_list:
# 修改排序逻辑
dgraph_standard_query_statement = gen_data_set_details_query(
dgraph_standard_query_statement,
params,
standard_entitys_name,
order_time,
order_heat,
order_range,
order_assetvalue_to_cost,
order_importance,
order_asset_value,
order_storage_capacity,
)
# 数据盘点接口
if need_detail_type == NEED_DATA_SET_ID_DETAIL and (
data_source_distribute or data_source_detail_distribute or data_type_distribute
):
dgraph_standard_query_ret = gen_data_source_type_distr_query(
dgraph_standard_query_statement,
params,
standard_entitys_name,
data_source_distribute,
data_source_detail_distribute,
data_type_distribute,
parent_tag_code,
)
return dgraph_standard_query_ret
# 桑基图
if need_detail_type == NEED_DATA_SET_ID_DETAIL and sankey_diagram_distribute:
dgraph_query_ret = gen_sankey_diagram_distribute_query(
dgraph_standard_query_statement, params, standard_entitys_name
)
return dgraph_query_ret
dgraph_standard_query_statement += '}'
get_floating_dgraph_result(
dgraph_standard_query_statement,
result_dict,
platform,
need_detail_type=need_detail_type,
page=page,
page_size=page_size,
token_pkey=token_pkey,
token_msg=token_msg,
)
return result_dict
lifecycle_metric_dict = dict(
data_set_type=data_set_type,
only_standard=only_standard,
need_only_uids=need_only_uids,
order_range=order_range,
order_heat=order_heat,
order_assetvalue_to_cost=order_assetvalue_to_cost,
order_importance=order_importance,
order_asset_value=order_asset_value,
order_storage_capacity=order_storage_capacity,
created_at_start=created_at_start,
created_at_end=created_at_end,
range_operate=range_operate,
range_score=range_score,
heat_operate=heat_operate,
heat_score=heat_score,
importance_operate=importance_operate,
importance_score=importance_score,
asset_value_operate=asset_value_operate,
asset_value_score=asset_value_score,
assetvalue_to_cost_operate=assetvalue_to_cost_operate,
assetvalue_to_cost=assetvalue_to_cost,
storage_capacity_operate=storage_capacity_operate,
storage_capacity=storage_capacity,
storage_type=storage_type,
only_not_standard=only_not_standard,
)
if (single_tag_code == VIRTUAL_DATA_MART_ROOT_NAME) and (
not business_code_list and not system_code_list and not desc_code_list
): # [数据集市]节点,没有选中标签的情况下
# 用户没有选择具体标签的情况下,计算[数据集市]节点
data_mart_dgraph_query, final_filter_uids_name, type_uids_dict = dgraph_data_mart_uids2(
bk_biz_id,
project_id,
keyword,
platform,
dgrah_cal_type=DGRAPH_FLOATING,
need_detail_type=need_detail_type,
created_by=created_by,
**lifecycle_metric_dict,
)
data_mart_dgraph_query_final = '{\n' + data_mart_dgraph_query
# 如果是数据字典列表接口
if (need_detail_type == NEED_DATA_SET_ID_DETAIL and need_only_uids) or need_all_data_dict_list:
# 按照存储类型过滤
if storage_type:
data_mart_dgraph_query_final, final_filter_uids_name = filter_dataset_by_storage_type(
data_mart_dgraph_query_final, params, final_filter_uids_name, storage_type=storage_type
)
# 按照热度、广度过滤
if (
(range_operate is not None and range_score is not None)
or (heat_operate is not None and heat_score is not None)
or (importance_operate is not None and importance_score is not None)
or (asset_value_operate is not None and asset_value_score is not None)
or (assetvalue_to_cost_operate is not None and assetvalue_to_cost is not None)
or (storage_capacity_operate is not None and storage_capacity is not None)
):
data_mart_dgraph_query_final, final_filter_uids_name = filter_dataset_by_lifecycle(
data_mart_dgraph_query_final,
params,
final_filter_uids_name,
range_operate=range_operate,
range_score=range_score,
heat_operate=heat_operate,
heat_score=heat_score,
importance_operate=importance_operate,
importance_score=importance_score,
asset_value_operate=asset_value_operate,
asset_value_score=asset_value_score,
assetvalue_to_cost_operate=assetvalue_to_cost_operate,
assetvalue_to_cost=assetvalue_to_cost,
storage_capacity_operate=storage_capacity_operate,
storage_capacity=storage_capacity,
)
# 修改排序逻辑的地方
data_mart_dgraph_query_final = gen_data_set_details_query(
data_mart_dgraph_query_final,
params,
final_filter_uids_name,
order_time,
order_heat,
order_range,
order_assetvalue_to_cost,
order_importance,
order_asset_value,
order_storage_capacity,
)
# 数据盘点接口
if need_detail_type == NEED_DATA_SET_ID_DETAIL and (
data_source_distribute or data_source_detail_distribute or data_type_distribute
):
data_mart_dgraph_query_final_ret = gen_data_source_type_distr_query(
data_mart_dgraph_query_final,
params,
final_filter_uids_name,
data_source_distribute,
data_source_detail_distribute,
data_type_distribute,
parent_tag_code,
)
return data_mart_dgraph_query_final_ret
# 桑基图接口
if need_detail_type == NEED_DATA_SET_ID_DETAIL and sankey_diagram_distribute:
dgraph_query_ret = gen_sankey_diagram_distribute_query(
data_mart_dgraph_query_final, params, final_filter_uids_name
)
return dgraph_query_ret
data_mart_dgraph_query_final += '\n}'
if need_detail_type != NEED_RECENT_DETAIL:
get_floating_dgraph_result(
data_mart_dgraph_query_final,
result_dict,
platform,
need_detail_type=need_detail_type,
page=page,
page_size=page_size,
token_pkey=token_pkey,
token_msg=token_msg,
)
elif single_tag_code == DATA_MART_OTHER_NAME: # [其他]节点
if only_standard or only_not_standard or business_code_list: # 选择了只展示标准的数据或选中的标签中有业务标签
floating_zero_result(result_dict)
else:
if not business_code_list and not system_code_list and not desc_code_list: # 未选择任何标签的情况下
data_mart_dgraph_uids_query, final_entites_name, type_uids_dict = dgraph_data_mart_uids2(
bk_biz_id,
project_id,
keyword,
platform,
dgrah_cal_type=DGRAPH_FLOATING,
need_detail_type=need_detail_type,
created_by=created_by,
data_set_type=data_set_type,
need_only_uids=True,
only_standard=only_standard,
order_range=order_range,
order_heat=order_heat,
order_assetvalue_to_cost=order_assetvalue_to_cost,
order_importance=order_importance,
order_asset_value=order_asset_value,
order_storage_capacity=order_storage_capacity,
created_at_start=created_at_start,
created_at_end=created_at_end,
range_operate=range_operate,
range_score=range_score,
heat_operate=heat_operate,
heat_score=heat_score,
importance_operate=importance_operate,
importance_score=importance_score,
asset_value_operate=asset_value_operate,
asset_value_score=asset_value_score,
assetvalue_to_cost_operate=assetvalue_to_cost_operate,
assetvalue_to_cost=assetvalue_to_cost,
storage_capacity_operate=storage_capacity_operate,
storage_capacity=storage_capacity,
storage_type=storage_type,
only_not_standard=only_not_standard,
)
else: # 若用户选择了具体标签
data_mart_dgraph_uids_query, final_entites_name, type_uids_dict = dgraph_cond_uids2(
single_tag_code,
bk_biz_id,
project_id,
keyword,
platform,
business_code_list,
system_code_list,
desc_code_list,
created_by=created_by,
is_virtual_other_node_pre=is_virtual_other_node_pre,
is_virtual_other_not_standard_node_pre=is_virtual_other_not_standard_node_pre,
data_set_type=data_set_type,
only_standard=only_standard,
need_only_uids=True,
order_range=order_range,
order_heat=order_heat,
order_assetvalue_to_cost=order_assetvalue_to_cost,
order_storage_capacity=order_storage_capacity,
order_importance=order_importance,
order_asset_value=order_asset_value,
created_at_start=created_at_start,
created_at_end=created_at_end,
range_operate=range_operate,
range_score=range_score,
heat_operate=heat_operate,
heat_score=heat_score,
importance_operate=importance_operate,
importance_score=importance_score,
asset_value_operate=asset_value_operate,
asset_value_score=asset_value_score,
assetvalue_to_cost_operate=assetvalue_to_cost_operate,
assetvalue_to_cost=assetvalue_to_cost,
storage_capacity_operate=storage_capacity_operate,
storage_capacity=storage_capacity,
storage_type=storage_type,
only_not_standard=only_not_standard,
) # 需要过滤的数据集列表
data_mart_dgraph_uids_query_final = (
'{\n'
+ data_mart_dgraph_uids_query
+ """
my as var(func:eq(Tag.code,"metric_domain")) @recurse{
parentids as ~Tag.parent_tag
}
var(func:uid(parentids,my)){
supported_entitys as Tag.targets
}
"""
)
data_set_type_map = gen_data_set_map(
data_set_type,
only_standard,
platform=platform,
project_id=project_id,
only_not_standard=only_not_standard,
)
bk_biz_id_list_uids = ''
if TYPE_RESULT_TABLE in data_set_type_map:
data_mart_dgraph_uids_query_final += """
tmp_other_rt_uids as var(func:uid(supported_entitys)) @filter(uid($tmp_rt_count))
other_rt_uids as var(func:uid($tmp_rt_count)) @filter(not uid(tmp_other_rt_uids)){
rt_p as ResultTable.project
rt_b as ResultTable.bk_biz
}
rt_count(func:uid(other_rt_uids)){
count(uid)
}
p_list(func:uid(rt_p)){
project_id:ProjectInfo.project_id
}
""".replace(
'$tmp_rt_count', type_uids_dict[D_UID_RESULT_TABLE_KEY]
)
bk_biz_id_list_uids = gen_bk_biz_id_list_uids(bk_biz_id_list_uids, 'rt_b')
if TYPE_RAW_DATA in data_set_type_map:
data_mart_dgraph_uids_query_final += """
tmp_other_rd_uids as var(func:uid(supported_entitys)) @filter(uid($tmp_rd_count))
other_rd_uids as var(func:uid($tmp_rd_count)) @filter(not uid(tmp_other_rd_uids)){
rd_b as AccessRawData.bk_biz
}
rd_count(func:uid(other_rd_uids)){
count(uid)
}
""".replace(
'$tmp_rd_count', type_uids_dict[D_UID_RAW_DATA_KEY]
)
bk_biz_id_list_uids = gen_bk_biz_id_list_uids(bk_biz_id_list_uids, 'rd_b')
if TYPE_TDW in data_set_type_map:
data_mart_dgraph_uids_query_final += """
tmp_tdw_count as var(func:uid(supported_entitys)) @filter(uid($tdw_count))
other_tdw_uids as var(func:uid($tdw_count)) @filter(not uid(tmp_tdw_count))
tdw_count(func:uid(other_tdw_uids)){
count(uid)
}
| |
(TinyDB) objects
data_storage_type : str
The name of the storage (TinyDB) table name that's supposed to be used for caching ppr matrices
dataset : str
The name of the dataset for which this model will be applied. This is necessary to make sure the
correct ppr matrix is loaded from the disk for conscutive calls
make_directed : bool
Wether the dataset passed to this model will be a directed graph or not. Necessary for the same
reason as the dataset name
"""
self.n_features = n_features
if isinstance(n_filters, list):
self.n_filters = n_filters
elif isinstance(n_filters, int):
self.n_filters = [n_filters] * (n_layers - 1)
else:
raise TypeError("n_filters must be integer or list of integers")
self.n_classes = n_classes
self.dropout = dropout
self.alpha = alpha
self.eps = eps
self.topk = topk
self.ppr_normalization = ppr_normalization
self.forward_batch_size = forward_batch_size
self.batch_norm = batch_norm
self.mean = mean
self.mean_kwargs = mean_kwargs
self.ppr_cache_params = ppr_cache_params
    @abstractmethod
    def model_forward(self, *args, **kwargs):
        """Compute model logits for one batch.

        Abstract hook implemented by concrete PPRGo variants; invoked by
        ``forward_wrapper`` and the training batch runner with the batch
        tensors as positional arguments.
        """
        pass
def release_cache(self):
self.ppr_cache_params = None
    @typechecked
    def forward_wrapper(self,
                        attr: TensorType["n_nodes", "n_classes"],
                        adj: Union[SparseTensor, sp.csr_matrix, TensorType["n_nodes", "n_nodes"]] = None,
                        ppr_scores: SparseTensor = None,
                        ppr_idx=None):
        """
        Wrapper around the forward function of PPRGo models.
        Fully (auto)-differentiable only iff ppr_scores is not None!
        If the ppr_scores are not given, they will be calculated on the fly or loaded from cache (disk),
        and prediction is done in batches of ``self.forward_batch_size`` nodes.

        Parameters
        ----------
        attr : Torch.Tensor
            The feature/attribute matrix of shape (n_nodes, n_features)
        adj : Union[SparseTensor, sp.csr_matrix],
            The adjacency matrix used for calculating the personalized page rank matrix.
            Should be of shape (n_nodes, n_nodes)
        ppr_scores : SparseTensor
            The precalculated personalized page rank matrix
        ppr_idx: np.Array
            The list of node ids for which the personalized page rank matrix should be calculated from the adjacency

        Returns
        -------
        torch.Tensor
            Logits. With precomputed ``ppr_scores`` this is whatever
            ``model_forward`` returns; otherwise a CPU float32 tensor of
            shape (len(ppr_idx), n_classes) assembled batch by batch.
        """
        # Run on whatever device the model parameters currently live on.
        device = next(self.parameters()).device
        # A dense adjacency tensor is converted to a SparseTensor on-device.
        if isinstance(adj, torch.Tensor):
            adj = SparseTensor.from_dense(adj.cpu()).to(device)
        if ppr_scores is not None:
            # Differentiable path: restrict attr/ppr to the columns that are
            # actually referenced (the unique neighbor ids) and do one pass.
            source_idx, neighbor_idx, ppr_vals = ppr_scores.coo()
            ppr_matrix = ppr_scores[:, neighbor_idx.unique()]
            attr_matrix = attr[neighbor_idx.unique()]
            return self.model_forward(attr_matrix.to(device), ppr_matrix.to(device))
        else:
            # we need to precompute the ppr_score first
            if isinstance(adj, SparseTensor):
                adj = adj.to_scipy(layout="csr")
            num_nodes = adj.shape[0]
            # Default: predict for every node in the graph.
            if ppr_idx is None:
                ppr_idx = np.arange(num_nodes)
            # Try to read the top-k PPR matrix from the on-disk cache first.
            topk_ppr = None
            if self.ppr_cache_params is not None:
                # late import as a workaround to avoid circular import issue
                from rgnn_at_scale.helper.io import Storage
                storage = Storage(self.ppr_cache_params["data_artifact_dir"])
                # Cache key: dataset + all PPR hyperparameters + node ids.
                params = dict(dataset=self.ppr_cache_params["dataset"],
                              alpha=self.alpha,
                              ppr_idx=np.array(ppr_idx),
                              eps=self.eps,
                              topk=self.topk,
                              ppr_normalization=self.ppr_normalization,
                              make_undirected=self.ppr_cache_params["make_undirected"])
                stored_topk_ppr = storage.find_sparse_matrix(self.ppr_cache_params["data_storage_type"],
                                                             params, find_first=True)
                # Exactly one hit means a usable cached matrix; otherwise recompute.
                topk_ppr, _ = stored_topk_ppr[0] if len(stored_topk_ppr) == 1 else (None, None)
            if topk_ppr is None:
                # Cache miss (or caching disabled): compute the matrix now.
                topk_ppr = ppr.topk_ppr_matrix(adj, self.alpha, self.eps, ppr_idx.copy(),
                                               self.topk, normalization=self.ppr_normalization)
                # save topk_ppr to disk
                if self.ppr_cache_params is not None:
                    params["ppr_idx"] = np.array(ppr_idx)
                    storage.save_sparse_matrix(self.ppr_cache_params["data_storage_type"], params,
                                               topk_ppr, ignore_duplicate=True)
            # There are usually too many nodes for a single forward pass, so we do batched prediction.
            data_set = RobustPPRDataset(
                attr_matrix_all=attr,
                ppr_matrix=topk_ppr,
                indices=ppr_idx,
                allow_cache=False)
            # BatchSampler + batch_size=None: the dataset yields whole batches itself.
            data_loader = torch.utils.data.DataLoader(
                dataset=data_set,
                sampler=torch.utils.data.BatchSampler(
                    torch.utils.data.SequentialSampler(data_set),
                    batch_size=self.forward_batch_size, drop_last=False
                ),
                batch_size=None,
                num_workers=0,
            )
            num_predictions = topk_ppr.shape[0]
            # Logits accumulate on CPU to keep GPU memory bounded.
            logits = torch.zeros(num_predictions, self.n_classes, device="cpu", dtype=torch.float32)
            num_batches = len(data_loader)
            # Log memory roughly every 10% of the batches.
            display_step = max(int(num_batches / 10), 1)
            for batch_id, (idx, xbs, _) in enumerate(data_loader):
                if batch_id % display_step == 0:
                    logging.info(f"Memory Usage before inference batch {batch_id}/{num_batches}:")
                    logging.info(utils.get_max_memory_bytes() / (1024 ** 3))
                    if device.type == "cuda":
                        logging.info(torch.cuda.max_memory_allocated() / (1024 ** 3))
                xbs = [xb.to(device) for xb in xbs]
                # Sequential sampling means batch i covers rows [i*bs, i*bs + len(batch)).
                # NOTE(review): assumes xbs[1] is batch-major so size(0) is the
                # batch length — confirm against RobustPPRDataset's output.
                start = batch_id * self.forward_batch_size
                end = start + xbs[1].size(0)  # batch_id * batch_size
                logits[start:end] = self.model_forward(*xbs).cpu()
            return logits
    @typechecked
    def fit(self,
            adj: Union[SparseTensor, sp.csr_matrix],
            attr: TensorType["n_nodes", "n_classes"],
            labels: TensorType["n_nodes"],
            idx_train: np.ndarray,
            idx_val: np.ndarray,
            lr: float,
            weight_decay: float,
            patience: int,
            use_annealing_scheduler: bool = False,
            scheduler_warm_restarts: bool = True,
            annealing_scheduler_T_0: int = 3,
            scheduler_time: str = "epoch",
            scheduler_step: int = 20,
            optim: str = "Adam",
            max_epochs: int = 200,
            batch_size: int = 512,
            batch_mult_val: int = 4,
            eval_step: int = 1,
            display_step: int = 50,
            # for loading ppr from disk
            ppr_cache_params: dict = None,
            ** kwargs):
        """Train the model with mini-batches and early stopping.

        Builds (or loads from the on-disk cache) top-k personalized-page-rank
        matrices for the train and validation indices, then optimizes with
        SGD or Adam, optionally under a cosine-annealing LR schedule. The
        best-validation-loss state dict is restored before returning.

        Note: ``eval_step`` and ``display_step`` are accepted but not
        referenced in this body (kept for interface compatibility).

        Returns
        -------
        tuple(dict, dict)
            Per-batch validation traces ``{"loss": ..., "acc": ...}`` and the
            corresponding training traces, in that order.
        """
        device = next(self.parameters()).device
        if ppr_cache_params is not None:
            # update ppr_cache_params
            self.ppr_cache_params = ppr_cache_params
        # PPR computation works on a scipy CSR matrix.
        if isinstance(adj, SparseTensor):
            adj = adj.to_scipy(layout="csr")
        topk_train = None
        # Try to read the training top-k PPR matrix from storage first.
        if self.ppr_cache_params is not None:
            # late import as a workaround to avoid circular import issue
            from rgnn_at_scale.helper.io import Storage
            storage = Storage(self.ppr_cache_params["data_artifact_dir"])
            # Cache key: dataset + PPR hyperparameters + the node ids queried.
            params = dict(dataset=self.ppr_cache_params["dataset"],
                          alpha=self.alpha,
                          ppr_idx=np.array(idx_train),
                          eps=self.eps,
                          topk=self.topk,
                          ppr_normalization=self.ppr_normalization,
                          make_undirected=self.ppr_cache_params["make_undirected"])
            stored_topk_train = storage.find_sparse_matrix(self.ppr_cache_params["data_storage_type"],
                                                           params, find_first=True)
            # Exactly one hit means a usable cached matrix; otherwise recompute below.
            topk_train, _ = stored_topk_train[0] if len(stored_topk_train) == 1 else (None, None)
        if topk_train is None:
            # looks like there was no ppr calculated before hand, so we need to calculate it now
            topk_train = ppr.topk_ppr_matrix(adj, self.alpha, self.eps, idx_train.copy(),
                                             self.topk, normalization=self.ppr_normalization)
            # save topk_ppr to disk
            if self.ppr_cache_params is not None:
                params["ppr_idx"] = np.array(idx_train)
                storage.save_sparse_matrix(self.ppr_cache_params["data_storage_type"], params,
                                           topk_train, ignore_duplicate=True)
        logging.info("Memory Usage after calculating/loading topk ppr for train:")
        logging.info(utils.get_max_memory_bytes() / (1024 ** 3))
        # Same lookup-or-compute flow for the validation indices (reuses
        # `storage`/`params` from above, which exist whenever caching is on).
        topk_val = None
        if self.ppr_cache_params is not None:
            params["ppr_idx"] = np.array(idx_val)
            stored_topk_val = storage.find_sparse_matrix(self.ppr_cache_params["data_storage_type"],
                                                         params, find_first=True)
            topk_val, _ = stored_topk_val[0] if len(stored_topk_val) == 1 else (None, None)
        if topk_val is None:
            topk_val = ppr.topk_ppr_matrix(adj, self.alpha, self.eps, idx_val.copy(),
                                           self.topk, normalization=self.ppr_normalization)
            # save topk_ppr to disk
            if self.ppr_cache_params is not None:
                params["ppr_idx"] = np.array(idx_val)
                storage.save_sparse_matrix(self.ppr_cache_params["data_storage_type"], params,
                                           topk_val, ignore_duplicate=True)
        logging.info("Memory Usage after calculating/loading topk ppr for validation:")
        logging.info(utils.get_max_memory_bytes() / (1024 ** 3))
        train_set = RobustPPRDataset(attr_matrix_all=attr,
                                     ppr_matrix=topk_train,
                                     indices=idx_train,
                                     labels_all=labels,
                                     allow_cache=False)
        val_set = RobustPPRDataset(attr_matrix_all=attr,
                                   ppr_matrix=topk_val,
                                   indices=idx_val,
                                   labels_all=labels,
                                   allow_cache=False)
        # BatchSampler + batch_size=None: the dataset yields whole batches itself.
        train_loader = torch.utils.data.DataLoader(
            dataset=train_set,
            sampler=torch.utils.data.BatchSampler(
                torch.utils.data.RandomSampler(train_set),
                batch_size=batch_size, drop_last=False
            ),
            batch_size=None,
            num_workers=0,
        )
        # Per-batch history returned to the caller.
        trace_train_loss = []
        trace_val_loss = []
        trace_train_acc = []
        trace_val_acc = []
        # NOTE(review): helper appears unused in this body — candidate for removal.
        def get_lr(optimizer):
            for param_group in optimizer.param_groups:
                return param_group['lr']
        if optim == "SGD":
            optimizer = torch.optim.SGD(self.parameters(), lr=lr, weight_decay=weight_decay)
        else:  # use adam
            optimizer = torch.optim.Adam(self.parameters(), lr=lr, weight_decay=weight_decay)
        if use_annealing_scheduler:
            if scheduler_warm_restarts:
                scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(
                    optimizer, annealing_scheduler_T_0)
            else:
                scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
                    optimizer, max_epochs)
        best_epoch_loss = np.inf
        num_batches = len(train_loader)
        step = 0
        epoch_pbar = tqdm(range(max_epochs), desc='Training Epoch...')
        for it in epoch_pbar:
            # Epoch aggregates are running means over the training batches.
            epoch_loss_val = 0
            epoch_acc_val = 0
            epoch_acc_train = 0
            for batch_train_idx, xbs, yb in train_loader:
                xbs, yb = [xb.to(device) for xb in xbs], yb.to(device)
                # logging.info(f"Memory Usage before training batch {step}:")
                # logging.info(utils.get_max_memory_bytes() / (1024 ** 3))
                # if device.type == "cuda":
                # logging.info(torch.cuda.max_memory_allocated() / (1024 ** 3))
                loss_train, ncorrect_train = self.__run_batch(xbs, yb, optimizer, train=True)
                train_acc = ncorrect_train / float(yb.shape[0])
                # validation on batch of val_set: a random subsample of up to
                # batch_mult_val * batch_size validation nodes per train batch.
                val_batch_size = batch_mult_val * batch_size
                rnd_idx = np.random.choice(len(val_set), size=len(val_set), replace=False)[:val_batch_size]
                batch_val_idx, xbs, yb = val_set[rnd_idx]
                xbs, yb = [xb.to(device) for xb in xbs], yb.to(device)
                # optimizer=None: evaluation-only pass, no gradient step.
                loss_val, ncorrect_val = self.__run_batch(xbs, yb, None, train=False)
                val_acc = ncorrect_val / float(yb.shape[0])
                epoch_loss_val += loss_val / num_batches
                epoch_acc_val += val_acc / num_batches
                epoch_acc_train += train_acc / num_batches
                trace_train_loss.append(loss_train)
                trace_val_loss.append(loss_val)
                trace_train_acc.append(train_acc)
                trace_val_acc.append(val_acc)
                # Optional LR schedule stepped every `scheduler_step` batches.
                if use_annealing_scheduler and scheduler_time == "batch":
                    if step % scheduler_step == 0:
                        logging.info("Scheduler Batch Step CosineAnnealingWarmRestarts\n")
                        scheduler.step()
                step += 1
            epoch_pbar.set_description(f"Training Epoch... acc_train: {epoch_acc_train: .4f},"
                                       f"acc_val: {epoch_acc_val: .4f}", refresh=False)
            if use_annealing_scheduler and scheduler_time == "epoch":
                logging.info("Scheduler Epoch Step CosineAnnealingWarmRestarts\n")
                scheduler.step()
            # Early stopping on mean validation loss: remember the best state,
            # stop once `patience` epochs pass without improvement.
            if epoch_loss_val < best_epoch_loss:
                best_epoch_loss = epoch_loss_val
                best_epoch = it
                best_state = {key: value.cpu() for key, value in self.state_dict().items()}
                # logging.info(f"Save best_state for new best_epoch_loss {best_epoch_loss}\n")
            else:
                if it >= best_epoch + patience:
                    logging.info("Early stopping due to increase in validation loss")
                    break
                # logging.info(f"No decrease in validation loss in epoch {it} since best epoch {best_epoch} ...")
        # restore the best validation state
        self.load_state_dict(best_state)
        return {"loss": trace_val_loss, "acc": trace_val_acc}, {"loss": trace_train_loss, "acc": trace_train_acc}
@typechecked
def __run_batch(self, xbs: list, yb: TensorType["batch_size"], optimizer, train: bool):
# Set model to training mode
if train:
self.train()
else:
self.eval()
# zero the parameter gradients
if train:
optimizer.zero_grad()
# forward
with torch.set_grad_enabled(train):
logits = self.model_forward(*xbs)
loss = F.cross_entropy(logits, yb)
top1 = torch.argmax(logits, dim=1)
ncorrect = torch.sum(top1 == yb)
# backward + optimize only if in training phase
if train:
loss.backward()
optimizer.step()
return loss.detach().cpu().item(), ncorrect.detach().cpu().item()
class PPRGoWrapper(PPRGo, PPRGoWrapperBase):
"""
Wrapper class around the Vanilla PPRGo model.
Use this class to instantiate a PPRGo model that includes the calculation and caching of
the ppr matrix as well as the training procedure.
"""
def __init__(self,
*args,
**kwargs):
# using the constructor of the wrapper base class to set/validate the required/optional model params
PPRGoWrapperBase.__init__(self, *args, **kwargs)
PPRGo.__init__(self, self.n_features, self.n_classes, self.n_filters, self.dropout,
batch_norm=self.batch_norm, mean=self.mean, mean_kwargs=self.mean_kwargs)
def | |
request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(list[BusinessObjectPermission], status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['groupname', 'busobname'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method security_get_security_group_business_object_permissions_by_bus_ob_name_v1" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'groupname' is set
if self.api_client.client_side_validation and ('groupname' not in local_var_params or # noqa: E501
local_var_params['groupname'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `groupname` when calling `security_get_security_group_business_object_permissions_by_bus_ob_name_v1`") # noqa: E501
# verify the required parameter 'busobname' is set
if self.api_client.client_side_validation and ('busobname' not in local_var_params or # noqa: E501
local_var_params['busobname'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `busobname` when calling `security_get_security_group_business_object_permissions_by_bus_ob_name_v1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'groupname' in local_var_params:
path_params['groupname'] = local_var_params['groupname'] # noqa: E501
if 'busobname' in local_var_params:
path_params['busobname'] = local_var_params['busobname'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/V1/getsecuritygroupbusinessobjectpermissions/groupname/{groupname}/busobname/{busobname}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[BusinessObjectPermission]', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def security_get_security_group_business_object_permissions_by_bus_ob_name_v2(self, groupname, busobname, **kwargs):  # noqa: E501
    """Get Business Object permissions by Security Group  # noqa: E501

    Fetch the Business Object permissions granted to a Security Group
    (API V2).  The request is synchronous unless ``async_req=True`` is
    supplied, in which case the request thread is returned instead.

    >>> thread = api.security_get_security_group_business_object_permissions_by_bus_ob_name_v2(groupname, busobname, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str groupname: Specify the Security Group name. (required)
    :param str busobname: Specify the Business Object name. (required)
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: GetSecurityGroupBusinessObjectPermissionsResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, requesting body-only data
    # (drop the status code and headers from the returned tuple).
    kwargs['_return_http_data_only'] = True
    return self.security_get_security_group_business_object_permissions_by_bus_ob_name_v2_with_http_info(groupname, busobname, **kwargs)  # noqa: E501
def security_get_security_group_business_object_permissions_by_bus_ob_name_v2_with_http_info(self, groupname, busobname, **kwargs):  # noqa: E501
    """Get Business Object permissions by Security Group  # noqa: E501

    Operation to get specific Business Object permissions for a Security Group.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.security_get_security_group_business_object_permissions_by_bus_ob_name_v2_with_http_info(groupname, busobname, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str groupname: Specify the Security Group name. (required)
    :param str busobname: Specify the Business Object name. (required)
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(GetSecurityGroupBusinessObjectPermissionsResponse, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # locals() is captured first so the dict holds exactly the declared
    # parameters (self, groupname, busobname, kwargs); all validation and
    # option lookups below read from this dict by parameter name.
    local_var_params = locals()

    # Complete set of argument names this endpoint accepts.
    all_params = ['groupname', 'busobname']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Reject unknown keyword arguments, then fold the recognised ones into
    # local_var_params so they can be read uniformly below.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method security_get_security_group_business_object_permissions_by_bus_ob_name_v2" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'groupname' is set
    if self.api_client.client_side_validation and ('groupname' not in local_var_params or  # noqa: E501
                                                   local_var_params['groupname'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `groupname` when calling `security_get_security_group_business_object_permissions_by_bus_ob_name_v2`")  # noqa: E501
    # verify the required parameter 'busobname' is set
    if self.api_client.client_side_validation and ('busobname' not in local_var_params or  # noqa: E501
                                                   local_var_params['busobname'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `busobname` when calling `security_get_security_group_business_object_permissions_by_bus_ob_name_v2`")  # noqa: E501

    collection_formats = {}

    # Both arguments are substituted into the URL path template below.
    path_params = {}
    if 'groupname' in local_var_params:
        path_params['groupname'] = local_var_params['groupname']  # noqa: E501
    if 'busobname' in local_var_params:
        path_params['busobname'] = local_var_params['busobname']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    # The shared ApiClient performs the HTTP round trip (and the optional
    # async dispatch when async_req is set).
    return self.api_client.call_api(
        '/api/V2/getsecuritygroupbusinessobjectpermissions/groupname/{groupname}/busobname/{busobname}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='GetSecurityGroupBusinessObjectPermissionsResponse',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def security_get_security_group_business_object_permissions_for_current_user_by_bus_ob_id_v1(self, bus_ob_id, **kwargs):  # noqa: E501
    """Get Business Object permission for current user  # noqa: E501

    Fetch the Business Object permissions of the Security Group of the
    currently logged-in user (API V1).  The request is synchronous unless
    ``async_req=True`` is supplied, in which case the request thread is
    returned instead.

    >>> thread = api.security_get_security_group_business_object_permissions_for_current_user_by_bus_ob_id_v1(bus_ob_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str bus_ob_id: Specify the Business Object ID. (required)
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: list[BusinessObjectPermission]
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, requesting body-only data
    # (drop the status code and headers from the returned tuple).
    kwargs['_return_http_data_only'] = True
    return self.security_get_security_group_business_object_permissions_for_current_user_by_bus_ob_id_v1_with_http_info(bus_ob_id, **kwargs)  # noqa: E501
def security_get_security_group_business_object_permissions_for_current_user_by_bus_ob_id_v1_with_http_info(self, bus_ob_id, **kwargs):  # noqa: E501
    """Get Business Object permission for current user  # noqa: E501

    Operation to get Business Object permissions for the currently logged-in user's Security Group.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.security_get_security_group_business_object_permissions_for_current_user_by_bus_ob_id_v1_with_http_info(bus_ob_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str bus_ob_id: Specify the Business Object ID. (required)
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(list[BusinessObjectPermission], status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # locals() is captured first so the dict holds exactly the declared
    # parameters (self, bus_ob_id, kwargs); validation and option lookups
    # below read from this dict by parameter name.
    local_var_params = locals()

    # Complete set of argument names this endpoint accepts.
    all_params = ['bus_ob_id']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Reject unknown keyword arguments, then fold the recognised ones into
    # local_var_params so they can be read uniformly below.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method security_get_security_group_business_object_permissions_for_current_user_by_bus_ob_id_v1" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'bus_ob_id' is set
    if self.api_client.client_side_validation and ('bus_ob_id' not in local_var_params or  # noqa: E501
                                                   local_var_params['bus_ob_id'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `bus_ob_id` when calling `security_get_security_group_business_object_permissions_for_current_user_by_bus_ob_id_v1`")  # noqa: E501

    collection_formats = {}

    # The snake_case python argument maps to the camelCase 'busObId'
    # placeholder of the URL path template.
    path_params = {}
    if 'bus_ob_id' in local_var_params:
        path_params['busObId'] = local_var_params['bus_ob_id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    # The shared ApiClient performs the HTTP round trip (and the optional
    # async dispatch when async_req is set).
    return self.api_client.call_api(
        '/api/V1/getsecuritygroupbusinessobjectpermissionsforcurrentuserbybusobid/busobid/{busObId}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='list[BusinessObjectPermission]',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def security_get_security_group_business_object_permissions_for_current_user_by_bus_ob_id_v2(self, bus_ob_id, **kwargs): # noqa: E501
"""Get Business Object permission for current user # noqa: E501
Operation to get Business Object permissions for the currently logged-in user's Security Group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.security_get_security_group_business_object_permissions_for_current_user_by_bus_ob_id_v2(bus_ob_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str bus_ob_id: Specify the Business Object ID. (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. | |
"Yang/Wu's OEP implementation, in PyQuante."
from math import sqrt
import settings
from PyQuante.NumWrap import zeros,matrixmultiply,transpose,dot,identity,\
array,solve
from PyQuante.Ints import getbasis, getints, getJ,get2JmK,getK
from PyQuante.LA2 import geigh,mkdens,trace2,simx
from PyQuante.hartree_fock import get_fock
from PyQuante.CGBF import three_center
from PyQuante.optimize import fminBFGS
from PyQuante.fermi_dirac import get_efermi, get_fermi_occs,mkdens_occs,\
get_entropy,mkdens_fermi
import logging
logger = logging.getLogger("pyquante")
gradcall=0
class EXXSolver:
    """EXXSolver(solver)

    Optimized-effective-potential (OEP) solver for exact exchange,
    restricted (closed-shell) case, following Yang/Wu, PRL 89 143002
    (2002).  Wraps an already-converged HF or DFT calculation and
    variationally optimizes the expansion coefficients ``b`` of the
    local potential with BFGS.
    """
    def __init__(self, solver):
        # Solver is a pointer to a HF or a DFT calculation that has
        # already converged
        self.solver = solver
        self.bfs = self.solver.bfs
        self.nbf = len(self.bfs)
        self.S = self.solver.S            # overlap matrix
        self.h = self.solver.h            # one-electron hamiltonian
        self.Ints = self.solver.Ints      # two-electron integrals
        self.molecule = self.solver.molecule
        self.nel = self.molecule.get_nel()
        self.nclosed, self.nopen = self.molecule.get_closedopen()
        self.Enuke = self.molecule.get_enuke()
        self.norb = self.nbf
        self.orbs = self.solver.orbs
        self.orbe = self.solver.orbe
        # Precompute the three-center integrals <i|g|j> for every basis
        # function g; Gij[g] is a symmetric nbf x nbf matrix (N^3 storage).
        self.Gij = []
        for g in xrange(self.nbf):
            gmat = zeros((self.nbf, self.nbf), 'd')
            self.Gij.append(gmat)
            gbf = self.bfs[g]
            for i in xrange(self.nbf):
                ibf = self.bfs[i]
                for j in xrange(i + 1):
                    jbf = self.bfs[j]
                    gij = three_center(ibf, gbf, jbf)
                    gmat[i, j] = gij
                    gmat[j, i] = gij
        # Fermi-Amaldi potential built from the input density; h + Vfa is
        # the fixed part H0 of the effective hamiltonian.
        D0 = mkdens(self.orbs, 0, self.nclosed)
        J0 = getJ(self.Ints, D0)
        Vfa = (2.0 * (self.nel - 1.0) / self.nel) * J0
        self.H0 = self.h + Vfa
        # Potential expansion coefficients, optimized by iterate().
        self.b = zeros(self.nbf, 'd')
        return

    def iterate(self, **kwargs):
        """Optimize self.b with BFGS.

        Options:
        etemp    electron temperature for finite-T occupations
                 (default settings.DFTElectronTemperature)
        """
        self.iter = 0
        self.etemp = kwargs.get("etemp", settings.DFTElectronTemperature)
        logging.debug("iter    Energy <b|b>")
        logging.debug("----    ------ -----")
        self.b = fminBFGS(self.get_energy, self.b, self.get_gradient, logger=logging)
        return

    def get_energy(self, b):
        """Return the energy at coefficients b.

        Side effects: refreshes self.Hoep, self.orbe/orbs, self.D,
        self.entropy, self.F and self.energy, which get_gradient relies on.
        """
        self.iter += 1
        self.Hoep = get_Hoep(b, self.H0, self.Gij)
        self.orbe, self.orbs = geigh(self.Hoep, self.S)
        if self.etemp:
            # Finite-temperature density with its entropy contribution.
            self.D, self.entropy = mkdens_fermi(self.nel, self.orbe, self.orbs,
                                                self.etemp)
        else:
            self.D = mkdens(self.orbs, 0, self.nclosed)
            self.entropy = 0
        self.F = get_fock(self.D, self.Ints, self.h)
        self.energy = trace2(self.h + self.F, self.D) + self.Enuke + self.entropy
        if self.iter == 1 or self.iter % 10 == 0:
            logging.debug("%4d %10.5f %10.5f" % (self.iter, self.energy, dot(b, b)))
        return self.energy

    def get_gradient(self, b):
        """Return dE/db at b (calls get_energy first to refresh F/orbitals)."""
        energy = self.get_energy(b)
        Fmo = simx(self.F, self.orbs)      # Fock matrix in the MO basis
        bp = zeros(self.nbf, 'd')
        for g in xrange(self.nbf):
            # Transform Gij[g] to MOs. This is done over the whole
            # space rather than just the parts we need. I can speed
            # this up later by only forming the i,a elements required
            Gmo = simx(self.Gij[g], self.orbs)
            # Now sum the appropriate terms to get the b gradient
            # (occupied i, virtual a pairs only).
            for i in xrange(self.nclosed):
                for a in xrange(self.nclosed, self.norb):
                    bp[g] = bp[g] + Fmo[i, a] * Gmo[i, a] / (self.orbe[i] - self.orbe[a])
        #logging.debug("EXX Grad: %10.5f" % (sqrt(dot(bp,bp))))
        return bp
class UEXXSolver:
    """EXXSolver(solver)

    Unrestricted (spin-polarized) variant of EXXSolver: optimizes two
    local potentials, one per spin channel, starting from a converged
    UHF calculation.
    """
    def __init__(self, solver):
        # Solver is a pointer to a UHF calculation that has
        # already converged
        self.solver = solver
        self.bfs = self.solver.bfs
        self.nbf = len(self.bfs)
        self.S = self.solver.S
        self.h = self.solver.h
        self.Ints = self.solver.Ints
        self.molecule = self.solver.molecule
        self.nel = self.molecule.get_nel()
        self.nalpha, self.nbeta = self.molecule.get_alphabeta()
        self.Enuke = self.molecule.get_enuke()
        self.norb = self.nbf
        self.orbsa = self.solver.orbsa
        self.orbsb = self.solver.orbsb
        self.orbea = self.solver.orbea
        self.orbeb = self.solver.orbeb
        # Precompute the three-center integrals <i|g|j>; Gij[g] is a
        # symmetric nbf x nbf matrix (N^3 storage).
        self.Gij = []
        for g in xrange(self.nbf):
            gmat = zeros((self.nbf, self.nbf), 'd')
            self.Gij.append(gmat)
            gbf = self.bfs[g]
            for i in xrange(self.nbf):
                ibf = self.bfs[i]
                for j in xrange(i + 1):
                    jbf = self.bfs[j]
                    gij = three_center(ibf, gbf, jbf)
                    gmat[i, j] = gij
                    gmat[j, i] = gij
        # Fermi-Amaldi potential from the total (alpha + beta) input density.
        D0 = mkdens(self.orbsa, 0, self.nalpha) + mkdens(self.orbsb, 0, self.nbeta)
        J0 = getJ(self.Ints, D0)
        Vfa = ((self.nel - 1.) / self.nel) * J0
        self.H0 = self.h + Vfa
        # Potential coefficients, both spins concatenated: b[:nbf] is the
        # alpha channel, b[nbf:] the beta channel.
        self.b = zeros(2 * self.nbf, 'd')
        return

    def iterate(self, **kwargs):
        """Optimize the 2*nbf coefficients self.b with BFGS.

        Options:
        etemp    electron temperature for finite-T occupations
                 (default settings.DFTElectronTemperature)
        """
        self.etemp = kwargs.get("etemp", settings.DFTElectronTemperature)
        self.iter = 0
        logging.debug("iter    Energy <b|b>")
        logging.debug("----    ------ -----")
        self.b = fminBFGS(self.get_energy, self.b, self.get_gradient, logger=logging)
        return

    def get_energy(self, b):
        """Return the UHF-like energy at coefficients b.

        Side effects: refreshes the per-spin hamiltonians, orbitals,
        densities and Fock matrices used by get_gradient.
        """
        self.iter += 1
        # Split the concatenated coefficient vector into spin channels.
        ba = b[:self.nbf]
        bb = b[self.nbf:]
        self.Hoepa = get_Hoep(ba, self.H0, self.Gij)
        self.Hoepb = get_Hoep(bb, self.H0, self.Gij)
        self.orbea, self.orbsa = geigh(self.Hoepa, self.S)
        self.orbeb, self.orbsb = geigh(self.Hoepb, self.S)
        if self.etemp:
            # NOTE(review): the doubled electron counts (2*nalpha, 2*nbeta)
            # and the 0.5 averaging of the entropies appear to compensate
            # for mkdens_fermi's closed-shell convention -- confirm against
            # PyQuante.fermi_dirac before changing.
            self.Da, entropya = mkdens_fermi(2 * self.nalpha, self.orbea, self.orbsa,
                                             self.etemp)
            self.Db, entropyb = mkdens_fermi(2 * self.nbeta, self.orbeb, self.orbsb,
                                             self.etemp)
            self.entropy = 0.5 * (entropya + entropyb)
        else:
            self.Da = mkdens(self.orbsa, 0, self.nalpha)
            self.Db = mkdens(self.orbsb, 0, self.nbeta)
            self.entropy = 0
        # Coulomb from the total density; exchange per spin channel.
        J = getJ(self.Ints, self.Da + self.Db)
        Ka = getK(self.Ints, self.Da)
        Kb = getK(self.Ints, self.Db)
        self.Fa = self.h + J - Ka
        self.Fb = self.h + J - Kb
        self.energy = 0.5 * (trace2(self.h + self.Fa, self.Da) +
                             trace2(self.h + self.Fb, self.Db))\
                      + self.Enuke + self.entropy
        if self.iter == 1 or self.iter % 10 == 0:
            logging.debug("%4d %10.5f %10.5f" % (self.iter, self.energy, dot(b, b)))
        return self.energy

    def get_gradient(self, b):
        """Return dE/db: alpha components in bp[:nbf], beta in bp[nbf:]."""
        energy = self.get_energy(b)
        Fmoa = simx(self.Fa, self.orbsa)
        Fmob = simx(self.Fb, self.orbsb)
        bp = zeros(2 * self.nbf, 'd')
        for g in xrange(self.nbf):
            # Transform Gij[g] to MOs. This is done over the whole
            # space rather than just the parts we need. I can speed
            # this up later by only forming the i,a elements required
            Gmo = simx(self.Gij[g], self.orbsa)
            # Now sum the appropriate terms to get the b gradient
            for i in xrange(self.nalpha):
                for a in xrange(self.nalpha, self.norb):
                    bp[g] += Fmoa[i, a] * Gmo[i, a] / (self.orbea[i] - self.orbea[a])
        for g in xrange(self.nbf):
            # Same transform and sum for the beta channel.
            Gmo = simx(self.Gij[g], self.orbsb)
            for i in xrange(self.nbeta):
                for a in xrange(self.nbeta, self.norb):
                    bp[self.nbf + g] += Fmob[i, a] * Gmo[i, a] / (self.orbeb[i] - self.orbeb[a])
        #logging.debug("EXX Grad: %10.5f" % (sqrt(dot(bp,bp))))
        return bp
def exx(atoms, orbs, **kwargs):
    """Alias for oep_hf: build the OEP for Hartree-Fock (exact) exchange."""
    return oep_hf(atoms, orbs, **kwargs)
def oep_hf(atoms, orbs, **kwargs):
    """oep_hf - Form the optimized effective potential for HF exchange.

    Thin wrapper that plugs the exact-exchange energy/gradient functions
    into the generic oep() driver.  See oep() for the accepted options
    and the meaning of the remaining arguments.
    """
    return oep(atoms, orbs, get_exx_energy, get_exx_gradient, **kwargs)
def oep(atoms, orbs, energy_func, grad_func=None, **kwargs):
    """oep - Form the optimized effective potential for a given energy expression

    oep(atoms,orbs,energy_func,grad_func=None,**kwargs)

    atoms       A Molecule object containing a list of the atoms
    orbs        A matrix of guess orbitals
    energy_func The function that returns the energy for the given method
    grad_func   The function that returns the force for the given method

    Options
    -------
    verbose       False   Output terse information to stdout (default)
                  True    Print out additional information
    ETemp         False   Use ETemp value for finite temperature DFT (default)
                  float   Use (float) for the electron temperature
    bfs           None    The basis functions to use. List of CGBF's
    pbfs          None    Basis functions for the potential (defaults to bfs)
    bvec          None    Initial guess for the potential coefficients
    basis_data    None    The basis data to use to construct bfs
    integrals     None    The one- and two-electron integrals to use
                          If not None, S,h,Ints

    Returns (energy, orbe, orbs) at the optimized potential.
    """
    ETemp = kwargs.get('ETemp', settings.DFTElectronTemperature)
    # NOTE: the optimizer is currently hard-coded to fminBFGS; the
    # 'opt_method'/'verbose' options are accepted but unused, so the dead
    # reads of them were removed.
    bfs = getbasis(atoms, **kwargs)
    # The basis set for the potential can be set different from
    # that used for the wave function
    pbfs = kwargs.get('pbfs')
    if not pbfs:
        pbfs = bfs
    npbf = len(pbfs)
    S, h, Ints = getints(bfs, atoms, **kwargs)
    nel = atoms.get_nel()
    nocc, nopen = atoms.get_closedopen()
    Enuke = atoms.get_enuke()
    # Form the OEP using Yang/Wu, PRL 89 143002 (2002)
    nbf = len(bfs)
    # Starting coefficients for the potential expansion: the user-supplied
    # guess ('bvec') if given, otherwise zeros.  An explicit None test is
    # used (was "if bvec:") so an empty or array-valued guess cannot be
    # silently ignored or raise an ambiguous-truth-value error.
    bvec = kwargs.get('bvec')
    if bvec is not None:
        assert len(bvec) == npbf
        b = array(bvec)
    else:
        b = zeros(npbf, 'd')
    # Form and store all of the three-center integrals
    # we're going to need.
    # These are <ibf|gbf|jbf> (where 'bf' indicates basis func,
    # as opposed to MO)
    # N^3 storage -- obviously you don't want to do this for
    # very large systems
    Gij = []
    for g in xrange(npbf):
        gmat = zeros((nbf, nbf), 'd')
        Gij.append(gmat)
        gbf = pbfs[g]
        for i in xrange(nbf):
            ibf = bfs[i]
            for j in xrange(i + 1):
                jbf = bfs[j]
                gij = three_center(ibf, gbf, jbf)
                gmat[i, j] = gij
                gmat[j, i] = gij
    # Compute the Fermi-Amaldi potential based on the LDA density.
    # We're going to form this matrix from the Coulombic matrix that
    # arises from the input orbitals. D0 and J0 refer to the density
    # matrix and corresponding Coulomb matrix
    D0 = mkdens(orbs, 0, nocc)
    J0 = getJ(Ints, D0)
    Vfa = (2 * (nel - 1.) / nel) * J0
    H0 = h + Vfa
    # Optimize the potential coefficients, then evaluate once more at the
    # minimum to recover the final orbitals and energy.
    b = fminBFGS(energy_func, b, grad_func,
                 (nbf, nel, nocc, ETemp, Enuke, S, h, Ints, H0, Gij),
                 logger=logging)
    energy, orbe, orbs = energy_func(b, nbf, nel, nocc, ETemp, Enuke,
                                     S, h, Ints, H0, Gij, return_flag=1)
    return energy, orbe, orbs
def get_exx_energy(b, nbf, nel, nocc, ETemp, Enuke, S, h, Ints, H0, Gij, **kwargs):
    """Computes the energy for the OEP/HF functional

    Options:
    return_flag    0   Just return the energy
                   1   Return energy, orbe, orbs
                   2   Return energy, orbe, orbs, F
    """
    return_flag = kwargs.get('return_flag')
    # Diagonalize the current effective hamiltonian in the AO basis.
    orbe, orbs = geigh(get_Hoep(b, H0, Gij), S)
    if ETemp:
        # Finite-temperature (Fermi-Dirac) occupations plus entropy term.
        efermi = get_efermi(nel, orbe, ETemp)
        occs = get_fermi_occs(efermi, orbe, ETemp)
        D = mkdens_occs(orbs, occs)
        entropy = get_entropy(occs, ETemp)
    else:
        D = mkdens(orbs, 0, nocc)
    F = get_fock(D, Ints, h)
    energy = trace2(h + F, D) + Enuke
    if ETemp:
        energy += entropy
    # HOMO-LUMO gap in kcal/mol (627.51 kcal/mol per hartree), for logging.
    iref = nel / 2
    gap = 627.51 * (orbe[iref] - orbe[iref - 1])
    logging.debug("EXX Energy, B, Gap: %10.5f %10.5f %10.5f"
                  % (energy, sqrt(dot(b, b)), gap))
    #logging.debug("%s" % orbe)
    if return_flag == 1:
        return energy, orbe, orbs
    elif return_flag == 2:
        return energy, orbe, orbs, F
    return energy
def get_exx_gradient(b,nbf,nel,nocc,ETemp,Enuke,S,h,Ints,H0,Gij,**kwargs):
"""Computes the gradient for the OEP/HF functional.
return_flag 0 Just return gradient
1 Return energy,gradient
2 Return energy,gradient,orbe,orbs
"""
# Dump the gradient every 10 steps so we can restart...
global gradcall
gradcall += 1
#if gradcall % 5 == 0: logging.debug("B vector:\n%s" % b)
# Form the new potential and the new orbitals
energy,orbe,orbs,F = get_exx_energy(b,nbf,nel,nocc,ETemp,Enuke,
S,h,Ints,H0,Gij,return_flag=2)
Fmo = matrixmultiply(transpose(orbs),matrixmultiply(F,orbs))
norb = nbf
bp = zeros(nbf,'d') # dE/db
for g in xrange(nbf):
# Transform Gij[g] to MOs. This is done over the whole
# space rather than just the parts we need. I can speed
# this up later by only forming the i,a elements required
Gmo = matrixmultiply(transpose(orbs),matrixmultiply(Gij[g],orbs))
# Now sum | |
= LocalStorageCipher(key, cipherLevel)
except:
return u"Not sure: could not validate your encryption!"
theFormat = key.getFormat()
theAlg = key.getAlgorithm()
except:
return u"Not sure: Error in decryption routine - oh well!!"
return u"%s / %s" % (theFormat, theAlg)
def check_dropbox_and_suppress_warnings():
    """Report whether the dataset lives under a Dropbox folder.

    Returns a (file_is_under_dropbox, suppression_file_exists) tuple:
    the first flag is True when any ancestor folder of the data root has
    "dropbox" in its name (case-insensitive); the second is True when the
    suppress_file_in_dropbox_restriction.txt marker file exists in the
    data root.
    """
    rootFolder = moneydance_data.getRootFolder()
    markerFile = File(rootFolder, "suppress_file_in_dropbox_restriction.txt")
    markerExists = markerFile.exists()
    underDropbox = False
    # Walk up the directory tree looking for a "dropbox" path component.
    ancestor = rootFolder
    while ancestor is not None:
        if "dropbox" in ancestor.getName().lower():
            underDropbox = True
            break
        ancestor = ancestor.getParentFile()
    return underDropbox, markerExists
def OFX_view_online_txns_payees_payments(statusLabel):
    """Prompt the user for an OFX data type and an account, then render the
    stored online data (transactions / payees / payments) for that account
    in a QuickJFrame.

    statusLabel: the Swing label used to report a cancelled selection.
    """
    # Indexes into objWhat below.
    _OBJOFXTXNS = 0
    _OBJOFXOLPAYEES = 1
    _OBJOFXOLPAYMNT = 2
    objWhat = [
        "OFX Online Transactions",        # onlineTxnList     "oltxns"
        "OFX Online Payees",              # onlinePayeeList   "olpayees"
        "OFX Online Payments"             # onlinePaymentList "olpmts"
    ]
    selectedAcct = None
    selectedObject = None
    textType = ""
    # Loop until the user has made a complete selection (data type plus
    # account); cancelling the first dialog aborts entirely, cancelling a
    # later one restarts the loop.
    while True:
        selectedObjType = JOptionPane.showInputDialog(toolbox_frame_,
                                                      "Select the type of Online data you want to view",
                                                      "OFX View Online Data",
                                                      JOptionPane.INFORMATION_MESSAGE,
                                                      moneydance_ui.getIcon("/com/moneydance/apps/md/view/gui/glyphs/appicon_64.png"),
                                                      objWhat,
                                                      None)
        if not selectedObjType:
            statusLabel.setText(("No online data type was selected to view ..").ljust(800, " "))
            statusLabel.setForeground(Color.RED)
            return
        # Pick the account filter matching the requested data type
        # (filters 15/16/17 select accounts holding that kind of record).
        if objWhat.index(selectedObjType) == _OBJOFXTXNS:
            accountsListForOlTxns = AccountUtil.allMatchesForSearch(moneydance_data, MyAcctFilter(15))
        elif objWhat.index(selectedObjType) == _OBJOFXOLPAYEES:
            accountsListForOlTxns = AccountUtil.allMatchesForSearch(moneydance_data, MyAcctFilter(16))
        elif objWhat.index(selectedObjType) == _OBJOFXOLPAYMNT:
            accountsListForOlTxns = AccountUtil.allMatchesForSearch(moneydance_data, MyAcctFilter(17))
        else: continue
        accountsListForOlTxns = sorted(accountsListForOlTxns, key=lambda sort_x: (sort_x.getFullAccountName().upper()))
        selectedAcct = JOptionPane.showInputDialog(toolbox_frame_,
                                                   "Select the Acct to view Online Data:",
                                                   "Select ACCOUNT",
                                                   JOptionPane.INFORMATION_MESSAGE,
                                                   moneydance_ui.getIcon("/com/moneydance/apps/md/view/gui/glyphs/appicon_64.png"),
                                                   accountsListForOlTxns,
                                                   None)  # type: Account
        if not selectedAcct: continue
        textType = ""
        if objWhat.index(selectedObjType) == _OBJOFXTXNS:
            selectedObject = MyGetDownloadedTxns(selectedAcct)        # Use my version to prevent creation of default record(s)
            textType = "Online Txns"
        elif objWhat.index(selectedObjType) == _OBJOFXOLPAYEES:
            selectedObject = MyGetOnlinePayees(selectedAcct)          # Use my version to prevent creation of default record(s)
            textType = "Online Payees"
        elif objWhat.index(selectedObjType) == _OBJOFXOLPAYMNT:
            selectedObject = MyGetOnlinePayments(selectedAcct)        # Use my version to prevent creation of default record(s)
            textType = "Online Payments"
        else: continue
        break
    # Build the report: header, object summary, then a type-specific
    # section followed by a raw dump of every stored parameter key.
    output = "VIEW SAVED ONLINE DATA: %s\n" \
             " ===================================\n\n" %(textType.upper())
    output += "Object Type: %s\n\n" %(type(selectedObject))
    # noinspection PyUnresolvedReferences
    output += "Linked Account Type: %s Acct Name: %s\n" %(selectedAcct.getAccountType(), selectedAcct.getFullAccountName())
    if isinstance(selectedObject, OnlineTxnList):
        output += "\n\nMD User Representation of Data Held by this Account/OnlineTxnList record:\n"
        output += " ========================================================================== \n"
        output += "%s %s\n" % (pad("getTxnCount():",50), selectedObject.getTxnCount() )
        output += "%s %s (%s)\n" % (pad("getOFXLastTxnUpdate():",50), selectedObject.getOFXLastTxnUpdate(), DateUtil.convertLongDateToInt(selectedObject.getOFXLastTxnUpdate()) )
        output += "%s %s\n" % (pad("hasOnlineAvailBalance():",50), selectedObject.hasOnlineAvailBalance() )
        output += "%s %s\n" % (pad("getOnlineAvailBalance():",50), selectedObject.getOnlineAvailBalance() )
        output += "%s %s (%s)\n" % (pad("getOnlineAvailBalanceDate():",50), selectedObject.getOnlineAvailBalanceDate(), DateUtil.convertLongDateToInt(selectedObject.getOnlineAvailBalanceDate()) )
        output += "%s %s\n" % (pad("hasOnlineLedgerBalance():",50), selectedObject.hasOnlineLedgerBalance() )
        output += "%s %s\n" % (pad("getOnlineLedgerBalance():",50), selectedObject.getOnlineLedgerBalance() )
        output += "%s %s (%s)\n" % (pad("getOnlineLedgerBalanceDate():",50), selectedObject.getOnlineLedgerBalanceDate(), DateUtil.convertLongDateToInt(selectedObject.getOnlineLedgerBalanceDate()) )
    if isinstance(selectedObject, OnlinePayeeList):
        output += "\n\nMD User Representation of Data Held by this Account/OnlinePayeeList record:\n"
        output += " ========================================================================== \n"
        output += "%s %s\n" % (pad("getPayeeCount():",50), selectedObject.getPayeeCount() )
    if isinstance(selectedObject, OnlinePaymentList):
        output += "\n\nMD User Representation of Data Held by this Account/OnlinePaymentList record:\n"
        output += " ========================================================================== \n"
        output += "%s %s\n" % (pad("getPaymentCount():",50), selectedObject.getPaymentCount() )
    output+="\n"
    # Show any stored creation/update timestamps in human-readable form.
    for convertTimeStamp in ["ts", "rec_dt", "dtentered", "creation_date"]:
        if selectedObject.getLongParameter(convertTimeStamp, 0) > 0:
            output += "%s %s\n" % (pad("TIMESTAMP('%s'):" %(convertTimeStamp),50), get_time_stamp_as_nice_text(selectedObject.getLongParameter(convertTimeStamp, 0)) )
    # Raw dump of every parameter key held on the record, sorted.
    keys = sorted(selectedObject.getParameterKeys())
    for theKey in keys:
        # noinspection PyUnresolvedReferences
        value = selectedObject.getParameter(theKey)
        output += pad("Key:%s" %theKey,50)+" Value: '%s'\n" %(value.strip())
    output+="\n\n<END>"
    QuickJFrame("VIEW SAVED ONLINE DATA",output).show_the_frame()
    return
def get_ofx_related_data():
from java.lang.reflect import Modifier
OFX = []
lCachePasswords = \
(isUserEncryptionPassphraseSet() and moneydance_ui.getCurrentAccounts().getBook().getLocalStorage().getBoolean("store_passwords", False))
# Build a list of Moneydance accounts that are enabled for download and have a service profile linked....
listAccountMDProxies=[]
olAccounts = AccountUtil.allMatchesForSearch(moneydance_data, MyAcctFilter(11))
if len(olAccounts) > 0:
for acctObj in olAccounts:
acct = acctObj # type: Account
svcBank = acct.getBankingFI() # type: OnlineService
svcBP = acct.getBillPayFI() # type: OnlineService
if acct.getBankingFI() is not None:
listAccountMDProxies.append([MDAccountProxy(acct, False),svcBank,False])
if acct.getBillPayFI() is not None:
listAccountMDProxies.append([MDAccountProxy(acct, True),svcBP,True])
OFX.append("VIEW YOUR INSTALLED OFX SERVICE / BANK LOGON PROFILES\n"
" ====================================================\n\n")
if lCachePasswords:
OFX.append("MD Will allow you to Cache your Authentication (means you have set an encryption key and selected store passwords..")
else:
OFX.append("MD Cache Authentication ** DISABLED ** (either no user encryption key or store passwords = no..")
OFX.append("")
OFX.append("MD Accounts enabled for OFX Downloads with linked Service / Bank logon profiles:")
if len(listAccountMDProxies)<1:
OFX.append("<NONE FOUND>")
else:
for olAcct in listAccountMDProxies:
if not olAcct[2]:
OFX.append("- %s Bank Profile: %s" %(pad(olAcct[0].getAccount().getFullAccountName(),40),olAcct[1]))
else:
OFX.append("- %s BillPay Profile: %s" %(pad(olAcct[0].getAccount().getFullAccountName(),40),olAcct[1]))
OFX.append("")
for service in moneydance_data.getOnlineInfo().getAllServices():
# Find the MD accounts specifically linked to this service profile
thisServiceMDAccountProxies=[]
for olacct in listAccountMDProxies:
if olacct[1] == service:
thisServiceMDAccountProxies.append(olacct)
OFX.append(pad("Service/Profile:".upper(),40) + str(service))
OFX.append(pad("----------------",40))
OFX.append(pad(">>Moneydance TIK Service ID:",40) + str(service.getTIKServiceID()))
OFX.append(pad(">>OFX Version:",40) + str(service.getOFXVersion()))
OFX.append(pad(">>Service Id:",40) + str(service.getServiceId()))
OFX.append(pad(">>Service Type:",40) + str(service.getServiceType()))
OFX.append(pad(">>Realms:",40) + str(service.getRealms()))
OFX.append(pad(">>Bootstrap URL:",40) + str(service.getBootstrapURL()))
OFX.append(pad(">>Needs FI Profile Check()?:",40) + str(service.needsFIProfileCheck()))
OFX.append(pad("\n>>Accounts configured within bank profile:",120))
if len(service.getAvailableAccounts())<1:
OFX.append("<NONE FOUND>")
else:
OFX.append(pad(" -- List All accounts configured in profile:",40) + str(service.getAvailableAccounts()))
for availAccount in service.getAvailableAccounts():
OFX.append(">> ACCOUNT: %s (%s)" %(availAccount.getDescription(),availAccount.getAccountNumber()))
try:
# Rather than listing all methods by hand, just iterate and call them all.. I have checked they are all safe...
meths = availAccount.getClass().getDeclaredMethods()
for meth in meths:
if not Modifier.isPublic(meth.getModifiers()): continue
if meth.getName().lower().startswith("get") or meth.getName().lower().startswith("is") \
and meth.getParameterCount()<1:
result = meth.invoke(availAccount)
if result is not None:
OFX.append(" >> %s %s" %(pad(meth.getName(),40),result) )
except:
pass
OFX.append("")
OFX.append(pad("\n>>MD Accounts linked to this service / bank profile:",120))
if len(thisServiceMDAccountProxies)<1:
OFX.append("<NONE FOUND>")
else:
for olacct in thisServiceMDAccountProxies:
if not olacct[2]:
OFX.append(" >> Banking: %s" %(olacct[0].getAccount().getFullAccountName()))
else:
OFX.append(" >> BillPay: %s" %(olacct[0].getAccount().getFullAccountName()))
OFX.append("")
try:
p_getAuthenticationCachePrefix=service.getClass().getDeclaredMethod("getAuthenticationCachePrefix")
p_getAuthenticationCachePrefix.setAccessible(True)
OFX.append(pad("AuthenticationCachePrefix:",33) + str(p_getAuthenticationCachePrefix.invoke(service)))
p_getAuthenticationCachePrefix.setAccessible(False)
p_getSessionCookiePrefix=service.getClass().getDeclaredMethod("getSessionCookiePrefix")
p_getSessionCookiePrefix.setAccessible(True)
OFX.append(pad("SessionCookiePrefix:",33 ) + str(p_getSessionCookiePrefix.invoke(service)))
p_getSessionCookiePrefix.setAccessible(False)
except:
pass
OFX.append(pad("\n>>REALMs configured:",120))
realmsToCheck = service.getRealms()
if "DEFAULT" not in realmsToCheck:
realmsToCheck.insert(0,"DEFAULT")
for realm in realmsToCheck:
OFX.append("Realm: %s User ID: %s" %(realm, service.getUserId(realm, None)))
for olacct in thisServiceMDAccountProxies:
if lCachePasswords:
authKey = "ofx:" + realm
authObj = service.getCachedAuthentication(authKey)
OFX.append("Realm: %s Cached Authentication: %s" %(realm, authObj))
authKey = "ofx:" + (realm + "::" + olacct[0].getAccountKey())
authObj = service.getCachedAuthentication(authKey)
OFX.append("Realm: %s Account Key: %s Cached Authentication: %s" %(realm, olacct[0].getAccountKey(),authObj))
userID=service.getUserId(realm, olacct[0])
OFX.append("Realm: %s UserID: %s" %(realm, userID))
if service.getSessionCookie(userID) is not None:
OFX.append("Session Cookie: %s" %(service.getSessionCookie(userID)))
OFX.append("getFIId() %s" %(service.getFIId() ))
if service.getUpdatedFIId() != service.getFIId():
OFX.append("getUpdatedFIId() %s" %(service.getUpdatedFIId() ))
OFX.append("getFIName() %s" %(service.getFIName() ))
OFX.append("getFIOrg() %s" %(service.getFIOrg() ))
if service.getUpdatedFIOrg() != service.getUpdatedFIOrg():
OFX.append("getUpdatedFIOrg() %s" %(service.getUpdatedFIOrg() ))
OFX.append("usesFITag() %s" %(service.usesFITag() ))
OFX.append("usesPTTAcctIDField() %s" %(service.usesPTTAcctIDField() ))
OFX.append("getFIUrl() %s" %(service.getFIUrl() ))
OFX.append("getFIUrlIsRedirect() %s" %(service.getFIUrlIsRedirect() ))
OFX.append("getIgnoreTxnsBeforeLastUpdate() %s" %(service.getIgnoreTxnsBeforeLastUpdate() ))
OFX.append("getTxnDownloadOverlap() %s" %(service.getTxnDownloadOverlap() ))
OFX.append("getDateAvailAcctsUpdated() %s" %(service.getDateAvailAcctsUpdated() ))
OFX.append("getAlwaysSendDateRange() %s" %(service.getAlwaysSendDateRange() ))
OFX.append("getUseProfileRequest() %s" %(service.getUseProfileRequest() ))
OFX.append("getUseClientSpecificUIDS() %s" %(service.getUseClientSpecificUIDS() ))
OFX.append("getUseFileUIDs() %s" %(service.getUseFileUIDs() ))
OFX.append("getUseBPFileUIDs() %s" %(service.getUseBPFileUIDs() ))
OFX.append("useTerribleTLSV1Hack() %s" %(service.useTerribleTLSV1Hack() ))
OFX.append("getFIEmail() %s" %(service.getFIEmail() ))
OFX.append("getTechServicePhone() %s" %(service.getTechServicePhone() ))
OFX.append("getInvstBrokerID() %s" %(service.getInvstBrokerID() ))
OFX.append("usesBillPayExtendedAcctTo() %s" %(service.usesBillPayExtendedAcctTo() ))
OFX.append("getServiceType() %s" %(service.getServiceType() ))
OFX.append("getUseShortDates() %s" %(service.getUseShortDates() ))
OFX.append("shouldDecrementLastTxnDate() %s" %(service.shouldDecrementLastTxnDate() ))
OFX.append("getSignupAcctsAvail() %s" %(service.getSignupAcctsAvail() ))
OFX.append("getSignupCanActivateAcct() %s" %(service.getSignupCanActivateAcct() ))
OFX.append("getSignupCanChgUserInfo() %s" %(service.getSignupCanChgUserInfo() ))
OFX.append("getSignupCanPreauth() %s" %(service.getSignupCanPreauth() ))
OFX.append("getSignupClientAcctNumReq() %s" %(service.getSignupClientAcctNumReq() ))
OFX.append("getSignupViaClient() %s" %(service.getSignupViaClient() ))
OFX.append("getSignupViaOther() %s" %(service.getSignupViaOther() ))
OFX.append("getSignupViaOtherMsg() %s" %(service.getSignupViaOtherMsg() ))
OFX.append("getSignupViaWeb() %s" %(service.getSignupViaWeb() ))
OFX.append("getSignupViaWebUrl() %s" %(service.getSignupViaWebUrl() ))
OFX.append("getStopChkCanUseDescription() %s" %(service.getStopChkCanUseDescription() ))
OFX.append("getStopChkCanUseRange() %s" %(service.getStopChkCanUseRange() ))
OFX.append("getStopChkFee() %s" %(service.getStopChkFee() ))
OFX.append("getStopChkProcessingDaysOff() %s" %(service.getStopChkProcessingDaysOff() ))
OFX.append("getStopChkProcessingEndTime() %s" %(service.getStopChkProcessingEndTime() ))
for x in service.getRealms():
OFX.append("getClientIDRequired(x) %s" %(service.getClientIDRequired(x) ))
OFX.append("getUserCanChangePIN(x) %s" %(service.getUserCanChangePIN(x) ))
OFX.append("getMaxPasswdLength(x) %s" %(service.getMaxPasswdLength(x) ))
OFX.append("getMinPasswdLength(x) %s" %(service.getMinPasswdLength(x) ))
OFX.append("getMustChngPINFirst(x) %s" %(service.getMustChngPINFirst(x) ))
OFX.append("getPasswdCanHaveSpaces(x) %s" %(service.getPasswdCanHaveSpaces(x) ))
OFX.append("getPasswdCanHaveSpecialChars(x) %s" %(service.getPasswdCanHaveSpecialChars(x) ))
OFX.append("getPasswdCaseSensitive(x) %s" %(service.getPasswdCaseSensitive(x) ))
OFX.append("getPasswdCharType(x) %s" %(service.getPasswdCharType(x) ))
OFX.append("getPasswdType(x) %s" %(service.getPasswdType(x) ))
OFX.append("getDateUpdated() %s (%s)" %(service.getDateUpdated(), DateUtil.convertLongDateToInt(service.getDateUpdated())))
OFX.append("getLastTransactionID() %s" %(service.getLastTransactionID() ))
OFX.append("getMaxFITIDLength() %s" %(service.getMaxFITIDLength() ))
OFX.append("getInvalidAcctTypes() %s" %(service.getInvalidAcctTypes() ))
p_getMsgSetTag=service.getClass().getDeclaredMethod("getMsgSetTag",[Integer.TYPE])
p_getMsgSetTag.setAccessible(True)
for msgType in (0,1,3,4,5,6,7,8,9,10,11,12):
if service.supportsMsgSet(msgType) or msgType==0:
tag=p_getMsgSetTag.invoke(service,[msgType])
OFX.append("---")
OFX.append(" Supports Message Tag: %s" %(tag))
OFX.append(" getMsgSetLanguage(msgType) %s" %(service.getMsgSetLanguage(msgType) ))
OFX.append(" getMsgSetRspnsFileErrors(msgType)%s" %(service.getMsgSetRspnsFileErrors(msgType) ))
OFX.append(" getMsgSetSecurity(msgType) %s" %(service.getMsgSetSecurity(msgType) ))
OFX.append(" getMsgSetSignonRealm(msgType) %s" %(service.getMsgSetSignonRealm(msgType) ))
OFX.append(" getMsgSetSyncMode(msgType) %s" %(service.getMsgSetSyncMode(msgType) ))
OFX.append(" getMsgSetTransportSecure(msgType)%s" %(service.getMsgSetTransportSecure(msgType) ))
OFX.append(" getMsgSetURL(msgType) %s" %(service.getMsgSetURL(msgType) ))
OFX.append(" getMsgSetVersion(msgType) %s" %(service.getMsgSetVersion(msgType) ))
p_getMsgSetTag.setAccessible(False)
OFX.append("---")
OFX.append("getCreditCardClosingAvail() %s" %(service.getCreditCardClosingAvail() ))
OFX.append("getCustServicePhone() %s" %(service.getCustServicePhone() ))
OFX.append("getBankClosingAvail() %s" %(service.getBankClosingAvail() ))
OFX.append("getBankXfrCanModifyModels() %s" %(service.getBankXfrCanModifyModels() ))
OFX.append("getBankXfrCanModifyTransfers() %s" %(service.getBankXfrCanModifyTransfers() ))
OFX.append("getBankXfrCanScheduleRecurring() %s" %(service.getBankXfrCanScheduleRecurring() ))
OFX.append("getBankXfrCanScheduleTransfers() %s" %(service.getBankXfrCanScheduleTransfers() ))
OFX.append("getBankXfrDaysWithdrawn() %s" %(service.getBankXfrDaysWithdrawn() ))
OFX.append("getBankXfrDefaultDaysToPay() %s" %(service.getBankXfrDefaultDaysToPay() ))
OFX.append("getBankXfrModelWindow() %s" %(service.getBankXfrModelWindow() ))
OFX.append("getBankXfrNeedsTAN() %s" %(service.getBankXfrNeedsTAN() | |
<filename>rbi2/inte13.py
from collections import OrderedDict, namedtuple
from string import whitespace
import pdb
import sys
# Token type tags shared by the lexer and the parser.
ADD = 'ADD'
SUB = 'SUB'
MUL = 'MUL'
INT_DIV = 'INT_DIV'    # Pascal's DIV keyword (integer division)
REAL_DIV = 'REAL_DIV'  # '/' operator (floating-point division)
INT_TYPE = 'INT_TYPE'
REAL_TYPE = 'REAL_TYPE'
INT_CONST = 'INT_CONST'
REAL_CONST = 'REAL_CONST'
DOT = 'DOT'
COMMA = 'COMMA'
SEMI = 'SEMI'
COLON = 'COLON'
OPAR = 'OPAR'  # '('
CPAR = 'CPAR'  # ')'
VAR = 'VAR'
ID = 'ID'
BEGIN = 'BEGIN'
END = 'END'
ASSIGN = 'ASSIGN'
PROGRAM = 'PROGRAM'
PROCEDURE = 'PROCEDURE'
EOF = 'EOF'
# A token is an immutable (type, value) pair.
Token = namedtuple('Token',['type', 'value'])
# Keyword lookup table; get_id() upper-cases the lexeme before probing it,
# falling back to a plain ID token when the word is not reserved.
RESERVED_KEYWORDS = {
    'PROGRAM' : Token(PROGRAM, 'PROGRAM'),
    'PROCEDURE' : Token(PROCEDURE, 'PROCEDURE'),
    'VAR' : Token(VAR, 'VAR'),
    'BEGIN' : Token(BEGIN, 'BEGIN'),
    'END' : Token(END, 'END'),
    'DIV' : Token(INT_DIV, 'DIV'),
    'INTEGER' : Token(INT_TYPE, 'INTEGER'),
    'REAL' : Token(REAL_TYPE, 'REAL')
}
class Lexer(object):
    """Turns Pascal source text into a stream of Token objects.

    Call get_next_token() repeatedly; it returns Token(EOF, None) once
    the input is exhausted.
    """
    def __init__(self, text):
        self.text = text
        self.pos = 0
        # Empty input is legal: start at end-of-input instead of raising
        # IndexError (the original crashed on Lexer('')).
        self.cur_char = self.text[self.pos] if self.text else None
    def error(self):
        """Raise for a character no rule can consume."""
        raise NameError('Character not recognized.')
    def get_next_char(self):
        """Advance one character; cur_char becomes None at end of input."""
        self.pos += 1
        if self.pos <= len(self.text) - 1:
            self.cur_char = self.text[self.pos]
        else:
            self.cur_char = None
    def peek_next_char(self):
        """Return the next character without consuming it (None at end)."""
        peek_pos = self.pos + 1
        if peek_pos <= len(self.text) - 1:
            return self.text[peek_pos]
        return None
    def get_whitespace(self):
        """Consume a run of whitespace characters (result is discarded)."""
        while self.cur_char is not None and self.cur_char in whitespace:
            self.get_next_char()
    def get_comment(self):
        """Consume a '{ ... }' comment, including the trailing '}'.

        Tolerates an unterminated comment at end of input (the original
        crashed concatenating None).
        """
        while self.cur_char is not None and self.cur_char != '}':
            self.get_next_char()
        if self.cur_char is not None:
            self.get_next_char()
    def get_num(self):
        """Lex a numeric literal into an INT_CONST or REAL_CONST token."""
        value = ''
        while self.cur_char is not None and self.cur_char.isdigit():
            value = value + self.cur_char
            self.get_next_char()
        if self.cur_char == '.':
            value = value + self.cur_char
            self.get_next_char()
            # Guard against end-of-input right after the '.' (e.g. "3.");
            # the original called None.isdigit() and crashed.
            while self.cur_char is not None and self.cur_char.isdigit():
                value = value + self.cur_char
                self.get_next_char()
            return Token(REAL_CONST, float(value))
        return Token(INT_CONST, int(value))
    def get_id(self):
        """Lex an identifier or reserved keyword.

        Keywords and variables are not case sensitive; the lexeme is
        upper-cased before the keyword-table lookup.
        """
        value = ''
        while (self.cur_char is not None
               and (self.cur_char.isalnum() or self.cur_char == '_')):
            value = value + self.cur_char
            self.get_next_char()
        value = value.upper()
        return RESERVED_KEYWORDS.get(value, Token(ID, value))
    def get_next_token(self):
        """Return the next token, or Token(EOF, None) at end of input."""
        op_dict = {
            '+':Token(ADD, '+'),
            '-':Token(SUB, '-'),
            '*':Token(MUL, '*'),
            '/':Token(REAL_DIV, '/'),
            '.':Token(DOT, '.'),
            ',':Token(COMMA, ','),
            ';':Token(SEMI, ';'),
            ':':Token(COLON, ':'),
            '(':Token(OPAR, '('),
            ')':Token(CPAR, ')')
        }
        while self.cur_char is not None:
            if self.cur_char in whitespace:
                self.get_whitespace()
                continue
            if self.cur_char == '{':
                self.get_comment()
                continue
            # ':=' must be recognized before the single-character ':'.
            if self.cur_char == ':' and self.peek_next_char() == '=':
                self.get_next_char()
                self.get_next_char()
                return Token(ASSIGN, ':=')
            if self.cur_char.isdigit():
                return self.get_num()
            if self.cur_char.isalpha() or self.cur_char == '_':
                return self.get_id()
            if self.cur_char in op_dict:
                token = op_dict[self.cur_char]
                self.get_next_char()
                return token
            self.error()
        return Token(EOF, None)
class AST(object):
    """Common (empty) base class for all abstract-syntax-tree nodes."""
    pass
class Program(AST):
    """Root AST node: the program's name and its top-level block."""
    def __init__(self, name, block):
        self.name = name
        self.block = block
class Procedure(AST):
    """A PROCEDURE declaration: its name and its nested block."""
    def __init__(self, name, block):
        self.name = name
        self.block = block
class Block(AST):
    """A declarations section followed by a compound statement."""
    def __init__(self, declarations, compound_statements):
        self.declarations = declarations
        self.compound_statements = compound_statements
class VarDecl(AST):
    """A single variable declaration: a type node and a variable node."""
    def __init__(self, type_node, var_node):
        self.type_node = type_node
        self.var_node = var_node
class TypeSpec(AST):
    """A type name (INTEGER or REAL); value is the type's lexeme."""
    def __init__(self, token):
        self.token = token
        self.value = self.token.value
class Variable(AST):
    """A variable reference; value is the (upper-cased) identifier."""
    def __init__(self, token):
        self.token = token
        self.value = self.token.value
class Compound(AST):
    """A BEGIN..END block; holds an ordered list of statement nodes."""
    def __init__(self):
        self.statement_list = []
class Assign(AST):
    """An assignment `left := right`; token/op is the ASSIGN token."""
    def __init__(self, left, token, right):
        self.left = left
        self.token = self.op = token
        self.right = right
class BinOp(AST):
    """A binary operation; token/op is the operator token."""
    def __init__(self, left, token, right):
        self.left = left
        self.token = self.op = token
        self.right = right
class UnaryOp(AST):
    """A unary +/- applied to a factor; left/op is the operator token."""
    def __init__(self, left, factor):
        self.left = self.op = left
        self.factor = factor
class Empty(AST):
    """Placeholder node produced for an empty statement."""
    pass
class Num(AST):
    """A numeric literal; value is the already-converted int or float."""
    def __init__(self, token):
        self.token = token
        self.value = self.token.value
class Parser(object):
    """Recursive-descent parser producing an AST from a Lexer's tokens.

    Grammar (simplified Pascal subset):
        program              : PROGRAM ID SEMI block DOT
        procedure            : PROCEDURE ID SEMI block SEMI
        block                : declaration compound_statement
        declaration          : (VAR (variable_declaration SEMI)+)? procedure*
        variable_declaration : ID (COMMA ID)* COLON type_spec
        compound_statement   : BEGIN statement_list END
        statement_list       : statement (SEMI statement)*
        statement            : compound_statement | assign_statement | empty
        expr1                : expr2 ((ADD|SUB) expr2)*
        expr2                : expr3 ((MUL|INT_DIV|REAL_DIV) expr3)*
        expr3                : (ADD|SUB) expr3 | INT_CONST | REAL_CONST
                             | OPAR expr1 CPAR | variable
    """
    def __init__(self, lexer):
        self.lexer = lexer
        self.cur_token = self.lexer.get_next_token()
    def error(self):
        """Abort parsing; include the offending token for diagnosability."""
        raise SyntaxError(f'Syntax not recognized at {self.cur_token}.')
    def check_token_type(self, token_type):
        """Consume the current token if its type matches, else raise.

        The per-token debug print that was here has been removed; it
        polluted stdout on every single token consumed.
        """
        if self.cur_token.type == token_type:
            self.cur_token = self.lexer.get_next_token()
        else:
            self.error()
    def program(self):
        """program : PROGRAM ID SEMI block DOT"""
        self.check_token_type(PROGRAM)
        name = self.cur_token.value
        self.check_token_type(ID)
        self.check_token_type(SEMI)
        block = self.block()
        self.check_token_type(DOT)
        return Program(name, block)
    def procedure(self):
        """procedure : PROCEDURE ID SEMI block SEMI"""
        self.check_token_type(PROCEDURE)
        name = self.cur_token.value
        self.check_token_type(ID)
        self.check_token_type(SEMI)
        block = self.block()
        self.check_token_type(SEMI)
        return Procedure(name, block)
    def block(self):
        """block : declaration compound_statement"""
        declarations = self.declaration()
        compound_statements = self.compound_statement()
        return Block(declarations, compound_statements)
    def declaration(self):
        """declaration : (VAR (variable_declaration SEMI)+)? procedure*

        Returns a flat list of VarDecl nodes followed by Procedure nodes.
        """
        var_decls = []
        if self.cur_token.type == VAR:
            self.check_token_type(VAR)
            while self.cur_token.type == ID:
                var_decls.extend(self.variable_declaration())
                self.check_token_type(SEMI)
        while self.cur_token.type == PROCEDURE:
            var_decls.append(self.procedure())
        return var_decls
    def variable_declaration(self):
        """variable_declaration : ID (COMMA ID)* COLON type_spec

        Returns one VarDecl per declared name, all sharing the type node.
        """
        var_nodes = [self.variable()]
        while self.cur_token.type == COMMA:
            self.check_token_type(COMMA)
            var_nodes.append(self.variable())
        self.check_token_type(COLON)
        type_node = self.type_spec()
        return [VarDecl(type_node, var_node) for var_node in var_nodes]
    def type_spec(self):
        """type_spec : INTEGER | REAL"""
        token = self.cur_token
        if token.type == INT_TYPE:
            self.check_token_type(INT_TYPE)
        elif token.type == REAL_TYPE:
            self.check_token_type(REAL_TYPE)
        else:
            # The original silently wrapped whatever token was present,
            # producing a bogus TypeSpec; fail loudly instead.
            self.error()
        return TypeSpec(token)
    def variable(self):
        """variable : ID"""
        token = self.cur_token
        self.check_token_type(ID)
        return Variable(token)
    def compound_statement(self):
        """compound_statement : BEGIN statement_list END"""
        self.check_token_type(BEGIN)
        statements = self.statement_list()
        self.check_token_type(END)
        compound = Compound()
        compound.statement_list.extend(statements)
        return compound
    def statement_list(self):
        """statement_list : statement (SEMI statement)*"""
        statements = [self.statement()]
        while self.cur_token.type == SEMI:
            self.check_token_type(SEMI)
            statements.append(self.statement())
        if self.cur_token.type == ID:
            # A trailing ID here means a missing semicolon between
            # statements.
            self.error()
        return statements
    def statement(self):
        """statement : compound_statement | assign_statement | empty"""
        token = self.cur_token
        if token.type == BEGIN:
            return self.compound_statement()
        if token.type == ID:
            return self.assign_statement()
        return self.empty()
    def assign_statement(self):
        """assign_statement : variable ASSIGN expr1"""
        left = self.variable()
        token = self.cur_token
        self.check_token_type(ASSIGN)
        right = self.expr1()
        return Assign(left, token, right)
    def empty(self):
        """empty : (no tokens consumed)"""
        return Empty()
    def expr1(self):
        """expr1 : expr2 ((ADD|SUB) expr2)* — lowest-precedence level."""
        node = self.expr2()
        while self.cur_token.type in (ADD, SUB):
            token = self.cur_token
            self.check_token_type(token.type)
            node = BinOp(node, token, self.expr2())
        return node
    def expr2(self):
        """expr2 : expr3 ((MUL|INT_DIV|REAL_DIV) expr3)*"""
        node = self.expr3()
        while self.cur_token.type in (MUL, INT_DIV, REAL_DIV):
            token = self.cur_token
            self.check_token_type(token.type)
            node = BinOp(node, token, self.expr3())
        return node
    def expr3(self):
        """expr3 : unary, constant, parenthesized expr, or variable."""
        token = self.cur_token
        if token.type in (ADD, SUB):
            self.check_token_type(token.type)
            return UnaryOp(token, self.expr3())
        if token.type in (INT_CONST, REAL_CONST):
            self.check_token_type(token.type)
            return Num(token)
        if token.type == OPAR:
            self.check_token_type(OPAR)
            node = self.expr1()
            self.check_token_type(CPAR)
            return node
        if token.type == ID:
            return self.variable()
        # The original fell off the end and returned None for an
        # unexpected token, crashing later passes; raise here instead.
        self.error()
    def parse(self):
        """Parse a whole program and return the AST root."""
        tree = self.program()
        if tree is None:
            return ''
        return tree
class NodeVisitor(object):
    """Dispatches visit(node) to a subclass's visit_<ClassName> method."""
    def visit(self, node):
        # Look up the handler named after the node's concrete class,
        # falling back to default_visitor when the subclass lacks one.
        handler = getattr(self, 'visit_' + type(node).__name__,
                          self.default_visitor)
        return handler(node)
    def default_visitor(self, node):
        raise Exception(f'Method visit_{type(node).__name__} not found.')
class VisualAST(NodeVisitor):
    """Debug visitor: prints each node's class name and salient fields
    in depth-first order."""
    def __init__(self, ast):
        self.ast = ast
    def _announce(self, node):
        # Every visit begins by printing the node's class name.
        print(type(node).__name__)
    def visit_Program(self, node):
        self._announce(node)
        print(node.name)
        self.visit(node.block)
    def visit_Procedure(self, node):
        self._announce(node)
        print(node.name)
        self.visit(node.block)
    def visit_Block(self, node):
        self._announce(node)
        for declaration in node.declarations:
            self.visit(declaration)
        self.visit(node.compound_statements)
    def visit_VarDecl(self, node):
        self._announce(node)
        self.visit(node.var_node)
        self.visit(node.type_node)
    def visit_TypeSpec(self, node):
        self._announce(node)
        print(node.value)
    def visit_Variable(self, node):
        self._announce(node)
        print(node.value)
    def visit_Compound(self, node):
        self._announce(node)
        for child in node.statement_list:
            self.visit(child)
    def visit_Assign(self, node):
        self._announce(node)
        self.visit(node.left)
        self.visit(node.right)
    def visit_BinOp(self, node):
        self._announce(node)
        self.visit(node.left)
        print(node.op.type)
        self.visit(node.right)
    def visit_UnaryOp(self, node):
        self._announce(node)
        print(node.op.type)
        self.visit(node.factor)
    def visit_Empty(self, node):
        self._announce(node)
    def visit_Num(self, node):
        self._announce(node)
        print(node.value)
    def build_ast(self):
        """Print the whole tree rooted at the stored AST."""
        self.visit(self.ast)
class Symbol(object):
    """Base record for symbol-table entries: a name plus an optional type."""
    def __init__(self, name, type=None):
        # Store the identifier first, then its (possibly absent) type.
        self.name = name
        self.type = type
class BuiltinTypeSymbol(Symbol):
    """A primitive type (INTEGER / REAL); the name alone identifies it."""
    def __init__(self, name):
        super().__init__(name)
    def __str__(self):
        # Printing a builtin type shows just its name.
        return self.name
    def __repr__(self):
        return f'<{type(self).__name__}(name={self.name})>'
class VarSymbol(Symbol):
    """A declared program variable: a name bound to a type symbol."""
    def __init__(self, name, type):
        super().__init__(name, type)
    def __str__(self):
        return f'<{type(self).__name__}(name={self.name}, type={self.type})>'
    __repr__ = __str__
class SymbolTable(object):
    """Maps symbol names to Symbol objects; pre-seeded with builtins."""
    def __init__(self):
        self._symbols = {}
        self._define_builtins()
    def _define_builtins(self):
        # The two primitive Pascal types every program may reference.
        self.insert(BuiltinTypeSymbol('INTEGER'))
        self.insert(BuiltinTypeSymbol('REAL'))
    def insert(self, symbol):
        """Add (or replace) a symbol, keyed by its name."""
        print('Insert: ' + symbol.name)
        self._symbols[symbol.name] = symbol
    def lookup(self, name):
        """Return the symbol for `name`, or None when undeclared."""
        print('Lookup: ' + name)
        return self._symbols.get(name)
    def __str__(self):
        rtn_str = '\nSymbol Table Contents \n'
        rtn_str = rtn_str + '_'*len(rtn_str) + '\n'
        # The !r conversion forces each value through __repr__ — this is
        # the f-string syntax the original comment said was unknown.
        sym_str = ''.join(f'{key} {value!r}\n'
                          for key, value in self._symbols.items())
        return rtn_str + sym_str
    __repr__ = __str__
class SemanticAnalyzer(NodeVisitor):
    """AST pass that builds a symbol table and checks declarations/uses."""
    def __init__(self, ast):
        self.ast = ast
        self.symtab = SymbolTable()
    def visit_Program(self, node):
        self.visit(node.block)
    def visit_Procedure(self, node):
        # Procedure bodies are not analyzed yet (no nested scopes).
        pass
    def visit_Block(self, node):
        for declaration in node.declarations:
            self.visit(declaration)
        self.visit(node.compound_statements)
    def visit_VarDecl(self, node):
        # Resolve the declared type, then register the variable,
        # rejecting redeclarations of the same name.
        type_symbol = self.visit(node.type_node)
        var_name = node.var_node.value
        var_symbol = VarSymbol(var_name, type_symbol)
        if self.symtab.lookup(var_name) is not None:
            raise Exception(f'Duplicate identifier found: [{var_name}]')
        self.symtab.insert(var_symbol)
    def visit_TypeSpec(self, node):
        return self.symtab.lookup(node.value)
    def visit_Variable(self, node):
        # Every use must refer to a previously declared name.
        if self.symtab.lookup(node.value) is None:
            raise Exception(f'Variable [{node.value}] not declared.')
    def visit_Compound(self, node):
        for child in node.statement_list:
            self.visit(child)
    def visit_Assign(self, node):
        self.visit(node.right)
        self.visit(node.left)
    def visit_BinOp(self, node):
        self.visit(node.right)
        self.visit(node.left)
    def visit_UnaryOp(self, node):
        self.visit(node.factor)
    def visit_Empty(self, node):
        pass
    def visit_Num(self, node):
        pass
    def analyze(self):
        """Run the analysis over the stored AST root."""
        self.visit(self.ast)
class Interpreter(NodeVisitor):
GLOBAL_SCOPE = OrderedDict()
def __init__(self, ast):
self.ast = ast
def visit_Program(self, node):
self.visit(node.block)
def visit_Procedure(self, node):
pass
def visit_Block(self, node):
for decl in node.declarations:
self.visit(decl)
self.visit(node.compound_statements)
def visit_VarDecl(self, node):
        # used in semantic analysis
pass
def visit_TypeSpec(self, node):
        # used in semantic analysis
pass
def visit_Variable(self, node):
var_name = node.value
value = self.GLOBAL_SCOPE.get(var_name)
if value == None:
raise Exception(f'Variable {var_name} not assigned.')
return value
def visit_Compound(self, node):
for statement in node.statement_list:
self.visit(statement)
def visit_Assign(self, node):
var_name = node.left.value
value = self.visit(node.right)
self.GLOBAL_SCOPE[var_name] = value
def visit_BinOp(self, node):
left = node.left
right = node.right
op_type = node.op.type
if op_type | |
newEID
xsect.elemDict = elemDict
del xsect.nodeDict[-1]
del xsect.elemDict[-1]
    def solidBox(self,xsect, elemX, elemY, L1, L2, MID, matlib, elemOrder):
        """Meshes a solid rectangular cross-section.

        Generates a structured quadrilateral mesh of a solid L1 x L2
        rectangle centered on the origin, using either linear 4-node
        (CQUADX) or quadratic 9-node (CQUADX9) elements, and stores the
        resulting node and element dictionaries on the cross-section.

        :Args:

        - `xsect (obj)`: The cross-section object to be meshed.
        - `elemX (int)`: Number of elements along the local x-direction.
        - `elemY (int)`: Number of elements along the local y-direction.
        - `L1 (float)`: Width of the rectangle (x-direction).
        - `L2 (float)`: Height of the rectangle (y-direction).
        - `MID (int)`: Material ID applied to every generated element.
        - `matlib (obj)`: The material library object used to create the
            elements.
        - `elemOrder (int)`: 1 generates linear CQUADX elements; any
            other value generates quadratic CQUADX9 elements.

        :Returns:

        - None
        """
        print('Box Meshing Commencing')
        # INITIALIZE INPUTS
        # Node dictionary; the -1 sentinel makes max(keys)+1 start at 0.
        nodeDict = {-1:None}
        # Element dictionary; same sentinel trick for EID numbering.
        elemDict = {-1:None}
        # Initialize the z location of the cross-section
        zc = 0
        # Node counts per direction: one extra node per element for
        # linear elements, two extra for quadratic ones.
        if elemOrder==1:
            nnx = elemX+1
            nny = elemY+1
        else:
            nnx = 2*elemX+1
            nny = 2*elemY+1
        # Create Mesh
        xvec = np.linspace(-L1/2,L1/2,nnx)
        yvec = np.linspace(-L2/2,L2/2,nny)[::-1]
        # NID Mesh
        Mesh = np.zeros((nny,nnx),dtype=int)
        EIDmesh = np.zeros((elemY,elemX),dtype=int)
        xmesh,ymesh = np.meshgrid(xvec,yvec)
        for i in range(0,nny):
            for j in range(0,nnx):
                newNID = int(max(nodeDict.keys())+1)
                Mesh[i,j] = newNID
                #Add node to NID Dictionary
                nodeDict[newNID] = Node(newNID,np.array([xmesh[i,j],ymesh[i,j],zc]))
        xsect.nodeDict = nodeDict
        if elemOrder==1:
            for i in range(0,elemY):
                for j in range(0,elemX):
                    newEID = int(max(elemDict.keys())+1)
                    NIDs = [Mesh[i+1,j],Mesh[i+1,j+1],Mesh[i,j+1],Mesh[i,j]]
                    nodes = [xsect.nodeDict[NID] for NID in NIDs]
                    elemDict[newEID] = CQUADX(newEID,nodes,MID,matlib)
                    EIDmesh[i,j] = newEID
        else:
            for i in range(0,elemY):
                for j in range(0,elemX):
                    newEID = int(max(elemDict.keys())+1)
                    NIDs = [Mesh[2*i+2,2*j],Mesh[2*i+2,2*j+1],Mesh[2*i+2,2*j+2],\
                        Mesh[2*i+1,2*j],Mesh[2*i+1,2*j+1],Mesh[2*i+1,2*j+2],\
                        Mesh[2*i,2*j],Mesh[2*i,2*j+1],Mesh[2*i,2*j+2]]
                    nodes = [xsect.nodeDict[NID] for NID in NIDs]
                    elemDict[newEID] = CQUADX9(newEID,nodes,MID,matlib)
                    EIDmesh[i,j] = newEID
        xsect.elemDict = elemDict
        # Drop the -1 sentinels now that real IDs exist.
        del xsect.nodeDict[-1]
        del xsect.elemDict[-1]
def rectangleHole(self,xsect, nelem, a, b, r, MID, matlib):
"""Meshes a box beam cross-section.
This method meshes a similar cross-section as the boxBeam method. The
geometry of this cross-section can be seen below. The interfaces
between the laminates is different, and more restrictive. In this case
all of the laminates must have the same number of plies, which must
also all be the same thickness.
.. image:: images/rectBoxGeom.png
:align: center
:Args:
- `xsect (obj)`: The cross-section object to be meshed.
- `meshSize (int)`: The maximum aspect ratio an element can have
- `x0 (float)`: The non-dimensional starting point of the cross-section
on the airfoil.
- `xf (float)`: The non-dimesnional ending point of the cross-section
on the airfoil.
- `matlib (obj)`: The material library object used to create CQUADX
elements.
:Returns:
- None
"""
print('Box Meshing Commencing')
# INITIALIZE INPUTS
# Initialize the node dictionary containing all nodes objects used by
# the cross-section
nodeDict = {-1:None}
# Initialize the element dictionary containing all element objects used
# by the cross-section
elemDict = {-1:None}
nelem=nelem*8+1
laminate = xsect.laminates[0]
# Initialize the z location of the cross-section
xs = [a/2.,a/2.,0.,-a/2.,-a/2.,-a/2.,0.,a/2.,a/2.]
ys = [0.,b/2.,b/2.,b/2.,0.,-b/2.,-b/2.,-b/2.,0.]
xsvec = np.array([])
ysvec = np.array([])
for i in range(0,len(xs)-1):
xsvec = np.append(xsvec,np.linspace(xs[i],xs[i+1],nelem/8.+1)[:-1])
ysvec = np.append(ysvec,np.linspace(ys[i],ys[i+1],nelem/8.+1)[:-1])
xc = r*np.cos(np.linspace(0,2*np.pi,nelem))[:-1]
yc = r*np.sin(np.linspace(0,2*np.pi,nelem))[:-1]
if not len(xc)==len(xsvec):
raise ValueError('Circle and square vectors dont match length.')
xmesh = np.zeros((int(nelem/8-1),len(xc)))
ymesh = np.zeros((int(nelem/8-1),len(xc)))
zmesh = np.zeros((int(nelem/8-1),len(xc)))
Mesh = np.zeros((int(nelem/8-1),len(xc)),dtype=int)
for i in range(0,len(xc)):
xmesh[:,i]=np.linspace(xc[i],xsvec[i],nelem/8-1)
ymesh[:,i]=np.linspace(yc[i],ysvec[i],nelem/8-1)
for i in range(0,np.size(xmesh,axis=0)):
for j in range(0,np.size(xmesh,axis=1)):
newNID = int(max(nodeDict.keys())+1)
Mesh[i,j] = newNID
#Add node to NID Dictionary
nodeDict[newNID] = Node(newNID,np.array([xmesh[i,j],ymesh[i,j],zmesh[i,j]]))
xmesh = np.hstack((xmesh,np.array([xmesh[:,0]]).T))
ymesh = np.hstack((ymesh,np.array([ymesh[:,0]]).T))
zmesh = np.hstack((zmesh,np.array([zmesh[:,0]]).T))
Mesh = np.hstack((Mesh,np.array([Mesh[:,0]],dtype=int).T))
xsect.nodeDict = nodeDict
laminate.mesh = Mesh
laminate.xmesh = xmesh
laminate.ymesh = ymesh
laminate.zmesh = zmesh
EIDmesh = np.zeros((np.size(xmesh,axis=0)-1,np.size(xmesh,axis=1)-1),dtype=int)
for i in range(0,np.size(xmesh,axis=0)-1):
for j in range(0,np.size(xmesh,axis=1)-1):
newEID = int(max(elemDict.keys())+1)
NIDs = [Mesh[i+1,j],Mesh[i+1,j+1],Mesh[i,j+1],Mesh[i,j]]
nodes = [xsect.nodeDict[NID] for NID in NIDs]
elemDict[newEID] = CQUADX(newEID,nodes,MID,matlib)
EIDmesh[i,j] = newEID
xsect.elemDict = elemDict
ylen = np.size(xmesh,axis=0)-1
xlen = np.size(xmesh,axis=1)-1
laminate.plotx = np.zeros((ylen*2,xlen*2))
laminate.ploty = np.zeros((ylen*2,xlen*2))
laminate.plotz = np.zeros((ylen*2,xlen*2))
laminate.plotc = np.zeros((ylen*2,xlen*2))
laminate.EIDmesh = EIDmesh
del xsect.nodeDict[-1]
del xsect.elemDict[-1]
class XSect:
"""Creates a beam cross-section object,
This cross-section can be made of multiple materials which can be in
general anisotropic. This is the main workhorse within the structures
library.
:Attributes:
- `Color (touple)`: A length 3 touple used to define the color of the
cross-section.
- `Airfoil (obj)`: The airfoil object used to define the OML of the cross-
section.
- `typeXSect (str)`: Defines what type of cross-section is to be used.
Currently the only supported type is 'box'.
- `normalVector (1x3 np.array[float])`: Expresses the normal vector of the
cross-section.
- `nodeDict (dict)`: A dictionary of all nodes used to descretize the
cross-section surface. The keys are the NIDs and the values stored
are the Node objects.
- `elemDict (dict)`: A dictionary of all elements used to descretize the
cross-section surface. the keys are the EIDs and the values stored
are the element objects.
- `X (ndx6 np.array[float])`: A very large 2D array. This is one of the
results of the cross-sectional analysis. This array relays the
force and moment resultants applied to the cross-section to the
nodal warping displacements exhibited by the cross-section.
- `Y (6x6 np.array[float])`: This array relays the force and moment
resultants applied to the cross-section to the rigid section
strains and curvatures exhibited by the cross-section.
- `dXdz (ndx6 np.array[float])`: A very large 2D array. This is one of the
results of the cross-sectional analysis. This array relays the
force and moment resultants applied to the cross-section to the
gradient of the nodal warping displacements exhibited by the
cross-section with respect to the beam axis.
- `xt (float)`: The x-coordinate of the tension center (point at which
tension and bending are decoupled)
- `yt (float)`: The y-coordinate of the tension center (point at which
tension and bending are decoupled)
- `xs (float)`: The x-coordinate of the shear center (point at which shear
and torsion are decoupled)
- `ys (float)`: The y-coordinate of the shear center (point at which shear
and torsion are decoupled)
- `refAxis (3x1 np.array[float])`: A column vector containing the reference
axis for the beam.
- `bendAxes (2x3 np.array[float])`: Contains two row vectors about which
bending from one axis is decoupled from bending about the other.
- `F_raw (6x6 np.array[float])`: The 6x6 compliance matrix that results
from cross-sectional analysis. This is the case where the reference
axis is at the origin.
- `K_raw (6x6 np.array[float])`: The 6x6 stiffness matrix that results
from cross-sectional analysis. This is the case where the reference
axis is at the origin.
- `F (6x6 np.array[float])`: The 6x6 compliance matrix for the cross-
section about the reference axis. The reference axis is by default
at the shear center.
- `K (6x6 np.array[float])`: The 6x6 stiffness matrix for the cross-
section about the reference axis. The reference axis is by default
at the shear center.
- `T1 (3x6 np.array[float])`: The transformation matrix that converts
strains and curvatures from the local xsect origin to the reference
axis.
- `T2 (3x6 np.array[float])`: The transformation matrix that converts
forces and moments from the local xsect origin to the reference
axis.
- `x_m (1x3 np.array[float])`: Center of mass of the cross-section about in
the local xsect CSYS
- `M (6x6 np.array[float])`: This mass matrix relays linear and angular
velocities to linear and angular momentum of the cross-section.
:Methods:
- `resetResults`: This method resets all results (displacements, strains
and stresse) within the elements used by the cross-section object.
- `calcWarpEffects`: Given applied force and moment resultants, this method
calculates the warping displacement, | |
<gh_stars>0
# encoding: utf-8
#
# Copyright 2017-2019 <NAME>.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
app_usage
Created by <NAME> on 2017-02-18.
Code for working with the app usage database.
Much code lifted from the application_usage scripts created by Google MacOps:
https://github.com/google/macops/tree/master/crankd
"""
from __future__ import absolute_import, print_function
# standard Python libs
import logging
import os
import sqlite3
import time
# our libs
#from . import display
from . import prefs
# SQLite db to store application usage data
APPLICATION_USAGE_DB = os.path.join(
    prefs.pref('ManagedInstallDir'), 'application_usage.sqlite')
# SQL to detect existence of application usage table
APPLICATION_USAGE_TABLE_DETECT = 'SELECT * FROM application_usage LIMIT 1'
# This table creates ~64 bytes of disk data per event.
APPLICATION_USAGE_TABLE_CREATE = (
    'CREATE TABLE application_usage ('
    'event TEXT,'
    'bundle_id TEXT,'
    'app_version TEXT,'
    'app_path TEXT,'
    'last_time INTEGER DEFAULT 0,'
    'number_times INTEGER DEFAULT 0,'
    'PRIMARY KEY (event, bundle_id)'
    ')')
APPLICATION_USAGE_TABLE_INSERT = (
    'INSERT INTO application_usage VALUES ('
    '?, '  # event
    '?, '  # bundle_id
    '?, '  # app_version
    '?, '  # app_path
    '?, '  # last_time
    '? '  # number_times
    ')'
)
# keep same order of columns as APPLICATION_USAGE_TABLE_INSERT
APPLICATION_USAGE_TABLE_SELECT = (
    'SELECT '
    'event, bundle_id, app_version, app_path, last_time, number_times '
    'FROM application_usage'
)
# An UPDATE bumps number_times and refreshes the metadata columns.
APPLICATION_USAGE_TABLE_UPDATE = (
    'UPDATE application_usage SET '
    'app_version=?,'
    'app_path=?,'
    'last_time=?,'
    'number_times=number_times+1 '
    'WHERE event=? and bundle_id=?'
)
# SQL for the Munki self-service install/removal request table.
INSTALL_REQUEST_TABLE_DETECT = 'SELECT * FROM install_requests LIMIT 1'
INSTALL_REQUEST_TABLE_CREATE = (
    'CREATE TABLE install_requests ('
    'event TEXT,'
    'item_name TEXT,'
    'item_version TEXT,'
    'last_time INTEGER DEFAULT 0,'
    'number_times INTEGER DEFAULT 0,'
    'PRIMARY KEY (event, item_name)'
    ')')
INSTALL_REQUEST_TABLE_INSERT = (
    'INSERT INTO install_requests VALUES ('
    '?, '  # event
    '?, '  # item_name
    '?, '  # item_version
    '?, '  # last_time
    '? '  # number_times
    ')'
)
# keep same order of columns as INSTALL_REQUEST_TABLE_INSERT
INSTALL_REQUEST_TABLE_SELECT = (
    'SELECT '
    'event, item_name, item_version, last_time, number_times '
    'FROM install_requests'
)
# An UPDATE bumps number_times and refreshes the metadata columns.
INSTALL_REQUEST_TABLE_UPDATE = (
    'UPDATE install_requests SET '
    'item_version=?,'
    'last_time=?,'
    'number_times=number_times+1 '
    'WHERE event=? and item_name=?'
)
class ApplicationUsageRecorder(object):
"""Tracks application launches, activations, and quits.
Also tracks Munki selfservice install and removal requests."""
def _connect(self, database_name=None):
"""Connect to database.
Args:
database_name: str, default APPLICATION_USAGE_DB
Returns:
sqlite3.Connection instance
"""
# pylint: disable=no-self-use
if database_name is None:
database_name = APPLICATION_USAGE_DB
conn = sqlite3.connect(database_name)
return conn
def _close(self, conn):
"""Close database.
Args:
conn: sqlite3.Connection instance
"""
# pylint: disable=no-self-use
conn.close()
def _detect_table(self, conn, table_detection_sql):
"""Detect whether the application usage table exists.
Args:
conn: sqlite3.Connection object
table_detection_sql: sql query used to detect if the
table exists
Returns:
True if the table exists, False if not.
Raises:
sqlite3.Error: if error occurs
"""
# pylint: disable=no-self-use
try:
conn.execute(table_detection_sql)
exists = True
except sqlite3.OperationalError as err:
if err.args[0].startswith('no such table'):
exists = False
else:
raise
return exists
def _detect_application_usage_table(self, conn):
"""Detect whether the application usage table exists.
Args:
conn: sqlite3.Connection object
Returns:
True if the table exists, False if not.
Raises:
sqlite3.Error: if error occurs
"""
# pylint: disable=no-self-use
return self._detect_table(conn, APPLICATION_USAGE_TABLE_DETECT)
def _detect_install_request_table(self, conn):
"""Detect whether the application usage table exists.
Args:
conn: sqlite3.Connection object
Returns:
True if the table exists, False if not.
Raises:
sqlite3.Error: if error occurs
"""
# pylint: disable=no-self-use
return self._detect_table(conn, INSTALL_REQUEST_TABLE_DETECT)
def _create_application_usage_table(self, conn):
"""Create application usage table when it does not exist.
Args:
conn: sqlite3.Connection object
Raises:
sqlite3.Error: if error occurs
"""
# pylint: disable=no-self-use
conn.execute(APPLICATION_USAGE_TABLE_CREATE)
def _insert_application_usage(self, conn, event, app_dict):
"""Insert usage data into application usage table.
Args:
conn: sqlite3.Connection object
event: str
app_dict: {bundle_id: str,
version: str,
path: str}
"""
# pylint: disable=no-self-use
# this looks weird, but it's the simplest way to do an update or insert
# operation in sqlite, and atomically update number_times, that I could
# figure out. plus we avoid using transactions and multiple SQL
# statements in most cases.
now = int(time.time())
bundle_id = app_dict.get('bundle_id', 'UNKNOWN_APP')
app_version = app_dict.get('version', '0')
app_path = app_dict.get('path', '')
data = (app_version, app_path, now, event, bundle_id)
query = conn.execute(APPLICATION_USAGE_TABLE_UPDATE, data)
if query.rowcount == 0:
number_times = 1
data = (event, bundle_id, app_version, app_path, now, number_times)
conn.execute(APPLICATION_USAGE_TABLE_INSERT, data)
def _create_install_request_table(self, conn):
"""Create install request table when it does not exist.
Args:
conn: sqlite3.Connection object
Raises:
sqlite3.Error: if error occurs
"""
# pylint: disable=no-self-use
conn.execute(INSTALL_REQUEST_TABLE_CREATE)
def _insert_install_request(self, conn, request_dict):
"""Insert usage data into application usage table.
Args:
conn: sqlite3.Connection object
event: str
request_dict: {name: str,
version: str}
"""
# pylint: disable=no-self-use
# this looks weird, but it's the simplest way to do an update or insert
# operation in sqlite, and atomically update number_times, that I could
# figure out. plus we avoid using transactions and multiple SQL
# statements in most cases.
now = int(time.time())
event = request_dict.get('event', 'UNKNOWN_EVENT')
item_name = request_dict.get('name', 'UNKNOWN_ITEM')
item_version = request_dict.get('version', '0')
data = (item_version, now, event, item_name)
query = conn.execute(INSTALL_REQUEST_TABLE_UPDATE, data)
if query.rowcount == 0:
number_times = 1
data = (event, item_name, item_version, now, number_times)
conn.execute(INSTALL_REQUEST_TABLE_INSERT, data)
def _recreate_database(self):
"""Recreate a database.
Returns:
int number of rows that were recovered from old database
and written into new one
"""
recovered = 0
tables = [{'select_sql': APPLICATION_USAGE_TABLE_SELECT,
'create_sql': APPLICATION_USAGE_TABLE_CREATE,
'insert_sql': APPLICATION_USAGE_TABLE_INSERT,
'rows': []},
{'select_sql': INSTALL_REQUEST_TABLE_SELECT,
'create_sql': INSTALL_REQUEST_TABLE_CREATE,
'insert_sql': INSTALL_REQUEST_TABLE_INSERT,
'rows': []}]
try:
conn = self._connect()
for table in tables:
query = conn.execute(table['select_sql'])
try:
while True:
row = query.fetchone()
if not row:
break
table['rows'].append(row)
except sqlite3.Error:
pass
# ok, done, hit an error
conn.close()
except sqlite3.Error as err:
logging.error('Unhandled error reading existing db: %s', str(err))
return recovered
usage_db_tmp = '%s.tmp.%d' % (APPLICATION_USAGE_DB, os.getpid())
recovered = 0
try:
conn = self._connect(usage_db_tmp)
for table in tables:
conn.execute(table['create_sql'])
for row in table['rows']:
if row[1] == '':
# skip rows with empty bundle_id or item_name
continue
try:
conn.execute(table['insert_sql'], row)
conn.commit()
recovered += 1
except sqlite3.IntegrityError as err:
logging.error(
'Ignored error: %s: %s', str(err), str(row))
self._close(conn)
os.unlink(APPLICATION_USAGE_DB)
os.rename(usage_db_tmp, APPLICATION_USAGE_DB)
except sqlite3.Error as err:
logging.error('Unhandled error: %s', str(err))
recovered = 0
return recovered
def verify_database(self, fix=False):
"""Verify database integrity."""
conn = self._connect()
try:
for sql in [APPLICATION_USAGE_TABLE_SELECT,
INSTALL_REQUEST_TABLE_SELECT]:
query = conn.execute(sql)
dummy_rows = query.fetchall()
query_ok = True
except sqlite3.Error:
query_ok = False
if not query_ok:
if fix:
logging.warning('Recreating database.')
logging.warning(
'Recovered %d rows.', self._recreate_database())
else:
logging.warning('Database is malformed.')
else:
logging.info('Database is OK.')
def log_application_usage(self, event, app_dict):
"""Log application usage.
Args:
event: str, like "launch" or "quit"
app_dict: Dictionary containing bundle_id, version, path
"""
if app_dict.get('bundle_id') is None:
logging.warning(
'Application object had no bundle_id: %s', app_dict.get('path'))
return
logging.debug('%s: bundle_id: %s version: %s path: %s', event,
app_dict.get('bundle_id'),
app_dict.get('version'),
app_dict.get('path'))
try:
conn = self._connect()
if not self._detect_application_usage_table(conn):
self._create_application_usage_table(conn)
self._insert_application_usage(conn, event, app_dict)
conn.commit()
except sqlite3.OperationalError as err:
logging.error('Error writing %s event to database: %s', event, err)
except sqlite3.DatabaseError as err:
if err.args[0] == 'database disk image is malformed':
self._recreate_database()
logging.error('Database error: %s', err)
self._close(conn)
def log_install_request(self, request_dict):
"""Log install request.
Args:
request_dict: Dictionary containing:
event: str, like "install" or "remove"
name: str
version: str
"""
if (request_dict.get('event') is None or
request_dict.get('name') is None):
logging.warning(
'Request dict is missing event or name: %s', request_dict)
return
logging.debug('%s: name: %s version: %s',
request_dict.get('event'),
request_dict.get('name'),
request_dict.get('version'))
try:
conn = self._connect()
if not self._detect_install_request_table(conn):
self._create_install_request_table(conn)
self._insert_install_request(conn, request_dict)
conn.commit()
except sqlite3.OperationalError as err:
logging.error('Error writing install request to database: %s', err)
except sqlite3.DatabaseError as err:
if err.args[0] == 'database is malformed':
self._recreate_database()
logging.error('Database error: %s', err)
self._close(conn)
class ApplicationUsageQuery(object):
    '''A class to query our application usage db to determine the last time
    an application was activated'''

    def __init__(self):
        '''Open connection to DB.

        On connection failure the error is logged and self.conn is left as
        None; other methods must tolerate a None connection.
        '''
        self.database = APPLICATION_USAGE_DB
        self.day_in_seconds = 24 * 60 * 60  # seconds per day, for timestamp math
        try:
            self.conn = sqlite3.connect(self.database)
        except sqlite3.Error as err:
            logging.error(
                'Error connecting to %s: %s', self.database, str(err))
            self.conn = None
    def __del__(self):
        '''Close connection to DB.

        Best-effort: close failures are ignored because __del__ may run
        during interpreter teardown.
        '''
        if self.conn:  # conn is None when __init__ failed to connect
            try:
                self.conn.close()
            except sqlite3.Error:
                pass
def days_of_data(self):
'''Returns how many days of data we have on | |
'C3', 2, ['r:<0,0,1,a>', 't:<0,0,b>'], 4, '<0,0,0>', 'C4', 3, ['r:<0,0,1,c>', 't:<0,0,d>'], 1, '<e,0,0>', 'O', 'F432', 3, '(2*e, 2*e, 2*e), (90, 90, 90)', 5, 6],
58: [58, 'C3', 2, ['r:<0,0,1,a>', 't:<0,0,b>'], 7, '<0,0,0>', 'C5', 4, ['r:<0,0,1,c>', 't:<0,0,d>'], 9, '<0,0,0>', 'I', 'I', 0, 'N/A', 4, 2],
59: [59, 'C3', 2, ['r:<0,0,1,a>', 't:<0,0,b>'], 1, '<e,0.57735*e,0>', 'C6', 5, ['r:<0,0,1,c>'], 1, '<0,0,0>', 'C6', 'p6', 2, '(2*e, 2*e), 120', 4, 2],
60: [60, 'C3', 2, ['r:<0,0,1,a>', 't:<0,0,b>'], 1, '<e,0.57735*e,0>', 'D2', 6, ['None'], 1, '<e,0,0>', 'D6', 'p622', 2, '(2*e, 2*e), 120', 3, 2],
61: [61, 'C3', 2, ['r:<0,0,1,a>', 't:<0,0,b>'], 4, '<0,0,0>', 'D2', 6, ['None'], 1, '<e,0,0>', 'T', 'P23', 3, '(2*e, 2*e, 2*e), (90, 90, 90)', 3, 3],
62: [62, 'C3', 2, ['r:<0,0,1,a>', 't:<0,0,b>'], 4, '<0,0,0>', 'D2', 6, ['None'], 3, '<e,0,e>', 'O', 'F432', 3, '(4*e, 4*e, 4*e), (90, 90, 90)', 3, 3],
63: [63, 'C3', 2, ['r:<0,0,1,a>', 't:<0,0,b>'], 4, '<0,0,0>', 'D2', 6, ['None'], 3, '<2*e,e,0>', 'O', 'I4132', 3, '(8*e,8*e, 8*e), (90, 90, 90)', 3, 2],
64: [64, 'C3', 2, ['r:<0,0,1,a>', 't:<0,0,b>'], 1, '<e,0.57735*e,0>', 'D3', 7, ['None'], 11, '<0,0,0>', 'D3', 'p312', 2, '(2*e, 2*e), 120', 3, 2],
65: [65, 'C3', 2, ['r:<0,0,1,a>', 't:<0,0,b>'], 1, '<e,0.57735*e,0>', 'D3', 7, ['None'], 1, '<0,0,0>', 'D3', 'p321', 2, '(2*e, 2*e), 120', 3, 2],
66: [66, 'C3', 2, ['r:<0,0,1,a>', 't:<0,0,b>'], 12, '<4*e,0,0>', 'D3', 7, ['None'], 4, '<3*e,3*e,3*e>', 'O', 'P4132', 3, '(8*e, 8*e, 8*e), (90, 90, 90)', 3, 4],
67: [67, 'C3', 2, ['r:<0,0,1,a>', 't:<0,0,b>'], 4, '<0,0,0>', 'D4', 8, ['None'], 1, '<0,0,e>', 'O', 'P432', 3, '(2*e, 2*e, 2*e), (90, 90, 90)', 3, 2],
68: [68, 'C3', 2, ['r:<0,0,1,a>', 't:<0,0,b>'], 1, '<e,0.57735*e,0>', 'D6', 9, ['None'], 1, '<0,0,0>', 'D6', 'p622', 2, '(2*e, 2*e), 120', 3, 2],
69: [69, 'C3', 2, ['r:<0,0,1,a>', 't:<0,0,b>'], 4, '<e,0,0>', 'T', 10, ['None'], 1, '<0,0,0>', 'T', 'F23', 3, '(2*e, 2*e, 2*e), (90, 90, 90)', 3, 2],
70: [70, 'C3', 2, ['r:<0,0,1,a>', 't:<0,0,b>'], 4, '<e,0,0>', 'O', 11, ['None'], 1, '<0,0,0>', 'O', 'F432', 3, '(2*e, 2*e, 2*e), (90, 90, 90)', 3, 2],
71: [71, 'C4', 3, ['r:<0,0,1,a>', 't:<0,0,b>'], 1, '<0,0,0>', 'C4', 3, ['r:<0,0,1,c>'], 1, '<e,e,0>', 'C4', 'p4', 2, '(2*e, 2*e), 90', 4, 2],
72: [72, 'C4', 3, ['r:<0,0,1,a>', 't:<0,0,b>'], 1, '<0,0,0>', 'C4', 3, ['r:<0,0,1,c>', 't:<0,0,d>'], 2, '<0,e,e>', 'O', 'P432', 3, '(2*e, 2*e, 2*e), (90, 90, 90)', 5, 4],
73: [73, 'C4', 3, ['r:<0,0,1,a>', 't:<0,0,b>'], 1, '<0,0,0>', 'D2', 6, ['None'], 1, '<e,0,0>', 'D4', 'p422', 2, '(2*e, 2*e), 90', 3, 2],
74: [74, 'C4', 3, ['r:<0,0,1,a>', 't:<0,0,b>'], 1, '<e,0,0>', 'D2', 6, ['None'], 5, '<0,0,0>', 'D4', 'p4212', 2, '(2*e, 2*e), 90', 3, 2],
75: [75, 'C4', 3, ['r:<0,0,1,a>', 't:<0,0,b>'], 2, '<0,0,0>', 'D2', 6, ['None'], 3, '<2*e,e,0>', 'O', 'I432', 3, '(4*e, 4*e, 4*e), (90, 90, 90)', 3, 2],
76: [76, 'C4', 3, ['r:<0,0,1,a>', 't:<0,0,b>'], 1, '<0,0,0>', 'D2', 6, ['None'], 3, '<e,0,e>', 'O', 'F432', 3, '(4*e, 4*e, 4*e), (90, 90, 90)', 3, 3],
77: [77, 'C4', 3, ['r:<0,0,1,a>', 't:<0,0,b>'], 1, '<0,0,0>', 'D3', 7, ['None'], 4, '<e,e,e>', 'O', 'I432', 3, '(4*e, 4*e, 4*e), (90, 90, 90)', 3, 2],
78: [78, 'C4', 3, ['r:<0,0,1,a>', 't:<0,0,b>'], 1, '<e,e,0>', 'D4', 8, ['None'], 1, '<0,0,0>', 'D4', 'p422', 2, '(2*e, 2*e), 90', 3, 2],
79: [79, 'C4', 3, ['r:<0,0,1,a>', 't:<0,0,b>'], 2, '<0,0,0>', 'D4', 8, ['None'], 1, '<e,e,0>', 'O', 'P432', 3, '(2*e, 2*e, 2*e), (90, 90, 90)', 3, 2],
80: [80, 'C4', 3, ['r:<0,0,1,a>', 't:<0,0,b>'], 1, '<0,0,0>', 'T', 10, ['None'], 1, '<e,e,e>', 'O', 'F432', 3, '(4*e, 4*e, 4*e), (90, 90, 90)', 3, 2],
81: [81, 'C4', 3, ['r:<0,0,1,a>', 't:<0,0,b>'], 1, '<e,e,0>', 'O', 11, ['None'], 1, '<0,0,0>', 'O', 'P432', 3, '(2*e, 2*e, 2*e), (90, 90, 90)', 3, 2],
82: [82, 'C6', 5, ['r:<0,0,1,a>', 't:<0,0,b>'], 1, '<0,0,0>', 'D2', 6, ['None'], 1, '<e,0,0>', 'D6', 'p622', 2, '(2*e, 2*e), 120', 3, 2],
83: [83, 'C6', 5, ['r:<0,0,1,a>', 't:<0,0,b>'], 1, '<0,0,0>', 'D3', 7, ['None'], 11, '<e,0.57735*e,0>', 'D6', 'p622', 2, '(2*e, 2*e), 120', 2, 2],
84: [84, 'D2', 6, ['None'], 1, '<0,0,0>', 'D2', 6, ['None'], 1, '<e,f,0>', 'D2', 'p222', 2, '(2*e, 2*f), 90', 2, 2],
85: [85, 'D2', 6, ['None'], 1, '<0,0,0>', 'D2', 6, ['None'], 1, '<e,f,g>', 'D2', 'F222', 3, '(4*e, 4*f, 4*g), (90, 90, 90)', 3, 3],
86: [86, 'D2', 6, ['None'], 1, '<e,0,0>', 'D2', 6, ['None'], 5, '<0,0,f>', 'D4', 'P4222', 3, '(2*e, 2*e, 4*f), (90, 90, 90)', 2, 2],
87: [87, 'D2', 6, ['None'], 1, '<e,0,0>', 'D2', 6, ['None'], 13, '<0,0,-f>', 'D6', 'P6222', 3, '(2*e, 2*e, 6*f), (90, 90, 120)', 2, 2],
88: [88, 'D2', 6, ['None'], 3, '<0,e,2*e>', 'D2', 6, ['None'], 5, '<0,2*e,e>', 'O', 'P4232', 3, '(4*e, 4*e, 4*e), (90, 90, 90)', 1, 2],
89: [89, 'D2', 6, ['None'], 1, '<e,0,0>', 'D3', 7, ['None'], 11, '<e,0.57735*e,0>', 'D6', 'p622', 2, '(2*e, 2*e), 120', 1, 1],
90: [90, 'D2', 6, ['None'], 1, '<e,0,0>', 'D3', 7, ['None'], 11, '<e,0.57735*e,f>', 'D6', 'P622', 3, '(2*e, 2*e, 2*f), (90, 90, 120)', 2, 2],
91: [91, 'D2', 6, ['None'], 1, '<0,0,2*e>', 'D3', 7, ['None'], 4, '<e,e,e>', 'D6', 'P4232', 3, '(4*e, 4*e, 4*e), (90, 90, 90)', 1, 2],
92: [92, 'D2', 6, ['None'], 3, '<2*e,e,0>', 'D3', 7, ['None'], 4, '<e,e,e>', 'O', 'I4132', 3, '(8*e, 8*e, 8*e), (90, 90, 90)', 1, 1],
93: [93, 'D2', 6, ['None'], 1, '<e,0,0>', 'D4', 8, ['None'], 1, '<0,0,0>', 'D4', 'p422', 2, '(2*e, 2*e), 90', 1, 1],
94: [94, 'D2', 6, ['None'], 1, '<e,0,f>', 'D4', 8, ['None'], 1, '<0,0,0>', 'D4', 'P422', 3, '(2*e, 2*e, 2*f), (90, 90,90)', 2, 2],
95: [95, 'D2', 6, ['None'], 5, '<e,0,f>', 'D4', 8, ['None'], 1, '<0,0,0>', 'D4', 'I422', 3, '(2*e, 2*e, 4*f), (90, 90,90)', 2, 2],
96: [96, 'D2', 6, ['None'], 3, '<0,e,2*e>', 'D4', 8, ['None'], 1, '<0,0,2*e>', 'O', 'I432', 3, '(4*e, 4*e, 4*e), (90, 90, 90)', 1, 1],
97: [97, 'D2', 6, ['None'], 1, '<e,0,0>', 'D6', 9, ['None'], 1, '<0,0,0>', 'D6', 'p622', 2, '(2*e, 2*e), 120', 1, 1],
98: [98, 'D2', 6, ['None'], 1, '<e,0,f>', 'D6', 9, ['None'], 1, '<0,0,0>', 'D6', 'P622', 3, '(2*e, 2*e, 2*f), (90, 90, 120)', 2, 2],
99: [99, 'D2', 6, ['None'], 1, '<e,0,0>', 'T', 10, ['None'], 1, '<0,0,0>', 'T', 'P23', 3, '(2*e, 2*e, 2*e), (90, 90, 90)', 1, 1],
100: [100, 'D2', 6, ['None'], 1, '<e,e,0>', 'T', 10, ['None'], 1, '<0,0,0>', 'T', 'P23', 3, '(2*e, 2*e, 2*e), (90, 90, 90)', 1, 2],
101: [101, 'D2', 6, ['None'], 3, '<e,0,e>', 'T', 10, ['None'], 1, '<e,e,e>', 'O', 'F432', 3, '(4*e, 4*e, 4*e), (90, 90, 90)', 1, 1],
102: [102, 'D2', 6, ['None'], 3, '<2*e,e,0>', 'T', 10, ['None'], 1, '<0,0,0>', 'O', 'P4232', 3, '(4*e, 4*e, 4*e), (90, 90, 90)', 1, 2],
103: [103, 'D2', 6, ['None'], 3, '<e,0,e>', 'O', 11, ['None'], 1, '<0,0,0>', 'O', 'F432', 3, '(4*e, 4*e, 4*e), (90, 90, 90)', 1, 1],
104: [104, 'D2', 6, ['None'], 3, '<2*e,e,0>', 'O', 11, ['None'], 1, '<0,0,0>', 'O', 'I432', 3, '(4*e, 4*e, 4*e), (90, 90, 90)', 1, 2],
105: [105, 'D3', 7, ['None'], 11, '<0,0,0>', 'D3', 7, ['None'], 11, '<e,0.57735*e,0>', 'D3', 'p312', 2, '(2*e, 2*e), 120', 1, 1],
106: [106, 'D3', 7, ['None'], 11, '<0,0,0>', 'D3', 7, ['None'], 11, '<e,0.57735*e,f>', 'D3', 'P312', 3, '(2*e, 2*e, 2*f), (90, 90, 120)', 2, 2],
107: [107, 'D3', 7, ['None'], 1, '<0,0,0>', 'D3', 7, ['None'], 11, '<e,0.57735*e,f>', 'D6', 'P6322', 3, '(2*e, 2*e, 4*f), (90, 90, 120)', 2, 2],
108: [108, 'D3', 7, ['None'], 4, '<e,e,e>', 'D3', 7, ['None'], 12, '<e,3*e,e>', 'O', 'P4232', 3, '(4*e, 4*e, 4*e), (90, 90, 90)', 1, 2],
109: [109, 'D3', 7, ['None'], 4, '<3*e,3*e,3*e>', 'D3', 7, ['None'], 12, '<e,3*e,5*e>', 'O', 'P4132', 3, '(8*e, 8*e, 8*e), (90, 90, 90)', 1, 1],
110: [110, 'D3', 7, ['None'], 4, '<e,e,e>', 'D4', 8, ['None'], 1, '<0,0,2*e>', 'O', 'I432', 3, '(4*e, 4*e, 4*e), (90, 90, 90)', 1, 2],
111: [111, 'D3', 7, ['None'], 11, '<e,0.57735*e,0>', 'D6', 9, ['None'], 1, '<0,0,0>', 'D6', 'p622', 2, '(2*e, 2*e), 120', 1, 1],
112: [112, 'D3', 7, ['None'], 11, '<e,0.57735*e,f>', 'D6', 9, ['None'], 1, '<0,0,0>', 'D6', 'P622', 3, '(2*e, 2*e, 2*f), (90, 90, 120)', 2, 2],
113: [113, 'D3', 7, ['None'], 4, '<e,e,e>', 'T', 10, ['None'], 1, '<0,0,0>', 'O', 'F4132', 3, '(8*e, 8*e, 8*e), (90, 90, 90)', 1, 1],
| |
##################################################
# Simplest decorator: mutate the function object and return it unchanged.
def decorator(F):
    print('Called decorator, get function:', F)
    F.data = 1000  # attach an attribute; func.data is visible to callers
    return F

@decorator
def func(x, y):
    return x + y

##################################################
# Wrapping decorator: replace the function with a proxy that logs arguments.
def decorator(F):
    print('Called decorator, get function:', F)
    def wrapper(*args):
        print('I got arguments:', *args)
        F(*args)  # NOTE: wrapper discards F's return value
    return wrapper

@decorator
def func(x, y):  # func = decorator(func) --> func is decorator.<locals>.wrapper
    print(x + y)
##################################################
# Class-based decorator: the instance replaces the function and calls are
# routed through __call__.
class decorator:
    def __init__(self, func):
        print('Called decorator, get function:', func)
        self.func = func
    def __call__(self, *args):
        print('I got arguments:', *args)
        self.func(*args)

@decorator
def func(x, y):  # func = decorator(func) - instance of class 'decorator'
    print(x + y)
# "Called decorator, get function: <function func at 0x7f3692ed7f28>"
# func.func - <function func at 0x7f3692ed7f28>
##################################################
# Function-based decorator applied both to a plain function and a method;
# wrapper's *args carries 'self' through for the method case.
def decorator(F):
    print('Called decorator, get function:', F)
    def wrapper(*args):
        print('I got arguments:', *args)
        F(*args)
    return wrapper

@decorator
def func(x, y):
    print(x + y)

class C:
    @decorator
    def method(self, x, y):  # C.method = decorator(C.method);
        print(x * y)  # 'decorator' - simple function, not an instance of a class!!!
##################################################
# Class decorator returning a Wrapper class: attribute access on instances
# is forwarded to the wrapped original instance via __getattr__.
def decorator(cls):
    class Wrapper:
        def __init__(self, *args):
            self.wrapped = cls(*args)
        def __getattr__(self, name):
            print("Wrapper.__getattr__ '{0}' ---> {1}.__getattr__ '{0}'".format(name, self.wrapped.__class__.__name__))
            return getattr(self.wrapped, name)
    return Wrapper

@decorator
class C:  # C = decorator(C)
    def __init__(self, x, y):  # called by Wrapper.__init__
        self.attr = 'spam: ' + str(x) + str(y)

x = C(100, 500)
print(x.attr)

@decorator
class A:
    def __init__(self, *args):
        self.args = args

y = A(100, 200, 300, 'a')
print(y.args)
##################################################
# Pitfall: one Decorator instance is shared by ALL "instances" of C, so
# each new C(...) clobbers the previously wrapped object.
class Decorator:
    def __init__(self, C):  # During decoration @: C = Decorator(C)
        self.C = C  # Save the original class, not an instance!
    def __call__(self, *args):  # During instance creation: C(*args) ---> Decorator(C)(*args)
        self.wrapped = self.C(*args)  # Save an instance of the original class in 'wrapped'
        return self
    def __getattr__(self, attrname):
        return getattr(self.wrapped, attrname)  # Forward to the wrapped instance

@Decorator
class C:  # C = Decorator(C) => C is an instance of class Decorator !!!
    def __init__(self, x, y):  # called by Decorator.__call__(C, x, y)
        self.attr = (x, y)

# NOTE(review): C() passes no arguments here, but __init__ requires x and
# y — as written these calls raise TypeError. Confirm intended example args.
x = C()
y = C()  # Will override 'x'!
##################################################
# The shared-state pitfall fixed by returning a fresh Wrapper per instance.
def decorator(C):  # During decoration @: C = decorator(C)
    class Wrapper:
        def __init__(self, *args):  # During an instance creation
            self.wrapped = C(*args)
    return Wrapper

@decorator
class B:
    def __init__(self, x, y):
        self.attr = (x, y)

# Other case:
class Wrapper:
    def __init__(self, instance):
        self.wrapped = instance  # Wrapper().wrapped = C(*args)

def decorator(C):  # During decoration @: C = decorator(C)
    def onCall(*args):  # During an instance creation
        return Wrapper(C(*args))  # Embeds an instance into an instance
    return onCall

@decorator
class C:  # C = decorator(C) => calling C(...) runs onCall
    def __init__(self, x, y):  # called via onCall(x, y)
        self.attr = (x, y)

# Check: both approaches expose the original instance as .wrapped.
x = B(10, 20); X = C(10, 20)
y = B(30, 40); Y = C(30, 40)
for i in (x, y, X, Y):
    print(i.wrapped.attr)
##################################################
# Stacked identity decorators: func = d1(d2(d3(func))).
def d1(F): return F
def d2(F): return F
def d3(F): return F

@d1
@d2
@d3
def func():
    print('spam')

func()
##################################################
# Stacked wrapping decorators compose inside-out:
# func = d1(d2(d3(func))), so the result is 'X' + 'Y' + 'Z' + 'spam'.
def d1(F):
    print('decorator d1')
    return lambda: 'X' + F()
def d2(F):
    print('decorator d2')
    return lambda: 'Y' + F()
def d3(F):
    print('decorator d3')
    return lambda: 'Z' + F()

@d1
@d2
@d3
def func():  # func = d1(d2(d3(func)))
    return 'spam'

print(func())
##################################################
# file: filler.py
def filler(collection={}):
    '''
    This function will help you to create and quickly fill a dictionary.
    If you pass in argument your existing dictionary the function will
    fill it with a copy.
    '''
    # Copying first makes the mutable default argument safe here: the
    # shared default dict is never mutated.
    collection = collection.copy()
    print('Press Ctrl+C to stop filling')
    try:
        while True:
            attr_value = input('ATTR=VALUE: ')
            attr_value = attr_value.replace(' ', '').split('=')
            collection[attr_value[0]] = attr_value[1]
    except KeyboardInterrupt:
        # Ctrl+C ends the fill loop and returns what was collected.
        print('...\nThe end')
    return collection
##################################################
# Decorator as a class: per-function call counter kept on the instance.
class tracer:
    def __init__(self, func):  # at the stage of decoration @:
        self.calls = 0  # saves original function 'func'
        self.func = func
    def __call__(self, *args):  # On subsequent calls:
        self.calls += 1  # calls the original function 'func'
        print('call %s to %s' % (self.calls, self.func.__name__))
        self.func(*args)  # NOTE: func's return value is discarded

@tracer
def spam(a, b, c):  # spam = tracer(spam) - 'spam' is an instance of the class 'tracer'
    print(a + b + c)  # Wraps the spam function with a decorator object

# Equivalent implementation without the use of a decorator:
calls = 0
def tracer(func, *args):  # not a decorator: called directly as tracer(spam, 1, 2, 3)
    global calls
    calls += 1
    print('call %s to %s' % (calls, func.__name__))
    func(*args)

def spam(a, b, c):
    print(a, b, c)

##################################################
# Class-based tracer again, now forwarding keyword arguments too.
class tracer:  # The state is saved in attributes of the instance
    def __init__(self, func):  # At the stage of decoration @:
        self.calls = 0  # Saves original function 'func'
        self.func = func
    def __call__(self, *args, **kwargs):  # Called on access to the original function 'func'
        self.calls += 1
        print('call %s to %s' % (self.calls, self.func.__name__))
        self.func(*args, **kwargs)

@tracer
def spam(a, b, c):  # spam = tracer(spam)
    print(a + b + c)  # calls method tracer.__init__

@tracer
def eggs(x, y):
    print(x ** y)

spam(1, 2, 3)  # tracer.__call__
spam(a=1, b=2, c=3)  # spam - an attribute of the instance
eggs(2, 16)  # self.func = eggs
eggs(4, y=4)
##################################################
# Closure-based tracer with a single GLOBAL counter shared by every
# decorated function (spam and eggs bump the same 'calls').
calls = 0
def tracer(func):
    def wrapper(*args, **kwargs):
        global calls  # 'calls' is a global variable that is
        calls += 1  # common to all functions, not to each function individually
        print('call %s to %s' % (calls, func.__name__))
        return func(*args, **kwargs)
    return wrapper

@tracer
def spam(a, b, c):  # spam = tracer(spam)
    print(a + b + c)

@tracer
def eggs(x, y):
    print(x ** y)

spam(1, 2, 3)
spam(a=1, b=2, c=3)
eggs(2, 16)
eggs(4, y=4)
##################################################
# file: calc.py
def calc():
    """Interactive calculator: read an expression, evaluate it and print
    the numeric result. Loops until the process is interrupted."""
    hint = '''
    '?' - help
    '*' - multiplication. Ex.: x * y
    '/' - division. Ex.: x / y
    '+' - addition. Ex.: x + y
    '-' - subtraction. Ex.: x - y
    '**' - exponentiation. Ex.: x ** y
    '()' - prioritization. Ex.: (x + y) * z
    '%' - remainder of division. Ex.: x % y
    '//' - obtaining the integer part of the division. Ex.: x // y
    'abs()' - number modulus. Ex.: abs(x)
    '-x' - the change of the sign of the number. Ex.: -x
    'divmod(x, y)' - a couple (x // y, x % y). Ex.: divmod(x, y)
    'pow(x, y[,z])' - x^y modulo (if the module is specified). Ex.: pow(x, y, z)
    '''
    while True:
        try:
            x = input("Expression or '?' for help: ")
            if '?' in x:  # Calls help hint
                print(hint)
                continue
            else:
                # SECURITY: eval() executes arbitrary Python typed by the
                # user - never expose this beyond a trusted local session.
                res = eval(x)  # Dangerous operation!!!
        except SyntaxError as E:  # Errors of an input
            # NOTE(review): only SyntaxError is caught; e.g. a NameError
            # raised by eval propagates and aborts the loop - confirm.
            print('SyntaxError:', E)
            continue
        if type(res) not in [float, int, complex]:  # check the type
            print('TypeError:', res)
            continue
        else:
            print(res)
##################################################
# Closure-based tracer with a per-function counter via 'nonlocal'.
def tracer(func):
    calls = 0
    def wrapper(*args, **kwargs):
        nonlocal calls
        calls += 1
        print('call %s to %s' % (calls, func.__name__))
        return func(*args, **kwargs)
    return wrapper

@tracer
def spam(a, b, c):  # spam = tracer(spam)
    print(a + b + c)

@tracer
def eggs(x, y):
    print(x ** y)

spam(1, 2, 3)
spam(a=1, b=2, c=3)
eggs(2, 16)
eggs(4, y=4)
##################################################
def tracer(func):
    """Counting decorator that keeps its state as an attribute on the
    wrapper function itself — no class instance, no nonlocal needed."""
    def proxy(*args, **kwargs):
        # The counter lives on the proxy function object.
        proxy.calls = proxy.calls + 1
        message = 'call %s to %s' % (proxy.calls, func.__name__)
        print(message)
        return func(*args, **kwargs)
    proxy.calls = 0
    return proxy
##################################################
# Class-based tracer: fine for plain functions but FAILS for methods —
# a tracer instance is not a function, so attribute access does not bind
# it as a method and 'self' is never passed to __call__.
class tracer:
    def __init__(self, func):
        self.calls = 0
        self.func = func
    def __call__(self, *args, **kwargs):
        self.calls += 1
        print('call %s to %s' % (self.calls, self.func.__name__))
        return self.func(*args, **kwargs)

@tracer
def spam(a, b, c):
    print(a + b + c)

spam(1, 2, 3)
spam(a=1, b=2, c=3)

class Person:
    def __init__(self, name, pay):
        self.name = name
        self.pay = pay
    @tracer
    def giveRaise(self, percent):  # giveRaise = tracer(giveRaise)
        self.pay *= (1.0 + percent)
    @tracer
    def lastName(self):  # lastName = tracer(lastName)
        return self.name.split()[-1]

bob = Person('<NAME>', 50000)
bob.giveRaise(.25)  # ERROR! __call__ receives only (.25); no Person instance
print(bob.lastName())  # ERROR!
##################################################
# Closure-based tracer: works for BOTH functions and methods, because
# onCall is a real function — attribute access binds it as a method and
# 'self' arrives through *args.
def tracer(func):
    calls = 0
    def onCall(*args, **kwargs):
        nonlocal calls
        calls += 1
        print('call %s to %s' % (calls, func.__name__))
        return func(*args, **kwargs)
    return onCall

@tracer
def spam(a, b, c):  # spam = tracer(spam)
    print(a + b + c)  # onCall keeps a reference to 'spam'

spam(1, 2, 3)
spam(a=4, b=5, c=6)

class Person:
    def __init__(self, name, pay):
        self.name = name
        self.pay = pay
    @tracer
    def giveRaise(self, percent):  # giveRaise = tracer(giveRaise)
        self.pay *= (1.0 + percent)
    @tracer
    def lastName(self):  # lastName = tracer(lastName)
        return self.name.split()[-1]

print('methods...')
bob = Person('<NAME>', 50000)
sue = Person('<NAME>', 100000)
print(bob.name, sue.name)
sue.giveRaise(.10)
print(sue.pay)
print(bob.lastName(), sue.lastName())
##################################################
# Minimal descriptor demo: class attribute 'attr' defines only __get__,
# which intercepts instance attribute access.
class Descriptor:
    def __get__(self, instance, owner):
        print(self, instance, owner, end='\n')
        return instance.__dict__

class Subject:
    def __init__(self, name):
        self._name = name
    attr = Descriptor()

X = Subject('ATTRIBUTE')
X.attr  # triggers Descriptor.__get__(Subject.attr, X, Subject)
##################################################
class tracer(object): # Page 1110
def __init__(self, func): # На этапе декорирования @
print("[TEST] tracer.__init__:", self, func)
self.calls = 0
self.func = func # Сохраняет функцию для последующего вызова
def __call__(self, *args, **kwargs): # Вызывается при обращениях к оригинальной | |
import pandas as pd
import math
import parser
# Worksheets with indemnification funds and temporary remuneration
# For active members there are spreadsheets as of July 2019
# Adjust existing spreadsheet variations
def format_value(element):
    """Normalize a raw spreadsheet cell into a summable value.

    Returns 0.0 for missing/placeholder cells: None, float NaN (pandas
    represents empty numeric cells as NaN after `.to_numpy()`, which would
    otherwise poison the totals), strings containing '-' (used as "empty"
    markers in these sheets) and the '#N/DISP' error marker.
    Any other value is returned unchanged.
    """
    if element is None:
        return 0.0
    if isinstance(element, float) and math.isnan(element):
        return 0.0
    if isinstance(element, str) and "-" in element:
        return 0.0
    if element == "#N/DISP":
        return 0.0
    return element
# July and August 2019
def update_employee_indemnity_jul_aug_2019(file_name, employees):
    """Merge the Jul/Aug 2019 indemnity spreadsheet into `employees`.

    Args:
        file_name: path of the indemnity spreadsheet, read via `parser`.
        employees: dict keyed by registration number (matricula), holding
            per-employee records built from the monthly remuneration sheet.
    Returns:
        The same `employees` dict, updated in place.
    """
    rows = parser.read_data(file_name).to_numpy()
    begin_row = parser.get_begin_row(rows)
    end_row = parser.get_end_row(rows, begin_row, file_name)
    curr_row = 0
    for row in rows:
        # Skip the header region before the first data row.
        if curr_row < begin_row:
            curr_row += 1
            continue
        matricula = str(int(row[0]))  # convert to string by removing the '.0'
        alimentacao = format_value(row[4])  # food allowance
        ferias_pc = format_value(row[5])  # vacation paid in cash
        cumulativa = format_value(row[6])  # Gratificação Cumulativa
        grat_natureza = format_value(row[7])  # Gratificação de Natureza Especial
        if (
            matricula in employees.keys()
        ):  # Only update employees present in the monthly remuneration sheet
            emp = employees[matricula]
            emp["income"].update(
                {
                    "perks": {
                        "total": round(ferias_pc + alimentacao, 2),
                        "food": alimentacao,
                        "vacation_pecuniary": ferias_pc,
                    }
                }
            )
            total_outras_gratificacoes = round(
                emp["income"]["other"]["others_total"] + cumulativa + grat_natureza, 2
            )
            total_gratificacoes = round(
                emp["income"]["other"]["total"] + cumulativa + grat_natureza, 2
            )
            emp["income"].update(
                {"total": round(emp["income"]["total"] + cumulativa + grat_natureza, 2)}
            )
            emp["income"]["other"].update(
                {
                    "total": total_gratificacoes,
                    "others_total": total_outras_gratificacoes,
                }
            )
            emp["income"]["other"]["others"].update(
                {
                    "GRAT. CUMULATIVA": cumulativa,
                    "GRAT. NATUREZA ESPECIAL": grat_natureza,
                }
            )
            employees[matricula] = emp
        curr_row += 1
        if curr_row > end_row:
            break
    return employees
# September to December 2019 / January and November 2020
def update_employee_indemnity_sept_2019_to_jan_and_nov_2020(file_name, employees):
    """Merge the Sep–Dec 2019 / Jan & Nov 2020 indemnity sheet into
    `employees`.

    Args:
        file_name: path of the indemnity spreadsheet, read via `parser`.
        employees: dict keyed by registration number (matricula), holding
            per-employee records built from the monthly remuneration sheet.
    Returns:
        The same `employees` dict, updated in place.
    """
    rows = parser.read_data(file_name).to_numpy()
    begin_row = parser.get_begin_row(rows)
    end_row = parser.get_end_row(rows, begin_row, file_name)
    curr_row = 0
    for row in rows:
        # Skip the header region before the first data row.
        if curr_row < begin_row:
            curr_row += 1
            continue
        matricula = str(int(row[0]))  # convert to string by removing the '.0'
        alimentacao = format_value(row[4])  # food allowance
        ferias_pc = format_value(row[5])  # vacation paid in cash
        licensa_pc = format_value(row[6])  # premium license paid in cash
        cumulativa = format_value(row[7])  # Gratificação Cumulativa
        grat_natureza = format_value(row[8])  # Gratificação de Natureza Especial
        atuacao_especial = format_value(
            row[9]
        )  # Gratificação de Grupo de Atuação Especial
        if (
            matricula in employees.keys()
        ):  # Only update employees present in the monthly remuneration sheet
            emp = employees[matricula]
            total_outras_gratificacoes = round(
                emp["income"]["other"]["others_total"]
                + cumulativa
                + grat_natureza
                + atuacao_especial,
                2,
            )
            total_gratificacoes = round(
                emp["income"]["other"]["total"]
                + cumulativa
                + grat_natureza
                + atuacao_especial,
                2,
            )
            # BUG FIX: atuacao_especial was previously omitted from the
            # grand total even though it is added to both gratification
            # totals above and is included by the Feb/Mar and Apr-Jul 2020
            # variants of this function.
            emp["income"].update(
                {
                    "total": round(
                        emp["income"]["total"]
                        + cumulativa
                        + grat_natureza
                        + atuacao_especial,
                        2,
                    )
                }
            )
            emp["income"]["perks"].update(
                {
                    "total": round(ferias_pc + alimentacao + licensa_pc, 2),
                    "food": alimentacao,
                    "vacation_pecuniary": ferias_pc,
                    "premium_license_pecuniary": licensa_pc,
                }
            )
            emp["income"]["other"].update(
                {
                    "total": total_gratificacoes,
                    "others_total": total_outras_gratificacoes,
                }
            )
            emp["income"]["other"]["others"].update(
                {
                    "GRAT. CUMULATIVA": cumulativa,
                    "GRAT. NATUREZA ESPECIAL": grat_natureza,
                    "GRAT. DE GRUPO DE ATUAÇÃO ESPECIAL": atuacao_especial,
                }
            )
            employees[matricula] = emp
        curr_row += 1
        if curr_row > end_row:
            break
    return employees
# February and March 2020
def update_employee_indemnity_feb_mar_2020(file_name, employees):
    """Merge the Feb/Mar 2020 indemnity spreadsheet into `employees`.

    Args:
        file_name: path of the indemnity spreadsheet, read via `parser`.
        employees: dict keyed by registration number (matricula), holding
            per-employee records built from the monthly remuneration sheet.
    Returns:
        The same `employees` dict, updated in place.
    """
    rows = parser.read_data(file_name).to_numpy()
    begin_row = parser.get_begin_row(rows)
    end_row = parser.get_end_row(rows, begin_row, file_name)
    curr_row = 0
    for row in rows:
        # Skip the header region before the first data row.
        if curr_row < begin_row:
            curr_row += 1
            continue
        matricula = str(int(row[0]))  # convert to string by removing the '.0'
        alimentacao = format_value(row[4])  # food allowance
        licensa_compensatoria = format_value(
            row[5]
        )  # Licença Compensatória ato 1124/18
        ferias_pc = format_value(row[6])  # vacation paid in cash
        licensa_pc = format_value(row[7])  # premium license paid in cash
        cumulativa = format_value(row[8])  # Gratificação Cumulativa
        grat_natureza = format_value(row[9])  # Gratificação de Natureza Especial
        atuacao_especial = format_value(
            row[10]
        )  # Gratificação de Grupo de Atuação Especial
        if (
            matricula in employees.keys()
        ):  # Only update employees present in the monthly remuneration sheet
            emp = employees[matricula]
            total_outras_gratificacoes = round(
                emp["income"]["other"]["others_total"]
                + cumulativa
                + grat_natureza
                + atuacao_especial,
                2,
            )
            total_gratificacoes = round(
                emp["income"]["other"]["total"]
                + cumulativa
                + grat_natureza
                + atuacao_especial,
                2,
            )
            emp["income"].update(
                {
                    "total": round(
                        emp["income"]["total"]
                        + cumulativa
                        + grat_natureza
                        + atuacao_especial,
                        2,
                    )
                }
            )
            # NOTE(review): licensa_pc is recorded below but NOT added to
            # the perks 'total' here, unlike the Sep 2019 variant where it
            # is included — confirm this difference is intentional.
            emp["income"]["perks"].update(
                {
                    "total": round(ferias_pc + alimentacao + licensa_compensatoria, 2),
                    "food": alimentacao,
                    "compensatory_leave": licensa_compensatoria,
                    "vacation_pecuniary": ferias_pc,
                    "premium_license_pecuniary": licensa_pc,
                }
            )
            emp["income"]["other"].update(
                {
                    "total": total_gratificacoes,
                    "others_total": total_outras_gratificacoes,
                }
            )
            emp["income"]["other"]["others"].update(
                {
                    "GRAT. CUMULATIVA": cumulativa,
                    "GRAT. NATUREZA ESPECIAL": grat_natureza,
                    "GRAT. DE GRUPO DE ATUAÇÃO ESPECIAL": atuacao_especial,
                }
            )
            employees[matricula] = emp
        curr_row += 1
        if curr_row > end_row:
            break
    return employees
# April to July 2020
def update_employee_indemnity_apr_to_july_2020(file_name, employees):
    """Merge the Apr-Jul 2020 indemnity spreadsheet into the employee records.

    Same logic as the Feb/Mar updater, but this period's sheet has no
    premium-license-in-cash column.

    :param file_name: Path to the indemnity spreadsheet for this period.
    :param employees: Dict mapping registration id (matricula) to the employee
        record previously built from the monthly remuneration spreadsheet.
    :return: The same ``employees`` dict, updated in place.
    """
    rows = parser.read_data(file_name).to_numpy()
    begin_row = parser.get_begin_row(rows)
    end_row = parser.get_end_row(rows, begin_row, file_name)
    # Iterate only over the data rows (begin_row..end_row inclusive) instead
    # of walking the whole sheet with a manual cursor.
    for row in rows[begin_row:end_row + 1]:
        matricula = str(int(row[0]))  # convert to string by removing the '.0'
        # Column layout assumed from this period's spreadsheet format:
        alimentacao = format_value(row[4])
        licensa_compensatoria = format_value(
            row[5]
        )  # Licença Compensatória ato 1124/18
        ferias_pc = format_value(row[6])
        cumulativa = format_value(row[7])  # Gratificação Cumulativa
        grat_natureza = format_value(row[8])  # Gratificação de Natureza Especial
        atuacao_especial = format_value(
            row[9]
        )  # Gratificação de Grupo de Atuação Especial
        # Only update employees already present in the monthly remuneration
        # spreadsheet; unknown registrations are skipped.
        if matricula not in employees:
            continue
        emp = employees[matricula]
        total_outras_gratificacoes = round(
            emp["income"]["other"]["others_total"]
            + cumulativa
            + grat_natureza
            + atuacao_especial,
            2,
        )
        total_gratificacoes = round(
            emp["income"]["other"]["total"]
            + cumulativa
            + grat_natureza
            + atuacao_especial,
            2,
        )
        emp["income"]["total"] = round(
            emp["income"]["total"]
            + cumulativa
            + grat_natureza
            + atuacao_especial,
            2,
        )
        emp["income"]["perks"].update(
            {
                "total": round(ferias_pc + alimentacao + licensa_compensatoria, 2),
                "food": alimentacao,
                "compensatory_leave": licensa_compensatoria,
                "vacation_pecuniary": ferias_pc,
            }
        )
        emp["income"]["other"].update(
            {
                "total": total_gratificacoes,
                "others_total": total_outras_gratificacoes,
            }
        )
        emp["income"]["other"]["others"].update(
            {
                "GRAT. CUMULATIVA": cumulativa,
                "GRAT. NATUREZA ESPECIAL": grat_natureza,
                "GRAT. DE GRUPO DE ATUAÇÃO ESPECIAL": atuacao_especial,
            }
        )
    return employees
# August and September 2020
def update_employee_indemnity_aug_sept_2020(file_name, employees):
    """Merge the Aug/Sep 2020 indemnity spreadsheet into the employee records.

    This period's sheet carries many more columns (transport, day-care,
    insalubrity, function substitution, patrol car, qualification bonus) than
    the earlier 2020 formats.

    :param file_name: Path to the indemnity spreadsheet for this period.
    :param employees: Dict mapping registration id (matricula) to the employee
        record previously built from the monthly remuneration spreadsheet.
    :return: The same ``employees`` dict, updated in place.
    """
    rows = parser.read_data(file_name).to_numpy()
    begin_row = parser.get_begin_row(rows)
    end_row = parser.get_end_row(rows, begin_row, file_name)
    # Iterate only over the data rows (begin_row..end_row inclusive) instead
    # of walking the whole sheet with a manual cursor.
    for row in rows[begin_row:end_row + 1]:
        matricula = str(int(row[0]))  # convert to string by removing the '.0'
        # Column layout assumed from this period's spreadsheet format:
        alimentacao = format_value(row[4])
        transporte = format_value(row[5])  # Auxilio Transporte
        creche = format_value(row[6])  # Auxilio Creche
        ferias_pc = format_value(row[7])
        licensa_pc = format_value(row[8])  # Licensa em pecunia
        licensa_compensatoria = format_value(
            row[9]
        )  # Licença Compensatória ato 1124/18
        insalubridade = format_value(row[10])  # Adicional de Insalubridade
        subs_funcao = format_value(row[11])  # Substituição de Função
        viatura = format_value(row[12])  # Viatura
        cumulativa = format_value(row[13])  # Gratificação Cumulativa
        grat_qualificacao = format_value(row[14])
        grat_natureza = format_value(row[15])  # Gratificação de Natureza Especial
        atuacao_especial = format_value(
            row[16]
        )  # Gratificação de Grupo de Atuação Especial
        # Only update employees already present in the monthly remuneration
        # spreadsheet; unknown registrations are skipped.
        if matricula not in employees:
            continue
        emp = employees[matricula]
        total_outras_gratificacoes = round(
            emp["income"]["other"]["others_total"]
            + cumulativa
            + grat_natureza
            + atuacao_especial
            + grat_qualificacao
            + viatura
            + insalubridade
            + subs_funcao,
            2,
        )
        total_gratificacoes = round(
            emp["income"]["other"]["total"]
            + cumulativa
            + grat_natureza
            + atuacao_especial
            + grat_qualificacao
            + viatura
            + insalubridade
            + subs_funcao,
            2,
        )
        emp["income"]["total"] = round(
            emp["income"]["total"]
            + cumulativa
            + grat_natureza
            + atuacao_especial
            + grat_qualificacao
            + viatura
            + insalubridade
            + subs_funcao,
            2,
        )
        emp["income"]["perks"].update(
            {
                "total": round(
                    ferias_pc
                    + alimentacao
                    + transporte
                    + creche
                    + licensa_compensatoria
                    + licensa_pc,
                    2,
                ),
                "food": alimentacao,
                "transportation": transporte,
                "pre_school": creche,
                "vacation_pecuniary": ferias_pc,
                "premium_license_pecuniary": licensa_pc,
                "compensatory_leave": licensa_compensatoria,
            }
        )
        emp["income"]["other"].update(
            {
                "total": total_gratificacoes,
                "others_total": total_outras_gratificacoes,
            }
        )
        emp["income"]["other"]["others"].update(
            {
                "INSALUBRIDADE": insalubridade,
                "SUBS. DE FUNÇÃO": subs_funcao,
                "VIATURA": viatura,
                "GRAT. CUMULATIVA": cumulativa,
                "GRAT. DE QUALIFICAÇÃO": grat_qualificacao,
                "GRAT. NATUREZA ESPECIAL": grat_natureza,
                "GRAT. DE GRUPO DE ATUAÇÃO ESPECIAL": atuacao_especial,
            }
        )
    return employees
# October 2020
def update_employee_indemnity_oct_2020(file_name, employees):
rows = parser.read_data(file_name).to_numpy()
begin_row = parser.get_begin_row(rows)
end_row = parser.get_end_row(rows, begin_row, file_name)
curr_row = 0
for row in rows:
if curr_row < begin_row:
curr_row += 1
continue
matricula = str(int(row[0])) # convert to string by removing the '.0'
ferias_pc = format_value(row[4])
cumulativa = format_value(row[5]) # Gratificação Cumulativa
grat_natureza = format_value(row[6]) # Gratificação de Natureza Especial
atuacao_especial = format_value(
row[7]
) # Gratificação de Grupo de Atuação Especial
if (
matricula in employees.keys()
): # Realiza o update apenas para os servidores que estão na planilha de remuneração mensal
emp = employees[matricula]
total_outras_gratificacoes = round(
emp["income"]["other"]["others_total"]
+ cumulativa
+ grat_natureza
+ atuacao_especial,
2,
)
total_gratificacoes = round(
emp["income"]["other"]["total"]
+ cumulativa
+ grat_natureza
+ atuacao_especial,
2,
)
emp["income"].update(
{
"total": round(
emp["income"]["total"]
+ cumulativa
+ grat_natureza
+ atuacao_especial,
2,
)
}
)
emp["income"]["perks"].update(
{
"vacation_pecuniary": ferias_pc,
}
)
emp["income"]["other"].update(
{
"total": total_gratificacoes,
"others_total": total_outras_gratificacoes,
}
)
emp["income"]["other"]["others"].update(
{
"GRAT. CUMULATIVA": cumulativa,
"GRAT. NATUREZA ESPECIAL": grat_natureza,
"GRAT. DE GRUPO DE | |
# Repository: jtschindler/sculptor
#!/usr/bin/env python
"""
This module introduces the SpecModel class and its functionality. The SpecModel
class is designed to fit models to an astronomical spectrum using LMFIT.
The SpecModel is always associated with a SpecFit object, which provides the
foundational functionality for the fitting.
Notes
-----
This module is in active development.
"""
import os
import glob
import corner
import importlib
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.transforms as mtransforms
from lmfit import Model, Parameters, fit_report
from lmfit.model import save_model, load_model, save_modelresult, \
load_modelresult
from sculptor import speconed as sod
from sculptor.masksmodels import model_func_list, model_func_dict,\
model_setup_list, mask_presets
# For a full list of fitting method
# https://lmfit.github.io/lmfit-py/fitting.html
fitting_methods = {'Levenberg-Marquardt': 'leastsq',
'Nelder-Mead': 'nelder',
'Maximum likelihood via Monte-Carlo Markov Chain':'emcee',
'Least-Squares minimization': 'least_squares',
'Differential evolution': 'differential_evolution',
'Brute force method': 'brute',
'Basinhopping': 'basinhopping',
'Adaptive Memory Programming for Global Optimization':
'ampgo',
'L-BFGS-B': 'lbfgsb',
'Powell': 'powell',
'Conjugate-Gradient':'cg',
# 'Newton-CG':'newton',
'Cobyla': 'cobyla',
'BFGS': 'bfgs',
'Truncated Newton': 'tnc',
# 'Newton-CG trust-region':'trust-ncg',
# 'Nearly Exact trust-region':'trust-exact',
'Newton GLTR trust-region': 'trust-krylov',
'Trust-region for constrained obtimization': 'trust-constr',
# 'Dog-leg trust-region':'dogleg',
'Sequential Linear Squares Programming':'slsqp',
'Simplicial Homology Global Optimization':'shgo',
'Dual Annealing Optimization':'dual_annealing'
}
"""dict: Dictionary of fitting methods
Fitting methods available for fitting in SpecFit based on the list of methods in
LMFIT.
"""
class SpecModel:
""" Class holding information on models for the SpecFit class
Attributes:
specfit (SpecFit): Associated SpecFit object
xlim (list of float): Wavelength limits for plotting
ylim (list of float): Flux density limits for plotting
spec (SpecOneD): Astronomical spectrum as a SpecOneD object
redshift (float): Cosmological redshift of the astronomical object
use_weights (bool): Boolean to indicate whether fluxden errors will be
used as weights for the fit.
model_list (list of Models): List of LMFIT models
params_list (list of Parameters): List of LMFIT Parameters for all
LMFIT models.
global_params (Parameters): Global parameters to be added to the all
models in the Specmodel. Their main use is to provide variables
and constraints for multiple individual models.
color (str): Color to use in the SpecModel plot.
model (Model): LMFIT SpecModel model. The global model including all
models in the model_list.
params (Parameters): LMFIT SpecModel parameters. The global parameter
list including all parameters from all models.
        fit_result (ModelResult): LMFIT ModelResult for the fit to the SpecModel
"""
def __init__(self, specfit, spectrum=None, redshift=0):
""" Initialize the SpecModel objects
:param SpecFit specfit: SpecFit object the SpecModel belongs to
:param SpecOneD spectrum: Astronomical spectrum passed as a SpecOneD
object. Initializes 'None' by default.
:param float redshift: Redshift of the astronomical spectrum. '0'
initialized by default.
"""
self.xlim = [0, 1]
self.ylim = [0, 1]
# Copy the spectrum (SpecOneD object) to the SpecModel object
# if isinstance(spectrum, sod.SpecOneD)
self.specfit = specfit
if isinstance(spectrum, sod.SpecOneD):
self.spec = spectrum.copy()
# Set mask describing the regions included in the fit for this model
self.reset_fit_mask()
self.reset_plot_limits()
self.model_fluxden = np.zeros_like(self.spec.fluxden)
else:
self.mask = None
self.redshift = redshift
# Boolean to indicate whether the fluxden uncertainties will be used as
# weights in the fit
self.use_weights = True
self.name = 'SpecModel'
# list of functional models
self.model_list = []
# list of parameters for the models
self.params_list = []
# String to indicate how spectral model is propagated to next
# Specmodel object in SpecFit
self.propagate = 'subtract'
self.global_params = Parameters()
self.color = 'orange'
self.model = None
self.params = None
self.fit_result = None
def _copy(self, specfit):
""" Copy the SpecModel object to a new SpecFit class.
:param specfit:
:return: SpecModel
"""
specmodel = SpecModel(specfit, spectrum=self.spec,
redshift=self.redshift)
specmodel.mask = self.mask
specmodel.redshift = self.redshift
# Boolean to indicate whether the fluxden uncertainties will be used as
# weights in the fit
specmodel.use_weights = self.use_weights
specmodel.name = self.name
# list of functional models
specmodel.model_list = self.model_list.copy()
# list of parameters for the models
specmodel.params_list = self.params_list.copy()
# Propagate string
specmodel.propagate = self.propagate
specmodel.global_params = self.global_params.copy()
specmodel.color = self.color
specmodel.build_model()
specmodel.fit_result = self.fit_result
return specmodel
def add_model(self, model_name, prefix, **kwargs):
"""Add a model to the SpecModel by using the built-in Sculptor models.
:param model_name:
:param prefix:
:return:
"""
model_idx = model_func_list.index(model_name)
redshift = kwargs.pop('redshift', None)
if self.specfit.redshift is not None and redshift is None:
model, params = model_setup_list[model_idx](
prefix, redshift=self.specfit.redshift, **kwargs)
elif redshift is not None:
model, params = model_setup_list[model_idx](
prefix, redshift=redshift, **kwargs)
else:
model, params = model_setup_list[model_idx](prefix, **kwargs)
# Add global params to params
if self.global_params:
for param in params:
for global_param in self.global_params:
param.add(global_param)
self._add_model(model, params)
def _add_model(self, model, params):
""" Add an LMFIT model and LMFIT parameters to the SpecModel object
:param (Model) model: LMFIT Model to be added to the model list.
:param (Parameters) params: LMFIT Parameters to be added to the
parameter list.
:return: None
"""
prefix_list = []
for existing_model in self.model_list:
prefix_list.append(existing_model.prefix)
if len(self.model_list) > 0:
if isinstance(model, list) and isinstance(params, list):
if model[0].prefix in prefix_list:
print('[WARNING] Model with same name exists. \
# Model could not be added.')
else:
self.model_list.extend(model)
self.params_list.extend(params)
else:
if model.prefix in prefix_list:
print('[WARNING] Model with same name exists. \
# Model could not be added.')
else:
self.model_list.append(model)
self.params_list.append(params)
else:
if isinstance(model, list) and isinstance(params, list):
self.model_list.extend(model)
self.params_list.extend(params)
else:
self.model_list.append(model)
self.params_list.append(params)
def delete_model(self, index=None):
""" Delete model (Model, Parameters) from the SpecModel object.
:param (int) index: Index of model to remove from model_list and
Parameters to remove from params_list (default index=="None"). If
the index is None the last added model will be removed.
:return: None
"""
if len(self.model_list) > 0:
if index is None:
model_to_delete = self.model_list.pop()
params_to_delete = self.params_list.pop()
else:
model_to_delete = self.model_list.pop(index)
params_to_delete = self.params_list.pop(index)
# Delete the Model and Parameter objects
del model_to_delete
del params_to_delete
def add_wavelength_range_to_fit_mask(self, disp_x1, disp_x2):
""" Adding a wavelength region to the fit mask.
The dispersion region between the two dispersion values will be added
to the fit mask.
:param (float) disp_x1: Dispersion value 1
:param (float) disp_x2: Dispersion value 2
:return:
"""
print('[INFO] Manual mask range', disp_x1, disp_x2)
if hasattr(self, 'spec'):
mask_between = np.sort(np.array([disp_x1, disp_x2]))
lo_index = np.argmin(np.abs(self.spec.dispersion - mask_between[0]))
up_index = np.argmin(np.abs(self.spec.dispersion - mask_between[1]))
self.mask[lo_index:up_index] = True
def reset_fit_mask(self):
"""Reset the fit mask based on the supplied astronomical spectrum.
:return: None
"""
self.mask = np.zeros_like(self.spec.dispersion, dtype='bool')
def add_mask_preset_to_fit_mask(self, mask_preset_key):
""" Adding a preset mask from the models_and_masks module to the fit.
:param mask_preset_key: Name of the preset mask in the
mask_preset dictionary.
:type mask_preset_key: str
:return: None
"""
mask_preset = mask_presets[mask_preset_key]
if mask_preset['rest_frame']:
one_p_z = 1 + self.redshift
else:
one_p_z = 1
for mask_range in mask_preset['mask_ranges']:
wave_a = mask_range[0] * one_p_z
wave_b = mask_range[1] * one_p_z
self.add_wavelength_range_to_fit_mask(wave_a, wave_b)
def add_global_param(self, param_name, value=None, vary=True,
min=-np.inf, max=np.inf, expr=None):
""" Adding a "Global Parameter" to the SpecModel object
:param str param_name: Name of the global parameter
:param float,optional value: Initial value of the \
global parameter
:param bool,optional vary: Boolean to indicate whether the global \
parameter should be varied during the fit
:param float,optional min: Minimum value for the global parameter
:param float,optional max: Maximum value for the global parameter
:param str, optional expr: Optional expression for the global \
parameter
:return: None
"""
self.global_params.add(param_name, value=value, vary=vary, min=min,
max=max, expr=expr)
self.update_model_params_for_global_params()
def remove_global_param(self, param_name):
""" Remove "Global Parameter" from SpecModel object
:param str param_name: Parameter name of the global parameter to \
remove.
:return: None
"""
if param_name in self.global_params:
self.global_params.pop(param_name)
for params in self.params_list:
# Remove global parameter from expressions
for param in params:
if params[param].expr == param_name:
params[param].expr = None
# Remove global parameter from parameter list
if param_name in params:
params.pop(param_name)
def build_model(self):
""" Build the Specmodel model and parameters for the fit
:return: None
"""
# If at least one model exists
if len(self.model_list) > 0 and len(self.params_list) > 0:
# Instantiate the SpecModel model parameters
self.params = Parameters()
# Add super parameters (Test!)
self.params.update(self.specfit.super_params)
# Add global parameters (includes super parameters)
self.params.update(self.global_params)
for params in self.params_list:
self.params.update(params)
# Build the full SpecModel model
# Instantiate the model with the first model in the list
self.model = self.model_list[0]
# Add all other models to the global model
for model in self.model_list[1:]:
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class AssetJobInput(msrest.serialization.Model):
    """Asset input type.

    All required parameters must be populated in order to send to Azure.

    :ivar mode: Input Asset Delivery Mode. Possible values include: "ReadOnlyMount",
     "ReadWriteMount", "Download", "Direct", "EvalMount", "EvalDownload".
    :vartype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
    :ivar uri: Required. Input Asset URI.
    :vartype uri: str
    """

    _validation = {
        'uri': {'required': True, 'pattern': r'[a-zA-Z0-9_]'},
    }

    _attribute_map = {
        'mode': {'key': 'mode', 'type': 'str'},
        'uri': {'key': 'uri', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        """
        :keyword mode: Input Asset Delivery Mode. Possible values include: "ReadOnlyMount",
         "ReadWriteMount", "Download", "Direct", "EvalMount", "EvalDownload".
        :paramtype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
        :keyword uri: Required. Input Asset URI.
        :paramtype uri: str
        """
        super(AssetJobInput, self).__init__(**kwargs)
        self.mode = kwargs.get('mode')  # optional delivery mode
        self.uri = kwargs['uri']  # required; raises KeyError when absent
class AssetJobOutput(msrest.serialization.Model):
    """Asset output type.

    :ivar mode: Output Asset Delivery Mode. Possible values include: "ReadWriteMount", "Upload".
    :vartype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
    :ivar uri: Output Asset URI. This will have a default value of
     "azureml/{jobId}/{outputFolder}/{outputFileName}" if omitted.
    :vartype uri: str
    """

    _attribute_map = {
        'mode': {'key': 'mode', 'type': 'str'},
        'uri': {'key': 'uri', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        """
        :keyword mode: Output Asset Delivery Mode. Possible values include: "ReadWriteMount",
         "Upload".
        :paramtype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
        :keyword uri: Output Asset URI. This will have a default value of
         "azureml/{jobId}/{outputFolder}/{outputFileName}" if omitted.
        :paramtype uri: str
        """
        super(AssetJobOutput, self).__init__(**kwargs)
        # Both attributes are optional; the service fills in the URI default.
        self.mode = kwargs.get('mode')
        self.uri = kwargs.get('uri')
class BatchJob(msrest.serialization.Model):
    """Batch endpoint job.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar compute: Compute configuration used to set instance count.
    :vartype compute: ~azure.mgmt.machinelearningservices.models.ComputeConfiguration
    :ivar dataset: Input dataset
     This will be deprecated. Use InputData instead.
    :vartype dataset: ~azure.mgmt.machinelearningservices.models.InferenceDataInputBase
    :ivar description: The asset description text.
    :vartype description: str
    :ivar error_threshold: Error threshold, if the error count for the entire input goes above this
     value,
     the batch inference will be aborted. Range is [-1, int.MaxValue]
     -1 value indicates, ignore all failures during batch inference.
    :vartype error_threshold: int
    :ivar input_data: Input data for the job.
    :vartype input_data: dict[str, ~azure.mgmt.machinelearningservices.models.JobInput]
    :ivar interaction_endpoints: Dictionary of endpoint URIs, keyed by enumerated job endpoints.
    :vartype interaction_endpoints: dict[str,
     ~azure.mgmt.machinelearningservices.models.JobEndpoint]
    :ivar logging_level: Logging level for batch inference operation. Possible values include:
     "Info", "Warning", "Debug".
    :vartype logging_level: str or ~azure.mgmt.machinelearningservices.models.BatchLoggingLevel
    :ivar max_concurrency_per_instance: Indicates maximum number of parallelism per instance.
    :vartype max_concurrency_per_instance: int
    :ivar mini_batch_size: Size of the mini-batch passed to each batch invocation.
     For FileDataset, this is the number of files per mini-batch.
     For TabularDataset, this is the size of the records in bytes, per mini-batch.
    :vartype mini_batch_size: long
    :ivar name:
    :vartype name: str
    :ivar output: Location of the job output logs and artifacts.
    :vartype output: ~azure.mgmt.machinelearningservices.models.JobOutputArtifacts
    :ivar output_data: Job output data location
     Optional parameter: when not specified, the default location is
     workspaceblobstore location.
    :vartype output_data: dict[str, ~azure.mgmt.machinelearningservices.models.JobOutputV2]
    :ivar output_dataset: Output dataset location
     Optional parameter: when not specified, the default location is
     workspaceblobstore location.
     This will be deprecated. Use OutputData instead.
    :vartype output_dataset: ~azure.mgmt.machinelearningservices.models.DataVersion
    :ivar output_file_name: Output file name.
    :vartype output_file_name: str
    :ivar partition_keys: Partition keys list used for Named partitioning.
    :vartype partition_keys: list[str]
    :ivar properties: The asset property dictionary.
    :vartype properties: dict[str, str]
    :ivar provisioning_state: Possible values include: "Succeeded", "Failed", "Canceled",
     "InProgress".
    :vartype provisioning_state: str or
     ~azure.mgmt.machinelearningservices.models.JobProvisioningState
    :ivar retry_settings: Retry Settings for the batch inference operation.
    :vartype retry_settings: ~azure.mgmt.machinelearningservices.models.BatchRetrySettings
    :ivar status: Status of the job. Possible values include: "NotStarted", "Starting",
     "Provisioning", "Preparing", "Queued", "Running", "Finalizing", "CancelRequested", "Completed",
     "Failed", "Canceled", "NotResponding", "Paused", "Unknown".
    :vartype status: str or ~azure.mgmt.machinelearningservices.models.JobStatus
    :ivar tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
    :vartype tags: dict[str, str]
    """

    _validation = {
        'interaction_endpoints': {'readonly': True},
        'output': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'status': {'readonly': True},
    }

    _attribute_map = {
        'compute': {'key': 'compute', 'type': 'ComputeConfiguration'},
        'dataset': {'key': 'dataset', 'type': 'InferenceDataInputBase'},
        'description': {'key': 'description', 'type': 'str'},
        'error_threshold': {'key': 'errorThreshold', 'type': 'int'},
        'input_data': {'key': 'inputData', 'type': '{JobInput}'},
        'interaction_endpoints': {'key': 'interactionEndpoints', 'type': '{JobEndpoint}'},
        'logging_level': {'key': 'loggingLevel', 'type': 'str'},
        'max_concurrency_per_instance': {'key': 'maxConcurrencyPerInstance', 'type': 'int'},
        'mini_batch_size': {'key': 'miniBatchSize', 'type': 'long'},
        'name': {'key': 'name', 'type': 'str'},
        'output': {'key': 'output', 'type': 'JobOutputArtifacts'},
        'output_data': {'key': 'outputData', 'type': '{JobOutputV2}'},
        'output_dataset': {'key': 'outputDataset', 'type': 'DataVersion'},
        'output_file_name': {'key': 'outputFileName', 'type': 'str'},
        'partition_keys': {'key': 'partitionKeys', 'type': '[str]'},
        'properties': {'key': 'properties', 'type': '{str}'},
        'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
        'retry_settings': {'key': 'retrySettings', 'type': 'BatchRetrySettings'},
        'status': {'key': 'status', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
    }

    def __init__(self, **kwargs):
        """
        :keyword compute: Compute configuration used to set instance count.
        :paramtype compute: ~azure.mgmt.machinelearningservices.models.ComputeConfiguration
        :keyword dataset: Input dataset. This will be deprecated. Use InputData instead.
        :paramtype dataset: ~azure.mgmt.machinelearningservices.models.InferenceDataInputBase
        :keyword description: The asset description text.
        :paramtype description: str
        :keyword error_threshold: Error threshold, if the error count for the entire input goes
         above this value, the batch inference will be aborted. Range is [-1, int.MaxValue].
         -1 value indicates, ignore all failures during batch inference.
        :paramtype error_threshold: int
        :keyword input_data: Input data for the job.
        :paramtype input_data: dict[str, ~azure.mgmt.machinelearningservices.models.JobInput]
        :keyword logging_level: Logging level for batch inference operation. Possible values
         include: "Info", "Warning", "Debug".
        :paramtype logging_level: str or ~azure.mgmt.machinelearningservices.models.BatchLoggingLevel
        :keyword max_concurrency_per_instance: Indicates maximum number of parallelism per instance.
        :paramtype max_concurrency_per_instance: int
        :keyword mini_batch_size: Size of the mini-batch passed to each batch invocation.
         For FileDataset, this is the number of files per mini-batch.
         For TabularDataset, this is the size of the records in bytes, per mini-batch.
        :paramtype mini_batch_size: long
        :keyword name:
        :paramtype name: str
        :keyword output_data: Job output data location. Optional parameter: when not specified,
         the default location is workspaceblobstore location.
        :paramtype output_data: dict[str, ~azure.mgmt.machinelearningservices.models.JobOutputV2]
        :keyword output_dataset: Output dataset location. Optional parameter: when not specified,
         the default location is workspaceblobstore location.
         This will be deprecated. Use OutputData instead.
        :paramtype output_dataset: ~azure.mgmt.machinelearningservices.models.DataVersion
        :keyword output_file_name: Output file name.
        :paramtype output_file_name: str
        :keyword partition_keys: Partition keys list used for Named partitioning.
        :paramtype partition_keys: list[str]
        :keyword properties: The asset property dictionary.
        :paramtype properties: dict[str, str]
        :keyword retry_settings: Retry Settings for the batch inference operation.
        :paramtype retry_settings: ~azure.mgmt.machinelearningservices.models.BatchRetrySettings
        :keyword tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
        :paramtype tags: dict[str, str]
        """
        super(BatchJob, self).__init__(**kwargs)
        # Attributes flagged 'readonly' in _validation are populated by the
        # server and therefore always start out as None on the client side;
        # every other attribute is taken from the keyword arguments.
        _server_populated = ('interaction_endpoints', 'output',
                             'provisioning_state', 'status')
        for _attr in ('compute', 'dataset', 'description', 'error_threshold',
                      'input_data', 'interaction_endpoints', 'logging_level',
                      'max_concurrency_per_instance', 'mini_batch_size',
                      'name', 'output', 'output_data', 'output_dataset',
                      'output_file_name', 'partition_keys', 'properties',
                      'provisioning_state', 'retry_settings', 'status',
                      'tags'):
            setattr(self, _attr,
                    None if _attr in _server_populated else kwargs.get(_attr))
class Resource(msrest.serialization.Model):
    """Common fields that are returned in the response for all Azure Resource Manager resources.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Fully qualified resource ID for the resource. Ex -
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
     "Microsoft.Storage/storageAccounts".
    :vartype type: str
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        """All attributes are read-only and populated by the server."""
        super(Resource, self).__init__(**kwargs)
        # Server-populated fields always start as None on the client.
        self.id = self.name = self.type = None
class BatchJobResource(Resource):
"""Azure Resource Manager resource envelope.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. | |
il.sub(1, il.reg(1, 'A'), il.const(1, 1)), 'NZ')],
None,
[('INCA', 1, M6805_AddressMode.INH), [], lambda self, il: il.set_reg(1, 'A', il.add(1, il.reg(1, 'A'), il.const(1, 1)), 'NZ')],
[('TSTA', 1, M6805_AddressMode.INH), [], lambda self, il: il.sub(1, il.reg(1, 'A'), il.const(1, 0), 'NZ')],
None,
[('CLRA', 1, M6805_AddressMode.INH), [], lambda self, il: il.set_reg(1, 'A', il.const(1, 0), 'NZ')],
# 0x50-0x5f: Read-Modify-Write INH
[('NEGX', 1, M6805_AddressMode.INH), [], lambda self, il: il.set_reg(1, 'X', il.neg_expr(1, il.reg(1, 'X'), 'NZC'))],
None,
None,
[('COMX', 1, M6805_AddressMode.INH), [], lambda self, il: il.set_reg(1, 'X', il.not_expr(1, il.reg(1, 'X'), 'NZC'))],
[('LSRX', 1, M6805_AddressMode.INH), [], lambda self, il: il.set_reg(1, 'X', il.logical_shift_right(1, il.reg(1, 'X'), il.const(1, 1), 'NZC'))],
None,
[('RORX', 1, M6805_AddressMode.INH), [], lambda self, il: il.set_reg(1, 'X', il.rotate_right_carry(1, il.reg(1, 'X'), il.const(1, 1), il.flag('C'), 'NZC'))],
[('ASRX', 1, M6805_AddressMode.INH), [], lambda self, il: il.set_reg(1, 'X', il.arith_shift_right(1, il.reg(1, 'X'), il.const(1, 1), 'NZC'))],
[('LSLX', 1, M6805_AddressMode.INH), [], lambda self, il: il.set_reg(1, 'X', il.shift_left(1, il.reg(1, 'X'), il.const(1, 1), 'NZC'))],
[('ROLX', 1, M6805_AddressMode.INH), [], lambda self, il: il.set_reg(1, 'X', il.rotate_left_carry(1, il.reg(1, 'X'), il.const(1, 1), il.flag('C'), 'NZC'))],
[('DECX', 1, M6805_AddressMode.INH), [], lambda self, il: il.set_reg(1, 'X', il.sub(1, il.reg(1, 'X'), il.const(1, 1)), 'NZ')],
None,
[('INCX', 1, M6805_AddressMode.INH), [], lambda self, il: il.set_reg(1, 'X', il.add(1, il.reg(1, 'X'), il.const(1, 1)), 'NZ')],
[('TSTX', 1, M6805_AddressMode.INH), [], lambda self, il: il.sub(1, il.reg(1, 'X'), il.const(1, 0), 'NZ')],
None,
[('CLRX', 1, M6805_AddressMode.INH), [], lambda self, il: il.set_reg(1, 'X', il.const(1, 0), 'NZ')],
# 0x60-0x6f: Read-Modify-Write IX1
[('NEG', 2, M6805_AddressMode.IX1), ['IX1','X'], lambda self, il, ix1: il.store(1, ix1, il.neg_expr(1, il.load(1, ix1)))],
None,
None,
[('COM', 2, M6805_AddressMode.IX1), ['IX1','X'], lambda self, il, ix1: il.store(1, ix1, il.not_expr(1, il.load(1, ix1)))],
[('LSR', 2, M6805_AddressMode.IX1), ['IX1','X'], lambda self, il, ix1: il.store(1, ix1, il.logical_shift_right(1, il.load(1, ix1), il.const(1, 1), 'NZC'))],
None,
[('ROR', 2, M6805_AddressMode.IX1), ['IX1','X'], lambda self, il, ix1: il.store(1, ix1, il.rotate_right_carry(1, il.load(1, ix1), il.const(1, 1), il.flag('C'), 'NZC'))],
[('ASR', 2, M6805_AddressMode.IX1), ['IX1','X'], lambda self, il, ix1: il.store(1, ix1, il.arith_shift_right(1, il.load(1, ix1), il.const(1, 1), 'NZC'))],
[('LSL', 2, M6805_AddressMode.IX1), ['IX1','X'], lambda self, il, ix1: il.store(1, ix1, il.shift_left(1, il.load(1, ix1), il.const(1, 1), 'NZC'))],
[('ROL', 2, M6805_AddressMode.IX1), ['IX1','X'], lambda self, il, ix1: il.store(1, ix1, il.rotate_left_carry(1, il.load(1, ix1), il.const(1, 1), il.flag('C'), 'NZC'))],
[('DEC', 2, M6805_AddressMode.IX1), ['IX1','X'], lambda self, il, ix1: il.store(1, ix1, il.sub(1, il.load(1, ix1), il.const(1, 1)), 'NZ')],
None,
[('INC', 2, M6805_AddressMode.IX1), ['IX1','X'], lambda self, il, ix1: il.store(1, ix1, il.add(1, il.load(1, ix1), il.const(1, 1)), 'NZ')],
[('TST', 2, M6805_AddressMode.IX1), ['IX1','X'], lambda self, il, ix1: il.sub(1, il.load(1, ix1), il.const(1, 0), 'NZ')],
None,
[('CLR', 2, M6805_AddressMode.IX1), ['IX1','X'], lambda self, il, ix1: il.store(1, ix1, il.const(1, 0), 'NZ')],
# 0x70-0x7f: Read-Modify-Write IX
[('NEG', 1, M6805_AddressMode.IX), ['','X'], lambda self, il, x: il.store(1, x, il.neg_expr(1, il.load(1, x)))],
None,
None,
[('COM', 1, M6805_AddressMode.IX), ['','X'], lambda self, il, x: il.store(1, x, il.not_expr(1, il.load(1, x)))],
[('LSR', 1, M6805_AddressMode.IX), ['','X'], lambda self, il, x: il.store(1, x, il.logical_shift_right(1, il.load(1, x), il.const(1, 1), 'NZC'))],
None,
[('ROR', 1, M6805_AddressMode.IX), ['','X'], lambda self, il, x: il.store(1, x, il.rotate_right_carry(1, il.load(1, x), il.const(1, 1), il.flag('C'), 'NZC'))],
[('ASR', 1, M6805_AddressMode.IX), ['','X'], lambda self, il, x: il.store(1, x, il.arith_shift_right(1, il.load(1, x), il.const(1, 1), 'NZC'))],
[('LSL', 1, M6805_AddressMode.IX), ['','X'], lambda self, il, x: il.store(1, x, il.shift_left(1, il.load(1, x), il.const(1, 1), 'NZC'))],
[('ROL', 1, M6805_AddressMode.IX), ['','X'], lambda self, il, x: il.store(1, x, il.rotate_left_carry(1, il.load(1, x), il.const(1, 1), il.flag('C'), 'NZC'))],
[('DEC', 1, M6805_AddressMode.IX), ['','X'], lambda self, il, x: il.store(1, x, il.sub(1, il.load(1, x), il.const(1, 1)), 'NZ')],
None,
[('INC', 1, M6805_AddressMode.IX), ['','X'], lambda self, il, x: il.store(1, x, il.add(1, il.load(1, x), il.const(1, 1)), 'NZ')],
[('TST', 1, M6805_AddressMode.IX), ['','X'], lambda self, il, x: il.sub(1, il.load(1, x), il.const(1, 0), 'NZ')],
None,
[('CLR', 1, M6805_AddressMode.IX), ['','X'], lambda self, il, x: il.store(1, x, il.const(1, 0), 'NZ')],
# 0x80-0x8f: Control INH
[('RTI', 1, M6805_AddressMode.INH), [], lambda self, il: il.nop()],
[('RTS', 1, M6805_AddressMode.INH), [], lambda self, il: il.ret(il.pop(2))],
None,
[('SWI', 1, M6805_AddressMode.INH), [], lambda self, il: il.nop()],
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
# 0x90-0x9f: Control INH
None,
None,
None,
None,
None,
None,
None,
[('TAX', 1, M6805_AddressMode.INH), [], lambda self, il: il.set_reg(1, 'X', il.reg(1, 'A'))],
[('CLC', 1, M6805_AddressMode.INH), [], lambda self, il: il.set_flag('C', il.const(0, 0))],
[('SEC', 1, M6805_AddressMode.INH), [], lambda self, il: il.set_flag('C', il.const(0, 1))],
[('CLI', 1, M6805_AddressMode.INH), [], lambda self, il: il.set_flag('I', il.const(0, 0))],
[('SEI', 1, M6805_AddressMode.INH), [], lambda self, il: il.set_flag('I', il.const(0, 1))],
[('RSP', 1, M6805_AddressMode.INH), [], lambda self, il: il.set_reg(1, 'SP', il.const(1, 0x7f))],
[('NOP', 1, M6805_AddressMode.INH), [], lambda self, il: il.nop()],
None,
[('TXA', 1, M6805_AddressMode.INH), [], lambda self, il: il.set_reg(1, 'A', il.reg(1, 'X'))],
# 0xa0-0xaf: Register/Memory IMM
[('SUB', 2, M6805_AddressMode.IMM), ['IMM'], lambda self, il, imm: il.set_reg(1, 'A', il.sub(1, il.reg(1, 'A'), imm), 'NZC')],
[('CMP', 2, M6805_AddressMode.IMM), ['IMM'], lambda self, il, imm: il.sub(1, il.reg(1, 'A'), imm, 'NZC')],
[('SBC', 2, M6805_AddressMode.IMM), ['IMM'], lambda self, il, imm: il.set_reg(1, 'A', il.sub_borrow(1, il.reg(1, 'A'), imm, il.flag('C')), 'NZC')],
[('CPX', 2, M6805_AddressMode.IMM), ['IMM'], lambda self, il, imm: il.sub(1, il.reg(1, 'X'), imm, 'NZC')],
[('AND', 2, M6805_AddressMode.IMM), ['IMM'], lambda self, il, imm: il.set_reg(1, 'A', il.and_expr(1, il.reg(1, 'A'), imm), 'NZ')],
[('BIT', 2, M6805_AddressMode.IMM), ['IMM'], lambda self, il, imm: il.and_expr(1, il.reg(1, 'A'), imm, 'NZ')],
[('LDA', 2, M6805_AddressMode.IMM), ['IMM'], lambda self, il, imm: il.set_reg(1, 'A', imm)],
None,
[('EOR', 2, M6805_AddressMode.IMM), ['IMM'], lambda self, il, imm: il.set_reg(1, 'A', il.xor_expr(1, il.reg(1, 'A'), imm), 'NZ')],
[('ADC', 2, M6805_AddressMode.IMM), ['IMM'], lambda self, il, imm: il.set_reg(1, 'A', il.add_carry(1, il.reg(1, 'A'), imm, il.flag('C')), 'HNZC')],
[('ORA', 2, M6805_AddressMode.IMM), ['IMM'], lambda self, il, imm: il.set_reg(1, 'A', il.or_expr(1, il.reg(1, 'A'), imm), 'NZ')],
[('ADD', 2, M6805_AddressMode.IMM), ['IMM'], lambda self, il, imm: il.set_reg(1, 'A', il.add(1, il.reg(1, 'A'), imm), 'HNZC')],
None,
[('BSR', 2, M6805_AddressMode.REL), ['REL'], lambda self, il, t: il.call(t)],
[('LDX', 2, M6805_AddressMode.IMM), ['IMM'], lambda self, il, imm: il.set_reg(1, 'X', imm)],
None,
# 0xb0-0xbf: Register/Memory DIR
[('SUB', 2, M6805_AddressMode.DIR), ['DIR'], lambda self, il, m: il.set_reg(1, 'A', il.sub(1, il.reg(1, 'A'), il.load(1, m), 'NZC'))],
[('CMP', 2, M6805_AddressMode.DIR), ['DIR'], lambda self, il, m: il.sub(1, il.reg(1, 'A'), il.load(1, m), 'NZC')],
[('SBC', 2, M6805_AddressMode.DIR), ['DIR'], lambda self, il, m: il.set_reg(1, 'A', il.sub_borrow(1, il.reg(1, 'A'), il.load(1, m), il.flag('C'), 'NZC'))],
[('CPX', 2, M6805_AddressMode.DIR), ['DIR'], lambda self, il, m: il.sub(1, il.reg(1, 'X'), il.load(1, m), 'NZC')],
[('AND', 2, M6805_AddressMode.DIR), ['DIR'], lambda self, il, m: il.set_reg(1, 'A', il.and_expr(1, il.reg(1, 'A'), il.load(1, m), 'NZ'))],
[('BIT', 2, M6805_AddressMode.DIR), ['DIR'], lambda self, il, m: il.and_expr(1, il.reg(1, 'A'), il.load(1, m), 'NZ')],
[('LDA', 2, M6805_AddressMode.DIR), ['DIR'], lambda self, il, m: il.set_reg(1, 'A', il.load(1, m))],
[('STA', 2, M6805_AddressMode.DIR), ['DIR'], lambda self, il, m: il.store(1, m, il.reg(1, 'A'))],
[('EOR', 2, M6805_AddressMode.DIR), ['DIR'], lambda self, il, m: il.set_reg(1, 'A', il.xor_expr(1, il.reg(1, 'A'), il.load(1, m), 'NZ'))],
[('ADC', 2, M6805_AddressMode.DIR), ['DIR'], lambda self, il, m: il.set_reg(1, 'A', il.add_carry(1, il.reg(1, 'A'), il.load(1, m), il.flag('C'), 'NZC'))],
[('ORA', 2, M6805_AddressMode.DIR), ['DIR'], lambda self, il, m: il.set_reg(1, 'A', il.or_expr(1, il.reg(1, 'A'), il.load(1, m), 'NZ'))],
[('ADD', 2, M6805_AddressMode.DIR), ['DIR'], lambda self, il, m: il.set_reg(1, 'A', il.add(1, il.reg(1, 'A'), il.load(1, m), 'NZC'))],
[('JMP', 2, M6805_AddressMode.DIR), ['DIR'], lambda self, il, m: M6805.branch(il, m)],
[('JSR', 2, M6805_AddressMode.DIR), ['DIR'], lambda self, il, m: il.call(m)],
[('LDX', 2, M6805_AddressMode.DIR), ['DIR'], lambda self, il, m: il.set_reg(1, 'X', il.load(1, m))],
[('STX', 2, M6805_AddressMode.DIR), ['DIR'], lambda self, il, m: il.store(1, m, il.reg(1, 'X'))],
# 0xc0-0xcf: Register/Memory EXT
[('SUB', 3, M6805_AddressMode.EXT), ['EXT'], lambda self, il, ext: il.set_reg(1, 'A', il.sub(1, il.reg(1, 'A'), il.load(1, ext), 'NZC'))],
[('CMP', 3, M6805_AddressMode.EXT), ['EXT'], lambda self, il, ext: il.sub(1, il.reg(1, 'A'), il.load(1, ext), 'NZC')],
[('SBC', 3, M6805_AddressMode.EXT), ['EXT'], lambda self, il, ext: il.set_reg(1, 'A', il.sub_borrow(1, il.reg(1, 'A'), il.load(1, ext), il.flag('C'), 'NZC'))],
[('CPX', 3, M6805_AddressMode.EXT), ['EXT'], lambda self, il, ext: il.sub(1, il.reg(1, 'X'), il.load(1, ext), 'NZC')],
[('AND', 3, M6805_AddressMode.EXT), ['EXT'], lambda self, il, ext: il.set_reg(1, 'A', il.and_expr(1, il.reg(1, 'A'), il.load(1, ext), 'NZ'))],
[('BIT', 3, M6805_AddressMode.EXT), ['EXT'], lambda self, il, ext: il.and_expr(1, il.reg(1, 'A'), il.load(1, ext), 'NZ')],
[('LDA', 3, M6805_AddressMode.EXT), ['EXT'], lambda self, il, ext: il.set_reg(1, 'A', il.load(1, ext))],
[('STA', 3, M6805_AddressMode.EXT), ['EXT'], lambda self, il, ext: il.store(1, ext, il.reg(1, 'A'))],
[('EOR', 3, M6805_AddressMode.EXT), ['EXT'], lambda self, il, ext: il.set_reg(1, 'A', il.xor_expr(1, il.reg(1, 'A'), il.load(1, ext), 'NZ'))],
[('ADC', 3, M6805_AddressMode.EXT), ['EXT'], lambda self, il, ext: il.set_reg(1, 'A', il.add_carry(1, il.reg(1, 'A'), il.load(1, ext), il.flag('C'), 'NZC'))],
[('ORA', 3, M6805_AddressMode.EXT), ['EXT'], lambda self, il, ext: il.set_reg(1, 'A', il.or_expr(1, il.reg(1, 'A'), il.load(1, ext), 'NZ'))],
| |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
# Public surface of this generated module: one output class per nested
# Terraform schema block, plus the result types returned by data sources.
__all__ = [
    'ClientAuthenticationFlowBindingOverrides',
    'ClientAuthorization',
    'ClientGroupPolicyGroup',
    'ClientPermissionsConfigureScope',
    'ClientPermissionsManageScope',
    'ClientPermissionsMapRolesClientScopeScope',
    'ClientPermissionsMapRolesCompositeScope',
    'ClientPermissionsMapRolesScope',
    'ClientPermissionsTokenExchangeScope',
    'ClientPermissionsViewScope',
    'ClientRolePolicyRole',
    'GetClientAuthenticationFlowBindingOverrideResult',
    'GetClientAuthorizationResult',
    'GetClientServiceAccountUserFederatedIdentityResult',
]
@pulumi.output_type
class ClientAuthenticationFlowBindingOverrides(dict):
    """Per-client overrides of the browser and direct-grant authentication flows."""

    @staticmethod
    def __key_warning(key: str):
        # camelCase wire keys mapped to their snake_case property names.
        renamed = {
            "browserId": "browser_id",
            "directGrantId": "direct_grant_id",
        }
        suggest = renamed.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClientAuthenticationFlowBindingOverrides. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        ClientAuthenticationFlowBindingOverrides.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        ClientAuthenticationFlowBindingOverrides.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 browser_id: Optional[str] = None,
                 direct_grant_id: Optional[str] = None):
        """
        :param str browser_id: Browser flow id, (flow needs to exist)
        :param str direct_grant_id: Direct grant flow id (flow needs to exist)
        """
        # Only keys that were explicitly supplied are stored.
        if browser_id is not None:
            pulumi.set(__self__, "browser_id", browser_id)
        if direct_grant_id is not None:
            pulumi.set(__self__, "direct_grant_id", direct_grant_id)

    @property
    @pulumi.getter(name="browserId")
    def browser_id(self) -> Optional[str]:
        """Browser flow id, (flow needs to exist)"""
        return pulumi.get(self, "browser_id")

    @property
    @pulumi.getter(name="directGrantId")
    def direct_grant_id(self) -> Optional[str]:
        """Direct grant flow id (flow needs to exist)"""
        return pulumi.get(self, "direct_grant_id")
@pulumi.output_type
class ClientAuthorization(dict):
    """Authorization-services configuration attached to a client."""

    @staticmethod
    def __key_warning(key: str):
        # camelCase wire keys mapped to their snake_case property names.
        renamed = {
            "policyEnforcementMode": "policy_enforcement_mode",
            "allowRemoteResourceManagement": "allow_remote_resource_management",
            "decisionStrategy": "decision_strategy",
            "keepDefaults": "keep_defaults",
        }
        suggest = renamed.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClientAuthorization. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        ClientAuthorization.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        ClientAuthorization.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 policy_enforcement_mode: str,
                 allow_remote_resource_management: Optional[bool] = None,
                 decision_strategy: Optional[str] = None,
                 keep_defaults: Optional[bool] = None):
        """
        :param str policy_enforcement_mode: Dictates how policies are enforced when evaluating authorization requests. Can be one of `ENFORCING`, `PERMISSIVE`, or `DISABLED`.
        :param bool allow_remote_resource_management: When `true`, resources can be managed remotely by the resource server. Defaults to `false`.
        :param str decision_strategy: Dictates how the policies associated with a given permission are evaluated and how a final decision is obtained. Could be one of `AFFIRMATIVE`, `CONSENSUS`, or `UNANIMOUS`. Applies to permissions.
        :param bool keep_defaults: When `true`, defaults set by Keycloak will be respected. Defaults to `false`.
        """
        # policy_enforcement_mode is required; the rest are stored only when supplied.
        pulumi.set(__self__, "policy_enforcement_mode", policy_enforcement_mode)
        if allow_remote_resource_management is not None:
            pulumi.set(__self__, "allow_remote_resource_management", allow_remote_resource_management)
        if decision_strategy is not None:
            pulumi.set(__self__, "decision_strategy", decision_strategy)
        if keep_defaults is not None:
            pulumi.set(__self__, "keep_defaults", keep_defaults)

    @property
    @pulumi.getter(name="policyEnforcementMode")
    def policy_enforcement_mode(self) -> str:
        """
        Dictates how policies are enforced when evaluating authorization requests. Can be one of `ENFORCING`, `PERMISSIVE`, or `DISABLED`.
        """
        return pulumi.get(self, "policy_enforcement_mode")

    @property
    @pulumi.getter(name="allowRemoteResourceManagement")
    def allow_remote_resource_management(self) -> Optional[bool]:
        """
        When `true`, resources can be managed remotely by the resource server. Defaults to `false`.
        """
        return pulumi.get(self, "allow_remote_resource_management")

    @property
    @pulumi.getter(name="decisionStrategy")
    def decision_strategy(self) -> Optional[str]:
        """
        Dictates how the policies associated with a given permission are evaluated and how a final decision is obtained. Could be one of `AFFIRMATIVE`, `CONSENSUS`, or `UNANIMOUS`. Applies to permissions.
        """
        return pulumi.get(self, "decision_strategy")

    @property
    @pulumi.getter(name="keepDefaults")
    def keep_defaults(self) -> Optional[bool]:
        """
        When `true`, defaults set by Keycloak will be respected. Defaults to `false`.
        """
        return pulumi.get(self, "keep_defaults")
@pulumi.output_type
class ClientGroupPolicyGroup(dict):
    """A group reference (id, path, extend-children flag) used by a client group policy."""

    @staticmethod
    def __key_warning(key: str):
        # camelCase wire keys mapped to their snake_case property names.
        renamed = {"extendChildren": "extend_children"}
        suggest = renamed.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClientGroupPolicyGroup. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        ClientGroupPolicyGroup.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        ClientGroupPolicyGroup.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 extend_children: bool,
                 id: str,
                 path: str):
        # All three fields are required by the schema.
        pulumi.set(__self__, "extend_children", extend_children)
        pulumi.set(__self__, "id", id)
        pulumi.set(__self__, "path", path)

    @property
    @pulumi.getter(name="extendChildren")
    def extend_children(self) -> bool:
        return pulumi.get(self, "extend_children")

    @property
    @pulumi.getter
    def id(self) -> str:
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def path(self) -> str:
        return pulumi.get(self, "path")
@pulumi.output_type
class ClientPermissionsConfigureScope(dict):
    """Scope settings (decision strategy, description, policies) for the 'configure' permission."""

    @staticmethod
    def __key_warning(key: str):
        # camelCase wire keys mapped to their snake_case property names.
        renamed = {"decisionStrategy": "decision_strategy"}
        suggest = renamed.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClientPermissionsConfigureScope. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        ClientPermissionsConfigureScope.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        ClientPermissionsConfigureScope.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 decision_strategy: Optional[str] = None,
                 description: Optional[str] = None,
                 policies: Optional[Sequence[str]] = None):
        # Only keys that were explicitly supplied are stored.
        if decision_strategy is not None:
            pulumi.set(__self__, "decision_strategy", decision_strategy)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if policies is not None:
            pulumi.set(__self__, "policies", policies)

    @property
    @pulumi.getter(name="decisionStrategy")
    def decision_strategy(self) -> Optional[str]:
        return pulumi.get(self, "decision_strategy")

    @property
    @pulumi.getter
    def description(self) -> Optional[str]:
        return pulumi.get(self, "description")

    @property
    @pulumi.getter
    def policies(self) -> Optional[Sequence[str]]:
        return pulumi.get(self, "policies")
@pulumi.output_type
class ClientPermissionsManageScope(dict):
    """Scope settings (decision strategy, description, policies) for the 'manage' permission."""

    @staticmethod
    def __key_warning(key: str):
        # camelCase wire keys mapped to their snake_case property names.
        renamed = {"decisionStrategy": "decision_strategy"}
        suggest = renamed.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClientPermissionsManageScope. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        ClientPermissionsManageScope.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        ClientPermissionsManageScope.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 decision_strategy: Optional[str] = None,
                 description: Optional[str] = None,
                 policies: Optional[Sequence[str]] = None):
        # Only keys that were explicitly supplied are stored.
        if decision_strategy is not None:
            pulumi.set(__self__, "decision_strategy", decision_strategy)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if policies is not None:
            pulumi.set(__self__, "policies", policies)

    @property
    @pulumi.getter(name="decisionStrategy")
    def decision_strategy(self) -> Optional[str]:
        return pulumi.get(self, "decision_strategy")

    @property
    @pulumi.getter
    def description(self) -> Optional[str]:
        return pulumi.get(self, "description")

    @property
    @pulumi.getter
    def policies(self) -> Optional[Sequence[str]]:
        return pulumi.get(self, "policies")
@pulumi.output_type
class ClientPermissionsMapRolesClientScopeScope(dict):
    """Scope settings (decision strategy, description, policies) for the 'map-roles-client-scope' permission."""

    @staticmethod
    def __key_warning(key: str):
        # camelCase wire keys mapped to their snake_case property names.
        renamed = {"decisionStrategy": "decision_strategy"}
        suggest = renamed.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClientPermissionsMapRolesClientScopeScope. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        ClientPermissionsMapRolesClientScopeScope.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        ClientPermissionsMapRolesClientScopeScope.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 decision_strategy: Optional[str] = None,
                 description: Optional[str] = None,
                 policies: Optional[Sequence[str]] = None):
        # Only keys that were explicitly supplied are stored.
        if decision_strategy is not None:
            pulumi.set(__self__, "decision_strategy", decision_strategy)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if policies is not None:
            pulumi.set(__self__, "policies", policies)

    @property
    @pulumi.getter(name="decisionStrategy")
    def decision_strategy(self) -> Optional[str]:
        return pulumi.get(self, "decision_strategy")

    @property
    @pulumi.getter
    def description(self) -> Optional[str]:
        return pulumi.get(self, "description")

    @property
    @pulumi.getter
    def policies(self) -> Optional[Sequence[str]]:
        return pulumi.get(self, "policies")
@pulumi.output_type
class ClientPermissionsMapRolesCompositeScope(dict):
    """Scope settings (decision strategy, description, policies) for the 'map-roles-composite' permission."""

    @staticmethod
    def __key_warning(key: str):
        # camelCase wire keys mapped to their snake_case property names.
        renamed = {"decisionStrategy": "decision_strategy"}
        suggest = renamed.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClientPermissionsMapRolesCompositeScope. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        ClientPermissionsMapRolesCompositeScope.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        ClientPermissionsMapRolesCompositeScope.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 decision_strategy: Optional[str] = None,
                 description: Optional[str] = None,
                 policies: Optional[Sequence[str]] = None):
        # Only keys that were explicitly supplied are stored.
        if decision_strategy is not None:
            pulumi.set(__self__, "decision_strategy", decision_strategy)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if policies is not None:
            pulumi.set(__self__, "policies", policies)

    @property
    @pulumi.getter(name="decisionStrategy")
    def decision_strategy(self) -> Optional[str]:
        return pulumi.get(self, "decision_strategy")

    @property
    @pulumi.getter
    def description(self) -> Optional[str]:
        return pulumi.get(self, "description")

    @property
    @pulumi.getter
    def policies(self) -> Optional[Sequence[str]]:
        return pulumi.get(self, "policies")
@pulumi.output_type
class ClientPermissionsMapRolesScope(dict):
    """Scope settings (decision strategy, description, policies) for the 'map-roles' permission."""

    @staticmethod
    def __key_warning(key: str):
        # camelCase wire keys mapped to their snake_case property names.
        renamed = {"decisionStrategy": "decision_strategy"}
        suggest = renamed.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClientPermissionsMapRolesScope. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        ClientPermissionsMapRolesScope.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        ClientPermissionsMapRolesScope.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 decision_strategy: Optional[str] = None,
                 description: Optional[str] = None,
                 policies: Optional[Sequence[str]] = None):
        # Only keys that were explicitly supplied are stored.
        if decision_strategy is not None:
            pulumi.set(__self__, "decision_strategy", decision_strategy)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if policies is not None:
            pulumi.set(__self__, "policies", policies)

    @property
    @pulumi.getter(name="decisionStrategy")
    def decision_strategy(self) -> Optional[str]:
        return pulumi.get(self, "decision_strategy")

    @property
    @pulumi.getter
    def description(self) -> Optional[str]:
        return pulumi.get(self, "description")

    @property
    @pulumi.getter
    def policies(self) -> Optional[Sequence[str]]:
        return pulumi.get(self, "policies")
@pulumi.output_type
class ClientPermissionsTokenExchangeScope(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "decisionStrategy":
suggest = "decision_strategy"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ClientPermissionsTokenExchangeScope. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ClientPermissionsTokenExchangeScope.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
| |
self._backend_agent.save(directory=policy_directory, callbacks=callbacks)
return directory
def train(self, train_context: core.TrainContext,
callbacks: Union[List[core.AgentCallback], core.AgentCallback, None],
default_plots: Optional[bool]):
"""Trains a new model using the gym environment passed during instantiation.
Args:
callbacks: list of callbacks called during the training and evaluation
train_context: training configuration to be used (num_iterations,num_episodes_per_iteration,...)
default_plots: if set adds a set of default callbacks (plot.State, plot.Rewards, plot.Loss,...).
if None default callbacks are only added if the callbacks list is empty
"""
assert train_context, "train_context not set."
callbacks = self._to_callback_list(callbacks=callbacks)
callbacks = self._add_plot_callbacks(callbacks, default_plots, [plot.Loss(), plot.Steps(), plot.Rewards()])
self._backend_agent.train(train_context=train_context, callbacks=callbacks)
def get_backends(agent: Optional[Type[EasyAgent]] = None):
    """returns a list of all registered backends containing an implementation for the EasyAgent type agent.

    Args:
        agent: type deriving from EasyAgent for which the backend identifiers are returned.

    Returns:
        a list of admissible values for the 'backend' argument of EazyAgents constructors or a list of all
        available backends if agent is None.
    """
    # Single pass: filter by the requested algorithm only when one is given.
    if agent:
        return [backend.backend_name for backend in _backends if agent in backend.get_algorithms()]
    return [backend.backend_name for backend in _backends]
def _get_backend(backend_name: str):
    """Yields the backend with the given name.

    Args:
        backend_name: name of the registered backend to look up (must be truthy).

    Returns:
        the backend instance or None if no backend is found."""
    assert backend_name
    backends = [b for b in _backends if b.backend_name == backend_name]
    # BUG FIX: this assert fires when MORE than one backend matches, but the old
    # message claimed 'no backend found' — the opposite of the actual condition.
    assert len(backends) <= 1, \
        f'multiple backends registered with name "{backend_name}". Available backends = {get_backends()}'
    result = None
    if backends:
        result = backends[0]
    return result
class CemAgent(EasyAgent):
    """creates a new agent based on the cross-entropy-method algorithm.

    From https://learning.mpi-sws.org/mlss2016/slides/2016-MLSS-RL.pdf:

    Initialize µ ∈Rd,σ ∈Rd
    for iteration = 1,2,... num_iterations do
        Collect num_episodes_per_iteration samples of θi ∼ N(µ,diag(σ))
        Perform a noisy evaluation Ri ∼ θi
        Select the top elite_set_fraction of samples (e.g. p = 0.2), which we’ll call the elite set
        Fit a Gaussian distribution, with diagonal covariance, to the elite set, obtaining a new µ,σ.
    end for
    Return the final µ.

    see https://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.81.6579&rep=rep1&type=pdf
    """

    def __init__(self, gym_env_name: str, fc_layers: Optional[Tuple[int, ...]] = None, backend: str = None):
        super().__init__(gym_env_name, fc_layers, backend)
        assert False, "CemAgent is currently not available (pending migration of keras-rl to tf2.0)"

    def train(self,
              callbacks: Union[List[core.AgentCallback], core.AgentCallback, None] = None,
              num_iterations: int = 100,
              num_episodes_per_iteration: int = 50,
              max_steps_per_episode: int = 500,
              elite_set_fraction: float = 0.1,
              num_iterations_between_eval: int = 5,
              num_episodes_per_eval: int = 10,
              train_context: core.CemTrainContext = None,
              default_plots: bool = None):
        """Trains a new model using the gym environment passed during instantiation.

        Args:
            callbacks: list of callbacks called during training and evaluation
            num_iterations: number of times the training is repeated (with additional data)
            num_episodes_per_iteration: number of episodes played in each iteration. for each episode a new
                policy is sampled from the current weight distribution.
            max_steps_per_episode: maximum number of steps per episode
            elite_set_fraction: the fraction of policies which are members of the elite set.
                These policies are used to fit a new weight distribution in each iteration.
            num_iterations_between_eval: number of training iterations before the current policy is evaluated.
                if 0 no evaluation is performed.
            num_episodes_per_eval: number of episodes played to estimate the average return and steps
            train_context: training configuration to be used. if set overrides all other training context arguments.
            default_plots: if set adds a set of default callbacks (plot.State, plot.Rewards, plot.Loss,...).
                if None default callbacks are only added if the callbacks list is empty

        Returns:
            train_context: the training configuration containing the loss and sum of rewards encountered
                during training
        """
        if train_context is None:
            train_context = core.CemTrainContext()
            train_context.num_iterations = num_iterations
            # BUG FIX: num_episodes_per_iteration was accepted and documented but
            # never copied into the context (every sibling argument is).
            # NOTE(review): assumes core.CemTrainContext exposes this attribute,
            # as the docstring implies — confirm against the core module.
            train_context.num_episodes_per_iteration = num_episodes_per_iteration
            train_context.max_steps_per_episode = max_steps_per_episode
            train_context.elite_set_fraction = elite_set_fraction
            train_context.num_iterations_between_eval = num_iterations_between_eval
            train_context.num_episodes_per_eval = num_episodes_per_eval
        super().train(train_context=train_context, callbacks=callbacks, default_plots=default_plots)
        return train_context
class DqnAgent(EasyAgent):
    """Agent implementing the DQN (Deep Q-Network) algorithm.

    DQN trains a deep (convolutional) network as a nonlinear approximator of Q.
    Two ideas keep the otherwise unstable training in check: experience replay
    (learning from random samples of past steps rather than only the latest
    ones, which de-correlates the observation sequence) and periodically
    updated target values (which de-correlates Q from its own training
    targets).

    see also: https://deepmind.com/research/publications/human-level-control-through-deep-reinforcement-learning
    """

    def train(self,
              callbacks: Union[List[core.AgentCallback], core.AgentCallback, None] = None,
              num_iterations: int = 20000,
              max_steps_per_episode: int = 500,
              num_steps_per_iteration: int = 1,
              num_steps_buffer_preload=1000,
              num_steps_sampled_from_buffer=64,
              num_iterations_between_eval: int = 1000,
              num_episodes_per_eval: int = 10,
              learning_rate: float = 0.001,
              train_context: core.StepsTrainContext = None,
              default_plots: bool = None):
        """Trains a new model using the gym environment passed during instantiation.

        Args:
            callbacks: list of callbacks called during training and evaluation
            num_iterations: number of times the training is repeated (with additional data)
            max_steps_per_episode: maximum number of steps per episode
            num_steps_per_iteration: number of steps played per training iteration
            num_steps_buffer_preload: number of initial collect steps to preload the buffer
            num_steps_sampled_from_buffer: the number of steps sampled from buffer for each iteration training
            num_iterations_between_eval: number of training iterations before the current policy is
                evaluated; if 0 no evaluation is performed
            num_episodes_per_eval: number of episodes played to estimate the average return and steps
            learning_rate: the learning rate used in the next iteration's policy training (0,1]
            train_context: training configuration to be used; if set it overrides all other
                training context arguments
            default_plots: if set adds a set of default callbacks (plot.State, plot.Rewards,
                plot.Loss, ...); if None default callbacks are only added if the callbacks list is empty

        Returns:
            train_context: the training configuration containing the loss and sum of rewards
            encountered during training
        """
        if train_context is None:
            # No explicit context given: build one from the individual arguments.
            train_context = core.StepsTrainContext()
            overrides = dict(
                num_iterations=num_iterations,
                max_steps_per_episode=max_steps_per_episode,
                num_steps_per_iteration=num_steps_per_iteration,
                num_steps_buffer_preload=num_steps_buffer_preload,
                num_steps_sampled_from_buffer=num_steps_sampled_from_buffer,
                num_iterations_between_eval=num_iterations_between_eval,
                num_episodes_per_eval=num_episodes_per_eval,
                learning_rate=learning_rate,
            )
            for attr_name, value in overrides.items():
                setattr(train_context, attr_name, value)
        super().train(train_context=train_context, callbacks=callbacks, default_plots=default_plots)
        return train_context
class DoubleDqnAgent(DqnAgent):
    """Agent based on the Double Dqn algorithm (https://arxiv.org/abs/1509.06461).

    The class body adds nothing: it inherits the complete DqnAgent training
    interface. The algorithm variant is presumably selected elsewhere based on
    this class -- confirm against the backend/agent registry.
    """
class DuelingDqnAgent(DqnAgent):
    """Agent based on the Dueling Dqn algorithm (https://arxiv.org/abs/1511.06581).

    The class body adds nothing: it inherits the complete DqnAgent training
    interface. The algorithm variant is presumably selected elsewhere based on
    this class -- confirm against the backend/agent registry.
    """
class PpoAgent(EasyAgent):
    """Agent implementing the PPO algorithm.

    PPO is an actor-critic method built on two neural networks: an actor that
    proposes the next action, and a critic that estimates the value of the
    current game state (the expected, discounted sum of future rewards when
    following the current actor network).

    see also: https://spinningup.openai.com/en/latest/algorithms/ppo.html
    """

    def train(self,
              callbacks: Union[List[core.AgentCallback], core.AgentCallback, None] = None,
              num_iterations: int = 100,
              num_episodes_per_iteration: int = 10,
              max_steps_per_episode: int = 500,
              num_epochs_per_iteration: int = 10,
              num_iterations_between_eval: int = 5,
              num_episodes_per_eval: int = 10,
              learning_rate: float = 0.001,
              train_context: core.PpoTrainContext = None,
              default_plots: bool = None):
        """Trains a new model using the gym environment passed during instantiation.

        Args:
            callbacks: list of callbacks called during training and evaluation
            num_iterations: number of times the training is repeated (with additional data)
            num_episodes_per_iteration: number of episodes played per training iteration
            max_steps_per_episode: maximum number of steps per episode
            num_epochs_per_iteration: number of times the data collected for the current
                iteration is used to retrain the current policy
            num_iterations_between_eval: number of training iterations before the current policy is
                evaluated; if 0 no evaluation is performed
            num_episodes_per_eval: number of episodes played to estimate the average return and steps
            learning_rate: the learning rate used in the next iteration's policy training (0,1]
            train_context: training configuration to be used; if set it overrides all other
                training context arguments
            default_plots: if set adds a set of default callbacks (plot.State, plot.Rewards,
                plot.Loss, ...); if None default callbacks are only added if the callbacks list is empty

        Returns:
            train_context: the training configuration containing the loss and sum of rewards
            encountered during training
        """
        if train_context is None:
            # No explicit context given: build one from the individual arguments.
            train_context = core.PpoTrainContext()
            for attr_name, value in (
                    ('num_iterations', num_iterations),
                    ('num_episodes_per_iteration', num_episodes_per_iteration),
                    ('max_steps_per_episode', max_steps_per_episode),
                    ('num_epochs_per_iteration', num_epochs_per_iteration),
                    ('num_iterations_between_eval', num_iterations_between_eval),
                    ('num_episodes_per_eval', num_episodes_per_eval),
                    ('learning_rate', learning_rate)):
                setattr(train_context, attr_name, value)
        super().train(train_context=train_context, callbacks=callbacks, default_plots=default_plots)
        return train_context
class RandomAgent(EasyAgent):
"""Agent | |
<gh_stars>10-100
##############################################
# The MIT License (MIT)
# Copyright (c) 2016 <NAME>
# see LICENSE for full details
##############################################
import attr
from math import sqrt
from ins_nav.utils import normalize3
# from math import radians as deg2rad
# from math import degrees as rad2deg
from squaternion import Quaternion
from ins_nav.utils import RAD2DEG, DEG2RAD
@attr.s(slots=True)
class AHRS(object):
q = attr.ib(default=Quaternion())
def reset(self, q=None):
if q is None:
self.q = Quaternion(1, 0, 0, 0)
else:
self.q = q.normalize
    def updateAGM(self, a, m, g, beta, dt, degrees=True):
        """Fuse accelerometer, gyro and magnetometer readings into self.q.

        One gradient-descent orientation-filter step; the structure matches
        Madgwick's MARG/AHRS algorithm (NOTE(review): confirm the exact
        reference implementation). The current orientation is tracked
        internally in self.q, updated in place, and also returned.

        Args:
            a - acceleration [g's]; normalized internally, so only the
                direction matters
            m - magnetometer readings [uT]; normalized internally
            g - gyro readings; interpreted as deg/sec and converted when
                degrees is True, otherwise rad/sec
            beta - filter gain, a function of sensor noise
            dt - time step [sec]
            degrees - if True, convert gyro readings from deg/sec to rad/sec
        Return:
            q - current quaternion Quaternion(w,x,y,z), normalized
        """
        # Quaternion unpacks into its (w, x, y, z) components.
        q0, q1, q2, q3 = self.q
        if degrees:
            g = (x*DEG2RAD for x in g)
        gx, gy, gz = g
        ax, ay, az = a
        mx, my, mz = m
        # Rate of change of quaternion from gyroscope
        qDot1 = 0.5 * (-q1 * gx - q2 * gy - q3 * gz)
        qDot2 = 0.5 * ( q0 * gx + q2 * gz - q3 * gy)
        qDot3 = 0.5 * ( q0 * gy - q1 * gz + q3 * gx)
        qDot4 = 0.5 * ( q0 * gz + q1 * gy - q2 * gx)
        # Accel and mag contribute direction information only: use unit vectors.
        ax, ay, az = normalize3(ax, ay, az)
        mx, my, mz = normalize3(mx, my, mz)
        # Auxiliary variables to avoid repeated arithmetic
        _2q0mx = 2.0 * q0 * mx
        _2q0my = 2.0 * q0 * my
        _2q0mz = 2.0 * q0 * mz
        _2q1mx = 2.0 * q1 * mx
        _2q0 = 2.0 * q0
        _2q1 = 2.0 * q1
        _2q2 = 2.0 * q2
        _2q3 = 2.0 * q3
        _2q0q2 = 2.0 * q0 * q2
        _2q2q3 = 2.0 * q2 * q3
        q0q0 = q0 * q0
        q0q1 = q0 * q1
        q0q2 = q0 * q2
        q0q3 = q0 * q3
        q1q1 = q1 * q1
        q1q2 = q1 * q2
        q1q3 = q1 * q3
        q2q2 = q2 * q2
        q2q3 = q2 * q3
        q3q3 = q3 * q3
        # Reference direction of Earth's magnetic field
        hx = mx * q0q0 - _2q0my * q3 + _2q0mz * q2 + mx * q1q1 + _2q1 * my * q2 + _2q1 * mz * q3 - mx * q2q2 - mx * q3q3
        hy = _2q0mx * q3 + my * q0q0 - _2q0mz * q1 + _2q1mx * q2 - my * q1q1 + my * q2q2 + _2q2 * mz * q3 - my * q3q3
        _2bx = sqrt(hx * hx + hy * hy)
        _2bz = -_2q0mx * q2 + _2q0my * q1 + mz * q0q0 + _2q1mx * q3 - mz * q1q1 + _2q2 * my * q3 - mz * q2q2 + mz * q3q3
        _4bx = 2.0 * _2bx
        _4bz = 2.0 * _2bz
        # Gradient descent algorithm corrective step
        s0 = -_2q2 * (2.0 * q1q3 - _2q0q2 - ax) + _2q1 * (2.0 * q0q1 + _2q2q3 - ay) - _2bz * q2 * (_2bx * (0.5 - q2q2 - q3q3) + _2bz * (q1q3 - q0q2) - mx) + (-_2bx * q3 + _2bz * q1) * (_2bx * (q1q2 - q0q3) + _2bz * (q0q1 + q2q3) - my) + _2bx * q2 * (_2bx * (q0q2 + q1q3) + _2bz * (0.5 - q1q1 - q2q2) - mz)
        s1 = _2q3 * (2.0 * q1q3 - _2q0q2 - ax) + _2q0 * (2.0 * q0q1 + _2q2q3 - ay) - 4.0 * q1 * (1 - 2.0 * q1q1 - 2.0 * q2q2 - az) + _2bz * q3 * (_2bx * (0.5 - q2q2 - q3q3) + _2bz * (q1q3 - q0q2) - mx) + (_2bx * q2 + _2bz * q0) * (_2bx * (q1q2 - q0q3) + _2bz * (q0q1 + q2q3) - my) + (_2bx * q3 - _4bz * q1) * (_2bx * (q0q2 + q1q3) + _2bz * (0.5 - q1q1 - q2q2) - mz)
        s2 = -_2q0 * (2.0 * q1q3 - _2q0q2 - ax) + _2q3 * (2.0 * q0q1 + _2q2q3 - ay) - 4.0 * q2 * (1 - 2.0 * q1q1 - 2.0 * q2q2 - az) + (-_4bx * q2 - _2bz * q0) * (_2bx * (0.5 - q2q2 - q3q3) + _2bz * (q1q3 - q0q2) - mx) + (_2bx * q1 + _2bz * q3) * (_2bx * (q1q2 - q0q3) + _2bz * (q0q1 + q2q3) - my) + (_2bx * q0 - _4bz * q2) * (_2bx * (q0q2 + q1q3) + _2bz * (0.5 - q1q1 - q2q2) - mz)
        s3 = _2q1 * (2.0 * q1q3 - _2q0q2 - ax) + _2q2 * (2.0 * q0q1 + _2q2q3 - ay) + (-_4bx * q3 + _2bz * q1) * (_2bx * (0.5 - q2q2 - q3q3) + _2bz * (q1q3 - q0q2) - mx) + (-_2bx * q0 + _2bz * q2) * (_2bx * (q1q2 - q0q3) + _2bz * (q0q1 + q2q3) - my) + _2bx * q1 * (_2bx * (q0q2 + q1q3) + _2bz * (0.5 - q1q1 - q2q2) - mz)
        # Normalize the step magnitude (Quaternion.normalize is a property here).
        s0, s1, s2, s3 = Quaternion(s0, s1, s2, s3).normalize
        # Apply feedback step
        qDot1 -= beta * s0
        qDot2 -= beta * s1
        qDot3 -= beta * s2
        qDot4 -= beta * s3
        # Integrate the rate of change to obtain the new orientation.
        q0 += qDot1 * dt
        q1 += qDot2 * dt
        q2 += qDot3 * dt
        q3 += qDot4 * dt
        self.q = Quaternion(q0, q1, q2, q3).normalize
        return self.q
def updateAG(self, a, g, beta, dt, degrees=True):
q0, q1, q2, q3 = self.q
if degrees:
g = (x*DEG2RAD for x in g)
gx, gy, gz = g
ax, ay, az = a
# Rate of change of quaternion from gyroscope
qDot1 = 0.5 * (-q1 * gx - q2 * gy - q3 * gz)
qDot2 = 0.5 * (q0 * gx + q2 * gz - q3 * gy)
qDot3 = 0.5 * (q0 * gy - q1 * gz + q3 * gx)
qDot4 = 0.5 * (q0 * gz + q1 * gy - q2 * gx)
# Compute feedback only if accelerometer measurement valid (avoids NaN
# in accelerometer normalisation)
ax, ay, az = normalize3(ax, ay, az)
# Auxiliary variables to avoid repeated arithmetic
_2q0 = 2.0 * q0
_2q1 = 2.0 * q1
_2q2 = 2.0 * q2
_2q3 = 2.0 * q3
_4q0 = 4.0 * q0
_4q1 = 4.0 * q1
_4q2 = 4.0 * q2
_8q1 = 8.0 * q1
_8q2 = 8.0 * q2
q0q0 = q0 * q0
q1q1 = q1 * q1
q2q2 = q2 * q2
q3q3 = q3 * q3
# Gradient decent algorithm corrective step
s0 = _4q0 * q2q2 + _2q2 * ax + _4q0 * q1q1 - _2q1 * ay
s1 = _4q1 * q3q3 - _2q3 * ax + 4.0 * q0q0 * q1 - _2q0 * ay - _4q1 + _8q1 * q1q1 + _8q1 * q2q2 + _4q1 * az
s2 = 4.0 * q0q0 * q2 + _2q0 * ax + _4q2 * q3q3 - _2q3 * ay - _4q2 + _8q2 * q1q1 + _8q2 * q2q2 + _4q2 * az
s3 = 4.0 * q1q1 * q3 - _2q1 * ax + 4.0 * q2q2 * q3 - _2q2 * ay
s0, s1, s2, s3 = Quaternion(s0, s1, s2, s3).normalize
# Apply feedback step
qDot1 -= beta * s0
qDot2 -= beta * s1
qDot3 -= beta * s2
| |
"Charm",
"Ecstasy",
"Enthrall",
"Permanent Madness",
"False Memory",
"Avoid",
"Nightmare",
"Hallucination",
"Lesser Geas",
"Suggestion",
"Mass Suggestion",
"Glib Tongue",
"Enslave",
"Great Hallucination",
"Great Geas",
"Haste",
"Apportation",
"Glue",
"Grease",
"Deflect Missile",
"Hold Fast",
"Increase Burden",
"Jump",
"Levitation",
"Lighten Burden",
"Locksmith",
"Long March",
"Poltergeist",
"Quick March",
"Slow Fall",
"Wallwalker",
"Dancing Object",
"Distant Blow",
"Lockmaster",
"Manipulate",
"Slow",
"Undo",
"Winged Knife",
"Flight",
"Light Tread",
"Slide",
"Flying Carpet",
"Hawk Flight",
"Ethereal Body",
"Great Haste",
"Pull",
"Repel",
"Swim",
"Teleport",
"Teleport Other",
"Blink",
"Blink Other",
"Freedom",
"Cloud-Walking",
"Cloud-Vaulting",
"Death Vision",
"Sense Spirit",
"Summon Spirit",
"Animation",
"Steal Energy",
"Steal Vitality",
"Materialize",
"Solidify",
"Affect Spirits",
"Skull-Spirit",
"Turn Spirit",
"Zombie",
"Control Zombie",
"Turn Zombie",
"Zombie Summoning",
"Mass Zombie",
"Slow Healing",
"Stop Healing",
"Command Spirit (Banshees)",
"Command Spirit (Specters)",
"Command Spirit (Manitous)",
"Age",
"Pestilence",
"Evisceration",
"Animate Shadow",
"Rotting Death",
"Soul Jar",
"Summon Demon",
"Banish",
"Entrap Spirit",
"Repel Spirits",
"Bind Spirit (Banshees)",
"Bind Spirit (Specters)",
"Bind Spirit (Manitous)",
"Steal Grace",
"Steal Vigor",
"Steal Might",
"Steal Wisdom",
"Steal Skill",
"Steal Youth",
"Steal Beauty",
"Astral Block",
"Lich",
"Wraith",
"Shape Plant",
"Plant Growth",
"Plant Vision",
"Sense Danger",
"Detect Poison",
"Magelock",
"Block",
"Hardiness",
"Watchdog",
"Nightingale",
"Sense Observation",
"Shield",
"Armor",
"Turn Blade",
"Bladeturning",
"Missile Shield",
"Catch Missile",
"Reverse Missiles",
"Return Missile",
"Reflect Gaze",
"Mystic Mist",
"Shade",
"Iron Arm",
"Weather Dome",
"Atmosphere Dome",
"Resist Pressure",
"Teleport Shield",
"Force Dome",
"Force Wall",
"Utter Dome",
"Utter Wall",
"Sound",
"Silence",
"Sound Vision",
"Thunderclap",
"Voices",
"Garble",
"Imitate Voice",
"Wall of Silence",
"Hush",
"Mage-Stealth",
"Great Voice",
"Noise",
"Delayed Message",
"Resist Sound",
"Sound Jet",
"Converse",
"Far-Hearing",
"Scribe",
"Musical Scribe",
"Message",
"Silver Tongue",
"Wizard Ear",
"Invisible Wizard Ear",
"Seek Machine",
"Reveal Function",
"Machine Control",
"Machine Summoning",
"Machine Speech",
"Glitch",
"Malfunction",
"Schematic",
"Rebuild",
"Animate Machine",
"Machine Possession",
"Permanent Machine Possession",
"Awaken Computer",
"Seek Power",
"Seek Fuel",
"Test Fuel",
"Preserve Fuel",
"Purify Fuel",
"Create Fuel",
"Essential Fuel",
"Stop Power",
"Lend Power",
"Propel",
"Conduct Power",
"Steal Power",
"Draw Power",
"Magnetic Vision",
"Radio Hearing",
"Spectrum Vision",
"Seek Plastic",
"Identify Metal",
"Identify Plastic",
"Shape Metal",
"Shape Plastic",
"Metal Vision",
"Plastic Vision",
"Body of Metal",
"Body of Plastic",
"Seek Water",
"Seek Coastline",
"Purify Water",
"Create Water",
"Destroy Water",
"Icy Weapon",
"Shape Water",
"Umbrella",
"Body of Water",
"Foul Water",
"Freeze",
"Ice Slick",
"Ice Sphere",
"Icy Missiles",
"Melt Ice",
"Resist Water",
"Snow Shoes",
"Walk on Water",
"Water Jet",
"Water Vision",
"Whirlpool",
"Coolness",
"Create Ice",
"Dehydrate",
"Ice Dagger",
"Icy Touch",
"Walk Through Water",
"Dry Spring",
"Essential Water",
"Frostbite",
"Snow Jet",
"Breathe Water",
"Body of Ice",
"Boil Water",
"Condense Steam",
"Create Acid",
"Create Spring",
"Flesh to Ice",
"Create Steam",
"Resist Acid",
"Geyser",
"Rain of Acid",
"Steam Jet",
"Acid Ball",
"Acid Jet",
"Rain of Ice Daggers",
"Icy Breath",
"Breathe Steam",
"Spit Acid",
"Essential Acid",
"Summon Water Elemental",
"Control Water Elemental",
"Create Water Elemental",
"Frost",
"Fog",
"Predict Weather",
"Waves",
"Clouds",
"Current",
"Tide",
"Wind",
"Rain",
"Snow",
"Hail",
"Warm",
"Cool",
"Storm",
"Resist Lightning",
"Lightning",
"Explosive Lightning",
"Lightning Whip",
"Shocking Touch",
"Spark Cloud",
"Spark Storm",
"Wall of Lightning",
"Ball of Lightning",
"Lightning Stare",
"Body of Lightning",
"Lightning Armor",
"Lightning Weapon",
"Lightning Missiles",
}
# Spell colleges excluded outright by whatever selection logic consumes this
# table (name only -- the consuming code is elsewhere in this module).
banned_colleges = {
    "Enchantment",
    "Weapon Enchantment",
    "Armor Enchantment",
    "Radiation",
}
# The only colleges a bard character may draw spells from.
allowed_bard_colleges = {"Communication", "Mind Control"}
# dict of spell name to set of colleges to which it belongs
spell_to_colleges: Dict[str, Set[str]] = {}
# dict of spell name to text of a function that takes (traits, trait_names) as
# arguments and returns True iff the prereqs are satisfied
spell_to_prereq_function: Dict[str, str] = {}
# Layout of the GCS spell-library XML these tables are built from:
# gcs_library/spell_list/spell/name
# gcs_library/spell_list/spell/categories/category
# (don't use college as that has things like Air/Knowledge)
# gcs_library/spell_list/spell/prereq_list @all="yes" or "no"
# can nest prereq_list elements - up to 3 deep in data
# spell_prereq @has="yes"
# college_count @compare="at_least"10 (if twice, 2 spells from each)
# spell_prereq @has="yes"
# <college compare="contains">air</college>
# <quantity compare="at_least">5</college>
# <name compare="is">aura</college> (lowercase)
# <name compare="starts with">seek power</college>
# <name compare="contains">lightning</college>
# advantage_prereq @has="yes" or "no"
# <name compare="is">magery</name>
# <level compare="at_least">2</name>
# <notes compare="contains">one college (gate)</name>
# <notes compare="does not contain">one college</name>
# Known data bugs in the library:
# bug: geyser prereqs: create well should be create spring, quantity 5
# should be on college water not create spring
# should be college earth not name earth
def count_spell_colleges(traits: List[Tuple[str, int, TraitType]]) -> int:
    """Return how many distinct colleges are spanned by the known spells in *traits*."""
    distinct: Set[str] = set()
    for name, *_rest in traits:
        # Unknown trait names (non-spells) contribute nothing.
        distinct.update(spell_to_colleges.get(name, ()))
    return len(distinct)
def count_spells_from_each_college(
    traits: List[Tuple[str, int, TraitType]]
) -> typing.Counter[str]:
    """Tally, per college, how many known spells in *traits* belong to it."""
    tally: typing.Counter[str] = Counter()
    for name, *_rest in traits:
        # Counter.update with an iterable increments each college by one.
        tally.update(spell_to_colleges.get(name, ()))
    return tally
def count_spells_starting_with(
    traits: List[Tuple[str, int, TraitType]], st: str
) -> int:
    """Count known spells among *traits* whose title-cased name starts with *st*."""
    prefix = st.title()
    return sum(
        1
        for tup in traits
        if tup[0].title().startswith(prefix) and tup[0].title() in spell_to_colleges
    )
def count_spells_containing(
    traits: List[Tuple[str, int, TraitType]], st: str
) -> int:
    """Count known spells among *traits* whose title-cased name contains *st*."""
    needle = st.title()
    return sum(
        1
        for tup in traits
        if needle in tup[0].title() and tup[0].title() in spell_to_colleges
    )
def count_spells(traits: List[Tuple[str, int, TraitType]]) -> int:
    """Count how many traits are known spells (title-cased name in the spell table)."""
    return sum(1 for tup in traits if tup[0].title() in spell_to_colleges)
def _parse_spell_prereq(el: et.Element, function_name: str) -> str:
    """Parse a <spell_prereq> element and its children.

    Return a str of Python code that takes traits and trait_names as
    arguments and evaluates to True iff the prereqs are satisfied.

    The returned source defines a single function named *function_name*; the
    generated bodies call the count_* helpers defined in this module, so the
    caller must exec the text in a namespace where those helpers exist.
    Dispatch is keyed on the number of child elements and their tag/"compare"
    attributes; any combination not handled below trips the assert at the end.
    """
    if len(el) == 1:
        child = el[0]
        if child.tag == "name":
            # Prereq on a specific spell name: exact match, substring, or prefix.
            if child.get("compare") == "is":
                return """
def %s(traits, trait_names):
    return '''%s''' in trait_names
""" % (
                    function_name,
                    child.text.title(),
                )
            elif child.get("compare") == "contains":
                return """
def %s(traits, trait_names):
    for trait in trait_names:
        if '''%s'''.title() in trait.title():
            return True
    return False
""" % (
                    function_name,
                    child.text,
                )
            elif child.get("compare") == "starts with":
                return """
def %s(traits, trait_names):
    for trait in trait_names:
        if trait.title().startswith('''%s'''):
            return True
    return False
""" % (
                    function_name,
                    child.text.title(),
                )
        elif child.tag == "college_count":
            # Prereq on the number of distinct colleges the character knows spells from.
            if child.get("compare") == "at_least":
                return """
def %s(traits, trait_names):
    count = count_spell_colleges(traits)
    return count >= %d
""" % (
                    function_name,
                    int(child.text),
                )
        elif child.tag == "college":
            # Prereq on knowing at least one spell of a given college.
            if child.get("compare") == "contains":
                return """
def %s(traits, trait_names):
    counter = count_spells_from_each_college(traits)
    for college, quantity in counter.items():
        if '''%s''' in college.title() and quantity >= 1:
            return True
    return False
""" % (
                    function_name,
                    child.text.title(),
                )
            elif child.get("compare") == "is":
                return """
def %s(traits, trait_names):
    counter = count_spells_from_each_college(traits)
    return counter['''%s'''] >= 1
""" % (
                    function_name,
                    child.text,
                )
    elif len(el) == 2:
        # Two children: a predicate element plus a <quantity> threshold.
        if el.find("college") is not None and el.find("quantity") is not None:
            college_el = el.find("college")
            quantity_el = el.find("quantity")
            # NOTE: both branches below emit the same template; "contains" is
            # treated like "is" here (the college text is used as an exact
            # Counter key in either case).
            if (
                college_el.get("compare") == "contains"
                and quantity_el.get("compare") == "at_least"
            ):
                return """
def %s(traits, trait_names):
    counter = count_spells_from_each_college(traits)
    return counter['''%s'''] >= %d
""" % (
                    function_name,
                    college_el.text,
                    int(quantity_el.text),
                )
            elif (
                college_el.get("compare") == "is"
                and quantity_el.get("compare") == "at_least"
            ):
                return """
def %s(traits, trait_names):
    counter = count_spells_from_each_college(traits)
    return counter['''%s'''] >= %d
""" % (
                    function_name,
                    college_el.text,
                    int(quantity_el.text),
                )
        elif el.find("name") is not None and el.find("quantity") is not None:
            name_el = el.find("name")
            quantity_el = el.find("quantity")
            if (
                name_el.get("compare") == "starts with"
                and quantity_el.get("compare") == "at_least"
            ):
                return """
def %s(traits, trait_names):
    return count_spells_starting_with(traits, '''%s''') >= %d
""" % (
                    function_name,
                    name_el.text.title(),
                    int(quantity_el.text),
                )
            elif (
                name_el.get("compare") == "is"
                and quantity_el.get("compare") == "is"
            ):
                # quantity "is" with an exact name degenerates to simple membership.
                return """
def %s(traits, trait_names):
    return '''%s''' in trait_names
""" % (
                    function_name,
                    name_el.text.title(),
                )
            elif (
                name_el.get("compare") == "contains"
                and quantity_el.get("compare") == "at_least"
            ):
                return """
def %s(traits, trait_names):
    return count_spells_containing(traits, '''%s''') >= %d
""" % (
                    function_name,
                    name_el.text.title(),
                    int(quantity_el.text),
                )
            # XXX This will never be true. Rider Within (@animal)
            # (an exact name cannot appear more than once in trait_names, so a
            # threshold above 1 is unsatisfiable by construction)
            elif (
                name_el.get("compare") == "is"
                and quantity_el.get("compare") == "at_least"
            ):
                return """
def %s(traits, trait_names):
    count = 0
    for trait in trait_names:
        if trait == '''%s''':
            count += 1
    return count >= %d
""" % (
                    function_name,
                    name_el.text.title(),
                    int(quantity_el.text),
                )
            elif (
                name_el.get("compare") == "is anything"
                and quantity_el.get("compare") == "at_least"
            ):
                return """
def %s(traits, trait_names):
    return count_spells(traits) >= %d
""" % (
                    function_name,
                    int(quantity_el.text),
                )
        elif el.find("any") is not None and el.find("quantity") is not None:
            # <any> + <quantity>: at least N spells of any kind.
            quantity_el = el.find("quantity")
            if quantity_el.get("compare") == "at_least":
                return """
def %s(traits, trait_names):
    return count_spells(traits) >= %d
""" % (
                    function_name,
                    int(quantity_el.text),
                )
    assert False, "parse_spell_prereq %s" % et.tostring(el)
def _parse_advantage_prereq(el: et.Element, function_name: str) -> str:
"""Parse a <advantage_prereq> element and its children.
Return a str of Python code that takes traits and trait_names as
arguments and evaluates to True iff the prereqs are satisfied.
"""
if el.get("has") == "no":
if len(el) == 2:
name_el = | |
""" Implement the Vanilla Interval domain based on PyTorch.
Vanilla Interval: Simply propagates using interval arithmetic without any optimization.
"""
from __future__ import annotations
from pathlib import Path
from typing import Tuple, Union, Iterator, Callable, Iterable
import torch
from torch import Tensor, nn
from torch.nn import functional as F
from diffabs.abs import AbsDom, AbsEle, AbsDist, AbsBlackSheep, forward_linear
from diffabs.utils import valid_lb_ub, divide_pos_neg
class Dom(AbsDom):
    """Entry point of the vanilla interval domain.

    Members of this module (Ele, Dist, Linear, ...) are exposed as attributes
    of the domain object via module-global lookup.
    """

    # The domain is named after this module file, extension stripped.
    name = Path(__file__).with_suffix('').name

    def __getattr__(self, name: str) -> object:
        assert name in globals()
        return globals()[name]
class Ele(AbsEle):
    """An abstract element of the vanilla interval domain.

    The element is an axis-aligned box represented by exactly two tensors:
    the lower bounds and the upper bounds.
    """

    def __init__(self, lb: Tensor, ub: Tensor):
        """Store the box bounds; requires lb <= ub point-wise."""
        assert valid_lb_ub(lb, ub)
        self._lb = lb
        self._ub = ub

    @classmethod
    def by_intvl(cls, lb: Tensor, ub: Tensor) -> Ele:
        """Construct an element directly from interval bounds."""
        return Ele(lb, ub)

    def __iter__(self) -> Iterator[Tensor]:
        # Iterates the two bound tensors, LB first.
        yield self._lb
        yield self._ub

    def __getitem__(self, key):
        return Ele(self._lb[key], self._ub[key])

    def __len__(self) -> int:
        return len(self._lb)

    def size(self):
        return self._lb.size()

    def dim(self):
        return self._lb.dim()

    def device(self):
        return self._lb.device

    def lb(self) -> Tensor:
        return self._lb

    def ub(self) -> Tensor:
        return self._ub

    def view(self, *shape) -> Ele:
        return Ele(self._lb.view(*shape), self._ub.view(*shape))

    def contiguous(self) -> Ele:
        return Ele(self._lb.contiguous(), self._ub.contiguous())

    def transpose(self, dim0, dim1) -> Ele:
        return Ele(self._lb.transpose(dim0, dim1), self._ub.transpose(dim0, dim1))

    def matmul(self, weights: Tensor) -> Ele:
        """Interval matrix product via the sign-split trick:

            L' = L @ W+ + U @ W-        U' = U @ W+ + L @ W-

        where W+ = max(0, W) and W- = min(0, W). Much faster than the naive
        per-entry min/max formulation.
        """
        ws_pos, ws_neg = divide_pos_neg(weights)
        new_lb = self._lb.matmul(ws_pos) + self._ub.matmul(ws_neg)
        new_ub = self._ub.matmul(ws_pos) + self._lb.matmul(ws_neg)
        return Ele(new_lb, new_ub)

    def __add__(self, other) -> Ele:
        """Interval sum with another element, or a constant shift by tensor/scalar."""
        if isinstance(other, Ele):
            return Ele(self._lb + other._lb, self._ub + other._ub)
        return Ele(self._lb + other, self._ub + other)

    def __mul__(self, flt) -> Ele:
        """Scale by a scalar, or element-wise by a 1-D per-output-dimension factor."""
        if isinstance(flt, Tensor) and flt.dim() == 1 and flt.shape[0] == self.size()[-1]:
            # Per-dimension factors: same sign-split trick as matmul().
            ws_pos, ws_neg = divide_pos_neg(flt)
            new_lb = self._lb * ws_pos + self._ub * ws_neg
            new_ub = self._ub * ws_pos + self._lb * ws_neg
            return Ele(new_lb, new_ub)
        if not isinstance(flt, (float, int)):
            raise ValueError('Unsupported multiplication with', str(flt), type(flt))
        factor = float(flt)
        if factor >= 0:
            return Ele(self._lb * factor, self._ub * factor)
        # A negative factor flips the interval, so the bounds swap roles.
        return Ele(self._ub * factor, self._lb * factor)

    def __rmul__(self, flt) -> Ele:
        return self.__mul__(flt)
def cat0(es: Iterable[Ele]) -> Ele:
    """Concatenate abstract elements along dimension 0 (the batch dimension).

    *es* is materialized into a list first so that one-shot iterables
    (e.g. generators) are handled correctly; iterating the argument twice
    with two separate comprehensions would silently exhaust such an input
    on the first pass and leave the UB list empty.
    """
    elems = list(es)
    new_lb = torch.cat([e._lb for e in elems], dim=0)
    new_ub = torch.cat([e._ub for e in elems], dim=0)
    return Ele(new_lb, new_ub)
class Dist(AbsDist):
    """ Vanilla interval domain is non-relational, thus the distances are purely based on LB/UB tensors.

    Each cols_* method returns a per-sample tensor intended as a loss term:
    it is zero once the stated property holds with margin eps for every
    concretization of the abstract element, and positive otherwise.
    """
    def __init__(self, eps: float = 1e-5):
        """
        :param eps: add to break the tie when choosing max/min.
        """
        self.eps = eps
        return
    def cols_not_max(self, e: Ele, *idxs: int) -> Tensor:
        """ Intuitively, always-not-max => exists col . target < col is always true.
        Therefore, target_col.UB() - other_col.LB() should < 0, if not, that is the distance.
        As long as some of the others < 0, it's OK (i.e., min).
        """
        # presumably the complement of idxs among the output columns
        # (helper inherited from AbsDist) -- TODO confirm
        others = self._idxs_not(e, *idxs)
        others = e.lb()[..., others]
        res = []
        for i in idxs:
            target = e.ub()[..., [i]]
            diff = target - others  # will broadcast
            diff = F.relu(diff + self.eps)
            # only one other column needs to dominate the target, hence min
            mins, _ = torch.min(diff, dim=-1)
            res.append(mins)
        return sum(res)
    def cols_is_max(self, e: Ele, *idxs: int) -> Tensor:
        """ Intuitively, some-is-max => exists target . target > all_others is always true.
        Therefore, other_col.UB() - target_col.LB() should < 0, if not, that is the distance.
        All of the others should be accounted (i.e., max).
        """
        others = self._idxs_not(e, *idxs)
        others = e.ub()[..., others]
        res = []
        for i in idxs:
            target = e.lb()[..., [i]]
            diffs = others - target  # will broadcast
            diffs = F.relu(diffs + self.eps)
            res.append(diffs)
        if len(idxs) == 1:
            all_diffs = res[0]
        else:
            all_diffs = torch.stack(res, dim=-1)
            all_diffs, _ = torch.min(all_diffs, dim=-1)  # it's OK to have either one to be max, thus use torch.min()
        # then it needs to surpass everybody else, thus use torch.max() for maximum distance
        diffs, _ = torch.max(all_diffs, dim=-1)
        return diffs
    def cols_not_min(self, e: Ele, *idxs: int) -> Tensor:
        """ Intuitively, always-not-min => exists col . col < target is always true.
        Therefore, other_col.UB() - target_col.LB() should < 0, if not, that is the distance.
        As long as some of the others < 0, it's OK (i.e., min).
        """
        others = self._idxs_not(e, *idxs)
        others = e.ub()[..., others]
        res = []
        for i in idxs:
            target = e.lb()[..., [i]]
            diffs = others - target  # will broadcast
            diffs = F.relu(diffs + self.eps)
            # only one other column needs to fall below the target, hence min
            mins, _ = torch.min(diffs, dim=-1)
            res.append(mins)
        return sum(res)
    def cols_is_min(self, e: Ele, *idxs: int) -> Tensor:
        """ Intuitively, some-is-min => exists target . target < all_others is always true.
        Therefore, target_col.UB() - other_col.LB() should < 0, if not, that is the distance.
        All of the others should be accounted (i.e., max).
        """
        others = self._idxs_not(e, *idxs)
        others = e.lb()[..., others]
        res = []
        for i in idxs:
            target = e.ub()[..., [i]]
            diffs = target - others  # will broadcast
            diffs = F.relu(diffs + self.eps)
            res.append(diffs)
        if len(idxs) == 1:
            all_diffs = res[0]
        else:
            all_diffs = torch.stack(res, dim=-1)
            all_diffs, _ = torch.min(all_diffs, dim=-1)  # it's OK to have either one to be min, thus use torch.min()
        # then it needs to surpass everybody else, thus use torch.max() for maximum distance
        diffs, _ = torch.max(all_diffs, dim=-1)
        return diffs
    pass
class BlackSheep(AbsBlackSheep):
    """Per-sample classification properties over abstract elements.

    Both methods assume one label per batch element and return raw (signed)
    per-column differences rather than ReLU-ed distances.
    """
    def labels_predicted(self, e: Ele, labels: Tensor) -> Tensor:
        """ Intuitively, this is specifying a label_is_max for every input abstraction. """
        # TODO to review again
        full_lb = e.lb()
        full_ub = e.ub()
        res = []
        for i in range(len(labels)):
            cat = labels[i]
            piece_outs_lb = full_lb[[i]]
            piece_outs_ub = full_ub[[i]]
            # default lb-ub or ub-lb doesn't know that target domain has distance 0, so specify that explicitly
            lefts = piece_outs_ub[..., :cat]
            rights = piece_outs_ub[..., cat + 1:]
            target = piece_outs_lb[..., [cat]]
            full = torch.cat((lefts, target, rights), dim=-1)
            diffs = full - target  # will broadcast
            # no need to ReLU here, negative values are also useful
            res.append(diffs)
        res = torch.cat(res, dim=0)
        return res
    def labels_not_predicted(self, e: Ele, labels: Tensor) -> Tensor:
        """ Intuitively, this is specifying a label_not_max for every input abstraction.
        :param label: same number of batches as self
        """
        full_lb = e.lb()
        full_ub = e.ub()
        res = []
        for i in range(len(labels)):
            cat = labels[i]
            piece_outs_lb = full_lb[[i]]
            piece_outs_ub = full_ub[[i]]
            # default lb-ub or ub-lb doesn't know that target domain has distance 0, so specify that explicitly
            lefts = piece_outs_lb[..., :cat]
            rights = piece_outs_lb[..., cat+1:]
            target = piece_outs_ub[..., [cat]]
            full = torch.cat((lefts, target, rights), dim=-1)
            diffs = target - full  # will broadcast
            # no need to ReLU here, negative values are also useful
            res.append(diffs)
        res = torch.cat(res, dim=0)
        # TODO
        raise NotImplementedError('To use this as distance, it has to have target category not being max, ' +
                                  'thus use torch.min(dim=-1) then ReLU().')
        # NOTE(review): unreachable -- kept from the draft implementation above;
        # remove once the raise is resolved.
        return res
    pass
# ===== Below are customized layers that can take and propagate abstract elements. =====
class Linear(nn.Linear):
""" Linear layer with the ability to take approximations rather than concrete inputs. """
def __str__(self):
return f'{Dom.name}.' + super().__str__()
@classmethod
def from_module(cls, src: nn.Linear) -> Linear:
with_bias = src.bias is not None
new_lin = Linear(src.in_features, src.out_features, with_bias)
new_lin.load_state_dict(src.state_dict())
return new_lin
def export(self) -> nn.Linear:
with_bias = self.bias is not None
lin = nn.Linear(self.in_features, self.out_features, with_bias)
lin.load_state_dict(self.state_dict())
return lin
def forward(self, *ts: Union[Tensor, Ele]) -> Union[Tensor, Ele, Tuple[Tensor, ...]]:
""" Re-implement the forward computation by myself, because F.linear() may apply optimization using
torch.addmm() which requires inputs to be tensor.
:param ts: either Tensor, Ele, or Ele tensors
:rtype: corresponding to inputs, Tensor for Tensor, Ele for Ele, Ele tensors for Ele tensors
"""
input_is_ele = True
if len(ts) == 1:
if isinstance(ts[0], Tensor):
return super().forward(ts[0]) # plain tensor, no abstraction
elif isinstance(ts[0], Ele):
e = ts[0] # abstract element
else:
raise | |
__init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
    def validate(self):
        # Hook for required-field validation; this model has nothing to check.
        pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class UpdateTheGroupRolesOfGroupMemberRequest(TeaModel):
    """Request model for updating the group roles held by one group member."""

    def __init__(
        self,
        open_conversation_id: str = None,
        user_id: str = None,
        open_role_ids: List[str] = None,
        ding_token_grant_type: int = None,
        ding_org_id: int = None,
        ding_isv_org_id: int = None,
        ding_suite_key: str = None,
        ding_oauth_app_id: int = None,
    ):
        # Open conversation (group) id.
        self.open_conversation_id = open_conversation_id
        # Id of the member whose roles are updated.
        self.user_id = user_id
        # Group role ids to assign.
        self.open_role_ids = open_role_ids
        self.ding_token_grant_type = ding_token_grant_type
        self.ding_org_id = ding_org_id
        self.ding_isv_org_id = ding_isv_org_id
        self.ding_suite_key = ding_suite_key
        self.ding_oauth_app_id = ding_oauth_app_id

    def validate(self):
        pass

    def to_map(self):
        """Serialize to a wire-format dict, omitting fields that are unset (None)."""
        _map = super().to_map()
        if _map is not None:
            return _map
        pairs = (
            ('openConversationId', self.open_conversation_id),
            ('userId', self.user_id),
            ('openRoleIds', self.open_role_ids),
            ('dingTokenGrantType', self.ding_token_grant_type),
            ('dingOrgId', self.ding_org_id),
            ('dingIsvOrgId', self.ding_isv_org_id),
            ('dingSuiteKey', self.ding_suite_key),
            ('dingOauthAppId', self.ding_oauth_app_id),
        )
        return {key: value for key, value in pairs if value is not None}

    def from_map(self, m: dict = None):
        """Populate this model from a wire-format dict *m*; return self for chaining."""
        data = m or dict()
        for key, attr_name in (
            ('openConversationId', 'open_conversation_id'),
            ('userId', 'user_id'),
            ('openRoleIds', 'open_role_ids'),
            ('dingTokenGrantType', 'ding_token_grant_type'),
            ('dingOrgId', 'ding_org_id'),
            ('dingIsvOrgId', 'ding_isv_org_id'),
            ('dingSuiteKey', 'ding_suite_key'),
            ('dingOauthAppId', 'ding_oauth_app_id'),
        ):
            value = data.get(key)
            if value is not None:
                setattr(self, attr_name, value)
        return self
class UpdateTheGroupRolesOfGroupMemberResponseBody(TeaModel):
    """Response body for updating a group member's group roles."""

    def __init__(
        self,
        success: bool = None,
    ):
        # Whether the update succeeded.
        self.success = success

    def validate(self):
        """No client-side validation is required for this model."""
        pass

    def to_map(self):
        """Serialize to a dict, omitting the field when unset."""
        _map = super().to_map()
        if _map is not None:
            return _map
        return dict() if self.success is None else {'success': self.success}

    def from_map(self, m: dict = None):
        """Populate from a dict and return ``self``."""
        data = m or dict()
        success = data.get('success')
        if success is not None:
            self.success = success
        return self
class UpdateTheGroupRolesOfGroupMemberResponse(TeaModel):
    """Response wrapper pairing HTTP headers with the parsed body model."""

    def __init__(
        self,
        headers: Dict[str, str] = None,
        body: UpdateTheGroupRolesOfGroupMemberResponseBody = None,
    ):
        self.headers = headers
        self.body = body

    def validate(self):
        """Require both headers and body; cascade into the body model."""
        self.validate_required(self.headers, 'headers')
        self.validate_required(self.body, 'body')
        if self.body:
            self.body.validate()

    def to_map(self):
        """Serialize headers and the nested body model to a dict."""
        _map = super().to_map()
        if _map is not None:
            return _map
        serialized = dict()
        if self.headers is not None:
            serialized['headers'] = self.headers
        if self.body is not None:
            serialized['body'] = self.body.to_map()
        return serialized

    def from_map(self, m: dict = None):
        """Populate headers and body from a dict and return ``self``."""
        data = m or dict()
        headers = data.get('headers')
        if headers is not None:
            self.headers = headers
        if data.get('body') is not None:
            self.body = UpdateTheGroupRolesOfGroupMemberResponseBody(
            ).from_map(data['body'])
        return self
class SendRobotInteractiveCardHeaders(TeaModel):
    """Request headers for the SendRobotInteractiveCard API."""

    def __init__(
        self,
        common_headers: Dict[str, str] = None,
        x_acs_dingtalk_access_token: str = None,
    ):
        self.common_headers = common_headers
        self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token

    def validate(self):
        """No client-side validation is required for this model."""
        pass

    def to_map(self):
        """Serialize set headers to a dict keyed by wire names."""
        _map = super().to_map()
        if _map is not None:
            return _map
        pairs = (
            ('commonHeaders', self.common_headers),
            ('x-acs-dingtalk-access-token', self.x_acs_dingtalk_access_token),
        )
        return {key: value for key, value in pairs if value is not None}

    def from_map(self, m: dict = None):
        """Populate the headers from a dict and return ``self``."""
        data = m or dict()
        common = data.get('commonHeaders')
        if common is not None:
            self.common_headers = common
        token = data.get('x-acs-dingtalk-access-token')
        if token is not None:
            self.x_acs_dingtalk_access_token = token
        return self
class SendRobotInteractiveCardRequestSendOptions(TeaModel):
    """Optional delivery settings for a robot interactive card."""

    # (wire key, attribute name) pairs, serialized uniformly by
    # to_map()/from_map().
    _FIELDS = (
        ('atUserListJson', 'at_user_list_json'),
        ('atAll', 'at_all'),
        ('receiverListJson', 'receiver_list_json'),
        ('cardPropertyJson', 'card_property_json'),
    )

    def __init__(
        self,
        at_user_list_json: str = None,
        at_all: bool = None,
        receiver_list_json: str = None,
        card_property_json: str = None,
    ):
        # Users to @-mention, as JSON, e.g.
        # [{"nickName":"...","userId":"..."},{"nickName":"...","unionId":"..."}]
        self.at_user_list_json = at_user_list_json
        # Whether to @-mention everyone.
        self.at_all = at_all
        # Receivers when the message should be visible to part of the group
        # only (empty means visible to everyone), as JSON:
        # [{"userId":"..."},{"unionId":"..."}]
        self.receiver_list_json = receiver_list_json
        # Card-specific properties, as a JSON string.
        self.card_property_json = card_property_json

    def validate(self):
        """No client-side validation is required for this model."""
        pass

    def to_map(self):
        """Serialize set fields to a dict keyed by wire names."""
        _map = super().to_map()
        if _map is not None:
            return _map
        return {key: getattr(self, attr)
                for key, attr in self._FIELDS
                if getattr(self, attr) is not None}

    def from_map(self, m: dict = None):
        """Populate fields from a wire-keyed dict and return ``self``."""
        data = m or dict()
        for key, attr in self._FIELDS:
            value = data.get(key)
            if value is not None:
                setattr(self, attr, value)
        return self
class SendRobotInteractiveCardRequest(TeaModel):
    """Request model for sending a robot interactive card."""

    # (wire key, attribute name) pairs for the flat fields; sendOptions is
    # a nested model and is handled separately.
    _FIELDS = (
        ('RequestId', 'request_id'),
        ('dingAccessTokenType', 'ding_access_token_type'),
        ('dingClientId', 'ding_client_id'),
        ('dingIsvOrgId', 'ding_isv_org_id'),
        ('dingOpenAppId', 'ding_open_app_id'),
        ('dingUid', 'ding_uid'),
        ('cardTemplateId', 'card_template_id'),
        ('openConversationId', 'open_conversation_id'),
        ('singleChatReceiver', 'single_chat_receiver'),
        ('dingTokenGrantType', 'ding_token_grant_type'),
        ('cardBizId', 'card_biz_id'),
        ('dingSuiteKey', 'ding_suite_key'),
        ('robotCode', 'robot_code'),
        ('dingOrgId', 'ding_org_id'),
        ('cardData', 'card_data'),
        ('dingOauthAppId', 'ding_oauth_app_id'),
    )

    def __init__(
        self,
        request_id: str = None,
        ding_access_token_type: str = None,
        ding_client_id: str = None,
        ding_isv_org_id: int = None,
        ding_open_app_id: str = None,
        ding_uid: int = None,
        card_template_id: str = None,
        open_conversation_id: str = None,
        single_chat_receiver: str = None,
        ding_token_grant_type: int = None,
        card_biz_id: str = None,
        ding_suite_key: str = None,
        robot_code: str = None,
        ding_org_id: int = None,
        card_data: str = None,
        ding_oauth_app_id: int = None,
        send_options: SendRobotInteractiveCardRequestSendOptions = None,
    ):
        self.request_id = request_id
        self.ding_access_token_type = ding_access_token_type
        self.ding_client_id = ding_client_id
        self.ding_isv_org_id = ding_isv_org_id
        self.ding_open_app_id = ding_open_app_id
        self.ding_uid = ding_uid
        # Card-platform template id.
        self.card_template_id = card_template_id
        # Encrypted id of the receiving group chat (multi-member chat, not
        # single chat); exactly one of open_conversation_id /
        # single_chat_receiver is required.
        self.open_conversation_id = open_conversation_id
        # JSON describing the single-chat receiver; exactly one of
        # open_conversation_id / single_chat_receiver is required.
        self.single_chat_receiver = single_chat_receiver
        self.ding_token_grant_type = ding_token_grant_type
        # External id uniquely identifying the card (idempotency key; may be
        # reused to update or re-send the same card to several conversations).
        # Re-creating with an existing id does not overwrite the card data.
        self.card_biz_id = card_biz_id
        self.ding_suite_key = ding_suite_key
        # Robot code: group-template robots have a robot id, in-house robots
        # use their appKey, and third-party robots have a robotCode.
        self.robot_code = robot_code
        self.ding_org_id = ding_org_id
        # Card template text/content parameters (card JSON structure).
        self.card_data = card_data
        self.ding_oauth_app_id = ding_oauth_app_id
        # Interactive-card delivery options.
        self.send_options = send_options

    def validate(self):
        """Validate the nested send options, when present."""
        if self.send_options:
            self.send_options.validate()

    def to_map(self):
        """Serialize set fields to a dict keyed by wire names."""
        _map = super().to_map()
        if _map is not None:
            return _map
        result = {key: getattr(self, attr)
                  for key, attr in self._FIELDS
                  if getattr(self, attr) is not None}
        if self.send_options is not None:
            result['sendOptions'] = self.send_options.to_map()
        return result

    def from_map(self, m: dict = None):
        """Populate fields from a wire-keyed dict and return ``self``."""
        data = m or dict()
        for key, attr in self._FIELDS:
            value = data.get(key)
            if value is not None:
                setattr(self, attr, value)
        if data.get('sendOptions') is not None:
            self.send_options = SendRobotInteractiveCardRequestSendOptions(
            ).from_map(data['sendOptions'])
        return self
class SendRobotInteractiveCardResponseBody(TeaModel):
    """Response body of the SendRobotInteractiveCard API."""

    def __init__(
        self,
        process_query_key: str = None,
    ):
        # Query key the caller can use later to fetch the read list.
        self.process_query_key = process_query_key

    def validate(self):
        """No client-side validation is required for this model."""
        pass

    def to_map(self):
        """Serialize to a dict, omitting the field when unset."""
        _map = super().to_map()
        if _map is not None:
            return _map
        if self.process_query_key is None:
            return dict()
        return {'processQueryKey': self.process_query_key}

    def from_map(self, m: dict = None):
        """Populate from a dict and return ``self``."""
        data = m or dict()
        key = data.get('processQueryKey')
        if key is not None:
            self.process_query_key = key
        return self
class SendRobotInteractiveCardResponse(TeaModel):
    def __init__(
        self,
        headers: Dict[str, str] = None,
        body: SendRobotInteractiveCardResponseBody = None,
    ):
        # Raw HTTP response headers.
        self.headers = headers
        # Parsed response body model.
        self.body = body
    def validate(self):
        # Both headers and body are required on a complete response;
        # cascade validation into the body model.
        self.validate_required(self.headers, 'headers')
        self.validate_required(self.body, 'body')
        if self.body:
            self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or | |
<gh_stars>10-100
#! usr/bin/python3.6
"""
Module initially auto generated using V5Automation files from CATIA V5 R28 on 2020-06-11 12:40:47.360445
.. warning::
The notes denoted "CAA V5 Visual Basic Help" are to be used as reference only.
They are there as a guide as to how the visual basic / catscript functions work
and thus help debugging in pycatia.
"""
from pycatia.system_interfaces.setting_controller import SettingController
from pycatia.system_interfaces.system_service import SystemService
class MeasureSettingAtt(SettingController):
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| System.IUnknown
| System.IDispatch
| System.CATBaseUnknown
| System.CATBaseDispatch
| System.AnyObject
| System.SettingController
| MeasureSettingAtt
|
| The interface to access a CATIAMeasureSettingAtt.
"""
    def __init__(self, com_object):
        """
        :param com_object: the CATIAMeasureSettingAtt COM object this
            controller wraps; it is also forwarded to SettingController.
        """
        super().__init__(com_object)
        self.measure_setting_att = com_object
    @property
    def box_display(self) -> bool:
        """
        Whether the measure label background is filled.

        Wraps the ``BoxDisplay`` property of the underlying
        CATIAMeasureSettingAtt: the label background is filled when True;
        only the borders are drawn when False.

        :rtype: bool
        """
        return self.measure_setting_att.BoxDisplay
    @box_display.setter
    def box_display(self, value: bool):
        """
        Set the ``BoxDisplay`` parameter on the wrapped setting controller.

        :param bool value:
        """
        self.measure_setting_att.BoxDisplay = value
    @property
    def line_width(self) -> int:
        """
        The measure line width index.

        Wraps the ``LineWidth`` property of the underlying
        CATIAMeasureSettingAtt; per the upstream docs the index ranges
        from 1 to 63.

        :rtype: int
        """
        return self.measure_setting_att.LineWidth
    @line_width.setter
    def line_width(self, value: int):
        """
        Set the ``LineWidth`` parameter (index 1..63 per upstream docs).

        :param int value:
        """
        self.measure_setting_att.LineWidth = value
    @property
    def part_update_status(self) -> bool:
        """
        Whether the part is updated automatically.

        Wraps the ``PartUpdateStatus`` property of the underlying
        CATIAMeasureSettingAtt.

        :rtype: bool
        """
        return self.measure_setting_att.PartUpdateStatus
    @part_update_status.setter
    def part_update_status(self, value: bool):
        """
        Set the ``PartUpdateStatus`` parameter.

        :param bool value:
        """
        self.measure_setting_att.PartUpdateStatus = value
    @property
    def product_update_status(self) -> bool:
        """
        Whether the product is updated automatically.

        Wraps the ``ProductUpdateStatus`` property of the underlying
        CATIAMeasureSettingAtt.  NOTE(review): the upstream VB help says
        "Product is automatically updated if PartUpdateStatus is true",
        which looks like a copy-paste slip for ProductUpdateStatus.

        :rtype: bool
        """
        return self.measure_setting_att.ProductUpdateStatus
    @product_update_status.setter
    def product_update_status(self, value: bool):
        """
        Set the ``ProductUpdateStatus`` parameter.

        :param bool value:
        """
        self.measure_setting_att.ProductUpdateStatus = value
    @property
    def tilde_display(self) -> bool:
        """
        Whether a tilde is shown for approximate measurements.

        Wraps the ``TildeDisplay`` property of the underlying
        CATIAMeasureSettingAtt: when True, a tilde is displayed for an
        approximate measurement.

        :rtype: bool
        """
        return self.measure_setting_att.TildeDisplay
    @tilde_display.setter
    def tilde_display(self, value: bool):
        """
        Set the ``TildeDisplay`` parameter.

        :param bool value:
        """
        self.measure_setting_att.TildeDisplay = value
    def get_box_display_info(self, io_admin_level: str, io_locked: str) -> bool:
        """
        Retrieve administration information for the ``BoxDisplay`` parameter.

        Wraps ``GetBoxDisplayInfo`` on the underlying CATIAMeasureSettingAtt.

        :param str io_admin_level: receives the administration level that
            imposes the value when the parameter is locked, or that will
            give the value after a reset when it is not locked.
        :param str io_locked: receives whether the parameter is locked.
        :return: whether the parameter has been explicitly modified rather
            than left at its administrated value.
        :rtype: bool
        """
        return self.measure_setting_att.GetBoxDisplayInfo(io_admin_level, io_locked)
    def get_label_color(self) -> tuple:
        """
        Return the measure label color components.

        Wraps ``GetLabelColor``; the underlying VB signature fills the red,
        green and blue components, which the COM bridge yields here as a
        tuple.

        :rtype: tuple
        """
        return self.measure_setting_att.GetLabelColor()
    def get_label_color_info(self, io_admin_level: str, io_locked: str) -> bool:
        """
        Retrieve administration information for the ``LabelColor`` parameter.

        Wraps ``GetLabelColorInfo`` on the underlying CATIAMeasureSettingAtt.

        :param str io_admin_level: receives the administration level that
            imposes the value when the parameter is locked, or that will
            give the value after a reset when it is not locked.
        :param str io_locked: receives whether the parameter is locked.
        :return: whether the parameter has been explicitly modified rather
            than left at its administrated value.
        :rtype: bool
        """
        return self.measure_setting_att.GetLabelColorInfo(io_admin_level, io_locked)
    def get_line_width_info(self, io_admin_level: str, io_locked: str) -> bool:
        """
        Retrieve administration information for the ``LineWidth`` parameter.

        Wraps ``GetLineWidthInfo`` on the underlying CATIAMeasureSettingAtt.

        :param str io_admin_level: receives the administration level that
            imposes the value when the parameter is locked, or that will
            give the value after a reset when it is not locked.
        :param str io_locked: receives whether the parameter is locked.
        :return: whether the parameter has been explicitly modified rather
            than left at its administrated value.
        :rtype: bool
        """
        return self.measure_setting_att.GetLineWidthInfo(io_admin_level, io_locked)
    def get_part_update_status_info(self, io_admin_level: str, io_locked: str) -> bool:
        """
        Retrieve administration information for ``PartUpdateStatus``.

        Wraps ``GetPartUpdateStatusInfo`` on the underlying
        CATIAMeasureSettingAtt.

        :param str io_admin_level: receives the administration level that
            imposes the value when the parameter is locked, or that will
            give the value after a reset when it is not locked.
        :param str io_locked: receives whether the parameter is locked.
        :return: whether the parameter has been explicitly modified rather
            than left at its administrated value.
        :rtype: bool
        """
        return self.measure_setting_att.GetPartUpdateStatusInfo(io_admin_level, io_locked)
def get_product_update_status_info(self, io_admin_level: str, io_locked: str) -> bool:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Func GetProductUpdateStatusInfo(CATBSTR ioAdminLevel,
| CATBSTR ioLocked) As boolean
|
| Retrieves environment informations for the ProductUpdateStatus
| parameter.
| Role:Retrieves the state of the ProductUpdateStatus parameter in the
| current environment.
| | |
<gh_stars>0
# -*- coding: utf-8 -*-
import collections
from datetime import datetime, timedelta
import re
import nose
import numpy as np
import pandas as pd
from pandas.tslib import iNaT, NaT
from pandas import (Series, DataFrame, date_range, DatetimeIndex,
TimedeltaIndex, Timestamp, Float64Index)
from pandas import compat
from pandas.compat import range, lrange, lmap, u
from pandas.core.common import notnull, isnull, array_equivalent
import pandas.core.common as com
import pandas.core.convert as convert
import pandas.util.testing as tm
import pandas.core.config as cf
_multiprocess_can_split_ = True
def test_mut_exclusive():
    """_mut_exclusive: both kwargs set raises; otherwise the set one wins."""
    pattern = "mutually exclusive arguments: '[ab]' and '[ab]'"
    with tm.assertRaisesRegexp(TypeError, pattern):
        com._mut_exclusive(a=1, b=2)
    assert com._mut_exclusive(major=None, major_axis=None) is None
    assert com._mut_exclusive(a=1, b=None) == 1
def test_is_sequence():
    """Tuples/lists are sequences; strings, types and odd objects are not."""
    for seq in ((1, 2), [1, 2]):
        assert com.is_sequence(seq)
    for non_seq in ("abcd", u("abcd"), np.int64):
        assert not com.is_sequence(non_seq)

    class WithGetitem(object):
        # __getitem__ alone does not make something a sequence
        def __getitem__(self):
            return 1

    assert not com.is_sequence(WithGetitem())
def test_get_callable_name():
    """_get_callable_name unwraps partials and names lambdas/callables."""
    from functools import partial
    getname = com._get_callable_name

    def fn(x):
        return x

    lambda_ = lambda x: x
    wrapped_once = partial(fn)
    wrapped_twice = partial(wrapped_once)

    class somecall(object):
        def __call__(self):
            return x  # noqa

    assert getname(fn) == 'fn'
    assert getname(lambda_)
    # nested partials resolve to the innermost function's name
    assert getname(wrapped_once) == 'fn'
    assert getname(wrapped_twice) == 'fn'
    # a callable instance is named after its class
    assert getname(somecall()) == 'somecall'
    # non-callables have no name
    assert getname(1) is None
class TestInferDtype(tm.TestCase):

    def test_infer_dtype_from_scalar(self):
        """_infer_dtype_from_scalar maps scalars to the expected dtypes."""
        # numpy integer scalars keep their exact type
        for int_type in [np.uint8, np.int8, np.uint16, np.int16, np.uint32,
                         np.int32, np.uint64, np.int64]:
            dtype, _ = com._infer_dtype_from_scalar(int_type(12))
            self.assertEqual(dtype, int_type)

        # a plain python int defaults to int64
        dtype, _ = com._infer_dtype_from_scalar(12)
        self.assertEqual(dtype, np.int64)

        # numpy float scalars keep their exact type
        for float_type in [np.float16, np.float32, np.float64]:
            dtype, _ = com._infer_dtype_from_scalar(float_type(12))
            self.assertEqual(dtype, float_type)

        # a plain python float defaults to float64
        dtype, _ = com._infer_dtype_from_scalar(np.float(12))
        self.assertEqual(dtype, np.float64)

        # booleans
        for flag in [True, False]:
            dtype, _ = com._infer_dtype_from_scalar(flag)
            self.assertEqual(dtype, np.bool_)

        # complex
        for value in [np.complex64(1), np.complex128(1)]:
            dtype, _ = com._infer_dtype_from_scalar(value)
            self.assertEqual(dtype, np.complex_)

        import datetime
        # datetime-likes infer datetime64[ns]
        for value in [np.datetime64(1, 'ns'), pd.Timestamp(1),
                      datetime.datetime(2000, 1, 1, 0, 0)]:
            dtype, _ = com._infer_dtype_from_scalar(value)
            self.assertEqual(dtype, 'M8[ns]')

        # timedelta-likes infer timedelta64[ns]
        for value in [np.timedelta64(1, 'ns'), pd.Timedelta(1),
                      datetime.timedelta(1)]:
            dtype, _ = com._infer_dtype_from_scalar(value)
            self.assertEqual(dtype, 'm8[ns]')

        # everything else (dates, tz-aware stamps, strings) is object
        for value in [datetime.date(2000, 1, 1),
                      pd.Timestamp(1, tz='US/Eastern'), 'foo']:
            dtype, _ = com._infer_dtype_from_scalar(value)
            self.assertEqual(dtype, np.object_)
def test_notnull():
    """notnull on scalars, inf handling via option, and Series round-trip."""
    assert notnull(1.)
    assert not notnull(None)
    assert not notnull(np.NaN)

    # inf counts as a value unless use_inf_as_null is switched on
    with cf.option_context("mode.use_inf_as_null", False):
        assert notnull(np.inf)
        assert notnull(-np.inf)
        result = notnull(np.array([1.5, np.inf, 3.5, -np.inf]))
        assert result.all()

    with cf.option_context("mode.use_inf_as_null", True):
        assert not notnull(np.inf)
        assert not notnull(-np.inf)
        result = notnull(np.array([1.5, np.inf, 3.5, -np.inf]))
        assert result.sum() == 2

    # isnull on a Series always yields a Series
    with cf.option_context("mode.use_inf_as_null", False):
        for make in (tm.makeFloatSeries, tm.makeStringSeries,
                     tm.makeObjectSeries, tm.makeTimeSeries,
                     tm.makePeriodSeries):
            assert isinstance(isnull(make()), Series)
def test_isnull():
    """isnull on scalars and element-wise on Series/DataFrame/Panel."""
    assert not isnull(1.)
    assert isnull(None)
    assert isnull(np.NaN)
    assert not isnull(np.inf)
    assert not isnull(-np.inf)

    # series: the result stays a Series
    for make in (tm.makeFloatSeries, tm.makeStringSeries,
                 tm.makeObjectSeries, tm.makeTimeSeries,
                 tm.makePeriodSeries):
        assert isinstance(isnull(make()), Series)

    # frame: matches a column-wise apply
    for df in [tm.makeTimeDataFrame(), tm.makePeriodFrame(),
               tm.makeMixedDataFrame()]:
        tm.assert_frame_equal(isnull(df), df.apply(isnull))

    # panel
    for p in [tm.makePanel(), tm.makePeriodPanel(),
              tm.add_nans(tm.makePanel())]:
        tm.assert_panel_equal(isnull(p), p.apply(isnull))

    # panel 4d
    for p4d in [tm.makePanel4D(), tm.add_nans_panel4d(tm.makePanel4D())]:
        tm.assert_panel4d_equal(isnull(p4d), p4d.apply(isnull))
def test_isnull_lists():
    """isnull works element-wise on (nested) lists."""
    cases = [
        ([[False]], np.array([[False]])),
        ([[1], [2]], np.array([[False], [False]])),
    ]
    for data, expected in cases:
        assert np.array_equal(isnull(data), expected)

    # lists of strings / unicode are never null
    assert not isnull(['foo', 'bar']).any()
    assert not isnull([u('foo'), u('bar')]).any()
def test_isnull_nat():
    """NaT is recognised as null, in lists and object arrays alike."""
    expected = np.array([True])
    assert np.array_equal(isnull([NaT]), expected)
    assert np.array_equal(isnull(np.array([NaT], dtype=object)), expected)
def test_isnull_numpy_nat():
    """Every numpy NaT flavour is treated as null."""
    values = np.array([NaT, np.datetime64('NaT'), np.timedelta64('NaT'),
                       np.datetime64('NaT', 's')])
    tm.assert_numpy_array_equal(isnull(values), np.array([True] * 4))
def test_isnull_datetime():
    """Datetime indexes: only injected NaT slots are null."""
    assert not isnull(datetime.now())
    assert notnull(datetime.now())

    idx = date_range('1/1/1990', periods=20)
    assert notnull(idx).all()

    # inject a NaT into the first slot
    raw = np.asarray(idx)
    raw[0] = iNaT
    idx = DatetimeIndex(raw)
    mask = isnull(idx)
    assert mask[0]
    assert not mask[1:].any()

    # GH 9129: same behavior for a PeriodIndex derived from it
    pidx = idx.to_period(freq='M')
    mask = isnull(pidx)
    assert mask[0]
    assert not mask[1:].any()
    assert not isnull(pidx[1:]).any()
class TestIsNull(tm.TestCase):

    def test_0d_array(self):
        """isnull on 0-d arrays, for both native and object dtypes."""
        for kwargs in ({}, {'dtype': object}):
            self.assertTrue(isnull(np.array(np.nan, **kwargs)))
            self.assertFalse(isnull(np.array(0.0, **kwargs)))
            self.assertFalse(isnull(np.array(0, **kwargs)))
class TestNumberScalar(tm.TestCase):
    """Scalar-kind predicates: is_number / is_bool / is_integer / is_float."""

    def test_is_number(self):
        for value in [True, 1, 1.1, 1 + 3j, np.bool(False), np.int64(1),
                      np.float64(1.1), np.complex128(1 + 3j), np.nan]:
            self.assertTrue(com.is_number(value))
        for value in [None, 'x', datetime(2011, 1, 1),
                      np.datetime64('2011-01-01'),
                      pd.Timestamp('2011-01-01'),
                      pd.Timestamp('2011-01-01', tz='US/Eastern'),
                      timedelta(1000), pd.Timedelta('1 days')]:
            self.assertFalse(com.is_number(value))

        # questionable
        self.assertFalse(com.is_number(np.bool_(False)))
        self.assertTrue(com.is_number(np.timedelta64(1, 'D')))

    def test_is_bool(self):
        for value in [True, np.bool(False), np.bool_(False)]:
            self.assertTrue(com.is_bool(value))
        for value in [1, 1.1, 1 + 3j, np.int64(1), np.float64(1.1),
                      np.complex128(1 + 3j), np.nan, None, 'x',
                      datetime(2011, 1, 1), np.datetime64('2011-01-01'),
                      pd.Timestamp('2011-01-01'),
                      pd.Timestamp('2011-01-01', tz='US/Eastern'),
                      timedelta(1000), np.timedelta64(1, 'D'),
                      pd.Timedelta('1 days')]:
            self.assertFalse(com.is_bool(value))

    def test_is_integer(self):
        for value in [1, np.int64(1)]:
            self.assertTrue(com.is_integer(value))
        for value in [True, 1.1, 1 + 3j, np.bool(False), np.bool_(False),
                      np.float64(1.1), np.complex128(1 + 3j), np.nan, None,
                      'x', datetime(2011, 1, 1), np.datetime64('2011-01-01'),
                      pd.Timestamp('2011-01-01'),
                      pd.Timestamp('2011-01-01', tz='US/Eastern'),
                      timedelta(1000), pd.Timedelta('1 days')]:
            self.assertFalse(com.is_integer(value))

        # questionable
        self.assertTrue(com.is_integer(np.timedelta64(1, 'D')))

    def test_is_float(self):
        for value in [1.1, np.float64(1.1), np.nan]:
            self.assertTrue(com.is_float(value))
        for value in [True, 1, 1 + 3j, np.bool(False), np.bool_(False),
                      np.int64(1), np.complex128(1 + 3j), None, 'x',
                      datetime(2011, 1, 1), np.datetime64('2011-01-01'),
                      pd.Timestamp('2011-01-01'),
                      pd.Timestamp('2011-01-01', tz='US/Eastern'),
                      timedelta(1000), np.timedelta64(1, 'D'),
                      pd.Timedelta('1 days')]:
            self.assertFalse(com.is_float(value))
def test_downcast_conv():
    """_possibly_downcast_to_dtype('infer') keeps or downcasts sensibly."""
    # not within tolerance of integers -> unchanged
    arr = np.array([8.5, 8.6, 8.7, 8.8, 8.9999999999995])
    assert np.array_equal(com._possibly_downcast_to_dtype(arr, 'infer'), arr)

    # within tolerance of integers -> downcast
    for values in ([8., 8., 8., 8., 8.9999999999995],
                   [8., 8., 8., 8., 9.0000000000005]):
        result = com._possibly_downcast_to_dtype(np.array(values), 'infer')
        assert np.array_equal(result, np.array([8, 8, 8, 8, 9]))

    # exact conversions from several source dtypes
    expected = np.array([1, 2])
    for dtype in [np.float64, object, np.int64]:
        result = com._possibly_downcast_to_dtype(
            np.array([1.0, 2.0], dtype=dtype), 'infer')
        tm.assert_almost_equal(result, expected)

    # NaN blocks the downcast
    expected = np.array([1.0, 2.0, np.nan])
    for dtype in [np.float64, object]:
        result = com._possibly_downcast_to_dtype(
            np.array([1.0, 2.0, np.nan], dtype=dtype), 'infer')
        tm.assert_almost_equal(result, expected)

    # empty arrays convert to the requested dtype
    for dtype in [np.int32, np.float64, np.float32, np.bool_,
                  np.int64, object]:
        result = com._possibly_downcast_to_dtype(np.array([], dtype=dtype),
                                                 'int64')
        tm.assert_almost_equal(result, np.array([], dtype=np.int64))
        assert result.dtype == np.int64
def test_array_equivalent():
    """array_equivalent treats NaN/NaT as equal only in matching slots."""
    equal_pairs = [
        (np.array([np.nan, np.nan]), np.array([np.nan, np.nan])),
        (np.array([np.nan, 1, np.nan]), np.array([np.nan, 1, np.nan])),
        (np.array([np.nan, None], dtype='object'),
         np.array([np.nan, None], dtype='object')),
        (np.array([np.nan, 1 + 1j], dtype='complex'),
         np.array([np.nan, 1 + 1j], dtype='complex')),
        (Float64Index([0, np.nan]), Float64Index([0, np.nan])),
        (DatetimeIndex([0, np.nan]), DatetimeIndex([0, np.nan])),
        (TimedeltaIndex([0, np.nan]), TimedeltaIndex([0, np.nan])),
        (DatetimeIndex([0, np.nan], tz='US/Eastern'),
         DatetimeIndex([0, np.nan], tz='US/Eastern')),
    ]
    for left, right in equal_pairs:
        assert array_equivalent(left, right)

    unequal_pairs = [
        (np.array([np.nan, 1 + 1j], dtype='complex'),
         np.array([np.nan, 1 + 2j], dtype='complex')),
        (np.array([np.nan, 1, np.nan]), np.array([np.nan, 2, np.nan])),
        (np.array(['a', 'b', 'c', 'd']), np.array(['e', 'e'])),
        (Float64Index([0, np.nan]), Float64Index([1, np.nan])),
        (DatetimeIndex([0, np.nan]), DatetimeIndex([1, np.nan])),
        (TimedeltaIndex([0, np.nan]), TimedeltaIndex([1, np.nan])),
        (DatetimeIndex([0, np.nan], tz='US/Eastern'),
         DatetimeIndex([1, np.nan], tz='US/Eastern')),
        # tz-naive vs tz-aware, and differing zones, are never equivalent
        (DatetimeIndex([0, np.nan]),
         DatetimeIndex([0, np.nan], tz='US/Eastern')),
        (DatetimeIndex([0, np.nan], tz='CET'),
         DatetimeIndex([0, np.nan], tz='US/Eastern')),
        (DatetimeIndex([0, np.nan]), TimedeltaIndex([0, np.nan])),
    ]
    for left, right in unequal_pairs:
        assert not array_equivalent(left, right)
def test_datetimeindex_from_empty_datetime64_array():
    """An empty datetime64 array of any resolution yields an empty index."""
    for unit in ('ms', 'us', 'ns'):
        empty = np.array([], dtype='datetime64[%s]' % unit)
        assert (len(DatetimeIndex(empty)) == 0)
def test_nan_to_nat_conversions():
    """Assigning NaN into datetime64 data should read back as NaT (iNaT)."""
    df = DataFrame(dict({
        'A': np.asarray(
            lrange(10), dtype='float64'),
        'B': Timestamp('20010101')
    }))
    df.iloc[3:6, :] = np.nan
    # NaN written into the datetime column reads back as NaT's integer code
    result = df.loc[4, 'B'].value
    assert (result == iNaT)
    s = df['B'].copy()
    # Write NaN directly through the internal block manager
    s._data = s._data.setitem(indexer=tuple([slice(8, 9)]), value=np.nan)
    assert (isnull(s[8]))
    # numpy < 1.7.0 is wrong
    from distutils.version import LooseVersion
    if LooseVersion(np.__version__) >= '1.7.0':
        assert (s[8].value == np.datetime64('NaT').astype(np.int64))
def test_any_none():
    """_any_none is truthy iff at least one argument is None."""
    assert com._any_none(1, 2, 3, None)
    assert not com._any_none(1, 2, 3, 4)
def test_all_not_none():
    """_all_not_none is truthy iff no argument is None."""
    assert com._all_not_none(1, 2, 3, 4)
    assert not com._all_not_none(1, 2, 3, None)
    assert not com._all_not_none(None, None, None, None)
def test_iterpairs():
    """iterpairs yields consecutive overlapping pairs of its input."""
    source = [1, 2, 3, 4]
    assert (list(com.iterpairs(source)) == [(1, 2), (2, 3), (3, 4)])
def test_split_ranges():
    """split_ranges must cover exactly the True runs of a boolean mask."""
    def to_bits(value, width):
        "return int(value) as a base-2 string padded to the given width"
        return ''.join(str((value >> shift) & 1)
                       for shift in range(width - 1, -1, -1))
    def check(mask):
        covered = 0
        for start, end in com.split_ranges(mask):
            covered += end - start
            # every emitted range must contain only True entries
            assert 0 not in mask[start:end]
        # ranges plus the False entries must account for the whole mask
        n_false = sum(np.array(mask) == 0)
        assert covered + n_false == len(mask)
    # exhaustively test all possible mask sequences of length 8
    width = 8
    for value in range(2 ** width):
        bits = lmap(int, list(to_bits(value, width)))  # count up in base 2
        check([bit == 1 for bit in bits])
    # base cases
    check([])
    check([0])
    check([1])
def test_map_indices_py():
    """map_indices_py maps each value to its position in the sequence."""
    values = [4, 3, 2, 1]
    assert (com.map_indices_py(values) == {4: 0, 3: 1, 2: 2, 1: 3})
def test_union():
    """union of two disjoint lists contains every element of both."""
    left, right = [1, 2, 3], [4, 5, 6]
    assert ((left + right) == sorted(com.union(left, right)))
def test_difference():
a = [1, 2, 3]
b = [1, 2, 3, 4, 5, 6]
| |
# -*- coding: utf-8 -*-
"""
Created on Jul 29, 2012
@author: marko
"""
from PIL import Image, ImageTk
from Tkconstants import RIGHT, LEFT, BOTH, E, W, HORIZONTAL
from Tkinter import Frame, Label, Button, Toplevel, OptionMenu, Scale, Entry, \
Message, Spinbox, IntVar, StringVar, DoubleVar
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, \
NavigationToolbar2TkAgg
from threading import Thread
from tkFont import nametofont
import Queue
import json
import logging
import numpy as np
import os
from pvmismatch.pvmismatch_lib.pvconstants import MODSIZES, NUMBERCELLS, \
NUMBERMODS, NUMBERSTRS
from pvmismatch.pvmismatch_lib.pvmodule import STD128, STD72, STD96, STD24
from pvmismatch import PVsystem as PVsystem_cls, PVmodule, PVcell
# use absolute imports instead of relative, so modules are portable
from pvmismatch import __name__ as __pkg_name__, __file__ as __pkg_file__
from pvmismatch.pvmismatch_tk.advCnf_tk import AdvCnf_tk
from pvmismatch.pvmismatch_tk.pvexceptions import PVValidationError
from pvmismatch.pvmismatch_tk.pvstring_tk import PVstring_tk
import webbrowser
INTEGERS = '0123456789'
FLOATS = '.' + INTEGERS
PVAPP_TXT = __pkg_name__
READY_MSG = 'Ready'
LANGUAGE = 'English'
PKG_BASEDIR = os.path.dirname(__pkg_file__)
JSONDIR = os.path.join(PKG_BASEDIR, 'pvmismatch_json')
SPLOGO = os.path.join(PKG_BASEDIR, 'res', 'logo_bg.png')
DOCS = os.path.join(PKG_BASEDIR, 'docs', '_build', 'html', 'index.html')
logging.basicConfig(level=logging.DEBUG,
format='[%(levelname)s] (%(threadName)-10s) %(message)s')
class waitWidget(Frame):  # pylint: disable=R0924,R0904
    """
    A modal wait dialog that shows something is happening.

    Polls ``queue`` every 100 [ms], displaying an incrementing tick count,
    and quits its mainloop as soon as the queue has an item in it.
    """
    def __init__(self, queue, master):
        # queue signals completion: timer() stops once it is non-empty
        self.queue = queue
        Frame.__init__(self, master)
        self.pack(fill="both")
        self.focus_set()  # get the focus
        self.grab_set()  # make this window modal
        master.resizable(False, False)  # not resizable
        master.title("")  # no title
        # don't let user close window using X - instead call timer
        master.protocol("WM_DELETE_WINDOW", self.timer)
        self.wait = IntVar(master, 0, "wait")
        Label(master, bitmap="hourglass").pack(fill="both")
        Label(master, text="Please wait ...").pack(fill="both")
        Label(master, textvariable=self.wait).pack(fill="both")
        self.timer()

    def timer(self):
        """
        A callback that counts 100 [ms] ticks until SELF.QUEUE has something
        in it, then quits the mainloop.
        """
        wait = self.wait.get() + 1
        if not self.queue.empty():
            # when queue is filled, quit loop and print elapsed time
            logging.debug('elapsed time = %2.1f [s]', wait * 0.10)
            self.quit()
            # BUG FIX: stop here - previously this fell through and kept
            # scheduling further after() callbacks even after quitting.
            return
        self.wait.set(wait)
        # loop over this callback every 100[ms] until queue is filled
        self.after(100, self.timer)
def setqueue(original_function, queue):
    """
    Wrap ORIGINAL_FUNCTION in a new function QUEUEFUN that calculates the
    results and puts them in a queue.
    :param original_function: The function the results of which are added to \
        the queue.
    :param queue: The queue to which the results of the original_function are \
        added.
    :return queuefun: The new function.
    """
    def queuefun(*args, **kwargs):
        """
        Call ORIGINAL_FUNCTION with ARGS & KWARGS and put the results in QUEUE.
        NOTE: this is function *call*, not a function object!
        This is equivalent to:
        >>> results = original_function(*args, **kwargs)  # calc results
        >>> queue.put(results)  # put results in queue
        >>> results = queue.get()  # get results from queue
        """
        logging.debug('Starting')
        result = original_function(*args, **kwargs)
        queue.put(result)
        logging.debug('Exiting')
    return queuefun
def waitbox(original_function):
    """
    Create a new function that adds a waitbox widget to original_function.
    :param original_function: A function to wrap with a waitbox waitWidget
    """
    def new_function(*args, **kwargs):
        """
        Run ORIGINAL_FUNCTION in a background thread, feeding its result
        into a queue, while a waitWidget in a new Toplevel keeps the UI
        alive. The widget's mainloop exits once the queue is filled; the
        window is then destroyed and the result returned.
        """
        results = Queue.Queue()
        worker = Thread(target=setqueue(original_function, results),
                        args=args, kwargs=kwargs)
        worker.start()
        master = Toplevel()
        waitWidget(results, master).mainloop()
        # destroy the modal window once the worker has delivered its result
        master.destroy()
        return results.get()
    return new_function
# Constructing a PVsystem runs in a worker thread behind a modal wait dialog
# (see waitbox/waitWidget above), so the GUI stays responsive meanwhile.
PVsystem = waitbox(PVsystem_cls)  # wrap PVsystem with a waitbox waitWidget
class PVapplicaton(Frame):
"""
classdocs
"""
def __init__(self, master=None):
"""
Constructor
"""
Frame.__init__(self, master, name='pvApplication',
bg='black', padx=5, pady=5)
# set black background, pad sides with 15 points, top/bottom 5 points
# fill=BOTH fills in padding with background color
# w/o fill=BOTH padding is default color
# side=TOP is the default
self.pack(fill=BOTH)
master.resizable(False, False) # not resizable in x or y
master.title(PVAPP_TXT) # set title bar of master (a.k.a. root)
master.protocol("WM_DELETE_WINDOW", self._quit) # close window to quit
self.validationConstants = self.readJSON('validationConstants')
self.messagetext = self.readJSON('messagetext' + '.' + LANGUAGE)
MAX_STRINGS = self.validationConstants["pvapplication"]["numStrs"]
MAX_MODULES = self.validationConstants["pvapplication"]["numMods"]
MAX_SUNS = self.validationConstants["pvapplication"]["sysEe"]
CAPTION_FONT = nametofont('TkCaptionFont') # font for titles
# PVsystem
pvSys = self.pvSys = PVsystem()
# variables
numStrs = self.numStrs = IntVar(self, NUMBERSTRS, 'numStrs')
numMods = self.numMods = IntVar(self, NUMBERMODS, 'numMods')
numCells = self.numCells = IntVar(self, NUMBERCELLS, 'numCells')
txtIsys = self.txtIsys = DoubleVar(self, name='txtIsys')
txtVsys = self.txtVsys = DoubleVar(self, name='txtVsys')
txtPsys = self.txtPsys = DoubleVar(self, name='txtPsys')
txtImp = self.txtImp = StringVar(self, name='txtImp')
txtVmp = self.txtVmp = StringVar(self, name='txtVmp')
txtPmp = self.txtPmp = StringVar(self, name='txtPmp')
txtIsc = self.txtIsc = StringVar(self, name='txtIsc')
txtVoc = self.txtVoc = StringVar(self, name='txtVoc')
txtFF = self.txtFF = StringVar(self, name='txtFF')
txtEff = self.txtEff = StringVar(self, name='txtEff')
sysEe = self.sysEe = DoubleVar(self, 1, name='sysEe')
txtImp.set("{:7.3f}".format(self.pvSys.Imp)) # [A]
txtVmp.set("{:7.3f}".format(self.pvSys.Vmp)) # [V]
txtPmp.set("{:7.3f}".format(self.pvSys.Pmp / 1000)) # [kW]
txtIsc.set("{:7.3f}".format(self.pvSys.Isc)) # [A]
txtVoc.set("{:7.3f}".format(self.pvSys.Voc)) # [V]
txtFF.set("{:7.3f}".format(self.pvSys.FF * 100)) # [%]
txtEff.set("{:7.3f}".format(self.pvSys.eff * 100)) # [%]
self.msgtext = StringVar(self, READY_MSG, 'msgtext')
# must register vcmd and invcmd as Tcl functions
vcmd = (self.register(self.validateWidget),
'%d', '%i', '%P', '%s', '%S', '%v', '%V', '%W')
invcmd = (self.register(self.invalidWidget),
'%d', '%i', '%P', '%s', '%S', '%v', '%V', '%W')
# SP logo
# convert image to tk-compatible format (.gif, .pgm, or .ppm)
self.SPlogo = ImageTk.PhotoImage(Image.open(SPLOGO))
# bg='black' fills extra space with black
# anchor=W aligns photoimage on left side, NW is no different
# padding is ignored by images, use borderwidth
Label(self, image=self.SPlogo, borderwidth=5, bg='black',
anchor=W).pack(fill=BOTH)
# fill=BOTH expands the photoimage to fill parent frame
# w/o fill=BOTH photoimage is centered in frame even with anchor=W
# Intro text
introText = 'PVmismatch calculates I-V and P-V curves as well as the'
introText += ' max power point (MPP) for any sized system.\nSet the'
introText += ' number of strings in the system, the number of modules'
introText += ' per string and the number cells per module.'
# anchor=W aligns message on left side, NW is no different
# fg='white' sets text color to white, default is black, so it doesn't
# show on black background
# default aspect is 150%, about as wide as high, or set width>0
Message(self, text=introText, width=750, bg='black', fg='white',
anchor=W).pack(fill=BOTH)
# fill=BOTH expands the message to fill parent frame
# w/o fill=BOTH message is centered in frame even with anchor=W
# PVsystem frame
pvSysFrame = self.pvSysFrame = Frame(master, name='pvSysFrame')
# fill=BOTH keeps widgets in frame on left when window is resized
pvSysFrame.pack(fill=BOTH)
# PVsystem matplotlib figure canvas
self.pvSysPlotFrame = Frame(pvSysFrame, name='pvSysPlotFrame')
pvSysPlotFrame = self.pvSysPlotFrame
pvSysPlotFrame.pack(side=RIGHT)
pvSysPlot = self.pvSysPlot = pvSys.plotSys()
self.pvSysFigCanvas = FigureCanvasTkAgg(pvSysPlot,
master=pvSysPlotFrame,
resize_callback=None)
pvSysFigCanvas = self.pvSysFigCanvas
pvSysFigCanvas.get_tk_widget()._name = 'pvSysFigCanvas' # IGNORE:W0212
pvSysFigCanvas.show()
# NB: FigureCanvasTkAgg._tkcanvas is FigureCanvasTkAgg.get_tk_widget()
pvSysFigCanvas.get_tk_widget().pack(fill=BOTH)
pvSysToolbar = NavigationToolbar2TkAgg(pvSysFigCanvas, pvSysPlotFrame)
pvSysToolbar.update()
pvSysToolbar.pack(fill=BOTH)
# PVsystem data frame
pvSysDataFrame = self.pvSysDataFrame = Frame(pvSysFrame,
name='pvSysDataFrame')
pvSysDataFrame.pack(side=LEFT)
_row = 0
Label(pvSysDataFrame,
text='PVsystem', font=CAPTION_FONT).grid(row=_row, columnspan=3,
sticky=W)
# number of strings
_row += 1 # row 1
Label(pvSysDataFrame,
text='Number of Strings').grid(row=_row, columnspan=2, sticky=W)
# use textVar to set number of strings from LOAD, RESET or default
spinboxCnf = {'name': 'numStrSpinbox', 'from_': 1, 'to': MAX_STRINGS,
'textvariable': numStrs, 'width': 5, 'validate': 'all',
'validatecommand': vcmd, 'invalidcommand': invcmd,
'command': self.updatePVsys}
self.numStrSpinbox = Spinbox(pvSysDataFrame, cnf=spinboxCnf)
self.numStrSpinbox.bind("<Return>", self.keyBinding)
self.numStrSpinbox.grid(row=_row, column=2)
# number of modules
_row += 1 # row 2
Label(pvSysDataFrame,
text='Number of Modules').grid(row=_row, columnspan=2, sticky=W)
# number of modules spinbox
spinboxCnf = {'name': 'numModSpinbox', 'from_': 1, 'to': MAX_MODULES,
'textvariable': numMods, 'width': 5, 'validate': 'all',
'validatecommand': vcmd, 'invalidcommand': invcmd,
'command': self.updatePVsys}
self.numModSpinbox = Spinbox(pvSysDataFrame, cnf=spinboxCnf)
self.numModSpinbox.bind("<Return>", self.keyBinding)
self.numModSpinbox.grid(row=_row, column=2)
# number of cells
_row += 1 # row 3
Label(pvSysDataFrame,
text='Number of Cells').grid(row=_row, columnspan=2, sticky=W)
# http://www.logilab.org/card/pylintfeatures#basic-checker
# pylint: disable = W0142
self.numCellOption = OptionMenu(pvSysDataFrame, numCells, *MODSIZES,
command=self.updatePVsys)
# pylint: enable = W0142
self.numCellOption._name = 'numCellOption' # IGNORE:W0212
self.numCellOption.grid(row=_row, column=2)
# Advanced Configuration button
_row += 1 # row 14
buttonCnf = {'name': 'advCnfButton', 'text': 'Advanced Configuration',
'command': self.startAdvCnf_tk}
pvStrButton = self.pvStrButton = Button(pvSysDataFrame, buttonCnf)
pvStrButton.grid(row=_row, columnspan=3, sticky=(E + W))
# slider to explore IV curves
_row += 1 # row 4, 5 & 6
self.pvSysScale = Scale(pvSysDataFrame, orient=HORIZONTAL,
label='I-V Curve', font=CAPTION_FONT,
command=self.getIV, showvalue=False,
from_=0, to=(pvSys.pvconst.npts - 1))
self.pvSysScale.grid(row=_row, columnspan=3, sticky=(E + W))
# Isys
Label(pvSysDataFrame, text='Isys [A]').grid(row=(_row + 1))
self.pvIsys = Entry(pvSysDataFrame, textvariable=txtIsys,
width=7)
self.pvIsys.grid(row=(_row + 2))
# Vsys
Label(pvSysDataFrame, text='Vsys [V]').grid(row=(_row + 1), column=1)
self.pvVsys = Entry(pvSysDataFrame, textvariable=txtVsys,
width=7)
self.pvVsys.grid(row=(_row + 2), column=1)
| |
"""Internal module for accessing EuXFEL HDF5 files
This includes convenience features for getting the metadata & indexes from a
file, as well as machinery to close less recently accessed files, so we don't
run into the limit on the number of open files.
"""
from collections import defaultdict, OrderedDict
import h5py
import numpy as np
import os
import os.path as osp
import resource
from weakref import WeakValueDictionary
from .exceptions import SourceNameError
# Track all FileAccess objects - {path: FileAccess}
file_access_registry = WeakValueDictionary()
class OpenFilesLimiter(object):
    """
    Cooperates with FileAccess to keep the number of opened HDF5 files
    under the given limit, closing the files accessed longest ago.
    """
    def __init__(self, maxfiles=128):
        self._maxfiles = maxfiles
        # Only the keys matter: an OrderedDict doubles as an LRU queue
        # with O(1) membership tests and removal by key.
        self._cache = OrderedDict()

    @property
    def maxfiles(self):
        """The maximum number of files kept open."""
        return self._maxfiles

    @maxfiles.setter
    def maxfiles(self, maxfiles):
        """Set the new file limit and close files over the limit."""
        self._maxfiles = maxfiles
        self.close_old_files()

    def _check_files(self):
        # Drop cached paths whose FileAccess object no longer exists
        live = (path for path in self._cache if path in file_access_registry)
        self._cache = OrderedDict.fromkeys(live)

    def n_open_files(self):
        """Number of files currently believed to be open."""
        self._check_files()
        return len(self._cache)

    def close_old_files(self):
        """Close least-recently-used files until we're within the limit."""
        if len(self._cache) <= self.maxfiles:
            return
        # Now check how many paths still have an existing FileAccess object
        remaining = self.n_open_files()
        while remaining > self.maxfiles:
            oldest, _ = self._cache.popitem(last=False)
            acc = file_access_registry.get(oldest, None)
            if acc is not None:
                acc.close()
            remaining -= 1

    def touch(self, filename):
        """
        Add/move the touched file to the fresh end of the `cache`.
        If adding a new file takes it over the limit of open files, another
        file will be closed.
        For use of the file cache, FileAccess should use `touch(filename)`
        every time it provides the underlying instance of `h5py.File` for
        reading.
        """
        if filename in self._cache:
            self._cache.move_to_end(filename)
        else:
            self._cache[filename] = None
            self.close_old_files()

    def closed(self, filename):
        """Discard a closed file from the cache"""
        self._cache.pop(filename, None)
def init_open_files_limiter():
    """Create the process-wide OpenFilesLimiter.

    Tries to raise the soft limit for open files to the hard limit
    (1024 -> 4096 on Maxwell), then budgets half of the limit for the
    HDF5 files managed here.
    """
    nofile = resource.getrlimit(resource.RLIMIT_NOFILE)
    hard_limit = nofile[1]
    try:
        resource.setrlimit(resource.RLIMIT_NOFILE, (hard_limit, hard_limit))
    except (ValueError, OSError):
        # BUG FIX: setrlimit can be refused (e.g. sandboxed/restricted
        # environments); carry on with the existing soft limit.
        pass
    if hard_limit == resource.RLIM_INFINITY:
        # BUG FIX: RLIM_INFINITY is -1, so halving it would give a
        # nonsensical negative limit; fall back to a sane default.
        maxfiles = 2048
    else:
        maxfiles = hard_limit // 2
    return OpenFilesLimiter(maxfiles)
open_files_limiter = init_open_files_limiter()
class FileAccess:
    """Access an EuXFEL HDF5 file.
    This does not necessarily keep the real file open, but opens it on demand.
    It assumes that the file is not changing on disk while this object exists.
    Parameters
    ----------
    filename: str
        A path to an HDF5 file
    """
    # Lazily opened h5py.File handle (None while the file is closed)
    _file = None
    # Cached METADATA/dataFormatVersion value; resolved on first access
    _format_version = None
    # os.stat() of the file taken when metadata was read (for run files map)
    metadata_fstat = None
    def __new__(cls, filename, _cache_info=None):
        # Create only one FileAccess for each path, and store it in a registry
        filename = osp.abspath(filename)
        inst = file_access_registry.get(filename, None)
        if inst is None:
            inst = file_access_registry[filename] = super().__new__(cls)
        return inst
    def __init__(self, filename, _cache_info=None):
        # NOTE: __init__ runs even when __new__ returned a pre-existing
        # instance, so the metadata below is (re)read for each construction.
        self.filename = osp.abspath(filename)
        if _cache_info:
            # Metadata supplied by a cache - avoids opening the file at all
            self.train_ids = _cache_info['train_ids']
            self.control_sources = _cache_info['control_sources']
            self.instrument_sources = _cache_info['instrument_sources']
            self.validity_flag = _cache_info.get('flag', None)
        else:
            # Read metadata from the file itself; train ID 0 entries are
            # placeholders and are dropped.
            tid_data = self.file['INDEX/trainId'][:]
            self.train_ids = tid_data[tid_data != 0]
            self.control_sources, self.instrument_sources = self._read_data_sources()
            self.validity_flag = None
        if self.validity_flag is None:
            if self.format_version == '0.5':
                # Old files have no INDEX/flag - estimate validity instead
                self.validity_flag = self._guess_valid_trains()
            else:
                self.validity_flag = self.file['INDEX/flag'][:len(self.train_ids)].astype(bool)
        if self._file is not None:
            # Store the stat of the file as it was when we read the metadata.
            # This is used by the run files map.
            self.metadata_fstat = os.stat(self.file.id.get_vfd_handle())
        # {(source, group): (firsts, counts)}
        self._index_cache = {}
        # {source: set(keys)}
        self._keys_cache = {}
        # {source: set(keys)} - including incomplete sets
        self._known_keys = defaultdict(set)
    @property
    def file(self):
        """The h5py.File, opened on demand and tracked by the files limiter.
        Touching the limiter may close other, less recently used files.
        """
        open_files_limiter.touch(self.filename)
        if self._file is None:
            self._file = h5py.File(self.filename, 'r')
        return self._file
    @property
    def valid_train_ids(self):
        """Train IDs selected by the boolean validity mask."""
        return self.train_ids[self.validity_flag]
    def close(self):
        """Close* the HDF5 file this refers to.
        The file may not actually be closed if there are still references to
        objects from it, e.g. while iterating over trains. This is what HDF5
        calls 'weak' closing.
        """
        if self._file:
            # Dropping our reference lets h5py close the file once unused
            self._file = None
        open_files_limiter.closed(self.filename)
    @property
    def format_version(self):
        """File format version string ('0.5' for files predating versioning)."""
        if self._format_version is None:
            version_ds = self.file.get('METADATA/dataFormatVersion')
            if version_ds is not None:
                self._format_version = version_ds[0].decode('ascii')
            else:
                # The first version of the file format had no version number.
                # Numbering started at 1.0, so we call the first version 0.5.
                self._format_version = '0.5'
        return self._format_version
    def _read_data_sources(self):
        """Read METADATA to find the sources in this file.
        Returns (control_sources, instrument_sources) as frozensets of names.
        """
        control_sources, instrument_sources = set(), set()
        # The list of data sources moved in file format 1.0
        if self.format_version == '0.5':
            data_sources_path = 'METADATA/dataSourceId'
        else:
            data_sources_path = 'METADATA/dataSources/dataSourceId'
        for source in self.file[data_sources_path][:]:
            if not source:
                continue
            source = source.decode()
            category, _, h5_source = source.partition('/')
            if category == 'INSTRUMENT':
                # Instrument entries look like 'device:channel/group';
                # the source name we keep is 'device:channel'.
                device, _, chan_grp = h5_source.partition(':')
                chan, _, group = chan_grp.partition('/')
                source = device + ':' + chan
                instrument_sources.add(source)
                # TODO: Do something with groups?
            elif category == 'CONTROL':
                control_sources.add(h5_source)
            else:
                raise ValueError("Unknown data category %r" % category)
        return frozenset(control_sources), frozenset(instrument_sources)
    def _guess_valid_trains(self):
        """Estimate a boolean validity mask for train IDs (format 0.5 files)."""
        # File format version 1.0 includes a flag which is 0 if a train ID
        # didn't come from the time server. We use this to skip bad trains,
        # especially for AGIPD.
        # Older files don't have this flag, so this tries to estimate validity.
        # The goal is to have a monotonic sequence within the file with the
        # fewest trains skipped.
        train_ids = self.train_ids
        flag = np.ones_like(train_ids, dtype=bool)
        for ix in np.nonzero(train_ids[1:] <= train_ids[:-1])[0]:
            # train_ids[ix] >= train_ids[ix + 1]
            invalid_before = train_ids[:ix+1] >= train_ids[ix+1]
            invalid_after = train_ids[ix+1:] <= train_ids[ix]
            # Which side of the downward jump in train IDs would need fewer
            # train IDs invalidated?
            if np.count_nonzero(invalid_before) < np.count_nonzero(invalid_after):
                flag[:ix+1] &= ~invalid_before
            else:
                flag[ix+1:] &= ~invalid_after
        return flag
    def __hash__(self):
        # Identity is determined by the absolute file path alone
        return hash(self.filename)
    def __eq__(self, other):
        return isinstance(other, FileAccess) and (other.filename == self.filename)
    def __repr__(self):
        return "{}({})".format(type(self).__name__, repr(self.filename))
    def __getstate__(self):
        """ Allows pickling `FileAccess` instance. """
        state = self.__dict__.copy()
        # The h5py handle cannot be pickled; it will be reopened on demand
        state['_file'] = None
        return state
    def __getnewargs__(self):
        """Ensure that __new__ gets the filename when unpickling"""
        return (self.filename,)
    @property
    def all_sources(self):
        """All control & instrument source names in this file."""
        return self.control_sources | self.instrument_sources
    def get_index(self, source, group):
        """Get first-index & count arrays (one entry per train) for a source
        and data group.
        Indices are cached; this appears to provide some performance benefit.
        """
        try:
            return self._index_cache[(source, group)]
        except KeyError:
            ix = self._read_index(source, group)
            self._index_cache[(source, group)] = ix
            return ix
    def _read_index(self, source, group):
        """Get first index & count for a source.
        This is 'real' reading when the requested index is not in the cache.
        """
        ntrains = len(self.train_ids)
        ix_group = self.file['/INDEX/{}/{}'.format(source, group)]
        firsts = ix_group['first'][:ntrains]
        if 'count' in ix_group:
            counts = ix_group['count'][:ntrains]
        else:
            # Older files store first/last/status instead of count; status 0
            # means no data, which multiplying by zero reproduces.
            status = ix_group['status'][:ntrains]
            counts = np.uint64((ix_group['last'][:ntrains] - firsts + 1) * status)
        return firsts, counts
    def get_keys(self, source):
        """Get keys for a given source name
        Keys are found by walking the HDF5 file, and cached for reuse.
        """
        try:
            return self._keys_cache[source]
        except KeyError:
            pass
        if source in self.control_sources:
            group = '/CONTROL/' + source
        elif source in self.instrument_sources:
            group = '/INSTRUMENT/' + source
        else:
            raise SourceNameError(source)
        res = set()
        def add_key(key, value):
            # visititems passes HDF5-style 'a/b/c' paths; keys use 'a.b.c'
            if isinstance(value, h5py.Dataset):
                res.add(key.replace('/', '.'))
        self.file[group].visititems(add_key)
        self._keys_cache[source] = res
        return res
    def has_source_key(self, source, key):
        """Check if the given source and key exist in this file
        This doesn't scan for all the keys in the source, as .get_keys() does.
        """
        try:
            return key in self._keys_cache[source]
        except KeyError:
            pass
        if key in self._known_keys[source]:
            return True
        if source in self.control_sources:
            path = '/CONTROL/{}/{}'.format(source, key.replace('.', '/'))
        elif source in self.instrument_sources:
            path = '/INSTRUMENT/{}/{}'.format(source, key.replace('.', '/'))
        else:
            raise SourceNameError(source)
        # getclass=True avoids instantiating a full dataset object.
        # Only positive results are cached: _known_keys may be incomplete.
        if self.file.get(path, getclass=True) is h5py.Dataset:
            self._known_keys[source].add(key)
            return True
        return False
    def dset_proxy(self, ds_path: str):
        """Return a picklable DatasetProxy for the dataset at ds_path."""
        return DatasetProxy(self, ds_path)
class DatasetProxy:
"""A picklable reference to an HDF5 dataset, suitable for dask.array
Dask tries to do this automatically for h5py Dataset objects, but with
some limitations:
- It only works with Dask distributed, not Dask's local schedulers.
- Dask storing references to h5py Datasets keeps the files open, breaking
our attempts to manage the number of open files.
"""
def __init__(self, file_acc: FileAccess, ds_path: str):
# We could just store the file name and use h5py on demand, but storing
# our FileAccess object lets it use our cache of open files.
self.file_acc = file_acc
self.ds_path = ds_path
ds = file_acc.file[ds_path]
# dask.array expects these three array-like attributes:
self.shape = ds.shape
self.ndim = ds.ndim
self.dtype = ds.dtype
def __getitem__(self, item):
| |
from __future__ import print_function
import json
import zlib
import sys
import inspect
import os
import atexit
import queue
import logging
import pickle
import cloudpickle
from datetime import datetime
from modelchimp import settings
from .sklearn_tracker import sklearn_loader
from .tracker_thread import TrackerThread
from .event_queue import event_queue
from .connection_thread import WSConnectionThread, RestConnection
from .utils import generate_uid, current_string_datetime, is_uuid4_pattern
from .enums import ClientEvent
import logging
logger = logging.getLogger(__name__)
class Tracker:
def __init__(self,
key,
host=None,
experiment_name=None,
tracking=True,
auto_log=False,
existing_exp_id=None):
if settings.current_tracker:
settings.current_tracker.tracker_thread.stop()
self.key = key
self.experiment_name = experiment_name
self.host = host
self.rest = RestConnection("%s/" %(self.host,), self.key)
self.web_socket = WSConnectionThread("%s/ws/tracker/" % (self.host,))
self.tracking = tracking
self.auto_log = auto_log
self._experiment_start = current_string_datetime()
self._experiment_end = None
self._experiment_file = self._get_file_path()
self.experiment_id = existing_exp_id if existing_exp_id else generate_uid()
self._initialize()
    def _initialize(self):
        """Register the experiment server-side and start worker threads.
        Does nothing when tracking is disabled, and returns early (leaving
        no tracker thread) if the experiment could not be created.
        """
        if not self.tracking:
            return
        # Instantiate the experiment
        experiment_created_flag = self.rest.create_experiment(self.experiment_id,
                                                              self.experiment_name,
                                                              self._experiment_file)
        if not experiment_created_flag:
            return
        # Start the websocket
        self.web_socket.start()
        # Start the tracker thread
        self.tracker_thread = TrackerThread(self.web_socket, self.key, self.experiment_id)
        self.tracker_thread.start()
        # Send experiment start
        self._add_to_queue({
            'type' : ClientEvent.EXPERIMENT_START,
            'value' : self._experiment_start
        })
        # Add the reference to the current tracker to the settings
        settings.current_tracker = self
        # Scrape the parameters from the script objects
        if self.auto_log:
            sklearn_loader()
        # Make sure the end event is sent even if the user never calls end()
        atexit.register(self._on_end)
def _get_file_path(self):
if self._check_notebook():
return None
frame = inspect.stack()[-1]
module = inspect.getmodule(frame[0])
return module.__file__
def _check_notebook(self):
IS_NOTEBOOK = False
try:
ipy_str = str(type(get_ipython()))
IS_NOTEBOOK = True
except:
pass
return IS_NOTEBOOK
def _on_end(self):
"Send the experiment end event on completion"
self._experiment_end = current_string_datetime()
self.tracker_thread.stop()
def _add_to_queue(self, event):
event['experiment_id'] = self.experiment_id
event['key'] = self.key
event_queue.put(event)
def end(self):
'''
End the experiment. Required for Jupyter notebook or looping through experiments
Parameters
----------
None
Returns
-------
None
'''
self._on_end()
def add_param(self, param_name, param_value):
'''
Log the parameter name and its value
Parameters
----------
param_name : Name of the parameter
param_value : Value of the parameter
Returns
-------
None
'''
# Perform the necessary checks
if not isinstance(param_name, str):
logger.warning('param_name should be a string')
return
if param_name == "":
logger.warning('param_name cannot be empty')
return
if not self.tracking:
return
# Add the event to the queue
eval_event = {'type': ClientEvent.MODEL_PARAM, 'value': {}}
eval_event['value'] = { param_name : param_value }
self._add_to_queue(eval_event)
def add_multiple_params(self, params_dict):
'''
Log multiple parameters
Parameters
----------
params_dict : Dict containing parameter's name as key and parameter's
value as value
Returns
-------
None
'''
# Perform the necessary checks
if not isinstance(params_dict, dict):
logger.warning('Please provide a dict for multiple parameters')
return
if not self.tracking:
return
for k in params_dict.keys():
self.add_param(k, params_dict[k])
def add_metric(self, metric_name, metric_value, epoch=None):
'''
Log the metric's name and its value
Parameters
----------
metric_name : Name of the metric
metric_value : Value of the metric
Returns
-------
None
'''
# Perform the necessary checks
if not isinstance(metric_name, str):
logger.warning('metric_name should be a string')
return
if metric_name == "":
logger.warning('metric_name cannot be empty')
return
if not ( isinstance(metric_value, int) or isinstance(metric_value, float) ):
logger.warning('metric_value should be a number')
return
if epoch is not None and not ( isinstance(epoch, int) or
isinstance(epoch, float) ):
logger.warning('epoch should be a number')
return
if not self.tracking:
return
# Add the event to the queue
metric_event = {'type': ClientEvent.EVAL_PARAM,
'value': {},
'epoch': epoch}
metric_event['value'] = { metric_name : metric_value }
self._add_to_queue(metric_event)
def add_multiple_metrics(self, metrics_dict, epoch=None):
'''
Log multiple metrics
Parameters
----------
metrics_dict : Dict containing metric's name as key and parameter's
value as value
Returns
-------
None
'''
# Perform the necessary checks
if not isinstance(metrics_dict, dict):
logger.warning('Please provide a dict for multiple parameters')
return
if not self.tracking:
return
for k in metrics_dict:
self.add_metric(k, metrics_dict[k], epoch)
def add_duration_at_epoch(self, tag, seconds_elapsed, epoch):
'''
Log the duration at a particular epoch
Parameters
----------
tag : Name of the duration
seconds_elapsed: Number of seconds elapsed for the duration
epoch: Current epoch number
Returns
-------
None
'''
# Perform the necessary checks
if not isinstance(tag, str):
logger.warning('tag should be a string')
return
if tag == "":
logger.warning('tag cannot be empty')
return
if not ( isinstance(seconds_elapsed, int) or
isinstance(seconds_elapsed, float) ):
logger.warning('seconds_elapsed should be a number')
return
if epoch is None:
logger.warning('epoch should be present')
return
if epoch is not None and not ( isinstance(epoch, int) or
isinstance(epoch, float) ):
logger.warning('epoch should be a number')
return
if not self.tracking:
return
# Add the event to the queue
duration_event = {'type': ClientEvent.DURATION_PARAM,
'value': {},
'epoch': epoch}
duration_event['value'] = { tag : seconds_elapsed }
self._add_to_queue(duration_event)
def add_dataset_id(self, id):
'''
Log a user provided id for the dataset used
Parameters
----------
id : Id of the dataset
Returns
-------
None
'''
if not isinstance(id, (int,float,str)):
logger.warning('Dataset id should be a number or string')
return
if not self.tracking:
return
dataset_id_event = {'type': ClientEvent.DATASET_ID,
'value': id}
self._add_to_queue(dataset_id_event)
def add_custom_object(self, name, object):
'''
Save the pickled version of the custom object
Parameters
----------
name : Name for the object
custom object: Python object to be stored
Returns
-------
None
'''
if not isinstance(name, str):
logger.warning('Custom object name should be a string')
return
if not self.rest:
logger.info("Please instantiate the ModelChimp Tracker to store custom objects")
return
if not self.tracking:
return
compressed_object, filesize = self.__get_compressed_picke(object)
result = {
"name": name,
"filesize": filesize,
"project": self.rest.project_id,
"ml_model": self.rest.model_id,
}
custom_object_url = 'api/experiment-custom-object/create/%s/' % (self.rest.project_id,)
logger.info("Uploading custom object: %s" % name)
save_request = self.rest.post(custom_object_url, data=result,
files={'custom_object_file': compressed_object})
if save_request.status_code == 201:
logger.info("%s: custom object was successfully saved" % name)
else:
logger.info("Custom object could not be saved.")
def pull_custom_object(self, id):
'''
Pull the custom object from ModelChimp server to the script
Parameters
----------
id : Id of the dataset
Returns
-------
Object
'''
pull_object_url = 'api/experiment-custom-object/retrieve/%s/?custom-object-id=%s' % (self.rest.project_id, id)
if not isinstance(id, str):
logger.warning('Custom object id should be a string')
return
# Check the id is of correct pattern
if not is_uuid4_pattern(id):
logger.warning('Given custom object id is of wrong pattern. Please, insert the correct id')
return
if not self.tracking:
return
logger.info("Downloading custom object with the id: %s" % id)
request = self.rest.get(pull_object_url)
if request.status_code == 400:
logger.info("Unable to retrieve custom object. Is it a valid custom object id?")
custom_object = request.content
custom_object = zlib.decompress(custom_object, 31)
custom_object = pickle.loads(custom_object)
return custom_object
def add_mat_plot(self, name="exportedPlot.png", plt=None):
'''
Store a matplot
Parameters
----------
name : Name of the plot
plt: Matplot object
Returns
-------
None
'''
if not isinstance(name, str):
logger.warning('Custom plot name should be a string')
return
if not self.rest:
logger.info("Please instantiate the ModelChimp Tracker to store custom plots")
return
if not self.tracking:
return
axes = plt.gca()
if axes.has_data() is False:
logger.warning("Empty plot")
return
#Export the matplot as an image
filepath = ("./" + name + ".svg").strip()
plt.savefig(filepath, bbox_inches="tight", format="svg")
imageFile = open(filepath, 'rb')
filesize = os.path.getsize(filepath)
result = {
"name": name,
"filesize": filesize,
"project": self.rest.project_id,
"ml_model": self.rest.model_id,
}
mat_plot_url = 'api/experiment-mat-plot/create/%s/' % (self.rest.project_id,)
logger.info("Uploading custom plot: %s" % name)
save_request = self.rest.post(mat_plot_url, data=result,
files={'mat_plot_file': imageFile})
imageFile.close()
if save_request.status_code == 201:
logger.info("%s: custom plot was successfully saved" % name)
else:
logger.info("Custom plot could not be saved.")
if os.path.exists(filepath):
logger.debug("Removing temporary file.")
os.remove(filepath)
def pull_params(self, experiment_id):
'''
Pull the parameters of another experiment to the client script
Parameters
----------
experiment_id : Id of an experiment
Returns
-------
Dict
'''
pull_params_url = 'api/experiment-pull-param/?experiment-id=' + experiment_id
if not isinstance(experiment_id, str):
logger.warning('experiment_id should be a string')
return
if not self.tracking:
return
request = self.rest.get(pull_params_url)
if request.status_code == 400:
logger.info("Have you provided the correct experiment id?")
return
if request.status_code == 403:
logger.info("You don't have permission for this experiment")
return
params = json.loads(request.text)
return params
def add_image(self, filepath, metric_dict=None, custom_file_name=None, epoch=None):
'''
Upload image. This is useful for computer vision use cases
Parameters
----------
filepath : File path of the image
metric_dict : Dict of metrics to be stored for the image
custom_file_name : An alternate name to be used for storing the image
Epoch: Epoch at which the image was used for prediction
Returns
-------
None
'''
url = 'api/experiment-images/add-image/'
if not os.path.isfile(filepath):
logger.warning('Image file does not exist at %s' % (filepath))
return
if metric_dict and not isinstance(metric_dict, dict):
logger.warning('metric_dict should be a dictionary for file: %s' % (filepath))
return
if metric_dict:
for k in metric_dict:
if not isinstance(metric_dict[k], (int,float)):
| |
import asyncio
import json
import logging
import time
import warnings
from collections import defaultdict
from decimal import Decimal
from typing import Any, AsyncIterable, Dict, List, Optional
from dateutil.parser import parse as dateparse
from dydx3.errors import DydxApiError
from hummingbot.connector.derivative.dydx_perpetual.dydx_perpetual_auth import DydxPerpetualAuth
from hummingbot.connector.derivative.dydx_perpetual.dydx_perpetual_client_wrapper import DydxPerpetualClientWrapper
from hummingbot.connector.derivative.dydx_perpetual.dydx_perpetual_fill_report import DydxPerpetualFillReport
from hummingbot.connector.derivative.dydx_perpetual.dydx_perpetual_in_flight_order import DydxPerpetualInFlightOrder
from hummingbot.connector.derivative.dydx_perpetual.dydx_perpetual_order_book_tracker import \
DydxPerpetualOrderBookTracker
from hummingbot.connector.derivative.dydx_perpetual.dydx_perpetual_position import DydxPerpetualPosition
from hummingbot.connector.derivative.dydx_perpetual.dydx_perpetual_user_stream_tracker import \
DydxPerpetualUserStreamTracker
from hummingbot.connector.derivative.dydx_perpetual.dydx_perpetual_utils import build_api_factory
from hummingbot.connector.derivative.perpetual_budget_checker import PerpetualBudgetChecker
from hummingbot.connector.exchange_base import ExchangeBase
from hummingbot.connector.perpetual_trading import PerpetualTrading
from hummingbot.connector.trading_rule import TradingRule
from hummingbot.core.clock import Clock
from hummingbot.core.data_type.cancellation_result import CancellationResult
from hummingbot.core.data_type.limit_order import LimitOrder
from hummingbot.core.data_type.order_book import OrderBook
from hummingbot.core.data_type.trade_fee import AddedToCostTradeFee, TokenAmount
from hummingbot.core.data_type.transaction_tracker import TransactionTracker
from hummingbot.core.event.event_listener import EventListener
from hummingbot.core.event.events import (
BuyOrderCompletedEvent,
BuyOrderCreatedEvent,
FundingInfo,
FundingPaymentCompletedEvent,
MarketEvent,
MarketOrderFailureEvent,
OrderCancelledEvent,
OrderExpiredEvent,
OrderFilledEvent,
OrderType,
PositionAction,
PositionMode,
PositionSide,
SellOrderCompletedEvent,
SellOrderCreatedEvent,
TradeType,
)
from hummingbot.core.network_iterator import NetworkStatus
from hummingbot.core.utils.async_utils import safe_ensure_future, safe_gather
from hummingbot.core.utils.tracking_nonce import get_tracking_nonce
from hummingbot.logger import HummingbotLogger
# Lazily-initialised module logger (populated by DydxPerpetualDerivative.logger()).
s_logger = None
# Shared Decimal constants to avoid repeated construction.
s_decimal_0 = Decimal(0)
s_decimal_NaN = Decimal("nan")
def now() -> int:
    """Current wall-clock time as a millisecond timestamp.

    Note: the time is truncated to whole seconds before scaling, so the
    result always has second resolution (it is a multiple of 1000).
    """
    return 1000 * int(time.time())
# Short aliases for the market event tags this connector emits.
BUY_ORDER_COMPLETED_EVENT = MarketEvent.BuyOrderCompleted
SELL_ORDER_COMPLETED_EVENT = MarketEvent.SellOrderCompleted
ORDER_CANCELLED_EVENT = MarketEvent.OrderCancelled
ORDER_EXPIRED_EVENT = MarketEvent.OrderExpired
ORDER_FILLED_EVENT = MarketEvent.OrderFilled
ORDER_FAILURE_EVENT = MarketEvent.OrderFailure
MARKET_FUNDING_PAYMENT_COMPLETED_EVENT_TAG = MarketEvent.FundingPaymentCompleted
BUY_ORDER_CREATED_EVENT = MarketEvent.BuyOrderCreated
SELL_ORDER_CREATED_EVENT = MarketEvent.SellOrderCreated
API_CALL_TIMEOUT = 10.0  # seconds allowed for a REST API call
# ==========================================================
# Grace period before an unrecognized exchange order is treated as gone.
# ("DEBOUCE" is a long-standing misspelling of "debounce"; the name is kept
# unchanged since other modules may reference it.)
UNRECOGNIZED_ORDER_DEBOUCE = 60  # seconds
class LatchingEventResponder(EventListener):
    """Event listener that latches once a callback has accepted a fixed
    number of events, letting a coroutine block until that happens."""

    def __init__(self, callback: any, num_expected: int):
        super().__init__()
        self._callback = callback
        self._completed = asyncio.Event()
        self._remaining = num_expected

    def __call__(self, arg: any):
        # Only events accepted by the callback count towards completion.
        accepted = self._callback(arg)
        if accepted:
            self._reduce()

    def _reduce(self):
        # Latch the event once the expected number of acceptances is reached.
        self._remaining -= 1
        if self._remaining <= 0:
            self._completed.set()

    async def wait_for_completion(self, timeout: float):
        """Wait up to `timeout` seconds; return True if the latch was set."""
        try:
            await asyncio.wait_for(self._completed.wait(), timeout=timeout)
        except asyncio.TimeoutError:
            pass
        return self._completed.is_set()

    def cancel_one(self):
        # A cancellation counts as one fewer event to wait for.
        self._reduce()
class DydxPerpetualDerivativeTransactionTracker(TransactionTracker):
    """Transaction tracker that forwards timeout notifications to the owning
    exchange connector."""

    def __init__(self, owner):
        super().__init__()
        # The DydxPerpetualDerivative instance that owns this tracker.
        self._owner = owner

    def did_timeout_tx(self, tx_id: str):
        # Call the (Cython-level) base implementation explicitly, then let
        # the owner react to the timed-out transaction.
        TransactionTracker.c_did_timeout_tx(self, tx_id)
        self._owner.did_timeout_tx(tx_id)
class DydxPerpetualDerivative(ExchangeBase, PerpetualTrading):
    """Exchange connector for dydx perpetual markets, combining the generic
    exchange plumbing (ExchangeBase) with perpetual-specific state
    (PerpetualTrading)."""

    # Polling intervals in seconds; which one is used presumably depends on
    # user-stream health (the polling loop is not shown here — confirm).
    SHORT_POLL_INTERVAL = 5.0
    LONG_POLL_INTERVAL = 120.0
    @classmethod
    def logger(cls) -> HummingbotLogger:
        """Return the module-wide logger, creating and caching it on first use."""
        global s_logger
        if s_logger is None:
            s_logger = logging.getLogger(__name__)
        return s_logger
def __init__(
self,
dydx_perpetual_api_key: str,
dydx_perpetual_api_secret: str,
dydx_perpetual_passphrase: str,
dydx_perpetual_account_number: int,
dydx_perpetual_ethereum_address: str,
dydx_perpetual_stark_private_key: str,
trading_pairs: Optional[List[str]] = None,
trading_required: bool = True,
):
ExchangeBase.__init__(self)
PerpetualTrading.__init__(self)
self._real_time_balance_update = True
self._api_factory = build_api_factory()
self._order_book_tracker = DydxPerpetualOrderBookTracker(
trading_pairs=trading_pairs,
api_factory=self._api_factory,
)
self._tx_tracker = DydxPerpetualDerivativeTransactionTracker(self)
self._trading_required = trading_required
self._ev_loop = asyncio.get_event_loop()
self._poll_notifier = asyncio.Event()
self._last_poll_timestamp = 0
self._polling_update_task = None
self._budget_checker = PerpetualBudgetChecker(self)
self.dydx_client: DydxPerpetualClientWrapper = DydxPerpetualClientWrapper(
api_key=dydx_perpetual_api_key,
api_secret=dydx_perpetual_api_secret,
passphrase=<PASSWORD>,
account_number=dydx_perpetual_account_number,
stark_private_key=dydx_perpetual_stark_private_key,
ethereum_address=dydx_perpetual_ethereum_address,
)
# State
self._dydx_auth = DydxPerpetualAuth(self.dydx_client)
self._user_stream_tracker = DydxPerpetualUserStreamTracker(
dydx_auth=self._dydx_auth, api_factory=self._api_factory
)
self._user_stream_event_listener_task = None
self._user_stream_tracker_task = None
self._lock = asyncio.Lock()
self._trading_rules = {}
self._in_flight_orders = {}
self._trading_pairs = trading_pairs
self._fee_rules = {}
self._reserved_balances = {}
self._unclaimed_fills = defaultdict(set)
self._in_flight_orders_by_exchange_id = {}
self._orders_pending_ack = set()
self._position_mode = PositionMode.ONEWAY
self._margin_fractions = {}
self._trading_pair_last_funding_payment_ts: Dict[str, float] = {}
    @property
    def name(self) -> str:
        """Connector name used to identify this exchange within hummingbot."""
        return "dydx_perpetual"
    @property
    def ready(self) -> bool:
        """True when every subsystem reported by status_dict is ready."""
        return all(self.status_dict.values())
    @property
    def status_dict(self) -> Dict[str, bool]:
        """Per-subsystem readiness flags; trading-only checks are waived when
        trading is not required."""
        return {
            "order_books_initialized": len(self._order_book_tracker.order_books) > 0,
            "account_balances": len(self._account_balances) > 0 if self._trading_required else True,
            "trading_rule_initialized": len(self._trading_rules) > 0 if self._trading_required else True,
            "funding_info_available": len(self._funding_info) > 0 if self._trading_required else True,
            "user_stream_tracker_ready": self._user_stream_tracker.data_source.last_recv_time > 0
            if self._trading_required
            else True,
        }
    # ----------------------------------------
    # Markets & Order Books

    @property
    def order_books(self) -> Dict[str, OrderBook]:
        """Live order books keyed by trading pair."""
        return self._order_book_tracker.order_books
def get_order_book(self, trading_pair: str):
order_books = self._order_book_tracker.order_books
if trading_pair not in order_books:
raise ValueError(f"No order book exists for '{trading_pair}'.")
return order_books[trading_pair]
@property
def limit_orders(self) -> List[LimitOrder]:
retval = []
for in_flight_order in self._in_flight_orders.values():
dydx_flight_order = in_flight_order
if dydx_flight_order.order_type in [OrderType.LIMIT, OrderType.LIMIT_MAKER]:
retval.append(dydx_flight_order.to_limit_order())
return retval
    @property
    def budget_checker(self) -> PerpetualBudgetChecker:
        """Budget checker used to validate order candidates before placing."""
        return self._budget_checker
    # ----------------------------------------
    # Account Balances

    def get_balance(self, currency: str):
        """Total balance of `currency`; Decimal 0 when the asset is unknown."""
        return self._account_balances.get(currency, Decimal(0))
    def get_available_balance(self, currency: str):
        """Unreserved balance of `currency`; Decimal 0 when the asset is unknown."""
        return self._account_available_balances.get(currency, Decimal(0))
    # ==========================================================
    # Order Submission
    # ----------------------------------------------------------

    @property
    def in_flight_orders(self) -> Dict[str, DydxPerpetualInFlightOrder]:
        """Currently tracked orders, keyed by client order id."""
        return self._in_flight_orders
    def supported_order_types(self):
        """Order types this connector can place."""
        return [OrderType.LIMIT, OrderType.LIMIT_MAKER]
    def _set_exchange_id(self, in_flight_order, exchange_order_id):
        """Record the exchange-assigned id on the order and index it for
        exchange-id lookups."""
        in_flight_order.update_exchange_order_id(exchange_order_id)
        self._in_flight_orders_by_exchange_id[exchange_order_id] = in_flight_order
def _claim_fills(self, in_flight_order, exchange_order_id):
updated_with_fills = False
# Claim any fill reports for this order that came in while we awaited this exchange id
if exchange_order_id in self._unclaimed_fills:
for fill in self._unclaimed_fills[exchange_order_id]:
in_flight_order.register_fill(fill.id, fill.amount, fill.price, fill.fee)
if self.position_key(in_flight_order.trading_pair) in self._account_positions:
position = self._account_positions[in_flight_order.trading_pair]
position.update_from_fill(in_flight_order, fill.price, fill.amount, self.get_balance("USD"))
updated_with_fills = True
else:
self._account_positions[
self.position_key(in_flight_order.trading_pair)
] = DydxPerpetualPosition.from_dydx_fill(
in_flight_order, fill.amount, fill.price, self.get_balance("USD")
)
del self._unclaimed_fills[exchange_order_id]
self._orders_pending_ack.discard(in_flight_order.client_order_id)
if len(self._orders_pending_ack) == 0:
# We are no longer waiting on any exchange order ids, so all uncalimed fills can be discarded
self._unclaimed_fills.clear()
if updated_with_fills:
self._update_account_positions()
async def place_order(
self,
client_order_id: str,
trading_pair: str,
amount: Decimal,
is_buy: bool,
order_type: OrderType,
price: Decimal,
limit_fee: Decimal,
expiration: int,
) -> Dict[str, Any]:
order_side = "BUY" if is_buy else "SELL"
post_only = False
if order_type is OrderType.LIMIT_MAKER:
post_only = True
dydx_order_type = "LIMIT" if order_type in [OrderType.LIMIT, OrderType.LIMIT_MAKER] else "MARKET"
return await self.dydx_client.place_order(
market=trading_pair,
side=order_side,
amount=str(amount),
price=str(price),
order_type=dydx_order_type,
postOnly=post_only,
clientId=client_order_id,
limit_fee=str(limit_fee),
expiration=expiration,
)
    async def execute_order(
        self, order_side, client_order_id, trading_pair, amount, order_type, position_action, price
    ):
        """
        Completes the common tasks from execute_buy and execute_sell. Quantizes the order's amount and price, and
        validates the order against the trading rules before placing this order.
        """
        # NOTE: order_side is a TradeType member despite the generic name
        # (execute_buy / execute_sell pass TradeType.BUY / TradeType.SELL).
        if position_action not in [PositionAction.OPEN, PositionAction.CLOSE]:
            raise ValueError("Specify either OPEN_POSITION or CLOSE_POSITION position_action.")
        # Quantize order
        amount = self.quantize_order_amount(trading_pair, amount)
        price = self.quantize_order_price(trading_pair, price)
        # Check trading rules
        if order_type.is_limit_type():
            trading_rule = self._trading_rules[trading_pair]
            if amount < trading_rule.min_order_size:
                # Zeroing the amount forces the min-size check below to raise.
                amount = s_decimal_0
        elif order_type == OrderType.MARKET:
            trading_rule = self._trading_rules[trading_pair]
        # NOTE(review): if order_type is neither a limit type nor MARKET,
        # trading_rule is never bound and the checks below raise NameError —
        # confirm all callers only pass supported order types.
        if order_type.is_limit_type() and trading_rule.supports_limit_orders is False:
            raise ValueError("LIMIT orders are not supported")
        elif order_type == OrderType.MARKET and trading_rule.supports_market_orders is False:
            raise ValueError("MARKET orders are not supported")
        if amount < trading_rule.min_order_size:
            raise ValueError(
                f"Order amount({str(amount)}) is less than the minimum allowable amount({str(trading_rule.min_order_size)})"
            )
        if amount > trading_rule.max_order_size:
            raise ValueError(
                f"Order amount({str(amount)}) is greater than the maximum allowable amount({str(trading_rule.max_order_size)})"
            )
        if amount * price < trading_rule.min_notional_size:
            raise ValueError(
                f"Order notional value({str(amount * price)}) is less than the minimum allowable notional value for an order ({str(trading_rule.min_notional_size)})"
            )
        try:
            created_at = self.time_now_s()
            # Track the order locally before submitting it to the exchange.
            self.start_tracking_order(
                order_side,
                client_order_id,
                order_type,
                created_at,
                None,
                trading_pair,
                price,
                amount,
                self._leverage[trading_pair],
                position_action.name,
            )
            # Order expires 10 minutes after creation; fee capped at 1.5%.
            expiration = created_at + 600
            limit_fee = 0.015
            try:
                creation_response = await self.place_order(
                    client_order_id,
                    trading_pair,
                    amount,
                    order_side is TradeType.BUY,
                    order_type,
                    price,
                    limit_fee,
                    expiration,
                )
            except asyncio.TimeoutError:
                # We timed out while placing this order. We may have successfully submitted the order, or we may have had connection
                # issues that prevented the submission from taking place.
                # Note that if this order is live and we never recieved the exchange_order_id, we have no way of re-linking with this order
                # TODO: we can use the /v2/orders endpoint to get a list of orders that match the parameters of the lost orders and that will contain
                # the clientId that we have set. This can resync orders, but wouldn't be a garuntee of finding them in the list and would require a fair amout
                # of work in handling this re-syncing process
                # This would be somthing like
                # self._lost_orders.append(client_order_id) # add this here
                # ...
                # some polling loop:
                #   get_orders()
                #   see if any lost orders are in the returned orders and set the exchange id if so
                # ...
                # TODO: ensure this is the right exception from place_order with our wrapped library call...
                return
            # Verify the response from the exchange
            if "order" not in creation_response.keys():
                raise Exception(creation_response["errors"][0]["msg"])
            order = creation_response["order"]
            status = order["status"]
            if status not in ["PENDING", "OPEN"]:
                raise Exception(status)
            dydx_order_id = order["id"]
            in_flight_order = self._in_flight_orders.get(client_order_id)
            if in_flight_order is not None:
                # Link the exchange id and claim any early-arriving fills.
                self._set_exchange_id(in_flight_order, dydx_order_id)
                self._claim_fills(in_flight_order, dydx_order_id)
                # Begin tracking order
                self.logger().info(
                    f"Created {in_flight_order.description} order {client_order_id} for {amount} {trading_pair}."
                )
            else:
                self.logger().info(f"Created order {client_order_id} for {amount} {trading_pair}.")
        except Exception as e:
            # Any failure: log, stop tracking, and emit a failure event.
            self.logger().warning(
                f"Error submitting {order_side.name} {order_type.name} order to dydx for "
                f"{amount} {trading_pair} at {price}."
            )
            self.logger().info(e, exc_info=True)
            # Stop tracking this order
            self.stop_tracking_order(client_order_id)
            self.trigger_event(ORDER_FAILURE_EVENT, MarketOrderFailureEvent(now(), client_order_id, order_type))
    async def execute_buy(
        self,
        order_id: str,
        trading_pair: str,
        amount: Decimal,
        order_type: OrderType,
        position_action: PositionAction,
        price: Optional[Decimal] = Decimal("NaN"),
    ):
        """Place a BUY order via execute_order and emit a BuyOrderCreated
        event on success (or an order-failure event on ValueError)."""
        try:
            await self.execute_order(TradeType.BUY, order_id, trading_pair, amount, order_type, position_action, price)
            tracked_order = self.in_flight_orders.get(order_id)
            # execute_order stops tracking on failure, so a tracked order
            # here means the submission succeeded.
            if tracked_order is not None:
                self.trigger_event(
                    BUY_ORDER_CREATED_EVENT,
                    BuyOrderCreatedEvent(
                        now(),
                        order_type,
                        trading_pair,
                        Decimal(amount),
                        Decimal(price),
                        order_id,
                        tracked_order.creation_timestamp),
                )
                # Issue any other events (fills) for this order that arrived while waiting for the exchange id
                self._issue_order_events(tracked_order)
        except ValueError as e:
            # never tracked, so no need to stop tracking
            self.trigger_event(ORDER_FAILURE_EVENT, MarketOrderFailureEvent(now(), order_id, order_type))
            self.logger().warning(f"Failed to place {order_id} on dydx. {str(e)}")
async def execute_sell(
self,
order_id: str,
trading_pair: str,
amount: Decimal,
order_type: OrderType,
position_action: PositionAction,
price: Optional[Decimal] = | |
from hms import app
from datetime import datetime, date
from flask import render_template, session, url_for, request, redirect, flash, session, g
from .Forms import Login_form, Patient_create, Patient_delete, delete_result, Patient_update, issue_medicine_form, add_diagnosis
from .Models import UserStore, Patient_test, Patient_Medicine, Patient_details, Diagnosis, Medicine
from .Config import db
# store patient ID for querying
# NOTE(review): these module-level globals hold per-request state and are
# shared across all users and sessions — not safe under concurrent access.
pid = 0
# Medicines staged for issue, keyed by medicine name (dict), or None.
issue_med = None
quantity = []
add_test = None
@app.context_processor
def inject_now():
    """Expose today's date to every template as `now`."""
    return dict(now=date.today())
# Function to implement session management and check the category of stakeholder accessing the website
def check_session():
    """Return the stakeholder role of the logged-in user, or None.

    The role is derived from the last character of the username and is also
    cached in the session under 'stakeholder' for known roles.
    """
    if 'user' not in session or not session['user']:
        return None
    # Map the username's trailing letter to a stakeholder role.
    roles = {
        'A': 'registration_desk_executive',
        'D': 'diagnostic_executive',
        'P': 'pharmacy_executive',
    }
    role = roles.get(session['user'][-1])
    if role is not None:
        session['stakeholder'] = role
    return role
# ==================================================================================
# Home and Login
# ==================================================================================
@app.route("/", methods=["GET", "POST"])
@app.route("/login", methods=["GET", "POST"])
def main():
if check_session():
return render_template('index.html', user=session['user'])
form = Login_form()
if request.method == 'POST':
# Validate the form
if form.validate_on_submit():
# Check the credentials
if UserStore.query.filter_by(login=request.form.get('username'), password=request.form.get('password')).first():
flash("Login successful!", "success")
session['user'] = request.form.get('username')
return redirect(url_for('main'))
else:
flash("Invalid credentials!", "danger")
return render_template('login.html', title="Login", form=form)
return render_template('login.html', title="Login", form=form)
@app.route("/index")
def index():
if not check_session():
flash('You are not authorised to access that! Please login with proper credentials.', 'danger')
return redirect(url_for('main'))
return render_template("index.html")
# ==================================================================================
# Patient Registration
# ==================================================================================
@app.route("/CreatePatient", methods=['GET', 'POST'])
def create_patient():
# Check that an authorised user only can access this functionality
if check_session() != 'registration_desk_executive':
flash('You are not authorised to access that! Please login with proper credentials.', 'danger')
return redirect(url_for('main'))
# If form has been submitted
form = Patient_create()
if request.method == 'POST':
if form.validate_on_submit():
ssn_id = form.ssn_id.data
name = form.patient_name.data
age = form.patient_age.data
date = form.date.data
bed_type = form.Type_of_bed.data
address = form.address.data
state = request.form.get('stt')
city = request.form.get('state_list')
# Add the patient to the database
details = Patient_details(
name, age, ssn_id, date, bed_type, address, city, state, status="Admitted")
db.session.add(details)
db.session.commit()
flash("Patient creation initiated successfully", "success")
return render_template("create_patient.html", title="Create Patient", form=form)
# ==================================================================================
# Delete an existing patient
# ==================================================================================
@app.route("/DeletePatient", methods=["GET", "POST"])
def delete_patient():
# Check that an authorised user only can access this functionality
if check_session() != 'registration_desk_executive':
flash('You are not authorised to access that! Please login with proper credentials.', 'danger')
return redirect(url_for('main'))
form = Patient_delete()
if form.validate_on_submit():
global pid
pid = int(form.patient_id.data)
# Query for patient_details
patient = Patient_details.query.filter(
Patient_details.id == int(form.patient_id.data))
for patient_1 in patient:
if patient_1:
form2 = delete_result()
flash("patient found", "success")
return render_template("delete_patient2.html", title="Delete patient", patient=patient, form=form2)
flash("patient not found", "danger")
return render_template("delete_patient.html", title="Delete Patient", form=form)
@app.route("/deletepatient2", methods=["GET", "POST"])
def delete_patient2():
# Check that an authorised user only can access this functionality
if check_session() != 'registration_desk_executive':
flash('You are not authorised to access that! Please login with proper credentials.', 'danger')
return redirect(url_for('main'))
form2 = delete_result()
if form2.validate_on_submit():
global pid
print(pid)
# delete query
Patient_details.query.filter_by(id=pid).delete()
db.session.commit()
flash("patient deleted successfully", "success")
return redirect(url_for('delete_patient'))
else:
flash("patient delete failed . Please try again", "danger")
return redirect(url_for('delete_patient'))
# ==================================================================================
# Search for existing patient using Patient ID
# ==================================================================================
@app.route("/SearchPatient", methods=["GET", "POST"])
def search_patient():
# Check that an authorised user only can access this functionality
if check_session() != 'registration_desk_executive':
flash('You are not authorised to access that! Please login with proper credentials.', 'danger')
return redirect(url_for('main'))
form = Patient_delete()
if request.method == 'POST':
if form.validate_on_submit():
global pid
pid = int(form.patient_id.data)
# Query for patient_details
patient = Patient_details.query.filter(
Patient_details.id == int(form.patient_id.data))
for patient_1 in patient:
if patient_1:
flash("patient found", "success")
return render_template("search_patient.html", title="Search patient", patient=patient, form=form)
flash("patient not found", "danger")
return render_template("search_patient.html", title="Search Patient", form=form)
# ==================================================================================
# Update the detains of an existing patient
# ==================================================================================
@app.route("/UpdatePatient", methods=["GET", "POST"])
def update_patient():
flag = 0
# Check that an authorised user only can access this functionality
if check_session() != 'registration_desk_executive':
flash('You are not authorised to access that! Please login with proper credentials.', 'danger')
return redirect(url_for('main'))
form = Patient_delete()
if form.validate_on_submit():
global pid
pid = int(form.patient_id.data)
# Query for patient details
patient = Patient_details.query.filter(
Patient_details.id == int(form.patient_id.data))
for patient_1 in patient:
if patient_1:
flash("patient found", "success")
flag = 1
# Display the update form
form2 = Patient_update(Type_of_bed=patient_1.bed_type, date=patient_1.admission_date,
address=patient_1.address, patient_name=patient_1.name, patient_age=patient_1.age)
return render_template("update_patient.html", title="Update Patient", form=form, form2=form2, flag=flag, patient_s=patient)
flash("Patient not found", "danger")
return render_template("update_patient.html", title="Update Patient", form=form, flag=flag)
@app.route("/UpdatePatient2", methods=["GET", "POST"])
def update_result():
# Check that an authorised user only can access this functionality
if check_session() != 'registration_desk_executive':
flash('You are not authorised to access that! Please login with proper credentials.', 'danger')
return redirect(url_for('main'))
form = Patient_update()
if request.method == "POST":
if form.validate_on_submit():
global pid
if request.form.get('stt') != "":
if request.form.get('state_list') == None or request.form.get('state_list') == "":
# Query for patient Details
patient = Patient_details.query.filter(
Patient_details.id == pid)
for patient_1 in patient:
if patient_1:
flag = 1
flash(
"You have to select city if you change state", "danger")
form2 = Patient_update(Type_of_bed=patient_1.bed_type, date=patient_1.admission_date,
address=patient_1.address, patient_name=patient_1.name, patient_age=patient_1.age)
return render_template("update_patient.html", title="Update Patient", form=form, form2=form2, flag=flag, patient_s=patient)
if request.form.get('stt') == "":
name = form.patient_name.data
age = form.patient_age.data
date = form.date.data
bed_type = form.Type_of_bed.data
address = form.address.data
# Update the patient_details table
Patient_details.query.filter_by(id=pid).update({"name": name})
Patient_details.query.filter_by(
id=pid).update({"admission_date": date})
Patient_details.query.filter_by(id=pid).update({"age": age})
Patient_details.query.filter_by(
id=pid).update({"bed_type": bed_type})
Patient_details.query.filter_by(
id=pid).update({"address": address})
else:
name = form.patient_name.data
age = form.patient_age.data
date = form.date.data
bed_type = form.Type_of_bed.data
address = form.address.data
city = request.form.get('state_list')
state = request.form.get('stt')
# Update the patient_details table
Patient_details.query.filter_by(id=pid).update({"name": name})
Patient_details.query.filter_by(
id=pid).update({"admission_date": date})
Patient_details.query.filter_by(id=pid).update({"city": city})
Patient_details.query.filter_by(
id=pid).update({"state": state})
Patient_details.query.filter_by(id=pid).update({"age": age})
Patient_details.query.filter_by(
id=pid).update({"bed_type": bed_type})
Patient_details.query.filter_by(
id=pid).update({"address": address})
# Commit the changes
db.session.commit()
flash("Patient update intiated successfully!", "success")
return redirect(url_for('update_patient'))
# Query for patient_details
patient = Patient_details.query.filter(Patient_details.id == pid)
for patient_1 in patient:
if patient_1:
flag = 1
flash("Please enter AGE in integer format and less than or equal to 3 digits in length!", "danger")
form2 = Patient_update(Type_of_bed=patient_1.bed_type, date=patient_1.admission_date,
address=patient_1.address, patient_name=patient_1.name, patient_age=patient_1.age)
return render_template("update_patient.html", title="Update Patient", form=form, form2=form2, flag=flag, patient_s=patient)
# ==================================================================================
# View all the admitted patients in record
# ==================================================================================
@app.route("/ViewAllPatients")
def view_patient():
# Check that an authorised user only can access this functionality
if check_session() != 'registration_desk_executive':
flash('You are not authorised to access that! Please login with proper credentials.', 'danger')
return redirect(url_for('main'))
# Query for all admitted patients
patient = Patient_details.query.filter_by(status="Admitted")
return render_template("view_patients.html", patients=patient)
# ==================================================================================
# Issue Medicines
# ==================================================================================
@app.route("/GetPatientDetails/Medicine", methods=["GET", "POST"])
def get_patient():
# Check that an authorised user only can access this functionality
if check_session() != 'pharmacy_executive':
flash('You are not authorised to access that! Please login with proper credentials.', 'danger')
return redirect(url_for('main'))
form = Patient_delete()
if request.method == 'POST':
if form.validate_on_submit():
global pid
global issue_med
pid = int(form.patient_id.data)
# Query for patient details
patient = Patient_details.query.filter(
Patient_details.id == int(form.patient_id.data))
for patient_1 in patient:
if patient_1:
flash("Patient found!", "success")
issue_med = None
medicine = med_patient(patient_1)
if medicine != None:
return render_template("get_patient_details.html", title="Search patient", patient=patient, medicine=medicine.all())
else:
return render_template("get_patient_details.html", title="Search patient", patient=patient)
flash("patient not found", "danger")
return render_template("get_patient_details.html", title="Get Patient Details", form=form)
@app.route("/IssueMedicine", methods=["GET", "POST"])
def issue_medicine():
# Check that an authorised user only can access this functionality
if check_session() != 'pharmacy_executive':
flash('You are not authorised to access that! Please login with proper credentials.', 'danger')
return redirect(url_for('main'))
global issue_med
global pid
form = issue_medicine_form()
form.medicine_name.choices = []
medicine = Medicine.query.all()
for med in medicine:
# Populate the medicine select form
form.medicine_name.choices += [(med.medicine_name, med.medicine_name + ' || Qty: ' + str(med.medicine_quantity))]
if form.validate_on_submit():
name = form.medicine_name.data
quantity = form.quantity.data
# Query for medicines
med = Medicine.query.filter(
Medicine.medicine_name == form.medicine_name.data).first()
medid = med.id
rate = med.medicine_amount
# Update issue_med dict
if issue_med == None:
issue_med = {}
issue_med[name] = {
'name': name, 'quantity': quantity, 'medid': medid, 'rate': rate}
else:
issue_med[name] = {
'name': name, 'quantity': quantity, 'medid': medid, 'rate': rate}
flash("Medicine Added!", "success")
return render_template("issue_medicine.html", form=form, medicine=issue_med)
return render_template("issue_medicine.html", form=form, medicine=issue_med)
@app.route("/medicine_update", methods=["GET", "POST"])
def update():
# Check that an authorised user only can access this functionality
if check_session() != 'pharmacy_executive':
flash('You are not authorised to access that! Please login with proper credentials.', 'danger')
return redirect(url_for('main'))
global issue_med
global pid
for i in issue_med:
med_name = str(issue_med[i]['name'])
med_id = int(issue_med[i]['medid'])
med_quant = int(issue_med[i]['quantity'])
# Query for Medicines
medicine = Medicine.query.filter(
Medicine.medicine_name == med_name).first()
current_quant = medicine.medicine_quantity
new_quant = current_quant-med_quant
# Query for patient_medicines
patient = Patient_Medicine.query.filter(
Patient_Medicine.patient_id == pid, Patient_Medicine.medicine_id == med_id).first()
if patient == None:
# Query for Patient_Medicine
db.session.add(Patient_Medicine(
patient_id=pid, medicine_quantity=med_quant, medicine_id=med_id))
medicine.medicine_quantity = new_quant
db.session.commit()
else:
# Update Medicine | |
# copy from https://zhuanlan.zhihu.com/p/31421408
# Define a resnet block
class ResnetBlock(nn.Module):
    """Residual block: returns x + conv_block(x).

    The branch is pad -> 3x3 conv -> norm -> ReLU [-> dropout] -> pad ->
    3x3 conv -> norm, with the padding style chosen by ``padding_type``.
    """

    def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
        super(ResnetBlock, self).__init__()
        self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)

    @staticmethod
    def _padding(padding_type):
        """Return ([explicit padding layers], conv padding amount) for padding_type.

        Raises NotImplementedError for an unknown padding type.
        """
        if padding_type == 'reflect':
            return [nn.ReflectionPad2d(1)], 0
        elif padding_type == 'replicate':
            return [nn.ReplicationPad2d(1)], 0
        elif padding_type == 'zero':
            return [], 1
        raise NotImplementedError('padding [%s] is not implemented' % padding_type)

    def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
        """Build the residual branch (the padding logic was previously duplicated inline)."""
        pad, p = self._padding(padding_type)
        conv_block = pad + [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
                            norm_layer(dim),
                            nn.ReLU(True)]
        if use_dropout:
            conv_block += [nn.Dropout2d(0.5)]
        pad, p = self._padding(padding_type)
        conv_block += pad + [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
                             norm_layer(dim)]
        return nn.Sequential(*conv_block)

    def forward(self, x):
        """Apply the branch and add the identity skip connection."""
        return x + self.conv_block(x)
# Defines the Unet generator.
# |num_downs|: number of downsamplings in UNet. For example,
# if |num_downs| == 7, image of size 128x128 will become of size 1x1
# at the bottleneck
class UnetGenerator(nn.Module):
    """U-Net generator built by recursively nesting UnetSkipConnectionBlock.

    ``num_downs`` is the number of downsamplings; e.g. with num_downs == 7
    a 128x128 image becomes 1x1 at the bottleneck.
    """

    def __init__(self, input_nc, output_nc, num_downs, ngf=64,
                 norm_layer=nn.BatchNorm2d, use_dropout=False, gpu_ids=[], upsample='basic'):
        super(UnetGenerator, self).__init__()
        self.gpu_ids = gpu_ids
        # Build from the innermost (bottleneck) block outwards.
        block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None,
                                        norm_layer=norm_layer, innermost=True, upsample=upsample)
        # Intermediate ngf*8 <-> ngf*8 levels (with optional dropout).
        for _ in range(num_downs - 5):
            block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=block,
                                            norm_layer=norm_layer, use_dropout=use_dropout, upsample=upsample)
        # Widen progressively towards the outside: ngf*4, ngf*2, ngf.
        for outer in (ngf * 4, ngf * 2, ngf):
            block = UnetSkipConnectionBlock(outer, outer * 2, input_nc=None, submodule=block,
                                            norm_layer=norm_layer, upsample=upsample)
        # Outermost block maps input_nc in and output_nc out.
        self.model = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=block,
                                             outermost=True, norm_layer=norm_layer, upsample=upsample)

    def forward(self, input):
        """Run the U-Net; uses data_parallel when gpu_ids are set and input is on CUDA."""
        if self.gpu_ids and isinstance(input.data, torch.cuda.FloatTensor):
            return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
        return self.model(input)
# Defines the submodule with skip connection.
# X -------------------identity---------------------- X
# |-- downsampling -- |submodule| -- upsampling --|
class UnetSkipConnectionBlock(nn.Module):
    """One U-Net level:  X --identity--> concat with (down -> submodule -> up)(X).

    The outermost level returns the upsampled result directly; every other
    level concatenates its input with its output along the channel axis.
    """

    def __init__(self, outer_nc, inner_nc, input_nc=None,
                 submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False, upsample='basic'):
        super(UnetSkipConnectionBlock, self).__init__()
        self.outermost = outermost
        # InstanceNorm has no affine bias by default, so convs need their own bias.
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        if input_nc is None:
            input_nc = outer_nc
        down_conv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,
                              stride=2, padding=1, bias=use_bias)
        down_act = nn.LeakyReLU(0.2, True)
        down_norm = norm_layer(inner_nc)
        up_act = nn.ReLU(True)
        up_norm = norm_layer(outer_nc)
        if outermost:
            # No norm on the way in; Tanh on the way out.
            up_conv = upsampleLayer(inner_nc * 2, outer_nc, upsample=upsample)
            layers = [down_conv] + [submodule] + [up_act] + up_conv + [nn.Tanh()]
        elif innermost:
            # No submodule at the bottleneck; channels are not doubled by a skip.
            up_conv = upsampleLayer(inner_nc, outer_nc, upsample=upsample)
            layers = [down_act, down_conv] + [up_act] + up_conv + [up_norm]
        else:
            up_conv = upsampleLayer(inner_nc * 2, outer_nc, upsample=upsample)
            layers = [down_act, down_conv, down_norm] + [submodule] + [up_act] + up_conv + [up_norm]
            if use_dropout:
                layers = layers + [nn.Dropout(0.5)]
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        """Apply the level; append the skip connection except at the outermost level."""
        if self.outermost:
            return self.model(x)
        return torch.cat([x, self.model(x)], 1)
# Defines the PatchGAN discriminator with the specified arguments.
class NLayerDiscriminator(nn.Module):
    """PatchGAN discriminator: a stack of strided 4x4 convs ending in a 1-channel map."""

    def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_sigmoid=False, gpu_ids=[]):
        super(NLayerDiscriminator, self).__init__()
        self.gpu_ids = gpu_ids
        # InstanceNorm has no affine bias by default, so convs need their own bias.
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        kernel, pad = 4, 1
        layers = [
            nn.Conv2d(input_nc, ndf, kernel_size=kernel, stride=2, padding=pad),
            nn.LeakyReLU(0.2, True),
        ]
        # Stride-2 layers doubling the channel count (capped at ndf*8).
        mult = 1
        for n in range(1, n_layers):
            prev, mult = mult, min(2 ** n, 8)
            layers += [
                nn.Conv2d(ndf * prev, ndf * mult,
                          kernel_size=kernel, stride=2, padding=pad, bias=use_bias),
                norm_layer(ndf * mult),
                nn.LeakyReLU(0.2, True),
            ]
        # One extra stride-1 layer before the final 1-channel projection.
        prev, mult = mult, min(2 ** n_layers, 8)
        layers += [
            nn.Conv2d(ndf * prev, ndf * mult,
                      kernel_size=kernel, stride=1, padding=pad, bias=use_bias),
            norm_layer(ndf * mult),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(ndf * mult, 1, kernel_size=kernel, stride=1, padding=pad),
        ]
        if use_sigmoid:
            layers.append(nn.Sigmoid())
        self.model = nn.Sequential(*layers)

    def forward(self, input):
        """Score the input; uses data_parallel when gpu_ids are set and input is on CUDA."""
        if len(self.gpu_ids) and isinstance(input.data, torch.cuda.FloatTensor):
            return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
        return self.model(input)
class PixelDiscriminator(nn.Module):
    """Per-pixel (1x1-conv) discriminator producing a 1-channel score map."""

    def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d, use_sigmoid=False, gpu_ids=[]):
        super(PixelDiscriminator, self).__init__()
        self.gpu_ids = gpu_ids
        # InstanceNorm has no affine bias by default, so convs need their own bias.
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        layers = [
            nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),
            norm_layer(ndf * 2),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias),
        ]
        if use_sigmoid:
            layers.append(nn.Sigmoid())
        self.net = nn.Sequential(*layers)

    def forward(self, input):
        """Score each pixel; uses data_parallel when gpu_ids are set and input is on CUDA."""
        if len(self.gpu_ids) and isinstance(input.data, torch.cuda.FloatTensor):
            return nn.parallel.data_parallel(self.net, input, self.gpu_ids)
        return self.net(input)
class AutoEncoder(nn.Module):
    """Convolutional autoencoder.

    The encoder applies a 7x7 stem then 6 stride-2 convs, ending in a
    2-channel code; the decoder mirrors it with transposed convs and a Tanh.
    forward() returns the pair (code, output).
    """
    def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, gpu_ids=[], padding_type='reflect'):
        # assert(n_blocks >= 0)
        super(AutoEncoder, self).__init__()
        self.input_nc = input_nc
        self.output_nc = output_nc
        self.ngf = ngf
        self.gpu_ids = gpu_ids
        # InstanceNorm has no affine bias by default, so convs need their own bias.
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        # 7x7 stem with reflection padding (spatial size preserved).
        encoder = [nn.ReflectionPad2d(3),
                   nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0,
                             bias=use_bias),
                   norm_layer(ngf),
                   nn.ReLU(True)]
        n_downsampling = 6
        # First n_downsampling-1 stride-2 convs double the channel count.
        for i in range(n_downsampling-1):
            mult = 2**i
            encoder += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3,
                                  stride=2, padding=1, bias=use_bias),
                        norm_layer(ngf * mult * 2),
                        nn.ReLU(True)]
        # NOTE: `mult` deliberately keeps its final loop value
        # (2**(n_downsampling-2)) here and in the first decoder layer below.
        # Last downsample compresses to a 2-channel code.
        encoder += [nn.Conv2d(ngf * mult*2, 2, kernel_size=3,
                              stride=2, padding=1, bias=use_bias),
                    norm_layer(2),
                    nn.ReLU(True)]
        # Decoder: expand the 2-channel code back to ngf*mult*2 channels...
        decoder = [nn.ConvTranspose2d(2, ngf * mult * 2,
                                      kernel_size=3, stride=2,
                                      padding=1, output_padding=1,
                                      bias=use_bias),
                   norm_layer(ngf * mult * 2),
                   nn.ReLU(True)]
        # ...then halve the channel count at each remaining upsampling step.
        for i in range(1, n_downsampling):
            mult = 2**(n_downsampling - i)
            decoder += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
                                           kernel_size=3, stride=2,
                                           padding=1, output_padding=1,
                                           bias=use_bias),
                        norm_layer(int(ngf * mult / 2)),
                        nn.ReLU(True)]
        # Final 7x7 projection to output_nc channels, squashed to [-1, 1].
        decoder += [nn.ReflectionPad2d(3)]
        decoder += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
        decoder += [nn.Tanh()]
        self.encoder = nn.Sequential(*encoder)
        self.decoder = nn.Sequential(*decoder)
    def forward(self, input):
        """Encode then decode; returns (code, output).

        Uses data_parallel when gpu_ids are set and input is on CUDA.
        """
        if self.gpu_ids and isinstance(input.data, torch.cuda.FloatTensor):
            code = nn.parallel.data_parallel(self.encoder, input, self.gpu_ids)
            output = nn.parallel.data_parallel(self.decoder, code, self.gpu_ids)
        else:
            code = self.encoder(input)
            output = self.decoder(code)
        return code, output
class AE_NLayers(nn.Module):
    """Convolutional autoencoder with a fully-connected bottleneck.

    forward() returns the pair (reconstruction, code) where the code has
    ``output_nc`` features.
    """

    def __init__(self, input_nc, output_nc=1, ndf=64, n_layers=7,
                 norm_layer=None, nl_layer=None, gpu_ids=[], vaeLike=False):
        super(AE_NLayers, self).__init__()
        self.gpu_ids = gpu_ids
        kernel, pad = 4, 1
        # Encoder: n_layers stride-2 convs; channel growth capped at ndf*4.
        enc = [nn.Conv2d(input_nc, ndf, kernel_size=kernel, stride=2, padding=pad), nl_layer()]
        mult = 1
        for n in range(1, n_layers):
            prev, mult = mult, min(2 ** n, 4)
            enc += [nn.Conv2d(ndf * prev, ndf * mult,
                              kernel_size=kernel, stride=2, padding=pad)]
            if norm_layer is not None and n < 6:
                enc += [norm_layer(ndf * mult)]
            enc += [nl_layer()]
        # sequence += [nn.AvgPool2d(8)]
        self.conv = nn.Sequential(*enc)
        # Bottleneck: flatten -> code (output_nc) -> back to conv feature size.
        self.fc = nn.Sequential(*[nn.Linear(ndf * mult, output_nc), nn.LeakyReLU(0.2, True)])
        self.fc2 = nn.Sequential(*[nn.Linear(output_nc, ndf * mult)])
        # deconv = [nn.Upsample(scale_factor=8, mode='nearest')]
        # Decoder: mirror of the encoder with transposed convs, ending in Tanh.
        dec = []
        for n in range(1, n_layers):
            prev, mult = mult, min(2 ** (n_layers - n - 1), 4)
            dec += [nn.ConvTranspose2d(ndf * prev, ndf * mult,
                                       kernel_size=kernel, stride=2, padding=pad)]
            if norm_layer is not None and (n_layers - n + 1) < 7:
                dec += [norm_layer(ndf * mult)]
            dec += [nl_layer()]
        dec += [nn.ConvTranspose2d(ndf, input_nc, kernel_size=kernel, stride=2, padding=pad), nn.Tanh()]
        self.deconv = nn.Sequential(*dec)

    def forward(self, x):
        """Encode x to a flat code, then decode; returns (reconstruction, code)."""
        feats = self.conv(x)
        code = self.fc(feats.view(x.size(0), -1))
        restored = self.fc2(code).view(feats.size(0), feats.size(1), feats.size(2), feats.size(3))
        return self.deconv(restored), code
# Defines the encoder
# and related util functions
# copied from BicyleGAN: networks.py
def conv3x3(in_planes, out_planes):
    """3x3 conv, stride 1, padding 1 (spatial size preserved), with bias."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=1, padding=1, bias=True)
# two usage cases, depend on kw and padw
def upsampleConv(inplanes, outplanes, kw, padw):
    """Nearest-neighbour 2x upsample followed by a stride-1 conv (kw/padw select the usage)."""
    return nn.Sequential(
        nn.Upsample(scale_factor=2, mode='nearest'),
        nn.Conv2d(inplanes, outplanes, kernel_size=kw, stride=1, padding=padw, bias=True),
    )
def meanpoolConv(inplanes, outplanes):
    """2x2 average-pool followed by a 1x1 conv."""
    return nn.Sequential(
        nn.AvgPool2d(kernel_size=2, stride=2),
        nn.Conv2d(inplanes, outplanes, kernel_size=1, stride=1, padding=0, bias=True),
    )
def maxpoolConv(inplanes, outplanes):
    """2x2 max-pool followed by a 1x1 conv."""
    return nn.Sequential(
        nn.MaxPool2d(kernel_size=2, stride=2),
        nn.Conv2d(inplanes, outplanes, kernel_size=1, stride=1, padding=0, bias=True),
    )
def convMeanpool(inplanes, outplanes):
    """3x3 conv followed by 2x2 average-pool."""
    return nn.Sequential(conv3x3(inplanes, outplanes),
                         nn.AvgPool2d(kernel_size=2, stride=2))
def convMaxpool(inplanes, outplanes):
    """3x3 conv followed by 2x2 max-pool."""
    return nn.Sequential(conv3x3(inplanes, outplanes),
                         nn.MaxPool2d(kernel_size=2, stride=2))
def upsampleLayer(inplanes, outplanes, upsample='basic', padding_type='zero'):
    """Return a list of layers performing a 2x spatial upsample.

    'basic' uses a single transposed conv; 'bilinear' upsamples then applies
    a reflection-padded 3x3 conv. Raises NotImplementedError otherwise.
    """
    if upsample == 'basic':
        return [nn.ConvTranspose2d(inplanes, outplanes, kernel_size=4, stride=2, padding=1)]
    if upsample == 'bilinear':
        return [nn.Upsample(scale_factor=2, mode='bilinear'),
                nn.ReflectionPad2d(1),
                nn.Conv2d(inplanes, outplanes, kernel_size=3, stride=1, padding=0)]
    raise NotImplementedError('upsample layer [%s] not implemented' % upsample)
class BasicBlock(nn.Module):
def __init__(self, inplanes, outplanes, norm_layer=None, nl_layer=None, pooling='max'):
super(BasicBlock, self).__init__()
layers = []
if norm_layer is not None:
layers += [norm_layer(inplanes)]
layers += [nl_layer()]
layers += [conv3x3(inplanes, inplanes)]
if norm_layer is not None:
layers += [norm_layer(inplanes)]
layers += [nl_layer()]
if pooling == 'mean':
layers += [convMeanpool(inplanes, outplanes)]
self.shortcut = meanpoolConv(inplanes, outplanes)
elif pooling == 'max':
layers += [convMaxpool(inplanes, outplanes)]
| |
1]
# sf = (y0 - y1) / (y_lhs[-1] - y_lhs[0])
# y_rhs = y_lhs * sf + y1 - y_lhs[0]
x0 = x1
y0 = y1
pass
    @property
    def soils(self):
        """List of unique soil objects referenced by the soil grid (read-only)."""
        return self._soils
    def set_to_decimal_places(self):
        """Adjusts the node coordinates to a certain number of decimal places"""
        # Rounds both node coordinate arrays in place to self.dp decimal places.
        self.y_nodes = np.round(self.y_nodes, self.dp)
        self.x_nodes = np.round(self.x_nodes, self.dp)
    def set_soil_ids_to_vary_y_grid(self):
        """Fill self.soil_grid with soil-hash indices for a fixed-x, variable-y grid.

        Cells above the ground surface are marked with self._inactive_value;
        every other cell gets the index (into self._soil_hashes) of the soil
        layer it falls within, accounting for layer inclination (sp.x_angles).
        """
        # Assign soil to element grid
        x_centres = (self.x_nodes[:-1] + self.x_nodes[1:]) / 2
        # Element centres in y: average of the four surrounding node y-values.
        y_centres = (self.y_nodes[:, :-1] + self.y_nodes[:, 1:]) / 2
        y_centres = (y_centres[:-1] + y_centres[1:]) / 2
        self.y_centres = y_centres
        # Ground surface elevation at each element column.
        surf_centres = np.interp(x_centres, self.tds.x_surf, self.tds.y_surf)
        self.soil_grid = np.zeros((len(y_centres), len(y_centres[0])), dtype=int)
        # Map each element column to the soil profile that governs it.
        self.x_index_to_sp_index = interp_left(x_centres, self.tds.x_sps, np.arange(0, len(self.tds.x_sps)))
        self.x_index_to_sp_index = np.array(self.x_index_to_sp_index, dtype=int)
        for xx in range(len(self.soil_grid)):
            for yy in range(len(self.soil_grid[0])):
                pid = self.x_index_to_sp_index[xx]
                sp = self.tds.sps[pid]
                if y_centres[xx][yy] > surf_centres[xx]:
                    # Above the ground surface: mark as inactive.
                    self.soil_grid[xx][yy] = self._inactive_value
                    continue
                x_angles = list(sp.x_angles)
                sp_x = self.tds.x_sps[pid]
                # Walk the layers top-down until the element sits above a boundary.
                for ll in range(1, sp.n_layers + 1):
                    yc = y_centres[xx][yy]
                    if x_angles[ll - 1] is None:
                        pass
                    elif -yc > (
                            sp.layer_depth(ll) - x_angles[ll - 1] * (x_centres[xx] - sp_x) - self.y_surf_at_sps[pid]):
                        # Element is below this (inclined) boundary: keep searching.
                        pass
                    else:
                        if ll == 1: # above the original soil profile due to ground slope
                            unique_hash = sp.layer(1).unique_hash
                        else:
                            unique_hash = sp.layer(ll - 1).unique_hash
                        self.soil_grid[xx][yy] = self._soil_hashes.index(unique_hash)
                        break
                    if ll == sp.n_layers:
                        # Below every boundary: belongs to the bottom layer.
                        unique_hash = sp.layer(ll).unique_hash
                        self.soil_grid[xx][yy] = self._soil_hashes.index(unique_hash)
                        break
    def set_soil_ids_to_vary_xy_grid(self):
        """Fill self.soil_grid with soil-hash indices for a grid where both x- and
        y-node coordinates vary per element (uses self.x_nodes2d).

        Cells above the ground surface, or above the top layer boundary, are
        marked with self._inactive_value.
        """
        # Assign soil to element grid
        x_centres = (self.x_nodes2d[:-1, :] + self.x_nodes2d[1:, :]) / 2
        x_centres = (x_centres[:, :-1] + x_centres[:, 1:]) / 2
        y_centres = (self.y_nodes[:, :-1] + self.y_nodes[:, 1:]) / 2
        y_centres = (y_centres[:-1] + y_centres[1:]) / 2
        self.y_centres = y_centres
        self.soil_grid = np.zeros((len(y_centres), len(y_centres[0])), dtype=int)
        # Map each element column (using its bottom-row x) to a soil profile.
        self.x_index_to_sp_index = interp_left(x_centres[:, -1], self.tds.x_sps, np.arange(0, len(self.tds.x_sps)))
        self.x_index_to_sp_index = np.array(self.x_index_to_sp_index, dtype=int)
        for xx in range(len(self.soil_grid)):
            for yy in range(len(self.soil_grid[0])):
                pid = self.x_index_to_sp_index[xx]
                sp = self.tds.sps[pid]
                if y_centres[xx][yy] > np.interp(x_centres[xx][yy], self.x_surf, self.y_surf):
                    # Above the ground surface: mark as inactive.
                    self.soil_grid[xx][yy] = self._inactive_value
                    continue
                x_angles = list(sp.x_angles)
                sp_x = self.tds.x_sps[pid]
                # lay_ind = deepest layer whose (possibly inclined) top boundary
                # lies at or above the element centre; 0 if none found.
                lay_ind = 0
                for ll in range(1, sp.n_layers + 1):
                    # yc = y_centres[xx][yy]
                    x_diff = x_centres[xx][yy] - sp_x
                    z_lay_at_sp = -sp.layer_depth(ll) + self.y_surf_at_sps[pid]
                    if x_angles[ll - 1] is None or np.isnan(x_angles[ll - 1]):
                        # No inclination defined: treat the boundary as far above.
                        z_lay_at_x = 1e6
                    else:
                        z_lay_at_x = z_lay_at_sp + x_angles[ll - 1] * x_diff
                    if y_centres[xx][yy] <= z_lay_at_x:
                        lay_ind = ll
                    else:
                        break
                if lay_ind == 0:
                    self.soil_grid[xx][yy] = self._inactive_value
                else:
                    unique_hash = sp.layer(lay_ind).unique_hash
                    self.soil_grid[xx][yy] = self._soil_hashes.index(unique_hash)
                # else:
                #     if ll == 1:  # above the original soil profile due to ground slope
                #         unique_hash = sp.layer(1).unique_hash
                #     else:
                #         unique_hash = sp.layer(ll - 1).unique_hash
                #
                #     break
                # if ll == sp.n_layers:
                #     unique_hash = sp.layer(ll).unique_hash
                #     self.soil_grid[xx][yy] = self._soil_hashes.index(unique_hash)
                #     break
def create_mesh(self):
# if len(np.shape(self.x_nodes)) == 2:
if self.x_nodes2d is not None:
self._femesh = FiniteElementVaryXY2DMesh(self.x_nodes2d, self.y_nodes, self.soil_grid, self.soils)
else:
self._femesh = FiniteElementVaryY2DMesh(self.x_nodes, self.y_nodes, self.soil_grid, self.soils)
    @property
    def femesh(self):
        """The finite-element mesh object built by create_mesh() (read-only)."""
        return self._femesh
    def exclude_fd_eles(
            self): # TODO: implement a near field option, where grid gets remeshed with angles to have more detail near footing
        """Deactivate the soil elements occupied by each building's foundation."""
        for i, bd in enumerate(self.tds.bds):
            fd = bd.fd
            # Foundation centre in global coordinates.
            fcx = self.tds.x_bds[i] + bd.x_fd
            fcy = np.interp(fcx, self.x_surf, self.y_surf)
            # Footprint length along the in-plane axis.
            lip = getattr(fd, fd.ip_axis)
            x0 = fcx - lip / 2
            x1 = fcx + lip / 2
            # NOTE(review): y_top/y_bot derive from fd.depth and fd.height -
            # confirm the sign convention against the foundation model.
            y_top = fcy - fd.depth
            y_bot = fcy - fd.depth + fd.height
            xsi = self.femesh.get_nearest_node_index_at_x(x0)
            xei = self.femesh.get_nearest_node_index_at_x(x1)
            yei = self.femesh.get_nearest_node_index_at_depth(y_top, x0)
            ysi = self.femesh.get_nearest_node_index_at_depth(y_bot, x0)
            # create foundation nodes a soil mesh nodes
            # along the base
            j = 0
            # Mark every element inside the footprint inactive, in both the
            # builder's grid and the mesh object's grid.
            for xx in range(int(xsi), int(xei)):
                for yy in range(int(ysi), int(yei)):
                    self.soil_grid[xx][yy] = self._inactive_value
                    self.femesh.soil_grid[xx][yy] = self.femesh.inactive_value
class FiniteElementVaryY2DMesh(PhysicalObject):
    """2D finite-element mesh with a single 1D array of x-nodes and, for each
    x-position, its own column of y-nodes (y varies per column)."""
    base_type = 'femesh'
    type = 'vary_y2d'

    def __init__(self, x_nodes, y_nodes, soil_grid, soils, inactive_value=1e6):
        self._x_nodes = x_nodes
        self._y_nodes = y_nodes
        self._soil_grid = soil_grid
        self._soils = soils
        self.inactive_value = inactive_value
        self.inputs = ['x_nodes', 'y_nodes', 'soil_grid', 'soils']

    def get_active_nodes(self):
        """Return an int array (nnx, nny): 1 where a node touches at least one
        active element, else 0."""
        active_nodes = np.ones((len(self._x_nodes), len(self._y_nodes[0])), dtype=int)  # Start with all active
        # Pad soil_grid with inactive values around edge
        sg_w_pad = self.inactive_value * np.ones((len(self._soil_grid) + 2, len(self._soil_grid[0]) + 2))
        sg_w_pad[1:-1, 1:-1] = self._soil_grid
        # Then compute the average soil_grid from four elements
        node_grid = (sg_w_pad[:-1, :-1] + sg_w_pad[:-1, 1:] + sg_w_pad[1:, :-1] + sg_w_pad[1:, 1:]) / 4
        # if average is equal to inactive then node is not active
        inds = np.where(node_grid == self.inactive_value)
        active_nodes[inds] = 0
        return active_nodes

    @property
    def soils(self):
        """List of unique soil objects referenced by the soil grid (read-only)."""
        return self._soils

    def get_ele_indexes_at_depths(self, depths, x, low=None):
        """Return element y-indices for the given depths in the column nearest x."""
        x_ind = self.get_ele_indexes_at_xs([x])[0]
        return interp_left(-np.array(depths), -self._y_nodes[x_ind], low=low)

    def get_ele_indexes_at_xs(self, xs, low=None):
        """Return element x-indices for the given x-coordinates."""
        return interp_left(xs, self.x_nodes, low=low)

    def get_nearest_node_index_at_depth(self, depth, x):
        """Return the y-index of the node nearest `depth` in the column nearest x."""
        x_ind = self.get_nearest_node_index_at_x(x)
        return np.argmin(abs(self._y_nodes[x_ind] - depth))

    def get_nearest_node_index_at_x(self, x):
        """Return the x-index of the node column nearest x."""
        return np.argmin(abs(self.x_nodes - x))

    @property
    def nny(self):
        """Number of nodes in the y-direction."""
        return len(self._y_nodes[0])

    @property
    def nnx(self):
        """Number of nodes in the x-direction."""
        return len(self._x_nodes)

    def set_to_decimal_places(self, dp):
        """Adjusts the node coordinates to a certain number of decimal places"""
        self._y_nodes = np.round(self._y_nodes, dp)
        self._x_nodes = np.round(self._x_nodes, dp)

    @property
    def x_nodes(self):
        """X-position of node columns."""
        return self._x_nodes

    @x_nodes.setter
    def x_nodes(self, x_nodes):
        # Accept either an array or a path to a text file of coordinates.
        if isinstance(x_nodes, str):
            self._x_nodes = np.loadtxt(x_nodes)
        else:
            self._x_nodes = x_nodes

    @property
    def y_nodes(self):
        """Y-position of nodes - top-to-bottom"""
        return self._y_nodes

    @y_nodes.setter
    def y_nodes(self, y_nodes):
        # Accept either an array or a path to a text file of coordinates.
        if isinstance(y_nodes, str):
            self._y_nodes = np.loadtxt(y_nodes)
        else:
            self._y_nodes = y_nodes

    @property
    def soil_grid(self):
        """Per-element soil index grid (inactive_value marks excluded cells)."""
        return self._soil_grid

    @soil_grid.setter
    def soil_grid(self, soil_grid):
        # Accept either an array or a path to a text file.
        if isinstance(soil_grid, str):
            self._soil_grid = np.loadtxt(soil_grid)
        else:
            self._soil_grid = soil_grid

    def add_to_dict(self, models_dict, **kwargs):
        """Ensure the femesh/soil sections exist in a models dict.

        NOTE(review): this only creates the empty sections - it does not add
        the mesh itself; confirm whether serialisation is completed elsewhere.
        """
        if self.base_type not in models_dict:
            models_dict[self.base_type] = {}
        if "soil" not in models_dict:
            models_dict["soil"] = {}

    def get_node_indices_for_all_eles(self):
        """Return (xis, yis): arrays of shape (4, nx, ny) holding the four
        corner-node indices of every element, corner-ordered
        (x, y), (x+1, y), (x+1, y+1), (x, y+1)."""
        xx = np.arange(len(self.soil_grid))
        xis = np.array([xx, xx + 1, xx + 1, xx])[:, :, np.newaxis] * np.ones_like(self.soil_grid)[np.newaxis, :, :]
        yy = np.arange(len(self.soil_grid[0]))
        # Bug fix: the y-indices were previously built from `xx`, which broke
        # broadcasting for non-square grids and returned x-indices as y-indices.
        yis = np.array([yy, yy, yy + 1, yy + 1])[:, np.newaxis, :] * np.ones_like(self.soil_grid)[np.newaxis, :, :]
        return xis, yis

    def get_node_coords_for_all_eles(self):
        """Return (x_coords, y_coords) of the four corner nodes of every element."""
        xis, yis = self.get_node_indices_for_all_eles()
        x_coords = self.x_nodes[xis]
        y_coords = self.y_nodes[xis, yis]
        # Bug fix: the computed coordinates were previously never returned.
        return x_coords, y_coords
class FiniteElementVaryXY2DMesh(PhysicalObject):
    """2D finite-element mesh where both x- and y-node coordinates vary per
    node (2D arrays of shape (nnx, nny))."""
    base_type = 'femesh'
    type = 'vary_xy2d'
    def __init__(self, x_nodes, y_nodes, soil_grid, soils, inactive_value=1e6):
        self._x_nodes = x_nodes
        self._y_nodes = y_nodes
        # Lazily built caches of node/element-centre coordinates.
        self.node_coords_mesh = None
        self.ele_coords_mesh = None
        self._soil_grid = soil_grid
        self._soils = soils
        self.inactive_value = inactive_value
        self.inputs = ['x_nodes', 'y_nodes', 'soil_grid', 'soils']
    def get_active_nodes(self):
        """Return an int array (nnx, nny): 1 where a node touches at least one
        active element, else 0."""
        active_nodes = np.ones((len(self._x_nodes), len(self._y_nodes[0])), dtype=int)  # Start with all active
        # Pad soil_grid with inactive values around edge
        sg_w_pad = self.inactive_value * np.ones((len(self._soil_grid) + 2, len(self._soil_grid[0]) + 2))
        sg_w_pad[1:-1, 1:-1] = self._soil_grid
        # Then compute the average soil_grid from four elements
        node_grid = (sg_w_pad[:-1, :-1] + sg_w_pad[:-1, 1:] + sg_w_pad[1:, :-1] + sg_w_pad[1:, 1:]) / 4
        # if average is equal to inactive then node is not active
        inds = np.where(node_grid == self.inactive_value)
        active_nodes[inds] = 0
        return active_nodes
    @property
    def soils(self):
        """List of unique soil objects referenced by the soil grid (read-only)."""
        return self._soils
    def tidy_unused_mesh(self):
        """Collapse nodes that touch no active element onto the surface node of
        their column (offset upwards so coordinates stay distinct)."""
        anodes = self.get_active_nodes()
        inds = np.where(anodes == 0)  # NOTE: recomputed per column below
        xns = self.x_nodes
        yns = self.y_nodes
        for i in range(len(xns)):
            inds = np.where(anodes[i] == 0)
            if len(inds[0]):
                # First active node below the inactive run defines the surface.
                x_surf = self.x_nodes[i][inds[0][-1] + 1]
                y_surf = self.y_nodes[i][inds[0][-1] + 1]
                xns[i][inds] = x_surf
                yns[i][inds] = y_surf + np.arange(1, len(inds[0]) + 1)[::-1]
    # def get_ele_indexes_at_depths(self, depths, x, low=None):
    #     x_ind = self.get_ele_indexes_at_xs([x])[0]
    #     return interp_left(-np.array(depths), -self._y_nodes[x_ind], low=low)
    #
    # def get_ele_indexes_at_xs(self, xs, y, low=None):
    #     return interp_left(xs, self.x_nodes, low=low)
    #
    # def get_nearest_node_index_at_depth(self, depth, x):
    #     x_ind = self.get_nearest_node_index_at_x(x)
    #     return np.argmin(abs(self._y_nodes[x_ind] - depth))
    #
    # def get_nearest_node_index_at_x(self, x, y):
    #     return np.argmin(abs(self.x_nodes - x))
    def build_node_coords_mesh(self):
        """Cache node coordinates as an array of shape (nnx, nny, 2)."""
        self.node_coords_mesh = np.array([self.x_nodes, self.y_nodes]).transpose(1, 2, 0)
    def build_ele_coords_mesh(self):
        """Cache element-centre coordinates (average of the four corner nodes)
        as an array of shape (nx, ny, 2)."""
        x_centres = (self.x_nodes[:-1, :] + self.x_nodes[1:, :]) / 2
        x_centres = (x_centres[:, :-1] + x_centres[:, 1:]) / 2
        y_centres = (self.y_nodes[:, :-1] + self.y_nodes[:, 1:]) / 2
        y_centres = (y_centres[:-1] + y_centres[1:]) / 2
        self.ele_coords_mesh = np.array([x_centres, y_centres]).transpose(1, 2, 0)
    def get_nearest_nodes_indexes(self, coords, n=1):
        """Return the (x, y) indices of the n nodes nearest to `coords`."""
        coords = np.array(coords)
        if self.node_coords_mesh is None:
            self.build_node_coords_mesh()
        norms = np.linalg.norm(coords - self.node_coords_mesh, axis=2)
        arr_s = np.shape(self.node_coords_mesh)[:-1]
        return np.dstack(np.unravel_index(np.argsort(norms.ravel())[:n], arr_s))[0]
    def get_nearest_eles_indexes(self, coords, n=1):
        """Return the (x, y) indices of the n element centres nearest to `coords`."""
        coords = np.array(coords)
        if self.ele_coords_mesh is None:
            self.build_ele_coords_mesh()
        norms = np.linalg.norm(coords - self.ele_coords_mesh, axis=2)
        arr_s = np.shape(self.ele_coords_mesh)[:-1]
        return np.dstack(np.unravel_index(np.argsort(norms.ravel())[:n], arr_s))[0]
    def get_ele_index_by_type(self, stype):
        """Return the grid indices of all elements whose soil has type `stype`.

        NOTE(review): relies on a `soil_grid` attribute/property not visible in
        this portion of the class - confirm it is defined elsewhere.
        """
        s_inds = []
        for i, sl in enumerate(self.soils):
            if sl.type == stype:
                s_inds.append(i)
        s_inds = np.array(s_inds)
        arr_s = np.shape(self.soil_grid)
        pmesh_inds = np.where(self.soil_grid.flatten() == s_inds[:, None])
        pmesh_inds = np.unravel_index(pmesh_inds[1], arr_s)
        return pmesh_inds
    @property
    def nny(self):
        """Number of nodes in the y-direction."""
        return len(self._y_nodes[0])
    @property
    def nnx(self):
        """Number of nodes in the x-direction."""
        return len(self._x_nodes)
    def set_to_decimal_places(self, dp):
        """Adjusts the node coordinates to a certain number of decimal places"""
        self._y_nodes = np.round(self._y_nodes, dp)
        self._x_nodes = np.round(self._x_nodes, dp)
| |
# zunis_lib/zunis/models/flows/coupling_cells/piecewise_coupling/piecewise_linear.py
"""Implementation of the piecewise linear coupling cell
This means that the *variable transform* is piecewise-linear.
"""
import torch
from ..transforms import InvertibleTransform
from ..general_coupling import InvertibleCouplingCell
from zunis.utils.exceptions import AvertedCUDARuntimeError
from zunis.models.layers.trainable import ArbitraryShapeRectangularDNN
from zunis.models.utils import Reshift
# Softmax over the bin axis (dim=2) of (N, k, b) tensors; shared by both transforms below.
third_dimension_softmax = torch.nn.Softmax(dim=2)
def piecewise_linear_transform(x, q_tilde, compute_jacobian=True):
    """Apply an element-wise piecewise-linear transformation to some variables
    Parameters
    ----------
    x : torch.Tensor
        a tensor with shape (N,k) where N is the batch dimension while k is the
        dimension of the variable space. This variable span the k-dimensional unit
        hypercube
    q_tilde: torch.Tensor
        is a tensor with shape (N,k,b) where b is the number of bins.
        This contains the un-normalized heights of the bins of the piecewise-constant PDF for dimension k,
        i.e. q_tilde lives in all of R and we don't impose a constraint on their sum yet.
        Normalization is imposed in this function using softmax.
    compute_jacobian : bool, optional
        determines whether the jacobian should be computed or None is returned
    Returns
    -------
    tuple of torch.Tensor
        pair `(y, logj)`.
        - `y` is a tensor with shape (N,k) living in the k-dimensional unit hypercube
        - `logj` is the log-jacobian of the transformation with shape (N,) if compute_jacobian==True, else None.
    """
    logj = None
    # TODO do a bottom-up assessment of how we handle the differentiability of variables
    # Compute the bin width w
    N, k, b = q_tilde.shape
    Nx, kx = x.shape
    assert N == Nx and k == kx, "Shape mismatch"
    w = 1. / b
    # Compute the normalized bin heights by applying a softmax function on the bin dimension
    q = 1. / w * third_dimension_softmax(q_tilde)
    # x is in the mx-th bin: x \in [0,1],
    # mx \in [[0,b-1]], so we clamp away the case x == 1
    mx = torch.clamp(torch.floor(b * x), 0, b - 1).to(torch.long)
    # Need special error handling because trying to index with mx
    # if it contains nans will lock the GPU. (device-side assert triggered)
    if torch.any(torch.isnan(mx)).item() or torch.any(mx < 0) or torch.any(mx >= b):
        raise AvertedCUDARuntimeError("NaN detected in PWLinear bin indexing")
    # We compute the output variable in-place
    out = x - mx * w  # alpha (element of [0.,w], the position of x in its bin)
    # Multiply by the slope
    # q has shape (N,k,b), mxu = mx.unsqueeze(-1) has shape (N,k) with entries that are a b-index
    # gather defines slope[i, j, k] = q[i, j, mxu[i, j, k]] with k taking only 0 as a value
    # i.e. we say slope[i, j] = q[i, j, mx [i, j]]
    slopes = torch.gather(q, 2, mx.unsqueeze(-1)).squeeze(-1)
    out = out * slopes
    # The jacobian is the product of the slopes in all dimensions
    if compute_jacobian:
        logj = torch.log(torch.prod(slopes, 1))
    del slopes
    # Compute the integral over the left-bins.
    # 1. Compute all integrals: cumulative sum of bin height * bin weight.
    # We want that index i contains the cumsum *strictly to the left* so we shift by 1
    # leaving the first entry null, which is achieved with a roll and assignment
    q_left_integrals = torch.roll(torch.cumsum(q, 2) * w, 1, 2)
    q_left_integrals[:, :, 0] = 0
    # 2. Access the correct index to get the left integral of each point and add it to our transformation
    out = out + torch.gather(q_left_integrals, 2, mx.unsqueeze(-1)).squeeze(-1)
    # Regularization: points must be strictly within the unit hypercube
    # Use the dtype information from pytorch
    eps = torch.finfo(out.dtype).eps
    out = out.clamp(
        min=eps,
        max=1. - eps
    )
    return out, logj
def piecewise_linear_inverse_transform(y, q_tilde, compute_jacobian=True):
    """
    Apply the inverse of an element-wise piecewise-linear transformation to some variables
    Parameters
    ----------
    y : torch.Tensor
        a tensor with shape (N,k) where N is the batch dimension while k is the
        dimension of the variable space. This variable span the k-dimensional unit
        hypercube
    q_tilde: torch.Tensor
        is a tensor with shape (N,k,b) where b is the number of bins.
        This contains the un-normalized heights of the bins of the piecewise-constant PDF for dimension k,
        i.e. q_tilde lives in all of R and we don't impose a constraint on their sum yet.
        Normalization is imposed in this function using softmax.
    compute_jacobian : bool, optional
        determines whether the jacobian should be computed or None is returned
    Returns
    -------
    tuple of torch.Tensor
        pair `(x, logj)`.
        - `x` is a tensor with shape (N,k) living in the k-dimensional unit hypercube
        - `logj` is the log-jacobian of the transformation with shape (N,) if compute_jacobian==True, else None.
    """
    # TODO do a bottom-up assessment of how we handle the differentiability of variables
    # Compute the bin width w
    N, k, b = q_tilde.shape
    Ny, ky = y.shape
    assert N == Ny and k == ky, "Shape mismatch"
    w = 1. / b
    # Compute the normalized bin heights by applying a softmax function on the bin dimension
    q = 1. / w * third_dimension_softmax(q_tilde)
    # Compute the integral over the left-bins in the forward transform.
    # 1. Compute all integrals: cumulative sum of bin height * bin weight.
    # We want that index i contains the cumsum *strictly to the left* so we shift by 1
    # leaving the first entry null, which is achieved with a roll and assignment
    q_left_integrals = torch.roll(torch.cumsum(q, 2) * w, 1, 2)
    q_left_integrals[:, :, 0] = 0
    # We can figure out which bin each y belongs to by finding the smallest bin such that
    # y - q_left_integral is positive
    edges = (y.unsqueeze(-1) - q_left_integrals).detach()
    # y and q_left_integrals are between 0 and 1 so that their difference is at most 1.
    # By setting the negative values to 2., we know that the smallest value left
    # is the smallest positive
    edges[edges < 0] = 2.
    edges = torch.clamp(torch.argmin(edges, dim=2), 0, b - 1).to(torch.long)
    # Need special error handling because trying to index with mx
    # if it contains nans will lock the GPU. (device-side assert triggered)
    if torch.any(torch.isnan(edges)).item() or torch.any(edges < 0) or torch.any(edges >= b):
        raise AvertedCUDARuntimeError("NaN detected in PWLinear bin indexing")
    # Gather the left integrals at each edge. See comment about gathering in q_left_integrals
    # for the unsqueeze
    q_left_integrals = q_left_integrals.gather(2, edges.unsqueeze(-1)).squeeze(-1)
    # Gather the slope at each edge.
    q = q.gather(2, edges.unsqueeze(-1)).squeeze(-1)
    # Build the output: invert the affine map of the bin each y falls in.
    x = (y - q_left_integrals) / q + edges * w
    # Regularization: points must be strictly within the unit hypercube
    # Use the dtype information from pytorch
    eps = torch.finfo(x.dtype).eps
    x = x.clamp(
        min=eps,
        max=1. - eps
    )
    # Prepare the jacobian: inverse transform, so the sign of the log is flipped.
    logj = None
    if compute_jacobian:
        logj = - torch.log(torch.prod(q, 1))
    return x.detach(), logj
class ElementWisePWLinearTransform(InvertibleTransform):
    """Invertible piecewise-linear transformations over the unit hypercube.

    Implements a batched bijective transformation `h` from the d-dimensional
    unit hypercube to itself, in an element-wise fashion (each coordinate
    transformed independently).

    In each direction, the bijection is a piecewise-linear transform with b
    bins, where the forward transform has evenly spaced bins. The
    transformation in each bin is actually an affine transformation. The
    slopes for each direction and each point in the batch are given by an
    unnormalized tensor `q_tilde`. This input is softmax-normalized such that

    1. h(0) = 0
    2. h(1) = 1
    3. h is monotonic
    4. h is continuous

    for which knowing the slopes in each bin is sufficient.
    Conditions 1. to 3. ensure the transformation is a bijection and is
    therefore invertible.

    The inverse is also an element-wise, piecewise-linear transformation,
    but, of course, with variable input bin sizes (and fixed output bin sizes).
    """
    # NOTE(review): the module-level helpers are bound crosswise on purpose —
    # `piecewise_linear_transform` plays the role of this cell's *backward*
    # pass and `piecewise_linear_inverse_transform` its *forward* pass.
    # Presumably this matches InvertibleTransform's forward/backward
    # convention — confirm against the base class.
    backward = staticmethod(piecewise_linear_transform)
    forward = staticmethod(piecewise_linear_inverse_transform)
class GeneralPWLinearCoupling(InvertibleCouplingCell):
"""Abstract class implementing a coupling cell based on PW linear transformations
A specific way to predict the parameters of the transform must be implemented
in child classes.
"""
def __init__(self, *, d, mask):
"""Generator for the abstract class GeneralPWLinearCoupling
Parameters
----------
d: int
dimension of the space
mask: list of bool
variable mask which variables are transformed (False)
or used as parameters of the transform | |
import os
from shutil import copyfile
import numpy as np
from datetime import datetime
import pytest
from mikeio import Dfsu, Mesh, Dfs0
from mikeio.eum import ItemInfo
from mikeio.dutil import Dataset
def test_read_all_items_returns_all_items_and_names():
    """A full read returns all four items; reprs of file and dataset work."""
    dfsu_file = Dfsu(os.path.join("tests", "testdata", "HD2D.dfsu"))
    dataset = dfsu_file.read()
    assert dfsu_file.n_items == 4
    # Smoke-test both __repr__ implementations.
    ds_text = repr(dataset)
    dfs_text = repr(dfsu_file)
    assert len(dataset) == 4


def test_read_item_0():
    """Reading a single item by number yields a one-item dataset."""
    dfsu_file = Dfsu(os.path.join("tests", "testdata", "HD2D.dfsu"))
    assert dfsu_file.n_items == 4
    dataset = dfsu_file.read(1)
    assert len(dataset) == 1


def test_read_timestep_1():
    """A scalar time_steps argument selects exactly one time step."""
    dfsu_file = Dfsu(os.path.join("tests", "testdata", "HD2D.dfsu"))
    dataset = dfsu_file.read(time_steps=1)
    assert len(dataset.time) == 1


def test_read_simple_3d():
    """A sigma-layered 3d file reads with the expected items."""
    dataset = Dfsu(os.path.join("tests", "testdata", "basin_3d.dfsu")).read()
    assert len(dataset.data) == 4
    assert len(dataset.items) == 4
    assert dataset.items[0].name == "Z coordinate"
    assert dataset.items[3].name == "W velocity"


def test_read_simple_2dv():
    """A 2d vertical-slice file reads with the expected items."""
    dataset = Dfsu(os.path.join("tests", "testdata", "basin_2dv.dfsu")).read()
    assert len(dataset.data) == 4
    assert len(dataset.items) == 4
    assert dataset.items[0].name == "Z coordinate"
    assert dataset.items[3].name == "W velocity"


def test_read_single_item_returns_single_item():
    """Selecting items=[3] returns a dataset with one item."""
    dataset = Dfsu(os.path.join("tests", "testdata", "HD2D.dfsu")).read(items=[3])
    assert len(dataset.items) == 1


def test_read_single_item_scalar_index():
    """Selecting [3] positionally also returns a single-item dataset."""
    dataset = Dfsu(os.path.join("tests", "testdata", "HD2D.dfsu")).read([3])
    assert len(dataset) == 1
def test_read_returns_array_time_dimension_first():
    """Data arrays are shaped (n_timesteps, n_elements)."""
    dataset = Dfsu(os.path.join("tests", "testdata", "HD2D.dfsu")).read([3])
    assert dataset.data[0].shape == (9, 884)


def test_read_selected_item_returns_correct_items():
    """Item selection by index preserves the original item metadata."""
    dataset = Dfsu(os.path.join("tests", "testdata", "HD2D.dfsu")).read([0, 3])
    assert len(dataset) == 2
    assert dataset.items[0].name == "Surface elevation"
    assert dataset.items[1].name == "Current speed"


def test_read_selected_item_names_returns_correct_items():
    """Items can be selected by name instead of index."""
    dataset = Dfsu(os.path.join("tests", "testdata", "HD2D.dfsu")).read(
        ["Surface elevation", "Current speed"]
    )
    assert len(dataset) == 2
    assert dataset.items[0].name == "Surface elevation"
    assert dataset.items[1].name == "Current speed"


def test_read_returns_correct_items_sigma_z():
    """A sigma-z file exposes its three items in file order."""
    dataset = Dfsu(os.path.join("tests", "testdata", "oresund_sigma_z.dfsu")).read()
    assert len(dataset) == 3
    expected_names = ["Z coordinate", "Temperature", "Salinity"]
    assert [item.name for item in dataset.items] == expected_names


def test_read_all_time_steps():
    """With no time selection, all nine steps are returned."""
    dataset = Dfsu(os.path.join("tests", "testdata", "HD2D.dfsu")).read(items=[0, 3])
    assert len(dataset.time) == 9
    assert dataset.data[0].shape[0] == 9


def test_read_single_time_step():
    """time_steps given as a list selects those steps only."""
    dataset = Dfsu(os.path.join("tests", "testdata", "HD2D.dfsu")).read(
        items=[0, 3], time_steps=[1]
    )
    assert len(dataset.time) == 1
    assert dataset.data[0].shape[0] == 1


def test_read_single_time_step_scalar():
    """A scalar time_steps behaves like a one-element list."""
    dataset = Dfsu(os.path.join("tests", "testdata", "HD2D.dfsu")).read(
        items=[0, 3], time_steps=1
    )
    assert len(dataset.time) == 1
    assert dataset.data[0].shape[0] == 1
def test_read_single_time_step_outside_bounds_fails():
    """Requesting a time step beyond the file's range raises."""
    dfsu_file = Dfsu(os.path.join("tests", "testdata", "HD2D.dfsu"))
    with pytest.raises(Exception):
        dfsu_file.read(items=[0, 3], time_steps=[100])


def test_get_number_of_time_steps():
    """The file reports nine stored time steps."""
    assert Dfsu(os.path.join("tests", "testdata", "HD2D.dfsu")).n_timesteps == 9


def test_number_of_nodes_and_elements_sigma_z():
    """Node/element counts of the sigma-z mesh match the known geometry."""
    dfsu_file = Dfsu(os.path.join("tests", "testdata", "oresund_sigma_z.dfsu"))
    assert dfsu_file.n_elements == 17118
    assert dfsu_file.n_nodes == 12042


def test_get_node_coords():
    """The first node x-coordinate matches the known mesh value exactly."""
    coords = Dfsu(os.path.join("tests", "testdata", "HD2D.dfsu")).node_coordinates
    assert coords[0, 0] == 607031.4886285994


def test_get_element_coords():
    """Element centre coordinates match the known mesh (approximately)."""
    coords = Dfsu(os.path.join("tests", "testdata", "HD2D.dfsu")).element_coordinates
    assert coords[1, 1] == pytest.approx(6906790.5928664245)


def test_find_nearest_element_2d():
    """Nearest-element lookup at a known point returns element 317."""
    dfsu_file = Dfsu(os.path.join("tests", "testdata", "HD2D.dfsu"))
    assert dfsu_file.find_nearest_element(606200, 6905480) == 317
def test_dfsu_to_dfs0_via_dataframe(tmpdir):
    """Round-trip one element via a dataframe's to_dfs0 and compare metadata."""
    dfsu_file = Dfsu(os.path.join("tests", "testdata", "HD2D.dfsu"))
    assert dfsu_file.start_time.year == 1985
    nearest = dfsu_file.find_nearest_element(606200, 6905480)
    dataset = dfsu_file.read(elements=[nearest])
    point_series = dataset.isel(idx=0)
    frame = point_series.to_dataframe()
    outfilename = os.path.join(tmpdir, "out.dfs0")
    frame.to_dfs0(outfilename)
    roundtrip = Dfs0(outfilename).read()
    assert roundtrip.items[0].name == dataset.items[0].name
    assert dataset.time[0] == roundtrip.time[0]
    assert dataset.time[-1] == roundtrip.time[-1]


def test_dfsu_to_dfs0(tmpdir):
    """Round-trip one element via Dfs0.write and compare metadata."""
    dfsu_file = Dfsu(os.path.join("tests", "testdata", "HD2D.dfsu"))
    assert dfsu_file.start_time.year == 1985
    nearest = dfsu_file.find_nearest_element(606200, 6905480)
    dataset = dfsu_file.read(elements=[nearest])
    point_series = dataset.isel(idx=0)
    outfilename = os.path.join(tmpdir, "out.dfs0")
    Dfs0().write(outfilename, point_series)
    roundtrip = Dfs0(outfilename).read()
    assert roundtrip.items[0].name == dataset.items[0].name
    assert dataset.time[0] == roundtrip.time[0]
    assert dataset.time[-1] == roundtrip.time[-1]


def test_find_nearest_element_2d_array():
    """Vectorized nearest-element lookup returns one id per query point."""
    dfsu_file = Dfsu(os.path.join("tests", "testdata", "HD2D.dfsu"))
    elem_ids = dfsu_file.find_nearest_element(x=[606200, 606200], y=[6905480, 6905480])
    assert len(elem_ids) == 2
    assert elem_ids[0] == 317
    assert elem_ids[1] == 317


def test_find_nearest_element_3d():
    """3d lookup defaults to the surface; layer/negative-z select others."""
    dfsu_file = Dfsu(os.path.join("tests", "testdata", "oresund_sigma_z.dfsu"))
    surface_elem = dfsu_file.find_nearest_element(333934, 6158101)
    assert surface_elem == 5323
    assert surface_elem in dfsu_file.top_elements
    assert dfsu_file.find_nearest_element(333934, 6158101, layer=8) == 5322
    assert dfsu_file.find_nearest_element(333934, 6158101, -7) == 5320
def find_nearest_profile_elements():
    # NOTE(review): missing the "test_" prefix, so pytest never collects this
    # function — it is dead code. It near-duplicates
    # test_find_nearest_profile_elements below with a different query point;
    # either fold this query into that test under a new name or delete it.
    filename = os.path.join("tests", "testdata", "oresund_sigma_z.dfsu")
    dfs = Dfsu(filename)
    elem_ids = dfs.find_nearest_profile_elements(333934, 6158101)
    assert elem_ids[0] == 5320
    assert elem_ids[-1] == 5323
def test_read_and_select_single_element():
    """isel on axis 1 reduces the 2d dataset to a single-element time series."""
    dfsu_file = Dfsu(os.path.join("tests", "testdata", "HD2D.dfsu"))
    dataset = dfsu_file.read()
    assert dataset.data[0].shape == (9, 884)
    nearest = dfsu_file.find_nearest_element(606200, 6905480)
    selected = dataset.isel(idx=nearest, axis=1)
    assert selected.data[0].shape == (9,)


def test_read_and_select_single_element_dfsu_3d():
    """isel on axis 1 works for 3d files as well."""
    dataset = Dfsu(os.path.join("tests", "testdata", "basin_3d.dfsu")).read()
    selected = dataset.isel(idx=1739, axis=1)
    assert selected.data[0].shape == (3,)


def test_is_2d():
    """is_2d is true for plain 2d files and false for layered files."""
    assert Dfsu(os.path.join("tests", "testdata", "HD2D.dfsu")).is_2d
    assert not Dfsu(os.path.join("tests", "testdata", "basin_3d.dfsu")).is_2d


def test_n_layers():
    """n_layers matches each layered file; 2d files report None."""
    layered_cases = [
        ("basin_3d.dfsu", 10),
        ("oresund_sigma_z.dfsu", 9),
        ("oresund_vertical_slice.dfsu", 9),
    ]
    for name, expected in layered_cases:
        assert Dfsu(os.path.join("tests", "testdata", name)).n_layers == expected
    assert Dfsu(os.path.join("tests", "testdata", "HD2D.dfsu")).n_layers is None


def test_n_sigma_layers():
    """n_sigma_layers matches each layered file; 2d files report None."""
    layered_cases = [
        ("basin_3d.dfsu", 10),
        ("oresund_sigma_z.dfsu", 4),
        ("oresund_vertical_slice.dfsu", 4),
    ]
    for name, expected in layered_cases:
        assert Dfsu(os.path.join("tests", "testdata", name)).n_sigma_layers == expected
    assert Dfsu(os.path.join("tests", "testdata", "HD2D.dfsu")).n_sigma_layers is None


def test_n_z_layers():
    """n_z_layers matches each layered file; 2d files report None."""
    layered_cases = [
        ("basin_3d.dfsu", 0),
        ("oresund_sigma_z.dfsu", 5),
        ("oresund_vertical_slice.dfsu", 5),
    ]
    for name, expected in layered_cases:
        assert Dfsu(os.path.join("tests", "testdata", name)).n_z_layers == expected
    assert Dfsu(os.path.join("tests", "testdata", "HD2D.dfsu")).n_z_layers is None


def test_boundary_codes():
    """Boundary code counts match the known meshes."""
    assert len(Dfsu(os.path.join("tests", "testdata", "basin_3d.dfsu")).boundary_codes) == 1
    assert len(Dfsu(os.path.join("tests", "testdata", "oresund_sigma_z.dfsu")).boundary_codes) == 3
def test_top_elements():
    """top_elements matches layered files; 2d files report None."""
    layered_cases = [
        ("basin_3d.dfsu", 174, 39),
        ("oresund_sigma_z.dfsu", 3700, 16),
        ("oresund_vertical_slice.dfsu", 99, 19),
    ]
    for name, n_columns, fourth_id in layered_cases:
        tops = Dfsu(os.path.join("tests", "testdata", name)).top_elements
        assert len(tops) == n_columns
        assert tops[3] == fourth_id
    assert Dfsu(os.path.join("tests", "testdata", "HD2D.dfsu")).top_elements is None


def test_bottom_elements():
    """bottom_elements matches layered files; 2d files report None."""
    layered_cases = [
        ("basin_3d.dfsu", 174, 30),
        ("oresund_sigma_z.dfsu", 3700, 13),
        ("oresund_vertical_slice.dfsu", 99, 15),
    ]
    for name, n_columns, fourth_id in layered_cases:
        bottoms = Dfsu(os.path.join("tests", "testdata", name)).bottom_elements
        assert len(bottoms) == n_columns
        assert bottoms[3] == fourth_id
    assert Dfsu(os.path.join("tests", "testdata", "HD2D.dfsu")).bottom_elements is None


def test_n_layers_per_column():
    """Per-column layer counts match layered files; 2d files report None."""
    layered_cases = [
        ("basin_3d.dfsu", 174, 10),
        ("oresund_sigma_z.dfsu", 3700, 4),
        ("oresund_vertical_slice.dfsu", 99, 5),
    ]
    for name, n_columns, fourth_count in layered_cases:
        dfsu_file = Dfsu(os.path.join("tests", "testdata", name))
        per_column = dfsu_file.n_layers_per_column
        assert len(per_column) == n_columns
        assert per_column[3] == fourth_count
        if name == "oresund_sigma_z.dfsu":
            # The deepest column must use every layer of the file.
            assert max(per_column) == dfsu_file.n_layers
    assert Dfsu(os.path.join("tests", "testdata", "HD2D.dfsu")).n_layers_per_column is None


def test_get_layer_elements():
    """Layer selection by index, negative index and list; out of range raises."""
    dfsu_file = Dfsu(os.path.join("tests", "testdata", "oresund_sigma_z.dfsu"))
    assert np.all(dfsu_file.get_layer_elements(0) == dfsu_file.top_elements)
    assert dfsu_file.get_layer_elements(-1)[5] == 23
    layer_one = dfsu_file.get_layer_elements(1)
    assert layer_one[5] == 8639
    assert len(layer_one) == 10
    assert len(dfsu_file.get_layer_elements([1, 3])) == 197
    with pytest.raises(Exception):
        dfsu_file.get_layer_elements(12)
def test_find_nearest_profile_elements():
    """A water-column lookup returns all elements of the nearest profile."""
    dfsu_file = Dfsu(os.path.join("tests", "testdata", "oresund_sigma_z.dfsu"))
    column = dfsu_file.find_nearest_profile_elements(358337, 6196090)
    assert len(column) == 8
    assert column[-1] == 3042


def test_is_geo_UTM():
    """A projected (UTM) file is not geographic."""
    assert Dfsu(os.path.join("tests", "testdata", "HD2D.dfsu")).is_geo is False


def test_is_geo_LONGLAT():
    """A LONG/LAT file is geographic."""
    assert Dfsu(os.path.join("tests", "testdata", "wind_north_sea.dfsu")).is_geo is True


def test_get_element_area_UTM():
    """Element areas in a projected mesh match the known first value."""
    element_areas = Dfsu(os.path.join("tests", "testdata", "HD2D.dfsu")).get_element_area()
    assert element_areas[0] == 4949.102548750438


def test_get_element_area_3D():
    """Element areas also work for layered meshes."""
    element_areas = Dfsu(os.path.join("tests", "testdata", "oresund_sigma_z.dfsu")).get_element_area()
    assert element_areas[0] == 350186.43530453625


def test_get_element_area_LONGLAT():
    """Element areas for geographic meshes are returned in square metres."""
    element_areas = Dfsu(os.path.join("tests", "testdata", "wind_north_sea.dfsu")).get_element_area()
    assert element_areas[0] == 139524218.81411952


def test_write(tmpdir):
    """A dfsu file can be written from a mesh plus a single-step dataset."""
    outfilename = os.path.join(tmpdir.dirname, "simple.dfsu")
    meshfilename = os.path.join("tests", "testdata", "odense_rough.mesh")
    mesh = Mesh(meshfilename)
    zeros_step = np.zeros((1, mesh.n_elements))
    dataset = Dataset([zeros_step], time=[datetime(2000, 1, 1)], items=[ItemInfo("Zeros")])
    Dfsu(meshfilename).write(outfilename, dataset)
    assert os.path.exists(outfilename)


def test_write_from_dfsu(tmpdir):
    """Writing a subset of items preserves the source's time axis."""
    sourcefilename = os.path.join("tests", "testdata", "HD2D.dfsu")
    outfilename = os.path.join(tmpdir.dirname, "simple.dfsu")
    source = Dfsu(sourcefilename)
    dataset = source.read([0, 1])
    source.write(outfilename, dataset)
    assert source.start_time.hour == 7
    assert os.path.exists(outfilename)
    written = Dfsu(outfilename)
    assert source.start_time == written.start_time
    assert source.timestep == written.timestep
    assert source.end_time == written.end_time
def test_write_from_dfsu_2_time_steps(tmpdir):
sourcefilename = os.path.join("tests", "testdata", | |
the number of
distinct labels is approximately the same in each fold.
.. versionadded:: 0.17
Parameters
----------
labels : array-like with shape (n_samples, )
Contains a label for each sample.
The folds are built so that the same label does not appear in two
different folds.
n_folds : int, default=3
Number of folds. Must be at least 2.
Examples
--------
>>> from sklearn.cross_validation import LabelKFold
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> labels = np.array([0, 0, 2, 2])
>>> label_kfold = LabelKFold(labels, n_folds=2)
>>> len(label_kfold)
2
>>> print(label_kfold)
sklearn.cross_validation.LabelKFold(n_labels=4, n_folds=2)
>>> for train_index, test_index in label_kfold:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
...
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [3 4]
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [3 4] [1 2]
See also
--------
LeaveOneLabelOut for splitting the data according to explicit,
domain-specific stratification of the dataset.
"""
    def __init__(self, labels, n_folds=3):
        """Precompute a per-sample fold assignment keeping labels together.

        Parameters
        ----------
        labels : array-like with shape (n_samples,)
            Label of each sample; all samples sharing a label are placed
            in the same fold.
        n_folds : int, default=3
            Number of folds; must not exceed the number of distinct labels.
        """
        super(LabelKFold, self).__init__(len(labels), n_folds,
                                         shuffle=False, random_state=None)

        # Map arbitrary label values onto contiguous indices 0..n_labels-1.
        unique_labels, labels = np.unique(labels, return_inverse=True)
        n_labels = len(unique_labels)

        if n_folds > n_labels:
            raise ValueError(
                ("Cannot have number of folds n_folds={0} greater"
                 " than the number of labels: {1}.").format(n_folds,
                                                            n_labels))

        # Weight labels by their number of occurrences
        n_samples_per_label = np.bincount(labels)

        # Distribute the most frequent labels first
        indices = np.argsort(n_samples_per_label)[::-1]
        n_samples_per_label = n_samples_per_label[indices]

        # Total weight of each fold
        n_samples_per_fold = np.zeros(n_folds)

        # Mapping from label index to fold index
        # (float array; only ever compared with ==, so dtype is harmless)
        label_to_fold = np.zeros(len(unique_labels))

        # Distribute samples by adding the largest weight to the lightest fold
        # (greedy longest-processing-time packing keeps fold sizes balanced)
        for label_index, weight in enumerate(n_samples_per_label):
            lightest_fold = np.argmin(n_samples_per_fold)
            n_samples_per_fold[lightest_fold] += weight
            label_to_fold[indices[label_index]] = lightest_fold

        # Per-sample fold id, consumed by _iter_test_indices.
        self.idxs = label_to_fold[labels]
def _iter_test_indices(self):
for f in range(self.n_folds):
yield np.where(self.idxs == f)[0]
def __repr__(self):
return '{0}.{1}(n_labels={2}, n_folds={3})'.format(
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.n_folds,
)
    def __len__(self):
        # One (train, test) split is yielded per fold.
        return self.n_folds
class StratifiedKFold(_BaseKFold):
    """Stratified K-Folds cross validation iterator

    .. deprecated:: 0.18
        This module will be removed in 0.20.
        Use :class:`sklearn.model_selection.StratifiedKFold` instead.

    Provides train/test indices to split data in train test sets.

    This cross-validation object is a variation of KFold that
    returns stratified folds. The folds are made by preserving
    the percentage of samples for each class.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    y : array-like, [n_samples]
        Samples to split in K folds.

    n_folds : int, default=3
        Number of folds. Must be at least 2.

    shuffle : boolean, optional
        Whether to shuffle each stratification of the data before splitting
        into batches.

    random_state : int, RandomState instance or None, optional, default=None
        If int, random_state is the seed used by the random number
        generator; If RandomState instance, random_state is the random number
        generator; If None, the random number generator is the RandomState
        instance used by `np.random`. Used when ``shuffle`` == True.

    Examples
    --------
    >>> from sklearn.cross_validation import StratifiedKFold
    >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
    >>> y = np.array([0, 0, 1, 1])
    >>> skf = StratifiedKFold(y, n_folds=2)
    >>> len(skf)
    2
    >>> print(skf)  # doctest: +NORMALIZE_WHITESPACE
    sklearn.cross_validation.StratifiedKFold(labels=[0 0 1 1], n_folds=2,
                                             shuffle=False, random_state=None)
    >>> for train_index, test_index in skf:
    ...    print("TRAIN:", train_index, "TEST:", test_index)
    ...    X_train, X_test = X[train_index], X[test_index]
    ...    y_train, y_test = y[train_index], y[test_index]
    TRAIN: [1 3] TEST: [0 2]
    TRAIN: [0 2] TEST: [1 3]

    Notes
    -----
    Train and test sizes may be different in each fold, with a difference of at
    most ``n_classes``.

    See also
    --------
    LabelKFold: K-fold iterator variant with non-overlapping labels.
    """

    def __init__(self, y, n_folds=3, shuffle=False,
                 random_state=None):
        super(StratifiedKFold, self).__init__(
            len(y), n_folds, shuffle, random_state)
        y = np.asarray(y)
        n_samples = y.shape[0]
        unique_labels, y_inversed = np.unique(y, return_inverse=True)
        label_counts = np.bincount(y_inversed)
        min_labels = np.min(label_counts)
        if np.all(self.n_folds > label_counts):
            raise ValueError("All the n_labels for individual classes"
                             " are less than %d folds."
                             % (self.n_folds))
        if self.n_folds > min_labels:
            warnings.warn(("The least populated class in y has only %d"
                           " members, which is too few. The minimum"
                           " number of labels for any class cannot"
                           " be less than n_folds=%d."
                           % (min_labels, self.n_folds)), Warning)

        # don't want to use the same seed in each label's shuffle
        if self.shuffle:
            rng = check_random_state(self.random_state)
        else:
            rng = self.random_state

        # pre-assign each sample to a test fold index using individual KFold
        # splitting strategies for each label so as to respect the
        # balance of labels
        per_label_cvs = [
            KFold(max(c, self.n_folds), self.n_folds, shuffle=self.shuffle,
                  random_state=rng) for c in label_counts]
        # FIX: dtype=np.int was a deprecated alias for the builtin `int`
        # and was removed in NumPy 1.24; the builtin yields the identical
        # platform-default integer dtype.
        test_folds = np.zeros(n_samples, dtype=int)
        for test_fold_idx, per_label_splits in enumerate(zip(*per_label_cvs)):
            for label, (_, test_split) in zip(unique_labels, per_label_splits):
                label_test_folds = test_folds[y == label]
                # the test split can be too big because we used
                # KFold(max(c, self.n_folds), self.n_folds) instead of
                # KFold(c, self.n_folds) to make it possible to not crash even
                # if the data is not 100% stratifiable for all the labels
                # (we use a warning instead of raising an exception)
                # If this is the case, let's trim it:
                test_split = test_split[test_split < len(label_test_folds)]
                label_test_folds[test_split] = test_fold_idx
                test_folds[y == label] = label_test_folds

        self.test_folds = test_folds
        self.y = y

    def _iter_test_masks(self):
        # One boolean mask per fold, from the precomputed assignment.
        for i in range(self.n_folds):
            yield self.test_folds == i

    def __repr__(self):
        return '%s.%s(labels=%s, n_folds=%i, shuffle=%s, random_state=%s)' % (
            self.__class__.__module__,
            self.__class__.__name__,
            self.y,
            self.n_folds,
            self.shuffle,
            self.random_state,
        )

    def __len__(self):
        # One (train, test) split is yielded per fold.
        return self.n_folds
class LeaveOneLabelOut(_PartitionIterator):
    """Leave-One-Label_Out cross-validation iterator

    .. deprecated:: 0.18
        This module will be removed in 0.20.
        Use :class:`sklearn.model_selection.LeaveOneGroupOut` instead.

    Provides train/test indices to split data according to a third-party
    provided label. This label information can be used to encode arbitrary
    domain specific stratifications of the samples as integers.

    For instance the labels could be the year of collection of the samples
    and thus allow for cross-validation against time-based splits.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    labels : array-like of int with shape (n_samples,)
        Arbitrary domain-specific stratification of the data to be used
        to draw the splits.

    Examples
    --------
    >>> from sklearn import cross_validation
    >>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
    >>> y = np.array([1, 2, 1, 2])
    >>> labels = np.array([1, 1, 2, 2])
    >>> lol = cross_validation.LeaveOneLabelOut(labels)
    >>> len(lol)
    2
    >>> print(lol)
    sklearn.cross_validation.LeaveOneLabelOut(labels=[1 1 2 2])
    >>> for train_index, test_index in lol:
    ...    print("TRAIN:", train_index, "TEST:", test_index)
    ...    X_train, X_test = X[train_index], X[test_index]
    ...    y_train, y_test = y[train_index], y[test_index]
    ...    print(X_train, X_test, y_train, y_test)
    TRAIN: [2 3] TEST: [0 1]
    [[5 6]
     [7 8]] [[1 2]
     [3 4]] [1 2] [1 2]
    TRAIN: [0 1] TEST: [2 3]
    [[1 2]
     [3 4]] [[5 6]
     [7 8]] [1 2] [1 2]

    See also
    --------
    LabelKFold: K-fold iterator variant with non-overlapping labels.
    """

    def __init__(self, labels):
        super(LeaveOneLabelOut, self).__init__(len(labels))
        # Keep a private copy so a caller mutating its array after
        # construction cannot change the splits mid-iteration.
        self.labels = np.array(labels, copy=True)
        self.unique_labels = np.unique(labels)
        self.n_unique_labels = len(self.unique_labels)

    def _iter_test_masks(self):
        # One boolean mask per distinct label value, in sorted label order.
        for label_value in self.unique_labels:
            yield self.labels == label_value

    def __repr__(self):
        return '{0}.{1}(labels={2})'.format(
            self.__class__.__module__,
            self.__class__.__name__,
            self.labels,
        )

    def __len__(self):
        # One (train, test) split is yielded per distinct label.
        return self.n_unique_labels
class LeavePLabelOut(_PartitionIterator):
"""Leave-P-Label_Out cross-validation iterator
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.LeavePGroupsOut` instead.
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePLabelOut and LeaveOneLabelOut is that
the former builds the test sets with all the samples assigned to
``p`` different values of the labels while the latter uses samples
all assigned the same labels.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
p : int
Number of samples to leave out in the test split.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, | |
checked_request.unique_id == rq.unique_id:
# This is a case when rq != self.tmp_rq.
# Request was already checked
status = (e_errors.OK, None)
else:
# This is a case when rq == self.tmp_rq
# Need to check this request
rq, status = self.check_read_request(rq.ticket['volume'], rq, requestor)
if rq and status[0] == e_errors.OK:
return rq, status
elif rq.work == 'write_to_hsm':
if checked_request and checked_request.unique_id == rq.unique_id:
# This is a case when rq != self.tmp_rq.
# Request was already checked
status = (e_errors.OK, None)
else:
# This is a case when rq == self.tmp_rq
# Need to check this request
rq, status = self.check_write_request(rq.ticket['fc']['external_label'], rq, requestor)
if rq and status[0] == e_errors.OK:
return rq, status
# no HIPri requests: look in pending work queue for reading or writing work
# see what priority has a completed request
use_this_volume = 1
if priority and priority[0] and priority[0] <= 0:
self.init_request_selection()
# this is a lower priority request (usually used for migration)
# it can be preempted by any normal priority request
# process request
start_t=time.time()
rq, status = self.schedule(requestor)
Trace.trace(self.trace_level+10,"next_work_this_volume: SCHEDULE RETURNED %s %s"%(rq, status))
Trace.trace(100, "next_work_this_volume: SCHEDULE, time in state %s"%(time.time()-start_t, ))
if rq and rq.ticket['encp']['curpri'] > 0:
# preempt current low priority request
# by request with normal priority
use_this_volume = 0
if use_this_volume:
self.init_request_selection()
self.process_for_bound_vol = external_label
# for tape positioning optimization check what was
# a last work for this volume
if last_work == 'WRITE':
# see if there is another work for this volume family
# disable retrival of HiPri requests as they were
# already treated above
#rq = self.pending_work.get(vol_family, use_admin_queue=0)
rq = self._get_request(requestor, self.pending_work.get, vol_family, use_admin_queue=0)
Trace.trace(self.trace_level+10, "next_work_this_volume: use volume family %s rq %s"%
(vol_family, rq))
if not rq:
#rq = self.pending_work.get(external_label, current_location, use_admin_queue=0)
rq = self._get_request(requestor, self.pending_work.get, external_label, current_location, use_admin_queue=0)
Trace.trace(self.trace_level+10, "next_work_this_volume: use label %s rq %s"%
(external_label, rq))
else:
# see if there is another work for this volume
# disable retrival of HiPri requests as they were
# already treated above
#rq = self.pending_work.get(external_label, current_location, use_admin_queue=0)
rq = self._get_request(requestor, self.pending_work.get, external_label, current_location, use_admin_queue=0)
Trace.trace(self.trace_level+10, "next_work_this_volume: use label %s rq %s"%
(external_label, rq))
if not rq:
#rq = self.pending_work.get(vol_family, use_admin_queue=0)
rq = self._get_request(requestor, self.pending_work.get, vol_family, use_admin_queue=0)
Trace.trace(self.trace_level+10, "next_work_this_volume: use volume family %s rq %s"%
(vol_family, rq))
exc_limit_rq = None # exceeded limit requests
rqs = []
while rq:
found = 0
for r in rqs:
if r.unique_id == rq.unique_id:
found = 1
Trace.log(e_errors.INFO, "Found the same id. Looks like going in cycles. Will break")
break
else:
rqs.append(rq)
if found:
rq = None
break
if rq.ticket.has_key('reject_reason'):
del(rq.ticket['reject_reason'])
if rq:
Trace.trace(self.trace_level+10, "next_work_this_volume: s2 rq %s" % (rq.ticket,))
if rq.work == 'read_from_hsm':
rq, key = self.process_read_request(rq, requestor)
if self.continue_scan:
# before continuing check if it is a request
# for v['external_label']
if rq and rq.ticket['fc']['external_label'] == external_label:
Trace.trace(self.trace_level+10, "next_work_this_volume:exc_limit_rq 1 %s"%(rq,))
exc_limit_rq = rq
checked_request = rq
break
if last_work == "READ":
# volume is readonly: get only read requests
#rq = self.pending_work.get(external_label, next=1, use_admin_queue=0, disabled_hosts=self.disabled_hosts) # get next request
rq = self._get_request(requestor, self.pending_work.get, external_label, next=1, use_admin_queue=0, disabled_hosts=self.disabled_hosts) # get next request
if not rq:
# see if there is a write work for the current volume
# volume family
#rq = self.pending_work.get(vol_family, next=1, use_admin_queue=0, disabled_hosts=self.disabled_hosts)
rq = self._get_request(requestor, self.pending_work.get, vol_family, next=1, use_admin_queue=0, disabled_hosts=self.disabled_hosts)
else:
#rq = self.pending_work.get(vol_family, next=1, use_admin_queue=0, disabled_hosts=self.disabled_hosts) # get next request
rq = self._get_request(requestor, self.pending_work.get, vol_family, next=1, use_admin_queue=0, disabled_hosts=self.disabled_hosts) # get next request
continue
break
elif rq.work == 'write_to_hsm':
rq, key = self.process_write_request(rq, requestor, last_work=last_work)
Trace.trace(self.trace_level+10, "next_work_this_volume:process_write_request returned %s continue_scan %s "%((rq, key), self.continue_scan))
if self.continue_scan:
if rq:
if checked_request and checked_request.unique_id == rq.unique_id:
status = (e_errors.OK, None)
else:
rq, status = self.check_write_request(external_label, rq, requestor)
if rq and status[0] == e_errors.OK:
Trace.trace(self.trace_level+10, "next_work_this_volume: exc_limit_rq 2 %s"%(rq,))
exc_limit_rq = rq
checked_request = rq
break
Trace.trace(self.trace_level+10, "next_work_this_volume: current_volume_info %s"%(self.current_volume_info,))
if last_work == "WRITE":
#rq = self.pending_work.get(vol_family, next=1, use_admin_queue=0, disabled_hosts=self.disabled_hosts) # get next request
rq = self._get_request(requestor, self.pending_work.get, vol_family, next=1, use_admin_queue=0, disabled_hosts=self.disabled_hosts) # get next request
if not rq:
# see if there is a read work for the current volume
# volume family
#rq = self.pending_work.get(external_label, next=1, use_admin_queue=0, disabled_hosts=self.disabled_hosts) # get next request
rq = self._get_request(requestor, self.pending_work.get, external_label, next=1, use_admin_queue=0, disabled_hosts=self.disabled_hosts) # get next request
else:
#rq = self.pending_work.get(external_label, next=1, use_admin_queue=0, disabled_hosts=self.disabled_hosts) # get next request
rq = self._get_request(requestor, self.pending_work.get, external_label, next=1, use_admin_queue=0, disabled_hosts=self.disabled_hosts) # get next request
continue
break
# end while
if not rq and self.tmp_rq:
rq = self.tmp_rq
if exc_limit_rq: # request with exceeded SG limit
rq = exc_limit_rq
if rq and rq.work == 'write_to_hsm' and self.mover_type(requestor) != 'DiskMover':
while rq:
Trace.trace(self.trace_level+10,"next_work_this_volume: LABEL %s RQ %s" % (external_label, rq))
# regular write request must have the same volume label
rq.ticket['fc']['external_label'] = external_label
if checked_request and checked_request.unique_id == rq.unique_id:
status = (e_errors.OK, None)
else:
rq, status = self.check_write_request(external_label, rq, requestor)
Trace.trace(self.trace_level+10,"next_work_this_volume: RQ %s STAT %s" %(rq,status))
if rq: Trace.trace(self.trace_level+10,"next_work_this_volume: TICK %s" %(rq.ticket,))
if rq and status[0] == e_errors.OK:
return rq, status
if not rq: break
Trace.trace(self.trace_level+10, "next_work_this_volume: got here")
#rq = self.pending_work.get(vol_family, next=1, use_admin_queue=0, disabled_hosts=self.disabled_hosts)
rq = self._get_request(requestor, self.pending_work.get, vol_family, next=1, use_admin_queue=0, disabled_hosts=self.disabled_hosts)
# return read work
if rq:
Trace.trace(self.trace_level+10, "next_work_this_volume: s4 rq %s" % (rq.ticket,))
if checked_request and checked_request.unique_id == rq.unique_id:
status = (e_errors.OK, None)
else:
rq, status = self.check_read_request(label, rq, requestor)
return rq, status
return (None, (e_errors.NOWORK, None))
def is_volume_suspect(self, external_label):
"""
Check if volume is in the suspect volume list.
:type external_label: :obj:`str`
:arg external_label: label of the volume
:rtype: :obj:`dict` - volume record or :obj:`None`
"""
# remove volumes time in the queue for wich has expired
if self.suspect_vol_expiration_to:
# create a list of expired volumes
now = time.time()
expired_vols = []
for vol in self.suspect_volumes.list:
if now - vol['time'] >= self.suspect_vol_expiration_to:
expired_vols.append(vol)
# cleanup suspect volume list
if expired_vols:
for vol in expired_vols:
Trace.log(e_errors.INFO,"%s has been removed from suspect volume list due to TO expiration"%
(vol['external_label'],))
self.suspect_volumes.remove(vol)
Trace.trace(self.trace_level+11, "is_volume_suspect: external label %s suspect_volumes.list: %s"%(external_label,self.suspect_volumes.list))
for vol in self.suspect_volumes.list:
if external_label == vol['external_label']:
Trace.trace(self.trace_level+11, "is_volume_suspect: returning %s"%(vol, ))
return vol
Trace.trace(self.trace_level+11, "is_volume_suspect: returning None")
return None
# check if mover is in the suspect volume list
# return tuple (suspect_volume, suspect_mover)
def is_mover_suspect(self, mover, external_label):
"""
Check if mover is in the suspect volume list.
:type mover: :obj:`str`
:arg mover: mover name
:type external_label: :obj:`str`
:arg external_label: label of the volume
:rtype: :obj:`tuple` (:obj:`str`- volume name or :obj:`None`,
:obj:`str`- mover name or :obj:`None`)
"""
Trace.trace(self.trace_level+11, "is_mover_suspect: %s %s"%(mover, external_label))
vol = self.is_volume_suspect(external_label)
if vol:
for mov in vol['movers']:
if mover == mov:
break
else:
Trace.trace(self.trace_level+11, "is_mover_suspect: returning %s, None"%(vol,))
return vol,None
Trace.trace(self.trace_level+11, "is_mover_suspect: returning %s %s"%(vol, mov))
return vol,mov
else:
Trace.trace(self.trace_level+11, "is_mover_suspect: returning None, None")
return None,None
# update suspect volumer list
def update_suspect_vol_list(self, external_label, mover):
"""
Update suspect volumer list.
:type external_label: :obj:`str`
:arg external_label: label of the volume
:type mover: :obj:`str`
:arg mover: mover name
:rtype: :obj:`dict` - suspect volume dictionary
"""
# update list of suspected volumes
Trace.trace(self.trace_level+11,"update_suspect_vol_list: SUSPECT VOLUME LIST BEFORE %s"%(self.suspect_volumes.list,))
if not external_label: return None
vol_found = 0
for vol in self.suspect_volumes.list:
if external_label == vol['external_label']:
vol_found = 1
break
if not vol_found:
vol = {'external_label' : external_label,
'movers' : [],
'time':time.time()
}
for mv in vol['movers']:
if mover == mv:
break
else:
vol['movers'].append(mover)
if not vol_found:
self.suspect_volumes.append(vol)
# send alarm if number of suspect volumes is above a threshold
if len(self.suspect_volumes.list) >= self.max_suspect_volumes:
Trace.alarm(e_errors.WARNING, e_errors.ABOVE_THRESHOLD,
{"volumes":"Number of suspect volumes is above threshold"})
Trace.trace(self.trace_level+11, "update_suspect_vol_list: SUSPECT VOLUME LIST AFTER %s" % (self.suspect_volumes,))
return vol
############################################
# End request processing methods
############################################
class LibraryManager(dispatching_worker.DispatchingWorker,
generic_server.GenericServer,
LibraryManagerMethods):
"""
Library manager methods processing movers and enstore command-line requests.
"""
def __init__(self, libman, csc):
"""
:type libman: :obj:`str`
:arg libman: unique library manager name
:type csc: :class:`configuration_client.ConfigurationClient`
:arg csc: configuration client instance. Also can be server address:
:obj:`tuple` (:obj:`str`- IP address, :obj:`int` - port)
"""
self.name_ext = "LM"
self.csc = | |
# -*- coding: utf-8 -*-
import simplejson as json
import pendulum
import inflection
import inspect
import uuid
import datetime
from warnings import warn
from six import add_metaclass
from collections import OrderedDict
from ..utils import basestring, deprecated
from ..exceptions.orm import MassAssignmentError, RelatedClassNotFound
from ..query import QueryBuilder
from .builder import Builder
from .collection import Collection
from .relations import (
Relation,
HasOne,
HasMany,
BelongsTo,
BelongsToMany,
HasManyThrough,
MorphOne,
MorphMany,
MorphTo,
MorphToMany,
)
from .relations.wrapper import Wrapper, BelongsToManyWrapper
from .utils import mutator, accessor
from .scopes import Scope
from ..events import Event
class ModelRegister(dict):
    """
    A dict mapping table names to model classes that also maintains the
    inverse mapping (value -> key) in ``self.inverse``.
    """

    def __init__(self, *args, **kwargs):
        self.inverse = {}
        super(ModelRegister, self).__init__(*args, **kwargs)
        # dict.__init__ bypasses our __setitem__, so any initial content
        # must be mirrored into the inverse mapping explicitly.
        for key, value in self.items():
            self.inverse[value] = key

    def __setitem__(self, key, value):
        # When a key is re-bound, drop the stale inverse entry first;
        # otherwise the old value would keep pointing at this key.
        if key in self:
            old_value = self[key]
            if old_value in self.inverse:
                del self.inverse[old_value]
        super(ModelRegister, self).__setitem__(key, value)
        self.inverse[value] = key

    def __delitem__(self, key):
        # Remove both directions of the mapping.
        del self.inverse[self[key]]
        super(ModelRegister, self).__delitem__(key)
class MetaModel(type):
    """
    Metaclass for :class:`Model`.

    Registers every model class under its table name and forwards
    unknown class-level attribute access to a query builder, enabling
    ``SomeModel.where(...)``-style calls directly on the class.
    """

    __register__ = {}

    def __init__(cls, *args, **kwargs):
        # Register the class under its explicit table name, or one
        # derived from the class name.
        table_name = cls.__table__ or inflection.tableize(cls.__name__)
        cls._register[table_name] = cls
        super(MetaModel, cls).__init__(*args, **kwargs)

    def __getattr__(cls, item):
        try:
            return type.__getattribute__(cls, item)
        except AttributeError:
            # Not a class attribute: delegate to a fresh query builder.
            return getattr(cls.query(), item)
@add_metaclass(MetaModel)
class Model(object):
    """
    Base active-record style ORM model.

    Class-level attributes below configure connection/table mapping,
    mass-assignment protection, serialization visibility, timestamps and
    casting; the remaining underscore-prefixed attributes are per-class
    caches shared through the class hierarchy.
    """
    # --- connection / table configuration ---
    __connection__ = None
    __table__ = None
    __primary_key__ = "id"
    __incrementing__ = True
    # --- mass-assignment protection ---
    __fillable__ = []
    __guarded__ = ["*"]
    __unguarded__ = False
    # --- serialization visibility ---
    __hidden__ = []
    __visible__ = []
    __appends__ = []
    # --- timestamps, date handling and attribute casting ---
    __timestamps__ = True
    __dates__ = []
    __casts__ = {}
    __touches__ = []
    __morph_name__ = None
    # --- query defaults and per-class caches (keyed by class) ---
    _per_page = 15
    _with = []
    _booted = {}
    _global_scopes = {}
    _registered = []
    _accessor_cache = {}
    _mutator_cache = {}
    __resolver = None
    __columns__ = []
    __dispatcher__ = Event()
    __observables__ = []
    _register = ModelRegister()
    # default attribute values copied into every new instance
    __attributes__ = {}
    # relation-defining methods that produce "many"-style relations
    many_methods = ["belongs_to_many", "morph_to_many", "morphed_by_many"]
    # timestamp column names
    CREATED_AT = "created_at"
    UPDATED_AT = "updated_at"
def __init__(self, _attributes=None, **attributes):
"""
:param attributes: The instance attributes
"""
self._boot_if_not_booted()
self._exists = False
self._without_scope_name = None
self._original = {}
# Setting default attributes' values
self._attributes = dict((k, v) for k, v in self.__attributes__.items())
self._relations = {}
self.sync_original()
if _attributes is not None:
attributes.update(_attributes)
self.fill(**attributes)
def _boot_if_not_booted(self):
"""
Check if the model needs to be booted and if so, do it.
"""
klass = self.__class__
if not klass._booted.get(klass):
klass._booted[klass] = True
self._fire_model_event("booting")
klass._boot()
self._fire_model_event("booted")
@classmethod
def _boot(cls):
"""
The booting method of the model.
"""
cls._accessor_cache[cls] = {}
cls._mutator_cache[cls] = {}
for name, method in cls.__dict__.items():
if isinstance(method, accessor):
cls._accessor_cache[cls][method.attribute] = method
elif isinstance(method, mutator):
cls._mutator_cache[cls][method.attribute] = method
cls._boot_mixins()
@classmethod
def _boot_columns(cls):
connection = cls.resolve_connection()
columns = connection.get_schema_manager().list_table_columns(
cls.__table__ or inflection.tableize(cls.__name__)
)
cls.__columns__ = list(columns.keys())
@classmethod
def _boot_mixins(cls):
"""
Boot the mixins
"""
for mixin in cls.__bases__:
# if mixin == Model:
# continue
method = "boot_%s" % inflection.underscore(mixin.__name__)
if hasattr(mixin, method):
getattr(mixin, method)(cls)
@classmethod
def add_global_scope(cls, scope, implementation=None):
"""
Register a new global scope on the model.
:param scope: The scope to register
:type scope: orator.orm.scopes.scope.Scope or callable or str
:param implementation: The scope implementation
:type implementation: callbale or None
"""
if cls not in cls._global_scopes:
cls._global_scopes[cls] = OrderedDict()
if isinstance(scope, basestring) and implementation is not None:
cls._global_scopes[cls][scope] = implementation
elif callable(scope):
cls._global_scopes[cls][uuid.uuid4().hex] = scope
elif isinstance(scope, Scope):
cls._global_scopes[cls][scope.__class__] = scope
else:
raise Exception("Global scope must be an instance of Scope or a callable")
@classmethod
def has_global_scope(cls, scope):
"""
Determine if a model has a global scope.
:param scope: The scope to register
:type scope: orator.orm.scopes.scope.Scope or str
"""
return cls.get_global_scope(scope) is not None
@classmethod
def get_global_scope(cls, scope):
"""
Get a global scope registered with the model.
:param scope: The scope to register
:type scope: orator.orm.scopes.scope.Scope or str
"""
for key, value in cls._global_scopes[cls].items():
if isinstance(scope, key):
return value
def get_global_scopes(self):
"""
Get the global scopes for this class instance.
:rtype: dict
"""
return self.__class__._global_scopes.get(self.__class__, {})
@classmethod
def observe(cls, observer):
"""
Register an observer with the Model.
:param observer: The observer
"""
for event in cls.get_observable_events():
if hasattr(observer, event):
cls._register_model_event(event, getattr(observer, event))
def fill(self, _attributes=None, **attributes):
"""
Fill the model with attributes.
:param attributes: The instance attributes
:type attributes: dict
:return: The model instance
:rtype: Model
:raises: MassAssignmentError
"""
if _attributes is not None:
attributes.update(_attributes)
totally_guarded = self.totally_guarded()
for key, value in self._fillable_from_dict(attributes).items():
key = self._remove_table_from_key(key)
if self.is_fillable(key):
self.set_attribute(key, value)
elif totally_guarded:
raise MassAssignmentError(key)
return self
def force_fill(self, _attributes=None, **attributes):
"""
Fill the model with attributes. Force mass assignment.
:param attributes: The instance attributes
:type attributes: dict
:return: The model instance
:rtype: Model
"""
if _attributes is not None:
attributes.update(_attributes)
self.unguard()
self.fill(**attributes)
self.reguard()
return self
def _fillable_from_dict(self, attributes):
"""
Get the fillable attributes from a given dictionary.
:type attributes: dict
:return: The fillable attributes
:rtype: dict
"""
if self.__fillable__ and not self.__unguarded__:
return {x: attributes[x] for x in attributes if x in self.__fillable__}
return attributes
def new_instance(self, attributes=None, exists=False):
"""
Create a new instance for the given model.
:param attributes: The instance attributes
:type attributes: dict
:param exists:
:type exists: bool
:return: A new instance for the current model
:rtype: Model
"""
if attributes is None:
attributes = {}
model = self.__class__(**attributes)
model.set_connection(self.get_connection_name())
model.set_exists(exists)
return model
def new_from_builder(self, attributes=None, connection=None):
"""
Create a new model instance that is existing.
:param attributes: The model attributes
:type attributes: dict
:param connection: The connection name
:type connection: str
:return: A new instance for the current model
:rtype: Model
"""
model = self.new_instance({}, True)
if attributes is None:
attributes = {}
model.set_raw_attributes(attributes, True)
model.set_connection(connection or self.__connection__)
return model
@classmethod
def hydrate(cls, items, connection=None):
"""
Create a collection of models from plain lists.
:param items:
:param connection:
:return:
"""
instance = cls().set_connection(connection)
collection = instance.new_collection(items)
return collection.map(lambda item: instance.new_from_builder(item))
@classmethod
def hydrate_raw(cls, query, bindings=None, connection=None):
"""
Create a collection of models from a raw query.
:param query: The SQL query
:type query: str
:param bindings: The query bindings
:type bindings: list
:param connection: The connection name
:rtype: Collection
"""
instance = cls().set_connection(connection)
items = instance.get_connection().select(query, bindings)
return cls.hydrate(items, connection)
@classmethod
def create(cls, _attributes=None, **attributes):
"""
Save a new model an return the instance.
:param attributes: The instance attributes
:type attributes: dict
:return: The new instance
:rtype: Model
"""
if _attributes is not None:
attributes.update(_attributes)
model = cls(**attributes)
model.save()
return model
@classmethod
def force_create(cls, **attributes):
"""
Save a new model an return the instance. Allow mass assignment.
:param attributes: The instance attributes
:type attributes: dict
:return: The new instance
:rtype: Model
"""
cls.unguard()
model = cls.create(**attributes)
cls.reguard()
return model
@classmethod
def first_or_create(cls, **attributes):
"""
Get the first record matching the attributes or create it.
:param attributes: The instance attributes
:type attributes: dict
:return: The new instance
:rtype: Model
"""
instance = cls().new_query_without_scopes().where(attributes).first()
if instance is not None:
return instance
return cls.create(**attributes)
@classmethod
def first_or_new(cls, **attributes):
"""
Get the first record matching the attributes or instantiate it.
:param attributes: The instance attributes
:type attributes: dict
:return: The new instance
:rtype: Model
"""
instance = cls().new_query_without_scopes().where(attributes).first()
if instance is not None:
return instance
return cls(**attributes)
@classmethod
def update_or_create(cls, attributes, values=None):
"""
Create or update a record matching the attributes, and fill it with values.
:param attributes: The instance attributes
:type attributes: dict
:param values: The values
:type values: dict
:return: The new instance
:rtype: Model
"""
instance = cls.first_or_new(**attributes)
if values is None:
values = {}
instance.fill(**values).save()
return instance
@classmethod
def query(cls):
"""
Begin querying the model.
:return: A Builder instance
:rtype: orator.orm.Builder
"""
return cls().new_query()
@classmethod
def on(cls, connection=None):
"""
Begin querying the model on a given connection.
:param connection: The connection name
:type connection: str
:return: A Builder instance
:rtype: orator.orm.Builder
"""
instance = cls()
instance.set_connection(connection)
return instance.new_query()
@classmethod
def on_write_connection(cls):
"""
Begin querying the model on the write connection.
:return: A Builder instance
:rtype: QueryBuilder
"""
instance = cls()
return instance.new_query().use_write_connection()
@classmethod
def all(cls, columns=None):
"""
Get all og the models from the database.
:param columns: The columns to retrieve
:type columns: list
:return: A Collection instance
:rtype: Collection
"""
instance = cls()
return instance.new_query().get(columns)
@classmethod
def find(cls, id, columns=None):
"""
Find a model by its primary key.
:param id: The id of the model
:type id: mixed
:param columns: The columns to retrieve
:type columns: list
:return: Either a Model instance or a Collection
:rtype: Model
"""
instance = cls()
if isinstance(id, list) and not id:
return instance.new_collection()
if columns is None:
columns = ["*"]
return instance.new_query().find(id, columns)
| |
and returns a list of outputs, binary or categorical
Inputs:
corpus: 'DictaSign' or 'NCSLGR'
types: a list of lists of types to regroup
DictaSign: subset of ['fls' (with different categories), 'PT', 'PT_PRO1', 'PT_PRO2', 'PT_PRO3', 'PT_LOC', 'PT_DET', 'PT_LBUOY', 'PT_BUOY', 'DS', 'DSA', 'DSG', 'DSL', 'DSM', 'DSS', 'DST', 'DSX', 'FBUOY', 'N', 'FS']
NCSLGR: subset of ['other', 'lexical_with_ns_not_fs' (only 0/1), 'fingerspelling', 'fingerspelled_loan_signs', 'IX_1p', 'IX_2p', 'IX_3p', 'IX_loc', 'POSS', 'SELF', 'gesture', 'part_indef', 'DCL', 'LCL', 'SCL', 'BCL', 'ICL', 'BPCL', 'PCL']
nonZero: a list lists of non-zero categories to count positively
if anything other than 0 is counted positive, choose []
binary: list of True/False
video_indices: list/array of integers
separation: (integer) frames to separate videos
provided_annotation: raw annotation data (not needed)
from_notebook: True if used in Jupyter notebook
Outputs:
Concatenated categorical data, shape (1, time_steps, C+1) where C is the number of major types
e.g.1 : types = [['DS'], ['PT']], nonZeros = [[], []], binary=[True, True]
e.g.2 : types = [['PT_PRO1', 'PT_PRO2', 'PT_PRO3'], ['fls']], nonZeros = [[], [41891,43413,42495,42093]], binary=[True, True]
e.g.2b : types = [['PT_PRO1', 'PT_PRO2', 'PT_PRO3'], ['fls']], nonZeros = [[], [41891,43413,42495,42093]], binary=[True, False]
e.g.3 : types = [['PT_PRO1', 'PT_PRO2', 'PT_PRO3'], ['fls']], nonZeros = [[], []], binary=[True, True]
"""
video_indices = list(video_indices)
N_videos = len(video_indices)
if N_videos == 0:
sys.exit('At least one video index is required')
types = list(types)
N_types = len(types)
if N_types == 0:
sys.exit('At least one annotation type is required')
if provided_annotation is None:
provided_annotation = get_raw_annotation_from_file(corpus, from_notebook)
tmp = concatenate_annotations(corpus, types[0][0], video_indices, separation, provided_annotation, from_notebook)
output_list = []
for i_t in range(N_types):
if len(types[i_t])==0:
sys.exit('There should be at least one annotation category per type')
elif len(types[i_t])>1:
if len(nonZero[i_t])>0 or not binary[i_t]:
sys.exit('Grouping several annotation types with non-binary annotation is ambiguous')
else: # len(nonZero[i_t])==0 and binary[i_t]:
output_list.append(to_categorical(concatenate_fuse_annotations(corpus, types[i_t], video_indices, separation, provided_annotation, from_notebook),2))
else: # len(types[i_t])==1
if len(nonZero[i_t])>0:
if binary[i_t]:
output_list.append(to_categorical(concatenate_binarize_annotations(corpus, types[i_t][0], nonZero[i_t], video_indices, separation, provided_annotation, from_notebook),2))
else: # not binary[i_t]:
output_list.append(concatenate_categorize_annotations(corpus, types[i_t][0], nonZero[i_t], video_indices, separation, provided_annotation, from_notebook))
else: # len(nonZero[i_t])==0
if binary[i_t]:
output_list.append(to_categorical(concatenate_binarize_annotations(corpus, types[i_t][0], 'all', video_indices, separation, provided_annotation, from_notebook),2))
else: # not binary[i_t]:
sys.exit('Non-binary categorical output requires at least one nonZero value')
return output_list
def get_features_videos(corpus,
                        input_type='bodyFace_3D_features_hands_OP_HS',
                        input_normed=True,
                        input_type_format='old',
                        video_indices=np.arange(94),
                        from_notebook=False):
    """
    Gets all wanted features for a set of videos.

    Inputs:
        corpus: (string) 'DictaSign' or 'NCSLGR'
        input_type: (string) name of the feature set to load
        input_normed: (bool) load the '_normalized' feature files
        input_type_format: (string) 'old' (per-feature-group files) or
            'cslr_limsi_features' (one file per video)
        video_indices: list or numpy array of wanted videos
        from_notebook: if notebook script, data is in parent folder
    Outputs:
        features (list of numpy arrays [1, time_steps, features_number])
    """
    # NOTE(review): the default video_indices array is shared between
    # calls; it is only iterated here, never mutated — TODO confirm.
    features = []
    # Determine the feature dimensionality for the requested format.
    if input_type_format == 'old':
        features_dict, features_number = getFeaturesDict(input_type, input_normed)
    elif input_type_format == 'cslr_limsi_features':
        features_number = getFeaturesNumberCslrLimsiFeatures(input_type)
    else:
        sys.exit('Wrong input type format')
    if from_notebook:
        parent = '../'
    else:
        parent = ''
    if input_normed:
        suffix='_normalized'
    else:
        suffix=''
    list_videos = np.load(parent + 'data/processed/' + corpus + '/list_videos.npy')
    # The per-video frame counts are taken from the annotation arrays.
    if corpus == 'DictaSign':
        annotation_raw = np.load(parent + 'data/processed/DictaSign/annotations.npz', encoding='latin1', allow_pickle=True)['dataBrut_DS'] # for counting nb of images
    elif corpus == 'NCSLGR':
        annotation_raw = np.load(parent + 'data/processed/NCSLGR/annotations.npz', encoding='latin1', allow_pickle=True)['lexical_with_ns_not_fs'] # for counting nb of images
    else:
        sys.exit('Invalid corpus name')
    # Pre-allocate one zero array of shape (1, T, F) per requested video.
    for vid_idx in video_indices:
        time_steps = annotation_raw[vid_idx].shape[0]
        features.append(np.zeros((1, time_steps, features_number)))
    if input_type_format == 'old':
        # 'old' format: each feature-group file holds all videos; copy the
        # selected columns of each group into its slice of every video.
        features_number_idx = 0
        for key in features_dict:
            key_features_idx = features_dict[key]
            key_features_number = key_features_idx.size
            if key_features_number > 0:
                key_features = np.load(parent + 'data/processed/' + corpus + '/' + key + '.npy', encoding='latin1', allow_pickle=True)
                index_vid_tmp = 0
                for vid_idx in video_indices:
                    features[index_vid_tmp][0, :, features_number_idx:features_number_idx+key_features_number] = key_features[vid_idx][:, key_features_idx]
                    index_vid_tmp += 1
                features_number_idx += key_features_number
    elif input_type_format == 'cslr_limsi_features':
        # 'cslr_limsi_features' format: one feature file per video.
        index_vid_tmp = 0
        for vid_idx in video_indices:
            if corpus == 'DictaSign':
                vidName = 'DictaSign_lsf_' + list_videos[vid_idx] + '_front'
            else:
                vidName = list_videos[vid_idx]
            loaded_features = np.load(parent + 'data/processed/' + corpus + '/' + vidName + '_' + input_type + suffix + '.npy', encoding='latin1', allow_pickle=True)
            time_steps = annotation_raw[vid_idx].shape[0]
            T_loaded_features = loaded_features.shape[0]
            # Truncate or zero-pad so features match the annotation length.
            if T_loaded_features > time_steps:
                features[index_vid_tmp][0, :, :] = loaded_features[:time_steps, :]
            elif T_loaded_features < time_steps:
                features[index_vid_tmp][0, :T_loaded_features, :] = loaded_features
            else:
                features[index_vid_tmp][0, :, :] = loaded_features
            index_vid_tmp += 1
    return features
def get_sequence_features(corpus,
                          vid_idx=0,
                          img_start_idx=0,
                          input_type='bodyFace_3D_features_hands_OP_HS',
                          input_normed=True,
                          input_type_format='old',
                          time_steps=100,
                          preloaded_features=None,
                          from_notebook=False):
    """
    Function returning features for a sequence (a fixed-length window of
    one video).

    Inputs:
        corpus (string): 'DictaSign' or 'NCSLGR'
        vid_idx (int): which video
        img_start_idx (int): which start image
        input_type (string): name of the feature set to load
        input_normed (bool): load the '_normalized' feature files
        input_type_format (string): 'old' or 'cslr_limsi_features'
        time_steps: length of sequence (int)
        preloaded_features: if features are already loaded, a list of
            per-video arrays of shape [1, time_steps, features_number]
            (as returned by get_features_videos)
        from_notebook: if notebook script, data is in parent folder
    Outputs:
        X: a numpy array [1, time_steps, features_number] for features
    """
    if from_notebook:
        parent = '../'
    else:
        parent = ''
    if preloaded_features is None:
        if input_type_format == 'old':
            features_dict, features_number = getFeaturesDict(input_type, input_normed)
        elif input_type_format == 'cslr_limsi_features':
            features_number = getFeaturesNumberCslrLimsiFeatures(input_type)
        else:
            sys.exit('Wrong input type format')
    else:
        # BUG FIX: preloaded arrays are (1, T, F); axis 1 is the time axis,
        # so the feature count is shape[2] (shape[1] was used before).
        features_number = preloaded_features[vid_idx].shape[2]
    X = np.zeros((1, time_steps, features_number))
    if preloaded_features is None:
        if input_type_format == 'old':
            features_number_idx = 0
            for key in features_dict:
                key_features_idx = features_dict[key]
                key_features_number = key_features_idx.size
                if key_features_number > 0:
                    key_features = np.load(parent + 'data/processed/' + corpus + '/' + key + '.npy', encoding='latin1', allow_pickle=True)[vid_idx]
                    X[0, :, features_number_idx:features_number_idx+key_features_number] = key_features[img_start_idx:img_start_idx + time_steps, key_features_idx]
                    features_number_idx += key_features_number
        elif input_type_format == 'cslr_limsi_features':
            # BUG FIX: list_videos and suffix were referenced here without
            # being defined (NameError); recreate them exactly as in
            # get_features_videos.
            list_videos = np.load(parent + 'data/processed/' + corpus + '/list_videos.npy')
            if input_normed:
                suffix = '_normalized'
            else:
                suffix = ''
            if corpus == 'DictaSign':
                vidName = 'DictaSign_lsf_' + list_videos[vid_idx] + '_front'
            else:
                vidName = list_videos[vid_idx]
            X[0, :, :] = np.load(parent + 'data/processed/' + corpus + '/' + vidName + '_' + input_type + suffix + '.npy', encoding='latin1', allow_pickle=True)[img_start_idx:img_start_idx + time_steps, :]
    else:
        # Window out of the already-loaded per-video feature array.
        X[0, :, :] = preloaded_features[vid_idx][0, img_start_idx:img_start_idx+time_steps, :]
    return X
def get_sequence_annotations_mixed(corpus,
                                   types,
                                   nonZero,
                                   binary,
                                   video_index,
                                   img_start_idx=0,
                                   time_steps=100,
                                   provided_annotation=None,
                                   from_notebook=False):
    """
    Return the annotations of one sequence (a window of one video) as a
    list with one array per annotation type.

    Inputs:
        corpus (string)
        types: list of lists of annotation types to regroup
        nonZero: list of lists of non-zero categories to count positively
        binary: list of True/False, one per type
        video_index (int): which video
        img_start_idx (int): which start image
        time_steps: length of the sequence (int)
        provided_annotation: raw annotation data (loaded if not given)
        from_notebook: if notebook script, data is in parent folder
    Outputs:
        Y: a list of annotation arrays, cropped to the window
    """
    annotation = provided_annotation
    if annotation is None:
        annotation = get_raw_annotation_from_file(corpus, from_notebook)
    full = get_concatenated_mixed(corpus, types, nonZero, binary, video_indices=[video_index], separation=0, provided_annotation=annotation, from_notebook=from_notebook)
    # Crop every per-type output to the requested time window.
    return [a[:, img_start_idx:img_start_idx+time_steps, :] for a in full]
def get_sequence_annotations_sign_types(corpus,
                                        types,
                                        nonZero,
                                        video_index,
                                        img_start_idx=0,
                                        time_steps=100,
                                        provided_annotation=None,
                                        from_notebook=False):
    """
    Return the annotations of one sequence (a window of one video) as a
    single binary sign-type matrix.

    Inputs:
        corpus (string)
        types: list of lists of original annotation names that compose
            the final outputs
        nonZero: list of lists of non-zero categories to count positively
        video_index (int): which video
        img_start_idx (int): which start image
        time_steps: length of the sequence (int)
        provided_annotation: raw annotation data (loaded if not given)
        from_notebook: if notebook script, data is in parent folder
    Outputs:
        Y: an annotation array cropped to the window
    """
    annotation = provided_annotation
    if annotation is None:
        annotation = get_raw_annotation_from_file(corpus, from_notebook)
    full = get_concatenated_sign_types(corpus, types, nonZero, [video_index], 0, annotation, from_notebook)
    return full[:, img_start_idx:img_start_idx+time_steps, :]
def get_sequence(corpus,
output_form,
types,
nonZero,
binary,
video_index,
img_start_idx,
input_type='bodyFace_3D_features_hands_OP_HS',
input_normed=True,
input_type_format='old',
time_steps=100,
preloaded_features=None,
provided_annotation=None,
features_type='features',
frames_path_before_video='/localHD/DictaSign/convert/img/DictaSign_lsf_',
empty_image_path='/localHD/DictaSign/convert/img/white.jpg',
from_notebook=False):
"""
For returning features and annotations for a sequence.
Inputs:
corpus (string): DictaSign or NCSLGR
output_form: 'mixed' if different and separated Outputs
'sign_types' if annotation is only a binary matrix of sign types
output_names_final: list of outputs (strings)
output_categories_or_names_original:
if output_form: 'mixed': list of lists of meaningful annotation categories for each output
if output_form: 'sign_types': list of lists of original names that are used to compose final outputs
vid_idx (int): which video
img_start_idx (int): which start image
time_steps: length of sequences (int)
preloaded_features: if features are already loaded, in the format of a list (features for each video)
provided_annotation: raw annotation data (not needed)
from_notebook: if notebook script, data is in parent folder
Outputs:
X: a numpy array [1, time_steps, features_number] for features
| |
input = """
c num blocks = 1
c num vars = 180
c minblockids[0] = 1
c maxblockids[0] = 180
p cnf 180 815
-50 -91 -166 0
-55 -32 101 0
30 -91 -161 0
42 75 -12 0
55 -107 -69 0
-18 -31 -140 0
152 -137 -113 0
-103 113 -93 0
-58 -15 -131 0
129 -74 73 0
111 -86 6 0
134 -104 -92 0
-32 116 75 0
53 108 -40 0
85 -87 -8 0
-86 103 -134 0
139 -125 -9 0
53 116 4 0
23 134 -90 0
-125 -71 -86 0
109 52 140 0
-168 -128 165 0
-173 -163 68 0
180 -93 -124 0
-70 174 124 0
14 147 51 0
-173 -23 22 0
-156 144 -174 0
173 120 -170 0
-18 -2 -63 0
-27 169 -98 0
-139 -55 -177 0
-80 49 -86 0
-93 172 -118 0
-95 75 82 0
101 -140 -11 0
-6 126 -123 0
70 -166 -41 0
101 -102 122 0
68 -118 -134 0
22 -95 -157 0
-2 122 47 0
71 -67 -102 0
117 -43 -171 0
-79 157 139 0
-36 157 -62 0
41 162 -87 0
5 73 -152 0
-138 84 72 0
179 60 -40 0
41 163 65 0
-145 52 176 0
28 170 160 0
55 43 120 0
66 5 103 0
133 48 146 0
78 -50 101 0
128 -55 -134 0
43 68 72 0
-46 -28 -143 0
-53 -143 110 0
-2 107 -131 0
27 115 -21 0
54 157 -56 0
-170 -155 -65 0
19 136 164 0
-142 -146 -122 0
127 53 167 0
-80 159 -66 0
-158 -110 -27 0
84 111 -90 0
39 -110 11 0
-30 -109 -147 0
42 -117 158 0
-95 84 152 0
-114 30 -19 0
-99 -1 60 0
57 124 152 0
150 93 134 0
13 107 -116 0
-149 -114 3 0
-10 -158 -179 0
-44 81 107 0
-66 -37 158 0
-114 -17 163 0
22 -138 -67 0
127 149 -69 0
163 140 43 0
-157 -120 8 0
145 -161 -90 0
-173 -55 103 0
4 128 171 0
30 -142 39 0
-175 59 105 0
83 -119 61 0
68 69 -28 0
42 26 8 0
103 -177 164 0
-49 -121 -137 0
-137 133 107 0
8 -170 -116 0
61 -149 -131 0
172 135 -130 0
104 -60 -40 0
-108 33 171 0
-139 -12 -85 0
174 -154 -94 0
2 -80 124 0
-42 -131 -153 0
40 38 -92 0
-18 -43 65 0
-69 155 -35 0
-168 69 -81 0
121 -77 -88 0
-160 158 -31 0
-55 -12 -3 0
43 -18 -68 0
10 -99 -152 0
-119 63 -98 0
62 14 -5 0
2 -98 25 0
-167 -16 121 0
36 143 -94 0
102 -55 7 0
-167 -70 -10 0
53 63 79 0
4 176 28 0
-66 151 71 0
-140 -3 35 0
163 172 67 0
31 56 -18 0
76 14 98 0
22 -56 54 0
71 -79 -38 0
127 36 31 0
-111 -65 -138 0
97 -75 -1 0
-121 -180 13 0
-69 122 71 0
-81 -142 -145 0
115 142 116 0
54 119 5 0
-56 46 62 0
135 41 -49 0
131 -78 -47 0
157 -45 155 0
-37 96 45 0
64 -160 -120 0
109 -70 123 0
164 -23 -42 0
-108 15 137 0
-97 147 175 0
22 -19 73 0
118 22 145 0
-113 -57 110 0
109 152 -173 0
95 11 -25 0
-169 45 158 0
103 -45 50 0
-115 37 143 0
155 15 -101 0
74 88 -119 0
-150 126 63 0
156 -136 -33 0
12 -31 -32 0
140 73 153 0
-20 108 36 0
48 179 80 0
-61 -119 15 0
28 170 -168 0
-117 -119 88 0
29 -38 116 0
138 77 175 0
78 175 111 0
27 26 9 0
43 -133 34 0
71 116 -16 0
157 -13 -76 0
132 92 -93 0
-131 66 168 0
-97 -80 -21 0
116 171 -163 0
-94 131 -78 0
-165 6 -24 0
-149 -159 45 0
23 -93 -2 0
56 -142 -8 0
-137 -88 111 0
-95 -168 -71 0
117 -106 -112 0
-120 118 98 0
7 145 -45 0
102 -92 -176 0
-101 -99 174 0
143 -52 -132 0
129 136 98 0
58 95 -164 0
162 42 55 0
155 -151 -70 0
-47 -126 -79 0
42 32 -20 0
-78 -118 65 0
62 -126 -123 0
52 -60 -54 0
-39 -125 150 0
110 -94 171 0
-69 -3 -35 0
-41 18 -128 0
86 -4 -11 0
-37 60 -88 0
82 110 -111 0
-136 126 -101 0
-150 -96 67 0
80 -92 -24 0
167 -78 8 0
-22 152 177 0
-57 49 -3 0
46 -60 -54 0
-35 -55 64 0
-27 23 86 0
-128 -74 -27 0
123 -9 47 0
5 111 62 0
-85 44 115 0
53 -86 -107 0
6 -177 148 0
-130 -59 -142 0
6 -138 63 0
-112 -167 132 0
-134 132 137 0
-126 96 169 0
161 100 158 0
60 9 104 0
-30 -17 6 0
39 140 141 0
28 109 142 0
33 40 -52 0
-70 -1 66 0
71 37 -47 0
-149 57 -117 0
-56 -171 -39 0
49 -176 -79 0
138 87 -61 0
20 51 65 0
41 4 82 0
97 -173 -104 0
136 35 -41 0
-113 -23 -70 0
153 -149 -45 0
-115 18 -55 0
-60 98 -96 0
116 16 102 0
5 7 52 0
157 -138 112 0
-15 -35 113 0
-158 -98 44 0
41 -136 103 0
48 -8 -34 0
153 -100 155 0
144 180 -116 0
146 39 33 0
70 -77 28 0
164 108 -128 0
29 126 128 0
179 -140 141 0
80 -75 27 0
129 -107 -108 0
-89 42 67 0
-159 -126 69 0
50 19 -6 0
125 47 127 0
161 68 9 0
-141 -25 -171 0
50 -8 145 0
-62 135 -38 0
-3 -75 -144 0
36 -82 -59 0
-155 58 151 0
53 -123 -22 0
79 -101 -65 0
164 44 56 0
62 180 58 0
131 -134 6 0
-88 139 110 0
-55 -111 -90 0
43 -8 94 0
-38 -140 179 0
-17 167 90 0
-81 -13 1 0
-23 -50 37 0
-178 41 126 0
6 -55 -84 0
-141 -80 107 0
73 -89 -50 0
-22 15 -45 0
13 -89 87 0
-15 1 32 0
167 -30 96 0
-64 50 88 0
-161 148 -68 0
174 -94 -90 0
97 75 -102 0
-139 -69 -55 0
118 -135 -134 0
1 -179 50 0
-91 64 -41 0
-79 -62 70 0
-26 -87 -134 0
-101 128 94 0
177 54 -104 0
-71 44 -175 0
132 101 5 0
172 72 -69 0
-82 -53 -167 0
163 -16 84 0
47 -82 160 0
-88 -177 145 0
-156 -177 -63 0
-162 -15 126 0
56 146 63 0
30 -119 166 0
-78 -154 -180 0
160 -121 176 0
70 148 -65 0
-13 -139 -138 0
-125 10 -148 0
-98 126 173 0
31 110 -136 0
101 173 17 0
8 171 -38 0
100 178 89 0
-37 53 139 0
65 -108 -160 0
162 145 71 0
34 85 -127 0
16 -165 -164 0
158 69 112 0
-98 -27 -88 0
104 -133 -8 0
15 179 -44 0
-51 72 122 0
85 131 -23 0
-15 -144 -133 0
-110 176 -49 0
169 33 170 0
150 27 -157 0
-59 15 -71 0
-104 -171 38 0
141 -178 34 0
-18 -4 -179 0
173 151 -162 0
-120 -64 20 0
-79 -94 -53 0
-23 -173 147 0
34 12 2 0
-137 -8 21 0
164 -118 124 0
16 14 -65 0
56 92 -41 0
-28 -152 103 0
146 -93 -116 0
-160 -177 -138 0
36 89 37 0
-97 43 171 0
-156 -176 40 0
-71 -13 81 0
-127 86 -111 0
-134 -90 119 0
64 122 112 0
-161 90 126 0
-141 -69 -27 0
-2 -22 17 0
-69 -139 -114 0
-107 -165 -119 0
-75 57 -82 0
-116 -49 161 0
-47 -145 122 0
-6 116 37 0
-75 -58 -83 0
133 -171 -22 0
132 -67 104 0
23 151 -119 0
127 141 100 0
29 -39 -93 0
-96 72 172 0
-51 22 27 0
13 -7 -20 0
-70 -132 -145 0
31 173 -112 0
-15 -128 -94 0
48 45 103 0
51 53 -82 0
177 166 -173 0
95 -60 -121 0
-77 -159 -96 0
-2 -16 128 0
60 -157 128 0
-32 10 78 0
-159 -85 38 0
-20 -64 169 0
-67 107 -9 0
54 -2 94 0
129 55 -104 0
61 -35 -4 0
-72 -144 -83 0
135 -12 57 0
14 20 47 0
-18 -70 85 0
-177 65 -94 0
156 -25 -32 0
-100 -159 5 0
-43 -84 -57 0
133 -25 -174 0
153 16 85 0
-7 69 156 0
90 -178 -119 0
11 180 111 0
50 -142 -102 0
68 168 -155 0
113 3 145 0
-97 | |
"""
Definition of all artifactory objects.
"""
import warnings
import json
import logging
import os
from os.path import isdir, join
from typing import List, Optional, Dict, Tuple, Union, Iterator
from pathlib import Path
import requests
from requests import Response
from pydantic import parse_obj_as
from pyartifactory.exception import (
UserNotFoundException,
UserAlreadyExistsException,
GroupNotFoundException,
RepositoryAlreadyExistsException,
GroupAlreadyExistsException,
RepositoryNotFoundException,
ArtifactoryException,
PermissionAlreadyExistsException,
PermissionNotFoundException,
InvalidTokenDataException,
PropertyNotFoundException,
ArtifactNotFoundException,
)
from pyartifactory.models import (
AuthModel,
ApiKeyModel,
PasswordModel,
AccessTokenModel,
Group,
LocalRepository,
VirtualRepository,
LocalRepositoryResponse,
VirtualRepositoryResponse,
RemoteRepository,
RemoteRepositoryResponse,
SimpleRepository,
AnyRepository,
AnyRepositoryResponse,
UserResponse,
NewUser,
SimpleUser,
User,
Permission,
SimplePermission,
ArtifactPropertiesResponse,
ArtifactStatsResponse,
ArtifactInfoResponse,
)
from pyartifactory.models.artifact import (
ArtifactFileInfoResponse,
ArtifactFolderInfoResponse,
)
from pyartifactory.utils import custom_encoder
logger = logging.getLogger("pyartifactory")
class Artifactory:
    """Facade bundling every Artifactory API area.

    Builds one sub-client per area (users, groups, security, repositories,
    artifacts and permissions); all share a single AuthModel carrying the
    connection settings.
    """
    def __init__(
        self, url: str, auth: Tuple[str, str], verify: bool = True, cert: Optional[str] = None,
    ):
        """
        :param url: base URL of the Artifactory instance
        :param auth: (username, password) tuple
        :param verify: verify TLS certificates (passed through to requests)
        :param cert: optional client-side certificate path; None disables it
        """
        # One shared AuthModel; every sub-client below reuses it.
        self.artifactory = AuthModel(url=url, auth=auth, verify=verify, cert=cert)
        self.users = ArtifactoryUser(self.artifactory)
        self.groups = ArtifactoryGroup(self.artifactory)
        self.security = ArtifactorySecurity(self.artifactory)
        self.repositories = ArtifactoryRepository(self.artifactory)
        self.artifacts = ArtifactoryArtifact(self.artifactory)
        self.permissions = ArtifactoryPermission(self.artifactory)
class ArtifactoryObject:
    """Base class providing the HTTP plumbing shared by all API clients."""

    def __init__(self, artifactory: AuthModel) -> None:
        self._artifactory = artifactory
        # Unwrap the SecretStr password once so requests receives a plain tuple.
        credentials = self._artifactory.auth
        self._auth = (credentials[0], credentials[1].get_secret_value())
        self._verify = self._artifactory.verify
        self._cert = self._artifactory.cert
        self.session = requests.Session()

    def _get(self, route: str, **kwargs) -> Response:
        """Perform a GET request on the given API route."""
        return self._generic_http_method_request("get", route, **kwargs)

    def _post(self, route: str, **kwargs) -> Response:
        """Perform a POST request on the given API route."""
        return self._generic_http_method_request("post", route, **kwargs)

    def _put(self, route: str, **kwargs) -> Response:
        """Perform a PUT request on the given API route."""
        return self._generic_http_method_request("put", route, **kwargs)

    def _delete(self, route: str, **kwargs) -> Response:
        """Perform a DELETE request on the given API route."""
        return self._generic_http_method_request("delete", route, **kwargs)

    def _generic_http_method_request(
        self, method: str, route: str, raise_for_status: bool = True, **kwargs
    ) -> Response:
        """
        Send one HTTP request through the shared session.

        :param method: lower-case HTTP verb, used to look up the session method
        :param route: API route, appended to the configured base URL
        :param raise_for_status: when True, raise on 4xx/5xx responses
        :param kwargs: additional keyword arguments forwarded to requests
        :return: the HTTP response
        """
        request_callable = getattr(self.session, method)
        response: Response = request_callable(
            f"{self._artifactory.url}/{route}",
            auth=self._auth,
            **kwargs,
            verify=self._verify,
            cert=self._cert,
        )
        if raise_for_status:
            response.raise_for_status()
        return response
class ArtifactoryUser(ArtifactoryObject):
    """Client for the Artifactory user-management API (security/users)."""
    _uri = "security/users"

    def create(self, user: NewUser) -> UserResponse:
        """
        Create user
        :param user: NewUser object
        :raises UserAlreadyExistsException: if a user with this name exists
        :return: User
        """
        username = user.name
        try:
            self.get(username)
            logger.error("User %s already exists", username)
            raise UserAlreadyExistsException(f"User {username} already exists")
        except UserNotFoundException:
            data = user.dict()
            # Password is a pydantic SecretStr; serialize its real value explicitly.
            data["password"] = user.password.get_secret_value()
            self._put(f"api/{self._uri}/{username}", json=data)
            logger.debug("User %s successfully created", username)
            return self.get(user.name)

    def get(self, name: str) -> UserResponse:
        """
        Read user from artifactory. Fill object if exist
        :param name: Name of the user to retrieve
        :raises UserNotFoundException: if the user does not exist
        :return: UserModel
        """
        try:
            response = self._get(f"api/{self._uri}/{name}")
            logger.debug("User %s found", name)
            return UserResponse(**response.json())
        except requests.exceptions.HTTPError as error:
            # Artifactory reports unknown users with 404 (400 on some versions).
            if error.response.status_code in (404, 400):
                logger.error("User %s does not exist", name)
                raise UserNotFoundException(f"{name} does not exist")
            raise ArtifactoryException from error

    def list(self) -> List[SimpleUser]:
        """
        Lists all the users
        :return: UserList
        """
        response = self._get(f"api/{self._uri}")
        logger.debug("List all users successful")
        return [SimpleUser(**user) for user in response.json()]

    def update(self, user: User) -> UserResponse:
        """
        Updates an artifactory user
        :param user: NewUser object
        :raises UserNotFoundException: if the user does not exist
        :return: UserModel
        """
        username = user.name
        # Raises UserNotFoundException when the user is absent.
        self.get(username)
        self._post(
            f"api/{self._uri}/{username}",
            # Server-maintained fields must not be posted back.
            json=user.dict(exclude={"lastLoggedIn", "realm"}),
        )
        logger.debug("User %s successfully updated", username)
        return self.get(username)

    def delete(self, name: str) -> None:
        """
        Remove user
        :param name: Name of the user to delete
        :raises UserNotFoundException: if the user does not exist
        :return: None
        """
        self.get(name)
        self._delete(f"api/{self._uri}/{name}")
        logger.debug("User %s successfully deleted", name)

    def unlock(self, name: str) -> None:
        """
        Unlock user
        Even if the user doesn't exist, it succeed too
        :param name: Name of the user to unlock
        :return none
        """
        self._post(f"api/security/unlockUsers/{name}")
        # Bug fix: the placeholder was a bare '%', so the user name was never
        # interpolated into the log message; '%s' interpolates it correctly.
        logger.debug("User %s successfully unlocked", name)
class ArtifactorySecurity(ArtifactoryObject):
    """Models artifactory security (passwords, access tokens and API keys)."""
    _uri = "security"
    def get_encrypted_password(self) -> PasswordModel:
        """
        Get the encrypted password of the authenticated requestor.
        :return: PasswordModel wrapping the encrypted password
        """
        response = self._get(f"api/{self._uri}/encryptedPassword")
        logger.debug("Encrypted password successfully delivered")
        return PasswordModel(**response.json())
    def create_access_token(
        self,
        user_name: str,
        expires_in: int = 3600,
        refreshable: bool = False,
        groups: Optional[List[str]] = None,
    ) -> AccessTokenModel:
        """
        Creates an access token.
        :param user_name: Name of the user to whom an access key should be granted. transient token
        is created if user doesn't exist in artifactory.
        :param expires_in: Expiry time for the token in seconds. For eternal tokens specify 0.
        :param refreshable: If set to true token can be refreshed using the refresh token returned.
        :param groups: A list of groups the token has membership of.
        If an existing user in artifactory is used with existing memberships
        groups are automatically implied without specification.
        :raises InvalidTokenDataException: if the server rejects the token request
        :return: AccessToken
        """
        payload = {
            "username": user_name,
            "expires_in": expires_in,
            "refreshable": refreshable,
        }
        if groups:
            if not isinstance(groups, list):
                raise ValueError(groups)
            # Group membership is conveyed via the OAuth-style 'scope' field.
            scope = f'member-of-groups:"{", ".join(groups)}"'
            payload.update({"scope": scope})
        # raise_for_status=False: error bodies are inspected below instead.
        response = self._post(
            f"api/{self._uri}/token", data=payload, raise_for_status=False
        )
        if response.ok:
            return AccessTokenModel(**response.json())
        raise InvalidTokenDataException(
            response.json().get("error_description", "Unknown error")
        )
    def revoke_access_token(self, token: Optional[str] = None, token_id: Optional[str] = None) -> bool:
        """
        Revokes an access token.
        :param token: The token to revoke
        :param token_id: The id of a token to revoke
        :raises InvalidTokenDataException: if neither token nor token_id is supplied
        :return: bool True or False indicating success or failure of token revocation attempt.
        """
        if not any([token, token_id]):
            logger.error("Neither a token or a token id was specified")
            raise InvalidTokenDataException
        # When both are supplied, the literal token takes precedence.
        payload: Dict[str, Optional[str]] = {"token": token} if token else {
            "token_id": token_id
        }
        response = self._post(
            f"api/{self._uri}/token/revoke", data=payload, raise_for_status=False
        )
        if response.ok:
            # The API treats revoking an unknown token as a success.
            logger.debug("Token revoked successfully, or token did not exist")
            return True
        logger.error("Token revocation unsuccessful, response was %s", response.text)
        return False
    def create_api_key(self) -> ApiKeyModel:
        """
        Create an API key for the current user.
        Fails with an HTTP error if a key already exists - use regenerate_api_key instead.
        :return: ApiKeyModel
        """
        response = self._post(f"api/{self._uri}/apiKey")
        logger.debug("API Key successfully created")
        return ApiKeyModel(**response.json())
    def regenerate_api_key(self) -> ApiKeyModel:
        """
        Regenerate an API key for the current user
        :return: API key
        """
        response = self._put(f"api/{self._uri}/apiKey")
        logger.debug("API Key successfully regenerated")
        return ApiKeyModel(**response.json())
    def get_api_key(self) -> ApiKeyModel:
        """
        Get the current user's own API key
        :return: API key
        """
        response = self._get(f"api/{self._uri}/apiKey")
        logger.debug("API Key successfully delivered")
        return ApiKeyModel(**response.json())
    def revoke_api_key(self) -> None:
        """
        Revokes the current user's API key
        :return: None
        """
        self._delete(f"api/{self._uri}/apiKey")
        logger.debug("API Key successfully revoked")
    def revoke_user_api_key(self, name: str) -> None:
        """
        Revokes the API key of another user
        :param name: name of the user to whom api key has to be revoked
        :return: None
        """
        self._delete(f"api/{self._uri}/apiKey/{name}")
        logger.debug("User API Key successfully revoked")
class ArtifactoryGroup(ArtifactoryObject):
    """Client for the Artifactory group-management API (security/groups)."""
    _uri = "security/groups"

    def create(self, group: Group) -> Group:
        """
        Create a group, refusing to overwrite one that already exists.
        :param group: Group to create
        :return: Created group
        """
        group_name = group.name
        try:
            self.get(group_name)
        except GroupNotFoundException:
            self._put(f"api/{self._uri}/{group_name}", json=group.dict())
            logger.debug("Group %s successfully created", group_name)
            return self.get(group.name)
        logger.error("Group %s already exists", group_name)
        raise GroupAlreadyExistsException(f"Group {group_name} already exists")

    def get(self, name: str) -> Group:
        """
        Fetch a single group (including its member users) by name.
        :param name: Name of the group to retrieve
        :return: Found artifactory group
        """
        try:
            response = self._get(
                f"api/{self._uri}/{name}", params={"includeUsers": True}
            )
        except requests.exceptions.HTTPError as error:
            if error.response.status_code in (404, 400):
                logger.error("Group %s does not exist", name)
                raise GroupNotFoundException(f"Group {name} does not exist")
            raise ArtifactoryException from error
        logger.debug("Group %s found", name)
        return Group(**response.json())

    def list(self) -> List[Group]:
        """
        Return every group known to the server.
        :return: GroupList
        """
        listing = self._get(f"api/{self._uri}")
        logger.debug("List all groups successful")
        return [Group(**entry) for entry in listing.json()]

    def update(self, group: Group) -> Group:
        """
        Push new details for an existing group.
        :param group: Group to be updated
        :return: Updated group
        """
        group_name = group.name
        # Raises GroupNotFoundException if the group is absent.
        self.get(group_name)
        self._post(f"api/{self._uri}/{group_name}", json=group.dict())
        logger.debug("Group %s successfully updated", group_name)
        return self.get(group_name)

    def delete(self, name: str) -> None:
        """
        Remove a group by name.
        :param name: Name of the group to delete
        :return: None
        """
        # Raises GroupNotFoundException if the group is absent.
        self.get(name)
        self._delete(f"api/{self._uri}/{name}")
        logger.debug("Group %s successfully deleted", name)
class ArtifactoryRepository(ArtifactoryObject):
"""Models an artifactory repository."""
_uri = "repositories"
# Repositories operations
def get_repo(self, repo_name: str) -> AnyRepositoryResponse:
"""
Finds repository in artifactory. Raises an exception if the repo doesn't exist.
:param repo_name: Name of the repository to retrieve
:return: Either a local, virtual or remote repository
"""
try:
response = self._get(f"api/{self._uri}/{repo_name}")
repo: AnyRepositoryResponse = parse_obj_as(
Union[
LocalRepositoryResponse,
VirtualRepositoryResponse,
RemoteRepositoryResponse,
],
response.json(),
| |
or (phjSortProportions in ['ascending','ascend','asc']):
phjPropDF = phjPropDF.sort_values(by = phjSuffixDict['joinstr'].join([phjGroupsToPlotList[0],phjSuffixDict['proportion']]),
axis = 0,
ascending = True)
elif phjSortProportions in ['descending','descend','desc']:
phjPropDF = phjPropDF.sort_values(by = phjSuffixDict['joinstr'].join([phjGroupsToPlotList[0],phjSuffixDict['proportion']]),
axis = 0,
ascending = False)
else:
# This message should never be given.
print('Option for sorting does not exist. (This message should never be given.)')
if phjPrintResults == True:
with pd.option_context('display.float_format','{:,.4f}'.format):
print(phjPropDF)
# Plot bar chart of relative frequencies
if phjPlotProportions == True:
# Plot chart
phjPlotProportionsBarChart(phjDF = phjPropDF,
phjCategoriesToPlotList = phjColumnsList,
phjGroupVarName = phjGroupVarName,
phjGroupLevelsList = phjGroupLevelsList,
phjAlpha = phjAlpha,
phjGraphTitle = None,
phjXAxisTitle = 'Categories',
phjYAxisTitle = 'Proportions',
phjPrintResults = True)
finally:
return phjPropDF
# Calculates relative frequencies and multinomial confidence intervals
# --------------------------------------------------------------------
# This function calculates proportions, simultaneous confidence intervals for a categorical
# variable and plots bar charts with asymmetrical error bars.
def phjCalculateMultinomialProportions(phjDF,
                                       phjCategoryVarName,
                                       phjGroupVarName = None,
                                       phjMissingValue = 'missing',
                                       phjMultinomialConfIntMethod = 'goodman',
                                       phjAlpha = 0.05,
                                       phjPlotRelFreq = True,
                                       phjCategoriesToPlotList = 'all',
                                       phjGroupsToPlotList = None, # Currently not implemented
                                       phjGraphTitle = None,
                                       phjPrintResults = False):
    """
    Calculate relative frequencies and simultaneous (multinomial) confidence
    intervals for a categorical variable, optionally split by group, and
    optionally plot the result as a bar chart with asymmetric error bars.

    Returns a dataframe indexed by category level containing absolute counts,
    proportions and confidence-interval columns (one set per group when
    phjGroupVarName is given), or None if parameter validation fails.
    """
    # Check whether required parameters have been set to correct type and are set to
    # allowable values. N.B. isinstance() can take a tuple to test against multiple types.
    try:
        phjAssert('phjDF',phjDF,pd.DataFrame)
        phjAssert('phjCategoryVarName',phjCategoryVarName,str,phjMustBePresentColumnList = list(phjDF.columns))
        if phjGroupVarName is not None:
            phjAssert('phjGroupVarName',phjGroupVarName,str,phjMustBePresentColumnList = list(phjDF.columns))
        phjAssert('phjMissingValue',phjMissingValue,(str,int,float),phjBespokeMessage = "Parameter 'phjMissingValue' needs to be a string or a number (including np.nan).")
        phjAssert('phjMultinomialConfIntMethod',phjMultinomialConfIntMethod,str,phjAllowedOptions = ['goodman','sison-glaz'])
        phjAssert('phjAlpha',phjAlpha,float,phjAllowedOptions = {'min':0.0001,'max':0.9999})
        phjAssert('phjPlotRelFreq',phjPlotRelFreq,bool)
        # A single category name is silently promoted to a one-element list.
        if phjCategoriesToPlotList != 'all':
            if isinstance(phjCategoriesToPlotList,str):
                phjCategoriesToPlotList = [phjCategoriesToPlotList]
            phjAssert('phjCategoriesToPlotList',phjCategoriesToPlotList,list,phjBespokeMessage = "Parameter 'phjCategoriesToPlotList' needs to be a list or the string value 'all'.")
        if phjGroupsToPlotList is not None:
            if phjGroupsToPlotList != 'all':
                if isinstance(phjGroupsToPlotList,str):
                    phjGroupsToPlotList = [phjGroupsToPlotList]
                phjAssert('phjGroupsToPlotList',phjGroupsToPlotList,list,phjBespokeMessage = "Parameter 'phjGroupsToPlotList' needs to be a list or the string value 'all'.")
        # NOTE(review): phjGraphTitle defaults to None but is asserted against str
        # unconditionally — confirm phjAssert() tolerates None here, otherwise the
        # default value always fails validation.
        phjAssert('phjGraphTitle',phjGraphTitle,str)
        phjAssert('phjPrintResults',phjPrintResults,bool)
    except AssertionError as e:
        phjRelFreqDF = None
        # If function has been called directly, present message.
        if inspect.stack()[1][3] == '<module>':
            print("An AssertionError occurred in {fname}() function. ({msg})\n".format(msg = e,
                                                                                       fname = inspect.stack()[0][3]))
        # If function has been called by another function then modify message and re-raise exception
        else:
            print("An AssertionError occurred in {fname}() function when called by {callfname}() function. ({msg})\n".format(msg = e,
                                                                                                                             fname = inspect.stack()[0][3],
                                                                                                                             callfname = inspect.stack()[1][3]))
            # NOTE(review): the bare 'return' in the finally block below swallows
            # this re-raised exception (Python semantics), so callers never see it.
            raise
    else:
        # Set default suffixes and join strings to create column names
        # to use in output dataframe.
        phjSuffixDict = phjDefineSuffixDict(phjAlpha = phjAlpha)
        # Copy required columns to dataframe and delete rows with missing values
        phjDF = phjKeepRequiredData(phjDF = phjDF,
                                    phjColumnsList = [phjCategoryVarName],
                                    phjGroupVarName = phjGroupVarName,
                                    phjMissingValue = phjMissingValue)
        # Create lists of unique category and group levels
        # (N.B. If no group name is given, a default value 'group' is used.)
        # i. Categories
        phjCategoryLevelsList = phjGetCategoryLevelsList(phjDF = phjDF,
                                                         phjCategoryVarName = phjCategoryVarName,
                                                         phjPrintResults = phjPrintResults)
        # ii. Groups
        phjGroupLevelsList = phjGetGroupLevelsList(phjDF = phjDF,
                                                   phjGroupVarName = phjGroupVarName,
                                                   phjPrintResults = phjPrintResults)
        # Create empty dataframe (no columns) with index consisting of all category levels
        phjRelFreqDF = pd.DataFrame(index = phjCategoryLevelsList)
        # Define which suffix indicates normalization of value_counts() function
        # (i.e. which is absolute counts and which is relative frequency):
        # an OrderedDict so counts are always computed before proportions.
        phjSuffixNormOrderedDict = collections.OrderedDict()
        phjSuffixNormOrderedDict[phjSuffixDict['absfreq']] = False
        phjSuffixNormOrderedDict[phjSuffixDict['proportion']] = True
        # Create temporary dataframes consisting of output from value_counts() function
        # applied to slices of dataframe based on group levels
        for phjSuffix, phjNormalize in phjSuffixNormOrderedDict.items():
            # Calculate frequencies and relative frequencies for each group
            if phjGroupVarName is None:
                phjTempRelFreqDF = pd.DataFrame(phjDF[phjCategoryVarName].value_counts(normalize = phjNormalize))
                phjTempRelFreqDF = phjTempRelFreqDF.rename(columns = {phjCategoryVarName: phjSuffix})
                # Use non-normalized data to calculate simultaneous confidence intervals
                if phjNormalize == False:
                    phjTempRelFreqDF = phjCalculateMultinomialConfInts(phjDF = phjTempRelFreqDF,
                                                                       phjAbsFreqColumnName = phjSuffix,
                                                                       phjSimultConfIntColumnName = phjSuffixDict['cisuffix'],
                                                                       phjMultinomialConfIntMethod = phjMultinomialConfIntMethod,
                                                                       phjAlpha = phjAlpha,
                                                                       phjPrintResults = phjPrintResults)
                # Join temporary data frame to complete dataframe based on index value.
                phjRelFreqDF = phjRelFreqDF.join(phjTempRelFreqDF)
                # Cells in summary dataframe with missing values are converted to zero
                # (N.B. In this bit (when phjGroupVarName is None) the following may not be required
                # but I haven't confirmed that for certain so have included it anyway.)
                phjRelFreqDF = phjRelFreqDF.fillna(0)
            else:
                # One column set per group level; column names get a group prefix.
                for phjGroup in phjGroupLevelsList:
                    phjTempRelFreqDF = pd.DataFrame(phjDF.loc[phjDF[phjGroupVarName] == phjGroup,phjCategoryVarName].value_counts(normalize = phjNormalize))
                    phjTempRelFreqDF = phjTempRelFreqDF.rename(columns = {phjCategoryVarName: phjSuffixDict['joinstr'].join([str(phjGroup),phjSuffix])})
                    # Use non-normalized data to calculate simultaneous confidence intervals
                    if phjNormalize == False:
                        phjTempRelFreqDF = phjCalculateMultinomialConfInts(phjDF = phjTempRelFreqDF,
                                                                           phjAbsFreqColumnName = phjSuffixDict['joinstr'].join([str(phjGroup),phjSuffix]),
                                                                           phjSimultConfIntColumnName = phjSuffixDict['joinstr'].join([str(phjGroup),phjSuffixDict['cisuffix']]),
                                                                           phjMultinomialConfIntMethod = phjMultinomialConfIntMethod,
                                                                           phjAlpha = phjAlpha,
                                                                           phjPrintResults = phjPrintResults)
                    # Join temporary data frame to complete dataframe based on index value.
                    phjRelFreqDF = phjRelFreqDF.join(phjTempRelFreqDF)
                    # Cells in summary dataframe with missing values are converted to zero
                    phjRelFreqDF = phjRelFreqDF.fillna(0)
        phjRelFreqDF = phjReorderCols(phjDF = phjRelFreqDF,
                                      phjGroupVarName = phjGroupVarName,
                                      phjGroupLevelsList = phjGroupLevelsList,
                                      phjAlpha = phjAlpha,
                                      phjPrintResults = False)
        if phjPrintResults == True:
            print(phjRelFreqDF)
        # Plot bar chart of relative frequencies
        if phjPlotRelFreq == True:
            # Plot chart
            phjPlotProportionsBarChart(phjDF = phjRelFreqDF,
                                       phjCategoriesToPlotList = phjCategoriesToPlotList,
                                       phjGroupVarName = phjGroupVarName,
                                       phjGroupLevelsList = phjGroupLevelsList,
                                       phjAlpha = phjAlpha,
                                       phjGraphTitle = phjGraphTitle,
                                       phjXAxisTitle = phjCategoryVarName,
                                       phjYAxisTitle = 'Relative frequency',
                                       phjPrintResults = False)
    finally:
        # Returns None when validation failed; note this also suppresses the
        # re-raised AssertionError above (see NOTE(review)).
        return phjRelFreqDF
def phjCalculateBinomialConfInts(phjDF,
phjSuccVarName = None,
phjFailVarName = None,
phjTotalVarName = None,
phjBinomialConfIntMethod = 'normal',
phjAlpha = 0.05,
phjPrintResults = False):
# Deep copy dataframe to ensure columns not added to passed dataframe
phjDF = phjDF.copy(deep = True)
# Get a list of the terms used to head columns in summary tables
phjSuffixDict = phjDefineSuffixDict(phjAlpha = phjAlpha)
# Check whether function parameters have been set to correct type and are of
# correct values.
try:
phjAssert('phjDF',phjDF,pd.DataFrame)
if phjSuccVarName is not None:
#assert isinstance(phjSuccVarName,str), "Parameter 'phjSuccVarName' needs to be a string."
phjAssert('phjSuccVarName',phjSuccVarName,str,phjMustBePresentColumnList = list(phjDF.columns))
if phjFailVarName is not None:
#assert isinstance(phjFailVarName,str), "Parameter 'phjFailVarName' needs to be a string."
phjAssert('phjFailVarName',phjFailVarName,str,phjMustBePresentColumnList = list(phjDF.columns))
if phjTotalVarName is not None:
#assert isinstance(phjTotalVarName,str), "Parameter 'phjTotalVarName' needs to be a string."
phjAssert('phjTotalVarName',phjTotalVarName,str,phjMustBePresentColumnList = list(phjDF.columns))
phjAssert('phjBinomialConfIntMethod',phjBinomialConfIntMethod,str,phjAllowedOptions = ['normal','agresti_coull','beta','wilson','jeffreys','binom_test'])
phjAssert('phjAlpha',phjAlpha,float,phjAllowedOptions = {'min':0.0001,'max':0.9999})
phjAssert('phjPrintResults',phjPrintResults,bool)
# Bespoke asserts
# ---------------
# The user can enter two of three parameters in list of successes, failures or total.
# Check that at least 2 parameters are entered.
nArgs = len([i for i in [phjSuccVarName,phjFailVarName,phjTotalVarName] if i is not None])
assert nArgs >= 2, "At least 2 variables from phjSuccVarName, phjFailVarName and phjTotalVarName need to be entered but only {} has been entered.".format(nArgs)
# If all three parameters have been entered, check that successes + failures = total
if nArgs == 3:
assert (phjDF[phjSuccVarName] + phjDF[phjFailVarName]).equals(phjDF[phjTotalVarName]), "The '{0}' and '{1}' columns do not add up to the values in the '{2}' column.".format(phjSuccVarName,phjFailVarName,phjTotalVarName)
# New columns
# Some new column names will be created.
phjProbName = phjSuffixDict['proportion']
phjProbCILowLimName = phjSuffixDict['joinstr'].join([phjSuffixDict['cisuffix'],phjSuffixDict['cilowlim']])
phjProbCIUppLimName = phjSuffixDict['joinstr'].join([phjSuffixDict['cisuffix'],phjSuffixDict['ciupplim']])
phjProbCILowIntName = phjSuffixDict['joinstr'].join([phjSuffixDict['cisuffix'],phjSuffixDict['cilowint']])
phjProbCIUppIntName = phjSuffixDict['joinstr'].join([phjSuffixDict['cisuffix'],phjSuffixDict['ciuppint']])
# Check that new column names do not already exist
phjAssert('New column names',
[phjProbName,phjProbCILowLimName,phjProbCIUppLimName,phjProbCILowIntName,phjProbCIUppIntName],
list,
phjMustBeAbsentColumnList = list(phjDF.columns))
except AssertionError as e:
phjDF = None
# If function has been called directly, present message.
if inspect.stack()[1][3] == '<module>':
print("An AssertionError occurred in {fname}() function. ({msg})\n".format(msg = e,
fname = inspect.stack()[0][3]))
# If function has been called by another function then modify message and re-raise exception
else:
print("An AssertionError occurred in {fname}() function when called by {callfname}() function. ({msg})\n".format(msg = e,
fname = inspect.stack()[0][3],
callfname = inspect.stack()[1][3]))
raise
else:
# Calculations are made using successes and totals.
# If 2 column names entered and 1 is failures then calculate either the successes or the totals column.
if (nArgs == 2) & (phjFailVarName is not None):
if phjSuccVarName is None:
phjSuccVarName = phjSuffixDict['numbersuccess']
phjDF[phjSuccVarName] = phjDF[phjTotalVarName] - phjDF[phjFailVarName]
elif phjTotalVarName is None:
phjTotalVarName = phjSuffixDict['numbertrials']
phjDF[phjTotalVarName] = phjDF[phjSuccVarName] + phjDF[phjFailVarName]
# Ensure count data is stored as integer values. Otherwise,
# for some reason, calculations with object | |
import json
import random
from typing import NamedTuple, Any
import numpy
from numpy.testing import assert_array_almost_equal, assert_almost_equal
import torch
import pytest
from flaky import flaky
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase
from allennlp.common.util import sanitize
from allennlp.nn import util
from allennlp.models import load_archive
class TestNnUtil(AllenNlpTestCase):
def test_get_sequence_lengths_from_binary_mask(self):
binary_mask = torch.tensor(
[
[True, True, True, False, False, False],
[True, True, False, False, False, False],
[True, True, True, True, True, True],
[True, False, False, False, False, False],
]
)
lengths = util.get_lengths_from_binary_sequence_mask(binary_mask)
numpy.testing.assert_array_equal(lengths.numpy(), numpy.array([3, 2, 6, 1]))
def test_get_mask_from_sequence_lengths(self):
sequence_lengths = torch.LongTensor([4, 3, 1, 4, 2])
mask = util.get_mask_from_sequence_lengths(sequence_lengths, 5).data.numpy()
assert_almost_equal(
mask,
[[1, 1, 1, 1, 0], [1, 1, 1, 0, 0], [1, 0, 0, 0, 0], [1, 1, 1, 1, 0], [1, 1, 0, 0, 0]],
)
def test_get_sequence_lengths_converts_to_long_tensor_and_avoids_variable_overflow(self):
# Tests the following weird behaviour in Pytorch 0.1.12
# doesn't happen for our sequence masks:
#
# mask = torch.ones([260]).bool()
# mask.sum() # equals 260.
# var_mask = t.a.V(mask)
# var_mask.sum() # equals 4, due to 8 bit precision - the sum overflows.
binary_mask = torch.ones(2, 260).bool()
lengths = util.get_lengths_from_binary_sequence_mask(binary_mask)
numpy.testing.assert_array_equal(lengths.data.numpy(), numpy.array([260, 260]))
def test_clamp_tensor(self):
# Test on uncoalesced sparse tensor
i = torch.LongTensor([[0, 1, 1, 0], [2, 0, 2, 2]])
v = torch.FloatTensor([3, 4, -5, 3])
tensor = torch.sparse.FloatTensor(i, v, torch.Size([2, 3]))
clamped_tensor = util.clamp_tensor(tensor, minimum=-3, maximum=3).to_dense()
assert_almost_equal(clamped_tensor, [[0, 0, 3], [3, 0, -3]])
# Test on coalesced sparse tensor
i = torch.LongTensor([[0, 1, 1], [2, 0, 2]])
v = torch.FloatTensor([3, 4, -5])
tensor = torch.sparse.FloatTensor(i, v, torch.Size([2, 3]))
clamped_tensor = util.clamp_tensor(tensor, minimum=-3, maximum=3).to_dense()
assert_almost_equal(clamped_tensor, [[0, 0, 3], [3, 0, -3]])
# Test on dense tensor
tensor = torch.tensor([[5, -4, 3], [-3, 0, -30]])
clamped_tensor = util.clamp_tensor(tensor, minimum=-3, maximum=3)
assert_almost_equal(clamped_tensor, [[3, -3, 3], [-3, 0, -3]])
def test_sort_tensor_by_length(self):
tensor = torch.rand([5, 7, 9])
tensor[0, 3:, :] = 0
tensor[1, 4:, :] = 0
tensor[2, 1:, :] = 0
tensor[3, 5:, :] = 0
sequence_lengths = torch.LongTensor([3, 4, 1, 5, 7])
sorted_tensor, sorted_lengths, reverse_indices, _ = util.sort_batch_by_length(
tensor, sequence_lengths
)
# Test sorted indices are padded correctly.
numpy.testing.assert_array_equal(sorted_tensor[1, 5:, :].data.numpy(), 0.0)
numpy.testing.assert_array_equal(sorted_tensor[2, 4:, :].data.numpy(), 0.0)
numpy.testing.assert_array_equal(sorted_tensor[3, 3:, :].data.numpy(), 0.0)
numpy.testing.assert_array_equal(sorted_tensor[4, 1:, :].data.numpy(), 0.0)
assert sorted_lengths.data.equal(torch.LongTensor([7, 5, 4, 3, 1]))
# Test restoration indices correctly recover the original tensor.
assert sorted_tensor.index_select(0, reverse_indices).data.equal(tensor.data)
def test_get_final_encoder_states(self):
encoder_outputs = torch.Tensor(
[
[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]],
[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]],
]
)
mask = torch.tensor([[True, True, True], [True, True, False]])
final_states = util.get_final_encoder_states(encoder_outputs, mask, bidirectional=False)
assert_almost_equal(final_states.data.numpy(), [[9, 10, 11, 12], [17, 18, 19, 20]])
final_states = util.get_final_encoder_states(encoder_outputs, mask, bidirectional=True)
assert_almost_equal(final_states.data.numpy(), [[9, 10, 3, 4], [17, 18, 15, 16]])
    def test_masked_softmax_no_mask(self):
        """With mask=None, masked_softmax must behave like a plain softmax."""
        # Testing the general unmasked 1D case.
        vector_1d = torch.FloatTensor([[1.0, 2.0, 3.0]])
        vector_1d_softmaxed = util.masked_softmax(vector_1d, None).data.numpy()
        assert_array_almost_equal(
            vector_1d_softmaxed, numpy.array([[0.090031, 0.244728, 0.665241]])
        )
        # The result is a probability distribution, so it sums to one.
        assert_almost_equal(1.0, numpy.sum(vector_1d_softmaxed), decimal=6)
        vector_1d = torch.FloatTensor([[1.0, 2.0, 5.0]])
        vector_1d_softmaxed = util.masked_softmax(vector_1d, None).data.numpy()
        assert_array_almost_equal(vector_1d_softmaxed, numpy.array([[0.017148, 0.046613, 0.93624]]))
        # Testing the unmasked 1D case where the input is all 0s.
        # An all-zero row softmaxes to a uniform distribution.
        vector_zero = torch.FloatTensor([[0.0, 0.0, 0.0]])
        vector_zero_softmaxed = util.masked_softmax(vector_zero, None).data.numpy()
        assert_array_almost_equal(
            vector_zero_softmaxed, numpy.array([[0.33333334, 0.33333334, 0.33333334]])
        )
        # Testing the general unmasked batched case.
        matrix = torch.FloatTensor([[1.0, 2.0, 5.0], [1.0, 2.0, 3.0]])
        masked_matrix_softmaxed = util.masked_softmax(matrix, None).data.numpy()
        assert_array_almost_equal(
            masked_matrix_softmaxed,
            numpy.array(
                [[0.01714783, 0.04661262, 0.93623955], [0.09003057, 0.24472847, 0.66524096]]
            ),
        )
        # Testing the unmasked batched case where one of the inputs are all 0s.
        matrix = torch.FloatTensor([[1.0, 2.0, 5.0], [0.0, 0.0, 0.0]])
        masked_matrix_softmaxed = util.masked_softmax(matrix, None).data.numpy()
        assert_array_almost_equal(
            masked_matrix_softmaxed,
            numpy.array(
                [[0.01714783, 0.04661262, 0.93623955], [0.33333334, 0.33333334, 0.33333334]]
            ),
        )
    def test_masked_softmax_masked(self):
        """Masked positions must receive exactly zero probability mass."""
        # Testing the general masked 1D case.
        vector_1d = torch.FloatTensor([[1.0, 2.0, 5.0]])
        mask_1d = torch.tensor([[True, False, True]])
        vector_1d_softmaxed = util.masked_softmax(vector_1d, mask_1d).data.numpy()
        assert_array_almost_equal(vector_1d_softmaxed, numpy.array([[0.01798621, 0.0, 0.98201382]]))
        vector_1d = torch.FloatTensor([[0.0, 2.0, 3.0, 4.0]])
        mask_1d = torch.tensor([[True, False, True, True]])
        vector_1d_softmaxed = util.masked_softmax(vector_1d, mask_1d).data.numpy()
        assert_array_almost_equal(
            vector_1d_softmaxed, numpy.array([[0.01321289, 0.0, 0.26538793, 0.72139918]])
        )
        # Testing the masked 1D case where the input is all 0s and the mask
        # is not all 0s.
        vector_1d = torch.FloatTensor([[0.0, 0.0, 0.0, 0.0]])
        mask_1d = torch.tensor([[False, False, False, True]])
        vector_1d_softmaxed = util.masked_softmax(vector_1d, mask_1d).data.numpy()
        assert_array_almost_equal(vector_1d_softmaxed, numpy.array([[0, 0, 0, 1]]))
        # Testing the masked 1D case where the input is not all 0s
        # and the mask is all 0s.
        # NOTE: with a fully-False mask the default mode returns an all-zero row
        # (the memory_efficient=True mode returns a uniform row instead; see the
        # dedicated test below).
        vector_1d = torch.FloatTensor([[0.0, 2.0, 3.0, 4.0]])
        mask_1d = torch.tensor([[False, False, False, False]])
        vector_1d_softmaxed = util.masked_softmax(vector_1d, mask_1d).data.numpy()
        assert_array_almost_equal(vector_1d_softmaxed, numpy.array([[0.0, 0.0, 0.0, 0.0]]))
        # Testing the masked 1D case where the input is all 0s and
        # the mask is all 0s.
        vector_1d = torch.FloatTensor([[0.0, 0.0, 0.0, 0.0]])
        mask_1d = torch.tensor([[False, False, False, False]])
        vector_1d_softmaxed = util.masked_softmax(vector_1d, mask_1d).data.numpy()
        assert_array_almost_equal(vector_1d_softmaxed, numpy.array([[0.0, 0.0, 0.0, 0.0]]))
        # Testing the masked 1D case where there are large elements in the
        # padding.
        vector_1d = torch.FloatTensor([[1.0, 1.0, 1e5]])
        mask_1d = torch.tensor([[True, True, False]])
        vector_1d_softmaxed = util.masked_softmax(vector_1d, mask_1d).data.numpy()
        assert_array_almost_equal(vector_1d_softmaxed, numpy.array([[0.5, 0.5, 0]]))
        # Testing the general masked batched case.
        matrix = torch.FloatTensor([[1.0, 2.0, 5.0], [1.0, 2.0, 3.0]])
        mask = torch.tensor([[True, False, True], [True, True, True]])
        masked_matrix_softmaxed = util.masked_softmax(matrix, mask).data.numpy()
        assert_array_almost_equal(
            masked_matrix_softmaxed,
            numpy.array([[0.01798621, 0.0, 0.98201382], [0.090031, 0.244728, 0.665241]]),
        )
        # Testing the masked batch case where one of the inputs is all 0s but
        # none of the masks are all 0.
        matrix = torch.FloatTensor([[0.0, 0.0, 0.0], [1.0, 2.0, 3.0]])
        mask = torch.tensor([[True, False, True], [True, True, True]])
        masked_matrix_softmaxed = util.masked_softmax(matrix, mask).data.numpy()
        assert_array_almost_equal(
            masked_matrix_softmaxed, numpy.array([[0.5, 0.0, 0.5], [0.090031, 0.244728, 0.665241]])
        )
        # Testing the masked batch case where one of the inputs is all 0s and
        # one of the masks are all 0.
        matrix = torch.FloatTensor([[0.0, 0.0, 0.0], [1.0, 2.0, 3.0]])
        mask = torch.tensor([[True, False, True], [False, False, False]])
        masked_matrix_softmaxed = util.masked_softmax(matrix, mask).data.numpy()
        assert_array_almost_equal(
            masked_matrix_softmaxed, numpy.array([[0.5, 0.0, 0.5], [0.0, 0.0, 0.0]])
        )
        matrix = torch.FloatTensor([[0.0, 0.0, 0.0], [1.0, 2.0, 3.0]])
        mask = torch.tensor([[False, False, False], [True, False, True]])
        masked_matrix_softmaxed = util.masked_softmax(matrix, mask).data.numpy()
        assert_array_almost_equal(
            masked_matrix_softmaxed, numpy.array([[0.0, 0.0, 0.0], [0.11920292, 0.0, 0.88079708]])
        )
def test_masked_softmax_memory_efficient_masked(self):
# Testing the general masked 1D case.
vector_1d = torch.FloatTensor([[1.0, 2.0, 5.0]])
mask_1d = torch.tensor([[True, False, True]])
vector_1d_softmaxed = util.masked_softmax(
vector_1d, mask_1d, memory_efficient=True
).data.numpy()
assert_array_almost_equal(vector_1d_softmaxed, numpy.array([[0.01798621, 0.0, 0.98201382]]))
vector_1d = torch.FloatTensor([[0.0, 2.0, 3.0, 4.0]])
mask_1d = torch.tensor([[True, False, True, True]])
vector_1d_softmaxed = util.masked_softmax(
vector_1d, mask_1d, memory_efficient=True
).data.numpy()
assert_array_almost_equal(
vector_1d_softmaxed, numpy.array([[0.01321289, 0.0, 0.26538793, 0.72139918]])
)
# Testing the masked 1D case where the input is all 0s and the mask
# is not all 0s.
vector_1d = torch.FloatTensor([[0.0, 0.0, 0.0, 0.0]])
mask_1d = torch.tensor([[False, False, False, True]])
vector_1d_softmaxed = util.masked_softmax(
vector_1d, mask_1d, memory_efficient=True
).data.numpy()
assert_array_almost_equal(vector_1d_softmaxed, numpy.array([[0, 0, 0, 1]]))
# Testing the masked 1D case where the input is not all 0s
# and the mask is all 0s.
vector_1d = torch.FloatTensor([[0.0, 2.0, 3.0, 4.0]])
mask_1d = torch.tensor([[False, False, False, False]])
vector_1d_softmaxed = util.masked_softmax(
vector_1d, mask_1d, memory_efficient=True
).data.numpy()
assert_array_almost_equal(vector_1d_softmaxed, numpy.array([[0.25, 0.25, 0.25, 0.25]]))
# Testing the masked 1D case where the input is all 0s and
# the mask is all 0s.
vector_1d = torch.FloatTensor([[0.0, 0.0, 0.0, 0.0]])
mask_1d = torch.tensor([[False, False, False, False]])
vector_1d_softmaxed = util.masked_softmax(
vector_1d, mask_1d, memory_efficient=True
).data.numpy()
assert_array_almost_equal(vector_1d_softmaxed, numpy.array([[0.25, 0.25, 0.25, 0.25]]))
# Testing the masked 1D case where there are large elements in the
# padding.
vector_1d = torch.FloatTensor([[1.0, 1.0, 1e5]])
mask_1d = torch.tensor([[True, True, False]])
vector_1d_softmaxed = util.masked_softmax(
vector_1d, mask_1d, memory_efficient=True
).data.numpy()
assert_array_almost_equal(vector_1d_softmaxed, numpy.array([[0.5, 0.5, 0]]))
# Testing the general masked batched case.
matrix = torch.FloatTensor([[1.0, 2.0, 5.0], [1.0, 2.0, 3.0]])
mask = torch.tensor([[True, False, True], [True, True, True]])
masked_matrix_softmaxed = util.masked_softmax(
matrix, mask, memory_efficient=True
).data.numpy()
assert_array_almost_equal(
masked_matrix_softmaxed,
numpy.array([[0.01798621, 0.0, 0.98201382], [0.090031, 0.244728, 0.665241]]),
)
# Testing the masked batch case where one of the inputs is all 0s but
# none of the masks are all 0.
matrix = torch.FloatTensor([[0.0, 0.0, 0.0], [1.0, 2.0, 3.0]])
mask = torch.tensor([[True, False, True], [True, True, True]])
masked_matrix_softmaxed = util.masked_softmax(
matrix, mask, memory_efficient=True
).data.numpy()
assert_array_almost_equal(
masked_matrix_softmaxed, numpy.array([[0.5, 0.0, 0.5], [0.090031, 0.244728, 0.665241]])
)
# Testing the masked batch case where one of the inputs is all 0s and
# one of the masks are all 0.
matrix = torch.FloatTensor([[0.0, 0.0, 0.0], [1.0, | |
set local=1. The
Daylight Savings flag is always -1, which means unknown, and may
or may not have ramifications.
"""
if local:
return (self._localYear,
self._localMonth,
self._localDay,
self._localHour,
self._localMinute,
self._second,
self._localWeekday,
self._localDayOfYear,
-1)
else:
return (self._utcYear,
self._utcMonth,
self._utcDay,
self._utcHour,
self._utcMinute,
self._second,
self._utcWeekday,
self._utcDayOfYear,
-1)
def year(self, local=0):
"""
Returns the year component of the stored date and time as an int
like 2001.
"""
if local: return self._localYear
return self._utcYear
    def month(self, local=0):
        """
        Returns the month component of the stored date and time as an int
        in the range 1-12.

        (The previous docstring claimed 0-11, but the value is used
        directly to index monthNameTable, whose slot 0 is an error
        sentinel and whose slot 1 is 'January'.)
        """
        if local: return self._localMonth
        return self._utcMonth
def monthName(self, local=0):
"""
Returns the month component of the stored date and time as a
string like 'January'.
"""
if local:
return self.monthNameTable[self._localMonth]
return self.monthNameTable[self._utcMonth]
def abbreviatedMonthName(self, local=0):
"""
Returns the month component of the stored date and time as a
string like 'Jan'.
"""
if local:
return self.abbreviatedMonthNameTable[self._localMonth]
return self.abbreviatedMonthNameTable[self._utcMonth]
def day(self, local=0):
"""
Returns the day component of the stored date and time as an
integer in the range 1-31.
"""
if local: return self._localDay
return self._utcDay
def dayOfYear(self, local=0):
"""
Returns the day of year component of the stored date and time
as an int in the range 1-366.
"""
if local: return self._localDayOfYear
return self._utcDayOfYear
def dayOfWeek(self, local=0):
"""
Returns the day of week component of the stored date and time
as an int in the range 0-6 (0=Monday).
"""
if local: return self._localWeekday
return self._utcWeekday
def hour(self, local=0):
"""
Returns the hour component of the stored date and time as an int
in the range 0-23.
"""
if local: return self._localHour
return self._utcHour
def minute(self, local=0):
"""
Returns the minute component of the stored date and time as an
int in the range 0-59.
"""
if local: return self._localMinute
return self._utcMinute
def second(self):
"""
Returns the second component of the stored date and time as an
int in the range 0-59.
"""
return self._second
def milliSecond(self):
"""
Returns the millisecond component of the stored date and time as
an int in the range 0-999.
"""
return self._milliSecond
def tzName(self):
"""
Returns the local time's time zone name component of the stored
date and time as a string like 'MST'.
"""
return self._tzName
def tzHourOffset(self):
"""
Returns the local time's hour offset from GMT component of the
stored date and time as an int, typically in the range -12 to 14.
"""
return self._tzHourOffset
def tzMinuteOffset(self):
"""
Returns the local time's minute offset from GMT component of the
stored date and time as an int in the range 0-59.
"""
return self._tzMinuteOffset
def __normalizeMinute(self, minute):
hourShift = 0
while minute < 0:
hourShift -=1
minute += 60
while minute > 59:
hourShift +=1
minute -= 60
return hourShift, minute
def __normalizeHour(self, hour):
dayShift = 0
while hour < 0:
dayShift -=1
hour += 24
while hour > 23:
dayShift +=1
hour -= 24
return dayShift, hour
    def __normalizeDate(self, day, month, year):
        """Normalize an out-of-range (day, month) into a valid calendar
        date, returning (year, month, day)."""
        # Returns a valid year, month and day, given a day value that is out
        # of the acceptable range. This is needed so that the correct local
        # date can be determined after adding the local time offset to the
        # UTC time. The time difference may result in the day being shifted,
        # for example Jan 1 may become Jan 0, which needs to be normalized
        # to Dec 31 of the preceding year. This function may also be used to
        # convert a Julian day (1-366) for the given year to a proper year,
        # month and day, if the month is initially set to 1.
        #
        # Each loop iteration fixes exactly one violation; the order of the
        # elif arms matters (month is repaired before day so that the
        # _month_days lookup below is always given a month in 1-12).
        while (month < 1 or
               month > 12 or
               day < 1 or
               day > _month_days[calendar.isleap(year)][month]
               ):
            if month < 1:
                year -= 1
                month += 12
            elif month > 12:
                year += 1
                month -= 12
            elif day < 1:
                month -= 1
                if month == 0:
                    #Special case
                    # month 0 is December of the previous year (31 days);
                    # the month<1 arm will fix year/month on the next pass.
                    day += 31
                else:
                    day += _month_days[calendar.isleap(year)][month]
            elif day > _month_days[calendar.isleap(year)][month]:
                day -= _month_days[calendar.isleap(year)][month]
                month += 1
        return year, month, day
#Pythonic Interface
__str__ = asISO8601DateTime
    def __cmp__(self, other):
        """Three-way comparison (Python 2 protocol only).

        Strings compare against our ISO 8601 rendering, numbers against
        the epoch-seconds value, and other instances by their ISO 8601
        strings.  Raises TypeError for anything else.

        NOTE(review): relies on Python 2 builtins `cmp` and `unicode`;
        this method is dead code under Python 3.
        """
        if isinstance(other, (str, unicode)):
            return cmp(self.asISO8601DateTime(), other)
        elif isinstance(other, (int, float)):
            return cmp(self.asPythonTime(), other)
        elif not isinstance(other, DT):
            raise TypeError("Cannot Compare DT with %s" % repr(other))
        #Compare two instances
        #For now, compare our strings
        # (ISO 8601 strings sort chronologically, so string comparison is
        # a valid ordering as long as both values share a time zone form)
        return cmp(self.asISO8601DateTime(), other.asISO8601DateTime())
    def __hash__(self):
        # Identity-based hash.  NOTE(review): __cmp__ reports two DTs for
        # the same moment as equal, yet they hash differently here, which
        # breaks dict/set lookups keyed by value -- confirm whether callers
        # rely on identity semantics before changing this.
        return id(self)
#For internal lookups
abbreviatedMonthNameTable = ('ERR', 'Jan', 'Feb', 'Mar', 'Apr', 'May',
'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov',
'Dec')
monthNameTable = ('ERROR', 'January', 'February', 'March', 'April',
'May', 'June', 'July', 'August', 'September',
'October', 'November', 'December')
weekdayNameTable = ('Monday', 'Tuesday', 'Wednesday', 'Thursday',
'Friday', 'Saturday', 'Sunday')
abbreviatedWeekdayNameTable = ('Mon', 'Tue', 'Wed', 'Thu',
'Fri', 'Sat', 'Sun')
# keyed by offset;
# values are (GMT TZ, military TZ, most likely civ TZ,
# most likely civ TZ if on summer/daylight savings time)
tzNameTable = {
+0 : ("GMT", "Zulu", "GMT", "BST"),
+1 : ("GMT+1", "Alpha", "CET", "MEST"),
+2 : ("GMT+2", "Bravo", "EET", ""),
+3 : ("GMT+3", "Charlie", "BT", ""),
+3.5 : ("GMT+3:30", "", "", ""),
+4 : ("GMT+4", "Delta", "", ""),
+4.5 : ("GMT+4:30", "", "", ""),
+5 : ("GMT+5", "Echo", "", ""),
+5.5 : ("GMT+5:30", "", "", ""),
+6 : ("GMT+6", "Foxtrot", "", ""),
+6.5 : ("GMT+6:30", "", "", ""),
+7 : ("GMT+7", "Golf", "WAST", ""),
+8 : ("GMT+8", "Hotel", "CCT", ""),
+9 : ("GMT+9", "India", "JST", ""),
+9.5 : ("GMT+9:30", "", "Australia Central Time", ""),
+10 : ("GMT+10", "Kilo", "GST", ""),
+10.5 : ("GMT+10:30", "", "", ""),
+11 : ("GMT+11", "Lima", "", ""),
+11.5 : ("GMT+11:30", "", "", ""),
+12 : ("GMT+12", "Mike", "NZST", ""),
+13 : ("GMT+13", "", "", ""),
+14 : ("GMT+14", "", "", ""),
-1 : ("GMT-1", "November", "WAT", ""),
-2 : ("GMT-2", "Oscar", "AT", ""),
-3 : ("GMT-3", "Papa", "", "ADT"),
-3.5 : ("GMT-3", "", "", ""),
-4 : ("GMT-4", "Quebec", "AST", "EDT"),
-5 : ("GMT-5", "Romeo", "EST", "CDT"),
-6 : ("GMT-6", "Sierra", "CST", "MDT"),
-7 : ("GMT-7", "Tango", "MST", "PDT"),
-8 : ("GMT-8", "Uniform", "PST", ""),
-8.5 : ("GMT-8:30", "", "", "YDT"),
-9 : ("GMT-9", "Victor", "YST", ""),
-9.5 : ("GMT-9:30", "", "", "HDT"),
-10 : ("GMT-10", "Whiskey", "AHST", ""),
-11 : ("GMT-11", "XRay", "NT", ""),
-12 : ("GMT-11", "Yankee", "IDLW", ""),
}
    # --- ISO 8601 parser building blocks --------------------------------
    # The fragments below are %-composed into candidate regular
    # expressions; g_isoExpressions holds [pattern, compiled] pairs whose
    # compiled slot starts as None (presumably filled lazily by the
    # parsing code outside this view -- confirm).
    # NOTE(review): map() returns a list on Python 2 only; this module is
    # Python 2 code (see __cmp__ above).
    CENTURY="(?P<Century>[0-9]{2,2})"
    YEAR="(?P<Year>[0-9]{2,2})"
    MONTH="(?P<Month>[0-9]{2,2})"
    DAY="(?P<Day>[0-9]{2,2})"
    # Calendar dates: "basic" has no separators, "extended" uses '-'.
    BASIC_DATE="%s?%s%s%s" % (CENTURY, YEAR, MONTH, DAY)
    EXTENDED_DATE="%s?%s-%s-%s" % (CENTURY, YEAR, MONTH, DAY)
    YEAR_AND_MONTH_DATE="(-|%s)%s-%s" % (CENTURY, YEAR, MONTH)
    YEAR_AND_MONTH_DATE_EXTENDED="-%s%s" % (YEAR, MONTH)
    YEAR_ONLY_DATE="(-|%s)%s" % (CENTURY, YEAR)
    CENTURY_ONLY_DATE=CENTURY
    DAY_OF_MONTH="--%s(?:-?%s)?" % (MONTH, DAY)
    DAY_ONLY_DATE="---%s" % (DAY)
    #build the list of calendar date expressions
    cd_expressions = [BASIC_DATE,
                      EXTENDED_DATE,
                      YEAR_AND_MONTH_DATE,
                      YEAR_AND_MONTH_DATE_EXTENDED,
                      YEAR_ONLY_DATE,
                      CENTURY_ONLY_DATE,
                      DAY_OF_MONTH,
                      DAY_ONLY_DATE]
    cd_expressions = map(lambda x:"(?P<CalendarDate>%s)" % x, cd_expressions)
    # Ordinal dates: year plus day-of-year (001-366).
    ORDINAL_DAY="(?P<Ordinal>[0-9]{3,3})"
    ORDINAL_DATE="(?P<OrdinalDate>%s?%s-?%s)" % (CENTURY, YEAR, ORDINAL_DAY)
    ORDINAL_DATE_ONLY="(?P<OrdinalDate>-%s)" % (ORDINAL_DAY)
    od_expressions = [ORDINAL_DATE, ORDINAL_DATE_ONLY]
    # Week dates: ISO week number (Www) with an optional weekday 1-7.
    WEEK="(?P<Week>[0-9][0-9])"
    WEEK_DAY="(?P<Weekday>[1-7])"
    BASIC_WEEK_DATE="%s?%sW%s%s?" %(CENTURY, YEAR, WEEK, WEEK_DAY)
    EXTENDED_WEEK_DATE="%s?%s-W%s(?:-%s)?" %(CENTURY, YEAR, WEEK, WEEK_DAY)
    WEEK_IN_DECADE="-(?P<YearInDecade>[0-9])W%s%s" % (WEEK, WEEK_DAY)
    WEEK_IN_DECADE_EXTENDED="-(?P<YearInDecade>[0-9])-W%s-%s" % (WEEK, WEEK_DAY)
    WEEK_AND_DAY_BASIC="-W%s(?:-?%s)?"%(WEEK, WEEK_DAY)
    WEEKDAY_ONLY="-W?-%s" % (WEEK_DAY)
    #build the list of week date expressions
    wd_expressions=[BASIC_WEEK_DATE,
                    EXTENDED_WEEK_DATE,
                    WEEK_IN_DECADE,
                    WEEK_IN_DECADE_EXTENDED,
                    WEEK_AND_DAY_BASIC,
                    WEEKDAY_ONLY]
    wd_expressions = map(lambda x:"(?P<WeekDate>%s)" % x, wd_expressions)
    #Build the list of date expressions
    date_expressions = map(lambda x:"(?P<Date>%s)" % x, cd_expressions+od_expressions+wd_expressions)
    # Time-of-day; the grammar admits 24 hours and 60 minutes/seconds
    # (midnight / leap second), presumably range-checked later -- confirm.
    HOUR="(?P<Hour>(?:0[0-9])|(?:1[0-9])|(?:2[0-4]))"
    MINUTE="(?P<Minute>(?:[0-5][0-9])|(?:60))"
    SECOND="(?P<Second>(?:[0-5][0-9])|(?:60))"
    DECIMAL_SEPARATOR="(?:\.|,)"
    DECIMAL_VALUE="(?P<DecimalValue>[0-9]*)"
    BASIC_TIME_FORMAT="(?:%s%s%s(?:%s%s)?)" % (HOUR, MINUTE, SECOND, DECIMAL_SEPARATOR, DECIMAL_VALUE)
    EXTENDED_TIME_FORMAT="(?:%s:%s:%s(?:%s%s)?)" % (HOUR, MINUTE, SECOND, DECIMAL_SEPARATOR, DECIMAL_VALUE)
    HOUR_MINUTE_TIME="(?:%s:?%s(?:%s%s)?)" % (HOUR, MINUTE, DECIMAL_SEPARATOR, DECIMAL_VALUE)
    HOUR_TIME="(?:%s(?:%s%s)?)" % (HOUR, DECIMAL_SEPARATOR, DECIMAL_VALUE)
    MINUTE_SECOND_TIME="(?:-%s:?%s(?:%s%s)?)" % (MINUTE, SECOND, DECIMAL_SEPARATOR, DECIMAL_VALUE)
    MINUTE_TIME="(?:-%s(?:%s%s)?)" % (MINUTE, DECIMAL_SEPARATOR, DECIMAL_VALUE)
    SECOND_TIME="(?P<CurrentSecond>--%s(?:%s%s)?)" % (SECOND, DECIMAL_SEPARATOR, DECIMAL_VALUE)
    #build the basic time expressions
    bt_expressions = [BASIC_TIME_FORMAT,
                      EXTENDED_TIME_FORMAT,
                      HOUR_MINUTE_TIME,
                      HOUR_TIME,
                      MINUTE_SECOND_TIME,
                      MINUTE_TIME,
                      SECOND_TIME]
    bt_expressions = map(lambda x:"(?P<Time>%s)"%x, bt_expressions)
    # Time-zone designators: 'Z' for UTC or a +/-hh[:mm] offset.
    UTC_TIME_ZONE="Z"
    TZ_DIRECTION="(?P<TzDirection>\+|-)"
    TZ_HOUR="(?P<TzHour>(?:0[0-9])|(?:1[0-9])|(?:2[0-4]))"
    TZ_MINUTE="(?P<TzMinute>(?:[0-5][0-9])|(?:60))"
    BASIC_TIME_ZONE="(?P<TzOffset>%s%s(?::?%s)?)" % (TZ_DIRECTION, TZ_HOUR, TZ_MINUTE)
    TIME_ZONE="(?P<TimeZone>%s|%s)" % (UTC_TIME_ZONE,
                                       BASIC_TIME_ZONE)
    #build the tz expressions
    # (t=TIME_ZONE binds the value at definition time -- the standard
    # default-argument closure idiom)
    tz_expressions=map(lambda x, t=TIME_ZONE: "%s%s?" % (x, t), bt_expressions)
    #Lastly build the list of all possible expressions
    g_isoExpressions = []
    #First, All Date expressions
    for e in date_expressions:
        g_isoExpressions.append(["^"+e+"$", None])
    #Then, all Time expressions. Not to worry about clashes because date has precedence and is first
    for e in tz_expressions:
        g_isoExpressions.append(["^"+e+"$", None])
    # Times may also carry an explicit leading 'T'.
    for e in tz_expressions:
        g_isoExpressions.append(["^T"+e+"$", None])
    #now add the combination of the two
    for d in date_expressions:
        for t in tz_expressions:
            g_isoExpressions.append(["^"+d+"T"+t+"$", None])
    #cleanup namespace a bit
    # (only g_isoExpressions and the *_expressions lists remain visible)
    del BASIC_DATE, BASIC_TIME_FORMAT, BASIC_TIME_ZONE, BASIC_WEEK_DATE
    del CENTURY, CENTURY_ONLY_DATE
    del DAY, DAY_OF_MONTH, DAY_ONLY_DATE, DECIMAL_SEPARATOR, DECIMAL_VALUE
    del EXTENDED_DATE, EXTENDED_TIME_FORMAT, EXTENDED_WEEK_DATE
    del HOUR, HOUR_MINUTE_TIME, HOUR_TIME
    del MINUTE, MINUTE_SECOND_TIME, MINUTE_TIME, MONTH
    del ORDINAL_DATE, ORDINAL_DATE_ONLY, ORDINAL_DAY
    del SECOND, SECOND_TIME
    del TIME_ZONE, TZ_DIRECTION, TZ_HOUR
    del TZ_MINUTE, UTC_TIME_ZONE,
    del WEEK, WEEKDAY_ONLY, WEEK_AND_DAY_BASIC, WEEK_DAY, WEEK_IN_DECADE, WEEK_IN_DECADE_EXTENDED
del | |
<gh_stars>0
__author__ = "<NAME>, <NAME>"
__copyright__ = "Copyright 2013, The SAGA Project"
__license__ = "MIT"
import time
import saga
import radical.utils.testing as testing
import saga.utils.test_config as sutc
from copy import deepcopy
# ------------------------------------------------------------------------------
#
def _silent_cancel(job_obj):
    # Best-effort cancel used during test teardown.  Any failure (job
    # already finished, backend gone, job_obj is None, ...) is
    # deliberately swallowed so cleanup never masks the real outcome.
    try:
        job_obj.cancel()
    except Exception:
        return
# ------------------------------------------------------------------------------
#
def _silent_close_js(js_obj):
    # Best-effort close of a job service during test teardown; all errors
    # are deliberately swallowed.  (The original comment said "cancel job"
    # -- a copy/paste from _silent_cancel.)
    try:
        js_obj.close()
    except Exception:
        return
# ------------------------------------------------------------------------------
#
def test_job_service_get_url():
    """A Service must echo back the URL it was created with."""
    js = None
    try:
        cfg = testing.get_test_config()
        js = saga.job.Service(cfg.job_service_url, cfg.session)
        assert js, "job service creation failed?"
        url_str = str(js.url)
        assert (cfg.job_service_url == url_str), "%s == %s" % (cfg.job_service_url, url_str)
    except saga.SagaException as ex:
        assert False, "unexpected exception %s" % ex
    finally:
        _silent_close_js(js)
# ------------------------------------------------------------------------------
#
def test_job_service_invalid_url():
    """A non-resolvable hostname must raise a proper SAGA exception."""
    try:
        cfg = testing.get_test_config()
        bad_url = deepcopy(saga.Url(cfg.job_service_url))
        bad_url.host = "does.not.exist"
        tmp_js = saga.job.Service(bad_url, cfg.session)
        _silent_close_js(tmp_js)
        assert False, "Expected XYZ exception but got none."
    except (saga.BadParameter, saga.Timeout, saga.NoSuccess):
        # DNS is no longer checked eagerly (that can take *ages*), so the
        # failure may surface as Timeout or NoSuccess instead of
        # BadParameter -- all three are acceptable.
        assert True
    # other exceptions should never occur
    except saga.SagaException as ex:
        assert False, "Expected BadParameter, Timeout or NoSuccess exception, but got %s (%s)" % (type(ex), ex)
# ------------------------------------------------------------------------------
#
def test_job_service_create():
""" Test service.create_job() - expecting state 'NEW'
"""
js = None
try:
tc = testing.get_test_config ()
js = saga.job.Service(tc.job_service_url, tc.session)
jd = saga.job.Description()
jd.executable = '/bin/sleep'
jd.arguments = ['10']
# add options from the test .cfg file if set
jd = sutc.add_tc_params_to_jd(tc=tc, jd=jd)
j = js.create_job(jd)
assert j.state == j.get_state()
assert j.state == saga.job.NEW
except saga.NotImplemented as ni:
assert tc.notimpl_warn_only, "%s " % ni
if tc.notimpl_warn_only:
print "%s " % ni
except saga.SagaException as se:
assert False, "Unexpected exception: %s" % se
finally:
_silent_close_js(js)
# ------------------------------------------------------------------------------
#
def test_job_service_get_session():
    """The session handed to Service() must be retrievable again."""
    js = None
    session = None
    try:
        cfg = testing.get_test_config()
        session = cfg.session or saga.Session()
        js = saga.job.Service(cfg.job_service_url, session)
        # the same session must be visible through every accessor,
        # including the adaptor-level ones
        for got in (js.get_session(), js.session,
                    js._adaptor.get_session(), js._adaptor.session):
            assert got == session, "Setting service session failed."
    except saga.SagaException as ex:
        assert False, "unexpected exception %s" % ex
    finally:
        _silent_close_js(js)
# ------------------------------------------------------------------------------
#
def test_job_run():
""" Test job.run() - expecting state: RUNNING/PENDING
"""
js = None
j = None
try:
tc = testing.get_test_config ()
js = saga.job.Service(tc.job_service_url, tc.session)
jd = saga.job.Description()
jd.executable = '/bin/sleep'
jd.arguments = ['10']
# add options from the test .cfg file if set
jd = sutc.add_tc_params_to_jd(tc=tc, jd=jd)
j = js.create_job(jd)
j.run()
assert (j.state in [saga.job.RUNNING, saga.job.PENDING]), "j.state: %s" % j.state
except saga.NotImplemented as ni:
assert tc.notimpl_warn_only, "%s " % ni
if tc.notimpl_warn_only:
print "%s " % ni
except saga.SagaException as se:
assert False, "Unexpected exception: %s" % se
finally:
_silent_cancel(j)
_silent_close_js(js)
# ------------------------------------------------------------------------------
#
def test_job_wait():
""" Test job.wait() - expecting state: DONE (this test might take a while)
"""
js = None
j = None
try:
tc = testing.get_test_config ()
js = saga.job.Service(tc.job_service_url, tc.session)
jd = saga.job.Description()
jd.executable = '/bin/sleep'
jd.arguments = ['10']
# add options from the test .cfg file if set
jd = sutc.add_tc_params_to_jd(tc=tc, jd=jd)
j = js.create_job(jd)
j.run()
j.wait()
assert j.state == saga.job.DONE, "%s != %s" % (j.state, saga.job.DONE)
except saga.NotImplemented as ni:
assert tc.notimpl_warn_only, "%s " % ni
if tc.notimpl_warn_only:
print "%s " % ni
except saga.SagaException as se:
assert False, "Unexpected exception: %s" % se
finally:
_silent_cancel(j)
_silent_close_js(js)
# ------------------------------------------------------------------------------
#
def test_job_multiline_run():
    """ Test job.run() with multiline command
    """
    js = None
    j = None
    try:
        tc = testing.get_test_config ()
        js = saga.job.Service(tc.job_service_url, tc.session)
        jd = saga.job.Description()
        jd.executable = '/bin/sh'
        # NOTE: the argument below is a single multi-line string; its
        # exact whitespace (embedded newlines and indentation) is part of
        # the shell command handed to /bin/sh and must not be reformatted.
        jd.arguments = ["""-c "python -c '
import time
if True :
  if True :
    time.sleep (3)
'
"
"""]
        # add options from the test .cfg file if set
        jd = sutc.add_tc_params_to_jd(tc=tc, jd=jd)
        j = js.create_job(jd)
        j.run()
        # first the job must be accepted by the backend...
        assert (j.state in [saga.job.RUNNING, saga.job.PENDING]), 'j.state: %s' % j.state
        j.wait()
        # ...and then complete successfully despite the multi-line command
        assert (j.state == saga.job.DONE), "j.state: %s " % j.state
    except saga.NotImplemented as ni:
        assert tc.notimpl_warn_only, "%s " % ni
        if tc.notimpl_warn_only:
            print "%s " % ni
    except saga.SagaException as se:
        assert False, "Unexpected exception: %s" % se
    finally:
        _silent_cancel(j)
        _silent_close_js(js)
# ------------------------------------------------------------------------------
#
def test_job_suspend_resume():
""" Test job.suspend()/resume() - expecting state: SUSPENDED/RUNNING
"""
js = None
j = None
try:
tc = testing.get_test_config ()
js = saga.job.Service(tc.job_service_url, tc.session)
jd = saga.job.Description()
jd.executable = '/bin/sleep'
jd.arguments = ['20']
# add options from the test .cfg file if set
jd = sutc.add_tc_params_to_jd(tc=tc, jd=jd)
j = js.create_job(jd)
j.run()
j.suspend()
assert j.state == saga.job.SUSPENDED
assert j.state == j.get_state()
j.resume()
assert j.state == saga.job.RUNNING
assert j.state == j.get_state()
except saga.NotImplemented as ni:
assert tc.notimpl_warn_only, "%s " % ni
if tc.notimpl_warn_only:
print "%s " % ni
except saga.SagaException as se:
assert False, "Unexpected exception: %s" % se
finally:
_silent_cancel(j)
_silent_close_js(js)
# ------------------------------------------------------------------------------
#
def test_job_cancel():
""" Test job.cancel() - expecting state: CANCELED
"""
js = None
j = None
try:
tc = testing.get_test_config ()
js = saga.job.Service(tc.job_service_url, tc.session)
jd = saga.job.Description()
jd.executable = '/bin/sleep'
jd.arguments = ['10']
# add options from the test .cfg file if set
jd = sutc.add_tc_params_to_jd(tc=tc, jd=jd)
j = js.create_job(jd)
j.run()
j.cancel()
assert j.state == saga.job.CANCELED
except saga.NotImplemented as ni:
assert tc.notimpl_warn_only, "%s " % ni
if tc.notimpl_warn_only:
print "%s " % ni
except saga.SagaException as se:
assert False, "Unexpected exception: %s" % se
finally:
_silent_close_js(js)
# ------------------------------------------------------------------------------
#
def test_job_run_many():
""" Run a bunch of jobs concurrently via the same job service.
"""
NUM_JOBS = 32
js = None
jobs = []
try:
tc = testing.get_test_config ()
js = saga.job.Service(tc.job_service_url, tc.session)
jd = saga.job.Description()
jd.executable = '/bin/sleep'
jd.arguments = ['60']
# add options from the test .cfg file if set
jd = sutc.add_tc_params_to_jd(tc=tc, jd=jd)
for i in range(0, NUM_JOBS):
j = js.create_job(jd)
jobs.append(j)
# start all jobs
for job in jobs:
job.run()
# wait a bit
time.sleep(10)
for job in jobs:
job.cancel()
assert job.state == saga.job.CANCELED
except saga.NotImplemented as ni:
assert tc.notimpl_warn_only, "%s " % ni
if tc.notimpl_warn_only:
print "%s " % ni
except saga.SagaException as se:
assert False, "Unexpected exception: %s" % se
finally:
for j in jobs:
_silent_cancel(j)
_silent_close_js(js)
# ------------------------------------------------------------------------------
#
def test_get_exit_code():
""" Test job.exit_code
"""
js = None
j = None
try:
tc = testing.get_test_config ()
js = saga.job.Service(tc.job_service_url, tc.session)
jd = saga.job.Description()
jd.executable = "/bin/sleep"
# add options from the test .cfg file if set
jd = sutc.add_tc_params_to_jd(tc=tc, jd=jd)
j = js.create_job(jd)
j.run()
j.wait()
ec = j.exit_code
assert ec == 1, "%s != 1" % ec
except saga.NotImplemented as ni:
assert tc.notimpl_warn_only, "%s " % ni
if tc.notimpl_warn_only:
print "%s " % ni
except saga.SagaException as se:
assert False, "Unexpected exception: %s" % se
finally:
_silent_cancel(j)
_silent_close_js(js)
# ------------------------------------------------------------------------------
#
def test_get_stdio():
""" Test job.get_stdin/get_stdout/get_log
"""
js = None
j = None
try:
tc = testing.get_test_config ()
js = saga.job.Service(tc.job_service_url, tc.session)
jd = saga.job.Description()
jd.pre_exec = ['echo pre' ]
jd.executable = 'sh'
jd.arguments = ['-c', '"echo out; echo err 1>&2"']
jd.post_exec = ['echo post']
# add options from the test .cfg file if set
jd = sutc.add_tc_params_to_jd(tc=tc, jd=jd)
j = js.create_job(jd)
j.run()
j.wait()
assert 0 == j.exit_code
assert 'pre' in j.get_log()
assert 'post' in j.get_log()
assert 'out' in j.get_stdout()
assert 'err' in j.get_stderr()
assert 'pre' in j.log
assert 'post' in j.log
assert 'out' in j.stdout
assert 'err' in j.stderr
except saga.NotImplemented as ni:
assert tc.notimpl_warn_only, "%s " % ni
if tc.notimpl_warn_only:
print "%s " % ni
except saga.SagaException as se:
assert False, "Unexpected exception: %s" % se
finally:
_silent_cancel(j)
_silent_close_js(js)
# ------------------------------------------------------------------------------
#
def test_get_service_url():
""" Test if job.service_url == Service.url
"""
js = None
try:
tc = testing.get_test_config ()
js = saga.job.Service(tc.job_service_url, tc.session)
jd = saga.job.Description()
jd.executable = '/bin/sleep'
jd.arguments = ['10']
# add options from the test .cfg file if set
jd = sutc.add_tc_params_to_jd(tc=tc, jd=jd)
j = js.create_job(jd)
assert j.service_url == js.url
except saga.NotImplemented as ni:
assert tc.notimpl_warn_only, "%s " % ni
if tc.notimpl_warn_only:
print "%s " % ni
except saga.SagaException as | |
<gh_stars>0
#!/usr/bin/env python
#coding: utf8
# by wgwang <EMAIL>
#
import os
import sys
import psycopg2
import pymongo
import jinja2
from datetime import datetime
from setting import pg, mdb
# 'interval' is an optional knob in setting.py; default to 10 minutes.
# BUG FIX: the original bare "except:" swallowed *every* error (including
# SystemExit/KeyboardInterrupt and genuine bugs inside setting.py); only a
# missing name should be silently defaulted.
try:
    from setting import interval
except ImportError:
    interval = 600
import traceback
import logging
import time
import zipfile
import tarfile
#daemon processing
# Detach from the controlling terminal by forking twice; the parent of
# each fork exits so only the grandchild survives in the background.
# NOTE(review): there is no os.setsid() between the forks and no chdir/
# umask handling, so this is not a full double-fork daemonization --
# confirm whether that is intentional.
pid = os.fork()
if pid != 0:
    sys.exit()
pid = os.fork()
if pid != 0:
    sys.exit()
# Python 2 only: re-expose sys.setdefaultencoding (removed by site.py)
# so all implicit str<->unicode conversions use UTF-8.
reload(sys)
sys.setdefaultencoding('utf-8')
# Date stamp used in output file names (e.g. inventory.20130101.*).
today = datetime.today().strftime('%Y%m%d')
# Project root: one level above the directory containing this script.
rootpath = os.path.abspath(os.path.join(os.getcwd(), os.path.dirname(sys.argv[0]), '..'))
'''
logfile = os.path.join(rootpath, 'logs', 'sm.log.'+today)
logformat = '%(asctime)s -- %(message)s'
logging.basicConfig(filename=logfile, level=logging.DEBUG, format=logformat)
'''
# Jinja2 templates for the full-inventory and on-sale XML exports.
inventory_template_file = os.path.join(rootpath, 'req_postgres/template', 'inventory.xml.template')
inventory_sale_template_file = os.path.join(rootpath, 'template', 'inventory.sale.xml.template')
inventory_datafile = os.path.join(rootpath, 'data', 'inventory.%s.' %today)
#logging.info('begin...')
# NOTE: the Postgres connection is opened at import time as a module-level
# side effect; the mongo connection below is currently disabled.
pgcon = psycopg2.connect(database=pg['db'], user=pg['user'], host=pg['host'], port=pg['port'])
#mcon = pymongo.Connection(host = mdb['host'], port=mdb['port'])
# Field order for the per-product records built in full_stock_company().
pkey = ('_id', 'code', 'cate', 'brand', 'model', 'material', 'color', 'size', 'price', 'quatity', 'product_name', 'price_eu', 'pt_name', 'pt_sku')
def misc_info(pgcon):
    """Fetch id->name lookup tables for brands, sizes and categories.

    Returns a (dbrands, dsizes, dcates) tuple of dicts; each dict also
    maps None to '' so products with a missing reference render blank.
    """
    def _lookup(cur, sql):
        # run one id/name query and turn it into a dict with a None entry
        cur.execute(sql)
        table = dict(cur.fetchall())
        table[None] = ''
        return table

    cur = pgcon.cursor()
    dbrands = _lookup(cur, "select id,name from product_brand;")
    dsizes = _lookup(cur, "select id,name from product_size;")
    dcates = _lookup(cur, "select id,name from product_category")
    cur.close()
    return (dbrands, dsizes, dcates)
def full_stock_lots(pgcon, lots):
    """Compute current on-hand quantity per product for a set of locations.

    *lots* is a comma-separated string of stock location ids (built
    internally, never from user input).  Returns (max_stock_id, stock)
    where stock maps product_id -> positive int quantity; max_stock_id is
    the stock_move high-water mark the snapshot was taken at, so later
    incremental updates know where to resume.
    """
    cur = pgcon.cursor()
    sql_cmd = "select max(id) from stock_move"
    cur.execute(sql_cmd)
    max_stock_id = cur.fetchall()[0][0]
    # Inbound: completed moves from outside the locations into them.
    sql_cmd="select product_id,sum(product_qty) from stock_move where id <= %d and location_id not in (%s) and location_dest_id in (%s) and state ='done' group by product_id" %(max_stock_id, lots, lots)
    cur.execute(sql_cmd)
    stock = dict(cur.fetchall())
    # Outbound: moves leaving the locations; reservations (confirmed/
    # waiting/assigned) count as gone so stock is never oversold.
    sql_cmd="select product_id,sum(product_qty) from stock_move where id <= %d and location_id in (%s) and location_dest_id not in (%s) and state in ('done','confirmed','waiting','assigned') group by product_id" %(max_stock_id, lots, lots)
    cur.execute(sql_cmd)
    for pid, qty in cur.fetchall():
        # BUG FIX: a product can have outbound moves without any inbound
        # aggregate row; default the balance to 0 instead of raising
        # KeyError on stock[pid].
        stock[pid] = stock.get(pid, 0) - qty
    cur.close()
    # Keep only products actually in stock, as plain ints.
    res = {}
    for k, v in stock.items():
        if v > 0:
            res[k] = int(v)
    return (max_stock_id, res)
def compute_onsale_price(pgcon, stockdb, company, pricelist_version):
    """Apply the pricelist rules of *pricelist_version* to every product
    stored in the mongo collection stockdb[company].

    For each product whose computed price differs from its base price,
    the mongo document gets an 'on_sale_price' field and the product is
    written to the '<company>.onsale.xml' export file.
    """
    cur = pgcon.cursor()
    col = stockdb[company]
    p_mdb = col.find()
    p_count = col.count()
    res = []
    def _create_parent_category_list(id, lst):
        # Walk up the category tree, accumulating ancestor ids into lst.
        if not id:
            return []
        parent = product_category_tree.get(id)
        if parent:
            lst.append(parent)
            return _create_parent_category_list(parent, lst)
        else:
            return lst
    cur.execute("select id,parent_id from product_category")
    product_categories = cur.fetchall()
    product_category_tree = dict([(item[0], item[1]) for item in product_categories if item[1]])
    while (p_count>0):
        price = False
        p_count -= 1
        p = p_mdb.next()
        p_id = p['_id']
        sql_cmd = "select id from product_pricelist_item where price_version_id = %s"%(pricelist_version,)
        cur.execute(sql_cmd)
        item_list = cur.fetchall()
        sql_cmd = "select id,item_product_id from product_ids where item_product_id = %s and items_id in (%s)"%(p_id,','.join(str(i[0]) for i in item_list))
        cur.execute(sql_cmd)
        item_id_list = cur.fetchall()
        # category: restrict pricelist rules to the product's category or
        # any of its ancestors
        categ_name = p['cate']
        sql_cmd = """select id from product_category where name = '%s' """%(categ_name,)
        cur.execute(sql_cmd)
        try:
            categ_id = cur.fetchall()[0][0]
            categ_ids = _create_parent_category_list(categ_id, [categ_id])
            categ_where = '(categ_id IN (' + ','.join(map(str, categ_ids)) + '))'
        except IndexError:
            # was a bare "except:"; only "category name not found" (empty
            # fetchall) should fall back to the NULL clause
            categ_where = '(categ_id IS NULL)'
        # brand
        brand_name = p['brand']
        # BUG FIX: the original guard was "if brand_name.find("'"):",
        # which is *false* for an apostrophe at index 0 (find returns 0)
        # and true when there is none (find returns -1).  Escaping
        # unconditionally is always safe for SQL single quotes.
        brand_name = brand_name.replace("'","''")
        sql_cmd = """select id from product_brand where name = '%s' """%(brand_name,)
        cur.execute(sql_cmd)
        try:
            hx_product_brand_ids = cur.fetchall()[0][0]
            brand_where = 'hx_product_brand_id = %s'
            brand_args = (hx_product_brand_ids,)
        except IndexError:
            brand_where = '(hx_product_brand_id IS NULL)'
            brand_args = ()
        # Two variants of the rule query: with product-specific rules when
        # this product appears in product_ids, without them otherwise.
        if item_id_list != []:
            cur.execute(
                'SELECT i.price_discount, I.price_surcharge '
                'FROM product_pricelist_item AS i, '
                'product_pricelist_version AS v, product_pricelist AS pl, product_ids as pi '
                'WHERE (item_product_id = %s OR (item_product_id IS NULL)) '
                'AND (' + categ_where + ' OR (categ_id IS NULL)) '
                'AND (' + brand_where + ' OR (hx_product_brand_id IS NULL)) '
                'AND price_version_id = %s '
                'AND (min_quantity IS NULL OR min_quantity <= %s) '
                'AND i.price_version_id = v.id AND v.pricelist_id = pl.id '
                'AND i.id = pi.items_id '
                'ORDER BY sequence',
                (p_id,) + brand_args + (pricelist_version,p['quatity']))
        else:
            cur.execute(
                'SELECT i.price_discount, I.price_surcharge '
                'FROM product_pricelist_item AS i, '
                'product_pricelist_version AS v, product_pricelist AS pl '
                'WHERE (' + categ_where + ' OR (categ_id IS NULL)) '
                'AND (' + brand_where + ' OR (hx_product_brand_id IS NULL)) '
                'AND price_version_id = %s '
                'AND (min_quantity IS NULL OR min_quantity <= %s) '
                'AND i.price_version_id = v.id AND v.pricelist_id = pl.id '
                'AND i.id NOT in (SELECT items_id FROM product_ids)'
                'ORDER BY sequence',
                brand_args + (pricelist_version, p['quatity']))
        res2 = cur.fetchall()
        # Only the first rule (lowest sequence) applies; the unconditional
        # break below is intentional.
        for res1 in res2:
            price = p['price'] * (1.0+(float(res1[0]) or 0.0))
            price += (float(res1[1]) or 0.0)
            if price == p['price']:
                # rule is a no-op -> product is not on sale
                price = False
            break
        if price:
            if company == "hk":
                stockdb.hk.update({'_id': p['_id']}, {'$set': {'on_sale_price': int(price)}})
            if company == "sh":
                stockdb.sh.update({'_id': p['_id']}, {'$set': {'on_sale_price': int(price)}})
            pinfo = {'_id':p['_id'], 'code':p['code'], 'onsale_price':int(price)}
            res.append(pinfo)
    template = jinja2.Template(open(inventory_sale_template_file).read().decode('utf-8'))
    r = template.render(ps = res).encode('utf-8')
    fname = inventory_datafile+company+'.onsale'+'.xml'
    #write xml file
    fw = open(fname, 'w')
    fw.write(r)
    fw.close()
def full_stock_company(pgcon, stockdb, company):
    """Snapshot the current stock of one company ('hk' or 'sh') into
    mongo and export it as XML (plus .zip and .tar.gz copies).

    NOTE(review): relies on module-level lookup dicts dcates/dbrands/
    dsizes and on pkey -- presumably initialized from misc_info()
    elsewhere in this file; confirm before reuse.
    """
    s = None
    price_field = None
    # Per-company stock locations and the price column to export.
    if company == 'hk':
        #21: lussomoda
        maxid, s = full_stock_lots(pgcon, '21')
        price_field = 'hx_price_hk'
    elif company == 'sh':
        # 12: Guangfu Rd warehouse, 15: Huangjincheng Rd store,
        # 14: Jiuguang store, 13: South Shaanxi Rd store
        maxid, s = full_stock_lots(pgcon, '12,13,14,15')
        price_field = 'hx_price_cn'
    if not s:
        # unknown company or nothing in stock: nothing to export
        return
    col = stockdb[company]
    col_meta = stockdb.meta
    # Remember the stock_move high-water mark for incremental updates.
    col_meta.save({'_id': company, 'maxid': maxid})
    cur = pgcon.cursor()
    res = []
    for pid, qty in s.items():
        #sql_cmd = "select product_tmpl_id,default_code,hx_product_brand_id,hx_model,hx_material,hx_color,hx_product_size,%s from product_product where id = %d" %(price_field, pid)
        sql_cmd = "select pp.product_tmpl_id,pp.default_code,pt.hx_product_brand_id,pt.hx_model,pt.hx_material,pt.hx_color,pp.hx_product_size,pt.%s from product_product as pp, product_template as pt where pp.id = %d and pp.product_tmpl_id = pt.id" %(price_field, pid)
        cur.execute(sql_cmd)
        product_tmpl_id,default_code,hx_product_brand_id,hx_model,hx_material,hx_color,hx_product_size,hx_price = cur.fetchall()[0]
        sql_cmd = "select categ_id from product_template where id = %d" %(product_tmpl_id)
        cur.execute(sql_cmd)
        categ_id = cur.fetchall()[0][0]
        # Resolve foreign keys to display names via the module-level
        # lookup tables; 'or ""' blanks out NULL text columns.
        pvalue = [pid, default_code, dcates[categ_id], dbrands[hx_product_brand_id], hx_model or '', hx_material or '', hx_color or '', dsizes[hx_product_size], hx_price, qty]
        pinfo = dict(zip(pkey, pvalue))
        col.insert(pinfo)
        res.append(pinfo)
    template = jinja2.Template(open(inventory_template_file).read().decode('utf-8'))
    r = template.render(ps = res).encode('utf-8')
    fname = inventory_datafile+company+'.xml'
    #write xml file
    fw = open(fname, 'w')
    fw.write(r)
    fw.close()
    #write xml.zip file
    fw = zipfile.ZipFile(fname+'.zip', 'w', zipfile.ZIP_DEFLATED)
    fw.writestr(os.path.basename(fname), r)
    fw.close()
    #write tar.gz file
    fw = tarfile.open(fname+'.tar.gz', 'w:gz')
    fw.add(fname, arcname=os.path.basename(fname))
    fw.close()
def full_stock(pgcon, stockdb):
    """Rebuild full stock snapshots for both companies, then recompute
    their on-sale prices (second argument is passed straight through to
    compute_onsale_price)."""
    companies = (('hk', 10), ('sh', 13))
    for company, _ in companies:
        full_stock_company(pgcon, stockdb, company)
    for company, arg in companies:
        compute_onsale_price(pgcon, stockdb, company, arg)
def inc_stock_lots(pgcon, lots, beg):
    """Return incremental per-product stock deltas since stock_move ``beg``.

    lots -- comma-separated location ids (internal constant; it is
            interpolated straight into SQL, so never pass untrusted input).
    beg  -- last stock_move id already accounted for.

    Returns (max_stock_id, {product_id: int_delta}) with zero deltas
    dropped, or None when no new stock moves exist.
    """
    cur = pgcon.cursor()
    sql_cmd = "select max(id) from stock_move"
    cur.execute(sql_cmd)
    max_stock_id = cur.fetchall()[0][0]
    if max_stock_id <= beg:
        # Nothing happened since the last run.
        return None
    # Goods moved INTO our locations (and completed) count as +qty.
    sql_cmd="select product_id,sum(product_qty) from stock_move where id > %d and id <= %d and location_id not in (%s) and location_dest_id in (%s) and state ='done' group by product_id" %(beg, max_stock_id, lots, lots)
    cur.execute(sql_cmd)
    # (The original pre-initialized ``stock = {}`` and immediately
    # overwrote it here -- dead assignment removed.)
    stock = dict(cur.fetchall())
    # Goods moved OUT of our locations, including reserved/in-flight
    # states, count as -qty.
    sql_cmd="select product_id,sum(product_qty) from stock_move where id > %d and id <= %d and location_id in (%s) and location_dest_id not in (%s) and state in ('done','confirmed','waiting','assigned') group by product_id" %(beg, max_stock_id, lots, lots)
    cur.execute(sql_cmd)
    for pid, qty in cur.fetchall():
        stock[pid] = stock.get(pid, 0) - qty
    cur.close()
    # Keep only non-zero deltas, normalized to int.
    res = dict((k, int(v)) for k, v in stock.items() if v != 0)
    return (max_stock_id, res)
def inc_stock_company(pgcon, stockdb, company):
    """Apply incremental stock changes for one company.

    Fetches deltas since the maxid stored in ``stockdb.meta``, advances
    that watermark, and upserts the deltas into ``stockdb[company+'_inc']``
    (new products get a full denormalized row, known products get a
    quantity increment).
    """
    # Initialize ret: the original left it unbound, raising NameError at
    # the ``if not ret`` check when company was neither 'hk' nor 'sh'.
    ret = None
    s = None
    price_field = None
    col_meta = stockdb.meta
    beg = col_meta.find_one({'_id': company})
    if not beg:
        # No full snapshot taken yet -- nothing to increment from.
        return
    beg = beg['maxid']
    if company == 'hk':
        # 21: lussomoda
        ret = inc_stock_lots(pgcon, '21', beg)
        price_field = 'hx_price_hk'
    elif company == 'sh':
        # 12: Guangfu Rd. warehouse, 15: Golden City Rd. shop,
        # 14: Jiuguang shop, 13: South Shaanxi Rd. shop
        ret = inc_stock_lots(pgcon, '12,13,14,15', beg)
        price_field = 'hx_price_cn'
    if not ret:
        return
    maxid, s = ret
    if not s:
        return
    col = stockdb[company + '_inc']
    col_meta.update({'_id': company}, {'$set': {'maxid': maxid}})
    cur = pgcon.cursor()
    for pid, qty in s.items():
        p_mdb = col.find_one({'_id': pid})
        if p_mdb:
            # Product already present: just bump the quantity.
            # NOTE: 'quatity' is a historical misspelling baked into the
            # stored documents; do not "fix" it without a data migration.
            col.update({'_id': pid}, {"$inc": {'quatity': qty}})
            continue
        # One joined query fetches the product row AND pt.categ_id; the
        # original issued a second round-trip per product for categ_id
        # even though product_template was already joined.
        sql_cmd = ("select pp.product_tmpl_id,pp.default_code,pt.hx_product_brand_id,"
                   "pt.hx_model,pt.hx_material,pt.hx_color,pp.hx_product_size,"
                   "pt." + price_field + ",pt.categ_id "
                   "from product_product as pp, product_template as pt "
                   "where pp.id = %s and pp.product_tmpl_id = pt.id")
        cur.execute(sql_cmd, (pid,))
        (product_tmpl_id, default_code, hx_product_brand_id, hx_model,
         hx_material, hx_color, hx_product_size, hx_price, categ_id) = cur.fetchall()[0]
        # NOTE(review): unlike full_stock_company, model/material/color are
        # stored as-is here (possibly None, not '') -- kept to preserve the
        # existing stored-document shape.
        pvalue = [pid, default_code, dcates[categ_id], dbrands[hx_product_brand_id],
                  hx_model, hx_material, hx_color, dsizes[hx_product_size], hx_price, qty]
        pinfo = dict(zip(pkey, pvalue))
        col.insert(pinfo)
def inc_stock_company_once(pgcon, stockdb, company):
    """Run one incremental stock update for ``company``.

    On failure, waits 5 minutes, reconnects to PostgreSQL and retries
    once; if the retry also fails, logs the traceback and sends a
    best-effort alert mail.
    """
    def _attempt(con):
        # Single update attempt; logs the meta watermark before/after.
        beg_maxid = stockdb.meta.find_one({'_id': company})['maxid']
        inc_stock_company(con, stockdb, company)
        end_maxid = stockdb.meta.find_one({'_id': company})['maxid']
        logging.info('inc_stock_%s: beg_maxid=%d, end_maxid=%d' %(company, beg_maxid, end_maxid))

    try:
        _attempt(pgcon)
    # except Exception (not bare except): a bare except also swallowed
    # KeyboardInterrupt/SystemExit, making the poller unkillable.
    except Exception:
        try:
            time.sleep(300)
            # NOTE(review): this fresh connection is local to the retry --
            # the caller keeps using the original (possibly dead) pgcon on
            # subsequent calls; confirm whether it should be propagated.
            newcon = psycopg2.connect(database=pg['db'], user=pg['user'], host=pg['host'], port=pg['port'])
            _attempt(newcon)
        except Exception:
            e = traceback.format_exc()
            logging.warning('inc_stock_company_once exception: \n %s' %(e))
            # Best-effort alert; e is interpolated into a shell command, so
            # this relies on the traceback containing no shell metacharacters.
            os.system('echo -e "%s" | mail -s "[WARNING][STOCKAPI]error message - %s" "<EMAIL>"' %(e, today))
def inc_stock(pgcon, stockdb):
    """Poll incremental stock updates for both companies every ``interval``
    seconds, stopping once the wall clock reaches the 23:00 hour."""
    while datetime.now().hour != 23:
        for company in ('hk', 'sh'):
            inc_stock_company_once(pgcon, stockdb, company)
        time.sleep(interval)
    logging.info('timeout, exit.')
def stock(pgcon, stockdb):
    """Entry point: take a full snapshot, then keep applying incremental
    updates until inc_stock's daily cutoff."""
    full_stock(pgcon, stockdb)
    inc_stock(pgcon, stockdb)
# Module-level lookup tables loaded once from PostgreSQL via misc_info():
# presumably brand-id -> name, size-id -> name, category-id -> name maps
# (used when denormalizing product rows) -- TODO confirm against misc_info().
dbrands, dsizes, dcates = misc_info(pgcon)
def _full_stock_lots(pgcon, lots):
cur = pgcon.cursor()
stock = {}
sql_cmd = "select max(id) from stock_move"
cur.execute(sql_cmd)
| |
-----
if call.data == '1-1':
t = 'Супер! А что именно из категории (Фото и видео) ты готов сдать в аренду? ' \
'Укажи название, нескольких слов будет достаточно :)'
z = 'Фото и видео'
msg = bot.send_message(message.chat.id, t, parse_mode='html')
bot.register_next_step_handler(msg, init_name_obj, z)
if call.data == '1-2':
t = 'Супер! А что именно из категории (Техника для дома) ты готов сдать в аренду? ' \
'Укажи название, нескольких слов будет достаточно :)'
z = 'Техника для дома'
msg = bot.send_message(message.chat.id, t, parse_mode='html')
bot.register_next_step_handler(msg, init_name_obj, z)
if call.data == '1-3':
t = 'Супер! А что именно из категории (Игры и консоли) ты готов сдать в аренду? ' \
'Укажи название, нескольких слов будет достаточно :)'
z = 'Игры и консоли'
msg = bot.send_message(message.chat.id, t, parse_mode='html')
bot.register_next_step_handler(msg, init_name_obj, z)
if call.data == '1-4':
t = 'Супер! А что именно из категории (Туризм и путешествия) ты готов сдать в аренду? ' \
'Укажи название, нескольких слов будет достаточно :)'
z = 'Туризм и путешествия'
msg = bot.send_message(message.chat.id, t, parse_mode='html')
bot.register_next_step_handler(msg, init_name_obj, z)
if call.data == '1-5':
t = 'Супер! А что именно из категории (Декор и мебель) ты готов сдать в аренду? ' \
'Укажи название, нескольких слов будет достаточно :)'
z = 'Декор и мебель'
msg = bot.send_message(message.chat.id, t, parse_mode='html')
bot.register_next_step_handler(msg, init_name_obj, z)
if call.data == '1-6':
t = 'Супер! А что именно из категории (Детские товары) ты готов сдать в аренду? ' \
'Укажи название, нескольких слов будет достаточно :)'
z = 'Детские товары'
msg = bot.send_message(message.chat.id, t, parse_mode='html')
bot.register_next_step_handler(msg, init_name_obj, z)
if call.data == '1-7':
t = 'Супер! А что именно из категории (Для мероприятий) ты готов сдать в аренду? ' \
'Укажи название, нескольких слов будет достаточно :)'
z = 'Для мероприятий'
msg = bot.send_message(message.chat.id, t, parse_mode='html')
bot.register_next_step_handler(msg, init_name_obj, z)
if call.data == '1-8':
t = 'Супер! А что именно из категории (Инструменты) ты готов сдать в аренду? ' \
'Укажи название, нескольких слов будет достаточно :)'
z = 'Инструменты'
msg = bot.send_message(message.chat.id, t, parse_mode='html')
bot.register_next_step_handler(msg, init_name_obj, z)
if call.data == '1-9':
t = 'Супер! А что именно из категории (Товары для спорта) ты готов сдать в аренду? ' \
'Укажи название, нескольких слов будет достаточно :)'
z = 'Товары для спорта'
msg = bot.send_message(message.chat.id, t, parse_mode='html')
bot.register_next_step_handler(msg, init_name_obj, z)
if call.data == '1-10':
t = 'Супер! А что именно из категории (Музыка и хобби) ты готов сдать в аренду? ' \
'Укажи название, нескольких слов будет достаточно :)'
z = 'Музыка и хобби'
msg = bot.send_message(message.chat.id, t, parse_mode='html')
bot.register_next_step_handler(msg, init_name_obj, z)
if call.data == '1-11':
t = 'Супер! А что именно из категории (Прочее) ты готов сдать в аренду? ' \
'Укажи название, нескольких слов будет достаточно :)'
z = 'Прочее'
msg = bot.send_message(message.chat.id, t, parse_mode='html')
bot.register_next_step_handler(msg, init_name_obj, z)
# -----ПОДТВЕРДИТЬ ТОЛЬКО ЧТО СОЗДАННОЕ ОБЬЯВЛЕНИЕ(КАТ_1)------
if call.data == '01':
t = 'Супер! Объявление добавлено.\n‼️ Важно: перед тем, как сдать товар сфотографируй паспорт того, ' \
'кому сдаёшь. А если вещь дорога для тебя, то подумай, что можешь взять в залог.\nПодробнее: https://telegra.ph/Osnovy-bezopasnoj-sdelki-12-13'
main_menu(message, t)
# ----РЕДАКТИРОВАТЬ ТОЛЬКО ЧТО СОЗДАННОЕ ОБЬЯВЛЕНИЕ(КАТ_1)---------------
if call.data == '01-1':
t = 'Выбери, что ты хочешь изменить ->'
markup = types.InlineKeyboardMarkup()
key1 = types.InlineKeyboardButton('Название', callback_data='01-1-1')
key2 = types.InlineKeyboardButton('Цена', callback_data='01-1-2')
key3 = types.InlineKeyboardButton('Описание', callback_data='01-1-3')
key5 = types.InlineKeyboardButton('Фото', callback_data='01-1-5')
key4 = types.InlineKeyboardButton('Удалить', callback_data='01-1-4')
markup.row(key1)
markup.row(key2)
markup.row(key3)
markup.row(key5)
markup.row(key4)
bot.edit_message_text(chat_id=message.chat.id, message_id=message.message_id, text=t, reply_markup=markup)
if call.data == '01-1-4':
u_id = call.from_user.id
cursor.execute('DELETE FROM obj WHERE id = (SELECT MAX(id) FROM obj WHERE u_id = ?)',
(u_id,))
conn.commit()
t = 'Я удалил твоё последнее объявление.\n'
main_menu(message, t)
if call.data == '01-1-1':
t = 'Введи НОВОЕ название объявления'
msg = bot.send_message(message.chat.id, t, parse_mode='html')
bot.register_next_step_handler(msg, update_name_obj)
if call.data == '01-1-2':
t = 'Введи НОВУЮ цену объявления'
msg = bot.send_message(message.chat.id, t, parse_mode='html')
bot.register_next_step_handler(msg, update_money_obj)
if call.data == '01-1-3':
t = 'Введи НОВОЕ описание объявления'
msg = bot.send_message(message.chat.id, t, parse_mode='html')
bot.register_next_step_handler(msg, update_text_obj)
if call.data == '01-1-5':
t = 'Отправь НОВУЮ фотографию для объявления'
msg = bot.send_message(message.chat.id, t, parse_mode='html')
bot.register_next_step_handler(msg, update_photo_obj)
# --------УДАЛИТЬ ОБЬЯВЛЕНИЕ----------------------------------------
if call.data == 'delete1':
t = 'Введи название своего обьявления, которое ты хочешь удалить.'
msg = bot.send_message(message.chat.id, t)
bot.register_next_step_handler(msg, delete_obj)
if call.data == 'delete2':
t = 'Введи название своего обьявления, которое ты хочешь удалить.'
msg = bot.send_message(message.chat.id, t)
bot.register_next_step_handler(msg, delete_search_obj)
# --------сОЗДАТЬ ОБЪЯВЛЕНИЕ О ПОИСКЕ В АРЕНДУ-------------
if call.data == 'search':
t = 'Хочешь создать объявление о поиске в аренду?'
markup = types.InlineKeyboardMarkup()
key1 = types.InlineKeyboardButton('Давай', callback_data='2-1---')
key2 = types.InlineKeyboardButton('Нет, спасибо', callback_data='2-1-++')
markup.row(key1)
markup.row(key2)
bot.edit_message_text(chat_id=message.chat.id, message_id=message.message_id, text=t, reply_markup=markup)
# -------------------------ФУНКЦИЯ ПОЛУЧЕНИЯ УВЕДОМЛЕНИЙ В НЕСКОЛЬКИХ РАЙОНАХ-------------------------------
def search_message_init(message):
    """Handle the user's reply listing Moscow districts for rental-search
    notifications.

    Expects a single comma-separated line (e.g. "Арбат, Внуково"). The
    user's home district is always added to the set. If every name is a
    known district, the set is saved to user.search_message; otherwise the
    user is re-prompted with the full district list.
    """
    if message.text.lower() in stop_text:
        t = 'Возвращаю меню...'
        return main_menu(message, t)
    # split() already yields strings -- the original mapped str() over them
    # redundantly.
    u_region = message.text.split(', ')
    u_id = message.from_user.id
    get_reg = ["Академический", "Алексеевский", "Алтуфьевский", "Арбат", "Аэропорт",
               "Бабушкинский", "Басманный", "Беговой", "Бескудниковский", "Бибирево",
               "Бирюлёво восточное", "Бирюлёво западное", "Богородское", "Братеево",
               "Бутово северное", "Бутово южное", "Бутырский", "Вешняки",
               "Внуково", "Войковский", "Восточный", "Выхино-жулебино", "Гагаринский",
               "Головинский", "Гольяново", "Ганиловский", "Дегунино восточное", "Дегунино западное",
               "Дмитровский", "Донской", "Дорогомилово", "Замоскворечье",
               "Зюзино", "Зябликово", "Ивановское", "Измайлово восточное", "Измайлово",
               "Измайлово северное", "Капотня", "Коньково", "Коптево", "Косино-ухтомский",
               "Котловка", "Красносельский", "Крылатское", "Крюково", "Кузьминки",
               "Кунцево", "Куркино", "Левобережный", "Лефортово", "Лианозово", "Ломоносовский",
               "Лосиноостровский", "Люблино", "Марфино", "Марьина роща", "Марьино",
               "Матушкино", "Медведково северное", "Медведково южное", "Метрогородок",
               "Мещанский", "Митино", "Можайский", "Молжаниновский", "Москворечье-сабурово",
               "Нагатино-садовники", "Нагатинский затон", "Нагорный", "Некрасовка",
               "Нижегородский", "Ново-переделкино", "Новогиреево", "Новокосино",
               "Обручевский", "Орехово-борисово северное", "Орехово-борисово южное",
               "Останкинский", "Отрадное", "Очаково-матвеевское", "Перово", "Печатники",
               "Покровское-стрешнево", "Преображенское", "Пресненский", "Проспект вернадского",
               "Раменки", "Ростокино", "Рязанский", "Савёлки", "Савёловский", "Свиблово",
               "Северный", "Силино", "Сокол", "Соколиная гора", "Сокольники", "Солнцево",
               "Старое крюково", "Строгино", "Таганский", "Тверской", "Текстильщики", "Тёплый стан",
               "Тимирязевский", "Тропарёво-никулино", "Тушино северное", "Тушино южное",
               "Филёвский парк", "Фили-давыдково", "Хамовники", "Ховрино", "Хорошёво-мневники",
               "Хорошёвский", "Царицыно", "Черёмушки", "Чертаново", "Щукино", "Южнопортовый",
               "Якиманка", "Ярославский", "Ясенево"]
    cursor.execute('SELECT user_region FROM user WHERE user_id = ?', (u_id,))
    result = cursor.fetchone()
    # Always include the user's home district in the notification set.
    u_region.append(result[0])
    # issubset returns a bool; the original compared it to 1 redundantly.
    if set(u_region).issubset(get_reg):
        # NOTE(review): set iteration order makes the stored district order
        # arbitrary -- harmless today, but sort here if order ever matters.
        result = ", ".join(list(set(get_reg).intersection(set(u_region))))
        cursor.execute('UPDATE user SET search_message = ? WHERE user_id = ?', (result, u_id,))
        conn.commit()
        t = 'Понял тебя. Теперь буду сообщать тебе о поиске в этих районах: {}'.format(result)
        main_menu(message, t)
    else:
        t = 'Не нашёл все районы, пожалуйста, попробуй еще раз.\n' \
            'Ввод должен быть таким: Арбат, Царицыно, Внуково\n\n' \
            'Вот список доступных районов:\n\n{}' \
            .format('\n'.join(get_reg))
        msg = bot.send_message(message.chat.id, t)
        bot.register_next_step_handler(msg, search_message_init)
# -----------------ФУНКЦИЯ ДЛЯ ПРОСМОТРА ВСЕХ ОБ. ПО КАТЕГОРИЯМ И РЕГИОНУ ПОЛЬЗОВАТЕЛЯ----------------------
def look_obj(message, category):
    """Show the user every active listing in ``category`` for their district.

    Sends each matching ad (photo + details) and a found/not-found inline
    keyboard; if nothing matches, returns to the main menu.
    (Removed the block of commented-out dead code the original carried.)
    """
    u_id = message.from_user.id
    cursor.execute("SELECT user_region FROM user WHERE user_id = ?", (u_id,))
    u_region = cursor.fetchone()
    cat = '{}'.format(category)
    cursor.execute("SELECT * FROM 'obj' WHERE "
                   "cat_1 IS NOT NULL AND name_cat1_obj IS NOT NULL AND photo IS NOT NULL "
                   "AND category = ? AND region = ?", (cat, u_region[0],))
    result = cursor.fetchall()
    if result:
        bot.send_message(message.from_user.id,
                         "Вот все действующие объявления: ",
                         parse_mode='html')
        for x in result:
            # Row layout (by index): 2=name, 3=price, 4=description,
            # 5=owner, 6=photo id, 7=category -- inferred from usage below.
            bot.send_photo(message.from_user.id, x[6])
            bot.send_message(message.from_user.id,
                             "Категория: {}\n\nНазвание: {}\n\nЦена: {}р\n\nОписание :{}"
                             "\n\nВладелец:{}".format(x[7], x[2], x[3], x[4], x[5]),
                             parse_mode='html')
        t = 'Нашел ли ты нужное или все не то?\n‼️ Важно: аккуратно относись к вещам, которые берёшь в аренду.' \
            'Мы советуем делать фотографии дефектов перед арендой, чтобы потом не было конфликтов.\nПодробнее: https://telegra.ph/Osnovy-bezopasnoj-sdelki-12-13'
        markup = types.InlineKeyboardMarkup()
        key1 = types.InlineKeyboardButton('Подходит!', callback_data='2-1+')
        key2 = types.InlineKeyboardButton('Я не нашёл, что искал', callback_data='2-1-')
        markup.row(key1)
        markup.row(key2)
        bot.send_message(message.from_user.id, t, reply_markup=markup)
    else:
        t = 'Похоже, еще никто не создал объявление :( Создай своё с помощью кнопки меню: "Арендовать товар"'
        main_menu(message, t)
# ------------------------------ФУНКЦИИ ДЛЯ УДАЛЕНИЯ ОБЬЯВЛЕНИЙ-------------------------------------
def delete_obj(message):
    """Delete one of the user's own ads by (case-insensitive) name."""
    if message.text.lower() in stop_text:
        t = 'Возвращаю меню...'
        return main_menu(message, t)
    u_id = message.from_user.id
    u_text = message.text
    cursor.execute('SELECT name_cat1_obj FROM obj WHERE u_id = ?', (u_id,))
    result = cursor.fetchall()
    for x in result:
        a = x[0]
        if u_text.lower() == a.lower():
            cursor.execute('DELETE FROM obj WHERE u_id = ? AND name_cat1_obj = ?', (u_id, a,))
            conn.commit()
            t = 'Я удалил обьявление ({})'.format(a)
            bot.send_message(message.chat.id, t)
            break
    else:
        # for/else: runs only when NO ad matched. (The original attached
        # this else to the inner if, sending "not found" once for every
        # non-matching row before reaching the right one.)
        t = 'Я не нашел в списке твоих обьявлений такое название.'
        bot.send_message(message.chat.id, t)
def delete_search_obj(message):
if message.text.lower() in stop_text:
t = 'Возвращаю меню...'
return main_menu(message, t)
u_id = message.from_user.id
u_text = message.text
| |
NotFound()
@raise_null_argument
def get_functions_by_ids(self, function_ids):
# Implemented from azosid template for -
# osid.resource.ResourceLookupSession.get_resources_by_ids_template
if self._can('lookup'):
return self._provider_session.get_functions_by_ids(function_ids)
self._check_lookup_conditions() # raises PermissionDenied
query = self._query_session.get_function_query()
for function_id in (function_ids):
query.match_id(function_id, match=True)
return self._try_harder(query)
@raise_null_argument
def get_functions_by_genus_type(self, function_genus_type):
# Implemented from azosid template for -
# osid.resource.ResourceLookupSession.get_resources_by_genus_type_template
if self._can('lookup'):
return self._provider_session.get_functions_by_genus_type(function_genus_type)
self._check_lookup_conditions() # raises PermissionDenied
query = self._query_session.get_function_query()
query.match_genus_type(function_genus_type, match=True)
return self._try_harder(query)
@raise_null_argument
def get_functions_by_parent_genus_type(self, function_genus_type):
# Implemented from azosid template for -
# osid.resource.ResourceLookupSession.get_resources_by_parent_genus_type_template
if self._can('lookup'):
return self._provider_session.get_functions_by_parent_genus_type(function_genus_type)
self._check_lookup_conditions() # raises PermissionDenied
query = self._query_session.get_function_query()
query.match_parent_genus_type(function_genus_type, match=True)
return self._try_harder(query)
@raise_null_argument
def get_functions_by_record_type(self, function_record_type):
# Implemented from azosid template for -
# osid.resource.ResourceLookupSession.get_resources_by_record_type_template
if self._can('lookup'):
return self._provider_session.get_functions_by_record_type(function_record_type)
self._check_lookup_conditions() # raises PermissionDenied
query = self._query_session.get_function_query()
query.match_record_type(function_record_type, match=True)
return self._try_harder(query)
def get_functions(self):
# Implemented from azosid template for -
# osid.resource.ResourceLookupSession.get_resources_template
if self._can('lookup'):
return self._provider_session.get_functions()
self._check_lookup_conditions() # raises PermissionDenied
query = self._query_session.get_function_query()
query.match_any(match=True)
return self._try_harder(query)
functions = property(fget=get_functions)
class FunctionQuerySession(abc_authorization_sessions.FunctionQuerySession, osid_sessions.OsidSession):
    """Adapts underlying FunctionQuerySession methods with authorization checks."""

    def __init__(self, *args, **kwargs):
        osid_sessions.OsidSession.__init__(self, *args, **kwargs)
        self._qualifier_id = self._provider_session.get_vault_id()
        self._id_namespace = 'authorization.Function'
        self.use_federated_vault_view()
        # Lazily-computed list of vault ids this agent may NOT search.
        self._unauth_vault_ids = None

    def _try_overriding_vaults(self, query):
        """Pin the query to every vault with an overriding 'search' grant,
        then run it. Returns (results, query)."""
        # Hoisted: the original called _get_overriding_catalog_ids('search')
        # once for the None-check and again for the loop.
        overriding_ids = self._get_overriding_catalog_ids('search')
        if overriding_ids is not None:
            for vault_id in overriding_ids:
                query._provider_query.match_vault_id(vault_id, match=True)
        return self._query_session.get_functions_by_query(query), query

    def _get_unauth_vault_ids(self, vault_id):
        """Recursively collect vault ids (rooted at ``vault_id``) that this
        agent is not authorized to search."""
        if self._can('search', vault_id):
            return []  # Don't go further - assumes authorizations inherited
        else:
            unauth_list = [str(vault_id)]
            if self._hierarchy_session.has_child_vaults(vault_id):
                for child_vault_id in self._hierarchy_session.get_child_vault_ids(vault_id):
                    unauth_list = unauth_list + self._get_unauth_vault_ids(child_vault_id)
            return unauth_list

    def _try_harder(self, query):
        """Run ``query`` against overriding vaults first; in a federated view
        with hierarchy support, also exclude every unauthorized vault and
        re-run across the remainder."""
        results, query = self._try_overriding_vaults(query)
        if self._is_isolated_catalog_view():
            if results.available():
                return results
        if self._hierarchy_session is None or self._query_session is None:
            return results
        if self._unauth_vault_ids is None:
            self._unauth_vault_ids = self._get_unauth_vault_ids(self._qualifier_id)
        for vault_id in self._unauth_vault_ids:
            query._provider_query.match_vault_id(vault_id, match=False)
        return self._query_session.get_functions_by_query(query)

    class FunctionQueryWrapper(QueryWrapper):
        """Wrapper for FunctionQueries to override match_vault_id method"""

        def match_vault_id(self, vault_id, match=True):
            # Record the requested vault match instead of applying it, so
            # get_functions_by_query can authorize each vault individually.
            self._cat_id_args_list.append({'vault_id': vault_id, 'match': match})

    def get_vault_id(self):
        """Return the Id of the vault bound to this session."""
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.get_bin_id_template
        return self._provider_session.get_vault_id()

    vault_id = property(fget=get_vault_id)

    def get_vault(self):
        """Return the vault bound to this session (requires 'lookup')."""
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.get_bin_template
        if not self._can('lookup'):
            raise PermissionDenied()
        else:
            return self._provider_session.get_vault()

    vault = property(fget=get_vault)

    def can_search_functions(self):
        """Return True if the agent may search, directly or via overrides."""
        # Implemented from azosid template for -
        # osid.resource.ResourceQuerySession.can_search_resources_template
        return (self._can('search') or
                bool(self._get_overriding_catalog_ids('search')))

    def use_federated_vault_view(self):
        """Include functions from descendant vaults in results."""
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.use_federated_bin_view_template
        self._use_federated_catalog_view()
        self._provider_session.use_federated_vault_view()
        if self._query_session:
            self._query_session.use_federated_vault_view()

    def use_isolated_vault_view(self):
        """Restrict results to this session's vault only."""
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.use_isolated_bin_view_template
        self._use_isolated_catalog_view()
        self._provider_session.use_isolated_vault_view()
        if self._query_session:
            self._query_session.use_isolated_vault_view()

    def get_function_query(self):
        """Return a wrapped function query; denied outright only when the
        agent can't search AND the view is isolated."""
        # Implemented from azosid template for -
        # osid.resource.ResourceQuerySession.get_resource_query_template
        if (not self._can('search') and
                self._is_isolated_catalog_view()):
            raise PermissionDenied()
        else:
            return self.FunctionQueryWrapper(self._provider_session.get_function_query())

    function_query = property(fget=get_function_query)

    @raise_null_argument
    def get_functions_by_query(self, function_query):
        """Execute ``function_query``, applying each recorded vault match
        only where the agent is authorized to search that vault."""
        # Implemented from azosid template for -
        # osid.resource.ResourceQuerySession.get_resources_by_query_template
        if not hasattr(function_query, '_cat_id_args_list'):
            raise Unsupported('function_query not from this session')
        for kwargs in function_query._cat_id_args_list:
            if self._can('search', kwargs['vault_id']):
                function_query._provider_query.match_vault_id(**kwargs)
        if self._can('search'):
            return self._provider_session.get_functions_by_query(function_query)
        self._check_search_conditions()
        result = self._try_harder(function_query)
        function_query._provider_query.clear_vault_terms()
        return result
class FunctionSearchSession(abc_authorization_sessions.FunctionSearchSession, FunctionQuerySession):
    """Adapts underlying FunctionSearchSession methods with authorization checks."""

    def get_function_search(self):
        """Pass through to provider FunctionSearchSession.get_function_search"""
        # Implemented from azosid template for -
        # osid.resource.ResourceSearchSession.get_resource_search_template
        if not self._can('search'):
            raise PermissionDenied()
        return self._provider_session.get_function_search()

    function_search = property(fget=get_function_search)

    def get_function_search_order(self):
        """Search orders are not supported by this authorization adapter."""
        raise Unimplemented()

    function_search_order = property(fget=get_function_search_order)

    @raise_null_argument
    def get_functions_by_search(self, function_query, function_search):
        """Pass through to provider FunctionSearchSession.get_functions_by_search"""
        # Implemented from azosid template for -
        # osid.resource.ResourceSearchSession.get_resources_by_search_template
        if not self._can('search'):
            raise PermissionDenied()
        return self._provider_session.get_functions_by_search(function_query, function_search)

    @raise_null_argument
    def get_function_query_from_inspector(self, function_query_inspector):
        """Query inspectors are not supported by this authorization adapter."""
        raise Unimplemented()
class FunctionAdminSession(abc_authorization_sessions.FunctionAdminSession, osid_sessions.OsidSession):
    """Adapts underlying FunctionAdminSession methods with authorization checks."""

    def __init__(self, provider_manager, *args, **kwargs):
        osid_sessions.OsidSession.__init__(self, *args, **kwargs)
        self._qualifier_id = self._provider_session.get_vault_id()
        self._id_namespace = 'authorization.Function'
        self._overriding_vault_ids = None
        # Try to obtain a function<->vault mapping session; not every
        # provider implements it, so failures are deliberately swallowed.
        if self._proxy is not None:
            try:
                self._object_catalog_session = provider_manager.get_function_vault_session(self._proxy)
            except (Unimplemented, AttributeError):
                pass
        else:
            try:
                self._object_catalog_session = provider_manager.get_function_vault_session()
                self.get_vault_ids_by_function = self._object_catalog_session.get_vault_ids_by_function
            except (Unimplemented, AttributeError):
                pass

    def _get_overriding_vault_ids(self):
        """Return (and cache) vault ids with an overriding 'lookup' grant."""
        if self._overriding_vault_ids is None:
            self._overriding_vault_ids = self._get_overriding_catalog_ids('lookup')
        return self._overriding_vault_ids

    def _can_for_function(self, func_name, function_id):
        """Checks if agent can perform function for object"""
        return self._can_for_object(func_name, function_id, 'get_vault_ids_for_function')

    def get_vault_id(self):
        """Return the Id of the vault bound to this session."""
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.get_bin_id_template
        return self._provider_session.get_vault_id()

    vault_id = property(fget=get_vault_id)

    def get_vault(self):
        """Return the vault bound to this session (requires 'lookup')."""
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.get_bin_template
        if not self._can('lookup'):
            raise PermissionDenied()
        else:
            return self._provider_session.get_vault()

    vault = property(fget=get_vault)

    def can_create_functions(self):
        """Return True if the agent may create functions."""
        # Implemented from azosid template for -
        # osid.resource.ResourceAdminSession.can_create_resources
        return self._can('create')

    @raise_null_argument
    def can_create_function_with_record_types(self, function_record_types):
        """Return True if functions with the given record types may be created."""
        # Implemented from azosid template for -
        # osid.resource.ResourceAdminSession.can_create_resource_with_record_types
        # This would like to be a real implementation someday:
        if function_record_types is None:
            raise NullArgument()  # Just 'cause the spec says to :)
        return self._can('create')

    @raise_null_argument
    def get_function_form_for_create(self, function_record_types):
        """Return a form for creating a function (requires 'create')."""
        # Implemented from azosid template for -
        # osid.resource.ResourceAdminSession.get_resource_form_for_create
        if not self._can('create'):
            raise PermissionDenied()
        return self._provider_session.get_function_form_for_create(function_record_types)

    @raise_null_argument
    def create_function(self, function_form):
        """Create a function from a completed form (requires 'create')."""
        # Implemented from azosid template for -
        # osid.resource.ResourceAdminSession.create_resource
        if not self._can('create'):
            raise PermissionDenied()
        return self._provider_session.create_function(function_form)

    def can_update_functions(self):
        """Return True if the agent may update, directly or via overrides."""
        # Implemented from azosid template for -
        # osid.resource.ResourceAdminSession.can_update_resources
        return (self._can('update') or
                bool(self._get_overriding_catalog_ids('update')))

    @raise_null_argument
    def get_function_form_for_update(self, function_id):
        """Return an update form for ``function_id`` (per-object check)."""
        # Implemented from azosid template for -
        # osid.resource.ResourceAdminSession.get_resource_form_for_update
        if not self._can_for_function('update', function_id):
            raise PermissionDenied()
        return self._provider_session.get_function_form_for_update(function_id)

    def duplicate_function(self, function_id):
        """Duplicate ``function_id`` (requires 'update').

        NOTE(review): unlike the sibling methods this one lacks
        @raise_null_argument and the per-object check -- confirm whether
        that asymmetry is intentional before changing it.
        """
        if not self._can('update'):
            raise PermissionDenied()
        return self._provider_session.duplicate_function(function_id)

    @raise_null_argument
    def update_function(self, function_form):
        """Update a function from a completed form (requires 'update')."""
        # Implemented from azosid template for -
        # osid.resource.ResourceAdminSession.update_resource
        if not self._can('update'):
            raise PermissionDenied()
        return self._provider_session.update_function(function_form)

    def can_delete_functions(self):
        """Return True if the agent may delete, directly or via overrides."""
        # Implemented from azosid template for -
        # osid.resource.ResourceAdminSession.can_delete_resources
        return (self._can('delete') or
                bool(self._get_overriding_catalog_ids('delete')))

    @raise_null_argument
    def delete_function(self, function_id):
        """Delete ``function_id`` (per-object check)."""
        # Implemented from azosid template for -
        # osid.resource.ResourceAdminSession.delete_resource
        if not self._can_for_function('delete', function_id):
            raise PermissionDenied()
        return self._provider_session.delete_function(function_id)

    def can_manage_function_aliases(self):
        """Return True if the agent may manage aliases, directly or via overrides."""
        # Implemented from azosid template for -
        # osid.resource.ResourceAdminSession.can_manage_resource_aliases
        return (self._can('manage') or
                bool(self._get_overriding_catalog_ids('manage')))

    @raise_null_argument
    def alias_function(self, function_id, alias_id):
        """Alias ``function_id`` as ``alias_id`` (per-object check)."""
        # Implemented from azosid template for -
        # osid.resource.ResourceAdminSession.alias_resources
        if not self._can_for_function('alias', function_id):
            raise PermissionDenied()
        return self._provider_session.alias_function(function_id, alias_id)
class FunctionNotificationSession(abc_authorization_sessions.FunctionNotificationSession, osid_sessions.OsidSession):
    """Adapts underlying FunctionNotificationSession methods with authorization checks."""

    def __init__(self, *args, **kwargs):
        osid_sessions.OsidSession.__init__(self, *args, **kwargs)
        # Authorization checks for this session are qualified on the
        # provider session's vault.
        self._qualifier_id = self._provider_session.get_vault_id()
        self._id_namespace = 'authorization.Function'

    def get_vault_id(self):
        """Return the ``Id`` of the vault associated with this session."""
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.get_bin_id_template
        return self._provider_session.get_vault_id()

    vault_id = property(fget=get_vault_id)

    def get_vault(self):
        """Return the vault associated with this session (requires 'lookup')."""
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.get_bin_template
        if not self._can('lookup'):
            raise PermissionDenied()
        return self._provider_session.get_vault()

    vault = property(fget=get_vault)

    def can_register_for_function_notifications(self):
        """Test whether the caller may register for ``Function`` notifications."""
        # Implemented from azosid template for -
        # osid.resource.ResourceNotificationSession.can_register_for_resource_notifications
        return self._can('register')

    def use_federated_vault_view(self):
        """Federate the view: include functions from descendant vaults."""
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.use_federated_bin_view_template
        self._use_federated_catalog_view()
        self._provider_session.use_federated_vault_view()
        if self._query_session:
            self._query_session.use_federated_vault_view()

    def use_isolated_vault_view(self):
        """Isolate the view: restrict to this vault only."""
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.use_isolated_bin_view_template
        self._use_isolated_catalog_view()
        self._provider_session.use_isolated_vault_view()
        if self._query_session:
            self._query_session.use_isolated_vault_view()

    # NOTE(review): reliable_function_notifications, unreliable_function_notifications
    # and acknowledge_function_notification were each defined twice with identical
    # bodies in the original; the redundant second definitions were removed.
    def reliable_function_notifications(self):
        """Notifications require acknowledgement (pass-through to provider)."""
        # Implemented from azosid template for -
        # osid.resource.ResourceNotificationSession.register_for_new_resources
        self._provider_session.reliable_function_notifications()

    def unreliable_function_notifications(self):
        """Notifications do not require acknowledgement (pass-through to provider)."""
        # Implemented from azosid template for -
        # osid.resource.ResourceNotificationSession.register_for_new_resources
        self._provider_session.unreliable_function_notifications()

    @raise_null_argument
    def acknowledge_function_notification(self, notification_id):
        """Acknowledge a function notification -- not supported by this adapter."""
        raise Unimplemented()

    def register_for_new_functions(self):
        """Register for notifications of new functions (requires 'register')."""
        # Implemented from azosid template for -
        # osid.resource.ResourceNotificationSession.register_for_new_resources
        if not self._can('register'):
            raise PermissionDenied()
        self._provider_session.register_for_new_functions()

    def register_for_changed_functions(self):
        """Register for notifications of updated functions (requires 'register')."""
        # Implemented from azosid template for -
        # osid.resource.ResourceNotificationSession.register_for_changed_resources
        if not self._can('register'):
            raise PermissionDenied()
        self._provider_session.register_for_changed_functions()

    @raise_null_argument
    def register_for_changed_function(self, function_id):
        """Register for update notifications for one function (requires 'register')."""
        # Implemented from azosid template for -
        # osid.resource.ResourceNotificationSession.register_for_changed_resource
        if not self._can('register'):
            raise PermissionDenied()
        self._provider_session.register_for_changed_function(function_id)

    def register_for_deleted_functions(self):
        """Register for notifications of deleted functions (requires 'register')."""
        # Implemented from azosid template for -
        # osid.resource.ResourceNotificationSession.register_for_deleted_resources
        if not self._can('register'):
            raise PermissionDenied()
        self._provider_session.register_for_deleted_functions()

    @raise_null_argument
    def register_for_deleted_function(self, function_id):
        """Register for delete notifications for one function (requires 'register')."""
        # Implemented from azosid template for -
        # osid.resource.ResourceNotificationSession.register_for_deleted_resource
        if not self._can('register'):
            raise PermissionDenied()
        self._provider_session.register_for_deleted_function(function_id)
class FunctionVaultSession(abc_authorization_sessions.FunctionVaultSession, osid_sessions.OsidSession):
"""Adapts underlying FunctionVaultSession methodswith authorization checks."""
    def __init__(self, *args, **kwargs):
        osid_sessions.OsidSession.__init__(self, *args, **kwargs)
        # Function/Vault mappings are authorized against the ROOT vault
        # rather than a specific catalog.
        self._qualifier_id = Id('authorization.Vault%3AROOT%40ODL.MIT.EDU')  # This could be better
        self._id_namespace = 'authorization.FunctionVault'
    def can_lookup_function_vault_mappings(self):
        """Return True if the caller may look up Function/Vault mappings."""
        # Implemented from azosid template for -
        # osid.resource.ResourceBinSession.can_lookup_resource_bin_mappings
        return self._can('lookup')
    def use_comparative_vault_view(self):
        """Prefer a comparative (lenient) vault view for lookups."""
        # Implemented from azosid template for -
        # osid.resource.BinLookupSession.use_comparative_bin_view_template
        self._provider_session.use_comparative_vault_view()
    def use_plenary_vault_view(self):
        """Prefer a plenary (exact) vault view for lookups."""
        # Implemented from azosid template for -
        # osid.resource.BinLookupSession.use_plenary_bin_view_template
        self._provider_session.use_plenary_vault_view()
    @raise_null_argument
    def get_function_ids_by_vault(self, vault_id):
        """Return Ids of functions mapped to the given vault (requires 'lookup')."""
        # Implemented from azosid template for -
        # osid.resource.ResourceBinSession.get_resource_ids_by_bin
        if not self._can('lookup'):
            raise PermissionDenied()
        return self._provider_session.get_function_ids_by_vault(vault_id)
    @raise_null_argument
    def get_functions_by_vault(self, vault_id):
        """Return functions mapped to the given vault (requires 'lookup')."""
        # Implemented from azosid template for -
        # osid.resource.ResourceBinSession.get_resources_by_bin_template
        if not self._can('lookup'):
            raise PermissionDenied()
        return self._provider_session.get_functions_by_vault(vault_id)
    @raise_null_argument
    def get_function_ids_by_vaults(self, vault_ids):
        """Return Ids of functions mapped to any of the vaults (requires 'lookup')."""
        # Implemented from azosid template for -
        # osid.resource.ResourceBinSession.get_resource_ids_by_bins
        if not self._can('lookup'):
            raise PermissionDenied()
        return self._provider_session.get_function_ids_by_vaults(vault_ids)
    @raise_null_argument
    def get_functions_by_vaults(self, vault_ids):
        """Return functions mapped to any of the vaults (requires 'lookup')."""
        # Implemented from azosid template for -
        # osid.resource.ResourceBinSession.get_resources_by_bins
        if not self._can('lookup'):
            raise PermissionDenied()
        return self._provider_session.get_functions_by_vaults(vault_ids)
@raise_null_argument
def get_vault_ids_by_function(self, function_id):
# Implemented from azosid template for -
# osid.resource.ResourceBinSession.get_bin_ids_by_resource
if not self._can('lookup'):
raise PermissionDenied()
| |
'sym_pd':
list_of_matrices.append(random_symmetric_pd_matrix(matsize, dtype=dtype, device=device))
elif mat_chars[mat_type] == 'sing':
list_of_matrices.append(torch.ones(matsize, matsize, dtype=dtype, device=device))
elif mat_chars[mat_type] == 'non_sing':
list_of_matrices.append(random_square_matrix_of_rank(matsize, matsize, dtype=dtype, device=device))
full_tensor = torch.stack(list_of_matrices, dim=0).reshape(batchdims + (matsize, matsize))
# Scaling adapted from `get_random_mat_scale` in _test_det_logdet_slogdet
full_tensor *= (math.factorial(matsize - 1) ** (-1.0 / (2 * matsize)))
for fn in [torch.det, torch.logdet, torch.slogdet, torch.linalg.slogdet]:
expected_value = []
actual_value = fn(full_tensor)
for full_idx in itertools.product(*map(lambda x: list(range(x)), batchdims)):
expected_value.append(fn(full_tensor[full_idx]))
if fn == torch.slogdet or fn == torch.linalg.slogdet:
sign_value = torch.stack([tup[0] for tup in expected_value], dim=0).reshape(batchdims)
expected_value = torch.stack([tup[1] for tup in expected_value], dim=0).reshape(batchdims)
self.assertEqual(sign_value, actual_value[0])
self.assertEqual(expected_value, actual_value[1])
else:
expected_value = torch.stack(expected_value, dim=0).reshape(batchdims)
self.assertEqual(actual_value, expected_value)
for matsize, batchdims in itertools.product([3, 5], [(3,), (5, 3)]):
run_test(matsize, batchdims, mat_chars=['sym_pd'])
run_test(matsize, batchdims, mat_chars=['sing'])
run_test(matsize, batchdims, mat_chars=['non_sing'])
run_test(matsize, batchdims, mat_chars=['sym', 'sym_pd', 'sym_psd'])
run_test(matsize, batchdims, mat_chars=['sing', 'non_sing'])
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    def test_cholesky_inverse(self, device, dtype):
        """torch.cholesky_inverse(L) must match torch.inverse(A) where L = cholesky(A),
        across shapes, batches, upper/lower factors, and (non-)contiguous inputs."""
        from torch.testing._internal.common_utils import random_hermitian_pd_matrix
        def run_test(shape, batch, upper, contiguous):
            A = random_hermitian_pd_matrix(shape, *batch, dtype=dtype, device=device)
            if A.numel() > 0 and not contiguous:
                # transposing a Hermitian PD matrix keeps it valid but non-contiguous
                A = A.mT
                self.assertFalse(A.is_contiguous())
            L = torch.linalg.cholesky(A)
            expected_inverse = torch.inverse(A)
            # cholesky_inverse expects an upper factor when upper=True
            L = L.mH if upper else L
            actual_inverse = torch.cholesky_inverse(L, upper)
            self.assertEqual(actual_inverse, expected_inverse)
        shapes = (0, 3, 5)
        batches = ((), (0,), (3, ), (2, 2))
        for shape, batch, upper, contiguous in list(itertools.product(shapes, batches, (True, False), (True, False))):
            run_test(shape, batch, upper, contiguous)
        # check the out= variant
        A = random_hermitian_pd_matrix(3, 2, dtype=dtype, device=device)
        L = torch.linalg.cholesky(A)
        # There are two code paths currently for the out= variant
        # 1. When 'out' tensor is in Fortran (column-major) memory format
        # then the fast route is taken and the storage is reused directly in the computations
        # 2. When 'out' tensor is not in Fortran format then a temporary tensor is allocated internally
        # and the result is copied from the temporary tensor to 'out' tensor
        # This test checks the first code path
        out = torch.empty_like(A)
        out_t = out.mT.clone(memory_format=torch.contiguous_format)
        out = out_t.mT
        ans = torch.cholesky_inverse(L, out=out)
        self.assertEqual(ans, out)
        expected = torch.inverse(A)
        self.assertEqual(expected, out)
        # This test checks the second code path
        out = torch.empty_like(A)
        ans = torch.cholesky_inverse(L, out=out)
        self.assertEqual(ans, out)
        expected = torch.inverse(A)
        self.assertEqual(expected, out)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    def test_cholesky_inverse_errors_and_warnings(self, device, dtype):
        """Validate the error and warning behavior of torch.cholesky_inverse."""
        # cholesky_inverse requires the input to be at least 2 dimensional tensor
        a = torch.randn(2, device=device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, "must have at least 2 dimensions"):
            torch.cholesky_inverse(a)
        # cholesky_inverse requires a square matrix
        a = torch.randn(2, 3, device=device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, "must be batches of square matrices"):
            torch.cholesky_inverse(a)
        # if non-empty out tensor with wrong shape is passed a warning is given
        a = torch.randn(3, 3, device=device, dtype=dtype)
        out = torch.empty(2, 3, device=device, dtype=dtype)
        with warnings.catch_warnings(record=True) as w:
            # Trigger warning
            torch.cholesky_inverse(a, out=out)
            # Check warning occurs
            self.assertEqual(len(w), 1)
            self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
        # dtypes should be safely castable
        out = torch.empty(*a.shape, dtype=torch.int, device=device)
        with self.assertRaisesRegex(RuntimeError, "but got result with dtype Int"):
            torch.cholesky_inverse(a, out=out)
        # device should match
        if torch.cuda.is_available():
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            out = torch.empty(0, device=wrong_device, dtype=dtype)
            with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
                torch.cholesky_inverse(a, out=out)
        # cholesky_inverse raises an error for invalid inputs on CPU
        # for example if at least one diagonal element is zero
        a = torch.randn(3, 3, device=device, dtype=dtype)
        a[1, 1] = 0
        if self.device_type == 'cpu':
            with self.assertRaisesRegex(torch.linalg.LinAlgError, r"cholesky_inverse: The diagonal element 2 is zero"):
                torch.cholesky_inverse(a)
        # cholesky_inverse on GPU does not raise an error for this case
        elif self.device_type == 'cuda':
            out = torch.cholesky_inverse(a)
            self.assertTrue(out.isinf().any() or out.isnan().any())
def _select_broadcastable_dims(self, dims_full=None):
# select full dimensionality
if dims_full is None:
dims_full = []
ndims = random.randint(1, 4)
dims_full = [random.randint(1, 8) for _ in range(ndims)]
else:
ndims = len(dims_full)
# select actual dimensions for ops:
# larger: full ndims, individual sizes may be reduced
# smaller: possibly reduced ndims, sizes may be reduced
smaller_ndims = random.randint(1, ndims)
dims_small = []
dims_large = []
for i in range(ndims - 1, -1, -1):
j = random.randint(1, 3)
if j == 1: # no reduced singleton dimension
ds = dims_full[i]
dl = dims_full[i]
elif j == 2: # larger may have reduced singleton dimension
ds = dims_full[i]
dl = 1 if len(dims_small) < smaller_ndims else dims_full[i]
elif j == 3: # smaller may have reduced singleton dimension
ds = 1
dl = dims_full[i]
dims_large = [dl] + dims_large
if len(dims_small) < smaller_ndims:
dims_small = [ds] + dims_small
return (dims_small, dims_large, dims_full)
    def test_broadcast_fused_matmul(self, device):
        """Fused matmul ops must produce the same result for a broadcastable
        first argument as for its fully-expanded counterpart."""
        fns = ["baddbmm", "addbmm", "addmm", "addmv", "addr"]
        for fn in fns:
            batch_dim = random.randint(1, 8)
            n_dim = random.randint(1, 8)
            m_dim = random.randint(1, 8)
            p_dim = random.randint(1, 8)
            def dims_full_for_fn():
                # returns (input, mat1, mat2) full shapes for each fused op
                if fn == "baddbmm":
                    return ([batch_dim, n_dim, p_dim], [batch_dim, n_dim, m_dim], [batch_dim, m_dim, p_dim])
                elif fn == "addbmm":
                    return ([n_dim, p_dim], [batch_dim, n_dim, m_dim], [batch_dim, m_dim, p_dim])
                elif fn == "addmm":
                    return ([n_dim, p_dim], [n_dim, m_dim], [m_dim, p_dim])
                elif fn == "addmv":
                    return ([n_dim], [n_dim, m_dim], [m_dim])
                elif fn == "addr":
                    return ([n_dim, m_dim], [n_dim], [m_dim])
                else:
                    raise AssertionError("unknown function")
            (t0_dims_full, t1_dims, t2_dims) = dims_full_for_fn()
            (t0_dims_small, _, _) = self._select_broadcastable_dims(t0_dims_full)
            t0_small = torch.randn(*t0_dims_small, device=device).float()
            t1 = torch.randn(*t1_dims, device=device).float()
            t2 = torch.randn(*t2_dims, device=device).float()
            t0_full = t0_small.expand(*t0_dims_full).to(device)
            fntorch = getattr(torch, fn)
            # broadcastable input and its expanded form must agree
            r0 = fntorch(t0_small, t1, t2)
            r1 = fntorch(t0_full, t1, t2)
            self.assertEqual(r0, r1)
    @tf32_on_and_off(0.001)
    def test_broadcast_batched_matmul(self, device):
        """matmul with broadcast batch dimensions (and 1-D operands) must agree
        with the fully-expanded computation and with torch.bmm."""
        n_dim = random.randint(1, 8)
        m_dim = random.randint(1, 8)
        p_dim = random.randint(1, 8)
        full_batch_dims = [random.randint(1, 3) for i in range(random.randint(1, 3))]
        (batch_dims_small, _, _) = self._select_broadcastable_dims(full_batch_dims)
        def verify_batched_matmul(full_lhs, one_dimensional):
            # choose operand shapes; 1-D operands exercise the vector paths
            if not one_dimensional:
                lhs_dims = [n_dim, m_dim]
                rhs_dims = [m_dim, p_dim]
                result_dims = [n_dim, p_dim]
            else:
                lhs_dims = [n_dim, m_dim] if full_lhs else [m_dim]
                rhs_dims = [m_dim, p_dim] if not full_lhs else [m_dim]
                result_dims = [n_dim] if full_lhs else [p_dim]
            # matrix-shaped equivalents for 1-D operands (row/column vectors)
            lhs_mat_dims = lhs_dims if len(lhs_dims) != 1 else [1, m_dim]
            rhs_mat_dims = rhs_dims if len(rhs_dims) != 1 else [m_dim, 1]
            full_mat_dims = lhs_mat_dims if full_lhs else rhs_mat_dims
            dim0_dims = rhs_dims if full_lhs else lhs_dims
            small_dims = batch_dims_small + (rhs_mat_dims if full_lhs else lhs_mat_dims)
            small = torch.randn(*(small_dims), device=device).float()
            dim0 = torch.randn(*(dim0_dims), device=device).float()
            full = torch.randn(*(full_batch_dims + full_mat_dims), device=device).float()
            if not one_dimensional:
                (lhsTensors, rhsTensors) = ((full,), (small, dim0)) if full_lhs else ((small, dim0), (full,))
            else:
                (lhsTensors, rhsTensors) = ((full,), (dim0,)) if full_lhs else ((dim0,), (full,))
            def maybe_squeeze_result(l, r, result):
                # undo the row/column unsqueeze applied for 1-D operands
                if len(lhs_dims) == 1 and l.dim() != 1:
                    return result.squeeze(-2)
                elif len(rhs_dims) == 1 and r.dim() != 1:
                    return result.squeeze(-1)
                else:
                    return result
            for lhs in lhsTensors:
                lhs_expanded = lhs.expand(*(torch.Size(full_batch_dims) + torch.Size(lhs_mat_dims)))
                lhs_expanded_matmul_fn = lhs_expanded.matmul
                for rhs in rhsTensors:
                    rhs_expanded = ((rhs if len(rhs_dims) != 1 else rhs.unsqueeze(-1)).
                                    expand(*(torch.Size(full_batch_dims) + torch.Size(rhs_mat_dims))))
                    truth = maybe_squeeze_result(lhs_expanded, rhs_expanded, lhs_expanded_matmul_fn(rhs_expanded))
                    for l in (lhs, lhs_expanded):
                        for r in (rhs, rhs_expanded):
                            l_matmul_fn = l.matmul
                            result = maybe_squeeze_result(l, r, l_matmul_fn(r))
                            self.assertEqual(truth, result)
                            # test torch.matmul function as well
                            torch_result = maybe_squeeze_result(l, r, torch.matmul(l, r))
                            self.assertEqual(truth, torch_result)
                            # test torch.matmul with out
                            out = torch.zeros_like(torch_result)
                            torch.matmul(l, r, out=out)
                            self.assertEqual(truth, maybe_squeeze_result(l, r, out))
                    # compare to bmm
                    bmm_result = (torch.bmm(lhs_expanded.contiguous().view(-1, *lhs_mat_dims),
                                            rhs_expanded.contiguous().view(-1, *rhs_mat_dims)))
                    self.assertEqual(truth.view(-1, *result_dims), bmm_result.view(-1, *result_dims))
        for indices in itertools.product((True, False), repeat=2):
            verify_batched_matmul(*indices)
def lu_solve_test_helper(self, A_dims, b_dims, pivot, device, dtype):
make_fullrank = make_fullrank_matrices_with_distinct_singular_values
make_A = partial(make_fullrank, device=device, dtype=dtype)
b = torch.randn(*b_dims, dtype=dtype, device=device)
A = make_A(*A_dims)
LU_data, LU_pivots, info = torch.lu(A, get_infos=True, pivot=pivot)
self.assertEqual(info, torch.zeros_like(info))
return b, A, LU_data, LU_pivots
    @skipCPUIfNoLapack
    @skipCUDAIfNoMagma
    @dtypes(*floating_and_complex_types())
    @precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
                        torch.float64: 1e-8, torch.complex128: 1e-8})
    def test_lu_solve(self, device, dtype):
        """Solutions from torch.lu_solve must satisfy A @ x == b."""
        def sub_test(pivot):
            for k, n in zip([2, 3, 5], [3, 5, 7]):
                b, A, LU_data, LU_pivots = self.lu_solve_test_helper((n, n), (n, k), pivot, device, dtype)
                x = torch.lu_solve(b, LU_data, LU_pivots)
                self.assertEqual(b, np.matmul(A.cpu(), x.cpu()))
        sub_test(True)
        # the no-pivoting code path is only available on CUDA (MAGMA)
        if self.device_type == 'cuda':
            sub_test(False)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
torch.float64: 1e-8, torch.complex128: 1e-8})
def test_lu_solve_batched(self, device, dtype):
def sub_test(pivot):
def lu_solve_batch_test_helper(A_dims, b_dims, pivot):
b, A, LU_data, LU_pivots = self.lu_solve_test_helper(A_dims, b_dims, pivot, device, dtype)
x_exp_list = []
for i in range(b_dims[0]):
x_exp_list.append(torch.lu_solve(b[i], LU_data[i], LU_pivots[i]))
x_exp = torch.stack(x_exp_list) # Stacked output
x_act = torch.lu_solve(b, LU_data, LU_pivots) # Actual | |
<filename>mama/build_dependency.py
import os, subprocess, shutil, stat
from mama.parse_mamafile import parse_mamafile, update_mamafile_tag, update_cmakelists_tag
from mama.system import System, console, execute, execute_piped
from mama.util import is_dir_empty, has_tag_changed, write_text_to, read_lines_from, forward_slashes, back_slashes
from mama.package import cleanup_libs_list
from time import sleep
######################################################################################
class Git:
    """Source-control descriptor for a dependency.

    Tracks the origin url plus the pinned branch/tag and compares the
    on-disk checkout against the status saved from the previous build
    (`git_status` file) to decide whether a re-fetch/rebuild is needed.
    """
    def __init__(self, url, branch, tag):
        if not url: raise RuntimeError("Git url must not be empty!")
        self.url = url
        self.branch = branch
        self.tag = tag
        self.dep = None  # owning BuildDependency; assigned when the dep is created
        # change flags, populated by check_status()
        self.missing_status = False
        self.url_changed = False
        self.tag_changed = False
        self.branch_changed = False
        self.commit_changed = False
    def run_git(self, git_command):
        """Run a git command inside the dependency's source directory."""
        cmd = f"cd {self.dep.src_dir} && git {git_command}"
        if self.dep.config.verbose:
            console(f'  {self.dep.name: <16} git {git_command}')
        execute(cmd)
    def fetch_origin(self):
        """Quietly pull the pinned branch/tag from origin."""
        self.run_git(f"pull origin {self.branch_or_tag()} -q")
    def current_commit(self):
        """Return the current HEAD as a one-line `git show` summary string."""
        result = execute_piped(['git', 'show', '--oneline', '-s'], cwd=self.dep.src_dir)
        if self.dep.config.verbose:
            console(f'  {self.dep.name: <16} git show --oneline -s: {result}')
        return result
    def save_status(self):
        """Persist url/tag/branch/commit so the next run can detect SCM changes."""
        status = f"{self.url}\n{self.tag}\n{self.branch}\n{self.current_commit()}\n"
        write_text_to(f"{self.dep.build_dir}/git_status", status)
    def check_status(self):
        """Compare the saved git status with the current configuration.

        Returns True when anything (url, tag, branch or commit) changed.
        """
        lines = read_lines_from(f"{self.dep.build_dir}/git_status")
        if not lines:
            self.missing_status = True
            if not self.url: return False
            # no saved status: treat everything as changed so we re-sync
            self.url_changed = True
            self.tag_changed = True
            self.branch_changed = True
            self.commit_changed = True
            return True
        self.fetch_origin()
        self.url_changed = self.url != lines[0].rstrip()
        self.tag_changed = self.tag != lines[1].rstrip()
        self.branch_changed = self.branch != lines[2].rstrip()
        self.commit_changed = self.current_commit() != lines[3].rstrip()
        return self.url_changed or self.tag_changed or self.branch_changed or self.commit_changed
    def branch_or_tag(self):
        """Return the pinned branch if set, else the tag, else ''."""
        if self.branch: return self.branch
        if self.tag: return self.tag
        return ''
    def checkout_current_branch(self):
        """Checkout the configured branch/tag; hard-reset first when the tag changed."""
        branch = self.branch_or_tag()
        if branch:
            if self.tag and self.tag_changed:
                self.run_git("reset --hard")
            self.run_git(f"checkout {branch}")
    def reclone_wipe(self):
        """Delete the whole dependency directory so it can be cloned fresh."""
        if self.dep.config.print:
            console(f'  - Target {self.dep.name: <16} RECLONE WIPE')
        if os.path.exists(self.dep.dep_dir):
            if System.windows: # chmod everything to user so we can delete:
                for root, dirs, files in os.walk(self.dep.dep_dir):
                    for d in dirs: os.chmod(os.path.join(root, d), stat.S_IWUSR)
                    for f in files: os.chmod(os.path.join(root, f), stat.S_IWUSR)
            shutil.rmtree(self.dep.dep_dir)
    def clone_or_pull(self, wiped=False):
        """Clone the repo when the source dir is empty, otherwise sync it."""
        if is_dir_empty(self.dep.src_dir):
            if not wiped and self.dep.config.print:
                console(f"  - Target {self.dep.name: <16} CLONE because src is missing")
            branch = self.branch_or_tag()
            if branch: branch = f" --branch {self.branch_or_tag()}"
            execute(f"git clone --recurse-submodules --depth 1 {branch} {self.url} {self.dep.src_dir}", self.dep.config.verbose)
            self.checkout_current_branch()
        else:
            if self.dep.config.print:
                console(f"  - Pulling {self.dep.name: <16} SCM change detected")
            self.checkout_current_branch()
            # BUGFIX: run submodule update inside the dependency's source dir via
            # run_git(); previously this executed in mama's current working
            # directory, updating the wrong repository's submodules.
            self.run_git("submodule update --init --recursive")
            if not self.tag: # pull if not a tag
                self.run_git("reset --hard -q")
                self.run_git("pull")
class BuildDependency:
loaded_deps = dict()
def __init__(self, name, config, target_class, workspace=None, src=None, git=None, \
is_root=False, mamafile=None, always_build=False, args=[]):
self.name = name
self.workspace = workspace
self.config = config
self.target = None
self.target_class = target_class
self.target_args = args
self.mamafile = mamafile
self.always_build = always_build
self.should_rebuild = False
self.nothing_to_build = False
self.already_loaded = False
self.already_executed = False
self.currently_loading = False
self.is_root = is_root # Root deps are always built
self.children = []
self.depends_on = []
self.product_sources = []
self.flattened_deps = [] # used for debugging
if not src and not git:
raise RuntimeError(f'{name} src and git not configured. Specify at least one.')
if git:
self.git = git
git.dep = self
self.update_dep_dir()
self.src_dir = forward_slashes(os.path.join(self.dep_dir, self.name))
self.target = None
else:
self.git = None
self.src_dir = forward_slashes(src)
self.create_build_target()
self.name = self.target.name
self.update_dep_dir()
@staticmethod
def get(name, config, target_class, workspace, src=None, git=None, \
mamafile=None, always_build=False, args=[]):
if name in BuildDependency.loaded_deps:
#console(f'Using existing BuildDependency {name}')
dependency = BuildDependency.loaded_deps[name]
dependency.target_args += args
if dependency.target:
dependency.target._set_args(args)
return dependency
dependency = BuildDependency(name, config, target_class, \
workspace=workspace, src=src, git=git, mamafile=mamafile,
always_build=always_build, args=args)
BuildDependency.loaded_deps[name] = dependency
return dependency
def update_dep_dir(self):
dep_name = self.name
if self.git:
if self.git.branch: dep_name = f'{self.name}-{self.git.branch}'
elif self.git.tag: dep_name = f'{self.name}-{self.git.tag}'
self.dep_dir = forward_slashes(os.path.join(self.config.workspaces_root, self.workspace, dep_name))
self.build_dir = forward_slashes(os.path.join(self.dep_dir, self.config.build_folder()))
    def has_build_files(self):
        """True when CMake or Make artifacts already exist in the build dir."""
        return os.path.exists(self.build_dir+'/CMakeCache.txt') \
            or os.path.exists(self.build_dir+'/Makefile')
    def exported_libs_file(self):
        """Path of the file recording libs exported by this dependency's build."""
        return self.build_dir + '/mama_exported_libs'
    def load_build_dependencies(self, target):
        """Append previously exported libs to the target's build dependencies."""
        loaded_deps = read_lines_from(self.exported_libs_file())
        loaded_deps = cleanup_libs_list(loaded_deps)
        if loaded_deps:
            target.build_dependencies += loaded_deps
    def save_exports_as_dependencies(self, exports):
        """Persist the exported libs list for future incremental builds."""
        write_text_to(self.exported_libs_file(), '\n'.join(exports))
    def find_first_missing_build_product(self):
        """Return the first expected build product missing on disk, else None."""
        for depfile in self.target.build_dependencies:
            if not os.path.exists(depfile):
                return depfile
        return None
    def source_dir_exists(self):
        """True when the dependency's source directory exists."""
        return os.path.exists(self.src_dir)
    def build_dir_exists(self):
        """True when the dependency's build directory exists."""
        return os.path.exists(self.build_dir)
    def create_build_dir_if_needed(self):
        """Create the build directory when missing."""
        if not os.path.exists(self.build_dir): # check to avoid Access Denied errors
            os.makedirs(self.build_dir, exist_ok=True)
    ## @return True if dependency has changed
    def load(self):
        """Load this dependency exactly once; concurrent callers wait for it.

        NOTE(review): this busy-waits on `currently_loading` — presumably
        loads run on worker threads; confirm before relying on this flag
        for synchronization.
        """
        if self.currently_loading:
            #console(f'WAIT {self.name}')
            while self.currently_loading:
                sleep(0.1)
            return self.should_rebuild
        #console(f'LOAD {self.name}')
        changed = False
        try:
            self.currently_loading = True
            changed = self._load()
        finally:
            # always clear the flag, even when _load() raises
            self.currently_loading = False
        return changed
    def git_checkout(self):
        """Synchronize the git checkout; return True when sources changed."""
        if not self.git or self.is_root: # No git for local or root targets
            return False
        if not self.source_dir_exists(): # we MUST pull here
            self.git.clone_or_pull()
            return True
        # only probe origin during `mama update` runs
        changed = self.git.check_status() if self.config.update else False
        is_target = self.config.target_matches(self.name)
        wiped = False
        # a changed URL (with a known previous status) invalidates the clone
        should_wipe = self.git.url_changed and not self.git.missing_status
        if should_wipe or (is_target and self.config.reclone):
            self.git.reclone_wipe()
            wiped = True
        else:
            # don't pull if no changes to git status
            # or if we're current target of a non-update build
            # mama update target=ReCpp -- this should git pull
            # mama build target=ReCpp -- should NOT pull
            non_update_target = is_target and not self.config.update
            if non_update_target or not changed:
                return False
        self.git.clone_or_pull(wiped)
        return True
    def _load(self):
        """Perform the actual load: git sync, target creation, dep setup.

        Returns True when this dependency needs to be (re)built.
        """
        git_changed = self.git_checkout()
        self.create_build_target() ## parses target mamafile
        self.update_dep_dir()
        self.create_build_dir_if_needed()
        target = self.target
        conf = self.config
        is_target = conf.target_matches(target.name)
        if conf.clean and is_target:
            self.clean()
        if not self.is_root:
            # root targets don't import previously exported libs
            self.load_build_dependencies(target)
        target.dependencies() ## customization point for additional dependencies
        build = False
        if conf.build or conf.update:
            build = self._should_build(conf, target, is_target, git_changed)
            if build:
                self.create_build_dir_if_needed() # in case we just cleaned
                if git_changed:
                    self.git.save_status()
        self.already_loaded = True
        self.should_rebuild = build
        if conf.list:
            self._print_list(conf, target)
        return build
def _print_list(self, conf, target):
if conf.print:
console(f' - Target {target.name: <16}')
    def _should_build(self, conf, target, is_target, git_changed):
        """Decide whether this target must be built, logging the reason.

        Reasons are checked in priority order: clean/root/always-build flags,
        git changes, modified mamafile/CMakeLists, missing build artifacts,
        removed dependencies, and finally an explicit `update target=`.
        """
        def build(r):
            # report the build reason and answer True
            if conf.print:
                args = f'{target.args}' if target.args else ''
                console(f'  - Target {target.name: <16} BUILD [{r}] {args}')
            return True
        if conf.target and not is_target: # if we called: "target=SpecificProject"
            return False # skip build if target doesn't match
        ## build also entails packaging
        if conf.clean and is_target: return build('cleaned target')
        if self.is_root: return build('root target')
        if self.always_build: return build('always build')
        if git_changed: return build('git commit changed')
        if update_mamafile_tag(self.mamafile_path(), self.build_dir): return build(target.name+'/mamafile.py modified')
        if update_cmakelists_tag(self.cmakelists_path(), self.build_dir): return build(target.name+'/CMakeLists.txt modified')
        if not self.nothing_to_build:
            if not self.has_build_files(): return build('not built yet')
            if not target.build_dependencies: return build('no build dependencies')
            missing_product = self.find_first_missing_build_product()
            if missing_product: return build(f'{missing_product} does not exist')
            missing_dep = self.find_missing_dependency()
            if missing_dep: return build(f'{missing_dep} was removed')
        # Finally, if we call `update this_target`
        if conf.update and conf.target == target.name:
            return build('update target='+conf.target)
        if conf.print:
            console(f'  - Target {target.name: <16} OK')
        return False # do not build, all is ok
    def after_load(self):
        """Propagate rebuild state from children once all deps are loaded.

        When no specific target was requested, a changed child forces this
        dependency to rebuild as well.
        """
        if self.config.no_specific_target():
            first_changed = next((c for c in self.children if c.should_rebuild), None)
            if first_changed and not self.should_rebuild:
                self.should_rebuild = True
                if self.config.print:
                    console(f'  - Target {self.name: <16} BUILD [{first_changed.name} changed]')
                self.create_build_dir_if_needed() # in case we just cleaned
def successful_build(self):
update_mamafile_tag(self.mamafile_path(), self.build_dir)
update_cmakelists_tag(self.cmakelists_path(), self.build_dir)
self.save_dependency_list()
if self.git:
self.git.save_status()
    def create_build_target(self):
        """Instantiate self.target from the dependency's mamafile (or default class).

        Also resolves the workspace name and, for root targets, whether the
        workspace is global or local to the project.
        """
        if self.target:
            # already created -- just refresh target arguments
            self.target._set_args(self.target_args)
            return
        project, buildTarget = parse_mamafile(self.config, self.target_class, self.mamafile_path())
        if project and buildTarget:
            # mamafile targets may declare workspace via class-level statics
            buildStatics = buildTarget.__dict__
            if not self.workspace:
                if 'workspace' in buildStatics: self.workspace = buildStatics['workspace']
                elif 'local_workspace' in buildStatics: self.workspace = buildStatics['local_workspace']
                elif 'global_workspace' in buildStatics: self.workspace = buildStatics['global_workspace']
                else: self.workspace = 'build'
            if self.is_root:
                # only the root decides whether workspaces are global or local
                if 'workspace' in buildStatics: self.config.global_workspace = False
                elif 'local_workspace' in buildStatics: self.config.global_workspace = False
                elif 'global_workspace' in buildStatics: self.config.global_workspace = True
                if not self.config.global_workspace:
                    self.config.workspaces_root = self.src_dir
            self.target = buildTarget(name=project, config=self.config, dep=self, args=self.target_args)
        else:
            # no mamafile: fall back to the provided target class
            if not self.workspace:
                self.workspace = 'build'
            self.target = self.target_class(name=self.name, config=self.config, dep=self, args=self.target_args)
    def is_root_or_config_target(self):
        """True when this dep is the configured target (or the root if none set)."""
        if self.config.target:
            return self.config.target_matches(self.name)
        return self.is_root
    def cmakelists_path(self):
        """Path to this dependency's CMakeLists.txt."""
        return os.path.join(self.src_dir, 'CMakeLists.txt')
    def cmakelists_exists(self):
        """True when a CMakeLists.txt exists in the source dir."""
        return os.path.exists(self.cmakelists_path())
    def ensure_cmakelists_exists(self):
        """Raise IOError with a helpful message when CMakeLists.txt is missing."""
        if not os.path.exists(self.cmakelists_path()):
            raise IOError(f'Could not find {self.cmakelists_path()}! Add a CMakelists.txt, or add `self.nothing_to_build()` to configuration step. Also note that filename CMakeLists.txt is case sensitive.')
    def mamafile_path(self):
        """The explicit mamafile when configured, else <src>/mamafile.py."""
        return self.mamafile if self.mamafile else os.path.join(self.src_dir, 'mamafile.py')
    def mamafile_exists(self):
        """True when a mamafile exists for this dependency."""
        return os.path.exists(self.mamafile_path())
    # "name(-branch)"
    def get_dependency_name(self):
        """Identity used in saved dependency lists: name plus git branch/tag."""
        if self.git:
            branch = self.git.branch_or_tag()
            if branch:
                return self.name + '-' + branch
        return self.name
def save_dependency_list(self):
deps = [dep.get_dependency_name() for dep in self.children]
write_text_to(f'{self.build_dir}/mama_dependency_libs', '\n'.join(deps))
def find_missing_dependency(self):
last_build = [dep.rstrip() for dep in read_lines_from(f'{self.build_dir}/mama_dependency_libs')]
current = [dep.get_dependency_name() for dep in self.children]
#console(f'{self.name: <32} last_build: {last_build}')
#console(f'{self.name: <32} current: {current}')
for last in last_build:
if not (last in current):
return last.strip()
return None # Nothing missing
## Clean
def clean(self):
if self.config.print:
console(f' - Target {self.name: <16} CLEAN | |
# 如果发生了成交
if buyCross or sellCross:
# 推送成交数据
self.tradeCount += 1 # 成交编号自增1
tradeID = str(self.tradeCount)
trade = VtTradeData()
trade.vtSymbol = so.vtSymbol
trade.tradeID = tradeID
trade.vtTradeID = tradeID
if buyCross:
self.strategy.pos += so.volume
trade.price = max(bestCrossPrice, so.price)
else:
self.strategy.pos -= so.volume
trade.price = min(bestCrossPrice, so.price)
self.limitOrderCount += 1
orderID = str(self.limitOrderCount)
trade.orderID = orderID
trade.vtOrderID = orderID
trade.direction = so.direction
trade.offset = so.offset
trade.volume = so.volume
trade.tradeTime = str(self.dt)
trade.dt = self.dt
self.strategy.onTrade(trade)
self.tradeDict[tradeID] = trade
# 更新持仓缓存数据 # TODO: do we need this?
posBuffer = self.posBufferDict.get(trade.vtSymbol, None)
if not posBuffer:
posBuffer = PositionBuffer()
posBuffer.vtSymbol = trade.vtSymbol
self.posBufferDict[trade.vtSymbol] = posBuffer
posBuffer.updateTradeData(trade)
# 推送委托数据
so.status = STOPORDER_TRIGGERED
order = VtOrderData()
order.vtSymbol = so.vtSymbol
order.symbol = so.vtSymbol
order.orderID = orderID
order.vtOrderID = orderID
order.direction = so.direction
order.offset = so.offset
order.price = so.price
order.totalVolume = so.volume
order.tradedVolume = so.volume
order.status = STATUS_ALLTRADED
order.orderTime = trade.tradeTime
order.gatewayName = so.gatewayName
self.strategy.onOrder(order)
self.limitOrderDict[orderID] = order
# 从字典中删除该限价单
try:
del self.workingStopOrderDict[stopOrderID]
except Exception as ex:
self.writeCtaError(u'crossStopOrder exception:{},{}'.format(str(ex), traceback.format_exc()))
# 若采用实时计算净值
if self.calculateMode == self.REALTIME_MODE:
self.realtimeCalculate()
    #----------------------------------------------------------------------
    def insertData(self, dbName, collectionName, data):
        """No-op: database inserts are not allowed during backtesting; this
        guards live-trading code paths that attempt to write."""
        pass
    #----------------------------------------------------------------------
    def loadBar(self, dbName, collectionName, startDate):
        """Return the Bars from the preloaded initialization data list."""
        return self.initData
    #----------------------------------------------------------------------
    def loadTick(self, dbName, collectionName, startDate):
        """Return the Ticks from the preloaded initialization data list."""
        return self.initData
def get_data_path(self):
"""
获取数据保存目录
:return:
"""
logs_folder = os.path.abspath(os.path.join(os.getcwd(), 'data'))
if os.path.exists(logs_folder):
return logs_folder
else:
return os.path.abspath(os.path.join(cta_engine_path, 'data'))
def get_logs_path(self):
"""
获取日志保存目录
:return:
"""
logs_folder = os.path.abspath(os.path.join(os.getcwd(), 'logs'))
if os.path.exists(logs_folder):
return logs_folder
else:
return os.path.abspath(os.path.join(cta_engine_path, 'TestLogs'))
def createLogger(self, debug=False):
"""
创建日志
:param debug:
:return:
"""
filename = os.path.abspath(os.path.join(self.get_logs_path(), '{}'.format(self.strategy_name if len(self.strategy_name) > 0 else 'strategy')))
self.logger = setup_logger(filename=filename, name=self.strategy_name if len(self.strategy_name) > 0 else 'strategy', debug=debug,backtesing=True)
#----------------------------------------------------------------------
def writeCtaLog(self, content,strategy_name=None):
"""记录日志"""
#log = str(self.dt) + ' ' + content
#self.logList.append(log)
# 写入本地log日志
if self.logger:
self.logger.info(content)
else:
self.createLogger()
def writeCtaError(self, content,strategy_name=None):
"""记录异常"""
self.output(u'Error:{}'.format(content))
if self.logger:
self.logger.error(content)
else:
self.createLogger()
def writeCtaWarning(self, content,strategy_name=None):
"""记录告警"""
self.output(u'Warning:{}'.format(content))
if self.logger:
self.logger.warning(content)
else:
self.createLogger()
def writeCtaNotification(self,content,strategy_name=None):
"""记录通知"""
#print content
self.output(u'Notify:{}'.format(content))
self.writeCtaLog(content)
#----------------------------------------------------------------------
def output(self, content):
"""输出内容"""
#print str(datetime.now()) + "\t" + content
pass
def realtimeCalculate(self):
"""实时计算交易结果2
支持多空仓位并存"""
if len(self.tradeDict) < 1: return
tradeids = list(self.tradeDict.keys())
#resultDict = OrderedDict() # 交易结果记录
resultDict = []
longid = EMPTY_STRING
shortid = EMPTY_STRING
# 对交易记录逐一处理
for tradeid in tradeids:
try:
trade = self.tradeDict[tradeid]
except:
self.writeCtaError(u'没有{0}的成交单'.format(tradeid))
continue
# buy trade
if trade.direction == DIRECTION_LONG and trade.offset == OFFSET_OPEN:
self.output(u'{0}多开:{1},{2}'.format(trade.vtSymbol, trade.volume, trade.price))
self.writeCtaLog(u'{0}多开:{1},{2}'.format(trade.vtSymbol, trade.volume, trade.price))
self.longPosition.append(trade)
del self.tradeDict[tradeid]
if trade.volume == EMPTY_INT:
self.writeCtaLog(u'{},dir:{},vtOrderID:{}tradeID:{}的volumn为{},删除'.format(trade.vtSymbol, trade.direction,trade.vtOrderID,trade.tradeID,trade.volume))
try:
del self.tradeDict[tradeid]
except:
pass
continue
# cover trade,
elif trade.direction == DIRECTION_LONG and trade.offset == OFFSET_CLOSE:
gId = trade.tradeID # 交易组(多个平仓数为一组)
gr = None # 组合的交易结果
coverVolume = trade.volume
self.writeCtaLog(u'平空:{}'.format(coverVolume))
while coverVolume > 0:
if len(self.shortPosition) == 0:
self.writeCtaError(u'异常!没有开空仓的数据')
raise Exception(u'realtimeCalculate2() Exception,没有开空仓的数据')
return
cur_short_pos_list = [s_pos.volume for s_pos in self.shortPosition]
self.writeCtaLog(u'当前空单:{}'.format(cur_short_pos_list))
pop_indexs = [i for i, val in enumerate(self.shortPosition) if val.vtSymbol == trade.vtSymbol]
if len(pop_indexs) < 1:
self.writeCtaError(u'异常,没有对应symbol:{0}的空单持仓'.format(trade.vtSymbol))
raise Exception(u'realtimeCalculate2() Exception,没有对应symbol:{0}的空单持仓'.format(trade.vtSymbol))
return
pop_index = pop_indexs[0]
# 从未平仓的空头交易
entryTrade = self.shortPosition.pop(pop_index)
# 开空volume,不大于平仓volume
if coverVolume >= entryTrade.volume:
self.writeCtaLog(u'开空volume,不大于平仓volume, coverVolume:{} ,先平::{}'.format(coverVolume, entryTrade.volume))
coverVolume = coverVolume - entryTrade.volume
if coverVolume>0:
self.writeCtaLog(u'剩余待平数量:{}'.format(coverVolume))
self.output(u'{0}空平:{1},{2}'.format(entryTrade.vtSymbol, entryTrade.volume, trade.price))
self.writeCtaLog(u'{0}空平:{1},{2}'.format(entryTrade.vtSymbol, entryTrade.volume, trade.price))
result = TradingResult(entryPrice=entryTrade.price,
entryDt=entryTrade.dt,
exitPrice=trade.price,
exitDt=trade.dt,
volume=-entryTrade.volume,
rate=self.rate,
slippage=self.slippage,
size=self.size,
groupId=gId,
fixcommission=self.fixCommission)
t = OrderedDict()
t['Gid'] = gId
t['vtSymbol'] = entryTrade.vtSymbol
t['OpenTime'] = entryTrade.tradeTime
t['OpenPrice'] = entryTrade.price
t['Direction'] = u'Short'
t['CloseTime'] = trade.tradeTime
t['ClosePrice'] = trade.price
t['Volume'] = entryTrade.volume
t['Profit'] = result.pnl
t['Commission'] = result.commission
self.exportTradeList.append(t)
msg = u'Gid:{0} {1}[{2}:开空tid={3}:{4}]-[{5}.平空tid={6},{7},vol:{8}],净盈亏pnl={9},手续费:{10}'\
.format(gId, entryTrade.vtSymbol, entryTrade.tradeTime, shortid, entryTrade.price,
trade.tradeTime, tradeid, trade.price,
entryTrade.volume, result.pnl,result.commission)
self.output(msg)
self.writeCtaLog(msg)
resultDict.append(result)
if type(gr) == type(None):
if coverVolume > 0:
# 属于组合
gr = copy.deepcopy(result)
else:
# 删除平空交易单,
self.writeCtaLog(u'删除平空交易单,tradeID:'.format(trade.tradeID))
del self.tradeDict[trade.tradeID]
else:
# 更新组合的数据
gr.turnover = gr.turnover + result.turnover
gr.commission = gr.commission + result.commission
gr.slippage = gr.slippage + result.slippage
gr.pnl = gr.pnl + result.pnl
# 所有仓位平完
if coverVolume == 0:
self.writeCtaLog(u'所有平空仓位撮合完毕')
gr.volume = abs(trade.volume)
#resultDict[entryTrade.dt] = gr
# 删除平空交易单,
self.writeCtaLog(u'删除平空交易单:{}'.format(trade.tradeID))
del self.tradeDict[trade.tradeID]
# 开空volume,大于平仓volume,需要更新减少tradeDict的数量。
else:
self.writeCtaLog(u'Short volume:{0} > Cover volume:{1},需要更新减少tradeDict的数量。'.format(entryTrade.volume,coverVolume))
shortVolume = entryTrade.volume - coverVolume
result = TradingResult(entryPrice=entryTrade.price,
entryDt=entryTrade.dt,
exitPrice=trade.price,
exitDt=trade.dt,
volume=-coverVolume,
rate=self.rate,
slippage=self.slippage,
size=self.size,
groupId=gId,
fixcommission=self.fixCommission)
t = OrderedDict()
t['Gid'] = gId
t['vtSymbol'] = entryTrade.vtSymbol
t['OpenTime'] = entryTrade.tradeTime
t['OpenPrice'] = entryTrade.price
t['Direction'] = u'Short'
t['CloseTime'] = trade.tradeTime
t['ClosePrice'] = trade.price
t['Volume'] = coverVolume
t['Profit'] = result.pnl
t['Commission'] = result.commission
self.exportTradeList.append(t)
msg = u'Gid:{0} {1}[{2}:开空tid={3}:{4}]-[{5}.平空tid={6},{7},vol:{8}],净盈亏pnl={9},手续费:{10}'\
.format(gId, entryTrade.vtSymbol, entryTrade.tradeTime, shortid, entryTrade.price,
trade.tradeTime, tradeid, trade.price,
coverVolume, result.pnl,result.commission)
self.output(msg)
self.writeCtaLog(msg)
# 更新(减少)开仓单的volume,重新推进开仓单列表中
entryTrade.volume = shortVolume
self.writeCtaLog(u'更新(减少)开仓单的volume,重新推进开仓单列表中:{}'.format(entryTrade.volume))
self.shortPosition.append(entryTrade)
cur_short_pos_list = [s_pos.volume for s_pos in self.shortPosition]
self.writeCtaLog(u'当前空单:{}'.format(cur_short_pos_list))
coverVolume = 0
resultDict.append(result)
if type(gr) != type(None):
# 更新组合的数据
gr.turnover = gr.turnover + result.turnover
gr.commission = gr.commission + result.commission
gr.slippage = gr.slippage + result.slippage
gr.pnl = gr.pnl + result.pnl
gr.volume = abs(trade.volume)
# 删除平空交易单,
del self.tradeDict[trade.tradeID]
if type(gr) != type(None):
self.writeCtaLog(u'组合净盈亏:{0}'.format(gr.pnl))
self.writeCtaLog(u'-------------')
# Short Trade
elif trade.direction == DIRECTION_SHORT and trade.offset == OFFSET_OPEN:
self.output(u'{0}空开:{1},{2}'.format(trade.vtSymbol, trade.volume, trade.price))
self.writeCtaLog(u'{0}空开:{1},{2}'.format(trade.vtSymbol, trade.volume, trade.price))
self.shortPosition.append(trade)
del self.tradeDict[trade.tradeID]
continue
# sell trade
elif trade.direction == DIRECTION_SHORT and trade.offset == OFFSET_CLOSE:
gId = trade.tradeID # 交易组(多个平仓数为一组)
gr = None # 组合的交易结果
sellVolume = trade.volume
while sellVolume > 0:
if len(self.longPosition) == 0:
self.writeCtaError(u'异常,没有开多单')
raise RuntimeError(u'realtimeCalculate2() Exception,没有开多单')
return
pop_indexs = [i for i, val in enumerate(self.longPosition) if val.vtSymbol == trade.vtSymbol]
if len(pop_indexs) < 1:
self.writeCtaError(u'没有对应的symbol{0}多单数据,'.format(trade.vtSymbol))
raise RuntimeError(u'realtimeCalculate2() Exception,没有对应的symbol{0}多单数据,'.format(trade.vtSymbol))
return
pop_index = pop_indexs[0]
entryTrade = self.longPosition.pop(pop_index)
# 开多volume,不大于平仓volume
if sellVolume >= entryTrade.volume:
self.writeCtaLog(u'{0}Sell Volume:{1} >= Entry Volume:{2}'.format(entryTrade.vtSymbol, sellVolume, entryTrade.volume))
sellVolume = sellVolume - entryTrade.volume
self.output(u'{0}多平:{1},{2}'.format(entryTrade.vtSymbol, entryTrade.volume, trade.price))
self.writeCtaLog(u'{0}多平:{1},{2}'.format(entryTrade.vtSymbol, entryTrade.volume, trade.price))
result = TradingResult(entryPrice=entryTrade.price,
entryDt=entryTrade.dt,
exitPrice=trade.price,
exitDt=trade.dt,
volume=entryTrade.volume,
rate=self.rate,
slippage=self.slippage,
size=self.size,
groupId=gId,
fixcommission=self.fixCommission)
t = OrderedDict()
t['Gid'] = gId
t['vtSymbol'] = entryTrade.vtSymbol
t['OpenTime'] = entryTrade.tradeTime
t['OpenPrice'] = entryTrade.price
t['Direction'] = u'Long'
t['CloseTime'] = trade.tradeTime
t['ClosePrice'] = trade.price
t['Volume'] = entryTrade.volume
t['Profit'] = result.pnl
t['Commission'] = result.commission
self.exportTradeList.append(t)
msg = u'Gid:{0} {1}[{2}:开多tid={3}:{4}]-[{5}.平多tid={6},{7},vol:{8}],净盈亏pnl={9},手续费:{10}'\
.format(gId, entryTrade.vtSymbol,
entryTrade.tradeTime, longid, entryTrade.price,
trade.tradeTime, tradeid, trade.price,
entryTrade.volume, result.pnl, result.commission)
self.output(msg)
self.writeCtaLog(msg)
resultDict.append(result)
if type(gr) == type(None):
if sellVolume > 0:
# 属于组合
gr = copy.deepcopy(result)
else:
# 删除平多交易单,
del self.tradeDict[trade.tradeID]
else:
# 更新组合的数据
gr.turnover = gr.turnover + result.turnover
gr.commission = gr.commission + result.commission
gr.slippage = gr.slippage + result.slippage
gr.pnl = gr.pnl + result.pnl
if sellVolume == 0:
gr.volume = abs(trade.volume)
# 删除平多交易单,
del self.tradeDict[trade.tradeID]
# 开多volume,大于平仓volume,需要更新减少tradeDict的数量。
else:
longVolume = entryTrade.volume -sellVolume
self.writeCtaLog(u'Entry Long Volume:{0} > Sell Volume:{1},Remain:{2}'
.format(entryTrade.volume, sellVolume, longVolume))
result = TradingResult(entryPrice=entryTrade.price,
entryDt=entryTrade.dt,
exitPrice=trade.price,
exitDt=trade.dt,
volume=sellVolume,
rate=self.rate,
slippage=self.slippage,
size=self.size,
groupId=gId,
fixcommission=self.fixCommission)
t = OrderedDict()
t['Gid'] = gId
t['vtSymbol'] = entryTrade.vtSymbol
t['OpenTime'] = entryTrade.tradeTime
t['OpenPrice'] = entryTrade.price
t['Direction'] = u'Long'
t['CloseTime'] = trade.tradeTime
t['ClosePrice'] = trade.price
t['Volume'] = sellVolume
t['Profit'] = result.pnl
t['Commission'] = result.commission
self.exportTradeList.append(t)
msg = u'Gid:{0} {1}[{2}:开多tid={3}:{4}]-[{5}.平多tid={6},{7},vol:{8}],净盈亏pnl={9},手续费:{10}'\
.format(gId, entryTrade.vtSymbol,entryTrade.tradeTime, longid, entryTrade.price,
trade.tradeTime, tradeid, trade.price, sellVolume, result.pnl, result.commission)
self.output(msg)
self.writeCtaLog(msg)
# 减少开多volume,重新推进多单持仓列表中
entryTrade.volume = longVolume
self.longPosition.append(entryTrade)
sellVolume = 0
resultDict.append(result)
if type(gr) != type(None):
# 更新组合的数据
gr.turnover = gr.turnover + result.turnover
gr.commission = gr.commission + result.commission
gr.slippage = gr.slippage + result.slippage
gr.pnl = gr.pnl + result.pnl
gr.volume = abs(trade.volume)
# 删除平多交易单,
del self.tradeDict[trade.tradeID]
if type(gr) != type(None):
self.writeCtaLog(u'组合净盈亏:{0}'.format(gr.pnl))
self.writeCtaLog(u'-------------')
# 计算仓位比例
occupyMoney = EMPTY_FLOAT
occupyLongVolume = EMPTY_INT
occupyShortVolume = EMPTY_INT
longPos = {}
shortPos = {}
if len(self.longPosition) > 0:
for t in self.longPosition:
occupyMoney += t.price * abs(t.volume) * self.size * self.margin_rate
occupyLongVolume += abs(t.volume)
if t.vtSymbol in longPos:
longPos[t.vtSymbol] += abs(t.volume)
else:
longPos[t.vtSymbol] = abs(t.volume)
if len(self.shortPosition) > 0:
for t in self.shortPosition:
occupyMoney += t.price * abs(t.volume) * self.size * self.margin_rate
occupyShortVolume += (t.volume)
if t.vtSymbol in shortPos:
shortPos[t.vtSymbol] += abs(t.volume)
else:
shortPos[t.vtSymbol] = abs(t.volume)
self.output(u'L:{0}|{1},S:{2}|{3}'.format(occupyLongVolume, str(longPos), occupyShortVolume, str(shortPos)))
self.writeCtaLog(u'L:{0}|{1},S:{2}|{3}'.format(occupyLongVolume, str(longPos), occupyShortVolume, str(shortPos)))
# 最大持仓
self.maxVolume = max(self.maxVolume, occupyLongVolume + occupyShortVolume)
# 更改为持仓净值
self.avaliable = self.netCapital - occupyMoney
self.percent = round(float(occupyMoney * 100 / self.netCapital), 2)
# 检查是否有平交易
if len(resultDict) ==0:
msg = u''
if len(self.longPosition) > 0:
msg += u'持多仓{0},'.format( str(longPos))
if len(self.shortPosition) > 0:
msg += u'持空仓{0},'.format(str(shortPos))
msg += u'资金占用:{0},仓位:{1}%%'.format(occupyMoney, self.percent)
self.output(msg)
self.writeCtaLog(msg)
return
# 对交易结果汇总统计
for result in resultDict:
if result.pnl > 0:
self.winningResult += 1
self.totalWinning += result.pnl
else:
self.losingResult += 1
self.totalLosing += result.pnl
self.capital += result.pnl
self.maxCapital = max(self.capital, self.maxCapital)
# Source artifact (scrape residue): repository marker for widnyana/pyTenable.
from tenable.errors import *
from ..checker import check, single
from .conftest import SCAN_ID_WITH_RESULTS
import uuid, time, pytest
# ---------------------------------------------------------------------------
# Scans API tests: scans._create_scan_document, scans.attachment,
# scans.configure and scans.copy.  All tests replay recorded HTTP via the
# 'vcr' cassettes; 'api' and 'scan' are fixtures, and check() validates a
# response field's presence and type.
# ---------------------------------------------------------------------------
@pytest.mark.vcr()
def test_scan_create_scan_document_template_typeerror(api):
    # Non-string template names must be rejected.
    with pytest.raises(TypeError):
        api.scans._create_scan_document({'template': 123})
@pytest.mark.vcr()
def test_scan_create_scan_document_template_unexpected_value_error(api):
    # A template name that does not exist must be rejected.
    with pytest.raises(UnexpectedValueError):
        api.scans._create_scan_document({'template': 'nothing_here'})
# NOTE(review): 'socument' below is a typo for 'document'; renaming would
# change the collected test id, so it is documented rather than changed.
@pytest.mark.vcr()
def test_scan_create_scan_socument_template_pass(api):
    templates = api.policies.templates()
    resp = api.scans._create_scan_document({'template': 'basic'})
    assert isinstance(resp, dict)
    check(resp, 'uuid', 'scanner-uuid')
    assert resp['uuid'] == templates['basic']
@pytest.mark.vcr()
def test_scan_create_scan_document_policies_id_pass(api):
    # A numeric policy id should be passed through into settings.policy_id.
    policies = api.policies.list()
    p = policies[0]
    resp = api.scans._create_scan_document({'policy': p['id']})
    assert isinstance(resp, dict)
    check(resp, 'settings', dict)
    check(resp['settings'], 'policy_id', int)
    assert resp['settings']['policy_id'] == p['id']
@pytest.mark.vcr()
def test_scan_create_scan_document_policies_name_pass(api):
    # A policy *name* should be resolved to its id and template uuid.
    policies = api.policies.list()
    p = policies[0]
    resp = api.scans._create_scan_document({'policy': p['name']})
    assert isinstance(resp, dict)
    check(resp, 'uuid', 'scanner-uuid')
    check(resp, 'settings', dict)
    check(resp['settings'], 'policy_id', int)
    assert resp['settings']['policy_id'] == p['id']
    assert resp['uuid'] == p['template_uuid']
#def test_scan_create_scan_document_targets
@pytest.mark.vcr()
def test_scan_create_scan_document_scanner_unexpectedvalueerror(api):
    with pytest.raises(UnexpectedValueError):
        api.scans._create_scan_document({'scanner': 'nothing to see here'})
@pytest.mark.vcr()
def test_scan_create_scan_document_scanner_uuid_pass(api):
    scanners = api.scanners.allowed_scanners()
    s = scanners[0]
    resp = api.scans._create_scan_document({'scanner': s['id']})
    assert isinstance(resp, dict)
    check(resp, 'settings', dict)
    check(resp['settings'], 'scanner_id', 'scanner-uuid')
    assert resp['settings']['scanner_id'] == s['id']
@pytest.mark.vcr()
def test_scan_create_scan_document_scanner_name_pass(api):
    # A scanner *name* should be resolved to its id.
    scanners = api.scanners.allowed_scanners()
    s = scanners[0]
    resp = api.scans._create_scan_document({'scanner': s['name']})
    assert isinstance(resp, dict)
    check(resp, 'settings', dict)
    check(resp['settings'], 'scanner_id', str)
    assert resp['settings']['scanner_id'] == s['id']
@pytest.mark.vcr()
def test_scan_attachment_scan_id_typeerror(api):
    with pytest.raises(TypeError):
        api.scans.attachment('nope', 1)
# NOTE(review): 'attachement' in the next two names is a typo for
# 'attachment'; kept to preserve the test ids.
@pytest.mark.vcr()
def test_scan_attachment_attachement_id_typeerror(api):
    with pytest.raises(TypeError):
        api.scans.attachment(1, 'nope')
# NOTE(review): xfail(raises=InvalidInputError) wraps a pytest.raises check
# for NotFoundError -- the API apparently raises InvalidInputError here
# instead of NotFoundError; confirm which exception is intended.
@pytest.mark.vcr()
@pytest.mark.xfail(raises=InvalidInputError)
def test_scan_attachement_notfounderror(api):
    with pytest.raises(NotFoundError):
        api.scans.attachment(1, 1, 'none')
@pytest.mark.vcr()
def test_scan_configure_id_typeerror(api):
    with pytest.raises(TypeError):
        api.scans.configure('abc123')
@pytest.mark.vcr()
def test_scan_configure_scan_id_typeerror(api):
    with pytest.raises(TypeError):
        api.scans.configure('nope')
@pytest.mark.vcr()
def test_scan_configure_notfounderror(api):
    with pytest.raises(NotFoundError):
        api.scans.configure(1, name=str(uuid.uuid4()))
@pytest.mark.vcr()
def test_scan_configure(api, scan):
    # Happy path: renaming a scan returns the updated document.
    mod = api.scans.configure(scan['id'], name='MODIFIED')
    assert mod['id'] == scan['id']
    assert mod['name'] == 'MODIFIED'
@pytest.mark.vcr()
def test_scan_copy_scan_id_typeerror(api):
    with pytest.raises(TypeError):
        api.scans.copy('nope')
@pytest.mark.vcr()
def test_scan_copy_folder_id_typeerror(api):
    with pytest.raises(TypeError):
        api.scans.copy(1, folder_id='nope')
@pytest.mark.vcr()
def test_scan_copy_name_typeerror(api):
    with pytest.raises(TypeError):
        api.scans.copy(1, name=1)
@pytest.mark.vcr()
def test_scan_copy_notfounderror(api):
    with pytest.raises(NotFoundError):
        api.scans.copy(1)
@pytest.mark.vcr()
def test_scan_copy(api, scan):
    # Happy path: the clone's document shape matches the documented schema.
    clone = api.scans.copy(scan['id'])
    assert isinstance(clone, dict)
    check(clone, 'control', bool)
    check(clone, 'creation_date', int)
    check(clone, 'enabled', bool)
    check(clone, 'id', int)
    check(clone, 'last_modification_date', int)
    check(clone, 'owner', str)
    check(clone, 'name', str)
    check(clone, 'read', bool)
    check(clone, 'rrules', str, allow_none=True)
    # This is in the documentation, however isn't always returned oddly.
    #check(clone, 'schedule_uuid', 'scanner-uuid')
    check(clone, 'shared', bool)
    check(clone, 'starttime', str, allow_none=True)
    check(clone, 'status', str)
    check(clone, 'timezone', str, allow_none=True)
    check(clone, 'user_permissions', int)
    check(clone, 'uuid', 'scanner-uuid')
# ---------------------------------------------------------------------------
# Scans API tests: scans.create, scans.delete, scans.delete_history and
# scans.details parameter validation.
# ---------------------------------------------------------------------------
@pytest.mark.vcr()
def test_scan_create_no_template_pass(api, scan):
    # The 'scan' fixture creates a scan with the default template; verify
    # the returned document's field types.
    assert isinstance(scan, dict)
    check(scan, 'creation_date', int)
    check(scan, 'custom_targets', str)
    check(scan, 'default_permissions', int)
    check(scan, 'description', str, allow_none=True)
    check(scan, 'emails', str, allow_none=True)
    check(scan, 'enabled', bool)
    check(scan, 'id', int)
    check(scan, 'last_modification_date', int)
    check(scan, 'owner', str)
    check(scan, 'owner_id', int)
    check(scan, 'policy_id', int)
    check(scan, 'name', str)
    check(scan, 'rrules', str, allow_none=True)
    check(scan, 'scanner_id', 'scanner-uuid', allow_none=True)
    check(scan, 'shared', int)
    check(scan, 'starttime', str, allow_none=True)
    check(scan, 'timezone', str, allow_none=True)
    check(scan, 'type', str)
    check(scan, 'user_permissions', int)
    check(scan, 'uuid', str)
@pytest.mark.vcr()
def test_scan_create_was_scan_pass(api):
    # Web-application scan template with an explicit plugin-family selection.
    scan = api.scans.create(template='was_scan', name=str(uuid.uuid4()),
        plugins={
            'Authentication & Session': {'status': 'enabled'},
            'Code Execution': {'status': 'enabled'},
            'Component Vulnerability': {'status': 'enabled'},
            'Cross Site Request Forgery': {'status': 'enabled'},
            'Cross Site Scripting': {'status': 'enabled'},
            'Data Exposure': {'status': 'enabled'},
            'File Inclusion': {'status': 'enabled'},
            'Injection': {'status': 'enabled'},
            'Web Applications': {'status': 'enabled'},
            'Web Servers': {'status': 'enabled'},
        },
        assessment_mode='Quick',
        targets=['http://127.0.0.1:3000'],
        was_timeout='00:05:00'
    )
    check(scan, 'creation_date', int)
    check(scan, 'custom_targets', str)
    check(scan, 'default_permissions', int)
    check(scan, 'description', str, allow_none=True)
    check(scan, 'emails', str, allow_none=True)
    check(scan, 'enabled', bool)
    check(scan, 'id', int)
    check(scan, 'last_modification_date', int)
    check(scan, 'owner', str)
    check(scan, 'owner_id', int)
    check(scan, 'policy_id', int)
    check(scan, 'name', str)
    check(scan, 'rrules', str, allow_none=True)
    check(scan, 'scanner_id', 'scanner-uuid', allow_none=True)
    check(scan, 'shared', int)
    check(scan, 'starttime', str, allow_none=True)
    check(scan, 'timezone', str, allow_none=True)
    check(scan, 'type', str)
    check(scan, 'user_permissions', int)
    check(scan, 'uuid', str)
@pytest.mark.vcr()
def test_scan_delete_scan_id_typeerror(api):
    with pytest.raises(TypeError):
        api.scans.delete('nope')
@pytest.mark.vcr()
def test_scan_delete_notfounderror(api):
    with pytest.raises(NotFoundError):
        api.scans.delete(0)
@pytest.mark.vcr()
def test_scan_delete(api, scan):
    api.scans.delete(scan['id'])
@pytest.mark.vcr()
def test_scan_delete_history_scan_id_typeerror(api):
    with pytest.raises(TypeError):
        api.scans.delete_history('nope', 1)
@pytest.mark.vcr()
def test_scan_delete_history_history_id_typeerror(api):
    with pytest.raises(TypeError):
        api.scans.delete_history(1, 'nope')
@pytest.mark.vcr()
def test_scan_delete_history_notfounderror(api):
    with pytest.raises(NotFoundError):
        api.scans.delete_history(1, 1)
@pytest.mark.vcr()
def test_scan_details_scan_id_typeerror(api):
    with pytest.raises(TypeError):
        api.scans.details('nope')
# NOTE(review): 'history_it' is a typo for 'history_id'; kept to preserve
# the test id.
@pytest.mark.vcr()
def test_scan_details_history_it_typeerror(api):
    with pytest.raises(TypeError):
        api.scans.details(1, 'nope')
# ---------------------------------------------------------------------------
# Scan results: walk the whole results document and verify the type of
# every documented field in each sub-section.
# ---------------------------------------------------------------------------
@pytest.mark.vcr()
def test_scan_results(api, scan_results):
    assert isinstance(scan_results, dict)
    s = scan_results
    # info: top-level metadata about the scan run.
    check(s, 'info', dict)
    info = s['info']
    check(info, 'acls', list, allow_none=True)
    for i in s['info']['acls']:
        check(i, 'owner', int, allow_none=True)
        check(i, 'type', str, allow_none=True)
        check(i, 'permissions', int, allow_none=True)
        check(i, 'id', int, allow_none=True)
        check(i, 'name', str, allow_none=True)
        check(i, 'display_name', str, allow_none=True)
    check(info, 'schedule_uuid', 'scanner-uuid', allow_none=True)
    check(info, 'edit_allowed', bool)
    check(info, 'status', str)
    check(info, 'alt_targets_used', str, allow_none=True)
    check(info, 'scanner_start', int, allow_none=True)
    check(info, 'policy', str, allow_none=True)
    check(info, 'pci-can-upload', bool, allow_none=True)
    check(info, 'scan_start', int, allow_none=True)
    check(info, 'hasaudittrail', bool)
    check(info, 'user_permissions', int)
    check(info, 'folder_id', int, allow_none=True)
    check(info, 'no_target', bool)
    check(info, 'owner', str)
    check(info, 'targets', str, allow_none=True)
    check(info, 'control', bool)
    check(info, 'object_id', int)
    check(info, 'scanner_name', str, allow_none=True)
    check(info, 'uuid', str)
    check(info, 'haskb', bool)
    check(info, 'scanner_end', int, allow_none=True)
    check(info, 'scan_end', int)
    check(info, 'hostcount', int)
    check(info, 'scan_type', str, allow_none=True)
    check(info, 'name', str)
    # comphosts: per-host compliance summaries.
    check(s, 'comphosts', list)
    for i in s['comphosts']:
        check(i, 'totalchecksconsidered', int)
        check(i, 'numchecksconsidered', int)
        check(i, 'scanprogresstotal', int)
        check(i, 'scanprogresscurrent', int)
        check(i, 'host_index', int)
        check(i, 'score', int)
        check(i, 'severitycount', dict)
        check(i, 'progress', str)
        check(i, 'critical', int)
        check(i, 'high', int)
        check(i, 'medium', int)
        check(i, 'low', int)
        check(i, 'info', int)
        check(i, 'host_id', int)
        check(i, 'hostname', str)
    # hosts: per-host vulnerability summaries (same shape as comphosts).
    check(s, 'hosts', list)
    for i in s['hosts']:
        check(i, 'totalchecksconsidered', int)
        check(i, 'numchecksconsidered', int)
        check(i, 'scanprogresstotal', int)
        check(i, 'scanprogresscurrent', int)
        check(i, 'host_index', int)
        check(i, 'score', int)
        check(i, 'severitycount', dict)
        check(i, 'progress', str)
        check(i, 'critical', int)
        check(i, 'high', int)
        check(i, 'medium', int)
        check(i, 'low', int)
        check(i, 'info', int)
        check(i, 'host_id', int)
        check(i, 'hostname', str)
    check(s, 'notes', list)
    for i in s['notes']:
        check(i, 'title', str)
        check(i, 'message', str)
        check(i, 'severity', int)
    check(s, 'remediations', dict)
    check(s['remediations'], 'num_hosts', int)
    check(s['remediations'], 'num_cves', int)
    check(s['remediations'], 'num_impacted_hosts', int)
    check(s['remediations'], 'num_remediated_cves', int)
    check(s['remediations'], 'remediations', list)
    for i in s['remediations']['remediations']:
        check(i, 'value', str)
        check(i, 'remediation', str)
        check(i, 'hosts', int)
        check(i, 'vulns', int)
    check(s, 'vulnerabilities', list)
    for i in s['vulnerabilities']:
        check(i, 'count', int)
        check(i, 'plugin_name', str)
        check(i, 'vuln_index', int)
        check(i, 'severity', int)
        check(i, 'plugin_id', int)
        # Mentioned in the docs, however doesn't appear to show in testing
        #check(i, 'severity_index', int)
        check(i, 'plugin_family', str)
    check(s, 'history', list)
    for i in s['history']:
        check(i, 'alt_targets_used', bool)
        check(i, 'scheduler', int)
        check(i, 'status', str)
        check(i, 'type', str, allow_none=True)
        check(i, 'uuid', str)
        check(i, 'last_modification_date', int)
        check(i, 'creation_date', int)
        check(i, 'owner_id', int)
        check(i, 'history_id', int)
    check(s, 'compliance', list)
    for i in s['compliance']:
        check(i, 'count', int)
        check(i, 'plugin_name', str)
        check(i, 'vuln_index', int)
        check(i, 'severity', int)
        check(i, 'plugin_id', int)
        # Mentioned in the docs, however doesn't appear to show in testing
        #check(i, 'severity_index', int)
        check(i, 'plugin_family', str)
# ---------------------------------------------------------------------------
# Scans API tests: scans.export parameter validation and downloads, and
# scans.host_details.
# ---------------------------------------------------------------------------
@pytest.mark.vcr()
def test_scan_export_scan_id_typeerror(api):
    with pytest.raises(TypeError):
        api.scans.export('nope')
@pytest.mark.vcr()
def test_scan_export_history_id_typeerror(api):
    with pytest.raises(TypeError):
        api.scans.export(1, history_id='nope')
@pytest.mark.vcr()
def test_scan_export_format_typeerror(api):
    with pytest.raises(TypeError):
        api.scans.export(1, format=1)
@pytest.mark.vcr()
def test_scan_export_format_unexpectedvalueerror(api):
    with pytest.raises(UnexpectedValueError):
        api.scans.export(1, format='something else')
@pytest.mark.vcr()
def test_scan_export_password_typeerror(api):
    with pytest.raises(TypeError):
        api.scans.export(1, password=1)
@pytest.mark.vcr()
def test_scan_export_chapters_typeerror(api):
    with pytest.raises(TypeError):
        api.scans.export(1, chapters=1)
@pytest.mark.vcr()
def test_scan_export_chapters_unexpectedvalueerror(api):
    with pytest.raises(UnexpectedValueError):
        api.scans.export(1, chapters=['nothing to see here'])
@pytest.mark.vcr()
def test_scan_export_filter_type_typeerror(api):
    with pytest.raises(TypeError):
        api.scans.export(1, filter_type=1)
@pytest.mark.vcr()
def test_scan_export_filter_type_unexpectedvalueerror(api):
    with pytest.raises(UnexpectedValueError):
        api.scans.export(1, filter_type='nothing')
@pytest.mark.vcr()
def test_scan_export_bytesio(api):
    # Default export returns an in-memory BytesIO that parses as Nessus v2;
    # only read the first few records to keep the test fast.
    from io import BytesIO
    from tenable.reports.nessusv2 import NessusReportv2
    fobj = api.scans.export(SCAN_ID_WITH_RESULTS)
    assert isinstance(fobj, BytesIO)
    counter = 0
    for i in NessusReportv2(fobj):
        counter += 1
        if counter > 10:
            break
    @pytest.mark.vcr()
    def test_scan_export_file_object(api):
        from tenable.reports.nessusv2 import NessusReportv2
        fn = '{}.nessus'.format(uuid.uuid4())
        with open(fn, 'wb') as fobj:
            api.scans.export(SCAN_ID_WITH_RESULTS, fobj=fobj)
        with open(fn, 'rb') as fobj:
            counter = 0
            for i in NessusReportv2(fobj):
                counter += 1
                if counter > 10:
                    break
@pytest.mark.vcr()
def test_scan_host_details_scan_id_typeerror(api):
    with pytest.raises(TypeError):
        api.scans.host_details('nope', 1)
@pytest.mark.vcr()
def test_scan_host_details_host_id_typeerror(api):
    with pytest.raises(TypeError):
        api.scans.host_details(1, 'nope')
@pytest.mark.vcr()
def test_scan_host_details_history_id_typeerror(api):
    with pytest.raises(TypeError):
        api.scans.host_details(1, 1, 'nope')
@pytest.mark.vcr()
def test_scan_host_details_notfounderror(api):
    with pytest.raises(NotFoundError):
        api.scans.host_details(1, 1)
@pytest.mark.vcr()
def test_scan_host_details(api, scan_results):
    # Happy path: fetch details for the first host of the canned results.
    host = api.scans.host_details(
        SCAN_ID_WITH_RESULTS, scan_results['hosts'][0]['asset_id'])
    assert isinstance(host, dict)
    check(host, 'info', dict)
    check(host['info'], 'host-fqdn', str, allow_none=True)
    check(host['info'], 'host_end', str)
    check(host['info'], 'host_start', str)
    check(host['info'], 'operating-system', list)
    check(host['info'], 'host-ip', str)
    check(host['info'], 'mac-address', str, allow_none=True)
    check(host, 'vulnerabilities', list)
    for i in host['vulnerabilities']:
        check(i, 'count', int)
        check(i, 'severity', int)
        check(i, 'plugin_family', str)
        check(i, 'hostname', str)
        check(i, 'plugin_name', str)
        check(i, 'severity_index', int)
        check(i, 'vuln_index', int)
        check(i, 'host_id', int)
        check(i, 'plugin_id', int)
    check(host, 'compliance', list)
    for i in host['compliance']:
        check(i, 'count', int)
        check(i, 'plugin_name', str)
        check(i, 'vuln_index', int)
        check(i, 'severity', int)
        check(i, 'plugin_id', int)
        check(i, 'severity_index', int)
        check(i, 'plugin_family', str)
# ---------------------------------------------------------------------------
# Scans API tests: scans.import_scan, scans.launch, scans.list and
# scans.pause parameter validation.
# ---------------------------------------------------------------------------
@pytest.mark.vcr()
def test_scan_import_scan_folder_id_typeerror(api):
    with pytest.raises(TypeError):
        api.scans.import_scan(None, folder_id='nope')
@pytest.mark.vcr()
def test_scan_import_scan_password_typeerror(api):
    with pytest.raises(TypeError):
        api.scans.import_scan(None, password=1)
@pytest.mark.vcr()
def test_scan_import_scan(api):
    # Round-trip: export a known scan and re-import the resulting file object.
    fobj = api.scans.export(SCAN_ID_WITH_RESULTS)
    api.scans.import_scan(fobj)
@pytest.mark.vcr()
def test_scan_launch_scanid_typeerror(api):
    with pytest.raises(TypeError):
        api.scans.launch('nope')
# NOTE(review): 'typerror' is a typo for 'typeerror'; kept to preserve the
# test id.
@pytest.mark.vcr()
def test_scan_launch_targets_typerror(api):
    with pytest.raises(TypeError):
        api.scans.launch(1, targets='nope')
@pytest.mark.skip(reason="Switching between scan states can be tricky")
def test_scan_launch(api, scan):
    api.scans.launch(scan['id'])
    time.sleep(5)
    api.scans.stop(scan['id'], block=True)
@pytest.mark.skip(reason='Switching between scan states this quickly can be trixsy')
def test_scan_launch_alt_targets(api, scan):
    api.scans.launch(scan['id'], targets=['127.0.0.2'])
    time.sleep(5)
    api.scans.stop(scan['id'], block=True)
@pytest.mark.vcr()
def test_scan_list_folder_id_typeerror(api):
    with pytest.raises(TypeError):
        api.scans.list(folder_id='nope')
@pytest.mark.vcr()
def test_scan_list_last_modified_typeerror(api):
    with pytest.raises(TypeError):
        api.scans.list(last_modified='nope')
@pytest.mark.vcr()
def test_scan_list(api):
    # Happy path: every listed scan matches the documented schema (first
    # entry checked as representative).
    scans = api.scans.list()
    assert isinstance(scans, list)
    s = scans[0]
    check(s, 'control', bool)
    check(s, 'creation_date', int)
    check(s, 'enabled', bool)
    check(s, 'id', int)
    check(s, 'last_modification_date', int)
    check(s, 'legacy', bool)
    check(s, 'owner', str)
    check(s, 'name', str)
    check(s, 'permissions', int)
    check(s, 'read', bool)
    check(s, 'rrules', str, allow_none=True)
    check(s, 'schedule_uuid', 'scanner-uuid')
    check(s, 'shared', bool)
    check(s, 'starttime', str, allow_none=True)
    check(s, 'status', str)
    check(s, 'timezone', str, allow_none=True)
    check(s, 'user_permissions', int)
    check(s, 'uuid', 'scanner-uuid')
@pytest.mark.vcr()
def test_scan_pause_scan_id_typeerror(api):
    with pytest.raises(TypeError):
        api.scans.pause('nope')
@pytest.mark.skip(reason="Switching between scan states can be tricky")
def test_scan_pause_scan(api, scan):
hid = api.scans.launch(scan['id'])
# (stray extraction artifact)
lets the LRU be based on the
# popularity of an object, not its particular transaction. It also lets
# us use a LLBTree to store the data, which can be more memory efficient.
self._cache = None
# The {oid: tid} that we read from the cache.
# These are entries that we know are there, and if we see them
# change, we need to be sure to update that in the database,
# *even if they are evicted* and we would otherwise lose
# knowledge of them before we save. We do this by watching incoming
# TIDs; only if they were already in here do we continue to keep track.
# At write time, if we can't meet the requirement ourself, we at least
# make sure there are no stale entries in the cache database.
self._min_allowed_writeback = OidTMap()
self.flush_all()
compression_module = options.cache_local_compression
try:
compression_markers = self._compression_markers[compression_module]
except KeyError:
raise ValueError("Unknown compression module")
else:
self.__compression_marker = compression_markers[0]
self.__compress = compression_markers[1]
if self.__compress is None:
self._compress = None
    @property
    def size(self):
        # Total weight (bytes of cached state) as tracked by the LRU.
        return self._cache.size
    def __len__(self):
        """
        How many distinct OIDs are stored.

        Note this counts cache keys (OIDs), not the individual
        ``(state, tid)`` revisions an OID may hold.
        """
        return len(self._cache)
def __iter__(self):
for oid, lru_entry in iteroiditems(self._cache.data):
value = lru_entry.value
if isinstance(value, _MultipleValues):
for entry in value:
yield (oid, entry[1])
else:
yield (oid, value[1])
def _decompress(self, data):
pfx = data[:2]
if pfx not in self._decompression_functions:
return data
return self._decompression_functions[pfx](data[2:])
    def _compress(self, data): # pylint:disable=method-hidden
        """
        Compress *data* with the configured module, prefixed by its
        2-byte marker. Returns *data* unchanged when compression does
        not apply or does not actually shrink it.
        """
        # We override this if we're disabling compression
        # altogether.
        # Use the same basic rule as zc.zlibstorage, but bump the object size up from 20;
        # many smaller object (under 100 bytes) like you get with small btrees,
        # tend not to compress well, so don't bother.
        if data and (len(data) > 100) and data[:2] not in self._decompression_functions:
            compressed = self.__compression_marker + self.__compress(data)
            if len(compressed) < len(data):
                return compressed
        return data
    @_log_timed
    def save(self, object_index=None, checkpoints=None, **sqlite_args):
        """
        Write the cache contents to the persistent sqlite database,
        if a cache directory is configured and we hold any data.

        Returns 1 when a write was attempted, 0 on failure to open the
        database, and None (implicitly) when persistence is disabled
        or the cache is empty.
        """
        options = self.options
        if options.cache_local_dir and self.size:
            try:
                conn = sqlite_connect(options, self.prefix,
                                      **sqlite_args)
            except FAILURE_TO_OPEN_DB_EXCEPTIONS:
                # Persisting is best-effort; never break the caller.
                logger.exception("Failed to open sqlite to write")
                return 0
            with closing(conn):
                self.write_to_sqlite(conn, checkpoints, object_index)
            # Testing: Return a signal when we tried to write
            # something.
            return 1
def restore(self):
"""
Load the data from the persistent database.
Returns the checkpoint data last saved, which may be None if
there was no data.
"""
options = self.options
if options.cache_local_dir:
try:
conn = sqlite_connect(options, self.prefix, close_async=False)
except FAILURE_TO_OPEN_DB_EXCEPTIONS:
logger.exception("Failed to read data from sqlite")
return
with closing(conn):
return self.read_from_sqlite(conn)
@_log_timed
def remove_invalid_persistent_oids(self, bad_oids):
"""
Remove data from the persistent cache for the given oids.
"""
options = self.options
if not options.cache_local_dir:
return
count_removed = 0
conn = '(no oids to remove)'
if bad_oids:
conn = sqlite_connect(options, self.prefix, close_async=False)
with closing(conn):
db = Database.from_connection(conn)
count_removed = db.remove_invalid_persistent_oids(bad_oids)
logger.debug("Removed %d invalid OIDs from %s", count_removed, conn)
    def zap_all(self):
        """Destroy the on-disk sqlite cache files for this prefix."""
        _, destroy = sqlite_files(self.options, self.prefix)
        destroy()
    @staticmethod
    def key_weight(_):
        """Weight of a cache key: always 0 (keys are not counted)."""
        # All keys are equally weighted, and we don't count them.
        return 0
    @staticmethod
    def value_weight(value):
        """Weight of a cached value: its own size accounting."""
        # Values are the (state, actual_tid) pairs, or lists of the same, and their
        # weight is the size of the state
        return value.weight
    def flush_all(self):
        """
        Discard the entire in-memory cache, recreating the LRU and
        resetting hit/miss statistics and writeback tracking.
        """
        with self._lock:
            self._cache = self._cache_type(
                self.limit,
                key_weight=self.key_weight,
                value_weight=self.value_weight,
                empty_value=_SingleValue(b'', 0)
            )
            # Bind the hot-path cache methods directly on self so reads
            # avoid a repeated attribute lookup through self._cache.
            self._peek = self._cache.peek
            self._cache_mru = self._cache.__getitem__
            self._min_allowed_writeback = OidTMap()
            self.reset_stats()
    def reset_stats(self):
        """Zero the hit/miss/set counters and the aging bookkeeping."""
        self._hits = 0
        self._misses = 0
        self._sets = 0
        self._aged_at = 0
        # First aging pass happens after this many operations.
        self._next_age_at = 1000
def stats(self):
total = self._hits + self._misses
return {
'hits': self._hits,
'misses': self._misses,
'sets': self._sets,
'ratio': self._hits / total if total else 0,
'len': len(self),
'bytes': self.size,
'lru_stats': self._cache.stats(),
}
def __contains__(self, oid_tid):
oid, tid = oid_tid
entry = self._peek(oid)
return entry is not None and entry % tid is not None
    def get(self, oid_tid, update_mru=True):
        """
        Return the ``(state, tid)`` cached for the ``(oid, tid)`` key
        *oid_tid*, or None (implicitly) on a miss.

        :param update_mru: When true (the default), a hit also
            refreshes the entry's LRU position.
        """
        oid, tid = oid_tid
        assert tid is None or tid >= 0
        decompress = self._decompress
        value = None
        with self._lock:
            entry = self._peek(oid)
            if entry is not None:
                # ``entry % tid`` picks the revision visible at *tid*.
                value = entry % tid
            if value is not None:
                self._hits += 1
                if update_mru:
                    self._cache_mru(oid) # Make an actual hit.
            else:
                self._misses += 1
        # Finally, while not holding the lock, decompress if needed.
        # Recall that for deleted objects, `state` can be None.
        if value is not None:
            state, tid = value
            return decompress(state) if state else state, tid
    __getitem__ = get
    def _age(self):
        """
        Periodically decay the popularity counters of all cached
        entries so newly-hot objects can displace formerly-hot ones.

        Returns the operation count at which aging ran, or None
        (implicitly) when it was skipped.
        """
        # Age only when we're full and would thus need to evict; this
        # makes initial population faster. It's cheaper to calculate this
        # AFTER the operations, though, because we read it from C.
        #if self.size < self.limit:
        #    return
        # Age the whole thing periodically based on the number of
        # operations we've done that would have altered popularity.
        # Dynamically calculate how often we need to age. By default, this is
        # based on what Caffeine's PerfectFrequency does: 10 * max
        # cache entries
        age_period = self._age_factor * len(self._cache)
        operations = self._hits + self._sets
        if operations - self._aged_at < age_period:
            # Not enough activity yet; just reschedule.
            self._next_age_at = age_period
            return
        if self.size < self.limit:
            return
        self._aged_at = operations
        now = time.time()
        logger.debug("Beginning frequency aging for %d cache entries",
                     len(self._cache))
        self._cache.age_lists()
        done = time.time()
        logger.debug("Aged %d cache entries in %s", len(self._cache), done - now)
        self._next_age_at = int(self._aged_at * 1.5) # in case the dict shrinks
        return self._aged_at
    def __setitem__(self, oid_tid, state_bytes_tid):
        """
        Cache ``(state_bytes, actual_tid)`` under the ``(oid, tid)``
        key, compressing the state and merging it with any revisions
        already cached for that oid.
        """
        if not self.limit:
            # don't bother
            return
        # This used to allow non-byte values, but that's confusing
        # on Py3 and wasn't used outside of tests, so we enforce it.
        # A state of 'None' happens for undone transactions.
        oid, key_tid = oid_tid
        state_bytes, actual_tid = state_bytes_tid
        assert isinstance(state_bytes, bytes) or state_bytes is None, type(state_bytes)
        compress = self._compress
        cvalue = compress(state_bytes) if compress else state_bytes # pylint:disable=not-callable
        del state_bytes
        if cvalue and len(cvalue) >= self._value_limit:
            # This value is too big, so don't cache it.
            return
        # Really key_tid should be > 0; we allow >= for tests.
        assert key_tid == actual_tid and key_tid >= 0
        value = _SingleValue(cvalue, actual_tid)
        with self._lock:
            existing = self._peek(oid)
            if existing:
                # Merge with revisions already cached for this oid.
                existing += value
                value = existing
            self._cache[oid] = value # possibly evicts
            # The MAX_TID default means this only fires for oids already
            # tracked in the writeback map with a smaller tid.
            if actual_tid > self._min_allowed_writeback.get(oid, MAX_TID):
                self._min_allowed_writeback[oid] = actual_tid
            self._sets += 1
            # Do we need to move this up above the eviction choices?
            # Inline some of the logic about whether to age or not; avoiding the
            # call helps speed
            if self._hits + self._sets > self._next_age_at:
                self._age()
def set_all_for_tid(self, tid_int, state_oid_iter):
for state, oid_int, _ in state_oid_iter:
self[(oid_int, tid_int)] = (state, tid_int)
    def __delitem__(self, oid_tid):
        """
        Remove the revision of *oid* stored at *expected_tid*, if any,
        and remember the tid in the writeback-invalidation map.
        """
        oid, expected_tid = oid_tid
        with self._lock:
            entry = self._peek(oid)
            if entry is not None:
                # ``entry -= tid`` drops just that revision.
                entry -= expected_tid
                if not entry:
                    del self._cache[oid]
                else:
                    # XXX: Messing with LRU. We just want to update the
                    # value and size calculation.
                    self._cache[oid] = entry
            # MAX_TID default: only updates oids already being tracked.
            if expected_tid > self._min_allowed_writeback.get(oid, MAX_TID):
                self._min_allowed_writeback[oid] = expected_tid
    def invalidate_all(self, oids):
        """
        Drop every cached revision for each oid in *oids*, recording
        the newest dropped tid for writeback tracking.
        """
        with self._lock:
            min_allowed = self._min_allowed_writeback
            for oid in oids:
                entry = self._peek(oid)
                if entry:
                    del self._cache[oid]
                    tid = entry.max_tid
                    # MAX_TID default: only updates oids already tracked.
                    if tid > min_allowed.get(oid, MAX_TID):
                        min_allowed[oid] = tid
    def freeze(self, oids_tids):
        """
        For each ``oid -> tid`` in *oids_tids*, shift the cached entry
        to its frozen form (``entry <<= tid``): entries with nothing
        left are deleted, changed entries are re-stored.
        """
        # The idea is to *move* the data, or make it available,
        # *without* copying it.
        with self._lock:
            # This shuffles them around the LRU order. We probably don't actually
            # want to do that.
            store = self._cache.__setitem__
            delitem = self._cache.__delitem__
            peek = self._peek
            for oid, tid in oids_tids.items():
                orig = entry = peek(oid)
                if entry is not None:
                    entry <<= tid
                    if entry is None:
                        delitem(oid)
                    elif entry is not orig:
                        store(oid, entry)
    def close(self):
        """A no-op: this object holds no external resources to release."""
        pass
    # ``release`` is an alias with identical (no-op) behavior.
    release = close
    def new_instance(self):
        # Returns self: "new instances" share this same cache object.
        return self
    def updating_delta_map(self, deltas):
        # No wrapping is needed; the map is used as-is.
        return deltas
def _bulk_update(self, keys_and_values,
source='<unknown>',
log_count=None,
mem_usage_before=None):
"""
Insert all the ``(key, value)`` pairs found in *keys_and_values*.
This will permute the most-recently-used status of any existing entries.
Entries in the *keys_and_values* iterable should be returned from
least recent to most recent, as the items at the end will be considered to be
the most recent. (Alternately, you can think of them | |
from __future__ import print_function
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
import os
import os.path as op
import warnings
from nose.tools import assert_true, assert_raises, assert_equal
from copy import deepcopy
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_allclose)
from scipy import stats
from itertools import product
from mne import io, Epochs, read_events, pick_types
from mne.cov import read_cov
from mne.preprocessing import (ICA, ica_find_ecg_events, ica_find_eog_events,
read_ica, run_ica)
from mne.preprocessing.ica import score_funcs, _check_n_pca_components
from mne.io.meas_info import Info
from mne.utils import set_log_file, _TempDir, requires_sklearn, slow_test
warnings.simplefilter('always')  # enable b/c these tests throw warnings
# Paths to the test data shipped with the mne.io test suite.
data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(data_dir, 'test_raw.fif')
event_name = op.join(data_dir, 'test-eve.fif')
evoked_nf_name = op.join(data_dir, 'test-nf-ave.fif')
test_cov_name = op.join(data_dir, 'test-cov.fif')
# Common epoching parameters used by all tests below.
event_id, tmin, tmax = 1, -0.2, 0.2
start, stop = 0, 6  # if stop is too small pca may fail in some cases, but
# we're okay on this file
score_funcs_unsuited = ['pointbiserialr', 'ansari']
try:
    from sklearn.utils.validation import NonBLASDotWarning
    warnings.simplefilter('error', NonBLASDotWarning)
except Exception:
    # Best-effort: the warning class is not present in all sklearn
    # versions. Was a bare ``except:``, which also swallowed
    # SystemExit/KeyboardInterrupt; ``Exception`` keeps the intent.
    pass
@requires_sklearn
def test_ica_full_data_recovery():
    """Test recovery of full data when no source is rejected.

    When n_pca_components covers all channels, applying ICA with no
    exclusions must reproduce the input (raw, epochs, evoked) almost
    exactly; with fewer PCA components it must not.
    """
    # Most basic recovery
    raw = io.Raw(raw_fname, preload=True).crop(0, stop, False).crop(0.5)
    events = read_events(event_name)
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')[:10]
    epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True)
    evoked = epochs.average()
    n_channels = 5
    # Keep copies of the original data to compare against after apply().
    data = raw._data[:n_channels].copy()
    data_epochs = epochs.get_data()
    data_evoked = evoked.data
    for method in ['fastica']:
        # (n_components, n_pca_components, expect exact recovery?)
        stuff = [(2, n_channels, True), (2, n_channels // 2, False)]
        for n_components, n_pca_components, ok in stuff:
            ica = ICA(n_components=n_components,
                      max_pca_components=n_pca_components,
                      n_pca_components=n_pca_components,
                      method=method, max_iter=1)
            with warnings.catch_warnings(record=True):
                ica.fit(raw, picks=list(range(n_channels)))
            raw2 = ica.apply(raw, exclude=[], copy=True)
            if ok:
                assert_allclose(data[:n_channels], raw2._data[:n_channels],
                                rtol=1e-10, atol=1e-15)
            else:
                diff = np.abs(data[:n_channels] - raw2._data[:n_channels])
                assert_true(np.max(diff) > 1e-14)
            ica = ICA(n_components=n_components,
                      max_pca_components=n_pca_components,
                      n_pca_components=n_pca_components)
            with warnings.catch_warnings(record=True):
                ica.fit(epochs, picks=list(range(n_channels)))
            epochs2 = ica.apply(epochs, exclude=[], copy=True)
            data2 = epochs2.get_data()[:, :n_channels]
            if ok:
                assert_allclose(data_epochs[:, :n_channels], data2,
                                rtol=1e-10, atol=1e-15)
            else:
                diff = np.abs(data_epochs[:, :n_channels] - data2)
                assert_true(np.max(diff) > 1e-14)
            evoked2 = ica.apply(evoked, exclude=[], copy=True)
            data2 = evoked2.data[:n_channels]
            if ok:
                assert_allclose(data_evoked[:n_channels], data2,
                                rtol=1e-10, atol=1e-15)
            else:
                diff = np.abs(evoked.data[:n_channels] - data2)
                assert_true(np.max(diff) > 1e-14)
    # An unknown method name must be rejected at construction time.
    assert_raises(ValueError, ICA, method='pizza-decomposision')
@requires_sklearn
def test_ica_rank_reduction():
    """Test that applying ICA reduces the data rank as expected."""
    # Fit quickly (max_iter=1) on a cropped raw; convergence is not needed.
    raw = io.Raw(raw_fname, preload=True).crop(0, stop, False).crop(0.5)
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')[:10]
    n_components = 5
    max_pca_components = len(picks)
    for n_pca_components in [6, 10]:
        with warnings.catch_warnings(record=True):  # non-convergence
            warnings.simplefilter('always')
            ica = ICA(n_components=n_components,
                      max_pca_components=max_pca_components,
                      n_pca_components=n_pca_components,
                      method='fastica', max_iter=1).fit(raw, picks=picks)
        rank_before = raw.estimate_rank(picks=picks)
        assert_equal(rank_before, len(picks))
        raw_clean = ica.apply(raw, copy=True)
        rank_after = raw_clean.estimate_rank(picks=picks)
        # interaction between ICA rejection and PCA components difficult
        # to predict. rank_after often seems to be 1 higher than
        # n_pca_components
        assert_true(n_components < n_pca_components <= rank_after <=
                    rank_before)
@requires_sklearn
def test_ica_core():
    """Test ICA on raw and epochs.

    Exercises construction validation, fitting, re-fit stability,
    source extraction, and the preload/shape error paths.
    """
    raw = io.Raw(raw_fname, preload=True).crop(0, stop, False).crop(1.5)
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')
    # XXX. The None cases helped revealing bugs but are time consuming.
    test_cov = read_cov(test_cov_name)
    events = read_events(event_name)
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')
    epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True)
    noise_cov = [None, test_cov]
    # removed None cases to speed up...
    n_components = [2, 1.0]  # for future dbg add cases
    max_pca_components = [3]
    picks_ = [picks]
    methods = ['fastica']
    # Every combination of the parameter lists above is tested once.
    iter_ica_params = product(noise_cov, n_components, max_pca_components,
                              picks_, methods)
    # # test init catchers
    assert_raises(ValueError, ICA, n_components=3, max_pca_components=2)
    assert_raises(ValueError, ICA, n_components=2.3, max_pca_components=2)
    # test essential core functionality
    for n_cov, n_comp, max_n, pcks, method in iter_ica_params:
        # Test ICA raw
        ica = ICA(noise_cov=n_cov, n_components=n_comp,
                  max_pca_components=max_n, n_pca_components=max_n,
                  random_state=0, method=method, max_iter=1)
        print(ica)  # to test repr
        # test fit checker: sources are unavailable before fitting
        assert_raises(RuntimeError, ica.get_sources, raw)
        assert_raises(RuntimeError, ica.get_sources, epochs)
        # test decomposition
        with warnings.catch_warnings(record=True):
            ica.fit(raw, picks=pcks, start=start, stop=stop)
            repr(ica)  # to test repr
        # test re-fit: same random_state must give the same unmixing matrix
        unmixing1 = ica.unmixing_matrix_
        with warnings.catch_warnings(record=True):
            ica.fit(raw, picks=pcks, start=start, stop=stop)
        assert_array_almost_equal(unmixing1, ica.unmixing_matrix_)
        sources = ica.get_sources(raw)[:, :][0]
        assert_true(sources.shape[0] == ica.n_components_)
        # test preload filter
        raw3 = raw.copy()
        raw3.preload = False
        assert_raises(ValueError, ica.apply, raw3,
                      include=[1, 2])
        #######################################################################
        # test epochs decomposition
        ica = ICA(noise_cov=n_cov, n_components=n_comp,
                  max_pca_components=max_n, n_pca_components=max_n,
                  random_state=0)
        with warnings.catch_warnings(record=True):
            ica.fit(epochs, picks=picks)
        data = epochs.get_data()[:, 0, :]
        n_samples = np.prod(data.shape)
        assert_equal(ica.n_samples_, n_samples)
        print(ica)  # to test repr
        sources = ica.get_sources(epochs).get_data()
        assert_true(sources.shape[1] == ica.n_components_)
        assert_raises(ValueError, ica.score_sources, epochs,
                      target=np.arange(1))
        # test preload filter
        epochs3 = epochs.copy()
        epochs3.preload = False
        assert_raises(ValueError, ica.apply, epochs3,
                      include=[1, 2])
        # test for bug with whitener updating: apply() must not mutate it
        _pre_whitener = ica._pre_whitener.copy()
        epochs._data[:, 0, 10:15] *= 1e12
        ica.apply(epochs, copy=True)
        assert_array_equal(_pre_whitener, ica._pre_whitener)
    # test expl. var threshold leading to empty sel
    ica.n_components = 0.1
    assert_raises(RuntimeError, ica.fit, epochs)
    # Objects that are not Raw/Epochs/Evoked must be rejected.
    offender = 1, 2, 3,
    assert_raises(ValueError, ica.get_sources, offender)
    assert_raises(ValueError, ica.fit, offender)
    assert_raises(ValueError, ica.apply, offender)
@slow_test
@requires_sklearn
def test_ica_additional():
"""Test additional ICA functionality"""
tempdir = _TempDir()
stop2 = 500
raw = io.Raw(raw_fname, preload=True).crop(0, stop, False).crop(1.5)
picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')
test_cov = read_cov(test_cov_name)
events = read_events(event_name)
picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True)
# test if n_components=None works
with warnings.catch_warnings(record=True):
ica = ICA(n_components=None,
max_pca_components=None,
n_pca_components=None, random_state=0)
ica.fit(epochs, picks=picks, decim=3)
# for testing eog functionality
picks2 = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=True, exclude='bads')
epochs_eog = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks2,
baseline=(None, 0), preload=True)
test_cov2 = deepcopy(test_cov)
ica = ICA(noise_cov=test_cov2, n_components=3, max_pca_components=4,
n_pca_components=4)
assert_true(ica.info is None)
with warnings.catch_warnings(record=True):
ica.fit(raw, picks[:5])
assert_true(isinstance(ica.info, Info))
assert_true(ica.n_components_ < 5)
ica = ICA(n_components=3, max_pca_components=4,
n_pca_components=4)
assert_raises(RuntimeError, ica.save, '')
with warnings.catch_warnings(record=True):
ica.fit(raw, picks=None, start=start, stop=stop2)
# test warnings on bad filenames
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
ica_badname = op.join(op.dirname(tempdir), 'test-bad-name.fif.gz')
ica.save(ica_badname)
read_ica(ica_badname)
assert_true(len(w) == 2)
# test decim
ica = ICA(n_components=3, max_pca_components=4,
n_pca_components=4)
raw_ = raw.copy()
for _ in range(3):
raw_.append(raw_)
n_samples = raw_._data.shape[1]
with warnings.catch_warnings(record=True):
ica.fit(raw, picks=None, decim=3)
assert_true(raw_._data.shape[1], n_samples)
# test expl var
ica = ICA(n_components=1.0, max_pca_components=4,
n_pca_components=4)
with warnings.catch_warnings(record=True):
ica.fit(raw, picks=None, decim=3)
assert_true(ica.n_components_ == 4)
# epochs extraction from raw fit
assert_raises(RuntimeError, ica.get_sources, epochs)
# test reading and writing
test_ica_fname = op.join(op.dirname(tempdir), 'test-ica.fif')
for cov in (None, test_cov):
ica = ICA(noise_cov=cov, n_components=2, max_pca_components=4,
n_pca_components=4)
with warnings.catch_warnings(record=True): # ICA does not converge
ica.fit(raw, picks=picks, start=start, stop=stop2)
sources = ica.get_sources(epochs).get_data()
assert_true(ica.mixing_matrix_.shape == (2, 2))
assert_true(ica.unmixing_matrix_.shape == (2, 2))
assert_true(ica.pca_components_.shape == (4, len(picks)))
assert_true(sources.shape[1] == ica.n_components_)
for exclude in [[], [0]]:
ica.exclude = [0]
ica.save(test_ica_fname)
ica_read = read_ica(test_ica_fname)
assert_true(ica.exclude == ica_read.exclude)
ica.exclude = []
ica.apply(raw, exclude=[1])
assert_true(ica.exclude == [])
ica.exclude = [0, 1]
ica.apply(raw, exclude=[1])
assert_true(ica.exclude == [0, 1])
ica_raw = ica.get_sources(raw)
assert_true(ica.exclude == [ica_raw.ch_names.index(e) for e in
ica_raw.info['bads']])
# test filtering
d1 = ica_raw._data[0].copy()
with warnings.catch_warnings(record=True): # dB warning
ica_raw.filter(4, 20)
assert_true((d1 != ica_raw._data[0]).any())
d1 = ica_raw._data[0].copy()
with warnings.catch_warnings(record=True): # dB warning
ica_raw.notch_filter([10])
assert_true((d1 != ica_raw._data[0]).any())
ica.n_pca_components = 2
ica.save(test_ica_fname)
ica_read = read_ica(test_ica_fname)
assert_true(ica.n_pca_components == ica_read.n_pca_components)
# check type consistency
attrs = ('mixing_matrix_ unmixing_matrix_ pca_components_ '
'pca_explained_variance_ _pre_whitener')
f = lambda x, y: getattr(x, y).dtype
for attr in attrs.split():
assert_equal(f(ica_read, attr), f(ica, attr))
ica.n_pca_components = 4
ica_read.n_pca_components = 4
ica.exclude = []
ica.save(test_ica_fname)
ica_read = read_ica(test_ica_fname)
for attr in ['mixing_matrix_', 'unmixing_matrix_', 'pca_components_',
'pca_mean_', 'pca_explained_variance_',
'_pre_whitener']:
assert_array_almost_equal(getattr(ica, attr),
getattr(ica_read, attr))
assert_true(ica.ch_names == ica_read.ch_names)
assert_true(isinstance(ica_read.info, Info))
sources = ica.get_sources(raw)[:, :][0]
sources2 = ica_read.get_sources(raw)[:, :][0]
assert_array_almost_equal(sources, sources2)
_raw1 = ica.apply(raw, exclude=[1])
_raw2 = ica_read.apply(raw, exclude=[1])
assert_array_almost_equal(_raw1[:, :][0], _raw2[:, :][0])
os.remove(test_ica_fname)
# check scrore funcs
for name, func in score_funcs.items():
if name in score_funcs_unsuited:
continue
scores = ica.score_sources(raw, target='EOG 061', score_func=func,
start=0, stop=10)
assert_true(ica.n_components_ == len(scores))
# check univariate stats
scores = ica.score_sources(raw, score_func=stats.skew)
# check exception handling
assert_raises(ValueError, ica.score_sources, raw,
target=np.arange(1))
params = []
params += [(None, -1, slice(2), [0, 1])] # varicance, kurtosis idx params
params += [(None, 'MEG 1531')] # ECG / EOG channel params
for idx, ch_name in product(*params):
ica.detect_artifacts(raw, start_find=0, stop_find=50, ecg_ch=ch_name,
eog_ch=ch_name, skew_criterion=idx,
var_criterion=idx, kurt_criterion=idx)
with warnings.catch_warnings(record=True):
idx, scores = ica.find_bads_ecg(raw, method='ctps')
assert_equal(len(scores), ica.n_components_)
idx, scores = ica.find_bads_ecg(raw, method='correlation')
assert_equal(len(scores), ica.n_components_)
idx, scores = ica.find_bads_ecg(epochs, method='ctps')
assert_equal(len(scores), ica.n_components_)
assert_raises(ValueError, ica.find_bads_ecg, epochs.average(),
method='ctps')
assert_raises(ValueError, ica.find_bads_ecg, raw,
method='crazy-coupling')
idx, scores = ica.find_bads_eog(raw)
assert_equal(len(scores), ica.n_components_)
raw.info['chs'][raw.ch_names.index('EOG 061') - 1]['kind'] = 202
idx, scores = ica.find_bads_eog(raw)
assert_true(isinstance(scores, list))
assert_equal(len(scores[0]), ica.n_components_)
# check score funcs
for name, func in score_funcs.items():
if name in score_funcs_unsuited:
continue
scores = ica.score_sources(epochs_eog, target='EOG 061',
score_func=func)
assert_true(ica.n_components_ == len(scores))
# check univariate stats
scores = ica.score_sources(epochs, score_func=stats.skew)
# check exception handling
assert_raises(ValueError, ica.score_sources, epochs,
target=np.arange(1))
# | |
import os
import pytest
from mock import MagicMock, Mock, call, patch
import AndroidRunner.Adb as Adb
from AndroidRunner.Device import Device
from AndroidRunner.Devices import Devices
from AndroidRunner.util import ConfigError
class TestDevice(object):
    @pytest.fixture()
    @patch('AndroidRunner.Adb.connect')
    def device(self, adb_connect):
        """Plain Device fixture; Adb.connect is stubbed so no adb is required."""
        adb_connect.return_value = None
        name = 'fake_device'
        device_id = 123456789
        device_settings = {}
        return Device(name, device_id, device_settings)
    @pytest.fixture()
    @patch('AndroidRunner.Adb.connect')
    def device_root(self, adb_connect):
        """Device fixture configured for root-based charging control."""
        adb_connect.return_value = None
        name = 'fake_device'
        device_id = 123456789
        device_settings = {'root_disable_charging': True,
                           'charging_disabled_value': '0', 'usb_charging_disabled_file': 'test/file'}
        return Device(name, device_id, device_settings)
    @pytest.fixture()
    @patch('AndroidRunner.Adb.connect')
    def device_with_app_settings(self, adb_connect):
        """Device fixture with per-app device_settings_reqs configured."""
        adb_connect.return_value = None
        name = 'fake_device'
        device_id = 123456789
        device_settings = {"device_settings_reqs": {"app1": ["setting_1"], "app2": ["setting_1", "setting_2"]}}
        return Device(name, device_id, device_settings)
    @patch('AndroidRunner.Adb.connect')
    def test_init(self, adb_connect):
        """The constructor stores all settings and connects via adb once."""
        name = 'fake_device'
        device_id = 123456789
        device_settings = {'root_disable_charging': True,
                           'charging_disabled_value': '0',
                           'usb_charging_disabled_file': 'test/file',
                           'power_device': {
                               'script_path': 'fake/path',
                               'py_path': 'python',
                               'vout': '3',
                               'serial_num': '23'
                           },
                           'device_settings_reqs': {'app1': ['a, b'], 'app2': ['c']}
                           }
        device = Device(name, device_id, device_settings)
        assert device.name == name
        assert device.id == device_id
        assert device.root_plug_value is None
        assert device.root_unplug_file == 'test/file'
        assert device.root_unplug_value == '0'
        assert device.root_unplug is True
        assert device.power_device is not None
        assert device.power_device['script_path'] == 'fake/path'
        assert device.power_device['py_path'] == 'python'
        assert device.device_settings_reqs == {'app1': ['a, b'], 'app2': ['c']}
        adb_connect.assert_called_once_with(device_id)
    @patch('AndroidRunner.Adb.configure_settings')
    @patch('logging.Logger.info')
    def test_configure_settings_device(self, logger, configure_settings, device_with_app_settings):
        """Settings are applied per app; None/unknown apps trigger no calls."""
        device_with_app_settings.configure_settings_device("app1")
        logger.assert_called_with('Enabling setting_1')
        configure_settings.assert_called_with(device_with_app_settings.id, "setting_1",True)
        device_with_app_settings.configure_settings_device("app2")
        logger.assert_called_with('Enabling setting_2')
        # Neither None nor an unlisted app may add configure_settings calls:
        # total stays at 3 (1 for app1 + 2 for app2).
        device_with_app_settings.configure_settings_device(None)
        device_with_app_settings.configure_settings_device("app3")
        assert configure_settings.call_count == 3
@patch('AndroidRunner.Adb.shell')
def test_get_version(self, adb_shell, device):
adb_shell.return_value = 9
version = device.get_version()
assert version == 9
adb_shell.assert_called_once_with(123456789, 'getprop ro.build.version.release')
@patch('AndroidRunner.Adb.shell')
def test_get_api_level(self, adb_shell, device):
adb_shell.return_value = 28
level = device.get_api_level()
assert level == 28
adb_shell.assert_called_once_with(123456789, 'getprop ro.build.version.sdk')
@patch('AndroidRunner.Device.Device.get_app_list')
def test_is_installed(self, get_app_list, device):
get_app_list.return_value = ['app1', 'app2', 'installed_app']
result_installed = device.is_installed(['app3', 'installed_app', 'app4'])
assert len(result_installed) == 3
assert 'app3' in result_installed and not result_installed['app3']
assert 'app4' in result_installed and not result_installed['app4']
assert 'installed_app' in result_installed and result_installed['installed_app']
@patch('AndroidRunner.Adb.list_apps')
def test_get_app_list(self, adb_list_apps, device):
adb_list_apps.return_value = ['app1', 'app2', 'app3']
app_list = device.get_app_list()
assert app_list == ['app1', 'app2', 'app3']
    @patch('AndroidRunner.Adb.install')
    def test_install_file_not_exist(self, adb_install, device):
        """A missing apk raises AdbError before any adb install happens."""
        with pytest.raises(Adb.AdbError):
            device.install('fake.apk')
        assert adb_install.call_count == 0
    @patch('os.path.isfile')
    @patch('AndroidRunner.Adb.install')
    def test_install_file_exist(self, adb_install, os_isfile, device):
        """With the apk present on disk, install delegates to Adb.install."""
        os_isfile.return_value = True
        device.install('fake.apk')
        adb_install.assert_called_once_with(123456789, 'fake.apk')
@patch('AndroidRunner.Adb.uninstall')
def test_uninstall(self, adb_uninstall, device):
app_name = 'fake_app'
device.uninstall(app_name)
adb_uninstall.assert_called_once_with(123456789, app_name)
    @patch('AndroidRunner.Device.Device.su_unplug')
    @patch('AndroidRunner.Device.Device.get_api_level')
    @patch('AndroidRunner.Adb.shell')
    def test_unplug_api_lower_23_no_root(self, adb_shell, get_api_level, su_unplug, device):
        """Without root on API < 23, unplug uses 'dumpsys battery set usb 0'."""
        get_api_level.return_value = 22
        device.unplug(False)
        assert su_unplug.call_count == 0
        adb_shell.assert_called_once_with(123456789, 'dumpsys battery set usb 0')
    @patch('AndroidRunner.Device.Device.su_unplug')
    @patch('AndroidRunner.Device.Device.get_api_level')
    @patch('AndroidRunner.Adb.shell')
    def test_unplug_api_higher_equal_23_no_root(self, adb_shell, get_api_level, su_unplug, device):
        """Without root on API >= 23, unplug uses 'dumpsys battery unplug'."""
        get_api_level.return_value = 23
        device.unplug(False)
        assert su_unplug.call_count == 0
        adb_shell.assert_called_once_with(123456789, 'dumpsys battery unplug')
    @patch('AndroidRunner.Device.Device.su_unplug')
    @patch('AndroidRunner.Device.Device.get_api_level')
    @patch('AndroidRunner.Adb.shell')
    def test_unplug_api_lower_23_root(self, adb_shell, get_api_level, su_unplug, device_root):
        """A rooted device on API < 23 unplugs via su, never via dumpsys."""
        get_api_level.return_value = 22
        device_root.unplug(False)
        su_unplug.assert_called_once_with(False)
        assert adb_shell.call_count == 0
    @patch('AndroidRunner.Device.Device.su_unplug')
    @patch('AndroidRunner.Device.Device.get_api_level')
    @patch('AndroidRunner.Adb.shell')
    def test_unplug_api_lower_23_root_restart(self, adb_shell, get_api_level, su_unplug, device_root):
        """The restart flag is forwarded to su_unplug on API < 23."""
        get_api_level.return_value = 22
        device_root.unplug(True)
        su_unplug.assert_called_once_with(True)
        assert adb_shell.call_count == 0
    @patch('AndroidRunner.Device.Device.su_unplug')
    @patch('AndroidRunner.Device.Device.get_api_level')
    @patch('AndroidRunner.Adb.shell')
    def test_unplug_api_higher_equal_23_root(self, adb_shell, get_api_level, su_unplug, device_root):
        """A rooted device on API >= 23 unplugs via su, never via dumpsys."""
        get_api_level.return_value = 23
        device_root.unplug(False)
        su_unplug.assert_called_once_with(False)
        assert adb_shell.call_count == 0
    @patch('AndroidRunner.Device.Device.su_unplug')
    @patch('AndroidRunner.Device.Device.get_api_level')
    @patch('AndroidRunner.Adb.shell')
    def test_unplug_api_higher_equal_23_root_restart(self, adb_shell, get_api_level, su_unplug, device_root):
        """The restart flag is forwarded to su_unplug on API >= 23."""
        get_api_level.return_value = 23
        device_root.unplug(True)
        su_unplug.assert_called_once_with(True)
        assert adb_shell.call_count == 0
    @patch('AndroidRunner.Device.Device.check_plug_value')
    @patch('AndroidRunner.Adb.shell_su')
    def test_su_unplug_no_error(self, shell_su, check_plug_value, device_root):
        """Happy path: the current value is read, then the disable value written."""
        # First su call answers 'cat <file>', second is the 'echo ... > <file>'.
        shell_su.side_effect = ['default_return', '']
        device_root.su_unplug(False)
        expected_calls = [call(device_root.id, 'cat %s' % device_root.root_unplug_file),
                          call(device_root.id, 'echo %s > %s' %
                               (device_root.root_unplug_value, device_root.root_unplug_file))]
        assert shell_su.mock_calls == expected_calls
        assert device_root.root_plug_value == 'default_return'
        # restart=False: the plug value is not validated.
        assert check_plug_value.call_count == 0
    @patch('AndroidRunner.Device.Device.check_plug_value')
    @patch('AndroidRunner.Adb.shell_su')
    def test_su_unplug_not_rooted(self, shell_su, check_plug_value, device_root):
        """If su is unavailable, su_unplug raises AdbError after the first call."""
        shell_su.side_effect = ['su: not found', 'default_return', 'No such file or directory']
        with pytest.raises(Adb.AdbError):
            device_root.su_unplug(False)
        # Only the initial 'cat' happened; no write was attempted.
        expected_calls = [call(device_root.id, 'cat test/file')]
        assert shell_su.mock_calls == expected_calls
        assert device_root.root_plug_value == 'su: not found'
        assert check_plug_value.call_count == 0
@patch('AndroidRunner.Device.Device.check_plug_value')
@patch('AndroidRunner.Adb.shell_su')
def test_su_unplug_invalid_root_unplug_file(self, adb_shell, check_plug_value, device_root):
adb_shell.side_effect = ['No such file or directory', '']
with pytest.raises(ConfigError):
device_root.su_unplug(False)
expected_calls = [call(device_root.id, 'cat %s' % device_root.root_unplug_file)]
assert adb_shell.mock_calls == expected_calls
assert device_root.root_plug_value == 'No such file or directory'
assert check_plug_value.call_count == 0
    @patch('AndroidRunner.Device.Device.check_plug_value')
    @patch('AndroidRunner.Adb.shell_su')
    def test_su_unplug_restart(self, shell_su, check_plug_value, device_root):
        """restart=True performs the same writes and also validates the plug value."""
        shell_su.side_effect = ['default_return', '']
        device_root.su_unplug(True)
        expected_calls = [call(device_root.id, 'cat %s' % device_root.root_unplug_file),
                          call(device_root.id, 'echo %s > %s' %
                               (device_root.root_unplug_value, device_root.root_unplug_file))]
        assert shell_su.mock_calls == expected_calls
        assert device_root.root_plug_value == 'default_return'
        check_plug_value.assert_called_once()
    def test_check_plug_value_no_action(self, device_root):
        """Distinct string plug/unplug values are left untouched."""
        device_root.root_plug_value = 'enabled'
        device_root.root_unplug_value = 'disabled'
        device_root.check_plug_value()
        assert device_root.root_plug_value == 'enabled'
        assert device_root.root_unplug_value == 'disabled'
    def test_check_plug_value_unplug_plug_int_no_match(self, device_root):
        """Distinct integer plug/unplug values are left untouched."""
        device_root.root_plug_value = 1
        device_root.root_unplug_value = 0
        device_root.check_plug_value()
        assert device_root.root_plug_value == 1
        assert device_root.root_unplug_value == 0
    @patch('logging.Logger.info')
    def test_check_plug_value_unplug_int_plug_string_no_match(self, logger, device_root):
        """Mixed types keep their values but log a warning for manual review."""
        device_root.root_plug_value = 'enabled'
        device_root.root_unplug_value = 0
        device_root.check_plug_value()
        assert device_root.root_plug_value == 'enabled'
        assert device_root.root_unplug_value == 0
        logger.assert_called_once_with('Error setting root plug value, '
                                       'check manually after experiment if charging is enabled')
def test_check_plug_value_same_plug_unplug_int(self, device_root):
device_root.root_plug_value = 0
device_root.root_unplug_value = 0
device_root.check_plug_value()
assert device_root.root_plug_value == 1
assert device_root.root_unplug_value == 0
def test_check_plug_value_same_plug_unplug_string_set_enabled(self, device_root):
device_root.root_plug_value = 'disabled'
device_root.root_unplug_value = 'disabled'
device_root.check_plug_value()
assert device_root.root_plug_value == 'enabled'
assert device_root.root_unplug_value == 'disabled'
def test_check_plug_value_same_plug_unplug_string_set_disabled(self, device_root):
device_root.root_plug_value = 'enabled'
device_root.root_unplug_value = 'enabled'
device_root.check_plug_value()
assert device_root.root_plug_value == 'disabled'
assert device_root.root_unplug_value == 'enabled'
@patch('AndroidRunner.Device.Device.su_plug')
@patch('AndroidRunner.Adb.shell')
def test_plug_no_root(self, adb_shell, su_plug, device):
device.plug()
assert su_plug.call_count == 0
adb_shell.assert_called_once_with(123456789, 'dumpsys battery reset')
@patch('AndroidRunner.Device.Device.su_plug')
@patch('AndroidRunner.Adb.shell')
def test_plug_root(self, adb_shell, su_plug, device_root):
device_root.plug()
su_plug.assert_called_once()
adb_shell.assert_called_once_with(123456789, 'dumpsys battery reset')
@patch('AndroidRunner.Adb.shell_su')
def test_su_plug(self, adb_shell_su, device_root):
device_root.root_plug_value = '123456'
device_root.su_plug()
adb_shell_su.assert_called_once_with(123456789, 'echo 123456 > test/file')
@patch('AndroidRunner.Adb.shell')
def test_current_activity_current_focus(self, adb_shell, device):
adb_shell.return_value = 'mCurrentFocus=Window{28d47066 u0 com.android.chrome/org.chromium.chrome.browser.' \
'ChromeTabbedActivity}\nmFocusedApp=Window{3078b3ad u0 com.sonyericsson.usbux/com.' \
'sonyericsson.usbux.settings.ConnectivitySettingsActivity}'
current_activity = device.current_activity()
assert current_activity == 'com.android.chrome'
@patch('AndroidRunner.Adb.shell')
def test_current_activity_focused_app(self, adb_shell, device):
adb_shell.return_value = 'mFocusedApp=AppWindowToken{<PASSWORD> token=Token{<PASSWORD> ActivityRecord{2<PASSWORD> u0 ' \
'com.android.chrome/org.chromium.chrome.browser.ChromeTabbedActivity t25385}}}'
current_activity = device.current_activity()
assert current_activity == 'com.android.chrome'
@patch('AndroidRunner.Adb.shell')
def test_current_activity_none(self, adb_shell, device):
adb_shell.return_value = 'mFocusedApp=null'
current_activity = device.current_activity()
assert current_activity is None
@patch('AndroidRunner.Adb.shell')
def test_current_activity_error(self, adb_shell, device):
adb_shell.return_value = 'mFocusedApp=ajislvfhbljhglalkjasfdhdhg'
with pytest.raises(Adb.AdbError):
device.current_activity()
@patch('AndroidRunner.Adb.shell')
def test_launch_package_succes(self, adb_shell, device):
package = 'fake.test.package'
adb_shell.return_value = 'successsss'
device.launch_package(package)
adb_shell.assert_called_once_with(123456789, 'monkey -p {} 1'.format(package))
@patch('AndroidRunner.Adb.shell')
def test_launch_package_failure(self, adb_shell, device):
package = 'fake.test.package'
adb_shell.return_value = 'error error error monkey aborted error'
with pytest.raises(Adb.AdbError):
device.launch_package(package)
adb_shell.assert_called_once_with(123456789, 'monkey -p {} 1'.format(package))
@patch('AndroidRunner.Adb.shell')
def test_launch_activity(self, adb_shell, device):
package = 'fake.test.package'
activity = 'main'
device.launch_activity(package, activity)
adb_shell.assert_called_once_with(123456789, 'am start -n {}/{}'.format(package, activity))
@patch('AndroidRunner.Adb.shell')
def test_launch_activity_force_stop(self, adb_shell, device):
package = 'fake.test.package'
activity = 'main'
device.launch_activity(package, activity, force_stop=True)
adb_shell.assert_called_once_with(123456789, 'am start -S -n {}/{}'.format(package, activity))
@patch('AndroidRunner.Adb.shell')
def test_launch_activity_action(self, adb_shell, device):
package = 'fake.test.package'
activity = 'main'
device.launch_activity(package, activity, action='action')
adb_shell.assert_called_once_with(123456789, 'am start -a {} -n {}/{}'.format('action', package, activity))
@patch('AndroidRunner.Adb.shell')
def test_launch_activity_data_uri(self, adb_shell, device):
package = 'fake.test.package'
activity = 'main'
device.launch_activity(package, activity, data_uri='data.uri')
adb_shell.assert_called_once_with(123456789, 'am start -n {}/{} -d {}'.format(package, activity, 'data.uri'))
@patch('AndroidRunner.Adb.shell')
def test_launch_activity_from_scratch(self, adb_shell, device):
package = 'fake.test.package'
activity = 'main'
device.launch_activity(package, activity, from_scratch=True)
adb_shell.assert_called_once_with(123456789,
'am start -n {}/{} --activity-clear-task'.format(package, activity))
@patch('AndroidRunner.Adb.shell')
def test_force_stop(self, adb_shell, device):
name = 'fake_app'
device.force_stop(name)
adb_shell.assert_called_once_with(123456789, 'am force-stop {}'.format(name))
@patch('AndroidRunner.Adb.clear_app_data')
def test_clear_app_data(self, adb_clear_app_data, device):
name = 'fake_app'
device.clear_app_data(name)
adb_clear_app_data.assert_called_once_with(123456789, name)
@patch('AndroidRunner.Adb.logcat')
def test_logcat_to_file(self, adb_logcat, device, tmpdir):
path = os.path.join(str(tmpdir), 'logcat')
logcat_result = "test file content: 123dsfg564sdfhg"
adb_logcat.return_value = logcat_result
device.logcat_to_file(path)
files_in_path = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))]
assert len(files_in_path) == 1
with open(os.path.join(path, files_in_path[0]), 'r') as fl:
file_content = fl.read()
assert file_content == logcat_result
adb_logcat.assert_called_once_with(123456789)
@patch('AndroidRunner.Adb.logcat')
def test_logcat_regex(self, adb_logcat, device):
logcat_result = "test result 123dsfg564sdfhg"
adb_logcat.return_value = logcat_result
fake_regex = 'auiashdfdfv'
result = device.logcat_regex(fake_regex)
adb_logcat.assert_called_once_with(123456789, regex=fake_regex)
assert result == logcat_result
@patch('AndroidRunner.Adb.push')
def test_push(self, adb_push, device):
adb_push.return_value = 'pushpush'
local_path = 'test/local/path'
remote_path = 'test/remote/path'
result = device.push(local_path, remote_path)
adb_push.assert_called_once_with(123456789, local_path, remote_path)
assert result == 'pushpush'
@patch('AndroidRunner.Adb.pull')
def test_pull(self, adb_pull, device):
adb_pull.return_value = 'pullpull'
local_path = 'test/local/path'
remote_path = 'test/remote/path'
result = device.pull(local_path, remote_path)
adb_pull.assert_called_once_with(123456789, local_path, remote_path)
assert result == 'pullpull'
@patch('AndroidRunner.Adb.shell')
def test_shell(self, adb_shell, device):
adb_shell.return_value = 'shell return value'
shell_command = 'dumpsys battery set usb 1'
result = device.shell(shell_command)
adb_shell.assert_called_once_with(123456789, shell_command)
assert result == 'shell return value'
@patch('AndroidRunner.Device.Device.get_api_level')
@patch('AndroidRunner.Device.Device.get_version')
def test_str(self, get_version, get_api_level, device):
get_version.return_value = 9
get_api_level.return_value = 28
device_string = str(device)
assert device_string == 'fake_device (123456789, Android 9, API level 28)'
class TestDevices(object):
    @pytest.fixture()
    @patch('AndroidRunner.Devices.load_json')
    @patch('AndroidRunner.Adb.setup')
    def devices(self, adb_setup, load_json):
        """Fixture: a Devices instance built from an empty device mapping."""
        adb_setup.return_value = None
        load_json.return_value = {}
        return Devices([])
@patch('AndroidRunner.Devices.load_json')
@patch('AndroidRunner.Adb.setup')
def test_init_error(self, adb_setup, load_json):
load_json.return_value = {}
with pytest.raises(ConfigError):
Devices(['fake_device'])
adb_setup.assert_called_once_with('adb')
@patch('AndroidRunner.Device.Device.__init__')
@patch('AndroidRunner.Devices.load_json')
@patch('AndroidRunner.Adb.setup')
def test_init_succes(self, adb_setup, load_json, device):
device.return_value = None
load_json.return_value = {'fake_device': 123456789}
mock_device_settings = Mock()
devices = Devices({'fake_device': mock_device_settings}, 'adb/path')
adb_setup.assert_called_once_with('adb/path')
device.assert_called_once_with('fake_device', 123456789, mock_device_settings)
assert len(devices.devices) == 1
assert isinstance(devices.devices[0], Device)
def test_iter(self, devices):
test_list = [1, 2, 3, 4, 5, 6, 7, 8, 9]
devices.devices = test_list
result_list = []
for n in devices:
result_list.append(n)
| |
1.0})
r = model.reactions.ETOHtex.copy()
r.id = 'IPtex'
r.name = 'Isopentenol transport via diffusion (extracellular to periplasm)'
r.gene_reaction_rule = ''
model.add_reactions([r])
r.add_metabolites({'etoh_e': 1.0, 'etoh_p': -1.0, 'isoprenol_e': -1.0, 'isoprenol_p': 1.0})
# Add a boundary reaction
r = model.reactions.EX_etoh_e.copy()
r.id = 'EX_isoprenol_e'
r.name = 'Isopentenol exchange'
r.gene_reaction_rule = ''
model.add_reactions([r])
r.add_metabolites({'etoh_e': 1.0, 'isoprenol_e': -1.0})
# Write model to files
outputfilename = user_params['modelfile'].split('.')[0] + '_IPP.json'
cobra.io.save_json_model(model, f'data/{outputfilename}')
return model
#=============================================================================
class Ropacus():
    def __init__(self):
        # Omics data keyed by timepoint; filled by generate_time_series_data().
        self.time_series_omics_data = {}
        # Global flux bounds, overwritten per constraint in
        # generate_time_series_data().
        # NOTE(review): both bounds default to -15 — if an interval was
        # intended, UPPER_BOUND probably should be +15. Confirm before
        # relying on these defaults.
        self.LOWER_BOUND = -15
        self.UPPER_BOUND = -15
    def generate_time_series_data(self, model):
        """Generate mock omics data for each flux constraint / timepoint.

        :param model: cobra model used for every constrained optimization
        """
        # Initializing omics dictionaries to contain data across timepoints.
        # NOTE(review): these lists are never populated in this method —
        # presumably filled elsewhere or dead code; confirm.
        proteomics_list: List = []
        transcriptomics_list: List = []
        fluxomics_list: List = []
        metabolomics_list: List = []
        # generating time series data for the following flux constraints
        # 6, 9, 12, 15 corresponding to the times 0, 3, 6, 9 hours
        # NOTE: The constraints and the timepoints should be supplied as command line inputs
        time_series_omics_data = {}
        experiment_timepoints = [0, 3, 6, 9]
        flux_constraints = [6, 9, 12, 15]
        # NOTE; constraints in flux_constraints, think about it
        for i in range(len(flux_constraints)):
            # Set global reactions bounds (in addition to local)
            self.LOWER_BOUND = flux_constraints[i]
            self.UPPER_BOUND = flux_constraints[i]
            cobra_config = cobra.Configuration()
            cobra_config.bounds = self.LOWER_BOUND, self.UPPER_BOUND
            # Print the list of reaction names related to BIOMASS production
            self.print_reactions(model)
            # get fake proteomics data and write it to XLSX file
            condition = 1
            self.generate_mock_data(model, condition)
def add_random_noise(self):
# TODO
"""
:return:
"""
pass
def chemical_translation(self, dict_in: Dict[str, Any],
fmt_from: str = 'KEGG',
fmt_to: str = 'PubChem CID') -> Dict[str, Any]:
"""
Proxy to UCDavis Chemical Translation Service (CTS). Maps the keys of
the input dictionary keeping intact the values.
Default behaviour: map KEGG Compounds into PubChem CIDs
For details, see https://cts.fiehnlab.ucdavis.edu/services
"""
dict_out: Dict[str, float] = {}
print(gray('Mapping metabolites ids using CTS'), end='', flush=True)
ids_in: List[str] = list(dict_in.keys())
pattern = re.compile(
r"""(?:"searchTerm":")(\w+)(?:","results":\[")(\w+)(?:"])""")
for id_in in ids_in:
mapping_str: str = f'{fmt_from}/{fmt_to}/{id_in}'
mapping_data = urllib.parse.quote(mapping_str)
mapping_req = urllib.request.Request(CTS_URL + mapping_data)
with urllib.request.urlopen(mapping_req) as map_file:
mapping = map_file.read().strip().decode('utf-8')
match: re.Match = pattern.search(mapping)
if match:
assert match.group(1) == id_in
id_out: str = match.group(2)
if fmt_to == 'PubChem CID':
id_out = 'CID:' + id_out
dict_out[id_out] = dict_in[id_in]
print(green('.'), end='', flush=True)
dprint(f'Metabolite {id_in} mapped to {id_out}')
else:
print(red('.'), end='', flush=True)
dprint(yellow(f'Metabolite {id_in} mapping failed!'))
print(green('OK!'))
self.vprint(gray('Number of unmapped genes from'), fmt_from, gray('to'),
fmt_to, gray(':'), yellow(len(dict_in) - len(dict_out)))
return dict_out
def dict_to_edd(self, omics_dict: Dict[str, float],
omics: Omics) -> pd.DataFrame:
"""Get dataframe with EDD format from dictionary with omics values"""
edd: List[OrderedDict[str, Any]] = []
sample: OrderedDict[str, Any]
for measurement, value in omics_dict.items():
sample = col.OrderedDict([
('Line Name', 'WT'),
('Measurement Type', measurement),
('Time', 0), # TODO: Generalize for time-series
('Value', value),
('Units', UNITS[omics])
])
edd.append(sample)
return pd.DataFrame(edd)
def dprint(self, *a, **k):
"""Print only if debug mode is enabled"""
if args.debug:
print(*a, **k)
    def generate_mock_data(self, model, cond):
        """
        Generate fake omics data for ``cond`` successive conditions.

        :param model: cobra model object
        :param cond: number of conditions to generate; counted down to 0,
            and the current count is forwarded into the output file names
        :return: None; omics files are written as a side effect
        """
        while cond:
            print(gray('Condition parameter:'), magenta(cond))
            cond -= 1
            # Constrain and optimize the target reaction before sampling omics
            self.optimize_solution(model, REACTION_ID)
            solution: cobra.Solution = cobra.core.solution.get_solution(
                model, raise_error=False)
            self.vprint(gray('Solution objective value:'), solution.objective_value)
            self.vprint(gray('Model summary after optimization:'))
            try:
                self.vprint(model.summary())
                # self.vprint(model.metabolites.C00185_e.summary())
            except Infeasible:
                self.vprint(yellow(
                    'Model summary unavailable as solution was unfeasible!'))
                # exit code here
            self.write_experiment_description(cond)
            self.get_omics_data(model, solution, cond)
def gene_to_protein(self, dict_in: Dict[str, Any],
fmt_gene: str = 'KEGG_ID',
fmt_prot: str = 'ID') -> Dict[str, Any]:
"""
From any dict whose keys are gene IDs, maps them to protein IDs and
keeps the value intact
Default behaviour: map KEGG IDs into UNIPROT IDs
For details, see https://www.uniprot.org/help/api_idmapping
"""
dict_out: Dict[str, float] = {}
print(gray('Mapping genes into proteins using UNIPROT... '), end='')
gene_ids: List[str] = list(dict_in.keys())
mapping_params: Dict[str, str] = {
'from': fmt_gene,
'to': fmt_prot,
'format': 'tab',
'query': '\t'.join(gene_ids)
}
mapping_data = urllib.parse.urlencode(mapping_params)
mapping_data = mapping_data.encode('utf-8')
mapping_req = urllib.request.Request(UNIPROT_URL, mapping_data)
with urllib.request.urlopen(mapping_req) as map_file:
mapping = map_file.read().strip().decode('utf-8').split('\n')
for gene2prot in mapping[1:]:
gene, prot = gene2prot.split('\t', 1)
dict_out[prot] = dict_in[gene]
dprint('Gene', gene, 'mapped to protein', prot)
if dict_out:
print(green('OK!'))
self.vprint(gray('Number of unmapped genes from'), fmt_gene, gray('to'),
fmt_prot, gray(':'), yellow(len(dict_in) - len(dict_out)))
else:
print(yellow('PROBLEM!'))
return dict_out
# NOTE: Name it consistently , generate_omics_data
def get_omics_data(self, model: cobra.Model,
solution: cobra.Solution,
cond: int):
"""
Core method that generates all omics data.
:param model:
:param solution:
:param cond:
:return:
"""
# Pre-determined linear constants
PROTE_SCALING: float = 10 # Scaling factor for fluxes to proteomics
TRANS_SCALING: float = 1.2 # S.F. for proteomics to transcriptomics
# TODO: Allow user to set those constants via parameters
# The omics variable name should coincide with those elements of Omics
proteomics: Dict[str, float] = {}
transcriptomics: Dict[str, float] = {}
metabolomics: Dict[str, float] = {}
# Get values and statistics for proteomics and transcriptomics
proteo_stats: Dict[str, Counter[str]] = {
db + status: col.Counter() for db in GENE_IDS_DBS
for status in ['_missing', '_success', '_zero']}
metabolite_awflux: Dict[str, List[float]] = {} # abs weighted fluxes
rxn_ids: pd.Index = solution.fluxes.index
# Cobra docs: Accessing reaction fluxes through a Solution object
# is the safer, preferred, and only guaranteed to be correct way.
# NOTE: Put the operations in fucntions , more modular
for rxn_id in rxn_ids:
reaction: cobra.Reaction = model.reactions.get_by_id(rxn_id)
flux: float = solution.fluxes[rxn_id]
gene: cobra.Gene
# Subloop 1/2: proteomics and transcriptomics
for gene in reaction.genes:
gene_id: str = ''
# WARNING! Based on gene.annotation property populated
gene_id_db: str = ''
for gene_id_db in GENE_IDS_DBS:
try:
gene_id = gene.annotation[gene_id_db]
except KeyError:
proteo_stats[gene_id_db + '_missing'][gene_id] += 1
else:
# Populates proteomics and transcriptomics dicts if
# related flux has a positive value
proteo: int = np.ceil(flux * PROTE_SCALING)
if proteo > _EPS:
# Accumulate in case of multiple genes
try:
proteomics[gene_id] += proteo
except KeyError:
proteomics[gene_id] = proteo
proteo_stats[gene_id_db + '_success'][gene_id] += 1
else:
proteo_stats[gene_id_db + '_zero'][gene_id] += 1
transc: float = proteo * TRANS_SCALING
if transc > _EPS * 1e+3:
transcriptomics[gene.id] = transc
break
else:
self.dprint(yellow('WARNING!'), gray('Gene'), gene.id,
gray('in reaction'), rxn_id,
gray('has no useful annotation. Skipping...'))
# Subloop 2/2: metabolomics (partial)
for metabolite, coeff in reaction.metabolites.items():
awflux: float = abs(coeff * flux) # absolute weighted flux
if awflux < _EPS:
continue
metabolite_id: str = metabolite.id.rsplit(
sep='_', maxsplit=1)[0] # Remove suffixes _c, _e, etc
try:
metabolite_awflux[metabolite_id].append(awflux)
except KeyError:
metabolite_awflux[metabolite_id] = [awflux]
# Metabolomics (final)
# Alt: to avoid this loop use a moving average in the subloop above
for metabolite, awfluxes in metabolite_awflux.items():
metabolomics[metabolite] = statistics.mean(awfluxes)
self.vprint(gray('Number of active metabolites:'), len(metabolomics))
dprint(gray('Number of fluxes related to each gene (top 10)'))
for gene_id_db in GENE_IDS_DBS:
for status in ['_missing', '_success', '_zero']:
self.dprint(gene_id_db + status, proteo_stats[
gene_id_db + status].most_common(10))
# Map genes ids into protein ids accepted by EDD
proteomics = self.gene_to_protein(proteomics)
# Map metabolites ids into those accepted by EDD
metabolomics = self.chemical_translation(metabolomics)
# Write omics files
for omic in Omics: # NOTE: omics variable names are elements of Omics
omics_df: pd.DataFrame = self.dict_to_edd(eval(str(omic)), omic)
self.write_data_files(omics_df, omic, cond)
    def get_random_number(self):
        """
        Return a pseudo-random float in [0, 1).

        NOTE(review): the RNG is re-seeded with a fixed seed on every call,
        so this always returns the SAME number. If varying values are
        wanted, seed once at startup instead. Kept as-is to preserve the
        current (reproducible) behaviour.
        """
        random.seed(12312)
        return random.random()
def optimize_solution(self, model: cobra.Model, reaction_id: str) -> None:
"""
:param model:
:param reaction_id:
:return solution:
"""
reaction: cobra.Reaction = model.reactions.get_by_id(reaction_id)
self.vprint(gray('Reaction:'), reaction)
if args.debug:
print(blue('List of reactants:'))
for reactant in reaction.reactants:
print(reactant, reactant.name)
print(blue('List of products:'))
for product in reaction.products:
print(product, product.name)
# Set local reaction bounds
model.reactions.get_by_id(reaction_id).lower_bound = LOWER_BOUND
model.reactions.get_by_id(reaction_id).upper_bound = UPPER_BOUND
self.vprint(gray('Displaying the reaction bounds after constraining them:'),
blue(model.reactions.get_by_id(reaction_id).bounds))
# Optimize the model using FBA
print(gray('Optimizing the model using FBA... '), end='')
model.slim_optimize()
try:
cobra.util.assert_optimal(model)
except OptimizationError as error:
print(yellow('PROBLEM!'), error)
else:
print(green('OK!'))
def read_model(self, file_name):
"""
:param file_name:
:return model:
"""
# Check presence of model file
if not os.path.isfile(file_name):
# NOTE: The error handling not consistent and will be dominated by the stack trace
print(red('ERROR!'),
f'File {file_name} missing from the data dir!')
raise IOError('Missing file')
# Load model depending on the kind of file
self.vprint(gray(f'Loading model in {file_name}... '), end='')
if file_name.endswith('.xml') or file_name.endswith('.sbml'):
model = cobra.io.read_sbml_model(file_name)
elif file_name.endswith('.json'):
model = cobra.io.load_json_model(file_name)
else:
| |
'TokenEndpoint': 'string',
'UserInfoEndpoint': 'string',
'ClientId': 'string',
'ClientSecret': 'string',
'SessionCookieName': 'string',
'Scope': 'string',
'SessionTimeout': 123,
'AuthenticationRequestExtraParams': {
'string': 'string'
},
'OnUnauthenticatedRequest': 'deny'|'allow'|'authenticate',
'UseExistingClientSecret': True|False
},
'AuthenticateCognitoConfig': {
'UserPoolArn': 'string',
'UserPoolClientId': 'string',
'UserPoolDomain': 'string',
'SessionCookieName': 'string',
'Scope': 'string',
'SessionTimeout': 123,
'AuthenticationRequestExtraParams': {
'string': 'string'
},
'OnUnauthenticatedRequest': 'deny'|'allow'|'authenticate'
},
'Order': 123,
'RedirectConfig': {
'Protocol': 'string',
'Port': 'string',
'Host': 'string',
'Path': 'string',
'Query': 'string',
'StatusCode': 'HTTP_301'|'HTTP_302'
},
'FixedResponseConfig': {
'MessageBody': 'string',
'StatusCode': 'string',
'ContentType': 'string'
}
},
],
'IsDefault': True|False
},
],
'NextMarker': 'string'
}
**Response Structure**
- *(dict) --*
- **Rules** *(list) --*
Information about the rules.
- *(dict) --*
Information about a rule.
- **RuleArn** *(string) --*
The Amazon Resource Name (ARN) of the rule.
- **Priority** *(string) --*
The priority.
- **Conditions** *(list) --*
The conditions.
- *(dict) --*
Information about a condition for a rule.
- **Field** *(string) --*
The name of the field. The possible values are ``host-header`` and ``path-pattern`` .
- **Values** *(list) --*
The condition value.
If the field name is ``host-header`` , you can specify a single host name (for example, my.example.com). A host name is case insensitive, can be up to 128 characters in length, and can contain any of the following characters. You can include up to three wildcard characters.
* A-Z, a-z, 0-9
* - .
* * (matches 0 or more characters)
* ? (matches exactly 1 character)
If the field name is ``path-pattern`` , you can specify a single path pattern (for example, /img/*). A path pattern is case-sensitive, can be up to 128 characters in length, and can contain any of the following characters. You can include up to three wildcard characters.
* A-Z, a-z, 0-9
* _ - . $ / ~ " ' @ : +
                        * & (using &amp;)
* * (matches 0 or more characters)
* ? (matches exactly 1 character)
- *(string) --*
- **HostHeaderConfig** *(dict) --*
- **Values** *(list) --*
- *(string) --*
- **PathPatternConfig** *(dict) --*
- **Values** *(list) --*
- *(string) --*
- **HttpHeaderConfig** *(dict) --*
- **HttpHeaderName** *(string) --*
- **Values** *(list) --*
- *(string) --*
- **QueryStringConfig** *(dict) --*
- **Values** *(list) --*
- *(dict) --*
- **Key** *(string) --*
- **Value** *(string) --*
- **HttpRequestMethodConfig** *(dict) --*
- **Values** *(list) --*
- *(string) --*
- **SourceIpConfig** *(dict) --*
- **Values** *(list) --*
- *(string) --*
- **Actions** *(list) --*
The actions.
- *(dict) --*
Information about an action.
- **Type** *(string) --*
The type of action. Each rule must include exactly one of the following types of actions: ``forward`` , ``fixed-response`` , or ``redirect`` .
- **TargetGroupArn** *(string) --*
The Amazon Resource Name (ARN) of the target group. Specify only when ``Type`` is ``forward`` .
- **AuthenticateOidcConfig** *(dict) --*
[HTTPS listeners] Information about an identity provider that is compliant with OpenID Connect (OIDC). Specify only when ``Type`` is ``authenticate-oidc`` .
- **Issuer** *(string) --*
The OIDC issuer identifier of the IdP. This must be a full URL, including the HTTPS protocol, the domain, and the path.
- **AuthorizationEndpoint** *(string) --*
The authorization endpoint of the IdP. This must be a full URL, including the HTTPS protocol, the domain, and the path.
- **TokenEndpoint** *(string) --*
The token endpoint of the IdP. This must be a full URL, including the HTTPS protocol, the domain, and the path.
- **UserInfoEndpoint** *(string) --*
The user info endpoint of the IdP. This must be a full URL, including the HTTPS protocol, the domain, and the path.
- **ClientId** *(string) --*
The OAuth 2.0 client identifier.
- **ClientSecret** *(string) --*
The OAuth 2.0 client secret. This parameter is required if you are creating a rule. If you are modifying a rule, you can omit this parameter if you set ``UseExistingClientSecret`` to true.
- **SessionCookieName** *(string) --*
The name of the cookie used to maintain session information. The default is AWSELBAuthSessionCookie.
- **Scope** *(string) --*
The set of user claims to be requested from the IdP. The default is ``openid`` .
To verify which scope values your IdP supports and how to separate multiple values, see the documentation for your IdP.
- **SessionTimeout** *(integer) --*
The maximum duration of the authentication session, in seconds. The default is 604800 seconds (7 days).
- **AuthenticationRequestExtraParams** *(dict) --*
The query parameters (up to 10) to include in the redirect request to the authorization endpoint.
- *(string) --*
- *(string) --*
- **OnUnauthenticatedRequest** *(string) --*
The behavior if the user is not authenticated. The following are possible values:
* deny- Return an HTTP 401 Unauthorized error.
* allow- Allow the request to be forwarded to the target.
* authenticate- Redirect the request to the IdP authorization endpoint. This is the default value.
- **UseExistingClientSecret** *(boolean) --*
Indicates whether to use the existing client secret when modifying a rule. If you are creating a rule, you can omit this parameter or set it to false.
- **AuthenticateCognitoConfig** *(dict) --*
[HTTPS listeners] Information for using Amazon Cognito to authenticate users. Specify only when ``Type`` is ``authenticate-cognito`` .
- **UserPoolArn** *(string) --*
The Amazon Resource Name (ARN) of the Amazon Cognito user pool.
- **UserPoolClientId** *(string) --*
The ID of the Amazon Cognito user pool client.
- **UserPoolDomain** *(string) --*
The domain prefix or fully-qualified domain name of the Amazon Cognito user pool.
- **SessionCookieName** *(string) --*
The name of the cookie used to maintain session information. The default is AWSELBAuthSessionCookie.
- **Scope** *(string) --*
The set of user claims to be requested from the IdP. The default is ``openid`` .
To verify which scope values your IdP supports and how to separate multiple values, see the documentation for your IdP.
- **SessionTimeout** *(integer) --*
The maximum duration of the authentication session, in seconds. The default is 604800 seconds (7 days).
- **AuthenticationRequestExtraParams** *(dict) --*
The query parameters (up to 10) to include in the redirect request to the authorization endpoint.
- *(string) --*
- *(string) --*
- **OnUnauthenticatedRequest** *(string) --*
The behavior if the user is not authenticated. The following are possible values:
* deny- Return an HTTP 401 Unauthorized error.
* allow- Allow the request to be forwarded to the target.
* authenticate- Redirect the request to the IdP authorization endpoint. This is the default value.
- **Order** *(integer) --*
The order for the action. This value is required for rules with multiple actions. The action with the lowest value for order is performed first. The final action to be performed must be a ``forward`` or a ``fixed-response`` action.
- **RedirectConfig** *(dict) --*
[Application Load Balancer] Information for creating a redirect action. Specify only when ``Type`` is ``redirect`` .
- **Protocol** *(string) --*
The protocol. You can specify HTTP, HTTPS, or #{protocol}. You can redirect HTTP to HTTP, HTTP to HTTPS, and HTTPS to HTTPS. You cannot redirect HTTPS to HTTP.
- **Port** *(string) --*
The port. You can specify a value from 1 to 65535 or #{port}.
- **Host** *(string) --*
The hostname. This component is not percent-encoded. The hostname can contain #{host}.
- **Path** *(string) --*
The absolute path, starting with the leading "/". This component is not percent-encoded. The path can contain #{host}, #{path}, and #{port}.
- **Query** *(string) --*
The query parameters, URL-encoded when necessary, but not percent-encoded. Do not include the | |
# repo: ajroach42/bbj
from src.exceptions import BBJException, BBJParameterError, BBJUserError
from src import db, schema, formatting
from functools import wraps
from uuid import uuid1
from sys import argv
import traceback
import cherrypy
import sqlite3
import json
# Path of the sqlite database, relative to the working directory.
dbname = "data.sqlite"
# any values here may be overridden in the config.json. Any values not listed
# here will have no effect on the server.
app_config = {
    "port": 7099,
    "host": "127.0.0.1",
    "instance_name": "BBJ",
    "allow_anon": True,
    "debug": False
}
# Load overrides from config.json; when the file does not exist yet, write
# the current defaults out so the admin has a template to edit.
try:
    with open("config.json") as _conf:
        app_config.update(json.load(_conf))
except FileNotFoundError:
    with open("config.json", "w") as _conf:
        json.dump(app_config, _conf)
def api_method(function):
    """
    A wrapper that handles encoding of objects and errors to a
    standard format for the API, resolves and authorizes users
    from header data, and prepares the arguments for each method.

    In the body of each api method and all the functions
    they utilize, BBJExceptions are caught and their attached
    schema is dispatched to the client. All other unhandled
    exceptions will throw a code 1 back at the client and log
    it for inspection. Errors related to JSON decoding are
    caught as well and returned to the client as code 0.
    """
    function.exposed = True

    @wraps(function)
    def wrapper(self, *args, **kwargs):
        response = None
        # BUG FIX: bind before the try block so the finally clause cannot
        # hit an unbound name when sqlite3.connect itself raises.
        connection = None
        try:
            connection = sqlite3.connect(dbname)
            # read in the body from the request to a string...
            if cherrypy.request.method == "POST":
                read_in = str(cherrypy.request.body.read(), "utf8")
                if not read_in:
                    # the body may be empty, not all methods require input
                    body = {}
                else:
                    body = json.loads(read_in)
                    if not isinstance(body, dict):
                        raise BBJParameterError("Non-JSONObject input")
                    # lowercase all of its top-level keys
                    body = {key.lower(): value for key, value in body.items()}
            else:
                body = {}
            username = cherrypy.request.headers.get("User")
            auth = cherrypy.request.headers.get("Auth")
            if (username and not auth) or (auth and not username):
                raise BBJParameterError(
                    "User or Auth was given without the other.")
            elif not username and not auth:
                user = db.anon
            else:
                user = db.user_resolve(connection, username)
                if not user:
                    raise BBJUserError("User %s is not registered" % username)
                elif auth.lower() != user["auth_hash"].lower():
                    raise BBJException(
                        5, "Invalid authorization key for user.")
            # api_methods may choose to bind a usermap into the thread_data
            # which will send it off with the response
            cherrypy.thread_data.usermap = {}
            value = function(self, body, connection, user)
            response = schema.response(value, cherrypy.thread_data.usermap)
        except BBJException as e:
            response = e.schema
        except json.JSONDecodeError as e:
            response = schema.error(0, str(e))
        except Exception as e:
            # Unhandled error: ship a unique id to the client and keep the
            # traceback on disk for later inspection.
            error_id = uuid1().hex
            response = schema.error(
                1, "Internal server error: code {} {}".format(
                    error_id, repr(e)))
            with open("logs/exceptions/" + error_id, "a") as log:
                traceback.print_tb(e.__traceback__, file=log)
                log.write(repr(e))
            print("logged code 1 exception " + error_id)
        finally:
            if connection is not None:
                connection.close()
        return json.dumps(response)
    return wrapper
def create_usermap(connection, obj, index=False):
    """
    Creates a mapping of all the user_ids that occur in OBJ to
    their full user objects (names, profile info, etc). Can
    be a thread_index or a messages object from one.
    """
    user_ids = {item["author"] for item in obj}
    if index:
        # thread indexes also carry the last poster of each thread
        user_ids.update(item["last_author"] for item in obj)
    return {
        uid: db.user_resolve(
            connection, uid, externalize=True, return_false=False)
        for uid in user_ids
    }
def do_formatting(format_spec, messages):
    """
    Apply the requested formatter to MESSAGES in place.

    Returns None when no formatting was requested, True after
    formatting has been applied. Raises BBJParameterError for an
    unrecognized format_spec.
    """
    if not format_spec:
        return None
    if format_spec != "sequential":
        raise BBJParameterError("invalid formatter specification")
    formatting.apply_formatting(messages, formatting.sequential_expressions)
    return True
def validate(json, args):
    """
    Ensure the json object contains all the keys needed to satisfy
    its endpoint (and isnt empty).

    Args:
        json: dict of request parameters. (The name shadows the stdlib
            module; kept for backward compatibility with callers.)
        args: iterable of required key names.

    Returns:
        None on success.

    Raises:
        BBJParameterError: when `json` is empty or a required key
            is missing.
    """
    if not json:
        raise BBJParameterError(
            "JSON input is empty. This method requires the following "
            "arguments: {}".format(", ".join(args)))
    for arg in args:
        # membership test directly on the dict; `.keys()` was redundant
        if arg not in json:
            raise BBJParameterError(
                "Required parameter {} is absent from the request. "
                "This method requires the following arguments: {}"
                .format(arg, ", ".join(args)))
def no_anon_hook(user, message=None, user_error=True):
    """
    Reject the anonymous account where participation requires a login.

    Does nothing for registered users. For the anon user, raises
    BBJUserError (or BBJParameterError when `user_error` is false):
    with `message` when one is supplied, otherwise only when the
    instance has anonymous participation disabled.
    """
    if user is not db.anon:
        return
    err_cls = BBJUserError if user_error else BBJParameterError
    if message:
        raise err_cls(message)
    if not app_config["allow_anon"]:
        raise err_cls(
            "Anonymous participation has been disabled on this instance.")
class API(object):
"""
This object contains all the API endpoints for bbj. The html serving
part of the server is not written yet, so this is currently the only
module being served.
The docstrings below are specifically formatted for the mkdocs static
site generator. The ugly `doctype` and `arglist` attributes assigned
after each method definition are for use in the `mkendpoints.py` script.
"""
@api_method
def user_register(self, args, database, user, **kwargs):
    """
    Register a new user into the system and return the new user object
    on success. The returned object includes the same `user_name` and
    `auth_hash` that you supply, in addition to all the default user
    parameters. Returns code 4 errors for any failures.

    Required keys in `args`: user_name, auth_hash.
    """
    validate(args, ["user_name", "auth_hash"])
    return db.user_register(
        database, args["user_name"], args["auth_hash"])
# metadata consumed by the mkendpoints.py docs generator (see class docstring)
user_register.doctype = "Users"
user_register.arglist = (
    ("user_name", "string: the desired display name"),
    ("auth_hash", "string: a sha256 hash of a password")
)
@api_method
def user_update(self, args, database, user, **kwargs):
    """
    Receives new parameters and assigns them to the user object.
    This method requires that you send a valid User/Auth header
    pair with your request, and the changes are made to that
    account.

    Take care to keep your client's User/Auth header pair up to date
    after using this method.

    The newly updated user object is returned on success,
    including the `auth_hash`.
    """
    # anonymous users have no account to update
    no_anon_hook(user, "Anons cannot modify their account.")
    validate(args, [])  # just make sure its not empty
    return db.user_update(database, user, args)
# metadata consumed by the mkendpoints.py docs generator
user_update.doctype = "Users"
user_update.arglist = (
    ("Any of the following may be submitted", ""),
    ("user_name", "string: a desired display name"),
    ("auth_hash", "string: sha256 hash for a new password"),
    ("quip", "string: a short string that can be used as a signature"),
    ("bio", "string: a user biography for their profile"),
    ("color", "integer: 0-6, a display color for the user")
)
@api_method
def get_me(self, args, database, user, **kwargs):
    """
    Requires no arguments. Returns your internal user object,
    including your `auth_hash`.
    """
    # `user` was resolved by the api_method wrapper from the
    # User/Auth request headers (the anon object when none were sent)
    return user
get_me.doctype = "Users"
get_me.arglist = (("", ""),)
@api_method
def user_map(self, args, database, user, **kwargs):
    """
    Returns an array with all registered user_ids, with the usermap
    object populated by their full objects. This method is _NEVER_
    neccesary when using other endpoints, as the usermap returned
    on those requests already contains all the information you will
    need. This endpoint is useful for statistic purposes only.
    """
    rows = database.execute("SELECT user_id FROM users")
    user_ids = {row[0] for row in rows}
    # bind the full objects into the usermap sent back with the response
    cherrypy.thread_data.usermap = {
        user_id: db.user_resolve(
            database, user_id, externalize=True, return_false=False)
        for user_id in user_ids
    }
    return list(user_ids)
user_map.doctype = "Tools"
user_map.arglist = (("", ""),)
@api_method
def user_get(self, args, database, user, **kwargs):
    """
    Returns a user object for the given target.
    """
    validate(args, ["target_user"])
    # NOTE(review): return_false=False appears to make db.user_resolve
    # raise (rather than return a falsy value) for unknown users —
    # contrast user_is_registered below, which bool()s the default
    # return. Confirm against the db module.
    return db.user_resolve(
        database, args["target_user"], return_false=False, externalize=True)
user_get.doctype = "Users"
user_get.arglist = (
    ("target_user", "string: either a user_name or a user_id"),
)
@api_method
def user_is_registered(self, args, database, user, **kwargs):
    """
    Returns boolean `true` or `false` of whether the given target is
    registered on the server.
    """
    validate(args, ["target_user"])
    # default user_resolve call: falsy result means "not registered"
    return bool(db.user_resolve(database, args["target_user"]))
user_is_registered.doctype = "Users"
user_is_registered.arglist = (
    ("target_user", "string: either a user_name or a user_id"),
)
@api_method
def check_auth(self, args, database, user, **kwargs):
    """
    Returns boolean `true` or `false` of whether the hash given
    is correct for the given user.
    """
    # local import: top-of-file import block is elsewhere in this module
    import hmac
    validate(args, ["target_user", "target_hash"])
    user = db.user_resolve(
        database, args["target_user"], return_false=False)
    # constant-time comparison: a plain == short-circuits on the first
    # differing character and can leak hash prefixes to a timing attacker
    return hmac.compare_digest(
        args["target_hash"].lower(), user["auth_hash"].lower())
check_auth.doctype = "Authorization"
check_auth.arglist = (
    ("target_user", "string: either a user_name or a user_id"),
    ("target_hash", "string: sha256 hash for the password to check")
)
@api_method
def thread_index(self, args, database, user, **kwargs):
    """
    Return an array with all the server's threads. They are already sorted for
    you; most recently modified threads are at the beginning of the array.
    Unless you supply `include_op`, these threads have no `messages` parameter.
    If you do, the `messages` parameter is an array with a single message object
    for the original post.
    """
    threads = db.thread_index(database, include_op=args.get("include_op"))
    # the usermap bound here is sent back with the response by the
    # api_method wrapper; index=True also resolves each last_author
    cherrypy.thread_data.usermap = create_usermap(database, threads, True)
    return threads
thread_index.doctype = "Threads & Messages"
thread_index.arglist = (
    ("OPTIONAL: include_op", "boolean: Include a `messages` object containing the original post"),
)
@api_method
def message_feed(self, args, database, user, **kwargs):
"""
Returns a special object representing all activity on the board since `time`.
```javascript
{
"threads": {
"thread_id": {
// ...thread object
},
// ...more thread_id/object pairs
},
"messages": [
...standard message object array sorted by date
]
| |
<filename>src/x7zipfile.py
# -*- coding: utf-8 -*-
# Copyright 2021 UuuNyaa <<EMAIL>>
# This file is part of x7zipfile.
#
# BSD 3-Clause License
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import datetime
import errno
import os
import re
import subprocess
import sys
from dataclasses import dataclass
from typing import Callable, Iterator, List, Tuple, Union
# True when running under Windows; selects extra executable candidates
# and the CREATE_NO_WINDOW process-creation flag below.
_WIN32 = sys.platform == "win32"
# Candidate 7-Zip executable names probed in order by get_executor().
_EXECUTABLES = ['7z'] + (['7z.exe'] if _WIN32 else [])
# Exception hierarchy:
#   x7ZipError
#   ├── x7ZipNoEntry
#   └── x7ZipExecError
#       └── x7ZipCannotExec
class x7ZipError(Exception):
    """Base class for x7zipfile errors."""


class x7ZipNoEntry(x7ZipError):
    """File not found in archive"""


class x7ZipExecError(x7ZipError):
    """Problem reported by 7-zip."""


class x7ZipCannotExec(x7ZipExecError):
    """Executable not found."""
@dataclass
class x7ZipInfo:
    """An entry in a 7-zip archive.

    Attributes:
        filename:
            File name with relative path.
            Path separator is '/'. Always unicode string.
            None for the archive-level header block.
        date_time:
            File modification timestamp as a tuple of
            (year, month, day, hour, minute, second).
            7-zip allows archives where it is missing; it's None then.
        file_size:
            Uncompressed size.
        compress_size:
            Compressed size.
        compress_type:
            Compression method: eg. LZMA, LZMA2, PPMD, ...
        encrypted:
            Encryption state: '+' = encrypted, '-' = not encrypted.
        mode:
            File attributes string. May be either dos-style or
            unix-style depending on host OS; may be None.
        CRC:
            CRC-32 of uncompressed file, unsigned int; may be None.
            (The original docstring referenced RAR5 here — a leftover
            from the rarfile API this module mirrors.)
        block:
            Solid-block index reported by 7-zip, if any.
    """
    filename: Union[str, None]
    date_time: Union[Tuple[int, int, int, int, int, int], None] = None
    file_size: Union[int, None] = None
    compress_size: Union[int, None] = None
    compress_type: Union[str, None] = None
    encrypted: Union[str, None] = None
    mode: Union[str, None] = None
    CRC: Union[int, None] = None
    block: Union[int, None] = None

    def _mode_has(self, flag: str) -> bool:
        """True when `mode` is known and contains FLAG (False for None)."""
        return self.mode is not None and flag in self.mode

    def is_dir(self) -> bool:
        """Returns True if entry is a directory."""
        return self._mode_has('D')

    def is_symlink(self) -> bool:
        """Returns True if entry is a symlink."""
        return self._mode_has(' l')

    def is_file(self) -> bool:
        """Returns True if entry is a normal file."""
        return self._mode_has('A')

    def is_readonly(self) -> bool:
        """Returns True if entry is a readonly file."""
        return self._mode_has('R')

    def needs_password(self) -> bool:
        """Returns True if entry is stored password-protected."""
        return self.encrypted == '+'
class _Executor:
    """Thin wrapper around a 7-zip command-line executable."""

    def __init__(self, executable: str):
        self.executable = executable

    def is_available(self) -> bool:
        """Return True when the executable can be spawned and exits 0."""
        try:
            p = self._popen(self.executable)
            _, _ = p.communicate(timeout=1)
            p.wait()
            return p.returncode == 0
        except Exception:  # narrowed from a bare except; still deliberately broad
            return False

    def _popen(self, command: Union[str, List[str]]) -> subprocess.Popen:
        """Disconnect command from parent fds, read only from stdout."""
        try:
            return subprocess.Popen(
                command,
                bufsize=0,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                stdin=subprocess.DEVNULL,
                creationflags=0x08000000 if _WIN32 else 0  # CREATE_NO_WINDOW
            )
        except OSError as e:
            if e.errno == errno.ENOENT:
                raise x7ZipCannotExec('7-zip not installed?') from None
            if e.errno == errno.EACCES or e.errno == errno.EPERM:
                raise x7ZipCannotExec('Cannot execute 7-zip') from None
            raise

    def execute(self, command: Union[str, List[str]]) -> Iterator[str]:
        """Run COMMAND, yielding its stdout line by line.

        Raises x7ZipExecError (with stderr text) when the process exits
        non-zero; the messages follow the 7-zip manual's exit codes.
        """
        linesep = os.linesep
        p = self._popen(command)
        while True:
            line = p.stdout.readline()
            # readline() returns b'' at EOF, never None; the original
            # `is not None` test was always true and yielded spurious
            # empty strings once the stream was exhausted.
            if line:
                yield line.decode('utf-8').rstrip(linesep)
            elif p.poll() is not None:
                break
        exit_code = p.poll()
        error_message = p.stderr.read().decode('utf-8').strip()
        for stream in (p.stdin, p.stdout, p.stderr):
            try:
                stream.close()
            except Exception:
                # p.stdin is None (DEVNULL); best-effort cleanup
                pass
        if exit_code == 0:
            return
        # exit-code to message mapping per the 7-zip documentation
        reasons = {
            1: 'Warning',
            2: 'Fatal error',
            7: 'Command line error',
            8: 'Not enough memory for operation',
            255: 'User stopped the process',
        }
        reason = reasons.get(exit_code)
        if reason is None:
            raise x7ZipExecError(error_message)
        raise x7ZipExecError(f'{reason}: {error_message}')

    # (prefix, attribute name, value parser, prefix length) for each
    # property line of `7z l -slt` output that we understand.
    # The original annotation (Tuple[str, int, Callable]) was wrong:
    # this is a list of 4-tuples keyed by string prefixes.
    _parsers: List[Tuple[str, str, Callable[[str], object], int]] = [
        (prefix, attribute, parse, len(prefix))
        for prefix, attribute, parse in [
            ('Path = ', 'filename', lambda p: p),
            ('Size = ', 'file_size', lambda p: int(p) if p else None),
            ('Packed Size = ', 'compress_size', lambda p: int(p) if p else None),
            ('Modified = ', 'date_time', lambda p: tuple([int(v) for v in re.split(r'[ \-:]', p)]) if p else None),
            ('Attributes = ', 'mode', lambda p: p),
            ('CRC = ', 'CRC', lambda p: int(p, 16) if p else None),
            ('Encrypted = ', 'encrypted', lambda p: p),
            ('Method = ', 'compress_type', lambda p: p if p else None),
            ('Block = ', 'block', lambda p: int(p) if p else None),
        ]
    ]

    def execute_list(self, archive_name: str, password: Union[str, None] = None) -> Iterator[x7ZipInfo]:
        """Yield one x7ZipInfo per member, parsed from `7z l -slt` output."""
        info = None
        for line in self.execute([
                self.executable,
                'l',
                '-slt',
                '-sccUTF-8',
                f"-p{password or ''}",
                archive_name,
        ]):
            for prefix, property_name, parse_property, prefix_length in self._parsers:
                if not line.startswith(prefix):
                    continue
                try:
                    value = parse_property(line[prefix_length:])
                except Exception as e:  # narrowed from a bare except
                    raise x7ZipError(f'parse error: {line}') from e
                if prefix == 'Path = ':
                    # a new 'Path' line starts the next entry: flush the
                    # previous one if it described a real member
                    if info and info.filename:
                        yield info
                    # the very first 'Path' block describes the archive
                    # itself; it gets filename=None and its remaining
                    # properties are skipped below
                    info = x7ZipInfo(filename=None if info is None else value)
                if info is None:
                    break
                if info.filename is None:
                    break
                setattr(info, property_name, value)
        if info and info.filename:
            yield info

    def execute_extract(
            self,
            archive_name: str,
            output_directory: Union[str, None] = None,
            file_names: Union[List[str], None] = None,
            password: Union[str, None] = None,
            other_options: Union[List[str], None] = None,
    ):
        """Extract FILE_NAMES (or everything) from ARCHIVE_NAME via `7z x`."""
        command = [self.executable, 'x', '-sccUTF-8', archive_name]
        if output_directory is not None:
            command.append(f'-o{output_directory}')
        command.append(f"-p{password or ''}")
        if file_names is not None:
            command.extend(file_names)
        if other_options is not None:
            command.extend(other_options)
        # drain stdout so the generator runs to completion and raises
        # on a non-zero exit code
        for _ in self.execute(command):
            pass
# Lazily-initialized module-level singleton, populated by get_executor().
# (Annotation corrected: the initial value is None, not an _Executor.)
_EXECUTOR: Union[_Executor, None] = None


def get_executor() -> _Executor:
    """Return a cached _Executor for the first working 7z executable.

    Probes each candidate in _EXECUTABLES once; the winner is memoized
    in the module-level _EXECUTOR for all later calls.

    Raises:
        x7ZipCannotExec: when no candidate executable works.
    """
    global _EXECUTOR
    if _EXECUTOR is not None:
        return _EXECUTOR
    for executable in _EXECUTABLES:
        executor = _Executor(executable)
        if executor.is_available():
            _EXECUTOR = executor
            return _EXECUTOR
    raise x7ZipCannotExec(
        'Cannot find working 7-zip command. '
        'Please install 7-zip and setup the PATH properly.'
    )
class x7ZipFile:
"""Class with methods to open, close, list 7-zip files.
"""
def __init__(
        self,
        file: Union[str, bytes, os.PathLike],
        mode: str = 'r',
        pwd: Union[str, None] = None,
):
    """Open the 7-zip file with mode read 'r'.

    Args:
        file: path to the archive; PathLike values are converted to str.
        mode: only 'r' is supported.
        pwd: optional archive password, used for listing/extraction.

    Raises:
        NotImplementedError: for any mode other than 'r'.
        x7ZipCannotExec: when no working 7z executable is found.
    """
    self._file = file if not isinstance(file, os.PathLike) else str(file)
    if mode != 'r':
        raise NotImplementedError('x7ZipFile supports only mode=r')
    # the source contained a redacted placeholder here; storing the pwd
    # argument is the only assignment consistent with infolist()'s use
    self._pwd = pwd
    self._executor = get_executor()
    self._info_list = None  # lazy cache for infolist()
    self._info_map = None   # lazy cache for getinfo()
def __enter__(self) -> 'x7ZipFile':
    """Open context; returns the archive object itself.

    (The original annotation claimed x7ZipInfo, but the method
    returns self, which is an x7ZipFile.)
    """
    return self
def __exit__(self, typ, value, traceback):
    """Exit context: delegate resource cleanup to close()."""
    self.close()
def open(self):
    """Returns file-like object from where a member's data can be read.

    Not implemented yet. The contract below is inherited from the
    rarfile-style API this module mirrors; the documented parameters
    are not in the signature yet (NOTE(review): add them when
    implementing).

    Intended behavior: the object implements the io.RawIOBase
    interface, so it can be wrapped with io.BufferedReader and
    io.TextIOWrapper. Seeking is fast only on uncompressed files; on
    compressed files it is implemented by reading ahead and/or
    restarting the decompression.

    Parameters:
        name
            file name or x7ZipInfo instance.
        mode
            must be 'r'
        pwd
            password to use for extracting.
    """
    raise NotImplementedError('x7ZipFile does not yet support open')
def close(self):
    """Release open resources.

    Currently a no-op: listing and extraction run short-lived
    subprocesses, so no persistent handles are held.
    """
    pass
def infolist(self) -> List[x7ZipInfo]:
    """Return x7ZipInfo objects for all files/directories in archive.

    The listing is computed once and cached. The result is
    materialized into a list so that repeated calls (and getinfo())
    can iterate it again — caching the raw generator would leave it
    exhausted after the first pass, contradicting the declared
    List return type.
    """
    if self._info_list is None:
        # `password=self._pwd` restores a redacted expression; it is the
        # only attribute set from the constructor's pwd argument
        self._info_list = list(
            self._executor.execute_list(self._file, password=self._pwd))
    return self._info_list
def getinfo(self, member: str) -> x7ZipInfo:
    """Return the x7ZipInfo entry for MEMBER.

    Raises x7ZipNoEntry when the archive contains no such file.
    """
    if self._info_map is None:
        # lazily index the listing by filename
        by_name = {}
        for entry in self.infolist():
            by_name[entry.filename] = entry
        self._info_map = by_name
    try:
        return self._info_map[member]
    except KeyError:
        raise x7ZipNoEntry(f'No such file: {member}') from None
def namelist(self) -> List[str]:
"""Return list of filenames | |
<filename>tools/simmc_dataset.py
import json
import pdb
import re
import string
import torch
from nltk.tokenize import WordPunctTokenizer
from torch.utils.data import Dataset
"""
The dialog intents have the shapes:
DA:<DIALOG_ACT>:<ACTIVITY>:<OBJECT> or DA:<DIALOG_ACT>:<ACTIVITY>:<OBJECT>.<attribute>
Examples:
DA:INFORM:GET:CLOTHING.embellishment
The <DIALOG_ACT> values are shared between fashion and furniture dataset. <ACTIVITY> values are dataset specific (see paper fig.3).
"""
DIALOG_ACT = {'ASK', 'CONFIRM', 'INFORM', 'PROMPT', 'REQUEST'}
ACTIVITY = {'ADD_TO_CART', 'CHECK', 'COMPARE', 'COUNT', 'DISPREFER', 'GET', 'PREFER', 'REFINE'}
class SIMMCDataset(Dataset):
    """Dataset wrapper for SIMMC Fashion.

    Index layout built by create_index():
    (list) self.ids[idx] = <dialogue_id>
    (dict) self.id2dialog[<dialogue_id>].keys() = ['dialogue', 'dialogue_coref_map', 'dialogue_idx', 'domains', 'dialogue_task_id']
    (dict) self.id2dialog[<dialogue_id>]['dialogue'][<dialogue_turn>].keys() = ['belief_state', 'domain', 'state_graph_0', 'state_graph_1', 'state_graph_2',
                                                    'system_transcript', 'system_transcript_annotated', 'system_turn_label',
                                                    'transcript', 'transcript_annotated', 'turn_idx', 'turn_label',
                                                    'visual_objects', 'raw_assistant_keystrokes']
    (list) self.transcripts[idx] = 'dialogueid_turn' (e.g., '3094_3', '3094_0')
    (dict) self.task_mapping[<task_id>].keys() = ['task_id', 'image_ids', 'focus_image', 'memory_images', 'database_images']
    (dict) self.processed_turns[<dialogue_id>][turn] = {'transcript': <tokenized_transcript>, 'system_transcript': <tokenized_system_transcript>}
    """

    def __init__(self, data_path, metadata_path, verbose=True):
        """Dataset constructor.

        Args:
            data_path (str): path to dataset json file
            metadata_path (str): path to metadata json file
            verbose (bool): print indexing progress / skipped dialogs
        """
        # NOTE(review): both file handles are opened without being closed;
        # consider `with open(...)` blocks here.
        data_fp = open(data_path)
        raw_data = json.load(data_fp)
        metadata_fp = open(metadata_path)
        self.metadata = json.load(metadata_fp)
        self.split = raw_data['split']
        self.version = raw_data['version']
        self.year = raw_data['year']
        self.domain = raw_data['domain']
        self.verbose = verbose
        if self.verbose:
            print('Creating dataset index ...')
        self.create_index(raw_data)
        if self.verbose:
            print('Skipped dialogs: {}'.format(self.skipped_dialogs))
            print(' ... index created')

    def __len__(self):
        # one sample per (dialogue, turn) pair
        return len(self.transcripts)

    def __getitem__(self, index):
        # transcripts entries are '<dialogue_id>_<turn>' strings
        dial_id, turn = self.transcripts[index].split('_')
        dial_id = int(dial_id)
        turn = int(turn)
        user_req = self.id2dialog[dial_id]['dialogue'][turn]['transcript']
        wizard_resp = self.id2dialog[dial_id]['dialogue'][turn]['system_transcript']
        # extract dialogue history
        turn_str = '{} [SEP] {}'
        # NOTE(review): both format slots are filled with the user
        # 'transcript'; the second was probably meant to be
        # 'system_transcript' — confirm before relying on history.
        history = [turn_str.format(self.id2dialog[dial_id]['dialogue'][t]['transcript'],
                                   self.id2dialog[dial_id]['dialogue'][t]['transcript'])
                   for t in range(turn)]
        # dispatch data across different dataset instantiation
        if isinstance(self, SIMMCDatasetForActionPrediction,) or isinstance(self, SIMMCDatasetForResponseGeneration,):
            focus_item = self.id2focus[dial_id][turn]
            attributes = []
            if self.id2act[dial_id][turn]['action_supervision'] is not None:
                attributes = self.id2act[dial_id][turn]['action_supervision']['attributes']
            return_tuple = (dial_id, turn, user_req, wizard_resp, history, focus_item, self.id2act[dial_id][turn]['action'], attributes)
            if isinstance(self, SIMMCDatasetForResponseGeneration,):
                return_tuple += (self.id2candidates[dial_id][turn]['retrieval_candidates'],)
            return return_tuple

    def extract_visual_context(self, dial_id):
        # NOTE(review): relies on self.id2act, which is only populated by
        # subclasses (load_actions); calling this on the base class fails.
        task_id = self.id2dialog[dial_id]['dialogue_task_id']
        init_focus = self.task_mapping[task_id]['focus_image']
        focus_items = [init_focus]
        for act_annotation in self.id2act[dial_id]:
            # force object permanence: keep the previous focus when the
            # annotation does not carry one
            if act_annotation['action_supervision'] is None or 'focus' not in act_annotation['action_supervision']:
                focus_items.append(focus_items[-1])
            else:
                focus_items.append(act_annotation['action_supervision']['focus'])
        return focus_items

    def create_index(self, raw_data):
        """Build ids/id2dialog/transcripts/task_mapping from the raw JSON;
        dialogs without a 'dialogue_task_id' are recorded in skipped_dialogs.
        """
        self.ids = []
        self.id2dialog = {}
        self.transcripts = []
        self.skipped_dialogs = set()
        for dialog in raw_data['dialogue_data']:
            if 'dialogue_task_id' in dialog:
                self.ids.append(dialog['dialogue_idx'])
                dialog_obj = {
                    'dialogue': dialog['dialogue'],
                    'dialogue_coref_map': dialog['dialogue_coref_map'],
                    'dialogue_idx': dialog['dialogue_idx'],
                    'domains': dialog['domains'],
                    'dialogue_task_id': dialog['dialogue_task_id']}
                transcripts = ['{}_{}'.format(dialog['dialogue_idx'], turn) for turn, _ in enumerate(dialog['dialogue'])]
                self.id2dialog[dialog['dialogue_idx']] = dialog_obj
                self.transcripts.extend(transcripts)
            else:
                if self.verbose:
                    #print('id: {} ; is dialogue_task_id missing: {}'.format(dialog['dialogue_idx'], not 'dialogue_task_id' in dialog))
                    self.skipped_dialogs.add(dialog['dialogue_idx'])
        self.task_mapping = {}
        for task in raw_data['task_mapping']:
            self.task_mapping[task['task_id']] = task

    def getmetadata(self, obj_id):
        """Return metadata for the object with the specified id

        Args:
            obj_id (str): id of the object

        Returns:
            dict: returns a dict with the following shape
            {'metadata':
                {'availability': [],
                'availableSizes': "['L', 'XXL']",
                'brand': '212 Local',
                'color': ['black'],
                'customerRating': '2.06',
                'embellishments': ['layered'],
                'hemLength': ['knee_length'],
                'pattern': [],
                'price': '$269',
                'size': [],
                'skirtStyle': ['asymmetrical', 'fit_and_flare', 'loose'],
                'type': 'skirt'
                },
            'url': 'GByeggJtfhLUq9UGAAAAAABqViN1btAUAAAB'
            }
        """
        return self.metadata[obj_id]

    def __str__(self):
        # e.g. 'fashion_train_2020_v1.0'
        return '{}_{}_{}_v{}'.format(self.domain, self.split, self.year, self.version)
class SIMMCDatasetForResponseGeneration(SIMMCDataset):
    """SIMMC Fashion dataset specialized for the response-generation subtask.

    Adds action/candidate annotations and a processed (tokenizable) view of
    the item metadata on top of the base index.
    """

    # conversion from attribute and action annotations format to english string
    _ATTRS = {'embellishment', 'skirtStyle', 'availableSizes', 'dressStyle', 'material', 'clothingStyle', 'jacketStyle',
            'sleeveLength', 'soldBy', 'price', 'ageRange', 'hemLength', 'size', 'warmthRating', 'sweaterStyle',
            'forGender', 'madeIn', 'info', 'customerRating', 'hemStyle', 'hasPart', 'pattern', 'clothingCategory',
            'forOccasion', 'waistStyle', 'sleeveStyle', 'amountInStock', 'waterResistance', 'necklineStyle', 'skirtLength',
            'color', 'brand', 'sequential'}
    # lowercase camelCase attribute -> human-readable phrase
    _ATTR2STR = {'skirtstyle': 'skirt style', 'availablesizes': 'available sizes', 'dressstyle': 'dress style', 'clothingstyle': 'clothing style',
                'jacketstyle': 'jacket style', 'sleevelength': 'sleeve length', 'soldby': 'sold by', 'agerange': 'age range', 'hemlength': 'hem length',
                'warmthrating': 'warmth rating', 'sweaterstyle': 'sweater style', 'forgender': 'for gender', 'madein': 'made in', 'customerrating': 'customer rating',
                'hemstyle': 'hem style', 'haspart': 'has part', 'clothingcategory': 'clothing category', 'foroccasion': 'for occasion', 'waiststyle': 'waist style',
                'sleevestyle': 'sleeve style', 'amountinstock': 'amount in stock', 'waterresistance': 'water resistance', 'necklinestyle': 'neckline style',
                'skirtlength': 'skirt length'}
    # lowercase action name -> human-readable phrase
    _ACT2STR = {'none': 'none', 'searchdatabase': 'search database', 'searchmemory': 'search memory', 'specifyinfo': 'specify info', 'addtocart': 'add to cart'}
    # map attribute names to metadata fields
    _ATTR2FIELD = {'embellishment': 'embellishments', 'skirtStyle': 'skirtStyle', 'availableSizes': 'availableSizes', 'dressStyle': 'dressStyle', 'jacketStyle': 'jacketStyle',
                'sleeveLength': 'sleeveStyle', 'soldBy': 'brand', 'price': 'price', 'hemLength': 'hemLength', 'size': 'availableSizes', 'sweaterStyle': 'sweaterStyle',
                'customerRating': 'customerRating', 'hemStyle': 'hemStyle', 'hasPart': 'embellishments', 'pattern': 'pattern', 'clothingCategory': 'type',
                'waistStyle': 'waistStyle', 'sleeveStyle': 'sleeveStyle', 'necklineStyle': 'necklineStyle', 'skirtLength': 'skirtStyle', 'color': 'color', 'brand': 'brand'}

    def __init__(self, data_path, metadata_path, actions_path, candidates_path, verbose=True):
        """Extends the base constructor with action and retrieval-candidate files."""
        super(SIMMCDatasetForResponseGeneration, self).__init__(data_path=data_path, metadata_path=metadata_path, verbose=verbose)
        self.task = 'response_generation'
        self.load_actions(actions_path)
        self.load_candidates(candidates_path)
        self.id2focus = {}
        for id in self.ids:
            # for response generation the context is shifted right
            # (response based on the item chosen by the wizard)
            self.id2focus[id] = self.extract_visual_context(id)[1:]
            assert len(self.id2dialog[id]['dialogue']) == len(self.id2focus[id]), 'Focus items do not match dialogue {} length'.format(id)
        self.processed_metadata = {}
        self.process_metadata_items()

    def process_metadata_items(self):
        """This method process the data inside metadata fields and make each field values a list
        (avoiding mixing up single values and lists).

        Populates self.processed_metadata[<item_id>][<readable field>] = [values].
        """
        for item_id, item in self.metadata.items():
            assert item_id not in self.processed_metadata, 'Item {} presents twice'.format(item_id)
            self.processed_metadata[item_id] = {}
            for field, field_vals in item['metadata'].items():
                curr_field = ''
                # availability field is always empty; url is not an attribute
                if field == 'availability' or field == 'url':
                    continue
                values = field_vals
                # availableSizes is sometimes a stringified list; normalize it
                if field == 'availableSizes' and not isinstance(values, list,):
                    values = self.repair_size_list(values)
                #field_tokens = tokenizer.tokenize(field)
                field_tokens = re.split('_|\s', field)
                for tok in field_tokens:
                    cleaned_tok = self._ATTR2STR[tok.lower()] if tok.lower() in self._ATTR2STR else tok.lower()
                    curr_field += cleaned_tok + ' '
                curr_field = curr_field[:-1]
                # NOTE(review): curr_val is assigned but never used below;
                # it looks vestigial.
                curr_val = ''
                proc_values = []
                if isinstance(values, list,):
                    for val in values:
                        curr_val = ''
                        #value_tokens = tokenizer.tokenize(val)
                        value_tokens = re.split('_|\s', val)
                        proc_values.append(' '.join(value_tokens))
                else:
                    value_tokens = re.split('_|\s', values)
                    proc_values.append(' '.join(value_tokens))
                # metadata JSON files contains different samples having hemLenght field twice.
                # In this case just discard the one with no values.
                if curr_field == 'hem length' and curr_field in self.processed_metadata[item_id]:
                    if not len(self.processed_metadata[item_id][curr_field]):
                        self.processed_metadata[item_id][curr_field] = proc_values
                    continue
                assert curr_field not in self.processed_metadata[item_id], 'Field {} presents twice in item {}. Please remove one of them (preferably the empty one)'.format(curr_field, item_id)
                self.processed_metadata[item_id][curr_field] = proc_values

    def repair_size_list(self, str_val):
        """fixes availableSizes when it is a stringified list (e.g., "[' xl ', ' m ']").

        Args:
            str_val (str): the stringified list to split back into items
        """
        return [word for word in str_val[2:-2].split('\', \'')]

    def __getitem__(self, index):
        dial_id, turn, user_req, wizard_resp, history, focus, action, attributes, candidates_ids = super().__getitem__(index)
        # convert actions and attributes to english strings
        action = action.lower() if action.lower() not in self._ACT2STR else self._ACT2STR[action.lower()]
        raw_fields = [attr if attr not in self._ATTR2FIELD else self._ATTR2FIELD[attr] for attr in attributes]
        fields = [field.lower() if field.lower() not in self._ATTR2STR else self._ATTR2STR[field.lower()] for field in raw_fields]
        item_attributes = []
        # an empty attribute list still contributes one empty placeholder
        if not len(fields):
            item_attributes.append([])
        for field in fields:
            if field in self.processed_metadata[str(focus)] and len(self.processed_metadata[str(focus)][field]):
                item_attributes.append(self.processed_metadata[str(focus)][field])
            else:
                item_attributes.append([])
        retrieval_candidates = [self.candidates[candidate_id] for candidate_id in candidates_ids]
        return dial_id, turn, user_req, wizard_resp, history, focus, action, item_attributes, retrieval_candidates

    def __len__(self):
        return super().__len__()

    def __str__(self):
        return '{}_subtask({})'.format(super().__str__(), self.task)

    def load_candidates(self, candidates_path):
        """Load the retrieval-candidate pool and the per-turn candidate ids."""
        self.candidates = []
        self.id2candidates = {}
        with open(candidates_path) as fp:
            raw_candidates = json.load(fp)
        for candidate in raw_candidates['system_transcript_pool']:
            self.candidates.append(candidate)
        for candidates_per_dial in raw_candidates['retrieval_candidates']:
            self.id2candidates[candidates_per_dial['dialogue_idx']] = candidates_per_dial['retrieval_candidates']
        # check if all the candidate ids correspond to a valid candidate in the candidate pool
        for (_, candidates_per_dial) in self.id2candidates.items():
            for candidates_per_turn in candidates_per_dial:
                for candidate_id in candidates_per_turn['retrieval_candidates']:
                    assert candidate_id < len(self.candidates), 'Candidate with id {} not present in candidate pool'.format(candidate_id)

    def load_actions(self, actions_path):
        """Load per-turn action annotations and their focus images."""
        self.id2act = {}
        self.id2actfocus = {}
        with open(actions_path) as fp:
            raw_actions = json.load(fp)
        for action in raw_actions:
            # dialogs skipped during indexing have no usable annotations
            if action['dialog_id'] in self.skipped_dialogs:
                continue
            assert len(action['actions']) == len(action['focus_images']), 'focus_images has different length than number of actions'
            self.id2act[action['dialog_id']] = action['actions']
            self.id2actfocus[action['dialog_id']] = action['focus_images']
        # check if we have actions for all the turns
        for dial_id in self.ids:
            assert len(self.id2dialog[dial_id]['dialogue']) == len(self.id2act[dial_id]),\
                'Actions number does not match dialogue turns in dialogue {}'.format(dial_id)
class SIMMCDatasetForActionPrediction(SIMMCDataset):
"""Dataset wrapper for SIMMC Fashion for api call prediction subtask
"""
_ACT2LABEL = {'None': 0,'SearchDatabase': 1, 'SearchMemory': 2, 'SpecifyInfo': 3, 'AddToCart': 4}
_LABEL2ACT = ['None','SearchDatabase', 'SearchMemory', 'SpecifyInfo', 'AddToCart']
"""
_ATTR2LABEL = {'embellishment': 0, 'skirtStyle': 1, 'availableSizes': 2, 'dressStyle': 3, 'material': 4, 'clothingStyle': 5, 'jacketStyle': 6,
'sleeveLength': 7, 'soldBy': 8, 'price': 9, 'ageRange': 10, 'hemLength': 11, 'size': 12, 'warmthRating': 13, 'sweaterStyle': 14,
'forGender': 15, 'madeIn': 16, 'info': 17, 'customerRating': 18, 'hemStyle': 19, 'hasPart': 20, 'pattern': | |
import os
import time
from unittest.mock import MagicMock, Mock, call
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.engine import Engine, Events, State
from ignite.engine.deterministic import keep_random_state
from ignite.metrics import Average
from tests.ignite.engine import BatchChecker, EpochCounter, IterationCounter, get_iterable_dataset
def test_terminate():
    """Engine.terminate() raises the should_terminate flag."""
    trainer = Engine(lambda engine, batch: 1)
    assert not trainer.should_terminate
    trainer.terminate()
    assert trainer.should_terminate
def test_invalid_process_raises_with_invalid_signature():
    """Engine rejects process functions whose arity is not (engine, batch)."""
    bad_process_fns = [
        None,
        lambda: None,
        lambda batch: None,
        lambda engine, batch, extra_arg: None,
    ]
    for fn in bad_process_fns:
        with pytest.raises(ValueError):
            Engine(fn)
def test_current_epoch_counter_increases_every_epoch():
    """EPOCH_STARTED fires once per epoch, including on a re-run."""
    n_epochs = 5
    trainer = Engine(MagicMock(return_value=1))
    epoch_counter = EpochCounter()
    trainer.add_event_handler(Events.EPOCH_STARTED, epoch_counter)

    final_state = trainer.run([1, 2], max_epochs=n_epochs)
    assert final_state.epoch == n_epochs

    # a fresh run restarts epoch counting from 1
    epoch_counter.current_epoch_count = 1
    final_state = trainer.run([1, 2], max_epochs=n_epochs)
    assert final_state.epoch == n_epochs
def test_current_iteration_counter_increases_every_iteration():
    """ITERATION_STARTED fires once per batch, including on a re-run."""
    data = [1, 2, 3]
    n_epochs = 5
    trainer = Engine(MagicMock(return_value=1))
    iteration_counter = IterationCounter()
    trainer.add_event_handler(Events.ITERATION_STARTED, iteration_counter)

    final_state = trainer.run(data, max_epochs=n_epochs)
    assert final_state.iteration == n_epochs * len(data)

    # a fresh run restarts iteration counting from 1
    iteration_counter.current_iteration_count = 1
    final_state = trainer.run(data, max_epochs=n_epochs)
    assert final_state.iteration == n_epochs * len(data)
def test_stopping_criterion_is_max_epochs():
    """Without termination handlers, the run stops exactly at max_epochs."""
    n_epochs = 5
    trainer = Engine(MagicMock(return_value=1))
    final_state = trainer.run([1], max_epochs=n_epochs)
    assert final_state.epoch == n_epochs
def test_terminate_at_end_of_epoch_stops_run():
    """terminate() from an EPOCH_COMPLETED handler ends the run at that epoch."""
    n_epochs = 5
    stop_epoch = 3
    trainer = Engine(MagicMock(return_value=1))

    def stop_on_target_epoch(engine):
        if engine.state.epoch == stop_epoch:
            engine.terminate()

    trainer.add_event_handler(Events.EPOCH_COMPLETED, stop_on_target_epoch)
    assert not trainer.should_terminate

    final_state = trainer.run([1], max_epochs=n_epochs)
    assert final_state.epoch == stop_epoch
    assert trainer.should_terminate
def test_terminate_at_start_of_epoch_stops_run_after_completing_iteration():
    """terminate() in EPOCH_STARTED still lets one iteration complete."""
    n_epochs = 5
    stop_epoch = 3
    data = [1, 2, 3]
    trainer = Engine(MagicMock(return_value=1))

    def stop_on_target_epoch(engine):
        if engine.state.epoch == stop_epoch:
            engine.terminate()

    trainer.add_event_handler(Events.EPOCH_STARTED, stop_on_target_epoch)
    assert not trainer.should_terminate

    final_state = trainer.run(data, max_epochs=n_epochs)
    # the epoch counter is not advanced because the epoch never completed
    assert final_state.epoch == stop_epoch
    assert trainer.should_terminate
    # exactly one iteration of the terminated epoch ran to completion
    assert final_state.iteration == (stop_epoch - 1) * len(data) + 1
def test_terminate_stops_run_mid_epoch():
    """terminate() from ITERATION_STARTED stops after finishing that iteration."""
    iters_per_epoch = 10
    stop_iteration = iters_per_epoch + 3
    trainer = Engine(MagicMock(return_value=1))

    def stop_at_iteration(eng):
        if eng.state.iteration == stop_iteration:
            eng.terminate()

    trainer.add_event_handler(Events.ITERATION_STARTED, stop_at_iteration)
    final_state = trainer.run(data=[None] * iters_per_epoch, max_epochs=3)
    # the iteration completes; the counter is only bumped at the next start
    assert final_state.iteration == stop_iteration
    assert final_state.epoch == np.ceil(stop_iteration / iters_per_epoch)  # it starts from 0
def test_terminate_epoch_stops_mid_epoch():
    """terminate_epoch() skips the rest of the current epoch while the run
    continues with the remaining epochs."""
    iters_per_epoch = 10
    stop_iteration = iters_per_epoch + 4
    n_epochs = 3
    trainer = Engine(MagicMock(return_value=1))

    def stop_epoch_at_iteration(eng):
        if eng.state.iteration == stop_iteration:
            eng.terminate_epoch()

    trainer.add_event_handler(Events.ITERATION_STARTED, stop_epoch_at_iteration)
    final_state = trainer.run(data=[None] * iters_per_epoch, max_epochs=n_epochs)
    # one epoch is cut short, so the total is short by the skipped tail
    expected_iterations = iters_per_epoch * (n_epochs - 1) + stop_iteration % iters_per_epoch
    assert final_state.iteration == expected_iterations
def _create_mock_data_loader(epochs, batches_per_epoch):
batches = [MagicMock()] * batches_per_epoch
data_loader_manager = MagicMock()
batch_iterators = [iter(batches) for _ in range(epochs)]
data_loader_manager.__iter__.side_effect = batch_iterators
data_loader_manager.__len__.return_value = batches_per_epoch
return data_loader_manager
def test_iteration_events_are_fired():
    """ITERATION_STARTED/ITERATION_COMPLETED fire once per batch and strictly
    alternate (started, completed, started, ...)."""
    max_epochs = 5
    num_batches = 3
    data = _create_mock_data_loader(max_epochs, num_batches)
    engine = Engine(MagicMock(return_value=1))
    # Attach both handlers to one parent mock so their relative call order is
    # recorded in mock_manager.mock_calls.
    mock_manager = Mock()
    iteration_started = Mock()
    engine.add_event_handler(Events.ITERATION_STARTED, iteration_started)
    iteration_complete = Mock()
    engine.add_event_handler(Events.ITERATION_COMPLETED, iteration_complete)
    mock_manager.attach_mock(iteration_started, "iteration_started")
    mock_manager.attach_mock(iteration_complete, "iteration_complete")
    engine.run(data, max_epochs=max_epochs)
    assert iteration_started.call_count == num_batches * max_epochs
    assert iteration_complete.call_count == num_batches * max_epochs
    # One started/completed pair per iteration, in order.
    expected_calls = []
    for i in range(max_epochs * num_batches):
        expected_calls.append(call.iteration_started(engine))
        expected_calls.append(call.iteration_complete(engine))
    assert mock_manager.mock_calls == expected_calls
def test_last_event_name():
    """engine.last_event_name reflects the event currently being dispatched
    (checked from inside each handler) and ends up COMPLETED after run()."""
    engine = Engine(MagicMock(return_value=1))
    assert engine.last_event_name is None
    # Each handler asserts that, while it runs, last_event_name equals the
    # event that triggered it.
    @engine.on(Events.STARTED)
    def _(_engine):
        assert _engine.last_event_name == Events.STARTED
    @engine.on(Events.EPOCH_STARTED)
    def _(_engine):
        assert _engine.last_event_name == Events.EPOCH_STARTED
    @engine.on(Events.ITERATION_STARTED)
    def _(_engine):
        assert _engine.last_event_name == Events.ITERATION_STARTED
    @engine.on(Events.ITERATION_COMPLETED)
    def _(_engine):
        assert _engine.last_event_name == Events.ITERATION_COMPLETED
    @engine.on(Events.EPOCH_COMPLETED)
    def _(_engine):
        assert _engine.last_event_name == Events.EPOCH_COMPLETED
    engine.run([0, 1])
    assert engine.last_event_name == Events.COMPLETED
def test_reset_should_terminate():
    """A second run() resets should_terminate, so the engine can be
    terminated again at the same iteration."""
    trainer = Engine(lambda eng, batch: None)

    @trainer.on(Events.ITERATION_COMPLETED)
    def terminate_on_iteration_10(eng):
        if eng.state.iteration == 10:
            eng.terminate()

    for _ in range(2):
        trainer.run([0] * 20)
        assert trainer.state.iteration == 10
def test_batch_values():
    """The process function must receive batches in dataset order, wrapping
    around at each epoch boundary."""
    def check_all_batches(data):
        seen = [0]
        n = len(data)

        def process(_, batch):
            # batches arrive in order; the index wraps every epoch
            assert batch == data[seen[0] % n]
            seen[0] += 1

        Engine(process).run(data, max_epochs=10)

    check_all_batches(torch.randint(0, 1000, size=(256,)))
def test_state_repr():
    """repr(State) must mention every standard state attribute."""
    state = State(dataloader=[0, 1, 2, 3, 4, 5], max_epochs=1,
                  metrics={"accuracy": Mock()})
    text = repr(state)
    for expected in ("iteration", "epoch", "max_epochs: 1",
                     "dataloader", "metrics", "output", "batch"):
        assert expected in text
def test_alter_batch():
    """A handler may replace engine.state.batch in ITERATION_STARTED; the
    process function must then see the substituted batch, not the one coming
    from the dataloader."""
    small_shape = (1, 2, 2)
    large_shape = (1, 3, 3)
    small_loader = torch.randint(0, 256, size=(30,) + small_shape)
    large_loader = torch.randint(0, 256, size=(20,) + large_shape)
    switch_iteration = 50
    def should_take_large_img(i):
        return i >= switch_iteration
    def update_fn(engine, batch):
        i = engine.state.iteration
        if i < switch_iteration:
            # iterations are 1-based, hence the (i - 1) offset into the loader
            assert batch.shape == small_shape
            assert (small_loader[(i - 1) % len(small_loader), ...] == batch).all()
        else:
            assert batch.shape == large_shape
            assert (large_loader[(i - switch_iteration) % len(large_loader), ...] == batch).all()
    trainer = Engine(update_fn)
    def cycle(seq):
        # repeat seq endlessly
        while True:
            for i in seq:
                yield i
    small_loader_iter = cycle(small_loader)
    large_loader_iter = cycle(large_loader)
    @trainer.on(Events.ITERATION_STARTED)
    def choose_batch(engine):
        # overwrite whatever batch the engine pulled from `data`
        i = engine.state.iteration
        if should_take_large_img(i):
            batch = next(large_loader_iter)
        else:
            batch = next(small_loader_iter)
        engine.state.batch = batch
    num_epochs = 5
    num_iters = 25
    data = range(num_iters)
    trainer.run(data, num_epochs)
def test__is_done():
    """_is_done is False mid-run and True once the iteration budget
    (max_epochs * epoch_length) is spent."""
    in_progress = State(iteration=10, epoch=1, max_epochs=100, epoch_length=100)
    assert not Engine._is_done(in_progress)
    exhausted = State(iteration=1000, max_epochs=10, epoch_length=100)
    assert Engine._is_done(exhausted)
def test__setup_engine():
    """_setup_engine must seed the restart iterator with the state's current
    iteration so a resumed run continues where it left off."""
    engine = Engine(lambda e, b: 1)
    engine.state = State(iteration=10, epoch=1, max_epochs=100, epoch_length=100)
    data = list(range(100))
    engine.state.dataloader = data
    engine._setup_engine()
    assert len(engine._init_iter) == 1 and engine._init_iter[0] == 10
    # assert engine._dataloader_len == len(data)
def test_run_asserts():
    """run() rejects empty data and warns about the deprecated seed argument."""
    trainer = Engine(lambda e, b: 1)
    with pytest.raises(ValueError, match=r"Input data has zero size. Please provide non-empty data"):
        trainer.run([])
    with pytest.warns(UserWarning, match="Argument seed is deprecated"):
        trainer.run([0, 1, 2, 3, 4], seed=1234)
def test_state_get_event_attrib_value():
    """get_event_attrib_value maps iteration events to state.iteration and
    epoch/run-level events to state.epoch, with or without filters."""
    state = State()
    state.iteration = 10
    state.epoch = 9
    iteration_events = [
        Events.ITERATION_STARTED,
        Events.ITERATION_COMPLETED,
        Events.ITERATION_STARTED(every=10),
        Events.ITERATION_COMPLETED(every=10),
    ]
    epoch_events = [
        Events.EPOCH_STARTED,
        Events.EPOCH_COMPLETED,
        Events.STARTED,
        Events.COMPLETED,
        Events.EPOCH_STARTED(once=5),
        Events.EPOCH_COMPLETED(once=5),
    ]
    for event in iteration_events:
        assert state.get_event_attrib_value(event) == state.iteration
    for event in epoch_events:
        assert state.get_event_attrib_value(event) == state.epoch
def test_time_stored_in_state():
    """state.times must hold at least the wall-clock duration of each epoch
    and of the whole run (lower-bounded by the time slept in the step fn)."""
    def _test(data, max_epochs, epoch_length):
        sleep_time = 0.01
        engine = Engine(lambda e, b: time.sleep(sleep_time))
        # Each epoch sleeps at least sleep_time * epoch_length seconds.
        def check_epoch_time(engine):
            assert engine.state.times[Events.EPOCH_COMPLETED.name] >= sleep_time * epoch_length
        def check_completed_time(engine):
            assert engine.state.times[Events.COMPLETED.name] >= sleep_time * epoch_length * max_epochs
        engine.add_event_handler(Events.EPOCH_COMPLETED, lambda e: check_epoch_time(e))
        engine.add_event_handler(Events.COMPLETED, lambda e: check_completed_time(e))
        engine.run(data, max_epochs=max_epochs, epoch_length=epoch_length)
    # data size equal to and larger than epoch_length
    _test(list(range(100)), max_epochs=2, epoch_length=100)
    _test(list(range(200)), max_epochs=2, epoch_length=100)
    _test(list(range(200)), max_epochs=5, epoch_length=100)
def _test_check_triggered_events(data, max_epochs, epoch_length, exp_iter_stops=None):
    """Run an engine over `data` and assert that every event fired the
    expected number of times.

    Args:
        data: list or iterator consumed by the engine.
        max_epochs: number of epochs to run.
        epoch_length: iterations per epoch.
        exp_iter_stops: expected DATALOADER_STOP_ITERATION count; defaults to
            max_epochs - 1 (a list dataloader is re-entered once per epoch).
    """
    engine = Engine(lambda e, b: 1)
    events = [
        Events.STARTED,
        Events.EPOCH_STARTED,
        Events.ITERATION_STARTED,
        Events.ITERATION_COMPLETED,
        Events.EPOCH_COMPLETED,
        Events.COMPLETED,
        Events.GET_BATCH_STARTED,
        Events.GET_BATCH_COMPLETED,
        Events.DATALOADER_STOP_ITERATION,
    ]
    handlers = {e: MagicMock() for e in events}
    for e, handler in handlers.items():
        engine.add_event_handler(e, handler)
    engine.run(data, max_epochs=max_epochs, epoch_length=epoch_length)
    expected_num_calls = {
        Events.STARTED: 1,
        Events.COMPLETED: 1,
        Events.EPOCH_STARTED: max_epochs,
        Events.EPOCH_COMPLETED: max_epochs,
        Events.ITERATION_STARTED: max_epochs * epoch_length,
        Events.ITERATION_COMPLETED: max_epochs * epoch_length,
        Events.GET_BATCH_STARTED: max_epochs * epoch_length,
        Events.GET_BATCH_COMPLETED: max_epochs * epoch_length,
        Events.DATALOADER_STOP_ITERATION: (max_epochs - 1) if exp_iter_stops is None else exp_iter_stops,
    }
    for n, handler in handlers.items():
        assert handler.call_count == expected_num_calls[n], "{}: {} vs {}".format(
            n, handler.call_count, expected_num_calls[n]
        )
def _test_run_check_triggered_events():
    """Event-count checks for list data with epoch_length equal to, below and
    above the dataset size."""
    # tests issue https://github.com/pytorch/ignite/issues/818
    _test_check_triggered_events(list(range(10)), max_epochs=4, epoch_length=10)
    _test_check_triggered_events(list(range(100)), max_epochs=5, epoch_length=100)
    _test_check_triggered_events(list(range(100)), max_epochs=5, epoch_length=50, exp_iter_stops=50 * 5 // 100)
    _test_check_triggered_events(list(range(100)), max_epochs=5, epoch_length=150, exp_iter_stops=150 * 5 // 100)
def test_run_check_triggered_events_list():
    """Event counting over plain list data (non-distributed entry point)."""
    _test_run_check_triggered_events()
def _test_run_check_triggered_events_on_iterator():
    """Event-count checks when data is an iterator rather than a list.

    An infinite iterator never raises StopIteration, so the expected
    DATALOADER_STOP_ITERATION count is 0; a finite iterator that is sized
    exactly to the run also never stops early.
    """
    def infinite_data_iterator():
        while True:
            for i in range(100):
                yield i
    _test_check_triggered_events(infinite_data_iterator(), max_epochs=5, epoch_length=100, exp_iter_stops=0)
    _test_check_triggered_events(infinite_data_iterator(), max_epochs=5, epoch_length=50, exp_iter_stops=0)
    _test_check_triggered_events(infinite_data_iterator(), max_epochs=5, epoch_length=150, exp_iter_stops=0)
    def limited_data_iterator():
        for i in range(100):
            yield i
    _test_check_triggered_events(limited_data_iterator(), max_epochs=1, epoch_length=100, exp_iter_stops=0)
    _test_check_triggered_events(limited_data_iterator(), max_epochs=10, epoch_length=10, exp_iter_stops=0)
    # These runs exhaust the finite iterator before the epoch budget is met:
    # the engine warns and stops early, so the count assertions must fail.
    with pytest.raises(AssertionError):
        with pytest.warns(UserWarning, match=r"Data iterator can not provide data anymore"):
            _test_check_triggered_events(limited_data_iterator(), max_epochs=3, epoch_length=100)
    with pytest.raises(AssertionError):
        with pytest.warns(UserWarning, match=r"Data iterator can not provide data anymore"):
            _test_check_triggered_events(limited_data_iterator(), max_epochs=3, epoch_length=75)
    with pytest.raises(AssertionError):
        with pytest.warns(UserWarning, match=r"Data iterator can not provide data anymore"):
            _test_check_triggered_events(limited_data_iterator(), max_epochs=1, epoch_length=101)
def test_run_check_triggered_events_on_iterator():
    """Event counting over iterator data (non-distributed entry point)."""
    _test_run_check_triggered_events_on_iterator()
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_gpu(distributed_context_single_node_nccl):
    """Event counting under single-node NCCL distributed context."""
    _test_run_check_triggered_events_on_iterator()
    _test_run_check_triggered_events()
_test_run_check_triggered_events()
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_cpu(distributed_context_single_node_gloo):
    """Event counting under single-node gloo distributed context."""
    _test_run_check_triggered_events_on_iterator()
    _test_run_check_triggered_events()
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_cpu(distributed_context_multi_node_gloo):
    """Event counting under multi-node gloo distributed context."""
    _test_run_check_triggered_events_on_iterator()
    _test_run_check_triggered_events()
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gpu(distributed_context_multi_node_nccl):
    """Event counting under multi-node NCCL distributed context."""
    _test_run_check_triggered_events_on_iterator()
    _test_run_check_triggered_events()
def test_engine_random_state():
def random_data_generator():
while True:
yield torch.randint(0, 100, size=(5,))
def sum_data(_, batch):
result = torch.sum(batch)
return result
def get_engine():
engine = Engine(sum_data)
average = Average()
average.attach(engine, "average")
return engine
torch.manual_seed(34)
engine = get_engine()
state1 = engine.run(random_data_generator(), max_epochs=2, epoch_length=2)
torch.manual_seed(34)
engine = get_engine()
state2 = engine.run(random_data_generator(), max_epochs=2, epoch_length=2)
torch.manual_seed(42)
| |
a greater '
'value than current_release')
self.ValidateUniqueIds()
# Add the encoding check if it's not present (should ensure that it's always
# present in all .grd files generated by GRIT). If it's present, assert if
# it's not correct.
if 'enc_check' not in self.attrs or self.attrs['enc_check'] == '':
self.attrs['enc_check'] = constants.ENCODING_CHECK
else:
assert self.attrs['enc_check'] == constants.ENCODING_CHECK, (
'Are you sure your .grd file is in the correct encoding (UTF-8)?')
def ValidateUniqueIds(self):
"""Validate that 'name' attribute is unique in all nodes in this tree
except for nodes that are children of <if> nodes.
"""
unique_names = {}
duplicate_names = []
# To avoid false positives from mutually exclusive <if> clauses, check
# against whatever the output condition happens to be right now.
# TODO(benrg): do something better.
for node in self.ActiveDescendants():
if node.attrs.get('generateid', 'true') == 'false':
continue # Duplication not relevant in that case
for node_id in node.GetTextualIds():
if util.SYSTEM_IDENTIFIERS.match(node_id):
continue # predefined IDs are sometimes used more than once
if node_id in unique_names and node_id not in duplicate_names:
duplicate_names.append(node_id)
unique_names[node_id] = 1
if len(duplicate_names):
raise exception.DuplicateKey(', '.join(duplicate_names))
def GetCurrentRelease(self):
"""Returns the current release number."""
return int(self.attrs['current_release'])
def GetLatestPublicRelease(self):
"""Returns the latest public release number."""
return int(self.attrs['latest_public_release'])
def GetSourceLanguage(self):
"""Returns the language code of the source language."""
return self.attrs['source_lang_id']
def GetTcProject(self):
"""Returns the name of this project in the TranslationConsole, or
'NEED_TO_SET_tc_project_ATTRIBUTE' if it is not defined."""
return self.attrs['tc_project']
def SetOwnDir(self, dir):
"""Informs the 'grit' element of the directory the file it is in resides.
This allows it to calculate relative paths from the input file, which is
what we desire (rather than from the current path).
Args:
dir: r'c:\bla'
Return:
None
"""
assert dir
self.base_dir = os.path.normpath(os.path.join(dir, self.attrs['base_dir']))
def GetBaseDir(self):
"""Returns the base directory, relative to the working directory. To get
the base directory as set in the .grd file, use GetOriginalBaseDir()
"""
if hasattr(self, 'base_dir'):
return self.base_dir
else:
return self.GetOriginalBaseDir()
def GetOriginalBaseDir(self):
"""Returns the base directory, as set in the .grd file.
"""
return self.attrs['base_dir']
  def IsWhitelistSupportEnabled(self):
    """Returns whether resource whitelisting support is enabled."""
    return self.whitelist_support
  def SetWhitelistSupportEnabled(self, whitelist_support):
    """Enables or disables resource whitelisting support."""
    self.whitelist_support = whitelist_support
  def GetInputFiles(self):
    """Returns the sorted list of files that are read to produce the output.

    Scans every output configuration in turn, since the set of active
    descendants depends on the current language/context/fallback settings;
    the previous output language is restored afterwards.
    """
    # Importing this here avoids a circular dependency in the imports.
    # pylint: disable-msg=C6204
    from grit.node import include
    from grit.node import misc
    from grit.node import structure
    from grit.node import variant
    # Check if the input is required for any output configuration.
    input_files = set()
    # Collect even inactive PartNodes since they affect ID assignments.
    for node in self:
      if isinstance(node, misc.PartNode):
        input_files.add(self.ToRealPath(node.GetInputPath()))
    old_output_language = self.output_language
    for lang, ctx, fallback in self.GetConfigurations():
      self.SetOutputLanguage(lang or self.GetSourceLanguage())
      self.SetOutputContext(ctx)
      self.SetFallbackToDefaultLayout(fallback)
      for node in self.ActiveDescendants():
        if isinstance(node, (node_io.FileNode, include.IncludeNode,
                             structure.StructureNode, variant.SkeletonNode)):
          input_path = node.GetInputPath()
          if input_path is not None:
            input_files.add(self.ToRealPath(input_path))
          # If it's a flattened node, grab inlined resources too.
          if ((node.name == 'structure' or node.name == 'include')
              and node.attrs['flattenhtml'] == 'true'):
            if node.name == 'structure':
              node.RunPreSubstitutionGatherer()
            input_files.update(node.GetHtmlResourceFilenames())
    # Restore the language that was active before the scan.
    self.SetOutputLanguage(old_output_language)
    return sorted(input_files)
def GetFirstIdsFile(self):
"""Returns a usable path to the first_ids file, if set, otherwise
returns None.
The first_ids_file attribute is by default relative to the
base_dir of the .grd file, but may be prefixed by GRIT_DIR/,
which makes it relative to the directory of grit.py
(e.g. GRIT_DIR/../gritsettings/resource_ids).
"""
if not self.attrs['first_ids_file']:
return None
path = self.attrs['first_ids_file']
GRIT_DIR_PREFIX = 'GRIT_DIR'
if (path.startswith(GRIT_DIR_PREFIX)
and path[len(GRIT_DIR_PREFIX)] in ['/', '\\']):
return util.PathFromRoot(path[len(GRIT_DIR_PREFIX) + 1:])
else:
return self.ToRealPath(path)
def GetOutputFiles(self):
"""Returns the list of <output> nodes that are descendants of this node's
<outputs> child and are not enclosed by unsatisfied <if> conditionals.
"""
for child in self.children:
if child.name == 'outputs':
return [node for node in child.ActiveDescendants()
if node.name == 'output']
raise exception.MissingElement()
def GetConfigurations(self):
"""Returns the distinct (language, context, fallback_to_default_layout)
triples from the output nodes.
"""
return set((n.GetLanguage(), n.GetContext(), n.GetFallbackToDefaultLayout())
for n in self.GetOutputFiles())
def GetSubstitutionMessages(self):
"""Returns the list of <message sub_variable="true"> nodes."""
return [n for n in self.ActiveDescendants()
if isinstance(n, message.MessageNode)
and n.attrs['sub_variable'] == 'true']
def SetOutputLanguage(self, output_language):
"""Set the output language. Prepares substitutions.
The substitutions are reset every time the language is changed.
They include messages designated as variables, and language codes for html
and rc files.
Args:
output_language: a two-letter language code (eg: 'en', 'ar'...) or ''
"""
if not output_language:
# We do not specify the output language for .grh files,
# so we get an empty string as the default.
# The value should match grit.clique.MessageClique.source_language.
output_language = self.GetSourceLanguage()
if output_language != self.output_language:
self.output_language = output_language
self.substituter = None # force recalculate
  def SetOutputContext(self, output_context):
    """Sets the output context and invalidates the cached substituter."""
    self.output_context = output_context
    self.substituter = None  # force recalculate
  def SetFallbackToDefaultLayout(self, fallback_to_default_layout):
    """Sets the fallback-to-default-layout flag and invalidates the cached
    substituter."""
    self.fallback_to_default_layout = fallback_to_default_layout
    self.substituter = None  # force recalculate
  def SetDefines(self, defines):
    """Sets the preprocessor defines and invalidates the cached substituter."""
    self.defines = defines
    self.substituter = None  # force recalculate
  def SetTargetPlatform(self, target_platform):
    """Stores the target platform string for this build."""
    self.target_platform = target_platform
  def GetSubstituter(self):
    """Returns the Substituter for the current output language, building and
    caching it on first use (setters clear the cache via self.substituter)."""
    if self.substituter is None:
      self.substituter = util.Substituter()
      self.substituter.AddMessages(self.GetSubstitutionMessages(),
                                   self.output_language)
      # The text-direction placeholder depends on whether the output
      # language is right-to-left.
      if self.output_language in _RTL_LANGS:
        direction = 'dir="RTL"'
      else:
        direction = 'dir="LTR"'
      self.substituter.AddSubstitutions({
          'GRITLANGCODE': self.output_language,
          'GRITDIR': direction,
      })
      from grit.format import rc  # avoid circular dep
      rc.RcSubstitutions(self.substituter, self.output_language)
    return self.substituter
  def AssignFirstIds(self, filename_or_stream, defines):
    """Assign first ids to each grouping node based on values from the
    first_ids file (if specified on the <grit> node).

    Raises:
      Exception: if a grouping node already sets first_id explicitly, or if
        the first_ids file has no (or not enough) ids for this .grd file.
    """
    assert self._id_map is None, 'AssignFirstIds() after InitializeIds()'
    # If the input is a stream, then we're probably in a unit test and
    # should skip this step.
    if not isinstance(filename_or_stream, six.string_types):
      return
    # Nothing to do if the first_ids_filename attribute isn't set.
    first_ids_filename = self.GetFirstIdsFile()
    if not first_ids_filename:
      return
    src_root_dir, first_ids = _ReadFirstIdsFromFile(first_ids_filename,
                                                    defines)
    from grit.node import empty
    for node in self.Preorder():
      if isinstance(node, empty.GroupingNode):
        # Key into first_ids with the .grd path relative to the source root
        # (forward slashes), falling back to the bare basename when the file
        # lives outside the source tree.
        abs_filename = os.path.abspath(filename_or_stream)
        if abs_filename[:len(src_root_dir)] != src_root_dir:
          filename = os.path.basename(filename_or_stream)
        else:
          filename = abs_filename[len(src_root_dir) + 1:]
          filename = filename.replace('\\', '/')
        if node.attrs['first_id'] != '':
          raise Exception(
              "Don't set the first_id attribute when using the first_ids_file "
              "attribute on the <grit> node, update %s instead." %
              first_ids_filename)
        try:
          id_list = first_ids[filename][node.name]
        except KeyError as e:
          print('-' * 78)
          print('Resource id not set for %s (%s)!' % (filename, node.name))
          print('Please update %s to include an entry for %s. See the '
                'comments in resource_ids for information on why you need to '
                'update that file.' % (first_ids_filename, filename))
          print('-' * 78)
          raise e
        # Each grouping node of the same name consumes the next id in turn.
        try:
          node.attrs['first_id'] = str(id_list.pop(0))
        except IndexError as e:
          raise Exception('Please update %s and add a first id for %s (%s).'
                          % (first_ids_filename, filename, node.name))
  def GetIdMap(self):
    '''Return a dictionary mapping textual ids to numeric ids.'''
    # Populated by InitializeIds(); None before that has been called.
    return self._id_map
  def SetPredeterminedIdsFile(self, predetermined_ids_file):
    '''Record the path of the predetermined-ids file; must be called before
    InitializeIds() computes the id map.'''
    assert self._id_map is None, (
        'SetPredeterminedIdsFile() after InitializeIds()')
    self._predetermined_ids_file = predetermined_ids_file
def InitializeIds(self):
'''Initializes the text ID -> numeric ID mapping.'''
predetermined_id_map = {}
if self._predetermined_ids_file:
with open(self._predetermined_ids_file) as f:
for line in f:
tid, nid = line.split()
predetermined_id_map[tid] = int(nid)
self._id_map = _ComputeIds(self, predetermined_id_map)
  def RunGatherers(self, debug=False):
    '''Call RunPreSubstitutionGatherer() on every node of the tree, then apply
    substitutions, then call RunPostSubstitutionGatherer() on every node.

    The substitutions step requires that the output language has been set.
    Locally, get the Substitution messages and add them to the substituter.
    Also add substitutions for language codes in the Rc.

    Args:
      debug: will print information while running gatherers.
    '''
    for node in self.ActiveDescendants():
      if hasattr(node, 'RunPreSubstitutionGatherer'):
        # NOTE(review): 'with node' presumably makes the node the current
        # context while its gatherer runs — confirm against base.Node.
        with node:
          node.RunPreSubstitutionGatherer(debug=debug)
    # Substitution requires a concrete output language to have been set.
    assert self.output_language
    self.SubstituteMessages(self.GetSubstituter())
    for node in self.ActiveDescendants():
      if hasattr(node, 'RunPostSubstitutionGatherer'):
        with node:
          node.RunPostSubstitutionGatherer(debug=debug)
class IdentifierNode(base.Node):
"""A node for specifying identifiers that should appear in the resource
header file, and be unique amongst all other resource identifiers, but don't
have any other attributes or reference any resources.
"""
  def MandatoryAttributes(self):
    """Attributes that must be present on an <identifier> node."""
    return ['name']
  def DefaultAttributes(self):
    """Default values for the optional <identifier> attributes."""
    return { 'comment' : '', 'id' : '', 'systemid': 'false' }
def GetId(self):
"""Returns the id of this identifier if it has one, None otherwise
"""
if 'id' in self.attrs:
return self.attrs['id']
return None
  def EndParsing(self):
    """Handles system identifiers: names flagged systemid='true' are
    registered with util.SetupSystemIdentifiers once parsing finishes."""
    super(IdentifierNode, self).EndParsing()
    if self.attrs['systemid'] == 'true':
      util.SetupSystemIdentifiers((self.attrs['name'],))
@staticmethod
def Construct(parent, name, id, comment, systemid='false'):
"""Creates a new node which is a child of 'parent', with attributes set
by parameters of the same name.
| |
from __future__ import print_function
import argparse
import os
import random
import torch
import torch.nn.parallel
import torch.optim as optim
import torch.utils.data
from pointnet.dataset import PoseDataset
from pointnet.model import PointNetCls, feature_transform_regularizer
from pointnet.fusion_model import PoseNet
import torch.nn.functional as F
import matplotlib.pyplot as plt
from tqdm import tqdm
import torch.nn as nn
import numpy as np
from tensorboardX import SummaryWriter
import sys
# ROS Kinetic injects its own Python 2.7 cv2 into sys.path; drop that entry
# before importing cv2 so the proper installation is picked up instead.
if '/opt/ros/kinetic/lib/python2.7/dist-packages' in sys.path:
    sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
import cv2
import matplotlib as mpl
# Render matplotlib figures at high DPI.
mpl.rcParams['figure.dpi'] = 300
import sklearn.metrics
def parse_arguments(argv=None):
    """Parse command-line options for training/evaluation.

    Args:
        argv: optional list of argument strings; defaults to sys.argv[1:]
            (backward-compatible addition, mainly for testing).

    Returns:
        argparse.Namespace with the parsed options (also printed to stdout).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--batchsize', type=int, default=64, help='input batch size')
    # Help text fixed: it was copy-pasted from --batchsize.
    parser.add_argument(
        '--num_points', type=int, default=2500,
        help='number of points sampled from the input point cloud')
    parser.add_argument(
        '--workers', type=int, help='number of data loading workers', default=0)
    parser.add_argument(
        '--nepoch', type=int, default=250, help='number of epochs to train for')
    parser.add_argument('--outf', type=str, default='cls', help='output folder')
    parser.add_argument('--dataset', type=str, required=True, help="dataset path")
    parser.add_argument('--dataset_annotation', type=str, required=True, help="dataset coco file path")
    parser.add_argument('--test_dataset_annotation', type=str, required=False, help="test dataset coco file path")
    parser.add_argument('--dataset_type', type=str, default='shapenet', help="dataset type shapenet|modelnet40")
    parser.add_argument('--feature_transform', action='store_true', help="use feature transform")
    parser.add_argument('--render_poses', action='store_true', help="use pose rendering for viz")
    # Help text fixed: it was copy-pasted from --render_poses.
    parser.add_argument('--test_only', action='store_true', help="run evaluation only, skip training")
    parser.add_argument('--model', type=str, default='', help='model path')
    opt = parser.parse_args(argv)
    print(opt)
    return opt
class ModelInterface():
def __init__(self, opt):
self.opt = opt
# if opt.dataset_type == 'shapenet':
# self.dataset = ShapeNetDataset(
# root=opt.dataset,
# classification=True,
# npoints=opt.num_points)
# self.test_dataset = ShapeNetDataset(
# root=opt.dataset,
# classification=True,
# split='test',
# npoints=opt.num_points,
# data_augmentation=False)
# elif opt.dataset_type == 'modelnet40':
# self.dataset = ModelNetDataset(
# root=opt.dataset,
# npoints=opt.num_points,
# split='trainval')
# self.test_dataset = ModelNetDataset(
# root=opt.dataset,
# split='test',
# npoints=opt.num_points,
# data_augmentation=False)
if opt.dataset_type == "ycb":
self.opt.num_points = 2000 #number of points on the input pointcloud
self.opt.outf = 'trained_models/ycb' #folder to save trained models
self.opt.log_dir = 'experiments/logs/ycb' #folder to save logs
self.opt.noise_trans = 0.00
self.opt.feature_transform = False
self.opt.num_objects = 1
self.dataset = PoseDataset(
'train',
self.opt.num_points,
True,
self.opt.dataset,
self.opt.dataset_annotation,
self.opt.noise_trans,
False)
if 'test_dataset_annotation' in self.opt:
self.test_dataset = PoseDataset(
'test',
opt.num_points,
False,
self.opt.dataset,
self.opt.test_dataset_annotation,
self.opt.noise_trans,
False)
else:
print("Not loading test dataset!")
print('num pose samples', self.dataset.num_pose_samples)
else:
exit('wrong dataset type')
self.tboard = SummaryWriter(self.opt.log_dir)
self.dataloader = torch.utils.data.DataLoader(
self.dataset,
batch_size=self.opt.batchsize,
shuffle=True,
num_workers=int(self.opt.workers))
self.testdataloader = torch.utils.data.DataLoader(
self.test_dataset,
batch_size=self.opt.batchsize,
shuffle=False,
num_workers=int(self.opt.workers))
print("Train dataset size :{}, Test dataset size : {}".format(len(self.dataset), len(self.test_dataset)))
num_classes = len(self.dataset.classes)
print('classes', self.dataset.classes)
# self.classifier = PointNetCls(k=self.dataset.num_pose_samples, feature_transform=self.opt.feature_transform)
self.estimator = PoseNet(num_points = self.opt.num_points, num_obj = self.opt.num_objects, num_classes = self.dataset.num_pose_samples)
print(self.estimator)
self.estimator.cuda()
if self.opt.model != '':
print("Preloading saved model : {}".format(self.opt.model))
# self.classifier.load_state_dict(torch.load(self.opt.model))
# print(torch.load(self.opt.model))
self.estimator.load_state_dict(torch.load(self.opt.model))
try:
os.makedirs(self.opt.outf)
except OSError:
pass
def compute_multilabel_ap(self, gt, pred, average="macro"):
"""
Compute the multi-label classification accuracy.
Args:
gt (np.ndarray): Shape Nx20, 0 or 1, 1 if the object i is present in that
image.
pred (np.ndarray): Shape Nx20, probability of that object in the image
(output probablitiy).
valid (np.ndarray): Shape Nx20, 0 if you want to ignore that class for that
image. Some objects are labeled as ambiguous.
Returns:
AP (list): average precision for all classes
"""
nclasses = gt.shape[1]
AP = []
# pred_cls = pred_cls[pred_cls > 0]
# print(gt.shape)
gt[gt > 0] = 1
for cid in range(nclasses):
gt_cls = gt[:, cid].astype('float32')
pred_cls = pred[:, cid].astype('float32')
# As per PhilK. code:
# https://github.com/philkr/voc-classification/blob/master/src/train_cls.py
pred_cls -= 1e-5 * gt_cls
# print(pred_cls)
# print(gt_cls)
# if (np.count_nonzero(gt_cls) > 0):
ap = sklearn.metrics.average_precision_score(
gt_cls, pred_cls, average=average)
AP.append(ap)
# print(AP)
# return AP
return np.nanmean(AP), AP
    def plot_comparisons(self, mode, batch_target_scores, batch_pred_scores, batch_input_imgs, batch_masked_input_img, batch_input_clouds, epoch, i):
        """Log a 2x4 figure to TensorBoard comparing target vs predicted poses
        for the first element of the batch: column 0 shows the input image
        (top) and its masked version (bottom); columns 1..3 show the top-3
        renderings of the target scores (top row) and predicted scores
        (bottom row).

        Args:
            mode: 'train' or 'test'; used in the TensorBoard tag and by
                render_scores to pick the dataset.
            i: iteration index, used only for the tag (NOTE(review): the
               render loop below reuses the name `i`, shadowing this
               parameter — harmless since the tag is built first, but worth
               renaming).
        """
        fig, (axs) = plt.subplots(2, 4)
        plt.tight_layout()
        plt.subplots_adjust(wspace=0, hspace=0)
        tboard_tag = "{}_images_epoch_{}/iteration_{}".format(mode, epoch, i)
        topk = 3
        # Only the first batch element is visualized.
        input_img = batch_input_imgs[0, :, :]
        masked_input_img = batch_masked_input_img[0, :, :]
        # Plot point clouds
        input_cloud = batch_input_clouds[0, :, :]
        input_cloud = np.expand_dims(input_cloud, axis=0)
        input_cloud_color = np.zeros((input_cloud.shape[0], 3), dtype=int)
        input_cloud_color = np.expand_dims(input_cloud_color, axis=0)
        # self.tboard.add_mesh(
        #     "images_epoch_{}_iteration_{}_{}/input_cloud".format(epoch, i, 0),
        #     vertices=input_cloud,
        #     colors=input_cloud_color
        # )
        # Images arrive channel-first; matplotlib wants HxWxC.
        input_img = np.transpose(input_img, (1, 2, 0))
        # print(input_img.shape)
        masked_input_img = np.transpose(masked_input_img, (1, 2, 0))
        axs[0,0].imshow(input_img)
        axs[0,0].axis('off')
        axs[1,0].imshow(masked_input_img)
        axs[1,0].axis('off')
        # Render topk predictions and target
        target_rgb_dls = self.render_scores(
            "train",
            batch_target_scores,
            topk
        )
        pred_rgb_dls = self.render_scores(
            "train",
            batch_pred_scores,
            topk
        )
        for i in range(topk):
            axs[0, 1+i].imshow(target_rgb_dls[i])
            axs[0, 1+i].axis('off')
            axs[1, 1+i].imshow(pred_rgb_dls[i])
            axs[1, 1+i].axis('off')
        # Plot scores
        # axs[0, 1].plot(batch_target_scores[0, :])
        # axs[1, 1].plot(batch_pred_scores[0, :])
        self.tboard.add_figure(tboard_tag, fig, 0)
        plt.close(fig)
def render_scores(self, mode, batch_pose_scores, topk):
pose_scores = batch_pose_scores[0, :]
if mode == "train":
topk_rgbs, topk_depths = self.dataset.render_poses(pose_scores, topk)
elif mode == "test":
topk_rgbs, topk_depths = self.test_dataset.render_poses(pose_scores, topk)
# rgb_dls = np.zeros((topk, self.dataset.img_height, self.dataset.img_width, 3))
rgb_dls = []
for p_i in range(len(topk_rgbs)):
rgb_dl = topk_rgbs[p_i]
rgb_dl = cv2.cvtColor(rgb_dl, cv2.COLOR_BGR2RGB)
# rgb_dls[p_i, :, :, :] = rgb_dl
rgb_dls.append(rgb_dl)
return rgb_dls
def plot_poses(self, mode, batch_pose_scores, batch_input_imgs, batch_masked_input_img, batch_input_clouds, epoch, i, prefix):
# print(batch_input_imgs.shape)
# print(batch_input_clouds.shape)
tboard_tag = "{}_images_epoch_{}/iteration_{}".format(mode, epoch, i)
topk = 3
pose_scores = batch_pose_scores[0, :]
input_img = batch_input_imgs[0, :, :]
masked_input_img = batch_masked_input_img[0, :, :]
# print(batch_input_clouds.shape)
input_cloud = batch_input_clouds[0, :, :]
input_cloud = np.expand_dims(input_cloud, axis=0)
# print(input_cloud.shape)
input_cloud_color = np.zeros((input_cloud.shape[0], 3), dtype=int)
input_cloud_color = np.expand_dims(input_cloud_color, axis=0)
# print(input_img.shape)
# input_img = cv2.cvtColor(input_img, cv2.COLOR_BGR2RGB)
# self.tboard.add_mesh(
# "images_epoch_{}_iteration_{}_{}/input_cloud".format(epoch, i, 0),
# vertices=input_cloud,
# colors=input_cloud_color
# )
# input_img = np.transpose(input_img, (2, 0, 1))
# print(input_img.shape)
self.tboard.add_image("{}_input_img".format(tboard_tag), input_img)
# self.tboard.add_image("{}_input_img".format(tboard_tag), masked_input_img)
if mode == "train":
topk_rgbs, topk_depths = self.dataset.render_poses(pose_scores, topk)
elif mode == "test":
topk_rgbs, topk_depths = self.test_dataset.render_poses(pose_scores, topk)
rgb_dls = np.zeros((topk, input_img.shape[0], input_img.shape[1], input_img.shape[2]))
# print(rgb_dls.shape)
for p_i in range(len(topk_rgbs)):
rgb_dl = topk_rgbs[p_i]
rgb_dl = cv2.cvtColor(rgb_dl, cv2.COLOR_BGR2RGB)
rgb_dl = np.transpose(rgb_dl, (2, 0, 1))
# print(rgb_dl.shape)
rgb_dls[p_i, :, :, :] = rgb_dl
# print(rgb_dl.shape)
self.tboard.add_images("{}_{}_topk_pose".format(tboard_tag, prefix), rgb_dls)
fig = plt.figure()
plt.plot(pose_scores)
self.tboard.add_figure("{}_{}_pose_scores".format(tboard_tag, prefix), fig, 0)
plt.close()
def test(self, epoch):
# self.classifier.eval()
self.estimator.eval()
# criterion = nn.KLDivLoss(reduction="batchmean")
criterion = nn.BCEWithLogitsLoss()
targets_nonzero_all = []
preds_nonzero_all = []
num_batch = len(self.testdataloader)
test_avg_loss = 0.0
targets_all = []
preds_all = []
for i, data in enumerate(self.testdataloader, 0):
# if i > 10:
# break
# if i % 60 != 0:
# continue
# imgs, points_orig, img_masked, choose, target = data
# points = points_orig.transpose(2, 1)
# imgs, points, img_masked, choose, target = \
# imgs.cuda(), points.cuda(), img_masked.cuda(), choose.cuda(), target.cuda()
imgs, points_orig, img_masked_orig, img_masked, choose, target = data
points = points_orig
imgs, points, img_masked_orig, img_masked, choose, target = \
imgs.cuda(), points.cuda(), img_masked_orig.cuda(), img_masked.cuda(), choose.cuda(), target.cuda()
# pred, trans, trans_feat = self.classifier(points)
pred = self.estimator(img_masked, points, choose)
loss = criterion(pred, target)
targets_all.append(target.detach().cpu().numpy().flatten().tolist())
# pred_prob = torch.exp(pred)
pred_prob = torch.sigmoid(pred)
preds_all.append(pred_prob.detach().cpu().numpy().flatten().tolist())
# print(pred_prob)
# print(target)
# target_nonzero = target[target > 0].detach()
# pred_nonzero = pred[target > 0].detach()
# target_nonzero = target[target > 0].detach()
# pred_nonzero = pred[target > 0].detach()
# pred_prob_nonzero = torch.exp(pred_nonzero)
# l2_pred = torch.norm(pred_prob_nonzero - target_nonzero, 2, -1)
test_avg_loss += loss.item()
# print('[%d: %d/%d] test loss: %f non-zero l2 error: %f' % (epoch, i, num_batch, loss.item(), l2_pred))
print('[%d: %d/%d] test loss: %f ' % (epoch, i, num_batch, loss.item()))
counter = epoch * len(self.testdataloader) + i
self.tboard.add_scalar('test/loss', loss.item(), counter)
if self.opt.render_poses and i % 6 == 0:
self.plot_comparisons(
"test",
target.detach().cpu().numpy(),
pred.detach().cpu().numpy(),
imgs.detach().cpu().numpy(),
img_masked_orig.detach().cpu().numpy(),
points_orig.detach().cpu().numpy(),
epoch,
i,
)
test_avg_loss /= len(self.testdataloader)
self.tboard.add_scalar('test/average_loss', test_avg_loss, epoch)
targets_all = np.array(targets_all)
preds_all = np.array(preds_all)
# print(targets_all)
# print(preds_all)
mean_ap, _ = self.compute_multilabel_ap(
targets_all,
preds_all
)
print("test mean ap : {}".format(mean_ap))
self.tboard.add_scalar('test/mean_ap', mean_ap, counter)
# preds_nonzero_all = torch.FloatTensor(preds_nonzero_all)
# preds_nonzero_probs_all = torch.exp(preds_nonzero_all)
# targets_nonzero_all = torch.FloatTensor(targets_nonzero_all)
# loss = criterion(preds_nonzero_all, targets_nonzero_all)
# l2_pred = torch.norm(preds_nonzero_probs_all - targets_nonzero_all, 2, -1)
# print('[%d] test loss: %f non-zero l2 error: %f' % (epoch, loss.item(), l2_pred))
# self.classifier.train()
def train(self):
# self.estimator.cuda()
self.estimator.train()
# optimizer = optim.Adam(self.classifier.parameters(), lr=0.0001, betas=(0.9, 0.999))
optimizer = optim.Adam(self.estimator.parameters(), lr=0.0001)
# scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.5)
# criterion = nn.KLDivLoss(reduction="batchmean")
criterion = nn.BCEWithLogitsLoss()
# self.classifier.cuda()
num_batch = len(self.dataset) / opt.batchsize
for epoch in range(self.opt.nepoch):
# Test after every epoch
# self.test(epoch)
self.estimator.train()
optimizer.zero_grad()
for i, data in enumerate(self.dataloader, 0):
imgs, points_orig, img_masked_orig, img_masked, choose, target = data
# points = points_orig.transpose(2, 1)
points = points_orig
imgs, points, img_masked_orig, img_masked, choose, target = \
imgs.cuda(), points.cuda(), img_masked_orig.cuda(), img_masked.cuda(), choose.cuda(), target.cuda()
# optimizer.zero_grad()
# self.classifier = self.classifier.train()
# pred, trans, trans_feat = | |
from robin_stocks.gemini.authentication import generate_signature
from robin_stocks.gemini.helper import (format_inputs, login_required,
request_get, request_post)
from robin_stocks.gemini.urls import URLS
@format_inputs
def get_pubticker(ticker, jsonify=None):
    """Get the public ticker information for a crypto.

    :param ticker: The ticker of the crypto.
    :type ticker: str
    :param jsonify: If set to False, the raw response object is returned. \
        If set to True, a dictionary parsed using the JSON format is returned.
    :type jsonify: Optional[str]
    :returns: A tuple whose first entry is either a requests response object \
        or a dictionary parsed using the JSON format, and whose second entry \
        is an error string, or None when there was no error. \
        The dictionary keys are listed below.
    :Dictionary Keys: * bid - The highest bid currently available
                      * ask - The lowest ask currently available
                      * last - The price of the last executed trade
                      * volume - Information about the 24 hour volume on the exchange
    """
    endpoint = URLS.pubticker(ticker)
    return request_get(endpoint, None, jsonify)
@format_inputs
def get_ticker(ticker, jsonify=None):
    """Get recent trading information for a crypto.

    :param ticker: The ticker of the crypto.
    :type ticker: str
    :param jsonify: If set to False, the raw response object is returned. \
        If set to True, a dictionary parsed using the JSON format is returned.
    :type jsonify: Optional[str]
    :returns: A tuple whose first entry is either a requests response object \
        or a dictionary parsed using the JSON format, and whose second entry \
        is an error string, or None when there was no error. \
        The dictionary keys are listed below.
    :Dictionary Keys: * symbol - BTCUSD etc.
                      * open - Open price from 24 hours ago
                      * high - High price from 24 hours ago
                      * low - Low price from 24 hours ago
                      * close - Close price (most recent trade)
                      * changes - Hourly prices descending for past 24 hours
                      * bid - Current best bid
                      * ask - Current best offer
    """
    endpoint = URLS.ticker(ticker)
    return request_get(endpoint, None, jsonify)
@format_inputs
def get_symbols(jsonify=None):
    """Get the list of all available crypto tickers.

    :param jsonify: If set to False, the raw response object is returned. \
        If set to True, a dictionary parsed using the JSON format is returned.
    :type jsonify: Optional[str]
    :returns: A tuple whose first entry is either a requests response object \
        or a list of strings, and whose second entry is an error string, or \
        None when there was no error.
    """
    endpoint = URLS.symbols()
    return request_get(endpoint, None, jsonify)
@format_inputs
def get_symbol_details(ticker, jsonify=None):
    """Get detailed information for a crypto.

    :param ticker: The ticker of the crypto.
    :type ticker: str
    :param jsonify: If set to False, the raw response object is returned. \
        If set to True, a dictionary parsed using the JSON format is returned.
    :type jsonify: Optional[str]
    :returns: A tuple whose first entry is either a requests response object \
        or a dictionary parsed using the JSON format, and whose second entry \
        is an error string, or None when there was no error. \
        The dictionary keys are listed below.
    :Dictionary Keys: * symbol - BTCUSD etc.
                      * base_currency - CCY1 or the top currency. (ie BTC in BTCUSD)
                      * quote_currency - CCY2 or the quote currency. (ie USD in BTCUSD)
                      * tick_size - The number of decimal places in the quote_currency
                      * quote_increment - The number of decimal places in the base_currency
                      * min_order_size - The minimum order size in base_currency units.
                      * status - Status of the current order book. Can be open, closed, cancel_only, post_only, limit_only.
    """
    endpoint = URLS.symbol_details(ticker)
    return request_get(endpoint, None, jsonify)
@login_required
@format_inputs
def get_notional_volume(jsonify=None):
    """Get information about the account's notional volume.

    :param jsonify: If set to False, the raw response object is returned. \
        If set to True, a dictionary parsed using the JSON format is returned.
    :type jsonify: Optional[str]
    :returns: A tuple whose first entry is either a requests response object \
        or a dictionary parsed using the JSON format, and whose second entry \
        is an error string, or None when there was no error. \
        The dictionary keys are listed below.
    :Dictionary Keys: * date - UTC date in yyyy-MM-dd format
                      * last_updated_ms - Unix timestamp in millisecond of the last update
                      * web_maker_fee_bps - Maker fee (basis points, all symbols) for web orders
                      * web_taker_fee_bps - Taker fee (basis points, all symbols) for web orders
                      * web_auction_fee_bps - Auction fee (basis points, all symbols) for web orders
                      * api_maker_fee_bps - Maker fee (basis points, all symbols) for API orders
                      * api_taker_fee_bps - Taker fee (basis points, all symbols) for API orders
                      * api_auction_fee_bps - Auction fee (basis points, all symbols) for API orders
                      * fix_maker_fee_bps - Maker fee (basis points, all symbols) for FIX orders
                      * fix_taker_fee_bps - Taker fee (basis points, all symbols) for FIX orders
                      * fix_auction_fee_bps - Auction fee (basis points, all symbols) for FIX orders
                      * block_maker_fee_bps - Maker fee (basis points, all symbols) for block orders
                      * block_taker_fee_bps - Taker fee (basis points, all symbols) for block orders
                      * notional_30d_volume - Maker plus taker trading volume for the past 30 days, including auction volume
                      * notional_1d_volume - A list of 1 day notional volume for the past 30 days
    """
    url = URLS.notional_volume()
    # Private endpoints require the request path in the signed payload.
    payload = {"request": URLS.get_endpoint(url)}
    generate_signature(payload)
    return request_post(url, payload, jsonify)
@login_required
@format_inputs
def get_trade_volume(jsonify=None):
""" Gets information about trade volume. The response will be an array of up to 30 days of trade volume for each symbol.
:param jsonify: If set to false, will return the raw response object. \
If set to True, will return a dictionary parsed using the JSON format.
:type jsonify: Optional[str]
:returns: Returns a tuple where the first entry in the tuple is a requests reponse object \
or a dictionary parsed using the JSON format and the second entry is an error string or \
None if there was not an error. \
The keys for the dictionary are listed below.
:Dictionary Keys: * symbol - The symbol.
* base_currency - quantity is denominated in this currency.
* notional_currency - price is denominated as the amount of notional currency per one unit of base currency. Notional values are denominated in this currency.
* data_date - UTC date in yyyy-MM-dd format.
* total_volume_base - Total trade volume for this day.
* maker_buy_sell_ratio - Maker buy/sell ratio is the proportion of maker base volume on trades where the account was on the buy side versus all maker trades. If there is no maker base volume on the buy side, then this value is 0.
* buy_maker_base - Quantity for this day where the account was a maker on the buy side of the trade.
* buy_maker_notional - Notional value for this day where the account was a maker on the buy side of the trade.
* buy_maker_count - Number of trades for this day where the account was a maker on the buy side of the trade.
* sell_maker_base - Quantity for this day where the account was a maker on the sell side of the trade.
* sell_maker_notional - Notional value for this | |
# model_analysis.py — part of the em_topicmodeling_project repository.
from time import time
from datetime import datetime
import os, sys
import numpy as np
import pandas as pd
import gensim, spacy, scispacy
import pickle
import glob
import random
import model_figure_pipeline as mfp
import data_nl_processing, data_nl_processing_v2
import model_utilities as mu
def coherence_set(model, coherence, window=None):
    """Compute a gensim coherence score for a trained topic model.

    :param model: wrapper object exposing ``model`` (the underlying gensim
        topic model), ``nlp_data`` (tokenized texts and dictionary), and
        ``gensim_topic_words_`` for extracting per-topic word lists.
    :param coherence: name of the coherence measure (e.g. 'c_v', 'u_mass').
    :param window: optional sliding-window size forwarded to CoherenceModel.
    :returns: the aggregate coherence value for all topics.
    """
    raw_topics = model.model.show_topics(formatted=False, num_words=20, num_topics=-1)
    topic_words = model.gensim_topic_words_(raw_topics)
    scorer = gensim.models.CoherenceModel(
        topics=topic_words,
        texts=model.nlp_data.get_token_text(),
        dictionary=model.nlp_data.get_id2word(),
        window_size=window,
        coherence=coherence,
    )
    return scorer.get_coherence()
if __name__ == "__main__": # Code only runs if this file is run directly.
if False: # Loading data and model
t = time()
print("Loading Data...")
with open('models/main_mallet_t40a25o200', 'rb') as model:
mallet_model = pickle.load(model)
data_path = 'data/external/data_cleaned.csv'
data_column = 'title_abstract'
df = pd.read_csv(data_path)
raw_text = df[data_column].tolist()
year_list = df['year'].tolist()
comp_time = (time() - t)
print("Done in %0.3fs." % comp_time)
if False: # docs per 1 year
t = time()
print("Running doc counts per time ...")
df1, df2 = mu.doc_topics_per_time(mallet_model.model, mallet_model.nlp_data, year_list=year_list, year_res=1)
df1.to_csv('reports/main_model/doc_n_per1_year.csv', index=False)
df2.to_csv('reports/main_model/doc_w_per1_year.csv', index=False)
comp_time = (time() - t)
print("Done in %0.3fs." % comp_time)
if False: # Total docs per topic
t = time()
print("Running total Doc counts per topic ...")
df3, df4 = mu.docs_per_topic(mallet_model.model, mallet_model.nlp_data)
df3.to_csv('reports/main_model/doc_n_per_topic.csv', index=False)
df4.to_csv('reports/main_model/doc_w_per_topic.csv', index=False)
comp_time = (time() - t)
print("Done in %0.3fs." % comp_time)
if False: # Docs by dominant topic
t = time()
print("Running docs by dominant topic ...")
topic_df = mu.dominant_doc_topic_df(mallet_model.model, mallet_model.nlp_data)
topic_df.to_csv('reports/main_model/docs_dom_topic.csv')
comp_time = (time() - t)
print("Done in %0.3fs." % comp_time)
t = time()
print("Running best doc per topic ...")
best_doc_df = mu.best_doc_for_topic(topic_df)
best_doc_df.to_csv('reports/main_model/best_doc_per_topic.csv')
comp_time = (time() - t)
print("Done in %0.3fs." % comp_time)
t = time()
print("Running best doc per topic with raw test ...")
doc_list = best_doc_df["Best Document"]
new_column = []
for doc in doc_list:
new_column.append(raw_text[int(doc-1)])
best_doc_raw_df = best_doc_df.copy()
best_doc_raw_df["Raw Text"] = pd.Series(new_column).values
best_doc_raw_df.to_csv('reports/main_model/best_doc_per_topic_with_raw.csv')
comp_time = (time() - t)
print("Done in %0.3fs." % comp_time)
if False: # Document token histogram
t = time()
print("Creating doc token counts ...")
mu.plot_doc_token_counts(topic_df,fig_save_path='reports/main_model/doc_token_counts.png', show=False)
comp_time = (time() - t)
print("Done in %0.3fs." % comp_time)
if False: # Reloading dataframes
t = time()
print("Reloading df data ...")
data_path2 = 'reports/main_model/best_doc_per_topic_with_raw.csv'
data_column2 = 'Raw Text'
df_raw = pd.read_csv(data_path2)
new_column = df_raw[data_column2].tolist()
df1_data_path = 'reports/main_model/doc_n_per1_year.csv'
df1 = pd.read_csv(df1_data_path)
comp_time = (time() - t)
print("Done in %0.3fs." % comp_time)
if False: # Create sample colored paragraph
t = time()
print("Creating sample colored paragraph ...")
mu.color_doc_topics(mallet_model.model, raw_text[0], mallet_model.nlp_data, topics=4, line_word_length=12,
fig_save_path='reports/main_model/sample_colordoctopics.png', show=False, custom_titles=mu.MAIN_TOPICS)
comp_time = (time() - t)
print("Done in %0.3fs." % comp_time)
if False: # Creating colored docs of dominant topics
t = time()
print("Creating best colored paragraph per topic...")
for i, text in enumerate(new_column):
mu.color_doc_topics(mallet_model.model, text, mallet_model.nlp_data, topics=4, line_word_length=12,
fig_save_path='reports/main_model/best_colored_paragraphs/colordoctopic_{}.png'.format(i+1), show=False,
custom_titles=mu.MAIN_TOPICS)
comp_time = (time() - t)
print("Done in %0.3fs." % comp_time)
if False: # Creating colored docs of dominant topics
t = time()
print("Creating best colored paragraph per topic...")
for i, text in enumerate(new_column):
if i == 26:
mu.color_doc_topics(mallet_model.model, text, mallet_model.nlp_data, topics=4, line_word_length=12,
fig_save_path='reports/main_model/best_colored_paragraphs/colordoctopic_{}.png'.format(i+1), show=False,
custom_titles=mu.MAIN_TOPICS)
comp_time = (time() - t)
print("Done in %0.3fs." % comp_time)
if False: # Graph of number of docs per topic per year
t = time()
print("Creating graph of number of docs per topic per year...")
x_val = list(range(1980,2020))
mu.plot_doc_topics_per_time(df1, 40, 8, ylabel='Proportion of Documents', xlabel='Years', fig_save_path='reports/main_model/plot_n_abs_per1y.png',
x_val=x_val, hide_x_val=False, xtick_space=5, custom_titles=mu.MAIN_TOPICS, relative_val=True,
df_data2=df1, relative_val2=False, ylabel2="Absolute Count of Documents", show=False)
comp_time = (time() - t)
print("Done in %0.3fs." % comp_time)
if False: # Graph of number of docs per topic per year
t = time()
print("Creating graph of number of docs per topic per year...")
columns = list(df1.columns)[1:]
column_totals = df1.loc[:,columns[0]:].sum(axis=0)
column_totals_list = list(column_totals)
topics_list = df1["Topic"]
years = list(range(1980,2020))
mu.graph(years, column_totals_list, title="Total Abstracts by Year", x_label="Year", y_label="Number of Abstracts",
fig_save_path='reports/main_model/total_docs_per_year.png')
total_docs = 0
for total in column_totals_list:
total_docs += total
print("Total docs: {}".format(total_docs))
comp_time = (time() - t)
print("Done in %0.3fs." % comp_time)
if False: # Graph of number of docs for every 5 years
t = time()
print("Running doc counts per time ...")
df5, df6 = mu.doc_topics_per_time(mallet_model.model, mallet_model.nlp_data, year_list=year_list, year_res=5)
df5.to_csv('reports/main_model/doc_n_per5_year.csv', index=False)
df6.to_csv('reports/main_model/doc_w_per5_year.csv', index=False)
comp_time = (time() - t)
print("Done in %0.3fs." % comp_time)
if False: # Reloading dataframes
t = time()
print("Reloading df data ...")
df5_data_path = 'reports/main_model/doc_n_per5_year.csv'
df5 = pd.read_csv(df5_data_path)
df6_data_path = 'reports/main_model/doc_w_per5_year.csv'
df6 = pd.read_csv(df6_data_path)
comp_time = (time() - t)
print("Done in %0.3fs." % comp_time)
if False: # Total abstracts for every 5 years
t = time()
print("Creating graph of number of docs per year...")
columns = list(df5.columns)[1:]
column_totals = df5.loc[:,columns[0]:].sum(axis=0)
column_totals_list = list(column_totals)
topics_list = df5["Topic"]
years = list(range(1980,2020,5))
mu.graph(columns, column_totals_list, title="Total Abstracts by Year", x_label="Years", y_label="Number of Abstracts",
fig_save_path='reports/main_model/total_docs_per_5year.png')
total_docs = 0
for total in column_totals_list:
total_docs += total
print("Total docs: {}".format(total_docs))
comp_time = (time() - t)
print("Done in %0.3fs." % comp_time)
if False: # Number of docs per topic per 5 years
t = time()
print("Creating graph of number of docs per topic per year...")
x_val = list(range(1980,2020, 5))
mu.plot_doc_topics_per_time(df5, 40, 8, ylabel='Proportion of Documents', xlabel='Years', fig_save_path='reports/main_model/plot_n_abs_per5y.png',
x_val=x_val, hide_x_val=False, xtick_space=10, xmintick_space=5, custom_titles=mu.MAIN_TOPICS_TRUNC, relative_val=True,
df_data2=df5, relative_val2=False, ylabel2="Absolute Count of Documents", show=False)
comp_time = (time() - t)
print("Done in %0.3fs." % comp_time)
if False: # Number of docs per topic per 5 years with weight
t = time()
print("Creating graph of document weight per topic per year...")
x_val = list(range(1980,2020, 5))
mu.plot_doc_topics_per_time(df5, 40, 8, ylabel='Proportion of Documents', xlabel='Years', fig_save_path='reports/main_model/plot_n_w_per5y.png',
x_val=x_val, hide_x_val=False, xtick_space=10, xmintick_space=5, custom_titles=mu.MAIN_TOPICS, relative_val=True,
df_data2=df6, relative_val2=True, ylabel2=None, show=False)
comp_time = (time() - t)
print("Done in %0.3fs." % comp_time)
if False: # Create several colored paragraph samples
t = time()
print("Creating sample colored paragraph ...")
for i in range(10):
index = random.randrange(20000)
mu.color_doc_topics(mallet_model.model, raw_text[index], mallet_model.nlp_data, topics=3, line_word_length=12, incl_perc=True,
fig_save_path='reports/main_model/sample_color_docs/doc_{}.png'.format(index), show=False, custom_titles=mu.MAIN_TOPICS)
comp_time = (time() - t)
print("Done in %0.3fs." % comp_time)
if False:
t = time()
print("Creating t-SNE doc Cluster ...")
seed = 2019
mu.plot_tsne_doc_cluster(mallet_model.model, mallet_model.nlp_data, marker_size=1, min_tw=None, seed=seed, show_topics=True,
show=False, custom_titles=mu.MAIN_TOPICS_TRUNC, fig_save_path='reports/main_model/tsne_doc_cluster_s{}.png'.format(2019))
comp_time = (time() - t)
print("Done in %0.3fs." % comp_time)
if False:
t = time()
print("Creating topic word clouds ...")
mu.creat_multi_wordclouds(40, 8, mallet_model.model, mallet_model.nlp_data, custom_titles=mu.MAIN_TOPICS_TRUNC, show=False,
fig_save_path='reports/main_model/topic_wordclouds.png', title_font=14)
comp_time = (time() - t)
print("Done in %0.3fs." % comp_time)
if False: # Group data by journal
t = time()
print("Calculating df counts ...")
df_dict, counts = mu.rows_per_df_grp(df, "journal")
print(counts)
comp_time = (time() - t)
print("Done in %0.3fs." % comp_time)
for journal in df_dict:
y_list = df_dict[journal]['year'].tolist()
y_list.sort()
print("Journal: {}, First pub: {}".format(journal, y_list[0]))
if False: # Document counts by year topic and journal
t = time()
print("Running doc counts per time per journal...")
total_counts_list = []
labels_list = []
columns_list = []
for journal in df_dict:
df1, df2 = mu.doc_topics_per_time(mallet_model.model, mallet_model.nlp_data, df=df_dict[journal], data_column="title_abstract",
year_column="year", year_res=5, year_start=1980)
#df1.to_csv('reports/main_model/journals/dnp5y_{}.csv'.format(journal), index=False)
#df2.to_csv('reports/main_model/journals/dwp5y_{}.csv'.format(journal), index=False)
x_val = list(range(1980,2020, 5))
'''
mu.plot_doc_topics_per_time(df1, 40, 8, ylabel='Proportion of Documents', xlabel='Years',
fig_save_path='reports/main_model/journals/p_n_abs_p5y_{}.png'.format(journal),
x_val=x_val, hide_x_val=False, xtick_space=10, xmintick_space=5,
custom_titles=mu.MAIN_TOPICS_TRUNC, relative_val=True,
df_data2=df1, relative_val2=False, ylabel2="Absolute Count of Documents", show=False)
'''
columns = list(df1.columns)[1:]
column_totals = df1.loc[:,columns[0]:].sum(axis=0)
column_totals_list = list(column_totals)
columns_list.append(list(columns))
total_counts_list.append(column_totals_list)
labels_list.append(journal)
legend_params = {'loc':2, 'fontsize':'xx-small'}
mu.graph_multi(columns_list, total_counts_list, label_list=labels_list, title="Total Abstracts by Year",
x_label="Years", y_label="Number of Abstracts", legend="Journal", legend_params=legend_params,
fig_save_path='reports/main_model/journals/total_docs_per_5year.png', show=True)
comp_time = (time() - t)
print("Done in %0.3fs." % comp_time)
if False: # looking at long tokens
long_token_list = []
for doc in raw_text:
long_token_list.extend(gensim.utils.simple_preprocess(str(doc), deacc=True, min_len=16, max_len=50))
print(len(long_token_list))
long_token_set = set(long_token_list)
print(long_token_set)
maxlen = 0
for token in long_token_set:
if len(token) > maxlen:
maxlen = len(token)
print("max length: {}".format(maxlen))
nlp = spacy.load(mallet_model.nlp_data.spacy_lib, disable=['parser','ner'])
allowed_postags = ['NOUN', 'ADJ', 'VERB','ADV']
doc = nlp(" ".join(long_token_list))
lem_list = [token.lemma_ for token in doc if token.pos_ in allowed_postags and token.lemma_ not in ['-PRON-']
]
print(len(lem_list))
lem_set = set(lem_list)
print(len(lem_set))
print(lem_set)
if False: # Testing Alternate Model
with open('models/main_mallet_t40a5o200_v2', 'rb') as model:
mallet_model_v2 = pickle.load(model)
t = time()
print("Creating sample colored paragraph |
# NOTE(review): the three lines below are website-UI residue that leaked into
# this file ("Subsets and Splits / No community queries yet / The top public
# SQL queries from the community will appear here once available."); they are
# not Python and are preserved here only as an inert comment.