Dataset columns: text (string, 12-1.05M chars) | repo_name (string, 5-86 chars) | path (string, 4-191 chars) | language (1 class) | license (15 classes) | size (int32, 12-1.05M) | keyword (list, 1-23 items) | text_hash (string, 64 chars)
"""
/***************************************************************************
CalcolodannoDialog
A QGIS plugin
-------------------
begin : 2014-11-02
copyright : (C) 2014 by RSE
email : FloodRiskGroup@rse-web.it
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4 import QtCore, QtGui
from qgis.core import *
from ui_calcolodanno import Ui_FloodRisk
import CalcoloDannoInondazione
import os.path
from xml.dom import minidom
from xml.dom.minidom import Document
from tableViewer_gui import TableViewer
try:
    from pylab import *
except ImportError:
    pass
import sys
import os
import sqlite3
# for reading csv files
import csv
import locale
try:
    from osgeo import ogr
except ImportError:
    import ogr
from help import show_context_help
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
vectors = [
    'shp','mif','tab','000','dgn','vrt','bna','csv','gml',
    'gpx','kml','geojson','itf','xml','ili','gmt',
    'sqlite','mdb','e00','dxf','gxt','txt'
]
rasters = [
    'ecw','sid','vrt','tiff',
    'tif','ntf','toc','img',
    'gff','asc','ddf','dt0',
    'dt1','dt2','png','jpg',
    'jpeg','mem','gif','n1',
    'xpm','bmp','pix','map',
    'mpr','mpl','rgb','hgt',
    'ter','nc','grb','hdr',
    'rda','bt','lcp','rik',
    'dem','gxf','hdf5','grd',
    'grc','gen','blx',
    'sqlite','sdat','adf'
]
def openFile(self, filePath, table):
    #Get the extension without the .
    extn = os.path.splitext(filePath)[1][1:].lower()
    if extn == 'qgs':
        #If it is a project file we can just open it.
        self.iface.addProject(filePath)
    elif extn in vectors:
        if extn == 'mdb':
            # OGR layer-in-file syntax: "path|layername=<table>" (no spaces around the pipe)
            uri = "%s|layername=%s" % (filePath, table if table else 'HydroArea')
            self.iface.addVectorLayer(uri, table, "ogr")
        elif extn == 'sqlite':
            uri = QgsDataSourceURI()
            uri.setDatabase(filePath)
            schema = ''
            geom_column = 'geom'
            uri.setDataSource(schema, table, geom_column)
            display_name = table
            self.iface.addVectorLayer(uri.uri(), display_name, "spatialite")
        else:
            self.iface.addVectorLayer(filePath, "", "ogr")
    elif extn in rasters:
        self.iface.addRasterLayer(filePath, "")
    else:
        #We should never really get here, but just in case.
        pass
def LayerCaricato(self, NomeLayer):
    """Return True if a layer with the given base name is already loaded."""
    nome = os.path.basename(str(NomeLayer)).split('.')[0]
    layers = QgsMapLayerRegistry.instance().mapLayers().values()
    for l in layers:
        if l.name() == nome:
            return True
    return False
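# --- Illustrative sketch (not part of the original plugin) ---
# A minimal example of how the two helpers above are meant to cooperate:
# load a file only if a layer with the same base name is not already in the
# QGIS map registry. The function name below is hypothetical.
def loadIfMissing(dialog, filePath, table=''):
    """Open filePath through openFile() unless LayerCaricato() finds it loaded."""
    if not LayerCaricato(dialog, filePath):
        openFile(dialog, filePath, table)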
def checkNumRowsFromCSV(pathToCsvFile, sep):
    """Return True if the first data row has as many fields as the header (and more than one)."""
    ok = False
    try:
        with open(pathToCsvFile, 'rb') as csvfile:
            reader = csv.reader(csvfile, delimiter=sep, quotechar='"')
            headers = reader.next()
            numheaders = len(headers)
            if numheaders > 1:
                row = reader.next()
                numrow = len(row)
                if numheaders == numrow:
                    ok = True
    except:
        pass
    return ok
def check_csv_separator(pathToCsvFile):
locale.setlocale(locale.LC_ALL, '') # set to user's locale, not "C"
dec_pt_chr = locale.localeconv()['decimal_point']
if dec_pt_chr == ",":
list_delimiter = ";"
else:
list_delimiter = ","
check1=checkNumRowsFromCSV(pathToCsvFile,list_delimiter)
if not check1:
if list_delimiter==',':
list_delimiter=';'
elif list_delimiter==';':
list_delimiter=','
check2 = checkNumRowsFromCSV(pathToCsvFile,list_delimiter)
if not check2:
list_delimiter=' '
return list_delimiter
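# --- Hedged example (illustration only, helper name hypothetical) ---
# check_csv_separator() first guesses the list delimiter from the locale
# (';' where the decimal point is ',', ',' otherwise), then falls back to the
# other delimiter and finally to a space. A minimal self-contained check:
def _demo_check_csv_separator():
    import tempfile
    tmp = tempfile.NamedTemporaryFile(mode='w', suffix='.csv', delete=False)
    tmp.write('Code;Structure;Content\n1;1000;500\n')
    tmp.close()
    sep = check_csv_separator(tmp.name)  # expected ';' for this sample
    os.remove(tmp.name)
    return sep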
class calcolodannoDialog(QtGui.QDialog, Ui_FloodRisk):
def __init__(self,iface):
QtGui.QDialog.__init__(self)
self.setupUi(self)
self.iface=iface
self.btnChooseShellFile_3.setIcon(QIcon(":/plugins/floodrisk/icons/folder_explore.png"))
self.btnChooseShellFile_3.setIconSize(QSize(25,25))
self.pushButtonView.setIcon(QIcon(":/plugins/floodrisk/icons/table_go.png"))
self.pushButtonView.setIconSize(QSize(25,25))
self.pushButton.setIcon(QIcon(":/plugins/floodrisk/icons/chart_bar.png"))
self.pushButton.setIconSize(QSize(25,25))
self.buttonGrafici.setIcon(QIcon(":/plugins/floodrisk/icons/images.jpg"))
self.buttonGrafici.setIconSize(QSize(35,25))
self.label_red_danno.setPixmap(QPixmap(":/plugins/floodrisk/icons/red20.png"))
self.label_red_vuln.setPixmap(QPixmap(":/plugins/floodrisk/icons/red20.png"))
self.label_green_danno.setPixmap(QPixmap(":/plugins/floodrisk/icons/green20.png"))
self.label_green_vuln.setPixmap(QPixmap(":/plugins/floodrisk/icons/green20.png"))
# initialize actions
QObject.connect(self.btnChooseShellFile_3, SIGNAL("clicked()"), self.setFileMaxH)
QObject.connect(self.buttonGrafici, SIGNAL("clicked()"), self.graficoCurve)
QObject.connect(self.pushButtonView, SIGNAL("clicked()"), self.VediTabellaDanni)
QObject.connect(self.toolButtonEsegui, SIGNAL("clicked()"), self.EseguiCalcoloDanni)
QObject.connect(self.pushButtonSalvaProgetto, SIGNAL("clicked()"), self.writexml)
QObject.connect(self.pushButtonLoadLayer, SIGNAL("clicked()"), self.CaricaLayers)
QObject.connect(self.pushButton, SIGNAL("clicked()"), self.istogrammi)
self.dic_TypeId={}
self.CurveType=''
self.TotalDamage=0.0
#self.sep=set_csv_separator()
# help
QObject.connect(self.buttonBox, SIGNAL(_fromUtf8("helpRequested()")), self.show_help)
#------------- Actions -----------------------
def show_help(self):
"""Load the help text into the system browser."""
show_context_help(context='include3')
    def setFileMaxH(self):
        message = QtGui.QMessageBox.question(self, self.tr('Attention'),
                    self.tr("Warning: you are editing the project input data; current output"
                            " data will be deleted. Are you sure?"),
                    QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.No)
        if message == QtGui.QMessageBox.Yes:
            FileMaxHPath = QFileDialog.getOpenFileName(self, self.tr('Select peak flow depth file'),
                    '.', 'File tif (*.tif);;All files (*.*)')
            self.txtShellFilePath_3.setText(FileMaxHPath)
            QMessageBox.information(None, "FileMaxHPath", FileMaxHPath)
            # derive default output file names from the selected input file
            aa = str(FileMaxHPath)
            DirOut = os.path.dirname(aa)
            base = str.split(os.path.basename(aa), '.')[0]
            if str(self.txtShellFilePath.text()) == "":
                self.txtShellFilePath.setText(DirOut + os.sep + base + '.dmg')
            if str(self.txtShellFilePath_5.text()) == "":
                self.txtShellFilePath_5.setText(DirOut + os.sep + base + '_dmg.tif')
            if str(self.txtShellFilePath_6.text()) == "":
                self.txtShellFilePath_6.setText(DirOut + os.sep + base + '_dmg.csv')
            if str(self.txtShellFilePath_vulnerato.text()) == "":
                self.txtShellFilePath_vulnerato.setText(DirOut + os.sep + base + '_vuln.tif')
            #----Deleting output data -----
            self.txtShellFilePath_5.setText("")
            self.txtShellFilePath_vulnerato.setText("")
            self.txtShellFilePath_6.setText("")
            self.pushButtonSalvaProgetto.setEnabled(True)
def graficoCurve(self):
tipo = self.comboBoxGrafici.currentText()
try:
self.idTipo = self.dic_TypeId[tipo]
from graficofloodriskdialog import graficofloodriskDialog
gfd=graficofloodriskDialog(self.iface, self.idTipo, tipo)
geoDataBase=str(self.txtShellFilePath_2.text())
if geoDataBase!="":
gfd.lineEdit.setText(geoDataBase)
gfd.run()
except:
txt0='Geodatabase: %s \n\n' % self.txtShellFilePath_2.text()
txt1=self.tr("Error in table Vulnerability")
msg='%s %s' % (txt0,txt1)
QMessageBox.information(None, "Graph", msg)
def EseguiCalcoloDanni(self):
self.Nome=[]
self.listafiles=[]
# FileDEM1
self.Nome.append('File Max Water Depth')
self.listafiles.append(str(self.txtShellFilePath_3.text()))
# DBfile
self.Nome.append('File Geodatabase')
self.listafiles.append(str(self.txtShellFilePath_2.text()))
# NameFileGridVulnerability
self.Nome.append('File Grid Vulnerability')
self.listafiles.append(str(self.txtShellFilePath_vulnerato.text()))
# NameFileGridDamages
self.Nome.append('File Grid Damages')
self.listafiles.append(str(self.txtShellFilePath_5.text()))
# NameFileTable
self.Nome.append('File Table 1')
self.listafiles.append(str(self.txtShellFilePath_6.text()))
        tipo = self.comboBoxGrafici.currentText()
        self.CurveType = tipo
        abil0 = True
        try:
            self.idTipo = self.dic_TypeId[tipo]
            self.listafiles.append(self.idTipo)
        except:
            txt0 = 'Geodatabase: %s \n\n' % self.txtShellFilePath_2.text()
            txt1 = self.tr('Warning: the Depth-Damage Curve Type')
            txt2 = self.tr('does not exist')
            msg = '%s %s %s: %s' % (txt0, txt1, tipo, txt2)
            QMessageBox.information(None, "Input", msg)
            abil0 = False
            errMsg = 'Input Error'
        if abil0:
            abil = True
            for i in range(2):
                if not os.path.exists(self.listafiles[i]):
                    txt1 = self.tr('Warning: the file')
                    txt2 = self.tr('does not exist')
                    msg = '%s %s: %s' % (txt1, self.Nome[i], txt2)
                    QMessageBox.information(None, "File input", msg)
                    abil = False
                    errMsg = 'Input Error'
            for k in range(3):
                i = k + 2
                if len(self.listafiles[i]) == 0:
                    txt1 = self.tr('Attention: assign a name to file')
                    msg = '%s: %s ' % (txt1, self.Nome[i])
                    QMessageBox.information(None, "File output", msg)
                    abil = False
                    errMsg = 'Input Error'
        else:
            abil = False
if abil:
fileprogetto=str(self.txtShellFilePath.text())
# initializes progressbar
self.progressBar.setFormat(self.tr('Damage assessment') +': %p%')
self.progressBar.setValue(0)
            self.buttonBox.setEnabled(False)
NotErr, errMsg, TotalDamage = CalcoloDannoInondazione.main(self.listafiles,self.progressBar)
self.TotalDamage=TotalDamage
self.luci()
if NotErr:
msg=self.tr('End of Job')
QMessageBox.information(None, "FloodRisk", msg)
self.writexml()
                self.buttonBox.setEnabled(True)
self.progressBar.setFormat(('%p%'))
self.progressBar.setValue(0)
else:
msg=errMsg + " - " + self.tr("Run not executed")
QMessageBox.information(None, "Run", msg)
self.luci()
else:
msg=errMsg + " - " + self.tr("Run not executed")
QMessageBox.information(None, "Run", msg)
self.luci()
def writexml (self):
fileprogetto=str(self.txtShellFilePath.text())
dicProgetto={}
dicParameter={}
        if fileprogetto != "" and os.path.exists(fileprogetto):
            xmlfile = open(fileprogetto)
            dom = minidom.parse(xmlfile)
for node in dom.getElementsByTagName("General"):
L = node.getElementsByTagName("File")
for node2 in L:
Button = node2.getAttribute("Button")
nome = node2.getAttribute("name")
dicProgetto[Button] = nome
for node in dom.getElementsByTagName("Parameters"):
L = node.getElementsByTagName("Parameter")
for node2 in L:
Param = node2.getAttribute("Param")
Value = node2.getAttribute("Value")
dicParameter[Param] = Value
xmlfile.close()
# Create the minidom document
doc = Document()
# Create the <wml> base element
wml = doc.createElement("FloodRisk")
doc.appendChild(wml)
# Create the main <card> element
maincard = doc.createElement("General")
wml.appendChild(maincard)
# Create a <p> element
ShellFilePath= str(self.txtShellFilePath_2.text())
paragraph1 = doc.createElement("File")
paragraph1.setAttribute("Button", 'FileGeodatabase')
paragraph1.setAttribute("name", ShellFilePath)
maincard.appendChild(paragraph1)
ShellFilePath= str(self.txtShellFilePath_3.text())
paragraph1 = doc.createElement("File")
paragraph1.setAttribute("Button", 'FilePeakFloodDepth')
paragraph1.setAttribute("name", ShellFilePath)
#paragraph1.setAttribute("unit", '[m]')
maincard.appendChild(paragraph1)
# Save input file
for Button in dicProgetto.keys():
nome = dicProgetto[Button]
if Button == 'FilePeakFloodVelocity':
paragraph1 = doc.createElement("File")
paragraph1.setAttribute("Button", Button)
paragraph1.setAttribute("name", nome)
#paragraph1.setAttribute("unit", '[m]')
maincard.appendChild(paragraph1)
if Button == 'FileWarningTime':
paragraph1 = doc.createElement("File")
paragraph1.setAttribute("Button", Button)
paragraph1.setAttribute("name", nome)
#paragraph1.setAttribute("unit", '[m]')
maincard.appendChild(paragraph1)
# Create a <p> element
ShellFilePath= str(self.txtShellFilePath_5.text())
paragraph1 = doc.createElement("File")
paragraph1.setAttribute("Button", 'FileGridDamages')
paragraph1.setAttribute("name", ShellFilePath)
#paragraph1.setAttribute("unit", '[kEuro]')
maincard.appendChild(paragraph1)
#
# Create a <p> element
ShellFilePath= str(self.txtShellFilePath_vulnerato.text())
paragraph1 = doc.createElement("File")
paragraph1.setAttribute("Button", 'FileGridVulnerability')
paragraph1.setAttribute("name", ShellFilePath)
maincard.appendChild(paragraph1)
#
# Create a <p> element
ShellFilePath= str(self.txtShellFilePath_6.text())
paragraph1 = doc.createElement("File")
paragraph1.setAttribute("Button", 'FileTable1')
paragraph1.setAttribute("name", ShellFilePath)
maincard.appendChild(paragraph1)
# Save input file
for Button in dicProgetto.keys():
nome = dicProgetto[Button]
if Button == 'FileGridPopRisk':
paragraph1 = doc.createElement("File")
paragraph1.setAttribute("Button", Button)
paragraph1.setAttribute("name", nome)
maincard.appendChild(paragraph1)
if Button == 'FileTable2':
paragraph1 = doc.createElement("File")
paragraph1.setAttribute("Button", Button)
paragraph1.setAttribute("name", nome)
maincard.appendChild(paragraph1)
# Create the main <card> element
maincard2 = doc.createElement("Parameters")
wml.appendChild(maincard2)
# Create a <p> element
Param1 = doc.createElement("Parameter")
Param1.setAttribute("Param", 'CurveType')
Param1.setAttribute("Value", self.CurveType)
maincard2.appendChild(Param1)
Param1 = doc.createElement("Parameter")
Param1.setAttribute("Param", 'TotalDamage')
msg='%.1f' % self.TotalDamage
Param1.setAttribute("Value", msg)
maincard2.appendChild(Param1)
for Param in dicParameter.keys():
Value = dicParameter[Param]
if Param == 'Method':
Param2 = doc.createElement("Parameter")
Param2.setAttribute("Param", 'Method')
Param2.setAttribute("Value", Value)
maincard2.appendChild(Param2)
if Param == 'Understand':
Param2 = doc.createElement("Parameter")
Param2.setAttribute("Param", 'Understand')
Param2.setAttribute("Value", Value)
maincard2.appendChild(Param2)
if Param == 'LOL':
Param2 = doc.createElement("Parameter")
Param2.setAttribute("Param", 'LOL')
Param2.setAttribute("Value", Value)
maincard2.appendChild(Param2)
        if fileprogetto != "":
            fp = open(fileprogetto, "w")
            # writexml(self, writer, indent='', addindent='', newl='', encoding=None)
            doc.writexml(fp, "", " ", "\n", "UTF-8")
            fp.close()
            self.AutoLoad = fileprogetto
            QMessageBox.information(None, "Info", self.tr("Project Saved"))
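    # --- Illustrative note (comment only, not part of the original code) ---
    # writexml() produces a project file roughly shaped like the sketch below;
    # the attribute values shown are hypothetical:
    #
    # <?xml version="1.0" encoding="UTF-8"?>
    # <FloodRisk>
    #   <General>
    #     <File Button="FileGeodatabase" name="/data/geodb.sqlite"/>
    #     <File Button="FilePeakFloodDepth" name="/data/maxh.tif"/>
    #     <File Button="FileGridDamages" name="/data/maxh_dmg.tif"/>
    #   </General>
    #   <Parameters>
    #     <Parameter Param="CurveType" Value="Residential"/>
    #     <Parameter Param="TotalDamage" Value="1234.5"/>
    #   </Parameters>
    # </FloodRisk>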
def VediTabellaDanni(self):
self.NomeTabella=str(self.txtShellFilePath_6.text())
self.TabView = TableViewer(self.iface,self.NomeTabella)
self.TabView.show()# show the dialog
result = self.TabView.exec_()
def CaricaLayers(self):
filePath=str(self.txtShellFilePath_2.text())
if os.path.exists(filePath):
# case geodatabase
tabelle=['StructurePoly','InfrastrLines','CensusBlocks']
for nomelayer in tabelle:
if not LayerCaricato(self,nomelayer):
openFile(self,filePath,nomelayer)
filePath=str(self.txtShellFilePath_3.text())
if os.path.exists(filePath):
if not LayerCaricato(self,filePath):
openFile(self,filePath,'')
filePath=str(self.txtShellFilePath_vulnerato.text())
if os.path.exists(filePath):
if not LayerCaricato(self,filePath):
openFile(self,filePath,'')
filePath=str(self.txtShellFilePath_5.text())
if os.path.exists(filePath):
if not LayerCaricato(self,filePath):
openFile(self,filePath,'')
def istogrammi(self):
self.NomeFile=str(self.txtShellFilePath_6.text())
if os.path.exists(self.NomeFile):
try:
import matplotlib
self.sep=check_csv_separator(self.NomeFile)
# Reading csv file
finp = open(self.NomeFile)
csv_reader = csv.reader(finp, delimiter=self.sep, quotechar='"')
headers = csv_reader.next()
self.fields=[]
for p in headers:
self.fields.append(p)
                progress = unicode('Reading data ') # the main window's status bar is used as a progress bar, because this dialog's own bar is not initialized yet
yEuro1=[]
yEuro2=[]
xCodice=[]
for record in csv_reader:
for i in range(len(record)):
if i == 0:
xCodice += [record[i]]
if i == 5:
yEuro2 += [float(record[i])]
if i == 6:
yEuro1 += [float(record[i])]
finp.close()
#---------------Draw Chart-----------------
y1=yEuro1
y2=yEuro2
x1=xCodice
width=0.3
i=arange(len(y1))
r1=bar(i, y1,width, color='r',linewidth=1)
r2=bar(i+width,y2,width,color='b',linewidth=1)
xticks(i+width/2,x1)
xlabel('Code'); ylabel('Euro'); title(self.tr('Damage assessment results'))
try:
legend((r1[0],r2[0]),(self.tr('Content Damage'), self.tr('Structure Damage')), 'best')
except:
pass
grid()
show()
            except:
                QMessageBox.information(None, "Warning", "The current version of QGIS does not allow importing matplotlib")
        else:
            txt1 = self.tr('Warning: the file')
            txt2 = self.tr('does not exist')
            msg = '%s\n\n %s\n\n %s' % (txt1, self.NomeFile, txt2)
            QMessageBox.information(None, "Input", msg)
#------------------- Functions ---------------------------
def startxml (self):
fileprogetto=str(self.txtShellFilePath.text())
if fileprogetto!="":
xmlfile=open(fileprogetto)
dom=minidom.parse(xmlfile)
for node in dom.getElementsByTagName("General"):
L = node.getElementsByTagName("File")
for node2 in L:
Button = node2.getAttribute("Button")
nome = node2.getAttribute("name")
if Button=='FileGeodatabase':
self.txtShellFilePath_2.setText(nome)
elif Button=='FilePeakFloodDepth':
self.txtShellFilePath_3.setText(nome)
elif Button=='FileGridDamages':
self.txtShellFilePath_5.setText(nome)
elif Button=='FileGridVulnerability':
self.txtShellFilePath_vulnerato.setText(nome)
elif Button=='FileTable1':
self.txtShellFilePath_6.setText(nome)
for node in dom.getElementsByTagName("Parameters"):
L = node.getElementsByTagName("Parameter")
for node2 in L:
Param = node2.getAttribute("Param")
try:
Value = node2.getAttribute("Value")
except:
Value = node2.getAttribute("name")
if Param=='CurveType':
self.CurveType=Value
self.setCurrentCurveType(Value)
xmlfile.close()
abil=bool("true")
self.pushButtonSalvaProgetto.setEnabled(abil)
    def luci(self):
        #FileGridDamages
        FilePath = str(self.txtShellFilePath_5.text())
if os.path.exists(FilePath):
self.label_red_danno.hide()
self.label_green_danno.show()
else:
self.label_red_danno.show()
self.label_green_danno.hide()
#FileGridVulnerability
FilePath= str(self.txtShellFilePath_vulnerato.text())
if os.path.exists(FilePath):
self.label_red_vuln.hide()
self.label_green_vuln.show()
else:
self.label_red_vuln.show()
self.label_green_vuln.hide()
def setListaTipoCurvaVuln(self):
FileGDB = str(self.txtShellFilePath_2.text())
if FileGDB != "":
if self.CheckGeodatabase():
conn = sqlite3.connect(FileGDB)
cursor = conn.cursor()
testoQuery='SELECT VulnID FROM Vulnerability GROUP BY VulnID'
cursor.execute(testoQuery)
ListaTipi1 = cursor.fetchall()
ListaTipi = []
for row in ListaTipi1:
ListaTipi.append(int(row[0]))
dic_VulnType={}
self.dic_TypeId={}
testoQuery2='SELECT * FROM VulnType'
cursor.execute(testoQuery2)
ListaDescription = cursor.fetchall()
if len(ListaDescription)>0:
for row in ListaDescription:
dic_VulnType[int(row[1])] = str(row[2])
self.dic_TypeId[str(row[2])] = int(row[1])
ListaDescrizione=[]
for num in ListaTipi:
ListaDescrizione.append(dic_VulnType[num])
self.comboBoxGrafici.clear()
self.comboBoxGrafici.addItems(ListaDescrizione)
else:
QMessageBox.information(None, "FloodRisk", self.tr("You must first create the Geodb.Sqlite"))
else:
QMessageBox.information(None, "FloodRisk", self.tr("You must first create the Geodb.Sqlite"))
def setCurrentCurveType(self,ItemText):
# set currentItem CurveType
self.CurveType=''
NumType=self.comboBoxGrafici.count()
AllItems = [self.comboBoxGrafici.itemText(i) for i in range(NumType)]
if NumType>0:
index=-1
for ii in range(NumType):
if ItemText==AllItems[ii]:
index=ii
if index>=0:
self.comboBoxGrafici.setCurrentIndex(index)
self.CurveType=ItemText
    def CheckGeodatabase(self):
        res = False
        if os.path.exists(self.txtShellFilePath_2.text()):
            mydb_path = self.txtShellFilePath_2.text()
            try:
                # connecting to the db
                conn = sqlite3.connect(mydb_path)
                # creating a cursor
                cur = conn.cursor()
                TablesList = ['spatial_ref_sys', 'AnalysisArea', 'CensusBlocks',
                              'FatalityRate', 'FloodSeverity', 'InfrastrLines',
                              'VulnType', 'Vulnerability', 'StructurePoly']
                for NomeTabella in TablesList:
                    sql = "SELECT sql FROM sqlite_master WHERE type='table' AND name='%s';" % (NomeTabella)
                    cur.execute(sql)
                    Tabella = str(cur.fetchone()[0]) # raises if the table is missing
                res = True
            except:
                res = False
        return res
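# --- Hedged example (standalone sketch, not used by the dialog) ---
# CheckGeodatabase() relies on sqlite_master to verify that every expected
# table exists. The same idea in isolation, for an arbitrary table list:
def tables_exist(db_path, tables):
    """Return True if all named tables exist in the SQLite file at db_path."""
    conn = sqlite3.connect(db_path)
    cur = conn.cursor()
    try:
        for name in tables:
            cur.execute("SELECT sql FROM sqlite_master WHERE type='table' AND name=?", (name,))
            if cur.fetchone() is None:
                return False
        return True
    finally:
        conn.close()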
Row metadata: repo_name=FloodRiskGroup/floodrisk | path=calcolodanno/calcolodannodialog.py | language=Python | license=gpl-2.0 | size=28148 | keyword=["ADF"] | text_hash=306d35caa9c81c7a203e3f582563782d3e51070e6927c0d826f38c2cb11dd637
# 2.12.2014 fix negative temp readings; avoid -127 and 85 degrees C on a timeout
# to be imported to access modbus registers as counters
# combines previous achannels.py and cchannels.py into one universal acchannel.py. add cfg bit for counter?
# 28 may 2014.... handle negative values
# 27.06.2014 make_svc() handles state change and age detection together with value (incl power) calc.
# svc stalled if a member is older than 10*self.readperiod
''' For testing:
from main_energy_starman import *
comm_doall() # read mb
ac.make_svc('A2W','A2S') # ai values limits, status, returns ['A2S', 0, 'A2W', '6720 6480 6420 10000 15000']
'''
from droidcontroller.sqlgeneral import * # SQLgeneral / also needs time, mb, conn etc.
s = SQLgeneral() # move into init?
from droidcontroller.counter2power import * # Counter2Power() as cp handles power calculation based on pulse count increments
import time
import sys
import traceback
import logging
log = logging.getLogger(__name__)
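# --- Illustrative helpers (hedged sketch, not used by the class below) ---
# Counters live in one or two 16-bit modbus registers. set_counter() splits a
# 32-bit value into high/low words (wcount == 2, or -2 for swapped word order
# as on barionet), and read_grp() reassembles them. The same arithmetic in
# isolation:
def split_words(value):
    """Split a 32-bit counter value into (high, low) 16-bit words."""
    value = int(value) & 0xFFFFFFFF
    return (value & 0xFFFF0000) >> 16, value & 0xFFFF

def join_words(first, second, swapped=False):
    """Combine two sequential 16-bit registers into one 32-bit counter value."""
    if swapped: # wcount == -2: the second register holds the high word
        first, second = second, first
    return (first << 16) + second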
class ACchannels(SQLgeneral): # handles aichannels and counters, modbus registers and sqlite tables
    ''' Access to io by modbus analogue register addresses (and also via services?). The modbus client must be opened first.
        Able to sync input and output channels and accept changes to service members by their sta_reg code.
        Channel configuration is defined in sql tables.
        Reads and sends only happen if enough time has passed since the previous one; see readperiod and sendperiod!
    '''
def __init__(self, in_sql = 'aicochannels.sql', out_sql = 'aochannels.sql', readperiod = 3, sendperiod = 30):
self.setReadPeriod(readperiod)
self.setSendPeriod(sendperiod)
self.in_sql = in_sql.split('.')[0]
self.out_sql = out_sql.split('.')[0]
#self.s = SQLgeneral()
self.cp = [] # possible counter2value calculation instances
self.Initialize()
    def setReadPeriod(self, invar):
        ''' Sets the ai reading period in s; sync is executed if the last read was more than a period ago. '''
        self.readperiod = invar # values are considered stalled after 10x self.readperiod
    def getReadPeriod(self):
        ''' Returns the ai reading period in s '''
        return self.readperiod # values are considered stalled after 10x self.readperiod
    def setSendPeriod(self, invar):
        ''' Sets the ai notification period in s; sync is executed if the last send was more than a period ago. '''
        self.sendperiod = invar
    def getSendPeriod(self):
        ''' Returns the ai notification period in s '''
        return self.sendperiod
def sqlread(self, table):
''' Read the sql file into memory '''
        s.sqlread(table)
def Initialize(self): # before using this create s=SQLgeneral()
''' initialize delta t variables, create tables and modbus connection '''
self.ts = round(time.time(),1)
self.ts_read = self.ts # time of last read
        self.ts_send = self.ts - 10 # allow counter restoring
        self.sqlread(self.in_sql) # read the counters table
        self.sqlread(self.out_sql) # read aochannels if it exists
        self.ask_counters() # ask the server about the last known values of all counter related services (1024 in cfg)
def ask_counters(self): # use on init, send ? to server
''' Queries last counter service values from the server '''
Cmd="select val_reg,max(cfg) from "+self.in_sql+" where (cfg+0 & 1024) group by val_reg" # counters only, to be asked and restored
#print "Cmd=",Cmd
cur=conn.cursor()
cur.execute(Cmd) # getting services to be read and reported
for row in cur: # possibly multivalue service members
val_reg=row[0]
#cfg=int(row[1]) if row[1] != '' else 0
udp.udpsend(val_reg+':?\n') # ask last value from uniscada server if counter
conn.commit()
return 0
def parse_udp(self,data_dict): # search for setup or set counter values
''' Channels setup change based on message from monitoring server '''
cur=conn.cursor()
setup_changed = 0 # flag general setup change, data to be dumped into sql file
msg=''
mval=''
res=0
member=0
        if data_dict == {} or data_dict == '' or data_dict is None:
            log.warning('ac: nothing to parse in ' + str(data_dict))
            return 0
        log.debug('parsing for possible match key:value data ' + str(data_dict)) # debug
for key in data_dict: # process per key:value
found = 0
if key[-1] == 'W': # must end with W to be multivalue service containing setup values
valmembers=data_dict[key].split(' ') # convert value to member list
log.debug('number of members for '+str(key)+' is '+str(len(valmembers)))
for valmember in range(len(valmembers)): # 0...N-1
Cmd="select mba,regadd,val_reg,member,value,regtype,wcount,mbi,x2,y2,cfg from "+self.in_sql+" where val_reg='"+key+"' and member='"+str(valmember+1)+"'"
#print(Cmd) # debug
cur.execute(Cmd)
conn.commit()
for row in cur: # single member
found += 1
log.debug('srow:'+str(row)) # debug
sqlvalue = int(row[4]) if row[4] != '' else 0 # eval(row[4]) if row[4] != '' else 0 #
cfg = int(row[10]) if row[10] != '' else 0
                        #val_reg = row[2] # not needed
try:
value=eval(valmembers[valmember])
except:
value = sqlvalue # no change!
#log.warning('invalid value in message from server for key '+key)
regtype=row[5] # 'h' 'i' 's!'
if sqlvalue != value and ((regtype == 'h' and value == 0 or value > sqlvalue) or (regtype == 's!')) and (cfg&2048) == 0:
# replace actual counters only if bigger than existing or zero and not 1wire channel, no limits for setup type 's!'
member=valmember+1
log.debug('going to replace '+key+' member '+str(member)+' existing value '+str(sqlvalue)+' with '+str(value)) # debug
                            # faster to use physical data instead of svc. also clear the counter2power buffer if cp[] exists!
if regtype == 's!': # setup row, external modif allowed (!)
if (row[0] == '' and row[1] == ''): # mba, regadd
if self.set_aivalue(str(key),member,value) == 0: # set setup value in sql table
msg='setup changed for key '+str(key)+', member '+str(member)+' to value '+str(value)
setup_changed=1
log.info(msg)
#udp.syslog(msg)
else:
msg='svc member setting problem for key '+str(key)+', member '+str(member)+' to value '+str(value)
log.warning(msg)
#udp.syslog(msg)
res+=1
else:
msg='acchannels.udp_parse: setup value cannot have mba,regadd defined!'
log.warning(msg)
#udp.syslog(msg)
res+=1
elif regtype == 'h': # holding register, probably counter
if (row[0] != '' and row[1] != ''): # mba,regadd probably valid
mba=int(row[0]) if row[0] != '' else 0
regadd=int(row[1]) if row[1] != '' else None
wcount=int(row[6]) if row[6] != '' else 1
mbi=int(row[7]) if row[7] != '' else None
x2=int(row[8]) if row[8] != '' else 0
y2=int(row[9]) if row[9] != '' else 0
#if self.set_counter(val_reg=key, member=member,value=value, wcount=wcount) == 0: # faster to use physical data instead of svc
if self.set_counter(mbi=mbi, mba=mba, regadd=regadd, value=value, wcount=wcount, x2=x2, y2=y2) == 0: # set counter
                                        #set_counter also cleared the counter2power buffer if cp[] existed!
msg='counter set for key '+key+', member '+str(member)+' to value '+str(value)
log.debug(msg)
#udp.syslog(msg)
else:
msg='member value setting problem for key '+key+', member '+str(member)+' to value '+str(value)
log.warning(msg)
#udp.syslog(msg)
res += 1
else:
msg='acchannels.udp_parse: holding register must have mba,regadd defined!'
log.warning(msg)
#udp.syslog(msg)
res += 1
else: # skip
log.debug('parse_udp: write for key '+key+' SKIPPED due to sqlvalue '+str(sqlvalue)+', value '+str(value)+', regtype '+regtype)
if found > 0: # process status too
self.make_svc(key,'') ### processing svc and notify
if setup_changed == 1:
log.info('going to dump table '+self.in_sql)
try:
s.dump_table(self.in_sql)
#sendstring=self.make_svc(key,key[:-1]+'S')
                ###sendstring = self.make_svc(key) # don't want sends without status
###log.debug('going to report back sendstring '+str(sendstring))
#udp.send(sendstring) # ????
except:
log.warning('FAILED to dump table '+self.in_sql)
traceback.print_exc() # debug
#if res == 0:
#self.read_all() # reread the changed channels to avoid repeated restore - no need
        return res # if setup_changed == 1, then todo = varlist! but what if something else is still pending at the same time?
def set_counter(self, value = 0, **kwargs): # value, mba,regadd,mbi,val_reg,member # one counter to be set. check wcount from counters table
''' Sets consecutive holding registers, wordlen 1 or 2 or -2 (must be defined in sql table in use).
Usable for cumulative counter counting initialization, not for analogue output (use set_output for this).
Must also clear counter2power instance buffer dictionary if cp[] instance exsists, to avoid unwanted spike in power calculation result!
'''
#val_reg='' # arguments to use a subset of them
#member=0
#mba=0
#mbi=0
#regadd=0
#wcount=0
#value=value
cur=conn.cursor()
x2=0
y2=0
Cmd=''
        try: # is mba- or val_reg-based addressing in use?
mba=kwargs['mba']
regadd=kwargs['regadd']
mbi=kwargs['mbi']
wcount=kwargs['wcount']
x2=kwargs['x2']
y2=kwargs['y2']
# if this fails, svc_name and member must be given as parameters
except:
try:
val_reg=kwargs.get('val_reg')
member=kwargs.get('member')
if val_reg == '' or member == 0:
log.debug('set_counter: invalid parameters val_reg '+str(val_reg)+', member '+str(member))
return 1
except:
log.warning('invalid parameters for set_counter() '+str(kwargs))
return 2
Cmd="select mbi,mba,regadd,wcount,x2,y2 from "+self.in_sql+" where val_reg='"+val_reg+"' and member='"+str(member)+"'"
#print('set_counter: ',Cmd) # debug
cur.execute(Cmd) # what about commit()? FIXME
conn.commit()
for row in cur:
            log.debug('row: ' + str(row)) # debug
mbi=row[0]
mba=int(row[1]) if row[1] != '' else 0
regadd=int(row[2]) if row[2] != '' else 0
wcount=int(row[3]) if row[3] != '' else 0
x2=int(row[4]) if row[4] != '' else 0
y2=int(row[5]) if row[5] != '' else 0
#print('set_counter: mbi,mba,regadd,wcount,x2,y2',mbi,mba,regadd,wcount,x2,y2) # debug
if x2 != 0 and y2 != 0: #convert
value=round(1.0*value*x2/y2) # assuming x1=x2=0, only counter registers to be written this way...
else:
            log.warning('set_counter: invalid scaling x2,y2 ' + str(x2) + ',' + str(y2))
return 1
value=(int(value)&0xFFFFFFFF) # to make sure the value to write is 32 bit integer
        try:
            if wcount == 2: # normal counter, type h
                #res=mb[mbi].write(mba, regadd, values=[(value&0xFFFF0000)>>16,(value&0xFFFF)]) # works if multiple register write supported
                res = mb[mbi].write(mba, regadd, value=(value&0xFFFF0000)>>16) # single register write
                time.sleep(0.1)
                res += mb[mbi].write(mba, regadd+1, value=(value&0xFFFF)) # single register write
            elif wcount == -2:
                #res=mb[mbi].write(mba, regadd, values=[(value&0xFFFF), (value&0xFFFF0000)>>16]) # works if multiple register write supported
                res = mb[mbi].write(mba, regadd, value=(value&0xFFFF)) # single register write
                time.sleep(0.1)
                res += mb[mbi].write(mba, regadd+1, value=(value&0xFFFF0000)>>16) # single register write
            elif wcount == 1:
                res = mb[mbi].write(mba, regadd, value=(value&0xFFFF)) # single register write
else:
log.warning('set_counter: unsupported word count! mba '+str(mba)+', regadd '+str(regadd)+', wcount '+str(wcount))
res=1
if res == 0:
log.info('write success to counter mba.regadd '+str(mba)+'.'+str(regadd))
else:
log.warning('set_counter: write FAILED to mba '+str(mba)+', regadd '+str(regadd))
return res
except: # set failed
msg='failed set_counter mbi.mba.regadd '+str(mbi)+'.'+str(mba)+'.'+str(regadd)
#udp.syslog(msg)
log.warning(msg)
traceback.print_exc()
return 1
# no need for commit, this method is used in transaction
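    # --- Worked example (comment only, added for illustration) ---
    # For value = 70000 and wcount == 2, set_counter() writes:
    #   high word: (70000 & 0xFFFF0000) >> 16 = 1    -> register regadd
    #   low word:   70000 & 0xFFFF       = 4464      -> register regadd+1
    # With wcount == -2 the same two words are written in the opposite order.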
def read_grp(self,mba,regadd,count,wcount,mbi=0,regtype='h'): # update raw in self.in_sql with data from modbus registers
        ''' Reads a sequential register group, processes numbers according to counter size and stores raw into table self.in_sql. Inside a transaction!
            Compares the raw value from the modbus register with the old value in the table. If changed, ts is set to the modbus readout time self.ts.
            Add here counter state recovery if suddenly zeroed
            # if value == 0 and ovalue >0: # possible pic reset. perhaps value <= 100?
            #     msg='restoring lost content for counter '+str(mba)+'.'+str(regadd)+':2 to become '+str(ovalue)+' again instead of '+str(value)
            #     #syslog(msg)
            #     print(msg)
            #     self.set_counter(value=ovalue, mba=mba, regadd=regadd, mbi=mbi, wcount=wcount, x2=x2, y2=y2) # does not contain commit()!
            # this above should be fixed. value is already saved, put it there!
            The delay at the end attempts to increase the reliability of reading on mba change. INVESTIGATE, is it possibly a slave (ioboard) related problem?
            FIXME: do not attempt to access counters that are not defined in devices.sql! this should be an easy way to add/remove devices.
        '''
self.ts = round(time.time(),2) # refresh timestamp for raw, common for grp members
step=int(abs(wcount))
cur=conn.cursor()
oraw=0
msg='grp read from mba '+str(mba)+'.'+str(regadd)+', cnt '+str(count)+', wc '+str(wcount)+', mbi '+str(mbi)+', regtype '+regtype
if count>0 and mba != 0 and wcount != 0:
try:
if mb[mbi]:
result = mb[mbi].read(mba, regadd, count=count, type=regtype) # client.read_holding_registers(address=regadd, count=1, unit=mba)
msg += ', raw: '+str(result)
else:
msg += ' -- no mb[]!'
                log.debug(msg) ## log.info(msg) # this is a good compact raw check
except:
msg += ' -- FAILED!'
log.warning(msg)
return 2
else:
msg += ' -- invalid count, mba, wcount or regtype!'
log.warning(msg)
return 2
if result != None: # got something from modbus register
try:
for i in range(int(count/step)): # ai-co processing loop. tuple to table rows. tuple len is twice count! int for py3 needed
tcpdata=0
#print('aico_grp debug: i',i,'step',step,'results',result[step*i],result[step*i+1]) # debug
if wcount == 2:
tcpdata = (result[step*i]<<16)+result[step*i+1]
#print('normal counter',str(i),'result',tcpdata) # debug
elif wcount == -2:
tcpdata = (result[step*i+1]<<16)+result[step*i] # swapped word order, as in barionet
#print('swapped words counter',str(i),'result',tcpdata) # debug
elif wcount == 1: # normal ai and 1wire. the latter can be negative!
if len(result) - 1 >= i:
tcpdata = result[i]
else:
log.warning('read_grp invalid, i='+str(i)+' while result='+str(result))
else: # something else, lengths other than 1 2 -2 not yet supported!
log.warning('unsupported counter word size '+str(wcount))
return 1
#Cmd="select raw from "+self.in_sql+" where mbi="+str(mbi)+" and mba='"+str(mba)+"' and regadd='"+str(regadd+i*step)+"' group by mbi,mba,regadd"
Cmd="select raw,max(cfg) from "+self.in_sql+" where mbi="+str(mbi)+" and mba='"+str(mba)+"' and regadd='"+str(regadd+i*step)+"' group by mbi,mba,regadd"
# get the old value to compare with new. can be multiple rows, group to single row. if counter and zero, ask_counters()
cur.execute(Cmd)
for row in cur:
oraw=int(row[0]) if row[0] !='' else -1
cfg=int(row[1]) if row[1] != '' else 0
#if tcpdata != oraw or oraw == -1: # update only if change needed or empty so far - NO! value CAN stay the same, but age is needed!
if str(regadd)[0] == '6' and tcpdata == 2560: # failing temperature sensor, do not update
log.warning('failing temperature sensor on address '+str(regadd+i*step))
else:
Cmd="UPDATE "+self.in_sql+" set raw='"+str(tcpdata)+"', ts='"+str(self.ts)+ \
"' where mba='"+str(mba)+"' and regadd='"+str(regadd+i*step)+"' and mbi="+str(mbi) # koigile korraga selle mbi, mba, regadd jaoks
conn.execute(Cmd)
log.debug('updated '+self.in_sql+' with raw='+str(tcpdata)+' from mba '+str(mba)+' regadd '+str(regadd+i*step)) ######
                    time.sleep(0.03) # only needed for serial? FIXME
return 0
except:
traceback.print_exc()
time.sleep(0.2)
return 1
else:
log.warning('recreating modbus channel due to error on '+str(mbhost[mbi]))
mb[mbi] = CommModbus(host=mbhost[mbi])
msg='recreated mb['+str(mbi)+'], this aicochannels grp read FAILED for mbi,mba,regadd,count '+str(mbi)+', '+str(mba)+', '+str(regadd)+', '+str(count)
log.warning(msg)
time.sleep(0.5) # hopefully helps to avoid sequential error / recreations
return 1
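    # --- Worked example (comment only, added for illustration) ---
    # read_grp() reverses the split done by set_counter(): registers [1, 4464]
    # with wcount == 2 give tcpdata = (1 << 16) + 4464 = 70000; registers
    # [4464, 1] with wcount == -2 also yield 70000, because there the second
    # register holds the high word.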
    def read_all(self): # read all defined modbus ai and counter channels to sql in groups by regtype, usually 32 bit / 2 registers.
        ''' Must read the counter registers in sequential regadd blocks where possible (i.e. when the regadd increment == wcount).
            Also converts the raw data (incl member rows without mba) into services, calculates the svc component statuses
            and the summary status, and sends them away to UDPchannel.
        '''
'''
respcode=0
mba=0
val_reg=''
sta_reg=''
status=0
value=0
lisa=''
desc=''
comment=''
#mcount=0
Cmd1=''
#self.ts = round(time.time(),2) # not needed here
#ts_created=self.ts # not needed here
cur=conn.cursor()
cur3=conn.cursor()
bmba=0 # mba for sequential register address block
bfirst=0 # sequential register block start
blast=0
wcount=0
bwcount=0
bcount=0
tcpdata=0
sent=0
regtype=''
bregtype=''
self.cpi = -1 # start with cp instance numbering, by service members not services!
try:
Cmd="BEGIN IMMEDIATE TRANSACTION" # combines several read/writes into one transaction
# read mbi,mba,regadd,wcount,regtype from channels table to define groups
# read modbus registers in groups and write raw into table
# read svc from table to calculate and update value
#
conn.execute(Cmd)
Cmd="select mba,regadd,wcount,mbi,regtype from "+self.in_sql+" where mba != '' and regadd != '' group by mbi,mba,regtype,regadd" # gruppideks jagamine
cur.execute(Cmd) # selle paringu alusel raw update, hiljem teha value arvutused iga teenuseliikme jaoks eraldi
for row in cur: # these groups can be interrupted into pieces to be queried!
                mba = int(row[0]) if row[0] != '' else 0
                regadd = int(row[1]) if row[1] != '' else 0
                wcount = int(row[2]) if row[2] != '' else 0 # wordcount for the whole group!!
                mbi = int(row[3]) if row[3] != '' else 0 # modbus connection index
                regtype = row[4] if row[4] != '' else 'h' # modbus register type, holding or input
#print('found channel mbi,mba,regadd,wcount,regtype',mbi,mba,regadd,wcount,regtype) # debug
if bfirst == 0:
bfirst = regadd
blast = regadd
bwcount = wcount # wcount can change with next group
bcount=int(abs(wcount)) # word count is the count
bmba=mba
bmbi=mbi
bregtype= regtype
#print(' group mba '+str(bmba)+' start ',bfirst) # debug
else: # not the first
if mbi == bmbi and regtype == bregtype and mba == bmba and regadd == blast+abs(wcount): # sequential group still growing
blast = regadd
bcount=bcount+int(abs(wcount)) # increment by word size
#print('group end shifted to',blast) # debug
else: # a new group started, make a query for previous
self.read_grp(bmba,bfirst,bcount,bwcount,bmbi,bregtype) # reads and updates table with previous data ##################### READ MB ######
bfirst = regadd # new grp starts immediately
blast = regadd
#bwcount = wcount # does not change inside group
bcount=int(abs(wcount)) # new read piece started
bwcount=wcount
bmba=mba
bmbi=mbi
bregtype=regtype
#print('group mba '+str(bmba)+' start ',bfirst) # debug
if bfirst != 0: # last group yet unread
#print(' group end detected at regadd',blast) # debug
#print('going to read last group, registers from',bmba,bfirst,'to',blast,'regcount',bcount,'regtype',bregtype) # debug
self.read_grp(bmba,bfirst,bcount,bwcount,bmbi,bregtype) # reads and updates table with previous data ##################### READ MB ######
# raw sync (from modbus to sql) done.
# now process raw -> value and find statuses using make_svc() for each service.
#power calculations happen in make_svc too!
Cmd="select val_reg from "+self.in_sql+" group by val_reg" # find services
            log.debug('read_all Cmd='+Cmd) # was commented out until 31.1?
            cur.execute(Cmd) # was commented out until 31.1?
for row in cur: # SERVICES LOOP
#val_reg=''
#sta_reg=''
#status=0 #
#value=0
val_reg=row[0] # service value register name
#sta_reg=val_reg[:-1]+"S" # status register name ASSUMPTION. BUT MAKE_SVC ACCEPTS NOW STA_REG=''
log.debug('processing '+self.in_sql+' rows into service with val_reg '+val_reg+' sta_reg '+sta_reg)
#self.make_svc(val_reg,sta_reg) ## sets status and notifies id status chg in any member
self.make_svc(val_reg) ## sets status and notifies id status chg in any member
            conn.commit() # also covers make_svc()
sys.stdout.write('A')
return 0
except:
msg='problem with acchannels.read_all(): '+str(sys.exc_info()[1])
log.warning(msg)
#udp.syslog(msg)
traceback.print_exc()
sys.stdout.flush()
time.sleep(1)
return 1
#read_all end #############
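    # --- Illustrative note (comment only, added for clarity) ---
    # read_all() merges channel rows into sequential read blocks: rows on the
    # same mbi/mba/regtype whose register addresses increase by exactly
    # abs(wcount) are fetched with a single read_grp() call. For example, rows
    # at regadd 100, 102, 104 with wcount 2 become one group (bfirst=100,
    # bcount=6 words); a row at regadd 110 would start a new group.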
def sync_ao(self):
''' Synchronizes AI registers with data in aochannels table '''
#print('write_aochannels start') # debug
# and use write_register() write modbus registers to get the desired result (all ao channels must be also defined in aichannels table!)
respcode=0
mbi=0
mba=0
omba=0 # previous value
val_reg=''
desc=''
value=0
word=0 # 16 bit register value
#comment=''
mcount=0
cur = conn.cursor()
cur3 = conn.cursor()
        ts_created = self.ts # we take this as the service timestamp
try:
Cmd="BEGIN IMMEDIATE TRANSACTION"
conn.execute(Cmd)
# 0 1 2 3 4 5 6 7
#mba,regadd,bit,bootvalue,value,rule,desc,comment
Cmd="select "+self.out_sql+".mba,"+self.out_sql+".regadd,"+self.out_sql+".value,"+self.out_sql+".mbi from "+self.out_sql+" left join "+self.in_sql+" \
on "+self.out_sql+".mba = "+self.in_sql+".mba AND "+self.out_sql+".mbi = "+self.in_sql+".mbi AND "+self.out_sql+".regadd = "+self.in_sql+".regadd \
where "+self.out_sql+".value != "+self.in_sql+".value" #
# the command above retrieves mba, regadd and value where values do not match in aichannels and aochannels
#print "Cmd=",Cmd
cur.execute(Cmd)
for row in cur: # got mba, regadd and value for registers that need to be updated / written
            #log.debug('row: '+str(repr(row))) # brings it into appd.log
regadd=0
mba=0
            mba = int(eval(row[0])) if row[0] != '' else 0 # must be a number
            regadd = int(eval(row[1])) if row[1] != '' else 0 # must be a number
            value = int(eval(row[2])) if row[2] != '' else 0 # may be a decimal number, make it int!
            mbi = row[3] if row[3] != None else 0 # mbi is numeric!
try:
if mb[mbi] and mba > 0:
respcode=respcode+mb[mbi].write(mba=mba, reg=regadd, value=value)
if respcode == 0:
log.debug('successfully written value '+str(value)+' to mbi '+str(mbi)+', mba '+str(mba)+' regadd '+str(regadd))
else:
log.warning('FAILED write to modbus device mbi '+str(mbi)+', mba '+str(mba))
return 1
except:
log.warning('FAILED write to modbus device mbi '+str(mbi)+', mba '+str(mba)+' not defined in devices.sql?')
return 2
conn.commit() # transaction end - why?
return 0
except:
msg='problem with acchannel.sync_ao()!'
log.warning(msg)
traceback.print_exc()
return 1
    # sync_ao() end. FRESHENED DICHANNELS TABLE VALUES AND CHG BITS (0 TO SEND, 1 TO PROCESS)
    def get_aivalue(self, svc, member):
        ''' Returns value, outlo, outhi, status based on the service name and member number '''
        # status gets reported as the summary status for the service, not the svc member!
#(mba,regadd,val_reg,member,cfg,x1,x2,y1,y2,outlo,outhi,avg,block,raw,value,status,ts,desc,comment,type integer)
cur=conn.cursor()
Cmd="BEGIN IMMEDIATE TRANSACTION" # conn3, et ei saaks muutuda lugemise ajal
conn.execute(Cmd)
Cmd="select value,outlo,outhi,status from "+self.in_sql+" where val_reg='"+svc+"' and member='"+str(member)+"'"
#print(Cmd) # debug
cur.execute(Cmd)
raw = 0
value = 0 # None
outlo = 0
outhi = 0
status = 0
found = 0
for row in cur: # should be one row only
#print(repr(row)) # debug
found=1
#raw=int(float(row[0])) if row[0] != '' and row[0] != None else 0
value=int(eval(row[0])) if row[0] != '' and row[0] != None else 0
outlo=int(eval(row[1])) if row[1] != '' and row[1] != None else 0
outhi=int(eval(row[2])) if row[2] != '' and row[2] != None else 0
status=int(eval(row[3])) if row[3] != '' and row[3] != None else 0
if found == 0:
msg='get_aivalue failure, no member '+str(member)+' for '+svc+' found!'
log.warning(msg)
conn.commit()
log.debug('svc '+svc+' member '+str(member)+' value '+str(value)) # debug
return value,outlo,outhi,status
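    # --- Usage sketch (comment only; 'A2W' is the service name from the
    # module's own testing docstring) ---
    #   value, outlo, outhi, status = ac.get_aivalue('A2W', 1)
    # returns the scaled value, its limits and the member status for the
    # first member of the multivalue service 'A2W'.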
    def set_aivalue(self, svc, member, value): # sets variables like setpoints or limits to be reported within services, based on service name and member number
        ''' Sets a member value using sqlgeneral set_membervalue, passing the sql table in use. '''
        return s.set_membervalue(svc, member, value, self.in_sql, raw=False)
    def set_airaw(self, svc, member, value): # sets variables like setpoints or limits to be reported within services, based on service name and member number
        ''' Sets a member raw value using sqlgeneral set_membervalue, passing the sql table in use. '''
        return s.set_membervalue(svc, member, value, self.in_sql, raw=True)
def set_aovalue(self, value, mba, reg): # sets variables to control, based on physical addresses
''' Write value to follow into aochannels table.
The according modbus holding register will be written by sync_ao() until the according
aicochannels register contain the same value.
'''
#(mba,regadd,bootvalue,value,ts,rule,desc,comment)
Cmd="BEGIN IMMEDIATE TRANSACTION" # conn
conn.execute(Cmd)
Cmd="update "+self.out_sql+" set value='"+str(value)+"' where regadd='"+str(reg)+"' and mba='"+str(mba)+"'" # set_aovalue()
try:
conn.execute(Cmd)
conn.commit()
return 0
except:
msg='set_aovalue failure: '+str(sys.exc_info()[1])
log.warning(msg)
#udp.syslog(msg)
return 1 # update failure
def set_aosvc(self, svc, member, value): # to set a readable output channel by the service name and member using aicochannels table
''' Set service member value by service name and member number, to be synced into holding register.
The aicochannels table must contain a similar input channel, to compare the result with.
'''
#(mba,regadd,val_reg,member,cfg,x1,x2,y1,y2,outlo,outhi,avg,block,raw,value,status,ts,desc,comment,type integer) # ai
Cmd="BEGIN IMMEDIATE TRANSACTION"
conn.execute(Cmd)
Cmd="select mba,regadd from "+self.in_sql+" where val_reg='"+svc+"' and member='"+str(member)+"'"
cur=conn.cursor()
cur.execute(Cmd)
mba=None
reg=None
for row in cur: # should be one row only
try:
mba=row[0]
reg=row[1]
self.set_aovalue(value,mba,reg)
conn.commit()
return 0
except:
msg='set_aovalue failed for reg '+str(reg)+': '+str(sys.exc_info()[1])
log.warning(msg)
#udp.syslog(msg)
return 1
def report_all(self, svc = ''): # send the aico service messages to the monitoring server (only if fresh enough, not older than 2xappdelay). all or just one svc.
''' Make all (defined self.in_sql) services reportable (with status chk) and send it away to UDPchannel '''
mba=0
val_reg=''
desc=''
cur=conn.cursor()
        ts_created = self.ts # we take this as the service timestamp
try:
Cmd="BEGIN IMMEDIATE TRANSACTION" # conn3, kogu selle teenustegrupiga (aichannels) tegelemine on transaction
conn.execute(Cmd)
if svc == '': # all services
Cmd="select val_reg from "+self.in_sql+" group by val_reg"
else: # just one
Cmd="select val_reg from "+self.in_sql+" where val_reg='"+svc+"'"
cur.execute(Cmd)
for row in cur: # services
                val_reg = row[0] # service name
                #sta_reg=val_reg[:-1]+"S" # name without the last symbol plus S - the status service name, for analogue values and temperatures
sendtuple=self.make_svc(val_reg)
if sendtuple != None: #
                    udp.send(sendtuple) # may buffer a duplicate if make_svc found a change; no double sending if ts is the same
log.debug ('buffered for reporting: '+str(sendtuple))
else:
log.warning('FAILED to report svc '+val_reg)
# return 1 # other services still need to be reported, commit needs to be done.
conn.commit() # aicochannels svc_report transaction end
return 0
except:
msg='PROBLEM with acchannels.report_all() for svc '+svc+' based on table '+self.in_sql+': '+str(sys.exc_info()[1])
log.warning(msg)
#udp.syslog(msg)
traceback.print_exc()
return 1
    def make_svc(self, val_reg, sta_reg=''): # ONE svc, both val_reg and sta_reg exist for ai and counters
        ''' Makes a single service record WITH STATUS based on existing values and updates the scaled value in the sql table.
            Uses block as hysteresis in value units for status change if cfg&8192 == True.
            Uses block as off_tout in s for counters with power-on/off detection if cfg&64 == True.
            If sta_reg is empty and val_reg ends with W, an S ending is assumed for the sta_reg name.
        '''
status = 0 # initially for whole service
mstatus = 0
cur = conn.cursor()
lisa = ''
value = None
if sta_reg == '' and val_reg[-1] == 'W':
sta_reg = val_reg[0:-1]+'S' # assuming S in the end
Cmd="select mba,regadd,val_reg,member,cfg,x1,x2,y1,y2,outlo,outhi,avg,block,raw,value,status,ts,desc,regtype,grp,mbi,wcount from "+self.in_sql \
+" where val_reg='"+val_reg+"' order by member asc" # avoid trouble with column order
log.debug(Cmd)
#print(Cmd)
cur.execute(Cmd) # another cursor to read the same table
ts_now = time.time() # time now in sec
rowproblemcount = 0 # count of invalid members in svc
for srow in cur: # go through service members
log.debug(repr(srow))
mba=-1 #
regadd=-1
member=0
cfg=0
x1=0
x2=0
y1=0
y2=0
outlo=0
outhi=0
            ostatus = 0 # previous member status
            #tvalue=0 # test, comparison
            oraw = 0
            ovalue = 0 # previous (possibly averaged) value
            ots = 0 # previous ts for value, status and raw
            avg = 0 # averaging factor, takes effect from 2 up
            block = 0 # power off_tout for counters
hyst = 0
result=None
#desc=''
#comment=''
rowproblem = 0 # initially ok
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18
#mba,regadd,val_reg,member,cfg,x1,x2,y1,y2,outlo,outhi,avg,block,raw,value,status,ts,desc,comment # aichannels
try:
                mba = int(srow[0]) if srow[0] != '' else 0 # must be int! 0 if empty (setpoints)
                regadd = int(srow[1]) if srow[1] != '' else 0 # must be int! 0 if empty
                val_reg = srow[2] # this is a string
member = int(srow[3]) if srow[3] != '' else 0
                cfg = int(srow[4]) if srow[4] != '' else 0 # config byte for both individual and group handling; show in hex form later
x1 = int(srow[5]) if srow[5] != '' else 0
x2 = int(srow[6]) if srow[6] != '' else 0
y1 = int(srow[7]) if srow[7] != '' else 0
y2 = int(srow[8]) if srow[8] != '' else 0
outlo = int(srow[9]) if srow[9] != '' else 0
outhi = int(srow[10]) if srow[10] != '' else 0
avg = int(srow[11]) if srow[11] != '' else 0 # averaging strength, values 0 and 1 do not average!
block = int(srow[12]) if srow[12] != '' else 0 # off-tout for power related on/off
                raw = int(srow[13]) if srow[13] != '' else 0 # None causes trouble
                ovalue = int(srow[14]) if (srow[14] != None and srow[14] != '' ) else 0 # previous value of the service member - WHY is it sometimes None???
                ostatus = int(srow[15]) if srow[15] != '' else 0 # service component status - not used yet / taking it into use
                ots = eval(srow[16]) if srow[16] != '' else 0
#desc=srow[17]
regtype = srow[18] # should be h or i for modbus registers
mbi = srow[20] # int
wcount = int(srow[21]) if srow[21] != '' else 1 # word count
chg = 0 # member status change flag
log.debug('val_reg '+val_reg+' member '+str(member)+', cfg='+str(cfg)+', raw='+str(raw)+', ovalue='+str(ovalue)+', outlo='+str(outlo)+', outhi='+str(outhi))
#print('val_reg '+val_reg+' member '+str(member)+', cfg='+str(cfg)+', raw='+str(raw)+', ovalue='+str(ovalue)+', outlo='+str(outlo)+', outhi='+str(outhi)) # debug
except:
log.debug('invalid data from '+self.in_sql+' for svc '+val_reg+', srow: '+repr(srow))
rowproblem = 1
traceback.print_exc()
time.sleep(5)
#power instances to be done
if (cfg&64): # power instance index increment HERE! within service list
self.cpi += 1
log.debug('****** cpi '+str(self.cpi))
try:
if self.cpi != -1 and self.cp[self.cpi]:
pass # this instance already exists
except:
# make_svc() must append self.cp if not exists
self.cp.append(Counter2Power(val_reg, member, off_tout = block)) # another Count2Power instance. 100s = 36W threshold if 1000 imp/kWh
log.info('created counter2power instance '+str(self.cpi)+' for mbi '+str(mbi)+',mba.regadd '+str(mba)+'.'+str(regadd)+' m '+str(member)+': '+str(self.cp[self.cpi]))
# cfg related tests and calc
if (regtype == 'h' or regtype == 'i' or regtype == 'c' or regtype == 'r'): # for channel data only, not for setup values (s, s!)
if raw != None:
if rowproblem == 1:
log.warning('svc processing skipped due to invalid data from '+self.in_sql+' for svc '+val_reg+', srow: '+repr(srow))
elif (ots < ts_now - 10*self.readperiod and ots < ts_now - 2*self.sendperiod): # raw too old, stalled
log.warning('svc processing skipped due to stalled (for '+str(int(ts_now - ots))+' s) raw data ('+str(raw)+') for '+val_reg+'.'+str(member))
else: # data fresh enough, going to process
## POWER? FILTER? ####
if (cfg&64): # power, no sign, increment to be calculated! divide increment to time from the last reading to get the power
#cpi += 1 # counter2power index, increment BEFORE value validation
log.debug('going to calc power for mba.regadd '+str(mba)+'.'+str(regadd)+' using cp['+str(self.cpi)+']')
#res = self.cp[self.cpi].calc(ots, raw, ts_now = self.ts) # power calculation based on raw counter increase
res = self.cp[self.cpi].calc(raw) # based on current ts only!
log.debug('got result from cp['+str(self.cpi)+']: '+str(res)+', params ots '+str(ots)+', raw '+str(raw)+', ts_now '+str(self.ts)) # debug
if (cfg&128): # on off state from power
raw = res[1] # state on/off
if res[2] != 0: # on/off change
chg = 1 # immediate notification needed due to state change
log.info('state change in cp['+str(self.cpi)+']')
else:
raw = res[0] # power
elif (cfg&2048): # 1wire filter. should have cfg bit 4096 as well!
if raw == 1360 or raw == 4096:
log.warning('invalid raw value '+str(raw)+' for temp sensor (cfg=2048) in svc '+val_reg+'.'+str(member)+', replacing with None')
raw = None
## SCALING #############
if raw != None:
if (cfg&4096): # take sign into account
if raw >= (2**(wcount*16-1)): # negative!
raw = raw-(2**(wcount*16))
log.debug('converted to negative: '+str(raw)) # debug
if x1 != x2 and y1 != y2: # seems like normal input data, also not state from power
value=(raw-x1)*(y2-y1)/(x2-x1)
value=int(round(y1+value)) # integer values to be reported only
else:
#log.debug('val_reg '+val_reg+' member '+str(member)+', raw '+str(raw)+' ai2scale conversion NOT DONE! using value = raw ='+str(raw))
#log.warning('val_reg '+val_reg+' member '+str(member)+', raw '+str(raw)+' ai2scale conversion NOT DONE!')
value=None
rowproblem = 1 # this service will not be used in notification
                            ## binary services defined in aicochannels must have x1 x2 y1 y2! 0 1 0 1
if value != None and avg != None and ovalue != None:
if avg > 1 and abs(value - ovalue) < value / 2: # averaging the readings. big jumps (more than 50% change) are not averaged.
value=int(((avg - 1) * ovalue+value)/avg) # averaging with the previous value, works like RC low pass filter
log.debug('averaging on, value became '+str(value)) # debug
if (cfg&256) and abs(value - ovalue) > value / 5.0: # change more than 20% detected, use num w comma!
log.debug('value change of more than 20% detected in '+val_reg+'.'+str(member)+', need to notify')
chg = 1
# counter2power and scaling done, status check begins ##########
if cfg&8192: # use hysteresis from block
hyst = block # int
mstatus=self.value2status(value,cfg,outlo,outhi,ostatus, hyst) # default hyst=0 value units
if mstatus != ostatus: # member status change detected
chg = 1 # immediate notification within this method
log.debug('member status chg (after possible inversion) to ' +str(mstatus))
if value != None:
Cmd="update "+self.in_sql+" set status='"+str(mstatus)+"', value='"+str(value)+"' where val_reg='"+val_reg+"' and member='"+str(member)+"'"
conn.execute(Cmd) # who commits? the calling method, read_all()!!!
else:
log.warning(self.in_sql+' NOT updated due to '+val_reg+' member '+str(member)+' value None! chk regadd '+str(regadd))
rowproblem = 1
############# h or i processing done #######
elif 's' in regtype: # setup value
value = ovalue # use the value in table without conversion or influence on status
if mba > 0:
log.warning('NO mba SHOULD be set for setup value '+val_reg+'.'+str(member)) # debug
if lisa != '': # not the first member
lisa += ' ' # separator between member values
try:
lisa += str(int(round(value))) # adding member values into one string
except:
log.debug('invalid value to use for service '+val_reg+'.'+str(member)) # do not refer value here, may be missing from another mba!
rowproblem = 1
if mstatus > status:
status=mstatus
rowproblemcount += rowproblem
#if self.cpi > -1: # counters in use
# log.debug(' /// counter instances count '+str(len(self.cp)))
# service members done, check if all of them valid to use in svc tuple
if rowproblemcount == 0: # all members valid
sendtuple = [sta_reg,status,val_reg,lisa] # sending service to buffer
#if not (cfg&512) and chg == 1: # immediate notification / FIXME - too late here
# udp.send(sendtuple) # to uniscada instance for immediate notification (without buffering?)
# log.debug('sendtuple for '+val_reg+' sent to buffer due to value change')
return sendtuple # for regular send or status check
else:
log.debug(val_reg+' had '+str(rowproblemcount)+' problematic members, sendtuple NOT created!')
return None
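    # Illustrative sketch (not part of the original file): the ai2scale step
    # above maps a raw register value to engineering units by linear
    # interpolation between calibration points (x1, y1) and (x2, y2),
    # rounding to an integer as the code does:
    #
    #   def ai2scale(raw, x1, x2, y1, y2):
    #       if x1 == x2 or y1 == y2:
    #           return None  # unusable calibration; the row is flagged as a problem
    #       return int(round(y1 + (raw - x1) * (y2 - y1) / (x2 - x1)))
    #
    #   ai2scale(512, 0, 1023, 0, 100)  # -> 50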
def value2status(self,value,cfg,outlo,outhi,ostatus=0,hyst=0):
''' Returns svc member status based on value and limits, taking cfg and previous status into account.
If value to status inversion is in use (to define forbidden instead of allowed zones),
normalize old status ostatus first and then invert mstatus in the end.
Use hysteresis in value units for status change if needed.
'''
# svc STATUS CHK. check the value limits and set the status, according to configuration byte cfg bits values
# use hysteresis to return from non-zero status values
        # CONFIG BYTE BIT MEANINGS
        # 1 - below outlo warning,
        # 2 - below outlo critical,
        # NB! 3 - not to be sent if value below outlo
        # 4 - above outhi warning
        # 8 - above outhi critical
        # NB! 12 - not to be sent if value above outhi
        # 16 - immediate notification on status change (USED FOR STATE FROM POWER)
        # 32 - limits to state inversion
        # 64 - power to be counted based on count increase and time period between counts
        # 128 - state from power flag
        # 256 - notify on 20% value change (not only limit crossing that becomes activated by first 4 cfg bits)
        # 512 - do not report at all, for internal usage
        # 1024 - raw counter
        # 2048 - 1wire, filter out raw values 1360 and 4096
        # 4096 - signed value
        # 8192 - use hysteresis from block
mstatus=0 # initial service member status
bitvalue=0 # remember the important bitvalue for nonzero internal status
#print('value,cfg,outlo,outhi',value,cfg,outlo,outhi) # debug
if (cfg&32): # status inversion IN USE, normalize
if ostatus>0:
ostatus=0
else:
ostatus=1 # treating statuses 1 and 2 equally
if outhi != None: # hi limit set
if value > outhi + hyst: # above hi limit
#print('value above outhi,cfg',cfg) # debug
if (cfg&4)>0: # warning
mstatus=1
if (cfg&8)>0: # critical
mstatus=2
if (cfg&12) == 12: # not to be sent
mstatus=3
#print('mstatus due to value above outhi',mstatus) # debug
else: # POSSIBLE return with hysteresis, even without existing outlo
if value < outhi - hyst and (outlo == None or (outlo != None and value > outlo + hyst)):
mstatus=0 # below hyst limit
#print('mstatus due to return below outhi',mstatus) # debug
else: # within dead zone or above
if mstatus == 0 and ostatus > 0:
mstatus=ostatus
#print('no change for old mstatus due to dead zone hi',mstatus) # debug
if outlo != None: # lo limit set
if value < outlo - hyst: # below lo limit
#print('value below outlo') # debug
if (cfg&1): # warning
mstatus = 1
if (cfg&2): # critical
mstatus = 2
if (cfg&3) == 3: # not to be sent, unknown
mstatus = 3
#print('mstatus due to value below outlo',mstatus) # debug
else: # POSSIBLE return with hysteresis, even without existing outlo
if value > outlo + hyst and (outhi == None or (outhi != None and value < outhi - hyst)):
mstatus = 0 # below hyst limits
#print('mstatus due to return above outlo',mstatus) # debug
else: # within dead zone or below
if mstatus == 0 and ostatus > 0:
mstatus = ostatus
#print('no change for old mstatus due to dead zone lo',mstatus) # debug
if (cfg&32): # possible status inversion for each member
#print('status inversion enabled,cfg,mstatus before inv',cfg,mstatus) # debug
if mstatus == 0: # within FORBIDDEN zone
if (cfg&5):
mstatus = 1 # normal becomes warning
elif(cfg&10):
mstatus = 2 # normal becomes critical, higher cfg bit wins
else: # outside forbidden zone
mstatus = 0 # not normal becomes normal
else:
#print('no inversion used, unchanged mstatus',mstatus) # debug
pass
return mstatus
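    # Hedged usage sketch (added; not in the original source). With cfg bits
    # 4 and 8 set (so cfg&12 == 12, "not to be sent") and hyst=2, the status
    # only returns to 0 after the value drops below outhi - hyst, which
    # suppresses flapping around the limit:
    #
    #   self.value2status(105, cfg=12, outlo=None, outhi=100, ostatus=0, hyst=2)  # -> 3
    #   self.value2status(99,  cfg=12, outlo=None, outhi=100, ostatus=3, hyst=2)  # -> 3 (dead zone)
    #   self.value2status(97,  cfg=12, outlo=None, outhi=100, ostatus=3, hyst=2)  # -> 0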
    def doall(self): # do this regularly; executes only if the time is right
        ''' Does everything that is regularly needed in this class, on time, if executed often enough.
        Do not report too early, counters may get restored from server.
        '''
res=0
self.ts = round(time.time(),2)
if self.ts - self.ts_read > self.readperiod:
self.ts_read = self.ts
try:
res=self.read_all() ## read all registers defined in aicochannels
self.sync_ao() ### write ao registers that are also present in aicochannels but the content is different
except:
traceback.print_exc()
if self.ts - self.ts_send > self.sendperiod:
self.ts_send = self.ts
try:
res=res+self.report_all() ### report all services in aicochannels
except:
traceback.print_exc()
return res
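    # Hedged usage sketch (added): doall() is designed to be polled in a tight
    # loop; it rate-limits itself internally via readperiod and sendperiod.
    # Assuming 'ac' is an instance of this class:
    #
    #   while True:
    #       ac.doall()
    #       time.sleep(1)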
|
dcneeme/droidcontroller
|
droidcontroller/acchannels.py
|
Python
|
gpl-3.0
| 53,971
|
[
"BLAST"
] |
55f162453b33f445427124972074ef4159c8be95501b49996e20700c893a30da
|
"""Computes activation for the given class, neurons, or channels of a CNN.
CNN = convolutional neural network
"""
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
import argparse
import numpy
from keras import backend as K
from gewittergefahr.gg_utils import general_utils
from gewittergefahr.gg_utils import time_conversion
from gewittergefahr.gg_utils import file_system_utils
from gewittergefahr.gg_utils import error_checking
from gewittergefahr.deep_learning import cnn
from gewittergefahr.deep_learning import input_examples
from gewittergefahr.deep_learning import testing_io
from gewittergefahr.deep_learning import model_interpretation
from gewittergefahr.deep_learning import model_activation
from gewittergefahr.deep_learning import training_validation_io as trainval_io
K.set_session(K.tf.Session(config=K.tf.ConfigProto(
intra_op_parallelism_threads=1, inter_op_parallelism_threads=1,
allow_soft_placement=False
)))
LARGE_INTEGER = int(1e10)
NUM_EXAMPLES_PER_BATCH = 1000
SEPARATOR_STRING = '\n\n' + '*' * 50 + '\n\n'
CLASS_COMPONENT_TYPE_STRING = model_interpretation.CLASS_COMPONENT_TYPE_STRING
NEURON_COMPONENT_TYPE_STRING = model_interpretation.NEURON_COMPONENT_TYPE_STRING
CHANNEL_COMPONENT_TYPE_STRING = (
model_interpretation.CHANNEL_COMPONENT_TYPE_STRING)
MODEL_FILE_ARG_NAME = 'model_file_name'
COMPONENT_TYPE_ARG_NAME = 'component_type_string'
TARGET_CLASS_ARG_NAME = 'target_class'
LAYER_NAME_ARG_NAME = 'layer_name'
NEURON_INDICES_ARG_NAME = 'neuron_indices'
CHANNEL_INDICES_ARG_NAME = 'channel_indices'
EXAMPLE_DIR_ARG_NAME = 'input_example_dir_name'
FIRST_SPC_DATE_ARG_NAME = 'first_spc_date_string'
LAST_SPC_DATE_ARG_NAME = 'last_spc_date_string'
OUTPUT_FILE_ARG_NAME = 'output_file_name'
MODEL_FILE_HELP_STRING = (
'Path to input file, containing a trained CNN. Will be read by '
'`cnn.read_model`.')
COMPONENT_TYPE_HELP_STRING = (
'Component type. Activations may be computed for one class, one/many '
'neurons, or one/many channels. Valid options are listed below.\n{0:s}'
).format(str(model_interpretation.VALID_COMPONENT_TYPE_STRINGS))
TARGET_CLASS_HELP_STRING = (
'[used only if {0:s} = "{1:s}"] Activations will be computed for class k, '
'where k = `{2:s}`.'
).format(COMPONENT_TYPE_ARG_NAME, CLASS_COMPONENT_TYPE_STRING,
TARGET_CLASS_ARG_NAME)
LAYER_NAME_HELP_STRING = (
'[used only if {0:s} = "{1:s}" or "{2:s}"] Name of layer with neurons or '
'channels whose activations will be computed.'
).format(COMPONENT_TYPE_ARG_NAME, NEURON_COMPONENT_TYPE_STRING,
         CHANNEL_COMPONENT_TYPE_STRING)
NEURON_INDICES_HELP_STRING = (
'[used only if {0:s} = "{1:s}"] Indices for each neuron whose activation is'
' to be computed. For example, to compute activations for neuron (0, 0, 2)'
', this argument should be "0 0 2". To compute activations for neurons '
'(0, 0, 2) and (1, 1, 2), this list should be "0 0 2 -1 1 1 2". In other '
'words, use -1 to separate neurons.'
).format(COMPONENT_TYPE_ARG_NAME, NEURON_COMPONENT_TYPE_STRING)
CHANNEL_INDICES_HELP_STRING = (
'[used only if {0:s} = "{1:s}"] Index for each channel whose activation is '
'to be computed.'
).format(COMPONENT_TYPE_ARG_NAME, CHANNEL_COMPONENT_TYPE_STRING)
EXAMPLE_DIR_HELP_STRING = (
'Name of top-level directory with input examples. Files therein will be '
'found by `input_examples.find_example_file` and read by '
'`input_examples.read_example_file`.')
SPC_DATE_HELP_STRING = (
'SPC date (format "yyyymmdd"). For each model component, activation will '
'be computed for each example (storm object) from `{0:s}`...`{1:s}`.'
).format(FIRST_SPC_DATE_ARG_NAME, LAST_SPC_DATE_ARG_NAME)
OUTPUT_FILE_HELP_STRING = (
'Path to output file (will be written by `model_activation.write_file`).')
INPUT_ARG_PARSER = argparse.ArgumentParser()
INPUT_ARG_PARSER.add_argument(
'--' + MODEL_FILE_ARG_NAME, type=str, required=True,
help=MODEL_FILE_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + COMPONENT_TYPE_ARG_NAME, type=str, required=True,
help=COMPONENT_TYPE_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + TARGET_CLASS_ARG_NAME, type=int, required=False, default=1,
help=TARGET_CLASS_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + LAYER_NAME_ARG_NAME, type=str, required=False, default='',
help=LAYER_NAME_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + NEURON_INDICES_ARG_NAME, type=int, nargs='+', required=False,
default=[-1], help=NEURON_INDICES_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + CHANNEL_INDICES_ARG_NAME, type=int, nargs='+', required=False,
default=[-1], help=CHANNEL_INDICES_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + EXAMPLE_DIR_ARG_NAME, type=str, required=True,
help=EXAMPLE_DIR_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + FIRST_SPC_DATE_ARG_NAME, type=str, required=True,
help=SPC_DATE_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + LAST_SPC_DATE_ARG_NAME, type=str, required=True,
help=SPC_DATE_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + OUTPUT_FILE_ARG_NAME, type=str, required=True,
help=OUTPUT_FILE_HELP_STRING)
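# Hypothetical invocation (added for illustration; all paths are placeholders
# and the component-type value assumes NEURON_COMPONENT_TYPE_STRING resolves
# to "neuron"). Note how -1 separates neuron-index triplets, as described in
# NEURON_INDICES_HELP_STRING:
#
#   python get_cnn_activations.py \
#       --model_file_name /path/to/model.h5 \
#       --component_type_string neuron \
#       --layer_name dense_3 \
#       --neuron_indices 0 0 2 -1 1 1 2 \
#       --input_example_dir_name /path/to/examples \
#       --first_spc_date_string 20110401 \
#       --last_spc_date_string 20110402 \
#       --output_file_name /path/to/activations.p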
def _run(
model_file_name, component_type_string, target_class, layer_name,
neuron_indices_flattened, channel_indices, top_example_dir_name,
first_spc_date_string, last_spc_date_string, output_file_name):
"""Creates activation maps for one class, neuron, or channel of a CNN.
This is effectively the main method.
:param model_file_name: See documentation at top of file.
:param component_type_string: Same.
:param target_class: Same.
:param layer_name: Same.
:param neuron_indices_flattened: Same.
:param channel_indices: Same.
:param top_example_dir_name: Same.
:param first_spc_date_string: Same.
:param last_spc_date_string: Same.
:param output_file_name: Same.
"""
# Check input args.
file_system_utils.mkdir_recursive_if_necessary(file_name=output_file_name)
model_interpretation.check_component_type(component_type_string)
if component_type_string == CHANNEL_COMPONENT_TYPE_STRING:
error_checking.assert_is_geq_numpy_array(channel_indices, 0)
if component_type_string == NEURON_COMPONENT_TYPE_STRING:
neuron_indices_flattened = neuron_indices_flattened.astype(float)
neuron_indices_flattened[neuron_indices_flattened < 0] = numpy.nan
neuron_indices_2d_list = general_utils.split_array_by_nan(
neuron_indices_flattened)
neuron_index_matrix = numpy.array(neuron_indices_2d_list, dtype=int)
else:
neuron_index_matrix = None
# Read model and metadata.
print('Reading model from: "{0:s}"...'.format(model_file_name))
model_object = cnn.read_model(model_file_name)
metadata_file_name = '{0:s}/model_metadata.p'.format(
os.path.split(model_file_name)[0]
)
print('Reading metadata from: "{0:s}"...'.format(metadata_file_name))
model_metadata_dict = cnn.read_model_metadata(metadata_file_name)
training_option_dict = model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY]
# Create generator.
example_file_names = input_examples.find_many_example_files(
top_directory_name=top_example_dir_name, shuffled=False,
first_spc_date_string=first_spc_date_string,
last_spc_date_string=last_spc_date_string,
raise_error_if_any_missing=False)
training_option_dict[trainval_io.SAMPLING_FRACTIONS_KEY] = None
training_option_dict[trainval_io.EXAMPLE_FILES_KEY] = example_file_names
training_option_dict[trainval_io.FIRST_STORM_TIME_KEY] = (
time_conversion.get_start_of_spc_date(first_spc_date_string)
)
training_option_dict[trainval_io.LAST_STORM_TIME_KEY] = (
time_conversion.get_end_of_spc_date(last_spc_date_string)
)
training_option_dict[trainval_io.NUM_EXAMPLES_PER_BATCH_KEY] = (
NUM_EXAMPLES_PER_BATCH
)
if model_metadata_dict[cnn.LAYER_OPERATIONS_KEY] is not None:
generator_object = testing_io.gridrad_generator_2d_reduced(
option_dict=training_option_dict,
desired_num_examples=LARGE_INTEGER,
list_of_operation_dicts=model_metadata_dict[
cnn.LAYER_OPERATIONS_KEY]
)
elif model_metadata_dict[cnn.CONV_2D3D_KEY]:
generator_object = testing_io.myrorss_generator_2d3d(
option_dict=training_option_dict,
desired_num_examples=LARGE_INTEGER)
else:
generator_object = testing_io.generator_2d_or_3d(
option_dict=training_option_dict,
desired_num_examples=LARGE_INTEGER)
# Compute activation for each example (storm object) and model component.
full_id_strings = []
storm_times_unix_sec = numpy.array([], dtype=int)
activation_matrix = None
print(SEPARATOR_STRING)
while True:
try:
this_storm_object_dict = next(generator_object)
except StopIteration:
break
this_list_of_input_matrices = this_storm_object_dict[
testing_io.INPUT_MATRICES_KEY]
these_id_strings = this_storm_object_dict[testing_io.FULL_IDS_KEY]
these_times_unix_sec = this_storm_object_dict[
testing_io.STORM_TIMES_KEY]
full_id_strings += these_id_strings
storm_times_unix_sec = numpy.concatenate((
storm_times_unix_sec, these_times_unix_sec))
if component_type_string == CLASS_COMPONENT_TYPE_STRING:
print('Computing activations for target class {0:d}...'.format(
target_class))
this_activation_matrix = (
model_activation.get_class_activation_for_examples(
model_object=model_object, target_class=target_class,
list_of_input_matrices=this_list_of_input_matrices)
)
this_activation_matrix = numpy.reshape(
this_activation_matrix, (len(this_activation_matrix), 1)
)
elif component_type_string == NEURON_COMPONENT_TYPE_STRING:
this_activation_matrix = None
for j in range(neuron_index_matrix.shape[0]):
print((
'Computing activations for neuron {0:s} in layer "{1:s}"...'
).format(str(neuron_index_matrix[j, :]), layer_name))
these_activations = (
model_activation.get_neuron_activation_for_examples(
model_object=model_object, layer_name=layer_name,
neuron_indices=neuron_index_matrix[j, :],
list_of_input_matrices=this_list_of_input_matrices)
)
these_activations = numpy.reshape(
these_activations, (len(these_activations), 1)
)
if this_activation_matrix is None:
this_activation_matrix = these_activations + 0.
else:
this_activation_matrix = numpy.concatenate(
(this_activation_matrix, these_activations), axis=1)
else:
this_activation_matrix = None
for this_channel_index in channel_indices:
print((
'Computing activations for channel {0:d} in layer '
'"{1:s}"...'
).format(this_channel_index, layer_name))
these_activations = (
model_activation.get_channel_activation_for_examples(
model_object=model_object, layer_name=layer_name,
channel_index=this_channel_index,
list_of_input_matrices=this_list_of_input_matrices,
stat_function_for_neuron_activations=K.max)
)
these_activations = numpy.reshape(
these_activations, (len(these_activations), 1)
)
if this_activation_matrix is None:
this_activation_matrix = these_activations + 0.
else:
this_activation_matrix = numpy.concatenate(
(this_activation_matrix, these_activations), axis=1)
if activation_matrix is None:
activation_matrix = this_activation_matrix + 0.
else:
activation_matrix = numpy.concatenate(
(activation_matrix, this_activation_matrix), axis=0)
print(SEPARATOR_STRING)
print('Writing activations to file: "{0:s}"...'.format(output_file_name))
model_activation.write_file(
pickle_file_name=output_file_name, activation_matrix=activation_matrix,
full_id_strings=full_id_strings,
storm_times_unix_sec=storm_times_unix_sec,
model_file_name=model_file_name,
component_type_string=component_type_string, target_class=target_class,
layer_name=layer_name, neuron_index_matrix=neuron_index_matrix,
channel_indices=channel_indices)
if __name__ == '__main__':
INPUT_ARG_OBJECT = INPUT_ARG_PARSER.parse_args()
_run(
model_file_name=getattr(INPUT_ARG_OBJECT, MODEL_FILE_ARG_NAME),
component_type_string=getattr(
INPUT_ARG_OBJECT, COMPONENT_TYPE_ARG_NAME),
target_class=getattr(INPUT_ARG_OBJECT, TARGET_CLASS_ARG_NAME),
layer_name=getattr(INPUT_ARG_OBJECT, LAYER_NAME_ARG_NAME),
neuron_indices_flattened=numpy.array(
getattr(INPUT_ARG_OBJECT, NEURON_INDICES_ARG_NAME), dtype=int),
channel_indices=numpy.array(
getattr(INPUT_ARG_OBJECT, CHANNEL_INDICES_ARG_NAME), dtype=int),
top_example_dir_name=getattr(INPUT_ARG_OBJECT, EXAMPLE_DIR_ARG_NAME),
first_spc_date_string=getattr(
INPUT_ARG_OBJECT, FIRST_SPC_DATE_ARG_NAME),
last_spc_date_string=getattr(INPUT_ARG_OBJECT, LAST_SPC_DATE_ARG_NAME),
output_file_name=getattr(INPUT_ARG_OBJECT, OUTPUT_FILE_ARG_NAME)
)
|
thunderhoser/GewitterGefahr
|
gewittergefahr/scripts/get_cnn_activations.py
|
Python
|
mit
| 14,054
|
[
"NEURON"
] |
12348870c4450118050152d37dc0e15afa9d8a21537baa029e81d0ddfa61eb32
|
import web, os, hashlib
import requests
import threading
import time
import lxml.objectify
urls = (
'/', 'index',
'/reverse', 'reverse',
'/ioc','ioc',
'/search', 'search',
'/search_archive', 'search_archive',
'/add','add',
'/process', 'process',
'/config', 'config',
'/dash','dashboard',
'/close','close',
'/reopen','reopen'
)
db = web.database(dbn='postgres', db='webpy', user='webpy', pw='')
render = web.template.render('/var/www/templates')
web.template.Template.globals.update(dict(
render = render,
getsize = os.path.getsize,
))
param_results = db.select('config',what='param_value', where='param_name = \'ELK-IP\'')
for plaso_result in param_results:
plaso_ip = plaso_result.param_value
param_results = db.select('config',what='param_value', where='param_name = \'Plaso-dash\'')
for plaso_result in param_results:
plaso_dash = plaso_result.param_value
plaso_url = "http://" + plaso_ip + ":9292/index.html#/dashboard/file/" + plaso_dash
for572_url = "http://" + plaso_ip + ":9292/index.html#/dashboard/file/for572.json"
xplico_url = "http://" + plaso_ip + ":9876/"
class index:
def GET(self):
jenkins_url = web.ctx.homedomain + ":8080/"
return render.tabbed("Forensicator FATE", jenkins_url, plaso_url)
class add:
def POST(self):
i = web.input()
n = db.insert('cases', casename=i.casename,memory_image=i.memory_image,disk_image=i.disk_image,disk_name=i.disk_name,timezone=i.timezone,volatility_profile=i.volatility_profile,notes=i.notes,case_keywords=i.case_keywords,status='open')
raise web.seeother('/')
class close:
def POST(self):
i = web.input()
n = db.update('cases', where="id = " + i.CASE_ID,status='closed')
raise web.seeother('/')
class reopen:
def POST(self):
i = web.input()
n = db.update('cases', where="id = " + i.CASE_ID,status='open')
raise web.seeother('/')
class reverse:
def GET(self):
name_hash = []
for (dirname, dirs, files) in os.walk('/reverse'):
for filename in files:
thefile = os.path.join(dirname,filename)
name_hash.append(tuple([thefile, hashlib.md5(open(thefile, 'r').read()).hexdigest()]))
return render.re_listing(name_hash)
class ioc:
def GET(self):
name_hash = []
for (dirname, dirs, files) in os.walk('/ioc'):
for filename in files:
thefile = os.path.join(dirname,filename)
# next 3 lines idea courtesy of Jeff Bryner http://www.jeffbryner.com/blog/itsec/pythoniocdump.html
ioc_root = lxml.objectify.parse(thefile).getroot()
short_desc = ioc_root.short_description
desc = ioc_root.description
name_hash.append(tuple([thefile, hashlib.md5(open(thefile, 'r').read()).hexdigest(), short_desc, desc]))
return render.ioc_listing(name_hash)
class search:
def GET(self):
return render.listing(db.select('cases',what='*',where="status is null or status<>'closed'",order='id',limit=10000), 'process')
class search_archive:
def GET(self):
return render.archive_listing(db.select('cases',what='*',where="status='closed'",order='id',limit=10000))
class config:
def GET(self):
return render.cfg_listing(db.select('config',what='*',limit=10000))
class dashboard:
def GET(self):
jenkins_url = web.ctx.homedomain + ":8080/"
return render.dash(plaso_url, jenkins_url, for572_url, xplico_url)
class process:
def POST(self):
i = web.input()
jenkins_url = web.ctx.homedomain + ":8080/"
jenkins_job_url = jenkins_url + "job/findWindowsEvidence/buildWithParameters"
jenkins_job_url_with_params = jenkins_job_url + "?CASE_NAME=" + i.CASE_NAME + "&MEMORY_IMAGE_FILE=" + i.MEMORY_IMAGE_FILE + "&DISK_IMAGE_FILE=" + i.DISK_IMAGE_FILE + "&DISK_NAME=" + i.DISK_NAME + "&TIMEZONE=" + i.TIMEZONE + "&VOLATILITY_PROFILE=" + i.VOLATILITY_PROFILE
threading.Thread(target=requests.get, args=(jenkins_job_url_with_params,)).start()
raise web.seeother('/')
app = web.application(urls, globals())
application = app.wsgifunc()
if __name__ == "__main__":
    app.run()
|
z3ndrag0n/forensicator-fate
|
webapp/ffate.py
|
Python
|
apache-2.0
| 4,303
|
[
"Elk"
] |
e299b19209ce7e21105b8a6cc036c944ee65c4c91e1a9d1c166a04fd8c70ecc9
|
"""Kernels for Gaussian process regression and classification.
The kernels in this module allow kernel-engineering, i.e., they can be
combined via the "+" and "*" operators or be exponentiated with a scalar
via "**". These sum and product expressions can also contain scalar values,
which are automatically converted to a constant kernel.
All kernels allow (analytic) gradient-based hyperparameter optimization.
The space of hyperparameters can be specified by giving lower and upper
boundaries for the value of each hyperparameter (the search space is thus
rectangular). Instead of specifying bounds, hyperparameters can also be
declared to be "fixed", which causes these hyperparameters to be excluded from
optimization.
"""
# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# Licence: BSD 3 clause
# Note: this module is strongly inspired by the kernel module of the george
# package.
from abc import ABCMeta, abstractmethod
from collections import namedtuple
import inspect
import math
import numpy as np
from scipy.special import kv, gamma
from scipy.spatial.distance import pdist, cdist, squareform
from ..metrics.pairwise import pairwise_kernels
from ..externals import six
from ..base import clone
from sklearn.externals.funcsigs import signature
class Hyperparameter(namedtuple('Hyperparameter',
('name', 'value_type', 'bounds',
'n_elements', 'fixed'))):
"""A kernel hyperparameter's specification in form of a namedtuple.
Entries
-------
name : string
The name of the hyperparameter. Note that a kernel using a
hyperparameter with name "x" must have the attributes self.x and
self.x_bounds
value_type : string
The type of the hyperparameter. Currently, only "numeric"
hyperparameters are supported.
bounds : pair of floats >= 0 or "fixed"
        The lower and upper bound on the parameter. If n_elements>1, two
        1d arrays of length n_elements may be given instead. If
the string "fixed" is passed as bounds, the hyperparameter's value
cannot be changed.
n_elements : int, default=1
The number of elements of the hyperparameter value. Defaults to 1,
which corresponds to a scalar hyperparameter. n_elements > 1
corresponds to a hyperparameter which is vector-valued,
such as, e.g., anisotropic length-scales.
fixed : bool, default: None
Whether the value of this hyperparameter is fixed, i.e., cannot be
        changed during hyperparameter tuning. If None is passed, "fixed" is
        derived from the given bounds.
"""
    # A raw namedtuple is very memory efficient as it packs the attributes
    # in a struct to get rid of the __dict__ of attributes; in particular it
    # does not copy the string for the keys on each instance.
    # Deriving a namedtuple subclass just to introduce the __init__ method
    # would reintroduce the __dict__ on each instance; we avoid that by
    # telling the Python interpreter that this subclass uses static
    # __slots__ instead of dynamic attributes. Furthermore we don't need any
    # additional slot in the subclass, so we set __slots__ to the empty tuple.
__slots__ = ()
def __new__(cls, name, value_type, bounds, n_elements=1, fixed=None):
if bounds != "fixed":
bounds = np.atleast_2d(bounds)
if n_elements > 1: # vector-valued parameter
if bounds.shape[0] == 1:
bounds = np.repeat(bounds, n_elements, 0)
elif bounds.shape[0] != n_elements:
raise ValueError("Bounds on %s should have either 1 or "
"%d dimensions. Given are %d"
% (name, n_elements, bounds.shape[0]))
if fixed is None:
fixed = (bounds == "fixed")
return super(Hyperparameter, cls).__new__(
cls, name, value_type, bounds, n_elements, fixed)
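# Illustrative sketch (added): a bounded scalar hyperparameter versus a fixed
# one that is excluded from optimization:
#
#   h1 = Hyperparameter("length_scale", "numeric", (1e-5, 1e5))
#   h2 = Hyperparameter("noise_level", "numeric", "fixed")
#   h1.fixed, h2.fixed    # -> False, True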
class Kernel(six.with_metaclass(ABCMeta)):
"""Base class for all kernels."""
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
params = dict()
# introspect the constructor arguments to find the model parameters
# to represent
cls = self.__class__
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
init_sign = signature(init)
args, varargs = [], []
for parameter in init_sign.parameters.values():
if (parameter.kind != parameter.VAR_KEYWORD and
parameter.name != 'self'):
args.append(parameter.name)
if parameter.kind == parameter.VAR_POSITIONAL:
varargs.append(parameter.name)
if len(varargs) != 0:
raise RuntimeError("scikit-learn kernels should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s doesn't follow this convention."
% (cls, ))
for arg in args:
params[arg] = getattr(self, arg, None)
return params
def set_params(self, **params):
"""Set the parameters of this kernel.
The method works on simple kernels as well as on nested kernels.
The latter have parameters of the form ``<component>__<parameter>``
so that it's possible to update each component of a nested object.
Returns
-------
self
"""
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
for key, value in six.iteritems(params):
split = key.split('__', 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if name not in valid_params:
raise ValueError('Invalid parameter %s for kernel %s. '
'Check the list of available parameters '
'with `kernel.get_params().keys()`.' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if key not in valid_params:
raise ValueError('Invalid parameter %s for kernel %s. '
'Check the list of available parameters '
'with `kernel.get_params().keys()`.' %
(key, self.__class__.__name__))
setattr(self, key, value)
return self
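    # Hedged example (added): nested parameters use the
    # <component>__<parameter> convention, e.g. on a Sum kernel:
    #
    #   k = ConstantKernel(1.0) + RBF(length_scale=1.0)
    #   k.set_params(k2__length_scale=2.0)    # updates the RBF component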
def clone_with_theta(self, theta):
"""Returns a clone of self with given hyperparameters theta. """
cloned = clone(self)
cloned.theta = theta
return cloned
@property
def n_dims(self):
"""Returns the number of non-fixed hyperparameters of the kernel."""
return self.theta.shape[0]
@property
def hyperparameters(self):
"""Returns a list of all hyperparameter specifications."""
r = []
for attr, value in sorted(self.__dict__.items()):
if attr.startswith("hyperparameter_"):
r.append(value)
return r
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
theta = []
for hyperparameter in self.hyperparameters:
if not hyperparameter.fixed:
theta.append(getattr(self, hyperparameter.name))
if len(theta) > 0:
return np.log(np.hstack(theta))
else:
return np.array([])
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
i = 0
for hyperparameter in self.hyperparameters:
if hyperparameter.fixed:
continue
if hyperparameter.n_elements > 1:
# vector-valued parameter
setattr(self, hyperparameter.name,
np.exp(theta[i:i + hyperparameter.n_elements]))
i += hyperparameter.n_elements
else:
setattr(self, hyperparameter.name, np.exp(theta[i]))
i += 1
if i != len(theta):
raise ValueError("theta has not the correct number of entries."
" Should be %d; given are %d"
% (i, len(theta)))
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
bounds = []
for hyperparameter in self.hyperparameters:
if not hyperparameter.fixed:
bounds.append(hyperparameter.bounds)
if len(bounds) > 0:
return np.log(np.vstack(bounds))
else:
return np.array([])
def __add__(self, b):
if not isinstance(b, Kernel):
return Sum(self, ConstantKernel(b))
return Sum(self, b)
def __radd__(self, b):
if not isinstance(b, Kernel):
return Sum(ConstantKernel(b), self)
return Sum(b, self)
def __mul__(self, b):
if not isinstance(b, Kernel):
return Product(self, ConstantKernel(b))
return Product(self, b)
def __rmul__(self, b):
if not isinstance(b, Kernel):
return Product(ConstantKernel(b), self)
return Product(b, self)
def __pow__(self, b):
return Exponentiation(self, b)
def __eq__(self, b):
if type(self) != type(b):
return False
params_a = self.get_params()
params_b = b.get_params()
for key in set(list(params_a.keys()) + list(params_b.keys())):
if np.any(params_a.get(key, None) != params_b.get(key, None)):
return False
return True
def __repr__(self):
return "{0}({1})".format(self.__class__.__name__,
", ".join(map("{0:.3g}".format, self.theta)))
@abstractmethod
def __call__(self, X, Y=None, eval_gradient=False):
"""Evaluate the kernel."""
@abstractmethod
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
@abstractmethod
def is_stationary(self):
"""Returns whether the kernel is stationary. """
class NormalizedKernelMixin(object):
"""Mixin for kernels which are normalized: k(X, X)=1."""
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return np.ones(X.shape[0])
class StationaryKernelMixin(object):
"""Mixin for kernels which are stationary: k(X, Y)= f(X-Y)."""
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return True
class CompoundKernel(Kernel):
"""Kernel which is composed of a set of other kernels."""
def __init__(self, kernels):
self.kernels = kernels
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
return dict(kernels=self.kernels)
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
return np.hstack([kernel.theta for kernel in self.kernels])
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
        k_dims = self.kernels[0].n_dims  # CompoundKernel has no k1; all sub-kernels assumed to share n_dims
for i, kernel in enumerate(self.kernels):
kernel.theta = theta[i*k_dims:(i+1)*k_dims]
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
return np.vstack([kernel.bounds for kernel in self.kernels])
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
        Note that this compound kernel returns the results of all simple
        kernels stacked along an additional axis.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y, n_kernels)
Kernel k(X, Y)
K_gradient : array, shape (n_samples_X, n_samples_X, n_dims, n_kernels)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K = []
K_grad = []
for kernel in self.kernels:
K_single, K_grad_single = kernel(X, Y, eval_gradient)
K.append(K_single)
K_grad.append(K_grad_single[..., np.newaxis])
return np.dstack(K), np.concatenate(K_grad, 3)
else:
return np.dstack([kernel(X, Y, eval_gradient)
for kernel in self.kernels])
def __eq__(self, b):
if type(self) != type(b) or len(self.kernels) != len(b.kernels):
return False
return np.all([self.kernels[i] == b.kernels[i]
for i in range(len(self.kernels))])
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return np.all([kernel.is_stationary() for kernel in self.kernels])
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X, n_kernels)
Diagonal of kernel k(X, X)
"""
return np.vstack([kernel.diag(X) for kernel in self.kernels]).T
class KernelOperator(Kernel):
"""Base class for all kernel operators. """
def __init__(self, k1, k2):
self.k1 = k1
self.k2 = k2
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
params = dict(k1=self.k1, k2=self.k2)
if deep:
deep_items = self.k1.get_params().items()
params.update(('k1__' + k, val) for k, val in deep_items)
deep_items = self.k2.get_params().items()
params.update(('k2__' + k, val) for k, val in deep_items)
return params
@property
def hyperparameters(self):
"""Returns a list of all hyperparameter."""
r = []
for hyperparameter in self.k1.hyperparameters:
r.append(Hyperparameter("k1__" + hyperparameter.name,
hyperparameter.value_type,
hyperparameter.bounds,
hyperparameter.n_elements))
for hyperparameter in self.k2.hyperparameters:
r.append(Hyperparameter("k2__" + hyperparameter.name,
hyperparameter.value_type,
hyperparameter.bounds,
hyperparameter.n_elements))
return r
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
return np.append(self.k1.theta, self.k2.theta)
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
k1_dims = self.k1.n_dims
self.k1.theta = theta[:k1_dims]
self.k2.theta = theta[k1_dims:]
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
if self.k1.bounds.size == 0:
return self.k2.bounds
if self.k2.bounds.size == 0:
return self.k1.bounds
return np.vstack((self.k1.bounds, self.k2.bounds))
def __eq__(self, b):
if type(self) != type(b):
return False
return (self.k1 == b.k1 and self.k2 == b.k2) \
or (self.k1 == b.k2 and self.k2 == b.k1)
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return self.k1.is_stationary() and self.k2.is_stationary()
class Sum(KernelOperator):
"""Sum-kernel k1 + k2 of two kernels k1 and k2.
The resulting kernel is defined as
k_sum(X, Y) = k1(X, Y) + k2(X, Y)
Parameters
----------
k1 : Kernel object
The first base-kernel of the sum-kernel
k2 : Kernel object
The second base-kernel of the sum-kernel
"""
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K1, K1_gradient = self.k1(X, Y, eval_gradient=True)
K2, K2_gradient = self.k2(X, Y, eval_gradient=True)
return K1 + K2, np.dstack((K1_gradient, K2_gradient))
else:
return self.k1(X, Y) + self.k2(X, Y)
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.k1.diag(X) + self.k2.diag(X)
def __repr__(self):
return "{0} + {1}".format(self.k1, self.k2)
class Product(KernelOperator):
"""Product-kernel k1 * k2 of two kernels k1 and k2.
The resulting kernel is defined as
k_prod(X, Y) = k1(X, Y) * k2(X, Y)
Parameters
----------
k1 : Kernel object
The first base-kernel of the product-kernel
k2 : Kernel object
The second base-kernel of the product-kernel
"""
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K1, K1_gradient = self.k1(X, Y, eval_gradient=True)
K2, K2_gradient = self.k2(X, Y, eval_gradient=True)
return K1 * K2, np.dstack((K1_gradient * K2[:, :, np.newaxis],
K2_gradient * K1[:, :, np.newaxis]))
else:
return self.k1(X, Y) * self.k2(X, Y)
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.k1.diag(X) * self.k2.diag(X)
def __repr__(self):
return "{0} * {1}".format(self.k1, self.k2)
class Exponentiation(Kernel):
"""Exponentiate kernel by given exponent.
The resulting kernel is defined as
k_exp(X, Y) = k(X, Y) ** exponent
Parameters
----------
kernel : Kernel object
The base kernel
exponent : float
The exponent for the base kernel
"""
def __init__(self, kernel, exponent):
self.kernel = kernel
self.exponent = exponent
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
params = dict(kernel=self.kernel, exponent=self.exponent)
if deep:
deep_items = self.kernel.get_params().items()
params.update(('kernel__' + k, val) for k, val in deep_items)
return params
@property
def hyperparameters(self):
"""Returns a list of all hyperparameter."""
r = []
for hyperparameter in self.kernel.hyperparameters:
r.append(Hyperparameter("kernel__" + hyperparameter.name,
hyperparameter.value_type,
hyperparameter.bounds,
hyperparameter.n_elements))
return r
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
return self.kernel.theta
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
self.kernel.theta = theta
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
return self.kernel.bounds
def __eq__(self, b):
if type(self) != type(b):
return False
return (self.kernel == b.kernel and self.exponent == b.exponent)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K, K_gradient = self.kernel(X, Y, eval_gradient=True)
K_gradient *= \
self.exponent * K[:, :, np.newaxis] ** (self.exponent - 1)
return K ** self.exponent, K_gradient
else:
K = self.kernel(X, Y, eval_gradient=False)
return K ** self.exponent
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.kernel.diag(X) ** self.exponent
def __repr__(self):
return "{0} ** {1}".format(self.kernel, self.exponent)
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return self.kernel.is_stationary()
class ConstantKernel(StationaryKernelMixin, Kernel):
"""Constant kernel.
Can be used as part of a product-kernel where it scales the magnitude of
the other factor (kernel) or as part of a sum-kernel, where it modifies
the mean of the Gaussian process.
k(x_1, x_2) = constant_value for all x_1, x_2
Parameters
----------
constant_value : float, default: 1.0
The constant value which defines the covariance:
k(x_1, x_2) = constant_value
constant_value_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on constant_value
"""
def __init__(self, constant_value=1.0, constant_value_bounds=(1e-5, 1e5)):
self.constant_value = constant_value
self.constant_value_bounds = constant_value_bounds
self.hyperparameter_constant_value = \
Hyperparameter("constant_value", "numeric", constant_value_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
Y = X
elif eval_gradient:
raise ValueError("Gradient can only be evaluated when Y is None.")
K = self.constant_value * np.ones((X.shape[0], Y.shape[0]))
if eval_gradient:
if not self.hyperparameter_constant_value.fixed:
return (K, self.constant_value
* np.ones((X.shape[0], X.shape[0], 1)))
else:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
return K
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.constant_value * np.ones(X.shape[0])
def __repr__(self):
return "{0:.3g}**2".format(np.sqrt(self.constant_value))
class WhiteKernel(StationaryKernelMixin, Kernel):
"""White kernel.
The main use-case of this kernel is as part of a sum-kernel where it
explains the noise-component of the signal. Tuning its parameter
corresponds to estimating the noise-level.
k(x_1, x_2) = noise_level if x_1 == x_2 else 0
Parameters
----------
noise_level : float, default: 1.0
Parameter controlling the noise level
noise_level_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on noise_level
"""
def __init__(self, noise_level=1.0, noise_level_bounds=(1e-5, 1e5)):
self.noise_level = noise_level
self.noise_level_bounds = noise_level_bounds
self.hyperparameter_noise_level = \
Hyperparameter("noise_level", "numeric", noise_level_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is not None and eval_gradient:
raise ValueError("Gradient can only be evaluated when Y is None.")
if Y is None:
K = self.noise_level * np.eye(X.shape[0])
if eval_gradient:
if not self.hyperparameter_noise_level.fixed:
return (K, self.noise_level
* np.eye(X.shape[0])[:, :, np.newaxis])
else:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
return K
else:
return np.zeros((X.shape[0], Y.shape[0]))
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.noise_level * np.ones(X.shape[0])
def __repr__(self):
return "{0}(noise_level={1:.3g})".format(self.__class__.__name__,
self.noise_level)
class RBF(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
"""Radial-basis function kernel (aka squared-exponential kernel).
The RBF kernel is a stationary kernel. It is also known as the
"squared exponential" kernel. It is parameterized by a length-scale
parameter length_scale>0, which can either be a scalar (isotropic variant
of the kernel) or a vector with the same number of dimensions as the inputs
X (anisotropic variant of the kernel). The kernel is given by:
k(x_i, x_j) = exp(-1 / 2 d(x_i / length_scale, x_j / length_scale)^2)
This kernel is infinitely differentiable, which implies that GPs with this
kernel as covariance function have mean square derivatives of all orders,
and are thus very smooth.
Parameters
-----------
length_scale : float or array with shape (n_features,), default: 1.0
The length scale of the kernel. If a float, an isotropic kernel is
used. If an array, an anisotropic kernel is used where each dimension
of l defines the length-scale of the respective feature dimension.
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
"""
def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5)):
if np.iterable(length_scale):
if len(length_scale) > 1:
self.anisotropic = True
                self.length_scale = np.asarray(length_scale, dtype=float)  # np.float was removed in recent numpy
else:
self.anisotropic = False
self.length_scale = float(length_scale[0])
else:
self.anisotropic = False
self.length_scale = float(length_scale)
self.length_scale_bounds = length_scale_bounds
if self.anisotropic: # anisotropic length_scale
self.hyperparameter_length_scale = \
Hyperparameter("length_scale", "numeric", length_scale_bounds,
len(length_scale))
else:
self.hyperparameter_length_scale = \
Hyperparameter("length_scale", "numeric", length_scale_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if self.anisotropic and X.shape[1] != self.length_scale.shape[0]:
raise Exception("Anisotropic kernel must have the same number of "
"dimensions as data (%d!=%d)"
% (self.length_scale.shape[0], X.shape[1]))
if Y is None:
dists = pdist(X / self.length_scale, metric='sqeuclidean')
K = np.exp(-.5 * dists)
# convert from upper-triangular matrix to square matrix
K = squareform(K)
np.fill_diagonal(K, 1)
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X / self.length_scale, Y / self.length_scale,
metric='sqeuclidean')
K = np.exp(-.5 * dists)
if eval_gradient:
if self.hyperparameter_length_scale.fixed:
# Hyperparameter l kept fixed
return K, np.empty((X.shape[0], X.shape[0], 0))
elif not self.anisotropic or self.length_scale.shape[0] == 1:
K_gradient = \
(K * squareform(dists))[:, :, np.newaxis]
return K, K_gradient
elif self.anisotropic:
# We need to recompute the pairwise dimension-wise distances
K_gradient = (X[:, np.newaxis, :] - X[np.newaxis, :, :]) ** 2 \
/ (self.length_scale ** 2)
K_gradient *= K[..., np.newaxis]
return K, K_gradient
else:
raise Exception("Anisotropic kernels require that the number "
"of length scales and features match.")
else:
return K
def __repr__(self):
if self.anisotropic:
return "{0}(length_scale=[{1}])".format(
self.__class__.__name__, ", ".join(map("{0:.3g}".format,
self.length_scale)))
else: # isotropic
return "{0}(length_scale={1:.3g})".format(
self.__class__.__name__, self.length_scale)
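# Illustrative check (added): for the RBF kernel k(x, x) = 1 and values decay
# with squared distance; with length_scale=1 and points 0 and 1:
#
#   import numpy as np
#   X = np.array([[0.0], [1.0]])
#   RBF(length_scale=1.0)(X)
#   # -> [[1.0, 0.6065...], [0.6065..., 1.0]]   (exp(-0.5 * 1**2) = 0.6065)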
class Matern(RBF):
""" Matern kernel.
The class of Matern kernels is a generalization of the RBF and the
absolute exponential kernel parameterized by an additional parameter
nu. The smaller nu, the less smooth the approximated function is.
For nu=inf, the kernel becomes equivalent to the RBF kernel and for nu=0.5
to the absolute exponential kernel. Important intermediate values are
nu=1.5 (once differentiable functions) and nu=2.5 (twice differentiable
functions).
    See Rasmussen and Williams (2006), p. 84, for details regarding the
different variants of the Matern kernel.
Parameters
-----------
length_scale : float or array with shape (n_features,), default: 1.0
The length scale of the kernel. If a float, an isotropic kernel is
used. If an array, an anisotropic kernel is used where each dimension
of l defines the length-scale of the respective feature dimension.
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
nu: float, default: 1.5
The parameter nu controlling the smoothness of the learned function.
The smaller nu, the less smooth the approximated function is.
For nu=inf, the kernel becomes equivalent to the RBF kernel and for
nu=0.5 to the absolute exponential kernel. Important intermediate
values are nu=1.5 (once differentiable functions) and nu=2.5
(twice differentiable functions). Note that values of nu not in
[0.5, 1.5, 2.5, inf] incur a considerably higher computational cost
        (approx. 10 times higher) since they require evaluating the modified
Bessel function. Furthermore, in contrast to l, nu is kept fixed to
its initial value and not optimized.
"""
def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5),
nu=1.5):
super(Matern, self).__init__(length_scale, length_scale_bounds)
self.nu = nu
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if self.anisotropic and X.shape[1] != self.length_scale.shape[0]:
raise Exception("Anisotropic kernel must have the same number of "
"dimensions as data (%d!=%d)"
% (self.length_scale.shape[0], X.shape[1]))
if Y is None:
dists = pdist(X / self.length_scale, metric='euclidean')
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X / self.length_scale, Y / self.length_scale,
metric='euclidean')
if self.nu == 0.5:
K = np.exp(-dists)
elif self.nu == 1.5:
K = dists * math.sqrt(3)
K = (1. + K) * np.exp(-K)
elif self.nu == 2.5:
K = dists * math.sqrt(5)
K = (1. + K + K ** 2 / 3.0) * np.exp(-K)
else: # general case; expensive to evaluate
K = dists
K[K == 0.0] += np.finfo(float).eps # strict zeros result in nan
tmp = (math.sqrt(2 * self.nu) * K)
K.fill((2 ** (1. - self.nu)) / gamma(self.nu))
K *= tmp ** self.nu
K *= kv(self.nu, tmp)
if Y is None:
# convert from upper-triangular matrix to square matrix
K = squareform(K)
np.fill_diagonal(K, 1)
if eval_gradient:
if self.hyperparameter_length_scale.fixed:
# Hyperparameter l kept fixed
K_gradient = np.empty((X.shape[0], X.shape[0], 0))
return K, K_gradient
# We need to recompute the pairwise dimension-wise distances
if self.anisotropic:
D = (X[:, np.newaxis, :] - X[np.newaxis, :, :])**2 \
/ (self.length_scale ** 2)
else:
D = squareform(dists**2)[:, :, np.newaxis]
if self.nu == 0.5:
K_gradient = K[..., np.newaxis] * D \
/ np.sqrt(D.sum(2))[:, :, np.newaxis]
K_gradient[~np.isfinite(K_gradient)] = 0
elif self.nu == 1.5:
K_gradient = \
3 * D * np.exp(-np.sqrt(3 * D.sum(-1)))[..., np.newaxis]
elif self.nu == 2.5:
tmp = np.sqrt(5 * D.sum(-1))[..., np.newaxis]
K_gradient = 5.0/3.0 * D * (tmp + 1) * np.exp(-tmp)
else:
# approximate gradient numerically
def f(theta): # helper function
return self.clone_with_theta(theta)(X, Y)
return K, _approx_fprime(self.theta, f, 1e-10)
if not self.anisotropic:
return K, K_gradient[:, :].sum(-1)[:, :, np.newaxis]
else:
return K, K_gradient
else:
return K
def __repr__(self):
if self.anisotropic:
return "{0}(length_scale=[{1}], nu={2:.3g})".format(
self.__class__.__name__,
", ".join(map("{0:.3g}".format, self.length_scale)),
self.nu)
else: # isotropic
return "{0}(length_scale={1:.3g}, nu={2:.3g})".format(
self.__class__.__name__, self.length_scale, self.nu)
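# Usage sketch for the special-cased values of nu (illustrative addition,
# not in the original file):
#
#     >>> X = np.array([[0.], [1.], [2.]])
#     >>> for nu in [0.5, 1.5, 2.5]:
#     ...     K = Matern(length_scale=1.0, nu=nu)(X)    # closed-form branch
#     >>> K_general = Matern(length_scale=1.0, nu=0.7)(X)  # Bessel branch,
#     ...                                                  # ~10x slower
#
# nu=0.5 reproduces the absolute exponential kernel exp(-d(x_i, x_j)),
# while nu=inf recovers the RBF kernel, as the class docstring states.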
class RationalQuadratic(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
"""Rational Quadratic kernel.
The RationalQuadratic kernel can be seen as a scale mixture (an infinite
sum) of RBF kernels with different characteristic length-scales. It is
parameterized by a length-scale parameter length_scale>0 and a scale
mixture parameter alpha>0. Only the isotropic variant where length_scale is
a scalar is supported at the moment. The kernel is given by:
k(x_i, x_j) = (1 + d(x_i, x_j)^2 / (2*alpha * length_scale^2))^-alpha
Parameters
----------
length_scale : float > 0, default: 1.0
The length scale of the kernel.
alpha : float > 0, default: 1.0
Scale mixture parameter
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
alpha_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on alpha
"""
def __init__(self, length_scale=1.0, alpha=1.0,
length_scale_bounds=(1e-5, 1e5), alpha_bounds=(1e-5, 1e5)):
self.length_scale = length_scale
self.alpha = alpha
self.length_scale_bounds = length_scale_bounds
self.alpha_bounds = alpha_bounds
self.hyperparameter_length_scale = \
Hyperparameter("length_scale", "numeric", length_scale_bounds)
self.hyperparameter_alpha = \
Hyperparameter("alpha", "numeric", alpha_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
dists = squareform(pdist(X, metric='sqeuclidean'))
tmp = dists / (2 * self.alpha * self.length_scale ** 2)
base = (1 + tmp)
K = base ** -self.alpha
np.fill_diagonal(K, 1)
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X, Y, metric='sqeuclidean')
K = (1 + dists / (2 * self.alpha * self.length_scale ** 2)) \
** -self.alpha
if eval_gradient:
# gradient with respect to length_scale
if not self.hyperparameter_length_scale.fixed:
length_scale_gradient = \
dists * K / (self.length_scale ** 2 * base)
length_scale_gradient = length_scale_gradient[:, :, np.newaxis]
else: # l is kept fixed
length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0))
# gradient with respect to alpha
if not self.hyperparameter_alpha.fixed:
alpha_gradient = \
K * (-self.alpha * np.log(base)
+ dists / (2 * self.length_scale ** 2 * base))
alpha_gradient = alpha_gradient[:, :, np.newaxis]
else: # alpha is kept fixed
alpha_gradient = np.empty((K.shape[0], K.shape[1], 0))
return K, np.dstack((alpha_gradient, length_scale_gradient))
else:
return K
def __repr__(self):
return "{0}(alpha={1:.3g}, length_scale={2:.3g})".format(
self.__class__.__name__, self.alpha, self.length_scale)
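# Illustrative sketch (added; not part of the original module): for large
# alpha the RationalQuadratic kernel approaches the RBF kernel with the same
# length_scale, since (1 + d^2/(2*alpha*l^2))^-alpha -> exp(-d^2/(2*l^2)):
#
#     >>> X = np.array([[0.], [1.]])
#     >>> K_rq = RationalQuadratic(length_scale=1.0, alpha=1e6)(X)
#     >>> K_rbf = RBF(length_scale=1.0)(X)
#     >>> np.allclose(K_rq, K_rbf, atol=1e-5)
#     True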
class ExpSineSquared(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
"""Exp-Sine-Squared kernel.
The ExpSineSquared kernel allows modeling periodic functions. It is
parameterized by a length-scale parameter length_scale>0 and a periodicity
parameter periodicity>0. Only the isotropic variant where length_scale is a
scalar is supported at the moment. The kernel is given by:
k(x_i, x_j) = exp(-2 * (sin(\pi / periodicity * d(x_i, x_j)) / length_scale)^2)
Parameters
----------
length_scale : float > 0, default: 1.0
The length scale of the kernel.
periodicity : float > 0, default: 1.0
The periodicity of the kernel.
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
periodicity_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on periodicity
"""
def __init__(self, length_scale=1.0, periodicity=1.0,
length_scale_bounds=(1e-5, 1e5),
periodicity_bounds=(1e-5, 1e5)):
self.length_scale = length_scale
self.periodicity = periodicity
self.length_scale_bounds = length_scale_bounds
self.periodicity_bounds = periodicity_bounds
self.hyperparameter_length_scale = \
Hyperparameter("length_scale", "numeric", length_scale_bounds)
self.hyperparameter_periodicity = \
Hyperparameter("periodicity", "numeric", periodicity_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
dists = squareform(pdist(X, metric='euclidean'))
arg = np.pi * dists / self.periodicity
sin_of_arg = np.sin(arg)
K = np.exp(- 2 * (sin_of_arg / self.length_scale) ** 2)
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X, Y, metric='euclidean')
K = np.exp(- 2 * (np.sin(np.pi / self.periodicity * dists)
/ self.length_scale) ** 2)
if eval_gradient:
cos_of_arg = np.cos(arg)
# gradient with respect to length_scale
if not self.hyperparameter_length_scale.fixed:
length_scale_gradient = \
4 / self.length_scale**2 * sin_of_arg**2 * K
length_scale_gradient = length_scale_gradient[:, :, np.newaxis]
else: # length_scale is kept fixed
length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0))
# gradient with respect to p
if not self.hyperparameter_periodicity.fixed:
periodicity_gradient = \
4 * arg / self.length_scale**2 * cos_of_arg \
* sin_of_arg * K
periodicity_gradient = periodicity_gradient[:, :, np.newaxis]
else: # p is kept fixed
periodicity_gradient = np.empty((K.shape[0], K.shape[1], 0))
return K, np.dstack((length_scale_gradient, periodicity_gradient))
else:
return K
def __repr__(self):
return "{0}(length_scale={1:.3g}, periodicity={2:.3g})".format(
self.__class__.__name__, self.length_scale, self.periodicity)
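# Illustrative sketch (added; not in the original source): points separated
# by an exact multiple of the periodicity have kernel value 1, because the
# sine term vanishes.
#
#     >>> X = np.array([[0.], [3.], [4.5]])
#     >>> K = ExpSineSquared(length_scale=1.0, periodicity=3.0)(X)
#     >>> np.isclose(K[0, 1], 1.0)   # d(x0, x1) = 3.0 = one full period
#     True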
class DotProduct(Kernel):
"""Dot-Product kernel.
The DotProduct kernel is non-stationary and can be obtained from linear
regression by putting N(0, 1) priors on the coefficients of x_d (d = 1, ...,
D) and a prior of N(0, \sigma_0^2) on the bias. The DotProduct kernel
is invariant to a rotation of the coordinates about the origin, but not
translations. It is parameterized by a parameter sigma_0^2. For
sigma_0^2 = 0, the kernel is called the homogeneous linear kernel, otherwise
it is inhomogeneous. The kernel is given by
k(x_i, x_j) = sigma_0 ^ 2 + x_i \cdot x_j
The DotProduct kernel is commonly combined with exponentiation.
Parameters
----------
sigma_0 : float >= 0, default: 1.0
Parameter controlling the inhomogeneity of the kernel. If sigma_0=0,
the kernel is homogeneous.
sigma_0_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on sigma_0
"""
def __init__(self, sigma_0=1.0, sigma_0_bounds=(1e-5, 1e5)):
self.sigma_0 = sigma_0
self.sigma_0_bounds = sigma_0_bounds
self.hyperparameter_sigma_0 = \
Hyperparameter("sigma_0", "numeric", sigma_0_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
K = np.inner(X, X) + self.sigma_0 ** 2
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
K = np.inner(X, Y) + self.sigma_0 ** 2
if eval_gradient:
if not self.hyperparameter_sigma_0.fixed:
K_gradient = np.empty((K.shape[0], K.shape[1], 1))
K_gradient[..., 0] = 2 * self.sigma_0 ** 2
return K, K_gradient
else:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
return K
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return np.einsum('ij,ij->i', X, X) + self.sigma_0 ** 2
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return False
def __repr__(self):
return "{0}(sigma_0={1:.3g})".format(
self.__class__.__name__, self.sigma_0)
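# Illustrative sketch (added for clarity): diag() matches the diagonal of
# the full kernel matrix while avoiding the off-diagonal computations.
#
#     >>> X = np.array([[1., 2.], [3., 4.]])
#     >>> k = DotProduct(sigma_0=1.0)
#     >>> np.allclose(np.diag(k(X)), k.diag(X))
#     True
#
# As the class docstring notes, DotProduct is commonly combined with
# exponentiation, e.g. DotProduct() ** 2 for a quadratic kernel.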
# adapted from scipy/optimize/optimize.py for functions with 2d output
def _approx_fprime(xk, f, epsilon, args=()):
f0 = f(*((xk,) + args))
grad = np.zeros((f0.shape[0], f0.shape[1], len(xk)), float)
ei = np.zeros((len(xk), ), float)
for k in range(len(xk)):
ei[k] = 1.0
d = epsilon * ei
grad[:, :, k] = (f(*((xk + d,) + args)) - f0) / d[k]
ei[k] = 0.0
return grad
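# Small illustration of _approx_fprime (added; not in the original file): it
# forward-differences a function with 2d output, producing one gradient slice
# per component of xk. This is how the general-nu Matern and PairwiseKernel
# gradients above are approximated.
#
#     >>> f = lambda xk: np.array([[xk[0] ** 2]])
#     >>> _approx_fprime(np.array([3.0]), f, 1e-8)[0, 0, 0]  # d/dx x^2 at 3
#     6.00000...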
class PairwiseKernel(Kernel):
"""Wrapper for kernels in sklearn.metrics.pairwise.
A thin wrapper around the functionality of the kernels in
sklearn.metrics.pairwise.
Note: Evaluation of eval_gradient is not analytic but numeric and all
kernels support only isotropic distances. The parameter gamma is
considered to be a hyperparameter and may be optimized. The other
kernel parameters are set directly at initialization and are kept
fixed.
Parameters
----------
gamma : float >= 0, default: 1.0
Parameter gamma of the pairwise kernel specified by metric
gamma_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on gamma
metric : string, or callable, default: "linear"
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
pairwise_kernels_kwargs : dict, default: None
All entries of this dict (if any) are passed as keyword arguments to
the pairwise kernel function.
"""
def __init__(self, gamma=1.0, gamma_bounds=(1e-5, 1e5), metric="linear",
pairwise_kernels_kwargs=None):
self.gamma = gamma
self.gamma_bounds = gamma_bounds
self.hyperparameter_gamma = \
Hyperparameter("gamma", "numeric", gamma_bounds)
self.metric = metric
if pairwise_kernels_kwargs is not None:
self.pairwise_kernels_kwargs = pairwise_kernels_kwargs
else:
self.pairwise_kernels_kwargs = {}
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
K = pairwise_kernels(X, Y, metric=self.metric, gamma=self.gamma,
filter_params=True,
**self.pairwise_kernels_kwargs)
if eval_gradient:
if self.hyperparameter_gamma.fixed:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
# approximate gradient numerically
def f(gamma): # helper function
return pairwise_kernels(
X, Y, metric=self.metric, gamma=np.exp(gamma),
filter_params=True, **self.pairwise_kernels_kwargs)
return K, _approx_fprime(self.theta, f, 1e-10)
else:
return K
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
# We have to fall back to slow way of computing diagonal
return np.apply_along_axis(self, 1, X)[:, 0]
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return self.metric in ["rbf"]
def __repr__(self):
return "{0}(gamma={1}, metric={2})".format(
self.__class__.__name__, self.gamma, self.metric)
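# Illustrative usage sketch (added; not part of the original module):
#
#     >>> X = np.array([[0., 0.], [1., 1.]])
#     >>> k = PairwiseKernel(gamma=1.0, metric="rbf")
#     >>> K = k(X)              # delegates to sklearn.metrics.pairwise
#     >>> k.is_stationary()     # only the rbf metric is stationary
#     True
#
# Because the gradient is obtained numerically via _approx_fprime, evaluating
# it is noticeably more expensive than for the analytic kernels above.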
| ElDeveloper/scikit-learn | sklearn/gaussian_process/kernels.py | Python | bsd-3-clause | 66,251 | ["Gaussian"] | cd1d61c45f87dfa602ca10f407196343d271d0ba456dbaff8a450fac4310efd5 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Handles control flow statements: while, for, if.
Python 2 compatibility version. Not maintained.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.lang import directives
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import ast_util
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import templates
from tensorflow.python.autograph.pyct.static_analysis import annos
# TODO(mdan): Refactor functions to make them smaller.
class ControlFlowTransformer(converter.Base):
"""Transforms control flow structures like loops an conditionals."""
def _create_cond_branch(self, body_name, aliased_orig_names,
aliased_new_names, body, returns):
if len(returns) == 1:
template = """
return retval
"""
return_stmt = templates.replace(template, retval=returns[0])
else:
template = """
return (retvals,)
"""
return_stmt = templates.replace(template, retvals=returns)
if aliased_orig_names:
template = """
def body_name():
aliased_new_names, = aliased_orig_names,
body
return_stmt
"""
return templates.replace(
template,
body_name=body_name,
body=body,
aliased_orig_names=aliased_orig_names,
aliased_new_names=aliased_new_names,
return_stmt=return_stmt)
else:
template = """
def body_name():
body
return_stmt
"""
return templates.replace(
template, body_name=body_name, body=body, return_stmt=return_stmt)
def _create_cond_expr(self, results, test, body_name, orelse_name,
state_getter_name, state_setter_name,
basic_symbol_names, composite_symbol_names):
if results is not None:
template = """
results = ag__.if_stmt(test, body_name, orelse_name,
state_getter_name, state_setter_name,
(basic_symbol_names,),
(composite_symbol_names,))
"""
return templates.replace(
template,
test=test,
results=results,
body_name=body_name,
orelse_name=orelse_name,
state_getter_name=state_getter_name,
state_setter_name=state_setter_name,
basic_symbol_names=basic_symbol_names,
composite_symbol_names=composite_symbol_names)
else:
template = """
ag__.if_stmt(test, body_name, orelse_name, getter_name, setter_name,
(basic_symbol_names,), (composite_symbol_names,))
"""
return templates.replace(
template,
test=test,
body_name=body_name,
orelse_name=orelse_name,
getter_name=state_getter_name,
setter_name=state_setter_name,
basic_symbol_names=basic_symbol_names,
composite_symbol_names=composite_symbol_names)
def _fmt_symbols(self, symbol_set):
if not symbol_set:
return 'no variables'
return ', '.join(map(str, symbol_set))
def _determine_aliased_symbols(self, scope, node_defined_in, block):
if block:
block_live_in = set(anno.getanno(block[0], anno.Static.LIVE_VARS_IN))
else:
block_live_in = set()
modified_live = scope.modified & node_defined_in & block_live_in
# Composite symbols are handled elsewhere; see _create_state_functions.
return {s for s in modified_live if not s.is_composite()}
def _create_state_functions(self, composites, state_getter_name,
state_setter_name):
if composites:
composite_tuple = tuple(composites)
template = """
def state_getter_name():
return composite_tuple,
def state_setter_name(vals):
composite_tuple, = vals
"""
node = templates.replace(
template,
state_getter_name=state_getter_name,
state_setter_name=state_setter_name,
composite_tuple=composite_tuple)
else:
template = """
def state_getter_name():
return ()
def state_setter_name(_):
pass
"""
node = templates.replace(
template,
state_getter_name=state_getter_name,
state_setter_name=state_setter_name)
return node
def _create_loop_options(self, node):
if not anno.hasanno(node, anno.Basic.DIRECTIVES):
return gast.Dict([], [])
loop_directives = anno.getanno(node, anno.Basic.DIRECTIVES)
if directives.set_loop_options not in loop_directives:
return gast.Dict([], [])
opts_dict = loop_directives[directives.set_loop_options]
str_keys, values = zip(*opts_dict.items())
keys = [gast.Constant(s, kind=None) for s in str_keys]
values = list(values) # ast and gast don't play well with tuples.
return gast.Dict(keys, values)
def _create_undefined_assigns(self, undefined_symbols):
assignments = []
for s in undefined_symbols:
template = '''
var = ag__.Undefined(symbol_name)
'''
assignments += templates.replace(
template,
var=s,
symbol_name=gast.Constant(s.ssf(), kind=None))
return assignments
def visit_If(self, node):
body_scope = anno.getanno(node, annos.NodeAnno.BODY_SCOPE)
orelse_scope = anno.getanno(node, annos.NodeAnno.ORELSE_SCOPE)
defined_in = anno.getanno(node, anno.Static.DEFINED_VARS_IN)
live_out = anno.getanno(node, anno.Static.LIVE_VARS_OUT)
# Note: this information needs to be extracted before the body conversion
# that happens in the call to generic_visit below, because the conversion
# generates nodes that lack static analysis annotations.
need_alias_in_body = self._determine_aliased_symbols(
body_scope, defined_in, node.body)
need_alias_in_orelse = self._determine_aliased_symbols(
orelse_scope, defined_in, node.orelse)
node = self.generic_visit(node)
modified_in_cond = body_scope.modified | orelse_scope.modified
returned_from_cond = set()
composites = set()
for s in modified_in_cond:
if s in live_out and not s.is_composite():
returned_from_cond.add(s)
if s.is_composite():
# Special treatment for compound objects, always return them.
# This allows special handling within the if_stmt itself.
# For example, in TensorFlow we need to restore the state of composite
# symbols to ensure that only effects from the executed branch are seen.
composites.add(s)
created_in_body = body_scope.modified & returned_from_cond - defined_in
created_in_orelse = orelse_scope.modified & returned_from_cond - defined_in
basic_created_in_body = tuple(
s for s in created_in_body if not s.is_composite())
basic_created_in_orelse = tuple(
s for s in created_in_orelse if not s.is_composite())
# These variables are defined only in a single branch. This is fine in
# Python so we pass them through. Another backend, e.g. TensorFlow, may need
# to handle these cases specially or raise an error.
possibly_undefined = (set(basic_created_in_body) ^
set(basic_created_in_orelse))
# Alias the closure variables inside the conditional functions, to allow
# the functions access to the respective variables.
# We will alias variables independently for body and orelse scope,
# because different branches might write different variables.
aliased_body_orig_names = tuple(need_alias_in_body)
aliased_orelse_orig_names = tuple(need_alias_in_orelse)
aliased_body_new_names = tuple(
self.ctx.namer.new_symbol(s.ssf(), body_scope.referenced)
for s in aliased_body_orig_names)
aliased_orelse_new_names = tuple(
self.ctx.namer.new_symbol(s.ssf(), orelse_scope.referenced)
for s in aliased_orelse_orig_names)
alias_body_map = dict(zip(aliased_body_orig_names, aliased_body_new_names))
alias_orelse_map = dict(
zip(aliased_orelse_orig_names, aliased_orelse_new_names))
node_body = ast_util.rename_symbols(node.body, alias_body_map)
node_orelse = ast_util.rename_symbols(node.orelse, alias_orelse_map)
cond_var_name = self.ctx.namer.new_symbol('cond', body_scope.referenced)
body_name = self.ctx.namer.new_symbol('if_true', body_scope.referenced)
orelse_name = self.ctx.namer.new_symbol('if_false', orelse_scope.referenced)
all_referenced = body_scope.referenced | orelse_scope.referenced
state_getter_name = self.ctx.namer.new_symbol('get_state', all_referenced)
state_setter_name = self.ctx.namer.new_symbol('set_state', all_referenced)
returned_from_cond = tuple(returned_from_cond)
composites = tuple(composites)
if returned_from_cond:
if len(returned_from_cond) == 1:
cond_results = returned_from_cond[0]
else:
cond_results = gast.Tuple([s.ast() for s in returned_from_cond], None)
returned_from_body = tuple(
alias_body_map[s] if s in need_alias_in_body else s
for s in returned_from_cond)
returned_from_orelse = tuple(
alias_orelse_map[s] if s in need_alias_in_orelse else s
for s in returned_from_cond)
else:
# When the cond would return no value, we leave the cond called without
# results. That in turn should trigger the side effect guards. The
# branch functions will return a dummy value that ensures cond
# actually has some return value as well.
cond_results = None
# TODO(mdan): Replace with None once side_effect_guards is retired.
returned_from_body = (templates.replace_as_expression(
'ag__.match_staging_level(1, cond_var_name)',
cond_var_name=cond_var_name),)
returned_from_orelse = (templates.replace_as_expression(
'ag__.match_staging_level(1, cond_var_name)',
cond_var_name=cond_var_name),)
cond_assign = self.create_assignment(cond_var_name, node.test)
body_def = self._create_cond_branch(
body_name,
aliased_orig_names=aliased_body_orig_names,
aliased_new_names=aliased_body_new_names,
body=node_body,
returns=returned_from_body)
orelse_def = self._create_cond_branch(
orelse_name,
aliased_orig_names=aliased_orelse_orig_names,
aliased_new_names=aliased_orelse_new_names,
body=node_orelse,
returns=returned_from_orelse)
undefined_assigns = self._create_undefined_assigns(possibly_undefined)
composite_defs = self._create_state_functions(
composites, state_getter_name, state_setter_name)
basic_symbol_names = tuple(
gast.Constant(str(symbol), kind=None) for symbol in returned_from_cond)
composite_symbol_names = tuple(
gast.Constant(str(symbol), kind=None) for symbol in composites)
cond_expr = self._create_cond_expr(cond_results, cond_var_name, body_name,
orelse_name, state_getter_name,
state_setter_name, basic_symbol_names,
composite_symbol_names)
if_ast = (
undefined_assigns + composite_defs + body_def + orelse_def +
cond_assign + cond_expr)
return if_ast
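# Illustrative before/after sketch (added as comments; reconstructed from the
# templates in _create_cond_branch/_create_cond_expr above, so the exact
# generated symbol names are approximations):
#
#   if x > 0:          # input
#     y = 1
#   else:
#     y = 2
#
#   def if_true():     # roughly what visit_If emits
#     y = 1
#     return y
#   def if_false():
#     y = 2
#     return y
#   cond = x > 0
#   y = ag__.if_stmt(cond, if_true, if_false, get_state, set_state,
#                    ('y',), ())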
def _get_basic_loop_vars(self, modified_symbols, live_in, live_out):
# The loop variables corresponding to simple symbols (e.g. `x`).
basic_loop_vars = []
for s in modified_symbols:
if s.is_composite():
# TODO(mdan): Raise an error when this happens for a TF loop.
continue
# Variables not live into or out of the loop are considered local to the
# loop.
if s not in live_in and s not in live_out:
continue
basic_loop_vars.append(s)
return frozenset(basic_loop_vars)
def _get_composite_loop_vars(self, modified_symbols, live_in):
# The loop variables corresponding to composite symbols (e.g. `self.x`).
composite_loop_vars = []
for s in modified_symbols:
if not s.is_composite():
continue
# Mutations made to objects created inside the loop will appear as writes
# to composite symbols. Because these mutations appear as modifications
# made to composite symbols, we check whether the composite's parent is
# actually live into the loop.
# Example:
# while cond:
# x = Foo()
# x.foo = 2 * x.foo # x.foo is live into the loop, but x is not.
#
# Note that some parents might not be symbols - for example, in x['foo'],
# 'foo' is a parent, but it's a literal, not a symbol. We don't check the
# liveness of literals.
support_set_symbols = tuple(
sss for sss in s.support_set if sss.is_symbol())
if not all(sss in live_in for sss in support_set_symbols):
continue
composite_loop_vars.append(s)
return frozenset(composite_loop_vars)
def _get_loop_vars(self, node, modified_symbols):
body_scope = anno.getanno(node, annos.NodeAnno.BODY_SCOPE)
defined_in = anno.getanno(node, anno.Static.DEFINED_VARS_IN)
live_in = anno.getanno(node, anno.Static.LIVE_VARS_IN)
live_out = anno.getanno(node, anno.Static.LIVE_VARS_OUT)
reserved_symbols = body_scope.referenced
basic_loop_vars = self._get_basic_loop_vars(
modified_symbols, live_in, live_out)
composite_loop_vars = self._get_composite_loop_vars(
modified_symbols, live_in)
# Variables that are used or defined inside the loop, but not defined
# before entering the loop. Only the simple variables must be defined before
# the loop; the composite ones will be checked implicitly at runtime.
undefined_lives = basic_loop_vars - defined_in
return (basic_loop_vars, composite_loop_vars, reserved_symbols,
undefined_lives)
def _loop_var_constructs(self, basic_loop_vars):
loop_vars = tuple(basic_loop_vars)
loop_vars_ast_tuple = gast.Tuple([n.ast() for n in loop_vars], None)
if len(loop_vars) == 1:
loop_vars = loop_vars[0]
return loop_vars, loop_vars_ast_tuple
def visit_While(self, node):
node = self.generic_visit(node)
(basic_loop_vars, composite_loop_vars, reserved_symbols,
possibly_undefs) = self._get_loop_vars(
node,
anno.getanno(node, annos.NodeAnno.BODY_SCOPE).modified)
loop_vars, loop_vars_ast_tuple = self._loop_var_constructs(
basic_loop_vars)
state_getter_name = self.ctx.namer.new_symbol('get_state', reserved_symbols)
state_setter_name = self.ctx.namer.new_symbol('set_state', reserved_symbols)
state_functions = self._create_state_functions(
composite_loop_vars, state_getter_name, state_setter_name)
basic_symbol_names = tuple(
gast.Constant(str(symbol), kind=None) for symbol in basic_loop_vars)
composite_symbol_names = tuple(
gast.Constant(str(symbol), kind=None) for symbol in composite_loop_vars)
opts = self._create_loop_options(node)
# TODO(mdan): Use a single template.
# If the body and test functions took a single tuple for loop_vars, instead
# of *loop_vars, then a single template could be used.
if loop_vars:
template = """
state_functions
def body_name(loop_vars):
body
return loop_vars,
def test_name(loop_vars):
return test
loop_vars_ast_tuple = ag__.while_stmt(
test_name,
body_name,
state_getter_name,
state_setter_name,
(loop_vars,),
(basic_symbol_names,),
(composite_symbol_names,),
opts)
"""
node = templates.replace(
template,
loop_vars=loop_vars,
loop_vars_ast_tuple=loop_vars_ast_tuple,
test_name=self.ctx.namer.new_symbol('loop_test', reserved_symbols),
test=node.test,
body_name=self.ctx.namer.new_symbol('loop_body', reserved_symbols),
body=node.body,
state_functions=state_functions,
state_getter_name=state_getter_name,
state_setter_name=state_setter_name,
basic_symbol_names=basic_symbol_names,
composite_symbol_names=composite_symbol_names,
opts=opts)
else:
template = """
state_functions
def body_name():
body
return ()
def test_name():
return test
ag__.while_stmt(
test_name,
body_name,
state_getter_name,
state_setter_name,
(),
(),
(composite_symbol_names,),
opts)
"""
node = templates.replace(
template,
test_name=self.ctx.namer.new_symbol('loop_test', reserved_symbols),
test=node.test,
body_name=self.ctx.namer.new_symbol('loop_body', reserved_symbols),
body=node.body,
state_functions=state_functions,
state_getter_name=state_getter_name,
state_setter_name=state_setter_name,
composite_symbol_names=composite_symbol_names,
opts=opts)
undefined_assigns = self._create_undefined_assigns(possibly_undefs)
return undefined_assigns + node
def visit_For(self, node):
node = self.generic_visit(node)
(basic_loop_vars, composite_loop_vars,
reserved_symbols, possibly_undefs) = self._get_loop_vars(
node, (anno.getanno(node, annos.NodeAnno.BODY_SCOPE).modified
| anno.getanno(node, annos.NodeAnno.ITERATE_SCOPE).modified))
loop_vars, loop_vars_ast_tuple = self._loop_var_constructs(
basic_loop_vars)
body_name = self.ctx.namer.new_symbol('loop_body', reserved_symbols)
state_getter_name = self.ctx.namer.new_symbol('get_state', reserved_symbols)
state_setter_name = self.ctx.namer.new_symbol('set_state', reserved_symbols)
state_functions = self._create_state_functions(
composite_loop_vars, state_getter_name, state_setter_name)
if anno.hasanno(node, anno.Basic.EXTRA_LOOP_TEST):
extra_test = anno.getanno(node, anno.Basic.EXTRA_LOOP_TEST)
extra_test_name = self.ctx.namer.new_symbol(
'extra_test', reserved_symbols)
template = """
def extra_test_name(loop_vars):
return extra_test_expr
"""
extra_test_function = templates.replace(
template,
extra_test_name=extra_test_name,
loop_vars=loop_vars,
extra_test_expr=extra_test)
else:
extra_test_name = parser.parse_expression('None')
extra_test_function = []
# Workaround for PEP-3113
# iterates_var holds a single variable with the iterates, which may be a
# tuple.
iterates_var_name = self.ctx.namer.new_symbol(
'iterates', reserved_symbols)
template = """
iterates = iterates_var_name
"""
iterate_expansion = templates.replace(
template,
iterates=node.target,
iterates_var_name=iterates_var_name)
undefined_assigns = self._create_undefined_assigns(possibly_undefs)
basic_symbol_names = tuple(
gast.Constant(str(symbol), kind=None) for symbol in basic_loop_vars)
composite_symbol_names = tuple(
gast.Constant(str(symbol), kind=None) for symbol in composite_loop_vars)
opts = self._create_loop_options(node)
# TODO(mdan): Use a single template.
# If the body and test functions took a single tuple for loop_vars, instead
# of *loop_vars, then a single template could be used.
if loop_vars:
template = """
undefined_assigns
state_functions
def body_name(iterates_var_name, loop_vars):
iterate_expansion
body
return loop_vars,
extra_test_function
loop_vars_ast_tuple = ag__.for_stmt(
iter_,
extra_test_name,
body_name,
state_getter_name,
state_setter_name,
(loop_vars,),
(basic_symbol_names,),
(composite_symbol_names,),
opts)
"""
return templates.replace(
template,
undefined_assigns=undefined_assigns,
loop_vars=loop_vars,
loop_vars_ast_tuple=loop_vars_ast_tuple,
iter_=node.iter,
iterate_expansion=iterate_expansion,
iterates_var_name=iterates_var_name,
extra_test_name=extra_test_name,
extra_test_function=extra_test_function,
body_name=body_name,
body=node.body,
state_functions=state_functions,
state_getter_name=state_getter_name,
state_setter_name=state_setter_name,
basic_symbol_names=basic_symbol_names,
composite_symbol_names=composite_symbol_names,
opts=opts)
else:
template = """
undefined_assigns
state_functions
def body_name(iterates_var_name):
iterate_expansion
body
return ()
extra_test_function
ag__.for_stmt(
iter_,
extra_test_name,
body_name,
state_getter_name,
state_setter_name,
(),
(),
(composite_symbol_names,),
opts)
"""
return templates.replace(
template,
undefined_assigns=undefined_assigns,
iter_=node.iter,
iterate_expansion=iterate_expansion,
iterates_var_name=iterates_var_name,
extra_test_name=extra_test_name,
extra_test_function=extra_test_function,
body_name=body_name,
body=node.body,
state_functions=state_functions,
state_getter_name=state_getter_name,
state_setter_name=state_setter_name,
composite_symbol_names=composite_symbol_names,
opts=opts)
def transform(node, ctx):
node = ControlFlowTransformer(ctx).visit(node)
return node
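# Illustrative before/after sketch (added as comments; reconstructed from the
# while-loop template in visit_While above, so generated names are
# approximations):
#
#   while i < n:       # input
#     i += 1
#
#   def loop_body(i):  # roughly what visit_While emits
#     i += 1
#     return i,
#   def loop_test(i):
#     return i < n
#   i, = ag__.while_stmt(loop_test, loop_body, get_state, set_state,
#                        (i,), ('i',), (), opts)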
| gunan/tensorflow | tensorflow/python/autograph/converters/control_flow_deprecated_py2.py | Python | apache-2.0 | 22,919 | ["VisIt"] | 6a9ebde939e56cfb44845a45d40443e28ecfa76665dfd0bde214189503edc26d |
|
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import nose.tools
from nose.tools import assert_true
from hyperspy._signals.signal1d import Signal1D
from hyperspy.components1d import Gaussian
class TestFitOneComponent:
def setUp(self):
g = Gaussian()
g.A.value = 10000.0
g.centre.value = 5000.0
g.sigma.value = 500.0
axis = np.arange(10000)
s = Signal1D(g.function(axis))
m = s.create_model()
self.model = m
self.g = g
self.axis = axis
self.rtol = 0.00
def test_fit_component(self):
m = self.model
axis = self.axis
g1 = Gaussian()
m.append(g1)
m.fit_component(g1, signal_range=(4000, 6000))
np.testing.assert_allclose(self.g.function(axis),
g1.function(axis),
rtol=self.rtol,
atol=10e-3)
@nose.tools.raises(ValueError)
def test_component_not_in_model(self):
self.model.fit_component(self.g)
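# Note added for clarity (not in the original test file): fit_component fits
# a single component of the model over the x-axis window given by
# signal_range; with fit_independent=True (exercised in the next test class)
# the remaining components are ignored during that fit. A minimal sketch,
# assuming the same API used in these tests:
#
#     >>> s = Signal1D(Gaussian().function(np.arange(100)))
#     >>> m = s.create_model()
#     >>> g = Gaussian()
#     >>> m.append(g)
#     >>> m.fit_component(g, signal_range=(40, 60))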
class TestFitSeveralComponent:
def setUp(self):
gs1 = Gaussian()
gs1.A.value = 10000.0
gs1.centre.value = 5000.0
gs1.sigma.value = 500.0
gs2 = Gaussian()
gs2.A.value = 60000.0
gs2.centre.value = 2000.0
gs2.sigma.value = 300.0
gs3 = Gaussian()
gs3.A.value = 20000.0
gs3.centre.value = 6000.0
gs3.sigma.value = 100.0
axis = np.arange(10000)
total_signal = (gs1.function(axis) +
gs2.function(axis) +
gs3.function(axis))
s = Signal1D(total_signal)
m = s.create_model()
g1 = Gaussian()
g2 = Gaussian()
g3 = Gaussian()
m.append(g1)
m.append(g2)
m.append(g3)
self.model = m
self.gs1 = gs1
self.gs2 = gs2
self.gs3 = gs3
self.g1 = g1
self.g2 = g2
self.g3 = g3
self.axis = axis
self.rtol = 0.01
def test_fit_component_active_state(self):
m = self.model
axis = self.axis
g1 = self.g1
g2 = self.g2
g3 = self.g3
g2.active = True
g3.active = False
m.fit_component(g1, signal_range=(4500, 5200), fit_independent=True)
np.testing.assert_allclose(self.gs1.function(axis),
g1.function(axis),
rtol=self.rtol,
atol=10e-3)
assert_true(g1.active)
assert_true(g2.active)
assert_true(not g3.active)
def test_fit_component_free_state(self):
m = self.model
axis = self.axis
g1 = self.g1
g2 = self.g2
g3 = self.g3
g2.A.free = False
g2.sigma.free = False
m.fit_component(g1, signal_range=(4500, 5200))
np.testing.assert_allclose(self.gs1.function(axis),
g1.function(axis),
rtol=self.rtol,
atol=10e-3)
assert_true(g1.A.free)
assert_true(g1.sigma.free)
assert_true(g1.centre.free)
assert_true(not g2.A.free)
assert_true(not g2.sigma.free)
assert_true(g2.centre.free)
assert_true(g3.A.free)
assert_true(g3.sigma.free)
assert_true(g3.centre.free)
def test_fit_multiple_component(self):
m = self.model
g1 = self.g1
g2 = self.g2
g3 = self.g3
m.fit_component(g1, signal_range=(4500, 5200))
m.fit_component(g2, signal_range=(1500, 2200))
m.fit_component(g3, signal_range=(5800, 6150))
np.testing.assert_allclose(self.model.signal.data,
m(),
rtol=self.rtol,
atol=10e-3)
| vidartf/hyperspy | hyperspy/tests/model/test_fit_component.py | Python | gpl-3.0 | 4,618 | ["Gaussian"] | 3a0d3393d9d6998c1ce578420749a79b70f353f1f0ae30999db4ab6ef3a98f9d |
|
# Copyright 2017 Max Planck Society
# Distributed under the BSD-3 Software license,
# (See accompanying file ./LICENSE.txt or copy at
# https://opensource.org/licenses/BSD-3-Clause)
"""Training AdaGAN on various datasets.
Refer to the arXiv paper 'AdaGAN: Boosting Generative Models'
Coded by Ilya Tolstikhin, Carl-Johann Simon-Gabriel
"""
import os
import argparse
import logging
import tensorflow as tf
import numpy as np
from datahandler import DataHandler
from adagan import AdaGan
from metrics import Metrics
import utils
flags = tf.app.flags
flags.DEFINE_float("g_learning_rate", 0.0001,
"Learning rate for Generator optimizers [16e-4]")
flags.DEFINE_float("d_learning_rate", 0.0001,
"Learning rate for Discriminator optimizers [4e-4]")
flags.DEFINE_float("learning_rate", 0.003,
"Learning rate for other optimizers [8e-4]")
flags.DEFINE_float("adam_beta1", 0.5, "Beta1 parameter for Adam optimizer [0.5]")
flags.DEFINE_integer("zdim", 100, "Dimensionality of the latent space [100]")
flags.DEFINE_float("init_std", 0.0099999, "Initial variance for weights [0.02]")
flags.DEFINE_string("workdir", 'results_cifar10_pot_conv', "Working directory ['results']")
flags.DEFINE_bool("unrolled", False, "Use unrolled GAN training [True]")
flags.DEFINE_bool("vae", False, "Use VAE instead of GAN")
flags.DEFINE_bool("pot", True, "Use POT instead of GAN")
flags.DEFINE_float("pot_lambda", 1., "POT regularization")
flags.DEFINE_bool("is_bagging", False, "Do we want to use bagging instead of adagan? [False]")
FLAGS = flags.FLAGS
def main():
opts = {}
# Utility
opts['random_seed'] = 66
opts['dataset'] = 'cifar10' # gmm, circle_gmm, mnist, mnist3 ...
opts['data_dir'] = 'cifar10'
opts['trained_model_path'] = None #'models'
opts['mnist_trained_model_file'] = None #'mnist_trainSteps_19999_yhat' # 'mnist_trainSteps_20000'
opts['work_dir'] = FLAGS.workdir
opts['ckpt_dir'] = 'checkpoints'
opts["verbose"] = 1
opts['tf_run_batch_size'] = 128
opts["early_stop"] = -1 # set -1 to run normally
opts["plot_every"] = 150
opts["save_every_epoch"] = 10
opts['gmm_max_val'] = 15.
# Datasets
opts['toy_dataset_size'] = 10000
opts['toy_dataset_dim'] = 2
opts['mnist3_dataset_size'] = 2 * 64 # 64 * 2500
opts['mnist3_to_channels'] = False # Hide 3 digits of MNIST to channels
opts['input_normalize_sym'] = False # Normalize data to [-1, 1]
opts['gmm_modes_num'] = 5
# AdaGAN parameters
opts['adagan_steps_total'] = 1
opts['samples_per_component'] = 1000
opts['is_bagging'] = FLAGS.is_bagging
opts['beta_heur'] = 'uniform' # uniform, constant
opts['weights_heur'] = 'theory_star' # theory_star, theory_dagger, topk
opts['beta_constant'] = 0.5
opts['topk_constant'] = 0.5
opts["mixture_c_epoch_num"] = 5
opts["eval_points_num"] = 25600
opts['digit_classification_threshold'] = 0.999
opts['inverse_metric'] = False # Use metric from the Unrolled GAN paper?
opts['inverse_num'] = 100 # Number of real points to inverse.
opts['objective'] = None
# Generative model parameters
opts["init_std"] = FLAGS.init_std
opts["init_bias"] = 0.0
opts['latent_space_distr'] = 'normal' # uniform, normal
opts['latent_space_dim'] = FLAGS.zdim
opts["gan_epoch_num"] = 100
opts['convolutions'] = True
opts['d_num_filters'] = 512
opts['d_num_layers'] = 4
opts['g_num_filters'] = 1024
opts['g_num_layers'] = 4
opts['e_is_random'] = False
opts['e_pretrain'] = False
opts['e_add_noise'] = False
opts['e_pretrain_bsize'] = 1000
opts['e_num_filters'] = 1024
opts['e_num_layers'] = 4
opts['g_arch'] = 'dcgan_mod'
opts['g_stride1_deconv'] = False
opts['g_3x3_conv'] = 0
opts['e_arch'] = 'dcgan'
opts['e_3x3_conv'] = 0
opts['conv_filters_dim'] = 5
# --GAN specific:
opts['conditional'] = False
opts['unrolled'] = FLAGS.unrolled # Use Unrolled GAN? (only for images)
opts['unrolling_steps'] = 5 # Used only if unrolled = True
# --VAE specific
opts['vae'] = FLAGS.vae
opts['vae_sigma'] = 0.01
# --POT specific
opts['pot'] = FLAGS.pot
opts['pot_pz_std'] = 2.
opts['pot_lambda'] = FLAGS.pot_lambda
opts['adv_c_loss'] = 'conv'
opts['vgg_layer'] = 'pool2'
opts['adv_c_patches_size'] = 5
opts['adv_c_num_units'] = 32
opts['adv_c_loss_w'] = 1.0
opts['cross_p_w'] = 0.0
opts['diag_p_w'] = 0.0
opts['emb_c_loss_w'] = 1.0
opts['reconstr_w'] = 0.0
opts['z_test'] = 'gan'
opts['gan_p_trick'] = False
opts['z_test_corr_w'] = 1.0
opts['z_test_proj_dim'] = 10
# Optimizer parameters
opts['optimizer'] = 'adam' # sgd, adam
opts["batch_size"] = 100
opts["d_steps"] = 1
opts['d_new_minibatch'] = False
opts["g_steps"] = 2
opts['batch_norm'] = True
opts['dropout'] = False
opts['dropout_keep_prob'] = 0.5
opts['recon_loss'] = 'l2sq'
# "manual" or number (float or int) giving the number of epochs to divide
# the learning rate by 10 (converted into an exp decay per epoch).
opts['decay_schedule'] = 'manual'
opts['opt_learning_rate'] = FLAGS.learning_rate
opts['opt_d_learning_rate'] = FLAGS.d_learning_rate
opts['opt_g_learning_rate'] = FLAGS.g_learning_rate
opts["opt_beta1"] = FLAGS.adam_beta1
opts['batch_norm_eps'] = 1e-05
opts['batch_norm_decay'] = 0.9
if opts['e_is_random']:
assert opts['latent_space_distr'] == 'normal',\
'Random encoders currently work only with Gaussian Pz'
# Data augmentation
opts['data_augm'] = True
if opts['verbose']:
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(message)s')
else:
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(message)s')
utils.create_dir(opts['work_dir'])
utils.create_dir(os.path.join(opts['work_dir'], opts['ckpt_dir']))
with utils.o_gfile((opts['work_dir'], 'params.txt'), 'w') as text:
text.write('Parameters:\n')
for key in opts:
text.write('%s : %s\n' % (key, opts[key]))
data = DataHandler(opts)
assert data.num_points >= opts['batch_size'], 'Training set too small'
adagan = AdaGan(opts, data)
metrics = Metrics()
train_size = data.num_points
random_idx = np.random.choice(train_size, 4*320, replace=False)
metrics.make_plots(opts, 0, data.data,
data.data[random_idx], adagan._data_weights, prefix='dataset_')
for step in range(opts["adagan_steps_total"]):
logging.info('Running step {} of AdaGAN'.format(step + 1))
adagan.make_step(opts, data)
num_fake = opts['eval_points_num']
logging.debug('Sampling fake points')
fake_points = adagan.sample_mixture(num_fake)
logging.debug('Sampling more fake points')
more_fake_points = adagan.sample_mixture(500)
logging.debug('Plotting results')
if opts['dataset'] == 'gmm':
metrics.make_plots(opts, step, data.data[:500],
fake_points[0:100], adagan._data_weights[:500])
logging.debug('Evaluating results')
(likelihood, C) = metrics.evaluate(
opts, step, data.data[:500],
fake_points, more_fake_points, prefix='')
else:
metrics.make_plots(opts, step, data.data,
fake_points[:320], adagan._data_weights)
if opts['inverse_metric']:
logging.debug('Evaluating results')
l2 = np.min(adagan._invert_losses[:step + 1], axis=0)
logging.debug('MSE=%.5f, STD=%.5f' % (np.mean(l2), np.std(l2)))
res = metrics.evaluate(
opts, step, data.data[:500],
fake_points, more_fake_points, prefix='')
logging.debug("AdaGan finished working!")
if __name__ == '__main__':
main()
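# Usage sketch (added): the flags defined above via tf.app.flags can be set
# on the command line. The script name comes from this record's metadata and
# is otherwise an assumption.
#
#   python cifar_sota.py --workdir=results_cifar10_pot_conv \
#       --zdim=100 --pot_lambda=1.0 --is_bagging=False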
| tolstikhin/adagan | cifar_sota.py | Python | bsd-3-clause | 7,946 | ["Gaussian"] | b0ddbd9460439c4a0531f6a2ad607b1e743aec6cc7ce01c7325f793a1018f2e0 |
|
from ..thing import Thing
from ..children import ChildGenerator
# new Thing("sea monster",["sea monster thoughts",["tentacle,0-6","fish fin,0-4","",""],"stinger,20%",["crustacean claw,0-4",""],["crustacean leg,0-8",""],["crustacean shell","scales","fur","exoskeleton",""],["mouth,1-2","beak,1-2"],"skull,80%",["eye,1-8","simple eye,1-8","",""],"weird soft organ,0-4","weird hard organ,0-4"],[["giant","timeless","colossal","abyssal","forgotten","ancient","gigantic","monstrous"],[" "],["craze","drift","dredge","dread","slumber","dream","wander","frost","magma","stone","slime","ooze","egg","larva","grudge","stride","flail","wail","time","star","crystal","terror","horror","scream","wrath","burst","dark","deep","tickle"],["fin","tail","sinker","sunk","singer","song","polyp","rifter","glider","squirmer","titan","colossus","brain","queen","king","child","guardian","seer","whale","worm","spider","crab","leech","fish","shark","squid","saur","buddy","lord"]]);
# new Thing("sea monster thoughts",["sea monster thought,1-2"],["thoughts"]);
# new Thing("sea monster thought",[],["IIIIII MUSSST SCREEEAAAM","I AMMMM AWAKENED","ALLLLLL FEAR MEEEEE","NOOOOONE SHALL LIVE","I MUSSSSST EATTTTT","DEEEEEEEEP I SSSSLUMBER","IIIII SHALL CONSSSSUME","IIIII SHALL DEVOUUUUURRRRR","LIFFFFFFE MUSSSSST PERISHHHHH","NNNNNNNNURISHMENT","ALL SHALLLLLLL GO INSSSSSSANE","SSSSSSANITY SHALL YIELDDDDD","EXXXXXILED I WASSSSS","EONSSSSS I HAVE SLUMBERED","EONSSSSS I HAVE WAITED","MORTALSSSSSS BEHOLDDDDD","I COMMMMME FROM DEEP","IMMMMMMOBILE I WATCHHHH","SSSSSKITTER","THEY FFFFFLOAAAAAT"]);
class SpaceMonster(Thing):
type_name = "space monster"
child_generators = [
ChildGenerator("space monster thoughts"),
[
ChildGenerator("tentacle", (0, 6)),
ChildGenerator("fish fin", (0, 4)),
ChildGenerator(),
ChildGenerator(),
],
ChildGenerator("stinger", probability=20),
[
ChildGenerator("crustacean claw", (0, 4)),
ChildGenerator(),
],
[
ChildGenerator("crustacean leg", (0, 8)),
ChildGenerator()
],
[
ChildGenerator("crustacean shell"),
ChildGenerator("scales"),
ChildGenerator("fur"),
ChildGenerator("exoskeleton"),
ChildGenerator(),
],
[
ChildGenerator("mouth", (1, 2)),
ChildGenerator("beak", (1, 2))
],
ChildGenerator("skull", probability=80),
[
ChildGenerator("eye", (1, 8)),
ChildGenerator("simple eye", (1, 8)),
ChildGenerator(),
ChildGenerator()
],
ChildGenerator("weird soft organ", (0, 4)),
ChildGenerator("weird hard organ", (0, 4)),
]
names_data = [
["C'","Vr'","Ksh","Zn'","Sh","Hrl","X","O","Yog","Gorg","Morg","Marg","Magg"],
["","","agn","soth","norgn","ngas","alx","orx","rgl","iirn","egw","thulh","t","g","m"],
["org","orgon","orgus","orkus","oid","us","u","esth","ath","oth","um","ott","aur"],
[""," the Forgotten"," the Entity"," the Ancient"," the Starchild"," the Seeder"," the Leech"," the Timeless"," the Eon"," the Many"," the Countless"," the Boundless"," the Prisoner"," the Child"," the Form"," the Shape"," the Drifter"," the Swarm"," the Vicious"," the Warden"," the Ender"," the Unworldly"," the Unfriendly"," the All-Consumer"]
]
class SpaceMonsterThoughts(Thing):
type_name = "space monster thoughts"
child_generators = [ChildGenerator("space monster thought", (1, 2))]
names_data = ["thoughts"]
class SpaceMonsterThought(Thing):
type_name = "space monster thought"
names_data = [
"WWWWWWWIDER THAN STARRRRRRS",
"AWAKENNNN MY CHILDRENNNNNN",
"GALAXIESSSSS SHALL FALLLLLLL",
"I AMMMMMM INFFFFFINITE",
"I SSSSSSSPAN AGESSSS",
"WWWWWWEEEEE ARE UNDYINGGGGGG",
"WE COMMMMMMMME",
"WE ANSSSSSWER THE CALLLLLLL",
"I TRAVELLLLLLL SLLLLLLUMBERING",
"FROMMMMMM FARRRRRR I COMMMME",
"IIIIII MUSSST SCREEEAAAM",
"I AMMMM AWAKENED",
"ALLLLLL FEAR MEEEEE",
"NOOOOONE SHALL LIVE",
"I MUSSSSST EATTTTT",
"DEEEEEEEEP I SSSSLUMBER",
"IIIII SHALL CONSSSSUME",
"IIIII SHALL DEVOUUUUURRRRR",
"LIFFFFFFE MUSSSSST PERISHHHHH",
"NNNNNNNNURISHMENT",
"ALL SHALLLLLLL GO INSSSSSSANE",
"SSSSSSANITY SHALL YIELDDDDD",
"EXXXXXILED I WASSSSS",
"EONSSSSS I HAVE SLUMBERED",
"EONSSSSS I HAVE WAITED",
"MORTALSSSSSS BEHOLDDDDD",
"I COMMMMME FROM DEEP",
"IMMMMMMOBILE I WATCHHHH",
"SSSSSKITTER",
"HHHHHHHEY HOW YOU DOIN'",
"AWKWAAAAAAAAARD"
]
class SpaceAnimal(Thing):
type_name = "space animal"
child_generators = [
ChildGenerator("space animal thoughts", probability=85),
ChildGenerator("space animal body"),
]
names_data = [
["e", "a", "o", "", "", "", "", "", ""],
["sm", "cr", "shn", "sh", "sn", "gl", "g", "m", "c", "x", "h", "dr", "r", "l"],
["o", "a", "u", "i", "e", "ee"],
["x", "b", "rv", "z", "s", "gg", "g", "k", "rf", "gl", "bl", "th", "kt", "m", "sh", "l", "dr", "v", "p", "nt","nk"],
["o", "a", "i", "u", "e"],
["n", "ne", "se", "b", "m", "l", "s", "sh", "th", "t", "sk", "zer", "bbler", "ggler", "ddler", "ter", "nt", "r","r","r"],
]
class SpaceAnimalBody(Thing):
type_name = "space animal body"
child_generators = [
[
ChildGenerator("tentacle", (0, 6)),
ChildGenerator("crustacean leg", (0, 8)),
ChildGenerator("fish fin", (0, 4)),
ChildGenerator("mammal leg", (1, 6)),
ChildGenerator(),
ChildGenerator(),
],
[
ChildGenerator("insect wing", (0, 6)),
ChildGenerator(),
ChildGenerator(),
],
[
ChildGenerator("crustacean claw", (0, 4)),
ChildGenerator(),
ChildGenerator(),
],
ChildGenerator("flesh", probability=40),
ChildGenerator("snout", probability=3),
ChildGenerator("stinger", probability=10),
ChildGenerator("whiskers", probability=10),
[
ChildGenerator("crustacean shell"),
ChildGenerator("scales"),
ChildGenerator("fur"),
ChildGenerator("exoskeleton"),
ChildGenerator(),
],
[
ChildGenerator("mouth", (1, 4)),
ChildGenerator("beak", (1, 4)),
ChildGenerator(),
],
ChildGenerator("skull", probability=30),
ChildGenerator("brain", probability=50),
[
ChildGenerator("eye", (1, 2)),
ChildGenerator("eye", (1, 6)),
ChildGenerator("simple eye", (1, 6)),
ChildGenerator(),
],
ChildGenerator("weird soft organ", probability=50),
ChildGenerator("weird soft organ", probability=20),
ChildGenerator("weird hard organ", probability=50),
ChildGenerator("weird hard organ", probability=20),
]
names_data = ["body"]
class SpaceAnimalThoughts(Thing):
type_name = "space animal thoughts"
child_generators = [ChildGenerator("space animal thought", (1, 3))]
names_data = ["thoughts"]
class SpaceAnimalThought(Thing):
type_name = "space animal thought"
names_data = [
["sk'","mop","nanu","nug","gmap","shmu","dna","no","xle","doda","daia","de",""],["g ","gek ","th ","iap ","glib ","ph ","d't ","neig'","dip ","shna ","sh "],
["sk'","mop","nanu","nug","gmap","shmu","dna","no","xle","doda","daia","de",""],["g ","gek ","th ","iap ","glib ","ph ","d't ","neig'","dip ","shna ","sh "],
["mi","di","glu","dra","shwa","ama",""],["ben","ri","nap","dap","top","gog"],
[".",".",".",".","!","?"],
]
# new Thing("can of nightmare",["space animal,4-12","sea monster,2-6","space monster,2-6"]);//do not open
CONTENTS = [
SpaceMonster,
SpaceMonsterThoughts,
SpaceMonsterThought,
SpaceAnimal,
SpaceAnimalBody,
SpaceAnimalThoughts,
SpaceAnimalThought,
]
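# Hypothetical usage sketch (added): the Thing/ChildGenerator constructor
# behavior is not shown in this file, so the lines below are an assumption
# based only on the class attributes above.
#
#     >>> monster = SpaceMonster()   # e.g. "Ksh'norgnorgus the Starchild"
#     >>> monster.children           # thoughts, tentacles, eyes, organs, ...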
| d2emon/generator-pack | src/genesys/generator/_unknown/nested/biology/monsters.py | Python | gpl-3.0 | 8,272 | ["CRYSTAL"] | fd685e0cb12353fe054403accba763b0a800297fa2b788e849b2ea03811e4bc2 |
|
# Copyright 2016 Autodesk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import string
import Bio.PDB
import Bio.PDB.MMCIF2Dict
import numpy as np
import moldesign as mdt
from moldesign import units as u
from moldesign.helpers.pdb import BioAssembly
def exports(o):
__all__.append(o.__name__)
return o
__all__ = []
def parse_mmcif(f):
"""Parse an mmCIF file (using the Biopython parser) and return a molecule
Note:
This routine is not currently called by any part of the user-facing API! The
OpenBabel parser appears to give more accurate results for the time being. The
molecules created using this routine will NOT have any bond topology!
Args:
f (file): file-like object containing the mmCIF file
Returns:
moldesign.Molecule: parsed molecule
"""
return _parse_file(f, Bio.PDB.MMCIFParser)
def parse_pdb(f):
"""Parse a PDB file (using the Biopython parser) and return the basic structure
Note:
This structure will be missing some key data - most notably bonds, but also
any biomolecular assembly information. Therefore, our default parser combines
this routine with a few other methods to create the final Molecule object
See also:
moldesign.fileio.read_pdb
Args:
f (file): file-like object containing the PDB file
Returns:
moldesign.Molecule: parsed molecule
"""
# TODO: this needs to handle strings and streams
# TODO: deal with alternate locations
return _parse_file(f, Bio.PDB.PDBParser)
def _parse_file(f, parser_type):
parser = parser_type()
struc = parser.get_structure('no name', f)
mol = biopy_to_mol(struc)
return mol
@exports
def biopy_to_mol(struc):
"""Convert a biopython PDB structure to an MDT molecule.
Note:
Biopython doesn't deal with bond data, so no bonds will be present
in the Molecule
Args:
struc (Bio.PDB.Structure.Structure): Biopython PDB structure to convert
Returns:
moldesign.Molecule: converted molecule
"""
# TODO: assign bonds using 1) CONECT records, 2) residue templates, 3) distance
newatoms = []
backup_chain_names = list(string.ascii_uppercase)
for chain in struc.get_chains():
tmp, pdbidx, pdbid = chain.get_full_id()
if not pdbid.strip():
pdbid = backup_chain_names.pop()
newchain = mdt.Chain(pdbname=pdbid.strip())
for residue in chain.get_residues():
newresidue = mdt.Residue(pdbname=residue.resname.strip(),
pdbindex=residue.id[1])
newchain.add(newresidue)
for atom in residue.get_atom():
elem = atom.element
if len(elem) == 2:
elem = elem[0] + elem[1].lower()
newatom = mdt.Atom(element=elem,
name=atom.get_name(),
pdbname=atom.get_name(),
pdbindex=atom.get_serial_number())
newatom.position = atom.coord * u.angstrom
newresidue.add(newatom)
newatoms.append(newatom)
return mdt.Molecule(newatoms,
name=struc.get_full_id()[0])
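# Usage sketch (added; the file name is hypothetical and, as the docstrings
# above note, the resulting Molecule will have no bond topology):
#
#     >>> with open('protein.pdb') as f:
#     ...     mol = parse_pdb(f)
#     >>> mol.name    # taken from the Biopython structure id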
def get_mmcif_assemblies(fileobj=None, mmcdata=None):
"""Parse an mmCIF file, return biomolecular assembly specifications
Args:
fileobj (file-like): File-like object for the PDB file
(this object will be rewound before returning)
mmcdata (dict): dict version of complete mmCIF data structure (if passed, this will
not be read again from fileobj)
Returns:
Mapping[str, BioAssembly]: dict mapping assembly ids to BioAssembly instances
"""
if mmcdata is None:
mmcdata = get_mmcif_data(fileobj)
if '_pdbx_struct_assembly.id' not in mmcdata:
return {} # no assemblies present
# Get assembly metadata
ids = mmcdata['_pdbx_struct_assembly.id']
details = mmcdata['_pdbx_struct_assembly.details']
chains = mmcdata['_pdbx_struct_assembly_gen.asym_id_list']
opers = mmcdata['_pdbx_struct_assembly_gen.oper_expression']
transform_ids = mmcdata['_pdbx_struct_oper_list.id']
# Get matrix transformations
tmat = np.zeros((4, 4)).tolist()
for i in xrange(3): # construct displacement vector
tmat[i][3] = mmcdata['_pdbx_struct_oper_list.vector[%d]' % (i+1)]
for i, j in itertools.product(xrange(0, 3), xrange(0, 3)): # construct rotation matrix
tmat[i][j] = mmcdata['_pdbx_struct_oper_list.matrix[%d][%d]' % (i+1, j+1)]
transforms = _make_transform_dict(tmat, transform_ids)
# Make sure it's a list
if not isinstance(ids, list):
ids = [ids]
details = [details]
chains = [chains]
opers = [opers]
# now create the assembly specifications
assemblies = {}
for id, detail, chainlist, operlist in zip(ids, details, chains, opers):
assert id not in assemblies
        # look up this assembly's transforms without rebinding the dict itself,
        # which would break subsequent loop iterations
        assembly_transforms = [transforms[i] for i in operlist.split(',')]
        assemblies[id] = BioAssembly(detail, chainlist.split(','), assembly_transforms)
return assemblies
def _make_transform_dict(tmat, transform_ids):
if isinstance(transform_ids, list):
for i, j in itertools.product(xrange(0, 3), xrange(0, 4)):
tmat[i][j] = map(float, tmat[i][j])
tmat[3][3] = [1.0]*len(transform_ids)
tmat[3][0] = tmat[3][1] = tmat[3][2] = [0.0]*len(transform_ids)
tmat = np.array(tmat)
transforms = {id: tmat[:, :, i] for i, id in enumerate(transform_ids)}
else:
for i, j in itertools.product(xrange(0, 4), xrange(0, 4)):
tmat[i][j] = float(tmat[i][j])
tmat[3][3] = 1.0
tmat = np.array(tmat)
transforms = {transform_ids: tmat}
return transforms
def get_mmcif_data(fileobj):
mmcdata = Bio.PDB.MMCIF2Dict.MMCIF2Dict(fileobj)
fileobj.seek(0) # rewind for future access
return mmcdata
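# Hedged illustration (not part of the original module) of the 4x4 matrices
# produced by _make_transform_dict: a 3x3 rotation block R with a displacement
# vector t in the last column, i.e. [[R, t], [0, 1]], acting on homogeneous
# coordinates. The helper name is illustrative only.
def _apply_assembly_transform(tmat, positions):
    """Apply one 4x4 assembly transform to an (N, 3) coordinate array."""
    tmat = np.asarray(tmat, dtype=float)
    homogeneous = np.hstack([positions, np.ones((len(positions), 1))])
    return homogeneous.dot(tmat.T)[:, :3]  # equivalent to R @ p + t per row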
|
tkzeng/molecular-design-toolkit
|
moldesign/interfaces/biopython_interface.py
|
Python
|
apache-2.0
| 6,575
|
[
"Biopython"
] |
257bd5b3f132accbeff318dae0bc82c3fbf98e45ad127b42b063171ba44c01fd
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
********************************
**espressopp.analysis.Pressure**
********************************
.. function:: espressopp.analysis.Pressure(system)
    :param system: the espressopp system for which the pressure is computed
    :type system: espressopp.System
"""
from espressopp.esutil import cxxinit
from espressopp import pmi
from espressopp.analysis.Observable import *
from _espressopp import analysis_Pressure
class PressureLocal(ObservableLocal, analysis_Pressure):
def __init__(self, system):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, analysis_Pressure, system)
if pmi.isController:
class Pressure(Observable):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espressopp.analysis.PressureLocal'
)
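# Hedged usage sketch (not part of the original file): with a fully configured
# espressopp.System, the observable is created on the controller and evaluated
# via the Observable compute() entry point. The helper name is illustrative.
def _example_compute_pressure(system):
    pressure = Pressure(system)  # proxy class defined above on the controller
    return pressure.compute()  # scalar instantaneous pressure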
|
capoe/espressopp.soap
|
src/analysis/Pressure.py
|
Python
|
gpl-3.0
| 1,656
|
[
"ESPResSo"
] |
104405c0a720887efcd821444c89132283715ea9a62e9c441f20940dca53133e
|
# coding=utf-8
# Copyright 2021 The RecSim Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python3
"""User entity for the simulation of learning latent variable models."""
from typing import Callable, Optional, Text
import edward2 as ed # type: ignore
import gin
from gym import spaces
import numpy as np
from recsim_ng.core import value
from recsim_ng.entities.choice_models import affinities
from recsim_ng.entities.choice_models import selectors
from recsim_ng.entities.recommendation import user
from recsim_ng.entities.state_models import static
from recsim_ng.lib.tensorflow import entity
from recsim_ng.lib.tensorflow import field_spec
import tensorflow as tf
Constructor = Callable[Ellipsis, object]
Value = value.Value
ValueSpec = value.ValueSpec
Space = field_spec.Space
@gin.configurable
class ModelLearningDemoUser(user.User):
"""User model with embedding target intent and satisfaction.
This entity models a user which interacts with a recommender system by
repeatedly selecting items among slates of items. The user's action
space consists of selecting one of k presented items for consumption or
abstaining from a choice.
The user's state consists of:
* an intent realized by a target item
* a dynamic satisfaction s, which reflects the user's impression of whether
the recommender makes progress towards the target
  The user's choice process either selects one document for consumption, using
  the sum of item utility and satisfaction as that item's logit, or abstains
  according to a constant "no choice" logit, which remains fixed over time. The
  user's satisfaction therefore acts as a boost to all item logits relative to
  the "no choice" logit, so at high levels of satisfaction the user is more
  likely to pick items for consumption.
The user state updates as follows:
* The target remains fixed over time.
* The satisfaction s evolves as:
s_t = satisfaction_sensitivity * s_{t-1} + delta_t + eps,
where satisfaction_sensitivity is 0.8, delta_t is difference between the
maximum utility of the items from the t-slate and that of the (t-1)-slate,
and eps is zero-mean Gaussian noise with std=0.1.
"""
def __init__(
self,
config,
affinity_model_ctor = affinities.TargetPointSimilarity,
choice_model_ctor = selectors.MultinomialLogitChoiceModel,
user_intent_variance = 0.1,
satisfaction_sensitivity = None,
      initial_satisfaction = 5.0,
name = 'ModelLearningDemoUser'):
user.User.__init__(self, config)
entity.Entity.__init__(self, name=name)
self._slate_size = config['slate_size']
self._user_intent_variance = user_intent_variance
if satisfaction_sensitivity is None:
self._sat_sensitivity = 0.8 * tf.ones(self._num_users)
else:
self._sat_sensitivity = satisfaction_sensitivity
    self._initial_satisfaction = initial_satisfaction
# Sample from a number of user intents.
self._num_intents = config['num_topics']
batch_intent_means = tf.eye(
self._num_intents,
num_columns=self._num_topics,
batch_shape=(self._num_users,))
lop_ctor = lambda params: tf.linalg.LinearOperatorScaledIdentity( # pylint: disable=g-long-lambda
num_rows=self._num_topics,
multiplier=params)
self._intent_model = static.GMMVector(
batch_ndims=1,
mixture_logits=tf.zeros((self._num_users, self._num_intents)),
component_means=batch_intent_means,
component_scales=tf.sqrt(self._user_intent_variance),
linear_operator_ctor=lop_ctor)
self._choice_model = choice_model_ctor((self._num_users,),
tf.zeros(self._num_users))
self._affinity_model = affinity_model_ctor((self._num_users,),
self._num_topics)
def initial_state(self):
"""The state value after the initial value."""
return Value(
        satisfaction=ed.Deterministic(self._initial_satisfaction *
                                      tf.ones(self._num_users)),
intent=self._intent_model.initial_state().get('state'),
max_slate_utility=tf.zeros(self._num_users))
def next_state(self, previous_state, _, slate_docs):
"""The state value after the initial value."""
# Compute the improvement of slate scores.
slate_doc_features = slate_docs.get('features')
slate_doc_affinities = self._affinity_model.affinities(
previous_state.get('intent'), slate_doc_features).get('affinities')
max_slate_utility = tf.reduce_max(slate_doc_affinities, axis=-1) + 2.0
improvement = max_slate_utility - previous_state.get('max_slate_utility')
next_satisfaction = self._sat_sensitivity * previous_state.get(
'satisfaction') + improvement
return Value(
satisfaction=ed.Normal(loc=next_satisfaction, scale=0.01),
intent=self._intent_model.next_state(
Value(state=previous_state.get('intent'))).get('state'),
max_slate_utility=max_slate_utility)
def next_response(self, previous_state, slate_docs):
"""The response value after the initial value."""
slate_doc_features = slate_docs.get('features')
slate_doc_scores = self._affinity_model.affinities(
previous_state.get('intent'), slate_doc_features).get('affinities')
adjusted_scores = (
slate_doc_scores + 2.0 +
tf.expand_dims(previous_state.get('satisfaction'), axis=-1))
return self._choice_model.choice(adjusted_scores)
def observation(self):
pass
def specs(self):
response_spec = self._choice_model.specs()
state_spec = ValueSpec(
intent=self._intent_model.specs().get('state'),
        satisfaction=Space(
            spaces.Box(low=-np.inf, high=np.inf, shape=(self._num_users,))),
        max_slate_utility=Space(
            spaces.Box(low=-np.inf, high=np.inf, shape=(self._num_users,))))
return state_spec.prefixed_with('state').union(
response_spec.prefixed_with('response'))
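# Hedged numeric sketch (not part of the original module) of the satisfaction
# recurrence documented in ModelLearningDemoUser:
#   s_t = sensitivity * s_{t-1} + delta_t + eps, with sensitivity = 0.8.
# Note the class docstring quotes a noise std of 0.1 while next_state() uses
# ed.Normal(..., scale=0.01); the default below follows the docstring and is
# illustrative only, as is the helper name.
def _simulate_satisfaction(deltas, s0=5.0, sensitivity=0.8, noise_std=0.1,
                           seed=0):
  rng = np.random.RandomState(seed)
  s, trajectory = s0, []
  for delta in deltas:  # delta_t: change in max slate utility at turn t
    s = sensitivity * s + delta + rng.normal(0.0, noise_std)
    trajectory.append(s)
  return trajectory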
|
google-research/recsim_ng
|
recsim_ng/applications/latent_variable_model_learning/user.py
|
Python
|
apache-2.0
| 6,551
|
[
"Gaussian"
] |
6e3b134b8e60eb1511716313aaf7b333937f23e564e764b732e63cffdc0cff9a
|
# -*- coding: utf-8 -*-
import django
from django.conf import settings
from django.test import TestCase
from django.utils import translation, encoding
import jinja2
import mock
from nose.tools import eq_
from caching import base, invalidation
cache = invalidation.cache
from testapp.models import Addon, User
if django.get_version().startswith('1.3'):
class settings_patch(object):
def __init__(self, **kwargs):
self.options = kwargs
def __enter__(self):
self._old_settings = dict((k, getattr(settings, k, None)) for k in self.options)
for k, v in self.options.items():
setattr(settings, k, v)
def __exit__(self, *args):
for k in self.options:
setattr(settings, k, self._old_settings[k])
TestCase.settings = settings_patch
class CachingTestCase(TestCase):
multi_db = True
fixtures = ['tests/testapp/fixtures/testapp/test_cache.json']
extra_apps = ['tests.testapp']
def setUp(self):
cache.clear()
self.old_timeout = base.TIMEOUT
if getattr(settings, 'CACHE_MACHINE_USE_REDIS', False):
invalidation.redis.flushall()
def tearDown(self):
base.TIMEOUT = self.old_timeout
def test_flush_key(self):
"""flush_key should work for objects or strings."""
a = Addon.objects.get(id=1)
eq_(base.flush_key(a.cache_key), base.flush_key(a))
def test_cache_key(self):
a = Addon.objects.get(id=1)
eq_(a.cache_key, 'o:testapp.addon:1:default')
keys = set((a.cache_key, a.author1.cache_key, a.author2.cache_key))
eq_(set(a._cache_keys()), keys)
def test_cache(self):
"""Basic cache test: second get comes from cache."""
assert Addon.objects.get(id=1).from_cache is False
assert Addon.objects.get(id=1).from_cache is True
def test_filter_cache(self):
assert Addon.objects.filter(id=1)[0].from_cache is False
assert Addon.objects.filter(id=1)[0].from_cache is True
def test_slice_cache(self):
assert Addon.objects.filter(id=1)[:1][0].from_cache is False
assert Addon.objects.filter(id=1)[:1][0].from_cache is True
def test_invalidation(self):
assert Addon.objects.get(id=1).from_cache is False
a = [x for x in Addon.objects.all() if x.id == 1][0]
assert a.from_cache is False
assert Addon.objects.get(id=1).from_cache is True
a = [x for x in Addon.objects.all() if x.id == 1][0]
assert a.from_cache is True
a.save()
assert Addon.objects.get(id=1).from_cache is False
a = [x for x in Addon.objects.all() if x.id == 1][0]
assert a.from_cache is False
assert Addon.objects.get(id=1).from_cache is True
a = [x for x in Addon.objects.all() if x.id == 1][0]
assert a.from_cache is True
def test_invalidation_cross_locale(self):
assert Addon.objects.get(id=1).from_cache is False
a = [x for x in Addon.objects.all() if x.id == 1][0]
assert a.from_cache is False
assert Addon.objects.get(id=1).from_cache is True
a = [x for x in Addon.objects.all() if x.id == 1][0]
assert a.from_cache is True
# Do query & invalidation in a different locale.
old_locale = translation.get_language()
translation.activate('fr')
assert Addon.objects.get(id=1).from_cache is True
a = [x for x in Addon.objects.all() if x.id == 1][0]
assert a.from_cache is True
a.save()
translation.activate(old_locale)
assert Addon.objects.get(id=1).from_cache is False
a = [x for x in Addon.objects.all() if x.id == 1][0]
assert a.from_cache is False
def test_fk_invalidation(self):
"""When an object is invalidated, its foreign keys get invalidated."""
a = Addon.objects.get(id=1)
assert User.objects.get(name='clouseroo').from_cache is False
a.save()
assert User.objects.get(name='clouseroo').from_cache is False
def test_fk_parent_invalidation(self):
"""When a foreign key changes, any parent objects get invalidated."""
assert Addon.objects.get(id=1).from_cache is False
a = Addon.objects.get(id=1)
assert a.from_cache is True
u = User.objects.get(id=a.author1.id)
assert u.from_cache is True
u.name = 'fffuuu'
u.save()
assert User.objects.get(id=a.author1.id).from_cache is False
a = Addon.objects.get(id=1)
assert a.from_cache is False
eq_(a.author1.name, 'fffuuu')
def test_raw_cache(self):
sql = 'SELECT * FROM %s WHERE id = 1' % Addon._meta.db_table
raw = list(Addon.objects.raw(sql))
eq_(len(raw), 1)
raw_addon = raw[0]
a = Addon.objects.get(id=1)
for field in Addon._meta.fields:
eq_(getattr(a, field.name), getattr(raw_addon, field.name))
assert raw_addon.from_cache is False
cached = list(Addon.objects.raw(sql))
eq_(len(cached), 1)
cached_addon = cached[0]
a = Addon.objects.get(id=1)
for field in Addon._meta.fields:
eq_(getattr(a, field.name), getattr(cached_addon, field.name))
assert cached_addon.from_cache is True
def test_raw_cache_params(self):
"""Make sure the query params are included in the cache key."""
sql = 'SELECT * from %s WHERE id = %%s' % Addon._meta.db_table
raw = list(Addon.objects.raw(sql, [1]))[0]
eq_(raw.id, 1)
raw2 = list(Addon.objects.raw(sql, [2]))[0]
eq_(raw2.id, 2)
@mock.patch('caching.base.CacheMachine')
def test_raw_nocache(self, CacheMachine):
base.TIMEOUT = 60
sql = 'SELECT * FROM %s WHERE id = 1' % Addon._meta.db_table
raw = list(Addon.objects.raw(sql, timeout=base.NO_CACHE))
eq_(len(raw), 1)
raw_addon = raw[0]
assert not hasattr(raw_addon, 'from_cache')
assert not CacheMachine.called
@mock.patch('caching.base.cache')
def test_count_cache(self, cache_mock):
base.TIMEOUT = 60
cache_mock.scheme = 'memcached'
cache_mock.get.return_value = None
q = Addon.objects.all()
count = q.count()
args, kwargs = cache_mock.set.call_args
key, value, timeout = args
eq_(value, 2)
eq_(timeout, 60)
@mock.patch('caching.base.cached')
def test_count_none_timeout(self, cached_mock):
base.TIMEOUT = base.NO_CACHE
Addon.objects.count()
eq_(cached_mock.call_count, 0)
@mock.patch('caching.base.cached')
def test_count_nocache(self, cached_mock):
base.TIMEOUT = 60
Addon.objects.no_cache().count()
eq_(cached_mock.call_count, 0)
def test_queryset_flush_list(self):
"""Check that we're making a flush list for the queryset."""
q = Addon.objects.all()
objects = list(q) # Evaluate the queryset so it gets cached.
base.invalidator.add_to_flush_list({q.flush_key(): ['remove-me']})
cache.set('remove-me', 15)
Addon.objects.invalidate(objects[0])
assert cache.get(q.flush_key()) is None
assert cache.get('remove-me') is None
def test_jinja_cache_tag_queryset(self):
env = jinja2.Environment(extensions=['caching.ext.cache'])
def check(q, expected):
t = env.from_string(
"{% cache q %}{% for x in q %}{{ x.id }}:{{ x.val }};"
"{% endfor %}{% endcache %}")
eq_(t.render(q=q), expected)
# Get the template in cache, then hijack iterator to make sure we're
# hitting the cached fragment.
check(Addon.objects.all(), '1:42;2:42;')
qs = Addon.objects.all()
qs.iterator = mock.Mock()
check(qs, '1:42;2:42;')
assert not qs.iterator.called
# Make changes, make sure we dropped the cached fragment.
a = Addon.objects.get(id=1)
a.val = 17
a.save()
q = Addon.objects.all()
        assert cache.get(q.flush_key()) is None
check(Addon.objects.all(), '1:17;2:42;')
qs = Addon.objects.all()
qs.iterator = mock.Mock()
check(qs, '1:17;2:42;')
def test_jinja_cache_tag_object(self):
env = jinja2.Environment(extensions=['caching.ext.cache'])
addon = Addon.objects.get(id=1)
def check(obj, expected):
t = env.from_string(
'{% cache obj, 30 %}{{ obj.id }}:{{ obj.val }}{% endcache %}')
eq_(t.render(obj=obj), expected)
check(addon, '1:42')
addon.val = 17
addon.save()
check(addon, '1:17')
def test_jinja_multiple_tags(self):
env = jinja2.Environment(extensions=['caching.ext.cache'])
addon = Addon.objects.get(id=1)
template = ("{% cache obj %}{{ obj.id }}{% endcache %}\n"
"{% cache obj %}{{ obj.val }}{% endcache %}")
def check(obj, expected):
t = env.from_string(template)
eq_(t.render(obj=obj), expected)
check(addon, '1\n42')
addon.val = 17
addon.save()
check(addon, '1\n17')
def test_jinja_cache_tag_extra(self):
env = jinja2.Environment(extensions=['caching.ext.cache'])
addon = Addon.objects.get(id=1)
template = ('{% cache obj, extra=[obj.key] %}{{ obj.id }}:'
'{{ obj.key }}{% endcache %}')
def check(obj, expected):
t = env.from_string(template)
eq_(t.render(obj=obj), expected)
addon.key = 1
check(addon, '1:1')
addon.key = 2
check(addon, '1:2')
template = ('{% cache obj, 10, extra=[obj.key] %}{{ obj.id }}:'
'{{ obj.key }}{% endcache %}')
addon.key = 1
check(addon, '1:1')
addon.key = 2
check(addon, '1:2')
def test_cached_with(self):
counter = mock.Mock()
def expensive():
counter()
return counter.call_count
a = Addon.objects.get(id=1)
f = lambda: base.cached_with(a, expensive, 'key')
# Only gets called once.
eq_(f(), 1)
eq_(f(), 1)
# Switching locales does not reuse the cache.
old_locale = translation.get_language()
translation.activate('fr')
eq_(f(), 2)
# Called again after flush.
a.save()
eq_(f(), 3)
translation.activate(old_locale)
eq_(f(), 4)
counter.reset_mock()
q = Addon.objects.filter(id=1)
f = lambda: base.cached_with(q, expensive, 'key')
# Only gets called once.
eq_(f(), 1)
eq_(f(), 1)
# Called again after flush.
list(q)[0].save()
eq_(f(), 2)
eq_(f(), 2)
def test_cached_with_bad_object(self):
"""cached_with shouldn't fail if the object is missing a cache key."""
counter = mock.Mock()
def f():
counter()
return counter.call_count
eq_(base.cached_with([], f, 'key'), 1)
def test_cached_with_unicode(self):
u = ':'.join(map(encoding.smart_str, [u'תיאור אוסף']))
obj = mock.Mock()
obj.query_key.return_value = u'xxx'
obj.flush_key.return_value = 'key'
f = lambda: 1
eq_(base.cached_with(obj, f, 'adf:%s' % u), 1)
def test_cached_method(self):
a = Addon.objects.get(id=1)
eq_(a.calls(), (1, 1))
eq_(a.calls(), (1, 1))
a.save()
        # Still returns 1 since the object has its own local cache.
eq_(a.calls(), (1, 1))
eq_(a.calls(3), (3, 2))
a = Addon.objects.get(id=1)
eq_(a.calls(), (1, 3))
eq_(a.calls(4), (4, 4))
eq_(a.calls(3), (3, 2))
b = Addon.objects.create(id=5, val=32, author1_id=1, author2_id=2)
eq_(b.calls(), (1, 5))
# Make sure we're updating the wrapper's docstring.
eq_(b.calls.__doc__, Addon.calls.__doc__)
@mock.patch('caching.base.CacheMachine')
def test_no_cache_from_manager(self, CacheMachine):
a = Addon.objects.no_cache().get(id=1)
eq_(a.id, 1)
assert not hasattr(a, 'from_cache')
assert not CacheMachine.called
@mock.patch('caching.base.CacheMachine')
def test_no_cache_from_queryset(self, CacheMachine):
a = Addon.objects.all().no_cache().get(id=1)
eq_(a.id, 1)
assert not hasattr(a, 'from_cache')
assert not CacheMachine.called
def test_timeout_from_manager(self):
q = Addon.objects.cache(12).filter(id=1)
eq_(q.timeout, 12)
a = q.get()
assert hasattr(a, 'from_cache')
eq_(a.id, 1)
def test_timeout_from_queryset(self):
q = Addon.objects.all().cache(12).filter(id=1)
eq_(q.timeout, 12)
a = q.get()
assert hasattr(a, 'from_cache')
eq_(a.id, 1)
def test_cache_and_no_cache(self):
"""Whatever happens last sticks."""
q = Addon.objects.no_cache().cache(12).filter(id=1)
eq_(q.timeout, 12)
no_cache = q.no_cache()
# The querysets don't share anything.
eq_(q.timeout, 12)
assert no_cache.timeout != 12
assert not hasattr(no_cache.get(), 'from_cache')
eq_(q.get().id, 1)
assert hasattr(q.get(), 'from_cache')
@mock.patch('caching.base.cache')
def test_cache_machine_timeout(self, cache):
cache.scheme = 'memcached'
cache.get.return_value = None
cache.get_many.return_value = {}
a = Addon.objects.cache(12).get(id=1)
eq_(a.id, 1)
assert cache.add.called
args, kwargs = cache.add.call_args
eq_(kwargs, {'timeout': 12})
def test_unicode_key(self):
list(User.objects.filter(name=u'ümlaüt'))
def test_empty_in(self):
# Raised an exception before fixing #2.
eq_([], list(User.objects.filter(pk__in=[])))
def test_empty_queryset(self):
for k in (1, 1):
with self.assertNumQueries(k):
eq_(len(Addon.objects.filter(pk=42)), 0)
@mock.patch('caching.base.CACHE_EMPTY_QUERYSETS', True)
def test_cache_empty_queryset(self):
for k in (1, 0):
with self.assertNumQueries(k):
eq_(len(Addon.objects.filter(pk=42)), 0)
def test_invalidate_empty_queryset(self):
u = User.objects.create()
eq_(list(u.addon_set.all()), [])
Addon.objects.create(val=42, author1=u, author2=u)
eq_([a.val for a in u.addon_set.all()], [42])
def test_invalidate_new_object(self):
u = User.objects.create()
Addon.objects.create(val=42, author1=u, author2=u)
eq_([a.val for a in u.addon_set.all()], [42])
Addon.objects.create(val=17, author1=u, author2=u)
eq_([a.val for a in u.addon_set.all()], [42, 17])
def test_make_key_unicode(self):
translation.activate(u'en-US')
f = 'fragment\xe9\x9b\xbb\xe8\x85\xa6\xe7\x8e'
# This would crash with a unicode error.
base.make_key(f, with_locale=True)
translation.deactivate()
@mock.patch('caching.invalidation.cache.get_many')
def test_get_flush_lists_none(self, cache_mock):
if not getattr(settings, 'CACHE_MACHINE_USE_REDIS', False):
cache_mock.return_value.values.return_value = [None, [1]]
eq_(base.invalidator.get_flush_lists(None), set([1]))
def test_multidb_cache(self):
""" Test where master and slave DB result in two different cache keys """
assert Addon.objects.get(id=1).from_cache is False
assert Addon.objects.get(id=1).from_cache is True
from_slave = Addon.objects.using('slave').get(id=1)
assert from_slave.from_cache is False
assert from_slave._state.db == 'slave'
def test_multidb_fetch_by_id(self):
""" Test where master and slave DB result in two different cache keys with FETCH_BY_ID"""
with self.settings(FETCH_BY_ID=True):
assert Addon.objects.get(id=1).from_cache is False
assert Addon.objects.get(id=1).from_cache is True
from_slave = Addon.objects.using('slave').get(id=1)
assert from_slave.from_cache is False
assert from_slave._state.db == 'slave'
|
DramaFever/django-cache-machine
|
tests/test_cache.py
|
Python
|
bsd-3-clause
| 16,475
|
[
"ADF"
] |
6c9931a9d0cc178aae30770dc5de699aa13a9464403ddda7b263ea10afa50bc9
|
####################################################################
# Rdesigneur example 3.2
# Making an axon with a propagating action potential.
####################################################################
import numpy as np
import moose
import pylab
import rdesigneur as rd
numAxonSegments = 200
comptLen = 10e-6
comptDia = 1e-6
RM = 1.0
RA = 10.0
CM = 0.01
def makeAxonProto():
axon = moose.Neuron( '/library/axon' )
prev = rd.buildCompt( axon, 'soma', RM = RM, RA = RA, CM = CM, dia = 10e-6, x=0, dx=comptLen)
theta = 0
x = comptLen
y = 0.0
for i in range( numAxonSegments ):
dx = comptLen * np.cos( theta )
dy = comptLen * np.sin( theta )
r = np.sqrt( x * x + y * y )
theta += comptLen / r
compt = rd.buildCompt( axon, 'axon' + str(i), RM = RM, RA = RA, CM = CM, x = x, y = y, dx = dx, dy = dy, dia = comptDia )
moose.connect( prev, 'axial', compt, 'raxial' )
prev = compt
x += dx
y += dy
return axon
moose.Neutral( '/library' )
makeAxonProto()
rdes = rd.rdesigneur(
chanProto = [['make_HH_Na()', 'Na'], ['make_HH_K()', 'K']],
cellProto = [['elec','axon']],
chanDistrib = [
['Na', '#', 'Gbar', '1200' ],
['K', '#', 'Gbar', '360' ]],
stimList = [['soma', '1', '.', 'inject', '(t>0.005 && t<0.2) * 2e-11' ]],
plotList = [['soma', '1', '.', 'Vm', 'Membrane potential']],
moogList = [['#', '1', '.', 'Vm', 'Vm (mV)']]
)
rdes.buildModel()
moose.reinit()
rdes.displayMoogli( 0.00005, 0.04, 0.0 )
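# Hedged geometry sketch (not part of the original example): the loop in
# makeAxonProto() advances theta by comptLen / r, so successive compartments
# trace an outward spiral with segments of constant length comptLen. The
# helper name is illustrative only.
def _spiralCoords(n=numAxonSegments, dl=comptLen):
    x, y, theta = dl, 0.0, 0.0
    pts = [(0.0, 0.0), (x, y)]
    for _ in range(n):  # mirrors the compartment-placement loop above
        dx = dl * np.cos(theta)
        dy = dl * np.sin(theta)
        r = np.sqrt(x * x + y * y)
        theta += dl / r
        x += dx
        y += dy
        pts.append((x, y))
    return pts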
|
BhallaLab/moose-examples
|
tutorials/Rdesigneur/ex3.2_squid_axon_propgn.py
|
Python
|
gpl-2.0
| 1,557
|
[
"MOOSE",
"NEURON"
] |
e69bf74ffe5f9b96ef1fe11438804aee98b736047107fbc7c99f4a5d805562b7
|
"""
@author: Martin Kuemmel
@organization: LMU / USM
@license: Gnu Public Licence
@contact: mkuemmel@usm.lmu.de
@version: $Revision: $
@date: $Date: $
@changeDate: $LastChangedDate: $
Initialize the test library.
"""
import os.path
import shutil
import unittest
# check tips version
import tips
if float(tips.__version__)<2.0:
vtipslt2 = True
else:
vtipslt2 = False
class Test_VerifyFluxBasic(unittest.TestCase):
def setUp(self):
# flag for mop up
self.doRemove = False
# global detector-flag
self.detectorFlag=False
# global silent-flag
self.silentFlag=True
# the (list of) environment variables, names given to them and the files to be copied there
subDirs = [('AXE_IMAGE_PATH', 'DATA', ['galaxyThumbs.fits', 'input_stars_imgs.fits', 'input_flat.spc.fits', 'input_cat_verifyI.dat', 'input_cat_verifyII.dat', 'input_cat_verifyIII.dat', 'input_cat_verifyIV.dat', 'input_cat_verifyII.fits', 'input_cat_verifyIV.fits']), \
('AXE_CONFIG_PATH', 'CONF', ['verificationConfI.conf', 'constSensI.fits', 'mef_c4.00000_x-0.38167_y1.08146.fits', 'mef_c4.00000_x0.28625_y1.17167.fits', 'verificationConfI.fits']), \
('AXE_OUTPUT_PATH', 'OUTPUT'), ('AXE_OUTSIM_PATH', 'OUTSIM'), \
#('AXE_SIMDATA_PATH', 'SIMDATA', ['wfc3_ir_f125w_tpass_m01.dat']), \
('AXE_DRIZZLE_PATH', 'DRIZZLE')]
# define the directory with the input data and make sure it exists
self.dataDir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'verifyData'))
if not os.path.isdir(self.dataDir):
errMsg = 'File does not exist: %s!' % self.dataDir
raise Exception(errMsg)
# define a name for the run directory;
# destroy any old version;
# create a new one
        #self.runDir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'verifyTests'))
        # Running the tests in the source directory may cause trouble depending on where
        # the code is integrated (the source tree is not necessarily writable), so the
        # run path is defined relative to the working directory, assuming the tests are
        # run from an appropriate location.
self.runDir = os.path.abspath('./verifyTests')
#if os.path.isdir(self.runDir):
# shutil.rmtree(self.runDir, ignore_errors=True, onerror=None)
if not os.path.isdir(self.runDir):
os.mkdir(self.runDir)
# create the various sub-dirs
# and point the environment variables on it
for aSub in subDirs:
subDir = os.path.join(self.runDir, aSub[1])
if not os.path.isdir(subDir):
os.mkdir(subDir)
os.environ[aSub[0]] = subDir
# copy files in this sub-dir
if len(aSub) > 2:
# extract the file list
fileList = aSub[2]
# copy files in the sub-dir
for aFile in fileList:
# put together file names
inFile = os.path.join(self.dataDir, aFile)
outFile = os.path.join(subDir, aFile)
# make sure the file exists
if not os.path.isfile(inFile):
errMsg = 'File does not exist: %s!' % inFile
raise Exception(errMsg)
# copy the file
shutil.copy(inFile, outFile)
# create a subdir for tips tests
if not os.path.isdir(os.path.join(self.runDir,'tips')):
os.mkdir(os.path.join(self.runDir,'tips'))
# create a subdir for tips tests
if not os.path.isdir(os.path.join(self.runDir,'tips','flux')):
os.mkdir(os.path.join(self.runDir,'tips','flux'))
self.tipsDir = os.path.join(self.runDir,'tips','flux')
def tearDown(self):
# check the mop up flag
if self.doRemove:
# tear down the run directory
if os.path.isdir(self.runDir):
shutil.rmtree(self.runDir, ignore_errors=True, onerror=None)
def testGauss(self):
"""
Only Gaussian objects
"""
import math
import axesim
import verify
# make the simulation
axesim.simdispim(incat='input_cat_verifyI.dat', config='verificationConfI.conf',
dispim_name='test_verify_Gauss.fits', exptime=10., bck_flux=0.0,
detector=self.detectorFlag, silent=self.silentFlag)
# check that the output image exists
resultFile = os.path.join(os.environ['AXE_OUTSIM_PATH'], 'test_verify_Gauss.fits')
self.assertTrue(os.path.isfile(resultFile), 'Output file does not exist: %s!' % resultFile)
# compute the simulated flux and extract the flux values from the simulated image
simFlux, extrVals = verify.verify(os.path.join(os.environ['AXE_OUTSIM_PATH'],'test_verify_Gauss.fits'),
os.path.join(os.environ['AXE_IMAGE_PATH'],'input_cat_verifyI.dat'),
os.path.join(os.environ['AXE_CONFIG_PATH'],'verificationConfI.conf'))
# go over all objects
for index in range(len(simFlux)):
# compute the relative difference between the simulated
# and the extracted flux
relDiff1 = math.fabs(simFlux[index]-extrVals[index]['ave'])/simFlux[index]
relDiff2 = math.fabs(simFlux[index]-extrVals[index]['med'])/simFlux[index]
relDiff3 = extrVals[index]['maxdev']/simFlux[index]
relDiff4 = extrVals[index]['std']/simFlux[index]
# make sure the difference is small
self.assertLess(relDiff1, 1.0E-05)
#print relDiff1, relDiff2, relDiff3, extrVals[index]['maxindex'], relDiff4
def testGaussModSpec(self):
"""
Gaussian objects with model spectra
"""
import math
import axesim
import verify
# make the simulation
axesim.simdispim(incat='input_cat_verifyII.dat', config='verificationConfI.conf',
dispim_name='test_verify_GaussModspec.fits', model_spectra='input_flat.spc.fits',
exptime=10., bck_flux=0.0, detector=self.detectorFlag, silent=self.silentFlag)
# check that the output image exists
resultFile = os.path.join(os.environ['AXE_OUTSIM_PATH'], 'test_verify_GaussModspec.fits')
self.assertTrue(os.path.isfile(resultFile), 'Output file does not exist: %s!' % resultFile)
# compute the simulated flux and extract the flux values from the simulated image
simFlux, extrVals = verify.verify(os.path.join(os.environ['AXE_OUTSIM_PATH'],'test_verify_GaussModspec.fits'),
os.path.join(os.environ['AXE_IMAGE_PATH'],'input_cat_verifyII.dat'),
os.path.join(os.environ['AXE_CONFIG_PATH'],'verificationConfI.conf'),
inSpec=os.path.join(os.environ['AXE_IMAGE_PATH'],'input_flat.spc.fits'))
# go over all objects
for index in range(len(simFlux)):
# compute the relative difference between the simulated
# and the extracted flux
relDiff1 = math.fabs(simFlux[index]-extrVals[index]['ave'])/simFlux[index]
relDiff2 = math.fabs(simFlux[index]-extrVals[index]['med'])/simFlux[index]
relDiff3 = extrVals[index]['maxdev']/simFlux[index]
relDiff4 = extrVals[index]['std']/simFlux[index]
# make sure the difference is small
self.assertLess(relDiff1, 2.0E-05)
#print relDiff1, relDiff2, relDiff3, extrVals[index]['maxdev'], extrVals[index]['maxindex'], relDiff4
def testGaussModSpec_tips(self):
"""
Gaussian objects with model spectra at TIPS level
"""
import math
import tips
import verify
inCat = os.path.join(os.environ['AXE_IMAGE_PATH'],'input_cat_verifyII.fits')
inSpc = os.path.join(os.environ['AXE_IMAGE_PATH'],'input_flat.spc.fits')
obs = tips.Observation(inCat, inSpc, inCatForm='TIPS', inSpcForm='aXeSIM', norm=True)
obs.loadFromFile(os.path.join(os.environ['AXE_CONFIG_PATH'],'verificationConfI.fits'))
obs.runSimulation(workDir=self.tipsDir)
# check that the output image exists
resultFile = os.path.join(self.tipsDir,'OUTSIM', 'input_cat_verifyII_WFC3_IR_00_v1_verify_d300914_IMG.fits')
self.assertTrue(os.path.isfile(resultFile), 'Output file does not exist: %s!' % resultFile)
axesimCat = os.path.join(self.tipsDir,'DATA','input_cat_verifyII_WFC3_IR_00.cat')
verify.getInitialModIndex(inCat, axesimCat)
# compute the simulated flux and extract the flux values from the simulated image
simFlux, extrVals = verify.verify(os.path.join(self.tipsDir,'OUTSIM','input_cat_verifyII_WFC3_IR_00_v1_verify_d300914_IMG.fits'),
axesimCat, os.path.join(self.tipsDir,'CONF','WFC3_IR_00_v1_verify_d300914.conf'), inSpec=inSpc)
# go over all objects
for index in range(len(simFlux)):
# compute the relative difference between the simulated
# and the extracted flux
relDiff1 = math.fabs(simFlux[index]-extrVals[index]['ave'])/simFlux[index]
relDiff2 = math.fabs(simFlux[index]-extrVals[index]['med'])/simFlux[index]
relDiff3 = extrVals[index]['maxdev']/simFlux[index]
relDiff4 = extrVals[index]['std']/simFlux[index]
# make sure the difference is small
self.assertLess(relDiff1, 2.0E-05)
#print relDiff1, relDiff2, relDiff3, extrVals[index]['maxdev'], extrVals[index]['maxindex'], relDiff4
    @unittest.skipIf(vtipslt2, "not supported with tips < 2.0")
def testStarsModSpec(self):
"""
Stellar objects with model spectra
"""
import math
import axesim
import verify
# make the simulation
axesim.simdispim(incat='input_cat_verifyIII.dat', config='verificationConfI.conf',
dispim_name='test_verify_starsModspec.fits', model_images='input_stars_imgs.fits', model_spectra='input_flat.spc.fits',
psf_file='mef_c4.00000_x0.28625_y1.17167.fits', exptime=10., bck_flux=0.0, detector=self.detectorFlag, silent=self.silentFlag)
# check that the output image exists
resultFile = os.path.join(os.environ['AXE_OUTSIM_PATH'], 'test_verify_starsModspec.fits')
self.assertTrue(os.path.isfile(resultFile), 'Output file does not exist: %s!' % resultFile)
# compute the simulated flux and extract the flux values from the simulated image
simFlux, extrVals = verify.verify(os.path.join(os.environ['AXE_OUTSIM_PATH'],'test_verify_starsModspec.fits'),
os.path.join(os.environ['AXE_IMAGE_PATH'],'input_cat_verifyIII.dat'),
os.path.join(os.environ['AXE_CONFIG_PATH'],'verificationConfI.conf'),
inSpec=os.path.join(os.environ['AXE_IMAGE_PATH'],'input_flat.spc.fits'),
inModel=os.path.join(os.environ['AXE_IMAGE_PATH'],'input_stars_imgs.fits'),
inPSF=os.path.join(os.environ['AXE_CONFIG_PATH'],'mef_c4.00000_x-0.38167_y1.08146.fits'))
# go over all objects
for index in range(len(simFlux)):
# compute the relative difference between the simulated
# and the extracted flux
relDiff1 = math.fabs(simFlux[index]-extrVals[index]['ave'])/simFlux[index]
relDiff2 = math.fabs(simFlux[index]-extrVals[index]['med'])/simFlux[index]
relDiff3 = extrVals[index]['maxdev']/simFlux[index]
relDiff4 = extrVals[index]['std']/simFlux[index]
# make sure the difference is small
self.assertLess(relDiff1, 1.0E-03)
#print relDiff1, relDiff2, relDiff3, extrVals[index]['maxindex'], relDiff4
def testModImgSpec(self):
"""
Model images with input spectra
"""
import math
import axesim
import verify
# make the simulation
axesim.simdispim(incat='input_cat_verifyIV.dat', config='verificationConfI.conf',
dispim_name='test_verify_ModimgSpec.fits', model_images='galaxyThumbs.fits', model_spectra='input_flat.spc.fits',
exptime=10., bck_flux=0.0, detector=self.detectorFlag, silent=self.silentFlag)
# check that the output image exists
resultFile = os.path.join(os.environ['AXE_OUTSIM_PATH'], 'test_verify_ModimgSpec.fits')
self.assertTrue(os.path.isfile(resultFile), 'Output file does not exist: %s!' % resultFile)
# compute the simulated flux and extract the flux values from the simulated image
simFlux, extrVals = verify.verify(os.path.join(os.environ['AXE_OUTSIM_PATH'],'test_verify_ModimgSpec.fits'),
os.path.join(os.environ['AXE_IMAGE_PATH'],'input_cat_verifyIV.dat'),
os.path.join(os.environ['AXE_CONFIG_PATH'],'verificationConfI.conf'),
inSpec=os.path.join(os.environ['AXE_IMAGE_PATH'],'input_flat.spc.fits'),
inModel=os.path.join(os.environ['AXE_IMAGE_PATH'],'galaxyThumbs.fits'))
# go over all objects
for index in range(len(simFlux)):
# compute the relative difference between the simulated
# and the extracted flux
relDiff1 = math.fabs(simFlux[index]-extrVals[index]['ave'])/simFlux[index]
relDiff2 = math.fabs(simFlux[index]-extrVals[index]['med'])/simFlux[index]
relDiff3 = extrVals[index]['maxdev']/simFlux[index]
relDiff4 = extrVals[index]['std']/simFlux[index]
# make sure the difference is small
self.assertLess(relDiff1, 2.0E-05)
#print relDiff1, relDiff2, relDiff3, extrVals[index]['maxindex'], relDiff4
def testModImgSpec_tips(self):
"""
Model images with input spectra at TIPS level
"""
import math
import axesim
import verify
inCat = os.path.join(os.environ['AXE_IMAGE_PATH'],'input_cat_verifyIV.fits')
inSpc = os.path.join(os.environ['AXE_IMAGE_PATH'],'input_flat.spc.fits')
inThm = os.path.join(os.environ['AXE_IMAGE_PATH'],'galaxyThumbs.fits')
obs = tips.Observation(inCat, inSpc, inCatForm='TIPS', inSpcForm='aXeSIM', norm=True, inThmDir=inThm)
obs.loadFromFile(os.path.join(os.environ['AXE_CONFIG_PATH'],'verificationConfI.fits'))
obs.runSimulation(workDir=self.tipsDir)
# check that the output image exists
resultFile = os.path.join(self.tipsDir,'OUTSIM', 'input_cat_verifyIV_WFC3_IR_00_v1_verify_d300914_IMG.fits')
self.assertTrue(os.path.isfile(resultFile), 'Output file does not exist: %s!' % resultFile)
axesimCat = os.path.join(self.tipsDir,'DATA','input_cat_verifyIV_WFC3_IR_00.cat')
verify.getInitialModIndex(inCat, axesimCat)
# compute the simulated flux and extract the flux values from the simulated image
simFlux, extrVals = verify.verify(os.path.join(self.tipsDir,'OUTSIM','input_cat_verifyIV_WFC3_IR_00_v1_verify_d300914_IMG.fits'),
axesimCat, os.path.join(self.tipsDir,'CONF','WFC3_IR_00_v1_verify_d300914.conf'),
inSpec=inSpc, inModel=inThm)
# go over all objects
for index in range(len(simFlux)):
# compute the relative difference between the simulated
# and the extracted flux
relDiff1 = math.fabs(simFlux[index]-extrVals[index]['ave'])/simFlux[index]
relDiff2 = math.fabs(simFlux[index]-extrVals[index]['med'])/simFlux[index]
relDiff3 = extrVals[index]['maxdev']/simFlux[index]
relDiff4 = extrVals[index]['std']/simFlux[index]
# make sure the difference is small
self.assertLess(relDiff1, 2.0E-05)
#print relDiff1, relDiff2, relDiff3, extrVals[index]['maxindex'], relDiff4
    @unittest.skipIf(vtipslt2, "not supported with tips < 2.0")
def testModImgSpecPSF(self):
"""
        Model images with input spectra and a PSF file
"""
import math
import axesim
import verify
# make the simulation
axesim.simdispim(incat='input_cat_verifyIV.dat', config='verificationConfI.conf',
dispim_name='test_verify_ModimgSpecPSF.fits', model_images='galaxyThumbs.fits', model_spectra='input_flat.spc.fits',
psf_file='mef_c4.00000_x-0.38167_y1.08146.fits', exptime=10., bck_flux=0.0, detector=self.detectorFlag, silent=self.silentFlag)
# check that the output image exists
resultFile = os.path.join(os.environ['AXE_OUTSIM_PATH'], 'test_verify_ModimgSpecPSF.fits')
self.assertTrue(os.path.isfile(resultFile), 'Output file does not exist: %s!' % resultFile)
# compute the simulated flux and extract the flux values from the simulated image
simFlux, extrVals = verify.verify(os.path.join(os.environ['AXE_OUTSIM_PATH'],'test_verify_ModimgSpecPSF.fits'),
os.path.join(os.environ['AXE_IMAGE_PATH'],'input_cat_verifyIV.dat'),
os.path.join(os.environ['AXE_CONFIG_PATH'],'verificationConfI.conf'),
inSpec=os.path.join(os.environ['AXE_IMAGE_PATH'],'input_flat.spc.fits'),
inModel=os.path.join(os.environ['AXE_IMAGE_PATH'],'galaxyThumbs.fits'),
inPSF=os.path.join(os.environ['AXE_CONFIG_PATH'],'mef_c4.00000_x-0.38167_y1.08146.fits'))
# go over all objects
for index in range(len(simFlux)):
# compute the relative difference between the simulated
# and the extracted flux
relDiff1 = math.fabs(simFlux[index]-extrVals[index]['ave'])/simFlux[index]
relDiff2 = math.fabs(simFlux[index]-extrVals[index]['med'])/simFlux[index]
relDiff3 = extrVals[index]['maxdev']/simFlux[index]
relDiff4 = extrVals[index]['std']/simFlux[index]
# make sure the difference is small
self.assertLess(relDiff1, 4.0E-05)
#print relDiff1, relDiff2, relDiff3, extrVals[index]['maxindex'], relDiff4
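# Hedged refactoring sketch (not part of the original tests): every test above
# repeats the same relative-difference bookkeeping; a helper like this could
# compute all four metrics per object in one call. The name is illustrative.
def _relativeDifferences(simFlux, extrVals):
    import math
    relDiffs = []
    for flux, vals in zip(simFlux, extrVals):
        relDiffs.append({'ave': math.fabs(flux - vals['ave']) / flux,
                         'med': math.fabs(flux - vals['med']) / flux,
                         'maxdev': vals['maxdev'] / flux,
                         'std': vals['std'] / flux})
    return relDiffs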
|
nfourmanoit/TIPS
|
test/test_tips/testverify/testVerifyFlux.py
|
Python
|
gpl-3.0
| 19,047
|
[
"Gaussian"
] |
12b7ec792350e7346255d4c04982c3d529ad7194b6f3b41740cfa639124b5154
|
# coding: utf8
# Copyright 2014-2017 CERN. This software is distributed under the
# terms of the GNU General Public Licence version 3 (GPL Version 3),
# copied verbatim in the file LICENCE.md.
# In applying this licence, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization or
# submit itself to any jurisdiction.
# Project website: http://blond.web.cern.ch/
"""
Unittest for synchrotron_radiation.synchrotron_radiation.py
:Authors: **Markus Schwarz, Konstantinos Iliakis**
"""
import unittest
import numpy as np
import os
from blond.utils import bmath as bm
from blond.input_parameters.ring import Ring
from blond.beam.beam import Beam, Electron, Positron
from blond.beam.distributions import bigaussian, matched_from_distribution_function
from blond.input_parameters.rf_parameters import RFStation
from blond.beam.profile import Profile
from blond.trackers.tracker import RingAndRFTracker, FullRingAndRF
from blond.synchrotron_radiation.synchrotron_radiation import SynchrotronRadiation
from scipy.constants import c, e, m_e
from blond.beam.profile import CutOptions
class TestSynchrotronRadiation(unittest.TestCase):
# Run before every test
def setUp(self):
circumference = 110.4 # [m]
energy = 2.5e9 # [eV]
alpha = 0.0082
self.R_bend = 5.559 # bending radius [m]
# C_gamma = e**2 / (3*epsilon_0 * (m_e*c**2)**4) # [m J^3]
# C_gamma *= e**3 # [m eV^3]
harmonic_number = 184
voltage = 800e3 # eV
phi_offsets = 0
self.seed = 1234
self.intensity = 2.299e9
self.n_macroparticles = int(1e2)
self.sigma_dt = 10e-12 # RMS, [s]
self.ring = Ring(circumference, alpha, energy, Positron(),
synchronous_data_type='total energy', n_turns=1)
self.rf_station = RFStation(self.ring, harmonic_number, voltage,
phi_offsets, n_rf=1)
self.beam = Beam(self.ring, self.n_macroparticles, self.intensity)
bigaussian(self.ring, self.rf_station, self.beam,
self.sigma_dt, seed=self.seed)
# # energy loss per turn [eV]; assuming isomagnetic lattice
# self.U0 = C_gamma * self.ring.beta[0,0]**3 * self.ring.energy[0,0]**4 / self.R_bend
def test_initial_beam(self):
atol = 0
rtol = 1e-7
np.testing.assert_allclose([np.mean(self.beam.dt)], [1.0010434293297664e-09],
atol=atol, rtol=rtol,
err_msg='Initial avg beam.dt wrong')
np.testing.assert_allclose([np.std(self.beam.dt)], [9.956848503354043e-12],
atol=atol, rtol=rtol,
err_msg='Initial std beam.dt wrong')
np.testing.assert_allclose([np.mean(self.beam.dE)], [-22869.066735787248],
atol=atol, rtol=rtol,
err_msg='Initial avg beam.dE wrong')
np.testing.assert_allclose([np.std(self.beam.dE)], [446199.02910303336],
atol=atol, rtol=rtol,
err_msg='Initial std beam.dE wrong')
def test_affect_only_dE(self):
atol = 0
rtol = 1e-7
# incoherent synchrotron radiation, no displacement of beam
iSR = SynchrotronRadiation(self.ring, self.rf_station, self.beam, self.R_bend,
seed=self.seed, n_kicks=1, shift_beam=False,
python=True, quantum_excitation=False)
iSR.track()
np.testing.assert_allclose([np.mean(self.beam.dt)], [1.0010434293297664e-09],
atol=atol, rtol=rtol,
err_msg='SR affected mean beam.dt')
np.testing.assert_allclose([np.std(self.beam.dt)], [9.956848503354043e-12],
atol=atol, rtol=rtol,
err_msg='SR affected std beam.dt')
# np.testing.assert_almost_equal(
# self.beam.dt[0], 1.0054066581358374e-09, decimal=10,
# err_msg='SR affected beam.dt')
def test_synchrotron_radiation_python_vs_C(self):
atol = 0
rtol = 1e-7
iSR = SynchrotronRadiation(self.ring, self.rf_station, self.beam, self.R_bend,
n_kicks=1, shift_beam=False,
python=True, quantum_excitation=False, seed=self.seed)
iSR.track() # Python implementation
beam_C = Beam(self.ring, self.n_macroparticles, self.intensity)
bigaussian(self.ring, self.rf_station, beam_C,
self.sigma_dt, seed=self.seed)
iSR = SynchrotronRadiation(self.ring, self.rf_station, beam_C, self.R_bend,
n_kicks=1, shift_beam=False,
python=False, quantum_excitation=False, seed=self.seed)
iSR.track() # C implementation
        np.testing.assert_allclose([np.mean(self.beam.dE)], [np.mean(beam_C.dE)],
                                   atol=atol, rtol=rtol,
                                   err_msg='Python and C yield different avg beam.dE for single kick')
        np.testing.assert_allclose([np.std(self.beam.dE)], [np.std(beam_C.dE)],
                                   atol=atol, rtol=rtol,
                                   err_msg='Python and C yield different std beam.dE for single kick')
# np.testing.assert_almost_equal(self.beam.dE, beam_C.dE, decimal=8,
# err_msg='SR: Python and C implementations yield different results for single kick')
def test_synchrotron_radiation_python_vs_C_double_kick(self):
atol = 0
rtol = 1e-7
iSR = SynchrotronRadiation(self.ring, self.rf_station, self.beam, self.R_bend,
n_kicks=2, shift_beam=False,
python=True, quantum_excitation=False, seed=self.seed)
iSR.track() # Python implementation
beam_C = Beam(self.ring, self.n_macroparticles, self.intensity)
bigaussian(self.ring, self.rf_station, beam_C,
self.sigma_dt, seed=self.seed)
iSR = SynchrotronRadiation(self.ring, self.rf_station, beam_C, self.R_bend,
n_kicks=2, shift_beam=False,
python=False, quantum_excitation=False, seed=self.seed)
iSR.track() # C implementation
        np.testing.assert_allclose([np.mean(self.beam.dE)], [np.mean(beam_C.dE)],
                                   atol=atol, rtol=rtol,
                                   err_msg='Python and C yield different avg beam.dE for two kicks')
        np.testing.assert_allclose([np.std(self.beam.dE)], [np.std(beam_C.dE)],
                                   atol=atol, rtol=rtol,
                                   err_msg='Python and C yield different std beam.dE for two kicks')
# np.testing.assert_almost_equal(self.beam.dE, beam_C.dE, decimal=8,
# err_msg='SR: Python and C implementations yield different results for two kicks')
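# Hedged helper (not part of the original tests): the energy-loss formula kept
# as comments in TestSynchrotronRadiation.setUp, written out. For the setUp
# values (2.5 GeV, R_bend = 5.559 m) it gives roughly 0.6 MeV per turn for an
# isomagnetic lattice. The helper name is illustrative only.
def _energy_loss_per_turn(energy_eV, beta, bending_radius_m):
    from scipy.constants import epsilon_0
    C_gamma = e**2 / (3 * epsilon_0 * (m_e * c**2)**4)  # [m / J^3]
    C_gamma *= e**3                                     # convert to [m / eV^3]
    return C_gamma * beta**3 * energy_eV**4 / bending_radius_m  # [eV]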
class TestSynchRad(unittest.TestCase):
# SIMULATION PARAMETERS -------------------------------------------------------
# Beam parameters
particle_type = Electron()
n_particles = int(1.7e11)
n_macroparticles = int(1e5)
sync_momentum = 175e9 # [eV]
distribution_type = 'gaussian'
emittance = 1.0
distribution_variable = 'Action'
# Machine and RF parameters
radius = 15915.49
gamma_transition = 377.96447
C = 2 * np.pi * radius # [m]
# Tracking details
n_turns = int(200)
# Derived parameters
E_0 = m_e * c**2 / e # [eV]
tot_beam_energy = np.sqrt(sync_momentum**2 + E_0**2) # [eV]
momentum_compaction = 1 / gamma_transition**2 # [1]
# Cavities parameters
n_rf_systems = 1
harmonic_numbers = 133650
voltage_program = 10e9
phi_offset = np.pi
bucket_length = C / c / harmonic_numbers
n_sections = 2
rho = 11e3
    # Run before every test
def setUp(self):
self.general_params = Ring(np.ones(self.n_sections) * self.C/self.n_sections,
np.tile(self.momentum_compaction,
(1, self.n_sections)).T,
np.tile(self.sync_momentum,
(self.n_sections, self.n_turns+1)),
self.particle_type, self.n_turns, n_sections=self.n_sections)
self.RF_sct_par = []
self.RF_sct_par_cpp = []
for i in np.arange(self.n_sections)+1:
self.RF_sct_par.append(RFStation(self.general_params,
[self.harmonic_numbers], [
self.voltage_program/self.n_sections],
[self.phi_offset], self.n_rf_systems, section_index=i))
self.RF_sct_par_cpp.append(RFStation(self.general_params,
[self.harmonic_numbers], [
self.voltage_program/self.n_sections],
[self.phi_offset], self.n_rf_systems, section_index=i))
# DEFINE BEAM------------------------------------------------------------------
self.beam = Beam(self.general_params,
self.n_macroparticles, self.n_particles)
self.beam_cpp = Beam(self.general_params,
self.n_macroparticles, self.n_particles)
# DEFINE SLICES----------------------------------------------------------------
number_slices = 500
cut_options = CutOptions(
cut_left=0., cut_right=self.bucket_length, n_slices=number_slices)
self.slice_beam = Profile(self.beam, CutOptions=cut_options)
self.slice_beam_cpp = Profile(self.beam_cpp, CutOptions=cut_options)
# DEFINE TRACKER---------------------------------------------------------------
self.longitudinal_tracker = []
self.longitudinal_tracker_cpp = []
for i in range(self.n_sections):
self.longitudinal_tracker.append(RingAndRFTracker(
self.RF_sct_par[i], self.beam, Profile=self.slice_beam))
self.longitudinal_tracker_cpp.append(RingAndRFTracker(
self.RF_sct_par_cpp[i], self.beam_cpp, Profile=self.slice_beam_cpp))
full_tracker = FullRingAndRF(self.longitudinal_tracker)
full_tracker_cpp = FullRingAndRF(self.longitudinal_tracker_cpp)
# BEAM GENERATION--------------------------------------------------------------
matched_from_distribution_function(self.beam, full_tracker, emittance=self.emittance,
distribution_type=self.distribution_type,
distribution_variable=self.distribution_variable, seed=1000)
matched_from_distribution_function(self.beam_cpp, full_tracker_cpp, emittance=self.emittance,
distribution_type=self.distribution_type,
distribution_variable=self.distribution_variable, seed=1000)
self.slice_beam.track()
self.slice_beam_cpp.track()
# Run after every test
def tearDown(self):
pass
def test_no_quant_exc_10t(self):
os.environ['OMP_NUM_THREADS'] = '1'
turns = 10
atol = 0
rtol_avg = 1e-7
rtol_std = 1e-7
SR = []
SR_cpp = []
for i in range(self.n_sections):
SR.append(SynchrotronRadiation(self.general_params, self.RF_sct_par[i],
self.beam, self.rho,
quantum_excitation=False, python=True))
SR_cpp.append(SynchrotronRadiation(self.general_params, self.RF_sct_par_cpp[i],
self.beam_cpp, self.rho,
quantum_excitation=False, python=False))
map_ = []
for i in range(self.n_sections):
map_ += [self.longitudinal_tracker[i]] + [SR[i]]
map_ += [self.slice_beam]
map_cpp = []
for i in range(self.n_sections):
map_cpp += [self.longitudinal_tracker_cpp[i]] + [SR_cpp[i]]
map_cpp += [self.slice_beam_cpp]
avg_dt = np.zeros(turns)
std_dt = np.zeros(turns)
avg_dE = np.zeros(turns)
std_dE = np.zeros(turns)
avg_dt_cpp = np.zeros(turns)
std_dt_cpp = np.zeros(turns)
avg_dE_cpp = np.zeros(turns)
std_dE_cpp = np.zeros(turns)
for i in range(turns):
for m in map_:
m.track()
for m in map_cpp:
m.track()
avg_dt[i] = np.mean(self.beam.dt)
std_dt[i] = np.std(self.beam.dt)
avg_dE[i] = np.mean(self.beam.dE)
std_dE[i] = np.std(self.beam.dE)
avg_dt_cpp[i] = np.mean(self.beam_cpp.dt)
std_dt_cpp[i] = np.std(self.beam_cpp.dt)
avg_dE_cpp[i] = np.mean(self.beam_cpp.dE)
std_dE_cpp[i] = np.std(self.beam_cpp.dE)
        np.testing.assert_allclose(avg_dt, avg_dt_cpp, atol=atol, rtol=rtol_avg,
                                   err_msg="Python and C++ avg beam dt arrays not close")
        np.testing.assert_allclose(std_dt, std_dt_cpp, atol=atol, rtol=rtol_std,
                                   err_msg="Python and C++ std beam dt arrays not close")
        np.testing.assert_allclose(avg_dE, avg_dE_cpp, atol=atol, rtol=rtol_avg,
                                   err_msg="Python and C++ avg beam dE arrays not close")
        np.testing.assert_allclose(std_dE, std_dE_cpp, atol=atol, rtol=rtol_std,
                                   err_msg="Python and C++ std beam dE arrays not close")
def test_with_quant_exc_10t(self):
os.environ['OMP_NUM_THREADS'] = '1'
turns = 10
atol = 0
rtol_avg = 1e-2
rtol_std = 1e-2
SR = []
SR_cpp = []
for i in range(self.n_sections):
SR.append(SynchrotronRadiation(self.general_params, self.RF_sct_par[i],
self.beam, self.rho,
quantum_excitation=True, python=True))
SR_cpp.append(SynchrotronRadiation(self.general_params, self.RF_sct_par_cpp[i],
self.beam_cpp, self.rho,
quantum_excitation=True, python=False))
map_ = []
for i in range(self.n_sections):
map_ += [self.longitudinal_tracker[i]] + [SR[i]]
map_ += [self.slice_beam]
map_cpp = []
for i in range(self.n_sections):
map_cpp += [self.longitudinal_tracker_cpp[i]] + [SR_cpp[i]]
map_cpp += [self.slice_beam_cpp]
avg_dt = np.zeros(turns)
std_dt = np.zeros(turns)
avg_dE = np.zeros(turns)
std_dE = np.zeros(turns)
avg_dt_cpp = np.zeros(turns)
std_dt_cpp = np.zeros(turns)
avg_dE_cpp = np.zeros(turns)
std_dE_cpp = np.zeros(turns)
for i in range(turns):
for m in map_:
m.track()
for m in map_cpp:
m.track()
avg_dt[i] = np.mean(self.beam.dt)
std_dt[i] = np.std(self.beam.dt)
avg_dE[i] = np.mean(self.beam.dE)
std_dE[i] = np.std(self.beam.dE)
avg_dt_cpp[i] = np.mean(self.beam_cpp.dt)
std_dt_cpp[i] = np.std(self.beam_cpp.dt)
avg_dE_cpp[i] = np.mean(self.beam_cpp.dE)
std_dE_cpp[i] = np.std(self.beam_cpp.dE)
        np.testing.assert_allclose(avg_dt, avg_dt_cpp, atol=atol, rtol=rtol_avg,
                                   err_msg="Python and C++ avg beam dt arrays not close")
        np.testing.assert_allclose(std_dt, std_dt_cpp, atol=atol, rtol=rtol_std,
                                   err_msg="Python and C++ std beam dt arrays not close")
        np.testing.assert_allclose(avg_dE, avg_dE_cpp, atol=atol, rtol=rtol_avg,
                                   err_msg="Python and C++ avg beam dE arrays not close")
        np.testing.assert_allclose(std_dE, std_dE_cpp, atol=atol, rtol=rtol_std,
                                   err_msg="Python and C++ std beam dE arrays not close")
def test_no_quant_exc_100t(self):
os.environ['OMP_NUM_THREADS'] = '1'
turns = 100
atol = 0
rtol_avg = 1e-7
rtol_std = 1e-7
SR = []
SR_cpp = []
for i in range(self.n_sections):
SR.append(SynchrotronRadiation(self.general_params, self.RF_sct_par[i],
self.beam, self.rho,
quantum_excitation=False, python=True))
SR_cpp.append(SynchrotronRadiation(self.general_params, self.RF_sct_par_cpp[i],
self.beam_cpp, self.rho,
quantum_excitation=False, python=False))
map_ = []
for i in range(self.n_sections):
map_ += [self.longitudinal_tracker[i]] + [SR[i]]
map_ += [self.slice_beam]
map_cpp = []
for i in range(self.n_sections):
map_cpp += [self.longitudinal_tracker_cpp[i]] + [SR_cpp[i]]
map_cpp += [self.slice_beam_cpp]
avg_dt = np.zeros(turns)
std_dt = np.zeros(turns)
avg_dE = np.zeros(turns)
std_dE = np.zeros(turns)
avg_dt_cpp = np.zeros(turns)
std_dt_cpp = np.zeros(turns)
avg_dE_cpp = np.zeros(turns)
std_dE_cpp = np.zeros(turns)
for i in range(turns):
for m in map_:
m.track()
for m in map_cpp:
m.track()
avg_dt[i] = np.mean(self.beam.dt)
std_dt[i] = np.std(self.beam.dt)
avg_dE[i] = np.mean(self.beam.dE)
std_dE[i] = np.std(self.beam.dE)
avg_dt_cpp[i] = np.mean(self.beam_cpp.dt)
std_dt_cpp[i] = np.std(self.beam_cpp.dt)
avg_dE_cpp[i] = np.mean(self.beam_cpp.dE)
std_dE_cpp[i] = np.std(self.beam_cpp.dE)
        np.testing.assert_allclose(avg_dt, avg_dt_cpp, atol=atol, rtol=rtol_avg,
                                   err_msg="Python and C++ avg beam dt arrays not close")
        np.testing.assert_allclose(std_dt, std_dt_cpp, atol=atol, rtol=rtol_std,
                                   err_msg="Python and C++ std beam dt arrays not close")
        np.testing.assert_allclose(avg_dE, avg_dE_cpp, atol=atol, rtol=rtol_avg,
                                   err_msg="Python and C++ avg beam dE arrays not close")
        np.testing.assert_allclose(std_dE, std_dE_cpp, atol=atol, rtol=rtol_std,
                                   err_msg="Python and C++ std beam dE arrays not close")
def test_with_quant_exc_100t(self):
os.environ['OMP_NUM_THREADS'] = '1'
turns = 100
atol = 0
rtol_avg = 1e-2
rtol_std = 1e-1
SR = []
SR_cpp = []
for i in range(self.n_sections):
SR.append(SynchrotronRadiation(self.general_params, self.RF_sct_par[i],
self.beam, self.rho,
quantum_excitation=True, python=True))
SR_cpp.append(SynchrotronRadiation(self.general_params, self.RF_sct_par_cpp[i],
self.beam_cpp, self.rho,
quantum_excitation=True, python=False))
map_ = []
for i in range(self.n_sections):
map_ += [self.longitudinal_tracker[i]] + [SR[i]]
map_ += [self.slice_beam]
map_cpp = []
for i in range(self.n_sections):
map_cpp += [self.longitudinal_tracker_cpp[i]] + [SR_cpp[i]]
map_cpp += [self.slice_beam_cpp]
avg_dt = np.zeros(turns)
std_dt = np.zeros(turns)
avg_dE = np.zeros(turns)
std_dE = np.zeros(turns)
avg_dt_cpp = np.zeros(turns)
std_dt_cpp = np.zeros(turns)
avg_dE_cpp = np.zeros(turns)
std_dE_cpp = np.zeros(turns)
for i in range(turns):
for m in map_:
m.track()
for m in map_cpp:
m.track()
avg_dt[i] = np.mean(self.beam.dt)
std_dt[i] = np.std(self.beam.dt)
avg_dE[i] = np.mean(self.beam.dE)
std_dE[i] = np.std(self.beam.dE)
avg_dt_cpp[i] = np.mean(self.beam_cpp.dt)
std_dt_cpp[i] = np.std(self.beam_cpp.dt)
avg_dE_cpp[i] = np.mean(self.beam_cpp.dE)
std_dE_cpp[i] = np.std(self.beam_cpp.dE)
        np.testing.assert_allclose(avg_dt, avg_dt_cpp, atol=atol, rtol=rtol_avg,
                                   err_msg="Python and C++ avg beam dt arrays not close")
        np.testing.assert_allclose(std_dt, std_dt_cpp, atol=atol, rtol=rtol_std,
                                   err_msg="Python and C++ std beam dt arrays not close")
        np.testing.assert_allclose(avg_dE, avg_dE_cpp, atol=atol, rtol=rtol_avg,
                                   err_msg="Python and C++ avg beam dE arrays not close")
        np.testing.assert_allclose(std_dE, std_dE_cpp, atol=atol, rtol=rtol_std,
                                   err_msg="Python and C++ std beam dE arrays not close")
def test_no_quant_exc_10t_parallel(self):
os.environ['OMP_NUM_THREADS'] = '2'
turns = 10
atol = 0
rtol_avg = 1e-7
rtol_std = 1e-7
SR = []
SR_cpp = []
for i in range(self.n_sections):
SR.append(SynchrotronRadiation(self.general_params, self.RF_sct_par[i],
self.beam, self.rho,
quantum_excitation=False, python=True))
SR_cpp.append(SynchrotronRadiation(self.general_params, self.RF_sct_par_cpp[i],
self.beam_cpp, self.rho,
quantum_excitation=False, python=False))
map_ = []
for i in range(self.n_sections):
map_ += [self.longitudinal_tracker[i]] + [SR[i]]
map_ += [self.slice_beam]
map_cpp = []
for i in range(self.n_sections):
map_cpp += [self.longitudinal_tracker_cpp[i]] + [SR_cpp[i]]
map_cpp += [self.slice_beam_cpp]
avg_dt = np.zeros(turns)
std_dt = np.zeros(turns)
avg_dE = np.zeros(turns)
std_dE = np.zeros(turns)
avg_dt_cpp = np.zeros(turns)
std_dt_cpp = np.zeros(turns)
avg_dE_cpp = np.zeros(turns)
std_dE_cpp = np.zeros(turns)
for i in range(turns):
for m in map_:
m.track()
for m in map_cpp:
m.track()
avg_dt[i] = np.mean(self.beam.dt)
std_dt[i] = np.std(self.beam.dt)
avg_dE[i] = np.mean(self.beam.dE)
std_dE[i] = np.std(self.beam.dE)
avg_dt_cpp[i] = np.mean(self.beam_cpp.dt)
std_dt_cpp[i] = np.std(self.beam_cpp.dt)
avg_dE_cpp[i] = np.mean(self.beam_cpp.dE)
std_dE_cpp[i] = np.std(self.beam_cpp.dE)
        np.testing.assert_allclose(avg_dt, avg_dt_cpp, atol=atol, rtol=rtol_avg,
                                   err_msg="Python and C++ avg beam dt arrays not close")
        np.testing.assert_allclose(std_dt, std_dt_cpp, atol=atol, rtol=rtol_std,
                                   err_msg="Python and C++ std beam dt arrays not close")
        np.testing.assert_allclose(avg_dE, avg_dE_cpp, atol=atol, rtol=rtol_avg,
                                   err_msg="Python and C++ avg beam dE arrays not close")
        np.testing.assert_allclose(std_dE, std_dE_cpp, atol=atol, rtol=rtol_std,
                                   err_msg="Python and C++ std beam dE arrays not close")
def test_with_quant_exc_10t_parallel(self):
os.environ['OMP_NUM_THREADS'] = '2'
turns = 10
atol = 0
rtol_avg = 1e-2
rtol_std = 1e-2
SR = []
SR_cpp = []
for i in range(self.n_sections):
SR.append(SynchrotronRadiation(self.general_params, self.RF_sct_par[i],
self.beam, self.rho,
quantum_excitation=True, python=True))
SR_cpp.append(SynchrotronRadiation(self.general_params, self.RF_sct_par_cpp[i],
self.beam_cpp, self.rho,
quantum_excitation=True, python=False))
map_ = []
for i in range(self.n_sections):
map_ += [self.longitudinal_tracker[i]] + [SR[i]]
map_ += [self.slice_beam]
map_cpp = []
for i in range(self.n_sections):
map_cpp += [self.longitudinal_tracker_cpp[i]] + [SR_cpp[i]]
map_cpp += [self.slice_beam_cpp]
avg_dt = np.zeros(turns)
std_dt = np.zeros(turns)
avg_dE = np.zeros(turns)
std_dE = np.zeros(turns)
avg_dt_cpp = np.zeros(turns)
std_dt_cpp = np.zeros(turns)
avg_dE_cpp = np.zeros(turns)
std_dE_cpp = np.zeros(turns)
for i in range(turns):
for m in map_:
m.track()
for m in map_cpp:
m.track()
avg_dt[i] = np.mean(self.beam.dt)
std_dt[i] = np.std(self.beam.dt)
avg_dE[i] = np.mean(self.beam.dE)
std_dE[i] = np.std(self.beam.dE)
avg_dt_cpp[i] = np.mean(self.beam_cpp.dt)
std_dt_cpp[i] = np.std(self.beam_cpp.dt)
avg_dE_cpp[i] = np.mean(self.beam_cpp.dE)
std_dE_cpp[i] = np.std(self.beam_cpp.dE)
        np.testing.assert_allclose(avg_dt, avg_dt_cpp, atol=atol, rtol=rtol_avg,
                                   err_msg="Python and C++ avg beam dt arrays not close")
        np.testing.assert_allclose(std_dt, std_dt_cpp, atol=atol, rtol=rtol_std,
                                   err_msg="Python and C++ std beam dt arrays not close")
        np.testing.assert_allclose(avg_dE, avg_dE_cpp, atol=atol, rtol=rtol_avg,
                                   err_msg="Python and C++ avg beam dE arrays not close")
        np.testing.assert_allclose(std_dE, std_dE_cpp, atol=atol, rtol=rtol_std,
                                   err_msg="Python and C++ std beam dE arrays not close")
def test_no_quant_exc_100t_parallel(self):
os.environ['OMP_NUM_THREADS'] = '2'
turns = 100
atol = 0
rtol_avg = 1e-7
rtol_std = 1e-7
SR = []
SR_cpp = []
for i in range(self.n_sections):
SR.append(SynchrotronRadiation(self.general_params, self.RF_sct_par[i],
self.beam, self.rho,
quantum_excitation=False, python=True))
SR_cpp.append(SynchrotronRadiation(self.general_params, self.RF_sct_par_cpp[i],
self.beam_cpp, self.rho,
quantum_excitation=False, python=False))
map_ = []
for i in range(self.n_sections):
map_ += [self.longitudinal_tracker[i]] + [SR[i]]
map_ += [self.slice_beam]
map_cpp = []
for i in range(self.n_sections):
map_cpp += [self.longitudinal_tracker_cpp[i]] + [SR_cpp[i]]
map_cpp += [self.slice_beam_cpp]
avg_dt = np.zeros(turns)
std_dt = np.zeros(turns)
avg_dE = np.zeros(turns)
std_dE = np.zeros(turns)
avg_dt_cpp = np.zeros(turns)
std_dt_cpp = np.zeros(turns)
avg_dE_cpp = np.zeros(turns)
std_dE_cpp = np.zeros(turns)
for i in range(turns):
for m in map_:
m.track()
for m in map_cpp:
m.track()
avg_dt[i] = np.mean(self.beam.dt)
std_dt[i] = np.std(self.beam.dt)
avg_dE[i] = np.mean(self.beam.dE)
std_dE[i] = np.std(self.beam.dE)
avg_dt_cpp[i] = np.mean(self.beam_cpp.dt)
std_dt_cpp[i] = np.std(self.beam_cpp.dt)
avg_dE_cpp[i] = np.mean(self.beam_cpp.dE)
std_dE_cpp[i] = np.std(self.beam_cpp.dE)
        np.testing.assert_allclose(avg_dt, avg_dt_cpp, atol=atol, rtol=rtol_avg,
                                   err_msg="Python and C++ avg beam dt arrays not close")
        np.testing.assert_allclose(std_dt, std_dt_cpp, atol=atol, rtol=rtol_std,
                                   err_msg="Python and C++ std beam dt arrays not close")
        np.testing.assert_allclose(avg_dE, avg_dE_cpp, atol=atol, rtol=rtol_avg,
                                   err_msg="Python and C++ avg beam dE arrays not close")
        np.testing.assert_allclose(std_dE, std_dE_cpp, atol=atol, rtol=rtol_std,
                                   err_msg="Python and C++ std beam dE arrays not close")
def test_with_quant_exc_100t_parallel(self):
os.environ['OMP_NUM_THREADS'] = '2'
turns = 100
atol = 0
rtol_avg = 1e-2
rtol_std = 1e-1
SR = []
SR_cpp = []
for i in range(self.n_sections):
SR.append(SynchrotronRadiation(self.general_params, self.RF_sct_par[i],
self.beam, self.rho,
quantum_excitation=True, python=True))
SR_cpp.append(SynchrotronRadiation(self.general_params, self.RF_sct_par_cpp[i],
self.beam_cpp, self.rho,
quantum_excitation=True, python=False))
map_ = []
for i in range(self.n_sections):
map_ += [self.longitudinal_tracker[i]] + [SR[i]]
map_ += [self.slice_beam]
map_cpp = []
for i in range(self.n_sections):
map_cpp += [self.longitudinal_tracker_cpp[i]] + [SR_cpp[i]]
map_cpp += [self.slice_beam_cpp]
avg_dt = np.zeros(turns)
std_dt = np.zeros(turns)
avg_dE = np.zeros(turns)
std_dE = np.zeros(turns)
avg_dt_cpp = np.zeros(turns)
std_dt_cpp = np.zeros(turns)
avg_dE_cpp = np.zeros(turns)
std_dE_cpp = np.zeros(turns)
for i in range(turns):
for m in map_:
m.track()
for m in map_cpp:
m.track()
avg_dt[i] = np.mean(self.beam.dt)
std_dt[i] = np.std(self.beam.dt)
avg_dE[i] = np.mean(self.beam.dE)
std_dE[i] = np.std(self.beam.dE)
avg_dt_cpp[i] = np.mean(self.beam_cpp.dt)
std_dt_cpp[i] = np.std(self.beam_cpp.dt)
avg_dE_cpp[i] = np.mean(self.beam_cpp.dE)
std_dE_cpp[i] = np.std(self.beam_cpp.dE)
        np.testing.assert_allclose(avg_dt, avg_dt_cpp, atol=atol, rtol=rtol_avg,
                                   err_msg="Python and C++ avg beam dt arrays not close")
        np.testing.assert_allclose(std_dt, std_dt_cpp, atol=atol, rtol=rtol_std,
                                   err_msg="Python and C++ std beam dt arrays not close")
        np.testing.assert_allclose(avg_dE, avg_dE_cpp, atol=atol, rtol=rtol_avg,
                                   err_msg="Python and C++ avg beam dE arrays not close")
        np.testing.assert_allclose(std_dE, std_dE_cpp, atol=atol, rtol=rtol_std,
                                   err_msg="Python and C++ std beam dE arrays not close")
if __name__ == '__main__':
unittest.main()
|
dquartul/BLonD
|
unittests/synchrotron_radiation/test_synch_rad.py
|
Python
|
gpl-3.0
| 32,285
|
[
"Gaussian"
] |
0ba0f5a185b0b855c046814087259077962933ac8bc881010323bbbbf1598108
|
"""
Base class for models. Encapsulates the model description
and getters and setters for the parameters of the model.
Also includes hyperparameters
"""
from parameters import Parameter
from optofit.inference.distributions import *
from optofit.neuron.channels import *
from optofit.simulation.stimulus import Stimulus
from collections import defaultdict
from optofit.population.population import Population
from optofit.neuron.neuron import Neuron
from optofit.neuron.compartment import CalciumCompartment
from optofit.neuron.channels import *
from optofit.observation.observable import NewDirectCompartmentVoltage, LowPassCompartmentVoltage, IndependentObservations, LinearFluorescence
class Model(object):
"""
Scott's proposal for a new model class. Should contain:
1. a population of neurons
2. a (set of) observation model(s).
3. a set of stimuli
"""
def __init__(self):
self.population = None
self.observation = None
# self.stimuli = []
self.data_sequences = []
def add_population(self, population):
"""
Add a population of neurons to the model.
"""
assert self.population is None, "Only supporting one population"
self.population = population
def add_observation(self, observation):
"""
Add a new observation to the model. E.g. a fluorescence observation
would give a noisy version of the voltage of each neuron compartment.
"""
        assert self.observation is None, "Only supporting one observation model"
self.observation = observation
def add_data(self, name, t, stimuli, observations):
"""
Add a set of stimuli and observations for a given experiment.
:param stimuli: A dict of {'stimulus name' : {'input name' : value}}
:param observations: A dict of {'obs name' : {'output name' : value}}
"""
# TODO: Check that the given stimuli and observations correspond to
# TODO: stimulus and observation components in the model
T = len(t)
# Initialize the latent, state, and input sequences of the population
Z = np.zeros((T,), dtype=self.population.latent_dtype)
I = np.zeros((T,), dtype=self.population.input_dtype)
S = np.zeros((T,), dtype=self.population.state_dtype)
# Convert the observations to the observation dtype
# Set the inputs using stimuli
if isinstance(stimuli, Stimulus):
stimuli = [stimuli]
else:
assert isinstance(stimuli, list)
for stim in stimuli:
stim.set_input(t, I)
# Create a new DataSequence object
self.data_sequences.append(DataSequence(name, t, stimuli, observations,
Z, I, S))
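    # A minimal, illustrative call (the stimulus object and the observation
    # dict below are hypothetical placeholders, not names defined in this
    # module):
    # model.add_data('trial0', t, [step_stimulus],
    #                {'fluorescence': {'F': fluorescence_trace}})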
def add_data_sequence(self, data_sequence):
self.data_sequences.append(data_sequence)
def point_parameter_model(values):
model = Model()
population = Population('population', model)
neuron = Neuron('neuron', population)
body = CalciumCompartment('body', neuron)
channel_constructor_dict = {
'leak': LeakChannel,
'ca3kdr': Ca3KdrChannel,
'ca3ka': Ca3KaChannel,
'ca3na': Ca3NaChannel,
'ca3ca': Ca3CaChannel,
'ca3kahp': Ca3KahpChannel,
'ca3kc': Ca3KcChannel,
'chr2': ChR2Channel
}
for ch, d in values.iteritems():
if ch in channel_constructor_dict:
channel = channel_constructor_dict[ch](
ch, body,
                Parameter('g_' + ch, d['g'], lb=0.0),
                Parameter('E_' + ch, d['E'], lb=0.0)
)
body.add_channel(channel)
else:
print "Warning: ", ch, " not in dict"
neuron.add_compartment(body, None)
population.add_neuron(neuron)
model.add_population(population)
return model, body
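# Illustrative usage of point_parameter_model; the conductance/reversal values
# below are placeholders chosen for the example, not suggested defaults:
# model, body = point_parameter_model({'chr2': {'g': 1.0, 'E': 0.0}})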
class DataSequence(object):
"""
Container for data sequences. Holds the time points, the stimuli, the
observations, and also the latent state sequence of the model.
"""
def __init__(self, name, t, stimuli, observations, latent, inpt, states):
self.name = name
self.t = t
self.T = len(t)
self.stimuli = stimuli
self.observations = observations
self.latent = latent
self.input = inpt
self.states = states
class OldModel(object):
_description = {}
_parameters = []
_hyperparameters = []
def __init__(self, desc , params, hypers):
self._description = desc
self._parameters = params
self._hyperparameters = hypers
@property
def description(self):
return self._description
# The parameters property refers to all of the model settings
# that will be inferred, alongside the voltage trace.
@property
def parameters(self):
return self._parameters
# The hyperparameters property refers to settings that will
# be fixed for a given run of the inference algorithm.
@property
def hyperparameters(self):
return self._hyperparameters
def get_parameter(self, name):
for p in self.parameters:
if p.name == name:
return p
def set_parameter(self, name, value):
parameter_set = False
for p in self.parameters:
if p.name == name:
p.value = value
parameter_set = True
if not parameter_set:
print 'WARNING: Parameter %s not found!' % name
def merge_models(models):
"""
This function takes in a list of models and merges them
"""
desc = defaultdict(dict)
params = []
hypers = []
for m in models:
for k, v in m.description.iteritems():
if isinstance(v, dict):
desc[k].update(v)
else:
if k in desc:
print "Warning: Overwrote value ", desc[k], " with ", v
desc[k] = v
params += m.parameters
hypers += m.hyperparameters
desc = dict(desc)
return OldModel(desc, params, hypers)
def make_single_compartment_model(name, channels, comp_type="compartment"):
"""
Make a single compartment model with the specified channels.
NOTE: The hyperparameters are hardcoded in this function!
"""
parameters = []
hyperparameters = []
desc = \
{
# Neuron wide properties
'type' : 'neuron',
'name' : name,
'C' : 1,
# Single compartment with only a leak channel
'compartment1' :
{
'type' : 'compartment',
'compartment_type' : comp_type,
'name' : 'compartment1',
}
}
# Make the channels
for ch in channels:
if ch == 'leak':
leak, leak_params, leak_hypers = _make_leak_channel()
# Add the channel to the description
desc['compartment1']['leak'] = leak
# Add the parameters and hyperparameters to our list
parameters.extend(leak_params)
hyperparameters.extend(leak_hypers)
elif ch == 'Na':
na, na_params, na_hypers = _make_na_channel()
# Add the channel to the description
desc['compartment1']['Na'] = na
# Add the parameters and hyperparameters to our list
parameters.extend(na_params)
hyperparameters.extend(na_hypers)
elif ch == 'Ca3Na':
na, na_params, na_hypers = _make_ca3na_channel()
# Add the channel to the description
desc['compartment1']['Ca3Na'] = na
# Add the parameters and hyperparameters to our list
parameters.extend(na_params)
hyperparameters.extend(na_hypers)
elif ch == 'Kdr':
kdr, kdr_params, kdr_hypers = _make_kdr_channel()
# Add the channel to the description
desc['compartment1']['Kdr'] = kdr
# Add the parameters and hyperparameters to our list
parameters.extend(kdr_params)
hyperparameters.extend(kdr_hypers)
elif ch == 'Ca3Kdr':
kdr, kdr_params, kdr_hypers = _make_ca3kdr_channel()
# Add the channel to the description
desc['compartment1']['Ca3Kdr'] = kdr
# Add the parameters and hyperparameters to our list
parameters.extend(kdr_params)
hyperparameters.extend(kdr_hypers)
elif ch == 'Ca3Ka':
ka, ka_params, ka_hypers = _make_ca3ka_channel()
# Add the channel to the description
desc['compartment1']['Ca3Ka'] = ka
# Add the parameters and hyperparameters to our list
parameters.extend(ka_params)
hyperparameters.extend(ka_hypers)
elif ch == 'Ca3Ca':
ca, ca_params, ca_hypers = _make_ca3ca_channel()
# Add the channel to the description
desc['compartment1']['Ca3Ca'] = ca
# Add the parameters and hyperparameters to our list
parameters.extend(ca_params)
hyperparameters.extend(ca_hypers)
elif ch == 'Ca3Kahp':
kahp, kahp_params, kahp_hypers = _make_ca3kahp_channel()
# Add the channel to the description
desc['compartment1']['Ca3Kahp'] = kahp
# Add the parameters and hyperparameters to our list
parameters.extend(kahp_params)
hyperparameters.extend(kahp_hypers)
elif ch == 'Ca3Kc':
ca3kc, ca3kc_params, ca3kc_hypers = _make_ca3kc_channel()
# Add the channel to the description
desc['compartment1']['Ca3Kc'] = ca3kc
# Add the parameters and hyperparameters to our list
parameters.extend(ca3kc_params)
hyperparameters.extend(ca3kc_hypers)
elif ch == 'ChR2':
chr2, chr2_params, chr2_hypers = _make_chr2_channel()
desc['compartment1']['ChR2'] = chr2
parameters.extend(chr2_params)
hyperparameters.extend(chr2_hypers)
return OldModel(desc, parameters, hyperparameters)
def make_single_compartment_observations(observables):
mapping = {'DirectVoltage': obs_model._make_direct_voltage}
desc = {'compartment1': {}}
parameters = []
hyperparameters = []
comp = desc['compartment1']
for obs in observables:
if obs in mapping:
d, params, hypers = mapping[obs]()
comp[obs] = d
parameters.extend(params)
hyperparameters.extend(hypers)
return OldModel(desc, parameters, hyperparameters)
def make_single_compartment_model_with_observations(name, channels, observables, comp_type="calcium"):
"""
Make a single compartment model with the specified channels and observables
"""
neuron_model = make_single_compartment_model(name, channels, comp_type)
observation_model = make_single_compartment_observations(observables)
return merge_models([neuron_model, observation_model])
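# Illustrative usage; the channel and observable names follow the dispatch
# tables above, but this particular combination is only an example:
# model = make_single_compartment_model_with_observations(
#     'neuron1', ['leak', 'Na', 'Kdr'], ['DirectVoltage'])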
def _make_leak_channel():
E_leak = Parameter('E_leak', -60.0)
# Hard code the gamma distribution over the leak conductance
a_g_leak = Parameter('a_g_leak', 2.0, lb=1.0)
b_g_leak = Parameter('b_g_leak', 10.0, lb=0.0)
g_leak = Parameter('g_leak', 0.2,
distribution=GammaDistribution(a_g_leak.value, b_g_leak.value),
lb=0.0)
leak = \
{
'type' : 'channel',
'channel_type' : 'leak',
'name' : 'leak',
'E' : E_leak,
# Gamma distributed leak conductance
'a_g_leak' : a_g_leak,
'b_g_leak' : b_g_leak,
'g' : g_leak
}
return leak, [E_leak, g_leak], [a_g_leak, b_g_leak]
def _make_na_channel():
E_na = Parameter('E_Na', 50.0)
# Hard code the gamma distribution over the leak conductance
a_g_na = Parameter('a_g_na', 5.0, lb=1.0)
b_g_na = Parameter('b_g_na', 0.33, lb=0.0)
g_na = Parameter('g_na', 15.0,
distribution=GammaDistribution(a_g_na.value, b_g_na.value),
lb=0.0)
na = \
{
'type' : 'channel',
'channel_type' : 'sodium',
'name' : 'Na',
'E' : E_na,
# Gamma distributed Na conductance
'a_g_na' : a_g_na,
'b_g_na' : b_g_na,
'g' : g_na
}
return na, [E_na, g_na], [a_g_na, b_g_na]
def _make_ca3na_channel():
E_na = Parameter('E_Na', 50.0)
# Hard code the gamma distribution over the leak conductance
a_g_na = Parameter('a_g_na', 5.0, lb=1.0)
b_g_na = Parameter('b_g_na', 0.33, lb=0.0)
g_na = Parameter('g_ca3na', 15.0,
distribution=GammaDistribution(a_g_na.value, b_g_na.value),
lb=0.0)
na = \
{
'type' : 'channel',
'channel_type' : 'ca3_sodium',
'name' : 'Ca3Na',
'E' : E_na,
# Gamma distributed Na conductance
'a_g_na' : a_g_na,
'b_g_na' : b_g_na,
'g' : g_na
}
return na, [E_na, g_na], [a_g_na, b_g_na]
def _make_kdr_channel():
E_kdr = Parameter('E_K', -77.0)
# Hard code the gamma distribution over the leak conductance
a_g_kdr = Parameter('a_g_kdr', 6.0, lb=1.0)
b_g_kdr = Parameter('b_g_kdr', 1.0, lb=0.0)
g_kdr = Parameter('g_kdr', 6.0,
distribution=GammaDistribution(a_g_kdr.value, b_g_kdr.value),
lb=0.0)
kdr = \
{
'type' : 'channel',
'channel_type' : 'Kdr',
'name' : 'Kdr',
'E' : E_kdr,
# Gamma distributed Kdr conductance
'a_g_kdr' : a_g_kdr,
'b_g_kdr' : b_g_kdr,
'g' : g_kdr
}
return kdr, [E_kdr, g_kdr], [a_g_kdr, b_g_kdr]
def _make_ca3kdr_channel():
E_ca3kdr = Parameter('E_K', -80.0)
# Hard code the gamma distribution over the leak conductance
a_g_ca3kdr = Parameter('a_g_ca3kdr', 6.0, lb=1.0)
b_g_ca3kdr = Parameter('b_g_ca3kdr', 1.0, lb=0.0)
g_ca3kdr = Parameter('g_ca3kdr', 6.0,
distribution=GammaDistribution(a_g_ca3kdr.value, b_g_ca3kdr.value),
lb=0.0)
ca3kdr = \
{
'type' : 'channel',
'channel_type' : 'Ca3Kdr',
'name' : 'Ca3Kdr',
'E' : E_ca3kdr,
# Gamma distributed Kdr conductance
'a_g_kdr' : a_g_ca3kdr,
'b_g_kdr' : b_g_ca3kdr,
'g' : g_ca3kdr
}
return ca3kdr, [E_ca3kdr, g_ca3kdr], [a_g_ca3kdr, b_g_ca3kdr]
def _make_ca3ka_channel():
E_ca3ka = Parameter('E_K', -80.0)
# Hard code the gamma distribution over the leak conductance
a_g_ca3ka = Parameter('a_g_ca3ka', 2.0, lb=1.0)
b_g_ca3ka = Parameter('b_g_ca3ka', 2.0, lb=0.0)
g_ca3ka = Parameter('g_ca3ka', 1.0,
distribution=GammaDistribution(a_g_ca3ka.value, b_g_ca3ka.value),
lb=0.0)
ca3ka = \
{
'type' : 'channel',
'channel_type' : 'Ca3Ka',
'name' : 'Ca3Ka',
'E' : E_ca3ka,
# Gamma distributed Ka conductance
'a_g_ca3ka' : a_g_ca3ka,
'b_g_ca3ka' : b_g_ca3ka,
'g' : g_ca3ka
}
return ca3ka, [E_ca3ka, g_ca3ka], [a_g_ca3ka, b_g_ca3ka]
def _make_ca3ca_channel():
E_ca = Parameter('E_Ca', 80.0)
# Hard code the gamma distribution over the leak conductance
# I have no idea what this prior should be
a_g_ca = Parameter('a_g_ca', 2.0, lb=1.0)
b_g_ca = Parameter('b_g_ca', 2.0, lb=0.0)
g_ca = Parameter('g_ca3ca', 1.0,
distribution=GammaDistribution(a_g_ca.value, b_g_ca.value),
lb=0.0)
ca = \
{
'type' : 'channel',
'channel_type' : 'ca3_calcium',
'name' : 'Ca3Ca',
'E' : E_ca,
        # Gamma distributed Ca conductance
'a_g_ca' : a_g_ca,
'b_g_ca' : b_g_ca,
'g' : g_ca
}
return ca, [E_ca, g_ca], [a_g_ca, b_g_ca]
def _make_ca3kahp_channel():
E_kahp = Parameter('E_Kahp', -80)
# Hard code the gamma distribution over the leak conductance
# I have no idea what this prior should be
a_g_kahp = Parameter('a_g_kahp', 2.0, lb=1.0)
b_g_kahp = Parameter('b_g_kahp', 2.0, lb=0.0)
g_kahp = Parameter('g_ca3kahp', 1.0,
distribution=GammaDistribution(a_g_kahp.value, b_g_kahp.value),
lb=0.0)
kahp = \
{
'type' : 'channel',
'channel_type' : 'Kahp',
'name' : 'Kahp',
'E' : E_kahp,
        # Gamma distributed Kahp conductance
'a_g_kahp' : a_g_kahp,
'b_g_kahp' : b_g_kahp,
'g' : g_kahp
}
return kahp, [E_kahp, g_kahp], [a_g_kahp, b_g_kahp]
def _make_ca3kc_channel():
E_ca3kc = Parameter('E_Ca3Kc', -80)
# Hard code the gamma distribution over the leak conductance
# I have no idea what this prior should be
a_g_ca3kc = Parameter('a_g_ca3kc', 2.0, lb=1.0)
b_g_ca3kc = Parameter('b_g_ca3kc', 2.0, lb=0.0)
g_ca3kc = Parameter('g_ca3kc', 1.0,
distribution=GammaDistribution(a_g_ca3kc.value, b_g_ca3kc.value),
lb=0.0)
ca3kc = \
{
'type' : 'channel',
'channel_type' : 'Ca3Kc',
'name' : 'Ca3Kc',
'E' : E_ca3kc,
        # Gamma distributed Kc conductance
'a_g_ca3kc' : a_g_ca3kc,
'b_g_ca3kc' : b_g_ca3kc,
'g' : g_ca3kc
}
return ca3kc, [E_ca3kc, g_ca3kc], [a_g_ca3kc, b_g_ca3kc]
def _make_chr2_channel():
E_chr2 = Parameter('E_ChR2', 0)
# Hard code the gamma distribution over the leak conductance
# I have no idea what this prior should be
a_g_chr2 = Parameter('a_g_chr2', 2.0, lb=1.0)
b_g_chr2 = Parameter('b_g_chr2', 2.0, lb=0.0)
g_chr2 = Parameter('g_chr2', 1.0,
distribution=GammaDistribution(a_g_chr2.value, b_g_chr2.value),
lb=0.0)
chr2 = \
{
'type' : 'channel',
'channel_type' : 'ChR2',
'name' : 'ChR2',
'E' : E_chr2,
        # Gamma distributed ChR2 conductance
'a_g_chr2' : a_g_chr2,
'b_g_chr2' : b_g_chr2,
'g' : g_chr2
}
return chr2, [E_chr2, g_chr2], [a_g_chr2, b_g_chr2]
|
HIPS/optofit
|
optofit/models/model.py
|
Python
|
gpl-2.0
| 18,706
|
[
"NEURON"
] |
4bf246526960b887b59c0eca38ffdb1be2d494b86c122ef720e06127e2fd8ead
|
# Default Django settings. Override these with settings in the module
# pointed-to by the DJANGO_SETTINGS_MODULE environment variable.
# This is defined here as a do-nothing function because we can't import
# django.utils.translation -- that module depends on the settings.
gettext_noop = lambda s: s
####################
# CORE #
####################
DEBUG = False
TEMPLATE_DEBUG = False
# Whether the framework should propagate raw exceptions rather than catching
# them. This is useful under some testing situations and should never be used
# on a live site.
DEBUG_PROPAGATE_EXCEPTIONS = False
# Whether to use the "Etag" header. This saves bandwidth but slows down performance.
USE_ETAGS = False
# People who get code error notifications.
# In the format (('Full Name', 'email@example.com'), ('Full Name', 'anotheremail@example.com'))
ADMINS = ()
# Tuple of IP addresses, as strings, that:
# * See debug comments, when DEBUG is true
# * Receive x-headers
INTERNAL_IPS = ()
# Local time zone for this installation. All choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name (although not all
# systems may support all possibilities). When USE_TZ is True, this is
# interpreted as the default user time zone.
TIME_ZONE = 'America/Chicago'
# If you set this to True, Django will use timezone-aware datetimes.
USE_TZ = False
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# Languages we provide translations for, out of the box. The language name
# should be the utf-8 encoded local name for the language.
LANGUAGES = (
('ar', gettext_noop('Arabic')),
('az', gettext_noop('Azerbaijani')),
('bg', gettext_noop('Bulgarian')),
('bn', gettext_noop('Bengali')),
('bs', gettext_noop('Bosnian')),
('ca', gettext_noop('Catalan')),
('cs', gettext_noop('Czech')),
('cy', gettext_noop('Welsh')),
('da', gettext_noop('Danish')),
('de', gettext_noop('German')),
('el', gettext_noop('Greek')),
('en', gettext_noop('English')),
('en-gb', gettext_noop('British English')),
('es', gettext_noop('Spanish')),
('es-ar', gettext_noop('Argentinian Spanish')),
('es-mx', gettext_noop('Mexican Spanish')),
('es-ni', gettext_noop('Nicaraguan Spanish')),
('et', gettext_noop('Estonian')),
('eu', gettext_noop('Basque')),
('fa', gettext_noop('Persian')),
('fi', gettext_noop('Finnish')),
('fr', gettext_noop('French')),
('fy-nl', gettext_noop('Frisian')),
('ga', gettext_noop('Irish')),
('gl', gettext_noop('Galician')),
('he', gettext_noop('Hebrew')),
('hi', gettext_noop('Hindi')),
('hr', gettext_noop('Croatian')),
('hu', gettext_noop('Hungarian')),
('id', gettext_noop('Indonesian')),
('is', gettext_noop('Icelandic')),
('it', gettext_noop('Italian')),
('ja', gettext_noop('Japanese')),
('ka', gettext_noop('Georgian')),
('km', gettext_noop('Khmer')),
('kn', gettext_noop('Kannada')),
('ko', gettext_noop('Korean')),
('lt', gettext_noop('Lithuanian')),
('lv', gettext_noop('Latvian')),
('mk', gettext_noop('Macedonian')),
('ml', gettext_noop('Malayalam')),
('mn', gettext_noop('Mongolian')),
('nl', gettext_noop('Dutch')),
('nb', gettext_noop('Norwegian Bokmal')),
('nn', gettext_noop('Norwegian Nynorsk')),
('pa', gettext_noop('Punjabi')),
('pl', gettext_noop('Polish')),
('pt', gettext_noop('Portuguese')),
('pt-br', gettext_noop('Brazilian Portuguese')),
('ro', gettext_noop('Romanian')),
('ru', gettext_noop('Russian')),
('sk', gettext_noop('Slovak')),
('sl', gettext_noop('Slovenian')),
('sq', gettext_noop('Albanian')),
('sr', gettext_noop('Serbian')),
('sr-latn', gettext_noop('Serbian Latin')),
('sv', gettext_noop('Swedish')),
('ta', gettext_noop('Tamil')),
('te', gettext_noop('Telugu')),
('th', gettext_noop('Thai')),
('tr', gettext_noop('Turkish')),
('uk', gettext_noop('Ukrainian')),
('ur', gettext_noop('Urdu')),
('vi', gettext_noop('Vietnamese')),
('zh-cn', gettext_noop('Simplified Chinese')),
('zh-tw', gettext_noop('Traditional Chinese')),
)
# Languages using BiDi (right-to-left) layout
LANGUAGES_BIDI = ("he", "ar", "fa")
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
LOCALE_PATHS = ()
LANGUAGE_COOKIE_NAME = 'django_language'
# If you set this to True, Django will format dates, numbers and calendars
# according to the user's current locale.
USE_L10N = False
# Not-necessarily-technical managers of the site. They get broken link
# notifications and other various emails.
MANAGERS = ADMINS
# Default content type and charset to use for all HttpResponse objects, if a
# MIME type isn't manually specified. These are used to construct the
# Content-Type header.
DEFAULT_CONTENT_TYPE = 'text/html'
DEFAULT_CHARSET = 'utf-8'
# Encoding of files read from disk (template and initial SQL files).
FILE_CHARSET = 'utf-8'
# E-mail address that error messages come from.
SERVER_EMAIL = 'root@localhost'
# Whether to send broken-link emails.
SEND_BROKEN_LINK_EMAILS = False
# Database connection info.
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.dummy',
},
}
# Classes used to implement db routing behaviour
DATABASE_ROUTERS = []
# The email backend to use. For possible shortcuts see django.core.mail.
# The default is to use the SMTP backend.
# Third-party backends can be specified by providing a Python path
# to a module that defines an EmailBackend class.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# Host for sending email.
EMAIL_HOST = 'localhost'
# Port for sending email.
EMAIL_PORT = 25
# Optional SMTP authentication information for EMAIL_HOST.
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = False
# List of strings representing installed apps.
INSTALLED_APPS = ()
# List of locations of the template source files, in search order.
TEMPLATE_DIRS = ()
# List of callables that know how to import templates from various sources.
# See the comments in django/core/template/loader.py for interface
# documentation.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
# List of processors used by RequestContext to populate the context.
# Each one should be a callable that takes the request object as its
# only parameter and returns a dictionary to add to the context.
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
# 'django.core.context_processors.request',
'django.contrib.messages.context_processors.messages',
)
# Output to use in template system for invalid (e.g. misspelled) variables.
TEMPLATE_STRING_IF_INVALID = ''
# Default email address to use for various automated correspondence from
# the site managers.
DEFAULT_FROM_EMAIL = 'webmaster@localhost'
# Subject-line prefix for email messages sent with django.core.mail.mail_admins
# or ...mail_managers. Make sure to include the trailing space.
EMAIL_SUBJECT_PREFIX = '[Django] '
# Whether to append trailing slashes to URLs.
APPEND_SLASH = True
# Whether to prepend the "www." subdomain to URLs that don't have it.
PREPEND_WWW = False
# Override the server-derived value of SCRIPT_NAME
FORCE_SCRIPT_NAME = None
# List of compiled regular expression objects representing User-Agent strings
# that are not allowed to visit any page, systemwide. Use this for bad
# robots/crawlers. Here are a few examples:
# import re
# DISALLOWED_USER_AGENTS = (
# re.compile(r'^NaverBot.*'),
# re.compile(r'^EmailSiphon.*'),
# re.compile(r'^SiteSucker.*'),
# re.compile(r'^sohu-search')
# )
DISALLOWED_USER_AGENTS = ()
ABSOLUTE_URL_OVERRIDES = {}
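# Example (illustrative):
# ABSOLUTE_URL_OVERRIDES = {
#     'blogs.weblog': lambda o: "/blogs/%s/" % o.slug,
# }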
# Tuple of strings representing allowed prefixes for the {% ssi %} tag.
# Example: ('/home/html', '/var/www')
ALLOWED_INCLUDE_ROOTS = ()
# If this is an admin settings module, this should be a list of
# settings modules (in the format 'foo.bar.baz') for which this admin
# is an admin.
ADMIN_FOR = ()
# List of compiled regular expression objects representing URLs that need not
# be reported when SEND_BROKEN_LINK_EMAILS is True. Here are a few examples:
# import re
# IGNORABLE_404_URLS = (
# re.compile(r'^/apple-touch-icon.*\.png$'),
#    re.compile(r'^/favicon.ico$'),
#    re.compile(r'^/robots.txt$'),
#    re.compile(r'^/phpmyadmin/'),
# re.compile(r'\.(cgi|php|pl)$'),
# )
IGNORABLE_404_URLS = ()
# A secret key for this particular Django installation. Used in secret-key
# hashing algorithms. Set this in your settings, or Django will complain
# loudly.
SECRET_KEY = ''
# Default file storage mechanism that holds media.
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT.
# Example: "http://media.lawrence.com/media/"
MEDIA_URL = ''
# Absolute path to the directory that holds static files.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL that handles the static files served from STATIC_ROOT.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = None
# List of upload handler classes to be applied in order.
FILE_UPLOAD_HANDLERS = (
'django.core.files.uploadhandler.MemoryFileUploadHandler',
'django.core.files.uploadhandler.TemporaryFileUploadHandler',
)
# Maximum size, in bytes, of a request before it will be streamed to the
# file system instead of into memory.
FILE_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB
# Directory in which upload streamed files will be temporarily saved. A value of
# `None` will make Django use the operating system's default temporary directory
# (i.e. "/tmp" on *nix systems).
FILE_UPLOAD_TEMP_DIR = None
# The numeric mode to set newly-uploaded files to. The value should be a mode
# you'd pass directly to os.chmod; see http://docs.python.org/lib/os-file-dir.html.
FILE_UPLOAD_PERMISSIONS = None
# Python module path where user will place custom format definition.
# The directory where this setting is pointing should contain subdirectories
# named as the locales, containing a formats.py file
# (i.e. "myproject.locale" for myproject/locale/en/formats.py etc. use)
FORMAT_MODULE_PATH = None
# Default formatting for date objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'N j, Y'
# Default formatting for datetime objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATETIME_FORMAT = 'N j, Y, P'
# Default formatting for time objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
TIME_FORMAT = 'P'
# Default formatting for date objects when only the year and month are relevant.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
YEAR_MONTH_FORMAT = 'F Y'
# Default formatting for date objects when only the month and day are relevant.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
MONTH_DAY_FORMAT = 'F j'
# Default short formatting for date objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATE_FORMAT = 'm/d/Y'
# Default short formatting for datetime objects.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATETIME_FORMAT = 'm/d/Y P'
# Default formats to be used when parsing dates from input boxes, in order
# See all available format strings here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATE_INPUT_FORMATS = (
'%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
'%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
'%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
'%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
'%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
)
# Default formats to be used when parsing times from input boxes, in order
# See all available format strings here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
)
# Default formats to be used when parsing dates and times from input boxes,
# in order
# See all available format strings here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
'%m/%d/%Y %H:%M:%S.%f', # '10/25/2006 14:30:59.000200'
'%m/%d/%Y %H:%M', # '10/25/2006 14:30'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
'%m/%d/%y %H:%M:%S.%f', # '10/25/06 14:30:59.000200'
'%m/%d/%y %H:%M', # '10/25/06 14:30'
'%m/%d/%y', # '10/25/06'
)
# First day of week, to be used on calendars
# 0 means Sunday, 1 means Monday...
FIRST_DAY_OF_WEEK = 0
# Decimal separator symbol
DECIMAL_SEPARATOR = '.'
# Boolean that sets whether to add thousand separator when formatting numbers
USE_THOUSAND_SEPARATOR = False
# Number of digits that will be together, when splitting them by
# THOUSAND_SEPARATOR. 0 means no grouping, 3 means splitting by thousands...
NUMBER_GROUPING = 0
# Thousand separator symbol
THOUSAND_SEPARATOR = ','
# Do you want to manage transactions manually?
# Hint: you really don't!
TRANSACTIONS_MANAGED = False
# The User-Agent string to use when checking for URL validity through the
# isExistingURL validator.
from django import get_version
URL_VALIDATOR_USER_AGENT = "Django/%s (http://www.djangoproject.com)" % get_version()
# The tablespaces to use for each model when not specified otherwise.
DEFAULT_TABLESPACE = ''
DEFAULT_INDEX_TABLESPACE = ''
# Default X-Frame-Options header value
X_FRAME_OPTIONS = 'SAMEORIGIN'
USE_X_FORWARDED_HOST = False
# The Python dotted path to the WSGI application that Django's internal servers
# (runserver, runfcgi) will use. If `None`, the return value of
# 'django.core.wsgi.get_wsgi_application' is used, thus preserving the same
# behavior as previous versions of Django. Otherwise this should point to an
# actual WSGI application object.
WSGI_APPLICATION = None
##############
# MIDDLEWARE #
##############
# List of middleware classes to use. Order is important; in the request phase,
# these middleware classes will be applied in the order given, and in the
# response phase the middleware will be applied in reverse order.
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# 'django.middleware.http.ConditionalGetMiddleware',
# 'django.middleware.gzip.GZipMiddleware',
)
############
# SESSIONS #
############
SESSION_COOKIE_NAME = 'sessionid' # Cookie name. This can be whatever you want.
SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2 # Age of cookie, in seconds (default: 2 weeks).
SESSION_COOKIE_DOMAIN = None # A string like ".lawrence.com", or None for standard domain cookie.
SESSION_COOKIE_SECURE = False # Whether the session cookie should be secure (https:// only).
SESSION_COOKIE_PATH = '/' # The path of the session cookie.
SESSION_COOKIE_HTTPONLY = True # Whether to use the non-RFC standard httpOnly flag (IE, FF3+, others)
SESSION_SAVE_EVERY_REQUEST = False # Whether to save the session data on every request.
SESSION_EXPIRE_AT_BROWSER_CLOSE = False # Whether a user's session cookie expires when the Web browser is closed.
SESSION_ENGINE = 'django.contrib.sessions.backends.db' # The module to store session data
SESSION_FILE_PATH = None # Directory to store session files if using the file session module. If None, the backend will use a sensible default.
#########
# CACHE #
#########
# New format
CACHES = {
}
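# Example (illustrative) of the new-format cache configuration:
# CACHES = {
#     'default': {
#         'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
#     }
# }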
# The cache backend to use. See the docstring in django.core.cache for the
# possible values.
CACHE_MIDDLEWARE_KEY_PREFIX = ''
CACHE_MIDDLEWARE_SECONDS = 600
CACHE_MIDDLEWARE_ALIAS = 'default'
####################
# COMMENTS #
####################
COMMENTS_ALLOW_PROFANITIES = False
# The profanities that will trigger a validation error in
# CommentDetailsForm.clean_comment. All of these should be in lowercase.
PROFANITIES_LIST = ()
##################
# AUTHENTICATION #
##################
AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend',)
LOGIN_URL = '/accounts/login/'
LOGOUT_URL = '/accounts/logout/'
LOGIN_REDIRECT_URL = '/accounts/profile/'
# The number of days a password reset link is valid for
PASSWORD_RESET_TIMEOUT_DAYS = 3
###########
# SIGNING #
###########
SIGNING_BACKEND = 'django.core.signing.TimestampSigner'
########
# CSRF #
########
# Dotted path to callable to be used as view when a request is
# rejected by the CSRF middleware.
CSRF_FAILURE_VIEW = 'django.views.csrf.csrf_failure'
# Settings for CSRF cookie.
CSRF_COOKIE_NAME = 'csrftoken'
CSRF_COOKIE_DOMAIN = None
CSRF_COOKIE_PATH = '/'
CSRF_COOKIE_SECURE = False
############
# MESSAGES #
############
# Class to use as messages backend
MESSAGE_STORAGE = 'django.contrib.messages.storage.fallback.FallbackStorage'
# Default values of MESSAGE_LEVEL and MESSAGE_TAGS are defined within
# django.contrib.messages to avoid imports in this settings file.
###########
# LOGGING #
###########
# The callable to use to configure logging
LOGGING_CONFIG = 'django.utils.log.dictConfig'
# The default logging configuration. This sends an email to
# the site admins on every HTTP 500 error. All other log
# records are sent to the bit bucket.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# Default exception reporter filter class used in case none has been
# specifically assigned to the HttpRequest instance.
DEFAULT_EXCEPTION_REPORTER_FILTER = 'django.views.debug.SafeExceptionReporterFilter'
###########
# TESTING #
###########
# The name of the class to use to run the test suite
TEST_RUNNER = 'django.test.simple.DjangoTestSuiteRunner'
############
# FIXTURES #
############
# The list of directories to search for fixtures
FIXTURE_DIRS = ()
###############
# STATICFILES #
###############
# A list of locations of additional static files
STATICFILES_DIRS = ()
# The default file storage backend used during the build process
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
|
mixman/djangodev
|
django/conf/global_settings.py
|
Python
|
bsd-3-clause
| 20,793
|
[
"VisIt"
] |
400cf7dc397d3ce84b7da8a404cdd38ed17ead2bcb60337f4a3898db3cd5323b
|
# Copyright (C) 2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import importlib_wrapper
import os
import numpy as np
tutorial, skipIfMissingFeatures = importlib_wrapper.configure_and_import(
"@TUTORIALS_DIR@/06-active_matter/solutions/rectification_simulation.py",
cmd_arguments=[6.0], PROD_STEPS=100, PROD_LENGTH=150)
@skipIfMissingFeatures
class Tutorial(ut.TestCase):
system = tutorial.system
def test_rectification(self):
x = tutorial.system.part[:].pos[:, 0]
left_chamber = np.sum(x < tutorial.LENGTH / 2.0)
right_chamber = np.sum(x > tutorial.LENGTH / 2.0)
excess = (right_chamber - left_chamber) * 100. / tutorial.N_PART
# expecting at least 5% excess due to rectification
self.assertGreater(excess, 5.0)
def test_file_generation(self):
# test .vtk/.dat files exist
for name in ["CMS_{}.dat", "points_{}.vtk"]:
filepath = os.path.join(tutorial.outdir, name.format(tutorial.vel))
self.assertTrue(
os.path.isfile(filepath),
filepath + " not created")
if __name__ == "__main__":
ut.main()
|
KaiSzuttor/espresso
|
testsuite/scripts/tutorials/test_06-active_matter__rectification_simulation.py
|
Python
|
gpl-3.0
| 1,809
|
[
"ESPResSo",
"VTK"
] |
c3247864f41643b0389c5e448f019f2438579a9c8e319e234e695d64bdfc3157
|
import utils.dave_reader as DaveReader
import utils.dataset_helper as DsHelper
import utils.filters_helper as FltHelper
import utils.model_helper as ModelHelper
import utils.exception_helper as ExHelper
import utils.plotter as Plotter
import math
import numpy as np
import scipy as sp
import utils.dave_logger as logging
import utils.dataset_cache as DsCache
import model.dataset as DataSet
from stingray.events import EventList
from stingray.lightcurve import Lightcurve
from stingray import Powerspectrum, AveragedPowerspectrum, DynamicalPowerspectrum
from stingray import Crossspectrum, AveragedCrossspectrum
from stingray import Covariancespectrum
from stingray.varenergyspectrum import LagEnergySpectrum
from stingray.gti import cross_two_gtis
from stingray.utils import excess_variance
from stingray.modeling import PSDLogLikelihood, PSDPosterior, PSDParEst
from stingray.simulator import simulator
from stingray.pulse.search import z_n_search, epoch_folding_search, phaseogram, search_best_peaks
from stingray.pulse.pulsar import z2_n_detection_level
from astropy.stats import LombScargle, poisson_conf_interval
from config import CONFIG
import sys
# get_dataset_schema: Returns the schema of a dataset of given file
#
# @param: destination: file destination
#
def get_dataset_schema(destination):
dataset, cache_key = DaveReader.get_file_dataset(destination)
if dataset:
return dataset.get_schema()
else:
logging.debug("get_dataset_schema -> Null dataset for file: " + destination)
return None
# get_dataset_header: Returns the header info of a dataset of given file
#
# @param: destination: file destination
#
def get_dataset_header(destination):
dataset, cache_key = DaveReader.get_file_dataset(destination)
if dataset:
return dataset.get_header()
else:
logging.debug("get_dataset_header -> Null dataset for file: " + destination)
return None
# append_file_to_dataset: Appends FITS data to a dataset
#
# @param: destination: file destination or dataset cache key
# @param: next_destination: file destination of file to append
#
def append_file_to_dataset(destination, next_destination):
dataset, cache_key = DaveReader.get_file_dataset(destination)
if dataset:
        # Tries to get TSTART from dataset to set the offset for next_dataset
ds_start_time = DsHelper.get_dataset_start_time(dataset)
next_dataset, next_cache_key = DaveReader.get_file_dataset(next_destination, ds_start_time)
if next_dataset:
if DsHelper.are_datasets_of_same_type(dataset, next_dataset):
if DsHelper.is_lightcurve_dataset(dataset):
if DsHelper.get_binsize_from_lightcurve_ds(dataset) == 0 \
or DsHelper.get_binsize_from_lightcurve_ds(dataset) != DsHelper.get_binsize_from_lightcurve_ds(next_dataset):
                    logging.error('append_file_to_dataset: Bin sizes are not readable or not equal!')
return ""
                # Determine which dataset starts earliest
next_ds_start_time = DsHelper.get_dataset_start_time(next_dataset)
if next_ds_start_time < ds_start_time:
                    # Change event times and swap datasets
time_offset = ds_start_time - next_ds_start_time
DsHelper.add_time_offset_to_dataset(dataset, time_offset)
DsHelper.add_time_offset_to_dataset(next_dataset, time_offset)
tmp_ds = dataset
dataset = next_dataset
next_dataset = tmp_ds
                # Join and cache the joined dataset
new_dataset = dataset.clone()
new_hdutable = DsHelper.get_hdutable_from_dataset(new_dataset)
next_hdutable = DsHelper.get_hdutable_from_dataset(next_dataset)
new_dataset.tables[new_hdutable.id] = new_hdutable.join(next_hdutable)
new_dataset.tables["GTI"] = DsHelper.join_gti_tables(new_dataset.tables["GTI"], next_dataset.tables["GTI"])
# DsCache.remove(destination) # Removes previous cached dataset for prev key
new_cache_key = DsCache.get_key(destination + "|" + next_destination)
DsCache.add(new_cache_key, new_dataset) # Adds new cached dataset for new key
return new_cache_key
else:
logging.error('append_file_to_dataset: Datasets are not of same type!')
else:
            logging.error("append_file_to_dataset: Can't read next dataset from: " + str(next_destination))
else:
        logging.error("append_file_to_dataset: Can't read dataset from: " + str(destination))
return ""
# apply_rmf_file_to_dataset: Applies an RMF file's energy calibration to a dataset
#
# @param: destination: file destination or dataset cache key
# @param: rmf_destination: file destination of file to apply
# @param: column: column to use for the conversion: PHA, or PI for NuSTAR
#
def apply_rmf_file_to_dataset(destination, rmf_destination, column):
try:
dataset, cache_key = DaveReader.get_file_dataset(destination)
if DsHelper.is_events_dataset(dataset):
rmf_dataset, rmf_cache_key = DaveReader.get_file_dataset(rmf_destination)
if DsHelper.is_rmf_dataset(rmf_dataset):
# Applies rmf data to dataset
events_table = dataset.tables["EVENTS"]
rmf_table = rmf_dataset.tables["EBOUNDS"]
if column not in events_table.columns:
logging.warn('apply_rmf_file_to_dataset: ' + str(column) + ' column not found!')
return False
pha_data = events_table.columns[column].values
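            # Build a channel -> mean-energy lookup from the RMF EBOUNDS table
            # (midpoint of each channel's [E_MIN, E_MAX] bin), used below to
            # translate PHA/PI channel numbers into energies.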
e_avg_data = dict((channel, (min + max)/2) for channel, min, max in zip(rmf_table.columns["CHANNEL"].values,
rmf_table.columns["E_MIN"].values,
rmf_table.columns["E_MAX"].values))
e_values = []
for i in range(len(pha_data)):
if pha_data[i] in e_avg_data:
e_values.append(e_avg_data[pha_data[i]])
else:
e_values.append(0)
if "E" not in events_table.columns:
events_table.add_columns(["E"])
else:
events_table.columns["E"].clear()
events_table.columns["E"].add_values(e_values)
DsCache.remove_with_prefix("FILTERED") # Removes all filtered datasets from cache
DsCache.remove_with_prefix("LC")
DsCache.add(cache_key, dataset) # Stores dataset on cache
if len(events_table.columns["E"].values) == len(pha_data):
return list(e_avg_data.values())
except:
logging.error(ExHelper.getException('apply_rmf_file_to_dataset'))
return []
# get_plot_data: Returns the data for a plot
#
# @param: src_destination: source file destination
# @param: bck_destination: background file destination, is optional
# @param: gti_destination: gti file destination, is optional
# @param: filters: array with the filters to apply
# [{ table = "txt_table", column = "Time", from=0, to=10 }, ... ]
# @param: styles: dictionary with the plot style info
# { type = "2d", ... }
# @param: axis: array with the column names to use in plotting
# [{ table = "txt_table", column = "Time" },
# { table = "txt_table", column = "Rate" } ... ]
#
def get_plot_data(src_destination, bck_destination, gti_destination, filters, styles, axis):
try:
filters = FltHelper.get_filters_clean_color_filters(filters)
filtered_ds = get_filtered_dataset(src_destination, filters, gti_destination)
# Config checking
if "type" not in styles:
return common_error("No plot type specified on styles")
if len(axis) < 2:
return common_error("Wrong number of axis")
# Plot type mode
if styles["type"] == "2d":
return Plotter.get_plotdiv_xy(filtered_ds, axis)
elif styles["type"] == "3d":
return Plotter.get_plotdiv_xyz(filtered_ds, axis)
elif styles["type"] == "scatter":
return Plotter.get_plotdiv_scatter(filtered_ds, axis)
else:
return common_error("Wrong plot type specified on styles")
except:
logging.error(ExHelper.getException('get_plot_data'))
return common_error(ExHelper.getWarnMsg())
return None
# get_lightcurve: Returns the data for the Lightcurve
#
# @param: src_destination: source file destination
# @param: bck_destination: background file destination, is optional
# @param: gti_destination: gti file destination, is optional
# @param: filters: array with the filters to apply
# [{ table = "EVENTS", column = "Time", from=0, to=10 }, ... ]
# @param: axis: array with the column names to use in plotting
# [{ table = "EVENTS", column = "TIME" },
# { table = "EVENTS", column = "PHA" } ]
# @param: dt: The time resolution of the events.
# @param: baseline_opts: Object with the baseline parameters.
# @param: meanflux_opts: Object with the meanflux parameters.
# @param: variance_opts: Object with the excess variance parameters.
#
def get_lightcurve(src_destination, bck_destination, gti_destination,
filters, axis, dt, baseline_opts, meanflux_opts,
variance_opts):
time_vals = []
count_rate = []
error_values = []
gti_start_values = []
gti_stop_values = []
baseline = []
meanflux = []
chunk_times = []
chunk_lengths = []
mean = []
mean_err = []
excessvar = []
excessvar_err = []
excessvarmean = []
excessvarmean_err = []
fvar = []
fvar_err = []
fvarmean = []
fvarmean_err = []
chunk_mean_times = []
chunk_mean_lengths = []
confidences = []
warnmsg = []
try:
if len(axis) != 2:
return common_error("Wrong number of axis")
# Creates the lightcurve
lc = get_lightcurve_any_dataset(src_destination, bck_destination, gti_destination, filters, dt)
if not lc:
return common_error("Can't create lightcurve or is empty")
elif not math.isclose(dt, lc.dt, abs_tol=0.001):
warnmsg = ["@WARN@Overriden Bin Size: " + str(lc.dt)]
# Sets lc values
time_vals = lc.time
count_rate = lc.countrate
error_values = lc.countrate_err
# Sets gtis ranges
gti_start_values = lc.gti[:, 0]
gti_stop_values = lc.gti[:, 1]
# Gets the baseline values
if baseline_opts["niter"] > 0:
logging.debug("Preparing lightcurve baseline");
lam = baseline_opts["lam"] # 1000
p = baseline_opts["p"] # 0.01
niter = baseline_opts["niter"] # 10
baseline = lc.baseline(lam, p, niter, offset_correction=False) / dt # Baseline from count, divide by dt to get countrate
# Gets the meanflux values
if meanflux_opts["niter"] > 0:
try:
logging.debug("Preparing lightcurve meanflux");
lam = meanflux_opts["lam"] # 1000
p = meanflux_opts["p"] # 0.01
niter = meanflux_opts["niter"] # 10
meanflux = lc.baseline(lam, p, niter, offset_correction=True) / dt # Baseline from count, divide by dt to get countrate
except:
                logging.error(ExHelper.getException("get_lightcurve: Can't estimate Mean Flux"))
warnmsg = ["@WARN@Can't estimate Mean Flux, check GTIs"]
meanflux = []
# Gets the Long-Term variability values
if variance_opts and ("min_counts" in variance_opts) and (variance_opts["min_counts"] > 0):
logging.debug("Preparing lightcurve excess variance");
chunk_length = lc.estimate_chunk_length(variance_opts["min_counts"], variance_opts["min_bins"])
start, stop, res = lc.analyze_lc_chunks(chunk_length, lightcurve_meancount)
mean = nan_and_inf_to_num(res[0])
mean_err = nan_and_inf_to_num(res[1])
chunk_times = np.array([(s + e)/2 for s, e in zip(start, stop)])
            chunk_lengths = np.array([(e - s)/2 for s, e in zip(start, stop)]) # Plotted as an error bar on the x axis, so only half of each value is needed
start, stop, res = lc.analyze_lc_chunks(chunk_length, lightcurve_excvar)
excessvar = nan_and_inf_to_num(res[0])
excessvar_err = nan_and_inf_to_num(res[1])
mean_count = variance_opts["mean_count"]
len_excessvar = len(excessvar)
if mean_count > len_excessvar:
logging.warn("mean_count fixed to " + str(len_excessvar));
warnmsg = ["@WARN@Mean count fixed to " + str(len_excessvar)]
mean_count = len_excessvar
excessvarmean = get_means_from_array(excessvar, mean_count)
excessvarmean_err = get_means_from_array(excessvar_err, mean_count)
start, stop, res = lc.analyze_lc_chunks(chunk_length, lightcurve_fractional_rms)
fvar = nan_and_inf_to_num(res[0])
fvar_err = nan_and_inf_to_num(res[1])
fvarmean = get_means_from_array(fvar, mean_count)
fvarmean_err = get_means_from_array(fvar_err, mean_count)
chunk_mean_times = get_means_from_array(chunk_times, mean_count)
chunk_mean_lengths = np.array([l * mean_count for l in chunk_lengths])
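            # Four confidence bands are appended in a fixed order:
            # excess variance 90%, excess variance 99%, Fvar 90%, Fvar 99%.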
confidences += (mean_confidence_interval(excessvar, confidence=0.90))
confidences += (mean_confidence_interval(excessvar, confidence=0.99))
confidences += (mean_confidence_interval(fvar, confidence=0.90))
confidences += (mean_confidence_interval(fvar, confidence=0.99))
lc = None # Dispose memory
except:
logging.error(ExHelper.getException('get_lightcurve'))
return common_error(ExHelper.getWarnMsg())
    # Prepares the result
logging.debug("Result lightcurve .... " + str(len(time_vals)))
result = push_to_results_array([], time_vals) #0
result = push_to_results_array(result, count_rate) #1
result = push_to_results_array(result, error_values) #2
result = push_to_results_array(result, gti_start_values) #3
result = push_to_results_array(result, gti_stop_values) #4
result = push_to_results_array(result, baseline) #5
result = push_to_results_array(result, meanflux) #6
result = push_to_results_array(result, chunk_times) #7
result = push_to_results_array(result, chunk_lengths) #8
result = push_to_results_array(result, mean) #9
result = push_to_results_array(result, mean_err) #10
result = push_to_results_array(result, excessvar) #11
result = push_to_results_array(result, excessvar_err) #12
result = push_to_results_array(result, excessvarmean) #13
result = push_to_results_array(result, excessvarmean_err) #14
result = push_to_results_array(result, fvar) #15
result = push_to_results_array(result, fvar_err) #16
result = push_to_results_array(result, fvarmean) #17
result = push_to_results_array(result, fvarmean_err) #18
result = push_to_results_array(result, chunk_mean_times) #19
result = push_to_results_array(result, chunk_mean_lengths) #20
result = push_to_results_array(result, confidences) #21
result = push_to_results_array(result, warnmsg) #22
return result
# get_joined_lightcurves: Returns the joined data of LC0 and LC1
#
# @param: lc0_destination: lightcurve 0 file destination
# @param: lc1_destination: lightcurve 1 file destination
# @param: bck0_destination: lightcurve 0 background file destination
# @param: bck1_destination: lightcurve 1 background file destination
# @param: filters: array with the filters to apply
# [{ table = "RATE", column = "Time", from=0, to=10 }, ... ]
# @param: axis: array with the column names to use in plotting
# [{ table = "RATE", column = "TIME" },
# { table = "RATE", column = "PHA" } ]
# @param: dt: The time resolution of the events.
#
def get_joined_lightcurves(lc0_destination, lc1_destination, bck0_destination, bck1_destination, filters, axis, dt):
try:
if len(axis) != 2:
return common_error("Wrong number of axis")
lc0 = get_lightcurve_any_dataset(lc0_destination, bck0_destination, "", filters, dt)
if not lc0:
return common_error("Wrong dataset type for lc0")
lc1 = get_lightcurve_any_dataset(lc1_destination, bck1_destination, "", filters, dt)
if not lc1:
return common_error("Wrong dataset type for lc1")
if lc0.countrate.shape == lc1.countrate.shape:
# Prepares the result
logging.debug("Result joined lightcurves ....")
result = push_to_results_array_with_errors([], lc0.countrate, lc0.countrate_err)
result = push_to_results_array_with_errors(result, lc1.countrate, lc1.countrate_err)
return result
else:
return common_warn("Lightcurves have different durations.")
except:
logging.error(ExHelper.getException('get_joined_lightcurves'))
return common_error(ExHelper.getWarnMsg())
return None
# get_divided_lightcurves_from_colors: Returns the joined data of src_lc and ColorX / ColorY
# if len(color_filters) == 2, else if len(color_filters) == 4 returns the joined data
# of ColorZ / ColorS and ColorX / ColorY
#
# @param: src_destination: source file destination
# @param: bck_destination: background file destination, is optional
# @param: gti_destination: gti file destination, is optional
# @param: filters: array with the filters to apply
# [{ table = "EVENTS", column = "Time", from=0, to=10 }, ... ]
# @param: axis: array with the column names to use in plotting
# [{ table = "EVENTS", column = "TIME" },
# { table = "EVENTS", column = "PHA" } ]
# @param: dt: The time resolution of the events.
#
def get_divided_lightcurves_from_colors(src_destination, bck_destination, gti_destination, filters, axis, dt):
if len(axis) != 2:
return common_error("Wrong number of axis")
try:
filters = FltHelper.apply_bin_size_to_filters(filters, dt)
color_keys = FltHelper.get_color_keys_from_filters(filters)
if len(color_keys) != 2 and len(color_keys) != 4:
return common_error("Wrong number of color filters")
gti_start_values = []
gti_stop_values = []
if len(color_keys) == 2:
# Prepares SRC_LC
clean_filters = FltHelper.get_filters_clean_color_filters(filters)
filtered_ds = get_filtered_dataset(src_destination, clean_filters, gti_destination)
#Sets gtis ranges
gti_start_values = filtered_ds.tables["GTI"].columns["START"].values
gti_stop_values = filtered_ds.tables["GTI"].columns["STOP"].values
# Creates src lightcurve applying bck and gtis
src_lc = get_lightcurve_from_events_dataset(filtered_ds, bck_destination, clean_filters, gti_destination, dt)
if not src_lc:
return common_error("Cant create source lc")
# Prepares datasets from color filters
filtered_datasets = split_dataset_with_color_filters(src_destination, filters, color_keys, gti_destination)
# Creates lightcurves array applying bck and gtis from each color
logging.debug("Create color lightcurves ....")
lightcurves = get_lightcurves_from_events_datasets_array(filtered_datasets, color_keys, bck_destination, filters, gti_destination, dt)
filtered_datasets = None # Dispose memory
if len(lightcurves) == len(color_keys):
# Prepares the result
logging.debug("Result divided lightcurves ....")
if len(color_keys) == 2:
result = push_to_results_array_with_errors([], src_lc.countrate, src_lc.countrate_err)
else:
count_rate, count_rate_error = get_divided_values_and_error (lightcurves[2].countrate, lightcurves[3].countrate,
lightcurves[2].countrate_err, lightcurves[3].countrate_err)
result = push_to_results_array_with_errors([], count_rate, count_rate_error)
count_rate, count_rate_error = get_divided_values_and_error (lightcurves[0].countrate, lightcurves[1].countrate,
lightcurves[0].countrate_err, lightcurves[1].countrate_err)
result = push_to_results_array_with_errors(result, count_rate, count_rate_error)
if len(color_keys) == 2:
result = push_to_results_array(result, src_lc.time)
else:
result = push_to_results_array(result, lightcurves[0].time)
result = push_to_results_array(result, gti_start_values)
result = push_to_results_array(result, gti_stop_values)
return result
else:
return common_warn("Cant create the colors filtered ligthcurves")
except:
logging.error(ExHelper.getException('get_divided_lightcurves_from_colors'))
return common_error(ExHelper.getWarnMsg())
return None
# get_divided_lightcurve_ds: Returns a new dataset key for the LC0 divided by LC1
#
# @param: lc0_destination: lightcurve 0 file destination
# @param: lc1_destination: lightcurve 1 file destination
# @param: lc0_bck_destination: lightcurve 0 background file destination
# @param: lc1_bck_destination: lightcurve 1 background file destination
#
def get_divided_lightcurve_ds(lc0_destination, lc1_destination, lc0_bck_destination, lc1_bck_destination):
try:
count_rate_0, count_rate_error_0 = get_countrate_from_lc_ds (lc0_destination, lc0_bck_destination, "lc0_ds", "lc0_bck")
if count_rate_0 is None:
return ""
count_rate_1, count_rate_error_1 = get_countrate_from_lc_ds (lc1_destination, lc1_bck_destination, "lc1_ds", "lc1_bck")
if count_rate_1 is None:
return ""
if count_rate_0.shape == count_rate_1.shape:
lc_ds, lc_cache_key = DaveReader.get_file_dataset(lc0_destination)
ret_lc_ds = lc_ds.clone(True)
count_rate, count_rate_error = get_divided_values_and_error (count_rate_0, count_rate_1,
count_rate_error_0, count_rate_error_1)
ret_lc_ds.tables["RATE"].columns["RATE"].clear()
ret_lc_ds.tables["RATE"].columns["RATE"].add_values(count_rate, count_rate_error)
new_cache_key = DsCache.get_key(lc0_destination + "|" + lc1_destination + "|lightcurve")
DsCache.add(new_cache_key, ret_lc_ds) # Adds new cached dataset for new key
return new_cache_key
else:
logging.warn("Lightcurves have different shapes.")
return ""
except:
logging.error(ExHelper.getException('get_divided_lightcurve_ds'))
return ""
# get_power_density_spectrum: Returns the PDS of a given dataset
#
# @param: src_destination: source file destination
# @param: bck_destination: background file destination, is optional
# @param: gti_destination: gti file destination, is optional
# @param: filters: array with the filters to apply
# [{ table = "EVENTS", column = "Time", from=0, to=10 }, ... ]
# @param: axis: array with the column names to use in ploting
# [{ table = "EVENTS", column = "TIME" },
# { table = "EVENTS", column = "PHA" } ]
# @param: dt: The time resolution of the events.
# @param: nsegm: The number of segments for splitting the lightcurve
# @param: segm_size: The segment length for splitting the lightcurve
# @param: norm: The normalization of the (real part of the) power spectrum.
# @param: pds_type: Type of PDS to use, single or averaged.
# @param: df: If not 0, the frequency rebinning value
#
def get_power_density_spectrum(src_destination, bck_destination, gti_destination,
filters, axis, dt, nsegm, segm_size, norm, pds_type, df=0):
freq = []
power = []
power_err = []
duration = []
warnmsg = []
try:
pds, lc, gti = create_power_density_spectrum(src_destination, bck_destination, gti_destination,
filters, axis, dt, nsegm, segm_size, norm, pds_type, df)
if pds:
freq = pds.freq
power = pds.power
power_err = pds.power_err
duration = [lc.tseg]
warnmsg = [""]
if gti is not None and len(gti) == 0 and DsHelper.hasGTIGaps(lc.time):
warnmsg = ["@WARN@GTI gaps found on LC"]
if not math.isclose(dt, lc.dt, abs_tol=0.001):
warnmsg = ["@WARN@Overriden Bin Size: " + str(lc.dt)]
pds = None # Dispose memory
lc = None # Dispose memory
gti = None # Dispose memory
except:
logging.error(ExHelper.getException('get_power_density_spectrum'))
help_msg = ""
if len(freq) == 0 and pds_type != 'Sng':
help_msg = " Try with PDSType: Single or a smaller segment length."
warnmsg = [ExHelper.getWarnMsg() + help_msg]
# Prepares the result
logging.debug("Result power density spectrum .... " + str(len(freq)))
result = push_to_results_array([], freq)
result = push_to_results_array_with_errors(result, power, power_err)
result = push_to_results_array(result, duration)
result = push_to_results_array(result, warnmsg)
return result
# get_dynamical_spectrum: Returns the Dynamical Spectrum of a given dataset
#
# @param: src_destination: source file destination
# @param: bck_destination: background file destination, is optional
# @param: gti_destination: gti file destination, is optional
# @param: filters: array with the filters to apply
# [{ table = "EVENTS", column = "Time", from=0, to=10 }, ... ]
# @param: axis: array with the column names to use in ploting
# [{ table = "EVENTS", column = "TIME" },
# { table = "EVENTS", column = "PHA" } ]
# @param: dt: The time resolution of the events.
# @param: nsegm: The number of segments for splitting the lightcurve
# @param: segm_size: The segment length for splitting the lightcurve
# @param: norm: The normalization of the (real part of the) power spectrum.
# @param: freq_range: A tuple with minimum and maximum values of the
# range of frequency
# @param: df: If not 0, the frequency rebinning value
#
def get_dynamical_spectrum(src_destination, bck_destination, gti_destination,
filters, axis, dt, nsegm, segm_size, norm, freq_range, df=0):
freq = []
power_all = []
time = []
duration = []
warnmsg = []
try:
if len(axis) != 2:
return common_error("Wrong number of axis")
if norm not in ['frac', 'abs', 'leahy', 'none']:
return common_error("Wrong normalization")
if segm_size == 0:
segm_size = None
warnmsg = [""]
# Creates the lightcurve
lc = get_lightcurve_any_dataset(src_destination, bck_destination, gti_destination, filters, dt)
if not lc:
return common_error("Can't create lightcurve or is empty")
elif not math.isclose(dt, lc.dt, abs_tol=0.001):
warnmsg = ["@WARN@Overriden Bin Size: " + str(lc.dt)]
# Prepares GTI if passed
gti = load_gti_from_destination (gti_destination)
if not gti:
logging.debug("External GTIs not loaded using defaults")
gti = lc.gti
# Check if there is only one GTI and tries to split it by segm_size
if gti is not None and len(gti) == 1:
logging.debug("Only one GTI found, splitting by segm_size")
new_gtis = DsHelper.get_splited_gti(gti[0], segm_size)
if new_gtis is not None:
gti = new_gtis
warnmsg = ["@WARN@GTIs obtained by splitting with segment length"]
else:
warnmsg = ["@WARN@The GTI is not splitable by segment length"]
logging.warn("Can't create splitted gtis from segm_size")
# Creates the power density spectrum
logging.debug("Create dynamical spectrum")
pds = DynamicalPowerspectrum(lc=lc, segment_size=segm_size, norm=norm, gti=gti)
if pds:
if df > 0:
pds.rebin_frequency(df)
filtered_indexes = np.where((pds.freq >= freq_range[0]) & (pds.freq <= freq_range[1]))[0]
freq = pds.freq[filtered_indexes]
time = pds.time
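# pds.dyn_ps is assumed to be shaped (n_frequencies, n_segments), so the
# transpose below iterates one power spectrum per time segment, each of
# which is then trimmed to the requested frequency range.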
for tmp_pds in np.transpose(pds.dyn_ps):
power_all = push_to_results_array(power_all, tmp_pds[filtered_indexes])
duration = [lc.tseg]
if gti is not None and len(gti) == 0 and DsHelper.hasGTIGaps(lc.time):
warnmsg = ["@WARN@GTI gaps found on LC"]
pds = None # Dispose memory
lc = None # Dispose memory
except:
logging.error(ExHelper.getException('get_dynamical_spectrum'))
warnmsg = [ExHelper.getWarnMsg()]
# Prepares the result
logging.debug("Result dynamical spectrum .... " + str(len(freq)))
result = push_to_results_array([], freq)
result = push_to_results_array(result, power_all)
result = push_to_results_array(result, time)
result = push_to_results_array(result, duration)
result = push_to_results_array(result, warnmsg)
return result
# get_cross_spectrum: Returns the XS of two given datasets
#
# @param: src_destination1: source file destination
# @param: bck_destination1: background file destination, is optional
# @param: gti_destination1: gti file destination, is optional
# @param: filters1: array with the filters to apply
# [{ table = "EVENTS", column = "Time", from=0, to=10 }, ... ]
# @param: axis1: array with the column names to use in plotting
# [{ table = "EVENTS", column = "TIME" },
# { table = "EVENTS", column = "PHA" } ]
# @param: dt1: The time resolution of the events.
# @param: src_destination2: source file destination
# @param: bck_destination2: background file destination, is optional
# @param: gti_destination2: gti file destination, is optional
# @param: filters2: array with the filters to apply
# [{ table = "EVENTS", column = "Time", from=0, to=10 }, ... ]
# @param: axis2: array with the column names to use in plotting
# [{ table = "EVENTS", column = "TIME" },
# { table = "EVENTS", column = "PHA" } ]
# @param: dt2: The time resolution of the events.
# @param: nsegm: The number of segments for splitting the lightcurve
# @param: segm_size: The segment length for splitting the lightcurve
# @param: norm: The normalization of the (real part of the) cross spectrum.
# @param: xds_type: Type of XDS to use, single or averaged.
#
def get_cross_spectrum(src_destination1, bck_destination1, gti_destination1, filters1, axis1, dt1,
src_destination2, bck_destination2, gti_destination2, filters2, axis2, dt2,
nsegm, segm_size, norm, xds_type):
freq = []
power = []
power_err = []
time_lag_array = []
coherence_array = []
duration = []
warnmsg = []
try:
if len(axis1) != 2:
return common_error("Wrong number of axis 1")
if len(axis2) != 2:
return common_error("Wrong number of axis 1")
if norm not in ['frac', 'abs', 'leahy', 'none']:
return common_error("Wrong normalization")
if xds_type not in ['Sng', 'Avg']:
return common_error("Wrong cross spectrum type")
if segm_size == 0:
segm_size = None
# Creates the lightcurve 1
lc1 = get_lightcurve_any_dataset(src_destination1, bck_destination1, gti_destination1, filters1, dt1)
if not lc1:
return common_error("Cant create lightcurve 1")
# Prepares GTI1 if passed
gti1 = load_gti_from_destination (gti_destination1)
if not gti1:
logging.debug("External GTIs 1 not loaded using defaults")
gti1 = lc1.gti
# Creates the lightcurve 2
lc2 = get_lightcurve_any_dataset(src_destination2, bck_destination2, gti_destination2, filters2, dt2)
if not lc2:
return common_error("Cant create lightcurve 2")
# Prepares GTI2 if passed
gti2 = load_gti_from_destination (gti_destination2)
if not gti2:
logging.debug("External GTIs 2 not loaded using defaults")
gti2 = lc2.gti
# Joins the GTIs into one: intersects them when both are valid, otherwise uses whichever is available
gti = None
gti1_valid = gti1 is not None and len(gti1) > 0
gti2_valid = gti2 is not None and len(gti2) > 0
if gti1_valid and gti2_valid:
gti = cross_two_gtis(gti1, gti2)
logging.debug("GTIS crossed")
elif gti1_valid and not gti2_valid:
gti = gti1
logging.debug("GTI 1 applied")
elif not gti1_valid and gti2_valid:
gti = gti2
logging.debug("GTI 2 applied")
# Creates the cross spectrum
logging.debug("Create cross spectrum")
if xds_type == 'Sng':
xs = Crossspectrum(lc1=lc1, lc2=lc2, norm=norm, gti=gti)
else:
xs = AveragedCrossspectrum(lc1=lc1, lc2=lc2, segment_size=segm_size, norm=norm, gti=gti)
if xs:
if not hasattr(xs, 'pds1'):
return common_error("Cant create PDS of lightcurve 1")
if not hasattr(xs, 'pds2'):
return common_error("Cant create PDS of lightcurve 2")
freq = xs.freq
power = xs.power
power_err = xs.power_err
if xds_type == 'Sng':
time_lag, time_lag_err = xs.time_lag(), np.array([])
coherence, coherence_err = xs.coherence(), np.array([])
else:
time_lag, time_lag_err = xs.time_lag()
coherence, coherence_err = xs.coherence()
# Replace possible out-of-range values
time_lag = nan_and_inf_to_num(time_lag)
time_lag[time_lag > CONFIG.BIG_NUMBER]=0
time_lag_err = nan_and_inf_to_num(time_lag_err)
time_lag_err[time_lag_err > CONFIG.BIG_NUMBER]=0
time_lag_array = [ time_lag, time_lag_err ]
coherence = nan_and_inf_to_num(coherence)
coherence[coherence > CONFIG.BIG_NUMBER]=0
coherence_err = nan_and_inf_to_num(coherence_err)
coherence_err[coherence_err > CONFIG.BIG_NUMBER]=0
coherence_array = [ coherence, coherence_err ]
# Set duration and warnmsg
duration = [lc1.tseg, lc2.tseg]
warnmsg = []
if gti1 is not None and len(gti1) == 0 and DsHelper.hasGTIGaps(lc1.time):
warnmsg.append("@WARN@GTI gaps found on LC 1")
if gti2 is not None and len(gti2) == 0 and DsHelper.hasGTIGaps(lc2.time):
warnmsg.append("@WARN@GTI gaps found on LC 2")
xs = None # Dispose memory
lc1 = None # Dispose memory
lc2 = None # Dispose memory
except:
logging.error(ExHelper.getException('get_cross_spectrum'))
help_msg = ""
if len(freq) == 0 and xds_type != 'Sng':
help_msg = " Try with PDSType: Single a smaller segment length."
warnmsg = [ExHelper.getWarnMsg() + help_msg]
# Prepares the result
logging.debug("Result cross spectrum .... " + str(len(freq)))
result = push_to_results_array([], freq)
result = push_to_results_array_with_errors(result, power, power_err)
result = push_to_results_array(result, time_lag_array)
result = push_to_results_array(result, coherence_array)
result = push_to_results_array(result, duration)
result = push_to_results_array(result, warnmsg)
return result
# get_covariance_spectrum:
# Returns the energy values and its correlated covariance and covariance errors
#
# @param: src_destination: source file destination
# @param: bck_destination: background file destination, is optional
# @param: gti_destination: gti file destination, is optional
# @param: filters: array with the filters to apply
# [{ table = "EVENTS", column = "Time", from=0, to=10 }, ... ]
# @param: dt: The time resolution of the events.
# @param: ref_band_interest : A tuple with minimum and maximum values of the range in the band
# of interest in reference channel.
# @param: energy_range: A tuple with minimum and maximum values of the
# range of energy, send [-1, -1] to use all energies
# @param: n_bands: The number of bands to split the reference band into
# @param: std: The standard deviation
#
def get_covariance_spectrum(src_destination, bck_destination, gti_destination, filters, dt, ref_band_interest, energy_range, n_bands, std):
energy_arr = []
covariance_arr =[]
covariance_err_arr = []
try:
filters = FltHelper.get_filters_clean_color_filters(filters)
filtered_ds = get_filtered_dataset(src_destination, filters, gti_destination)
if DsHelper.is_events_dataset(filtered_ds):
events_table = filtered_ds.tables["EVENTS"]
time_vals = events_table.columns[CONFIG.TIME_COLUMN].values
if len(time_vals) > 0:
if "E" in events_table.columns:
if (time_vals[len(time_vals) - 1] - time_vals[0]) >= dt:
event_list = np.column_stack((time_vals, events_table.columns["E"].values))
band_width = energy_range[1] - energy_range[0]
band_step = band_width / n_bands
band_interest = []
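# The requested energy range is split into n_bands equal sub-bands; e.g.
# energy_range=[2, 10] with n_bands=4 yields bands [2,4],[4,6],[6,8],[8,10]
# and band centers 3, 5, 7, 9 stored in energy_arr.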
for i in range(n_bands):
band_interest.extend([[energy_range[0] + (i * band_step), energy_range[0] + ((i + 1) * band_step)]])
energy_arr.extend([(energy_range[0] + (i * band_step) + energy_range[0] + ((i + 1) * band_step))/2])
if std < 0:
std = None
# Calculates the Covariance Spectrum
cs = Covariancespectrum(event_list, dt, band_interest=band_interest, ref_band_interest=ref_band_interest, std=std)
covariance_arr = nan_and_inf_to_num(cs.covar)
covariance_err_arr = nan_and_inf_to_num(cs.covar_error)
else:
logging.warn('get_covariance_spectrum: Lc duration must be greater than bin size!')
return common_error("LC duration must be greater than bin size")
else:
logging.warn('get_covariance_spectrum: E column not found!')
return common_error("E column not found")
else:
logging.warn('get_covariance_spectrum: No events data!')
return common_error('No events data')
else:
logging.warn('get_covariance_spectrum: Wrong dataset type!')
return common_error("Wrong dataset type")
except:
logging.error(ExHelper.getException('get_covariance_spectrum'))
return common_error(ExHelper.getWarnMsg())
# Prepares the result
result = push_to_results_array([], energy_arr)
result = push_to_results_array_with_errors(result, covariance_arr, covariance_err_arr)
return result
# get_phase_lag_spectrum:
# Returns the energy values and its correlated phase lag and lag errors
#
# @param: src_destination: source file destination
# @param: bck_destination: background file destination, is optional
# @param: gti_destination: gti file destination, is optional
# @param: filters: array with the filters to apply
# [{ table = "EVENTS", column = "Time", from=0, to=10 }, ... ]
# @param: axis: array with the column names to use in plotting
# [{ table = "EVENTS", column = "TIME" },
# { table = "EVENTS", column = "PHA" } ]
# @param: dt: The time resolution of the events.
# @param: nsegm: The number of segments for splitting the lightcurve
# @param: segm_size: The segment length for splitting the lightcurve
# @param: norm: The normalization of the (real part of the) power spectrum.
# @param: pds_type: Type of PDS to use, single or averaged.
# @param: df: If not 0, the frequency rebinning value
# @param: freq_range: A tuple with minimum and maximum values of the
# range of frequency, send [-1, -1] to use all frequencies
# @param: energy_range: A tuple with minimum and maximum values of the
# range of energy, send [-1, -1] to use all energies
# @param: n_bands: The number of bands to split the reference band into
#
def get_phase_lag_spectrum(src_destination, bck_destination, gti_destination,
filters, axis, dt, nsegm, segm_size, norm, pds_type, df,
freq_range, energy_range, n_bands):
energy_arr = []
lag_arr =[]
lag_err_arr = []
duration = []
warnmsg = []
freq_min_max = [-1, -1]
try:
if len(axis) != 2:
return common_error("Wrong number of axis")
if norm not in ['frac', 'abs', 'leahy', 'none']:
return common_error("Wrong normalization")
if pds_type not in ['Sng', 'Avg']:
return common_error("Wrong power density spectrum type")
if segm_size == 0:
segm_size = None
filters = FltHelper.get_filters_clean_color_filters(filters)
filtered_ds = get_filtered_dataset(src_destination, filters, gti_destination)
if DsHelper.is_events_dataset(filtered_ds):
events_table = filtered_ds.tables["EVENTS"]
if len(events_table.columns[CONFIG.TIME_COLUMN].values) > 0:
min_time = events_table.columns[CONFIG.TIME_COLUMN].values[0]
max_time = events_table.columns[CONFIG.TIME_COLUMN].values[len(events_table.columns[CONFIG.TIME_COLUMN].values) - 1]
duration = [(max_time - min_time)]
if "E" in events_table.columns:
pds, lc, gti = create_power_density_spectrum(src_destination, bck_destination, gti_destination,
filters, axis, dt, nsegm, segm_size, norm, pds_type, df)
if pds:
if not math.isclose(dt, lc.dt, abs_tol=0.001):
warnmsg = ["@WARN@Overriden Bin Size: " + str(lc.dt)]
#Prepares the event list with energies and gtis
event_list = EventList()
event_list.time = np.array(events_table.columns[CONFIG.TIME_COLUMN].values)
event_list.ncounts = len(event_list.time)
event_list.gti = gti
event_list.energy = np.array(events_table.columns["E"].values)
# Calculates the energy range
if energy_range[0] < 0:
min_energy = min(event_list.energy)
else:
min_energy = energy_range[0]
if energy_range[1] >= min_energy:
max_energy = energy_range[1]
else:
max_energy = max(event_list.energy)
# Calculates the frequency range
if freq_range[0] < 0:
freq_low = min(pds.freq)
else:
freq_low = freq_range[0]
freq_min_max[0] = freq_low
if freq_range[1] < 0:
freq_high = max(pds.freq)
else:
freq_high = freq_range[1]
freq_min_max[1] = max([freq_min_max[1], freq_high])
# Sets the energy ranges
energy_spec = (min_energy, max_energy, n_bands, "lin")
ref_band = [min_energy, max_energy]
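# energy_spec is assumed to follow Stingray's (start, stop, n_intervals, scale)
# convention, so "lin" requests n_bands linearly spaced energy intervals, while
# ref_band uses the whole energy range as the reference band.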
# Calculates the Phase Lag Spectrum
les = LagEnergySpectrum(event_list, freq_min_max,
energy_spec, ref_band,
bin_time=dt,
segment_size=segm_size)
energy_arr = np.array([(ei[0] + ei[1])/2 for ei in les.energy_intervals])
lag_arr = les.spectrum
lag_err_arr = les.spectrum_error
else:
logging.warn("get_phase_lag_spectrum: can't create power density spectrum.")
warnmsg = ["Can't create PDS"]
else:
logging.warn('get_phase_lag_spectrum: E column not found!')
warnmsg = ['E column not found']
else:
logging.warn('get_phase_lag_spectrum: No events data!')
warnmsg = ['No events data']
else:
logging.warn('get_phase_lag_spectrum: Wrong dataset type!')
warnmsg = ['Wrong dataset type']
except:
logging.error(ExHelper.getException('get_phase_lag_spectrum'))
warnmsg = [ExHelper.getWarnMsg()]
# Prepares the result
result = push_to_results_array([], energy_arr)
result = push_to_results_array_with_errors(result, lag_arr, lag_err_arr)
result = push_to_results_array(result, duration)
result = push_to_results_array(result, warnmsg)
result = push_to_results_array(result, freq_min_max)
return result
# get_rms_spectrum:
# Returns the energy values and its correlated rms and rms errors
#
# @param: src_destination: source file destination
# @param: bck_destination: background file destination, is optional
# @param: gti_destination: gti file destination, is optional
# @param: filters: array with the filters to apply
# [{ table = "EVENTS", column = "Time", from=0, to=10 }, ... ]
# @param: axis: array with the column names to use in plotting
# [{ table = "EVENTS", column = "TIME" },
# { table = "EVENTS", column = "PHA" } ]
# @param: dt: The time resolution of the events.
# @param: nsegm: The number of segments for splitting the lightcurve
# @param: segm_size: The segment length for splitting the lightcurve
# @param: norm: The normalization of the (real part of the) power spectrum.
# @param: pds_type: Type of PDS to use, single or averaged.
# @param: df: If not 0, the frequency rebinning value
# @param: freq_range: A tuple with minimum and maximum values of the
# range of frequency, send [-1, -1] to use all frequencies
# @param: energy_range: A tuple with minimum and maximum values of the
# range of energy, send [-1, -1] to use all energies
# @param: n_bands: The number of bands to split the reference band into
# @param: white_noise_offset: The white noise level, in Leahy normalization,
# defaults to 0; if the passed value is less than -100 the white_noise_offset
# will be calculated automatically.
#
def get_rms_spectrum(src_destination, bck_destination, gti_destination,
filters, axis, dt, nsegm, segm_size, norm, pds_type, df,
freq_range, energy_range, n_bands, white_noise_offset=0.):
energy_arr = []
rms_arr =[]
rms_err_arr = []
duration = []
warnmsg = []
freq_min_max = [-1, -1]
auto_white_noise_offset = 0.0
try:
if len(axis) != 2:
return common_error("Wrong number of axis")
if norm not in ['frac', 'leahy']:
return common_error("Wrong normalization")
if pds_type not in ['Sng', 'Avg']:
return common_error("Wrong power density spectrum type")
if segm_size == 0:
segm_size = None
# Prepares GTI if passed
base_gti = load_gti_from_destination (gti_destination)
filters = FltHelper.get_filters_clean_color_filters(filters)
filtered_ds = get_filtered_dataset(src_destination, filters, gti_destination)
if DsHelper.is_events_dataset(filtered_ds):
events_table = filtered_ds.tables["EVENTS"]
if len(events_table.columns[CONFIG.TIME_COLUMN].values) > 0:
min_time = events_table.columns[CONFIG.TIME_COLUMN].values[0]
max_time = events_table.columns[CONFIG.TIME_COLUMN].values[len(events_table.columns[CONFIG.TIME_COLUMN].values) - 1]
duration = [(max_time - min_time)]
if "E" in events_table.columns:
event_list = np.column_stack((events_table.columns[CONFIG.TIME_COLUMN].values,
events_table.columns["E"].values))
auto_white_noise_offset = get_white_noise_offset (event_list, base_gti, dt, pds_type, segm_size, df)
if white_noise_offset < -100:
white_noise_offset = auto_white_noise_offset
logging.debug("get_rms_spectrum: white_noise_offset calculated automatically, value " + str(white_noise_offset))
if energy_range[0] < 0:
min_energy = min(event_list[:,1])
else:
min_energy = energy_range[0]
if energy_range[1] >= min_energy:
energy_span = energy_range[1] - min_energy
else:
energy_span = max(event_list[:,1]) - min_energy
energy_step = energy_span / n_bands
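# Each iteration below analyzes one energy sub-band of width energy_step;
# e.g. min_energy=2, energy_span=8, n_bands=4 gives bands starting at 2, 4, 6, 8.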
for i in range(n_bands):
energy_low = min_energy + (i * energy_step)
energy_high = energy_low + energy_step
energy_arr.extend([(energy_low + energy_high) / 2])
rms, rms_err = 0, 0
try:
filtered_event_list = event_list[ (energy_high>event_list[:,1]) & (event_list[:,1]>energy_low) ]
if (len(filtered_event_list) > 0):
evt_list = EventList(filtered_event_list[:,0], pi=filtered_event_list[:,1])
if evt_list and evt_list.ncounts > 1:
if (evt_list.time[evt_list.ncounts - 1] - evt_list.time[0]) >= dt:
lc = evt_list.to_lc(dt)
if lc and np.sqrt(lc.meancounts * lc.meancounts) > 0:
gti = base_gti
if not gti:
gti = lc.gti
if segm_size is not None and segm_size > lc.tseg:
segm_size = lc.tseg
logging.warn("get_rms_spectrum: range: " + str(energy_low) + " to " + str(energy_high) + ", segm_size bigger than lc.tseg, lc.tseg applied instead.")
pds = None
if pds_type == 'Sng':
pds = Powerspectrum(lc, norm=norm, gti=gti)
else:
pds = AveragedPowerspectrum(lc=lc, segment_size=segm_size, norm=norm, gti=gti)
if pds:
if df > 0:
pds = pds.rebin(df=df)
#amp, x0, fwhm, white_noise_offset = ModelHelper.fit_data_with_lorentz_and_const(pds.freq, pds.power)
#logging.info("get_rms_spectrum: amp: " + str(amp) + ", x0: " + str(x0) + ", fwhm: " + str(fwhm) + ", white_noise: " + str(white_noise))
if freq_range[0] < 0:
freq_low = min(pds.freq)
else:
freq_low = freq_range[0]
if freq_min_max[0] >= 0:
freq_min_max[0] = min([freq_min_max[0], freq_low])
else:
freq_min_max[0] = freq_low
if freq_range[1] < 0:
freq_high = max(pds.freq)
else:
freq_high = freq_range[1]
freq_min_max[1] = max([freq_min_max[1], freq_high])
rms, rms_err = pds.compute_rms(freq_low, freq_high, white_noise_offset)
else:
logging.warn("get_rms_spectrum: can't create power density spectrum. Energy range: " + str(energy_low) + " to " + str(energy_high))
else:
logging.warn("get_rms_spectrum: can't create lightcurve or is invalid. Energy range: " + str(energy_low) + " to " + str(energy_high))
else:
logging.warn("get_rms_spectrum: can't create lightcurve. Not enougth duration. Energy range: " + str(energy_low) + " to " + str(energy_high))
else:
logging.warn("get_rms_spectrum: can't create eventlist or counts are 0. Energy range: " + str(energy_low) + " to " + str(energy_high) + ", counts: " + str(len(filtered_event_list)))
else:
logging.warn("get_rms_spectrum: range: " + str(energy_low) + " to " + str(energy_high) + " has no events")
except:
logging.warn(ExHelper.getException('get_rms_spectrum: Energy range: ' + str(energy_low) + ' to ' + str(energy_high)))
rms_arr.extend([nan_and_inf_to_num(rms)])
rms_err_arr.extend([nan_and_inf_to_num(rms_err)])
else:
logging.warn('get_rms_spectrum: E column not found!')
warnmsg = ['E column not found']
else:
logging.warn('get_rms_spectrum: No events data!')
warnmsg = ['No events data']
else:
logging.warn('get_rms_spectrum: Wrong dataset type!')
warnmsg = ['Wrong dataset type']
except:
logging.error(ExHelper.getException('get_rms_spectrum'))
warnmsg = [ExHelper.getWarnMsg()]
# Prepares the result
result = push_to_results_array([], energy_arr)
result = push_to_results_array_with_errors(result, rms_arr, rms_err_arr)
result = push_to_results_array(result, duration)
result = push_to_results_array(result, warnmsg)
result = push_to_results_array(result, freq_min_max)
result = push_to_results_array(result, auto_white_noise_offset)
return result
# get_rms_vs_countrate:
# Returns the count rate values and their correlated rms and rms errors
#
# @param: src_destination: source file destination
# @param: bck_destination: background file destination, is optional
# @param: gti_destination: gti file destination, is optional
# @param: filters: array with the filters to apply
# [{ table = "EVENTS", column = "Time", from=0, to=10 }, ... ]
# @param: axis: array with the column names to use in plotting
# [{ table = "EVENTS", column = "TIME" },
# { table = "EVENTS", column = "PHA" } ]
# @param: dt: The time resolution of the events.
# @param: nsegm: The number of segments for splitting the lightcurve
# @param: df: If not 0, the frequency rebinning value
# @param: freq_range: A tuple with minimum and maximum values of the
# range of frequency, send [-1, -1] to use all frequencies
# @param: energy_range: A tuple with minimum and maximum values of the
# range of energy, send [-1, -1] to use all energies
# @param: white_noise_offset: The white noise level, in Leahy normalization,
# defaults to 0; if the passed value is less than -100 the white_noise_offset
# will be calculated automatically.
#
def get_rms_vs_countrate(src_destination, bck_destination, gti_destination,
filters, axis, dt, nsegm, df, freq_range, energy_range,
white_noise_offset=0.):
countrate_arr = []
rms_arr =[]
rms_err_arr = []
duration = []
warnmsg = []
freq_min_max = [-1, -1]
auto_white_noise_offset = 0.0
try:
if len(axis) != 2:
return common_error("Wrong number of axis")
# Prepares GTI if passed
base_gti = load_gti_from_destination (gti_destination)
filters = FltHelper.get_filters_clean_color_filters(filters)
filtered_ds = get_filtered_dataset(src_destination, filters, gti_destination)
if DsHelper.is_events_dataset(filtered_ds):
events_table = filtered_ds.tables["EVENTS"]
if len(events_table.columns[CONFIG.TIME_COLUMN].values) > 0:
min_time = events_table.columns[CONFIG.TIME_COLUMN].values[0]
max_time = events_table.columns[CONFIG.TIME_COLUMN].values[len(events_table.columns[CONFIG.TIME_COLUMN].values) - 1]
duration = [(max_time - min_time)]
if "E" in events_table.columns:
event_list = np.column_stack((events_table.columns[CONFIG.TIME_COLUMN].values,
events_table.columns["E"].values))
auto_white_noise_offset = get_white_noise_offset (event_list, base_gti, dt, 'Sng', 0, df)
if white_noise_offset < -100:
white_noise_offset = auto_white_noise_offset
logging.debug("get_rms_vs_countrate: white_noise_offset calculated automatically, value " + str(white_noise_offset))
if energy_range[0] < 0:
min_energy = min(event_list[:,1])
else:
min_energy = energy_range[0]
if energy_range[1] >= min_energy:
max_energy = energy_range[1]
else:
max_energy = max(event_list[:,1])
event_list = event_list[ (max_energy>event_list[:,1]) & (event_list[:,1]>min_energy) ]
time_step = duration[0] / nsegm
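# The observation is split into nsegm equal time windows of length time_step,
# and an rms/count-rate pair is computed independently for each window.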
for i in range(nsegm):
time_low = min_time + (i * time_step)
time_high = time_low + time_step
try:
filtered_event_list = event_list[ (time_high>event_list[:,0]) & (event_list[:,0]>time_low) ]
if (len(filtered_event_list) > 0):
evt_list = EventList(filtered_event_list[:,0], pi=filtered_event_list[:,1])
if evt_list and evt_list.ncounts > 1:
if (evt_list.time[evt_list.ncounts - 1] - evt_list.time[0]) >= dt:
lc = evt_list.to_lc(dt)
if lc and np.sqrt(lc.meancounts * lc.meancounts) > 0:
rms, rms_err = 0, 0
gti = base_gti
if not gti:
gti = lc.gti
pds = Powerspectrum(lc, norm='frac', gti=gti)
if pds:
if df > 0:
pds = pds.rebin(df=df)
if len(pds.freq):
if freq_range[0] < 0:
freq_low = min(pds.freq)
else:
freq_low = freq_range[0]
if freq_min_max[0] >= 0:
freq_min_max[0] = min([freq_min_max[0], freq_low])
else:
freq_min_max[0] = freq_low
if freq_range[1] < 0:
freq_high = max(pds.freq)
else:
freq_high = freq_range[1]
freq_min_max[1] = max([freq_min_max[1], freq_high])
rms, rms_err = pds.compute_rms(freq_low, freq_high, white_noise_offset)
else:
logging.warn("get_rms_vs_countrate: can't create power density spectrum. Time range: " + str(time_low) + " to " + str(time_high))
countrate_arr.extend([lc.meanrate])
rms_arr.extend([rms])
rms_err_arr.extend([rms_err])
else:
logging.warn("get_rms_vs_countrate: can't create lightcurve. Time range: " + str(time_low) + " to " + str(time_high))
else:
logging.warn("get_rms_vs_countrate: can't create lightcurve. Not enougth duration. Time range: " + str(time_low) + " to " + str(time_high))
else:
logging.warn("get_rms_vs_countrate: can't create eventlist or counts are 0. Time range: " + str(time_low) + " to " + str(time_high) + ", counts: " + str(len(filtered_event_list)))
else:
logging.warn("get_rms_vs_countrate: Time range: " + str(time_low) + " to " + str(time_high) + " has no events")
except:
logging.warn(ExHelper.getException('get_rms_vs_countrate: Time range: ' + str(time_low) + ' to ' + str(time_high)))
# Sort by count rate so the X axis values are monotonically increasing
sorted_idx = np.argsort(countrate_arr)
countrate_arr = np.array(countrate_arr)[sorted_idx]
rms_arr = np.array(rms_arr)[sorted_idx]
rms_err_arr = np.array(rms_err_arr)[sorted_idx]
else:
logging.warn('get_rms_vs_countrate: E column not found!')
warnmsg = ['E column not found']
else:
logging.warn('get_rms_vs_countrate: No events data!')
warnmsg = ['No events data']
else:
logging.warn('get_rms_vs_countrate: Wrong dataset type!')
warnmsg = ['Wrong dataset type']
except:
logging.error(ExHelper.getException('get_rms_vs_countrate'))
warnmsg = [ExHelper.getWarnMsg()]
# Prepares the result
result = push_to_results_array([], countrate_arr)
result = push_to_results_array_with_errors(result, rms_arr, rms_err_arr)
result = push_to_results_array(result, duration)
result = push_to_results_array(result, warnmsg)
result = push_to_results_array(result, freq_min_max)
result = push_to_results_array(result, auto_white_noise_offset)
return result
# get_plot_data_from_models:
# Returns the plot Y data for each model in an array of models evaluated at the
# given X-axis values, plus the sum of all the models' Y data over that range
#
# @param: models: array of models, dave_model definition
# @param: x_values: array of float, the x range
#
def get_plot_data_from_models(models, x_values):
models_arr = []
try:
sum_values = []
for i in range(len(models)):
model_obj = ModelHelper.get_astropy_model(models[i])
if model_obj:
val_array = []
for j in range(len(x_values)):
val_array.append(nan_and_inf_to_num(model_obj(x_values[j])))
if len(val_array) > 0:
models_arr = push_to_results_array(models_arr, nan_and_inf_to_num(val_array))
if len (sum_values) == 0:
sum_values = val_array
else:
sum_values = np.sum([sum_values, val_array], axis=0)
models_arr = push_to_results_array(models_arr, sum_values)
except:
logging.error(ExHelper.getException('get_plot_data_from_models'))
return common_error(ExHelper.getWarnMsg())
return models_arr
# get_fit_powerspectrum_result:
# Returns the results of fitting a PDS with an astropy model. If priors are
# sent, Bayesian parameter estimation is performed; otherwise Maximum
# Likelihood fitting is used by default.
#
# @param: src_destination: source file destination
# @param: bck_destination: background file destination, is optional
# @param: gti_destination: gti file destination, is optional
# @param: filters: array with the filters to apply
# [{ table = "EVENTS", column = "Time", from=0, to=10 }, ... ]
# @param: axis: array with the column names to use in plotting
# [{ table = "EVENTS", column = "TIME" },
# { table = "EVENTS", column = "PHA" } ]
# @param: dt: The time resolution of the events.
# @param: nsegm: The number of segments for splitting the lightcurve
# @param: segm_size: The segment length for splitting the lightcurve
# @param: norm: The normalization of the (real part of the) power spectrum.
# @param: pds_type: Type of PDS to use, single or averaged.
# @param: df: If not 0, the frequency rebinning value
# @param: models: array of models, dave_model definition with the starting parameters
# @param: priors: array of priors, dave_priors defined for each model parameter
# @param: sampling_params: dict with the parameter values for the MCMC sampling
#
def get_fit_powerspectrum_result(src_destination, bck_destination, gti_destination,
filters, axis, dt, nsegm, segm_size, norm, pds_type, df,
models, priors=None, sampling_params=None):
results = []
try:
pds, lc, gti = create_power_density_spectrum(src_destination, bck_destination, gti_destination,
filters, axis, dt, nsegm, segm_size, norm, pds_type, df)
lc = None # Dispose memory
gti = None # Dispose memory
if pds:
results = fit_power_density_spectrum(pds, models, priors=priors, sampling_params=sampling_params)
pds = None # Dispose memory
else:
logging.warn("get_fit_powerspectrum_result: can't create power density spectrum.")
except:
logging.error(ExHelper.getException('get_fit_powerspectrum_result'))
return common_error(ExHelper.getWarnMsg())
return results
# get_bootstrap_results:
# Returns the data of applying the bootstrap error analysis method to a given dave model
#
# @param: src_destination: source file destination
# @param: bck_destination: background file destination, is optional
# @param: gti_destination: gti file destination, is optional
# @param: filters: array with the filters to apply
# [{ table = "EVENTS", column = "Time", from=0, to=10 }, ... ]
# @param: axis: array with the column names to use in plotting
# [{ table = "EVENTS", column = "TIME" },
# { table = "EVENTS", column = "PHA" } ]
# @param: dt: The time resolution of the events.
# @param: nsegm: The number of segments for splitting the lightcurve
# @param: segm_size: The segment length for splitting the lightcurve
# @param: norm: The normalization of the (real part of the) power spectrum.
# @param: pds_type: Type of PDS to use, single or averaged.
# @param: df: If not 0, the frequency rebinning value
# @param: models: array of models, dave_model definition with the optimal parameters
# @param: n_iter: Number of bootstrap iterations
# @param: mean: Mean value of the simulated light curve
# @param: red_noise: The red noise value
# @param: seed: The random state seed for simulator
#
def get_bootstrap_results(src_destination, bck_destination, gti_destination,
filters, axis, dt, nsegm, segm_size, norm, pds_type, df,
models, n_iter, mean, red_noise, seed):
results = []
try:
# Gets the power density spectrum from the given params
pds, lc, gti = create_power_density_spectrum(src_destination, bck_destination, gti_destination,
filters, axis, dt, nsegm, segm_size, norm, pds_type, df)
if pds:
# Creates the model from dave_model
fit_model, starting_pars = ModelHelper.get_astropy_model_from_dave_models(models)
if fit_model:
# For n_iter: generate the PDS from the fit_model using the Stingray.Simulator
# then fit the simulated PDS and record the new model params and the PDS values
rms, rms_err = pds.compute_rms(min(pds.freq), max(pds.freq))
if mean <= 0:
mean = lc.meanrate
logging.debug('get_bootstrap_results lc.meanrate: ' + str(lc.meanrate))
if seed < 0:
seed = None
# N = max([(len(lc.time) + 1), int(math.ceil(segm_size * nsegm))])
# logging.debug('get_bootstrap_results len(lc.time): ' + str((len(lc.time) + 1)))
# logging.debug('get_bootstrap_results segm_size * nsegm: ' + str(int(math.ceil(segm_size * nsegm))))
bins_per_segm = int(math.ceil(segm_size / dt))
N = int(math.ceil(bins_per_segm / 1024) * 1024) # max([ bins_per_segm, 1024 ])
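# N is rounded up to a multiple of 1024 bins, presumably so the simulated
# lightcurve length is FFT-friendly while still covering one segment.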
#logging.debug('get_bootstrap_results bins_per_segm: ' + str(bins_per_segm))
#logging.debug('get_bootstrap_results N: ' + str(N))
models_params = []
powers = []
for i in range(n_iter):
try:
the_simulator = simulator.Simulator(N=N, dt=dt, mean=mean,
rms=rms, red_noise=red_noise, random_state=seed)
sim_lc = the_simulator.simulate(fit_model)
if pds_type == 'Sng':
sim_pds = Powerspectrum(sim_lc, norm=norm, gti=gti)
else:
sim_pds = AveragedPowerspectrum(lc=sim_lc, segment_size=segm_size, norm=norm, gti=gti)
if sim_pds:
if df > 0:
sim_pds = sim_pds.rebin(df=df)
#sim_pds = rebin_spectrum_if_necessary(sim_pds)
parest, res = fit_powerspectrum(sim_pds, fit_model, starting_pars,
max_post=False, priors=None, fitmethod="L-BFGS-B")
models_params.append(res.p_opt)
powers.append(sim_pds.power)
else:
logging.warn(ExHelper.getException("get_bootstrap_results: can't create power spectrum for i: " + str(i)))
except:
logging.error(ExHelper.getException('get_bootstrap_results for i: ' + str(i)))
models_params = np.array(models_params)
powers = np.array(powers)
fixed = [fit_model.fixed[n] for n in fit_model.param_names]
parnames = [n for n, f in zip(fit_model.param_names, fixed) \
if f is False]
if len(models_params) > 0 and len(powers) == len(models_params):
# Histogram all the recorded model parameters
param_errors = []
for i in range(models_params.shape[1]):
param_values = models_params[:, i]
counts, values = DsHelper.get_histogram(param_values, 0.1)
# Fit the histogram with a Gaussian and get the optimized parameters
x = np.array(list(counts.keys()))
y = np.array(list(counts.values()))
amplitude, mean, stddev = ModelHelper.fit_data_with_gaussian(x, y)
param = dict()
param["index"] = i
param["name"] = parnames[i]
param["err"] = nan_and_inf_to_num([stddev])
param_errors.extend([param])
results = push_to_results_array(results, param_errors)
# Histogram all the recorded power values
power_means = []
power_errors = []
for i in range(powers.shape[1]):
power_values = powers[:, i]
counts, values = DsHelper.get_histogram(power_values, 0.1)
# Fit the histogram with a Gaussian and get the optimized parameters
x = np.array(list(counts.keys()))
y = np.array(list(counts.values()))
amplitude, mean, stddev = ModelHelper.fit_data_with_gaussian(x, y)
power_means.extend(nan_and_inf_to_num([mean]))
power_errors.extend(nan_and_inf_to_num([stddev]))
results = push_to_results_array(results, power_means)
results = push_to_results_array(results, power_errors)
else:
logging.warn("get_bootstrap_results: can't get model params or powers from the simulated data")
else:
logging.warn("get_bootstrap_results: can't create summed model from dave_models.")
else:
logging.warn("get_bootstrap_results: can't create power density spectrum.")
except:
logging.error(ExHelper.getException('get_bootstrap_results'))
return common_error(ExHelper.getWarnMsg())
return results
# get_lomb_scargle_results:
# Returns LombScargle frequencies and powers from a given lightcurve
#
# @param: src_destination: source file destination
# @param: bck_destination: background file destination, is optional
# @param: gti_destination: gti file destination, is optional
# @param: filters: array with the filters to apply
# [{ table = "EVENTS", column = "Time", from=0, to=10 }, ... ]
# @param: axis: array with the column names to use in plotting
# [{ table = "EVENTS", column = "TIME" },
# { table = "EVENTS", column = "PHA" } ]
# @param: dt: The time resolution of the events.
# @param: freq_range: A tuple with minimum and maximum values of the
# range of frequency, send [-1, -1] to use all frequencies
# @param: nyquist_factor: Average Nyquist frequency factor
# @param: ls_norm: Periodogram normalization ["standard", "model", "log", "psd"]
# @param: samples_per_peak: Points across each significant periodogram peak
#
def get_lomb_scargle_results(src_destination, bck_destination, gti_destination,
filters, axis, dt, freq_range, nyquist_factor, ls_norm, samples_per_peak):
frequency = []
power = []
power_err = []
duration = []
warnmsg = []
try:
if len(axis) != 2:
return common_error("Wrong number of axis")
warnmsg = [""]
# Calculates the LombScargle values
frequency, power, lc = get_lomb_scargle(src_destination, bck_destination, gti_destination,
filters, axis, dt, freq_range, nyquist_factor, ls_norm, samples_per_peak)
if not lc:
return common_error("Can't create lightcurve or is empty")
elif not math.isclose(dt, lc.dt, abs_tol=0.001):
warnmsg = ["@WARN@Overriden Bin Size: " + str(lc.dt)]
duration = [lc.tseg]
if lc.gti is not None and len(lc.gti) == 0 and DsHelper.hasGTIGaps(lc.time):
warnmsg = ["@WARN@GTI gaps found on LC"]
lc = None # Dispose memory
except:
logging.error(ExHelper.getException('get_lomb_scargle_results'))
warnmsg = [ExHelper.getWarnMsg()]
# Prepares the result
result = push_to_results_array([], frequency)
result = push_to_results_array_with_errors(result, power, power_err)
result = push_to_results_array(result, duration)
result = push_to_results_array(result, warnmsg)
return result
# get_fit_lomb_scargle_result:
# Returns the results of fitting a LombScargle periodogram with an astropy model.
# If priors are sent, Bayesian parameter estimation is performed; otherwise
# Maximum Likelihood fitting is used by default.
#
# @param: src_destination: source file destination
# @param: bck_destination: background file destination, is optional
# @param: gti_destination: gti file destination, is optional
# @param: filters: array with the filters to apply
# [{ table = "EVENTS", column = "Time", from=0, to=10 }, ... ]
# @param: axis: array with the column names to use in plotting
# [{ table = "EVENTS", column = "TIME" },
# { table = "EVENTS", column = "PHA" } ]
# @param: dt: The time resolution of the events.
# @param: freq_range: A tuple with minimum and maximum values of the
# range of frequency, send [-1, -1] to use all frequencies
# @param: nyquist_factor: Average Nyquist frequency factor
# @param: ls_norm: Periodogram normalization ["standard", "model", "log", "psd"]
# @param: samples_per_peak: Points across each significant periodogram peak
# @param: models: array of models, dave_model definition with the starting parameters
# @param: priors: array of priors, dave_priors defined for each model parameters
# @param: sampling_params: dict with the parameter values for the MCMC sampling
#
def get_fit_lomb_scargle_result(src_destination, bck_destination, gti_destination,
filters, axis, dt, freq_range, nyquist_factor, ls_norm, samples_per_peak,
models, priors=None, sampling_params=None):
results = []
try:
# Calculates the LombScargle values
frequency, power, lc = get_lomb_scargle(src_destination, bck_destination, gti_destination,
filters, axis, dt, freq_range, nyquist_factor, ls_norm, samples_per_peak)
if not lc:
return common_error("Can't create lightcurve or is empty")
pds = Powerspectrum()
pds.freq = frequency
pds.power = power
if pds:
results = fit_power_density_spectrum(pds, models, priors=priors, sampling_params=sampling_params)
pds = None # Dispose memory
else:
logging.warn("get_fit_lomb_scargle_result: can't create power spectrum.")
except:
logging.error(ExHelper.getException('get_fit_lomb_scargle_result'))
return common_error(ExHelper.getWarnMsg())
return results
# get_pulse_search: Returns z_n_search or epoch_folding results of a given events dataset
#
# @param: src_destination: source file destination
# @param: bck_destination: background file destination, is optional
# @param: gti_destination: gti file destination, is optional
# @param: filters: array with the filters to apply
# [{ table = "EVENTS", column = "Time", from=0, to=10 }, ... ]
# @param: axis: array with the column names to use in plotting
# [{ table = "EVENTS", column = "TIME" },
# { table = "EVENTS", column = "PHA" } ]
# @param: dt: The time resolution of the events.
# @param: freq_range: A tuple with minimum and maximum values of the
# range of frequency
# @param: mode: Pulse search method ["epoch_folding", "z_n_search"].
# @param: oversampling: Pulse peak oversampling.
# @param: nharm: Number of harmonics.
# @param: nbin: Number of bins of the folded profiles.
# @param: segment_size: Length of the segments to be averaged in the periodogram.
#
def get_pulse_search(src_destination, bck_destination, gti_destination, filters, axis,
dt, freq_range, mode="z_n_search", oversampling=15, nharm=4, nbin=128, segment_size=5000):
freq = []
zstat = []
cand_freqs_z = []
cand_stat_z = []
try:
if len(axis) != 2:
return common_error("Wrong number of axis")
if mode not in ['epoch_folding', 'z_n_search']:
logging.warn("Wrong mode, using default: z_n_search")
mode = "z_n_search"
filters = FltHelper.get_filters_clean_color_filters(filters)
filters = FltHelper.apply_bin_size_to_filters(filters, dt)
ds = get_filtered_dataset(src_destination, filters, gti_destination)
if not ds:
return common_error("Cant read dataset!")
# Gets time data
time_data = np.array(ds.tables[axis[0]["table"]].columns[axis[0]["column"]].values)
tseg = np.median(np.diff(time_data))
logging.debug("tseg: " + str(tseg))
# We will search for pulsations over a range
# of frequencies around the known pulsation period.
# Calculates the trial frequency grid from the minimum frequency step
df_min = 1/(max(time_data) - min(time_data))
df = df_min / oversampling
frequencies = np.arange(freq_range[0], freq_range[1], df)
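# Example: a 1000 s exposure gives df_min = 1e-3 Hz; with oversampling=15 the
# trial frequency grid is stepped every ~6.7e-5 Hz across freq_range.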
weights=1
if DsHelper.is_lightcurve_dataset(ds):
weights = np.array(ds.tables["RATE"].columns["RATE"].values)
if mode == "z_n_search":
freq, zstat = z_n_search(time_data, frequencies, nbin=nbin, \
nharm=nharm, segment_size=segment_size, weights=weights)
else:
freq, zstat = epoch_folding_search(time_data, frequencies, nbin=nbin, \
segment_size=segment_size, weights=weights)
z_detlev = z2_n_detection_level(n=1, epsilon=0.001, ntrial=len(freq))
cand_freqs_z, cand_stat_z = search_best_peaks(freq, zstat, z_detlev)
except:
logging.error(ExHelper.getException('get_pulse_search'))
return common_error(ExHelper.getWarnMsg())
# Prepares the result
result = push_to_results_array([], freq)
result = push_to_results_array(result, zstat)
result = push_to_results_array(result, cand_freqs_z)
result = push_to_results_array(result, cand_stat_z)
return result
# get_phaseogram: Returns phaseogram of a given events dataset
#
# @param: src_destination: source file destination
# @param: bck_destination: background file destination, is optional
# @param: gti_destination: gti file destination, is optional
# @param: filters: array with the filters to apply
# [{ table = "EVENTS", column = "Time", from=0, to=10 }, ... ]
# @param: axis: array with the column names to use in plotting
# [{ table = "EVENTS", column = "TIME" },
# { table = "EVENTS", column = "PHA" } ]
# @param: dt: The time resolution of the events.
# @param: f: Pulse frequency.
# @param: nph: Number of phase bins.
# @param: nt: Number of time bins.
# @param: fdot: First frequency derivative.
# @param: fddot: Second frequency derivative.
# @param: binary_parameters: [orbital_period, asini, t0], optional binary orbit correction.
#
def get_phaseogram(src_destination, bck_destination, gti_destination, filters, axis,
dt, f, nph, nt, fdot=0, fddot=0, binary_parameters=None):
phaseogr = []
phases = []
times = []
mean_phases = []
profile = []
error_dist = []
try:
if len(axis) != 2:
return common_error("Wrong number of axis")
filters = FltHelper.get_filters_clean_color_filters(filters)
filters = FltHelper.apply_bin_size_to_filters(filters, dt)
ds = get_filtered_dataset(src_destination, filters, gti_destination)
if not ds:
return common_error("Cant read dataset!")
weights=None
if DsHelper.is_lightcurve_dataset(ds):
weights = np.array(ds.tables["RATE"].columns["RATE"].values)
# Prepares the phaseogram parameters
time_data = np.array(ds.tables[axis[0]["table"]].columns[axis[0]["column"]].values)
pepoch = None
if len(ds.tables["GTI"].columns["START"].values) > 0:
pepoch = ds.tables["GTI"].columns["START"].values[0]
delay_times = 0
orbital_period = time_data[-1] - time_data[0]
asini = 0
t0 = pepoch
prev_t0 = 0
if binary_parameters is not None:
if binary_parameters[0] > 0:
orbital_period=binary_parameters[0]
if binary_parameters[1] > 0:
asini=binary_parameters[1]
if binary_parameters[2] > 0:
t0=binary_parameters[2]
delay_times = asini * np.sin(2 * np.pi * (time_data - t0) / orbital_period)
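# This is the light-travel-time delay of a circular orbit: the projected
# semi-major axis (asini, in light seconds) times the sine of the orbital
# phase relative to t0, subtracted below to de-orbit the photon arrival times.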
corrected_times = time_data - delay_times
# Calculate the phaseogram plot data
phaseogr, phases, times, additional_info = phaseogram(corrected_times, f, nph=nph, nt=nt,
fdot=fdot, fddot=fddot, plot=False,
pepoch=pepoch, weights=weights)
phaseogr = np.transpose(phaseogr)
# Calculates the profile plot data
mean_phases = (phases[:-1] + phases[1:]) / 2
profile = np.sum(phaseogr, axis=1)
mean_profile = np.mean(profile)
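# If the profile only spans one cycle (phases < 1.5), duplicate it so the
# plot shows two full pulse cycles, a common convention for folded profiles.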
if np.all(mean_phases < 1.5):
mean_phases = np.concatenate((mean_phases, mean_phases + 1))
profile = np.concatenate((profile, profile))
err_low, err_high = poisson_conf_interval(mean_profile, interval='frequentist-confidence', sigma=1)
error_dist = [err_low, err_high]
except:
logging.error(ExHelper.getException('get_phaseogram'))
return common_error(ExHelper.getWarnMsg())
# Prepares the result
result = push_to_results_array([], phaseogr)
result = push_to_results_array(result, phases)
result = push_to_results_array(result, times)
result = push_to_results_array(result, mean_phases)
result = push_to_results_array(result, profile)
result = push_to_results_array(result, error_dist)
return result
# ----- HELPER FUNCTIONS.. NOT EXPOSED -------------
def get_filtered_dataset(destination, filters, gti_destination=""):
# Try to get filtered dataset from cache
cache_key = "FILTERED_" + DsCache.get_key(destination + gti_destination + str(filters), True)
if DsCache.contains(cache_key):
logging.debug("Returned cached filtered dataset, cache_key: " + cache_key + ", count: " + str(DsCache.count()))
return DsCache.get(cache_key)
dataset, ds_cache_key = DaveReader.get_file_dataset(destination)
if not dataset:
logging.warn("get_filtered_dataset: destination specified but not loadable.")
return None
if gti_destination:
gti_dataset, gti_cache_key = DaveReader.get_file_dataset(gti_destination)
if gti_dataset:
dataset = DsHelper.get_dataset_applying_gti_dataset(dataset, gti_dataset)
if not dataset:
logging.warn("get_filtered_dataset: dataset is none after applying gti_dataset.")
return None
else:
logging.warn("get_filtered_dataset: Gti_destination specified but not loadable.")
filtered_ds = dataset.apply_filters(filters)
if filtered_ds:
logging.debug("Add filtered_ds to cache, cache_key: " + cache_key + ", count: " + str(DsCache.count()))
DsCache.add(cache_key, filtered_ds)
return filtered_ds
def get_color_filtered_dataset(destination, filters, color_column_name, gti_destination=""):
color_filters = FltHelper.get_filters_from_color_filters(filters, color_column_name)
filtered_ds = get_filtered_dataset(destination, color_filters, gti_destination)
return filtered_ds
def split_dataset_with_color_filters(src_destination, filters, color_keys, gti_destination):
filtered_datasets = []
for color_key in color_keys:
filtered_ds = get_color_filtered_dataset(src_destination, filters, color_key, gti_destination)
if not DsHelper.is_events_dataset(filtered_ds):
logging.warn("Can't create filtered_ds for " + str(color_key))
return None
filtered_datasets.append(filtered_ds)
return filtered_datasets
def push_to_results_array (result, values):
column = dict()
try:
column["values"] = np.around(nan_and_inf_to_num(values), decimals=CONFIG.PRECISION)
except:
column["values"] = nan_and_inf_to_num(values)
result.append(column)
return result
def push_to_results_array_with_errors (result, values, errors):
column = dict()
column["values"] = np.around(nan_and_inf_to_num(values), decimals=CONFIG.PRECISION)
column["error_values"] = np.around(nan_and_inf_to_num(errors), decimals=CONFIG.PRECISION)
result.append(column)
return result
def nan_and_inf_to_num (obj):
if isinstance(obj, int) \
or isinstance(obj, np.integer) \
or isinstance(obj, float) \
or isinstance(obj, np.floating):
if obj > CONFIG.BIG_NUMBER:
return CONFIG.BIG_NUMBER
if obj < -CONFIG.BIG_NUMBER:
return -CONFIG.BIG_NUMBER
if np.isnan(obj):
return 0
elif isinstance(obj, np.ndarray) \
and len(obj.shape) == 1:
# Checks if any element is NaN or Inf and replaces it with BIG_NUMBER or 0
# This is the fastest way to check it:
# https://stackoverflow.com/questions/6736590/fast-check-for-nan-in-numpy
if not np.isfinite(np.dot(obj, obj)):
obj[np.isposinf(obj)] = CONFIG.BIG_NUMBER
obj[np.isneginf(obj)] = -CONFIG.BIG_NUMBER
obj[np.isnan(obj)] = 0
return obj
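# Editor's sketch (illustrative only, not part of the engine): how the
# sanitizer above behaves, assuming CONFIG.BIG_NUMBER is the large positive
# cap used throughout this module.
#   >>> arr = np.array([1.0, np.nan, np.inf, -np.inf])
#   >>> nan_and_inf_to_num(arr)              # arrays are modified in place
#   array([ 1., 0., BIG_NUMBER, -BIG_NUMBER ])   # schematically
#   >>> nan_and_inf_to_num(float('nan'))     # scalars are returned by value
#   0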
def get_color_axis_for_ds():
color_axis = [dict() for i in range(2)]
color_axis[0]["table"] = "EVENTS"
color_axis[0]["column"] = CONFIG.TIME_COLUMN
color_axis[1]["table"] = "EVENTS"
color_axis[1]["column"] = "PHA"
return color_axis
def check_axis_in_dataset (dataset, axis):
for i in range(len(axis)):
if axis[i]["table"] not in dataset.tables:
logging.warn('check_axis_in_dataset: ' + axis[i]["table"] + ' table not found!')
return False
if axis[i]["column"] not in dataset.tables[axis[i]["table"]].columns:
logging.warn('check_axis_in_dataset: ' + axis[i]["column"] + ' column not found!')
return False
return True
# exclude_axis: Returns first found axis from axis list
# where column differs from filter_axis.column
def exclude_axis(axis, filter_axis):
for i in range(len(axis)):
if axis[i]["column"] != filter_axis["column"]:
return axis[i]
return None
def get_lightcurve_any_dataset(src_destination, bck_destination, gti_destination, filters, dt):
filters = FltHelper.get_filters_clean_color_filters(filters)
filters = FltHelper.apply_bin_size_to_filters(filters, dt)
filtered_ds = get_filtered_dataset(src_destination, filters, gti_destination)
if DsHelper.is_events_dataset(filtered_ds):
# Creates lightcurves by gti and joins in one
logging.debug("Create lightcurve from evt dataset")
return get_lightcurve_from_events_dataset(filtered_ds, bck_destination, filters, gti_destination, dt)
elif DsHelper.is_lightcurve_dataset(filtered_ds):
#If dataset is LIGHTCURVE type
logging.debug("Create lightcurve from lc dataset")
gti = load_gti_from_destination (gti_destination)
lc = DsHelper.get_lightcurve_from_lc_dataset(filtered_ds, gti=gti)
#Applies background data if set
if bck_destination:
#Gets the backscale keyword value
src_backscale = None
if "BACKSCAL" in filtered_ds.tables["RATE"].header:
src_backscale = int(filtered_ds.tables["RATE"].header["BACKSCAL"])
#Applies background data
lc = apply_background_to_lc(lc, bck_destination, filters, gti_destination, dt, src_backscale)
return lc
else:
logging.warn("Wrong dataset type")
return None
def get_lightcurve_from_events_dataset(filtered_ds, bck_destination, filters, gti_destination, dt):
# Try to get the lightcurve from cache
cache_key = "LC_" + DsCache.get_key(filtered_ds.id + bck_destination + gti_destination + str(filters) + str(dt), True)
if DsCache.contains(cache_key):
logging.debug("Returned cached lightcurve, cache_key: " + cache_key + ", count: " + str(DsCache.count()))
return DsCache.get(cache_key)
eventlist = DsHelper.get_eventlist_from_evt_dataset(filtered_ds)
if not eventlist or eventlist.ncounts < 2 or len(eventlist.time) < 2:
logging.warn("Wrong lightcurve counts for eventlist from ds.id -> " + str(filtered_ds.id))
return None
if (eventlist.time[eventlist.ncounts - 1] - eventlist.time[0]) < dt * 2:
logging.warn("Lightcurve duration must be greater than two bin sizes, for ds.id -> " + str(filtered_ds.id))
return None
while True:
# Checks if lc has counts or retries with smaller bin size.
lc = eventlist.to_lc(dt)
if (lc is None) or (not np.isnan(lc.meanrate) and lc.n > 1):
break
else:
dt = dt / 2.0
logging.warn("Lightcurve has no counts, bin size: " + str(lc.dt) + ", retrying with binsize: " + str(dt))
if bck_destination:
#Gets the backscale keyword value
src_backscale = None
if "BACKSCAL" in filtered_ds.tables["EVENTS"].header:
src_backscale = int(filtered_ds.tables["EVENTS"].header["BACKSCAL"])
#Applies background data
lc = apply_background_to_lc(lc, bck_destination, filters, gti_destination, lc.dt, src_backscale)
eventlist = None # Dispose memory
filtered_ds = None # Dispose memory
# Applies rate filter to lightcurve countrate if filter has been sent
rate_filter = FltHelper.get_rate_filter(filters)
if rate_filter:
logging.debug("Filtering lightcurve with countrates: from: " + str(rate_filter["from"]) + ", to: " + str(rate_filter["to"]))
filtered_indexes = np.where((lc.countrate >= rate_filter["from"]) & (lc.countrate <= rate_filter["to"]))[0]
lc = DsHelper.get_lightcurve(lc.time[filtered_indexes],
lc.counts[filtered_indexes],
lc.counts_err[filtered_indexes],
lc.gti)
DsCache.add(cache_key, lc)
return lc
def get_lightcurves_from_events_datasets_array (datasets_array, color_keys, bck_destination, filters, gti_destination, dt):
lightcurves = []
for color_idx in range(len(color_keys)):
color_filters = FltHelper.get_filters_from_color_filters(filters, color_keys[color_idx])
lc = get_lightcurve_from_events_dataset(datasets_array[color_idx], bck_destination, color_filters, gti_destination, dt)
if lc:
lightcurves.append(lc)
return lightcurves
def apply_background_to_lc(lc, bck_destination, filters, gti_destination, dt, src_backscale=None):
if lc:
logging.debug("Create background lightcurve ....")
bck_lc = get_lightcurve_any_dataset(bck_destination, "", gti_destination, filters, dt)
if bck_lc:
#Calculates the backscale_ratio
backscale_ratio = 1
if src_backscale is not None:
bck_ds, bck_cache_key = DaveReader.get_file_dataset(bck_destination)
if bck_ds:
#Gets the backscale keyword value
table = DsHelper.get_hdutable_from_dataset(bck_ds)
if table:
if "BACKSCAL" in table.header:
backscale_ratio = src_backscale / int(table.header["BACKSCAL"])
bck_ds = None
table = None
if backscale_ratio != 1:
# Applies the backscale_ratio to background lightcurve
logging.debug("Applying backscale_ratio: " + str(backscale_ratio))
bck_lc.counts *= backscale_ratio
bck_lc.counts_err *= backscale_ratio
bck_lc = Lightcurve(bck_lc.time, bck_lc.counts,
err=bck_lc.counts_err, gti=bck_lc.gti,
mjdref=bck_lc.mjdref)
#Subtracts background lightcurve from source lightcurve
lc = lc - bck_lc
bck_lc = None
else:
logging.warn("Wrong lightcurve for background data...")
else:
logging.warn("Wrong source lightcurve.")
return lc
def create_power_density_spectrum(src_destination, bck_destination, gti_destination,
filters, axis, dt, nsegm, segm_size, norm, pds_type, df=0):
if len(axis) != 2:
logging.warn("Wrong number of axis")
return None, None, None
if norm not in ['frac', 'abs', 'leahy', 'none']:
logging.warn("Wrong normalization")
return None, None, None
if pds_type not in ['Sng', 'Avg']:
logging.warn("Wrong power density spectrum type")
return None, None, None
if segm_size == 0:
segm_size = None
# Creates the lightcurve
lc = get_lightcurve_any_dataset(src_destination, bck_destination, gti_destination, filters, dt)
if not lc:
logging.warn("Can't create lightcurve or is empty")
return None, None, None
# Prepares GTI if passed
gti = load_gti_from_destination (gti_destination)
if not gti:
logging.debug("External GTIs not loaded using defaults")
gti = lc.gti
# Creates the power density spectrum
logging.debug("Create power density spectrum")
if pds_type == 'Sng':
pds = Powerspectrum(lc, norm=norm, gti=gti)
else:
pds = AveragedPowerspectrum(lc=lc, segment_size=segm_size, norm=norm, gti=gti)
if pds:
if df > 0:
pds = pds.rebin(df=df)
#pds = rebin_spectrum_if_necessary(pds)
else:
logging.warn("Can't create power spectrum")
return pds, lc, gti
def fit_power_density_spectrum(pds, models, priors=None, sampling_params=None):
results = []
try:
fit_model, starting_pars = ModelHelper.get_astropy_model_from_dave_models(models)
if fit_model:
# Default fit parameters
max_post=False
fitmethod="L-BFGS-B"
as_priors=None
if priors is not None:
# Creates the priors from dave_priors
as_priors = ModelHelper.get_astropy_priors(priors)
if len(as_priors.keys()) > 0:
# If there are priors then this is a Bayesian parameter estimation
max_post=True
fitmethod="BFGS"
else:
as_priors=None
logging.warn("fit_power_density_spectrum: can't create priors from dave_priors.")
if as_priors:
# Creates a Posterior object with the priors
lpost = PSDPosterior(pds.freq, pds.power, fit_model, priors=as_priors, m=pds.m)
else:
# Creates the Maximum Likelihood object for fitting
lpost = PSDLogLikelihood(pds.freq, pds.power, fit_model, m=pds.m)
# Creates the PSD Parameters Estimation object and runs the fitting
parest = PSDParEst(pds, fitmethod=fitmethod, max_post=max_post)
res = parest.fit(lpost, starting_pars, neg=True)
sample = None
if as_priors and sampling_params is not None:
# If this is a Bayesian parameter estimation and sampling parameters
# were given, sample the posterior distribution defined in `lpost` using MCMC
sample = parest.sample(lpost, res.p_opt, cov=res.cov,
nwalkers=sampling_params["nwalkers"],
niter=sampling_params["niter"],
burnin=sampling_params["burnin"],
threads=sampling_params["threads"],
print_results=False, plot=False)
# Prepares the results to be returned to GUI
fixed = [fit_model.fixed[n] for n in fit_model.param_names]
parnames = [n for n, f in zip(fit_model.param_names, fixed) \
if f is False]
# Add to results the estimated parameters
params = []
for i, (x, y, p) in enumerate(zip(res.p_opt, res.err, parnames)):
param = dict()
param["index"] = i
param["name"] = p
param["opt"] = nan_and_inf_to_num(x)
param["err"] = nan_and_inf_to_num(y)
params.append(param)
results = push_to_results_array(results, params)
# Add to results the estimation statistics
stats = dict()
try:
stats["deviance"] = nan_and_inf_to_num(res.deviance)
stats["aic"] = nan_and_inf_to_num(res.aic)
stats["bic"] = nan_and_inf_to_num(res.bic)
except AttributeError:
stats["deviance"] = "ERROR"
try:
stats["merit"] = nan_and_inf_to_num(res.merit)
stats["dof"] = nan_and_inf_to_num(res.dof) # Degrees of freedom
stats["dof_ratio"] = nan_and_inf_to_num(res.merit/res.dof)
stats["sobs"] = nan_and_inf_to_num(res.sobs)
stats["sexp"] = nan_and_inf_to_num(res.sexp)
stats["ssd"] = nan_and_inf_to_num(res.ssd)
except AttributeError:
stats["merit"] = "ERROR"
results = push_to_results_array(results, stats)
# If there is sampling data add it to results
if sample:
sample_stats = dict()
try:
sample_stats["acceptance"] = sample.acceptance
sample_stats["rhat"] = sample.rhat
sample_stats["mean"] = sample.mean
sample_stats["std"] = sample.std
sample_stats["ci"] = sample.ci
try:
#Acor is not always present
sample_stats["acor"] = sample.acor
except AttributeError:
sample_stats["acor"] = "ERROR"
#Creates an HTML <img> tag from the plot
try:
fig = sample.plot_results(nsamples=sampling_params["nsamples"])
sample_stats["img"] = Plotter.convert_fig_to_html(fig)
except:
sample_stats["img"] = "ERROR"
logging.error(ExHelper.getException("fit_power_density_spectrum: Can't create image from plot."))
except AttributeError:
sample_stats["acceptance"] = "ERROR"
logging.error(ExHelper.getException("fit_power_density_spectrum: Can't add sample data."))
results = push_to_results_array(results, sample_stats)
except:
logging.error(ExHelper.getException('fit_power_density_spectrum'))
return results
def get_lomb_scargle(src_destination, bck_destination, gti_destination,
filters, axis, dt, freq_range, nyquist_factor, ls_norm, samples_per_peak):
# Creates the lightcurve
lc = get_lightcurve_any_dataset(src_destination, bck_destination, gti_destination, filters, dt)
if not lc:
return None, None, None
# If freq_range is not set, calculates max and min freq
if freq_range[0] < 0:
freq_range[0] = 0.6 / lc.tseg
if freq_range[1] < 0:
freq_range[1] = 0.6 / lc.dt
# Calculates the LombScargle values
frequency, power = LombScargle(lc.time, lc.counts).autopower(minimum_frequency=freq_range[0],
maximum_frequency=freq_range[1],
nyquist_factor=nyquist_factor,
normalization=ls_norm,
samples_per_peak=samples_per_peak)
return frequency, nan_and_inf_to_num(power), lc
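# Editor's sketch: the same astropy LombScargle call on synthetic data
# (assumes `LombScargle` is imported from astropy, as used above):
#   >>> t = np.sort(np.random.uniform(0, 100, 500))
#   >>> y = np.sin(2 * np.pi * 0.2 * t) + np.random.normal(0, 0.1, t.size)
#   >>> freq, power = LombScargle(t, y).autopower(samples_per_peak=5)
#   >>> freq[np.argmax(power)]   # expected to land near the true 0.2 Hz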
# Reduces the PDS data to CONFIG.MAX_PLOT_POINTS to improve plot performance
def rebin_spectrum_if_necessary (pds):
freq_size = len(pds.freq)
if freq_size > CONFIG.MAX_PLOT_POINTS:
df = (max(pds.freq) - min(pds.freq)) / CONFIG.MAX_PLOT_POINTS
logging.warn("Spectrum rebined to " + str(CONFIG.MAX_PLOT_POINTS) + " points, from " + str(freq_size) + " points, with df: " + str(df))
pds = pds.rebin(df=df)
return pds
def get_countrate_from_lc_ds (lc_destination, bck_destination, lc_name, bck_name):
lc_ds, lc_cache_key = DaveReader.get_file_dataset(lc_destination)
if not DsHelper.is_lightcurve_dataset(lc_ds):
logging.warn("Wrong dataset type for " + lc_name)
return None, None
count_rate = np.array(lc_ds.tables["RATE"].columns["RATE"].values)
count_rate_error = np.array(lc_ds.tables["RATE"].columns["RATE"].error_values)
if bck_destination:
bck_ds, bck_cache_key = DaveReader.get_file_dataset(bck_destination)
if not DsHelper.is_lightcurve_dataset(bck_ds):
logging.warn("Wrong dataset type for " + bck_name)
else:
count_rate_bck = np.array(bck_ds.tables["RATE"].columns["RATE"].values)
count_rate_error_bck = np.array(bck_ds.tables["RATE"].columns["RATE"].error_values)
if count_rate.shape == count_rate_bck.shape:
count_rate -= count_rate_bck
# Propagate the errors in quadrature rather than subtracting them
count_rate_error = np.sqrt(count_rate_error ** 2 + count_rate_error_bck ** 2)
else:
logging.warn("Lightcurves " + lc_name + " and " + bck_name + " have different shapes.")
return count_rate, count_rate_error
def load_gti_from_destination (gti_destination):
# Try to get the gtis from cache
cache_key = "GTI_" + DsCache.get_key(gti_destination, True)
if DsCache.contains(cache_key):
logging.debug("Returned cached gtis, cache_key: " + cache_key + ", count: " + str(DsCache.count()))
return DsCache.get(cache_key)
gti = None
if gti_destination:
gti_dataset, gti_cache_key = DaveReader.get_file_dataset(gti_destination)
if gti_dataset:
gti = DsHelper.get_stingray_gti_from_gti_table (gti_dataset.tables["GTI"])
DsCache.add(cache_key, gti)
logging.debug("Load GTI success")
return gti
def get_divided_values_and_error (values_0, values_1, error_0, error_1):
divided_error = np.array([])
with np.errstate(all='ignore'): # Ignore divisions by 0 and others
divided_values = nan_and_inf_to_num(values_0 / values_1)
if error_0.shape == error_1.shape == values_0.shape:
divided_error = nan_and_inf_to_num((error_0/values_1) + ((error_1 * values_0)/(values_1 * values_1)))
divided_values[divided_values >= CONFIG.BIG_NUMBER]=0
divided_values[divided_values <= -CONFIG.BIG_NUMBER]=0
divided_error[divided_error >= CONFIG.BIG_NUMBER]=0
divided_error[divided_error <= -CONFIG.BIG_NUMBER]=0
return divided_values, divided_error
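# Editor's note: the error term above is the first-order propagation for a
# ratio, err(v0/v1) ~ e0/v1 + e1*v0/v1**2. A tiny self-check (numpy only):
#   >>> v, e = get_divided_values_and_error(np.array([4.0]), np.array([2.0]),
#   ...                                     np.array([0.2]), np.array([0.1]))
#   >>> float(v[0]), float(e[0])
#   (2.0, 0.2)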
#Return the white_noise_offset automatically
def get_white_noise_offset (event_arr, gti, dt, pds_type, segm_size, df):
white_noise_offset = 0.0
evt_list = EventList(event_arr[:,0], pi=event_arr[:,1])
if evt_list and evt_list.ncounts > 1:
if (evt_list.time[evt_list.ncounts - 1] - evt_list.time[0]) >= dt:
lc = evt_list.to_lc(dt)
if lc and np.sqrt(lc.meancounts * lc.meancounts) > 0:
if not gti:
gti = lc.gti
if segm_size > lc.tseg:
segm_size = lc.tseg
logging.warn("get_white_noise_offset: segmsize bigger than lc.duration, lc.duration applied instead.")
pds = None
if pds_type == 'Sng':
pds = Powerspectrum(lc, norm='leahy', gti=gti)
else:
pds = AveragedPowerspectrum(lc=lc, segment_size=segm_size, norm='leahy', gti=gti)
if pds:
if df > 0:
pds = pds.rebin(df=df)
num_tries = 0
while white_noise_offset <= 0.0 and num_tries < 5:
amp, x0, fwhm, wno = ModelHelper.fit_data_with_lorentz_and_const(pds.freq, pds.power)
white_noise_offset = wno
num_tries += 1
return white_noise_offset
def common_error(error):
logging.error(error)
return dict(error=error)
def common_warn(warn):
logging.warn(warn)
return dict(error="@WARN@" + warn)
# ----- Long-Term variability FUNCTIONS.. NOT EXPOSED -------------
def lightcurve_meancount(lc):
return lc.meancounts, np.std(lc.counts)
def lightcurve_excvar(lc):
return excess_variance(lc, normalization='none')
def lightcurve_fractional_rms(lc):
return excess_variance(lc, normalization='fvar')
def get_means_from_array(array, elements_per_mean):
split = np.array_split(array, math.floor(len(array) / elements_per_mean))
return np.array([np.mean(arr) for arr in split])
def mean_confidence_interval(data, confidence=0.95):
a = 1.0*np.array(data)
n = len(a)
m, se = np.mean(a), sp.stats.sem(a)
h = se * sp.stats.t.ppf((1 + confidence) / 2., n - 1)
return m, m-h, m+h
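# Editor's sketch: the two statistics helpers above on toy data; assumes
# numpy/scipy are already imported in this module as np/sp.
#   >>> data = np.random.normal(loc=5.0, scale=1.0, size=400)
#   >>> m, low, high = mean_confidence_interval(data, confidence=0.95)
#   >>> low < m < high
#   True
#   >>> get_means_from_array(np.arange(10), 5)   # two 5-element means
#   array([2., 7.])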
|
StingraySoftware/dave
|
src/main/python/utils/dave_engine.py
|
Python
|
apache-2.0
| 111,651
|
[
"Gaussian"
] |
02c6cc2d323fca3932ca8c19d20673884cf3ab68d3cea799cf3535482260bbf7
|
runs = [
#'bands05m', 'bands10m', # 'bands20m', # large number of bands bad
#'m101',
#'m102',
#'m103', 'm105', # 'm107', # for Mixer nmaxold matters
#'m051', # many steps, and no advantage for this set of systems
#'m203', # larger mixing better
's103',
'd103', 'd203', 'd253', # MixerDiff best
'dzpm103', 'dzpm203', # 'dzpm253', # dzp guess does not help
]
runsstr = ','.join(runs)
def agts(queue):
run = [queue.add('g2_1_pbe0_fd.py %s --gpaw=fprojectors=1' % r,
ncpus=4,
walltime=40*60)
for r in runs * 2]
analyse = queue.add('analyse.py molecule scf_g2_1_pbe0_fd ' + runsstr,
ncpus=1, walltime=10, deps=run,
creates=['scf_g2_1_pbe0_fd_energy.csv',
'scf_g2_1_pbe0_fd_calculator_steps.png'])
|
robwarm/gpaw-symm
|
gpaw/test/big/scf/g2_1_pbe0_fd.agts.py
|
Python
|
gpl-3.0
| 884
|
[
"GPAW"
] |
430b94cc8025c5999bea5747c8988c1ea6eef10a24f96020ed0a7afb630b7cf5
|
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2017 The Ray Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
import tempfile
import torch.distributed as dist
from pyspark import BarrierTaskContext, TaskContext
from bigdl.orca.learn.pytorch.torch_runner import TorchRunner
from bigdl.orca.learn.utils import save_pkl, duplicate_stdout_stderr_to_file, get_rank
from bigdl.orca.learn.log_monitor import LogMonitor
logger = logging.getLogger(__name__)
class PytorchPysparkWorker(TorchRunner):
"""Manages a PyTorch model for training."""
def __init__(self,
model_creator,
optimizer_creator,
size,
cluster_info,
cores_per_worker,
loss_creator=None,
metrics=None,
scheduler_creator=None,
training_operator_cls=None,
config=None,
use_tqdm=False,
scheduler_step_freq=None,
state_dict=None,
backend="torch-distributed",
mode="fit",
sync_stats=True,
log_level=logging.INFO,
model_dir=None,
log_to_driver=True,
driver_ip=None,
driver_port=None,
):
super().__init__(model_creator, optimizer_creator, loss_creator, metrics, scheduler_creator,
training_operator_cls, config, use_tqdm, scheduler_step_freq, sync_stats,
log_level=log_level)
self.state_dict = state_dict
self.size = size
self.mode = mode
self.backend = backend
self.cluster_info = cluster_info
assert model_dir
self.model_dir = model_dir
self.log_to_driver = log_to_driver
self.setup(cores_per_worker)
if self.log_to_driver:
self.log_path, self.logger_thread, self.thread_stop = \
PytorchPysparkWorker._start_log_monitor(driver_ip, driver_port)
if self.backend == "torch-distributed":
self.setup_distributed(self.mode, cluster_info)
@staticmethod
def _start_log_monitor(driver_ip, driver_port):
if TaskContext.get():
partition_id = TaskContext.get().partitionId()
else:
partition_id = BarrierTaskContext.get().partitionId()
log_path = os.path.join(tempfile.gettempdir(),
"{}_runner.log".format(partition_id))
duplicate_stdout_stderr_to_file(log_path)
logger_thread, thread_stop = \
LogMonitor.start_log_monitor(driver_ip=driver_ip,
driver_port=driver_port,
log_path=log_path,
partition_id=partition_id)
return log_path, logger_thread, thread_stop
def setup_distributed(self, mode, cluster_info):
if mode == "fit":
self.rank = get_rank(cluster_info)
logger.info(f"cluster is: {cluster_info}")
address = f"tcp://{cluster_info[0]}"
self.setup_torch_distribute(url=address,
world_rank=self.rank,
world_size=self.size)
else:
self.rank = 0
self.setup_components()
self.setup_operator(self.models)
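# Editor's note (illustrative): cluster_info is expected to hold one
# "ip:port" string per worker, so e.g. cluster_info == ["10.0.0.1:45678", ...]
# makes rank 0's rendezvous URL "tcp://10.0.0.1:45678".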
def train_epochs(self, data_creator, epochs=1, batch_size=32, profile=False,
info=None, wrap_dataloader=None, callbacks=None):
self.load_state_dict(self.state_dict.value)
stats_list = super().train_epochs(data_creator, epochs, batch_size, profile, info,
wrap_dataloader, callbacks)
state_dict = self.get_state_dict()
if self.log_to_driver:
LogMonitor.stop_log_monitor(self.log_path, self.logger_thread, self.thread_stop)
if self.rank == 0:
save_pkl(state_dict, os.path.join(self.model_dir, "state.pkl"))
return [stats_list]
def validate(self, data_creator, batch_size=32, num_steps=None, profile=False,
info=None, wrap_dataloader=None):
"""Evaluates the model on the validation data set."""
self.load_state_dict(self.state_dict.value)
validation_stats = super().validate(data_creator, batch_size, num_steps, profile, info,
wrap_dataloader)
if self.log_to_driver:
LogMonitor.stop_log_monitor(self.log_path, self.logger_thread, self.thread_stop)
return [validation_stats]
def predict(self, data_creator, batch_size=32, profile=False):
"""Evaluates the model on the validation data set."""
config = self.config.copy()
self._toggle_profiling(profile=profile)
partition = data_creator(config, batch_size)
self.load_state_dict(self.state_dict.value)
result = super().predict(partition=partition, batch_size=batch_size, profile=profile)
if self.log_to_driver:
LogMonitor.stop_log_monitor(self.log_path, self.logger_thread, self.thread_stop)
return result
def shutdown(self):
"""Attempts to shut down the worker."""
dist.destroy_process_group()
super().shutdown()
|
intel-analytics/BigDL
|
python/orca/src/bigdl/orca/learn/pytorch/pytorch_pyspark_worker.py
|
Python
|
apache-2.0
| 6,543
|
[
"ORCA"
] |
3f771ebc2397da96c4c6575192f7d0930eab04305384698f9e540f9283ebbe40
|
##############################################################################
# Imports
##############################################################################
import numpy as np
import mdtraj as md
from mdtraj.utils.six import PY2
from mdtraj.utils import ensure_type
from mdtraj.geometry.hbond import _prep_kabsch_sander_arrays
from mdtraj.geometry import _geometry
if PY2:
from string import maketrans
else:
maketrans = str.maketrans
##############################################################################
# GLOBALS
##############################################################################
SIMPLIFIED_CODE_TRANSLATION = maketrans('HGIEBTS ', 'HHHEECCC')
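# Editor's note: this maps the 8 DSSP codes onto the 3 simplified ones, e.g.
#   >>> 'HGIEBTS '.translate(SIMPLIFIED_CODE_TRANSLATION)
#   'HHHEECCC'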
__all__ = ['compute_dssp']
##############################################################################
# CODE
##############################################################################
def compute_dssp(traj, simplified=True):
"""Compute Dictionary of protein secondary structure (DSSP) secondary structure assignments
Parameters
----------
traj : md.Trajectory
A trajectory
simplified : bool, default=True
Use the simplified 3-category assignment scheme. Otherwise the original
8-category scheme is used.
Returns
-------
assignments : np.ndarray, shape=(n_frames, n_residues), dtype=S1
The assignments is a 2D array of character codes (see below), giving
the secondary structure of each residue in each frame.
Notes
-----
The DSSP assignment codes are:
- 'H' : Alpha helix
- 'B' : Residue in isolated beta-bridge
- 'E' : Extended strand, participates in beta ladder
- 'G' : 3-helix (3/10 helix)
- 'I' : 5 helix (pi helix)
- 'T' : hydrogen bonded turn
- 'S' : bend
- ' ' : Loops and irregular elements
The simplified DSSP codes are:
- 'H' : Helix. Either of the 'H', 'G', or 'I' codes.
- 'E' : Strand. Either of the 'E' or 'B' codes.
- 'C' : Coil. Either of the 'T', 'S' or ' ' codes.
A special 'NA' code will be assigned to each 'residue' in the topology which
isn't actually a protein residue (does not contain atoms with the names
'CA', 'N', 'C', 'O'), such as water molecules that are listed as 'residue's
in the topology.
Our implementation is based on DSSP-2.2.0, written by Maarten L. Hekkelman
and distributed under the Boost Software license.
References
----------
.. [1] Kabsch W, Sander C (1983). "Dictionary of protein secondary
structure: pattern recognition of hydrogen-bonded and geometrical
features". Biopolymers 22 (12): 2577-637. doi:10.1002/bip.360221211
"""
if traj.topology is None:
raise ValueError('kabsch_sander requires topology')
xyz, nco_indices, ca_indices, proline_indices, protein_indices \
= _prep_kabsch_sander_arrays(traj)
chain_ids = np.array([r.chain.index for r in traj.top.residues], dtype=np.int32)
value = _geometry._dssp(xyz, nco_indices, ca_indices, proline_indices, chain_ids)
if simplified:
value = value.translate(SIMPLIFIED_CODE_TRANSLATION)
n_frames = xyz.shape[0]
n_residues = nco_indices.shape[0]
if PY2:
array = np.fromiter(value, dtype=np.dtype('S2'))
else:
array = np.fromiter(value, dtype=np.dtype('U2'))
array = array.reshape(n_frames, n_residues)
array[:, np.logical_not(protein_indices)] = 'NA'
return array
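# Editor's sketch (illustrative; 'protein.pdb' is a hypothetical input file):
#   >>> import mdtraj as md
#   >>> traj = md.load('protein.pdb')
#   >>> dssp = compute_dssp(traj, simplified=True)
#   >>> dssp.shape == (traj.n_frames, traj.top.n_residues)
#   True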
|
leeping/mdtraj
|
mdtraj/geometry/dssp.py
|
Python
|
lgpl-2.1
| 3,484
|
[
"MDTraj"
] |
7403f603978f23f730d229dc689bb96bd0e85af8ba33ea4864685d15005f3475
|
"""Support for Ecobee Thermostats."""
from __future__ import annotations
import collections
import voluptuous as vol
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
ATTR_TARGET_TEMP_HIGH,
ATTR_TARGET_TEMP_LOW,
CURRENT_HVAC_COOL,
CURRENT_HVAC_DRY,
CURRENT_HVAC_FAN,
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
FAN_AUTO,
FAN_ON,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
PRESET_AWAY,
PRESET_NONE,
SUPPORT_AUX_HEAT,
SUPPORT_FAN_MODE,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_HUMIDITY,
SUPPORT_TARGET_TEMPERATURE,
SUPPORT_TARGET_TEMPERATURE_RANGE,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_TEMPERATURE,
PRECISION_TENTHS,
STATE_ON,
TEMP_FAHRENHEIT,
)
from homeassistant.helpers import entity_platform
import homeassistant.helpers.config_validation as cv
from homeassistant.util.temperature import convert
from .const import _LOGGER, DOMAIN, ECOBEE_MODEL_TO_NAME, MANUFACTURER
from .util import ecobee_date, ecobee_time
ATTR_COOL_TEMP = "cool_temp"
ATTR_END_DATE = "end_date"
ATTR_END_TIME = "end_time"
ATTR_FAN_MIN_ON_TIME = "fan_min_on_time"
ATTR_FAN_MODE = "fan_mode"
ATTR_HEAT_TEMP = "heat_temp"
ATTR_RESUME_ALL = "resume_all"
ATTR_START_DATE = "start_date"
ATTR_START_TIME = "start_time"
ATTR_VACATION_NAME = "vacation_name"
ATTR_DST_ENABLED = "dst_enabled"
ATTR_MIC_ENABLED = "mic_enabled"
ATTR_AUTO_AWAY = "auto_away"
ATTR_FOLLOW_ME = "follow_me"
DEFAULT_RESUME_ALL = False
PRESET_TEMPERATURE = "temp"
PRESET_VACATION = "vacation"
PRESET_HOLD_NEXT_TRANSITION = "next_transition"
PRESET_HOLD_INDEFINITE = "indefinite"
AWAY_MODE = "awayMode"
PRESET_HOME = "home"
PRESET_SLEEP = "sleep"
DEFAULT_MIN_HUMIDITY = 15
DEFAULT_MAX_HUMIDITY = 50
HUMIDIFIER_MANUAL_MODE = "manual"
# Order matters, because for reverse mapping we don't want to map HEAT to AUX
ECOBEE_HVAC_TO_HASS = collections.OrderedDict(
[
("heat", HVAC_MODE_HEAT),
("cool", HVAC_MODE_COOL),
("auto", HVAC_MODE_HEAT_COOL),
("off", HVAC_MODE_OFF),
("auxHeatOnly", HVAC_MODE_HEAT),
]
)
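# Editor's note: the reverse lookup in set_hvac_mode() scans this mapping in
# insertion order, so HVAC_MODE_HEAT resolves to "heat", not "auxHeatOnly":
#   >>> next(k for k, v in ECOBEE_HVAC_TO_HASS.items() if v == HVAC_MODE_HEAT)
#   'heat'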
ECOBEE_HVAC_ACTION_TO_HASS = {
# Map to None if we do not know how to represent.
"heatPump": CURRENT_HVAC_HEAT,
"heatPump2": CURRENT_HVAC_HEAT,
"heatPump3": CURRENT_HVAC_HEAT,
"compCool1": CURRENT_HVAC_COOL,
"compCool2": CURRENT_HVAC_COOL,
"auxHeat1": CURRENT_HVAC_HEAT,
"auxHeat2": CURRENT_HVAC_HEAT,
"auxHeat3": CURRENT_HVAC_HEAT,
"fan": CURRENT_HVAC_FAN,
"humidifier": None,
"dehumidifier": CURRENT_HVAC_DRY,
"ventilator": CURRENT_HVAC_FAN,
"economizer": CURRENT_HVAC_FAN,
"compHotWater": None,
"auxHotWater": None,
}
PRESET_TO_ECOBEE_HOLD = {
PRESET_HOLD_NEXT_TRANSITION: "nextTransition",
PRESET_HOLD_INDEFINITE: "indefinite",
}
SERVICE_CREATE_VACATION = "create_vacation"
SERVICE_DELETE_VACATION = "delete_vacation"
SERVICE_RESUME_PROGRAM = "resume_program"
SERVICE_SET_FAN_MIN_ON_TIME = "set_fan_min_on_time"
SERVICE_SET_DST_MODE = "set_dst_mode"
SERVICE_SET_MIC_MODE = "set_mic_mode"
SERVICE_SET_OCCUPANCY_MODES = "set_occupancy_modes"
DTGROUP_INCLUSIVE_MSG = (
f"{ATTR_START_DATE}, {ATTR_START_TIME}, {ATTR_END_DATE}, "
f"and {ATTR_END_TIME} must be specified together"
)
CREATE_VACATION_SCHEMA = vol.Schema(
{
vol.Required(ATTR_ENTITY_ID): cv.entity_id,
vol.Required(ATTR_VACATION_NAME): vol.All(cv.string, vol.Length(max=12)),
vol.Required(ATTR_COOL_TEMP): vol.Coerce(float),
vol.Required(ATTR_HEAT_TEMP): vol.Coerce(float),
vol.Inclusive(
ATTR_START_DATE, "dtgroup", msg=DTGROUP_INCLUSIVE_MSG
): ecobee_date,
vol.Inclusive(
ATTR_START_TIME, "dtgroup", msg=DTGROUP_INCLUSIVE_MSG
): ecobee_time,
vol.Inclusive(ATTR_END_DATE, "dtgroup", msg=DTGROUP_INCLUSIVE_MSG): ecobee_date,
vol.Inclusive(ATTR_END_TIME, "dtgroup", msg=DTGROUP_INCLUSIVE_MSG): ecobee_time,
vol.Optional(ATTR_FAN_MODE, default="auto"): vol.Any("auto", "on"),
vol.Optional(ATTR_FAN_MIN_ON_TIME, default=0): vol.All(
int, vol.Range(min=0, max=60)
),
}
)
DELETE_VACATION_SCHEMA = vol.Schema(
{
vol.Required(ATTR_ENTITY_ID): cv.entity_id,
vol.Required(ATTR_VACATION_NAME): vol.All(cv.string, vol.Length(max=12)),
}
)
RESUME_PROGRAM_SCHEMA = vol.Schema(
{
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Optional(ATTR_RESUME_ALL, default=DEFAULT_RESUME_ALL): cv.boolean,
}
)
SET_FAN_MIN_ON_TIME_SCHEMA = vol.Schema(
{
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_FAN_MIN_ON_TIME): vol.Coerce(int),
}
)
SUPPORT_FLAGS = (
SUPPORT_TARGET_TEMPERATURE
| SUPPORT_PRESET_MODE
| SUPPORT_AUX_HEAT
| SUPPORT_TARGET_TEMPERATURE_RANGE
| SUPPORT_FAN_MODE
)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the ecobee thermostat."""
data = hass.data[DOMAIN]
entities = []
for index in range(len(data.ecobee.thermostats)):
thermostat = data.ecobee.get_thermostat(index)
if not thermostat["modelNumber"] in ECOBEE_MODEL_TO_NAME:
_LOGGER.error(
"Model number for ecobee thermostat %s not recognized. "
"Please visit this link to open a new issue: "
"https://github.com/home-assistant/core/issues "
"and include the following information: "
"Unrecognized model number: %s",
thermostat["name"],
thermostat["modelNumber"],
)
entities.append(Thermostat(data, index, thermostat))
async_add_entities(entities, True)
platform = entity_platform.async_get_current_platform()
def create_vacation_service(service):
"""Create a vacation on the target thermostat."""
entity_id = service.data[ATTR_ENTITY_ID]
for thermostat in entities:
if thermostat.entity_id == entity_id:
thermostat.create_vacation(service.data)
thermostat.schedule_update_ha_state(True)
break
def delete_vacation_service(service):
"""Delete a vacation on the target thermostat."""
entity_id = service.data[ATTR_ENTITY_ID]
vacation_name = service.data[ATTR_VACATION_NAME]
for thermostat in entities:
if thermostat.entity_id == entity_id:
thermostat.delete_vacation(vacation_name)
thermostat.schedule_update_ha_state(True)
break
def fan_min_on_time_set_service(service):
"""Set the minimum fan on time on the target thermostats."""
entity_id = service.data.get(ATTR_ENTITY_ID)
fan_min_on_time = service.data[ATTR_FAN_MIN_ON_TIME]
if entity_id:
target_thermostats = [
entity for entity in entities if entity.entity_id in entity_id
]
else:
target_thermostats = entities
for thermostat in target_thermostats:
thermostat.set_fan_min_on_time(str(fan_min_on_time))
thermostat.schedule_update_ha_state(True)
def resume_program_set_service(service):
"""Resume the program on the target thermostats."""
entity_id = service.data.get(ATTR_ENTITY_ID)
resume_all = service.data.get(ATTR_RESUME_ALL)
if entity_id:
target_thermostats = [
entity for entity in entities if entity.entity_id in entity_id
]
else:
target_thermostats = entities
for thermostat in target_thermostats:
thermostat.resume_program(resume_all)
thermostat.schedule_update_ha_state(True)
hass.services.async_register(
DOMAIN,
SERVICE_CREATE_VACATION,
create_vacation_service,
schema=CREATE_VACATION_SCHEMA,
)
hass.services.async_register(
DOMAIN,
SERVICE_DELETE_VACATION,
delete_vacation_service,
schema=DELETE_VACATION_SCHEMA,
)
hass.services.async_register(
DOMAIN,
SERVICE_SET_FAN_MIN_ON_TIME,
fan_min_on_time_set_service,
schema=SET_FAN_MIN_ON_TIME_SCHEMA,
)
hass.services.async_register(
DOMAIN,
SERVICE_RESUME_PROGRAM,
resume_program_set_service,
schema=RESUME_PROGRAM_SCHEMA,
)
platform.async_register_entity_service(
SERVICE_SET_DST_MODE,
{vol.Required(ATTR_DST_ENABLED): cv.boolean},
"set_dst_mode",
)
platform.async_register_entity_service(
SERVICE_SET_MIC_MODE,
{vol.Required(ATTR_MIC_ENABLED): cv.boolean},
"set_mic_mode",
)
platform.async_register_entity_service(
SERVICE_SET_OCCUPANCY_MODES,
{
vol.Optional(ATTR_AUTO_AWAY): cv.boolean,
vol.Optional(ATTR_FOLLOW_ME): cv.boolean,
},
"set_occupancy_modes",
)
class Thermostat(ClimateEntity):
"""A thermostat class for Ecobee."""
def __init__(self, data, thermostat_index, thermostat):
"""Initialize the thermostat."""
self.data = data
self.thermostat_index = thermostat_index
self.thermostat = thermostat
self._name = self.thermostat["name"]
self.vacation = None
self._last_active_hvac_mode = HVAC_MODE_HEAT_COOL
self._operation_list = []
if (
self.thermostat["settings"]["heatStages"]
or self.thermostat["settings"]["hasHeatPump"]
):
self._operation_list.append(HVAC_MODE_HEAT)
if self.thermostat["settings"]["coolStages"]:
self._operation_list.append(HVAC_MODE_COOL)
if len(self._operation_list) == 2:
self._operation_list.insert(0, HVAC_MODE_HEAT_COOL)
self._operation_list.append(HVAC_MODE_OFF)
self._preset_modes = {
comfort["climateRef"]: comfort["name"]
for comfort in self.thermostat["program"]["climates"]
}
self._fan_modes = [FAN_AUTO, FAN_ON]
self.update_without_throttle = False
async def async_update(self):
"""Get the latest state from the thermostat."""
if self.update_without_throttle:
await self.data.update(no_throttle=True)
self.update_without_throttle = False
else:
await self.data.update()
self.thermostat = self.data.ecobee.get_thermostat(self.thermostat_index)
if self.hvac_mode != HVAC_MODE_OFF:
self._last_active_hvac_mode = self.hvac_mode
@property
def available(self):
"""Return if device is available."""
return self.thermostat["runtime"]["connected"]
@property
def supported_features(self):
"""Return the list of supported features."""
if self.has_humidifier_control:
return SUPPORT_FLAGS | SUPPORT_TARGET_HUMIDITY
return SUPPORT_FLAGS
@property
def name(self):
"""Return the name of the Ecobee Thermostat."""
return self.thermostat["name"]
@property
def unique_id(self):
"""Return a unique identifier for this ecobee thermostat."""
return self.thermostat["identifier"]
@property
def device_info(self):
"""Return device information for this ecobee thermostat."""
try:
model = f"{ECOBEE_MODEL_TO_NAME[self.thermostat['modelNumber']]} Thermostat"
except KeyError:
# Ecobee model is not in our list
model = None
return {
"identifiers": {(DOMAIN, self.thermostat["identifier"])},
"name": self.name,
"manufacturer": MANUFACTURER,
"model": model,
}
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_FAHRENHEIT
@property
def precision(self) -> float:
"""Return the precision of the system."""
return PRECISION_TENTHS
@property
def current_temperature(self):
"""Return the current temperature."""
return self.thermostat["runtime"]["actualTemperature"] / 10.0
@property
def target_temperature_low(self):
"""Return the lower bound temperature we try to reach."""
if self.hvac_mode == HVAC_MODE_HEAT_COOL:
return round(self.thermostat["runtime"]["desiredHeat"] / 10.0)
return None
@property
def target_temperature_high(self):
"""Return the upper bound temperature we try to reach."""
if self.hvac_mode == HVAC_MODE_HEAT_COOL:
return round(self.thermostat["runtime"]["desiredCool"] / 10.0)
return None
@property
def has_humidifier_control(self):
"""Return true if humidifier connected to thermostat and set to manual/on mode."""
return (
self.thermostat["settings"]["hasHumidifier"]
and self.thermostat["settings"]["humidifierMode"] == HUMIDIFIER_MANUAL_MODE
)
@property
def target_humidity(self) -> int | None:
"""Return the desired humidity set point."""
if self.has_humidifier_control:
return self.thermostat["runtime"]["desiredHumidity"]
return None
@property
def min_humidity(self) -> int:
"""Return the minimum humidity."""
return DEFAULT_MIN_HUMIDITY
@property
def max_humidity(self) -> int:
"""Return the maximum humidity."""
return DEFAULT_MAX_HUMIDITY
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
if self.hvac_mode == HVAC_MODE_HEAT_COOL:
return None
if self.hvac_mode == HVAC_MODE_HEAT:
return round(self.thermostat["runtime"]["desiredHeat"] / 10.0)
if self.hvac_mode == HVAC_MODE_COOL:
return round(self.thermostat["runtime"]["desiredCool"] / 10.0)
return None
@property
def fan(self):
"""Return the current fan status."""
if "fan" in self.thermostat["equipmentStatus"]:
return STATE_ON
return HVAC_MODE_OFF
@property
def fan_mode(self):
"""Return the fan setting."""
return self.thermostat["runtime"]["desiredFanMode"]
@property
def fan_modes(self):
"""Return the available fan modes."""
return self._fan_modes
@property
def preset_mode(self):
"""Return current preset mode."""
events = self.thermostat["events"]
for event in events:
if not event["running"]:
continue
if event["type"] == "hold":
if event["holdClimateRef"] in self._preset_modes:
return self._preset_modes[event["holdClimateRef"]]
# Any hold not based on a climate is a temp hold
return PRESET_TEMPERATURE
if event["type"].startswith("auto"):
# All auto modes are treated as holds
return event["type"][4:].lower()
if event["type"] == "vacation":
self.vacation = event["name"]
return PRESET_VACATION
return self._preset_modes[self.thermostat["program"]["currentClimateRef"]]
@property
def hvac_mode(self):
"""Return current operation."""
return ECOBEE_HVAC_TO_HASS[self.thermostat["settings"]["hvacMode"]]
@property
def hvac_modes(self):
"""Return the operation modes list."""
return self._operation_list
@property
def current_humidity(self) -> int | None:
"""Return the current humidity."""
return self.thermostat["runtime"]["actualHumidity"]
@property
def hvac_action(self):
"""Return current HVAC action.
Ecobee returns a CSV string with different equipment that is active.
We are prioritizing any heating/cooling equipment; otherwise look at
drying/fanning. Idle if nothing going on.
We are unable to map all actions to HA equivalents.
"""
if self.thermostat["equipmentStatus"] == "":
return CURRENT_HVAC_IDLE
actions = [
ECOBEE_HVAC_ACTION_TO_HASS[status]
for status in self.thermostat["equipmentStatus"].split(",")
if ECOBEE_HVAC_ACTION_TO_HASS[status] is not None
]
for action in (
CURRENT_HVAC_HEAT,
CURRENT_HVAC_COOL,
CURRENT_HVAC_DRY,
CURRENT_HVAC_FAN,
):
if action in actions:
return action
return CURRENT_HVAC_IDLE
@property
def extra_state_attributes(self):
"""Return device specific state attributes."""
status = self.thermostat["equipmentStatus"]
return {
"fan": self.fan,
"climate_mode": self._preset_modes[
self.thermostat["program"]["currentClimateRef"]
],
"equipment_running": status,
"fan_min_on_time": self.thermostat["settings"]["fanMinOnTime"],
}
@property
def is_aux_heat(self):
"""Return true if aux heater."""
return "auxHeat" in self.thermostat["equipmentStatus"]
async def async_turn_aux_heat_on(self) -> None:
"""Turn auxiliary heater on."""
if not self.is_aux_heat:
_LOGGER.warning("# Changing aux heat is not supported")
async def async_turn_aux_heat_off(self) -> None:
"""Turn auxiliary heater off."""
if self.is_aux_heat:
_LOGGER.warning("# Changing aux heat is not supported")
def set_preset_mode(self, preset_mode):
"""Activate a preset."""
if preset_mode == self.preset_mode:
return
self.update_without_throttle = True
# If we are currently in vacation mode, cancel it.
if self.preset_mode == PRESET_VACATION:
self.data.ecobee.delete_vacation(self.thermostat_index, self.vacation)
if preset_mode == PRESET_AWAY:
self.data.ecobee.set_climate_hold(
self.thermostat_index, "away", "indefinite", self.hold_hours()
)
elif preset_mode == PRESET_TEMPERATURE:
self.set_temp_hold(self.current_temperature)
elif preset_mode in (PRESET_HOLD_NEXT_TRANSITION, PRESET_HOLD_INDEFINITE):
self.data.ecobee.set_climate_hold(
self.thermostat_index,
PRESET_TO_ECOBEE_HOLD[preset_mode],
self.hold_preference(),
self.hold_hours(),
)
elif preset_mode == PRESET_NONE:
self.data.ecobee.resume_program(self.thermostat_index)
elif preset_mode in self.preset_modes:
climate_ref = None
for comfort in self.thermostat["program"]["climates"]:
if comfort["name"] == preset_mode:
climate_ref = comfort["climateRef"]
break
if climate_ref is not None:
self.data.ecobee.set_climate_hold(
self.thermostat_index,
climate_ref,
self.hold_preference(),
self.hold_hours(),
)
else:
_LOGGER.warning("Received unknown preset mode: %s", preset_mode)
else:
self.data.ecobee.set_climate_hold(
self.thermostat_index,
preset_mode,
self.hold_preference(),
self.hold_hours(),
)
@property
def preset_modes(self):
"""Return available preset modes."""
return list(self._preset_modes.values())
def set_auto_temp_hold(self, heat_temp, cool_temp):
"""Set temperature hold in auto mode."""
if cool_temp is not None:
cool_temp_setpoint = cool_temp
else:
cool_temp_setpoint = self.thermostat["runtime"]["desiredCool"] / 10.0
if heat_temp is not None:
heat_temp_setpoint = heat_temp
else:
heat_temp_setpoint = self.thermostat["runtime"]["desiredCool"] / 10.0
self.data.ecobee.set_hold_temp(
self.thermostat_index,
cool_temp_setpoint,
heat_temp_setpoint,
self.hold_preference(),
self.hold_hours(),
)
_LOGGER.debug(
"Setting ecobee hold_temp to: heat=%s, is=%s, cool=%s, is=%s",
heat_temp,
isinstance(heat_temp, (int, float)),
cool_temp,
isinstance(cool_temp, (int, float)),
)
self.update_without_throttle = True
def set_fan_mode(self, fan_mode):
"""Set the fan mode. Valid values are "on" or "auto"."""
if fan_mode.lower() not in (FAN_ON, FAN_AUTO):
error = "Invalid fan_mode value: Valid values are 'on' or 'auto'"
_LOGGER.error(error)
return
self.data.ecobee.set_fan_mode(
self.thermostat_index,
fan_mode,
self.hold_preference(),
holdHours=self.hold_hours(),
)
_LOGGER.info("Setting fan mode to: %s", fan_mode)
def set_temp_hold(self, temp):
"""Set temperature hold in modes other than auto.
Ecobee API: It is good practice to set the heat and cool hold
temperatures to be the same, if the thermostat is in either heat, cool,
auxHeatOnly, or off mode. If the thermostat is in auto mode, an
additional rule is required. The cool hold temperature must be greater
than the heat hold temperature by at least the amount in the
heatCoolMinDelta property.
https://www.ecobee.com/home/developer/api/examples/ex5.shtml
"""
if self.hvac_mode in (HVAC_MODE_HEAT, HVAC_MODE_COOL):
heat_temp = temp
cool_temp = temp
else:
delta = self.thermostat["settings"]["heatCoolMinDelta"] / 10
heat_temp = temp - delta
cool_temp = temp + delta
self.set_auto_temp_hold(heat_temp, cool_temp)
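# Editor's worked example: with heatCoolMinDelta == 40 (ecobee reports tenths
# of a degree, so delta == 4.0), set_temp_hold(70) in auto mode requests
# heat_temp == 66.0 and cool_temp == 74.0; in heat or cool mode both are 70.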
def set_temperature(self, **kwargs):
"""Set new target temperature."""
low_temp = kwargs.get(ATTR_TARGET_TEMP_LOW)
high_temp = kwargs.get(ATTR_TARGET_TEMP_HIGH)
temp = kwargs.get(ATTR_TEMPERATURE)
if self.hvac_mode == HVAC_MODE_HEAT_COOL and (
low_temp is not None or high_temp is not None
):
self.set_auto_temp_hold(low_temp, high_temp)
elif temp is not None:
self.set_temp_hold(temp)
else:
_LOGGER.error("Missing valid arguments for set_temperature in %s", kwargs)
def set_humidity(self, humidity):
"""Set the humidity level."""
if humidity not in range(0, 101):
raise ValueError(
f"Invalid set_humidity value (must be in range 0-100): {humidity}"
)
self.data.ecobee.set_humidity(self.thermostat_index, int(humidity))
self.update_without_throttle = True
def set_hvac_mode(self, hvac_mode):
"""Set HVAC mode (auto, auxHeatOnly, cool, heat, off)."""
ecobee_value = next(
(k for k, v in ECOBEE_HVAC_TO_HASS.items() if v == hvac_mode), None
)
if ecobee_value is None:
_LOGGER.error("Invalid mode for set_hvac_mode: %s", hvac_mode)
return
self.data.ecobee.set_hvac_mode(self.thermostat_index, ecobee_value)
self.update_without_throttle = True
def set_fan_min_on_time(self, fan_min_on_time):
"""Set the minimum fan on time."""
self.data.ecobee.set_fan_min_on_time(self.thermostat_index, fan_min_on_time)
self.update_without_throttle = True
def resume_program(self, resume_all):
"""Resume the thermostat schedule program."""
self.data.ecobee.resume_program(
self.thermostat_index, "true" if resume_all else "false"
)
self.update_without_throttle = True
def hold_preference(self):
"""Return user preference setting for hold time."""
# Values returned from thermostat are:
# "useEndTime2hour", "useEndTime4hour"
# "nextPeriod", "askMe"
# "indefinite"
device_preference = self.thermostat["settings"]["holdAction"]
# Currently supported pyecobee holdTypes:
# dateTime, nextTransition, indefinite, holdHours
hold_pref_map = {
"useEndTime2hour": "holdHours",
"useEndTime4hour": "holdHours",
"indefinite": "indefinite",
}
return hold_pref_map.get(device_preference, "nextTransition")
def hold_hours(self):
"""Return user preference setting for hold duration in hours."""
# Values returned from thermostat are:
# "useEndTime2hour", "useEndTime4hour"
# "nextPeriod", "askMe"
# "indefinite"
device_preference = self.thermostat["settings"]["holdAction"]
hold_hours_map = {
"useEndTime2hour": 2,
"useEndTime4hour": 4,
}
return hold_hours_map.get(device_preference)
def create_vacation(self, service_data):
"""Create a vacation with user-specified parameters."""
vacation_name = service_data[ATTR_VACATION_NAME]
cool_temp = convert(
service_data[ATTR_COOL_TEMP],
self.hass.config.units.temperature_unit,
TEMP_FAHRENHEIT,
)
heat_temp = convert(
service_data[ATTR_HEAT_TEMP],
self.hass.config.units.temperature_unit,
TEMP_FAHRENHEIT,
)
start_date = service_data.get(ATTR_START_DATE)
start_time = service_data.get(ATTR_START_TIME)
end_date = service_data.get(ATTR_END_DATE)
end_time = service_data.get(ATTR_END_TIME)
fan_mode = service_data[ATTR_FAN_MODE]
fan_min_on_time = service_data[ATTR_FAN_MIN_ON_TIME]
kwargs = {
key: value
for key, value in {
"start_date": start_date,
"start_time": start_time,
"end_date": end_date,
"end_time": end_time,
"fan_mode": fan_mode,
"fan_min_on_time": fan_min_on_time,
}.items()
if value is not None
}
_LOGGER.debug(
"Creating a vacation on thermostat %s with name %s, cool temp %s, heat temp %s, "
"and the following other parameters: %s",
self.name,
vacation_name,
cool_temp,
heat_temp,
kwargs,
)
self.data.ecobee.create_vacation(
self.thermostat_index, vacation_name, cool_temp, heat_temp, **kwargs
)
def delete_vacation(self, vacation_name):
"""Delete a vacation with the specified name."""
_LOGGER.debug(
"Deleting a vacation on thermostat %s with name %s",
self.name,
vacation_name,
)
self.data.ecobee.delete_vacation(self.thermostat_index, vacation_name)
def turn_on(self):
"""Set the thermostat to the last active HVAC mode."""
_LOGGER.debug(
"Turning on ecobee thermostat %s in %s mode",
self.name,
self._last_active_hvac_mode,
)
self.set_hvac_mode(self._last_active_hvac_mode)
def set_dst_mode(self, dst_enabled):
"""Enable/disable automatic daylight savings time."""
self.data.ecobee.set_dst_mode(self.thermostat_index, dst_enabled)
def set_mic_mode(self, mic_enabled):
"""Enable/disable Alexa mic (only for Ecobee 4)."""
self.data.ecobee.set_mic_mode(self.thermostat_index, mic_enabled)
def set_occupancy_modes(self, auto_away=None, follow_me=None):
"""Enable/disable Smart Home/Away and Follow Me modes."""
self.data.ecobee.set_occupancy_modes(
self.thermostat_index, auto_away, follow_me
)
|
lukas-hetzenecker/home-assistant
|
homeassistant/components/ecobee/climate.py
|
Python
|
apache-2.0
| 28,180
|
[
"VisIt"
] |
62c441706b376568e2fe7e0a853fe5f17cca1ed83623ad71b874cd8c03731dc8
|
# Copyright (c) 2003-2007 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:contact@logilab.fr
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""exceptions handling (raising, catching, exceptions classes) checker
"""
import sys
from ..logilab.common.compat import builtins
BUILTINS_NAME = builtins.__name__
from ..logilab import astng
from ..logilab.astng import YES, Instance, unpack_infer
from ..checkers import BaseChecker
from ..checkers.utils import is_empty, is_raising
from ..interfaces import IASTNGChecker
OVERGENERAL_EXCEPTIONS = ('Exception',)
MSGS = {
'E0701': ('Bad except clauses order (%s)',
'bad-except-order',
'Used when except clauses are not in the correct order (from the '
'more specific to the more generic). If you don\'t fix the order, '
'some exceptions may not be caught by the most specific handler.'),
'E0702': ('Raising %s while only classes, instances or string are allowed',
'raising-bad-type',
'Used when something which is neither a class, an instance or a \
string is raised (i.e. a `TypeError` will be raised).'),
'E0710': ('Raising a new style class which doesn\'t inherit from BaseException',
'raising-non-exception',
'Used when a new style class which doesn\'t inherit from \
BaseException is raised.'),
'E0711': ('NotImplemented raised - should raise NotImplementedError',
'notimplemented-raised',
'Used when NotImplemented is raised instead of \
NotImplementedError'),
'W0701': ('Raising a string exception',
'raising-string',
'Used when a string exception is raised.'),
'W0702': ('No exception type(s) specified',
'bare-except',
'Used when an except clause doesn\'t specify exceptions type to \
catch.'),
'W0703': ('Catching too general exception %s',
'broad-except',
'Used when an except catches a too general exception, \
possibly burying unrelated errors.'),
'W0704': ('Except doesn\'t do anything',
'pointless-except',
'Used when an except clause does nothing but "pass" and there is\
no "else" clause.'),
'W0710': ('Exception doesn\'t inherit from standard "Exception" class',
'nonstandard-exception',
'Used when a custom exception class is raised but doesn\'t \
inherit from the builtin "Exception" class.'),
'W0711': ('Exception to catch is the result of a binary "%s" operation',
'binary-op-exception',
'Used when the exception to catch is of the form \
"except A or B:". If intending to catch multiple, \
rewrite as "except (A, B):"'),
}
if sys.version_info < (3, 0):
EXCEPTIONS_MODULE = "exceptions"
else:
EXCEPTIONS_MODULE = "builtins"
class ExceptionsChecker(BaseChecker):
"""checks for
* excepts without exception filter
* type of raise argument : string, Exceptions, other values
"""
__implements__ = IASTNGChecker
name = 'exceptions'
msgs = MSGS
priority = -4
options = (('overgeneral-exceptions',
{'default' : OVERGENERAL_EXCEPTIONS,
'type' :'csv', 'metavar' : '<comma-separated class names>',
'help' : 'Exceptions that will emit a warning '
'when being caught. Defaults to "%s"' % (
', '.join(OVERGENERAL_EXCEPTIONS),)}
),
)
def visit_raise(self, node):
"""visit raise possibly inferring value"""
# ignore empty raise
if node.exc is None:
return
expr = node.exc
if self._check_raise_value(node, expr):
return
else:
try:
value = unpack_infer(expr).next()
except astng.InferenceError:
return
self._check_raise_value(node, value)
def _check_raise_value(self, node, expr):
"""check for bad values, string exception and class inheritance
"""
value_found = True
if isinstance(expr, astng.Const):
value = expr.value
if isinstance(value, str):
self.add_message('W0701', node=node)
else:
self.add_message('E0702', node=node,
args=value.__class__.__name__)
elif (isinstance(expr, astng.Name) and \
expr.name in ('None', 'True', 'False')) or \
isinstance(expr, (astng.List, astng.Dict, astng.Tuple,
astng.Module, astng.Function)):
self.add_message('E0702', node=node, args=expr.name)
elif ( (isinstance(expr, astng.Name) and expr.name == 'NotImplemented')
or (isinstance(expr, astng.CallFunc) and
isinstance(expr.func, astng.Name) and
expr.func.name == 'NotImplemented') ):
self.add_message('E0711', node=node)
elif isinstance(expr, astng.BinOp) and expr.op == '%':
self.add_message('W0701', node=node)
elif isinstance(expr, (Instance, astng.Class)):
if isinstance(expr, Instance):
expr = expr._proxied
if (isinstance(expr, astng.Class) and
not inherit_from_std_ex(expr) and
expr.root().name != BUILTINS_NAME):
if expr.newstyle:
self.add_message('E0710', node=node)
else:
self.add_message('W0710', node=node)
else:
value_found = False
else:
value_found = False
return value_found
def visit_tryexcept(self, node):
"""check for empty except"""
exceptions_classes = []
nb_handlers = len(node.handlers)
for index, handler in enumerate(node.handlers):
# single except doing nothing but "pass" without else clause
if nb_handlers == 1 and is_empty(handler.body) and not node.orelse:
self.add_message('W0704', node=handler.type or handler.body[0])
if handler.type is None:
if nb_handlers == 1 and not is_raising(handler.body):
self.add_message('W0702', node=handler)
# check if a "except:" is followed by some other
# except
elif index < (nb_handlers - 1):
msg = 'empty except clause should always appear last'
self.add_message('E0701', node=node, args=msg)
elif isinstance(handler.type, astng.BoolOp):
self.add_message('W0711', node=handler, args=handler.type.op)
else:
try:
excs = list(unpack_infer(handler.type))
except astng.InferenceError:
continue
for exc in excs:
# XXX skip other non class nodes
if exc is YES or not isinstance(exc, astng.Class):
continue
exc_ancestors = [anc for anc in exc.ancestors()
if isinstance(anc, astng.Class)]
for previous_exc in exceptions_classes:
if previous_exc in exc_ancestors:
msg = '%s is an ancestor class of %s' % (
previous_exc.name, exc.name)
self.add_message('E0701', node=handler.type, args=msg)
if (exc.name in self.config.overgeneral_exceptions
and exc.root().name == EXCEPTIONS_MODULE
and nb_handlers == 1 and not is_raising(handler.body)):
self.add_message('W0703', args=exc.name, node=handler.type)
exceptions_classes += excs
def inherit_from_std_ex(node):
"""return true if the given class node is subclass of
exceptions.Exception
"""
if node.name in ('Exception', 'BaseException') \
and node.root().name == EXCEPTIONS_MODULE:
return True
for parent in node.ancestors(recurs=False):
if inherit_from_std_ex(parent):
return True
return False
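# Editor's note: e.g. for `class MyError(Exception): pass`, the recursion
# walks the ancestor chain until it reaches the builtin Exception node (whose
# root module name equals EXCEPTIONS_MODULE) and returns True; a class that
# derives only from `object` returns False.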
def register(linter):
"""required method to auto register this checker"""
linter.register_checker(ExceptionsChecker(linter))
|
yorvic/.vim
|
bundle/python-mode/pylibs/pylama/checkers/pylint/checkers/exceptions.py
|
Python
|
gpl-3.0
| 9,260
|
[
"VisIt"
] |
b90db6ba2fc6d377f1ee4a54d6e77c5d6c9cf0e83157d0cb9cf6716833e0092a
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" A module containing a number of interesting image filter effects,
such as:
* Black-and-white pencil sketch
* Warming/cooling filters
* Cartoonizer
"""
import numpy as np
import cv2
from scipy.interpolate import UnivariateSpline
__author__ = "Michael Beyeler"
__license__ = "GNU GPL 3.0 or later"
class PencilSketch:
"""Pencil sketch effect
A class that applies a pencil sketch effect to an image.
    The processed image is overlaid over a background image for visual
    effect.
"""
def __init__(self, (width, height), bg_gray='pencilsketch_bg.jpg'):
"""Initialize parameters
:param (width, height): Image size.
:param bg_gray: Optional background image to improve the illusion
that the pencil sketch was drawn on a canvas.
"""
self.width = width
self.height = height
# try to open background canvas (if it exists)
        # cv2.IMREAD_GRAYSCALE is the documented flag (the legacy
        # cv2.CV_8UC1 constant shares its value of 0)
        self.canvas = cv2.imread(bg_gray, cv2.IMREAD_GRAYSCALE)
if self.canvas is not None:
self.canvas = cv2.resize(self.canvas, (self.width, self.height))
def render(self, img_rgb):
"""Applies pencil sketch effect to an RGB image
:param img_rgb: RGB image to be processed
:returns: Processed RGB image
"""
img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2GRAY)
img_blur = cv2.GaussianBlur(img_gray, (21, 21), 0, 0)
img_blend = cv2.divide(img_gray, img_blur, scale=256)
# if available, blend with background canvas
if self.canvas is not None:
img_blend = cv2.multiply(img_blend, self.canvas, scale=1./256)
return cv2.cvtColor(img_blend, cv2.COLOR_GRAY2RGB)
class WarmingFilter:
"""Warming filter
A class that applies a warming filter to an image.
    The class uses curve filters to manipulate the perceived color
    temperature of an image. The warming filter will shift the image's
    color spectrum towards red, away from blue.
"""
def __init__(self):
"""Initialize look-up table for curve filter"""
# create look-up tables for increasing and decreasing a channel
self.incr_ch_lut = self._create_LUT_8UC1([0, 64, 128, 192, 256],
[0, 70, 140, 210, 256])
self.decr_ch_lut = self._create_LUT_8UC1([0, 64, 128, 192, 256],
[0, 30, 80, 120, 192])
def render(self, img_rgb):
"""Applies warming filter to an RGB image
:param img_rgb: RGB image to be processed
:returns: Processed RGB image
"""
# warming filter: increase red, decrease blue
c_r, c_g, c_b = cv2.split(img_rgb)
c_r = cv2.LUT(c_r, self.incr_ch_lut).astype(np.uint8)
c_b = cv2.LUT(c_b, self.decr_ch_lut).astype(np.uint8)
img_rgb = cv2.merge((c_r, c_g, c_b))
# increase color saturation
c_h, c_s, c_v = cv2.split(cv2.cvtColor(img_rgb, cv2.COLOR_RGB2HSV))
c_s = cv2.LUT(c_s, self.incr_ch_lut).astype(np.uint8)
return cv2.cvtColor(cv2.merge((c_h, c_s, c_v)), cv2.COLOR_HSV2RGB)
def _create_LUT_8UC1(self, x, y):
"""Creates a look-up table using scipy's spline interpolation"""
spl = UnivariateSpline(x, y)
return spl(xrange(256))
class CoolingFilter:
"""Cooling filter
A class that applies a cooling filter to an image.
    The class uses curve filters to manipulate the perceived color
    temperature of an image. The cooling filter will shift the image's
    color spectrum towards blue, away from red.
"""
def __init__(self):
"""Initialize look-up table for curve filter"""
# create look-up tables for increasing and decreasing a channel
self.incr_ch_lut = self._create_LUT_8UC1([0, 64, 128, 192, 256],
[0, 70, 140, 210, 256])
self.decr_ch_lut = self._create_LUT_8UC1([0, 64, 128, 192, 256],
[0, 30, 80, 120, 192])
def render(self, img_rgb):
"""Applies pencil sketch effect to an RGB image
:param img_rgb: RGB image to be processed
:returns: Processed RGB image
"""
# cooling filter: increase blue, decrease red
c_r, c_g, c_b = cv2.split(img_rgb)
c_r = cv2.LUT(c_r, self.decr_ch_lut).astype(np.uint8)
c_b = cv2.LUT(c_b, self.incr_ch_lut).astype(np.uint8)
img_rgb = cv2.merge((c_r, c_g, c_b))
# decrease color saturation
c_h, c_s, c_v = cv2.split(cv2.cvtColor(img_rgb, cv2.COLOR_RGB2HSV))
c_s = cv2.LUT(c_s, self.decr_ch_lut).astype(np.uint8)
return cv2.cvtColor(cv2.merge((c_h, c_s, c_v)), cv2.COLOR_HSV2RGB)
def _create_LUT_8UC1(self, x, y):
"""Creates a look-up table using scipy's spline interpolation"""
spl = UnivariateSpline(x, y)
return spl(xrange(256))
class Cartoonizer:
"""Cartoonizer effect
A class that applies a cartoon effect to an image.
The class uses a bilateral filter and adaptive thresholding to create
a cartoon effect.
"""
def __init__(self):
pass
def render(self, img_rgb):
numDownSamples = 2 # number of downscaling steps
numBilateralFilters = 7 # number of bilateral filtering steps
# -- STEP 1 --
# downsample image using Gaussian pyramid
img_color = img_rgb
for _ in xrange(numDownSamples):
img_color = cv2.pyrDown(img_color)
# repeatedly apply small bilateral filter instead of applying
# one large filter
for _ in xrange(numBilateralFilters):
img_color = cv2.bilateralFilter(img_color, 9, 9, 7)
# upsample image to original size
for _ in xrange(numDownSamples):
img_color = cv2.pyrUp(img_color)
# -- STEPS 2 and 3 --
# convert to grayscale and apply median blur
img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2GRAY)
img_blur = cv2.medianBlur(img_gray, 7)
# -- STEP 4 --
# detect and enhance edges
img_edge = cv2.adaptiveThreshold(img_blur, 255,
cv2.ADAPTIVE_THRESH_MEAN_C,
cv2.THRESH_BINARY, 9, 2)
# -- STEP 5 --
# convert back to color so that it can be bit-ANDed with color image
img_edge = cv2.cvtColor(img_edge, cv2.COLOR_GRAY2RGB)
return cv2.bitwise_and(img_color, img_edge)
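# Usage sketch (added for illustration; "example.jpg" is a placeholder file):
if __name__ == '__main__':
    img_bgr = cv2.imread('example.jpg')
    img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)  # classes expect RGB
    h, w = img_rgb.shape[:2]
    sketch = PencilSketch((w, h)).render(img_rgb)
    warm = WarmingFilter().render(img_rgb)
    cool = CoolingFilter().render(img_rgb)
    cartoon = Cartoonizer().render(img_rgb)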
|
PacktPublishing/OpenCV-Computer-Vision-Projects-with-Python
|
Module 3/01_code/filters.py
|
Python
|
mit
| 6,689
|
[
"Gaussian"
] |
9b9e1ab6160f4ed2045cdb4963068ee876ee75921bce3a942efc0d816c46d7d2
|
"""**Class projection**
"""
from osgeo import osr
# The projection string depends on the gdal version
DEFAULT_PROJECTION = '+proj=longlat +datum=WGS84 +no_defs'
def proj4_to_dict(P):
"""Helper to turn a proj4 string into a dictionary for ease of comparison
See issue #304
Args:
P: proj4 string, such as +proj=longlat +ellps=WGS84 +no_defs
Returns
dictionary of individual elements, e.g. {'+proj': 'longlat',
'+ellps': 'WGS84',
'+no_defs': ''}
"""
D = {}
for e in P.split():
fields = e.strip().split('=')
if len(fields) > 1:
val = fields[1]
else:
val = ''
D[fields[0]] = val
return D
class Projection:
"""Represents projections associated with layers
"""
def __init__(self, p):
"""Constructor for Projection.
Args:
* p: Projection information.
Any of the GDAL formats are OK including WKT, proj4, ESRI, XML
It can also be an instance of Projection.
"""
if p is None:
# msg = 'Requested projection is None'
# raise TypeError(msg)
p = DEFAULT_PROJECTION
# Clean input string. This will also work when p is of class
# Projection by virtue of its __repr__()
p = str(p).strip()
# Create OSR spatial reference object
srs = self.spatial_reference = osr.SpatialReference()
# Try importing
input_OK = False
for import_func in [srs.ImportFromProj4,
srs.ImportFromWkt,
srs.ImportFromEPSG,
srs.ImportFromESRI,
# FIXME (Ole): This one emits the warning:
# Warning 5: Failed parsing CoordSys:
# 'Indonesia TM-3 zone 48.2'
# srs.ImportFromMICoordSys,
srs.ImportFromPCI,
srs.ImportFromXML,
srs.ImportFromUSGS,
srs.ImportFromUrl]:
try:
res = import_func(p)
except TypeError:
# FIXME: NetCDF raster layer gives SRS error
# Occasionally we get things like
# File "/usr/lib/python2.7/dist-packages/osgeo/osr.py",
# line 639, in ImportFromEPSG
# return _osr.SpatialReference_ImportFromEPSG(self, *args)
# TypeError: in method 'SpatialReference_ImportFromEPSG',
# argument 2 of type 'int'
# e.g. when using NetCDF multiband data. Why?
                # try the next importer; 'res' is undefined on this path
                continue
if res == 0:
input_OK = True
break
if not input_OK:
msg = 'Spatial reference %s was not recognised' % p
raise TypeError(msg)
        # Store the WKT and proj4 representations.
        # FIXME: kept only for backwards compatibility - remove when possible.
self.wkt = self.get_projection(proj4=False)
self.proj4 = self.get_projection(proj4=True)
def __repr__(self):
return self.wkt
def get_projection(self, proj4=False):
"""Return projection
Args:
* proj4: If True, projection will be returned in proj4 format.
If False (default) projection will be returned in WKT
format
Note:
To compare projections, use the __eq__ method directly on the
projection objects: E.g.
self.projection == other.projection
"""
if proj4:
p = self.spatial_reference.ExportToProj4()
else:
p = self.spatial_reference.ExportToWkt()
return p.strip()
def __eq__(self, other):
"""Override '==' to allow comparison with other projection objecs
"""
try:
other = Projection(other)
except Exception, e:
msg = ('Argument to == must be a spatial reference or object'
' of class Projection. I got %s with error '
'message: %s' % (str(other), e))
raise TypeError(msg)
if self.spatial_reference.IsSame(other.spatial_reference):
# OSR comparison checks out
return True
else:
# We have seen cases where the native comparison didn't work
# for projections that should be identical. See e.g.
# https://github.com/AIFDR/inasafe/issues/304
# FIXME (Ole): Someone, please find out how to robustly compare
# projections
# For now we do a secondary check using the proj4 string:
# Pull +proj and +ellips fields and compare
            # Attempt to compare strings like this. This is non-trivial
            # as the proj4 format does not always use the same parameters,
            # cf. e.g. +towgs84 or +ellps vs +datum:
# +proj=longlat +ellps=WGS84 +towgs84=0,0,0,0,0,0,0 +no_defs
# +proj=longlat +ellps=WGS84 +no_defs
#
# And even worse:
# +proj=longlat +datum=WGS84 +no_defs
# +proj=longlat +ellps=WGS84 +no_defs
# Get proj4 representations
P1 = self.get_projection(proj4=True)
P2 = other.get_projection(proj4=True)
if P1 == P2:
# Direct comparison of proj4 strings match
return True
else:
# Check key elements
D1 = proj4_to_dict(P1)
D2 = proj4_to_dict(P2)
result = True
for key in D1:
# Only compare keys that appear in both (see above)
if key in D2:
if D1[key] != D2[key]:
result = False
break
return result
def __ne__(self, other):
"""Override '!=' to allow comparison with other projection objecs
"""
return not self == other
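if __name__ == '__main__':
    # Usage sketch (added): WGS84 expressed two ways, exercising the
    # proj4 fallback comparison implemented in __eq__ above.
    p1 = Projection('+proj=longlat +datum=WGS84 +no_defs')
    p2 = Projection('+proj=longlat +ellps=WGS84 +no_defs')
    print p1 == p2  # falls back to keyword-wise proj4 comparison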
|
Jannes123/inasafe
|
safe/storage/projection.py
|
Python
|
gpl-3.0
| 6,217
|
[
"NetCDF"
] |
6dfd85a63ba42c958770c6c234fc85f010fd721a7528db940c23efb16873716f
|
#! /usr/bin/env python
import argparse, re, os
parser = argparse.ArgumentParser(description = 'Download genome/transcript sequences and gene annotations')
parser.add_argument('species', choices=['hg19','mm10','TAIR10'], help='choose a species (Human, Mouse, Arabidopsis)')
parser.add_argument('-d', '--download', action='store_true', help='download sequences or annotations')
parser.add_argument('-b', '--build', action='store_true', help='build sequences or annotations')
parser.add_argument('-g', '--genome', action='store_true', help='download or build genome sequences')
parser.add_argument('-t', '--transcriptome', action='store_true', help='download or build transcriptome sequences')
parser.add_argument('-a', '--annotation', action='store_true', help='download or build gene annotations')
args = parser.parse_args()
genome_dict = {'hg19': 'ftp://hgdownload.cse.ucsc.edu/goldenPath/hg19/bigZips/chromFa.tar.gz',
'mm10': 'ftp://hgdownload.cse.ucsc.edu/goldenPath/mm10/bigZips/chromFa.tar.gz',
'TAIR10': 'ftp://ftp.ensemblgenomes.org/pub/current/plants/fasta/arabidopsis_thaliana/dna/Arabidopsis_thaliana.TAIR10.*.dna.toplevel.fa.gz'}
trans_dict = {'hg19': ['ftp://ftp.ensembl.org/pub/release-75/fasta/homo_sapiens/cdna/Homo_sapiens.*.cdna.abinitio.fa.gz',
'ftp://ftp.ensembl.org/pub/release-75/fasta/homo_sapiens/cdna/Homo_sapiens.*.cdna.all.fa.gz',
'ftp://ftp.ensembl.org/pub/release-75/fasta/homo_sapiens/ncrna/Homo_sapiens.*.ncrna.fa.gz'],
'mm10': ['ftp://ftp.ensembl.org/pub/current_fasta/mus_musculus/cdna/Mus_musculus.*.cdna.abinitio.fa.gz',
'ftp://ftp.ensembl.org/pub/current_fasta/mus_musculus/cdna/Mus_musculus.*.cdna.all.fa.gz',
'ftp://ftp.ensembl.org/pub/current_fasta/mus_musculus/ncrna/Mus_musculus.*.ncrna.fa.gz'],
'TAIR10': ['ftp://ftp.ensemblgenomes.org/pub/current/plants/fasta/arabidopsis_thaliana/cdna/Arabidopsis_thaliana.*.cdna.abinitio.fa.gz',
'ftp://ftp.ensemblgenomes.org/pub/current/plants/fasta/arabidopsis_thaliana/cdna/Arabidopsis_thaliana.*.cdna.all.fa.gz',
'ftp://ftp.ensemblgenomes.org/pub/current/plants/fasta/arabidopsis_thaliana/ncrna/Arabidopsis_thaliana.*.ncrna.fa.gz']}
anno_dict = {'hg19': 'ftp://ftp.ensembl.org/pub/release-75/gtf/homo_sapiens/*.gtf.gz',
'mm10': 'ftp://ftp.ensembl.org/pub/current_gtf/mus_musculus/*.gtf.gz',
'TAIR10': 'ftp://ftp.ensemblgenomes.org/pub/current/plants//gtf/arabidopsis_thaliana/*.gtf.gz'}
def gtf_build(gtf, build):
input_file = open(gtf,'r')
output_file = open(build,'w')
tx2gene = {}
tx2exon_starts = {}
tx2exon_ends = {}
tx2cds_starts = {}
tx2cds_ends = {}
for line in input_file:
if line.startswith('#'):
continue
line_list = line.strip().split('\t')
chrom, biotype, feature, start, end, strand, ID = (line_list[0],line_list[1],line_list[2],line_list[3],line_list[4],line_list[6],line_list[8])
if gtf == 'hg19.gtf' or gtf == 'mm10.gtf':
chrom = 'chr' + chrom
start = str(int(start) - 1) ## 0-based
        m = re.search('gene_id \"(.+?)\".+transcript_id \"(.+?)\"', ID)
        if m is not None:
            gene_id, tx_id = m.groups()
tx2gene[tx_id] = '%s|%s|%s|%s' % (chrom, strand, gene_id, biotype)
if feature == 'exon':
tx2exon_starts[tx_id] = start + ',' + tx2exon_starts.get(tx_id, '')
tx2exon_ends[tx_id] = end + ',' + tx2exon_ends.get(tx_id, '')
if feature == 'CDS':
tx2cds_starts[tx_id] = start + ',' + tx2cds_starts.get(tx_id, '')
tx2cds_ends[tx_id] = end + ',' + tx2cds_ends.get(tx_id, '')
gene2repretx = {} ## representative transcript (repretx) is the longest transcript for each gene
trans2len = {}
for tx_id in tx2gene:
chrom, strand, gene_id, biotype = tx2gene[tx_id].split('|')
exon_starts = sorted([int(i) for i in tx2exon_starts[tx_id].strip(',').split(',')])
exon_ends = sorted([int(i) for i in tx2exon_ends[tx_id].strip(',').split(',')])
tx_len = 0
for i in range(len(exon_starts)):
tx_len += (exon_ends[i] - exon_starts[i])
trans2len[tx_id] = tx_len
if gene_id in gene2repretx:
if tx_len > trans2len[gene2repretx[gene_id]]:
gene2repretx[gene_id] = tx_id
else:
gene2repretx[gene_id] = tx_id
for tx_id in sorted(tx2gene):
chrom, strand, gene_id, biotype = tx2gene[tx_id].split('|')
if tx_id == gene2repretx[gene_id]:
exon_starts = [str(j) for j in sorted([int(i) for i in tx2exon_starts[tx_id].strip(',').split(',')])]
exon_ends = [str(j) for j in sorted([int(i) for i in tx2exon_ends[tx_id].strip(',').split(',')])]
tx_start = exon_starts[0]
tx_end = exon_ends[-1]
cds_start = '.'
cds_end = '.'
if tx_id in tx2cds_starts:
cds_starts = [str(j) for j in sorted([int(i) for i in tx2cds_starts[tx_id].strip(',').split(',')])]
cds_ends = [str(j) for j in sorted([int(i) for i in tx2cds_ends[tx_id].strip(',').split(',')])]
cds_start = cds_starts[0]
cds_end = cds_ends[-1]
output_file.write('%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n' % (chrom, tx_start, tx_end, cds_start, cds_end, strand, ','.join(exon_starts), ','.join(exon_ends), tx_id, gene_id, biotype))
if args.download:
if args.genome:
print '[download %s genome]' % args.species
if args.species == 'hg19' or args.species == 'mm10':
print 'wget -q %s -O %s.tar.gz' % (genome_dict[args.species], args.species)
os.system('wget -q %s -O %s.tar.gz' % (genome_dict[args.species], args.species))
print 'tar -zxf %s.tar.gz' % args.species
os.system('tar -zxf %s.tar.gz' % args.species)
print 'cat chr*.fa > %s_dna.fa' % args.species
os.system('cat chr*.fa > %s_dna.fa' % args.species)
print 'rm chr*.fa'
os.system('rm chr*.fa')
else:
print 'wget -q %s -O %s.fa.gz' % (genome_dict[args.species], args.species)
os.system('wget -q %s -O %s.fa.gz' % (genome_dict[args.species], args.species))
print 'zcat %s.fa.gz > %s_dna.fa' % (args.species, args.species)
os.system('zcat %s.fa.gz > %s_dna.fa' % (args.species, args.species))
print 'rm %s.fa.gz' % args.species
os.system('rm %s.fa.gz' % args.species)
elif args.transcriptome:
print '[download %s transcriptome]' % args.species
for i in trans_dict[args.species]:
print 'wget -q %s' % i
os.system('wget -q %s' % i)
print 'zcat *.fa.gz > %s_trans.fa' % args.species
os.system('zcat *.fa.gz > %s_trans.fa' % args.species)
print 'rm *.fa.gz'
os.system('rm *.fa.gz')
elif args.annotation:
print '[download %s gene annotation]' % args.species
print 'wget -q %s -O %s.gtf.gz' % (anno_dict[args.species], args.species)
os.system('wget -q %s -O %s.gtf.gz' % (anno_dict[args.species], args.species))
print 'gzip -d %s.gtf.gz' % args.species
os.system('gzip -d %s.gtf.gz' % args.species)
else:
print 'please specify -g/--genome or -t/--transcriptome or -a/--annotation'
elif args.build:
if args.genome:
print '[build %s genome]' % args.species
print 'bowtie-build %s_dna.fa %s_dna' % (args.species, args.species)
os.system('bowtie-build %s_dna.fa %s_dna' % (args.species, args.species))
elif args.transcriptome:
print '[build %s transcriptome]' % args.species
print 'bowtie-build %s_trans.fa %s_trans' % (args.species, args.species)
os.system('bowtie-build %s_trans.fa %s_trans' % (args.species, args.species))
elif args.annotation:
print '[build %s gene annotation]' % args.species
print 'gtf_build(%s.gtf, %s.gtf.build)' % (args.species, args.species)
gtf_build(args.species+'.gtf', args.species+'.gtf.build')
else:
print 'please specify -g/--genome or -t/--transcriptome or -a/--annotation'
else:
print 'please specify -d/--download or -b/--build'
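# Example workflow (added for illustration; run from the data directory):
#   python SeqAnnoDownloadBuild.py hg19 -d -g   # download genome FASTA
#   python SeqAnnoDownloadBuild.py hg19 -b -g   # bowtie-build genome index
#   python SeqAnnoDownloadBuild.py hg19 -d -a   # download GTF annotation
#   python SeqAnnoDownloadBuild.py hg19 -b -a   # build annotation table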
|
bioxfu/circRNAFinder
|
src/SeqAnnoDownloadBuild.py
|
Python
|
gpl-3.0
| 7,557
|
[
"Bowtie"
] |
c634fbffa5ede9c35940adccc0f99d590f84a7f75a1e036bb33b67bdd9bbb059
|
'''
[1] Mei, Jian-Ping, et al. "Drug target interaction prediction by learning from local information and neighbors." Bioinformatics 29.2 (2013): 238-245.
[2] van Laarhoven, Twan, Sander B. Nabuurs, and Elena Marchiori. "Gaussian interaction profile kernels for predicting drug-target interaction." Bioinformatics 27.21 (2011): 3036-3043.
Default Parameters:
alpha = 0.5
gamma = 1.0 (the gamma0 in [1], see Eq. 11 and 12 for details)
avg = False (True: g=mean, False: g=max)
sigma = 1.0 (The regularization parameter used for the RLS-avg classifier)
Please refer to Section 4.1 in [1] and Section 4 in [2] for the details.
'''
import numpy as np
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics import precision_recall_curve, roc_curve
from sklearn.metrics import auc
class BLMNII:
def __init__(self, alpha=0.5, gamma=1.0, sigma=1.0, avg=False):
self.alpha = float(alpha)
self.gamma = float(gamma)
self.sigma = float(sigma)
        # normalize string/bool input; anything else defaults to False
        self.avg = avg in ('true', 'True', True)
def kernel_combination(self, R, S, new_inx, bandwidth):
K = self.alpha*S+(1.0-self.alpha)*rbf_kernel(R, gamma=bandwidth)
K[new_inx, :] = S[new_inx, :]
K[:, new_inx] = S[:, new_inx]
return K
def rls_train(self, R, S, K, train_inx, new_inx):
Y = R.copy()
for d in new_inx:
Y[d, :] = np.dot(S[d, train_inx], Y[train_inx, :])
x1, x2 = np.max(Y[d, :]), np.min(Y[d, :])
Y[d, :] = (Y[d, :]-x2)/(x1-x2)
vec = np.linalg.inv(K+self.sigma*np.eye(K.shape[0]))
return np.dot(np.dot(K, vec), Y)
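    # Note (added): rls_train implements the kernel ridge (RLS) predictor
    # used in [2], Y_hat = K (K + sigma*I)^{-1} Y, via a dense inverse;
    # adequate for the small drug/target kernels in these benchmarks.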
def fix_model(self, W, intMat, drugMat, targetMat, seed=None):
R = W*intMat
m, n = intMat.shape
x, y = np.where(R > 0)
drugMat = (drugMat+drugMat.T)/2
targetMat = (targetMat+targetMat.T)/2
train_drugs = np.array(list(set(x.tolist())), dtype=np.int32)
train_targets = np.array(list(set(y.tolist())), dtype=np.int32)
new_drugs = np.array(list(set(xrange(m)) - set(x.tolist())), dtype=np.int32)
new_targets = np.array(list(set(xrange(n)) - set(y.tolist())), dtype=np.int32)
drug_bw = self.gamma*m/len(x)
target_bw = self.gamma*n/len(x)
Kd = self.kernel_combination(R, drugMat, new_drugs, drug_bw)
Kt = self.kernel_combination(R.T, targetMat, new_targets, target_bw)
self.Y1 = self.rls_train(R, drugMat, Kd, train_drugs, new_drugs)
self.Y2 = self.rls_train(R.T, targetMat, Kt, train_targets, new_targets)
def predict_scores(self, test_data, N):
inx = np.array(test_data)
x, y = inx[:, 0], inx[:, 1]
if self.avg:
scores = 0.5*(self.Y1[x, y]+self.Y2.T[x, y])
else:
scores = np.maximum(self.Y1[x, y], self.Y2.T[x, y])
return scores
def evaluation(self, test_data, test_label):
x, y = test_data[:, 0], test_data[:, 1]
if self.avg:
scores = 0.5*(self.Y1[x, y]+self.Y2.T[x, y])
else:
scores = np.maximum(self.Y1[x, y], self.Y2.T[x, y])
prec, rec, thr = precision_recall_curve(test_label, scores)
aupr_val = auc(rec, prec)
fpr, tpr, thr = roc_curve(test_label, scores)
auc_val = auc(fpr, tpr)
return aupr_val, auc_val
def __str__(self):
return "Model:BLMNII, alpha:%s, gamma:%s, sigma:%s, avg:%s" % (self.alpha, self.gamma, self.sigma, self.avg)
if __name__ == "__main__":
import time
from functions import *
seeds = [7771, 8367, 22, 1812, 4659]
for cv_setting in [1, 2, 3]:
for dataset in ["nr", "gpcr", "ic", "e"]:
intMat, drugMat, targetMat = load_data_from_file(dataset, "../dataset/")
if cv_setting == 1: # CV setting S1
X, D, T, cv = intMat, drugMat, targetMat, 1
if cv_setting == 2: # CV setting S2
X, D, T, cv = intMat, drugMat, targetMat, 0
if cv_setting == 3: # CV setting S3
X, D, T, cv = intMat.T, targetMat, drugMat, 0
max_auc, max_aupr, auc_opt, aupr_opt = 0, 0, [], []
for x in np.arange(0, 0.1, 0.1):
cv_data = cross_validation(X, seeds, cv)
tic = time.clock()
model = BLMNII(alpha=x, avg=False)
cmd = "dataset:"+dataset+", cross_validation: "+str(cv_setting)+"\n"+str(model)
aupr_vec, auc_vec = train(model, cv_data, X, D, T)
# aupr_avg, auc_avg = np.mean(aupr_vec), np.mean(auc_vec)
aupr_avg, aupr_st = mean_confidence_interval(aupr_vec)
auc_avg, auc_st = mean_confidence_interval(auc_vec)
print(cmd)
print("AUPR: %s, AUC:%s, AUPRst:%s, AUCst:%s, Time:%s" % (aupr_avg, auc_avg, aupr_st, auc_st, time.clock() - tic))
if aupr_avg > max_aupr:
max_aupr = aupr_avg
aupr_opt = [cmd, aupr_avg, auc_avg]
if auc_avg > max_auc:
max_auc = auc_avg
auc_opt = [cmd, aupr_avg, auc_avg]
# cmd = "Optimal Parameters for AUPR optimization:\n%s\n" % aupr_opt[0]
# cmd += "AUPR: %s, AUC: %s\n" % (aupr_opt[1], aupr_opt[2])
# cmd += "Optimal Parameters for AUC optimization:\n%s\n" % auc_opt[0]
# cmd += "AUPR: %s, AUC: %s" % (auc_opt[1], auc_opt[2])
# print "\n"+cmd
# with open("../output/blmnii_results.txt", "a+") as outf:
# outf.write("Dataset:"+dataset+"\n"+cmd+"\n\n")
write_metric_vector_to_file(aupr_vec, "../output/blm_aupr_"+str(cv_setting)+"_"+dataset+".txt")
write_metric_vector_to_file(auc_vec, "../output/blm_auc_"+str(cv_setting)+"_"+dataset+".txt")
|
akiyamalab/BO-DTI
|
blm.py
|
Python
|
gpl-3.0
| 5,916
|
[
"Gaussian"
] |
189a38a26db9722e5f9cf806f74df5593c08363403c68e3d10d8e0a07e9f86d5
|
import sys
import random
import bisect
import pysam
import gzip
import cPickle
import numpy
from time import time, localtime, strftime
import argparse
from multiprocessing import Process
import os
import math
inds={'A':0,'T':1,'G':2,'C':3,'N':4,'a':0,'t':1,'g':2,'c':3,'n':4}
def subprogram(command, name):
os.system(command)
print "exiting subprocess " + str(name)
def main(argv):
t0 = time()
arguline = " ".join(argv)
parser = argparse.ArgumentParser(description='Wessim2: Whole Exome Sequencing SIMulator 2 (Probe-based version)', prog='Wessim2', formatter_class=argparse.RawTextHelpFormatter)
group1 = parser.add_argument_group('Mandatory input files')
group1.add_argument('-R', metavar = 'FILE', dest='reference', required=True, help='faidx-indexed (R)eference genome FASTA file or meta description file (.meta)')
group1.add_argument('-P', metavar = 'FILE', dest='probe', required=True, help='(P)robe sequence FASTA file')
group1.add_argument('-B', metavar = 'FILE', dest='probeblat', required=True, help='(B)lat matched probe regions .PSL file')
group2 = parser.add_argument_group('Parameters for exome capture')
group2.add_argument('-f', metavar = 'INT', type=int, dest='fragsize', required=False, help='mean (f)ragment size. this corresponds to insert size when sequencing in paired-end mode. [200]', default=200)
group2.add_argument('-d', metavar = 'INT', type=int, dest='fragsd', required=False, help='standard (d)eviation of fragment size [50]', default=50)
group2.add_argument('-m', metavar = 'INT', type=int, dest='fragmin', required=False, help='(m)inimum fragment length [read_length + 20]')
group2.add_argument('-y', metavar = 'PERCENT',type=int, dest='bind', required=False, help='minimum required fraction of probe match to be h(y)bridized [50]', default=50)
group2.add_argument('-w', metavar = 'INT', type=int, dest='weight', required=False, help='penalty (w)eight for indel in the hybridization [2]', default=2)
group3 = parser.add_argument_group('Parameters for sequencing')
group3.add_argument('-p', action='store_true', help='generate paired-end reads [single]')
group3.add_argument('-n', metavar = 'INT', type=int, dest='readnumber', required=True, help='total (n)umber of reads')
group3.add_argument('-l', metavar = 'INT', type=int, dest='readlength', required=True, help='read (l)ength (bp)')
group3.add_argument('-M', metavar = 'FILE', dest='model', required=True, help='GemSim (M)odel file (.gzip)')
group3.add_argument('-t', metavar = 'INT', type=int, dest='threadnumber', required=False, help='number of (t)hreaded subprocesses [1]', default=1)
group4 = parser.add_argument_group('Output options')
group4.add_argument('-o', metavar = 'FILE', dest='outfile', help='(o)utput file header. ".fastq.gz" or ".fastq" will be attached automatically. Output will be splitted into two files in paired-end mode', required=True)
group4.add_argument('-z', action='store_true', help='compress output with g(z)ip [false]')
group4.add_argument('-q', metavar = 'INT', type=int, dest='qualbase', required=False, help='(q)uality score offset [33]', default=33)
group4.add_argument('-v', action='store_true', help='(v)erbose; print out intermediate messages.')
args = parser.parse_args()
reffile = args.reference
probefile = args.probe
alignfile = args.probeblat
isize = args.fragsize
isd = args.fragsd
imin = args.fragmin
bind = args.bind
paired = args.p
readlength = args.readlength
readnumber = args.readnumber
threadnumber = args.threadnumber
    if imin is None:
        # same default for single-end and paired-end mode
        imin = readlength + 20
if isize < imin:
print "too small mean fragment size (" + str(isize) + ") compared to minimum length (" + str(imin) + "). Increase it and try again."
sys.exit(0)
model = args.model
outfile = args.outfile
compress = args.z
qualbase = args.qualbase
verbose = args.v
print
print "-------------------------------------------"
print "Reference:", reffile
print "Probeset:", probefile
print "Probematch:", alignfile
print "Fragment:",isize, "+-", isd, ">", imin
print "Paired-end mode?", paired
print "Sequencing model:", model
print "Read length:", readlength, "Read number:", readnumber
print "Output File:", outfile
print "Gzip compress?", compress
print "Quality base:", qualbase
print "Thread number:", threadnumber
print "Job started at:", strftime("%Y-%m-%d %H:%M:%S", localtime())
print "-------------------------------------------"
print
processes = []
for t in range(0, threadnumber):
readstart = int(float(readnumber) / float(threadnumber) * t) + 1
readend = int(float(readnumber) / float(threadnumber) * (t+1))
command = "python __sub_wessim2.py " + arguline + " -1 " + str(readstart) + " -2 " + str(readend) + " -i " + str(t+1)
p = Process(target=subprogram, args=(command, t+1))
p.start()
processes.append(p)
for p in processes:
p.join()
t1 = time()
print "Done generating " + str(readnumber) + " reads in %f secs" % (t1 - t0)
print "Merging subresults..."
wread = None
wread2 = None
if paired and compress:
wread = gzip.open(outfile + "_1.fastq.gz", 'wb')
wread2 = gzip.open(outfile + "_2.fastq.gz", 'wb')
elif paired and not compress:
wread = open(outfile + "_1.fastq", 'w')
wread2 = open(outfile + "_2.fastq", 'w')
elif not paired and compress:
wread = gzip.open(outfile + ".fastq.gz", 'wb')
else:
wread = open(outfile + ".fastq", 'w')
if not paired:
for t in range(0, threadnumber):
suboutfile = outfile + "-" + str(t+1)
fread = None
if compress:
suboutfile += ".fastq.gz"
fread = gzip.open(suboutfile, 'rb')
else:
suboutfile += ".fastq"
fread = open(suboutfile, 'r')
line = fread.readline()
while line:
wread.write(line)
line = fread.readline()
fread.close()
os.remove(suboutfile)
wread.close()
else:
for t in range(0, threadnumber):
suboutfile1 = outfile + "-" + str(t+1) + "_1"
suboutfile2 = outfile + "-" + str(t+1) + "_2"
fread1 = None
fread2 = None
if compress:
suboutfile1 += ".fastq.gz"
suboutfile2 += ".fastq.gz"
fread1 = gzip.open(suboutfile1, "rb")
fread2 = gzip.open(suboutfile2, "rb")
else:
suboutfile1 += ".fastq"
suboutfile2 += ".fastq"
fread1 = open(suboutfile1, "r")
fread2 = open(suboutfile2, "r")
line1 = fread1.readline()
line2 = fread2.readline()
while line1 and line2:
wread.write(line1)
wread2.write(line2)
line1 = fread1.readline()
line2 = fread2.readline()
fread1.close()
fread2.close()
os.remove(suboutfile1)
os.remove(suboutfile2)
wread.close()
wread2.close()
sys.exit(0)
if __name__=="__main__":
main(sys.argv[1:])
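# Example invocation (added; all file names are placeholders):
#   python Wessim2.py -R hg19.fa -P probes.fa -B probe_match.psl \
#       -n 1000000 -l 100 -M gemsim_model.gzip -o sample -p -z -t 4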
|
cbg-ethz/WES_Cancer_Sim
|
sim_cancer/tools/Wessim_beta/Wessim2.py
|
Python
|
apache-2.0
| 6,678
|
[
"pysam"
] |
997e0b7ddd2a898dad9c96b77d9a0d56dc5290dcf6797acf23d1243dd34ad9fc
|
# Load Modules
from __future__ import print_function
import os
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
from statsmodels.tsa.arima_model import ARIMA
import statsmodels.api as sm
import statsmodels.tsa.api as smtsa
# Function to plot signal, ACF and PACF
def plotds(xt, nlag=30, fig_size=(12, 10)):
if not isinstance(xt, pd.Series):
xt = pd.Series(xt)
plt.figure(figsize=fig_size)
layout = (2, 2)
# Assign axes
ax_xt = plt.subplot2grid(layout, (0, 0), colspan=2)
ax_acf = plt.subplot2grid(layout, (1, 0))
ax_pacf = plt.subplot2grid(layout, (1, 1))
# Plot graphs
xt.plot(ax=ax_xt)
ax_xt.set_title("Time Series")
    plot_acf(xt, lags=nlag, ax=ax_acf)
    plot_pacf(xt, lags=nlag, ax=ax_pacf)
plt.tight_layout()
return None
# Read data from Excel file
djia_df = pd.read_excel("datasets/DJIA_Jan2016_Dec2016.xlsx")
# Inspect the first few rows
djia_df.head(10)
# Let us parse the Date column and use as row index for the DataFrame and drop it as a column
djia_df["Date"] = pd.to_datetime(djia_df["Date"], "%Y-%m-%d")
djia_df.index = djia_df["Date"]
djia_df.drop("Date", axis=1, inplace=True)
# Let us see first few rows of the modified DataFrame
djia_df.head(10)
# Plot ACF and PACF
djia_df = djia_df.dropna()
plotds(djia_df["Close"], nlag=50)
# Compare mean and variance across the two halves (a quick stationarity check)
mean1, mean2 = djia_df.iloc[:125].Close.mean(), djia_df.iloc[125:].Close.mean()
var1, var2 = djia_df.iloc[:125].Close.var(), djia_df.iloc[125:].Close.var()
print("mean1=%f, mean2=%f" % (mean1, mean2))
print("variance1=%f, variance2=%f" % (var1, var2))
# ADF Test
from statsmodels.tsa.stattools import adfuller
adf_result = adfuller(djia_df.Close.tolist())
print("ADF Statistic: %f" % adf_result[0])
print("p-value: %f" % adf_result[1])
# QQ plot and probability plot
sm.qqplot(djia_df["Close"], line="s")
# Optimize ARMA parameters on the raw series (this raises a
# non-stationarity error by design, so guard it to let the script continue)
try:
    arma_obj = smtsa.ARMA(djia_df["Close"].tolist(), order=(1, 1)).fit(
        maxlag=30, method="mle", trend="nc"
    )
except ValueError as err:
    print("ARMA(1,1) on the raw series failed as expected:", err)
# Let us plot the original time series and first-differences
first_order_diff = djia_df["Close"].diff(1).dropna()
fig, ax = plt.subplots(2, sharex=True)
fig.set_size_inches(5.5, 5.5)
djia_df["Close"].plot(ax=ax[0], color="b")
ax[0].set_title("Close values of DJIA during Jan 2016-Dec 2016")
first_order_diff.plot(ax=ax[1], color="r")
ax[1].set_title("First-order differences of DJIA during Jan 2016-Dec 2016")
# plot signal
plotds(first_order_diff, nlag=50)
adf_result = adfuller(first_order_diff)
print("ADF Statistic: %f" % adf_result[0])
print("p-value: %f" % adf_result[1])
# Optimize ARMA parameters
aicVal = []
for d in range(1, 3):
for ari in range(0, 3):
for maj in range(0, 3):
try:
arima_obj = ARIMA(djia_df["Close"].tolist(), order=(ari, d, maj))
arima_obj_fit = arima_obj.fit()
aicVal.append([ari, d, maj, arima_obj_fit.aic])
except ValueError:
pass
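# Pick the (p, d, q) with the lowest AIC from the grid above (helper added
# for illustration; the original analysis settles on (0, 2, 1) below):
if aicVal:
    best = min(aicVal, key=lambda v: v[3])
    print("Lowest-AIC order (p, d, q):", tuple(best[:3]))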
# Optimal ARIMA model
arima_obj = ARIMA(djia_df["Close"].tolist(), order=(0, 2, 1))
arima_obj_fit = arima_obj.fit(disp=0)
arima_obj_fit.summary()
# Evaluate prediction
pred = np.append([0, 0], arima_obj_fit.fittedvalues.tolist())
djia_df["ARIMA"] = pred
diffval = np.append([0, 0], arima_obj_fit.resid + arima_obj_fit.fittedvalues)
djia_df["diffval"] = diffval
# QQ plot and probability plot
sm.qqplot(arima_obj_fit.resid, line="s")
# Plot output
f, axarr = plt.subplots(1, sharex=True)
f.set_size_inches(5.5, 5.5)
djia_df["diffval"].iloc[2:].plot(color="b", linestyle="-", ax=axarr)
djia_df["ARIMA"].iloc[2:].plot(color="r", linestyle="--", ax=axarr)
axarr.set_title("ARIMA(0,2,1)")
plt.xlabel("Index")
plt.ylabel("Closing")
# Forecasting
# forecast() returns (point_forecast, stderr, conf_int); align the point
# forecast with future business days (business-day index assumed)
f, err, ci = arima_obj_fit.forecast(40)
future_index = pd.date_range(djia_df.index[-1], periods=41, freq="B")[1:]
djia_df = djia_df.reindex(djia_df.index.union(future_index))
djia_df.loc[future_index, "forecast"] = f
djia_df[["Close", "forecast"]].plot(figsize=(12, 8))
##############
# SARIMAX
##############
# Seasonality (the ACF of the first difference shows significance at lag 42)
x = djia_df["Close"] - djia_df["Close"].shift(42)
mod = sm.tsa.statespace.SARIMAX(
djia_df["Close"], trend="n", order=(0, 2, 1), seasonal_order=(1, 1, 1, 42)
)
sarimax = mod.fit()
sarimax.summary()
|
Diyago/Machine-Learning-scripts
|
time series regression/ARIMA/ARIMA.py
|
Python
|
apache-2.0
| 4,301
|
[
"ADF"
] |
4f7c0f2877fa1ca473720f535662989880e4a66112cd8422a52df1f8f3dfe7c9
|
__source__ = ' https://leetcode.com/problems/maximum-depth-of-n-ary-tree/'
# Time: O(N)
# Space: O(N)
#
# Description: Leetcode # 559. Maximum Depth of N-ary Tree
#
# Given a n-ary tree, find its maximum depth.
#
# The maximum depth is the number of nodes along the longest path
# from the root node down to the farthest leaf node.
#
# For example, given a 3-ary tree:
#
# We should return its max depth, which is 3.
#
# Note:
#
# The depth of the tree is at most 1000.
# The total number of nodes is at most 5000.
#
import unittest
# Definition for a Node.
class Node(object):
def __init__(self, val, children):
self.val = val
self.children = children
class Solution(object):
# 104ms 93.85%
def maxDepth(self, root): #DFS
"""
:type root: Node
:rtype: int
"""
if root is None:
return 0
elif root.children == []:
return 1
else:
height = [self.maxDepth(c) for c in root.children]
return max(height) + 1
# 108ms 72.41%
def maxDepthBFS(self, root):
"""
:type root: Node
:rtype: int
"""
stack = []
if root is not None:
stack.append((1, root))
depth = 0
while stack != []:
current_depth, root = stack.pop()
if root is not None:
depth = max(depth, current_depth)
for c in root.children:
stack.append((current_depth + 1, c))
return depth
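# Worked example (construction added here): the 3-ary tree from the problem
# statement, whose maximum depth is 3.
#         1
#       / | \
#      3  2  4
#     / \
#    5   6
def _example_tree():
    return Node(1, [Node(3, [Node(5, []), Node(6, [])]),
                    Node(2, []),
                    Node(4, [])])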
class TestMethods(unittest.TestCase):
    def test_Local(self):
        root = _example_tree()
        self.assertEqual(3, Solution().maxDepth(root))
        self.assertEqual(3, Solution().maxDepthBFS(root))
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/maximum-depth-of-n-ary-tree/solution/
#
Complexity analysis
Time complexity : we visit each node exactly once,
thus the time complexity is O(N), where NN is the number of nodes.
Space complexity : in the worst case, the tree is completely unbalanced,
e.g. each node has only one child node,
the recursion call would occur N times (the height of the tree),
therefore the storage to keep the call stack would be O(N).
But in the best case (the tree is completely balanced),
the height of the tree would be log(N).
Therefore, the space complexity in this case would be O(log(N)).
/*
// Definition for a Node.
class Node {
public int val;
public List<Node> children;
public Node() {}
public Node(int _val,List<Node> _children) {
val = _val;
children = _children;
}
};
*/
# DFS
# 4ms 47.26%
class Solution {
public int maxDepth(Node root) {
if (root == null) return 0;
else if (root.children.isEmpty()) return 1;
else {
List<Integer> heights = new LinkedList();
for (Node child : root.children) {
heights.add(maxDepth(child));
}
return Collections.max(heights) + 1;
}
}
}
# BFS
# 10ms 4.65%
class Solution {
public int maxDepth(Node root) {
if (root == null) return 0;
else if (root.children.isEmpty()) return 1;
Queue<Node> queue = new LinkedList();
queue.offer(root);
int count = 0;
while (!queue.isEmpty()) {
int size = queue.size();
for (int i = 0; i < size; i++) {
Node item = queue.poll();
if (item.children != null) {
for (Node each: item.children) queue.offer(each);
}
}
count++;
}
return count;
}
}
# BFS + Pair
# 12ms 3.34%
import javafx.util.Pair;
import java.lang.Math;
class Solution {
public int maxDepth(Node root) {
Queue<Pair<Node, Integer>> stack = new LinkedList<>();
if (root != null) {
stack.add(new Pair(root, 1));
}
int depth = 0;
while (!stack.isEmpty()) {
Pair<Node, Integer> current = stack.poll();
root = current.getKey();
int current_depth = current.getValue();
if (root != null) {
depth = Math.max(depth, current_depth);
for (Node c : root.children) {
stack.add(new Pair(c, current_depth + 1));
}
}
}
return depth;
}
}
'''
|
JulyKikuAkita/PythonPrac
|
cs15211/MaximumDepthofN-aryTree.py
|
Python
|
apache-2.0
| 4,218
|
[
"VisIt"
] |
448466cc0ab4b24f1d07d6c21d1bbfed61de014456014da550e43ba9e3ccaf37
|
from utilities import read_csv, str2fl
from math import pow, log, exp
from psychrometrics import HumFromRHumTemp
class Weather(object):
"""
Weather
Read epw file
http://bigladdersoftware.com/epx/docs/8-2/auxiliary-programs/epw-csv-format-inout.html
properties
location # location name
staTemp % air temperature (C)
staTdp % dewpoint temperature (C)
staRhum % air relative humidity (%)
staPres % air pressure (Pa)
staInfra % horizontal Infrared Radiation Intensity (W m-2)
staHor % horizontal radiation
staDir % normal solar direct radiation (W m-2)
staDif % horizontal solar diffuse radiation (W m-2)
    staUdir % wind direction (deg)
    staUmod % wind speed (m s-1)
    staRobs % Precipitation (mm h-1)
    staHum % specific humidity (kg kg-1)
"""
    def __init__(self, climate_file, HI, HF):
        # HI: Julian start date
        # HF: Julian final date
        # HI and HF define the rows we want
# Open .epw file and feed csv data to self.climate_data
try:
self.climate_data = read_csv(climate_file)
except Exception as e:
raise Exception("Failed to read .epw file! {}".format(e.message))
self.location = self.climate_data[0][1]
self.staTemp = str2fl([r[6] for r in self.climate_data[HI:HF+1]]) # drybulb [C]
self.staTdp = str2fl([r[7] for r in self.climate_data[HI:HF+1]]) # dewpoint [C]
self.staRhum = str2fl([r[8] for r in self.climate_data[HI:HF+1]]) # air relative humidity (%)
self.staPres = str2fl([r[9] for r in self.climate_data[HI:HF+1]]) # air pressure (Pa)
self.staInfra = str2fl([r[12] for r in self.climate_data[HI:HF+1]]) # horizontal Infrared Radiation Intensity (W m-2)
self.staHor = str2fl([r[13] for r in self.climate_data[HI:HF+1]]) # horizontal radiation [W m-2]
self.staDir = str2fl([r[14] for r in self.climate_data[HI:HF+1]]) # normal solar direct radiation (W m-2)
self.staDif = str2fl([r[15] for r in self.climate_data[HI:HF+1]]) # horizontal solar diffuse radiation (W m-2)
        self.staUdir = str2fl([r[20] for r in self.climate_data[HI:HF+1]])    # wind direction (deg)
self.staUmod = str2fl([r[21] for r in self.climate_data[HI:HF+1]]) # wind speed (m s-1)
self.staRobs = str2fl([r[33] for r in self.climate_data[HI:HF+1]]) # Precipitation (mm h-1)
        self.staHum = [0.0] * len(self.staTemp)  # specific humidity (kg H2O / kg dry air)
for i in xrange(len(self.staTemp)):
self.staHum[i] = HumFromRHumTemp(self.staRhum[i], self.staTemp[i], self.staPres[i])
self.staTemp = [s+273.15 for s in self.staTemp] # air temperature (K)
def __repr__(self):
return "Weather: {a}, HI Tdb:{b}, HF Tdb:{c}".format(
a=self.location,
b=self.staTemp[0]-273.15,
c=self.staTemp[-1]-273.15
)
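if __name__ == "__main__":
    # Usage sketch (added; file path and row range are placeholders):
    # read one day of hourly records starting at row HI of the .epw body.
    w = Weather("resources/example.epw", HI=8, HF=31)
    print w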
|
saeranv/UWG_Python
|
UWG/weather.py
|
Python
|
gpl-3.0
| 3,162
|
[
"EPW"
] |
af0c2f3ce17fa81baab9a4a27fcc588f1eea65c5c7618d0911f7588823c05a32
|
from django.utils.translation import ugettext_lazy as _
import horizon
class SwiftCluster(horizon.PanelGroup):
slug = "swift_cluster"
name = _("Swift Cluster")
panels = ('regions', 'zones', 'nodes', 'rings', 'containers',)
class SDSManagement(horizon.PanelGroup):
name = _("SDS Management")
slug = "sds_management"
panels = ('projects', 'filters', 'metrics', 'policies', 'controllers')
class Monitoring(horizon.PanelGroup):
name = _("Monitoring")
slug = "monitoring"
panels = ('swift_monitoring', 'kibana',)
class CrystalController(horizon.Dashboard):
name = _("Crystal Controller")
slug = "crystal"
panels = (SwiftCluster, SDSManagement, Monitoring,) # Add your panels here.
default_panel = 'projects' # Specify the slug of the default panel.
permissions = ('openstack.roles.admin', 'openstack.services.object-store', )
horizon.register(CrystalController)
|
Crystal-SDS/dashboard
|
crystal_dashboard/dashboards/crystal/dashboard.py
|
Python
|
gpl-3.0
| 925
|
[
"CRYSTAL"
] |
9e243bd091e29d8a9db7b35b27c79c11586d4fd18f9f7b791046d3a62791fd69
|
#!/usr/bin/python
#
# Copyright 2010 Brian Dolbec <brian.dolbec@gmail.com>
# Copyright(c) 2010, Gentoo Foundation
# Copyright 2003-2004 Karl Trygve Kalleberg
# Licensed under the GNU General Public License, v2
#
# $Header: $
"""Gentoo's installed packages analysis and repair tool"""
# Move to Imports section after Python 2.6 is stable
__docformat__ = 'epytext'
# version is dynamically set by distutils sdist
__version__ = "git"
__productname__ = "enalyze"
__authors__ = (
'Brian Dolbec, <brian.dolbec@gmail.com>'
)
# make an exportable copy of the info for help output
MODULE_INFO = {
"__docformat__": __docformat__,
"__doc__": __doc__,
"__version__": __version__,
"__productname__": __productname__,
"__authors__": __authors__
}
import errno
import sys
from getopt import getopt, GetoptError
import portage
import gentoolkit as gen
from gentoolkit import errors
from gentoolkit import pprinter as pp
from gentoolkit.base import (initialize_configuration, split_arguments,
parse_global_options, print_help)
NAME_MAP = {
'a': 'analyze',
'r': 'rebuild'
}
FORMATTED_OPTIONS = (
(" (a)nalyze",
"analyzes the installed PKG database USE flag or keyword useage"),
(" (r)ebuild",
"analyzes the Installed PKG database and generates files suitable"),
(" ",
"to replace corrupted or missing /etc/portage/package.* files")
)
def expand_module_name(module_name):
"""Returns one of the values of NAME_MAP or raises KeyError"""
if module_name == 'list':
# list is a Python builtin type, so we must rename our module
return 'list_'
elif module_name in NAME_MAP.values():
return module_name
else:
return NAME_MAP[module_name]
def main():
"""Parse input and run the program."""
short_opts = "hqCNV"
long_opts = (
'help', 'quiet', 'nocolor', 'no-color', 'no-pipe', 'version', 'debug'
)
initialize_configuration()
try:
global_opts, args = getopt(sys.argv[1:], short_opts, long_opts)
except GetoptError as err:
sys.stderr.write(" \n")
sys.stderr.write(pp.error("Global %s\n" % err))
print_help(MODULE_INFO, FORMATTED_OPTIONS, with_description=False)
sys.exit(2)
# Parse global options
need_help = parse_global_options(global_opts, args, MODULE_INFO, FORMATTED_OPTIONS)
if gen.CONFIG['quiet']:
gen.CONFIG['verbose'] = False
try:
module_name, module_args = split_arguments(args)
except IndexError:
print_help(MODULE_INFO, FORMATTED_OPTIONS)
sys.exit(2)
if need_help:
module_args.append('--help')
try:
expanded_module_name = expand_module_name(module_name)
except KeyError:
sys.stderr.write(pp.error("Unknown module '%s'" % module_name))
print_help(MODULE_INFO, FORMATTED_OPTIONS, with_description=False)
sys.exit(2)
try:
loaded_module = __import__(
expanded_module_name, globals(), locals(), [], 1
)
loaded_module.main(module_args)
except portage.exception.AmbiguousPackageName as err:
raise errors.GentoolkitAmbiguousPackage(err.args[0])
except IOError as err:
if err.errno != errno.EPIPE:
raise
if __name__ == '__main__':
main()
|
zmedico/gentoolkit
|
pym/gentoolkit/enalyze/__init__.py
|
Python
|
gpl-2.0
| 3,046
|
[
"Brian"
] |
662ead7907870dbbee273c06e81392c023056a020ccfd6b9b6cc396e3cd21b55
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import inspect
import logging
import re
from lxml import etree
from django.db.models import Model
from ..models import *
from .data import OPERATOR_TABLE
from .exceptions import BlocklyXmlBuilderException
logger = logging.getLogger(__name__)
def camel_case_to_snake_case(name):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
class BlocklyXmlBuilder(NodeCacheHolder):
def build(self, tree_root):
xml = etree.Element('xml')
self.visit(tree_root, parent_xml=xml)
return etree.tostring(xml, pretty_print=True).decode('utf-8')
def visit(self, node, parent_xml):
content_object = node.content_object
if content_object is None:
last_xml = None
for child in self.get_children(node):
if last_xml is not None:
next_element = etree.Element('next')
last_xml.append(next_element)
parent_xml = next_element
last_xml = self.visit(child, parent_xml)
return
for cls in inspect.getmro(content_object.__class__):
if cls == Model:
break
method_name = 'visit_{}'.format(camel_case_to_snake_case(cls.__name__))
method = getattr(self, method_name, None)
if not method:
continue
node_xml = method(node, parent_xml)
if not getattr(method, 'process_children', None):
for child in self.get_children(node):
self.visit(child, parent_xml)
node_xml.set('id', str(node.id))
return node_xml
if content_object.__class__ not in (VariableDefinition,):
logger.debug('Unsupported content_object: {}'.format(content_object.__class__))
def visit_constant(self, node, parent_xml):
block_type = {
NumberConstant: 'math_number',
StringConstant: 'text',
BooleanConstant: 'logic_boolean',
DateConstant: 'business_logic_date',
}
field_name = {
NumberConstant: 'NUM',
StringConstant: 'TEXT',
BooleanConstant: 'BOOL',
DateConstant: 'DATE',
}
content_object = node.content_object
cls = content_object.__class__
block = etree.SubElement(parent_xml, 'block', type=block_type[cls])
field_element = etree.SubElement(block, 'field', name=field_name[cls])
if isinstance(content_object, BooleanConstant):
field_element.text = str(content_object).upper()
else:
field_element.text = str(content_object)
return block
def visit_reference_constant(self, node, parent_xml):
children = self.get_children(node)
if len(children) != 1:
raise BlocklyXmlBuilderException('Incorrect number of ReferenceConstant node children: {}'.format(
len(children)))
value_object_node = children[0]
content_type = value_object_node.content_type
block = etree.SubElement(parent_xml, 'block', type='business_logic_reference')
type_field = etree.SubElement(block, 'field', name='TYPE')
type_field.text = '{}.{}'.format(content_type.app_label, content_type.model_class().__name__)
value_field = etree.SubElement(block, 'field', name='VALUE')
value_field.text = str(value_object_node.object_id)
return block
visit_reference_constant.process_children = True
def _get_variable_block_type(self, node, action):
assert action in ('get', 'set')
if node.content_object.definition.name.find('.') != -1:
return 'business_logic_argument_field_{}'.format(action)
return 'variables_{}'.format(action)
def visit_variable(self, node, parent_xml):
block_type = self._get_variable_block_type(node, 'get')
block = etree.SubElement(parent_xml, 'block', type=block_type)
self._visit_variable(node, block)
return block
def visit_assignment(self, node, parent_xml):
lhs_node, rhs_node = self.get_children(node)
block_type = self._get_variable_block_type(lhs_node, 'set')
block = etree.SubElement(parent_xml, 'block', type=block_type)
self._visit_variable(lhs_node, block)
value = etree.SubElement(block, 'value', name='VALUE')
self.visit(rhs_node, value)
return block
visit_assignment.process_children = True
def _visit_variable(self, node, parent_xml):
variable = node.content_object
field_element = etree.SubElement(parent_xml, 'field', name='VAR')
field_element.text = variable.definition.name
def visit_binary_operator(self, node, parent_xml):
# determine block_type
operator_value = node.content_object.operator
block_type = None
table = None
for block_type, table in OPERATOR_TABLE.items():
if operator_value in table:
break
else:
raise BlocklyXmlBuilderException('Invalid Operator: {}'.format(operator_value))
block = etree.SubElement(parent_xml, 'block', type=block_type)
field_element = etree.SubElement(block, 'field', name='OP')
field_element.text = table[operator_value]
lhs_node, rhs_node = self.get_children(node)
for value_name, child_node in (('A', lhs_node), ('B', rhs_node)):
value = etree.SubElement(block, 'value', name=value_name)
self.visit(child_node, value)
return block
visit_binary_operator.process_children = True
def visit_if_statement(self, node, parent_xml):
children = self.get_children(node)
block = etree.SubElement(parent_xml, 'block', type='controls_if')
if len(children) > 2:
mutation = etree.SubElement(block, 'mutation')
if len(children) % 2:
mutation.set('else', '1')
elifs = (len(children) - 2 - len(children) % 2) / 2
if elifs:
mutation.set('elseif', str(int(elifs)))
for i, pair in enumerate(pairs(children)):
# last "else" branch
if len(pair) == 1:
statement = etree.SubElement(block, 'statement', name='ELSE')
self.visit(pair[0], statement)
break
if_condition = pair[0]
if_value = etree.SubElement(block, 'value', name='IF{}'.format(i))
self.visit(if_condition, if_value)
statement = etree.SubElement(block, 'statement', name='DO{}'.format(i))
self.visit(pair[1], statement)
return block
visit_if_statement.process_children = True
def visit_function(self, node, parent_xml):
function = node.content_object
function_definition = function.definition
children = self.get_children(node)
block = etree.SubElement(parent_xml, 'block', type='business_logic_function')
etree.SubElement(block, 'mutation', args='true')
field_element = etree.SubElement(block, 'field', name='FUNC')
field_element.text = function_definition.title
for i, child_node in enumerate(children):
value = etree.SubElement(block, 'value', name='ARG{}'.format(i))
self.visit(child_node, value)
return block
visit_function.process_children = True
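# Usage sketch (added; assumes a node tree persisted by this app's models):
# xml_text = BlocklyXmlBuilder().build(tree_root)
# 'xml_text' is a pretty-printed <xml> document consumable by Blockly.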
|
dgk/django-business-logic
|
business_logic/blockly/build.py
|
Python
|
mit
| 7,523
|
[
"VisIt"
] |
1163afd26668703b7bd738e91957811713becc09f8197a4bf468b158eaf0ea2c
|
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2019, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import tempfile
import pandas as pd
import qiime2
from q2_types.feature_data import (
FeatureData, Taxonomy, Sequence, DNAFASTAFormat)
from .plugin_setup import plugin, citations
from qiime2.plugin import Int, Str, Float, Choices, Range, Bool
from ._consensus_assignment import (_consensus_assignments, _run_command,
_get_default_unassignable_label,
_annotate_method)
from ._taxonomic_classifier import TaxonomicClassifier
from .classifier import _classify_parameters, _parameter_descriptions
def classify_consensus_vsearch(query: DNAFASTAFormat,
reference_reads: DNAFASTAFormat,
reference_taxonomy: pd.Series,
maxaccepts: int = 10,
perc_identity: float = 0.8,
query_cov: float = 0.8,
strand: str = 'both',
min_consensus: float = 0.51,
unassignable_label: str =
_get_default_unassignable_label(),
search_exact: bool = False,
top_hits_only: bool = False,
                               threads: int = 1) -> pd.DataFrame:
seqs_fp = str(query)
ref_fp = str(reference_reads)
if maxaccepts == 'all':
maxaccepts = 0
cmd = ['vsearch', '--usearch_global', seqs_fp, '--id', str(perc_identity),
'--query_cov', str(query_cov), '--strand', strand, '--maxaccepts',
str(maxaccepts), '--maxrejects', '0', '--output_no_hits', '--db',
ref_fp, '--threads', str(threads)]
if search_exact:
cmd[1] = '--search_exact'
if top_hits_only:
cmd.append('--top_hits_only')
cmd.append('--blast6out')
consensus = _consensus_assignments(
cmd, reference_taxonomy, min_consensus=min_consensus,
unassignable_label=unassignable_label)
return consensus
def classify_hybrid_vsearch_sklearn(ctx,
query,
reference_reads,
reference_taxonomy,
classifier,
maxaccepts=10,
perc_identity=0.5,
query_cov=0.8,
strand='both',
min_consensus=0.51,
reads_per_batch=0,
confidence=0.7,
read_orientation='auto',
threads=1,
prefilter=True,
sample_size=1000,
randseed=0):
exclude = ctx.get_action('quality_control', 'exclude_seqs')
ccv = ctx.get_action('feature_classifier', 'classify_consensus_vsearch')
cs = ctx.get_action('feature_classifier', 'classify_sklearn')
filter_seqs = ctx.get_action('taxa', 'filter_seqs')
merge = ctx.get_action('feature_table', 'merge_taxa')
# randomly subsample reference sequences for rough positive filter
if prefilter:
ref = str(reference_reads.view(DNAFASTAFormat))
with tempfile.NamedTemporaryFile() as output:
cmd = ['vsearch', '--fastx_subsample', ref, '--sample_size',
str(sample_size), '--randseed', str(randseed),
'--fastaout', output.name]
_run_command(cmd)
sparse_reference = qiime2.Artifact.import_data(
'FeatureData[Sequence]', output.name)
# perform rough positive filter on query sequences
query, misses, = exclude(
query_sequences=query, reference_sequences=sparse_reference,
method='vsearch', perc_identity=perc_identity,
perc_query_aligned=query_cov, threads=threads)
# find exact matches, perform LCA consensus classification
taxa1, = ccv(query=query, reference_reads=reference_reads,
reference_taxonomy=reference_taxonomy, maxaccepts=maxaccepts,
strand=strand, min_consensus=min_consensus,
search_exact=True, threads=threads)
# Annotate taxonomic assignments with classification method
taxa1 = _annotate_method(taxa1, 'VSEARCH')
# perform second pass classification on unassigned taxa
# filter out unassigned seqs
try:
query, = filter_seqs(sequences=query, taxonomy=taxa1,
include=_get_default_unassignable_label())
except ValueError:
# get ValueError if all sequences are filtered out.
# so if no sequences are unassigned, return exact match results
return taxa1
# classify with sklearn classifier
taxa2, = cs(reads=query, classifier=classifier,
reads_per_batch=reads_per_batch, n_jobs=threads,
confidence=confidence, read_orientation=read_orientation)
# Annotate taxonomic assignments with classification method
taxa2 = _annotate_method(taxa2, 'sklearn')
# merge into one big happy result
taxa, = merge(data=[taxa2, taxa1])
return taxa
output_descriptions = {
'classification': 'The resulting taxonomy classifications.'}
parameters = {'maxaccepts': Int % Range(1, None) | Str % Choices(['all']),
'perc_identity': Float % Range(0.0, 1.0, inclusive_end=True),
'query_cov': Float % Range(0.0, 1.0, inclusive_end=True),
'strand': Str % Choices(['both', 'plus']),
'min_consensus': Float % Range(0.5, 1.0, inclusive_end=True,
inclusive_start=False),
'threads': Int % Range(1, None)}
inputs = {'query': FeatureData[Sequence],
'reference_reads': FeatureData[Sequence],
'reference_taxonomy': FeatureData[Taxonomy]}
input_descriptions = {'query': 'Sequences to classify taxonomically.',
                      'reference_reads': 'Reference sequences.',
                      'reference_taxonomy': 'Reference taxonomy labels.'}
parameter_descriptions = {
'strand': 'Align against reference sequences in forward ("plus") '
'or both directions ("both").',
'maxaccepts': 'Maximum number of hits to keep for each query. Set to '
'"all" to keep all hits > perc_identity similarity.',
'perc_identity': 'Reject match if percent identity to query is '
'lower.',
'query_cov': 'Reject match if query alignment coverage per high-'
'scoring pair is lower.',
'min_consensus': 'Minimum fraction of assignments must match top '
'hit to be accepted as consensus assignment.',
'threads': 'Number of threads to use for job parallelization.'}
outputs = [('classification', FeatureData[Taxonomy])]
ignore_prefilter = ' This parameter is ignored if `prefilter` is disabled.'
plugin.methods.register_function(
function=classify_consensus_vsearch,
inputs=inputs,
parameters={**parameters,
'unassignable_label': Str,
'search_exact': Bool,
'top_hits_only': Bool},
outputs=outputs,
input_descriptions=input_descriptions,
parameter_descriptions={
**parameter_descriptions,
'search_exact': 'Search for exact full-length matches to the query '
'sequences. Only 100% exact matches are reported and '
'this command is much faster than the default. If '
'True, the perc_identity and query_cov settings are '
'ignored. Note: query and reference reads must be '
'trimmed to the exact same DNA locus (e.g., primer '
'site) because only exact matches will be reported.',
'top_hits_only': 'Only the top hits between the query and reference '
'sequence sets are reported. For each query, the top '
'hit is the one presenting the highest percentage of '
'identity. Multiple equally scored top hits will be '
'used for consensus taxonomic assignment if '
'maxaccepts is greater than 1.',
},
output_descriptions=output_descriptions,
name='VSEARCH-based consensus taxonomy classifier',
description=('Assign taxonomy to query sequences using VSEARCH. Performs '
'VSEARCH global alignment between query and reference_reads, '
'then assigns consensus taxonomy to each query sequence from '
'among maxaccepts top hits, min_consensus of which share '
'that taxonomic assignment. Unlike classify-consensus-blast, '
'this method searches the entire reference database before '
'choosing the top N hits, not the first N hits.'),
citations=[citations['rognes2016vsearch']]
)
plugin.pipelines.register_function(
function=classify_hybrid_vsearch_sklearn,
inputs={**inputs, 'classifier': TaxonomicClassifier},
parameters={**parameters,
'reads_per_batch': _classify_parameters['reads_per_batch'],
'confidence': _classify_parameters['confidence'],
'read_orientation': _classify_parameters['read_orientation'],
'prefilter': Bool,
'sample_size': Int % Range(1, None),
'randseed': Int % Range(0, None)},
outputs=outputs,
input_descriptions={**input_descriptions,
'classifier': 'Pre-trained sklearn taxonomic '
'classifier for classifying the reads.'},
parameter_descriptions={
**{k: parameter_descriptions[k] for k in [
'strand', 'maxaccepts', 'min_consensus', 'threads']},
'perc_identity': 'Percent sequence similarity to use for PREFILTER. ' +
parameter_descriptions['perc_identity'] + ' Set to a '
'lower value to perform a rough pre-filter.' +
ignore_prefilter,
'query_cov': 'Query coverage threshold to use for PREFILTER. ' +
parameter_descriptions['query_cov'] + ' Set to a '
'lower value to perform a rough pre-filter.' +
ignore_prefilter,
'confidence': _parameter_descriptions['confidence'],
'read_orientation': 'Direction of reads with respect to reference '
'sequences in pre-trained sklearn classifier. '
                            '"same" will cause reads to be classified '
                            'unchanged; "reverse-complement" will cause '
                            'reads to be reversed and complemented prior to '
                            'classification. "auto" will autodetect '
'orientation based on the confidence estimates '
'for the first 100 reads.',
'reads_per_batch': 'Number of reads to process in each batch for '
'sklearn classification. If "auto", this parameter '
'is autoscaled to min(number of query sequences / '
'threads, 20000).',
'prefilter': 'Toggle positive filter of query sequences on or off.',
'sample_size': 'Randomly extract the given number of sequences from '
'the reference database to use for prefiltering.' +
ignore_prefilter,
'randseed': 'Use integer as a seed for the pseudo-random generator '
'used during prefiltering. A given seed always produces '
'the same output, which is useful for replicability. Set '
'to 0 to use a pseudo-random seed.' + ignore_prefilter,
},
output_descriptions=output_descriptions,
name='ALPHA Hybrid classifier: VSEARCH exact match + sklearn classifier',
description=('NOTE: THIS PIPELINE IS AN ALPHA RELEASE. Please report bugs '
'to https://forum.qiime2.org!\n'
'Assign taxonomy to query sequences using hybrid classifier. '
'First performs rough positive filter to remove artifact and '
'low-coverage sequences (use "prefilter" parameter to toggle '
'this step on or off). Second, performs VSEARCH exact match '
'between query and reference_reads to find exact matches, '
'followed by least common ancestor consensus taxonomy '
'assignment from among maxaccepts top hits, min_consensus of '
'which share that taxonomic assignment. Query sequences '
'without an exact match are then classified with a pre-'
'trained sklearn taxonomy classifier to predict the most '
'likely taxonomic lineage.'),
)
|
BenKaehler/q2-feature-classifier
|
q2_feature_classifier/_vsearch.py
|
Python
|
bsd-3-clause
| 13,539
|
[
"BLAST"
] |
1ac8509d040ca02e6e095efb5e6e40b1d45acedd373de00c176faea3b1936f34
|
import logging
import urllib
from collections import defaultdict
from lxml import html
from django.conf import settings
from django.core.context_processors import csrf
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User, AnonymousUser
from django.contrib.auth.decorators import login_required
from django.http import Http404, HttpResponse
from django.shortcuts import redirect
from edxmako.shortcuts import render_to_response, render_to_string
from django_future.csrf import ensure_csrf_cookie
from django.views.decorators.cache import cache_control
from django.db import transaction
from markupsafe import escape
import django.utils
from courseware import grades
from courseware.access import has_access
from courseware.courses import (get_courses, get_course_with_access, sort_by_announcement, sort_and_audited_items, get_course_info_section, filter_audited_items,
get_course_by_id, get_course, course_image_url, get_course_about_section, get_courses_by_search)
import courseware.tabs as tabs
from courseware.masquerade import setup_masquerade
from courseware.model_data import FieldDataCache
from .module_render import toc_for_course, get_module_for_descriptor, get_module
from courseware.models import StudentModule, StudentModuleHistory
from course_modes.models import CourseMode
from student.models import UserTestGroup, CourseEnrollment
from student.views import course_from_id, single_course_reverification_info
from util.cache import cache, cache_if_anonymous
from util.json_request import JsonResponse
from xblock.fragment import Fragment
from xmodule.modulestore import Location
from xmodule.modulestore.django import modulestore, loc_mapper
from xmodule.modulestore.exceptions import InvalidLocationError, ItemNotFoundError, NoPathToItem
from xmodule.modulestore.search import path_to_location
from xmodule.course_module import CourseDescriptor
from xmodule.contentstore.content import StaticContent
import shoppingcart
from microsite_configuration import microsite
log = logging.getLogger("edx.courseware")
template_imports = {'urllib': urllib}
def user_groups(user):
"""
TODO (vshnayder): This is not used. When we have a new plan for groups, adjust appropriately.
"""
if not user.is_authenticated():
return []
# TODO: Rewrite in Django
key = 'user_group_names_{user.id}'.format(user=user)
cache_expiration = 60 * 60 # one hour
# Kill caching on dev machines -- we switch groups a lot
group_names = cache.get(key)
if settings.DEBUG:
group_names = None
if group_names is None:
group_names = [u.name for u in UserTestGroup.objects.filter(users=user)]
cache.set(key, group_names, cache_expiration)
return group_names
#@ensure_csrf_cookie
#@cache_if_anonymous
def courses(request):
"""
Render "find courses" page. The course selection work is done in courseware.courses.
"""
q = request.GET.get('query', '')
courses_aa = get_courses_by_search(request.META.get('HTTP_HOST'))
courses_list = []
if q != "":
for course in courses_aa:
if q in course.org or q in course.id or q in course.display_name_with_default:
courses_list.append(course)
else:
continue
else:
courses_list = courses_aa
courses = sort_by_announcement(courses_list)
return render_to_response("courseware/courses.html", {'courses': filter_audited_items(courses)})
def return_fixed_courses(request, courses, action=None):
default_length = 8
course_id = request.GET.get("course_id")
if course_id:
course_id = course_id.replace(".", '/')
try:
index_course = get_course_by_id(course_id)
course_index = (courses.index(index_course) + 1)
except:
course_index = 0
current_list = courses[course_index:]
if len(current_list) > default_length:
current_list = current_list[0:default_length]
course_list = []
for course in current_list:
try:
course_json = mobi_course_info(request, course, action)
course_list.append(course_json)
except:
continue
return JsonResponse({"count": len(courses), "course-list": course_list})
def course_attr_list_handler(request, course_category, course_level=None):
courses = get_courses(request.user, request.META.get('HTTP_HOST'))
courses = sort_and_audited_items(courses)
courses_list = []
for course in courses:
if course_level:
if course.course_level == course_level and course.course_category == course_category:
courses_list.append(course)
elif course.course_category == course_category:
courses_list.append(course)
else:
continue
return return_fixed_courses(request, courses_list, None)
def courses_list_handler(request, action):
"""
Return courses based on request params
"""
try:
user = request.user
except:
user = AnonymousUser()
if action not in ["homefalls", "all", "hot", "latest", "my", "search", "rolling", "sync"]:
        return JsonResponse({"success": False, "errmsg": "unsupported action: must be one of homefalls, all, hot, latest, my, search, rolling, sync"})
def get_courses_depend_action(courses):
"""
        Return courses depending on action
        action: [homefalls, hot, latest, my, search]
        homefalls: all courses
        hot: courses with the most attendees
        latest: the most recently announced courses
        my: courses the user is registered for
        all: same as 'homefalls'
"""
courses = sort_and_audited_items(courses)
courses_list = []
if action == "latest":
default_count = 20
if len(courses) < default_count:
default_count = len(courses)
courses_list = courses[0:default_count]
elif action == "my":
# filter my registered courses
for course in courses:
if registered_for_course(course, user):
courses_list.append(course)
elif action == "rolling":
default_count = 5
courses_list = courses[0:default_count]
elif action == 'search':
keyword = request.GET.get("keyword")
if keyword:
for c in courses:
if keyword in c.org or keyword in c.id or keyword in c.display_name_with_default:
courses_list.append(c)
else:
courses_list = courses
return courses_list
courses = get_courses(user, request.META.get('HTTP_HOST'))
if action != "sync":
courses = get_courses_depend_action(courses)
return return_fixed_courses(request, courses, action)
def _course_json(course, course_id, url_name, position=0):
locator = loc_mapper().translate_location(course_id, course.location, published=False, add_entry_if_missing=True)
is_container = course.has_children
category = course.category
result = {
'display_name': course.display_name,
'id': unicode(locator),
'category': category,
'is_draft': getattr(course, 'is_draft', False),
'is_container': is_container
}
if category in ['sequential', 'chapter']:
url_name = url_name + '/' + course.url_name
elif category == "vertical":
result['unit_url'] = url_name + '/' + str(position)
elif category == "video":
result[category + '_url'] = course.html5_sources[0] if len(course.html5_sources) > 0 else ""
if is_container:
children = []
for idx, child in enumerate(course.get_children()):
try:
children.append(_course_json(child, course_id, url_name, (idx + 1)))
except:
continue
result['children'] = children
return result
def mobi_course_info(request, course, action=None):
course_logo = course_image_url(course)
host = request.get_host()
try:
user = request.user
except:
user = AnonymousUser()
result = {
"id": course.id.replace('/', '.'),
"name": course.display_name_with_default,
"logo": host + course_logo,
"org": course.display_org_with_default,
"course_number": course.display_number_with_default,
"start_date": course.start.strftime("%Y-%m-%d"),
"course_category": course.course_category,
"course_level": course.course_level,
"registered": registered_for_course(course, user),
"about": get_course_about_section(course, 'short_description'),
"category": course.category,
"course_price": course.display_course_price_with_default
}
def compute_action_imgurl(imgname):
course_mini_info = course.id.split('/')
asset_location = StaticContent.compute_location(course_mini_info[0], course_mini_info[1], imgname)
return host + StaticContent.get_url_path_from_location(asset_location)
for imgname in ['mobi', 'mobi_r', 'ott_r']:
try:
result[imgname] = compute_action_imgurl(imgname + '_logo.jpg')
except:
result[imgname] = host + course_logo
return result
def _course_info_content(html_parsed):
"""
Constructs the HTML for the course info update, not including the header.
"""
if len(html_parsed) == 1:
# could enforce that update[0].tag == 'h2'
content = html_parsed[0].tail
else:
content = html_parsed[0].tail if html_parsed[0].tail is not None else ""
content += "\n".join([html.tostring(ele) for ele in html_parsed[1:]])
return content
def parse_updates_html_str(html_str):
course_upd_collection = []
if html_str == '':
return {"updates": course_upd_collection}
try:
course_html_parsed = html.fromstring(html_str)
except:
        escaped = django.utils.html.escape(html_str)
course_html_parsed = html.fromstring(escaped)
if course_html_parsed.tag == 'section':
for index, update in enumerate(course_html_parsed):
if len(update) > 0:
content = _course_info_content(update)
computer_id = len(course_html_parsed) - index
payload = {
"id": computer_id,
"date": update.findtext("h2"),
"content": content
}
course_upd_collection.append(payload)
return {"updates": course_upd_collection}
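# Hedged sketch of the shape parse_updates_html_str produces, assuming the
# usual course-info markup (a <section> of <article> blocks, each headed by
# an <h2> date); the input string here is hypothetical:
#
#   >>> parse_updates_html_str(
#   ...     '<section><article><h2>May 1</h2><p>Welcome!</p></article></section>')
#   {'updates': [{'id': 1, 'date': 'May 1', 'content': '<p>Welcome!</p>'}]}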
def mobi_course_action(request, course_id, action):
try:
course_id_bak = course_id.replace('.', '/')
if action in ["updates", "handouts", "structure"]:
user = request.user
if not user:
user = AnonymousUser()
course = get_course_with_access(user, course_id_bak, 'load')
registered = registered_for_course(course, user)
if action == "updates" and registered:
# course_updates = get_course_info_section(request, course, action)
loc = Location(course.location.tag, course.location.org, course.location.course, 'course_info', action)
field_data_cache = FieldDataCache([], course.id, request.user)
course_module = get_module(
user,
request,
loc,
field_data_cache,
course.id,
wrap_xmodule_display=False,
static_asset_path=course.static_asset_path
)
return JsonResponse({'updates': [item for item in course_module.items if item["status"] != "deleted"]})
elif action == "handouts" and registered:
course_handouts = get_course_info_section(request, course, action)
return JsonResponse({"handouts": course_handouts})
elif action == "structure":
url_name = request.get_host() + '/m/courses/' + course_id_bak + '/courseware'
return JsonResponse(_course_json(course=course, course_id=course.location.course_id, url_name=url_name))
else:
raise Exception
else:
course = get_course_with_access(request.user, course_id_bak, 'see_exists')
return JsonResponse(mobi_course_info(request, course))
except:
return JsonResponse({"success": False, "errmsg": "access denied!"})
def render_accordion(request, course, chapter, section, field_data_cache):
"""
Draws navigation bar. Takes current position in accordion as
parameter.
If chapter and section are '' or None, renders a default accordion.
course, chapter, and section are the url_names.
Returns the html string
"""
# grab the table of contents
user = User.objects.prefetch_related("groups").get(id=request.user.id)
request.user = user # keep just one instance of User
toc = toc_for_course(user, request, course, chapter, section, field_data_cache)
context = dict([('toc', toc),
('course_id', course.id),
('csrf', csrf(request)['csrf_token']),
('due_date_display_format', course.due_date_display_format)] + template_imports.items())
return render_to_string('courseware/accordion.html', context)
def get_current_child(xmodule):
"""
    Return the child of an xmodule selected by its 1-indexed ``position``.
    If the xmodule has no ``position`` attribute, return None. If the
    position is unset or out of bounds, return the first child. If there
    are no children at all, return None.
"""
if not hasattr(xmodule, 'position'):
return None
if xmodule.position is None:
pos = 0
else:
# position is 1-indexed.
pos = xmodule.position - 1
children = xmodule.get_display_items()
if 0 <= pos < len(children):
child = children[pos]
elif len(children) > 0:
# Something is wrong. Default to first child
child = children[0]
else:
child = None
return child
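# Hedged illustration of the rules above: position is 1-indexed, so an
# xmodule with position == 2 and children [a, b, c] yields b; an
# out-of-bounds position such as 7 falls back to a; an xmodule with no
# children at all yields None.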
def redirect_to_course_position(course_module):
"""
Return a redirect to the user's current place in the course.
If this is the user's first time, redirects to COURSE/CHAPTER/SECTION.
    If this isn't the user's first time, redirects to COURSE/CHAPTER,
and the view will find the current section and display a message
about reusing the stored position.
If there is no current position in the course or chapter, then selects
the first child.
"""
urlargs = {'course_id': course_module.id}
chapter = get_current_child(course_module)
if chapter is None:
# oops. Something bad has happened.
raise Http404("No chapter found when loading current position in course")
urlargs['chapter'] = chapter.url_name
if course_module.position is not None:
return redirect(reverse('courseware_chapter', kwargs=urlargs))
# Relying on default of returning first child
section = get_current_child(chapter)
if section is None:
raise Http404("No section found when loading current position in course")
urlargs['section'] = section.url_name
return redirect(reverse('courseware_section', kwargs=urlargs))
def save_child_position(seq_module, child_name):
"""
child_name: url_name of the child
"""
for position, c in enumerate(seq_module.get_display_items(), start=1):
if c.url_name == child_name:
# Only save if position changed
if position != seq_module.position:
seq_module.position = position
# Save this new position to the underlying KeyValueStore
seq_module.save()
def chat_settings(course, user):
"""
Returns a dict containing the settings required to connect to a
Jabber chat server and room.
"""
domain = getattr(settings, "JABBER_DOMAIN", None)
if domain is None:
log.warning('You must set JABBER_DOMAIN in the settings to '
'enable the chat widget')
return None
return {
'domain': domain,
# Jabber doesn't like slashes, so replace with dashes
'room': "{ID}_class".format(ID=course.id.replace('/', '-')),
'username': "{USER}@{DOMAIN}".format(
USER=user.username, DOMAIN=domain
),
# TODO: clearly this needs to be something other than the username
# should also be something that's not necessarily tied to a
# particular course
'password': "{USER}@{DOMAIN}".format(
USER=user.username, DOMAIN=domain
),
}
@login_required
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def index(request, course_id, chapter=None, section=None,
position=None):
"""
Displays courseware accordion and associated content. If course, chapter,
and section are all specified, renders the page, or returns an error if they
are invalid.
If section is not specified, displays the accordion opened to the right chapter.
    If neither chapter nor section is specified, redirects to the user's most
    recent chapter, or the first chapter if this is the user's first visit.
Arguments:
- request : HTTP request
- course_id : course id (str: ORG/course/URL_NAME)
- chapter : chapter url_name (str)
- section : section url_name (str)
- position : position in module, eg of <sequential> module (str)
Returns:
- HTTPresponse
"""
user = User.objects.prefetch_related("groups").get(id=request.user.id)
request.user = user # keep just one instance of User
course = get_course_with_access(user, course_id, 'load', depth=2)
staff_access = has_access(user, course, 'staff')
registered = registered_for_course(course, user)
if not registered:
# TODO (vshnayder): do course instructors need to be registered to see course?
log.debug(u'User %s tried to view course %s but is not enrolled', user, course.location.url())
return redirect(reverse('about_course', args=[course.id]))
masq = setup_masquerade(request, staff_access)
try:
field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
course.id, user, course, depth=2)
course_module = get_module_for_descriptor(user, request, course, field_data_cache, course.id)
if course_module is None:
log.warning(u'If you see this, something went wrong: if we got this'
u' far, should have gotten a course module for this user')
return redirect(reverse('about_course', args=[course.id]))
if chapter is None:
return redirect_to_course_position(course_module)
context = {
'csrf': csrf(request)['csrf_token'],
'accordion': render_accordion(request, course, chapter, section, field_data_cache),
'COURSE_TITLE': course.display_name_with_default,
'course': course,
'init': '',
'fragment': Fragment(),
'staff_access': staff_access,
'masquerade': masq,
'xqa_server': settings.FEATURES.get('USE_XQA_SERVER', 'http://xqa:server@content-qa.mitx.mit.edu/xqa'),
'reverifications': fetch_reverify_banner_info(request, course_id),
}
# Only show the chat if it's enabled by the course and in the
# settings.
show_chat = course.show_chat and settings.FEATURES['ENABLE_CHAT']
if show_chat:
context['chat'] = chat_settings(course, user)
# If we couldn't load the chat settings, then don't show
# the widget in the courseware.
if context['chat'] is None:
show_chat = False
context['show_chat'] = show_chat
chapter_descriptor = course.get_child_by(lambda m: m.url_name == chapter)
if chapter_descriptor is not None:
save_child_position(course_module, chapter)
else:
raise Http404('No chapter descriptor found with name {}'.format(chapter))
chapter_module = course_module.get_child_by(lambda m: m.url_name == chapter)
if chapter_module is None:
# User may be trying to access a chapter that isn't live yet
            if masq == 'student':  # if staff is masquerading as student, be kinder: don't 404
                log.debug('staff masq as student: no chapter %s', chapter)
return redirect(reverse('courseware', args=[course.id]))
raise Http404
if section is not None:
section_descriptor = chapter_descriptor.get_child_by(lambda m: m.url_name == section)
if section_descriptor is None:
# Specifically asked-for section doesn't exist
                if masq == 'student':  # if staff is masquerading as student, be kinder: don't 404
                    log.debug('staff masq as student: no section %s', section)
return redirect(reverse('courseware', args=[course.id]))
raise Http404
# cdodge: this looks silly, but let's refetch the section_descriptor with depth=None
# which will prefetch the children more efficiently than doing a recursive load
section_descriptor = modulestore().get_instance(course.id, section_descriptor.location, depth=None)
# Load all descendants of the section, because we're going to display its
# html, which in general will need all of its children
section_field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
course_id, user, section_descriptor, depth=None)
section_module = get_module_for_descriptor(request.user,
request,
section_descriptor,
section_field_data_cache,
course_id,
position
)
if section_module is None:
# User may be trying to be clever and access something
# they don't have access to.
raise Http404
# Save where we are in the chapter
save_child_position(chapter_module, section)
context['fragment'] = section_module.render('student_view')
context['section_title'] = section_descriptor.display_name_with_default
else:
# section is none, so display a message
prev_section = get_current_child(chapter_module)
if prev_section is None:
# Something went wrong -- perhaps this chapter has no sections visible to the user
raise Http404
prev_section_url = reverse('courseware_section', kwargs={'course_id': course_id,
'chapter': chapter_descriptor.url_name,
'section': prev_section.url_name})
context['fragment'] = Fragment(content=render_to_string(
'courseware/welcome-back.html',
{
'course': course,
'chapter_module': chapter_module,
'prev_section': prev_section,
'prev_section_url': prev_section_url
}
))
result = render_to_response('courseware/courseware.html', context)
except Exception as e:
if isinstance(e, Http404):
# let it propagate
raise
# In production, don't want to let a 500 out for any reason
if settings.DEBUG:
raise
else:
log.exception("Error in index view: user={user}, course={course},"
" chapter={chapter} section={section}"
"position={position}".format(
user=user,
course=course,
chapter=chapter,
section=section,
position=position
))
try:
result = render_to_response('courseware/courseware-error.html',
{'staff_access': staff_access,
'course': course})
except:
                # Let the exception propagate, relying on global config to
                # at least return a nice error message
log.exception("Error while rendering courseware-error page")
raise
return result
@ensure_csrf_cookie
def jump_to_id(request, course_id, module_id):
"""
    This entry point allows for a shorter version of a jump, where just the
    id of the element is passed in. It assumes that the id is unique within
    the course_id namespace
"""
course_location = CourseDescriptor.id_to_location(course_id)
items = modulestore().get_items(
Location('i4x', course_location.org, course_location.course, None, module_id),
course_id=course_id
)
if len(items) == 0:
raise Http404("Could not find id = {0} in course_id = {1}. Referer = {2}".
format(module_id, course_id, request.META.get("HTTP_REFERER", "")))
if len(items) > 1:
log.warning("Multiple items found with id = {0} in course_id = {1}. Referer = {2}. Using first found {3}...".
format(module_id, course_id, request.META.get("HTTP_REFERER", ""), items[0].location.url()))
return jump_to(request, course_id, items[0].location.url())
@ensure_csrf_cookie
def jump_to(request, course_id, location):
"""
Show the page that contains a specific location.
If the location is invalid or not in any class, return a 404.
Otherwise, delegates to the index view to figure out whether this user
has access, and what they should see.
"""
# Complain if the location isn't valid
try:
location = Location(location)
except InvalidLocationError:
raise Http404("Invalid location")
# Complain if there's not data for this location
try:
(course_id, chapter, section, position) = path_to_location(modulestore(), course_id, location)
except ItemNotFoundError:
raise Http404(u"No data at this location: {0}".format(location))
except NoPathToItem:
raise Http404(u"This location is not in any class: {0}".format(location))
# choose the appropriate view (and provide the necessary args) based on the
# args provided by the redirect.
# Rely on index to do all error handling and access control.
if chapter is None:
return redirect('courseware', course_id=course_id)
elif section is None:
return redirect('courseware_chapter', course_id=course_id, chapter=chapter)
elif position is None:
return redirect('courseware_section', course_id=course_id, chapter=chapter, section=section)
else:
return redirect('courseware_position', course_id=course_id, chapter=chapter, section=section, position=position)
@ensure_csrf_cookie
def course_info(request, course_id):
"""
Display the course's info.html, or 404 if there is no such course.
Assumes the course_id is in a valid format.
"""
course = get_course_with_access(request.user, course_id, 'load')
staff_access = has_access(request.user, course, 'staff')
masq = setup_masquerade(request, staff_access) # allow staff to toggle masquerade on info page
reverifications = fetch_reverify_banner_info(request, course_id)
context = {
'request': request,
'course_id': course_id,
'cache': None,
'course': course,
'staff_access': staff_access,
'masquerade': masq,
'reverifications': reverifications,
}
return render_to_response('courseware/info.html', context)
@ensure_csrf_cookie
def static_tab(request, course_id, tab_slug):
"""
Display the courses tab with the given name.
Assumes the course_id is in a valid format.
"""
course = get_course_with_access(request.user, course_id, 'load')
tab = tabs.get_static_tab_by_slug(course, tab_slug)
if tab is None:
raise Http404
contents = tabs.get_static_tab_contents(
request,
course,
tab
)
if contents is None:
raise Http404
staff_access = has_access(request.user, course, 'staff')
return render_to_response('courseware/static_tab.html',
{'course': course,
'tab': tab,
'tab_contents': contents,
'staff_access': staff_access, })
# TODO arjun: remove when custom tabs in place, see courseware/syllabus.py
@ensure_csrf_cookie
def syllabus(request, course_id):
"""
Display the course's syllabus.html, or 404 if there is no such course.
Assumes the course_id is in a valid format.
"""
course = get_course_with_access(request.user, course_id, 'load')
staff_access = has_access(request.user, course, 'staff')
return render_to_response('courseware/syllabus.html', {'course': course,
'staff_access': staff_access, })
def registered_for_course(course, user):
"""
Return True if user is registered for course, else False
"""
if user is None:
return False
if user.is_authenticated():
return CourseEnrollment.is_enrolled(user, course.id)
else:
return False
@ensure_csrf_cookie
@cache_if_anonymous
def course_about(request, course_id):
if microsite.get_value(
'ENABLE_MKTG_SITE',
settings.FEATURES.get('ENABLE_MKTG_SITE', False)
):
raise Http404
course = get_course_with_access(request.user, course_id, 'see_exists')
registered = registered_for_course(course, request.user)
if has_access(request.user, course, 'load'):
course_target = reverse('info', args=[course.id])
else:
course_target = reverse('about_course', args=[course.id])
show_courseware_link = (has_access(request.user, course, 'load') or
settings.FEATURES.get('ENABLE_LMS_MIGRATION'))
# Note: this is a flow for payment for course registration, not the Verified Certificate flow.
registration_price = 0
in_cart = False
reg_then_add_to_cart_link = ""
if (settings.FEATURES.get('ENABLE_SHOPPING_CART') and
settings.FEATURES.get('ENABLE_PAID_COURSE_REGISTRATION')):
registration_price = CourseMode.min_course_price_for_currency(course_id,
settings.PAID_COURSE_REGISTRATION_CURRENCY[0])
if request.user.is_authenticated():
cart = shoppingcart.models.Order.get_cart_for_user(request.user)
in_cart = shoppingcart.models.PaidCourseRegistration.contained_in_order(cart, course_id)
reg_then_add_to_cart_link = "{reg_url}?course_id={course_id}&enrollment_action=add_to_cart".format(
reg_url=reverse('register_user'), course_id=course.id)
# see if we have already filled up all allowed enrollments
is_course_full = CourseEnrollment.is_course_full(course)
return render_to_response('courseware/course_about.html',
{'course': course,
'registered': registered,
'course_target': course_target,
'registration_price': registration_price,
'in_cart': in_cart,
'reg_then_add_to_cart_link': reg_then_add_to_cart_link,
'show_courseware_link': show_courseware_link,
'is_course_full': is_course_full})
@ensure_csrf_cookie
@cache_if_anonymous
def mktg_course_about(request, course_id):
"""
This is the button that gets put into an iframe on the Drupal site
"""
try:
course = get_course_with_access(request.user, course_id, 'see_exists')
except (ValueError, Http404) as e:
# if a course does not exist yet, display a coming
# soon button
return render_to_response(
'courseware/mktg_coming_soon.html', {'course_id': course_id}
)
registered = registered_for_course(course, request.user)
if has_access(request.user, course, 'load'):
course_target = reverse('info', args=[course.id])
else:
course_target = reverse('about_course', args=[course.id])
allow_registration = has_access(request.user, course, 'enroll')
show_courseware_link = (has_access(request.user, course, 'load') or
settings.FEATURES.get('ENABLE_LMS_MIGRATION'))
course_modes = CourseMode.modes_for_course(course.id)
return render_to_response(
'courseware/mktg_course_about.html',
{
'course': course,
'registered': registered,
'allow_registration': allow_registration,
'course_target': course_target,
'show_courseware_link': show_courseware_link,
'course_modes': course_modes,
}
)
@login_required
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@transaction.commit_manually
def progress(request, course_id, student_id=None):
"""
Wraps "_progress" with the manual_transaction context manager just in case
there are unanticipated errors.
"""
with grades.manual_transaction():
return _progress(request, course_id, student_id)
def _progress(request, course_id, student_id):
"""
Unwrapped version of "progress".
User progress. We show the grade bar and every problem score.
Course staff are allowed to see the progress of students in their class.
"""
course = get_course_with_access(request.user, course_id, 'load', depth=None)
staff_access = has_access(request.user, course, 'staff')
if student_id is None or student_id == request.user.id:
# always allowed to see your own profile
student = request.user
else:
# Requesting access to a different student's profile
if not staff_access:
raise Http404
student = User.objects.get(id=int(student_id))
# NOTE: To make sure impersonation by instructor works, use
# student instead of request.user in the rest of the function.
# The pre-fetching of groups is done to make auth checks not require an
# additional DB lookup (this kills the Progress page in particular).
student = User.objects.prefetch_related("groups").get(id=student.id)
courseware_summary = grades.progress_summary(student, request, course)
grade_summary = grades.grade(student, request, course)
if courseware_summary is None:
        # This means the student (whose progress was requested) didn't have access to the course
raise Http404
context = {
'course': course,
'courseware_summary': courseware_summary,
'grade_summary': grade_summary,
'staff_access': staff_access,
'student': student,
'reverifications': fetch_reverify_banner_info(request, course_id)
}
with grades.manual_transaction():
response = render_to_response('courseware/progress.html', context)
return response
def fetch_reverify_banner_info(request, course_id):
"""
    Fetch the context variables needed to display the reverification banner in courseware
"""
reverifications = defaultdict(list)
user = request.user
if not user.id:
return reverifications
enrollment = CourseEnrollment.get_or_create_enrollment(request.user, course_id)
course = course_from_id(course_id)
info = single_course_reverification_info(user, course, enrollment)
if info:
reverifications[info.status].append(info)
return reverifications
@login_required
def submission_history(request, course_id, student_username, location):
"""Render an HTML fragment (meant for inclusion elsewhere) that renders a
history of all state changes made by this user for this problem location.
Right now this only works for problems because that's all
StudentModuleHistory records.
"""
course = get_course_with_access(request.user, course_id, 'load')
staff_access = has_access(request.user, course, 'staff')
# Permission Denied if they don't have staff access and are trying to see
# somebody else's submission history.
if (student_username != request.user.username) and (not staff_access):
raise PermissionDenied
try:
student = User.objects.get(username=student_username)
student_module = StudentModule.objects.get(course_id=course_id,
module_state_key=location,
student_id=student.id)
except User.DoesNotExist:
return HttpResponse(escape("User {0} does not exist.".format(student_username)))
except StudentModule.DoesNotExist:
return HttpResponse(escape("{0} has never accessed problem {1}".format(student_username, location)))
history_entries = StudentModuleHistory.objects.filter(
student_module=student_module
).order_by('-id')
# If no history records exist, let's force a save to get history started.
if not history_entries:
student_module.save()
history_entries = StudentModuleHistory.objects.filter(
student_module=student_module
).order_by('-id')
context = {
'history_entries': history_entries,
'username': student.username,
'location': location,
'course_id': course_id
}
return render_to_response('courseware/submission_history.html', context)
|
hwjworld/xiaodun-platform
|
lms/djangoapps/courseware/views.py
|
Python
|
agpl-3.0
| 38,172
|
[
"VisIt"
] |
8c2da74928f63c7cd49ffa5b85f8205c70ae0cecfa6a4527f716cd9a0e4c19d0
|
"""Check package URLs for updates
Subclasses of `Hoster` define how to handle each hoster. Hosters are
selected by regex matching each source URL in a recipe. The
`HTMLHoster` provides parsing for hosting sites listing new
releases in HTML format (probably covers most). Adding a hoster is
as simple as defining a regex to match the existing source URL, a
formatting string creating the URL of the releases page and a regex
to match links and extract their version.
- We need to use :conda:package:`regex` rather than `re` to allow
recursive matching to manipulate capture groups in URL patterns as
needed. (Technically, we could avoid this using a Snakemake wildcard
type syntax to define the patterns - implementers welcome).
"""
import abc
import inspect
import json
import logging
import os
from contextlib import redirect_stdout, redirect_stderr
from distutils.version import LooseVersion
from html.parser import HTMLParser
from itertools import chain
from typing import (Any, Dict, List, Match, Mapping, Pattern, Set, Tuple, Type,
Optional, TYPE_CHECKING)
from urllib.parse import urljoin
import regex as re
from .aiopipe import AsyncRequests
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
#: Matches named capture groups
#: This is so complicated because we need to parse matched, not-escaped
#: parentheses to determine where the clause ends.
#: Requires regex package for recursion.
RE_CAPGROUP = re.compile(r"\(\?P<(\w+)>(?>[^()]+|\\\(|\\\)|(\((?>[^()]+|\\\(|\\\)|(?2))*\)))*\)")
RE_REFGROUP = re.compile(r"\(\?P=(\w+)\)")
def dedup_named_capture_group(pattern):
"""Replaces repetitions of capture groups with matches to first instance"""
seen: Set[str] = set()
def replace(match):
"inner replace"
name: str = match.group(1)
if name in seen:
return f"(?P={name})"
seen.add(name)
return match.group(0)
return re.sub(RE_CAPGROUP, replace, pattern)
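# Hedged doctest-style sketch: a second occurrence of a named capture group
# is collapsed into a backreference to the first.
#
#   >>> dedup_named_capture_group(r"(?P<v>\d+)\.(?P<v>\d+)")
#   '(?P<v>\\d+)\\.(?P=v)'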
def replace_named_capture_group(pattern, vals: Dict[str, str]):
"""Replaces capture groups with values from **vals**"""
def replace(match):
"inner replace"
name = match.group(1)
if name in vals:
return vals[name] or ""
return match.group(0)
res = re.sub(RE_CAPGROUP, replace, pattern)
res = re.sub(RE_REFGROUP, replace, res)
return res
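# Hedged doctest-style sketch: named groups listed in **vals** are replaced
# by their values, everything else is left untouched.
#
#   >>> replace_named_capture_group(r"pkg-(?P<version>\d+)\.tar", {"version": "1.2"})
#   'pkg-1.2\\.tar'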
class HosterMeta(abc.ABCMeta):
"""Meta-Class for Hosters
By making Hosters classes of a metaclass, rather than instances of a class,
we leave the option to add functions to a Hoster.
"""
hoster_types: List["HosterMeta"] = []
def __new__(cls, name: str, bases: Tuple[type, ...],
namespace: Dict[str, Any], **kwargs) -> type:
"""Creates Hoster classes
- expands references among ``{var}_pattern`` attributes
- compiles ``{var}_pattern`` attributes to ``{var}_re``
- registers complete classes
"""
typ = super().__new__(cls, name, bases, namespace, **kwargs)
if inspect.isabstract(typ):
return typ
if not typ.__name__.startswith("Custom"):
cls.hoster_types.append(typ)
patterns = {attr.replace("_pattern", ""): getattr(typ, attr)
for attr in dir(typ) if attr.endswith("_pattern")}
for pat in patterns:
# expand pattern references:
pattern = ""
new_pattern = patterns[pat]
while pattern != new_pattern:
pattern = new_pattern
new_pattern = re.sub(r"(\{\d+,?\d*\})", r"{\1}", pattern)
new_pattern = new_pattern.format_map(
{k: v.rstrip("$") for k, v in patterns.items()})
patterns[pat] = pattern
# repair duplicate capture groups:
pattern = dedup_named_capture_group(pattern)
# save parsed and compiled pattern
setattr(typ, pat + "_pattern_compiled", pattern)
logger.debug("%s Pattern %s = %s", typ.__name__, pat, pattern)
setattr(typ, pat + "_re", re.compile(pattern))
return typ
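    # Hedged illustration of the expansion above: a subclass declaring
    # link_pattern = "{package}-{version}{ext}" ends up with a
    # link_pattern_compiled string in which {package}, {version} and {ext}
    # have been substituted by the corresponding *_pattern regexes, plus a
    # compiled link_re attribute.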
@classmethod
def select_hoster(cls, url: str, config: Dict[str, str]) -> Optional["Hoster"]:
"""Select `Hoster` able to handle **url**
Returns: `Hoster` or `None`
"""
logger.debug("Matching url '%s'", url)
for hoster_type in cls.hoster_types:
hoster = hoster_type.try_make_hoster(url, config)
if hoster:
return hoster
return None
class Hoster(metaclass=HosterMeta):
"""Hoster Baseclass"""
#: matches upstream version
#: - begins with a number
#: - then only numbers, characters or one of -, +, ., :, ~
#: - at most 31 characters length (to avoid matching checksums)
#: - accept v or r as prefix if after slash, dot, underscore or dash
version_pattern: str = r"(?:(?<=[/._-])[rv])?(?P<version>\d[\da-zA-Z\-+\.:\~_]{0,30})"
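    # Hedged examples of what the version group matches: "1.2.3",
    # "2021-05-01" (dashes allowed), and the "1.0" in "/v1.0" (the leading
    # v or r is consumed outside the named group when preceded by a slash,
    # dot, underscore or dash).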
#: matches archive file extensions
ext_pattern: str = r"(?P<ext>(?i)\.(?:(?:(tar\.|t)(?:xz|bz2|gz))|zip|jar))"
#: named patterns that will change with a version upgrade
exclude = ['version']
@property
@abc.abstractmethod
def url_pattern(self) -> str:
"matches upstream package url"
#: will be generated as each class is created
url_re: Pattern[str] = None
@property
@abc.abstractmethod
    def link_pattern(self) -> str:
        "matches links on release page"
@property
@abc.abstractmethod
def releases_formats(self) -> List[str]:
"format template for release page URL"
def __init__(self, url: str, match: Match[str]) -> None:
self.vals = {k: v or "" for k, v in match.groupdict().items()}
self.releases_urls = [
template.format_map(self.vals)
for template in self.releases_formats
]
logger.debug("%s matched %s with %s",
self.__class__.__name__, url, self.vals)
@classmethod
def try_make_hoster(cls: Type["Hoster"], url: str,
config: Dict[str, str]) -> Optional["Hoster"]:
"""Creates hoster if **url** is matched by its **url_pattern**"""
if config:
try:
klass: Type["Hoster"] = type(
"Customized" + cls.__name__,
(cls,),
{key+"_pattern":val for key, val in config.items()}
)
except KeyError:
logger.debug("Overrides invalid for %s - skipping", cls.__name__)
return None
else:
klass = cls
match = klass.url_re.search(url)
if match:
return klass(url, match)
return None
@classmethod
@abc.abstractmethod
def get_versions(cls, req: "AsyncRequests", orig_version: str) -> List[Mapping[str, Any]]:
"Gets list of versions from upstream hosting site"
class HrefParser(HTMLParser):
"""Extract link targets from HTML"""
def __init__(self, link_re: Pattern[str]) -> None:
super().__init__()
self.link_re = link_re
self.matches: List[Mapping[str, Any]] = []
def get_matches(self) -> List[Mapping[str, Any]]:
"""Return matches found for **link_re** in href links"""
return self.matches
def handle_starttag(self, tag: str, attrs: List[Tuple[str, str]]) -> None:
if tag == "a":
for key, val in attrs:
if key == "href":
self.handle_a_href(val)
break
def handle_a_href(self, href: str) -> None:
"""Process href attributes of anchor tags"""
match = self.link_re.search(href)
if match:
data = match.groupdict()
data["href"] = href
self.matches.append(data)
def error(self, message: str) -> None:
logger.debug("Error parsing HTML: %s", message)
# pylint: disable=abstract-method
class HTMLHoster(Hoster):
"""Base for Hosters handling release listings in HTML format"""
async def get_versions(self, req, orig_version):
exclude = set(self.exclude)
vals = {key: val
for key, val in self.vals.items()
if key not in exclude}
link_pattern = replace_named_capture_group(self.link_pattern_compiled, vals)
link_re = re.compile(link_pattern)
result = []
for url in self.releases_urls:
parser = HrefParser(link_re)
parser.feed(await req.get_text_from_url(url))
for match in parser.get_matches():
match["link"] = urljoin(url, match["href"])
match["releases_url"] = url
match["vals"] = vals
result.append(match)
return result
class FTPHoster(Hoster):
"""Scans for updates on FTP servers"""
async def get_versions(self, req, orig_version):
exclude = set(self.exclude)
vals = {key: val
for key, val in self.vals.items()
if key not in exclude}
link_pattern = replace_named_capture_group(self.link_pattern_compiled, vals)
link_re = re.compile(link_pattern)
result = []
for url in self.releases_urls:
files = await req.get_ftp_listing(url)
for fname in files:
match = link_re.search(fname)
if match:
data = match.groupdict()
data['fn'] = fname
data['link'] = "ftp://" + vals['host'] + fname
data['releases_url'] = url
result.append(data)
return result
version_pattern = r"(?:(?<=[/._-])[rv])?(?P<version>\d[\da-zA-Z\-+\.:\~_]{0,30}?)"
host_pattern = r"(?P<host>[-_.\w]+)"
path_pattern = r"(?P<path>[-_/.\w]+/)"
package_pattern = r"(?P<package>[-_\w]+)"
suffix_pattern = r"(?P<suffix>([-_](lin|linux|Linux|x64|x86|src|64|OSX))*)"
link_pattern = "{path}{package}{version}{suffix}{ext}"
url_pattern = r"ftp://{host}/{link}"
releases_formats = ["ftp://{host}/{path}"]
class OrderedHTMLHoster(HTMLHoster):
    """HTMLHoster for which we can expect the newest releases at the top
    The point isn't performance, but avoiding hassle with old versions
    which may follow different versioning schemes.
    E.g. 0.09 -> 0.10 -> 0.2 -> 0.2.1
    FIXME: If the current version is not in the list, that's likely
    a pathological case. Should be handled somewhere.
"""
async def get_versions(self, req, orig_version):
matches = await super().get_versions(req, orig_version)
num = None
for num, match in enumerate(matches):
if match["version"] == self.vals["version"]:
break
if num is None:
return matches
return matches[:num + 1]
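    # Hedged illustration: with matches for versions [2.0, 1.9, 1.8] and a
    # current version of 1.9, the list is cut just after the current
    # version, returning the 2.0 and 1.9 entries; if the current version is
    # absent, all matches are returned (the FIXME case above).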
class GithubBase(OrderedHTMLHoster):
"""Base class for software hosted on github.com"""
exclude = ['version', 'fname']
account_pattern = r"(?P<account>[-\w]+)"
project_pattern = r"(?P<project>[-.\w]+)"
prefix_pattern = r"(?P<prefix>[-_./\w]+?)"
suffix_pattern = r"(?P<suffix>[-_](lin)?)"
#tag_pattern = "{prefix}??{version}{suffix}??"
tag_pattern = "{prefix}??{version}"
url_pattern = r"github\.com{link}"
fname_pattern = r"(?P<fname>[^/]+)"
releases_formats = ["https://github.com/{account}/{project}/releases"]
class GithubRelease(GithubBase):
"""Matches release artifacts uploaded to Github"""
link_pattern = r"/{account}/{project}/releases/download/{tag}/{fname}{ext}?"
class GithubTag(GithubBase):
"""Matches GitHub repository archives created automatically from tags"""
link_pattern = r"/{account}/{project}/archive(/refs/tags)?/{tag}{ext}"
releases_formats = ["https://github.com/{account}/{project}/tags"]
class GithubReleaseAttachment(GithubBase):
"""Matches release artifacts uploaded as attachment to release notes"""
link_pattern = r"/{account}/{project}/files/\d+/{tag}{ext}"
class GithubRepoStore(GithubBase):
"""Matches release artifacts stored in a github repo"""
branch_pattern = r"(master|[\da-f]{40})"
subdir_pattern = r"(?P<subdir>([-._\w]+/)+)"
link_pattern = r"/{account}/{project}/blob/master/{subdir}{tag}{ext}"
url_pattern = (r"(?:(?P<raw>raw\.githubusercontent)|github)\.com/"
r"{account}/{project}/(?(raw)|(?:(?P<blob>blob/)|raw/))"
r"{branch}/{subdir}?{tag}{ext}(?(blob)\?raw|)")
releases_formats = ["https://github.com/{account}/{project}/tree/master/{subdir}"]
class Bioconductor(HTMLHoster):
"""Matches R packages hosted at Bioconductor"""
link_pattern = r"/src/contrib/(?P<package>[^/]+)_{version}{ext}"
section_pattern = r"/(bioc|data/annotation|data/experiment)"
url_pattern = r"bioconductor.org/packages/(?P<bioc>[\d\.]+){section}{link}"
releases_formats = ["https://bioconductor.org/packages/{bioc}/bioc/html/{package}.html"]
class CargoPort(HTMLHoster):
"""Matches source backup urls created by cargo-port"""
os_pattern = r"_(?P<os>src_all|linux_x86|darwin_x86)"
link_pattern = r"(?P<package>[^/]+)_{version}{os}{ext}"
url_pattern = r"depot.galaxyproject.org/software/(?P<package>[^/]+)/{link}"
releases_formats = ["https://depot.galaxyproject.org/software/{package}"]
class SourceForge(HTMLHoster):
"""Matches packages hosted at SourceForge"""
project_pattern = r"(?P<project>[-\w]+)"
subproject_pattern = r"((?P<subproject>[-\w%]+)/)?"
baseurl_pattern = r"sourceforge\.net/project(s)?/{project}/(?(1)files/|){subproject}"
package_pattern = r"(?P<package>[-\w_\.+]*?[a-zA-Z+])"
type_pattern = r"(?P<type>((linux|x?(64|86)|src|source|all|core|java\d?)[-_.])*)"
type2_pattern = type_pattern.replace("type", "type2")
sep_pattern = r"(?P<sep>[-_.]?)" # separator between package name and version
filename_pattern = "{package}{sep}({type2}{sep})?{version}({sep}{type})?{ext}"
url_pattern = r"{baseurl}{filename}"
link_pattern = r"{baseurl}{filename}"
releases_formats = ["https://sourceforge.net/projects/{project}/files/"]
class JSONHoster(Hoster):
"""Base for Hosters handling release listings in JSON format"""
async def get_versions(self, req, orig_version: str):
result = []
for url in self.releases_urls:
text = await req.get_text_from_url(url)
data = json.loads(text)
matches = await self.get_versions_from_json(data, req, orig_version)
for match in matches:
match['releases_url'] = url
result.extend(matches)
return result
link_pattern = "https://{url}"
@abc.abstractmethod
async def get_versions_from_json(self, data, req, orig_version) -> List[Dict[str, Any]]:
"""Extract matches from json data in **data**
"""
class PyPi(JSONHoster):
"""Scans PyPi for updates"""
async def get_versions_from_json(self, data, req, orig_version):
latest = data["info"]["version"]
result = []
for vers in list(set([latest, orig_version])):
if vers not in data['releases']:
continue
for rel in data['releases'][vers]:
if rel["packagetype"] == "sdist":
rel["link"] = rel["url"]
rel["version"] = vers
rel["info"] = data['info']
result.append(rel)
return result
@staticmethod
def _get_requirements(package, fname, url, digest, python_version, build_config):
"""Call into conda_build.skeletons.pypi to handle the ugly mess of extracting
requirements from python packages.
Note: It is not safe to call into conda multiple times parallel, and thus this
function must not be called in parallel.
"""
from conda_build.skeletons.pypi import get_pkginfo, get_requirements
with open("/dev/null", "w") as devnull:
with redirect_stdout(devnull), redirect_stderr(devnull):
try:
pkg_info = get_pkginfo(package, fname, url, digest, python_version,
[], build_config, [])
requirements = get_requirements(package, pkg_info)
except SystemExit as exc:
raise Exception(exc) from None
except Exception as exc:
raise Exception(exc) from None
if len(requirements) == 1 and isinstance(requirements[0], list):
requirements = requirements[0]
requirements_fixed = []
for req in requirements:
if '\n' in req:
requirements_fixed.extend(req.split('\n'))
else:
requirements_fixed.append(req)
return pkg_info, requirements_fixed
@staticmethod
def _get_python_version(rel):
"""Try to determine correct python version"""
choose_from = ('3.6', '3.5', '3.7', '2.7')
requires_python = rel.get('requires_python')
if requires_python:
requires_python = requires_python.replace(" ", "")
checks = []
for check in requires_python.split(","):
for key, func in (('==', lambda x, y: x == y),
('!=', lambda x, y: x != y),
('<=', lambda x, y: x <= y),
('>=', lambda x, y: x >= y),
('>', lambda x, y: x > y),
                                  ('<', lambda x, y: x < y),
('~=', lambda x, y: x == y)):
if check.startswith(key):
checks.append((func, check[len(key):]))
break
else:
checks.append((lambda x, y: x == y, check))
for vers in choose_from:
try:
if all(op(LooseVersion(vers), LooseVersion(check))
for op, check in checks):
return vers
except TypeError:
logger.exception("Failed to compare %s to %s", vers, requires_python)
python_versions = [
classifier.split('::')[-1].strip()
for classifier in rel['info'].get('classifiers', [])
if classifier.startswith('Programming Language :: Python ::')
]
for vers in choose_from:
if vers in python_versions:
return vers
return '2.7'
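    # Hedged doctest-style sketch: a release requiring ">=3.5" resolves to
    # the first acceptable entry of choose_from.
    #
    #   >>> PyPi._get_python_version({'requires_python': '>=3.5', 'info': {}})
    #   '3.6'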
async def get_deps(self, pipeline, build_config, package, rel):
"""Get dependencies for **package** using version data **rel**
This is messy even though we use conda_build.skeleton.pypi to
extract the requirements from a setup.py. Since the setup.py
actually gets executed, all manner of things can happen
(e.g. for one Bioconda package, this triggers compilation
of a binary module).
"""
req = pipeline.req
# We download ourselves to get async benefits
target_file = rel['filename']
target_path = os.path.join(build_config.src_cache, target_file)
if not os.path.exists(target_path):
await req.get_file_from_url(target_path, rel['link'], target_file)
python_version = self._get_python_version(rel)
# Run code from conda_build.skeletons in ProcessPoolExecutor
async with pipeline.conda_sem:
try:
pkg_info, depends = await pipeline.run_sp(
self._get_requirements,
package, target_file, rel['link'],
('sha256', rel['digests']['sha256']),
python_version, build_config)
except Exception: # pylint: disable=broad-except
logger.info("Failed to get depends for PyPi %s (py=%s)",
target_file, python_version)
logger.debug("Exception data", exc_info=True)
return
logger.debug("PyPi info for %s: %s", target_file, pkg_info)
# Convert into dict
deps = {}
for dep in depends:
match = re.search(r'([^<>= ]+)(.*)', dep)
if match:
deps[match.group(1)] = match.group(2)
# Write to rel dict for return
rel['depends'] = {'host': deps, 'run': deps}
releases_formats = ["https://pypi.org/pypi/{package}/json"]
package_pattern = r"(?P<package>[\w\-\.]+)"
source_pattern = r"{package}[-_]{version}{ext}"
hoster_pattern = (r"(?P<hoster>"
r"files.pythonhosted.org/packages|"
r"pypi.python.org/packages|"
r"pypi.io/packages)")
url_pattern = r"{hoster}/.*/{source}"
class Bioarchive(JSONHoster):
"""Scans for updates to packages hosted on bioarchive.galaxyproject.org"""
async def get_versions_from_json(self, data, req, orig_version):
try:
latest = data["info"]["Version"]
vals = {key: val
for key, val in self.vals.items()
if key not in self.exclude}
vals['version'] = latest
link = replace_named_capture_group(self.link_pattern, vals)
return [{
"link": link,
"version": latest,
}]
except KeyError:
return []
releases_formats = ["https://bioarchive.galaxyproject.org/api/{package}.json"]
package_pattern = r"(?P<package>[-\w.]+)"
url_pattern = r"bioarchive.galaxyproject.org/{package}_{version}{ext}"
class CPAN(JSONHoster):
"""Scans for updates to Perl packages hosted on CPAN"""
@staticmethod
def parse_deps(data):
"""Parse CPAN format dependencies"""
run_deps = {}
host_deps = {}
for dep in data:
if dep['relationship'] != 'requires':
continue
if dep['module'] in ('strict', 'warnings'):
continue
name = dep['module'].lower().replace('::', '-')
if 'version' in dep and dep['version'] not in ('0', None, 'undef'):
version = ">="+str(dep['version'])
else:
version = ''
if name != 'perl':
name = 'perl-' + name
else:
version = ''
if dep['phase'] == 'runtime':
run_deps[name] = version
elif dep['phase'] in ('build', 'configure', 'test'):
host_deps[name] = version
return {'host': host_deps, 'run': run_deps}
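    # Hedged doctest-style sketch of the dependency mapping:
    #
    #   >>> CPAN.parse_deps([{'relationship': 'requires', 'phase': 'test',
    #   ...                   'module': 'Test::More', 'version': '0.88'}])
    #   {'host': {'perl-test-more': '>=0.88'}, 'run': {}}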
async def get_versions_from_json(self, data, req, orig_version):
try:
version = {
'link': data['download_url'],
'version': str(data['version']),
'depends': self.parse_deps(data['dependency'])
}
result = [version]
if version['version'] != orig_version:
url = self.orig_release_format.format(vers=orig_version,
dist=data['distribution'])
text = await req.get_text_from_url(url)
data2 = json.loads(text)
if data2['hits']['total']:
data = data2['hits']['hits'][0]['_source']
orig_vers = {
'link': data['download_url'],
'version': str(data['version']),
'depends': self.parse_deps(data['dependency'])
}
result.append(orig_vers)
return result
except KeyError:
return []
package_pattern = r"(?P<package>[-\w.+]+)"
author_pattern = r"(?P<author>[A-Z]+)"
url_pattern = (r"(www.cpan.org|cpan.metacpan.org|search.cpan.org/CPAN)"
r"/authors/id/./../{author}/([^/]+/|){package}-v?{version}{ext}")
releases_formats = ["https://fastapi.metacpan.org/v1/release/{package}"]
orig_release_format = ("https://fastapi.metacpan.org/v1/release/_search"
"?q=distribution:{dist}%20AND%20version:{vers}")
class CRAN(JSONHoster):
"""R packages hosted on r-project.org (CRAN)"""
async def get_versions_from_json(self, data, _, orig_version):
res = []
versions = list(set((str(data["latest"]), self.vals["version"], orig_version)))
for vers in versions:
if vers not in data['versions']:
continue
vdata = data['versions'][vers]
depends = {
"r-" + pkg.lower() if pkg != 'R' else 'r-base':
spec.replace(" ", "").replace("\n", "").replace("*", "")
for pkg, spec in chain(vdata.get('Depends', {}).items(),
vdata.get('Imports', {}).items(),
vdata.get('LinkingTo', {}).items())
}
version = {
'link': '',
'version': vers,
'depends': {'host': depends, 'run': depends},
}
res.append(version)
return res
package_pattern = r"(?P<package>[\w.]+)"
url_pattern = (r"r-project\.org/src/contrib"
r"(/Archive)?/{package}(?(1)/{package}|)"
r"_{version}{ext}")
releases_formats = ["https://crandb.r-pkg.org/{package}/all"]
# pylint: disable=abstract-method
class BitBucketBase(OrderedHTMLHoster): # abstract
"""Base class for hosting at bitbucket.org"""
account_pattern = r"(?P<account>[-\w]+)"
project_pattern = r"(?P<project>[-.\w]+)"
prefix_pattern = r"(?P<prefix>[-_./\w]+?)??"
url_pattern = r"bitbucket\.org{link}"
class BitBucketTag(BitBucketBase):
"""Tag based releases hosted at bitbucket.org"""
link_pattern = "/{account}/{project}/get/{prefix}{version}{ext}"
releases_formats = ["https://bitbucket.org/{account}/{project}/downloads/?tab=tags",
"https://bitbucket.org/{account}/{project}/downloads/?tab=branches"]
class BitBucketDownload(BitBucketBase):
"""Uploaded releases hosted at bitbucket.org"""
link_pattern = "/{account}/{project}/downloads/{prefix}{version}{ext}"
releases_formats = ["https://bitbucket.org/{account}/{project}/downloads/?tab=downloads"]
class GitlabTag(OrderedHTMLHoster):
"""Tag based releases hosted at gitlab.com"""
account_pattern = r"(?P<account>[-\w]+)"
subgroup_pattern = r"(?P<subgroup>(?:/[-\w]+|))"
project_pattern = r"(?P<project>[-.\w]+)"
link_pattern = (r"/{account}{subgroup}/{project}/(repository|-/archive)/"
r"{version}/(archive|{project}-{version}){ext}")
url_pattern = r"gitlab\.com{link}"
releases_formats = ["https://gitlab.com/{account}{subgroup}/{project}/tags"]
logger.info("Hosters loaded: %s", [h.__name__ for h in HosterMeta.hoster_types])
|
bioconda/bioconda-utils
|
bioconda_utils/hosters.py
|
Python
|
mit
| 27,054
|
[
"Bioconda",
"Bioconductor"
] |
58a1607e2d5a08ff47f6ce19e0f1fa267d028c6cf16648cad21789f6f6b3d0fa
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyBcbioGff(PythonPackage):
"""Read and write Generic Feature Format (GFF) with Biopython
integration."""
pypi = "bcbio-gff/bcbio-gff-0.6.2.tar.gz"
version('0.6.2', sha256='c682dc46a90e9fdb124ab5723797a5f71b2e3534542ceff9f6572b64b9814e68')
depends_on('py-setuptools', type='build')
depends_on('py-six', type=('build', 'run'))
depends_on('py-biopython', type=('build', 'run'))
|
LLNL/spack
|
var/spack/repos/builtin/packages/py-bcbio-gff/package.py
|
Python
|
lgpl-2.1
| 640
|
[
"Biopython"
] |
b57622c0eee7afc7172e40209da2394f2582ca9bc35d61e424fa38bc9a37037a
|
# Copyright (C) 2003 CAMP
# Please see the accompanying LICENSE file for further information.
import os
import xml.sax
import re
from cStringIO import StringIO
from math import sqrt, pi
import numpy as np
from ase.data import atomic_names
from ase.units import Bohr, Hartree
from gpaw import setup_paths
from gpaw.spline import Spline
from gpaw.utilities import _fact, divrl
from gpaw.utilities.tools import md5_new
from gpaw.xc.pawcorrection import PAWXCCorrection
from gpaw.mpi import broadcast_string
try:
import gzip
except ImportError:
has_gzip = False
else:
has_gzip = True
class SetupData:
"""Container class for persistent setup attributes and XML I/O."""
def __init__(self, symbol, xcsetupname, name='paw', readxml=True,
zero_reference=False, world=None):
self.symbol = symbol
self.setupname = xcsetupname
self.name = name
self.zero_reference = zero_reference
# Default filename if this setup is written
if name is None or name == 'paw':
self.stdfilename = '%s.%s' % (symbol, self.setupname)
else:
self.stdfilename = '%s.%s.%s' % (symbol, name, self.setupname)
self.filename = None # full path if this setup was loaded from file
self.fingerprint = None # hash value of file data if applicable
self.Z = None
self.Nc = None
self.Nv = None
# Quantum numbers, energies
self.n_j = []
self.l_j = []
self.f_j = []
self.eps_j = []
self.e_kin_jj = None # <phi | T | phi> - <phit | T | phit>
self.beta = None
self.ng = None
self.rcgauss = None # For compensation charge expansion functions
# State identifier, like "X-2s" or "X-p1", where X is chemical symbol,
# for bound and unbound states
self.id_j = []
# Partial waves, projectors
self.phi_jg = []
self.phit_jg = []
self.pt_jg = []
self.rcut_j = []
# Densities, potentials
self.nc_g = None
self.nct_g = None
self.nvt_g = None
self.vbar_g = None
# Kinetic energy densities of core electrons
self.tauc_g = None
self.tauct_g = None
# Reference energies
self.e_kinetic = 0.0
self.e_xc = 0.0
self.e_electrostatic = 0.0
self.e_total = 0.0
self.e_kinetic_core = 0.0
# Generator may store description of setup in this string
self.generatorattrs = []
self.generatordata = ''
# Optional quantities, normally not used
self.X_p = None
self.ExxC = None
self.extra_xc_data = {}
self.phicorehole_g = None
self.fcorehole = 0.0
self.lcorehole = None
self.ncorehole = None
self.core_hole_e = None
self.core_hole_e_kin = None
self.has_corehole = False
if readxml:
self.read_xml(world=world)
def read_xml(self, source=None, world=None):
PAWXMLParser(self).parse(source=source, world=world)
nj = len(self.l_j)
self.e_kin_jj.shape = (nj, nj)
def is_compatible(self, xc):
return xc.get_setup_name() == self.setupname
def print_info(self, text, setup):
if self.phicorehole_g is None:
text(self.symbol + '-setup:')
else:
text('%s-setup (%.1f core hole):' % (self.symbol, self.fcorehole))
text(' name :', atomic_names[self.Z])
text(' id :', self.fingerprint)
text(' Z :', self.Z)
text(' valence:', self.Nv)
if self.phicorehole_g is None:
text(' core : %d' % self.Nc)
else:
text(' core : %.1f' % self.Nc)
text(' charge :', self.Z - self.Nv - self.Nc)
text(' file :', self.filename)
text((' cutoffs: %4.2f(comp), %4.2f(filt), %4.2f(core),'
' lmax=%d' % (sqrt(10) * self.rcgauss * Bohr,
# XXX is this really true? I don't think this is
# actually the cutoff of the compensation charges
setup.rcutfilter * Bohr,
setup.rcore * Bohr,
setup.lmax)))
text(' valence states:')
text(' energy radius')
j = 0
for n, l, f, eps in zip(self.n_j, self.l_j, self.f_j, self.eps_j):
if n > 0:
f = '(%d)' % f
text(' %d%s%-4s %7.3f %5.3f' % (
n, 'spdf'[l], f, eps * Hartree, self.rcut_j[j] * Bohr))
else:
text(' *%s %7.3f %5.3f' % (
'spdf'[l], eps * Hartree, self.rcut_j[j] * Bohr))
j += 1
text()
def create_compensation_charge_functions(self, lmax, r_g, dr_g):
"""Create Gaussians used to expand compensation charges."""
rcgauss = self.rcgauss
g_lg = np.zeros((lmax + 1, len(r_g)))
g_lg[0] = 4 / rcgauss**3 / sqrt(pi) * np.exp(-(r_g / rcgauss)**2)
for l in range(1, lmax + 1):
g_lg[l] = 2.0 / (2 * l + 1) / rcgauss**2 * r_g * g_lg[l - 1]
for l in range(lmax + 1):
g_lg[l] /= np.dot(r_g**(l + 2) * dr_g, g_lg[l])
return g_lg
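    # Normalization note (editorial, hedged): each g_lg[l] returned above is
    # divided by np.dot(r_g**(l+2) * dr_g, g_lg[l]), so on the given grid the
    # (l+2)-moment of every Gaussian equals 1 and the expanded compensation
    # charges carry unit multipole moments.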
def get_smooth_core_density_integral(self, Delta0):
return -Delta0 * sqrt(4 * pi) - self.Z + self.Nc
def get_overlap_correction(self, Delta0_ii):
return sqrt(4.0 * pi) * Delta0_ii
def get_linear_kinetic_correction(self, T0_qp):
e_kin_jj = self.e_kin_jj
nj = len(e_kin_jj)
K_q = []
for j1 in range(nj):
for j2 in range(j1, nj):
K_q.append(e_kin_jj[j1, j2])
K_p = sqrt(4 * pi) * np.dot(K_q, T0_qp)
return K_p
def get_ghat(self, lmax, alpha, r, rcut):
d_l = [_fact[l] * 2**(2 * l + 2) / sqrt(pi) / _fact[2 * l + 1]
for l in range(lmax + 1)]
g = alpha**1.5 * np.exp(-alpha * r**2)
g[-1] = 0.0
ghat_l = [Spline(l, rcut, d_l[l] * alpha**l * g)
for l in range(lmax + 1)]
return ghat_l
def find_core_density_cutoff(self, r_g, dr_g, nc_g):
if self.Nc == 0:
return 1.0
else:
N = 0.0
g = self.ng - 1
while N < 1e-7:
N += sqrt(4 * pi) * nc_g[g] * r_g[g]**2 * dr_g[g]
g -= 1
return r_g[g]
def get_max_projector_cutoff(self):
g = self.ng - 1
pt_g = self.pt_jg[0]
while pt_g[g] == 0.0:
g -= 1
gcutfilter = g + 1
return gcutfilter
def get_xc_correction(self, rgd, xc, gcut2, lcut):
phicorehole_g = self.phicorehole_g
if phicorehole_g is not None:
phicorehole_g = phicorehole_g[:gcut2].copy()
xc_correction = PAWXCCorrection(
[phi_g[:gcut2] for phi_g in self.phi_jg],
[phit_g[:gcut2] for phit_g in self.phit_jg],
self.nc_g[:gcut2] / sqrt(4 * pi),
self.nct_g[:gcut2] / sqrt(4 * pi),
rgd,
list(enumerate(self.l_j)),
min(2 * lcut, 4),
self.e_xc,
phicorehole_g,
self.fcorehole,
self.tauc_g[:gcut2].copy(),
self.tauct_g[:gcut2].copy())
return xc_correction
def write_xml(self):
l_j = self.l_j
xml = open(self.stdfilename, 'w')
print >> xml, '<?xml version="1.0"?>'
print >> xml, '<paw_setup version="0.6">'
name = atomic_names[self.Z].title()
comment1 = name + ' setup for the Projector Augmented Wave method.'
comment2 = 'Units: Hartree and Bohr radii.'
comment2 += ' ' * (len(comment1) - len(comment2))
print >> xml, ' <!--', comment1, '-->'
print >> xml, ' <!--', comment2, '-->'
print >> xml, (' <atom symbol="%s" Z="%d" core="%.1f" valence="%d"/>'
% (self.symbol, self.Z, self.Nc, self.Nv))
if self.setupname == 'LDA':
type = 'LDA'
name = 'PW'
else:
type = 'GGA'
name = self.setupname
print >> xml, ' <xc_functional type="%s" name="%s"/>' % (type, name)
gen_attrs = ' '.join(['%s="%s"' % (key, value) for key, value
in self.generatorattrs])
print >> xml, ' <generator %s>' % gen_attrs
print >> xml, ' %s' % self.generatordata
print >> xml, ' </generator>'
print >> xml, ' <ae_energy kinetic="%f" xc="%f"' % \
(self.e_kinetic, self.e_xc)
print >> xml, ' electrostatic="%f" total="%f"/>' % \
(self.e_electrostatic, self.e_total)
print >> xml, ' <core_energy kinetic="%f"/>' % self.e_kinetic_core
print >> xml, ' <valence_states>'
line1 = ' <state n="%d" l="%d" f=%s rc="%5.3f" e="%8.5f" id="%s"/>'
line2 = ' <state l="%d" rc="%5.3f" e="%8.5f" id="%s"/>'
for id, l, n, f, e, rc in zip(self.id_j, l_j, self.n_j, self.f_j,
self.eps_j, self.rcut_j):
if n > 0:
f = '%-4s' % ('"%d"' % f)
print >> xml, line1 % (n, l, f, rc, e, id)
else:
print >> xml, line2 % (l, rc, e, id)
print >> xml, ' </valence_states>'
print >> xml, (' <radial_grid eq="r=a*i/(n-i)" a="%f" n="%d" ' +
'istart="0" iend="%d" id="g1"/>') % \
(self.beta, self.ng, self.ng - 1)
print >> xml, (' <shape_function type="gauss" rc="%.12e"/>' %
self.rcgauss)
if self.has_corehole:
print >> xml, ((' <core_hole_state state="%d%s" ' +
'removed="%.1f" eig="%.8f" ekin="%.8f">') %
(self.ncorehole, 'spdf'[self.lcorehole],
self.fcorehole,
self.core_hole_e, self.core_hole_e_kin))
for x in self.phicorehole_g:
print >> xml, '%16.12e' % x,
print >> xml, '\n </core_hole_state>'
for name, a in [('ae_core_density', self.nc_g),
('pseudo_core_density', self.nct_g),
('pseudo_valence_density', self.nvt_g),
('zero_potential', self.vbar_g),
('ae_core_kinetic_energy_density', self.tauc_g),
('pseudo_core_kinetic_energy_density', self.tauct_g)]:
print >> xml, ' <%s grid="g1">\n ' % name,
for x in a:
print >> xml, '%16.12e' % x,
print >> xml, '\n </%s>' % name
# Print xc-specific data to setup file (used so for KLI and GLLB)
for name, a in self.extra_xc_data.iteritems():
newname = 'GLLB_'+name
print >> xml, ' <%s grid="g1">\n ' % newname,
for x in a:
print >> xml, '%16.12e' % x,
print >> xml, '\n </%s>' % newname
for id, l, u, s, q, in zip(self.id_j, l_j, self.phi_jg, self.phit_jg,
self.pt_jg):
for name, a in [('ae_partial_wave', u),
('pseudo_partial_wave', s),
('projector_function', q)]:
print >> xml, (' <%s state="%s" grid="g1">\n ' %
(name, id)),
#p = a.copy()
#p[1:] /= r[1:]
#if l == 0:
# # XXXXX go to higher order!!!!!
# p[0] = (p[2] +
# (p[1] - p[2]) * (r[0] - r[2]) / (r[1] - r[2]))
for x in a:
print >> xml, '%16.12e' % x,
print >> xml, '\n </%s>' % name
print >> xml, ' <kinetic_energy_differences>',
nj = len(self.e_kin_jj)
for j1 in range(nj):
print >> xml, '\n ',
for j2 in range(nj):
print >> xml, '%16.12e' % self.e_kin_jj[j1, j2],
print >> xml, '\n </kinetic_energy_differences>'
if self.X_p is not None:
print >> xml, ' <exact_exchange_X_matrix>\n ',
for x in self.X_p:
print >> xml, '%16.12e' % x,
print >> xml, '\n </exact_exchange_X_matrix>'
print >> xml, ' <exact_exchange core-core="%f"/>' % self.ExxC
print >> xml, '</paw_setup>'
def build(self, xcfunc, lmax, basis):
from gpaw.setup import Setup
setup = Setup(self, xcfunc, lmax, basis)
return setup
def search_for_file(name, world=None):
"""Traverse gpaw setup paths to find file.
Returns the file path and file contents. If the file is not found,
contents will be None."""
if world is not None and world.size > 1:
if world.rank == 0:
filename, source = search_for_file(name)
if source is None:
source = ''
string = filename + '|' + source
else:
string = None
filename, source = broadcast_string(string, 0, world).split('|', 1)
if source == '':
source = None
return filename, source
source = None
filename = None
for path in setup_paths:
filename = os.path.join(path, name)
if os.path.isfile(filename):
source = open(filename).read()
break
else:
filename += '.gz'
if os.path.isfile(filename):
if has_gzip:
source = gzip.open(filename).read()
else:
source = os.popen('gunzip -c ' + filename, 'r').read()
break
return filename, source
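# Usage sketch (hypothetical path and setup name; not from the original file):
# search_for_file walks setup_paths and falls back to a gzipped copy of the
# requested file name.
def _demo_search_for_file():
    setup_paths.append('/opt/gpaw-setups')  # hypothetical location
    filename, source = search_for_file('H.LDA')  # hypothetical setup name
    return source is not None  # True only if the setup file was actually found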
class PAWXMLParser(xml.sax.handler.ContentHandler):
def __init__(self, setup):
xml.sax.handler.ContentHandler.__init__(self)
self.setup = setup
self.id = None
self.data = None
def parse(self, source=None, world=None):
setup = self.setup
if source is None:
(setup.filename, source) = search_for_file(setup.stdfilename,
world)
if source is None:
print """
You need to set the GPAW_SETUP_PATH environment variable to point to
the directory where the setup files are stored. See
http://wiki.fysik.dtu.dk/gpaw/install/installationguide.html for details."""
raise RuntimeError('Could not find %s-setup for "%s".' %
(setup.name + '.' + setup.setupname,
setup.symbol))
setup.fingerprint = md5_new(source).hexdigest()
# XXXX There must be a better way!
# We don't want to look at the dtd now. Remove it:
source = re.compile(r'<!DOCTYPE .*?>', re.DOTALL).sub('', source, 1)
xml.sax.parse(StringIO(source), self) # XXX There is a special parse
# function that takes a string
if setup.zero_reference:
setup.e_total = 0.0
setup.e_kinetic = 0.0
setup.e_electrostatic = 0.0
setup.e_xc = 0.0
#if not hasattr(setup, 'tauc_g'):
# setup.tauc_g = setup.tauct_g = None
def startElement(self, name, attrs):
setup = self.setup
if name == 'paw_setup':
setup.version = attrs['version']
assert setup.version >= '0.4'
if name == 'atom':
setup.Z = int(attrs['Z'])
setup.Nc = float(attrs['core'])
setup.Nv = int(attrs['valence'])
elif name == 'xc_functional':
if attrs['type'] == 'LDA':
setup.xcname = 'LDA'
else:
assert attrs['type'] == 'GGA'
setup.xcname = attrs['name']
elif name == 'ae_energy':
setup.e_total = float(attrs['total'])
setup.e_kinetic = float(attrs['kinetic'])
setup.e_electrostatic = float(attrs['electrostatic'])
setup.e_xc = float(attrs['xc'])
elif name == 'core_energy':
setup.e_kinetic_core = float(attrs['kinetic'])
elif name == 'state':
setup.n_j.append(int(attrs.get('n', -1)))
setup.l_j.append(int(attrs['l']))
setup.f_j.append(int(attrs.get('f', 0)))
setup.eps_j.append(float(attrs['e']))
setup.rcut_j.append(float(attrs.get('rc', -1)))
setup.id_j.append(attrs['id'])
# Compatibility with old setups:
if setup.version < '0.6' and setup.f_j[-1] == 0:
setup.n_j[-1] = -1
elif name in ['grid', 'radial_grid']: # XXX
assert attrs['eq'] == 'r=a*i/(n-i)'
setup.ng = int(attrs['n'])
setup.beta = float(attrs['a'])
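            # (Editorial note: the radial grid above is r_i = a*i/(n - i) for
            # i = 0..n-1, so r_0 = 0 and r_{n-1} = a*(n-1); 'a' is stored as
            # setup.beta and 'n' as setup.ng.)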
elif name == 'shape_function':
if attrs.has_key('rc'):
assert attrs['type'] == 'gauss'
setup.rcgauss = float(attrs['rc'])
else:
# Old style: XXX
setup.rcgauss = max(setup.rcut_j) / sqrt(float(attrs['alpha']))
elif name in ['ae_core_density', 'pseudo_core_density',
'localized_potential', 'zero_potential', # XXX
'kinetic_energy_differences', 'exact_exchange_X_matrix',
'ae_core_kinetic_energy_density',
'pseudo_core_kinetic_energy_density']:
self.data = []
elif name.startswith('GLLB_'):
self.data = []
elif name in ['ae_partial_wave', 'pseudo_partial_wave']:
self.data = []
self.id = attrs['state']
elif name == 'projector_function':
self.id = attrs['state']
self.data = []
elif name == 'exact_exchange':
setup.ExxC = float(attrs['core-core'])
elif name == 'core_hole_state':
setup.has_corehole = True
setup.fcorehole = float(attrs['removed'])
setup.lcorehole = 'spdf'.find(attrs['state'][1])
setup.core_hole_e = float(attrs['eig'])
setup.core_hole_e_kin = float(attrs['ekin'])
self.data = []
else:
self.data = None
def characters(self, data):
if self.data is not None:
self.data.append(data)
def endElement(self, name):
setup = self.setup
if self.data is None:
return
x_g = np.array([float(x) for x in ''.join(self.data).split()])
if name == 'ae_core_density':
setup.nc_g = x_g
elif name == 'pseudo_core_density':
setup.nct_g = x_g
elif name == 'kinetic_energy_differences':
setup.e_kin_jj = x_g
elif name == 'ae_core_kinetic_energy_density':
setup.tauc_g = x_g
elif name == 'pseudo_valence_density':
setup.nvt_g = x_g
elif name == 'pseudo_core_kinetic_energy_density':
setup.tauct_g = x_g
elif name in ['localized_potential', 'zero_potential']: # XXX
setup.vbar_g = x_g
elif name.startswith('GLLB_'):
# Add setup tags starting with GLLB_ to extra_xc_data. Remove GLLB_ from front of string.
setup.extra_xc_data[name[5:]] = x_g
elif name == 'ae_partial_wave':
j = len(setup.phi_jg)
assert self.id == setup.id_j[j]
setup.phi_jg.append(x_g)
elif name == 'pseudo_partial_wave':
j = len(setup.phit_jg)
assert self.id == setup.id_j[j]
setup.phit_jg.append(x_g)
elif name == 'projector_function':
j = len(setup.pt_jg)
assert self.id == setup.id_j[j]
setup.pt_jg.append(x_g)
elif name == 'exact_exchange_X_matrix':
setup.X_p = x_g
elif name == 'core_hole_state':
setup.phicorehole_g = x_g
|
qsnake/gpaw
|
gpaw/setup_data.py
|
Python
|
gpl-3.0
| 20,200
|
[
"ASE",
"GPAW"
] |
7c3c0864e2abeb22462ce556e66410fd0f170340f35b6d5334c4752541cf823a
|
import os
import sys
from traceHandler import Trace
import optimizerHandler
import importlib
try:
    import copyreg
except ImportError:
    import copy_reg as copyreg  # fall back to the Python 2 name of the module
from types import MethodType
class externalHandler():
"""
Handles models which are using a simulator other than Neuron.
:param command: the command string which should be executed
.. note::
The command must consist of the following parts:
* the command to execute
* the model file
* options (optional)
* number of parameters to optimize
"""
def __init__(self,command):
c=command.split()
self.executable=c[0]
self.model_file=c[1]
self.options=c[2:len(c)-1]
self.number_of_params=int(c[-1])
self.record=[[]]
self.spike_times=None
os.chdir("/".join(self.model_file.split("/")[0:-1]))
def SetNParams(self,o):
"""
        Sets the number of parameters in the given object by calling its ``SetObjTOOpt`` method.
:param o: the object whose method will be called
.. note::
        This is necessary because the other parts expect the option handler object to know which parameters are subject to optimization. Since this is not true in the case of an external simulator, this workaround is needed.
"""
for n in range(self.number_of_params):
o.SetObjTOOpt("parameter"+str(n))
def GetExec(self ,unique_ID=''):
"""
Creates the command that runs the simulator with the model and with the appropriate options.
:return: a ``list`` of strings ready for execution
"""
tmp=[self.executable,self.model_file]
for o in self.options:
tmp.append(o)
tmp += [unique_ID]
return tmp
def GetParameters(self):
return None
def CreateStimuli(self,s):
pass
def SetStimuli(self,p,e):
pass
def SetChannelParameters(self,section,segment,channel,params,values):
pass
def SetMorphParameters(self,section,params,values):
pass
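# Hedged example (hypothetical command string; not from the original file):
# how externalHandler's __init__ splits its command into executable, model
# file, options and parameter count.
def _demo_external_command_parsing():
    c = "python /tmp/model/run.py --fast 3".split()
    executable, model_file = c[0], c[1]
    options = c[2:len(c) - 1]        # ['--fast']
    number_of_params = int(c[-1])    # 3
    return executable, model_file, options, number_of_params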
# class to handle the neuron models
class modelHandlerNeuron():
"""
Imports the necessary modules to handle Neuron models and loads the model
as well as the additional mechanisms. Creates containers for the sections and the channels for easier handling.
:param model_path: the path to the model file
:param special_path: the path to the special file (.mod files)
:param base: the base working directory
"""
def __init__(self,model_path,special_path,base=os.getcwd()):
self.base_directory=base
os.chdir(self.base_directory)
import neuron
print('*********** NEURON '+neuron.__version__+' LOADED ***********')
self.special= None if special_path == 'None' else special_path
self.model=model_path
self.model_dir=('/').join(self.model.rsplit('/')[0:-1])
if self.special:
neuron.load_mechanisms(self.special)
self.hoc_obj=neuron.h
self.hoc_obj.load_file(1,str(self.model))
self.hoc_obj.load_file("stdrun.hoc")
self.vec=self.hoc_obj.Vector()
self.stimulus=None
self.record=[]
self.spike_times=None
self.sections={}
for n in self.hoc_obj.allsec():
self.sections[str(self.hoc_obj.secname(sec=n))]=n
self.channels={}
for sec in self.hoc_obj.allsec():
for seg in sec:
for mech in seg:
self.channels[str(mech.name())]=mech
#self.hoc_obj.tstop=settings[0]
#self.hoc_obj.steps_per_ms=1/settings[1]
#self.hoc_obj.dt=settings[1]
def __del__(self):
print("model instance deleted")
# creates and adjusts the stimulus parameters
# stims: 0.: stimulation type, 1.: place inside the section, 2.: section name
def CreateStimuli(self,stims):
"""
Creates a Neuron pointprocess which is responsible for the stimulation of the model.
.. note::
The type of the point process is either an ``IClamp`` or a ``SEClamp``.
:param stims: a ``list`` with the following values:
* stimulation type as ``string``
* position inside section
* name of the section
"""
self.stims=stims
#try:
# print self.stims[0],"IClamp"
# print self.stims[0]=="IClamp"
if self.stims[0]=="IClamp":
self.stimulus=self.hoc_obj.IClamp(self.stims[1],sec=self.sections[self.stims[2]])
elif self.stims[0]=="VClamp":
self.stimulus=self.hoc_obj.SEClamp(self.stims[1],sec=self.sections[self.stims[2]])
# else:
# raise TypeError()
# params: 0.: amplitude, 1.: delay, 2.:duration
def SetStimuli(self,params,extra_params):
"""
Sets the parameters of the stimulating object. The parameters are the following:
* amplitude
* delay
* duration
or
* amplitude1
* amplitude2
* amplitude3
* duration1
* duration2
* duration3
:param params: the ``list`` of parameters containing the first 3 values from the above list
:param extra_params: ``list`` of parameters containing additional values to set up the ``SEClamp``
.. note::
The rs parameter of the ``SEClamp`` is set to 0.01
"""
self.parameters=params
if self.stims[0]=="IClamp":
self.stimulus.amp=self.parameters[0]
self.stimulus.delay=self.parameters[1]
self.stimulus.dur=self.parameters[2]
else:
self.stimulus.amp1=extra_params[5]
self.stimulus.amp2=self.parameters[0]
self.stimulus.amp3=extra_params[5]
self.stimulus.dur1=self.parameters[1]
self.stimulus.dur2=self.parameters[2]
self.stimulus.dur3=extra_params[0]-(self.stimulus.dur1+self.stimulus.dur2)
self.stimulus.rs=0.01
#except TypeError:
# sys.exit("Unknown stimulus type!")
#except IndexError:
# sys.exit("Stimulation settings not specified!!")
def SetCustStimuli(self,params):
"""
Uses the vector.play method from Neuron to create a time varying stimulus.
The stimulus is read from the given file.
:param params: ``list`` with the name of the file containing the stimulus as first element
.. note::
        The delay value must be set to zero and the duration to 1e9; these are not the
        actual parameters of the stimulus, but Neuron requires them in order to work.
"""
self.hoc_obj.load_file("vplay.hoc")
self.parameters=params
f=open(self.parameters[0],'r')
tmp=[float(n) for n in f]
self.vec=self.vec.from_python(tmp)
#self.hoc_obj('h.vec.play(&stim.amp,dt)')
#print (dir(self.hoc_obj.cas()(0.5).point_processes()[0]))
#ref=self.hoc_obj.ref(self.stimulus.amp)
self.vec.play(self.hoc_obj.cas()(0.5).point_processes()[0]._ref_amp,self.hoc_obj.dt)
self.stimulus.delay=0
self.stimulus.dur=1e9
#self.stimulus.dur=self.parameters[2]
f.close()
# sets the channel parameters to the given value
# the user must know the existing parameters of the channels in order to change them,
# the program only detects the inserted channels
# sections: string: section name
# channels: string:channel name
# params: string list of channel parameters
# values: float list of values in the order of the parameters
# the user can specify one channel in one section at one time,
# but can give multiple parameters and values
def SetChannelParameters(self,section,segment,channel,params,values):
"""
        Sets the given channel's parameter to the given value. If the section is not known,
        that indicates a serious internal error and the program will abort.
"""
try:
self.sections[section].push()
except KeyError:
sys.exit("No section named " + str(section) + " was found!")
sec = self.hoc_obj.cas()
lseg = [seg for seg in sec]
#self.hoc_obj.cas().__setattr__(params,values)
setattr(lseg[int(segment)],params,values)
self.hoc_obj.pop_section()
# sets the morphology parameters in the given section
# one section, but multiple parameters at one time
def SetMorphParameters(self,section,params,values):
"""
Sets the given morphological parameter to the given value.
        If the section is not known, that indicates a serious internal error and the program will abort.
        If the section has no parameter with the given name, it is interpreted as a parameter of a
        pointprocess; the parameter is then set assuming the pointprocess sits at the middle (0.5)
        of the given section and that there is exactly one other pointprocess in that section.
.. note::
This workaround is implemented because some mechanisms are implemented as pointprocesses.
:param section: the name of the section as ``string``
:param params: the name of the parameter as ``string``
:param values: the value to set
"""
try:
self.sections[section].push()
#self.hoc_obj.cas().__setattr__(params,values)
setattr(self.hoc_obj.cas(), params, values)
self.hoc_obj.pop_section()
except KeyError:
sys.exit("No section named " + str(section) + " was found!")
except AttributeError:
setattr(self.hoc_obj.cas()(0.5).point_processes()[1], params,values)
#except:
#sys.exit("Morphology parameter "+params+" not found!")
# # gets the stimulation settings, passes it to the graphic layer
# def GetStimuli(self):
# """
#
# """
# return [self.stim,self.stims,self.parameters]
#
def contains(self,string,ss):
"""
Checks if substring is in the given ``list``
and creates a string which contains only the matching elements separated by spaces.
:param string: ``list`` of strings
:param ss: the substring to be matched
:return: a string which contains only the matching elements separated by spaces
"""
temp=""
for n in string:
try:
str.index(n, ss)
temp=temp+" "+str(n)
except ValueError:
pass
return temp
# returns the adjustable parameters in the model, passes it to the graphic layer
# comment: the dir option returns everything in the channels class, but there is no exact distinction between
# channel parameters and methods
    # possible distinction: channel variables contain the channel's name, e.g. CaN <-> gmax_CaN
# returns a string matrix, containing the section name,
# morphology parameters, mechanisms' names, and the mech's changeable parameters
def GetParameters(self):
"""
Collects every member of every section object and filters out those that are not parameters of
the model. The function will collect:
        * every parameter of the mechanisms
* every mechanism
* some default parameters that are always included in a model,
and pointprocesses that are not some sort of Clamp
:return: the filtered content of the model in a string matrix
"""
matrix=[]
temp=[]
parname=[]
mechs_pars=[]
defaults=[]
seg_num=0
for sec in self.hoc_obj.allsec():
temp.append(str(self.hoc_obj.secname(sec=sec)))
defaults=["", "morphology",["L" , "cm" , "Ra", "diam"]]
mechs_pars.append(defaults)
for seg in sec:
for mech in seg:
self.hoc_obj('strdef mtname, msname')
self.hoc_obj('mtname=" "')
self.hoc_obj('objref mechs')
self.hoc_obj.mtname=mech.name()
self.hoc_obj('mechs=new MechanismStandard(mtname)')
self.hoc_obj('k = mechs.count()')
parnum=int(self.hoc_obj.k)
self.hoc_obj('j=0')
for j in range(parnum):
self.hoc_obj.j = j
self.hoc_obj('k = mechs.name(msname, j)')
parname.append(self.hoc_obj.msname)
mechs_pars.append([seg_num, mech.name(),parname])
#mechs_pars.append(parname)
parname=[]
seg_num+=1
temp.append(mechs_pars)
mechs_pars=[]
#temp.append(channels)
seg_num=0
matrix.append(temp)
temp=[]
#mechs_pars=[]
#break
return matrix
# sets the simulation settings, like tstop, dt, Vrest, stb also creates the hoc object to store the recordings
# runs the simulation
# settings:
#0:tstop
#1:dt
#2:recorded param
#3:section
#4:position
#5:vrest
#6:sampling rate
def RunControll(self, settings):
"""
Sets up the recording procedure and the simulation, then runs it.
:param settings: the settings of the recording and the parameters of the simulation:
* length of simulation
* integration step size
* parameter to record
* section to record from
* position inside the section
* initial voltage
"""
#self.hoc_obj.cvode_active(1)#variable time step is active
self.hoc_obj.tstop=settings[0]
self.hoc_obj.steps_per_ms=1/settings[1]
self.hoc_obj.dt=settings[1]
vec=self.hoc_obj.Vector()
if settings[2]=="i" and self.stims[0]=="VClamp":
#vec.record(getattr(h.cas()(0.5).point_processes()[0],"_ref_i"))
vec.record(getattr(self.sections[settings[3]](settings[4]).point_processes()[0],"_ref_i"))
else:
ref='_ref_'+settings[2]
vec.record(getattr(self.sections[settings[3]](settings[4]),ref))
# comment: create the hoc vector to record time and the measured parameter
self.hoc_obj.v_init=settings[5]
self.hoc_obj.finitialize(settings[5])
self.hoc_obj.run()
self.record=self.Recordings(vec)
if settings[2]=="i" and self.stims[0]=="VClamp":
self.record[0]=self.record[0][1:]
vec.resize(0)
#else:
#sys.exit("Error occurred during simulation!")
# creates a trace object from the recordings
def Recordings(self,vector):
"""
Converts the hoc vector obtained from the simulation and converts it into a ``Trace`` object.
:param vector: a hoc vector
:return: the data trace from the created object
"""
tr=Trace(1,"",t_length=self.hoc_obj.tstop,freq=(self.hoc_obj.tstop/self.hoc_obj.dt))
tr.Convert(vector)
return tr.data
# comment: pass the hoc vector to Convert, not the hoc_object
|
KaliLab/optimizer
|
optimizer/modelHandler.py
|
Python
|
lgpl-2.1
| 15,481
|
[
"NEURON"
] |
2bf8eb1de916278919929e00852e6bb3c1f46463ca96f27255ee49e80df6619f
|
#!/usr/bin/env python
#==============================================================================
#
# CVS Snapshot Generation Script
# Copyright (C) 2000-2005 by Eric Sunshine <sunshine@sunshineco.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
#==============================================================================
#------------------------------------------------------------------------------
# snapshot.py
#
# A tool for generating snapshots and 'diffs' of a module within a CVS
# repository.
#
# Typically this script is run via the 'cron' daemon by the machine on
# which the final snapshots will reside. See the cron(8), crontab(1), and
# crontab(8) man pages for information specific to cron. A typical crontab
# entry which runs this script at 01:03 each morning might look like this:
#
# MAILTO = sunshine@sunshineco.com
# 3 1 * * * $HOME/bin/snapshot.py
#
# The script makes no attempt to perform any sort of CVS authentication.
# Currently, it is the client's responsibility to authenticate with the CVS
# server if necessary. For :pserver: access the easiest way to work around
# this limitation is to login to the CVS server one time manually using the
# appropriate identity (such as "anonymous"). Once logged in successfully,
# the authentication information is stored in $(HOME)/.cvspass and remains
# there. From that point onward, CVS considers the account as having been
# authenticated.
#
# The configuration settings 'cvsroot' and 'fixcvsroot' allow the project
# to be retrieved from the CVS server using a CVSROOT setting which differs
# from the final CVSROOT setting which is actually stored in resulting
# snapshot. This feature can be useful, for instance, when a snapshot
# should appear to the end-user as having originated via anonymous
# :pserver: even though it was actually generated using the :local: or
# :ext: protocols.
#
# Author's note: This script can certainly be improved. Better error
# handling, more options (such as --verbose, --quiet, etc.), better
# abstraction and generalization, are all future possibilities. There is
# room for improvement.
#
#------------------------------------------------------------------------------
import commands, glob, grp, os, re, string, sys, tempfile, time
prog_name = "snapshot.py"
prog_version = "15"
author_name = "Eric Sunshine"
author_email = "sunshine@sunshineco.com"
author_info = author_name + " <" + author_email + ">"
copyright = "Copyright (C) 2000-2005 by " + author_info
#------------------------------------------------------------------------------
# Configuration Section
# cvsroot - CVSROOT setting for performing the actual check out.
# fixcvsroot - The CVSROOT setting which should appear in each CVS/Root
# file within the snapshot. May be None if it is identical to the
# original cvsroot setting. This setting is useful in cases where the
# CVSROOT value used for performing the check out differs from the one
# users will later need when updating the snapshot from CVS.
# cvsmodule - The module to checkout from the CVS repository.
# moduledir - The name of the directory which is created when the module is
# checked out from CVS (frequently identical to cvsmodule).
# ownergroup - The group name which will be given to the 'chgrp' command
# when directories are created. Assigning group ownership to a
# directory allows others in the group to manipulate the contents of
# the directory. May be None.
# packprefix - Prefix used to compose the final package name of the form
# prefix-YYYY-MM-DD-HHMMSS.ext".
# snapdir - Directory where snapshot packages will be placed.
# checksumfile - Name of checksum file which will be placed in each archiver
# subdirectory. This file will contain checksums for all the published
# packages in the directory.
# checksumprog - Name of program to compute checksums of packages. The
# program should accept a list of filenames for which it should compute
# checksums. It should also emit a report on its standard-output stream
# which can be redirected to 'checksumfile'.
# keepsnapshots - Number of historical snapshots to retain.
# keepdiffs - Number of historical 'diffs' to retain.
# keeplogs - Number of historical log files to retain.
# workdir - Temporary working directory for checkouts.
# warnlevel - Warning level. Default is 0. Higher values may produce
# warnings about certain non-fatal problems, such as when "chgrp" on
# a directory fails when user is not owner of directory.
# archivers - A tuple of archivers used to generate the project packages.
# Each tuple element is a dictionary with the following keys. The key
# "name" specifies the name of the directory under 'snapdir' into which
# this archived package will be placed. The key "dir" is a dictionary
# describing how to archive a directory into a single package. The key
# "file" is a dictionary describing how to archive a single file into a
# package. The "dir" and "file" dictionaries contain the following
# keys. The key "ext" is the file extension for the generated file.
# The key "cmd" is the actual command template which describes how to
# generate the given archive. It may contain the meta-tokens @S and
# @D. The token @S is replaced with the name of the source directory
# or file which is being archived, and @D is replaced with the final
# destination package name.
#------------------------------------------------------------------------------
cvsroot = ":ext:crystal-manage@cvs.crystalspace3d.org:/cvsroot/crystal"
fixcvsroot = ":pserver:anonymous@cvs.crystalspace3d.org:/cvsroot/crystal"
cvsmodule = "CS"
moduledir = "CS"
ownergroup = "crystal"
packprefix = "cs-"
snapdir = "/home/crystal/www/htdocs/cvs-snapshots"
checksumfile = "checksums.md5"
checksumprog = "md5sum"
keepsnapshots = 2
keepdiffs = 14
keeplogs = 14
workdir = "/tmp"
warnlevel = 0
archivers = (
{"name": "gzip",
"dir": {"ext": "tgz", "cmd": "tar --create --file=- @S | gzip > @D"},
"file": {"ext": "gz", "cmd": "gzip --stdout @S > @D"}},
{"name": "bzip2",
"dir": {"ext": "tar.bz2", "cmd": "tar --create --file=- @S | bzip2 > @D"},
"file": {"ext": "bz2", "cmd": "bzip2 --stdout @S > @D"}},
{"name": "zip",
"dir": {"ext": "zip", "cmd": "zip -q -r @D @S"},
"file": {"ext": "zip", "cmd": "zip -q @D @S"}})
#------------------------------------------------------------------------------
# Directory Stack Class
#------------------------------------------------------------------------------
class DirStack:
stack = []
def pushdir(self, dir):
self.stack.append(os.getcwd())
os.chdir(dir)
def popdir(self):
os.chdir(self.stack[-1])
del self.stack[-1]
#------------------------------------------------------------------------------
# Snapshot Class
#------------------------------------------------------------------------------
class Snapshot:
def timenow(self):
return time.asctime(time.gmtime(time.time())) + " UTC"
def __init__(self):
self.packtemplate = packprefix + "????-??-??.??????"
self.packbase = packprefix + time.strftime(
"%Y-%m-%d.%H%M%S", time.gmtime(time.time()))
self.linkbase = packprefix + "current-snapshot"
self.diffext = ".diff"
self.diffname = self.packbase + self.diffext
self.logdir = os.path.join(snapdir, "logs")
self.logext = ".log"
self.logname = self.packbase + self.logext
self.logpath = os.path.join(self.logdir, self.logname)
self.logfile = None
self.stamppath = os.path.join(self.logdir, "lastrun.timestamp")
self.timestamp = self.timenow()
self.hasdiff = None
self.dirstack = DirStack()
def log(self, msg):
s = msg + "\n"
sys.stdout.write(s)
if self.logfile:
self.logfile.write(s)
def run(self, cmd):
rc = commands.getstatusoutput(cmd)
if len(rc[1]) > 0:
self.log("Command failed: " + cmd)
self.log(rc[1])
return (rc[0] == 0)
def removefile(self, path):
try:
os.remove(path)
except OSError, e:
if warnlevel > 1:
self.log('Error removing file "' + path + '"; reason: ' +
str(e))
def makedirectory(self, path):
if not os.path.exists(path) :
os.mkdir(path)
try:
os.chmod(path, 0775)
except Exception, e:
if warnlevel > 0:
self.log("Error making directory group writable: " +
path + '; reason: ' + str(e))
if ownergroup:
try:
os.chown(path, os.getuid(), grp.getgrnam(ownergroup)[2])
except Exception, e:
if warnlevel > 0:
self.log('Error setting group ownership "' + ownergroup +
'" on ' + path + '; reason: ' + str(e))
def openlog(self):
if not self.logfile:
try:
self.logfile = open(self.logpath, "w")
except IOError, e:
self.log("Error opening log file: " + self.logpath + " " +
repr(e.args))
def closelog(self):
if self.logfile:
self.logfile.close()
self.logfile = None
def writetimestamp(self):
file = open(self.stamppath, "w")
file.write(self.timestamp + "\n")
file.close()
def readtimestamp(self):
stamp = None
if os.path.exists(self.stamppath):
file = open(self.stamppath, "r")
stamp = string.strip(file.readline())
file.close()
return stamp
def purge(self, pattern, keep):
files = glob.glob(pattern)
blast = len(files) - keep
if blast > 0:
files.sort()
for i in range(0, blast):
self.log("Purging old file: " + os.path.basename(files[i]))
os.remove(files[i])
def purgeold(self):
self.purge(os.path.join(
self.logdir, self.packtemplate + self.logext), keeplogs)
for dict in archivers:
self.purge(os.path.join(
snapdir, dict["name"], self.packtemplate +
"." + dict["dir"]["ext"]), keepsnapshots)
self.purge(os.path.join(
snapdir, dict["name"], self.packtemplate + self.diffext +
"." + dict["file"]["ext"]), keepdiffs)
def purgetransient(self):
self.log("Purging working directory")
self.run("rm -rf " + self.builddir)
def preparetransient(self):
tempfile.tempdir = workdir
self.builddir = tempfile.mktemp()
self.log("Creating working directory: " + self.builddir)
self.makedirectory(self.builddir)
def findcvsdirs(self, dir):
dirs = []
rc = commands.getstatusoutput(
"find " + dir + " -type d -name CVS -print -prune")
if rc[0] == 0:
dirs = string.split(rc[1], "\n")
else: # 'find' command returned error.
if len(rc[1]) > 0:
self.log("Error searching for CVS directories")
self.log(rc[1])
return dirs
def stripstickyinfo(self, path): # Strip trailing sticky date or tag.
file = open(path, "r")
data = file.read()
file.close()
file = open(path, "w")
file.write(re.sub("(?m)(^/.+)(/[TD].+?)$", "\g<1>/", data))
file.close()
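    # Example (illustrative, editorial): a CVS/Entries line such as
    #   /main.cpp/1.42/Mon Apr  4 12:00:00 2005//Tsome_tag
    # becomes
    #   /main.cpp/1.42/Mon Apr  4 12:00:00 2005//
    # i.e. the trailing sticky-tag (/T...) or sticky-date (/D...) field is emptied.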
def purgestickytags(self, dirs):
self.log("Removing CVS sticky tags")
for dir in dirs:
self.removefile(os.path.join(dir, "Tag"))
self.removefile(os.path.join(dir, "Entries.Static"))
self.stripstickyinfo(os.path.join(dir, "Entries"))
def patchcvsroot(self, dirs):
if fixcvsroot:
self.log("Patching CVS/Root entries")
newroot = fixcvsroot + "\n"
for dir in dirs:
try:
file = open(os.path.join(dir, "Root"), "w")
file.write(newroot)
file.close()
file = None
except IOError, e:
self.log("Error patching Root in " + dir + " " +
repr(e.args))
def checkout(self, datewanted, outdir):
self.log("Retrieving module " + cvsmodule + " for " + datewanted)
self.makedirectory(outdir)
self.dirstack.pushdir(outdir)
rc = self.run("cvs -Q -d " + cvsroot + " checkout -D '" + datewanted +
"' -P " + cvsmodule)
self.dirstack.popdir()
if rc:
dirs = self.findcvsdirs(os.path.join(outdir, moduledir))
if len(dirs) > 0:
self.patchcvsroot(dirs)
self.purgestickytags(dirs)
return rc
def gendiff(self):
oldstamp = self.readtimestamp()
if oldstamp:
olddir = "old"
oldpath = os.path.join(self.builddir, "old")
if self.checkout(oldstamp, oldpath):
self.log("Generating diff of " + oldstamp + " & " +
self.timestamp)
self.dirstack.pushdir(self.builddir)
self.run("diff -crN " + os.path.join(olddir, moduledir) +
" " + moduledir + " > " + self.diffname)
self.dirstack.popdir()
self.hasdiff = 1
def genpackage(self, dirname, dict, src, dst):
outdir = os.path.join(snapdir, dirname)
self.makedirectory(outdir)
target = os.path.join(outdir, dst + "." + dict["ext"])
cmd = string.replace(
string.replace(dict["cmd"], "@S", src), "@D", target)
return self.run(cmd)
def genpackages(self):
self.dirstack.pushdir(self.builddir)
for dict in archivers:
name = dict["name"]
self.log("Generating '" + name + "' packages")
if self.genpackage(name, dict["dir"], moduledir, self.packbase):
if self.hasdiff:
self.genpackage(name, dict["file"], self.diffname,
self.diffname)
self.dirstack.popdir()
self.writetimestamp()
def makelink(self, ext, src, linkname):
src = src + "." + ext
linkname = linkname + "." + ext
self.removefile(linkname)
os.symlink(src, linkname)
def makelinks(self):
for dict in archivers:
name = dict["name"]
self.log("Linking to current '" + name + "' packages")
self.dirstack.pushdir(os.path.join(snapdir, name))
self.makelink(dict["dir"]["ext"], self.packbase, self.linkbase)
if self.hasdiff:
self.makelink(dict["file"]["ext"],
self.packbase + self.diffext,
self.linkbase + self.diffext)
self.dirstack.popdir()
def checksum(self, files):
self.removefile(checksumfile)
if len(files) > 0:
self.run(checksumprog + ' "' + '" "'.join(files) + '" > ' +
checksumfile)
def checksums(self):
for dict in archivers:
name = dict["name"]
extd = "." + dict["dir"]["ext"]
extf = "." + dict["file"]["ext"]
self.log("Generating checksums for '" + name + "' packages")
self.dirstack.pushdir(os.path.join(snapdir, name))
files = []
files.extend(glob.glob(self.packtemplate + extd))
files.extend(glob.glob(self.packtemplate + self.diffext + extf))
files.extend(glob.glob(self.linkbase + extd))
files.extend(glob.glob(self.linkbase + self.diffext + extf))
self.checksum(files)
self.dirstack.popdir()
def dobulk(self):
if self.checkout(self.timestamp, self.builddir):
self.gendiff()
self.genpackages()
self.makelinks()
self.purgeold()
self.checksums()
def doall(self):
self.makedirectory(snapdir)
self.makedirectory(self.logdir)
self.openlog()
self.log(prog_name + " version " + prog_version)
self.log(copyright + "\n")
self.log("BEGIN: " + self.timenow())
try:
self.preparetransient()
self.dirstack.pushdir(self.builddir)
try:
self.dobulk()
except Exception, e:
self.log("A fatal exception occurred: " + str(e))
self.dirstack.popdir()
self.purgetransient()
finally:
self.log("END: " + self.timenow())
self.closelog()
tool = Snapshot()
tool.doall()
|
garinh/cs
|
bin/snapshot.py
|
Python
|
lgpl-2.1
| 17,726
|
[
"BLAST",
"CRYSTAL"
] |
ad2c6586031b7d41a1268a22afdede4e1cf2be9ede4cdbbe5c5b7667862996ed
|
import numpy as np
from numpy import sin, cos, pi, sqrt, tan, arccos, arcsin
try:
from scipy.optimize import newton, minimize, curve_fit
from scipy.signal import find_peaks, savgol_filter
except ImportError:
_can_compute_eclipse_params = False
else:
_can_compute_eclipse_params = True
import logging
logger = logging.getLogger("SOLVER")
logger.addHandler(logging.NullHandler())
# two-Gaussian model stuff
# HELPER FUNCTIONS
def ellipsoidal(phi, Aell, phi0):
return 0.5*Aell*np.cos(4*np.pi*(phi-phi0))
def gaussian(phi, mu, d, sigma):
return d*np.exp(-(phi-mu)**2/(2*sigma**2))
def gsum(phi, mu, d, sigma):
gauss_sum = np.zeros(len(phi))
for i in range(-2,3,1):
gauss_sum += gaussian(phi,mu+i,d,sigma)
return gauss_sum
# CHOICE OF MODELS
def const(phi, C):
return C*np.ones(len(phi))
def ce(phi, C, Aell, phi0):
return const(phi, C) - ellipsoidal(phi, Aell, phi0)
def cg(phi, C, mu, d, sigma):
return const(phi, C) - gsum(phi, mu, d, sigma)
def cge(phi, C, mu, d, sigma, Aell):
return const(phi, C) - ellipsoidal(phi, Aell, mu) - gsum(phi, mu, d, sigma)
def cg12(phi, C, mu1, d1, sigma1, mu2, d2, sigma2):
return const(phi, C) - gsum(phi, mu1, d1, sigma1) - gsum(phi, mu2, d2, sigma2)
def cg12e1(phi, C, mu1, d1, sigma1, mu2, d2, sigma2, Aell):
return const(phi, C) - gsum(phi, mu1, d1, sigma1) - gsum(phi, mu2, d2, sigma2) - ellipsoidal(phi, Aell, mu1)
def cg12e2(phi, C, mu1, d1, sigma1, mu2, d2, sigma2, Aell):
return const(phi, C) - gsum(phi, mu1, d1, sigma1) - gsum(phi, mu2, d2, sigma2) - ellipsoidal(phi, Aell, mu2)
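# Minimal usage sketch (synthetic values; not part of the original module):
# evaluate the constant-plus-one-Gaussian model on a phase grid.
def _demo_two_gaussian_eval():
    phi = np.linspace(-0.5, 0.5, 101)
    # baseline 1.0, eclipse centred at phase 0.0, depth 0.3, sigma 0.02
    return cg(phi, 1.0, 0.0, 0.3, 0.02)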
# PREPROCESSING
def extend_phasefolded_lc(phases, fluxes, sigmas=None):
#make new arrays that would span phase range -1 to 1:
fluxes_extend = np.hstack((fluxes[(phases > 0)], fluxes, fluxes[phases < 0.]))
phases_extend = np.hstack((phases[phases>0]-1, phases, phases[phases<0]+1))
if sigmas is not None:
sigmas_extend = np.hstack((sigmas[phases > 0], sigmas, sigmas[phases < 0.]))
else:
sigmas_extend = None
return phases_extend, fluxes_extend, sigmas_extend
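# Sketch (synthetic data; editorial): the phase-folded curve is wrapped by one
# full cycle on each side, e.g.
#   >>> extend_phasefolded_lc(np.array([-0.4, 0.1]), np.array([1., 2.]))[0:2]
#   (array([-0.9, -0.4,  0.1,  0.6]), array([2., 1., 2., 1.]))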
def find_eclipse(phases, fluxes):
phase_min = phases[np.nanargmin(fluxes)]
ph_cross = phases[fluxes - np.nanmedian(fluxes) > 0]
    # This block only accounts for eclipses split across the edges of the phase range:
    # if a left/right edge is not found on one side, we look for it at the other end of
    # the range and mirror the value back to the eclipse's side of the range, which
    # keeps the width computation simple.
try:
arg_edge_left = np.argmin(np.abs(phase_min - ph_cross[ph_cross<phase_min]))
edge_left = ph_cross[ph_cross<phase_min][arg_edge_left]
except:
arg_edge_left = np.argmin(np.abs((phase_min+1)-ph_cross[ph_cross<(phase_min+1)]))
edge_left = ph_cross[ph_cross<(phase_min+1)][arg_edge_left]-1
try:
arg_edge_right = np.argmin(np.abs(phase_min-ph_cross[ph_cross>phase_min]))
edge_right = ph_cross[ph_cross>phase_min][arg_edge_right]
except:
arg_edge_right = np.argmin(np.abs((phase_min-1)-ph_cross[ph_cross>(phase_min-1)]))
edge_right = ph_cross[ph_cross>(phase_min-1)][arg_edge_right]+1
return phase_min, edge_left, edge_right
def estimate_eclipse_positions_widths(phases, fluxes, diagnose_init=False):
pos1, edge1l, edge1r = find_eclipse(phases, fluxes)
fluxes_sec = fluxes.copy()
fluxes_sec[((phases > edge1l) & (phases < edge1r)) | ((phases > edge1l+1) | (phases < edge1r-1))] = np.nan
pos2, edge2l, edge2r = find_eclipse(phases, fluxes_sec)
if diagnose_init:
import matplotlib.pyplot as plt
plt.figure(figsize=(10,8))
plt.plot(phases, fluxes, '.')
plt.axhline(y=np.median(fluxes), c='orange')
for i,x in enumerate([pos1, edge1l, edge1r]):
ls = '-' if i==0 else '--'
plt.axvline(x=x, c='r', ls=ls)
for i,x in enumerate([pos2, edge2l, edge2r]):
ls = '-' if i==0 else '--'
plt.axvline(x=x, c='g', ls=ls)
return {'ecl_positions': [pos1, pos2], 'ecl_widths': [edge1r-edge1l, edge2r-edge2l]}
# FITTING
def lnlike(y, yerr, ymodel):
if yerr is not None:
return -np.sum(np.log((2*np.pi)**0.5*yerr)+(y-ymodel)**2/(2*yerr**2))
else:
return -np.sum((y-ymodel)**2)
def bic(y, yerr, ymodel, nparams):
if yerr is not None:
return 2*lnlike(y,yerr,ymodel) - nparams*np.log(len(y))
else:
return lnlike(y, yerr, ymodel)
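# Note (editorial): this "bic" is 2*lnL - k*ln(n), i.e. the negative of the
# conventional BIC, so *larger* values indicate a better model; fit_lc below
# therefore selects the best fit with nanargmax.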
def fit_twoGaussian_models(phases, fluxes, sigmas=None):
# setup the initial parameters
# fit all of the models to the data
twogfuncs = {'C': const, 'CE': ce, 'CG': cg, 'CGE': cge, 'CG12': cg12, 'CG12E1': cg12e1, 'CG12E2': cg12e2}
C0 = fluxes.max()
init_pos_w = estimate_eclipse_positions_widths(phases, fluxes)
mu10, mu20 = init_pos_w['ecl_positions']
sigma10, sigma20 = init_pos_w['ecl_widths']
d10 = fluxes.max()-fluxes[np.argmin(np.abs(phases-mu10))]
d20 = fluxes.max()-fluxes[np.argmin(np.abs(phases-mu20))]
Aell0 = 0.001
init_params = {'C': [C0,],
'CE': [C0, Aell0, mu10],
'CG': [C0, mu10, d10, sigma10],
'CGE': [C0, mu10, d10, sigma10, Aell0],
'CG12': [C0, mu10, d10, sigma10, mu20, d20, sigma20],
'CG12E1': [C0, mu10, d10, sigma10, mu20, d20, sigma20, Aell0],
'CG12E2': [C0, mu10, d10, sigma10, mu20, d20, sigma20, Aell0]}
# parameters used frequently for bounds
fmax = fluxes.max()
fmin = fluxes.min()
fdiff = fmax - fmin
bounds = {'C': ((0),(fmax)),
'CE': ((0, 1e-6, -0.5),(fmax, fdiff, 0.5)),
'CG': ((0., -0.5, 0., 0.), (fmax, 0.5, fdiff, 0.5)),
'CGE': ((0., -0.5, 0., 0., 1e-6),(fmax, 0.5, fdiff, 0.5, fdiff)),
'CG12': ((0.,-0.5, 0., 0., -0.5, 0., 0.),(fmax, 0.5, fdiff, 0.5, 0.5, fdiff, 0.5)),
'CG12E1': ((0.,-0.5, 0., 0., -0.5, 0., 0., 1e-6),(fmax, 0.5, fdiff, 0.5, 0.5, fdiff, 0.5, fdiff)),
'CG12E2': ((0.,-0.5, 0., 0., -0.5, 0., 0., 1e-6),(fmax, 0.5, fdiff, 0.5, 0.5, fdiff, 0.5, fdiff))}
fits = {}
# extend light curve on phase range [-1,1]
phases, fluxes, sigmas = extend_phasefolded_lc(phases, fluxes, sigmas)
for key in twogfuncs.keys():
try:
fits[key] = curve_fit(twogfuncs[key], phases, fluxes, p0=init_params[key], sigma=sigmas, bounds=bounds[key])
except Exception as err:
logger.warning("2G model {} failed with error: {}".format(key, err))
fits[key] = np.array([np.nan*np.ones(len(init_params[key]))])
return fits
def compute_twoGaussian_models(fits, phases):
twogfuncs = {'C': const, 'CE': ce, 'CG': cg, 'CGE': cge, 'CG12': cg12, 'CG12E1': cg12e1, 'CG12E2': cg12e2}
models = {}
for fkey in fits.keys():
models[fkey] = twogfuncs[fkey](phases, *fits[fkey][0])
return models
def compute_twoGaussian_models_BIC(models, phases, fluxes, sigmas):
bics = {}
nparams = {'C':1, 'CE':3, 'CG':4, 'CGE':5, 'CG12':7, 'CG12E1':8, 'CG12E2':8}
for mkey in models.keys():
bics[mkey] = bic(fluxes, sigmas, models[mkey], nparams[mkey])
return bics
def fit_lc(phases, fluxes, sigmas):
if np.any(sigmas <= 0.0):
raise ValueError("sigmas cannot be <= 0.0")
if np.all(np.isnan(sigmas)):
sigmas = None
fits = fit_twoGaussian_models(phases, fluxes, sigmas)
models = compute_twoGaussian_models(fits, phases)
bics = compute_twoGaussian_models_BIC(models, phases, fluxes, sigmas)
params = {'C': ['C'],
'CE': ['C', 'Aell', 'mu1'],
'CG': ['C', 'mu1', 'd1', 'sigma1'],
'CGE': ['C', 'mu1', 'd1', 'sigma1', 'Aell'],
'CG12': ['C', 'mu1', 'd1', 'sigma1', 'mu2', 'd2', 'sigma2'],
'CG12E1': ['C', 'mu1', 'd1', 'sigma1', 'mu2', 'd2', 'sigma2', 'Aell'],
'CG12E2': ['C', 'mu1', 'd1', 'sigma1', 'mu2', 'd2', 'sigma2', 'Aell']}
if np.all(np.isnan(list(bics.values()))):
raise ValueError("all two gaussian models failed, please report this issue.")
best_fit = list(models.keys())[np.nanargmax(list(bics.values()))]
return {'fits':fits, 'models':models, 'bics':bics, 'best_fit':best_fit, 'model_parameters': params}
# REFINING THE FIT
def two_line_model(values,breakp,x,y,sigma,edge='none'):
k, n, a, b, c = values
ymodel = np.zeros(len(x))
if edge == 'left':
ymodel[x<breakp] = k*x[x<breakp] + n
ymodel[x>=breakp] = a*x[x>=breakp]**2+b*x[x>=breakp]+c
elif edge == 'right':
ymodel[x>breakp] = k*x[x>breakp] + n
ymodel[x<=breakp] = a*x[x<=breakp]**2+b*x[x<=breakp]+c
else:
raise ValueError('Must provide value for edge orientation [\'left\', \'right\']')
return np.sum((ymodel-y)**2/sigma**2)
def refine_eclipse_widths(phases, fluxes, sigmas, pos1, pos2, width1, width2):
    # To refine the region around each eclipse, select phase windows around the
    # current eclipse edges (extending out to one eclipse width from the center).
mask1_left = (phases > pos1-width1) & (phases < pos1-0.05*width1)
mask1_right = (phases > pos1+0.3*width1) & (phases < pos1+width1)
    mask2_left = (phases > pos2-width2) & (phases < pos2-0.05*width2)
    mask2_right = (phases > pos2+0.3*width2) & (phases < pos2+width2)
eclipse_breaks = np.zeros(4)
try:
for i,mask in enumerate([mask1_left, mask1_right, mask2_left, mask2_right]):
if i%2==0:
edge='left'
else:
edge='right'
breakpoints = np.linspace(phases[mask].min(), phases[mask].max(), len(phases[mask]))
chis2 = np.zeros(len(breakpoints))
for j,breakp in enumerate(breakpoints):
sol = minimize(two_line_model, [0., 2., 1., 1., 2.], args=(breakp, phases[mask], fluxes[mask], sigmas[mask], edge))
k, n, a, b, c = sol['x']
x=phases[mask]
ymodel = np.zeros(len(x))
if edge == 'left':
ymodel[x<breakp] = k*x[x<breakp] + n
ymodel[x>=breakp] = a*x[x>=breakp]**2+b*x[x>=breakp]+c
elif edge == 'right':
ymodel[x>breakp] = k*x[x>breakp] + n
ymodel[x<=breakp] = a*x[x<=breakp]**2+b*x[x<=breakp]+c
else:
raise ValueError('Must provide value for edge orientation [\'left\', \'right\']')
chis2[j] = np.sum((ymodel-fluxes[mask])**2/sigmas[mask]**2)
eclipse_breaks[i] = breakpoints[np.argmin(chis2)]
return eclipse_breaks
except:
logger.warning('Eclipse width refinement failed.')
return [pos1-0.5*width1, pos1+0.5*width1, pos2-0.5*width2, pos2+0.5*width2]
# GEOMETRY SOLVER
def compute_eclipse_params(phases, fluxes, sigmas, fit_result=None, diagnose=False):
if fit_result is None:
fit_result = fit_lc(phases, fluxes, sigmas)
best_fit = fit_result['best_fit']
model_params = fit_result['model_parameters'][best_fit]
sigma1 = fit_result['fits'][best_fit][0][model_params.index('sigma1')] if 'sigma1' in model_params else np.nan
sigma2 = fit_result['fits'][best_fit][0][model_params.index('sigma2')] if 'sigma2' in model_params else np.nan
mu1 = fit_result['fits'][best_fit][0][model_params.index('mu1')] if 'mu1' in model_params else np.nan
mu2 = fit_result['fits'][best_fit][0][model_params.index('mu2')] if 'mu2' in model_params else np.nan
C = fit_result['fits'][best_fit][0][model_params.index('C')]
if not np.isnan(mu1) and not np.isnan(sigma1) and np.abs(sigma1) < 0.5:
pos1 = mu1
width1 = min(5.6*np.abs(sigma1), 0.5)
depth1 = C - fluxes[np.argmin(np.abs(phases-pos1))]
else:
pos1 = np.nan
width1 = np.nan
depth1 = np.nan
if not np.isnan(mu2) and not np.isnan(sigma2) and np.abs(sigma2) < 0.5:
pos2 = mu2
width2 = min(5.6*np.abs(sigma2), 0.5)
depth2 = C - fluxes[np.argmin(np.abs(phases-pos2))]
else:
pos2 = np.nan
width2 = np.nan
depth2 = np.nan
eclipse_edges = [pos1 - 0.5*width1, pos1+0.5*width1, pos2-0.5*width2, pos2+0.5*width2]
# print('No eclipse refinement')
# if not np.isnan(width1) and not np.isnan(width2) and not np.isnan(pos1) and not np.isnan(pos2):
# if np.abs(pos1-pos2) < width1 or np.abs(pos1-pos2) < width2:
# # in case of higly ellipsoidal systems, the eclipse positions aren't detected well
# # and need to be refined
# logger.warning('Poor two-Gaussian fit. Results potentially unreliable!')
# pos1 = phases_w[(phases_w > -0.25) & (phases_w < 0.25)][np.argmin(fluxes_w[(phases_w > -0.25) & (phases_w < 0.25)])]
# pos2 = phases_w[(phases_w > 0.25) & (phases_w < 0.75)][np.argmin(fluxes_w[(phases_w > 0.25) & (phases_w < 0.75)])]
# width1 = 0.5
# width2 = 0.5
# eclipse_edges = refine_eclipse_widths(phases_w, fluxes_w, sigmas_w, pos1, pos2, width1, width2)
# width1, width2 = eclipse_edges[1]-eclipse_edges[0], eclipse_edges[3]-eclipse_edges[2]
# else:
# eclipse_edges = [np.nan, np.nan, np.nan, np.nan]
if diagnose:
phases_w, fluxes_w, sigmas_w = extend_phasefolded_lc(phases, fluxes, sigmas)
twogfuncs = {'C': const, 'CE': ce, 'CG': cg, 'CGE': cge, 'CG12': cg12, 'CG12E1': cg12e1, 'CG12E2': cg12e2}
[ecl1_l, ecl1_r, ecl2_l, ecl2_r] = eclipse_edges
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(10,8))
ax = fig.add_subplot(111)
ax.plot(phases_w, fluxes_w, 'k.')
plt.plot(phases_w, twogfuncs[best_fit](phases_w, *fit_result['fits'][best_fit][0]), '-', label=fit_result['best_fit'])
lines = []
lines.append(ax.axvline(x=pos1, c='#2B71B1', lw=2, label='primary'))
lines.append(ax.axvline(x=pos2, c='#FF702F', lw=2, label='secondary'))
lines.append(ax.axvline(x=ecl1_l, c='#2B71B1', lw=2, ls='--'))
lines.append(ax.axvline(x=ecl1_r, c='#2B71B1', lw=2, ls='--'))
lines.append(ax.axvline(x=ecl2_l, c='#FF702F', lw=2, ls='--'))
lines.append(ax.axvline(x=ecl2_r, c='#FF702F', lw=2, ls='--'))
drs = []
for l,label in zip(lines,['pos1', 'pos2', 'ecl1_l', 'ecl1_r', 'ecl2_l', 'ecl2_r']):
dr = DraggableLine(l)
dr.label = label
dr.connect()
drs.append(dr)
ax.legend()
plt.show(block=True)
print('adjusting values')
pos1 = drs[0].point.get_xdata()[0]
pos2 = drs[1].point.get_xdata()[0]
ecl1_l = drs[2].point.get_xdata()[0]
ecl1_r = drs[3].point.get_xdata()[0]
ecl2_l = drs[4].point.get_xdata()[0]
ecl2_r = drs[5].point.get_xdata()[0]
eclipse_edges = [ecl1_l, ecl1_r, ecl2_l, ecl2_r]
return {
'primary_width': width1,
'secondary_width': width2,
'primary_position': pos1,
'secondary_position': pos2,
'primary_depth': depth1,
'secondary_depth': depth2,
'eclipse_edges': eclipse_edges
}
# t0 ESTIMATOR
def t0_from_geometry(phase_min, times, period=1, t0_supconj = 0, t0_near_times = True):
delta_t0 = phase_min*period
t0 = t0_supconj + delta_t0
if t0_near_times:
if t0 >= times.min() and t0 <= times.max():
return t0
else:
return t0 + int((times.min()/period)+1)*(period)
else:
return t0
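# Numeric sketch (synthetic values; editorial): with period=2, t0_supconj=0 and
# phase_min=0.25, t0 = 0.5; for times spanning [100, 110], the shift
# int(times.min()/period + 1)*period = 102 moves it to 102.5, inside the data.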
# ECCENTRICITY AND ARG OF PERIASTRON ESTIMATOR
def f (psi, sep): # used in pf_ecc_psi_w
return psi - sin(psi) - 2*pi*sep
def df (psi, sep): # used in pf_ecc_psi_w
return 1 - cos(psi) +1e-6
def ecc_w_from_geometry(sep, pwidth, swidth):
if np.isnan(sep) or np.isnan(pwidth) or np.isnan(swidth):
        logger.warning('Cannot estimate eccentricity and argument of periastron: incomplete geometry information')
return 0., pi/2
# computation fails if sep<0, so we need to adjust for it here.
if sep < 0:
sep = 1+sep
psi = newton(func=f, x0=(12*pi*sep)**(1./3), fprime=df, args=(sep,), maxiter=5000)
# ecc = sqrt( (0.25*(tan(psi-pi))**2+(swidth-pwidth)**2/(swidth+pwidth)**2)/(1+0.25*(tan(psi-pi))**2) )
ecc = (np.sin(0.5*(psi-pi))**2+((swidth-pwidth)/(swidth+pwidth))**2*np.cos(0.5*(psi-pi))**2)**0.5
try:
w1 = np.arcsin((swidth-pwidth)/(swidth+pwidth)/ecc)
w2 = np.arccos((1-ecc**2)**0.5/ecc * np.tan(0.5*(psi-np.pi)))
w = w2 if w1 >= 0 else 2*pi-w2
except:
w = pi/2
return ecc, w
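# Hedged usage sketch (editor's addition): illustrative numbers only. From the
# code above, `sep` appears to be the primary-to-secondary eclipse phase
# separation (0.5 for a circular orbit), wrapped into [0, 1) by the sep < 0
# branch; that reading is an assumption, not documented in the original.
def _example_ecc_w_from_geometry():
    # eclipses separated by 0.55 in phase, with unequal eclipse widths
    return ecc_w_from_geometry(0.55, pwidth=0.04, swidth=0.06)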
class DraggableLine:
def __init__(self, p):
self.point = p
self.press = None
def connect(self):
self.cidpress = self.point.figure.canvas.mpl_connect('button_press_event', self.button_press_event)
self.cidrelease = self.point.figure.canvas.mpl_connect('button_release_event', self.button_release_event)
self.cidmotion = self.point.figure.canvas.mpl_connect('motion_notify_event', self.motion_notify_event)
def disconnect(self):
#disconnect all the stored connection ids
self.point.figure.canvas.mpl_disconnect(self.cidpress)
self.point.figure.canvas.mpl_disconnect(self.cidrelease)
self.point.figure.canvas.mpl_disconnect(self.cidmotion)
def button_press_event(self,event):
if event.inaxes != self.point.axes:
return
contains = self.point.contains(event)[0]
if not contains: return
self.press = self.point.get_xdata(), event.xdata
def button_release_event(self,event):
self.press = None
self.point.figure.canvas.draw()
def motion_notify_event(self, event):
if self.press is None: return
if event.inaxes != self.point.axes: return
xdata, xpress = self.press
dx = event.xdata-xpress
self.point.set_xdata(xdata+dx)
self.point.figure.canvas.draw()
|
phoebe-project/phoebe2
|
phoebe/solverbackends/lc_geometry.py
|
Python
|
gpl-3.0
| 17,980
|
[
"Gaussian"
] |
7088dbd915e387ae54399ffd944bd7464fdaaffc04f9bbda40a9b7df1624b8fb
|
###############################################################################
# DoubleExponentialDiskPotential.py: class that implements the double
# exponential disk potential
#
# rho(R,z) = rho_0 e^-R/h_R e^-|z|/h_z
###############################################################################
import numpy
from scipy import special
from ..util import conversion
from .Potential import Potential, check_potential_inputs_not_arrays
def _de_psi(t):
return t*numpy.tanh(numpy.pi/2.*numpy.sinh(t))
def _de_psiprime(t):
return (numpy.sinh(numpy.pi*numpy.sinh(t))
+numpy.pi*t*numpy.cosh(t))/(numpy.cosh(numpy.pi*numpy.sinh(t))+1)
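# Editor's note: _de_psi is the double-exponential variable transformation
# psi(t) = t*tanh((pi/2)*sinh(t)) and _de_psiprime is its exact derivative
# (rewritten via half-angle identities), as used in Ogata's quadrature for
# Bessel-function integrals; this reading is inferred from the method notes
# in the docstrings below.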
class DoubleExponentialDiskPotential(Potential):
"""Class that implements the double exponential disk potential
.. math::
\\rho(R,z) = \\mathrm{amp}\\,\\exp\\left(-R/h_R-|z|/h_z\\right)
"""
def __init__(self,amp=1.,hr=1./3.,hz=1./16.,normalize=False,
ro=None,vo=None,
de_h=1e-3,de_n=10000):
"""
NAME:
__init__
PURPOSE:
initialize a double-exponential disk potential
INPUT:
amp - amplitude to be applied to the potential (default: 1); can be a Quantity with units of mass density or Gxmass density
hr - disk scale-length (can be Quantity)
hz - scale-height (can be Quantity)
normalize - if True, normalize such that vc(1.,0.)=1., or, if given as a number, such that the force is this fraction of the force necessary to make vc(1.,0.)=1.
           de_h= (1e-3) step size used in the double-exponential numerical integration
           de_n= (10000) number of points used in the numerical integration (use 1000 for a lower-accuracy version that is typically still accurate enough, but faster)
ro=, vo= distance and velocity scales for translation into internal units (default from configuration file)
OUTPUT:
DoubleExponentialDiskPotential object
HISTORY:
2010-04-16 - Written - Bovy (NYU)
2013-01-01 - Re-implemented using faster integration techniques - Bovy (IAS)
2020-12-24 - Re-implemented again using more accurate integration techniques for Bessel integrals - Bovy (UofT)
"""
Potential.__init__(self,amp=amp,ro=ro,vo=vo,amp_units='density')
hr= conversion.parse_length(hr,ro=self._ro)
hz= conversion.parse_length(hz,ro=self._ro)
self.hasC= True
self.hasC_dens= True
self._hr= hr
self._scale= self._hr
self._hz= hz
self._alpha= 1./self._hr
self._beta= 1./self._hz
self._zforceNotSetUp= True #We have not calculated a typical Kz yet
# For double-exponential formula
self._de_h= de_h
self._de_n= de_n
self._de_j0zeros= special.jn_zeros(0,self._de_n)/numpy.pi
self._de_j1zeros= special.jn_zeros(1,self._de_n)/numpy.pi
self._de_j0_xs= numpy.pi/self._de_h\
*_de_psi(self._de_h*self._de_j0zeros)
self._de_j0_weights= 2./(numpy.pi*self._de_j0zeros\
*special.j1(numpy.pi*self._de_j0zeros)**2.)\
*special.j0(self._de_j0_xs)\
*_de_psiprime(self._de_h*self._de_j0zeros)
self._de_j1_xs= numpy.pi/self._de_h\
*_de_psi(self._de_h*self._de_j1zeros)
self._de_j1_weights= 2./(numpy.pi*self._de_j1zeros\
*special.jv(2,numpy.pi*self._de_j1zeros)**2.)\
*special.j1(self._de_j1_xs)\
*_de_psiprime(self._de_h*self._de_j1zeros)
# Potential at zero in case we want that
_gamma= self._beta/self._alpha
_gamma2= _gamma**2.
self._pot_zero= (2.*(_gamma-1.)*numpy.sqrt(1.+_gamma2)
+2.*numpy.arctanh(1./numpy.sqrt(1.+_gamma2))
-numpy.log(1.-_gamma/numpy.sqrt(1.+_gamma2))
+numpy.log(1.+_gamma/numpy.sqrt(1.+_gamma2)))\
/(2.*(1.+_gamma2)**1.5)
self._pot_zero*= -4.*numpy.pi/self._alpha**2.
# Normalize?
if normalize or \
(isinstance(normalize,(int,float)) \
and not isinstance(normalize,bool)): #pragma: no cover
self.normalize(normalize)
def _evaluate(self,R,z,phi=0.,t=0.,dR=0,dphi=0):
"""
NAME:
_evaluate
PURPOSE:
evaluate the potential at (R,z)
INPUT:
R - Cylindrical Galactocentric radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
potential at (R,z)
HISTORY:
2010-04-16 - Written - Bovy (NYU)
2012-12-26 - New method using Gaussian quadrature between zeros - Bovy (IAS)
2020-12-24 - New method using Ogata's Bessel integral formula - Bovy (UofT)
"""
if isinstance(R,(float,int)):
floatIn= True
R= numpy.array([R])
z= numpy.array([z])
else:
if isinstance(z,float):
z= z*numpy.ones_like(R)
floatIn= False
outShape= R.shape # this code can't do arbitrary shapes
R= R.flatten()
z= z.flatten()
fun= lambda x: (self._alpha**2.+(x/R[:,numpy.newaxis])**2.)**-1.5\
*(self._beta*numpy.exp(-x/R[:,numpy.newaxis]*numpy.fabs(z[:,numpy.newaxis]))
-x/R[:,numpy.newaxis]*numpy.exp(-self._beta*numpy.fabs(z[:,numpy.newaxis])))\
/(self._beta**2.-(x/R[:,numpy.newaxis])**2.)
out= -4.*numpy.pi*self._alpha/R*\
numpy.nansum(fun(self._de_j0_xs)*self._de_j0_weights,
axis=1)
out[(R == 0)*(z == 0)]= self._pot_zero
if floatIn: return out[0]
else: return numpy.reshape(out,outShape)
@check_potential_inputs_not_arrays
def _Rforce(self,R,z,phi=0.,t=0.):
"""
NAME:
Rforce
PURPOSE:
evaluate radial force K_R (R,z)
INPUT:
R - Cylindrical Galactocentric radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
K_R (R,z)
HISTORY:
2010-04-16 - Written - Bovy (NYU)
2012-12-26 - New method using Gaussian quadrature between zeros - Bovy (IAS)
2020-12-24 - New method using Ogata's Bessel integral formula - Bovy (UofT)
"""
fun= lambda x: x*(self._alpha**2.+(x/R)**2.)**-1.5\
*(self._beta*numpy.exp(-x/R*numpy.fabs(z))
-x/R*numpy.exp(-self._beta*numpy.fabs(z)))\
/(self._beta**2.-(x/R)**2.)
return -4.*numpy.pi*self._alpha/R**2.\
*numpy.nansum(fun(self._de_j1_xs)*self._de_j1_weights)
@check_potential_inputs_not_arrays
def _zforce(self,R,z,phi=0.,t=0.):
"""
NAME:
zforce
PURPOSE:
evaluate vertical force K_z (R,z)
INPUT:
R - Cylindrical Galactocentric radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
K_z (R,z)
HISTORY:
2010-04-16 - Written - Bovy (NYU)
2012-12-26 - New method using Gaussian quadrature between zeros - Bovy (IAS)
2020-12-24 - New method using Ogata's Bessel integral formula - Bovy (UofT)
"""
fun= lambda x: (self._alpha**2.+(x/R)**2.)**-1.5*x/R\
*(numpy.exp(-x/R*numpy.fabs(z))
-numpy.exp(-self._beta*numpy.fabs(z)))\
/(self._beta**2.-(x/R)**2.)
out= -4.*numpy.pi*self._alpha*self._beta/R*\
numpy.nansum(fun(self._de_j0_xs)*self._de_j0_weights)
if z > 0.:
return out
else:
return -out
@check_potential_inputs_not_arrays
def _R2deriv(self,R,z,phi=0.,t=0.):
"""
NAME:
R2deriv
PURPOSE:
evaluate R2 derivative
INPUT:
R - Cylindrical Galactocentric radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
-d K_R (R,z) d R
HISTORY:
2012-12-27 - Written - Bovy (IAS)
2020-12-24 - New method using Ogata's Bessel integral formula - Bovy (UofT)
"""
fun= lambda x: x**2*(self._alpha**2.+(x/R)**2.)**-1.5\
*(self._beta*numpy.exp(-x/R*numpy.fabs(z))
-x/R*numpy.exp(-self._beta*numpy.fabs(z)))\
/(self._beta**2.-(x/R)**2.)
return 4.*numpy.pi*self._alpha/R**3.\
*numpy.nansum(fun(self._de_j0_xs)*self._de_j0_weights
-fun(self._de_j1_xs)/self._de_j1_xs\
*self._de_j1_weights)
@check_potential_inputs_not_arrays
def _z2deriv(self,R,z,phi=0.,t=0.):
"""
NAME:
z2deriv
PURPOSE:
evaluate z2 derivative
INPUT:
R - Cylindrical Galactocentric radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
-d K_Z (R,z) d Z
HISTORY:
2012-12-26 - Written - Bovy (IAS)
2020-12-24 - New method using Ogata's Bessel integral formula - Bovy (UofT)
"""
fun= lambda x: (self._alpha**2.+(x/R)**2.)**-1.5*x/R\
*(x/R*numpy.exp(-x/R*numpy.fabs(z))
-self._beta*numpy.exp(-self._beta*numpy.fabs(z)))\
/(self._beta**2.-(x/R)**2.)
return -4.*numpy.pi*self._alpha*self._beta/R*\
numpy.nansum(fun(self._de_j0_xs)*self._de_j0_weights)
@check_potential_inputs_not_arrays
def _Rzderiv(self,R,z,phi=0.,t=0.):
"""
NAME:
Rzderiv
PURPOSE:
evaluate the mixed R,z derivative
INPUT:
R - Cylindrical Galactocentric radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
d2phi/dR/dz
HISTORY:
2013-08-28 - Written - Bovy (IAS)
2020-12-24 - New method using Ogata's Bessel integral formula - Bovy (UofT)
"""
fun= lambda x: (self._alpha**2.+(x/R)**2.)**-1.5*(x/R)**2.\
*(numpy.exp(-x/R*numpy.fabs(z))
-numpy.exp(-self._beta*numpy.fabs(z)))\
/(self._beta**2.-(x/R)**2.)
out= -4.*numpy.pi*self._alpha*self._beta/R*\
numpy.nansum(fun(self._de_j1_xs)*self._de_j1_weights)
if z > 0.:
return out
else:
return -out
def _dens(self,R,z,phi=0.,t=0.):
"""
NAME:
_dens
PURPOSE:
evaluate the density
INPUT:
R - Cylindrical Galactocentric radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
rho (R,z)
HISTORY:
2010-08-08 - Written - Bovy (NYU)
"""
return numpy.exp(-self._alpha*R-self._beta*numpy.fabs(z))
def _surfdens(self,R,z,phi=0.,t=0.):
"""
NAME:
_surfdens
PURPOSE:
evaluate the surface density
INPUT:
R - Cylindrical Galactocentric radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
Sigma (R,z)
HISTORY:
2018-08-19 - Written - Bovy (UofT)
"""
return 2.*numpy.exp(-self._alpha*R)/self._beta\
*(1.-numpy.exp(-self._beta*numpy.fabs(z)))
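# Hedged usage sketch (editor's addition): this module uses relative imports,
# so the class is normally reached through the installed galpy package; the
# parameter values below are illustrative only.
if __name__ == "__main__":
    from galpy.potential import DoubleExponentialDiskPotential
    dp = DoubleExponentialDiskPotential(hr=1./3., hz=1./16., normalize=1.)
    print(dp(1., 0.1))         # potential at (R, z) = (1, 0.1)
    print(dp.Rforce(1., 0.1))  # radial force K_R
    print(dp.dens(1., 0.1))    # density rho(R, z)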
|
jobovy/galpy
|
galpy/potential/DoubleExponentialDiskPotential.py
|
Python
|
bsd-3-clause
| 11,672
|
[
"Gaussian"
] |
ae634c6336256aa13436cb553cfb0cfb54f83411a98d4ca2ab32c3028d1a51af
|
from common.pdb_io import *
from common.interface import *
import subprocess
def mutate_residue(atomgroup, resnum, original_residue, new_residue_name, segid = None):
"""
Mutate the residue resum (in segid) to new_residue_name. The procedure
changes the residue name and retains only backbone and CB atoms (unless
the new and old names match in which case the old atoms are retained). The
edited residue is merged back into the atom group and returned.
"""
unchanged = select_atoms(atomgroup, "not all")
if segid:
select_text = "segid " + segid + " and resnum " + str(resnum)
unchanged += select_atoms(atomgroup, "not (" + select_text + ")")
target = select_atoms(atomgroup, select_text)
else:
select_text = "resnum " + str(resnum)
unchanged += select_atoms(atomgroup, "not (" + select_text + ")")
target = select_atoms(atomgroup, select_text)
mutations = select_atoms(atomgroup, "not all")
for residue in target.residues:
if residue.resnames()[0] != original_residue:
if segid:
print "Incorrect original residue specified in the mutation for " + resnum + "in chain " + segid
else:
print "Incorrect original residue specified in the mutation for " + resnum
raise StandardError
else:
mutations += mutate_single_residue(residue, new_residue_name)
mutated = merge_atom_selections([unchanged, mutations])
return mutated
def mutate_single_residue(atomgroup, new_residue_name):
"""
Mutates the residue into new_residue_name. The only atoms retained are
the backbone and CB (unless the new residue is GLY). If the original
resname == new_residue_name the residue is left untouched.
"""
resnames = atomgroup.resnames()
if len(resnames) == 1:
if resnames[0] == new_residue_name:
edited_atomgroup = atomgroup
else:
if new_residue_name == 'GLY':
edited_atomgroup = select_atoms_by_name(atomgroup, ["C", "CA", "N", "O"])
else:
edited_atomgroup = select_atoms_by_name(atomgroup, ["C", "CA", "N", "O", "CB"])
for t in edited_atomgroup:
t.resname = new_residue_name
else:
edited_atomgroup = atomgroup
return edited_atomgroup
def edit_leap_input(specification, filename):
"""
Read in the template Leap input file and replace variables (specified with
a leading $ character) with appropriate values from the input specification
    dictionary. This is output to the specified file.
"""
template = 'tleap_template.txt'
out_file = open(filename, 'w')
with open(template, 'r') as f:
for line in f:
for var_name, value in specification.items():
line = line.replace('$%s' % var_name, value)
out_file.write(line)
out_file.close()
return
def amber_parameterize(specification):
"""
Edit the Leap input to reflect the input specification dictionary and run
tLeap to create completed PDB and topology files for the complex (both
prior to and after solvation), the receptor and the ligand.
"""
leap_input = specification['TARGET_DIR'] + '/leap.in'
edit_leap_input(specification, leap_input)
result = subprocess.check_output(['tleap','-f', leap_input], stderr=subprocess.STDOUT)
return
def chains_selection(chains):
select_text = ''
for chain in chains:
if len(select_text) > 0:
select_text += ' or segid ' + chain
else:
select_text += 'segid ' + chain
return select_text
def split_pdb_chains(structure, rec_chains, lig_chains, sol_chains):
select_rec = chains_selection(rec_chains)
rec = select_atoms(structure, select_rec)
select_lig = chains_selection(lig_chains)
lig = select_atoms(structure, select_lig)
    select_sol = chains_selection(sol_chains)
    sol = select_atoms(structure, select_sol)
return rec, lig, sol
def create_topology(input_pdb, protein_chains, mutations, specification, ff='amber'):
structure = load_pdb(input_pdb)
for chain, residue_list in mutations.iteritems():
        # Need to see what the mutations object looks like; this is a placeholder
for resnum, mutation in residue_list.iteritems():
# mutation is supplied as a list of pairs of one letter aa codes
            # convert to three-letter codes for comparison with the structure
original_resname = aa1to3[mutation[0][0]]
final_resname = aa1to3[mutation[0][1]]
structure = mutate_residue(structure, resnum, original_resname, final_resname, segid = chain)
# Need to think about protonation
rec, lig, sol = split_pdb_chains(structure, protein_chains, ['X'],['S'])
write_pdb_file(rec, specification['RECEPTOR_PDB'])
write_pdb_file(lig, specification['LIGAND_PDB'])
write_pdb_file(sol, specification['SOLVENT_PDB'])
amber_parameterize(specification)
return
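if __name__ == '__main__':
    # Hedged usage sketch (editor's addition): the paths, chain ids and the
    # mutation layout below are hypothetical; the nested-dict shape of
    # `mutations` is inferred from the placeholder loop in create_topology.
    spec = {'TARGET_DIR': '/tmp/bac_run',
            'RECEPTOR_PDB': '/tmp/bac_run/receptor.pdb',
            'LIGAND_PDB': '/tmp/bac_run/ligand.pdb',
            'SOLVENT_PDB': '/tmp/bac_run/solvent.pdb'}
    mutations = {'A': {57: [('A', 'G')]}}  # mutate residue 57 in chain A, ALA -> GLY
    create_topology('complex.pdb', ['A', 'B'], mutations, spec)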
|
dww100/bac
|
builder/bac_builder.py
|
Python
|
apache-2.0
| 5,212
|
[
"Amber"
] |
af330471949369fc404fc31bf37d6335d524d9bfaa6550d826c566fb30eb3824
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Prepares IPHAS images for public release.
This script will edit all images in the data release to ensure they have
the correct astrometric solution (WCS) and calibration information (PHOTZP).
This script will fail for r376022.fit, which is unfortunately a corrupt file.
"""
from __future__ import division, print_function, unicode_literals
from astropy.io import fits
from astropy.io import ascii
from astropy import log
from astropy import wcs
from astropy import table
from astropy import time
import numpy as np
import itertools
import datetime
import os
import util
import constants
__author__ = 'Geert Barentsen'
__copyright__ = 'Copyright, The Authors'
__credits__ = ['Geert Barentsen']
####################
# CONSTANTS & CONFIG
####################
# Table containing slight updates to WCS astrometric parameters
WCSFIXES_PATH = os.path.join(constants.PACKAGEDIR, 'wcs-tuning', 'wcs-fixes.csv')
WCSFIXES = ascii.read(WCSFIXES_PATH)
MD = fits.getdata(os.path.join(constants.DESTINATION, 'metadata.fits'))
METADATA = dict(zip(MD['run'], MD))
###########
# CLASSES
###########
class CalibrationDatabase(object):
"""Class to hold the calibration shifts."""
def __init__(self):
r = ascii.read(os.path.join(constants.DESTINATION, 'calibration',
'calibration-r.csv'))
i = ascii.read(os.path.join(constants.DESTINATION, 'calibration',
'calibration-i.csv'))
ha = ascii.read(os.path.join(constants.DESTINATION, 'calibration',
'calibration-ha.csv'))
self.shifts = dict(zip(
np.concatenate((r['run'], i['run'], ha['run'])),
np.concatenate((r['shift'], i['shift'], ha['shift']))
))
class SurveyImage(object):
"""Class used to write a single IPHAS CCD image with up-to-date keywords."""
def __init__(self, run, ccd):
self.run = run
self.ccd = ccd
# Open the image
self.path = constants.RAWDATADIR + METADATA[run]['image']
self.fits = fits.open(self.path, do_not_scale_image_data=True)
# Is the run a DR2-recalibrated run?
mycaldb = get_caldb()
if self.run in mycaldb.shifts:
self.calibrated = True
else:
self.calibrated = False
# Sort out the new FITS image and header
self.hdu = self.fits[self.ccd]
self.set_header()
self.fix_wcs()
self.add_comments()
@property
def exptime(self):
"""Returns the exposure time as used in DR2.
This can differ slightly from the original exposure time recorded in
the raw data, because the DR2 pipeline accounts for known foibles
        in the exposure time recording.
"""
return METADATA[self.run]['exptime_precalib']
@property
def photzp(self):
"""Returns the zeropoint such that MAG=-2.5*log(pixel value)+PHOTZP
Following the definition of PHOTZP defined in the
"ESO External Data Products Standard"
"""
# What is the calibration shift applied in DR2?
mycaldb = get_caldb()
try:
shift = mycaldb.shifts[self.run]
except KeyError:
shift = 0.0
# The zeropoint in the metadata file is corrected for extinction
# but not re-calibrated and not corrected for PERCORR.
# In accordance with the ESO standard, photzp absorbs the scaling
# with exposure time.
photzp = (METADATA[self.run]['zeropoint_precalib']
- self.percorr
+ shift
+ 2.5*np.log10(self.exptime))
return np.round(photzp, 4)
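    # Worked example (editor's addition, illustrative numbers): with
    # PHOTZP = 24.6, a pixel value of 1000 counts corresponds to
    #   mag(Vega) = -2.5*log10(1000) + 24.6 = -7.5 + 24.6 = 17.1 mag.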
@property
def percorr(self):
return METADATA[self.run]['CCD{0}_PERCORR'.format(self.ccd)]
@property
def confmap(self):
confmap = METADATA[self.run]['conf']
if confmap == 'nan':
return ''
else:
return confmap[1:] # get rid of leading slash
def set_header(self):
# In a few cases the date/time is missing from the headers;
# we recovered these from the observing logs:
if self.run == 755575:
self.fits[0].header['DATE-OBS'] = '2010-08-30'
self.fits[0].header['UTSTART'] = '03:52:00'
if self.run == 948917:
self.fits[0].header['DATE-OBS'] = '2012-11-20'
self.fits[0].header['UTSTART'] = '02:48:00'
# The MJD-OBS keyword is sometimes missing when the header-packet
# from the Telescope Control System was not collected.
if self.run in [755574, 755575, 940983, 942046,
942495, 943312, 948917]:
isostamp = (self.fits[0].header['DATE-OBS']
+ 'T' + self.fits[0].header['UTSTART'])
self.fits[0].header['MJD-OBS'] = time.Time(isostamp, scale='utc').mjd
# Copy keywords from the original HDU[0]
for kw in ['RUN', 'OBSERVAT', 'OBSERVER', 'OBJECT',
'LATITUDE', 'LONGITUD', 'HEIGHT', 'SLATEL', 'TELESCOP',
'MJD-OBS', 'JD', 'PLATESCA', 'TELFOCUS', 'AIRMASS',
'TEMPTUBE', 'INSTRUME', 'WFFPOS', 'WFFBAND', 'WFFID',
'SECPPIX', 'DETECTOR', 'CCDSPEED',
'CCDXBIN', 'CCDYBIN', 'CCDSUM', 'CCDTEMP', 'NWINDOWS']:
try:
self.hdu.header.insert('NAXIS2', (kw,
self.fits[0].header[kw],
self.fits[0].header.comments[kw])
)
except KeyError:
pass
# Ensure a proper ISO stamp
isostamp = (self.fits[0].header['DATE-OBS']
+ 'T' + self.fits[0].header['UTSTART'])
self.hdu.header.insert('NAXIS2', ('DATE-OBS',
isostamp,
'Start time of the exposure [UTC]'))
# Fix exposure time -- it might have changed in detections.py
self.hdu.header['EXPTIME'] = self.exptime
self.hdu.header.comments['EXPTIME'] = '[sec] Exposure time adopted in DR2'
# Add true zeropoint with all corrections absorbed
self.hdu.header['PHOTZP'] = self.photzp
self.hdu.header.comments['PHOTZP'] = 'mag(Vega) = -2.5*log(pixel value) + PHOTZP'
self.hdu.header.comments['MAGZPT'] = 'Uncorrected nightly ZP (per second)'
# Add keywords according to the "ESO External Data Products standard"
self.hdu.header['PHOTZPER'] = 0.03
self.hdu.header.comments['PHOTZPER'] = 'Default 1-sigma PHOTZP uncertainty in IPHAS DR2'
self.hdu.header['PHOTSYS'] = 'Vega'
self.hdu.header.comments['PHOTSYS'] = 'Photometric system'
# Was this image part of the DR2 re-calibration?
if self.calibrated:
self.hdu.header['FLUXCAL'] = 'ABSOLUTE'
else:
self.hdu.header['FLUXCAL'] = 'UNCALIBRATED'
self.hdu.header.comments['FLUXCAL'] = 'Certifies the validity of PHOTZP'
# Where is the conf map?
self.hdu.header['CONFMAP'] = self.confmap
def fix_wcs(self):
"""Derived from fix_wcs() in detections.py."""
# Never trust these WCS keywords, which may have been left behind
# by older versions of the CASU pipeline:
for kw in ['PV1_0', 'PV1_1', 'PV1_2', 'PV1_3',
'PV2_0', 'PV2_1', 'PV2_2', 'PV2_3', 'PV2_5',
'PV3_0', 'PV3_1', 'PV3_3', 'PV3_3',
'PROJP1', 'PROJP3', 'PROJP5', 'WAT1_001', 'WAT2_001',
'RADECSYS', 'CTYPE1', 'CTYPE2', 'CUNIT1', 'CUNIT2']:
try:
del self.hdu.header[kw]
except KeyError:
pass
# Ensure the pipeline defaults are set correctly
self.hdu.header.insert('CRVAL1', ('RADESYS', 'ICRS', 'WCS calibrated against 2MASS'))
self.hdu.header.insert('CRVAL1', ('EQUINOX', 2000.0))
self.hdu.header.insert('CRVAL1', ('CTYPE1', 'RA---ZPN', 'Algorithm type for axis 1'))
self.hdu.header.insert('CRVAL1', ('CTYPE2', 'DEC--ZPN', 'Algorithm type for axis 2'))
self.hdu.header.insert('CRVAL1', ('CRUNIT1', 'deg', 'Unit of right ascension coordinates'))
self.hdu.header.insert('CRVAL1', ('CRUNIT2', 'deg', 'Unit of declination coordinates'))
self.hdu.header.insert('CRVAL1', ('PV2_1', 1.0, 'Coefficient for r term'))
self.hdu.header.insert('CRVAL1', ('PV2_2', 0.0, 'Coefficient for r**2 term'))
self.hdu.header.insert('CRVAL1', ('PV2_3', 220.0, 'Coefficient for r**3 term'))
        # Improve the documentation following
# http://apm49.ast.cam.ac.uk/surveys-projects/wfcam/technical/astrometry
self.hdu.header.comments['CRPIX1'] = '[pixel] Reference pixel along axis 1'
self.hdu.header.comments['CRPIX2'] = '[pixel] Reference pixel along axis 2'
self.hdu.header.comments['CRVAL1'] = '[deg] Right ascension at the reference pixel'
self.hdu.header.comments['CRVAL2'] = '[deg] Declination at the reference pixel'
self.hdu.header.comments['CD1_1'] = 'Transformation matrix element'
self.hdu.header.comments['CD1_2'] = 'Transformation matrix element'
self.hdu.header.comments['CD2_1'] = 'Transformation matrix element'
self.hdu.header.comments['CD2_2'] = 'Transformation matrix element'
# Is an updated (fixed) WCS available?
if self.run in WCSFIXES['RUN']:
idx = ((WCSFIXES['RUN'] == self.run)
& (WCSFIXES['CCD'] == self.ccd))
if idx.sum() > 0:
log.info("WCS fixed: {0}[{1}].".format(self.run, self.ccd))
idx_fix = idx.nonzero()[0][-1]
for kw in ['CRVAL1', 'CRVAL2', 'CRPIX1', 'CRPIX2',
'CD1_1', 'CD1_2', 'CD2_1', 'CD2_2']:
self.hdu.header[kw] = WCSFIXES[kw][idx_fix]
def add_comments(self):
"""Populate the HISTORY and COMMENT keywords of the FITS file.
At the time of writing, we use "hdu._header" rather than "hdu.header"
to by-pass Astropy issue #2363.
"""
self.hdu._header['HISTORY'] = ''
self.hdu._header['HISTORY'] = 'Updated ' + datetime.datetime.now().strftime('%Y-%m-%d')
self.hdu._header['HISTORY'] = '------------------'
self.hdu._header['HISTORY'] = 'This frame contains pipeline-reduced IPHAS data that was originally'
self.hdu._header['HISTORY'] = 'processed by the Cambridge Astronomical Survey Unit (CASU), but the'
self.hdu._header['HISTORY'] = 'headers have been updated by Geert Barentsen (Hertfordshire) in 2014'
self.hdu._header['HISTORY'] = 'to add a re-calibrated zeropoint and to tweak the WCS keywords.'
self.hdu._header['COMMENT'] = ' _____ _____ _ _ _____ '
self.hdu._header['COMMENT'] = '|_ _| __ \| | | | /\ / ____|'
self.hdu._header['COMMENT'] = ' | | | |__) | |__| | / \ | (___ '
self.hdu._header['COMMENT'] = ' | | | ___/| __ | / /\ \ \___ \ '
self.hdu._header['COMMENT'] = ' _| |_| | | | | |/ ____ \ ____) |'
self.hdu._header['COMMENT'] = '|_____|_| |_| |_/_/ \_\_____/ '
self.hdu._header['COMMENT'] = ''
self.hdu._header['COMMENT'] = 'Data origin'
self.hdu._header['COMMENT'] = '-----------'
self.hdu._header['COMMENT'] = 'This image is part of the INT Photometric H-Alpha Survey'
self.hdu._header['COMMENT'] = 'of the Northern Galactic Plane (IPHAS). For more information,'
self.hdu._header['COMMENT'] = 'visit http://www.iphas.org.'
self.hdu._header['COMMENT'] = ''
# Set calibration comments
if self.calibrated:
self.hdu._header['COMMENT'] = 'Photometric calibration info'
self.hdu._header['COMMENT'] = '----------------------------'
self.hdu._header['COMMENT'] = 'The pixel values (number counts) in this image can be converted into'
self.hdu._header['COMMENT'] = 'Vega-based magnitudes using the PHOTZP keyword as follows:'
self.hdu._header['COMMENT'] = ''
self.hdu._header['COMMENT'] = ' mag(Vega) = -2.5*log10(pixel value) + PHOTZP.'
self.hdu._header['COMMENT'] = ''
self.hdu._header['COMMENT'] = 'The PHOTZP value has been computed such that it absorbs the required'
self.hdu._header['COMMENT'] = 'corrections for atmospheric extinction, gain variations, exposure time,'
self.hdu._header['COMMENT'] = 'and the DR2 re-calibration shift.'
self.hdu._header['COMMENT'] = 'As these images still include moonlight and other sources of'
self.hdu._header['COMMENT'] = 'non-astronomical background, they can only support flux measurements'
self.hdu._header['COMMENT'] = 'that include a suitably-chosen local background subtraction.'
else:
self.hdu._header['COMMENT'] = '*** IMPORTANT WARNING ***'
self.hdu._header['COMMENT'] = '-------------------------'
self.hdu._header['COMMENT'] = 'This image is not part of IPHAS Data Release 2. It may have been'
self.hdu._header['COMMENT'] = 'excluded from DR2 due to a serious quality problem, e.g. clouds,'
self.hdu._header['COMMENT'] = 'and hence the photometric zeropoint should NOT be trusted.'
self.hdu._header['COMMENT'] = 'In other words, *** USE THIS IMAGE AT YOUR OWN RISK ***.'
self.hdu._header['COMMENT'] = ''
self.hdu._header['COMMENT'] = 'Acknowledgement instructions'
self.hdu._header['COMMENT'] = '----------------------------'
self.hdu._header['COMMENT'] = 'If you use this data, please cite Drew et al. (2005) and'
self.hdu._header['COMMENT'] = 'Barentsen et al. (2014), and include the acknowledgement text'
self.hdu._header['COMMENT'] = 'that is available from www.iphas.org.'
@property
def output_filename(self):
"""Filename of the output?"""
return 'r{0}-{1}.fits.fz'.format(self.run, self.ccd).encode('ascii')
def save(self):
"""Save the ccd image to a new file."""
directory = os.path.join(constants.PATH_IMAGES,
'r'+str(self.run)[0:3])
util.setup_dir(directory)
target = os.path.join(directory, self.output_filename)
# checksum=True adds the CHECKSUM and DATASUM keywords
self.hdu.writeto(target, clobber=True, checksum=True)
def get_metadata(self):
"""Returns the CCD's metadata as a dictionary."""
# Find center and corner coordinates (ra/dec in decimal degrees)
mywcs = wcs.WCS(self.hdu.header)
ra, dec = mywcs.all_pix2world([[1024, 2048]], 1)[0]
corners = mywcs.all_pix2world([[1, 1],
[1, 4096],
[2048, 4096],
[2048, 1]],
1)
ra_min, ra_max = np.min(corners[:, 0]), np.max(corners[:, 0])
# If CCD crosses 0h RA, then need to separate corners on either side
if ra_max - ra_min > 180:
ra_min = np.min(corners[:, 0][corners[:, 0] > 180])
ra_max = np.max(corners[:, 0][corners[:, 0] < 180]) + 360.
dec1, dec2 = np.min(corners[:, 1]), np.max(corners[:, 1])
if self.calibrated:
in_dr2 = "true".encode('ascii')
else:
in_dr2 = "false".encode('ascii')
# The AIRMASS keyword might be missing if the TCS header packet was not received
try:
airmass = self.hdu.header['AIRMASS']
except KeyError:
airmass = ''
band = str(self.hdu.header['WFFBAND']).lower()
meta = {'filename': self.output_filename,
'run': self.run,
'ccd': self.ccd,
'in_dr2': in_dr2,
'ra': ra,
'dec': dec,
'ra_min': ra_min,
'ra_max': ra_max,
'dec_min': dec1,
'dec_max': dec2,
'band': band,
'utstart': str(self.hdu.header['DATE-OBS']).encode('ascii'),
'exptime': self.hdu.header['EXPTIME'],
'seeing': METADATA[self.run]['CCD{0}_SEEING'.format(self.ccd)],
'elliptic': METADATA[self.run]['CCD{0}_ELLIPTIC'.format(self.ccd)],
'skylevel': METADATA[self.run]['CCD{0}_SKYLEVEL'.format(self.ccd)],
'skynoise': METADATA[self.run]['CCD{0}_SKYNOISE'.format(self.ccd)],
'airmass': airmass,
'photzp': self.hdu.header['PHOTZP'],
'confmap': str(self.confmap).encode('ascii'),
}
return meta
###########
# FUNCTIONS
###########
def get_caldb():
"""Returns the calibration information."""
# Keep the CALDB stored as a global variable (= optimisation)
global CALDB
try:
return CALDB
except NameError:
CALDB = CalibrationDatabase()
return CALDB
def prepare_one(run, save=True):
with log.log_to_file(os.path.join(constants.LOGDIR, 'images.log')):
result = []
for ccd in constants.EXTENSIONS:
try:
img = SurveyImage(run, ccd)
if save:
img.save()
result.append(img.get_metadata())
img.fits.close() # avoid memory leak
except Exception, e:
log.error(e)
return result
def prepare_images(clusterview):
# Make sure the output directory exists
util.setup_dir(constants.PATH_IMAGES)
metadata = []
for band in ['halpha', 'r', 'i']:
log.info('Starting with band {0}'.format(band))
# Retrieve the list of runs
if band == 'halpha':
idx_band = 'ha'
else:
idx_band = band
# [constants.IPHASQC_COND_RELEASE]
runs = constants.IPHASQC['run_'+idx_band]
# Prepare each run
result = clusterview.map(prepare_one, runs, block=True)
metadata.extend(result)
# Write the metadata to a table
mycolumns = (str('filename'), str('run'), str('ccd'),
str('in_dr2'),
str('ra'), str('dec'),
str('ra_min'), str('ra_max'),
str('dec_min'), str('dec_max'),
str('band'),
str('utstart'), str('exptime'),
str('seeing'), str('elliptic'),
str('skylevel'), str('skynoise'),
str('airmass'), str('photzp'),
str('confmap'))
rows = list(itertools.chain.from_iterable(metadata)) # flatten list
t = table.Table(rows, names=mycolumns)
table_filename = os.path.join(constants.PATH_IMAGES, 'iphas-images.fits')
t.write(table_filename, format='fits', overwrite=True)
################################
# MAIN EXECUTION (FOR DEBUGGING)
################################
if __name__ == '__main__':
from IPython.parallel import client
client = client.client.Client()
with client[:].sync_imports():
from dr2.images import SurveyImage
from dr2 import constants
from dr2 import util
from astropy import log
from astropy.io import fits
import os
prepare_images(client[:])
#prepare_one(367744)
|
barentsen/iphas-dr2
|
dr2/images.py
|
Python
|
mit
| 19,646
|
[
"VisIt"
] |
94dec500e5a9e77e294b730ac83a69a36aad04b8e770588fe20f03a5d07c77b8
|
from .estimator_base import *
class H2OGradientBoostingEstimator(H2OEstimator):
"""
Builds gradient boosted classification trees, and gradient boosted regression trees on
a parsed data set. The default distribution function will guess the model type based on
  the response column type. To run properly, the response column must be numeric for
"gaussian" or an enum for "bernoulli" or "multinomial".
Parameters
----------
model_id : str, optional
The unique id assigned to the resulting model. If none is given, an id will
automatically be generated.
distribution : str
The distribution function of the response. Must be "AUTO", "bernoulli",
"multinomial", "poisson", "gamma", "tweedie" or "gaussian"
tweedie_power : float
Tweedie power (only for Tweedie distribution, must be between 1 and 2)
ntrees : int
A non-negative integer that determines the number of trees to grow.
max_depth : int
Maximum depth to grow the tree.
min_rows : int
Minimum number of rows to assign to terminal nodes.
learn_rate : float
A value from 0.0 to 1.0
nbins : int
For numerical columns (real/int), build a histogram of (at least) this many bins, then
split at the best point.
nbins_top_level : int
For numerical columns (real/int), build a histogram of (at most) this many bins at the
root level, then decrease by factor of two per level.
nbins_cats : int
For categorical columns (factors), build a histogram of this many bins, then split at
the best point. Higher values can lead to more overfitting.
balance_classes : bool
logical, indicates whether or not to balance training data class counts via
over/under-sampling (for imbalanced data)
max_after_balance_size : float
Maximum relative size of the training data after balancing class counts
(can be less than 1.0). Ignored if balance_classes is False, which is the
default behavior.
seed : int
    Seed for random numbers (affects sampling when balance_classes=True)
build_tree_one_node : bool
Run on one node only; no network overhead but fewer cpus used.
Suitable for small datasets.
nfolds : int, optional
Number of folds for cross-validation. If nfolds >= 2, then validation must
remain empty.
fold_assignment : str
Cross-validation fold assignment scheme, if fold_column is not specified.
Must be "AUTO", "Random" or "Modulo"
keep_cross_validation_predictions : bool
Whether to keep the predictions of the cross-validation models
score_each_iteration : bool
Attempts to score each tree.
Returns
-------
  A new H2OGradientBoostingEstimator object.
"""
def __init__(self, model_id=None, distribution=None, tweedie_power=None, ntrees=None,
max_depth=None, min_rows=None, learn_rate=None, nbins=None,
nbins_top_level=None, nbins_cats=None, balance_classes=None,
max_after_balance_size=None, seed=None, build_tree_one_node=None,
nfolds=None, fold_assignment=None, keep_cross_validation_predictions=None,
score_each_iteration=None, checkpoint=None):
super(H2OGradientBoostingEstimator, self).__init__()
self._parms = locals()
self._parms = {k:v for k,v in self._parms.iteritems() if k!="self"}
@property
def distribution(self):
return self._parms["distribution"]
@distribution.setter
def distribution(self, value):
self._parms["distribution"] = value
@property
def tweedie_power(self):
return self._parms["tweedie_power"]
@tweedie_power.setter
def tweedie_power(self, value):
self._parms["tweedie_power"] = value
@property
def ntrees(self):
return self._parms["ntrees"]
@ntrees.setter
def ntrees(self, value):
self._parms["ntrees"] = value
@property
def max_depth(self):
return self._parms["max_depth"]
@max_depth.setter
def max_depth(self, value):
self._parms["max_depth"] = value
@property
def min_rows(self):
return self._parms["min_rows"]
@min_rows.setter
def min_rows(self, value):
self._parms["min_rows"] = value
@property
def learn_rate(self):
return self._parms["learn_rate"]
@learn_rate.setter
def learn_rate(self, value):
self._parms["learn_rate"] = value
@property
def nbins(self):
return self._parms["nbins"]
@nbins.setter
def nbins(self, value):
self._parms["nbins"] = value
@property
def nbins_top_level(self):
return self._parms["nbins_top_level"]
@nbins_top_level.setter
def nbins_top_level(self, value):
self._parms["nbins_top_level"] = value
@property
def nbins_cats(self):
return self._parms["nbins_cats"]
@nbins_cats.setter
def nbins_cats(self, value):
self._parms["nbins_cats"] = value
@property
def balance_classes(self):
return self._parms["balance_classes"]
@balance_classes.setter
def balance_classes(self, value):
self._parms["balance_classes"] = value
@property
def max_after_balance_size(self):
return self._parms["max_after_balance_size"]
@max_after_balance_size.setter
def max_after_balance_size(self, value):
self._parms["max_after_balance_size"] = value
@property
def seed(self):
return self._parms["seed"]
@seed.setter
def seed(self, value):
self._parms["seed"] = value
@property
def build_tree_one_node(self):
return self._parms["build_tree_one_node"]
@build_tree_one_node.setter
def build_tree_one_node(self, value):
self._parms["build_tree_one_node"] = value
@property
def nfolds(self):
return self._parms["nfolds"]
@nfolds.setter
def nfolds(self, value):
self._parms["nfolds"] = value
@property
def fold_assignment(self):
return self._parms["fold_assignment"]
@fold_assignment.setter
def fold_assignment(self, value):
self._parms["fold_assignment"] = value
@property
def keep_cross_validation_predictions(self):
return self._parms["keep_cross_validation_predictions"]
@keep_cross_validation_predictions.setter
def keep_cross_validation_predictions(self, value):
self._parms["keep_cross_validation_predictions"] = value
@property
def score_each_iteration(self):
return self._parms["score_each_iteration"]
@score_each_iteration.setter
def score_each_iteration(self, value):
self._parms["score_each_iteration"] = value
@property
def checkpoint(self):
return self._parms["checkpoint"]
@checkpoint.setter
def checkpoint(self, value):
self._parms["checkpoint"] = value
|
pchmieli/h2o-3
|
h2o-py/h2o/estimators/gbm.py
|
Python
|
apache-2.0
| 6,518
|
[
"Gaussian"
] |
26df52ed109558ee397c4d1e133d4c75b53e937c985bdeee5dce21a3602ec45c
|
'''
Created on Jun 9, 2019
@author: briank
'''
"""
@name: PyHouse/Project/src/Modules/Core/Utilities/Node_tools.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com
@copyright: (c) 2019-2019 by D. Brian Kimmel
@license: MIT License
@note: Created on Jun 9, 2019
@Summary: Routines to
"""
__updated__ = '2019-06-09'
# Import system type stuff
# Import PyHouse files
from Modules.Core import logging_pyh as Logger
LOG = Logger.getLogger('PyHouse.NodeTools ')
def get_node_name(p_pyhhouse_obj):
return p_pyhhouse_obj.Computer.Name
# ## END DBK
|
DBrianKimmel/PyHouse
|
Project/src/Modules/Core/Utilities/node_tools.py
|
Python
|
mit
| 590
|
[
"Brian"
] |
3902e5139ac4b67d0d6cad243d9ce39a79ddbe146431007ef83cc59d069d2dc4
|
# coding: utf-8
"""
Acceptance tests for Studio's Setting pages
"""
from __future__ import unicode_literals
import os
from mock import patch
from nose.plugins.attrib import attr
from base_studio_test import StudioCourseTest
from bok_choy.promise import EmptyPromise
from ...fixtures.course import XBlockFixtureDesc
from ..helpers import create_user_partition_json
from ...pages.studio.overview import CourseOutlinePage
from ...pages.studio.settings import SettingsPage
from ...pages.studio.settings_advanced import AdvancedSettingsPage
from ...pages.studio.settings_group_configurations import GroupConfigurationsPage
from ...pages.lms.courseware import CoursewarePage
from textwrap import dedent
from xmodule.partitions.partitions import Group
@attr('shard_8')
class ContentGroupConfigurationTest(StudioCourseTest):
"""
Tests for content groups in the Group Configurations Page.
There are tests for the experiment groups in test_studio_split_test.
"""
def setUp(self):
super(ContentGroupConfigurationTest, self).setUp()
self.group_configurations_page = GroupConfigurationsPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.outline_page = CourseOutlinePage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
def populate_course_fixture(self, course_fixture):
"""
        Populates the test course with a chapter, a sequential, and a unit.
        The problem visible only to Group "alpha" is added by the individual test.
"""
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit')
)
)
)
def create_and_verify_content_group(self, name, existing_groups):
"""
Creates a new content group and verifies that it was properly created.
"""
self.assertEqual(existing_groups, len(self.group_configurations_page.content_groups))
if existing_groups == 0:
self.group_configurations_page.create_first_content_group()
else:
self.group_configurations_page.add_content_group()
config = self.group_configurations_page.content_groups[existing_groups]
config.name = name
# Save the content group
self.assertEqual(config.get_text('.action-primary'), "Create")
self.assertFalse(config.delete_button_is_present)
config.save()
self.assertIn(name, config.name)
return config
def test_no_content_groups_by_default(self):
"""
Scenario: Ensure that message telling me to create a new content group is
shown when no content groups exist.
Given I have a course without content groups
When I go to the Group Configuration page in Studio
Then I see "You have not created any content groups yet." message
"""
self.group_configurations_page.visit()
self.assertTrue(self.group_configurations_page.no_content_groups_message_is_present)
self.assertIn(
"You have not created any content groups yet.",
self.group_configurations_page.no_content_groups_message_text
)
def test_can_create_and_edit_content_groups(self):
"""
Scenario: Ensure that the content groups can be created and edited correctly.
Given I have a course without content groups
When I click button 'Add your first Content Group'
        And I set the new name and click the button 'Create'
Then I see the new content is added and has correct data
And I click 'New Content Group' button
And I set the name and click the button 'Create'
Then I see the second content group is added and has correct data
When I edit the second content group
And I change the name and click the button 'Save'
Then I see the second content group is saved successfully and has the new name
"""
self.group_configurations_page.visit()
self.create_and_verify_content_group("New Content Group", 0)
second_config = self.create_and_verify_content_group("Second Content Group", 1)
# Edit the second content group
second_config.edit()
second_config.name = "Updated Second Content Group"
self.assertEqual(second_config.get_text('.action-primary'), "Save")
second_config.save()
self.assertIn("Updated Second Content Group", second_config.name)
def test_cannot_delete_used_content_group(self):
"""
Scenario: Ensure that the user cannot delete used content group.
Given I have a course with 1 Content Group
And I go to the Group Configuration page
When I try to delete the Content Group with name "New Content Group"
Then I see the delete button is disabled.
"""
self.course_fixture._update_xblock(self.course_fixture._course_location, {
"metadata": {
u"user_partitions": [
create_user_partition_json(
0,
'Configuration alpha,',
'Content Group Partition',
[Group("0", 'alpha')],
scheme="cohort"
)
],
},
})
problem_data = dedent("""
<problem markdown="Simple Problem" max_attempts="" weight="">
<p>Choose Yes.</p>
<choiceresponse>
<checkboxgroup>
<choice correct="true">Yes</choice>
</checkboxgroup>
</choiceresponse>
</problem>
""")
vertical = self.course_fixture.get_nested_xblocks(category="vertical")[0]
self.course_fixture.create_xblock(
vertical.locator,
XBlockFixtureDesc('problem', "VISIBLE TO ALPHA", data=problem_data, metadata={"group_access": {0: [0]}}),
)
self.group_configurations_page.visit()
config = self.group_configurations_page.content_groups[0]
self.assertTrue(config.delete_button_is_disabled)
def test_can_delete_unused_content_group(self):
"""
Scenario: Ensure that the user can delete unused content group.
Given I have a course with 1 Content Group
And I go to the Group Configuration page
When I delete the Content Group with name "New Content Group"
Then I see that there is no Content Group
When I refresh the page
Then I see that the content group has been deleted
"""
self.group_configurations_page.visit()
config = self.create_and_verify_content_group("New Content Group", 0)
self.assertTrue(config.delete_button_is_present)
self.assertEqual(len(self.group_configurations_page.content_groups), 1)
# Delete content group
config.delete()
self.assertEqual(len(self.group_configurations_page.content_groups), 0)
self.group_configurations_page.visit()
self.assertEqual(len(self.group_configurations_page.content_groups), 0)
def test_must_supply_name(self):
"""
Scenario: Ensure that validation of the content group works correctly.
Given I have a course without content groups
And I create new content group without specifying a name click the button 'Create'
        Then I see error message "Group name is required"
When I set a name and click the button 'Create'
Then I see the content group is saved successfully
"""
self.group_configurations_page.visit()
self.group_configurations_page.create_first_content_group()
config = self.group_configurations_page.content_groups[0]
config.save()
self.assertEqual(config.mode, 'edit')
self.assertEqual("Group name is required", config.validation_message)
config.name = "Content Group Name"
config.save()
self.assertIn("Content Group Name", config.name)
def test_can_cancel_creation_of_content_group(self):
"""
Scenario: Ensure that creation of a content group can be canceled correctly.
Given I have a course without content groups
When I click button 'Add your first Content Group'
        And I set the new name and click the button 'Cancel'
Then I see that there is no content groups in the course
"""
self.group_configurations_page.visit()
self.group_configurations_page.create_first_content_group()
config = self.group_configurations_page.content_groups[0]
config.name = "Content Group"
config.cancel()
self.assertEqual(0, len(self.group_configurations_page.content_groups))
def test_content_group_empty_usage(self):
"""
Scenario: When content group is not used, ensure that the link to outline page works correctly.
Given I have a course without content group
And I create new content group
Then I see a link to the outline page
When I click on the outline link
Then I see the outline page
"""
self.group_configurations_page.visit()
config = self.create_and_verify_content_group("New Content Group", 0)
config.toggle()
config.click_outline_anchor()
# Waiting for the page load and verify that we've landed on course outline page
EmptyPromise(
lambda: self.outline_page.is_browser_on_page(), "loaded page {!r}".format(self.outline_page),
timeout=30
).fulfill()
@attr('shard_8')
class AdvancedSettingsValidationTest(StudioCourseTest):
"""
Tests for validation feature in Studio's advanced settings tab
"""
def setUp(self):
super(AdvancedSettingsValidationTest, self).setUp()
self.advanced_settings = AdvancedSettingsPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.type_fields = ['Course Display Name', 'Advanced Module List', 'Discussion Topic Mapping',
'Maximum Attempts', 'Course Announcement Date']
# Before every test, make sure to visit the page first
self.advanced_settings.visit()
self.assertTrue(self.advanced_settings.is_browser_on_page())
def test_modal_shows_one_validation_error(self):
"""
Test that advanced settings don't save if there's a single wrong input,
and that it shows the correct error message in the modal.
"""
# Feed an integer value for String field.
# .set method saves automatically after setting a value
course_display_name = self.advanced_settings.get('Course Display Name')
self.advanced_settings.set('Course Display Name', 1)
self.advanced_settings.wait_for_modal_load()
# Test Modal
self.check_modal_shows_correct_contents(['Course Display Name'])
self.advanced_settings.refresh_and_wait_for_load()
self.assertEquals(
self.advanced_settings.get('Course Display Name'),
course_display_name,
'Wrong input for Course Display Name must not change its value'
)
def test_modal_shows_multiple_validation_errors(self):
"""
Test that advanced settings don't save with multiple wrong inputs
"""
# Save original values and feed wrong inputs
original_values_map = self.get_settings_fields_of_each_type()
self.set_wrong_inputs_to_fields()
self.advanced_settings.wait_for_modal_load()
# Test Modal
self.check_modal_shows_correct_contents(self.type_fields)
self.advanced_settings.refresh_and_wait_for_load()
for key, val in original_values_map.iteritems():
self.assertEquals(
self.advanced_settings.get(key),
val,
'Wrong input for Advanced Settings Fields must not change its value'
)
def test_undo_changes(self):
"""
Test that undo changes button in the modal resets all settings changes
"""
# Save original values and feed wrong inputs
original_values_map = self.get_settings_fields_of_each_type()
self.set_wrong_inputs_to_fields()
# Let modal popup
self.advanced_settings.wait_for_modal_load()
# Click Undo Changes button
self.advanced_settings.undo_changes_via_modal()
# Check that changes are undone
for key, val in original_values_map.iteritems():
self.assertEquals(
self.advanced_settings.get(key),
val,
'Undoing Should revert back to original value'
)
def test_manual_change(self):
"""
Test that manual changes button in the modal keeps settings unchanged
"""
inputs = {"Course Display Name": 1,
"Advanced Module List": 1,
"Discussion Topic Mapping": 1,
"Maximum Attempts": '"string"',
"Course Announcement Date": '"string"',
}
self.set_wrong_inputs_to_fields()
self.advanced_settings.wait_for_modal_load()
self.advanced_settings.trigger_manual_changes()
# Check that the validation modal went away.
self.assertFalse(self.advanced_settings.is_validation_modal_present())
# Iterate through the wrong values and make sure they're still displayed
for key, val in inputs.iteritems():
self.assertEquals(
str(self.advanced_settings.get(key)),
str(val),
'manual change should keep: ' + str(val) + ', but is: ' + str(self.advanced_settings.get(key))
)
def check_modal_shows_correct_contents(self, wrong_settings_list):
"""
Helper function that checks if the validation modal contains correct
error messages.
"""
# Check presence of modal
self.assertTrue(self.advanced_settings.is_validation_modal_present())
# List of wrong settings item & what is presented in the modal should be the same
error_item_names = self.advanced_settings.get_error_item_names()
self.assertEqual(set(wrong_settings_list), set(error_item_names))
error_item_messages = self.advanced_settings.get_error_item_messages()
self.assertEqual(len(error_item_names), len(error_item_messages))
def get_settings_fields_of_each_type(self):
"""
Get one of each field type:
- String: Course Display Name
- List: Advanced Module List
- Dict: Discussion Topic Mapping
- Integer: Maximum Attempts
- Date: Course Announcement Date
"""
return {
"Course Display Name": self.advanced_settings.get('Course Display Name'),
"Advanced Module List": self.advanced_settings.get('Advanced Module List'),
"Discussion Topic Mapping": self.advanced_settings.get('Discussion Topic Mapping'),
"Maximum Attempts": self.advanced_settings.get('Maximum Attempts'),
"Course Announcement Date": self.advanced_settings.get('Course Announcement Date'),
}
def set_wrong_inputs_to_fields(self):
"""
Set wrong values for the chosen fields
"""
self.advanced_settings.set_values(
{
"Course Display Name": 1,
"Advanced Module List": 1,
"Discussion Topic Mapping": 1,
"Maximum Attempts": '"string"',
"Course Announcement Date": '"string"',
}
)
def test_only_expected_fields_are_displayed(self):
"""
Scenario: The Advanced Settings screen displays settings/fields not specifically hidden from
view by a developer.
Given I have a set of CourseMetadata fields defined for the course
When I view the Advanced Settings screen for the course
The total number of fields displayed matches the number I expect
And the actual fields displayed match the fields I expect to see
"""
expected_fields = self.advanced_settings.expected_settings_names
displayed_fields = self.advanced_settings.displayed_settings_names
self.assertEquals(set(displayed_fields), set(expected_fields))
@attr('shard_1')
class ContentLicenseTest(StudioCourseTest):
"""
Tests for course-level licensing (that is, setting the license,
for an entire course's content, to All Rights Reserved or Creative Commons)
"""
def setUp(self): # pylint: disable=arguments-differ
super(ContentLicenseTest, self).setUp()
self.outline_page = CourseOutlinePage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.settings_page = SettingsPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.lms_courseware = CoursewarePage(
self.browser,
self.course_id,
)
self.settings_page.visit()
def test_empty_license(self):
"""
When I visit the Studio settings page,
I see that the course license is "All Rights Reserved" by default.
Then I visit the LMS courseware page,
and I see that the default course license is displayed.
"""
self.assertEqual(self.settings_page.course_license, "All Rights Reserved")
self.lms_courseware.visit()
self.assertEqual(self.lms_courseware.course_license, "© All Rights Reserved")
def test_arr_license(self):
"""
When I visit the Studio settings page,
and I set the course license to "All Rights Reserved",
and I refresh the page,
I see that the course license is "All Rights Reserved".
Then I visit the LMS courseware page,
and I see that the course license is "All Rights Reserved".
"""
self.settings_page.course_license = "All Rights Reserved"
self.settings_page.save_changes()
self.settings_page.refresh_and_wait_for_load()
self.assertEqual(self.settings_page.course_license, "All Rights Reserved")
self.lms_courseware.visit()
self.assertEqual(self.lms_courseware.course_license, "© All Rights Reserved")
def test_cc_license(self):
"""
When I visit the Studio settings page,
and I set the course license to "Creative Commons",
and I refresh the page,
I see that the course license is "Creative Commons".
Then I visit the LMS courseware page,
and I see that the course license is "Some Rights Reserved".
"""
self.settings_page.course_license = "Creative Commons"
self.settings_page.save_changes()
self.settings_page.refresh_and_wait_for_load()
self.assertEqual(self.settings_page.course_license, "Creative Commons")
self.lms_courseware.visit()
# The course_license text will include a bunch of screen reader text to explain
# the selected options
self.assertIn("Some Rights Reserved", self.lms_courseware.course_license)
@attr('a11y')
class StudioSettingsA11yTest(StudioCourseTest):
"""
Class to test Studio pages accessibility.
"""
def setUp(self): # pylint: disable=arguments-differ
super(StudioSettingsA11yTest, self).setUp()
self.settings_page = SettingsPage(self.browser, self.course_info['org'], self.course_info['number'],
self.course_info['run'])
def test_studio_settings_page_a11y(self):
"""
Check accessibility of SettingsPage.
"""
self.settings_page.visit()
self.settings_page.wait_for_page()
# There are several existing color contrast errors on this page,
# we will ignore this error in the test until we fix them.
self.settings_page.a11y_audit.config.set_rules({
"ignore": [
'color-contrast', # TODO: AC-225
'link-href', # TODO: AC-226
'nav-aria-label', # TODO: AC-227
'icon-aria-hidden', # TODO: AC-229
],
})
# TODO: Figure out how to get CodeMirror to pass accessibility testing
# We use the CodeMirror Javascript library to
# add code editing to a number of textarea elements
# on this page. CodeMirror generates markup that does
# not pass our accessibility testing rules.
self.settings_page.a11y_audit.config.set_scope(
exclude=['.CodeMirror textarea']
)
self.settings_page.a11y_audit.check_for_accessibility_errors()
@attr('a11y')
class StudioSubsectionSettingsA11yTest(StudioCourseTest):
"""
Class to test accessibility on the subsection settings modals.
"""
def setUp(self): # pylint: disable=arguments-differ
browser = os.environ.get('SELENIUM_BROWSER', 'firefox')
# This test will fail if run using phantomjs < 2.0, due to an issue with bind()
# See https://github.com/ariya/phantomjs/issues/10522 for details.
# The course_outline uses this function, and as such will not fully load when run
# under phantomjs 1.9.8. So, to prevent this test from timing out at course_outline.visit(),
# force the use of firefox vs the standard a11y test usage of phantomjs 1.9.8.
# TODO: remove this block once https://openedx.atlassian.net/browse/TE-1047 is resolved.
if browser == 'phantomjs':
browser = 'firefox'
with patch.dict(os.environ, {'SELENIUM_BROWSER': browser}):
super(StudioSubsectionSettingsA11yTest, self).setUp(is_staff=True)
self.course_outline = CourseOutlinePage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
def populate_course_fixture(self, course_fixture):
course_fixture.add_advanced_settings({
"enable_proctored_exams": {"value": "true"}
})
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section 1').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection 1').add_children(
XBlockFixtureDesc('problem', 'Test Problem 1')
)
)
)
def test_special_exams_menu_a11y(self):
"""
Given that I am a staff member
And I am editing settings on the special exams menu
Then that menu is accessible
"""
self.course_outline.visit()
self.course_outline.open_subsection_settings_dialog()
self.course_outline.select_advanced_tab()
# limit the scope of the audit to the special exams tab on the modal dialog
self.course_outline.a11y_audit.config.set_scope(
include=['section.edit-settings-timed-examination']
)
self.course_outline.a11y_audit.check_for_accessibility_errors()
class StudioSettingsImageUploadTest(StudioCourseTest):
"""
Class to test course settings image uploads.
"""
def setUp(self): # pylint: disable=arguments-differ
super(StudioSettingsImageUploadTest, self).setUp()
self.settings_page = SettingsPage(self.browser, self.course_info['org'], self.course_info['number'],
self.course_info['run'])
self.settings_page.visit()
# Ensure jQuery is loaded before evaluating any jQuery expressions
self.settings_page.wait_for_ajax()
# This text appears towards the end of the work that jQuery is performing on the page
self.settings_page.wait_for_jquery_value('input#course-name:text', 'test_run')
def test_upload_course_card_image(self):
# upload image
file_to_upload = 'image.jpg'
self.settings_page.upload_image('#upload-course-image', file_to_upload)
self.assertIn(file_to_upload, self.settings_page.get_uploaded_image_path('#course-image'))
def test_upload_course_banner_image(self):
# upload image
file_to_upload = 'image.jpg'
self.settings_page.upload_image('#upload-banner-image', file_to_upload)
self.assertIn(file_to_upload, self.settings_page.get_uploaded_image_path('#banner-image'))
def test_upload_course_video_thumbnail_image(self):
# upload image
file_to_upload = 'image.jpg'
self.settings_page.upload_image('#upload-video-thumbnail-image', file_to_upload)
self.assertIn(file_to_upload, self.settings_page.get_uploaded_image_path('#video-thumbnail-image'))
|
shabab12/edx-platform
|
common/test/acceptance/tests/studio/test_studio_settings.py
|
Python
|
agpl-3.0
| 25,324
|
[
"VisIt"
] |
54f96bb12bcdb21fdf6915b0378c04c28b93acbd76bda3e7af51b25ee7718aaf
|
"""Bias matrix tests.
"""
from __future__ import division, absolute_import
import unittest
import numpy as np
from bcn.bias import BiasLowRank, BiasUnconstrained
class TestBiasLowRank(unittest.TestCase):
"""Test to verify that all three models produce finite ndarrays.
"""
def setUp(self):
self.shape = (50, 60)
self.rank = 6
def _assert_shape(self, bias):
assert bias['X'].shape == self.shape
assert bias['usvt'][0].shape == (self.shape[0], self.rank)
assert bias['usvt'][1].shape == (self.rank,)
assert bias['usvt'][2].shape == (self.rank, self.shape[1])
def _assert_finite(self, bias):
assert np.isfinite(bias['X']).all()
assert np.isfinite(bias['usvt'][0]).all()
assert np.isfinite(bias['usvt'][1]).all()
assert np.isfinite(bias['usvt'][2]).all()
def _assert_ndarray(self, bias):
assert type(bias['X']) == np.ndarray
assert type(bias['usvt'][0]) == np.ndarray
assert type(bias['usvt'][1]) == np.ndarray
assert type(bias['usvt'][2]) == np.ndarray
def _assert_dict(self, bias):
assert type(bias) == dict
def test_image(self):
bias = BiasLowRank(self.shape, self.rank, bias_model='image', image_source='trump.png').generate()
self._assert_dict(bias)
self._assert_finite(bias)
self._assert_ndarray(bias)
self._assert_shape(bias)
def test_gaussian(self):
bias = BiasLowRank(self.shape, self.rank, bias_model='gaussian', noise_amplitude=1.0).generate()
self._assert_dict(bias)
self._assert_finite(bias)
self._assert_ndarray(bias)
self._assert_shape(bias)
def test_bicluster(self):
bias = BiasLowRank(self.shape, self.rank, bias_model='bicluster', n_clusters=(3,4)).generate()
self._assert_dict(bias)
self._assert_finite(bias)
self._assert_ndarray(bias)
self._assert_shape(bias)
class TestBiasUnconstrained(unittest.TestCase):
"""Test to verify that both models produce finite ndarrays.
"""
def setUp(self):
self.shape = (50, 60)
def _assert_shape(self, bias):
assert bias['X'].shape == self.shape
def _assert_finite(self, bias):
assert np.isfinite(bias['X']).all()
def _assert_ndarray(self, bias):
assert type(bias['X']) == np.ndarray
def _assert_dict(self, bias):
assert type(bias) == dict
def test_gaussian_finite(self):
bias = BiasUnconstrained(self.shape, 'gaussian', noise_amplitude=1.0).generate()
self._assert_dict(bias)
self._assert_finite(bias)
self._assert_ndarray(bias)
self._assert_shape(bias)
def test_uniform_finite(self):
bias = BiasUnconstrained(self.shape, 'uniform', fill_value=-1.5).generate()
self._assert_dict(bias)
self._assert_finite(bias)
self._assert_ndarray(bias)
self._assert_shape(bias)
if __name__ == '__main__':
unittest.main()
|
a378ec99/bcn
|
tests/test_bias.py
|
Python
|
mit
| 3,098
|
[
"Gaussian"
] |
9e29aee25cb8cd844e3e9726ceacbfdad5d1cc75a9b7798717ce7ad322d53e12
|
## INFO ########################################################################
## ##
## cutils ##
## ====== ##
## ##
## Modern and Lightweight C Utilities ##
## Version: 0.8.90.725 (20140821) ##
## ##
## File: pycutils/cutils/internal/comment.py ##
## ##
## For more information about the project, visit <http://www.cutils.org>. ##
## Copyright (C) 2014 Peter Varo ##
## ##
## This program is free software: you can redistribute it and/or modify it ##
## under the terms of the GNU General Public License as published by the ##
## Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, but ##
## WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. ##
## See the GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program, most likely a file in the root directory, ##
## called 'LICENSE'. If not, see <http://www.gnu.org/licenses>. ##
## ##
######################################################################## INFO ##
# Comment symbols
LINE = '#', '//'
BLOCK = {'/*': '*/'}
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# Regex escape sequence
_ESCAPE = {'.' : r'\.', '^' : r'\^', '$' : r'\$', '*' : r'\*',
'+' : r'\+', '?' : r'\?', '{' : r'\{', '}' : r'\}',
'\\': r'\\', '[' : r'\[', ']' : r'\]', '|' : r'\|',
'(' : r'\(', ')' : r'\)', '#' : r'[#]'}
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def escape(string):
return r''.join(_ESCAPE.get(char, char) for char in string)
#------------------------------------------------------------------------------#
def block_comments(block):
"""Build regex fragments for the opening and closing marks of each
block-comment pair in `block`: an alternation of named groups for the
openings, and a conditional pattern selecting the matching closing."""
# Number of opening and closing symbols
length = len(block)
# Process block comment marks
for index, (opening, closing) in enumerate(block.items()):
# Make sure all regex chars are escaped
opening = escape(opening)
closing = escape(closing)
# Except first
if index:
blocks_open = r'{}|(?P<b{}>{})'.format(blocks_open, index, opening)
blocks_close = r'(?(b{}){}|{})'.format(index, closing, blocks_close)
# First
else:
blocks_open = r'(?P<b0>{})'.format(opening) if length > 1 else opening
blocks_close = closing
# Return processed strings
return blocks_open, blocks_close
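# A quick illustration (not part of the original module) of what the helpers
# above produce for the default BLOCK mapping; with a single open/close pair
# no named groups or conditionals are generated:
#
#   >>> escape('/*')
#   '/\\*'
#   >>> block_comments({'/*': '*/'})
#   ('/\\*', '\\*/')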
|
matt-hayden/cutils
|
pycutils/cutils/internal/comment.py
|
Python
|
gpl-3.0
| 3,683
|
[
"VisIt"
] |
4d98444831c376cff1015ff396217ad9a63f2b6fe95e613c3ec1a1e931ddce6d
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
from numpy.testing import (
assert_,
assert_equal,
)
import MDAnalysis as mda
from MDAnalysisTests.topology.base import ParserBase
from MDAnalysisTests.datafiles import (
PSF,
PSF_nosegid,
PSF_NAMD,
)
class TestPSFParser(ParserBase):
"""
Based on the small AdK system (topology from :data:`PSF`).
"""
parser = mda.topology.PSFParser.PSFParser
filename = PSF
expected_attrs = ['ids', 'names', 'types', 'masses',
'charges',
'resids', 'resnames',
'segids',
'bonds', 'angles', 'dihedrals', 'impropers']
expected_n_atoms = 3341
expected_n_residues = 214
expected_n_segments = 1
def test_bonds_total_counts(self):
assert_(len(self.top.bonds.values) == 3365)
def test_bonds_atom_counts(self):
u = mda.Universe(self.filename)
assert_(len(u.atoms[[0]].bonds) == 4)
assert_(len(u.atoms[[42]].bonds) == 1)
def test_bonds_identity(self):
vals = self.top.bonds.values
for b in ((0, 1), (0, 2), (0, 3), (0, 4)):
assert_((b in vals) or (b[::-1] in vals))
def test_angles_total_counts(self):
assert_(len(self.top.angles.values) == 6123)
def test_angles_atom_counts(self):
u = mda.Universe(self.filename)
# compare explicitly; passing the expected value as assert_'s second
# argument would make it a message, so the check could never fail
assert_(len(u.atoms[[0]].angles) == 9)
assert_(len(u.atoms[[42]].angles) == 2)
def test_angles_identity(self):
vals = self.top.angles.values
for b in ((1, 0, 2), (1, 0, 3), (1, 0, 4)):
assert_((b in vals) or (b[::-1] in vals))
def test_dihedrals_total_counts(self):
assert_(len(self.top.dihedrals.values) == 8921)
def test_dihedrals_atom_counts(self):
u = mda.Universe(self.filename)
assert_(len(u.atoms[[0]].dihedrals) == 14)
def test_dihedrals_identity(self):
vals = self.top.dihedrals.values
for b in ((0, 4, 6, 7), (0, 4, 6, 8),
(0, 4, 6, 9), (0, 4, 17, 18)):
assert_((b in vals) or (b[::-1] in vals))
class TestNAMDPSFParser(ParserBase):
"""Testfiles provided by JiyongPark77.
NAMD/VMD XPLOR-style PSF file (using CGENFF residues/atoms).
https://github.com/MDAnalysis/mdanalysis/issues/107
"""
parser = mda.topology.PSFParser.PSFParser
filename = PSF_NAMD
expected_attrs = ['ids', 'names', 'types', 'masses',
'charges',
'resids', 'resnames',
'segids',
'bonds', 'angles', 'dihedrals', 'impropers']
guessed_attrs = ['elements']
expected_n_atoms = 130
expected_n_residues = 6
expected_n_segments = 1
def test_psf_nosegid():
"""Issue #121"""
u = mda.Universe(PSF_nosegid)
assert_(isinstance(u, mda.Universe))
assert_equal(u.atoms.n_atoms, 98)
assert_equal(u.segments.segids, ["SYSTEM"])
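# Minimal usage sketch (illustrative, not part of the test suite): load the
# same PSF datafile directly and inspect the parsed topology that the counts
# above are checked against.
#
#   import MDAnalysis as mda
#   from MDAnalysisTests.datafiles import PSF
#   u = mda.Universe(PSF)
#   print(u.atoms.n_atoms)      # 3341, matching expected_n_atoms
#   print(len(u.atoms.bonds))   # 3365, matching test_bonds_total_counts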
|
alejob/mdanalysis
|
testsuite/MDAnalysisTests/topology/test_psf.py
|
Python
|
gpl-2.0
| 3,941
|
[
"MDAnalysis",
"NAMD",
"VMD"
] |
2a00b4171f9163008b8daf42b402fc83a3db7a51d3001ddbbe08c3d520a7944b
|
"""
Internal implementation of a SAT solver, used by L{solver.SATSolver}.
This is not part of the public API.
"""
from __future__ import print_function
# Copyright (C) 2010, Thomas Leonard
# See the README file for details, or visit http://0install.net.
# The design of this solver is very heavily based on the one described in
# the MiniSat paper "An Extensible SAT-solver [extended version 1.2]"
# http://minisat.se/Papers.html
#
# The main differences are:
#
# - We care about which solution we find (not just "satisfiable" or "not").
# - We take care to be deterministic (always select the same versions given
# the same input). We do not do random restarts, etc.
# - We add an AtMostOneClause (the paper suggests this in the Exercises, and
# it's very useful for our purposes).
def debug(msg, *args):
"""@type msg: str"""
return  # debugging disabled; delete this line to enable the print below
print("SAT:", msg % args)
# variables are numbered from 0
# literals have the same number as the corresponding variable,
# except for negatives, where they are (-1-v):
#
# Variable Literal not(Literal)
# 0 0 -1
# 1 1 -2
def neg(lit):
"""@type lit: int
@rtype: int"""
return -1 - lit
def watch_index(lit):
"""@type lit: int
@rtype: int"""
if lit >= 0:
return lit * 2
return neg(lit) * 2 + 1
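# A quick worked check of the numbering scheme above (illustrative only):
#
#   >>> neg(0), neg(1)                   # not(X) for variables 0 and 1
#   (-1, -2)
#   >>> neg(neg(5))                      # negation is an involution
#   5
#   >>> watch_index(0), watch_index(-1)  # True/False watch slots for var 0
#   (0, 1)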
class AtMostOneClause(object):
def __init__(self, solver, lits):
"""Preferred literals come first.
@type solver: L{SATProblem}"""
self.solver = solver
self.lits = lits
# The single literal from our set that is True.
# We store this explicitly because the decider needs to know quickly.
self.current = None
def propagate(self, lit):
# Re-add ourselves to the watch list.
# (we won't get any more notifications unless we backtrack,
# in which case we'd need to get back on the list anyway)
self.solver.watch_lit(lit, self)
# value[lit] has just become True
assert self.solver.lit_value(lit) == True
#debug("%s: noticed %s has become True" % (self, self.solver.name_lit(lit)))
# If we already propagated successfully when the first
# one was set then we set all the others to False and
# anyone trying to set one True will get rejected. And
# if we didn't propagate yet, current will still be
# None, even if we now have a conflict (which we'll
# detect below).
assert self.current is None
self.current = lit
# If we later backtrack, call our undo function to unset current
self.solver.get_varinfo_for_lit(lit).undo.append(self)
for l in self.lits:
value = self.solver.lit_value(l)
#debug("Value of %s is %s" % (self.solver.name_lit(l), value))
if value is True and l is not lit:
# Due to queuing, we might get called with current = None
# and two versions already selected.
debug("CONFLICT: already selected %s" % self.solver.name_lit(l))
return False
if value is None:
# Since one of our lits is already true, all unknown ones
# can be set to False.
if not self.solver.enqueue(neg(l), self):
debug("CONFLICT: enqueue failed for %s", self.solver.name_lit(neg(l)))
return False # Conflict; abort
return True
def undo(self, lit):
debug("(backtracking: no longer selected %s)" % self.solver.name_lit(lit))
assert lit == self.current
self.current = None
# Why is lit True?
# Or, why are we causing a conflict (if lit is None)?
def calc_reason(self, lit):
if lit is None:
# Find two True literals
trues = []
for l in self.lits:
if self.solver.lit_value(l) is True:
trues.append(l)
if len(trues) == 2: return trues
else:
# Find one True literal
for l in self.lits:
if l is not lit and self.solver.lit_value(l) is True:
return [l]
assert 0 # don't know why!
def best_undecided(self):
debug("best_undecided: %s" % (self.solver.name_lits(self.lits)))
for lit in self.lits:
#debug("%s = %s" % (self.solver.name_lit(lit), self.solver.lit_value(lit)))
if self.solver.lit_value(lit) is None:
return lit
return None
def __repr__(self):
return "<at most one: %s>" % (', '.join(self.solver.name_lits(self.lits)))
class UnionClause(object):
def __init__(self, solver, lits):
"""@type solver: L{SATProblem}"""
self.solver = solver
self.lits = lits
# Try to infer new facts.
# We can do this only when all of our literals are False except one,
# which is undecided. That is,
# False... or X or False... = True => X = True
#
# To get notified when this happens, we tell the solver to
# watch two of our undecided literals. Watching two undecided
# literals is sufficient. When one changes we check the state
# again. If we still have two or more undecided then we switch
# to watching them, otherwise we propagate.
#
# Returns False on conflict.
def propagate(self, lit):
# value[get(lit)] has just become False
#debug("%s: noticed %s has become False" % (self, self.solver.name_lit(neg(lit))))
# For simplicity, only handle the case where self.lits[1]
# is the one that just got set to False, so that:
# - value[lits[0]] = None | True
# - value[lits[1]] = False
# If it's the other way around, just swap them before we start.
if self.lits[0] == neg(lit):
self.lits[0], self.lits[1] = self.lits[1], self.lits[0]
if self.solver.lit_value(self.lits[0]) == True:
# We're already satisfied. Do nothing.
self.solver.watch_lit(lit, self)
return True
assert self.solver.lit_value(self.lits[1]) == False
# Find a new literal to watch now that lits[1] is resolved,
# swap it with lits[1], and start watching it.
for i in range(2, len(self.lits)):
value = self.solver.lit_value(self.lits[i])
if value != False:
# Could be None or True. If it's True then we've already done our job,
# so this means we don't get notified unless we backtrack, which is fine.
self.lits[1], self.lits[i] = self.lits[i], self.lits[1]
self.solver.watch_lit(neg(self.lits[1]), self)
return True
# Only lits[0] is now undefined.
self.solver.watch_lit(lit, self)
return self.solver.enqueue(self.lits[0], self)
def undo(self, lit): pass
# Why is lit True?
# Or, why are we causing a conflict (if lit is None)?
def calc_reason(self, lit):
assert lit is None or lit is self.lits[0]
# The cause is everything except lit.
return [neg(l) for l in self.lits if l is not lit]
def __repr__(self):
return "<some: %s>" % (', '.join(self.solver.name_lits(self.lits)))
# Using an array of VarInfo objects is less efficient than using multiple arrays, but
# easier for me to understand.
class VarInfo(object):
__slots__ = ['value', 'reason', 'level', 'undo', 'obj']
def __init__(self, obj):
self.value = None # True/False/None
self.reason = None # The constraint that implied our value, if True or False
self.level = -1 # The decision level at which we got a value (when not None)
self.undo = [] # Constraints to update if we become unbound (by backtracking)
self.obj = obj # The object this corresponds to (for our caller and for debugging)
def __repr__(self):
return '%s=%s' % (self.name, self.value)
@property
def name(self):
return str(self.obj)
class SATProblem(object):
def __init__(self):
# Propagation
self.watches = [] # watches[2i,2i+1] = constraints to check when literal[i] becomes True/False
self.propQ = [] # propagation queue
# Assignments
self.assigns = [] # [VarInfo]
self.trail = [] # order of assignments [lit]
self.trail_lim = [] # decision levels (len(trail) at each decision)
self.toplevel_conflict = False
def get_decision_level(self):
"""@rtype: int"""
return len(self.trail_lim)
def add_variable(self, obj):
"""@rtype: int"""
debug("add_variable('%s')", obj)
index = len(self.assigns)
self.watches += [[], []] # Add watch lists for X and not(X)
self.assigns.append(VarInfo(obj))
return index
# lit is now True
# reason is the clause that is asserting this
# Returns False if this immediately causes a conflict.
def enqueue(self, lit, reason):
"""@type lit: int
@rtype: bool"""
debug("%s => %s" % (reason, self.name_lit(lit)))
old_value = self.lit_value(lit)
if old_value is not None:
if old_value is False:
# Conflict
return False
else:
# Already set (shouldn't happen)
return True
if lit < 0:
var_info = self.assigns[neg(lit)]
var_info.value = False
else:
var_info = self.assigns[lit]
var_info.value = True
var_info.level = self.get_decision_level()
var_info.reason = reason
self.trail.append(lit)
self.propQ.append(lit)
return True
# Pop most recent assignment from self.trail
def undo_one(self):
lit = self.trail[-1]
debug("(pop %s)", self.name_lit(lit))
var_info = self.get_varinfo_for_lit(lit)
var_info.value = None
var_info.reason = None
var_info.level = -1
self.trail.pop()
while var_info.undo:
var_info.undo.pop().undo(lit)
def cancel(self):
n_this_level = len(self.trail) - self.trail_lim[-1]
debug("backtracking from level %d (%d assignments)" %
(self.get_decision_level(), n_this_level))
while n_this_level != 0:
self.undo_one()
n_this_level -= 1
self.trail_lim.pop()
def cancel_until(self, level):
"""@type level: int"""
while self.get_decision_level() > level:
self.cancel()
# Process the propQ.
# Returns None when done, or the clause that caused a conflict.
def propagate(self):
#debug("propagate: queue length = %d", len(self.propQ))
while self.propQ:
lit = self.propQ[0]
del self.propQ[0]
wi = watch_index(lit)
watches = self.watches[wi]
self.watches[wi] = []
debug("%s -> True : watches: %s" % (self.name_lit(lit), watches))
# Notify all watchers
for i in range(len(watches)):
clause = watches[i]
if not clause.propagate(lit):
# Conflict
# Re-add remaining watches
self.watches[wi] += watches[i+1:]
# No point processing the rest of the queue as
# we'll have to backtrack now.
self.propQ = []
return clause
return None
def impossible(self):
self.toplevel_conflict = True
def get_varinfo_for_lit(self, lit):
"""@type lit: int
@rtype: L{VarInfo}"""
if lit >= 0:
return self.assigns[lit]
else:
return self.assigns[neg(lit)]
def lit_value(self, lit):
"""@type lit: int
@rtype: bool | None"""
if lit >= 0:
value = self.assigns[lit].value
return value
else:
v = -1 - lit
value = self.assigns[v].value
if value is None:
return None
else:
return not value
# Call cb when lit becomes True
def watch_lit(self, lit, cb):
#debug("%s is watching for %s to become True" % (cb, self.name_lit(lit)))
"""@type lit: int"""
self.watches[watch_index(lit)].append(cb)
# Returns the new clause if one was added, True if none was added
# because this clause is trivially True, or False if the clause is
# False.
def _add_clause(self, lits, learnt, reason):
"""@type lits: [int]
@type learnt: bool
@type reason: str
@rtype: L{zeroinstall.injector.sat.UnionClause} | bool"""
if not lits:
assert not learnt
self.toplevel_conflict = True
return False
elif len(lits) == 1:
# A clause with only a single literal is represented
# as an assignment rather than as a clause.
return self.enqueue(lits[0], reason)
clause = UnionClause(self, lits)
if learnt:
# lits[0] is None because we just backtracked.
# Start watching the next literal that we will
# backtrack over.
best_level = -1
best_i = 1
for i in range(1, len(lits)):
level = self.get_varinfo_for_lit(lits[i]).level
if level > best_level:
best_level = level
best_i = i
lits[1], lits[best_i] = lits[best_i], lits[1]
# Watch the first two literals in the clause (both must be
# undefined at this point).
for lit in lits[:2]:
self.watch_lit(neg(lit), clause)
return clause
def name_lits(self, lst):
"""@type lst: [int]
@rtype: [str]"""
return [self.name_lit(l) for l in lst]
# For nicer debug messages
def name_lit(self, lit):
"""@type lit: int
@rtype: str"""
if lit >= 0:
return self.assigns[lit].name
return "not(%s)" % self.assigns[neg(lit)].name
def add_clause(self, lits):
# Public interface. Only used before the solve starts.
"""@type lits: [int]
@rtype: L{zeroinstall.injector.sat.UnionClause} | bool"""
assert lits
debug("add_clause([%s])" % ', '.join(self.name_lits(lits)))
if any(self.lit_value(l) == True for l in lits):
# Trivially true already.
return True
lit_set = set(lits)
for l in lits:
if neg(l) in lit_set:
# X or not(X) is always True.
return True
# Remove duplicates and values known to be False
lits = [l for l in lit_set if self.lit_value(l) != False]
retval = self._add_clause(lits, learnt = False, reason = "input fact")
if not retval:
self.toplevel_conflict = True
return retval
def at_most_one(self, lits):
"""@type lits: [int]
@rtype: L{zeroinstall.injector.sat.AtMostOneClause}"""
assert lits
debug("at_most_one(%s)" % ', '.join(self.name_lits(lits)))
# If we have zero or one literals then we're trivially true
# and not really needed for the solve. However, Zero Install
# monitors these objects to find out what was selected, so
# keep even trivial ones around for that.
#
#if len(lits) < 2:
# return True # Trivially true
# Ensure no duplicates
assert len(set(lits)) == len(lits), lits
# Ignore any literals already known to be False.
# If any are True then they're enqueued and we'll process them
# soon.
lits = [l for l in lits if self.lit_value(l) != False]
clause = AtMostOneClause(self, lits)
for lit in lits:
self.watch_lit(lit, clause)
return clause
def analyse(self, cause):
# After trying some assignments, we've discovered a conflict.
# e.g.
# - we selected A then B then C
# - from A, B, C we got X, Y
# - we have a rule: not(A) or not(X) or not(Y)
#
# The simplest thing to do would be:
# 1. add the rule "not(A) or not(B) or not(C)"
# 2. unassign C
#
# Then we'd deduce not(C) and we could try something else.
# However, that would be inefficient. We want to learn a more
# general rule that will help us with the rest of the problem.
#
# We take the clause that caused the conflict ("cause") and
# ask it for its cause. In this case:
#
# A and X and Y => conflict
#
# Since X and Y followed logically from A, B, C there's no
# point learning this rule; we need to know to avoid A, B, C
# *before* choosing C. We ask the two variables deduced at the
# current level (X and Y) what caused them, and work backwards.
# e.g.
#
# X: A and C => X
# Y: C => Y
#
# Combining these, we get the cause of the conflict in terms of
# things we knew before the current decision level:
#
# A and X and Y => conflict
# A and (A and C) and (C) => conflict
# A and C => conflict
#
# We can then learn (record) the more general rule:
#
# not(A) or not(C)
#
# Then, in future, whenever A is selected we can remove C and
# everything that depends on it from consideration.
learnt = [None] # The general rule we're learning
btlevel = 0 # The deepest decision in learnt
p = None # The literal we want to expand now
seen = set() # The variables involved in the conflict
counter = 0
while True:
# cause is the reason why p is True (i.e. it enqueued it).
# The first time, p is None, which requests the reason
# why it is conflicting.
if p is None:
debug("Why did %s make us fail?" % cause)
p_reason = cause.calc_reason(p)
debug("Because: %s => conflict" % (' and '.join(self.name_lits(p_reason))))
else:
debug("Why did %s lead to %s?" % (cause, self.name_lit(p)))
p_reason = cause.calc_reason(p)
debug("Because: %s => %s" % (' and '.join(self.name_lits(p_reason)), self.name_lit(p)))
# p_reason is in the form (A and B and ...)
# p_reason => p
# Check each of the variables in p_reason that we haven't
# already considered:
# - if the variable was assigned at the current level,
# mark it for expansion
# - otherwise, add it to learnt
for lit in p_reason:
var_info = self.get_varinfo_for_lit(lit)
if var_info not in seen:
seen.add(var_info)
if var_info.level == self.get_decision_level():
# We deduced this var since the last decision.
# It must be in self.trail, so we'll get to it
# soon. Remember not to stop until we've processed it.
counter += 1
elif var_info.level > 0:
# We won't expand lit, just remember it.
# (we could expand it if it's not a decision, but
# apparently not doing so is useful)
learnt.append(neg(lit))
btlevel = max(btlevel, var_info.level)
# else we already considered the cause of this assignment
# At this point, counter is the number of assigned
# variables in self.trail at the current decision level that
# we've seen. That is, the number left to process. Pop
# the next one off self.trail (as well as any unrelated
# variables before it; everything up to the previous
# decision has to go anyway).
# On the first time round the loop, we must find the
# conflict depends on at least one assignment at the
# current level. Otherwise, simply setting the decision
# variable caused a clause to conflict, in which case
# the clause should have asserted not(decision-variable)
# before we ever made the decision.
# On later times round the loop, counter was already >
# 0 before we started iterating over p_reason.
assert counter > 0
while True:
p = self.trail[-1]
var_info = self.get_varinfo_for_lit(p)
cause = var_info.reason
self.undo_one()
if var_info in seen:
break
debug("(irrelevant)")
counter -= 1
if counter <= 0:
assert counter == 0
# If counter = 0 then we still have one more
# literal (p) at the current level that we
# could expand. However, apparently it's best
# to leave this unprocessed (says the minisat
# paper).
break
# p is the literal we decided to stop processing on. It's either
# a derived variable at the current level, or the decision that
# led to this level. Since we're not going to expand it, add it
# directly to the learnt clause.
learnt[0] = neg(p)
debug("Learnt: %s" % (' or '.join(self.name_lits(learnt))))
return learnt, btlevel
def run_solver(self, decide):
"""@rtype: bool"""
# Check whether we detected a trivial problem
# during setup.
if self.toplevel_conflict:
debug("FAIL: toplevel_conflict before starting solve!")
return False
while True:
# Use logical deduction to simplify the clauses
# and assign literals where there is only one possibility.
conflicting_clause = self.propagate()
if not conflicting_clause:
debug("new state: %s", self.assigns)
if all(info.value is not None for info in self.assigns):
# Everything is assigned without conflicts
debug("SUCCESS!")
return True
else:
# Pick a variable and try assigning it one way.
# If it leads to a conflict, we'll backtrack and
# try it the other way.
lit = decide()
#print "TRYING:", self.name_lit(lit)
assert lit is not None, "decide function returned None!"
assert self.lit_value(lit) is None
self.trail_lim.append(len(self.trail))
r = self.enqueue(lit, reason = "considering")
assert r is True
else:
if self.get_decision_level() == 0:
debug("FAIL: conflict found at top level")
return False
else:
# Figure out the root cause of this failure.
learnt, backtrack_level = self.analyse(conflicting_clause)
self.cancel_until(backtrack_level)
c = self._add_clause(learnt, learnt = True, reason = conflicting_clause)
if c is not True:
# Everything except the first literal in learnt is known to
# be False, so the first must be True.
e = self.enqueue(learnt[0], c)
assert e is True
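# Minimal end-to-end sketch (illustrative, not part of this module): encode
# "at least one of A/B, at most one of A/B, and A implies C", then solve.
#
#   p = SATProblem()
#   A, B, C = (p.add_variable(name) for name in 'ABC')
#   p.add_clause([A, B])               # A or B
#   choice = p.at_most_one([A, B])     # not (A and B)
#   p.add_clause([neg(A), C])          # A => C
#   def decide():
#       lit = choice.best_undecided()
#       if lit is None:
#           lit = next(i for i, v in enumerate(p.assigns) if v.value is None)
#       return lit
#   if p.run_solver(decide):
#       print([v.obj for v in p.assigns if v.value])   # -> ['A', 'C']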
|
rammstein/0install
|
zeroinstall/injector/sat.py
|
Python
|
lgpl-2.1
| 19,957
|
[
"VisIt"
] |
c34b55f64622374e6d6a75db99a007f315214f26ed7ad370c7e5df94728d3125
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for misc.GritNode'''
import os
import sys
if __name__ == '__main__':
sys.path[0] = os.path.abspath(os.path.join(sys.path[0], '../..'))
import unittest
import StringIO
from grit import grd_reader
import grit.exception
from grit import util
from grit.format import rc
from grit.node import misc
class GritNodeUnittest(unittest.TestCase):
def testUniqueNameAttribute(self):
try:
restree = grd_reader.Parse(
util.PathFromRoot('grit/testdata/duplicate-name-input.xml'))
self.fail('Expected parsing exception because of duplicate names.')
except grit.exception.Parsing:
pass # Expected case
def testReadFirstIdsFromFile(self):
test_resource_ids = os.path.join(os.path.dirname(__file__), '..',
'testdata', 'resource_ids')
base_dir = os.path.dirname(test_resource_ids)
src_dir, id_dict = misc._ReadFirstIdsFromFile(
test_resource_ids,
{
'FOO': os.path.join(base_dir, 'bar'),
'SHARED_INTERMEDIATE_DIR': os.path.join(base_dir,
'out/Release/obj/gen'),
})
self.assertEqual({}, id_dict.get('bar/file.grd', None))
self.assertEqual({},
id_dict.get('out/Release/obj/gen/devtools/devtools.grd', None))
class IfNodeUnittest(unittest.TestCase):
def testIffyness(self):
grd = grd_reader.Parse(StringIO.StringIO('''
<grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
<release seq="3">
<messages>
<if expr="'bingo' in defs">
<message name="IDS_BINGO">
Bingo!
</message>
</if>
<if expr="'hello' in defs">
<message name="IDS_HELLO">
Hello!
</message>
</if>
<if expr="lang == 'fr' or 'FORCE_FRENCH' in defs">
<message name="IDS_HELLO" internal_comment="French version">
Good morning
</message>
</if>
</messages>
</release>
</grit>'''), dir='.')
messages_node = grd.children[0].children[0]
bingo_message = messages_node.children[0].children[0]
hello_message = messages_node.children[1].children[0]
french_message = messages_node.children[2].children[0]
self.assertTrue(bingo_message.name == 'message')
self.assertTrue(hello_message.name == 'message')
self.assertTrue(french_message.name == 'message')
grd.SetOutputLanguage('fr')
grd.SetDefines({'hello': '1'})
active = set(grd.ActiveDescendants())
self.failUnless(bingo_message not in active)
self.failUnless(hello_message in active)
self.failUnless(french_message in active)
grd.SetOutputLanguage('en')
grd.SetDefines({'bingo': 1})
active = set(grd.ActiveDescendants())
self.failUnless(bingo_message in active)
self.failUnless(hello_message not in active)
self.failUnless(french_message not in active)
grd.SetOutputLanguage('en')
grd.SetDefines({'FORCE_FRENCH': '1', 'bingo': '1'})
active = set(grd.ActiveDescendants())
self.failUnless(bingo_message in active)
self.failUnless(hello_message not in active)
self.failUnless(french_message in active)
def testElsiness(self):
grd = util.ParseGrdForUnittest('''
<messages>
<if expr="True">
<then> <message name="IDS_YES1"></message> </then>
<else> <message name="IDS_NO1"></message> </else>
</if>
<if expr="True">
<then> <message name="IDS_YES2"></message> </then>
<else> </else>
</if>
<if expr="True">
<then> </then>
<else> <message name="IDS_NO2"></message> </else>
</if>
<if expr="True">
<then> </then>
<else> </else>
</if>
<if expr="False">
<then> <message name="IDS_NO3"></message> </then>
<else> <message name="IDS_YES3"></message> </else>
</if>
<if expr="False">
<then> <message name="IDS_NO4"></message> </then>
<else> </else>
</if>
<if expr="False">
<then> </then>
<else> <message name="IDS_YES4"></message> </else>
</if>
<if expr="False">
<then> </then>
<else> </else>
</if>
</messages>''')
included = [msg.attrs['name'] for msg in grd.ActiveDescendants()
if msg.name == 'message']
self.assertEqual(['IDS_YES1', 'IDS_YES2', 'IDS_YES3', 'IDS_YES4'], included)
def testIffynessWithOutputNodes(self):
grd = grd_reader.Parse(StringIO.StringIO('''
<grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
<outputs>
<output filename="uncond1.rc" type="rc_data" />
<if expr="lang == 'fr' or 'hello' in defs">
<output filename="only_fr.adm" type="adm" />
<output filename="only_fr.plist" type="plist" />
</if>
<if expr="lang == 'ru'">
<output filename="doc.html" type="document" />
</if>
<output filename="uncond2.adm" type="adm" />
<output filename="iftest.h" type="rc_header">
<emit emit_type='prepend'></emit>
</output>
</outputs>
</grit>'''), dir='.')
outputs_node = grd.children[0]
uncond1_output = outputs_node.children[0]
only_fr_adm_output = outputs_node.children[1].children[0]
only_fr_plist_output = outputs_node.children[1].children[1]
doc_output = outputs_node.children[2].children[0]
uncond2_output = outputs_node.children[3]
self.assertTrue(uncond1_output.name == 'output')
self.assertTrue(only_fr_adm_output.name == 'output')
self.assertTrue(only_fr_plist_output.name == 'output')
self.assertTrue(doc_output.name == 'output')
self.assertTrue(uncond2_output.name == 'output')
grd.SetOutputLanguage('ru')
grd.SetDefines({'hello': '1'})
outputs = [output.GetFilename() for output in grd.GetOutputFiles()]
self.assertEquals(
outputs,
['uncond1.rc', 'only_fr.adm', 'only_fr.plist', 'doc.html',
'uncond2.adm', 'iftest.h'])
grd.SetOutputLanguage('ru')
grd.SetDefines({'bingo': '2'})
outputs = [output.GetFilename() for output in grd.GetOutputFiles()]
self.assertEquals(
outputs,
['uncond1.rc', 'doc.html', 'uncond2.adm', 'iftest.h'])
grd.SetOutputLanguage('fr')
grd.SetDefines({'hello': '1'})
outputs = [output.GetFilename() for output in grd.GetOutputFiles()]
self.assertEquals(
outputs,
['uncond1.rc', 'only_fr.adm', 'only_fr.plist', 'uncond2.adm',
'iftest.h'])
grd.SetOutputLanguage('en')
grd.SetDefines({'bingo': '1'})
outputs = [output.GetFilename() for output in grd.GetOutputFiles()]
self.assertEquals(outputs, ['uncond1.rc', 'uncond2.adm', 'iftest.h'])
grd.SetOutputLanguage('fr')
grd.SetDefines({'bingo': '1'})
outputs = [output.GetFilename() for output in grd.GetOutputFiles()]
self.assertNotEquals(outputs, ['uncond1.rc', 'uncond2.adm', 'iftest.h'])
def testChildrenAccepted(self):
grd = grd_reader.Parse(StringIO.StringIO('''<?xml version="1.0"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
<release seq="3">
<includes>
<if expr="'bingo' in defs">
<include type="gif" name="ID_LOGO2" file="images/logo2.gif" />
</if>
<if expr="'bingo' in defs">
<if expr="'hello' in defs">
<include type="gif" name="ID_LOGO2" file="images/logo2.gif" />
</if>
</if>
</includes>
<structures>
<if expr="'bingo' in defs">
<structure type="dialog" name="IDD_ABOUTBOX" file="grit\\test\data\klonk.rc" encoding="utf-16" />
</if>
<if expr="'bingo' in defs">
<if expr="'hello' in defs">
<structure type="dialog" name="IDD_ABOUTBOX" file="grit\\test\data\klonk.rc" encoding="utf-16" />
</if>
</if>
</structures>
<messages>
<if expr="'bingo' in defs">
<message name="IDS_BINGO">Bingo!</message>
</if>
<if expr="'bingo' in defs">
<if expr="'hello' in defs">
<message name="IDS_BINGO">Bingo!</message>
</if>
</if>
</messages>
</release>
<translations>
<if expr="'bingo' in defs">
<file lang="nl" path="nl_translations.xtb" />
</if>
<if expr="'bingo' in defs">
<if expr="'hello' in defs">
<file lang="nl" path="nl_translations.xtb" />
</if>
</if>
</translations>
</grit>'''), dir='.')
def testIfBadChildrenNesting(self):
# includes
xml = StringIO.StringIO('''<?xml version="1.0"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
<release seq="3">
<includes>
<if expr="'bingo' in defs">
<structure type="dialog" name="IDD_ABOUTBOX" file="grit\\test\data\klonk.rc" encoding="utf-16" />
</if>
</includes>
</release>
</grit>''')
self.assertRaises(grit.exception.UnexpectedChild, grd_reader.Parse, xml)
# messages
xml = StringIO.StringIO('''<?xml version="1.0"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
<release seq="3">
<messages>
<if expr="'bingo' in defs">
<structure type="dialog" name="IDD_ABOUTBOX" file="grit\\test\data\klonk.rc" encoding="utf-16" />
</if>
</messages>
</release>
</grit>''')
self.assertRaises(grit.exception.UnexpectedChild, grd_reader.Parse, xml)
# structures
xml = StringIO.StringIO('''<?xml version="1.0"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
<release seq="3">
<structures>
<if expr="'bingo' in defs">
<message name="IDS_BINGO">Bingo!</message>
</if>
</structures>
</release>
</grit>''')
self.assertRaises(grit.exception.UnexpectedChild, grd_reader.Parse, xml)
# translations
xml = StringIO.StringIO('''<?xml version="1.0"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
<translations>
<if expr="'bingo' in defs">
<message name="IDS_BINGO">Bingo!</message>
</if>
</translations>
</grit>''')
self.assertRaises(grit.exception.UnexpectedChild, grd_reader.Parse, xml)
# same with nesting
xml = StringIO.StringIO('''<?xml version="1.0"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
<release seq="3">
<includes>
<if expr="'bingo' in defs">
<if expr="'hello' in defs">
<structure type="dialog" name="IDD_ABOUTBOX" file="grit\\test\data\klonk.rc" encoding="utf-16" />
</if>
</if>
</includes>
</release>
</grit>''')
self.assertRaises(grit.exception.UnexpectedChild, grd_reader.Parse, xml)
xml = StringIO.StringIO('''<?xml version="1.0"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
<release seq="3">
<messages>
<if expr="'bingo' in defs">
<if expr="'hello' in defs">
<structure type="dialog" name="IDD_ABOUTBOX" file="grit\\test\data\klonk.rc" encoding="utf-16" />
</if>
</if>
</messages>
</release>
</grit>''')
self.assertRaises(grit.exception.UnexpectedChild, grd_reader.Parse, xml)
xml = StringIO.StringIO('''<?xml version="1.0"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
<release seq="3">
<structures>
<if expr="'bingo' in defs">
<if expr="'hello' in defs">
<message name="IDS_BINGO">Bingo!</message>
</if>
</if>
</structures>
</release>
</grit>''')
self.assertRaises(grit.exception.UnexpectedChild, grd_reader.Parse, xml)
xml = StringIO.StringIO('''<?xml version="1.0"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
<translations>
<if expr="'bingo' in defs">
<if expr="'hello' in defs">
<message name="IDS_BINGO">Bingo!</message>
</if>
</if>
</translations>
</grit>''')
self.assertRaises(grit.exception.UnexpectedChild, grd_reader.Parse, xml)
class ReleaseNodeUnittest(unittest.TestCase):
def testPseudoControl(self):
grd = grd_reader.Parse(StringIO.StringIO('''<?xml version="1.0" encoding="UTF-8"?>
<grit latest_public_release="1" source_lang_id="en-US" current_release="2" base_dir=".">
<release seq="1" allow_pseudo="false">
<messages>
<message name="IDS_HELLO">
Hello
</message>
</messages>
<structures>
<structure type="dialog" name="IDD_ABOUTBOX" encoding="utf-16" file="klonk.rc" />
</structures>
</release>
<release seq="2">
<messages>
<message name="IDS_BINGO">
Bingo
</message>
</messages>
<structures>
<structure type="menu" name="IDC_KLONKMENU" encoding="utf-16" file="klonk.rc" />
</structures>
</release>
</grit>'''), util.PathFromRoot('grit/testdata'))
grd.SetOutputLanguage('en')
grd.RunGatherers()
hello = grd.GetNodeById('IDS_HELLO')
aboutbox = grd.GetNodeById('IDD_ABOUTBOX')
bingo = grd.GetNodeById('IDS_BINGO')
menu = grd.GetNodeById('IDC_KLONKMENU')
for node in [hello, aboutbox]:
self.failUnless(not node.PseudoIsAllowed())
for node in [bingo, menu]:
self.failUnless(node.PseudoIsAllowed())
# TODO(benrg): There was a test here asserting that formatting hello and
# aboutbox with a pseudo language fails; they do not fail, and the test was
# too broken to catch it. Fix this.
# Should not raise an exception since pseudo is allowed
rc.FormatMessage(bingo, 'xyz-pseudo')
rc.FormatStructure(menu, 'xyz-pseudo', '.')
if __name__ == '__main__':
unittest.main()
|
leighpauls/k2cro4
|
tools/grit/grit/node/misc_unittest.py
|
Python
|
bsd-3-clause
| 15,067
|
[
"xTB"
] |
6e304834a1ece7ef9604e7d9a39ea1de9244058e6e7bbbc8dee904a0ed4b302c
|
""" DIRAC FileCatalog utilities
"""
__RCSID__ = "$Id$"
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Utilities.List import intListToString
def getIDSelectString(ids):
"""
:param ids: input IDs - a single int/long, a list or tuple of ints, or a ready-made SELECT string
:return: S_OK( select string ) or S_ERROR for an unsupported input type
"""
if isinstance(ids, basestring) and ids.lower().startswith('select'):
idString = ids
elif isinstance(ids, (int, long)):
idString = '%d' % ids
elif isinstance(ids, (tuple, list)):
idString = intListToString(ids)
else:
return S_ERROR('Illegal fileID')
return S_OK(idString)
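# Minimal usage sketch (illustrative; assumes intListToString joins the ints
# with commas, as its name suggests):
#
#   getIDSelectString(5)                       # -> S_OK('5')
#   getIDSelectString([1, 2, 3])               # -> S_OK('1,2,3')
#   getIDSelectString('SELECT FileID FROM F')  # passed through unchanged
#   getIDSelectString(3.14)                    # -> S_ERROR('Illegal fileID')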
|
fstagni/DIRAC
|
DataManagementSystem/DB/FileCatalogComponents/Utilities.py
|
Python
|
gpl-3.0
| 586
|
[
"DIRAC"
] |
4302c66b502f8f2636ff9f73934e751f0b4c73a44ccf31b98e77490696446a9d
|
r"""wamp is a module that provide classes that extend any
WAMP related class for the purpose of vtkWeb.
"""
import inspect, types, string, random, logging, six, json, re, base64
from threading import Timer
from twisted.web import resource
from twisted.python import log
from twisted.internet import reactor
from twisted.internet import defer
from twisted.internet.defer import Deferred, returnValue
from autobahn import wamp
from autobahn import util
from autobahn.wamp import types
from autobahn.wamp import auth
from autobahn.wamp import register as exportRpc
from autobahn.twisted.wamp import ApplicationSession, RouterSession
from autobahn.twisted.websocket import WampWebSocketServerFactory
from autobahn.twisted.websocket import WampWebSocketServerProtocol
from autobahn.twisted.websocket import WebSocketServerProtocol
from vtk.web import protocols
try:
from vtkWebCore import vtkWebApplication
except:
from vtkWebCorePython import vtkWebApplication
# =============================================================================
salt = ''.join(random.choice(string.ascii_uppercase + string.digits) for x in range(32))
application = None
imageCapture = None
# =============================================================================
#
# Base class for vtkWeb WampServerProtocol
#
# =============================================================================
class ServerProtocol(ApplicationSession):
"""
Defines the core server protocol for vtkWeb. Adds support to
marshall/unmarshall RPC callbacks that involve ServerManager proxies as
arguments or return values.
Applications typically don't use this class directly, since it doesn't
register any RPC callbacks that are required for basic web-applications with
interactive visualizations. For that, use vtkWebServerProtocol.
"""
def __init__(self, config):
ApplicationSession.__init__(self, config)
self.vtkWebProtocols = []
self.authdb = None
self.secret = None
self.Application = self.initApplication()
self.initialize()
# Init Binary WebSocket image renderer
global imageCapture
imageCapture = protocols.vtkWebViewPortImageDelivery()
imageCapture.setApplication(self.Application)
def setAuthDB(self, db):
self.authdb = db
if self.secret:
self.authdb.updateKey('vtkweb', self.secret)
def initialize(self):
"""
Let the sub class define what they need to do to properly initialize
themselves.
"""
pass
def initApplication(self):
"""
Let subclass optionally initialize a custom application in lieu
of the default vtkWebApplication.
"""
global application
if not application:
application = vtkWebApplication()
return application
def onJoin(self, details):
ApplicationSession.onJoin(self, details)
self.register(self)
for protocol in self.vtkWebProtocols:
self.register(protocol)
def setApplication(self, application):
self.Application = application
# Init Binary WebSocket image renderer
global imageCapture
imageCapture.setApplication(self.Application)
def registerVtkWebProtocol(self, protocol):
protocol.setApplication(self.Application)
self.vtkWebProtocols.append(protocol)
def getVtkWebProtocols(self):
return self.vtkWebProtocols
def updateSecret(self, newSecret):
self.secret = newSecret
if self.authdb:
self.authdb.updateKey('vtkweb', self.secret)
@exportRpc("application.exit")
def exit(self):
"""RPC callback to exit"""
reactor.stop()
@exportRpc("application.exit.later")
def exitLater(self, secondsLater=60):
"""RPC callback to exit after a short delay"""
reactor.callLater(secondsLater, reactor.stop)
# =============================================================================
#
# Base class for vtkWeb WampServerFactory
#
# =============================================================================
class TimeoutWampWebSocketServerFactory(WampWebSocketServerFactory):
"""
TimeoutWampWebSocketServerFactory is WampWebSocketServerFactory subclass
that adds support to close the web-server after a timeout when the last
connected client drops.
Currently, the protocol must call connectionMade() and connectionLost() methods
to notify the factory that the connection was started/closed.
If the connection count drops to zero, then the reap timer
is started which will end the process if no other connections are made in
the timeout interval.
"""
def __init__(self, factory, *args, **kwargs):
self._connection_count = 0
self._timeout = kwargs['timeout']
self._reaper = reactor.callLater(self._timeout, lambda: reactor.stop())
del kwargs['timeout']
WampWebSocketServerFactory.__init__(self, factory, *args, **kwargs)
WampWebSocketServerFactory.protocol = TimeoutWampWebSocketServerProtocol
def connectionMade(self):
if self._reaper:
log.msg("Client has reconnected, cancelling reaper", logLevel=logging.DEBUG)
self._reaper.cancel()
self._reaper = None
self._connection_count += 1
log.msg("on_connect: connection count = %s" % self._connection_count, logLevel=logging.DEBUG)
def connectionLost(self, reason):
if self._connection_count > 0:
self._connection_count -= 1
log.msg("connection_lost: connection count = %s" % self._connection_count, logLevel=logging.DEBUG)
if self._connection_count == 0 and not self._reaper:
log.msg("Starting timer, process will terminate in: %ssec" % self._timeout, logLevel=logging.DEBUG)
self._reaper = reactor.callLater(self._timeout, lambda: reactor.stop())
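# Illustrative sketch (not part of VTK) of the reap-timer pattern used above,
# reduced to the bare twisted calls:
#
#   from twisted.internet import reactor
#   reaper = reactor.callLater(timeout, reactor.stop)   # armed while idle
#   # ...on connect:
#   if reaper is not None:
#       reaper.cancel()                                 # disarm
#       reaper = None
#   # ...on last disconnect:
#   reaper = reactor.callLater(timeout, reactor.stop)   # re-arm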
# =============================================================================
class TimeoutWampWebSocketServerProtocol(WampWebSocketServerProtocol):
def connectionMade(self):
WampWebSocketServerProtocol.connectionMade(self)
self.factory.connectionMade()
def connectionLost(self, reason):
WampWebSocketServerProtocol.connectionLost(self, reason)
self.factory.connectionLost(reason)
# =============================================================================
class AuthDb:
"""
An in-memory-only user database of a single user.
"""
AUTHEXTRA = {'salt': 'salt123', 'keylen': 32, 'iterations': 1000}
def __init__(self):
self._creds = {'vtkweb': auth.derive_key("vtkweb-secret", self.AUTHEXTRA['salt'])}
def get(self, authid):
## we return a deferred to simulate an asynchronous lookup
return defer.succeed(self._creds.get(authid, None))
def updateKey(self, id, newKey):
self._creds[id] = auth.derive_key(newKey, self.AUTHEXTRA['salt'])
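# Minimal WAMP-CRA sketch (illustrative): both ends derive the same key from
# the shared secret and salt, and the client proves knowledge of the secret
# by signing the server's challenge (compare onHello/onAuthenticate below):
#
#   key = auth.derive_key("vtkweb-secret", AuthDb.AUTHEXTRA['salt'])
#   signature = auth.compute_wcs(key, challenge_json_string)
#   # the server accepts iff this equals PendingAuth.signature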
# =============================================================================
class PendingAuth:
"""
Used for tracking pending authentications.
"""
def __init__(self, key, session, authid, authrole, authmethod, authprovider):
self.authid = authid
self.authrole = authrole
self.authmethod = authmethod
self.authprovider = authprovider
self.session = session
self.timestamp = util.utcnow()
self.nonce = util.newid()
challenge_obj = {
'authid': self.authid,
'authrole': self.authrole,
'authmethod': self.authmethod,
'authprovider': self.authprovider,
'session': self.session,
'nonce': self.nonce,
'timestamp': self.timestamp
}
self.challenge = json.dumps(challenge_obj)
self.signature = auth.compute_wcs(key, self.challenge)
# =============================================================================
class CustomWampCraRouterSession(RouterSession):
"""
A custom router session that authenticates via WAMP-CRA.
"""
def __init__(self, routerFactory):
"""
Constructor.
"""
RouterSession.__init__(self, routerFactory)
@defer.inlineCallbacks
def onHello(self, realm, details):
"""
Callback fired when client wants to attach session.
"""
self._pending_auth = None
if details.authmethods:
for authmethod in details.authmethods:
if authmethod == u"wampcra":
authdb = self.factory.authdb
## lookup user in user DB
key = yield authdb.get(details.authid)
## if user found ..
if key:
## setup pending auth
self._pending_auth = PendingAuth(key, details.pending_session,
details.authid, "user", authmethod, "authdb")
## send challenge to client
extra = { 'challenge': self._pending_auth.challenge }
## when using salted passwords, provide the client with
## the salt and the PBKDF2 parameters used
extra['salt'] = authdb.AUTHEXTRA['salt']
extra['iterations'] = 1000
extra['keylen'] = 32
defer.returnValue(types.Challenge('wampcra', extra))
## deny client
defer.returnValue(types.Deny())
def onAuthenticate(self, signature, extra):
"""
Callback fired when a client responds to an authentication challenge.
"""
## if there is a pending auth, and the signature provided by client matches ..
if self._pending_auth and signature == self._pending_auth.signature:
## accept the client
return types.Accept(authid = self._pending_auth.authid,
authrole = self._pending_auth.authrole,
authmethod = self._pending_auth.authmethod,
authprovider = self._pending_auth.authprovider)
## deny client
return types.Deny()
# =============================================================================
# Simple web server endpoint handling POST requests to execute rpc methods
# =============================================================================
class HttpRpcResource(resource.Resource, object):
def __init__(self, serverProtocol, endpointRootPath):
super(HttpRpcResource, self).__init__()
self.functionMap = {}
self.urlMatcher = re.compile(endpointRootPath.strip('/') + '/([^/]+)')
# Build the rpc method dictionary
protocolList = serverProtocol.getVtkWebProtocols()
protocolList.append(serverProtocol) # so the exit methods get "registered"
for protocolObject in protocolList:
test = lambda x: inspect.ismethod(x) or inspect.isfunction(x)
for k in inspect.getmembers(protocolObject.__class__, test):
proc = k[1]
if "_wampuris" in proc.__dict__:
pat = proc.__dict__["_wampuris"][0]
if pat.is_endpoint():
uri = pat.uri()
self.functionMap[uri] = (protocolObject, proc)
def extractRpcMethod(self, path):
m = self.urlMatcher.search(path)
if m:
return m.group(1)
else:
return None
def getChild(self, path, request):
return self
def render_POST(self, request):
payload = json.loads(request.content.getvalue())
args = payload['args']
methodName = self.extractRpcMethod(request.path)
obj,func = self.functionMap[methodName]
results = func(obj, *args)
return json.dumps(results)
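# Illustrative client-side sketch (hypothetical host/port, assuming the
# resource above is mounted under endpointRootPath "/rpc"): POST a JSON body
# of the form {"args": [...]} to /rpc/<registered.rpc.name>; render_POST
# answers with the JSON-encoded result.
#
#   import requests  # hypothetical client-side dependency
#   r = requests.post("http://localhost:8080/rpc/application.exit.later",
#                     json={"args": [120]})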
# =============================================================================
# Binary WebSocket image push protocol
# =============================================================================
class ImagePushBinaryWebSocketServerProtocol(WebSocketServerProtocol):
def onOpen(self):
global imageCapture
self.helper = imageCapture
self.app = imageCapture.getApplication()
self.deltaT = 0.015
self.viewToCapture = {}
self.renderLoop = False
def onMessage(self, msg, isBinary):
request = json.loads(msg)
if 'view_id' in request:
viewId = str(request['view_id'])
if viewId not in self.viewToCapture:
self.viewToCapture[viewId] = { 'quality': 100, 'enabled': True, 'view': self.helper.getView(viewId), 'view_id': viewId, 'mtime': 0 }
# Update fields
objToUpdate = self.viewToCapture[viewId]
for key in request:
objToUpdate[key] = request[key]
# Trigger new render loop if needed
self.startRenderLoop()
def onClose(self, wasClean, code, reason):
self.viewToCapture = {}
self.renderLoop = False
def connectionLost(self, reason):
self.viewToCapture = {}  # reset to an empty dict, consistent with onOpen/onClose
self.renderLoop = False
def startRenderLoop(self):
if self.renderLoop:
return
self.renderLoop = True
reactor.callLater(self.deltaT, lambda: self.processNextRender())
def processNextRender(self):
keepGoing = False
for k, v in self.viewToCapture.iteritems():
if v['enabled']:
keepGoing = True
view = v['view']
if hasattr(view,'SMProxy'):
view = view.SMProxy
quality = v['quality']
mtime = v['mtime']
base64Image = self.app.StillRenderToString(view, mtime, quality)
if base64Image:
v['mtime'] = self.app.GetLastStillRenderToStringMTime()
meta = {
'size': self.app.GetLastStillRenderImageSize(),
'id': k
}
self.sendMessage(json.dumps(meta), False)
self.sendMessage(base64.standard_b64decode(base64Image), True)
self.renderLoop = keepGoing
if self.renderLoop:
reactor.callLater(self.deltaT, lambda: self.processNextRender())
|
hlzz/dotfiles
|
graphics/VTK-7.0.0/Web/Python/vtk/web/wamp.py
|
Python
|
bsd-3-clause
| 14,893
|
[
"VTK"
] |
39574a5ffba9df013b981fa7e138192da838af85513967fa15d8cd7254bf76a4
|
from rpy2 import robjects
def main():
# load r functions
r_install_packages = robjects.r['install.packages']
r_source = robjects.r['source']
# setup bioconductor
r_source("http://bioconductor.org/biocLite.R")
r_biocLite = robjects.r['biocLite']
# install argparser
r_install_packages("argparser", repos="http://cran.us.r-project.org")
# install dada2
r_biocLite('dada2')
if __name__ == "__main__":
main()
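# The same robjects lookup pattern generalizes to any R function by name
# (illustrative sketch, not part of this installer):
#
#   from rpy2 import robjects
#   r_sum = robjects.r['sum']
#   total = r_sum(robjects.IntVector([1, 2, 3]))[0]  # -> 6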
|
shafferm/dada2_qiime1
|
scripts/install_dada2_qiime_dependencies.py
|
Python
|
mit
| 456
|
[
"Bioconductor"
] |
3640f83738dd175303d5cc5933d785806a471e9c738d1fcdead6e637fd8e89db
|
## numpy-oldnumeric calls replaced by custom script; 09/06/2016
## Automatically adapted for numpy-oldnumeric Mar 26, 2007 by alter_code1.py
##
## Biskit, a toolkit for the manipulation of macromolecular structures
## Copyright (C) 2004-2018 Raik Gruenberg & Johan Leckner
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You find a copy of the GNU General Public License in the file
## license.txt along with this program; if not, write to the Free
## Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
##
##
"""
Utilities for handling structures and sequences
"""
## see: https://www.python.org/dev/peps/pep-0366/
## allow relative imports when calling module as main script for testing
if __name__ == "__main__" and __package__ is None:
import biskit
__package__ = "biskit"
from biskit import EHandler
from biskit import tools as t
from biskit.core import oldnumeric as N0
import copy
import types
class MolUtilError( Exception ):
pass
#: translate PDB amino acid names to single letter code
aaDicStandard =\
{'asp':'D', 'glu':'E', 'lys':'K', 'his':'H', 'arg':'R',
'gln':'Q', 'asn':'N', 'ser':'S', 'asx':'B', 'glx':'Z',
'phe':'F', 'trp':'W', 'tyr':'Y',
'gly':'G', 'ala':'A', 'ile':'I', 'leu':'L', 'cys':'C',
'met':'M', 'thr':'T', 'val':'V', 'pro':'P' }
#: same for nucleic acids (incomplete)
nsDicStandard = {'a':'a', 'g':'g', 'c':'c', 't':'t', 'u':'u',
'a3':'a', 'g3':'g', 'c3':'c', 't3':'t', 'u3':'u',
'a5':'a', 'g5':'g', 'c5':'c', 't5':'t', 'u5':'u',
'da':'a','dg':'g','dc':'c','dt':'t',
'da3':'a','dg3':'g','dc3':'c','dt3':'t',
'da5':'a','dg5':'g','dc5':'c','dt5':'t'
}
#: extend aaDicStandard with non-standard residues
aaDic = copy.copy( aaDicStandard )
aaDic.update( {'cyx':'C', 'hid':'H', 'hie':'H', 'hip':'H',
'unk':'X', 'ace':'X', 'nme':'X'} )#, 'ndp':'X' } )
#: extend nsDicStandard with non-standard residues
nsDic = copy.copy( nsDicStandard )
nsDic.update( {'atp':'a', 'gtp':'g', 'ctp':'c', 'ttp':'t', 'utp':'u',
'adp':'a', 'gdp':'g', 'cdp':'c', 'tdp':'t', 'udp':'u',
'amp':'a', 'gmp':'g',
'fad':'f', 'fmp':'f',
'nad':'n',
} )
#: translate common hetero residues to pseudo single letter code
xxDic = {'tip3':'~', 'hoh':'~', 'wat':'~', 'cl-':'-', 'na+':'+', 'ca':'+',
'ndp':'X', 'nap':'X'}
#: translate standard PDB amino and nucleic acid names to single letter code
resDicStandard = copy.copy( aaDicStandard )
resDicStandard.update( nsDicStandard )
#: extend resDicStandard with common non-standard names
resDic = copy.copy( aaDic )
resDic.update( nsDic )
resDic.update( xxDic )
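## A few example lookups (values taken from the tables above):
##   resDic['ala'] -> 'A'   (standard amino acid)
##   resDic['atp'] -> 'a'   (non-standard nucleotide, via nsDic)
##   resDic['hoh'] -> '~'   (hetero/water pseudo code, via xxDic)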
## map non-standard amino acid names to closest standard amino acid
##
## Data from: http://www.ccp4.ac.uk/html/lib_list.html#peptide_synonyms
## More info at: http://xray.bmc.uu.se/hicup/XXX/ where XXX is the residue code
##
## NOT ADDED:
## SAR SARCOSINE
## PCA 5-pyrrolidone-2-carboxylic_acid
## INI Amidinated_lysine_with_methyl_isonicotinimida
## SAH S-ADENOSYL-L-HOMOCYSTEINE
## SAM S-ADENOSYLMETHIONINE
## LLP LYSINE-PYRIDOXAL-5*-PHOSPHATE
## ACE acetyl
## FOR Formyl
## BOC TERT-BUTYLOXYCARBONYL GROUP
## MLE N-METHYLLEUCINE
## MVA N-METHYLVALINE
## IVA Isovaleric_acid
## STA STATINE
## ETA ethanolamine
## TFA TRIFLUOROACETYL GROUP
## ANI 4-TRIFLUOROMEHYLANILINE
## MPR BETA-MERCAPTOPROPIONATE
## DAM N-METHYL-ALPHA-BETA-DEHYDROALANINE
## ACB 2-AMINO-3-CARBONYLBUTANOIC ACID
## ADD 2,6,8-TRIMETHYL-3-AMINO-9-BENZYL-9-M
## CXM N-CARBOXYMETHIONINE
## DIP DIPENTYLAMINE
## BAL BETA-ALANINE
nonStandardAA={ 'UNK':'ALA', 'ABA':'ALA', 'B2A':'ALA',
'ORN':'ARG',
'ASX':'ASP',
'CSH':'CYS', 'OCS':'CYS', 'CSO':'CYS',
'GLX':'GLU', 'CGU':'GLU', 'ILG':'GLU',
'B2I':'ILE',
'BLE':'LEU',
'KCX':'LYS', 'BLY':'LYS',
'MSE':'MET',
'B1F':'PHE', 'B2F':'PHE',
'HYP':'PRO', '5HP':'PRO',
'SEP':'SER',
'TYS':'TYR',
'B2V':'VAL',
'HIE':'HIS', 'HID':'HIS', 'HIP':'HIS',
'CYX':'CYS' }
#: heavy atoms of amino acids in standard order, OXT applies to C term only
aaAtoms={'GLY':['N','CA','C','O', 'OXT' ],
'ALA':['N','CA','C','O', 'CB', 'OXT'],
'VAL':['N','CA','C','O','CB','CG1','CG2', 'OXT'],
'LEU':['N','CA','C','O','CB','CG','CD1','CD2', 'OXT'],
'ILE':['N','CA','C','O','CB','CG1','CG2','CD1', 'OXT'],
'MET':['N','CA','C','O','CB','CG','SD','CE', 'OXT'],
'PRO':['N','CA','C','O','CB','CG','CD', 'OXT'],
'PHE':['N','CA','C','O','CB','CG','CD1','CD2','CE1','CE2','CZ',
'OXT'],
'TRP':['N','CA','C','O','CB','CG','CD1','CD2','NE1','CE2','CE3',
'CZ2','CZ3','CH2', 'OXT'],
'SER':['N','CA','C','O','CB','OG', 'OXT'],
'THR':['N','CA','C','O','CB','OG1','CG2', 'OXT'],
'ASN':['N','CA','C','O','CB','CG','OD1','ND2', 'OXT'],
'GLN':['N','CA','C','O','CB','CG','CD','OE1','NE2', 'OXT'],
'TYR':['N','CA','C','O','CB','CG','CD1','CD2','CE1','CE2','CZ','OH',
'OXT'],
'CYS':['N','CA','C','O','CB','SG', 'OXT'],
'LYS':['N','CA','C','O','CB','CG','CD','CE','NZ', 'OXT'],
'ARG':['N','CA','C','O','CB','CG','CD','NE','CZ','NH1','NH2', 'OXT'],
'HIS':['N','CA','C','O','CB','CG','ND1','CD2','CE1','NE2', 'OXT'],
'ASP':['N','CA','C','O','CB','CG','OD1','OD2', 'OXT'],
'GLU':['N','CA','C','O','CB','CG','CD','OE1','OE2', 'OXT']}
#: dictionary of elements
elements = { 'carbon':['C', 'CD2', 'CZ2', 'CB', 'CA', 'CG', 'CE', 'CD', 'CZ',
'CH2', 'CE3', 'CD1', 'CE1', 'CZ3', 'CG1', 'CG2', 'CE2'],
'nitrogen':['NZ', 'ND2', 'NH1', 'NH2', 'ND1', 'NE1', 'NE2',
'NE', 'N'],
'oxygen':['OG', 'OE2', 'OXT', 'OD1', 'OE1', 'OH', 'OG1', 'OD2',
'O'],
'sulphur':['SG', 'SD'],
'clustering_BDZ':['C','CB','CD','CD1','CD2','CZ','CZ2','CZ3',
'ND1','ND2','NZ','OD1','OD2','SD' ],
'clustering_ABDZ':['C','CA','CB','CD','CD1','CD2','CZ','CZ2',
'CZ3',
'ND1','ND2','NZ','OD1','OD2','SD' ],
'clustering_G':['C','CG','CG1','OG','OG1','SG' ],
'clustering_B':['C','CB'],
'clustering_AG':['C','CA','CG','CG1','OG','OG1','SG' ],
'clustering_AGE':['C','CA','CG','CG1','OG','OG1','SG','NE','OE1',
'CE1','CE','CE3' ],
'clustering_BD':['C','CB','CD','CD1','OD1','SD' ],
'clustering_ABD':['C','CA','CB','CD','CD1','OD1','SD' ],
'clustering_AB':['C','CA','CB']}
#: number of attached H for each heavy atom in each amino acid
aaAtomsH={'XXX':{'N':1,'CA':1,'C':0,'O':0,'OXT':0},
'GLY':{},
'ALA':{'CB':3},
'VAL':{'CB':0,'CG1':3,'CG2':3},
'LEU':{'CB':2,'CG':0,'CD1':3,'CD2':3},
'ILE':{'CB':0,'CG1':1,'CG2':3,'CD1':3},
'MET':{'CB':2,'CG':2,'SD':0,'CE':3 },
'PRO':{'N':0,'CB':2,'CG':2,'CD':2},
'PHE':{'CB':2,'CG':0,'CD1':1,'CD2':1,'CE1':1,'CE2':1,'CZ':1},
'TRP':{'CB':2,'CG':0,'CD1':1,'CD2':0,'NE1':1,'CE2':0,'CE3':1,
'CZ2':1,'CZ3':1,'CH2':1},
'SER':{'CB':2,'OG':1},
'THR':{'CB':0,'OG1':1,'CG2':3},
'ASN':{'CB':2,'CG':0,'OD1':0,'ND2':2},
'GLN':{'CB':2,'CG':2,'CD':0,'OE1':0,'NE2':2},
'TYR':{'CB':2,'CG':0,'CD1':1,'CD2':1,'CE1':1,'CE2':1,'CZ':0,'OH':1},
'CYS':{'CB':2,'SG':1},
'LYS':{'CB':2,'CG':2,'CD':2,'CE':2,'NZ':3},
'ARG':{'CB':2,'CG':2,'CD':2,'NE':1,'CZ':0,'NH1':2,'NH2':2},
'HIS':{'CB':2,'CG':0,'ND1':1,'CD2':1,'CE1':1,'NE2':0},
'ASP':{'CB':2,'CG':0,'OD1':0,'OD2':0},
'GLU':{'CB':2,'CG':2,'CD':0,'OE1':0,'OE2':0} }
for aa in aaAtomsH:
default = copy.copy( aaAtomsH['XXX'] )
default.update( aaAtomsH[aa] )
aaAtomsH[aa] = default
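## After the merge above every residue also carries the 'XXX' backbone
## defaults, e.g. aaAtomsH['GLY'] == {'N':1,'CA':1,'C':0,'O':0,'OXT':0}
## and aaAtomsH['ALA'] additionally maps 'CB' -> 3.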
## work in progress...heavy atoms of nucleic acids in standard order
nsAtoms={
'ATP':['PG', 'O1G', 'O2G', 'O3G', 'PB', 'O1B', 'O2B', 'O3B', 'PA', 'O1A',
'O2A', 'O3A', 'O5*', 'C5*', 'C4*', 'O4*', 'C3*', 'O3*', 'C2*',
'O2*', 'C1*', 'N9', 'C8', 'N7', 'C5', 'C6', 'N6', 'N1', 'C2', 'N3',
'C4'],
'GTP':['PG', 'O1G', 'O2G', 'O3G', 'PB', 'O1B', 'O2B', 'O3B', 'PA', 'O1A',
'O2A', 'O3A', 'O5*', 'C5*', 'C4*', 'O4*', 'C3*', 'O3*', 'C2*',
'O2*', 'C1*', 'N9', 'C8', 'N7', 'C5', 'C6', 'O6', 'N1', 'C2', 'N2',
'N3', 'C4'],
'DA': ['P', 'O1P', 'O2P', "O5'", "C5'", "H5'1", "H5'2", "C4'", "H4'", "O4'",
"C1'", "H1'", 'N9', 'C8', 'H8', 'N7', 'C5', 'C6', 'N6', 'H61', 'H62',
'N1', 'C2', 'H2', 'N3', 'C4', "C3'", "H3'", "C2'", "H2'1", "H2'2",
"O3'"],
'DC': ['P', 'O1P', 'O2P', "O5'", "C5'", "H5'1", "H5'2", "C4'", "H4'", "O4'",
"C1'", "H1'", 'N1', 'C6', 'H6', 'C5', 'H5', 'C4', 'N4', 'H41', 'H42',
'N3', 'C2', 'O2', "C3'", "H3'", "C2'", "H2'1", "H2'2", "O3'"],
'DG': ['P', 'O1P', 'O2P', "O5'", "C5'", "H5'1", "H5'2", "C4'", "H4'", "O4'",
"C1'", "H1'", 'N9', 'C8', 'H8', 'N7', 'C5', 'C6', 'O6', 'N1', 'H1',
'C2', 'N2', 'H21', 'H22', 'N3', 'C4', "C3'", "H3'", "C2'", "H2'1",
"H2'2", "O3'"],
'DT': ['P', 'O1P', 'O2P', "O5'", "C5'", "H5'1", "H5'2", "C4'", "H4'", "O4'",
"C1'", "H1'", 'N1', 'C6', 'H6', 'C5', 'C7', 'H71', 'H72', 'H73',
'C4', 'O4', 'N3', 'H3', 'C2', 'O2', "C3'", "H3'", "C2'", "H2'1",
"H2'2", "O3'"],
'RA': ['P', 'O1P', 'O2P', "O5'", "C5'", "H5'1", "H5'2", "C4'", "H4'", "O4'",
"C1'", "H1'", 'N9', 'C8', 'H8', 'N7', 'C5', 'C6', 'N6', 'H61', 'H62',
'N1', 'C2', 'H2', 'N3', 'C4', "C3'", "H3'", "C2'", "H2'1", "O2'",
"HO'2", "O3'"],
'RC': ['P', 'O1P', 'O2P', "O5'", "C5'", "H5'1", "H5'2", "C4'", "H4'", "O4'",
"C1'", "H1'", 'N1', 'C6', 'H6', 'C5', 'H5', 'C4', 'N4', 'H41', 'H42',
'N3', 'C2', 'O2', "C3'", "H3'", "C2'", "H2'1", "O2'", "HO'2", "O3'"],
'RG': ['P', 'O1P', 'O2P', "O5'", "C5'", "H5'1", "H5'2", "C4'", "H4'", "O4'",
"C1'", "H1'", 'N9', 'C8', 'H8', 'N7', 'C5', 'C6', 'O6', 'N1', 'H1',
'C2', 'N2', 'H21', 'H22', 'N3', 'C4', "C3'", "H3'", "C2'", "H2'1",
"O2'", "HO'2", "O3'"],
'RU': ['P', 'O1P', 'O2P', "O5'", "C5'", "H5'1", "H5'2", "C4'", "H4'", "O4'",
"C1'", "H1'", 'N1', 'C6', 'H6', 'C5', 'H5', 'C4', 'O4', 'N3', 'H3',
'C2', 'O2', "C3'", "H3'", "C2'", "H2'1", "O2'", "HO'2", "O3'"],
'MG' :['MG'],
'NDP':['P1', 'O1', 'O2', 'O5R', 'C5R', 'O1R', 'C4R', 'C3R', 'O3R', 'C2R',
'O2R', 'C1R', 'N9', 'C8', 'N7', 'C5', 'C6', 'N6', 'N1', 'C2',
'N3', 'C4', 'O10', 'P2', 'O11', 'O21', 'O51R', 'C51R', 'O11R',
'C41R', 'C31R', 'O31R', 'C21R', 'O21R', 'C11R', 'N11', 'C61',
'C51', 'C71', 'O71', 'N71', 'C41', 'C31', 'C21', 'P3', 'O3',
'O4', 'O5', 'H8', 'H9', 'H7', 'H6', 'H1', 'H5', 'H4', 'H13',
'H11', 'H12', 'H10', 'H18', 'H19', 'H17', 'H16', 'H3', 'H15',
'H2', 'H14', 'H23', 'H24', 'H25', 'H22', 'H26', 'H21', 'H20'] }
for res in ['DA','DC','DG','DT','RA','RC','RG','RU']:
# delete hydrogens
nsAtoms[ res ] = [ a for a in nsAtoms[res] if a[0] != 'H' ]
# create 3' and 5' versions
nsAtoms[ res + '3' ] = nsAtoms[res] + ['H3T']
nsAtoms[ res + '5' ] = ['H5T'] + nsAtoms[res][3:]
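## e.g. nsAtoms['DA3'] ends with the extra 'H3T', while nsAtoms['DA5'] starts
## with 'H5T' and drops the 5' phosphate atoms ('P', 'O1P', 'O2P').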
## NAP = NDP minus 'H26'; list.remove() returns None, so build a new list
nsAtoms['NAP'] = [ a for a in nsAtoms['NDP'] if a != 'H26' ]
#: map AA and NS and some other residue names to single letter code
#: (note: this re-binds resDic and, unlike the version built further above,
#: does not include the nsDic / xxDic extensions)
resDic = copy.copy( aaDic )
resDic.update( nsDicStandard )
#: map AA and NS residue names to list of allowed heavy atoms
atomDic = copy.copy( aaAtoms )
atomDic.update( nsAtoms )
#: some common synonyms of atom names
atomSynonyms = { "O'":'O', 'OT1':'O', "O''":'OXT', 'OT2':'OXT',
'O1':'O', 'O2':'OXT',
'CD':'CD1'}
hydrogenSynonyms = { 'H':'HN', '1HE2':'HE21', '2HE2':'HE22',
'1HH1':'HH11', '2HH1':'HH12', '1HH2':'HH21',
'2HH2':'HH22', '1HD2':'HD21', '2HD2':'HD22' }
###################
## Hydrogen bond
##
hbonds={ 'donors': {'GLY':['H','H1','H2','H3'],
'ALA':['H','H1','H2','H3'],
'VAL':['H','H1','H2','H3'],
'LEU':['H','H1','H2','H3'],
'ILE':['H','H1','H2','H3'],
'MET':['H','H1','H2','H3'],
'PRO':['H','H1','H2','H3'],
'PHE':['H','H1','H2','H3'],
'TRP':['H','H1','H2','H3','HE1'],
'SER':['H','H1','H2','H3','HG'],
'THR':['H','H1','H2','H3','HG1'],
'ASN':['H','H1','H2','H3','1HD2','2HD2'],
'GLN':['H','H1','H2','H3','1HE2','2HE2'],
'TYR':['H','H1','H2','H3','HH'],
'CYS':['H','H1','H2','H3','HG'],
'LYS':['H','H1','H2','H3','HZ1','HZ2','HZ3'],
'ARG':['H','H1','H2','H3','HE','1HH1','2HH1',
'1HH2','2HH2'],
'HIS':['H','H1','H2','H3','HD1','HE2'],
'ASP':['H','H1','H2','H3'],
'GLU':['H','H1','H2','H3']},
'acceptors': {'GLY':['O','OXT' ],
'ALA':['O','OXT'],
'VAL':['O','OXT'],
'LEU':['O','OXT'],
'ILE':['O','OXT'],
'MET':['O','SD','OXT'],
'PRO':['O','OXT'],
'PHE':['O','OXT'],
'TRP':['O','OXT'],
'SER':['O','OG', 'OXT'],
'THR':['O','OG1','CG2', 'OXT'],
'ASN':['O','OD1','OXT'],
'GLN':['O','OE1','OXT'],
'TYR':['O','OH','OXT'],
'CYS':['O','SG','OXT'],
'LYS':['O','OXT'],
'ARG':['O','OXT'],
'HIS':['O','OXT'],
'ASP':['O','OD1','OD2', 'OXT'],
'GLU':['O','OE1','OE2', 'OXT']} }
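## e.g. hbonds['donors']['SER'] lists the backbone amide H (and its N-terminal
## H1-H3 variants) plus the side-chain hydroxyl 'HG'; the matching entry
## hbonds['acceptors']['SER'] adds the side-chain 'OG' to 'O'/'OXT'.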
##############################
## Polar hydrogen connectivity -- PARAM19
polarH = {'GLY':{'H':'N','H1':'N','H2':'N','H3':'N'},
'ALA':{'H':'N','H1':'N','H2':'N','H3':'N'},
'VAL':{'H':'N','H1':'N','H2':'N','H3':'N'},
'LEU':{'H':'N','H1':'N','H2':'N','H3':'N'},
'ILE':{'H':'N','H1':'N','H2':'N','H3':'N'},
'MET':{'H':'N','H1':'N','H2':'N','H3':'N'},
'PRO':{'H':'N','H1':'N','H2':'N','H3':'N'},
'PHE':{'H':'N','H1':'N','H2':'N','H3':'N'},
'TRP':{'H':'N','H1':'N','H2':'N','H3':'N',
'HE1':'NE1'},
'SER':{'H':'N','H1':'N','H2':'N','H3':'N',
'HG':'OG'},
'THR':{'H':'N','H1':'N','H2':'N','H3':'N',
'HG1':'OG1'},
'ASN':{'H':'N','H1':'N','H2':'N','H3':'N',
'HD21':'ND2','HD22':'ND2'},
'GLN':{'H':'N','H1':'N','H2':'N','H3':'N',
'HE21':'NE2','HE22':'NE2'},
'TYR':{'H':'N','H1':'N','H2':'N','H3':'N',
'HH':'OH'},
'CYS':{'H':'N','H1':'N','H2':'N','H3':'N'},
'LYS':{'H':'N','H1':'N','H2':'N','H3':'N',
'HZ1':'NZ','HZ2':'NZ','HZ3':'NZ'},
'ARG':{'H':'N','H1':'N','H2':'N','H3':'N',
'HE':'NE', 'HH11':'NH1','HH12':'NH1',
'HH21':'NH2','HH22':'NH2'},
'HIS':{'H':'N','H1':'N','H2':'N','H3':'N',
'HD1':'ND1','HE2':'NE2'},
'ASP':{'H':'N','H1':'N','H2':'N','H3':'N'},
'GLU':{'H':'N','H1':'N','H2':'N','H3':'N'}}
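## Each polar hydrogen maps to the heavy atom it is bonded to, e.g.
## polarH['SER']['HG'] == 'OG' and polarH['ARG']['HH11'] == 'NH1'.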
## Scoring matrix for protein-protein interaction surfaces
## (Volume normalized values, Table IV in reference)
##
## The matrix is based on data from a db of 621 non-redundant protein-protein
## complexes; a CB-CB (CA for Gly) distance cutoff of 6 A was used
##
## Reference:
## "Residue Frequencies and Pair Preferences at Protein-Protein Interfaces"
## F. Glaser, D. M. Steinberg, I. A. Vakser and N. Ben-Tal,
## Proteins 43:89-102 (2001)
##
## Warning: this is just half of the matrix (above the diagonal); the residue
## names in the pairs are sorted in the same order as in Complex.resPairCounts()
pairScore = {'WW': 5.85, 'WY': 6.19, 'RT': 3.77, 'RV': 4.18, 'RW': 8.57, 'RR': 2.87,
'RS': 2.82, 'RY': 5.28, 'GW': 1.42, 'GV':-0.41, 'GT': 0.21, 'GS':-1.53,
'GR': 1.59, 'GQ': 1.70, 'GP':-0.51, 'GY': 1.25, 'GG':-4.40, 'GN':-0.54,
'GM': 0.91, 'GL':-0.37, 'GK': 1.33, 'GI': 0.77, 'GH': 1.08, 'SS':-0.09,
'IY': 5.61, 'HY': 6.05, 'HR': 4.90, 'HS': 0.80, 'HP': 2.89, 'HQ': 4.00,
'HV': 3.21, 'HW': 6.46, 'HT': 2.71, 'KN': 3.17, 'HK': 2.72, 'HH': 5.37,
'HI': 3.38, 'HN': 2.38, 'HL': 4.88, 'HM': 4.65, 'ST': 1.91, 'PR': 3.99,
'PS': 1.33, 'PP': 0.60, 'PQ': 3.50, 'PV': 2.90, 'PW': 7.87, 'PT': 2.65,
'PY': 4.22, 'IQ': 3.60, 'IP': 3.27, 'AK': 2.13, 'EM': 3.88, 'EL': 3.12,
'EN': 2.68, 'EI': 3.20, 'EH': 2.30, 'EK': 5.32, 'EE': 1.65, 'EG':-0.89,
'EF': 2.87, 'IT': 3.05, 'EY': 4.54, 'ET': 2.88, 'EW': 1.20, 'IV': 4.91,
'EQ': 1.95, 'EP': 3.17, 'ES': 2.60, 'ER': 5.75, 'II': 3.89, 'MM': 6.02,
'MN': 2.30, 'AS': 0.39, 'MT': 2.09, 'MW': 4.89, 'MV': 4.37, 'MQ': 4.18,
'MP': 3.38, 'MS': 1.61, 'MR': 3.62, 'MY': 4.81, 'IL': 4.59, 'FP': 4.25,
'FQ': 4.25, 'FR': 4.49, 'FS': 1.75, 'FT': 3.34, 'VV': 3.74, 'FV': 4.69,
'FW': 5.83, 'FY': 5.83, 'AV': 2.57, 'FF': 5.34, 'FG': 0.14, 'FH': 3.47,
'FI': 5.33, 'FK': 3.57, 'FL': 4.86, 'FM': 5.28, 'FN': 3.11, 'EV': 3.22,
'NN': 2.92, 'NY': 3.66, 'NP': 3.09, 'NQ': 3.45, 'NR': 3.85, 'NS': 1.77,
'NT': 2.52, 'NV': 1.36, 'NW': 3.54, 'CK': 2.05, 'CI': 1.76, 'CH': 4.12,
'CN':-0.42, 'CM': 1.84, 'CL': 2.93, 'CC': 7.65, 'CG':-0.25, 'CF': 3.68,
'CE': 2.51, 'CD': 0.24, 'CY': 2.47, 'CS': 2.48, 'CR': 2.81, 'CQ': 1.33,
'CP': 2.47, 'CW': 2.14, 'CV': 2.89, 'CT': 1.03, 'SY': 2.30, 'VW': 2.92,
'KK': 3.24, 'SW': 2.87, 'SV': 1.42, 'KM': 3.93, 'KL': 3.15, 'KS': 2.74,
'KR': 2.29, 'KQ': 3.50, 'KP': 3.75, 'KW': 5.76, 'KV': 4.45, 'KT': 3.67,
'KY': 5.26, 'DN': 3.85, 'DL': 1.40, 'DM': 0.36, 'DK': 3.90, 'DH': 5.20,
'DI': 2.30, 'DF': 0.99, 'DG':-0.08, 'DD': 0.13, 'DE': 0.08, 'YY': 5.93,
'DY': 1.76, 'DV': 1.93, 'DW': 2.62, 'DT': 3.88, 'DR': 4.94, 'DS': 2.94,
'DP': 1.46, 'DQ': 3.26, 'TY': 3.14, 'LN': 2.31, 'TW': 5.12, 'LL': 4.03,
'LM': 5.32, 'LV': 4.20, 'LW': 5.77, 'LT': 2.07, 'LR': 4.99, 'LS': 1.41,
'LP': 2.50, 'LQ': 3.46, 'LY': 4.19, 'AA':-0.52, 'AC': 1.46, 'AE': 1.71,
'AD': 1.13, 'AG':-1.77, 'AF': 3.00, 'AI': 2.84, 'AH': 2.59, 'IS': 1.00,
'IR': 3.80, 'AM': 2.30, 'AL': 2.77, 'IW': 6.24, 'AN': 1.69, 'AQ': 1.72,
'AP': 1.22, 'IK': 3.23, 'AR': 1.90, 'IM': 5.25, 'AT': 1.21, 'AW': 3.37,
'IN': 1.59, 'AY': 2.47, 'VY': 3.95, 'QQ': 2.83, 'QS': 2.00, 'QR': 4.50,
'QT': 1.82, 'QW': 1.37, 'QV': 3.22, 'QY': 2.05, 'TV': 2.83, 'TT': 1.27}
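## Illustrative lookup, relying on the alphabetically sorted pair keys noted
## in the warning above:
##
##     key = ''.join(sorted(('W', 'C')))   # -> 'CW'
##     pairScore[key]                      # -> 2.14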
## various constants
boltzmann = 1.38066e-23 ## [J/K]
NA = 6.02214199e+23 ## Avogadro constant [1/mol]
planck2 = 1.0545727e-34 ## [J s], h/2Pi
euler = N0.e
mu = 1.66056e-27 ## atomic mass unit in [kg]
angstroem = 1e-10 ## [m]
calorie = 4.184 ## [J]
#: dictionary with relative atomic mass of elements {'H':1.01, 'ZN':65.39, ...}
atomMasses = { 'H':1.00797, 'C':12.01115, 'N':14.0067,
'S':32.064, 'O':15.9994, 'P':30.9738, 'ZN': 65.39 }
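## e.g. a rough methane mass from the table:
## atomMasses['C'] + 4 * atomMasses['H']   # 12.01115 + 4*1.00797 = 16.04303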
def allAACodes():
"""
:return: list of all single AA codes, including B, Z, X
:rtype: [str]
"""
result = []
for aa in aaDic.values():
    if aa not in result:
        result.append( aa )
return result
def allAA():
"""
:return: list of all 20 'exact' single AA codes.
:rtype: [str]
"""
result = allAACodes()
for a in ['Z','B','X']:
result.remove( a )
return result
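## e.g. allAA() returns the 20 standard one-letter codes:
## len(allAA()) == 20 and 'X' not in allAA()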
def elementType( eLetter ):
"""
Classify an atom as polar or unpolar::
  elementType( eLetter ) -> 'p', 'u' or None
:param eLetter: element letter
:type eLetter: str
:return: 'p' for polar, 'u' for unpolar, or None if the element
is not classified
:rtype: str OR None
"""
types = {'p' : ['N','O','H','Cl'], ## polar
'u' : ['C','S'] } ## unpolar
for key, values in types.items():
if eLetter in values:
return key
return None
def resType( resCode ):
"""
Classify residues as aromatic (a), charged (c) or polar (p).
:param resCode: amino acid code
:type resCode: str
:return: list of types this residue belongs to; ['u'] if unclassified
:rtype: [str]
"""
types = {'a' : ['F','Y','W','H'], ## aromatic
'c' : ['E','D','K','R','H'], ## charged
'p' : ['Q','N','S'] } ## polar
result = []
for t in types.keys():
if resCode in types[t]:
result += [t]
if result == []:
result = ['u']
return result
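## e.g. resType('F') -> ['a'], resType('H') -> ['a', 'c'] (aromatic and
## charged), resType('G') -> ['u'] (the fallback for unclassified residues).
## The ['a', 'c'] ordering assumes insertion-ordered dicts (Python >= 3.7).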
def singleAA(seq, xtable=None, nonstandard=True, unknown='?' ):
"""
convert list with 3-letter AA code to list with 1-letter code
:param seq: amino acid sequence in 3-letter code
:type seq: [str]
:param xtable: dictionary with additional str:single_char mapping
:type xtable: dict
:param nonstandard: support non-standard residue names (default True)
:type nonstandard: bool
:param unknown: letter to use for unknown residues [default: '?']
:type unknown: str
:return: list with 1-letter code, e.g. ['A','C','L','A', ...]
:rtype: [str]
"""
result = [] # will hold 1-letter list
table = resDic if nonstandard else resDicStandard
if xtable:
table = copy.copy( table )
table.update( xtable )
for aa in seq:
try:
aa = aa.lower()
result += [ table[aa] ]
except (KeyError, AttributeError):
result += [unknown]
return result
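## e.g. singleAA(['ALA', 'CYX', 'FOO']) -> ['A', 'C', '?']: names are
## lower-cased before lookup, the non-standard 'cyx' resolves through resDic,
## and unknown residues fall back to the `unknown` placeholder.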
def single2longAA( seq ):
"""
Convert string of 1-letter AA code into list of 3-letter AA codes.
:param seq: amino acid sequence in 1-letter code
:type seq: str
:return: list with the amino acids in 3-letter code
:rtype: [str]
"""
## invert AA dict
invTab = {}
for key in aaDicStandard:
invTab[ aaDicStandard[key] ] = key
result = []
for aa in seq:
try:
aa = aa.upper()
result += [ invTab[aa].upper() ]
except (KeyError, AttributeError):
EHandler.warning("unknown residue: " + str(aa))
result += ['Xaa']
return result
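## e.g. single2longAA('ACD') -> ['ALA', 'CYS', 'ASP']; unknown letters are
## reported via EHandler.warning and replaced by 'Xaa'.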
def cmpAtoms( a1, a2 ):
"""
Comparison function for bringing atoms into standard order
within residues as defined by :class:`atomDic`.
:param a1: atom dictionary
:type a1: CrossView or equivalent dictionary
:param a2: atom dictionary
:type a2: CrossView or equivalent dictionary
:return: -1, 0 or 1, depending on the standard order of the two atoms
:rtype: int
"""
## get standard order within residues
target = atomDic[ a1['residue_name'] ]
i1 = len( target )
if a1['name'] in target:
i1 = target.index( a1['name'] )
i2 = len( target )
if a2['name'] in target:
i2 = target.index( a2['name'] )
return (i1 > i2) - (i1 < i2)
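## Note: `(i1 > i2) - (i1 < i2)` is the Python 3 idiom for the removed
## builtin cmp(i1, i2); it yields -1, 0 or 1, the contract expected by
## model.argsort() in sortAtomsOfModel() below.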
def sortAtomsOfModel( model ):
"""
Sort atoms within residues into the standard order defined in :class:`atomDic`.
:param model: model to sort
:type model: PDBModel
:return: model with sorted atoms
:rtype: PDBModel
"""
## make a copy
model = model.take( model.atomRange() )
## sort atoms
model = model.sort( model.argsort( cmpAtoms ) )
return model
#############
## TESTING
#############
from . import test as BT
class Test(BT.BiskitTest):
"""Test case"""
def test_molUtils( self ):
"""molUtils test"""
from biskit import PDBModel
S = self
## load a structure
S.m = PDBModel( t.testRoot('lig/1A19.pdb' ))
S.model_1 = S.m.compress( S.m.maskProtein() )
## now sort in standard order
S.model_2 = sortAtomsOfModel( S.model_1)
## compare the atom order
cmp = []
for a in S.model_1.atomRange():
cmp += [ cmpAtoms( S.model_1.atoms[a], S.model_2.atoms[a] )]
self.assertEqual( N0.sum(cmp), 159 )
## get the primary sequence as a string
S.seq = S.model_1.sequence()
## convert it to a list of three letter code
S.seq=single2longAA(S.seq)
## convert it to a list in one letter code
S.seq=singleAA(S.seq)
self.assertEqual( ''.join(S.seq), S.model_1.sequence() )
if __name__ == '__main__':
BT.localTest()
|
graik/biskit
|
biskit/molUtils.py
|
Python
|
gpl-3.0
| 26,169
|
[
"Avogadro"
] |
6ef7060811c2dd4f15eddefa5d1775331a0a1ad3f844f1978e1886fbd6dd130a
|
# Copyright 2007 Google Inc.
# Licensed to PSF under a Contributor Agreement.
"""Unittest for ipaddress module."""
import unittest
import re
import contextlib
import operator
import ipaddress
class BaseTestCase(unittest.TestCase):
# One big change in ipaddress over the original ipaddr module is
# error reporting that assumes users *don't know the rules* for what
# constitutes an RFC-compliant IP address.
# Ensuring these errors are emitted correctly in all relevant cases
# meant moving to a more systematic test structure that maps more
# directly to the module structure.
# Note that if the constructors are refactored so that addresses with
# multiple problems get classified differently, that's OK - just
# move the affected examples to the newly appropriate test case.
# There is some duplication between the original relatively ad hoc
# test suite and the new systematic tests. While some redundancy in
# testing is considered preferable to accidentally deleting a valid
# test, the original test suite will likely be reduced over time as
# redundant tests are identified.
@property
def factory(self):
raise NotImplementedError
@contextlib.contextmanager
def assertCleanError(self, exc_type, details, *args):
"""
Ensure exception does not display a context by default
Wraps unittest.TestCase.assertRaisesRegex
"""
if args:
details = details % args
cm = self.assertRaisesRegex(exc_type, details)
with cm as exc:
yield exc
# Ensure we produce clean tracebacks on failure
if exc.exception.__context__ is not None:
self.assertTrue(exc.exception.__suppress_context__)
def assertAddressError(self, details, *args):
"""Ensure a clean AddressValueError"""
return self.assertCleanError(ipaddress.AddressValueError,
details, *args)
def assertNetmaskError(self, details, *args):
"""Ensure a clean NetmaskValueError"""
return self.assertCleanError(ipaddress.NetmaskValueError,
details, *args)
def assertInstancesEqual(self, lhs, rhs):
"""Check constructor arguments produce equivalent instances"""
self.assertEqual(self.factory(lhs), self.factory(rhs))
class CommonTestMixin:
def test_empty_address(self):
with self.assertAddressError("Address cannot be empty"):
self.factory("")
def test_floats_rejected(self):
with self.assertAddressError(re.escape(repr("1.0"))):
self.factory(1.0)
def test_not_an_index_issue15559(self):
# Implementing __index__ makes for a very nasty interaction with the
# bytes constructor. Thus, we disallow implicit use as an integer
self.assertRaises(TypeError, operator.index, self.factory(1))
self.assertRaises(TypeError, hex, self.factory(1))
self.assertRaises(TypeError, bytes, self.factory(1))
class CommonTestMixin_v4(CommonTestMixin):
def test_leading_zeros(self):
self.assertInstancesEqual("000.000.000.000", "0.0.0.0")
self.assertInstancesEqual("192.168.000.001", "192.168.0.1")
def test_int(self):
self.assertInstancesEqual(0, "0.0.0.0")
self.assertInstancesEqual(3232235521, "192.168.0.1")
def test_packed(self):
self.assertInstancesEqual(bytes.fromhex("00000000"), "0.0.0.0")
self.assertInstancesEqual(bytes.fromhex("c0a80001"), "192.168.0.1")
def test_negative_ints_rejected(self):
msg = "-1 (< 0) is not permitted as an IPv4 address"
with self.assertAddressError(re.escape(msg)):
self.factory(-1)
def test_large_ints_rejected(self):
msg = "%d (>= 2**32) is not permitted as an IPv4 address"
with self.assertAddressError(re.escape(msg % 2**32)):
self.factory(2**32)
def test_bad_packed_length(self):
def assertBadLength(length):
addr = bytes(length)
msg = "%r (len %d != 4) is not permitted as an IPv4 address"
with self.assertAddressError(re.escape(msg % (addr, length))):
self.factory(addr)
assertBadLength(3)
assertBadLength(5)
class CommonTestMixin_v6(CommonTestMixin):
def test_leading_zeros(self):
self.assertInstancesEqual("0000::0000", "::")
self.assertInstancesEqual("000::c0a8:0001", "::c0a8:1")
def test_int(self):
self.assertInstancesEqual(0, "::")
self.assertInstancesEqual(3232235521, "::c0a8:1")
def test_packed(self):
addr = bytes(12) + bytes.fromhex("00000000")
self.assertInstancesEqual(addr, "::")
addr = bytes(12) + bytes.fromhex("c0a80001")
self.assertInstancesEqual(addr, "::c0a8:1")
addr = bytes.fromhex("c0a80001") + bytes(12)
self.assertInstancesEqual(addr, "c0a8:1::")
def test_negative_ints_rejected(self):
msg = "-1 (< 0) is not permitted as an IPv6 address"
with self.assertAddressError(re.escape(msg)):
self.factory(-1)
def test_large_ints_rejected(self):
msg = "%d (>= 2**128) is not permitted as an IPv6 address"
with self.assertAddressError(re.escape(msg % 2**128)):
self.factory(2**128)
def test_bad_packed_length(self):
def assertBadLength(length):
addr = bytes(length)
msg = "%r (len %d != 16) is not permitted as an IPv6 address"
with self.assertAddressError(re.escape(msg % (addr, length))):
self.factory(addr)
assertBadLength(15)
assertBadLength(17)
class AddressTestCase_v4(BaseTestCase, CommonTestMixin_v4):
factory = ipaddress.IPv4Address
def test_network_passed_as_address(self):
addr = "127.0.0.1/24"
with self.assertAddressError("Unexpected '/' in %r", addr):
ipaddress.IPv4Address(addr)
def test_bad_address_split(self):
def assertBadSplit(addr):
with self.assertAddressError("Expected 4 octets in %r", addr):
ipaddress.IPv4Address(addr)
assertBadSplit("127.0.1")
assertBadSplit("42.42.42.42.42")
assertBadSplit("42.42.42")
assertBadSplit("42.42")
assertBadSplit("42")
assertBadSplit("42..42.42.42")
assertBadSplit("42.42.42.42.")
assertBadSplit("42.42.42.42...")
assertBadSplit(".42.42.42.42")
assertBadSplit("...42.42.42.42")
assertBadSplit("016.016.016")
assertBadSplit("016.016")
assertBadSplit("016")
assertBadSplit("000")
assertBadSplit("0x0a.0x0a.0x0a")
assertBadSplit("0x0a.0x0a")
assertBadSplit("0x0a")
assertBadSplit(".")
assertBadSplit("bogus")
assertBadSplit("bogus.com")
assertBadSplit("1000")
assertBadSplit("1000000000000000")
assertBadSplit("192.168.0.1.com")
def test_empty_octet(self):
def assertBadOctet(addr):
with self.assertAddressError("Empty octet not permitted in %r",
addr):
ipaddress.IPv4Address(addr)
assertBadOctet("42..42.42")
assertBadOctet("...")
def test_invalid_characters(self):
def assertBadOctet(addr, octet):
msg = "Only decimal digits permitted in %r in %r" % (octet, addr)
with self.assertAddressError(re.escape(msg)):
ipaddress.IPv4Address(addr)
assertBadOctet("0x0a.0x0a.0x0a.0x0a", "0x0a")
assertBadOctet("0xa.0x0a.0x0a.0x0a", "0xa")
assertBadOctet("42.42.42.-0", "-0")
assertBadOctet("42.42.42.+0", "+0")
assertBadOctet("42.42.42.-42", "-42")
assertBadOctet("+1.+2.+3.4", "+1")
assertBadOctet("1.2.3.4e0", "4e0")
assertBadOctet("1.2.3.4::", "4::")
assertBadOctet("1.a.2.3", "a")
def test_octal_decimal_ambiguity(self):
def assertBadOctet(addr, octet):
msg = "Ambiguous (octal/decimal) value in %r not permitted in %r"
with self.assertAddressError(re.escape(msg % (octet, addr))):
ipaddress.IPv4Address(addr)
assertBadOctet("016.016.016.016", "016")
assertBadOctet("001.000.008.016", "008")
def test_octet_length(self):
def assertBadOctet(addr, octet):
msg = "At most 3 characters permitted in %r in %r"
with self.assertAddressError(re.escape(msg % (octet, addr))):
ipaddress.IPv4Address(addr)
assertBadOctet("0000.000.000.000", "0000")
assertBadOctet("12345.67899.-54321.-98765", "12345")
def test_octet_limit(self):
def assertBadOctet(addr, octet):
msg = "Octet %d (> 255) not permitted in %r" % (octet, addr)
with self.assertAddressError(re.escape(msg)):
ipaddress.IPv4Address(addr)
assertBadOctet("257.0.0.0", 257)
assertBadOctet("192.168.0.999", 999)
class AddressTestCase_v6(BaseTestCase, CommonTestMixin_v6):
factory = ipaddress.IPv6Address
def test_network_passed_as_address(self):
addr = "::1/24"
with self.assertAddressError("Unexpected '/' in %r", addr):
ipaddress.IPv6Address(addr)
def test_bad_address_split_v6_not_enough_parts(self):
def assertBadSplit(addr):
msg = "At least 3 parts expected in %r"
with self.assertAddressError(msg, addr):
ipaddress.IPv6Address(addr)
assertBadSplit(":")
assertBadSplit(":1")
assertBadSplit("FEDC:9878")
def test_bad_address_split_v6_too_many_colons(self):
def assertBadSplit(addr):
msg = "At most 8 colons permitted in %r"
with self.assertAddressError(msg, addr):
ipaddress.IPv6Address(addr)
assertBadSplit("9:8:7:6:5:4:3::2:1")
assertBadSplit("10:9:8:7:6:5:4:3:2:1")
assertBadSplit("::8:7:6:5:4:3:2:1")
assertBadSplit("8:7:6:5:4:3:2:1::")
# A trailing IPv4 address counts as two parts
assertBadSplit("10:9:8:7:6:5:4:3:42.42.42.42")
def test_bad_address_split_v6_too_many_parts(self):
def assertBadSplit(addr):
msg = "Exactly 8 parts expected without '::' in %r"
with self.assertAddressError(msg, addr):
ipaddress.IPv6Address(addr)
assertBadSplit("3ffe:0:0:0:0:0:0:0:1")
assertBadSplit("9:8:7:6:5:4:3:2:1")
assertBadSplit("7:6:5:4:3:2:1")
# A trailing IPv4 address counts as two parts
assertBadSplit("9:8:7:6:5:4:3:42.42.42.42")
assertBadSplit("7:6:5:4:3:42.42.42.42")
def test_bad_address_split_v6_too_many_parts_with_double_colon(self):
def assertBadSplit(addr):
msg = "Expected at most 7 other parts with '::' in %r"
with self.assertAddressError(msg, addr):
ipaddress.IPv6Address(addr)
assertBadSplit("1:2:3:4::5:6:7:8")
def test_bad_address_split_v6_repeated_double_colon(self):
def assertBadSplit(addr):
msg = "At most one '::' permitted in %r"
with self.assertAddressError(msg, addr):
ipaddress.IPv6Address(addr)
assertBadSplit("3ffe::1::1")
assertBadSplit("1::2::3::4:5")
assertBadSplit("2001::db:::1")
assertBadSplit("3ffe::1::")
assertBadSplit("::3ffe::1")
assertBadSplit(":3ffe::1::1")
assertBadSplit("3ffe::1::1:")
assertBadSplit(":3ffe::1::1:")
assertBadSplit(":::")
assertBadSplit('2001:db8:::1')
def test_bad_address_split_v6_leading_colon(self):
def assertBadSplit(addr):
msg = "Leading ':' only permitted as part of '::' in %r"
with self.assertAddressError(msg, addr):
ipaddress.IPv6Address(addr)
assertBadSplit(":2001:db8::1")
assertBadSplit(":1:2:3:4:5:6:7")
assertBadSplit(":1:2:3:4:5:6:")
assertBadSplit(":6:5:4:3:2:1::")
def test_bad_address_split_v6_trailing_colon(self):
def assertBadSplit(addr):
msg = "Trailing ':' only permitted as part of '::' in %r"
with self.assertAddressError(msg, addr):
ipaddress.IPv6Address(addr)
assertBadSplit("2001:db8::1:")
assertBadSplit("1:2:3:4:5:6:7:")
assertBadSplit("::1.2.3.4:")
assertBadSplit("::7:6:5:4:3:2:")
def test_bad_v4_part_in(self):
def assertBadAddressPart(addr, v4_error):
with self.assertAddressError("%s in %r", v4_error, addr):
ipaddress.IPv6Address(addr)
assertBadAddressPart("3ffe::1.net", "Expected 4 octets in '1.net'")
assertBadAddressPart("3ffe::127.0.1",
"Expected 4 octets in '127.0.1'")
assertBadAddressPart("::1.2.3",
"Expected 4 octets in '1.2.3'")
assertBadAddressPart("::1.2.3.4.5",
"Expected 4 octets in '1.2.3.4.5'")
assertBadAddressPart("3ffe::1.1.1.net",
"Only decimal digits permitted in 'net' "
"in '1.1.1.net'")
def test_invalid_characters(self):
def assertBadPart(addr, part):
msg = "Only hex digits permitted in %r in %r" % (part, addr)
with self.assertAddressError(re.escape(msg)):
ipaddress.IPv6Address(addr)
assertBadPart("3ffe::goog", "goog")
assertBadPart("3ffe::-0", "-0")
assertBadPart("3ffe::+0", "+0")
assertBadPart("3ffe::-1", "-1")
assertBadPart("1.2.3.4::", "1.2.3.4")
assertBadPart('1234:axy::b', "axy")
def test_part_length(self):
def assertBadPart(addr, part):
msg = "At most 4 characters permitted in %r in %r"
with self.assertAddressError(msg, part, addr):
ipaddress.IPv6Address(addr)
assertBadPart("::00000", "00000")
assertBadPart("3ffe::10000", "10000")
assertBadPart("02001:db8::", "02001")
assertBadPart('2001:888888::1', "888888")
class NetmaskTestMixin_v4(CommonTestMixin_v4):
"""Input validation on interfaces and networks is very similar"""
def test_split_netmask(self):
addr = "1.2.3.4/32/24"
with self.assertAddressError("Only one '/' permitted in %r" % addr):
self.factory(addr)
def test_address_errors(self):
def assertBadAddress(addr, details):
with self.assertAddressError(details):
self.factory(addr)
assertBadAddress("/", "Address cannot be empty")
assertBadAddress("/8", "Address cannot be empty")
assertBadAddress("bogus", "Expected 4 octets")
assertBadAddress("google.com", "Expected 4 octets")
assertBadAddress("10/8", "Expected 4 octets")
assertBadAddress("::1.2.3.4", "Only decimal digits")
assertBadAddress("1.2.3.256", re.escape("256 (> 255)"))
def test_valid_netmask(self):
self.assertEqual(str(self.factory('192.0.2.0/255.255.255.0')),
'192.0.2.0/24')
for i in range(0, 33):
# Generate and re-parse the CIDR format (trivial).
net_str = '0.0.0.0/%d' % i
net = self.factory(net_str)
self.assertEqual(str(net), net_str)
# Generate and re-parse the expanded netmask.
self.assertEqual(
str(self.factory('0.0.0.0/%s' % net.netmask)), net_str)
# Zero prefix is treated as decimal.
self.assertEqual(str(self.factory('0.0.0.0/0%d' % i)), net_str)
# Generate and re-parse the expanded hostmask. The ambiguous
# cases (/0 and /32) are treated as netmasks.
if i in (32, 0):
net_str = '0.0.0.0/%d' % (32 - i)
self.assertEqual(
str(self.factory('0.0.0.0/%s' % net.hostmask)), net_str)
def test_netmask_errors(self):
def assertBadNetmask(addr, netmask):
msg = "%r is not a valid netmask" % netmask
with self.assertNetmaskError(re.escape(msg)):
self.factory("%s/%s" % (addr, netmask))
assertBadNetmask("1.2.3.4", "")
assertBadNetmask("1.2.3.4", "-1")
assertBadNetmask("1.2.3.4", "+1")
assertBadNetmask("1.2.3.4", " 1 ")
assertBadNetmask("1.2.3.4", "0x1")
assertBadNetmask("1.2.3.4", "33")
assertBadNetmask("1.2.3.4", "254.254.255.256")
assertBadNetmask("1.2.3.4", "1.a.2.3")
assertBadNetmask("1.1.1.1", "254.xyz.2.3")
assertBadNetmask("1.1.1.1", "240.255.0.0")
assertBadNetmask("1.1.1.1", "255.254.128.0")
assertBadNetmask("1.1.1.1", "0.1.127.255")
assertBadNetmask("1.1.1.1", "pudding")
assertBadNetmask("1.1.1.1", "::")
class InterfaceTestCase_v4(BaseTestCase, NetmaskTestMixin_v4):
factory = ipaddress.IPv4Interface
class NetworkTestCase_v4(BaseTestCase, NetmaskTestMixin_v4):
factory = ipaddress.IPv4Network
class NetmaskTestMixin_v6(CommonTestMixin_v6):
"""Input validation on interfaces and networks is very similar"""
def test_split_netmask(self):
addr = "cafe:cafe::/128/190"
with self.assertAddressError("Only one '/' permitted in %r" % addr):
self.factory(addr)
def test_address_errors(self):
def assertBadAddress(addr, details):
with self.assertAddressError(details):
self.factory(addr)
assertBadAddress("/", "Address cannot be empty")
assertBadAddress("/8", "Address cannot be empty")
assertBadAddress("google.com", "At least 3 parts")
assertBadAddress("1.2.3.4", "At least 3 parts")
assertBadAddress("10/8", "At least 3 parts")
assertBadAddress("1234:axy::b", "Only hex digits")
def test_valid_netmask(self):
# We only support CIDR for IPv6, because expanded netmasks are not
# standard notation.
self.assertEqual(str(self.factory('2001:db8::/32')), '2001:db8::/32')
for i in range(0, 129):
# Generate and re-parse the CIDR format (trivial).
net_str = '::/%d' % i
self.assertEqual(str(self.factory(net_str)), net_str)
# Zero prefix is treated as decimal.
self.assertEqual(str(self.factory('::/0%d' % i)), net_str)
def test_netmask_errors(self):
def assertBadNetmask(addr, netmask):
msg = "%r is not a valid netmask" % netmask
with self.assertNetmaskError(re.escape(msg)):
self.factory("%s/%s" % (addr, netmask))
assertBadNetmask("::1", "")
assertBadNetmask("::1", "::1")
assertBadNetmask("::1", "1::")
assertBadNetmask("::1", "-1")
assertBadNetmask("::1", "+1")
assertBadNetmask("::1", " 1 ")
assertBadNetmask("::1", "0x1")
assertBadNetmask("::1", "129")
assertBadNetmask("::1", "1.2.3.4")
assertBadNetmask("::1", "pudding")
assertBadNetmask("::", "::")
class InterfaceTestCase_v6(BaseTestCase, NetmaskTestMixin_v6):
factory = ipaddress.IPv6Interface
class NetworkTestCase_v6(BaseTestCase, NetmaskTestMixin_v6):
factory = ipaddress.IPv6Network
class FactoryFunctionErrors(BaseTestCase):
def assertFactoryError(self, factory, kind):
"""Ensure a clean ValueError with the expected message"""
addr = "camelot"
msg = '%r does not appear to be an IPv4 or IPv6 %s'
with self.assertCleanError(ValueError, msg, addr, kind):
factory(addr)
def test_ip_address(self):
self.assertFactoryError(ipaddress.ip_address, "address")
def test_ip_interface(self):
self.assertFactoryError(ipaddress.ip_interface, "interface")
def test_ip_network(self):
self.assertFactoryError(ipaddress.ip_network, "network")
class ComparisonTests(unittest.TestCase):
v4addr = ipaddress.IPv4Address(1)
v4net = ipaddress.IPv4Network(1)
v4intf = ipaddress.IPv4Interface(1)
v6addr = ipaddress.IPv6Address(1)
v6net = ipaddress.IPv6Network(1)
v6intf = ipaddress.IPv6Interface(1)
v4_addresses = [v4addr, v4intf]
v4_objects = v4_addresses + [v4net]
v6_addresses = [v6addr, v6intf]
v6_objects = v6_addresses + [v6net]
objects = v4_objects + v6_objects
def test_foreign_type_equality(self):
# __eq__ should never raise TypeError directly
other = object()
for obj in self.objects:
self.assertNotEqual(obj, other)
self.assertFalse(obj == other)
self.assertEqual(obj.__eq__(other), NotImplemented)
self.assertEqual(obj.__ne__(other), NotImplemented)
def test_mixed_type_equality(self):
# Ensure none of the internal objects accidentally
# expose the right set of attributes to become "equal"
for lhs in self.objects:
for rhs in self.objects:
if lhs is rhs:
continue
self.assertNotEqual(lhs, rhs)
def test_containment(self):
for obj in self.v4_addresses:
self.assertIn(obj, self.v4net)
for obj in self.v6_addresses:
self.assertIn(obj, self.v6net)
for obj in self.v4_objects + [self.v6net]:
self.assertNotIn(obj, self.v6net)
for obj in self.v6_objects + [self.v4net]:
self.assertNotIn(obj, self.v4net)
def test_mixed_type_ordering(self):
for lhs in self.objects:
for rhs in self.objects:
if isinstance(lhs, type(rhs)) or isinstance(rhs, type(lhs)):
continue
self.assertRaises(TypeError, lambda: lhs < rhs)
self.assertRaises(TypeError, lambda: lhs > rhs)
self.assertRaises(TypeError, lambda: lhs <= rhs)
self.assertRaises(TypeError, lambda: lhs >= rhs)
def test_mixed_type_key(self):
# with get_mixed_type_key, you can sort addresses and network.
v4_ordered = [self.v4addr, self.v4net, self.v4intf]
v6_ordered = [self.v6addr, self.v6net, self.v6intf]
self.assertEqual(v4_ordered,
sorted(self.v4_objects,
key=ipaddress.get_mixed_type_key))
self.assertEqual(v6_ordered,
sorted(self.v6_objects,
key=ipaddress.get_mixed_type_key))
self.assertEqual(v4_ordered + v6_ordered,
sorted(self.objects,
key=ipaddress.get_mixed_type_key))
self.assertEqual(NotImplemented, ipaddress.get_mixed_type_key(object))
def test_incompatible_versions(self):
# These should always raise TypeError
v4addr = ipaddress.ip_address('1.1.1.1')
v4net = ipaddress.ip_network('1.1.1.1')
v6addr = ipaddress.ip_address('::1')
v6net = ipaddress.ip_network('::1')
self.assertRaises(TypeError, v4addr.__lt__, v6addr)
self.assertRaises(TypeError, v4addr.__gt__, v6addr)
self.assertRaises(TypeError, v4net.__lt__, v6net)
self.assertRaises(TypeError, v4net.__gt__, v6net)
self.assertRaises(TypeError, v6addr.__lt__, v4addr)
self.assertRaises(TypeError, v6addr.__gt__, v4addr)
self.assertRaises(TypeError, v6net.__lt__, v4net)
self.assertRaises(TypeError, v6net.__gt__, v4net)
class IpaddrUnitTest(unittest.TestCase):
def setUp(self):
self.ipv4_address = ipaddress.IPv4Address('1.2.3.4')
self.ipv4_interface = ipaddress.IPv4Interface('1.2.3.4/24')
self.ipv4_network = ipaddress.IPv4Network('1.2.3.0/24')
#self.ipv4_hostmask = ipaddress.IPv4Interface('10.0.0.1/0.255.255.255')
self.ipv6_address = ipaddress.IPv6Interface(
'2001:658:22a:cafe:200:0:0:1')
self.ipv6_interface = ipaddress.IPv6Interface(
'2001:658:22a:cafe:200:0:0:1/64')
self.ipv6_network = ipaddress.IPv6Network('2001:658:22a:cafe::/64')
def testRepr(self):
self.assertEqual("IPv4Interface('1.2.3.4/32')",
repr(ipaddress.IPv4Interface('1.2.3.4')))
self.assertEqual("IPv6Interface('::1/128')",
repr(ipaddress.IPv6Interface('::1')))
# issue57
def testAddressIntMath(self):
self.assertEqual(ipaddress.IPv4Address('1.1.1.1') + 255,
ipaddress.IPv4Address('1.1.2.0'))
self.assertEqual(ipaddress.IPv4Address('1.1.1.1') - 256,
ipaddress.IPv4Address('1.1.0.1'))
self.assertEqual(ipaddress.IPv6Address('::1') + (2**16 - 2),
ipaddress.IPv6Address('::ffff'))
self.assertEqual(ipaddress.IPv6Address('::ffff') - (2**16 - 2),
ipaddress.IPv6Address('::1'))
def testInvalidIntToBytes(self):
self.assertRaises(ValueError, ipaddress.v4_int_to_packed, -1)
self.assertRaises(ValueError, ipaddress.v4_int_to_packed,
2 ** ipaddress.IPV4LENGTH)
self.assertRaises(ValueError, ipaddress.v6_int_to_packed, -1)
self.assertRaises(ValueError, ipaddress.v6_int_to_packed,
2 ** ipaddress.IPV6LENGTH)
def testInternals(self):
first, last = ipaddress._find_address_range([
ipaddress.IPv4Address('10.10.10.10'),
ipaddress.IPv4Address('10.10.10.12')])
self.assertEqual(first, last)
self.assertEqual(128, ipaddress._count_righthand_zero_bits(0, 128))
self.assertEqual("IPv4Network('1.2.3.0/24')", repr(self.ipv4_network))
def testMissingAddressVersion(self):
class Broken(ipaddress._BaseAddress):
pass
broken = Broken('127.0.0.1')
with self.assertRaisesRegex(NotImplementedError, "Broken.*version"):
broken.version
def testMissingNetworkVersion(self):
class Broken(ipaddress._BaseNetwork):
pass
broken = Broken('127.0.0.1')
with self.assertRaisesRegex(NotImplementedError, "Broken.*version"):
broken.version
def testMissingAddressClass(self):
class Broken(ipaddress._BaseNetwork):
pass
broken = Broken('127.0.0.1')
with self.assertRaisesRegex(NotImplementedError, "Broken.*address"):
broken._address_class
def testGetNetwork(self):
self.assertEqual(int(self.ipv4_network.network_address), 16909056)
self.assertEqual(str(self.ipv4_network.network_address), '1.2.3.0')
self.assertEqual(int(self.ipv6_network.network_address),
42540616829182469433403647294022090752)
self.assertEqual(str(self.ipv6_network.network_address),
'2001:658:22a:cafe::')
self.assertEqual(str(self.ipv6_network.hostmask),
'::ffff:ffff:ffff:ffff')
def testIpFromInt(self):
self.assertEqual(self.ipv4_interface._ip,
ipaddress.IPv4Interface(16909060)._ip)
ipv4 = ipaddress.ip_network('1.2.3.4')
ipv6 = ipaddress.ip_network('2001:658:22a:cafe:200:0:0:1')
self.assertEqual(ipv4, ipaddress.ip_network(int(ipv4.network_address)))
self.assertEqual(ipv6, ipaddress.ip_network(int(ipv6.network_address)))
v6_int = 42540616829182469433547762482097946625
self.assertEqual(self.ipv6_interface._ip,
ipaddress.IPv6Interface(v6_int)._ip)
self.assertEqual(ipaddress.ip_network(self.ipv4_address._ip).version,
4)
self.assertEqual(ipaddress.ip_network(self.ipv6_address._ip).version,
6)
def testIpFromPacked(self):
address = ipaddress.ip_address
self.assertEqual(self.ipv4_interface._ip,
ipaddress.ip_interface(b'\x01\x02\x03\x04')._ip)
self.assertEqual(address('255.254.253.252'),
address(b'\xff\xfe\xfd\xfc'))
self.assertEqual(self.ipv6_interface.ip,
ipaddress.ip_interface(
b'\x20\x01\x06\x58\x02\x2a\xca\xfe'
b'\x02\x00\x00\x00\x00\x00\x00\x01').ip)
self.assertEqual(address('ffff:2:3:4:ffff::'),
address(b'\xff\xff\x00\x02\x00\x03\x00\x04' +
b'\xff\xff' + b'\x00' * 6))
self.assertEqual(address('::'),
address(b'\x00' * 16))
def testGetIp(self):
self.assertEqual(int(self.ipv4_interface.ip), 16909060)
self.assertEqual(str(self.ipv4_interface.ip), '1.2.3.4')
self.assertEqual(int(self.ipv6_interface.ip),
42540616829182469433547762482097946625)
self.assertEqual(str(self.ipv6_interface.ip),
'2001:658:22a:cafe:200::1')
def testGetNetmask(self):
self.assertEqual(int(self.ipv4_network.netmask), 4294967040)
self.assertEqual(str(self.ipv4_network.netmask), '255.255.255.0')
self.assertEqual(int(self.ipv6_network.netmask),
340282366920938463444927863358058659840)
self.assertEqual(self.ipv6_network.prefixlen, 64)
def testZeroNetmask(self):
ipv4_zero_netmask = ipaddress.IPv4Interface('1.2.3.4/0')
self.assertEqual(int(ipv4_zero_netmask.network.netmask), 0)
self.assertEqual(ipv4_zero_netmask._prefix_from_prefix_string('0'), 0)
self.assertTrue(ipv4_zero_netmask._is_valid_netmask('0'))
self.assertTrue(ipv4_zero_netmask._is_valid_netmask('0.0.0.0'))
self.assertFalse(ipv4_zero_netmask._is_valid_netmask('invalid'))
ipv6_zero_netmask = ipaddress.IPv6Interface('::1/0')
self.assertEqual(int(ipv6_zero_netmask.network.netmask), 0)
self.assertEqual(ipv6_zero_netmask._prefix_from_prefix_string('0'), 0)
def testIPv4NetAndHostmasks(self):
net = self.ipv4_network
self.assertFalse(net._is_valid_netmask('invalid'))
self.assertTrue(net._is_valid_netmask('128.128.128.128'))
self.assertFalse(net._is_valid_netmask('128.128.128.127'))
self.assertFalse(net._is_valid_netmask('128.128.128.255'))
self.assertTrue(net._is_valid_netmask('255.128.128.128'))
self.assertFalse(net._is_hostmask('invalid'))
self.assertTrue(net._is_hostmask('128.255.255.255'))
self.assertFalse(net._is_hostmask('255.255.255.255'))
self.assertFalse(net._is_hostmask('1.2.3.4'))
net = ipaddress.IPv4Network('127.0.0.0/0.0.0.255')
self.assertEqual(net.prefixlen, 24)
def testGetBroadcast(self):
self.assertEqual(int(self.ipv4_network.broadcast_address), 16909311)
self.assertEqual(str(self.ipv4_network.broadcast_address), '1.2.3.255')
self.assertEqual(int(self.ipv6_network.broadcast_address),
42540616829182469451850391367731642367)
self.assertEqual(str(self.ipv6_network.broadcast_address),
'2001:658:22a:cafe:ffff:ffff:ffff:ffff')
def testGetPrefixlen(self):
self.assertEqual(self.ipv4_interface.network.prefixlen, 24)
self.assertEqual(self.ipv6_interface.network.prefixlen, 64)
def testGetSupernet(self):
self.assertEqual(self.ipv4_network.supernet().prefixlen, 23)
self.assertEqual(str(self.ipv4_network.supernet().network_address),
'1.2.2.0')
self.assertEqual(
ipaddress.IPv4Interface('0.0.0.0/0').network.supernet(),
ipaddress.IPv4Network('0.0.0.0/0'))
self.assertEqual(self.ipv6_network.supernet().prefixlen, 63)
self.assertEqual(str(self.ipv6_network.supernet().network_address),
'2001:658:22a:cafe::')
self.assertEqual(ipaddress.IPv6Interface('::0/0').network.supernet(),
ipaddress.IPv6Network('::0/0'))
def testGetSupernet3(self):
self.assertEqual(self.ipv4_network.supernet(3).prefixlen, 21)
self.assertEqual(str(self.ipv4_network.supernet(3).network_address),
'1.2.0.0')
self.assertEqual(self.ipv6_network.supernet(3).prefixlen, 61)
self.assertEqual(str(self.ipv6_network.supernet(3).network_address),
'2001:658:22a:caf8::')
def testGetSupernet4(self):
self.assertRaises(ValueError, self.ipv4_network.supernet,
prefixlen_diff=2, new_prefix=1)
self.assertRaises(ValueError, self.ipv4_network.supernet,
new_prefix=25)
self.assertEqual(self.ipv4_network.supernet(prefixlen_diff=2),
self.ipv4_network.supernet(new_prefix=22))
self.assertRaises(ValueError, self.ipv6_network.supernet,
prefixlen_diff=2, new_prefix=1)
self.assertRaises(ValueError, self.ipv6_network.supernet,
new_prefix=65)
self.assertEqual(self.ipv6_network.supernet(prefixlen_diff=2),
self.ipv6_network.supernet(new_prefix=62))
def testHosts(self):
hosts = list(self.ipv4_network.hosts())
self.assertEqual(254, len(hosts))
self.assertEqual(ipaddress.IPv4Address('1.2.3.1'), hosts[0])
self.assertEqual(ipaddress.IPv4Address('1.2.3.254'), hosts[-1])
# special case where only 1 bit is left for address
self.assertEqual([ipaddress.IPv4Address('2.0.0.0'),
ipaddress.IPv4Address('2.0.0.1')],
list(ipaddress.ip_network('2.0.0.0/31').hosts()))
def testFancySubnetting(self):
self.assertEqual(sorted(self.ipv4_network.subnets(prefixlen_diff=3)),
sorted(self.ipv4_network.subnets(new_prefix=27)))
self.assertRaises(ValueError, list,
self.ipv4_network.subnets(new_prefix=23))
self.assertRaises(ValueError, list,
self.ipv4_network.subnets(prefixlen_diff=3,
new_prefix=27))
self.assertEqual(sorted(self.ipv6_network.subnets(prefixlen_diff=4)),
sorted(self.ipv6_network.subnets(new_prefix=68)))
self.assertRaises(ValueError, list,
self.ipv6_network.subnets(new_prefix=63))
self.assertRaises(ValueError, list,
self.ipv6_network.subnets(prefixlen_diff=4,
new_prefix=68))
def testGetSubnets(self):
self.assertEqual(list(self.ipv4_network.subnets())[0].prefixlen, 25)
self.assertEqual(str(list(
self.ipv4_network.subnets())[0].network_address),
'1.2.3.0')
self.assertEqual(str(list(
self.ipv4_network.subnets())[1].network_address),
'1.2.3.128')
self.assertEqual(list(self.ipv6_network.subnets())[0].prefixlen, 65)
def testGetSubnetForSingle32(self):
ip = ipaddress.IPv4Network('1.2.3.4/32')
subnets1 = [str(x) for x in ip.subnets()]
subnets2 = [str(x) for x in ip.subnets(2)]
self.assertEqual(subnets1, ['1.2.3.4/32'])
self.assertEqual(subnets1, subnets2)
def testGetSubnetForSingle128(self):
ip = ipaddress.IPv6Network('::1/128')
subnets1 = [str(x) for x in ip.subnets()]
subnets2 = [str(x) for x in ip.subnets(2)]
self.assertEqual(subnets1, ['::1/128'])
self.assertEqual(subnets1, subnets2)
def testSubnet2(self):
ips = [str(x) for x in self.ipv4_network.subnets(2)]
self.assertEqual(
ips,
['1.2.3.0/26', '1.2.3.64/26', '1.2.3.128/26', '1.2.3.192/26'])
ipsv6 = [str(x) for x in self.ipv6_network.subnets(2)]
self.assertEqual(
ipsv6,
['2001:658:22a:cafe::/66',
'2001:658:22a:cafe:4000::/66',
'2001:658:22a:cafe:8000::/66',
'2001:658:22a:cafe:c000::/66'])
def testSubnetFailsForLargeCidrDiff(self):
self.assertRaises(ValueError, list,
self.ipv4_interface.network.subnets(9))
self.assertRaises(ValueError, list,
self.ipv4_network.subnets(9))
self.assertRaises(ValueError, list,
self.ipv6_interface.network.subnets(65))
self.assertRaises(ValueError, list,
self.ipv6_network.subnets(65))
def testSupernetFailsForLargeCidrDiff(self):
self.assertRaises(ValueError,
self.ipv4_interface.network.supernet, 25)
self.assertRaises(ValueError,
self.ipv6_interface.network.supernet, 65)
def testSubnetFailsForNegativeCidrDiff(self):
self.assertRaises(ValueError, list,
self.ipv4_interface.network.subnets(-1))
self.assertRaises(ValueError, list,
self.ipv4_network.subnets(-1))
self.assertRaises(ValueError, list,
self.ipv6_interface.network.subnets(-1))
self.assertRaises(ValueError, list,
self.ipv6_network.subnets(-1))
def testGetNum_Addresses(self):
self.assertEqual(self.ipv4_network.num_addresses, 256)
self.assertEqual(list(self.ipv4_network.subnets())[0].num_addresses,
128)
self.assertEqual(self.ipv4_network.supernet().num_addresses, 512)
self.assertEqual(self.ipv6_network.num_addresses, 18446744073709551616)
self.assertEqual(list(self.ipv6_network.subnets())[0].num_addresses,
9223372036854775808)
self.assertEqual(self.ipv6_network.supernet().num_addresses,
36893488147419103232)
def testContains(self):
self.assertIn(ipaddress.IPv4Interface('1.2.3.128/25'),
self.ipv4_network)
self.assertNotIn(ipaddress.IPv4Interface('1.2.4.1/24'),
self.ipv4_network)
# We can test addresses and strings as well.
addr1 = ipaddress.IPv4Address('1.2.3.37')
self.assertIn(addr1, self.ipv4_network)
# issue 61, bad network comparison on like-ip'd network objects
# with identical broadcast addresses.
self.assertFalse(ipaddress.IPv4Network('1.1.0.0/16').__contains__(
ipaddress.IPv4Network('1.0.0.0/15')))
def testNth(self):
self.assertEqual(str(self.ipv4_network[5]), '1.2.3.5')
self.assertRaises(IndexError, self.ipv4_network.__getitem__, 256)
self.assertEqual(str(self.ipv6_network[5]),
'2001:658:22a:cafe::5')
def testGetitem(self):
# http://code.google.com/p/ipaddr-py/issues/detail?id=15
addr = ipaddress.IPv4Network('172.31.255.128/255.255.255.240')
self.assertEqual(28, addr.prefixlen)
addr_list = list(addr)
self.assertEqual('172.31.255.128', str(addr_list[0]))
self.assertEqual('172.31.255.128', str(addr[0]))
self.assertEqual('172.31.255.143', str(addr_list[-1]))
self.assertEqual('172.31.255.143', str(addr[-1]))
self.assertEqual(addr_list[-1], addr[-1])
def testEqual(self):
self.assertTrue(self.ipv4_interface ==
ipaddress.IPv4Interface('1.2.3.4/24'))
self.assertFalse(self.ipv4_interface ==
ipaddress.IPv4Interface('1.2.3.4/23'))
self.assertFalse(self.ipv4_interface ==
ipaddress.IPv6Interface('::1.2.3.4/24'))
self.assertFalse(self.ipv4_interface == '')
self.assertFalse(self.ipv4_interface == [])
self.assertFalse(self.ipv4_interface == 2)
self.assertTrue(self.ipv6_interface ==
ipaddress.IPv6Interface('2001:658:22a:cafe:200::1/64'))
self.assertFalse(self.ipv6_interface ==
ipaddress.IPv6Interface('2001:658:22a:cafe:200::1/63'))
self.assertFalse(self.ipv6_interface ==
ipaddress.IPv4Interface('1.2.3.4/23'))
self.assertFalse(self.ipv6_interface == '')
self.assertFalse(self.ipv6_interface == [])
self.assertFalse(self.ipv6_interface == 2)
def testNotEqual(self):
self.assertFalse(self.ipv4_interface !=
ipaddress.IPv4Interface('1.2.3.4/24'))
self.assertTrue(self.ipv4_interface !=
ipaddress.IPv4Interface('1.2.3.4/23'))
self.assertTrue(self.ipv4_interface !=
ipaddress.IPv6Interface('::1.2.3.4/24'))
self.assertTrue(self.ipv4_interface != '')
self.assertTrue(self.ipv4_interface != [])
self.assertTrue(self.ipv4_interface != 2)
self.assertTrue(self.ipv4_address !=
ipaddress.IPv4Address('1.2.3.5'))
self.assertTrue(self.ipv4_address != '')
self.assertTrue(self.ipv4_address != [])
self.assertTrue(self.ipv4_address != 2)
self.assertFalse(self.ipv6_interface !=
ipaddress.IPv6Interface('2001:658:22a:cafe:200::1/64'))
self.assertTrue(self.ipv6_interface !=
ipaddress.IPv6Interface('2001:658:22a:cafe:200::1/63'))
self.assertTrue(self.ipv6_interface !=
ipaddress.IPv4Interface('1.2.3.4/23'))
self.assertTrue(self.ipv6_interface != '')
self.assertTrue(self.ipv6_interface != [])
self.assertTrue(self.ipv6_interface != 2)
self.assertTrue(self.ipv6_address !=
ipaddress.IPv4Address('1.2.3.4'))
self.assertTrue(self.ipv6_address != '')
self.assertTrue(self.ipv6_address != [])
self.assertTrue(self.ipv6_address != 2)
def testSlash32Constructor(self):
self.assertEqual(str(ipaddress.IPv4Interface(
'1.2.3.4/255.255.255.255')), '1.2.3.4/32')
def testSlash128Constructor(self):
self.assertEqual(str(ipaddress.IPv6Interface('::1/128')),
'::1/128')
def testSlash0Constructor(self):
self.assertEqual(str(ipaddress.IPv4Interface('1.2.3.4/0.0.0.0')),
'1.2.3.4/0')
def testCollapsing(self):
# test only IP addresses including some duplicates
ip1 = ipaddress.IPv4Address('1.1.1.0')
ip2 = ipaddress.IPv4Address('1.1.1.1')
ip3 = ipaddress.IPv4Address('1.1.1.2')
ip4 = ipaddress.IPv4Address('1.1.1.3')
ip5 = ipaddress.IPv4Address('1.1.1.4')
ip6 = ipaddress.IPv4Address('1.1.1.0')
# check that addresses are subsumed properly.
collapsed = ipaddress.collapse_addresses(
[ip1, ip2, ip3, ip4, ip5, ip6])
self.assertEqual(list(collapsed),
[ipaddress.IPv4Network('1.1.1.0/30'),
ipaddress.IPv4Network('1.1.1.4/32')])
# test a mix of IP addresses and networks including some duplicates
ip1 = ipaddress.IPv4Address('1.1.1.0')
ip2 = ipaddress.IPv4Address('1.1.1.1')
ip3 = ipaddress.IPv4Address('1.1.1.2')
ip4 = ipaddress.IPv4Address('1.1.1.3')
#ip5 = ipaddress.IPv4Interface('1.1.1.4/30')
#ip6 = ipaddress.IPv4Interface('1.1.1.4/30')
# check that addresses are subsumed properly.
collapsed = ipaddress.collapse_addresses([ip1, ip2, ip3, ip4])
self.assertEqual(list(collapsed),
[ipaddress.IPv4Network('1.1.1.0/30')])
# test only IP networks
ip1 = ipaddress.IPv4Network('1.1.0.0/24')
ip2 = ipaddress.IPv4Network('1.1.1.0/24')
ip3 = ipaddress.IPv4Network('1.1.2.0/24')
ip4 = ipaddress.IPv4Network('1.1.3.0/24')
ip5 = ipaddress.IPv4Network('1.1.4.0/24')
# stored in no particular order, so that collapse_addresses() has to
# sort the input itself
ip6 = ipaddress.IPv4Network('1.1.0.0/22')
# check that addresses are subsumed properly.
collapsed = ipaddress.collapse_addresses([ip1, ip2, ip3, ip4, ip5,
ip6])
self.assertEqual(list(collapsed),
[ipaddress.IPv4Network('1.1.0.0/22'),
ipaddress.IPv4Network('1.1.4.0/24')])
# test that two addresses are supernet'ed properly
collapsed = ipaddress.collapse_addresses([ip1, ip2])
self.assertEqual(list(collapsed),
[ipaddress.IPv4Network('1.1.0.0/23')])
# test same IP networks
ip_same1 = ip_same2 = ipaddress.IPv4Network('1.1.1.1/32')
self.assertEqual(list(ipaddress.collapse_addresses(
[ip_same1, ip_same2])),
[ip_same1])
# test same IP addresses
ip_same1 = ip_same2 = ipaddress.IPv4Address('1.1.1.1')
self.assertEqual(list(ipaddress.collapse_addresses(
[ip_same1, ip_same2])),
[ipaddress.ip_network('1.1.1.1/32')])
ip1 = ipaddress.IPv6Network('2001::/100')
ip2 = ipaddress.IPv6Network('2001::/120')
ip3 = ipaddress.IPv6Network('2001::/96')
# test that ipv6 addresses are subsumed properly.
collapsed = ipaddress.collapse_addresses([ip1, ip2, ip3])
self.assertEqual(list(collapsed), [ip3])
# the toejam test
addr_tuples = [
(ipaddress.ip_address('1.1.1.1'),
ipaddress.ip_address('::1')),
(ipaddress.IPv4Network('1.1.0.0/24'),
ipaddress.IPv6Network('2001::/120')),
(ipaddress.IPv4Network('1.1.0.0/32'),
ipaddress.IPv6Network('2001::/128')),
]
for ip1, ip2 in addr_tuples:
self.assertRaises(TypeError, ipaddress.collapse_addresses,
[ip1, ip2])
def testSummarizing(self):
#ip = ipaddress.ip_address
#ipnet = ipaddress.ip_network
summarize = ipaddress.summarize_address_range
ip1 = ipaddress.ip_address('1.1.1.0')
ip2 = ipaddress.ip_address('1.1.1.255')
# summarize works only for IPv4 & IPv6
class IPv7Address(ipaddress.IPv6Address):
@property
def version(self):
return 7
ip_invalid1 = IPv7Address('::1')
ip_invalid2 = IPv7Address('::1')
self.assertRaises(ValueError, list,
summarize(ip_invalid1, ip_invalid2))
# test that a summary over ip4 & ip6 fails
self.assertRaises(TypeError, list,
summarize(ip1, ipaddress.IPv6Address('::1')))
# test a /24 is summarized properly
self.assertEqual(list(summarize(ip1, ip2))[0],
ipaddress.ip_network('1.1.1.0/24'))
# test an IPv4 range that isn't on a network byte boundary
ip2 = ipaddress.ip_address('1.1.1.8')
self.assertEqual(list(summarize(ip1, ip2)),
[ipaddress.ip_network('1.1.1.0/29'),
ipaddress.ip_network('1.1.1.8')])
# all!
ip1 = ipaddress.IPv4Address(0)
ip2 = ipaddress.IPv4Address(ipaddress.IPv4Address._ALL_ONES)
self.assertEqual([ipaddress.IPv4Network('0.0.0.0/0')],
list(summarize(ip1, ip2)))
ip1 = ipaddress.ip_address('1::')
ip2 = ipaddress.ip_address('1:ffff:ffff:ffff:ffff:ffff:ffff:ffff')
        # test an IPv6 range is summarized properly
self.assertEqual(list(summarize(ip1, ip2))[0],
ipaddress.ip_network('1::/16'))
# test an IPv6 range that isn't on a network byte boundary
ip2 = ipaddress.ip_address('2::')
self.assertEqual(list(summarize(ip1, ip2)),
[ipaddress.ip_network('1::/16'),
ipaddress.ip_network('2::/128')])
# test exception raised when first is greater than last
self.assertRaises(ValueError, list,
summarize(ipaddress.ip_address('1.1.1.0'),
ipaddress.ip_address('1.1.0.0')))
# test exception raised when first and last aren't IP addresses
self.assertRaises(TypeError, list,
summarize(ipaddress.ip_network('1.1.1.0'),
ipaddress.ip_network('1.1.0.0')))
# test exception raised when first and last are not same version
self.assertRaises(TypeError, list,
summarize(ipaddress.ip_address('::'),
ipaddress.ip_network('1.1.0.0')))
def testAddressComparison(self):
self.assertTrue(ipaddress.ip_address('1.1.1.1') <=
ipaddress.ip_address('1.1.1.1'))
self.assertTrue(ipaddress.ip_address('1.1.1.1') <=
ipaddress.ip_address('1.1.1.2'))
self.assertTrue(ipaddress.ip_address('::1') <=
ipaddress.ip_address('::1'))
self.assertTrue(ipaddress.ip_address('::1') <=
ipaddress.ip_address('::2'))
def testInterfaceComparison(self):
self.assertTrue(ipaddress.ip_interface('1.1.1.1') <=
ipaddress.ip_interface('1.1.1.1'))
self.assertTrue(ipaddress.ip_interface('1.1.1.1') <=
ipaddress.ip_interface('1.1.1.2'))
self.assertTrue(ipaddress.ip_interface('::1') <=
ipaddress.ip_interface('::1'))
self.assertTrue(ipaddress.ip_interface('::1') <=
ipaddress.ip_interface('::2'))
def testNetworkComparison(self):
# ip1 and ip2 have the same network address
ip1 = ipaddress.IPv4Network('1.1.1.0/24')
ip2 = ipaddress.IPv4Network('1.1.1.0/32')
ip3 = ipaddress.IPv4Network('1.1.2.0/24')
self.assertTrue(ip1 < ip3)
self.assertTrue(ip3 > ip2)
self.assertEqual(ip1.compare_networks(ip1), 0)
# if addresses are the same, sort by netmask
self.assertEqual(ip1.compare_networks(ip2), -1)
self.assertEqual(ip2.compare_networks(ip1), 1)
self.assertEqual(ip1.compare_networks(ip3), -1)
self.assertEqual(ip3.compare_networks(ip1), 1)
self.assertTrue(ip1._get_networks_key() < ip3._get_networks_key())
ip1 = ipaddress.IPv6Network('2001:2000::/96')
ip2 = ipaddress.IPv6Network('2001:2001::/96')
ip3 = ipaddress.IPv6Network('2001:ffff:2000::/96')
self.assertTrue(ip1 < ip3)
self.assertTrue(ip3 > ip2)
self.assertEqual(ip1.compare_networks(ip3), -1)
self.assertTrue(ip1._get_networks_key() < ip3._get_networks_key())
# Test comparing different protocols.
# Should always raise a TypeError.
self.assertRaises(TypeError,
self.ipv4_network.compare_networks,
self.ipv6_network)
ipv6 = ipaddress.IPv6Interface('::/0')
ipv4 = ipaddress.IPv4Interface('0.0.0.0/0')
self.assertRaises(TypeError, ipv4.__lt__, ipv6)
self.assertRaises(TypeError, ipv4.__gt__, ipv6)
self.assertRaises(TypeError, ipv6.__lt__, ipv4)
self.assertRaises(TypeError, ipv6.__gt__, ipv4)
# Regression test for issue 19.
ip1 = ipaddress.ip_network('10.1.2.128/25')
self.assertFalse(ip1 < ip1)
self.assertFalse(ip1 > ip1)
ip2 = ipaddress.ip_network('10.1.3.0/24')
self.assertTrue(ip1 < ip2)
self.assertFalse(ip2 < ip1)
self.assertFalse(ip1 > ip2)
self.assertTrue(ip2 > ip1)
ip3 = ipaddress.ip_network('10.1.3.0/25')
self.assertTrue(ip2 < ip3)
self.assertFalse(ip3 < ip2)
self.assertFalse(ip2 > ip3)
self.assertTrue(ip3 > ip2)
# Regression test for issue 28.
ip1 = ipaddress.ip_network('10.10.10.0/31')
ip2 = ipaddress.ip_network('10.10.10.0')
ip3 = ipaddress.ip_network('10.10.10.2/31')
ip4 = ipaddress.ip_network('10.10.10.2')
sorted = [ip1, ip2, ip3, ip4]
unsorted = [ip2, ip4, ip1, ip3]
unsorted.sort()
self.assertEqual(sorted, unsorted)
unsorted = [ip4, ip1, ip3, ip2]
unsorted.sort()
self.assertEqual(sorted, unsorted)
self.assertRaises(TypeError, ip1.__lt__,
ipaddress.ip_address('10.10.10.0'))
self.assertRaises(TypeError, ip2.__lt__,
ipaddress.ip_address('10.10.10.0'))
# <=, >=
self.assertTrue(ipaddress.ip_network('1.1.1.1') <=
ipaddress.ip_network('1.1.1.1'))
self.assertTrue(ipaddress.ip_network('1.1.1.1') <=
ipaddress.ip_network('1.1.1.2'))
self.assertFalse(ipaddress.ip_network('1.1.1.2') <=
ipaddress.ip_network('1.1.1.1'))
self.assertTrue(ipaddress.ip_network('::1') <=
ipaddress.ip_network('::1'))
self.assertTrue(ipaddress.ip_network('::1') <=
ipaddress.ip_network('::2'))
self.assertFalse(ipaddress.ip_network('::2') <=
ipaddress.ip_network('::1'))
def testStrictNetworks(self):
self.assertRaises(ValueError, ipaddress.ip_network, '192.168.1.1/24')
self.assertRaises(ValueError, ipaddress.ip_network, '::1/120')
def testOverlaps(self):
other = ipaddress.IPv4Network('1.2.3.0/30')
other2 = ipaddress.IPv4Network('1.2.2.0/24')
other3 = ipaddress.IPv4Network('1.2.2.64/26')
self.assertTrue(self.ipv4_network.overlaps(other))
self.assertFalse(self.ipv4_network.overlaps(other2))
self.assertTrue(other2.overlaps(other3))
def testEmbeddedIpv4(self):
ipv4_string = '192.168.0.1'
ipv4 = ipaddress.IPv4Interface(ipv4_string)
v4compat_ipv6 = ipaddress.IPv6Interface('::%s' % ipv4_string)
self.assertEqual(int(v4compat_ipv6.ip), int(ipv4.ip))
v4mapped_ipv6 = ipaddress.IPv6Interface('::ffff:%s' % ipv4_string)
self.assertNotEqual(v4mapped_ipv6.ip, ipv4.ip)
self.assertRaises(ipaddress.AddressValueError, ipaddress.IPv6Interface,
'2001:1.1.1.1:1.1.1.1')
# Issue 67: IPv6 with embedded IPv4 address not recognized.
def testIPv6AddressTooLarge(self):
# RFC4291 2.5.5.2
self.assertEqual(ipaddress.ip_address('::FFFF:192.0.2.1'),
ipaddress.ip_address('::FFFF:c000:201'))
# RFC4291 2.2 (part 3) x::d.d.d.d
self.assertEqual(ipaddress.ip_address('FFFF::192.0.2.1'),
ipaddress.ip_address('FFFF::c000:201'))
def testIPVersion(self):
self.assertEqual(self.ipv4_address.version, 4)
self.assertEqual(self.ipv6_address.version, 6)
def testMaxPrefixLength(self):
self.assertEqual(self.ipv4_interface.max_prefixlen, 32)
self.assertEqual(self.ipv6_interface.max_prefixlen, 128)
def testPacked(self):
self.assertEqual(self.ipv4_address.packed,
b'\x01\x02\x03\x04')
self.assertEqual(ipaddress.IPv4Interface('255.254.253.252').packed,
b'\xff\xfe\xfd\xfc')
self.assertEqual(self.ipv6_address.packed,
b'\x20\x01\x06\x58\x02\x2a\xca\xfe'
b'\x02\x00\x00\x00\x00\x00\x00\x01')
self.assertEqual(ipaddress.IPv6Interface('ffff:2:3:4:ffff::').packed,
b'\xff\xff\x00\x02\x00\x03\x00\x04\xff\xff'
+ b'\x00' * 6)
self.assertEqual(ipaddress.IPv6Interface('::1:0:0:0:0').packed,
b'\x00' * 6 + b'\x00\x01' + b'\x00' * 8)
def testIpType(self):
ipv4net = ipaddress.ip_network('1.2.3.4')
ipv4addr = ipaddress.ip_address('1.2.3.4')
ipv6net = ipaddress.ip_network('::1.2.3.4')
ipv6addr = ipaddress.ip_address('::1.2.3.4')
self.assertEqual(ipaddress.IPv4Network, type(ipv4net))
self.assertEqual(ipaddress.IPv4Address, type(ipv4addr))
self.assertEqual(ipaddress.IPv6Network, type(ipv6net))
self.assertEqual(ipaddress.IPv6Address, type(ipv6addr))
def testReservedIpv4(self):
# test networks
self.assertEqual(True, ipaddress.ip_interface(
'224.1.1.1/31').is_multicast)
self.assertEqual(False, ipaddress.ip_network('240.0.0.0').is_multicast)
self.assertEqual(True, ipaddress.ip_network('240.0.0.0').is_reserved)
self.assertEqual(True, ipaddress.ip_interface(
'192.168.1.1/17').is_private)
self.assertEqual(False, ipaddress.ip_network('192.169.0.0').is_private)
self.assertEqual(True, ipaddress.ip_network(
'10.255.255.255').is_private)
self.assertEqual(False, ipaddress.ip_network('11.0.0.0').is_private)
self.assertEqual(False, ipaddress.ip_network('11.0.0.0').is_reserved)
self.assertEqual(True, ipaddress.ip_network(
'172.31.255.255').is_private)
self.assertEqual(False, ipaddress.ip_network('172.32.0.0').is_private)
self.assertEqual(True,
ipaddress.ip_network('169.254.1.0/24').is_link_local)
self.assertEqual(True,
ipaddress.ip_interface(
'169.254.100.200/24').is_link_local)
self.assertEqual(False,
ipaddress.ip_interface(
'169.255.100.200/24').is_link_local)
self.assertEqual(True,
ipaddress.ip_network(
'127.100.200.254/32').is_loopback)
self.assertEqual(True, ipaddress.ip_network(
'127.42.0.0/16').is_loopback)
self.assertEqual(False, ipaddress.ip_network('128.0.0.0').is_loopback)
# test addresses
self.assertEqual(True, ipaddress.ip_address('0.0.0.0').is_unspecified)
self.assertEqual(True, ipaddress.ip_address('224.1.1.1').is_multicast)
self.assertEqual(False, ipaddress.ip_address('240.0.0.0').is_multicast)
self.assertEqual(True, ipaddress.ip_address('240.0.0.1').is_reserved)
self.assertEqual(False,
ipaddress.ip_address('239.255.255.255').is_reserved)
self.assertEqual(True, ipaddress.ip_address('192.168.1.1').is_private)
self.assertEqual(False, ipaddress.ip_address('192.169.0.0').is_private)
self.assertEqual(True, ipaddress.ip_address(
'10.255.255.255').is_private)
self.assertEqual(False, ipaddress.ip_address('11.0.0.0').is_private)
self.assertEqual(True, ipaddress.ip_address(
'172.31.255.255').is_private)
self.assertEqual(False, ipaddress.ip_address('172.32.0.0').is_private)
self.assertEqual(True,
ipaddress.ip_address('169.254.100.200').is_link_local)
self.assertEqual(False,
ipaddress.ip_address('169.255.100.200').is_link_local)
self.assertEqual(True,
ipaddress.ip_address('127.100.200.254').is_loopback)
self.assertEqual(True, ipaddress.ip_address('127.42.0.0').is_loopback)
self.assertEqual(False, ipaddress.ip_address('128.0.0.0').is_loopback)
self.assertEqual(True, ipaddress.ip_network('0.0.0.0').is_unspecified)
def testReservedIpv6(self):
self.assertEqual(True, ipaddress.ip_network('ffff::').is_multicast)
self.assertEqual(True, ipaddress.ip_network(2**128 - 1).is_multicast)
self.assertEqual(True, ipaddress.ip_network('ff00::').is_multicast)
self.assertEqual(False, ipaddress.ip_network('fdff::').is_multicast)
self.assertEqual(True, ipaddress.ip_network('fecf::').is_site_local)
self.assertEqual(True, ipaddress.ip_network(
'feff:ffff:ffff:ffff::').is_site_local)
self.assertEqual(False, ipaddress.ip_network(
'fbf:ffff::').is_site_local)
self.assertEqual(False, ipaddress.ip_network('ff00::').is_site_local)
self.assertEqual(True, ipaddress.ip_network('fc00::').is_private)
self.assertEqual(True, ipaddress.ip_network(
'fc00:ffff:ffff:ffff::').is_private)
self.assertEqual(False, ipaddress.ip_network('fbff:ffff::').is_private)
self.assertEqual(False, ipaddress.ip_network('fe00::').is_private)
self.assertEqual(True, ipaddress.ip_network('fea0::').is_link_local)
self.assertEqual(True, ipaddress.ip_network(
'febf:ffff::').is_link_local)
self.assertEqual(False, ipaddress.ip_network(
'fe7f:ffff::').is_link_local)
self.assertEqual(False, ipaddress.ip_network('fec0::').is_link_local)
self.assertEqual(True, ipaddress.ip_interface('0:0::0:01').is_loopback)
self.assertEqual(False, ipaddress.ip_interface('::1/127').is_loopback)
self.assertEqual(False, ipaddress.ip_network('::').is_loopback)
self.assertEqual(False, ipaddress.ip_network('::2').is_loopback)
self.assertEqual(True, ipaddress.ip_network('0::0').is_unspecified)
self.assertEqual(False, ipaddress.ip_network('::1').is_unspecified)
self.assertEqual(False, ipaddress.ip_network('::/127').is_unspecified)
# test addresses
self.assertEqual(True, ipaddress.ip_address('ffff::').is_multicast)
self.assertEqual(True, ipaddress.ip_address(2**128 - 1).is_multicast)
self.assertEqual(True, ipaddress.ip_address('ff00::').is_multicast)
self.assertEqual(False, ipaddress.ip_address('fdff::').is_multicast)
self.assertEqual(True, ipaddress.ip_address('fecf::').is_site_local)
self.assertEqual(True, ipaddress.ip_address(
'feff:ffff:ffff:ffff::').is_site_local)
self.assertEqual(False, ipaddress.ip_address(
'fbf:ffff::').is_site_local)
self.assertEqual(False, ipaddress.ip_address('ff00::').is_site_local)
self.assertEqual(True, ipaddress.ip_address('fc00::').is_private)
self.assertEqual(True, ipaddress.ip_address(
'fc00:ffff:ffff:ffff::').is_private)
self.assertEqual(False, ipaddress.ip_address('fbff:ffff::').is_private)
self.assertEqual(False, ipaddress.ip_address('fe00::').is_private)
self.assertEqual(True, ipaddress.ip_address('fea0::').is_link_local)
self.assertEqual(True, ipaddress.ip_address(
'febf:ffff::').is_link_local)
self.assertEqual(False, ipaddress.ip_address(
'fe7f:ffff::').is_link_local)
self.assertEqual(False, ipaddress.ip_address('fec0::').is_link_local)
self.assertEqual(True, ipaddress.ip_address('0:0::0:01').is_loopback)
self.assertEqual(True, ipaddress.ip_address('::1').is_loopback)
self.assertEqual(False, ipaddress.ip_address('::2').is_loopback)
self.assertEqual(True, ipaddress.ip_address('0::0').is_unspecified)
self.assertEqual(False, ipaddress.ip_address('::1').is_unspecified)
# some generic IETF reserved addresses
self.assertEqual(True, ipaddress.ip_address('100::').is_reserved)
self.assertEqual(True, ipaddress.ip_network('4000::1/128').is_reserved)
def testIpv4Mapped(self):
self.assertEqual(
ipaddress.ip_address('::ffff:192.168.1.1').ipv4_mapped,
ipaddress.ip_address('192.168.1.1'))
self.assertEqual(ipaddress.ip_address('::c0a8:101').ipv4_mapped, None)
self.assertEqual(ipaddress.ip_address('::ffff:c0a8:101').ipv4_mapped,
ipaddress.ip_address('192.168.1.1'))
def testAddrExclude(self):
addr1 = ipaddress.ip_network('10.1.1.0/24')
addr2 = ipaddress.ip_network('10.1.1.0/26')
addr3 = ipaddress.ip_network('10.2.1.0/24')
addr4 = ipaddress.ip_address('10.1.1.0')
addr5 = ipaddress.ip_network('2001:db8::0/32')
self.assertEqual(sorted(list(addr1.address_exclude(addr2))),
[ipaddress.ip_network('10.1.1.64/26'),
ipaddress.ip_network('10.1.1.128/25')])
self.assertRaises(ValueError, list, addr1.address_exclude(addr3))
self.assertRaises(TypeError, list, addr1.address_exclude(addr4))
self.assertRaises(TypeError, list, addr1.address_exclude(addr5))
self.assertEqual(list(addr1.address_exclude(addr1)), [])
def testHash(self):
self.assertEqual(hash(ipaddress.ip_interface('10.1.1.0/24')),
hash(ipaddress.ip_interface('10.1.1.0/24')))
self.assertEqual(hash(ipaddress.ip_network('10.1.1.0/24')),
hash(ipaddress.ip_network('10.1.1.0/24')))
self.assertEqual(hash(ipaddress.ip_address('10.1.1.0')),
hash(ipaddress.ip_address('10.1.1.0')))
# i70
self.assertEqual(hash(ipaddress.ip_address('1.2.3.4')),
hash(ipaddress.ip_address(
int(ipaddress.ip_address('1.2.3.4')._ip))))
ip1 = ipaddress.ip_address('10.1.1.0')
ip2 = ipaddress.ip_address('1::')
dummy = {}
dummy[self.ipv4_address] = None
dummy[self.ipv6_address] = None
dummy[ip1] = None
dummy[ip2] = None
self.assertIn(self.ipv4_address, dummy)
self.assertIn(ip2, dummy)
def testIPBases(self):
net = self.ipv4_network
self.assertEqual('1.2.3.0/24', net.compressed)
net = self.ipv6_network
self.assertRaises(ValueError, net._string_from_ip_int, 2**128 + 1)
def testIPv6NetworkHelpers(self):
net = self.ipv6_network
self.assertEqual('2001:658:22a:cafe::/64', net.with_prefixlen)
self.assertEqual('2001:658:22a:cafe::/ffff:ffff:ffff:ffff::',
net.with_netmask)
self.assertEqual('2001:658:22a:cafe::/::ffff:ffff:ffff:ffff',
net.with_hostmask)
self.assertEqual('2001:658:22a:cafe::/64', str(net))
def testIPv4NetworkHelpers(self):
net = self.ipv4_network
self.assertEqual('1.2.3.0/24', net.with_prefixlen)
self.assertEqual('1.2.3.0/255.255.255.0', net.with_netmask)
self.assertEqual('1.2.3.0/0.0.0.255', net.with_hostmask)
self.assertEqual('1.2.3.0/24', str(net))
def testCopyConstructor(self):
addr1 = ipaddress.ip_network('10.1.1.0/24')
addr2 = ipaddress.ip_network(addr1)
addr3 = ipaddress.ip_interface('2001:658:22a:cafe:200::1/64')
addr4 = ipaddress.ip_interface(addr3)
addr5 = ipaddress.IPv4Address('1.1.1.1')
addr6 = ipaddress.IPv6Address('2001:658:22a:cafe:200::1')
self.assertEqual(addr1, addr2)
self.assertEqual(addr3, addr4)
self.assertEqual(addr5, ipaddress.IPv4Address(addr5))
self.assertEqual(addr6, ipaddress.IPv6Address(addr6))
def testCompressIPv6Address(self):
test_addresses = {
'1:2:3:4:5:6:7:8': '1:2:3:4:5:6:7:8/128',
'2001:0:0:4:0:0:0:8': '2001:0:0:4::8/128',
'2001:0:0:4:5:6:7:8': '2001::4:5:6:7:8/128',
'2001:0:3:4:5:6:7:8': '2001:0:3:4:5:6:7:8/128',
'0:0:3:0:0:0:0:ffff': '0:0:3::ffff/128',
'0:0:0:4:0:0:0:ffff': '::4:0:0:0:ffff/128',
'0:0:0:0:5:0:0:ffff': '::5:0:0:ffff/128',
'1:0:0:4:0:0:7:8': '1::4:0:0:7:8/128',
'0:0:0:0:0:0:0:0': '::/128',
'0:0:0:0:0:0:0:0/0': '::/0',
'0:0:0:0:0:0:0:1': '::1/128',
'2001:0658:022a:cafe:0000:0000:0000:0000/66':
'2001:658:22a:cafe::/66',
'::1.2.3.4': '::102:304/128',
'1:2:3:4:5:ffff:1.2.3.4': '1:2:3:4:5:ffff:102:304/128',
'::7:6:5:4:3:2:1': '0:7:6:5:4:3:2:1/128',
'::7:6:5:4:3:2:0': '0:7:6:5:4:3:2:0/128',
'7:6:5:4:3:2:1::': '7:6:5:4:3:2:1:0/128',
'0:6:5:4:3:2:1::': '0:6:5:4:3:2:1:0/128',
}
for uncompressed, compressed in list(test_addresses.items()):
self.assertEqual(compressed, str(ipaddress.IPv6Interface(
uncompressed)))
def testExplodeShortHandIpStr(self):
addr1 = ipaddress.IPv6Interface('2001::1')
addr2 = ipaddress.IPv6Address('2001:0:5ef5:79fd:0:59d:a0e5:ba1')
addr3 = ipaddress.IPv6Network('2001::/96')
addr4 = ipaddress.IPv4Address('192.168.178.1')
self.assertEqual('2001:0000:0000:0000:0000:0000:0000:0001/128',
addr1.exploded)
self.assertEqual('0000:0000:0000:0000:0000:0000:0000:0001/128',
ipaddress.IPv6Interface('::1/128').exploded)
# issue 77
self.assertEqual('2001:0000:5ef5:79fd:0000:059d:a0e5:0ba1',
addr2.exploded)
self.assertEqual('2001:0000:0000:0000:0000:0000:0000:0000/96',
addr3.exploded)
self.assertEqual('192.168.178.1', addr4.exploded)
def testIntRepresentation(self):
self.assertEqual(16909060, int(self.ipv4_address))
self.assertEqual(42540616829182469433547762482097946625,
int(self.ipv6_address))
def testForceVersion(self):
self.assertEqual(ipaddress.ip_network(1).version, 4)
self.assertEqual(ipaddress.IPv6Network(1).version, 6)
def testWithStar(self):
self.assertEqual(self.ipv4_interface.with_prefixlen, "1.2.3.4/24")
self.assertEqual(self.ipv4_interface.with_netmask,
"1.2.3.4/255.255.255.0")
self.assertEqual(self.ipv4_interface.with_hostmask,
"1.2.3.4/0.0.0.255")
self.assertEqual(self.ipv6_interface.with_prefixlen,
'2001:658:22a:cafe:200::1/64')
self.assertEqual(self.ipv6_interface.with_netmask,
'2001:658:22a:cafe:200::1/ffff:ffff:ffff:ffff::')
        # this probably doesn't make much sense, but it's included for
        # compatibility with ipv4
self.assertEqual(self.ipv6_interface.with_hostmask,
'2001:658:22a:cafe:200::1/::ffff:ffff:ffff:ffff')
def testNetworkElementCaching(self):
# V4 - make sure we're empty
self.assertNotIn('network_address', self.ipv4_network._cache)
self.assertNotIn('broadcast_address', self.ipv4_network._cache)
self.assertNotIn('hostmask', self.ipv4_network._cache)
# V4 - populate and test
self.assertEqual(self.ipv4_network.network_address,
ipaddress.IPv4Address('1.2.3.0'))
self.assertEqual(self.ipv4_network.broadcast_address,
ipaddress.IPv4Address('1.2.3.255'))
self.assertEqual(self.ipv4_network.hostmask,
ipaddress.IPv4Address('0.0.0.255'))
# V4 - check we're cached
self.assertIn('broadcast_address', self.ipv4_network._cache)
self.assertIn('hostmask', self.ipv4_network._cache)
# V6 - make sure we're empty
self.assertNotIn('broadcast_address', self.ipv6_network._cache)
self.assertNotIn('hostmask', self.ipv6_network._cache)
# V6 - populate and test
self.assertEqual(self.ipv6_network.network_address,
ipaddress.IPv6Address('2001:658:22a:cafe::'))
self.assertEqual(self.ipv6_interface.network.network_address,
ipaddress.IPv6Address('2001:658:22a:cafe::'))
self.assertEqual(
self.ipv6_network.broadcast_address,
ipaddress.IPv6Address('2001:658:22a:cafe:ffff:ffff:ffff:ffff'))
self.assertEqual(self.ipv6_network.hostmask,
ipaddress.IPv6Address('::ffff:ffff:ffff:ffff'))
self.assertEqual(
self.ipv6_interface.network.broadcast_address,
ipaddress.IPv6Address('2001:658:22a:cafe:ffff:ffff:ffff:ffff'))
self.assertEqual(self.ipv6_interface.network.hostmask,
ipaddress.IPv6Address('::ffff:ffff:ffff:ffff'))
# V6 - check we're cached
self.assertIn('broadcast_address', self.ipv6_network._cache)
self.assertIn('hostmask', self.ipv6_network._cache)
self.assertIn('broadcast_address', self.ipv6_interface.network._cache)
self.assertIn('hostmask', self.ipv6_interface.network._cache)
def testTeredo(self):
# stolen from wikipedia
server = ipaddress.IPv4Address('65.54.227.120')
client = ipaddress.IPv4Address('192.0.2.45')
teredo_addr = '2001:0000:4136:e378:8000:63bf:3fff:fdd2'
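        # Why these values (illustrative, per RFC 4380): bits 32-64 hold the
        # Teredo server (0x4136e378 == 65.54.227.120) and the low 32 bits
        # hold the client XORed with all-ones
        # (0x3ffffdd2 ^ 0xffffffff == 0xc000022d == 192.0.2.45).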
self.assertEqual((server, client),
ipaddress.ip_address(teredo_addr).teredo)
bad_addr = '2000::4136:e378:8000:63bf:3fff:fdd2'
self.assertFalse(ipaddress.ip_address(bad_addr).teredo)
bad_addr = '2001:0001:4136:e378:8000:63bf:3fff:fdd2'
self.assertFalse(ipaddress.ip_address(bad_addr).teredo)
# i77
teredo_addr = ipaddress.IPv6Address('2001:0:5ef5:79fd:0:59d:a0e5:ba1')
self.assertEqual((ipaddress.IPv4Address('94.245.121.253'),
ipaddress.IPv4Address('95.26.244.94')),
teredo_addr.teredo)
def testsixtofour(self):
sixtofouraddr = ipaddress.ip_address('2002:ac1d:2d64::1')
bad_addr = ipaddress.ip_address('2000:ac1d:2d64::1')
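        # Why these values (illustrative): a 6to4 address embeds the IPv4
        # address in bits 16-48, so 0xac.0x1d.0x2d.0x64 decodes to
        # 172.29.45.100; bad_addr starts with 2000::/16 rather than the
        # 6to4 prefix 2002::/16, so no mapping exists.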
self.assertEqual(ipaddress.IPv4Address('172.29.45.100'),
sixtofouraddr.sixtofour)
self.assertFalse(bad_addr.sixtofour)
if __name__ == '__main__':
unittest.main()
|
timm/timmnix
|
pypy3-v5.5.0-linux64/lib-python/3/test/test_ipaddress.py
|
Python
|
mit
| 74,242
|
[
"FEFF"
] |
910ed20bcd93c7542a0297d4c6442786bd0db4934cfa4f519fd2894e053e5c02
|
"""
NetCDF reader/writer module.
This module is used to read and create NetCDF files. NetCDF files are
accessed through the `netcdf_file` object. Data written to and from NetCDF
files are contained in `netcdf_variable` objects. Attributes are given
as member variables of the `netcdf_file` and `netcdf_variable` objects.
This module implements the Scientific.IO.NetCDF API to read and create
NetCDF files. The same API is also used in the PyNIO and pynetcdf
modules, allowing these modules to be used interchangeably when working
with NetCDF files.
"""
from __future__ import division, print_function, absolute_import
# TODO:
# * properly implement ``_FillValue``.
# * implement Jeff Whitaker's patch for masked variables.
# * fix character variables.
# * implement PAGESIZE for Python 2.6?
# The Scientific.IO.NetCDF API allows attributes to be added directly to
# instances of ``netcdf_file`` and ``netcdf_variable``. To differentiate
# between user-set attributes and instance attributes, user-set attributes
# are automatically stored in the ``_attributes`` attribute by overloading
#``__setattr__``. This is the reason why the code sometimes uses
#``obj.__dict__['key'] = value``, instead of simply ``obj.key = value``;
# otherwise the key would be inserted into userspace attributes.
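# A minimal illustration of that split (hypothetical session, shown here
# only as a comment):
#     f = netcdf_file('simple.nc', 'w')
#     f.history = 'x'           # routed through __setattr__ into _attributes
#     f.__dict__['mode'] = 'w'  # bypasses __setattr__; instance-only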
__all__ = ['netcdf_file']
from operator import mul
from mmap import mmap, ACCESS_READ
import numpy as np
from numpy.compat import asbytes, asstr
from numpy import fromstring, ndarray, dtype, empty, array, asarray
from numpy import little_endian as LITTLE_ENDIAN
from functools import reduce
import sys
PY3 = sys.version_info[0] == 3
if PY3:
integer_types = int,
else:
integer_types = (int, long)
ABSENT = b'\x00\x00\x00\x00\x00\x00\x00\x00'
ZERO = b'\x00\x00\x00\x00'
NC_BYTE = b'\x00\x00\x00\x01'
NC_CHAR = b'\x00\x00\x00\x02'
NC_SHORT = b'\x00\x00\x00\x03'
NC_INT = b'\x00\x00\x00\x04'
NC_FLOAT = b'\x00\x00\x00\x05'
NC_DOUBLE = b'\x00\x00\x00\x06'
NC_DIMENSION = b'\x00\x00\x00\n'
NC_VARIABLE = b'\x00\x00\x00\x0b'
NC_ATTRIBUTE = b'\x00\x00\x00\x0c'
TYPEMAP = {NC_BYTE: ('b', 1),
NC_CHAR: ('c', 1),
NC_SHORT: ('h', 2),
NC_INT: ('i', 4),
NC_FLOAT: ('f', 4),
NC_DOUBLE: ('d', 8)}
REVERSE = {('b', 1): NC_BYTE,
('B', 1): NC_CHAR,
('c', 1): NC_CHAR,
('h', 2): NC_SHORT,
('i', 4): NC_INT,
('f', 4): NC_FLOAT,
('d', 8): NC_DOUBLE,
# these come from asarray(1).dtype.char and asarray('foo').dtype.char,
# used when getting the types from generic attributes.
('l', 4): NC_INT,
('S', 1): NC_CHAR}
class netcdf_file(object):
"""
A file object for NetCDF data.
A `netcdf_file` object has two standard attributes: `dimensions` and
`variables`. The values of both are dictionaries, mapping dimension
names to their associated lengths and variable names to variables,
respectively. Application programs should never modify these
dictionaries.
All other attributes correspond to global attributes defined in the
NetCDF file. Global file attributes are created by assigning to an
attribute of the `netcdf_file` object.
Parameters
----------
filename : string or file-like
string -> filename
mode : {'r', 'w'}, optional
read-write mode, default is 'r'
mmap : None or bool, optional
Whether to mmap `filename` when reading. Default is True
when `filename` is a file name, False when `filename` is a
file-like object
version : {1, 2}, optional
version of netcdf to read / write, where 1 means *Classic
format* and 2 means *64-bit offset format*. Default is 1. See
`here <http://www.unidata.ucar.edu/software/netcdf/docs/netcdf/Which-Format.html>`_
for more info.
Notes
-----
The major advantage of this module over other modules is that it doesn't
require the code to be linked to the NetCDF libraries. This module is
derived from `pupynere <https://bitbucket.org/robertodealmeida/pupynere/>`_.
NetCDF files are a self-describing binary data format. The file contains
metadata that describes the dimensions and variables in the file. More
details about NetCDF files can be found `here
<http://www.unidata.ucar.edu/software/netcdf/docs/netcdf.html>`_. There
are three main sections to a NetCDF data structure:
1. Dimensions
2. Variables
3. Attributes
The dimensions section records the name and length of each dimension used
    by the variables. Each variable then indicates which dimensions it
    uses and any attributes such as data units, along with containing the data
    values for the variable. It is good practice to include a
    variable with the same name as a dimension to provide the values for
    that axis. Lastly, the attributes section would contain additional
information such as the name of the file creator or the instrument used to
collect the data.
When writing data to a NetCDF file, there is often the need to indicate the
'record dimension'. A record dimension is the unbounded dimension for a
variable. For example, a temperature variable may have dimensions of
latitude, longitude and time. If one wants to add more temperature data to
the NetCDF file as time progresses, then the temperature variable should
have the time dimension flagged as the record dimension.
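    For example (a sketch, assuming a file open for writing in which the
    other dimensions already exist), the record dimension is created with
    length ``None``:
    >>> f.createDimension('time', None)
    >>> temp = f.createVariable('temp', 'f', ('time', 'lat', 'lon'))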
    In addition, the NetCDF file header contains the position of the data in
    the file, so access can be done in an efficient manner without loading
    unnecessary data into memory. This module uses the ``mmap`` module to
    create Numpy arrays mapped to the data on disk, for the same purpose.
Examples
--------
To create a NetCDF file:
>>> from scipy.io import netcdf
>>> f = netcdf.netcdf_file('simple.nc', 'w')
>>> f.history = 'Created for a test'
>>> f.createDimension('time', 10)
>>> time = f.createVariable('time', 'i', ('time',))
>>> time[:] = np.arange(10)
>>> time.units = 'days since 2008-01-01'
>>> f.close()
    Note the assignment of ``arange(10)`` to ``time[:]``. Exposing the slice
    of the time variable allows for the data to be set in the object, rather
    than letting ``arange(10)`` overwrite the ``time`` variable.
To read the NetCDF file we just created:
>>> from scipy.io import netcdf
>>> f = netcdf.netcdf_file('simple.nc', 'r')
>>> print(f.history)
Created for a test
>>> time = f.variables['time']
>>> print(time.units)
days since 2008-01-01
>>> print(time.shape)
(10,)
>>> print(time[-1])
9
>>> f.close()
    A NetCDF file can also be used as a context manager:
    >>> from scipy.io import netcdf
    >>> with netcdf.netcdf_file('simple.nc', 'r') as f:
    ...     print(f.history)
Created for a test
"""
def __init__(self, filename, mode='r', mmap=None, version=1):
"""Initialize netcdf_file from fileobj (str or file-like)."""
if hasattr(filename, 'seek'): # file-like
self.fp = filename
self.filename = 'None'
if mmap is None:
mmap = False
elif mmap and not hasattr(filename, 'fileno'):
raise ValueError('Cannot use file object for mmap')
else: # maybe it's a string
self.filename = filename
self.fp = open(self.filename, '%sb' % mode)
if mmap is None:
mmap = True
self.use_mmap = mmap
self._fds = []
self.version_byte = version
        if mode not in 'rw':
raise ValueError("Mode must be either 'r' or 'w'.")
self.mode = mode
self.dimensions = {}
self.variables = {}
self._dims = []
self._recs = 0
self._recsize = 0
self._attributes = {}
if mode == 'r':
self._read()
def __setattr__(self, attr, value):
# Store user defined attributes in a separate dict,
# so we can save them to file later.
try:
self._attributes[attr] = value
except AttributeError:
pass
self.__dict__[attr] = value
def close(self):
"""Closes the NetCDF file."""
try:
# mmaps are only for reading (for now)
for mmap_fd in self._fds:
mmap_fd.close()
finally:
if not self.fp.closed:
try:
self.flush()
finally:
self.fp.close()
__del__ = close
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def createDimension(self, name, length):
"""
Adds a dimension to the Dimension section of the NetCDF data structure.
Note that this function merely adds a new dimension that the variables can
reference. The values for the dimension, if desired, should be added as
a variable using `createVariable`, referring to this dimension.
Parameters
----------
name : str
Name of the dimension (Eg, 'lat' or 'time').
length : int
Length of the dimension.
See Also
--------
createVariable
"""
self.dimensions[name] = length
self._dims.append(name)
def createVariable(self, name, type, dimensions):
"""
Create an empty variable for the `netcdf_file` object, specifying its data
type and the dimensions it uses.
Parameters
----------
name : str
Name of the new variable.
type : dtype or str
Data type of the variable.
dimensions : sequence of str
List of the dimension names used by the variable, in the desired order.
Returns
-------
variable : netcdf_variable
The newly created ``netcdf_variable`` object.
This object has also been added to the `netcdf_file` object as well.
See Also
--------
createDimension
Notes
-----
Any dimensions to be used by the variable should already exist in the
NetCDF data structure or should be created by `createDimension` prior to
creating the NetCDF variable.
"""
shape = tuple([self.dimensions[dim] for dim in dimensions])
shape_ = tuple([dim or 0 for dim in shape]) # replace None with 0 for numpy
type = dtype(type)
typecode, size = type.char, type.itemsize
if (typecode, size) not in REVERSE:
raise ValueError("NetCDF 3 does not support type %s" % type)
data = empty(shape_, dtype=type.newbyteorder("B")) # convert to big endian always for NetCDF 3
self.variables[name] = netcdf_variable(data, typecode, size, shape, dimensions)
return self.variables[name]
def flush(self):
"""
Perform a sync-to-disk flush if the `netcdf_file` object is in write mode.
See Also
--------
sync : Identical function
"""
        if hasattr(self, 'mode') and self.mode == 'w':
self._write()
sync = flush
def _write(self):
self.fp.seek(0)
self.fp.write(b'CDF')
self.fp.write(array(self.version_byte, '>b').tostring())
# Write headers and data.
self._write_numrecs()
self._write_dim_array()
self._write_gatt_array()
self._write_var_array()
def _write_numrecs(self):
# Get highest record count from all record variables.
for var in self.variables.values():
if var.isrec and len(var.data) > self._recs:
self.__dict__['_recs'] = len(var.data)
self._pack_int(self._recs)
def _write_dim_array(self):
if self.dimensions:
self.fp.write(NC_DIMENSION)
self._pack_int(len(self.dimensions))
for name in self._dims:
self._pack_string(name)
length = self.dimensions[name]
self._pack_int(length or 0) # replace None with 0 for record dimension
else:
self.fp.write(ABSENT)
def _write_gatt_array(self):
self._write_att_array(self._attributes)
def _write_att_array(self, attributes):
if attributes:
self.fp.write(NC_ATTRIBUTE)
self._pack_int(len(attributes))
for name, values in attributes.items():
self._pack_string(name)
self._write_values(values)
else:
self.fp.write(ABSENT)
def _write_var_array(self):
if self.variables:
self.fp.write(NC_VARIABLE)
self._pack_int(len(self.variables))
# Sort variables non-recs first, then recs. We use a DSU
# since some people use pupynere with Python 2.3.x.
deco = [(v._shape and not v.isrec, k) for (k, v) in self.variables.items()]
deco.sort()
variables = [k for (unused, k) in deco][::-1]
# Set the metadata for all variables.
for name in variables:
self._write_var_metadata(name)
# Now that we have the metadata, we know the vsize of
# each record variable, so we can calculate recsize.
self.__dict__['_recsize'] = sum([
var._vsize for var in self.variables.values()
if var.isrec])
# Set the data for all variables.
for name in variables:
self._write_var_data(name)
else:
self.fp.write(ABSENT)
def _write_var_metadata(self, name):
var = self.variables[name]
self._pack_string(name)
self._pack_int(len(var.dimensions))
for dimname in var.dimensions:
dimid = self._dims.index(dimname)
self._pack_int(dimid)
self._write_att_array(var._attributes)
nc_type = REVERSE[var.typecode(), var.itemsize()]
self.fp.write(asbytes(nc_type))
if not var.isrec:
vsize = var.data.size * var.data.itemsize
vsize += -vsize % 4
else: # record variable
try:
vsize = var.data[0].size * var.data.itemsize
except IndexError:
vsize = 0
rec_vars = len([v for v in self.variables.values()
if v.isrec])
if rec_vars > 1:
vsize += -vsize % 4
self.variables[name].__dict__['_vsize'] = vsize
self._pack_int(vsize)
# Pack a bogus begin, and set the real value later.
self.variables[name].__dict__['_begin'] = self.fp.tell()
self._pack_begin(0)
def _write_var_data(self, name):
var = self.variables[name]
# Set begin in file header.
the_beguine = self.fp.tell()
self.fp.seek(var._begin)
self._pack_begin(the_beguine)
self.fp.seek(the_beguine)
# Write data.
if not var.isrec:
self.fp.write(var.data.tostring())
count = var.data.size * var.data.itemsize
            self.fp.write(b'\x00' * (var._vsize - count))  # pad with zero bytes per the NetCDF spec
else: # record variable
# Handle rec vars with shape[0] < nrecs.
if self._recs > len(var.data):
shape = (self._recs,) + var.data.shape[1:]
var.data.resize(shape)
pos0 = pos = self.fp.tell()
for rec in var.data:
# Apparently scalars cannot be converted to big endian. If we
# try to convert a ``=i4`` scalar to, say, '>i4' the dtype
# will remain as ``=i4``.
if not rec.shape and (rec.dtype.byteorder == '<' or
(rec.dtype.byteorder == '=' and LITTLE_ENDIAN)):
rec = rec.byteswap()
self.fp.write(rec.tostring())
# Padding
count = rec.size * rec.itemsize
                self.fp.write(b'\x00' * (var._vsize - count))  # zero-byte padding
pos += self._recsize
self.fp.seek(pos)
self.fp.seek(pos0 + var._vsize)
def _write_values(self, values):
if hasattr(values, 'dtype'):
nc_type = REVERSE[values.dtype.char, values.dtype.itemsize]
else:
types = [(t, NC_INT) for t in integer_types]
types += [
(float, NC_FLOAT),
(str, NC_CHAR),
]
try:
sample = values[0]
except TypeError:
sample = values
except IndexError:
if isinstance(values, basestring):
sample = values
else:
raise
for class_, nc_type in types:
if isinstance(sample, class_):
break
typecode, size = TYPEMAP[nc_type]
dtype_ = '>%s' % typecode
values = asarray(values, dtype=dtype_)
self.fp.write(asbytes(nc_type))
if values.dtype.char == 'S':
nelems = values.itemsize
else:
nelems = values.size
self._pack_int(nelems)
if not values.shape and (values.dtype.byteorder == '<' or
(values.dtype.byteorder == '=' and LITTLE_ENDIAN)):
values = values.byteswap()
self.fp.write(values.tostring())
count = values.size * values.itemsize
        self.fp.write(b'\x00' * (-count % 4))  # pad with zero bytes
def _read(self):
# Check magic bytes and version
magic = self.fp.read(3)
        if magic != b'CDF':
raise TypeError("Error: %s is not a valid NetCDF 3 file" %
self.filename)
self.__dict__['version_byte'] = fromstring(self.fp.read(1), '>b')[0]
# Read file headers and set data.
self._read_numrecs()
self._read_dim_array()
self._read_gatt_array()
self._read_var_array()
def _read_numrecs(self):
self.__dict__['_recs'] = self._unpack_int()
def _read_dim_array(self):
header = self.fp.read(4)
        if header not in [ZERO, NC_DIMENSION]:
raise ValueError("Unexpected header.")
count = self._unpack_int()
for dim in range(count):
name = asstr(self._unpack_string())
length = self._unpack_int() or None # None for record dimension
self.dimensions[name] = length
self._dims.append(name) # preserve order
def _read_gatt_array(self):
for k, v in self._read_att_array().items():
self.__setattr__(k, v)
def _read_att_array(self):
header = self.fp.read(4)
        if header not in [ZERO, NC_ATTRIBUTE]:
raise ValueError("Unexpected header.")
count = self._unpack_int()
attributes = {}
for attr in range(count):
name = asstr(self._unpack_string())
attributes[name] = self._read_values()
return attributes
def _read_var_array(self):
header = self.fp.read(4)
        if header not in [ZERO, NC_VARIABLE]:
raise ValueError("Unexpected header.")
nrsize = 0
nrbegin = 0
recbegin = 0
nrdtype = {'names': [], 'formats': []}
recdtype = {'names': [], 'formats': []}
nr_vars = []
rec_vars = []
count = self._unpack_int()
for var in range(count):
(name, dimensions, shape, attributes,
typecode, size, dtype_, begin_, vsize) = self._read_var()
# http://www.unidata.ucar.edu/software/netcdf/docs/netcdf.html
# Note that vsize is the product of the dimension lengths
# (omitting the record dimension) and the number of bytes
# per value (determined from the type), increased to the
# next multiple of 4, for each variable. If a record
# variable, this is the amount of space per record. The
# netCDF "record size" is calculated as the sum of the
# vsize's of all the record variables.
#
# The vsize field is actually redundant, because its value
# may be computed from other information in the header. The
# 32-bit vsize field is not large enough to contain the size
# of variables that require more than 2^32 - 4 bytes, so
# 2^32 - 1 is used in the vsize field for such variables.
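            # Worked example (illustrative): a non-record NC_SHORT variable
            # with shape (3, 5) occupies 3*5*2 = 30 bytes, so its vsize is
            # rounded up to the next multiple of 4, i.e. 32.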
isrec = shape and shape[0] is None # record variable
recshape = shape[isrec:] # shape without record dimension
# construct dtype
names = [name]
formats = [str(recshape) + dtype_]
# Handle padding with a virtual variable.
if typecode in 'bch':
actual_size = reduce(mul, recshape, 1) * size
padding = -actual_size % 4
if padding:
names.append('_padding_%d' % var)
formats.append('(%d,)>b' % padding)
if isrec:
rec_vars.append(name)
# The netCDF "record size" is calculated as the sum of
# the vsize's of all the record variables.
self.__dict__['_recsize'] += vsize
if recbegin == 0:
recbegin = begin_
recdtype['names'].extend(names)
recdtype['formats'].extend(formats)
# Data will be set later.
data = None
else: # not a record variable
nr_vars.append(name)
nrsize += vsize
if nrbegin == 0:
nrbegin = begin_
nrdtype['names'].extend(names)
nrdtype['formats'].extend(formats)
# Calculate size to avoid problems with vsize (above)
a_size = reduce(mul, shape, 1) * size
if self.use_mmap:
data = None
else:
pos = self.fp.tell()
self.fp.seek(begin_)
data = fromstring(self.fp.read(a_size), dtype=dtype_)
data.shape = shape
self.fp.seek(pos)
# Add variable.
self.variables[name] = netcdf_variable(
data, typecode, size, shape, dimensions, attributes)
if self.use_mmap:
# Build nonrec array.
mm = mmap(self.fp.fileno(), nrbegin+nrsize, access=ACCESS_READ)
nr_array = ndarray.__new__(ndarray, (), dtype=nrdtype, buffer=mm,
offset=nrbegin, order=0)
self._fds.append(mm)
for var in nr_vars:
self.variables[var].__dict__['data'] = nr_array[var]
if rec_vars:
# Remove padding when only one record variable.
if len(rec_vars) == 1:
recdtype['names'] = recdtype['names'][:1]
recdtype['formats'] = recdtype['formats'][:1]
# Build rec array.
if self.use_mmap:
mm = mmap(self.fp.fileno(), recbegin+self._recs*self._recsize, access=ACCESS_READ)
rec_array = ndarray.__new__(ndarray, (self._recs,), dtype=recdtype,
buffer=mm, offset=recbegin, order=0)
self._fds.append(mm)
else:
pos = self.fp.tell()
self.fp.seek(recbegin)
rec_array = fromstring(self.fp.read(self._recs*self._recsize), dtype=recdtype)
rec_array.shape = (self._recs,)
self.fp.seek(pos)
for var in rec_vars:
self.variables[var].__dict__['data'] = rec_array[var]
# further reading will be done through the mmaps
self.fp.close()
def _read_var(self):
name = asstr(self._unpack_string())
dimensions = []
shape = []
dims = self._unpack_int()
for i in range(dims):
dimid = self._unpack_int()
dimname = self._dims[dimid]
dimensions.append(dimname)
dim = self.dimensions[dimname]
shape.append(dim)
dimensions = tuple(dimensions)
shape = tuple(shape)
attributes = self._read_att_array()
nc_type = self.fp.read(4)
vsize = self._unpack_int()
begin = [self._unpack_int, self._unpack_int64][self.version_byte-1]()
typecode, size = TYPEMAP[nc_type]
dtype_ = '>%s' % typecode
return name, dimensions, shape, attributes, typecode, size, dtype_, begin, vsize
def _read_values(self):
nc_type = self.fp.read(4)
n = self._unpack_int()
typecode, size = TYPEMAP[nc_type]
count = n*size
values = self.fp.read(int(count))
self.fp.read(-count % 4) # read padding
        if typecode != 'c':
values = fromstring(values, dtype='>%s' % typecode)
if values.shape == (1,):
values = values[0]
else:
values = values.rstrip(b'\x00')
return values
def _pack_begin(self, begin):
if self.version_byte == 1:
self._pack_int(begin)
elif self.version_byte == 2:
self._pack_int64(begin)
def _pack_int(self, value):
self.fp.write(array(value, '>i').tostring())
_pack_int32 = _pack_int
def _unpack_int(self):
return int(fromstring(self.fp.read(4), '>i')[0])
_unpack_int32 = _unpack_int
def _pack_int64(self, value):
self.fp.write(array(value, '>q').tostring())
def _unpack_int64(self):
return fromstring(self.fp.read(8), '>q')[0]
def _pack_string(self, s):
count = len(s)
self._pack_int(count)
self.fp.write(asbytes(s))
        self.fp.write(b'\x00' * (-count % 4))  # pad with zero bytes
def _unpack_string(self):
count = self._unpack_int()
s = self.fp.read(count).rstrip(b'\x00')
self.fp.read(-count % 4) # read padding
return s
class netcdf_variable(object):
"""
A data object for the `netcdf` module.
`netcdf_variable` objects are constructed by calling the method
`netcdf_file.createVariable` on the `netcdf_file` object. `netcdf_variable`
objects behave much like array objects defined in numpy, except that their
data resides in a file. Data is read by indexing and written by assigning
to an indexed subset; the entire array can be accessed by the index ``[:]``
or (for scalars) by using the methods `getValue` and `assignValue`.
`netcdf_variable` objects also have attribute `shape` with the same meaning
as for arrays, but the shape cannot be modified. There is another read-only
attribute `dimensions`, whose value is the tuple of dimension names.
All other attributes correspond to variable attributes defined in
the NetCDF file. Variable attributes are created by assigning to an
attribute of the `netcdf_variable` object.
Parameters
----------
data : array_like
The data array that holds the values for the variable.
Typically, this is initialized as empty, but with the proper shape.
typecode : dtype character code
Desired data-type for the data array.
size : int
Desired element size for the data array.
shape : sequence of ints
The shape of the array. This should match the lengths of the
variable's dimensions.
dimensions : sequence of strings
The names of the dimensions used by the variable. Must be in the
same order of the dimension lengths given by `shape`.
attributes : dict, optional
Attribute values (any type) keyed by string names. These attributes
become attributes for the netcdf_variable object.
Attributes
----------
dimensions : list of str
List of names of dimensions used by the variable object.
isrec, shape
Properties
See also
--------
isrec, shape
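    Examples
    --------
    A sketch of scalar access (assumes ``v`` is a length-one
    `netcdf_variable` in write mode):
    >>> v.assignValue(42)
    >>> v.getValue()
    42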
"""
def __init__(self, data, typecode, size, shape, dimensions, attributes=None):
self.data = data
self._typecode = typecode
self._size = size
self._shape = shape
self.dimensions = dimensions
self._attributes = attributes or {}
for k, v in self._attributes.items():
self.__dict__[k] = v
def __setattr__(self, attr, value):
# Store user defined attributes in a separate dict,
# so we can save them to file later.
try:
self._attributes[attr] = value
except AttributeError:
pass
self.__dict__[attr] = value
def isrec(self):
"""Returns whether the variable has a record dimension or not.
A record dimension is a dimension along which additional data could be
easily appended in the netcdf data structure without much rewriting of
the data file. This attribute is a read-only property of the
`netcdf_variable`.
"""
return self.data.shape and not self._shape[0]
isrec = property(isrec)
def shape(self):
"""Returns the shape tuple of the data variable.
This is a read-only attribute and can not be modified in the
same manner of other numpy arrays.
"""
return self.data.shape
shape = property(shape)
def getValue(self):
"""
Retrieve a scalar value from a `netcdf_variable` of length one.
Raises
------
ValueError
If the netcdf variable is an array of length greater than one,
this exception will be raised.
"""
return self.data.item()
def assignValue(self, value):
"""
Assign a scalar value to a `netcdf_variable` of length one.
Parameters
----------
value : scalar
Scalar value (of compatible type) to assign to a length-one netcdf
variable. This value will be written to file.
Raises
------
ValueError
If the input is not a scalar, or if the destination is not a length-one
netcdf variable.
"""
if not self.data.flags.writeable:
# Work-around for a bug in NumPy. Calling itemset() on a read-only
# memory-mapped array causes a seg. fault.
# See NumPy ticket #1622, and SciPy ticket #1202.
# This check for `writeable` can be removed when the oldest version
# of numpy still supported by scipy contains the fix for #1622.
raise RuntimeError("variable is not writeable")
self.data.itemset(value)
def typecode(self):
"""
Return the typecode of the variable.
Returns
-------
typecode : char
The character typecode of the variable (eg, 'i' for int).
"""
return self._typecode
def itemsize(self):
"""
Return the itemsize of the variable.
Returns
-------
itemsize : int
The element size of the variable (eg, 8 for float64).
"""
return self._size
def __getitem__(self, index):
return self.data[index]
def __setitem__(self, index, data):
# Expand data for record vars?
if self.isrec:
if isinstance(index, tuple):
rec_index = index[0]
else:
rec_index = index
if isinstance(rec_index, slice):
recs = (rec_index.start or 0) + len(data)
else:
recs = rec_index + 1
if recs > len(self.data):
shape = (recs,) + self._shape[1:]
self.data.resize(shape)
self.data[index] = data
NetCDFFile = netcdf_file
NetCDFVariable = netcdf_variable
|
jungla/ICOM-fluidity-toolbox
|
netcdf.py
|
Python
|
gpl-2.0
| 32,158
|
[
"NetCDF"
] |
c1462fcfd20d25b8b849459e510fa1a0ca2bc8ca0d122f45a800adf13029c233
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import subprocess as sp
from Bio import SeqIO
from Bio.Seq import Seq
from .config import BCFTOOLS_PATH
from .config import BWA_PATH
from .config import MARK_DUPLICATES_JAR_PATH
from .config import MASK_MIN_DEPTH
from .config import MASK_MIN_QUALITY
from .config import NOVOSORT_PATH
from .config import SAMTOOLS_PATH
from .config import TABIX_PATH
from .make_paths import get_bam_file_path
from .make_paths import get_bam_file_working_path
from .make_paths import get_cns_file_path
from .make_paths import get_depth_file_path
from .make_paths import get_fastq_pair_name
from .make_paths import get_vcf_file_path
from .make_paths import get_fastq_file_path
def samtools_index_fasta(fasta_path):
""" indexes a fasta with samtools """
cmd = [SAMTOOLS_PATH, 'faidx', fasta_path]
return cmd
def bwa_index_fasta(fasta_path):
""" builds a bwa index for a fasta """
cmd = [BWA_PATH, 'index', fasta_path]
return cmd
def bwa_mem_cmd(fasta_path, fw_fq, rv_fq, orig_fastq=False):
""" bwa mem command builder """
if orig_fastq:
short_fw_fq = fw_fq
short_rv_fq = rv_fq
else:
short_fw_fq = get_fastq_file_path(fasta_path, fw_fq)
short_rv_fq = get_fastq_file_path(fasta_path, rv_fq)
cmd = [BWA_PATH, 'mem', fasta_path, short_fw_fq, short_rv_fq]
return cmd
def samtools_view_cmd():
""" runs samtools view command builder"""
cmd = [SAMTOOLS_PATH, 'view', '-Su']
return cmd
def novosort_cmd(bamfile_working):
""" novosort command builder """
cmd = [NOVOSORT_PATH,
'-m', '1g',
'-o', bamfile_working,
'-t', '.', '-']
return cmd
def mark_duplicates_cmd(bamfile_working, bamfile_final):
""" mark duplicates command builder """
cmd = ['java', '-jar', MARK_DUPLICATES_JAR_PATH,
'INPUT={}'.format(bamfile_working),
'OUTPUT={}'.format(bamfile_final),
'REMOVE_DUPLICATES=true',
'METRICS_FILE=dup.txt',
'ASSUME_SORTED=true']
return cmd
def samtools_index_bam_cmd(bamfile_final):
""" bamfile index command maker """
cmd = [SAMTOOLS_PATH, 'index', bamfile_final]
return cmd
def cat_final_bam(bamfile_final):
""" cat command maker """
cmd = ['cat', bamfile_final]
return cmd
def samtools_mpileup(bamfile_final, ref_file):
""" mpileup command maker """
cmd = [SAMTOOLS_PATH, 'mpileup', '-A', '-ug', '-I',
'-f', ref_file, '-s', bamfile_final]
return cmd
def bcftools_call():
""" bcftools call command maker """
cmd = [BCFTOOLS_PATH, 'call', '-c']
return cmd
def bgzip():
""" bgzip command maker """
cmd = ['bgzip', '-c']
return cmd
def tabix(vcf_file_out):
""" tabix command maker """
cmd = [TABIX_PATH, '-f', '-p', 'vcf', vcf_file_out]
return cmd
def bcftools_filter(vcf_file_out):
""" bcftools commmand maker """
filter_string = "-i'(DP>={})&(%QUAL>={})'".format(MASK_MIN_DEPTH,
MASK_MIN_QUALITY)
cmd = [BCFTOOLS_PATH, 'filter',
filter_string,
vcf_file_out]
return cmd
def bcftools_query():
""" bcftools command maker """
cmd = [BCFTOOLS_PATH, 'query',
'-f%CHROM\t%POS\n']
return cmd
def get_fasta_record(fasta_path):
"""returns a single fasta file record"""
with open(fasta_path) as input_handle:
for record in SeqIO.parse(input_handle, 'fasta'):
return record
return False
def write_fasta_record(record, file_path):
"""writes a fasta file record"""
with open(file_path, "w+") as output_handle:
SeqIO.write(record, output_handle, 'fasta')
return True
def remask_if_empty(fasta_ref, cns_path):
"""remasks the consensus if there is no file size."""
if os.stat(cns_path).st_size: # if file has a size
return True
record = get_fasta_record(fasta_ref)
if record:
new_seq = Seq("".join(["N" for char in record.seq]))
record.seq = new_seq
return write_fasta_record(record, cns_path)
return False
def build_fasta_indices(fasta_path):
""" builds fasta indices """
bwa_cmd = bwa_index_fasta(fasta_path)
sam_cmd = samtools_index_fasta(fasta_path)
sp.call(bwa_cmd)
sp.call(sam_cmd)
return True
def build_working_bam(ref_file, fw_fq, rv_fq, bamfile_working, orig_fastq=False):
""" builds first step bamfile """
bwa_cmd = bwa_mem_cmd(ref_file, fw_fq, rv_fq, orig_fastq)
sam_cmd = samtools_view_cmd()
nov_cmd = novosort_cmd(bamfile_working)
p1 = sp.Popen(bwa_cmd, stdout=sp.PIPE)
p2 = sp.Popen(sam_cmd, stdin=p1.stdout, stdout=sp.PIPE)
p3 = sp.Popen(nov_cmd, stdin=p2.stdout)
status = p3.communicate()
return status
def build_final_bam(bamfile_working, bamfile_final):
""" builds finalized bamfile """
dups_cmd = mark_duplicates_cmd(bamfile_working, bamfile_final)
index_bam_cmd = samtools_index_bam_cmd(bamfile_final)
sp.call(dups_cmd)
sp.call(index_bam_cmd)
os.remove(bamfile_working)
return True
def build_vcf(ref_file, bamfile_final, vcf_file_out):
""" builds snp vcf file to make consensus """
sam_cmd = samtools_mpileup(bamfile_final, ref_file)
bcf_cmd = bcftools_call()
buz_cmd = bgzip()
idx_cmd = tabix(vcf_file_out)
with open(vcf_file_out, "w+") as output_handle:
p1 = sp.Popen(sam_cmd, stdout=sp.PIPE)
p2 = sp.Popen(bcf_cmd, stdin=p1.stdout, stdout=sp.PIPE)
p3 = sp.Popen(buz_cmd, stdin=p2.stdout, stdout=output_handle)
p3.communicate()
sp.call(idx_cmd)
return vcf_file_out
def build_depth_file(vcf_file_out, depth_file):
""" builds depth tsv file for consensus masking """
fil_cmd = bcftools_filter(vcf_file_out)
que_cmd = bcftools_query()
with open(depth_file, 'w+') as output_handle:
p1 = sp.Popen(fil_cmd, stdout=sp.PIPE)
p2 = sp.Popen(que_cmd, stdin=p1.stdout, stdout=output_handle)
p2.communicate()
return True
def read_depth_positions(depth_file):
    """ reads the (chrom, pos) tsv written by build_depth_file and returns
    the chromosome name plus the set of well-covered positions """
    pos_set = set()
    name = False
    with open(depth_file) as input_handle:
        for line in input_handle:
            line = line.split("\t")
            if len(line) == 2:
                if not name:
                    name = line[0].strip()
                # the file has no header row, so every line (including the
                # first) carries a position that must be recorded
                pos_set.add(int(line[1].strip()))
    return name, pos_set
def write_new_depth_file(fasta, depth_file, name, pos_set):
    """ rewrites the depth file to list every reference position that is
    NOT in pos_set, i.e. the positions to mask in the consensus """
with open(depth_file, "w+") as output_handle:
with open(fasta) as input_handle:
for record in SeqIO.parse(input_handle, "fasta"):
for num, char in enumerate(record.seq, start=1):
if num not in pos_set:
output_handle.write(
"{}\t{}\n".format(name, num)
)
return True
def invert_extend_depth_file(ref_file, depth_file):
    """ turns the file of well-covered positions into the file of
    positions to mask; writes an empty file if nothing was covered """
name, pos_set = read_depth_positions(depth_file)
if name:
write_new_depth_file(ref_file, depth_file, name, pos_set)
else:
open(depth_file, "w+").close()
return True
def build_consensus(vcf_file_out, ref_file, depth_file, cns_file):
""" builds masked consensus file """
cns_cmd = [BCFTOOLS_PATH,
'consensus', vcf_file_out,
'-f', ref_file,
'-m', depth_file]
with open(cns_file, 'w+') as output_handle:
status = sp.call(cns_cmd, stdout=output_handle)
return status
def pipe_consensus(fasta, fw_fq, rv_fq, orig_fastq=False):
""" runs all of the consensus commands in the right order """
pair_name = get_fastq_pair_name(fw_fq, rv_fq)
bamfile_working = get_bam_file_working_path(fasta, pair_name)
bamfile_final = get_bam_file_path(fasta, pair_name)
vcf_file_out = get_vcf_file_path(fasta, pair_name)
depth_file = get_depth_file_path(fasta, pair_name)
cns_file = get_cns_file_path(fasta, pair_name)
build_fasta_indices(fasta)
build_working_bam(fasta, fw_fq, rv_fq, bamfile_working, orig_fastq)
build_final_bam(bamfile_working, bamfile_final)
vcf_file_out = build_vcf(fasta, bamfile_final, vcf_file_out)
build_depth_file(vcf_file_out, depth_file)
invert_extend_depth_file(fasta, depth_file)
build_consensus(vcf_file_out, fasta, depth_file, cns_file)
remask_if_empty(fasta, cns_file)
return True
def pipe_consensus_argslist(args):
""" runs the pipe consensus command with 1 argument """
if len(args) == 3:
fasta, fw_rd, rv_rd = args
pipe_consensus(fasta, fw_rd, rv_rd)
return True
elif len(args) == 4:
fasta, fw_rd, rv_rd, orig_fastq = args
pipe_consensus(fasta, fw_rd, rv_rd, orig_fastq=orig_fastq)
return True
return False
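# Example driver (a sketch; the paths are hypothetical and the module must
# be imported as part of the cgap package so the relative imports resolve):
#     from cgap.build_consensus import pipe_consensus
#     pipe_consensus('ref.fasta', 'sample_R1.fastq', 'sample_R2.fastq',
#                    orig_fastq=True)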
|
TheCulliganMan/cgap
|
cgap/build_consensus.py
|
Python
|
gpl-3.0
| 8,889
|
[
"BWA"
] |
6c9712f47c1137674f1656210d2262786fa600dfcf70591860dc3170c9c45493
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
***********************************
**espressopp.esutil.NormalVariate**
***********************************
.. function:: espressopp.esutil.NormalVariate(mean, sigma)
:param mean: (default: 0.0)
:param sigma: (default: 1.0)
:type mean: real
:type sigma: real
"""
from espressopp import pmi
from espressopp.esutil import cxxinit  # assumed import path, as used across espressopp modules
from _espressopp import esutil_NormalVariate
class NormalVariateLocal(esutil_NormalVariate):
def __init__(self, mean=0.0, sigma=1.0):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, esutil_NormalVariate, mean, sigma)
if pmi.isController:
class NormalVariate(object):
__metaclass__ = pmi.Proxy
"""A random normal variate."""
pmiproxydefs = dict(
cls = 'espressopp.esutil.NormalVariateLocal',
localcall = [ '__call__' ],
)
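# Usage sketch (assumes an initialized espressopp session; sampling goes
# through the proxied __call__ listed in pmiproxydefs):
#     nv = espressopp.esutil.NormalVariate(mean=0.0, sigma=2.0)
#     x = nv()  # one normally distributed random number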
|
capoe/espressopp.soap
|
src/esutil/NormalVariate.py
|
Python
|
gpl-3.0
| 1,754
|
[
"ESPResSo"
] |
9e4e154022ceee760e861ed9f13d354aa73a8c19661d88175286bd9aad1b14b1
|
"""
# Notes:
- This simulation seeks to emulate the CUBA benchmark simulations of (Brette
et al. 2007) using the Brian2 simulator for speed benchmark comparison to
DynaSim. However, this simulation does NOT include synapses, for better
comparison to Figure 5 of (Goodman and Brette, 2008).
- The time taken to simulate will be indicated in the stdout log file
'~/batchdirs/brian2_benchmark_CUBA_nosyn_compiled_0016/pbsout/brian2_benchmark_CUBA_nosyn_compiled_0016.out'
- Note that this code has been slightly modified from the original (Brette et
al. 2007) benchmarking code, available here on ModelDB:
https://senselab.med.yale.edu/modeldb/showModel.cshtml?model=83319
in order to work with version 2 of the Brian simulator (aka Brian2), and also
modified to change the model being benchmarked, etc.
# References:
- Brette R, Rudolph M, Carnevale T, Hines M, Beeman D, Bower JM, et al.
Simulation of networks of spiking neurons: A review of tools and strategies.
Journal of Computational Neuroscience 2007;23:349–98.
doi:10.1007/s10827-007-0038-6.
- Goodman D, Brette R. Brian: a simulator for spiking neural networks in Python.
Frontiers in Neuroinformatics 2008;2. doi:10.3389/neuro.11.005.2008.
"""
from brian2 import *
set_device('cpp_standalone')
prefs.codegen.cpp.extra_compile_args = ['-w', '-O3', '-ffast-math', '-march=native']
# Parameters
cells = 16
defaultclock.dt = 0.01*ms
taum=20*ms
Vt = -50*mV
Vr = -60*mV
El = -49*mV
# The model
eqs = Equations('''
dv/dt = (v-El)/taum : volt
''')
P = NeuronGroup(cells, model=eqs,threshold="v>Vt",reset="v=Vr",refractory=5*ms,
method='euler')
proportion=int(0.8*cells)
Pe = P[:proportion]
Pi = P[proportion:]
# Initialization
P.v = Vr
# Record a few traces
trace = StateMonitor(P, 'v', record=[1, 10, 15])  # indices must be < cells (16); 100 would raise an error
totaldata = StateMonitor(P, 'v', record=True)
run(0.5 * second, report='text')
# plot(trace.t/ms, trace[1].v/mV)
# plot(trace.t/ms, trace[10].v/mV)
# plot(trace.t/ms, trace[15].v/mV)
# xlabel('t (ms)')
# ylabel('v (mV)')
# show()
# print("Saving TC cell voltages!")
# numpy.savetxt("foo_totaldata.csv", totaldata.v/mV, delimiter=",")
|
asoplata/dynasim-benchmark-brette-2007
|
Brian2/brian2_benchmark_CUBA_nosyn_compiled_0016.py
|
Python
|
gpl-3.0
| 2,138
|
[
"Brian"
] |
2b2687fa476d97ada0b17edcd0aaabe371b8d66917937c1413c166fb5272d5a9
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2007-2008 Matthew Perry
Copyright (C) 2008-2010 Borys Jurgiel
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import QgsApplication, QgsContextHelp
import sys, time
from ui_qgsplugininstallerfetchingbase import Ui_QgsPluginInstallerFetchingDialogBase
from ui_qgsplugininstallerinstallingbase import Ui_QgsPluginInstallerInstallingDialogBase
from ui_qgsplugininstallerrepositorybase import Ui_QgsPluginInstallerRepositoryDetailsDialogBase
from ui_qgsplugininstallerpluginerrorbase import Ui_QgsPluginInstallerPluginErrorDialogBase
from ui_qgsplugininstallerbase import Ui_QgsPluginInstallerDialogBase
from installer_data import *
try:
from qgis.utils import startPlugin, unloadPlugin, loadPlugin # QGIS >= 1.4
from qgis.utils import reloadPlugin, updateAvailablePlugins # QGIS >= 1.5
except Exception:
pass
# --- common functions ------------------------------------------------------------------- #
def removeDir(path):
result = QString()
if not QFile(path).exists():
result = QCoreApplication.translate("QgsPluginInstaller","Nothing to remove! Plugin directory doesn't exist:")+"\n"+path
elif QFile(path).remove(): # if it is only link, just remove it without resolving.
pass
else:
fltr = QDir.Dirs | QDir.Files | QDir.Hidden
iterator = QDirIterator(path, fltr, QDirIterator.Subdirectories)
while iterator.hasNext():
item = iterator.next()
if QFile(item).remove():
pass
fltr = QDir.Dirs | QDir.Hidden
iterator = QDirIterator(path, fltr, QDirIterator.Subdirectories)
while iterator.hasNext():
item = iterator.next()
if QDir().rmpath(item):
pass
if QFile(path).exists():
result = QCoreApplication.translate("QgsPluginInstaller","Failed to remove the directory:")+"\n"+path+"\n"+QCoreApplication.translate("QgsPluginInstaller","Check permissions or remove it manually")
# restore plugin directory if removed by QDir().rmpath()
pluginDir = QFileInfo(QgsApplication.qgisUserDbFilePath()).path() + "/python/plugins"
if not QDir(pluginDir).exists():
QDir().mkpath(pluginDir)
return result
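# A minimal usage sketch (hypothetical path; removeDir returns an empty
# QString on success and an error message otherwise):
#
# result = removeDir("/home/user/.qgis/python/plugins/old_plugin")
# if not result.isEmpty():
#     QMessageBox.warning(None, "Plugin Installer", result)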
# --- /common functions ------------------------------------------------------------------ #
# --- class QgsPluginInstallerFetchingDialog --------------------------------------------------------------- #
class QgsPluginInstallerFetchingDialog(QDialog, Ui_QgsPluginInstallerFetchingDialogBase):
# ----------------------------------------- #
def __init__(self, parent):
QDialog.__init__(self, parent)
self.setupUi(self)
self.progressBar.setRange(0,len(repositories.allEnabled())*100)
self.itemProgress = {}
self.item = {}
for key in repositories.allEnabled():
self.item[key] = QTreeWidgetItem(self.treeWidget)
self.item[key].setText(0,key)
if repositories.all()[key]["state"] > 1:
self.itemProgress[key] = 100
self.displayState(key,0)
else:
self.itemProgress[key] = 0
self.displayState(key,2)
self.treeWidget.resizeColumnToContents(0)
QObject.connect(repositories, SIGNAL("repositoryFetched(QString)"), self.repositoryFetched)
QObject.connect(repositories, SIGNAL("anythingChanged(QString, int, int)"), self.displayState)
# ----------------------------------------- #
def displayState(self,key,state,state2=None):
messages=[self.tr("Success"),self.tr("Resolving host name..."),self.tr("Connecting..."),self.tr("Host connected. Sending request..."),self.tr("Downloading data..."),self.tr("Idle"),self.tr("Closing connection..."),self.tr("Error")]
message = messages[state]
if state2:
message += " (%s%%)" % state2
self.item[key].setText(1,message)
if state == 4 and state2:
self.itemProgress[key] = state2
totalProgress = sum(self.itemProgress.values())
self.progressBar.setValue(totalProgress)
# ----------------------------------------- #
def repositoryFetched(self, repoName):
self.itemProgress[repoName] = 100
if repositories.all()[repoName]["state"] == 2:
self.displayState(repoName,0)
else:
self.displayState(repoName,7)
if not repositories.fetchingInProgress():
self.close()
# --- /class QgsPluginInstallerFetchingDialog -------------------------------------------------------------- #
# --- class QgsPluginInstallerRepositoryDialog ------------------------------------------------------------- #
class QgsPluginInstallerRepositoryDialog(QDialog, Ui_QgsPluginInstallerRepositoryDetailsDialogBase):
# ----------------------------------------- #
def __init__(self, parent=None):
QDialog.__init__(self, parent)
self.setupUi(self)
self.editURL.setText("http://")
self.connect(self.editName, SIGNAL("textChanged(const QString &)"), self.textChanged)
self.connect(self.editURL, SIGNAL("textChanged(const QString &)"), self.textChanged)
self.textChanged(None)
# ----------------------------------------- #
def textChanged(self, string):
enable = (self.editName.text().count() > 0 and self.editURL.text().count() > 0)
self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(enable)
# --- /class QgsPluginInstallerRepositoryDialog ------------------------------------------------------------ #
# --- class QgsPluginInstallerInstallingDialog --------------------------------------------------------------- #
class QgsPluginInstallerInstallingDialog(QDialog, Ui_QgsPluginInstallerInstallingDialogBase):
# ----------------------------------------- #
def __init__(self, parent, plugin):
QDialog.__init__(self, parent)
self.setupUi(self)
self.plugin = plugin
self.mResult = QString()
self.progressBar.setRange(0,0)
self.progressBar.setFormat(QString("%p%"))
self.labelName.setText(QString(plugin["name"]))
self.connect(self.buttonBox, SIGNAL("clicked(QAbstractButton*)"), self.abort)
url = QUrl(plugin["url"])
path = QString(url.toPercentEncoding(url.path(), "!$&'()*+,;=:/@"))
fileName = plugin["filename"]
tmpDir = QDir.tempPath()
tmpPath = QDir.cleanPath(tmpDir+"/"+fileName)
self.file = QFile(tmpPath)
port = url.port()
if port < 0:
port = 80
self.http = QPHttp(url.host(), port)
self.connect(self.http, SIGNAL("stateChanged ( int )"), self.stateChanged)
self.connect(self.http, SIGNAL("dataReadProgress ( int , int )"), self.readProgress)
self.connect(self.http, SIGNAL("requestFinished (int, bool)"), self.requestFinished)
self.httpGetId = self.http.get(path, self.file)
# ----------------------------------------- #
def result(self):
return self.mResult
# ----------------------------------------- #
def stateChanged(self, state):
messages=[self.tr("Installing..."),self.tr("Resolving host name..."),self.tr("Connecting..."),self.tr("Host connected. Sending request..."),self.tr("Downloading data..."),self.tr("Idle"),self.tr("Closing connection..."),self.tr("Error")]
self.labelState.setText(messages[state])
# ----------------------------------------- #
def readProgress(self, done, total):
self.progressBar.setMaximum(total)
self.progressBar.setValue(done)
# ----------------------------------------- #
def requestFinished(self, requestId, state):
if requestId != self.httpGetId:
return
self.buttonBox.setEnabled(False)
if state:
self.mResult = self.http.errorString()
self.reject()
return
self.file.close()
pluginDir = QFileInfo(QgsApplication.qgisUserDbFilePath()).path() + "/python/plugins"
tmpPath = self.file.fileName()
# make sure that the parent directory exists
if not QDir(pluginDir).exists():
QDir().mkpath(pluginDir)
# if the target directory already exists as a link, remove the link without resolving:
QFile(pluginDir+QString(QDir.separator())+self.plugin["localdir"]).remove()
try:
un = unzip()
un.extract(unicode(tmpPath), unicode(pluginDir)) # test extract. If fails, then exception will be raised and no removing occurs
# removing old plugin files if exist
removeDir(QDir.cleanPath(pluginDir+"/"+self.plugin["localdir"])) # remove old plugin if exists
un.extract(unicode(tmpPath), unicode(pluginDir)) # final extract.
except:
self.mResult = self.tr("Failed to unzip the plugin package. Probably it's broken or missing from the repository. You may also want to make sure that you have write permission to the plugin directory:") + "\n" + pluginDir
self.reject()
return
try:
# cleaning: removing the temporary zip file
QFile(tmpPath).remove()
except:
pass
self.close()
# ----------------------------------------- #
def abort(self):
self.http.abort()
self.mResult = self.tr("Aborted by user")
self.reject()
# --- /class QgsPluginInstallerInstallingDialog ------------------------------------------------------------- #
# --- class QgsPluginInstallerPluginErrorDialog -------------------------------------------------------------- #
class QgsPluginInstallerPluginErrorDialog(QDialog, Ui_QgsPluginInstallerPluginErrorDialogBase):
# ----------------------------------------- #
def __init__(self, parent, errorMessage):
QDialog.__init__(self, parent)
self.setupUi(self)
if not errorMessage:
errorMessage = self.tr("no error message received")
self.textBrowser.setText(errorMessage)
# --- /class QgsPluginInstallerPluginErrorDialog ------------------------------------------------------------- #
# --- class QgsPluginInstallerDialog ------------------------------------------------------------------------- #
class QgsPluginInstallerDialog(QDialog, Ui_QgsPluginInstallerDialogBase):
# ----------------------------------------- #
def __init__(self, parent):
QDialog.__init__(self, parent)
self.setupUi(self)
self.reposGroup = "/Qgis/plugin-repos"
self.connect(self.lineFilter, SIGNAL("textChanged (QString)"), self.filterChanged)
self.connect(self.comboFilter1, SIGNAL("currentIndexChanged (int)"), self.filterChanged)
self.connect(self.comboFilter2, SIGNAL("currentIndexChanged (int)"), self.filterChanged)
# grab clicks on trees
self.connect(self.treePlugins, SIGNAL("itemSelectionChanged()"), self.pluginTreeClicked)
self.connect(self.treeRepositories, SIGNAL("itemSelectionChanged()"), self.repositoryTreeClicked)
# buttons
self.connect(self.buttonUpgradeAll, SIGNAL("clicked()"), self.upgradeAllClicked)
self.connect(self.buttonInstall, SIGNAL("clicked()"), self.installPluginClicked)
self.connect(self.buttonUninstall, SIGNAL("clicked()"), self.uninstallPluginClicked)
self.buttonInstall.setEnabled(False)
self.buttonUninstall.setEnabled(False)
self.buttonHelp.setEnabled(QGIS_14)
self.connect(self.buttonHelp, SIGNAL("clicked()"), self.runHelp)
# repositories handling
self.connect(self.treeRepositories, SIGNAL("doubleClicked(QModelIndex)"), self.editRepository)
#self.connect(self.buttonFetchRepositories, SIGNAL("clicked()"), self.addKnownRepositories)
self.connect(self.buttonAddRep, SIGNAL("clicked()"), self.addRepository)
self.connect(self.buttonEditRep, SIGNAL("clicked()"), self.editRepository)
self.connect(self.buttonDeleteRep, SIGNAL("clicked()"), self.deleteRepository)
self.buttonEditRep.setEnabled(False)
self.buttonDeleteRep.setEnabled(False)
# configuration widgets
self.connect(self.checkUpdates, SIGNAL("toggled (bool)"), self.changeCheckingPolicy)
self.connect(self.comboInterval, SIGNAL("currentIndexChanged (int)"), self.changeCheckingInterval)
self.connect(self.radioPluginType0, SIGNAL("toggled (bool)"), self.changePluginPolicy)
self.connect(self.radioPluginType1, SIGNAL("toggled (bool)"), self.changePluginPolicy)
self.connect(self.radioPluginType2, SIGNAL("toggled (bool)"), self.changePluginPolicy)
if repositories.checkingOnStart():
self.checkUpdates.setChecked(Qt.Checked)
else:
self.checkUpdates.setChecked(Qt.Unchecked)
interval = repositories.checkingOnStartInterval()
intervals = [0,1,3,7,14,30] # days
if intervals.count(interval):
index = intervals.index(interval)
else:
index = 1
self.comboInterval.setCurrentIndex(index)
self.populateMostWidgets()
# ----------------------------------------- #
def getAllAvailablePlugins(self):
""" fetch plugins from all repositories """
repositories.load()
plugins.getAllInstalled()
for key in repositories.allEnabled():
repositories.requestFetching(key)
if repositories.fetchingInProgress():
self.fetchDlg = QgsPluginInstallerFetchingDialog(self)
self.fetchDlg.exec_()
del self.fetchDlg
for key in repositories.all():
repositories.killConnection(key)
# display error messages for every unavailable repository, unless Shift is pressed or all repositories are unavailable
keepQuiet = QgsApplication.keyboardModifiers() == Qt.KeyboardModifiers(Qt.ShiftModifier)
if repositories.allUnavailable() and repositories.allUnavailable() != repositories.allEnabled():
for key in repositories.allUnavailable():
if not keepQuiet:
QMessageBox.warning(self, self.tr("QGIS Python Plugin Installer"), self.tr("Error reading repository:") + " " + key + "\n" + repositories.all()[key]["error"])
if QgsApplication.keyboardModifiers() == Qt.KeyboardModifiers(Qt.ShiftModifier):
keepQuiet = True
# ----------------------------------------- #
def populateMostWidgets(self):
self.comboFilter1.clear()
self.comboFilter1.addItem(self.tr("all repositories"))
self.treeRepositories.clear()
for key in repositories.all():
a = QTreeWidgetItem(self.treeRepositories)
a.setText(1,key)
a.setText(2,repositories.all()[key]["url"])
if repositories.all()[key]["enabled"] and repositories.all()[key]["valid"]:
if repositories.all()[key]["state"] == 2:
a.setText(0,self.tr("connected"))
a.setIcon(0,QIcon(":/plugins/installer/repoConnected.png"))
a.setToolTip(0,self.tr("This repository is connected"))
self.comboFilter1.addItem(key)
else:
a.setText(0,self.tr("unavailable"))
a.setIcon(0,QIcon(":/plugins/installer/repoUnavailable.png"))
a.setToolTip(0,self.tr("This repository is enabled, but unavailable"))
self.comboFilter1.addItem(key)
else:
a.setText(0,self.tr("disabled"))
a.setIcon(0,QIcon(":/plugins/installer/repoDisabled.png"))
if repositories.all()[key]["valid"]:
a.setToolTip(0,self.tr("This repository is disabled"))
else:
a.setToolTip(0,self.tr("This repository is blocked due to incompatibility with your Quantum GIS version"))
for i in [0,1,2]:
a.setForeground(i,QBrush(QColor(Qt.gray)))
for i in [0,1,2]:
self.treeRepositories.resizeColumnToContents(i)
self.comboFilter1.addItem(self.tr("orphans"))
# fill the status filter comboBox
self.comboFilter2.clear()
self.comboFilter2.addItem(self.tr("any status"))
self.comboFilter2.addItem(self.tr("not installed", "plural"))
self.comboFilter2.addItem(self.tr("installed", "plural"))
if plugins.isThereAnythingNew():
self.comboFilter2.addItem(self.tr("upgradeable and news"))
settings = QSettings()
(i, ok) = settings.value(settingsGroup+"/allowedPluginType", QVariant(1)).toInt()
if i == 1:
self.radioPluginType0.setChecked(Qt.Checked)
elif i == 2:
self.radioPluginType1.setChecked(Qt.Checked)
else:
self.radioPluginType2.setChecked(Qt.Checked)
# ----------------------------------------- #
def filterChanged(self,i):
""" one of the filter widgets has been changed """
self.populatePluginTree()
# ----------------------------------------- #
def filterCheck(self,plugin):
""" the filter for the pluginsTree """
if self.comboFilter1.currentIndex() != 0 and self.comboFilter1.currentText() != self.tr("orphans"):
if self.comboFilter1.currentText() != plugin["repository"]:
return False
elif self.comboFilter1.currentText() == self.tr("orphans"):
if plugin["status"] != "orphan":
return False
if self.comboFilter2.currentIndex() == 1 and not plugin["status"] in ["not installed","new"]:
return False
if self.comboFilter2.currentIndex() == 2 and not plugin["status"] in ["installed","upgradeable","newer","orphan"]:
return False
if self.comboFilter2.currentIndex() == 3 and not plugin["status"] in ["upgradeable","new"]:
return False
if self.lineFilter.text() == "":
return True
else:
for i in ["name","version_inst","version_avail","desc_repo","desc_local","author","status","repository"]:
item = QString(plugin[i])
if item != None:
if item.contains(self.lineFilter.text(), Qt.CaseInsensitive):
return True
return False
# ----------------------------------------- #
def populatePluginTree(self):
""" fill up the pluginTree """
statusTips={"not installed" : self.tr("This plugin is not installed"),
"installed" : self.tr("This plugin is installed"),
"upgradeable" : self.tr("This plugin is installed, but there is an updated version available"),
"orphan" : self.tr("This plugin is installed, but I can't find it in any enabled repository"),
"new" : self.tr("This plugin is not installed and is seen for the first time"),
"newer" : self.tr("This plugin is installed and is newer than its version available in a repository"),
"incompatible" : self.tr("This plugin is incompatible with your Quantum GIS version and probably won't work."),
"dependent" : self.tr("The required Python module is not installed.\nFor more information, please visit its homepage and Quantum GIS wiki."),
"broken" : self.tr("This plugin seems to be broken.\nIt has been installed but can't be loaded.\nHere is the error message:")}
statuses ={"not installed" : self.tr("not installed", "singular"),
"installed" : self.tr("installed", "singular"),
"upgradeable" : self.tr("upgradeable", "singular"),
"orphan" : self.tr("installed", "singular"),
"new" : self.tr("new!", "singular"),
"newer" : self.tr("installed", "singular"),
"incompatible" : self.tr("invalid", "singular"),
"dependent" : self.tr("invalid", "singular"),
"broken" : self.tr("invalid", "singular")}
orderInvalid = ["incompatible","broken","dependent"]
orderValid = ["upgradeable","new","not installed","installed","orphan","newer"]
def addItem(p):
if self.filterCheck(p):
statusTip = statusTips[p["status"]]
if p["read-only"]:
statusTip = statusTip + "\n" + self.tr("Note that it's an uninstallable core plugin")
installedVersion = p["version_inst"]
if not installedVersion:
installedVersion = "?"
availableVersion = p["version_avail"]
if not availableVersion:
availableVersion = "?"
if p["status"] == "upgradeable":
ver = installedVersion + " -> " + availableVersion
elif p["status"] == "newer":
ver = installedVersion + " (" + availableVersion + ")"
elif p["status"] in ["not installed", "new"]:
ver = availableVersion
else:
ver = installedVersion
if p["status"] in ["upgradeable","newer"] or p["error"]:
verTip = self.tr("installed version") + ": " + installedVersion + "\n" + self.tr("available version") + ": " + availableVersion
elif p["status"] in ["not installed", "new"]:
verTip = self.tr("available version") + ": " + availableVersion
elif p["status"] == "installed":
verTip = self.tr("installed version") + ": " + installedVersion + "\n" + self.tr("That's the newest available version")
elif p["status"] == "orphan":
verTip = self.tr("installed version") + ": " + installedVersion + "\n" + self.tr("There is no version available for download")
else:
verTip = ""
if p["error"] == "broken":
desc = self.tr("This plugin is broken")
descTip = statusTips[p["error"]] + "\n" + p["error_details"]
statusTip = descTip
elif p["error"] == "incompatible":
desc = self.tr("This plugin requires a newer version of Quantum GIS") + " (" + self.tr("at least")+ " " + p["error_details"] + ")"
descTip = statusTips[p["error"]]
statusTip = descTip
elif p["error"] == "dependent":
desc = self.tr("This plugin requires a missing module") + " (" + p["error_details"] + ")"
descTip = statusTips[p["error"]]
statusTip = descTip
else:
desc = p["desc_local"]
descTip = p["desc_repo"]
if not desc:
desc = descTip
if not p["repository"]:
repository = self.tr("only locally available")
else:
repository = p["repository"]
a = QTreeWidgetItem(self.treePlugins)
if p["error"]:
a.setText(0,statuses[p["error"]])
else:
a.setText(0,statuses[p["status"]])
a.setToolTip(0,statusTip)
a.setText(1,p["name"])
a.setText(2,ver)
a.setToolTip(2,verTip)
a.setText(3,desc)
a.setToolTip(3,descTip)
a.setText(4,p["author"])
if p["homepage"]:
a.setToolTip(4,p["homepage"])
else:
a.setToolTip(4,"")
a.setText(5,repository)
a.setToolTip(5,p["url"])
# set fonts and colors
for i in [0,1,2,3,4,5]:
if p["error"]:
a.setForeground(i,QBrush(QColor(Qt.red)))
if p["status"] in ["new","upgradeable"] or p["error"]:
font = QFont()
font.setWeight(QFont.Bold)
a.setFont(i,font)
# -------- #
if not plugins.all():
return
self.treePlugins.clear()
for i in orderInvalid:
for p in plugins.all().values():
if p["error"] == i:
addItem(p)
n = 0 # displayed plugins count
self.upgradeablePlugins = [] # list of plugins able to update
for i in orderValid:
for p in plugins.all().values():
if p["status"] == i and not p["error"]:
addItem(p)
if p["status"] == "upgradeable": self.upgradeablePlugins += [p["localdir"]]
n +=1
self.setWindowTitle(self.tr("QGIS Python Plugin Installer") + self.tr(" - %d plugins available" % len(plugins.all())))
self.buttonUpgradeAll.setEnabled( len(self.upgradeablePlugins) )
# initially, keep insert order
self.treePlugins.sortItems(100,Qt.AscendingOrder)
# resize the columns
for i in [0,1,2,3,4,5]:
self.treePlugins.resizeColumnToContents(i)
for i in [0,1,2,4,5]:
if self.treePlugins.columnWidth(i) > 260:
self.treePlugins.setColumnWidth(i, 260)
if self.treePlugins.columnWidth(3) > 560:
self.treePlugins.setColumnWidth(3, 560)
# ----------------------------------------- #
def pluginTreeClicked(self):
""" the pluginsTree has been clicked """
buttons={"not installed":(True,False,self.tr("Install plugin")),
"installed":(True,True,self.tr("Reinstall plugin")),
"upgradeable":(True,True,self.tr("Upgrade plugin")),
"orphan":(False,True,self.tr("Install/upgrade plugin")),
"new":(True,False,self.tr("Install plugin")),
"newer":(True,True,self.tr("Downgrade plugin"))}
self.buttonInstall.setEnabled(False)
self.buttonInstall.setText(self.tr("Install/upgrade plugin"))
self.buttonUninstall.setEnabled(False)
if not self.treePlugins.selectedItems():
return
item = self.treePlugins.currentItem()
if not item:
return
key = plugins.keyByUrl(item.toolTip(5))
if not key:
return
plugin = plugins.all()[key]
if not plugin:
return
self.buttonInstall.setEnabled(buttons[plugin["status"]][0])
self.buttonUninstall.setEnabled(buttons[plugin["status"]][1])
self.buttonInstall.setText(buttons[plugin["status"]][2])
if plugin["read-only"]:
self.buttonUninstall.setEnabled(False)
# ----------------------------------------- #
def upgradeAllClicked(self):
for key in self.upgradeablePlugins:
self.installPlugin(key, quiet=True)
# ----------------------------------------- #
def installPluginClicked(self):
if not self.treePlugins.currentItem():
return
key = plugins.keyByUrl(self.treePlugins.currentItem().toolTip(5))
self.installPlugin(key)
# ----------------------------------------- #
def uninstallPluginClicked(self):
if not self.treePlugins.currentItem():
return
key = plugins.keyByUrl(self.treePlugins.currentItem().toolTip(5))
self.uninstallPlugin(key)
# ----------------------------------------- #
def installPlugin(self, key, quiet=False):
""" install currently selected plugin """
infoString = ('','')
plugin = plugins.all()[key]
previousStatus = plugin["status"]
if not plugin:
return
if plugin["status"] == "newer" and not plugin["error"]: # ask for confirmation if user downgrades an usable plugin
if QMessageBox.warning(self, self.tr("QGIS Python Plugin Installer"), self.tr("Are you sure you want to downgrade the plugin to the latest available version? The installed one is newer!"), QMessageBox.Yes, QMessageBox.No) == QMessageBox.No:
return
dlg = QgsPluginInstallerInstallingDialog(self,plugin)
dlg.exec_()
if dlg.result():
infoString = (self.tr("Plugin installation failed"), dlg.result())
elif not QDir(QDir.cleanPath(QgsApplication.qgisSettingsDirPath() + "/python/plugins/" + key)).exists():
infoString = (self.tr("Plugin has disappeared"), self.tr("The plugin seems to have been installed but I don't know where. Probably the plugin package contained a wrong named directory.\nPlease search the list of installed plugins. I'm nearly sure you'll find the plugin there, but I just can't determine which of them it is. It also means that I won't be able to determine if this plugin is installed and inform you about available updates. However the plugin may work. Please contact the plugin author and submit this issue."))
QApplication.setOverrideCursor(Qt.WaitCursor)
plugins.getAllInstalled()
plugins.rebuild()
QApplication.restoreOverrideCursor()
else:
if QGIS_14:
if QGIS_15: # update the list of plugins in plugin handling routines
updateAvailablePlugins()
# try to load the plugin
loadPlugin(plugin["localdir"])
else: # QGIS < 1.4
try:
exec ("sys.path_importer_cache.clear()")
exec ("import %s" % plugin["localdir"])
exec ("reload (%s)" % plugin["localdir"])
except:
pass
plugins.getAllInstalled(testLoad=True)
plugins.rebuild()
plugin = plugins.all()[key]
if not plugin["error"]:
if previousStatus in ["not installed", "new"]:
if QGIS_14: # plugins can be started in python from QGIS >= 1.4
infoString = (self.tr("Plugin installed successfully"), self.tr("Plugin installed successfully"))
settings = QSettings()
settings.setValue("/PythonPlugins/"+plugin["localdir"], QVariant(True))
startPlugin(plugin["localdir"])
else: infoString = (self.tr("Plugin installed successfully"), self.tr("Python plugin installed.\nNow you need to enable it in Plugin Manager."))
else:
if QGIS_15: # plugins can be reloaded on the fly in QGIS >= 1.5
settings = QSettings()
if key != 'plugin_installer' and settings.value("/PythonPlugins/"+key).toBool(): # plugin will be reloaded on the fly only if currently loaded
infoString = (self.tr("Plugin reinstalled successfully"), self.tr("Plugin reinstalled successfully"))
reloadPlugin(key)
else:
infoString = (self.tr("Plugin reinstalled successfully"), self.tr("Python plugin reinstalled.\nYou need to restart Quantum GIS in order to reload it."))
else: infoString = (self.tr("Plugin reinstalled successfully"), self.tr("Python plugin reinstalled.\nYou need to restart Quantum GIS in order to reload it."))
if quiet:
infoString = (None, None)
else:
if plugin["error"] == "incompatible":
message = self.tr("The plugin is designed for a newer version of Quantum GIS. The minimum required version is:")
message += " <b>" + plugin["error_details"] + "</b>"
elif plugin["error"] == "dependent":
message = self.tr("The plugin depends on some components missing on your system. You need to install the following Python module in order to enable it:")
message += "<b> " + plugin["error_details"] + "</b>"
else:
message = self.tr("The plugin is broken. Python said:")
message += "<br><b>" + plugin["error_details"] + "</b>"
dlg = QgsPluginInstallerPluginErrorDialog(self,message)
dlg.exec_()
if dlg.result():
# revert installation
plugins.getAllInstalled()
plugins.rebuild()
pluginDir = QFileInfo(QgsApplication.qgisUserDbFilePath()).path() + "/python/plugins/" + plugin["localdir"]
result = removeDir(pluginDir)
if QDir(pluginDir).exists():
infoString = (self.tr("Plugin uninstall failed"), result)
try:
exec ("sys.path_importer_cache.clear()")
exec ("import %s" % plugin["localdir"])
exec ("reload (%s)" % plugin["localdir"])
except:
pass
else:
try:
exec ("del sys.modules[%s]" % plugin["localdir"])
except:
pass
plugins.getAllInstalled()
plugins.rebuild()
if plugins.all().has_key(key) and not plugins.all()[key]["status"] in ["not installed", "new"]:
if previousStatus in ["not installed", "new"]:
history.markChange(key,'A')
else:
history.markChange(key,'R')
self.populatePluginTree()
if infoString[0]:
QMessageBox.information(self, infoString[0], infoString[1])
# ----------------------------------------- #
def uninstallPlugin(self,key):
""" uninstall currently selected plugin """
plugin = plugins.all()[key]
if not plugin:
return
warning = self.tr("Are you sure you want to uninstall the following plugin?") + "\n(" + plugin["name"] + ")"
if plugin["status"] == "orphan" and not plugin["error"]:
warning += "\n\n"+self.tr("Warning: this plugin isn't available in any accessible repository!")
if QMessageBox.warning(self, self.tr("QGIS Python Plugin Installer"), warning , QMessageBox.Yes, QMessageBox.No) == QMessageBox.No:
return
# unload the plugin if it's not plugin_installer itself (otherwise, do it after removing its directory):
if key != "plugin_installer":
try:
unloadPlugin(key)
except:
pass
pluginDir = QFileInfo(QgsApplication.qgisUserDbFilePath()).path() + "/python/plugins/" + plugin["localdir"]
result = removeDir(pluginDir)
if result:
QMessageBox.warning(self, self.tr("Plugin uninstall failed"), result)
else:
# if the uninstalled plugin is the installer itself, reload it and quit
if key == "plugin_installer":
if QGIS_15:
try:
QMessageBox.information(self, self.tr("QGIS Python Plugin Installer"), self.tr("Plugin Installer update uninstalled. Plugin Installer will now close and revert to its primary version. You can find it in the Plugins menu and continue operation."))
reloadPlugin(key)
return
except:
pass
else:
QMessageBox.information(self, self.tr("QGIS Python Plugin Installer"), self.tr("Plugin Installer update uninstalled. Please restart QGIS in order to load its primary version."))
# safe remove
try:
unloadPlugin(plugin["localdir"])
except:
pass
try:
exec ("plugins[%s].unload()" % plugin["localdir"])
exec ("del plugins[%s]" % plugin["localdir"])
except:
pass
try:
exec ("del sys.modules[%s]" % plugin["localdir"])
except:
pass
plugins.getAllInstalled()
plugins.rebuild()
self.populatePluginTree()
if QGIS_14: QMessageBox.information(self, self.tr("Plugin uninstalled successfully"), self.tr("Plugin uninstalled successfully"))
else: QMessageBox.information(self, self.tr("Plugin uninstalled successfully"), self.tr("Python plugin uninstalled. Note that you may need to restart Quantum GIS in order to remove it completely."))
history.markChange(key,'D')
# ----------------------------------------- #
def repositoryTreeClicked(self):
""" the repositoryTree has been clicked """
if self.treeRepositories.selectedItems():
self.buttonEditRep.setEnabled(True)
self.buttonDeleteRep.setEnabled(True)
else:
self.buttonEditRep.setEnabled(False)
self.buttonDeleteRep.setEnabled(False)
# ----------------------------------------- #
def changeCheckingPolicy(self,policy):
""" the Checking On Start checkbox has been clicked """
if policy:
repositories.setCheckingOnStart(True)
else:
repositories.setCheckingOnStart(False)
# ----------------------------------------- #
def changeCheckingInterval(self,interval):
""" the Checking on start interval combobox has been clicked """
intervals = [0,1,3,7,14,30]
repositories.setCheckingOnStartInterval(intervals[interval])
# ----------------------------------------- #
def changePluginPolicy(self, state):
""" one of the plugin type radiobuttons has been clicked """
if not state: # radio button released
return
if self.radioPluginType0.isChecked():
i = 1
elif self.radioPluginType1.isChecked():
i = 2
else:
i = 3
settings = QSettings()
settings.setValue(settingsGroup+"/allowedPluginType", QVariant(i))
plugins.rebuild()
self.populatePluginTree()
## deprecated in qgis 1.8 until we use 3rd party repos again
# ----------------------------------------- #
#def addKnownRepositories(self):
#""" update list of known repositories - in the future it will be replaced with an online fetching """
#message = self.tr("You are about to add several plugin repositories that are neither authorized nor supported by the Quantum GIS team. Plugin authors generally make efforts to ensure that their work is useful and safe, however, we can assume no responsibility for them.")
#if QMessageBox.question(self, self.tr("QGIS Python Plugin Installer"), message, QMessageBox.Ok, QMessageBox.Abort) == QMessageBox.Ok:
#repositories.addKnownRepos()
## refresh lists and populate widgets
#QApplication.setOverrideCursor(Qt.WaitCursor)
#self.getAllAvailablePlugins()
#plugins.rebuild()
#self.populateMostWidgets()
#self.populatePluginTree()
#QApplication.restoreOverrideCursor()
# ----------------------------------------- #
def addRepository(self):
""" add repository button has been clicked """
dlg = QgsPluginInstallerRepositoryDialog(self)
dlg.checkBoxEnabled.setCheckState(Qt.Checked)
if not dlg.exec_():
return
for i in repositories.all().values():
if dlg.editURL.text().trimmed() == i["url"]:
QMessageBox.warning(self, self.tr("QGIS Python Plugin Installer"), self.tr("Unable to add another repository with the same URL!"))
return
settings = QSettings()
settings.beginGroup(self.reposGroup)
reposName = dlg.editName.text()
reposURL = dlg.editURL.text().trimmed()
if repositories.all().has_key(reposName):
reposName = reposName + "(2)"
# add to settings
settings.setValue(reposName+"/url", QVariant(reposURL))
settings.setValue(reposName+"/enabled", QVariant(bool(dlg.checkBoxEnabled.checkState())))
# refresh lists and populate widgets
QApplication.setOverrideCursor(Qt.WaitCursor)
plugins.removeRepository(reposName)
self.getAllAvailablePlugins()
plugins.rebuild()
self.populateMostWidgets()
self.populatePluginTree()
QApplication.restoreOverrideCursor()
# ----------------------------------------- #
def editRepository(self):
""" edit repository button has been clicked """
checkState={False:Qt.Unchecked,True:Qt.Checked}
current = self.treeRepositories.currentItem()
if current == None:
return
reposName = current.text(1)
dlg = QgsPluginInstallerRepositoryDialog(self)
dlg.editName.setText(reposName)
dlg.editURL.setText(repositories.all()[reposName]["url"])
dlg.checkBoxEnabled.setCheckState(checkState[repositories.all()[reposName]["enabled"]])
if repositories.all()[reposName]["valid"]:
dlg.checkBoxEnabled.setEnabled(True)
dlg.labelInfo.setText("")
else:
dlg.checkBoxEnabled.setEnabled(False)
dlg.labelInfo.setText(self.tr("This repository is blocked due to incompatibility with your Quantum GIS version"))
dlg.labelInfo.setFrameShape(QFrame.Box)
if not dlg.exec_():
return # nothing to do if cancelled
for i in repositories.all().values():
if dlg.editURL.text().trimmed() == i["url"] and dlg.editURL.text().trimmed() != repositories.all()[reposName]["url"]:
QMessageBox.warning(self, self.tr("QGIS Python Plugin Installer"), self.tr("Unable to add another repository with the same URL!"))
return
# delete old repo from QSettings and create new one
settings = QSettings()
settings.beginGroup(self.reposGroup)
settings.remove(reposName)
newName = dlg.editName.text()
if repositories.all().has_key(newName) and newName != reposName:
newName = newName + "(2)"
settings.setValue(newName+"/url", QVariant(dlg.editURL.text().trimmed()))
settings.setValue(newName+"/enabled", QVariant(bool(dlg.checkBoxEnabled.checkState())))
if dlg.editURL.text().trimmed() == repositories.all()[reposName]["url"] and dlg.checkBoxEnabled.checkState() == checkState[repositories.all()[reposName]["enabled"]]:
repositories.rename(reposName, newName)
self.populateMostWidgets()
return # nothing else to do if only repository name was changed
# refresh lists and populate widgets
QApplication.setOverrideCursor(Qt.WaitCursor)
plugins.removeRepository(reposName)
self.getAllAvailablePlugins()
plugins.rebuild()
self.populateMostWidgets()
self.populatePluginTree()
QApplication.restoreOverrideCursor()
# ----------------------------------------- #
def deleteRepository(self):
""" delete repository button has been clicked """
current = self.treeRepositories.currentItem()
if current == None:
return
warning = self.tr("Are you sure you want to remove the following repository?") + "\n" + current.text(1)
if QMessageBox.warning(self, self.tr("QGIS Python Plugin Installer"), warning , QMessageBox.Yes, QMessageBox.No) == QMessageBox.No:
return
reposName = current.text(1)
# delete from the settings, refresh data and repopulate all the widgets
settings = QSettings()
settings.beginGroup(self.reposGroup)
settings.remove(reposName)
repositories.remove(reposName)
plugins.removeRepository(reposName)
plugins.rebuild()
self.populateMostWidgets()
self.populatePluginTree()
# ----------------------------------------- #
def runHelp(self):
""" open the context help browser """
QgsContextHelp.run("QgsPluginInstallerDialog")
# ----------------------------------------- #
def reject(self):
""" update the list of seen plugins before exit (both 'done' and 'x' buttons emit 'reject' signal) """
plugins.updateSeenPluginsList()
QDialog.reject(self)
# --- /class QgsPluginInstallerDialog ------------------------------------------------------------------------ #
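# A minimal launch sketch (an assumption, not the plugin's actual entry point;
# 'iface' stands for the QgisInterface instance handed to plugins):
#
# dlg = QgsPluginInstallerDialog(iface.mainWindow())
# dlg.getAllAvailablePlugins()
# dlg.populateMostWidgets()
# dlg.populatePluginTree()
# dlg.exec_()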
|
imincik/pkg-qgis-1.8
|
python/plugins/plugin_installer/installer_gui.py
|
Python
|
gpl-2.0
| 41,029
|
[
"VisIt"
] |
8c4bfe54a896bcecd23a4e911d83340b182ff2807fdbc95e26570f8b90128630
|
#
# File:
# TRANS_read_netCDF.py
#
# Synopsis:
# Illustrates how to read a netCDF file
#
# Categories:
# I/O
#
# Author:
# Karin Meier-Fleischer, based on NCL example
#
# Date of initial publication:
# September 2018
#
# Description:
# This example shows how to read a netCDF file.
#
# Effects illustrated:
# o Read netCDF data
#
# Output:
# -
#
# Notes: The data for this example can be downloaded from
# http://www.ncl.ucar.edu/Document/Manuals/NCL_User_Guide/Data/
#
"""
Transition Guide Python Example: TRANS_read_netCDF.py
- read netCDF file
- retrieve variable information
2018-08-21 kmf
"""
from __future__ import print_function
import Ngl,Nio
print("")
#-- data file name
fname = "./rectilinear_grid_3D.nc"
#-- open file
f = Nio.open_file(fname, "r")
#-- get the sizes of all dimensions in the same order as the names
dims = f.dimensions.values()
print("--> Dimensions: "+ str(dims))
#-- retrieve the dimension names of the file
dimnames = f.dimensions.keys()
print("--> Dimension names of file: "+ str(dimnames))
#-- get only the variable names not the dimension names
varnames = f.variables.keys()
print ("--> Variable names: "+ str(varnames))
var_list = [i for i in varnames if i not in dimnames]
print ("--> Variables: "+ str(var_list))
print("")
#-- get the variable object for t (the whole variable, not a single time step)
var = f.variables["t"]
#-- get type, rank, shape, dimension names and attributes of the variable
type = var.typecode()
shape = var.shape
attr = var.attributes.keys()
dims = var.dimensions
rank = var.rank
print("")
print("--> Type: "+ str(type))
print("--> Shape: "+ str(shape))
print("--> Attributes: "+ str(attr))
print("--> Dimensions: "+ str(dims))
print("--> Rank: "+ str(rank))
print("")
#-- print variable lat content
lat = f.variables["lat"]
lon = f.variables["lon"]
print(lat)
print(lon)
#-- check if variable has attribute
if hasattr(lon,'units'):
print("--> Has units attribute: "+var.attributes['units'])
exit()
|
KMFleischer/PyEarthScience
|
Transition_examples_NCL_to_PyNGL/read_data/TRANS_read_netCDF.py
|
Python
|
mit
| 2,055
|
[
"NetCDF"
] |
1564432cf05f6c715c332f1232e4322715d2c2df7b84b23288429aeb61d46381
|
# New cli for testing
import datetime
from pathlib import Path
import click
import openmmtools.utils
from perses.app.setup_relative_calculation import getSetupOptions
percy = """
MMMMMMMMMMMMXo:ccldOKNNWMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM
MMMMMMMMMMMWxcxOkxdodddxxxk0XWMMMMMMMMMMMMMMMMMMMMMMMMMMMMMWWWWMMWNX0Okk0WMMMMMM
MMMMMMMMMMMWxlk0OkdddddxkxdoodONWNXK0OOOOOOOOO0KXNWMMMMN0kxxxxxxxxddoddockWMMMMM
MMMMMMMMMMMMKooOOxooooc;:ldOOxooddddodddddddddodddxxkOkoodkOOkxdodxO000kcxWMMMMM
MMMMMMMMMMMMMKolk0000OdoddllxOOkkO00000O000000000OkxdookOOdl:cloooxO00kloXMMMMMM
MMMMMMMMMMMMMMNklokO0OO000OkkO000000000000O000000000O00OdllollxO0000OdldXMMMMMMM
MMMMMMMMMMMMMMMWXkdodkO00O00O0000OdlxO000000000OxxkO000OxkO00OOO00Odlo0WMMMMMMMM
MMMMMMMMMMMMMMMMMMWKkdldOOOO00000kdxOO000000000OxldO0000OO000OOxdoox0NMMMMMMMMMM
MMMMMMMMMMMMMMMMMMMMXdldOOO0000O000kkO0000000O0OOOOO0O00000Odloxk0NMMMMMMMMMMMMM
MMMMMMMMMMMMMMMMMMMKook00000000000Odlk00000000kooO000O00000OxcxNMMMMMWX0KNMMMMMM
MMMMMMMMMMMMMMMMMMKloO00000000000O0OkxdooooodxkxkO000000000O0xlkWMMMNkloook0OkxO
MMMMMMMMMMMMMMMMMNolO000000000000OxlllodddddolloxO00000000O00Odl0MMMOcx0Okooddo:
MMMMMMMMMMMMMMMMMOcx0OO00000O000kocoxxkO000OOkkocoO0000O0O0000kldWMMxck0000000xc
MMMMMMMMMMMMMMMMWdlO00O000000OOOo:dd:,:x000xc;cxd:d0000OO000O0OolXMWxck00OO0Oxlx
MMMMMMMMMMMMMMMMXooOO0000OOOOxkkclkc,,lO000k:,,lxclO00000000000dl0MNdcO0000kolkN
MMMMMMMMMMMMMMMMXolO00000kolccxOlckxook0000Oxlokx:okdkkxkO00000dl0XxlxO000kloXWM
MMMMMMMMMMMMMMMMNdlO00000OOd:lO0xclk00000000O0Oxclkkc;loxO0000OocoookO000OolKMMM
MMMMMMMMMMMMMMMMMOcx00000000xclkOkocldkOOOOOxdlldOOocoOO000000Oc,lxO00000kcxWMMM
MMMMMMMMMMMMMMMMMNdlO000000O0kocokOkdoooooooooxOkdllxO000O000Od:oO00O000OooXMMMM
MMMMMMMMMMMMMMMMMMXolk00000O00OkollodxkkkkkkkxdlllxO00000OOOOd:oO000O00OdcOMMMMM
MMMMMMMMMMMMMMMMMMM0::xO0000000O0OkdooooooooooodkO000000000OocoO000O000xlxWMMMMM
MMMMMMMMMMMMMMMMMNOoccclxO00O000000000OOOOOOO00000000OO00OdclxO00O0000kldNMMMMMM
MMMMMMMMMMMMMMMWKdlxOOkolldkO0000000000000000000000000OxolldO0O000000kldNMMMMMMM
MMMMMMMMMMMMMMWOldO00O00OxollodxOO000000000000000OOkdolloxO000O0000OxlxNMMMMMMMM
MMMMMMMMMMMMMWkldO000O00000OkdoooooooodddddddoooooooodkO000OO0O000OdlOWMMMMMMMMM
MMMMMMMMMMMMWOldO00000000000000OOkxxddooooooooddxkO0000O000000O0kdoxKWMMMMMMMMMM
MMMMMMMMMMMMXooO000000O0000000O0000000000000000OO00000OO000000kooxKWMMMMMMMMMMMM
MMMMMMMMMMMWxck00000000000000O000000000000000000O00OO000000O00dcOMMMMMMMMMMMMMMM
MMMWKkkOKWMXooO000000OkkO000000000000000000000000O000000000O00xckMMMMMMMMMMMMMMM
WMKdlcccldKOcd000000OdcdO0000000000000000000000000000000000000klkMMMMMMMMMMMMMMM
odccocccdllolk000000d:oO00000000000000000000000000000000000000OlxWMMMMMMMMMMMMMM
;:;:c::oxllolk00000kclO000000000000000000000000000000000000000OcdWMMMMMMMMMMMMMM
Oollodxdl;lc:x0000Oo:x0000000000000000000000000000000000000000kcxWMMMMMMMMMMMMMM
MN0kxxo::odl:d0000Olck000000000000000000000000000000000000O000dcOMMMMMMMMMMMMMMM
MMMMMMWKkddo;cO000Olck0000000000000000000000000000000000000O0OloNMMMMMMMMMMMMMMM
MMMMMMMMMWWOcoO000OkclO0000000000000000000000000000000000000Ool0MMMMMMMMMMMMMMMM
MMMMMMMMMMWxck0OOO0Olck00O00000000000000000000000000000O000Ool0WMMMMMMMMMMMMMMMM
MMMMMMMMMMM0loxolool:oO000000O000000000000000000000000000OxldKMMMMMMMMMMMMMMMMMM
MMMMMMMMMMMWKkxkKN0o;ck0000O000000000000000000000000OO0OxooONMMMMMMMMMMMMMMMMMMM
MMMMMMMMMMMMMMMMMMMWxck000O000OkkO000000000000000000Oxo:;xNMMMMMMMMMMMMMMMMMMMMM
MMMMMMMMMMMMMMMMMMMMOcx0000000kloO0000000000000Okxoollolc0MMMMMMMMMMMMMMMMMMMMMM
MMMMMMMMMMMMMMMMMMMMKld000O000d;codddddddddoooclooodkO0olKMMMMMMMMMMMMMMMMMMMMMM
MMMMMMMMMMMMMMMMMMMMNolO00000Oll00OOkkkkkkkO0OllO000O0OloNMMMMMMMMMMMMMMMMMMMMMM
MMMMMMMMMMMMMMMMMMMMWxck00000kckMMMMMMMMMMMMMMkcx00000kcxWMMMMMMMMMMMMMMMMMMMMMM
MMMMMMMMMMMMMMMMMMMMMkcx00000dcOMMMMMMMMMMMMMMKld00O00xckMMMMMMMMMMMMMMMMMMMMMMM
MMMMMMMMMMMMMMMMMMMMWdlk00000kckWMMMMMMMMMMMMM0ld0OOO0kldNMMMMMMMMMMMMMMMMMMMMMM
MMMMMMMMMMMMMMMMMMMMKlo0OkkO0OloNMMMMMMMMMMMMWxck0OOOO0dc0MMMMMMMMMMMMMMMMMMMMMM
MMMMMMMMMMMMMMMMMMMMXocdoollxxcdNMMMMMMMMMMMMMkcxOdlloxlc0MMMMMMMMMMMMMMMMMMMMMM
MMMMMMMMMMMMMMMMMMMMMKolxXXxc:oKMMMMMMMMMMMMMMKl:::x0dcckNMMMMMMMMMMMMMMMMMMMMMM
"""
def _check_openeye_license():
import openeye
assert openeye.oechem.OEChemIsLicensed(), "OpenEye license checks failed!"
def _test_platform(platform_name):
import openmm.testInstallation
openmm.testInstallation.main()
# If a user asks for a platform, try and see if we can use it
if platform_name:
assert openmmtools.utils.platform_supports_precision(platform_name, 'mixed')
click.echo("🎉\t Platform test successful!")
def _write_out_files(path, options):
# Convert path to a pathlib object
yaml_path = Path(path)
# Generate parsed yaml name
yaml_name = yaml_path.name
time = datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S")
yaml_parse_name = f"parsed-{time}-{yaml_name}"
# First make files in same dir as yaml
files_next_to_yaml = [
"debug.png",
"system.xml",
yaml_parse_name,
]
for _file in files_next_to_yaml:
with open(_file, "w") as fp:
pass
# Now we make the directory structure
trajectory_directory = Path(options["trajectory_directory"])
dirs_to_make = trajectory_directory.joinpath("xml")
Path(dirs_to_make).mkdir(parents=True, exist_ok=True)
# Now files that belong in the lower directories
files_in_lower_dir = [
"atom_mapping.png",
"out-complex_checkpoint.nc",
"out-complex.nc",
"out-complex.pdb",
"outhybrid_factory.npy.npz",
"out-solvent_checkpoint.nc",
"out-solvent.nc",
"out-solvent.pdb",
"out_topology_proposals.pkl",
"out-vacuum_checkpoint.nc",
"out-vacuum.nc",
]
# add the dir prefix
files_in_lower_dir = [
Path(trajectory_directory).joinpath(_) for _ in files_in_lower_dir
]
for _file in files_in_lower_dir:
with open(_file, "w") as fp:
pass
# Now the files in the 'xml' dir
files_in_xml_dir = [
"complex-hybrid-system.gz",
"complex-new-system.gz",
"complex-old-system.gz",
"solvent-hybrid-system.gz",
"solvent-new-system.gz",
"solvent-old-system.gz",
"vacuum-hybrid-system.gz",
"vacuum-new-system.gz",
"vacuum-old-system.gz",
]
# add the dir prefix
files_in_xml_dir = [dirs_to_make.joinpath(_) for _ in files_in_xml_dir]
for _file in files_in_xml_dir:
with open(_file, "w") as fp:
pass
@click.command()
@click.option("--yaml", type=click.Path(exists=True, dir_okay=False), required=True)
@click.option("--platform-name", type=str, default=None)
def cli(yaml, platform_name):
"""test"""
click.echo(click.style(percy, fg="bright_magenta"))
click.echo("📖\t Fetching simulation options ")
options = getSetupOptions(yaml)
click.echo("🖨️\t Printing options")
click.echo(options)
click.echo("🕵️\t Checking OpenEye license")
_check_openeye_license()
click.echo("✅\t OpenEye license good")
click.echo("🖥️⚡\t Checking whether requested compute platform is available")
_test_platform(platform_name)
click.echo("🖨️\t Writing out files")
trajectory_directory = options["trajectory_directory"]
_write_out_files(trajectory_directory, options)
click.echo("🧪\t Simulation over")
if __name__ == "__main__":
cli()
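# A minimal invocation sketch (assumes perses is installed and importable;
# 'setup.yaml' is a hypothetical options file):
#
#   python -m perses.app.cli --yaml setup.yaml --platform-name CUDA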
|
choderalab/perses
|
perses/app/cli.py
|
Python
|
mit
| 7,497
|
[
"OpenMM"
] |
09de8c4e7f423ee748ee07e88e88988d13a7ac13a660aec58bc2269456f776e7
|
import json
from octopus.server.orientdb.orientdb_server_command import OrientDBServerCommand
class OrientDBPluginExecutor(object):
def __init__(self, server_host, server_port):
self.command = OrientDBServerCommand(server_host, server_port)
def execute(self, pluginname, classname, settings=None):
data = {"plugin": pluginname, "class": classname, "settings": settings}
json_data = json.dumps(data)
return self.post(json_data)
def post(self, json_data):
return self.command.execute_post_command("/executeplugin/", json_data)
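# A minimal usage sketch (assumes an octopus/OrientDB server listening locally;
# the plugin and class names below are hypothetical):
#
# executor = OrientDBPluginExecutor("localhost", 2480)
# response = executor.execute("importer", "CSVImporter", settings={"path": "/tmp/data.csv"})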
|
octopus-platform/bjoern
|
python/octopus-tools/octopus/server/orientdb/orientdb_plugin_executor.py
|
Python
|
gpl-3.0
| 582
|
[
"Octopus"
] |
73ddfd73cbab6e208797eebbed40728e3aef6710ce986a8c5768034cf0bb60de
|
# -*- coding: utf-8 -*-
"""Functions to plot evoked M/EEG data (besides topographies)."""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
# Cathy Nangini <cnangini@gmail.com>
# Mainak Jas <mainak@neuro.hut.fi>
# Daniel McCloy <dan.mccloy@gmail.com>
#
# License: Simplified BSD
from copy import deepcopy
from functools import partial
from numbers import Integral
import numpy as np
from ..io.pick import (channel_type,
_VALID_CHANNEL_TYPES, channel_indices_by_type,
_DATA_CH_TYPES_SPLIT, _pick_inst, _get_channel_types,
_PICK_TYPES_DATA_DICT, _picks_to_idx, pick_info)
from ..defaults import _handle_default
from .utils import (_draw_proj_checkbox, tight_layout, _check_delayed_ssp,
plt_show, _process_times, DraggableColorbar, _setup_cmap,
_setup_vmin_vmax, _check_cov, _make_combine_callable,
_validate_if_list_of_axes, _triage_rank_sss,
_connection_line, _get_color_list, _setup_ax_spines,
_setup_plot_projector, _prepare_joint_axes, _check_option,
_set_title_multiple_electrodes, _check_time_unit,
_plot_masked_image, _trim_ticks, _set_window_title)
from ..utils import (logger, _clean_names, warn, _pl, verbose, _validate_type,
_check_if_nan, _check_ch_locs, fill_doc, _is_numeric)
from .topo import _plot_evoked_topo
from .topomap import (_prepare_topomap_plot, plot_topomap, _get_pos_outlines,
_draw_outlines, _prepare_topomap, _set_contour_locator,
_check_sphere, _make_head_outlines)
from ..channels.layout import _pair_grad_sensors, find_layout
def _butterfly_onpick(event, params):
"""Add a channel name on click."""
params['need_draw'] = True
ax = event.artist.axes
ax_idx = np.where([ax is a for a in params['axes']])[0]
if len(ax_idx) == 0: # this can happen if ax param is used
return # let the other axes handle it
else:
ax_idx = ax_idx[0]
lidx = np.where([
line is event.artist for line in params['lines'][ax_idx]])[0][0]
ch_name = params['ch_names'][params['idxs'][ax_idx][lidx]]
text = params['texts'][ax_idx]
x = event.artist.get_xdata()[event.ind[0]]
y = event.artist.get_ydata()[event.ind[0]]
text.set_x(x)
text.set_y(y)
text.set_text(ch_name)
text.set_color(event.artist.get_color())
text.set_alpha(1.)
text.set_zorder(len(ax.lines)) # to make sure it goes on top of the lines
text.set_path_effects(params['path_effects'])
# do NOT redraw here, since for butterfly plots hundreds of lines could
# potentially be picked -- use on_button_press (happens once per click)
# to do the drawing
def _butterfly_on_button_press(event, params):
"""Only draw once for picking."""
if params['need_draw']:
event.canvas.draw()
else:
idx = np.where([event.inaxes is ax for ax in params['axes']])[0]
if len(idx) == 1:
text = params['texts'][idx[0]]
text.set_alpha(0.)
text.set_path_effects([])
event.canvas.draw()
params['need_draw'] = False
def _line_plot_onselect(xmin, xmax, ch_types, info, data, times, text=None,
psd=False, time_unit='s', sphere=None):
"""Draw topomaps from the selected area."""
import matplotlib.pyplot as plt
ch_types = [type_ for type_ in ch_types if type_ in ('eeg', 'grad', 'mag')]
if len(ch_types) == 0:
raise ValueError('Interactive topomaps only allowed for EEG '
'and MEG channels.')
if ('grad' in ch_types and
len(_pair_grad_sensors(info, topomap_coords=False,
raise_error=False)) < 2):
ch_types.remove('grad')
if len(ch_types) == 0:
return
vert_lines = list()
if text is not None:
text.set_visible(True)
ax = text.axes
vert_lines.append(ax.axvline(xmin, zorder=0, color='red'))
vert_lines.append(ax.axvline(xmax, zorder=0, color='red'))
fill = ax.axvspan(xmin, xmax, alpha=0.2, color='green')
evoked_fig = plt.gcf()
evoked_fig.canvas.draw()
evoked_fig.canvas.flush_events()
minidx = np.abs(times - xmin).argmin()
maxidx = np.abs(times - xmax).argmin()
fig, axarr = plt.subplots(1, len(ch_types), squeeze=False,
figsize=(3 * len(ch_types), 3))
for idx, ch_type in enumerate(ch_types):
if ch_type not in ('eeg', 'grad', 'mag'):
continue
picks, pos, merge_channels, _, ch_type, this_sphere, clip_origin = \
_prepare_topomap_plot(info, ch_type, sphere=sphere)
outlines = _make_head_outlines(this_sphere, pos, 'head', clip_origin)
if len(pos) < 2:
fig.delaxes(axarr[0][idx])
continue
this_data = data[picks, minidx:maxidx]
if merge_channels:
from ..channels.layout import _merge_ch_data
method = 'mean' if psd else 'rms'
this_data, _ = _merge_ch_data(this_data, ch_type, [],
method=method)
title = '%s %s' % (ch_type, method.upper())
else:
title = ch_type
this_data = np.average(this_data, axis=1)
axarr[0][idx].set_title(title)
vmin = min(this_data) if psd else None
vmax = max(this_data) if psd else None # All negative for dB psd.
cmap = 'Reds' if psd else None
plot_topomap(this_data, pos, cmap=cmap, vmin=vmin, vmax=vmax,
axes=axarr[0][idx], show=False, sphere=this_sphere,
outlines=outlines)
unit = 'Hz' if psd else time_unit
fig.suptitle('Average over %.2f%s - %.2f%s' % (xmin, unit, xmax, unit),
y=0.1)
tight_layout(pad=2.0, fig=fig)
plt_show()
if text is not None:
text.set_visible(False)
close_callback = partial(_topo_closed, ax=ax, lines=vert_lines,
fill=fill)
fig.canvas.mpl_connect('close_event', close_callback)
evoked_fig.canvas.draw()
evoked_fig.canvas.flush_events()
def _topo_closed(events, ax, lines, fill):
"""Remove lines from evoked plot as topomap is closed."""
for line in lines:
ax.lines.remove(line)
ax.patches.remove(fill)
ax.get_figure().canvas.draw()
def _rgb(x, y, z):
"""Transform x, y, z values into RGB colors."""
rgb = np.array([x, y, z]).T
rgb -= rgb.min(0)
rgb /= np.maximum(rgb.max(0), 1e-16) # avoid div by zero
return rgb
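# A minimal worked example (each column is rescaled to [0, 1] independently;
# a constant column collapses to zeros because of the 1e-16 guard):
#
# >>> _rgb(np.array([0., 1.]), np.array([1., 0.]), np.array([.5, .5]))
# array([[0., 1., 0.],
#        [1., 0., 0.]])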
def _plot_legend(pos, colors, axis, bads, outlines, loc, size=30):
"""Plot (possibly colorized) channel legends for evoked plots."""
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
axis.get_figure().canvas.draw()
bbox = axis.get_window_extent() # Determine the correct size.
ratio = bbox.width / bbox.height
ax = inset_axes(axis, width=str(size / ratio) + '%',
height=str(size) + '%', loc=loc)
ax.set_adjustable("box")
_prepare_topomap(pos, ax, check_nonzero=False)
pos_x, pos_y = pos.T
ax.scatter(pos_x, pos_y, color=colors, s=size * .8, marker='.', zorder=1)
if bads:
bads = np.array(bads)
ax.scatter(pos_x[bads], pos_y[bads], s=size / 6, marker='.',
color='w', zorder=1)
_draw_outlines(ax, outlines)
def _plot_evoked(evoked, picks, exclude, unit, show, ylim, proj, xlim, hline,
units, scalings, titles, axes, plot_type, cmap=None,
gfp=False, window_title=None, spatial_colors=False,
set_tight_layout=True, selectable=True, zorder='unsorted',
noise_cov=None, colorbar=True, mask=None, mask_style=None,
mask_cmap=None, mask_alpha=.25, time_unit='s',
show_names=False, group_by=None, sphere=None):
"""Aux function for plot_evoked and plot_evoked_image (cf. docstrings).
Extra param is:
plot_type : str, value ('butterfly' | 'image')
The type of graph to plot: 'butterfly' plots each channel as a line
(x axis: time, y axis: amplitude). 'image' plots a 2D image where
color depicts the amplitude of each channel at a given time point
(x axis: time, y axis: channel). In 'image' mode, the plot is not
interactive.
"""
import matplotlib.pyplot as plt
# For evoked.plot_image ...
# First input checks for group_by and axes if any of them is not None.
# Either both must be dicts, or neither.
# If the former, the two dicts provide picks and axes to plot them to.
# Then, we call this function recursively for each entry in `group_by`.
if plot_type == "image" and isinstance(group_by, dict):
if axes is None:
axes = dict()
for sel in group_by:
plt.figure()
axes[sel] = plt.axes()
if not isinstance(axes, dict):
raise ValueError("If `group_by` is a dict, `axes` must be "
"a dict of axes or None.")
_validate_if_list_of_axes(list(axes.values()))
remove_xlabels = any([ax.is_last_row() for ax in axes.values()])
for sel in group_by: # ... we loop over selections
if sel not in axes:
raise ValueError(sel + " present in `group_by`, but not "
"found in `axes`")
ax = axes[sel]
# the unwieldy dict comp below defaults the title to the sel
titles = ({channel_type(evoked.info, idx): sel
for idx in group_by[sel]} if titles is None else titles)
_plot_evoked(evoked, group_by[sel], exclude, unit, show, ylim,
proj, xlim, hline, units, scalings, titles,
ax, plot_type, cmap=cmap, gfp=gfp,
window_title=window_title,
set_tight_layout=set_tight_layout,
selectable=selectable, noise_cov=noise_cov,
colorbar=colorbar, mask=mask,
mask_style=mask_style, mask_cmap=mask_cmap,
mask_alpha=mask_alpha, time_unit=time_unit,
show_names=show_names,
sphere=sphere)
if remove_xlabels and not ax.is_last_row():
ax.set_xticklabels([])
ax.set_xlabel("")
ims = [ax.images[0] for ax in axes.values()]
clims = np.array([im.get_clim() for im in ims])
clim_min, clim_max = clims.min(), clims.max()  # avoid shadowing builtins min/max
for im in ims:
im.set_clim(clim_min, clim_max)
figs = [ax.get_figure() for ax in axes.values()]
if len(set(figs)) == 1:
return figs[0]
else:
return figs
elif isinstance(axes, dict):
raise ValueError("If `group_by` is not a dict, "
"`axes` must not be a dict either.")
time_unit, times = _check_time_unit(time_unit, evoked.times)
evoked = evoked.copy() # we modify info
info = evoked.info
if axes is not None and proj == 'interactive':
raise RuntimeError('Currently only single axis figures are supported'
' for interactive SSP selection.')
if isinstance(gfp, str) and gfp != 'only':
raise ValueError('gfp must be boolean or "only". Got %s' % gfp)
scalings = _handle_default('scalings', scalings)
titles = _handle_default('titles', titles)
units = _handle_default('units', units)
picks = _picks_to_idx(info, picks, none='all', exclude=())
if len(picks) != len(set(picks)):
raise ValueError("`picks` are not unique. Please remove duplicates.")
bad_ch_idx = [info['ch_names'].index(ch) for ch in info['bads']
if ch in info['ch_names']]
if len(exclude) > 0:
if isinstance(exclude, str) and exclude == 'bads':
exclude = bad_ch_idx
elif (isinstance(exclude, list) and
all(isinstance(ch, str) for ch in exclude)):
exclude = [info['ch_names'].index(ch) for ch in exclude]
else:
raise ValueError(
'exclude has to be a list of channel names or "bads"')
picks = np.array([pick for pick in picks if pick not in exclude])
types = np.array(_get_channel_types(info, picks), str)
ch_types_used = list()
for this_type in _VALID_CHANNEL_TYPES:
if this_type in types:
ch_types_used.append(this_type)
fig = None
if axes is None:
fig, axes = plt.subplots(len(ch_types_used), 1)
plt.subplots_adjust(0.175, 0.08, 0.94, 0.94, 0.2, 0.63)
if isinstance(axes, plt.Axes):
axes = [axes]
fig.set_size_inches(6.4, 2 + len(axes))
if isinstance(axes, plt.Axes):
axes = [axes]
elif isinstance(axes, np.ndarray):
axes = list(axes)
if fig is None:
fig = axes[0].get_figure()
if window_title is not None:
_set_window_title(fig, window_title)
if len(axes) != len(ch_types_used):
        raise ValueError('Number of axes (%d) must match number of channel '
'types (%d: %s)' % (len(axes), len(ch_types_used),
sorted(ch_types_used)))
_check_option('proj', proj, (True, False, 'interactive', 'reconstruct'))
noise_cov = _check_cov(noise_cov, info)
if proj == 'reconstruct' and noise_cov is not None:
raise ValueError('Cannot use proj="reconstruct" when noise_cov is not '
'None')
projector, whitened_ch_names = _setup_plot_projector(
info, noise_cov, proj=proj is True, nave=evoked.nave)
if len(whitened_ch_names) > 0:
unit = False
if projector is not None:
evoked.data[:] = np.dot(projector, evoked.data)
if proj == 'reconstruct':
evoked = evoked._reconstruct_proj()
if plot_type == 'butterfly':
_plot_lines(evoked.data, info, picks, fig, axes, spatial_colors, unit,
units, scalings, hline, gfp, types, zorder, xlim, ylim,
times, bad_ch_idx, titles, ch_types_used, selectable,
False, line_alpha=1., nave=evoked.nave,
time_unit=time_unit, sphere=sphere)
plt.setp(axes, xlabel='Time (%s)' % time_unit)
elif plot_type == 'image':
for ai, (ax, this_type) in enumerate(zip(axes, ch_types_used)):
use_nave = evoked.nave if ai == 0 else None
this_picks = list(picks[types == this_type])
_plot_image(evoked.data, ax, this_type, this_picks, cmap, unit,
units, scalings, times, xlim, ylim, titles,
colorbar=colorbar, mask=mask, mask_style=mask_style,
mask_cmap=mask_cmap, mask_alpha=mask_alpha,
nave=use_nave, time_unit=time_unit,
show_names=show_names, ch_names=evoked.ch_names)
if proj == 'interactive':
_check_delayed_ssp(evoked)
params = dict(evoked=evoked, fig=fig, projs=info['projs'], axes=axes,
types=types, units=units, scalings=scalings, unit=unit,
ch_types_used=ch_types_used, picks=picks,
plot_update_proj_callback=_plot_update_evoked,
plot_type=plot_type)
_draw_proj_checkbox(None, params)
plt.setp(fig.axes[:len(ch_types_used) - 1], xlabel='')
fig.canvas.draw() # for axes plots update axes.
if set_tight_layout:
tight_layout(fig=fig)
plt_show(show)
return fig
def _plot_lines(data, info, picks, fig, axes, spatial_colors, unit, units,
scalings, hline, gfp, types, zorder, xlim, ylim, times,
bad_ch_idx, titles, ch_types_used, selectable, psd,
line_alpha, nave, time_unit, sphere):
"""Plot data as butterfly plot."""
from matplotlib import patheffects, pyplot as plt
from matplotlib.widgets import SpanSelector
assert len(axes) == len(ch_types_used)
texts = list()
idxs = list()
lines = list()
sphere = _check_sphere(sphere, info)
path_effects = [patheffects.withStroke(linewidth=2, foreground="w",
alpha=0.75)]
gfp_path_effects = [patheffects.withStroke(linewidth=5, foreground="w",
alpha=0.75)]
if selectable:
selectables = np.ones(len(ch_types_used), dtype=bool)
for type_idx, this_type in enumerate(ch_types_used):
idx = picks[types == this_type]
if len(idx) < 2 or (this_type == 'grad' and len(idx) < 4):
# prevent unnecessary warnings for e.g. EOG
if this_type in _DATA_CH_TYPES_SPLIT:
logger.info('Need more than one channel to make '
'topography for %s. Disabling interactivity.'
% (this_type,))
selectables[type_idx] = False
if selectable:
# Parameters for butterfly interactive plots
params = dict(axes=axes, texts=texts, lines=lines,
ch_names=info['ch_names'], idxs=idxs, need_draw=False,
path_effects=path_effects)
fig.canvas.mpl_connect('pick_event',
partial(_butterfly_onpick, params=params))
fig.canvas.mpl_connect('button_press_event',
partial(_butterfly_on_button_press,
params=params))
for ai, (ax, this_type) in enumerate(zip(axes, ch_types_used)):
line_list = list() # 'line_list' contains the lines for this axes
if unit is False:
this_scaling = 1.0
ch_unit = 'NA' # no unit
else:
this_scaling = 1. if scalings is None else scalings[this_type]
ch_unit = units[this_type]
idx = list(picks[types == this_type])
idxs.append(idx)
if len(idx) > 0:
# Set amplitude scaling
D = this_scaling * data[idx, :]
_check_if_nan(D)
gfp_only = (isinstance(gfp, str) and gfp == 'only')
if not gfp_only:
chs = [info['chs'][i] for i in idx]
locs3d = np.array([ch['loc'][:3] for ch in chs])
if spatial_colors is True and not _check_ch_locs(chs):
warn('Channel locations not available. Disabling spatial '
'colors.')
spatial_colors = selectable = False
if spatial_colors is True and len(idx) != 1:
x, y, z = locs3d.T
colors = _rgb(x, y, z)
_handle_spatial_colors(colors, info, idx, this_type, psd,
ax, sphere)
else:
if isinstance(spatial_colors, (tuple, str)):
col = [spatial_colors]
else:
col = ['k']
colors = col * len(idx)
for i in bad_ch_idx:
if i in idx:
colors[idx.index(i)] = 'r'
if zorder == 'std':
# find the channels with the least activity
# to map them in front of the more active ones
z_ord = D.std(axis=1).argsort()
elif zorder == 'unsorted':
z_ord = list(range(D.shape[0]))
elif not callable(zorder):
error = ('`zorder` must be a function, "std" '
'or "unsorted", not {0}.')
raise TypeError(error.format(type(zorder)))
else:
z_ord = zorder(D)
# plot channels
for ch_idx, z in enumerate(z_ord):
line_list.append(
ax.plot(times, D[ch_idx], picker=True,
zorder=z + 1 if spatial_colors is True else 1,
color=colors[ch_idx], alpha=line_alpha,
linewidth=0.5)[0])
line_list[-1].set_pickradius(3.)
if gfp: # 'only' or boolean True
gfp_color = 3 * (0.,) if spatial_colors is True else (0., 1.,
0.)
this_gfp = np.sqrt((D * D).mean(axis=0))
this_ylim = ax.get_ylim() if (ylim is None or this_type not in
ylim.keys()) else ylim[this_type]
if gfp_only:
y_offset = 0.
else:
y_offset = this_ylim[0]
this_gfp += y_offset
ax.fill_between(times, y_offset, this_gfp, color='none',
facecolor=gfp_color, zorder=1, alpha=0.2)
line_list.append(ax.plot(times, this_gfp, color=gfp_color,
zorder=3, alpha=line_alpha)[0])
ax.text(times[0] + 0.01 * (times[-1] - times[0]),
this_gfp[0] + 0.05 * np.diff(ax.get_ylim())[0],
'GFP', zorder=4, color=gfp_color,
path_effects=gfp_path_effects)
for ii, line in zip(idx, line_list):
if ii in bad_ch_idx:
line.set_zorder(2)
if spatial_colors is True:
line.set_linestyle("--")
ax.set_ylabel(ch_unit)
# for old matplotlib, we actually need this to have a bounding
# box (!), so we have to put some valid text here, change
# alpha and path effects later
texts.append(ax.text(0, 0, 'blank', zorder=3,
verticalalignment='baseline',
horizontalalignment='left',
fontweight='bold', alpha=0,
clip_on=True))
if xlim is not None:
if xlim == 'tight':
xlim = (times[0], times[-1])
ax.set_xlim(xlim)
if ylim is not None and this_type in ylim:
ax.set_ylim(ylim[this_type])
ax.set(title=r'%s (%d channel%s)'
% (titles[this_type], len(D), _pl(len(D))))
if ai == 0:
_add_nave(ax, nave)
if hline is not None:
for h in hline:
c = ('grey' if spatial_colors is True else 'r')
ax.axhline(h, linestyle='--', linewidth=2, color=c)
lines.append(line_list)
if selectable:
for ax in np.array(axes)[selectables]:
if len(ax.lines) == 1:
continue
text = ax.annotate('Loading...', xy=(0.01, 0.1),
xycoords='axes fraction', fontsize=20,
color='green', zorder=3)
text.set_visible(False)
callback_onselect = partial(_line_plot_onselect,
ch_types=ch_types_used, info=info,
data=data, times=times, text=text,
psd=psd, time_unit=time_unit,
sphere=sphere)
blit = False if plt.get_backend() == 'MacOSX' else True
minspan = 0 if len(times) < 2 else times[1] - times[0]
ax._span_selector = SpanSelector(
ax, callback_onselect, 'horizontal', minspan=minspan,
useblit=blit, rectprops=dict(alpha=0.5, facecolor='red'))
def _add_nave(ax, nave):
"""Add nave to axes."""
if nave is not None:
ax.annotate(
r'N$_{\mathrm{ave}}$=%d' % nave, ha='left', va='bottom',
xy=(0, 1), xycoords='axes fraction',
xytext=(0, 5), textcoords='offset pixels')
def _handle_spatial_colors(colors, info, idx, ch_type, psd, ax, sphere):
"""Set up spatial colors."""
used_nm = np.array(_clean_names(info['ch_names']))[idx]
# find indices for bads
bads = [np.where(used_nm == bad)[0][0] for bad in info['bads'] if bad in
used_nm]
pos, outlines = _get_pos_outlines(info, idx, sphere=sphere)
loc = 1 if psd else 2 # Legend in top right for psd plot.
_plot_legend(pos, colors, ax, bads, outlines, loc)
def _plot_image(data, ax, this_type, picks, cmap, unit, units, scalings, times,
xlim, ylim, titles, colorbar=True, mask=None, mask_cmap=None,
mask_style=None, mask_alpha=.25, nave=None,
time_unit='s', show_names=False, ch_names=None):
"""Plot images."""
import matplotlib.pyplot as plt
assert time_unit is not None
if show_names == "auto":
if picks is not None:
show_names = "all" if len(picks) < 25 else True
else:
show_names = False
cmap = _setup_cmap(cmap)
ch_unit = units[this_type]
this_scaling = scalings[this_type]
if unit is False:
this_scaling = 1.0
ch_unit = 'NA' # no unit
if picks is not None:
data = data[picks]
if mask is not None:
mask = mask[picks]
# Show the image
# Set amplitude scaling
data = this_scaling * data
if ylim is None or this_type not in ylim:
vmax = np.abs(data).max()
vmin = -vmax
else:
vmin, vmax = ylim[this_type]
_check_if_nan(data)
im, t_end = _plot_masked_image(
ax, data, times, mask, yvals=None, cmap=cmap[0],
vmin=vmin, vmax=vmax, mask_style=mask_style, mask_alpha=mask_alpha,
mask_cmap=mask_cmap)
# ignore xlim='tight'; happens automatically with `extent` in imshow
xlim = None if xlim == 'tight' else xlim
if xlim is not None:
ax.set_xlim(xlim)
if colorbar:
cbar = plt.colorbar(im, ax=ax)
cbar.ax.set_title(ch_unit)
if cmap[1]:
ax.CB = DraggableColorbar(cbar, im)
ylabel = 'Channels' if show_names else 'Channel (index)'
t = titles[this_type] + ' (%d channel%s' % (len(data), _pl(data)) + t_end
ax.set(ylabel=ylabel, xlabel='Time (%s)' % (time_unit,), title=t)
_add_nave(ax, nave)
yticks = np.arange(len(picks))
if show_names != 'all':
yticks = np.intersect1d(np.round(ax.get_yticks()).astype(int), yticks)
yticklabels = np.array(ch_names)[picks] if show_names else np.array(picks)
ax.set(yticks=yticks, yticklabels=yticklabels[yticks])
@verbose
def plot_evoked(evoked, picks=None, exclude='bads', unit=True, show=True,
ylim=None, xlim='tight', proj=False, hline=None, units=None,
scalings=None, titles=None, axes=None, gfp=False,
window_title=None, spatial_colors=False, zorder='unsorted',
selectable=True, noise_cov=None, time_unit='s', sphere=None,
verbose=None):
"""Plot evoked data using butterfly plots.
    Left-clicking a line shows the channel name. Selecting an area by
    clicking and holding the left mouse button plots a topographic map of
    the painted area.
.. note:: If bad channels are not excluded they are shown in red.
Parameters
----------
evoked : instance of Evoked
The evoked data.
%(picks_all)s
exclude : list of str | 'bads'
Channels names to exclude from being shown. If 'bads', the
bad channels are excluded.
unit : bool
Scale plot with channel (SI) unit.
show : bool
Show figure if True.
ylim : dict | None
Y limits for plots (after scaling has been applied). e.g.
ylim = dict(eeg=[-20, 20])
Valid keys are eeg, mag, grad, misc. If None, the ylim parameter
for each channel equals the pyplot default.
xlim : 'tight' | tuple | None
X limits for plots.
%(plot_proj)s
hline : list of float | None
        The values at which to show a horizontal line.
units : dict | None
The units of the channel types used for axes labels. If None,
defaults to ``dict(eeg='µV', grad='fT/cm', mag='fT')``.
scalings : dict | None
The scalings of the channel types to be applied for plotting. If None,
defaults to ``dict(eeg=1e6, grad=1e13, mag=1e15)``.
titles : dict | None
The titles associated with the channels. If None, defaults to
``dict(eeg='EEG', grad='Gradiometers', mag='Magnetometers')``.
axes : instance of Axes | list | None
The axes to plot to. If list, the list must be a list of Axes of
the same length as the number of channel types. If instance of
Axes, there must be only one channel type plotted.
gfp : bool | 'only'
Plot GFP in green if True or "only". If "only", then the individual
channel traces will not be shown.
window_title : str | None
The title to put at the top of the figure.
spatial_colors : bool
If True, the lines are color coded by mapping physical sensor
coordinates into color values. Spatially similar channels will have
similar colors. Bad channels will be dotted. If False, the good
channels are plotted black and bad channels red. Defaults to False.
zorder : str | callable
Which channels to put in the front or back. Only matters if
``spatial_colors`` is used.
If str, must be ``std`` or ``unsorted`` (defaults to ``unsorted``). If
``std``, data with the lowest standard deviation (weakest effects) will
be put in front so that they are not obscured by those with stronger
effects. If ``unsorted``, channels are z-sorted as in the evoked
instance.
        If callable, it must take one argument (a numpy array with the same
        shape as the evoked data) and return a list of unique integers,
        one per channel, specifying the z-order.
.. versionadded:: 0.13.0
selectable : bool
Whether to use interactive features. If True (default), it is possible
to paint an area to draw topomaps. When False, the interactive features
are disabled. Disabling interactive features reduces memory consumption
and is useful when using ``axes`` parameter to draw multiaxes figures.
.. versionadded:: 0.13.0
noise_cov : instance of Covariance | str | None
Noise covariance used to whiten the data while plotting.
Whitened data channel names are shown in italic.
Can be a string to load a covariance from disk.
See also :meth:`mne.Evoked.plot_white` for additional inspection
of noise covariance properties when whitening evoked data.
For data processed with SSS, the effective dependence between
magnetometers and gradiometers may introduce differences in scaling,
consider using :meth:`mne.Evoked.plot_white`.
.. versionadded:: 0.16.0
time_unit : str
The units for the time axis, can be "ms" or "s" (default).
.. versionadded:: 0.16
%(topomap_sphere_auto)s
%(verbose)s
Returns
-------
fig : instance of matplotlib.figure.Figure
Figure containing the butterfly plots.
See Also
--------
mne.viz.plot_evoked_white
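    Examples
    --------
    A minimal usage sketch; the file name ``'sample-ave.fif'`` is
    hypothetical (any evoked file saved by MNE will do):
    >>> import mne  # doctest: +SKIP
    >>> evoked = mne.read_evokeds('sample-ave.fif', condition=0)  # doctest: +SKIP
    >>> fig = mne.viz.plot_evoked(evoked, gfp=True, spatial_colors=True)  # doctest: +SKIP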
"""
return _plot_evoked(
evoked=evoked, picks=picks, exclude=exclude, unit=unit, show=show,
ylim=ylim, proj=proj, xlim=xlim, hline=hline, units=units,
scalings=scalings, titles=titles, axes=axes, plot_type="butterfly",
gfp=gfp, window_title=window_title, spatial_colors=spatial_colors,
selectable=selectable, zorder=zorder, noise_cov=noise_cov,
time_unit=time_unit, sphere=sphere)
def plot_evoked_topo(evoked, layout=None, layout_scale=0.945, color=None,
border='none', ylim=None, scalings=None, title=None,
proj=False, vline=[0.0], fig_background=None,
merge_grads=False, legend=True, axes=None,
background_color='w', noise_cov=None, show=True):
"""Plot 2D topography of evoked responses.
Clicking on the plot of an individual sensor opens a new figure showing
the evoked response for the selected sensor.
Parameters
----------
evoked : list of Evoked | Evoked
The evoked response to plot.
layout : instance of Layout | None
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout is
inferred from the data.
layout_scale : float
Scaling factor for adjusting the relative size of the layout
on the canvas.
color : list of color | color | None
Everything matplotlib accepts to specify colors. If not list-like,
the color specified will be repeated. If None, colors are
automatically drawn.
border : str
Matplotlib borders style to be used for each sensor plot.
ylim : dict | None
Y limits for plots (after scaling has been applied). The value
determines the upper and lower subplot limits. e.g.
ylim = dict(eeg=[-20, 20]). Valid keys are eeg, mag, grad, misc.
If None, the ylim parameter for each channel is determined by
the maximum absolute peak.
scalings : dict | None
        The scalings of the channel types to be applied for plotting. If None,
defaults to ``dict(eeg=1e6, grad=1e13, mag=1e15)``.
title : str
Title of the figure.
proj : bool | 'interactive'
If true SSP projections are applied before display. If 'interactive',
a check box for reversible selection of SSP projection vectors will
be shown.
vline : list of float | None
The values at which to show a vertical line.
fig_background : None | ndarray
A background image for the figure. This must work with a call to
plt.imshow. Defaults to None.
merge_grads : bool
Whether to use RMS value of gradiometer pairs. Only works for Neuromag
data. Defaults to False.
legend : bool | int | str | tuple
If True, create a legend based on evoked.comment. If False, disable the
legend. Otherwise, the legend is created and the parameter value is
passed as the location parameter to the matplotlib legend call. It can
be an integer (e.g. 0 corresponds to upper right corner of the plot),
a string (e.g. 'upper right'), or a tuple (x, y coordinates of the
lower left corner of the legend in the axes coordinate system).
See matplotlib documentation for more details.
axes : instance of matplotlib Axes | None
Axes to plot into. If None, axes will be created.
background_color : color
Background color. Typically 'k' (black) or 'w' (white; default).
.. versionadded:: 0.15.0
noise_cov : instance of Covariance | str | None
Noise covariance used to whiten the data while plotting.
Whitened data channel names are shown in italic.
Can be a string to load a covariance from disk.
.. versionadded:: 0.16.0
show : bool
Show figure if True.
Returns
-------
fig : instance of matplotlib.figure.Figure
Images of evoked responses at sensor locations.
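    Examples
    --------
    A minimal sketch comparing two conditions on a dark background; the
    evoked variables are hypothetical and assumed to share channels/times:
    >>> fig = plot_evoked_topo([evoked_a, evoked_b], color=['w', 'r'],
    ...                        background_color='k')  # doctest: +SKIP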
"""
from matplotlib.colors import colorConverter
    if not isinstance(evoked, (tuple, list)):
evoked = [evoked]
dark_background = \
np.mean(colorConverter.to_rgb(background_color)) < 0.5
if dark_background:
fig_facecolor = background_color
axis_facecolor = background_color
font_color = 'w'
else:
fig_facecolor = background_color
axis_facecolor = background_color
font_color = 'k'
if color is None:
if dark_background:
color = ['w'] + _get_color_list()
else:
color = _get_color_list()
    color = color * ((len(evoked) // len(color)) + 1)  # enough repetitions
color = color[:len(evoked)]
return _plot_evoked_topo(evoked=evoked, layout=layout,
layout_scale=layout_scale, color=color,
border=border, ylim=ylim, scalings=scalings,
title=title, proj=proj, vline=vline,
fig_facecolor=fig_facecolor,
fig_background=fig_background,
axis_facecolor=axis_facecolor,
font_color=font_color,
merge_channels=merge_grads,
legend=legend, axes=axes, show=show,
noise_cov=noise_cov)
@fill_doc
def plot_evoked_image(evoked, picks=None, exclude='bads', unit=True,
show=True, clim=None, xlim='tight', proj=False,
units=None, scalings=None, titles=None, axes=None,
cmap='RdBu_r', colorbar=True, mask=None,
mask_style=None, mask_cmap="Greys", mask_alpha=.25,
time_unit='s', show_names="auto", group_by=None,
sphere=None):
"""Plot evoked data as images.
Parameters
----------
evoked : instance of Evoked
The evoked data.
%(picks_all)s
This parameter can also be used to set the order the channels
are shown in, as the channel image is sorted by the order of picks.
exclude : list of str | 'bads'
Channels names to exclude from being shown. If 'bads', the
bad channels are excluded.
unit : bool
Scale plot with channel (SI) unit.
show : bool
Show figure if True.
clim : dict | None
Color limits for plots (after scaling has been applied). e.g.
``clim = dict(eeg=[-20, 20])``.
Valid keys are eeg, mag, grad, misc. If None, the clim parameter
for each channel equals the pyplot default.
xlim : 'tight' | tuple | None
X limits for plots.
proj : bool | 'interactive'
If true SSP projections are applied before display. If 'interactive',
a check box for reversible selection of SSP projection vectors will
be shown.
units : dict | None
The units of the channel types used for axes labels. If None,
defaults to ``dict(eeg='µV', grad='fT/cm', mag='fT')``.
scalings : dict | None
        The scalings of the channel types to be applied for plotting. If None,
defaults to ``dict(eeg=1e6, grad=1e13, mag=1e15)``.
titles : dict | None
The titles associated with the channels. If None, defaults to
``dict(eeg='EEG', grad='Gradiometers', mag='Magnetometers')``.
axes : instance of Axes | list | dict | None
The axes to plot to. If list, the list must be a list of Axes of
the same length as the number of channel types. If instance of
Axes, there must be only one channel type plotted.
If ``group_by`` is a dict, this cannot be a list, but it can be a dict
of lists of axes, with the keys matching those of ``group_by``. In that
case, the provided axes will be used for the corresponding groups.
Defaults to ``None``.
cmap : matplotlib colormap | (colormap, bool) | 'interactive'
Colormap. If tuple, the first value indicates the colormap to use and
the second value is a boolean defining interactivity. In interactive
mode the colors are adjustable by clicking and dragging the colorbar
with left and right mouse button. Left mouse button moves the scale up
and down and right mouse button adjusts the range. Hitting space bar
resets the scale. Up and down arrows can be used to change the
colormap. If 'interactive', translates to ``('RdBu_r', True)``.
Defaults to ``'RdBu_r'``.
colorbar : bool
If True, plot a colorbar. Defaults to True.
.. versionadded:: 0.16
mask : ndarray | None
An array of booleans of the same shape as the data. Entries of the
data that correspond to ``False`` in the mask are masked (see
``do_mask`` below). Useful for, e.g., masking for statistical
significance.
.. versionadded:: 0.16
mask_style : None | 'both' | 'contour' | 'mask'
If ``mask`` is not None: if 'contour', a contour line is drawn around
the masked areas (``True`` in ``mask``). If 'mask', entries not
``True`` in ``mask`` are shown transparently. If 'both', both a contour
and transparency are used.
If ``None``, defaults to 'both' if ``mask`` is not None, and is ignored
otherwise.
.. versionadded:: 0.16
mask_cmap : matplotlib colormap | (colormap, bool) | 'interactive'
The colormap chosen for masked parts of the image (see below), if
``mask`` is not ``None``. If None, ``cmap`` is reused. Defaults to
``Greys``. Not interactive. Otherwise, as ``cmap``.
mask_alpha : float
A float between 0 and 1. If ``mask`` is not None, this sets the
alpha level (degree of transparency) for the masked-out segments.
I.e., if 0, masked-out segments are not visible at all.
Defaults to .25.
.. versionadded:: 0.16
time_unit : str
The units for the time axis, can be "ms" or "s" (default).
.. versionadded:: 0.16
show_names : bool | 'auto' | 'all'
Determines if channel names should be plotted on the y axis. If False,
no names are shown. If True, ticks are set automatically by matplotlib
and the corresponding channel names are shown. If "all", all channel
        names are shown. If "auto", it is set to False if ``picks`` is
        ``None``,
to ``True`` if ``picks`` contains 25 or more entries, or to "all"
if ``picks`` contains fewer than 25 entries.
group_by : None | dict
If a dict, the values must be picks, and ``axes`` must also be a dict
with matching keys, or None. If ``axes`` is None, one figure and one
        axis will be created for each entry in ``group_by``. Then, for each
entry, the picked channels will be plotted to the corresponding axis.
If ``titles`` are None, keys will become plot titles. This is useful
for e.g. ROIs. Each entry must contain only one channel type.
For example::
group_by=dict(Left_ROI=[1, 2, 3, 4], Right_ROI=[5, 6, 7, 8])
If None, all picked channels are plotted to the same axis.
%(topomap_sphere_auto)s
Returns
-------
fig : instance of matplotlib.figure.Figure
Figure containing the images.
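    Examples
    --------
    A minimal sketch grouping hypothetical channel indices into two ROIs;
    the ``evoked`` variable and the index lists are illustrative only:
    >>> grp = dict(Left_ROI=[1, 2, 3, 4], Right_ROI=[5, 6, 7, 8])  # doctest: +SKIP
    >>> figs = plot_evoked_image(evoked, group_by=grp)  # doctest: +SKIP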
"""
return _plot_evoked(evoked=evoked, picks=picks, exclude=exclude, unit=unit,
show=show, ylim=clim, proj=proj, xlim=xlim, hline=None,
units=units, scalings=scalings, titles=titles,
axes=axes, plot_type="image", cmap=cmap,
colorbar=colorbar, mask=mask, mask_style=mask_style,
mask_cmap=mask_cmap, mask_alpha=mask_alpha,
time_unit=time_unit, show_names=show_names,
group_by=group_by, sphere=sphere)
def _plot_update_evoked(params, bools):
"""Update the plot evoked lines."""
picks, evoked = [params[k] for k in ('picks', 'evoked')]
projs = [proj for ii, proj in enumerate(params['projs'])
if ii in np.where(bools)[0]]
params['proj_bools'] = bools
new_evoked = evoked.copy()
new_evoked.info['projs'] = []
new_evoked.add_proj(projs)
new_evoked.apply_proj()
for ax, t in zip(params['axes'], params['ch_types_used']):
this_scaling = params['scalings'][t]
idx = [picks[i] for i in range(len(picks)) if params['types'][i] == t]
D = this_scaling * new_evoked.data[idx, :]
if params['plot_type'] == 'butterfly':
for line, di in zip(ax.lines, D):
line.set_ydata(di)
else:
ax.images[0].set_data(D)
params['fig'].canvas.draw()
@verbose
def plot_evoked_white(evoked, noise_cov, show=True, rank=None, time_unit='s',
sphere=None, axes=None, verbose=None):
"""Plot whitened evoked response.
Plots the whitened evoked response and the whitened GFP as described in
[1]_. This function is especially useful for investigating noise
covariance properties to determine if data are properly whitened (e.g.,
achieving expected values in line with model assumptions, see Notes below).
Parameters
----------
evoked : instance of mne.Evoked
The evoked response.
noise_cov : list | instance of Covariance | str
The noise covariance. Can be a string to load a covariance from disk.
show : bool
Show figure if True.
%(rank_None)s
time_unit : str
The units for the time axis, can be "ms" or "s" (default).
.. versionadded:: 0.16
%(topomap_sphere_auto)s
axes : list | None
List of axes to plot into.
.. versionadded:: 0.21.0
%(verbose)s
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure object containing the plot.
See Also
--------
mne.Evoked.plot
Notes
-----
If baseline signals match the assumption of Gaussian white noise,
values should be centered at 0, and be within 2 standard deviations
(±1.96) for 95%% of the time points. For the global field power (GFP),
we expect it to fluctuate around a value of 1.
If one single covariance object is passed, the GFP panel (bottom)
will depict different sensor types. If multiple covariance objects are
passed as a list, the left column will display the whitened evoked
responses for each channel based on the whitener from the noise covariance
    that has the highest log-likelihood. The right column will depict the
whitened GFPs based on each estimator separately for each sensor type.
Instead of numbers of channels the GFP display shows the estimated rank.
Note. The rank estimation will be printed by the logger
(if ``verbose=True``) for each noise covariance estimator that is passed.
References
----------
.. [1] Engemann D. and Gramfort A. (2015) Automated model selection in
covariance estimation and spatial whitening of MEG and EEG
signals, vol. 108, 328-342, NeuroImage.
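    Examples
    --------
    A minimal sketch; ``'sample-cov.fif'`` is a hypothetical noise
    covariance file estimated from baseline data:
    >>> fig = plot_evoked_white(evoked, noise_cov='sample-cov.fif')  # doctest: +SKIP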
"""
from ..cov import whiten_evoked, read_cov # recursive import
import matplotlib.pyplot as plt
time_unit, times = _check_time_unit(time_unit, evoked.times)
if isinstance(noise_cov, str):
noise_cov = read_cov(noise_cov)
if not isinstance(noise_cov, (list, tuple)):
noise_cov = [noise_cov]
evoked = evoked.copy() # handle ref meg
passive_idx = [idx for idx, proj in enumerate(evoked.info['projs'])
if not proj['active']]
    # projs are either applied already or inactive; drop the inactive ones
    # so they cannot interfere with whitening below
for idx in passive_idx[::-1]: # reverse order so idx does not change
evoked.del_proj(idx)
evoked.pick_types(ref_meg=False, exclude='bads', **_PICK_TYPES_DATA_DICT)
n_ch_used, rank_list, picks_list, has_sss = _triage_rank_sss(
evoked.info, noise_cov, rank, scalings=None)
if has_sss:
logger.info('SSS has been applied to data. Showing mag and grad '
'whitening jointly.')
# get one whitened evoked per cov
evokeds_white = [whiten_evoked(evoked, cov, picks=None, rank=r)
for cov, r in zip(noise_cov, rank_list)]
def whitened_gfp(x, rank=None):
"""Whitened Global Field Power.
The MNE inverse solver assumes zero mean whitened data as input.
Therefore, a chi^2 statistic will be best to detect model violations.
"""
return np.sum(x ** 2, axis=0) / (len(x) if rank is None else rank)
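    # Restating the line above in symbols: GFP_w(t) = sum_i x_i(t)**2 / r,
    # where r is the estimated rank (or the channel count if rank is None);
    # for properly whitened Gaussian data this fluctuates around 1.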
# prepare plot
if len(noise_cov) > 1:
n_columns = 2
n_extra_row = 0
else:
n_columns = 1
n_extra_row = 1
n_rows = n_ch_used + n_extra_row
want_shape = (n_rows, n_columns) if len(noise_cov) > 1 else (n_rows,)
_validate_type(axes, (list, tuple, np.ndarray, None), 'axes')
if axes is None:
_, axes = plt.subplots(n_rows,
n_columns, sharex=True, sharey=False,
figsize=(8.8, 2.2 * n_rows))
else:
axes = np.array(axes)
for ai, ax in enumerate(axes.flat):
_validate_type(ax, plt.Axes, 'axes.flat[%d]' % (ai,))
if axes.shape != want_shape:
raise ValueError(f'axes must have shape {want_shape}, got '
f'{axes.shape}')
fig = axes.flat[0].figure
if n_columns > 1:
suptitle = ('Whitened evoked (left, best estimator = "%s")\n'
'and global field power '
'(right, comparison of estimators)' %
noise_cov[0].get('method', 'empirical'))
fig.suptitle(suptitle)
if any(((n_columns == 1 and n_ch_used >= 1),
(n_columns == 2 and n_ch_used == 1))):
axes_evoked = axes[:n_ch_used]
ax_gfp = axes[-1:]
elif n_columns == 2 and n_ch_used > 1:
axes_evoked = axes[:n_ch_used, 0]
ax_gfp = axes[:, 1]
else:
raise RuntimeError('Wrong axes inputs')
titles_ = _handle_default('titles')
if has_sss:
titles_['meg'] = 'MEG (combined)'
colors = [plt.cm.Set1(i) for i in np.linspace(0, 0.5, len(noise_cov))]
ch_colors = _handle_default('color', None)
iter_gfp = zip(evokeds_white, noise_cov, rank_list, colors)
    # by convention, the first noise cov is the best one (highest
    # log-likelihood); we plot that one on the left
if not has_sss:
evokeds_white[0].plot(unit=False, axes=axes_evoked,
hline=[-1.96, 1.96], show=False,
time_unit=time_unit)
else:
for ((ch_type, picks), ax) in zip(picks_list, axes_evoked):
ax.plot(times, evokeds_white[0].data[picks].T, color='k',
lw=0.5)
for hline in [-1.96, 1.96]:
ax.axhline(hline, color='red', linestyle='--', lw=2)
ax.set(title='%s (%d channel%s)'
% (titles_[ch_type], len(picks), _pl(len(picks))))
# Now plot the GFP for all covs if indicated.
for evoked_white, noise_cov, rank_, color in iter_gfp:
i = 0
for ch, sub_picks in picks_list:
this_rank = rank_[ch]
title = '{0} ({2}{1})'.format(
titles_[ch] if n_columns > 1 else ch,
this_rank, 'rank ' if n_columns > 1 else '')
label = noise_cov.get('method', 'empirical')
ax = ax_gfp[i]
ax.set_title(title if n_columns > 1 else
'Whitened GFP, method = "%s"' % label)
data = evoked_white.data[sub_picks]
gfp = whitened_gfp(data, rank=this_rank)
            # map SSS-processed (combined MEG) data to the mag color
color_ch = 'mag' if ch == 'meg' else ch
ax.plot(times, gfp,
label=label if n_columns > 1 else title,
color=color if n_columns > 1 else ch_colors[color_ch],
lw=0.5)
ax.set(xlabel='Time (%s)' % (time_unit,), ylabel=r'GFP ($\chi^2$)',
xlim=[times[0], times[-1]], ylim=(0, 10))
ax.axhline(1, color='red', linestyle='--', lw=2.)
if n_columns > 1:
i += 1
ax = ax_gfp[0]
if n_columns == 1:
ax.legend( # mpl < 1.2.1 compatibility: use prop instead of fontsize
loc='upper right', bbox_to_anchor=(0.98, 0.9), prop=dict(size=12))
else:
ax.legend(loc='upper right', prop=dict(size=10))
params = dict(top=[0.69, 0.82, 0.87][n_rows - 1],
bottom=[0.22, 0.13, 0.09][n_rows - 1])
if has_sss:
params['hspace'] = 0.49
fig.subplots_adjust(**params)
fig.canvas.draw()
plt_show(show)
return fig
@verbose
def plot_snr_estimate(evoked, inv, show=True, axes=None, verbose=None):
"""Plot a data SNR estimate.
Parameters
----------
evoked : instance of Evoked
The evoked instance. This should probably be baseline-corrected.
inv : instance of InverseOperator
The minimum-norm inverse operator.
show : bool
Show figure if True.
axes : instance of Axes | None
The axes to plot into.
.. versionadded:: 0.21.0
%(verbose)s
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure object containing the plot.
Notes
-----
    The bluish green line ('Inverse' in the legend) is the SNR estimated
    from the mismatch between the data and the data re-estimated from the
    regularized inverse. The orange line ('Whitened GFP') is the SNR
    determined by the GFP of the whitened evoked data.
.. versionadded:: 0.9.0
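    Examples
    --------
    A minimal sketch; the inverse operator file name is hypothetical:
    >>> from mne.minimum_norm import read_inverse_operator  # doctest: +SKIP
    >>> inv = read_inverse_operator('sample-inv.fif')  # doctest: +SKIP
    >>> fig = plot_snr_estimate(evoked, inv)  # doctest: +SKIP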
"""
import matplotlib.pyplot as plt
from ..minimum_norm import estimate_snr
snr, snr_est = estimate_snr(evoked, inv)
_validate_type(axes, (None, plt.Axes))
if axes is None:
_, ax = plt.subplots(1, 1)
else:
ax = axes
del axes
fig = ax.figure
lims = np.concatenate([evoked.times[[0, -1]], [-1, snr_est.max()]])
ax.axvline(0, color='k', ls=':', lw=1)
ax.axhline(0, color='k', ls=':', lw=1)
# Colors are "bluish green" and "vermilion" taken from:
# http://bconnelly.net/2013/10/creating-colorblind-friendly-figures/
hs = list()
labels = ('Inverse', 'Whitened GFP')
hs.append(ax.plot(
evoked.times, snr_est, color=[0.0, 0.6, 0.5])[0])
hs.append(ax.plot(
evoked.times, snr - 1, color=[0.8, 0.4, 0.0])[0])
ax.set(xlim=lims[:2], ylim=lims[2:], ylabel='SNR',
xlabel='Time (s)')
if evoked.comment is not None:
ax.set_title(evoked.comment)
ax.legend(hs, labels, title='Estimation method')
plt_show(show)
return fig
@fill_doc
def plot_evoked_joint(evoked, times="peaks", title='', picks=None,
exclude=None, show=True, ts_args=None,
topomap_args=None):
"""Plot evoked data as butterfly plot and add topomaps for time points.
.. note:: Axes to plot in can be passed by the user through ``ts_args`` or
``topomap_args``. In that case both ``ts_args`` and
``topomap_args`` axes have to be used. Be aware that when the
axes are provided, their position may be slightly modified.
Parameters
----------
evoked : instance of Evoked
The evoked instance.
times : float | array of float | "auto" | "peaks"
The time point(s) to plot. If ``"auto"``, 5 evenly spaced topographies
between the first and last time instant will be shown. If ``"peaks"``,
finds time points automatically by checking for 3 local maxima in
Global Field Power. Defaults to ``"peaks"``.
title : str | None
The title. If ``None``, suppress printing channel type title. If an
empty string, a default title is created. Defaults to ''. If custom
axes are passed make sure to set ``title=None``, otherwise some of your
axes may be removed during placement of the title axis.
%(picks_all)s
exclude : None | list of str | 'bads'
Channels names to exclude from being shown. If ``'bads'``, the
bad channels are excluded. Defaults to ``None``.
show : bool
Show figure if ``True``. Defaults to ``True``.
ts_args : None | dict
A dict of ``kwargs`` that are forwarded to :meth:`mne.Evoked.plot` to
style the butterfly plot. If they are not in this dict, the following
defaults are passed: ``spatial_colors=True``, ``zorder='std'``.
        ``show``, ``times``, and ``exclude`` are illegal.
If ``None``, no customizable arguments will be passed.
Defaults to ``None``.
topomap_args : None | dict
A dict of ``kwargs`` that are forwarded to
:meth:`mne.Evoked.plot_topomap` to style the topomaps.
If it is not in this dict, ``outlines='skirt'`` will be passed.
``show``, ``times``, ``colorbar`` are illegal.
If ``None``, no customizable arguments will be passed.
Defaults to ``None``.
Returns
-------
fig : instance of matplotlib.figure.Figure | list
The figure object containing the plot. If ``evoked`` has multiple
channel types, a list of figures, one for each channel type, is
returned.
Notes
-----
.. versionadded:: 0.12.0
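    Examples
    --------
    A minimal sketch; the ``evoked`` variable and keyword values are
    illustrative only:
    >>> fig = plot_evoked_joint(evoked, times='peaks', ts_args=dict(gfp=True),
    ...                         topomap_args=dict(outlines='head'))  # doctest: +SKIP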
"""
import matplotlib.pyplot as plt
if ts_args is not None and not isinstance(ts_args, dict):
raise TypeError('ts_args must be dict or None, got type %s'
% (type(ts_args),))
ts_args = dict() if ts_args is None else ts_args.copy()
ts_args['time_unit'], _ = _check_time_unit(
ts_args.get('time_unit', 's'), evoked.times)
topomap_args = dict() if topomap_args is None else topomap_args.copy()
got_axes = False
    illegal_args = {'show', 'times', 'exclude'}
for args in (ts_args, topomap_args):
if any((x in args for x in illegal_args)):
raise ValueError("Don't pass any of {} as *_args.".format(
", ".join(list(illegal_args))))
if ("axes" in ts_args) or ("axes" in topomap_args):
if not (("axes" in ts_args) and ("axes" in topomap_args)):
raise ValueError("If one of `ts_args` and `topomap_args` contains "
"'axes', the other must, too.")
_validate_if_list_of_axes([ts_args["axes"]], 1)
n_topomaps = (3 if times is None else len(times)) + 1
_validate_if_list_of_axes(list(topomap_args["axes"]), n_topomaps)
got_axes = True
# channel selection
# simply create a new evoked object with the desired channel selection
# Need to deal with proj before picking to avoid bad projections
proj = topomap_args.get('proj', True)
proj_ts = ts_args.get('proj', True)
if proj_ts != proj:
raise ValueError(
f'topomap_args["proj"] (default True, got {proj}) must match '
f'ts_args["proj"] (default True, got {proj_ts})')
_check_option('topomap_args["proj"]', proj, (True, False, 'reconstruct'))
evoked = evoked.copy()
if proj:
evoked.apply_proj()
if proj == 'reconstruct':
evoked._reconstruct_proj()
topomap_args['proj'] = ts_args['proj'] = False # don't reapply
evoked = _pick_inst(evoked, picks, exclude, copy=False)
info = evoked.info
ch_types = _get_channel_types(info, unique=True, only_data_chs=True)
# if multiple sensor types: one plot per channel type, recursive call
if len(ch_types) > 1:
if got_axes:
raise NotImplementedError(
"Currently, passing axes manually (via `ts_args` or "
"`topomap_args`) is not supported for multiple channel types.")
figs = list()
for this_type in ch_types: # pick only the corresponding channel type
ev_ = evoked.copy().pick_channels(
[info['ch_names'][idx] for idx in range(info['nchan'])
if channel_type(info, idx) == this_type])
if len(_get_channel_types(ev_.info, unique=True)) > 1:
raise RuntimeError('Possibly infinite loop due to channel '
'selection problem. This should never '
'happen! Please check your channel types.')
figs.append(
plot_evoked_joint(
ev_, times=times, title=title, show=show, ts_args=ts_args,
exclude=list(), topomap_args=topomap_args))
return figs
# set up time points to show topomaps for
times_sec = _process_times(evoked, times, few=True)
del times
_, times_ts = _check_time_unit(ts_args['time_unit'], times_sec)
# prepare axes for topomap
if not got_axes:
fig, ts_ax, map_ax, cbar_ax = _prepare_joint_axes(len(times_sec),
figsize=(8.0, 4.2))
else:
ts_ax = ts_args["axes"]
del ts_args["axes"]
map_ax = topomap_args["axes"][:-1]
cbar_ax = topomap_args["axes"][-1]
del topomap_args["axes"]
fig = cbar_ax.figure
# butterfly/time series plot
# most of this code is about passing defaults on demand
ts_args_def = dict(picks=None, unit=True, ylim=None, xlim='tight',
proj=False, hline=None, units=None, scalings=None,
titles=None, gfp=False, window_title=None,
spatial_colors=True, zorder='std',
sphere=None)
ts_args_def.update(ts_args)
_plot_evoked(evoked, axes=ts_ax, show=False, plot_type='butterfly',
exclude=[], set_tight_layout=False, **ts_args_def)
# handle title
# we use a new axis for the title to handle scaling of plots
old_title = ts_ax.get_title()
ts_ax.set_title('')
if title is not None:
title_ax = plt.subplot(4, 3, 2)
if title == '':
title = old_title
title_ax.text(.5, .5, title, transform=title_ax.transAxes,
horizontalalignment='center',
verticalalignment='center')
title_ax.axis('off')
# topomap
contours = topomap_args.get('contours', 6)
ch_type = ch_types.pop() # set should only contain one element
# Since the data has all the ch_types, we get the limits from the plot.
vmin, vmax = ts_ax.get_ylim()
norm = ch_type == 'grad'
vmin = 0 if norm else vmin
vmin, vmax = _setup_vmin_vmax(evoked.data, vmin, vmax, norm)
if not isinstance(contours, (list, np.ndarray)):
locator, contours = _set_contour_locator(vmin, vmax, contours)
else:
locator = None
topomap_args_pass = topomap_args.copy()
topomap_args_pass['outlines'] = topomap_args.get('outlines', 'skirt')
topomap_args_pass['contours'] = contours
evoked.plot_topomap(times=times_sec, axes=map_ax, show=False,
colorbar=False, **topomap_args_pass)
if topomap_args.get('colorbar', True):
from matplotlib import ticker
cbar = plt.colorbar(map_ax[0].images[0], cax=cbar_ax)
if isinstance(contours, (list, np.ndarray)):
cbar.set_ticks(contours)
else:
if locator is None:
locator = ticker.MaxNLocator(nbins=5)
cbar.locator = locator
cbar.update_ticks()
if not got_axes:
plt.subplots_adjust(left=.1, right=.93, bottom=.14,
top=1. if title is not None else 1.2)
# connection lines
# draw the connection lines between time series and topoplots
lines = [_connection_line(timepoint, fig, ts_ax, map_ax_)
for timepoint, map_ax_ in zip(times_ts, map_ax)]
for line in lines:
fig.lines.append(line)
# mark times in time series plot
for timepoint in times_ts:
ts_ax.axvline(timepoint, color='grey', linestyle='-',
linewidth=1.5, alpha=.66, zorder=0)
# show and return it
plt_show(show)
return fig
###############################################################################
# The following functions are all helpers for plot_compare_evokeds. #
###############################################################################
def _check_loc_legal(loc, what='your choice', default=1):
"""Check if loc is a legal location for MPL subordinate axes."""
true_default = {"legend": 2, "show_sensors": 1}.get(what, default)
if isinstance(loc, (bool, np.bool_)) and loc:
loc = true_default
loc_dict = {'upper right': 1, 'upper left': 2, 'lower left': 3,
'lower right': 4, 'right': 5, 'center left': 6,
'center right': 7, 'lower center': 8, 'upper center': 9,
'center': 10}
loc_ = loc_dict.get(loc, loc)
if loc_ not in range(11):
        raise ValueError(str(loc) + " is not a legal MPL loc, please supply "
                         "another value for " + what + ".")
return loc_
def _validate_style_keys_pce(styles, conditions, tags):
"""Validate styles dict keys for plot_compare_evokeds."""
styles = deepcopy(styles)
if not set(styles).issubset(tags.union(conditions)):
raise ValueError('The keys in "styles" ({}) must match the keys in '
'"evokeds" ({}).'.format(list(styles), conditions))
# make sure all the keys are in there
for cond in conditions:
if cond not in styles:
styles[cond] = dict()
        # deal with matplotlib's synonymous handling of "c" and "color" /
        # "ls" and "linestyle" / "lw" and "linewidth"; check each synonym
        # independently (a chained elif would silently skip combinations)
        if 'c' in styles[cond]:
            styles[cond]['color'] = styles[cond].pop('c')
        if 'ls' in styles[cond]:
            styles[cond]['linestyle'] = styles[cond].pop('ls')
        if 'lw' in styles[cond]:
            styles[cond]['linewidth'] = styles[cond].pop('lw')
# transfer styles from partial-matched entries
for tag in cond.split('/'):
if tag in styles:
styles[cond].update(styles[tag])
# remove the (now transferred) partial-matching style entries
for key in list(styles):
if key not in conditions:
del styles[key]
return styles
def _validate_colors_pce(colors, cmap, conditions, tags):
"""Check and assign colors for plot_compare_evokeds."""
err_suffix = ''
if colors is None:
if cmap is None:
colors = _get_color_list()
err_suffix = ' in the default color cycle'
else:
colors = list(range(len(conditions)))
# convert color list to dict
if isinstance(colors, (list, tuple, np.ndarray)):
if len(conditions) > len(colors):
raise ValueError('Trying to plot {} conditions, but there are only'
' {} colors{}. Please specify colors manually.'
.format(len(conditions), len(colors), err_suffix))
colors = dict(zip(conditions, colors))
# should be a dict by now...
if not isinstance(colors, dict):
raise TypeError('"colors" must be a dict, list, or None; got {}.'
.format(type(colors).__name__))
# validate color dict keys
if not set(colors).issubset(tags.union(conditions)):
raise ValueError('If "colors" is a dict its keys ({}) must '
'match the keys/conditions in "evokeds" ({}).'
.format(list(colors), conditions))
# validate color dict values
color_vals = list(colors.values())
all_numeric = all(_is_numeric(_color) for _color in color_vals)
if cmap is not None and not all_numeric:
raise TypeError('if "cmap" is specified, then "colors" must be '
'None or a (list or dict) of (ints or floats); got {}.'
.format(', '.join(color_vals)))
# convert provided ints to sequential, rank-ordered ints
all_int = all([isinstance(_color, Integral) for _color in color_vals])
if all_int:
colors = deepcopy(colors)
ranks = {val: ix for ix, val in enumerate(sorted(set(color_vals)))}
for key, orig_int in colors.items():
colors[key] = ranks[orig_int]
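        # e.g. a hypothetical input {'a': 10, 'b': 2, 'c': 10} becomes
        # {'a': 1, 'b': 0, 'c': 1}: ties share a rank and ranks are 0-based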
# if no cmap, convert color ints to real colors
if cmap is None:
color_list = _get_color_list()
for cond, color_int in colors.items():
colors[cond] = color_list[color_int]
# recompute color_vals as a sorted set (we'll need it that way later)
color_vals = set(colors.values())
if all_numeric:
color_vals = sorted(color_vals)
return colors, color_vals
def _validate_cmap_pce(cmap, colors, color_vals):
"""Check and assign colormap for plot_compare_evokeds."""
from matplotlib.cm import get_cmap
from matplotlib.colors import Colormap
all_int = all([isinstance(_color, Integral) for _color in color_vals])
lut = len(color_vals) if all_int else None
colorbar_title = ''
if isinstance(cmap, (list, tuple, np.ndarray)) and len(cmap) == 2:
colorbar_title, cmap = cmap
if isinstance(cmap, str):
cmap = get_cmap(cmap, lut=lut)
elif isinstance(cmap, Colormap) and all_int:
cmap = cmap._resample(lut)
return cmap, colorbar_title
def _validate_linestyles_pce(linestyles, conditions, tags):
"""Check and assign linestyles for plot_compare_evokeds."""
# make linestyles a list if it's not defined
if linestyles is None:
linestyles = [None] * len(conditions) # will get changed to defaults
# convert linestyle list to dict
if isinstance(linestyles, (list, tuple, np.ndarray)):
if len(conditions) > len(linestyles):
raise ValueError('Trying to plot {} conditions, but there are '
'only {} linestyles. Please specify linestyles '
'manually.'
.format(len(conditions), len(linestyles)))
linestyles = dict(zip(conditions, linestyles))
# should be a dict by now...
if not isinstance(linestyles, dict):
raise TypeError('"linestyles" must be a dict, list, or None; got {}.'
.format(type(linestyles).__name__))
# validate linestyle dict keys
if not set(linestyles).issubset(tags.union(conditions)):
raise ValueError('If "linestyles" is a dict its keys ({}) must '
'match the keys/conditions in "evokeds" ({}).'
.format(list(linestyles), conditions))
# normalize linestyle values (so we can accurately count unique linestyles
# later). See https://github.com/matplotlib/matplotlib/blob/master/matplotlibrc.template#L131-L133 # noqa
linestyle_map = {'solid': (0, ()),
'dotted': (0, (1., 1.65)),
'dashed': (0, (3.7, 1.6)),
'dashdot': (0, (6.4, 1.6, 1., 1.6)),
'-': (0, ()),
':': (0, (1., 1.65)),
'--': (0, (3.7, 1.6)),
'-.': (0, (6.4, 1.6, 1., 1.6))}
for cond, _ls in linestyles.items():
linestyles[cond] = linestyle_map.get(_ls, _ls)
return linestyles
def _populate_style_dict_pce(condition, condition_styles, style_name,
style_dict, cmap):
"""Transfer styles into condition_styles dict for plot_compare_evokeds."""
defaults = dict(color='gray', linestyle=(0, ())) # (0, ()) == 'solid'
# if condition X doesn't yet have style Y defined:
if condition_styles.get(style_name, None) is None:
# check the style dict for the full condition name
try:
condition_styles[style_name] = style_dict[condition]
# if it's not in there, try the slash-separated condition tags
except KeyError:
for tag in condition.split('/'):
try:
condition_styles[style_name] = style_dict[tag]
# if the tag's not in there, assign a default value (but also
# continue looping in search of a tag that *is* in there)
except KeyError:
condition_styles[style_name] = defaults[style_name]
# if we found a valid tag, keep track of it for colorbar
# legend purposes, and also stop looping (so we don't overwrite
# a valid tag's style with an invalid tag → default style)
else:
if style_name == 'color' and cmap is not None:
condition_styles['cmap_label'] = tag
break
return condition_styles
def _handle_styles_pce(styles, linestyles, colors, cmap, conditions):
"""Check and assign styles for plot_compare_evokeds."""
styles = deepcopy(styles)
# validate style dict structure (doesn't check/assign values yet)
tags = set(tag for cond in conditions for tag in cond.split('/'))
if styles is None:
styles = {cond: dict() for cond in conditions}
styles = _validate_style_keys_pce(styles, conditions, tags)
# validate color dict
colors, color_vals = _validate_colors_pce(colors, cmap, conditions, tags)
all_int = all([isinstance(_color, Integral) for _color in color_vals])
# instantiate cmap
cmap, colorbar_title = _validate_cmap_pce(cmap, colors, color_vals)
# validate linestyles
linestyles = _validate_linestyles_pce(linestyles, conditions, tags)
# prep for colorbar tick handling
colorbar_ticks = None if cmap is None else dict()
# array mapping color integers (indices) to tick locations (array values)
tick_locs = np.linspace(0, 1, 2 * len(color_vals) + 1)[1::2]
# transfer colors/linestyles dicts into styles dict; fall back on defaults
color_and_linestyle = dict(color=colors, linestyle=linestyles)
for cond, cond_styles in styles.items():
for _name, _style in color_and_linestyle.items():
cond_styles = _populate_style_dict_pce(cond, cond_styles, _name,
_style, cmap)
# convert numeric colors into cmap color values; store colorbar ticks
if cmap is not None:
color_number = cond_styles['color']
cond_styles['color'] = cmap(color_number)
tick_loc = tick_locs[color_number] if all_int else color_number
key = cond_styles.pop('cmap_label', cond)
colorbar_ticks[key] = tick_loc
return styles, linestyles, colors, cmap, colorbar_title, colorbar_ticks
def _evoked_sensor_legend(info, picks, ymin, ymax, show_sensors, ax,
sphere):
"""Show sensor legend (location of a set of sensors on the head)."""
if show_sensors is True:
ymin, ymax = np.abs(ax.get_ylim())
show_sensors = "lower right" if ymin > ymax else "upper right"
pos, outlines = _get_pos_outlines(info, picks, sphere=sphere)
show_sensors = _check_loc_legal(show_sensors, "show_sensors")
_plot_legend(pos, ["k"] * len(picks), ax, list(), outlines,
show_sensors, size=25)
def _draw_colorbar_pce(ax, colors, cmap, colorbar_title, colorbar_ticks):
"""Draw colorbar for plot_compare_evokeds."""
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.colorbar import ColorbarBase
from matplotlib.transforms import Bbox
# create colorbar axes
orig_bbox = ax.get_position()
divider = make_axes_locatable(ax)
cax = divider.append_axes('right', size='5%', pad=0.1)
cax.yaxis.tick_right()
cb = ColorbarBase(cax, cmap=cmap, norm=None, orientation='vertical')
cb.set_label(colorbar_title)
# handle ticks
ticks = sorted(set(colorbar_ticks.values()))
ticklabels = [''] * len(ticks)
for label, tick in colorbar_ticks.items():
idx = ticks.index(tick)
if len(ticklabels[idx]): # handle labels with the same color/location
ticklabels[idx] = '\n'.join([ticklabels[idx], label])
else:
ticklabels[idx] = label
assert all(len(label) for label in ticklabels)
cb.set_ticks(ticks)
cb.set_ticklabels(ticklabels)
# shrink colorbar if discrete colors
color_vals = set(colors.values())
if all([isinstance(_color, Integral) for _color in color_vals]):
fig = ax.get_figure()
fig.canvas.draw()
fig_aspect = np.divide(*fig.get_size_inches())
new_bbox = ax.get_position()
cax_width = 0.75 * (orig_bbox.xmax - new_bbox.xmax)
# add extra space for multiline colorbar labels
h_mult = max(2, max([len(label.split('\n')) for label in ticklabels]))
cax_height = len(color_vals) * h_mult * cax_width / fig_aspect
x0 = orig_bbox.xmax - cax_width
y0 = (new_bbox.ymax + new_bbox.ymin - cax_height) / 2
x1 = orig_bbox.xmax
y1 = y0 + cax_height
new_bbox = Bbox([[x0, y0], [x1, y1]])
cax.set_axes_locator(None)
cax.set_position(new_bbox)
def _draw_legend_pce(legend, split_legend, styles, linestyles, colors, cmap,
do_topo, ax):
"""Draw legend for plot_compare_evokeds."""
import matplotlib.lines as mlines
lines = list()
# triage
if split_legend is None:
split_legend = cmap is not None
n_colors = len(set(colors.values()))
n_linestyles = len(set(linestyles.values()))
draw_styles = cmap is None and not split_legend
draw_colors = cmap is None and split_legend and n_colors > 1
draw_linestyles = (cmap is None or split_legend) and n_linestyles > 1
# create the fake lines for the legend
if draw_styles:
for label, cond_styles in styles.items():
line = mlines.Line2D([], [], label=label, **cond_styles)
lines.append(line)
else:
if draw_colors:
for label, color in colors.items():
line = mlines.Line2D([], [], label=label, linestyle='solid',
color=color)
lines.append(line)
if draw_linestyles:
for label, linestyle in linestyles.items():
line = mlines.Line2D([], [], label=label, linestyle=linestyle,
color='black')
lines.append(line)
# legend params
ncol = 1 + (len(lines) // 5)
loc = _check_loc_legal(legend, 'legend')
legend_params = dict(loc=loc, frameon=True, ncol=ncol)
# special placement (above dedicated legend axes) in topoplot
if do_topo and isinstance(legend, bool):
legend_params.update(loc='lower right', bbox_to_anchor=(1, 1))
# draw the legend
if any([draw_styles, draw_colors, draw_linestyles]):
labels = [line.get_label() for line in lines]
ax.legend(lines, labels, **legend_params)
def _draw_axes_pce(ax, ymin, ymax, truncate_yaxis, truncate_xaxis, invert_y,
vlines, tmin, tmax, unit, skip_axlabel=True):
"""Position, draw, and truncate axes for plot_compare_evokeds."""
# avoid matplotlib errors
if ymin == ymax:
ymax += 1e-15
if tmin == tmax:
tmax += 1e-9
ax.set_xlim(tmin, tmax)
# for dark backgrounds:
ax.patch.set_alpha(0)
if not np.isfinite([ymin, ymax]).all(): # nothing plotted
return
ax.set_ylim(ymin, ymax)
ybounds = (ymin, ymax)
# determine ymin/ymax for spine truncation
trunc_y = True if truncate_yaxis == 'auto' else truncate_yaxis
if truncate_yaxis:
if isinstance(truncate_yaxis, bool):
# truncate to half the max abs. value and round to a nice-ish
# number. ylims are already symmetric about 0 or have a lower bound
# of 0, so div. by 2 should suffice.
ybounds = np.array([ymin, ymax]) / 2.
precision = 0.25
ybounds = np.round(ybounds / precision) * precision
elif truncate_yaxis == 'auto':
# truncate to existing max/min ticks
ybounds = _trim_ticks(ax.get_yticks(), ymin, ymax)[[0, -1]]
else:
raise ValueError('"truncate_yaxis" must be bool or '
'"auto", got {}'.format(truncate_yaxis))
_setup_ax_spines(ax, vlines, tmin, tmax, ybounds[0], ybounds[1], invert_y,
unit, truncate_xaxis, trunc_y, skip_axlabel)
def _get_data_and_ci(evoked, combine, combine_func, picks, scaling=1,
ci_fun=None):
"""Compute (sensor-aggregated, scaled) time series and possibly CI."""
picks = np.array(picks).flatten()
# apply scalings
data = np.array([evk.data[picks] * scaling for evk in evoked])
# combine across sensors
if combine is not None:
logger.info('combining channels using "{}"'.format(combine))
data = combine_func(data)
# get confidence band
if ci_fun is not None:
ci = ci_fun(data)
# get grand mean across evokeds
data = np.mean(data, axis=0)
_check_if_nan(data)
return (data,) if ci_fun is None else (data, ci)
def _get_ci_function_pce(ci, do_topo=False):
"""Get confidence interval function for plot_compare_evokeds."""
if ci is None:
return None
elif callable(ci):
return ci
elif isinstance(ci, bool) and not ci:
return None
elif isinstance(ci, bool):
ci = 0.95
if isinstance(ci, float):
from ..stats import _ci
method = 'parametric' if do_topo else 'bootstrap'
return partial(_ci, ci=ci, method=method)
else:
raise TypeError('"ci" must be None, bool, float or callable, got {}'
.format(type(ci).__name__))
def _plot_compare_evokeds(ax, data_dict, conditions, times, ci_dict, styles,
title, all_positive, topo):
"""Plot evokeds (to compare them; with CIs) based on a data_dict."""
for condition in conditions:
# plot the actual data ('dat') as a line
dat = data_dict[condition].T
ax.plot(times, dat, zorder=1000, label=condition, clip_on=False,
**styles[condition])
# plot the confidence interval if available
if ci_dict.get(condition, None) is not None:
ci_ = ci_dict[condition]
ax.fill_between(times, ci_[0].flatten(), ci_[1].flatten(),
zorder=9, color=styles[condition]['color'],
alpha=0.3, clip_on=False)
if topo:
ax.text(-.1, 1, title, transform=ax.transAxes)
else:
ax.set_title(title)
def _title_helper_pce(title, picked_types, picks, ch_names, combine):
"""Format title for plot_compare_evokeds."""
if title is None:
title = (_handle_default('titles').get(picks, None) if picked_types
else _set_title_multiple_electrodes(title, combine, ch_names))
# add the `combine` modifier
do_combine = picked_types or len(ch_names) > 1
if (title is not None and len(title) and isinstance(combine, str) and
do_combine):
_comb = combine.upper() if combine == 'gfp' else combine
_comb = 'std. dev.' if _comb == 'std' else _comb
title += ' ({})'.format(_comb)
return title
@fill_doc
def plot_compare_evokeds(evokeds, picks=None, colors=None,
linestyles=None, styles=None, cmap=None,
vlines='auto', ci=True, truncate_yaxis='auto',
truncate_xaxis=True, ylim=None, invert_y=False,
show_sensors=None, legend=True,
split_legend=None, axes=None, title=None, show=True,
combine=None, sphere=None):
"""Plot evoked time courses for one or more conditions and/or channels.
Parameters
----------
evokeds : instance of mne.Evoked | list | dict
If a single Evoked instance, it is plotted as a time series.
If a list of Evokeds, the contents are plotted with their
``.comment`` attributes used as condition labels. If no comment is set,
the index of the respective Evoked in the list will be used instead,
starting with ``1`` for the first Evoked.
If a dict whose values are Evoked objects, the contents are plotted as
single time series each and the keys are used as labels.
If a [dict/list] of lists, the unweighted mean is plotted as a time
series and the parametric confidence interval is plotted as a shaded
area. All instances must have the same shape (channel numbers, time
points, etc.).
If dict, keys must be of type str.
%(picks_all_data)s
* If picks is None or a (collection of) data channel types, the
global field power will be plotted for all data channels.
Otherwise, picks will be averaged.
* If multiple channel types are selected, one
figure will be returned for each channel type.
* If the selected channels are gradiometers, the signal from
corresponding (gradiometer) pairs will be combined.
colors : list | dict | None
Colors to use when plotting the ERP/F lines and confidence bands. If
``cmap`` is not ``None``, ``colors`` must be a :class:`list` or
:class:`dict` of :class:`ints <int>` or :class:`floats <float>`
indicating steps or percentiles (respectively) along the colormap. If
``cmap`` is ``None``, list elements or dict values of ``colors`` must
be :class:`ints <int>` or valid :doc:`matplotlib colors
<tutorials/colors/colors>`; lists are cycled through sequentially,
while dicts must have keys matching the keys or conditions of an
``evokeds`` dict (see Notes for details). If ``None``, the current
:doc:`matplotlib color cycle <gallery/color/color_cycle_default>` is
used. Defaults to ``None``.
linestyles : list | dict | None
Styles to use when plotting the ERP/F lines. If a :class:`list` or
:class:`dict`, elements must be valid :doc:`matplotlib linestyles
<matplotlib:gallery/lines_bars_and_markers/linestyles>`. Lists are
cycled through sequentially; dictionaries must have keys matching the
keys or conditions of an ``evokeds`` dict (see Notes for details). If
``None``, all lines will be solid. Defaults to ``None``.
styles : dict | None
Dictionary of styles to use when plotting ERP/F lines. Keys must match
keys or conditions of ``evokeds``, and values must be a :class:`dict`
of legal inputs to :func:`matplotlib.pyplot.plot`. Those values will be
passed as parameters to the line plot call of the corresponding
condition, overriding defaults (e.g.,
``styles={"Aud/L": {"linewidth": 3}}`` will set the linewidth for
"Aud/L" to 3). As with ``colors`` and ``linestyles``, keys matching
conditions in ``/``-separated ``evokeds`` keys are supported (see Notes
for details).
cmap : None | str | tuple | instance of matplotlib.colors.Colormap
Colormap from which to draw color values when plotting the ERP/F lines
and confidence bands. If not ``None``, ints or floats in the ``colors``
parameter are mapped to steps or percentiles (respectively) along the
colormap. If ``cmap`` is a :class:`str`, it will be passed to
:func:`matplotlib.cm.get_cmap`; if ``cmap`` is a tuple, its first
element will be used as a string to label the colorbar, and its
second element will be passed to :func:`matplotlib.cm.get_cmap` (unless
it is already an instance of :class:`~matplotlib.colors.Colormap`).
.. versionchanged:: 0.19
Support for passing :class:`~matplotlib.colors.Colormap` instances.
vlines : "auto" | list of float
A list of times (in seconds) at which to plot dashed vertical lines.
If "auto" and the supplied data includes 0, it is set to [0.]
and a vertical bar is plotted at time 0. If an empty list is passed,
no vertical lines are plotted.
ci : float | bool | callable | None
Confidence band around each ERP/F time series. If ``False`` or ``None``
no confidence band is drawn. If :class:`float`, ``ci`` must be between
0 and 1, and will set the threshold for a bootstrap
(single plot)/parametric (when ``axes=='topo'``) estimation of the
confidence band; ``True`` is equivalent to setting a threshold of 0.95
(i.e., the 95%% confidence band is drawn). If a callable, it must take
a single array (n_observations × n_times) as input and return upper and
lower confidence margins (2 × n_times). Defaults to ``True``.
truncate_yaxis : bool | 'auto'
Whether to shorten the y-axis spine. If 'auto', the spine is truncated
at the minimum and maximum ticks. If ``True``, it is truncated at the
multiple of 0.25 nearest to half the maximum absolute value of the
data. If ``truncate_xaxis=False``, only the far bound of the y-axis
will be truncated. Defaults to 'auto'.
truncate_xaxis : bool
Whether to shorten the x-axis spine. If ``True``, the spine is
truncated at the minimum and maximum ticks. If
``truncate_yaxis=False``, only the far bound of the x-axis will be
truncated. Defaults to ``True``.
ylim : dict | None
Y-axis limits for plots (after scaling has been applied). :class:`dict`
keys should match channel types; valid keys are eeg, mag, grad, misc
(example: ``ylim=dict(eeg=[-20, 20])``). If ``None``, the y-axis limits
will be set automatically by matplotlib. Defaults to ``None``.
invert_y : bool
Whether to plot negative values upward (as is sometimes done
for ERPs out of tradition). Defaults to ``False``.
show_sensors : bool | int | str | None
Whether to display an inset showing sensor locations on a head outline.
If :class:`int` or :class:`str`, indicates position of the inset (see
:func:`mpl_toolkits.axes_grid1.inset_locator.inset_axes`). If ``None``,
treated as ``True`` if there is only one channel in ``picks``. If
``True``, location is upper or lower right corner, depending on data
values. Defaults to ``None``.
legend : bool | int | str
Whether to show a legend for the colors/linestyles of the conditions
plotted. If :class:`int` or :class:`str`, indicates position of the
legend (see :func:`mpl_toolkits.axes_grid1.inset_locator.inset_axes`).
If ``True``, equivalent to ``'upper left'``. Defaults to ``True``.
split_legend : bool | None
Whether to separate color and linestyle in the legend. If ``None``,
a separate linestyle legend will still be shown if ``cmap`` is
specified. Defaults to ``None``.
axes : None | Axes instance | list of Axes | 'topo'
:class:`~matplotlib.axes.Axes` object to plot into. If plotting
multiple channel types (or multiple channels when ``combine=None``),
``axes`` should be a list of appropriate length containing
:class:`~matplotlib.axes.Axes` objects. If ``'topo'``, a new
:class:`~matplotlib.figure.Figure` is created with one axis for each
channel, in a topographical layout. If ``None``, a new
:class:`~matplotlib.figure.Figure` is created for each channel type.
Defaults to ``None``.
title : str | None
Title printed above the plot. If ``None``, a title will be
automatically generated based on channel name(s) or type(s) and the
value of the ``combine`` parameter. Defaults to ``None``.
show : bool
Whether to show the figure. Defaults to ``True``.
%(combine)s
If callable, the callable must accept one positional input (data of
shape ``(n_evokeds, n_channels, n_times)``) and return an
:class:`array <numpy.ndarray>` of shape ``(n_evokeds, n_times)``. For
example::
combine = lambda data: np.median(data, axis=1)
If ``combine`` is ``None``, channels are combined by computing GFP,
unless ``picks`` is a single channel (not channel type) or
``axes='topo'``, in which cases no combining is performed. Defaults to
``None``.
%(topomap_sphere_auto)s
Returns
-------
fig : list of Figure instances
A list of the figure(s) generated.
Notes
-----
If the parameters ``styles``, ``colors``, or ``linestyles`` are passed as
:class:`dicts <python:dict>`, then ``evokeds`` must also be a
:class:`python:dict`, and
the keys of the plot-style parameters must either match the keys of
``evokeds``, or match a ``/``-separated partial key ("condition") of
``evokeds``. For example, if evokeds has keys "Aud/L", "Aud/R", "Vis/L",
and "Vis/R", then ``linestyles=dict(L='--', R='-')`` will plot both Aud/L
and Vis/L conditions with dashed lines and both Aud/R and Vis/R conditions
with solid lines. Similarly, ``colors=dict(Aud='r', Vis='b')`` will plot
Aud/L and Aud/R conditions red and Vis/L and Vis/R conditions blue.
Color specification depends on whether a colormap has been provided in the
``cmap`` parameter. The following table summarizes how the ``colors``
parameter is interpreted:
.. cssclass:: table-bordered
.. rst-class:: midvalign
+-------------+----------------+------------------------------------------+
| ``cmap`` | ``colors`` | result |
+=============+================+==========================================+
| | None | matplotlib default color cycle; unique |
| | | color for each condition |
| +----------------+------------------------------------------+
| | | matplotlib default color cycle; lowest |
| | list or dict | integer mapped to first cycle color; |
| | of integers | conditions with same integer get same |
| None | | color; unspecified conditions are "gray" |
| +----------------+------------------------------------------+
| | list or dict | ``ValueError`` |
| | of floats | |
| +----------------+------------------------------------------+
| | list or dict | the specified hex colors; unspecified |
| | of hexadecimal | conditions are "gray" |
| | color strings | |
+-------------+----------------+------------------------------------------+
| | None | equally spaced colors on the colormap; |
| | | unique color for each condition |
| +----------------+------------------------------------------+
| | | equally spaced colors on the colormap; |
| | list or dict | lowest integer mapped to first cycle |
| string or | of integers | color; conditions with same integer |
| instance of | | get same color |
| matplotlib +----------------+------------------------------------------+
| Colormap | list or dict | floats mapped to corresponding colormap |
| | of floats | values |
| +----------------+------------------------------------------+
| | list or dict | |
| | of hexadecimal | ``TypeError`` |
| | color strings | |
+-------------+----------------+------------------------------------------+
"""
import matplotlib.pyplot as plt
from ..evoked import Evoked, _check_evokeds_ch_names_times
# build up evokeds into a dict, if it's not already
if isinstance(evokeds, Evoked):
evokeds = [evokeds]
if isinstance(evokeds, (list, tuple)):
evokeds_copy = list(evokeds)  # works for both list and tuple input
evokeds = dict()
for evk_idx, evk in enumerate(evokeds_copy, start=1):
label = None
if hasattr(evk, 'comment'):
label = evk.comment
label = label if label else str(evk_idx)
evokeds[label] = evk
del evokeds_copy
if not isinstance(evokeds, dict):
raise TypeError('"evokeds" must be a dict, list, or instance of '
'mne.Evoked; got {}'.format(type(evokeds).__name__))
evokeds = deepcopy(evokeds) # avoid modifying dict outside function scope
for cond, evoked in evokeds.items():
_validate_type(cond, 'str', 'Conditions')
if isinstance(evoked, Evoked):
evokeds[cond] = [evoked] # wrap singleton evokeds in a list
for evk in evokeds[cond]:
_validate_type(evk, Evoked, 'All evokeds entries ', 'Evoked')
# ensure same channels and times across all evokeds
all_evoked = sum(evokeds.values(), [])
_check_evokeds_ch_names_times(all_evoked)
del all_evoked
# get some representative info
conditions = list(evokeds)
one_evoked = evokeds[conditions[0]][0]
times = one_evoked.times
info = one_evoked.info
sphere = _check_sphere(sphere, info)
tmin, tmax = times[0], times[-1]
# set some defaults
if ylim is None:
ylim = dict()
if vlines == 'auto':
vlines = [0.] if (tmin < 0 < tmax) else []
_validate_type(vlines, (list, tuple), 'vlines', 'list or tuple')
# is picks a channel type (or None)?
orig_picks = deepcopy(picks)
picks, picked_types = _picks_to_idx(info, picks, return_kind=True)
# some things that depend on picks:
ch_names = np.array(one_evoked.ch_names)[picks].tolist()
ch_types = list(_get_channel_types(info, picks=picks, unique=True)
.intersection(_DATA_CH_TYPES_SPLIT + ('misc',))) # miscICA
picks_by_type = channel_indices_by_type(info, picks)
# discard picks from non-data channels (e.g., ref_meg)
good_picks = sum([picks_by_type[ch_type] for ch_type in ch_types], [])
picks = np.intersect1d(picks, good_picks)
if show_sensors is None:
show_sensors = (len(picks) == 1)
# cannot combine a single channel
if (len(picks) < 2) and combine is not None:
warn('Only {} channel in "picks"; cannot combine by method "{}".'
.format(len(picks), combine))
# `combine` defaults to GFP unless picked a single channel or axes='topo'
if combine is None and len(picks) > 1 and axes != 'topo':
combine = 'gfp'
# convert `combine` into callable (if None or str)
combine_func = _make_combine_callable(combine)
# title
title = _title_helper_pce(title, picked_types, picks=orig_picks,
ch_names=ch_names, combine=combine)
# setup axes
do_topo = (axes == 'topo')
if do_topo:
show_sensors = False
if len(picks) > 70:
logger.info('You are plotting to a topographical layout with >70 '
'sensors. This can be extremely slow. Consider using '
'mne.viz.plot_topo, which is optimized for speed.')
axes = ['topo'] * len(ch_types)
else:
if axes is None:
axes = (plt.subplots(figsize=(8, 6))[1] for _ in ch_types)
elif isinstance(axes, plt.Axes):
axes = [axes]
_validate_if_list_of_axes(axes, obligatory_len=len(ch_types))
if len(ch_types) > 1:
logger.info('Multiple channel types selected, returning one figure '
'per type.')
figs = list()
for ch_type, ax in zip(ch_types, axes):
_picks = picks_by_type[ch_type]
_ch_names = np.array(one_evoked.ch_names)[_picks].tolist()
_picks = ch_type if picked_types else _picks
# don't pass `combine` here; title will run through this helper
# function a second time & it will get added then
_title = _title_helper_pce(title, picked_types, picks=_picks,
ch_names=_ch_names, combine=None)
figs.extend(plot_compare_evokeds(
evokeds, picks=_picks, colors=colors, cmap=cmap,
linestyles=linestyles, styles=styles, vlines=vlines, ci=ci,
truncate_yaxis=truncate_yaxis, ylim=ylim, invert_y=invert_y,
legend=legend, show_sensors=show_sensors,
axes=ax, title=_title, split_legend=split_legend, show=show,
sphere=sphere))
return figs
# Handle styles, linestyles, colors and colormap. This yields a `styles`
# dict with one entry per condition, specifying at least color and
# linestyle. THIS MUST BE DONE AFTER THE "MULTIPLE CHANNEL TYPES" LOOP
(_styles, _linestyles, _colors, _cmap, colorbar_title,
colorbar_ticks) = _handle_styles_pce(styles, linestyles, colors, cmap,
conditions)
# From now on there is only 1 channel type
assert len(ch_types) == 1
ch_type = ch_types[0]
# some things that depend on ch_type:
units = _handle_default('units')[ch_type]
scalings = _handle_default('scalings')[ch_type]
# prep for topo
pos_picks = picks # need this version of picks for sensor location inset
info = pick_info(info, sel=picks, copy=True)
all_ch_names = info['ch_names']
if not do_topo:
# add vacuous "index" (needed for topo) so same code works for both
axes = [(ax, 0) for ax in axes]
if np.array(picks).ndim < 2:
picks = [picks] # enables zipping w/ axes
else:
from .topo import iter_topography
fig = plt.figure(figsize=(18, 14))
def click_func(
ax_, pick_, evokeds=evokeds, colors=colors,
linestyles=linestyles, styles=styles, cmap=cmap, vlines=vlines,
ci=ci, truncate_yaxis=truncate_yaxis,
truncate_xaxis=truncate_xaxis, ylim=ylim, invert_y=invert_y,
show_sensors=show_sensors, legend=legend,
split_legend=split_legend, picks=picks, combine=combine):
plot_compare_evokeds(
evokeds=evokeds, colors=colors, linestyles=linestyles,
styles=styles, cmap=cmap, vlines=vlines, ci=ci,
truncate_yaxis=truncate_yaxis, truncate_xaxis=truncate_xaxis,
ylim=ylim, invert_y=invert_y, show_sensors=show_sensors,
legend=legend, split_legend=split_legend,
picks=picks[pick_], combine=combine, axes=ax_, show=True,
sphere=sphere)
layout = find_layout(info)
# shift everything to the right by 15% of one axes width
layout.pos[:, 0] += layout.pos[0, 2] * .15
layout.pos[:, 1] += layout.pos[0, 3] * .15
# `axes` will be a list of (axis_object, channel_index) tuples
axes = list(iter_topography(
info, layout=layout, on_pick=click_func,
fig=fig, fig_facecolor='w', axis_facecolor='w',
axis_spinecolor='k', layout_scale=.925, legend=True))
picks = list(picks)
del info
# for each axis, compute the grand average and (maybe) the CI
# (per sensor if topo, otherwise aggregating over sensors)
c_func = None if do_topo else combine_func
all_data = list()
all_cis = list()
for _picks, (ax, idx) in zip(picks, axes):
data_dict = dict()
ci_dict = dict()
for cond in conditions:
this_evokeds = evokeds[cond]
# skip CIs when possible; assign ci_fun first to get arg checking
ci_fun = _get_ci_function_pce(ci, do_topo=do_topo)
ci_fun = ci_fun if len(this_evokeds) > 1 else None
res = _get_data_and_ci(this_evokeds, combine, c_func, picks=_picks,
scaling=scalings, ci_fun=ci_fun)
data_dict[cond] = res[0]
if ci_fun is not None:
ci_dict[cond] = res[1]
all_data.append(data_dict) # grand means, or indiv. sensors if do_topo
all_cis.append(ci_dict)
del evokeds
# compute ylims
allvalues = list()
for _dict in all_data:
for _array in list(_dict.values()):
allvalues.append(_array[np.newaxis]) # to get same .ndim as CIs
for _dict in all_cis:
allvalues.extend(list(_dict.values()))
allvalues = np.concatenate(allvalues)
norm = np.all(allvalues > 0)
orig_ymin, orig_ymax = ylim.get(ch_type, [None, None])
ymin, ymax = _setup_vmin_vmax(allvalues, orig_ymin, orig_ymax, norm)
del allvalues
# add empty data and title for the legend axis
if do_topo:
all_data.append({cond: np.array([]) for cond in data_dict})
all_cis.append({cond: None for cond in ci_dict})
all_ch_names.append('')
# plot!
for (ax, idx), data, cis in zip(axes, all_data, all_cis):
if do_topo:
title = all_ch_names[idx]
# plot the data
_times = [] if idx == -1 else times
_plot_compare_evokeds(ax, data, conditions, _times, cis, _styles,
title, norm, do_topo)
# draw axes & vlines
skip_axlabel = do_topo and (idx != -1)
_draw_axes_pce(ax, ymin, ymax, truncate_yaxis, truncate_xaxis,
invert_y, vlines, tmin, tmax, units, skip_axlabel)
# add inset scalp plot showing location of sensors picked
if show_sensors:
_validate_type(show_sensors, (np.int64, bool, str, type(None)),
'show_sensors', 'numeric, str, None or bool')
if not _check_ch_locs(np.array(one_evoked.info['chs'])[pos_picks]):
warn('Cannot find channel coordinates in the supplied Evokeds. '
'Not showing channel locations.')
else:
_evoked_sensor_legend(one_evoked.info, pos_picks, ymin, ymax,
show_sensors, ax, sphere)
# add color/linestyle/colormap legend(s)
if legend:
_draw_legend_pce(legend, split_legend, _styles, _linestyles, _colors,
_cmap, do_topo, ax)
if cmap is not None:
_draw_colorbar_pce(ax, _colors, _cmap, colorbar_title, colorbar_ticks)
# finish
plt_show(show)
return [ax.figure]
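# --- Editor's note: a hedged usage sketch for plot_compare_evokeds (not part
# of the original module). It assumes MNE's bundled sample dataset has been
# downloaded and uses its standard condition names; the '/'-separated keys
# follow the partial-matching scheme described in the Notes section.
def _demo_plot_compare_evokeds():
    import os
    import mne
    data_path = str(mne.datasets.sample.data_path())
    fname = os.path.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif')
    evokeds = {
        'Aud/L': mne.read_evokeds(fname, condition='Left Auditory'),
        'Aud/R': mne.read_evokeds(fname, condition='Right Auditory'),
    }
    # partial keys: 'L'/'R' match the second part of each condition name
    return mne.viz.plot_compare_evokeds(
        evokeds, picks='eeg', linestyles={'L': '--', 'R': '-'}, show=False)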
|
Teekuningas/mne-python
|
mne/viz/evoked.py
|
Python
|
bsd-3-clause
| 106,681
|
[
"Gaussian"
] |
0b9b1d8969cd048d3b3fd12238c0796b41f2a37f19f31d1d25b9f26acdd58cf3
|
''' COSMO-VIEW
Script: drawing.py
Changes:
J. Ballabrera, December 2017
EGL, 06/2020:
No more support to python 2.7
Support to Basemap deprecated and updated to cartopy
A consola can be added to the main window (it requires
to change all prints and use instead the tools.toconsola()
function. In some cases the wid of the consola is passed to
others constructors or a heap variable MESSAGE has been
introduced to collect "print" messages.
Added limited support to loading and drawing shapefiles
Base layers of topography and relief substituted by GEBCO and
EMODNET tile services (requieres internet connection)
Limited support to geographical projections. Everything is
plotted in PlateCarree
setmap() is now deprecated
Corrected some text font managements
All color selections are now managed through tools.colsel() function
Cartopy projection can be accessed through tools.map_proj()
EGL, 12/2020:
Now multiple lagrangian trajectories can be loaded at once by
using askopenfilenames instead of askopenfile
QPB, 03/2021:
Allow for a user defined time axis
Add a distance calculator
'''
__version__ = "3.0"
__author__ = "Quim Ballabrera and Emilio García"
__date__ = "July 2020"
import sys
import os
from os.path import isfile, join
import numpy as np
import numpy.ma as ma
from scipy import interpolate
import json
import io
import ast
import math
import datetime
import matplotlib.pyplot as plt
import matplotlib.image as image
import matplotlib.font_manager
import matplotlib.ticker as mticker
from matplotlib.font_manager import FontProperties
from matplotlib.figure import Figure
from matplotlib.offsetbox import TextArea, OffsetImage, AnnotationBbox, AnchoredText
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
from matplotlib import cm as CM
from matplotlib import colors
import matplotlib.patches as mpatches
#EG Cartopy
import cartopy
import cartopy.crs as ccrs
import cartopy.feature as cfeat
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
from cartopy.io.shapereader import Reader
from cartopy.feature import ShapelyFeature
#EG from mpl_toolkits.basemap import Basemap
from netCDF4 import Dataset,num2date
from itertools import chain
from PIL import Image, ImageTk
import matplotlib.animation as manimation
import tkinter as tk
from tkinter import ttk
from tkinter import messagebox
from tkinter import filedialog as filedialog
#from tkcolorpicker import askcolor
from tkinter.colorchooser import askcolor
from tkinter import font as tkfont
from tkcalendar import Calendar, DateEntry
try:
to_unicode = unicode
except:
to_unicode = str
import cosmo.tools as tools
import cosmo.contourplot as contourplot
import cosmo.vectorplot as vectorplot
import cosmo.providers as providers
import cosmo.codar as codar
import cosmo.copernicus as copernicus
import cosmo.saidin as saidin
import cosmo.lagrangian as lagrangian
import cosmo.lineplot as lineplot
import cosmo.clm as clm
import cosmo.db as db
import cosmo.geomarker as geomarker
import cosmo.dotplot as dotplot
import cosmo.json_editor as jeditor
import cosmo.legend as legend
#EG
import cosmo.shape as shape
import cosmo.geoplot as geoplot
import cosmo.field as field
import cosmo.plotxy as plotxy
import cosmo.ellipse as ellipse
import cosmo.patch as patch
import cosmo.climatology as climatology
import cosmo.feature as feature
from cosmo.tools import empty
from cosmo.tools import myround
from cosmo.tools import exists
from cosmo.tools import askforpermission
from cosmo.tools import placeontop
from cosmo.tools import get_remote
from cosmo.tools import get_Date
from cosmo.tools import folderList
from cosmo.tools import urlList
from cosmo.tools import simple_form
from cosmo.tools import haversine
from cosmo.tools import fontconfig
from cosmo.tools import setfont
from cosmo.tools import read_lines
from cosmo.tools import colsel
#EG
from cosmo.tools import map_proj
from cosmo.tools import scale_bar
#EG consola
from cosmo.tools import toconsola
from cosmo import COSMO_CONF_NAME
from cosmo import COSMO_CONF
from cosmo import COSMO_ROOT
from cosmo import COSMO_CONF_PATH
from cosmo import COSMO_CONF_DATA
from cosmo import VERSION
from cosmo import TKINTER_VERSION
from cosmo import MATPLOTLIB_VERSION
from cosmo import CARTOPY_VERSION
global COSMO_CONF,COSMO_CONF_PATH,COSMO_CONF_NAME,COSMO_CONF_DATA
BGC = 'pale green' # Background color
BWC = 'lime green' # Buttons (PREV and NEXT) color
EBC = 'forest green' # Exit Buttons color
FONT = 'Helvetica 14' # Default font
# =====================
class OBJECT():
# =====================
''' Class for cosmo-view objects'''
__version__ = "1.0"
__author__ = "Quim Ballabrera"
__date__ = "February 2022"
def __init__(self,TYPE,OPTIONS=None):
# ======================
''' Define and initialize the class attributes '''
self.n = 0
self.TYPE = TYPE
self.DATA = []
self.LIST = []
self.INDX = tk.IntVar()
self.INDX.set(0)
self.OPTIONS = OPTIONS
# =====================
class CONTOUR():
# =====================
''' Class for 2D data contours'''
__version__ = "2.0"
__author__ = "Quim Ballabrera"
__date__ = "July 2020"
def __init__(self,filename=None):
# ===============================
''' Define and initialize the class attributes '''
self.MESSAGE = "\nCONTOUR class:\n"
self.ALIAS = tk.StringVar()
self.FILENAME = tk.StringVar()
self.SOURCE = 'FILE' # Default source: Read from file
self.PARENT = None # Used in mean and variance calculations
if filename is None:
pass
else:
self.FILENAME.set(filename)
self.FLD = field.fld_parameters()
self.PLOT = contourplot.parameters()
self.MESSAGE += self.PLOT.MESSAGE
self.show = tk.BooleanVar()
self.varname = tk.StringVar()
#self.minval = tk.DoubleVar()
#self.maxval = tk.DoubleVar()
self.landmask = tk.BooleanVar()
self.K = tk.IntVar()
self.L = tk.IntVar()
self.K.set(0)
self.L.set(0)
self.K_LIST = []
self.L_LIST = []
self.Z_LIST = []
self.T_LIST = []
self.DATE = []
self.TIME = []
self.TIME_SET= False
self.landmask.set(False)
self.ALIAS.set('')
self.cbar = None
self.show.set(True)
# Selected point
self.io = tk.IntVar()
self.jo = tk.IntVar()
# Link to the Drawing Time Axis
self.LINK = tk.BooleanVar()
self.LINK.set(False)
def conf_get(self):
# =================
''' Set class dictionary from class attributes '''
conf = {}
conf['ALIAS'] = self.ALIAS.get()
conf['FILENAME'] = self.FILENAME.get()
conf['SOURCE'] = self.SOURCE
conf['PARENT'] = self.PARENT
conf['VARNAME'] = self.varname.get()
conf['K'] = self.K.get()
conf['L'] = self.L.get()
conf['LANDMASK'] = self.landmask.get()
conf['SHOW'] = self.show.get()
conf['LINK'] = self.LINK.get()
conf['PLOT'] = self.PLOT.conf_get()
conf['FLD'] = self.FLD.conf_get()
return conf
def conf_set(self,conf):
# ======================
''' Set class attributes from dictionary '''
self.ALIAS.set(conf['ALIAS'])
self.FILENAME.set(conf['FILENAME'])
self.SOURCE = conf['SOURCE']
self.PARENT = conf['PARENT']
self.varname.set(conf['VARNAME'])
self.K.set(conf['K'])
self.L.set(conf['L'])
self.landmask.set(conf['LANDMASK'])
self.show.set(conf['SHOW'])
self.LINK.set(conf['LINK'])
self.PLOT.conf_set(conf['PLOT'])
self.FLD.conf_set(conf['FLD'])
def read(self,**args):
# ====================
try:
wid = args["wid"]
except:
wid = None
try:
update_lims = args["update_lims"]
except:
update_lims = True
K = self.K.get()
L = self.L.get()
self.SOURCE = 'FILE'
toconsola("Reading contour, K, L = "+str(K)+", "+str(L),wid=wid)
if self.FLD.ndims == 2:
u = self.FLD.nc.variables[self.FLD.varname][:,:]
elif self.FLD.ndims == 3:
if self.FLD.icdf.ppl[self.FLD.varid] > -1:
u = self.FLD.nc.variables[self.FLD.varname][L,:,:].squeeze()
elif self.FLD.icdf.ppk[self.FLD.varid] > -1:
u = self.FLD.nc.variables[self.FLD.varname][K,:,:].squeeze()
else:
toconsola('Invalid file!',wid=wid)
return
elif self.FLD.ndims == 4:
u = self.FLD.nc.variables[self.FLD.varname][L,K,:,:].squeeze()
else:
toconsola("Invalid number of dimensions, "+str(self.FLD.ndims),wid=wid)
# Eliminate NaN values in field:
fill_value = u.fill_value
_u = u.filled()
_u[np.isnan(_u)] = fill_value
u = np.ma.masked_equal(_u,fill_value)
# Min and max values
#self.FLD.minval = float(u.min())
#self.FLD.maxval = float(u.max())
self.FLD.minval = np.nanmin(u)
self.FLD.maxval = np.nanmax(u)
toconsola('Min val = '+str(self.FLD.minval),wid=wid)
toconsola('Max val = '+str(self.FLD.maxval),wid=wid)
#print(self.FLD.minval, self.FLD.maxval)
# Make sure that the missing value is NaN:
#_u = u.filled(fill_value=np.nan)
#self.FLD.data = np.ma.masked_equal(_u,np.nan); del _u
self.FLD.data = u.copy()
if update_lims:
toconsola('Setting contour intervals ...',wid=wid)
try:
self.PLOT.CONTOUR_MIN.set(myround(self.FLD.minval))
except:
self.PLOT.CONTOUR_MIN.set(self.FLD.minval)
try:
self.PLOT.CONTOUR_MAX.set(myround(self.FLD.maxval))
except:
self.PLOT.CONTOUR_MAX.set(self.FLD.maxval)
dd = self.PLOT.CONTOUR_MAX.get() - self.PLOT.CONTOUR_MIN.get()
try:
self.PLOT.CONTOUR_INTERVAL.set(myround(0.1*dd,0))
except:
self.PLOT.CONTOUR_INTERVAL.set(0.1*dd)
else:
toconsola('Preserving contour intervals.',wid=wid)
def save(self,**args):
# ====================
filetypes=[('NetCDF','*.nc'),('ALL','*')]
nn = filedialog.asksaveasfilename(title='Save vector file',
initialdir='./',
filetypes=filetypes,
confirmoverwrite=True)
if len(nn) == 0:
return
else:
filename = '%s' % nn
try:
wid = args["wid"]
except:
wid = None
toconsola('Saving contour data into '+filename,wid=wid)
#nc = Dataset(filename,'w',format='NETCDF4')
nc = Dataset(filename,'w')
nc.createDimension('x',self.FLD.icdf.nx)
nc.createDimension('y',self.FLD.icdf.ny)
dimensions_list = ['y','x']
if self.FLD.icdf.idk >= 0:
nc.createDimension('z',1)
dimensions_list.insert(0,'z')
if self.FLD.icdf.idl >= 0:
nc.createDimension('t',None)
dimensions_list.insert(0,'t')
if self.FLD.icdf.grid2d:
lon = nc.createVariable('Longitude','f8',['y','x'])
nc['Longitude'].setncatts(self.FLD.nc[self.FLD.icdf.xname].__dict__)
lat = nc.createVariable('Latitude','f8',['y','x'])
nc['Latitude'].setncatts(self.FLD.nc[self.FLD.icdf.yname].__dict__)
else:
lon = nc.createVariable('Longitude','f8',['x'])
nc['Longitude'].setncatts(self.FLD.nc[self.FLD.icdf.xname].__dict__)
lat = nc.createVariable('Latitude','f8',['y'])
nc['Latitude'].setncatts(self.FLD.nc[self.FLD.icdf.yname].__dict__)
if self.FLD.icdf.idk >= 0:
depth = nc.createVariable('Depth','f8',['z'])
nc['Depth'].setncatts(self.FLD.nc[self.FLD.icdf.zname].__dict__)
if self.FLD.icdf.idl >= 0:
time = nc.createVariable('Time','f8',['t'])
nc['Time'].setncatts(self.FLD.nc[self.FLD.icdf.tname].__dict__)
aname = self.varname.get()
a = nc.createVariable(aname,'f8',dimensions_list,fill_value=1e36)
try:
long_name = self.FLD.nc[aname].long_name
nc[aname].long_name = long_name
except:
pass
try:
units = self.FLD.nc[aname].units
nc[aname].units = units
except:
pass
_a = self.FLD.data.filled(fill_value=1e36)
_a[np.isnan(_a)] = 1e36
# Write data
if self.FLD.icdf.grid2d:
lon[:,:] = self.FLD.xx
lat[:,:] = self.FLD.yy
else:
lon[:] = self.FLD.x
lat[:] = self.FLD.y
depth[0] = self.Z_LIST[self.K.get()]
time[0] = self.T_LIST[self.L.get()]
a[0,0,:,:] = _a
nc.close()
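# --- Editor's note: a self-contained sketch of the NetCDF writing pattern
# used in CONTOUR.save (dimensions, coordinate variables, 1e36 as the fill
# value for masked points). File and variable names here are illustrative.
def _demo_write_field_netcdf(filename='demo_field.nc'):
    import numpy as np
    from netCDF4 import Dataset
    ny, nx = 4, 5
    nc = Dataset(filename, 'w')
    nc.createDimension('x', nx)
    nc.createDimension('y', ny)
    lon = nc.createVariable('Longitude', 'f8', ['x'])
    lat = nc.createVariable('Latitude', 'f8', ['y'])
    a = nc.createVariable('field', 'f8', ['y', 'x'], fill_value=1e36)
    lon[:] = np.linspace(-2.0, 2.0, nx)
    lat[:] = np.linspace(38.0, 41.0, ny)
    data = np.ma.masked_invalid(np.arange(ny * nx, dtype=float).reshape(ny, nx))
    a[:, :] = data.filled(fill_value=1e36)  # masked/NaN points become 1e36
    nc.close()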
# =====================
class VECTOR():
# =====================
''' Class for 2D data (x,y) vectors'''
__version__ = "2.0"
__author__ = "Quim Ballabrera"
__date__ = "July 2020"
def __init__(self,ufile=None,vfile=None):
# =======================================
''' Define and initialize the class attributes '''
self.MESSAGE = "\nVECTOR class:\n"
self.ALIAS = tk.StringVar()
self.UFILENAME = tk.StringVar()
self.VFILENAME = tk.StringVar()
self.two_files = 0 #By default, U and V in the same file
self.SOURCE = 'FILE'
self.PARENT = None # Used in mean and variance calculations
if ufile is None:
pass
else:
self.UFILENAME.set(ufile)
if vfile is None:
self.VFILENAME.set(ufile)
else:
self.VFILENAME.set(vfile)
self.U = field.fld_parameters()
self.V = field.fld_parameters()
self.uname = tk.StringVar()
self.vname = tk.StringVar()
self.PLOT = vectorplot.parameters()
self.MESSAGE += self.PLOT.MESSAGE
self.K = tk.IntVar()
self.L = tk.IntVar()
self.K.set(0)
self.L.set(0)
self.K_LIST = []
self.L_LIST = []
self.Z_LIST = []
self.T_LIST = []
self.DATE = []
self.TIME = []
self.ALIAS.set('')
self.show = tk.BooleanVar()
self.show.set(True)
# Select grid type:
self.grid_type = tk.StringVar()
self.grid_type_list = ['A','B','C']
self.grid_type.set('A')
# Selected point
self.io = tk.IntVar()
self.jo = tk.IntVar()
# Variables to plot
self.reprocess = True
self.xplt = None
self.yplt = None
self.uplt = None
self.vplt = None
self.GRID_MODE_0 = -1
self.CURRENT_DX_0 = -1
self.CURRENT_DY_0 = -1
self.CURRENT_NX_0 = -1
self.CURRENT_NY_0 = -1
# Link to the drawing Time Axis
self.LINK = tk.BooleanVar()
self.LINK.set(False)
def conf_get(self):
# =================
''' Set class dictionary from class attributes '''
conf = {}
conf['ALIAS'] = self.ALIAS.get()
conf['UFILENAME'] = self.UFILENAME.get()
conf['VFILENAME'] = self.VFILENAME.get()
conf['TWO_FILES'] = self.two_files
conf['SOURCE'] = self.SOURCE
conf['PARENT'] = self.PARENT
conf['UNAME'] = self.uname.get()
conf['VNAME'] = self.vname.get()
conf['K'] = self.K.get()
conf['L'] = self.L.get()
conf['SHOW'] = self.show.get()
conf['GRID_TYPE'] = self.grid_type.get()
conf['LINK'] = self.LINK.get()
conf['PLOT'] = self.PLOT.conf_get()
conf['U'] = self.U.conf_get()
conf['V'] = self.V.conf_get()
return conf
def conf_set(self,conf):
# ======================
''' Set class attributes from dictionary '''
self.ALIAS.set(conf['ALIAS'])
self.UFILENAME.set(conf['UFILENAME'])
self.VFILENAME.set(conf['VFILENAME'])
self.two_files = conf['TWO_FILES']
self.SOURCE = conf['SOURCE']
self.PARENT = conf['PARENT']
self.uname.set(conf['UNAME'])
self.vname.set(conf['VNAME'])
self.K.set(conf['K'])
self.L.set(conf['L'])
self.show.set(conf['SHOW'])
self.LINK.set(conf['LINK'])
self.grid_type.set(conf['GRID_TYPE'])
self.PLOT.conf_set(conf['PLOT'])
self.U.conf_set(conf['U'])
self.V.conf_set(conf['V'])
def read(self,**args):
# ====================
try:
wid = args["wid"]
except:
wid = None
K = self.K.get()
L = self.L.get()
self.SOURCE = 'FILE'
self.reprocess = True
toconsola("Reading vector, K, L = "+str(K)+", "+str(L),wid=wid)
if self.U.ndims == 2:
u = self.U.nc.variables[self.U.varname][:,:]
v = self.V.nc.variables[self.V.varname][:,:]
elif self.U.ndims == 3:
if self.U.icdf.ppl[self.U.varid] > -1:
u = self.U.nc.variables[self.U.varname][L,:,:].squeeze()
v = self.V.nc.variables[self.V.varname][L,:,:].squeeze()
elif self.U.icdf.ppk[self.U.varid] > -1:
u = self.U.nc.variables[self.U.varname][K,:,:].squeeze()
v = self.V.nc.variables[self.V.varname][K,:,:].squeeze()
else:
toconsola('Invalid file!',wid=wid)
return
elif self.U.ndims == 4:
u = self.U.nc.variables[self.U.varname][L,K,:,:].squeeze()
v = self.V.nc.variables[self.V.varname][L,K,:,:].squeeze()
else:
toconsola("Invalid number of dimensions, "+str(self.U.ndims),wid=wid)
# Make sure that missing values are masked; masked_equal never matches
# NaN (NaN != NaN), so use masked_invalid instead:
_u = u.filled(fill_value=np.nan)
_v = v.filled(fill_value=np.nan)
u = np.ma.masked_invalid(_u); del _u
v = np.ma.masked_invalid(_v); del _v
if self.grid_type.get() == 'A' or self.grid_type.get() == 'B':
toconsola("Velocities in a A-grid",wid=wid)
self.U.data = u.copy()
self.V.data = v.copy()
return
if self.grid_type.get() == 'C':
toconsola("Regrid C-grid velocities",wid=wid)
self.U.data = 0.5*(u[1:-1,:-1]+u[1:-1,1:])
self.V.data = 0.5*(v[:-1,1:-1]+v[1:,1:-1])
return
def save(self,**args):
# ====================
filetypes=[('NetCDF','*.nc'),('ALL','*')]
nn = filedialog.asksaveasfilename(title='Save vector file',
initialdir='./',
filetypes=filetypes,
confirmoverwrite=True)
if len(nn) == 0:
return
else:
filename = '%s' % nn
try:
wid = args["wid"]
except:
wid = None
toconsola('Saving vector data into '+filename,wid=wid)
#nc = Dataset(filename,'w',format='NETCDF4')
nc = Dataset(filename,'w')
nc.createDimension('x',self.U.icdf.nx)
nc.createDimension('y',self.U.icdf.ny)
dimensions_list = ['y','x']
if self.U.icdf.idk >= 0:
nc.createDimension('z',1)
dimensions_list.insert(0,'z')
if self.U.icdf.idl >= 0:
nc.createDimension('t',None)
dimensions_list.insert(0,'t')
if self.U.icdf.grid2d:
lon = nc.createVariable('Longitude','f8',['y','x'])
nc['Longitude'].setncatts(self.U.nc[self.U.icdf.xname].__dict__)
lat = nc.createVariable('Latitude','f8',['y','x'])
nc['Latitude'].setncatts(self.U.nc[self.U.icdf.yname].__dict__)
else:
lon = nc.createVariable('Longitude','f8',['x'])
nc['Longitude'].setncatts(self.U.nc[self.U.icdf.xname].__dict__)
lat = nc.createVariable('Latitude','f8',['y'])
nc['Latitude'].setncatts(self.U.nc[self.U.icdf.yname].__dict__)
if self.U.icdf.idk >= 0:
depth = nc.createVariable('Depth','f8',['z'])
nc['Depth'].setncatts(self.U.nc[self.U.icdf.zname].__dict__)
if self.U.icdf.idl >= 0:
time = nc.createVariable('Time','f8',['t'])
nc['Time'].setncatts(self.U.nc[self.U.icdf.tname].__dict__)
uname = self.uname.get()
vname = self.vname.get()
u = nc.createVariable(uname,'f8',dimensions_list,fill_value=1e36)
v = nc.createVariable(vname,'f8',dimensions_list,fill_value=1e36)
try:
long_name = self.U.nc[uname].long_name
nc[uname].long_name = long_name
except:
pass
try:
units = self.U.nc[uname].units
nc[uname].units = units
except:
pass
try:
long_name = self.V.nc[vname].long_name
nc[vname].long_name = long_name
except:
pass
try:
units = self.V.nc[vname].units
nc[vname].units = units
except:
pass
_u = self.U.data.filled(fill_value=1e36)
_u[np.isnan(_u)] = 1e36
_v = self.V.data.filled(fill_value=1e36)
_v[np.isnan(_v)] = 1e36
# Write data
if self.U.icdf.grid2d:
lon[:,:] = self.U.xx
lat[:,:] = self.U.yy
else:
lon[:] = self.U.x
lat[:] = self.U.y
depth[0] = self.Z_LIST[self.K.get()]
time[0] = self.T_LIST[self.L.get()]
u[0,0,:,:] = _u
v[0,0,:,:] = _v
nc.close()
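# --- Editor's note: a minimal numpy sketch of the Arakawa C-grid averaging
# in VECTOR.read. On a C-grid u sits on east/west cell faces and v on
# north/south faces; averaging adjacent faces moves each component to the
# cell centers, trimming one row or column at the boundaries.
def _demo_c_grid_to_centers():
    import numpy as np
    ny, nx = 6, 8
    u = np.random.RandomState(1).randn(ny, nx)   # u on x-faces
    v = np.random.RandomState(2).randn(ny, nx)   # v on y-faces
    u_c = 0.5 * (u[1:-1, :-1] + u[1:-1, 1:])     # -> (ny - 2, nx - 1)
    v_c = 0.5 * (v[:-1, 1:-1] + v[1:, 1:-1])     # -> (ny - 1, nx - 2)
    assert u_c.shape == (ny - 2, nx - 1)
    assert v_c.shape == (ny - 1, nx - 2)
    return u_c, v_c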
# =====================
class LAYER():
# =====================
''' Class for Drawing layers'''
__version__ = "1.0"
__author__ = "Quim Ballabrera"
__date__ = "August 2020"
def __init__ (self):
# ==================
''' Define and initialize the class attributes '''
self.MESSAGE = "\nLAYER class:\n"
self.n = 0 # Number of layers
# self.nsequence = 0 # Number of layers attached to a SEQUENCE
# self.seqlen = 0 # SEQUENCE length
# self.leader = 0 # Points to the SEQUENCER layer
self.TYPE = [] # VEC, FLD, MARKER, ....
self.TYPE_INDEX = []
self.FILENAME = []
# self.INSEQUENCE = [] # Belongs to a SEQUENCE
# self.SEQUENCER = [] # True if SEQUENCE leader
self.NREC = [] # Number of records in layer
self.update = False
def erase(self,TYPE,ii,**args):
# =============================
try:
wid = args["wid"]
except:
wid = None
if self.n == 0:
toconsola('Invalid ERASE action in empty layer structure',wid=wid)
return
ll = -1
for i in range(self.n):
if self.TYPE[i] == TYPE and self.TYPE_INDEX[i] == ii:
ll = i
if ll == -1:
toconsola('Layer not found',wid=wid)
return
#INSEQUENCE = self.INSEQUENCE[ll].get()
#SEQUENCER = self.SEQUENCER[ll].get()
self.update = False
toconsola('Erasing '+TYPE+' layer '+str(ii),wid=wid)
del self.TYPE[ll]
del self.TYPE_INDEX[ll]
del self.FILENAME[ll]
#del self.INSEQUENCE[ll]
#del self.SEQUENCER[ll]
del self.NREC[ll]
self.n -= 1
if self.n == 0:
toconsola('Empty layer structure',wid=wid)
self.TYPE = []
self.TYPE_INDEX = []
self.FILENAME = []
#self.INSEQUENCE = []
#self.SEQUENCER = []
self.NREC = []
#self.nsequence = 0
#self.seqlen = 0
return
# If we are here, it means that the structure is not empty
# Update TYPE_INDEX
ii = -1
for i in range(self.n):
if self.TYPE[i] == TYPE:
ii += 1
self.TYPE_INDEX[i] = ii
# # If erasing a layer in the SEQUENCE:
# if INSEQUENCE:
# self.nsequence -= 1
# if self.nsequence > 0:
# if SEQUENCER:
# # If we have erased the SEQUENCER,
# # we set the first field as SEQUENCE leader
# for i in range(self.n):
# if self.INSEQUENCE[i].get():
# self.SEQUENCER[i].set(True)
# self.leader = i
# self.update = True
# else:
# self.seqlen = 0
#
def add(self,TYPE,Filename=None,N=None,**args):
# ==============================================
try:
wid = args["wid"]
except:
wid = None
self.TYPE.append(TYPE)
self.FILENAME.append(Filename)
self.NREC.append(N)
ii = 0
for i in range(self.n):
if self.TYPE[i] == TYPE:
ii += 1
self.TYPE_INDEX.append(ii)
self.n += 1
#self.INSEQUENCE.append(tk.BooleanVar(value=False))
#self.SEQUENCER.append(tk.BooleanVar(value=False))
toconsola('Adding '+TYPE+' layer ',wid=wid)
toconsola('Layer %s with index %d' %(TYPE,self.TYPE_INDEX[-1]),wid=wid)
toconsola('Number of layers: ' + str(self.n)+'\n',wid=wid)
def print(self):
# ==============
print('\n ================================== ')
print('Number of layers, n = ', self.n)
# print('Number of layers in SEQUENCE, nsequence = ', self.nsequence)
# print('SEQUENCE,lenght = ', self.seqlen)
# print('SEQUENCE leader id = ', self.leader)
for i in range(self.n):
print('> Layer ', i)
print('>> Type, Type order, num records : ', self.TYPE[i], self.TYPE_INDEX[i], self.NREC[i])
print('>> Filename : ', self.FILENAME[i])
#print('>> LINK : ', self.LINK.get())
# print('>> In sequence?, Sequence leader ? ', self.INSEQUENCE[i].get(), self.SEQUENCER[i].get())
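# --- Editor's note: a hedged usage sketch for the LAYER registry above
# (filenames are illustrative). TYPE_INDEX is recomputed per TYPE after an
# erase, so the indices of each layer type stay dense and zero-based.
def _demo_layer_registry():
    L = LAYER()
    L.add('FLD', Filename='temperature.nc', N=1)
    L.add('VEC', Filename='currents.nc', N=1)
    L.add('FLD', Filename='salinity.nc', N=1)
    L.erase('FLD', 0)   # drop the first FLD layer
    L.print()           # the remaining FLD layer is re-indexed to 0
    return L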
# =====================
class fld_parameters():
# =====================
''' Class for 2D data fields'''
__version__ = "1.0"
__author__ = "Quim Ballabrera"
__date__ = "December 2017"
def __init__ (self):
# ==================
''' Define and initialize the class attributes '''
self.MESSAGE = "\nFLD_PARA:\n"
self.PLOT = contourplot.parameters()
self.MESSAGE += self.PLOT.MESSAGE
self.missing = tk.DoubleVar()
self.masked = tk.BooleanVar()
self.show = tk.BooleanVar()
self.masked.set(True)
self.show.set(True)
self.F = None
self.minval = None
self.maxval = None
self.mask = None
self.data = None
self.varname = None
self.units = None
self.missing_value = None
self.cbar = None
def conf_get(self):
# =================
''' Set class dictionary from class attributes '''
conf = {}
conf['MISSING'] = self.missing.get()
conf['MASKED'] = self.masked.get()
conf['SHOW'] = self.show.get()
conf['PLOT'] = self.PLOT.conf_get()
return conf
def conf_set(self,conf):
# ======================
''' Set class attributes from dictionary '''
self.missing.set(conf['MISSING'])
self.masked.set(conf['MASKED'])
self.show.set(conf['SHOW'])
self.PLOT.conf_set(conf['PLOT'])
# =====================
class vel_parameters():
# =====================
''' Class for 2D velocity fields'''
__version__ = "1.0"
__author__ = "Quim Ballabrera"
__date__ = "December 2017"
def __init__ (self):
# ==================
''' Define and initialize the class attributes '''
self.MESSAGE = "VEL_PARA:\n"
self.PLOT = vectorplot.parameters()
self.MESSAGE += self.PLOT.MESSAGE
self.u = None
self.v = None
self.xu = None # To allow grid-types
self.yu = None # To allow grid-types
self.xv = None # To allow grid-types
self.yv = None # To allow grid-types
self.xt = None # To allow grid-types
self.yt = None # To allow grid-types
self.speed = None
self.F = None
self.cbar = None
self.show = tk.BooleanVar()
self.show.set(True)
def conf_get(self):
# =================
''' Set class dictionary from class attributes '''
conf = {}
conf['SHOW'] = self.show.get()
conf['PLOT'] = self.PLOT.conf_get()
return conf
def conf_set(self,conf):
# ======================
''' Set class attributes from dictionary '''
self.show.set(conf['SHOW'])
self.PLOT.conf_set(conf['PLOT'])
# =====================
class cdf_parameters():
# =====================
''' Class for NetCDF files'''
__version__ = "1.0"
__author__ = "Quim Ballabrera"
__date__ = "December 2017"
def __init__ (self):
# ==================
''' Define and initialize the class attributes '''
self.FILENAME = tk.StringVar()
self.varname = tk.StringVar()
self.uname = tk.StringVar()
self.vname = tk.StringVar()
self.K = tk.IntVar()
self.L = tk.IntVar()
self.ncid = None
self.icdf = None
self.varid = None
self.uid = None
self.vid = None
self.K_LIST = []
self.L_LIST = []
self.Z_LIST = []
self.T_LIST = []
self.DATE = []
self.TIME = []
self.K.set(0)
self.L.set(0)
self.FIELD = None
self.VEL = None
self.lon = None
self.lat = None
self.xx = None
self.yy = None
self.varname.set('')
self.uname.set('')
self.vname.set('')
# Add multiple grid types:
self.grid_type = tk.StringVar()
self.grid_type_list = ['A','B','C']
self.grid_type.set('A')
def conf_get(self):
# =================
''' Set class dictionary from class attributes'''
conf = {}
#conf['FILENAME'] = self.FILENAME.get()
conf['varname'] = self.varname.get()
conf['uname'] = self.uname.get()
conf['vname'] = self.vname.get()
conf['K'] = self.K.get()
conf['L'] = self.L.get()
conf['varid'] = self.varid
conf['uid'] = self.uid
conf['vid'] = self.vid
conf['grid_type'] = self.grid_type.get()
if self.icdf is None:
conf['ICDF'] = None
else:
conf['ICDF'] = self.icdf.conf_get()
if self.FIELD is None:
conf['FIELD'] = None
else:
conf['FIELD'] = self.FIELD.conf_get()
if self.VEL is None:
conf['VEL'] = None
else:
conf['VEL'] = self.VEL.conf_get()
return conf
def conf_set(self,conf):
# ======================
''' Set class attributes from dictionary '''
#self.FILENAME.set(conf['FILENAME'])
self.varname.set(conf['varname'])
self.uname.set(conf['uname'])
self.vname.set(conf['vname'])
self.K.set(conf['K'])
self.L.set(conf['L'])
self.varid = conf['varid']
self.uid = conf['uid']
self.vid = conf['vid']
self.grid_type.set(conf['grid_type'])
if conf['ICDF'] == "None":
pass
else:
self.icdf.conf_set(conf['ICDF'])
if self.FIELD is None:
pass
else:
self.FIELD.conf_set(conf['FIELD'])
if self.VEL is None:
pass
else:
self.VEL.conf_set(conf['VEL'])
# ====================
class DrawingConfig():
# ====================
def __init__(self):
# ========================
self.FILECONF = '%s' % COSMO_CONF + 'drawing.conf'
self.VERSION = __version__
self.OUTPUT_FIGURE = tk.BooleanVar()
self.OUTPUT_LEAFLET = tk.BooleanVar()
self.GEOMAP = tk.BooleanVar()
self.WITH_AXIS = tk.BooleanVar()
#EG Cartopy projection and parameters
self.MAP_PROJECTION = tk.StringVar()
self.MAP_PROJ_LAT_0 = tk.DoubleVar()
self.MAP_PROJ_LON_0 = tk.DoubleVar()
self.MAP_PROJ_MIN_LAT = tk.DoubleVar()
self.MAP_PROJ_MAX_LAT = tk.DoubleVar()
self.MAP_PROJ_F_NORTH = tk.DoubleVar()
self.MAP_PROJ_F_EAST = tk.DoubleVar()
self.MAP_PROJ_LAT_T_SCA = tk.DoubleVar()
self.MAP_PROJ_T_SCA_LAT = tk.DoubleVar()
self.MAP_PROJ_SCA_FAC = tk.DoubleVar()
self.MAP_PROJ_SATELLITE_HEIGHT = tk.DoubleVar()
self.MAP_PROJ_SWEEP_AXIS = tk.StringVar()
self.MAP_RESOLUTION = tk.StringVar()
self.EPSG = tk.IntVar()
self.SOUTH = tk.DoubleVar()
self.NORTH = tk.DoubleVar()
self.WEST = tk.DoubleVar()
self.EAST = tk.DoubleVar()
self.WIDTH = tk.DoubleVar()
self.HEIGHT = tk.DoubleVar()
self.LAT_0 = tk.DoubleVar() #
self.LON_0 = tk.DoubleVar()
self.SATELLITE_HEIGHT = tk.DoubleVar()
self.COASTLINE_SHOW = tk.BooleanVar()
# EG 1:Natural-Earth 2: EMODNET
self.COASTLINE_SOURCE = tk.IntVar()
self.COASTLINE_WIDTH = tk.DoubleVar()
self.COASTLINE_COLOR = tk.StringVar()
self.COASTLINE_ZORDER = tk.IntVar()
self.COUNTRYLINE_SHOW = tk.BooleanVar()
self.COUNTRYLINE_WIDTH = tk.DoubleVar()
self.COUNTRYLINE_COLOR = tk.StringVar()
self.LAND_COLOR = tk.StringVar()
self.LAND_ZORDER = tk.IntVar()
self.WATER_COLOR = tk.StringVar()
self.WATER_ZORDER = tk.IntVar()
self.TITLE = tk.StringVar()
self.TITLEFONT = FontProperties().copy()
self.TITLE_PAD = tk.DoubleVar()
self.XLABEL = tk.StringVar()
self.YLABEL = tk.StringVar()
self.LABEL_SIZE = tk.IntVar()
self.XLABEL_PAD = tk.DoubleVar()
self.YLABEL_PAD = tk.DoubleVar()
self.ZLABEL = tk.StringVar()
self.TLABEL = tk.StringVar()
self.DPI = tk.IntVar()
self.OUT_FILENAME = None
self.FIGURE_COLOR = tk.StringVar()
self.TEXT_COLOR = tk.StringVar()
self.GRID_SHOW = tk.BooleanVar()
self.GRID_LINEWIDTH = tk.DoubleVar()
self.MERIDIAN_INI = tk.DoubleVar()
self.MERIDIAN_FIN = tk.DoubleVar()
self.MERIDIAN_INT = tk.DoubleVar()
self.PARALLEL_INI = tk.DoubleVar()
self.PARALLEL_FIN = tk.DoubleVar()
self.PARALLEL_INT = tk.DoubleVar()
self.GRID_COLOR = tk.StringVar()
self.GRID_FONTCOLOR = tk.StringVar()
self.GRID_SIZE = tk.IntVar()
self.GRID_NORTH = tk.BooleanVar()
self.GRID_SOUTH = tk.BooleanVar()
self.GRID_WEST = tk.BooleanVar()
self.GRID_EAST = tk.BooleanVar()
self.GRID_LINESTYLE = tk.StringVar()
self.GRID_ALPHA = tk.DoubleVar()
self.GRID_ZORDER = tk.IntVar()
self.SCALE_SHOW = tk.BooleanVar()
self.SCALE_X = tk.DoubleVar()
self.SCALE_Y = tk.DoubleVar()
self.SCALE_XO = tk.DoubleVar()
self.SCALE_YO = tk.DoubleVar()
self.SCALE_LENGTH = tk.DoubleVar()
self.SCALE_UNITS = tk.StringVar()
self.SCALE_STYLE = tk.StringVar()
self.SCALE_FONTSIZE = tk.IntVar()
self.SCALE_FONTCOLOR = tk.StringVar()
self.SCALE_LABELSTYLE = tk.StringVar()
self.SCALE_FORMAT = tk.StringVar()
self.SCALE_YOFFSET = tk.DoubleVar()
self.SCALE_FILLCOLOR1 = tk.StringVar()
self.SCALE_FILLCOLOR2 = tk.StringVar()
self.SCALE_LINECOLOR = tk.StringVar()
self.SCALE_LINEWIDTH = tk.IntVar()
self.SCALE_ZORDER = tk.IntVar()
self.cons = None
#self.X = None
#self.Y = None
#EG RELIEF=1 GEBCO, RELIEF=2 EMODNET
self.RELIEF_SHOW = tk.BooleanVar()
self.RELIEF = tk.IntVar()
#EG self.BLUEMARBLE = tk.BooleanVar()
#EG self.ETOPO = tk.BooleanVar()
self.BACKGROUND_SCALE = tk.DoubleVar()
self.RIVERS_SHOW = tk.BooleanVar()
self.RIVERS_WIDTH = tk.DoubleVar()
self.RIVERS_COLOR = tk.StringVar()
#EG ARCGIS changed by EMODNET
self.EMODNET_ISO = tk.BooleanVar()
#EG self.ARCGISIMAGE = tk.IntVar()
#EG self.ARCGISSERVICE = tk.StringVar()
#EG self.ARCGISSERVICE_LIST = ['ESRI_Imagery_World_2D', \
#EG 'ESRI_StreetMap_World_2D', \
#EG 'NatGEo_World_Map', \
#EG 'Ocean_Basemap', \
#EG 'World_Imagery', \
#EG 'World_Physical_Map', \
#EG 'World_Shaded_Relief', \
#EG 'World_Street_Map', \
#EG 'World_Terrain_Base', \
#EG 'World_Topo_Map']
#EG self.ARCGISPIXELS = tk.IntVar()
#EG self.ARCGISDPI = tk.IntVar()
#EG self.ARCGISVERBOSE = tk.BooleanVar()
self.LOGO_FILE = tk.StringVar()
self.LOGO_ZOOM = tk.DoubleVar()
self.LOGO_LOCATION = tk.StringVar()
self.LOGO_X = tk.DoubleVar()
self.LOGO_Y = tk.DoubleVar()
self.LOGO_DISPLAY = tk.BooleanVar()
self.TIMESTAMP_SHOW = tk.BooleanVar()
self.TIMESTAMP_BOLD = tk.BooleanVar()
self.TIMESTAMP_X = tk.DoubleVar()
self.TIMESTAMP_Y = tk.DoubleVar()
self.TIMESTAMP_SIZE = tk.IntVar()
self.TIMESTAMP_COLOR = tk.StringVar()
self.VIDEO_NAME = tk.StringVar()
self.VIDEO_TITLE = tk.StringVar()
self.VIDEO_AUTHOR = tk.StringVar()
self.VIDEO_COMMENT = tk.StringVar()
self.VIDEO_FPS = tk.IntVar()
self.VIDEO_DPI = tk.IntVar()
self.VIDEO_L1 = tk.IntVar()
self.VIDEO_L2 = tk.IntVar()
self.WINDOW_FONT_TYPE = tk.StringVar()
self.WINDOW_FONT_SIZE = tk.IntVar()
self.MAP_FONT_TYPE = tk.StringVar()
self.LEGEND = legend.LegendConfig()
self.LEGEND.SHOW.set(False)
self.CROP_PAD = tk.DoubleVar()
self.CROP_PAD.set(0.0)
# Parameters for Saving frames
self.SFRAME_PREFIX = tk.StringVar()
self.SFRAME_POSTFIX_MODE = tk.IntVar()
self.SFRAME_L1 = tk.IntVar()
self.SFRAME_L2 = tk.IntVar()
self.SFRAME_LSTEP = tk.IntVar()
self.SIZE = [9,6]
self.OUTPUT_FIGURE.set(True)
self.OUTPUT_LEAFLET.set(False)
self.GEOMAP.set(True)
self.WITH_AXIS.set(False)
#EG Default Cartopy PlateCarree and parameters
self.MAP_PROJECTION.set('PlateCarree')
self.MAP_PROJ_LAT_0.set(0.0)
self.MAP_PROJ_LON_0.set(0.0)
self.MAP_PROJ_MIN_LAT.set(-80.0)
self.MAP_PROJ_MAX_LAT.set(84.0)
self.MAP_PROJ_F_NORTH.set(0.0)
self.MAP_PROJ_F_EAST.set(0.0)
self.MAP_PROJ_LAT_T_SCA.set(0.0)
self.MAP_PROJ_T_SCA_LAT.set(-1)
self.MAP_PROJ_SCA_FAC.set(-1)
self.MAP_PROJ_SATELLITE_HEIGHT.set(35785831)
self.MAP_PROJ_SWEEP_AXIS.set('y')
self.MAP_RESOLUTION.set('50m')
self.EPSG.set(4326)
#EG self.MAP_PROJECTION.set('cyl')
#EG self.MAP_RESOLUTION.set('l')
self.SOUTH.set(-90)
self.NORTH.set(90)
self.WEST.set(-180)
self.EAST.set(180)
self.WIDTH.set(0)
self.HEIGHT.set(0)
self.LAT_0.set(0)
self.LON_0.set(0)
self.SATELLITE_HEIGHT.set(35786000)
self.COASTLINE_SHOW.set(False)
self.COASTLINE_SOURCE.set(1)
self.COASTLINE_WIDTH.set(1)
self.COASTLINE_COLOR.set('black')
self.COASTLINE_ZORDER.set(1)
self.COUNTRYLINE_SHOW.set(False)
self.COUNTRYLINE_WIDTH.set(2)
self.COUNTRYLINE_COLOR.set('grey')
self.LAND_COLOR.set('coral')
self.LAND_ZORDER.set(0)
self.WATER_COLOR.set('white')
self.WATER_ZORDER.set(0)
self.TITLE.set('')
self.TITLEFONT.set_size(22)
self.TITLEFONT.set_weight('bold')
self.TITLE_PAD.set(0)
self.XLABEL.set('Longitude')
self.YLABEL.set('Latitude')
self.LABEL_SIZE.set(16)
self.XLABEL_PAD.set(0.12)
self.YLABEL_PAD.set(0.05)
self.ZLABEL.set('')
self.TLABEL.set('')
self.DPI.set(72)
self.FIGURE_COLOR.set('white')
self.TEXT_COLOR.set('black')
self.GRID_SHOW.set(True)
self.GRID_LINEWIDTH.set(1)
self.MERIDIAN_INI.set(-180)
self.MERIDIAN_FIN.set(210)
self.MERIDIAN_INT.set(60)
self.PARALLEL_INI.set(-90)
self.PARALLEL_FIN.set(120)
self.PARALLEL_INT.set(30)
self.GRID_COLOR.set('black')
self.GRID_FONTCOLOR.set('black')
self.GRID_SIZE.set(12)
self.GRID_NORTH.set(False)
self.GRID_SOUTH.set(True)
self.GRID_WEST.set(True)
self.GRID_EAST.set(False)
self.GRID_LINESTYLE.set(':')
self.GRID_ALPHA.set(1.0)
self.GRID_ZORDER.set(2)
self.SCALE_SHOW.set(False)
self.SCALE_X.set(0)
self.SCALE_Y.set(0)
self.SCALE_XO.set(0.5)
self.SCALE_YO.set(0.05)
self.SCALE_LENGTH.set(400)
self.SCALE_UNITS.set('km')
self.SCALE_STYLE.set('fancy')
self.SCALE_FONTSIZE.set(14)
self.SCALE_FONTCOLOR.set('k')
self.SCALE_LABELSTYLE.set('simple')
self.SCALE_FORMAT.set('%d')
self.SCALE_YOFFSET.set(None)
self.SCALE_FILLCOLOR1.set('w')
self.SCALE_FILLCOLOR2.set('k')
self.SCALE_LINECOLOR.set('k')
self.SCALE_LINEWIDTH.set(3)
self.SCALE_ZORDER.set(10)
#EG RELIEF refers to GEBCO tile WMS
self.RELIEF_SHOW.set(False)
self.RELIEF.set(1)
self.BACKGROUND_SCALE.set(1.0)
self.RIVERS_SHOW.set(False)
self.RIVERS_WIDTH.set(0.2)
self.RIVERS_COLOR.set('blue')
#EG EMODNET
#self.EMODNET_COAST.set(False)
self.EMODNET_ISO.set(False)
#EG self.ARCGISIMAGE.set(0)
#EG self.ARCGISSERVICE.set('ESRI_Imagery_world_2D')
#EG self.ARCGISPIXELS.set(400)
#EG self.ARCGISDPI.set(96)
#EG self.ARCGISVERBOSE.set(True)
self.LOGO_FILE.set(COSMO_CONF_PATH+'MEDOSMOSIS.png')
self.LOGO_IMAGE = image.imread(self.LOGO_FILE.get())
self.LOGO_ZOOM.set(0.20)
self.LOGO_LOCATION.set('SW')
self.LOGO_DISPLAY.set(False)
self.ISOBAT_PATH = tk.StringVar()
self.ISOBAT_PATH.set(COSMO_ROOT+'/data/isobaths/')
# self.ISOBAT_Z = [ 0, 100, 200, 400,
# 600, 800, 1000, 1200, 1400,
# 1600, 1800, 2000, 2500, 3000,
# ]
#
# self.ISOBAT_LABEL = ['coastline', '100 m', '200 m', '400 m',
# '600 m', '800 m','1000 m','1200 m','1400 m',
# '1600 m','1800 m','2000 m','2500 m','3000 m',
# ]
#
self.ISOBAT_Z = [ 0, 50, 100, 200, 250, 400, 500,
600, 750, 800, 1000, 1250, 1500, 1750,
2000, 2500, 3000, 3500, 4000, 4500, 5000]
self.ISOBAT_LABEL = ['coastline', '50 m', '100 m', '200 m',
'250 m', '400 m', '500 m', '600 m', '750 m',
'800 m','1000 m','1250 m','1500 m','1750 m',
'2000 m','2500 m','3000 m','3500 m','4000 m',
'4500 m','5000 m' ]
self.nisobat = len(self.ISOBAT_Z)
self.ISOBAT_SELEC = []
self.ISOBAT_COLOR = []
self.ISOBAT_STYLE = []
self.ISOBAT_WIDTH = []
self.ISOBAT_SHOW = []
self.ISOBAT_DATA = []
for i in range(self.nisobat):
self.ISOBAT_SELEC.append(tk.BooleanVar(value=False))
self.ISOBAT_COLOR.append(tk.StringVar(value='black'))
self.ISOBAT_STYLE.append(tk.StringVar(value='-'))
self.ISOBAT_WIDTH.append(tk.DoubleVar(value=1))
self.ISOBAT_SHOW.append(False)
self.ISOBAT_DATA.append(None)
self.ISOBAT_LABEL_SHOW = tk.BooleanVar()
self.ISOBAT_LABEL_SHOW.set(False)
self.ISOBAT_NPLOT = sum(self.ISOBAT_SHOW)
self.ISOBAT_ZPOINTER = tk.StringVar()
self.ISOBAT_ZPOINTER.set(self.ISOBAT_LABEL[0])
self.ISOBAT_selected = False
self.ISOBAT_loaded = False
self.ISOBAT_cropped = False
self.ISOBAT_LEGEND = legend.LegendConfig()
self.ISOBAT_LEGEND.TITLE.set('Isobaths')
self.ISOBAT_LEGEND.LOC.set(2)
self.TIMESTAMP_SHOW.set(False)
self.TIMESTAMP_BOLD.set(False)
self.TIMESTAMP_X.set(0.12)
self.TIMESTAMP_Y.set(0.12)
self.TIMESTAMP_COLOR.set('black')
self.TIMESTAMP_SIZE.set(15)
self.VIDEO_NAME.set('movie.mp4')
self.VIDEO_TITLE.set('COSMO-VIEW Movie')
self.VIDEO_AUTHOR.set('Matplotlib')
self.VIDEO_COMMENT.set('Ocean currents movie')
self.VIDEO_FPS.set(2)
self.VIDEO_DPI.set(100)
self.VIDEO_L1.set(0)
self.SFRAME_PREFIX.set('Frame')
self.SFRAME_POSTFIX_MODE.set(0)
self.SFRAME_L1.set(0)
self.SFRAME_LSTEP.set(1)
self.WINDOW_FONT_TYPE.set('Helvetica')
self.WINDOW_FONT_SIZE.set(14)
font_type = matplotlib.rcParams['font.family'][0]
self.MAP_FONT_TYPE.set(font_type)
self.MESSAGE = "\n"+self.LEGEND.MESSAGE+"\n"+self.ISOBAT_LEGEND.MESSAGE
if exists(self.FILECONF):
self.MESSAGE += "\nReading conf. file: "+self.FILECONF
try:
conf = self.conf_load(self.FILECONF)
self.conf_set(conf)
except:
self.MESSAGE += '\n\tError reading, using default parameters'
conf = self.conf_get()
self.conf_save(conf,self.FILECONF)
else:
self.MESSAGE += '\n\tSaving configuration file ...'
conf = self.conf_get()
self.conf_save(conf,self.FILECONF)
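  # Bootstrap logic above, summarized as a sketch: on a first run FILECONF is
  # missing and the defaults are written to disk; on later runs the file is
  # loaded, and an unreadable file silently falls back to the defaults:
  #   conf = self.conf_load(self.FILECONF)   # may raise -> defaults kept
  #   self.conf_set(conf)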
def conf_get(self):
# ===========================
    '''Get the conf dictionary from the program variables'''
conf = {}
conf['_VERSION_'] = self.VERSION
conf['OUTPUT_FIGURE'] = self.OUTPUT_FIGURE.get()
conf['OUTPUT_LEAFLET'] = self.OUTPUT_LEAFLET.get()
conf['SIZE'] = [self.SIZE[0],self.SIZE[1]]
conf['DPI'] = self.DPI.get()
conf['FIGURE_COLOR'] = self.FIGURE_COLOR.get()
conf['TEXT_COLOR'] = self.TEXT_COLOR.get()
conf['GEOMAP'] = self.GEOMAP.get()
conf['WITH_AXIS'] = self.WITH_AXIS.get()
#EG Default Cartopy PlateCarree and parameters
conf['MAP_PROJECTION'] = self.MAP_PROJECTION.get()
conf['MAP_PROJ_LAT_0'] = self.MAP_PROJ_LAT_0.get()
conf['MAP_PROJ_LON_0'] = self.MAP_PROJ_LON_0.get()
conf['MAP_PROJ_MIN_LAT'] = self.MAP_PROJ_MIN_LAT.get()
conf['MAP_PROJ_MAX_LAT'] = self.MAP_PROJ_MAX_LAT.get()
conf['MAP_PROJ_F_NORTH'] = self.MAP_PROJ_F_NORTH.get()
conf['MAP_PROJ_F_EAST'] = self.MAP_PROJ_F_EAST.get()
conf['MAP_PROJ_LAT_T_SCA'] = self.MAP_PROJ_LAT_T_SCA.get()
conf['MAP_PROJ_T_SCA_LAT'] = self.MAP_PROJ_T_SCA_LAT.get()
conf['MAP_PROJ_SCA_FAC'] = self.MAP_PROJ_SCA_FAC.get()
conf['MAP_PROJ_SATELLITE_HEIGHT'] = self.MAP_PROJ_SATELLITE_HEIGHT.get()
conf['MAP_PROJ_SWEEP_AXIS'] = self.MAP_PROJ_SWEEP_AXIS.get()
conf['MAP_RESOLUTION'] = self.MAP_RESOLUTION.get()
conf['EPSG'] = self.EPSG.get()
conf['SOUTH'] = self.SOUTH.get()
conf['NORTH'] = self.NORTH.get()
conf['WEST'] = self.WEST.get()
conf['EAST'] = self.EAST.get()
conf['WIDTH'] = self.WIDTH.get()
conf['HEIGHT'] = self.HEIGHT.get()
conf['LAT_0'] = self.LAT_0.get()
conf['LON_0'] = self.LON_0.get()
conf['SATELLITE_HEIGHT'] = self.SATELLITE_HEIGHT.get()
conf['COASTLINE_SHOW'] = self.COASTLINE_SHOW.get()
conf['COASTLINE_SOURCE'] = self.COASTLINE_SOURCE.get()
conf['COASTLINE_WIDTH'] = self.COASTLINE_WIDTH.get()
conf['COASTLINE_COLOR'] = self.COASTLINE_COLOR.get()
conf['COASTLINE_ZORDER'] = self.COASTLINE_ZORDER.get()
conf['COUNTRYLINE_SHOW'] = self.COUNTRYLINE_SHOW.get()
conf['COUNTRYLINE_WIDTH'] = self.COUNTRYLINE_WIDTH.get()
conf['COUNTRYLINE_COLOR'] = self.COUNTRYLINE_COLOR.get()
conf['LAND_COLOR'] = self.LAND_COLOR.get()
conf['LAND_ZORDER'] = self.LAND_ZORDER.get()
conf['WATER_COLOR'] = self.WATER_COLOR.get()
conf['WATER_ZORDER'] = self.WATER_ZORDER.get()
conf['TITLE'] = self.TITLE.get()
conf['TITLEFONT'] = self.TITLEFONT.__dict__
conf['TITLE_PAD'] = self.TITLE_PAD.get()
conf['XLABEL'] = self.XLABEL.get()
conf['YLABEL'] = self.YLABEL.get()
conf['LABEL_SIZE'] = self.LABEL_SIZE.get()
conf['XLABEL_PAD'] = self.XLABEL_PAD.get()
conf['YLABEL_PAD'] = self.YLABEL_PAD.get()
conf['GRID_SHOW'] = self.GRID_SHOW.get()
conf['GRID_LINEWIDTH'] = self.GRID_LINEWIDTH.get()
conf['MERIDIAN_INI'] = self.MERIDIAN_INI.get()
conf['MERIDIAN_FIN'] = self.MERIDIAN_FIN.get()
conf['MERIDIAN_INT'] = self.MERIDIAN_INT.get()
conf['PARALLEL_INI'] = self.PARALLEL_INI.get()
conf['PARALLEL_FIN'] = self.PARALLEL_FIN.get()
conf['PARALLEL_INT'] = self.PARALLEL_INT.get()
conf['GRID_COLOR'] = self.GRID_COLOR.get()
conf['GRID_FONTCOLOR'] = self.GRID_FONTCOLOR.get()
conf['GRID_SIZE'] = self.GRID_SIZE.get()
conf['GRID_NORTH'] = self.GRID_NORTH.get()
conf['GRID_SOUTH'] = self.GRID_SOUTH.get()
conf['GRID_WEST'] = self.GRID_WEST.get()
conf['GRID_EAST'] = self.GRID_EAST.get()
conf['GRID_LINESTYLE'] = self.GRID_LINESTYLE.get()
conf['GRID_ALPHA'] = self.GRID_ALPHA.get()
conf['GRID_ZORDER'] = self.GRID_ZORDER.get()
conf['SCALE_SHOW'] = self.SCALE_SHOW.get()
conf['SCALE_X'] = self.SCALE_X.get()
conf['SCALE_Y'] = self.SCALE_Y.get()
conf['SCALE_XO'] = self.SCALE_XO.get()
conf['SCALE_YO'] = self.SCALE_YO.get()
conf['SCALE_LENGTH'] = self.SCALE_LENGTH.get()
conf['SCALE_UNITS'] = self.SCALE_UNITS.get()
conf['SCALE_STYLE'] = self.SCALE_STYLE.get()
conf['SCALE_FONTSIZE'] = self.SCALE_FONTSIZE.get()
conf['SCALE_FONTCOLOR'] = self.SCALE_FONTCOLOR.get()
conf['SCALE_LABELSTYLE'] = self.SCALE_LABELSTYLE.get()
conf['SCALE_FORMAT'] = self.SCALE_FORMAT.get()
try:
conf['SCALE_YOFFSET'] = self.SCALE_YOFFSET.get()
except:
conf['SCALE_YOFFSET'] = None
conf['SCALE_FILLCOLOR1'] = self.SCALE_FILLCOLOR1.get()
conf['SCALE_FILLCOLOR2'] = self.SCALE_FILLCOLOR2.get()
conf['SCALE_LINECOLOR'] = self.SCALE_LINECOLOR.get()
try:
conf['SCALE_LINEWIDTH'] = self.SCALE_LINEWIDTH.get()
except:
conf['SCALE_LINEWIDTH'] = None
conf['SCALE_ZORDER'] = self.SCALE_ZORDER.get()
#EG RELIEF refers to GEBCO
conf['RELIEF_SHOW'] = self.RELIEF_SHOW.get()
conf['RELIEF'] = self.RELIEF.get()
#EGconf['BLUEMARBLE'] = self.BLUEMARBLE.get()
#EGconf['ETOPO'] = self.ETOPO.get()
conf['BACKGROUND_SCALE'] = self.BACKGROUND_SCALE.get()
conf['RIVERS_SHOW'] = self.RIVERS_SHOW.get()
conf['RIVERS_WIDTH'] = self.RIVERS_WIDTH.get()
conf['RIVERS_COLOR'] = self.RIVERS_COLOR.get()
#EG EMODNET
#conf['EMODNET_COAST'] = self.EMODNET_COAST.get()
conf['EMODNET_ISO'] = self.EMODNET_ISO.get()
#EG conf['ARCGISIMAGE'] = self.ARCGISIMAGE.get()
#EG conf['ARCGISSERVICE'] = self.ARCGISSERVICE.get()
#EG conf['ARCGISPIXELS'] = self.ARCGISPIXELS.get()
#EG conf['ARCGISDPI'] = self.ARCGISDPI.get()
#EG conf['ARCGISVERBOSE'] = self.ARCGISVERBOSE.get()
conf['LOGO_FILE'] = self.LOGO_FILE.get()
conf['LOGO_ZOOM'] = self.LOGO_ZOOM.get()
conf['LOGO_LOCATION'] = self.LOGO_LOCATION.get()
conf['LOGO_X'] = self.LOGO_X.get()
conf['LOGO_Y'] = self.LOGO_Y.get()
conf['LOGO_DISPLAY'] = self.LOGO_DISPLAY.get()
conf['ISOBAT_PATH'] = self.ISOBAT_PATH.get()
conf['ISOBAT_Z'] = self.ISOBAT_Z
conf['ISOBAT_LABEL'] = self.ISOBAT_LABEL
WIDTH = []
COLOR = []
STYLE = []
SELEC = []
for i in range(self.nisobat):
WIDTH.append(self.ISOBAT_WIDTH[i].get())
COLOR.append(self.ISOBAT_COLOR[i].get())
STYLE.append(self.ISOBAT_STYLE[i].get())
SELEC.append(self.ISOBAT_SELEC[i].get())
conf['ISOBAT_WIDTH'] = WIDTH
conf['ISOBAT_COLOR'] = COLOR
conf['ISOBAT_STYLE'] = STYLE
conf['ISOBAT_SELEC'] = SELEC
conf['ISOBAT_LABEL_SHOW'] = self.ISOBAT_LABEL_SHOW.get()
conf['ISOBAT_cropped'] = self.ISOBAT_cropped
conf['ISOBAT_LEGEND'] = self.ISOBAT_LEGEND.conf_get()
conf['LEGEND'] = self.LEGEND.conf_get()
conf['TIMESTAMP_SHOW'] = self.TIMESTAMP_SHOW.get()
conf['TIMESTAMP_BOLD'] = self.TIMESTAMP_BOLD.get()
conf['TIMESTAMP_X'] = self.TIMESTAMP_X.get()
conf['TIMESTAMP_Y'] = self.TIMESTAMP_Y.get()
conf['TIMESTAMP_SIZE'] = self.TIMESTAMP_SIZE.get()
conf['TIMESTAMP_COLOR'] = self.TIMESTAMP_COLOR.get()
conf['VIDEO_NAME'] = self.VIDEO_NAME.get()
conf['VIDEO_TITLE'] = self.VIDEO_TITLE.get()
conf['VIDEO_AUTHOR'] = self.VIDEO_AUTHOR.get()
conf['VIDEO_COMMENT'] = self.VIDEO_COMMENT.get()
conf['VIDEO_FPS'] = self.VIDEO_FPS.get()
conf['VIDEO_DPI'] = self.VIDEO_DPI.get()
conf['FRAME_PREFIX'] = self.SFRAME_PREFIX.get()
conf['FRAME_POSTFIX_MODE'] = self.SFRAME_POSTFIX_MODE.get()
conf['FRAME_STEP'] = self.SFRAME_LSTEP.get()
conf['WINDOW_FONT_TYPE'] = self.WINDOW_FONT_TYPE.get()
conf['WINDOW_FONT_SIZE'] = self.WINDOW_FONT_SIZE.get()
conf['MAP_FONT_TYPE'] = self.MAP_FONT_TYPE.get()
conf['CROP_PAD'] = self.CROP_PAD.get()
return conf
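  # The dictionary returned by conf_get() is plain, JSON-serializable data;
  # illustrative shape (values are examples, not defaults):
  #   {'_VERSION_': self.VERSION, 'DPI': 100, 'ISOBAT_WIDTH': [1.0, ...], ...}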
def conf_set(self,conf):
# =======================
    '''Set program variables from the conf dictionary'''
self.VERSION = conf['_VERSION_']
self.OUTPUT_FIGURE.set(conf['OUTPUT_FIGURE'])
self.OUTPUT_LEAFLET.set(conf['OUTPUT_LEAFLET'])
self.SIZE = conf['SIZE']
self.DPI.set(conf['DPI'])
self.FIGURE_COLOR.set(conf['FIGURE_COLOR'])
self.TEXT_COLOR.set(conf['TEXT_COLOR'])
self.GEOMAP.set(conf['GEOMAP'])
self.WITH_AXIS.set(conf['WITH_AXIS'])
#EG Default Cartopy PlateCarree and parameters
self.MAP_PROJECTION.set(conf['MAP_PROJECTION'])
self.MAP_PROJ_LAT_0.set(conf['MAP_PROJ_LAT_0'])
self.MAP_PROJ_LON_0.set(conf['MAP_PROJ_LON_0'])
self.MAP_PROJ_MIN_LAT.set(conf['MAP_PROJ_MIN_LAT'])
self.MAP_PROJ_MAX_LAT.set(conf['MAP_PROJ_MAX_LAT'])
self.MAP_PROJ_F_NORTH.set(conf['MAP_PROJ_F_NORTH'])
self.MAP_PROJ_F_EAST.set(conf['MAP_PROJ_F_EAST'])
self.MAP_PROJ_LAT_T_SCA.set(conf['MAP_PROJ_LAT_T_SCA'])
self.MAP_PROJ_T_SCA_LAT.set(conf['MAP_PROJ_T_SCA_LAT'])
self.MAP_PROJ_SCA_FAC.set(conf['MAP_PROJ_SCA_FAC'])
self.MAP_PROJ_SATELLITE_HEIGHT.set(conf['MAP_PROJ_SATELLITE_HEIGHT'])
self.MAP_PROJ_SWEEP_AXIS.set(conf['MAP_PROJ_SWEEP_AXIS'])
self.MAP_RESOLUTION.set(conf['MAP_RESOLUTION'])
self.EPSG.set(conf['EPSG'])
self.SOUTH.set(conf['SOUTH'])
self.NORTH.set(conf['NORTH'])
self.WEST.set(conf['WEST'])
self.EAST.set(conf['EAST'])
self.WIDTH.set(conf['WIDTH'])
self.HEIGHT.set(conf['HEIGHT'])
self.LAT_0.set(conf['LAT_0'])
self.LON_0.set(conf['LON_0'])
self.SATELLITE_HEIGHT.set(conf['SATELLITE_HEIGHT'])
self.MERIDIAN_INI.set(conf['MERIDIAN_INI'])
self.MERIDIAN_FIN.set(conf['MERIDIAN_FIN'])
self.MERIDIAN_INT.set(conf['MERIDIAN_INT'])
self.PARALLEL_INI.set(conf['PARALLEL_INI'])
self.PARALLEL_FIN.set(conf['PARALLEL_FIN'])
self.PARALLEL_INT.set(conf['PARALLEL_INT'])
self.COASTLINE_SHOW.set(conf['COASTLINE_SHOW'])
self.COASTLINE_SOURCE.set(conf['COASTLINE_SOURCE'])
self.COASTLINE_WIDTH.set(conf['COASTLINE_WIDTH'])
self.COASTLINE_COLOR.set(conf['COASTLINE_COLOR'])
self.COASTLINE_ZORDER.set(conf['COASTLINE_ZORDER'])
self.COUNTRYLINE_SHOW.set(conf['COUNTRYLINE_SHOW'])
self.COUNTRYLINE_WIDTH.set(conf['COUNTRYLINE_WIDTH'])
self.COUNTRYLINE_COLOR.set(conf['COUNTRYLINE_COLOR'])
self.LAND_COLOR.set(conf['LAND_COLOR'])
self.LAND_ZORDER.set(conf['LAND_ZORDER'])
self.WATER_COLOR.set(conf['WATER_COLOR'])
self.WATER_ZORDER.set(conf['WATER_ZORDER'])
self.TITLE.set(conf['TITLE'])
self.TITLEFONT = setfont(conf['TITLEFONT'])
self.TITLE_PAD.set(conf['TITLE_PAD'])
self.XLABEL.set(conf['XLABEL'])
self.YLABEL.set(conf['YLABEL'])
self.LABEL_SIZE.set(conf['LABEL_SIZE'])
self.XLABEL_PAD.set(conf['XLABEL_PAD'])
self.YLABEL_PAD.set(conf['YLABEL_PAD'])
self.GRID_SHOW.set(conf['GRID_SHOW'])
self.GRID_LINEWIDTH.set(conf['GRID_LINEWIDTH'])
self.GRID_COLOR.set(conf['GRID_COLOR'])
self.GRID_FONTCOLOR.set(conf['GRID_FONTCOLOR'])
self.GRID_SIZE.set(conf['GRID_SIZE'])
self.GRID_NORTH.set(conf['GRID_NORTH'])
self.GRID_SOUTH.set(conf['GRID_SOUTH'])
self.GRID_WEST.set(conf['GRID_WEST'])
self.GRID_EAST.set(conf['GRID_EAST'])
self.GRID_LINESTYLE.set(conf['GRID_LINESTYLE'])
self.GRID_ALPHA.set(conf['GRID_ALPHA'])
self.GRID_ZORDER.set(conf['GRID_ZORDER'])
self.SCALE_SHOW.set(conf['SCALE_SHOW'])
self.SCALE_X.set(conf['SCALE_X'])
self.SCALE_Y.set(conf['SCALE_Y'])
self.SCALE_XO.set(conf['SCALE_XO'])
self.SCALE_YO.set(conf['SCALE_YO'])
self.SCALE_LENGTH.set(conf['SCALE_LENGTH'])
self.SCALE_UNITS.set(conf['SCALE_UNITS'])
self.SCALE_STYLE.set(conf['SCALE_STYLE'])
self.SCALE_FONTSIZE.set(conf['SCALE_FONTSIZE'])
self.SCALE_FONTCOLOR.set(conf['SCALE_FONTCOLOR'])
self.SCALE_LABELSTYLE.set(conf['SCALE_LABELSTYLE'])
self.SCALE_FORMAT.set(conf['SCALE_FORMAT'])
self.SCALE_YOFFSET.set(conf['SCALE_YOFFSET'])
self.SCALE_FILLCOLOR1.set(conf['SCALE_FILLCOLOR1'])
self.SCALE_FILLCOLOR2.set(conf['SCALE_FILLCOLOR2'])
self.SCALE_LINECOLOR.set(conf['SCALE_LINECOLOR'])
self.SCALE_LINEWIDTH.set(conf['SCALE_LINEWIDTH'])
self.SCALE_ZORDER.set(conf['SCALE_ZORDER'])
#EG Refers to GEBCO tile vms
self.RELIEF_SHOW.set(conf['RELIEF_SHOW'])
self.RELIEF.set(conf['RELIEF'])
#EGself.BLUEMARBLE.set(conf['BLUEMARBLE'])
#EGself.ETOPO.set(conf['ETOPO'])
self.BACKGROUND_SCALE.set(conf['BACKGROUND_SCALE'])
self.RIVERS_SHOW.set(conf['RIVERS_SHOW'])
self.RIVERS_WIDTH.set(conf['RIVERS_WIDTH'])
self.RIVERS_COLOR.set(conf['RIVERS_COLOR'])
#EG EMODNET
#self.EMODNET_COAST.set(conf['EMODNET_COAST'])
self.EMODNET_ISO.set(conf['EMODNET_ISO'])
#EG self.ARCGISIMAGE.set(conf['ARCGISIMAGE'])
#EG self.ARCGISSERVICE.set(conf['ARCGISSERVICE'])
#EG self.ARCGISPIXELS.set(conf['ARCGISPIXELS'])
#EG self.ARCGISDPI.set(conf['ARCGISDPI'])
#EG self.ARCGISVERBOSE.set(conf['ARCGISVERBOSE'])
self.LOGO_FILE.set(conf['LOGO_FILE'])
self.LOGO_ZOOM.set(conf['LOGO_ZOOM'])
self.LOGO_LOCATION.set(conf['LOGO_LOCATION'])
self.LOGO_X.set(conf['LOGO_X'])
self.LOGO_Y.set(conf['LOGO_Y'])
self.LOGO_DISPLAY.set(conf['LOGO_DISPLAY'])
self.ISOBAT_PATH.set(conf['ISOBAT_PATH'])
self.ISOBAT_Z = conf['ISOBAT_Z']
self.ISOBAT_LABEL = conf['ISOBAT_LABEL']
self.ISOBAT_LABEL_SHOW.set(conf['ISOBAT_LABEL_SHOW'])
self.ISOBAT_cropped = conf['ISOBAT_cropped']
self.ISOBAT_LEGEND.conf_set(conf['ISOBAT_LEGEND'])
self.nisobat = len(self.ISOBAT_Z)
WIDTH = conf['ISOBAT_WIDTH']
COLOR = conf['ISOBAT_COLOR']
STYLE = conf['ISOBAT_STYLE']
SELEC = conf['ISOBAT_SELEC']
self.ISOBAT_WIDTH = []
self.ISOBAT_COLOR = []
self.ISOBAT_STYLE = []
self.ISOBAT_SELEC = []
self.ISOBAT_SHOW = []
self.ISOBAT_DATA = []
for i in range(self.nisobat):
self.ISOBAT_SELEC.append(tk.BooleanVar(value=SELEC[i]))
self.ISOBAT_COLOR.append(tk.StringVar(value=COLOR[i]))
self.ISOBAT_STYLE.append(tk.StringVar(value=STYLE[i]))
self.ISOBAT_WIDTH.append(tk.DoubleVar(value=WIDTH[i]))
self.ISOBAT_SHOW.append(False)
self.ISOBAT_DATA.append(None)
if sum(SELEC) == 0:
self.ISOBAT_selected = False
else:
self.ISOBAT_selected = True
self.ISOBAT_loaded = False
for i in range(self.nisobat):
if self.ISOBAT_SELEC[i].get():
filename = self.ISOBAT_PATH.get() + \
'/%04d' % self.ISOBAT_Z[i] + '.dat'
self.ISOBAT_SHOW[i] = True
self.ISOBAT_loaded = True
try:
self.ISOBAT_DATA[i] = read_lines(filename)
except:
          messagebox.showinfo(message='Error: unable to read '+filename)
self.ISOBAT_DATA[i] = None
self.ISOBAT_SHOW[i] = False
self.ISOBAT_loaded = False
self.ISOBAT_NPLOT = sum(self.ISOBAT_SHOW)
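    # Isobath files follow the '%04d.dat' naming convention used above, so a
    # selected depth of, say, 400 m resolves to '<ISOBAT_PATH>/0400.dat'.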
self.TIMESTAMP_SHOW.set(conf['TIMESTAMP_SHOW'])
self.TIMESTAMP_BOLD.set(conf['TIMESTAMP_BOLD'])
self.TIMESTAMP_X.set(conf['TIMESTAMP_X'])
self.TIMESTAMP_Y.set(conf['TIMESTAMP_Y'])
self.TIMESTAMP_SIZE.set(conf['TIMESTAMP_SIZE'])
self.TIMESTAMP_COLOR.set(conf['TIMESTAMP_COLOR'])
self.VIDEO_NAME.set(conf['VIDEO_NAME'])
self.VIDEO_TITLE.set(conf['VIDEO_TITLE'])
self.VIDEO_AUTHOR.set(conf['VIDEO_AUTHOR'])
self.VIDEO_COMMENT.set(conf['VIDEO_COMMENT'])
self.VIDEO_FPS.set(conf['VIDEO_FPS'])
self.VIDEO_DPI.set(conf['VIDEO_DPI'])
self.LEGEND.conf_set(conf['LEGEND'])
self.WINDOW_FONT_TYPE.set(conf['WINDOW_FONT_TYPE'])
self.WINDOW_FONT_SIZE.set(conf['WINDOW_FONT_SIZE'])
self.MAP_FONT_TYPE.set(conf['MAP_FONT_TYPE'])
self.CROP_PAD.set(conf['CROP_PAD'])
self.SFRAME_PREFIX.set(conf['FRAME_PREFIX'])
self.SFRAME_POSTFIX_MODE.set(conf['FRAME_POSTFIX_MODE'])
self.SFRAME_LSTEP.set(conf['FRAME_STEP'])
# Derived variables:
self.LOGO_IMAGE = image.imread(self.LOGO_FILE.get())
def conf_load(self,filename):
# ===========================
    '''Open and read the configuration file'''
# Read configuration
with open(filename) as infile:
conf = json.load(infile)
return conf
def conf_save(self,conf,filename):
# ===============================
'''Save the configuration file'''
    with io.open(filename,'w',encoding='utf8') as outfile:
      str_ = json.dumps(conf, ensure_ascii=False,
                        sort_keys=True,
                        indent=2,
                        separators=(',',': '))
      outfile.write(to_unicode(str_)+'\n')
      # The context manager closes the file; no explicit close() is needed.
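  # Round-trip sketch (assuming a writable path) tying the three helpers
  # together:
  #   conf = self.conf_get()
  #   self.conf_save(conf, self.FILECONF)
  #   assert self.conf_load(self.FILECONF)['_VERSION_'] == self.VERSION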
# ===================
class CosmoDrawing():
# ===================
# ===============
def close(self):
# ===============
#quit() # DEBUG
if self.LAYERS.n == 0:
quit()
aa = messagebox.askquestion('Close','Are you sure?',icon='warning')
if aa == 'yes':
self.master.destroy()
self.master = None
quit()
# =======================================
def __init__ (self,master,tconsola=None):
# =======================================
# Initialization
global COSMO_CONF,COSMO_CONF_DATA
versions = 'Built with:\n'
versions += 'Tkinter '+ TKINTER_VERSION + '\n'
versions += 'Matplotlib '+ MATPLOTLIB_VERSION + '\n'
versions += 'Cartopy '+ CARTOPY_VERSION + '\n'
mess = "CONF_PATH: "+COSMO_CONF_PATH
mess += '\nCONF_DATA: '+COSMO_CONF_DATA
mess += '\nCONF: '+ COSMO_CONF
master.protocol('WM_DELETE_WINDOW',self.close)
self.master = master
self.master.configure(bg=BGC)
# EG we pass the console id
self.PLOT = DrawingConfig()
self.first = True
self.ftime = True
try:
font_name = self.PLOT.WINDOW_FONT_TYPE.get().split()[0]
font = '%s %d' % (font_name, self.PLOT.WINDOW_FONT_SIZE.get())
self.master.option_add('*Font',font)
except:
self.master.option_add('*Font',FONT)
self.default_font = tkfont.nametofont('TkDefaultFont')
# EG icon fonts for the askopenfile, etc
self.default_font2 = tkfont.nametofont("TkIconFont")
self.default_font.configure(family=self.PLOT.WINDOW_FONT_TYPE.get().
split()[0])
self.default_font.configure(size=self.PLOT.WINDOW_FONT_SIZE.get())
# EG icon fonts for the askopenfile, etc
self.default_font2.configure(size=self.PLOT.WINDOW_FONT_SIZE.get())
self.L_LIST = []
self.T_LIST = []
self.DATE = []
#self.TFILE = ''
self.L = tk.IntVar()
self.K = tk.IntVar()
self.NL = 0
self.NZ = 0
self.L.set(0)
self.K.set(0)
self.LAYERS = LAYER()
#self.nfiles = 0
#self.FILENAMES = []
#self.FILETYPES = []
#self.FILEORDER = []
#self.nsequence = 0
#self.SEQUENCES = []
    #self.SEQLEADER = [] # The one providing the date and Nt
#self.SEQLEADER_INDX= 0
#self.SEQNTIMES = []
self.nvec = 0
self.VEC = []
self.VEC_LIST = [None]
self.VEC_INDX = tk.IntVar()
self.VEC_INDX.set(0)
self.CURRENT_OPTIONS = ['Operational', \
'HF Radar', \
'COPERNICUS', \
'Local Dataset', \
'Remote Dataset', \
'Active CONTOUR file']
self.ncdf = 0
self.CDF = []
self.CDF_LIST = [None]
self.CDF_INDX = tk.IntVar()
self.CDF_INDX.set(0)
self.cdfbar = []
self.Mcdfbar = []
self.CONTOUR_OPTIONS = ['Operational', \
'COPERNICUS', \
'Local Dataset', \
'Remote Dataset', \
'Active VECTOR file']
self.nfloat = 0
self.FLOAT = []
self.FLOAT_LIST = ['0']
self.FLOAT_INDX = tk.IntVar()
self.FLOAT_INDX.set(0)
self.FLOAT_OPTIONS = ['Local Dataset', \
'Local Folder', \
'Remote Folder', \
'Trajectories Database']
self.Lagrangian_types=[('Netcdf','*.nc'),('JSON','*.json'),('GEOJSON','*.geojson'),('ALL','*')]
self.SAIDIN = CONTOUR()
#self.SAIDIN = cdf_parameters()
#self.SAIDIN.FIELD = fld_parameters()
self.sbar = []
self.Msbar = []
# Features: Markers or shapefiles
# Stationary information. Types: MARKER,SHAPE
#EG
    self.nmarker = 0                # Number of marker files
    self.MARKER = []                # List of marker structures, dim(self.nmarker)
    self.MARKER_LIST = ['0']        # List of marker files in the configuration
    self.MARKER_INDX = tk.IntVar()  # File counter
self.MARKER_INDX.set(0)
#EG Shape files
self.nshape = 0
self.SHAPE = []
self.SHAPE_LIST = ['0']
self.SHAPE_INDX = tk.IntVar()
self.SHAPE_INDX.set(0)
self.nellipse = 0
self.ELLIPSE = []
    self.ELLIPSE_LIST = ['0']         # List of ellipse files in the configuration
    self.ELLIPSE_INDX = tk.IntVar()   # File counter
self.ELLIPSE_INDX.set(0)
self.ELLIPSE_OPTIONS = ['Local Dataset']
self.npatch = 0
self.PATCH = []
    self.PATCH_LIST = ['0']           # List of patches in the configuration
    self.PATCH_INDX = tk.IntVar()     # File counter
self.PATCH_INDX.set(0)
self.FEATURE = OBJECT('FEATURE',['Local Dataset'])
#self.FEATURE = []
    #self.FEATURE_LIST = ['0']        # List of features in the configuration
    #self.FEATURE_INDX = tk.IntVar()  # File counter
#self.FEATURE_INDX.set(0)
#self.FEATURE_OPTIONS = ['Local Dataset']
# Initialize CLM command:
self.CLM = clm.parameters()
# Skill - related variables
self.time_sampling = tk.DoubleVar()
self.index_s = tk.DoubleVar()
self.index_n = tk.DoubleVar()
self.release_file = tk.StringVar()
self.clm_idt = tk.IntVar()
self.out_file = tk.StringVar()
self.time_ini = tk.IntVar()
self.Fp = tk.IntVar()
self.skill_release_VALUES = ['Earliest model state','Earliest buoy location']
self.skill_release = tk.StringVar()
self.skill_release.set(self.skill_release_VALUES[0])
self.time_sampling.set(1) # hours
self.index_n.set(1) #
self.time_ini.set(0) # 0 - Model time; 1 - Buoy time
self.release_file.set('skill_ini.dat')
self.out_file.set('skill_out.nc')
    self.clm_idt.set(600)             # time step (presumably seconds: 600 s = 10 min)
self.Fp.set(0)
tmp = matplotlib.font_manager.get_fontconfig_fonts()
if type(tmp) is dict:
flist = list(tmp)
else:
flist = tmp.copy()
del tmp
# Get rid of fonts not well defined:
nf = len(flist)
for i in range(nf-1,-1,-1):
fname = flist[i]
try:
ftype = matplotlib.font_manager.FontProperties(fname=fname).get_name()
except:
del flist[i]
FONT_TYPES = [matplotlib.font_manager.FontProperties(fname=fname).get_family() for fname in flist]
try:
self.FONT_TYPES = list(set(FONT_TYPES))
except:
self.FONT_TYPES = FONT_TYPES.copy()
self.FONT_TYPES.sort()
self.FONT_SIZES = list(range(1,25))
self.GET_TIMESTAMP_LOCATION = False
# Initialize matplotlib and Cartopy
self.params = None
self.fig = None
#print('main ', self.PLOT.SIZE)
#self.fig = plt.figure('COSMO-VIEW canvas', \
# figsize=self.PLOT.SIZE, \
# dpi=self.PLOT.DPI.get())
#
#self.fig.canvas.mpl_connect('close_event',self.on_closing_figure)
#self.fig.canvas.callbacks.connect('button_press_event',self.on_click)
#self.ax = self.fig.add_subplot(111)
self.drawmap = None
# Window design
self.CreateMenu()
gui_style = ttk.Style()
gui_style.configure('My.TFrame',background="green")
gui_style.configure('My.TLabel',background="green")
#F0 = ttk.Frame(self.master,style='My.TFrame')
#F0 = tk.Frame(self.master,bg="yellow")
#F0 = tk.Frame(self.master,bg="yellow")
#EG F0 dropped to facilitate the consola design
tk.Label(self.master,text='Time',bg=BGC).grid(row=0,column=0,padx=3)
self.lbox = ttk.Combobox(self.master,textvariable=self.L,width=5)
self.lbox.grid(row=0,column=1)
self.lbox.configure(state='disabled')
self.lbox.bind('<<ComboboxSelected>>',lambda e: self.lselection())
self.lbox.bind('<Return>',lambda e: self.lselection())
self.bprev = tk.Button(self.master,text='PREV',command=self.tprev,bg=BWC)
self.bprev.grid(row=0,column=2,padx=3,sticky='e')
tk.Entry(self.master,textvariable=self.PLOT.TLABEL, \
state='readonly',width=20,bg='white').grid(row=0,column=3, padx=3)
self.bnext = tk.Button(self.master,text='NEXT',command=self.tnext,bg=BWC)
    self.bnext.grid(row=0,column=4,padx=3,sticky='w')
tk.Label(self.master,bg=BGC).grid(row=0,column=5)
if len(self.DATE) <= 0:
self.bprev.configure(state='disabled')
self.lbox.configure(state='disabled')
self.bnext.configure(state='disabled')
else:
self.lbox['values'] = list(range(len(self.L_LIST)))
tk.Button(self.master,text='Draw',command=self.make_plot,bg=EBC) \
.grid(row=1,column=4,padx=3,pady=3,sticky='e')
tk.Button(self.master,text='Quit',command=self.close,bg=EBC) \
.grid(row=1,column=5,padx=3,pady=3,sticky='w')
tk.Label(self.master,text='COSMO project, July 2018',bg=BGC) \
.grid(row=2,column=4,columnspan=6,sticky='e')
#F0.grid(row=0, column=0,sticky='ew')
    #EG Add a console
#EG self.cons is the widget referencing the toconsola()
if tconsola is not None:
if len(tconsola) > 0:
        wiconsola = tk.Frame(self.master)  # Expand the console
wiconsola.grid_rowconfigure(0, weight=1)
cscrollb = tk.Scrollbar(wiconsola)
cscrollb.grid(row=0,column=1,sticky='nswe')
myFont = tkfont.Font(family=self.PLOT.WINDOW_FONT_TYPE.get(), \
size=self.PLOT.WINDOW_FONT_SIZE.get())
self.cons = tk.Text(wiconsola,bg="black", fg="white", \
yscrollcommand=cscrollb.set)
self.cons.configure(font=myFont)
        # tags to highlight different categories of messages by formatting the text
self.cons.tag_config("y", foreground="yellow", font="-weight bold")
self.cons.tag_config("o", foreground="orange", font="-weight bold")
self.cons.tag_config("r", foreground="red", font="-weight bold")
self.cons.grid(row=0,column=0,sticky='we')
cscrollb.config(command=self.cons.yview)
line = tconsola + '\n'+ versions + "\n"+ mess + self.PLOT.MESSAGE+ \
self.SAIDIN.FLD.MESSAGE+self.CLM.MESSAGE
self.cons.insert("end", line + "\n")
self.cons.see(tk.END)
wiconsola.grid(row=3, column=0, columnspan=6, pady=5, sticky='nsew')
#EG
self.CAPTURE_POINT = False
self.pxo = tk.DoubleVar()
self.pyo = tk.DoubleVar()
self.pzo = tk.DoubleVar()
# Initialize window widget IDs:
self.Window_cfile = None
self.Window_legendconfig = None
self.Window_mapconfig = None
self.Window_vectorconfig = None
self.Window_contourconfig = None
self.Window_lineconfig = None
self.Window_other = None
self.Window_saidin = None
self.Window_currents = None
self.Window_currents_sel = None
self.Window_opendap = None
self.Window_copernicus = None
self.Window_codar = None
self.Window_isobat = None
self.Window_float = None
self.Window_saidinconfig = None
self.Window_floatconfig = None
self.Window_clm = None
self.Window_dpi = None
self.Window_anim = None
self.Window_sframe = None
self.Window_ncdf = None
self.Window_vec = None
self.Window_logo = None
self.Window_files = None
self.Window_about = None
self.Window_widgetconfig = None
self.Window_marker = None
self.Window_markerconfig = None
self.Window_dotconfig = None
self.Window_editor = None
#EG SHAPE files
self.Window_shapefile = None
self.Window_shapeconfig = None
self.Window_geoconfig = None
self.Window_xysel = None
self.Window_markered = None
self.Window_gellipse = None
self.Window_cellipse = None
self.Window_ellipseconfig = None
self.Window_featureconfig = None
self.Window_patch = None
self.Window_patchconfig = None
self.Window_skill = None
self.Window_converter = None
self.Window_settime = None
self.Window_feature = None
self.legendtabs = None
self.Window_mapa = None
## CosmoDrawing EVENTS Handlers ##############
# =============================
def canvas_closing(self,event):
# =============================
    ''' Store PLOT.SIZE according to the window size and release the figure'''
    self.PLOT.SIZE = list(self.fig.get_size_inches())
    # Drop the reference so the figure can be garbage-collected
self.fig = None
# =============================
def canvas_resizing(self,event):
# =============================
''' Update PLOT.SIZE variable according to the window size'''
self.PLOT.SIZE = list(self.fig.get_size_inches())
# ===========================
def canvas_click(self,event):
# ===========================
if self.PLOT.LEGEND.GET_XY:
if event.inaxes is not None:
toconsola("Getting Legend coordinates",wid=self.cons)
trans = self.ax.transData.transform((event.xdata,event.ydata))
trans = self.ax.transAxes.inverted().transform(trans)
self.PLOT.LEGEND.BBx.set(np.round(trans[0],3))
self.PLOT.LEGEND.BBy.set(np.round(trans[1],3))
self.PLOT.LEGEND.GET_XY = False
return
if self.PLOT.ISOBAT_LEGEND.GET_XY:
if event.inaxes is not None:
toconsola("Getting Legend coordinates",wid=self.cons)
trans = self.ax.transData.transform((event.xdata,event.ydata))
trans = self.ax.transAxes.inverted().transform(trans)
self.PLOT.ISOBAT_LEGEND.BBx.set(np.round(trans[0],3))
self.PLOT.ISOBAT_LEGEND.BBy.set(np.round(trans[1],3))
self.PLOT.ISOBAT_LEGEND.GET_XY = False
return
if self.GET_TIMESTAMP_LOCATION:
toconsola("EG Click_event: self.GET_TIMESTAMP_LOCATION",wid=self.cons)
try:
self.time_stamp.remove()
except:
pass
self.GET_TIMESTAMP_LOCATION = False
xx = event.x/self.PLOT.DPI.get()/self.PLOT.SIZE[0]
yy = event.y/self.PLOT.DPI.get()/self.PLOT.SIZE[1]
self.PLOT.TIMESTAMP_X.set(np.round(xx,3))
self.PLOT.TIMESTAMP_Y.set(np.round(yy,3))
if self.PLOT.TIMESTAMP_SHOW.get():
font_family = self.PLOT.MAP_FONT_TYPE.get()
font_weight = 'normal'
if self.PLOT.TIMESTAMP_BOLD.get(): font_weight = 'bold'
self.ax.annotate(self.DATE[self.L.get()],xy=(self.PLOT.TIMESTAMP_X.get(), \
self.PLOT.TIMESTAMP_Y.get()), \
xycoords='figure fraction', color=self.PLOT.TIMESTAMP_COLOR.get(), \
fontsize=self.PLOT.TIMESTAMP_SIZE.get(),fontfamily=font_family, \
fontweight=font_weight,annotation_clip=False)
self.canvas.draw()
return
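    # On the conversion above: Matplotlib reports event.x/event.y in display
    # pixels; dividing by the DPI and by the figure size in inches yields
    # figure-fraction coordinates in [0,1], matching the
    # annotate(..., xycoords='figure fraction') call.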
if self.nvec > 0:
ii = self.VEC_INDX.get()
if self.VEC[ii].PLOT.KEY_GETXY:
self.VEC[ii].PLOT.KEY_GETXY = False
xx = event.x/self.PLOT.DPI.get()/self.PLOT.SIZE[0]
yy = event.y/self.PLOT.DPI.get()/self.PLOT.SIZE[1]
self.VEC[ii].PLOT.KEY_X.set(np.round(xx,3))
self.VEC[ii].PLOT.KEY_Y.set(np.round(yy,3))
self.VEC[ii].PLOT.KEY_OBJ.X = xx
self.VEC[ii].PLOT.KEY_OBJ.Y = yy
self.canvas.draw()
return
if event.inaxes is not None:
#EG xo,yo = self.m(event.xdata,event.ydata,inverse=True)
p_ref = map_proj('PlateCarree')
p_local = map_proj(self.PLOT.MAP_PROJECTION.get())
latlon = p_ref['proj'].transform_point(event.xdata, event.ydata, \
p_local['proj'])
# Coordinates selected point:
xo = latlon[0]; yo = latlon[1]
toconsola("Selected Point : "+str(latlon[0])+" - "+str(latlon[1]),wid=self.cons)
#print('Current speed = ', self.CURRENTS.F(event.xdata,event.ydata))
#if not empty(self.SAIDIN.FILENAME.get()):
# print('SAIDIN SST = ', self.SAIDIN.FIELD.F(xo,yo))
self.CLM.xo.set(latlon[0])
self.CLM.yo.set(latlon[1])
if self.CAPTURE_POINT:
self.pxo.set(xo)
self.pyo.set(yo)
return
if self.nvec > 0:
ii = self.VEC_INDX.get()
dis = (xo-self.VEC[ii].U.xx)**2 + (yo-self.VEC[ii].U.yy)**2
ind = np.unravel_index(dis.argmin(), dis.shape)
self.VEC[ii].jo.set(ind[0])
self.VEC[ii].io.set(ind[1])
print('Vector selected point: ', self.VEC[ii].io.get(), self.VEC[ii].jo.get())
if self.ncdf > 0:
ii = self.CDF_INDX.get()
dis = (xo-self.CDF[ii].FLD.xx)**2 + (yo-self.CDF[ii].FLD.yy)**2
ind = np.unravel_index(dis.argmin(), dis.shape)
self.CDF[ii].jo.set(ind[0])
self.CDF[ii].io.set(ind[1])
print('Contour selected point: ', self.CDF[ii].io.get(), self.CDF[ii].jo.get())
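  # The point selection above is a brute-force nearest-node search: 'dis'
  # holds the squared distance to every grid node, argmin() returns the flat
  # index of the minimum, and np.unravel_index() maps it back to (j,i).
  # Standalone sketch (xx, yy are the 2-D coordinate arrays):
  #   dis = (xo - xx)**2 + (yo - yy)**2
  #   j, i = np.unravel_index(dis.argmin(), dis.shape)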
# ===========================
def on_xlims_change(self,event):
# ===========================
lims = self.ax.get_xlim()
self.PLOT.WEST.set(lims[0])
self.PLOT.EAST.set(lims[1])
self.drawmap = True
# ===========================
def on_ylims_change(self,event):
# ===========================
lims = self.ax.get_ylim()
self.PLOT.SOUTH.set(lims[0])
self.PLOT.NORTH.set(lims[1])
self.drawmap = True
# ====================
def CreateMenu (self):
# ====================
''' Create options menu'''
menubar = tk.Menu(self.master)
plotmenu = tk.Menu(menubar,tearoff=0)
menubar.add_cascade(label='File',menu=plotmenu)
plotmenu.add_command(label='Save figure',
command=self.figure_save)
plotmenu.add_command(label='Read figure',
command=self.figure_read)
plotmenu.add_separator()
plotmenu.add_command(label='Plot layers',
command=self.layers)
plotmenu.add_separator()
plotmenu.add_command(label='Save plot',
command=self.save)
plotmenu.add_command(label='Save plot as',
command=self.saveas)
plotmenu.add_separator()
plotmenu.add_command(label='Quit',
command=self.close)
insmenu = tk.Menu(menubar, tearoff=0)
menubar.add_cascade(label='Import/Select',menu=insmenu)
insmenu.add_command(label='Vector field',command=self.get_vector)
insmenu.add_command(label='Satellite SST',command=self.get_saidin)
insmenu.add_command(label='Contour field',command=self.get_contour)
insmenu.add_command(label='Trajectory',command=self.get_lagrangian)
insmenu.add_command(label='Marker',command=self.get_marker)
#EG Shapefile and WMS server
insmenu.add_command(label='Shapefile',command=self.get_shapefile)
insmenu.add_command(label='Ellipse',command=self.get_ellipse)
insmenu.add_command(label='Feature',command=self.get_feature)
    insmenu.add_command(label='WMS Service',state='disabled',command=self.get_wms)
confmenu = tk.Menu(menubar, tearoff=0)
menubar.add_cascade(label='Configure',menu=confmenu)
confmenu.add_command(label='Widgets',command=self.widget_config)
confmenu.add_command(label='Map',command=self.map_config)
confmenu.add_command(label='Legends',command=self.legend_config)
confmenu.add_command(label='Logo',command=self.logo_config)
confmenu.add_separator()
confmenu.add_command(label='Vector field',
command=self.currents_config)
confmenu.add_command(label='Satellite SST', \
command=self.saidin_config)
confmenu.add_command(label='Contour field',
command=self.contour_config)
confmenu.add_command(label='Trajectory',
command=self.lagrangian_config)
confmenu.add_command(label='Marker',
command=self.marker_config)
confmenu.add_command(label='Shape geometry',
command=self.shape_config)
confmenu.add_command(label='Variance ellipse',
command=self.ellipse_config)
confmenu.add_command(label='Patch',
command=self.patch_config)
confmenu.add_command(label='Feature',
command=self.feature_config)
confmenu.add_command(label='Time axis',
command=self.set_time)
confmenu.add_separator()
confmenu.add_command(label='Select configuration',
command=self.configuration_file)
toolmenu = tk.Menu(menubar, tearoff=0)
menubar.add_cascade(label='Tools',menu=toolmenu)
toolmenu.add_command(label='Trajectory model evaluation',
command=self.skill_Liu)
toolmenu.add_command(label='Vector series',
command=self.vector_series)
toolmenu.add_command(label='Vector mean',
command=self.vector_mean)
toolmenu.add_command(label='Contour mean',
command=self.contour_mean)
toolmenu.add_command(label='Ellipse of variance',
command=self.calc_ellipse)
toolmenu.add_command(label='Contour variance',
command=self.contour_var)
toolmenu.add_command(label='Trajectory editor',
command=self.trajectory_editor)
toolmenu.add_command(label='Marker editor',
command=self.marker_editor)
toolmenu.add_command(label='Add patch',
command=self.get_patch)
toolmenu.add_command(label='Download Atlas of currents',
command=self.atlas)
toolmenu.add_command(label='Make animation',
command=self.make_anim)
toolmenu.add_command(label='Save frames',
command=self.save_frames)
toolmenu.add_command(label='COSMO Lagrangian Model (CLM)',
command=self.clm)
calcmenu = tk.Menu(menubar, tearoff=0)
menubar.add_cascade(label='Calculators',menu=calcmenu)
calcmenu.add_command(label='Coordinate converter',
command=self.converter)
calcmenu.add_command(label='Distance estimation',
command=self.ruler)
helpmenu = tk.Menu(menubar, tearoff=0)
menubar.add_cascade(label='Help',menu=helpmenu)
helpmenu.add_command(label='About',command=self.about)
try:
self.master.config(menu=menubar)
except AttributeError:
# master is a toplevel window (Python 2.4/Tkinter 1.63)
self.master.tk.call(master, "config", "-menu", menubar)
# ============================
def about(self):
# ============================
'''Widget to print some help '''
def _close():
self.Window_about.destroy()
self.Window_about = None
if self.Window_about is None:
self.Window_about = tk.Toplevel(self.master)
self.Window_about.title('About')
self.Window_about.resizable(width=False,height=False)
self.Window_about.protocol('WM_DELETE_WINDOW',_close)
photoimage = ImageTk.PhotoImage(Image.open(self.PLOT.LOGO_FILE.get()).resize((200,100)))
panel1 = tk.Label(self.Window_about,image=photoimage)
panel1.grid(row=0,column=0,sticky='we')
# save the panel's image from 'garbage collection'
panel1.image = photoimage
_author = 'Authors: Quim Ballabrera (ICM/CSIC) \n Emilio Garcia (ICM/CSIC)'
_description = ' Ocean visualization tool for the COSMO and MED OSMOSIS projects\n V1.0 - Oct 2019 (COSMO project) \n V2.0 - July 2020 (COSMO project) \n V3.0 - April 2021 (MED OSMOSIS project)'
tk.Label(self.Window_about,text='COSMO-VIEW'). \
grid(row=1,column=0,sticky='ew')
tk.Label(self.Window_about,text='Version '+VERSION). \
grid(row=2,column=0,sticky='ew')
tk.Label(self.Window_about,text=_author) \
.grid(row=3,column=0,sticky='ew')
tk.Label(self.Window_about,text=_description). \
grid(row=4,column=0,sticky='ew')
tk.Button(self.Window_about,text='Close',command=_close). \
grid(row=5,column=0,sticky='ew')
else:
self.Window_about.lift()
# ============================
def get_vector(self):
# ============================
'''Widget to read files with currents (U,V) '''
self.VSOURCE = tk.StringVar()
self.VSOURCE.set('Operational')
def _close():
# ===========
self.Window_currents.destroy()
self.Window_currents = None
def _done():
# ==========
if self.nvec == 0:
messagebox.showinfo(message='No currents file opened yet')
return
ii = self.VEC_INDX.get()
#EG Corrected exception when the user tries to plot before
#EG importing product
#try:
# self.read_UV(self.VEC[ii])
#except:
# toconsola("Press Import to select a product",tag="o", wid=self.cons)
# return
if self.VEC[ii].SOURCE == 'FILE':
self.VEC[ii].read(wid=self.cons)
#self.read_UV(self.VEC[ii])
_close()
self.make_plot()
if self.Window_vectorconfig is not None:
self.Window_vectorconfig.destroy()
self.Window_vectorconfig = None
self.currents_config()
def _clear():
# ===========
if self.nvec == 0:
return
# When erasing, we must erase two kinds of informations, the
# information in the LAYER structure and the VECTOR information
# Attention, if erasing the SEQUENCE leader, we need to update the
# DATE and TIMES of the SEQUENCE
ii = self.VEC_INDX.get()
self.LAYERS.erase('VEC',ii,wid=self.cons)
self.LAYERS.print()
toconsola('Erasing data field '+str(ii),wid=self.cons)
del self.VEC[ii]
self.nvec -= 1
ii = self.nvec-1 if ii>= self.nvec else ii
self.VEC_INDX.set(ii)
_refill(ii)
if self.LAYERS.update:
toconsola('Updating TIME and DATE values of SEQUENCE',wid=self.cons)
LEADER_TYPE = self.LAYERS.TYPE[self.LAYERS.leader]
jj = self.LAYERS.TYPE_INDEX[self.LAYERS.leader]
if LEADER_TYPE == 'VEC':
self.DATE = self.VEC[jj].DATE.copy()
self.TIME = self.VEC[jj].TIME.copy()
        elif LEADER_TYPE == 'FLD':
          # NOTE: assumes contour fields (self.CDF) carry DATE/TIME lists,
          # mirroring the VEC branch above.
          self.DATE = self.CDF[jj].DATE.copy()
          self.TIME = self.CDF[jj].TIME.copy()
self.PLOT.TLABEL.set(self.DATE[self.L.get()])
self.make_plot()
def _reget():
# ===========
self.VEC_INDX.set(_wsel.get())
ii = self.VEC_INDX.get()
_refill(ii)
def _refill(ii):
# ==============
if ii >= 0:
self.VEC_LIST = list(range(self.nvec))
_wsel.configure(state='!disabled')
_wsel['values'] = self.VEC_LIST
_went['textvariable'] = self.VEC[ii].UFILENAME
_went2['textvariable'] = self.VEC[ii].VFILENAME
_uvar.configure(state='!disabled')
_uvar['textvariable'] = self.VEC[ii].uname
_uvar['values'] = self.VEC[ii].U.icdf.VAR_MENU
_vvar.configure(state='!disabled')
_vvar['textvariable'] = self.VEC[ii].vname
_vvar['values'] = self.VEC[ii].V.icdf.VAR_MENU
_kbox.configure(state='!disabled')
_kbox['textvariable'] = self.VEC[ii].K
_kbox['values'] = self.VEC[ii].K_LIST
_lbox.configure(state='!disabled')
_lbox['textvariable'] = self.VEC[ii].L
_lbox['values'] = self.VEC[ii].L_LIST
_aent.configure(state='!disabled')
_aent['textvariable'] = self.VEC[ii].ALIAS
if self.VEC[ii].U.icdf.idk < 0:
_kbox.configure(state='disabled')
_zbox['text']='--'
else:
_zbox['text']=self.VEC[ii].Z_LIST[self.VEC[ii].K.get()]
if self.VEC[ii].U.icdf.idl < 0:
_lbox.configure(state='disabled')
_dbox['text']='--'
else:
_lbox['textvariable'] = self.VEC[ii].L
_lbox['values'] = self.VEC[ii].L_LIST
_dbox['text'] = self.VEC[ii].DATE[self.VEC[ii].L.get()]
_show['variable'] = self.VEC[ii].show
#_wsav.configure(state='normal')
else:
self.VEC = []
self.VEC_LIST = [None]
self.VEC_INDX = tk.IntVar()
self.VEC_INDX.set(0)
_wsel.configure(state='disabled')
_uvar.configure(state='disabled')
_vvar.configure(state='disabled')
_kbox.configure(state='disabled')
_lbox.configure(state='disabled')
_wsel['values'] = self.VEC_LIST
_went['textvariable'] = ''
_uvar['textvariable'] = ''
_uvar['values'] = ['']
_uvar.configure(state='disabled')
_vvar['textvariable'] = ''
_vvar['values'] = ['']
_vvar.configure(state='disabled')
_kbox['textvariable'] = ''
_kbox['values'] = ['']
_zbox['text'] = '--'
        _lbox['textvariable'] = ''
        _lbox['values'] = ['']
        _dbox['text'] = '--'
_wsav.configure(state='disabled')
def _add(SOURCE):
# ===============
# Initialize VECTOR instance:
VEC = VECTOR()
def _cancel():
# ============
self.Window_currents_sel.destroy()
self.Window_currents_sel = None
def _done():
# ==========
global _uvar,_vvar
if empty(VEC.uname.get()):
VEC.U.varid = None
else:
VEC.U.varid = VEC.U.icdf.vname.index(VEC.uname.get())
if empty(VEC.vname.get()):
VEC.V.varid = None
else:
VEC.V.varid = VEC.V.icdf.vname.index(VEC.vname.get())
if VEC.U.varid is None or VEC.V.varid is None:
messagebox.showinfo(parent=self.Window_currents_sel,message='Select velocity components')
return
toconsola('2D-grid axes : '+'%s'%VEC.U.icdf.grid2d,wid=self.cons)
# Seems a suitable location for those statements:
VEC.U.varname = VEC.uname.get()
#VEC.U.varid = VEC.U.icdf.vname.index(VEC.U.varname)
VEC.U.ndims = VEC.U.icdf.ndims[VEC.U.varid]
VEC.U.get_info(wid=self.cons)
VEC.U.get_grid()
VEC.V.varname = VEC.vname.get()
#VEC.V.varid = VEC.V.icdf.vname.index(VEC.V.varname)
VEC.V.ndims = VEC.V.icdf.ndims[VEC.V.varid]
VEC.V.get_info(wid=self.cons)
if VEC.grid_type.get() == 'A' or VEC.grid_type.get() == 'B':
VEC.V.icdf.VAR_MENU = VEC.U.icdf.VAR_MENU[:]
else:
VEC.V.get_grid()
if VEC.grid_type.get() == 'C':
VEC.U.icdf.grid2d = True
VEC.V.icdf.grid2d = True
# X-center
xmu0 = 0.5*(VEC.U.xx[:,:-1]+VEC.U.xx[:,1:])
xmv0 = VEC.V.xx[:,1:-1]
ymu0 = 0.5*(VEC.U.yy[:,:-1]+VEC.U.yy[:,1:])
ymv0 = VEC.V.yy[:,1:-1]
# Y-center
VEC.V.xx = 0.5*(xmv0[:-1,:]+xmv0[1:,:])
VEC.U.xx = xmu0[1:-1,:]
VEC.V.yy = 0.5*(ymv0[:-1,:]+ymv0[1:,:])
VEC.U.yy = ymu0[1:-1,:]
toconsola('Regridding field. Updating array shapes',wid=self.cons)
aa = VEC.U.xx.shape
VEC.U.icdf.nx = aa[1]
VEC.U.icdf.ny = aa[0]
VEC.V.icdf.nx = aa[1]
VEC.V.icdf.ny = aa[0]
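        # The averaging above collocates the staggered Arakawa C-grid
        # components at the cell centers: U is averaged along x, V along y,
        # and the outermost rows/columns are trimmed so both arrays end up
        # with the same shape. Schematically (sketch, not executed):
        #   x_center = 0.5*(x_u[:, :-1] + x_u[:, 1:])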
#self.read_lonlat(VEC,VEC.icdf.xname,VEC.icdf.yname)
VEC.K_LIST = list(range(VEC.U.icdf.nz))
VEC.L_LIST = list(range(VEC.U.icdf.nt))
#VEC.K.set(0)
VEC.Z_LIST = VEC.U.get_zlist()
#VEC.L.set(0)
VEC.T_LIST, VEC.DATE, VEC.TIME = VEC.U.get_tlist()
#self.DepthandDate(VEC)
VEC.show.set(True)
# Adding a VECTOR in the Drawing class
#
nt = VEC.U.icdf.nt
self.LAYERS.add(TYPE='VEC',Filename=VEC.UFILENAME.get(),N=nt,wid=self.cons)
self.nvec += 1
self.VEC.append(VEC)
self.VEC_INDX.set(self.nvec-1)
self.VEC_LIST = list(range(self.nvec))
n = self.LAYERS.n
#self.nfiles += 1
#self.FILENAMES.append(VEC.UFILENAME.get())
#self.FILETYPES.append('VEC')
#self.FILEORDER.append(self.nvec-1)
#self.SEQUENCES.append(tk.BooleanVar(value=False)) # By default, no attached
#self.SEQLEADER.append(tk.BooleanVar(value=False))
#self.SEQNTIMES.append(VEC.U.icdf.nt)
ii = self.VEC_INDX.get() # Points to the new VECTOR
if self.first:
if self.drawmap is None:
if VEC.grid_type.get() == 'A' or VEC.grid_type.get() == 'B':
self.PLOT.WEST.set(self.VEC[ii].U.xmin)
self.PLOT.EAST.set(self.VEC[ii].U.xmax)
self.PLOT.SOUTH.set(self.VEC[ii].U.ymin)
self.PLOT.NORTH.set(self.VEC[ii].U.ymax)
else:
self.PLOT.WEST.set(max(self.VEC[ii].U.xmin,self.VEC[ii].V.xmin))
self.PLOT.EAST.set(min(self.VEC[ii].U.xmax,self.VEC[ii].V.xmax))
self.PLOT.SOUTH.set(max(self.VEC[ii].U.ymin,self.VEC[ii].V.ymin))
self.PLOT.NORTH.set(min(self.VEC[ii].U.ymax,self.VEC[ii].V.ymax))
self.plot_initialize()
#try:
# self.PLOT.XLABEL.set(self.VEC[ii].U.icdf.xname)
#except:
# self.PLOT.XLABEL.set('Longitude')
#try:
# self.PLOT.YLABEL.set(self.VEC[ii].U.icdf.yname)
#except:
# self.PLOT.YLABEL.set('Latitude')
self.DATE = self.VEC[ii].DATE.copy()
self.TIME = self.VEC[ii].TIME.copy()
self.PLOT.TLABEL.set(self.DATE[self.L.get()])
self.PLOT.VIDEO_L2.set(len(self.DATE)-1)
self.PLOT.SFRAME_L2.set(len(self.DATE)-1)
self.first = False
# if nt == 1:
# #if self.SEQNTIMES[ii] == 1:
# self.lbox.configure(state='disabled')
# else:
# self.lbox.configure(state='!disabled')
# self.lbox['values'] = self.L_LIST
# self.DATE = self.VEC[ii].DATE.copy()
# self.TIME = self.VEC[ii].TIME.copy()
#
# self.PLOT.TLABEL.set(self.VEC[ii].DATE[self.L.get()])
# if len(self.DATE) > 1:
# self.bnext.configure(state='normal')
#
# # CAROUSEL MANAGEMENT - VECTOR
# #if self.SEQNTIMES[-1] > 1:
# n = self.LAYERS.n
# if nt > 1:
# toconsola('Vector initiates SEQUENCE list',wid=self.cons)
# self.LAYERS.nsequence = 1
# self.LAYERS.INSEQUENCE[n].set(True)
# self.LAYERS.SEQUENCER[n].set(True)
# self.LAYERS.leader = n
#
# #self.nsequence = 1
# #self.SEQUENCES[-1].set(True)
# #self.SEQLEADER[-1].set(True) # Is the first field
# #self.SEQLEADER_INDX = self.nfiles
# self.DATE = self.VEC[ii].DATE.copy()
# self.TIME = self.VEC[ii].TIME.copy()
# self.L.set(self.VEC[ii].L.get())
# self.L_LIST = list(range(nt))
# self.NL = nt
# self.lbox.configure(state='normal')
# self.lbox['values'] = self.L_LIST
# self.DATE = self.VEC[ii].DATE.copy()
# self.TIME = self.VEC[ii].TIME.copy()
# if self.L.get() < self.NL-1:
# self.bnext.configure(state='normal')
# if self.L.get() > 0:
# self.bprev.configure(state='normal')
# else:
        # Is this field a member of the SEQUENCE?
if nt > 1:
if self.NL == 0:
toconsola('Vector initiates Time axis',wid=self.cons)
self.VEC[ii].LINK.set(True)
self.TIME = self.VEC[ii].TIME.copy()
self.DATE = self.VEC[ii].DATE.copy()
self.NL = nt
self.L.set(self.VEC[ii].L.get())
self.L_LIST = list(range(nt))
self.lbox.configure(state='normal')
self.lbox['values'] = self.L_LIST
if self.L.get() < self.NL-1:
self.bnext.configure(state='normal')
if self.L.get() > 0:
self.bprev.configure(state='normal')
elif self.NL == nt:
toconsola('Linking Vector to Time axis',wid=self.cons)
self.VEC[ii].LINK.set(True)
self.VEC[ii].L.set(self.L.get()) #Synchronize records
# if self.LAYERS.nsequence == 0:
# toconsola('Vector initiates SEQUENCE list',wid=self.cons)
# self.LAYERS.nsequence = 1
# self.LAYERS.INSEQUENCE[n-1].set(True)
# self.LAYERS.SEQUENCER[n-1].set(True)
# self.LAYERS.leader = n-1
# self.LAYERS.seqlen = nt
## self.SEQUENCES[-1].set(True)
## self.SEQLEADER[-1].set(True)
## self.SEQLEADER_INDX = self.nfiles
# self.DATE = self.VEC[ii].DATE.copy()
# self.TIME = self.VEC[ii].TIME.copy()
# self.L.set(self.VEC[ii].L.get())
# self.L_LIST = list(range(nt))
# self.NL = nt
# self.lbox.configure(state='normal')
# self.lbox['values'] = self.L_LIST
# if self.L.get() < self.NL-1:
# self.bnext.configure(state='normal')
# if self.L.get() > 0:
# self.bprev.configure(state='normal')
# else:
# if nt == self.LAYERS.seqlen:
# toconsola('Adding vector to SEQUENCE list',wid=self.cons)
# self.VEC[ii].LINK.set(True)
# self.LAYERS.nsequence += 1
# self.LAYERS.INSEQUENCE[n-1].set(True)
# self.LAYERS.SEQUENCER[n-1].set(False)
## self.nsequence += 1
## self.SEQUENCES[-1].set(True)
## self.SEQLEADER[-1].set(False)
# self.VEC[ii].L.set(self.L.get()) #Synchronize records
_refill(ii)
self.Window_currents_sel.destroy()
self.Window_currents_sel = None
self.LAYERS.print()
def _arakawa():
# =============
toconsola('Selected Arakawa '+VEC.grid_type.get()+' grid ',wid=self.cons)
        # Both grid families currently enable the same set of widgets, so a
        # single block covers Arakawa A/B and C grids alike.
        vselect['state'] = 'normal'
        vaxesid.Ibox['state'] = 'normal'
        vaxesid.Jbox['state'] = 'normal'
        vaxesid.Kbox['state'] = 'normal'
        vaxesid.Lbox['state'] = 'normal'
        vaxesid.Xbox['state'] = 'normal'
        vaxesid.Ybox['state'] = 'normal'
        vaxesid.Zbox['state'] = 'normal'
        vaxesid.Tbox['state'] = 'normal'
        vaxesid.wgr['state'] = 'normal'
def _vselect():
# =============
global Vsel
nn = filedialog.askopenfilename(parent=self.Window_currents, \
filetypes=[('Netcdf','*.nc'), \
('CDF','*.cdf'), \
('ALL','*')])
if len(nn) == 0:
return
else:
filename = '%s' % nn
VEC.two_files = 1
VEC.VFILENAME.set(filename)
VEC.V.nc = Dataset(filename)
VEC.V.icdf = tools.geocdf(filename, wid=self.cons)
toconsola('Opening meridional velocity file '+VEC.VFILENAME.get(),wid=self.cons)
FV = ttk.Frame(FVmain,padding=5,borderwidth=5)
vaxesid = tools.WinGeoaxes(VEC.V.icdf,VEC.V.nc,FV)
FV.grid(row=1,column=0,columnspan=5)
Vsel['values'] = VEC.V.icdf.VAR_MENU
Vsel.bind('<<ComboboxSelected>>',lambda e: vaxesid.selected_var(VEC.V.icdf,VEC.V.nc,Vsel))
# Main part of the function ...
#names = ['Operational','CODAR','COPERNICUS','Local']
ISOURCE = self.CURRENT_OPTIONS.index(SOURCE)
if ISOURCE == 0:
filename = self.get_opendap_filename()
elif ISOURCE == 1:
filename = self.get_codar_filename()
elif ISOURCE == 2:
filename = self.get_copernicus_filename()
elif ISOURCE == 3:
nn = filedialog.askopenfilename(parent=self.Window_currents, \
filetypes=[('Netcdf','*.nc'), \
('CDF','*.cdf'), \
('ALL','*')])
if len(nn) == 0:
return
else:
filename = '%s' % nn
elif ISOURCE == 4:
        aa = get_remote()
        filename = aa.filename()
        #filename = 'https://cosmo.icm.csic.es/MEDSEA_100.nc'  # debug override, kept disabled
        if isinstance(filename,bytes):
          filename = filename.decode('utf-8')
        print('filename: ',filename)
else:
if self.ncdf <= 0:
messagebox.showinfo(message='No Contour file opened yet')
return
else:
jj = self.CDF_INDX.get()
filename = self.CDF[jj].FILENAME.get()
if empty(filename):
return
# Not empty filename:
      '''Update to account for multiple Arakawa grids. We begin by duplicating
the velocity object and use the first one for the U information
and the second one for the V information. Once the grid information
has been filled, we merge the V-information of the second object
into the first one '''
#VEC = cdf_parameters()
VEC.UFILENAME.set(filename)
VEC.VFILENAME.set(filename)
#VEC.VEL = vel_parameters()
#toconsola(VEC.VEL.MESSAGE,wid=self.cons)
VEC.U.nc = Dataset(filename)
VEC.U.icdf = tools.geocdf(filename, wid=self.cons)
VEC.V.nc = Dataset(filename)
VEC.V.icdf = tools.geocdf(filename, wid=self.cons)
# Object to capture the information about the V-field
#VEC2 = cdf_parameters()
#VEC2.FILENAME.set(filename)
#VEC2.VEL = vel_parameters()
toconsola(VEC.MESSAGE,wid=self.cons)
#VEC2.ncid = Dataset(filename)
#VEC2.icdf = tools.geocdf(filename, wid=self.cons)
# self.read_lonlat(VEC,VEC.icdf.xname,VEC.icdf.yname)
# self.DepthandDate(VEC)
# VEC.VEL.show.set(True)
if self.Window_currents_sel is None:
self.Window_currents_sel = tk.Toplevel(self.master)
self.Window_currents_sel.title('SELECT VELOCITY COMPONENTS')
self.Window_currents_sel.protocol('WM_DELETE_WINDOW',self.Window_currents_sel.destroy)
#else:
# self.Window_currents_sel.lift()
# return
font_bold = tkfont.Font(font='TkDefaultFont').copy()
font_bold['weight']='bold'
      '''Now, we launch two WinGeoaxes widgets to capture the two
components of the field'''
FAgrid = ttk.Frame(self.Window_currents_sel,padding=5,borderwidth=5)
ttk.Label(FAgrid,text='Grid type', \
font=font_bold).grid(row=0,column=0,sticky='w')
gtype = ttk.Combobox(FAgrid,textvariable=VEC.grid_type, \
values=VEC.grid_type_list, \
width=5)
gtype.grid(row=0,column=1,columnspan=1,sticky='w')
gtype.bind('<<ComboboxSelected>>',lambda e: _arakawa())
FAgrid.grid(row=0,column=0,columnspan=5)
# -------------------------------------------------------
FUmain = ttk.Frame(self.Window_currents_sel,padding=5,borderwidth=5)
FU = ttk.Frame(FUmain,padding=5,borderwidth=5)
uaxesid = tools.WinGeoaxes(VEC.U.icdf,VEC.U.nc,FU)
FU.grid(row=0,column=0,columnspan=5)
ttk.Label(FUmain,text='Select U', \
borderwidth=3, \
font=font_bold).grid(row=1,column=2)
Usel = ttk.Combobox(FUmain,textvariable=VEC.uname, \
values=VEC.U.icdf.VAR_MENU, \
width=20)
Usel.bind('<<ComboboxSelected>>',lambda e: uaxesid.selected_var(VEC.U.icdf,VEC.U.nc,Usel))
Usel.grid(row=1,column=3,columnspan=2)
FUmain.grid()
# -------------------------------------------------------
global Vsel
FVmain = ttk.Frame(self.Window_currents_sel,padding=5,borderwidth=5)
vselect = ttk.Button(FVmain,text='Open meridional velocity file',command=_vselect)
vselect.grid(row=0,column=0,columnspan=2)
FV = ttk.Frame(FVmain,padding=5,borderwidth=5)
vaxesid = tools.WinGeoaxes(VEC.V.icdf,VEC.V.nc,FV)
FV.grid(row=1,column=0,columnspan=5)
ttk.Label(FVmain,text='Select V', \
borderwidth=3, \
font=font_bold).grid(row=2,column=2)
Vsel = ttk.Combobox(FVmain,textvariable=VEC.vname, \
values=VEC.V.icdf.VAR_MENU, \
width=20)
Vsel.bind('<<ComboboxSelected>>',lambda e: vaxesid.selected_var(VEC.V.icdf,VEC.V.nc,Vsel))
Vsel.grid(row=2,column=3,columnspan=2)
FVmain.grid()
if VEC.grid_type.get() == 'A' or VEC.grid_type.get() == 'B':
vselect['state'] = 'normal'
vaxesid.Ibox['state'] = 'normal'
vaxesid.Jbox['state'] = 'normal'
vaxesid.Kbox['state'] = 'normal'
vaxesid.Lbox['state'] = 'normal'
vaxesid.Xbox['state'] = 'normal'
vaxesid.Ybox['state'] = 'normal'
vaxesid.Zbox['state'] = 'normal'
vaxesid.Tbox['state'] = 'normal'
vaxesid.wgr['state'] = 'normal'
F1 = ttk.Frame(self.Window_currents_sel,padding=5)
cancel = ttk.Button(F1,text='Cancel',command=_cancel)
cancel.grid(row=0,column=3,sticky='e',padx=10)
cancel.bind("<Return>",lambda e:_cancel())
done = ttk.Button(F1,text='Done',command=_done)
done.grid(row=0,column=4,sticky='e',padx=10)
done.bind("<Return>",lambda e:_done())
F1.grid(sticky='we')
self.Window_currents_sel.wait_window(self.Window_currents_sel)
def _lselection():
# ================
_dbox['text'] = self.VEC[ii].DATE[self.VEC[ii].L.get()]
def _kselection():
# ================
_zbox['text'] = self.VEC[ii].Z_LIST[self.VEC[ii].K.get()]
def _uselection():
# ================
ii = self.VEC_INDX.get()
try:
self.VEC[ii].U.varname = self.VEC[ii].uname.get()
self.VEC[ii].U.varid = self.VEC[ii].U.icdf.vname.index( \
self.VEC[ii].uname.get())
except:
self.VEC[ii].U.varname = None
self.VEC[ii].U.varid = -1
def _vselection():
# ================
ii = self.VEC_INDX.get()
try:
self.VEC[ii].V.varname = self.VEC[ii].vname.get()
self.VEC[ii].V.varid = self.VEC[ii].V.icdf.vname.index( \
self.VEC[ii].vname.get())
except:
self.VEC[ii].V.varname = None
self.VEC[ii].V.varid = -1
def _save():
# ================
ii = self.VEC_INDX.get()
print('Saving ',ii)
self.VEC[ii].save()
# Main Window
# ============
if self.Window_currents is None:
self.Window_currents = tk.Toplevel(self.master)
self.Window_currents.title("Currents selector")
self.Window_currents.protocol('WM_DELETE_WINDOW',_close)
else:
self.Window_currents.lift()
if self.nvec > 0:
ii = self.VEC_INDX.get()
else:
ii = -1
global _uvar,_vvar
self.Window_currents_sel = None
F0 = ttk.Frame(self.Window_currents,padding=5)
# Add
ttk.Button(F0,text='Import',command=lambda:_add(self.VSOURCE.get())).grid(row=1,column=0,padx=3)
_source = ttk.Combobox(F0,textvariable=self.VSOURCE, \
values=self.CURRENT_OPTIONS)
_source.grid(row=0,column=0,padx=3)
#_source.bind('<<ComboboxSelected>>', \
# lambda e: _add(self.VSOURCE.get()))
# Filename:
ttk.Label(F0,text='Netcdf file').grid(row=0,column=1,padx=3)
_wsel = ttk.Combobox(F0,textvariable=self.VEC_INDX, \
values=self.VEC_LIST,width=5)
_wsel.grid(row=0,column=2)
_wsel.bind('<<ComboboxSelected>>',lambda e: _reget())
_went = ttk.Entry(F0,justify='left',width=50,state='readonly')
_went.grid(row=0,column=3,columnspan=5,padx=3,sticky='w')
_went2 = ttk.Entry(F0,justify='left',width=50,state='readonly')
_went2.grid(row=1,column=3,columnspan=5,padx=3,sticky='w')
# Velocity components:
ttk.Label(F0,text='Zonal').grid(row=2,column=1,padx=3,pady=3)
_uvar = ttk.Combobox(F0,width=15)
_uvar.grid(row=2,column=2,columnspan=2,sticky='w')
_uvar.bind('<<ComboboxSelected>>',lambda e: _uselection())
ttk.Label(F0,text='Meridional').grid(row=2,column=4,padx=3,pady=3)
_vvar = ttk.Combobox(F0,width=15)
_vvar.grid(row=2,column=5,columnspan=2,sticky='w')
_vvar.bind('<<ComboboxSelected>>',lambda e: _vselection())
# Depth:
ttk.Label(F0,text='Depth').grid(row=3,column=1,padx=3,pady=3)
_kbox = ttk.Combobox(F0,values=['0'],width=5)
_kbox.grid(row=3,column=2)
_kbox.bind('<<ComboboxSelected>>',lambda e: _kselection())
_zbox = ttk.Label(F0,width=20)
_zbox.grid(row=3,column=3,columnspan=2,sticky='w')
# Time:
ttk.Label(F0,text='Time').grid(row=4,column=1,padx=3,pady=3)
_lbox = ttk.Combobox(F0,width=5)
_lbox.grid(row=4,column=2)
_lbox.bind('<<ComboboxSelected>>',lambda e: _lselection())
_dbox = ttk.Label(F0,width=20)
_dbox.grid(row=4,column=3,columnspan=2,sticky='w')
#Alias
ttk.Label(F0,text='Alias').grid(row=5,column=1,padx=3,pady=3)
_aent = ttk.Entry(F0,width=15,justify='left')
_aent.grid(row=5,column=2,columnspan=2,sticky='w')
if ii == -1:
_wsel.configure(state='disabled')
_uvar.configure(state='disabled')
_vvar.configure(state='disabled')
_kbox.configure(state='disabled')
_lbox.configure(state='disabled')
_aent.configure(state='disabled')
else:
_went['textvariable'] = self.VEC[ii].UFILENAME
_went2['textvariable'] = self.VEC[ii].VFILENAME
_uvar['textvariable'] = self.VEC[ii].uname
_vvar['textvariable'] = self.VEC[ii].vname
_uvar['values'] = self.VEC[ii].U.icdf.VAR_MENU
_vvar['values'] = self.VEC[ii].V.icdf.VAR_MENU
_kbox['textvariable'] = self.VEC[ii].K
_kbox['values'] = self.VEC[ii].K_LIST
_aent['textvariable'] = self.VEC[ii].ALIAS
if self.VEC[ii].U.icdf.idk < 0:
_kbox.configure(state='disabled')
_zbox['text']='--'
else:
_zbox['text']=self.VEC[ii].Z_LIST[self.VEC[ii].K.get()]
if self.VEC[ii].U.icdf.idl < 0:
_lbox.configure(state='disabled')
_dbox['text']='--'
try:
nodate = empty(self.VEC[ii].DATE[0])
except:
nodate = False
if nodate:
_dbox['text']='--'
else:
          _dbox['text'] = self.VEC[ii].DATE[0]
else:
_lbox['textvariable'] = self.VEC[ii].L
_lbox['values'] = self.VEC[ii].L_LIST
_dbox['text'] = self.VEC[ii].DATE[self.VEC[ii].L.get()]
F0.grid(row=0,column=0)
F1 = ttk.Frame(self.Window_currents,padding=5)
_wsav = ttk.Button(F1,text='Save data',command=_save)
_wsav.grid(row=1,column=0,padx=3,sticky='w')
if ii == -1:
_show = ttk.Checkbutton(F1,text='Show')
_wsav.configure(state='disabled')
else:
_show = ttk.Checkbutton(F1,text='Show')
_show['variable']=self.VEC[ii].show
_show.configure(command=self.make_plot)
_wsav.configure(state='normal')
_show.grid(row=1,column=5)
ttk.Button(F1,text='Cancel',command=_close).grid(row=1,column=6,padx=3)
ttk.Button(F1,text='Clear',command=_clear).grid(row=1,column=7,padx=3)
ttk.Button(F1,text='Plot',command=_done).grid(row=1,column=8,padx=3)
ttk.Label(F1,text=' ',width=8).grid(row=1,column=1,padx=3,sticky='w')
ttk.Label(F1,text=' ',width=8).grid(row=1,column=2,padx=3,sticky='w')
F1.grid(row=1,column=0)
# =============================
def get_opendap_filename(self):
# =============================
def _close():
self.Window_opendap.destroy()
self.Window_opendap = None
if self.Window_opendap is None:
self.Window_opendap = tk.Toplevel(self.master)
self.Window_opendap.title('Load Operational service Opendap file')
self.Window_opendap.protocol('WM_DELETE_WINDOW',_close)
a = providers.WinOpendap(self.Window_opendap)
toconsola(a.MESSAGE,wid=self.cons)
self.Window_opendap.wait_window()
self.Window_opendap = None
filename = a.get_filename()
return filename
else:
self.Window_opendap.lift()
# =============================
def get_codar_filename(self):
# =============================
def _close():
self.Window_codar.destroy()
self.Window_codar = None
if self.Window_codar is None:
self.Window_codar = tk.Toplevel(self.master)
self.Window_codar.title('HF Radar station selector')
self.Window_codar.protocol('WM_DELETE_WINDOW',_close)
a = codar.WinCodar(self.Window_codar)
toconsola(a.MESSAGE,wid=self.cons)
self.Window_codar.wait_window()
self.Window_codar = None
filename = a.get_filename()
return filename
else:
self.Window_codar.lift()
# ================================
def get_copernicus_filename(self):
# ================================
def _close():
self.Window_copernicus.destroy()
self.Window_copernicus = None
if self.Window_copernicus is None:
self.Window_copernicus = tk.Toplevel(self.master)
self.Window_copernicus.title('COPERNICUS file selector')
self.Window_copernicus.configure(background='#87CEEB')
self.Window_copernicus.protocol('WM_DELETE_WINDOW',_close)
a = copernicus.WinTracking(self.Window_copernicus)
toconsola(a.MESSAGE,wid=self.cons)
self.Window_copernicus.wait_window()
self.Window_copernicus = None
filename = a.out()
return filename
else:
self.Window_copernicus.lift()
# ==================
def layers(self):
# ==================
'''Display the layers (files) currently loaded'''
def _close():
# ===========
self.Window_files.destroy()
self.Window_files = None
def _tosequence():
# ================
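# Interpolate every float trajectory onto the viewer's common TIME
# axis; positions outside the float record are filled with NaN.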
if TYPE == 'FLT':
self.FLOAT[ii].MAPX = []
self.FLOAT[ii].MAPY = []
if self.FLOAT[ii].nfloats > 1:
for i in range(self.FLOAT[ii].nfloats):
f = interpolate.interp1d(self.FLOAT[ii].TIME,self.FLOAT[ii].lon[:,i],
bounds_error=False, fill_value=np.NaN)
self.FLOAT[ii].MAPX.append(f(self.TIME))
f = interpolate.interp1d(self.FLOAT[ii].TIME,self.FLOAT[ii].lat[:,i],
bounds_error=False, fill_value=np.NaN)
self.FLOAT[ii].MAPY.append(f(self.TIME))
# Transpose FLT.MAPX and FLT.MAPY:
self.FLOAT[ii].MAPX = np.array(self.FLOAT[ii].MAPX).T.tolist()
self.FLOAT[ii].MAPY = np.array(self.FLOAT[ii].MAPY).T.tolist()
else:
self.FLOAT[ii].Fx = interpolate.interp1d(self.FLOAT[ii].TIME,self.FLOAT[ii].lon,
bounds_error=False, fill_value=np.NaN)
self.FLOAT[ii].MAPX = self.FLOAT[ii].Fx(self.TIME)
self.FLOAT[ii].Fy = interpolate.interp1d(self.FLOAT[ii].TIME,self.FLOAT[ii].lat,
bounds_error=False, fill_value=np.NaN)
self.FLOAT[ii].MAPY = self.FLOAT[ii].Fy(self.TIME)
#Main Window
# =========
if self.LAYERS.n == 0:
toconsola('No layers added yet',wid=self.cons)
return
if self.Window_files is None:
self.Window_files = tk.Toplevel(self.master,width=80)
self.Window_files.title('Plot layers')
self.Window_files.resizable(width=True,height=False)
self.Window_files.protocol('WM_DELETE_WINDOW',_close)
else:
self.Window_files.lift()
return
Fh = ttk.Frame(self.Window_files,borderwidth=5,padding=5)
txt1 = 'Number of layers: %d' % self.LAYERS.n
ttk.Label(Fh,text=txt1).grid(row=0,column=0,padx=3,sticky='w')
Fh.grid(sticky='w')
F0 = ttk.Frame(self.Window_files,width=80,borderwidth=5,padding=5)
ttk.Label(F0,text='SHOW').grid(row=0,column=0,padx=3,sticky='we')
ttk.Label(F0,text='TYPE').grid(row=0,column=1,padx=3,sticky='we')
ttk.Label(F0,text='SOURCE',width=10).grid(row=0,column=2,padx=3,sticky='we')
ttk.Label(F0,text='ZORDER').grid(row=0,column=3,padx=3,sticky='we')
ttk.Label(F0,text='ALPHA').grid(row=0,column=4,padx=3,sticky='we')
ttk.Label(F0,text='TIME LINK').grid(row=0,column=5,padx=3,sticky='we')
#ttk.Label(F0,text='SEQUENCE').grid(row=0,column=5,padx=3,sticky='we')
#ttk.Label(F0,text='SEQ LEADER').grid(row=0,column=6,padx=3,sticky='we')
ttk.Label(F0,text='ALIAS',width=12).grid(row=0,column=7,padx=3,sticky='we')
ttk.Label(F0,text='FILENAME').grid(row=0,column=8,sticky='we')
nvec = -1
nfld = -1
nflo = -1
for i in range(self.LAYERS.n):
TYPE = self.LAYERS.TYPE[i]
ii = self.LAYERS.TYPE_INDEX[i]
noseq = False
if TYPE == 'VEC':
ttk.Checkbutton(F0,variable=self.VEC[ii].show,\
command=self.make_plot). \
grid(row=i+1,column=0,padx=3)
ttk.Label(F0,text=self.VEC[ii].SOURCE,justify='left',width=10).grid(row=i+1,column=2,padx=3)
zz = ttk.Entry(F0,textvariable=self.VEC[ii].PLOT.ZORDER,width=3)
zz.grid(row=i+1,column=3,padx=3)
zz.bind("<Return>",lambda f: self.make_plot())
aa = ttk.Entry(F0,textvariable=self.VEC[ii].PLOT.ALPHA,width=3)
aa.grid(row=i+1,column=4,padx=3)
aa.bind("<Return>",lambda f: self.make_plot())
# Link
cc = ttk.Checkbutton(F0,variable=self.VEC[ii].LINK)
cc.grid(row=i+1,column=5,padx=3)
if self.VEC[ii].U.icdf.nt != self.NL:
cc.configure(state='disabled')
# Alias
ttk.Label(F0,text=self.VEC[ii].ALIAS.get(),justify='left',
width=12).grid(row=i+1,column=7,padx=3)
if TYPE == 'FLD':
ttk.Checkbutton(F0,variable=self.CDF[ii].show,\
command=self.make_plot). \
grid(row=i+1,column=0,padx=3)
ttk.Label(F0,text=self.CDF[ii].SOURCE,justify='left',width=10).grid(row=i+1,column=2,padx=3)
zz = ttk.Entry(F0,textvariable=self.CDF[ii].PLOT.ZORDER,width=3)
zz.grid(row=i+1,column=3,padx=3)
zz.bind("<Return>",lambda f: self.make_plot())
aa = ttk.Entry(F0,textvariable=self.CDF[ii].PLOT.ALPHA,width=3)
aa.grid(row=i+1,column=4,padx=3)
aa.bind("<Return>",lambda f: self.make_plot())
# Link
cc = ttk.Checkbutton(F0,variable=self.CDF[ii].LINK)
cc.grid(row=i+1,column=5,padx=3)
if self.CDF[ii].FLD.icdf.nt != self.NL:
cc.configure(state='disabled')
ttk.Label(F0,text=self.CDF[ii].ALIAS.get(),justify='left',width=12).grid(row=i+1,column=7,padx=3)
if TYPE == 'FLOAT':
ttk.Checkbutton(F0,variable=self.FLOAT[ii].show,\
command=self.make_plot). \
grid(row=i+1,column=0,padx=3)
ttk.Label(F0,text=self.FLOAT[ii].SOURCE,justify='left',width=10).grid(row=i+1,column=2,padx=3)
zz = ttk.Entry(F0,textvariable=self.FLOAT[ii].PLOT.ZORDER,width=3)
zz.grid(row=i+1,column=3,padx=3)
zz.bind("<Return>",lambda f: self.make_plot())
aa = ttk.Entry(F0,textvariable=self.FLOAT[ii].PLOT.ALPHA,width=3)
aa.grid(row=i+1,column=4,padx=3)
aa.bind("<Return>",lambda f: self.make_plot())
ttk.Label(F0,text=self.FLOAT[ii].ALIAS.get(),justify='left',width=12).grid(row=i+1,column=7,padx=3)
if TYPE == 'SAIDIN':
ttk.Checkbutton(F0,variable=self.SAIDIN.show,\
command=self.make_plot). \
grid(row=i+1,column=0,padx=3)
ttk.Label(F0,text=self.SAIDIN.SOURCE,justify='left',width=10).grid(row=i+1,column=2,padx=3)
zz = ttk.Entry(F0,textvariable=self.SAIDIN.PLOT.ZORDER,width=3)
zz.grid(row=i+1,column=3,padx=3)
zz.bind("<Return>",lambda f: self.make_plot())
aa = ttk.Entry(F0,textvariable=self.SAIDIN.PLOT.ALPHA,width=3)
aa.grid(row=i+1,column=4,padx=3)
aa.bind("<Return>",lambda f: self.make_plot())
ttk.Label(F0,text=self.SAIDIN.ALIAS.get(),justify='left',width=12).grid(row=i+1,column=7,padx=3)
noseq = True
# Show, Zorder
if TYPE == 'MARKER':
ttk.Checkbutton(F0,variable=self.MARKER[ii].show,\
command=self.make_plot). \
grid(row=i+1,column=0,padx=3)
ttk.Label(F0,text=self.MARKER[ii].SOURCE,justify='left',width=10).grid(row=i+1,column=2,padx=3)
zz = ttk.Entry(F0,textvariable=self.MARKER[ii].PLOT.ZORDER,width=3)
zz.grid(row=i+1,column=3,padx=3)
zz.bind("<Return>",lambda f: self.make_plot())
aa = ttk.Entry(F0,textvariable=self.MARKER[ii].PLOT.ALPHA,width=3)
aa.grid(row=i+1,column=4,padx=3)
aa.bind("<Return>",lambda f: self.make_plot())
ttk.Label(F0,text=self.MARKER[ii].ALIAS.get(),justify='left',width=12).grid(row=i+1,column=7,padx=3)
noseq = True
if TYPE == 'SHAPE':
ttk.Checkbutton(F0,variable=self.SHAPE[ii].show,\
command=self.make_plot). \
grid(row=i+1,column=0,padx=3)
ttk.Label(F0,text=self.SHAPE[ii].SOURCE,justify='left',width=10).grid(row=i+1,column=2,padx=3)
zz = ttk.Entry(F0,textvariable=self.SHAPE[ii].PLOT.ZORDER,width=3)
zz.grid(row=i+1,column=3,padx=3)
zz.bind("<Return>",lambda f: self.make_plot())
aa = ttk.Entry(F0,textvariable=self.SHAPE[ii].PLOT.ALPHA,width=3)
aa.grid(row=i+1,column=4,padx=3)
aa.bind("<Return>",lambda f: self.make_plot())
ttk.Label(F0,text=self.SHAPE[ii].ALIAS.get(),justify='left',width=12).grid(row=i+1,column=7,padx=3)
noseq = True
if TYPE == 'ELLIPSE':
ttk.Checkbutton(F0,variable=self.ELLIPSE[ii].show,\
command=self.make_plot). \
grid(row=i+1,column=0,padx=3)
ttk.Label(F0,text=self.ELLIPSE[ii].SOURCE,justify='left',width=10).grid(row=i+1,column=2,padx=3)
zz = ttk.Entry(F0,textvariable=self.ELLIPSE[ii].PLOT.ZORDER,width=3)
zz.grid(row=i+1,column=3,padx=3)
zz.bind("<Return>",lambda f: self.make_plot())
aa = ttk.Entry(F0,textvariable=self.ELLIPSE[ii].PLOT.ALPHA,width=3)
aa.grid(row=i+1,column=4,padx=3)
aa.bind("<Return>",lambda f: self.make_plot())
ttk.Label(F0,text=self.ELLIPSE[ii].ALIAS.get(),justify='left',width=12).grid(row=i+1,column=7,padx=3)
noseq = True
if TYPE == 'FEATURE':
ttk.Checkbutton(F0,variable=self.FEATURE.DATA[ii].show,\
command=self.make_plot). \
grid(row=i+1,column=0,padx=3)
ttk.Label(F0,text=self.FEATURE.DATA[ii].SOURCE,justify='left',width=10).grid(row=i+1,column=2,padx=3)
zz = ttk.Entry(F0,textvariable=self.FEATURE.DATA[ii].PLOT.ZORDER,width=3)
zz.grid(row=i+1,column=3,padx=3)
zz.bind("<Return>",lambda f: self.make_plot())
aa = ttk.Entry(F0,textvariable=self.FEATURE.DATA[ii].PLOT.ALPHA,width=3)
aa.grid(row=i+1,column=4,padx=3)
aa.bind("<Return>",lambda f: self.make_plot())
ttk.Label(F0,text=self.FEATURE.DATA[ii].ALIAS.get(),justify='left',width=12).grid(row=i+1,column=7,padx=3)
noseq = True
if TYPE == 'PATCH':
ttk.Checkbutton(F0,variable=self.PATCH[ii].show,\
command=self.make_plot). \
grid(row=i+1,column=0,padx=3)
ttk.Label(F0,text=self.PATCH[ii].SOURCE,justify='left',width=10).grid(row=i+1,column=2,padx=3)
zz = ttk.Entry(F0,textvariable=self.PATCH[ii].PLOT.ZORDER,width=3)
zz.grid(row=i+1,column=3,padx=3)
zz.bind("<Return>",lambda f: self.make_plot())
aa = ttk.Entry(F0,textvariable=self.PATCH[ii].PLOT.ALPHA,width=3)
aa.grid(row=i+1,column=4,padx=3)
aa.bind("<Return>",lambda f: self.make_plot())
ttk.Label(F0,text=self.PATCH[ii].ALIAS.get(),justify='left',width=12).grid(row=i+1,column=7,padx=3)
noseq = True
# Type
ttk.Label(F0,text=TYPE, \
width=7,justify='left').grid(row=i+1, \
column=1, \
columnspan=1,padx=3,sticky='we')
# # Sequence
# cc = ttk.Checkbutton(F0,variable=self.LAYERS.INSEQUENCE[i],command=_tosequence)
# cc.grid(row=i+1,column=5,padx=3)
# if self.LAYERS.NREC[ii] != self.LAYERS.NREC[self.LAYERS.leader]:
# cc.configure(state='disabled')
# # Sequence leader
# bb = ttk.Checkbutton(F0,variable=self.LAYERS.SEQUENCER[i])
# bb.grid(row=i+1,column=6,padx=3)
#
# if self.LAYERS.NREC[i] <= 1 or noseq:
# cc.configure(state='disabled')
# bb.configure(state='disabled')
# Filename
try:
base = os.path.basename(self.LAYERS.FILENAME[i])
except:
base = '-'
ttk.Label(F0,text=base, \
width=60,justify='left').grid(row=i+1, \
column=8, \
columnspan=2,padx=3)
F0.grid()
for i in range(self.LAYERS.n):
toconsola('%s as %s' % (self.LAYERS.FILENAME[i],self.LAYERS.TYPE[i]),wid=self.cons)
# ===========================
def configuration_file(self):
# ===========================
''' Launch the Configuration file script '''
new_conf = tk.StringVar()
new_conf.set(COSMO_CONF_NAME)
# -----------
def _done():
# -----------
'''Apply the selected configuration, persist the choice and close the widget'''
if exists(self.PLOT.FILECONF):
toconsola('Reading configuration file '+self.PLOT.FILECONF,wid=self.cons)
try:
conf = self.PLOT.conf_load(self.PLOT.FILECONF)
self.PLOT.conf_set(conf)
except:
toconsola('Error reading, using default parameters',wid=self.cons)
conf = self.PLOT.conf_get()
self.PLOT.conf_save(conf,self.PLOT.FILECONF)
else:
toconsola('Saving configuration file ...',wid=self.cons)
conf = self.PLOT.conf_get()
self.PLOT.conf_save(conf,self.PLOT.FILECONF)
conf = {}
conf['COSMO_CONF_NAME']=COSMO_CONF_NAME
with io.open(COSMO_CONF_DATA,'w',encoding='utf8') as outfile:
_str = json.dumps(conf,ensure_ascii=False,
sort_keys=False,
indent=2,
separators=(',',': '))
outfile.write(to_unicode(_str)+'\n')
self.Window_cfile.destroy()
self.Window_cfile = None
# -----------
def _cancel():
# -----------
'''Recover the backup values and close the widget'''
# The global declaration is required: without it these assignments
# would create locals and the restore would have no effect.
global COSMO_CONF,COSMO_CONF_NAME
with open(COSMO_CONF_DATA) as infile:
conf = json.load(infile)
COSMO_CONF_NAME = conf['COSMO_CONF_NAME']
COSMO_CONF = COSMO_CONF_PATH + COSMO_CONF_NAME + os.sep
self.Window_cfile.destroy()
self.Window_cfile = None
def _select():
# ============
global COSMO_CONF,COSMO_CONF_PATH,COSMO_CONF_NAME
nn = tk.filedialog.askdirectory(parent=self.Window_cfile,
initialdir=COSMO_CONF_PATH)
if len(nn) == 0:
return
if os.path.isdir(nn):
# Check that the user has not just selected the folder,
# but also opened it
if nn == COSMO_CONF_PATH or nn+os.sep == COSMO_CONF_PATH:
toconsola('Configuration name must be "Opened" in dialog, not just selected ;-)',wid=self.cons)
toconsola('The configuration has not changed',wid=self.cons)
return
toconsola('Configuration folder exists',wid=self.cons)
COSMO_CONF_NAME = '%s' % os.path.basename(os.path.normpath(nn))
COSMO_CONF = nn + os.sep
# else:
# toconsola('New Configuration folder',wid=self.cons)
# os.makedirs(nn)
# COSMO_CONF_NAME = '%s' % os.path.basename(os.path.normpath(nn))
# COSMO_CONF = nn + os.sep
new_conf.set(COSMO_CONF_NAME)
message ='COSMO_CONF_PATH = '+COSMO_CONF_PATH+"\n"+ \
'COSMO_CONF_NAME = '+COSMO_CONF_NAME+"\n"+ \
'COSMO_CONF = '+COSMO_CONF
toconsola(message,wid=self.cons)
self.PLOT.FILECONF = COSMO_CONF + 'drawing.conf'
toconsola('self.PLOT.FILECONF = '+self.PLOT.FILECONF,wid=self.cons)
def _create(event=None):
# ======================
global COSMO_CONF,COSMO_CONF_PATH,COSMO_CONF_NAME
if empty(new_conf.get()):
toconsola('Empty configuration name',wid=self.cons)
new_conf.set(COSMO_CONF_NAME)
return
COSMO_CONF_NAME = '%s' % new_conf.get()
COSMO_CONF = COSMO_CONF_PATH+COSMO_CONF_NAME+os.sep
if os.path.isdir(COSMO_CONF):
toconsola('Configuration ' + COSMO_CONF + ' already exists',wid=self.cons)
toconsola('Overwriting it !',wid=self.cons)
else:
toconsola('Writing in configuration folder '+COSMO_CONF,wid=self.cons)
os.makedirs(COSMO_CONF)
self.PLOT.FILECONF = COSMO_CONF + 'drawing.conf'
# Main window
# -----------
if self.Window_cfile is not None:
self.Window_cfile.lift()
return
self.Window_cfile = tk.Toplevel(self.master)
self.Window_cfile.title('Configuration file')
self.Window_cfile.resizable(width=False,height=False)
self.Window_cfile.protocol('WM_DELETE_WINDOW',_cancel)
font_bold = tkfont.Font(font='TkDefaultFont').copy()
font_bold['weight']='bold'
F0 = ttk.Frame(self.Window_cfile,borderwidth=5,padding=5)
ttk.Label(F0,text='Configuration PATH: ',
font=font_bold).grid(row=0,column=0)
ttk.Label(F0,text=COSMO_CONF_PATH,width=40,
justify='left').grid(row=0,column=1,columnspan=4)
ttk.Label(F0,text='Configuration name: ',
font=font_bold).grid(row=1,column=0)
bb = ttk.Label(F0,textvariable=new_conf,width=40,
justify='left')
bb.grid(row=1,column=1,columnspan=4)
ttk.Label(F0,text='Load configuration',
font=font_bold).grid(row=2,column=0)
ttk.Button(F0,text='Select',command=_select).grid(row=2,column=1,padx=3)
ttk.Label(F0,text='New configuration',
font=font_bold).grid(row=3,column=0)
aa = ttk.Entry(F0,textvariable=new_conf,width=30)
aa.grid(row=3,column=1,columnspan=3)
bb = ttk.Button(F0,text='Create',command=_create)
bb.grid(row=3,column=4,padx=3)
bb.bind("<Return>",lambda f: _create())
cancel = ttk.Button(F0,text='Cancel',command=_cancel)
cancel.grid(row=4,column=0,padx=3)
cancel.bind("<Return>",lambda e:_cancel())
done = ttk.Button(F0,text='Done',command=_done)
done.grid(row=4,column=1,padx=3)
done.bind("<Return>",lambda e:_done())
F0.grid()
# ==================================
def figure_save(self):
# ==================================
''' Saving Drawing configuration using json'''
toconsola('Saving figure ...',wid=self.cons)
CONF = []
# Add the main PLOT class:
#
conf = self.PLOT.conf_get()
CONF.append(conf)
# Add the FILES (SAIDIN, CONTOURS, VECTORS, TRAJECTORIES):
# Types: VEC, FLD, SAIDIN, FLOAT
# The result is a JSON list: element 0 holds the global PLOT
# configuration and each following element describes one layer.
for i in range(self.LAYERS.n):
TYPE = self.LAYERS.TYPE[i]
ii = self.LAYERS.TYPE_INDEX[i]
conf = {}
conf['FILENAME'] = self.LAYERS.FILENAME[i]
conf['TYPE'] = TYPE
conf['NREC'] = self.LAYERS.NREC[i]
if TYPE == 'FLD':
conf['CDF'] = self.CDF[ii].conf_get()
elif TYPE == 'VEC':
conf['VEC'] = self.VEC[ii].conf_get()
elif TYPE == 'SAIDIN':
conf['SAIDIN'] = self.SAIDIN.conf_get()
elif TYPE == 'FLOAT':
conf['FLOAT'] = self.FLOAT[ii].conf_get()
elif TYPE == 'MARKER':
conf['MARKER'] = self.MARKER[ii].conf_get()
elif TYPE == 'SHAPE':
conf['SHAPE'] = self.SHAPE[ii].conf_get()
elif TYPE == 'ELLIPSE':
conf['ELLIPSE'] = self.ELLIPSE[ii].conf_get()
elif TYPE == 'PATCH':
conf['PATCH'] = self.PATCH[ii].conf_get()
elif TYPE == 'FEATURE':
conf['FEATURE'] = self.FEATURE.DATA[ii].conf_get()
else:
toconsola('Unknown layer type',wid=self.cons)
return
CONF.append(conf)
# Request output configuration filename:
#
filetypes = [('COSMO-VIEW','.cvw')]
nn = filedialog.asksaveasfilename(title='Save plot configuration',
initialdir='./',
filetypes=filetypes,
confirmoverwrite=True)
if nn is None or len(nn) == 0:
return
# Write JSON file:
#
self.save_conf(CONF,nn)
toconsola('done !',wid=self.cons)
# ==================================
def figure_read(self,filename=None):
# ==================================
''' Load Figure configuration from json'''
self.cons = None
self.first = True
if filename is None:
nn = filedialog.askopenfilename(title='Load plot configuration',
initialdir='./')
if len(nn) == 0:
return
filename = '%s' % nn
toconsola('Restoring figure configuration from '+filename,wid=self.cons)
with open(filename) as infile:
CONF = json.load(infile)
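# CONF is a list: CONF[0] holds the global PLOT configuration and
# CONF[1:] describe the layers in the order they were saved.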
# The PLOT:
#
self.PLOT.conf_set(CONF[0])
if self.PLOT.ISOBAT_cropped:
self.isobath_crop()
# Initialize matplotlib
#
self.fig = None
self.ax = None
self.drawmap = True
#try:
# self.fig = plt.figure('COSMO-VIEW canvas', \
# figsize=self.PLOT.SIZE, \
# dpi=self.PLOT.DPI.get())
#except:
# print('Failure')
#
# self.fig.canvas.mpl_connect('close_event',self.on_closing_figure)
# self.fig.canvas.callbacks.connect('button_press_event',self.on_click)
# self.ax = self.fig.add_subplot(111)
# self.drawmap = True
for ii in range(1,len(CONF)):
filename = CONF[ii]['FILENAME']
if CONF[ii]['TYPE'] == 'FLD':
# Initialize contour class:
CDF = CONTOUR(filename)
CDF.FLD.open(filename,wid=self.cons)
nt = CDF.FLD.icdf.nt # Save the number of time records
# Update from CONF attributes:
#
CDF.conf_set(CONF[ii]['CDF'])
print('CDF.FLD.varname: ', CDF.FLD.varname)
print('CDF.FLD.varid: ', CDF.FLD.varid)
if self.first:
self.K.set(CDF.K.get())
self.L.set(CDF.L.get())
self.L_LIST = list(range(CDF.FLD.icdf.nt))
self.NL = len(self.L_LIST)
# Read data:
#
#self.read_lonlat(CDF,CDF.icdf.xname,CDF.icdf.yname)
#self.DepthandDate(CDF)
CDF.FLD.get_grid()
self.DepthandDate(CDF)
if CDF.SOURCE == 'FILE':
CDF.read(update_lims=False,wid=self.cons)
elif CDF.SOURCE == 'MEAN':
CDF.FLD.mean(nt,self.K.get(),wid=self.cons)
elif CDF.SOURCE == 'VARIANCE':
print('going to calculate the variance ...')
CDF.FLD.variance(nt,self.K.get(),wid=self.cons)
#self.read_CDF(CDF,update_lims=False)
#print(CDF.PLOT.CONTOUR_MIN.get())
#print(CDF.PLOT.CONTOUR_MAX.get())
#print(CDF.FLD.xx)
#print(CDF.FLD.yy)
self.ncdf += 1
self.CDF.append(CDF)
self.CDF_INDX.set(self.ncdf-1)
self.CDF_LIST = list(range(self.ncdf))
nt = CONF[ii]['NREC']
self.LAYERS.add(TYPE='FLD',Filename=filename,N=nt,wid=self.cons)
nm = self.LAYERS.n - 1
#self.LAYERS.INSEQUENCE[nm].set(CONF[ii]['INSEQUENCE'])
#self.LAYERS.SEQUENCER[nm].set(CONF[ii]['SEQUENCER'])
self.LAYERS.print()
#self.nfiles += 1
#self.FILENAMES.append(filename)
#self.FILETYPES.append('FLD')
#self.FILEORDER.append(self.ncdf-1)
#self.SEQUENCES.append(tk.BooleanVar(value=CONF[ii]['SEQUENCES']))
#self.SEQLEADER.append(tk.BooleanVar(value=CONF[ii]['SEQLEADER']))
#self.SEQNTIMES.append(CONF[ii]['SEQNTIMES'])
if self.first:
#self.TFILE = '%d' % self.nfiles
self.PLOT.TLABEL.set(CDF.DATE[self.L.get()])
self.lbox.configure(state='!disabled')
self.lbox['values'] = self.L_LIST
self.DATE = CDF.DATE.copy()
self.TIME = CDF.TIME.copy()
if self.L.get() == 0:
self.bprev.configure(state='disabled')
else:
self.bprev.configure(state='normal')
if self.L.get() == self.NL - 1:
self.bnext.configure(state='disabled')
else:
self.bnext.configure(state='normal')
#self.TFILE = '%d' % self.nfiles
self.PLOT.VIDEO_L2.set(len(self.DATE)-1)
self.PLOT.SFRAME_L2.set(len(self.DATE)-1)
self.first = False
if CONF[ii]['TYPE'] == 'VEC':
# Initialize classes:
#
VEC = VECTOR()
VEC.UFILENAME.set(filename)
VEC.U.nc = Dataset(filename)
VEC.U.icdf = tools.geocdf(filename, wid=self.cons)
nt = VEC.U.icdf.nt # Save the number of time records
# Check the Arakawa grid type and, if required, read the VFILENAME
#
vv = CONF[ii]['VEC']
VEC.grid_type.set(vv['GRID_TYPE'])
if VEC.two_files == 0:
vfilename = filename
else:
vfilename = vv['VFILENAME']
print('In read_figure, VEC.grid_type: ', VEC.grid_type.get())
VEC.VFILENAME.set(vfilename)
VEC.V.nc = Dataset(vfilename)
VEC.V.icdf = tools.geocdf(vfilename, wid=self.cons)
# Update from CONF attributes:
#
VEC.conf_set(CONF[ii]['VEC'])
# Read data:
#
VEC.U.get_info(wid=self.cons)
VEC.U.get_grid()
VEC.V.varname = VEC.vname.get()
VEC.V.ndims = VEC.V.icdf.ndims[VEC.V.varid]
VEC.V.get_info(wid=self.cons)
if VEC.grid_type.get() == 'A' or VEC.grid_type.get() == 'B':
VEC.V.icdf.VAR_MENU = VEC.U.icdf.VAR_MENU[:]
else:
VEC.V.get_grid()
if VEC.grid_type.get() == 'C':
VEC.U.icdf.grid2d = True
VEC.V.icdf.grid2d = True
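# On an Arakawa C-grid, U is staggered on the east/west cell faces
# and V on the north/south faces, so both are averaged onto the
# cell centers and the outer row/column is trimmed accordingly.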
# X-center
xmu0 = 0.5*(VEC.U.xx[:,:-1]+VEC.U.xx[:,1:])
xmv0 = VEC.V.xx[:,1:-1]
ymu0 = 0.5*(VEC.U.yy[:,:-1]+VEC.U.yy[:,1:])
ymv0 = VEC.V.yy[:,1:-1]
# Y-center
VEC.V.xx = 0.5*(xmv0[:-1,:]+xmv0[1:,:])
VEC.U.xx = xmu0[1:-1,:]
VEC.V.yy = 0.5*(ymv0[:-1,:]+ymv0[1:,:])
VEC.U.yy = ymu0[1:-1,:]
aa = VEC.U.xx.shape
print('New shape sizes: ', aa)
print('----------------------------')
VEC.K_LIST = list(range(VEC.U.icdf.nz))
VEC.L_LIST = list(range(VEC.U.icdf.nt))
VEC.Z_LIST = VEC.U.get_zlist()
VEC.T_LIST, VEC.DATE, VEC.TIME = VEC.U.get_tlist()
if VEC.SOURCE == 'FILE':
VEC.read(wid=self.cons)
elif VEC.SOURCE == 'MEAN':
VEC.U.mean(nt,self.K.get(),wid=self.cons)
VEC.V.mean(nt,self.K.get(),wid=self.cons)
# Make sure the missing values are masked NaNs. Note that
# masked_equal(x,np.nan) never matches (NaN != NaN), hence
# masked_invalid is used here:
_u = VEC.U.data.filled(fill_value=np.nan)
_v = VEC.V.data.filled(fill_value=np.nan)
u = np.ma.masked_invalid(_u); del _u
v = np.ma.masked_invalid(_v); del _v
if VEC.grid_type.get() == 'A' or VEC.grid_type.get() == 'B':
toconsola("Velocities in a A-grid",wid=self.cons)
VEC.U.data = u.copy()
VEC.V.data = v.copy()
elif VEC.grid_type.get() == 'C':
toconsola("Regrid C-grid velocities",wid=self.cons)
VEC.U.data = 0.5*(u[1:-1,:-1]+u[1:-1,1:])
VEC.V.data = 0.5*(v[:-1,1:-1]+v[1:,1:-1])
if self.first:
self.K.set(VEC.K.get())
self.L.set(VEC.L.get())
self.L_LIST = list(range(VEC.U.icdf.nt))
self.NL = len(VEC.L_LIST)
self.nvec += 1
self.VEC.append(VEC)
self.VEC_INDX.set(self.nvec-1)
self.VEC_LIST = list(range(self.nvec))
nt = CONF[ii]['NREC']
self.LAYERS.add(TYPE='VEC',Filename=filename,N=nt,wid=self.cons)
nm = self.LAYERS.n - 1
#self.LAYERS.INSEQUENCE[nm].set(CONF[ii]['INSEQUENCE'])
#self.LAYERS.SEQUENCER[nm].set(CONF[ii]['SEQUENCER'])
self.LAYERS.print()
#self.nfiles += 1
#self.FILENAMES.append(filename)
#self.FILETYPES.append('VEC')
#self.FILEORDER.append(self.nvec-1)
#self.SEQUENCES.append(tk.BooleanVar(value=CONF[ii]['SEQUENCES']))
#self.SEQLEADER.append(tk.BooleanVar(value=CONF[ii]['SEQLEADER']))
#self.SEQNTIMES.append(CONF[ii]['SEQNTIMES'])
if self.first:
#self.TFILE = '%d' % self.nfiles
self.PLOT.TLABEL.set(VEC.DATE[self.L.get()])
self.lbox.configure(state='!disabled')
self.lbox['values'] = self.L_LIST
self.DATE = VEC.DATE.copy()
self.TIME = VEC.TIME.copy()
if self.L.get() == 0:
self.bprev.configure(state='disabled')
else:
self.bprev.configure(state='normal')
if self.L.get() == self.NL - 1:
self.bnext.configure(state='disabled')
else:
self.bnext.configure(state='normal')
#self.TFILE = '%d' % self.nfiles
self.PLOT.VIDEO_L2.set(len(self.DATE)-1)
self.PLOT.SFRAME_L2.set(len(self.DATE)-1)
self.first = False
if CONF[ii]['TYPE'] == 'FLOAT':
# Initialize classes:
#
FLT = lagrangian.parameters()
toconsola(FLT.MESSAGE, wid=self.cons)
FLT.Read(filename)
# Update from CONF attributes:
FLT.conf_set(CONF[ii]['FLOAT'])
if self.first:
# Set Figure DATA and TIME reference:
self.DATE = FLT.DATE.copy()
self.TIME = FLT.TIME.copy()
self.PLOT.TLABEL.set(self.DATE[self.L.get()])
self.L_LIST = list(range(len(FLT.DATE)))
self.NL = len(self.L_LIST)
self.lbox.configure(state='!disabled')
self.lbox['values'] = self.L_LIST
if self.L.get() == 0:
self.bprev.configure(state='disabled')
else:
self.bprev.configure(state='normal')
if self.L.get() == self.NL - 1:
self.bnext.configure(state='disabled')
else:
self.bnext.configure(state='normal')
self.PLOT.VIDEO_L2.set(len(self.DATE)-1)
self.PLOT.SFRAME_L2.set(len(self.DATE)-1)
self.first = False
if len(self.TIME) > 0:
if FLT.CROP.get():
print('Cropping ...')
nt = FLT.nrecords
# First record at/after the first map date; the first record after
# the last map date marks the end. Fall back to the full range if
# the float records do not extend beyond the map time axis.
ppi = [i for i in range(nt) if FLT.DATE[i] >= self.DATE[0]]
ppf = [i for i in range(nt) if FLT.DATE[i] > self.DATE[-1]]
pi = ppi[0] if len(ppi) > 0 else 0
pf = ppf[0] - 1 if len(ppf) > 0 else nt - 1
print('Initial index : ', pi)
print('Final index : ', pf)
print(FLT.nfloats)
print(FLT.nrecords)
if FLT.nfloats > 1:
lon = FLT.lon[pi:pf+1,:]
lat = FLT.lat[pi:pf+1,:]
date = FLT.DATE[pi:pf+1]
TIME = FLT.TIME[pi:pf+1]
FLT.lon = lon
FLT.lat = lat
FLT.DATE = date
FLT.TIME = TIME
else:
lon = FLT.lon[pi:pf+1]
lat = FLT.lat[pi:pf+1]
date = FLT.DATE[pi:pf+1]
TIME = FLT.TIME[pi:pf+1]
FLT.lon = lon
FLT.lat = lat
FLT.DATE = date
FLT.TIME = TIME
FLT.nrecords = len(date)
print('Setting MAPX and MAPY ...')
FLT.MAPX = []
FLT.MAPY = []
FLT.Fx = []
FLT.Fy = []
if FLT.nfloats > 1:
for i in range(FLT.nfloats):
FLT.Fx.append(interpolate.interp1d(FLT.TIME,np.array(FLT.lon[:,i]),
bounds_error=False, fill_value=np.NaN))
FLT.MAPX.append(list(FLT.Fx[-1](self.TIME)))
FLT.Fy.append(interpolate.interp1d(FLT.TIME,np.array(FLT.lat[:,i]),
bounds_error=False, fill_value=np.NaN))
FLT.MAPY.append(list(FLT.Fy[-1](self.TIME)))
# Transpose FLT.MAPX and FLT.MAPY:
FLT.MAPX = np.array(FLT.MAPX).T.tolist()
FLT.MAPY = np.array(FLT.MAPY).T.tolist()
else:
FLT.Fx = interpolate.interp1d(FLT.TIME,FLT.lon,
bounds_error=False, fill_value=np.NaN)
FLT.MAPX = FLT.Fx(self.TIME)
FLT.Fy = interpolate.interp1d(FLT.TIME,FLT.lat,
bounds_error=False, fill_value=np.NaN)
FLT.MAPY = FLT.Fy(self.TIME)
self.nfloat += 1
self.FLOAT.append(FLT)
self.FLOAT_INDX.set(self.nfloat-1)
self.FLOAT_LIST = list(range(self.nfloat))
nt = CONF[ii]['NREC']
self.LAYERS.add(TYPE='FLOAT',Filename=filename,N=nt,wid=self.cons)
nm = self.LAYERS.n - 1
#self.LAYERS.INSEQUENCE[nm].set(CONF[ii]['INSEQUENCE'])
#self.LAYERS.SEQUENCER[nm].set(CONF[ii]['SEQUENCER'])
self.LAYERS.print()
if CONF[ii]['TYPE'] == 'SAIDIN':
# Initialize classes:
#
self.SAIDIN.FILENAME.set(filename)
self.SAIDIN.FLD.nc = Dataset(filename)
self.SAIDIN.FLD.icdf = tools.geocdf(filename, wid=self.cons)
# Update from CONF attributes:
#
self.SAIDIN.conf_set(CONF[ii]['SAIDIN'])
# Read the data:
self.SAIDIN.FLD.x = self.SAIDIN.FLD.nc.variables['lon'][:]
self.SAIDIN.FLD.y = self.SAIDIN.FLD.nc.variables['lat'][:]
self.SAIDIN.varname.set('mcsst')
self.SAIDIN.FLD.varname = 'mcsst'
self.SAIDIN.FLD.data = self.SAIDIN.FLD.nc.variables[self.SAIDIN.FLD.varname][0,:,:].squeeze()
self.SAIDIN.FLD.xx,self.SAIDIN.FLD.yy = np.meshgrid(self.SAIDIN.FLD.x,self.SAIDIN.FLD.y)
self.DepthandDate(self.SAIDIN)
if self.SAIDIN.landmask.get():
toconsola('Applying land/sea mask ...',wid=self.cons)
_a = self.SAIDIN.FLD.data.copy()
tmp = self.SAIDIN.FLD.nc.variables['lsmask'][0,:,:].squeeze()
msk = ma.masked_where(tmp==1,tmp)
self.SAIDIN.FLD.data = ma.array(_a,mask=msk).copy()
nt = CONF[ii]['NREC']
self.LAYERS.add(TYPE='SAIDIN',Filename=filename,N=nt,wid=self.cons)
nm = self.LAYERS.n - 1
#self.LAYERS.INSEQUENCE[nm].set(CONF[ii]['INSEQUENCE'])
#self.LAYERS.SEQUENCER[nm].set(CONF[ii]['SEQUENCER'])
self.LAYERS.print()
#self.nfiles += 1
#self.FILENAMES.append(filename)
#self.FILETYPES.append('SAIDIN')
#self.FILEORDER.append(0)
#self.SEQUENCES.append(tk.BooleanVar(value=CONF[ii]['SEQUENCES']))
#self.SEQLEADER.append(tk.BooleanVar(value=CONF[ii]['SEQLEADER']))
#self.SEQNTIMES.append(CONF[ii]['SEQNTIMES'])
if self.first:
self.DATE = self.SAIDIN.DATE.copy()
self.TIME = self.SAIDIN.TIME.copy()
self.first = False
if CONF[ii]['TYPE'] == 'MARKER':
# Initialize classes:
#
MARKER = geomarker.parameters()
if filename is None:
toconsola('MARKER data from configuration file',wid=self.cons)
else:
MARKER.Read(filename)
# Update from CONF attributes:
#
MARKER.conf_set(CONF[ii]['MARKER'])
self.nmarker += 1
self.MARKER.append(MARKER)
self.MARKER_INDX.set(self.nmarker-1)
self.MARKER_LIST = list(range(self.nmarker))
self.LAYERS.add(TYPE='MARKER',Filename=filename,N=len(MARKER.lon),wid=self.cons)
if CONF[ii]['TYPE'] == 'SHAPE':
# Initialize classes:
#
SHAPE = shape.parameters()
SHAPE.Read(filename)
# Update from CONF attributes:
#
SHAPE.conf_set(CONF[ii]['SHAPE'])
if not empty(SHAPE.LABEL_KEY.get()):
SHAPE.get_name()
if SHAPE.CROP.get() and SHAPE.type == 'POINT':
toconsola('Cropping shapefile type POINT',wid=self.cons)
nsp = SHAPE.n
x = SHAPE.lon[:].copy()
y = SHAPE.lat[:].copy()
s = SHAPE.name[:].copy()
SHAPE.lon = []
SHAPE.lat = []
SHAPE.name = []
xmin = self.PLOT.WEST.get() + self.PLOT.CROP_PAD.get()
xmax = self.PLOT.EAST.get() - self.PLOT.CROP_PAD.get()
ymin = self.PLOT.SOUTH.get() + self.PLOT.CROP_PAD.get()
ymax = self.PLOT.NORTH.get() - self.PLOT.CROP_PAD.get()
for i in range(nsp):
if x[i] > xmin:
if x[i] < xmax:
if y[i] > ymin:
if y[i] < ymax:
SHAPE.lon.append(x[i])
SHAPE.lat.append(y[i])
SHAPE.name.append(s[i])
SHAPE.n = len(SHAPE.lon)
self.nshape += 1
self.SHAPE.append(SHAPE)
self.SHAPE_INDX.set(self.nshape-1)
self.SHAPE_LIST = list(range(self.nshape))
self.LAYERS.add(TYPE='SHAPE',Filename=filename,N=SHAPE.n,wid=self.cons)
self.LAYERS.print()
if CONF[ii]['TYPE'] == 'ELLIPSE':
# Initialize classes:
#
ELLIPSE = ellipse.ELLIPSE()
# Update from CONF attributes:
#
ELLIPSE.conf_set(CONF[ii]['ELLIPSE'])
if ELLIPSE.SOURCE == 'VIEWER':
toconsola('ELLIPSE data from configuration file',wid=self.cons)
else:
ELLIPSE.Read(filename)
self.nellipse += 1
self.ELLIPSE.append(ELLIPSE)
self.ELLIPSE_INDX.set(self.nellipse-1)
self.ELLIPSE_LIST = list(range(self.nellipse))
self.LAYERS.add(TYPE='ELLIPSE',Filename=filename,N=ELLIPSE.n,wid=self.cons)
self.LAYERS.print()
if CONF[ii]['TYPE'] == 'FEATURE':
# Initialize classes:
#
FEATURE = feature.parameters()
# Update from CONF attributes:
#
FEATURE.conf_set(CONF[ii]['FEATURE'])
FEATURE.Read(filename)
self.FEATURE.n += 1
self.FEATURE.DATA.append(FEATURE)
self.FEATURE.INDX.set(self.FEATURE.n-1)
self.FEATURE.LIST = list(range(self.FEATURE.n))
self.LAYERS.add(TYPE='FEATURE',Filename=filename,N=FEATURE.n,wid=self.cons)
self.LAYERS.print()
if CONF[ii]['TYPE'] == 'PATCH':
# Initialize classes:
#
PATCH = patch.PATCH()
# Update from CONF attributes:
#
PATCH.conf_set(CONF[ii]['PATCH'])
self.npatch += 1
self.PATCH.append(PATCH)
self.PATCH_INDX.set(self.npatch-1)
self.PATCH_LIST = list(range(self.npatch))
self.LAYERS.add(TYPE='PATCH',Filename=None,N=1,wid=self.cons)
self.LAYERS.print()
self.make_plot()
# ===========================
def save_conf(self,conf,filename):
# ===========================
# Write JSON file:
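# ensure_ascii=False keeps any accented characters readable in the
# file; to_unicode() preserves Python 2/3 compatibility.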
with io.open(filename,'w',encoding='utf8') as outfile:
str_ = json.dumps(conf,ensure_ascii=False, \
sort_keys=True, \
indent=2, \
separators=(',',': '))
outfile.write(to_unicode(str_)+'\n')
# =============
def save(self):
# =============
'''If the output filename has already been set, save the plot;
otherwise ask for one first (saveas() already writes the figure)'''
if self.PLOT.OUT_FILENAME is None:
self.saveas()
return
# If the output filename exists, we save:
if self.PLOT.OUT_FILENAME is not None:
toconsola('Saving in '+self.PLOT.OUT_FILENAME,wid=self.cons)
self.fig.savefig(self.PLOT.OUT_FILENAME,
dpi=self.PLOT.DPI.get(),
bbox_inches='tight')
# ===============
def saveas(self):
# ===============
'''Get the output filename and save the plot'''
filetypes = [('PNG file','.png'),('EPS file','.eps'),('PDF file','.pdf')]
nn = tk.filedialog.asksaveasfilename(title='Save',
initialdir='./',
filetypes=filetypes,
confirmoverwrite=True)
if len(nn) == 0:
self.PLOT.OUT_FILENAME = None
else:
self.PLOT.OUT_FILENAME = '%s' % nn
toconsola('Saving in '+self.PLOT.OUT_FILENAME,wid=self.cons)
self.fig.savefig(self.PLOT.OUT_FILENAME,
dpi=self.PLOT.DPI.get(),
bbox_inches='tight')
# ======================
def widget_config(self):
# ======================
'''Options for the widget font type and size'''
#global WINDOW_FONT_TYPE_BACKUP
#global WINDOW_FONT_SIZE_BACKUP
def _cancel():
# ===========
self.PLOT.WINDOW_FONT_TYPE.set(WINDOW_FONT_TYPE_BACKUP)
self.PLOT.WINDOW_FONT_SIZE.set(WINDOW_FONT_SIZE_BACKUP)
self.Window_widgetconfig.destroy()
self.Window_widgetconfig = None
def _close():
# ===========
self.Window_widgetconfig.destroy()
self.Window_widgetconfig = None
def _apply():
# ===========
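# Propagate the chosen font family and size through the Tk option
# database and the shared named fonts, then rebuild the map-config
# window so it picks up the new defaults.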
font_name = self.PLOT.WINDOW_FONT_TYPE.get().split()[0]
font = '%s %d' % (font_name, self.PLOT.WINDOW_FONT_SIZE.get())
self.master.option_add('*Font',font)
self.default_font.configure(family=self.PLOT.WINDOW_FONT_TYPE.get().
split()[0])
self.default_font.configure(size=self.PLOT.WINDOW_FONT_SIZE.get())
self.default_font2.configure(size=self.PLOT.WINDOW_FONT_SIZE.get())
if self.Window_mapconfig is not None:
self.Window_mapconfig.destroy()
self.Window_mapconfig = None
self.map_config()
def _loadconf():
# =============
'''Load the Widget config parameters'''
conf = self.PLOT.conf_load(self.PLOT.FILECONF)
self.PLOT.WINDOW_FONT_TYPE.set(conf['WINDOW_FONT_TYPE'])
self.PLOT.WINDOW_FONT_SIZE.set(conf['WINDOW_FONT_SIZE'])
def _saveconf():
# =============
'''Save the Widget config parameters'''
if self.widget_nowarning.get() == False:
ans = askforpermission(self.Window_widgetconfig, \
'Are you sure ?', \
self.widget_nowarning)
if ans == False:
return
toconsola('Updating widget font default values',wid=self.cons)
conf = self.PLOT.conf_load(self.PLOT.FILECONF)
conf['WINDOW_FONT_TYPE'] = self.PLOT.WINDOW_FONT_TYPE.get()
conf['WINDOW_FONT_SIZE'] = self.PLOT.WINDOW_FONT_SIZE.get()
self.save_conf(conf,self.PLOT.FILECONF)
if self.Window_widgetconfig is not None:
self.Window_widgetconfig.lift()
return
WINDOW_FONT_TYPE_BACKUP = self.PLOT.WINDOW_FONT_TYPE.get()
WINDOW_FONT_SIZE_BACKUP = self.PLOT.WINDOW_FONT_SIZE.get()
self.Window_widgetconfig = tk.Toplevel(self.master)
self.Window_widgetconfig.title('Widget options')
self.Window_widgetconfig.resizable(width=True,height=True)
self.Window_widgetconfig.protocol('WM_DELETE_WINDOW',_close)
self.widget_nowarning = tk.BooleanVar()
self.widget_nowarning.set(False)
menubar = tk.Menu(self.Window_widgetconfig)
menu = tk.Menu(menubar,tearoff=0)
menubar.add_cascade(label='Configuration',menu=menu)
menu.add_command(label='Restore',command=_loadconf)
menu.add_command(label='Save',command=_saveconf)
try:
self.Window_widgetconfig.config(menu=menubar)
except AttributeError:
# master is a toplevel window (Python 2.4/Tkinter 1.63)
self.Window_widgetconfig.tk.call(self.Window_widgetconfig, "config", "-menu", menubar)
F0 = ttk.Frame(self.Window_widgetconfig,borderwidth=5,padding=5)
ttk.Label(F0,text='Font style').grid(row=0,column=0,padx=3,sticky='w')
mp = ttk.Combobox(F0,textvariable=self.PLOT.WINDOW_FONT_TYPE,values=self.FONT_TYPES,width=35)
mp.grid(row=0,column=1,columnspan=7,padx=3,sticky='w')
ttk.Label(F0,text='Font size').grid(row=1,column=0,padx=3,sticky='w')
mp = ttk.Combobox(F0,textvariable=self.PLOT.WINDOW_FONT_SIZE,values=self.FONT_SIZES,width=5)
mp.grid(row=1,column=1,columnspan=1,padx=3,sticky='w')
ttk.Button(F0,text='Cancel',command=_cancel).grid(row=2,column=5,padx=3)
ttk.Button(F0,text='Apply',command=_apply).grid(row=2,column=6,padx=3)
ttk.Button(F0,text='Close',command=_close).grid(row=2,column=7,padx=3)
F0.grid()
# =====================
def isobath_crop(self):
# =====================
'''Crop isobaths from domain'''
west = self.PLOT.WEST.get() - 5
east = self.PLOT.EAST.get() + 5
south = self.PLOT.SOUTH.get() - 5
north = self.PLOT.NORTH.get() + 5
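# The crop domain is padded by 5 degrees on each side so isobaths
# extend slightly beyond the visible map edges.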
toconsola('Cropping isobaths',wid=self.cons)
for i in range(self.PLOT.nisobat):
if self.PLOT.ISOBAT_SHOW[i]:
xo = self.PLOT.ISOBAT_DATA[i]['lon']
yo = self.PLOT.ISOBAT_DATA[i]['lat']
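# Iterate backwards so in-place deletions do not shift the
# indices of the points still to be checked.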
for ii in range(len(xo)-1,-1,-1):
if xo[ii] < west:
del xo[ii]
del yo[ii]
elif xo[ii] > east:
del xo[ii]
del yo[ii]
elif yo[ii] < south:
del xo[ii]
del yo[ii]
elif yo[ii] > north:
del xo[ii]
del yo[ii]
else:
pass
self.PLOT.ISOBAT_DATA[i]['lon'] = xo
self.PLOT.ISOBAT_DATA[i]['lat'] = yo
toconsola('done',wid=self.cons)
self.PLOT.ISOBAT_cropped = True
# ======================
def legend_config(self):
# ======================
'''Options for the isobath and marker legends'''
def _apply():
# ===========
self.make_plot()
def _close():
# ==========
self.make_plot()
self.Window_legendconfig.destroy()
self.Window_legendconfig = None
def _loadconf():
# =============
'''Load map configuration'''
conf = self.PLOT.conf_load(self.PLOT.FILECONF)
self.PLOT.conf_set(conf)
def _saveconf():
# =============
'''Save current map configuration as default'''
toconsola('Updating map default values',wid=self.cons)
conf = self.PLOT.conf_get()
self.save_conf(conf,self.PLOT.FILECONF)
self.Window_legendconfig = tk.Toplevel(self.master)
self.Window_legendconfig.title('Legend options')
self.Window_legendconfig.resizable(width=True,height=True)
self.Window_legendconfig.protocol('WM_DELETE_WINDOW',_close)
self.map_nowarning = tk.BooleanVar()
self.map_nowarning.set(False)
menubar = tk.Menu(self.Window_legendconfig)
menu = tk.Menu(menubar,tearoff=0)
menubar.add_cascade(label='Configuration',menu=menu)
menu.add_command(label='Restore',command=_loadconf)
menu.add_command(label='Save',command=_saveconf)
try:
self.Window_legendconfig.config(menu=menubar)
except AttributeError:
# master is a toplevel window (Python 2.4/Tkinter 1.63)
self.Window_legendconfig.tk.call(self.Window_legendconfig, "config", "-menu", menubar)
# Define tabs
self.legendtabs = ttk.Notebook(self.Window_legendconfig)
page1 = ttk.Frame(self.legendtabs)
page2 = ttk.Frame(self.legendtabs)
self.legendtabs.add(page1,text='Isobaths')
self.legendtabs.add(page2,text='Markers')
self.PLOT.ISOBAT_LEGEND.Winconfig(page1)
self.PLOT.LEGEND.Winconfig(page2)
self.legendtabs.grid()
frame5 = ttk.Frame(self.Window_legendconfig,borderwidth=5,padding=5)
ttk.Button(frame5,text='Apply',command=_apply).grid(row=0,column=5,padx=3)
ttk.Button(frame5,text='Close',command=_close).grid(row=0,column=6,padx=3)
frame5.grid(row=24,column=0,columnspan=5)
# ===================
def map_config(self):
# ===================
# Options for Map limits and colors
# EG Now the list of projections is recovered from map_proj in tools
# The default map resolution is 50m (1:50 million scale)
# EG pdict = {} substituted by map_proj
pdict = map_proj('defs')
rdict = {'110m':'Crude','50m':'Intermediate','10m':'High'}
LEGEND_LOCATION_LIST = ['best','upper right','upper left','lower left', \
'lower right', 'right', 'center left', 'center right', \
'lower center', 'upper center', 'center']
LEGEND_MODE_LIST = ['None','expand']
PSIZE = tk.StringVar()
PSIZE.set(str(self.PLOT.SIZE))
BACKUP = self.PLOT.conf_get()
font_norm = tkfont.Font(font='TkDefaultFont').copy()
font_bold = tkfont.Font(font='TkDefaultFont').copy()
font_bold['weight']='bold'
def _cancel():
self.PLOT.conf_set(BACKUP)
self.Window_mapconfig.destroy()
self.Window_mapconfig = None
if self.fig is not None:
self.make_plot()
def _close():
self.Window_mapconfig.destroy()
self.Window_mapconfig = None
#EG Projection selection
def pselection():
''' Set the enabled/disabled state of the entry fields according
to the selected projection: collect all the Entry widgets,
recover the state mask of the selected projection and update
the state of each widget accordingly.'''
entries_id = []
wids = self.fh.winfo_children()
for wid in wids:
if isinstance(wid,tk.ttk.Entry):
entries_id.append(wid)
new_proj = self.PLOT.MAP_PROJECTION.get()
mpl.config(text=new_proj,width=25)
proj_state = map_proj(new_proj)
var_state = list(map(lambda x: "normal" if x==1 else "disabled", proj_state["state"]))
toconsola("New PROJECTION selected: "+self.PLOT.MAP_PROJECTION.get(),wid=self.cons)
for i in range(len(entries_id)): entries_id[i]["state"]=var_state[i]
self.drawmap = True
def rselection():
mrl.config(text=rdict[self.PLOT.MAP_RESOLUTION.get()],width=10)
self.drawmap = True
# EG deprecated ?
def icselection():
ii = self.PLOT.ISOBAT_LABEL.index(self.PLOT.ISOBAT_ZPOINTER.get())
backup = self.PLOT.ISOBAT_COLOR[ii].get()
rgb, hx = askcolor(color=self.PLOT.ISOBAT_COLOR[ii].get(),
parent=self.master)
if hx is None:
self.PLOT.ISOBAT_COLOR[ii].set(backup)
else:
self.PLOT.ISOBAT_COLOR[ii].set(hx)
def lims_reset():
# ================
''' Resets the domain and grid to the default values'''
conf = self.PLOT.conf_load(self.PLOT.FILECONF)
self.PLOT.WEST.set(conf['WEST'])
self.PLOT.EAST.set(conf['EAST'])
self.PLOT.SOUTH.set(conf['SOUTH'])
self.PLOT.NORTH.set(conf['NORTH'])
self.PLOT.MERIDIAN_INI.set(conf['MERIDIAN_INI'])
self.PLOT.MERIDIAN_FIN.set(conf['MERIDIAN_FIN'])
self.PLOT.MERIDIAN_INT.set(conf['MERIDIAN_INT'])
self.PLOT.PARALLEL_INI.set(conf['PARALLEL_INI'])
self.PLOT.PARALLEL_FIN.set(conf['PARALLEL_FIN'])
self.PLOT.PARALLEL_INT.set(conf['PARALLEL_INT'])
self.drawmap = True
self.make_plot()
def iload():
# =================
'''Load from external file the selected isobaths'''
for i in range(self.PLOT.nisobat):
if self.PLOT.ISOBAT_SELEC[i].get():
filename = self.PLOT.ISOBAT_PATH.get() + \
'/%04d' % self.PLOT.ISOBAT_Z[i] + '.dat'
toconsola("New PROJECTION selected: "+self.PLOT.MAP_PROJECTION.get(),wid=self.cons)
try:
self.PLOT.ISOBAT_DATA[i] = read_lines(filename)
self.PLOT.ISOBAT_SHOW[i] = True
wwr.configure(font=font_norm)
wwr.configure(foreground='#125704')
wwr['text'] = 'Isobaths have been loaded'
self.PLOT.ISOBAT_loaded = True
except:
messagebox.showinfo(message='Error: unable to read '+filename)
self.PLOT.ISOBAT_DATA[i] = None
self.PLOT.ISOBAT_SHOW[i] = False
self.PLOT.ISOBAT_NPLOT = sum(self.PLOT.ISOBAT_SHOW)
if self.PLOT.ISOBAT_loaded:
wlr.configure(state='enabled')
else:
wlr.configure(state='disabled')
self.PLOT.ISOBAT_cropped = False
def _pselect():
# =============
nn = tk.filedialog.askdirectory(parent=self.Window_mapconfig)
if not empty(nn):
self.PLOT.ISOBAT_PATH.set(nn)
def select_isobaths():
# ====================
some_selected = False
for i in range(self.PLOT.nisobat):
if self.PLOT.ISOBAT_SELEC[i].get():
some_selected = True
if some_selected:
wwr['text'] = 'Isobaths need to be loaded'
wwr.configure(font=font_bold)
wwr.configure(foreground='red')
self.PLOT.ISOBAT_selected = True
self.PLOT.ISOBAT_loaded = False
else:
wwr['text'] = 'No isobaths have been selected'
wwr.configure(font=font_norm)
wwr.configure(foreground='black')
self.PLOT.ISOBAT_selected = False
if self.PLOT.ISOBAT_selected:
wli.configure(state='enabled')
else:
wli.configure(state='disabled')
for i in range(self.PLOT.nisobat):
self.PLOT.ISOBAT_DATA[i] = None
self.PLOT.ISOBAT_SHOW[i] = False
self.PLOT.ISOBAT_NPLOT = 0
#EG We need to set a new projection object
def _updated():
# =============
self.drawmap = True
self.make_plot()
def _apply():
# ===========
toconsola("(Apply) Drawing...wait",wid=self.cons)
self.make_plot()
toconsola("Done !",wid=self.cons)
def _done():
# ==========
toconsola("(Done) Drawing...wait",wid=self.cons)
self.make_plot()
toconsola("Done !",wid=self.cons)
self.Window_mapconfig.destroy()
self.Window_mapconfig = None
def _loadconf():
# =============
'''Load map configuration'''
conf = self.PLOT.conf_load(self.PLOT.FILECONF)
self.PLOT.conf_set(conf)
def _saveconf():
# =============
'''Save current map configuration as default'''
toconsola("Saving map default values",wid=self.cons)
conf = self.PLOT.conf_get()
self.save_conf(conf,self.PLOT.FILECONF)
def legend_location():
# ====================
''' Process the location combobox'''
location_name = loc.get()
self.PLOT.LEGEND.LOC.set(LEGEND_LOCATION_LIST.index(location_name))
def legend_mode():
# ================
''' Process the mode combobox'''
mode_name = mod.get()
self.PLOT.LEGEND.MODE.set(LEGEND_MODE_LIST.index(mode_name))
def sizeupdate():
# ===============
self.PLOT.SIZE = ast.literal_eval(PSIZE.get())
plt.close(self.fig)
self.fig = None
self.make_plot()
def _calculator():
# ================
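# Estimate the map width and height (km) as great-circle (haversine)
# distances along the central parallel and central meridian.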
SOUTH = float(self.PLOT.SOUTH.get())
NORTH = float(self.PLOT.NORTH.get())
WEST = float(self.PLOT.WEST.get())
EAST = float(self.PLOT.EAST.get())
LON_0 = 0.5*(WEST+EAST)
LAT_0 = 0.5*(SOUTH+NORTH)
width = haversine((WEST,LAT_0),(EAST,LAT_0))
height = haversine((LON_0,SOUTH),(LON_0,NORTH))
self.PLOT.LON_0.set(LON_0)
self.PLOT.LAT_0.set(LAT_0)
self.PLOT.WIDTH.set(width)
self.PLOT.HEIGHT.set(height)
if self.Window_mapconfig is not None:
self.Window_mapconfig.lift()
return
self.Window_mapconfig = tk.Toplevel(self.master)
self.Window_mapconfig.title('Map options')
self.Window_mapconfig.resizable(width=True,height=True)
self.Window_mapconfig.protocol('WM_DELETE_WINDOW',_close)
self.map_nowarning = tk.BooleanVar()
self.map_nowarning.set(False)
menubar = tk.Menu(self.Window_mapconfig)
menu = tk.Menu(menubar,tearoff=0)
menubar.add_cascade(label='Configuration',menu=menu)
menu.add_command(label='Restore',command=_loadconf)
menu.add_command(label='Save',command=_saveconf)
try:
self.Window_mapconfig.config(menu=menubar)
except AttributeError:
# master is a toplevel window (Python 2.4/Tkinter 1.63)
self.Window_mapconfig.tk.call(self.Window_mapconfig, "config", "-menu", menubar)
# Define tabs
maptabs = ttk.Notebook(self.Window_mapconfig)
page1 = ttk.Frame(maptabs)
page2 = ttk.Frame(maptabs)
page3 = ttk.Frame(maptabs)
page4 = ttk.Frame(maptabs)
page5 = ttk.Frame(maptabs)
page7 = ttk.Frame(maptabs)
page8 = ttk.Frame(maptabs)
maptabs.add(page1,text=' Domain ')
maptabs.add(page2,text=' Background ')
maptabs.add(page3,text=' Isobaths ')
maptabs.add(page4,text=' Grid ')
maptabs.add(page5,text=' Labels ')
maptabs.add(page7,text=' Scale ')
maptabs.add(page8,text=' Other ')
#EG We get the projection from tools.map_proj instead of a list
PROJECTION_LIST = map_proj('lista')
#EG PAGE 1
f1 = ttk.Frame(page1,borderwidth=5,padding=5)
ttk.Label(f1,text='Map Projection').grid(row=0,column=0,padx=3,sticky='w')
mp = ttk.Combobox(f1,
textvariable=self.PLOT.MAP_PROJECTION,
values=PROJECTION_LIST,width=14)
mp.grid(row=0,column=1,padx=3)
mp.bind('<<ComboboxSelected>>',lambda e: pselection())
mpl = ttk.Label(f1,
text=pdict[self.PLOT.MAP_PROJECTION.get()],width=40)
mpl.grid(row=0,column=2,columnspan=3,padx=3)
ttk.Label(f1,text='Map Resolution').grid(row=1,column=0,padx=3,sticky='w')
#EG values=('c','l','i','h','f') changed by ('110m','50m','10m')
mr = ttk.Combobox(f1,
textvariable=self.PLOT.MAP_RESOLUTION,
values=('110m','50m','10m'),width=7,justify="center")
mr.grid(row=1,column=1,padx=3,sticky='w')
mr.bind('<<ComboboxSelected>>',lambda e: rselection())
mrl = ttk.Label(f1,text=rdict[self.PLOT.MAP_RESOLUTION.get()],width=10)
mrl.grid(row=1,column=2,columnspan=2,padx=3,sticky='w')
ttk.Label(f1,text='EPSG').grid(row=2,column=0,padx=3,sticky='w')
ttk.Entry(f1,textvariable=self.PLOT.EPSG,width=7,justify="center").grid(row=2,column=1,padx=3,sticky='w')
f1.grid(row=0,column=0)
f2 = ttk.Frame(page1,borderwidth=5,padding=5,relief='sunken')
ttk.Label(f2,text='Plot limits',font=font_bold).grid(row=0,column=0,padx=3,sticky='w')
ttk.Label(f2,text='North').grid(row=1,column=3,pady=5,padx=3)
eno = ttk.Entry(f2,textvariable=self.PLOT.NORTH,width=10,justify="center")
eno.grid(row=2,column=3,pady=5,padx=3)
eno.bind('<Return>',lambda e:_updated())
ttk.Label(f2,text='West').grid(row=3,column=1,pady=5,padx=3)
ewe = ttk.Entry(f2,textvariable=self.PLOT.WEST,width=10,justify="center")
ewe.grid(row=3,column=2,pady=5,padx=3)
ewe.bind('<Return>',lambda e:_updated())
eea = ttk.Entry(f2,textvariable=self.PLOT.EAST,width=10,justify="center")
eea.grid(row=3,column=4,pady=5,padx=3,sticky='w')
eea.bind('<Return>',lambda e:_updated())
ttk.Label(f2,text='East').grid(row=3,column=5,pady=5,padx=3)
eso = ttk.Entry(f2,textvariable=self.PLOT.SOUTH,width=10,justify="center")
eso.grid(row=4,column=3,pady=5,padx=3)
eso.bind('<Return>',lambda e:_updated())
ttk.Label(f2,text='South').grid(row=5,column=3,pady=5,padx=3)
ttk.Button(f2,text='Reset',command=lims_reset).grid(row=6,column=5)
f2.grid(row=1,column=0,padx=30,sticky='w')
#EG We recover the full properties of each projection
proj_state = map_proj(self.PLOT.MAP_PROJECTION.get())
var_state = list(map(lambda x: "normal" if x==1 else "disabled", proj_state["state"]))
self.params = {"central_longitude":self.PLOT.MAP_PROJ_LAT_0.get(),
"central_latitude":self.PLOT.MAP_PROJ_LON_0.get(),
"min_latitude":self.PLOT.MAP_PROJ_MIN_LAT.get(),
"max_latitude":self.PLOT.MAP_PROJ_MAX_LAT.get(),
"false_easting":self.PLOT.MAP_PROJ_F_EAST.get(),
"false_northing":self.PLOT.MAP_PROJ_F_NORTH.get(),
"latitude_true_scale":self.PLOT.MAP_PROJ_LAT_T_SCA.get(),
"true_scale_latitude":self.PLOT.MAP_PROJ_T_SCA_LAT.get(),
"scale_factor":self.PLOT.MAP_PROJ_SCA_FAC.get(),
"satellite_height":self.PLOT.MAP_PROJ_SATELLITE_HEIGHT.get(),
"sweep_axis":self.PLOT.MAP_PROJ_SWEEP_AXIS.get()}
var_proj = [self.PLOT.MAP_PROJ_LAT_0, self.PLOT.MAP_PROJ_LON_0,
self.PLOT.MAP_PROJ_F_EAST, self.PLOT.MAP_PROJ_F_NORTH,
self.PLOT.MAP_PROJ_MIN_LAT, self.PLOT.MAP_PROJ_MAX_LAT,
self.PLOT.MAP_PROJ_LAT_T_SCA, self.PLOT.MAP_PROJ_T_SCA_LAT,
self.PLOT.MAP_PROJ_SCA_FAC, self.PLOT.MAP_PROJ_SWEEP_AXIS,
self.PLOT.MAP_PROJ_SATELLITE_HEIGHT]
var_txt = ['Central Longitude','Central Latitude',
'False Easting','False Northing',
'Min. Latitude','Max. Latitude',
'Latitude true scale','True scale Latitude',
'Scale Factor','Sweep Axis',
'Satellite Height']
self.fh = ttk.Frame(page1,borderwidth=5,padding=5)
ivar = 0
for i in range(5):
ivar = 2*i
ttk.Label(self.fh,text=var_txt[ivar]).grid(row=i,column=0,padx=3,sticky='e')
ttk.Entry(self.fh,textvariable=var_proj[ivar],state=var_state[ivar], width=10). \
grid(row=i,column=1,padx=3,sticky='w')
ttk.Label(self.fh,text=var_txt[ivar+1]).grid(row=i,column=2,padx=3,sticky='e')
ttk.Entry(self.fh,textvariable=var_proj[ivar+1],state=var_state[ivar+1], width=10). \
grid(row=i,column=3,padx=3,sticky='w')
ttk.Label(self.fh,text=var_txt[10]).grid(row=5,column=0,padx=3,sticky='e')
ttk.Entry(self.fh,textvariable=var_proj[10], state=var_state[10], width=10). \
grid(row=5,column=1,padx=3,sticky='w')
ttk.Button(self.fh,text='Update projection',command=_updated). \
grid(row=6,column=0,pady=10,columnspan=4, sticky='ew')
'''
ttk.Label(fh,text='Width').grid(row=0,column=0,padx=3,sticky='e')
ttk.Entry(fh,textvariable=self.PLOT.WIDTH, width=10).grid(row=0,column=1,padx=3,sticky='w')
ttk.Label(fh,text='Optional map keywords').grid(row=0,column=2,padx=3,sticky='e')
ttk.Button(fh,text='Estimate', command=_calculator).grid(row=0,column=3,padx=3,sticky='ew')
ttk.Label(fh,text='Height').grid(row=1,column=0,padx=3,sticky='e')
ttk.Entry(fh,textvariable=self.PLOT.HEIGHT, width=10).grid(row=1,column=1,padx=3,sticky='w')
ttk.Label(fh,text='Lon_0').grid(row=2,column=0,padx=3,sticky='e')
ttk.Entry(fh,textvariable=self.PLOT.LON_0, width=10).grid(row=2,column=1,padx=3,sticky='w')
ttk.Label(fh,text='Lat_0').grid(row=3,column=0,padx=3,sticky='e')
ttk.Entry(fh,textvariable=self.PLOT.LAT_0, width=10).grid(row=3,column=1,padx=3,sticky='w')
ttk.Label(fh,text='Satellite height').grid(row=4,column=0,padx=3,sticky='e')
ttk.Entry(fh,textvariable=self.PLOT.SATELLITE_HEIGHT, width=10).grid(row=4,column=1,padx=3,sticky='w')
'''
self.fh.grid(row=2,column=0,padx=15,sticky='ew')
#EG PAGE 2, Background and Features
f3 = ttk.Frame(page2,borderwidth=5,padding=5)
# Styles
self.sland, self.swater = ttk.Style(), ttk.Style()
self.scoast, self.scount = ttk.Style(), ttk.Style()
self.sriv, scenter = ttk.Style(), ttk.Style()
self.sland.configure("sland.TLabel",background=self.PLOT.LAND_COLOR.get(),anchor="center")
self.swater.configure("swater.TLabel",background=self.PLOT.WATER_COLOR.get(),anchor="center")
self.scoast.configure("scoast.TLabel",background=self.PLOT.COASTLINE_COLOR.get(),anchor="center")
self.scount.configure("scount.TLabel",background=self.PLOT.COUNTRYLINE_COLOR.get(),anchor="center")
self.sriv.configure("sriv.TLabel",background=self.PLOT.RIVERS_COLOR.get(),anchor="center")
scenter.configure("scenter.TEntry",anchor="center")
tpad = ttk.Style()
tpad.configure("tpad.TLabelframe",padding=[20,5,5,10])
#Land & Sea
f3_b=ttk.LabelFrame(f3,text='Basic',borderwidth=5,style='tpad.TLabelframe')
ttk.Label(f3_b,text='Continents').grid(row=0,column=0,padx=5)
self.LLabel = ttk.Label(f3_b,textvariable=self.PLOT.LAND_COLOR,width=7,style="sland.TLabel")
self.LLabel.grid(row=0,column=1,padx=5)
ttk.Button(f3_b,text='Select',command=lambda:colsel(self.PLOT.LAND_COLOR, \
self.sland,self.LLabel,"sland.TLabel",master=self.Window_mapconfig)). \
grid(row=0,column=2,padx=5,sticky='w')
ttk.Label(f3_b,text='Zorder').grid(row=0,column=3,padx=5,sticky='e')
ttk.Entry(f3_b,textvariable=self.PLOT.LAND_ZORDER,width=4).grid(row=0,column=4,padx=5,sticky='e')
ttk.Label(f3_b,text='Sea').grid(row=1,column=0,padx=5)
self.WLabel = ttk.Label(f3_b,textvariable=self.PLOT.WATER_COLOR,width=7,style="swater.TLabel")
self.WLabel.grid(row=1,column=1,padx=5)
ttk.Button(f3_b,text='Select',command=lambda:colsel(self.PLOT.WATER_COLOR, \
self.swater,self.WLabel,"swater.TLabel",master=self.Window_mapconfig)). \
grid(row=1,column=2,padx=5,sticky='w')
ttk.Label(f3_b,text='Zorder').grid(row=1,column=3,padx=5,sticky='e')
ttk.Entry(f3_b,textvariable=self.PLOT.WATER_ZORDER,width=4).grid(row=1,column=4,padx=5,sticky='e')
f3_b.grid(row=0,column=0,padx=5,pady=10,sticky='ewsn')
# Features: Coastlines
f3_c=ttk.LabelFrame(f3,text='Coastlines',borderwidth=5,style='tpad.TLabelframe')
ttk.Label(f3_c,text='').grid(row=0,column=0)
ttk.Checkbutton(f3_c,text=' Show',variable=self.PLOT.COASTLINE_SHOW). \
grid(row=0,column=1,columnspan=2,padx=3,sticky='w')
ttk.Label(f3_c,text='Width').grid(row=0,column=3)
ttk.Label(f3_c,text='Color').grid(row=0,column=4,columnspan=2)
ttk.Label(f3_c,text='').grid(row=1,column=0)
ttk.Label(f3_c,text='Natural-Earth').grid(row=1,column=1,padx=3,sticky='w')
ttk.Radiobutton(f3_c,text=' Show',variable=self.PLOT.COASTLINE_SOURCE,value=1).\
grid(row=1,column=2,padx=7)
ttk.Entry(f3_c,textvariable=self.PLOT.COASTLINE_WIDTH,width=7,justify="center"). \
grid(row=1,column=3,padx=3,sticky='we')
self.CoLabel = ttk.Label(f3_c,textvariable=self.PLOT.COASTLINE_COLOR,width=7,style="scoast.TLabel")
self.CoLabel.grid(row=1,column=4,padx=3)
ttk.Button(f3_c,text='Select',command=lambda:colsel(self.PLOT.COASTLINE_COLOR, \
self.scoast,self.CoLabel,"scoast.TLabel",master=self.Window_mapconfig)). \
grid(row=1,column=5,padx=3,sticky='ew')
ttk.Label(f3_c,text='EMODNET').grid(row=2,column=1,padx=5,sticky='w')
ttk.Radiobutton(f3_c,text=' Show',variable=self.PLOT.COASTLINE_SOURCE,value=2). \
grid(row=2,column=2,padx=5)
ttk.Label(f3_c,text='Zorder').grid(row=2,column=3,padx=5,sticky='e')
ttk.Entry(f3_c,textvariable=self.PLOT.COASTLINE_ZORDER,width=4).grid(row=2,column=4,padx=5,sticky='e')
f3_c.grid(row=1,column=0,padx=5,pady=10,sticky='ewsn')
# Miscellaneous features
f3_m=ttk.LabelFrame(f3,text='Miscelanea',borderwidth=5,style='tpad.TLabelframe')
ttk.Label(f3_m,text='Countryline').grid(row=0,column=0,sticky='w')
ttk.Checkbutton(f3_m,text=' Show',variable=self.PLOT.COUNTRYLINE_SHOW). \
grid(row=0,column=1,padx=3)
ttk.Entry(f3_m,textvariable=self.PLOT.COUNTRYLINE_WIDTH,width=7,justify="center"). \
grid(row=0,column=2,padx=3,sticky='we')
self.CouLabel = ttk.Label(f3_m,textvariable=self.PLOT.COUNTRYLINE_COLOR,width=7,style="scount.TLabel")
self.CouLabel.grid(row=0,column=3,padx=3)
ttk.Button(f3_m,text='Select',command=lambda:colsel(self.PLOT.COUNTRYLINE_COLOR, \
self.scount,self.CouLabel,"scount.TLabel",master=self.Window_mapconfig)).\
grid(row=0,column=4,padx=3,sticky='ew')
ttk.Label(f3_m,text='Rivers').grid(row=1,column=0,padx=3,sticky='w')
ttk.Checkbutton(f3_m,text=' Show',variable=self.PLOT.RIVERS_SHOW). \
grid(row=1,column=1,padx=3)
ttk.Entry(f3_m,textvariable=self.PLOT.RIVERS_WIDTH,width=7,justify="center"). \
grid(row=1,column=2,padx=3,sticky='we')
self.RLabel = ttk.Label(f3_m,textvariable=self.PLOT.RIVERS_COLOR,width=7,style="sriv.TLabel")
self.RLabel.grid(row=1,column=3,padx=3)
ttk.Button(f3_m,text='Select',command=lambda:colsel(self.PLOT.RIVERS_COLOR, \
self.sriv,self.RLabel,"sriv.TLabel",master=self.Window_mapconfig)). \
grid(row=1,column=4,padx=3,sticky='ew')
f3_m.grid(row=2,column=0,padx=5,pady=10,sticky='ewsn')
#EG RELIEF AND ISOBATHS
f3_r = ttk.LabelFrame(f3,text='Earth Relief (WMS Tiles)',borderwidth=5,style='tpad.TLabelframe')
ttk.Checkbutton(f3_r,text=' Show',variable=self.PLOT.RELIEF_SHOW). \
grid(row=0,column=0,columnspan=3,padx=3,sticky='w')
ttk.Label(f3_r,text='GEBCO service').grid(row=1,column=0,padx=5,sticky='w')
ttk.Radiobutton(f3_r,text=' Show', variable=self.PLOT.RELIEF, value=1).\
grid(row=1,column=1,padx=3)
ttk.Label(f3_r,text='Land & Ocean relief',width=25). \
grid(row=1,column=2,padx=3)
ttk.Label(f3_r,text='EMODNET service').grid(row=2,column=0,padx=5,pady=10,sticky='w')
ttk.Radiobutton(f3_r ,text=' Show',variable=self.PLOT.RELIEF, value=2). \
grid(row=2,column=1,padx=5,pady=10)
ttk.Label(f3_r,text='Land & Ocean relief',width=25). \
grid(row=2,column=2,padx=3,pady=10)
f3_r.grid(row=3,column=0,padx=5,pady=10,sticky='ewsn')
f3.grid()
#EG PAGE 3, ISOBATHS
f4a=ttk.Frame(page3,borderwidth=5,padding=5)
ttk.Label(f4a,text='EMODNET Depth contours').grid(row=0,column=0,padx=5,pady=10,sticky='w')
ttk.Checkbutton(f4a,text=' Show',variable=self.PLOT.EMODNET_ISO). \
grid(row=0,column=1,padx=5,pady=10,columnspan=2)
f4a.grid(row=0,column=0,pady=10,padx=5,ipadx=5,sticky='w')
f4aa=ttk.LabelFrame(page3,text='Custom Isobaths (meters)',borderwidth=5,padding=5)
ttk.Label(f4aa,text='Path:',justify='right').grid(row=0,column=0)
ttk.Entry(f4aa,textvariable=self.PLOT.ISOBAT_PATH, \
justify='left',width=50).grid(row=0,column=1,padx=3,pady=10)
ttk.Button(f4aa,text='Select',command=_pselect).grid(row=0,column=2)
f4b = tk.LabelFrame(f4aa,text='Isobaths (meters)',borderwidth=5,relief='sunken')
self.w = []
for i in range(self.PLOT.nisobat):
self.w.append(tk.Checkbutton(f4b,text=str(self.PLOT.ISOBAT_Z[i]), \
variable=self.PLOT.ISOBAT_SELEC[i], \
command=select_isobaths,justify='right'))
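# Lay the isobath checkbuttons out in rows of eight: ii runs over
# columns and jj over rows, starting at row 1.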
ii, jj = 0, 1
for i in range(self.PLOT.nisobat):
self.w[i].grid(row=jj,column=ii,sticky='w')
ii += 1
if ii > 7:
ii = 0
jj += 1
wwr = ttk.Label(f4b,width=26,justify='left')
wwr.grid(row=4,column=0,columnspan=3,sticky='w',padx=5)
if self.PLOT.ISOBAT_selected:
if self.PLOT.ISOBAT_loaded:
wwr.configure(font=font_norm)
wwr.configure(foreground='#125704')
wwr['text'] = 'Isobaths have been loaded'
else:
wwr.configure(font=font_bold)
wwr.configure(foreground='red')
wwr['text'] = 'Isobaths need to be loaded'
else:
wwr['text'] = 'No isobaths have been selected'
wwr.configure(font=font_norm)
wwr.configure(foreground='black')
wli = ttk.Button(f4b,text='Load isobaths',command=iload)
wli.grid(row=4,column=3,columnspan=2,padx=3,sticky='ew')
wlr = ttk.Button(f4b,text='Crop isobaths',command=self.isobath_crop)
wlr.grid(row=4,column=5,columnspan=2,padx=3,sticky='ew')
if self.PLOT.ISOBAT_selected:
wli.configure(state='enabled')
else:
wli.configure(state='disabled')
if self.PLOT.ISOBAT_loaded:
wlr.configure(state='enabled')
else:
wlr.configure(state='disabled')
f4b.grid(row=1,column=0,columnspan=3,sticky='we',padx=10)
# ....................
def update_name():
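# Point the style/width/color widgets at the Tk variables of the
# isobath currently selected in the pointer combobox.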
ii = self.PLOT.ISOBAT_LABEL.index(self.PLOT.ISOBAT_ZPOINTER.get())
wly['textvariable'] = self.PLOT.ISOBAT_STYLE[ii]
wlw['textvariable'] = self.PLOT.ISOBAT_WIDTH[ii]
wlc['textvariable'] = self.PLOT.ISOBAT_COLOR[ii]
# ....................
# Select the style, width and color of isobaths
f4c = tk.Frame(f4aa,borderwidth=5)
ii = self.PLOT.ISOBAT_LABEL.index(self.PLOT.ISOBAT_ZPOINTER.get())
wln = ttk.Combobox(f4c,width=10,justify="center",
textvariable=self.PLOT.ISOBAT_ZPOINTER,
values=self.PLOT.ISOBAT_LABEL)
wln.grid(row=0,column=0, padx=10)
wln.bind('<<ComboboxSelected>>',lambda e: update_name())
ttk.Label(f4c,text='Line style').grid(row=0,column=1,padx=5)
wly = ttk.Combobox(f4c,textvariable=self.PLOT.ISOBAT_STYLE[ii],
width=4,justify="center",
values=['-',':','--','-.',' '])
wly.grid(row=0,column=2,padx=5)
ttk.Label(f4c,text='Line width').grid(row=0,column=3,padx=5)
wlw = ttk.Entry(f4c,textvariable=self.PLOT.ISOBAT_WIDTH[ii],
width=4)
wlw.grid(row=0,column=4)
ttk.Label(f4c,text='Line color').grid(row=0,column=5,padx=3)
wlc = ttk.Entry(f4c, textvariable=self.PLOT.ISOBAT_COLOR[ii],width=10)
wlc.grid(row=0,column=6)
ttk.Button(f4c,text='Select',command=icselection).grid(row=0,column=7)
wls = ttk.Checkbutton(f4c,variable=self.PLOT.ISOBAT_LABEL_SHOW)
wls.grid(row=1, column=6, sticky='e')
# ....................
def cgrad():
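# Color gradient over the isobaths: sample two endpoints of the
# matplotlib Blues colormap and step linearly between them, so
# isobath i gets color R0 + i*(R1-R0)/(N-1) with alpha fixed to 1.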
R0 = CM.Blues(80)
R1 = CM.Blues(255)
N = self.PLOT.nisobat
Ra = [(R1[0]-R0[0])/(N-1),(R1[1]-R0[1])/(N-1),(R1[2]-R0[2])/(N-1),1]
for i in range(N):
self.PLOT.ISOBAT_COLOR[i].set([R0[0]+Ra[0]*i,
R0[1]+Ra[1]*i,
R0[2]+Ra[2]*i,
1])
# ....................
ttk.Button(f4c,text='Color grad',command=cgrad).grid(row=1,column=5,padx=3)
ttk.Label(f4c,text='Label isobaths').grid(row=1,column=7,sticky='w')
f4c.grid(row=2,column=0,columnspan=3,padx=10)
f4aa.grid(row=1,column=0,pady=10,padx=5,ipadx=5)
#EG PAGE 4
font_bold = tkfont.Font(font='TkDefaultFont').copy()
font_bold['weight']='bold'
self.sgrid, self.sfgrid = ttk.Style(), ttk.Style()
self.sgrid.configure("sgrid.TLabel",background=self.PLOT.GRID_COLOR.get(),anchor="center")
self.sfgrid.configure("sfgrid.TLabel",background=self.PLOT.GRID_FONTCOLOR.get(),anchor="center")
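# Color-swatch pattern used below: each swatch is a ttk.Label whose
# style background mirrors a Tk color variable; colsel() updates
# both the variable and the style when a new color is picked.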
f5 = ttk.Frame(page4,padding=5)
ttk.Label(f5,text='Show grid').grid(row=0,column=1,padx=3,sticky='e')
ttk.Checkbutton(f5,variable=self.PLOT.GRID_SHOW,command=self.make_plot) \
.grid(row=0,column=2,padx=3,sticky='w')
ttk.Label(f5,text='Meridians',font=font_bold).grid(row=1,column=0,sticky='w')
ttk.Label(f5,text='Initial').grid(row=2,column=1,sticky='w')
wxo = ttk.Entry(f5,textvariable=self.PLOT.MERIDIAN_INI,justify='left',width=8)
wxo.grid(row=2,column=2)
ttk.Label(f5,text='Final').grid(row=3,column=1,sticky='w')
wdx = ttk.Entry(f5,textvariable=self.PLOT.MERIDIAN_FIN,justify='left',width=8)
wdx.grid(row=3,column=2)
ttk.Label(f5,text='Interval').grid(row=4,column=1,sticky='w')
wdx = ttk.Entry(f5,textvariable=self.PLOT.MERIDIAN_INT,justify='left',width=8)
wdx.grid(row=4,column=2)
ttk.Checkbutton(f5,text='North',
variable=self.PLOT.GRID_NORTH).grid(row=2,
column=3,padx=6)
ttk.Checkbutton(f5,text='South',
variable=self.PLOT.GRID_SOUTH).grid(row=3,
column=3,padx=6)
ttk.Label(f5,text='Parallels',font=font_bold).grid(row=5,column=0,sticky='w')
ttk.Label(f5,text='Initial').grid(row=6,column=1,sticky='w')
wxo = ttk.Entry(f5,textvariable=self.PLOT.PARALLEL_INI,justify='left',width=8)
wxo.grid(row=6,column=2)
ttk.Label(f5,text='Final').grid(row=7,column=1,sticky='w')
wdx = ttk.Entry(f5,textvariable=self.PLOT.PARALLEL_FIN,justify='left',width=8)
wdx.grid(row=7,column=2)
ttk.Label(f5,text='Interval').grid(row=8,column=1,sticky='w')
wdx = ttk.Entry(f5,textvariable=self.PLOT.PARALLEL_INT,justify='left',width=8)
wdx.grid(row=8,column=2)
ttk.Checkbutton(f5,text='West',
variable=self.PLOT.GRID_WEST).grid(row=6,
column=3,padx=6)
ttk.Checkbutton(f5,text='East',
variable=self.PLOT.GRID_EAST).grid(row=7,
column=3,padx=6)
ttk.Label(f5,text='Configuration',font=font_bold) \
.grid(row=10,column=0,sticky='w')
ttk.Label(f5,text='Character Size').grid(row=11,column=1,sticky='w')
ttk.Entry(f5,textvariable=self.PLOT.GRID_SIZE,justify='left',width=8) \
.grid(row=11,column=2)
ttk.Label(f5,text='Line Color').grid(row=12,column=1,sticky='w')
self.Glabel = ttk.Label(f5,textvariable=self.PLOT.GRID_COLOR,style="sgrid.TLabel",width=8)
self.Glabel.grid(row=12,column=2,padx=3)
ttk.Button(f5,text='Select',command=lambda:colsel(self.PLOT.GRID_COLOR, \
self.sgrid,self.Glabel,"sgrid.TLabel",master=self.Window_mapconfig)). \
grid(row=12,column=3,padx=3)
ttk.Label(f5,text='Line Width').grid(row=13,column=1,sticky='w')
ttk.Entry(f5,textvariable=self.PLOT.GRID_LINEWIDTH,justify='left',width=8) \
.grid(row=13,column=2)
ttk.Label(f5,text='Line Style').grid(row=14,column=1,sticky='w')
ttk.Combobox(f5,textvariable=self.PLOT.GRID_LINESTYLE,
justify='left',
#EG values=['',' ','None','--','-.','-',':'],width=8) \
values=['None','--','-.','-',':'],width=8) \
.grid(row=14,column=2)
ttk.Label(f5,text='Line alpha').grid(row=15,column=1,sticky='w')
ttk.Entry(f5,textvariable=self.PLOT.GRID_ALPHA,justify='left',width=8) \
.grid(row=15,column=2)
ttk.Label(f5,text='Font Color').grid(row=16,column=1,sticky='w')
self.GFlabel = ttk.Label(f5,textvariable=self.PLOT.GRID_FONTCOLOR,style="sfgrid.TLabel",width=8)
self.GFlabel.grid(row=16,column=2,padx=3)
ttk.Button(f5,text='Select',command=lambda:colsel(self.PLOT.GRID_FONTCOLOR, \
self.sfgrid,self.GFlabel,"sfgrid.TLabel",master=self.Window_mapconfig)). \
grid(row=16,column=3,padx=3)
ttk.Label(f5,text='Zorder').grid(row=17,column=1,sticky='w')
ttk.Entry(f5,textvariable=self.PLOT.GRID_ZORDER,justify='left',width=8) \
.grid(row=17,column=2)
f5.grid()
f6 = ttk.Frame(page5,borderwidth=5,padding=5)
self.stsgrid= ttk.Style()
self.stsgrid.configure("stsgrid.TLabel",background=self.PLOT.TIMESTAMP_COLOR.get(),anchor="center")
ttk.Label(f6,text='Title').grid(row=1,column=0,sticky='w')
ttk.Entry(f6,textvariable=self.PLOT.TITLE,width=40). \
grid(row=1,column=1,columnspan=4,sticky='w')
def titleprop0():
self.PLOT.TITLEFONT = fontconfig(font=self.PLOT.TITLEFONT,
sample=self.PLOT.TITLE.get())
#ttk.Label(f6,text='Title font').grid(row=2,
# column=0,
# columnspan=1,
# sticky='w')
ttk.Button(f6,text='Set font',command=titleprop0).grid(row=1,column=5,padx=5,sticky='ew')
#ttk.Checkbutton(f6,text='Bold',variable=self.PLOT.TITLE_BOLD). \
# grid(row=1,column=5)
#ttk.Label(f6,text='Size').grid(row=2,column=0,columnspan=1,sticky='w')
#ttk.Entry(f6,textvariable=self.PLOT.TITLE_SIZE,width=7). \
# grid(row=2,column=1,sticky='w')
ttk.Label(f6,text='Title Pad').grid(row=2,column=0,columnspan=1,sticky='w')
ttk.Entry(f6,textvariable=self.PLOT.TITLE_PAD,width=7). \
grid(row=2,column=1,sticky='w')
ttk.Label(f6,text='X label').grid(row=4,column=0,sticky='w')
ttk.Entry(f6,textvariable=self.PLOT.XLABEL,width=40). \
grid(row=4,column=1,columnspan=4,sticky='w')
ttk.Label(f6,text='Y label').grid(row=5,column=0,sticky='w')
ttk.Entry(f6,textvariable=self.PLOT.YLABEL,width=40). \
grid(row=5,column=1,columnspan=4,sticky='w')
ttk.Label(f6,text='Size').grid(row=6,column=0,columnspan=1,sticky='w')
ttk.Entry(f6,textvariable=self.PLOT.LABEL_SIZE,width=5). \
grid(row=6,column=1,columnspan=1,sticky='w')
ttk.Label(f6,text='X Label Pad'). \
grid(row=7,column=0,columnspan=1,sticky='w')
ttk.Entry(f6,textvariable=self.PLOT.XLABEL_PAD,width=5). \
grid(row=7,column=1,columnspan=1,sticky='w')
ttk.Label(f6,text='Y Label Pad'). \
grid(row=7,column=3,columnspan=1,sticky='w')
ttk.Entry(f6,textvariable=self.PLOT.YLABEL_PAD,width=5). \
grid(row=7,column=4,columnspan=1,sticky='w')
#ttk.Label(f6,text='Plot logo'). \
# grid(row=8,column=0,sticky='w')
#ttk.Checkbutton(f6,variable=self.PLOT.LOGO_DISPLAY). \
# grid(row=8,column=1,sticky='w',padx=3)
ttk.Label(f6,text='Timestamp'). \
grid(row=9,column=0,sticky='w',pady=[15,1])
ttk.Checkbutton(f6,text='Show',variable=self.PLOT.TIMESTAMP_SHOW). \
grid(row=10,column=1,sticky='w')
ttk.Checkbutton(f6,text='Bold',variable=self.PLOT.TIMESTAMP_BOLD). \
grid(row=11,column=1,sticky='w')
def getlabelpos():
# ================
self.GET_TIMESTAMP_LOCATION = True
ttk.Label(f6,text='X pos'). \
grid(row=12,column=0,sticky='w')
ttk.Entry(f6,textvariable=self.PLOT.TIMESTAMP_X,width=12). \
grid(row=12,column=1,sticky='w')
ttk.Button(f6,text='Select',command=getlabelpos).grid(row=12,column=2)
ttk.Label(f6,text='Y pos'). \
grid(row=13,column=0,sticky='w')
ttk.Entry(f6,textvariable=self.PLOT.TIMESTAMP_Y,width=12). \
grid(row=13,column=1,columnspan=1,sticky='w')
ttk.Label(f6,text='Size'). \
grid(row=14,column=0,sticky='w')
ttk.Entry(f6,textvariable=self.PLOT.TIMESTAMP_SIZE,width=5). \
grid(row=14,column=1,sticky='w')
ttk.Label(f6,text='Color').grid(row=15,column=0,sticky='w')
self.GTSlabel = ttk.Label(f6,textvariable=self.PLOT.TIMESTAMP_COLOR,style="stsgrid.TLabel",width=8)
self.GTSlabel.grid(row=15,column=1,sticky='w')
ttk.Button(f6,text='Select',command=lambda:colsel(self.PLOT.TIMESTAMP_COLOR, \
self.stsgrid,self.GTSlabel,"stsgrid.TLabel",master=self.Window_mapconfig)). \
grid(row=15,column=2,sticky='w')
f6.grid()
# ---------------------------------------------
def center():
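# Place the scale origin at the midpoint of the current map window;
# kept for the commented-out 'Map center' button further below.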
SOUTH = float(self.PLOT.SOUTH.get())
NORTH = float(self.PLOT.NORTH.get())
WEST = float(self.PLOT.WEST.get())
EAST = float(self.PLOT.EAST.get())
self.PLOT.SCALE_XO.set(0.5*(WEST+EAST))
self.PLOT.SCALE_YO.set(0.5*(SOUTH+NORTH))
# ---------------------------------------------
fs = ttk.Frame(page7,borderwidth=5,padding=5)
ttk.Label(fs,text='Show').grid(row=0,column=0,padx=3)
ttk.Checkbutton(fs,variable=self.PLOT.SCALE_SHOW).grid(row=0,column=1,padx=3,sticky='w')
#ttk.Label(fs,text='LON = ').grid(row=1,column=0,padx=3,sticky='e')
#ttk.Entry(fs,textvariable=self.PLOT.SCALE_X,
# width=10).grid(row=1,column=1,padx=3,sticky='w')
#ttk.Label(fs,text='LAT = ').grid(row=2,column=0,padx=3,sticky='e')
#ttk.Entry(fs,textvariable=self.PLOT.SCALE_Y,
# width=10).grid(row=2,column=1,padx=3,sticky='w')
#ttk.Label(fs,
# text='Map position where Scale will be drawn').grid(row=1,
# column=2,rowspan=2,columnspan=2,padx=3,pady=5)
ttk.Label(fs,text='xo = ').grid(row=3,column=0,padx=3,sticky='e')
ttk.Entry(fs,textvariable=self.PLOT.SCALE_XO,
width=10).grid(row=3,column=1,padx=3,sticky='w')
ttk.Label(fs,text='yo = ').grid(row=4,column=0,padx=3,sticky='e')
ttk.Entry(fs,textvariable=self.PLOT.SCALE_YO,
width=10).grid(row=4,column=1,padx=3,sticky='w')
ttk.Label(fs,
text='Screen position where scale will be drawn').grid(row=3,
column=2,rowspan=2,columnspan=2,padx=3,pady=5)
#ttk.Button(fs,text='Map center',command=center).grid(row=3,column=4,
# rowspan=2,padx=3)
ttk.Label(fs,text='Length = ').grid(row=5,column=0,padx=3,
pady=[5,1],sticky='e')
ttk.Entry(fs,textvariable=self.PLOT.SCALE_LENGTH,
width=10).grid(row=5,column=1,padx=3,sticky='w')
#ttk.Label(fs,text='Units = ').grid(row=6,column=0,padx=3,
# pady=[5,1],sticky='e')
#ttk.Combobox(fs,textvariable=self.PLOT.SCALE_UNITS,
# values=['km','mi','nmi','ft','m'],
# width=10).grid(row=6,column=1,padx=3,sticky='w')
#ttk.Label(fs,text='Bar style = ').grid(row=7,column=0,padx=3,
# pady=[5,1],sticky='e')
#ttk.Combobox(fs,textvariable=self.PLOT.SCALE_STYLE,
# values=['simple','fancy'],
# width=10).grid(row=7,column=1,padx=3,sticky='w')
#ttk.Label(fs,text='Yoffset = ').grid(row=8,column=0,padx=3,
# pady=[5,1],sticky='e')
#ttk.Entry(fs,textvariable=self.PLOT.SCALE_YOFFSET,
# width=10).grid(row=8,column=1,padx=3,sticky='w')
#ttk.Label(fs,text='Default: 0.02*(MAXLAT-MINLAT)').grid(row=8,
# column=2,columnspan=2,padx=3,sticky='w')
#ttk.Label(fs,text='Label style = ').grid(row=9,column=0,padx=3,
# pady=[5,1],sticky='e')
#ttk.Combobox(fs,textvariable=self.PLOT.SCALE_LABELSTYLE,
# values=['simple','fancy'],
# width=10).grid(row=9,column=1,padx=3,sticky='w')
ttk.Label(fs,text='Font size = ').grid(row=10,column=0,padx=3,
pady=[5,1],sticky='e')
ttk.Entry(fs,textvariable=self.PLOT.SCALE_FONTSIZE,
width=10).grid(row=10,column=1,padx=3,sticky='w')
ttk.Label(fs,text='Font color = ').grid(row=11,column=0,padx=3,
pady=[5,1],sticky='e')
ttk.Entry(fs,textvariable=self.PLOT.SCALE_FONTCOLOR,
width=10).grid(row=11,column=1,padx=3,sticky='w')
#ttk.Label(fs,text='Format = ').grid(row=12,column=0,padx=3,
# pady=[5,1],sticky='e')
#ttk.Entry(fs,textvariable=self.PLOT.SCALE_FORMAT,
# width=10).grid(row=12,column=1,padx=3,sticky='w')
ttk.Label(fs,text='Line width = ').grid(row=13,column=0,padx=3,
pady=[5,1],sticky='e')
ttk.Entry(fs,textvariable=self.PLOT.SCALE_LINEWIDTH,
width=10).grid(row=13,column=1,padx=3,sticky='w')
ttk.Label(fs,text='Line color = ').grid(row=14,column=0,padx=3,
pady=[5,1],sticky='e')
ttk.Entry(fs,textvariable=self.PLOT.SCALE_LINECOLOR,
width=10).grid(row=14,column=1,padx=3,sticky='w')
#ttk.Label(fs,text='Fill color 1 = ').grid(row=15,column=0,padx=3,
# pady=[5,1],sticky='e')
#ttk.Entry(fs,textvariable=self.PLOT.SCALE_FILLCOLOR1,
# width=10).grid(row=15,column=1,padx=3,sticky='w')
#ttk.Label(fs,text='Fill color 2 = ').grid(row=16,column=0,padx=3,
# pady=[5,1],sticky='e')
#ttk.Entry(fs,textvariable=self.PLOT.SCALE_FILLCOLOR2,
# width=10).grid(row=16,column=1,padx=3,sticky='w')
ttk.Label(fs,text='Zorder = ').grid(row=17,column=0,padx=3,
pady=[5,1],sticky='e')
ttk.Entry(fs,textvariable=self.PLOT.SCALE_ZORDER,
width=10).grid(row=17,column=1,padx=3,sticky='w')
fs.grid()
f8 = ttk.Frame(page8,borderwidth=5,padding=5)
self.obgrid, self.otgrid = ttk.Style(),ttk.Style()
self.obgrid.configure("obgrid.TLabel",background=self.PLOT.FIGURE_COLOR.get(),anchor="center")
self.otgrid.configure("otgrid.TLabel",background=self.PLOT.TEXT_COLOR.get(),anchor="center")
ttk.Label(f8,text='Map dots per inch (DPI): ').grid(row=0,column=0,sticky='w')
ttk.Entry(f8,textvariable=self.PLOT.DPI,width=10).grid(row=0,column=1,sticky='w')
ttk.Label(f8,text='Map window size: ').grid(row=1,column=0,sticky='w')
size = ttk.Entry(f8,textvariable=PSIZE,width=30)
size.grid(row=1,column=1,columnspan=3,sticky='w')
size.bind("<Return>",lambda f: sizeupdate())
ttk.Label(f8,text='(It will close current map) ').grid(row=1,column=4,sticky='w')
ttk.Label(f8,text='Font style').grid(row=2,column=0,sticky='w')
ttk.Combobox(f8,textvariable=self.PLOT.MAP_FONT_TYPE, \
values=self.FONT_TYPES,width=30).grid(row=2,column=1, \
columnspan=3, \
padx=3,sticky='w')
ttk.Label(f8,text='Background color').grid(row=3,column=0,sticky='w')
self.OBlabel = ttk.Label(f8,textvariable=self.PLOT.FIGURE_COLOR,style="obgrid.TLabel",width=8)
self.OBlabel.grid(row=3,column=1,padx=3)
ttk.Button(f8,text='Select',command=lambda:colsel(self.PLOT.FIGURE_COLOR, \
self.obgrid,self.OBlabel,"obgrid.TLabel",master=self.Window_mapconfig)). \
grid(row=3,column=2,padx=3)
ttk.Label(f8,text='Text color').grid(row=4,column=0,sticky='w')
self.OTlabel = ttk.Label(f8,textvariable=self.PLOT.TEXT_COLOR,style="otgrid.TLabel",width=8)
self.OTlabel.grid(row=4,column=1,padx=3)
ttk.Button(f8,text='Select',command=lambda:colsel(self.PLOT.TEXT_COLOR, \
self.otgrid,self.OTlabel,"otgrid.TLabel",master=self.Window_mapconfig)). \
grid(row=4,column=2,padx=3)
f8.grid()
maptabs.grid()
frame5 = ttk.Frame(self.Window_mapconfig,borderwidth=5,padding=5)
ttk.Button(frame5,text='Cancel',command=_cancel).grid(row=0,column=4,padx=3)
ttk.Button(frame5,text='Apply',command=_apply).grid(row=0,column=5,padx=3)
ttk.Button(frame5,text='Close',command=_done).grid(row=0,column=6,padx=3)
frame5.grid(row=24,column=0,columnspan=5)
# ====================
def logo_config(self):
# ====================
def _close():
# ===========
self.Window_logo.destroy()
self.Window_logo = None
self.make_plot()
def new_logo():
# =============
nn = tk.filedialog.askopenfile()
if nn is None:
return
if not empty(nn.name):
self.PLOT.LOGO_FILE.set(nn.name)
self.PLOT.LOGO_IMAGE = image.imread(self.PLOT.LOGO_FILE.get())
self.make_plot()
def _loadconf():
# =============
'''Load map configuration'''
cfilename = COSMO_CONF + 'drawing.conf'
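# Logo settings live in the shared drawing.conf JSON file under
# COSMO_CONF; only the LOGO_* keys are read (or rewritten) here.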
try:
# Read configuration
with open(cfilename) as infile:
conf = json.load(infile)
self.PLOT.LOGO_FILE.set(conf['LOGO_FILE'])
self.PLOT.LOGO_ZOOM.set(conf['LOGO_ZOOM'])
self.PLOT.LOGO_LOCATION.set(conf['LOGO_LOCATION'])
self.PLOT.LOGO_X.set(conf['LOGO_X'])
self.PLOT.LOGO_Y.set(conf['LOGO_Y'])
self.PLOT.LOGO_IMAGE = image.imread(self.PLOT.LOGO_FILE.get())
except:
toconsola('Cannot read default configuration file '+cfilename,wid=self.cons)
self.make_plot()
def _saveconf():
# =============
'''Save map configuration'''
cfilename = COSMO_CONF + 'drawing.conf'
try:
# Read configuration
with open(cfilename) as infile:
conf = json.load(infile)
conf['LOGO_FILE'] = self.PLOT.LOGO_FILE.get()
conf['LOGO_ZOOM'] = self.PLOT.LOGO_ZOOM.get()
conf['LOGO_LOCATION'] = self.PLOT.LOGO_LOCATION.get()
conf['LOGO_X'] = self.PLOT.LOGO_X.get()
conf['LOGO_Y'] = self.PLOT.LOGO_Y.get()
# Write JSON file:
with io.open(cfilename,'w',encoding='utf8') as outfile:
str_ = json.dumps(conf,ensure_ascii=False, \
sort_keys=True, \
indent=2, \
separators=(',',': '))
outfile.write(to_unicode(str_)+'\n')
toconsola("New default values saved in file "+cfilename,wid=self.cons)
except:
toconsola('Cannot open default configuration file '+cfilename,wid=self.cons)
# Main Window
# ============
if self.Window_logo is None:
self.Window_logo = tk.Toplevel(self.master)
self.Window_logo.title("Logo configuration")
self.Window_logo.protocol('WM_DELETE_WINDOW',_close)
else:
self.Window_logo.lift()
return
menubar = tk.Menu(self.Window_logo)
menu = tk.Menu(menubar,tearoff=0)
menubar.add_cascade(label='Default configuration',menu=menu)
menu.add_command(label='Restore',command=_loadconf)
menu.add_command(label='Save',command=_saveconf)
try:
self.Window_logo.config(menu=menubar)
except AttributeError:
# master is a toplevel window (Python 2.4/Tkinter 1.63)
self.Window_logo.tk.call(self.Window_logo, "config", "-menu", menubar)
F0 = ttk.Frame(self.Window_logo,borderwidth=5,padding=5)
ttk.Label(F0,text='Plot logo'). \
grid(row=0,column=1,sticky='w')
ttk.Checkbutton(F0,variable=self.PLOT.LOGO_DISPLAY). \
grid(row=0,column=2,sticky='w',padx=3)
ttk.Label(F0,text='File', \
font='Helvetica 12 bold').grid(row=1,column=0,sticky='w')
le = ttk.Entry(F0,textvariable=self.PLOT.LOGO_FILE, \
justify='left',width=30)
le.grid(row=1,column=1,columnspan=5,sticky='w')
le.bind('<<ComboboxSelected>>',lambda e: new_logo())
ttk.Button(F0,text='Open', \
command=new_logo).grid(row=1,column=6,sticky='w')
ttk.Label(F0,text='Zoom', \
font='Helvetica 12 bold').grid(row=2,column=0,sticky='w')
ttk.Entry(F0,textvariable=self.PLOT.LOGO_ZOOM, \
justify='left',width=8).grid(row=2,column=1,sticky='w')
ttk.Label(F0,text='Location', \
font='Helvetica 12 bold').grid(row=3,column=0,sticky='w')
ttk.Radiobutton(F0,text='SW',variable=self.PLOT.LOGO_LOCATION,\
value='SW').grid(row=4,column=1,sticky='w')
ttk.Radiobutton(F0,text='NW',variable=self.PLOT.LOGO_LOCATION,\
value='NW').grid(row=5,column=1,sticky='w')
ttk.Radiobutton(F0,text='NE',variable=self.PLOT.LOGO_LOCATION,\
value='NE').grid(row=6,column=1,sticky='w')
ttk.Radiobutton(F0,text='SE',variable=self.PLOT.LOGO_LOCATION,\
value='SE').grid(row=7,column=1,sticky='w')
ttk.Radiobutton(F0,text='Other',variable=self.PLOT.LOGO_LOCATION,\
value='OTHER').grid(row=8,column=1,sticky='w')
lx = ttk.Entry(F0,textvariable=self.PLOT.LOGO_X,\
justify='left',width=7)
lx.grid(row=8,column=2,sticky='w')
ly = ttk.Entry(F0,textvariable=self.PLOT.LOGO_Y,\
justify='left',width=7)
ly.grid(row=8,column=3,sticky='w')
ttk.Button(F0,text='Apply',command=_close,padding=5).grid(row=9,column=6)
F0.grid()
# ==================
def plot_logo(self):
# ==================
'''Add a logo in the plot'''
im = OffsetImage(self.PLOT.LOGO_IMAGE,zoom=self.PLOT.LOGO_ZOOM.get())
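# box_alignment picks which corner of the logo box is anchored at
# (xx,yy): (0,0) lower-left, (0,1) upper-left, (1,1) upper-right,
# (1,0) lower-right, matching the SW/NW/NE/SE options below.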
if self.PLOT.LOGO_LOCATION.get() == 'SW':
xx = self.PLOT.WEST.get()
yy = self.PLOT.SOUTH.get()
ba = (0,0)
elif self.PLOT.LOGO_LOCATION.get() == 'NW':
xx = self.PLOT.WEST.get()
yy = self.PLOT.NORTH.get()
ba = (0,1)
elif self.PLOT.LOGO_LOCATION.get() == 'NE':
xx = self.PLOT.EAST.get()
yy = self.PLOT.NORTH.get()
ba = (1,1)
elif self.PLOT.LOGO_LOCATION.get() == 'SE':
xx = self.PLOT.EAST.get()
yy = self.PLOT.SOUTH.get()
ba = (1,0)
else:
xx = self.PLOT.LOGO_X.get()
yy = self.PLOT.LOGO_Y.get()
ba = (0,0)
self.ab = AnnotationBbox(im,[xx,yy], xycoords='data', \
box_alignment=ba,pad=0.0,frameon=True)
# box_alignment=ba,pad=0.0,frameon=True,zorder=100)
self.with_logo = self.ax.add_artist(self.ab)
# =====================
def clm(self):
# =====================
'''Options to launch the COSMO Lagrangian Model'''
self.CLM.west.set(self.PLOT.WEST.get())
self.CLM.east.set(self.PLOT.EAST.get())
self.CLM.south.set(self.PLOT.SOUTH.get())
self.CLM.north.set(self.PLOT.NORTH.get())
try:
self.CLM.do.set(self.DATE[0])
except:
self.CLM.do.set(datetime.datetime.now())
def _close():
# ===========
self.Window_clm.destroy()
self.Window_clm = None
def _run(options):
# ================
if self.CLM.script.get():
now = datetime.datetime.now()
soptions = '# COSMO Lagrangian Model options generated by cosmo-view.\n# %s' % now + options
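# Insert a line break before each known option flag so the saved
# script reads one option per line.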
for flag in ('-OU','-OV','-trajectory','-final','-end','-xo','-yo',
'-zo','-to','-release','-from','-for','-dt','-alpha','-mu','-va',
'-xmin','-xmax','-ymin','-ymax','-random','-Rx','-Ry','-reverse'):
soptions = soptions.replace(flag,'\n'+flag)
with open(self.CLM.SFILE.get(), "w") as ofile:
ofile.write(soptions)
command = self.CLM.PATH.get() + \
self.CLM.BIN.get()
command += options
toconsola(command,wid=self.cons)
#print(command)
os.system(command)
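# The model runs synchronously through os.system; once it returns,
# the trajectory file it produced (if any) is read back and shown.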
if os.path.isfile(self.CLM.TRAJECTORY.get()):
FLT = lagrangian.parameters()
toconsola(FLT.MESSAGE,wid=self.cons)
FLT.Read(self.CLM.TRAJECTORY.get())
if FLT is None:
return
# FLT.TIME = np.array([(FLT.date[i].replace(tzinfo=None)-\
# self.DATE[0]).total_seconds() \
# for i in range(FLT.nrecords)])
FLT.MAPX = []
FLT.MAPY = []
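# Map the float positions from the model time axis (FLT.TIME) onto
# the viewer time axis (self.TIME) by linear interpolation; times
# outside the model run become NaN and are simply not drawn.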
if FLT.nfloats > 1:
for i in range(FLT.nfloats):
f = interpolate.interp1d(FLT.TIME,FLT.lon[:,i], bounds_error=False, fill_value=np.NaN)
FLT.MAPX.append(f(self.TIME))
f = interpolate.interp1d(FLT.TIME,FLT.lat[:,i], bounds_error=False, fill_value=np.NaN)
FLT.MAPY.append(f(self.TIME))
FLT.MAPX = np.array(FLT.MAPX).T.tolist()
FLT.MAPY = np.array(FLT.MAPY).T.tolist()
else:
FLT.Fx = interpolate.interp1d(FLT.TIME,FLT.lon, bounds_error=False, fill_value=np.NaN)
FLT.MAPX = FLT.Fx(self.TIME)
FLT.Fy = interpolate.interp1d(FLT.TIME,FLT.lat, bounds_error=False, fill_value=np.NaN)
FLT.MAPY = FLT.Fy(self.TIME)
self.nfloat += 1
self.FLOAT.append(FLT)
self.FLOAT_INDX.set(self.nfloat-1)
self.FLOAT_LIST = list(range(self.nfloat))
nt = len(FLT.TIME)
self.LAYERS.add(TYPE='FLOAT',Filename=FLT.FILENAME.get(),N=nt,wid=self.cons)
#self.nfiles += 1
#self.FILENAMES.append(FLT.FILENAME.get())
#self.FILETYPES.append('FLOAT')
#self.SEQUENCES.append(tk.BooleanVar(value=False))
#self.SEQLEADER.append(tk.BooleanVar(value=False))
#self.SEQNTIMES.append(0)
#self.FILEORDER.append(self.nfloat-1)
self.make_plot()
else:
messagebox.showinfo(message='COSMO Lagrangian Model failed')
def _help():
# ==========
options = ' --help'
_run(options)
def _run_single():
# ================
options = clm.Basic_options(self.CLM)
if empty(options):
return
_run(options)
def _run_ensemble():
# ==================
options = clm.Basic_options(self.CLM)
try:
aa = ' -random %s' % self.CLM.nfloats.get()
options += aa
except:
pass
try:
aa = ' -Rx %s' % self.CLM.Rx.get()
options += aa
except:
pass
try:
aa = ' -Ry %s' % self.CLM.Ry.get()
options += aa
except:
pass
_run(options)
# -------------------------
# Main CLM Window
# -------------------------
if self.nvec == 0:
messagebox.showinfo(message='No file with ocean currents has been opened yet')
return
if self.Window_clm is not None:
self.Window_clm.lift()
return
# Copy the VEC information to the CLM class
#
self.CLM.VEC = self.VEC
string = self.DATE[self.L.get()]
try:
self.CLM.do.set(string.replace(' ','T'))
except:
pass
self.CLM.to.set(self.TIME[self.L.get()]-self.TIME[0])
lini = self.L.get() + 1
if lini < 2:
lini = 2
self.CLM.Lini.set(lini)
self.CLM.Tini.set(self.TIME[lini])
self.CLM.Dini.set(self.DATE[lini])
self.Window_clm = tk.Toplevel(self.master)
self.Window_clm.title('COSMO Lagrangian Model options')
self.Window_clm.resizable(width=True,height=True)
self.Window_clm.protocol('WM_DELETE_WINDOW',_close)
clm.WinConfig(self.Window_clm,self.CLM,self.TIME,self.DATE)
F0 = ttk.Frame(self.Window_clm,padding=5)
#ttk.Checkbutton(F0,text='Reverse Run',variable=self.CLM.reverse). \
# grid(row=0,column=1,padx=5)
ttk.Checkbutton(F0,text='Save options',variable=self.CLM.script). \
grid(row=0,column=1,padx=5)
ttk.Button(F0,text='Run Single',command=_run_single).grid(row=0,column=2,padx=5)
ttk.Button(F0,text='Run Ensemble',command=_run_ensemble).grid(row=0,column=3,padx=5)
ttk.Button(F0,text='Run Help',command=_help).grid(row=0,column=4,padx=5)
F0.grid()
# ==================
def make_anim(self):
# ==================
''' Launch the matplotlib animation'''
# -----------
def _close():
# -----------
self.Window_anim.destroy()
self.Window_anim = None
def _done():
# ----------
L_Backup = self.L.get()
FFMpegWriter = manimation.writers['ffmpeg']
metadata = dict(title=self.PLOT.VIDEO_TITLE.get(),
artist=self.PLOT.VIDEO_AUTHOR.get(),
comment=self.PLOT.VIDEO_COMMENT.get())
writer = FFMpegWriter(fps=self.PLOT.VIDEO_FPS.get(),metadata=metadata)
with writer.saving(self.Mfig,self.PLOT.VIDEO_NAME.get(),self.PLOT.VIDEO_DPI.get()):
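# Walk the requested record range: set every linked vector and
# contour layer to record L, redraw the animation figure, and grab
# the canvas as one video frame.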
for L in range(self.PLOT.VIDEO_L1.get(),self.PLOT.VIDEO_L2.get()+1):
self.L.set(L)
self.PLOT.TLABEL.set(self.DATE[L])
print('L = ', L)
for i in range(self.nvec):
if self.VEC[i].LINK.get():
self.VEC[i].L.set(L)
self.VEC[i].read(update_lims=False,wid=self.cons)
for i in range(self.ncdf):
if self.CDF[i].LINK.get():
self.CDF[i].L.set(L)
self.CDF[i].read(update_lims=False,wid=self.cons)
self.make_Mplot()
writer.grab_frame()
messagebox.showinfo(parent=self.Window_anim,message='Movie has been saved')
self.L.set(L_Backup)
def _loadconf():
# -------------
'''Load ANIM configuration'''
toconsola('Retrieving VIDEO defaults.',wid=self.cons)
#print('Retrieving VIDEO defaults.')
with open(self.PLOT.FILECONF) as infile:
conf = json.load(infile)
self.PLOT.VIDEO_NAME.set(conf['VIDEO_NAME'])
self.PLOT.VIDEO_TITLE.set(conf['VIDEO_TITLE'])
self.PLOT.VIDEO_AUTHOR.set(conf['VIDEO_AUTHOR'])
self.PLOT.VIDEO_COMMENT.set(conf['VIDEO_COMMENT'])
self.PLOT.VIDEO_FPS.set(conf['VIDEO_FPS'])
self.PLOT.VIDEO_DPI.set(conf['VIDEO_DPI'])
def _saveconf():
# -------------
'''Save ANIM configuration'''
with open(self.PLOT.FILECONF) as infile:
conf = json.load(infile)
toconsola('Updating VIDEO defaults.',wid=self.cons)
#print('Updating VIDEO defaults.')
conf['VIDEO_NAME'] = self.PLOT.VIDEO_NAME.get()
conf['VIDEO_TITLE'] = self.PLOT.VIDEO_TITLE.get()
conf['VIDEO_AUTHOR'] = self.PLOT.VIDEO_AUTHOR.get()
conf['VIDEO_COMMENT'] = self.PLOT.VIDEO_COMMENT.get()
conf['VIDEO_FPS'] = self.PLOT.VIDEO_FPS.get()
conf['VIDEO_DPI'] = self.PLOT.VIDEO_DPI.get()
with io.open(self.PLOT.FILECONF,'w',encoding='utf8') as outfile:
str_ = json.dumps(conf,ensure_ascii=False, \
sort_keys=True, \
indent=2, \
separators=(',',': '))
outfile.write(to_unicode(str_)+'\n')
# Main
# ----
if self.LAYERS.n == 0:
messagebox.showinfo(message='No layers have been added')
return
if self.Window_anim is not None:
self.Window_anim.lift()
return
self.Window_anim = tk.Toplevel(self.master)
self.Window_anim.title('Animation creation')
self.Window_anim.resizable(width=True,height=True)
self.Window_anim.protocol('WM_DELETE_WINDOW',_close)
# Menu:
# AAA
menubar = tk.Menu(self.Window_anim)
menu = tk.Menu(menubar,tearoff=0)
menubar.add_cascade(label='Configuration',menu=menu)
menu.add_command(label='Restore',command=_loadconf)
menu.add_command(label='Save',command=_saveconf)
try:
self.Window_anim.config(menu=menubar)
except AttributeError:
# master is a toplevel window (Python 2.4/Tkinter 1.63)
self.Window_anim.tk.call(self.Window_anim, "config", "-menu", menubar)
# Widgets
#
F0 = ttk.Frame(self.Window_anim,borderwidth=5,padding=5)
ttk.Label(F0,text='Output filename : ').grid(row=0,column=0)
ttk.Entry(F0,textvariable=self.PLOT.VIDEO_NAME,width=40).grid(row=0,column=1,columnspan=4,sticky='w')
ttk.Label(F0,text='Video title : ').grid(row=1,column=0)
ttk.Entry(F0,textvariable=self.PLOT.VIDEO_TITLE,width=40).grid(row=1,column=1,columnspan=4,sticky='w')
ttk.Label(F0,text='Author : ').grid(row=2,column=0)
ttk.Entry(F0,textvariable=self.PLOT.VIDEO_AUTHOR,width=40).grid(row=2,column=1,columnspan=4,sticky='w')
ttk.Label(F0,text='Comment : ').grid(row=3,column=0)
ttk.Entry(F0,textvariable=self.PLOT.VIDEO_COMMENT,width=40).grid(row=3,column=1,columnspan=4,sticky='w')
ttk.Label(F0,text='Initial frame : ').grid(row=4,column=0)
ttk.Entry(F0,textvariable=self.PLOT.VIDEO_L1,width=7).grid(row=4,column=1,sticky='w')
ttk.Label(F0,text='Final frame : ').grid(row=5,column=0)
ttk.Entry(F0,textvariable=self.PLOT.VIDEO_L2,width=7).grid(row=5,column=1,sticky='w')
ttk.Label(F0,text='FPS : ').grid(row=6,column=0)
ttk.Entry(F0,textvariable=self.PLOT.VIDEO_FPS,width=7).grid(row=6,column=1,sticky='w')
ttk.Label(F0,text='DPI : ').grid(row=7,column=0)
ttk.Entry(F0,textvariable=self.PLOT.VIDEO_DPI,width=7).grid(row=7,column=1,sticky='w')
done = ttk.Button(F0,text='Do it',command=_done)
done.grid(row=8,column=3,padx=3)
done.bind("<Return>",lambda e:_done())
close = ttk.Button(F0,text='Close',command=_close)
close.grid(row=8,column=4,padx=3)
close.bind("<Return>",lambda e:_close())
F0.grid()
F1 = ttk.Frame(self.Window_anim,borderwidth=5,padding=5)
self.Mfig = Figure(figsize=self.PLOT.SIZE,dpi=self.PLOT.DPI.get())
#EG Projection
projection = self.PLOT.MAP_PROJECTION.get()
proj = map_proj(projection)
self.Max = self.Mfig.add_subplot(111, projection=proj['proj'])
self.Mcanvas = FigureCanvasTkAgg(self.Mfig, master=F1)
self.Mcanvas.draw()
self.Mcanvas.get_tk_widget().grid(row=0,column=0,columnspan=11,sticky='wn')
self.Mdrawmap = True
F1.grid()
self.make_Mplot(proj=proj['proj'])
# =========================
def DepthandDate(self,CDF):
# =========================
'''Fill the lists: K_LIST, L_LIST, Z_LIST, T_LIST and DATE'''
CDF.K.set(0) # Default layer
CDF.L.set(0) # Default time step
CDF.K_LIST = list(range(CDF.FLD.icdf.nz))
CDF.L_LIST = list(range(CDF.FLD.icdf.nt))
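# K_LIST/L_LIST hold the valid depth and time record indices;
# Z_LIST holds the actual depth values when the file is
# georeferenced, plain level indices otherwise.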
# Depth selector
if CDF.FLD.icdf.idk > -1:
if self.PLOT.GEOMAP.get():
wrk = CDF.FLD.nc.variables[CDF.FLD.icdf.zname][:]
CDF.Z_LIST = list(wrk)
toconsola(str(CDF.Z_LIST),wid=self.cons)
#print(CDF.Z_LIST)
else:
CDF.Z_LIST = np.arange(CDF.FLD.icdf.nz)
else:
CDF.Z_LIST = []
# Time selector and TIME and DATE values
CDF.DATE = []
if CDF.FLD.icdf.idl > -1:
wrk = CDF.FLD.nc.variables[CDF.FLD.icdf.tname][:]
CDF.T_LIST = list(wrk)
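# Prefer calendar dates via num2date; if the time units or calendar
# attributes are missing or unusable, fall back to record indices.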
try:
for i in range(CDF.FLD.icdf.nt):
CDF.DATE.append(num2date(CDF.T_LIST[i], \
units=CDF.FLD.icdf.time_units, \
calendar=CDF.FLD.icdf.time_calendar))
except:
for i in range(CDF.FLD.icdf.nt):
CDF.DATE.append(i)
try:
CDF.TIME = np.array([(CDF.DATE[i]-CDF.DATE[0]).total_seconds() \
for i in range(CDF.FLD.icdf.nt)])
except:
CDF.TIME = np.array([(CDF.DATE[i]-CDF.DATE[0]) \
for i in range(CDF.FLD.icdf.nt)])
else:
CDF.T_LIST = []
CDF.DATE = [' ']
CDF.TIME = np.array([0])
# # ====================================
# def read_lonlat(self,CDF,xname,yname):
# # ====================================
# '''Read 1D/2D lon lat grid '''
#
# if CDF.icdf.georef:
# vlon = CDF.ncid.variables[xname]
# vlat = CDF.ncid.variables[yname]
# toconsola(str(vlon),wid=self.cons)
# toconsola(str(vlat),wid=self.cons)
# else:
# toconsola('Georef is False',wid=self.cons)
# #print('Georef is False')
# self.PLOT.GEOMAP.set(False)
# vlon = np.arange(CDF.icdf.nx)
# vlat = np.arange(CDF.icdf.ny)
#
# CDF.lon = vlon[:].copy()
# CDF.lat = vlat[:].copy()
# if len(vlon.shape) == 1:
# CDF.xx,CDF.yy = np.meshgrid(CDF.lon,CDF.lat)
# else:
# CDF.xx = vlon[:].copy()
# CDF.yy = vlat[:].copy()
# # ====================
# def read_UV(self,VEC):
# # ====================
# '''Read 2D velocity data according to user selections'''
# #K = self.K.get()
# #L = self.L.get()
# K = VEC.K.get()
# L = VEC.L.get()
# uname = '%s' % VEC.uname.get()
# vname = '%s' % VEC.vname.get()
# ndim = VEC.icdf.ndims[VEC.uid]
#
# #VEC.K.set(K)
# #VEC.L.set(L)
#
# if ndim == 2:
# VEC.VEL.u = VEC.ncid.variables[uname][:,:]
# VEC.VEL.v = VEC.ncid.variables[vname][:,:]
# elif ndim == 3:
# if VEC.icdf.ppl[VEC.uid] > -1:
# VEC.VEL.u = VEC.ncid.variables[uname][L,:,:].squeeze()
# VEC.VEL.v = VEC.ncid.variables[vname][L,:,:].squeeze()
# elif VEC.icdf.ppk[VEC.uid] > -1:
# VEC.VEL.u = VEC.ncid.variables[uname][K,:,:].squeeze()
# VEC.VEL.v = VEC.ncid.variables[vname][K,:,:].squeeze()
# else:
# toconsola('Invalid file!',wid=self.cons)
# print('Invalid file!')
# return
# elif ndim == 4:
# VEC.VEL.u = VEC.ncid.variables[uname][L,K,:,:].squeeze()
# VEC.VEL.v = VEC.ncid.variables[vname][L,K,:,:].squeeze()
# else:
# toconsola("Invalid number of dimensions, "+str(ndim),wid=self.cons)
# #print('Invalid number of dimensions, '+str(ndim))
#
# _u = VEC.VEL.u.copy()
# _v = VEC.VEL.v.copy()
# msku = ma.getmask(VEC.VEL.u)
# mskv = ma.getmask(VEC.VEL.v)
# msk = ma.mask_or(msku,mskv)
# VEC.VEL.u = ma.array(_u,mask=msk).copy()
# VEC.VEL.v = ma.array(_v,mask=msk).copy()
# #VEC.VEL.speed = np.sqrt(VEC.VEL.u**2+VEC.VEL.v**2)
# #VEC.VEL.F = interpolate.interp2d(VEC.lon, \
# # VEC.lat, \
# # VEC.VEL.speed)
#
# # ===========================================
# #def read_Field(self,FIELD,ncid,icdf,sid,K,L):
# # ===========================================
# # ===========================================
# def read_CDF(self,CDF,update_lims=True):
# # ===========================================
# '''Read 2D data according to user selections'''
#
# # self.read_Field(self.CDF[ii].FIELD, \
# # self.CDF[ii].ncid, \
# # self.CDF[ii].icdf, \
# # self.CDF[ii].varid, \
# # self.CDF[ii].K.get(), \
# # self.CDF[ii].L.get())
# if CDF.varid < 0:
# CDF.FLD.data = None
# return
#
# K = CDF.K.get()
# L = CDF.L.get()
#
# vname = '%s' % CDF.varname.get()
# toconsola('READ_FIELD Reading Var, Level and Time:'+str(CDF.varid)+
# ", "+str(CDF.K.get())+
# ", "+str(CDF.L.get()),wid=self.cons)
# #print('READ_FIELD Reading Var, Level and Time:'+str(CDF.varid)+
# # ", "+str(CDF.K.get())+
# # ", "+str(CDF.L.get()))
#
# ndim = CDF.icdf.ndims[CDF.varid]
# if ndim == 2:
# CDF.FLD.data = CDF.ncid.variables[vname][:,:]
# elif ndim == 3:
# if CDF.icdf.ppl[CDF.varid] > -1:
# CDF.FLD.data = CDF.ncid.variables[vname][L,:,:].squeeze()
# elif CDF.icdf.ppk[CDF.varid] > -1:
# CDF.FLD.data = CDF.ncid.variables[vname][K,:,:].squeeze()
# else:
# messagebox.showinfo(message='Invalid variable dimensions')
# CDF.FLD.data = None
# elif ndim == 4:
# CDF.FLD.data = CDF.ncid.variables[vname][L,K,:,:].squeeze()
#
# CDF.FLD.missing_value = None
#
# if CDF.FLD.data is not None:
# CDF.FLD.varname = vname
# try:
# CDF.FLD.units = CDF.ncid.variables[vname].getncattr('units')
# except:
# CDF.FLD.units = ''
#
# try:
# CDF.FLD.missing_value = CDF.ncid.variables[vname].getncattr('_FillValue')
# except:
# try:
# CDF.FLD.missing_value = CDF.ncid.variables[vname].getncattr('missing_value')
# except:
# CDF.FLD.missing_value = None
#
# if CDF.FLD.missing_value is not None:
# CDF.FLD.mask = ma.getmask(CDF.FLD.data)
# CDF.FLD.data[CDF.FLD.data==CDF.FLD.missing_value] = np.nan
#
# # Contour intervals
# CDF.FLD.minval = float(CDF.FLD.data.min())
# CDF.FLD.maxval = float(CDF.FLD.data.max())
# toconsola('Min val = '+str(CDF.FLD.minval),wid=self.cons)
# toconsola('Max val = '+str(CDF.FLD.maxval),wid=self.cons)
# #print('Min val = '+str(CDF.FIELD.minval))
# #print('Max val = '+str(CDF.FIELD.maxval))
#
# print('Here: ', update_lims)
# print(CDF.FLD.minval)
# print(CDF.FLD.maxval)
#
# if update_lims:
# try:
# CDF.PLOT.CONTOUR_MIN.set(myround(CDF.FLD.minval))
# except:
# CDF.PLOT.CONTOUR_MIN.set(CDF.FLD.minval)
# try:
# CDF.PLOT.CONTOUR_MAX.set(myround(CDF.FLD.maxval))
# except:
# CDF.PLOT.CONTOUR_MAX.set(CDF.FLD.maxval)
#
# dd = CDF.PLOT.CONTOUR_MAX.get() \
# - CDF.PLOT.CONTOUR_MIN.get()
# try:
# CDF.PLOT.CONTOUR_INTERVAL.set(myround(0.1*dd,0))
# except:
# CDF.PLOT.CONTOUR_INTERVAL.set(0.1*dd)
#
# ===================
def get_contour(self):
# ==================
'''Widget to read Netcdf files'''
self.CSOURCE = tk.StringVar()
self.CSOURCE.set('Local Dataset')
self.DATETIME = ''
def _close():
# ===========
self.Window_ncdf.destroy()
self.Window_ncdf = None
return
def _done():
# ===========
ii = self.CDF_INDX.get()
if self.CDF[ii].SOURCE == 'FILE':
self.CDF[ii].read(wid=self.cons)
# # The date of the data
# try:
# nodate = empty(self.DATE[0])
# except:
# nodate = False
# try:
# nodatetime = empty(self.DATETIME)
# except:
# nodatetime = False
#
# if not nodatetime:
# if nodate:
# self.DATE[0] = self.DATETIME
# else:
# if len(self.DATE[0]) == 1:
# a = self.DATE[0].__str__()
# b = self.CDF[ii].DATE[0].__str__()
# if a == b:
# self.DATE[0] = self.DATETIME
# self.CDF[ii].DATE[0] = self.DATETIME
_close()
self.make_plot()
if self.Window_contourconfig is not None:
self.Window_contourconfig.destroy()
self.Window_contourconfig = None
self.contour_config()
def _clear():
# ===========
if self.ncdf == 0:
return
# When erasing we must remove two kinds of information: the
# entry in the LAYERS structure and the contour (CDF) field itself.
# Attention: if erasing the SEQUENCE leader, we need to update the
# DATE and TIMES of the SEQUENCE.
ii = self.CDF_INDX.get()
self.LAYERS.erase('FLD',ii,wid=self.cons)
self.LAYERS.print()
toconsola('Erasing data field '+str(ii),wid=self.cons)
#print('Erasing record '+str(ii))
del self.CDF[ii]
self.ncdf -= 1
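# Clamp the selector to the last remaining field (ii becomes -1
# when no fields are left, which resets the widgets below).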
ii = self.ncdf-1 if ii >= self.ncdf else ii
self.CDF_INDX.set(ii)
_refill(ii)
if self.LAYERS.update:
toconsola('Updating TIME and DATE values of SEQUENCE',wid=self.cons)
LEADER_TYPE = self.LAYERS.TYPE[self.LAYERS.leader]
jj = self.LAYERS.TYPE_INDEX[self.LAYERS.leader]
if LEADER_TYPE == 'VEC':
self.DATE = self.VEC[jj].DATE.copy()
self.TIME = self.VEC[jj].TIME.copy()
elif LEADER_TYPE == 'FLD':
self.DATE = self.CDF[jj].DATE.copy()
self.TIME = self.CDF[jj].TIME.copy()
self.PLOT.TLABEL.set(self.DATE[self.L.get()])
self.make_plot()
def _reget():
# ===========
self.CDF_INDX.set(_wsel.get())
ii = self.CDF_INDX.get()
_refill(ii)
def _refill(ii):
# ==============
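# Repopulate every widget of the selector window for field ii;
# with ii < 0 the window is reset to its empty, disabled state.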
if ii >= 0:
self.CDF_LIST = list(range(self.ncdf))
_wsel.configure(state='!disabled')
_wsel['values'] = self.CDF_LIST
_went['textvariable'] = self.CDF[ii].FILENAME
_wvar.configure(state='!disabled')
_wvar['textvariable'] = self.CDF[ii].varname
_wvar['values'] = self.CDF[ii].FLD.icdf.VAR_MENU
_kbox.configure(state='!disabled')
_kbox['textvariable'] = self.CDF[ii].K
_kbox['values'] = self.CDF[ii].K_LIST
_lbox.configure(state='!disabled')
_lbox['textvariable'] = self.CDF[ii].L
_lbox['values'] = self.CDF[ii].L_LIST
_aent.configure(state='!disabled')
_aent['textvariable'] = self.CDF[ii].ALIAS
if self.CDF[ii].FLD.icdf.idk < 0:
_kbox.configure(state='disabled')
_zbox['text']='--'
else:
_zbox['text']=self.CDF[ii].Z_LIST[self.CDF[ii].K.get()]
if self.CDF[ii].FLD.icdf.idl < 0:
_lbox.configure(state='disabled')
_dbox['text']='--'
else:
_lbox['textvariable'] = self.CDF[ii].L
_lbox['values'] = self.CDF[ii].L_LIST
_dbox['text'] = self.CDF[ii].DATE[self.CDF[ii].L.get()]
_show['variable'] = self.CDF[ii].show
else:
self.CDF = []
self.CDF_LIST = [None]
self.CDF_INDX = tk.IntVar()
self.CDF_INDX.set(0)
_wsel.configure(state='disabled')
_wvar.configure(state='disabled')
_kbox.configure(state='disabled')
_lbox.configure(state='disabled')
_aent.configure(state='disabled')
_wsel['values'] = self.CDF_LIST
_went['textvariable'] = ''
_wvar['textvariable'] = ''
_wvar['values'] = ['']
_kbox['textvariable'] = ''
_kbox['values'] = ['']
_zbox['text'] = '--'
_lbox['textvariable'] = ''
_lbox['values'] = ['']
_dbox['text'] = '--'
_wsav.configure(state='disabled')
def _add(SOURCE):
# ===============
global Window_select
CDF = CONTOUR()
def _cancel():
# ============
global Window_select
Window_select.destroy()
Window_select = None
def _done():
# ==========
global Window_select
global _wvar
if empty(CDF.varname.get()):
messagebox.showinfo(parent=Window_select,message='Select variable')
return
toconsola('2D-grid axes : '+'%s'%CDF.FLD.icdf.grid2d,wid=self.cons)
# Seems the suitable place where to put this:
CDF.FLD.varname = CDF.varname.get()
CDF.FLD.varid = CDF.FLD.icdf.vname.index(CDF.FLD.varname)
CDF.FLD.ndims = CDF.FLD.icdf.ndims[CDF.FLD.varid]
CDF.FLD.get_info(wid=self.cons)
CDF.FLD.get_grid()
#self.read_lonlat(CDF,CDF.FLD.icdf.xname,CDF.FLD.icdf.yname)
CDF.K_LIST = list(range(CDF.FLD.icdf.nz))
CDF.L_LIST = list(range(CDF.FLD.icdf.nt))
CDF.Z_LIST = CDF.FLD.get_zlist()
CDF.T_LIST, CDF.DATE, CDF.TIME = CDF.FLD.get_tlist()
#self.DepthandDate(CDF)
CDF.show.set(True)
#if empty(CDF.DATE[0].__str__()):
# _dsel.configure(state='enabled')
# Adding the CONTOUR to the Drawing class
#
nt = CDF.FLD.icdf.nt
self.LAYERS.add(TYPE='FLD',Filename=CDF.FILENAME.get(),N=nt,wid=self.cons)
self.ncdf += 1
self.CDF.append(CDF)
self.CDF_INDX.set(self.ncdf-1)
self.CDF_LIST = list(range(self.ncdf))
n = self.LAYERS.n
#self.nfiles += 1
#self.FILENAMES.append(CDF.FILENAME.get())
#self.FILETYPES.append('FLD')
#self.FILEORDER.append(self.ncdf-1)
#self.SEQUENCES.append(tk.BooleanVar(value=False)) #By default, not attached
#self.SEQLEADER.append(tk.BooleanVar(value=False))
#self.SEQNTIMES.append(CDF.FLD.icdf.nt)
ii = self.CDF_INDX.get()
#if not empty(self.DATETIME):
# self.CDF[ii].DATE.append(self.DATETIME)
if self.first:
if self.drawmap is None:
self.PLOT.WEST.set(self.CDF[ii].FLD.xmin)
self.PLOT.EAST.set(self.CDF[ii].FLD.xmax)
self.PLOT.SOUTH.set(self.CDF[ii].FLD.ymin)
self.PLOT.NORTH.set(self.CDF[ii].FLD.ymax)
self.plot_initialize()
#try:
# self.PLOT.XLABEL.set(self.CDF[ii].FLD.icdf.xname)
#except:
# self.PLOT.XLABEL.set('Longitude')
#try:
# self.PLOT.YLABEL.set(self.CDF[ii].FLD.icdf.yname)
#except:
# self.PLOT.YLABEL.set('Latitude')
self.DATE = self.CDF[ii].DATE.copy()
self.TIME = self.CDF[ii].TIME.copy()
self.PLOT.TLABEL.set(self.CDF[ii].DATE[self.CDF[ii].L.get()])
self.PLOT.VIDEO_L2.set(len(self.DATE)-1)
self.first = False
# Is this the field member of the SEQUENCE?
# CAROUSEL MANAGEMENT - CONTOUR
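# Time-axis linking: the first multi-record layer defines the global
# time axis; any later layer with the same number of records is
# linked to it so the carousel advances them together.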
if nt > 1:
if self.NL == 0:
toconsola('Contour initiates Time axis',wid=self.cons)
self.CDF[ii].LINK.set(True)
self.TIME = self.CDF[ii].TIME.copy()
self.DATE = self.CDF[ii].DATE.copy()
self.NL = nt
self.L.set(self.CDF[ii].L.get())
self.L_LIST = list(range(nt))
self.lbox.configure(state='normal')
self.lbox['values'] = self.L_LIST
if self.L.get() < self.NL-1:
self.bnext.configure(state='normal')
if self.L.get() > 0:
self.bprev.configure(state='normal')
elif self.NL == nt:
toconsola('Linking Contour to Time axis',wid=self.cons)
self.CDF[ii].LINK.set(True)
self.CDF[ii].L.set(self.L.get()) #Synchronize records
# if self.LAYERS.nsequence == 0:
# toconsola('Contour initiates SEQUENCE list',wid=self.cons)
# self.LAYERS.nsequence = 1
# self.LAYERS.INSEQUENCE[n-1].set(True)
# self.LAYERS.SEQUENCER[n-1].set(True)
# self.LAYERS.leader = n-1
# self.LAYERS.seqlen = nt
## self.SEQUENCES[-1].set(True)
## self.SEQLEADER[-1].set(True) # Is the first field
## self.SEQLEADER_INDX = self.nfiles
# self.DATE = self.CDF[ii].DATE.copy()
# self.TIME = self.CDF[ii].TIME.copy()
# self.L.set(self.CDF[ii].L.get())
# self.L_LIST = list(range(self.CDF[ii].FLD.icdf.nt))
# self.NL = len(self.L_LIST)
# self.lbox.configure(state='normal')
# self.lbox['values'] = self.L_LIST
# if self.L.get() < self.NL-1:
# self.bnext.configure(state='normal')
# if self.L.get() > 0:
# self.bprev.configure(state='normal')
# else:
# if nt == self.LAYERS.seqlen:
# toconsola('Adding Contour to SEQUENCE list',wid=self.cons)
# self.LAYERS.nsequence += 1
# self.LAYERS.INSEQUENCE[n-1].set(True)
# self.LAYERS.SEQUENCER[n-1].set(False)
# self.CDF[ii].L.set(self.L.get()) #Synchronize records
_refill(ii)
Window_select.destroy()
Window_select = None
self.DATETIME = ''
self.LAYERS.print()
ISOURCE = self.CONTOUR_OPTIONS.index(SOURCE)
if ISOURCE == 0:
filename = self.get_opendap_filename()
elif ISOURCE == 1:
filename = self.get_copernicus_filename()
elif ISOURCE == 2:
nn = filedialog.askopenfilename(parent=self.Window_ncdf, \
filetypes=[('Netcdf','*.nc'), \
('CDF','*.cdf'), \
('ALL','*')])
if len(nn) == 0:
return
else:
filename = '%s' % nn
elif ISOURCE == 3:
#aa = get_remote()
#filename2 = aa.filename()
#filename = filename2.decode('utf-8')
filename = 'https://cosmo.icm.csic.es/MEDSEA_100.nc'
toconsola('filename: '+filename,wid=self.cons)
else:
if self.nvec <= 0:
messagebox.showinfo(message='No Trajectory file opened yet')
return
else:
jj = self.VEC_INDX.get()
filename = self.VEC[jj].UFILENAME.get()
if empty(filename):
return
# Initialize contour class:
CDF.FILENAME.set(filename)
CDF.FLD.open(filename,wid=self.cons)
# Not empty filename:
#CDF = cdf_parameters()
#CDF.FIELD = fld_parameters()
#CDF.FILENAME.set(filename)
#CDF.ncid = Dataset(filename)
#CDF.icdf = tools.geocdf(filename, wid=self.cons)
##self.read_lonlat(CDF,CDF.icdf.xname,CDF.icdf.yname)
##self.DepthandDate(CDF)
##CDF.FIELD.show.set(True)
##if empty(CDF.DATE[0].__str__()):
## _dsel.configure(state='enabled')
if Window_select is None:
Window_select = tk.Toplevel(self.master)
Window_select.title('SELECT VARIABLE')
Window_select.protocol('WM_DELETE_WINDOW',Window_select.destroy)
else:
Window_select.lift()
return
#axesid = tools.WinGeoaxes(CDF.icdf,CDF.ncid,Window_select)
axesid = tools.WinGeoaxes(CDF.FLD.icdf,CDF.FLD.nc,Window_select)
font_bold = tkfont.Font(font='TkDefaultFont').copy()
font_bold['weight']='bold'
F0 = ttk.Frame(Window_select,padding=5,borderwidth=5)
ttk.Label(F0,text='Select variable',borderwidth=3,font=font_bold) \
.grid(row=0,column=0)
dvar = ttk.Combobox(F0,textvariable=CDF.varname, \
values=CDF.FLD.icdf.VAR_MENU, \
width=20)
dvar.grid(row=0,column=1,columnspan=2)
dvar.bind('<<ComboboxSelected>>',lambda e: axesid.selected_var(CDF.FLD.icdf,CDF.FLD.nc,dvar))
F0.grid()
#CDF.icdf.nx = -9999
F1 = ttk.Frame(Window_select,padding=5)
cancel = ttk.Button(F1,text='Cancel',command=_cancel)
cancel.grid(row=0,column=3,sticky='e',padx=10)
cancel.bind("<Return>",lambda e:_cancel())
done = ttk.Button(F1,text='Done',command=_done)
done.grid(row=0,column=4,sticky='e',padx=10)
done.bind("<Return>",lambda e:_done())
F1.grid(sticky='we')
Window_select.wait_window(Window_select)
def _lselection():
# ================
_dbox['text'] = self.CDF[ii].DATE[self.CDF[ii].L.get()]
def _kselection():
# ================
_zbox['text'] = self.CDF[ii].Z_LIST[self.CDF[ii].K.get()]
def _vselection():
# ================
try:
self.CDF[ii].FLD.varname = self.CDF[ii].varname.get()
self.CDF[ii].FLD.varid = self.CDF[ii].FLD.icdf.vname.index( \
self.CDF[ii].varname.get())
except:
self.CDF[ii].FLD.varid = -1
def _save():
# ================
ii = self.CDF_INDX.get()
toconsola('Saving '+str(ii),wid=self.cons)
self.CDF[ii].save()
def _date():
# ==========
''' Manually select a date'''
aa = get_Date()
self.DATETIME = aa.date
_dbox['text'] = self.DATETIME
# Main window:
# ============
if self.Window_ncdf is None:
self.Window_ncdf = tk.Toplevel(self.master)
self.Window_ncdf.title("Contour files")
self.Window_ncdf.protocol('WM_DELETE_WINDOW',_close)
else:
self.Window_ncdf.lift()
if self.ncdf > 0:
ii = self.CDF_INDX.get()
else:
ii = -1
global Window_select
global _wvar
Window_select = None
F0 = ttk.Frame(self.Window_ncdf,padding=5)
# Add
ttk.Button(F0,text='Import', \
command=lambda:_add(self.CSOURCE.get())).grid(row=1, \
column=0,padx=3)
_source = ttk.Combobox(F0,textvariable=self.CSOURCE, \
values=self.CONTOUR_OPTIONS)
_source.grid(row=0,column=0,padx=3)
# Filename:
ttk.Label(F0,text='Netcdf file').grid(row=0,column=1,padx=3)
_wsel = ttk.Combobox(F0,textvariable=self.CDF_INDX, \
values=self.CDF_LIST,width=5)
_wsel.grid(row=0,column=2)
_wsel.bind('<<ComboboxSelected>>',lambda e: _reget())
_went = ttk.Entry(F0,justify='left',width=50,state='readonly')
_went.grid(row=0,column=3,columnspan=5,padx=3,sticky='w')
# Variable:
ttk.Label(F0,text='Variable').grid(row=1,column=1,padx=3,pady=3)
_wvar = ttk.Combobox(F0,width=15)
_wvar.grid(row=1,column=2,columnspan=2,sticky='w')
_wvar.bind('<<ComboboxSelected>>',lambda e: _vselection())
# Depth:
ttk.Label(F0,text='Depth').grid(row=2,column=1,padx=3,pady=3)
_kbox = ttk.Combobox(F0,values=['0'],width=5)
_kbox.grid(row=2,column=2)
_kbox.bind('<<ComboboxSelected>>',lambda e: _kselection())
_zbox = ttk.Label(F0,width=20)
_zbox.grid(row=2,column=3,columnspan=2,sticky='w')
# Time:
ttk.Label(F0,text='Time').grid(row=3,column=1,padx=3,pady=3)
_lbox = ttk.Combobox(F0,width=5)
_lbox.grid(row=3,column=2)
_lbox.bind('<<ComboboxSelected>>',lambda e: _lselection())
_dbox = ttk.Label(F0,width=20)
_dbox.grid(row=3,column=3,columnspan=2,sticky='w')
_dsel = ttk.Button(F0,text='Select date',command=_date)
_dsel.grid(row=3,column=5,sticky='w')
# Alias
ttk.Label(F0,text='Alias').grid(row=4,column=1,padx=3,pady=3)
_aent = ttk.Entry(F0,width=15,justify='left')
_aent.grid(row=4,column=2,columnspan=2,sticky='w')
if ii == -1:
_wsel.configure(state='disabled')
_wvar.configure(state='disabled')
_kbox.configure(state='disabled')
_lbox.configure(state='disabled')
_dsel.configure(state='disabled')
_aent.configure(state='disabled')
else:
_went['textvariable'] = self.CDF[ii].FILENAME
_wvar['textvariable'] = self.CDF[ii].varname
_wvar['values'] = self.CDF[ii].FLD.icdf.VAR_MENU
_kbox['textvariable'] = self.CDF[ii].K
_kbox['values'] = self.CDF[ii].K_LIST
_aent['textvariable'] = self.CDF[ii].ALIAS
if self.CDF[ii].FLD.icdf.idk < 0:
_kbox.configure(state='disabled')
_zbox['text']='--'
else:
_zbox['text']=self.CDF[ii].Z_LIST[self.CDF[ii].K.get()]
if self.CDF[ii].FLD.icdf.idl < 0:
_lbox.configure(state='disabled')
_dsel.configure(state='enabled')
try:
nodate = empty(self.CDF[ii].DATE[0])
except:
nodate = False
if nodate:
_dbox['text']='--'
else:
_dbox['text']=self.CDF[ii].DATE[0]
else:
_lbox['textvariable'] = self.CDF[ii].L
_lbox['values'] = self.CDF[ii].L_LIST
_dbox['text'] = self.CDF[ii].DATE[self.CDF[ii].L.get()]
_dsel.configure(state='disabled')
F0.grid(row=0,column=0)
F1 = ttk.Frame(self.Window_ncdf,padding=5)
_wsav = ttk.Button(F1,text='Save data',command=_save)
_wsav.grid(row=1,column=0,padx=3,sticky='w')
if ii == -1:
_show = ttk.Checkbutton(F1,text='Show')
_show.configure(state='disabled')
_wsav.configure(state='disabled')
else:
_show = ttk.Checkbutton(F1,text='Show',command=self.make_plot)
_show['variable'] = self.CDF[ii].show
_wsav.configure(state='normal')
_show.grid(row=1,column=5)
ttk.Button(F1,text='Cancel',command=_close).grid(row=1,column=6,padx=3)
ttk.Button(F1,text='Clear',command=_clear).grid(row=1,column=7,padx=3)
ttk.Button(F1,text='Plot',command=_done).grid(row=1,column=8,padx=3)
ttk.Label(F1,text=' ',width=8).grid(row=1,column=1,padx=3,sticky='w')
ttk.Label(F1,text=' ',width=8).grid(row=1,column=2,padx=3,sticky='w')
F1.grid(row=1,column=0)
#====================
def get_saidin(self):
#====================
'''Function to retrieve the SAIDIN data'''
def _close():
self.Window_saidin.destroy()
self.Window_saidin = None
def _selector():
name = saidin.saidin_selector(parent=self.master, wid=self.cons)
if not empty(name):
self.SAIDIN.FILENAME.set(name)
def _done():
if (empty(self.SAIDIN.FILENAME.get())):
messagebox.showinfo(message='No image selected')
return
self.SAIDIN.FLD.nc = Dataset('[FillMismatch]'+self.SAIDIN.FILENAME.get(),'r')
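# The '[FillMismatch]' prefix appears to be the netCDF-C client
# option that tolerates _FillValue/type mismatches on remote DAP
# datasets (assumption; not documented in this file).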
self.SAIDIN.FLD.icdf = tools.geocdf(self.SAIDIN.FILENAME.get(), wid=self.cons)
self.SAIDIN.varname.set('mcsst')
self.SAIDIN.FLD.varname = 'mcsst'
self.SAIDIN.FLD.x = self.SAIDIN.FLD.nc.variables['lon'][:]
self.SAIDIN.FLD.y = self.SAIDIN.FLD.nc.variables['lat'][:]
self.SAIDIN.FLD.data = self.SAIDIN.FLD.nc.variables[self.SAIDIN.FLD.varname][0,:,:].squeeze()
self.SAIDIN.FLD.xx,self.SAIDIN.FLD.yy = np.meshgrid(self.SAIDIN.FLD.x,self.SAIDIN.FLD.y)
self.DepthandDate(self.SAIDIN)
self.LAYERS.add(TYPE='SAIDIN',Filename=self.SAIDIN.FILENAME.get(),N=1,wid=self.cons)
#self.nfiles += 1
#self.FILENAMES.append(self.SAIDIN.FILENAME.get())
#self.FILETYPES.append('SAIDIN')
#self.FILEORDER.append(0)
#self.SEQUENCES.append(tk.BooleanVar(value=False))
#self.SEQLEADER.append(tk.BooleanVar(value=False))
#self.SEQNTIMES.append(1)
if self.first:
if self.drawmap is None:
self.PLOT.WEST.set(np.min(self.SAIDIN.FLD.x))
self.PLOT.EAST.set(np.max(self.SAIDIN.FLD.x))
self.PLOT.SOUTH.set(np.min(self.SAIDIN.FLD.y))
self.PLOT.NORTH.set(np.max(self.SAIDIN.FLD.y))
self.plot_initialize()
self.L.set(self.SAIDIN.L.get())
self.DATE = self.SAIDIN.DATE.copy()
self.TIME = self.SAIDIN.TIME.copy()
#self.PLOT.XLABEL.set('Longitude')
#self.PLOT.YLABEL.set('Latitude')
self.first = False
self.SAIDIN.FLD.get_info(wid=self.cons)
#try:
# self.SAIDIN.FLD.units = self.SAIDIN.ncid.variables[self.SAIDIN.FIELD.varname] \
# .getncattr('units')
#except:
# self.SAIDIN.FLD.units = ''
#
# try:
# self.SAIDIN.FLD.missing_value = self.SAIDIN.ncid.variables[self.SAIDIN.FIELD.varname] \
# .getncattr('_FillValue')
# except:
# try:
# self.SAIDIN.FLD.missing_value = self.SAIDIN.ncid.variables[self.SAIDIN.FIELD.varname] \
# .getncattr('missing_value')
# except:
# self.SAIDIN.FIELD.missing_value = None
toconsola(str(self.SAIDIN.FLD.minval),wid=self.cons)
toconsola(str(self.SAIDIN.FLD.maxval),wid=self.cons)
if self.SAIDIN.landmask.get():
toconsola('Applying land/sea mask ...',wid=self.cons)
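# lsmask is assumed to be 1 over land; mask those cells so land
# pixels are excluded from the SST field and its statistics.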
_a = self.SAIDIN.FLD.data.copy()
tmp = self.SAIDIN.FLD.nc.variables['lsmask'][0,:,:].squeeze()
msk = ma.masked_where(tmp==1,tmp)
self.SAIDIN.FLD.data = ma.array(_a,mask=msk).copy()
self.SAIDIN.FLD.mask = ma.getmask(self.SAIDIN.FLD.data)
# Contour intervals
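# Default contour limits are the (rounded) field extrema and the
# default interval is one tenth of that range; myround may fail on
# odd values, hence the try/except fallbacks.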
self.SAIDIN.FLD.minval = self.SAIDIN.FLD.data.min()
self.SAIDIN.FLD.maxval = self.SAIDIN.FLD.data.max()
try:
self.SAIDIN.PLOT.CONTOUR_MIN.set(myround(self.SAIDIN.FLD.minval))
except:
self.SAIDIN.PLOT.CONTOUR_MIN.set(self.SAIDIN.FLD.minval)
try:
self.SAIDIN.PLOT.CONTOUR_MAX.set(myround(self.SAIDIN.FLD.maxval))
except:
self.SAIDIN.PLOT.CONTOUR_MAX.set(self.SAIDIN.FLD.maxval)
dd = self.SAIDIN.PLOT.CONTOUR_MAX.get() - self.SAIDIN.PLOT.CONTOUR_MIN.get()
try:
self.SAIDIN.PLOT.CONTOUR_INTERVAL.set(myround(0.1*dd))
except:
self.SAIDIN.PLOT.CONTOUR_INTERVAL.set(0.1*dd)
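# NOTE (editor): worked example of the interval choice above (illustrative
# values): with minval=17.3 and maxval=21.8, myround gives e.g. limits
# 17 and 22, so dd = 22 - 17 = 5 and CONTOUR_INTERVAL = 0.1*dd = 0.5.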
#self.SAIDIN.FIELD.F = interpolate.interp2d(self.SAIDIN.lon, \
# self.SAIDIN.lat, \
# self.SAIDIN.FIELD.data)
_close()
self.make_plot()
def _clear():
self.SAIDIN.FILENAME.set('')
self.SAIDIN.FLD.x = None
self.SAIDIN.FLD.y = None
self.SAIDIN.FLD.xx = None
self.SAIDIN.FLD.yy = None
self.SAIDIN.FLD.data = None
_close()
if self.Window_saidin is None:
self.Window_saidin = tk.Toplevel(self.master)
self.Window_saidin.title("Satellite Sea surface temperature (SAIDIN)")
self.Window_saidin.protocol('WM_DELETE_WINDOW',_close)
else:
self.Window_saidin.lift()
F0 = ttk.Frame(self.Window_saidin,padding=5)
ttk.Entry(F0,textvariable=self.SAIDIN.FILENAME,justify='left', \
width=80).grid(row=0,column=0,columnspan=8,padx=3)
ttk.Button(F0,text='Select',command=_selector).grid(row=0,column=8,padx=3)
ttk.Checkbutton(F0,text='Mask land data',variable=self.SAIDIN.landmask).grid(row=1,column=5,padx=3)
ttk.Button(F0,text='Cancel',command=_close).grid(row=1,column=6,padx=3)
ttk.Button(F0,text='Clear',command=_clear).grid(row=1,column=7,padx=3)
ttk.Button(F0,text='Plot',command=_done).grid(row=1,column=8,padx=3)
F0.grid()
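# NOTE (editor): the land/sea masking in _done relies on numpy masked
# arrays. A minimal, self-contained sketch of the same idea, assuming a
# field 'sst' and a mask 'lsmask' where 1 marks land (names illustrative):
#
# import numpy as np
# import numpy.ma as ma
# sst = np.array([[18.2, 18.4],[17.9, 19.1]])
# lsmask = np.array([[0, 1],[0, 0]])            # 1 = land
# sst_masked = ma.masked_where(lsmask == 1, sst)
# print(sst_masked.min(), sst_masked.max())     # statistics skip masked cells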
# ===================
def get_marker(self):
# ===================
'''Widget to read Markers'''
def _close():
# ===========
self.Window_marker.destroy()
self.Window_marker = None
def _done():
# ===========
ii = self.MARKER_INDX.get()
if ii >= 0:
self.MARKER[ii].LABEL.set(_wlab.get())
self.make_plot()
self.Window_marker.destroy()
self.Window_marker = None
if self.Window_dotconfig is not None:
self.Window_dotconfig.destroy()
self.Window_dotconfig = None
self.marker_config()
def _clear():
# ===========
'''Note that markers have no time axis'''
if self.nmarker == 0:
return
ii = self.MARKER_INDX.get()
self.LAYERS.erase('MARKER',ii,wid=self.cons)
self.LAYERS.print()
toconsola('Erasing marker '+str(ii),wid=self.cons)
del self.MARKER[ii]
self.nmarker -= 1
ii = self.nmarker-1 if ii >= self.nmarker else ii
toconsola('New nmarker = '+str(self.nmarker),wid=self.cons)
self.MARKER_INDX.set(ii)
_refill(ii)
self.make_plot()
_close()
def _reget():
# ===========
self.MARKER_INDX.set(_wsel.get())
ii = self.MARKER_INDX.get()
_refill(ii)
def _refill(ii):
# ==============
if ii >= 0:
self.MARKER_LIST = list(range(self.nmarker))
_wsel['values'] = self.MARKER_LIST
_went['textvariable'] = self.MARKER[ii].FILENAME
_wstat['text'] = ' N = '+str(self.MARKER[ii].n)
_wsel.configure(state='!disabled')
_wlab['state'] = '!disabled'
_wlab['textvariable'] = self.MARKER[ii].LABEL
_show['variable'] = self.MARKER[ii].show
_aent.configure(state='normal')
_aent['textvariable'] = self.MARKER[ii].ALIAS
else:
self.MARKER = []
self.MARKER_LIST = ['0']
self.MARKER_INDX = tk.IntVar()
self.MARKER_INDX.set(0)
_wsel['values'] = self.MARKER_LIST
_went['textvariable'] = ''
_wstat['text'] = ''
_wsel.configure(state='disabled')
_wlab['textvariable'] = ''
_wlab.configure(state='disabled')
_aent.configure(state='disabled')
_show.configure(state='disabled')
def _add():
# ========
nn = filedialog.askopenfilename(filetypes=[('CSV','*.csv'),
('TXT','*.txt'),
('ALL','*')],
initialdir='./',
parent=self.Window_marker)
if len(nn) == 0:
return
else:
filename = '%s' % nn
# Not empty filename:
MARKER = geomarker.parameters()
toconsola(MARKER.MESSAGE,wid=self.cons)
MARKER.Read(filename)
if MARKER.n == 0:
return
self.nmarker += 1
self.MARKER.append(MARKER)
self.MARKER_INDX.set(self.nmarker-1)
self.MARKER_LIST = list(range(self.nmarker))
self.LAYERS.add(TYPE='MARKER',Filename=MARKER.FILENAME.get(),N=len(MARKER.lon),wid=self.cons)
self.LAYERS.print()
ii = self.MARKER_INDX.get()
_refill(ii)
#self.make_plot()
# Main window:
# ============
if self.Window_marker is not None:
self.Window_marker.lift()
return
self.Window_marker = tk.Toplevel(self.master)
self.Window_marker.title('Geomarkers')
self.Window_marker.protocol('WM_DELETE_WINDOW',_close)
if self.nmarker > 0:
ii = self.MARKER_INDX.get()
else:
ii = -1
F0 = ttk.Frame(self.Window_marker,padding=5)
# Add
ttk.Button(F0,text='Add',command=_add).grid(row=0,column=0,padx=3)
# Filename:
ttk.Label(F0,text='Marker file').grid(row=0,column=1,padx=3)
_wsel = ttk.Combobox(F0,textvariable=self.MARKER_INDX, \
values=self.MARKER_LIST,width=5)
_wsel.grid(row=0,column=2)
_wsel.bind('<<ComboboxSelected>>',lambda e: _reget())
_went = ttk.Entry(F0,justify='left',width=50,state='readonly')
_went.grid(row=0,column=3,columnspan=5,padx=3,sticky='w')
# AAA
if ii == -1:
_wstat = ttk.Label(F0,text='',width=50,justify='left')
_wsel.configure(state='disabled')
else:
_wstat = ttk.Label(F0,text=' N = '+str(self.MARKER[ii].n),width=50,justify='left')
_went['textvariable'] = self.MARKER[ii].FILENAME
_wstat.grid(row=1,column=3,columnspan=5,padx=3,sticky='w')
ttk.Label(F0,text='Marker Label').grid(row=2,column=1,padx=3)
_wlab = ttk.Entry(F0,justify='left',width=18)
_wlab.grid(row=2,column=2,columnspan=2,padx=3,sticky='w')
if ii == -1:
_wlab['state'] = 'disabled'
else:
_wlab['textvariable'] = self.MARKER[ii].LABEL
#Alias
ttk.Label(F0,text='Alias').grid(row=3,column=1,padx=3,pady=3)
_aent = ttk.Entry(F0,width=15,justify='left')
_aent.grid(row=3,column=2,columnspan=2,sticky='w')
F0.grid(row=0,column=0)
F1 = ttk.Frame(self.Window_marker,padding=5)
if ii == -1:
_show = ttk.Checkbutton(F1,text='Show')
_aent.configure(state='disabled')
else:
_show = ttk.Checkbutton(F1,text='Show',command=self.make_plot)
_show['variable']=self.MARKER[ii].show
_aent['textvariable'] = self.MARKER[ii].ALIAS
_show.grid(row=1,column=5,padx=3)
ttk.Button(F1,text='Cancel',command=_close).grid(row=1,column=6,padx=3)
ttk.Button(F1,text='Clear',command=_clear).grid(row=1,column=7,padx=3)
ttk.Button(F1,text='Done',command=_done).grid(row=1,column=8,padx=3)
F1.grid(row=1,column=0)
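# NOTE (editor): a minimal sketch of the _add flow above, assuming (as the
# code does) that geomarker.parameters() exposes Read(filename) and an 'n'
# attribute with the number of points read; 'stations.csv' is hypothetical:
#
# MARKER = geomarker.parameters()
# MARKER.Read('stations.csv')
# if MARKER.n > 0:
#     self.MARKER.append(MARKER)
#     self.nmarker += 1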
# ======================
def get_shapefile(self):
# ==========================
SHAPE = shape.parameters()
toconsola(SHAPE.MESSAGE,wid=self.cons)
def _close():
self.Window_shapefile.destroy()
self.Window_shapefile = None
def _done():
ii = self.SHAPE_INDX.get()
if ii >= 0:
if self.SHAPE[ii].CROP.get():
toconsola('Cropping shapefile',wid=self.cons)
xmin = self.PLOT.WEST.get() + self.PLOT.CROP_PAD.get()
xmax = self.PLOT.EAST.get() - self.PLOT.CROP_PAD.get()
ymin = self.PLOT.SOUTH.get() + self.PLOT.CROP_PAD.get()
ymax = self.PLOT.NORTH.get() - self.PLOT.CROP_PAD.get()
bbox = [xmin, xmax, ymin, ymax]
self.SHAPE[ii].Crop(bbox)
self.SHAPE[ii].LABEL.set(_wlab.get())
self.make_plot()
self.Window_shapefile.destroy()
self.Window_shapefile = None
if self.Window_geoconfig is not None:
self.Window_geoconfig.destroy()
self.Window_geoconfig = None
self.shape_config()
def _clear():
# ===========
'''Note that shape geometries have no time axis in principle'''
if self.nshape == 0:
return
ii = self.SHAPE_INDX.get()
self.LAYERS.erase('SHAPE',ii,wid=self.cons)
self.LAYERS.print()
toconsola('Erasing shape '+str(ii),wid=self.cons)
del self.SHAPE[ii]
self.nshape -= 1
ii = self.nshape-1 if ii >= self.nshape else ii
toconsola('New nshape = '+str(self.nshape),wid=self.cons)
self.SHAPE_INDX.set(ii)
_refill(ii)
self.make_plot()
_close()
def _reget():
# ===========
self.SHAPE_INDX.set(_wsel.get())
ii = self.SHAPE_INDX.get()
_refill(ii)
def _refill(ii):
# ==============
if ii >= 0:
self.SHAPE_LIST = list(range(self.nshape))
_wsel['values'] = self.SHAPE_LIST
_went['textvariable'] = self.SHAPE[ii].FILENAME
_wstat['text'] = ' N = '+str(self.SHAPE[ii].n)+' geometries'
_wsel.configure(state='!disabled')
_wlab['state'] = '!disabled'
_wlab['textvariable'] = self.SHAPE[ii].LABEL
_show['variable'] = self.SHAPE[ii].show
_aent.configure(state='normal')
_aent['textvariable'] = self.SHAPE[ii].ALIAS
_wcrp['variable'] = self.SHAPE[ii].CROP
_wcrp.configure(state='normal')
_wpad.configure(state='normal')
else:
self.SHAPE = []
self.SHAPE_LIST = ['0']
self.SHAPE_INDX = tk.IntVar()
self.SHAPE_INDX.set(0)
_wsel['values'] = self.SHAPE_LIST
_went['textvariable'] = ''
_wstat['text'] = ''
_wsel.configure(state='disabled')
_wlab['textvariable'] = ''
_wlab.configure(state='disabled')
_aent.configure(state='disabled')
_show.configure(state='disabled')
_wcrp.configure(state='disabled')
_wpad.configure(state='disabled')
def _add():
# ========
nn = filedialog.askopenfilename(filetypes=[('shp','*.shp')],
initialdir='./',
parent=self.Window_shapefile)
if len(nn) == 0:
return
else:
filename = '%s' % nn
# Not empty filename:
SHAPE.Read(filename)
if SHAPE.n == 0:
return
self.nshape += 1
self.SHAPE.append(SHAPE)
self.SHAPE_INDX.set(self.nshape-1)
self.SHAPE_LIST = list(range(self.nshape))
self.LAYERS.add(TYPE='SHAPE',Filename=SHAPE.FILENAME.get(),N=SHAPE.n,wid=self.cons)
self.LAYERS.print()
ii = self.SHAPE_INDX.get()
_refill(ii)
# Main window:
# ============
if self.Window_shapefile is not None:
self.Window_shapefile.lift()
return
self.Window_shapefile = tk.Toplevel(self.master)
self.Window_shapefile.title('Shape file')
self.Window_shapefile.protocol('WM_DELETE_WINDOW',_close)
if self.nshape > 0:
ii = self.SHAPE_INDX.get()
else:
ii = -1
F0 = ttk.Frame(self.Window_shapefile,padding=5)
# Add
ttk.Button(F0,text='Add',command=_add).grid(row=0,column=0,padx=3)
# Filename:
ttk.Label(F0,text='Shape file').grid(row=0,column=1,padx=3)
_wsel = ttk.Combobox(F0,textvariable=self.SHAPE_INDX, \
values=self.SHAPE_LIST,width=5)
_wsel.grid(row=0,column=2)
_wsel.bind('<<ComboboxSelected>>',lambda e: _reget())
_went = ttk.Entry(F0,justify='left',width=50,state='readonly')
_went.grid(row=0,column=3,columnspan=5,padx=3,sticky='w')
# AAA
if ii == -1:
_wstat = ttk.Label(F0,text='',width=50,justify='left')
_wsel.configure(state='disabled')
else:
_wstat = ttk.Label(F0,text=' N = '+str(self.SHAPE[ii].n),width=50,justify='left')
_went['textvariable'] = self.SHAPE[ii].FILENAME
_wstat.grid(row=1,column=3,columnspan=5,padx=3,sticky='w')
ttk.Label(F0,text='Shape Label').grid(row=2,column=1,padx=3)
_wlab = ttk.Entry(F0,justify='left',width=18)
_wlab.grid(row=2,column=2,columnspan=2,padx=3,sticky='w')
if ii == -1:
_wlab['state'] = 'disabled'
else:
_wlab['textvariable'] = self.SHAPE[ii].LABEL
ttk.Label(F0,text='Alias').grid(row=3,column=1,padx=3,pady=3)
_aent = ttk.Entry(F0,width=18,justify='left')
_aent.grid(row=3,column=2,columnspan=2,padx=3,sticky='w')
ttk.Label(F0,text='Crop').grid(row=4,column=1,padx=3)
_wpad = ttk.Entry(F0,textvariable=self.PLOT.CROP_PAD,width=9,justify='left')
_wpad.grid(row=4,column=2,sticky='w',padx=3)
_wcrp = ttk.Checkbutton(F0)
_wcrp.grid(row=4,column=3,sticky='w')
F0.grid(row=0,column=0)
F1 = ttk.Frame(self.Window_shapefile,padding=5)
if ii == -1:
_show = ttk.Checkbutton(F1,text='Show')
_aent.configure(state='disabled')
_wcrp.configure(state='disabled')
_wpad.configure(state='disabled')
else:
_show = ttk.Checkbutton(F1,text='Show',command=self.make_plot)
_show['variable']=self.SHAPE[ii].show
_aent['textvariable'] = self.SHAPE[ii].ALIAS
_wcrp['variable'] = self.SHAPE[ii].CROP
_wpad.configure(state='normal')
_wcrp.configure(state='normal')
_show.grid(row=1,column=5,padx=3)
ttk.Button(F1,text='Clear',command=_clear).grid(row=1,column=6,padx=3)
ttk.Button(F1,text='Done',command=_done).grid(row=1,column=7,padx=3)
F1.grid(row=1,column=0)
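# NOTE (editor): the cropping box built in _done is the current map view
# shrunk by CROP_PAD on every side. Worked example with illustrative values
# WEST=-6, EAST=10, SOUTH=35, NORTH=45 and CROP_PAD=0.5:
#
# bbox = [-6+0.5, 10-0.5, 35+0.5, 45-0.5]   # [xmin, xmax, ymin, ymax]
# # i.e. bbox = [-5.5, 9.5, 35.5, 44.5], as passed to SHAPE[ii].Crop(bbox)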
# ================
def get_wms(self):
# ==========================
pass
# ======================
def marker_config(self):
# =======================
'''Widget to configure Markers'''
#self.dot_config(self.MARKER[self.MARKER_INDX.get()])
global ishow
if self.nmarker == 0:
messagebox.showinfo(message='No Marker file opened yet')
return
def _cancel():
# ============
self.Window_dotconfig.destroy()
self.Window_dotconfig = None
def _apply():
# ============
self.make_plot()
def _done():
# ============
self.make_plot()
self.Window_dotconfig.destroy()
self.Window_dotconfig = None
def _selected():
# ==============
global ishow
itab = self.Mnb.index('current')
ishow.destroy()
# The usual configuration:
ii = self.MARKER_INDX.get()
_went['textvariable'] = self.MARKER[ii].FILENAME
ishow = ttk.Frame(self.Window_dotconfig,padding=10)
# Define tabs:
self.Mnb = ttk.Notebook(ishow)
page0 = ttk.Frame(self.Mnb)
page1 = ttk.Frame(self.Mnb)
page2 = ttk.Frame(self.Mnb)
page3 = ttk.Frame(self.Mnb)
self.Mnb.add(page0,text='Label Aspect')
self.Mnb.add(page1,text='Marker Aspect')
self.Mnb.add(page2,text='Label Text')
self.Mnb.add(page3,text='Marker coordinates')
self.Mnb.grid()
self.Mnb.select(itab)
# Page0
ttk.Label(page0,
text='Show as text',
padding=3).grid(row=0,column=0,padx=3,sticky='e')
ttk.Checkbutton(page0,
variable=self.MARKER[ii].textmode).grid(row=0,
column=1,
padx=3,
sticky='w')
ttk.Label(page0,
text='Generic label',
padding=3).grid(row=1,column=0,padx=3,sticky='e')
ttk.Entry(page0,
textvariable=self.MARKER[ii].LABEL).grid(row=1,
column=1,
padx=3,
sticky='w')
# Page 1
dotplot.Configuration(page1,self.MARKER[ii].PLOT)
# Page 2
geomarker.TextConfigure(page2,self.MARKER[ii].PLOT)
# Page 3
geomarker.ShowData(page3,self.MARKER[ii])
f0 = ttk.Frame(ishow,padding=5)
ttk.Button(f0,text='Cancel',command=_cancel,padding=5). \
grid(row=0,column=0,padx=3)
ttk.Button(f0,text='Apply',command=_apply,padding=5). \
grid(row=0,column=1,padx=3)
ttk.Button(f0,text='Done',command=_done,padding=5). \
grid(row=0,column=2,padx=3)
f0.grid(sticky='ew',columnspan=3)
ishow.grid()
def _loadconf():
# =============
'''Load dot configuration'''
ii = self.MARKER_INDX.get()
toconsola('Restoring dot configuration',wid=self.cons)
try:
self.MARKER[ii].PLOT.load(self.MARKER[ii].PLOT.FILECONF)
except:
toconsola('Error: Unable to load file '+self.MARKER[ii].PLOT.FILECONF,wid=self.cons)
self.make_plot()
def _saveconf():
# =============
'''Save dot configuration'''
ii = self.MARKER_INDX.get()
toconsola('Saving dot configuration',wid=self.cons)
try:
self.MARKER[ii].PLOT.save(self.MARKER[ii].PLOT.FILECONF)
except:
toconsola('Error: Unable to write file '+self.MARKER[ii].PLOT.FILECONF,wid=self.cons)
def _loadfromconf():
# ==================
'''Load dot configuration from a file'''
ii = self.MARKER_INDX.get()
nn = filedialog.askopenfilename(title='Load dot configuration',
parent=self.Window_dotconfig,
initialdir=COSMO_CONF)
if len(nn) == 0:
return
self.MARKER[ii].PLOT.FILECONF = '%s' % nn
toconsola('Restoring dot configuration from '+
self.MARKER[ii].PLOT.FILECONF,wid=self.cons)
try:
self.MARKER[ii].PLOT.load(self.MARKER[ii].PLOT.FILECONF)
except:
toconsola('Error: Unable to load file '+self.MARKER[ii].PLOT.FILECONF,wid=self.cons)
self.make_plot()
def _saveasconf():
# ================
'''Save dot configuration to a file'''
ii = self.MARKER_INDX.get()
nn = filedialog.asksaveasfilename(title='Save dot configuration',
parent=self.Window_dotconfig,
initialdir=COSMO_CONF,
confirmoverwrite=True)
if nn is None or len(nn) == 0:
return
self.MARKER[ii].PLOT.FILECONF = '%s' % nn
toconsola('Saving dot configuration to '+self.MARKER[ii].PLOT.FILECONF,wid=self.cons)
try:
self.MARKER[ii].PLOT.save(self.MARKER[ii].PLOT.FILECONF)
except:
toconsola('Error: Unable to write file '+self.MARKER[ii].PLOT.FILECONF,wid=self.cons)
if self.Window_dotconfig is not None:
self.Window_dotconfig.lift()
return
self.Window_dotconfig = tk.Toplevel(self.master)
self.Window_dotconfig.title('Marker plot configuration')
self.Window_dotconfig.resizable(width=True,height=True)
self.Window_dotconfig.protocol('WM_DELETE_WINDOW',_cancel)
menubar = tk.Menu(self.Window_dotconfig)
menu = tk.Menu(menubar,tearoff=0)
menubar.add_cascade(label='Configuration',menu=menu)
menu.add_command(label='Restore',command=_loadconf)
menu.add_command(label='Restore from',command=_loadfromconf)
menu.add_command(label='Save',command=_saveconf)
menu.add_command(label='Save as',command=_saveasconf)
try:
self.Window_dotconfig.config(menu=menubar)
except AttributeError:
# master is a toplevel window (Python 2.4/Tkinter 1.63)
self.master.tk.call(self.Window_dotconfig, "config", "-menu", menubar)
fsel = ttk.Frame(self.Window_dotconfig,padding=10)
ttk.Label(fsel,text="File: ").grid(row=0,column=0,sticky='e',padx=3)
_wsel = ttk.Combobox(fsel,textvariable=self.MARKER_INDX,
values=self.MARKER_LIST,width=5)
_wsel.grid(row=0,column=1,sticky='w',padx=3)
_wsel.bind('<<ComboboxSelected>>',lambda e:_selected())
_went = ttk.Entry(fsel,justify='left',width=80,state='readonly')
_went.grid(row=0,column=2,columnspan=8,padx=3,sticky='w')
fsel.grid()
# The usual configuration:
ii = self.MARKER_INDX.get()
_went['textvariable'] = self.MARKER[ii].FILENAME
ishow = ttk.Frame(self.Window_dotconfig,padding=10)
# Define tabs:
self.Mnb = ttk.Notebook(ishow)
page0 = ttk.Frame(self.Mnb)
page1 = ttk.Frame(self.Mnb)
page2 = ttk.Frame(self.Mnb)
page3 = ttk.Frame(self.Mnb)
self.Mnb.add(page0,text='Label Aspect')
self.Mnb.add(page1,text='Marker Aspect')
self.Mnb.add(page2,text='Label Text')
self.Mnb.add(page3,text='Marker coordinates')
self.Mnb.grid()
# Page0
ttk.Label(page0,
text='Show as text',
padding=3).grid(row=0,column=0,padx=3,sticky='e')
ttk.Checkbutton(page0,
variable=self.MARKER[ii].textmode).grid(row=0,
column=1,
padx=3,
sticky='w')
ttk.Label(page0,
text='Generic label',
padding=3).grid(row=1,column=0,padx=3,sticky='e')
ttk.Entry(page0,
textvariable=self.MARKER[ii].LABEL).grid(row=1,
column=1,
padx=3,
sticky='w')
# Page 1
dotplot.Configuration(page1,self.MARKER[ii].PLOT)
# Page 2
geomarker.TextConfigure(page2,self.MARKER[ii].PLOT)
# Page 3
geomarker.ShowData(page3,self.MARKER[ii])
f0 = ttk.Frame(ishow,padding=5)
ttk.Button(f0,text='Cancel',command=_cancel,padding=5). \
grid(row=0,column=0,padx=3)
ttk.Button(f0,text='Apply',command=_apply,padding=5). \
grid(row=0,column=1,padx=3)
ttk.Button(f0,text='Done',command=_done,padding=5). \
grid(row=0,column=2,padx=3)
f0.grid(sticky='ew')
ishow.grid()
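# NOTE (editor): every *_config window repeats the same Configuration-menu
# pattern: Restore/Save act on PLOT.FILECONF, while Restore from/Save as
# first ask for a file and then update PLOT.FILECONF. A condensed sketch,
# assuming a PLOT object with load/save methods as used above:
#
# def _saveasconf():
#     nn = filedialog.asksaveasfilename(initialdir=COSMO_CONF)
#     if nn is None or len(nn) == 0:
#         return
#     PLOT.FILECONF = '%s' % nn
#     PLOT.save(PLOT.FILECONF)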
# ======================
def shape_config(self):
# =======================
'''Widget to configure Shape geometries'''
#self.dot_config(self.MARKER[self.MARKER_INDX.get()])
global ishow
if self.nshape == 0:
messagebox.showinfo(message='No Shape file opened yet')
return
def _cancel():
# ============
self.Window_geoconfig.destroy()
self.Window_geoconfig = None
def _apply():
# ============
self.make_plot()
def _done():
# ============
self.make_plot()
self.Window_geoconfig.destroy()
self.Window_geoconfig = None
def _selected():
# ==============
global ishow
# ?????
itab = self.Mnb.index('current')
ishow.destroy()
# The usual configuration:
ii = self.SHAPE_INDX.get()
_went['textvariable'] = self.SHAPE[ii].FILENAME
ishow = ttk.Frame(self.Window_geoconfig,padding=10)
# Define tabs:
self.Mnb = ttk.Notebook(ishow)
page0 = ttk.Frame(self.Mnb)
page1 = ttk.Frame(self.Mnb)
page2 = ttk.Frame(self.Mnb)
page3 = ttk.Frame(self.Mnb)
self.Mnb.add(page0,text='Label Aspect')
self.Mnb.add(page1,text='Geometry Aspect')
self.Mnb.add(page2,text='Text Aspect')
self.Mnb.add(page3,text='Hide Features')
self.Mnb.grid()
self.Mnb.select(itab)
# Page0
ttk.Label(page0, text='Show as text',padding=3). \
grid(row=0,column=0,padx=3,sticky='e')
ttk.Checkbutton(page0,variable=self.SHAPE[ii].textmode). \
grid(row=0,column=1,padx=3,sticky='w')
ttk.Label(page0, text='Generic label',padding=3). \
grid(row=1,column=0,padx=3,sticky='e')
ttk.Entry(page0,textvariable=self.SHAPE[ii].LABEL). \
grid(row=1,column=1,padx=3,sticky='w')
ttk.Label(page0, text='Label key',padding=3). \
grid(row=2,column=0,padx=3,sticky='e')
_ksel = ttk.Combobox(page0,textvariable=self.SHAPE[ii].LABEL_KEY,
values=self.SHAPE[ii].KEY_LIST,width=12)
_ksel.grid(row=2,column=1,sticky='w',padx=3)
_ksel.bind('<<ComboboxSelected>>',lambda e:self.SHAPE[ii].get_name())
# Page 1
geoplot.Configuration(page1,self.SHAPE[ii].PLOT)
# Page 2
shape.TextConfigure(page2,self.SHAPE[ii].PLOT)
# Page 3
shape.HideData(page3,self.SHAPE[ii])
f0 = ttk.Frame(ishow,padding=5)
ttk.Button(f0,text='Cancel',command=_cancel,padding=5). \
grid(row=0,column=0,padx=3)
ttk.Button(f0,text='Apply',command=_apply,padding=5). \
grid(row=0,column=1,padx=3)
ttk.Button(f0,text='Done',command=_done,padding=5). \
grid(row=0,column=2,padx=3)
f0.grid(sticky='ew',columnspan=3)
ishow.grid()
def _loadconf():
# =============
'''Load geometry plot configuration'''
ii = self.SHAPE_INDX.get()
toconsola('Restoring geometry plot configuration',wid=self.cons)
try:
self.SHAPE[ii].PLOT.load(self.SHAPE[ii].PLOT.FILECONF)
except:
toconsola('Error: Unable to load file '+self.SHAPE[ii].PLOT.FILECONF,wid=self.cons)
self.make_plot()
def _saveconf():
# =============
'''Save geometry plot configuration'''
ii = self.SHAPE_INDX.get()
toconsola('Saving geometry plot configuration',wid=self.cons)
try:
self.SHAPE[ii].PLOT.save(self.SHAPE[ii].PLOT.FILECONF)
except:
toconsola('Error: Unable to write file '+self.SHAPE[ii].PLOT.FILECONF,wid=self.cons)
def _loadfromconf():
# ==================
'''Load geometry plot configuration from a file'''
ii = self.SHAPE_INDX.get()
nn = filedialog.askopenfilename(title='Load geometry configuration',
parent=self.Window_geoconfig,
initialdir=COSMO_CONF)
if len(nn) == 0:
return
self.SHAPE[ii].PLOT.FILECONF = '%s' % nn
toconsola('Restoring geometry plot configuration from '+
self.SHAPE[ii].PLOT.FILECONF,wid=self.cons)
try:
self.SHAPE[ii].PLOT.load(self.SHAPE[ii].PLOT.FILECONF)
except:
toconsola('Error: Unable to load file '+self.SHAPE[ii].PLOT.FILECONF,wid=self.cons)
self.make_plot()
def _saveasconf():
# ================
'''Save geometry plot configuration to a file'''
ii = self.SHAPE_INDX.get()
nn = filedialog.asksaveasfilename(title='Save geometry configuration',
parent=self.Window_geoconfig,
initialdir=COSMO_CONF,
confirmoverwrite=True)
if nn is None or len(nn) == 0:
return
self.SHAPE[ii].PLOT.FILECONF = '%s' % nn
toconsola('Saving geometry plot configuration to '+self.SHAPE[ii].PLOT.FILECONF,wid=self.cons)
try:
self.SHAPE[ii].PLOT.save(self.SHAPE[ii].PLOT.FILECONF)
except:
toconsola('Error: Unable to write file '+self.SHAPE[ii].PLOT.FILECONF,wid=self.cons)
if self.Window_geoconfig is not None:
self.Window_geoconfig.lift()
return
self.Window_geoconfig = tk.Toplevel(self.master)
self.Window_geoconfig.title('Shape geometry plot configuration')
self.Window_geoconfig.resizable(width=True,height=True)
self.Window_geoconfig.protocol('WM_DELETE_WINDOW',_cancel)
menubar = tk.Menu(self.Window_geoconfig)
menu = tk.Menu(menubar,tearoff=0)
menubar.add_cascade(label='Configuration',menu=menu)
menu.add_command(label='Restore',command=_loadconf)
menu.add_command(label='Restore from',command=_loadfromconf)
menu.add_command(label='Save',command=_saveconf)
menu.add_command(label='Save as',command=_saveasconf)
try:
self.Window_geoconfig.config(menu=menubar)
except AttributeError:
# master is a toplevel window (Python 2.4/Tkinter 1.63)
self.master.tk.call(self.Window_geoconfig, "config", "-menu", menubar)
fsel = ttk.Frame(self.Window_geoconfig,padding=10)
ttk.Label(fsel,text="File: ").grid(row=0,column=0,sticky='e',padx=3)
_wsel = ttk.Combobox(fsel,textvariable=self.SHAPE_INDX,
values=self.SHAPE_LIST,width=5)
_wsel.grid(row=0,column=1,sticky='w',padx=3)
_wsel.bind('<<ComboboxSelected>>',lambda e:_selected())
_went = ttk.Entry(fsel,justify='left',width=80,state='readonly')
_went.grid(row=0,column=2,columnspan=8,padx=3,sticky='w')
fsel.grid()
# The usual configuration:
ii = self.SHAPE_INDX.get()
_went['textvariable'] = self.SHAPE[ii].FILENAME
ishow = ttk.Frame(self.Window_geoconfig,padding=10)
# Define tabs:
self.Mnb = ttk.Notebook(ishow)
page0 = ttk.Frame(self.Mnb)
page1 = ttk.Frame(self.Mnb)
page2 = ttk.Frame(self.Mnb)
page3 = ttk.Frame(self.Mnb)
self.Mnb.add(page0,text='Label Aspect')
self.Mnb.add(page1,text='Geometry Aspect')
self.Mnb.add(page2,text='Text Aspect')
self.Mnb.add(page3,text='Hide Feature')
self.Mnb.grid()
# Page0
ttk.Label(page0,text='Show as text',padding=3). \
grid(row=0,column=0,padx=3,sticky='e')
ttk.Checkbutton(page0,variable=self.SHAPE[ii].textmode). \
grid(row=0, column=1,padx=3, sticky='w')
ttk.Label(page0,text='Generic label',padding=3). \
grid(row=1,column=0,padx=3,sticky='e')
ttk.Entry(page0,textvariable=self.SHAPE[ii].LABEL).\
grid(row=1, column=1,padx=3, sticky='w')
ttk.Label(page0, text='Label key',padding=3). \
grid(row=2,column=0,padx=3,sticky='e')
_ksel = ttk.Combobox(page0,textvariable=self.SHAPE[ii].LABEL_KEY,
values=self.SHAPE[ii].KEY_LIST,width=12)
_ksel.grid(row=2,column=1,sticky='w',padx=3)
_ksel.bind('<<ComboboxSelected>>',lambda e:self.SHAPE[ii].get_name())
# Page 1
geoplot.Configuration(page1,self.SHAPE[ii].PLOT)
# Page 2
shape.TextConfigure(page2,self.SHAPE[ii].PLOT)
# Page 3 (note: can become unmanageable when there are many geometries)
shape.HideData(page3,self.SHAPE[ii])
f0 = ttk.Frame(ishow,padding=5)
ttk.Button(f0,text='Cancel',command=_cancel,padding=5). \
grid(row=0,column=0,padx=3)
ttk.Button(f0,text='Apply',command=_apply,padding=5). \
grid(row=0,column=1,padx=3)
ttk.Button(f0,text='Done',command=_done,padding=5). \
grid(row=0,column=2,padx=3)
f0.grid(sticky='ew')
ishow.grid()
# =======================
def get_lagrangian(self):
# =======================
'''Widget to retrieve Lagrangian trajectory data'''
self.LSOURCE = tk.StringVar()
self.LSOURCE.set(self.FLOAT_OPTIONS[0])
#EG
self.COUNT=[]
def _cancel():
# ===========
self.Window_float.destroy()
self.Window_float = None
def _close():
# ===========
ii = self.FLOAT_INDX.get()
if self.FLOAT[ii].CROP.get():
nt = self.FLOAT[ii].nrecords
ppi = [i for i in range(nt) if self.FLOAT[ii].DATE[i] >= self.DATE[0]]
ppf = [i for i in range(nt) if self.FLOAT[ii].DATE[i] > self.DATE[-1]]
pi = ppi[0] if len(ppi) > 0 else 0
pf = ppf[0] - 1 if len(ppf) > 0 else nt - 1
#print('Initial index : ', pi)
#print('Final index : ', pf)
#print(self.FLOAT[ii].nfloats)
#print(self.FLOAT[ii].nrecords)
if self.FLOAT[ii].nfloats > 1:
lon = self.FLOAT[ii].lon[pi:pf+1,:]
lat = self.FLOAT[ii].lat[pi:pf+1,:]
date = self.FLOAT[ii].DATE[pi:pf+1]
TIME = self.FLOAT[ii].TIME[pi:pf+1]
self.FLOAT[ii].lon = lon
self.FLOAT[ii].lat = lat
self.FLOAT[ii].DATE = date
self.FLOAT[ii].TIME = TIME
else:
lon = self.FLOAT[ii].lon[pi:pf+1]
lat = self.FLOAT[ii].lat[pi:pf+1]
date = self.FLOAT[ii].DATE[pi:pf+1]
TIME = self.FLOAT[ii].TIME[pi:pf+1]
self.FLOAT[ii].lon = lon
self.FLOAT[ii].lat = lat
self.FLOAT[ii].DATE = date
self.FLOAT[ii].TIME = TIME
self.FLOAT[ii].nrecords = len(date)
#print('DATE[0] = ',self.FLOAT[ii].DATE[0])
#print('DATE[n] = ',self.FLOAT[ii].DATE[-1])
#print('TIME[0] = ',datetime.datetime.fromtimestamp(self.FLOAT[ii].TIME[0]))
#print('TIME[n] = ',datetime.datetime.fromtimestamp(self.FLOAT[ii].TIME[-1]))
#print(self.FLOAT[ii].lon)
#print(self.FLOAT[ii].lat)
#print(self.FLOAT[ii].date)
self.Window_float.destroy()
self.Window_float = None
self.make_plot()
if self.Window_lineconfig is not None:
self.Window_lineconfig.destroy()
self.Window_lineconfig = None
self.lagrangian_config()
def _clear():
# ===========
if self.nfloat == 0:
return
ii = self.FLOAT_INDX.get()
self.LAYERS.erase('FLOAT',ii,wid=self.cons)
self.LAYERS.print()
#for i in range(self.nfiles):
# if self.FILETYPES[i] == 'FLOAT' and self.FILEORDER[i] == ii:
# del self.FILENAMES[i]
# del self.FILETYPES[i]
# del self.FILEORDER[i]
# del self.SEQUENCES[i]
# del self.SEQLEADER[i]
# del self.SEQNTIMES[i]
#self.nfiles -= 1
if self.LAYERS.n == 0:
self.TIME = []
self.DATE = []
self.L.set(0)
self.L_LIST = []
self.NL = 0
self.bnext.configure(state='disabled')
self.bprev.configure(state='disabled')
self.PLOT.TLABEL.set('')
self.lbox['values'] = self.L_LIST
self.lbox.configure(state='disabled')
self.first = True
toconsola('Erasing record '+str(ii),wid=self.cons)
del self.FLOAT[ii]
self.nfloat -= 1
ii = self.nfloat-1 if ii >= self.nfloat else ii
toconsola('new nfloat = '+str(self.nfloat),wid=self.cons)
self.FLOAT_INDX.set(ii)
_refill(ii)
#_close()
def _reget():
# ===========
self.FLOAT_INDX.set(_wsel.get())
ii = self.FLOAT_INDX.get()
_refill(ii)
def _refill(ii):
# ==============
#print("entro refill",ii)
if ii >= 0:
self.COUNT.append(tk.StringVar())
self.COUNT[-1].set(str(ii))
self.FLOAT_LIST = list(range(self.nfloat))
ttk.Label(self.F1,textvariable=self.COUNT[-1],anchor='center', \
background="#fff",foreground="#000000",width=5).grid(row=ii+1,column=0)
ttk.Label(self.F1,textvariable=self.FLOAT[ii].FILENAME,\
background="#fff",foreground="#000000",justify='left').grid(row=ii+1,column=1,padx=3,sticky='w')
ttk.Entry(self.F1,textvariable=self.FLOAT[ii].ALIAS,width=15).grid(row=ii+1,column=2,sticky='w')
tk.Checkbutton(self.F1,variable=self.FLOAT[ii].CROP).grid(row=ii+1,column=3,sticky='we')
tk.Checkbutton(self.F1,variable=self.FLOAT[ii].show).grid(row=ii+1,column=4,sticky='we')
#EG _wsel['values'] = self.FLOAT_LIST
#EG _went['textvariable'] = self.FLOAT[ii].FILENAME
#EG _wstat['text'] = ' Nfloats = '+str(self.FLOAT[ii].nfloats)
#EG _wsel.configure(state='normal')
#EG _show.configure(state='normal')
#EG _show['variable']=self.FLOAT[ii].show
#EG _aent.configure(state='normal')
#EG _aent['textvariable'] = self.FLOAT[ii].ALIAS
#EG _wcrp.configure(state='normal')
#EG _wcrp['variable']=self.FLOAT[ii].CROP
else:
self.FLOAT = []
self.FLOAT_LIST = ['0']
self.FLOAT_INDX = tk.IntVar()
self.FLOAT_INDX.set(0)
#EG _wsel['values'] = self.FLOAT_LIST
#EG _went['textvariable'] = ''
#EG _wstat['text'] = ''
#EG _wsel.configure(state='disabled')
#EG _aent.configure(state='disabled')
#EG _show.configure(state='disabled')
#EG _wcrp.configure(state='disabled')
def _add():
# ========
ISOURCE = self.FLOAT_OPTIONS.index(self.LSOURCE.get())
if ISOURCE == 0:
# self.Lagrangian_types=[('Netcdf','*.nc'),('JSON','*.json'),('GEOJSON','*.geojson'),('ALL','*')]
#EG OLD code
# ''' nn = filedialog.askopenfile(parent=self.Window_float, \
# filetypes=types)
# try:
# if empty(nn.name):
# return
# except:
# return
# _load_trajectory(nn.name)
# '''
#EG New code
nn = filedialog.askopenfilenames(parent=self.Window_float,\
filetypes=self.Lagrangian_types)
if len(nn) == 0:
return
toconsola("======= Trajectories ======",tag="o",wid=self.cons)
for filename in nn:
_load_trajectory(filename)
# Remember the selected extension and use as default for the next call
# Consider the last filename and retrieve its extension:
selected_basename,selected_extension = os.path.splitext(filename)
indx = -1
iii = -1
iall = -1
for ftype in self.Lagrangian_types:
indx = indx + 1
if selected_extension in ftype[1]: iii = indx
if '*' in ftype[1]: iall = indx
# If no extension has been found, we assume that it was the ALL entry:
if iii == -1: iii = iall
self.Lagrangian_types.insert(0,self.Lagrangian_types.pop(iii))
toconsola("=====================",tag="o", wid=self.cons)
elif ISOURCE == 1:
path = '%s' % filedialog.askdirectory(parent=self.Window_float, \
title='Select local trajectory folder')
if empty(path):
return
filelist = folderList(path,'geojson')
if len(filelist) > 0:
for f in filelist:
filename = join(path,f)
toconsola('Loading file: '+filename,wid=self.cons)
_load_trajectory(filename)
elif ISOURCE == 2:
url = simple_form('Select remote trajectory folder','url')
if empty(url):
return
filelist = urlList(url,'geojson')
if len(filelist) > 0:
for filename in filelist:
toconsola('Loading file: '+filename,wid=self.cons)
_load_trajectory(filename)
elif ISOURCE == 3:
filelist = db.select_exp()
if len(filelist) > 0:
for filename in filelist:
_load_trajectory(filename)
def _load_trajectory(filename):
# ==================================
FLT = lagrangian.parameters(wid=self.cons)
FLT.Read(filename)
if FLT.nfloats is None or FLT.nfloats==0 or FLT.nrecords==0:
return
if self.NL > 0:
if FLT.nfloats > 1:
MAPX = []
MAPY = []
for i in range(FLT.nfloats):
f = interpolate.interp1d(FLT.TIME,FLT.lon[:,i],
bounds_error=False, fill_value=np.NaN)
MAPX.append(f(self.TIME))
f = interpolate.interp1d(FLT.TIME,FLT.lat[:,i],
bounds_error=False, fill_value=np.NaN)
MAPY.append(list(f(self.TIME)))
FLT.MAPX = np.array(MAPX).T
FLT.MAPY = np.array(MAPY).T
else:
FLT.Fx = interpolate.interp1d(FLT.TIME,FLT.lon,
bounds_error=False, fill_value=np.NaN)
FLT.MAPX = FLT.Fx(self.TIME)
FLT.Fy = interpolate.interp1d(FLT.TIME,FLT.lat,
bounds_error=False, fill_value=np.NaN)
FLT.MAPY = FLT.Fy(self.TIME)
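# NOTE (editor): a minimal sketch of the time interpolation above, with
# illustrative numbers: a trajectory sampled at times T with longitudes
# LON is remapped onto a common axis TIME:
#
# from scipy import interpolate
# import numpy as np
# T   = np.array([0., 10., 20.])
# LON = np.array([1.0, 1.5, 2.0])
# f = interpolate.interp1d(T, LON, bounds_error=False, fill_value=np.nan)
# f(np.array([5., 15., 25.]))   # -> [1.25, 1.75, nan]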
self.nfloat += 1
self.FLOAT.append(FLT)
self.FLOAT_INDX.set(self.nfloat-1)
self.FLOAT_LIST = list(range(self.nfloat))
n = self.LAYERS.n
# Adding a FLOAT in the Drawing class
#
nt = len(FLT.lon)
self.LAYERS.add(TYPE='FLOAT',Filename=FLT.FILENAME.get(),N=nt,wid=self.cons)
#self.nfiles += 1
#self.FILENAMES.append(FLT.FILENAME.get())
#self.FILETYPES.append('FLOAT')
#self.SEQUENCES.append(tk.BooleanVar(value=False))
#self.SEQLEADER.append(tk.BooleanVar(value=False))
#self.SEQNTIMES.append(1)
#self.FILEORDER.append(self.nfloat-1)
ii = self.FLOAT_INDX.get()
if self.first:
# Set the plot limits according to the data
if self.drawmap is None:
self.PLOT.WEST.set(np.nanmin(FLT.lon)-1)
self.PLOT.EAST.set(np.nanmax(FLT.lon)+1)
self.PLOT.SOUTH.set(np.nanmin(FLT.lat)-1)
self.PLOT.NORTH.set(np.nanmax(FLT.lat)+1)
self.plot_initialize()
#self.PLOT.XLABEL.set('Longitude')
#self.PLOT.YLABEL.set('Latitude')
self.DATE = FLT.DATE.copy()
self.TIME = FLT.TIME.copy()
self.PLOT.TLABEL.set(self.DATE[self.L.get()])
self.PLOT.VIDEO_L2.set(len(self.DATE)-1)
self.first = False
if nt > 1:
if self.NL == 0:
toconsola('FLOAT initiates Time axis',wid=self.cons)
#self.LAYERS.nsequence = 1
#self.LAYERS.INSEQUENCE[n-1].set(True)
#self.LAYERS.SEQUENCER[n-1].set(True)
#self.LAYERS.leader = n-1
#self.LAYERS.seqlen = nt
# self.SEQUENCES[-1].set(True)
# self.SEQLEADER[-1].set(True)
# self.SEQLEADER_INDX = self.nfiles
self.FLOAT[ii].LINK.set(True)
self.FLOAT[ii].MAPX = self.FLOAT[ii].lon.copy()
self.FLOAT[ii].MAPY = self.FLOAT[ii].lat.copy()
self.DATE = self.FLOAT[ii].DATE.copy()
self.TIME = self.FLOAT[ii].TIME.copy()
self.L.set(self.FLOAT[ii].L.get())
self.L_LIST = list(range(nt))
self.NL = nt
self.lbox.configure(state='normal')
self.lbox['values'] = self.L_LIST
if self.L.get() < self.NL-1:
self.bnext.configure(state='normal')
if self.L.get() > 0:
self.bprev.configure(state='normal')
else:
if nt == self.NL:
toconsola('Linking trajectory to TIME axis',wid=self.cons)
# self.LAYERS.nsequence += 1
# self.LAYERS.INSEQUENCE[n-1].set(True)
# self.LAYERS.SEQUENCER[n-1].set(False)
# self.nsequence += 1
# self.SEQUENCES[-1].set(True)
# self.SEQLEADER[-1].set(False)
self.FLOAT[ii].LINK.set(True)
self.FLOAT[ii].L.set(self.L.get()) #Synchronize records
#print("load self.LAYERS.nsequence",self.LAYERS.nsequence)
#print("abasn refill",self.FLOAT_LIST)
_refill(ii)
# Main window:
# ============
if self.Window_float is None:
self.Window_float = tk.Toplevel(self.master)
self.Window_float.title("Lagrangian Trajectories")
self.Window_float.protocol('WM_DELETE_WINDOW',_cancel)
else:
self.Window_float.lift()
if self.nfloat > 0:
ii = self.FLOAT_INDX.get()
else:
ii = -1
F0 = ttk.Frame(self.Window_float,padding=5)
#EG New interface
# Add
#EG ttk.Combobox(F0,textvariable=self.LSOURCE, \
#EG values=self.FLOAT_OPTIONS).grid(row=0,column=0,padx=3)
ttk.Button(F0,text='Import',command=_add).grid(row=0,column=0,padx=3)
ttk.Combobox(F0,textvariable=self.LSOURCE, \
values=self.FLOAT_OPTIONS).grid(row=0,column=1)
F0.grid(row=0,column=0,sticky="w")
# Filename:
ttk.Separator(self.Window_float, orient='horizontal').grid(row=1,column=0,sticky="nesw")
#EG F1 = ttk.Frame(self.Window_float,padding=5)
#EG ttk.Label(F0,text='Float file').grid(row=0,column=1,padx=3)
#EG _wsel = ttk.Combobox(F0,textvariable=self.FLOAT_INDX, \
#EG values=self.FLOAT_LIST,width=5)
#EG _wsel.grid(row=0,column=2)
#EG _wsel.bind('<<ComboboxSelected>>',lambda e: _reget())
#EG _went = ttk.Entry(F0,justify='left',width=50,state='readonly')
#EG _went.grid(row=0,column=3,columnspan=5,padx=3,sticky='w')
#EG
self.F1 = ttk.Frame(self.Window_float,padding=5)
ttk.Label(self.F1,text='Nfloat',width=5).grid(row=0,column=0)
ttk.Label(self.F1,text='Float file',anchor="center",width=50).grid(row=0,column=1,sticky='we')
ttk.Label(self.F1,text='Alias',anchor="center",width=15).grid(row=0,column=2)
ttk.Label(self.F1,text='Crop').grid(row=0,column=3)
ttk.Label(self.F1,text='Show').grid(row=0,column=4)
self.F1.grid(row=2,column=0)
#EGttk.Label(F1,text='Float file').grid(row=0,column=1,padx=3)
#_wsel = ttk.Combobox(F1,textvariable=self.FLOAT_INDX, \
# values=self.FLOAT_LIST,width=5)
#_wsel.grid(row=0,column=2)
#_wsel.bind('<<ComboboxSelected>>',lambda e: _reget())
#_went = ttk.Entry(F1,justify='left',width=50,state='readonly')
#_went.grid(row=0,column=3,columnspan=5,padx=3,sticky='w')
# AAA
#if ii == -1:
# _wstat = ttk.Label(F1,text='',width=50,justify='left')
# _wsel.configure(state='disabled')
#else:
# _wstat = ttk.Label(F1,text=' Floats in the file= '+str(self.FLOAT[ii].nfloats),width=50,justify='left')
# _went['textvariable'] = self.FLOAT[ii].FILENAME
#_wstat.grid(row=1,column=3,columnspan=5,padx=3,sticky='w')
#Alias
#ttk.Label(F1,text='Alias').grid(row=2,column=1,padx=3,pady=3)
#_aent = ttk.Entry(F0,width=15,justify='left')
#_aent.grid(row=2,column=2,columnspan=2,sticky='w')
#_wcrp = ttk.Checkbutton(F0,text='Crop')
#_wcrp.grid(row=3,column=1,sticky='w')
#EGF0.grid(row=0,column=0)
ttk.Separator(self.Window_float, orient='horizontal').grid(row=3,column=0,sticky="nesw")
F2 = ttk.Frame(self.Window_float,padding=5)
if ii == -1:
pass
#_show = ttk.Checkbutton(F2,text='Show')
#_aent.configure(state='disabled')
#_wcrp.configure(state='disabled')
else:
pass
#_show = ttk.Checkbutton(F2,text='Show',command=self.make_plot)
#_show['variable']=self.FLOAT[ii].show
#_aent['textvariable'] = self.FLOAT[ii].ALIAS
#_wcrp['variable'] = self.FLOAT[ii].CROP
#_show.grid(row=1,column=5,padx=3)
ttk.Button(F2,text='Cancel',command=_cancel).grid(row=0,column=0,padx=3)
ttk.Button(F2,text='Clear',command=_clear).grid(row=0,column=1,padx=3)
ttk.Button(F2,text='Plot',command=_close).grid(row=0,column=2,padx=3)
F2.grid(row=4,column=0)
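# NOTE (editor): worked sketch of the CROP logic in _close above: records
# are kept from the first trajectory date >= DATE[0] up to (and including)
# the last date <= DATE[-1]. With illustrative dates d0..d4 and a map
# window spanning d1..d3:
#
# ppi = [1, 2, 3, 4]          # indices with DATES[i] >= DATE[0]
# ppf = [4]                   # indices with DATES[i] >  DATE[-1]
# pi, pf = ppi[0], ppf[0]-1   # lon[pi:pf+1] keeps d1, d2, d3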
# ========================
def currents_config(self):
# ========================
global fshow
if self.nvec == 0:
messagebox.showinfo(message='No currents file opened yet')
return
#else:
# self.vector_config(self.VEC[self.VEC_INDX.get()].VEL)
def _cancel():
# ============
self.Window_vectorconfig.destroy()
self.Window_vectorconfig = None
def _apply():
# ============
self.make_plot()
def _done():
# ============
self.make_plot()
self.Window_vectorconfig.destroy()
self.Window_vectorconfig = None
def _selected():
# ==============
global fshow
fshow.destroy()
# The usual configuration:
ii = self.VEC_INDX.get()
_went['textvariable'] = self.VEC[ii].UFILENAME
fshow = ttk.Frame(self.Window_vectorconfig,padding=10)
vectorplot.Configuration(parent=fshow,
PLOT=self.VEC[ii].PLOT)
f0 = ttk.Frame(fshow,padding=5)
ttk.Button(f0,text='Cancel',command=_cancel,padding=5). \
grid(row=0,column=0,padx=3)
ttk.Button(f0,text='Apply',command=_apply,padding=5). \
grid(row=0,column=1,padx=3)
ttk.Button(f0,text='Done',command=_done,padding=5). \
grid(row=0,column=2,padx=3)
f0.grid(sticky='ew',columnspan=3)
fshow.grid()
def _loadconf():
# =============
'''Load vector configuration'''
toconsola('Restoring vector configuration from '+
self.VEC[ii].PLOT.FILECONF,wid=self.cons)
try:
self.VEC[ii].PLOT.load(self.VEC[ii].PLOT.FILECONF)
self.make_plot()
except:
toconsola('Error: Unable to load file '+
self.VEC[ii].PLOT.FILECONF,wid=self.cons)
def _saveconf():
# =============
'''Save vector configuration'''
toconsola('Saving vector configuration to '+
self.VEC[ii].PLOT.FILECONF,wid=self.cons)
try:
self.VEC[ii].PLOT.save(self.VEC[ii].PLOT.FILECONF)
self.make_plot()
except:
toconsola('Error: Unable to write file '+
self.VEC[ii].PLOT.FILECONF,wid=self.cons)
def _loadfromconf():
# ==================
'''Load vector configuration from a file'''
nn = filedialog.askopenfilename(title='Load vector configuration',
parent=self.Window_vectorconfig,
initialdir=COSMO_CONF)
if len(nn) == 0:
return
self.VEC[ii].PLOT.FILECONF = '%s' % nn
toconsola('Restoring vector configuration from '+
self.VEC[ii].PLOT.FILECONF,wid=self.cons)
try:
self.VEC[ii].PLOT.load(self.VEC[ii].PLOT.FILECONF)
self.make_plot()
except:
toconsola('Error: Unable to load file '+
self.VEC[ii].PLOT.FILECONF,wid=self.cons)
def _saveasconf():
# ================
'''Save vector configuration to a file'''
nn = filedialog.asksaveasfilename(title='Save vector configuration',
parent=self.Window_vectorconfig,
initialdir=COSMO_CONF,
confirmoverwrite=True)
if nn is None or len(nn) == 0:
return
self.VEC[ii].PLOT.FILECONF = '%s' % nn
toconsola('Saving vector configuration to '+self.VEC[ii].PLOT.FILECONF,wid=self.cons)
try:
self.VEC[ii].PLOT.save(self.VEC[ii].PLOT.FILECONF)
except:
toconsola('Error: Unable to write file '+
self.VEC[ii].PLOT.FILECONF,wid=self.cons)
if self.Window_vectorconfig is not None:
self.Window_vectorconfig.lift()
return
self.Window_vectorconfig = tk.Toplevel(self.master)
self.Window_vectorconfig.title('Vector plot configuration')
self.Window_vectorconfig.resizable(width=True,height=True)
self.Window_vectorconfig.protocol('WM_DELETE_WINDOW',_cancel)
menubar = tk.Menu(self.Window_vectorconfig)
menu = tk.Menu(menubar,tearoff=0)
menubar.add_cascade(label='Configuration',menu=menu)
menu.add_command(label='Restore',command=_loadconf)
menu.add_command(label='Restore from',command=_loadfromconf)
menu.add_command(label='Save',command=_saveconf)
menu.add_command(label='Save as',command=_saveasconf)
self.Window_vectorconfig.config(menu=menubar)
#try:
# self.Window_vectorconfig.config(menu=menubar)
#except AttributeError:
# # master is a toplevel window (Python 2.4/Tkinter 1.63)
# master.tk.call(self.Window_vectorconfig, "config", "-menu", menubar)
fsel = ttk.Frame(self.Window_vectorconfig,padding=10)
ttk.Label(fsel,text="File: ").grid(row=0,column=0,sticky='e',padx=3)
_wsel = ttk.Combobox(fsel,textvariable=self.VEC_INDX,
values=self.VEC_LIST,width=5)
_wsel.grid(row=0,column=1,sticky='w',padx=3)
_wsel.bind('<<ComboboxSelected>>',lambda e:_selected())
_went = ttk.Entry(fsel,justify='left',width=80,state='readonly')
_went.grid(row=0,column=2,columnspan=8,padx=3,sticky='w')
fsel.grid()
# The usual configuration:
ii = self.VEC_INDX.get()
_went['textvariable'] = self.VEC[ii].UFILENAME
fshow = ttk.Frame(self.Window_vectorconfig,padding=10)
vectorplot.Configuration(parent=fshow,
PLOT=self.VEC[ii].PLOT)
f0 = ttk.Frame(fshow,padding=5)
ttk.Button(f0,text='Cancel',command=_cancel,padding=5). \
grid(row=0,column=0,padx=3)
ttk.Button(f0,text='Apply',command=_apply,padding=5). \
grid(row=0,column=1,padx=3)
ttk.Button(f0,text='Done',command=_done,padding=5). \
grid(row=0,column=2,padx=3)
f0.grid(sticky='ew',columnspan=3)
fshow.grid()
# ======================
def saidin_config(self):
# ======================
if empty(self.SAIDIN.FILENAME.get()):
messagebox.showinfo(message='No SST image opened yet')
return
def _apply():
# ===========
self.make_plot()
def _done():
# ==========
self.make_plot()
self.Window_saidinconfig.destroy()
self.Window_saidinconfig = None
def _loadconf():
# =============
'''Load contour configuration'''
toconsola('Restoring contour configuration from '+
self.SAIDIN.PLOT.FILECONF,wid=self.cons)
try:
self.SAIDIN.PLOT.load(self.SAIDIN.PLOT.FILECONF)
self.make_plot()
except:
toconsola('Error: Unable to load file '+
self.SAIDIN.PLOT.FILECONF,wid=self.cons)
def _saveconf():
# =============
'''Save contour configuration'''
toconsola('Saving contour configuration to '+
self.SAIDIN.PLOT.FILECONF,wid=self.cons)
try:
self.SAIDIN.PLOT.save(self.SAIDIN.PLOT.FILECONF)
except:
toconsola('Error: Unable to write file '+
self.SAIDIN.PLOT.FILECONF,wid=self.cons)
def _loadfromconf():
# ==================
'''Load contour configuration from a file'''
nn = filedialog.askopenfilename(title='Load contour configuration',
parent=self.Window_saidinconfig,
initialdir=COSMO_CONF)
if len(nn) == 0:
return
self.SAIDIN.PLOT.FILECONF = '%s' % nn
toconsola('Restoring contour configuration from '+
self.SAIDIN.PLOT.FILECONF,wid=self.cons)
try:
self.SAIDIN.PLOT.load(self.SAIDIN.PLOT.FILECONF)
self.make_plot()
except:
toconsola('Error: Unable to load file '+
self.SAIDIN.PLOT.FILECONF,wid=self.cons)
def _saveasconf():
# ================
'''Save contour configuration to a file'''
nn = filedialog.asksaveasfilename(title='Save contour configuration',
parent=self.Window_saidinconfig,
initialdir=COSMO_CONF,
confirmoverwrite=True)
if nn is None or len(nn) == 0:
return
self.SAIDIN.PLOT.FILECONF = '%s' % nn
toconsola('Saving contour configuration to '+
self.SAIDIN.PLOT.FILECONF,wid=self.cons)
try:
self.SAIDIN.PLOT.save(self.SAIDIN.PLOT.FILECONF)
except:
toconsola('Error: Unable to write file '+
self.SAIDIN.PLOT.FILECONF,wid=self.cons)
if self.Window_saidinconfig is not None:
self.Window_saidinconfig.lift()
return
self.Window_saidinconfig = tk.Toplevel(self.master)
self.Window_saidinconfig.title('SST image configuration')
self.Window_saidinconfig.resizable(width=True,height=True)
self.Window_saidinconfig.protocol('WM_DELETE_WINDOW',_done)
menubar = tk.Menu(self.Window_saidinconfig)
menu = tk.Menu(menubar,tearoff=0)
menubar.add_cascade(label='Configuration',menu=menu)
menu.add_command(label='Restore',command=_loadconf)
menu.add_command(label='Restore from',command=_loadfromconf)
menu.add_command(label='Save',command=_saveconf)
menu.add_command(label='Save as',command=_saveasconf)
try:
self.Window_saidinconfig.config(menu=menubar)
except AttributeError:
# master is a toplevel window (Python 2.4/Tkinter 1.63)
self.master.tk.call(self.Window_saidinconfig, "config", "-menu", menubar)
gshow = ttk.Frame(self.Window_saidinconfig,padding=10)
contourplot.Configuration(parent=gshow,
varname=self.SAIDIN.FLD.varname,
units=self.SAIDIN.FLD.units,
missing=self.SAIDIN.FLD.missing,
minval=self.SAIDIN.FLD.minval,
maxval=self.SAIDIN.FLD.maxval,
PLOT=self.SAIDIN.PLOT)
f0 = ttk.Frame(gshow,padding=5)
ttk.Button(f0,text='Apply',command=_apply,padding=5). \
grid(row=0,column=1,padx=3)
ttk.Button(f0,text='Close',command=_done,padding=5). \
grid(row=0,column=2,padx=3)
f0.grid(sticky='ew',columnspan=3)
gshow.grid()
# =======================
def contour_config(self):
# =======================
global gshow
if self.ncdf == 0:
messagebox.showinfo(message='No Netcdf file opened yet')
return
def _cancel():
# ============
self.Window_contourconfig.destroy()
self.Window_contourconfig = None
def _apply():
# ===========
self.make_plot()
def _done():
# ==========
self.Window_contourconfig.destroy()
self.Window_contourconfig = None
self.make_plot()
def _selected():
# ==============
global gshow
gshow.destroy()
# The usual configuration
ii = self.CDF_INDX.get()
_went['textvariable'] = self.CDF[ii].FILENAME
gshow = ttk.Frame(self.Window_contourconfig,padding=10)
contourplot.Configuration(parent=gshow,
varname=self.CDF[ii].FLD.varname,
units=self.CDF[ii].FLD.units,
missing=self.CDF[ii].FLD.missing,
minval=self.CDF[ii].FLD.minval,
maxval=self.CDF[ii].FLD.maxval,
PLOT=self.CDF[ii].PLOT)
f0 = ttk.Frame(gshow,padding=5)
ttk.Button(f0,text='Cancel',command=_cancel,padding=5). \
grid(row=0,column=0,padx=3)
ttk.Button(f0,text='Apply',command=_apply,padding=5). \
grid(row=0,column=1,padx=3)
ttk.Button(f0,text='Done',command=_done,padding=5). \
grid(row=0,column=2,padx=3)
f0.grid(sticky='ew',columnspan=3)
gshow.grid()
def _loadconf():
# =============
'''Load contour configuration'''
toconsola('Restoring contour configuration from '+
self.CDF[ii].PLOT.FILECONF,wid=self.cons)
try:
self.CDF[ii].PLOT.load(self.CDF[ii].PLOT.FILECONF)
self.make_plot()
except:
toconsola('Error: Unable to load file '+
self.CDF[ii].PLOT.FILECONF,wid=self.cons)
def _saveconf():
# =============
'''Save contour configuration'''
toconsola('Saving contour configuration to '+
self.CDF[ii].PLOT.FILECONF,wid=self.cons)
try:
self.CDF[ii].PLOT.save(self.CDF[ii].PLOT.FILECONF)
except:
toconsola('Error: Unable to write file '+
self.CDF[ii].PLOT.FILECONF,wid=self.cons)
def _loadfromconf():
# ==================
'''Load contour configuration from a file'''
nn = filedialog.askopenfilename(title='Load contour configuration',
parent=self.Window_contourconfig,
initialdir=COSMO_CONF)
if len(nn) == 0:
return
self.CDF[ii].PLOT.FILECONF = '%s' % nn
toconsola('Restoring contour configuration from '+
self.CDF[ii].PLOT.FILECONF,wid=self.cons)
try:
self.CDF[ii].PLOT.load(self.CDF[ii].PLOT.FILECONF)
self.make_plot()
except:
toconsola('Error: Unable to load file '+
self.CDF[ii].PLOT.FILECONF,wid=self.cons)
def _saveasconf():
# ================
'''Save contour configuration to a file'''
nn = filedialog.asksaveasfilename(title='Save contour configuration',
parent=self.Window_contourconfig,
initialdir=COSMO_CONF,
confirmoverwrite=True)
if nn is None or len(nn) == 0:
return
self.CDF[ii].PLOT.FILECONF = '%s' % nn
toconsola('Saving contour configuration to '+
self.CDF[ii].PLOT.FILECONF,wid=self.cons)
try:
self.CDF[ii].PLOT.save(self.CDF[ii].PLOT.FILECONF)
except:
toconsola('Error: Unable to write file '+
self.CDF[ii].PLOT.FILECONF,wid=self.cons)
if self.Window_contourconfig is not None:
self.Window_contourconfig.lift()
return
self.Window_contourconfig = tk.Toplevel(self.master)
self.Window_contourconfig.title('Contour plot configuration')
self.Window_contourconfig.resizable(width=True,height=True)
self.Window_contourconfig.protocol('WM_DELETE_WINDOW',_cancel)
menubar = tk.Menu(self.Window_contourconfig)
menu = tk.Menu(menubar,tearoff=0)
menubar.add_cascade(label='Configuration',menu=menu)
menu.add_command(label='Restore',command=_loadconf)
menu.add_command(label='Restore from',command=_loadfromconf)
menu.add_command(label='Save',command=_saveconf)
menu.add_command(label='Save as',command=_saveasconf)
try:
self.Window_contourconfig.config(menu=menubar)
except AttributeError:
# master is a toplevel window (Python 2.4/Tkinter 1.63)
self.master.tk.call(self.Window_contourconfig, "config", "-menu", menubar)
fsel = ttk.Frame(self.Window_contourconfig,padding=10)
ttk.Label(fsel,text="File: ").grid(row=0,column=0,sticky='e',padx=3)
_wsel = ttk.Combobox(fsel,textvariable=self.CDF_INDX,
values=self.CDF_LIST,width=5)
_wsel.grid(row=0,column=1,sticky='w',padx=3)
_wsel.bind('<<ComboboxSelected>>',lambda e:_selected())
_went = ttk.Entry(fsel,justify='left',width=80,state='readonly')
_went.grid(row=0,column=2,columnspan=8,padx=3,sticky='w')
fsel.grid()
# The usual configuration:
ii = self.CDF_INDX.get()
_went ['textvariable'] = self.CDF[ii].FILENAME
gshow = ttk.Frame(self.Window_contourconfig,padding=10)
contourplot.Configuration(parent=gshow,
varname=self.CDF[ii].FLD.varname,
units=self.CDF[ii].FLD.units,
missing=self.CDF[ii].FLD.missing,
minval=self.CDF[ii].FLD.minval,
maxval=self.CDF[ii].FLD.maxval,
PLOT=self.CDF[ii].PLOT)
f0 = ttk.Frame(gshow,padding=5)
ttk.Button(f0,text='Apply',command=_apply,padding=5). \
grid(row=0,column=1,padx=3)
ttk.Button(f0,text='Close',command=_done,padding=5). \
grid(row=0,column=2,padx=3)
f0.grid(sticky='ew',columnspan=3)
gshow.grid()
# ==========================
def lagrangian_config(self):
# ==========================
global hshow
if self.nfloat == 0:
messagebox.showinfo(message='No Trajectory file opened yet')
return
def _cancel():
# ============
self.Window_lineconfig.destroy()
self.Window_lineconfig = None
def _apply():
# ============
self.make_plot()
def _done():
# ============
self.make_plot()
self.Window_lineconfig.destroy()
self.Window_lineconfig = None
def _selected():
# ==============
global hshow
hshow.destroy()
# The usual configuration:
ii = self.FLOAT_INDX.get()
_went['textvariable'] = self.FLOAT[ii].FILENAME
hshow = ttk.Frame(self.Window_lineconfig,padding=10)
# Define tabs:
nb = ttk.Notebook(hshow)
page1 = ttk.Frame(nb)
page2 = ttk.Frame(nb)
page3 = ttk.Frame(nb)
nb.add(page1,text='Line Configuration')
nb.add(page2,text='Trajectory options')
nb.add(page3,text='Trajectory data')
nb.grid()
# Page 1
#lineplot.WinConfig(self.Window_lineconfig,LL)
lineplot.Configuration(page1,self.FLOAT[ii].PLOT)
# Page 2
lineplot.Configuration_OnMap(page2,self.FLOAT[ii].PLOT,self.FLOAT[ii])
# Page 3
lagrangian.ShowData(page3,self.FLOAT[ii])
f0 = ttk.Frame(hshow,padding=5)
ttk.Button(f0,text='Cancel',command=_cancel,padding=5). \
grid(row=0,column=0,padx=3)
ttk.Button(f0,text='Apply',command=_apply,padding=5). \
grid(row=0,column=1,padx=3)
ttk.Button(f0,text='Done',command=_done,padding=5). \
grid(row=0,column=2,padx=3)
f0.grid(sticky='ew',columnspan=3)
hshow.grid()
def _loadconf():
# =============
'''Load line configuration'''
toconsola('Restoring line configuration from '+
self.FLOAT[ii].PLOT.FILECONF,wid=self.cons)
try:
self.FLOAT[ii].PLOT.load(self.FLOAT[ii].PLOT.FILECONF)
self.make_plot()
except:
toconsola('Error: Unable to load file '+self.FLOAT[ii].PLOT.FILECONF,wid=self.cons)
def _saveconf():
# =============
'''Save line configuration'''
toconsola('Saving line configuration to '+
self.FLOAT[ii].PLOT.FILECONF,wid=self.cons)
try:
self.FLOAT[ii].PLOT.save(self.FLOAT[ii].PLOT.FILECONF)
except:
toconsola('Error: Unable to write file '+self.FLOAT[ii].PLOT.FILECONF,wid=self.cons)
def _loadfromconf():
# ==================
'''Load line configuration from a file'''
nn = filedialog.askopenfilename(title='Load line configuration',
parent=self.Window_lineconfig,
initialdir=COSMO_CONF)
if len(nn) == 0:
return
self.FLOAT[ii].PLOT.FILECONF = '%s' % nn
toconsola('Restoring line configuration from '+
self.FLOAT[ii].PLOT.FILECONF,wid=self.cons)
try:
self.FLOAT[ii].PLOT.load(self.FLOAT[ii].PLOT.FILECONF)
self.make_plot()
except:
toconsola('Error: Unable to load file '+
self.FLOAT[ii].PLOT.FILECONF,wid=self.cons)
def _saveasconf():
# ================
'''Save line configuration to a file'''
nn = filedialog.asksaveasfilename(title='Save line configuration',
parent=self.Window_lineconfig,
initialdir=COSMO_CONF,
confirmoverwrite=True)
if nn is None or len(nn) == 0:
return
self.FLOAT[ii].PLOT.FILECONF = '%s' % nn
toconsola('Saving line configuration to '+self.FLOAT[ii].PLOT.FILECONF,wid=self.cons)
try:
self.FLOAT[ii].PLOT.save(self.FLOAT[ii].PLOT.FILECONF)
except:
toconsola('Error: Unable to write file '+self.FLOAT[ii].PLOT.FILECONF,wid=self.cons)
if self.Window_lineconfig is not None:
self.Window_lineconfig.lift()
return
self.Window_lineconfig = tk.Toplevel(self.master)
self.Window_lineconfig.title('Trajectory plot configuration')
self.Window_lineconfig.resizable(width=False,height=False)
self.Window_lineconfig.protocol('WM_DELETE_WINDOW',_cancel)
menubar = tk.Menu(self.Window_lineconfig)
menu = tk.Menu(menubar,tearoff=0)
menubar.add_cascade(label='Configuration',menu=menu)
menu.add_command(label='Restore',command=_loadconf)
menu.add_command(label='Restore from',command=_loadfromconf)
menu.add_command(label='Save',command=_saveconf)
menu.add_command(label='Save as',command=_saveasconf)
try:
self.Window_lineconfig.config(menu=menubar)
except AttributeError:
# master is a toplevel window (Python 2.4/Tkinter 1.63)
self.master.tk.call(self.Window_lineconfig, "config", "-menu", menubar)
fsel = ttk.Frame(self.Window_lineconfig,padding=10)
ttk.Label(fsel,text="File: ").grid(row=0,column=0,sticky='e',padx=3)
_wsel = ttk.Combobox(fsel,textvariable=self.FLOAT_INDX,
values=self.FLOAT_LIST,width=5)
_wsel.grid(row=0,column=1,sticky='w',padx=3)
_wsel.bind('<<ComboboxSelected>>',lambda e:_selected())
_went = ttk.Entry(fsel,justify='left',width=80,state='readonly')
_went.grid(row=0,column=2,columnspan=8,padx=3,sticky='w')
fsel.grid()
# The usual configuration
ii = self.FLOAT_INDX.get()
_went['textvariable'] = self.FLOAT[ii].FILENAME
hshow = ttk.Frame(self.Window_lineconfig,padding=10)
# Define tabs:
nb = ttk.Notebook(hshow)
page1 = ttk.Frame(nb)
page2 = ttk.Frame(nb)
page3 = ttk.Frame(nb)
nb.add(page1,text='Line Configuration')
nb.add(page2,text='Trajectory options')
nb.add(page3,text='Trajectory data')
nb.grid()
# Page 1
#lineplot.WinConfig(self.Window_lineconfig,LL)
lineplot.Configuration(page1,self.FLOAT[ii].PLOT)
# Page 2
lineplot.Configuration_OnMap(page2,self.FLOAT[ii].PLOT,self.FLOAT[ii])
# Page 3
lagrangian.ShowData(page3,self.FLOAT[ii])
f0 = ttk.Frame(hshow,padding=5)
ttk.Button(f0,text='Cancel',command=_cancel,padding=5). \
grid(row=0,column=0,padx=3)
ttk.Button(f0,text='Apply',command=_apply,padding=5). \
grid(row=0,column=1,padx=3)
ttk.Button(f0,text='Done',command=_done,padding=5). \
grid(row=0,column=2,padx=3)
f0.grid(sticky='ew',columnspan=3)
hshow.grid()
# ===================
def lselection(self):
# ===================
'''Sets all files in the SEQUENCE list to the same time step'''
self.L.set(int(self.lbox.get()))
self.PLOT.TLABEL.set(self.DATE[self.L.get()])
L = self.L.get()
if L == 0:
self.bprev.configure(state='disabled')
else:
self.bprev.configure(state='normal')
if L == self.NL - 1:
self.bnext.configure(state='disabled')
else:
self.bnext.configure(state='normal')
for i in range(self.nvec):
if self.VEC[i].LINK.get():
self.VEC[i].L.set(L)
self.VEC[i].read(wid=self.cons)
for i in range(self.ncdf):
if self.CDF[i].LINK.get():
self.CDF[i].L.set(L)
self.CDF[i].read(wid=self.cons)
# for i in range(self.LAYERS.n):
# if self.LAYERS.INSEQUENCE[i].get():
# jj = self.LAYERS.TYPE_INDEX[i]
# if self.LAYERS.TYPE[i] == 'VEC':
# self.VEC[jj].L.set(L)
# self.VEC[jj].read(wid=self.cons)
# elif self.LAYERS.TYPE[i] == 'FLD':
# self.CDF[jj].L.set(L)
# self.CDF[jj].read(update_lims=False,wid=self.cons)
self.make_plot()
# ==============
def tprev(self):
# ==============
'''Points to the previous time step'''
if self.L.get() > 0:
self.L.set(self.L.get() - 1)
self.PLOT.TLABEL.set(self.DATE[self.L.get()])
if self.L.get() == 0:
self.bprev.configure(state='disabled')
if self.L.get() < self.NL - 1:
self.bnext.configure(state='normal')
for i in range(self.nvec):
if self.VEC[i].LINK.get():
Lm = self.VEC[i].L.get() - 1
self.VEC[i].L.set(Lm)
self.VEC[i].read(wid=self.cons)
for i in range(self.ncdf):
if self.CDF[i].LINK.get():
Lm = self.CDF[i].L.get() - 1
self.CDF[i].L.set(Lm)
self.CDF[i].read(wid=self.cons)
# for i in range(self.LAYERS.n):
# if self.LAYERS.INSEQUENCE[i].get():
# jj = self.LAYERS.TYPE_INDEX[i]
# if self.LAYERS.TYPE[i] == 'VEC':
# L = self.VEC[jj].L.get()
# Lm = self.VEC[jj].L.get() - 1
# self.VEC[jj].L.set(Lm)
# self.VEC[jj].read(wid=self.cons)
# elif self.LAYERS.TYPE[i] == 'FLD':
# L = self.CDF[jj].L.get()
# Lm = self.CDF[jj].L.get() - 1
# self.CDF[jj].L.set(Lm)
# self.CDF[jj].read(update_lims=False,wid=self.cons)
self.make_plot()
else:
return
# ==============
def tnext(self):
# ==============
'''Points to the next time step'''
if self.L.get() < self.NL - 1:
self.L.set(self.L.get() + 1)
self.PLOT.TLABEL.set(self.DATE[self.L.get()])
if self.L.get() == self.NL - 1:
self.bnext.configure(state='disabled')
if self.L.get() > 0:
self.bprev.configure(state='normal')
for i in range(self.nvec):
if self.VEC[i].LINK.get():
Lp = self.VEC[i].L.get() + 1
self.VEC[i].L.set(Lp)
self.VEC[i].read(wid=self.cons)
for i in range(self.ncdf):
if self.CDF[i].LINK.get():
Lp = self.CDF[i].L.get() + 1
self.CDF[i].L.set(Lp)
self.CDF[i].read(wid=self.cons)
# for i in range(self.LAYERS.n):
# if self.LAYERS.INSEQUENCE[i].get():
# jj = self.LAYERS.TYPE_INDEX[i]
# if self.LAYERS.TYPE[i] == 'VEC':
# L = self.VEC[jj].L.get()
# Lp = self.VEC[jj].L.get() + 1
# self.VEC[jj].L.set(Lp)
# self.VEC[jj].read(wid=self.cons)
# elif self.LAYERS.TYPE[i] == 'FLD':
# L = self.CDF[jj].L.get()
# Lp = self.CDF[jj].L.get() + 1
# self.CDF[jj].L.set(Lp)
# self.CDF[jj].read(update_lims=False,wid=self.cons)
#toconsola("EG Drawing next.................",wid=self.cons)
self.make_plot()
#toconsola("EG next DOne",wid=self.cons)
else:
return
# # ====================
# def data_update(self):
# # ====================
# '''Makes the new plot according to the user selections. It call self.read to get the new data'''
# self.read_UV(self.FLD.ncid,self.FLD.icdf,self.FLD.uid,self.FLD.vid)
# self.read_S(self.FLD.ncid,self.FLD.icdf,self.FLD.sid)
# self.make_plot()
# ===========================
def get_date(self,ncid,icdf):
# ===========================
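'''Reads the numeric time axis (if present) and converts it to datetime
objects with num2date, using the time units and calendar stored in the
icdf structure (netCDF4-style conversion; attribute names assumed from
their use elsewhere in this module)'''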
self.T_LIST = []
if icdf.idl > -1:
wrk = ncid.variables[icdf.tname][:]
self.T_LIST = list(wrk)
else:
self.T_LIST = []
self.DATE = []
for i in range(icdf.nt):
self.DATE.append(num2date(self.T_LIST[i], \
units=icdf.time_units, \
calendar=icdf.time_calendar))
# ========================
def plot_initialize(self):
# ========================
# Meridian and parallel range and intervals:
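# Heuristic: aim for roughly four gridlines across the domain, truncating
# the interval to 0.01 deg (and rounding to whole degrees when it exceeds 1).
# For example, with WEST=-6 and EAST=10 the span/4 gives an interval of 4 deg,
# a first meridian at trunc(-6/4 - 2)*4 = -12 and a last one at
# trunc(10/4 + 2)*4 = 16.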
tmp1 = np.trunc(100*(self.PLOT.EAST.get()-self.PLOT.WEST.get())/4)/100
if tmp1 > 1:
tmp1 = np.rint(tmp1)
self.PLOT.MERIDIAN_INT.set(tmp1)
self.PLOT.MERIDIAN_INI.set(np.trunc(self.PLOT.WEST.get()/tmp1 - 2)*tmp1)
self.PLOT.MERIDIAN_FIN.set(np.trunc(self.PLOT.EAST.get()/tmp1 + 2)*tmp1)
tmp1 = None
tmp2 = np.trunc(100*(self.PLOT.NORTH.get() - self.PLOT.SOUTH.get())/4)/100
if tmp2 > 1:
tmp2 = np.rint(tmp2)
self.PLOT.PARALLEL_INT.set(tmp2)
self.PLOT.PARALLEL_INI.set(np.trunc(self.PLOT.SOUTH.get()/tmp2 - 2)*tmp2)
self.PLOT.PARALLEL_FIN.set(np.trunc(self.PLOT.NORTH.get()/tmp2 + 2)*tmp2)
tmp2 = None
# ==================
def make_plot(self):
# ==================
#toconsola("EG make_plot:\n PLOT.OUTPUT_FIGURE: "+str(self.PLOT.OUTPUT_FIGURE.get()),
# wid=self.cons)
if self.PLOT.OUTPUT_FIGURE.get():
if self.fig is None:
#toconsola("\n EGL creation", wid=self.cons)
self.Window_mapa = tk.Toplevel(self.master)
self.Window_mapa.title("COSMO-VIEW plotting tool")
self.Window_mapa.resizable(width=True,height=True)
self.Window_mapa.grid_columnconfigure(0, weight=1)
self.Window_mapa.grid_rowconfigure(0, weight=1)
#self.Window_mapa.wm_geometry("1900x1200")
#self.canvas = None # canvas
# Frame container
topframe = tk.Frame(self.Window_mapa)
topframe.grid_rowconfigure(0, weight=1)
topframe.grid(sticky='swen')
topframe.grid_columnconfigure(0, weight=1)
# Two panels: we use pack for the canvas and grid for the console
# Add the canvas
top_panel = tk.Frame(topframe, pady = 20)
# Initialize figure, canvas and plot panel
#self.ax=None
self.fig = Figure(figsize=self.PLOT.SIZE, \
facecolor=self.PLOT.FIGURE_COLOR.get(),dpi=self.PLOT.DPI.get())
#toconsola(" MAP_PLOT: Set projection parameters",wid=self.cons)
proj = map_proj(self.PLOT.MAP_PROJECTION.get(), params=self.params)
self.ax = self.fig.add_subplot(111, projection=proj['proj'])
self.canvas = FigureCanvasTkAgg(self.fig, master=top_panel)
#EG We draw via self.draw_figure
#EG self.canvas.draw()
self.canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)
toolbar = NavigationToolbar2Tk(self.canvas, top_panel)
toolbar.update()
self.canvas.get_tk_widget().pack(side=tk.BOTTOM, fill=tk.BOTH, expand=1)
#EG event controllers
self.CANVAS_CLICK = self.canvas.mpl_connect('button_press_event',self.canvas_click)
self.canvas.mpl_connect('close_event',self.canvas_closing)
self.canvas.mpl_connect('resize_event',self.canvas_resizing)
top_panel.grid(row=0, column=0, sticky='swen')
self.drawmap = True
else: toconsola(" EG ojo fig existe",wid=self.cons)
self.draw_figure()
# ========================
def setmap(self,target=0):
# ========================
#'''EG Note: new setmap routine focused on setting the projection.
# We implement a function to manage projections with Cartopy:
# map_proj(name,list). See the tools module.
#'''
#projection = self.PLOT.MAP_PROJECTION.get()
#EG self.toconsola("EG Set map Projection")
#proj = map_proj(projection)
#self.ax = self.fig.add_subplot(111, projection=proj['proj'])
#self.ax.set_extent([ float(self.PLOT.WEST.get()), \
#float(self.PLOT.EAST.get()), float(self.PLOT.SOUTH.get()), \
#float(self.PLOT.NORTH.get())],proj['proj'])
#EG self.ax.coastlines()
#EG Projection
'''
if proj is None:
proj = map_proj(self.PLOT.MAP_PROJECTION.get())
self.ax = self.fig.add_subplot(111, projection=proj['proj'])
'''
return
# ====================
def draw_figure(self):
# ====================
global CONSOLA
toconsola("EG draw_figure:",wid=self.cons)
toconsola((" EG Configuration:\n"+ \
"\t Projection: "+str(self.PLOT.MAP_PROJECTION.get())+ \
"\n\t Domain:\t \t West - East: "+str(float(self.PLOT.WEST.get()))+ \
" - "+str(float(self.PLOT.EAST.get()))+ \
"\n\t \t South - North: "+str(float(self.PLOT.SOUTH.get()))+ \
" - "+str(float(self.PLOT.NORTH.get()))),wid=self.cons)
try:
self.scbar.remove()
except: pass
for bar in self.cdfbar:
try:
bar.remove()
except: pass
self.cdfbar = []
proj = map_proj(self.PLOT.MAP_PROJECTION.get())
self.ax.clear()
font_family = self.PLOT.MAP_FONT_TYPE.get() # Let's see ...
font_size = self.PLOT.LABEL_SIZE.get()
# EPSG
# EG Not necessary
# epsg = int(self.PLOT.EPSG.get())
# This block could be reworked now
# Temporarily deprecated self.PLOT.GEOMAP.get()
self.ax.set_extent([float(self.PLOT.WEST.get()) ,float(self.PLOT.EAST.get()),\
float(self.PLOT.SOUTH.get()),float(self.PLOT.NORTH.get())],\
crs=proj['proj'])
#EG projection tests: self.ax.coastlines()
toconsola(" EG self.PLOT.GEOMAP: "+str(self.PLOT.GEOMAP.get()),wid=self.cons)
if self.drawmap:
toconsola(" EG draw_figure: call setmap no more needed !",wid=self.cons)
self.drawmap = False
#EG We implement GEBCO+EMODNET Tiles services
toconsola(" EG: RELIEF tiles"+str(self.PLOT.RELIEF_SHOW.get()),wid=self.cons)
if self.PLOT.RELIEF_SHOW.get():
if self.PLOT.RELIEF.get() == 1:
gebco ="GEBCO_2019_Grid"
try:
toconsola("\t EG: GEBCO tiles",wid=self.cons)
self.ax.add_wms(wms='https://www.gebco.net/data_and_products/gebco_web_services/2019/mapserv?request=getmap&service=wms&BBOX=-90,-180,90,360&crs=EPSG:4326&format=image/jpeg&layers=gebco_2019_grid&width=1200&height=600&version=1.3.0',layers=gebco,zorder=0)
except:
toconsola("\t WARNING: GEBCO server failed !, it is disabled......",wid=self.cons)
elif self.PLOT.RELIEF.get() == 2:
emod_land="emodnet:mean_atlas_land"
toconsola("\t EG: EMODNET tiles",wid=self.cons)
try:
self.ax.add_wms(wms='http://ows.emodnet-bathymetry.eu/wms',layers=emod_land,zorder=0)
except:
toconsola("\t WARNING: EMODNET server failed !, it is disabled......",wid=self.cons)
else:
#EG This situation can happen (e.g. after manual editing of conf files)
self.PLOT.RELIEF_SHOW.set(False)
if self.PLOT.EMODNET_ISO.get():
emodnet="emodnet:contours"
toconsola("\t EG: EMODNET contours",wid=self.cons)
try:
self.ax.add_wms(wms='http://ows.emodnet-bathymetry.eu/wms',layers=emodnet,zorder=0)
except:
toconsola("\t WARNING: EMODNET contours failed !, it is disabled......",wid=self.cons)
# Draw SAIDIN:
#
if not empty(self.SAIDIN.FILENAME.get()):
if self.SAIDIN.show.get():
toconsola("EG plot SAIDIN",wid=self.cons)
#EG Added projection argument, map reference dropped
self.scbar = contourplot.drawing(self.fig,self.ax,proj['proj'],
self.SAIDIN.FLD.xx, self.SAIDIN.FLD.yy,
self.SAIDIN.FLD.data,
self.SAIDIN.FLD.data.mask,
self.SAIDIN.PLOT)
# Draw fields:
#
if self.ncdf > 0:
#EG Added projection argument, map reference dropped
toconsola("EG: plot netcdf",wid=self.cons)
for ii in range(self.ncdf):
if self.CDF[ii].show.get():
self.cdfbar.append(contourplot.drawing(self.fig,
self.ax, proj['proj'],
self.CDF[ii].FLD.xx, self.CDF[ii].FLD.yy,
self.CDF[ii].FLD.data,
self.CDF[ii].FLD.data.mask,
self.CDF[ii].PLOT))
# Draw currents:
#
if self.nvec > 0:
toconsola("EG plot currents",wid=self.cons)
for ii in range(self.nvec):
if self.VEC[ii].show.get():
vectorplot.drawing(self.ax, proj['proj'], self.VEC[ii])
# Draw floats:
#
if self.nfloat > 0:
toconsola("EG plot floats",wid=self.cons)
for ii in range(self.nfloat):
self.FLOAT[ii].L.set(self.L.get())
lagrangian.drawing(self.ax, proj['proj'], self.FLOAT[ii])
# Draw markers:
#
mrklines = []
mrklabls = []
if self.nmarker > 0:
toconsola("EG plot markers",wid=self.cons)
for ii in range(self.nmarker):
#EG Added projection argument, reference map and fig dropped
lmrk = geomarker.drawing(self.ax, proj['proj'], self.MARKER[ii])
mrklines.append(lmrk)
mrklabls.append(self.MARKER[ii].LABEL.get())
# Draw SHAPES:
#
if self.nshape > 0:
toconsola("EG plot shapes",wid=self.cons)
for ii in range(self.nshape):
toconsola("\tSHAPE"+str(ii),wid=self.cons)
#EG Added projection argument, reference map and fig
lmrk = shape.drawing(self.ax, proj['proj'], self.SHAPE[ii])
if lmrk is not None:
mrklines.append(lmrk)
mrklabls.append(self.SHAPE[ii].LABEL.get())
# Draw Ellipses:
#
if self.nellipse > 0:
for ii in range(self.nellipse):
ellipse.drawing(self.ax, proj['proj'], self.ELLIPSE[ii])
# Draw Patches:
#
if self.npatch > 0:
for ii in range(self.npatch):
patch.drawing(self.ax, proj['proj'], self.PATCH[ii])
# Draw Features:
#
if self.FEATURE.n > 0:
for ii in range(self.FEATURE.n):
self.FEATURE.DATA[ii].drawing(self.ax, proj['proj'])
#EG Coastlines
#toconsola("EG: COASTLINES"+str(self.PLOT.COASTLINE_SHOW.get()),wid=self.cons)
if self.PLOT.COASTLINE_SHOW.get():
if self.PLOT.COASTLINE_SOURCE.get() == 2:
emodnet="coastlines"
try:
self.ax.add_wms(wms='http://ows.emodnet-bathymetry.eu/wms',layers=emodnet,zorder=0)
except:
toconsola("\t WARNING: EMODNET coastlines !, it is disabled......",wid=self.cons)
else:
toconsola("\t EG COASTLINE: Natural_Earth (50m by default) or EMODNET wms",wid=self.cons)
self.ax.coastlines(self.PLOT.MAP_RESOLUTION.get(),color=self.PLOT.COASTLINE_COLOR.get(),
linewidth=self.PLOT.COASTLINE_WIDTH.get(),
zorder=self.PLOT.COASTLINE_ZORDER.get())
if self.PLOT.ISOBAT_NPLOT > 0:
toconsola("EG plot Custom ISOBATHS",wid=self.cons)
# Plot isobaths and its legend:
lines, labels = [], []
toconsola("\t lABEL_SHOW"+str(self.PLOT.ISOBAT_LABEL_SHOW.get()),wid=self.cons)
for ii in range(self.PLOT.nisobat):
label = None
if self.PLOT.ISOBAT_LABEL_SHOW.get():
label = self.PLOT.ISOBAT_LABEL[ii]
try:
color = eval(self.PLOT.ISOBAT_COLOR[ii].get())
except:
color = self.PLOT.ISOBAT_COLOR[ii].get()
if self.PLOT.ISOBAT_SHOW[ii]:
toconsola("\t EG ISOBATA:"+str(self.PLOT.ISOBAT_LABEL[ii]),wid=self.cons)
z = self.PLOT.ISOBAT_DATA[ii]
isox,isoy = z['lon'],z['lat']
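# Replace the large land-separation sentinels (values above 1e29, the
# assumed convention of the isobath files) with NaN so that Matplotlib
# breaks the polyline between disconnected isobath segments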
for i in range(len(isox)):
if isox[i] > 1e29:
isox[i], isoy[i] = np.nan, np.nan
isbt, = self.ax.plot(isox,isoy,marker=None,
linestyle=self.PLOT.ISOBAT_STYLE[ii].get(),
linewidth=self.PLOT.ISOBAT_WIDTH[ii].get(),
#transform=proj['proj'],
transform=ccrs.PlateCarree(),
color=color)
lines.append(isbt)
labels.append(label)
if self.PLOT.ISOBAT_LEGEND.SHOW.get():
toconsola("\t self.PLOT.ISOBAT_LEGEND.SHOW"+str(self.PLOT.ISOBAT_LEGEND.SHOW.get()),wid=self.cons)
fontsize = self.PLOT.ISOBAT_LEGEND.FONTSIZE.get()
mode = None
if self.PLOT.ISOBAT_LEGEND.FONTSIZE.get() < 1:
fontsize = None
if self.PLOT.ISOBAT_LEGEND.MODE.get() == 1:
mode = 'expand'
# The legend title, if any, is applied after the legend is created (below)
# Anchor BBOX:
if self.PLOT.ISOBAT_LEGEND.USE_BB.get():
bb = [self.PLOT.ISOBAT_LEGEND.BBx.get(),
self.PLOT.ISOBAT_LEGEND.BBy.get()]
else:
bb = None
Ilegend = self.ax.legend(lines,labels, \
#title=self.PLOT.ISOBAT_LEGEND.TITLE.get(),
#title_fontsize=24,
loc=self.PLOT.ISOBAT_LEGEND.LOC.get(),
ncol=self.PLOT.ISOBAT_LEGEND.NCOL.get(),
fontsize=fontsize,
frameon=self.PLOT.ISOBAT_LEGEND.FRAMEON.get(),
fancybox=self.PLOT.ISOBAT_LEGEND.FANCYBOX.get(),
shadow=self.PLOT.ISOBAT_LEGEND.SHADOW.get(),
framealpha=self.PLOT.ISOBAT_LEGEND.ALPHA.get(),
mode=mode,
bbox_to_anchor=bb,
facecolor=self.PLOT.ISOBAT_LEGEND.COLOR.get(),
edgecolor=self.PLOT.ISOBAT_LEGEND.EDGECOLOR.get(),
markerscale=self.PLOT.ISOBAT_LEGEND.MARKERSCALE.get(),
borderpad=self.PLOT.ISOBAT_LEGEND.BORDERPAD.get(),
handletextpad=self.PLOT.ISOBAT_LEGEND.HANDLETEXTPAD.get(),
borderaxespad=self.PLOT.ISOBAT_LEGEND.BORDERAXESPAD.get(),
labelspacing=self.PLOT.ISOBAT_LEGEND.LABELSPACING.get())
if not empty(self.PLOT.ISOBAT_LEGEND.TITLE.get()):
Ilegend.set_title(self.PLOT.ISOBAT_LEGEND.TITLE.get(),
prop=self.PLOT.ISOBAT_LEGEND.TITLEFONT)
if self.PLOT.WATER_COLOR.get() != 'None':
#toconsola("PLOT.WATER_COLOR por defecto 50m",wid=self.cons)
self.ax.add_feature(cfeat.NaturalEarthFeature('physical', 'ocean', \
self.PLOT.MAP_RESOLUTION.get(), \
facecolor=self.PLOT.WATER_COLOR.get()),zorder=self.PLOT.WATER_ZORDER.get())
if self.PLOT.LAND_COLOR.get() != 'None':
#toconsola("PLOT.LAND_COLOR por defecto 50m",wid=self.cons)
self.ax.add_feature(cfeat.NaturalEarthFeature('physical', 'land', \
self.PLOT.MAP_RESOLUTION.get(), \
facecolor=self.PLOT.LAND_COLOR.get()),zorder=self.PLOT.LAND_ZORDER.get())
if self.PLOT.COUNTRYLINE_SHOW.get():
#toconsola("PLOT.COUNTRYLINE",wid=self.cons)
self.ax.add_feature(cfeat.BORDERS,edgecolor=self.PLOT.COUNTRYLINE_COLOR.get(),
linewidth=self.PLOT.COUNTRYLINE_WIDTH.get(),
zorder=self.PLOT.LAND_ZORDER.get()+1)
if self.PLOT.RIVERS_SHOW.get():
#toconsola("PLOT.RIVERS",wid=self.cons)
self.ax.add_feature(cfeat.NaturalEarthFeature('physical','rivers_and_lakes_centerlines', \
self.PLOT.MAP_RESOLUTION.get(), \
linewidth=self.PLOT.RIVERS_WIDTH.get(),
edgecolor=self.PLOT.RIVERS_COLOR.get(),zorder=self.PLOT.LAND_ZORDER.get()+1))
#self.ax.coastlines(resolution='110m')
#self.ax.gridlines()
if self.PLOT.GRID_SHOW.get():
toconsola("EG PLOT.GRID"+self.PLOT.GRID_LINESTYLE.get(),wid=self.cons)
#EG to adapt; still needs checking
#def setcolor(x,color):
# for m in x:
# for t in x[m][1]:
# t.set_color(color)
vmeridians = np.arange(self.PLOT.MERIDIAN_INI.get(), \
self.PLOT.MERIDIAN_FIN.get(), \
self.PLOT.MERIDIAN_INT.get())
vparallels = np.arange(self.PLOT.PARALLEL_INI.get(), \
self.PLOT.PARALLEL_FIN.get(), \
self.PLOT.PARALLEL_INT.get())
lstyle = {'size':self.PLOT.GRID_SIZE.get(),'color':self.PLOT.GRID_COLOR.get()}
#gl = self.ax.gridlines(crs=proj['proj'],draw_labels=True,
gl = self.ax.gridlines(crs=ccrs.PlateCarree(),draw_labels=True,
linewidth=self.PLOT.GRID_LINEWIDTH.get(),
color=self.PLOT.GRID_FONTCOLOR.get(),
alpha=self.PLOT.GRID_ALPHA.get(),
linestyle=self.PLOT.GRID_LINESTYLE.get(),
zorder=self.PLOT.GRID_ZORDER.get())
# Lines visibility
gl.xlines, gl.ylines = True, True
if self.PLOT.GRID_LINESTYLE.get() == "None":
gl.xlines, gl.ylines = False, False
# xy labels visibility
if CARTOPY_VERSION < '0.18':
# Works with 0.17
gl.xlabels_top = self.PLOT.GRID_NORTH.get()
gl.xlabels_bottom = self.PLOT.GRID_SOUTH.get()
gl.ylabels_left = self.PLOT.GRID_WEST.get()
gl.ylabels_right = self.PLOT.GRID_EAST.get()
else:
# Works with >= 0.18 (the gridliner label attributes were renamed)
gl.top_labels = self.PLOT.GRID_NORTH.get()
gl.bottom_labels = self.PLOT.GRID_SOUTH.get()
gl.left_labels = self.PLOT.GRID_WEST.get()
gl.right_labels = self.PLOT.GRID_EAST.get()
gl.xlocator = mticker.FixedLocator(vmeridians)
gl.ylocator = mticker.FixedLocator(vparallels)
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
gl.xlabel_style, gl.ylabel_style = lstyle, lstyle
#gl.xpadding , gl.ypadding = self.PLOT.LABEL_PAD.get(), self.PLOT.LABEL_PAD.get()
#else:
# # Default: no labels, no grid just Latitude and Longitude
# toconsola("EG XYLabels ..\n\t"+self.PLOT.XLABEL.get()+" - "+self.PLOT.YLABEL.get(),wid=self.cons)
# font_family = self.PLOT.MAP_FONT_TYPE.get()
# font_size = self.PLOT.LABEL_SIZE.get()
font_weight = 'normal'
font = {'family' : font_family, 'weight' : font_weight,
'color' : self.PLOT.TEXT_COLOR.get(),
'size' : font_size}
# -0.07
self.ax.text(-self.PLOT.YLABEL_PAD.get(), 0.55, self.PLOT.YLABEL.get(), va="bottom", \
ha="center", rotation="vertical", rotation_mode="anchor",
transform=self.ax.transAxes,fontdict=font)
# -0.2
self.ax.text(0.5, -self.PLOT.XLABEL_PAD.get(), self.PLOT.XLABEL.get(), va="bottom", \
ha="center", rotation="horizontal", rotation_mode="anchor",
transform=self.ax.transAxes,fontdict=font)
# Title
toconsola("Plot Title: "+self.PLOT.TITLE.get(),wid=self.cons)
self.ax.set_title(self.PLOT.TITLE.get(),fontproperties=self.PLOT.TITLEFONT)
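# Nudge the title upward: TITLE_PAD is interpreted in pixels and divided
# by the figure DPI, a rough heuristic since title.get_position() returns
# axes-fraction coordinates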
px,py = self.ax.title.get_position()
dy = self.PLOT.TITLE_PAD.get()/self.fig.get_dpi()
self.ax.title.set_position((px,py+dy))
if self.PLOT.GEOMAP.get():
#toconsola("EG PLOT.GEOMAP 2 scale: Not yet implemented",wid=self.cons)
if self.PLOT.SCALE_SHOW.get():
try:
YOFFSET = float(self.PLOT.SCALE_YOFFSET.get())
except: YOFFSET = None
try:
LINEWIDTH = float(self.PLOT.SCALE_LINEWIDTH.get())
except: LINEWIDTH = None
#EG Note: scale_bar from tools does not seem to work reliably
toconsola("EG bar scale", wid=self.cons)
scale_bar(self.ax,proj=ccrs.PlateCarree(),
location=[self.PLOT.SCALE_XO.get(),self.PLOT.SCALE_YO.get()],
length=self.PLOT.SCALE_LENGTH.get(),
linecolor=self.PLOT.SCALE_LINECOLOR.get(),
fontcolor=self.PLOT.SCALE_FONTCOLOR.get(),
fontsize=self.PLOT.SCALE_FONTSIZE.get(),
zorder=self.PLOT.SCALE_ZORDER.get(),
linewidth=LINEWIDTH)
#scale_bar(self.ax, self.PLOT.SCALE_LENGTH.get(), \
# [self.PLOT.SCALE_XO.get(),self.PLOT.SCALE_YO.get()],
# linewidth=LINEWIDTH)
'''EG To be implemented with Cartopy
print("EG PLOT.GEOMAP 2 drawmapscale")
self.m.drawmapscale(self.PLOT.SCALE_X.get(),
self.PLOT.SCALE_Y.get(),
self.PLOT.SCALE_XO.get(),
self.PLOT.SCALE_YO.get(),
length=self.PLOT.SCALE_LENGTH.get(),
units=self.PLOT.SCALE_UNITS.get(),
barstyle=self.PLOT.SCALE_STYLE.get(),
fontsize=self.PLOT.SCALE_FONTSIZE.get(),
yoffset=YOFFSET,
labelstyle=self.PLOT.SCALE_LABELSTYLE.get(),
fontcolor=self.PLOT.SCALE_FONTCOLOR.get(),
fillcolor1=self.PLOT.SCALE_FILLCOLOR1.get(),
fillcolor2=self.PLOT.SCALE_FILLCOLOR2.get(),
format=self.PLOT.SCALE_FORMAT.get(),
linecolor=self.PLOT.SCALE_LINECOLOR.get(),
linewidth=LINEWIDTH)
'''
# Time stamp
try:
self.time_stamp.remove()
except: pass
if len(self.DATE) > 0:
toconsola("EG Time stamp: len(self.DATE) > 0", wid=self.cons)
if self.PLOT.TIMESTAMP_SHOW.get():
toconsola("EG Time stamp: "+str(self.DATE[self.L.get()]), wid=self.cons)
font_weight = 'normal'
if self.PLOT.TIMESTAMP_BOLD.get(): font_weight = 'bold'
self.ax.annotate(str(self.DATE[self.L.get()]), \
xy=(self.PLOT.TIMESTAMP_X.get(), \
self.PLOT.TIMESTAMP_Y.get()), \
xycoords='figure fraction', \
color=self.PLOT.TIMESTAMP_COLOR.get(), \
fontsize=self.PLOT.TIMESTAMP_SIZE.get(), \
fontfamily=font_family, \
fontweight=font_weight, \
annotation_clip=False)
if self.PLOT.LOGO_DISPLAY.get() == 1: self.plot_logo()
self.ax.callbacks.connect('xlim_changed', self.on_xlims_change)
self.ax.callbacks.connect('ylim_changed', self.on_ylims_change)
if len(mrklines) > 0 and self.PLOT.LEGEND.SHOW.get():
toconsola("EG self.nmarker ?",wid=self.cons)
fontsize = self.PLOT.LEGEND.FONTSIZE.get()
mode = None
if self.PLOT.LEGEND.FONTSIZE.get() < 1: fontsize = None
if self.PLOT.LEGEND.MODE.get() == 1: mode = 'expand'
# Anchor BBOX:
if self.PLOT.LEGEND.USE_BB.get():
bb = [self.PLOT.LEGEND.BBx.get(),
self.PLOT.LEGEND.BBy.get()]
else:
bb = None
#try:
toconsola("EG ax.legend",wid=self.cons)
legend = self.ax.legend(mrklines,mrklabls,
loc=self.PLOT.LEGEND.LOC.get(),
ncol=self.PLOT.LEGEND.NCOL.get(),
fontsize=fontsize,
frameon=self.PLOT.LEGEND.FRAMEON.get(),
fancybox=self.PLOT.LEGEND.FANCYBOX.get(),
shadow=self.PLOT.LEGEND.SHADOW.get(),
framealpha=self.PLOT.LEGEND.ALPHA.get(),
mode=mode,
bbox_to_anchor=bb,
facecolor=self.PLOT.LEGEND.COLOR.get(),
edgecolor=self.PLOT.LEGEND.EDGECOLOR.get(),
markerscale=self.PLOT.LEGEND.MARKERSCALE.get(),
borderpad=self.PLOT.LEGEND.BORDERPAD.get(),
handletextpad=self.PLOT.LEGEND.HANDLETEXTPAD.get(),
borderaxespad=self.PLOT.LEGEND.BORDERAXESPAD.get(),
labelspacing=self.PLOT.LEGEND.LABELSPACING.get())
#except: pass
try:
self.ax.add_artist(Ilegend)
except:
pass
if not empty(self.PLOT.LEGEND.TITLE.get()):
try:
legend.set_title(self.PLOT.LEGEND.TITLE.get(),
prop=self.PLOT.LEGEND.TITLEFONT)
except: pass
self.canvas.draw()
toconsola("End draw_figure:",wid=self.cons)
return
# ============================
def make_Mplot(self,proj=None):
# ============================
'''Plotting the maps using CARTOPY,
output directed to Movie window'''
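# Note: this routine deliberately mirrors draw_figure(), but renders into
# the movie-window figure/axes/canvas (self.Mfig / self.Max / self.Mcanvas)
# instead of the main map window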
try:
self.SAIDIN.Mcbar.remove()
except: pass
try:
self.Mscbar.remove()
except: pass
for bar in self.Mcdfbar:
try:
bar.remove()
except: pass
#EG recover the cartopy projection
if proj is None:
rproj = map_proj(self.PLOT.MAP_PROJECTION.get())
proj = rproj['proj']
self.Mcdfbar = []
self.Max.clear()
font_family = self.PLOT.MAP_FONT_TYPE.get() # Let's see ...
font_size = self.PLOT.LABEL_SIZE.get()
# EPSG temporarily disabled
# epsg = int(self.PLOT.EPSG.get())
# SOUTH = float(self.PLOT.SOUTH.get())
# NORTH = float(self.PLOT.NORTH.get())
# WEST = float(self.PLOT.WEST.get())
# EAST = float(self.PLOT.EAST.get())
self.Max.set_extent([float(self.PLOT.WEST.get()) ,float(self.PLOT.EAST.get()),\
float(self.PLOT.SOUTH.get()),float(self.PLOT.NORTH.get())],\
crs=proj)
if self.Mdrawmap:
#EG no longer needed: self.setmap(self.Max,1)
self.Mdrawmap = False
#toconsola("EG: RELIEF tiles",wid=self.cons)
if self.PLOT.RELIEF_SHOW.get():
if self.PLOT.RELIEF.get() == 1:
gebco ="GEBCO_2019_Grid"
try:
#toconsola("\tEG: GEBCO tiles",wid=self.cons)
self.Max.add_wms(wms='https://www.gebco.net/data_and_products/gebco_web_services/2019/mapserv?request=getmap&service=wms&BBOX=-90,-180,90,360&crs=EPSG:4326&format=image/jpeg&layers=gebco_2019_grid&width=1200&height=600&version=1.3.0',layers=gebco,zorder=0)
except:
toconsola("\tWARNING: GEBCO server failed !, it is disabled......",wid=self.cons)
elif self.PLOT.RELIEF.get() == 2:
emod_land="emodnet:mean_atlas_land"
#toconsola("\tEG: EMODNET tiles",wid=self.cons)
try:
self.Max.add_wms(wms='http://ows.emodnet-bathymetry.eu/wms',layers=emod_land,zorder=0)
except:
toconsola("\tWARNING: EMODNET server failed !, it is disabled......",wid=self.cons)
else:
#EG This situation can happen (e.g. after manual editing of conf files)
self.PLOT.RELIEF_SHOW.set(False)
if self.PLOT.EMODNET_ISO.get():
emodnet="emodnet:contours"
#toconsola("EG: EMODNET contours",wid=self.cons)
try:
self.Max.add_wms(wms='http://ows.emodnet-bathymetry.eu/wms',layers=emodnet,zorder=0)
except:
toconsola("\t WARNING: EMODNET contours failed !, it is disabled......",wid=self.cons)
# Draw SAIDIN:
if not empty(self.SAIDIN.FILENAME.get()):
if self.SAIDIN.show.get():
self.Mscbar = contourplot.drawing(self.Mfig,self.Max, proj,\
self.SAIDIN.FLD.xx,self.SAIDIN.FLD.yy, \
self.SAIDIN.FLD.data, \
self.SAIDIN.FLD.data.mask, \
self.SAIDIN.PLOT)
# Draw fields:
if self.ncdf > 0:
for ii in range(self.ncdf):
if self.CDF[ii].show.get():
self.Mcdfbar.append(contourplot.drawing(self.Mfig,self.Max, proj,\
self.CDF[ii].FLD.xx, \
self.CDF[ii].FLD.yy, \
self.CDF[ii].FLD.data, \
self.CDF[ii].FLD.data.mask, \
self.CDF[ii].PLOT))
# Draw currents:
if self.nvec > 0:
for ii in range(self.nvec):
if self.VEC[ii].show.get():
vectorplot.drawing(self.Max,proj,self.VEC[ii])
# Draw floats:
if self.nfloat > 0:
for ii in range(self.nfloat):
self.FLOAT[ii].L.set(self.L.get())
lagrangian.drawing(self.Max,proj,self.FLOAT[ii])
# Draw markers:
mrklines = []
mrklabls = []
if self.nmarker > 0:
for ii in range(self.nmarker):
lmrk = geomarker.drawing(self.Max,proj,self.MARKER[ii])
mrklines.append(lmrk)
mrklabls.append(self.MARKER[ii].LABEL.get())
# Draw shapes:
if self.nshape > 0:
for ii in range(self.nshape):
#toconsola("\tSHAPE"+str(ii),wid=self.cons)
#EG Added projection argument, reference map and fig
lmrk = shape.drawing(self.Max,proj,self.SHAPE[ii])
if lmrk is not None:
mrklines.append(lmrk)
mrklabls.append(self.SHAPE[ii].LABEL.get())
# Draw Ellipses:
if self.nellipse > 0:
for ii in range(self.nellipse):
ellipse.drawing(self.Max,proj,self.ELLIPSE[ii])
# Draw patches:
#
if self.npatch > 0:
for ii in range(self.npatch):
patch.drawing(self.Max,proj,self.PATCH[ii])
#EG Coastlines
#toconsola("EG: COASTLINES",wid=self.cons)
if self.PLOT.COASTLINE_SHOW.get():
if self.PLOT.COASTLINE_SOURCE.get() == 2:
emodnet="coastlines"
try:
self.Max.add_wms(wms='http://ows.emodnet-bathymetry.eu/wms',
layers=emodnet,
color=self.PLOT.COASTLINE_COLOR.get(),
linewidth=self.PLOT.COASTLINE_WIDTH.get(),
zorder=self.PLOT.COASTLINE_ZORDER.get())
except:
toconsola("WARNING: EMODNET coastlines !, it is disabled......",wid=self.cons)
else:
#toconsola("EG COASTLINE: Natural_Earth (50m by default) or EMODNET wms",wid=self.cons)
self.Max.coastlines(self.PLOT.MAP_RESOLUTION.get(),
color=self.PLOT.COASTLINE_COLOR.get(),
linewidth=self.PLOT.COASTLINE_WIDTH.get(),
zorder=self.PLOT.COASTLINE_ZORDER.get())
if self.PLOT.ISOBAT_NPLOT > 0:
#toconsola("EG Custom ISOBATHS",wid=self.cons)
# Plot isobaths and its legend:
lines, labels = [], []
toconsola("\t lABEL_SHOW",self.PLOT.ISOBAT_LABEL_SHOW.get(),wid=self.cons)
for ii in range(self.PLOT.nisobat):
label = None
if self.PLOT.ISOBAT_LABEL_SHOW.get():
label = self.PLOT.ISOBAT_LABEL[ii]
try:
color = eval(self.PLOT.ISOBAT_COLOR[ii].get())
except:
color = self.PLOT.ISOBAT_COLOR[ii].get()
if self.PLOT.ISOBAT_SHOW[ii]:
#toconsola("\t EG ISOBATA:",self.PLOT.ISOBAT_LABEL[ii],wid=self.cons)
z = self.PLOT.ISOBAT_DATA[ii]
isox,isoy = z['lon'],z['lat']
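# As in draw_figure(): replace the >1e29 separation sentinels with NaN
# so the isobath polyline is broken between disconnected segments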
for i in range(len(isox)):
if isox[i] > 1e29:
isox[i], isoy[i] = np.nan, np.nan
isbt, = self.Max.plot(isox,isoy,marker=None,
linestyle=self.PLOT.ISOBAT_STYLE[ii].get(),
linewidth=self.PLOT.ISOBAT_WIDTH[ii].get(),
transform=ccrs.PlateCarree(),
color=color)
lines.append(isbt)
labels.append(label)
if self.PLOT.ISOBAT_LEGEND.SHOW.get():
#toconsola("\t EG self.PLOT.ISOBAT_LEGEND.SHOW",wid=self.cons)
fontsize = self.PLOT.ISOBAT_LEGEND.FONTSIZE.get()
mode = None
if self.PLOT.ISOBAT_LEGEND.FONTSIZE.get() < 1:
fontsize = None
if self.PLOT.ISOBAT_LEGEND.MODE.get() == 1:
mode = 'expand'
# The legend title, if any, is applied after the legend is created (below)
# Anchor BBOX:
if self.PLOT.ISOBAT_LEGEND.USE_BB.get():
bb = [self.PLOT.ISOBAT_LEGEND.BBx.get(),
self.PLOT.ISOBAT_LEGEND.BBy.get()]
else:
bb = None
Ilegend = self.Max.legend(lines,labels, \
#title=self.PLOT.ISOBAT_LEGEND.TITLE.get(),
#title_fontsize=24,
loc=self.PLOT.ISOBAT_LEGEND.LOC.get(),
ncol=self.PLOT.ISOBAT_LEGEND.NCOL.get(),
fontsize=fontsize,
frameon=self.PLOT.ISOBAT_LEGEND.FRAMEON.get(),
fancybox=self.PLOT.ISOBAT_LEGEND.FANCYBOX.get(),
shadow=self.PLOT.ISOBAT_LEGEND.SHADOW.get(),
framealpha=self.PLOT.ISOBAT_LEGEND.ALPHA.get(),
mode=mode,
facecolor=self.PLOT.ISOBAT_LEGEND.COLOR.get(),
edgecolor=self.PLOT.ISOBAT_LEGEND.EDGECOLOR.get(),
markerscale=self.PLOT.ISOBAT_LEGEND.MARKERSCALE.get(),
borderpad=self.PLOT.ISOBAT_LEGEND.BORDERPAD.get(),
handletextpad=self.PLOT.ISOBAT_LEGEND.HANDLETEXTPAD.get(),
borderaxespad=self.PLOT.ISOBAT_LEGEND.BORDERAXESPAD.get(),
labelspacing=self.PLOT.ISOBAT_LEGEND.LABELSPACING.get())
if not empty(self.PLOT.ISOBAT_LEGEND.TITLE.get()):
Ilegend.set_title(self.PLOT.ISOBAT_LEGEND.TITLE.get(),
prop=self.PLOT.ISOBAT_LEGEND.TITLEFONT)
if self.PLOT.WATER_COLOR.get() != 'None':
#toconsola("EG PLOT.WATER_COLOR por defecto 50m",wid=self.cons)
self.Max.add_feature(cfeat.NaturalEarthFeature('physical', 'ocean', \
self.PLOT.MAP_RESOLUTION.get(), \
facecolor=self.PLOT.WATER_COLOR.get()),zorder=self.PLOT.WATER_ZORDER.get())
if self.PLOT.LAND_COLOR.get() != 'None':
#toconsola("EG PLOT.LAND_COLOR por defecto 50m",wid=self.cons)
self.Max.add_feature(cfeat.NaturalEarthFeature('physical', 'land', \
self.PLOT.MAP_RESOLUTION.get(), \
facecolor=self.PLOT.LAND_COLOR.get()),zorder=self.PLOT.LAND_ZORDER.get())
if self.PLOT.COUNTRYLINE_SHOW.get():
#toconsola("EG PLOT.COUNTRYLINE",wid=self.cons)
self.Max.add_feature(cfeat.BORDERS,edgecolor=self.PLOT.COUNTRYLINE_COLOR.get(),
linewidth=self.PLOT.COUNTRYLINE_WIDTH.get(),
zorder=self.PLOT.LAND_ZORDER.get()+1)
if self.PLOT.RIVERS_SHOW.get():
toconsola("EG PLOT.RIVERS",wid=self.cons)
#print("EG PLOT.RIVERS")
self.Max.add_feature(cfeat.NaturalEarthFeature('physical','rivers_and_lakes_centerlines', \
self.PLOT.MAP_RESOLUTION.get(), \
linewidth=self.PLOT.RIVERS_WIDTH.get(),
edgecolor=self.PLOT.RIVERS_COLOR.get(),
zorder=self.PLOT.LAND_ZORDER.get()+1))
if self.PLOT.GRID_SHOW.get():
#toconsola("EG PLOT.GRID"+str(self.PLOT.GRID_LINESTYLE.get()),wid=self.cons)
vmeridians = np.arange(self.PLOT.MERIDIAN_INI.get(), \
self.PLOT.MERIDIAN_FIN.get(), \
self.PLOT.MERIDIAN_INT.get())
vparallels = np.arange(self.PLOT.PARALLEL_INI.get(), \
self.PLOT.PARALLEL_FIN.get(), \
self.PLOT.PARALLEL_INT.get())
lstyle = {'size':self.PLOT.GRID_SIZE.get(),'color':self.PLOT.GRID_COLOR.get()}
gl = self.Max.gridlines(crs=ccrs.PlateCarree(),draw_labels=True,
linewidth=self.PLOT.GRID_LINEWIDTH.get(),
color=self.PLOT.GRID_FONTCOLOR.get(),
alpha=self.PLOT.GRID_ALPHA.get(),
linestyle=self.PLOT.GRID_LINESTYLE.get(),
zorder=self.PLOT.GRID_ZORDER.get())
# Lines visibility
gl.xlines, gl.ylines = True, True
if self.PLOT.GRID_LINESTYLE.get() == "None":
gl.xlines, gl.ylines = False, False
# xy labels visibility
if CARTOPY_VERSION < '0.18':
# Works with 0.17
gl.xlabels_top = self.PLOT.GRID_NORTH.get()
gl.xlabels_bottom = self.PLOT.GRID_SOUTH.get()
gl.ylabels_left = self.PLOT.GRID_WEST.get()
gl.ylabels_right = self.PLOT.GRID_EAST.get()
else:
# Works with >= 0.18 (the gridliner label attributes were renamed)
gl.top_labels = self.PLOT.GRID_NORTH.get()
gl.bottom_labels = self.PLOT.GRID_SOUTH.get()
gl.left_labels = self.PLOT.GRID_WEST.get()
gl.right_labels = self.PLOT.GRID_EAST.get()
gl.xlocator = mticker.FixedLocator(vmeridians)
gl.ylocator = mticker.FixedLocator(vparallels)
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
gl.xlabel_style, gl.ylabel_style = lstyle, lstyle
#gl.xpadding , gl.ypadding = self.PLOT.LABEL_PAD.get(), self.PLOT.LABEL_PAD.get()
else:
# Default: no labels, no grid, just the Latitude and Longitude axis titles
#toconsola("EG XYLabels ..\n\t"+self.PLOT.XLABEL.get()+self.PLOT.YLABEL.get(),wid=self.cons)
#print("EG XYLabels ..\n\t",self.PLOT.XLABEL.get(),self.PLOT.YLABEL.get())
#font_family = self.PLOT.MAP_FONT_TYPE.get()
#font_size = self.PLOT.LABEL_SIZE.get()
font_weight = 'normal'
font = {'family' : font_family, 'weight' : font_weight,
'color' : self.PLOT.TEXT_COLOR.get(),
'size' : font_size}
self.Max.text(-self.PLOT.YLABEL_PAD.get(), 0.55, self.PLOT.YLABEL.get(), va="bottom", \
ha="center", rotation="vertical", rotation_mode="anchor",
transform=self.Max.transAxes,fontdict=font)
self.Max.text(0.5, -self.PLOT.XLABEL_PAD.get(), self.PLOT.XLABEL.get(), va="bottom", \
ha="center", rotation="horizontal", rotation_mode="anchor",
transform=self.Max.transAxes,fontdict=font)
# Title
#toconsola("Title:\n"+self.PLOT.TITLE.get(),wid=self.cons)
self.Max.set_title(self.PLOT.TITLE.get(),fontproperties=self.PLOT.TITLEFONT)
px,py = self.Max.title.get_position()
dy = self.PLOT.TITLE_PAD.get()/self.fig.get_dpi()
self.Max.title.set_position((px,py+dy))
if self.PLOT.GEOMAP.get():
#toconsola("EG PLOT.GEOMAP 2 scale: Not yet implemented",wid=self.cons)
#print("EG PLOT.GEOMAP 2 scale: Not yet implemented")
if self.PLOT.SCALE_SHOW.get():
try:
YOFFSET = float(self.PLOT.SCALE_YOFFSET.get())
except: YOFFSET = None
try:
LINEWIDTH = float(self.PLOT.SCALE_LINEWIDTH.get())
except: LINEWIDTH = None
#EG Note: scale_bar from tools does not seem to work reliably
#toconsola("EG bar scale",wid=self.cons)
#scale_bar(self.Max, 1)
scale_bar(self.Max,proj=ccrs.PlateCarree(),
location=[self.PLOT.SCALE_XO.get(),self.PLOT.SCALE_YO.get()],
length=self.PLOT.SCALE_LENGTH.get(),
linecolor=self.PLOT.SCALE_LINECOLOR.get(),
fontcolor=self.PLOT.SCALE_FONTCOLOR.get(),
fontsize=self.PLOT.SCALE_FONTSIZE.get(),
zorder=self.PLOT.SCALE_ZORDER.get(),
linewidth=LINEWIDTH)
# Time stamp
try:
self.Mtime_stamp.remove()
except: pass
if len(self.DATE) > 0:
if self.PLOT.TIMESTAMP_SHOW.get():
#toconsola("EG Time stamp: "+str(self.DATE[self.L.get()]),wid=self.cons)
font_weight = 'normal'
if self.PLOT.TIMESTAMP_BOLD.get(): font_weight = 'bold'
self.Max.annotate(str(self.DATE[self.L.get()]), \
xy=(self.PLOT.TIMESTAMP_X.get(), \
self.PLOT.TIMESTAMP_Y.get()), \
xycoords='figure fraction', \
color=self.PLOT.TIMESTAMP_COLOR.get(), \
fontsize=self.PLOT.TIMESTAMP_SIZE.get(), \
fontfamily=font_family, fontweight=font_weight, \
annotation_clip=False)
if self.PLOT.LOGO_DISPLAY.get() == 1: self.plot_logo()
if len(mrklines) > 0 and self.PLOT.LEGEND.SHOW.get():
fontsize = self.PLOT.LEGEND.FONTSIZE.get()
mode = None
if self.PLOT.LEGEND.FONTSIZE.get() < 1: fontsize = None
if self.PLOT.LEGEND.MODE.get() == 1: mode = 'expand'
# Anchor BBOX:
if self.PLOT.LEGEND.USE_BB.get():
bb = [self.PLOT.LEGEND.BBx.get(),
self.PLOT.LEGEND.BBy.get()]
else:
bb = None
#try:
#toconsola("EG ax.legend",wid=self.cons)
legend = self.Max.legend(mrklines,mrklabls,
loc=self.PLOT.LEGEND.LOC.get(),
ncol=self.PLOT.LEGEND.NCOL.get(),
fontsize=fontsize,
frameon=self.PLOT.LEGEND.FRAMEON.get(),
fancybox=self.PLOT.LEGEND.FANCYBOX.get(),
shadow=self.PLOT.LEGEND.SHADOW.get(),
framealpha=self.PLOT.LEGEND.ALPHA.get(),
mode=mode,
bbox_to_anchor=bb,
facecolor=self.PLOT.LEGEND.COLOR.get(),
edgecolor=self.PLOT.LEGEND.EDGECOLOR.get(),
markerscale=self.PLOT.LEGEND.MARKERSCALE.get(),
borderpad=self.PLOT.LEGEND.BORDERPAD.get(),
handletextpad=self.PLOT.LEGEND.HANDLETEXTPAD.get(),
borderaxespad=self.PLOT.LEGEND.BORDERAXESPAD.get(),
labelspacing=self.PLOT.LEGEND.LABELSPACING.get())
#except: pass
try:
self.Max.add_artist(Ilegend)
except:
pass
if not empty(self.PLOT.LEGEND.TITLE.get()):
try:
legend.set_title(self.PLOT.LEGEND.TITLE.get(),
prop=self.PLOT.LEGEND.TITLEFONT)
except:
pass
# self.Max.set_extent([float(self.PLOT.WEST.get()) ,float(self.PLOT.EAST.get()),\
# float(self.PLOT.SOUTH.get()),float(self.PLOT.NORTH.get())],\
# crs=proj)
self.Mcanvas.draw()
return
def trajectory_editor(self):
# ==========================
''' Launch the editor of a trajectory '''
def _close():
# ===========
self.Window_editor.destroy()
self.Window_editor = None
# Check if the window was closed by EDITOR !!
if self.Window_editor is None:
pass
else:
try:
self.Window_editor.lift()
except:
self.Window_editor = None
if self.Window_editor is None:
self.Window_editor = tk.Toplevel(self.master)
self.Window_editor.title('GEOJSON EDITOR')
self.Window_editor.resizable(width=False,height=False)
self.Window_editor.protocol('WM_DELETE_WINDOW',_close)
else:
self.Window_editor.lift()
return
if self.nfloat == 0:
jeditor.EDITOR(self.Window_editor,wid=self.cons)
else:
jeditor.EDITOR(self.Window_editor, \
self.FLOAT[self.FLOAT_INDX.get()].FILENAME.get(),\
wid=self.cons)
def contour_mean(self):
# ==========================
''' Calculates the long term mean of a contour field '''
if self.ncdf == 0:
messagebox.showinfo(message='No Netcdf file opened yet')
return
ii = self.CDF_INDX.get()
if self.CDF[ii].PARENT is None:
toconsola('Calculating mean of current CONTOUR field',wid=self.cons)
else:
ii = self.CDF[ii].PARENT
toconsola('Calculating mean of PARENT CONTOUR field, ii='+str(ii),wid=self.cons)
K = self.CDF[ii].K.get()
L = self.CDF[ii].L.get()
nt = self.CDF[ii].FLD.icdf.nt
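# One-pass accumulation of the time sum at fixed level K; the long-term
# mean is num/nt once the loop completes (masked points propagate through
# the masked-array addition)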
for L in range(0,nt):
data = self.CDF[ii].FLD.read(K=K,L=L,wid=self.cons)
if L==0:
num = data.copy()
else:
num = num + data
CDF = CONTOUR()
CDF.SOURCE = 'MEAN'
CDF.PARENT = ii # The index to PARENT data
CDF.FLD.data = num / nt
# Min/max of the time-mean field (not of the last record read)
CDF.FLD.minval = np.nanmin(CDF.FLD.data)
CDF.FLD.maxval = np.nanmax(CDF.FLD.data)
toconsola('Min val = '+str(CDF.FLD.minval),wid=self.cons)
toconsola('Max val = '+str(CDF.FLD.maxval),wid=self.cons)
CDF.K.set(K)
if len(self.CDF[ii].Z_LIST) > 0:
CDF.K_LIST = [K]
CDF.Z_LIST = [self.CDF[ii].Z_LIST[K]]
# Middle of the time segment
t2 = 0.5*(self.CDF[ii].T_LIST[0]+self.CDF[ii].T_LIST[-1])
CDF.L.set(0)
CDF.L_LIST = [0]
CDF.T_LIST = [t2]
try:
CDF.DATE = [num2date(t2, \
units=self.CDF[ii].FLD.icdf.time_units, \
calendar=self.CDF[ii].FLD.icdf.time_calendar)]
except:
CDF.DATE = [0.5*(self.CDF[ii].FLD.icdf.nt-1)]
CDF.ALIAS.set('Average')
CDF.FLD.x = self.CDF[ii].FLD.x
CDF.FLD.y = self.CDF[ii].FLD.y
CDF.FLD.xx = self.CDF[ii].FLD.xx
CDF.FLD.yy = self.CDF[ii].FLD.yy
CDF.FLD.ndims = self.CDF[ii].FLD.ndims
CDF.FLD.with_axes = self.CDF[ii].FLD.with_axes
CDF.FLD.units = self.CDF[ii].FLD.units
CDF.FLD.missing = self.CDF[ii].FLD.missing
CDF.FLD.varname = self.CDF[ii].FLD.varname
CDF.FLD.varid = self.CDF[ii].FLD.varid
CDF.FLD.xmin = self.CDF[ii].FLD.xmin
CDF.FLD.xmax = self.CDF[ii].FLD.xmax
CDF.FLD.ymin = self.CDF[ii].FLD.ymin
CDF.FLD.ymax = self.CDF[ii].FLD.ymax
CDF.FILENAME.set(self.CDF[ii].FILENAME.get())
CDF.varname.set(CDF.FLD.varname)
CDF.FLD.nc = Dataset(self.CDF[ii].FILENAME.get())
CDF.FLD.icdf = tools.geocdf(wid=self.cons)
# We copy the original icdf information
conf = self.CDF[ii].FLD.icdf.conf_get()
CDF.FLD.icdf.conf_set(conf)
CDF.FLD.icdf.VAR_MENU = [CDF.FLD.varname]
# Add the appropriate changes
CDF.FLD.icdf.nt = 1
conf = self.CDF[ii].PLOT.conf_get()
CDF.PLOT.conf_set(conf)
CDF.show.set(True)
self.CDF[ii].show.set(False)
self.ncdf += 1
self.CDF.append(CDF)
self.CDF_INDX.set(self.ncdf-1)
self.CDF_LIST = list(range(self.ncdf))
# Adding a VECTOR in the Drawing class
#
self.LAYERS.add(TYPE='FLD',Filename=self.CDF[ii].FILENAME.get(),N=1,wid=self.cons)
#self.nfiles += 1
#self.FILENAMES.append(self.CDF[ii].FILENAME.get())
#self.FILETYPES.append('FLD')
#self.FILEORDER.append(self.ncdf-1)
#self.SEQUENCES.append(tk.BooleanVar(value=False))
#self.SEQLEADER.append(tk.BooleanVar(value=False))
#self.SEQNTIMES.append(1)
self.make_plot()
def contour_var(self):
# ==========================
''' Calculates the long term variance of a contour field '''
if self.ncdf == 0:
messagebox.showinfo(message='No Netcdf file opened yet')
return
ii = self.CDF_INDX.get()
if self.CDF[ii].PARENT is None:
toconsola('Calculating variance of current CONTOUR field',wid=self.cons)
else:
ii = self.CDF[ii].PARENT
toconsola('Calculating variance of PARENT CONTOUR field, ii='+str(ii),wid=self.cons)
K = self.CDF[ii].K.get()
L = self.CDF[ii].L.get()
nt = self.CDF[ii].FLD.icdf.nt
if nt <= 1:
messagebox.showinfo(message='Variance requires more than one time record')
return
for L in range(0,nt):
data = self.CDF[ii].FLD.read(K=K,L=L,wid=self.cons)
if L==0:
num1 = data.copy()
num2 = np.square(data)
else:
num1 += data
num2 += np.square(data)
#data = num2/nt - np.square(num1/nt)
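# Unbiased (sample) variance from one-pass sums:
#   Var = (sum(x^2) - (sum(x))^2 / n) / (n - 1)
# e.g. x = [1,2,3]: sum = 6, sum of squares = 14, n = 3
#      Var = (14 - 36/3) / 2 = 1.0
# The commented line above is the biased population form sum(x^2)/n - mean^2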
data = num2/(nt-1) - np.square(num1)/(nt*(nt-1))
CDF = CONTOUR()
CDF.SOURCE = 'VARIANCE'
CDF.PARENT = ii
CDF.FLD.data = data.copy()
CDF.FLD.minval = float(data.min())
CDF.FLD.maxval = float(data.max())
toconsola('Variance Min val = '+str(CDF.FLD.minval),wid=self.cons)
toconsola('Variance Max val = '+str(CDF.FLD.maxval),wid=self.cons)
# Middle of the time segment
t2 = 0.5*(self.CDF[ii].T_LIST[0]+self.CDF[ii].T_LIST[-1])
CDF.K.set(K)
CDF.L.set(0)
CDF.K_LIST = [K]
CDF.L_LIST = [0]
CDF.Z_LIST = [self.CDF[ii].Z_LIST[K]]
CDF.T_LIST = [t2]
try:
CDF.DATE = [num2date(t2, \
units=self.CDF[ii].FLD.icdf.time_units, \
calendar=self.CDF[ii].FLD.icdf.time_calendar)]
except:
CDF.DATE = [0.5*(self.CDF[ii].FLD.icdf.nt-1)]
CDF.ALIAS.set('Variance')
CDF.FLD.x = self.CDF[ii].FLD.x
CDF.FLD.y = self.CDF[ii].FLD.y
CDF.FLD.xx = self.CDF[ii].FLD.xx
CDF.FLD.yy = self.CDF[ii].FLD.yy
CDF.FLD.ndims = self.CDF[ii].FLD.ndims
CDF.FLD.with_axes = self.CDF[ii].FLD.with_axes
CDF.FLD.units = self.CDF[ii].FLD.units
CDF.FLD.missing = self.CDF[ii].FLD.missing
CDF.FLD.varname = self.CDF[ii].FLD.varname
CDF.FLD.varid = self.CDF[ii].FLD.varid
CDF.FLD.xmin = self.CDF[ii].FLD.xmin
CDF.FLD.xmax = self.CDF[ii].FLD.xmax
CDF.FLD.ymin = self.CDF[ii].FLD.ymin
CDF.FLD.ymax = self.CDF[ii].FLD.ymax
CDF.FILENAME.set(self.CDF[ii].FILENAME.get())
CDF.varname.set(CDF.FLD.varname)
CDF.FLD.nc = Dataset(self.CDF[ii].FILENAME.get())
CDF.FLD.icdf = tools.geocdf(wid=self.cons)
# We copy the original icdf information
conf = self.CDF[ii].FLD.icdf.conf_get()
CDF.FLD.icdf.conf_set(conf)
# Add the appropriate changes
CDF.FLD.icdf.VAR_MENU = [CDF.FLD.varname]
CDF.FLD.icdf.nt = 1
conf = self.CDF[ii].PLOT.conf_get()
CDF.PLOT.conf_set(conf)
toconsola('Setting contour intervals ...',wid=self.cons)
try:
CDF.PLOT.CONTOUR_MIN.set(myround(CDF.FLD.minval))
except:
CDF.PLOT.CONTOUR_MIN.set(CDF.FLD.minval)
try:
CDF.PLOT.CONTOUR_MAX.set(myround(CDF.FLD.maxval))
except:
CDF.PLOT.CONTOUR_MAX.set(CDF.FLD.maxval)
dd = CDF.PLOT.CONTOUR_MAX.get() - CDF.PLOT.CONTOUR_MIN.get()
try:
CDF.PLOT.CONTOUR_INTERVAL.set(myround(0.1*dd,0))
except:
CDF.PLOT.CONTOUR_INTERVAL.set(0.1*dd)
CDF.show.set(True)
self.CDF[ii].show.set(False)
self.ncdf += 1
self.CDF.append(CDF)
self.CDF_INDX.set(self.ncdf-1)
self.CDF_LIST = list(range(self.ncdf))
self.LAYERS.add(TYPE='FLD',Filename=self.CDF[ii].FILENAME.get(),N=1,wid=self.cons)
#self.nfiles += 1
#self.FILENAMES.append(self.CDF[ii].FILENAME.get())
#self.FILETYPES.append('FLD')
#self.FILEORDER.append(self.ncdf-1)
#self.SEQUENCES.append(tk.BooleanVar(value=False))
#self.SEQLEADER.append(tk.BooleanVar(value=False))
#self.SEQNTIMES.append(1)
self.make_plot()
def get_map_coords(self):
# ====================
def _close():
# -----------
self.CAPTURE_POINT = False
self.Window_xysel.destroy()
self.Window_xysel = None
def _done():
# -----------
_close()
if self.Window_xysel is None:
self.CAPTURE_POINT = True
self.Window_xysel = tk.Toplevel(self.master)
self.Window_xysel.title('Select point')
self.Window_xysel.resizable(width=False,height=False)
self.Window_xysel.protocol('WM_DELETE_WINDOW',_close)
F0 = ttk.Frame(self.Window_xysel,padding=5,borderwidth=5)
ttk.Label(F0,text='Enter or select a point in the map ...').grid(row=0,column=0,columnspan=6,sticky='we',pady=10)
ttk.Label(F0,text='x = ',width=5).grid(row=1,column=0,sticky='e')
ttk.Entry(F0,textvariable=self.pxo,width=15).grid(row=1,column=1,columnspan=3,sticky='ew',pady=5)
ttk.Label(F0,text='y = ',width=5).grid(row=1,column=4,sticky='e')
ttk.Entry(F0,textvariable=self.pyo,width=15).grid(row=1,column=5,columnspan=3,sticky='ew',pady=5)
ttk.Button(F0,text='Cancel',command=_close).grid(row=2,column=4,sticky='e',padx=5)
ttk.Button(F0,text='Done',command=_done).grid(row=2,column=5,sticky='e',padx=5)
F0.grid()
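# wait_window() makes the dialog modal: it blocks until the window is
# destroyed. While CAPTURE_POINT is True, the canvas click handler is
# presumably expected to update self.pxo/self.pyo with the picked point.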
self.Window_xysel.wait_window()
return [self.pxo.get(), self.pyo.get()]
def vector_series(self):
# ==========================
''' Opens a figure and shows the velocity time series
at a point selected by the user on the map '''
if self.nvec == 0:
messagebox.showinfo(message='No currents file opened yet')
return
ii = self.VEC_INDX.get()
K = self.VEC[ii].K.get()
nt = self.VEC[ii].U.icdf.nt
ndims = self.VEC[ii].U.ndims
if nt == 1:
messagebox.showinfo(message='Single time step. No time series')
return
yy = self.get_map_coords()
xo = yy[0]; yo = yy[1]
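# Nearest-grid-node search: build the squared-distance field over the 2-D
# coordinate arrays, take argmin on the flattened array and recover the
# (j,i) indices with unravel_index. Note this is a planar approximation
# in lon/lat degrees, not a great-circle distance.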
dis = (xo-self.VEC[ii].U.xx)**2 + (yo-self.VEC[ii].U.yy)**2
ind = np.unravel_index(dis.argmin(), dis.shape)
io = ind[1]
jo = ind[0]
self.VEC[ii].jo.set(jo)
self.VEC[ii].io.set(io)
toconsola('Vector selected point: '+str(io)+', '+str(jo),wid=self.cons)
if ndims == 3:
if self.VEC[ii].U.icdf.ppl[self.VEC[ii].U.varid] > -1:
u = self.VEC[ii].U.nc.variables[self.VEC[ii].U.varname][:,jo,io].squeeze()
v = self.VEC[ii].V.nc.variables[self.VEC[ii].V.varname][:,jo,io].squeeze()
else:
toconsola('Invalid file!',wid=self.cons)
return
elif ndims == 4:
u = self.VEC[ii].U.nc.variables[self.VEC[ii].U.varname][:,K,jo,io].squeeze()
v = self.VEC[ii].V.nc.variables[self.VEC[ii].V.varname][:,K,jo,io].squeeze()
else:
toconsola("Invalid number of dimensions, "+str(ndims),wid=self.cons)
return
_u = u.filled(fill_value=np.nan)
_v = v.filled(fill_value=np.nan)
# masked_equal(np.nan) never matches because NaN != NaN; mask invalids instead
u = np.ma.masked_invalid(_u); del _u
v = np.ma.masked_invalid(_v); del _v
t = []
for i in range(nt):
t.append(datetime.datetime.strptime(str(self.VEC[ii].DATE[i]),'%Y-%m-%d %H:%M:%S'))
Window = tk.Toplevel(self.master)
Window.title('PLOTXY')
Window.resizable(width=False,height=False)
#Window.protocol('WM_DELETE_WINDOW',_close)
plotxy.PLOTXY(Window,t=t,u=u,v=v)
def vector_mean(self):
# ==========================
''' Calculates the long term mean of a vector field '''
if self.nvec == 0:
messagebox.showinfo(message='No currents file opened yet')
return
ii = self.VEC_INDX.get()
if self.VEC[ii].PARENT is None:
pass
else:
ii = self.VEC[ii].PARENT
K = self.VEC[ii].K.get()
L = self.VEC[ii].L.get()
nt = self.VEC[ii].U.icdf.nt
for L in range(0,nt):
toconsola('L = '+str(L),wid=self.cons)
udata = self.VEC[ii].U.read(K=K,L=L,wid=self.cons)
vdata = self.VEC[ii].V.read(K=K,L=L,wid=self.cons)
#ny, nx = udata.shape
#udata = udata.reshape((1,ny,nx))
#vdata = vdata.reshape((1,ny,nx))
#if L==0:
# unum = udata.copy()
# vnum = vdata.copy()
#else:
# unum = np.ma.concatenate([unum,udata])
# vnum = np.ma.concatenate([vnum,vdata])
if L==0:
unum = udata.copy()
vnum = vdata.copy()
else:
unum = unum + udata
vnum = vnum + vdata
VEC = VECTOR()
# Make sure that the missing value is NaN:
#udata = unum.mean(axis=0)
#vdata = vnum.mean(axis=0)
VEC.SOURCE = 'MEAN'
VEC.PARENT = ii
udata = unum / nt
vdata = vnum / nt
_u = udata.filled(fill_value=np.nan)
_v = vdata.filled(fill_value=np.nan)
# masked_equal(np.nan) never matches because NaN != NaN; mask invalids instead
udata = np.ma.masked_invalid(_u); del _u
vdata = np.ma.masked_invalid(_v); del _v
VEC.U.data = udata
VEC.V.data = vdata
VEC.K.set(K)
if len(self.VEC[ii].Z_LIST) > 0:
VEC.K_LIST = [K]
VEC.Z_LIST = [self.VEC[ii].Z_LIST[K]]
VEC.L.set(0)
VEC.L_LIST = [0]
# Middle of the time segment
t2 = 0.5*(self.VEC[ii].T_LIST[0]+self.VEC[ii].T_LIST[-1])
VEC.T_LIST = [t2]
try:
VEC.DATE = [num2date(t2, \
units=self.VEC[ii].U.icdf.time_units, \
calendar=self.VEC[ii].U.icdf.time_calendar)]
except:
VEC.DATE = [0.5*(self.VEC[ii].U.icdf.nt-1)]
VEC.grid_type.set(self.VEC[ii].grid_type.get())
VEC.ALIAS.set('Average')
VEC.U.x = self.VEC[ii].U.x
VEC.U.y = self.VEC[ii].U.y
VEC.U.xx = self.VEC[ii].U.xx
VEC.U.yy = self.VEC[ii].U.yy
VEC.U.ndims = self.VEC[ii].U.ndims
VEC.U.with_axes = self.VEC[ii].U.with_axes
VEC.U.units = self.VEC[ii].U.units
VEC.U.missing = self.VEC[ii].U.missing
VEC.U.varname = self.VEC[ii].U.varname
VEC.U.varid = self.VEC[ii].U.varid
VEC.U.xmin = self.VEC[ii].U.xmin
VEC.U.xmax = self.VEC[ii].U.xmax
VEC.V.x = self.VEC[ii].V.x
VEC.V.y = self.VEC[ii].V.y
VEC.V.xx = self.VEC[ii].V.xx
VEC.V.yy = self.VEC[ii].V.yy
VEC.V.ndims = self.VEC[ii].V.ndims
VEC.V.with_axes = self.VEC[ii].V.with_axes
VEC.V.units = self.VEC[ii].V.units
VEC.V.missing = self.VEC[ii].V.missing
VEC.V.varname = self.VEC[ii].V.varname
VEC.V.varid = self.VEC[ii].V.varid
VEC.V.xmin = self.VEC[ii].V.xmin
VEC.V.xmax = self.VEC[ii].V.xmax
VEC.UFILENAME.set(self.VEC[ii].UFILENAME.get())
VEC.VFILENAME.set(self.VEC[ii].VFILENAME.get())
VEC.uname.set(VEC.U.varname)
VEC.vname.set(VEC.V.varname)
VEC.U.nc = Dataset(self.VEC[ii].UFILENAME.get())
VEC.V.nc = Dataset(self.VEC[ii].VFILENAME.get())
VEC.U.icdf = tools.geocdf(wid=self.cons)
VEC.V.icdf = tools.geocdf(wid=self.cons)
conf = self.VEC[ii].U.icdf.conf_get()
VEC.U.icdf.conf_set(conf)
VEC.U.icdf.VAR_MENU = [VEC.U.varname]
VEC.U.icdf.nt = 1
conf = self.VEC[ii].V.icdf.conf_get()
VEC.V.icdf.conf_set(conf)
VEC.V.icdf.VAR_MENU = [VEC.V.varname]
VEC.V.icdf.nt = 1
conf = self.VEC[ii].PLOT.conf_get()
VEC.PLOT.conf_set(conf)
VEC.show.set(True)
self.VEC[ii].show.set(False)
self.nvec += 1
self.VEC.append(VEC)
self.VEC_INDX.set(self.nvec-1)
self.VEC_LIST = list(range(self.nvec))
self.LAYERS.add(TYPE='VEC',Filename=self.VEC[ii].UFILENAME.get(),N=1,wid=self.cons)
#self.nfiles += 1
#self.FILENAMES.append(self.VEC[ii].UFILENAME.get())
#self.FILETYPES.append('VEC')
#self.FILEORDER.append(self.nvec-1)
#self.SEQUENCES.append(tk.BooleanVar(value=False))
#self.SEQLEADER.append(tk.BooleanVar(value=False))
#self.SEQNTIMES.append(1)
self.make_plot()
def marker_editor(self):
# ====================
MARKER = geomarker.parameters()
marklabel = tk.StringVar()
# Map projection
#
proj = map_proj(self.PLOT.MAP_PROJECTION.get())
def _close():
# -----------
self.CAPTURE_POINT = False
self.Window_markered.destroy()
self.Window_markered = None
def _done():
# -----------
_close()
MARKER.SOURCE = 'VIEWER'
MARKER.FILENAME.set(None)
self.nmarker += 1
self.MARKER.append(MARKER)
self.MARKER_INDX.set(self.nmarker-1)
self.MARKER_LIST = list(range(self.nmarker))
self.LAYERS.add(TYPE='MARKER',Filename=None,N=len(MARKER.lon),wid=self.cons)
self.LAYERS.print()
ii = self.MARKER_INDX.get()
self.make_plot()
def _clear():
# -----------
global log
log.delete('1.0','end')
marklabel.set('')
def _add():
# ---------
''' Add the new mark '''
#string = '\t {} {} {} \n'.format(self.pxo.get(),self.pyo.get(),marklabel.get())
string = '%9.4f, %9.4f, %s\n' %(self.pxo.get(),self.pyo.get(),marklabel.get())
toconsola('string = '+string,wid=self.cons)
log.insert('end',string)
MARKER.lon.append(self.pxo.get())
MARKER.lat.append(self.pyo.get())
MARKER.label.append(marklabel.get())
MARKER.n = len(MARKER.lon)
geomarker.drawing(self.ax, proj['proj'], MARKER)
self.canvas.draw()
marklabel.set('')
def _load():
# ---------
global log
''' Load an existing marker file '''
nn = filedialog.askopenfilename(filetypes=[('CSV','*.csv'),
('TXT','*.txt'),
('ALL','*')],
initialdir='./',
parent=self.Window_marker)
if len(nn) == 0:
return
else:
filename = '%s' % nn
# Not empty filename:
MARKER.Read(filename)
if MARKER.n == 0:
return
for l in range(MARKER.n):
string = '%9.4f, %9.4f, %s\n' %(MARKER.lon[l], \
MARKER.lat[l],
MARKER.label[l])
log.insert('end',string)
def _save():
# ---------
global log
aa = log.get("1.0","end-1c")
''' Save markers onto file '''
filetypes = [('Text file','.txt')]
nn = filedialog.asksaveasfilename(title='Save marker file',
initialdir='./',
filetypes=filetypes,
confirmoverwrite=True)
if nn is None or len(nn) == 0:
return
filename = '%s' %nn
toconsola('Saving entries to file ' +filename,wid=self.cons)
f = open(filename,'w')
f.write(aa)
f.close()
if self.Window_markered is None:
self.CAPTURE_POINT = True
self.Window_markered = tk.Toplevel(self.master)
self.Window_markered.title('Marker editor')
self.Window_markered.resizable(width=False,height=False)
self.Window_markered.protocol('WM_DELETE_WINDOW',_close)
F0 = ttk.Frame(self.Window_markered,padding=5,borderwidth=5)
ttk.Label(F0,text='Enter or select a point in the map ...').grid(row=0,column=0,columnspan=6,sticky='we',pady=10)
ttk.Label(F0,text='x',width=12).grid(row=1,column=0,columnspan=1,sticky='we',pady=10)
ttk.Label(F0,text='y').grid(row=1,column=1,columnspan=1,sticky='we',pady=10)
ttk.Label(F0,text='Label').grid(row=1,column=2,columnspan=1,sticky='we',pady=10)
ttk.Entry(F0,textvariable=self.pxo,width=12).grid(row=2,column=0,columnspan=1,sticky='ew',pady=5)
ttk.Entry(F0,textvariable=self.pyo,width=12).grid(row=2,column=1,columnspan=1,sticky='ew',pady=5)
ttk.Entry(F0,textvariable=marklabel,width=12).grid(row=2,column=2,columnspan=1,sticky='ew',pady=5)
ttk.Button(F0,text='Add',command=_add).grid(row=2,column=3,sticky='ew',pady=5)
global log
log = tk.Text(F0,height=5)
log.grid(row=3,column=0,columnspan=4,padx=10,pady=10,sticky='nsew')
#log.configure(state='disabled')
# Scrollbar
scrollb = tk.Scrollbar(F0,command=log.yview)
scrollb.grid(row=3,column=4,sticky='nsew',padx=2,pady=2)
log['yscrollcommand'] = scrollb.set
ttk.Button(F0,text='Clear',command=_clear).grid(row=4,column=0,sticky='e',padx=5)
ttk.Button(F0,text='Load',command=_load).grid(row=4,column=1,sticky='e',padx=5)
ttk.Button(F0,text='Save',command=_save).grid(row=4,column=2,sticky='e',padx=5)
ttk.Button(F0,text='Done',command=_done).grid(row=4,column=3,sticky='e',padx=5)
F0.grid()
# ====================
def get_ellipse(self):
# ====================
''' Widget to read Ellipses '''
self.ESOURCE = tk.StringVar()
ELLIPSE = ellipse.ELLIPSE()
self.ESOURCE.set(self.ELLIPSE_OPTIONS[0])
def _cancel():
# ===========
self.Window_gellipse.destroy()
self.Window_gellipse = None
def _close():
# ===========
self.Window_gellipse.destroy()
self.Window_gellipse = None
self.make_plot()
if self.Window_cellipse is not None:
self.Window_cellipse.destroy()
self.Window_cellipse = None
def _done():
# ===========
_close()
def _clear():
# ===========
if self.nellipse == 0:
return
ii = self.ELLIPSE_INDX.get()
self.LAYERS.erase('ELLIPSE',ii,wid=self.cons)
self.LAYERS.print()
#for i in range(self.nfiles):
# if self.FILETYPES[i] == 'ELLIPSE' and self.FILEORDER[i] == ii:
# del self.FILENAMES[i]
# del self.FILETYPES[i]
# del self.FILEORDER[i]
# del self.SEQUENCES[i]
# del self.SEQLEADER[i]
# del self.SEQNTIMES[i]
# self.nfiles -= 1
if self.LAYERS.n == 0:
self.TIME = []
self.DATE = []
self.L.set(0)
self.L_LIST = []
self.NL = 0
self.bnext.configure(state='disabled')
self.bprev.configure(state='disabled')
self.PLOT.TLABEL.set('')
self.lbox['values'] = self.L_LIST
self.lbox.configure(state='disabled')
self.first = True
toconsola('Erasing record '+str(ii),wid=self.cons)
del self.ELLIPSE[ii]
self.nellipse -= 1
ii = self.nellipse-1 if ii >= self.nellipse else ii
toconsola('New ellipse = '+str(ii),wid=self.cons)
self.ELLIPSE_INDX.set(ii)
_refill(ii)
def _reget():
# ===========
self.ELLIPSE_INDX.set(_wsel.get())
ii = self.ELLIPSE_INDX.get()
_refill(ii)
def _refill(ii):
# ============
if ii >= 0:
self.ELLIPSE_LIST = list(range(self.nellipse))
_wsel['values'] = self.ELLIPSE_LIST
_went['textvariable'] = self.ELLIPSE[ii].FILENAME
_wstat['text'] = 'Number of ellipses = '+str(self.ELLIPSE[ii].n)
_wsel.configure(state='normal')
_show['variable'] = self.ELLIPSE[ii].show
_aent.configure(state='normal')
_aent['textvariable'] = self.ELLIPSE[ii].ALIAS
else:
self.ELLIPSE = []
self.ELLIPSE_LIST = ['0']
self.ELLIPSE_INDX.set(0)
#_wsel['values'] = self.ELLIPSE_LIST
_wsel['values'] = None
_went['textvariable'] = None
_wstat['text'] = ''
_wsel.configure(state='disabled')
_aent.configure(state='disabled')
_show.configure(state='disabled')
self.make_plot()
def _add():
# ===========
ISOURCE = self.ELLIPSE_OPTIONS.index(self.ESOURCE.get())
types=[('TXT','*.txt'),('ALL','*')]
nn = filedialog.askopenfilename(parent=self.Window_gellipse, \
filetypes=types)
if len(nn) == 0:
return
filename = '%s' % nn
toconsola('Reading ELLIPSE file '+filename,wid=self.cons)
ELLIPSE.Read(filename)
if ELLIPSE.n == 0:
return
self.nellipse += 1
self.ELLIPSE.append(ELLIPSE)
self.ELLIPSE_INDX.set(self.nellipse-1)
self.ELLIPSE_LIST = list(range(self.nellipse))
self.LAYERS.add(TYPE='ELLIPSE',Filename=filename,N=ELLIPSE.n,wid=self.cons)
self.LAYERS.print()
ii = self.ELLIPSE_INDX.get()
_refill(ii)
# Main Window ...
# ================
if self.Window_gellipse is None:
self.Window_gellipse = tk.Toplevel(self.master)
self.Window_gellipse.title('Variance ellipses')
self.Window_gellipse.protocol('WM_DELETE_WINDOW',_close)
else:
self.Window_gellipse.lift()
if self.nellipse > 0:
ii = self.ELLIPSE_INDX.get()
else:
ii = -1
F0 = ttk.Frame(self.Window_gellipse,padding=5)
#Add
ttk.Combobox(F0,textvariable=self.ESOURCE, \
values=self.ELLIPSE_OPTIONS).grid(row=0,column=0,padx=3)
ttk.Button(F0,text='Import',command=_add).grid(row=1,column=0,padx=3)
# Filename:
ttk.Label(F0,text='Ellipse file').grid(row=0,column=1,padx=3)
_wsel = ttk.Combobox(F0,textvariable=self.ELLIPSE_INDX, \
values=self.ELLIPSE_LIST,width=5)
_wsel.grid(row=0,column=2)
_wsel.bind('<<ComboboxSelected>>',lambda e: _reget())
_went = ttk.Entry(F0,justify='left',width=50,state='readonly')
_went.grid(row=0,column=3,columnspan=5,padx=3,sticky='w')
if ii == -1:
_wstat = ttk.Label(F0,text='',width=50,justify='left')
_wsel.configure(state='disabled')
else:
_wstat = ttk.Label(F0,text=' Ellipses in the file= '+str(self.ELLIPSE[ii].n),width=50,justify='left')
_went['textvariable'] = self.ELLIPSE[ii].FILENAME
_wstat.grid(row=1,column=3,columnspan=5,padx=3,sticky='w')
#Alias
ttk.Label(F0,text='Alias').grid(row=2,column=1,padx=3,pady=3)
_aent = ttk.Entry(F0,width=15,justify='left')
_aent.grid(row=2,column=2,columnspan=2,sticky='w')
F0.grid(row=0,column=0)
F1 = ttk.Frame(self.Window_gellipse,padding=5)
if ii == -1:
_show = ttk.Checkbutton(F1,text='Show')
_aent.configure(state='disabled')
else:
_show = ttk.Checkbutton(F1,text='Show',command=self.make_plot)
_show['variable']=self.ELLIPSE[ii].show
_aent['textvariable'] = self.ELLIPSE[ii].ALIAS
_show.grid(row=1,column=5,padx=3)
ttk.Button(F1,text='Cancel',command=_cancel).grid(row=1,column=6,padx=3)
ttk.Button(F1,text='Clear',command=_clear).grid(row=1,column=7,padx=3)
ttk.Button(F1,text='Plot',command=_close).grid(row=1,column=8,padx=3)
F1.grid(row=1,column=0)
# ====================
def calc_ellipse(self):
# ====================
''' Widget to calculate ellipse from velocity field '''
if self.nvec == 0:
messagebox.showinfo(message='No currents file opened yet')
return
ii = self.VEC_INDX.get()
if self.VEC[ii].PARENT is None:
      toconsola('Calculating mean of current VECTOR field',wid=self.cons)
else:
ii = self.VEC[ii].PARENT
      toconsola('Calculating mean of PARENT VECTOR field, ii='+str(ii),wid=self.cons)
K = self.VEC[ii].K.get()
nt = self.VEC[ii].U.icdf.nt
ndims = self.VEC[ii].U.ndims
ELLIPSE = ellipse.ELLIPSE()
ELLIPSE.SOURCE = 'VIEWER'
ELLIPSE.FILENAME.set(None)
# Map projection
#
proj = map_proj(self.PLOT.MAP_PROJECTION.get())
try:
self.pzo.set(self.VEC[ii].Z_LIST[K])
except:
self.pzo.set(0)
    SUM = tk.DoubleVar()
    SVM = tk.DoubleVar()
    SPM = tk.DoubleVar()
SAA = tk.DoubleVar()
SBB = tk.DoubleVar()
SPP = tk.DoubleVar()
SXO = tk.DoubleVar()
SYO = tk.DoubleVar()
if nt == 1:
messagebox.showinfo(message='Single time step. No variance ellipses')
return
def _close():
# -----------
self.CAPTURE_POINT = False
self.Window_cellipse.destroy()
self.Window_cellipse = None
    def _cancel():
    # -----------
      nonlocal ELLIPSE
      global log
      ELLIPSE = ellipse.ELLIPSE()
log.delete('1.0','end')
self.make_plot()
def _done():
# -----------
filename = self.VEC[ii].UFILENAME.get()
ELLIPSE.SOURCE = 'VIEWER'
ELLIPSE.PARENT = ii
self.nellipse += 1
self.ELLIPSE.append(ELLIPSE)
self.ELLIPSE_INDX.set(self.nellipse-1)
self.ELLIPSE_LIST = list(range(self.nellipse))
self.LAYERS.add(TYPE='ELLIPSE',Filename=filename,N=len(ELLIPSE.xo),wid=self.cons)
self.LAYERS.print()
_close()
self.make_plot()
def _calc():
# ---------
dis = (self.pxo.get()-self.VEC[ii].U.xx)**2 + (self.pyo.get()-self.VEC[ii].U.yy)**2
ind = np.unravel_index(dis.argmin(), dis.shape)
io = ind[1]
jo = ind[0]
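      # (io,jo): indices of the grid node closest to the selected point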
self.VEC[ii].jo.set(jo)
self.VEC[ii].io.set(io)
toconsola('Vector selected point: '+str(io)+', '+str(jo),wid=self.cons)
if ndims == 3:
if self.VEC[ii].U.icdf.ppl[self.VEC[ii].U.varid] > -1:
u = self.VEC[ii].U.nc.variables[self.VEC[ii].U.varname][:,jo,io].squeeze()
v = self.VEC[ii].V.nc.variables[self.VEC[ii].V.varname][:,jo,io].squeeze()
else:
          toconsola('Invalid file!',wid=self.cons)
return
elif ndims == 4:
u = self.VEC[ii].U.nc.variables[self.VEC[ii].U.varname][:,K,jo,io].squeeze()
v = self.VEC[ii].V.nc.variables[self.VEC[ii].V.varname][:,K,jo,io].squeeze()
      else:
        toconsola('Invalid number of dimensions, '+str(ndims),wid=self.cons)
        return
      # masked_equal() never matches NaN (NaN != NaN), so masked_invalid()
      # is used to re-mask the missing values after filling.
      _u = u.filled(fill_value=np.nan)
      _v = v.filled(fill_value=np.nan)
      u = np.ma.masked_invalid(_u); del _u
      v = np.ma.masked_invalid(_v); del _v
mu = np.mean(u)
mv = np.mean(v)
mphi = np.angle(mu+1j*mv)
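      # Mean-flow direction: the angle of the complex mean velocity mu + i*mv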
print('Angle mean current = ', mphi, 180*mphi/np.pi)
u = u - np.mean(u)
v = v - np.mean(v)
suu = np.dot(u,u)
svv = np.dot(v,v)
suv = np.dot(u,v)
Tra = suu + svv
Det = suu*svv - suv*suv
a2 = 0.5*(Tra + np.sqrt(Tra*Tra - 4*Det))
b2 = 0.5*(Tra - np.sqrt(Tra*Tra - 4*Det))
aphi = 0.5*np.arctan2(2*suv,suu-svv)
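      # a2 and b2 are the eigenvalues of the 2x2 matrix [[suu,suv],[suv,svv]]
      # (the velocity-anomaly covariance scaled by the number of samples):
      # the squared semi-major and semi-minor axes of the variance ellipse.
      # aphi is the orientation of the principal axis.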
print('Test: ',2*suv/(suu-svv), np.tan(2*aphi))
print('Eddy kinetic energy: ', 0.5*Tra)
print('Total eddy variance: ', a2 + b2, Tra)
print('Directional eddy variance: ', a2 - b2)
print('Isotropic eddy variance: ', 2*b2)
print('Polarization factor: ', (a2-b2)/(a2+b2))
print('Variance angle: ', aphi, 180*aphi/np.pi)
SXO.set(self.pxo.get())
SYO.set(self.pyo.get())
SUM.set(mu)
SVM.set(mv)
SPM.set(180*mphi/np.pi)
SAA.set(np.sqrt(a2))
SBB.set(np.sqrt(b2))
SPP.set(180*aphi/np.pi)
def _add():
# ---------
global log
ELLIPSE.n += 1
ELLIPSE.xo.append(SXO.get())
ELLIPSE.yo.append(SYO.get())
ELLIPSE.zo.append(self.pzo.get())
ELLIPSE.a.append(SAA.get())
ELLIPSE.b.append(SBB.get())
ELLIPSE.phim.append(SPM.get())
ELLIPSE.phia.append(SPP.get())
_wnn['text'] = 'n = %d' % ELLIPSE.n
i = -1
string = '%8.4f, %8.4f, %8.4f, %8.4f, %8.4f, %8.4f, %8.4f\n' % (ELLIPSE.xo[i], \
ELLIPSE.yo[i], \
ELLIPSE.zo[i], \
ELLIPSE.phim[i], \
ELLIPSE.phia[i], \
ELLIPSE.a[i], \
ELLIPSE.b[i])
log.insert('end',string)
SUM.set(None)
SVM.set(None)
SPM.set(None)
SAA.set(None)
SBB.set(None)
SPP.set(None)
ellipse.drawing(self.ax, proj['proj'], ELLIPSE)
self.canvas.draw()
def _save():
# ---------
''' Save ellipses onto file '''
filetypes = [('Text file','.txt')]
nn = filedialog.asksaveasfilename(title='Save Ellipse file',
initialdir='./',
filetypes=filetypes,
confirmoverwrite=True)
if nn is None or len(nn) == 0:
return
filename = '%s' %nn
toconsola('Saving entries to file ' +filename,wid=self.cons)
f = open(filename,'w')
for i in range(len(ELLIPSE.xo)):
string = '%8.4f, %8.4f, %8.4f, %8.4f, %8.4f, %8.4f, %8.4f\n' % (ELLIPSE.xo[i], \
ELLIPSE.yo[i], \
ELLIPSE.zo[i], \
ELLIPSE.phim[i], \
ELLIPSE.phia[i], \
ELLIPSE.a[i], \
ELLIPSE.b[i])
f.write(string)
f.close()
def _load():
# ---------
      ''' Loads ellipse positions from file,
          and calculates ellipse parameters using current vector data '''
global log
nn = filedialog.askopenfilename(title='Load ellipse data',
parent=self.Window_cellipse,
initialdir='./')
if len(nn) == 0:
return
filename = '%s' % nn
with open(filename) as datafile:
for line in datafile.readlines():
line = line.strip()
columns = line.split(',')
ELLIPSE.xo.append(float(columns[0]))
ELLIPSE.yo.append(float(columns[1]))
ELLIPSE.zo.append(float(columns[2]))
dis = (ELLIPSE.xo[-1]-self.VEC[ii].U.xx)**2 + (ELLIPSE.yo[-1]-self.VEC[ii].U.yy)**2
ind = np.unravel_index(dis.argmin(), dis.shape)
io = ind[1]
jo = ind[0]
# Read the data
if ndims == 3:
if self.VEC[ii].U.icdf.ppl[self.VEC[ii].U.varid] > -1:
u = self.VEC[ii].U.nc.variables[self.VEC[ii].U.varname][:,jo,io].squeeze()
v = self.VEC[ii].V.nc.variables[self.VEC[ii].V.varname][:,jo,io].squeeze()
            else:
              toconsola('Invalid file!',wid=self.cons)
              return
elif ndims == 4:
u = self.VEC[ii].U.nc.variables[self.VEC[ii].U.varname][:,K,jo,io].squeeze()
v = self.VEC[ii].V.nc.variables[self.VEC[ii].V.varname][:,K,jo,io].squeeze()
          else:
            toconsola('Invalid number of dimensions, '+str(ndims),wid=self.cons)
            return
mu = np.mean(u)
mv = np.mean(v)
mphi = np.angle(mu+1j*mv)
u = u - np.mean(u)
v = v - np.mean(v)
suu = np.dot(u,u)
svv = np.dot(v,v)
suv = np.dot(u,v)
Tra = suu + svv
Det = suu*svv - suv*suv
a2 = 0.5*(Tra + np.sqrt(Tra*Tra - 4*Det))
b2 = 0.5*(Tra - np.sqrt(Tra*Tra - 4*Det))
aphi = 0.5*np.arctan2(2*suv,suu-svv)
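          # Same covariance eigen-decomposition as in _calc() above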
ELLIPSE.phim.append(180*mphi/np.pi)
ELLIPSE.a.append(np.sqrt(a2))
ELLIPSE.b.append(np.sqrt(b2))
ELLIPSE.phia.append(180*aphi/np.pi)
ELLIPSE.n += 1
_wnn['text'] = 'n = %d' % ELLIPSE.n
i = -1
string = '%8.4f, %8.4f, %8.4f, %8.4f, %8.4f, %8.4f, %8.4f\n' % (ELLIPSE.xo[i], \
ELLIPSE.yo[i], ELLIPSE.zo[i], ELLIPSE.phim[i], ELLIPSE.phia[i], \
ELLIPSE.a[i], ELLIPSE.b[i])
log.insert('end',string)
SUM.set(None)
SVM.set(None)
SPM.set(None)
SAA.set(None)
SBB.set(None)
SPP.set(None)
ellipse.drawing(self.ax, proj['proj'], ELLIPSE)
self.canvas.draw()
# Main widget
# -----------
if self.Window_cellipse is not None:
self.Window_cellipse.lift()
return
self.CAPTURE_POINT = True
self.Window_cellipse = tk.Toplevel(self.master)
self.Window_cellipse.title('Ellipse calculator')
self.Window_cellipse.resizable(width=False,height=False)
self.Window_cellipse.protocol('WM_DELETE_WINDOW',_close)
Fm = ttk.Frame(self.Window_cellipse,padding=5,borderwidth=5)
ttk.Label(Fm,text='Enter or select a point in the map ...').grid(row=0,column=0,columnspan=6,sticky='we')
Fm.grid()
F0 = ttk.Frame(self.Window_cellipse,padding=5,borderwidth=5)
ttk.Label(F0,text='x').grid(row=1,column=0,sticky='we',padx=6)
_wsx = ttk.Entry(F0,textvariable=self.pxo,width=15,justify='left')
_wsx.grid(row=1,column=1,sticky='ew',padx=6)
ttk.Label(F0,text='y').grid(row=1,column=2,sticky='we',padx=6)
_wsy = ttk.Entry(F0,textvariable=self.pyo,width=15,justify='left')
_wsy.grid(row=1,column=3,sticky='ew',padx=6)
ttk.Label(F0,text='z').grid(row=1,column=4,sticky='we',padx=6)
_wsz = ttk.Entry(F0,textvariable=self.pzo,width=15,justify='left',state='readonly')
_wsz.grid(row=1,column=5,sticky='ew',padx=6)
ttk.Button(F0,text='Get Ellipse',command=_calc).grid(row=1,column=6,sticky='ew',pady=5,padx=6)
F0.grid()
F1 = ttk.Frame(self.Window_cellipse,padding=5,borderwidth=5)
ttk.Label(F1,text='U mean',width=9).grid(row=1,column=0,sticky='e',padx=3)
_wum = ttk.Entry(F1,textvariable=SUM,width=15,justify='left',state='readonly')
_wum.grid(row=1,column=1,sticky='ew',padx=3)
ttk.Label(F1,text='V mean',width=9).grid(row=1,column=2,sticky='e',padx=3)
_wvm = ttk.Entry(F1,textvariable=SVM,width=15,justify='left',state='readonly')
_wvm.grid(row=1,column=3,sticky='ew',padx=3)
ttk.Label(F1,text='Mean angle',width=9).grid(row=1,column=4,sticky='e',padx=3)
_wpm = ttk.Entry(F1,textvariable=SPM,width=15,justify='left',state='readonly')
_wpm.grid(row=1,column=5,sticky='ew',padx=3)
ttk.Label(F1,text='a',width=9,justify='right').grid(row=2,column=0,sticky='e',padx=3)
_waa = ttk.Entry(F1,textvariable=SAA,width=15,justify='left',state='readonly')
_waa.grid(row=2,column=1,sticky='ew',padx=3)
ttk.Label(F1,text='b',width=9).grid(row=2,column=2,sticky='e',padx=3)
_wbb = ttk.Entry(F1,textvariable=SBB,width=15,justify='left',state='readonly')
_wbb.grid(row=2,column=3,sticky='ew',padx=3)
ttk.Label(F1,text='Anom angle',width=9).grid(row=2,column=4,sticky='e',padx=3)
_wpp = ttk.Entry(F1,textvariable=SPP,width=15,justify='left',state='readonly')
_wpp.grid(row=2,column=5,sticky='ew',padx=3)
F1.grid()
F2 = ttk.Frame(self.Window_cellipse,padding=5,borderwidth=5)
global log
_wnn = ttk.Label(F2,text='n = 0',width=6)
_wnn.grid(row=0,column=0,sticky='ew',padx=3)
log = tk.Text(F2,height=5)
log.grid(row=3,column=0,columnspan=5,padx=10,pady=10,sticky='nsew')
# Scrollbar
scrollb = tk.Scrollbar(F2,command=log.yview)
scrollb.grid(row=3,column=5,sticky='nsew',padx=2,pady=2)
log['yscrollcommand'] = scrollb.set
F2.grid()
F3 = ttk.Frame(self.Window_cellipse,padding=5,borderwidth=5)
ttk.Button(F3,text='Clear',command=_cancel).grid(row=4,column=0,sticky='e',padx=5)
ttk.Button(F3,text='Load',command=_load).grid(row=4,column=1,sticky='e',padx=5)
ttk.Button(F3,text='Save',command=_save).grid(row=4,column=2,sticky='e',padx=5)
ttk.Button(F3,text='Add',command=_add).grid(row=4,column=3,sticky='e',padx=5)
ttk.Button(F3,text='Done',command=_done).grid(row=4,column=4,sticky='e',padx=5)
F3.grid()
def ellipse_config(self):
# =======================
if self.nellipse == 0:
      messagebox.showinfo(message='No variance ellipses defined yet')
return
ii = self.ELLIPSE_INDX.get()
global eshow
def _cancel():
# ============
self.Window_ellipseconfig.destroy()
self.Window_ellipseconfig = None
def _apply():
# ===========
self.make_plot()
def _done():
# ==========
self.Window_ellipseconfig.destroy()
self.Window_ellipseconfig = None
self.make_plot()
def _loadconf():
# =============
'''Load ellipse configuration'''
toconsola('Restoring ellipse configuration from '+
self.ELLIPSE[ii].PLOT.FILECONF,wid=self.cons)
try:
self.ELLIPSE[ii].PLOT.load(self.ELLIPSE[ii].PLOT.FILECONF)
self.make_plot()
except:
toconsola('Error: Unable to load file '+
self.ELLIPSE[ii].PLOT.FILECONF,wid=self.cons)
def _saveconf():
# =============
      '''Save ellipse configuration'''
toconsola('Saving ellipse configuration to '+
self.ELLIPSE[ii].PLOT.FILECONF,wid=self.cons)
try:
        self.ELLIPSE[ii].PLOT.save(self.ELLIPSE[ii].PLOT.FILECONF)
except:
toconsola('Error: Unable to write file '+
self.ELLIPSE[ii].PLOT.FILECONF,wid=self.cons)
def _loadfromconf():
# ==================
'''Load ellipse configuration from a file'''
nn = filedialog.askopenfilename(title='Load ellipse configuration',
parent=self.Window_ellipseconfig,
initialdir=COSMO_CONF)
if len(nn) == 0:
return
self.ELLIPSE[ii].PLOT.FILECONF = '%s' % nn
toconsola('Restoring ellipse configuration from '+
self.ELLIPSE[ii].PLOT.FILECONF,wid=self.cons)
try:
self.ELLIPSE[ii].PLOT.load(self.ELLIPSE[ii].PLOT.FILECONF)
self.make_plot()
except:
toconsola('Error: Unable to load file '+
self.ELLIPSE[ii].PLOT.FILECONF,wid=self.cons)
def _saveasconf():
# ================
      '''Save ellipse configuration to a new file'''
nn = filedialog.asksaveasfilename(title='Save ellipse configuration',
parent=self.Window_ellipseconfig,
initialdir=COSMO_CONF,
confirmoverwrite=True)
if nn is None or len(nn) == 0:
return
self.ELLIPSE[ii].PLOT.FILECONF = '%s' % nn
toconsola('Saving ellipse configuration to '+
self.ELLIPSE[ii].PLOT.FILECONF,wid=self.cons)
try:
self.ELLIPSE[ii].PLOT.save(self.ELLIPSE[ii].PLOT.FILECONF)
except:
toconsola('Error: Unable to write file '+
self.ELLIPSE[ii].PLOT.FILECONF,wid=self.cons)
if self.Window_ellipseconfig is not None:
self.Window_ellipseconfig.lift()
return
def _selected():
# ===============
global eshow
eshow.destroy()
ii = self.ELLIPSE_INDX.get()
eshow = ttk.Frame(self.Window_ellipseconfig,padding=10)
ellipse.Configuration2(eshow,self.ELLIPSE[ii])
f0 = ttk.Frame(eshow,padding=5)
ttk.Button(f0,text='Apply',command=_apply,padding=5). \
grid(row=0,column=1,padx=3)
ttk.Button(f0,text='Close',command=_done,padding=5). \
grid(row=0,column=2,padx=3)
f0.grid(sticky='ew',columnspan=3)
eshow.grid()
# Main window
# ============
self.Window_ellipseconfig = tk.Toplevel(self.master)
self.Window_ellipseconfig.title('Ellipse plot configuration')
self.Window_ellipseconfig.resizable(width=True,height=True)
self.Window_ellipseconfig.protocol('WM_DELETE_WINDOW',_cancel)
menubar = tk.Menu(self.Window_ellipseconfig)
menu = tk.Menu(menubar,tearoff=0)
menubar.add_cascade(label='Configuration',menu=menu)
menu.add_command(label='Restore',command=_loadconf)
menu.add_command(label='Restore from',command=_loadfromconf)
menu.add_command(label='Save',command=_saveconf)
menu.add_command(label='Save as',command=_saveasconf)
try:
self.Window_ellipseconfig.config(menu=menubar)
except AttributeError:
# master is a toplevel window (Python 2.4/Tkinter 1.63)
      self.Window_ellipseconfig.tk.call(self.Window_ellipseconfig, "config", "-menu", menubar)
fsel = ttk.Frame(self.Window_ellipseconfig,padding=10)
ttk.Label(fsel,text="File: ").grid(row=0,column=0,sticky='e',padx=3)
_wsel = ttk.Combobox(fsel,textvariable=self.ELLIPSE_INDX,
values=self.ELLIPSE_LIST,width=5)
_wsel.grid(row=0,column=1,sticky='w',padx=3)
_wsel.bind('<<ComboboxSelected>>',lambda e:_selected())
    _went = ttk.Entry(fsel,justify='left',width=80,state='readonly')
    _went.grid(row=0,column=2,columnspan=8,padx=3,sticky='w')
fsel.grid()
_went ['textvariable'] = self.ELLIPSE[ii].FILENAME
eshow = ttk.Frame(self.Window_ellipseconfig,padding=10)
ellipse.Configuration2(eshow,self.ELLIPSE[ii])
f0 = ttk.Frame(eshow,padding=5)
ttk.Button(f0,text='Apply',command=_apply,padding=5). \
grid(row=0,column=1,padx=3)
ttk.Button(f0,text='Close',command=_done,padding=5). \
grid(row=0,column=2,padx=3)
f0.grid(sticky='ew',columnspan=3)
eshow.grid()
# ====================
def get_patch(self):
# ====================
''' Widget to add patches '''
global _wcx,_wcy,_wcr,_wxw,_wxe,_wys,_wyn
PATCH = patch.PATCH()
PATCH.SOURCE = 'VIEWER'
def _close():
# ===========
self.Window_patch.destroy()
self.Window_patch = None
    def _cancel():
    # ===========
      nonlocal PATCH
      PATCH = patch.PATCH()
      _close()
def _done():
# ===========
if PATCH.TYPE.get() == 'Rectangle':
xo = float(_wxw.get())
x1 = float(_wxe.get())
yo = float(_wys.get())
y1 = float(_wyn.get())
PATCH.xo.set(xo)
PATCH.yo.set(yo)
PATCH.dx.set(x1-xo)
PATCH.dy.set(y1-yo)
PATCH.show.set(True)
if PATCH.TYPE.get() == 'Circle':
xo = float(_wcx.get())
yo = float(_wcy.get())
rr = float(_wcr.get())
PATCH.xo.set(xo)
PATCH.yo.set(yo)
PATCH.dx.set(np.abs(rr))
PATCH.dy.set(np.abs(rr))
PATCH.show.set(True)
self.npatch += 1
self.PATCH.append(PATCH)
self.PATCH_INDX.set(self.npatch-1)
self.PATCH_LIST = list(range(self.npatch))
self.LAYERS.add(TYPE='PATCH',Filename=None,N=1,wid=self.cons)
self.LAYERS.print()
_close()
self.make_plot()
def _sel():
# =========
if PATCH.TYPE.get() == 'Rectangle':
_wxw.configure(state='normal')
_wxe.configure(state='normal')
_wys.configure(state='normal')
_wyn.configure(state='normal')
_wcx.configure(state='disabled')
_wcy.configure(state='disabled')
_wcr.configure(state='disabled')
elif PATCH.TYPE.get() == 'Circle':
_wxw.configure(state='disabled')
_wxe.configure(state='disabled')
_wys.configure(state='disabled')
_wyn.configure(state='disabled')
_wcx.configure(state='normal')
_wcy.configure(state='normal')
_wcr.configure(state='normal')
# Main Window ...
# ================
if self.Window_patch is None:
self.Window_patch = tk.Toplevel(self.master)
      self.Window_patch.title('Add/configure Patch')
self.Window_patch.protocol('WM_DELETE_WINDOW',_close)
else:
self.Window_patch.lift()
F0 = ttk.Frame(self.Window_patch,padding=5)
# Add
ttk.Radiobutton(F0,text='Rectangle',variable=PATCH.TYPE,value='Rectangle',command=_sel).grid(row=0,column=0,sticky='w',padx=3)
ttk.Radiobutton(F0,text='Circle',variable=PATCH.TYPE,value='Circle',command=_sel).grid(row=1,column=0,sticky='w',padx=3)
_wxw = ttk.Entry(F0,width=8,justify='left')
_wxw.grid(row=0,column=1,padx=3)
_wxe = ttk.Entry(F0,width=8,justify='left')
_wxe.grid(row=0,column=2,padx=3)
_wys = ttk.Entry(F0,width=8,justify='left')
_wys.grid(row=0,column=3,padx=3)
_wyn = ttk.Entry(F0,width=8,justify='left')
_wyn.grid(row=0,column=4,padx=3)
    ttk.Label(F0,text='West, East, South, North').grid(row=0,column=5,padx=3,sticky='w')
_wcx = ttk.Entry(F0,width=8,justify='left')
_wcx.grid(row=1,column=1,padx=3)
_wcy = ttk.Entry(F0,width=8,justify='left')
_wcy.grid(row=1,column=2,padx=3)
_wcr = ttk.Entry(F0,width=8,justify='left')
_wcr.grid(row=1,column=3,padx=3)
ttk.Label(F0,text='X, Y, Radius').grid(row=1,column=5,padx=3,sticky='w')
_wxw.configure(state='disabled')
_wxe.configure(state='disabled')
_wys.configure(state='disabled')
_wyn.configure(state='disabled')
_wcx.configure(state='disabled')
_wcy.configure(state='disabled')
_wcr.configure(state='disabled')
#Alias
ttk.Label(F0,text='Alias').grid(row=2,column=0,padx=3,pady=3)
_aent = ttk.Entry(F0,textvariable=PATCH.ALIAS,width=17,justify='left')
_aent.grid(row=2,column=1,columnspan=2,sticky='w')
#
F0.grid(row=0,column=0)
F1 = ttk.Frame(self.Window_patch,padding=5)
ttk.Button(F1,text='Cancel',command=_cancel).grid(row=1,column=2,padx=3)
ttk.Button(F1,text='Done',command=_done).grid(row=1,column=3,padx=3)
F1.grid(row=1,column=0)
# =======================
def patch_config(self):
# =======================
if self.npatch == 0:
messagebox.showinfo(message='No patch added yet')
return
ii = self.PATCH_INDX.get()
global pshow
def _cancel():
# ============
self.Window_patchconfig.destroy()
self.Window_patchconfig = None
def _apply():
# ===========
self.make_plot()
def _done():
# ==========
self.Window_patchconfig.destroy()
self.Window_patchconfig = None
self.make_plot()
def _loadconf():
# =============
'''Load patch configuration'''
toconsola('Restoring patch configuration from '+
self.PATCH[ii].PLOT.FILECONF,wid=self.cons)
try:
self.PATCH[ii].PLOT.load(self.PATCH[ii].PLOT.FILECONF)
self.make_plot()
except:
toconsola('Error: Unable to load file '+
self.PATCH[ii].PLOT.FILECONF,wid=self.cons)
def _saveconf():
# =============
      '''Save patch configuration'''
toconsola('Saving patch configuration to '+
self.PATCH[ii].PLOT.FILECONF,wid=self.cons)
try:
        self.PATCH[ii].PLOT.save(self.PATCH[ii].PLOT.FILECONF)
except:
toconsola('Error: Unable to write file '+
self.PATCH[ii].PLOT.FILECONF,wid=self.cons)
def _loadfromconf():
# ==================
'''Load patch configuration from a file'''
nn = filedialog.askopenfilename(title='Load patch configuration',
parent=self.Window_patchconfig,
initialdir=COSMO_CONF)
if len(nn) == 0:
return
self.PATCH[ii].PLOT.FILECONF = '%s' % nn
toconsola('Restoring patch configuration from '+
self.PATCH[ii].PLOT.FILECONF,wid=self.cons)
try:
self.PATCH[ii].PLOT.load(self.PATCH[ii].PLOT.FILECONF)
self.make_plot()
except:
toconsola('Error: Unable to load file '+
self.PATCH[ii].PLOT.FILECONF,wid=self.cons)
def _saveasconf():
# ================
      '''Save patch configuration to a new file'''
nn = filedialog.asksaveasfilename(title='Save patch configuration',
parent=self.Window_patchconfig,
initialdir=COSMO_CONF,
confirmoverwrite=True)
if nn is None or len(nn) == 0:
return
self.PATCH[ii].PLOT.FILECONF = '%s' % nn
toconsola('Saving patch configuration to '+
self.PATCH[ii].PLOT.FILECONF,wid=self.cons)
try:
self.PATCH[ii].PLOT.save(self.PATCH[ii].PLOT.FILECONF)
except:
toconsola('Error: Unable to write file '+
self.PATCH[ii].PLOT.FILECONF,wid=self.cons)
if self.Window_patchconfig is not None:
self.Window_patchconfig.lift()
return
def _selected():
# ==============
      ii = self.PATCH_INDX.get()
global pshow
pshow.destroy()
pshow = ttk.Frame(self.Window_patchconfig,padding=10)
patch.Configuration(pshow,self.PATCH[ii])
f0 = ttk.Frame(pshow,padding=5)
ttk.Button(f0,text='Apply',command=_apply,padding=5). \
grid(row=0,column=1,padx=3)
ttk.Button(f0,text='Close',command=_done,padding=5). \
grid(row=0,column=2,padx=3)
f0.grid(sticky='ew',columnspan=3)
pshow.grid()
# Main window
# ============
self.Window_patchconfig = tk.Toplevel(self.master)
self.Window_patchconfig.title('Patch configuration')
self.Window_patchconfig.resizable(width=True,height=True)
self.Window_patchconfig.protocol('WM_DELETE_WINDOW',_cancel)
menubar = tk.Menu(self.Window_patchconfig)
menu = tk.Menu(menubar,tearoff=0)
menubar.add_cascade(label='Configuration',menu=menu)
menu.add_command(label='Restore',command=_loadconf)
menu.add_command(label='Restore from',command=_loadfromconf)
menu.add_command(label='Save',command=_saveconf)
menu.add_command(label='Save as',command=_saveasconf)
try:
self.Window_patchconfig.config(menu=menubar)
except AttributeError:
# master is a toplevel window (Python 2.4/Tkinter 1.63)
      self.Window_patchconfig.tk.call(self.Window_patchconfig, "config", "-menu", menubar)
fsel = ttk.Frame(self.Window_patchconfig,padding=10)
ttk.Label(fsel,text="Patch: ").grid(row=0,column=0,sticky='e',padx=3)
_wsel = ttk.Combobox(fsel,textvariable=self.PATCH_INDX,
values=self.PATCH_LIST,width=5)
_wsel.grid(row=0,column=1,sticky='w',padx=3)
_wsel.bind('<<ComboboxSelected>>',lambda e:_selected())
#_went = ttk.Entry(fsel,justify='left',width=50,state='readonly')
#_went.grid(row=0,column=2,columnspan=5,padx=3,sticky='w')
#_went = ttk.Entry(fsel,justify='left',width=80,state='readonly')
#_went.grid(row=0,column=2,columnspan=8,padx=3,sticky='w')
fsel.grid()
#_went ['textvariable'] = self.PATCH[ii].FILENAME
pshow = ttk.Frame(self.Window_patchconfig,padding=10)
patch.Configuration(pshow,self.PATCH[ii])
f0 = ttk.Frame(pshow,padding=5)
ttk.Button(f0,text='Apply',command=_apply,padding=5). \
grid(row=0,column=1,padx=3)
ttk.Button(f0,text='Close',command=_done,padding=5). \
grid(row=0,column=2,padx=3)
f0.grid(sticky='ew',columnspan=3)
pshow.grid()
# =======================================================
def skill_Liu(self):
# =======================================================
if self.nvec == 0:
messagebox.showinfo(message='No currents file opened yet')
return
if self.nfloat == 0:
messagebox.showinfo(message='No Lagrangian file opened yet')
return
def _get_release():
# =================
if self.time_ini.get() == 0:
# The release point is defined as the buoy position
        # at the earliest model time step. There may be a mismatch
# between the actual release of the buoy and the location
# of the buoy at the model time step.
#
for i in range(len(T)):
if np.isnan(X[i]) or np.isnan(Y[i]):
pass
else:
ko = i
break
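        # ko is the first model time step with a valid buoy position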
txo.set(X[ko])
tyo.set(Y[ko])
try:
tzo.set(self.VEC[ii].Z[0])
except:
tzo.set(0)
tdo.set(self.VEC[ii].DATE[ko])
tto.set(T[ko])
tdt.set(T[ko] - T[0])
else:
res = tools.initial_position(self.VEC[ii],self.FLOAT[jj],wid=self.cons)
if res is None:
messagebox.showinfo(message='No initial position has been found')
return
ko = res[0][self.Fp.get()]
txo.set(res[1][self.Fp.get()][0])
tyo.set(res[2][self.Fp.get()][0])
tzo.set(res[3][self.Fp.get()][0])
tdo.set(res[4][self.Fp.get()])
tto.set(res[5][self.Fp.get()][0])
tdt.set(res[6][self.Fp.get()][0])
ii = self.VEC_INDX.get()
jj = self.FLOAT_INDX.get()
NFLOATS = self.FLOAT[jj].nfloats
FLOAT_LIST = []
for i in range(NFLOATS):
FLOAT_LIST.append(i)
T = self.VEC[ii].TIME
X = self.FLOAT[jj].Fx(T)
Y = self.FLOAT[jj].Fy(T)
txo = tk.DoubleVar()
tyo = tk.DoubleVar()
tzo = tk.DoubleVar()
tdo = tk.DoubleVar()
tto = tk.DoubleVar()
tdt = tk.DoubleVar()
_get_release()
RELEASE_TIME = tto.get()
PERIOD = tk.IntVar()
PERIOD_LIST = []
_wlst = None
global have_run, NT
have_run = False
NT = None
FFx = None
FFy = None
global velocity_model
global velocity_buoy
# ===================
class buoy():
# ===================
def __init__(self):
# =================
# Define the structure
self.lon = []
self.lat = []
self.date = [] # Date as datetime structure
self.time = [] # Date as a number
def _close():
# ===========
self.Window_skill.destroy()
self.Window_skill = None
def _done():
# ==========
global have_run, NT
global separation_mod_obs,displacement_buoy,displacement_model,B,M,model_color
if not have_run:
with open(self.release_file.get(),'w') as f:
ss = "%9.3f, %9.3f, %9.3f, %9.0f\n" % (txo.get(), tyo.get(), tzo.get(), tdt.get())
f.write(ss)
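        # Build the command line for the offline Lagrangian model (clm),
        # passing the U/V NetCDF metadata, the release file and the output name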
CLM = clm.parameters()
command = CLM.PATH.get() + CLM.BIN.get()
options = ' -U file='+self.VEC[ii].UFILENAME.get()
options += ' x='+self.VEC[ii].U.icdf.xname
options += ' y='+self.VEC[ii].U.icdf.yname
if self.VEC[ii].U.icdf.idz >= 0:
options += ' z='+self.VEC[ii].U.icdf.zname
options += ' t='+self.VEC[ii].U.icdf.tname
options += ' u='+self.VEC[ii].uname.get()
options += ' -V file='+self.VEC[ii].VFILENAME.get()
options += ' x='+self.VEC[ii].V.icdf.xname
options += ' y='+self.VEC[ii].V.icdf.yname
if self.VEC[ii].V.icdf.idz >= 0:
options += ' z='+self.VEC[ii].V.icdf.zname
options += ' t='+self.VEC[ii].V.icdf.tname
options += ' v='+self.VEC[ii].vname.get()
options += ' -release ' + self.release_file.get()
options += ' -idt %d ' % self.clm_idt.get()
options += ' -out '+ self.out_file.get()
command += options
toconsola(command,wid=self.cons)
os.system(command)
if os.path.isfile(self.out_file.get()):
FLT = lagrangian.parameters()
toconsola(FLT.MESSAGE,wid=self.cons)
FLT.Read(self.out_file.get())
if FLT is None:
return
have_run = True
else:
have_run = False
if not have_run:
return
# Add the Lagrangian simulation to the cosmo-view layers !!!
FLT.Fx = interpolate.interp1d(FLT.TIME,FLT.lon, bounds_error=False, fill_value=np.NaN)
FLT.MAPX = FLT.Fx(self.TIME)
FLT.Fy = interpolate.interp1d(FLT.TIME,FLT.lat, bounds_error=False, fill_value=np.NaN)
FLT.MAPY = FLT.Fy(self.TIME)
FLT.SOURCE = 'clm'
FLT.PLOT.LINE_COLOR.set(self.VEC[ii].PLOT.CURRENT_COLOR.get())
self.nfloat += 1
self.FLOAT.append(FLT)
self.FLOAT_INDX.set(self.nfloat-1)
self.FLOAT_LIST = list(range(self.nfloat))
model_color = self.FLOAT[-1].PLOT.LINE_COLOR.get()
nt = len(FLT.TIME)
self.LAYERS.add(TYPE='FLOAT',Filename=FLT.FILENAME.get(),N=nt,wid=self.cons)
self.make_plot()
# Clean model and buoy:
ind = []
for i in range(len(FLT.lon)):
if np.isnan(FLT.lon[i]) or np.isnan(FLT.lat[i]):
ind.append(i)
if len(ind)>0:
aa = np.delete(FLT.lon,ind)
FLT.lon = aa
aa = np.delete(FLT.lat,ind)
FLT.lat = aa
aa = np.delete(FLT.DATE,ind)
FLT.DATE = aa
aa = np.delete(FLT.TIME,ind)
FLT.TIME = aa
FLT.nrecords = len(FLT.DATE)
print('LON From the model: ', FLT.lon[0:5])
print('DATE From the model: ', FLT.DATE[0:5])
# Once cropped, check if the model has been able to explicitly save the
# RELEASE point:
#
      if FLT.TIME[0] > RELEASE_TIME:
        # The cropped model solution starts after the release instant:
        # prepend the release point (assumed here to be the position stored
        # in txo/tyo and the release time computed above).
        FLT.lon  = np.insert(FLT.lon,0,txo.get())
        FLT.lat  = np.insert(FLT.lat,0,tyo.get())
        FLT.DATE = np.insert(FLT.DATE,0,datetime.datetime.fromtimestamp(RELEASE_TIME))
        FLT.TIME = np.insert(FLT.TIME,0,RELEASE_TIME)
# New interpolation function:
#
FFx = interpolate.interp1d(FLT.TIME,FLT.lon, bounds_error=False, fill_value=np.NaN)
FFy = interpolate.interp1d(FLT.TIME,FLT.lat, bounds_error=False, fill_value=np.NaN)
# Subsampled time axis:
#
duration_hours = (FLT.TIME[-1] - FLT.TIME[0])/3600
nt = int(duration_hours/self.time_sampling.get())
      if FLT.TIME[0] > RELEASE_TIME:
        print('Inserting release position at cropped model solution ...')
        # The release date is reconstructed from the release time stamp
        release_date = datetime.datetime.fromtimestamp(RELEASE_TIME)
        dd = [release_date]
        tt = [RELEASE_TIME]
else:
dd = []
tt = []
rr = FLT.DATE[0] + datetime.timedelta(hours=-self.time_sampling.get())
for i in range(nt):
rr += datetime.timedelta(hours=self.time_sampling.get())
dd.append(rr) # Date, every time_sampling hours
tt.append(rr.timestamp()) # Ordinal time, every time_sampling hours
dd = np.array(dd)
tt = np.array(tt)
nt = len(dd)
# Interpolate the geojson onto the constructed time axis
#
B = buoy()
B.lon = self.FLOAT[jj].Fx(tt)
B.lat = self.FLOAT[jj].Fy(tt)
B.date = dd[:]
B.time = tt[:]
# Interpolate the model
#
M = buoy()
M.lon = FFx(tt)
M.lat = FFy(tt)
M.date = dd[:]
M.time = tt[:]
d = len(M.lon)
a = np.arange(12,d,12)
if np.remainder(d-1,12) == 0:
PERIOD_LIST = list(a)
else:
PERIOD_LIST = list(np.append(a,d))
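      # Candidate evaluation horizons: every 12 records of the subsampled
      # axis (12 hours when the sampling interval is one hour), always
      # including the final record.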
_wlst['values'] = PERIOD_LIST
_wlst.configure(state='normal')
_wlst.set(PERIOD_LIST[-1])
global velocity_model
global velocity_buoy
      # Displacement of the model (used only in the summary plot below):
dl = []
uu = []
for i in range(1,len(M.lon)):
dl.append(tools.haversine((M.lon[i-1],M.lat[i-1]),(M.lon[i],M.lat[i])))
dt = M.time[i] - M.time[i-1] #Difference in seconds
if dt > 1:
uu.append(dl[i-1]/dt)
else:
uu.append(0)
displacement_model = np.array(dl)
velocity_model = np.array(uu)
#print('Model displacement: ', displacement_model)
print('Model velocity:', velocity_model)
# Displacement of the buoy:
dl = []
uu = []
for i in range(1,len(B.lon)):
dl.append(tools.haversine((B.lon[i-1],B.lat[i-1]),(B.lon[i],B.lat[i])))
dt = B.time[i] - B.time[i-1] #Difference in seconds
if dt > 1:
uu.append(dl[i-1]/dt)
else:
uu.append(0)
displacement_buoy = np.array(dl)
velocity_buoy = np.array(uu)
#print('Buoy displacement: ', displacement_buoy)
print('Buoy velocity:', velocity_buoy)
# Separation buoy - model:
dl = []
for i in range(len(B.lon)):
dl.append(tools.haversine((B.lon[i],B.lat[i]),(M.lon[i],M.lat[i])))
separation_mod_obs = np.array(dl)
#print('Separation : ', separation_mod_obs)
NT = len(B.lon)
final_separation = separation_mod_obs[NT-1]
print('final separation ', final_separation)
      num = np.sum(separation_mod_obs[1:NT])   # d_1 + d_2 + d_3 + ...
      l_n = []
      for i in range(NT-1):
        l_n.append(np.sum(displacement_buoy[0:i+1]))
      den = np.sum(np.array(l_n))              # Denominator: l_1 + l_2 + l_3 + ...
      index_s = num/den
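      # Liu & Weisberg (2011) normalized cumulative separation:
      #   s = (d_1 + ... + d_N) / (l_1 + ... + l_N)
      # where d_i is the model-buoy separation at step i and l_i the
      # cumulative distance travelled by the buoy up to step i.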
# Histogram of velocitities:
#
#tools.dhist(velocity_buoy[:NT-1],velocity_model[:NT-1])
# LIU index:
#
if index_s < self.index_n.get():
ss = 1 - index_s/self.index_n.get()
else:
ss = 0
print('Index s : ', index_s)
print('Skill score, ss = ', ss)
# COSMO index:
# We define it as a product of a geometric index and an arithmetic index to
# account both for the direction and the normalized closeness of the
# predicted and observed positions:
# Buoy bearing:
buoy_bearing = tools.initial_bearing((B.lon[0],B.lat[0]),(B.lon[NT-1],B.lat[NT-1]))
model_bearing = tools.initial_bearing((M.lon[0],M.lat[0]),(M.lon[NT-1],M.lat[NT-1]))
print('Buoy bearing: ', buoy_bearing)
print('Model bearing: ', model_bearing)
theta = tools.angle_diff(buoy_bearing,model_bearing)
direction_factor = np.cos(np.pi*theta/180)
if direction_factor < 0:
direction_factor = 0
distance_factor = 1/(1+final_separation/l_n[NT-2])
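      # Distance factor in (0,1]: equals 1 when the final separation is zero
      # and decays as the separation grows relative to the distance travelled
      # by the buoy.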
print('Bearing angle difference:' , theta)
print('Direction factor (max(0,cos(theta)):' , direction_factor)
print('buoy model prediction distance: ', final_separation)
      print('Buoy travelled distance: ', l_n[-1])
print('Distance factor : ', distance_factor)
cosmo_index = direction_factor * distance_factor
print('COSMO index : ', cosmo_index)
fig = plt.figure(2)
ax = plt.axes([0.15,0.10,0.80,0.66])
ax.set_xlabel('Longitude')
ax.set_ylabel('Latitude')
ax.grid(True)
ax.plot(B.lon[0],B.lat[0],'or',ms=8)
ax.plot(B.lon[0:NT],B.lat[0:NT],'+r',ms=3)
ax.plot(B.lon[0:NT],B.lat[0:NT],'-',color=self.FLOAT[jj].PLOT.LINE_COLOR.get(),linewidth=2,label='Buoy')
ax.plot(M.lon[0:NT],M.lat[0:NT],'+b',ms=3)
ax.plot(M.lon[0:NT],M.lat[0:NT],'-',color=model_color,linewidth=2,label='Model')
#for k in range(len(B.lon)):
for k in range(NT):
ax.plot([B.lon[k],M.lon[k]], \
[B.lat[k],M.lat[k]], \
'--k',linewidth=0.5)
ax.legend()
string = 'Initial point and date: (%.3f,%.3f) %s ' % (B.lon[0], B.lat[0], B.date[0])
ax.text(0.2,0.98,string,ha='left',va='top',transform=fig.transFigure)
string = 'Simulation length %.1f hours, i.e. %.2f days ' % ((M.time[NT-1]-M.time[0])/3600 ,(M.time[NT-1]-M.time[0])/86400 )
ax.text(0.2,0.95,string,ha='left',va='top',transform=fig.transFigure)
string = 'Displacement: Buoy = %d km; Model = %d km' % (np.sum(displacement_buoy[0:NT-1])/1000, \
np.sum(displacement_model[0:NT-1])/1000)
ax.text(0.2,0.92,string,ha='left',va='top',transform=fig.transFigure)
string = 'Final distance between model and buoy = %d km' % (final_separation/1000)
ax.text(0.2,0.89,string,ha='left',va='top',transform=fig.transFigure)
string = 'LW2011 Index s = %.2f, n = %.2f. Skill score = %.2f' % (index_s, self.index_n.get(), ss)
ax.text(0.2,0.86,string,ha='left',va='top',transform=fig.transFigure)
string = 'Bearing: Buoy = %d; Model = %d. Difference = %d ' % (buoy_bearing, model_bearing, theta)
ax.text(0.2,0.83,string,ha='left',va='top',transform=fig.transFigure)
string = 'Factor: Direction = %.2f; Distance = %.2f. Combined = %.2f ' % (direction_factor, distance_factor, cosmo_index)
ax.text(0.2,0.80,string,ha='left',va='top',transform=fig.transFigure)
plt.show()
def _floatselect():
# =================
print(self.Fp.get())
def _releasechange():
# =================
if 'model' in self.skill_release.get():
self.time_ini.set(0)
else:
self.time_ini.set(1)
_get_release()
def _lselection():
# =================
global NT
NT = PERIOD.get() + 1
print('NT = ', NT)
# Main window:
if self.Window_skill is None:
self.Window_skill = tk.Toplevel(self.master)
self.Window_skill.title('Trajectory model evaluation')
self.Window_skill.protocol('WM_DELETE_WINDOW',_close)
else:
self.Window_skill.lift()
FF = ttk.Frame(self.Window_skill,padding=5)
tk.Label(FF,text='Float:').grid(row=0,column=0,padx=3,sticky='e')
_wsf = ttk.Combobox(FF,textvariable=self.Fp,values=FLOAT_LIST,width=3)
_wsf.grid(row=0,column=1,sticky='w')
_wsf.bind('<<ComboboxSelected>>',lambda e: _floatselect())
if NFLOATS == 1:
_wsf.configure(state='disabled')
FF.grid()
F0 = ttk.Frame(self.Window_skill,padding=5)
#ttk.Label(F0,text='Model initial date : '+str(self.VEC[ii].DATE[0])).grid(row=0,column=0,columnspan=3,padx=3,stick='w')
#ttk.Label(F0,text='Trajectory initial date: '+str(self.FLOAT[jj].date[0])).grid(row=1,column=0,columnspan=3,padx=3,stick='w')
    tk.Label(F0,text='Model initial date:').grid(row=0,column=0,padx=3,sticky='w')
    e = tk.Entry(F0)
    e.grid(row=0,column=1,padx=3,sticky='w')
    e.insert(0,str(self.VEC[ii].DATE[0]))
    e.configure(state='readonly')
    tk.Label(F0,text='Buoy initial date:').grid(row=1,column=0,padx=3,sticky='w')
    e = tk.Entry(F0)
    e.grid(row=1,column=1,padx=3,sticky='w')
    e.insert(0,str(self.FLOAT[jj].DATE[0]))
    e.configure(state='readonly')
    ttk.Label(F0,text='Release at: ').grid(row=2,column=0,padx=3,sticky='w')
    _wrl = ttk.Combobox(F0,textvariable=self.skill_release,values=self.skill_release_VALUES)
    _wrl.grid(row=2,column=1,padx=3,sticky='w')
    _wrl.bind('<<ComboboxSelected>>',lambda e: _releasechange())
    tk.Label(F0,text='Release date:').grid(row=3,column=0,padx=3,sticky='e')
    tk.Entry(F0,textvariable=tdo,justify='left',width=12,state='readonly').grid(row=3,column=1,padx=3,sticky='ew')
    ttk.Label(F0,text='Initial point (xo, yo, zo, dt): ').grid(row=4,column=0,columnspan=3,padx=3,sticky='w')
F0.grid()
F2 = ttk.Frame(self.Window_skill)
#txo.set(xo[0])
#tyo.set(yo[0])
#tzo.set(zo[0])
#tdo.set(do)
#tto.set(Dt[0])
tk.Entry(F2,textvariable=txo,justify='left',width=12,state='readonly').grid(row=0,column=0,padx=3,sticky='ew')
tk.Entry(F2,textvariable=tyo,justify='left',width=12,state='readonly').grid(row=0,column=1,padx=3,sticky='ew')
tk.Entry(F2,textvariable=tzo,justify='left',width=12,state='readonly').grid(row=0,column=2,padx=3,sticky='ew')
tk.Entry(F2,textvariable=tdt,justify='left',width=12,state='readonly').grid(row=0,column=3,padx=3,sticky='ew')
F2.grid()
F3 = ttk.Frame(self.Window_skill)
tk.Label(F3,text='Release filename:').grid(row=0,column=0,padx=3,sticky='w')
tk.Entry(F3,textvariable=self.release_file,justify='left',width=40).grid(row=0,column=1,padx=3,sticky='w')
tk.Label(F3,text='Trajectory filename:').grid(row=1,column=0,padx=3,sticky='w')
tk.Entry(F3,textvariable=self.out_file,justify='left',width=40).grid(row=1,column=1,padx=3,sticky='w')
tk.Label(F3,text='clm option -idt:').grid(row=2,column=0,padx=3,sticky='w')
tk.Entry(F3,textvariable=self.clm_idt,justify='left',width=40).grid(row=2,column=1,padx=3,sticky='w')
tk.Label(F3,text='Normalization factor, n:').grid(row=3,column=0,padx=3,sticky='w')
tk.Entry(F3,textvariable=self.index_n,justify='left',width=40).grid(row=3,column=1,padx=3,sticky='w')
tk.Label(F3,text='Target prediction:').grid(row=4,column=0,padx=3,sticky='w')
_wlst = ttk.Combobox(F3,textvariable=PERIOD,width=5)
_wlst.grid(row=4,column=1,padx=3,sticky='w')
_wlst.configure(state='disabled')
_wlst.bind('<<ComboboxSelected>>',lambda e: _lselection())
#aa = self.ax.plot(xo[0],yo[0],'o',
# ms=9.0,linestyle='dotted',
# color='red',
# mfc='none',
# zorder=100,
# transform=ccrs.PlateCarree())
#
# self.canvas.draw()
F3.grid()
F1 = ttk.Frame(self.Window_skill,padding=5)
ttk.Button(F1,text='Cancel',command=_close).grid(row=1,column=2,padx=3)
ttk.Button(F1,text='Done',command=_done).grid(row=1,column=3,padx=3)
F1.grid()
# =======================================================
def converter(self):
# =======================================================
NS = tk.StringVar()
EW = tk.StringVar()
D1x = tk.DoubleVar()
D1y = tk.DoubleVar()
D2xd = tk.IntVar()
D2yd = tk.IntVar()
D2xm = tk.DoubleVar()
D2ym = tk.DoubleVar()
D3xd = tk.IntVar()
D3yd = tk.IntVar()
D3xm = tk.IntVar()
D3ym = tk.IntVar()
D3xs = tk.DoubleVar()
D3ys = tk.DoubleVar()
EWL = ['E','W']
NSL = ['N','S']
EW.set(EWL[0])
NS.set(NSL[0])
def _close():
# ===========
self.Window_converter.destroy()
self.Window_converter = None
if self.Window_converter is None:
self.Window_converter = tk.Toplevel(self.master)
self.Window_converter.title('Coordinate Converter')
self.Window_converter.protocol('WM_DELETE_WINDOW',_close)
else:
self.Window_converter.lift()
def _DD():
# =========
print('Converting from Decimal Degrees')
dd = D1x.get()
negative = dd < 0
dd = abs(dd)
minutes,seconds = divmod(dd*3600,60)
degrees,minutes = divmod(minutes,60)
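      # dd*3600 is the total arc-seconds; the divmod chain splits it into
      # degrees, minutes and seconds (e.g. 12.505 -> 12 deg 30' 18").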
if negative:
if degrees > 0:
degrees = -degrees
elif minutes > 0:
minutes = -minutes
else:
seconds = -seconds
D2xd.set(int(degrees))
D2xm.set(minutes+seconds/60)
D3xd.set(int(degrees))
D3xm.set(int(minutes))
D3xs.set(seconds)
dd = D1y.get()
negative = dd < 0
dd = abs(dd)
minutes,seconds = divmod(dd*3600,60)
degrees,minutes = divmod(minutes,60)
if negative:
if degrees > 0:
degrees = -degrees
elif minutes > 0:
minutes = -minutes
else:
seconds = -seconds
D2yd.set(int(degrees))
D2ym.set(minutes+seconds/60)
D3yd.set(int(degrees))
D3ym.set(int(minutes))
D3ys.set(seconds)
def _DDM():
# =========
print('Converting from Degrees Decimal Minutes')
D2xm.set(abs(D2xm.get()))
D2ym.set(abs(D2ym.get()))
if D2xd.get() > 0:
factor = 1
else:
factor = -1
dd = abs(D2xd.get())
d3 = dd*3600 + D2xm.get()*60
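      # d3: total arc-seconds of the |degrees| + decimal-minutes input;
      # dividing by 3600 recovers decimal degrees.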
D1x.set(factor*d3/3600)
minutes,seconds = divmod(d3,60)
degrees,minutes = divmod(minutes,60)
D3xd.set(int(factor*degrees))
D3xm.set(int(minutes))
D3xs.set(seconds)
D2xd.set(int(factor*degrees))
D2xm.set(minutes+seconds/60)
if D2yd.get() > 0:
factor = 1
else:
factor = -1
dd = abs(D2yd.get())
d3 = dd*3600 + D2ym.get()*60
minutes,seconds = divmod(d3,60)
degrees,minutes = divmod(minutes,60)
D1y.set(factor*d3/3600)
D2yd.set(int(factor*degrees))
D2ym.set(minutes+seconds/60)
D3yd.set(int(factor*degrees))
D3ym.set(int(minutes))
D3ys.set(seconds)
def _DMS():
# =========
print('Converting from Degrees Minutes Seconds')
D3xm.set(abs(D3xm.get()))
D3xs.set(abs(D3xs.get()))
D3ym.set(abs(D3ym.get()))
D3ys.set(abs(D3ys.get()))
if D3xd.get() > 0:
factor = 1
else:
factor = -1
dd = abs(D3xd.get())
d3 = dd*3600 + D3xm.get()*60 + D3xs.get()
D1x.set(factor*d3/3600)
d2 = int(np.floor(d3/3600))
m2 = (d3 - d2*3600)/60
D2xd.set(int(factor*d2))
D2xm.set(m2)
if D3yd.get() > 0:
factor = 1
else:
factor = -1
dd = abs(D3yd.get())
d3 = dd*3600 + D3ym.get()*60 + D3ys.get()
D1y.set(factor*d3/3600)
d2 = int(np.floor(d3/3600))
m2 = (d3 - d2*3600)/60
D2yd.set(int(factor*d2))
D2ym.set(m2)
# Styles
tpad = ttk.Style()
tpad.configure("tpad.TLabelframe",padding=[20,5,5,10])
# DD
F0 =ttk.LabelFrame(self.Window_converter,text='Decimal Degrees (DD)',borderwidth=5,style='tpad.TLabelframe')
ttk.Label(F0,text='Longitude').grid(row=0,column=0,sticky='w')
_d1x = ttk.Entry(F0,textvariable=D1x,width=20)
_d1x.grid(row=0,column=1,sticky='ew')
ttk.Label(F0,text='\u00b0',width=1).grid(row=0,column=2,sticky='w')
_d1xh = ttk.Combobox(F0,textvariable=EW,values=EWL,width=3)
EW.set('E')
_d1xh.grid(row=0,column=3,sticky='ew')
ttk.Label(F0,text='Latitude').grid(row=1,column=0,sticky='w')
_d1y = ttk.Entry(F0,textvariable=D1y,width=20)
_d1y.grid(row=1,column=1,sticky='ew')
ttk.Label(F0,text='\u00b0',width=1).grid(row=1,column=2,sticky='w')
_d1yh = ttk.Combobox(F0,textvariable=NS,values=NSL,width=3)
_d1yh.grid(row=1,column=3,sticky='ew')
ttk.Button(F0,text='Ok',width=4,command=_DD).grid(row=1,column=4,sticky='ew',padx=3)
F0.grid(row=0,column=0,padx=5,pady=10,sticky='ewsn')
# DDM
F1 =ttk.LabelFrame(self.Window_converter,text='Degrees Decimal Minutes (DDM)',borderwidth=5,style='tpad.TLabelframe')
ttk.Label(F1,text='Longitude').grid(row=0,column=0,sticky='w')
_d2xd = ttk.Entry(F1,textvariable=D2xd,width=4)
_d2xd.grid(row=0,column=1,sticky='ew')
ttk.Label(F1,text='\u00b0',width=1).grid(row=0,column=2,sticky='w')
    _d2xm = ttk.Entry(F1,textvariable=D2xm,width=14)
    _d2xm.grid(row=0,column=3,sticky='ew')
ttk.Label(F1,text="'",width=1).grid(row=0,column=4,sticky='w')
_d2xh = ttk.Combobox(F1,textvariable=EW,values=EWL,width=3)
_d2xh.grid(row=0,column=5,sticky='w')
ttk.Label(F1,text='Latitude').grid(row=1,column=0,sticky='w')
_d2yd = ttk.Entry(F1,textvariable=D2yd,width=4)
_d2yd.grid(row=1,column=1,sticky='ew')
ttk.Label(F1,text='\u00b0',width=1).grid(row=1,column=2,sticky='w')
_d2ym = ttk.Entry(F1,textvariable=D2ym,width=14)
_d2ym.grid(row=1,column=3,sticky='ew')
ttk.Label(F1,text="'",width=1).grid(row=1,column=4,sticky='w')
_d2yh = ttk.Combobox(F1,textvariable=NS,values=NSL,width=3)
_d2yh.grid(row=1,column=5,sticky='ew')
ttk.Button(F1,text='Ok',width=4,command=_DDM).grid(row=1,column=6,sticky='ew',padx=3)
F1.grid(row=1,column=0,padx=5,pady=10,sticky='ewsn')
# DMS
F2 =ttk.LabelFrame(self.Window_converter,text='Degrees Minutes Seconds (DMS)',borderwidth=5,style='tpad.TLabelframe')
ttk.Label(F2,text='Longitude').grid(row=0,column=0,sticky='w')
_d3xd = ttk.Entry(F2,textvariable=D3xd,width=4)
_d3xd.grid(row=0,column=1,sticky='ew')
ttk.Label(F2,text='\u00b0',width=1).grid(row=0,column=2,sticky='w')
_d3xm = ttk.Entry(F2,textvariable=D3xm,width=3)
_d3xm.grid(row=0,column=3,sticky='ew')
ttk.Label(F2,text="'",width=1).grid(row=0,column=4,sticky='w')
    _d3xs = ttk.Entry(F2,textvariable=D3xs,width=9)
    _d3xs.grid(row=0,column=5,sticky='ew')
ttk.Label(F2,text='"',width=1).grid(row=0,column=6,sticky='w')
_d3xh = ttk.Combobox(F2,textvariable=EW,values=EWL,width=3)
_d3xh.grid(row=0,column=7,sticky='w')
ttk.Label(F2,text='Latitude').grid(row=1,column=0,sticky='w')
_d3yd = ttk.Entry(F2,textvariable=D3yd,width=4)
_d3yd.grid(row=1,column=1,sticky='ew')
ttk.Label(F2,text='\u00b0',width=1).grid(row=1,column=2,sticky='w')
_d3ym = ttk.Entry(F2,textvariable=D3ym,width=3)
_d3ym.grid(row=1,column=3,sticky='ew')
ttk.Label(F2,text="'",width=1).grid(row=1,column=4,sticky='w')
    _d3ys = ttk.Entry(F2,textvariable=D3ys,width=9)
    _d3ys.grid(row=1,column=5,sticky='ew')
ttk.Label(F2,text='"',width=1).grid(row=1,column=6,sticky='w')
_d3yh = ttk.Combobox(F2,textvariable=NS,values=NSL,width=3)
_d3yh.grid(row=1,column=7,sticky='ew')
ttk.Button(F2,text='Ok',width=4,command=_DMS).grid(row=1,column=8,sticky='ew',padx=3)
F2.grid(row=2,column=0,padx=5,pady=10,sticky='ewsn')
# =======================
def set_time(self):
# =======================
global initial_DATE
global final_DATE
global time_updated
global time_layer
TSELECTION = tk.StringVar()
initial_date = tk.StringVar()
final_date = tk.StringVar()
time_interval= tk.DoubleVar()
time_updated = False
time_layer = -1
try:
backup_TIME = self.TIME.copy()
backup_DATE = self.DATE.copy()
backup_NL = self.NL
initial_date.set(self.DATE[0])
final_date.set(self.DATE[self.NL-1])
time_interval.set(self.TIME[2] - self.TIME[1])
initial_DATE = self.DATE[0]
final_DATE = self.DATE[self.NL-1]
except:
backup_NL = 0
now = datetime.datetime.now().date()
now = datetime.datetime.combine(now,datetime.datetime.min.time())
initial_DATE = now
final_DATE = now + datetime.timedelta(1)
initial_date.set(initial_DATE)
final_date.set(final_DATE)
time_interval.set(0)
def _cancel():
# ============
print("In _cancel: ",initial_date.get())
if backup_NL > 0:
self.TIME = backup_TIME.copy()
self.DATE = backup_DATE.copy()
self.NL = backup_NL
self.Window_settime.destroy()
self.Window_settime = None
def _done():
# ==========
global time_updated
global time_layer
# Get the initial and final date stamps:
#
initial_DATE = datetime.datetime.strptime(initial_date.get(),'%Y-%m-%d %H:%M:%S')
final_DATE = datetime.datetime.strptime(final_date.get(),'%Y-%m-%d %H:%M:%S')
initial_TIME = initial_DATE.timestamp()
final_TIME = final_DATE.timestamp()
if final_TIME < initial_TIME:
time_updated = False
_cancel()
return
if time_interval.get() == 0:
time_updated = False
_cancel()
return
if time_updated:
print("Updating drawing TIME and DATE ...")
# Unlink all the layers except the one selected
for i in range(self.LAYERS.n):
TYPE = self.LAYERS.TYPE[i]
ii = self.LAYERS.TYPE_INDEX[i]
if i == time_layer:
linked = True
else:
linked = False
print(i, TYPE, ii, linked)
if TYPE == 'VEC':
self.VEC[ii].LINK.set(linked)
elif TYPE == 'FLD':
self.CDF[ii].LINK.set(linked)
elif TYPE == 'FLT':
self.FLOAT[ii].LINK.set(linked)
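        # Build a uniform time axis: one record every time_interval seconds
        # between the initial and final dates (both included)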
self.NL = int((final_TIME - initial_TIME) / time_interval.get() + 1)
print(initial_TIME,final_TIME,time_interval.get(), self.NL)
self.TIME = []
self.DATE = []
for i in range(self.NL):
self.TIME.append(initial_TIME + i*time_interval.get())
self.DATE.append(datetime.datetime.fromtimestamp(self.TIME[i]))
print(self.DATE[i])
# Interpolate Lagrangian trajectories
#
for ii in range(self.nfloat):
if self.FLOAT[ii].nfloats > 1:
MAPX = []
MAPY = []
for i in range(self.FLOAT[ii].nfloats):
f = interpolate.interp1d(self.FLOAT[ii].TIME,self.FLOAT[ii].lon[:,i],
bounds_error=False, fill_value=np.NaN)
MAPX.append(f(self.TIME))
f = interpolate.interp1d(self.FLOAT[ii].TIME,self.FLOAT[ii].lat[:,i],
bounds_error=False, fill_value=np.NaN)
MAPY.append(list(f(self.TIME)))
self.FLOAT[ii].MAPX = np.array(MAPX).T
self.FLOAT[ii].MAPY = np.array(MAPY).T
else:
self.FLOAT[ii].Fx = interpolate.interp1d(self.FLOAT[ii].TIME,self.FLOAT[ii].lon,
bounds_error=False, fill_value=np.NaN)
self.FLOAT[ii].MAPX = self.FLOAT[ii].Fx(self.TIME)
self.FLOAT[ii].Fy = interpolate.interp1d(self.FLOAT[ii].TIME,self.FLOAT[ii].lat,
bounds_error=False, fill_value=np.NaN)
self.FLOAT[ii].MAPY = self.FLOAT[ii].Fy(self.TIME)
# Update time widgets
self.L.set(0)
self.L_LIST = list(range(self.NL))
self.lbox['values'] = self.L_LIST
self.PLOT.TLABEL.set(self.DATE[self.L.get()])
self.PLOT.VIDEO_L2.set(len(self.DATE)-1)
self.PLOT.SFRAME_L2.set(len(self.DATE)-1)
self.Window_settime.destroy()
self.Window_settime = None
self.make_plot()
def _autotime():
# ==============
global time_updated
global time_layer
print('In autotime')
layer_selected = TSELECTION.get()
print(layer_selected)
if empty(layer_selected):
return
for i in range(self.LAYERS.n):
layer_name = os.path.basename(self.LAYERS.FILENAME[i])
TYPE = self.LAYERS.TYPE[i]
ii = self.LAYERS.TYPE_INDEX[i]
print(layer_name,TYPE,ii)
if TYPE == 'VEC':
layer_ref = self.VEC[ii].ALIAS.get()
if layer_ref == layer_selected or layer_name == layer_selected:
print('Found it !!!!!')
time_updated = True
time_layer = i
self.TIME = self.VEC[ii].TIME.copy()
self.DATE = self.VEC[ii].DATE.copy()
self.NL = self.LAYERS.NREC[i]
elif TYPE == 'FLD':
layer_ref = self.CDF[ii].ALIAS.get()
if layer_ref == layer_selected or layer_name == layer_selected:
print('Found it !!!!!')
time_updated = True
time_layer = i
self.TIME = self.CDF[ii].TIME.copy()
self.DATE = self.CDF[ii].DATE.copy()
self.NL = self.LAYERS.NREC[i]
elif TYPE == 'FLT':
layer_ref = self.FLOAT[ii].ALIAS.get()
if layer_ref == layer_selected or layer_name == layer_selected:
time_updated = True
time_layer = i
self.TIME = self.FLOAT[ii].TIME.copy()
self.DATE = self.FLOAT[ii].DATE.copy()
self.NL = self.LAYERS.NREC[i]
if time_updated:
initial_date.set(self.DATE[0])
final_date.set(self.DATE[self.NL-1])
time_interval.set(self.TIME[2] - self.TIME[1])
def _initime():
# ==============
global time_updated
global initial_DATE
initial_DATE = datetime.datetime.strptime(initial_date.get(),'%Y-%m-%d %H:%M:%S')
time_updated = True
def _initime2():
# ==============
global time_updated
global initial_DATE
global cal
global top
top.destroy()
top = None
aa = cal.selection_get()
initial_DATE = initial_DATE.replace(year=aa.year,month=aa.month,day=aa.day)
initial_date.set(initial_DATE)
time_updated = True
def _inical():
# =============
global time_updated
      global initial_DATE
global cal
global top
top = tk.Toplevel(self.master)
cal = Calendar(top, font="Arial 14", selectmode='day', locale='en_US',
disabledforeground='red',cursor="hand1",
year=initial_DATE.year,month=initial_DATE.month,day=initial_DATE.day)
cal.grid()
ttk.Button(top, text="ok", command=_initime2).grid()
time_updated = True
def _fintime():
# ==============
global time_updated
global final_DATE
final_DATE = datetime.datetime.strptime(final_date.get(),'%Y-%m-%d %H:%M:%S')
time_updated = True
def _fintime2():
# ==============
global time_updated
global final_DATE
global cal
global top
top.destroy()
top = None
aa = cal.selection_get()
final_DATE = final_DATE.replace(year=aa.year,month=aa.month,day=aa.day)
final_date.set(final_DATE)
time_updated = True
def _fincal():
# =============
global time_updated
global final_DATE
global cal
global top
top = tk.Toplevel(self.master)
cal = Calendar(top, font="Arial 14", selectmode='day', locale='en_US',
disabledforeground='red',cursor="hand1",
year=final_DATE.year,month=final_DATE.month,day=final_DATE.day)
cal.grid()
ttk.Button(top, text="ok", command=_fintime2).grid()
time_updated = True
def _dt():
# =============
global time_updated
#time_updated = True
if time_interval.get() == 0:
messagebox.showinfo(message='Error: Time interval cannot be zero')
time_updated = False
else:
time_updated = True
def _reverse():
# =============
global time_updated
global initial_DATE
global final_DATE
if time_interval.get() != 0:
time_interval.set(-time_interval.get())
tmpDATE = final_DATE
final_DATE = initial_DATE
initial_DATE = tmpDATE
del tmpDATE
final_date.set(final_DATE)
initial_date.set(initial_DATE)
time_updated = True
# Main window
# ============
self.Window_settime = tk.Toplevel(self.master)
self.Window_settime.title('Set time axis')
self.Window_settime.resizable(width=True,height=True)
self.Window_settime.protocol('WM_DELETE_WINDOW',_cancel)
tpad = ttk.Style()
tpad.configure("tpad.TLabelframe",padding=[20,5,5,10])
# Make a list of all potential files to define the Time Axis:
#
tlist = []
layer_ref = ''
for i in range(self.LAYERS.n):
TYPE = self.LAYERS.TYPE[i]
ii = self.LAYERS.TYPE_INDEX[i]
if TYPE == 'VEC':
layer_ref = self.VEC[ii].ALIAS.get()
elif TYPE == 'FLD':
layer_ref = self.CDF[ii].ALIAS.get()
elif TYPE == 'FLT':
layer_ref = self.FLOAT[ii].ALIAS.get()
else:
print('Unknown file type in time axis')
if empty(layer_ref):
layer_ref = os.path.basename(self.LAYERS.FILENAME[i])
tlist.append(layer_ref)
F0 = ttk.Frame(self.Window_settime,padding=10)
F1=ttk.LabelFrame(F0,text='Automatic time selection',borderwidth=5,style='tpad.TLabelframe')
ttk.Label(F1,text="Select field: ").grid(row=1,column=0,sticky='e',padx=3)
_was = ttk.Combobox(F1,textvariable=TSELECTION,values=tlist,width=14)
_was.grid(row=1,column=1,sticky='w',padx=3)
_was.bind('<<ComboboxSelected>>',lambda e: _autotime())
if len(tlist) == 0:
_was.configure(state='disabled')
F1.grid(row=0,column=0,columnspan=3)
F2=ttk.LabelFrame(F0,text='Manual time selection',borderwidth=5,style='tpad.TLabelframe')
ttk.Label(F2,text="Initial time: ").grid(row=0,column=0,sticky='e',padx=3)
_wini = tk.Entry(F2,textvariable=initial_date,width=18)
_wini.bind('<Return>',lambda e: _initime())
_wini.grid(row=0,column=1,sticky='w',padx=3)
tk.Button(F2,text='Select',command=_inical).grid(row=0,column=2,sticky='w',padx=3)
ttk.Label(F2,text="Final time: ").grid(row=1,column=0,sticky='e',padx=3)
_wfin = tk.Entry(F2,textvariable=final_date,width=18)
_wfin.bind('<Return>',lambda e: _fintime())
_wfin.grid(row=1,column=1,sticky='w',padx=3)
tk.Button(F2,text='Select',command=_fincal).grid(row=1,column=2,sticky='w',padx=3)
ttk.Label(F2,text="Time interval (seconds): ").grid(row=2,column=0,sticky='e',padx=3)
_wtdt = tk.Entry(F2,textvariable=time_interval,width=18)
_wtdt.bind('<Return>',lambda e: _dt())
_wtdt.grid(row=2,column=1,sticky='w',padx=3)
F2.grid(row=1,column=0,columnspan=3)
F0.grid()
F1 = ttk.Frame(self.Window_settime,padding=5)
ttk.Button(F1,text='Cancel',command=_cancel,padding=5). \
grid(row=0,column=1,padx=3)
ttk.Button(F1,text='Reverse',command=_reverse,padding=5). \
grid(row=0,column=2,padx=3)
ttk.Button(F1,text='Done',command=_done,padding=5). \
grid(row=0,column=3,padx=3)
F1.grid(sticky='ew',columnspan=2)
# =======================
def ruler(self):
# =======================
global first
global cross
global _cc
global _kk
global _ll
global xo
global yo
try:
self.canvas.mpl_disconnect(self.CANVAS_CLICK)
except:
self.make_plot()
self.canvas.mpl_disconnect(self.CANVAS_CLICK)
first = True
cross = None
_cc = None
_kk = None
_ll = None
def _done():
# ==========
global _cc
global _kk
global _ll
global cross
global line
global annotation
cross.clear()
line.clear()
annotation.remove()
self.make_plot()
self.canvas.mpl_disconnect(_cc)
self.canvas.mpl_disconnect(_kk)
self.canvas.mpl_disconnect(_ll)
self.master.unbind('<Key>')
self.CANVAS_CLICK = self.canvas.mpl_connect('button_press_event',self.canvas_click)
def _canvas_click(event):
# ============================
global first
global cross
global xo
global yo
global _ll
global line
global annotation
if first:
first = False
xo = event.xdata
yo = event.ydata
cross = self.ax.plot(xo,yo,'+',ms=20,transform=ccrs.PlateCarree())
self.canvas.draw()
messagebox.showinfo(message='Use left mouse to select the second point. ESC to quit')
string = 'Calculating distances from point ({0:.3f},{1:.3f})'.format(xo,yo)
line = self.ax.plot(xo,yo,color='k',lw=0.8,ls='--',zorder=100,transform=ccrs.PlateCarree())
annotation = self.ax.annotate('',xy=(xo,yo), \
ha='right', \
va='bottom', \
xycoords='data',
bbox=dict(boxstyle='round,pad=0.5',fc='yellow',alpha=0.75))
else:
line[0].set_visible(False)
if event.inaxes:
dist = haversine((xo,yo),(event.xdata,event.ydata)) / 1000.
string = 'Distance to ({0:8.3f},{1:8.3f}): {2:7.1f} km (ESC to quit)'.format(event.xdata,event.ydata,dist)
line[0].set_data([xo,event.xdata],[yo,event.ydata])
line[0].set_visible(True)
        # Move the annotation text to the cursor (set_position updates xytext)
        annotation.set_position((event.xdata, event.ydata))
annotation.set_text('{0:7.1f} km'.format(dist))
self.canvas.draw()
toconsola(string,wid=self.cons)
def _key_handler(event):
# ============================
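      # On X11, keycode 9 corresponds to the Escape key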
if event.keycode == 9:
_done()
def _key_handler2(event):
# ============================
if event.key == 'escape':
_done()
# Main window
# ============
_cc = self.canvas.mpl_connect('button_press_event',_canvas_click)
_kk = self.canvas.mpl_connect('key_press_event',_key_handler2)
self.master.bind('<Key>',_key_handler)
messagebox.showinfo(message='Select a starting point with the left mouse button. ESC to quit')
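# A minimal sketch of the great-circle distance used by the ruler above,
# assuming the imported haversine() takes (lon, lat) pairs in degrees and
# returns meters (divided by 1000 above to get km); illustrative only:
#
#   from math import radians, sin, cos, asin, sqrt
#   def haversine_sketch(p, q, R=6371000.0):
#       lon1, lat1, lon2, lat2 = map(radians, (*p, *q))
#       a = sin((lat2-lat1)/2)**2 + cos(lat1)*cos(lat2)*sin((lon2-lon1)/2)**2
#       return 2.0 * R * asin(sqrt(a))   # meters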
# =======================
def atlas(self):
# =======================
def _cancel():
# ============
self.Window_atlas.destroy()
# Main window
# ============
self.Window_atlas = tk.Toplevel(self.master)
self.Window_atlas.title('Climatology atlas')
self.Window_atlas.resizable(width=False,height=False)
self.Window_atlas.protocol('WM_DELETE_WINDOW',_cancel)
climatology.winClim(self.Window_atlas,wid=self.cons)
# ====================
def save_frames(self):
# ====================
''' Save a series of frames as PNG files '''
# -----------
def _close():
# -----------
self.Window_sframe.destroy()
self.Window_sframe = None
def _done():
# ----------
L_Backup = self.L.get()
d = int(np.ceil(np.log10(self.NL)))   # digits needed to zero-pad frame numbers
fmt = '0%d' % d
for L in range(self.PLOT.SFRAME_L1.get(),self.PLOT.SFRAME_L2.get()+1,self.PLOT.SFRAME_LSTEP.get()):
self.L.set(L)
self.PLOT.TLABEL.set(self.DATE[L])
ofile = self.PLOT.SFRAME_PREFIX.get()
if (self.PLOT.SFRAME_POSTFIX_MODE.get() == 0):
ofile = ofile + format(L,fmt) + '.png'
else:
postfix = self.DATE[L].isoformat(sep='T')
postfix = postfix.replace(":","")
postfix = postfix.replace("-","")
ofile = ofile + postfix + '.png'
toconsola('Saving frame in '+ofile,wid=self.cons)
for i in range(self.nvec):
if self.VEC[i].LINK.get():
self.VEC[i].L.set(L)
self.VEC[i].read(update_lims=False,wid=self.cons)
for i in range(self.ncdf):
if self.CDF[i].LINK.get():
self.CDF[i].L.set(L)
self.CDF[i].read(update_lims=False,wid=self.cons)
self.make_plot()
self.fig.savefig(ofile,
dpi=self.PLOT.DPI.get(),
bbox_inches='tight')
messagebox.showinfo(parent=self.Window_sframe,message='Frames have been saved')
self.L.set(L_Backup)
def _loadconf():
# -------------
'''Load SAVE FRAME configuration'''
toconsola('Retrieving SAVING FRAME defaults.',wid=self.cons)
#print('Retrieving VIDEO defaults.')
with open(self.PLOT.FILECONF) as infile:
conf = json.load(infile)
self.PLOT.SFRAME_PREFIX.set(conf['FRAME_PREFIX'])
self.PLOT.SFRAME_POSTFIX_MODE.set(conf['FRAME_POSTFIX_MODE'])
self.PLOT.SFRAME_LSTEP.set(conf['FRAME_STEP'])
def _saveconf():
# -------------
'''Save SAVE FRAME configuration'''
with open(self.PLOT.FILECONF) as infile:
conf = json.load(infile)
toconsola('Updating SAVING FRAME defaults.',wid=self.cons)
#print('Updating VIDEO defaults.')
conf['FRAME_PREFIX'] = self.PLOT.SFRAME_PREFIX.get()
conf['FRAME_POSTFIX_MODE'] = self.PLOT.SFRAME_POSTFIX_MODE.get()
conf['FRAME_STEP'] = self.PLOT.SFRAME_LSTEP.get()
with io.open(self.PLOT.FILECONF,'w',encoding='utf8') as outfile:
str_ = json.dumps(conf,ensure_ascii=False, \
sort_keys=True, \
indent=2, \
separators=(',',': '))
outfile.write(to_unicode(str_)+'\n')
# Main
# ----
if self.LAYERS.n == 0:
messagebox.showinfo(message='No layers have been added')
return
if self.Window_sframe is not None:
self.Window_sframe.lift()
return
self.Window_sframe = tk.Toplevel(self.master)
self.Window_sframe.title('Save frames')
self.Window_sframe.resizable(width=True,height=True)
self.Window_sframe.protocol('WM_DELETE_WINDOW',_close)
# Menu:
menubar = tk.Menu(self.Window_sframe)
menu = tk.Menu(menubar,tearoff=0)
menubar.add_cascade(label='Configuration',menu=menu)
menu.add_command(label='Restore',command=_loadconf)
menu.add_command(label='Save',command=_saveconf)
try:
self.Window_sframe.config(menu=menubar)
except AttributeError:
# master is a toplevel window (Python 2.4/Tkinter 1.63)
self.Window_sframe.tk.call(self.Window_sframe, "config", "-menu", menubar)
# Widgets
#
F0 = ttk.Frame(self.Window_sframe,borderwidth=5,padding=5)
ttk.Label(F0,text='Output prefix : ').grid(row=0,column=0)
ttk.Entry(F0,textvariable=self.PLOT.SFRAME_PREFIX,width=40).grid(row=0,column=1,columnspan=4,sticky='w')
ttk.Label(F0,text='Output postfix mode : ').grid(row=1,column=0)
ttk.Radiobutton(F0,text=' Frame number',variable=self.PLOT.SFRAME_POSTFIX_MODE,value=0).\
grid(row=1,column=1,padx=3)
ttk.Radiobutton(F0,text=' Date and time',variable=self.PLOT.SFRAME_POSTFIX_MODE,value=1).\
grid(row=2,column=1,padx=3)
ttk.Label(F0,text='Initial frame : ').grid(row=3,column=0)
ttk.Entry(F0,textvariable=self.PLOT.SFRAME_L1,width=7).grid(row=3,column=1,sticky='w')
ttk.Label(F0,text='Final frame : ').grid(row=4,column=0)
ttk.Entry(F0,textvariable=self.PLOT.SFRAME_L2,width=7).grid(row=4,column=1,sticky='w')
ttk.Label(F0,text='Frame step : ').grid(row=5,column=0)
ttk.Entry(F0,textvariable=self.PLOT.SFRAME_LSTEP,width=7).grid(row=5,column=1,sticky='w')
done = ttk.Button(F0,text='Do it',command=_done)
done.grid(row=6,column=3,padx=3)
done.bind("<Return>",lambda e:_done())
close = ttk.Button(F0,text='Close',command=_close)
close.grid(row=6,column=4,padx=3)
close.bind("<Return>",lambda e:_close())
F0.grid()
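# A minimal sketch of the two output-name modes handled in _done() above,
# with illustrative values (100 frames, frame index 7):
#
#   import numpy as np
#   d = int(np.ceil(np.log10(100)))   # -> 2 digits of zero padding
#   format(7, '0%d' % d)              # -> '07'  (frame-number mode)
#   # date mode strips ':' and '-' from an ISO timestamp:
#   '2020-01-02T03:04:05'.replace(':','').replace('-','')  # -> '20200102T030405'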
# ====================
def get_feature(self):
# ====================
''' Widget to read Features '''
self.FSOURCE = tk.StringVar()
FEATURE = feature.parameters()
self.FSOURCE.set(self.FEATURE.OPTIONS[0])
def _cancel():
# ===========
self.Window_feature.destroy()
self.Window_feature = None
def _close():
# ===========
self.Window_feature.destroy()
self.Window_feature = None
self.make_plot()
def _done():
# ===========
_close()
def _clear():
# ===========
if self.FEATURE.n == 0:
return
ii = self.FEATURE.INDX.get()
self.LAYERS.erase('FEATURE',ii,wid=self.cons)
self.LAYERS.print()
if self.LAYERS.n == 0:
self.TIME = []
self.DATE = []
self.L.set(0)
self.L_LIST = []
self.NL = 0
self.bnext.configure(state='disabled')
self.bprev.configure(state='disabled')
self.PLOT.TLABEL.set('')
self.lbox['values'] = self.L_LIST
self.lbox.configure(state='disabled')
self.first = True
toconsola('Erasing record '+str(ii),wid=self.cons)
del self.FEATURE.DATA[ii]
self.FEATURE.n -= 1
ii = self.FEATURE.n-1 if ii >= self.FEATURE.n else ii
toconsola('New feature = '+str(ii),wid=self.cons)
self.FEATURE.INDX.set(ii)
_refill(ii)
def _reget():
# ===========
self.FEATURE.INDX.set(_wsel.get())
ii = self.FEATURE.INDX.get()
_refill(ii)
def _refill(ii):
# ============
if ii >= 0:
self.FEATURE.LIST = list(range(self.FEATURE.n))
_wsel['values'] = self.FEATURE.LIST
_went['textvariable'] = self.FEATURE.DATA[ii].FILENAME
_wstat['text'] = 'Number of features = '+str(self.FEATURE.DATA[ii].n)
_wsel.configure(state='normal')
_show['variable'] = self.FEATURE.DATA[ii].show
_aent.configure(state='normal')
_aent['textvariable'] = self.FEATURE.DATA[ii].ALIAS
else:
self.FEATURE.DATA = []
self.FEATURE.LIST = ['0']
self.FEATURE.INDX.set(0)
#_wsel['values'] = self.FEATURE_LIST
_wsel['values'] = None
_went['textvariable'] = None
_wstat['text'] = ''
_wsel.configure(state='disabled')
_aent.configure(state='disabled')
_show.configure(state='disabled')
self.make_plot()
def _add():
# ===========
ISOURCE = self.FEATURE.OPTIONS.index(self.FSOURCE.get())
types=[('JSON','*.json'),('GEOJSON','*.geojson'),('ALL','*')]
nn = filedialog.askopenfilename(parent=self.Window_feature, \
filetypes=types)
if len(nn) == 0:
return
filename = '%s' % nn
toconsola('Reading FEATURE file '+filename,wid=self.cons)
FEATURE.Read(filename)
if FEATURE.n == 0:
return
self.FEATURE.n += 1
self.FEATURE.DATA.append(FEATURE)
self.FEATURE.INDX.set(self.FEATURE.n-1)
self.FEATURE.LIST = list(range(self.FEATURE.n))
self.LAYERS.add(TYPE='FEATURE',Filename=filename,N=FEATURE.n,wid=self.cons)
self.LAYERS.print()
ii = self.FEATURE.INDX.get()
_refill(ii)
# Main Window ...
# ================
if self.Window_feature is None:
self.Window_feature = tk.Toplevel(self.master)
self.Window_feature.title('JSON feature')
self.Window_feature.protocol('WM_DELETE_WINDOW',_close)
else:
self.Window_feature.lift()
if self.FEATURE.n > 0:
ii = self.FEATURE.INDX.get()
else:
ii = -1
F0 = ttk.Frame(self.Window_feature,padding=5)
#Add
ttk.Combobox(F0,textvariable=self.FSOURCE, \
values=self.FEATURE.OPTIONS).grid(row=0,column=0,padx=3)
ttk.Button(F0,text='Import',command=_add).grid(row=1,column=0,padx=3)
# Filename:
ttk.Label(F0,text='Feature file').grid(row=0,column=1,padx=3)
_wsel = ttk.Combobox(F0,textvariable=self.FEATURE.INDX, \
values=self.FEATURE.LIST,width=5)
_wsel.grid(row=0,column=2)
_wsel.bind('<<ComboboxSelected>>',lambda e: _reget())
_went = ttk.Entry(F0,justify='left',width=50,state='readonly')
_went.grid(row=0,column=3,columnspan=5,padx=3,sticky='w')
if ii == -1:
_wstat = ttk.Label(F0,text='',width=50,justify='left')
_wsel.configure(state='disabled')
else:
_wstat = ttk.Label(F0,text='Features in the file = '+str(self.FEATURE.DATA[ii].n),width=50,justify='left')
_went['textvariable'] = self.FEATURE.DATA[ii].FILENAME
_wstat.grid(row=1,column=3,columnspan=5,padx=3,sticky='w')
#Alias
ttk.Label(F0,text='Alias').grid(row=2,column=1,padx=3,pady=3)
_aent = ttk.Entry(F0,width=15,justify='left')
_aent.grid(row=2,column=2,columnspan=2,sticky='w')
F0.grid(row=0,column=0)
F1 = ttk.Frame(self.Window_feature,padding=5)
if ii == -1:
_show = ttk.Checkbutton(F1,text='Show')
_aent.configure(state='disabled')
else:
_show = ttk.Checkbutton(F1,text='Show',command=self.make_plot)
_show['variable']=self.FEATURE.DATA[ii].show
_aent['textvariable'] = self.FEATURE.DATA[ii].ALIAS
_show.grid(row=1,column=5,padx=3)
ttk.Button(F1,text='Cancel',command=_cancel).grid(row=1,column=6,padx=3)
ttk.Button(F1,text='Clear',command=_clear).grid(row=1,column=7,padx=3)
ttk.Button(F1,text='Plot',command=_close).grid(row=1,column=8,padx=3)
F1.grid(row=1,column=0)
def feature_config(self):
# =======================
if self.FEATURE.n == 0:
messagebox.showinfo(message='No feature selected yet')
return
if self.Window_featureconfig is None:
feature.Configuration_Menu(self,self.FEATURE)
else:
self.Window_featureconfig.lift()
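# A minimal sketch of the kind of file get_feature() imports, assuming
# feature.Read() parses standard GeoJSON; filenames and keys below are
# illustrative only, not part of this module:
#
#   import json
#   with open('coastline.geojson') as f:       # hypothetical filename
#       collection = json.load(f)
#   n = len(collection.get('features', []))    # what FEATURE.n would count
#   first_geom = collection['features'][0]['geometry']['type']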
|
quimbp/cosmo
|
modules/cosmo/drawing.py
|
Python
|
mit
| 475,691
|
[
"NetCDF"
] |
4917991fd1b398142cfe78afea401dd769cd4bb8c61f1c3b9c01156d61888558
|
#--------------------------------------------------------------------------
# Software: InVesalius - 3D Medical Image Reconstruction Software
# Copyright: (C) 2001 Centro de Pesquisas Renato Archer
# Homepage: http://www.softwarepublico.gov.br
# Contact: invesalius@cti.gov.br
# License: GNU - GPL 2 (LICENSE.txt/LICENCA.txt)
#--------------------------------------------------------------------------
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; version 2 of the
# License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#--------------------------------------------------------------------------
import dataclasses
from functools import partial
import itertools
import csv
import queue
import time
import threading
import nibabel as nb
import numpy as np
try:
import Trekker
has_trekker = True
except ImportError:
has_trekker = False
try:
import invesalius.data.elfin as elfin
import invesalius.data.elfin_processing as elfin_process
has_robot = True
except ImportError:
has_robot = False
import wx
import vtk
try:
import wx.lib.agw.foldpanelbar as fpb
except ImportError:
import wx.lib.foldpanelbar as fpb
import wx.lib.colourselect as csel
import wx.lib.masked.numctrl
from invesalius.pubsub import pub as Publisher
from time import sleep
import invesalius.constants as const
if has_trekker:
import invesalius.data.brainmesh_handler as brain
import invesalius.data.imagedata_utils as imagedata_utils
import invesalius.data.slice_ as sl
import invesalius.data.tractography as dti
import invesalius.data.record_coords as rec
import invesalius.data.vtk_utils as vtk_utils
import invesalius.data.bases as db
import invesalius.gui.dialogs as dlg
import invesalius.project as prj
import invesalius.session as ses
from invesalius import utils
from invesalius.gui import utils as gui_utils
from invesalius.navigation.icp import ICP
from invesalius.navigation.navigation import Navigation
from invesalius.navigation.tracker import Tracker
from invesalius.navigation.robot import Robot
from invesalius.data.converters import to_vtk
from invesalius.net.neuronavigation_api import NeuronavigationApi
HAS_PEDAL_CONNECTION = True
try:
from invesalius.net.pedal_connection import PedalConnection
except ImportError:
HAS_PEDAL_CONNECTION = False
BTN_NEW = wx.NewId()
BTN_IMPORT_LOCAL = wx.NewId()
class TaskPanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
inner_panel = InnerTaskPanel(self)
sizer = wx.BoxSizer(wx.HORIZONTAL)
sizer.Add(inner_panel, 1, wx.EXPAND|wx.GROW|wx.BOTTOM|wx.RIGHT |
wx.LEFT, 7)
sizer.Fit(self)
self.SetSizer(sizer)
self.Update()
self.SetAutoLayout(1)
class InnerTaskPanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
default_colour = self.GetBackgroundColour()
background_colour = wx.Colour(255,255,255)
self.SetBackgroundColour(background_colour)
txt_nav = wx.StaticText(self, -1, _('Select fiducials and navigate'),
size=wx.Size(90, 20))
txt_nav.SetFont(wx.Font(9, wx.DEFAULT, wx.NORMAL, wx.BOLD))
# Create horizontal sizer to represent lines in the panel
txt_sizer = wx.BoxSizer(wx.HORIZONTAL)
txt_sizer.Add(txt_nav, 1, wx.EXPAND|wx.GROW, 5)
# Fold panel which contains navigation configurations
fold_panel = FoldPanel(self)
fold_panel.SetBackgroundColour(default_colour)
# Add line sizer into main sizer
main_sizer = wx.BoxSizer(wx.VERTICAL)
main_sizer.Add(txt_sizer, 0, wx.GROW|wx.EXPAND|wx.LEFT|wx.RIGHT, 5)
main_sizer.Add(fold_panel, 1, wx.GROW|wx.EXPAND|wx.LEFT|wx.RIGHT, 5)
main_sizer.AddSpacer(5)
main_sizer.Fit(self)
self.SetSizerAndFit(main_sizer)
self.Update()
self.SetAutoLayout(1)
self.sizer = main_sizer
class FoldPanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
inner_panel = InnerFoldPanel(self)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(inner_panel, 0, wx.EXPAND|wx.GROW)
sizer.Fit(self)
self.SetSizerAndFit(sizer)
self.Update()
self.SetAutoLayout(1)
class InnerFoldPanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
try:
default_colour = wx.SystemSettings.GetColour(wx.SYS_COLOUR_MENUBAR)
except AttributeError:
default_colour = wx.SystemSettings_GetColour(wx.SYS_COLOUR_MENUBAR)
self.SetBackgroundColour(default_colour)
self.__bind_events()
# Fold panel and its style settings
# FIXME: If we dont insert a value in size or if we set wx.DefaultSize,
# the fold_panel doesnt show. This means that, for some reason, Sizer
# is not working properly in this panel. It might be on some child or
# parent panel. Perhaps we need to insert the item into the sizer also...
# Study this.
fold_panel = fpb.FoldPanelBar(self, -1, wx.DefaultPosition,
(10, 330), 0, fpb.FPB_SINGLE_FOLD)
# Initialize Tracker and PedalConnection objects here so that they are available to several panels.
#
tracker = Tracker()
pedal_connection = PedalConnection() if HAS_PEDAL_CONNECTION else None
neuronavigation_api = NeuronavigationApi()
# Fold panel style
style = fpb.CaptionBarStyle()
style.SetCaptionStyle(fpb.CAPTIONBAR_GRADIENT_V)
style.SetFirstColour(default_colour)
style.SetSecondColour(default_colour)
# Fold 1 - Navigation panel
item = fold_panel.AddFoldPanel(_("Neuronavigation"), collapsed=True)
ntw = NeuronavigationPanel(
parent=item,
tracker=tracker,
pedal_connection=pedal_connection,
neuronavigation_api=neuronavigation_api,
)
fold_panel.ApplyCaptionStyle(item, style)
fold_panel.AddFoldPanelWindow(item, ntw, spacing=0,
leftSpacing=0, rightSpacing=0)
fold_panel.Expand(fold_panel.GetFoldPanel(0))
# Fold 2 - Object registration panel
item = fold_panel.AddFoldPanel(_("Object registration"), collapsed=True)
otw = ObjectRegistrationPanel(item, tracker, pedal_connection)
fold_panel.ApplyCaptionStyle(item, style)
fold_panel.AddFoldPanelWindow(item, otw, spacing=0,
leftSpacing=0, rightSpacing=0)
# Fold 3 - Markers panel
item = fold_panel.AddFoldPanel(_("Markers"), collapsed=True)
mtw = MarkersPanel(item, tracker)
fold_panel.ApplyCaptionStyle(item, style)
fold_panel.AddFoldPanelWindow(item, mtw, spacing= 0,
leftSpacing=0, rightSpacing=0)
# Fold 4 - Tractography panel
if has_trekker:
item = fold_panel.AddFoldPanel(_("Tractography"), collapsed=True)
otw = TractographyPanel(item)
fold_panel.ApplyCaptionStyle(item, style)
fold_panel.AddFoldPanelWindow(item, otw, spacing=0,
leftSpacing=0, rightSpacing=0)
# Fold 5 - DBS
self.dbs_item = fold_panel.AddFoldPanel(_("Deep Brain Stimulation"), collapsed=True)
dtw = DbsPanel(self.dbs_item)  # Assign to a new variable, create the panel
fold_panel.ApplyCaptionStyle(self.dbs_item, style)
fold_panel.AddFoldPanelWindow(self.dbs_item, dtw, spacing= 0,
leftSpacing=0, rightSpacing=0)
self.dbs_item.Hide()
# Fold 6 - Sessions
item = fold_panel.AddFoldPanel(_("Sessions"), collapsed=False)
stw = SessionPanel(item)
fold_panel.ApplyCaptionStyle(item, style)
fold_panel.AddFoldPanelWindow(item, stw, spacing= 0,
leftSpacing=0, rightSpacing=0)
# Check box for camera update in volume rendering during navigation
tooltip = wx.ToolTip(_("Update camera in volume"))
checkcamera = wx.CheckBox(self, -1, _('Vol. camera'))
checkcamera.SetToolTip(tooltip)
checkcamera.SetValue(const.CAM_MODE)
checkcamera.Bind(wx.EVT_CHECKBOX, self.OnVolumeCamera)
self.checkcamera = checkcamera
# Check box to use serial port to trigger pulse signal and create markers
tooltip = wx.ToolTip(_("Enable serial port communication to trigger pulse and create markers"))
checkbox_serial_port = wx.CheckBox(self, -1, _('Serial port'))
checkbox_serial_port.SetToolTip(tooltip)
checkbox_serial_port.SetValue(False)
checkbox_serial_port.Bind(wx.EVT_CHECKBOX, partial(self.OnEnableSerialPort, ctrl=checkbox_serial_port))
self.checkbox_serial_port = checkbox_serial_port
# Check box for object position and orientation update in volume rendering during navigation
tooltip = wx.ToolTip(_("Show and track TMS coil"))
checkobj = wx.CheckBox(self, -1, _('Show coil'))
checkobj.SetToolTip(tooltip)
checkobj.SetValue(False)
checkobj.Disable()
checkobj.Bind(wx.EVT_CHECKBOX, self.OnShowObject)
self.checkobj = checkobj
# if sys.platform != 'win32':
self.checkcamera.SetWindowVariant(wx.WINDOW_VARIANT_SMALL)
checkbox_serial_port.SetWindowVariant(wx.WINDOW_VARIANT_SMALL)
checkobj.SetWindowVariant(wx.WINDOW_VARIANT_SMALL)
line_sizer = wx.BoxSizer(wx.HORIZONTAL)
line_sizer.Add(checkcamera, 0, wx.ALIGN_LEFT | wx.RIGHT | wx.LEFT, 5)
line_sizer.Add(checkbox_serial_port, 0, wx.ALIGN_CENTER)
line_sizer.Add(checkobj, 0, wx.RIGHT | wx.LEFT, 5)
line_sizer.Fit(self)
# Panel sizer to expand fold panel
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(fold_panel, 0, wx.GROW|wx.EXPAND)
sizer.Add(line_sizer, 1, wx.GROW | wx.EXPAND)
sizer.Fit(self)
self.track_obj = False
self.SetSizer(sizer)
self.Update()
self.SetAutoLayout(1)
def __bind_events(self):
Publisher.subscribe(self.OnCheckStatus, 'Navigation status')
Publisher.subscribe(self.OnShowObject, 'Update track object state')
Publisher.subscribe(self.OnVolumeCamera, 'Change camera checkbox')
Publisher.subscribe(self.OnShowDbs, "Active dbs folder")
Publisher.subscribe(self.OnHideDbs, "Deactive dbs folder")
def OnShowDbs(self):
self.dbs_item.Show()
def OnHideDbs(self):
self.dbs_item.Hide()
def OnCheckStatus(self, nav_status, vis_status):
if nav_status:
self.checkbox_serial_port.Enable(False)
self.checkobj.Enable(False)
else:
self.checkbox_serial_port.Enable(True)
if self.track_obj:
self.checkobj.Enable(True)
def OnEnableSerialPort(self, evt, ctrl):
if ctrl.GetValue():
from wx import ID_OK
dlg_port = dlg.SetCOMPort(select_baud_rate=False)
if dlg_port.ShowModal() != ID_OK:
ctrl.SetValue(False)
return
com_port = dlg_port.GetCOMPort()
baud_rate = 115200
Publisher.sendMessage('Update serial port', serial_port_in_use=True, com_port=com_port, baud_rate=baud_rate)
else:
Publisher.sendMessage('Update serial port', serial_port_in_use=False)
def OnShowObject(self, evt=None, flag=None, obj_name=None, polydata=None, use_default_object=True):
if not evt:
if flag:
self.checkobj.Enable(True)
self.checkobj.SetValue(True)
self.track_obj = True
Publisher.sendMessage('Status target button', status=True)
else:
self.checkobj.Enable(False)
self.checkobj.SetValue(False)
self.track_obj = False
Publisher.sendMessage('Status target button', status=False)
Publisher.sendMessage('Update show object state', state=self.checkobj.GetValue())
def OnVolumeCamera(self, evt=None, status=None):
if not evt:
self.checkcamera.SetValue(status)
Publisher.sendMessage('Update volume camera state', camera_state=self.checkcamera.GetValue())
class NeuronavigationPanel(wx.Panel):
def __init__(self, parent, tracker, pedal_connection, neuronavigation_api):
wx.Panel.__init__(self, parent)
try:
default_colour = wx.SystemSettings.GetColour(wx.SYS_COLOUR_MENUBAR)
except AttributeError:
default_colour = wx.SystemSettings_GetColour(wx.SYS_COLOUR_MENUBAR)
self.SetBackgroundColour(default_colour)
self.SetAutoLayout(1)
self.__bind_events()
# Initialize global variables
self.pedal_connection = pedal_connection
self.neuronavigation_api = neuronavigation_api
self.navigation = Navigation(
pedal_connection=pedal_connection,
neuronavigation_api=neuronavigation_api,
)
self.icp = ICP()
self.tracker = tracker
self.robot = Robot(tracker)
self.nav_status = False
self.tracker_fiducial_being_set = None
self.current_coord = 0, 0, 0, None, None, None
# Initialize list of buttons and numctrls for wx objects
self.btns_set_fiducial = [None, None, None, None, None, None]
self.numctrls_fiducial = [[], [], [], [], [], []]
# ComboBox for spatial tracker device selection
tracker_options = [_("Select tracker:")] + self.tracker.get_trackers()
select_tracker_elem = wx.ComboBox(self, -1, "", size=(145, -1),
choices=tracker_options, style=wx.CB_DROPDOWN|wx.CB_READONLY)
tooltip = wx.ToolTip(_("Choose the tracking device"))
select_tracker_elem.SetToolTip(tooltip)
select_tracker_elem.SetSelection(const.DEFAULT_TRACKER)
select_tracker_elem.Bind(wx.EVT_COMBOBOX, partial(self.OnChooseTracker, ctrl=select_tracker_elem))
self.select_tracker_elem = select_tracker_elem
# ComboBox for tracker reference mode
tooltip = wx.ToolTip(_("Choose the navigation reference mode"))
choice_ref = wx.ComboBox(self, -1, "",
choices=const.REF_MODE, style=wx.CB_DROPDOWN|wx.CB_READONLY)
choice_ref.SetSelection(const.DEFAULT_REF_MODE)
choice_ref.SetToolTip(tooltip)
choice_ref.Bind(wx.EVT_COMBOBOX, partial(self.OnChooseReferenceMode, ctrl=select_tracker_elem))
self.choice_ref = choice_ref
# Toggle buttons for image fiducials
for n, fiducial in enumerate(const.IMAGE_FIDUCIALS):
button_id = fiducial['button_id']
label = fiducial['label']
tip = fiducial['tip']
ctrl = wx.ToggleButton(self, button_id, label=label)
ctrl.SetMinSize((gui_utils.calc_width_needed(ctrl, 3), -1))
ctrl.SetToolTip(wx.ToolTip(tip))
ctrl.Bind(wx.EVT_TOGGLEBUTTON, partial(self.OnImageFiducials, n))
self.btns_set_fiducial[n] = ctrl
# Push buttons for tracker fiducials
for n, fiducial in enumerate(const.TRACKER_FIDUCIALS):
button_id = fiducial['button_id']
label = fiducial['label']
tip = fiducial['tip']
ctrl = wx.ToggleButton(self, button_id, label=label)
ctrl.SetMinSize((gui_utils.calc_width_needed(ctrl, 3), -1))
ctrl.SetToolTip(wx.ToolTip(tip))
ctrl.Bind(wx.EVT_TOGGLEBUTTON, partial(self.OnTrackerFiducials, n, ctrl=ctrl))
self.btns_set_fiducial[n + 3] = ctrl
# TODO: Find a better alignment between FRE, text and navigate button
# Fiducial registration error text and checkbox
txt_fre = wx.StaticText(self, -1, _('FRE:'))
tooltip = wx.ToolTip(_("Fiducial registration error"))
txtctrl_fre = wx.TextCtrl(self, value="", size=wx.Size(60, -1), style=wx.TE_CENTRE)
txtctrl_fre.SetFont(wx.Font(9, wx.DEFAULT, wx.NORMAL, wx.BOLD))
txtctrl_fre.SetBackgroundColour('WHITE')
txtctrl_fre.SetEditable(0)
txtctrl_fre.SetToolTip(tooltip)
self.txtctrl_fre = txtctrl_fre
# Toggle button for neuronavigation
tooltip = wx.ToolTip(_("Start navigation"))
btn_nav = wx.ToggleButton(self, -1, _("Navigate"), size=wx.Size(80, -1))
btn_nav.SetToolTip(tooltip)
btn_nav.Bind(wx.EVT_TOGGLEBUTTON, partial(self.OnNavigate, btn_nav=btn_nav))
# "Refine" text and checkbox
txt_icp = wx.StaticText(self, -1, _('Refine:'))
tooltip = wx.ToolTip(_(u"Refine the coregistration"))
checkbox_icp = wx.CheckBox(self, -1, _(' '))
checkbox_icp.SetValue(False)
checkbox_icp.Enable(False)
checkbox_icp.Bind(wx.EVT_CHECKBOX, partial(self.OnCheckboxICP, ctrl=checkbox_icp))
checkbox_icp.SetToolTip(tooltip)
self.checkbox_icp = checkbox_icp
# "Pedal pressed" text and an indicator (checkbox) for pedal press
if pedal_connection is not None and pedal_connection.in_use:
txt_pedal_pressed = wx.StaticText(self, -1, _('Pedal pressed:'))
tooltip = wx.ToolTip(_(u"Is the pedal pressed"))
checkbox_pedal_pressed = wx.CheckBox(self, -1, _(' '))
checkbox_pedal_pressed.SetValue(False)
checkbox_pedal_pressed.Enable(False)
checkbox_pedal_pressed.SetToolTip(tooltip)
pedal_connection.add_callback(name='gui', callback=checkbox_pedal_pressed.SetValue)
self.checkbox_pedal_pressed = checkbox_pedal_pressed
else:
txt_pedal_pressed = None
self.checkbox_pedal_pressed = None
# "Lock to target" text and checkbox
tooltip = wx.ToolTip(_(u"Allow triggering stimulation pulse only if the coil is at the target"))
lock_to_target_text = wx.StaticText(self, -1, _('Lock to target:'))
lock_to_target_checkbox = wx.CheckBox(self, -1, _(' '))
lock_to_target_checkbox.SetValue(False)
lock_to_target_checkbox.Enable(False)
lock_to_target_checkbox.Bind(wx.EVT_CHECKBOX, partial(self.OnLockToTargetCheckbox, ctrl=lock_to_target_checkbox))
lock_to_target_checkbox.SetToolTip(tooltip)
self.lock_to_target_checkbox = lock_to_target_checkbox
# Image and tracker coordinates number controls
for m in range(len(self.btns_set_fiducial)):
for n in range(3):
self.numctrls_fiducial[m].append(
wx.lib.masked.numctrl.NumCtrl(parent=self, integerWidth=4, fractionWidth=1))
# Sizers to group all GUI objects
choice_sizer = wx.FlexGridSizer(rows=1, cols=2, hgap=5, vgap=5)
choice_sizer.AddMany([(select_tracker_elem, wx.LEFT),
(choice_ref, wx.RIGHT)])
coord_sizer = wx.GridBagSizer(hgap=5, vgap=5)
for m in range(len(self.btns_set_fiducial)):
coord_sizer.Add(self.btns_set_fiducial[m], pos=wx.GBPosition(m, 0))
for n in range(3):
coord_sizer.Add(self.numctrls_fiducial[m][n], pos=wx.GBPosition(m, n+1))
if m in range(1, 6):
self.numctrls_fiducial[m][n].SetEditable(False)
nav_sizer = wx.FlexGridSizer(rows=1, cols=5, hgap=5, vgap=5)
nav_sizer.AddMany([(txt_fre, 0, wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL),
(txtctrl_fre, 0, wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL),
(btn_nav, 0, wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL),
(txt_icp, 0, wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL),
(checkbox_icp, 0, wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL)])
checkboxes_sizer = wx.FlexGridSizer(rows=1, cols=4, hgap=5, vgap=5)
checkboxes_sizer.AddMany([(lock_to_target_text, 0, wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL),
(lock_to_target_checkbox, 0, wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL)])
if pedal_connection is not None and pedal_connection.in_use:
checkboxes_sizer.AddMany([(txt_pedal_pressed, 0, wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL),
(checkbox_pedal_pressed, 0, wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL)])
group_sizer = wx.FlexGridSizer(rows=10, cols=1, hgap=5, vgap=5)
group_sizer.AddGrowableCol(0, 1)
group_sizer.AddGrowableRow(0, 1)
group_sizer.AddGrowableRow(1, 1)
group_sizer.AddGrowableRow(2, 1)
group_sizer.SetFlexibleDirection(wx.BOTH)
group_sizer.AddMany([(choice_sizer, 0, wx.ALIGN_CENTER_HORIZONTAL),
(coord_sizer, 0, wx.ALIGN_CENTER_HORIZONTAL),
(nav_sizer, 0, wx.ALIGN_CENTER_HORIZONTAL),
(checkboxes_sizer, 0, wx.ALIGN_CENTER_HORIZONTAL)])
main_sizer = wx.BoxSizer(wx.HORIZONTAL)
main_sizer.Add(group_sizer, 1)# wx.ALIGN_CENTER_HORIZONTAL, 10)
self.sizer = main_sizer
self.SetSizer(main_sizer)
self.Fit()
def __bind_events(self):
Publisher.subscribe(self.LoadImageFiducials, 'Load image fiducials')
Publisher.subscribe(self.SetImageFiducial, 'Set image fiducial')
Publisher.subscribe(self.SetTrackerFiducial, 'Set tracker fiducial')
Publisher.subscribe(self.UpdateTrackObjectState, 'Update track object state')
Publisher.subscribe(self.UpdateImageCoordinates, 'Set cross focal point')
Publisher.subscribe(self.OnDisconnectTracker, 'Disconnect tracker')
Publisher.subscribe(self.UpdateObjectRegistration, 'Update object registration')
Publisher.subscribe(self.OnCloseProject, 'Close project data')
Publisher.subscribe(self.UpdateTrekkerObject, 'Update Trekker object')
Publisher.subscribe(self.UpdateNumTracts, 'Update number of tracts')
Publisher.subscribe(self.UpdateSeedOffset, 'Update seed offset')
Publisher.subscribe(self.UpdateSeedRadius, 'Update seed radius')
Publisher.subscribe(self.UpdateSleep, 'Update sleep')
Publisher.subscribe(self.UpdateNumberThreads, 'Update number of threads')
Publisher.subscribe(self.UpdateTractsVisualization, 'Update tracts visualization')
Publisher.subscribe(self.UpdatePeelVisualization, 'Update peel visualization')
Publisher.subscribe(self.EnableACT, 'Enable ACT')
Publisher.subscribe(self.UpdateACTData, 'Update ACT data')
Publisher.subscribe(self.UpdateNavigationStatus, 'Navigation status')
Publisher.subscribe(self.UpdateTarget, 'Update target')
Publisher.subscribe(self.OnStartNavigation, 'Start navigation')
Publisher.subscribe(self.OnStopNavigation, 'Stop navigation')
def LoadImageFiducials(self, label, coord):
fiducial = self.GetFiducialByAttribute(const.IMAGE_FIDUCIALS, 'label', label)
fiducial_index = fiducial['fiducial_index']
fiducial_name = fiducial['fiducial_name']
if self.btns_set_fiducial[fiducial_index].GetValue():
print("Fiducial {} already set, not resetting".format(label))
return
Publisher.sendMessage('Set image fiducial', fiducial_name=fiducial_name, coord=coord[0:3])
self.btns_set_fiducial[fiducial_index].SetValue(True)
for m in [0, 1, 2]:
self.numctrls_fiducial[fiducial_index][m].SetValue(coord[m])
def GetFiducialByAttribute(self, fiducials, attribute_name, attribute_value):
found = [fiducial for fiducial in fiducials if fiducial[attribute_name] == attribute_value]
assert len(found) != 0, "No fiducial found for which {} = {}".format(attribute_name, attribute_value)
return found[0]
def SetImageFiducial(self, fiducial_name, coord):
fiducial = self.GetFiducialByAttribute(const.IMAGE_FIDUCIALS, 'fiducial_name', fiducial_name)
fiducial_index = fiducial['fiducial_index']
self.navigation.SetImageFiducial(fiducial_index, coord)
def SetTrackerFiducial(self, fiducial_name):
if not self.tracker.IsTrackerInitialized():
dlg.ShowNavigationTrackerWarning(0, 'choose')
return
fiducial = self.GetFiducialByAttribute(const.TRACKER_FIDUCIALS, 'fiducial_name', fiducial_name)
fiducial_index = fiducial['fiducial_index']
# XXX: The reference mode is fetched from navigation object, however it seems like not quite
# navigation-related attribute here, as the reference mode used during the fiducial registration
# is more concerned with the calibration than the navigation.
#
ref_mode_id = self.navigation.GetReferenceMode()
self.tracker.SetTrackerFiducial(ref_mode_id, fiducial_index)
self.ResetICP()
self.tracker.UpdateUI(self.select_tracker_elem, self.numctrls_fiducial[3:6], self.txtctrl_fre)
def UpdatePeelVisualization(self, data):
self.navigation.peel_loaded = data
def UpdateNavigationStatus(self, nav_status, vis_status):
self.nav_status = nav_status
if nav_status and self.icp.m_icp is not None:
self.checkbox_icp.Enable(True)
else:
self.checkbox_icp.Enable(False)
def UpdateTrekkerObject(self, data):
# self.trk_inp = data
self.navigation.trekker = data
def UpdateNumTracts(self, data):
self.navigation.n_tracts = data
def UpdateSeedOffset(self, data):
self.navigation.seed_offset = data
def UpdateSeedRadius(self, data):
self.navigation.seed_radius = data
def UpdateSleep(self, data):
self.navigation.UpdateSleep(data)
def UpdateNumberThreads(self, data):
self.navigation.n_threads = data
def UpdateTractsVisualization(self, data):
self.navigation.view_tracts = data
def UpdateACTData(self, data):
self.navigation.act_data = data
def UpdateTarget(self, coord):
self.navigation.target = coord
self.lock_to_target_checkbox.Enable(True)
self.lock_to_target_checkbox.SetValue(True)
self.navigation.SetLockToTarget(True)
def EnableACT(self, data):
self.navigation.enable_act = data
def UpdateImageCoordinates(self, position):
# TODO: Change from world coordinates to matrix coordinates. They are better for multi software communication.
self.current_coord = position
for m in [0, 1, 2]:
if not self.btns_set_fiducial[m].GetValue():
for n in [0, 1, 2]:
self.numctrls_fiducial[m][n].SetValue(float(position[n]))
def UpdateObjectRegistration(self, data=None):
self.navigation.obj_reg = data
def UpdateTrackObjectState(self, evt=None, flag=None, obj_name=None, polydata=None, use_default_object=True):
self.navigation.track_obj = flag
def ResetICP(self):
self.icp.ResetICP()
self.checkbox_icp.Enable(False)
self.checkbox_icp.SetValue(False)
def OnDisconnectTracker(self):
if self.tracker.tracker_id == const.ROBOT:
self.robot.StopRobotThreadNavigation()
self.tracker.DisconnectTracker()
self.ResetICP()
self.tracker.UpdateUI(self.select_tracker_elem, self.numctrls_fiducial[3:6], self.txtctrl_fre)
def OnLockToTargetCheckbox(self, evt, ctrl):
value = ctrl.GetValue()
self.navigation.SetLockToTarget(value)
def OnChooseTracker(self, evt, ctrl):
Publisher.sendMessage('Update status text in GUI',
label=_("Configuring tracker ..."))
if hasattr(evt, 'GetSelection'):
choice = evt.GetSelection()
else:
choice = None
self.tracker.SetTracker(choice)
if self.tracker.tracker_id == const.ROBOT:
self.tracker.ConnectToRobot(self.navigation, self.tracker, self.robot)
self.ResetICP()
self.tracker.UpdateUI(ctrl, self.numctrls_fiducial[3:6], self.txtctrl_fre)
Publisher.sendMessage('Update status text in GUI', label=_("Ready"))
def OnChooseReferenceMode(self, evt, ctrl):
self.navigation.SetReferenceMode(evt.GetSelection())
# When ref mode is changed the tracker coordinates are set to zero
self.tracker.ResetTrackerFiducials()
# Some trackers do not accept restarting within this time window
# TODO: Improve the restarting of trackers after changing reference mode
self.ResetICP()
print("Reference mode changed!")
def OnImageFiducials(self, n, evt):
fiducial_name = const.IMAGE_FIDUCIALS[n]['fiducial_name']
# XXX: This is still a bit hard to read, could be cleaned up.
label = list(const.BTNS_IMG_MARKERS[evt.GetId()].values())[0]
if self.btns_set_fiducial[n].GetValue():
coord = self.numctrls_fiducial[n][0].GetValue(),\
self.numctrls_fiducial[n][1].GetValue(),\
self.numctrls_fiducial[n][2].GetValue(), None, None, None
Publisher.sendMessage('Set image fiducial', fiducial_name=fiducial_name, coord=coord[0:3])
colour = (0., 1., 0.)
size = 2
seed = 3 * [0.]
Publisher.sendMessage('Create marker', coord=coord, colour=colour, size=size,
label=label, seed=seed)
else:
for m in [0, 1, 2]:
self.numctrls_fiducial[n][m].SetValue(float(self.current_coord[m]))
Publisher.sendMessage('Set image fiducial', fiducial_name=fiducial_name, coord=np.nan)
Publisher.sendMessage('Delete fiducial marker', label=label)
def OnTrackerFiducials(self, n, evt, ctrl):
# Do not allow several tracker fiducials to be set at the same time.
if self.tracker_fiducial_being_set is not None and self.tracker_fiducial_being_set != n:
ctrl.SetValue(False)
return
# Called when the button for setting the tracker fiducial is enabled and either pedal is pressed
# or the button is pressed again.
#
def set_fiducial_callback(state):
if state:
fiducial_name = const.TRACKER_FIDUCIALS[n]['fiducial_name']
Publisher.sendMessage('Set tracker fiducial', fiducial_name=fiducial_name)
ctrl.SetValue(False)
self.tracker_fiducial_being_set = None
if ctrl.GetValue():
self.tracker_fiducial_being_set = n
if self.pedal_connection is not None:
self.pedal_connection.add_callback(
name='fiducial',
callback=set_fiducial_callback,
remove_when_released=True,
)
else:
set_fiducial_callback(True)
if self.pedal_connection is not None:
self.pedal_connection.remove_callback(name='fiducial')
def OnStopNavigation(self):
select_tracker_elem = self.select_tracker_elem
choice_ref = self.choice_ref
self.navigation.StopNavigation()
if self.tracker.tracker_id == const.ROBOT:
Publisher.sendMessage('Robot target matrix', robot_tracker_flag=False,
m_change_robot_to_head=None)
# Enable all navigation buttons
choice_ref.Enable(True)
select_tracker_elem.Enable(True)
for btn_c in self.btns_set_fiducial:
btn_c.Enable(True)
def CheckFiducialRegistrationError(self):
self.navigation.UpdateFiducialRegistrationError(self.tracker)
fre, fre_ok = self.navigation.GetFiducialRegistrationError(self.icp)
self.txtctrl_fre.SetValue(str(round(fre, 2)))
if fre_ok:
self.txtctrl_fre.SetBackgroundColour('GREEN')
else:
self.txtctrl_fre.SetBackgroundColour('RED')
return fre_ok
def OnStartNavigation(self):
select_tracker_elem = self.select_tracker_elem
choice_ref = self.choice_ref
if not self.tracker.AreTrackerFiducialsSet() or not self.navigation.AreImageFiducialsSet():
wx.MessageBox(_("Invalid fiducials, select all coordinates."), _("InVesalius 3"))
elif not self.tracker.IsTrackerInitialized():
dlg.ShowNavigationTrackerWarning(0, 'choose')
errors = True
else:
# Prepare GUI for navigation.
Publisher.sendMessage("Toggle Cross", id=const.SLICE_STATE_CROSS)
Publisher.sendMessage("Hide current mask")
# Disable all navigation buttons.
choice_ref.Enable(False)
select_tracker_elem.Enable(False)
for btn_c in self.btns_set_fiducial:
btn_c.Enable(False)
self.navigation.StartNavigation(self.tracker)
if not self.CheckFiducialRegistrationError():
# TODO: Exhibit FRE in a warning dialog and only starts navigation after user clicks ok
print("WARNING: Fiducial registration error too large.")
self.icp.StartICP(self.navigation, self.tracker)
if self.icp.use_icp:
self.checkbox_icp.Enable(True)
self.checkbox_icp.SetValue(True)
# Update FRE once more after starting the navigation, due to the optional use of ICP,
# which improves FRE.
self.CheckFiducialRegistrationError()
def OnNavigate(self, evt, btn_nav):
select_tracker_elem = self.select_tracker_elem
choice_ref = self.choice_ref
nav_id = btn_nav.GetValue()
if not nav_id:
Publisher.sendMessage("Stop navigation")
tooltip = wx.ToolTip(_("Start neuronavigation"))
btn_nav.SetToolTip(tooltip)
else:
Publisher.sendMessage("Start navigation")
if self.nav_status:
tooltip = wx.ToolTip(_("Stop neuronavigation"))
btn_nav.SetToolTip(tooltip)
else:
btn_nav.SetValue(False)
def ResetUI(self):
for m in range(0, 3):
self.btns_set_fiducial[m].SetValue(False)
for n in range(0, 3):
self.numctrls_fiducial[m][n].SetValue(0.0)
def OnCheckboxICP(self, evt, ctrl):
self.icp.SetICP(self.navigation, ctrl.GetValue())
self.CheckFiducialRegistrationError()
def OnCloseProject(self):
self.ResetUI()
Publisher.sendMessage('Disconnect tracker')
Publisher.sendMessage('Update object registration')
Publisher.sendMessage('Update track object state', flag=False, obj_name=False)
Publisher.sendMessage('Delete all markers')
Publisher.sendMessage("Update marker offset state", create=False)
Publisher.sendMessage("Remove tracts")
Publisher.sendMessage("Set cross visibility", visibility=0)
# TODO: Reset camera initial focus
Publisher.sendMessage('Reset cam clipping range')
self.navigation.StopNavigation()
self.navigation.__init__(
pedal_connection=self.pedal_connection,
neuronavigation_api=self.neuronavigation_api,
)
self.tracker.__init__()
self.icp.__init__()
self.robot.__init__(self.tracker)
class ObjectRegistrationPanel(wx.Panel):
def __init__(self, parent, tracker, pedal_connection):
wx.Panel.__init__(self, parent)
try:
default_colour = wx.SystemSettings.GetColour(wx.SYS_COLOUR_MENUBAR)
except AttributeError:
default_colour = wx.SystemSettings_GetColour(wx.SYS_COLOUR_MENUBAR)
self.SetBackgroundColour(default_colour)
self.coil_list = const.COIL
self.tracker = tracker
self.pedal_connection = pedal_connection
self.nav_prop = None
self.obj_fiducials = None
self.obj_orients = None
self.obj_ref_mode = None
self.obj_name = None
self.timestamp = const.TIMESTAMP
self.SetAutoLayout(1)
self.__bind_events()
# Button for creating new coil
tooltip = wx.ToolTip(_("Create new coil"))
btn_new = wx.Button(self, -1, _("New"), size=wx.Size(65, 23))
btn_new.SetToolTip(tooltip)
btn_new.Enable(1)
btn_new.Bind(wx.EVT_BUTTON, self.OnLinkCreate)
self.btn_new = btn_new
# Button for import config coil file
tooltip = wx.ToolTip(_("Load coil configuration file"))
btn_load = wx.Button(self, -1, _("Load"), size=wx.Size(65, 23))
btn_load.SetToolTip(tooltip)
btn_load.Enable(1)
btn_load.Bind(wx.EVT_BUTTON, self.OnLinkLoad)
self.btn_load = btn_load
# Save button for object registration
tooltip = wx.ToolTip(_(u"Save object registration file"))
btn_save = wx.Button(self, -1, _(u"Save"), size=wx.Size(65, 23))
btn_save.SetToolTip(tooltip)
btn_save.Enable(1)
btn_save.Bind(wx.EVT_BUTTON, self.ShowSaveObjectDialog)
self.btn_save = btn_save
# Create a horizontal sizer to represent button save
line_save = wx.BoxSizer(wx.HORIZONTAL)
line_save.Add(btn_new, 1, wx.LEFT | wx.TOP | wx.RIGHT, 4)
line_save.Add(btn_load, 1, wx.LEFT | wx.TOP | wx.RIGHT, 4)
line_save.Add(btn_save, 1, wx.LEFT | wx.TOP | wx.RIGHT, 4)
# Change angles threshold
text_angles = wx.StaticText(self, -1, _("Angle threshold [degrees]:"))
spin_size_angles = wx.SpinCtrlDouble(self, -1, "", size=wx.Size(50, 23))
spin_size_angles.SetRange(0.1, 99)
spin_size_angles.SetValue(const.COIL_ANGLES_THRESHOLD)
spin_size_angles.Bind(wx.EVT_TEXT, partial(self.OnSelectAngleThreshold, ctrl=spin_size_angles))
spin_size_angles.Bind(wx.EVT_SPINCTRL, partial(self.OnSelectAngleThreshold, ctrl=spin_size_angles))
# Change dist threshold
text_dist = wx.StaticText(self, -1, _("Distance threshold [mm]:"))
spin_size_dist = wx.SpinCtrlDouble(self, -1, "", size=wx.Size(50, 23))
spin_size_dist.SetRange(0.1, 99)
spin_size_dist.SetValue(const.COIL_COORD_THRESHOLD)
spin_size_dist.Bind(wx.EVT_TEXT, partial(self.OnSelectDistThreshold, ctrl=spin_size_dist))
spin_size_dist.Bind(wx.EVT_SPINCTRL, partial(self.OnSelectDistThreshold, ctrl=spin_size_dist))
# Change timestamp interval
text_timestamp = wx.StaticText(self, -1, _("Timestamp interval [s]:"))
spin_timestamp_dist = wx.SpinCtrlDouble(self, -1, "", size=wx.Size(50, 23), inc = 0.1)
spin_timestamp_dist.SetRange(0.5, 60.0)
spin_timestamp_dist.SetValue(self.timestamp)
spin_timestamp_dist.Bind(wx.EVT_TEXT, partial(self.OnSelectTimestamp, ctrl=spin_timestamp_dist))
spin_timestamp_dist.Bind(wx.EVT_SPINCTRL, partial(self.OnSelectTimestamp, ctrl=spin_timestamp_dist))
self.spin_timestamp_dist = spin_timestamp_dist
# Create a horizontal sizer to threshold configs
line_angle_threshold = wx.BoxSizer(wx.HORIZONTAL)
line_angle_threshold.AddMany([(text_angles, 1, wx.EXPAND | wx.GROW | wx.TOP| wx.RIGHT | wx.LEFT, 5),
(spin_size_angles, 0, wx.ALL | wx.EXPAND | wx.GROW, 5)])
line_dist_threshold = wx.BoxSizer(wx.HORIZONTAL)
line_dist_threshold.AddMany([(text_dist, 1, wx.EXPAND | wx.GROW | wx.TOP| wx.RIGHT | wx.LEFT, 5),
(spin_size_dist, 0, wx.ALL | wx.EXPAND | wx.GROW, 5)])
line_timestamp = wx.BoxSizer(wx.HORIZONTAL)
line_timestamp.AddMany([(text_timestamp, 1, wx.EXPAND | wx.GROW | wx.TOP| wx.RIGHT | wx.LEFT, 5),
(spin_timestamp_dist, 0, wx.ALL | wx.EXPAND | wx.GROW, 5)])
# Check box for trigger monitoring to create markers from serial port
checkrecordcoords = wx.CheckBox(self, -1, _('Record coordinates'))
checkrecordcoords.SetValue(False)
checkrecordcoords.Enable(0)
checkrecordcoords.Bind(wx.EVT_CHECKBOX, partial(self.OnRecordCoords, ctrl=checkrecordcoords))
self.checkrecordcoords = checkrecordcoords
# Check box to track object or simply the stylus
checktrack = wx.CheckBox(self, -1, _('Track object'))
checktrack.SetValue(False)
checktrack.Enable(0)
checktrack.Bind(wx.EVT_CHECKBOX, partial(self.OnTrackObject, ctrl=checktrack))
self.checktrack = checktrack
line_checks = wx.BoxSizer(wx.HORIZONTAL)
line_checks.Add(checkrecordcoords, 0, wx.ALIGN_LEFT | wx.RIGHT | wx.LEFT, 5)
line_checks.Add(checktrack, 0, wx.RIGHT | wx.LEFT, 5)
# Add line sizers into main sizer
main_sizer = wx.BoxSizer(wx.VERTICAL)
main_sizer.Add(line_save, 0, wx.LEFT | wx.RIGHT | wx.TOP | wx.ALIGN_CENTER_HORIZONTAL, 5)
main_sizer.Add(line_angle_threshold, 0, wx.GROW | wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP, 5)
main_sizer.Add(line_dist_threshold, 0, wx.GROW | wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP, 5)
main_sizer.Add(line_timestamp, 0, wx.GROW | wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP, 5)
main_sizer.Add(line_checks, 0, wx.GROW | wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP | wx.BOTTOM, 10)
main_sizer.Fit(self)
self.SetSizer(main_sizer)
self.Update()
def __bind_events(self):
Publisher.subscribe(self.UpdateNavigationStatus, 'Navigation status')
Publisher.subscribe(self.OnCloseProject, 'Close project data')
Publisher.subscribe(self.OnRemoveObject, 'Remove object data')
def UpdateNavigationStatus(self, nav_status, vis_status):
if nav_status:
self.checkrecordcoords.Enable(1)
self.checktrack.Enable(0)
self.btn_save.Enable(0)
self.btn_new.Enable(0)
self.btn_load.Enable(0)
else:
self.OnRecordCoords(nav_status, self.checkrecordcoords)
self.checkrecordcoords.SetValue(False)
self.checkrecordcoords.Enable(0)
self.btn_save.Enable(1)
self.btn_new.Enable(1)
self.btn_load.Enable(1)
if self.obj_fiducials is not None:
self.checktrack.Enable(1)
#Publisher.sendMessage('Enable target button', True)
def OnSelectAngleThreshold(self, evt, ctrl):
Publisher.sendMessage('Update angle threshold', angle=ctrl.GetValue())
def OnSelectDistThreshold(self, evt, ctrl):
Publisher.sendMessage('Update dist threshold', dist_threshold=ctrl.GetValue())
def OnSelectTimestamp(self, evt, ctrl):
self.timestamp = ctrl.GetValue()
def OnRecordCoords(self, evt, ctrl):
if ctrl.GetValue() and evt:
self.spin_timestamp_dist.Enable(0)
self.thr_record = rec.Record(ctrl.GetValue(), self.timestamp)
elif (not ctrl.GetValue() and evt) or (ctrl.GetValue() and not evt):
self.spin_timestamp_dist.Enable(1)
self.thr_record.stop()
elif not ctrl.GetValue() and not evt:
pass
def OnTrackObject(self, evt, ctrl):
Publisher.sendMessage('Update track object state', flag=evt.GetSelection(), obj_name=self.obj_name)
def OnComboCoil(self, evt):
# coil_name = evt.GetString()
coil_index = evt.GetSelection()
Publisher.sendMessage('Change selected coil', self.coil_list[coil_index][1])
def OnLinkCreate(self, event=None):
if self.tracker.IsTrackerInitialized():
dialog = dlg.ObjectCalibrationDialog(self.tracker, self.pedal_connection)
try:
if dialog.ShowModal() == wx.ID_OK:
self.obj_fiducials, self.obj_orients, self.obj_ref_mode, self.obj_name, polydata, use_default_object = dialog.GetValue()
if np.isfinite(self.obj_fiducials).all() and np.isfinite(self.obj_orients).all():
self.checktrack.Enable(1)
Publisher.sendMessage('Update object registration',
data=(self.obj_fiducials, self.obj_orients, self.obj_ref_mode, self.obj_name))
Publisher.sendMessage('Update status text in GUI',
label=_("Ready"))
# Enable automatically Track object, Show coil and disable Vol. Camera
self.checktrack.SetValue(True)
Publisher.sendMessage(
'Update track object state',
flag=True,
obj_name=self.obj_name,
polydata=polydata,
use_default_object=use_default_object,
)
Publisher.sendMessage('Change camera checkbox', status=False)
except wx._core.PyAssertionError: # TODO FIX: win64
pass
else:
dlg.ShowNavigationTrackerWarning(0, 'choose')
def OnLinkLoad(self, event=None):
filename = dlg.ShowLoadSaveDialog(message=_(u"Load object registration"),
wildcard=_("Registration files (*.obr)|*.obr"))
# data_dir = os.environ.get('OneDrive') + r'\data\dti_navigation\baran\anat_reg_improve_20200609'
# coil_path = 'magstim_coil_dell_laptop.obr'
# filename = os.path.join(data_dir, coil_path)
try:
if filename:
with open(filename, 'r') as text_file:
data = [s.split('\t') for s in text_file.readlines()]
registration_coordinates = np.array(data[1:]).astype(np.float32)
self.obj_fiducials = registration_coordinates[:, :3]
self.obj_orients = registration_coordinates[:, 3:]
self.obj_name = data[0][1]
self.obj_ref_mode = int(data[0][-1])
self.checktrack.Enable(1)
self.checktrack.SetValue(True)
Publisher.sendMessage('Update object registration',
data=(self.obj_fiducials, self.obj_orients, self.obj_ref_mode, self.obj_name))
Publisher.sendMessage('Update status text in GUI',
label=_("Object file successfully loaded"))
Publisher.sendMessage('Update track object state', flag=True, obj_name=self.obj_name)
Publisher.sendMessage('Change camera checkbox', status=False)
wx.MessageBox(_("Object file successfully loaded"), _("InVesalius 3"))
except:
wx.MessageBox(_("Object registration file incompatible."), _("InVesalius 3"))
Publisher.sendMessage('Update status text in GUI', label="")
def ShowSaveObjectDialog(self, evt):
if np.isnan(self.obj_fiducials).any() or np.isnan(self.obj_orients).any():
wx.MessageBox(_("Digitize all object fiducials before saving"), _("Save error"))
else:
filename = dlg.ShowLoadSaveDialog(message=_(u"Save object registration as..."),
wildcard=_("Registration files (*.obr)|*.obr"),
style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT,
default_filename="object_registration.obr", save_ext="obr")
if filename:
hdr = 'Object' + "\t" + utils.decode(self.obj_name, const.FS_ENCODE) + "\t" + 'Reference' + "\t" + str('%d' % self.obj_ref_mode)
data = np.hstack([self.obj_fiducials, self.obj_orients])
np.savetxt(filename, data, fmt='%.4f', delimiter='\t', newline='\n', header=hdr)
wx.MessageBox(_("Object file successfully saved"), _("Save"))
def OnCloseProject(self):
self.OnRemoveObject()
def OnRemoveObject(self):
self.checkrecordcoords.SetValue(False)
self.checkrecordcoords.Enable(0)
self.checktrack.SetValue(False)
self.checktrack.Enable(0)
self.nav_prop = None
self.obj_fiducials = None
self.obj_orients = None
self.obj_ref_mode = None
self.obj_name = None
self.timestamp = const.TIMESTAMP
Publisher.sendMessage('Update track object state', flag=False, obj_name=False)
class MarkersPanel(wx.Panel):
@dataclasses.dataclass
class Marker:
"""Class for storing markers. @dataclass decorator simplifies
setting default values, serialization, etc."""
x : float = 0
y : float = 0
z : float = 0
alpha : float = dataclasses.field(default = None)
beta : float = dataclasses.field(default = None)
gamma : float = dataclasses.field(default = None)
r : float = 0
g : float = 1
b : float = 0
size : int = 2
label : str = '*'
x_seed : float = 0
y_seed : float = 0
z_seed : float = 0
is_target : bool = False
session_id : int = 1
# x, y, z, alpha, beta, gamma can be jointly accessed as coord
@property
def coord(self):
return [self.x, self.y, self.z, self.alpha, self.beta, self.gamma]
@coord.setter
def coord(self, new_coord):
self.x, self.y, self.z, self.alpha, self.beta, self.gamma = new_coord
# r, g, b can be jointly accessed as colour
@property
def colour(self):
return [self.r, self.g, self.b]
@colour.setter
def colour(self, new_colour):
self.r, self.g, self.b = new_colour
# x_seed, y_seed, z_seed can be jointly accessed as seed
@property
def seed(self):
return [self.x_seed, self.y_seed, self.z_seed]
@seed.setter
def seed(self, new_seed):
self.x_seed, self.y_seed, self.z_seed = new_seed
@classmethod
def to_string_headers(cls):
"""Return the string containing tab-separated list of field names (headers)."""
res = [field.name for field in dataclasses.fields(cls)]
res.extend(['x_world', 'y_world', 'z_world', 'alpha_world', 'beta_world', 'gamma_world'])
return '\t'.join(map(lambda x: '\"%s\"' % x, res))
def to_string(self):
"""Serialize to excel-friendly tab-separated string"""
res = ''
for field in dataclasses.fields(self.__class__):
if field.type is str:
res += ('\"%s\"\t' % getattr(self, field.name))
else:
res += ('%s\t' % str(getattr(self, field.name)))
if self.alpha is not None and self.beta is not None and self.gamma is not None:
# Add world coordinates (in addition to the internal ones).
position_world, orientation_world = imagedata_utils.convert_invesalius_to_world(
position=[self.x, self.y, self.z],
orientation=[self.alpha, self.beta, self.gamma],
)
else:
position_world, orientation_world = imagedata_utils.convert_invesalius_to_world(
position=[self.x, self.y, self.z],
orientation=[0,0,0],
)
res += '\t'.join(map(lambda x: 'N/A' if x is None else str(x), (*position_world, *orientation_world)))
return res
def from_string(self, inp_str):
"""Deserialize from a tab-separated string. If the string is not
properly formatted, might throw an exception and leave the object
in an inconsistent state."""
for field, str_val in zip(dataclasses.fields(self.__class__), inp_str.split('\t')):
if field.type is float and str_val != 'None':
setattr(self, field.name, float(str_val))
if field.type is float and str_val == 'None':
setattr(self, field.name, None)
if field.type is int:
setattr(self, field.name, int(str_val))
if field.type is str:
setattr(self, field.name, str_val[1:-1]) # remove the quotation marks
if field.type is bool:
setattr(self, field.name, str_val=='True')
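# Illustrative round-trip for this dataclass (a hedged sketch, not part of
# the original class; the extra world-coordinate columns appended by
# to_string() are simply ignored by the zip() in from_string(), and
# to_string() itself needs imagedata_utils to be usable):
#
#   m = MarkersPanel.Marker(x=1.0, y=2.0, z=3.0, label='P1')
#   line = m.to_string()       # tab-separated fields + world coordinates
#   m2 = MarkersPanel.Marker()
#   m2.from_string(line)       # restores x, y, z, ..., label, session_id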
@dataclasses.dataclass
class Robot_Marker:
"""Class for storing robot target."""
m_robot_target : list = None
@property
def robot_target_matrix(self):
return self.m_robot_target
@robot_target_matrix.setter
def robot_target_matrix(self, new_m_robot_target):
self.m_robot_target = new_m_robot_target
def __init__(self, parent, tracker):
wx.Panel.__init__(self, parent)
try:
default_colour = wx.SystemSettings.GetColour(wx.SYS_COLOUR_MENUBAR)
except AttributeError:
default_colour = wx.SystemSettings_GetColour(wx.SYS_COLOUR_MENUBAR)
self.SetBackgroundColour(default_colour)
self.SetAutoLayout(1)
self.tracker = tracker
self.__bind_events()
self.session = ses.Session()
self.current_coord = 0, 0, 0, None, None, None
self.current_seed = 0, 0, 0
self.current_robot_target_matrix = [None] * 9
self.markers = []
self.robot_markers = []
self.nav_status = False
self.raw_target_robot = None, None
self.marker_colour = const.MARKER_COLOUR
self.marker_size = const.MARKER_SIZE
self.arrow_marker_size = const.ARROW_MARKER_SIZE
self.current_session = 1
# Change marker size
spin_size = wx.SpinCtrl(self, -1, "", size=wx.Size(40, 23))
spin_size.SetRange(1, 99)
spin_size.SetValue(self.marker_size)
spin_size.Bind(wx.EVT_TEXT, partial(self.OnSelectSize, ctrl=spin_size))
spin_size.Bind(wx.EVT_SPINCTRL, partial(self.OnSelectSize, ctrl=spin_size))
# Marker colour select
select_colour = csel.ColourSelect(self, -1, colour=[255*s for s in self.marker_colour], size=wx.Size(20, 23))
select_colour.Bind(csel.EVT_COLOURSELECT, partial(self.OnSelectColour, ctrl=select_colour))
btn_create = wx.Button(self, -1, label=_('Create marker'), size=wx.Size(135, 23))
btn_create.Bind(wx.EVT_BUTTON, self.OnCreateMarker)
sizer_create = wx.FlexGridSizer(rows=1, cols=3, hgap=5, vgap=5)
sizer_create.AddMany([(spin_size, 1),
(select_colour, 0),
(btn_create, 0)])
# Buttons to save and load markers and to change its visibility as well
btn_save = wx.Button(self, -1, label=_('Save'), size=wx.Size(65, 23))
btn_save.Bind(wx.EVT_BUTTON, self.OnSaveMarkers)
btn_load = wx.Button(self, -1, label=_('Load'), size=wx.Size(65, 23))
btn_load.Bind(wx.EVT_BUTTON, self.OnLoadMarkers)
btn_visibility = wx.ToggleButton(self, -1, _("Hide"), size=wx.Size(65, 23))
btn_visibility.Bind(wx.EVT_TOGGLEBUTTON, partial(self.OnMarkersVisibility, ctrl=btn_visibility))
sizer_btns = wx.FlexGridSizer(rows=1, cols=3, hgap=5, vgap=5)
sizer_btns.AddMany([(btn_save, 1, wx.RIGHT),
(btn_load, 0, wx.LEFT | wx.RIGHT),
(btn_visibility, 0, wx.LEFT)])
# Buttons to delete or remove markers
btn_delete_single = wx.Button(self, -1, label=_('Remove'), size=wx.Size(65, 23))
btn_delete_single.Bind(wx.EVT_BUTTON, self.OnDeleteMultipleMarkers)
btn_delete_all = wx.Button(self, -1, label=_('Delete all'), size=wx.Size(135, 23))
btn_delete_all.Bind(wx.EVT_BUTTON, self.OnDeleteAllMarkers)
sizer_delete = wx.FlexGridSizer(rows=1, cols=2, hgap=5, vgap=5)
sizer_delete.AddMany([(btn_delete_single, 1, wx.RIGHT),
(btn_delete_all, 0, wx.LEFT)])
# List of markers
self.lc = wx.ListCtrl(self, -1, style=wx.LC_REPORT, size=wx.Size(0,120))
self.lc.InsertColumn(const.ID_COLUMN, '#')
self.lc.SetColumnWidth(const.ID_COLUMN, 28)
self.lc.InsertColumn(const.SESSION_COLUMN, 'Session')
self.lc.SetColumnWidth(const.SESSION_COLUMN, 52)
self.lc.InsertColumn(const.LABEL_COLUMN, 'Label')
self.lc.SetColumnWidth(const.LABEL_COLUMN, 118)
self.lc.InsertColumn(const.TARGET_COLUMN, 'Target')
self.lc.SetColumnWidth(const.TARGET_COLUMN, 45)
if self.session.debug:
self.lc.InsertColumn(const.X_COLUMN, 'X')
self.lc.SetColumnWidth(const.X_COLUMN, 45)
self.lc.InsertColumn(const.Y_COLUMN, 'Y')
self.lc.SetColumnWidth(const.Y_COLUMN, 45)
self.lc.InsertColumn(const.Z_COLUMN, 'Z')
self.lc.SetColumnWidth(const.Z_COLUMN, 45)
self.lc.Bind(wx.EVT_LIST_ITEM_RIGHT_CLICK, self.OnMouseRightDown)
self.lc.Bind(wx.EVT_LIST_ITEM_ACTIVATED, self.OnItemBlink)
self.lc.Bind(wx.EVT_LIST_ITEM_DESELECTED, self.OnStopItemBlink)
# Add all lines into main sizer
group_sizer = wx.BoxSizer(wx.VERTICAL)
group_sizer.Add(sizer_create, 0, wx.TOP | wx.BOTTOM | wx.ALIGN_CENTER_HORIZONTAL, 5)
group_sizer.Add(sizer_btns, 0, wx.BOTTOM | wx.ALIGN_CENTER_HORIZONTAL, 5)
group_sizer.Add(sizer_delete, 0, wx.BOTTOM | wx.ALIGN_CENTER_HORIZONTAL, 5)
group_sizer.Add(self.lc, 0, wx.EXPAND | wx.ALL, 5)
group_sizer.Fit(self)
self.SetSizer(group_sizer)
self.Update()
def __bind_events(self):
Publisher.subscribe(self.UpdateCurrentCoord, 'Set cross focal point')
Publisher.subscribe(self.OnDeleteMultipleMarkers, 'Delete fiducial marker')
Publisher.subscribe(self.OnDeleteAllMarkers, 'Delete all markers')
Publisher.subscribe(self.CreateMarker, 'Create marker')
Publisher.subscribe(self.UpdateNavigationStatus, 'Navigation status')
Publisher.subscribe(self.UpdateSeedCoordinates, 'Update tracts')
Publisher.subscribe(self.OnChangeCurrentSession, 'Current session changed')
Publisher.subscribe(self.UpdateRobotCoordinates, 'Update raw coordinates')
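# Sketch of how these subscriptions fire: pypubsub matches sendMessage
# kwargs to the handler's keyword arguments, e.g. a publisher elsewhere
# calls Publisher.sendMessage('Set cross focal point', position=(x, y, z, a, b, g)),
# which invokes UpdateCurrentCoord(position=(x, y, z, a, b, g)) below.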
def __find_target_marker(self):
"""
Return the index of the marker currently selected as target (there
should be at most one). If there is no such marker, return None.
"""
for i in range(len(self.markers)):
if self.markers[i].is_target:
return i
return None
def __get_selected_items(self):
"""
Returns a (possibly empty) list of the selected items in the list control.
"""
selection = []
next = self.lc.GetFirstSelected()
while next != -1:
selection.append(next)
next = self.lc.GetNextSelected(next)
return selection
def __delete_multiple_markers(self, index):
"""
Delete the markers given by the list `index`, which must be sorted in
ascending order.
"""
for i in reversed(index):
del self.markers[i]
del self.robot_markers[i]
self.lc.DeleteItem(i)
for n in range(0, self.lc.GetItemCount()):
self.lc.SetItem(n, 0, str(n + 1))
Publisher.sendMessage('Remove multiple markers', index=index)
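# Why reversed(index): deleting from the end keeps lower indices valid.
# E.g. for markers [m0, m1, m2, m3] and index=[1, 3], deleting 3 then 1
# removes m3 and m1; deleting 1 first would shift m3 to slot 2 and the
# later del at index 3 would raise IndexError.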
def __set_marker_as_target(self, idx):
"""
Set marker indexed by idx as the new target. idx must be a valid index.
"""
# Find the previous target
prev_idx = self.__find_target_marker()
# If the new target is same as the previous do nothing.
if prev_idx == idx:
return
# Unset the previous target
if prev_idx is not None:
self.markers[prev_idx].is_target = False
self.lc.SetItemBackgroundColour(prev_idx, 'white')
Publisher.sendMessage('Set target transparency', status=False, index=prev_idx)
self.lc.SetItem(prev_idx, const.TARGET_COLUMN, "")
# Set the new target
self.markers[idx].is_target = True
self.lc.SetItemBackgroundColour(idx, 'RED')
self.lc.SetItem(idx, const.TARGET_COLUMN, _("Yes"))
Publisher.sendMessage('Update target', coord=self.markers[idx].coord)
Publisher.sendMessage('Set target transparency', status=True, index=idx)
wx.MessageBox(_("New target selected."), _("InVesalius 3"))
@staticmethod
def __list_fiducial_labels():
"""Return the list of marker labels denoting fiducials."""
return list(itertools.chain(*(const.BTNS_IMG_MARKERS[i].values() for i in const.BTNS_IMG_MARKERS)))
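# Illustrative only (hypothetical structure): if BTNS_IMG_MARKERS were
# {btn_a: {1: 'LEI'}, btn_b: {2: 'REI'}, btn_c: {3: 'NAI'}}, chaining the
# inner dict values yields ['LEI', 'REI', 'NAI'].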
def UpdateCurrentCoord(self, position):
self.current_coord = list(position)
def UpdateNavigationStatus(self, nav_status, vis_status):
if not nav_status:
self.current_coord = [None, None, None]
self.nav_status = False
else:
self.nav_status = True
def UpdateSeedCoordinates(self, root=None, affine_vtk=None, coord_offset=(0, 0, 0), coord_offset_w=(0, 0, 0)):
self.current_seed = coord_offset_w
def UpdateRobotCoordinates(self, coordinates_raw, markers_flag):
self.raw_target_robot = coordinates_raw[1], coordinates_raw[2]
def OnMouseRightDown(self, evt):
# TODO: Enable the "Set as target" only when target is created with registered object
menu_id = wx.Menu()
edit_id = menu_id.Append(0, _('Edit label'))
menu_id.Bind(wx.EVT_MENU, self.OnMenuEditMarkerLabel, edit_id)
color_id = menu_id.Append(2, _('Edit color'))
menu_id.Bind(wx.EVT_MENU, self.OnMenuSetColor, color_id)
menu_id.AppendSeparator()
target_menu = menu_id.Append(1, _('Set as target'))
menu_id.Bind(wx.EVT_MENU, self.OnMenuSetTarget, target_menu)
menu_id.AppendSeparator()
send_target_to_robot = menu_id.Append(3, _('Send target to robot'))
menu_id.Bind(wx.EVT_MENU, self.OnMenuSendTargetToRobot, send_target_to_robot)
# Enable "Send target to robot" button only if tracker is robot, if navigation is on and if target is not none
m_target_robot = np.array([self.robot_markers[self.lc.GetFocusedItem()].robot_target_matrix])
if self.tracker.tracker_id == const.ROBOT and self.nav_status and m_target_robot.any():
send_target_to_robot.Enable(True)
else:
send_target_to_robot.Enable(False)
# TODO: Create the remove target option so the user can disable the target without removing the marker
# target_menu_rem = menu_id.Append(3, _('Remove target'))
# menu_id.Bind(wx.EVT_MENU, self.OnMenuRemoveTarget, target_menu_rem)
target_menu.Enable(True)
self.PopupMenu(menu_id)
menu_id.Destroy()
def OnItemBlink(self, evt):
Publisher.sendMessage('Blink Marker', index=self.lc.GetFocusedItem())
def OnStopItemBlink(self, evt):
Publisher.sendMessage('Stop Blink Marker')
def OnMenuEditMarkerLabel(self, evt):
list_index = self.lc.GetFocusedItem()
if list_index != -1:
new_label = dlg.ShowEnterMarkerID(self.lc.GetItemText(list_index, const.LABEL_COLUMN))
self.markers[list_index].label = str(new_label)
self.lc.SetItem(list_index, const.LABEL_COLUMN, new_label)
else:
wx.MessageBox(_("No data selected."), _("InVesalius 3"))
def OnMenuSetTarget(self, evt):
idx = self.lc.GetFocusedItem()
if idx != -1:
self.__set_marker_as_target(idx)
else:
wx.MessageBox(_("No data selected."), _("InVesalius 3"))
def OnMenuSetColor(self, evt):
index = self.lc.GetFocusedItem()
if index == -1:
wx.MessageBox(_("No data selected."), _("InVesalius 3"))
return
color_current = [ch * 255 for ch in self.markers[index].colour]
color_new = dlg.ShowColorDialog(color_current=color_current)
if color_new:
assert len(color_new) == 3
# XXX: Seems like a slightly too early point for rounding; better to round only when the value
# is printed to the screen or file.
#
self.markers[index].colour = [round(s / 255.0, 3) for s in color_new]
Publisher.sendMessage('Set new color', index=index, color=color_new)
def OnMenuSendTargetToRobot(self, evt):
if isinstance(evt, int):
self.lc.Focus(evt)
m_target_robot = self.robot_markers[self.lc.GetFocusedItem()].robot_target_matrix
Publisher.sendMessage('Reset robot process')
Publisher.sendMessage('Robot target matrix', robot_tracker_flag=True, m_change_robot_to_head=m_target_robot)
def OnDeleteAllMarkers(self, evt=None):
if evt is not None:
result = dlg.ShowConfirmationDialog(msg=_("Remove all markers? Cannot be undone."))
if result != wx.ID_OK:
return
if self.__find_target_marker() is not None:
Publisher.sendMessage('Disable or enable coil tracker', status=False)
if evt is not None:
wx.MessageBox(_("Target deleted."), _("InVesalius 3"))
self.markers = []
self.robot_markers = []
Publisher.sendMessage('Remove all markers', indexes=self.lc.GetItemCount())
self.lc.DeleteAllItems()
Publisher.sendMessage('Stop Blink Marker', index='DeleteAll')
def OnDeleteMultipleMarkers(self, evt=None, label=None):
# OnDeleteMultipleMarkers is used for both pubsub and button click events
# Pubsub is used for fiducial handling; button clicks for all others
# called through pubsub
if not evt:
index = []
if label and (label in self.__list_fiducial_labels()):
for id_n in range(self.lc.GetItemCount()):
item = self.lc.GetItem(id_n, const.LABEL_COLUMN)
if item.GetText() == label:
self.lc.Focus(item.GetId())
index = [self.lc.GetFocusedItem()]
# called from button click
else:
index = self.__get_selected_items()
if index:
if self.__find_target_marker() in index:
Publisher.sendMessage('Disable or enable coil tracker', status=False)
if self.tracker.tracker_id == const.ROBOT:
Publisher.sendMessage('Robot target matrix', robot_tracker_flag=False,
m_change_robot_to_head=[])
wx.MessageBox(_("Target deleted."), _("InVesalius 3"))
self.__delete_multiple_markers(index)
else:
if evt: # Don't show the warning if called through pubsub
wx.MessageBox(_("No data selected."), _("InVesalius 3"))
def OnCreateMarker(self, evt):
self.CreateMarker()
def OnLoadMarkers(self, evt):
"""Loads markers from file and appends them to the current marker list.
The file should contain no more than a single target marker. Also the
file should not contain any fiducials already in the list."""
filename = dlg.ShowLoadSaveDialog(message=_(u"Load markers"),
wildcard=const.WILDCARD_MARKER_FILES)
if not filename:
return
try:
with open(filename, 'r') as file:
magick_line = file.readline()
assert magick_line.startswith(const.MARKER_FILE_MAGICK_STRING)
ver = int(magick_line.split('_')[-1])
if ver != 0:
wx.MessageBox(_("Unknown version of the markers file."), _("InVesalius 3"))
return
file.readline() # skip the header line
# Read the data lines and create markers
for line in file.readlines():
marker = self.Marker()
marker.from_string(line)
self.CreateMarker(coord=marker.coord, colour=marker.colour, size=marker.size,
label=marker.label, is_target=False, seed=marker.seed, session_id=marker.session_id)
if marker.label in self.__list_fiducial_labels():
Publisher.sendMessage('Load image fiducials', label=marker.label, coord=marker.coord)
# If the new marker has is_target=True, we first create
# a marker with is_target=False, and then call __set_marker_as_target
if marker.is_target:
self.__set_marker_as_target(len(self.markers) - 1)
except Exception as e:
wx.MessageBox(_("Invalid markers file."), _("InVesalius 3"))
def OnMarkersVisibility(self, evt, ctrl):
if ctrl.GetValue():
Publisher.sendMessage('Hide all markers', indexes=self.lc.GetItemCount())
ctrl.SetLabel('Show')
else:
Publisher.sendMessage('Show all markers', indexes=self.lc.GetItemCount())
ctrl.SetLabel('Hide')
def OnSaveMarkers(self, evt):
prj_data = prj.Project()
timestamp = time.localtime(time.time())
stamp_date = '{:0>4d}{:0>2d}{:0>2d}'.format(timestamp.tm_year, timestamp.tm_mon, timestamp.tm_mday)
stamp_time = '{:0>2d}{:0>2d}{:0>2d}'.format(timestamp.tm_hour, timestamp.tm_min, timestamp.tm_sec)
sep = '-'
parts = [stamp_date, stamp_time, prj_data.name, 'markers']
default_filename = sep.join(parts) + '.mkss'
filename = dlg.ShowLoadSaveDialog(message=_(u"Save markers as..."),
wildcard=const.WILDCARD_MARKER_FILES,
style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT,
default_filename=default_filename)
if not filename:
return
try:
with open(filename, 'w', newline='') as file:
file.writelines(['%s%i\n' % (const.MARKER_FILE_MAGICK_STRING, const.CURRENT_MARKER_FILE_VERSION)])
file.writelines(['%s\n' % self.Marker.to_string_headers()])
file.writelines('%s\n' % marker.to_string() for marker in self.markers)
except Exception:
wx.MessageBox(_("Error writing markers file."), _("InVesalius 3"))
def OnSelectColour(self, evt, ctrl):
# TODO: Make sure GetValue returns 3 numbers (without alpha)
self.marker_colour = [colour / 255.0 for colour in ctrl.GetValue()][:3]
def OnSelectSize(self, evt, ctrl):
self.marker_size = ctrl.GetValue()
def OnChangeCurrentSession(self, new_session_id):
self.current_session = new_session_id
def CreateMarker(self, coord=None, colour=None, size=None, label='*', is_target=False, seed=None, session_id=None):
new_marker = self.Marker()
new_marker.coord = coord or self.current_coord
new_marker.colour = colour or self.marker_colour
new_marker.size = size or self.marker_size
new_marker.label = label
new_marker.is_target = is_target
new_marker.seed = seed or self.current_seed
new_marker.session_id = session_id or self.current_session
if self.tracker.tracker_id == const.ROBOT and self.nav_status:
self.current_robot_target_matrix = db.compute_robot_to_head_matrix(self.raw_target_robot)
else:
self.current_robot_target_matrix = [None] * 9
new_robot_marker = self.Robot_Marker()
new_robot_marker.robot_target_matrix = self.current_robot_target_matrix
# Note that ball_id is zero-based, so we assign it len(self.markers) before the new marker is added
if all([elem is not None for elem in new_marker.coord[3:]]):
Publisher.sendMessage('Add arrow marker', arrow_id=len(self.markers),
size=self.arrow_marker_size,
color=new_marker.colour,
coord=new_marker.coord)
else:
Publisher.sendMessage('Add marker', ball_id=len(self.markers),
size=new_marker.size,
colour=new_marker.colour,
coord=new_marker.coord[:3])
self.markers.append(new_marker)
self.robot_markers.append(new_robot_marker)
# Add item to list control in panel
num_items = self.lc.GetItemCount()
self.lc.InsertItem(num_items, str(num_items + 1))
self.lc.SetItem(num_items, const.SESSION_COLUMN, str(new_marker.session_id))
self.lc.SetItem(num_items, const.LABEL_COLUMN, new_marker.label)
if self.session.debug:
self.lc.SetItem(num_items, const.X_COLUMN, str(round(new_marker.x, 1)))
self.lc.SetItem(num_items, const.Y_COLUMN, str(round(new_marker.y, 1)))
self.lc.SetItem(num_items, const.Z_COLUMN, str(round(new_marker.z, 1)))
self.lc.EnsureVisible(num_items)
class DbsPanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
try:
default_colour = wx.SystemSettings.GetColour(wx.SYS_COLOUR_MENUBAR)
except AttributeError:
default_colour = wx.SystemSettings_GetColour(wx.SYS_COLOUR_MENUBAR)
class TractographyPanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
try:
default_colour = wx.SystemSettings.GetColour(wx.SYS_COLOUR_MENUBAR)
except AttributeError:
default_colour = wx.SystemSettings_GetColour(wx.SYS_COLOUR_MENUBAR)
self.SetBackgroundColour(default_colour)
self.affine = np.identity(4)
self.affine_vtk = None
self.trekker = None
self.n_tracts = const.N_TRACTS
self.peel_depth = const.PEEL_DEPTH
self.view_tracts = False
self.seed_offset = const.SEED_OFFSET
self.seed_radius = const.SEED_RADIUS
self.sleep_nav = const.SLEEP_NAVIGATION
self.brain_opacity = const.BRAIN_OPACITY
self.brain_peel = None
self.brain_actor = None
self.n_peels = const.MAX_PEEL_DEPTH
self.p_old = np.array([[0., 0., 0.]])
self.tracts_run = None
self.trekker_cfg = const.TREKKER_CONFIG
self.nav_status = False
self.peel_loaded = False
self.SetAutoLayout(1)
self.__bind_events()
# Button for import config coil file
tooltip = wx.ToolTip(_("Load FOD"))
btn_load = wx.Button(self, -1, _("FOD"), size=wx.Size(50, 23))
btn_load.SetToolTip(tooltip)
btn_load.Enable(1)
btn_load.Bind(wx.EVT_BUTTON, self.OnLinkFOD)
# self.btn_load = btn_load
# Save button for object registration
tooltip = wx.ToolTip(_(u"Load Trekker configuration parameters"))
btn_load_cfg = wx.Button(self, -1, _(u"Configure"), size=wx.Size(65, 23))
btn_load_cfg.SetToolTip(tooltip)
btn_load_cfg.Enable(1)
btn_load_cfg.Bind(wx.EVT_BUTTON, self.OnLoadParameters)
# self.btn_load_cfg = btn_load_cfg
# Button for creating new coil
tooltip = wx.ToolTip(_("Load brain visualization"))
btn_mask = wx.Button(self, -1, _("Brain"), size=wx.Size(50, 23))
btn_mask.SetToolTip(tooltip)
btn_mask.Enable(1)
btn_mask.Bind(wx.EVT_BUTTON, self.OnLinkBrain)
# self.btn_new = btn_new
# Button for creating new coil
tooltip = wx.ToolTip(_("Load anatomical labels"))
btn_act = wx.Button(self, -1, _("ACT"), size=wx.Size(50, 23))
btn_act.SetToolTip(tooltip)
btn_act.Enable(1)
btn_act.Bind(wx.EVT_BUTTON, self.OnLoadACT)
# self.btn_new = btn_new
# Create a horizontal sizer to represent button save
line_btns = wx.BoxSizer(wx.HORIZONTAL)
line_btns.Add(btn_load, 1, wx.LEFT | wx.TOP | wx.RIGHT, 2)
line_btns.Add(btn_load_cfg, 1, wx.LEFT | wx.TOP | wx.RIGHT, 2)
line_btns.Add(btn_mask, 1, wx.LEFT | wx.TOP | wx.RIGHT, 2)
line_btns.Add(btn_act, 1, wx.LEFT | wx.TOP | wx.RIGHT, 2)
# Change peeling depth
text_peel_depth = wx.StaticText(self, -1, _("Peeling depth (mm):"))
spin_peel_depth = wx.SpinCtrl(self, -1, "", size=wx.Size(50, 23))
spin_peel_depth.Enable(1)
spin_peel_depth.SetRange(0, const.MAX_PEEL_DEPTH)
spin_peel_depth.SetValue(const.PEEL_DEPTH)
spin_peel_depth.Bind(wx.EVT_TEXT, partial(self.OnSelectPeelingDepth, ctrl=spin_peel_depth))
spin_peel_depth.Bind(wx.EVT_SPINCTRL, partial(self.OnSelectPeelingDepth, ctrl=spin_peel_depth))
# Change number of tracts
text_ntracts = wx.StaticText(self, -1, _("Number tracts:"))
spin_ntracts = wx.SpinCtrl(self, -1, "", size=wx.Size(50, 23))
spin_ntracts.Enable(1)
spin_ntracts.SetRange(1, 2000)
spin_ntracts.SetValue(const.N_TRACTS)
spin_ntracts.Bind(wx.EVT_TEXT, partial(self.OnSelectNumTracts, ctrl=spin_ntracts))
spin_ntracts.Bind(wx.EVT_SPINCTRL, partial(self.OnSelectNumTracts, ctrl=spin_ntracts))
# Change seed offset for computing tracts
text_offset = wx.StaticText(self, -1, _("Seed offset (mm):"))
spin_offset = wx.SpinCtrlDouble(self, -1, "", size=wx.Size(50, 23), inc=0.1)
spin_offset.Enable(1)
spin_offset.SetRange(0, 100.0)
spin_offset.SetValue(self.seed_offset)
spin_offset.Bind(wx.EVT_TEXT, partial(self.OnSelectOffset, ctrl=spin_offset))
spin_offset.Bind(wx.EVT_SPINCTRL, partial(self.OnSelectOffset, ctrl=spin_offset))
# self.spin_offset = spin_offset
# Change seed radius for computing tracts
text_radius = wx.StaticText(self, -1, _("Seed radius (mm):"))
spin_radius = wx.SpinCtrlDouble(self, -1, "", size=wx.Size(50, 23), inc=0.1)
spin_radius.Enable(1)
spin_radius.SetRange(0, 100.0)
spin_radius.SetValue(self.seed_radius)
spin_radius.Bind(wx.EVT_TEXT, partial(self.OnSelectRadius, ctrl=spin_radius))
spin_radius.Bind(wx.EVT_SPINCTRL, partial(self.OnSelectRadius, ctrl=spin_radius))
# self.spin_radius = spin_radius
# Change sleep pause between navigation loops
text_sleep = wx.StaticText(self, -1, _("Sleep (s):"))
spin_sleep = wx.SpinCtrlDouble(self, -1, "", size=wx.Size(50, 23), inc=0.01)
spin_sleep.Enable(1)
spin_sleep.SetRange(0.01, 10.0)
spin_sleep.SetValue(self.sleep_nav)
spin_sleep.Bind(wx.EVT_TEXT, partial(self.OnSelectSleep, ctrl=spin_sleep))
spin_sleep.Bind(wx.EVT_SPINCTRL, partial(self.OnSelectSleep, ctrl=spin_sleep))
# Change opacity of brain mask visualization
text_opacity = wx.StaticText(self, -1, _("Brain opacity:"))
spin_opacity = wx.SpinCtrlDouble(self, -1, "", size=wx.Size(50, 23), inc=0.1)
spin_opacity.Enable(0)
spin_opacity.SetRange(0, 1.0)
spin_opacity.SetValue(self.brain_opacity)
spin_opacity.Bind(wx.EVT_TEXT, partial(self.OnSelectOpacity, ctrl=spin_opacity))
spin_opacity.Bind(wx.EVT_SPINCTRL, partial(self.OnSelectOpacity, ctrl=spin_opacity))
self.spin_opacity = spin_opacity
# Create a horizontal sizer to threshold configs
border = 1
line_peel_depth = wx.BoxSizer(wx.HORIZONTAL)
line_peel_depth.AddMany([(text_peel_depth, 1, wx.EXPAND | wx.GROW | wx.TOP | wx.RIGHT | wx.LEFT, border),
(spin_peel_depth, 0, wx.ALL | wx.EXPAND | wx.GROW, border)])
line_ntracts = wx.BoxSizer(wx.HORIZONTAL)
line_ntracts.AddMany([(text_ntracts, 1, wx.EXPAND | wx.GROW | wx.TOP | wx.RIGHT | wx.LEFT, border),
(spin_ntracts, 0, wx.ALL | wx.EXPAND | wx.GROW, border)])
line_offset = wx.BoxSizer(wx.HORIZONTAL)
line_offset.AddMany([(text_offset, 1, wx.EXPAND | wx.GROW | wx.TOP | wx.RIGHT | wx.LEFT, border),
(spin_offset, 0, wx.ALL | wx.EXPAND | wx.GROW, border)])
line_radius = wx.BoxSizer(wx.HORIZONTAL)
line_radius.AddMany([(text_radius, 1, wx.EXPAND | wx.GROW | wx.TOP | wx.RIGHT | wx.LEFT, border),
(spin_radius, 0, wx.ALL | wx.EXPAND | wx.GROW, border)])
line_sleep = wx.BoxSizer(wx.HORIZONTAL)
line_sleep.AddMany([(text_sleep, 1, wx.EXPAND | wx.GROW | wx.TOP | wx.RIGHT | wx.LEFT, border),
(spin_sleep, 0, wx.ALL | wx.EXPAND | wx.GROW, border)])
line_opacity = wx.BoxSizer(wx.HORIZONTAL)
line_opacity.AddMany([(text_opacity, 1, wx.EXPAND | wx.GROW | wx.TOP | wx.RIGHT | wx.LEFT, border),
(spin_opacity, 0, wx.ALL | wx.EXPAND | wx.GROW, border)])
# Check box to enable tract visualization
checktracts = wx.CheckBox(self, -1, _('Enable tracts'))
checktracts.SetValue(False)
checktracts.Enable(0)
checktracts.Bind(wx.EVT_CHECKBOX, partial(self.OnEnableTracts, ctrl=checktracts))
self.checktracts = checktracts
# Check box to enable surface peeling
checkpeeling = wx.CheckBox(self, -1, _('Peel surface'))
checkpeeling.SetValue(False)
checkpeeling.Enable(0)
checkpeeling.Bind(wx.EVT_CHECKBOX, partial(self.OnShowPeeling, ctrl=checkpeeling))
self.checkpeeling = checkpeeling
# Check box to enable tract visualization
checkACT = wx.CheckBox(self, -1, _('ACT'))
checkACT.SetValue(False)
checkACT.Enable(0)
checkACT.Bind(wx.EVT_CHECKBOX, partial(self.OnEnableACT, ctrl=checkACT))
self.checkACT = checkACT
border_last = 1
line_checks = wx.BoxSizer(wx.HORIZONTAL)
line_checks.Add(checktracts, 0, wx.ALIGN_LEFT | wx.RIGHT | wx.LEFT, border_last)
line_checks.Add(checkpeeling, 0, wx.ALIGN_CENTER | wx.RIGHT | wx.LEFT, border_last)
line_checks.Add(checkACT, 0, wx.RIGHT | wx.LEFT, border_last)
# Add line sizers into main sizer
border = 1
border_last = 10
main_sizer = wx.BoxSizer(wx.VERTICAL)
main_sizer.Add(line_btns, 0, wx.BOTTOM | wx.ALIGN_CENTER_HORIZONTAL, border_last)
main_sizer.Add(line_peel_depth, 0, wx.GROW | wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP, border)
main_sizer.Add(line_ntracts, 0, wx.GROW | wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP, border)
main_sizer.Add(line_offset, 0, wx.GROW | wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP, border)
main_sizer.Add(line_radius, 0, wx.GROW | wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP, border)
main_sizer.Add(line_sleep, 0, wx.GROW | wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP, border)
main_sizer.Add(line_opacity, 0, wx.GROW | wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP, border)
main_sizer.Add(line_checks, 0, wx.GROW | wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP | wx.BOTTOM, border_last)
main_sizer.Fit(self)
self.SetSizer(main_sizer)
self.Update()
def __bind_events(self):
Publisher.subscribe(self.OnCloseProject, 'Close project data')
Publisher.subscribe(self.OnUpdateTracts, 'Set cross focal point')
Publisher.subscribe(self.UpdateNavigationStatus, 'Navigation status')
def OnSelectPeelingDepth(self, evt, ctrl):
self.peel_depth = ctrl.GetValue()
if self.checkpeeling.GetValue():
actor = self.brain_peel.get_actor(self.peel_depth)
Publisher.sendMessage('Update peel', flag=True, actor=actor)
Publisher.sendMessage('Get peel centers and normals', centers=self.brain_peel.peel_centers,
normals=self.brain_peel.peel_normals)
Publisher.sendMessage('Get init locator', locator=self.brain_peel.locator)
self.peel_loaded = True
def OnSelectNumTracts(self, evt, ctrl):
self.n_tracts = ctrl.GetValue()
# self.tract.n_tracts = ctrl.GetValue()
Publisher.sendMessage('Update number of tracts', data=self.n_tracts)
def OnSelectOffset(self, evt, ctrl):
self.seed_offset = ctrl.GetValue()
# self.tract.seed_offset = ctrl.GetValue()
Publisher.sendMessage('Update seed offset', data=self.seed_offset)
def OnSelectRadius(self, evt, ctrl):
self.seed_radius = ctrl.GetValue()
# self.tract.seed_offset = ctrl.GetValue()
Publisher.sendMessage('Update seed radius', data=self.seed_radius)
def OnSelectSleep(self, evt, ctrl):
self.sleep_nav = ctrl.GetValue()
# self.tract.seed_offset = ctrl.GetValue()
Publisher.sendMessage('Update sleep', data=self.sleep_nav)
def OnSelectOpacity(self, evt, ctrl):
self.brain_actor.GetProperty().SetOpacity(ctrl.GetValue())
Publisher.sendMessage('Update peel', flag=True, actor=self.brain_actor)
def OnShowPeeling(self, evt, ctrl):
# self.view_peeling = ctrl.GetValue()
if ctrl.GetValue():
actor = self.brain_peel.get_actor(self.peel_depth)
self.peel_loaded = True
Publisher.sendMessage('Update peel visualization', data=self.peel_loaded)
else:
actor = None
self.peel_loaded = False
Publisher.sendMessage('Update peel visualization', data=self.peel_loaded)
Publisher.sendMessage('Update peel', flag=ctrl.GetValue(), actor=actor)
def OnEnableTracts(self, evt, ctrl):
self.view_tracts = ctrl.GetValue()
Publisher.sendMessage('Update tracts visualization', data=self.view_tracts)
if not self.view_tracts:
Publisher.sendMessage('Remove tracts')
Publisher.sendMessage("Update marker offset state", create=False)
def OnEnableACT(self, evt, ctrl):
# self.view_peeling = ctrl.GetValue()
# if ctrl.GetValue():
# act_data = self.brain_peel.get_actor(self.peel_depth)
# else:
# actor = None
Publisher.sendMessage('Enable ACT', data=ctrl.GetValue())
def UpdateNavigationStatus(self, nav_status, vis_status):
self.nav_status = nav_status
def OnLinkBrain(self, event=None):
Publisher.sendMessage('Begin busy cursor')
inv_proj = prj.Project()
peels_dlg = dlg.PeelsCreationDlg(wx.GetApp().GetTopWindow())
ret = peels_dlg.ShowModal()
method = peels_dlg.method
if ret == wx.ID_OK:
slic = sl.Slice()
ww = slic.window_width
wl = slic.window_level
affine_vtk = vtk.vtkMatrix4x4()
if method == peels_dlg.FROM_FILES:
matrix_shape = tuple(inv_proj.matrix_shape)
try:
affine = slic.affine.copy()
except AttributeError:
affine = np.eye(4)
affine[1, -1] -= matrix_shape[1]
affine_vtk = vtk_utils.numpy_to_vtkMatrix4x4(affine)
self.brain_peel = brain.Brain(self.n_peels, ww, wl, affine_vtk)
if method == peels_dlg.FROM_MASK:
choices = [i for i in inv_proj.mask_dict.values()]
mask_index = peels_dlg.cb_masks.GetSelection()
mask = choices[mask_index]
self.brain_peel.from_mask(mask)
else:
mask_path = peels_dlg.mask_path
self.brain_peel.from_mask_file(mask_path)
self.brain_actor = self.brain_peel.get_actor(self.peel_depth)
self.brain_actor.GetProperty().SetOpacity(self.brain_opacity)
Publisher.sendMessage('Update peel', flag=True, actor=self.brain_actor)
Publisher.sendMessage('Get peel centers and normals', centers=self.brain_peel.peel_centers,
normals=self.brain_peel.peel_normals)
Publisher.sendMessage('Get init locator', locator=self.brain_peel.locator)
self.checkpeeling.Enable(1)
self.checkpeeling.SetValue(True)
self.spin_opacity.Enable(1)
Publisher.sendMessage('Update status text in GUI', label=_("Brain model loaded"))
self.peel_loaded = True
Publisher.sendMessage('Update peel visualization', data=self.peel_loaded)
peels_dlg.Destroy()
Publisher.sendMessage('End busy cursor')
def OnLinkFOD(self, event=None):
Publisher.sendMessage('Begin busy cursor')
filename = dlg.ShowImportOtherFilesDialog(const.ID_NIFTI_IMPORT, msg=_("Import Trekker FOD"))
# Juuso
# data_dir = os.environ.get('OneDriveConsumer') + '\\data\\dti'
# FOD_path = 'sub-P0_dwi_FOD.nii'
# Baran
# data_dir = os.environ.get('OneDrive') + r'\data\dti_navigation\baran\anat_reg_improve_20200609'
# FOD_path = 'Baran_FOD.nii'
# filename = os.path.join(data_dir, FOD_path)
if not self.affine_vtk:
slic = sl.Slice()
prj_data = prj.Project()
matrix_shape = tuple(prj_data.matrix_shape)
spacing = tuple(prj_data.spacing)
img_shift = spacing[1] * (matrix_shape[1] - 1)
self.affine = slic.affine.copy()
self.affine[1, -1] -= img_shift
self.affine_vtk = vtk_utils.numpy_to_vtkMatrix4x4(self.affine)
if filename:
Publisher.sendMessage('Update status text in GUI', label=_("Busy"))
try:
self.trekker = Trekker.initialize(filename.encode('utf-8'))
self.trekker, n_threads = dti.set_trekker_parameters(self.trekker, self.trekker_cfg)
self.checktracts.Enable(1)
self.checktracts.SetValue(True)
self.view_tracts = True
Publisher.sendMessage('Update Trekker object', data=self.trekker)
Publisher.sendMessage('Update number of threads', data=n_threads)
Publisher.sendMessage('Update tracts visualization', data=1)
Publisher.sendMessage('Update status text in GUI', label=_("Trekker initialized"))
# except:
# wx.MessageBox(_("Unable to initialize Trekker, check FOD and config files."), _("InVesalius 3"))
except Exception:
Publisher.sendMessage('Update status text in GUI', label=_("Trekker initialization failed."))
wx.MessageBox(_("Unable to load FOD."), _("InVesalius 3"))
Publisher.sendMessage('End busy cursor')
def OnLoadACT(self, event=None):
if self.trekker:
Publisher.sendMessage('Begin busy cursor')
filename = dlg.ShowImportOtherFilesDialog(const.ID_NIFTI_IMPORT, msg=_("Import anatomical labels"))
# Baran
# data_dir = os.environ.get('OneDrive') + r'\data\dti_navigation\baran\anat_reg_improve_20200609'
# act_path = 'Baran_trekkerACTlabels_inFODspace.nii'
# filename = os.path.join(data_dir, act_path)
if not self.affine_vtk:
slic = sl.Slice()
prj_data = prj.Project()
matrix_shape = tuple(prj_data.matrix_shape)
spacing = tuple(prj_data.spacing)
img_shift = spacing[1] * (matrix_shape[1] - 1)
self.affine = slic.affine.copy()
self.affine[1, -1] -= img_shift
self.affine_vtk = vtk_utils.numpy_to_vtkMatrix4x4(self.affine)
try:
Publisher.sendMessage('Update status text in GUI', label=_("Busy"))
if filename:
act_data = nb.squeeze_image(nb.load(filename))
act_data = nb.as_closest_canonical(act_data)
act_data.update_header()
act_data_arr = act_data.get_fdata()
self.checkACT.Enable(1)
self.checkACT.SetValue(True)
# ACT rules should be as follows:
self.trekker.pathway_stop_at_entry(filename.encode('utf-8'), -1) # outside
self.trekker.pathway_discard_if_ends_inside(filename.encode('utf-8'), 1) # wm
self.trekker.pathway_discard_if_enters(filename.encode('utf-8'), 0) # csf
Publisher.sendMessage('Update ACT data', data=act_data_arr)
Publisher.sendMessage('Enable ACT', data=True)
Publisher.sendMessage('Update status text in GUI', label=_("Trekker ACT loaded"))
except Exception:
Publisher.sendMessage('Update status text in GUI', label=_("ACT initialization failed."))
wx.MessageBox(_("Unable to load ACT."), _("InVesalius 3"))
Publisher.sendMessage('End busy cursor')
else:
wx.MessageBox(_("Load FOD image before the ACT."), _("InVesalius 3"))
def OnLoadParameters(self, event=None):
import json
filename = dlg.ShowLoadSaveDialog(message=_(u"Load Trekker configuration"),
wildcard=_("JSON file (*.json)|*.json"))
try:
# Check if filename exists, read the JSON file and check if all parameters match
# with the required list defined in the constants module
# if a parameter is missing, raise an error
if filename:
with open(filename) as json_file:
self.trekker_cfg = json.load(json_file)
assert all(name in self.trekker_cfg for name in const.TREKKER_CONFIG)
if self.trekker:
self.trekker, n_threads = dti.set_trekker_parameters(self.trekker, self.trekker_cfg)
Publisher.sendMessage('Update Trekker object', data=self.trekker)
Publisher.sendMessage('Update number of threads', data=n_threads)
Publisher.sendMessage('Update status text in GUI', label=_("Trekker config loaded"))
except (AssertionError, json.decoder.JSONDecodeError):
# Inform user that file is not compatible
self.trekker_cfg = const.TREKKER_CONFIG
wx.MessageBox(_("File incompatible, using default configuration."), _("InVesalius 3"))
Publisher.sendMessage('Update status text in GUI', label="")
def OnUpdateTracts(self, position):
"""
Minimal working version of tract computation. Updates when the cross sends a Pubsub message to update.
Position refers to the coordinates in InVesalius 2D space. To represent the same coordinates in the 3D space,
the coordinates are flipped; here only the y component is negated, inline via coord_flip below.
:param position: list or array with the x, y, and z coordinates in InVesalius space
"""
if self.view_tracts and not self.nav_status:
# print("Running during navigation")
coord_flip = list(position[:3])
coord_flip[1] = -coord_flip[1]
dti.compute_and_visualize_tracts(self.trekker, coord_flip, self.affine, self.affine_vtk,
self.n_tracts)
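# Illustration of the flip above: an InVesalius 2D position
# (x, y, z) = (10.0, 20.0, 30.0) reaches dti.compute_and_visualize_tracts
# as (10.0, -20.0, 30.0); only the y component is negated.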
def OnCloseProject(self):
self.trekker = None
self.trekker_cfg = const.TREKKER_CONFIG
self.checktracts.SetValue(False)
self.checktracts.Enable(0)
self.checkpeeling.SetValue(False)
self.checkpeeling.Enable(0)
self.checkACT.SetValue(False)
self.checkACT.Enable(0)
self.spin_opacity.SetValue(const.BRAIN_OPACITY)
self.spin_opacity.Enable(0)
Publisher.sendMessage('Update peel', flag=False, actor=self.brain_actor)
self.peel_depth = const.PEEL_DEPTH
self.n_tracts = const.N_TRACTS
Publisher.sendMessage('Remove tracts')
class SessionPanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
try:
default_colour = wx.SystemSettings.GetColour(wx.SYS_COLOUR_MENUBAR)
except AttributeError:
default_colour = wx.SystemSettings_GetColour(wx.SYS_COLOUR_MENUBAR)
self.SetBackgroundColour(default_colour)
# session count spinner
self.__spin_session = wx.SpinCtrl(self, -1, "", size=wx.Size(40, 23))
self.__spin_session.SetRange(1, 99)
self.__spin_session.SetValue(1)
self.__spin_session.Bind(wx.EVT_TEXT, self.OnSessionChanged)
self.__spin_session.Bind(wx.EVT_SPINCTRL, self.OnSessionChanged)
sizer_create = wx.FlexGridSizer(rows=1, cols=1, hgap=5, vgap=5)
sizer_create.AddMany([(self.__spin_session, 1)])
def OnSessionChanged(self, evt):
Publisher.sendMessage('Current session changed', new_session_id=self.__spin_session.GetValue())
class InputAttributes(object):
# taken from https://stackoverflow.com/questions/2466191/set-attributes-from-dictionary-in-python
def __init__(self, *initial_data, **kwargs):
for dictionary in initial_data:
for key in dictionary:
setattr(self, key, dictionary[key])
for key in kwargs:
setattr(self, key, kwargs[key])
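# Minimal usage sketch (hypothetical keys, not tied to the panels above):
#   opts = InputAttributes({'n_tracts': 240, 'seed_offset': 15.0}, debug=True)
#   opts.n_tracts   # 240
#   opts.debug      # True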
|
paulojamorim/invesalius3
|
invesalius/gui/task_navigator.py
|
Python
|
gpl-2.0
| 98,486
|
[
"VTK"
] |
bbba4f5f419080f09d33643519d2fa65a0e0097c15db26aa41f5dcd8d4ca468a
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = [
'Click>=6.0',
# TODO: put package requirements here
]
setup_requirements = [
# TODO(briancohan): put setup requirements (distutils extensions, etc.) here
]
test_requirements = [
# TODO: put package test requirements here
]
setup(
name='fmapi',
version='0.1.0',
description="Python interface for FDTs, CFAST, and FDS",
long_description=readme + '\n\n' + history,
author="Brian Cohan",
author_email='briancohan@gmail.com',
url='https://github.com/briancohan/fmapi',
packages=find_packages(include=['fmapi']),
entry_points={
'console_scripts': [
'fmapi=fmapi.cli:main'
]
},
include_package_data=True,
install_requires=requirements,
license="MIT license",
zip_safe=False,
keywords='fmapi',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
test_suite='tests',
tests_require=test_requirements,
setup_requires=setup_requirements,
)
|
briancohan/fmapi
|
setup.py
|
Python
|
mit
| 1,771
|
[
"Brian"
] |
252134288a0ecaa789bd97df4a242617477042116845dcbf313f2c4cc5682010
|
# (c) 2012-2018, Ansible by Red Hat
#
# This file is part of Ansible Galaxy
#
# Ansible Galaxy is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by
# the Apache Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Ansible Galaxy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License
# along with Galaxy. If not, see <http://www.apache.org/licenses/>.
import logging
from django.core.cache import caches
from rest_framework.throttling import ScopedRateThrottle
from galaxy.main.models import Role
class RoleDownloadCountThrottle(ScopedRateThrottle):
cache = caches['download_count']
role_id = None
def __init__(self):
self.role_id = None
self.logger = logging.getLogger(__name__)
super(RoleDownloadCountThrottle, self).__init__()
def allow_request(self, request, view):
self.logger.debug('RoleDownloadCountThrottle:')
if request.query_params.get('owner__username') or request.query_params.get('namespace'):
if request.query_params.get('name'):
# this is a download request
if request.query_params.get('owner__username', None):
role_namespace = request.query_params['owner__username']
else:
role_namespace = request.query_params['namespace']
role_name = request.query_params['name']
try:
# attempt to lookup role first. if that fails, we don't want get_cache_key to be called.
role = Role.objects.get(namespace=role_namespace, name=role_name)
self.role_id = role.id
allowed = super(RoleDownloadCountThrottle, self).allow_request(request, view)
if not allowed:
# user downloaded requested role already
self.logger.debug('user requested role %s.%s already.' % (role_namespace, role_name))
return True
role.download_count += 1
role.save()
except Exception as e:
self.logger.error('Error finding role %s.%s - %s' % (role_namespace,
role_name,
str(e.args)))
return True
def get_cache_key(self, request, view):
"""
Generate a unique cache key by concatenating the request ident
with the `.throttle_scope` and the primary key of the requested role.
"""
ident = self.get_ident(request)
self.logger.debug("RoleDownloadCountThrottle cache key: %s_%s_%s" % (self.scope,
ident,
self.role_id))
return self.cache_format % {
'scope': self.scope,
'ident': "%s_%s" % (ident, self.role_id)
}
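# Illustration (hypothetical values): DRF's SimpleRateThrottle default
# cache_format is 'throttle_%(scope)s_%(ident)s', so with
# scope='download_count', ident='10.0.0.1' and role_id=42 the key becomes
# 'throttle_download_count_10.0.0.1_42' -- repeat downloads of the same
# role by the same client share one throttle bucket.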
|
chouseknecht/galaxy
|
galaxy/api/throttling.py
|
Python
|
apache-2.0
| 3,329
|
[
"Galaxy"
] |
8c34628f8edd9dd4a5c9d2c497f350978293688043cfaa21eaa1e7cdcf9903f7
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Rackspace
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
import mox
from nova import context
from nova import db
from nova.db.sqlalchemy import models
from nova import exception
from nova import ipv6
from nova.network import linux_net
from nova.network import manager as network_manager
from nova.network import model as net_model
from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
from nova.openstack.common.rpc import common as rpc_common
from nova import test
from nova.tests import fake_ldap
from nova.tests import fake_network
from nova.tests import matchers
from nova import utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
HOST = "testhost"
FAKEUUID = "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"
networks = [{'id': 0,
'uuid': FAKEUUID,
'label': 'test0',
'injected': False,
'multi_host': False,
'cidr': '192.168.0.0/24',
'cidr_v6': '2001:db8::/64',
'gateway_v6': '2001:db8::1',
'netmask_v6': '64',
'netmask': '255.255.255.0',
'bridge': 'fa0',
'bridge_interface': 'fake_fa0',
'gateway': '192.168.0.1',
'broadcast': '192.168.0.255',
'dns1': '192.168.0.1',
'dns2': '192.168.0.2',
'vlan': None,
'host': HOST,
'project_id': 'fake_project',
'vpn_public_address': '192.168.0.2',
'vpn_public_port': '22',
'vpn_private_address': '10.0.0.2'},
{'id': 1,
'uuid': 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'label': 'test1',
'injected': False,
'multi_host': False,
'cidr': '192.168.1.0/24',
'cidr_v6': '2001:db9::/64',
'gateway_v6': '2001:db9::1',
'netmask_v6': '64',
'netmask': '255.255.255.0',
'bridge': 'fa1',
'bridge_interface': 'fake_fa1',
'gateway': '192.168.1.1',
'broadcast': '192.168.1.255',
'dns1': '192.168.0.1',
'dns2': '192.168.0.2',
'vlan': None,
'host': HOST,
'project_id': 'fake_project',
'vpn_public_address': '192.168.1.2',
'vpn_public_port': '22',
'vpn_private_address': '10.0.0.2'}]
fixed_ips = [{'id': 0,
'network_id': FAKEUUID,
'address': '192.168.0.100',
'instance_uuid': 0,
'allocated': False,
'virtual_interface_id': 0,
'floating_ips': []},
{'id': 0,
'network_id': 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'address': '192.168.1.100',
'instance_uuid': 0,
'allocated': False,
'virtual_interface_id': 0,
'floating_ips': []}]
flavor = {'id': 0,
'rxtx_cap': 3}
floating_ip_fields = {'id': 0,
'address': '192.168.10.100',
'pool': 'nova',
'interface': 'eth0',
'fixed_ip_id': 0,
'project_id': None,
'auto_assigned': False}
vifs = [{'id': 0,
'address': 'DE:AD:BE:EF:00:00',
'uuid': '00000000-0000-0000-0000-0000000000000000',
'network_id': 0,
'instance_uuid': 0},
{'id': 1,
'address': 'DE:AD:BE:EF:00:01',
'uuid': '00000000-0000-0000-0000-0000000000000001',
'network_id': 1,
'instance_uuid': 0},
{'id': 2,
'address': 'DE:AD:BE:EF:00:02',
'uuid': '00000000-0000-0000-0000-0000000000000002',
'network_id': 2,
'instance_uuid': 0}]
class FlatNetworkTestCase(test.TestCase):
def setUp(self):
super(FlatNetworkTestCase, self).setUp()
self.tempdir = self.useFixture(fixtures.TempDir()).path
self.flags(log_dir=self.tempdir)
self.network = network_manager.FlatManager(host=HOST)
self.network.instance_dns_domain = ''
self.network.db = db
self.context = context.RequestContext('testuser', 'testproject',
is_admin=False)
def test_get_instance_nw_info(self):
fake_get_instance_nw_info = fake_network.fake_get_instance_nw_info
nw_info = fake_get_instance_nw_info(self.stubs, 0, 2)
self.assertFalse(nw_info)
nw_info = fake_get_instance_nw_info(self.stubs, 1, 2)
for i, (nw, info) in enumerate(nw_info):
nid = i + 1
check = {'bridge': 'fake_br%d' % nid,
'cidr': '192.168.%s.0/24' % nid,
'cidr_v6': '2001:db8:0:%x::/64' % nid,
'id': '00000000-0000-0000-0000-00000000000000%02d' % nid,
'multi_host': False,
'injected': False,
'bridge_interface': None,
'vlan': None}
self.assertThat(nw, matchers.DictMatches(check))
check = {'broadcast': '192.168.%d.255' % nid,
'dhcp_server': '192.168.1.1',
'dns': ['192.168.%d.3' % nid, '192.168.%d.4' % nid],
'gateway': '192.168.%d.1' % nid,
'gateway_v6': 'fe80::def',
'ip6s': 'DONTCARE',
'ips': 'DONTCARE',
'label': 'test%d' % nid,
'mac': 'DE:AD:BE:EF:00:%02x' % nid,
'rxtx_cap': 30,
'vif_type': net_model.VIF_TYPE_BRIDGE,
'vif_devname': None,
'vif_uuid':
'00000000-0000-0000-0000-00000000000000%02d' % nid,
'ovs_interfaceid': None,
'should_create_vlan': False,
'should_create_bridge': False}
self.assertThat(info, matchers.DictMatches(check))
check = [{'enabled': 'DONTCARE',
'ip': '2001:db8:0:1::%x' % nid,
'netmask': 64,
'gateway': 'fe80::def'}]
self.assertThat(info['ip6s'], matchers.DictListMatches(check))
num_fixed_ips = len(info['ips'])
check = [{'enabled': 'DONTCARE',
'ip': '192.168.%d.%03d' % (nid, ip_num + 99),
'netmask': '255.255.255.0',
'gateway': '192.168.%d.1' % nid}
for ip_num in xrange(1, num_fixed_ips + 1)]
self.assertThat(info['ips'], matchers.DictListMatches(check))
def test_validate_networks(self):
self.mox.StubOutWithMock(db, 'network_get')
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
self.mox.StubOutWithMock(db, 'fixed_ip_get_by_address')
requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'192.168.1.100')]
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
project_only=mox.IgnoreArg()).AndReturn(networks)
db.network_get(mox.IgnoreArg(),
mox.IgnoreArg(),
project_only=mox.IgnoreArg()).AndReturn(networks[1])
ip = fixed_ips[1].copy()
ip['instance_uuid'] = None
db.fixed_ip_get_by_address(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(ip)
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_validate_reserved(self):
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
nets = self.network.create_networks(context_admin, 'fake',
'192.168.0.0/24', False, 1,
256, None, None, None, None, None)
self.assertEqual(1, len(nets))
network = nets[0]
self.assertEqual(3, db.network_count_reserved_ips(context_admin,
network['id']))
def test_validate_networks_none_requested_networks(self):
self.network.validate_networks(self.context, None)
def test_validate_networks_empty_requested_networks(self):
requested_networks = []
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_validate_networks_invalid_fixed_ip(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = [(1, "192.168.0.100.1")]
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
project_only=mox.IgnoreArg()).AndReturn(networks)
self.mox.ReplayAll()
self.assertRaises(exception.FixedIpInvalid,
self.network.validate_networks, self.context,
requested_networks)
def test_validate_networks_empty_fixed_ip(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = [(1, "")]
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
project_only=mox.IgnoreArg()).AndReturn(networks)
self.mox.ReplayAll()
self.assertRaises(exception.FixedIpInvalid,
self.network.validate_networks,
self.context, requested_networks)
def test_validate_networks_none_fixed_ip(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = [(1, None)]
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
project_only=mox.IgnoreArg()).AndReturn(networks)
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_add_fixed_ip_instance_using_id_without_vpn(self):
self.mox.StubOutWithMock(db, 'network_get')
self.mox.StubOutWithMock(db, 'network_update')
self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
self.mox.StubOutWithMock(db, 'instance_get')
self.mox.StubOutWithMock(db,
'virtual_interface_get_by_instance_and_network')
self.mox.StubOutWithMock(db, 'fixed_ip_update')
db.fixed_ip_update(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'id': 0})
db.instance_get(self.context,
1).AndReturn({'display_name': HOST,
'uuid': 'test-00001'})
db.instance_get(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({'security_groups':
[{'id': 0}]})
db.instance_get(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({'security_groups':
[{'id':0, 'name':'test'}]})
db.fixed_ip_associate_pool(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn('192.168.0.101')
db.network_get(mox.IgnoreArg(),
mox.IgnoreArg(),
project_only=mox.IgnoreArg()).AndReturn(networks[0])
db.network_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
self.network.add_fixed_ip_to_instance(self.context, 1, HOST,
networks[0]['id'])
def test_add_fixed_ip_instance_using_uuid_without_vpn(self):
self.mox.StubOutWithMock(db, 'network_get_by_uuid')
self.mox.StubOutWithMock(db, 'network_update')
self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
self.mox.StubOutWithMock(db, 'instance_get')
self.mox.StubOutWithMock(db,
'virtual_interface_get_by_instance_and_network')
self.mox.StubOutWithMock(db, 'fixed_ip_update')
db.fixed_ip_update(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'id': 0})
db.instance_get(self.context,
1).AndReturn({'display_name': HOST,
'uuid': 'test-00001'})
db.instance_get(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({'security_groups':
[{'id': 0}]})
db.instance_get(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({'security_groups':
[{'id':0, 'name':'test'}]})
db.fixed_ip_associate_pool(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn('192.168.0.101')
db.network_get_by_uuid(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(networks[0])
db.network_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
self.network.add_fixed_ip_to_instance(self.context, 1, HOST,
networks[0]['uuid'])
def test_mini_dns_driver(self):
zone1 = "example.org"
zone2 = "example.com"
driver = self.network.instance_dns_manager
driver.create_entry("hostone", "10.0.0.1", "A", zone1)
driver.create_entry("hosttwo", "10.0.0.2", "A", zone1)
driver.create_entry("hostthree", "10.0.0.3", "A", zone1)
driver.create_entry("hostfour", "10.0.0.4", "A", zone1)
driver.create_entry("hostfive", "10.0.0.5", "A", zone2)
driver.delete_entry("hostone", zone1)
driver.modify_address("hostfour", "10.0.0.1", zone1)
driver.modify_address("hostthree", "10.0.0.1", zone1)
names = driver.get_entries_by_address("10.0.0.1", zone1)
self.assertEqual(len(names), 2)
self.assertIn('hostthree', names)
self.assertIn('hostfour', names)
names = driver.get_entries_by_address("10.0.0.5", zone2)
self.assertEqual(len(names), 1)
self.assertIn('hostfive', names)
addresses = driver.get_entries_by_name("hosttwo", zone1)
self.assertEqual(len(addresses), 1)
self.assertIn('10.0.0.2', addresses)
self.assertRaises(exception.InvalidInput,
driver.create_entry,
"hostname",
"10.10.10.10",
"invalidtype",
zone1)
def test_mini_dns_driver_with_mixed_case(self):
zone1 = "example.org"
driver = self.network.instance_dns_manager
driver.create_entry("HostTen", "10.0.0.10", "A", zone1)
addresses = driver.get_entries_by_address("10.0.0.10", zone1)
self.assertEqual(len(addresses), 1)
for n in addresses:
driver.delete_entry(n, zone1)
addresses = driver.get_entries_by_address("10.0.0.10", zone1)
self.assertEqual(len(addresses), 0)
def test_instance_dns(self):
fixedip = '192.168.0.101'
self.mox.StubOutWithMock(db, 'network_get')
self.mox.StubOutWithMock(db, 'network_update')
self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
self.mox.StubOutWithMock(db, 'instance_get')
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
self.mox.StubOutWithMock(db,
'virtual_interface_get_by_instance_and_network')
self.mox.StubOutWithMock(db, 'fixed_ip_update')
db.fixed_ip_update(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'id': 0})
db.instance_get(self.context,
1).AndReturn({'display_name': HOST,
'uuid': 'test-00001'})
db.instance_get(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({'security_groups':
[{'id': 0}]})
db.instance_get(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({'security_groups':
[{'id':0, 'name':'test'}]})
db.fixed_ip_associate_pool(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(fixedip)
db.network_get(mox.IgnoreArg(),
mox.IgnoreArg(),
project_only=mox.IgnoreArg()).AndReturn(networks[0])
db.network_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
self.network.add_fixed_ip_to_instance(self.context, 1, HOST,
networks[0]['id'])
instance_manager = self.network.instance_dns_manager
addresses = instance_manager.get_entries_by_name(HOST,
self.network.instance_dns_domain)
self.assertEqual(len(addresses), 1)
self.assertEqual(addresses[0], fixedip)
addresses = instance_manager.get_entries_by_name('test-00001',
self.network.instance_dns_domain)
self.assertEqual(len(addresses), 1)
self.assertEqual(addresses[0], fixedip)
class VlanNetworkTestCase(test.TestCase):
def setUp(self):
super(VlanNetworkTestCase, self).setUp()
self.network = network_manager.VlanManager(host=HOST)
self.network.db = db
self.context = context.RequestContext('testuser', 'testproject',
is_admin=False)
def test_vpn_allocate_fixed_ip(self):
self.mox.StubOutWithMock(db, 'instance_get')
self.mox.StubOutWithMock(db, 'fixed_ip_associate')
self.mox.StubOutWithMock(db, 'fixed_ip_update')
self.mox.StubOutWithMock(db,
'virtual_interface_get_by_instance_and_network')
db.instance_get(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({'uuid': '42',
'display_name': HOST})
db.fixed_ip_associate(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg(),
reserved=True).AndReturn('192.168.0.1')
db.fixed_ip_update(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'id': 0})
self.mox.ReplayAll()
network = dict(networks[0])
network['vpn_private_address'] = '192.168.0.2'
self.network.allocate_fixed_ip(self.context, 0, network, vpn=True)
def test_vpn_allocate_fixed_ip_no_network_id(self):
network = dict(networks[0])
network['vpn_private_address'] = '192.168.0.2'
network['id'] = None
instance = db.instance_create(self.context, {})
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
self.assertRaises(exception.FixedIpNotFoundForNetwork,
self.network.allocate_fixed_ip,
context_admin,
instance['id'],
network,
vpn=True)
def test_allocate_fixed_ip(self):
self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
self.mox.StubOutWithMock(db, 'fixed_ip_update')
self.mox.StubOutWithMock(db,
'virtual_interface_get_by_instance_and_network')
self.mox.StubOutWithMock(db, 'instance_get')
db.instance_get(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({'display_name': HOST,
'uuid': FAKEUUID})
db.instance_get(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({'security_groups':
[{'id': 0}]})
db.fixed_ip_associate_pool(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn('192.168.0.1')
db.fixed_ip_update(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'id': 0})
self.mox.ReplayAll()
network = dict(networks[0])
network['vpn_private_address'] = '192.168.0.2'
self.network.allocate_fixed_ip(self.context, 0, network)
def test_create_networks_too_big(self):
self.assertRaises(ValueError, self.network.create_networks, None,
num_networks=4094, vlan_start=1)
def test_create_networks_too_many(self):
self.assertRaises(ValueError, self.network.create_networks, None,
num_networks=100, vlan_start=1,
cidr='192.168.0.1/24', network_size=100)
def test_validate_networks(self):
def network_get(_context, network_id, project_only='allow_none'):
return networks[network_id]
self.stubs.Set(db, 'network_get', network_get)
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
self.mox.StubOutWithMock(db, "fixed_ip_get_by_address")
requested_networks = [("bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb",
"192.168.1.100")]
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
project_only=mox.IgnoreArg()).AndReturn(networks)
fixed_ips[1]['network_id'] = networks[1]['id']
fixed_ips[1]['instance_uuid'] = None
db.fixed_ip_get_by_address(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(fixed_ips[1])
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_validate_networks_none_requested_networks(self):
self.network.validate_networks(self.context, None)
def test_validate_networks_empty_requested_networks(self):
requested_networks = []
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_validate_networks_invalid_fixed_ip(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = [(1, "192.168.0.100.1")]
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
project_only=mox.IgnoreArg()).AndReturn(networks)
self.mox.ReplayAll()
self.assertRaises(exception.FixedIpInvalid,
self.network.validate_networks, self.context,
requested_networks)
def test_validate_networks_empty_fixed_ip(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = [(1, "")]
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
project_only=mox.IgnoreArg()).AndReturn(networks)
self.mox.ReplayAll()
self.assertRaises(exception.FixedIpInvalid,
self.network.validate_networks,
self.context, requested_networks)
def test_validate_networks_none_fixed_ip(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = [(1, None)]
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
project_only=mox.IgnoreArg()).AndReturn(networks)
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_floating_ip_owned_by_project(self):
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
# raises because floating_ip project_id is None
floating_ip = {'address': '10.0.0.1',
'project_id': None}
self.assertRaises(exception.NotAuthorized,
self.network._floating_ip_owned_by_project,
ctxt,
floating_ip)
# raises because floating_ip project_id is not equal to ctxt project_id
floating_ip = {'address': '10.0.0.1',
'project_id': ctxt.project_id + '1'}
self.assertRaises(exception.NotAuthorized,
self.network._floating_ip_owned_by_project,
ctxt,
floating_ip)
# does not raise (floating ip is owned by ctxt project)
floating_ip = {'address': '10.0.0.1',
'project_id': ctxt.project_id}
self.network._floating_ip_owned_by_project(ctxt, floating_ip)
ctxt = context.RequestContext(None, None,
is_admin=True)
# does not raise (ctxt is admin)
floating_ip = {'address': '10.0.0.1',
'project_id': None}
self.network._floating_ip_owned_by_project(ctxt, floating_ip)
# does not raise (ctxt is admin)
floating_ip = {'address': '10.0.0.1',
'project_id': 'testproject'}
self.network._floating_ip_owned_by_project(ctxt, floating_ip)
def test_allocate_floating_ip(self):
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
def fake_allocate_address(*args, **kwargs):
return {'address': '10.0.0.1', 'project_id': ctxt.project_id}
self.stubs.Set(self.network.db, 'floating_ip_allocate_address',
fake_allocate_address)
self.network.allocate_floating_ip(ctxt, ctxt.project_id)
def test_deallocate_floating_ip(self):
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
def fake1(*args, **kwargs):
pass
def fake2(*args, **kwargs):
return {'address': '10.0.0.1', 'fixed_ip_id': 1}
def fake3(*args, **kwargs):
return {'address': '10.0.0.1', 'fixed_ip_id': None,
'project_id': ctxt.project_id}
self.stubs.Set(self.network.db, 'floating_ip_deallocate', fake1)
self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1)
# this time should raise because floating ip is associated to fixed_ip
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2)
self.assertRaises(exception.FloatingIpAssociated,
self.network.deallocate_floating_ip,
ctxt,
mox.IgnoreArg())
# this time should not raise
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3)
self.network.deallocate_floating_ip(ctxt, ctxt.project_id)
def test_associate_floating_ip(self):
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
def fake1(*args, **kwargs):
return '10.0.0.1'
# floating ip that's already associated
def fake2(*args, **kwargs):
return {'address': '10.0.0.1',
'pool': 'nova',
'interface': 'eth0',
'fixed_ip_id': 1}
# floating ip that isn't associated
def fake3(*args, **kwargs):
return {'address': '10.0.0.1',
'pool': 'nova',
'interface': 'eth0',
'fixed_ip_id': None}
# fixed ip with remote host
def fake4(*args, **kwargs):
return {'address': '10.0.0.1',
'pool': 'nova',
'instance_uuid': FAKEUUID,
'interface': 'eth0',
'network_id': 'blah'}
def fake4_network(*args, **kwargs):
return {'multi_host': False, 'host': 'jibberjabber'}
# fixed ip with local host
def fake5(*args, **kwargs):
return {'address': '10.0.0.1',
'pool': 'nova',
'instance_uuid': FAKEUUID,
'interface': 'eth0',
'network_id': 'blahblah'}
def fake5_network(*args, **kwargs):
return {'multi_host': False, 'host': 'testhost'}
def fake6(*args, **kwargs):
self.local = False
def fake7(*args, **kwargs):
self.local = True
def fake8(*args, **kwargs):
raise exception.ProcessExecutionError('',
'Cannot find device "em0"\n')
def fake9(*args, **kwargs):
raise test.TestingException()
# raises because interface doesn't exist
self.stubs.Set(self.network.db,
'floating_ip_fixed_ip_associate',
fake1)
self.stubs.Set(self.network.db, 'floating_ip_disassociate', fake1)
self.stubs.Set(self.network.driver, 'bind_floating_ip', fake8)
self.assertRaises(exception.NoFloatingIpInterface,
self.network._associate_floating_ip,
ctxt,
mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1)
# raises because floating_ip is already associated to a fixed_ip
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2)
self.stubs.Set(self.network, 'disassociate_floating_ip', fake9)
def fake_fixed_ip_get(context, fixed_ip_id):
return {'address': 'old', 'instance_uuid': 'fake_uuid'}
self.stubs.Set(self.network.db, 'fixed_ip_get', fake_fixed_ip_get)
# doesn't raise because we exit early if the address is the same
self.network.associate_floating_ip(ctxt, mox.IgnoreArg(), 'old')
# raises because we call disassociate which is mocked
self.assertRaises(test.TestingException,
self.network.associate_floating_ip,
ctxt,
mox.IgnoreArg(),
'new')
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3)
# does not raise and makes call remotely
self.local = True
self.stubs.Set(self.network.db, 'fixed_ip_get_by_address', fake4)
self.stubs.Set(self.network.db, 'network_get', fake4_network)
self.stubs.Set(rpc, 'call', fake6)
self.network.associate_floating_ip(ctxt, mox.IgnoreArg(),
mox.IgnoreArg())
self.assertFalse(self.local)
# does not raise and makes call locally
self.local = False
self.stubs.Set(self.network.db, 'fixed_ip_get_by_address', fake5)
self.stubs.Set(self.network.db, 'network_get', fake5_network)
self.stubs.Set(self.network, '_associate_floating_ip', fake7)
self.network.associate_floating_ip(ctxt, mox.IgnoreArg(),
mox.IgnoreArg())
self.assertTrue(self.local)
def test_floating_ip_init_host(self):
def get_all_by_host(_context, _host):
return [{'interface': 'foo',
'address': 'foo'},
{'interface': 'fakeiface',
'address': 'fakefloat',
'fixed_ip_id': 1},
{'interface': 'bar',
'address': 'bar',
'fixed_ip_id': 2}]
self.stubs.Set(self.network.db, 'floating_ip_get_all_by_host',
get_all_by_host)
def fixed_ip_get(_context, fixed_ip_id):
if fixed_ip_id == 1:
return {'address': 'fakefixed'}
raise exception.FixedIpNotFound(id=fixed_ip_id)
self.stubs.Set(self.network.db, 'fixed_ip_get', fixed_ip_get)
self.mox.StubOutWithMock(self.network.l3driver, 'add_floating_ip')
self.flags(public_interface=False)
self.network.l3driver.add_floating_ip('fakefloat',
'fakefixed',
'fakeiface')
self.mox.ReplayAll()
self.network.init_host_floating_ips()
self.mox.UnsetStubs()
self.mox.VerifyAll()
self.mox.StubOutWithMock(self.network.l3driver, 'add_floating_ip')
self.flags(public_interface='fooiface')
self.network.l3driver.add_floating_ip('fakefloat',
'fakefixed',
'fooiface')
self.mox.ReplayAll()
self.network.init_host_floating_ips()
self.mox.UnsetStubs()
self.mox.VerifyAll()
def test_disassociate_floating_ip(self):
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
def fake1(*args, **kwargs):
pass
# floating ip that isn't associated
def fake2(*args, **kwargs):
return {'address': '10.0.0.1',
'pool': 'nova',
'interface': 'eth0',
'fixed_ip_id': None}
# floating ip that is associated
def fake3(*args, **kwargs):
return {'address': '10.0.0.1',
'pool': 'nova',
'interface': 'eth0',
'fixed_ip_id': 1,
'project_id': ctxt.project_id}
# fixed ip with remote host
def fake4(*args, **kwargs):
return {'address': '10.0.0.1',
'pool': 'nova',
'instance_uuid': FAKEUUID,
'interface': 'eth0',
'network_id': 'blah'}
def fake4_network(*args, **kwargs):
return {'multi_host': False,
'host': 'jibberjabber'}
# fixed ip with local host
def fake5(*args, **kwargs):
return {'address': '10.0.0.1',
'pool': 'nova',
'instance_uuid': FAKEUUID,
'interface': 'eth0',
'network_id': 'blahblah'}
def fake5_network(*args, **kwargs):
return {'multi_host': False, 'host': 'testhost'}
def fake6(*args, **kwargs):
self.local = False
def fake7(*args, **kwargs):
self.local = True
def fake8(*args, **kwargs):
return {'address': '10.0.0.1',
'pool': 'nova',
'interface': 'eth0',
'fixed_ip_id': 1,
'auto_assigned': True,
'project_id': ctxt.project_id}
self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1)
# raises because floating_ip is not associated to a fixed_ip
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2)
self.assertRaises(exception.FloatingIpNotAssociated,
self.network.disassociate_floating_ip,
ctxt,
mox.IgnoreArg())
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3)
# does not raise and makes call remotely
self.local = True
self.stubs.Set(self.network.db, 'fixed_ip_get', fake4)
self.stubs.Set(self.network.db, 'network_get', fake4_network)
self.stubs.Set(rpc, 'call', fake6)
self.network.disassociate_floating_ip(ctxt, mox.IgnoreArg())
self.assertFalse(self.local)
# does not raise and makes call locally
self.local = False
self.stubs.Set(self.network.db, 'fixed_ip_get', fake5)
self.stubs.Set(self.network.db, 'network_get', fake5_network)
self.stubs.Set(self.network, '_disassociate_floating_ip', fake7)
self.network.disassociate_floating_ip(ctxt, mox.IgnoreArg())
self.assertTrue(self.local)
# raises because auto_assigned floating IP cannot be disassociated
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake8)
self.assertRaises(exception.CannotDisassociateAutoAssignedFloatingIP,
self.network.disassociate_floating_ip,
ctxt,
mox.IgnoreArg())
def test_add_fixed_ip_instance_without_vpn_requested_networks(self):
self.mox.StubOutWithMock(db, 'network_get')
self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
self.mox.StubOutWithMock(db, 'instance_get')
self.mox.StubOutWithMock(db,
'virtual_interface_get_by_instance_and_network')
self.mox.StubOutWithMock(db, 'fixed_ip_update')
db.instance_get(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({'uuid': FAKEUUID,
'display_name': HOST})
db.fixed_ip_update(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'id': 0})
db.instance_get(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({'security_groups':
[{'id': 0}],
'availability_zone': '',
'uuid': FAKEUUID})
db.fixed_ip_associate_pool(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn('192.168.0.101')
db.network_get(mox.IgnoreArg(),
mox.IgnoreArg(),
project_only=mox.IgnoreArg()).AndReturn(networks[0])
self.mox.ReplayAll()
self.network.add_fixed_ip_to_instance(self.context, 1, HOST,
networks[0]['id'])
def test_ip_association_and_allocation_of_other_project(self):
"""Makes sure that we cannot deallocaate or disassociate
a public ip of other project"""
def network_get(_context, network_id, project_only="allow_none"):
return networks[network_id]
self.stubs.Set(db, 'network_get', network_get)
context1 = context.RequestContext('user', 'project1')
context2 = context.RequestContext('user', 'project2')
address = '1.2.3.4'
float_addr = db.floating_ip_create(context1.elevated(),
{'address': address,
'project_id': context1.project_id})
instance = db.instance_create(context1,
{'project_id': 'project1'})
fix_addr = db.fixed_ip_associate_pool(context1.elevated(),
1, instance['uuid'])
# Associate the IP with non-admin user context
self.assertRaises(exception.NotAuthorized,
self.network.associate_floating_ip,
context2,
float_addr,
fix_addr)
# Deallocate address from other project
self.assertRaises(exception.NotAuthorized,
self.network.deallocate_floating_ip,
context2,
float_addr)
# Now Associates the address to the actual project
self.network.associate_floating_ip(context1, float_addr, fix_addr)
# Now try dis-associating from other project
self.assertRaises(exception.NotAuthorized,
self.network.disassociate_floating_ip,
context2,
float_addr)
# Clean up the ip addresses
self.network.disassociate_floating_ip(context1, float_addr)
self.network.deallocate_floating_ip(context1, float_addr)
self.network.deallocate_fixed_ip(context1, fix_addr, 'fake')
db.floating_ip_destroy(context1.elevated(), float_addr)
db.fixed_ip_disassociate(context1.elevated(), fix_addr)
def test_deallocate_fixed(self):
"""Verify that release is called properly.
Ensures https://bugs.launchpad.net/nova/+bug/973442 doesn't recur."""
def network_get(_context, network_id, project_only="allow_none"):
return networks[network_id]
self.stubs.Set(db, 'network_get', network_get)
def vif_get(_context, _vif_id):
return {'address': 'fake_mac'}
self.stubs.Set(db, 'virtual_interface_get', vif_get)
context1 = context.RequestContext('user', 'project1')
instance = db.instance_create(context1,
{'project_id': 'project1'})
elevated = context1.elevated()
fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid'])
values = {'allocated': True,
'virtual_interface_id': 3}
db.fixed_ip_update(elevated, fix_addr, values)
fixed = db.fixed_ip_get_by_address(elevated, fix_addr)
network = db.network_get(elevated, fixed['network_id'])
self.flags(force_dhcp_release=True)
self.mox.StubOutWithMock(linux_net, 'release_dhcp')
linux_net.release_dhcp(network['bridge'], fixed['address'], 'fake_mac')
self.mox.ReplayAll()
self.network.deallocate_fixed_ip(context1, fix_addr, 'fake')
fixed = db.fixed_ip_get_by_address(elevated, fix_addr)
self.assertFalse(fixed['allocated'])
def test_deallocate_fixed_deleted(self):
# Verify doesn't deallocate deleted fixed_ip from deleted network.
def network_get(_context, network_id, project_only="allow_none"):
return networks[network_id]
def teardown_network_on_host(_context, network):
if network['id'] == 0:
raise test.TestingException()
self.stubs.Set(db, 'network_get', network_get)
self.stubs.Set(self.network, '_teardown_network_on_host',
teardown_network_on_host)
context1 = context.RequestContext('user', 'project1')
instance = db.instance_create(context1,
{'project_id': 'project1'})
elevated = context1.elevated()
fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid'])
db.fixed_ip_update(elevated, fix_addr, {'deleted': 1})
elevated.read_deleted = 'yes'
delfixed = db.fixed_ip_get_by_address(elevated, fix_addr)
values = {'address': fix_addr,
'network_id': 0,
'instance_uuid': delfixed['instance_uuid']}
db.fixed_ip_create(elevated, values)
elevated.read_deleted = 'no'
newfixed = db.fixed_ip_get_by_address(elevated, fix_addr)
elevated.read_deleted = 'yes'
deallocate = self.network.deallocate_fixed_ip
self.assertRaises(test.TestingException, deallocate, context1,
fix_addr, 'fake')
def test_deallocate_fixed_no_vif(self):
"""Verify that deallocate doesn't raise when no vif is returned.
Ensures https://bugs.launchpad.net/nova/+bug/968457 doesn't recur."""
def network_get(_context, network_id, project_only="allow_none"):
return networks[network_id]
self.stubs.Set(db, 'network_get', network_get)
def vif_get(_context, _vif_id):
return None
self.stubs.Set(db, 'virtual_interface_get', vif_get)
context1 = context.RequestContext('user', 'project1')
instance = db.instance_create(context1,
{'project_id': 'project1'})
elevated = context1.elevated()
fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid'])
values = {'allocated': True,
'virtual_interface_id': 3}
db.fixed_ip_update(elevated, fix_addr, values)
self.flags(force_dhcp_release=True)
self.network.deallocate_fixed_ip(context1, fix_addr, 'fake')
def test_fixed_ip_cleanup_fail(self):
# Verify IP is not deallocated if the security group refresh fails.
def network_get(_context, network_id, project_only="allow_none"):
return networks[network_id]
self.stubs.Set(db, 'network_get', network_get)
context1 = context.RequestContext('user', 'project1')
instance = db.instance_create(context1,
{'project_id': 'project1'})
elevated = context1.elevated()
fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid'])
values = {'allocated': True,
'virtual_interface_id': 3}
db.fixed_ip_update(elevated, fix_addr, values)
fixed = db.fixed_ip_get_by_address(elevated, fix_addr)
network = db.network_get(elevated, fixed['network_id'])
def fake_refresh(instance_uuid):
raise test.TestingException()
self.stubs.Set(self.network,
'_do_trigger_security_group_members_refresh_for_instance',
fake_refresh)
self.assertRaises(test.TestingException,
self.network.deallocate_fixed_ip,
context1, fix_addr, 'fake')
fixed = db.fixed_ip_get_by_address(elevated, fix_addr)
self.assertTrue(fixed['allocated'])
class CommonNetworkTestCase(test.TestCase):
def setUp(self):
super(CommonNetworkTestCase, self).setUp()
self.context = context.RequestContext('fake', 'fake')
self.flags(ipv6_backend='rfc2462')
ipv6.reset_backend()
def fake_create_fixed_ips(self, context, network_id, fixed_cidr=None):
return None
def test_deallocate_for_instance_passes_host_info(self):
manager = fake_network.FakeNetworkManager()
db = manager.db
db.instance_get = lambda _x, _y: dict(uuid='ignoreduuid')
db.virtual_interface_delete_by_instance = lambda _x, _y: None
ctx = context.RequestContext('ignore', 'ignore')
db.fixed_ip_get_by_instance = lambda x, y: [dict(address='1.2.3.4',
network_id='ignoredid')]
manager.deallocate_for_instance(
ctx, instance_id='ignore', host='somehost')
self.assertEquals([
(ctx, '1.2.3.4', 'somehost')
], manager.deallocate_fixed_ip_calls)
def test_remove_fixed_ip_from_instance(self):
manager = fake_network.FakeNetworkManager()
manager.remove_fixed_ip_from_instance(self.context, 99, HOST,
'10.0.0.1')
self.assertEquals(manager.deallocate_called, '10.0.0.1')
def test_remove_fixed_ip_from_instance_bad_input(self):
manager = fake_network.FakeNetworkManager()
self.assertRaises(exception.FixedIpNotFoundForSpecificInstance,
manager.remove_fixed_ip_from_instance,
self.context, 99, HOST, 'bad input')
def test_validate_cidrs(self):
manager = fake_network.FakeNetworkManager()
nets = manager.create_networks(None, 'fake', '192.168.0.0/24',
False, 1, 256, None, None, None,
None, None)
self.assertEqual(1, len(nets))
cidrs = [str(net['cidr']) for net in nets]
self.assertTrue('192.168.0.0/24' in cidrs)
def test_validate_cidrs_split_exact_in_half(self):
manager = fake_network.FakeNetworkManager()
nets = manager.create_networks(None, 'fake', '192.168.0.0/24',
False, 2, 128, None, None, None,
None, None)
self.assertEqual(2, len(nets))
cidrs = [str(net['cidr']) for net in nets]
self.assertTrue('192.168.0.0/25' in cidrs)
self.assertTrue('192.168.0.128/25' in cidrs)
def test_validate_cidrs_split_cidr_in_use_middle_of_range(self):
manager = fake_network.FakeNetworkManager()
self.mox.StubOutWithMock(manager.db, 'network_get_all')
ctxt = mox.IgnoreArg()
manager.db.network_get_all(ctxt).AndReturn([{'id': 1,
'cidr': '192.168.2.0/24'}])
self.mox.ReplayAll()
nets = manager.create_networks(None, 'fake', '192.168.0.0/16',
False, 4, 256, None, None, None,
None, None)
self.assertEqual(4, len(nets))
cidrs = [str(net['cidr']) for net in nets]
exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24',
'192.168.4.0/24']
for exp_cidr in exp_cidrs:
self.assertTrue(exp_cidr in cidrs)
self.assertFalse('192.168.2.0/24' in cidrs)
def test_validate_cidrs_smaller_subnet_in_use(self):
manager = fake_network.FakeNetworkManager()
self.mox.StubOutWithMock(manager.db, 'network_get_all')
ctxt = mox.IgnoreArg()
manager.db.network_get_all(ctxt).AndReturn([{'id': 1,
'cidr': '192.168.2.9/25'}])
self.mox.ReplayAll()
# ValueError: requested cidr (192.168.2.0/24) conflicts with
# existing smaller cidr
args = (None, 'fake', '192.168.2.0/24', False, 1, 256, None, None,
None, None, None)
self.assertRaises(ValueError, manager.create_networks, *args)
def test_validate_cidrs_split_smaller_cidr_in_use(self):
manager = fake_network.FakeNetworkManager()
self.mox.StubOutWithMock(manager.db, 'network_get_all')
ctxt = mox.IgnoreArg()
manager.db.network_get_all(ctxt).AndReturn([{'id': 1,
'cidr': '192.168.2.0/25'}])
self.mox.ReplayAll()
nets = manager.create_networks(None, 'fake', '192.168.0.0/16',
False, 4, 256, None, None, None, None,
None)
self.assertEqual(4, len(nets))
cidrs = [str(net['cidr']) for net in nets]
exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24',
'192.168.4.0/24']
for exp_cidr in exp_cidrs:
self.assertTrue(exp_cidr in cidrs)
self.assertFalse('192.168.2.0/24' in cidrs)
def test_validate_cidrs_split_smaller_cidr_in_use2(self):
manager = fake_network.FakeNetworkManager()
self.mox.StubOutWithMock(manager.db, 'network_get_all')
ctxt = mox.IgnoreArg()
manager.db.network_get_all(ctxt).AndReturn([{'id': 1,
'cidr': '192.168.2.9/29'}])
self.mox.ReplayAll()
nets = manager.create_networks(None, 'fake', '192.168.2.0/24',
False, 3, 32, None, None, None, None,
None)
self.assertEqual(3, len(nets))
cidrs = [str(net['cidr']) for net in nets]
exp_cidrs = ['192.168.2.32/27', '192.168.2.64/27', '192.168.2.96/27']
for exp_cidr in exp_cidrs:
self.assertTrue(exp_cidr in cidrs)
self.assertFalse('192.168.2.0/27' in cidrs)
def test_validate_cidrs_split_all_in_use(self):
manager = fake_network.FakeNetworkManager()
self.mox.StubOutWithMock(manager.db, 'network_get_all')
ctxt = mox.IgnoreArg()
in_use = [{'id': 1, 'cidr': '192.168.2.9/29'},
{'id': 2, 'cidr': '192.168.2.64/26'},
{'id': 3, 'cidr': '192.168.2.128/26'}]
manager.db.network_get_all(ctxt).AndReturn(in_use)
self.mox.ReplayAll()
args = (None, 'fake', '192.168.2.0/24', False, 3, 64, None, None,
None, None, None)
# ValueError: Not enough subnets avail to satisfy requested
# num_networks - some subnets in requested range already in use
self.assertRaises(ValueError, manager.create_networks, *args)
def test_validate_cidrs_one_in_use(self):
manager = fake_network.FakeNetworkManager()
args = (None, 'fake', '192.168.0.0/24', False, 2, 256, None, None,
None, None, None)
# ValueError: network_size * num_networks exceeds cidr size
self.assertRaises(ValueError, manager.create_networks, *args)
def test_validate_cidrs_already_used(self):
manager = fake_network.FakeNetworkManager()
self.mox.StubOutWithMock(manager.db, 'network_get_all')
ctxt = mox.IgnoreArg()
manager.db.network_get_all(ctxt).AndReturn([{'id': 1,
'cidr': '192.168.0.0/24'}])
self.mox.ReplayAll()
# ValueError: cidr already in use
args = (None, 'fake', '192.168.0.0/24', False, 1, 256, None, None,
None, None, None)
self.assertRaises(ValueError, manager.create_networks, *args)
def test_validate_cidrs_too_many(self):
manager = fake_network.FakeNetworkManager()
args = (None, 'fake', '192.168.0.0/24', False, 200, 256, None, None,
None, None, None)
# ValueError: Not enough subnets avail to satisfy requested
# num_networks
self.assertRaises(ValueError, manager.create_networks, *args)
def test_validate_cidrs_split_partial(self):
manager = fake_network.FakeNetworkManager()
nets = manager.create_networks(None, 'fake', '192.168.0.0/16',
False, 2, 256, None, None, None, None,
None)
returned_cidrs = [str(net['cidr']) for net in nets]
self.assertTrue('192.168.0.0/24' in returned_cidrs)
self.assertTrue('192.168.1.0/24' in returned_cidrs)
def test_validate_cidrs_conflict_existing_supernet(self):
manager = fake_network.FakeNetworkManager()
self.mox.StubOutWithMock(manager.db, 'network_get_all')
ctxt = mox.IgnoreArg()
fakecidr = [{'id': 1, 'cidr': '192.168.0.0/8'}]
manager.db.network_get_all(ctxt).AndReturn(fakecidr)
self.mox.ReplayAll()
args = (None, 'fake', '192.168.0.0/24', False, 1, 256, None, None,
None, None, None)
# ValueError: requested cidr (192.168.0.0/24) conflicts
# with existing supernet
self.assertRaises(ValueError, manager.create_networks, *args)
def test_create_networks(self):
cidr = '192.168.0.0/24'
manager = fake_network.FakeNetworkManager()
self.stubs.Set(manager, '_create_fixed_ips',
self.fake_create_fixed_ips)
args = [None, 'foo', cidr, None, 1, 256, 'fd00::/48', None, None,
None, None, None]
self.assertTrue(manager.create_networks(*args))
def test_create_networks_cidr_already_used(self):
manager = fake_network.FakeNetworkManager()
self.mox.StubOutWithMock(manager.db, 'network_get_all')
ctxt = mox.IgnoreArg()
fakecidr = [{'id': 1, 'cidr': '192.168.0.0/24'}]
manager.db.network_get_all(ctxt).AndReturn(fakecidr)
self.mox.ReplayAll()
args = [None, 'foo', '192.168.0.0/24', None, 1, 256,
'fd00::/48', None, None, None, None, None]
self.assertRaises(ValueError, manager.create_networks, *args)
def test_create_networks_many(self):
cidr = '192.168.0.0/16'
manager = fake_network.FakeNetworkManager()
self.stubs.Set(manager, '_create_fixed_ips',
self.fake_create_fixed_ips)
args = [None, 'foo', cidr, None, 10, 256, 'fd00::/48', None, None,
None, None, None]
self.assertTrue(manager.create_networks(*args))
def test_get_instance_uuids_by_ip_regex(self):
manager = fake_network.FakeNetworkManager()
_vifs = manager.db.virtual_interface_get_all(None)
fake_context = context.RequestContext('user', 'project')
# Greedy: get everything
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '.*'})
self.assertEqual(len(res), len(_vifs))
# Doesn't exist
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '10.0.0.1'})
self.assertFalse(res)
# Get instance 1
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '172.16.0.2'})
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])
# Get instance 2
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '173.16.0.2'})
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['instance_uuid'], _vifs[2]['instance_uuid'])
# Get instance 0 and 1
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '172.16.0.*'})
self.assertTrue(res)
self.assertEqual(len(res), 2)
self.assertEqual(res[0]['instance_uuid'], _vifs[0]['instance_uuid'])
self.assertEqual(res[1]['instance_uuid'], _vifs[1]['instance_uuid'])
# Get instance 1 and 2
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '17..16.0.2'})
self.assertTrue(res)
self.assertEqual(len(res), 2)
self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])
self.assertEqual(res[1]['instance_uuid'], _vifs[2]['instance_uuid'])
def test_get_instance_uuids_by_ipv6_regex(self):
manager = fake_network.FakeNetworkManager()
_vifs = manager.db.virtual_interface_get_all(None)
fake_context = context.RequestContext('user', 'project')
# Greedy: get everything
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': '.*'})
self.assertEqual(len(res), len(_vifs))
# Doesn't exist
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': '.*1034.*'})
self.assertFalse(res)
# Get instance 1
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': '2001:.*2'})
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])
# Get instance 2
ip6 = '2001:db8:69:1f:dead:beff:feff:ef03'
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': ip6})
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['instance_uuid'], _vifs[2]['instance_uuid'])
# Get instance 0 and 1
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': '.*ef0[1,2]'})
self.assertTrue(res)
self.assertEqual(len(res), 2)
self.assertEqual(res[0]['instance_uuid'], _vifs[0]['instance_uuid'])
self.assertEqual(res[1]['instance_uuid'], _vifs[1]['instance_uuid'])
# Get instance 1 and 2
ip6 = '2001:db8:69:1.:dead:beff:feff:ef0.'
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': ip6})
self.assertTrue(res)
self.assertEqual(len(res), 2)
self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])
self.assertEqual(res[1]['instance_uuid'], _vifs[2]['instance_uuid'])
def test_get_instance_uuids_by_ip(self):
manager = fake_network.FakeNetworkManager()
_vifs = manager.db.virtual_interface_get_all(None)
fake_context = context.RequestContext('user', 'project')
# No regex for you!
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'fixed_ip': '.*'})
self.assertFalse(res)
# Doesn't exist
ip = '10.0.0.1'
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'fixed_ip': ip})
self.assertFalse(res)
# Get instance 1
ip = '172.16.0.2'
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'fixed_ip': ip})
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])
# Get instance 2
ip = '173.16.0.2'
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'fixed_ip': ip})
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['instance_uuid'], _vifs[2]['instance_uuid'])
def test_get_network(self):
manager = fake_network.FakeNetworkManager()
fake_context = context.RequestContext('user', 'project')
self.mox.StubOutWithMock(manager.db, 'network_get_by_uuid')
manager.db.network_get_by_uuid(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(networks[0])
self.mox.ReplayAll()
uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
network = manager.get_network(fake_context, uuid)
self.assertEqual(network['uuid'], uuid)
def test_get_network_not_found(self):
manager = fake_network.FakeNetworkManager()
fake_context = context.RequestContext('user', 'project')
self.mox.StubOutWithMock(manager.db, 'network_get_by_uuid')
manager.db.network_get_by_uuid(
mox.IgnoreArg(),
mox.IgnoreArg()).AndRaise(
exception.NetworkNotFoundForUUID(uuid='fake')
)
self.mox.ReplayAll()
uuid = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
self.assertRaises(exception.NetworkNotFound,
manager.get_network, fake_context, uuid)
def test_get_all_networks(self):
manager = fake_network.FakeNetworkManager()
fake_context = context.RequestContext('user', 'project')
self.mox.StubOutWithMock(manager.db, 'network_get_all')
manager.db.network_get_all(mox.IgnoreArg()).AndReturn(networks)
self.mox.ReplayAll()
output = manager.get_all_networks(fake_context)
self.assertEqual(len(networks), 2)
self.assertEqual(output[0]['uuid'],
'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa')
self.assertEqual(output[1]['uuid'],
'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb')
def test_disassociate_network(self):
manager = fake_network.FakeNetworkManager()
fake_context = context.RequestContext('user', 'project')
self.mox.StubOutWithMock(manager.db, 'network_get_by_uuid')
manager.db.network_get_by_uuid(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(networks[0])
self.mox.ReplayAll()
uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
manager.disassociate_network(fake_context, uuid)
def test_disassociate_network_not_found(self):
manager = fake_network.FakeNetworkManager()
fake_context = context.RequestContext('user', 'project')
self.mox.StubOutWithMock(manager.db, 'network_get_by_uuid')
manager.db.network_get_by_uuid(
mox.IgnoreArg(),
mox.IgnoreArg()).AndRaise(
exception.NetworkNotFoundForUUID(uuid='fake')
)
self.mox.ReplayAll()
uuid = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
self.assertRaises(exception.NetworkNotFound,
manager.disassociate_network, fake_context, uuid)
class TestRPCFixedManager(network_manager.RPCAllocateFixedIP,
network_manager.NetworkManager):
"""Dummy manager that implements RPCAllocateFixedIP."""
class RPCAllocateTestCase(test.TestCase):
"""Tests nova.network.manager.RPCAllocateFixedIP."""
def setUp(self):
super(RPCAllocateTestCase, self).setUp()
self.rpc_fixed = TestRPCFixedManager()
self.context = context.RequestContext('fake', 'fake')
def test_rpc_allocate(self):
"""Test to verify bug 855030 doesn't resurface.
Makes sure _rpc_allocate_fixed_ip returns a value so the call
returns properly and the greenpool completes."""
address = '10.10.10.10'
def fake_allocate(*args, **kwargs):
return address
def fake_network_get(*args, **kwargs):
return {}
self.stubs.Set(self.rpc_fixed, 'allocate_fixed_ip', fake_allocate)
self.stubs.Set(self.rpc_fixed.db, 'network_get', fake_network_get)
rval = self.rpc_fixed._rpc_allocate_fixed_ip(self.context,
'fake_instance',
'fake_network')
self.assertEqual(rval, address)
class BackdoorPortTestCase(test.TestCase):
"""Tests nova.network.manager.get_backdoor_port."""
def setUp(self):
super(BackdoorPortTestCase, self).setUp()
self.manager = network_manager.NetworkManager()
self.manager.backdoor_port = 59697
self.context = context.RequestContext('fake', 'fake')
def test_backdoor_port(self):
port = self.manager.get_backdoor_port(self.context)
self.assertEqual(port, self.manager.backdoor_port)
class TestFloatingIPManager(network_manager.FloatingIP,
network_manager.NetworkManager):
"""Dummy manager that implements FloatingIP."""
class AllocateTestCase(test.TestCase):
def test_allocate_for_instance(self):
address = "10.10.10.10"
self.flags(auto_assign_floating_ip=True)
self.conductor = self.start_service(
'conductor', manager=CONF.conductor.manager)
self.compute = self.start_service('compute')
self.network = self.start_service('network')
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id,
self.project_id,
is_admin=True)
db.floating_ip_create(self.context,
{'address': address,
'pool': 'nova'})
inst = db.instance_create(self.context, {'host': self.compute.host,
'display_name': HOST,
'instance_type_id': 1})
networks = db.network_get_all(self.context)
for network in networks:
db.network_update(self.context, network['id'],
{'host': self.network.host})
project_id = self.context.project_id
nw_info = self.network.allocate_for_instance(self.context,
instance_id=inst['id'], instance_uuid=inst['uuid'],
host=inst['host'], vpn=None, rxtx_factor=3,
project_id=project_id)
self.assertEquals(1, len(nw_info))
fixed_ip = nw_info.fixed_ips()[0]['address']
self.assertTrue(utils.is_valid_ipv4(fixed_ip))
self.network.deallocate_for_instance(self.context,
instance_id=inst['id'],
fixed_ips=fixed_ip,
host=self.network.host,
project_id=project_id)
class FloatingIPTestCase(test.TestCase):
"""Tests nova.network.manager.FloatingIP."""
def setUp(self):
super(FloatingIPTestCase, self).setUp()
self.tempdir = self.useFixture(fixtures.TempDir()).path
self.flags(log_dir=self.tempdir)
self.network = TestFloatingIPManager()
self.network.db = db
self.project_id = 'testproject'
self.context = context.RequestContext('testuser', self.project_id,
is_admin=False)
def test_disassociate_floating_ip_multi_host_calls(self):
floating_ip = {
'fixed_ip_id': 12
}
fixed_ip = {
'network_id': None,
'instance_uuid': 'instance-uuid'
}
network = {
'multi_host': True
}
instance = {
'host': 'some-other-host'
}
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
self.stubs.Set(self.network.db,
'floating_ip_get_by_address',
lambda _x, _y: floating_ip)
self.stubs.Set(self.network,
'_floating_ip_owned_by_project',
lambda _x, _y: True)
self.stubs.Set(self.network.db,
'fixed_ip_get',
lambda _x, _y: fixed_ip)
self.stubs.Set(self.network,
'_get_network_by_id',
lambda _x, _y: network)
self.stubs.Set(self.network.db,
'instance_get_by_uuid',
lambda _x, _y: instance)
self.stubs.Set(self.network.db,
'service_get_by_host_and_topic',
lambda _x, _y, _z: 'service')
self.stubs.Set(self.network.servicegroup_api,
'service_is_up',
lambda _x: True)
self.mox.StubOutWithMock(
self.network.network_rpcapi, '_disassociate_floating_ip')
self.network.network_rpcapi._disassociate_floating_ip(
ctxt, 'fl_ip', mox.IgnoreArg(), 'some-other-host', 'instance-uuid')
self.mox.ReplayAll()
self.network.disassociate_floating_ip(ctxt, 'fl_ip', True)
def test_associate_floating_ip_multi_host_calls(self):
floating_ip = {
'fixed_ip_id': None
}
fixed_ip = {
'network_id': None,
'instance_uuid': 'instance-uuid'
}
network = {
'multi_host': True
}
instance = {
'host': 'some-other-host'
}
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
self.stubs.Set(self.network.db,
'floating_ip_get_by_address',
lambda _x, _y: floating_ip)
self.stubs.Set(self.network,
'_floating_ip_owned_by_project',
lambda _x, _y: True)
self.stubs.Set(self.network.db,
'fixed_ip_get_by_address',
lambda _x, _y: fixed_ip)
self.stubs.Set(self.network,
'_get_network_by_id',
lambda _x, _y: network)
self.stubs.Set(self.network.db,
'instance_get_by_uuid',
lambda _x, _y: instance)
self.mox.StubOutWithMock(
self.network.network_rpcapi, '_associate_floating_ip')
self.network.network_rpcapi._associate_floating_ip(
ctxt, 'fl_ip', 'fix_ip', mox.IgnoreArg(), 'some-other-host',
'instance-uuid')
self.mox.ReplayAll()
self.network.associate_floating_ip(ctxt, 'fl_ip', 'fix_ip', True)
def test_double_deallocation(self):
instance_ref = db.api.instance_create(self.context,
{"project_id": self.project_id})
# Run it twice to make it fail if it does not handle
# instances without fixed networks.
# If either call fails, it does not handle having no addresses.
self.network.deallocate_for_instance(self.context,
instance_id=instance_ref['id'])
self.network.deallocate_for_instance(self.context,
instance_id=instance_ref['id'])
def test_deallocation_deleted_instance(self):
self.stubs.Set(self.network, '_teardown_network_on_host',
lambda *args, **kwargs: None)
instance = db.api.instance_create(self.context, {
'project_id': self.project_id, 'deleted': True})
network = db.api.network_create_safe(self.context.elevated(), {
'project_id': self.project_id})
addr = db.fixed_ip_create(self.context, {'allocated': True,
'instance_uuid': instance['uuid'], 'address': '10.1.1.1',
'network_id': network['id']})
fixed = db.fixed_ip_get_by_address(
self.context.elevated(read_deleted='yes'), addr)
db.api.floating_ip_create(self.context, {
'address': '10.10.10.10', 'instance_uuid': instance['uuid'],
'fixed_ip_id': fixed['id'],
'project_id': self.project_id})
self.network.deallocate_for_instance(self.context,
instance_id=instance['id'])
def test_deallocation_duplicate_floating_ip(self):
self.stubs.Set(self.network, '_teardown_network_on_host',
lambda *args, **kwargs: None)
instance = db.api.instance_create(self.context, {
'project_id': self.project_id})
network = db.api.network_create_safe(self.context.elevated(), {
'project_id': self.project_id})
addr = db.fixed_ip_create(self.context, {'allocated': True,
'instance_uuid': instance['uuid'], 'address': '10.1.1.1',
'network_id': network['id']})
fixed = db.fixed_ip_get_by_address(
self.context.elevated(read_deleted='yes'), addr)
db.api.floating_ip_create(self.context, {
'address': '10.10.10.10',
'deleted': True})
db.api.floating_ip_create(self.context, {
'address': '10.10.10.10', 'instance_uuid': instance['uuid'],
'fixed_ip_id': fixed['id'],
'project_id': self.project_id})
self.network.deallocate_for_instance(self.context,
instance_id=instance['id'])
def test_migrate_instance_start(self):
called = {'count': 0}
def fake_floating_ip_get_by_address(context, address):
return {'address': address,
'fixed_ip_id': 0}
def fake_is_stale_floating_ip_address(context, floating_ip):
return floating_ip['address'] == '172.24.4.23'
def fake_fixed_ip_get(context, fixed_ip_id):
return {'instance_uuid': 'fake_uuid',
'address': '10.0.0.2'}
def fake_remove_floating_ip(floating_addr, fixed_addr, interface):
called['count'] += 1
def fake_floating_ip_update(context, address, args):
pass
self.stubs.Set(self.network.db, 'floating_ip_get_by_address',
fake_floating_ip_get_by_address)
self.stubs.Set(self.network, '_is_stale_floating_ip_address',
fake_is_stale_floating_ip_address)
self.stubs.Set(self.network.db, 'fixed_ip_get', fake_fixed_ip_get)
self.stubs.Set(self.network.db, 'floating_ip_update',
fake_floating_ip_update)
self.stubs.Set(self.network.l3driver, 'remove_floating_ip',
fake_remove_floating_ip)
self.mox.ReplayAll()
addresses = ['172.24.4.23', '172.24.4.24', '172.24.4.25']
self.network.migrate_instance_start(self.context,
instance_uuid=FAKEUUID,
floating_addresses=addresses,
rxtx_factor=3,
project_id=self.project_id,
source='fake_source',
dest='fake_dest')
self.assertEqual(called['count'], 2)
def test_migrate_instance_finish(self):
called = {'count': 0}
def fake_floating_ip_get_by_address(context, address):
return {'address': address,
'fixed_ip_id': 0}
def fake_is_stale_floating_ip_address(context, floating_ip):
return floating_ip['address'] == '172.24.4.23'
def fake_fixed_ip_get(context, fixed_ip_id):
return {'instance_uuid': 'fake_uuid',
'address': '10.0.0.2'}
def fake_add_floating_ip(floating_addr, fixed_addr, interface):
called['count'] += 1
def fake_floating_ip_update(context, address, args):
pass
self.stubs.Set(self.network.db, 'floating_ip_get_by_address',
fake_floating_ip_get_by_address)
self.stubs.Set(self.network, '_is_stale_floating_ip_address',
fake_is_stale_floating_ip_address)
self.stubs.Set(self.network.db, 'fixed_ip_get', fake_fixed_ip_get)
self.stubs.Set(self.network.db, 'floating_ip_update',
fake_floating_ip_update)
self.stubs.Set(self.network.l3driver, 'add_floating_ip',
fake_add_floating_ip)
self.mox.ReplayAll()
addresses = ['172.24.4.23', '172.24.4.24', '172.24.4.25']
self.network.migrate_instance_finish(self.context,
instance_uuid=FAKEUUID,
floating_addresses=addresses,
host='fake_dest',
rxtx_factor=3,
project_id=self.project_id,
source='fake_source')
self.assertEqual(called['count'], 2)
def test_floating_dns_create_conflict(self):
zone = "example.org"
address1 = "10.10.10.11"
name1 = "foo"
name2 = "bar"
self.network.add_dns_entry(self.context, address1, name1, "A", zone)
self.assertRaises(exception.FloatingIpDNSExists,
self.network.add_dns_entry, self.context,
address1, name1, "A", zone)
def test_floating_create_and_get(self):
zone = "example.org"
address1 = "10.10.10.11"
name1 = "foo"
name2 = "bar"
entries = self.network.get_dns_entries_by_address(self.context,
address1, zone)
self.assertFalse(entries)
self.network.add_dns_entry(self.context, address1, name1, "A", zone)
self.network.add_dns_entry(self.context, address1, name2, "A", zone)
entries = self.network.get_dns_entries_by_address(self.context,
address1, zone)
self.assertEquals(len(entries), 2)
self.assertEquals(entries[0], name1)
self.assertEquals(entries[1], name2)
entries = self.network.get_dns_entries_by_name(self.context,
name1, zone)
self.assertEquals(len(entries), 1)
self.assertEquals(entries[0], address1)
def test_floating_dns_delete(self):
zone = "example.org"
address1 = "10.10.10.11"
name1 = "foo"
name2 = "bar"
self.network.add_dns_entry(self.context, address1, name1, "A", zone)
self.network.add_dns_entry(self.context, address1, name2, "A", zone)
self.network.delete_dns_entry(self.context, name1, zone)
entries = self.network.get_dns_entries_by_address(self.context,
address1, zone)
self.assertEquals(len(entries), 1)
self.assertEquals(entries[0], name2)
self.assertRaises(exception.NotFound,
self.network.delete_dns_entry, self.context,
name1, zone)
def test_floating_dns_domains_public(self):
zone1 = "testzone"
domain1 = "example.org"
domain2 = "example.com"
address1 = '10.10.10.10'
entryname = 'testentry'
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
self.assertRaises(exception.AdminRequired,
self.network.create_public_dns_domain, self.context,
domain1, zone1)
self.network.create_public_dns_domain(context_admin, domain1,
'testproject')
self.network.create_public_dns_domain(context_admin, domain2,
'fakeproject')
domains = self.network.get_dns_domains(self.context)
self.assertEquals(len(domains), 2)
self.assertEquals(domains[0]['domain'], domain1)
self.assertEquals(domains[1]['domain'], domain2)
self.assertEquals(domains[0]['project'], 'testproject')
self.assertEquals(domains[1]['project'], 'fakeproject')
self.network.add_dns_entry(self.context, address1, entryname,
'A', domain1)
entries = self.network.get_dns_entries_by_name(self.context,
entryname, domain1)
self.assertEquals(len(entries), 1)
self.assertEquals(entries[0], address1)
self.assertRaises(exception.AdminRequired,
self.network.delete_dns_domain, self.context,
domain1)
self.network.delete_dns_domain(context_admin, domain1)
self.network.delete_dns_domain(context_admin, domain2)
# Verify that deleting the domain deleted the associated entry
entries = self.network.get_dns_entries_by_name(self.context,
entryname, domain1)
self.assertFalse(entries)
def test_delete_all_by_ip(self):
domain1 = "example.org"
domain2 = "example.com"
address = "10.10.10.10"
name1 = "foo"
name2 = "bar"
def fake_domains(context):
return [{'domain': 'example.org', 'scope': 'public'},
{'domain': 'example.com', 'scope': 'public'},
{'domain': 'test.example.org', 'scope': 'public'}]
self.stubs.Set(self.network, 'get_dns_domains', fake_domains)
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
self.network.create_public_dns_domain(context_admin, domain1,
'testproject')
self.network.create_public_dns_domain(context_admin, domain2,
'fakeproject')
domains = self.network.get_dns_domains(self.context)
for domain in domains:
self.network.add_dns_entry(self.context, address,
name1, "A", domain['domain'])
self.network.add_dns_entry(self.context, address,
name2, "A", domain['domain'])
entries = self.network.get_dns_entries_by_address(self.context,
address,
domain['domain'])
self.assertEquals(len(entries), 2)
self.network._delete_all_entries_for_ip(self.context, address)
for domain in domains:
entries = self.network.get_dns_entries_by_address(self.context,
address,
domain['domain'])
self.assertFalse(entries)
self.network.delete_dns_domain(context_admin, domain1)
self.network.delete_dns_domain(context_admin, domain2)
def test_mac_conflicts(self):
# Make sure MAC collisions are retried.
self.flags(create_unique_mac_address_attempts=3)
ctxt = context.RequestContext('testuser', 'testproject', is_admin=True)
macs = ['bb:bb:bb:bb:bb:bb', 'aa:aa:aa:aa:aa:aa']
# Create a VIF with aa:aa:aa:aa:aa:aa
crash_test_dummy_vif = {
'address': macs[1],
'instance_uuid': 'fake_uuid',
'network_id': 'fake_net',
'uuid': 'fake_uuid',
}
self.network.db.virtual_interface_create(ctxt, crash_test_dummy_vif)
# Hand out a collision first, then a legit MAC
def fake_gen_mac():
return macs.pop()
self.stubs.Set(utils, 'generate_mac_address', fake_gen_mac)
# SQLite doesn't seem to honor the uniqueness constraint on the
# address column, so fake the collision-avoidance here
def fake_vif_save(vif):
if vif.address == crash_test_dummy_vif['address']:
raise exception.DBError("If you're smart, you'll retry!")
self.stubs.Set(models.VirtualInterface, 'save', fake_vif_save)
# Attempt to add another and make sure that both MACs are consumed
# by the retry loop
self.network.add_virtual_interface(ctxt, 'fake_uuid', 'fake_net')
self.assertEqual(macs, [])
def test_deallocate_client_exceptions(self):
# Ensure that FloatingIpNotFoundForAddress is wrapped.
self.mox.StubOutWithMock(self.network.db, 'floating_ip_get_by_address')
self.network.db.floating_ip_get_by_address(
self.context, '1.2.3.4').AndRaise(
exception.FloatingIpNotFoundForAddress(address='fake'))
self.mox.ReplayAll()
self.assertRaises(rpc_common.ClientException,
self.network.deallocate_floating_ip,
self.context, '1.2.3.4')
def test_associate_client_exceptions(self):
# Ensure that FloatingIpNotFoundForAddress is wrapped.
self.mox.StubOutWithMock(self.network.db, 'floating_ip_get_by_address')
self.network.db.floating_ip_get_by_address(
self.context, '1.2.3.4').AndRaise(
exception.FloatingIpNotFoundForAddress(address='fake'))
self.mox.ReplayAll()
self.assertRaises(rpc_common.ClientException,
self.network.associate_floating_ip,
self.context, '1.2.3.4', '10.0.0.1')
def test_disassociate_client_exceptions(self):
# Ensure that FloatingIpNotFoundForAddress is wrapped.
self.mox.StubOutWithMock(self.network.db, 'floating_ip_get_by_address')
self.network.db.floating_ip_get_by_address(
self.context, '1.2.3.4').AndRaise(
exception.FloatingIpNotFoundForAddress(address='fake'))
self.mox.ReplayAll()
self.assertRaises(rpc_common.ClientException,
self.network.disassociate_floating_ip,
self.context, '1.2.3.4')
def test_get_floating_ip_client_exceptions(self):
# Ensure that FloatingIpNotFoundForAddress is wrapped.
self.mox.StubOutWithMock(self.network.db, 'floating_ip_get')
self.network.db.floating_ip_get(self.context, 'fake-id').AndRaise(
exception.FloatingIpNotFound(id='fake'))
self.mox.ReplayAll()
self.assertRaises(rpc_common.ClientException,
self.network.get_floating_ip,
self.context, 'fake-id')
class InstanceDNSTestCase(test.TestCase):
"""Tests nova.network.manager instance DNS."""
def setUp(self):
super(InstanceDNSTestCase, self).setUp()
self.tempdir = self.useFixture(fixtures.TempDir()).path
self.flags(log_dir=self.tempdir)
self.network = TestFloatingIPManager()
self.network.db = db
self.project_id = 'testproject'
self.context = context.RequestContext('testuser', self.project_id,
is_admin=False)
def test_dns_domains_private(self):
zone1 = 'testzone'
domain1 = 'example.org'
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
self.assertRaises(exception.AdminRequired,
self.network.create_private_dns_domain, self.context,
domain1, zone1)
self.network.create_private_dns_domain(context_admin, domain1, zone1)
domains = self.network.get_dns_domains(self.context)
self.assertEquals(len(domains), 1)
self.assertEquals(domains[0]['domain'], domain1)
self.assertEquals(domains[0]['availability_zone'], zone1)
self.assertRaises(exception.AdminRequired,
self.network.delete_dns_domain, self.context,
domain1)
self.network.delete_dns_domain(context_admin, domain1)
domain1 = "example.org"
domain2 = "example.com"
class LdapDNSTestCase(test.TestCase):
"""Tests nova.network.ldapdns.LdapDNS."""
def setUp(self):
super(LdapDNSTestCase, self).setUp()
self.useFixture(test.ReplaceModule('ldap', fake_ldap))
dns_class = 'nova.network.ldapdns.LdapDNS'
self.driver = importutils.import_object(dns_class)
attrs = {'objectClass': ['domainrelatedobject', 'dnsdomain',
'domain', 'dcobject', 'top'],
'associateddomain': ['root'],
'dc': ['root']}
self.driver.lobj.add_s("ou=hosts,dc=example,dc=org", attrs.items())
self.driver.create_domain(domain1)
self.driver.create_domain(domain2)
def tearDown(self):
self.driver.delete_domain(domain1)
self.driver.delete_domain(domain2)
super(LdapDNSTestCase, self).tearDown()
def test_ldap_dns_domains(self):
domains = self.driver.get_domains()
self.assertEqual(len(domains), 2)
self.assertIn(domain1, domains)
self.assertIn(domain2, domains)
def test_ldap_dns_create_conflict(self):
address1 = "10.10.10.11"
name1 = "foo"
name2 = "bar"
self.driver.create_entry(name1, address1, "A", domain1)
self.assertRaises(exception.FloatingIpDNSExists,
self.driver.create_entry,
name1, address1, "A", domain1)
def test_ldap_dns_create_and_get(self):
address1 = "10.10.10.11"
name1 = "foo"
name2 = "bar"
entries = self.driver.get_entries_by_address(address1, domain1)
self.assertFalse(entries)
self.driver.create_entry(name1, address1, "A", domain1)
self.driver.create_entry(name2, address1, "A", domain1)
entries = self.driver.get_entries_by_address(address1, domain1)
self.assertEquals(len(entries), 2)
self.assertEquals(entries[0], name1)
self.assertEquals(entries[1], name2)
entries = self.driver.get_entries_by_name(name1, domain1)
self.assertEquals(len(entries), 1)
self.assertEquals(entries[0], address1)
def test_ldap_dns_delete(self):
address1 = "10.10.10.11"
name1 = "foo"
name2 = "bar"
self.driver.create_entry(name1, address1, "A", domain1)
self.driver.create_entry(name2, address1, "A", domain1)
entries = self.driver.get_entries_by_address(address1, domain1)
self.assertEquals(len(entries), 2)
self.driver.delete_entry(name1, domain1)
entries = self.driver.get_entries_by_address(address1, domain1)
LOG.debug("entries: %s" % entries)
self.assertEquals(len(entries), 1)
self.assertEquals(entries[0], name2)
self.assertRaises(exception.NotFound,
self.driver.delete_entry,
name1, domain1)
|
maoy/zknova
|
nova/tests/network/test_manager.py
|
Python
|
apache-2.0
| 94,348
|
[
"FEFF"
] |
b40d8d80d522338b2939709ebbced70eb2a555c3ec64e798788406d8b89dab1b
|
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
"""
The :mod:`lib` module contains most of the components and libraries that make
OpenLP work.
"""
import logging
import os
from PyQt4 import QtCore, QtGui, Qt
log = logging.getLogger(__name__)
class ServiceItemContext(object):
"""
The context in which a Service Item is being generated
"""
Preview = 0
Live = 1
Service = 2
class ImageSource(object):
"""
This enumeration class represents different image sources. An image source
states where an image is used. This enumeration class is needed in the
context of the :class:`~openlp.core.lib.imagemanager`.
``ImagePlugin``
This states that an image is being used by the image plugin.
``Theme``
This states that the image is used by a theme.
"""
ImagePlugin = 1
Theme = 2
class MediaType(object):
"""
An enumeration class for types of media.
"""
Audio = 1
Video = 2
class SlideLimits(object):
"""
Provides an enumeration for the behaviour of OpenLP at the end limits of
each service item when pressing the up/down arrow keys.
"""
End = 1
Wrap = 2
Next = 3
class ServiceItemAction(object):
"""
Provides an enumeration for the required action when moving between
service items with the left/right arrow keys.
"""
Previous = 1
PreviousLastSlide = 2
Next = 3
def translate(context, text, comment=None, encoding=QtCore.QCoreApplication.CodecForTr, n=-1,
translate=QtCore.QCoreApplication.translate):
"""
A special shortcut method to wrap around the Qt4 translation functions.
This abstracts the translation procedure so that we can change it at a
later date if necessary, without having to redo the whole of OpenLP.
``context``
The translation context, used to give each string a context or a
namespace.
``text``
The text to put into the translation tables for translation.
``comment``
An identifying string for when the same text is used in different roles
within the same context.
"""
return translate(context, text, comment, encoding, n)
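# Illustrative use of translate() (a sketch; the context and string values
# below are assumptions for the example, not taken from OpenLP itself):
#
#     title = translate('OpenLP.MainWindow', '&File',
#                       'Menu bar entry with accelerator')
#
# Qt looks the string up in the loaded translation tables for the given
# context and returns the localized text, falling back to the source string
# when no translation is installed.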
def get_text_file_string(text_file):
"""
Open a file and return its content as a unicode string. If the supplied file
name is not a file then the function returns False. If there is an error
loading the file or the content can't be decoded then the function will
return None.
``text_file``
The name of the file.
"""
if not os.path.isfile(text_file):
return False
file_handle = None
content_string = None
try:
file_handle = open(text_file, u'r')
if not file_handle.read(3) == '\xEF\xBB\xBF':
# no BOM was found
file_handle.seek(0)
content = file_handle.read()
content_string = content.decode(u'utf-8')
except (IOError, UnicodeError):
log.exception(u'Failed to open text file %s' % text_file)
finally:
if file_handle:
file_handle.close()
return content_string
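# A minimal sketch of the BOM handling above (the file name is hypothetical):
#
#     with open(u'/tmp/example.txt', 'wb') as f:
#         f.write('\xEF\xBB\xBF' + u'Amazing Grace'.encode('utf-8'))
#     get_text_file_string(u'/tmp/example.txt')  # -> u'Amazing Grace'
#
# The three-byte UTF-8 BOM is consumed by read(3); files without a BOM are
# rewound with seek(0) so no content is lost before decoding.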
def str_to_bool(stringvalue):
"""
Convert a string version of a boolean into a real boolean.
``stringvalue``
The string value to examine and convert to a boolean type.
"""
if isinstance(stringvalue, bool):
return stringvalue
return unicode(stringvalue).strip().lower() in (u'true', u'yes', u'y')
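# Doctest-style examples of str_to_bool (illustrative values):
#
#     >>> str_to_bool(u'Yes')
#     True
#     >>> str_to_bool(u' true ')
#     True
#     >>> str_to_bool(u'0')
#     False
#     >>> str_to_bool(False)
#     False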
def build_icon(icon):
"""
Build a QIcon instance from an existing QIcon, a resource location, or a
physical file location. If the icon is a QIcon instance, that icon is
simply returned. If not, it builds a QIcon instance from the resource or
file name.
``icon``
The icon to build. This can be a QIcon, a resource string in the form
``:/resource/file.png``, or a file location like ``/path/to/file.png``.
"""
button_icon = QtGui.QIcon()
if isinstance(icon, QtGui.QIcon):
button_icon = icon
elif isinstance(icon, basestring):
if icon.startswith(u':/'):
button_icon.addPixmap(QtGui.QPixmap(icon), QtGui.QIcon.Normal, QtGui.QIcon.Off)
else:
button_icon.addPixmap(QtGui.QPixmap.fromImage(QtGui.QImage(icon)), QtGui.QIcon.Normal, QtGui.QIcon.Off)
elif isinstance(icon, QtGui.QImage):
button_icon.addPixmap(QtGui.QPixmap.fromImage(icon), QtGui.QIcon.Normal, QtGui.QIcon.Off)
return button_icon
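# Illustrative calls to build_icon (the resource and file paths below are
# assumptions for the example, not real OpenLP resources):
#
#     icon = build_icon(u':/icons/song.png')       # Qt resource path
#     icon = build_icon(u'/tmp/custom_icon.png')   # file on disk
#     icon = build_icon(icon)                      # a QIcon is returned as-is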
def image_to_byte(image):
"""
Resize an image to fit on the current screen for the web and return
it as a byte stream.
``image``
The image to be converted.
"""
log.debug(u'image_to_byte - start')
byte_array = QtCore.QByteArray()
# use a buffer to store the image in the byte array
buffie = QtCore.QBuffer(byte_array)
buffie.open(QtCore.QIODevice.WriteOnly)
image.save(buffie, "PNG")
log.debug(u'image_to_byte - end')
# convert to base64 encoding so the binary data is not mangled in transit
return byte_array.toBase64()
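# A sketch of how the base64 result can be consumed (the data URI usage is
# an assumption about the caller, not taken from this module):
#
#     encoded = image_to_byte(image)
#     html = u'<img src="data:image/png;base64,%s">' % encoded
#
# Base64 keeps the PNG bytes intact when embedded in text protocols such as
# HTML or JSON.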
def create_thumb(image_path, thumb_path, return_icon=True, size=None):
"""
Create a thumbnail from the given image path and, depending on
``return_icon``, return an icon built from this thumbnail.
``image_path``
The image file to create the icon from.
``thumb_path``
The filename to save the thumbnail to.
``return_icon``
States if an icon should be built and returned from the thumb. Defaults
to ``True``.
``size``
Allows a custom size to be specified. Defaults to ``None``, which means
that a default height of 88 is used.
"""
ext = os.path.splitext(thumb_path)[1].lower()
reader = QtGui.QImageReader(image_path)
if size is None:
ratio = float(reader.size().width()) / float(reader.size().height())
reader.setScaledSize(QtCore.QSize(int(ratio * 88), 88))
else:
reader.setScaledSize(size)
thumb = reader.read()
thumb.save(thumb_path, ext[1:])
if not return_icon:
return
if os.path.exists(thumb_path):
return build_icon(unicode(thumb_path))
# Fallback for files with animation support.
return build_icon(unicode(image_path))
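# Worked example of the default thumbnail size above (source dimensions are
# assumed for illustration): a 1024x768 image has ratio 1024.0 / 768.0 =
# 1.333..., so the scaled size becomes QSize(int(1.333 * 88), 88), i.e.
# QSize(117, 88) - the thumbnail keeps the source aspect ratio at a fixed
# height of 88 pixels.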
def validate_thumb(file_path, thumb_path):
"""
Validates whether a file's thumbnail still exists and whether it is up to
date. **Note**, you must **not** call this function before checking the
existence of the file.
``file_path``
The path to the file. The file **must** exist!
``thumb_path``
The path to the thumb.
"""
if not os.path.exists(thumb_path):
return False
image_date = os.stat(file_path).st_mtime
thumb_date = os.stat(thumb_path).st_mtime
return image_date <= thumb_date
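# A minimal sketch of pairing validate_thumb with create_thumb (the paths
# are hypothetical; the source file is assumed to exist, as the note above
# requires):
#
#     if not validate_thumb(u'/data/song.png', u'/data/thumbs/song.png'):
#         create_thumb(u'/data/song.png', u'/data/thumbs/song.png')
#
# Because the check compares mtimes, a thumbnail older than its source
# image is treated as stale and regenerated.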
def resize_image(image_path, width, height, background=u'#000000'):
"""
Resize an image to fit on the current screen.
``image_path``
The path to the image to resize.
``width``
The new image width.
``height``
The new image height.
``background``
The background colour. Defaults to black.
DO NOT REMOVE THE DEFAULT BACKGROUND VALUE!
"""
log.debug(u'resize_image - start')
reader = QtGui.QImageReader(image_path)
# The image's ratio.
image_ratio = float(reader.size().width()) / float(reader.size().height())
resize_ratio = float(width) / float(height)
# Figure out the size we want to resize the image to (keep aspect ratio).
if image_ratio == resize_ratio:
size = QtCore.QSize(width, height)
elif image_ratio < resize_ratio:
# Use the image's height as reference for the new size.
size = QtCore.QSize(image_ratio * height, height)
else:
# Use the image's width as reference for the new size.
        size = QtCore.QSize(width, width / image_ratio)
reader.setScaledSize(size)
preview = reader.read()
if image_ratio == resize_ratio:
# We neither need to centre the image nor add "bars" to the image.
return preview
real_width = preview.width()
real_height = preview.height()
# and move it to the centre of the preview space
new_image = QtGui.QImage(width, height, QtGui.QImage.Format_ARGB32_Premultiplied)
painter = QtGui.QPainter(new_image)
painter.fillRect(new_image.rect(), QtGui.QColor(background))
painter.drawImage((width - real_width) / 2, (height - real_height) / 2, preview)
return new_image
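# Example (editor's sketch): letterbox a photo into a 1024x768 frame;
# black bars fill whatever the scaled image does not cover.
#     framed = resize_image(u'/tmp/photo.jpg', 1024, 768)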
def check_item_selected(list_widget, message):
"""
Check if a list item is selected so an action may be performed on it
``list_widget``
The list to check for selected items
``message``
The message to give the user if no item is selected
"""
if not list_widget.selectedIndexes():
QtGui.QMessageBox.information(list_widget.parent(),
translate('OpenLP.MediaManagerItem', 'No Items Selected'), message)
return False
return True
def clean_tags(text):
"""
Remove Tags from text for display
"""
text = text.replace(u'<br>', u'\n')
text = text.replace(u'{br}', u'\n')
text = text.replace(u' ', u' ')
for tag in FormattingTags.get_html_tags():
text = text.replace(tag[u'start tag'], u'')
text = text.replace(tag[u'end tag'], u'')
return text
def expand_tags(text):
"""
    Expand tags into HTML for display
"""
for tag in FormattingTags.get_html_tags():
text = text.replace(tag[u'start tag'], tag[u'start html'])
text = text.replace(tag[u'end tag'], tag[u'end html'])
return text
def check_directory_exists(directory, do_not_log=False):
"""
Check a theme directory exists and if not create it
``directory``
The directory to make sure exists
    ``do_not_log``
        Do not log anything. This is needed during start up, when the log
        isn't ready yet.
"""
if not do_not_log:
log.debug(u'check_directory_exists %s' % directory)
try:
if not os.path.exists(directory):
os.makedirs(directory)
    except (IOError, OSError):
        # ``os.makedirs`` raises OSError rather than IOError on failure.
        pass
def create_separated_list(stringlist):
"""
Returns a string that represents a join of a list of strings with a
localized separator. This function corresponds to
QLocale::createSeparatedList which was introduced in Qt 4.8 and implements
the algorithm from http://www.unicode.org/reports/tr35/#ListPatterns
``stringlist``
List of unicode strings
"""
    # Compare parsed version tuples; a plain string comparison would sort
    # u'4.10' before u'4.9'.
    pyqt_version = tuple(map(int, Qt.PYQT_VERSION_STR.split(u'.')[:2]))
    qt_version = tuple(map(int, Qt.qVersion().split(u'.')[:2]))
    if pyqt_version >= (4, 9) and qt_version >= (4, 8):
        return QtCore.QLocale().createSeparatedList(stringlist)
if not stringlist:
return u''
elif len(stringlist) == 1:
return stringlist[0]
elif len(stringlist) == 2:
return translate('OpenLP.core.lib', '%1 and %2',
'Locale list separator: 2 items') % (stringlist[0], stringlist[1])
else:
merged = translate('OpenLP.core.lib', '%1, and %2',
u'Locale list separator: end') % (stringlist[-2], stringlist[-1])
for index in reversed(range(1, len(stringlist) - 2)):
merged = translate('OpenLP.core.lib', '%1, %2',
u'Locale list separator: middle') % (stringlist[index], merged)
return translate('OpenLP.core.lib', '%1, %2',
u'Locale list separator: start') % (stringlist[0], merged)
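# Example (editor's sketch): with the fallback path and an English locale,
# create_separated_list([u'red', u'green', u'blue']) yields roughly
# u'red, green, and blue' (the exact separators are locale-dependent).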
from registry import Registry
from uistrings import UiStrings
from eventreceiver import Receiver
from screen import ScreenList
from settings import Settings
from listwidgetwithdnd import ListWidgetWithDnD
from formattingtags import FormattingTags
from spelltextedit import SpellTextEdit
from settingsmanager import SettingsManager
from plugin import PluginStatus, StringContent, Plugin
from pluginmanager import PluginManager
from settingstab import SettingsTab
from serviceitem import ServiceItem, ServiceItemType, ItemCapabilities
from htmlbuilder import build_html, build_lyrics_format_css, build_lyrics_outline_css
from toolbar import OpenLPToolbar
from dockwidget import OpenLPDockWidget
from imagemanager import ImageManager
from renderer import Renderer
from mediamanageritem import MediaManagerItem
| marmyshev/transitions | openlp/core/lib/__init__.py | Python | gpl-2.0 | 14,177 | ["Brian"] | 7d3bf1fc63400ecd11ffa72b11c288fdf04373a8ca821ba3fa32ceb10c91a16c |
from __future__ import division, absolute_import, print_function
import os
import sys
import types
import re
from numpy.core.numerictypes import issubclass_, issubsctype, issubdtype
from numpy.core import product, ndarray, ufunc
__all__ = ['issubclass_', 'issubsctype', 'issubdtype',
'deprecate', 'deprecate_with_doc', 'get_numarray_include',
'get_include', 'info', 'source', 'who', 'lookfor', 'byte_bounds',
'safe_eval']
def get_include():
"""
Return the directory that contains the NumPy \\*.h header files.
Extension modules that need to compile against NumPy should use this
function to locate the appropriate include directory.
Notes
-----
When using ``distutils``, for example in ``setup.py``.
::
import numpy as np
...
Extension('extension_name', ...
include_dirs=[np.get_include()])
...
"""
import numpy
if numpy.show_config is None:
# running from numpy source directory
d = os.path.join(os.path.dirname(numpy.__file__), 'core', 'include')
else:
# using installed numpy core headers
import numpy.core as core
d = os.path.join(os.path.dirname(core.__file__), 'include')
return d
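# Example (editor's sketch):
#     >>> import numpy as np
#     >>> np.get_include()                       # doctest: +SKIP
#     '/usr/lib/python3.3/site-packages/numpy/core/include'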
def get_numarray_include(type=None):
"""
Return the directory that contains the numarray \\*.h header files.
Extension modules that need to compile against numarray should use this
function to locate the appropriate include directory.
Parameters
----------
type : any, optional
If `type` is not None, the location of the NumPy headers is returned
as well.
Returns
-------
dirs : str or list of str
If `type` is None, `dirs` is a string containing the path to the
numarray headers.
If `type` is not None, `dirs` is a list of strings with first the
path(s) to the numarray headers, followed by the path to the NumPy
headers.
Notes
-----
Useful when using ``distutils``, for example in ``setup.py``.
::
import numpy as np
...
Extension('extension_name', ...
include_dirs=[np.get_numarray_include()])
...
"""
from numpy.numarray import get_numarray_include_dirs
include_dirs = get_numarray_include_dirs()
if type is None:
return include_dirs[0]
else:
return include_dirs + [get_include()]
def _set_function_name(func, name):
func.__name__ = name
return func
class _Deprecate(object):
"""
Decorator class to deprecate old functions.
Refer to `deprecate` for details.
See Also
--------
deprecate
"""
def __init__(self, old_name=None, new_name=None, message=None):
self.old_name = old_name
self.new_name = new_name
self.message = message
def __call__(self, func, *args, **kwargs):
"""
Decorator call. Refer to ``decorate``.
"""
old_name = self.old_name
new_name = self.new_name
message = self.message
import warnings
        if old_name is None:
            # ``func.__name__`` exists on both Python 2 and 3; the original
            # try/except fell back to the very same attribute, so it was
            # redundant.
            old_name = func.__name__
if new_name is None:
depdoc = "`%s` is deprecated!" % old_name
else:
depdoc = "`%s` is deprecated, use `%s` instead!" % \
(old_name, new_name)
if message is not None:
depdoc += "\n" + message
def newfunc(*args,**kwds):
"""`arrayrange` is deprecated, use `arange` instead!"""
warnings.warn(depdoc, DeprecationWarning)
return func(*args, **kwds)
newfunc = _set_function_name(newfunc, old_name)
doc = func.__doc__
if doc is None:
doc = depdoc
else:
doc = '\n\n'.join([depdoc, doc])
newfunc.__doc__ = doc
try:
d = func.__dict__
except AttributeError:
pass
else:
newfunc.__dict__.update(d)
return newfunc
def deprecate(*args, **kwargs):
"""
Issues a DeprecationWarning, adds warning to `old_name`'s
docstring, rebinds ``old_name.__name__`` and returns the new
function object.
This function may also be used as a decorator.
Parameters
----------
func : function
The function to be deprecated.
old_name : str, optional
The name of the function to be deprecated. Default is None, in which
case the name of `func` is used.
new_name : str, optional
The new name for the function. Default is None, in which case
the deprecation message is that `old_name` is deprecated. If given,
the deprecation message is that `old_name` is deprecated and `new_name`
should be used instead.
message : str, optional
Additional explanation of the deprecation. Displayed in the docstring
after the warning.
Returns
-------
old_func : function
The deprecated function.
Examples
--------
Note that ``olduint`` returns a value after printing Deprecation Warning:
>>> olduint = np.deprecate(np.uint)
>>> olduint(6)
/usr/lib/python2.5/site-packages/numpy/lib/utils.py:114:
DeprecationWarning: uint32 is deprecated
warnings.warn(str1, DeprecationWarning)
6
"""
# Deprecate may be run as a function or as a decorator
# If run as a function, we initialise the decorator class
# and execute its __call__ method.
if args:
fn = args[0]
args = args[1:]
# backward compatibility -- can be removed
# after next release
if 'newname' in kwargs:
kwargs['new_name'] = kwargs.pop('newname')
if 'oldname' in kwargs:
kwargs['old_name'] = kwargs.pop('oldname')
return _Deprecate(*args, **kwargs)(fn)
else:
return _Deprecate(*args, **kwargs)
deprecate_with_doc = lambda msg: _Deprecate(message=msg)
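# Example (editor's sketch):
#     >>> olddot = deprecate(np.dot, new_name='dot')   # doctest: +SKIP
#     >>> olddot([1, 2], [3, 4])   # emits a DeprecationWarning, returns 11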
#--------------------------------------------
# Determine if two arrays can share memory
#--------------------------------------------
def byte_bounds(a):
"""
Returns pointers to the end-points of an array.
Parameters
----------
a : ndarray
Input array. It must conform to the Python-side of the array interface.
Returns
-------
(low, high) : tuple of 2 integers
The first integer is the first byte of the array, the second integer is
just past the last byte of the array. If `a` is not contiguous it
will not use every byte between the (`low`, `high`) values.
Examples
--------
>>> I = np.eye(2, dtype='f'); I.dtype
dtype('float32')
>>> low, high = np.byte_bounds(I)
>>> high - low == I.size*I.itemsize
True
>>> I = np.eye(2, dtype='G'); I.dtype
dtype('complex192')
>>> low, high = np.byte_bounds(I)
>>> high - low == I.size*I.itemsize
True
"""
ai = a.__array_interface__
a_data = ai['data'][0]
astrides = ai['strides']
ashape = ai['shape']
bytes_a = int(ai['typestr'][2:])
a_low = a_high = a_data
if astrides is None: # contiguous case
a_high += a.size * bytes_a
else:
for shape, stride in zip(ashape, astrides):
if stride < 0:
a_low += (shape-1)*stride
else:
a_high += (shape-1)*stride
a_high += bytes_a
return a_low, a_high
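# Example (editor's sketch): two arrays can share memory only if their
# byte ranges overlap.
#     low_a, high_a = byte_bounds(a)
#     low_b, high_b = byte_bounds(b)
#     may_share = low_a < high_b and low_b < high_a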
#-----------------------------------------------------------------------------
# Function for output and information on the variables used.
#-----------------------------------------------------------------------------
def who(vardict=None):
"""
Print the Numpy arrays in the given dictionary.
If there is no dictionary passed in or `vardict` is None then returns
Numpy arrays in the globals() dictionary (all Numpy arrays in the
namespace).
Parameters
----------
vardict : dict, optional
A dictionary possibly containing ndarrays. Default is globals().
Returns
-------
out : None
Returns 'None'.
Notes
-----
Prints out the name, shape, bytes and type of all of the ndarrays present
in `vardict`.
Examples
--------
>>> a = np.arange(10)
>>> b = np.ones(20)
>>> np.who()
Name Shape Bytes Type
===========================================================
a 10 40 int32
b 20 160 float64
Upper bound on total bytes = 200
>>> d = {'x': np.arange(2.0), 'y': np.arange(3.0), 'txt': 'Some str',
... 'idx':5}
>>> np.who(d)
Name Shape Bytes Type
===========================================================
y 3 24 float64
x 2 16 float64
Upper bound on total bytes = 40
"""
if vardict is None:
frame = sys._getframe().f_back
vardict = frame.f_globals
sta = []
cache = {}
for name in vardict.keys():
if isinstance(vardict[name], ndarray):
var = vardict[name]
idv = id(var)
if idv in cache.keys():
namestr = name + " (%s)" % cache[idv]
original=0
else:
cache[idv] = name
namestr = name
original=1
shapestr = " x ".join(map(str, var.shape))
bytestr = str(var.nbytes)
sta.append([namestr, shapestr, bytestr, var.dtype.name,
original])
maxname = 0
maxshape = 0
maxbyte = 0
totalbytes = 0
for k in range(len(sta)):
val = sta[k]
if maxname < len(val[0]):
maxname = len(val[0])
if maxshape < len(val[1]):
maxshape = len(val[1])
if maxbyte < len(val[2]):
maxbyte = len(val[2])
if val[4]:
totalbytes += int(val[2])
if len(sta) > 0:
sp1 = max(10, maxname)
sp2 = max(10, maxshape)
sp3 = max(10, maxbyte)
prval = "Name %s Shape %s Bytes %s Type" % (sp1*' ', sp2*' ', sp3*' ')
print(prval + "\n" + "="*(len(prval)+5) + "\n")
for k in range(len(sta)):
val = sta[k]
print("%s %s %s %s %s %s %s" % (val[0], ' '*(sp1-len(val[0])+4),
val[1], ' '*(sp2-len(val[1])+5),
val[2], ' '*(sp3-len(val[2])+5),
val[3]))
print("\nUpper bound on total bytes = %d" % totalbytes)
return
#-----------------------------------------------------------------------------
# NOTE: pydoc defines a help function which works similarly to this
# except it uses a pager to take over the screen.
# combine name and arguments and split to multiple lines of
# width characters. End lines on a comma and begin argument list
# indented with the rest of the arguments.
def _split_line(name, arguments, width):
firstwidth = len(name)
k = firstwidth
newstr = name
sepstr = ", "
arglist = arguments.split(sepstr)
for argument in arglist:
if k == firstwidth:
addstr = ""
else:
addstr = sepstr
k = k + len(argument) + len(addstr)
if k > width:
k = firstwidth + 1 + len(argument)
newstr = newstr + ",\n" + " "*(firstwidth+2) + argument
else:
newstr = newstr + addstr + argument
return newstr
_namedict = None
_dictlist = None
# Traverse all module directories underneath globals
# to see if something is defined
def _makenamedict(module='numpy'):
module = __import__(module, globals(), locals(), [])
thedict = {module.__name__:module.__dict__}
dictlist = [module.__name__]
totraverse = [module.__dict__]
while True:
if len(totraverse) == 0:
break
thisdict = totraverse.pop(0)
for x in thisdict.keys():
if isinstance(thisdict[x], types.ModuleType):
modname = thisdict[x].__name__
if modname not in dictlist:
moddict = thisdict[x].__dict__
dictlist.append(modname)
totraverse.append(moddict)
thedict[modname] = moddict
return thedict, dictlist
def info(object=None,maxwidth=76,output=sys.stdout,toplevel='numpy'):
"""
Get help information for a function, class, or module.
Parameters
----------
object : object or str, optional
Input object or name to get information about. If `object` is a
numpy object, its docstring is given. If it is a string, available
modules are searched for matching objects.
If None, information about `info` itself is returned.
maxwidth : int, optional
Printing width.
output : file like object, optional
File like object that the output is written to, default is ``stdout``.
The object has to be opened in 'w' or 'a' mode.
toplevel : str, optional
Start search at this level.
See Also
--------
source, lookfor
Notes
-----
When used interactively with an object, ``np.info(obj)`` is equivalent to
``help(obj)`` on the Python prompt or ``obj?`` on the IPython prompt.
Examples
--------
>>> np.info(np.polyval) # doctest: +SKIP
polyval(p, x)
Evaluate the polynomial p at x.
...
When using a string for `object` it is possible to get multiple results.
>>> np.info('fft') # doctest: +SKIP
*** Found in numpy ***
Core FFT routines
...
*** Found in numpy.fft ***
fft(a, n=None, axis=-1)
...
*** Repeat reference found in numpy.fft.fftpack ***
*** Total of 3 references found. ***
"""
global _namedict, _dictlist
# Local import to speed up numpy's import time.
import pydoc, inspect
if hasattr(object, '_ppimport_importer') or \
hasattr(object, '_ppimport_module'):
object = object._ppimport_module
elif hasattr(object, '_ppimport_attr'):
object = object._ppimport_attr
if object is None:
info(info)
elif isinstance(object, ndarray):
import numpy.numarray as nn
nn.info(object, output=output, numpy=1)
elif isinstance(object, str):
if _namedict is None:
_namedict, _dictlist = _makenamedict(toplevel)
numfound = 0
objlist = []
for namestr in _dictlist:
try:
obj = _namedict[namestr][object]
if id(obj) in objlist:
print("\n *** Repeat reference found in %s *** " % namestr, file=output)
else:
objlist.append(id(obj))
print(" *** Found in %s ***" % namestr, file=output)
info(obj)
print("-"*maxwidth, file=output)
numfound += 1
except KeyError:
pass
if numfound == 0:
print("Help for %s not found." % object, file=output)
else:
print("\n *** Total of %d references found. ***" % numfound, file=output)
elif inspect.isfunction(object):
name = object.__name__
arguments = inspect.formatargspec(*inspect.getargspec(object))
if len(name+arguments) > maxwidth:
argstr = _split_line(name, arguments, maxwidth)
else:
argstr = name + arguments
print(" " + argstr + "\n", file=output)
print(inspect.getdoc(object), file=output)
elif inspect.isclass(object):
name = object.__name__
arguments = "()"
try:
if hasattr(object, '__init__'):
arguments = inspect.formatargspec(*inspect.getargspec(object.__init__.__func__))
arglist = arguments.split(', ')
if len(arglist) > 1:
arglist[1] = "("+arglist[1]
arguments = ", ".join(arglist[1:])
except:
pass
if len(name+arguments) > maxwidth:
argstr = _split_line(name, arguments, maxwidth)
else:
argstr = name + arguments
print(" " + argstr + "\n", file=output)
doc1 = inspect.getdoc(object)
if doc1 is None:
if hasattr(object, '__init__'):
print(inspect.getdoc(object.__init__), file=output)
else:
print(inspect.getdoc(object), file=output)
methods = pydoc.allmethods(object)
if methods != []:
print("\n\nMethods:\n", file=output)
for meth in methods:
if meth[0] == '_':
continue
thisobj = getattr(object, meth, None)
if thisobj is not None:
methstr, other = pydoc.splitdoc(inspect.getdoc(thisobj) or "None")
print(" %s -- %s" % (meth, methstr), file=output)
elif (sys.version_info[0] < 3
and isinstance(object, types.InstanceType)):
# check for __call__ method
# types.InstanceType is the type of the instances of oldstyle classes
print("Instance of class: ", object.__class__.__name__, file=output)
print(file=output)
if hasattr(object, '__call__'):
arguments = inspect.formatargspec(*inspect.getargspec(object.__call__.__func__))
arglist = arguments.split(', ')
if len(arglist) > 1:
arglist[1] = "("+arglist[1]
arguments = ", ".join(arglist[1:])
else:
arguments = "()"
if hasattr(object, 'name'):
name = "%s" % object.name
else:
name = "<name>"
if len(name+arguments) > maxwidth:
argstr = _split_line(name, arguments, maxwidth)
else:
argstr = name + arguments
print(" " + argstr + "\n", file=output)
doc = inspect.getdoc(object.__call__)
if doc is not None:
print(inspect.getdoc(object.__call__), file=output)
print(inspect.getdoc(object), file=output)
else:
print(inspect.getdoc(object), file=output)
elif inspect.ismethod(object):
name = object.__name__
arguments = inspect.formatargspec(*inspect.getargspec(object.__func__))
arglist = arguments.split(', ')
if len(arglist) > 1:
arglist[1] = "("+arglist[1]
arguments = ", ".join(arglist[1:])
else:
arguments = "()"
if len(name+arguments) > maxwidth:
argstr = _split_line(name, arguments, maxwidth)
else:
argstr = name + arguments
print(" " + argstr + "\n", file=output)
print(inspect.getdoc(object), file=output)
elif hasattr(object, '__doc__'):
print(inspect.getdoc(object), file=output)
def source(object, output=sys.stdout):
"""
Print or write to a file the source code for a Numpy object.
The source code is only returned for objects written in Python. Many
functions and classes are defined in C and will therefore not return
useful information.
Parameters
----------
object : numpy object
Input object. This can be any object (function, class, module, ...).
output : file object, optional
        If `output` is not supplied then the source code is printed to screen
(sys.stdout). File object must be created with either write 'w' or
append 'a' modes.
See Also
--------
lookfor, info
Examples
--------
>>> np.source(np.interp) #doctest: +SKIP
In file: /usr/lib/python2.6/dist-packages/numpy/lib/function_base.py
def interp(x, xp, fp, left=None, right=None):
\"\"\".... (full docstring printed)\"\"\"
if isinstance(x, (float, int, number)):
return compiled_interp([x], xp, fp, left, right).item()
else:
return compiled_interp(x, xp, fp, left, right)
The source code is only returned for objects written in Python.
>>> np.source(np.array) #doctest: +SKIP
Not available for this object.
"""
# Local import to speed up numpy's import time.
import inspect
try:
print("In file: %s\n" % inspect.getsourcefile(object), file=output)
print(inspect.getsource(object), file=output)
except:
print("Not available for this object.", file=output)
# Cache for lookfor: {id(module): {name: (docstring, kind, index), ...}...}
# where kind: "func", "class", "module", "object"
# and index: index in breadth-first namespace traversal
_lookfor_caches = {}
# regexp whose match indicates that the string may contain a function signature
_function_signature_re = re.compile(r"[a-z0-9_]+\(.*[,=].*\)", re.I)
def lookfor(what, module=None, import_modules=True, regenerate=False,
output=None):
"""
Do a keyword search on docstrings.
    A list of objects that matched the search is displayed,
sorted by relevance. All given keywords need to be found in the
docstring for it to be returned as a result, but the order does
not matter.
Parameters
----------
what : str
String containing words to look for.
module : str or list, optional
Name of module(s) whose docstrings to go through.
import_modules : bool, optional
Whether to import sub-modules in packages. Default is True.
regenerate : bool, optional
Whether to re-generate the docstring cache. Default is False.
output : file-like, optional
File-like object to write the output to. If omitted, use a pager.
See Also
--------
source, info
Notes
-----
Relevance is determined only roughly, by checking if the keywords occur
in the function name, at the start of a docstring, etc.
Examples
--------
>>> np.lookfor('binary representation')
Search results for 'binary representation'
------------------------------------------
numpy.binary_repr
Return the binary representation of the input number as a string.
numpy.core.setup_common.long_double_representation
Given a binary dump as given by GNU od -b, look for long double
numpy.base_repr
Return a string representation of a number in the given base system.
...
"""
import pydoc
# Cache
cache = _lookfor_generate_cache(module, import_modules, regenerate)
# Search
# XXX: maybe using a real stemming search engine would be better?
found = []
whats = str(what).lower().split()
if not whats: return
for name, (docstring, kind, index) in cache.items():
if kind in ('module', 'object'):
# don't show modules or objects
continue
ok = True
doc = docstring.lower()
for w in whats:
if w not in doc:
ok = False
break
if ok:
found.append(name)
# Relevance sort
# XXX: this is full Harrison-Stetson heuristics now,
# XXX: it probably could be improved
kind_relevance = {'func': 1000, 'class': 1000,
'module': -1000, 'object': -1000}
def relevance(name, docstr, kind, index):
r = 0
# do the keywords occur within the start of the docstring?
first_doc = "\n".join(docstr.lower().strip().split("\n")[:3])
r += sum([200 for w in whats if w in first_doc])
# do the keywords occur in the function name?
r += sum([30 for w in whats if w in name])
# is the full name long?
r += -len(name) * 5
# is the object of bad type?
r += kind_relevance.get(kind, -1000)
# is the object deep in namespace hierarchy?
r += -name.count('.') * 10
r += max(-index / 100, -100)
return r
def relevance_value(a):
return relevance(a, *cache[a])
found.sort(key=relevance_value)
# Pretty-print
s = "Search results for '%s'" % (' '.join(whats))
help_text = [s, "-"*len(s)]
for name in found[::-1]:
doc, kind, ix = cache[name]
doclines = [line.strip() for line in doc.strip().split("\n")
if line.strip()]
# find a suitable short description
try:
first_doc = doclines[0].strip()
if _function_signature_re.search(first_doc):
first_doc = doclines[1].strip()
except IndexError:
first_doc = ""
help_text.append("%s\n %s" % (name, first_doc))
if not found:
help_text.append("Nothing found.")
# Output
if output is not None:
output.write("\n".join(help_text))
elif len(help_text) > 10:
pager = pydoc.getpager()
pager("\n".join(help_text))
else:
print("\n".join(help_text))
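# Example (editor's sketch):
#     >>> np.lookfor('eigenvalue', module='numpy.linalg')   # doctest: +SKIP
#     Search results for 'eigenvalue'
#     ...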
def _lookfor_generate_cache(module, import_modules, regenerate):
"""
Generate docstring cache for given module.
Parameters
----------
module : str, None, module
Module for which to generate docstring cache
import_modules : bool
Whether to import sub-modules in packages.
regenerate : bool
Re-generate the docstring cache
Returns
-------
cache : dict {obj_full_name: (docstring, kind, index), ...}
Docstring cache for the module, either cached one (regenerate=False)
or newly generated.
"""
global _lookfor_caches
# Local import to speed up numpy's import time.
import inspect
if sys.version_info[0] >= 3:
# In Python3 stderr, stdout are text files.
from io import StringIO
else:
from StringIO import StringIO
if module is None:
module = "numpy"
if isinstance(module, str):
try:
__import__(module)
except ImportError:
return {}
module = sys.modules[module]
elif isinstance(module, list) or isinstance(module, tuple):
cache = {}
for mod in module:
cache.update(_lookfor_generate_cache(mod, import_modules,
regenerate))
return cache
if id(module) in _lookfor_caches and not regenerate:
return _lookfor_caches[id(module)]
# walk items and collect docstrings
cache = {}
_lookfor_caches[id(module)] = cache
seen = {}
index = 0
stack = [(module.__name__, module)]
while stack:
name, item = stack.pop(0)
if id(item) in seen: continue
seen[id(item)] = True
index += 1
kind = "object"
if inspect.ismodule(item):
kind = "module"
try:
_all = item.__all__
except AttributeError:
_all = None
# import sub-packages
if import_modules and hasattr(item, '__path__'):
for pth in item.__path__:
for mod_path in os.listdir(pth):
this_py = os.path.join(pth, mod_path)
init_py = os.path.join(pth, mod_path, '__init__.py')
if os.path.isfile(this_py) and mod_path.endswith('.py'):
to_import = mod_path[:-3]
elif os.path.isfile(init_py):
to_import = mod_path
else:
continue
if to_import == '__init__':
continue
try:
# Catch SystemExit, too
base_exc = BaseException
except NameError:
# Python 2.4 doesn't have BaseException
base_exc = Exception
try:
old_stdout = sys.stdout
old_stderr = sys.stderr
try:
sys.stdout = StringIO()
sys.stderr = StringIO()
__import__("%s.%s" % (name, to_import))
finally:
sys.stdout = old_stdout
sys.stderr = old_stderr
except base_exc:
continue
for n, v in _getmembers(item):
try:
item_name = getattr(v, '__name__', "%s.%s" % (name, n))
mod_name = getattr(v, '__module__', None)
except NameError:
# ref. SWIG's global cvars
# NameError: Unknown C global variable
item_name = "%s.%s" % (name, n)
mod_name = None
if '.' not in item_name and mod_name:
item_name = "%s.%s" % (mod_name, item_name)
if not item_name.startswith(name + '.'):
# don't crawl "foreign" objects
if isinstance(v, ufunc):
# ... unless they are ufuncs
pass
else:
continue
elif not (inspect.ismodule(v) or _all is None or n in _all):
continue
stack.append(("%s.%s" % (name, n), v))
elif inspect.isclass(item):
kind = "class"
for n, v in _getmembers(item):
stack.append(("%s.%s" % (name, n), v))
elif hasattr(item, "__call__"):
kind = "func"
try:
doc = inspect.getdoc(item)
except NameError: # ref SWIG's NameError: Unknown C global variable
doc = None
if doc is not None:
cache[name] = (doc, kind, index)
return cache
def _getmembers(item):
import inspect
try:
members = inspect.getmembers(item)
except AttributeError:
members = [(x, getattr(item, x)) for x in dir(item)
if hasattr(item, x)]
return members
#-----------------------------------------------------------------------------
# The following SafeEval class and company are adapted from Michael Spencer's
# ASPN Python Cookbook recipe:
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/364469
# Accordingly it is mostly Copyright 2006 by Michael Spencer.
# The recipe, like most of the other ASPN Python Cookbook recipes was made
# available under the Python license.
# http://www.python.org/license
# It has been modified to:
# * handle unary -/+
# * support True/False/None
# * raise SyntaxError instead of a custom exception.
class SafeEval(object):
"""
Object to evaluate constant string expressions.
This includes strings with lists, dicts and tuples using the abstract
syntax tree created by ``compiler.parse``.
For an example of usage, see `safe_eval`.
See Also
--------
safe_eval
"""
if sys.version_info[0] < 3:
def visit(self, node, **kw):
cls = node.__class__
meth = getattr(self, 'visit'+cls.__name__, self.default)
return meth(node, **kw)
def default(self, node, **kw):
raise SyntaxError("Unsupported source construct: %s"
% node.__class__)
def visitExpression(self, node, **kw):
for child in node.getChildNodes():
return self.visit(child, **kw)
def visitConst(self, node, **kw):
return node.value
def visitDict(self, node,**kw):
return dict([(self.visit(k), self.visit(v)) for k, v in node.items])
def visitTuple(self, node, **kw):
return tuple([self.visit(i) for i in node.nodes])
def visitList(self, node, **kw):
return [self.visit(i) for i in node.nodes]
def visitUnaryAdd(self, node, **kw):
return +self.visit(node.getChildNodes()[0])
def visitUnarySub(self, node, **kw):
return -self.visit(node.getChildNodes()[0])
def visitName(self, node, **kw):
if node.name == 'False':
return False
elif node.name == 'True':
return True
elif node.name == 'None':
return None
else:
raise SyntaxError("Unknown name: %s" % node.name)
else:
def visit(self, node):
cls = node.__class__
meth = getattr(self, 'visit' + cls.__name__, self.default)
return meth(node)
def default(self, node):
raise SyntaxError("Unsupported source construct: %s"
% node.__class__)
def visitExpression(self, node):
return self.visit(node.body)
def visitNum(self, node):
return node.n
def visitStr(self, node):
return node.s
def visitBytes(self, node):
return node.s
def visitDict(self, node,**kw):
return dict([(self.visit(k), self.visit(v))
for k, v in zip(node.keys, node.values)])
def visitTuple(self, node):
return tuple([self.visit(i) for i in node.elts])
def visitList(self, node):
return [self.visit(i) for i in node.elts]
def visitUnaryOp(self, node):
import ast
if isinstance(node.op, ast.UAdd):
return +self.visit(node.operand)
elif isinstance(node.op, ast.USub):
return -self.visit(node.operand)
else:
raise SyntaxError("Unknown unary op: %r" % node.op)
def visitName(self, node):
if node.id == 'False':
return False
elif node.id == 'True':
return True
elif node.id == 'None':
return None
else:
raise SyntaxError("Unknown name: %s" % node.id)
def visitNameConstant(self, node):
return node.value
def safe_eval(source):
"""
Protected string evaluation.
Evaluate a string containing a Python literal expression without
allowing the execution of arbitrary non-literal code.
Parameters
----------
source : str
The string to evaluate.
Returns
-------
obj : object
The result of evaluating `source`.
Raises
------
SyntaxError
If the code has invalid Python syntax, or if it contains non-literal
code.
Examples
--------
>>> np.safe_eval('1')
1
>>> np.safe_eval('[1, 2, 3]')
[1, 2, 3]
>>> np.safe_eval('{"foo": ("bar", 10.0)}')
{'foo': ('bar', 10.0)}
>>> np.safe_eval('import os')
Traceback (most recent call last):
...
SyntaxError: invalid syntax
>>> np.safe_eval('open("/home/user/.ssh/id_dsa").read()')
Traceback (most recent call last):
...
SyntaxError: Unsupported source construct: compiler.ast.CallFunc
"""
# Local imports to speed up numpy's import time.
import warnings
with warnings.catch_warnings():
# compiler package is deprecated for 3.x, which is already solved here
warnings.simplefilter('ignore', DeprecationWarning)
try:
import compiler
except ImportError:
import ast as compiler
walker = SafeEval()
    # Both the parse and the walk raise SyntaxError themselves; the
    # try/except blocks that merely re-raised it were redundant.
    tree = compiler.parse(source, mode="eval")
    return walker.visit(tree)
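# Example (editor's sketch):
#     >>> safe_eval('(1, -2.5, True, None)')
#     (1, -2.5, True, None)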
#-----------------------------------------------------------------------------
| alephu5/Soundbyte | environment/lib/python3.3/site-packages/numpy/lib/utils.py | Python | gpl-3.0 | 36,117 | ["VisIt"] | 06899a5bd4fae1f1db1a51c5dcd0bcef0ac6c99e57311c89145cdf55bf98412c |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Step functions for optimization methods."""
from absl import logging
import jax
from jax import numpy as jnp
from jax.flatten_util import ravel_pytree
def gradient_descent_step(data, loss_f, model_param, options):
"""Gradient Descent optimization step.
Args:
data: A tuple of inputs and labels passed to the loss function.
loss_f: The loss function that takes in model_param, inputs, and labels.
model_param: Current model parameters to be passed to loss_f.
options: A dictionary of optimizer specific hyper-parameters.
Returns:
Updated model parameters and options.
"""
dloss_dw = jax.grad(loss_f, argnums=0)
inputs, labels = data[0], data[1]
grad = dloss_dw(model_param, inputs, labels)
# Handle deep nets
grad, unravel_fn = ravel_pytree(grad)
model_param, unravel_fn = ravel_pytree(model_param)
model_param -= options['lr'] * grad
return unravel_fn(model_param), options
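# Example (editor's sketch; ``x``, ``y`` are jnp arrays, ``w`` the params):
#   loss_f = lambda w, x, y: jnp.mean((x @ w - y) ** 2)
#   w, options = gradient_descent_step((x, y), loss_f, w, {'lr': 0.1})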
def backtracking(next_candidate, stop_cond, step_size_init, options, verbose=0):
"""Backtracking line search.
Args:
next_candidate: a function generating a candidate from a step size.
stop_cond: a function determining whether to stop or not from a step size
and a candidate.
step_size_init: the initial step size to try.
options: a dictionary containing line search specific options.
verbose: whether to enable verbose output or not.
Returns:
step_size, next_candidate
"""
max_iter = options.get('max_linesearch', 20)
step_factor = options.get('step_factor', 0.5)
step_size = step_size_init
next_iter = next_candidate(step_size)
for it in range(max_iter):
if stop_cond(step_size, next_iter):
break
step_size *= step_factor
next_iter = next_candidate(step_size)
    if it == max_iter - 1 and verbose:
      logging.warning('Line search did not converge.')
return step_size, next_iter
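# Example (editor's sketch): shrink a scalar step until a sufficient-
# decrease test passes (``x0``, ``g0`` and ``f`` are assumed to exist).
#   next_candidate = lambda s: x0 - s * g0
#   stop_cond = lambda s, cand: f(cand) <= f(x0) - 1e-4 * s * g0 ** 2
#   step, x1 = backtracking(next_candidate, stop_cond, 1.0, options={})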
def gradient_descent_line_search_step(
data, loss_f, model_param, options):
"""Gradient Descent optimization with line search step.
Args:
data: A tuple of inputs and labels passed to the loss function.
loss_f: The loss function that takes in model_param, inputs, and labels.
model_param: Current model parameters to be passed to loss_f.
options: A dictionary of optimizer specific hyper-parameters.
Returns:
    Updated model parameters and options (including the updated step size).
"""
options = dict(options)
beta = options.get('beta', 0.9)
beta_prime = options.get('beta_prime', 1e-4)
step_size = options.get('step_size', 10000.0)
verbose = options.get('verbose', False)
reuse_last_step = options.get('reuse_last_step', False)
inputs, labels = data[0], data[1]
loss_with_data_f = lambda param: loss_f(param, inputs, labels)
value_and_grad_f = jax.value_and_grad(loss_with_data_f)
value, grad = value_and_grad_f(model_param)
# Maximum learning rate allowed from Theorem 5 in Gunasekar et al. 2017
if options['bound_step']:
# Bound by dual of L2
b_const = jnp.max(jnp.linalg.norm(inputs, ord=2, axis=0))
step_size = min(step_size, 1 / (b_const * b_const * value))
grad, unravel_fn = ravel_pytree(grad)
x, unravel_fn = ravel_pytree(model_param)
  # If we normalize, step_size will be harder to tune.
direction = -grad
# TODO(fartash): consider using the condition in FISTA
def next_candidate(step_size):
next_iter = x + step_size * direction
next_value, next_grad = value_and_grad_f(unravel_fn(next_iter))
next_grad, _ = ravel_pytree(next_grad)
return next_iter, next_value, next_grad
def stop_cond(step_size, res):
_, next_value, next_grad = res
gd = jnp.sum(grad * direction)
# Strong Wolfe condition.
cond1 = next_value <= value + beta_prime * step_size * gd
cond2 = jnp.sum(jnp.abs(next_grad * direction)) >= beta * gd
return cond1 and cond2
step_size, res = backtracking(
next_candidate, stop_cond, step_size, options=options)
next_param = res[0]
if reuse_last_step:
options['step_size'] = step_size
if verbose:
logging.info('Step size: %f', step_size)
return unravel_fn(next_param), options
def coordinate_descent_step(data, loss_f, model_param, options):
"""Gradient Descent optimization step.
Args:
data: A tuple of inputs and labels passed to the loss function.
loss_f: The loss function that takes in model_param, inputs, and labels.
model_param: Current model parameters to be passed to loss_f.
options: A dictionary of optimizer specific hyper-parameters.
Returns:
Updated model parameters and options.
"""
dloss_dw = jax.grad(loss_f, argnums=0)
inputs, labels = data[0], data[1]
grad = dloss_dw(model_param, inputs, labels)
grad_max = grad * (jnp.abs(grad) == jnp.abs(grad).max())
# Handle deep nets
grad_max, unravel_fn = ravel_pytree(grad_max)
model_param, unravel_fn = ravel_pytree(model_param)
model_param -= options['lr'] * grad_max
return unravel_fn(model_param), options
def coordinate_descent_line_search_step(data, loss_f, model_param, options):
"""Coordinate Descent with line search optimization step.
Args:
data: A tuple of inputs and labels passed to the loss function.
loss_f: The loss function that takes in model_param, inputs, and labels.
model_param: Current model parameters to be passed to loss_f.
options: A dictionary of optimizer specific hyper-parameters.
Returns:
Updated model parameters and options.
"""
options = dict(options)
beta = options.get('beta', 0.9)
beta_prime = options.get('beta_prime', 1e-4)
step_size = options.get('step_size', 10000.0)
verbose = options.get('verbose', False)
reuse_last_step = options.get('reuse_last_step', False)
inputs, labels = data[0], data[1]
loss_with_data_f = lambda param: loss_f(param, inputs, labels)
value_and_grad_f = jax.value_and_grad(loss_with_data_f)
value, grad = value_and_grad_f(model_param)
grad_max = grad * (jnp.abs(grad) == jnp.abs(grad).max())
# Maximum learning rate allowed from Theorem 5 in Gunasekar et al. 2017
if options['bound_step']:
# Bound by dual of L1
b_const = jnp.max(jnp.linalg.norm(inputs, ord=jnp.inf, axis=0))
step_size = min(step_size, 1 / (b_const * b_const * value))
# Handle deep nets
grad_max, unravel_fn = ravel_pytree(grad_max)
x, unravel_fn = ravel_pytree(model_param)
  # If we normalize, step_size will be harder to tune.
direction = -grad_max
# TODO(fartash): consider using the condition in FISTA
def next_candidate(step_size):
next_iter = x + step_size * direction
next_value, next_grad = value_and_grad_f(unravel_fn(next_iter))
next_grad, _ = ravel_pytree(next_grad)
return next_iter, next_value, next_grad
def stop_cond(step_size, res):
_, next_value, next_grad = res
gd = jnp.sum(grad * direction)
# Strong Wolfe condition.
cond1 = next_value <= value + beta_prime * step_size * gd
cond2 = jnp.sum(jnp.abs(next_grad * direction)) >= beta * gd
return cond1 and cond2
step_size, res = backtracking(
next_candidate, stop_cond, step_size, options=options)
next_param = res[0]
if reuse_last_step:
options['step_size'] = step_size
if verbose:
logging.info('Step size: %f', step_size)
return unravel_fn(next_param), options
def coordinate_descent_topk_step(data, loss_f, model_param, options, k=2):
"""Coordinate Descent optimization step.
Args:
data: A tuple of inputs and labels passed to the loss function.
loss_f: The loss function that takes in model_param, inputs, and labels.
model_param: Current model parameters to be passed to loss_f.
options: A dictionary of optimizer specific hyper-parameters.
    k: An integer for the number of top-k elements.
Returns:
Updated model parameters and options.
"""
# TODO(fartash): add k to config.py to be part of options.
dloss_dw = jax.grad(loss_f, argnums=0)
inputs, labels = data[0], data[1]
grad = dloss_dw(model_param, inputs, labels)
_, coords = jax.lax.top_k(jnp.abs(grad.T), k)
grad_max = 0 * grad
grad_max = grad_max.at[coords].set(grad[coords])
# Handle deep nets
grad_max, unravel_fn = ravel_pytree(grad_max)
model_param, unravel_fn = ravel_pytree(model_param)
model_param -= options['lr'] * grad_max
return unravel_fn(model_param), options
def sign_gradient_descent_step(data, loss_f, model_param, options):
"""Sign Gradient Descent optimization step.
Args:
data: A tuple of inputs and labels passed to the loss function.
loss_f: The loss function that takes in model_param, inputs, and labels.
model_param: Current model parameters to be passed to loss_f.
options: A dictionary of optimizer specific hyper-parameters.
Returns:
Updated model parameters and options.
"""
dloss_dw = jax.grad(loss_f, argnums=0)
inputs, labels = data[0], data[1]
grad = dloss_dw(model_param, inputs, labels)
grad_sign = jnp.abs(grad).sum() * jnp.sign(grad)
# Handle deep nets
grad_sign, unravel_fn = ravel_pytree(grad_sign)
model_param, unravel_fn = ravel_pytree(model_param)
model_param -= options['lr'] * grad_sign
return unravel_fn(model_param), options
def fista_step(data, loss_and_prox_op, model_param, options):
"""Fista optimization step for solving regularized problem.
Args:
data: A tuple of inputs and labels passed to the loss function.
loss_and_prox_op: Tuple of (loss_f, prox_g)
loss_f is the loss function that takes in model_param, inputs, and labels.
prox_g is the proximity operator for g.
model_param: Current model parameters to be passed to loss_f.
options: A dictionary of optimizer specific hyper-parameters.
Returns:
    Updated model parameters and options (including the updated step size).
"""
options = dict(options)
step_size = options.get('step_size', 1.0)
acceleration = options.get('acceleration', True)
t = options.get('t', 1.0)
verbose = options.get('verbose', False)
reuse_last_step = options.get('reuse_last_step', False)
loss_f, prox_g = loss_and_prox_op
inputs, labels = data[0], data[1]
fun_f = lambda param: loss_f(param, inputs, labels)
value_and_grad_f = jax.value_and_grad(fun_f)
x, unravel_fn = ravel_pytree(model_param)
y = options.get('y', x)
value_f, grad_f = value_and_grad_f(unravel_fn(y))
grad_f, unravel_fn = ravel_pytree(grad_f)
def next_candidate(step_size):
return prox_g(y - grad_f * step_size, step_size)
def stop_cond(step_size, next_iter):
diff = next_iter - y
sqdist = jnp.sum(diff**2)
# We do not compute the non-smooth term (g in the paper)
# as it cancels out from value_F and value_Q.
value_bigf = fun_f(next_iter)
value_bigq = value_f + jnp.sum(diff * grad_f) + 0.5 / step_size * sqdist
return value_bigf <= value_bigq
x_old = x
step_size, x = backtracking(next_candidate, stop_cond, step_size, options)
# Acceleration.
if acceleration:
t_next = (1 + jnp.sqrt(1 + 4 * t**2)) / 2.
y = x + (t - 1) / t_next * (x - x_old)
t = t_next
options['y'] = y
options['t'] = t
else:
y = x
if reuse_last_step:
options['step_size'] = step_size
if verbose:
logging.info('Step size: %f', step_size)
return unravel_fn(x), options
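# Example (editor's sketch; the prox of the L1 norm is soft-thresholding):
#   prox_l1 = lambda v, s: jnp.sign(v) * jnp.maximum(jnp.abs(v) - s, 0.0)
#   param, options = fista_step((x, y), (loss_f, prox_l1), param,
#                               {'step_size': 1.0})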
def get_optimizer_step(options):
"""Return an optimizer given its name."""
name = options['name']
if name == 'gd' or name == 'cvxpy': # TODO(fartash): do cvxpy the right way
return gradient_descent_step, options
if name == 'gd_ls':
return gradient_descent_line_search_step, options
if name == 'cd':
return coordinate_descent_step, options
if name == 'cd_ls':
return coordinate_descent_line_search_step, options
if name == 'signgd':
return sign_gradient_descent_step, options
if name == 'cdk':
return coordinate_descent_topk_step, options
if name == 'fista':
return fista_step, options
  raise ValueError('Invalid optimizer: %s' % name)
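# Example (editor's sketch):
#   step_fn, options = get_optimizer_step({'name': 'gd', 'lr': 0.1})
#   model_param, options = step_fn((inputs, labels), loss_f, model_param,
#                                  options)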
| google-research/google-research | robust_optim/optim.py | Python | apache-2.0 | 12,482 | ["CDK"] | 69e842e67f369d1284aa3db7cca5318fcb438884e26b35413e57269be89ab6cf |
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import re
import textwrap
from buildbot import config
from buildbot.process import properties
from buildbot.status.results import EXCEPTION
from buildbot.status.results import FAILURE
from buildbot.status.results import SKIPPED
from buildbot.status.results import SUCCESS
from buildbot.status.results import WARNINGS
from buildbot.steps import shell
from buildbot.test.fake.remotecommand import Expect
from buildbot.test.fake.remotecommand import ExpectRemoteRef
from buildbot.test.fake.remotecommand import ExpectShell
from buildbot.test.util import compat
from buildbot.test.util import config as configmixin
from buildbot.test.util import steps
from twisted.trial import unittest
class TestShellCommandExecution(steps.BuildStepMixin, unittest.TestCase, configmixin.ConfigErrorsMixin):
def setUp(self):
return self.setUpBuildStep()
def tearDown(self):
return self.tearDownBuildStep()
def test_doStepIf_False(self):
self.setupStep(
shell.ShellCommand(command="echo hello", doStepIf=False))
self.expectOutcome(result=SKIPPED,
status_text=["'echo", "hello'", "skipped"])
return self.runStep()
def test_constructor_args_strings(self):
step = shell.ShellCommand(workdir='build', command="echo hello",
usePTY=False, description="echoing",
descriptionDone="echoed")
self.assertEqual(step.description, ['echoing'])
self.assertEqual(step.descriptionDone, ['echoed'])
def test_constructor_args_lists(self):
step = shell.ShellCommand(workdir='build', command="echo hello",
usePTY=False, description=["echoing"],
descriptionDone=["echoed"])
self.assertEqual(step.description, ['echoing'])
self.assertEqual(step.descriptionDone, ['echoed'])
def test_constructor_args_kwargs(self):
# this is an ugly way to define an API, but for now check that
# the RemoteCommand arguments are properly passed on
step = shell.ShellCommand(workdir='build', command="echo hello",
want_stdout=0, logEnviron=False)
self.assertEqual(step.remote_kwargs, dict(want_stdout=0,
logEnviron=False, workdir='build',
usePTY='slave-config'))
def test_constructor_args_validity(self):
# this checks that an exception is raised for invalid arguments
self.assertRaisesConfigError(
"Invalid argument(s) passed to RemoteShellCommand: ",
lambda: shell.ShellCommand('build', "echo Hello World",
wrongArg1=1, wrongArg2='two'))
def test_describe_no_command(self):
step = shell.ShellCommand(workdir='build')
self.assertEqual((step.describe(), step.describe(done=True)),
(['???'],) * 2)
def test_describe_from_empty_command(self):
# this is more of a regression test for a potential failure, really
step = shell.ShellCommand(workdir='build', command=' ')
self.assertEqual((step.describe(), step.describe(done=True)),
(['???'],) * 2)
def test_describe_from_short_command(self):
step = shell.ShellCommand(workdir='build', command="true")
self.assertEqual((step.describe(), step.describe(done=True)),
(["'true'"],) * 2)
def test_describe_from_short_command_list(self):
step = shell.ShellCommand(workdir='build', command=["true"])
self.assertEqual((step.describe(), step.describe(done=True)),
(["'true'"],) * 2)
def test_describe_from_med_command(self):
step = shell.ShellCommand(command="echo hello")
self.assertEqual((step.describe(), step.describe(done=True)),
(["'echo", "hello'"],) * 2)
def test_describe_from_med_command_list(self):
step = shell.ShellCommand(command=["echo", "hello"])
self.assertEqual((step.describe(), step.describe(done=True)),
(["'echo", "hello'"],) * 2)
def test_describe_from_long_command(self):
step = shell.ShellCommand(command="this is a long command")
self.assertEqual((step.describe(), step.describe(done=True)),
(["'this", "is", "...'"],) * 2)
def test_describe_from_long_command_list(self):
step = shell.ShellCommand(command="this is a long command".split())
self.assertEqual((step.describe(), step.describe(done=True)),
(["'this", "is", "...'"],) * 2)
def test_describe_from_nested_command_list(self):
step = shell.ShellCommand(command=["this", ["is", "a"], "nested"])
self.assertEqual((step.describe(), step.describe(done=True)),
(["'this", "is", "...'"],) * 2)
def test_describe_from_nested_command_tuples(self):
step = shell.ShellCommand(command=["this", ("is", "a"), "nested"])
self.assertEqual((step.describe(), step.describe(done=True)),
(["'this", "is", "...'"],) * 2)
def test_describe_from_nested_command_list_empty(self):
step = shell.ShellCommand(command=["this", [], ["is", "a"], "nested"])
self.assertEqual((step.describe(), step.describe(done=True)),
(["'this", "is", "...'"],) * 2)
def test_describe_from_nested_command_list_deep(self):
step = shell.ShellCommand(command=[["this", [[["is", ["a"]]]]]])
self.assertEqual((step.describe(), step.describe(done=True)),
(["'this", "is", "...'"],) * 2)
def test_describe_custom(self):
step = shell.ShellCommand(command="echo hello",
description=["echoing"], descriptionDone=["echoed"])
self.assertEqual((step.describe(), step.describe(done=True)),
(['echoing'], ['echoed']))
def test_describe_with_suffix(self):
step = shell.ShellCommand(command="echo hello", descriptionSuffix="suffix")
self.assertEqual((step.describe(), step.describe(done=True)),
(["'echo", "hello'", 'suffix'],) * 2)
def test_describe_custom_with_suffix(self):
step = shell.ShellCommand(command="echo hello",
description=["echoing"], descriptionDone=["echoed"],
descriptionSuffix="suffix")
self.assertEqual((step.describe(), step.describe(done=True)),
(['echoing', 'suffix'], ['echoed', 'suffix']))
def test_describe_no_command_with_suffix(self):
step = shell.ShellCommand(workdir='build', descriptionSuffix="suffix")
self.assertEqual((step.describe(), step.describe(done=True)),
(['???', 'suffix'],) * 2)
def test_describe_unrendered_WithProperties(self):
step = shell.ShellCommand(command=properties.WithProperties(''))
self.assertEqual((step.describe(), step.describe(done=True)),
(['???'],) * 2)
    def test_describe_unrendered_custom_new_style_class_renderable(self):
step = shell.ShellCommand(command=object())
self.assertEqual((step.describe(), step.describe(done=True)),
(['???'],) * 2)
    def test_describe_unrendered_custom_old_style_class_renderable(self):
class C:
pass
step = shell.ShellCommand(command=C())
self.assertEqual((step.describe(), step.describe(done=True)),
(['???'],) * 2)
def test_describe_unrendered_WithProperties_list(self):
step = shell.ShellCommand(
command=['x', properties.WithProperties(''), 'y'])
self.assertEqual((step.describe(), step.describe(done=True)),
(["'x", "y'"],) * 2)
def test_run_simple(self):
self.setupStep(
shell.ShellCommand(workdir='build', command="echo hello"))
self.expectCommands(
ExpectShell(workdir='build', command='echo hello',
usePTY="slave-config")
+ 0
)
self.expectOutcome(result=SUCCESS, status_text=["'echo", "hello'"])
return self.runStep()
def test_run_list(self):
self.setupStep(
shell.ShellCommand(workdir='build',
command=['trial', '-b', '-B', 'buildbot.test']))
self.expectCommands(
ExpectShell(workdir='build',
command=['trial', '-b', '-B', 'buildbot.test'],
usePTY="slave-config")
+ 0
)
self.expectOutcome(result=SUCCESS,
status_text=["'trial", "-b", "...'"])
return self.runStep()
def test_run_nested_description(self):
self.setupStep(
shell.ShellCommand(workdir='build',
command=properties.FlattenList(['trial', ['-b', '-B'], 'buildbot.test']),
description=properties.FlattenList(['test', ['done']])))
self.expectCommands(
ExpectShell(workdir='build',
command=['trial', '-b', '-B', 'buildbot.test'],
usePTY="slave-config")
+ 0
)
self.expectOutcome(result=SUCCESS,
status_text=['test', 'done'])
return self.runStep()
def test_run_nested_command(self):
self.setupStep(
shell.ShellCommand(workdir='build',
command=['trial', ['-b', '-B'], 'buildbot.test']))
self.expectCommands(
ExpectShell(workdir='build',
command=['trial', '-b', '-B', 'buildbot.test'],
usePTY="slave-config")
+ 0
)
self.expectOutcome(result=SUCCESS,
status_text=["'trial", "-b", "...'"])
return self.runStep()
def test_run_nested_deeply_command(self):
self.setupStep(
shell.ShellCommand(workdir='build',
command=[['trial', ['-b', ['-B']]], 'buildbot.test']))
self.expectCommands(
ExpectShell(workdir='build',
command=['trial', '-b', '-B', 'buildbot.test'],
usePTY="slave-config")
+ 0
)
self.expectOutcome(result=SUCCESS,
status_text=["'trial", "-b", "...'"])
return self.runStep()
def test_run_nested_empty_command(self):
self.setupStep(
shell.ShellCommand(workdir='build',
command=['trial', [], '-b', [], 'buildbot.test']))
self.expectCommands(
ExpectShell(workdir='build',
command=['trial', '-b', 'buildbot.test'],
usePTY="slave-config")
+ 0
)
self.expectOutcome(result=SUCCESS,
status_text=["'trial", "-b", "...'"])
return self.runStep()
def test_run_env(self):
self.setupStep(
shell.ShellCommand(workdir='build', command="echo hello"),
slave_env=dict(DEF='HERE'))
self.expectCommands(
ExpectShell(workdir='build', command='echo hello',
usePTY="slave-config",
env=dict(DEF='HERE'))
+ 0
)
self.expectOutcome(result=SUCCESS, status_text=["'echo", "hello'"])
return self.runStep()
def test_run_env_override(self):
self.setupStep(
shell.ShellCommand(workdir='build', env={'ABC': '123'},
command="echo hello"),
slave_env=dict(ABC='XXX', DEF='HERE'))
self.expectCommands(
ExpectShell(workdir='build', command='echo hello',
usePTY="slave-config",
env=dict(ABC='123', DEF='HERE'))
+ 0
)
self.expectOutcome(result=SUCCESS, status_text=["'echo", "hello'"])
return self.runStep()
def test_run_usePTY(self):
self.setupStep(
shell.ShellCommand(workdir='build', command="echo hello",
usePTY=False))
self.expectCommands(
ExpectShell(workdir='build', command='echo hello',
usePTY=False)
+ 0
)
self.expectOutcome(result=SUCCESS, status_text=["'echo", "hello'"])
return self.runStep()
def test_run_usePTY_old_slave(self):
self.setupStep(
shell.ShellCommand(workdir='build', command="echo hello",
usePTY=True),
slave_version=dict(shell='1.1'))
self.expectCommands(
ExpectShell(workdir='build', command='echo hello')
+ 0
)
self.expectOutcome(result=SUCCESS, status_text=["'echo", "hello'"])
return self.runStep()
def test_run_decodeRC(self, rc=1, results=WARNINGS, extra_text=["warnings"]):
self.setupStep(
shell.ShellCommand(workdir='build', command="echo hello",
decodeRC={1: WARNINGS}))
self.expectCommands(
ExpectShell(workdir='build', command='echo hello',
usePTY="slave-config")
+ rc
)
self.expectOutcome(result=results, status_text=["'echo", "hello'"] + extra_text)
return self.runStep()
def test_run_decodeRC_defaults(self):
return self.test_run_decodeRC(2, FAILURE, extra_text=["failed"])
def test_run_decodeRC_defaults_0_is_failure(self):
return self.test_run_decodeRC(0, FAILURE, extra_text=["failed"])
class TreeSize(steps.BuildStepMixin, unittest.TestCase):
def setUp(self):
return self.setUpBuildStep()
def tearDown(self):
return self.tearDownBuildStep()
def test_run_success(self):
self.setupStep(shell.TreeSize())
self.expectCommands(
ExpectShell(workdir='wkdir', usePTY='slave-config',
command=['du', '-s', '-k', '.'])
+ ExpectShell.log('stdio', stdout='9292 .\n')
+ 0
)
self.expectOutcome(result=SUCCESS,
status_text=["treesize", "9292 KiB"])
self.expectProperty('tree-size-KiB', 9292)
return self.runStep()
def test_run_misparsed(self):
self.setupStep(shell.TreeSize())
self.expectCommands(
ExpectShell(workdir='wkdir', usePTY='slave-config',
command=['du', '-s', '-k', '.'])
            + ExpectShell.log('stdio', stdout='abcdef\n')
+ 0
)
self.expectOutcome(result=WARNINGS,
status_text=["treesize", "unknown"])
return self.runStep()
def test_run_failed(self):
self.setupStep(shell.TreeSize())
self.expectCommands(
ExpectShell(workdir='wkdir', usePTY='slave-config',
command=['du', '-s', '-k', '.'])
+ ExpectShell.log('stdio', stderr='abcdef\n')
+ 1
)
self.expectOutcome(result=FAILURE,
status_text=["treesize", "unknown"])
return self.runStep()
class SetPropertyFromCommand(steps.BuildStepMixin, unittest.TestCase):
def setUp(self):
return self.setUpBuildStep()
def tearDown(self):
return self.tearDownBuildStep()
def test_constructor_conflict(self):
self.assertRaises(config.ConfigErrors, lambda:
shell.SetPropertyFromCommand(property='foo', extract_fn=lambda: None))
def test_run_property(self):
self.setupStep(shell.SetPropertyFromCommand(property="res", command="cmd"))
self.expectCommands(
ExpectShell(workdir='wkdir', usePTY='slave-config',
command="cmd")
+ ExpectShell.log('stdio', stdout='\n\nabcdef\n')
+ 0
)
self.expectOutcome(result=SUCCESS,
status_text=["property 'res' set"])
self.expectProperty("res", "abcdef") # note: stripped
self.expectLogfile('property changes', r"res: 'abcdef'")
return self.runStep()
def test_run_property_no_strip(self):
self.setupStep(shell.SetPropertyFromCommand(property="res", command="cmd",
strip=False))
self.expectCommands(
ExpectShell(workdir='wkdir', usePTY='slave-config',
command="cmd")
+ ExpectShell.log('stdio', stdout='\n\nabcdef\n')
+ 0
)
self.expectOutcome(result=SUCCESS,
status_text=["property 'res' set"])
self.expectProperty("res", "\n\nabcdef\n")
self.expectLogfile('property changes', r"res: '\n\nabcdef\n'")
return self.runStep()
def test_run_failure(self):
self.setupStep(shell.SetPropertyFromCommand(property="res", command="blarg"))
self.expectCommands(
ExpectShell(workdir='wkdir', usePTY='slave-config',
command="blarg")
+ ExpectShell.log('stdio', stderr='cannot blarg: File not found')
+ 1
)
self.expectOutcome(result=FAILURE,
status_text=["'blarg'", "failed"])
self.expectNoProperty("res")
return self.runStep()
def test_run_extract_fn(self):
def extract_fn(rc, stdout, stderr):
self.assertEqual((rc, stdout, stderr), (0, 'startend', 'STARTEND'))
return dict(a=1, b=2)
self.setupStep(shell.SetPropertyFromCommand(extract_fn=extract_fn, command="cmd"))
self.expectCommands(
ExpectShell(workdir='wkdir', usePTY='slave-config',
command="cmd")
+ ExpectShell.log('stdio', stdout='start', stderr='START')
+ ExpectShell.log('stdio', stdout='end')
+ ExpectShell.log('stdio', stderr='END')
+ 0
)
self.expectOutcome(result=SUCCESS,
status_text=["2 properties set"])
self.expectLogfile('property changes', 'a: 1\nb: 2')
self.expectProperty("a", 1)
self.expectProperty("b", 2)
return self.runStep()
def test_run_extract_fn_cmdfail(self):
def extract_fn(rc, stdout, stderr):
self.assertEqual((rc, stdout, stderr), (3, '', ''))
return dict(a=1, b=2)
self.setupStep(shell.SetPropertyFromCommand(extract_fn=extract_fn, command="cmd"))
self.expectCommands(
ExpectShell(workdir='wkdir', usePTY='slave-config',
command="cmd")
+ 3
)
# note that extract_fn *is* called anyway
self.expectOutcome(result=FAILURE,
status_text=["2 properties set"])
self.expectLogfile('property changes', 'a: 1\nb: 2')
return self.runStep()
def test_run_extract_fn_cmdfail_empty(self):
def extract_fn(rc, stdout, stderr):
self.assertEqual((rc, stdout, stderr), (3, '', ''))
return dict()
self.setupStep(shell.SetPropertyFromCommand(extract_fn=extract_fn, command="cmd"))
self.expectCommands(
ExpectShell(workdir='wkdir', usePTY='slave-config',
command="cmd")
+ 3
)
# note that extract_fn *is* called anyway, but returns no properties
self.expectOutcome(result=FAILURE,
status_text=["'cmd'", "failed"])
return self.runStep()
@compat.usesFlushLoggedErrors
def test_run_extract_fn_exception(self):
def extract_fn(rc, stdout, stderr):
raise RuntimeError("oh noes")
self.setupStep(shell.SetPropertyFromCommand(extract_fn=extract_fn, command="cmd"))
self.expectCommands(
ExpectShell(workdir='wkdir', usePTY='slave-config',
command="cmd")
+ 0
)
# note that extract_fn *is* called anyway, but returns no properties
self.expectOutcome(result=EXCEPTION,
status_text=["setproperty", "exception"])
d = self.runStep()
d.addCallback(lambda _:
self.assertEqual(len(self.flushLoggedErrors(RuntimeError)), 1))
return d
class SetPropertyDeprecation(unittest.TestCase):
"""
Tests for L{shell.SetProperty}
"""
def test_deprecated(self):
"""
Accessing L{shell.SetProperty} reports a deprecation error.
"""
shell.SetProperty
warnings = self.flushWarnings([self.test_deprecated])
self.assertEqual(len(warnings), 1)
self.assertIdentical(warnings[0]['category'], DeprecationWarning)
self.assertEqual(warnings[0]['message'],
"buildbot.steps.shell.SetProperty was deprecated in Buildbot 0.8.8: "
"It has been renamed to SetPropertyFromCommand"
)
class Configure(unittest.TestCase):
def test_class_attrs(self):
# nothing too exciting here, but at least make sure the class is present
step = shell.Configure()
self.assertEqual(step.command, ['./configure'])
class WarningCountingShellCommand(steps.BuildStepMixin, unittest.TestCase):
def setUp(self):
return self.setUpBuildStep()
def tearDown(self):
return self.tearDownBuildStep()
def test_no_warnings(self):
self.setupStep(shell.WarningCountingShellCommand(workdir='w',
command=['make']))
self.expectCommands(
ExpectShell(workdir='w', usePTY='slave-config',
command=["make"])
+ ExpectShell.log('stdio', stdout='blarg success!')
+ 0
)
self.expectOutcome(result=SUCCESS, status_text=["'make'"])
self.expectProperty("warnings-count", 0)
return self.runStep()
def test_default_pattern(self):
self.setupStep(shell.WarningCountingShellCommand(command=['make']))
self.expectCommands(
ExpectShell(workdir='wkdir', usePTY='slave-config',
command=["make"])
+ ExpectShell.log('stdio',
stdout='normal: foo\nwarning: blarg!\nalso normal')
+ 0
)
self.expectOutcome(result=WARNINGS, status_text=["'make'", "warnings"])
self.expectProperty("warnings-count", 1)
self.expectLogfile("warnings (1)", "warning: blarg!\n")
return self.runStep()
def test_custom_pattern(self):
self.setupStep(shell.WarningCountingShellCommand(command=['make'],
warningPattern=r"scary:.*"))
self.expectCommands(
ExpectShell(workdir='wkdir', usePTY='slave-config',
command=["make"])
+ ExpectShell.log('stdio',
stdout='scary: foo\nwarning: bar\nscary: bar')
+ 0
)
self.expectOutcome(result=WARNINGS, status_text=["'make'", "warnings"])
self.expectProperty("warnings-count", 2)
self.expectLogfile("warnings (2)", "scary: foo\nscary: bar\n")
return self.runStep()
def test_maxWarnCount(self):
self.setupStep(shell.WarningCountingShellCommand(command=['make'],
maxWarnCount=9))
self.expectCommands(
ExpectShell(workdir='wkdir', usePTY='slave-config',
command=["make"])
+ ExpectShell.log('stdio', stdout='warning: noo!\n' * 10)
+ 0
)
self.expectOutcome(result=FAILURE, status_text=["'make'", "failed"])
self.expectProperty("warnings-count", 10)
return self.runStep()
def test_fail_with_warnings(self):
self.setupStep(shell.WarningCountingShellCommand(command=['make']))
self.expectCommands(
ExpectShell(workdir='wkdir', usePTY='slave-config',
command=["make"])
+ ExpectShell.log('stdio', stdout='warning: I might fail')
+ 3
)
self.expectOutcome(result=FAILURE, status_text=["'make'", "failed"])
self.expectProperty("warnings-count", 1)
self.expectLogfile("warnings (1)", "warning: I might fail\n")
return self.runStep()
def do_test_suppressions(self, step, supps_file='', stdout='',
exp_warning_count=0, exp_warning_log='',
exp_exception=False):
self.setupStep(step)
# Invoke the expected callbacks for the suppression file upload. Note
# that this assumes all of the remote_* are synchronous, but can be
# easily adapted to suit if that changes (using inlineCallbacks)
def upload_behavior(command):
writer = command.args['writer']
writer.remote_write(supps_file)
writer.remote_close()
self.expectCommands(
# step will first get the remote suppressions file
Expect('uploadFile', dict(blocksize=32768, maxsize=None,
slavesrc='supps', workdir='wkdir',
writer=ExpectRemoteRef(shell.StringFileWriter)))
+ Expect.behavior(upload_behavior),
# and then run the command
ExpectShell(workdir='wkdir', usePTY='slave-config',
command=["make"])
+ ExpectShell.log('stdio', stdout=stdout)
+ 0
)
if exp_exception:
self.expectOutcome(result=EXCEPTION,
status_text=["shell", "exception"])
else:
if exp_warning_count != 0:
self.expectOutcome(result=WARNINGS,
status_text=["'make'", "warnings"])
self.expectLogfile("warnings (%d)" % exp_warning_count,
exp_warning_log)
else:
self.expectOutcome(result=SUCCESS,
status_text=["'make'"])
self.expectProperty("warnings-count", exp_warning_count)
return self.runStep()
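    # The suppression files exercised below use WarningCountingShellCommand's
    # line format -- "<file-regex> : <warning-regex> [ : <first>-<last> ]" --
    # with '#' introducing comments (see the supps_file literals in each test).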
def test_suppressions(self):
step = shell.WarningCountingShellCommand(command=['make'],
suppressionFile='supps')
supps_file = textwrap.dedent("""\
# example suppressions file
amar.c : .*unused variable.*
holding.c : .*invalid access to non-static.*
""").strip()
stdout = textwrap.dedent("""\
/bin/sh ../libtool --tag=CC --silent --mode=link gcc blah
/bin/sh ../libtool --tag=CC --silent --mode=link gcc blah
amar.c: In function 'write_record':
amar.c:164: warning: unused variable 'x'
amar.c:164: warning: this should show up
/bin/sh ../libtool --tag=CC --silent --mode=link gcc blah
/bin/sh ../libtool --tag=CC --silent --mode=link gcc blah
holding.c: In function 'holding_thing':
holding.c:984: warning: invalid access to non-static 'y'
""")
exp_warning_log = textwrap.dedent("""\
amar.c:164: warning: this should show up
""")
return self.do_test_suppressions(step, supps_file, stdout, 1,
exp_warning_log)
def test_suppressions_directories(self):
def warningExtractor(step, line, match):
return line.split(':', 2)
step = shell.WarningCountingShellCommand(command=['make'],
suppressionFile='supps',
warningExtractor=warningExtractor)
supps_file = textwrap.dedent("""\
# these should be suppressed:
amar-src/amar.c : XXX
.*/server-src/.* : AAA
# these should not, as the dirs do not match:
amar.c : YYY
server-src.* : BBB
""").strip()
# note that this uses the unicode smart-quotes that gcc loves so much
stdout = textwrap.dedent(u"""\
make: Entering directory \u2019amar-src\u2019
amar.c:164: warning: XXX
amar.c:165: warning: YYY
make: Leaving directory 'amar-src'
make: Entering directory "subdir"
make: Entering directory 'server-src'
make: Entering directory `one-more-dir`
holding.c:999: warning: BBB
holding.c:1000: warning: AAA
""")
exp_warning_log = textwrap.dedent("""\
amar.c:165: warning: YYY
holding.c:999: warning: BBB
""")
return self.do_test_suppressions(step, supps_file, stdout, 2,
exp_warning_log)
def test_suppressions_directories_custom(self):
def warningExtractor(step, line, match):
return line.split(':', 2)
step = shell.WarningCountingShellCommand(command=['make'],
suppressionFile='supps',
warningExtractor=warningExtractor,
directoryEnterPattern="^IN: (.*)",
directoryLeavePattern="^OUT:")
supps_file = "dir1/dir2/abc.c : .*"
stdout = textwrap.dedent(u"""\
IN: dir1
IN: decoy
OUT: decoy
IN: dir2
abc.c:123: warning: hello
""")
return self.do_test_suppressions(step, supps_file, stdout, 0, '')
def test_suppressions_linenos(self):
def warningExtractor(step, line, match):
return line.split(':', 2)
step = shell.WarningCountingShellCommand(command=['make'],
suppressionFile='supps',
warningExtractor=warningExtractor)
supps_file = "abc.c:.*:100-199\ndef.c:.*:22"
stdout = textwrap.dedent(u"""\
abc.c:99: warning: seen 1
abc.c:150: warning: unseen
def.c:22: warning: unseen
abc.c:200: warning: seen 2
""")
exp_warning_log = textwrap.dedent(u"""\
abc.c:99: warning: seen 1
abc.c:200: warning: seen 2
""")
return self.do_test_suppressions(step, supps_file, stdout, 2,
exp_warning_log)
@compat.usesFlushLoggedErrors
def test_suppressions_warningExtractor_exc(self):
def warningExtractor(step, line, match):
raise RuntimeError("oh noes")
step = shell.WarningCountingShellCommand(command=['make'],
suppressionFile='supps',
warningExtractor=warningExtractor)
supps_file = 'x:y' # need at least one supp to trigger warningExtractor
stdout = "abc.c:99: warning: seen 1"
d = self.do_test_suppressions(step, supps_file, stdout,
exp_exception=True)
d.addCallback(lambda _:
self.assertEqual(len(self.flushLoggedErrors(RuntimeError)), 1))
return d
def test_suppressions_addSuppression(self):
# call addSuppression "manually" from a subclass
class MyWCSC(shell.WarningCountingShellCommand):
def start(self):
self.addSuppression([('.*', '.*unseen.*', None, None)])
return shell.WarningCountingShellCommand.start(self)
def warningExtractor(step, line, match):
return line.split(':', 2)
step = MyWCSC(command=['make'], suppressionFile='supps',
warningExtractor=warningExtractor)
stdout = textwrap.dedent(u"""\
abc.c:99: warning: seen 1
abc.c:150: warning: unseen
abc.c:200: warning: seen 2
""")
exp_warning_log = textwrap.dedent(u"""\
abc.c:99: warning: seen 1
abc.c:200: warning: seen 2
""")
return self.do_test_suppressions(step, '', stdout, 2,
exp_warning_log)
def test_warnExtractFromRegexpGroups(self):
step = shell.WarningCountingShellCommand(command=['make'])
we = shell.WarningCountingShellCommand.warnExtractFromRegexpGroups
line, pat, exp_file, exp_lineNo, exp_text = \
('foo:123:text', '(.*):(.*):(.*)', 'foo', 123, 'text')
self.assertEqual(we(step, line, re.match(pat, line)),
(exp_file, exp_lineNo, exp_text))
class Compile(steps.BuildStepMixin, unittest.TestCase):
def setUp(self):
return self.setUpBuildStep()
def tearDown(self):
return self.tearDownBuildStep()
def test_class_args(self):
# since this step is just a pre-configured WarningCountingShellCommand,
        # there's not much to test!
step = self.setupStep(shell.Compile())
self.assertEqual(step.name, "compile")
self.assertTrue(step.haltOnFailure)
self.assertTrue(step.flunkOnFailure)
self.assertEqual(step.description, ["compiling"])
self.assertEqual(step.descriptionDone, ["compile"])
self.assertEqual(step.command, ["make", "all"])
class Test(steps.BuildStepMixin, unittest.TestCase):
def setUp(self):
self.setUpBuildStep()
def tearDown(self):
self.tearDownBuildStep()
def test_setTestResults(self):
step = self.setupStep(shell.Test())
step.setTestResults(total=10, failed=3, passed=5, warnings=3)
self.assertEqual(self.step_statistics, {
'tests-total': 10,
'tests-failed': 3,
'tests-passed': 5,
'tests-warnings': 3,
})
# ensure that they're additive
step.setTestResults(total=1, failed=2, passed=3, warnings=4)
self.assertEqual(self.step_statistics, {
'tests-total': 11,
'tests-failed': 5,
'tests-passed': 8,
'tests-warnings': 7,
})
def test_describe_not_done(self):
step = self.setupStep(shell.Test())
self.assertEqual(step.describe(), ['testing'])
def test_describe_done(self):
step = self.setupStep(shell.Test())
self.step_statistics['tests-total'] = 93
self.step_statistics['tests-failed'] = 10
self.step_statistics['tests-passed'] = 20
self.step_statistics['tests-warnings'] = 30
self.assertEqual(step.describe(done=True), ['test', '93 tests',
'20 passed', '30 warnings', '10 failed'])
def test_describe_done_no_total(self):
step = self.setupStep(shell.Test())
self.step_statistics['tests-total'] = 0
self.step_statistics['tests-failed'] = 10
self.step_statistics['tests-passed'] = 20
self.step_statistics['tests-warnings'] = 30
# describe calculates 60 = 10+20+30
self.assertEqual(step.describe(done=True), ['test', '60 tests',
'20 passed', '30 warnings', '10 failed'])
|
mitya57/debian-buildbot
|
buildbot/test/unit/test_steps_shell.py
|
Python
|
gpl-2.0
| 36,531
|
[
"exciting"
] |
14dde78dcfca2eb6770e89774d18bb23f9099aa974398f8e8911b8252aa0f301
|
#!/usr/bin/env python3
"""this is a short script that runs findRFI and saves the results to a python pickle file"""
import sys
import numpy as np
import matplotlib.pyplot as plt
from LoLIM.utilities import processed_data_dir, logger
from LoLIM.IO.raw_tbb_IO import MultiFile_Dal1, filePaths_by_stationName
#from LoLIM.findRFI_OLD import FindRFI
from LoLIM.findRFI import FindRFI
#from LoLIM.findRFI_TST import FindRFI
from os import mkdir
from os.path import isdir
from pickle import dump
## these lines are anachronistic and should be fixed at some point
from LoLIM import utilities
utilities.default_raw_data_loc = "/home/brian/KAP_data_link/lightning_data"
utilities.default_processed_data_loc = "/home/brian/processed_files"
if __name__ == "__main__":
timeID = "D20190424T194432.504Z"
output_folder = "/findRFI"
out_fname = "/findRFI_results"
block_size = 2**16
initial_block = 250
num_blocks = 20
max_blocks = 500
skip_stations = ['CS201']
processed_data_dir = processed_data_dir(timeID)
output_fpath = processed_data_dir + output_folder
if not isdir(output_fpath):
mkdir(output_fpath)
log = logger()
log.set(output_fpath+'/log.txt')
log("timeID:", timeID)
log("initial_block:", initial_block)
log("num_blocks:", num_blocks)
log("max_blocks:", max_blocks)
log("skip_stations:", skip_stations)
# log.take_stdout()
#### get paths to raw data by station ####
raw_fpaths = filePaths_by_stationName(timeID)
output = {}
station_log = logger()
station_log.take_stdout()
for station in raw_fpaths.keys():
if station in skip_stations:
continue
path = output_fpath + '/' + station
if not isdir(path):
mkdir(path)
station_log.set(path+'/log.txt')
print("station", station)
TBB_data = MultiFile_Dal1( raw_fpaths[station] )
out = FindRFI(TBB_data, block_size, initial_block, num_blocks, max_blocks, verbose=True, figure_location=path, num_dbl_z=1000)
if out is None:
log("cannot find RFI for station:", station)
else:
output[station] = out
print()
with open(output_fpath+out_fname, 'wb') as fout:
dump(output, fout)
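    # A minimal sketch (assuming the same output_fpath/out_fname as above) of
    # how a later script could read the pickled results back:
    # from pickle import load
    # with open(output_fpath + out_fname, 'rb') as fin:
    #     findRFI_results = load(fin)  # dict: station name -> FindRFI output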
|
Bhare8972/LOFAR-LIM
|
LIM_scripts/examples/run_findRFI.py
|
Python
|
mit
| 2,489
|
[
"Brian"
] |
d6cc4389d11b619529b15e32d5350b8261e0a4560f4f4bc7673432b12e377ab6
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2017 Malcolm Ramsay <malramsay64@gmail.com>
#
# Distributed under terms of the MIT license.
"""Testing the crystal class of statdyn."""
from pathlib import Path
import hoomd
import numpy as np
import pytest
from hypothesis import given, settings
from hypothesis.extra.numpy import arrays
from hypothesis.strategies import floats, integers, tuples
from statdyn import crystals
from statdyn.simulation import initialise
from statdyn.simulation.helper import SimulationParams
TEST_CLASSES = [
crystals.Crystal,
crystals.CrysTrimer,
crystals.TrimerP2,
crystals.TrimerP2gg,
crystals.TrimerPg,
crystals.SquareCircle,
crystals.CubicSphere,
]
output_dir = Path('test/output')
output_dir.mkdir(exist_ok=True)
PARAMETERS = SimulationParams(
temperature=0.4,
num_steps=100,
outfile_path=output_dir,
crystal=crystals.TrimerP2(),
cell_dimensions=(32, 40),
)
@pytest.mark.parametrize("crys_class", TEST_CLASSES)
def test_init(crys_class):
"""Check the class will initialise."""
crys_class()
@pytest.mark.parametrize("crys_class", TEST_CLASSES)
def test_get_orientations(crys_class):
"""Test the orientation is returned as a float."""
crys = crys_class()
orient = crys.get_orientations()
assert orient.dtype == np.float32
@pytest.mark.parametrize("crys_class", TEST_CLASSES)
def test_get_unitcell(crys_class):
"""Test the return type is correct."""
crys = crys_class()
assert isinstance(crys.get_unitcell(), hoomd.lattice.unitcell)
@pytest.mark.parametrize("crys_class", TEST_CLASSES)
def test_compute_volume(crys_class):
"""Test the return type of the volume computation."""
crys = crys_class()
assert isinstance(crys.compute_volume(), float)
@pytest.mark.parametrize("crys_class", TEST_CLASSES)
def test_abs_positions(crys_class):
"""Check the absolute positions function return corectly shaped matrix."""
crys = crys_class()
assert crys.get_abs_positions().shape == np.array(crys.positions).shape
def get_distance(pos_a, pos_b, box):
"""Compute the periodic distance between two numpy arrays."""
ortho_box = np.array((box.Lx, box.Ly, box.Lz))
delta_x = pos_b - pos_a
delta_x -= ortho_box * (delta_x > ortho_box * 0.5)
delta_x += ortho_box * (delta_x <= -ortho_box * 0.5)
return np.sqrt(np.square(delta_x).sum(axis=1))
class mybox(object):
"""Simple box class."""
def __init__(self):
"""init."""
self.Lx = 1.
self.Ly = 1.
self.Lz = 1.
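# Illustrative minimum-image behaviour of get_distance with the unit mybox
# (a sketch, not part of the original tests): points at x=0.1 and x=0.9 are
# 0.2 apart through the periodic boundary, not 0.8:
# get_distance(np.array([[0.1, 0., 0.]]), np.array([[0.9, 0., 0.]]), mybox())
# -> array([0.2])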
@given(arrays(np.float, 3, elements=floats(min_value=-1, max_value=1)),
arrays(np.float, 3, elements=floats(min_value=-1, max_value=1)))
def test_get_distance(pos_a, pos_b):
"""Test the periodic distance function."""
box = mybox()
diff = get_distance(np.array([pos_a]), np.array([pos_b]), box)
print(diff, diff-np.sqrt(2))
assert get_distance(np.array([pos_a]), np.array([pos_b]), box) <= np.sqrt(3)
@given(tuples(integers(max_value=30, min_value=1),
integers(max_value=30, min_value=1)))
@settings(max_examples=3, deadline=None)
def test_cell_dimensions(cell_dimensions):
"""Test cell paramters work properly."""
snap = initialise.init_from_crystal(PARAMETERS)
for i in snap.particles.position:
distances = get_distance(i, snap.particles.position, snap.box) < 1.1
assert np.sum(distances) <= 3
|
malramsay64/MD-Molecules-Hoomd
|
test/crystal_test.py
|
Python
|
mit
| 3,465
|
[
"CRYSTAL"
] |
8fc2cf55a24822243c5d009c81ccd98abe1759184e8d68e8424111fcd75de216
|
# J. Pocahontas Olson June 2016
# An API simulating a source of Fibonacci numbers with some noise.
# For instance, poorly defined bumps on tree trunks, a hard to measure bunny population,
# visual scan of arrangement of sunflower seeds, or any other system where you
# know your values are fibonacci numbers, but some noise has been introduced in your measurement.
import fibonacci_module as fb # Custom module made for this project
import numpy as np
import random
# Constants to set range of Fibonacci numbers, and gaussian noise
FIB_MAX = 1000
GAUSSIAN_MEAN = 1
GAUSSIAN_STDEV = 0.1
## Add Gaussian noise to simulate real-world data collection.
# Noise scales with the size of the number, mimicking that you're likely to be
# more precise when there are only a few items to count, and off by more when
# large numbers are involved.
def add_noise(list_of_pure_data):
noisy_data = []
for i in range(len(list_of_pure_data)):
noisy_data.append(np.random.normal(GAUSSIAN_MEAN,GAUSSIAN_STDEV,1)[0]*list_of_pure_data[i])
return noisy_data
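# Example of the size-proportional noise above (illustrative numbers only):
# with mean 1 and stdev 0.1, add_noise([8, 144]) might return [7.9, 150.3] --
# the absolute error grows with the value while the relative error stays
# near 10%.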
## API call, mimicking real-world data collection
def get_data(how_much):
# Get how_much random Fibonacci numbers
fibnumbers = fb.fibList(FIB_MAX)
data = []
for i in range(how_much):
data.append(fibnumbers[random.randrange(1, FIB_MAX, 1)])
    # Add Gaussian noise to simulate real-world data (add_noise returns a new
    # list rather than mutating its argument, so the result must be kept)
    data = add_noise(data)
return data
if __name__ == "__main__":
SHOW_THIS_MANY = 20
print("..Obtaining", SHOW_THIS_MANY, "data points, with values obtained from the first", FIB_MAX)
print("..fibonacci numbers, with Gausian noise of mean", GAUSSIAN_MEAN,"and standard deviation", GAUSSIAN_STDEV, ".)\n")
print(get_data(SHOW_THIS_MANY))
|
Aturen/Fibonacci
|
noisy_input_API.py
|
Python
|
apache-2.0
| 1,743
|
[
"Gaussian"
] |
b61752d3eb324d5a7c48d19f6141528e84bbbcff0983e6629108299d1a3956f0
|
import warnings
import numpy as np
from .. import coding, conventions
from ..core import indexing
from ..core.pycompat import integer_types
from ..core.utils import FrozenDict, HiddenKeyDict
from ..core.variable import Variable
from .common import AbstractWritableDataStore, BackendArray, _encode_variable_name
# need some special secret attributes to tell us the dimensions
DIMENSION_KEY = "_ARRAY_DIMENSIONS"
def encode_zarr_attr_value(value):
"""
    Encode an attribute value as something that can be serialized as JSON
Many xarray datasets / variables have numpy arrays and values. This
function handles encoding / decoding of such items.
ndarray -> list
scalar array -> scalar
other -> other (no change)
"""
if isinstance(value, np.ndarray):
encoded = value.tolist()
# this checks if it's a scalar number
elif isinstance(value, np.generic):
encoded = value.item()
else:
encoded = value
return encoded
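# Illustrative round-trips for the encoder above (a sketch, not part of the
# original module):
# encode_zarr_attr_value(np.array([1, 2])) -> [1, 2] (ndarray -> list)
# encode_zarr_attr_value(np.float64(0.5)) -> 0.5 (scalar array -> scalar)
# encode_zarr_attr_value("title") -> "title" (unchanged)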
class ZarrArrayWrapper(BackendArray):
__slots__ = ("datastore", "dtype", "shape", "variable_name")
def __init__(self, variable_name, datastore):
self.datastore = datastore
self.variable_name = variable_name
array = self.get_array()
self.shape = array.shape
dtype = array.dtype
self.dtype = dtype
def get_array(self):
return self.datastore.ds[self.variable_name]
def __getitem__(self, key):
array = self.get_array()
if isinstance(key, indexing.BasicIndexer):
return array[key.tuple]
elif isinstance(key, indexing.VectorizedIndexer):
return array.vindex[
indexing._arrayize_vectorized_indexer(key, self.shape).tuple
]
else:
assert isinstance(key, indexing.OuterIndexer)
return array.oindex[key.tuple]
# if self.ndim == 0:
# could possibly have a work-around for 0d data here
def _determine_zarr_chunks(enc_chunks, var_chunks, ndim, name):
"""
Given encoding chunks (possibly None) and variable chunks (possibly None)
"""
# zarr chunk spec:
# chunks : int or tuple of ints, optional
# Chunk shape. If not provided, will be guessed from shape and dtype.
# if there are no chunks in encoding and the variable data is a numpy
# array, then we let zarr use its own heuristics to pick the chunks
if var_chunks is None and enc_chunks is None:
return None
# if there are no chunks in encoding but there are dask chunks, we try to
# use the same chunks in zarr
# However, zarr chunks needs to be uniform for each array
# http://zarr.readthedocs.io/en/latest/spec/v1.html#chunks
# while dask chunks can be variable sized
# http://dask.pydata.org/en/latest/array-design.html#chunks
if var_chunks and enc_chunks is None:
if any(len(set(chunks[:-1])) > 1 for chunks in var_chunks):
raise ValueError(
"Zarr requires uniform chunk sizes except for final chunk. "
f"Variable named {name!r} has incompatible dask chunks: {var_chunks!r}. "
"Consider rechunking using `chunk()`."
)
if any((chunks[0] < chunks[-1]) for chunks in var_chunks):
raise ValueError(
"Final chunk of Zarr array must be the same size or smaller "
f"than the first. Variable named {name!r} has incompatible Dask chunks {var_chunks!r}."
"Consider either rechunking using `chunk()` or instead deleting "
"or modifying `encoding['chunks']`."
)
# return the first chunk for each dimension
return tuple(chunk[0] for chunk in var_chunks)
# from here on, we are dealing with user-specified chunks in encoding
# zarr allows chunks to be an integer, in which case it uses the same chunk
# size on each dimension.
# Here we re-implement this expansion ourselves. That makes the logic of
# checking chunk compatibility easier
if isinstance(enc_chunks, integer_types):
enc_chunks_tuple = ndim * (enc_chunks,)
else:
enc_chunks_tuple = tuple(enc_chunks)
if len(enc_chunks_tuple) != ndim:
# throw away encoding chunks, start over
return _determine_zarr_chunks(None, var_chunks, ndim, name)
for x in enc_chunks_tuple:
if not isinstance(x, int):
raise TypeError(
"zarr chunk sizes specified in `encoding['chunks']` "
"must be an int or a tuple of ints. "
f"Instead found encoding['chunks']={enc_chunks_tuple!r} "
f"for variable named {name!r}."
)
# if there are chunks in encoding and the variable data is a numpy array,
# we use the specified chunks
if var_chunks is None:
return enc_chunks_tuple
# the hard case
# DESIGN CHOICE: do not allow multiple dask chunks on a single zarr chunk
# this avoids the need to get involved in zarr synchronization / locking
# From zarr docs:
# "If each worker in a parallel computation is writing to a separate
# region of the array, and if region boundaries are perfectly aligned
# with chunk boundaries, then no synchronization is required."
# TODO: incorporate synchronizer to allow writes from multiple dask
# threads
if var_chunks and enc_chunks_tuple:
for zchunk, dchunks in zip(enc_chunks_tuple, var_chunks):
for dchunk in dchunks[:-1]:
if dchunk % zchunk:
raise NotImplementedError(
f"Specified zarr chunks encoding['chunks']={enc_chunks_tuple!r} for "
f"variable named {name!r} would overlap multiple dask chunks {var_chunks!r}. "
"This is not implemented in xarray yet. "
"Consider either rechunking using `chunk()` or instead deleting "
"or modifying `encoding['chunks']`."
)
if dchunks[-1] > zchunk:
raise ValueError(
"Final chunk of Zarr array must be the same size or "
"smaller than the first. "
f"Specified Zarr chunk encoding['chunks']={enc_chunks_tuple}, "
f"for variable named {name!r} "
f"but {dchunks} in the variable's Dask chunks {var_chunks} is "
"incompatible with this encoding. "
"Consider either rechunking using `chunk()` or instead deleting "
"or modifying `encoding['chunks']`."
)
return enc_chunks_tuple
raise AssertionError("We should never get here. Function logic must be wrong.")
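# Illustrative outcomes of the rules above (shapes invented for the sketch):
# _determine_zarr_chunks(None, None, 2, 'v') -> None (let zarr guess)
# _determine_zarr_chunks(None, ((5, 5, 2), (4, 4)), 2, 'v') -> (5, 4)
# (first chunk per dimension; uniform-except-last dask chunks are accepted)
# _determine_zarr_chunks(10, ((10, 10, 4), (10, 10)), 2, 'v') -> (10, 10)
# (each dask chunk divides evenly into the requested 10x10 zarr chunks)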
def _get_zarr_dims_and_attrs(zarr_obj, dimension_key):
    # Zarr arrays do not have dimensions. To get around this problem, we add
# an attribute that specifies the dimension. We have to hide this attribute
# when we send the attributes to the user.
# zarr_obj can be either a zarr group or zarr array
try:
dimensions = zarr_obj.attrs[dimension_key]
except KeyError:
raise KeyError(
"Zarr object is missing the attribute `%s`, which is "
"required for xarray to determine variable dimensions." % (dimension_key)
)
attributes = HiddenKeyDict(zarr_obj.attrs, [dimension_key])
return dimensions, attributes
def extract_zarr_variable_encoding(variable, raise_on_invalid=False, name=None):
"""
Extract zarr encoding dictionary from xarray Variable
Parameters
----------
variable : xarray.Variable
raise_on_invalid : bool, optional
Returns
-------
encoding : dict
Zarr encoding for `variable`
"""
encoding = variable.encoding.copy()
valid_encodings = {"chunks", "compressor", "filters", "cache_metadata"}
if raise_on_invalid:
invalid = [k for k in encoding if k not in valid_encodings]
if invalid:
raise ValueError(
"unexpected encoding parameters for zarr " "backend: %r" % invalid
)
else:
for k in list(encoding):
if k not in valid_encodings:
del encoding[k]
chunks = _determine_zarr_chunks(
encoding.get("chunks"), variable.chunks, variable.ndim, name
)
encoding["chunks"] = chunks
return encoding
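# For illustration (an assumed example, not from the module): if
# variable.encoding == {"chunks": (10,), "foo": 1}, the default call drops the
# unknown key "foo" silently, while raise_on_invalid=True raises ValueError.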
# Function below is copied from conventions.encode_cf_variable.
# The only change is to raise an error for object dtypes.
def encode_zarr_variable(var, needs_copy=True, name=None):
"""
    Converts a Variable into a Variable which follows some
of the CF conventions:
- Nans are masked using _FillValue (or the deprecated missing_value)
- Rescaling via: scale_factor and add_offset
- datetimes are converted to the CF 'units since time' format
- dtype encodings are enforced.
Parameters
----------
var : xarray.Variable
A variable holding un-encoded data.
Returns
-------
out : xarray.Variable
A variable which has been encoded as described above.
"""
var = conventions.encode_cf_variable(var, name=name)
# zarr allows unicode, but not variable-length strings, so it's both
# simpler and more compact to always encode as UTF-8 explicitly.
# TODO: allow toggling this explicitly via dtype in encoding.
coder = coding.strings.EncodedStringCoder(allows_unicode=True)
var = coder.encode(var, name=name)
var = coding.strings.ensure_fixed_length_bytes(var)
return var
class ZarrStore(AbstractWritableDataStore):
"""Store for reading and writing data via zarr
"""
__slots__ = (
"append_dim",
"ds",
"_consolidate_on_close",
"_group",
"_read_only",
"_synchronizer",
)
@classmethod
def open_group(
cls,
store,
mode="r",
synchronizer=None,
group=None,
consolidated=False,
consolidate_on_close=False,
):
import zarr
open_kwargs = dict(mode=mode, synchronizer=synchronizer, path=group)
if consolidated:
# TODO: an option to pass the metadata_key keyword
zarr_group = zarr.open_consolidated(store, **open_kwargs)
else:
zarr_group = zarr.open_group(store, **open_kwargs)
return cls(zarr_group, consolidate_on_close)
def __init__(self, zarr_group, consolidate_on_close=False):
self.ds = zarr_group
self._read_only = self.ds.read_only
self._synchronizer = self.ds.synchronizer
self._group = self.ds.path
self._consolidate_on_close = consolidate_on_close
self.append_dim = None
def open_store_variable(self, name, zarr_array):
data = indexing.LazilyOuterIndexedArray(ZarrArrayWrapper(name, self))
dimensions, attributes = _get_zarr_dims_and_attrs(zarr_array, DIMENSION_KEY)
attributes = dict(attributes)
encoding = {
"chunks": zarr_array.chunks,
"compressor": zarr_array.compressor,
"filters": zarr_array.filters,
}
# _FillValue needs to be in attributes, not encoding, so it will get
# picked up by decode_cf
if getattr(zarr_array, "fill_value") is not None:
attributes["_FillValue"] = zarr_array.fill_value
return Variable(dimensions, data, attributes, encoding)
def get_variables(self):
return FrozenDict(
(k, self.open_store_variable(k, v)) for k, v in self.ds.arrays()
)
def get_attrs(self):
attributes = dict(self.ds.attrs.asdict())
return attributes
def get_dimensions(self):
dimensions = {}
for k, v in self.ds.arrays():
try:
for d, s in zip(v.attrs[DIMENSION_KEY], v.shape):
if d in dimensions and dimensions[d] != s:
raise ValueError(
"found conflicting lengths for dimension %s "
"(%d != %d)" % (d, s, dimensions[d])
)
dimensions[d] = s
except KeyError:
raise KeyError(
"Zarr object is missing the attribute `%s`, "
"which is required for xarray to determine "
"variable dimensions." % (DIMENSION_KEY)
)
return dimensions
def set_dimensions(self, variables, unlimited_dims=None):
if unlimited_dims is not None:
raise NotImplementedError(
"Zarr backend doesn't know how to handle unlimited dimensions"
)
def set_attributes(self, attributes):
self.ds.attrs.put(attributes)
def encode_variable(self, variable):
variable = encode_zarr_variable(variable)
return variable
def encode_attribute(self, a):
return encode_zarr_attr_value(a)
def store(
self,
variables,
attributes,
check_encoding_set=frozenset(),
writer=None,
unlimited_dims=None,
):
"""
Top level method for putting data on this store, this method:
- encodes variables/attributes
- sets dimensions
- sets variables
Parameters
----------
variables : dict-like
Dictionary of key/value (variable name / xr.Variable) pairs
attributes : dict-like
Dictionary of key/value (attribute name / attribute) pairs
check_encoding_set : list-like
List of variables that should be checked for invalid encoding
values
writer : ArrayWriter
unlimited_dims : list-like
List of dimension names that should be treated as unlimited
dimensions.
"""
existing_variables = {
vn for vn in variables if _encode_variable_name(vn) in self.ds
}
new_variables = set(variables) - existing_variables
variables_without_encoding = {vn: variables[vn] for vn in new_variables}
variables_encoded, attributes = self.encode(
variables_without_encoding, attributes
)
if len(existing_variables) > 0:
# there are variables to append
# their encoding must be the same as in the store
ds = open_zarr(self.ds.store, group=self.ds.path, chunks=None)
variables_with_encoding = {}
for vn in existing_variables:
variables_with_encoding[vn] = variables[vn].copy(deep=False)
variables_with_encoding[vn].encoding = ds[vn].encoding
variables_with_encoding, _ = self.encode(variables_with_encoding, {})
variables_encoded.update(variables_with_encoding)
self.set_attributes(attributes)
self.set_dimensions(variables_encoded, unlimited_dims=unlimited_dims)
self.set_variables(
variables_encoded, check_encoding_set, writer, unlimited_dims=unlimited_dims
)
def sync(self):
pass
def set_variables(self, variables, check_encoding_set, writer, unlimited_dims=None):
"""
This provides a centralized method to set the variables on the data
store.
Parameters
----------
variables : dict-like
Dictionary of key/value (variable name / xr.Variable) pairs
check_encoding_set : list-like
List of variables that should be checked for invalid encoding
values
writer :
unlimited_dims : list-like
List of dimension names that should be treated as unlimited
dimensions.
"""
for vn, v in variables.items():
name = _encode_variable_name(vn)
check = vn in check_encoding_set
attrs = v.attrs.copy()
dims = v.dims
dtype = v.dtype
shape = v.shape
fill_value = attrs.pop("_FillValue", None)
if v.encoding == {"_FillValue": None} and fill_value is None:
v.encoding = {}
if self.append_dim is not None and self.append_dim in dims:
# resize existing variable
zarr_array = self.ds[name]
append_axis = dims.index(self.append_dim)
new_region = [slice(None)] * len(dims)
new_region[append_axis] = slice(zarr_array.shape[append_axis], None)
region = tuple(new_region)
new_shape = list(zarr_array.shape)
new_shape[append_axis] += v.shape[append_axis]
zarr_array.resize(new_shape)
elif name in self.ds:
# override existing variable
zarr_array = self.ds[name]
region = None
else:
# new variable
encoding = extract_zarr_variable_encoding(
v, raise_on_invalid=check, name=vn
)
encoded_attrs = {}
# the magic for storing the hidden dimension data
encoded_attrs[DIMENSION_KEY] = dims
for k2, v2 in attrs.items():
encoded_attrs[k2] = self.encode_attribute(v2)
if coding.strings.check_vlen_dtype(dtype) == str:
dtype = str
zarr_array = self.ds.create(
name, shape=shape, dtype=dtype, fill_value=fill_value, **encoding
)
zarr_array.attrs.put(encoded_attrs)
region = None
writer.add(v.data, zarr_array, region=region)
def close(self):
if self._consolidate_on_close:
import zarr
zarr.consolidate_metadata(self.ds.store)
def open_zarr(
store,
group=None,
synchronizer=None,
chunks="auto",
decode_cf=True,
mask_and_scale=True,
decode_times=True,
concat_characters=True,
decode_coords=True,
drop_variables=None,
consolidated=False,
overwrite_encoded_chunks=False,
**kwargs,
):
"""Load and decode a dataset from a Zarr store.
.. note:: Experimental
The Zarr backend is new and experimental. Please report any
unexpected behavior via github issues.
The `store` object should be a valid store for a Zarr group. `store`
variables must contain dimension metadata encoded in the
`_ARRAY_DIMENSIONS` attribute.
Parameters
----------
store : MutableMapping or str
A MutableMapping where a Zarr Group has been stored or a path to a
directory in file system where a Zarr DirectoryStore has been stored.
synchronizer : object, optional
Array synchronizer provided to zarr
group : str, optional
Group path. (a.k.a. `path` in zarr terminology.)
chunks : int or dict or tuple or {None, 'auto'}, optional
Chunk sizes along each dimension, e.g., ``5`` or
``{'x': 5, 'y': 5}``. If `chunks='auto'`, dask chunks are created
based on the variable's zarr chunks. If `chunks=None`, zarr array
data will lazily convert to numpy arrays upon access. This accepts
all the chunk specifications as Dask does.
overwrite_encoded_chunks: bool, optional
Whether to drop the zarr chunks encoded for each variable when a
dataset is loaded with specified chunk sizes (default: False)
decode_cf : bool, optional
Whether to decode these variables, assuming they were saved according
to CF conventions.
mask_and_scale : bool, optional
If True, replace array values equal to `_FillValue` with NA and scale
values according to the formula `original_values * scale_factor +
add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are
taken from variable attributes (if they exist). If the `_FillValue` or
`missing_value` attribute contains multiple values a warning will be
issued and all array values matching one of the multiple values will
be replaced by NA.
decode_times : bool, optional
If True, decode times encoded in the standard NetCDF datetime format
into datetime objects. Otherwise, leave them encoded as numbers.
concat_characters : bool, optional
If True, concatenate along the last dimension of character arrays to
form string arrays. Dimensions will only be concatenated over (and
removed) if they have no corresponding variable and if they are only
used as the last dimension of character arrays.
decode_coords : bool, optional
If True, decode the 'coordinates' attribute to identify coordinates in
the resulting dataset.
drop_variables : string or iterable, optional
A variable or list of variables to exclude from being parsed from the
dataset. This may be useful to drop variables with problems or
inconsistent values.
consolidated : bool, optional
Whether to open the store using zarr's consolidated metadata
capability. Only works for stores that have already been consolidated.
Returns
-------
dataset : Dataset
The newly created dataset.
See Also
--------
open_dataset
References
----------
http://zarr.readthedocs.io/
"""
if "auto_chunk" in kwargs:
auto_chunk = kwargs.pop("auto_chunk")
if auto_chunk:
chunks = "auto" # maintain backwards compatibility
else:
chunks = None
warnings.warn(
"auto_chunk is deprecated. Use chunks='auto' instead.",
FutureWarning,
stacklevel=2,
)
if kwargs:
raise TypeError(
"open_zarr() got unexpected keyword arguments " + ",".join(kwargs.keys())
)
if not isinstance(chunks, (int, dict)):
if chunks != "auto" and chunks is not None:
raise ValueError(
"chunks must be an int, dict, 'auto', or None. "
"Instead found %s. " % chunks
)
if chunks == "auto":
try:
import dask.array # noqa
except ImportError:
chunks = None
if not decode_cf:
mask_and_scale = False
decode_times = False
concat_characters = False
decode_coords = False
def maybe_decode_store(store, lock=False):
ds = conventions.decode_cf(
store,
mask_and_scale=mask_and_scale,
decode_times=decode_times,
concat_characters=concat_characters,
decode_coords=decode_coords,
drop_variables=drop_variables,
)
# TODO: this is where we would apply caching
return ds
# Zarr supports a wide range of access modes, but for now xarray either
# reads or writes from a store, never both. For open_zarr, we only read
mode = "r"
zarr_store = ZarrStore.open_group(
store,
mode=mode,
synchronizer=synchronizer,
group=group,
consolidated=consolidated,
)
ds = maybe_decode_store(zarr_store)
# auto chunking needs to be here and not in ZarrStore because variable
# chunks do not survive decode_cf
# return trivial case
if not chunks:
return ds
# adapted from Dataset.Chunk()
if isinstance(chunks, int):
chunks = dict.fromkeys(ds.dims, chunks)
if isinstance(chunks, tuple) and len(chunks) == len(ds.dims):
chunks = dict(zip(ds.dims, chunks))
def get_chunk(name, var, chunks):
chunk_spec = dict(zip(var.dims, var.encoding.get("chunks")))
# Coordinate labels aren't chunked
if var.ndim == 1 and var.dims[0] == name:
return chunk_spec
if chunks == "auto":
return chunk_spec
for dim in var.dims:
if dim in chunks:
spec = chunks[dim]
if isinstance(spec, int):
spec = (spec,)
if isinstance(spec, (tuple, list)) and chunk_spec[dim]:
if any(s % chunk_spec[dim] for s in spec):
warnings.warn(
"Specified Dask chunks %r would "
"separate Zarr chunk shape %r for "
"dimension %r. This significantly "
"degrades performance. Consider "
"rechunking after loading instead."
% (chunks[dim], chunk_spec[dim], dim),
stacklevel=2,
)
chunk_spec[dim] = chunks[dim]
return chunk_spec
def maybe_chunk(name, var, chunks):
from dask.base import tokenize
chunk_spec = get_chunk(name, var, chunks)
if (var.ndim > 0) and (chunk_spec is not None):
# does this cause any data to be read?
token2 = tokenize(name, var._data)
name2 = "zarr-%s" % token2
var = var.chunk(chunk_spec, name=name2, lock=None)
if overwrite_encoded_chunks and var.chunks is not None:
var.encoding["chunks"] = tuple(x[0] for x in var.chunks)
return var
else:
return var
variables = {k: maybe_chunk(k, v, chunks) for k, v in ds.variables.items()}
return ds._replace_vars_and_dims(variables)
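# Minimal usage sketch for open_zarr (the store path "example.zarr" is
# hypothetical):
# import xarray as xr
# ds = xr.open_zarr("example.zarr", consolidated=True, chunks="auto")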
|
shoyer/xarray
|
xarray/backends/zarr.py
|
Python
|
apache-2.0
| 25,673
|
[
"NetCDF"
] |
4d326f87c35625e05ea0346abfaa3d395e167b1f51bad9712f0c3b5b5c418a53
|
#! /usr/bin/env python3
# Table Printer Chap. 6
# Function that takes lists of strings and displays them in an organized table
tableData = [['apples','oranges','cherries','bananas'],
['Alice','Bob','Carol','David'],
['dogs','cats','moose','goose']]
def printTable(dataLists):
    # width of each column = length of the longest string in that inner list
    colWidths = [max(len(item) for item in column) for column in dataLists]
    # print row by row, right-justifying each cell to its column's width
    for row in range(len(dataLists[0])):
        print(' '.join(dataLists[col][row].rjust(colWidths[col])
                       for col in range(len(dataLists))))
printTable(tableData)
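# Expected output for tableData (each column right-justified to its width):
#   apples Alice  dogs
#  oranges   Bob  cats
# cherries Carol moose
#  bananas David goose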
|
lotcom/automateBoringstuffPython
|
Chap6PracPrintTable.py
|
Python
|
cc0-1.0
| 625
|
[
"MOOSE"
] |
09941de4fdfcaf3c5dad1b694d304d64845ca6a39000e6b79aaf77d81f00efa2
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import unittest
from builtins import str
from singa import tensor
from singa import singa_wrap as singa
from singa import device
from singa import autograd
import numpy as np
autograd.training = True
CTensor = singa.Tensor
gpu_dev = device.create_cuda_gpu()
cpu_dev = device.get_default_device()
dy = CTensor([2, 1, 2, 2])
singa.Gaussian(0.0, 1.0, dy)
def _tuple_to_string(t):
lt = [str(x) for x in t]
return '(' + ', '.join(lt) + ')'
def prepare_inputs_targets_for_rnn_test():
x_0 = np.random.random((2, 3)).astype(np.float32)
x_1 = np.random.random((2, 3)).astype(np.float32)
x_2 = np.random.random((2, 3)).astype(np.float32)
h_0 = np.zeros((2, 2)).astype(
np.float32)
t_0 = np.random.random((2, 2)).astype(np.float32)
t_1 = np.random.random((2, 2)).astype(np.float32)
t_2 = np.random.random((2, 2)).astype(np.float32)
x0 = tensor.Tensor(device=gpu_dev, data=x_0)
x1 = tensor.Tensor(device=gpu_dev, data=x_1)
x2 = tensor.Tensor(device=gpu_dev, data=x_2)
h0 = tensor.Tensor(device=gpu_dev, data=h_0)
t0 = tensor.Tensor(device=gpu_dev, data=t_0)
t1 = tensor.Tensor(device=gpu_dev, data=t_1)
t2 = tensor.Tensor(device=gpu_dev, data=t_2)
inputs = [x0, x1, x2]
targets = [t0, t1, t2]
return inputs, targets, h0
class TestPythonOperation(unittest.TestCase):
def check_shape(self, actual, expect):
        self.assertEqual(actual, expect, 'shape mismatch, actual shape is %s'
                         ' expected is %s' % (_tuple_to_string(actual),
_tuple_to_string(expect))
)
def test_conv2d_gpu(self):
# (in_channels, out_channels, kernel_size)
conv_0 = autograd.Conv2d(3, 1, 2)
conv_without_bias_0 = autograd.Conv2d(3, 1, 2, bias=False)
gpu_input_tensor = tensor.Tensor(shape=(2, 3, 3, 3), device=gpu_dev)
gpu_input_tensor.gaussian(0.0, 1.0)
y = conv_0(gpu_input_tensor) # PyTensor
dx, dW, db = y.creator.backward(dy) # CTensor
self.check_shape(y.shape, (2, 1, 2, 2))
self.check_shape(dx.shape(), (2, 3, 3, 3))
self.check_shape(dW.shape(), (1, 3, 2, 2))
self.check_shape(db.shape(), (1,))
# forward without bias
y_without_bias = conv_without_bias_0(gpu_input_tensor)
self.check_shape(y_without_bias.shape, (2, 1, 2, 2))
def test_conv2d_cpu(self):
# (in_channels, out_channels, kernel_size)
conv_1 = autograd.Conv2d(3, 1, 2)
conv_without_bias_1 = autograd.Conv2d(3, 1, 2, bias=False)
cpu_input_tensor = tensor.Tensor(shape=(2, 3, 3, 3), device=cpu_dev)
cpu_input_tensor.gaussian(0.0, 1.0)
y = conv_1(cpu_input_tensor) # PyTensor
dx, dW, db = y.creator.backward(dy) # CTensor
self.check_shape(y.shape, (2, 1, 2, 2))
self.check_shape(dx.shape(), (2, 3, 3, 3))
self.check_shape(dW.shape(), (1, 3, 2, 2))
self.check_shape(db.shape(), (1,))
# forward without bias
y_without_bias = conv_without_bias_1(cpu_input_tensor)
self.check_shape(y_without_bias.shape, (2, 1, 2, 2))
def test_SeparableConv2d_gpu(self):
separ_conv=autograd.SeparableConv2d(8, 16, 3, padding=1)
x=np.random.random((10,8,28,28)).astype(np.float32)
x=tensor.Tensor(device=gpu_dev, data=x)
#y = separ_conv(x)
y1 = separ_conv.spacial_conv(x)
y2 = separ_conv.depth_conv(y1)
dy1, dW_depth, _ = y2.creator.backward(y2.data)
dx, dW_spacial, _ = y1.creator.backward(dy1)
self.check_shape(y2.shape, (10, 16, 28, 28))
self.check_shape(dy1.shape(), (10, 8, 28, 28))
self.check_shape(dW_depth.shape(), (16, 8, 1, 1))
self.check_shape(dx.shape(), (10, 8, 28, 28))
self.check_shape(dW_spacial.shape(), (8, 1, 3, 3))
def test_batchnorm2d_gpu(self):
batchnorm_0 = autograd.BatchNorm2d(3)
gpu_input_tensor = tensor.Tensor(shape=(2, 3, 3, 3), device=gpu_dev)
gpu_input_tensor.gaussian(0.0, 1.0)
dy = CTensor([2, 3, 3, 3])
singa.Gaussian(0.0, 1.0, dy)
y = batchnorm_0(gpu_input_tensor)
dx, ds, db = y.creator.backward(dy)
self.check_shape(y.shape, (2, 3, 3, 3))
self.check_shape(dx.shape(), (2, 3, 3, 3))
self.check_shape(ds.shape(), (3,))
self.check_shape(db.shape(), (3,))
def test_vanillaRNN_gpu_tiny_ops_shape_check(self):
# gradients shape check.
inputs, target, h0 = prepare_inputs_targets_for_rnn_test()
rnn = autograd.RNN(3, 2)
hs, _ = rnn(inputs, h0)
loss = autograd.softmax_cross_entropy(hs[0], target[0])
for i in range(1, len(hs)):
l = autograd.softmax_cross_entropy(hs[i], target[i])
loss = autograd.add(loss, l)
# d=autograd.infer_dependency(loss.creator)
# print(d)
for t, dt in autograd.backward(loss):
self.check_shape(t.shape, dt.shape)
def test_LSTM_gpu_tiny_ops_shape_check(self):
# gradients shape check.
inputs, target, h0 = prepare_inputs_targets_for_rnn_test()
        c_0 = np.random.random((2, 2)).astype(np.float32)
c0 = tensor.Tensor(device=gpu_dev, data=c_0)
rnn = autograd.LSTM(3, 2)
hs, _, _ = rnn(inputs, (h0, c0))
loss = autograd.softmax_cross_entropy(hs[0], target[0])
for i in range(1, len(hs)):
l = autograd.softmax_cross_entropy(hs[i], target[i])
loss = autograd.add(loss, l)
# d=autograd.infer_dependency(loss.creator)
# print(d)
for t, dt in autograd.backward(loss):
self.check_shape(t.shape, dt.shape)
def gradients_check(self, func, param, autograds, h=0.0005, df=1):
# param: PyTensor
# autograds: numpy_tensor
p = tensor.to_numpy(param)
it = np.nditer(p, flags=['multi_index'], op_flags=['readwrite'])
while not it.finished:
idx = it.multi_index
diff = np.zeros_like(p)
diff[idx] += h
diff = tensor.from_numpy(diff)
diff.to_device(gpu_dev)
param += diff
pos = func()
pos = tensor.to_numpy(pos)
param -= diff
param -= diff
            neg = func()
            neg = tensor.to_numpy(neg)
            param += diff  # restore the parameter to its original value
            numerical_grad = np.sum((pos - neg) * df) / (2 * h)
#print((autograds[idx] - numerical_grad)/numerical_grad)
# threshold set as -5% to +5%
#self.assertAlmostEqual((autograds[idx] - numerical_grad)/(numerical_grad+0.0000001), 0., places=1)
self.assertAlmostEqual(
autograds[idx] - numerical_grad, 0., places=2)
it.iternext()
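    # The loop above is a central finite difference: each parameter entry p is
    # perturbed to p+h and p-h, and the gradient is estimated as
    # (loss(p+h) - loss(p-h)) / (2*h), summed over outputs and scaled by df.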
def test_numerical_gradients_check_for_vallina_rnn(self):
inputs, target, h0 = prepare_inputs_targets_for_rnn_test()
rnn = autograd.RNN(3, 2)
def valinna_rnn_forward():
hs, _ = rnn(inputs, h0)
loss = autograd.softmax_cross_entropy(hs[0], target[0])
for i in range(1, len(hs)):
l = autograd.softmax_cross_entropy(hs[i], target[i])
loss = autograd.add(loss, l)
#grads = autograd.gradients(loss)
return loss
loss1 = valinna_rnn_forward()
auto_grads = autograd.gradients(loss1)
for param in rnn.params:
auto_grad = tensor.to_numpy(auto_grads[param])
self.gradients_check(valinna_rnn_forward, param, auto_grad)
def test_numerical_gradients_check_for_lstm(self):
inputs, target, h0 = prepare_inputs_targets_for_rnn_test()
c_0 = np.zeros((2, 2)).astype(np.float32)
c0 = tensor.Tensor(device=gpu_dev, data=c_0)
rnn = autograd.LSTM(3, 2)
def lstm_forward():
hs, _, _ = rnn(inputs, (h0, c0))
loss = autograd.softmax_cross_entropy(hs[0], target[0])
for i in range(1, len(hs)):
l = autograd.softmax_cross_entropy(hs[i], target[i])
loss = autograd.add(loss, l)
return loss
loss1 = lstm_forward()
auto_grads = autograd.gradients(loss1)
for param in rnn.params:
auto_grad = tensor.to_numpy(auto_grads[param])
self.gradients_check(lstm_forward, param, auto_grad)
def test_MeanSquareError(self):
X=np.array([4.3,5.4,3.3,3.6,5.7,6.0]).reshape(3,2).astype(np.float32)
T=np.array([4.4,5.3,3.2,3.7,5.4,6.3]).reshape(3,2).astype(np.float32)
x=tensor.from_numpy(X)
t=tensor.from_numpy(T)
x.to_device(gpu_dev)
t.to_device(gpu_dev)
loss= autograd.mse_loss(x,t)
dx=loss.creator.backward()[0]
loss_np=tensor.to_numpy(loss)[0]
self.assertAlmostEqual(loss_np, 0.0366666, places=4)
self.check_shape(dx.shape(), (3, 2))
def test_Abs(self):
X=np.array([0.8,-1.2,3.3,-3.6,-0.5,0.5]).reshape(3,2).astype(np.float32)
XT=np.array([0.8,1.2,3.3,3.6,0.5,0.5]).reshape(3,2).astype(np.float32)
x=tensor.from_numpy(X)
x.to_device(gpu_dev)
result=autograd.abs(x)
dx=result.creator.backward(x.data)
np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT)
self.check_shape(dx.shape(), (3, 2))
def test_Exp(self):
X=np.array([0.8,-1.2,3.3,-3.6,-0.5,0.5]).reshape(3,2).astype(np.float32)
XT=np.exp(X)
x=tensor.from_numpy(X)
x.to_device(gpu_dev)
result=autograd.exp(x)
print("exp")
print(result)
dx=result.creator.backward(x.data)
np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT, decimal=5)
self.check_shape(dx.shape(), (3, 2))
def test_LeakyRelu(self):
X=np.array([0.8,-1.2,3.3,-3.6,-0.5,0.5]).reshape(3,2).astype(np.float32)
XT=np.array([0.8,-0.012,3.3,-0.036,-0.005,0.5]).reshape(3,2).astype(np.float32)
x=tensor.from_numpy(X)
x.to_device(gpu_dev)
result=autograd.leakyrelu(x)
dx=result.creator.backward(x.data)
np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT)
self.check_shape(dx.shape(), (3, 2))
if __name__ == '__main__':
unittest.main()
|
nusdbsystem/incubator-singa
|
test/python/test_operation.py
|
Python
|
apache-2.0
| 11,250
|
[
"Gaussian"
] |
7e3167b4080d7127d02946624efd5e82b8d6b0d92ebeab2bd5cd619d8f15c613
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime, date
from datetime import timedelta
from dateutil import relativedelta
from lxml import etree
import time
from openerp import SUPERUSER_ID
from openerp import tools
from openerp.addons.resource.faces import task as Task
from openerp.osv import fields, osv
from openerp.tools import float_is_zero
from openerp.tools.translate import _
class project_expenses(osv.osv):
_name = 'project.expenses'
_description = 'Project Expenses'
def _get_currency(self, cr, uid, ctx):
comp = self.pool.get('res.users').browse(cr,uid,uid).company_id
if not comp:
comp_id = self.pool.get('res.company').search(cr, uid, [])[0]
comp = self.pool.get('res.company').browse(cr, uid, comp_id)
return comp.currency_id.id
_columns = {
'name' : fields.char('Expense Name',required=True),
'category':fields.selection([('direct_cost','Other Direct Cost'),('direct_cost_nbec','Other Direct Cost(NBEC)'),
('direct_cost_seiodc','Other Direct Cost(SEI-ODC)'),('sga','SGA'),
('sga_nbec','SGA(NBEC)'),
('sga_seiodc','SGA(SEI-ODC)')],string="Expense Type"),
'amount':fields.integer('Amount'),
'date_from': fields.date('Date from', required=True, select=1, readonly=False),
'date_to': fields.date('Date to', required=True, select=1, readonly=False),
'currency_id' : fields.many2one('res.currency', "Currency", required=True,help="The currency the field is expressed in."),
}
_defaults = {
"currency_id": _get_currency,
"date_from" : lambda *a: time.strftime('%Y-%m-01'),
"date_to" : lambda *a: str(datetime.now() + relativedelta.relativedelta(months=+1, day=1, days=-1))[:10]
}
class project_billing_rate(osv.osv):
_name = 'project.billing.rate'
_description = 'Project Billing Rate'
def _get_currency(self, cr, uid, ctx):
comp = self.pool.get('res.users').browse(cr,uid,uid).company_id
if not comp:
comp_id = self.pool.get('res.company').search(cr, uid, [])[0]
comp = self.pool.get('res.company').browse(cr, uid, comp_id)
return comp.currency_id.id
_columns = {
'name' : fields.char('Name',required=True),
'role' : fields.char('Project Role'),
'rate': fields.integer('Project Role Rate'),
'currency_id' : fields.many2one('res.currency', "Currency", required=True, help="The currency the field is expressed in."),
}
_order = "id"
_defaults = {
"currency_id": _get_currency
}
class project_billing_rate_card(osv.osv):
_name = 'project.billing.rate.card'
_description = 'Project Billing Rate Card'
_columns = {
'name' : fields.char('Billing Rate Card Name',required=True),
'project_ids': fields.many2many('project.project', string="Projects"),
'billing_table': fields.many2many('project.billing.rate','project_billing_rate_card_rel','card_id','rate_id',string="Billing Table"),
}
_order = "id"
_defaults = {
}
class project_task_type(osv.osv):
_name = 'project.task.type'
_description = 'Task Stage'
_order = 'sequence'
_columns = {
'name': fields.char('Stage Name', required=True, translate=True),
'description': fields.text('Description'),
'sequence': fields.integer('Sequence'),
'case_default': fields.boolean('Default for New Projects',
help="If you check this field, this stage will be proposed by default on each new project. It will not assign this stage to existing projects."),
'project_ids': fields.many2many('project.project', 'project_task_type_rel', 'type_id', 'project_id',
'Projects'),
'fold': fields.boolean('Folded in Kanban View',
                               help='This stage is folded in the kanban view when '
                                    'there are no records in that stage to display.'),
}
    def _get_default_project_ids(self, cr, uid, ctx=None):
project_id = self.pool['project.task']._get_default_project_id(cr, uid, context=ctx)
if project_id:
return [project_id]
return None
_defaults = {
'sequence': 1,
'project_ids': _get_default_project_ids,
}
class project(osv.osv):
_name = "project.project"
_description = "Project"
_inherits = {'account.analytic.account': "analytic_account_id",
"mail.alias": "alias_id"}
_inherit = ['mail.thread', 'ir.needaction_mixin']
def _auto_init(self, cr, context=None):
""" Installation hook: aliases, project.project """
# create aliases for all projects and avoid constraint errors
alias_context = dict(context, alias_model_name='project.task')
return self.pool.get('mail.alias').migrate_to_alias(cr, self._name, self._table,
super(project, self)._auto_init,
'project.task', self._columns['alias_id'], 'id',
alias_prefix='project+',
alias_defaults={'project_id': 'id'}, context=alias_context)
def search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False):
if user == 1:
return super(project, self).search(cr, user, args, offset=offset, limit=limit, order=order, context=context,
count=count)
if context and context.get('user_preference'):
cr.execute("""SELECT project.id FROM project_project project
LEFT JOIN account_analytic_account account ON account.id = project.analytic_account_id
LEFT JOIN project_user_rel rel ON rel.project_id = project.id
                          WHERE (account.user_id = %s or rel.uid = %s)""", (user, user))
return [(r[0]) for r in cr.fetchall()]
return super(project, self).search(cr, user, args, offset=offset, limit=limit, order=order,
context=context, count=count)
def onchange_partner_id(self, cr, uid, ids, part=False, context=None):
partner_obj = self.pool.get('res.partner')
val = {}
if not part:
return {'value': val}
if 'pricelist_id' in self.fields_get(cr, uid, context=context):
pricelist = partner_obj.read(cr, uid, part, ['property_product_pricelist'], context=context)
pricelist_id = pricelist.get('property_product_pricelist', False) and \
pricelist.get('property_product_pricelist')[0] or False
val['pricelist_id'] = pricelist_id
return {'value': val}
def _get_projects_from_tasks(self, cr, uid, task_ids, context=None):
tasks = self.pool.get('project.task').browse(cr, uid, task_ids, context=context)
project_ids = [task.project_id.id for task in tasks if task.project_id]
return self.pool.get('project.project')._get_project_and_parents(cr, uid, project_ids, context)
def _get_project_and_parents(self, cr, uid, ids, context=None):
""" return the project ids and all their parent projects """
res = set(ids)
while ids:
cr.execute("""
SELECT DISTINCT parent.id
FROM project_project project, project_project parent, account_analytic_account account
WHERE project.analytic_account_id = account.id
AND parent.analytic_account_id = account.parent_id
AND project.id IN %s
""", (tuple(ids),))
ids = [t[0] for t in cr.fetchall()]
res.update(ids)
return list(res)
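    # A database-free sketch of the transitive parent walk above, using a
    # plain {child_id: parent_id} mapping in place of the SQL join. The
    # helper is hypothetical and only illustrates the loop's fixpoint logic:
    @staticmethod
    def _walk_parents_sketch(ids, parent_of):
        """Return `ids` plus all of their transitive parents."""
        res = set(ids)
        while ids:
            ids = [parent_of[i] for i in ids if parent_of.get(i)]
            res.update(ids)
        return list(res)
    # e.g. _walk_parents_sketch([3], {3: 2, 2: 1, 1: None}) -> [1, 2, 3] (in any order)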
def _get_project_and_children(self, cr, uid, ids, context=None):
""" retrieve all children projects of project ids;
return a dictionary mapping each project to its parent project (or None)
"""
res = dict.fromkeys(ids, None)
while ids:
cr.execute("""
SELECT project.id, parent.id
FROM project_project project, project_project parent, account_analytic_account account
WHERE project.analytic_account_id = account.id
AND parent.analytic_account_id = account.parent_id
AND parent.id IN %s
""", (tuple(ids),))
dic = dict(cr.fetchall())
res.update(dic)
ids = dic.keys()
return res
def _progress_rate(self, cr, uid, ids, names, arg, context=None):
child_parent = self._get_project_and_children(cr, uid, ids, context)
# compute planned_hours, total_hours, effective_hours specific to each project
cr.execute("""
SELECT project_id, COALESCE(SUM(planned_hours), 0.0),
COALESCE(SUM(total_hours), 0.0), COALESCE(SUM(effective_hours), 0.0)
FROM project_task
LEFT JOIN project_task_type ON project_task.stage_id = project_task_type.id
WHERE project_task.project_id IN %s AND project_task_type.fold = False
GROUP BY project_id
""", (tuple(child_parent.keys()),))
# aggregate results into res
res = dict([(id, {'planned_hours': 0.0, 'total_hours': 0.0, 'effective_hours': 0.0}) for id in ids])
for id, planned, total, effective in cr.fetchall():
# add the values specific to id to all parent projects of id in the result
while id:
if id in ids:
res[id]['planned_hours'] += planned
res[id]['total_hours'] += total
res[id]['effective_hours'] += effective
id = child_parent[id]
# compute progress rates
for id in ids:
if res[id]['total_hours']:
res[id]['progress_rate'] = round(100.0 * res[id]['effective_hours'] / res[id]['total_hours'], 2)
else:
res[id]['progress_rate'] = 0.0
return res
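    # Aggregation sketch for the `while id` climb above: with child_parent =
    # {3: 2, 2: 1, 1: None} (ids hypothetical) and hours recorded on project
    # 3, the climb adds those hours to 3, then 2, then 1, so every ancestor's
    # planned/total/effective totals include its whole subtree.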
def unlink(self, cr, uid, ids, context=None):
alias_ids = []
mail_alias = self.pool.get('mail.alias')
for proj in self.browse(cr, uid, ids, context=context):
if proj.tasks:
raise osv.except_osv(_('Invalid Action!'),
_(
'You cannot delete a project containing tasks. You can either delete all the project\'s tasks and then delete the project or simply deactivate the project.'))
elif proj.alias_id:
alias_ids.append(proj.alias_id.id)
res = super(project, self).unlink(cr, uid, ids, context=context)
mail_alias.unlink(cr, uid, alias_ids, context=context)
return res
def _get_attached_docs(self, cr, uid, ids, field_name, arg, context):
res = {}
attachment = self.pool.get('ir.attachment')
task = self.pool.get('project.task')
for id in ids:
project_attachments = attachment.search(cr, uid,
[('res_model', '=', 'project.project'), ('res_id', '=', id)],
context=context, count=True)
task_ids = task.search(cr, uid, [('project_id', '=', id)], context=context)
task_attachments = attachment.search(cr, uid,
[('res_model', '=', 'project.task'), ('res_id', 'in', task_ids)],
context=context, count=True)
res[id] = (project_attachments or 0) + (task_attachments or 0)
return res
def _task_count(self, cr, uid, ids, field_name, arg, context=None):
res = {}
        for tasks in self.browse(cr, uid, ids, dict(context or {}, active_test=False)):
res[tasks.id] = len(tasks.task_ids)
return res
def _get_alias_models(self, cr, uid, context=None):
""" Overriden in project_issue to offer more options """
return [('project.task', "Tasks")]
def _get_visibility_selection(self, cr, uid, context=None):
""" Overriden in portal_project to offer more options """
return [('public', _('Public project')),
('employees', _('Internal project: all employees can access')),
('followers', _('Private project: followers Only'))]
def attachment_tree_view(self, cr, uid, ids, context):
task_ids = self.pool.get('project.task').search(cr, uid, [('project_id', 'in', ids)])
domain = [
'|',
'&', ('res_model', '=', 'project.project'), ('res_id', 'in', ids),
'&', ('res_model', '=', 'project.task'), ('res_id', 'in', task_ids)]
res_id = ids and ids[0] or False
return {
'name': _('Attachments'),
'domain': domain,
'res_model': 'ir.attachment',
'type': 'ir.actions.act_window',
'view_id': False,
'view_mode': 'kanban,tree,form',
'view_type': 'form',
'limit': 80,
'context': "{'default_res_model': '%s','default_res_id': %d}" % (self._name, res_id)
}
    def onchange_resource_allocations(self, cr, uid, id, resource_allocations, context=None):
        """Propose the users behind the selected allocation sheets as project members."""
        uid_array = []
        if resource_allocations:
            for (x, y, ids) in resource_allocations:
                timesheets = self.pool.get('hr_timesheet_sheet.sheet').browse(cr, uid, ids, context=context)
                for timesheet in timesheets:
                    uid_array.append(timesheet.employee_id.user_id.id)
            return {'value': {'members': uid_array}}
        return {}
# Lambda indirection method to avoid passing a copy of the overridable method when declaring the field
_alias_models = lambda self, *args, **kwargs: self._get_alias_models(*args, **kwargs)
_visibility_selection = lambda self, *args, **kwargs: self._get_visibility_selection(*args, **kwargs)
_columns = {
'active': fields.boolean('Active',
help="If the active field is set to False, it will allow you to hide the project without removing it."),
'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of Projects."),
'analytic_account_id': fields.many2one(
'account.analytic.account', 'Contract/Analytic',
help="Link this project to an analytic account if you need financial management on projects. "
"It enables you to connect projects with budgets, planning, cost and revenue analysis, timesheets on projects, etc.",
ondelete="cascade", required=True, auto_join=True),
'members': fields.many2many('res.users', 'project_user_rel', 'project_id', 'uid', 'Project Members',
help="Project's members are users who can have an access to the tasks related to this project.",
states={'close': [('readonly', True)], 'cancelled': [('readonly', True)]}),
'tasks': fields.one2many('project.task', 'project_id', "Task Activities"),
'planned_hours': fields.function(_progress_rate, multi="progress", string='Planned Time',
help="Sum of planned hours of all tasks related to this project and its child projects.",
store={
'project.project': (
_get_project_and_parents, ['tasks', 'parent_id', 'child_ids'], 10),
'project.task': (_get_projects_from_tasks,
['planned_hours', 'remaining_hours', 'work_ids',
'stage_id'], 20),
}),
'effective_hours': fields.function(_progress_rate, multi="progress", string='Time Spent',
help="Sum of spent hours of all tasks related to this project and its child projects.",
store={
'project.project': (
_get_project_and_parents, ['tasks', 'parent_id', 'child_ids'], 10),
'project.task': (_get_projects_from_tasks,
['planned_hours', 'remaining_hours', 'work_ids',
'stage_id'], 20),
}),
'total_hours': fields.function(_progress_rate, multi="progress", string='Total Time',
help="Sum of total hours of all tasks related to this project and its child projects.",
store={
'project.project': (
_get_project_and_parents, ['tasks', 'parent_id', 'child_ids'], 10),
'project.task': (_get_projects_from_tasks,
['planned_hours', 'remaining_hours', 'work_ids',
'stage_id'], 20),
}),
'progress_rate': fields.function(_progress_rate, multi="progress", string='Progress', type='float',
group_operator="avg",
help="Percent of tasks closed according to the total of tasks todo.",
store={
'project.project': (
_get_project_and_parents, ['tasks', 'parent_id', 'child_ids'], 10),
'project.task': (_get_projects_from_tasks,
['planned_hours', 'remaining_hours', 'work_ids',
'stage_id'], 20),
}),
'resource_calendar_id': fields.many2one('resource.calendar', 'Working Time',
help="Timetable working hours to adjust the gantt diagram report",
states={'close': [('readonly', True)]}),
'type_ids': fields.many2many('project.task.type', 'project_task_type_rel', 'project_id', 'type_id',
'Tasks Stages',
states={'close': [('readonly', True)], 'cancelled': [('readonly', True)]}),
'task_count': fields.function(_task_count, type='integer', string="Tasks", ),
'task_ids': fields.one2many('project.task', 'project_id',
domain=[('stage_id.fold', '=', False)]),
'color': fields.integer('Color Index'),
'alias_id': fields.many2one('mail.alias', 'Alias', ondelete="restrict", required=True,
help="Internal email associated with this project. Incoming emails are automatically synchronized"
"with Tasks (or optionally Issues if the Issue Tracker module is installed)."),
'alias_model': fields.selection(_alias_models, "Alias Model", select=True, required=True,
help="The kind of document created when an email is received on this project's email alias"),
'privacy_visibility': fields.selection(_visibility_selection, 'Privacy / Visibility', required=True,
help="Holds visibility of the tasks or issues that belong to the current project:\n"
"- Public: everybody sees everything; if portal is activated, portal users\n"
" see all tasks or issues; if anonymous portal is activated, visitors\n"
" see all tasks or issues\n"
"- Portal (only available if Portal is installed): employees see everything;\n"
" if portal is activated, portal users see the tasks or issues followed by\n"
" them or by someone of their company\n"
"- Employees Only: employees see all tasks or issues\n"
"- Followers Only: employees see only the followed tasks or issues; if portal\n"
" is activated, portal users see the followed tasks or issues."),
'state': fields.selection([('template', 'Template'),
('draft', 'New'),
('open', 'In Progress'),
('cancelled', 'Cancelled'),
('pending', 'Pending'),
('close', 'Closed')],
'Status', required=True, copy=False),
'sap_project_code': fields.char('SAP Project Code'),
'project_status': fields.selection(
[('passive', 'Passive'), ('active', 'Active'), ('suspended', 'Suspended'), ('completed', 'Completed'),
('cancelled', 'Cancelled')], 'Project Status', copy=False),
'first_level': fields.char('First Level Tracking'),
'second_level': fields.char('Second Level Tracking'),
# 'sale_orders': fields.one2many('sale.order', 'po_project_id', 'Sale Orders'),
#'sale_orders': fields.many2many('sale.order', 'project_sale_order_rel', 'project_id', 'sales_id',
'sale_orders': fields.many2many('sale.order', string="Sale Orders", states={'close': [('readonly', True)], 'cancelled': [('readonly', True)]}),
# 'resource_allocations': fields.one2many('hr_timesheet_sheet.sheet','project_id','Resource Allocation Sheet'),
#'resource_allocations': fields.many2many('hr_timesheet_sheet.sheet', 'project_timesheet_rel', 'project_id', 'timesheet_id',
'resource_allocations': fields.many2many('hr_timesheet_sheet.sheet',
string = "Resource Allocation Sheet",states={'close': [('readonly', True)], 'cancelled': [('readonly', True)]}),
# 'sale_orders': fields.many2one('sale.order', 'Sale Orders'),
'department_id': fields.many2one('hr.department', 'Department'),
'doc_count': fields.function(
_get_attached_docs, string="Number of documents attached", type='integer'
)
}
def _get_type_common(self, cr, uid, context):
ids = self.pool.get('project.task.type').search(cr, uid, [('case_default', '=', 1)], context=context)
return ids
_order = "sequence, id"
_defaults = {
'active': True,
'type': 'contract',
'state': 'open',
'sequence': 10,
'type_ids': _get_type_common,
'alias_model': 'project.task',
'privacy_visibility': 'employees',
}
    # TODO: why not use a SQL constraint?
def _check_dates(self, cr, uid, ids, context=None):
for leave in self.read(cr, uid, ids, ['date_start', 'date'], context=context):
if leave['date_start'] and leave['date']:
if leave['date_start'] > leave['date']:
return False
return True
_constraints = [
        (_check_dates, 'Error! Project start-date must be earlier than project end-date.', ['date_start', 'date'])
]
def set_template(self, cr, uid, ids, context=None):
return self.setActive(cr, uid, ids, value=False, context=context)
def set_done(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'close'}, context=context)
def set_cancel(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'cancelled'}, context=context)
def set_pending(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'pending'}, context=context)
def set_open(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'open'}, context=context)
def reset_project(self, cr, uid, ids, context=None):
return self.setActive(cr, uid, ids, value=True, context=context)
def map_tasks(self, cr, uid, old_project_id, new_project_id, context=None):
""" copy and map tasks from old to new project """
if context is None:
context = {}
map_task_id = {}
task_obj = self.pool.get('project.task')
proj = self.browse(cr, uid, old_project_id, context=context)
for task in proj.tasks:
# preserve task name and stage, normally altered during copy
defaults = {'stage_id': task.stage_id.id,
'name': task.name}
map_task_id[task.id] = task_obj.copy(cr, uid, task.id, defaults, context=context)
self.write(cr, uid, [new_project_id], {'tasks': [(6, 0, map_task_id.values())]})
task_obj.duplicate_task(cr, uid, map_task_id, context=context)
return True
def copy(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
context = dict(context or {})
context['active_test'] = False
proj = self.browse(cr, uid, id, context=context)
if not default.get('name'):
default.update(name=_("%s (copy)") % (proj.name))
res = super(project, self).copy(cr, uid, id, default, context)
self.map_tasks(cr, uid, id, res, context=context)
return res
def duplicate_template(self, cr, uid, ids, context=None):
context = dict(context or {})
data_obj = self.pool.get('ir.model.data')
result = []
for proj in self.browse(cr, uid, ids, context=context):
parent_id = context.get('parent_id', False)
context.update({'analytic_project_copy': True})
new_date_start = time.strftime('%Y-%m-%d')
new_date_end = False
if proj.date_start and proj.date:
start_date = date(*time.strptime(proj.date_start, '%Y-%m-%d')[:3])
end_date = date(*time.strptime(proj.date, '%Y-%m-%d')[:3])
new_date_end = (
datetime(*time.strptime(new_date_start, '%Y-%m-%d')[:3]) + (end_date - start_date)).strftime('%Y-%m-%d')
context.update({'copy': True})
new_id = self.copy(cr, uid, proj.id, default={
'name': _("%s (copy)") % (proj.name),
'state': 'open',
'date_start': new_date_start,
'date': new_date_end,
'parent_id': parent_id}, context=context)
result.append(new_id)
child_ids = self.search(cr, uid, [('parent_id', '=', proj.analytic_account_id.id)], context=context)
parent_id = self.read(cr, uid, new_id, ['analytic_account_id'])['analytic_account_id'][0]
if child_ids:
self.duplicate_template(cr, uid, child_ids, context={'parent_id': parent_id})
if result and len(result):
res_id = result[0]
form_view_id = data_obj._get_id(cr, uid, 'project', 'edit_project')
form_view = data_obj.read(cr, uid, form_view_id, ['res_id'])
tree_view_id = data_obj._get_id(cr, uid, 'project', 'view_project')
tree_view = data_obj.read(cr, uid, tree_view_id, ['res_id'])
search_view_id = data_obj._get_id(cr, uid, 'project', 'view_project_project_filter')
search_view = data_obj.read(cr, uid, search_view_id, ['res_id'])
return {
'name': _('Projects'),
'view_type': 'form',
'view_mode': 'form,tree',
'res_model': 'project.project',
'view_id': False,
'res_id': res_id,
'views': [(form_view['res_id'], 'form'), (tree_view['res_id'], 'tree')],
'type': 'ir.actions.act_window',
'search_view_id': search_view['res_id'],
'nodestroy': True
}
# set active value for a project, its sub projects and its tasks
def setActive(self, cr, uid, ids, value=True, context=None):
task_obj = self.pool.get('project.task')
        for proj in self.browse(cr, uid, ids, context=context):
self.write(cr, uid, [proj.id], {'state': value and 'open' or 'template'}, context)
cr.execute('select id from project_task where project_id=%s', (proj.id,))
tasks_id = [x[0] for x in cr.fetchall()]
if tasks_id:
task_obj.write(cr, uid, tasks_id, {'active': value}, context=context)
child_ids = self.search(cr, uid, [('parent_id', '=', proj.analytic_account_id.id)])
if child_ids:
                self.setActive(cr, uid, child_ids, value, context=context)
return True
def _schedule_header(self, cr, uid, ids, force_members=True, context=None):
context = context or {}
if type(ids) in (long, int,):
ids = [ids]
projects = self.browse(cr, uid, ids, context=context)
for project in projects:
if (not project.members) and force_members:
raise osv.except_osv(_('Warning!'), _("You must assign members on the project '%s'!") % (project.name,))
resource_pool = self.pool.get('resource.resource')
result = "from openerp.addons.resource.faces import *\n"
result += "import datetime\n"
for project in self.browse(cr, uid, ids, context=context):
u_ids = [i.id for i in project.members]
if project.user_id and (project.user_id.id not in u_ids):
u_ids.append(project.user_id.id)
for task in project.tasks:
if task.user_id and (task.user_id.id not in u_ids):
u_ids.append(task.user_id.id)
calendar_id = project.resource_calendar_id and project.resource_calendar_id.id or False
resource_objs = resource_pool.generate_resources(cr, uid, u_ids, calendar_id, context=context)
for key, vals in resource_objs.items():
result += '''
class User_%s(Resource):
efficiency = %s
''' % (key, vals.get('efficiency', False))
result += '''
def Project():
'''
return result
def _schedule_project(self, cr, uid, project, context=None):
resource_pool = self.pool.get('resource.resource')
calendar_id = project.resource_calendar_id and project.resource_calendar_id.id or False
working_days = resource_pool.compute_working_calendar(cr, uid, calendar_id, context=context)
# TODO: check if we need working_..., default values are ok.
puids = [x.id for x in project.members]
if project.user_id:
puids.append(project.user_id.id)
result = """
def Project_%d():
start = \'%s\'
working_days = %s
resource = %s
""" % (
project.id,
project.date_start or time.strftime('%Y-%m-%d'), working_days,
'|'.join(['User_' + str(x) for x in puids]) or 'None'
)
vacation = calendar_id and tuple(resource_pool.compute_vacation(cr, uid, calendar_id, context=context)) or False
if vacation:
result += """
vacation = %s
""" % (vacation,)
return result
# TODO: DO Resource allocation and compute availability
    def compute_allocation(self, cr, uid, ids, start_date, end_date, context=None):
        if context is None:
            context = {}
allocation = {}
return allocation
def schedule_tasks(self, cr, uid, ids, context=None):
context = context or {}
if type(ids) in (long, int,):
ids = [ids]
projects = self.browse(cr, uid, ids, context=context)
result = self._schedule_header(cr, uid, ids, False, context=context)
for project in projects:
result += self._schedule_project(cr, uid, project, context=context)
result += self.pool.get('project.task')._generate_task(cr, uid, project.tasks, ident=4, context=context)
local_dict = {}
exec result in local_dict
projects_gantt = Task.BalancedProject(local_dict['Project'])
for project in projects:
project_gantt = getattr(projects_gantt, 'Project_%d' % (project.id,))
for task in project.tasks:
if task.stage_id and task.stage_id.fold:
continue
p = getattr(project_gantt, 'Task_%d' % (task.id,))
self.pool.get('project.task').write(cr, uid, [task.id], {
'date_start': p.start.strftime('%Y-%m-%d %H:%M:%S'),
'date_end': p.end.strftime('%Y-%m-%d %H:%M:%S')
}, context=context)
if (not task.user_id) and (p.booked_resource):
self.pool.get('project.task').write(cr, uid, [task.id], {
'user_id': int(p.booked_resource[0].name[5:]),
}, context=context)
return True
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
# Prevent double project creation when 'use_tasks' is checked + alias management
create_context = dict(context, project_creation_in_progress=True,
alias_model_name=vals.get('alias_model', 'project.task'),
alias_parent_model_name=self._name)
if vals.get('type', False) not in ('template', 'contract'):
vals['type'] = 'contract'
project_id = super(project, self).create(cr, uid, vals, context=create_context)
project_rec = self.browse(cr, uid, project_id, context=context)
self.pool.get('mail.alias').write(cr, uid, [project_rec.alias_id.id], {'alias_parent_thread_id': project_id,
'alias_defaults': {
'project_id': project_id}}, context)
return project_id
def write(self, cr, uid, ids, vals, context=None):
# if alias_model has been changed, update alias_model_id accordingly
if vals.get('alias_model'):
model_ids = self.pool.get('ir.model').search(cr, uid,
[('model', '=', vals.get('alias_model', 'project.task'))])
vals.update(alias_model_id=model_ids[0])
return super(project, self).write(cr, uid, ids, vals, context=context)
class task(osv.osv):
_name = "project.task"
_description = "Task"
_date_name = "date_start"
_inherit = ['mail.thread', 'ir.needaction_mixin']
_mail_post_access = 'read'
_track = {
'stage_id': {
            # this is only a heuristic; depending on your particular stage configuration it may not match all 'new' stages
'project.mt_task_new': lambda self, cr, uid, obj, ctx=None: obj.stage_id and obj.stage_id.sequence <= 1,
            'project.mt_task_stage': lambda self, cr, uid, obj, ctx=None: obj.stage_id and obj.stage_id.sequence > 1,
},
'user_id': {
'project.mt_task_assigned': lambda self, cr, uid, obj, ctx=None: obj.user_id and obj.user_id.id,
},
'kanban_state': {
'project.mt_task_blocked': lambda self, cr, uid, obj, ctx=None: obj.kanban_state == 'blocked',
'project.mt_task_ready': lambda self, cr, uid, obj, ctx=None: obj.kanban_state == 'done',
},
}
def _get_default_partner(self, cr, uid, context=None):
project_id = self._get_default_project_id(cr, uid, context)
if project_id:
project = self.pool.get('project.project').browse(cr, uid, project_id, context=context)
if project and project.partner_id:
return project.partner_id.id
return False
def _get_default_project_id(self, cr, uid, context=None):
""" Gives default section by checking if present in the context """
return (self._resolve_project_id_from_context(cr, uid, context=context) or False)
def _get_default_stage_id(self, cr, uid, context=None):
""" Gives default stage_id """
project_id = self._get_default_project_id(cr, uid, context=context)
return self.stage_find(cr, uid, [], project_id, [('fold', '=', False)], context=context)
def _resolve_project_id_from_context(self, cr, uid, context=None):
""" Returns ID of project based on the value of 'default_project_id'
context key, or None if it cannot be resolved to a single
project.
"""
if context is None:
context = {}
if type(context.get('default_project_id')) in (int, long):
return context['default_project_id']
if isinstance(context.get('default_project_id'), basestring):
project_name = context['default_project_id']
project_ids = self.pool.get('project.project').name_search(cr, uid, name=project_name, context=context)
if len(project_ids) == 1:
return project_ids[0][0]
return None
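    # A minimal ORM-free sketch of the resolution above; `name_lookup` stands
    # in for project.project.name_search and is purely hypothetical:
    @staticmethod
    def _resolve_default_project_sketch(context, name_lookup):
        value = (context or {}).get('default_project_id')
        if isinstance(value, (int, long)):
            return value
        if isinstance(value, basestring):
            matches = name_lookup(value)  # expected shape: [(id, display_name), ...]
            if len(matches) == 1:
                return matches[0][0]
        return None
    # e.g. _resolve_default_project_sketch({'default_project_id': 7}, None) -> 7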
def _read_group_stage_ids(self, cr, uid, ids, domain, read_group_order=None, access_rights_uid=None, context=None):
stage_obj = self.pool.get('project.task.type')
order = stage_obj._order
access_rights_uid = access_rights_uid or uid
if read_group_order == 'stage_id desc':
order = '%s desc' % order
search_domain = []
project_id = self._resolve_project_id_from_context(cr, uid, context=context)
if project_id:
search_domain += ['|', ('project_ids', '=', project_id)]
search_domain += [('id', 'in', ids)]
stage_ids = stage_obj._search(cr, uid, search_domain, order=order, access_rights_uid=access_rights_uid,
context=context)
result = stage_obj.name_get(cr, access_rights_uid, stage_ids, context=context)
# restore order of the search
result.sort(lambda x, y: cmp(stage_ids.index(x[0]), stage_ids.index(y[0])))
fold = {}
for stage in stage_obj.browse(cr, access_rights_uid, stage_ids, context=context):
fold[stage.id] = stage.fold or False
return result, fold
def _read_group_user_id(self, cr, uid, ids, domain, read_group_order=None, access_rights_uid=None, context=None):
res_users = self.pool.get('res.users')
project_id = self._resolve_project_id_from_context(cr, uid, context=context)
access_rights_uid = access_rights_uid or uid
if project_id:
ids += \
self.pool.get('project.project').read(cr, access_rights_uid, project_id, ['members'], context=context)[
'members']
order = res_users._order
# lame way to allow reverting search, should just work in the trivial case
if read_group_order == 'user_id desc':
order = '%s desc' % order
# de-duplicate and apply search order
ids = res_users._search(cr, uid, [('id', 'in', ids)], order=order, access_rights_uid=access_rights_uid,
context=context)
result = res_users.name_get(cr, access_rights_uid, ids, context=context)
# restore order of the search
result.sort(lambda x, y: cmp(ids.index(x[0]), ids.index(y[0])))
return result, {}
_group_by_full = {
'stage_id': _read_group_stage_ids,
'user_id': _read_group_user_id,
}
def _str_get(self, task, level=0, border='***', context=None):
return border + ' ' + (task.user_id and task.user_id.name.upper() or '') + (
level and (': L' + str(level)) or '') + (
' - %.1fh / %.1fh' % (task.effective_hours or 0.0, task.planned_hours)) + ' ' + border + '\n' + \
border[0] + ' ' + (task.name or '') + '\n' + \
(task.description or '') + '\n\n'
# Compute: effective_hours, total_hours, progress
def _hours_get(self, cr, uid, ids, field_names, args, context=None):
res = {}
cr.execute("SELECT task_id, COALESCE(SUM(hours),0) FROM project_task_work WHERE task_id IN %s GROUP BY task_id",
(tuple(ids),))
hours = dict(cr.fetchall())
for task in self.browse(cr, uid, ids, context=context):
res[task.id] = {'effective_hours': hours.get(task.id, 0.0),
'total_hours': (task.remaining_hours or 0.0) + hours.get(task.id, 0.0)}
res[task.id]['delay_hours'] = res[task.id]['total_hours'] - task.planned_hours
res[task.id]['progress'] = 0.0
if not float_is_zero(res[task.id]['total_hours'], precision_digits=2):
res[task.id]['progress'] = round(
min(100.0 * hours.get(task.id, 0.0) / res[task.id]['total_hours'], 99.99), 2)
if task.stage_id and task.stage_id.fold:
res[task.id]['progress'] = 100.0
return res
def onchange_remaining(self, cr, uid, ids, remaining=0.0, planned=0.0):
if remaining and not planned:
return {'value': {'planned_hours': remaining}}
return {}
def onchange_planned(self, cr, uid, ids, planned=0.0, effective=0.0):
return {'value': {'remaining_hours': planned - effective}}
def onchange_project(self, cr, uid, id, project_id, context=None):
if project_id:
project = self.pool.get('project.project').browse(cr, uid, project_id, context=context)
if project and project.partner_id:
return {'value': {'partner_id': project.partner_id.id}}
return {}
def onchange_user_id(self, cr, uid, ids, user_id, context=None):
vals = {}
if user_id:
vals['date_start'] = fields.datetime.now()
return {'value': vals}
def duplicate_task(self, cr, uid, map_ids, context=None):
mapper = lambda t: map_ids.get(t.id, t.id)
for task in self.browse(cr, uid, map_ids.values(), context):
new_child_ids = set(map(mapper, task.child_ids))
new_parent_ids = set(map(mapper, task.parent_ids))
if new_child_ids or new_parent_ids:
task.write({'parent_ids': [(6, 0, list(new_parent_ids))],
'child_ids': [(6, 0, list(new_child_ids))]})
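    # Remapping sketch for duplicate_task above: with map_ids = {10: 20}
    # (old task id -> copied task id, values hypothetical), a copy whose
    # parent_ids still reference 10 is rewritten to reference 20, while ids
    # absent from the map fall through `mapper` unchanged.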
def copy_data(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
if not default.get('name'):
current = self.browse(cr, uid, id, context=context)
default['name'] = _("%s (copy)") % current.name
return super(task, self).copy_data(cr, uid, id, default, context)
def _is_template(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for task in self.browse(cr, uid, ids, context=context):
res[task.id] = True
if task.project_id:
                if not task.project_id.active or task.project_id.state == 'template':
res[task.id] = False
return res
def _get_task(self, cr, uid, ids, context=None):
result = {}
for work in self.pool.get('project.task.work').browse(cr, uid, ids, context=context):
if work.task_id: result[work.task_id.id] = True
return result.keys()
_columns = {
'active': fields.function(_is_template, store=True, string='Not a Template Task', type='boolean',
help="This field is computed automatically and have the same behavior than the boolean 'active' field: if the task is linked to a template or unactivated project, it will be hidden unless specifically asked."),
'name': fields.char('Task Summary', track_visibility='onchange', size=128, required=True, select=True),
'description': fields.text('Description'),
'priority': fields.selection([('0', 'Low'), ('1', 'Normal'), ('2', 'High')], 'Priority', select=True),
'sequence': fields.integer('Sequence', select=True,
help="Gives the sequence order when displaying a list of tasks."),
'stage_id': fields.many2one('project.task.type', 'Stage', track_visibility='onchange', select=True,
domain="[('project_ids', '=', project_id)]", copy=False),
'categ_ids': fields.many2many('project.category', string='Tags'),
'kanban_state': fields.selection(
[('normal', 'In Progress'), ('blocked', 'Blocked'), ('done', 'Ready for next stage')], 'Kanban State',
track_visibility='onchange',
help="A task's kanban state indicates special situations affecting it:\n"
" * Normal is the default situation\n"
" * Blocked indicates something is preventing the progress of this task\n"
" * Ready for next stage indicates the task is ready to be pulled to the next stage",
required=False, copy=False),
'create_date': fields.datetime('Create Date', readonly=True, select=True),
'write_date': fields.datetime('Last Modification Date', readonly=True, select=True),
# not displayed in the view but it might be useful with base_action_rule module (and it needs to be defined first for that)
'date_start': fields.datetime('Starting Date', select=True, copy=False),
'date_end': fields.datetime('Ending Date', select=True, copy=False),
'date_deadline': fields.date('Deadline', select=True, copy=False),
'date_last_stage_update': fields.datetime('Last Stage Update', select=True, copy=False),
'project_id': fields.many2one('project.project', 'Project', ondelete='set null', select=True,
track_visibility='onchange', change_default=True),
'parent_ids': fields.many2many('project.task', 'project_task_parent_rel', 'task_id', 'parent_id',
'Parent Tasks'),
'child_ids': fields.many2many('project.task', 'project_task_parent_rel', 'parent_id', 'task_id',
'Delegated Tasks'),
'notes': fields.text('Notes'),
'planned_hours': fields.float('Initially Planned Hours',
help='Estimated time to do the task, usually set by the project manager when the task is in draft state.'),
'effective_hours': fields.function(_hours_get, string='Hours Spent', multi='hours',
help="Computed using the sum of the task work done.",
store={
'project.task': (lambda self, cr, uid, ids, c={}: ids,
['work_ids', 'remaining_hours', 'planned_hours'], 10),
'project.task.work': (_get_task, ['hours'], 10),
}),
'remaining_hours': fields.float('Remaining Hours', digits=(16, 2),
help="Total remaining time, can be re-estimated periodically by the assignee of the task."),
'total_hours': fields.function(_hours_get, string='Total', multi='hours',
help="Computed as: Time Spent + Remaining Time.",
store={
'project.task': (lambda self, cr, uid, ids, c={}: ids,
['work_ids', 'remaining_hours', 'planned_hours'], 10),
'project.task.work': (_get_task, ['hours'], 10),
}),
'progress': fields.function(_hours_get, string='Working Time Progress (%)', multi='hours', group_operator="avg",
help="If the task has a progress of 99.99% you should close the task if it's finished or reevaluate the time",
store={
'project.task': (lambda self, cr, uid, ids, c={}: ids,
['work_ids', 'remaining_hours', 'planned_hours', 'state',
'stage_id'], 10),
'project.task.work': (_get_task, ['hours'], 10),
}),
'delay_hours': fields.function(_hours_get, string='Delay Hours', multi='hours',
help="Computed as difference between planned hours by the project manager and the total hours of the task.",
store={
'project.task': (lambda self, cr, uid, ids, c={}: ids,
['work_ids', 'remaining_hours', 'planned_hours'], 10),
'project.task.work': (_get_task, ['hours'], 10),
}),
'reviewer_id': fields.many2one('res.users', 'Reviewer', select=True, track_visibility='onchange'),
'user_id': fields.many2one('res.users', 'Assigned to', select=True, track_visibility='onchange'),
'delegated_user_id': fields.related('child_ids', 'user_id', type='many2one', relation='res.users',
string='Delegated To'),
'partner_id': fields.many2one('res.partner', 'Customer'),
'work_ids': fields.one2many('project.task.work', 'task_id', 'Work done'),
'manager_id': fields.related('project_id', 'analytic_account_id', 'user_id', type='many2one',
relation='res.users', string='Project Manager'),
'company_id': fields.many2one('res.company', 'Company'),
'id': fields.integer('ID', readonly=True),
'color': fields.integer('Color Index'),
'user_email': fields.related('user_id', 'email', type='char', string='User Email', readonly=True),
}
_defaults = {
'stage_id': _get_default_stage_id,
'project_id': _get_default_project_id,
'date_last_stage_update': fields.datetime.now,
'kanban_state': 'normal',
'priority': '0',
'progress': 0,
'sequence': 10,
'active': True,
'reviewer_id': lambda obj, cr, uid, ctx=None: uid,
'user_id': lambda obj, cr, uid, ctx=None: uid,
'company_id': lambda self, cr, uid, ctx=None: self.pool.get('res.company')._company_default_get(cr, uid,
'project.task',
context=ctx),
'partner_id': lambda self, cr, uid, ctx=None: self._get_default_partner(cr, uid, context=ctx),
}
_order = "priority desc, sequence, date_start, name, id"
def _check_recursion(self, cr, uid, ids, context=None):
for id in ids:
visited_branch = set()
visited_node = set()
res = self._check_cycle(cr, uid, id, visited_branch, visited_node, context=context)
if not res:
return False
return True
def _check_cycle(self, cr, uid, id, visited_branch, visited_node, context=None):
if id in visited_branch: # Cycle
return False
if id in visited_node: # Already tested don't work one more time for nothing
return True
visited_branch.add(id)
visited_node.add(id)
# visit child using DFS
task = self.browse(cr, uid, id, context=context)
for child in task.child_ids:
res = self._check_cycle(cr, uid, child.id, visited_branch, visited_node, context=context)
if not res:
return False
visited_branch.remove(id)
return True
def _check_dates(self, cr, uid, ids, context=None):
        if context is None:
context = {}
obj_task = self.browse(cr, uid, ids[0], context=context)
start = obj_task.date_start or False
end = obj_task.date_end or False
if start and end:
if start > end:
return False
return True
_constraints = [
(_check_recursion, 'Error ! You cannot create recursive tasks.', ['parent_ids']),
        (_check_dates, 'Error! Task end-date must be later than task start-date.', ['date_start', 'date_end'])
]
# Override view according to the company definition
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
users_obj = self.pool.get('res.users')
if context is None: context = {}
res = super(task, self).fields_view_get(cr, uid, view_id, view_type, context, toolbar, submenu=submenu)
# read uom as admin to avoid access rights issues, e.g. for portal/share users,
# this should be safe (no context passed to avoid side-effects)
obj_tm = users_obj.browse(cr, SUPERUSER_ID, uid, context=context).company_id.project_time_mode_id
try:
# using get_object to get translation value
uom_hour = self.pool['ir.model.data'].get_object(cr, uid, 'product', 'product_uom_hour', context=context)
except ValueError:
uom_hour = False
if not obj_tm or not uom_hour or obj_tm.id == uom_hour.id:
return res
eview = etree.fromstring(res['arch'])
# if the project_time_mode_id is not in hours (so in days), display it as a float field
def _check_rec(eview):
if eview.attrib.get('widget', '') == 'float_time':
eview.set('widget', 'float')
for child in eview:
_check_rec(child)
return True
_check_rec(eview)
res['arch'] = etree.tostring(eview)
# replace reference of 'Hours' to 'Day(s)'
for f in res['fields']:
            # TODO: this does NOT work in languages other than English: the
            # field 'Initially Planned Hours' should become 'Initially
            # Planned Days', but the string 'Initially Planned Days' is not
            # available in the translations.
if 'Hours' in res['fields'][f]['string']:
res['fields'][f]['string'] = res['fields'][f]['string'].replace('Hours', obj_tm.name)
return res
def get_empty_list_help(self, cr, uid, help, context=None):
context = dict(context or {})
context['empty_list_help_id'] = context.get('default_project_id')
context['empty_list_help_model'] = 'project.project'
context['empty_list_help_document_name'] = _("tasks")
return super(task, self).get_empty_list_help(cr, uid, help, context=context)
# ----------------------------------------
# Case management
# ----------------------------------------
def stage_find(self, cr, uid, cases, section_id, domain=[], order='sequence', context=None):
""" Override of the base.stage method
Parameter of the stage search taken from the lead:
- section_id: if set, stages must belong to this section or
be a default stage; if not set, stages must be default
stages
"""
if isinstance(cases, (int, long)):
cases = self.browse(cr, uid, cases, context=context)
# collect all section_ids
section_ids = []
if section_id:
section_ids.append(section_id)
for task in cases:
if task.project_id:
section_ids.append(task.project_id.id)
search_domain = []
if section_ids:
            search_domain = ['|'] * (len(section_ids) - 1)
for section_id in section_ids:
search_domain.append(('project_ids', '=', section_id))
search_domain += list(domain)
# perform search, return the first found
stage_ids = self.pool.get('project.task.type').search(cr, uid, search_domain, order=order, context=context)
if stage_ids:
return stage_ids[0]
return False
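    # Sketch of the reverse-polish OR domain built in stage_find above: n
    # leaf terms need n - 1 leading '|' operators (helper name hypothetical):
    @staticmethod
    def _build_or_domain_sketch(section_ids):
        domain = ['|'] * (len(section_ids) - 1)
        for section_id in section_ids:
            domain.append(('project_ids', '=', section_id))
        return domain
    # e.g. _build_or_domain_sketch([1, 2]) ==
    #   ['|', ('project_ids', '=', 1), ('project_ids', '=', 2)]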
def _check_child_task(self, cr, uid, ids, context=None):
        if context is None:
context = {}
tasks = self.browse(cr, uid, ids, context=context)
for task in tasks:
if task.child_ids:
for child in task.child_ids:
if child.stage_id and not child.stage_id.fold:
raise osv.except_osv(_("Warning!"),
_("Child task still open.\nPlease cancel or complete child task first."))
return True
def _delegate_task_attachments(self, cr, uid, task_id, delegated_task_id, context=None):
attachment = self.pool.get('ir.attachment')
attachment_ids = attachment.search(cr, uid, [('res_model', '=', self._name), ('res_id', '=', task_id)],
context=context)
new_attachment_ids = []
for attachment_id in attachment_ids:
new_attachment_ids.append(
attachment.copy(cr, uid, attachment_id, default={'res_id': delegated_task_id}, context=context))
return new_attachment_ids
def do_delegate(self, cr, uid, ids, delegate_data=None, context=None):
"""
Delegate Task to another users.
"""
if delegate_data is None:
delegate_data = {}
assert delegate_data['user_id'], _("Delegated User should be specified")
delegated_tasks = {}
for task in self.browse(cr, uid, ids, context=context):
delegated_task_id = self.copy(cr, uid, task.id, {
'name': delegate_data['name'],
'project_id': delegate_data['project_id'] and delegate_data['project_id'][0] or False,
'stage_id': delegate_data.get('stage_id') and delegate_data.get('stage_id')[0] or False,
'user_id': delegate_data['user_id'] and delegate_data['user_id'][0] or False,
'planned_hours': delegate_data['planned_hours'] or 0.0,
'parent_ids': [(6, 0, [task.id])],
'description': delegate_data['new_task_description'] or '',
'child_ids': [],
'work_ids': []
}, context=context)
self._delegate_task_attachments(cr, uid, task.id, delegated_task_id, context=context)
newname = delegate_data['prefix'] or ''
task.write({
'remaining_hours': delegate_data['planned_hours_me'],
'planned_hours': delegate_data['planned_hours_me'] + (task.effective_hours or 0.0),
'name': newname,
}, context=context)
delegated_tasks[task.id] = delegated_task_id
return delegated_tasks
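    # Hedged example of the delegate_data shape the reads above expect (keys
    # mirror the code; many2one values arrive as (id, name) pairs from the
    # delegation wizard; all values below are hypothetical):
    #   {'user_id': (7, 'Alice'), 'project_id': (3, 'Website'),
    #    'stage_id': (1, 'New'), 'name': 'Review spec [DELEGATED]',
    #    'prefix': 'Review spec', 'planned_hours': 4.0,
    #    'planned_hours_me': 2.0, 'new_task_description': 'Delegated part'}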
def set_remaining_time(self, cr, uid, ids, remaining_time=1.0, context=None):
for task in self.browse(cr, uid, ids, context=context):
if (task.stage_id and task.stage_id.sequence <= 1) or (task.planned_hours == 0.0):
self.write(cr, uid, [task.id], {'planned_hours': remaining_time + task.effective_hours},
context=context)
self.write(cr, uid, ids, {'remaining_hours': remaining_time}, context=context)
return True
def set_remaining_time_1(self, cr, uid, ids, context=None):
return self.set_remaining_time(cr, uid, ids, 1.0, context)
def set_remaining_time_2(self, cr, uid, ids, context=None):
return self.set_remaining_time(cr, uid, ids, 2.0, context)
def set_remaining_time_5(self, cr, uid, ids, context=None):
return self.set_remaining_time(cr, uid, ids, 5.0, context)
def set_remaining_time_10(self, cr, uid, ids, context=None):
return self.set_remaining_time(cr, uid, ids, 10.0, context)
def _store_history(self, cr, uid, ids, context=None):
for task in self.browse(cr, uid, ids, context=context):
self.pool.get('project.task.history').create(cr, uid, {
'task_id': task.id,
'remaining_hours': task.remaining_hours,
'planned_hours': task.planned_hours,
'kanban_state': task.kanban_state,
'type_id': task.stage_id.id,
'user_id': task.user_id.id
}, context=context)
return True
# ------------------------------------------------
# CRUD overrides
# ------------------------------------------------
def create(self, cr, uid, vals, context=None):
context = dict(context or {})
# for default stage
if vals.get('project_id') and not context.get('default_project_id'):
context['default_project_id'] = vals.get('project_id')
# user_id change: update date_start
if vals.get('user_id') and not vals.get('date_start'):
vals['date_start'] = fields.datetime.now()
# context: no_log, because subtype already handle this
create_context = dict(context, mail_create_nolog=True)
task_id = super(task, self).create(cr, uid, vals, context=create_context)
self._store_history(cr, uid, [task_id], context=context)
return task_id
def write(self, cr, uid, ids, vals, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
# stage change: update date_last_stage_update
if 'stage_id' in vals:
vals['date_last_stage_update'] = fields.datetime.now()
# user_id change: update date_start
if vals.get('user_id') and 'date_start' not in vals:
vals['date_start'] = fields.datetime.now()
# Overridden to reset the kanban_state to normal whenever
# the stage (stage_id) of the task changes.
        if vals and 'kanban_state' not in vals and 'stage_id' in vals:
new_stage = vals.get('stage_id')
vals_reset_kstate = dict(vals, kanban_state='normal')
for t in self.browse(cr, uid, ids, context=context):
write_vals = vals_reset_kstate if t.stage_id.id != new_stage else vals
super(task, self).write(cr, uid, [t.id], write_vals, context=context)
result = True
else:
result = super(task, self).write(cr, uid, ids, vals, context=context)
if any(item in vals for item in ['stage_id', 'remaining_hours', 'user_id', 'kanban_state']):
self._store_history(cr, uid, ids, context=context)
return result
def unlink(self, cr, uid, ids, context=None):
        if context is None:
context = {}
self._check_child_task(cr, uid, ids, context=context)
res = super(task, self).unlink(cr, uid, ids, context)
return res
def _generate_task(self, cr, uid, tasks, ident=4, context=None):
context = context or {}
result = ""
ident = ' ' * ident
company = self.pool["res.users"].browse(cr, uid, uid, context=context).company_id
        # map the company's time UoM name onto a faces duration suffix;
        # default to 'H' so an unrecognized name still yields a parseable
        # duration string ('month' is mapped to 'm' for consistency with
        # 'months'/'month(s)')
        duration_uom = {
            'day(s)': 'd', 'days': 'd', 'day': 'd', 'd': 'd',
            'month(s)': 'm', 'months': 'm', 'month': 'm', 'm': 'm',
            'week(s)': 'w', 'weeks': 'w', 'week': 'w', 'w': 'w',
            'hour(s)': 'H', 'hours': 'H', 'hour': 'H', 'h': 'H',
        }.get(company.project_time_mode_id.name.lower(), 'H')
for task in tasks:
if task.stage_id and task.stage_id.fold:
continue
result += '''
%sdef Task_%s():
%s todo = \"%.2f%s\"
%s effort = \"%.2f%s\"''' % (
ident, task.id, ident, task.remaining_hours, duration_uom, ident, task.total_hours, duration_uom)
start = []
for t2 in task.parent_ids:
start.append("up.Task_%s.end" % (t2.id,))
if start:
result += '''
%s start = max(%s)
''' % (ident, ','.join(start))
if task.user_id:
result += '''
%s resource = %s
''' % (ident, 'User_' + str(task.user_id.id))
result += "\n"
return result
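    # For illustration, the faces snippet generated for one task looks
    # roughly like this (ids, hours and the parent link are hypothetical):
    #     def Task_42():
    #         todo = "8.00H"
    #         effort = "12.00H"
    #         start = max(up.Task_41.end)
    #         resource = User_7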
# ---------------------------------------------------
# Mail gateway
# ---------------------------------------------------
def _message_get_auto_subscribe_fields(self, cr, uid, updated_fields, auto_follow_fields=None, context=None):
if auto_follow_fields is None:
auto_follow_fields = ['user_id', 'reviewer_id']
return super(task, self)._message_get_auto_subscribe_fields(cr, uid, updated_fields, auto_follow_fields,
context=context)
def message_get_reply_to(self, cr, uid, ids, context=None):
""" Override to get the reply_to of the parent project. """
tasks = self.browse(cr, SUPERUSER_ID, ids, context=context)
project_ids = set([task.project_id.id for task in tasks if task.project_id])
aliases = self.pool['project.project'].message_get_reply_to(cr, uid, list(project_ids), context=context)
return dict((task.id, aliases.get(task.project_id and task.project_id.id or 0, False)) for task in tasks)
def message_new(self, cr, uid, msg, custom_values=None, context=None):
""" Override to updates the document according to the email. """
if custom_values is None:
custom_values = {}
defaults = {
'name': msg.get('subject'),
'planned_hours': 0.0,
}
defaults.update(custom_values)
return super(task, self).message_new(cr, uid, msg, custom_values=defaults, context=context)
def message_update(self, cr, uid, ids, msg, update_vals=None, context=None):
""" Override to update the task according to the email. """
if update_vals is None:
update_vals = {}
maps = {
'cost': 'planned_hours',
}
for line in msg['body'].split('\n'):
line = line.strip()
res = tools.command_re.match(line)
if res:
match = res.group(1).lower()
field = maps.get(match)
if field:
try:
update_vals[field] = float(res.group(2).lower())
except (ValueError, TypeError):
pass
return super(task, self).message_update(cr, uid, ids, msg, update_vals=update_vals, context=context)
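    # A self-contained sketch of the body parsing in message_update above.
    # The real pattern is tools.command_re (defined in openerp.tools); the
    # regex below is an assumption used purely to illustrate the
    # "name : value" line shape.
    @staticmethod
    def _parse_command_lines_sketch(body, field_map=None):
        import re
        field_map = field_map or {'cost': 'planned_hours'}
        command_re = re.compile(r'^\s*(\w+)\s*:\s*(\S+)\s*$')  # assumed shape
        vals = {}
        for line in body.split('\n'):
            match = command_re.match(line)
            if match and match.group(1).lower() in field_map:
                try:
                    vals[field_map[match.group(1).lower()]] = float(match.group(2))
                except (ValueError, TypeError):
                    pass
        return vals
    # e.g. _parse_command_lines_sketch("cost: 12.5") == {'planned_hours': 12.5}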
class project_work(osv.osv):
_name = "project.task.work"
_description = "Project Task Work"
_columns = {
'name': fields.char('Work summary'),
'date': fields.datetime('Date', select="1"),
'task_id': fields.many2one('project.task', 'Task', ondelete='cascade', required=True, select="1"),
'hours': fields.float('Time Spent'),
'user_id': fields.many2one('res.users', 'Done by', required=True, select="1"),
'company_id': fields.related('task_id', 'company_id', type='many2one', relation='res.company', string='Company',
store=True, readonly=True)
}
_defaults = {
'user_id': lambda obj, cr, uid, context: uid,
'date': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S')
}
_order = "date desc"
def create(self, cr, uid, vals, context=None):
if 'hours' in vals and (not vals['hours']):
vals['hours'] = 0.00
if 'task_id' in vals:
cr.execute('update project_task set remaining_hours=remaining_hours - %s where id=%s',
(vals.get('hours', 0.0), vals['task_id']))
self.pool.get('project.task').invalidate_cache(cr, uid, ['remaining_hours'], [vals['task_id']],
context=context)
return super(project_work, self).create(cr, uid, vals, context=context)
def write(self, cr, uid, ids, vals, context=None):
if 'hours' in vals and (not vals['hours']):
vals['hours'] = 0.00
if 'hours' in vals:
task_obj = self.pool.get('project.task')
for work in self.browse(cr, uid, ids, context=context):
cr.execute('update project_task set remaining_hours=remaining_hours - %s + (%s) where id=%s',
(vals.get('hours', 0.0), work.hours, work.task_id.id))
task_obj.invalidate_cache(cr, uid, ['remaining_hours'], [work.task_id.id], context=context)
return super(project_work, self).write(cr, uid, ids, vals, context)
def unlink(self, cr, uid, ids, context=None):
task_obj = self.pool.get('project.task')
        for work in self.browse(cr, uid, ids, context=context):
cr.execute('update project_task set remaining_hours=remaining_hours + %s where id=%s',
(work.hours, work.task_id.id))
task_obj.invalidate_cache(cr, uid, ['remaining_hours'], [work.task_id.id], context=context)
return super(project_work, self).unlink(cr, uid, ids, context=context)
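    # Bookkeeping sketch for the three overrides above (numbers
    # hypothetical): starting from remaining_hours = 10, create(hours=2)
    # leaves 8; a write changing those 2 hours to 5 leaves 5 (8 - 5 + 2);
    # unlinking the 5-hour line restores 10.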
class account_analytic_account(osv.osv):
_inherit = 'account.analytic.account'
_description = 'Analytic Account'
_columns = {
'use_tasks': fields.boolean('Tasks',
help="If checked, this contract will be available in the project menu and you will be able to manage tasks or track issues"),
'company_uom_id': fields.related('company_id', 'project_time_mode_id', type='many2one', relation='product.uom'),
}
def on_change_template(self, cr, uid, ids, template_id, date_start=False, context=None):
res = super(account_analytic_account, self).on_change_template(cr, uid, ids, template_id, date_start=date_start,
context=context)
if template_id and 'value' in res:
template = self.browse(cr, uid, template_id, context=context)
res['value']['use_tasks'] = template.use_tasks
return res
def _trigger_project_creation(self, cr, uid, vals, context=None):
        '''
        Decide whether a project must be created automatically when an
        analytic account is created; return True if so, False otherwise.
        '''
if context is None: context = {}
        return vals.get('use_tasks') and 'project_creation_in_progress' not in context
def project_create(self, cr, uid, analytic_account_id, vals, context=None):
        '''
        Called at analytic account creation time; create a project
        automatically linked to the account if the conditions are met.
        '''
project_pool = self.pool.get('project.project')
project_id = project_pool.search(cr, uid, [('analytic_account_id', '=', analytic_account_id)])
if not project_id and self._trigger_project_creation(cr, uid, vals, context=context):
project_values = {
'name': vals.get('name'),
'analytic_account_id': analytic_account_id,
'type': vals.get('type', 'contract'),
}
return project_pool.create(cr, uid, project_values, context=context)
return False
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
if vals.get('child_ids', False) and context.get('analytic_project_copy', False):
vals['child_ids'] = []
analytic_account_id = super(account_analytic_account, self).create(cr, uid, vals, context=context)
self.project_create(cr, uid, analytic_account_id, vals, context=context)
return analytic_account_id
def write(self, cr, uid, ids, vals, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
vals_for_project = vals.copy()
for account in self.browse(cr, uid, ids, context=context):
if not vals.get('name'):
vals_for_project['name'] = account.name
if not vals.get('type'):
vals_for_project['type'] = account.type
self.project_create(cr, uid, account.id, vals_for_project, context=context)
return super(account_analytic_account, self).write(cr, uid, ids, vals, context=context)
def unlink(self, cr, uid, ids, *args, **kwargs):
project_obj = self.pool.get('project.project')
analytic_ids = project_obj.search(cr, uid, [('analytic_account_id', 'in', ids)])
if analytic_ids:
raise osv.except_osv(_('Warning!'), _('Please delete the project linked with this account first.'))
return super(account_analytic_account, self).unlink(cr, uid, ids, *args, **kwargs)
def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):
if args is None:
args = []
if context is None:
context = {}
if context.get('current_model') == 'project.project':
project_ids = self.search(cr, uid, args + [('name', operator, name)], limit=limit, context=context)
return self.name_get(cr, uid, project_ids, context=context)
return super(account_analytic_account, self).name_search(cr, uid, name, args=args, operator=operator,
context=context, limit=limit)
class project_project(osv.osv):
_inherit = 'project.project'
_defaults = {
'use_tasks': True
}
class project_task_history(osv.osv):
"""
Tasks History, used for cumulative flow charts (Lean/Agile)
"""
_name = 'project.task.history'
_description = 'History of Tasks'
_rec_name = 'task_id'
_log_access = False
def _get_date(self, cr, uid, ids, name, arg, context=None):
result = {}
for history in self.browse(cr, uid, ids, context=context):
if history.type_id and history.type_id.fold:
result[history.id] = history.date
continue
cr.execute('''select
date
from
project_task_history
where
task_id=%s and
id>%s
order by id limit 1''', (history.task_id.id, history.id))
res = cr.fetchone()
result[history.id] = res and res[0] or False
return result
def _get_related_date(self, cr, uid, ids, context=None):
result = []
for history in self.browse(cr, uid, ids, context=context):
cr.execute('''select
id
from
project_task_history
where
task_id=%s and
id<%s
order by id desc limit 1''', (history.task_id.id, history.id))
res = cr.fetchone()
if res:
result.append(res[0])
return result
_columns = {
'task_id': fields.many2one('project.task', 'Task', ondelete='cascade', required=True, select=True),
'type_id': fields.many2one('project.task.type', 'Stage'),
'kanban_state': fields.selection(
[('normal', 'Normal'), ('blocked', 'Blocked'), ('done', 'Ready for next stage')], 'Kanban State',
required=False),
'date': fields.date('Date', select=True),
'end_date': fields.function(_get_date, string='End Date', type="date", store={
'project.task.history': (_get_related_date, None, 20)
}),
'remaining_hours': fields.float('Remaining Time', digits=(16, 2)),
'planned_hours': fields.float('Planned Time', digits=(16, 2)),
'user_id': fields.many2one('res.users', 'Responsible'),
}
_defaults = {
'date': fields.date.context_today,
}
class project_task_history_cumulative(osv.osv):
_name = 'project.task.history.cumulative'
_table = 'project_task_history_cumulative'
_inherit = 'project.task.history'
_auto = False
_columns = {
'end_date': fields.date('End Date'),
'nbr_tasks': fields.integer('# of Tasks', readonly=True),
'project_id': fields.many2one('project.project', 'Project'),
}
def init(self, cr):
tools.drop_view_if_exists(cr, 'project_task_history_cumulative')
cr.execute(""" CREATE VIEW project_task_history_cumulative AS (
SELECT
history.date::varchar||'-'||history.history_id::varchar AS id,
history.date AS end_date,
*
FROM (
SELECT
h.id AS history_id,
h.date+generate_series(0, CAST((coalesce(h.end_date, DATE 'tomorrow')::date - h.date) AS integer)-1) AS date,
h.task_id, h.type_id, h.user_id, h.kanban_state,
count(h.task_id) as nbr_tasks,
greatest(h.remaining_hours, 1) AS remaining_hours, greatest(h.planned_hours, 1) AS planned_hours,
t.project_id
FROM
project_task_history AS h
JOIN project_task AS t ON (h.task_id = t.id)
GROUP BY
h.id,
h.task_id,
t.project_id
) AS history
)
""")
class project_category(osv.osv):
""" Category of project's task (or issue) """
_name = "project.category"
_description = "Category of project's task, issue, ..."
_columns = {
'name': fields.char('Name', required=True, translate=True),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
dezynetechnologies/odoo
|
addons/project/project.py
|
Python
|
agpl-3.0
| 80,406
|
[
"VisIt"
] |
7cb86e8579d0ff660bac52973351e4a303cec4b5f8d5f56e3494c6ea741bd5b3
|
import netCDF4 as nc4
import numpy as np
# Model datatype
float_type = "f8"
# Set up the vertical grid
kmax = 512
zsize = 0.5
dz = zsize / kmax
# Define the variables
z = np.zeros(kmax)
b = np.zeros(kmax)
# Create non-equidistant grid
alpha = 0.7
for k in range(kmax):
eta = -1. + 2.*((k+1)-0.5) / kmax
z[k] = zsize / (2.*alpha) * np.tanh(eta*0.5*(np.log(1.+alpha) - np.log(1.-alpha))) + 0.5*zsize
# Write input NetCDF file
nc_file = nc4.Dataset('rayleighbenard_input.nc', mode='w', datamodel='NETCDF4', clobber=False)
nc_file.createDimension('z', kmax)
nc_group_init = nc_file.createGroup('init')
nc_z = nc_file.createVariable('z', float_type, ('z'))
nc_z[:] = z[:]
# `b` is allocated above but never written; presumably the initial buoyancy
# profile belongs in the 'init' group (an assumption, based on the otherwise
# unused variable definition above):
nc_b = nc_group_init.createVariable('b', float_type, ('z'))
nc_b[:] = b[:]
nc_file.close()
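# A minimal sanity check (sketch, assuming the script above has just run):
# the tanh-stretched grid should be strictly increasing and lie inside
# (0, zsize).
check = nc4.Dataset('rayleighbenard_input.nc', mode='r')
z_check = check.variables['z'][:]
assert np.all(np.diff(z_check) > 0.)
assert z_check[0] > 0. and z_check[-1] < zsize
check.close()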
|
microhh/microhh
|
cases/rayleighbenard/rayleighbenard_input.py
|
Python
|
gpl-3.0
| 693
|
[
"NetCDF"
] |
925c1931babd252d336e970808016bf1c07835c3d287c6462446628b9957530c
|
#!/usr/bin/env python
# Copyright 2014->future! Mikko Korpela
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# partly based on work by Nokia Solutions and Networks Oyj
"""A parallel executor for Robot Framework test cases.
Version 2.1.0
Supports all Robot Framework command line options and also the following
options (these must be placed before the normal RF options):
--verbose
more output
--command [ACTUAL COMMANDS TO START ROBOT EXECUTOR] --end-command
RF script for situations where pybot is not used directly
--processes [NUMBER OF PROCESSES]
How many parallel executors to use (default max of 2 and cpu count)
--testlevelsplit
Split execution on test level instead of default suite level.
If .pabotsuitenames contains both tests and suites then this
will only affect new suites and split only them.
Leaving this flag out when .pabotsuitenames contains both
suites and tests will likewise only affect new suites and
add them as suite files.
--resourcefile [FILEPATH]
Indicator for a file that can contain shared variables for
distributing resources.
--pabotlib
Start PabotLib remote server. This enables locking and resource
distribution between parallel test executions.
--pabotlibhost [HOSTNAME]
Host name of the PabotLib remote server (default is 127.0.0.1)
--pabotlibport [PORT]
Port number of the PabotLib remote server (default is 8270)
--ordering [FILE PATH]
Optionally give execution order from a file.
--suitesfrom [FILEPATH TO OUTPUTXML]
Optionally read suites from output.xml file. Failed suites will run
first and longer running ones will be executed before shorter ones.
--argumentfile[INTEGER] [FILEPATH]
Run same suite with multiple argumentfile options.
For example "--argumentfile1 arg1.txt --argumentfile2 arg2.txt".
Copyright 2019 Mikko Korpela - Apache 2 License
"""
from __future__ import absolute_import, print_function
import datetime
import hashlib
import os
import random
import re
import shutil
import signal
import socket
import subprocess
import sys
import threading
import time
import traceback
import uuid
from collections import namedtuple
from contextlib import closing
from glob import glob
from io import BytesIO, StringIO
from multiprocessing.pool import ThreadPool
from robot import __version__ as ROBOT_VERSION
from robot import rebot
from robot.api import ExecutionResult
from robot.conf import RobotSettings
from robot.errors import DataError, Information
from robot.libraries.Remote import Remote
from robot.model import ModelModifier
from robot.result.visitor import ResultVisitor
from robot.run import USAGE
from robot.running import TestSuiteBuilder
from robot.utils import PY2, SYSTEM_ENCODING, ArgumentParser, is_unicode
from . import pabotlib
from .arguments import parse_args, parse_execution_item_line
from .clientwrapper import make_order
from .execution_items import (
DynamicSuiteItem,
ExecutionItem,
GroupEndItem,
GroupItem,
GroupStartItem,
HivedItem,
SuiteItem,
SuiteItems,
TestItem,
)
from .result_merger import merge
try:
import queue # type: ignore
except ImportError:
import Queue as queue # type: ignore
try:
from shlex import quote # type: ignore
except ImportError:
from pipes import quote # type: ignore
from typing import IO, Any, Dict, List, Optional, Tuple, Union
CTRL_C_PRESSED = False
MESSAGE_QUEUE = queue.Queue()
EXECUTION_POOL_IDS = [] # type: List[int]
EXECUTION_POOL_ID_LOCK = threading.Lock()
POPEN_LOCK = threading.Lock()
_PABOTLIBURI = "127.0.0.1:8270"
_PABOTLIBPROCESS = None # type: Optional[subprocess.Popen]
_BOURNELIKE_SHELL_BAD_CHARS_WITHOUT_DQUOTE = (
"!#$^&*?[(){}<>~;'`\\|= \t\n" # does not contain '"'
)
_BAD_CHARS_SET = set(_BOURNELIKE_SHELL_BAD_CHARS_WITHOUT_DQUOTE)
_NUMBER_OF_ITEMS_TO_BE_EXECUTED = 0
_ABNORMAL_EXIT_HAPPENED = False
_COMPLETED_LOCK = threading.Lock()
_NOT_COMPLETED_INDEXES = [] # type: List[int]
_ROBOT_EXTENSIONS = [
".html",
".htm",
".xhtml",
".tsv",
".rst",
".rest",
".txt",
".robot",
]
_ALL_ELAPSED = [] # type: List[Union[int, float]]
class Color:
SUPPORTED_OSES = ["posix"]
GREEN = "\033[92m"
RED = "\033[91m"
ENDC = "\033[0m"
YELLOW = "\033[93m"
def _mapOptionalQuote(command_args):
# type: (List[str]) -> List[str]
if os.name == "posix":
return [quote(arg) for arg in command_args]
return [
arg if set(arg).isdisjoint(_BAD_CHARS_SET) else '"%s"' % arg
for arg in command_args
]
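# For example (sketch): on POSIX, _mapOptionalQuote(["--name", "My Suite"])
# returns ["--name", "'My Suite'"] via shlex/pipes quote; elsewhere an
# argument is double-quoted only if it contains shell-special characters.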
def execute_and_wait_with(item):
# type: ('QueueItem') -> None
global CTRL_C_PRESSED, _NUMBER_OF_ITEMS_TO_BE_EXECUTED
is_last = _NUMBER_OF_ITEMS_TO_BE_EXECUTED == 1
_NUMBER_OF_ITEMS_TO_BE_EXECUTED -= 1
if CTRL_C_PRESSED:
# Keyboard interrupt has happened!
return
time.sleep(0)  # yield execution so other threads get scheduled
try:
datasources = [
d.encode("utf-8") if PY2 and is_unicode(d) else d for d in item.datasources
]
caller_id = uuid.uuid4().hex
name = item.display_name
outs_dir = os.path.join(item.outs_dir, item.argfile_index, str(item.index))
os.makedirs(outs_dir)
cmd = _create_command_for_execution(
caller_id, datasources, is_last, item, outs_dir
)
if item.hive:
_hived_execute(
item.hive,
cmd,
outs_dir,
name,
item.verbose,
_make_id(),
caller_id,
item.index,
)
else:
_try_execute_and_wait(
cmd,
outs_dir,
name,
item.verbose,
_make_id(),
caller_id,
item.index,
item.execution_item.type != "test",
)
outputxml_preprocessing(
item.options, outs_dir, name, item.verbose, _make_id(), caller_id
)
except:
_write(traceback.format_exc())
def _create_command_for_execution(caller_id, datasources, is_last, item, outs_dir):
options = item.options.copy()
if item.command == ["robot"] and not options["listener"]:
options["listener"] = ["RobotStackTracer"]
cmd = (
item.command
+ _options_for_custom_executor(
options,
outs_dir,
item.execution_item,
item.argfile,
caller_id,
is_last,
item.index,
item.last_level,
item.processes,
)
+ datasources
)
return _mapOptionalQuote(cmd)
def _pabotlib_in_use():
return _PABOTLIBPROCESS or _PABOTLIBURI != "127.0.0.1:8270"
def _hived_execute(
hive, cmd, outs_dir, item_name, verbose, pool_id, caller_id, my_index=-1
):
plib = None
if _pabotlib_in_use():
plib = Remote(_PABOTLIBURI)
try:
make_order(hive, " ".join(cmd), outs_dir)
except:
_write(traceback.format_exc())
if plib:
_increase_completed(plib, my_index)
def _try_execute_and_wait(
cmd,
outs_dir,
item_name,
verbose,
pool_id,
caller_id,
my_index=-1,
show_stdout_on_failure=False,
):
# type: (List[str], str, str, bool, int, str, int, bool) -> None
plib = None
is_ignored = False
if _pabotlib_in_use():
plib = Remote(_PABOTLIBURI)
try:
with open(os.path.join(outs_dir, cmd[0] + "_stdout.out"), "w") as stdout:
with open(os.path.join(outs_dir, cmd[0] + "_stderr.out"), "w") as stderr:
process, (rc, elapsed) = _run(
cmd, stderr, stdout, item_name, verbose, pool_id, my_index
)
except:
_write(traceback.format_exc())
if plib:
_increase_completed(plib, my_index)
is_ignored = _is_ignored(plib, caller_id)
if is_ignored and os.path.isdir(outs_dir):
shutil.rmtree(outs_dir)
# Thread-safe list append
_ALL_ELAPSED.append(elapsed)
_result_to_stdout(
elapsed,
is_ignored,
item_name,
my_index,
pool_id,
process,
rc,
stderr,
stdout,
verbose,
show_stdout_on_failure,
)
def _result_to_stdout(
elapsed,
is_ignored,
item_name,
my_index,
pool_id,
process,
rc,
stderr,
stdout,
verbose,
show_stdout_on_failure,
):
if is_ignored:
_write_with_id(
process,
pool_id,
my_index,
_execution_ignored_message(item_name, stdout, stderr, elapsed, verbose),
)
elif rc != 0:
_write_with_id(
process,
pool_id,
my_index,
_execution_failed_message(
item_name, stdout, stderr, rc, verbose or show_stdout_on_failure
),
Color.RED,
)
else:
_write_with_id(
process,
pool_id,
my_index,
_execution_passed_message(item_name, stdout, stderr, elapsed, verbose),
Color.GREEN,
)
def _is_ignored(plib, caller_id): # type: (Remote, str) -> bool
return plib.run_keyword("is_ignored_execution", [caller_id], {})
# optionally invoke rebot for output.xml preprocessing to apply --removekeywords
# and --flattenkeywords => result: much smaller output.xml files, faster merging, and fewer MemoryErrors
def outputxml_preprocessing(options, outs_dir, item_name, verbose, pool_id, caller_id):
# type: (Dict[str, Any], str, str, bool, int, str) -> None
try:
remove_keywords = options["removekeywords"]
flatten_keywords = options["flattenkeywords"]
if not remove_keywords and not flatten_keywords:
# => no preprocessing needed if no removekeywords or flattenkeywords present
return
remove_keywords_args = [] # type: List[str]
flatten_keywords_args = [] # type: List[str]
for k in remove_keywords:
remove_keywords_args += ["--removekeywords", k]
for k in flatten_keywords:
flatten_keywords_args += ["--flattenkeywords", k]
outputxmlfile = os.path.join(outs_dir, "output.xml")
oldsize = os.path.getsize(outputxmlfile)
cmd = (
[
"rebot",
"--log",
"NONE",
"--report",
"NONE",
"--xunit",
"NONE",
"--consolecolors",
"off",
"--NoStatusRC",
]
+ remove_keywords_args
+ flatten_keywords_args
+ ["--output", outputxmlfile, outputxmlfile]
)
cmd = _mapOptionalQuote(cmd)
_try_execute_and_wait(
cmd,
outs_dir,
"preprocessing output.xml on " + item_name,
verbose,
pool_id,
caller_id,
)
newsize = os.path.getsize(outputxmlfile)
perc = 100.0 * newsize / oldsize
if verbose:
_write(
"%s [main] [%s] Filesize reduced from %s to %s (%0.2f%%) for file %s"
% (
datetime.datetime.now(),
pool_id,
oldsize,
newsize,
perc,
outputxmlfile,
)
)
except:
print(sys.exc_info())
def _write_with_id(process, pool_id, item_index, message, color=None, timestamp=None):
timestamp = timestamp or datetime.datetime.now()
_write(
"%s [PID:%s] [%s] [ID:%s] %s"
% (timestamp, process.pid, pool_id, item_index, message),
color,
)
def _make_id(): # type: () -> int
global EXECUTION_POOL_IDS, EXECUTION_POOL_ID_LOCK
thread_id = threading.current_thread().ident
assert thread_id is not None
with EXECUTION_POOL_ID_LOCK:
if thread_id not in EXECUTION_POOL_IDS:
EXECUTION_POOL_IDS += [thread_id]
return EXECUTION_POOL_IDS.index(thread_id)
def _increase_completed(plib, my_index):
# type: (Remote, int) -> None
global _COMPLETED_LOCK, _NOT_COMPLETED_INDEXES
with _COMPLETED_LOCK:
if my_index not in _NOT_COMPLETED_INDEXES:
return
_NOT_COMPLETED_INDEXES.remove(my_index)
if _NOT_COMPLETED_INDEXES:
plib.run_keyword(
"set_parallel_value_for_key",
[
pabotlib.PABOT_MIN_QUEUE_INDEX_EXECUTING_PARALLEL_VALUE,
_NOT_COMPLETED_INDEXES[0],
],
{},
)
if len(_NOT_COMPLETED_INDEXES) == 1:
plib.run_keyword(
"set_parallel_value_for_key", ["pabot_only_last_executing", 1], {}
)
def _run(command, stderr, stdout, item_name, verbose, pool_id, item_index):
# type: (List[str], IO[Any], IO[Any], str, bool, int, int) -> Tuple[Union[subprocess.Popen[bytes], subprocess.Popen], Tuple[int, float]]
timestamp = datetime.datetime.now()
cmd = " ".join(command)
if PY2:
cmd = cmd.decode("utf-8").encode(SYSTEM_ENCODING)
# avoid hitting https://bugs.python.org/issue10394
with POPEN_LOCK:
process = subprocess.Popen(cmd, shell=True, stderr=stderr, stdout=stdout)
if verbose:
_write_with_id(
process,
pool_id,
item_index,
"EXECUTING PARALLEL %s with command:\n%s" % (item_name, cmd),
timestamp=timestamp,
)
else:
_write_with_id(
process,
pool_id,
item_index,
"EXECUTING %s" % item_name,
timestamp=timestamp,
)
return process, _wait_for_return_code(process, item_name, pool_id, item_index)
def _wait_for_return_code(process, item_name, pool_id, item_index):
rc = None
elapsed = 0
ping_time = ping_interval = 150
while rc is None:
rc = process.poll()
time.sleep(0.1)
elapsed += 1
if elapsed == ping_time:
ping_interval += 50
ping_time += ping_interval
_write_with_id(
process,
pool_id,
item_index,
"still running %s after %s seconds" % (item_name, elapsed / 10.0),
)
return rc, elapsed / 10.0
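# The "still running" pings above back off: with 0.1 s polls the first ping
# fires at 15 s, then the gap grows by 5 s each time, so subsequent pings
# land at 35 s, 60 s, 90 s, and so on.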
def _read_file(file_handle):
try:
with open(file_handle.name, "r") as content_file:
content = content_file.read()
return content
except:
return "Unable to read file %s" % file_handle
def _execution_failed_message(suite_name, stdout, stderr, rc, verbose):
if not verbose:
return "FAILED %s" % suite_name
return "Execution failed in %s with %d failing test(s)\n%s\n%s" % (
suite_name,
rc,
_read_file(stdout),
_read_file(stderr),
)
def _execution_passed_message(suite_name, stdout, stderr, elapsed, verbose):
if not verbose:
return "PASSED %s in %s seconds" % (suite_name, elapsed)
return "PASSED %s in %s seconds\n%s\n%s" % (
suite_name,
elapsed,
_read_file(stdout),
_read_file(stderr),
)
def _execution_ignored_message(suite_name, stdout, stderr, elapsed, verbose):
if not verbose:
return "IGNORED %s" % suite_name
return "IGNORED %s in %s seconds\n%s\n%s" % (
suite_name,
elapsed,
_read_file(stdout),
_read_file(stderr),
)
def _options_for_custom_executor(*args):
# type: (Any) -> List[str]
return _options_to_cli_arguments(_options_for_executor(*args))
def _options_for_executor(
options,
outs_dir,
execution_item,
argfile,
caller_id,
is_last,
queueIndex,
last_level,
processes,
):
options = options.copy()
options["log"] = "NONE"
options["report"] = "NONE"
options["xunit"] = "NONE"
options["test"] = options.get("test", [])[:]
options["suite"] = options.get("suite", [])[:]
execution_item.modify_options_for_executor(options)
options["outputdir"] = "%OUTPUTDIR%" if execution_item.type == "hived" else outs_dir
options["variable"] = options.get("variable", [])[:]
options["variable"].append("CALLER_ID:%s" % caller_id)
pabotLibURIVar = "PABOTLIBURI:%s" % _PABOTLIBURI
# Prevent multiple appending of PABOTLIBURI variable setting
if pabotLibURIVar not in options["variable"]:
options["variable"].append(pabotLibURIVar)
pabotExecutionPoolId = "PABOTEXECUTIONPOOLID:%d" % _make_id()
if pabotExecutionPoolId not in options["variable"]:
options["variable"].append(pabotExecutionPoolId)
pabotIsLast = "PABOTISLASTEXECUTIONINPOOL:%s" % ("1" if is_last else "0")
if pabotIsLast not in options["variable"]:
options["variable"].append(pabotIsLast)
pabotProcesses = "PABOTNUMBEROFPROCESSES:%s" % str(processes)
if pabotProcesses not in options["variable"]:
options["variable"].append(pabotProcesses)
pabotIndex = pabotlib.PABOT_QUEUE_INDEX + ":" + str(queueIndex)
if pabotIndex not in options["variable"]:
options["variable"].append(pabotIndex)
if last_level is not None:
pabotLastLevel = pabotlib.PABOT_LAST_LEVEL + ":" + str(last_level)
if pabotLastLevel not in options["variable"]:
options["variable"].append(pabotLastLevel)
if argfile:
_modify_options_for_argfile_use(argfile, options, execution_item.top_name())
options["argumentfile"] = argfile
return _set_terminal_coloring_options(options)
def _modify_options_for_argfile_use(argfile, options, root_name):
argfile_opts, _ = ArgumentParser(
USAGE,
auto_pythonpath=False,
auto_argumentfile=True,
env_options="ROBOT_OPTIONS",
).parse_args(["--argumentfile", argfile])
old_name = options.get("name", root_name)
if argfile_opts["name"]:
new_name = argfile_opts["name"]
_replace_base_name(new_name, old_name, options, "suite")
if not options["suite"]:
_replace_base_name(new_name, old_name, options, "test")
if "name" in options:
del options["name"]
def _replace_base_name(new_name, old_name, options, key):
if isinstance(options.get(key, None), str):
options[key] = new_name + options[key][len(old_name) :]
elif key in options:
options[key] = [new_name + s[len(old_name) :] for s in options.get(key, [])]
def _set_terminal_coloring_options(options):
if ROBOT_VERSION >= "2.9":
options["consolecolors"] = "off"
options["consolemarkers"] = "off"
else:
options["monitorcolors"] = "off"
if ROBOT_VERSION >= "2.8" and ROBOT_VERSION < "2.9":
options["monitormarkers"] = "off"
return options
def _options_to_cli_arguments(opts): # type: (dict) -> List[str]
res = [] # type: List[str]
for k, v in opts.items():
if isinstance(v, str):
res += ["--" + str(k), str(v)]
elif PY2 and is_unicode(v):
res += ["--" + str(k), v.encode("utf-8")]
elif isinstance(v, bool) and (v is True):
res += ["--" + str(k)]
elif isinstance(v, list):
for value in v:
if PY2 and is_unicode(value):
res += ["--" + str(k), value.encode("utf-8")]
else:
res += ["--" + str(k), str(value)]
return res
def _group_by_groups(tokens):
result = []
group = None
for token in tokens:
if isinstance(token, GroupStartItem):
if group is not None:
raise DataError(
"Ordering: Group can not contain a group. Encoutered '{'"
)
group = GroupItem()
result.append(group)
continue
if isinstance(token, GroupEndItem):
if group is None:
raise DataError(
"Ordering: Group end tag '}' encountered before start '{'"
)
group = None
continue
if group is not None:
group.add(token)
else:
result.append(token)
return result
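# Ordering example (sketch): the token stream
#   --suite A, {, --suite B, --suite C, }, --suite D
# yields [SuiteItem(A), GroupItem(B, C), SuiteItem(D)], i.e. B and C are
# bundled into a single GroupItem and executed as one unit.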
def hash_directory(digest, path):
if os.path.isfile(path):
digest.update(_digest(_norm_path(path)))
get_hash_of_file(path, digest)
return
for root, _, files in os.walk(path):
for name in sorted(files):
file_path = os.path.join(root, name)
if os.path.isfile(file_path) and any(
file_path.endswith(p) for p in _ROBOT_EXTENSIONS
):
# DO NOT ALLOW CHANGE TO FILE LOCATION
digest.update(_digest(_norm_path(root)))
# DO THESE IN TWO PHASES BECAUSE SEPARATOR DIFFERS IN DIFFERENT OS
digest.update(_digest(name))
get_hash_of_file(file_path, digest)
def _norm_path(path):
return "/".join(os.path.normpath(path).split(os.path.sep))
def _digest(text):
text = text.decode("utf-8") if PY2 and not is_unicode(text) else text
return hashlib.sha1(text.encode("utf-8")).digest()
def get_hash_of_file(filename, digest):
if not os.path.isfile(filename):
return
with open(filename, "rb") as f_obj:
while True:
buf = f_obj.read(1024 * 1024)
if not buf:
break
digest.update(buf)
def get_hash_of_dirs(directories):
digest = hashlib.sha1()
for directory in directories:
hash_directory(digest, directory)
return digest.hexdigest()
IGNORED_OPTIONS = [
"pythonpath",
"outputdir",
"output",
"log",
"report",
"removekeywords",
"flattenkeywords",
"tagstatinclude",
"tagstatexclude",
"tagstatcombine",
"critical",
"noncritical",
"tagstatlink",
"metadata",
"tagdoc",
]
def get_hash_of_command(options, pabot_args):
digest = hashlib.sha1()
hopts = dict(options)
for option in options:
if option in IGNORED_OPTIONS or options[option] == []:
del hopts[option]
if pabot_args.get("testlevelsplit"):
hopts["testlevelsplit"] = True
digest.update(repr(sorted(hopts.items())).encode("utf-8"))
return digest.hexdigest()
Hashes = namedtuple("Hashes", ["dirs", "cmd", "suitesfrom"])
def _suitesfrom_hash(pabot_args):
if "suitesfrom" in pabot_args:
digest = hashlib.sha1()
get_hash_of_file(pabot_args["suitesfrom"], digest)
return digest.hexdigest()
else:
return "no-suites-from-option"
if PY2:
def _open_pabotsuitenames(mode):
return open(".pabotsuitenames", mode)
else:
def _open_pabotsuitenames(mode):
return open(".pabotsuitenames", mode, encoding="utf-8")
def solve_suite_names(outs_dir, datasources, options, pabot_args):
h = Hashes(
dirs=get_hash_of_dirs(datasources),
cmd=get_hash_of_command(options, pabot_args),
suitesfrom=_suitesfrom_hash(pabot_args),
)
try:
if not os.path.isfile(".pabotsuitenames"):
suite_names = generate_suite_names(
outs_dir, datasources, options, pabot_args
)
store_suite_names(h, suite_names)
return suite_names
with _open_pabotsuitenames("r") as suitenamesfile:
lines = [line.strip() for line in suitenamesfile.readlines()]
corrupted = len(lines) < 5
file_h = None # type: Optional[Hashes]
file_hash = None # type: Optional[str]
hash_of_file = None # type: Optional[str]
if not corrupted:
file_h = Hashes(
dirs=lines[0][len("datasources:") :],
cmd=lines[1][len("commandlineoptions:") :],
suitesfrom=lines[2][len("suitesfrom:") :],
)
file_hash = lines[3][len("file:") :]
hash_of_file = _file_hash(lines)
corrupted = corrupted or any(
not l.startswith("--suite ")
and not l.startswith("--test ")
and l != "#WAIT"
and l != "{"
and l != "}"
for l in lines[4:]
)
execution_item_lines = [parse_execution_item_line(l) for l in lines[4:]]
if corrupted or h != file_h or file_hash != hash_of_file:
return _regenerate(
file_h,
h,
pabot_args,
outs_dir,
datasources,
options,
execution_item_lines,
)
return execution_item_lines
except IOError:
return _levelsplit(
generate_suite_names_with_builder(outs_dir, datasources, options),
pabot_args,
)
def _levelsplit(
suites, pabot_args
): # type: (List[SuiteItem], Dict[str, str]) -> List[ExecutionItem]
if pabot_args.get("testlevelsplit"):
tests = [] # type: List[ExecutionItem]
for s in suites:
tests.extend(s.tests)
return tests
return list(suites)
def _group_by_wait(lines):
suites = [[]] # type: List[List[ExecutionItem]]
for suite in lines:
if not suite.isWait:
if suite:
suites[-1].append(suite)
else:
suites.append([])
return suites
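# Example (sketch): [s1, s2, #WAIT, s3] becomes [[s1, s2], [s3]]; each inner
# list is run to completion before the next one starts.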
def _regenerate(
file_h, h, pabot_args, outs_dir, datasources, options, lines
): # type: (Optional[Hashes], Hashes, Dict[str, str], str, List[str], Dict[str, str], List[ExecutionItem]) -> List[ExecutionItem]
assert all(isinstance(s, ExecutionItem) for s in lines)
if (
(file_h is None or file_h.suitesfrom != h.suitesfrom)
and "suitesfrom" in pabot_args
and os.path.isfile(pabot_args["suitesfrom"])
):
suites = _suites_from_outputxml(pabot_args["suitesfrom"])
if file_h is None or file_h.dirs != h.dirs:
all_suites = generate_suite_names_with_builder(
outs_dir, datasources, options
)
else:
all_suites = [suite for suite in lines if suite]
suites = _preserve_order(all_suites, suites)
else:
suites = _levelsplit(
generate_suite_names_with_builder(outs_dir, datasources, options),
pabot_args,
)
suites = _preserve_order(suites, [suite for suite in lines if suite])
if suites:
store_suite_names(h, suites)
assert all(isinstance(s, ExecutionItem) for s in suites)
return suites
def _contains_suite_and_test(suites):
return any(isinstance(s, SuiteItem) for s in suites) and any(
isinstance(t, TestItem) for t in suites
)
def _preserve_order(new_items, old_items):
assert all(isinstance(s, ExecutionItem) for s in new_items)
assert all(isinstance(s, ExecutionItem) for s in old_items)
old_contains_tests = any(isinstance(t, TestItem) for t in old_items)
old_contains_suites = any(isinstance(s, SuiteItem) for s in old_items)
old_items = _fix_items(old_items)
new_contains_tests = any(isinstance(t, TestItem) for t in new_items)
if old_contains_tests and old_contains_suites and not new_contains_tests:
new_items = _split_partially_to_tests(new_items, old_items)
# TODO: Preserving order when suites => tests OR tests => suites
preserve, ignorable = _get_preserve_and_ignore(
new_items, old_items, old_contains_tests and old_contains_suites
)
exists_in_old_and_new = [
s for s in old_items if (s in new_items and s not in ignorable) or s in preserve
]
exists_only_in_new = [
s for s in new_items if s not in old_items and s not in ignorable
]
return _fix_items(exists_in_old_and_new + exists_only_in_new)
def _fix_items(items): # type: (List[ExecutionItem]) -> List[ExecutionItem]
assert all(isinstance(s, ExecutionItem) for s in items)
to_be_removed = [] # type: List[int]
for i in range(len(items)):
for j in range(i + 1, len(items)):
if items[i].contains(items[j]):
to_be_removed.append(j)
items = [item for i, item in enumerate(items) if i not in to_be_removed]
result = [] # type: List[ExecutionItem]
to_be_splitted = {} # type: Dict[int, List[ExecutionItem]]
for i in range(len(items)):
if i in to_be_splitted:
result.extend(items[i].difference(to_be_splitted[i]))
else:
result.append(items[i])
for j in range(i + 1, len(items)):
if items[j].contains(items[i]):
if j not in to_be_splitted:
to_be_splitted[j] = []
to_be_splitted[j].append(items[i])
_remove_double_waits(result)
_remove_empty_groups(result)
if result and result[0].isWait:
result = result[1:]
if result and result[-1].isWait:
result = result[:-1]
return result
def _get_preserve_and_ignore(new_items, old_items, old_contains_suites_and_tests):
ignorable = []
preserve = []
for old_item in old_items:
for new_item in new_items:
if (
old_item.contains(new_item)
and new_item != old_item
and (isinstance(new_item, SuiteItem) or old_contains_suites_and_tests)
):
preserve.append(old_item)
ignorable.append(new_item)
if (
old_item.isWait
or isinstance(old_item, GroupStartItem)
or isinstance(old_item, GroupEndItem)
):
preserve.append(old_item)
preserve = [
new_item
for new_item in preserve
if not any([i.contains(new_item) and i != new_item for i in preserve])
]
return preserve, ignorable
def _remove_double_waits(exists_in_old_and_new): # type: (List[ExecutionItem]) -> None
doubles = []
for i, (j, k) in enumerate(zip(exists_in_old_and_new, exists_in_old_and_new[1:])):
if j.isWait and k == j:
doubles.append(i)
for i in reversed(doubles):
del exists_in_old_and_new[i]
def _remove_empty_groups(exists_in_old_and_new): # type: (List[ExecutionItem]) -> None
removables = []
for i, (j, k) in enumerate(zip(exists_in_old_and_new, exists_in_old_and_new[1:])):
if isinstance(j, GroupStartItem) and isinstance(k, GroupEndItem):
removables.extend([i, i + 1])
for i in reversed(removables):
del exists_in_old_and_new[i]
def _split_partially_to_tests(
new_suites, old_suites
): # type: (List[SuiteItem], List[ExecutionItem]) -> List[ExecutionItem]
suits = [] # type: List[ExecutionItem]
for s in new_suites:
split = False
for old_test in old_suites:
if isinstance(old_test, TestItem) and s.contains(old_test):
split = True
if split:
suits.extend(s.tests)
else:
suits.append(s)
return suits
def _file_hash(lines):
digest = hashlib.sha1()
digest.update(lines[0].encode())
digest.update(lines[1].encode())
digest.update(lines[2].encode())
hashes = 0
for line in lines[4:]:
if line not in ("#WAIT", "{", "}"):
line = line.decode("utf-8") if PY2 else line
hashes ^= int(hashlib.sha1(line.encode("utf-8")).hexdigest(), 16)
digest.update(str(hashes).encode())
return digest.hexdigest()
def store_suite_names(hashes, suite_names):
# type: (Hashes, List[ExecutionItem]) -> None
assert all(isinstance(s, ExecutionItem) for s in suite_names)
suite_lines = [s.line() for s in suite_names]
_write("Storing .pabotsuitenames file")
try:
with _open_pabotsuitenames("w") as suitenamesfile:
suitenamesfile.write("datasources:" + hashes.dirs + "\n")
suitenamesfile.write("commandlineoptions:" + hashes.cmd + "\n")
suitenamesfile.write("suitesfrom:" + hashes.suitesfrom + "\n")
suitenamesfile.write(
"file:"
+ _file_hash(
[
"datasources:" + hashes.dirs,
"commandlineoptions:" + hashes.cmd,
"suitesfrom:" + hashes.suitesfrom,
None,  # placeholder for the "file:" hash line (ignored by _file_hash)
]
+ suite_lines
)
+ "\n"
)
suitenamesfile.writelines(
(d + "\n").encode("utf-8") if PY2 and is_unicode(d) else d + "\n"
for d in suite_lines
)
except IOError:
_write(
"[ "
+ _wrap_with(Color.YELLOW, "WARNING")
+ " ]: storing .pabotsuitenames failed"
)
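# Resulting .pabotsuitenames layout (illustrative values):
#   datasources:<sha1 of data sources>
#   commandlineoptions:<sha1 of relevant options>
#   suitesfrom:<sha1 of output.xml or 'no-suites-from-option'>
#   file:<sha1 protecting the lines above and the suite list below>
#   --suite Some.Suite.Name
#   --test Some.Suite.Name.Test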
def generate_suite_names(
outs_dir, datasources, options, pabot_args
): # type: (object, object, object, Dict[str, str]) -> List[ExecutionItem]
suites = [] # type: List[SuiteItem]
if "suitesfrom" in pabot_args and os.path.isfile(pabot_args["suitesfrom"]):
suites = _suites_from_outputxml(pabot_args["suitesfrom"])
else:
suites = generate_suite_names_with_builder(outs_dir, datasources, options)
if pabot_args.get("testlevelsplit"):
tests = [] # type: List[ExecutionItem]
for s in suites:
tests.extend(s.tests)
return tests
return list(suites)
def generate_suite_names_with_builder(outs_dir, datasources, options):
opts = _options_for_dryrun(options, outs_dir)
if "pythonpath" in opts:
del opts["pythonpath"]
settings = RobotSettings(opts)
builder = TestSuiteBuilder(
settings["SuiteNames"], settings.extension, rpa=settings.rpa
)
suite = builder.build(*datasources)
settings.rpa = builder.rpa
suite.configure(**settings.suite_config)
if settings.pre_run_modifiers:
_write.error = _write  # shim: ModelModifier is handed _write as a logger-like object
suite.visit(
ModelModifier(settings.pre_run_modifiers, settings.run_empty_suite, _write)
)
all_suites = (
get_all_suites_from_main_suite(suite.suites) if suite.suites else [suite]
)
suite_names = [
SuiteItem(
suite.longname,
tests=[test.longname for test in suite.tests],
suites=suite.suites,
)
for suite in all_suites
]
if not suite_names and not options.get("runemptysuite", False):
stdout_value = opts["stdout"].getvalue()
if stdout_value:
_write(
"[STDOUT] from suite search:\n" + stdout_value + "[STDOUT] end",
Color.YELLOW,
)
stderr_value = opts["stderr"].getvalue()
if stderr_value:
_write(
"[STDERR] from suite search:\n" + stderr_value + "[STDERR] end",
Color.RED,
)
return list(sorted(set(suite_names)))
def get_all_suites_from_main_suite(suites):
all_suites = []
for suite in suites:
if suite.suites:
all_suites.extend(get_all_suites_from_main_suite(suite.suites))
else:
all_suites.append(suite)
return all_suites
class SuiteNotPassingsAndTimes(ResultVisitor):
def __init__(self):
self.suites = [] # type: List[Tuple[bool, int, str]]
def start_suite(self, suite):
if len(suite.tests) > 0:
self.suites.append((not suite.passed, suite.elapsedtime, suite.longname))
def _suites_from_outputxml(outputxml):
res = ExecutionResult(outputxml)
suite_times = SuiteNotPassingsAndTimes()
res.visit(suite_times)
return [SuiteItem(suite) for (_, _, suite) in reversed(sorted(suite_times.suites))]
def _options_for_dryrun(options, outs_dir):
options = options.copy()
options["log"] = "NONE"
options["report"] = "NONE"
options["xunit"] = "NONE"
options["variable"] = options.get("variable", [])[:]
options["variable"].append(pabotlib.PABOT_QUEUE_INDEX + ":-1")
if ROBOT_VERSION >= "2.8":
options["dryrun"] = True
else:
options["runmode"] = "DryRun"
options["output"] = "suite_names.xml"
# --timestampoutputs is not compatible with hard-coded suite_names.xml
options["timestampoutputs"] = False
options["outputdir"] = outs_dir
if PY2:
options["stdout"] = BytesIO()
options["stderr"] = BytesIO()
else:
options["stdout"] = StringIO()
options["stderr"] = StringIO()
options["listener"] = []
return _set_terminal_coloring_options(options)
def _options_for_rebot(options, start_time_string, end_time_string):
rebot_options = options.copy()
rebot_options["starttime"] = start_time_string
rebot_options["endtime"] = end_time_string
rebot_options["monitorcolors"] = "off"
rebot_options["suite"] = []
rebot_options["test"] = []
rebot_options["exclude"] = []
rebot_options["include"] = []
if ROBOT_VERSION >= "2.8":
options["monitormarkers"] = "off"
for key in [
"skip",
"skiponfailure",
"variable",
"variablefile",
"listener",
"prerunmodifier",
"monitorcolors",
]:
if key in rebot_options:
del rebot_options[key]
return rebot_options
def _now():
return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")
def _print_elapsed(start, end):
_write(
"Total testing: "
+ _time_string(sum(_ALL_ELAPSED))
+ "\nElapsed time: "
+ _time_string(end - start)
)
def _time_string(elapsed):
millis = int((elapsed * 100) % 100)
seconds = int(elapsed) % 60
elapsed_minutes = (int(elapsed) - seconds) / 60
minutes = elapsed_minutes % 60
elapsed_hours = (elapsed_minutes - minutes) / 60
elapsed_string = ""
if elapsed_hours > 0:
plural = ""
if elapsed_hours > 1:
plural = "s"
elapsed_string += ("%d hour" % elapsed_hours) + plural + " "
if minutes > 0:
plural = ""
if minutes > 1:
plural = "s"
elapsed_string += ("%d minute" % minutes) + plural + " "
return elapsed_string + "%d.%d seconds" % (seconds, millis)
def keyboard_interrupt(*args):
global CTRL_C_PRESSED
CTRL_C_PRESSED = True
def _parallel_execute(items, processes):
original_signal_handler = signal.signal(signal.SIGINT, keyboard_interrupt)
pool = ThreadPool(processes)
result = pool.map_async(execute_and_wait_with, items, 1)
pool.close()
while not result.ready():
# keyboard interrupt is executed in main thread
# and needs this loop to get time to get executed
try:
time.sleep(0.1)
except IOError:
keyboard_interrupt()
signal.signal(signal.SIGINT, original_signal_handler)
def _output_dir(options, cleanup=True):
outputdir = options.get("outputdir", ".")
outpath = os.path.join(outputdir, "pabot_results")
if cleanup and os.path.isdir(outpath):
shutil.rmtree(outpath)
return outpath
def _copy_output_artifacts(options, file_extensions=None, include_subfolders=False):
file_extensions = file_extensions or ["png"]
pabot_outputdir = _output_dir(options, cleanup=False)
outputdir = options.get("outputdir", ".")
copied_artifacts = []
for location, _, file_names in os.walk(pabot_outputdir):
for file_name in file_names:
file_ext = file_name.split(".")[-1]
if file_ext in file_extensions:
rel_path = os.path.relpath(location, pabot_outputdir)
prefix = rel_path.split(os.sep)[0] # folders named "process-id"
dst_folder_path = outputdir
# if it is a file from sub-folders of "location"
if os.sep in rel_path:
if not include_subfolders:
continue
# create destination sub-folder
subfolder_path = rel_path[rel_path.index(os.sep) + 1 :]
dst_folder_path = os.path.join(outputdir, subfolder_path)
if not os.path.isdir(dst_folder_path):
os.makedirs(dst_folder_path)
dst_file_name = "-".join([prefix, file_name])
shutil.copyfile(
os.path.join(location, file_name),
os.path.join(dst_folder_path, dst_file_name),
)
copied_artifacts.append(file_name)
return copied_artifacts
def _report_results(outs_dir, pabot_args, options, start_time_string, tests_root_name):
if "pythonpath" in options:
del options["pythonpath"]
if ROBOT_VERSION < "4.0":
stats = {
"critical": {"total": 0, "passed": 0, "failed": 0},
"all": {"total": 0, "passed": 0, "failed": 0},
}
else:
stats = {
"total": 0,
"passed": 0,
"failed": 0,
"skipped": 0,
}
if pabot_args["argumentfiles"]:
outputs = [] # type: List[str]
for index, _ in pabot_args["argumentfiles"]:
copied_artifacts = _copy_output_artifacts(
options, pabot_args["artifacts"], pabot_args["artifactsinsubfolders"]
)
outputs += [
_merge_one_run(
os.path.join(outs_dir, index),
options,
tests_root_name,
stats,
copied_artifacts,
outputfile=os.path.join("pabot_results", "output%s.xml" % index),
)
]
if "output" not in options:
options["output"] = "output.xml"
_write_stats(stats)
return rebot(*outputs, **_options_for_rebot(options, start_time_string, _now()))
else:
return _report_results_for_one_run(
outs_dir, pabot_args, options, start_time_string, tests_root_name, stats
)
def _write_stats(stats):
if ROBOT_VERSION < "4.0":
crit = stats["critical"]
al = stats["all"]
_write(
"%d critical tests, %d passed, %d failed"
% (crit["total"], crit["passed"], crit["failed"])
)
_write(
"%d tests total, %d passed, %d failed"
% (al["total"], al["passed"], al["failed"])
)
else:
_write(
"%d tests, %d passed, %d failed, %d skipped."
% (stats["total"], stats["passed"], stats["failed"], stats["skipped"])
)
_write("===================================================")
def _report_results_for_one_run(
outs_dir, pabot_args, options, start_time_string, tests_root_name, stats
):
copied_artifacts = _copy_output_artifacts(
options, pabot_args["artifacts"], pabot_args["artifactsinsubfolders"]
)
output_path = _merge_one_run(
outs_dir, options, tests_root_name, stats, copied_artifacts
)
_write_stats(stats)
if (
"report" in options
and options["report"] == "NONE"
and "log" in options
and options["log"] == "NONE"
):
options[
"output"
] = output_path # REBOT will return error 252 if nothing is written
else:
_write("Output: %s" % output_path)
options["output"] = None # Do not write output again with rebot
return rebot(output_path, **_options_for_rebot(options, start_time_string, _now()))
def _merge_one_run(
outs_dir, options, tests_root_name, stats, copied_artifacts, outputfile=None
):
outputfile = outputfile or options.get("output", "output.xml")
output_path = os.path.abspath(
os.path.join(options.get("outputdir", "."), outputfile)
)
files = sorted(glob(os.path.join(_glob_escape(outs_dir), "**/*.xml")))
if not files:
_write('WARN: No output files in "%s"' % outs_dir, Color.YELLOW)
return ""
def invalid_xml_callback():
global _ABNORMAL_EXIT_HAPPENED
_ABNORMAL_EXIT_HAPPENED = True
if PY2:
files = [f.decode(SYSTEM_ENCODING) if not is_unicode(f) else f for f in files]
resu = merge(
files, options, tests_root_name, copied_artifacts, invalid_xml_callback
)
_update_stats(resu, stats)
resu.save(output_path)
return output_path
def _update_stats(result, stats):
s = result.statistics
if ROBOT_VERSION < "4.0":
stats["critical"]["total"] += s.total.critical.total
stats["critical"]["passed"] += s.total.critical.passed
stats["critical"]["failed"] += s.total.critical.failed
stats["all"]["total"] += s.total.all.total
stats["all"]["passed"] += s.total.all.passed
stats["all"]["failed"] += s.total.all.failed
else:
stats["total"] += s.total.total
stats["passed"] += s.total.passed
stats["failed"] += s.total.failed
stats["skipped"] += s.total.skipped
# This is from https://github.com/django/django/blob/master/django/utils/glob.py
_magic_check = re.compile("([*?[])")
def _glob_escape(pathname):
"""
Escape all special characters.
"""
drive, pathname = os.path.splitdrive(pathname)
pathname = _magic_check.sub(r"[\1]", pathname)
return drive + pathname
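# Example: _glob_escape("out[1]/res*") returns "out[[]1]/res[*]", so glob
# treats the bracket and star literally when matching output directories.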
def _writer():
while True:
message = MESSAGE_QUEUE.get()
if message is None:
MESSAGE_QUEUE.task_done()
return
print(message)
sys.stdout.flush()
MESSAGE_QUEUE.task_done()
def _write(message, color=None):
MESSAGE_QUEUE.put(_wrap_with(color, message))
def _wrap_with(color, message):
if _is_output_coloring_supported() and color:
return "%s%s%s" % (color, message, Color.ENDC)
return message
def _is_output_coloring_supported():
return sys.stdout.isatty() and os.name in Color.SUPPORTED_OSES
def _start_message_writer():
t = threading.Thread(target=_writer)
t.start()
def _stop_message_writer():
MESSAGE_QUEUE.put(None)
MESSAGE_QUEUE.join()
def _get_free_port(pabot_args):
if pabot_args["pabotlibport"] != 0:
return pabot_args["pabotlibport"]
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
s.bind(("localhost", 0))
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return s.getsockname()[1]
def _start_remote_library(pabot_args): # type: (dict) -> Optional[subprocess.Popen]
global _PABOTLIBURI
free_port = _get_free_port(pabot_args)
_PABOTLIBURI = "%s:%s" % (pabot_args["pabotlibhost"], free_port)
if not pabot_args["pabotlib"]:
return None
if pabot_args.get("resourcefile") and not os.path.exists(
pabot_args["resourcefile"]
):
_write(
"Warning: specified resource file doesn't exist."
" Some tests may fail or continue forever.",
Color.YELLOW,
)
pabot_args["resourcefile"] = None
return subprocess.Popen(
'"{python}" -m {pabotlibname} {resourcefile} {pabotlibhost} {pabotlibport}'.format(
python=sys.executable,
pabotlibname=pabotlib.__name__,
resourcefile=pabot_args.get("resourcefile"),
pabotlibhost=pabot_args["pabotlibhost"],
pabotlibport=free_port,
),
shell=True,
)
def _stop_remote_library(process): # type: (subprocess.Popen) -> None
_write("Stopping PabotLib process")
try:
remoteLib = Remote(_PABOTLIBURI)
remoteLib.run_keyword("stop_remote_libraries", [], {})
remoteLib.run_keyword("stop_remote_server", [], {})
except RuntimeError:
_write("Could not connect to PabotLib - assuming stopped already")
return
i = 50
while i > 0 and process.poll() is None:
time.sleep(0.1)
i -= 1
if i == 0:
_write(
"Could not stop PabotLib Process in 5 seconds " "- calling terminate",
Color.YELLOW,
)
process.terminate()
else:
_write("PabotLib process stopped")
def _get_suite_root_name(suite_names):
top_names = [x.top_name() for group in suite_names for x in group]
if top_names and top_names.count(top_names[0]) == len(top_names):
return top_names[0]
return ""
class QueueItem(object):
_queue_index = 0
def __init__(
self,
datasources,
outs_dir,
options,
execution_item,
command,
verbose,
argfile,
hive=None,
processes=0,
):
# type: (List[str], str, Dict[str, object], ExecutionItem, List[str], bool, Tuple[str, Optional[str]], Optional[str], int) -> None
self.datasources = datasources
self.outs_dir = (
outs_dir.encode("utf-8") if PY2 and is_unicode(outs_dir) else outs_dir
)
self.options = options
self.execution_item = (
execution_item if not hive else HivedItem(execution_item, hive)
)
self.command = command
self.verbose = verbose
self.argfile_index = argfile[0]
self.argfile = argfile[1]
self._index = QueueItem._queue_index
QueueItem._queue_index += 1
self.last_level = None
self.hive = hive
self.processes = processes
@property
def index(self):
# type: () -> int
return self._index
@property
def display_name(self):
# type: () -> str
if self.argfile:
return "%s {%s}" % (self.execution_item.name, self.argfile)
return self.execution_item.name
def _create_execution_items(
suite_names, datasources, outs_dir, options, opts_for_run, pabot_args
):
is_dry_run = (
options.get("dryrun")
if ROBOT_VERSION >= "2.8"
else options.get("runmode") == "DryRun"
)
if is_dry_run:
all_items = _create_execution_items_for_dry_run(
suite_names, datasources, outs_dir, opts_for_run, pabot_args
)
else:
all_items = _create_execution_items_for_run(
suite_names, datasources, outs_dir, options, opts_for_run, pabot_args
)
_construct_index_and_completed_index(all_items)
_construct_last_levels(all_items)
return all_items
def _construct_index_and_completed_index(all_items):
# type: (List[List[QueueItem]]) -> None
global _COMPLETED_LOCK, _NOT_COMPLETED_INDEXES
with _COMPLETED_LOCK:
for item_group in all_items:
for item in item_group:
_NOT_COMPLETED_INDEXES.append(item.index)
def _create_execution_items_for_run(
suite_names, datasources, outs_dir, options, opts_for_run, pabot_args
):
global _NUMBER_OF_ITEMS_TO_BE_EXECUTED
all_items = [] # type: List[List[QueueItem]]
_NUMBER_OF_ITEMS_TO_BE_EXECUTED = 0
for suite_group in suite_names:
# TODO: Fix this better
if (
options.get("randomize") in ["all", "suites"]
and "suitesfrom" not in pabot_args
):
random.shuffle(suite_group)
items = _create_items(
datasources, opts_for_run, outs_dir, pabot_args, suite_group
)
_NUMBER_OF_ITEMS_TO_BE_EXECUTED += len(items)
all_items.append(items)
return all_items
def _create_items(datasources, opts_for_run, outs_dir, pabot_args, suite_group):
return [
QueueItem(
datasources,
outs_dir,
opts_for_run,
suite,
pabot_args["command"],
pabot_args["verbose"],
argfile,
pabot_args.get("hive"),
pabot_args["processes"],
)
for suite in suite_group
for argfile in pabot_args["argumentfiles"] or [("", None)]
]
def _create_execution_items_for_dry_run(
suite_names, datasources, outs_dir, opts_for_run, pabot_args
):
global _NUMBER_OF_ITEMS_TO_BE_EXECUTED
all_items = [] # type: List[List[QueueItem]]
_NUMBER_OF_ITEMS_TO_BE_EXECUTED = 0
processes_count = pabot_args["processes"]
for suite_group in suite_names:
items = _create_items(
datasources, opts_for_run, outs_dir, pabot_args, suite_group
)
chunk_size = (
round(len(items) / processes_count)
if len(items) > processes_count
else len(items)
)
chunked_items = list(_chunk_items(items, chunk_size))
_NUMBER_OF_ITEMS_TO_BE_EXECUTED += len(chunked_items)
all_items.append(chunked_items)
return all_items
def _chunk_items(items, chunk_size):
for i in range(0, len(items), chunk_size):
chunked_items = items[i : i + chunk_size]
base_item = chunked_items[0]
if not base_item:
continue
execution_items = SuiteItems([item.execution_item for item in chunked_items])
chunked_item = QueueItem(
base_item.datasources,
base_item.outs_dir,
base_item.options,
execution_items,
base_item.command,
base_item.verbose,
(base_item.argfile_index, base_item.argfile),
processes=base_item.processes,
)
yield chunked_item
def _find_ending_level(name, group):
n = name.split(".")
level = -1
for other in group:
o = other.split(".")
dif = [i for i in range(min(len(o), len(n))) if o[i] != n[i]]
if dif:
level = max(dif[0], level)
else:
return name + ".PABOT_noend"
return ".".join(n[: (level + 1)])
def _construct_last_levels(all_items):
names = []
for items in all_items:
for item in items:
if isinstance(item.execution_item, SuiteItems):
for suite in item.execution_item.suites:
names.append(suite.name)
else:
names.append(item.execution_item.name)
for items in all_items:
for item in items:
if isinstance(item.execution_item, SuiteItems):
for suite in item.execution_item.suites:
item.last_level = _find_ending_level(
suite.name, names[item.index + 1 :]
)
else:
item.last_level = _find_ending_level(
item.execution_item.name, names[item.index + 1 :]
)
def _initialize_queue_index():
global _PABOTLIBURI
plib = Remote(_PABOTLIBURI)
# INITIALISE PARALLEL QUEUE MIN INDEX
for i in range(300):
try:
plib.run_keyword(
"set_parallel_value_for_key",
[pabotlib.PABOT_MIN_QUEUE_INDEX_EXECUTING_PARALLEL_VALUE, 0],
{},
)
return
except RuntimeError as e:
# REMOTE LIB NOT YET CONNECTED
time.sleep(0.1)
raise RuntimeError("Can not connect to PabotLib at %s" % _PABOTLIBURI)
def _add_dynamically_created_execution_items(
execution_items, datasources, outs_dir, opts_for_run, pabot_args
):
global _COMPLETED_LOCK, _NOT_COMPLETED_INDEXES, _NUMBER_OF_ITEMS_TO_BE_EXECUTED
if not _pabotlib_in_use():
return
plib = Remote(_PABOTLIBURI)
new_suites = plib.run_keyword("get_added_suites", [], {})
if len(new_suites) == 0:
return
suite_group = [DynamicSuiteItem(s, v) for s, v in new_suites]
items = [
QueueItem(
datasources,
outs_dir,
opts_for_run,
suite,
pabot_args["command"],
pabot_args["verbose"],
("", None),
pabot_args.get("hive"),
pabot_args["processes"],
)
for suite in suite_group
]
with _COMPLETED_LOCK:
_NUMBER_OF_ITEMS_TO_BE_EXECUTED += len(items)
for item in items:
_NOT_COMPLETED_INDEXES.append(item.index)
execution_items.insert(0, items)
def main(args=None):
global _PABOTLIBPROCESS
args = args or sys.argv[1:]
if len(args) == 0:
print(
"[ "
+ _wrap_with(Color.RED, "ERROR")
+ " ]: Expected at least 1 argument, got 0."
)
print("Try --help for usage information.")
sys.exit(252)
start_time = time.time()
start_time_string = _now()
# NOTE: timeout option
try:
_start_message_writer()
options, datasources, pabot_args, opts_for_run = parse_args(args)
if pabot_args["help"]:
print(__doc__)
sys.exit(0)
if len(datasources) == 0:
print("[ " + _wrap_with(Color.RED, "ERROR") + " ]: No datasources given.")
print("Try --help for usage information.")
sys.exit(252)
_PABOTLIBPROCESS = _start_remote_library(pabot_args)
if _pabotlib_in_use():
_initialize_queue_index()
outs_dir = _output_dir(options)
suite_names = solve_suite_names(outs_dir, datasources, options, pabot_args)
if pabot_args["verbose"]:
_write("Suite names resolved in %s seconds" % str(time.time() - start_time))
ordering = pabot_args.get("ordering")
if ordering:
suite_names = _preserve_order(suite_names, ordering)
suite_names = _group_by_wait(_group_by_groups(suite_names))
if not suite_names or suite_names == [[]]:
_write("No tests to execute")
if not options.get("runemptysuite", False):
sys.exit(252)
execution_items = _create_execution_items(
suite_names, datasources, outs_dir, options, opts_for_run, pabot_args
)
while execution_items:
items = execution_items.pop(0)
_parallel_execute(items, pabot_args["processes"])
_add_dynamically_created_execution_items(
execution_items, datasources, outs_dir, opts_for_run, pabot_args
)
result_code = _report_results(
outs_dir,
pabot_args,
options,
start_time_string,
_get_suite_root_name(suite_names),
)
sys.exit(result_code if not _ABNORMAL_EXIT_HAPPENED else 252)
except Information as i:
print(__doc__)
print(i.message)
except DataError as err:
print(err.message)
sys.exit(252)
except Exception:
_write("[ERROR] EXCEPTION RAISED DURING PABOT EXECUTION", Color.RED)
_write(
"[ERROR] PLEASE CONSIDER REPORTING THIS ISSUE TO https://github.com/mkorpela/pabot/issues",
Color.RED,
)
raise
finally:
if _PABOTLIBPROCESS:
_stop_remote_library(_PABOTLIBPROCESS)
_print_elapsed(start_time, time.time())
_stop_message_writer()
if __name__ == "__main__":
main()
|
mkorpela/pabot
|
pabot/pabot.py
|
Python
|
apache-2.0
| 59,396
|
[
"VisIt"
] |
3b4fb64951c2c82a1a6ab442e4b1fc48de25eaa090e7286d3ff3b2dec3373284
|
"""Runs gene fusion caller with EricScript.
Install EricScript via `bcbio upgrade --toolplus ericscript`,
or manually add the path to the conda environment where it can be found
to the system config.
Reference data can be installed via `bcbio upgrade --datatarget ericscript`.
Alternatively, you can add path to the database into the system config.
EricScript requires a bwa index to be built for its reference data.
To run gene fusion detection on disambiguated reads, we convert the .bam file
which was output by disambiguate to fastq files.
"""
import glob
import os
from bcbio import utils
from bcbio.distributed.transaction import file_transaction
from bcbio.log import logger
from bcbio.pipeline import datadict as dd
from bcbio.pipeline.fastq import convert_bam_to_fastq
from bcbio.provenance import do
def run(config):
input_files = prepare_input_data(config)
run_ericscript(config, input_files)
return config
def prepare_input_data(config):
""" In case of disambiguation, we want to run fusion calling on
the disambiguated reads, which are in the work_bam file.
As EricScript accepts 2 fastq files as input, we need to convert
the .bam to 2 .fq files.
"""
if not dd.get_disambiguate(config):
return dd.get_input_sequence_files(config)
work_bam = dd.get_work_bam(config)
logger.info("Converting disambiguated reads to fastq...")
fq_files = convert_bam_to_fastq(
work_bam, dd.get_work_dir(config), None, None, config
)
return fq_files
def run_ericscript(sample_config, input_files):
es_config = EricScriptConfig(sample_config)
utils.safe_makedir(es_config.output_dir)
if es_config.has_ericscript_db():
with file_transaction(sample_config, es_config.sample_out_dir) as tx_out:
cmd = es_config.get_run_command(tx_out, input_files)
logger.info("Running EricScript:\n%s" % ' '.join(cmd))
do.run(cmd, es_config.info_message)
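# The assembled command has the shape (paths illustrative):
#   export PATH=<samtools0 dir>:<ericscript env dir>:$PATH; \
#     ericscript.pl -db <db> -name <sample> -o <tx_out> R1.fq R2.fq;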
class EricScriptConfig(object):
"""This class which encapsulates access to the data
related to EricScript in the sample config dictionary.
Public constants:
info_message: text message passed as an argument to do.run
EXECUTABLE: name of the EricScript command
Private constants:
_OUTPUT_DIR_NAME: name of the dir created in the working directory for
EricScript output
"""
info_message = 'Detect gene fusions with EricScript'
EXECUTABLE = 'ericscript.pl'
_OUTPUT_DIR_NAME = 'ericscript'
_REF_INDEX = 'allseq.fa.bwt'
_REF_FASTA = 'allseq.fa'
def __init__(self, data):
self._db_location = self._get_ericscript_db(data)
self._sample_name = dd.get_lane(data)
self._work_dir = dd.get_work_dir(data)
self._env = None
self._output_dir = None
self._sample_out_dir = None
def _get_ericscript_db(self, data):
transcript_file = dd.get_gtf_file(data)
if transcript_file and os.path.exists(transcript_file):
transcript_dir = os.path.dirname(transcript_file)
ericscript_dirs = glob.glob(os.path.join(transcript_dir, "ericscript", "ericscript_db*"))
if ericscript_dirs:
return sorted(ericscript_dirs)[-1]
def has_ericscript_db(self):
return self._db_location is not None
def get_run_command(self, tx_output_dir, input_files):
"""Constructs a command to run EricScript via do.run function.
:param tx_output_dir: A location where all EricScript output will be
written during execution.
:param input_files: an iterable with paths to 2 fastq files
with input data.
:return: str -- a shell command string that extends PATH and invokes EricScript
"""
logger.debug("Input data: %s" % ', '.join(input_files))
cmd = [
self.EXECUTABLE,
'-db', self._db_location,
'-name', self._sample_name,
'-o', tx_output_dir,
] + list(input_files)
return "export PATH=%s:%s:$PATH; %s;" % (self._get_samtools0_path(), self._get_ericscript_path(), " ".join(cmd))
def _get_ericscript_path(self):
"""Retrieve PATH to the isolated eriscript anaconda environment.
"""
es = utils.which(os.path.join(utils.get_bcbio_bin(), self.EXECUTABLE))
return os.path.dirname(os.path.realpath(es))
def _get_samtools0_path(self):
"""Retrieve PATH to the samtools version specific for eriscript.
"""
samtools_path = os.path.realpath(os.path.join(self._get_ericscript_path(),"..", "..", "bin"))
return samtools_path
@property
def output_dir(self):
"""Absolute path to permanent location in working directory
where EricScript output will be stored.
"""
if self._output_dir is None:
self._output_dir = self._get_output_dir()
return self._output_dir
@property
def sample_out_dir(self):
"""Absolute path to permanent location in working directory
where EricScript output for the current sample will be stored.
(a subdirectory of `output_dir`)
"""
if self._sample_out_dir is None:
self._sample_out_dir = os.path.join(
self.output_dir, self._sample_name
)
return self._sample_out_dir
@property
def reference_index(self):
"""Absolute path to the BWA index for EricScript reference data."""
if self._db_location:
ref_indices = glob.glob(os.path.join(self._db_location, "*", self._REF_INDEX))
if ref_indices:
return ref_indices[0]
@property
def reference_fasta(self):
"""Absolute path to the fasta file with EricScript reference data."""
if self._db_location:
ref_files = glob.glob(os.path.join(self._db_location, "*", self._REF_FASTA))
if ref_files:
return ref_files[0]
def _get_output_dir(self):
return os.path.join(self._work_dir, self._OUTPUT_DIR_NAME)
|
biocyberman/bcbio-nextgen
|
bcbio/rnaseq/ericscript.py
|
Python
|
mit
| 5,999
|
[
"BWA"
] |
620a92e53565cdaaca3ce655e233a86d37c98f4ce37143aa9209eaff4609ba55
|
import os,sys
import h5py
import numpy as np
# lflib imports
from lflib.iterative_deconvolution import LightFieldOperator
from lflib.calibration import LightFieldCalibration
from lflib.volume import LightFieldProjection
from lflib.optics import compute_light_field_psf
from lflib.optics import LensletArray
from lflib.imageio import load_image, save_image
# PyCuda imports
import pycuda.autoinit
import pycuda.gpuarray as gpuarray
import scikits.cuda.fft as cu_fft
# Scipy imports
from scipy.sparse.linalg import LinearOperator
from scipy.stats import ks_2samp
#-------------------------------------------------------------------------------------------------
# Classes and functions for evaluating resolution at the camera sensor
#-------------------------------------------------------------------------------------------------
def noisy_sensor_resolution(p1,p2,error_rate=30):
"""
This function takes two points in 3D space, p1 and p2, projects
each onto the camera sensor (noiselessly), adds Poisson noise with
rate parameter error_rate to each, and runs a KS test to determine
if the two distributions are distinguishable.
"""
# set lenslet array parameters
nu = 27
nv = 27
ns = 21
nt = 21
ulens_pitch = 125
ulens_focal_length = 2426
objective_magnification = 20
objective_na = 0.5
medium_index = 1.33
# Construct lenslet array object
lenslet_array = LensletArray(nu, nv, ns, nt,
ulens_pitch, ulens_focal_length, ulens_focal_length,
objective_magnification, objective_na, medium_index,
ulens_fill_factor = 1.0, pixel_fill_factor = 1.0,
circular_ulens_profile = False,
center_wavelength = 509) # Units: nanometers
# Input list with (intensity,x,y,z,num_lenslets_in_psf,lenslet_array,wavelength_nm)
# to compute_light_field_psf; wavelength currently fixed at 510nm with intensity = 1.0.
psf0 = compute_light_field_psf( None, 1.0, p1[0], p1[1], p1[2], ns, lenslet_array, 510 )
psf1 = compute_light_field_psf( None, 1.0, p2[0], p2[1], p2[2], ns, lenslet_array, 510 )
# Add gaussian noise (making poisson intensity >30 assumption)
# The shot noise variance for each nonzero pixel should be linearly
# related to the mean intensity of the corresponding pixel in p1.
noise = psf0 * np.random.normal(loc=0.0, scale=1.0, size=psf0.shape)
signal = psf1 - psf0 + np.random.normal(loc=0.0, scale=1.0, size=psf0.shape)
# log likelihood ratio on continuous data (based on poisson shot noise)
l0 = 2*psf0
la = psf1 + psf0
logL = np.sum( la*(np.log(la) - np.log(l0) - 1.0) + l0 )
# log likelihood ratio on discrete (16-bit) data (based on poisson shot noise)
psf_max = 2*np.max(psf0)
psf0_discrete = (65535*(psf0/psf_max)).astype(np.uint16)
psf1_discrete = (65535*(psf1/psf_max)).astype(np.uint16)
l0 = 2.0*psf0_discrete
la = psf1_discrete + psf0_discrete
log_la = np.log(la); log_la[log_la == -np.inf] = 0.0  # zero out log(0) entries elementwise
log_l0 = np.log(l0); log_l0[log_l0 == -np.inf] = 0.0
logL_discrete = np.sum( la*(log_la - log_l0 - 1.0) + l0 )
# save 16-bit pngs
save_image('/home/logan/Documents/Results/Resolution/sensor/psf0.png',psf0_discrete)
save_image('/home/logan/Documents/Results/Resolution/sensor/psf1.png',psf1_discrete)
# KS test
ks, pval = ks_2samp( signal.flatten(), noise.flatten() )
print "KS statistic:",ks
print "KS p-value:",pval
print "log Likelihood ratio:",logL
print "Discrete log Likelihood ratio:",logL_discrete
return ks,pval,logL,logL_discrete
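# Usage sketch (the coordinates below are illustrative only, in the units
# the optics code expects):
#   ks, pval, logL, logL_d = noisy_sensor_resolution((0, 0, 5e-4), (0, 1e-6, 5e-4))
# A small KS p-value suggests the two projected points are distinguishable.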
#-------------------------------------------------------------------------------------------------
# Classes and functions for evaluating resolution in volumes
#-------------------------------------------------------------------------------------------------
class CovarianceLinearOperator(object):
"""
Applies A'A to a vector if cov_type=="geometric" or to a delta function in a volume if the
cov_type=="wave", where A is the optical system linear operator.
A'A applied to a point in a volume results in the volumetric PSF for that point.
"""
def __init__(self, A, cov_type="geometric", vol_shape=None):
self.A = A
self.cov_type = cov_type
try:
self.shape = np.prod(vol_shape)
self.vol_shape = vol_shape
except:
print "For a geometric model, you must supply the shape of the volume the covariance matrix refers to as a tuple (y,x,z)."
def matvec(self, x):
if self.cov_type == "geometric":
matvec = self.A.rmatvec(self.A.matvec(x))
elif self.cov_type == "wave":
forward = compute_light_field_psf(*x) # x is a list with (intensity,x,y,z,num_lenslets_in_psf,lenslet_array,wavelength_nm)
matvec = self.A.rmatvec(forward)
return matvec
def rmatvec(self, x):
return self.matvec(x) # operator is symmetric
#-------------------------------------------------------------------------------------------------
def get_system_operator_pca(Cov,K=3):
"""
Get the K largest eigenvalues and vectors of sparse linear operator A using Lanczos iteration (ARPACK).
Note, this may be incredibly slow for large A. TODO: add iteration control.
"""
from scipy.sparse.linalg import eigsh
return eigsh(Cov, K, which='LM')
def get_psf_vol(point, Cov, model_type="geometric", raydb=None, lenslet_array=None):
"""
Get psf of a delta function at 'point' (y,x,z) in a volume by either:
-- if model_type=="geometric":
Apply the covariance operator Cov = A'A to the voxel indexed
by the voxel coordinates supplied in 'point'.
-- if model_type=="wave":
Use wave optics model to estimate sensor psf of a delta function in
the volume at 'point', then apply the appropriate A' to this (Ax).
"""
if model_type=="geometric":
# generate volume with single voxel "on"
vol = np.zeros(raydb.nvoxels, dtype=np.float32).reshape(raydb.ny, raydb.nx, raydb.nz)
vol[point] = 1.0*raydb.supersample_factor**3
vol_vec = vol.flatten()
# get psf
psf_vec = Cov.matvec(vol_vec)
psf_vol = np.reshape(psf_vec, (raydb.ny, raydb.nx, raydb.nz))
elif model_type=="wave":
# get number of lenslets in aperture for splat
objective_theta = np.arcsin(lenslet_array.na / lenslet_array.medium_index)
aperture_diameter = np.abs( 2*point[2]*np.tan(objective_theta) )
num_lenslets_in_aperture = int(np.ceil(aperture_diameter / (lenslet_array.pitch / lenslet_array.magnification)))
# generate point list information for wave optics covariance operator
# (this contains arguments [intensity,x,y,z,num_lenslets_in_psf,lenslet_array,wavelength_nm])
# and get psf
point_list = [1.0, point[1], point[0], point[2], num_lenslets_in_aperture, lenslet_array, 510] # wavelength currently fixed at 510nm
psf_vec = Cov.matvec(point_list)
psf_vol = np.reshape(psf_vec, Cov.vol_shape)
else:
raise ValueError("'model_type' must be geometric or wave")
return psf_vol
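# Usage sketch (geometric model; the calibration path is hypothetical;
# get_Cov_from_calibration is defined further down in this module):
#   Cov, raydb = get_Cov_from_calibration('/path/to/calibration.lfc')
#   psf = get_psf_vol((raydb.ny//2, raydb.nx//2, raydb.nz//2), Cov, raydb=raydb)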
def get_Sparrow_vols(vol_points_list,calibration_list,output_file,demagnified_pitch_size=6.25):
"""
Given a list of pairs of continuous (y,x,z) locations for two points (p_1,p_2) in the volume
call these 'vol_points' and the list 'vol_points_list', and a list of calibration files corresponding
to different supersampling factors, construct the covariance matrix for each rayspread/wavespread database
in the list, apply it to each of the points to obtain their PSFs in the volume and save the results to HDF5.
"""
# main HDF5 file
try:
print "Creating new HDF5 file:", output_file
volumes = h5py.File(output_file,'w-')
except:
print "Opening existing HDF5 file:", output_file
volumes = h5py.File(output_file,'r+')
# main loop over points in volume and supersampling factors
for i in xrange(len(vol_points_list)):
# get volume points and create HDF5 group to store results for this loop
vol_points = vol_points_list[i]
vols_by_points = volumes.create_group('vol_points_'+str(i))
# loop over calibration files for different supersampling factors
for calibration_file in calibration_list:
print "Analyzing:", calibration_file
# create subgroup to write data to for this calibration file
vols_by_sampling = vols_by_points.create_group('supersampling_factor_'+calibration_file.split('/')[-1].split('.')[0].split('_')[1])
# get covariance operator
Cov, raydb = get_Cov_from_calibration( calibration_file )
# get point coordinates in discretized volume
vol_coords = []
vol_coords.append( get_voxel_coords(vol_points[0], raydb, pitch=demagnified_pitch_size))
vol_coords.append( get_voxel_coords(vol_points[1], raydb, pitch=demagnified_pitch_size))
print "Volume points:", vol_points
print "Volume coordinates:",vol_coords
# generate two psfs for vol_points and add them to get a volume containing both
psf0 = get_psf_vol(vol_coords[0],Cov,raydb=raydb)
psf1 = get_psf_vol(vol_coords[1],Cov,raydb=raydb)
vol_vec = psf0 + psf1
vol = np.reshape(vol_vec, Cov.vol_shape)
dset = vols_by_sampling.create_dataset('Sparrow_volume', data=vol)
volumes.close()
return True
def get_voxel_coords(vol_points, raydb, pitch):
"""
Get discrete voxel coordinates from continuous point sources (in microns).
"""
supersample_factor = raydb.supersample_factor
x_coords = np.linspace(0,raydb.nx,num=raydb.nx+1)
y_coords = np.linspace(0,raydb.ny,num=raydb.ny+1)
z_coords = np.array(raydb.z_coords)
y = np.where(vol_points[0]<y_coords*(pitch/supersample_factor))[0][0]
x = np.where(vol_points[1]<x_coords*(pitch/supersample_factor))[0][0]
z = np.where(vol_points[2]<np.asarray(z_coords))[0][0]
return (y,x,z)
def plot_Sparrow_vols(vol, psf, vol_slice="xy"):
    """
    Extract a 2D slice through the volume 'vol' at the voxel coordinates
    'psf' (a sequence of (y,x,z) points; the first point selects the slice).
    """
    # get slice from volume
    if vol_slice == "xy":
        out_slice = vol[:,:,psf[0][2]]
    elif vol_slice == "xz":
        out_slice = vol[psf[0][0],:,:]
    elif vol_slice == "yz":
        out_slice = vol[:,psf[0][1],:]
    else:
        raise ValueError("'vol_slice' must be 'xy', 'xz' or 'yz'")
    return out_slice
def get_Cov_from_calibration( calibration_file ):
"""
Given a calibration file, generate a linear covariance operator Cov = A'A.
"""
lfcal = LightFieldCalibration.load(calibration_file)
raydb = lfcal.rayspread_db
lfproj = LightFieldProjection(raydb, disable_gpu = False, gpu_id = 4)
A_op = LightFieldOperator(lfproj, raydb)
CLO = CovarianceLinearOperator(A_op, vol_shape=(raydb.ny, raydb.nx, raydb.nz))
Cov = LinearOperator( (raydb.nvoxels, raydb.nvoxels), matvec=CLO.matvec, rmatvec=CLO.rmatvec, dtype=np.float32 )
Cov.vol_shape = CLO.vol_shape
return Cov, raydb
def FFT_3D_CUDA( vol ):
"""
Get the 3D FFT of a volume using scipy.cuda.
"""
nx = vol.shape[1]
ny = vol.shape[0]
nz = vol.shape[2]
vol = vol.astype(np.float32)  # astype returns a copy, so reassign
vol_gpu = gpuarray.to_gpu(vol)
F_vol_gpu = gpuarray.empty((ny, nx/2+1, nz), np.complex64)
plan_forward = cu_fft.Plan(vol_gpu.shape, np.float32, np.complex64)
cu_fft.fft(vol_gpu, F_vol_gpu, plan_forward)
F_vol = F_vol_gpu.get()
return F_vol
def IFFT_3D_CUDA( vol_gpu, F_vol_gpu ):
"""
Get the 3D inverse FFT of a volume using scipy.cuda.
"""
vol_gpu_out = gpuarray.empty_like(vol_gpu)
plan_inverse = cu_fft.Plan(vol_gpu_out.shape, np.complex64, np.float32)
cu_fft.ifft(F_vol_gpu, vol_gpu_out, plan_inverse, True)
vol_out = vol_gpu_out.get()
return vol_out
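# Round-trip sketch: a forward FFT on the GPU followed by the inverse should
# recover the volume up to floating point error (assumes a CUDA device and
# the scikits.cuda bindings imported above):
#   vol_gpu = gpuarray.to_gpu(vol.astype(np.float32))
#   F_vol_gpu = gpuarray.empty((vol.shape[0], vol.shape[1]/2+1, vol.shape[2]), np.complex64)
#   plan = cu_fft.Plan(vol_gpu.shape, np.float32, np.complex64)
#   cu_fft.fft(vol_gpu, F_vol_gpu, plan)
#   vol_back = IFFT_3D_CUDA(vol_gpu, F_vol_gpu)
#   np.allclose(vol, vol_back, atol=1e-5)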
def test_Sparrow():
from lflib.volume import LightFieldProjection
from lflib.calibration import LightFieldCalibration
print "Loading calibration data..."
calibration_file = '/lfdata/Results/LFzfish/20121018_aTubHS_dob20121004/1.2_cyanLED15_4Hz/calibration.lfc'
lfcal = LightFieldCalibration.load(calibration_file)
raydb = lfcal.rayspread_db  # needed below by LightFieldProjection and the Cov operator
# create LensletArray -- HACK, should be done through lfcal
lenslet_array = LensletArray(lfcal.nu,lfcal.nv,lfcal.ns,lfcal.nt,lfcal.array_pitch,lfcal.magnification,lfcal.na,lfcal.medium_index,lfcal.sample_index,lfcal.lenslet_fill_factor)
lenslet_array.objective_magnification = lenslet_array.magnification
lenslet_array.objective_na = lenslet_array.na
lenslet_array.ulens_pitch = lenslet_array.pitch
lenslet_array.ulens_focal_length = lfcal.focal_length
lenslet_array.ulens_focal_distance = lfcal.focal_distance
print "Getting light field projection..."
lfproj = LightFieldProjection(raydb, disable_gpu = False, gpu_id = 4)
print "Constructing A operator..."
A_op = LightFieldOperator(lfproj, raydb)
print "Constructing Cov operator..."
# CLO = CovarianceLinearOperator(A_op, vol_shape=(raydb.ny, raydb.nx, raydb.nz))
CLO = CovarianceLinearOperator(A_op, cov_type="wave",vol_shape=(raydb.ny, raydb.nx, raydb.nz))
Cov = LinearOperator( (raydb.nvoxels, 7), matvec=CLO.matvec, rmatvec=CLO.rmatvec, dtype=np.float32 )
Cov.vol_shape = CLO.vol_shape
print "Getting a PSF..."
coords = (raydb.ny//2, raydb.nx//2, raydb.nz//2)  # integer voxel indices
psf = get_psf_vol(coords, Cov, raydb=raydb, lenslet_array=lenslet_array, model_type="wave")
# psf = get_psf_vol(coords, Cov, raydb=raydb)
np.savez('/lfdata/Results/operator_analysis/psf.npz',psf=psf)
return  # deliberate early exit while debugging: stop after saving the PSF
print "Getting Sparrow criterion volumes..."
calibration_list = os.listdir('/lfdata/Results/operator_analysis/calibration_files')
for i in xrange(len(calibration_list)):
calibration_list[i] = '/lfdata/Results/operator_analysis/calibration_files/'+calibration_list[i]
# create list of points in volume for Sparrow criterion
vol_points_list = []
vol_points_base = [[250.0,250.0,40.0], [250.0,250.0,40.0]]
for i in xrange(20):
vol_points_base[1][0] +=1
vol_points_list.append(vol_points_base)
# for testing
# vol_points_list = [ [[248.0,250.0,40.0], [252.0,250.0,40.0]]] # for testing
# vol_points_list = [ [[248.0,250.0,40.0], [252.0,250.0,40.0]], [[247.0,250.0,40.0], [253.0,250.0,40.0]],[[246.0,250.0,40.0], [254.0,250.0,40.0]], [ [245.0,250.0,40.0], [255.0,250.0,40.0]] ]
vols = get_Sparrow_vols(vol_points_list,calibration_list,'/lfdata/Results/operator_analysis/big_test.h5')
#-------------------------------------------------------------------------------------------------
if __name__ == '__main__':
import pylab as pl
p1s = 10**np.linspace(-10,-3,num=100)
logLs = []; logLds = []
for p1 in p1s:
ks,pval,logL,logL_discrete = noisy_sensor_resolution((0,0,5e-4),(0,p1,5e-4),error_rate=1)
logLs.append(logL)
logLds.append(logL_discrete)
pl.clf()
pl.semilogx(p1s*1000, np.array(logLs)/np.max(np.array(logLs)),'r-')
pl.semilogx(p1s*1000, np.array(logLds)/np.max(np.array(logLds)),'b--')
pl.axhline(0,color='black')
pl.xlabel('Distance (microns)'); #pl.xscale('log')
pl.ylabel('Log Likelihood Ratio'); #pl.yscale('log')
pl.grid(True)
pl.savefig("/home/logan/Documents/Results/Resolution/sensor/logLik.png")
|
sophie63/FlyLFM
|
stanford_lfanalyze_v0.4/lflib/operator_analysis.py
|
Python
|
bsd-2-clause
| 15,765
|
[
"Gaussian"
] |
ecb5c8b407dff645ca152cdc52e6eaeb7a94ae2118807285647818f0f1bd6b87
|
#!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for cleaning methods in GSoC.
"""
import unittest
from django import forms
from soc.modules.gsoc.logic import cleaning
class Form(object):
"""A dummy form class for CleaningTest.
"""
def __init__(self):
self.cleaned_data = {}
self._errors = {}
class CleaningTest(unittest.TestCase):
"""Tests for cleaning methods in GSoC.
"""
def setUp(self):
self.form = Form()
def testCleanTagsList(self):
"""Tests if tags are cleaned and validated.
"""
field_name = 'tags'
clean_data = cleaning.cleanTagsList(field_name)
#Test valid tags.
field_value = "python\ndjango\ntesting"
data_to_clean = {field_name: field_value}
self.form.cleaned_data = data_to_clean
expected = field_value.split('\n')
self.assertEqual(clean_data(self.form), expected)
#Test if extra-whitespace in the tags string are removed.
field_value = "python \n django\n testing"
data_to_clean = {field_name: field_value}
self.form.cleaned_data = data_to_clean
temp = field_value.split('\n')
expected = [tag.strip() for tag in temp]
self.assertEqual(clean_data(self.form), expected)
#Invalid tags.
field_value = "python\n &%tag \n#^ase"
data_to_clean = {field_name: field_value}
self.form.cleaned_data = data_to_clean
self.assertRaises(forms.ValidationError, clean_data, self.form)
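# Allow running this module directly with the stdlib test runner
# (a convenience sketch; the project may use its own runner instead):
if __name__ == '__main__':
    unittest.main()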
|
adviti/melange
|
tests/app/soc/modules/gsoc/logic/test_cleaning.py
|
Python
|
apache-2.0
| 1,980
|
[
"ASE"
] |
d4c968413173cd48e38c0c0e051da4445d8e15b06d212b15e05ffce8c5a6707a
|
from __future__ import print_function
import time
from math import pi
from bokeh.browserlib import view
from bokeh.document import Document
from bokeh.glyphs import Line, Quad
from bokeh.objects import (
Plot, ColumnDataSource, DataRange1d, FactorRange,
LinearAxis, CategoricalAxis, Grid, Legend,
SingleIntervalTicker
)
from bokeh.sampledata.population import load_population
from bokeh.session import Session
from bokeh.widgets import Select, HBox, VBox
document = Document()
session = Session()
session.use_doc('population_server')
session.load_document(document)
df = load_population()
revision = 2012
year = 2010
location = "World"
years = [str(x) for x in sorted(df.Year.unique())]
locations = sorted(df.Location.unique())
source_pyramid = ColumnDataSource(data=dict())
def pyramid():
xdr = DataRange1d(sources=[source_pyramid.columns("male"), source_pyramid.columns("female")])
ydr = DataRange1d(sources=[source_pyramid.columns("groups")])
plot = Plot(title=None, x_range=xdr, y_range=ydr, plot_width=600, plot_height=600)
xaxis = LinearAxis()
plot.add_layout(xaxis, 'below')
yaxis = LinearAxis(ticker=SingleIntervalTicker(interval=5))
plot.add_layout(yaxis, 'left')
plot.add_layout(Grid(dimension=0, ticker=xaxis.ticker))
plot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))
male_quad = Quad(left="male", right=0, bottom="groups", top="shifted", fill_color="#3B8686")
male_quad_glyph = plot.add_glyph(source_pyramid, male_quad)
female_quad = Quad(left=0, right="female", bottom="groups", top="shifted", fill_color="#CFF09E")
female_quad_glyph = plot.add_glyph(source_pyramid, female_quad)
plot.add_layout(Legend(legends=dict(Male=[male_quad_glyph], Female=[female_quad_glyph])))
return plot
source_known = ColumnDataSource(data=dict(x=[], y=[]))
source_predicted = ColumnDataSource(data=dict(x=[], y=[]))
def population():
xdr = FactorRange(factors=years)
ydr = DataRange1d(sources=[source_known.columns("y"), source_predicted.columns("y")])
plot = Plot(title=None, x_range=xdr, y_range=ydr, plot_width=800, plot_height=200)
plot.add_layout(CategoricalAxis(major_label_orientation=pi/4), 'below')
line_known = Line(x="x", y="y", line_color="violet", line_width=2)
line_known_glyph = plot.add_glyph(source_known, line_known)
line_predicted = Line(x="x", y="y", line_color="violet", line_width=2, line_dash="dashed")
line_predicted_glyph = plot.add_glyph(source_predicted, line_predicted)
plot.add_layout(
Legend(
orientation="bottom_right",
legends=dict(known=[line_known_glyph], predicted=[line_predicted_glyph])
)
)
return plot
def update_pyramid():
pyramid = df[(df.Location == location) & (df.Year == year)]
male = pyramid[pyramid.Sex == "Male"]
female = pyramid[pyramid.Sex == "Female"]
total = male.Value.sum() + female.Value.sum()
male_percent = -male.Value/total
female_percent = female.Value/total
groups = male.AgeGrpStart.tolist()
shifted = groups[1:] + [groups[-1] + 5]
source_pyramid.data = dict(
groups=groups,
shifted=shifted,
male=male_percent,
female=female_percent,
)
def update_population():
population = df[df.Location == location].groupby(df.Year).Value.sum()
aligned_revision = revision//10 * 10
known = population[population.index <= aligned_revision]
predicted = population[population.index >= aligned_revision]
source_known.data = dict(x=known.index.map(str), y=known.values)
source_predicted.data = dict(x=predicted.index.map(str), y=predicted.values)
def update_data():
update_population()
update_pyramid()
session.store_document(document)
def on_year_change(obj, attr, old, new):
global year
year = int(new)
update_data()
def on_location_change(obj, attr, old, new):
global location
location = new
update_data()
def layout():
year_select = Select(title="Year:", value="2010", options=years)
location_select = Select(title="Location:", value="World", options=locations)
year_select.on_change('value', on_year_change)
location_select.on_change('value', on_location_change)
controls = HBox(children=[year_select, location_select])
layout = VBox(children=[controls, pyramid(), population()])
return layout
document.add(layout())
update_data()
if __name__ == "__main__":
link = session.object_link(document.context)
print("Please visit %s to see the plots" % link)
view(link)
print("\npress ctrl-C to exit")
session.poll_document(document)
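# Usage sketch (this script uses the legacy Bokeh 0.x plot-server API):
# start the plot server first, then run the script and open the printed link:
#   $ bokeh-server
#   $ python population_server.py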
|
jakevdp/bokeh
|
examples/glyphs/population_server.py
|
Python
|
bsd-3-clause
| 4,649
|
[
"VisIt"
] |
1159e8bd051b76dd99a676a3226e2516265ed2d823bca2377cd4f5f73c526965
|
# .. coding: utf-8
# $Id: __init__.py 7971 2016-09-13 19:11:48Z milde $
# Author: Engelbert Gruber, Günter Milde
# Maintainer: docutils-develop@lists.sourceforge.net
# Copyright: This module has been placed in the public domain.
"""LaTeX2e document tree Writer."""
__docformat__ = 'reStructuredText'
# code contributions from several people included, thanks to all.
# some named: David Abrahams, Julien Letessier, Lele Gaifax, and others.
#
# convention: deactivated code is marked by two hashes, i.e. ##.
import sys
import os
import time
import re
import string
import urllib.request, urllib.parse, urllib.error
try:
import roman
except ImportError:
import docutils.utils.roman as roman
from docutils import frontend, nodes, languages, writers, utils, io
from docutils.utils.error_reporting import SafeString
from docutils.transforms import writer_aux
from docutils.utils.math import pick_math_environment, unichar2tex
class Writer(writers.Writer):
supported = ('latex','latex2e')
"""Formats this writer supports."""
default_template = 'default.tex'
default_template_path = os.path.dirname(os.path.abspath(__file__))
default_preamble = '\n'.join([r'% PDF Standard Fonts',
r'\usepackage{mathptmx} % Times',
r'\usepackage[scaled=.90]{helvet}',
r'\usepackage{courier}'])
table_style_values = ('standard', 'booktabs','nolines', 'borderless',
'colwidths-auto', 'colwidths-given')
settings_spec = (
'LaTeX-Specific Options',
None,
(('Specify documentclass. Default is "article".',
['--documentclass'],
{'default': 'article', }),
('Specify document options. Multiple options can be given, '
'separated by commas. Default is "a4paper".',
['--documentoptions'],
{'default': 'a4paper', }),
('Footnotes with numbers/symbols by Docutils. (default)',
['--docutils-footnotes'],
{'default': True, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Format for footnote references: one of "superscript" or '
'"brackets". Default is "superscript".',
['--footnote-references'],
{'choices': ['superscript', 'brackets'], 'default': 'superscript',
'metavar': '<format>',
'overrides': 'trim_footnote_reference_space'}),
('Use \\cite command for citations. ',
['--use-latex-citations'],
{'default': 0, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Use figure floats for citations '
'(might get mixed with real figures). (default)',
['--figure-citations'],
{'dest': 'use_latex_citations', 'action': 'store_false',
'validator': frontend.validate_boolean}),
('Format for block quote attributions: one of "dash" (em-dash '
'prefix), "parentheses"/"parens", or "none". Default is "dash".',
['--attribution'],
{'choices': ['dash', 'parentheses', 'parens', 'none'],
'default': 'dash', 'metavar': '<format>'}),
('Specify LaTeX packages/stylesheets. '
' A style is referenced with \\usepackage if extension is '
'".sty" or omitted and with \\input else. '
' Overrides previous --stylesheet and --stylesheet-path settings.',
['--stylesheet'],
{'default': '', 'metavar': '<file[,file,...]>',
'overrides': 'stylesheet_path',
'validator': frontend.validate_comma_separated_list}),
('Comma separated list of LaTeX packages/stylesheets. '
'Relative paths are expanded if a matching file is found in '
'the --stylesheet-dirs. With --link-stylesheet, '
'the path is rewritten relative to the output *.tex file. ',
['--stylesheet-path'],
{'metavar': '<file[,file,...]>', 'overrides': 'stylesheet',
'validator': frontend.validate_comma_separated_list}),
('Link to the stylesheet(s) in the output file. (default)',
['--link-stylesheet'],
{'dest': 'embed_stylesheet', 'action': 'store_false'}),
('Embed the stylesheet(s) in the output file. '
'Stylesheets must be accessible during processing. ',
['--embed-stylesheet'],
{'default': 0, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Comma-separated list of directories where stylesheets are found. '
'Used by --stylesheet-path when expanding relative path arguments. '
'Default: "."',
['--stylesheet-dirs'],
{'metavar': '<dir[,dir,...]>',
'validator': frontend.validate_comma_separated_list,
'default': ['.']}),
('Customization by LaTeX code in the preamble. '
'Default: select PDF standard fonts (Times, Helvetica, Courier).',
['--latex-preamble'],
{'default': default_preamble}),
('Specify the template file. Default: "%s".' % default_template,
['--template'],
{'default': default_template, 'metavar': '<file>'}),
('Table of contents by LaTeX. (default) ',
['--use-latex-toc'],
{'default': 1, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Table of contents by Docutils (without page numbers). ',
['--use-docutils-toc'],
{'dest': 'use_latex_toc', 'action': 'store_false',
'validator': frontend.validate_boolean}),
('Add parts on top of the section hierarchy.',
['--use-part-section'],
{'default': 0, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Attach author and date to the document info table. (default) ',
['--use-docutils-docinfo'],
{'dest': 'use_latex_docinfo', 'action': 'store_false',
'validator': frontend.validate_boolean}),
('Attach author and date to the document title.',
['--use-latex-docinfo'],
{'default': 0, 'action': 'store_true',
'validator': frontend.validate_boolean}),
("Typeset abstract as topic. (default)",
['--topic-abstract'],
{'dest': 'use_latex_abstract', 'action': 'store_false',
'validator': frontend.validate_boolean}),
("Use LaTeX abstract environment for the document's abstract. ",
['--use-latex-abstract'],
{'default': 0, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Color of any hyperlinks embedded in text '
'(default: "blue", "false" to disable).',
['--hyperlink-color'], {'default': 'blue'}),
('Additional options to the "hyperref" package '
'(default: "").',
['--hyperref-options'], {'default': ''}),
('Enable compound enumerators for nested enumerated lists '
'(e.g. "1.2.a.ii"). Default: disabled.',
['--compound-enumerators'],
{'default': None, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Disable compound enumerators for nested enumerated lists. '
'This is the default.',
['--no-compound-enumerators'],
{'action': 'store_false', 'dest': 'compound_enumerators'}),
('Enable section ("." subsection ...) prefixes for compound '
'enumerators. This has no effect without --compound-enumerators. '
'Default: disabled.',
['--section-prefix-for-enumerators'],
{'default': None, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Disable section prefixes for compound enumerators. '
'This is the default.',
['--no-section-prefix-for-enumerators'],
{'action': 'store_false', 'dest': 'section_prefix_for_enumerators'}),
('Set the separator between section number and enumerator '
'for compound enumerated lists. Default is "-".',
['--section-enumerator-separator'],
{'default': '-', 'metavar': '<char>'}),
('When possible, use the specified environment for literal-blocks. '
'Default is quoting of whitespace and special chars.',
['--literal-block-env'],
{'default': ''}),
('When possible, use verbatim for literal-blocks. '
'Compatibility alias for "--literal-block-env=verbatim".',
['--use-verbatim-when-possible'],
{'default': 0, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Table style. "standard" with horizontal and vertical lines, '
'"booktabs" (LaTeX booktabs style) only horizontal lines '
'above and below the table and below the header or "borderless". '
'Default: "standard"',
['--table-style'],
{'default': ['standard'],
'metavar': '<format>',
'action': 'append',
'validator': frontend.validate_comma_separated_list,
'choices': table_style_values}),
('LaTeX graphicx package option. '
'Possible values are "dvips", "pdftex". "auto" includes LaTeX code '
'to use "pdftex" if processing with pdf(la)tex and dvips otherwise. '
'Default is no option.',
['--graphicx-option'],
{'default': ''}),
('LaTeX font encoding. '
'Possible values are "", "T1" (default), "OT1", "LGR,T1" or '
'any other combination of options to the `fontenc` package. ',
['--font-encoding'],
{'default': 'T1'}),
('Per default the latex-writer puts the reference title into '
'hyperreferences. Specify "ref*" or "pageref*" to get the section '
'number or the page number.',
['--reference-label'],
{'default': None, }),
('Specify style and database for bibtex, for example '
'"--use-bibtex=mystyle,mydb1,mydb2".',
['--use-bibtex'],
{'default': None, }),
),)
settings_defaults = {'sectnum_depth': 0 # updated by SectNum transform
}
config_section = 'latex2e writer'
config_section_dependencies = ('writers',)
head_parts = ('head_prefix', 'requirements', 'latex_preamble',
'stylesheet', 'fallbacks', 'pdfsetup',
'title', 'subtitle', 'titledata')
visitor_attributes = head_parts + ('body_pre_docinfo', 'docinfo',
'dedication', 'abstract', 'body')
output = None
"""Final translated form of `document`."""
def __init__(self):
writers.Writer.__init__(self)
self.translator_class = LaTeXTranslator
# Override parent method to add latex-specific transforms
def get_transforms(self):
return writers.Writer.get_transforms(self) + [
# Convert specific admonitions to generic one
writer_aux.Admonitions,
# TODO: footnote collection transform
]
def translate(self):
visitor = self.translator_class(self.document)
self.document.walkabout(visitor)
# copy parts
for part in self.visitor_attributes:
setattr(self, part, getattr(visitor, part))
# get template string from file
try:
template_file = open(self.document.settings.template, 'rb')
except IOError:
template_file = open(os.path.join(self.default_template_path,
self.document.settings.template), 'rb')
template = string.Template(str(template_file.read(), 'utf-8'))
template_file.close()
# fill template
self.assemble_parts() # create dictionary of parts
self.output = template.substitute(self.parts)
def assemble_parts(self):
"""Assemble the `self.parts` dictionary of output fragments."""
writers.Writer.assemble_parts(self)
for part in self.visitor_attributes:
lines = getattr(self, part)
if part in self.head_parts:
if lines:
lines.append('') # to get a trailing newline
self.parts[part] = '\n'.join(lines)
else:
# body contains inline elements, so join without newline
self.parts[part] = ''.join(lines)
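# Usage sketch (standard docutils publishing API; 'latex' selects this writer):
#   from docutils.core import publish_string
#   latex_source = publish_string(source='Hello *world*!', writer_name='latex')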
class Babel(object):
"""Language specifics for LaTeX."""
# TeX (babel) language names:
# ! not all of these are supported by Docutils!
#
# based on LyX' languages file with adaptations to `BCP 47`_
# (http://www.rfc-editor.org/rfc/bcp/bcp47.txt) and
# http://www.tug.org/TUGboat/Articles/tb29-3/tb93miklavec.pdf
# * the key without subtags is the default
# * case is ignored
# cf. http://docutils.sourceforge.net/docs/howto/i18n.html
# http://www.w3.org/International/articles/language-tags/
# and http://www.iana.org/assignments/language-subtag-registry
language_codes = {
# code TeX/Babel-name comment
'af': 'afrikaans',
'ar': 'arabic',
# 'be': 'belarusian',
'bg': 'bulgarian',
'br': 'breton',
'ca': 'catalan',
# 'cop': 'coptic',
'cs': 'czech',
'cy': 'welsh',
'da': 'danish',
'de': 'ngerman', # new spelling (de_1996)
'de-1901': 'german', # old spelling
'de-AT': 'naustrian',
'de-AT-1901': 'austrian',
'dsb': 'lowersorbian',
'el': 'greek', # monotonic (el-monoton)
'el-polyton': 'polutonikogreek',
'en': 'english', # TeX' default language
'en-AU': 'australian',
'en-CA': 'canadian',
'en-GB': 'british',
'en-NZ': 'newzealand',
'en-US': 'american',
'eo': 'esperanto',
'es': 'spanish',
'et': 'estonian',
'eu': 'basque',
# 'fa': 'farsi',
'fi': 'finnish',
'fr': 'french',
'fr-CA': 'canadien',
'ga': 'irish', # Irish Gaelic
# 'grc': # Ancient Greek
'grc-ibycus': 'ibycus', # Ibycus encoding
'gl': 'galician',
'he': 'hebrew',
'hr': 'croatian',
'hsb': 'uppersorbian',
'hu': 'magyar',
'ia': 'interlingua',
'id': 'bahasai', # Bahasa (Indonesian)
'is': 'icelandic',
'it': 'italian',
'ja': 'japanese',
'kk': 'kazakh',
'la': 'latin',
'lt': 'lithuanian',
'lv': 'latvian',
'mn': 'mongolian', # Mongolian, Cyrillic script (mn-cyrl)
'ms': 'bahasam', # Bahasa (Malay)
'nb': 'norsk', # Norwegian Bokmal
'nl': 'dutch',
'nn': 'nynorsk', # Norwegian Nynorsk
'no': 'norsk', # Norwegian (Bokmal)
'pl': 'polish',
'pt': 'portuges',
'pt-BR': 'brazil',
'ro': 'romanian',
'ru': 'russian',
'se': 'samin', # North Sami
'sh-Cyrl': 'serbianc', # Serbo-Croatian, Cyrillic script
'sh-Latn': 'serbian', # Serbo-Croatian, Latin script see also 'hr'
'sk': 'slovak',
'sl': 'slovene',
'sq': 'albanian',
'sr': 'serbianc', # Serbian, Cyrillic script (contributed)
'sr-Latn': 'serbian', # Serbian, Latin script
'sv': 'swedish',
# 'th': 'thai',
'tr': 'turkish',
'uk': 'ukrainian',
'vi': 'vietnam',
# zh-Latn: Chinese Pinyin
}
# normalize (downcase) keys
language_codes = dict([(k.lower(), v) for (k,v) in list(language_codes.items())])
warn_msg = 'Language "%s" not supported by LaTeX (babel)'
# "Active characters" are shortcuts that start a LaTeX macro and may need
# escaping for literal use. Characters that prevent literal use (e.g.
# starting accent macros like "a -> ä) will be deactivated if one of the
# defining languages is used in the document.
# Special cases:
# ~ (tilde) -- used in estonian, basque, galician, and old versions of
# spanish -- cannot be deactivated as it denotes a no-break space macro.
# " (straight quote) -- used in albanian, austrian, basque
# brazil, bulgarian, catalan, czech, danish, dutch, estonian,
# finnish, galician, german, icelandic, italian, latin, naustrian,
# ngerman, norsk, nynorsk, polish, portuges, russian, serbian, slovak,
# slovene, spanish, swedish, ukrainian, and uppersorbian --
# is escaped as ``\textquotedbl``.
active_chars = {# TeX/Babel-name: active characters to deactivate
# 'breton': ':;!?' # ensure whitespace
# 'esperanto': '^',
# 'estonian': '~"`',
# 'french': ':;!?' # ensure whitespace
'galician': '.<>', # also '~"'
# 'magyar': '`', # for special hyphenation cases
'spanish': '.<>', # old versions also '~'
# 'turkish': ':!=' # ensure whitespace
}
def __init__(self, language_code, reporter=None):
self.reporter = reporter
self.language = self.language_name(language_code)
self.otherlanguages = {}
def __call__(self):
"""Return the babel call with correct options and settings"""
languages = sorted(self.otherlanguages.keys())
languages.append(self.language or 'english')
self.setup = [r'\usepackage[%s]{babel}' % ','.join(languages)]
# Deactivate "active characters"
shorthands = []
for c in ''.join([self.active_chars.get(l, '') for l in languages]):
if c not in shorthands:
shorthands.append(c)
if shorthands:
self.setup.append(r'\AtBeginDocument{\shorthandoff{%s}}'
% ''.join(shorthands))
# Including '~' in shorthandoff prevents its use as no-break space
if 'galician' in languages:
self.setup.append(r'\deactivatetilden % restore ~ in Galician')
if 'estonian' in languages:
self.setup.extend([r'\makeatletter',
r' \addto\extrasestonian{\bbl@deactivate{~}}',
r'\makeatother'])
if 'basque' in languages:
self.setup.extend([r'\makeatletter',
r' \addto\extrasbasque{\bbl@deactivate{~}}',
r'\makeatother'])
if (languages[-1] == 'english' and
'french' in list(self.otherlanguages.keys())):
self.setup += ['% Prevent side-effects if French hyphenation '
'patterns are not loaded:',
r'\frenchbsetup{StandardLayout}',
r'\AtBeginDocument{\selectlanguage{%s}'
r'\noextrasfrench}' % self.language]
return '\n'.join(self.setup)
def language_name(self, language_code):
"""Return TeX language name for `language_code`"""
for tag in utils.normalize_language_tag(language_code):
try:
return self.language_codes[tag]
except KeyError:
pass
if self.reporter is not None:
self.reporter.warning(self.warn_msg % language_code)
return ''
def get_language(self):
# Obsolete, kept for backwards compatibility with Sphinx
return self.language
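# Usage sketch:
#   Babel('de')() -> '\usepackage[ngerman]{babel}'
#   (plus an \AtBeginDocument{\shorthandoff{...}} line for languages
#   with active characters, e.g. Spanish or Galician)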
# Building blocks for the latex preamble
# --------------------------------------
class SortableDict(dict):
"""Dictionary with additional sorting methods
Tip: use keys starting with '_' to sort before small letters
and with '~' for sorting after small letters.
"""
def sortedkeys(self):
"""Return sorted list of keys"""
keys = list(self.keys())
keys.sort()
return keys
def sortedvalues(self):
"""Return list of values sorted by keys"""
return [self[key] for key in self.sortedkeys()]
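# Usage sketch (ASCII order: '_' < lowercase letters < '~'):
#   d = SortableDict({'_first': 1, 'middle': 2, '~last': 3})
#   d.sortedkeys()   -> ['_first', 'middle', '~last']
#   d.sortedvalues() -> [1, 2, 3]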
# PreambleCmds
# `````````````
# A container for LaTeX code snippets that can be
# inserted into the preamble if required in the document.
#
# .. The package 'makecmds' would enable shorter definitions using the
# \providelength and \provideenvironment commands.
# However, it is pretty non-standard (texlive-latex-extra).
class PreambleCmds(object):
"""Building blocks for the latex preamble."""
PreambleCmds.abstract = r"""
% abstract title
\providecommand*{\DUtitleabstract}[1]{\centering\textbf{#1}}"""
PreambleCmds.admonition = r"""
% admonition (specially marked topic)
\providecommand{\DUadmonition}[2][class-arg]{%
% try \DUadmonition#1{#2}:
\ifcsname DUadmonition#1\endcsname%
\csname DUadmonition#1\endcsname{#2}%
\else
\begin{center}
\fbox{\parbox{0.9\linewidth}{#2}}
\end{center}
\fi
}"""
PreambleCmds.align_center = r"""
\makeatletter
\@namedef{DUrolealign-center}{\centering}
\makeatother
"""
## PreambleCmds.caption = r"""% configure caption layout
## \usepackage{caption}
## \captionsetup{singlelinecheck=false}% no exceptions for one-liners"""
PreambleCmds.color = r"""\usepackage{color}"""
PreambleCmds.docinfo = r"""
% docinfo (width of docinfo table)
\DUprovidelength{\DUdocinfowidth}{0.9\linewidth}"""
# PreambleCmds.docinfo._depends = 'providelength'
PreambleCmds.dedication = r"""
% dedication topic
\providecommand{\DUtopicdedication}[1]{\begin{center}#1\end{center}}"""
PreambleCmds.error = r"""
% error admonition title
\providecommand*{\DUtitleerror}[1]{\DUtitle{\color{red}#1}}"""
# PreambleCmds.errortitle._depends = 'color'
PreambleCmds.fieldlist = r"""
% fieldlist environment
\ifthenelse{\isundefined{\DUfieldlist}}{
\newenvironment{DUfieldlist}%
{\quote\description}
{\enddescription\endquote}
}{}"""
PreambleCmds.float_settings = r"""\usepackage{float} % float configuration
\floatplacement{figure}{H} % place figures here definitely"""
PreambleCmds.footnotes = r"""% numeric or symbol footnotes with hyperlinks
\providecommand*{\DUfootnotemark}[3]{%
\raisebox{1em}{\hypertarget{#1}{}}%
\hyperlink{#2}{\textsuperscript{#3}}%
}
\providecommand{\DUfootnotetext}[4]{%
\begingroup%
\renewcommand{\thefootnote}{%
\protect\raisebox{1em}{\protect\hypertarget{#1}{}}%
\protect\hyperlink{#2}{#3}}%
\footnotetext{#4}%
\endgroup%
}"""
PreambleCmds.graphicx_auto = r"""% Check output format
\ifx\pdftexversion\undefined
\usepackage{graphicx}
\else
\usepackage[pdftex]{graphicx}
\fi"""
PreambleCmds.highlight_rules = r"""% basic code highlight:
\providecommand*\DUrolecomment[1]{\textcolor[rgb]{0.40,0.40,0.40}{#1}}
\providecommand*\DUroledeleted[1]{\textcolor[rgb]{0.40,0.40,0.40}{#1}}
\providecommand*\DUrolekeyword[1]{\textbf{#1}}
\providecommand*\DUrolestring[1]{\textit{#1}}"""
PreambleCmds.inline = r"""
% inline markup (custom roles)
% \DUrole{#1}{#2} tries \DUrole#1{#2}
\providecommand*{\DUrole}[2]{%
\ifcsname DUrole#1\endcsname%
\csname DUrole#1\endcsname{#2}%
\else% backwards compatibility: try \docutilsrole#1{#2}
\ifcsname docutilsrole#1\endcsname%
\csname docutilsrole#1\endcsname{#2}%
\else%
#2%
\fi%
\fi%
}"""
PreambleCmds.legend = r"""
% legend environment
\ifthenelse{\isundefined{\DUlegend}}{
\newenvironment{DUlegend}{\small}{}
}{}"""
PreambleCmds.lineblock = r"""
% lineblock environment
\DUprovidelength{\DUlineblockindent}{2.5em}
\ifthenelse{\isundefined{\DUlineblock}}{
\newenvironment{DUlineblock}[1]{%
\list{}{\setlength{\partopsep}{\parskip}
\addtolength{\partopsep}{\baselineskip}
\setlength{\topsep}{0pt}
\setlength{\itemsep}{0.15\baselineskip}
\setlength{\parsep}{0pt}
\setlength{\leftmargin}{#1}}
\raggedright
}
{\endlist}
}{}"""
# PreambleCmds.lineblock._depends = 'providelength'
PreambleCmds.linking = r"""
%% hyperlinks:
\ifthenelse{\isundefined{\hypersetup}}{
\usepackage[%s]{hyperref}
\usepackage{bookmark}
\urlstyle{same} %% normal text font (alternatives: tt, rm, sf)
}{}"""
PreambleCmds.minitoc = r"""%% local table of contents
\usepackage{minitoc}"""
PreambleCmds.optionlist = r"""
% optionlist environment
\providecommand*{\DUoptionlistlabel}[1]{\bf #1 \hfill}
\DUprovidelength{\DUoptionlistindent}{3cm}
\ifthenelse{\isundefined{\DUoptionlist}}{
\newenvironment{DUoptionlist}{%
\list{}{\setlength{\labelwidth}{\DUoptionlistindent}
\setlength{\rightmargin}{1cm}
\setlength{\leftmargin}{\rightmargin}
\addtolength{\leftmargin}{\labelwidth}
\addtolength{\leftmargin}{\labelsep}
\renewcommand{\makelabel}{\DUoptionlistlabel}}
}
{\endlist}
}{}"""
# PreambleCmds.optionlist._depends = 'providelength'
PreambleCmds.providelength = r"""
% providelength (provide a length variable and set default, if it is new)
\providecommand*{\DUprovidelength}[2]{
\ifthenelse{\isundefined{#1}}{\newlength{#1}\setlength{#1}{#2}}{}
}"""
PreambleCmds.rubric = r"""
% rubric (informal heading)
\providecommand*{\DUrubric}[2][class-arg]{%
\subsubsection*{\centering\textit{\textmd{#2}}}}"""
PreambleCmds.sidebar = r"""
% sidebar (text outside the main text flow)
\providecommand{\DUsidebar}[2][class-arg]{%
\begin{center}
\colorbox[gray]{0.80}{\parbox{0.9\linewidth}{#2}}
\end{center}
}"""
PreambleCmds.subtitle = r"""
% subtitle (for topic/sidebar)
\providecommand*{\DUsubtitle}[2][class-arg]{\par\emph{#2}\smallskip}"""
PreambleCmds.documentsubtitle = r"""
% subtitle (in document title)
\providecommand*{\DUdocumentsubtitle}[1]{{\large #1}}"""
PreambleCmds.table = r"""\usepackage{longtable,ltcaption,array}
\setlength{\extrarowheight}{2pt}
\newlength{\DUtablewidth} % internal use in tables"""
# Options [force,almostfull] prevent spurious error messages, see
# de.comp.text.tex/2005-12/msg01855
PreambleCmds.textcomp = """\
\\usepackage{textcomp} % text symbol macros"""
PreambleCmds.titlereference = r"""
% titlereference role
\providecommand*{\DUroletitlereference}[1]{\textsl{#1}}"""
PreambleCmds.title = r"""
% title for topics, admonitions, unsupported section levels, and sidebar
\providecommand*{\DUtitle}[2][class-arg]{%
% call \DUtitle#1{#2} if it exists:
\ifcsname DUtitle#1\endcsname%
\csname DUtitle#1\endcsname{#2}%
\else
\smallskip\noindent\textbf{#2}\smallskip%
\fi
}"""
PreambleCmds.topic = r"""
% topic (quote with heading)
\providecommand{\DUtopic}[2][class-arg]{%
\ifcsname DUtopic#1\endcsname%
\csname DUtopic#1\endcsname{#2}%
\else
\begin{quote}#2\end{quote}
\fi
}"""
PreambleCmds.transition = r"""
% transition (break, fancybreak, anonymous section)
\providecommand*{\DUtransition}[1][class-arg]{%
\hspace*{\fill}\hrulefill\hspace*{\fill}
\vskip 0.5\baselineskip
}"""
# LaTeX encoding maps
# -------------------
# ::
class CharMaps(object):
"""LaTeX representations for active and Unicode characters."""
# characters that need escaping even in `alltt` environments:
alltt = {
ord('\\'): r'\textbackslash{}',
ord('{'): r'\{',
ord('}'): r'\}',
}
# characters that normally need escaping:
special = {
ord('#'): r'\#',
ord('$'): r'\$',
ord('%'): r'\%',
ord('&'): r'\&',
ord('~'): r'\textasciitilde{}',
ord('_'): r'\_',
ord('^'): r'\textasciicircum{}',
# straight double quotes are 'active' in many languages
ord('"'): r'\textquotedbl{}',
# Square brackets are ordinary chars and cannot be escaped with '\',
# so we put them in a group '{[}'. (Alternative: ensure that all
# macros with optional arguments are terminated with {} and text
# inside any optional argument is put in a group ``[{text}]``).
# Commands with optional args inside an optional arg must be put in a
# group, e.g. ``\item[{\hyperref[label]{text}}]``.
ord('['): r'{[}',
ord(']'): r'{]}',
# the soft hyphen is unknown in 8-bit text
# and not properly handled by XeTeX
0x00AD: r'\-', # SOFT HYPHEN
}
# Unicode chars that are not recognized by LaTeX's utf8 encoding
unsupported_unicode = {
0x00A0: r'~', # NO-BREAK SPACE
# TODO: ensure white space also at the beginning of a line?
# 0x00A0: ur'\leavevmode\nobreak\vadjust{}~'
0x2008: r'\,', # PUNCTUATION SPACE
0x2011: r'\hbox{-}', # NON-BREAKING HYPHEN
0x202F: r'\,', # NARROW NO-BREAK SPACE
0x21d4: r'$\Leftrightarrow$',
# Docutils footnote symbols:
0x2660: r'$\spadesuit$',
0x2663: r'$\clubsuit$',
}
# Unicode chars that are recognized by LaTeX's utf8 encoding
utf8_supported_unicode = {
0x00AB: r'\guillemotleft{}', # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00bb: r'\guillemotright{}', # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x200C: r'\textcompwordmark{}', # ZERO WIDTH NON-JOINER
0x2013: r'\textendash{}',
0x2014: r'\textemdash{}',
0x2018: r'\textquoteleft{}',
0x2019: r'\textquoteright{}',
0x201A: r'\quotesinglbase{}', # SINGLE LOW-9 QUOTATION MARK
0x201C: r'\textquotedblleft{}',
0x201D: r'\textquotedblright{}',
0x201E: r'\quotedblbase{}', # DOUBLE LOW-9 QUOTATION MARK
0x2030: r'\textperthousand{}', # PER MILLE SIGN
0x2031: r'\textpertenthousand{}', # PER TEN THOUSAND SIGN
0x2039: r'\guilsinglleft{}',
0x203A: r'\guilsinglright{}',
0x2423: r'\textvisiblespace{}', # OPEN BOX
0x2020: r'\dag{}',
0x2021: r'\ddag{}',
0x2026: r'\dots{}',
0x2122: r'\texttrademark{}',
}
# recognized with 'utf8', if textcomp is loaded
textcomp = {
# Latin-1 Supplement
0x00a2: r'\textcent{}', # ¢ CENT SIGN
0x00a4: r'\textcurrency{}', # ¤ CURRENCY SYMBOL
0x00a5: r'\textyen{}', # ¥ YEN SIGN
0x00a6: r'\textbrokenbar{}', # ¦ BROKEN BAR
0x00a7: r'\textsection{}', # § SECTION SIGN
0x00a8: r'\textasciidieresis{}', # ¨ DIAERESIS
0x00a9: r'\textcopyright{}', # © COPYRIGHT SIGN
0x00aa: r'\textordfeminine{}', # ª FEMININE ORDINAL INDICATOR
0x00ac: r'\textlnot{}', # ¬ NOT SIGN
0x00ae: r'\textregistered{}', # ® REGISTERED SIGN
0x00af: r'\textasciimacron{}', # ¯ MACRON
0x00b0: r'\textdegree{}', # ° DEGREE SIGN
0x00b1: r'\textpm{}', # ± PLUS-MINUS SIGN
0x00b2: r'\texttwosuperior{}', # ² SUPERSCRIPT TWO
0x00b3: r'\textthreesuperior{}', # ³ SUPERSCRIPT THREE
0x00b4: r'\textasciiacute{}', # ´ ACUTE ACCENT
0x00b5: r'\textmu{}', # µ MICRO SIGN
0x00b6: r'\textparagraph{}', # ¶ PILCROW SIGN # != \textpilcrow
0x00b9: r'\textonesuperior{}', # ¹ SUPERSCRIPT ONE
0x00ba: r'\textordmasculine{}', # º MASCULINE ORDINAL INDICATOR
0x00bc: r'\textonequarter{}', # 1/4 FRACTION
0x00bd: r'\textonehalf{}', # 1/2 FRACTION
0x00be: r'\textthreequarters{}', # 3/4 FRACTION
0x00d7: r'\texttimes{}', # × MULTIPLICATION SIGN
0x00f7: r'\textdiv{}', # ÷ DIVISION SIGN
# others
0x0192: r'\textflorin{}', # LATIN SMALL LETTER F WITH HOOK
0x02b9: r'\textasciiacute{}', # MODIFIER LETTER PRIME
0x02ba: r'\textacutedbl{}', # MODIFIER LETTER DOUBLE PRIME
0x2016: r'\textbardbl{}', # DOUBLE VERTICAL LINE
0x2022: r'\textbullet{}', # BULLET
0x2032: r'\textasciiacute{}', # PRIME
0x2033: r'\textacutedbl{}', # DOUBLE PRIME
0x2035: r'\textasciigrave{}', # REVERSED PRIME
0x2036: r'\textgravedbl{}', # REVERSED DOUBLE PRIME
0x203b: r'\textreferencemark{}', # REFERENCE MARK
0x203d: r'\textinterrobang{}', # INTERROBANG
0x2044: r'\textfractionsolidus{}', # FRACTION SLASH
0x2045: r'\textlquill{}', # LEFT SQUARE BRACKET WITH QUILL
0x2046: r'\textrquill{}', # RIGHT SQUARE BRACKET WITH QUILL
0x2052: r'\textdiscount{}', # COMMERCIAL MINUS SIGN
0x20a1: r'\textcolonmonetary{}', # COLON SIGN
0x20a3: r'\textfrenchfranc{}', # FRENCH FRANC SIGN
0x20a4: r'\textlira{}', # LIRA SIGN
0x20a6: r'\textnaira{}', # NAIRA SIGN
0x20a9: r'\textwon{}', # WON SIGN
0x20ab: r'\textdong{}', # DONG SIGN
0x20ac: r'\texteuro{}', # EURO SIGN
0x20b1: r'\textpeso{}', # PESO SIGN
0x20b2: r'\textguarani{}', # GUARANI SIGN
0x2103: r'\textcelsius{}', # DEGREE CELSIUS
0x2116: r'\textnumero{}', # NUMERO SIGN
0x2117: r'\textcircledP{}', # SOUND RECORDING COPYRIGHT
0x211e: r'\textrecipe{}', # PRESCRIPTION TAKE
0x2120: r'\textservicemark{}', # SERVICE MARK
0x2122: r'\texttrademark{}', # TRADE MARK SIGN
0x2126: r'\textohm{}', # OHM SIGN
0x2127: r'\textmho{}', # INVERTED OHM SIGN
0x212e: r'\textestimated{}', # ESTIMATED SYMBOL
0x2190: r'\textleftarrow{}', # LEFTWARDS ARROW
0x2191: r'\textuparrow{}', # UPWARDS ARROW
0x2192: r'\textrightarrow{}', # RIGHTWARDS ARROW
0x2193: r'\textdownarrow{}', # DOWNWARDS ARROW
0x2212: r'\textminus{}', # MINUS SIGN
0x2217: r'\textasteriskcentered{}', # ASTERISK OPERATOR
0x221a: r'\textsurd{}', # SQUARE ROOT
0x2422: r'\textblank{}', # BLANK SYMBOL
0x25e6: r'\textopenbullet{}', # WHITE BULLET
0x25ef: r'\textbigcircle{}', # LARGE CIRCLE
0x266a: r'\textmusicalnote{}', # EIGHTH NOTE
0x26ad: r'\textmarried{}', # MARRIAGE SYMBOL
0x26ae: r'\textdivorced{}', # DIVORCE SYMBOL
0x27e8: r'\textlangle{}', # MATHEMATICAL LEFT ANGLE BRACKET
0x27e9: r'\textrangle{}', # MATHEMATICAL RIGHT ANGLE BRACKET
}
# Unicode chars that require a feature/package to render
pifont = {
0x2665: r'\ding{170}', # black heartsuit
0x2666: r'\ding{169}', # black diamondsuit
0x2713: r'\ding{51}', # check mark
0x2717: r'\ding{55}', # ballot X (cross mark)
}
# TODO: greek alphabet ... ?
# see also LaTeX codec
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/252124
# and unimap.py from TeXML
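# Usage sketch (Python 3 str.translate with the ordinal-to-string maps above):
#   'A & B_c #1'.translate(CharMaps.special)
#   -> 'A \& B\_c \#1'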
class DocumentClass(object):
"""Details of a LaTeX document class."""
def __init__(self, document_class, with_part=False):
self.document_class = document_class
self._with_part = with_part
self.sections = ['section', 'subsection', 'subsubsection',
'paragraph', 'subparagraph']
if self.document_class in ('book', 'memoir', 'report',
'scrbook', 'scrreprt'):
self.sections.insert(0, 'chapter')
if self._with_part:
self.sections.insert(0, 'part')
def section(self, level):
"""Return the LaTeX section name for section `level`.
The name depends on the specific document class.
Level is 1,2,3..., as level 0 is the title.
"""
if level <= len(self.sections):
return self.sections[level-1]
else: # unsupported levels
return 'DUtitle[section%s]' % roman.toRoman(level)
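# Usage sketch:
#   DocumentClass('article').section(1) -> 'section'
#   DocumentClass('book').section(1)    -> 'chapter'
#   DocumentClass('article').section(9) -> 'DUtitle[sectionIX]'  (unsupported level)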
class Table(object):
"""Manage a table while traversing.
Maybe change to a mixin defining the visit/departs, but then
class Table internal variables are in the Translator.
Table style might be
:standard: horizontal and vertical lines
:booktabs: only horizontal lines (requires "booktabs" LaTeX package)
:borderless: no borders around table cells
:nolines: alias for borderless
:colwidths-auto: column widths determined by LaTeX
:colwidths-given: use column widths from rST source
"""
def __init__(self, translator, latex_type):
self._translator = translator
self._latex_type = latex_type
self._open = False
# miscellaneous attributes
self._attrs = {}
self._col_width = []
self._rowspan = []
self.stubs = []
self.colwidths_auto = False
self._in_thead = 0
def open(self):
self._open = True
self._col_specs = []
self.caption = []
self._attrs = {}
self._in_head = False # maybe context with search
def close(self):
self._open = False
self._col_specs = None
self.caption = []
self._attrs = {}
self.stubs = []
self.colwidths_auto = False
def is_open(self):
return self._open
def set_table_style(self, table_style, classes):
borders = [cls.replace('nolines', 'borderless')
for cls in table_style+classes
if cls in ('standard','booktabs','borderless', 'nolines')]
try:
self.borders = borders[-1]
except IndexError:
self.borders = 'standard'
self.colwidths_auto = (('colwidths-auto' in classes
and 'colwidths-given' not in table_style)
or ('colwidths-auto' in table_style
and ('colwidths-given' not in classes)))
def get_latex_type(self):
if self._latex_type == 'longtable' and not self.caption:
# do not advance the "table" counter (requires "ltcaption" package)
return('longtable*')
return self._latex_type
def set(self,attr,value):
self._attrs[attr] = value
def get(self,attr):
if attr in self._attrs:
return self._attrs[attr]
return None
def get_vertical_bar(self):
if self.borders == 'standard':
return '|'
return ''
# horizontal lines are drawn below a row,
def get_opening(self):
align_map = {'left': 'l',
'center': 'c',
'right': 'r'}
align = align_map.get(self.get('align') or 'center')
opening = [r'\begin{%s}[%s]' % (self.get_latex_type(), align)]
if not self.colwidths_auto:
opening.insert(0, r'\setlength{\DUtablewidth}{\linewidth}')
return '\n'.join(opening)
def get_closing(self):
closing = []
if self.borders == 'booktabs':
closing.append(r'\bottomrule')
# elif self.borders == 'standard':
# closing.append(r'\hline')
closing.append(r'\end{%s}' % self.get_latex_type())
return '\n'.join(closing)
def visit_colspec(self, node):
self._col_specs.append(node)
# "stubs" list is an attribute of the tgroup element:
self.stubs.append(node.attributes.get('stub'))
def get_colspecs(self, node):
"""Return column specification for longtable.
Assumes a reST line length of 80 characters.
Table width is hairy.
=== ===
ABC DEF
=== ===
usually gets too narrow, therefore we add 1 (a fiddle factor).
"""
bar = self.get_vertical_bar()
self._rowspan= [0] * len(self._col_specs)
self._col_width = []
if self.colwidths_auto:
latex_table_spec = (bar+'l')*len(self._col_specs)
return latex_table_spec+bar
width = 80
total_width = 0.0
# first see if we get too wide.
for node in self._col_specs:
colwidth = float(node['colwidth']+1) / width
total_width += colwidth
# do not make it full linewidth
factor = 0.93
if total_width > 1.0:
factor /= total_width
latex_table_spec = ''
for node in self._col_specs:
colwidth = factor * float(node['colwidth']+1) / width
self._col_width.append(colwidth+0.005)
latex_table_spec += '%sp{%.3f\\DUtablewidth}' % (bar, colwidth+0.005)
return latex_table_spec+bar
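# Example (sketch): for the two-column "ABC DEF" table in the docstring,
# each column has colwidth 3, so (3+1)/80 = 0.05 per column and 0.1 total;
# since 0.1 <= 1.0, the 0.93 factor applies unchanged and the generated spec
# is roughly '|p{0.05\DUtablewidth}|p{0.05\DUtablewidth}|' with "standard" borders.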
def get_column_width(self):
"""Return columnwidth for current cell (not multicell)."""
try:
return '%.2f\\DUtablewidth' % self._col_width[self._cell_in_row]
except IndexError:
return '*'
def get_multicolumn_width(self, start, len_):
"""Return sum of columnwidths for multicell."""
try:
mc_width = sum([width
for width in ([self._col_width[start + co]
for co in range (len_)])])
return 'p{%.2f\\DUtablewidth}' % mc_width
except IndexError:
return 'l'
def get_caption(self):
if not self.caption:
return ''
caption = ''.join(self.caption)
if 1 == self._translator.thead_depth():
return r'\caption{%s}\\' '\n' % caption
return r'\caption[]{%s (... continued)}\\' '\n' % caption
def need_recurse(self):
if self._latex_type == 'longtable':
return 1 == self._translator.thead_depth()
return 0
def visit_thead(self):
self._in_thead += 1
if self.borders == 'standard':
return ['\\hline\n']
elif self.borders == 'booktabs':
return ['\\toprule\n']
return []
def depart_thead(self):
a = []
#if self.borders == 'standard':
# a.append('\\hline\n')
if self.borders == 'booktabs':
a.append('\\midrule\n')
if self._latex_type == 'longtable':
if 1 == self._translator.thead_depth():
a.append('\\endfirsthead\n')
else:
a.append('\\endhead\n')
a.append(r'\multicolumn{%d}{c}' % len(self._col_specs) +
r'{\hfill ... continued on next page} \\')
a.append('\n\\endfoot\n\\endlastfoot\n')
# for longtable one could add firsthead, foot and lastfoot
self._in_thead -= 1
return a
def visit_row(self):
self._cell_in_row = 0
def depart_row(self):
res = [' \\\\\n']
self._cell_in_row = None # remove cell counter
for i in range(len(self._rowspan)):
if (self._rowspan[i]>0):
self._rowspan[i] -= 1
if self.borders == 'standard':
rowspans = [i+1 for i in range(len(self._rowspan))
if (self._rowspan[i]<=0)]
if len(rowspans)==len(self._rowspan):
res.append('\\hline\n')
else:
cline = ''
rowspans.reverse()
# TODO merge clines
while True:
try:
c_start = rowspans.pop()
except:
break
cline += '\\cline{%d-%d}\n' % (c_start,c_start)
res.append(cline)
return res
def set_rowspan(self,cell,value):
try:
self._rowspan[cell] = value
except:
pass
def get_rowspan(self,cell):
try:
return self._rowspan[cell]
except:
return 0
def get_entry_number(self):
return self._cell_in_row
def visit_entry(self):
self._cell_in_row += 1
def is_stub_column(self):
if len(self.stubs) >= self._cell_in_row:
return self.stubs[self._cell_in_row]
return False
class LaTeXTranslator(nodes.NodeVisitor):
# When options are given to the documentclass, latex will pass them
# to other packages, as done with babel.
# Dummy settings might be taken from document settings
# Write code for typesetting with 8-bit tex/pdftex (vs. xetex/luatex) engine
# overwritten by the XeTeX writer
is_xetex = False
# Config setting defaults
# -----------------------
# TODO: use mixins for different implementations.
# list environment for docinfo. else tabularx
## use_optionlist_for_docinfo = False # TODO: NOT YET IN USE
# Use compound enumerations (1.A.1.)
compound_enumerators = False
# If using compound enumerations, include section information.
section_prefix_for_enumerators = False
# This is the character that separates the section ("." subsection ...)
# prefix from the regular list enumerator.
section_enumerator_separator = '-'
# Auxiliary variables
# -------------------
has_latex_toc = False # is there a toc in the doc? (needed by minitoc)
is_toc_list = False # is the current bullet_list a ToC?
section_level = 0
# Flags to encode():
# inside citation reference labels, underscores don't need to be escaped
inside_citation_reference_label = False
verbatim = False # do not encode
insert_non_breaking_blanks = False # replace blanks by "~"
insert_newline = False # add latex newline commands
literal = False # literal text (block or inline)
alltt = False # inside `alltt` environment
def __init__(self, document, babel_class=Babel):
nodes.NodeVisitor.__init__(self, document)
# Reporter
# ~~~~~~~~
self.warn = self.document.reporter.warning
self.error = self.document.reporter.error
# Settings
# ~~~~~~~~
self.settings = settings = document.settings
self.latex_encoding = self.to_latex_encoding(settings.output_encoding)
self.use_latex_toc = settings.use_latex_toc
self.use_latex_docinfo = settings.use_latex_docinfo
self._use_latex_citations = settings.use_latex_citations
self._reference_label = settings.reference_label
self.hyperlink_color = settings.hyperlink_color
self.compound_enumerators = settings.compound_enumerators
self.font_encoding = getattr(settings, 'font_encoding', '')
self.section_prefix_for_enumerators = (
settings.section_prefix_for_enumerators)
self.section_enumerator_separator = (
settings.section_enumerator_separator.replace('_', r'\_'))
# literal blocks:
self.literal_block_env = 'alltt'
self.literal_block_options = ''
if settings.literal_block_env != '':
(none,
self.literal_block_env,
self.literal_block_options,
             none) = re.split(r'(\w+)(.*)', settings.literal_block_env)
elif settings.use_verbatim_when_possible:
self.literal_block_env = 'verbatim'
#
if self.settings.use_bibtex:
self.bibtex = self.settings.use_bibtex.split(',',1)
# TODO avoid errors on not declared citations.
else:
self.bibtex = None
# language module for Docutils-generated text
# (labels, bibliographic_fields, and author_separators)
self.language_module = languages.get_language(settings.language_code,
document.reporter)
self.babel = babel_class(settings.language_code, document.reporter)
self.author_separator = self.language_module.author_separators[0]
d_options = [self.settings.documentoptions]
if self.babel.language not in ('english', ''):
d_options.append(self.babel.language)
self.documentoptions = ','.join([_f for _f in d_options if _f])
self.d_class = DocumentClass(settings.documentclass,
settings.use_part_section)
# graphic package options:
if self.settings.graphicx_option == '':
self.graphicx_package = r'\usepackage{graphicx}'
elif self.settings.graphicx_option.lower() == 'auto':
self.graphicx_package = PreambleCmds.graphicx_auto
else:
self.graphicx_package = (r'\usepackage[%s]{graphicx}' %
self.settings.graphicx_option)
# footnotes:
self.docutils_footnotes = settings.docutils_footnotes
# @@ table_style: list of values from fixed set: warn?
# for s in self.settings.table_style:
# if s not in Writer.table_style_values:
# self.warn('Ignoring value "%s" in "table-style" setting.' %s)
# Output collection stacks
# ~~~~~~~~~~~~~~~~~~~~~~~~
# Document parts
self.head_prefix = [r'\documentclass[%s]{%s}' %
(self.documentoptions, self.settings.documentclass)]
self.requirements = SortableDict() # made a list in depart_document()
self.requirements['__static'] = r'\usepackage{ifthen}'
self.latex_preamble = [settings.latex_preamble]
self.fallbacks = SortableDict() # made a list in depart_document()
self.pdfsetup = [] # PDF properties (hyperref package)
self.title = []
self.subtitle = []
self.titledata = [] # \title, \author, \date
## self.body_prefix = ['\\begin{document}\n']
self.body_pre_docinfo = [] # \maketitle
self.docinfo = []
self.dedication = []
self.abstract = []
self.body = []
## self.body_suffix = ['\\end{document}\n']
        # A heterogeneous stack used in conjunction with the tree traversal.
# Make sure that the pops correspond to the pushes:
self.context = []
# Title metadata:
self.title_labels = []
self.subtitle_labels = []
# (if use_latex_docinfo: collects lists of
# author/organization/contact/address lines)
self.author_stack = []
self.date = []
# PDF properties: pdftitle, pdfauthor
# TODO?: pdfcreator, pdfproducer, pdfsubject, pdfkeywords
self.pdfinfo = []
self.pdfauthor = []
# Stack of section counters so that we don't have to use_latex_toc.
# This will grow and shrink as processing occurs.
# Initialized for potential first-level sections.
self._section_number = [0]
# The current stack of enumerations so that we can expand
# them into a compound enumeration.
self._enumeration_counters = []
# The maximum number of enumeration counters we've used.
# If we go beyond this number, we need to create a new
# counter; otherwise, just reuse an old one.
self._max_enumeration_counters = 0
self._bibitems = []
        # object for the table currently being processed.
self.table_stack = []
self.active_table = Table(self, 'longtable')
# Where to collect the output of visitor methods (default: body)
self.out = self.body
self.out_stack = [] # stack of output collectors
# Process settings
# ~~~~~~~~~~~~~~~~
# Encodings:
# Docutils' output-encoding => TeX input encoding
if self.latex_encoding != 'ascii':
self.requirements['_inputenc'] = (r'\usepackage[%s]{inputenc}'
% self.latex_encoding)
# TeX font encoding
if not self.is_xetex:
if self.font_encoding:
self.requirements['_fontenc'] = (r'\usepackage[%s]{fontenc}' %
self.font_encoding)
# ensure \textquotedbl is defined:
for enc in self.font_encoding.split(','):
enc = enc.strip()
if enc == 'OT1':
self.requirements['_textquotedblOT1'] = (
r'\DeclareTextSymbol{\textquotedbl}{OT1}{`\"}')
elif enc not in ('T1', 'T2A', 'T2B', 'T2C', 'T4', 'T5'):
self.requirements['_textquotedbl'] = (
r'\DeclareTextSymbolDefault{\textquotedbl}{T1}')
# page layout with typearea (if there are relevant document options)
if (settings.documentclass.find('scr') == -1 and
(self.documentoptions.find('DIV') != -1 or
self.documentoptions.find('BCOR') != -1)):
self.requirements['typearea'] = r'\usepackage{typearea}'
# Stylesheets
# (the name `self.stylesheet` is singular because only one
# stylesheet was supported before Docutils 0.6).
self.stylesheet = [self.stylesheet_call(path)
for path in utils.get_stylesheet_list(settings)]
# PDF setup
if self.hyperlink_color in ('0', 'false', 'False', ''):
self.hyperref_options = ''
else:
self.hyperref_options = 'colorlinks=true,linkcolor=%s,urlcolor=%s' % (
self.hyperlink_color, self.hyperlink_color)
if settings.hyperref_options:
self.hyperref_options += ',' + settings.hyperref_options
# LaTeX Toc
# include all supported sections in toc and PDF bookmarks
# (or use documentclass-default (as currently))?
## if self.use_latex_toc:
## self.requirements['tocdepth'] = (r'\setcounter{tocdepth}{%d}' %
## len(self.d_class.sections))
# Section numbering
if settings.sectnum_xform: # section numbering by Docutils
PreambleCmds.secnumdepth = r'\setcounter{secnumdepth}{0}'
else: # section numbering by LaTeX:
secnumdepth = settings.sectnum_depth
# Possible values of settings.sectnum_depth:
# None "sectnum" directive without depth arg -> LaTeX default
# 0 no "sectnum" directive -> no section numbers
# >0 value of "depth" argument -> translate to LaTeX levels:
# -1 part (0 with "article" document class)
# 0 chapter (missing in "article" document class)
# 1 section
# 2 subsection
# 3 subsubsection
# 4 paragraph
# 5 subparagraph
if secnumdepth is not None:
# limit to supported levels
secnumdepth = min(secnumdepth, len(self.d_class.sections))
# adjust to document class and use_part_section settings
if 'chapter' in self.d_class.sections:
secnumdepth -= 1
if self.d_class.sections[0] == 'part':
secnumdepth -= 1
PreambleCmds.secnumdepth = \
r'\setcounter{secnumdepth}{%d}' % secnumdepth
# start with specified number:
if (hasattr(settings, 'sectnum_start') and
settings.sectnum_start != 1):
self.requirements['sectnum_start'] = (
r'\setcounter{%s}{%d}' % (self.d_class.sections[0],
settings.sectnum_start-1))
# TODO: currently ignored (configure in a stylesheet):
## settings.sectnum_prefix
## settings.sectnum_suffix
# Auxiliary Methods
# -----------------
def stylesheet_call(self, path):
"""Return code to reference or embed stylesheet file `path`"""
# is it a package (no extension or *.sty) or "normal" tex code:
(base, ext) = os.path.splitext(path)
is_package = ext in ['.sty', '']
# Embed content of style file:
if self.settings.embed_stylesheet:
if is_package:
path = base + '.sty' # ensure extension
try:
content = io.FileInput(source_path=path,
encoding='utf-8').read()
self.settings.record_dependencies.add(path)
except IOError as err:
msg = "Cannot embed stylesheet '%s':\n %s." % (
path, SafeString(err.strerror))
self.document.reporter.error(msg)
return '% ' + msg.replace('\n', '\n% ')
if is_package:
content = '\n'.join([r'\makeatletter',
content,
r'\makeatother'])
return '%% embedded stylesheet: %s\n%s' % (path, content)
# Link to style file:
if is_package:
path = base # drop extension
cmd = r'\usepackage{%s}'
else:
cmd = r'\input{%s}'
if self.settings.stylesheet_path:
# adapt path relative to output (cf. config.html#stylesheet-path)
path = utils.relative_path(self.settings._destination, path)
return cmd % path
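    # Illustrative behaviour (sketch, hypothetical file names): 'mystyle.sty'
    # links as '\usepackage{mystyle}', 'preamble.tex' as '\input{preamble.tex}';
    # with --embed-stylesheet the file content is copied into the preamble
    # (packages wrapped in \makeatletter ... \makeatother).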
def to_latex_encoding(self,docutils_encoding):
"""Translate docutils encoding name into LaTeX's.
        By default, remove "-" and "_" chars from docutils_encoding.
"""
tr = { 'iso-8859-1': 'latin1', # west european
'iso-8859-2': 'latin2', # east european
'iso-8859-3': 'latin3', # esperanto, maltese
'iso-8859-4': 'latin4', # north european, scandinavian, baltic
'iso-8859-5': 'iso88595', # cyrillic (ISO)
'iso-8859-9': 'latin5', # turkish
'iso-8859-15': 'latin9', # latin9, update to latin1.
'mac_cyrillic': 'maccyr', # cyrillic (on Mac)
'windows-1251': 'cp1251', # cyrillic (on Windows)
'koi8-r': 'koi8-r', # cyrillic (Russian)
'koi8-u': 'koi8-u', # cyrillic (Ukrainian)
'windows-1250': 'cp1250', #
'windows-1252': 'cp1252', #
'us-ascii': 'ascii', # ASCII (US)
# unmatched encodings
#'': 'applemac',
#'': 'ansinew', # windows 3.1 ansi
#'': 'ascii', # ASCII encoding for the range 32--127.
#'': 'cp437', # dos latin us
#'': 'cp850', # dos latin 1
#'': 'cp852', # dos latin 2
#'': 'decmulti',
#'': 'latin10',
#'iso-8859-6': '' # arabic
#'iso-8859-7': '' # greek
#'iso-8859-8': '' # hebrew
#'iso-8859-10': '' # latin6, more complete iso-8859-4
}
encoding = docutils_encoding.lower()
if encoding in tr:
return tr[encoding]
# drop hyphen or low-line from "latin-1", "latin_1", "utf-8" and similar
encoding = encoding.replace('_', '').replace('-', '')
# strip the error handler
return encoding.split(':')[0]
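    # Examples (illustrative): 'iso-8859-1' -> 'latin1' (table lookup);
    # 'utf-8:strict' -> 'utf8' (hyphen and error handler stripped).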
def language_label(self, docutil_label):
return self.language_module.labels[docutil_label]
def encode(self, text):
"""Return text with 'problematic' characters escaped.
* Escape the special printing characters ``# $ % & ~ _ ^ \ { }``,
square brackets ``[ ]``, double quotes and (in OT1) ``< | >``.
* Translate non-supported Unicode characters.
* Separate ``-`` (and more in literal text) to prevent input ligatures.
"""
if self.verbatim:
return text
# Set up the translation table:
table = CharMaps.alltt.copy()
if not self.alltt:
table.update(CharMaps.special)
# keep the underscore in citation references
if self.inside_citation_reference_label:
            del table[ord('_')]
# Workarounds for OT1 font-encoding
if self.font_encoding in ['OT1', ''] and not self.is_xetex:
# * out-of-order characters in cmtt
if self.literal:
# replace underscore by underlined blank,
# because this has correct width.
table[ord('_')] = '\\underline{~}'
# the backslash doesn't work, so we use a mirrored slash.
# \reflectbox is provided by graphicx:
self.requirements['graphicx'] = self.graphicx_package
table[ord('\\')] = r'\reflectbox{/}'
# * ``< | >`` come out as different chars (except for cmtt):
else:
table[ord('|')] = r'\textbar{}'
table[ord('<')] = r'\textless{}'
table[ord('>')] = r'\textgreater{}'
if self.insert_non_breaking_blanks:
table[ord(' ')] = r'~'
# Unicode replacements for 8-bit tex engines (not required with XeTeX/LuaTeX):
if not self.is_xetex:
table.update(CharMaps.unsupported_unicode)
if not self.latex_encoding.startswith('utf8'):
table.update(CharMaps.utf8_supported_unicode)
table.update(CharMaps.textcomp)
table.update(CharMaps.pifont)
# Characters that require a feature/package to render
if [True for ch in text if ord(ch) in CharMaps.textcomp]:
self.requirements['textcomp'] = PreambleCmds.textcomp
if [True for ch in text if ord(ch) in CharMaps.pifont]:
self.requirements['pifont'] = '\\usepackage{pifont}'
text = text.translate(table)
# Break up input ligatures e.g. '--' to '-{}-'.
if not self.is_xetex: # Not required with xetex/luatex
separate_chars = '-'
# In monospace-font, we also separate ',,', '``' and "''" and some
# other characters which can't occur in non-literal text.
if self.literal:
separate_chars += ',`\'"<>'
for char in separate_chars * 2:
# Do it twice ("* 2") because otherwise we would replace
# '---' by '-{}--'.
text = text.replace(char + char, char + '{}' + char)
# Literal line breaks (in address or literal blocks):
if self.insert_newline:
lines = text.split('\n')
# Add a protected space to blank lines (except the last)
# to avoid ``! LaTeX Error: There's no line here to end.``
for i, line in enumerate(lines[:-1]):
if not line.lstrip():
lines[i] += '~'
text = (r'\\' + '\n').join(lines)
if self.literal and not self.insert_non_breaking_blanks:
# preserve runs of spaces but allow wrapping
            text = text.replace('  ', ' ~')
return text
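    # Illustrative results with default flags on an 8-bit TeX engine:
    #   encode('100% sure') -> '100\% sure'
    #   encode('x_1 & y')   -> 'x\_1 \& y'
    #   encode('--')        -> '-{}-'    (input-ligature protection)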
def attval(self, text,
whitespace=re.compile('[\n\r\t\v\f]')):
"""Cleanse, encode, and return attribute value text."""
return self.encode(whitespace.sub(' ', text))
# TODO: is this used anywhere? -> update (use template) or delete
## def astext(self):
## """Assemble document parts and return as string."""
## head = '\n'.join(self.head_prefix + self.stylesheet + self.head)
## body = ''.join(self.body_prefix + self.body + self.body_suffix)
## return head + '\n' + body
def is_inline(self, node):
"""Check whether a node represents an inline or block-level element"""
return isinstance(node.parent, nodes.TextElement)
def append_hypertargets(self, node):
"""Append hypertargets for all ids of `node`"""
# hypertarget places the anchor at the target's baseline,
        # so we raise it explicitly
self.out.append('%\n'.join(['\\raisebox{1em}{\\hypertarget{%s}{}}' %
id for id in node['ids']]))
def ids_to_labels(self, node, set_anchor=True):
"""Return list of label definitions for all ids of `node`
If `set_anchor` is True, an anchor is set with \phantomsection.
"""
labels = ['\\label{%s}' % id for id in node.get('ids', [])]
if set_anchor and labels:
labels.insert(0, '\\phantomsection')
return labels
def push_output_collector(self, new_out):
self.out_stack.append(self.out)
self.out = new_out
def pop_output_collector(self):
self.out = self.out_stack.pop()
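    # Usage pattern (illustrative): visitors that write to a document part
    # other than the body bracket their work with this pair, e.g.
    #   self.push_output_collector(self.abstract)
    #   ... append output ...
    #   self.pop_output_collector()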
# Visitor methods
# ---------------
def visit_Text(self, node):
self.out.append(self.encode(node.astext()))
def depart_Text(self, node):
pass
def visit_abbreviation(self, node):
node['classes'].insert(0, 'abbreviation')
self.visit_inline(node)
def depart_abbreviation(self, node):
self.depart_inline(node)
def visit_acronym(self, node):
node['classes'].insert(0, 'acronym')
self.visit_inline(node)
def depart_acronym(self, node):
self.depart_inline(node)
def visit_address(self, node):
self.visit_docinfo_item(node, 'address')
def depart_address(self, node):
self.depart_docinfo_item(node)
def visit_admonition(self, node):
self.fallbacks['admonition'] = PreambleCmds.admonition
if 'error' in node['classes']:
self.fallbacks['error'] = PreambleCmds.error
# strip the generic 'admonition' from the list of classes
node['classes'] = [cls for cls in node['classes']
if cls != 'admonition']
self.out.append('\n\\DUadmonition[%s]{\n' % ','.join(node['classes']))
def depart_admonition(self, node=None):
self.out.append('}\n')
def visit_author(self, node):
self.visit_docinfo_item(node, 'author')
def depart_author(self, node):
self.depart_docinfo_item(node)
def visit_authors(self, node):
# not used: visit_author is called anyway for each author.
pass
def depart_authors(self, node):
pass
def visit_block_quote(self, node):
self.out.append( '%\n\\begin{quote}\n')
if node['classes']:
self.visit_inline(node)
def depart_block_quote(self, node):
if node['classes']:
self.depart_inline(node)
self.out.append( '\n\\end{quote}\n')
def visit_bullet_list(self, node):
if self.is_toc_list:
self.out.append( '%\n\\begin{list}{}{}\n' )
else:
self.out.append( '%\n\\begin{itemize}\n' )
# if node['classes']:
# self.visit_inline(node)
def depart_bullet_list(self, node):
# if node['classes']:
# self.depart_inline(node)
if self.is_toc_list:
self.out.append( '\n\\end{list}\n' )
else:
self.out.append( '\n\\end{itemize}\n' )
def visit_superscript(self, node):
self.out.append(r'\textsuperscript{')
if node['classes']:
self.visit_inline(node)
def depart_superscript(self, node):
if node['classes']:
self.depart_inline(node)
self.out.append('}')
def visit_subscript(self, node):
self.out.append(r'\textsubscript{') # requires `fixltx2e`
if node['classes']:
self.visit_inline(node)
def depart_subscript(self, node):
if node['classes']:
self.depart_inline(node)
self.out.append('}')
def visit_caption(self, node):
self.out.append('\n\\caption{')
def depart_caption(self, node):
self.out.append('}\n')
def visit_title_reference(self, node):
self.fallbacks['titlereference'] = PreambleCmds.titlereference
self.out.append(r'\DUroletitlereference{')
if node['classes']:
self.visit_inline(node)
def depart_title_reference(self, node):
if node['classes']:
self.depart_inline(node)
self.out.append( '}' )
def visit_citation(self, node):
# TODO maybe use cite bibitems
if self._use_latex_citations:
self.push_output_collector([])
else:
# TODO: do we need these?
## self.requirements['~fnt_floats'] = PreambleCmds.footnote_floats
self.out.append(r'\begin{figure}[b]')
self.append_hypertargets(node)
def depart_citation(self, node):
if self._use_latex_citations:
label = self.out[0]
text = ''.join(self.out[1:])
self._bibitems.append([label, text])
self.pop_output_collector()
else:
self.out.append('\\end{figure}\n')
def visit_citation_reference(self, node):
if self._use_latex_citations:
if not self.inside_citation_reference_label:
self.out.append(r'\cite{')
                self.inside_citation_reference_label = True
else:
assert self.body[-1] in (' ', '\n'),\
'unexpected non-whitespace while in reference label'
del self.body[-1]
else:
href = ''
if 'refid' in node:
href = node['refid']
elif 'refname' in node:
href = self.document.nameids[node['refname']]
self.out.append('\\hyperlink{%s}{[' % href)
def depart_citation_reference(self, node):
if self._use_latex_citations:
followup_citation = False
# check for a following citation separated by a space or newline
next_siblings = node.traverse(descend=False, siblings=True,
include_self=False)
if len(next_siblings) > 1:
next = next_siblings[0]
if (isinstance(next, nodes.Text) and
next.astext() in (' ', '\n')):
if next_siblings[1].__class__ == node.__class__:
followup_citation = True
if followup_citation:
self.out.append(',')
else:
self.out.append('}')
self.inside_citation_reference_label = False
else:
self.out.append(']}')
def visit_classifier(self, node):
self.out.append( '(\\textbf{' )
def depart_classifier(self, node):
self.out.append( '})\n' )
def visit_colspec(self, node):
self.active_table.visit_colspec(node)
def depart_colspec(self, node):
pass
def visit_comment(self, node):
# Precede every line with a comment sign, wrap in newlines
self.out.append('\n%% %s\n' % node.astext().replace('\n', '\n% '))
raise nodes.SkipNode
def depart_comment(self, node):
pass
def visit_compound(self, node):
pass
def depart_compound(self, node):
pass
def visit_contact(self, node):
self.visit_docinfo_item(node, 'contact')
def depart_contact(self, node):
self.depart_docinfo_item(node)
def visit_container(self, node):
pass
def depart_container(self, node):
pass
def visit_copyright(self, node):
self.visit_docinfo_item(node, 'copyright')
def depart_copyright(self, node):
self.depart_docinfo_item(node)
def visit_date(self, node):
self.visit_docinfo_item(node, 'date')
def depart_date(self, node):
self.depart_docinfo_item(node)
def visit_decoration(self, node):
# header and footer
pass
def depart_decoration(self, node):
pass
def visit_definition(self, node):
pass
def depart_definition(self, node):
self.out.append('\n')
def visit_definition_list(self, node):
self.out.append( '%\n\\begin{description}\n' )
def depart_definition_list(self, node):
self.out.append( '\\end{description}\n' )
def visit_definition_list_item(self, node):
pass
def depart_definition_list_item(self, node):
pass
def visit_description(self, node):
self.out.append(' ')
def depart_description(self, node):
pass
def visit_docinfo(self, node):
self.push_output_collector(self.docinfo)
def depart_docinfo(self, node):
self.pop_output_collector()
        # Some items (e.g. author) end up in other places
if self.docinfo:
# tabularx: automatic width of columns, no page breaks allowed.
self.requirements['tabularx'] = r'\usepackage{tabularx}'
self.fallbacks['_providelength'] = PreambleCmds.providelength
self.fallbacks['docinfo'] = PreambleCmds.docinfo
#
self.docinfo.insert(0, '\n% Docinfo\n'
'\\begin{center}\n'
'\\begin{tabularx}{\\DUdocinfowidth}{lX}\n')
self.docinfo.append('\\end{tabularx}\n'
'\\end{center}\n')
def visit_docinfo_item(self, node, name):
if name == 'author':
self.pdfauthor.append(self.attval(node.astext()))
if self.use_latex_docinfo:
if name in ('author', 'organization', 'contact', 'address'):
# We attach these to the last author. If any of them precedes
# the first author, put them in a separate "author" group
# (in lack of better semantics).
if name == 'author' or not self.author_stack:
self.author_stack.append([])
if name == 'address': # newlines are meaningful
self.insert_newline = True
text = self.encode(node.astext())
self.insert_newline = False
else:
text = self.attval(node.astext())
self.author_stack[-1].append(text)
raise nodes.SkipNode
elif name == 'date':
self.date.append(self.attval(node.astext()))
raise nodes.SkipNode
self.out.append('\\textbf{%s}: &\n\t' % self.language_label(name))
if name == 'address':
self.insert_newline = True
self.out.append('{\\raggedright\n')
self.context.append(' } \\\\\n')
else:
self.context.append(' \\\\\n')
def depart_docinfo_item(self, node):
self.out.append(self.context.pop())
# for address we did set insert_newline
self.insert_newline = False
def visit_doctest_block(self, node):
self.visit_literal_block(node)
def depart_doctest_block(self, node):
self.depart_literal_block(node)
def visit_document(self, node):
# titled document?
if (self.use_latex_docinfo or len(node) and
isinstance(node[0], nodes.title)):
self.title_labels += self.ids_to_labels(node, set_anchor=False)
def depart_document(self, node):
# Complete header with information gained from walkabout
# * language setup
if (self.babel.otherlanguages or
self.babel.language not in ('', 'english')):
self.requirements['babel'] = self.babel()
# * conditional requirements (before style sheet)
self.requirements = self.requirements.sortedvalues()
        # * conditional fallback definitions (after style sheet)
self.fallbacks = self.fallbacks.sortedvalues()
# * PDF properties
self.pdfsetup.append(PreambleCmds.linking % self.hyperref_options)
if self.pdfauthor:
authors = self.author_separator.join(self.pdfauthor)
self.pdfinfo.append(' pdfauthor={%s}' % authors)
if self.pdfinfo:
self.pdfsetup += [r'\hypersetup{'] + self.pdfinfo + ['}']
# Complete body
# * document title (with "use_latex_docinfo" also
# 'author', 'organization', 'contact', 'address' and 'date')
if self.title or (
self.use_latex_docinfo and (self.author_stack or self.date)):
# with the default template, titledata is written to the preamble
self.titledata.append('%%% Title Data')
# \title (empty \title prevents error with \maketitle)
if self.title:
                self.title.insert(0, '\\phantomsection%\n ')
title = [''.join(self.title)] + self.title_labels
if self.subtitle:
title += [r'\\ % subtitle',
r'\DUdocumentsubtitle{%s}' % ''.join(self.subtitle)
] + self.subtitle_labels
self.titledata.append(r'\title{%s}' % '%\n '.join(title))
# \author (empty \author prevents warning with \maketitle)
authors = ['\\\\\n'.join(author_entry)
for author_entry in self.author_stack]
self.titledata.append(r'\author{%s}' %
' \\and\n'.join(authors))
# \date (empty \date prevents defaulting to \today)
self.titledata.append(r'\date{%s}' % ', '.join(self.date))
# \maketitle in the body formats title with LaTeX
self.body_pre_docinfo.append('\\maketitle\n')
# * bibliography
# TODO insertion point of bibliography should be configurable.
if self._use_latex_citations and len(self._bibitems)>0:
if not self.bibtex:
widest_label = ''
for bi in self._bibitems:
if len(widest_label)<len(bi[0]):
widest_label = bi[0]
self.out.append('\n\\begin{thebibliography}{%s}\n' %
widest_label)
for bi in self._bibitems:
# cite_key: underscores must not be escaped
cite_key = bi[0].replace(r'\_','_')
self.out.append('\\bibitem[%s]{%s}{%s}\n' %
(bi[0], cite_key, bi[1]))
self.out.append('\\end{thebibliography}\n')
else:
self.out.append('\n\\bibliographystyle{%s}\n' %
self.bibtex[0])
self.out.append('\\bibliography{%s}\n' % self.bibtex[1])
# * make sure to generate a toc file if needed for local contents:
if 'minitoc' in self.requirements and not self.has_latex_toc:
self.out.append('\n\\faketableofcontents % for local ToCs\n')
def visit_emphasis(self, node):
self.out.append('\\emph{')
if node['classes']:
self.visit_inline(node)
def depart_emphasis(self, node):
if node['classes']:
self.depart_inline(node)
self.out.append('}')
    # Append column delimiters and advance the column counter
    # if the current cell is a multi-row continuation.
def insert_additional_table_colum_delimiters(self):
while self.active_table.get_rowspan(
self.active_table.get_entry_number()):
self.out.append(' & ')
self.active_table.visit_entry() # increment cell count
def visit_entry(self, node):
# cell separation
if self.active_table.get_entry_number() == 0:
self.insert_additional_table_colum_delimiters()
else:
self.out.append(' & ')
# multirow, multicolumn
if 'morerows' in node and 'morecols' in node:
raise NotImplementedError('Cells that '
'span multiple rows *and* columns currently not supported, sorry.')
# TODO: should be possible with LaTeX, see e.g.
# http://texblog.org/2012/12/21/multi-column-and-multi-row-cells-in-latex-tables/
# multirow in LaTeX simply will enlarge the cell over several rows
# (the following n if n is positive, the former if negative).
if 'morerows' in node:
self.requirements['multirow'] = r'\usepackage{multirow}'
mrows = node['morerows'] + 1
self.active_table.set_rowspan(
self.active_table.get_entry_number(), mrows)
self.out.append('\\multirow{%d}{%s}{' %
(mrows, self.active_table.get_column_width()))
self.context.append('}')
elif 'morecols' in node:
# the vertical bar before column is missing if it is the first
# column. the one after always.
if self.active_table.get_entry_number() == 0:
bar1 = self.active_table.get_vertical_bar()
else:
bar1 = ''
mcols = node['morecols'] + 1
self.out.append('\\multicolumn{%d}{%s%s%s}{' %
(mcols, bar1,
self.active_table.get_multicolumn_width(
self.active_table.get_entry_number(),
mcols),
self.active_table.get_vertical_bar()))
self.context.append('}')
else:
self.context.append('')
# bold header/stub-column
if len(node) and (isinstance(node.parent.parent, nodes.thead)
or self.active_table.is_stub_column()):
self.out.append('\\textbf{')
self.context.append('}')
else:
self.context.append('')
# if line ends with '{', mask line break to prevent spurious whitespace
if not self.active_table.colwidths_auto and self.out[-1].endswith("{"):
self.out.append("%")
self.active_table.visit_entry() # increment cell count
def depart_entry(self, node):
self.out.append(self.context.pop()) # header / not header
self.out.append(self.context.pop()) # multirow/column
# insert extra "&"s, if following rows are spanned from above:
self.insert_additional_table_colum_delimiters()
def visit_row(self, node):
self.active_table.visit_row()
def depart_row(self, node):
self.out.extend(self.active_table.depart_row())
def visit_enumerated_list(self, node):
# enumeration styles:
types = {'': '',
'arabic':'arabic',
'loweralpha':'alph',
'upperalpha':'Alph',
'lowerroman':'roman',
'upperroman':'Roman'}
        # the 4 default LaTeX enumeration labels: prefix, enumtype, suffix:
labels = [('', 'arabic', '.'), # 1.
('(', 'alph', ')'), # (a)
('', 'roman', '.'), # i.
('', 'Alph', '.')] # A.
prefix = ''
if self.compound_enumerators:
if (self.section_prefix_for_enumerators and self.section_level
and not self._enumeration_counters):
prefix = '.'.join([str(n) for n in
self._section_number[:self.section_level]]
) + self.section_enumerator_separator
if self._enumeration_counters:
prefix += self._enumeration_counters[-1]
# TODO: use LaTeX default for unspecified label-type?
# (needs change of parser)
prefix += node.get('prefix', '')
        enumtype = types[node.get('enumtype', '')]
suffix = node.get('suffix', '')
enumeration_level = len(self._enumeration_counters)+1
counter_name = 'enum' + roman.toRoman(enumeration_level).lower()
label = r'%s\%s{%s}%s' % (prefix, enumtype, counter_name, suffix)
self._enumeration_counters.append(label)
if enumeration_level <= 4:
self.out.append('\\begin{enumerate}\n')
if (prefix, enumtype, suffix
) != labels[enumeration_level-1]:
self.out.append('\\renewcommand{\\label%s}{%s}\n' %
(counter_name, label))
else:
self.fallbacks[counter_name] = '\\newcounter{%s}' % counter_name
self.out.append('\\begin{list}')
self.out.append('{%s}' % label)
self.out.append('{\\usecounter{%s}}\n' % counter_name)
if 'start' in node:
self.out.append('\\setcounter{%s}{%d}\n' %
(counter_name,node['start']-1))
# ## set rightmargin equal to leftmargin
# self.out.append('\\setlength{\\rightmargin}{\\leftmargin}\n')
def depart_enumerated_list(self, node):
if len(self._enumeration_counters) <= 4:
self.out.append('\\end{enumerate}\n')
else:
self.out.append('\\end{list}\n')
self._enumeration_counters.pop()
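    # Illustrative: an 'upperroman' list at the first level gets the label
    # '\Roman{enumi}.'; since this differs from the LaTeX default '1.', the
    # writer emits '\renewcommand{\labelenumi}{\Roman{enumi}.}'.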
def visit_field(self, node):
# real output is done in siblings: _argument, _body, _name
pass
def depart_field(self, node):
self.out.append('\n')
##self.out.append('%[depart_field]\n')
def visit_field_argument(self, node):
self.out.append('%[visit_field_argument]\n')
def depart_field_argument(self, node):
self.out.append('%[depart_field_argument]\n')
def visit_field_body(self, node):
pass
def depart_field_body(self, node):
if self.out is self.docinfo:
self.out.append(r'\\')
def visit_field_list(self, node):
if self.out is not self.docinfo:
self.fallbacks['fieldlist'] = PreambleCmds.fieldlist
self.out.append('%\n\\begin{DUfieldlist}\n')
def depart_field_list(self, node):
if self.out is not self.docinfo:
self.out.append('\\end{DUfieldlist}\n')
def visit_field_name(self, node):
if self.out is self.docinfo:
self.out.append('\\textbf{')
else:
# Commands with optional args inside an optional arg must be put
# in a group, e.g. ``\item[{\hyperref[label]{text}}]``.
self.out.append('\\item[{')
def depart_field_name(self, node):
if self.out is self.docinfo:
self.out.append('}: &')
else:
self.out.append(':}]')
def visit_figure(self, node):
self.requirements['float_settings'] = PreambleCmds.float_settings
# The 'align' attribute sets the "outer alignment",
# for "inner alignment" use LaTeX default alignment (similar to HTML)
alignment = node.attributes.get('align', 'center')
if alignment != 'center':
# The LaTeX "figure" environment always uses the full linewidth,
# so "outer alignment" is ignored. Just write a comment.
# TODO: use the wrapfigure environment?
self.out.append('\n\\begin{figure} %% align = "%s"\n' % alignment)
else:
self.out.append('\n\\begin{figure}\n')
if node.get('ids'):
self.out += self.ids_to_labels(node) + ['\n']
def depart_figure(self, node):
self.out.append('\\end{figure}\n')
def visit_footer(self, node):
self.push_output_collector([])
self.out.append(r'\newcommand{\DUfooter}{')
def depart_footer(self, node):
self.out.append('}')
self.requirements['~footer'] = ''.join(self.out)
self.pop_output_collector()
def visit_footnote(self, node):
try:
backref = node['backrefs'][0]
except IndexError:
backref = node['ids'][0] # no backref, use self-ref instead
if self.docutils_footnotes:
self.fallbacks['footnotes'] = PreambleCmds.footnotes
num = node[0].astext()
if self.settings.footnote_references == 'brackets':
num = '[%s]' % num
self.out.append('%%\n\\DUfootnotetext{%s}{%s}{%s}{' %
(node['ids'][0], backref, self.encode(num)))
if node['ids'] == node['names']:
self.out += self.ids_to_labels(node)
# mask newline to prevent spurious whitespace if paragraph follows:
if node[1:] and isinstance(node[1], nodes.paragraph):
self.out.append('%')
## else: # TODO: "real" LaTeX \footnote{}s
def depart_footnote(self, node):
self.out.append('}\n')
def visit_footnote_reference(self, node):
href = ''
if 'refid' in node:
href = node['refid']
elif 'refname' in node:
href = self.document.nameids[node['refname']]
# if not self.docutils_footnotes:
# TODO: insert footnote content at (or near) this place
# print "footnote-ref to", node['refid']
# footnotes = (self.document.footnotes +
# self.document.autofootnotes +
# self.document.symbol_footnotes)
# for footnote in footnotes:
# # print footnote['ids']
# if node.get('refid', '') in footnote['ids']:
# print 'matches', footnote['ids']
format = self.settings.footnote_references
if format == 'brackets':
self.append_hypertargets(node)
self.out.append('\\hyperlink{%s}{[' % href)
self.context.append(']}')
else:
self.fallbacks['footnotes'] = PreambleCmds.footnotes
self.out.append(r'\DUfootnotemark{%s}{%s}{' %
(node['ids'][0], href))
self.context.append('}')
def depart_footnote_reference(self, node):
self.out.append(self.context.pop())
# footnote/citation label
def label_delim(self, node, bracket, superscript):
if isinstance(node.parent, nodes.footnote):
raise nodes.SkipNode
else:
assert isinstance(node.parent, nodes.citation)
if not self._use_latex_citations:
self.out.append(bracket)
def visit_label(self, node):
"""footnote or citation label: in brackets or as superscript"""
self.label_delim(node, '[', '\\textsuperscript{')
def depart_label(self, node):
self.label_delim(node, ']', '}')
# elements generated by the framework e.g. section numbers.
def visit_generated(self, node):
pass
def depart_generated(self, node):
pass
def visit_header(self, node):
self.push_output_collector([])
self.out.append(r'\newcommand{\DUheader}{')
def depart_header(self, node):
self.out.append('}')
self.requirements['~header'] = ''.join(self.out)
self.pop_output_collector()
def to_latex_length(self, length_str, pxunit=None):
"""Convert `length_str` with rst lenght to LaTeX length
"""
if pxunit is not None:
sys.stderr.write('deprecation warning: LaTeXTranslator.to_latex_length()'
' option `pxunit` will be removed.')
        match = re.match(r'(\d*\.?\d*)\s*(\S*)', length_str)
if not match:
return length_str
value, unit = match.groups()[:2]
# no unit or "DTP" points (called 'bp' in TeX):
if unit in ('', 'pt'):
length_str = '%sbp' % value
# percentage: relate to current line width
elif unit == '%':
length_str = '%.3f\\linewidth' % (float(value)/100.0)
elif self.is_xetex and unit == 'px':
# XeTeX does not know the length unit px.
# Use \pdfpxdimen, the macro to set the value of 1 px in pdftex.
# This way, configuring works the same for pdftex and xetex.
self.fallbacks['_providelength'] = PreambleCmds.providelength
self.fallbacks['px'] = '\n\\DUprovidelength{\\pdfpxdimen}{1bp}\n'
length_str = r'%s\pdfpxdimen' % value
return length_str
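    # Example conversions (illustrative): '10' or '10pt' -> '10bp',
    # '50%' -> '0.500\linewidth', '2em' -> '2em' (passed through);
    # with XeTeX, '3px' -> '3\pdfpxdimen'.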
def visit_image(self, node):
self.requirements['graphicx'] = self.graphicx_package
attrs = node.attributes
# Convert image URI to a local file path
imagepath = urllib.request.url2pathname(attrs['uri']).replace('\\', '/')
# alignment defaults:
if not 'align' in attrs:
# Set default align of image in a figure to 'center'
if isinstance(node.parent, nodes.figure):
attrs['align'] = 'center'
# query 'align-*' class argument
for cls in node['classes']:
if cls.startswith('align-'):
attrs['align'] = cls.split('-')[1]
# pre- and postfix (prefix inserted in reverse order)
pre = []
post = []
include_graphics_options = []
align_codes = {
# inline images: by default latex aligns the bottom.
'bottom': ('', ''),
'middle': (r'\raisebox{-0.5\height}{', '}'),
'top': (r'\raisebox{-\height}{', '}'),
# block level images:
'center': (r'\noindent\makebox[\linewidth][c]{', '}'),
'left': (r'\noindent{', r'\hfill}'),
'right': (r'\noindent{\hfill', '}'),}
if 'align' in attrs:
# TODO: warn or ignore non-applicable alignment settings?
try:
align_code = align_codes[attrs['align']]
pre.append(align_code[0])
post.append(align_code[1])
except KeyError:
pass # TODO: warn?
if 'height' in attrs:
include_graphics_options.append('height=%s' %
self.to_latex_length(attrs['height']))
if 'scale' in attrs:
include_graphics_options.append('scale=%f' %
(attrs['scale'] / 100.0))
if 'width' in attrs:
include_graphics_options.append('width=%s' %
self.to_latex_length(attrs['width']))
if not (self.is_inline(node) or
isinstance(node.parent, nodes.figure)):
pre.append('\n')
post.append('\n')
pre.reverse()
self.out.extend(pre)
options = ''
if include_graphics_options:
options = '[%s]' % (','.join(include_graphics_options))
self.out.append('\\includegraphics%s{%s}' % (options, imagepath))
self.out.extend(post)
def depart_image(self, node):
if node.get('ids'):
self.out += self.ids_to_labels(node) + ['\n']
def visit_inline(self, node): # <span>, i.e. custom roles
self.context.append('}' * len(node['classes']))
for cls in node['classes']:
if cls == 'align-center':
self.fallbacks['align-center'] = PreambleCmds.align_center
if cls.startswith('language-'):
language = self.babel.language_name(cls[9:])
if language:
self.babel.otherlanguages[language] = True
self.out.append(r'\foreignlanguage{%s}{' % language)
else:
self.fallbacks['inline'] = PreambleCmds.inline
self.out.append(r'\DUrole{%s}{' % cls)
def depart_inline(self, node):
self.out.append(self.context.pop())
def visit_interpreted(self, node):
# @@@ Incomplete, pending a proper implementation on the
# Parser/Reader end.
self.visit_literal(node)
def depart_interpreted(self, node):
self.depart_literal(node)
def visit_legend(self, node):
self.fallbacks['legend'] = PreambleCmds.legend
self.out.append('\\begin{DUlegend}')
def depart_legend(self, node):
self.out.append('\\end{DUlegend}\n')
def visit_line(self, node):
        self.out.append(r'\item[] ')
def depart_line(self, node):
self.out.append('\n')
def visit_line_block(self, node):
self.fallbacks['_providelength'] = PreambleCmds.providelength
self.fallbacks['lineblock'] = PreambleCmds.lineblock
if isinstance(node.parent, nodes.line_block):
self.out.append('\\item[]\n'
'\\begin{DUlineblock}{\\DUlineblockindent}\n')
else:
self.out.append('\n\\begin{DUlineblock}{0em}\n')
if node['classes']:
self.visit_inline(node)
self.out.append('\n')
def depart_line_block(self, node):
if node['classes']:
self.depart_inline(node)
self.out.append('\n')
self.out.append('\\end{DUlineblock}\n')
def visit_list_item(self, node):
self.out.append('\n\\item ')
def depart_list_item(self, node):
pass
def visit_literal(self, node):
self.literal = True
if 'code' in node['classes'] and (
self.settings.syntax_highlight != 'none'):
self.requirements['color'] = PreambleCmds.color
self.fallbacks['code'] = PreambleCmds.highlight_rules
self.out.append('\\texttt{')
if node['classes']:
self.visit_inline(node)
def depart_literal(self, node):
self.literal = False
if node['classes']:
self.depart_inline(node)
self.out.append('}')
# Literal blocks are used for '::'-prefixed literal-indented
# blocks of text, where the inline markup is not recognized,
# but are also the product of the "parsed-literal" directive,
# where the markup is respected.
#
# In both cases, we want to use a typewriter/monospaced typeface.
# For "real" literal-blocks, we can use \verbatim, while for all
# the others we must use \mbox or \alltt.
#
# We can distinguish between the two kinds by the number of
# siblings that compose this node: if it is composed by a
# single element, it's either
# * a real one,
# * a parsed-literal that does not contain any markup, or
# * a parsed-literal containing just one markup construct.
def is_plaintext(self, node):
"""Check whether a node can be typeset verbatim"""
return (len(node) == 1) and isinstance(node[0], nodes.Text)
def visit_literal_block(self, node):
"""Render a literal block."""
# environments and packages to typeset literal blocks
packages = {'alltt': r'\usepackage{alltt}',
'listing': r'\usepackage{moreverb}',
'lstlisting': r'\usepackage{listings}',
'Verbatim': r'\usepackage{fancyvrb}',
# 'verbatim': '',
'verbatimtab': r'\usepackage{moreverb}'}
if node.get('ids'):
self.out += ['\n'] + self.ids_to_labels(node)
if not self.active_table.is_open():
# no quote inside tables, to avoid vertical space between
# table border and literal block.
# TODO: fails if normal text precedes the literal block.
# check parent node instead?
self.out.append('%\n\\begin{quote}\n')
self.context.append('\n\\end{quote}\n')
else:
self.out.append('\n')
self.context.append('\n')
if self.is_plaintext(node):
environment = self.literal_block_env
self.requirements['literal_block'] = packages.get(environment, '')
if environment == 'alltt':
self.alltt = True
else:
self.verbatim = True
self.out.append('\\begin{%s}%s\n' %
(environment, self.literal_block_options))
self.context.append('\n\\end{%s}' % environment)
else:
self.literal = True
self.insert_newline = True
self.insert_non_breaking_blanks = True
if 'code' in node['classes'] and (
self.settings.syntax_highlight != 'none'):
self.requirements['color'] = PreambleCmds.color
self.fallbacks['code'] = PreambleCmds.highlight_rules
self.out.append('{\\ttfamily \\raggedright \\noindent\n')
self.context.append('\n}')
def depart_literal_block(self, node):
self.insert_non_breaking_blanks = False
self.insert_newline = False
self.literal = False
self.verbatim = False
self.alltt = False
self.out.append(self.context.pop())
self.out.append(self.context.pop())
## def visit_meta(self, node):
## self.out.append('[visit_meta]\n')
# TODO: set keywords for pdf?
# But:
# The reStructuredText "meta" directive creates a "pending" node,
# which contains knowledge that the embedded "meta" node can only
# be handled by HTML-compatible writers. The "pending" node is
# resolved by the docutils.transforms.components.Filter transform,
# which checks that the calling writer supports HTML; if it doesn't,
# the "pending" node (and enclosed "meta" node) is removed from the
# document.
# --- docutils/docs/peps/pep-0258.html#transformer
## def depart_meta(self, node):
## self.out.append('[depart_meta]\n')
def visit_math(self, node, math_env='$'):
"""math role"""
if node['classes']:
self.visit_inline(node)
self.requirements['amsmath'] = r'\usepackage{amsmath}'
math_code = node.astext().translate(unichar2tex.uni2tex_table)
if node.get('ids'):
math_code = '\n'.join([math_code] + self.ids_to_labels(node))
if math_env == '$':
if self.alltt:
                wrapper = r'\(%s\)'
else:
wrapper = '$%s$'
else:
wrapper = '\n'.join(['%%',
r'\begin{%s}' % math_env,
'%s',
r'\end{%s}' % math_env])
# print repr(wrapper), repr(math_code)
self.out.append(wrapper % math_code)
if node['classes']:
self.depart_inline(node)
# Content already processed:
raise nodes.SkipNode
def depart_math(self, node):
pass # never reached
def visit_math_block(self, node):
math_env = pick_math_environment(node.astext())
self.visit_math(node, math_env=math_env)
def depart_math_block(self, node):
pass # never reached
def visit_option(self, node):
if self.context[-1]:
# this is not the first option
self.out.append(', ')
def depart_option(self, node):
# flag that the first option is done.
self.context[-1] += 1
def visit_option_argument(self, node):
"""Append the delimiter betweeen an option and its argument to body."""
self.out.append(node.get('delimiter', ' '))
def depart_option_argument(self, node):
pass
def visit_option_group(self, node):
self.out.append('\n\\item[')
# flag for first option
self.context.append(0)
def depart_option_group(self, node):
self.context.pop() # the flag
self.out.append('] ')
def visit_option_list(self, node):
self.fallbacks['_providelength'] = PreambleCmds.providelength
self.fallbacks['optionlist'] = PreambleCmds.optionlist
self.out.append('%\n\\begin{DUoptionlist}\n')
def depart_option_list(self, node):
self.out.append('\n\\end{DUoptionlist}\n')
def visit_option_list_item(self, node):
pass
def depart_option_list_item(self, node):
pass
def visit_option_string(self, node):
##self.out.append(self.starttag(node, 'span', '', CLASS='option'))
pass
def depart_option_string(self, node):
##self.out.append('</span>')
pass
def visit_organization(self, node):
self.visit_docinfo_item(node, 'organization')
def depart_organization(self, node):
self.depart_docinfo_item(node)
def visit_paragraph(self, node):
# insert blank line, unless
# * the paragraph is first in a list item,
# * follows a non-paragraph node in a compound,
# * is in a table with auto-width columns
index = node.parent.index(node)
if (index == 0 and (isinstance(node.parent, nodes.list_item) or
isinstance(node.parent, nodes.description))):
pass
elif (index > 0 and isinstance(node.parent, nodes.compound) and
not isinstance(node.parent[index - 1], nodes.paragraph) and
not isinstance(node.parent[index - 1], nodes.compound)):
pass
elif self.active_table.colwidths_auto:
if index == 1: # second paragraph
self.warn('LaTeX merges paragraphs in tables '
'with auto-sized columns!', base_node=node)
if index > 0:
self.out.append('\n')
else:
self.out.append('\n')
if node.get('ids'):
self.out += self.ids_to_labels(node) + ['\n']
if node['classes']:
self.visit_inline(node)
def depart_paragraph(self, node):
if node['classes']:
self.depart_inline(node)
if not self.active_table.colwidths_auto:
self.out.append('\n')
def visit_problematic(self, node):
self.requirements['color'] = PreambleCmds.color
self.out.append('%\n')
self.append_hypertargets(node)
self.out.append(r'\hyperlink{%s}{\textbf{\color{red}' % node['refid'])
def depart_problematic(self, node):
self.out.append('}}')
def visit_raw(self, node):
if not 'latex' in node.get('format', '').split():
raise nodes.SkipNode
if not self.is_inline(node):
self.out.append('\n')
if node['classes']:
self.visit_inline(node)
# append "as-is" skipping any LaTeX-encoding
self.verbatim = True
def depart_raw(self, node):
self.verbatim = False
if node['classes']:
self.depart_inline(node)
if not self.is_inline(node):
self.out.append('\n')
def has_unbalanced_braces(self, string):
"""Test whether there are unmatched '{' or '}' characters."""
level = 0
for ch in string:
if ch == '{':
level += 1
if ch == '}':
level -= 1
if level < 0:
return True
return level != 0
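    # Illustrative: has_unbalanced_braces('a{b}') -> False;
    # has_unbalanced_braces('}{') and has_unbalanced_braces('{') -> True.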
def visit_reference(self, node):
# We need to escape #, \, and % if we use the URL in a command.
special_chars = {ord('#'): r'\#',
ord('%'): r'\%',
ord('\\'): r'\\',
}
# external reference (URL)
if 'refuri' in node:
href = str(node['refuri']).translate(special_chars)
# problematic chars double caret and unbalanced braces:
if href.find('^^') != -1 or self.has_unbalanced_braces(href):
self.error(
'External link "%s" not supported by LaTeX.\n'
' (Must not contain "^^" or unbalanced braces.)' % href)
if node['refuri'] == node.astext():
self.out.append(r'\url{%s}' % href)
raise nodes.SkipNode
self.out.append(r'\href{%s}{' % href)
return
# internal reference
if 'refid' in node:
href = node['refid']
elif 'refname' in node:
href = self.document.nameids[node['refname']]
else:
raise AssertionError('Unknown reference.')
if not self.is_inline(node):
self.out.append('\n')
self.out.append('\\hyperref[%s]{' % href)
if self._reference_label:
self.out.append('\\%s{%s}}' %
(self._reference_label, href.replace('#', '')))
raise nodes.SkipNode
def depart_reference(self, node):
self.out.append('}')
if not self.is_inline(node):
self.out.append('\n')
def visit_revision(self, node):
self.visit_docinfo_item(node, 'revision')
def depart_revision(self, node):
self.depart_docinfo_item(node)
def visit_section(self, node):
self.section_level += 1
# Initialize counter for potential subsections:
self._section_number.append(0)
# Counter for this section's level (initialized by parent section):
self._section_number[self.section_level - 1] += 1
def depart_section(self, node):
# Remove counter for potential subsections:
self._section_number.pop()
self.section_level -= 1
def visit_sidebar(self, node):
self.requirements['color'] = PreambleCmds.color
self.fallbacks['sidebar'] = PreambleCmds.sidebar
self.out.append('\n\\DUsidebar{\n')
def depart_sidebar(self, node):
self.out.append('}\n')
attribution_formats = {'dash': ('—', ''), # EM DASH
'parentheses': ('(', ')'),
'parens': ('(', ')'),
'none': ('', '')}
def visit_attribution(self, node):
prefix, suffix = self.attribution_formats[self.settings.attribution]
self.out.append('\\nopagebreak\n\n\\raggedleft ')
self.out.append(prefix)
self.context.append(suffix)
def depart_attribution(self, node):
self.out.append(self.context.pop() + '\n')
def visit_status(self, node):
self.visit_docinfo_item(node, 'status')
def depart_status(self, node):
self.depart_docinfo_item(node)
def visit_strong(self, node):
self.out.append('\\textbf{')
if node['classes']:
self.visit_inline(node)
def depart_strong(self, node):
if node['classes']:
self.depart_inline(node)
self.out.append('}')
def visit_substitution_definition(self, node):
raise nodes.SkipNode
def visit_substitution_reference(self, node):
self.unimplemented_visit(node)
def visit_subtitle(self, node):
if isinstance(node.parent, nodes.document):
self.push_output_collector(self.subtitle)
self.fallbacks['documentsubtitle'] = PreambleCmds.documentsubtitle
self.subtitle_labels += self.ids_to_labels(node, set_anchor=False)
# section subtitle: "starred" (no number, not in ToC)
elif isinstance(node.parent, nodes.section):
self.out.append(r'\%s*{' %
self.d_class.section(self.section_level + 1))
else:
self.fallbacks['subtitle'] = PreambleCmds.subtitle
self.out.append('\n\\DUsubtitle[%s]{' % node.parent.tagname)
def depart_subtitle(self, node):
if isinstance(node.parent, nodes.document):
self.pop_output_collector()
else:
self.out.append('}\n')
def visit_system_message(self, node):
self.requirements['color'] = PreambleCmds.color
self.fallbacks['title'] = PreambleCmds.title
node['classes'] = ['system-message']
self.visit_admonition(node)
self.out.append('\\DUtitle[system-message]{system-message}\n')
self.append_hypertargets(node)
try:
line = ', line~%s' % node['line']
except KeyError:
line = ''
        self.out.append('\n\n{\\color{red}%s/%s} in \\texttt{%s}%s\n' %
(node['type'], node['level'],
self.encode(node['source']), line))
if len(node['backrefs']) == 1:
self.out.append('\n\\hyperlink{%s}{' % node['backrefs'][0])
self.context.append('}')
else:
backrefs = ['\\hyperlink{%s}{%d}' % (href, i+1)
for (i, href) in enumerate(node['backrefs'])]
self.context.append('backrefs: ' + ' '.join(backrefs))
def depart_system_message(self, node):
self.out.append(self.context.pop())
self.depart_admonition()
def visit_table(self, node):
self.requirements['table'] = PreambleCmds.table
if self.active_table.is_open():
self.table_stack.append(self.active_table)
# nesting longtable does not work (e.g. 2007-04-18)
self.active_table = Table(self,'tabular')
# A longtable moves before \paragraph and \subparagraph
# section titles if it immediately follows them:
if (self.active_table._latex_type == 'longtable' and
isinstance(node.parent, nodes.section) and
node.parent.index(node) == 1 and
self.d_class.section(self.section_level).find('paragraph') != -1):
self.out.append('\\leavevmode')
self.active_table.open()
self.active_table.set_table_style(self.settings.table_style,
node['classes'])
if 'align' in node:
self.active_table.set('align', node['align'])
if self.active_table.borders == 'booktabs':
self.requirements['booktabs'] = r'\usepackage{booktabs}'
self.push_output_collector([])
def depart_table(self, node):
# wrap content in the right environment:
content = self.out
self.pop_output_collector()
self.out.append('\n' + self.active_table.get_opening())
self.out += content
self.out.append(self.active_table.get_closing() + '\n')
self.active_table.close()
if len(self.table_stack)>0:
self.active_table = self.table_stack.pop()
# Insert hyperlabel after (long)table, as
# other places (beginning, caption) result in LaTeX errors.
if node.get('ids'):
self.out += self.ids_to_labels(node, set_anchor=False) + ['\n']
def visit_target(self, node):
# Skip indirect targets:
if ('refuri' in node # external hyperlink
or 'refid' in node # resolved internal link
or 'refname' in node): # unresolved internal link
## self.out.append('%% %s\n' % node) # for debugging
return
self.out.append('%\n')
# do we need an anchor (\phantomsection)?
set_anchor = not(isinstance(node.parent, nodes.caption) or
isinstance(node.parent, nodes.title))
# TODO: where else can/must we omit the \phantomsection?
self.out += self.ids_to_labels(node, set_anchor)
def depart_target(self, node):
pass
def visit_tbody(self, node):
# BUG write preamble if not yet done (colspecs not [])
# for tables without heads.
if not self.active_table.get('preamble written'):
self.visit_thead(node)
self.depart_thead(None)
def depart_tbody(self, node):
pass
def visit_term(self, node):
"""definition list term"""
# Commands with optional args inside an optional arg must be put
# in a group, e.g. ``\item[{\hyperref[label]{text}}]``.
self.out.append('\\item[{')
def depart_term(self, node):
# \leavevmode results in a line break if the
# term is followed by an item list.
        self.out.append('}] \\leavevmode ')
def visit_tgroup(self, node):
#self.out.append(self.starttag(node, 'colgroup'))
#self.context.append('</colgroup>\n')
pass
def depart_tgroup(self, node):
pass
_thead_depth = 0
    def thead_depth(self):
return self._thead_depth
def visit_thead(self, node):
self._thead_depth += 1
if 1 == self.thead_depth():
self.out.append('{%s}\n' % self.active_table.get_colspecs(node))
self.active_table.set('preamble written',1)
self.out.append(self.active_table.get_caption())
self.out.extend(self.active_table.visit_thead())
def depart_thead(self, node):
if node is not None:
self.out.extend(self.active_table.depart_thead())
if self.active_table.need_recurse():
node.walkabout(self)
self._thead_depth -= 1
def visit_title(self, node):
"""Append section and other titles."""
# Document title
if node.parent.tagname == 'document':
self.push_output_collector(self.title)
self.context.append('')
self.pdfinfo.append(' pdftitle={%s},' %
self.encode(node.astext()))
# Topic titles (topic, admonition, sidebar)
elif (isinstance(node.parent, nodes.topic) or
isinstance(node.parent, nodes.admonition) or
isinstance(node.parent, nodes.sidebar)):
self.fallbacks['title'] = PreambleCmds.title
classes = ','.join(node.parent['classes'])
if not classes:
classes = node.tagname
self.out.append('\\DUtitle[%s]{' % classes)
self.context.append('}\n')
# Table caption
elif isinstance(node.parent, nodes.table):
self.push_output_collector(self.active_table.caption)
self.context.append('')
# Section title
else:
if hasattr(PreambleCmds, 'secnumdepth'):
self.requirements['secnumdepth'] = PreambleCmds.secnumdepth
section_name = self.d_class.section(self.section_level)
self.out.append('\n\n')
# System messages heading in red:
if ('system-messages' in node.parent['classes']):
self.requirements['color'] = PreambleCmds.color
section_title = self.encode(node.astext())
self.out.append(r'\%s[%s]{\color{red}' % (
section_name,section_title))
else:
self.out.append(r'\%s{' % section_name)
if self.section_level > len(self.d_class.sections):
# section level not supported by LaTeX
self.fallbacks['title'] = PreambleCmds.title
# self.out.append('\\phantomsection%\n ')
# label and ToC entry:
bookmark = ['']
# add sections with unsupported level to toc and pdfbookmarks?
## if self.section_level > len(self.d_class.sections):
## section_title = self.encode(node.astext())
## bookmark.append(r'\addcontentsline{toc}{%s}{%s}' %
## (section_name, section_title))
bookmark += self.ids_to_labels(node.parent, set_anchor=False)
self.context.append('%\n '.join(bookmark) + '%\n}\n')
# MAYBE postfix paragraph and subparagraph with \leavemode to
# ensure floats stay in the section and text starts on a new line.
def depart_title(self, node):
self.out.append(self.context.pop())
if (isinstance(node.parent, nodes.table) or
node.parent.tagname == 'document'):
self.pop_output_collector()
def minitoc(self, node, title, depth):
"""Generate a local table of contents with LaTeX package minitoc"""
section_name = self.d_class.section(self.section_level)
# name-prefix for current section level
minitoc_names = {'part': 'part', 'chapter': 'mini'}
if 'chapter' not in self.d_class.sections:
minitoc_names['section'] = 'sect'
try:
minitoc_name = minitoc_names[section_name]
except KeyError: # minitoc only supports part- and toplevel
self.warn('Skipping local ToC at %s level.\n' % section_name +
' Feature not supported with option "use-latex-toc"',
base_node=node)
return
# Requirements/Setup
self.requirements['minitoc'] = PreambleCmds.minitoc
self.requirements['minitoc-'+minitoc_name] = (r'\do%stoc' %
minitoc_name)
# depth: (Docutils defaults to unlimited depth)
maxdepth = len(self.d_class.sections)
self.requirements['minitoc-%s-depth' % minitoc_name] = (
r'\mtcsetdepth{%stoc}{%d}' % (minitoc_name, maxdepth))
# Process 'depth' argument (!Docutils stores a relative depth while
# minitoc expects an absolute depth!):
offset = {'sect': 1, 'mini': 0, 'part': 0}
if 'chapter' in self.d_class.sections:
offset['part'] = -1
if depth:
self.out.append('\\setcounter{%stocdepth}{%d}' %
(minitoc_name, depth + offset[minitoc_name]))
# title:
self.out.append('\\mtcsettitle{%stoc}{%s}\n' % (minitoc_name, title))
# the toc-generating command:
self.out.append('\\%stoc\n' % minitoc_name)
def visit_topic(self, node):
# Topic nodes can be generic topic, abstract, dedication, or ToC.
# table of contents:
if 'contents' in node['classes']:
self.out.append('\n')
self.out += self.ids_to_labels(node)
# add contents to PDF bookmarks sidebar
if isinstance(node.next_node(), nodes.title):
self.out.append('\n\\pdfbookmark[%d]{%s}{%s}\n' %
(self.section_level+1,
node.next_node().astext(),
node.get('ids', ['contents'])[0]
))
if self.use_latex_toc:
title = ''
if isinstance(node.next_node(), nodes.title):
title = self.encode(node.pop(0).astext())
depth = node.get('depth', 0)
if 'local' in node['classes']:
self.minitoc(node, title, depth)
self.context.append('')
return
if depth:
self.out.append('\\setcounter{tocdepth}{%d}\n' % depth)
if title != 'Contents':
self.out.append('\\renewcommand{\\contentsname}{%s}\n' %
title)
self.out.append('\\tableofcontents\n\n')
self.has_latex_toc = True
else: # Docutils generated contents list
# set flag for visit_bullet_list() and visit_title()
self.is_toc_list = True
self.context.append('')
elif ('abstract' in node['classes'] and
self.settings.use_latex_abstract):
self.push_output_collector(self.abstract)
self.out.append('\\begin{abstract}')
self.context.append('\\end{abstract}\n')
if isinstance(node.next_node(), nodes.title):
node.pop(0) # LaTeX provides its own title
else:
self.fallbacks['topic'] = PreambleCmds.topic
# special topics:
if 'abstract' in node['classes']:
self.fallbacks['abstract'] = PreambleCmds.abstract
self.push_output_collector(self.abstract)
if 'dedication' in node['classes']:
self.fallbacks['dedication'] = PreambleCmds.dedication
self.push_output_collector(self.dedication)
self.out.append('\n\\DUtopic[%s]{\n' % ','.join(node['classes']))
self.context.append('}\n')
def depart_topic(self, node):
self.out.append(self.context.pop())
self.is_toc_list = False
if ('abstract' in node['classes'] or
'dedication' in node['classes']):
self.pop_output_collector()
def visit_rubric(self, node):
self.fallbacks['rubric'] = PreambleCmds.rubric
self.out.append('\n\\DUrubric{')
self.context.append('}\n')
def depart_rubric(self, node):
self.out.append(self.context.pop())
def visit_transition(self, node):
self.fallbacks['transition'] = PreambleCmds.transition
self.out.append('\n\n')
self.out.append('%' + '_' * 75 + '\n')
self.out.append(r'\DUtransition')
self.out.append('\n\n')
def depart_transition(self, node):
pass
def visit_version(self, node):
self.visit_docinfo_item(node, 'version')
def depart_version(self, node):
self.depart_docinfo_item(node)
def unimplemented_visit(self, node):
raise NotImplementedError('visiting unimplemented node type: %s' %
node.__class__.__name__)
# def unknown_visit(self, node):
# def default_visit(self, node):
# vim: set ts=4 et ai :
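# --- Illustrative output (editor's sketch, not part of the original file) ---
# Based on visit_title() above, a topic/admonition/sidebar title is emitted as
#   \DUtitle[<comma-joined classes, or the tagname if empty>]{<title text>}
# and the matching closing brace is popped from the context stack in
# depart_title().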
|
bgris/ODL_bgris
|
lib/python3.5/site-packages/docutils/writers/latex2e/__init__.py
|
Python
|
gpl-3.0
| 124,872
|
[
"VisIt"
] |
2181994cf9e636ebd512678eb1c2b6b21455ed6817eaba203cb79fea8f58b267
|
# pylint: disable=protected-access
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import tempfile
from DIRAC.Interfaces.API.Job import Job
from DIRAC.tests.Utilities.utils import find_all
def helloWorldJob():
job = Job()
job.setName("helloWorld")
exeScriptLocation = find_all("exe-script.py", "..", "/DIRAC/tests/Integration")[0]
job.setInputSandbox(exeScriptLocation)
job.setExecutable(exeScriptLocation, "", "helloWorld.log")
return job
def parametricJob():
job = Job()
job.setName("parametric_helloWorld_%n")
exeScriptLocation = find_all("exe-script.py", "..", "/DIRAC/tests/Integration")[0]
job.setInputSandbox(exeScriptLocation)
job.setParameterSequence("args", ["one", "two", "three"])
job.setParameterSequence("iargs", [1, 2, 3])
job.setExecutable(exeScriptLocation, arguments=": testing %(args)s %(iargs)s", logFile="helloWorld_%n.log")
return job
def createFile(job):
tmpdir = tempfile.mkdtemp()
jobDescription = tmpdir + "/jobDescription.xml"
with open(jobDescription, "w") as fd:
fd.write(job._toXML())
return jobDescription
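# --- Illustrative usage (editor's sketch; assumes a configured DIRAC client
# environment where the test script paths above resolve) ---
# job = helloWorldJob()
# xml_path = createFile(job)  # temporary jobDescription.xml for WMS tests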
|
ic-hep/DIRAC
|
src/DIRAC/tests/Utilities/WMS.py
|
Python
|
gpl-3.0
| 1,186
|
[
"DIRAC"
] |
3c4ad0cb8591c993688ca1d8345c5de61bb31c36c8f976466b928e26c2f8f7b6
|
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import os
import urllib.request
import urllib.error
import logging
import collections
import mooseutils
from .LogHelper import LogHelper
def check_documents(documents, file_list=None, **kwargs):
"""
Tool for checking SQA document deficiencies
"""
# Setup logger, assume the names of the documents with a "log_" prefix are the logging flags (see get_documents)
log_default = kwargs.get('log_default', logging.ERROR)
for doc in documents:
kwargs.setdefault("log_" + doc.name, log_default)
logger = LogHelper(__name__, **kwargs)
# Setup file_list, if not provided
if (file_list is None) and (not mooseutils.git_is_repo()):
msg = "If the 'file_list' is not provided then the working directory must be a git repository."
raise ValueError(msg)
elif file_list is None:
root = mooseutils.git_root_dir()
file_list = mooseutils.git_ls_files(root, recurse_submodules=False)
# Perform document checks
for doc in documents:
_check_document(doc.name, doc.filename, file_list, logger)
return logger
def _check_document(name, filename, file_list, logger):
"""Helper for inspecting document"""
log_key = "log_" + name
if filename is None:
msg = "Missing value for '{}' document: {}".format(name, filename)
logger.log(log_key, msg)
elif filename.startswith('http'):
try:
response = urllib.request.urlopen(filename)
except urllib.error.URLError:
msg = "Invalid URL for '{}' document: {}".format(name, filename)
logger.log(log_key, msg)
else:
found = list()
for fname in file_list:
if fname.endswith(filename.split('#')[0]):
found.append(filename)
if len(found) == 0:
msg = "Failed to locate '{}' document: {}".format(name, filename)
logger.log(log_key, msg)
elif len(found) > 1:
msg = "Found multiple files for '{}' document:\n ".format(name)
msg += "\n ".join(found)
logger.log(log_key, msg)
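# --- Illustrative usage (editor's sketch; the document name and file paths
# below are hypothetical) ---
# import collections
# Document = collections.namedtuple('Document', ['name', 'filename'])
# docs = [Document(name='sdd', filename='sqa/app_sdd.md')]
# logger = check_documents(docs, file_list=['docs/sqa/app_sdd.md'])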
|
harterj/moose
|
python/moosesqa/check_documents.py
|
Python
|
lgpl-2.1
| 2,375
|
[
"MOOSE"
] |
bcb5088592a2c0d96cf801989a82138de6c0c9c8df72077b396ad06feb23158b
|
# -*- coding: utf-8 -*-
# Copyright 2012 splinter authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import re
from lxml.cssselect import CSSSelector
from zope.testbrowser.browser import Browser, ListControl
from splinter.element_list import ElementList
from splinter.exceptions import ElementDoesNotExist
from splinter.driver import DriverAPI, ElementAPI
from splinter.cookie_manager import CookieManagerAPI
import mimetypes
import lxml.html
import mechanize
import time
class CookieManager(CookieManagerAPI):
def __init__(self, browser_cookies):
self._cookies = browser_cookies
def add(self, cookies):
if isinstance(cookies, list):
for cookie in cookies:
for key, value in cookie.items():
self._cookies[key] = value
return
for key, value in cookies.items():
self._cookies[key] = value
def delete(self, *cookies):
if cookies:
for cookie in cookies:
try:
del self._cookies[cookie]
except KeyError:
pass
else:
self._cookies.clearAll()
def all(self, verbose=False):
cookies = {}
for key, value in self._cookies.items():
cookies[key] = value
return cookies
def __getitem__(self, item):
return self._cookies[item]
    def __eq__(self, other_object):
        if isinstance(other_object, dict):
            return dict(self._cookies) == other_object
        return NotImplemented
class ZopeTestBrowser(DriverAPI):
driver_name = "zope.testbrowser"
def __init__(self, user_agent=None, wait_time=2, ignore_robots=False):
self.wait_time = wait_time
mech_browser = self._get_mech_browser(user_agent, ignore_robots)
self._browser = Browser(mech_browser=mech_browser)
self._cookie_manager = CookieManager(self._browser.cookies)
self._last_urls = []
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
pass
def visit(self, url):
self._browser.open(url)
def back(self):
self._last_urls.insert(0, self.url)
self._browser.goBack()
def forward(self):
try:
self.visit(self._last_urls.pop())
except IndexError:
pass
def reload(self):
self._browser.reload()
def quit(self):
pass
@property
def htmltree(self):
return lxml.html.fromstring(self.html.decode('utf-8'))
@property
def title(self):
return self._browser.title
@property
def html(self):
return self._browser.contents
@property
def url(self):
return self._browser.url
def find_option_by_value(self, value):
html = self.htmltree
element = html.xpath('//option[@value="%s"]' % value)[0]
control = self._browser.getControl(element.text)
return ElementList([ZopeTestBrowserOptionElement(control, self)], find_by="value", query=value)
def find_option_by_text(self, text):
html = self.htmltree
element = html.xpath('//option[normalize-space(text())="%s"]' % text)[0]
control = self._browser.getControl(element.text)
return ElementList([ZopeTestBrowserOptionElement(control, self)], find_by="text", query=text)
def find_by_css(self, selector):
xpath = CSSSelector(selector).path
return self.find_by_xpath(xpath, original_find="css", original_selector=selector)
def find_by_xpath(self, xpath, original_find=None, original_selector=None):
html = self.htmltree
elements = []
for xpath_element in html.xpath(xpath):
if self._element_is_link(xpath_element):
return self._find_links_by_xpath(xpath)
elif self._element_is_control(xpath_element):
return self.find_by_name(xpath_element.name)
else:
elements.append(xpath_element)
find_by = original_find or "xpath"
query = original_selector or xpath
return ElementList(
[ZopeTestBrowserElement(element, self) for element in elements], find_by=find_by, query=query)
def find_by_tag(self, tag):
return self.find_by_xpath('//%s' % tag, original_find="tag", original_selector=tag)
def find_by_value(self, value):
return self.find_by_xpath('//*[@value="%s"]' % value, original_find="value", original_selector=value)
def find_by_id(self, id_value):
return self.find_by_xpath(
'//*[@id="%s"][1]' % id_value, original_find="id", original_selector=id_value)
def find_by_name(self, name):
elements = []
index = 0
while True:
try:
control = self._browser.getControl(name=name, index=index)
elements.append(control)
index += 1
except LookupError:
break
return ElementList(
[ZopeTestBrowserControlElement(element, self) for element in elements],
find_by="name", query=name)
def find_link_by_text(self, text):
return self._find_links_by_xpath("//a[text()='%s']" % text)
def find_link_by_href(self, href):
return self._find_links_by_xpath("//a[@href='%s']" % href)
def find_link_by_partial_href(self, partial_href):
return self._find_links_by_xpath("//a[contains(@href, '%s')]" % partial_href)
def find_link_by_partial_text(self, partial_text):
return self._find_links_by_xpath("//a[contains(normalize-space(.), '%s')]" % partial_text)
def fill(self, name, value):
self.find_by_name(name=name).first._control.value = value
def fill_form(self, field_values):
for name, value in field_values.items():
element = self.find_by_name(name)
control = element.first._control
if control.type == 'checkbox':
if value:
control.value = control.options
else:
control.value = []
elif control.type == 'radio':
control.value = [option for option in control.options if option == value]
elif control.type == 'select':
control.value = [value]
else:
# text, textarea, password, tel
control.value = value
def choose(self, name, value):
control = self._browser.getControl(name=name)
control.value = [option for option in control.options if option == value]
def check(self, name):
control = self._browser.getControl(name=name)
control.value = control.options
def uncheck(self, name):
control = self._browser.getControl(name=name)
control.value = []
def attach_file(self, name, file_path):
filename = file_path.split('/')[-1]
control = self._browser.getControl(name=name)
content_type, _ = mimetypes.guess_type(file_path)
        control.add_file(open(file_path, 'rb'), content_type, filename)
def _find_links_by_xpath(self, xpath):
html = self.htmltree
links = html.xpath(xpath)
return ElementList(
[ZopeTestBrowserLinkElement(link, self) for link in links], find_by="xpath", query=xpath)
def select(self, name, value):
self.find_by_name(name).first._control.value = [value]
def is_text_present(self, text, wait_time=None):
wait_time = wait_time or self.wait_time
end_time = time.time() + wait_time
while time.time() < end_time:
if self._is_text_present(text):
return True
return False
def _is_text_present(self, text):
try:
body = self.find_by_tag('body').first
return text in body.text
except ElementDoesNotExist:
# This exception will be thrown if the body tag isn't present
# This has occasionally been observed. Assume that the
# page isn't fully loaded yet
return False
def is_text_not_present(self, text, wait_time=None):
wait_time = wait_time or self.wait_time
end_time = time.time() + wait_time
while time.time() < end_time:
if not self._is_text_present(text):
return True
return False
def _element_is_link(self, element):
return element.tag == 'a'
def _element_is_control(self, element):
return hasattr(element, 'type')
def _get_mech_browser(self, user_agent, ignore_robots):
mech_browser = mechanize.Browser()
if user_agent is not None:
mech_browser.addheaders = [("User-agent", user_agent), ]
if ignore_robots:
mech_browser.set_handle_robots(False)
return mech_browser
@property
def cookies(self):
return self._cookie_manager
re_extract_inner_html = re.compile(r'^<[^<>]+>(.*)</[^<>]+>$')
class ZopeTestBrowserElement(ElementAPI):
def __init__(self, element, parent):
self._element = element
self.parent = parent
def __getitem__(self, attr):
return self._element.attrib[attr]
def find_by_css(self, selector):
elements = self._element.cssselect(selector)
return ElementList([self.__class__(element, self) for element in elements])
def find_by_xpath(self, selector):
elements = self._element.xpath(selector)
return ElementList([self.__class__(element, self) for element in elements])
def find_by_name(self, name):
elements = self._element.cssselect('[name="%s"]' % name)
return ElementList([self.__class__(element, self) for element in elements])
def find_by_tag(self, name):
elements = self._element.cssselect(name)
return ElementList([self.__class__(element, self) for element in elements])
def find_by_value(self, value):
elements = self._element.cssselect('[value="%s"]' % value)
return ElementList([self.__class__(element, self) for element in elements])
def find_by_id(self, id):
elements = self._element.cssselect('#%s' % id)
return ElementList([self.__class__(element, self) for element in elements])
@property
def value(self):
return self._element.text_content()
@property
def text(self):
return self.value
@property
def outer_html(self):
return lxml.html.tostring(self._element, encoding='unicode').strip()
@property
def html(self):
return re_extract_inner_html.match(self.outer_html).group(1)
def has_class(self, class_name):
return len(self._element.find_class(class_name)) > 0
class ZopeTestBrowserLinkElement(ZopeTestBrowserElement):
def __init__(self, element, parent):
super(ZopeTestBrowserLinkElement, self).__init__(element, parent)
self._browser = parent._browser
def __getitem__(self, attr):
return super(ZopeTestBrowserLinkElement, self).__getitem__(attr)
def click(self):
return self._browser.open(self["href"])
class ZopeTestBrowserControlElement(ZopeTestBrowserElement):
def __init__(self, control, parent):
self._control = control
self.parent = parent
def __getitem__(self, attr):
return self._control.mech_control.attrs[attr]
@property
def value(self):
value = self._control.value
if isinstance(self._control, ListControl) and len(value) == 1:
return value[0]
return value
@property
def checked(self):
return bool(self._control.value)
def click(self):
return self._control.click()
def fill(self, value):
self._control.value = value
def select(self, value):
self._control.value = [value]
class ZopeTestBrowserOptionElement(ZopeTestBrowserElement):
def __init__(self, control, parent):
self._control = control
self.parent = parent
def __getitem__(self, attr):
return self._control.mech_item.attrs[attr]
@property
def text(self):
return self._control.mech_item.get_labels()[0]._text
@property
def value(self):
return self._control.optionValue
@property
def selected(self):
return self._control.mech_item._selected
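# --- Illustrative usage (editor's sketch; the URL and field names are
# hypothetical) ---
# browser = ZopeTestBrowser(wait_time=2)
# browser.visit('http://example.com/search')
# browser.fill('q', 'splinter')
# if browser.is_text_present('results'):
#     browser.find_link_by_partial_text('next').first.click()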
|
lrowe/splinter
|
splinter/driver/zopetestbrowser.py
|
Python
|
bsd-3-clause
| 12,426
|
[
"VisIt"
] |
d7d0064722cd6d7bc3f9ece144f23ab3d25e1194738718cb06d5859fff01e6fa
|
# -*- coding: utf-8 -*-
# creates: surface.png
import os
from ase import *
execfile('N2Cu.py')
image = read('N2Cu.traj@-1')
write('surface.pov', image, transparent=False, display=False, run_povray=True)
|
freephys/python_ase
|
doc/tutorials/surface.py
|
Python
|
gpl-3.0
| 204
|
[
"ASE"
] |
21d7c0a97d1c530643ab7d232ad54414c56dfac0649085346f58df2c9dd869d1
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2007-2012 Brian G. Matherly
# Copyright (C) 2009 Gary Burton
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2010 Nick Hall
# Copyright (C) 2013-2014 Paul Franklin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""Reports/Text Reports/Tag Report"""
#------------------------------------------------------------------------
#
# standard python modules
#
#------------------------------------------------------------------------
#------------------------------------------------------------------------
#
# Gramps modules
#
#------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
from gramps.gen.plug.menu import EnumeratedListOption
from gramps.gen.plug.report import Report
from gramps.gen.plug.report import utils
from gramps.gen.plug.report import MenuReportOptions
from gramps.gen.plug.report import stdoptions
from gramps.gen.plug.docgen import (IndexMark, FontStyle, ParagraphStyle,
TableStyle, TableCellStyle,
FONT_SANS_SERIF, INDEX_TYPE_TOC,
PARA_ALIGN_CENTER)
from gramps.gen.lib import NoteType, UrlType
from gramps.gen.filters import GenericFilterFactory, rules
from gramps.gen.errors import ReportError
from gramps.gen.utils.db import get_participant_from_event
from gramps.gen.display.place import displayer as _pd
from gramps.gen.proxy import LivingProxyDb, CacheProxyDb
#------------------------------------------------------------------------
#
# TagReport
#
#------------------------------------------------------------------------
class TagReport(Report):
""" Tag Report """
def __init__(self, database, options, user):
"""
Create the TagReport object that produces the report.
The arguments are:
database - the Gramps database instance
options - instance of the Options class for this report
user - a gen.user.User() instance
This report needs the following parameters (class variables)
that come in the options class.
tag - The tag each object must match to be included.
name_format - Preferred format to display names of people
incl_private - Whether to include private data
living_people - How to handle living people
years_past_death - Consider as living this many years after death
"""
Report.__init__(self, database, options, user)
menu = options.menu
self.set_locale(menu.get_option_by_name('trans').get_value())
stdoptions.run_date_format_option(self, menu)
stdoptions.run_private_data_option(self, menu)
living_opt = stdoptions.run_living_people_option(self, menu,
self._locale)
self.database = CacheProxyDb(self.database)
self._lv = menu.get_option_by_name('living_people').get_value()
for (value, description) in living_opt.get_items(xml_items=True):
if value == self._lv:
living_desc = self._(description)
break
self.living_desc = self._("(Living people: %(option_name)s)"
) % {'option_name' : living_desc}
self.tag = menu.get_option_by_name('tag').get_value()
if not self.tag:
raise ReportError(
_('Tag Report'),
_('You must first create a tag before running this report.'))
stdoptions.run_name_format_option(self, menu)
self.place_format = menu.get_option_by_name("place_format").get_value()
def write_report(self):
self.doc.start_paragraph("TR-Title")
# feature request 2356: avoid genitive form
title = self._("Tag Report for %s Items") % self.tag
mark = IndexMark(title, INDEX_TYPE_TOC, 1)
self.doc.write_text(title, mark)
self.doc.end_paragraph()
if self._lv != LivingProxyDb.MODE_INCLUDE_ALL:
self.doc.start_paragraph("TR-ReportSubtitle")
self.doc.write_text(self.living_desc)
self.doc.end_paragraph()
self.write_people()
self.write_families()
self.write_events()
self.write_places()
self.write_notes()
self.write_media()
self.write_repositories()
self.write_sources()
self.write_citations()
def write_people(self):
""" write the people associated with the tag """
plist = self.database.iter_person_handles()
filter_class = GenericFilterFactory('Person')
a_filter = filter_class()
a_filter.add_rule(rules.person.HasTag([self.tag]))
ind_list = a_filter.apply(self.database, plist)
if not ind_list:
return
self.doc.start_paragraph("TR-Heading")
header = self._("People")
mark = IndexMark(header, INDEX_TYPE_TOC, 2)
self.doc.write_text(header, mark)
self.doc.end_paragraph()
self.doc.start_table('PeopleTable', 'TR-Table')
self.doc.start_row()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal-Bold')
self.doc.write_text(self._("Id"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal-Bold')
self.doc.write_text(self._("Name"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal-Bold')
self.doc.write_text(self._("Birth"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal-Bold')
self.doc.write_text(self._("Death"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.end_row()
for person_handle in ind_list:
person = self.database.get_person_from_handle(person_handle)
self.doc.start_row()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal')
self.doc.write_text(person.get_gramps_id())
self.doc.end_paragraph()
self.doc.end_cell()
name = self._name_display.display(person)
mark = utils.get_person_mark(self.database, person)
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal')
self.doc.write_text(name, mark)
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal')
birth_ref = person.get_birth_ref()
if birth_ref:
event = self.database.get_event_from_handle(birth_ref.ref)
self.doc.write_text(self._get_date(event.get_date_object()))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal')
death_ref = person.get_death_ref()
if death_ref:
event = self.database.get_event_from_handle(death_ref.ref)
self.doc.write_text(self._get_date(event.get_date_object()))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.end_row()
self.doc.end_table()
def write_families(self):
""" write the families associated with the tag """
flist = self.database.iter_family_handles()
filter_class = GenericFilterFactory('Family')
a_filter = filter_class()
a_filter.add_rule(rules.family.HasTag([self.tag]))
fam_list = a_filter.apply(self.database, flist)
if not fam_list:
return
self.doc.start_paragraph("TR-Heading")
header = self._("Families")
mark = IndexMark(header, INDEX_TYPE_TOC, 2)
self.doc.write_text(header, mark)
self.doc.end_paragraph()
self.doc.start_table('FamilyTable', 'TR-Table')
self.doc.start_row()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal-Bold')
self.doc.write_text(self._("Id"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal-Bold')
self.doc.write_text(self._("Father"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal-Bold')
self.doc.write_text(self._("Mother"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal-Bold')
self.doc.write_text(self._("Relationship"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.end_row()
for family_handle in fam_list:
family = self.database.get_family_from_handle(family_handle)
self.doc.start_row()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal')
self.doc.write_text(family.get_gramps_id())
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal')
father_handle = family.get_father_handle()
if father_handle:
father = self.database.get_person_from_handle(father_handle)
mark = utils.get_person_mark(self.database, father)
self.doc.write_text(self._name_display.display(father), mark)
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal')
mother_handle = family.get_mother_handle()
if mother_handle:
mother = self.database.get_person_from_handle(mother_handle)
mark = utils.get_person_mark(self.database, mother)
self.doc.write_text(self._name_display.display(mother), mark)
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal')
relation = family.get_relationship()
self.doc.write_text(str(relation))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.end_row()
self.doc.end_table()
def write_events(self):
""" write the events associated with the tag """
elist = self.database.get_event_handles()
filter_class = GenericFilterFactory('Event')
a_filter = filter_class()
a_filter.add_rule(rules.event.HasTag([self.tag]))
event_list = a_filter.apply(self.database, elist)
if not event_list:
return
self.doc.start_paragraph("TR-Heading")
header = self._("Events")
mark = IndexMark(header, INDEX_TYPE_TOC, 2)
self.doc.write_text(header, mark)
self.doc.end_paragraph()
self.doc.start_table('EventTable', 'TR-Table')
self.doc.start_row()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal-Bold')
self.doc.write_text(self._("Id"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal-Bold')
self.doc.write_text(self._("Type"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal-Bold')
self.doc.write_text(self._("Participants"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal-Bold')
self.doc.write_text(self._("Date"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.end_row()
for event_handle in event_list:
event = self.database.get_event_from_handle(event_handle)
self.doc.start_row()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal')
self.doc.write_text(event.get_gramps_id())
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal')
self.doc.write_text(self._(self._get_type(event.get_type())))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal')
self.doc.write_text(get_participant_from_event(self.database,
event_handle))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal')
date = self._get_date(event.get_date_object())
if date:
self.doc.write_text(date)
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.end_row()
self.doc.end_table()
def write_places(self):
""" write the places associated with the tag """
plist = self.database.get_place_handles()
filter_class = GenericFilterFactory('Place')
a_filter = filter_class()
a_filter.add_rule(rules.place.HasTag([self.tag]))
place_list = a_filter.apply(self.database, plist)
if not place_list:
return
self.doc.start_paragraph("TR-Heading")
header = self._("Places")
mark = IndexMark(header, INDEX_TYPE_TOC, 2)
self.doc.write_text(header, mark)
self.doc.end_paragraph()
self.doc.start_table('PlaceTable', 'TR-Table')
self.doc.start_row()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal-Bold')
self.doc.write_text(self._("Id"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal-Bold')
self.doc.write_text(self._("Title"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal-Bold')
self.doc.write_text(self._("Name"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal-Bold')
self.doc.write_text(self._("Type"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.end_row()
for place_handle in place_list:
place = self.database.get_place_from_handle(place_handle)
place_title = _pd.display(self.database, place, self.place_format)
self.doc.start_row()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal')
self.doc.write_text(place.get_gramps_id())
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal')
self.doc.write_text(place_title)
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal')
self.doc.write_text(place.get_name().get_value())
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal')
self.doc.write_text(str(place.get_type()))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.end_row()
self.doc.end_table()
def write_notes(self):
""" write the notes associated with the tag """
nlist = self.database.get_note_handles()
filter_class = GenericFilterFactory('Note')
a_filter = filter_class()
a_filter.add_rule(rules.note.HasTag([self.tag]))
note_list = a_filter.apply(self.database, nlist)
if not note_list:
return
self.doc.start_paragraph("TR-Heading")
header = self._("Notes")
mark = IndexMark(header, INDEX_TYPE_TOC, 2)
self.doc.write_text(header, mark)
self.doc.end_paragraph()
self.doc.start_table('NoteTable', 'TR-Table')
self.doc.start_row()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal-Bold')
self.doc.write_text(self._("Id"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal-Bold')
self.doc.write_text(self._("Type"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell', 2)
self.doc.start_paragraph('TR-Normal-Bold')
self.doc.write_text(self._("Text"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.end_row()
for note_handle in note_list:
note = self.database.get_note_from_handle(note_handle)
self.doc.start_row()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal')
self.doc.write_text(note.get_gramps_id())
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal')
note_type = note.get_type()
self.doc.write_text(str(note_type))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell', 2)
self.doc.write_styled_note(
note.get_styledtext(), note.get_format(), 'TR-Note',
contains_html=((note.get_type() == NoteType.HTML_CODE)))
self.doc.end_cell()
self.doc.end_row()
self.doc.end_table()
def write_media(self):
""" write the media associated with the tag """
mlist = self.database.get_media_handles(sort_handles=True,
locale=self._locale)
filter_class = GenericFilterFactory('Media')
a_filter = filter_class()
a_filter.add_rule(rules.media.HasTag([self.tag]))
media_list = a_filter.apply(self.database, mlist)
if not media_list:
return
self.doc.start_paragraph("TR-Heading")
header = self._("Media")
mark = IndexMark(header, INDEX_TYPE_TOC, 2)
self.doc.write_text(header, mark)
self.doc.end_paragraph()
self.doc.start_table('MediaTable', 'TR-Table')
self.doc.start_row()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal-Bold')
self.doc.write_text(self._("Id"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal-Bold')
self.doc.write_text(self._("Title"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal-Bold')
self.doc.write_text(self._("Type"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal-Bold')
self.doc.write_text(self._("Date"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.end_row()
for media_handle in media_list:
media = self.database.get_media_from_handle(media_handle)
self.doc.start_row()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal')
self.doc.write_text(media.get_gramps_id())
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal')
title = media.get_description()
self.doc.write_text(str(title))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal')
mime_type = media.get_mime_type()
self.doc.write_text(str(mime_type))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal')
date = self._get_date(media.get_date_object())
if date:
self.doc.write_text(date)
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.end_row()
self.doc.end_table()
def write_repositories(self):
""" write the repositories associated with the tag """
rlist = self.database.get_repository_handles()
filter_class = GenericFilterFactory('Repository')
a_filter = filter_class()
a_filter.add_rule(rules.repository.HasTag([self.tag]))
repo_list = a_filter.apply(self.database, rlist)
if not repo_list:
return
self.doc.start_paragraph("TR-Heading")
header = self._("Repositories")
mark = IndexMark(header, INDEX_TYPE_TOC, 2)
self.doc.write_text(header, mark)
self.doc.end_paragraph()
        self.doc.start_table('RepoTable', 'TR-Table')
self.doc.start_row()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal-Bold')
self.doc.write_text(self._("Id"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal-Bold')
self.doc.write_text(self._("Name"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal-Bold')
self.doc.write_text(self._("Type"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal-Bold')
self.doc.write_text(self._("Email Address"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.end_row()
for repo_handle in repo_list:
repo = self.database.get_repository_from_handle(repo_handle)
self.doc.start_row()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal')
self.doc.write_text(repo.get_gramps_id())
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal')
self.doc.write_text(repo.get_name())
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal')
self.doc.write_text(str(repo.get_type()))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal')
            email_addr = ''
            for url in repo.get_url_list():
                if url.get_type() == UrlType.EMAIL:
                    email_addr = url.get_path()
                    break
            self.doc.write_text(email_addr)
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.end_row()
self.doc.end_table()
def write_sources(self):
""" write the sources associated with the tag """
slist = self.database.get_source_handles(sort_handles=True,
locale=self._locale)
filter_class = GenericFilterFactory('Source')
a_filter = filter_class()
a_filter.add_rule(rules.source.HasTag([self.tag]))
source_list = a_filter.apply(self.database, slist)
if not source_list:
return
self.doc.start_paragraph("TR-Heading")
header = self._("Source")
mark = IndexMark(header, INDEX_TYPE_TOC, 2)
self.doc.write_text(header, mark)
self.doc.end_paragraph()
self.doc.start_table('SourceTable', 'TR-Table')
self.doc.start_row()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal-Bold')
self.doc.write_text(self._("Id"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal-Bold')
self.doc.write_text(self._("Title"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal-Bold')
self.doc.write_text(self._("Author"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal-Bold')
self.doc.write_text(self._("Publication Information"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.end_row()
for source_handle in source_list:
source = self.database.get_source_from_handle(source_handle)
self.doc.start_row()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal')
self.doc.write_text(source.get_gramps_id())
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal')
self.doc.write_text(source.get_title())
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal')
self.doc.write_text(source.get_author())
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal')
self.doc.write_text(source.get_publication_info())
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.end_row()
self.doc.end_table()
def write_citations(self):
""" write the citations associated with the tag """
clist = self.database.get_citation_handles(sort_handles=True,
locale=self._locale)
filter_class = GenericFilterFactory('Citation')
a_filter = filter_class()
a_filter.add_rule(rules.citation.HasTag([self.tag]))
citation_list = a_filter.apply(self.database, clist)
if not citation_list:
return
self.doc.start_paragraph("TR-Heading")
header = self._("Citations")
mark = IndexMark(header, INDEX_TYPE_TOC, 2)
self.doc.write_text(header, mark)
self.doc.end_paragraph()
self.doc.start_table('CitationTable', 'TR-Table')
self.doc.start_row()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal-Bold')
self.doc.write_text(self._("Id"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal-Bold')
self.doc.write_text(self._("Volume/Page"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal-Bold')
self.doc.write_text(self._("Date"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal-Bold')
self.doc.write_text(self._("Source"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.end_row()
for citation_handle in citation_list:
citation = self.database.get_citation_from_handle(citation_handle)
self.doc.start_row()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal')
self.doc.write_text(citation.get_gramps_id())
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal')
self.doc.write_text(citation.get_page())
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal')
date = self._get_date(citation.get_date_object())
if date:
self.doc.write_text(date)
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal')
source_handle = citation.get_reference_handle()
source = self.database.get_source_from_handle(source_handle)
self.doc.write_text(source.get_title())
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.end_row()
self.doc.end_table()
#------------------------------------------------------------------------
#
# TagOptions
#
#------------------------------------------------------------------------
class TagOptions(MenuReportOptions):
""" Options for the Tag Report """
def __init__(self, name, dbase):
self.__db = dbase
MenuReportOptions.__init__(self, name, dbase)
def get_subject(self):
""" Return a string that describes the subject of the report. """
return self.__tag_option.get_value()
def add_menu_options(self, menu):
"""
Add options to the menu for the tag report.
"""
category_name = _("Report Options")
all_tags = []
for handle in self.__db.get_tag_handles(sort_handles=True):
tag = self.__db.get_tag_from_handle(handle)
all_tags.append(tag.get_name())
if len(all_tags) > 0:
self.__tag_option = EnumeratedListOption(_('Tag'), all_tags[0])
for tag_name in all_tags:
self.__tag_option.add_item(tag_name, tag_name)
else:
self.__tag_option = EnumeratedListOption(_('Tag'), '')
self.__tag_option.add_item('', '')
self.__tag_option.set_help(_("The tag to use for the report"))
menu.add_option(category_name, "tag", self.__tag_option)
stdoptions.add_name_format_option(menu, category_name)
stdoptions.add_place_format_option(menu, category_name)
stdoptions.add_private_data_option(menu, category_name)
stdoptions.add_living_people_option(menu, category_name)
locale_opt = stdoptions.add_localization_option(menu, category_name)
stdoptions.add_date_format_option(menu, category_name, locale_opt)
def make_default_style(self, default_style):
"""Make the default output style for the Tag Report."""
# Paragraph Styles
font = FontStyle()
font.set_size(16)
font.set_type_face(FONT_SANS_SERIF)
font.set_bold(1)
para = ParagraphStyle()
para.set_header_level(1)
para.set_top_margin(utils.pt2cm(3))
para.set_bottom_margin(utils.pt2cm(3))
para.set_font(font)
para.set_alignment(PARA_ALIGN_CENTER)
para.set_description(_("The style used for the title."))
default_style.add_paragraph_style("TR-Title", para)
font = FontStyle()
font.set(face=FONT_SANS_SERIF, size=12, bold=1)
para = ParagraphStyle()
para.set_header_level(1)
para.set_top_margin(utils.pt2cm(3))
para.set_bottom_margin(utils.pt2cm(3))
para.set_font(font)
para.set_alignment(PARA_ALIGN_CENTER)
para.set_description(_('The style used for the subtitle.'))
default_style.add_paragraph_style("TR-ReportSubtitle", para)
font = FontStyle()
font.set(face=FONT_SANS_SERIF, size=14, italic=1)
para = ParagraphStyle()
para.set_font(font)
para.set_header_level(2)
para.set_top_margin(0.25)
para.set_bottom_margin(0.25)
para.set_description(_('The style used for the section headers.'))
default_style.add_paragraph_style("TR-Heading", para)
font = FontStyle()
font.set_size(12)
para = ParagraphStyle()
para.set(first_indent=-0.75, lmargin=.75)
para.set_font(font)
para.set_top_margin(utils.pt2cm(3))
para.set_bottom_margin(utils.pt2cm(3))
para.set_description(_('The basic style used for the text display.'))
default_style.add_paragraph_style("TR-Normal", para)
font = FontStyle()
font.set_size(12)
font.set_bold(True)
para = ParagraphStyle()
para.set(first_indent=-0.75, lmargin=.75)
para.set_font(font)
para.set_top_margin(utils.pt2cm(3))
para.set_bottom_margin(utils.pt2cm(3))
para.set_description(_('The basic style used for table headings.'))
default_style.add_paragraph_style("TR-Normal-Bold", para)
para = ParagraphStyle()
para.set(first_indent=-0.75, lmargin=.75)
para.set_top_margin(utils.pt2cm(3))
para.set_bottom_margin(utils.pt2cm(3))
para.set_description(_('The basic style used for the note display.'))
default_style.add_paragraph_style("TR-Note", para)
#Table Styles
cell = TableCellStyle()
default_style.add_cell_style('TR-TableCell', cell)
table = TableStyle()
table.set_width(100)
table.set_columns(4)
table.set_column_width(0, 10)
table.set_column_width(1, 30)
table.set_column_width(2, 30)
table.set_column_width(3, 30)
default_style.add_table_style('TR-Table', table)
|
SNoiraud/gramps
|
gramps/plugins/textreport/tagreport.py
|
Python
|
gpl-2.0
| 35,291
|
[
"Brian"
] |
7d90018ad07f41753f6176df187dcb6c4e601324e0a6980a20fccf3fc35d3a27
|
from fabric.api import task, local, run, cd, sudo
from fabric.contrib.files import upload_template
from fabric.context_managers import settings
from fabric.operations import put
from fabric import state
import os
import sys
from boto import ec2
config = {
'server_name': 'tiranacode',
'port': 80,
'remote_install_dir': '/home/ubuntu/tiranacode',
'package_name': 'package.tar.gz',
'deploy_content': [
'src',
'requirements.txt',
],
'excludes': [
'src/webapp/node_modules',
'src/webapp/static/dist/vendors.js'
],
'dependencies': [
'python-pip',
'ipython',
'build-essential',
'python-dev',
'postgresql',
'python-psycopg2',
'libpq-dev',
'libssl-dev',
'libffi-dev'
],
'region': 'eu-central-1',
'instance_name': 'tiranacode'
}
### BEGIN: Get instance IPs using boto.ec2
conn = ec2.connect_to_region(config['region'])
reservations = conn.get_all_instances(filters={
'tag:Name' : config['instance_name']
})
instances = [i for r in reservations for i in r.instances]
if len(instances) == 0:
print 'Could not find any instances with name: %s on region: %s' % (
config['instance_name'],
config['region']
)
sys.exit(1)
state.env.user = 'ubuntu'
state.env.hosts = [instance.ip_address for instance in instances]
### END: Get instance IPs using boto.ec2
@task
def upload_ssh_keys():
pub_keys_folder = 'deploy/pub_keys'
for key_fname in os.listdir(pub_keys_folder):
local_loc = os.path.join(pub_keys_folder, key_fname)
remote_loc = os.path.join('/tmp', key_fname)
put(local_loc, remote_loc)
sudo('cat %s >> /root/.ssh/authorized_keys' % remote_loc)
sudo('cat %s >> /home/ubuntu/.ssh/authorized_keys' % remote_loc)
@task
def deploy_code():
#build js files
local('gulp deploy --cwd src/webapp/')
# compress local folder
local('tar -zcvf %s %s --exclude=%s' % (
config['package_name'], ' '.join(config['deploy_content']),
' --exclude='.join(config['excludes'])
))
# upload package and delete local folder
run('mkdir -p %s' % config['remote_install_dir'])
put(config['package_name'], config['remote_install_dir'])
local('rm %s' % config['package_name'])
# uncompress on the remote server and remove compressed package
with cd(config['remote_install_dir']):
run('tar -zxvf %s' % config['package_name'])
run('rm %s' % config['package_name'])
@task
def install_dependencies():
sudo('apt-get update')
for dep in config['dependencies']:
sudo('apt-get -y install %s' % dep)
with cd(config['remote_install_dir']):
sudo("pip install -r requirements.txt")
@task
def install_service():
upload_template('deploy/upstart.tpl.conf',
'/etc/init/%s.conf' % config['server_name'],
context={
'install_dir': config['remote_install_dir'],
'port': config['port'],
'server_name': config['server_name']
}, use_sudo=True)
sudo('initctl reload-configuration')
@task
def restart_service():
sudo('service %s restart' % config['server_name'])
@task
def redeploy():
deploy_code()
restart_service()
@task
def full_install():
upload_ssh_keys()
deploy_code()
install_dependencies()
install_service()
restart_service()
# sudo("sudo -u postgres psql")
@task
def postgres_init():
sudo("psql", user='postgres')
# sudo("psql tiranacode")
# psql -U user_name -d database_name -h 127.0.0.1 -W
# create user dbuser;
# create database dbname;
# GRANT ALL PRIVILEGES ON DATABASE dbname to dbuser;
# nano /etc/postgresql/9.3/main/pg_hba.conf
# local all all md5
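# --- Typical invocation (editor's sketch; assumes AWS credentials are
# available to boto and the tagged EC2 instance exists) ---
# fab full_install   # first-time provisioning of the instance
# fab redeploy       # package + upload new code, then restart the service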
|
tiranacode/jap-jete-admin
|
fabfile.py
|
Python
|
bsd-3-clause
| 3,869
|
[
"GULP"
] |
3e2ae74dfb7b7f16838a5b45c66de35b539f7a5a30a5f0dcf6d2f292a1e2e023
|
#!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example adds several text ads to a given ad group.
To get ad_group_id, run get_ad_groups.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
Tags: AdGroupAdService.mutate
Api: AdWordsOnly
"""
__author__ = ('api.kwinter@gmail.com (Kevin Winter)'
'Joseph DiLallo')
from googleads import adwords
AD_GROUP_ID = 'INSERT_AD_GROUP_ID_HERE'
def main(client, ad_group_id):
# Initialize appropriate service.
ad_group_ad_service = client.GetService('AdGroupAdService', version='v201506')
# Construct operations and add ads.
# If needed, you could specify an exemption request here, e.g.:
# 'exemptionRequests': [{
# # This comes back in a PolicyViolationError.
# 'key' {
# 'policyName': '...',
# 'violatingText': '...'
# }
# }]
operations = [
{
'operator': 'ADD',
'operand': {
'xsi_type': 'AdGroupAd',
'adGroupId': ad_group_id,
'ad': {
'xsi_type': 'TextAd',
'finalUrls': ['http://www.example.com'],
'displayUrl': 'example.com',
'description1': 'Visit the Red Planet in style.',
'description2': 'Low-gravity fun for everyone!',
'headline': 'Luxury Cruise to Mars'
},
# Optional fields.
'status': 'PAUSED'
}
},
{
'operator': 'ADD',
'operand': {
'xsi_type': 'AdGroupAd',
'adGroupId': ad_group_id,
'ad': {
'xsi_type': 'TextAd',
'finalUrls': ['http://www.example.com'],
'displayUrl': 'example.com',
'description1': 'Enjoy your stay at Red Planet.',
'description2': 'Buy your tickets now!',
'headline': 'Luxury Cruise to Mars'
}
}
}
]
ads = ad_group_ad_service.mutate(operations)
# Display results.
for ad in ads['value']:
print ('Ad with id \'%s\' and of type \'%s\' was added.'
% (ad['ad']['id'], ad['ad']['Ad.Type']))
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client, AD_GROUP_ID)
|
coxmediagroup/googleads-python-lib
|
examples/adwords/v201506/basic_operations/add_text_ads.py
|
Python
|
apache-2.0
| 3,121
|
[
"VisIt"
] |
d8842ff156471b415803a7d4ab05245fdcb358311b84c05d21cc6118072cac4b
|
# Copyright: (c) 2017, Brian Coca <bcoca@ansible.com>
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import argparse
from operator import attrgetter
from ansible import constants as C
from ansible import context
from ansible.cli import CLI
from ansible.cli.arguments import option_helpers as opt_help
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.module_utils._text import to_bytes, to_native
from ansible.utils.vars import combine_vars
from ansible.utils.display import Display
from ansible.vars.plugins import get_vars_from_inventory_sources, get_vars_from_path
display = Display()
INTERNAL_VARS = frozenset(['ansible_diff_mode',
'ansible_config_file',
'ansible_facts',
'ansible_forks',
'ansible_inventory_sources',
'ansible_limit',
'ansible_playbook_python',
'ansible_run_tags',
'ansible_skip_tags',
'ansible_verbosity',
'ansible_version',
'inventory_dir',
'inventory_file',
'inventory_hostname',
'inventory_hostname_short',
'groups',
'group_names',
'omit',
'playbook_dir', ])
class InventoryCLI(CLI):
''' used to display or dump the configured inventory as Ansible sees it '''
ARGUMENTS = {'host': 'The name of a host to match in the inventory, relevant when using --list',
'group': 'The name of a group in the inventory, relevant when using --graph', }
def __init__(self, args):
super(InventoryCLI, self).__init__(args)
self.vm = None
self.loader = None
self.inventory = None
def init_parser(self):
super(InventoryCLI, self).init_parser(
usage='usage: %prog [options] [host|group]',
epilog='Show Ansible inventory information, by default it uses the inventory script JSON format')
opt_help.add_inventory_options(self.parser)
opt_help.add_vault_options(self.parser)
opt_help.add_basedir_options(self.parser)
# remove unused default options
self.parser.add_argument('-l', '--limit', help=argparse.SUPPRESS, action=opt_help.UnrecognizedArgument, nargs='?')
self.parser.add_argument('--list-hosts', help=argparse.SUPPRESS, action=opt_help.UnrecognizedArgument)
self.parser.add_argument('args', metavar='host|group', nargs='?')
# Actions
        action_group = self.parser.add_argument_group("Actions", "One of the following must be used on invocation, ONLY ONE!")
action_group.add_argument("--list", action="store_true", default=False, dest='list', help='Output all hosts info, works as inventory script')
action_group.add_argument("--host", action="store", default=None, dest='host', help='Output specific host info, works as inventory script')
action_group.add_argument("--graph", action="store_true", default=False, dest='graph',
help='create inventory graph, if supplying pattern it must be a valid group name')
self.parser.add_argument_group(action_group)
# graph
self.parser.add_argument("-y", "--yaml", action="store_true", default=False, dest='yaml',
help='Use YAML format instead of default JSON, ignored for --graph')
self.parser.add_argument('--toml', action='store_true', default=False, dest='toml',
help='Use TOML format instead of default JSON, ignored for --graph')
self.parser.add_argument("--vars", action="store_true", default=False, dest='show_vars',
help='Add vars to graph display, ignored unless used with --graph')
# list
self.parser.add_argument("--export", action="store_true", default=C.INVENTORY_EXPORT, dest='export',
help="When doing an --list, represent in a way that is optimized for export,"
"not as an accurate representation of how Ansible has processed it")
self.parser.add_argument('--output', default=None, dest='output_file',
help="When doing --list, send the inventory to a file instead of to the screen")
# self.parser.add_argument("--ignore-vars-plugins", action="store_true", default=False, dest='ignore_vars_plugins',
# help="When doing an --list, skip vars data from vars plugins, by default, this would include group_vars/ and host_vars/")
def post_process_args(self, options):
options = super(InventoryCLI, self).post_process_args(options)
display.verbosity = options.verbosity
self.validate_conflicts(options)
# there can be only one! and, at least, one!
used = 0
for opt in (options.list, options.host, options.graph):
if opt:
used += 1
if used == 0:
raise AnsibleOptionsError("No action selected, at least one of --host, --graph or --list needs to be specified.")
elif used > 1:
raise AnsibleOptionsError("Conflicting options used, only one of --host, --graph or --list can be used at the same time.")
# set host pattern to default if not supplied
if options.args:
options.pattern = options.args
else:
options.pattern = 'all'
return options
def run(self):
super(InventoryCLI, self).run()
# Initialize needed objects
self.loader, self.inventory, self.vm = self._play_prereqs()
results = None
if context.CLIARGS['host']:
hosts = self.inventory.get_hosts(context.CLIARGS['host'])
if len(hosts) != 1:
raise AnsibleOptionsError("You must pass a single valid host to --host parameter")
myvars = self._get_host_variables(host=hosts[0])
# FIXME: should we template first?
results = self.dump(myvars)
elif context.CLIARGS['graph']:
results = self.inventory_graph()
elif context.CLIARGS['list']:
top = self._get_group('all')
if context.CLIARGS['yaml']:
results = self.yaml_inventory(top)
elif context.CLIARGS['toml']:
results = self.toml_inventory(top)
else:
results = self.json_inventory(top)
results = self.dump(results)
if results:
outfile = context.CLIARGS['output_file']
if outfile is None:
# FIXME: pager?
display.display(results)
else:
try:
with open(to_bytes(outfile), 'wt') as f:
f.write(results)
except (OSError, IOError) as e:
raise AnsibleError('Unable to write to destination file (%s): %s' % (to_native(outfile), to_native(e)))
sys.exit(0)
sys.exit(1)
@staticmethod
def dump(stuff):
if context.CLIARGS['yaml']:
import yaml
from ansible.parsing.yaml.dumper import AnsibleDumper
results = yaml.dump(stuff, Dumper=AnsibleDumper, default_flow_style=False)
elif context.CLIARGS['toml']:
from ansible.plugins.inventory.toml import toml_dumps, HAS_TOML
if not HAS_TOML:
raise AnsibleError(
'The python "toml" library is required when using the TOML output format'
)
results = toml_dumps(stuff)
else:
import json
from ansible.parsing.ajson import AnsibleJSONEncoder
results = json.dumps(stuff, cls=AnsibleJSONEncoder, sort_keys=True, indent=4, preprocess_unsafe=True)
return results
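
    # For example, under the default JSON branch dump({'a': 1}) returns the
    # four-space-indented string '{\n    "a": 1\n}'.
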
def _get_group_variables(self, group):
# get info from inventory source
res = group.get_vars()
# Always load vars plugins
res = combine_vars(res, get_vars_from_inventory_sources(self.loader, self.inventory._sources, [group], 'all'))
if context.CLIARGS['basedir']:
res = combine_vars(res, get_vars_from_path(self.loader, context.CLIARGS['basedir'], [group], 'all'))
if group.priority != 1:
res['ansible_group_priority'] = group.priority
return self._remove_internal(res)

    def _get_host_variables(self, host):
        if context.CLIARGS['export']:
            # only get vars defined directly on the host
hostvars = host.get_vars()
# Always load vars plugins
hostvars = combine_vars(hostvars, get_vars_from_inventory_sources(self.loader, self.inventory._sources, [host], 'all'))
if context.CLIARGS['basedir']:
hostvars = combine_vars(hostvars, get_vars_from_path(self.loader, context.CLIARGS['basedir'], [host], 'all'))
else:
# get all vars flattened by host, but skip magic hostvars
hostvars = self.vm.get_vars(host=host, include_hostvars=False, stage='all')
return self._remove_internal(hostvars)
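
    # With --export the result reflects only vars set directly on the host
    # (plus vars-plugin data); otherwise the VariableManager returns the fully
    # merged view, including everything inherited from the host's groups.
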
def _get_group(self, gname):
group = self.inventory.groups.get(gname)
return group

    @staticmethod
def _remove_internal(dump):
for internal in INTERNAL_VARS:
if internal in dump:
del dump[internal]
return dump

    @staticmethod
def _remove_empty(dump):
# remove empty keys
for x in ('hosts', 'vars', 'children'):
if x in dump and not dump[x]:
del dump[x]

    @staticmethod
def _show_vars(dump, depth):
result = []
for (name, val) in sorted(dump.items()):
result.append(InventoryCLI._graph_name('{%s = %s}' % (name, val), depth))
return result

    @staticmethod
def _graph_name(name, depth=0):
if depth:
name = " |" * (depth) + "--%s" % name
return name

    def _graph_group(self, group, depth=0):
result = [self._graph_name('@%s:' % group.name, depth)]
depth = depth + 1
for kid in sorted(group.child_groups, key=attrgetter('name')):
result.extend(self._graph_group(kid, depth))
if group.name != 'all':
for host in sorted(group.hosts, key=attrgetter('name')):
result.append(self._graph_name(host.name, depth))
if context.CLIARGS['show_vars']:
result.extend(self._show_vars(self._get_host_variables(host), depth + 1))
if context.CLIARGS['show_vars']:
result.extend(self._show_vars(self._get_group_variables(group), depth))
return result

    def inventory_graph(self):
start_at = self._get_group(context.CLIARGS['pattern'])
if start_at:
return '\n'.join(self._graph_group(start_at))
else:
raise AnsibleOptionsError("Pattern must be valid group name when using --graph")
def json_inventory(self, top):
seen = set()

        def format_group(group):
results = {}
results[group.name] = {}
if group.name != 'all':
results[group.name]['hosts'] = [h.name for h in sorted(group.hosts, key=attrgetter('name'))]
results[group.name]['children'] = []
for subgroup in sorted(group.child_groups, key=attrgetter('name')):
results[group.name]['children'].append(subgroup.name)
if subgroup.name not in seen:
results.update(format_group(subgroup))
seen.add(subgroup.name)
if context.CLIARGS['export']:
results[group.name]['vars'] = self._get_group_variables(group)
self._remove_empty(results[group.name])
if not results[group.name]:
del results[group.name]
return results

        results = format_group(top)
# populate meta
results['_meta'] = {'hostvars': {}}
hosts = self.inventory.get_hosts()
for host in hosts:
hvars = self._get_host_variables(host)
if hvars:
results['_meta']['hostvars'][host.name] = hvars
return results
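
    # Shape of the returned structure for a hypothetical inventory (empty
    # 'hosts'/'vars'/'children' entries are pruned by _remove_empty, and the
    # empty 'ungrouped' group disappears entirely):
    # {
    #     "_meta": {"hostvars": {"web1": {"ansible_host": "10.0.0.5"}}},
    #     "all": {"children": ["ungrouped", "web"]},
    #     "web": {"hosts": ["web1"]}
    # }
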
def yaml_inventory(self, top):
        seen = set()

        def format_group(group):
results = {}
# initialize group + vars
results[group.name] = {}
# subgroups
results[group.name]['children'] = {}
for subgroup in sorted(group.child_groups, key=attrgetter('name')):
if subgroup.name != 'all':
results[group.name]['children'].update(format_group(subgroup))
# hosts for group
results[group.name]['hosts'] = {}
if group.name != 'all':
for h in sorted(group.hosts, key=attrgetter('name')):
myvars = {}
if h.name not in seen: # avoid defining host vars more than once
                        seen.add(h.name)
myvars = self._get_host_variables(host=h)
results[group.name]['hosts'][h.name] = myvars
if context.CLIARGS['export']:
gvars = self._get_group_variables(group)
if gvars:
results[group.name]['vars'] = gvars
self._remove_empty(results[group.name])
return results

        return format_group(top)
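
    # The same hypothetical inventory as a nested mapping, ready for yaml.dump
    # in dump() above:
    # all:
    #   children:
    #     ungrouped: {}
    #     web:
    #       hosts:
    #         web1:
    #           ansible_host: 10.0.0.5
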
def toml_inventory(self, top):
seen = set()
        # 'ungrouped' normally always exists under 'all'; the default guards
        # against a missing group raising StopIteration
        has_ungrouped = bool(next((g.hosts for g in top.child_groups if g.name == 'ungrouped'), []))

        def format_group(group):
results = {}
results[group.name] = {}
results[group.name]['children'] = []
for subgroup in sorted(group.child_groups, key=attrgetter('name')):
if subgroup.name == 'ungrouped' and not has_ungrouped:
continue
if group.name != 'all':
results[group.name]['children'].append(subgroup.name)
results.update(format_group(subgroup))
if group.name != 'all':
for host in sorted(group.hosts, key=attrgetter('name')):
if host.name not in seen:
seen.add(host.name)
host_vars = self._get_host_variables(host=host)
else:
host_vars = {}
try:
results[group.name]['hosts'][host.name] = host_vars
except KeyError:
results[group.name]['hosts'] = {host.name: host_vars}
if context.CLIARGS['export']:
results[group.name]['vars'] = self._get_group_variables(group)
self._remove_empty(results[group.name])
if not results[group.name]:
del results[group.name]
return results

        results = format_group(top)
return results
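
    # Roughly, once serialized by toml_dumps, the same hypothetical inventory
    # renders without an 'all' wrapper (its empty entry is pruned) and with the
    # empty 'ungrouped' group skipped via has_ungrouped:
    #   [web.hosts.web1]
    #   ansible_host = "10.0.0.5"
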
| indrajitr/ansible | lib/ansible/cli/inventory.py | Python | gpl-3.0 | 15,510 | ["Brian"] | 6caf7d3ad23c7165844ba1415bef4ea1e63f151f2c6a60180d4f8f36add27e5c |