input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 27 16:01:31 2020
@author: kirk.mutafopulos
"""
import sys
import serial
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
class Pumps(QMainWindow):
    """Main window for driving up to three syringe pumps over one shared
    serial line (COM1).

    Pump 1 answers unaddressed commands (e.g. b"RUN\r"); pumps 2 and 3
    answer address-prefixed commands (b"01RUN\r", b"02RUN\r").
    Fixes over the original: every handler now closes the serial port
    (run/stop handlers used to leak it), the dead ``ba = QByteArray``
    class-object assignments are gone, line edits use real placeholder
    text (``setText`` made an untouched field send its prompt string to
    the pump), and the pump-3 uL/min button sends ``02ULM`` instead of
    the erroneous ``02ULH``.
    """

    # Shared serial-port settings for every pump command.
    PORT = 'COM1'
    BAUDRATE = 9600

    def __init__(self):
        super().__init__()
        self.initUI()

    # ------------------------------------------------------------------
    # Serial helpers
    # ------------------------------------------------------------------
    def _open_serial(self):
        """Open the shared pump serial port; the caller must close it."""
        return serial.Serial(port=self.PORT, baudrate=self.BAUDRATE,
                             bytesize=serial.EIGHTBITS,
                             parity=serial.PARITY_NONE, timeout=0.5)

    def _send(self, *commands):
        """Write one or more b"...\\r"-terminated commands, always
        closing the port afterwards (try/finally fixes the leak in the
        original run/stop handlers, which never called close())."""
        ser = self._open_serial()
        try:
            for command in commands:
                ser.write(command)
        finally:
            ser.close()

    def _report_sender(self):
        """Echo which button fired the current slot in the status bar."""
        sender = self.sender()
        if sender is not None:
            self.statusBar().showMessage(sender.text() + ' was pressed')

    # ------------------------------------------------------------------
    # UI construction
    # ------------------------------------------------------------------
    def initUI(self):
        """Build all buttons, input fields and labels and wire up slots."""
        def make_button(text, x, y, slot):
            # All buttons share the same 200x32 size.
            btn = QPushButton(text, self)
            btn.move(x, y)
            btn.resize(200, 32)
            btn.clicked.connect(slot)
            return btn

        def make_edit(placeholder, x, y):
            edit = QLineEdit(self)
            edit.setObjectName("frEdit")  # Flow-Rate Edit
            # Placeholder (not setText): an untouched field now yields
            # an empty text() instead of sending the prompt to the pump.
            edit.setPlaceholderText(placeholder)
            edit.resize(200, 32)
            edit.move(x, y)
            return edit

        # Pump 1 (unaddressed) controls
        make_button("Run Pump 1", 30, 140, self.runbuttonClicked)
        make_button("Stop Pump 1", 250, 140, self.stopbuttonClicked)
        make_button("Set to mL/hour", 470, 140, self.sfrbuttonClicked)
        make_button("Set Diameter (mm)", 690, 140, self.diambuttonClicked)
        make_button("Set to uL/hour", 470, 180, self.ulhbuttonClicked)
        make_button("Set to uL/min", 470, 220, self.ulmbuttonClicked)
        # Pump 2 (address 01) controls
        make_button("Run Pump 2", 30, 320, self.run2buttonClicked)
        make_button("Stop Pump 2", 250, 320, self.stop2buttonClicked)
        make_button("Set to mL/hour", 470, 320, self.mlh2buttonClicked)
        make_button("Set Diameter (mm)", 690, 320, self.diam2buttonClicked)
        make_button("Set to uL/hour", 470, 360, self.ulh2buttonClicked)
        make_button("Set to uL/min", 470, 400, self.ulm2buttonClicked)
        # Pump 3 (address 02) controls
        make_button("Run Pump 3", 30, 500, self.run3buttonClicked)
        make_button("Stop Pump 3", 250, 500, self.stop3buttonClicked)
        make_button("Set to mL/hour", 470, 500, self.mlh3buttonClicked)
        make_button("Set to uL/hour", 470, 540, self.ulh3buttonClicked)
        make_button("Set to uL/min", 470, 580, self.ulm3buttonClicked)
        make_button("Set Diameter (mm)", 690, 500, self.diam3buttonClicked)
        # Global controls
        make_button("STOP ALL PUMPS", 100, 620, self.stopallbuttonClicked)
        make_button("RUN ALL PUMPS", 100, 570, self.runallbuttonClicked)

        # Value inputs: flow-rate and syringe-diameter per pump.
        self.lineEdit1 = make_edit(" Input flow rate", 470, 100)  # pump 1 rate
        self.lineEdit2 = make_edit(" Input Diameter", 690, 100)   # pump 1 diam
        self.lineEdit3 = make_edit(" Input Diameter", 690, 280)   # pump 2 diam
        self.lineEdit4 = make_edit(" Input flow rate", 470, 280)  # pump 2 rate
        self.lineEdit5 = make_edit(" Input flow rate", 470, 460)  # pump 3 rate
        self.lineEdit6 = make_edit(" Input Diameter", 690, 460)   # pump 3 diam

        # Static reference labels
        DiamInfo = QLabel("Syringe Diameter: 1mL=4.78mm, 3mL=8.66mm, 5mL=12.06mm, 10mL=14.5mm, 20mL=19.13mm, 30mL=21.7mm" , self)
        DiamInfo.move(80, 700)
        DiamInfo.resize(850, 32)
        ProgInfo = QLabel("This program was made by <NAME>, PhD", self)
        ProgInfo.move(80, 730)
        ProgInfo.resize(850, 32)

        self.statusBar()
        # (x, y, width, height) of the main window
        self.setGeometry(200, 200, 1000, 800)
        self.setWindowTitle('Syringe Pump Controller')
        self.show()

    # ------------------------------------------------------------------
    # Pump 1 (unaddressed) slots
    # ------------------------------------------------------------------
    def runbuttonClicked(self):
        """Start pump 1."""
        self._send(b"RUN\r")
        self._report_sender()

    def stopbuttonClicked(self):
        """Stop pump 1."""
        self._send(b"STP\r")
        self._report_sender()

    def sfrbuttonClicked(self):
        """Set pump 1 flow rate in mL/hour from lineEdit1."""
        self._send(b"MLH " + self.lineEdit1.text().encode('ascii') + b"\r")
        self._report_sender()

    def ulhbuttonClicked(self):
        """Set pump 1 flow rate in uL/hour from lineEdit1."""
        self._send(b"ULH " + self.lineEdit1.text().encode('ascii') + b"\r")
        self._report_sender()

    def ulmbuttonClicked(self):
        """Set pump 1 flow rate in uL/min from lineEdit1."""
        self._send(b"ULM " + self.lineEdit1.text().encode('ascii') + b"\r")
        self._report_sender()

    def diambuttonClicked(self):
        """Set pump 1 syringe diameter (mm) from lineEdit2."""
        self._send(b"MMD " + self.lineEdit2.text().encode('ascii') + b"\r")
        self._report_sender()

    # ------------------------------------------------------------------
    # Pump 2 (address 01) slots
    # ------------------------------------------------------------------
    def run2buttonClicked(self):
        """Start pump 2."""
        self._send(b"01RUN\r")
        self._report_sender()

    def stop2buttonClicked(self):
        """Stop pump 2."""
        self._send(b"01STP\r")
        self._report_sender()

    def mlh2buttonClicked(self):
        """Set pump 2 flow rate in mL/hour from lineEdit4."""
        self._send(b"01MLH " + self.lineEdit4.text().encode('ascii') + b"\r")
        self._report_sender()

    def ulh2buttonClicked(self):
        """Set pump 2 flow rate in uL/hour from lineEdit4."""
        self._send(b"01ULH " + self.lineEdit4.text().encode('ascii') + b"\r")
        self._report_sender()

    def ulm2buttonClicked(self):
        """Set pump 2 flow rate in uL/min from lineEdit4."""
        self._send(b"01ULM " + self.lineEdit4.text().encode('ascii') + b"\r")
        self._report_sender()

    def diam2buttonClicked(self):
        """Set pump 2 syringe diameter (mm) from lineEdit3."""
        self._send(b"01MMD " + self.lineEdit3.text().encode('ascii') + b"\r")
        self._report_sender()

    # ------------------------------------------------------------------
    # Pump 3 (address 02) slots
    # ------------------------------------------------------------------
    def run3buttonClicked(self):
        """Start pump 3."""
        self._send(b"02RUN\r")
        self._report_sender()

    def stop3buttonClicked(self):
        """Stop pump 3."""
        self._send(b"02STP\r")
        self._report_sender()

    def mlh3buttonClicked(self):
        """Set pump 3 flow rate in mL/hour from lineEdit5."""
        self._send(b"02MLH " + self.lineEdit5.text().encode('ascii') + b"\r")
        self._report_sender()

    def ulh3buttonClicked(self):
        """Set pump 3 flow rate in uL/hour from lineEdit5."""
        self._send(b"02ULH " + self.lineEdit5.text().encode('ascii') + b"\r")
        self._report_sender()

    def ulm3buttonClicked(self):
        """Set pump 3 flow rate in uL/min from lineEdit5.

        BUGFIX: the original sent b"02ULH" here, silently setting
        uL/hour from the uL/min button.
        """
        self._send(b"02ULM " + self.lineEdit5.text().encode('ascii') + b"\r")
        self._report_sender()

    def diam3buttonClicked(self):
        """Set pump 3 syringe diameter (mm) from lineEdit6."""
        self._send(b"02MMD " + self.lineEdit6.text().encode('ascii') + b"\r")
        self._report_sender()

    # ------------------------------------------------------------------
    # Global slots
    # ------------------------------------------------------------------
    def stopallbuttonClicked(self):
        """Stop all three pumps with a single port open/close."""
        self._send(b"STP\r", b"01STP\r", b"02STP\r")
        self._report_sender()

    def runallbuttonClicked(self):
        """Start all three pumps with a single port open/close."""
        self._send(b"RUN\r", b"01RUN\r", b"02RUN\r")
        self._report_sender()

    def getint(self):
        """Prompt for an integer and echo it into the pump-1 flow-rate
        field.

        NOTE(review): the original wrote to ``self.le2``, an attribute
        that is never created (AttributeError if ever called); this is
        apparently unused leftover code, re-pointed at lineEdit1.
        """
        num, ok = QInputDialog.getInt(self, "interger input dialog", "enter value")
        if ok:
            self.lineEdit1.setText(str(num))

    def closeEvent(self, event):
        """Confirm quit; on confirmation stop every pump before closing."""
        reply = QMessageBox.question(self, 'Message',
                                     "Are you sure want to quit?", QMessageBox.Yes |
                                     QMessageBox.No, QMessageBox.No)
        if reply == QMessageBox.Yes:
            event.accept()
            # Safety: never leave pumps running after the GUI exits.
            self._send(b"STP\r", b"01STP\r", b"02STP\r")
        else:
            event.ignore()
if | |
# ---------------------------------------------------------------------------
# Figure setup: LaTeX-rendered matplotlib figure for a time-stepping diagram.
# ---------------------------------------------------------------------------
import collections, os, sys
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from scipy.special import erf
import scipy.interpolate

# Base font size in points (scaled down by 1.4 relative to an 11 pt document).
fontsize = 11/1.4
# Extra LaTeX packages; \usepackage{bm} provides the bold \bm{} arrow glyphs
# used by draw_arrow() below.
latex_preamble = r'''
\usepackage{lmodern}
\usepackage{amsmath}
\usepackage{amsfonts}
\usepackage{mathtools}
\usepackage{bm}
'''
matplotlib.rcParams.update({
    'text.usetex' : True,            # render all text with LaTeX
    'font.family' : 'serif',
    'font.serif' : 'cmr10',          # Computer Modern Roman
    'font.size' : fontsize,
    'mathtext.fontset' : 'cm',
    'axes.formatter.use_mathtext': True,
    'text.latex.preamble': latex_preamble,
})
# Single wide, short axes for the diagram.
fig, ax = plt.subplots(1, 1, figsize=(8.44, 3.9))
def get_aspect(ax=None):
    """Return the aspect ratio of *ax* expressed in data coordinates.

    Combines the physical axes-box ratio (height/width in inches) with
    the data-limit ratio, so that rotated text/arrows can be corrected
    for non-square axes. Defaults to the current axes.
    """
    if ax is None:
        ax = plt.gca()
    figure = ax.figure
    lower_left, upper_right = ax.get_position() * figure.get_size_inches()
    box_width, box_height = upper_right - lower_left
    return (box_height / box_width) / ax.get_data_ratio()
def draw_arrow(x, y, dir, color='k', rot=None, sync=False, zorder=None):
    """Draw an arrow head at (x, y) using bold LaTeX arrow glyphs.

    :param x, y: position in data coordinates
    :param dir: 'up' or 'down' — which vertical glyph to use
    :param color: matplotlib color for the glyph
    :param rot: if given (degrees), draw a rotated rightward arrow instead
    :param sync: smaller glyph and no stalk-hiding for synchronisation marks
    :param zorder: stacking order; defaults to -15 (behind the curves)

    The LaTeX arrow glyphs come with a stalk; since only the head is
    wanted, white dash glyphs / rectangles are painted over the stalk.
    """
    if zorder is None:
        zorder = -15
    text = (r'$\bm{\uparrow}$' if dir == 'up' else r'$\bm{\downarrow}$')
    va = ('top' if dir == 'up' else 'bottom')
    fontsize = 19
    if sync:
        fontsize = 14.9
    if rot is not None:
        # Unit vector along the rotation direction; y-component corrected
        # by the axes aspect so the angle is true on screen.
        v = [np.cos(rot*np.pi/180), np.sin(rot*np.pi/180)/get_aspect()]
        t = -0.034 #-0.053
        dy = -0.001
        plt.text(
            x + t*v[0], y + t*v[1] + dy,
            r'$\bm{\rightarrow}$',
            va='center', ha='center', fontsize=fontsize,
            zorder=zorder, color=color, rotation=rot,
        )
        # Hide stalk: paint white dashes just behind the head.
        if not sync:
            for dt in (-0.0056, ):
                plt.text(
                    x + (t + dt)*v[0], y + (t + dt)*v[1] + dy,
                    r'$\bm{-}$',
                    va='center', ha='center', fontsize=22,
                    zorder=zorder+1, color='w', rotation=rot,
                )
            for dt in (-0.036, ):
                plt.text(
                    x + (t + dt)*v[0], y + (t + dt)*v[1] + dy,
                    r'$\bm{-}$',
                    va='center', ha='center', fontsize=36,
                    zorder=zorder+1, color='w', rotation=rot,
                )
        return
    # Not rotated: plain up/down glyph.
    plt.text(
        x, y, text,
        va=va, ha='center', fontsize=fontsize,
        zorder=zorder, color=color,
    )
    # Hide stalk: two white rectangles behind the glyph.
    if not sync:
        dx = 0.010
        dy = 0.192
        dY = (-0.145 if dir == 'up' else +0.145)
        plt.fill(
            [x - 0.5*dx, x + 0.5*dx, x + 0.5*dx, x - 0.5*dx, x - 0.5*dx],
            np.array([y + 0.5*dy, y + 0.5*dy, y - 0.5*dy, y - 0.5*dy, y + 0.5*dy]) + dY,
            'w', ec='none', zorder=zorder+1,
        )
        dY += 0.1*dY
        dx *= 1.3
        plt.fill(
            [x - 0.5*dx, x + 0.5*dx, x + 0.5*dx, x - 0.5*dx, x - 0.5*dx],
            np.array([y + 0.5*dy, y + 0.5*dy, y - 0.5*dy, y - 0.5*dy, y + 0.5*dy]) + dY,
            'w', ec='none', zorder=zorder+1,
        )
# Half-circle parameterisation (pi -> 0, 201 samples) shared by every arc
# that step() draws.
theta = np.linspace(np.pi, 0, 201)
def step(bgn, end, offset_y, dir, color, colors=None, jump_up=False, jump_down=False):
    """Draw one semicircular "kick" arc from x=bgn to x=end at height
    offset_y, optionally with a vertical jump up (to the rung above) or
    down (to the drift row).

    :param bgn, end: x-extent of the arc
    :param offset_y: vertical offset of the rung this arc sits on
    :param dir: 'up' (bulge upward) or 'down' (bulge downward); anything
        else aborts the program
    :param color: single color, used only when `colors` is not given
    :param colors: optional (start_color, end_color) pair; the arc is then
        drawn segment-by-segment with an erf-shaped color blend
    :param jump_up: True for a jump at the arc midpoint, or an x-value for
        a jump ending at the height of the previous up-jump
    :param jump_down: True for a jump at 3/4 of the arc, or an x-value for
        a jump ending at the depth of the previous down-jump

    Side effects: draws into the current axes and records the last jump
    paths in the globals y_jump_up_last / y_jump_down_last so that
    subsequent partial jumps can terminate at the same height.
    """
    global y_jump_up_last, y_jump_down_last
    arrow_offset = 0.04
    jump_up_height = 0.10 #0.0925 #0.135
    # Jump-down depth depends on which rung we start from (tuned by eye).
    if offset_y == offset_y0:
        jump_down_height = 0.79 - 0.05
    else:
        jump_down_height = 0.614 + 0.018 - 0.05
    if offset_y == offset_y2:
        jump_up_height += 0.013 #0.008
    # Map theta onto [bgn, end] horizontally and a half-sine vertically.
    x = bgn + ((end - bgn)/2)*(1 + np.cos(theta))
    if dir == 'up':
        y = (height/2)*np.sin(theta)
    elif dir == 'down':
        y = -(height/2)*np.sin(theta)
    else:
        print(f'Unrecognized dir="{dir}"', file=sys.stderr, flush=True)
        sys.exit(1)
    y += offset_y
    if colors:
        # Segment-wise drawing with an erf-weighted blend from color0 to
        # color1 along the arc.
        color0, color1 = colors
        color0 = np.asarray(matplotlib.colors.ColorConverter().to_rgb(color0), dtype=float)
        color1 = np.asarray(matplotlib.colors.ColorConverter().to_rgb(color1), dtype=float)
        mid = (x.size - 1)/2
        for i in range(x.size - 1):
            w = (1 + erf(1.8*(i - mid)/mid))/2
            color = (1 - w)*color0 + w*color1
            plt.plot([x[i], x[i + 1]], [y[i], y[i + 1]], '-', color=color, lw=1.2)
            # Arrow head at 30 % along the arc, rotated to the local slope.
            if i == int((x.size - 1)*0.30):
                dy = (y[i+1] - y[i-1])/2*get_aspect()
                dx = (x[i+1] - x[i-1])/2
                draw_arrow(x[i], y[i], 'up', color, rot=180/np.pi*np.arctan2(dy, dx))
        el_skip = 16
        if jump_up:
            if jump_up is True:
                # Full jump: arc for the first half, then straight up from
                # the midpoint by jump_up_height.
                y_jump = np.array(
                    list(y[:len(y)//2])
                    + list(offset_y + np.linspace(
                        height/2,
                        height/2 + jump_up_height,
                        len(y) - len(y)//2,
                    ))
                )
                X = bgn + (end - bgn)/2
                x_jump = np.array(list(x[:len(x)//2]) + [X]*(len(x) - len(x)//2))
                mid = (y_jump.size - 1)/2
                random_fac = 1.22 # because I can't do the math, apparently
                mid *= random_fac
                for i in range(len(y)//2 + el_skip, y_jump.size - 1):
                    w = (1 + erf(1.95*(i - mid)/mid))/2
                    color = (1 - w)*color0 + w*color1
                    plt.plot([x_jump[i], x_jump[i+1]], [y_jump[i], y_jump[i + 1]],
                        '-', color=color, lw=1.2)
                # Arrow at the top of the jump.
                draw_arrow(x_jump[i+1], y_jump[i+1] + arrow_offset, 'up', color1)
            else:
                # Partial jump at x = (bgn + jump_up)/2, ending at the
                # height recorded by the previous full up-jump.
                X1 = bgn + (jump_up - bgn)/2
                index1 = np.argmin((X1 - x)**2)
                x_jump = np.array([X1]*(len(x)//2))
                y_jump = np.linspace(
                    offset_y + height/2 + 1e-3,
                    y_jump_up_last[-1], #offset_y + height/2 + jump_up_height,
                    x_jump.size,
                )
                mid = (y_jump.size - 1)/2
                random_fac = 1.22 # because I can't do the math, apparently
                for i in range(y_jump.size - 1):
                    w = (1 + erf(1.95*(i - mid)/mid))/2
                    color = (1 - w)*(color0/(1 + random_fac*index1/len(x_jump))) + w*color1
                    plt.plot([x_jump[i], x_jump[i+1]], [y_jump[i], y_jump[i + 1]],
                        '-', color=color, lw=1.2)
                # Arrow at the top of the jump.
                draw_arrow(x_jump[i+1], y_jump[i+1] + arrow_offset, 'up', color1)
            y_jump_up_last = y_jump
        if jump_down:
            if jump_down is True:
                # Full jump: arc for the first 3/4, then straight down by
                # jump_down_height from the arc height at that point.
                X = bgn + (end - bgn)*3/4
                x_jump = np.array(list(x[:3*len(x)//4]) + [X]*(len(x) - 3*len(x)//4))
                Y = np.interp(X, x, y)
                y_jump = np.array(
                    list(y[:3*len(y)//4])
                    + list(np.linspace(
                        Y - 2e-3,
                        Y - jump_down_height,
                        len(y) - 3*len(y)//4,
                    ))
                )
                mid = (y_jump.size - 1)/2
                for i in range(3*len(y)//4, y_jump.size - 1):
                    w = (1 + erf(1.4*(i - mid)/mid))/2
                    color = (1 - w)*color0 + w*color1
                    plt.plot([x_jump[i], x_jump[i+1]], [y_jump[i], y_jump[i + 1]],
                        '-', color=color, lw=1.2)
                # Arrow at the bottom of the jump.
                draw_arrow(x_jump[i+1], y_jump[i+1] - arrow_offset, 'down', color1)
            else:
                # Partial jump, ending at the depth recorded by the
                # previous full down-jump.
                X1 = bgn + 3*(jump_down - bgn)/4
                Y = np.interp(X1, x, y)
                index1 = np.argmin((X1 - x)**2)
                x_jump = np.array([X1]*(1*len(x)//2))
                y_jump = np.linspace(Y - 2e-3, y_jump_down_last[-1], len(x_jump))
                mid = (y_jump.size - 1)/2
                random_fac = 3.70 # because I can't do the math, apparently
                for i in range(y_jump.size - 1):
                    w = (1 + erf(1.4*(i - mid)/mid))/2
                    color = (1 - w)*(color0/(1 + random_fac*index1/len(x_jump))) + w*color1
                    plt.plot([x_jump[i], x_jump[i+1]], [y_jump[i], y_jump[i + 1]],
                        '-', color=color, lw=1.2)
                # Arrow at the bottom of the jump.
                draw_arrow(x_jump[i+1], y_jump[i+1] - arrow_offset, 'down', color1)
            y_jump_down_last = y_jump
    else:
        # Single-color arc with one arrow at 33 % along it.
        plt.plot(x, y, '-', color=color, lw=1.2)
        i = int((x.size - 1)*0.33)
        dy = (y[i+1] - y[i])*get_aspect()
        dx = (x[i+1] - x[i])
        draw_arrow(x[i], y[i], 'down', color, rot=180/np.pi*np.arctan2(dy, dx))
# State shared with step(): last jump paths, so partial jumps can end at the
# same height/depth as the preceding full jump.
y_jump_up_last = None
y_jump_down_last = None
# Specs (all tuned by eye for the figure layout)
height = 0.615 #0.68            # vertical extent of each kick arc
rung_offset = 0.75              # vertical spacing between rungs
rung0_final_step = 0.5 #0.21 #0.457  # length of rung 0's final sync step
offset_y0 = 0                   # y-offset of rung 0 (long-range)
offset_y1 = -1.102*rung_offset  # y-offset of rung 1
offset_y2 = -2*rung_offset      # y-offset of rung 2
offset_ydrift = -2.73*rung_offset  # y-offset of the drift row
end_sync = 1/2 + 1 + 1 + rung0_final_step  # x where synchronisation ends
particle_scatter_size = 98
particle_vert_offset = 0.0135*np.sqrt(particle_scatter_size)
dy_vert = 0.085 #0.079
dy_vert_fac = 1.2
dx_rung0 = 0.0567 # 0.0507
dx_rung1 = 0.033 #0.0295
colors = ['C0', 'C1', 'C2', 'C3']
# Curve through blue points
lw_fat = 14.5                   # line width of the fat highlight curves
alpha_fat = 0.154               # their transparency
def draw_fat_blue_curve(x_offset):
    """Draw a fat, translucent blue highlight curve through the blue
    (rung-0) particle positions, shifted horizontally by *x_offset*.

    A quadratic B-spline is fitted through hand-tuned control points
    spanning from the drift row up past rung 0, then drawn as one wide,
    semi-transparent stroke behind the diagram (zorder -12.9).
    """
    dX_up = 0.017 #-0.015 #0.036
    dX_down = -0.006
    dY_up = 0.1 #0.22
    dY_down = 0.1
    # Control-point x-coordinates, bottom (drift) to top (rung 0).
    X = [
        1.0*dX_down + 1 - 0.015,
        1 + 0.4*dX_down,
        #
        1,
        1 + 1/8,
        0.2*(2*(1 + 1/4) + 3*(1 + 1/4 - dx_rung1)),
        0.2*(2*(1 + 1/2) + 3*(1 + 1/2 - dx_rung0)),
        #
        #(1 + 1/2),
        #(1 + 1/2),
        dX_up + (1 + 1/2),
    ]
    X = np.array(X) + x_offset
    # Matching control-point y-coordinates (weighted rung positions).
    Y = [
        -1.0*dY_down + offset_ydrift + 0.0,
        -0.4*dY_down + offset_ydrift + 0.03,
        #
        0.05 + 0.2*(2*(offset_ydrift) + 3*(offset_ydrift + dy_vert_fac*dy_vert)) + 0.03,
        0.2*(2*(offset_y2) + 3*(offset_y2 - dy_vert_fac*dy_vert)) + 0.03,
        0.2*(2*(offset_y1) + 3*(offset_y1 - dy_vert_fac*dy_vert)),
        0.2*(2*(offset_y0) + 3*(offset_y0 - dy_vert*(1 + dy_vert_fac))),
        #
        #offset_y0,
        #0.4*dY_up + offset_y0,
        1.0*dY_up + offset_y0,
    ]
    # Smoothed quadratic spline through the control points.
    tck, u = scipy.interpolate.splprep([X, Y], s=1.58e-3, k=2)
    unew = np.arange(0, 1.01, 0.01)
    out = scipy.interpolate.splev(unew, tck)
    # Blend C0 with cyan for the highlight color.
    color_C0 = np.asarray(matplotlib.colors.ColorConverter().to_rgb('C0'), dtype=float)
    color_c = np.asarray(matplotlib.colors.ColorConverter().to_rgb('c'), dtype=float)
    w = 0.66
    color = w*color_C0 + (1 - w)*color_c
    plt.plot(out[0], out[1], '-', color=color, lw=lw_fat, alpha=alpha_fat, zorder=-12.9, solid_capstyle='round')
# Fat highlight curves for the two repeatable steps.
draw_fat_blue_curve(0)
draw_fat_blue_curve(1)
# Black vertical highlight bars at t0 and at the synchronisation time.
plt.plot([0, 0], [offset_ydrift - 0.1, offset_y0 + 0.1],
    'k', lw=lw_fat, alpha=alpha_fat, zorder=-12.9, solid_capstyle='round')
plt.plot([end_sync, end_sync], [offset_ydrift - 0.1, offset_y0 + 0.1],
    'k', lw=lw_fat, alpha=alpha_fat, zorder=-12.9, solid_capstyle='round')
# Row labels (rotated, left of the diagram)
x = -0.085
dy = 0.123
fontsize = 11
plt.text(x, offset_y0 - dy, 'rung 0',
    va='bottom', ha='right', fontsize=fontsize, rotation=90)
plt.text(x - 0.067, offset_y0 - dy, 'long-range,',
    va='bottom', ha='right', fontsize=fontsize, rotation=90)
plt.text(x, offset_y1 - dy, 'rung 1',
    va='bottom', ha='right', fontsize=fontsize, rotation=90)
plt.text(x, offset_y2 - dy, 'rung 2',
    va='bottom', ha='right', fontsize=fontsize, rotation=90)
plt.text(x, offset_ydrift + dy, 'drift',
    va='top', ha='right', fontsize=fontsize, rotation=90)
# Delta t braces: an \overbrace built from repeated thin spaces (\,) at
# fontsize 1 gives a brace of controllable width; the label is a separate
# text above it.
y = 0.529
space = r'\,'*736
plt.text(0.5, y,
    rf'$\overbrace{{{space}}}^{{}}$',
    fontsize=1, ha='center', va='center', rotation=0, color='k', zorder=np.inf)
plt.text(0.5, y + 0.140, r'initial, $\Delta t$',
    fontsize=fontsize, ha='center', va='center', color='k', zorder=np.inf)
plt.text(1.5, y,
    rf'$\overbrace{{{space}}}^{{}}$',
    fontsize=1, ha='center', va='center', rotation=0, color='k', zorder=np.inf)
plt.text(1.5, y + 0.140, r'repeatable, $\Delta t$',
    fontsize=fontsize, ha='center', va='center', color='k', zorder=np.inf)
# Narrower brace for the (shorter) synchronisation step.
space = r'\,'*int(round(len(space)/2*(end_sync - 2)/1) - 1)
plt.text(0.5*(2 + end_sync), y,
    rf'$\overbrace{{{space}}}^{{}}$',
    fontsize=1, ha='center', va='center', rotation=0, color='k', zorder=np.inf)
plt.text(0.5*(2 + end_sync), y + 0.140, r'synchronisation, $\leq\Delta t$',
    fontsize=fontsize, ha='center', va='center', color='k', zorder=np.inf)
# Time-step tick labels along the bottom.
y = -2.47
plt.text(0, y, r'$t_0$', fontsize=fontsize, ha='center', va='top')
plt.text(1, y, r'$t_1$', fontsize=fontsize, ha='center', va='top')
plt.text(2, y, r'$t_2$', fontsize=fontsize, ha='center', va='top')
plt.text(end_sync, y, r'$t_{\text{sync}}$', fontsize=fontsize, ha='center', va='top')
# For testing for ellipticity
"""
THETA = np.linspace(0, 2*np.pi, 200)
end = 0
for i in | |
np.digitize(self.time_uniform,
self.trajectories[t, :, 0], right=False) - 1, 1]
#self.z_interpolated[t, :] = self.interpolate_trajectories(t, noise=noise)
def interpolate_trajectories(self, t, noise=0):
    """Resample trajectory *t* onto the uniform time grid.

    For every uniform time point, the value of the most recent hop at or
    before that time is taken (zero-order hold), then Gaussian noise of
    the given scale is added.

    :param t: trajectory index into self.trajectories
    :param noise: standard deviation of additive Gaussian noise
    :return: resampled values, shape (len(self.time_uniform),)
    """
    times = self.trajectories[t, :, 0]
    values = self.trajectories[t, :, 1]
    resampled = np.zeros([self.time_uniform.size])
    for k, t_query in enumerate(self.time_uniform):
        nearest = np.argmin(np.abs(t_query - times))
        # Step back one hop if the nearest recorded time lies ahead of
        # the query time (zero-order hold semantics).
        if t_query - times[nearest] < 0:
            nearest -= 1
        resampled[k] = values[nearest]
    return resampled + np.random.normal(scale=noise, size=self.time_uniform.size)
def calculate_msd(self, ensemble=False):
    """ Calculate mean squared displacement of time series
    Stores the result in self.msd (one MSD curve per trajectory, shape
    transposed back so trajectories are on axis 0). Relies on the
    project-local `timeseries.msd` helper.
    :param ensemble: if True, calculate the ensemble msd
    :type ensemble: bool
    """
    print('Calculating MSD...', end='', flush=True)
    # start = timer.time() # This is just here to test parallelization
    # z_interpolated is (ntraj, ntimes); timeseries.msd expects
    # (ntimes, ntraj, 1), hence the transpose + new axis, and the final
    # .T restores trajectory-major order.
    self.msd = timeseries.msd(self.z_interpolated.T[..., np.newaxis], 0, ensemble=ensemble).T
    # print('Done in %.3f seconds' % (timer.time() - start))
def plot_trajectory(self, n, show=False, save=True, savename='ctrw_trajectory.pdf'):
    """ Plot a CTRW trajectory
    :param n: Trajectory number
    :param show: show plot
    :param save: save plot under savename
    :param savename: name under which to save plot
    :type n: int
    :type show: bool
    :type save: bool
    :type savename: str
    """
    plt.figure()
    # plt.plot(self.trajectory_hops[n, :, 0] / 1000000, self.trajectory_hops[n, :, 1], linewidth=2)
    # Time axis is stored in ns (presumably — TODO confirm); divide by
    # 1e6 to plot in ms.
    plt.plot(self.trajectories[n, :, 0] / 1000000, self.trajectories[n, :, 1], linewidth=2)
    plt.gcf().get_axes()[0].tick_params(labelsize=14)
    plt.xlabel('Time (ms)', fontsize=14)
    plt.ylabel('$z$-coordinate (nm)', fontsize=14)
    plt.tight_layout()
    if show:
        plt.show(block=True)
    if save:
        plt.savefig(savename)
def bootstrap_msd(self, nboot=200, fit_power_law=False, fit_linear=False):
    """Bootstrap the averaged MSD curve and optionally fit each trial.

    Fills self.bootstraps (nboot averaged MSD curves) and, if a fit is
    requested, self.fit_parameters (nboot x 2). Note: if both fit flags
    are True, the linear fit overwrites the power-law fit.

    :param nboot: number of bootstrap trials
    :param fit_power_law: fit a power law to each bootstrapped MSD
    :param fit_linear: fit a straight line to each bootstrapped MSD
    :type nboot: int
    :type fit_power_law: bool
    :type fit_linear: bool
    """
    if fit_power_law or fit_linear:
        self.fit_parameters = np.zeros([nboot, 2])
    # The average MSD is a collective property, so each bootstrap trial should be an average of self.ntrials
    # ranodomly reconstructed simulated trajectories
    print('Bootstrapping...')
    # This is not any faster than the serial version for some reason
    # if self.nt > 1:
    #     with Pool(self.nt) as pool:
    #         self.bootstraps = np.array(pool.map(self.bootstrap_trial, range(nboot)))
    # else:
    self.bootstraps = np.zeros([nboot, self.msd.shape[1]])
    for i in tqdm.tqdm(range(nboot)):
        self.bootstraps[i, :] = self.bootstrap_trial()
    for i in range(nboot):
        if fit_power_law:
            self.fit_parameters[i, :] = self.fit_power_law(self.bootstraps[i, :])
        if fit_linear:
            self.fit_parameters[i, :] = self.fit_line(self.bootstraps[i, :])
def bootstrap_trial(self):
    """Return one bootstrapped average MSD curve.

    Resamples whole trajectories (rows of self.msd) with replacement and
    averages them, preserving the collective nature of the mean MSD.
    """
    n_traj = self.msd.shape[0]
    rows = np.random.choice(n_traj, size=n_traj, replace=True)
    # Fancy indexing is equivalent to .take(rows, axis=0) here.
    return self.msd[rows].mean(axis=0)
def fit_power_law(self, y, cut=0.6, interactive=True):
    """ Fit power law to MSD curves. Should probably be replaced by MLE
    TODO: weighted fit (need to do error analysis first)
    Performs a linear least-squares fit in log-log space via the
    project-local Poly_fit helper. Side effect: stores `cut` in
    self.fit_cut for later plotting. The `interactive` parameter is
    currently unused.
    :param y: y-axis values of MSD curve (x-axis values are values from self.time_uniform
    :param cut: fraction of trajectory to include in fit
    :type y: np.ndarray
    :type cut: float
    :return: Coefficient and exponent in power low of form [coefficient, power]
    """
    self.fit_cut = cut
    start = np.where(y > 1e-6)[0][0]  # find first non-zero value since we will take the log
    end = int(self.fit_cut * len(self.time_uniform))  # fit up until a fraction, cut, of the trajectory
    # print(y[start:end])
    # exit()
    # fit line to linear log plot; A[0] is the log-intercept, A[1] the slope,
    # so [exp(A[0]), A[1]] = [coefficient, power].
    A = Poly_fit.poly_fit(np.log(self.time_uniform[start:end]), np.log(y[start:end]), 1)[-1]
    return [np.exp(A[0]), A[1]]
def fit_line(self, y, start=0.1, cut=0.5):
    """ Fit line to MSD curve
    Side effects: stores `start` and `cut` in self.fit_start and
    self.fit_cut for later plotting.
    :param y: y-axis values of MSD curve (x-axis values are values from self.time_uniform
    :param start: beginning fraction of trajectory to cut out of calculations
    :param cut: fraction of trajectory to include in fit
    :type y: np.ndarray
    :type cut: float
    :return: slope and intercept of fit line [y-intercept, slope]
    """
    self.fit_start = start
    self.fit_cut = cut
    # Convert the fractional window into index bounds on the uniform grid.
    start = int(self.fit_start * len(self.time_uniform))
    end = int(self.fit_cut * len(self.time_uniform))
    A = Poly_fit.poly_fit(self.time_uniform[start:end], y[start:end], 1)[-1]
    return [A[0], A[1]]
def plot_msd(self, confidence=95, plot_power_law=False, plot_linear=False, show=True, end_frame=None, newfig=True,
             alpha=0.5):
    """ Plot averaged mean squared displacement with error bars
    :param confidence: confidence interval for error bars
    :param plot_power_law: if True, fit power law to MSD
    :param plot_linear: if True, fit a line to MSD
    :param show: if True, show plot after this function is run
    :param end_frame: last frame to include in fit to MSD
    :param newfig: Make this a figure of its own. Set to False if trying to plot multiple MSDs on top of each other
    using this function.
    :param alpha: opacity of errorbars
    :type confidence: float
    :type plot_power_law: bool
    :type plot_linear: bool
    :type show: bool
    :type end_frame: int
    :type newfig: bool
    """
    if newfig:
        plt.figure()
    mean = self.msd.mean(axis=0)
    if end_frame is not None:
        mean = mean[:end_frame]
        # NOTE(review): this truncates self.time_uniform in place, which
        # affects every later call that uses it.
        self.time_uniform = self.time_uniform[:end_frame]
    plt.plot(self.time_uniform, mean, linewidth=2)
    if self.bootstraps is not None:
        # Shaded confidence band from the bootstrapped MSD curves.
        error = stats.confidence_interval(self.bootstraps, confidence)
        if end_frame is not None:
            error = error[:, :end_frame]
        plt.fill_between(self.time_uniform, error[1, :] + mean, mean - error[0, :], alpha=alpha)
        self.final_msd = [mean[-1], mean[-1] - error[0, -1], error[1, -1] + mean[-1]]
        print('Estimated MSD: %.2f [%.2f, %.2f]' % (self.final_msd[0], self.final_msd[1], self.final_msd[2]))
    if plot_power_law:
        fit = self.fit_power_law(self.msd.mean(axis=0))
        end = int(self.fit_cut * len(self.time_uniform))
        # Report the spread of the exponent over the bootstrap fits.
        print('Estimated alpha parameter: %.2f +/- %.2f' % (np.mean(self.fit_parameters[:, 1]),
                                                            np.std(self.fit_parameters[:, 1])))
        plt.plot(self.time_uniform[:end], fitting_functions.power_law(self.time_uniform[:end], fit[0], fit[1]), '--',
                 label='Power law fit')
    if plot_linear:
        fit = self.fit_line(self.msd.mean(axis=0))
        start = int(self.fit_start * len(self.time_uniform))
        end = int(self.fit_cut * len(self.time_uniform))
        print('Estimated slope of line: %.4f +/- %.4f' % (100*np.mean(self.fit_parameters[:, 1]),
                                                          100*np.std(self.fit_parameters[:, 1])))
        plt.plot(self.time_uniform[start:end], fitting_functions.line(fit[1], self.time_uniform[start:end], fit[0]),
                 '--', label='Linear fit')
    if plot_linear | plot_power_law:
        plt.legend()
    plt.xlabel('Time (ns)', fontsize=14)
    plt.ylabel('Mean squared displacement (nm$^2$)', fontsize=14)
    plt.gcf().get_axes()[0].tick_params(labelsize=14)
    plt.tight_layout()
    #plt.savefig('msd_ctrw.pdf')
    #np.savez_compressed('msd.npz', msd=self.msd.mean(axis=0), error=error, time=self.time_uniform)
    if show:
        plt.show()
def step_autocorrelation(self):
    """ Calculate autocorrelation of step length and direction
    Computes a per-trajectory ACF of the hop sequences in self.steps
    (via the project-local `timeseries.acf`), then averages over the
    non-zero entries at each lag. Trajectories with fewer than two hops
    are excluded. Result is stored in self.acf (1-D, length max_hops).
    """
    max_hops = max([len(x) for x in self.steps])
    self.acf = np.zeros([len(self.steps), max_hops])
    keep = []  # list to hold indices of trajectories with a non-zero amount of hops
    for i in range(len(self.steps)):
        hops = self.steps[i]
        if len(hops) > 1:
            self.acf[i, :len(self.steps[i])] = timeseries.acf(self.steps[i])
            keep.append(i)
    self.acf = self.acf[keep, :]
    # Average each lag over trajectories that actually have data there
    # (zeros from short trajectories would bias a plain mean).
    self.acf = np.array([self.acf[np.nonzero(self.acf[:, i]), i].mean() for i in range(max_hops)])
    #self.acf = timeseries.step_autocorrelation(self.z_interpolated.T[..., np.newaxis])
def plot_autocorrelation(self, show=True, fit=False):
    """ Plot autocorrelation function
    :param show: show plot
    :param fit: fit the autocorrelation function for subordinate fractional brownian motion to acf
    :type show: bool
    :type fit: bool
    """
    # acf = self.acf.mean(axis=0)
    if fit:
        hdip = self.acf[1]
        H = np.log(2 * hdip + 2) / (2 * np.log(2))  # initial guess at H based on first dip in autocovariance
        print('First dip: %.3f' % H)
        from scipy.optimize import curve_fit
        # Fit the Hurst autocovariance model to the first max_k lags only.
        max_k = 5
        h_opt = curve_fit(fitting_functions.hurst_autocovariance, np.arange(max_k + 1), self.acf[:(max_k + 1)],
                          p0=H)[0]
        print('Fit H: %.3f' % h_opt)
    plt.figure()
    plt.plot(self.time_uniform[:self.acf.size], self.acf)
    plt.xlabel('Steps', fontsize=14)
    plt.ylabel('Autocorrelation', fontsize=14)
    plt.gcf().get_axes()[0].tick_params(labelsize=14)
    plt.tight_layout()
    if show:
        plt.show()
if __name__ == "__main__":

    args = initialize().parse_args()

    # Build and run the continuous-time random walk from command-line parameters.
    ctrw = CTRW(args.steps, args.ntraj, hop_dist=args.hop_length_distribution, dwell_dist=args.dwell_time_distribution,
                hop_sigma=args.hop_sigma, alpha=args.alpha, dt=args.dt, nt=args.nthreads, H=args.hurst, lamb=args.lamb)
    ctrw.generate_trajectories(fixed_time=args.fix_time, noise=args.noise, limit=args.upper_limit)
    ctrw.calculate_msd(ensemble=args.ensemble)
    ctrw.bootstrap_msd(nboot=args.nboot, fit_power_law=args.fit_power_law, fit_linear=args.fit_line)
    ctrw.plot_msd(plot_power_law=args.fit_power_law, plot_linear=args.fit_line, show=True, end_frame=5000)
    if args.autocorrelation:
        ctrw.step_autocorrelation()
        ctrw.plot_autocorrelation(show=False, fit=True)
    # NOTE: everything below this exit() is unreachable demo code kept for reference.
    exit()
    #
    # last = ctrw.msd.mean(axis=0)[int(0.5*ctrw.time_uniform.size)]
    # CI = stats.confidence_interval(ctrw.bootstraps, 95)[:, int(0.5*ctrw.time_uniform.size)]
    # print("MSD at 50 %% of time: %.2f 95 %% CI [%.2f, %.2f]" % (last, last - CI[0], CI[1] + last))
    #
    # # run below with args.ensemble = False, fit_line = True
    # ctrw.calculate_msd(ensemble=True)
    # ctrw.bootstrap_msd(nboot=args.nboot, fit_power_law=True)
    # ctrw.plot_msd(plot_power_law=True, show=False)
    #
    # last = ctrw.msd.mean(axis=0)[-1]
    # CI = stats.confidence_interval(ctrw.bootstraps, 95)[:, -1]
    # print("MSD at 100 %% of time: %.2f 95 %% CI [%.2f, %.2f]" % (last, last - CI[0], CI[1] + last))
    # # plt.show()

    # for ageing demonstration: ensemble MSD for walks of increasing length
    steps = [500, 1000, 2000]
    walks = []
    plt.figure()
    for i in steps:
        ctrw = CTRW(i, args.ntraj, hop_dist=args.hop_length_distribution,
                    dwell_dist=args.dwell_time_distribution,
                    hop_sigma=args.hop_sigma, alpha=args.alpha)
        ctrw.generate_trajectories(fixed_time=True)
        ctrw.calculate_msd(ensemble=True)
        plt.plot(ctrw.time_uniform, ctrw.msd.mean(axis=0), linewidth=2, label='Length = %s ns' % i)
    plt.legend()
    plt.xlabel('Time (ns)', fontsize=14)
    plt.ylabel('Mean squared displacement (nm$^2$)', fontsize=14)
    plt.gcf().get_axes()[0].tick_params(labelsize=14)
    plt.tight_layout()
    plt.show()
    exit()

    # for plotting MSDs using a bunch of different dwell time limits
    # limits = [800, 1600, 3200, 6400, 12800, 25600, 51200, 102800]
    # walks = []
    # ctrw = CTRW(args.steps, args.ntraj, hop_dist=args.hop_length_distribution, dwell_dist=args.dwell_time_distribution,
    #             hop_sigma=args.hop_sigma, alpha=args.alpha)
    #
    # plt.figure()
    # for i in limits:
    #     ctrw.generate_trajectories(limit=i)
    #     ctrw.calculate_msd(ensemble=True)
    #     plt.plot(ctrw.time_uniform, ctrw.msd.mean(axis=0), linewidth=2, label='Limit = %s ns' % i)
    #
    # plt.legend()
    # plt.xlabel('Time (ns)', fontsize=14)
    # plt.ylabel('Mean squared displacement (nm$^2$)', fontsize=14)
    # plt.gcf().get_axes()[0].tick_params(labelsize=14)
    # plt.tight_layout()
    # plt.show()
    # exit()

    # for generated MSDs with varied amount of noise (with all else fixed).
    # noise = [1, 0.1, 0]
    # walks = []
    # ctrw = CTRW(args.steps, args.ntraj, hop_dist=args.hop_length_distribution, dwell_dist=args.dwell_time_distribution,
    #             hop_sigma=args.hop_sigma, alpha=args.alpha)
    #
    # plt.figure()
    # for i, n in enumerate(noise):
    #     np.random.seed(1)  # set random seed so trajectory generation will always be the same
    #     ctrw.generate_trajectories(noise=n, nt=args.nthreads)
    #     ctrw.calculate_msd(ensemble=args.ensemble)
    #     #plt.plot(ctrw.time_uniform,
<reponame>bckim92/dotfiles
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
@wookayin's โโโโโโโโโโโโโโ โโโโโโโโโโโโโโโโ
โโโโโโโ โโโโโโ โโโโโโโโโโโโโโโโโโโโโโโ โโโโโโโโโโโโโโโโ
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ โโโโโโ โโโโโโ โโโโโโโโ
โโโ โโโโโโ โโโ โโโ โโโโโโ โโโโโโ โโโโโโ โโโโโโโโ
โโโโโโโโโโโโโโโโ โโโ โโโ โโโโโโโโโโโโโโโโโโโโโโโโโโโ
โโโโโโโ โโโโโโ โโโ โโโ โโโโโโโโโโโโโโโโโโโโโโโโโโโ
https://dotfiles.wook.kr/
'''
print(__doc__)  # print logo.

import argparse

# Command-line flags controlling the dotfiles installation.
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--force', action="store_true", default=False,
                    help='If set, it will override existing symbolic links')
parser.add_argument('--skip-vimplug', action='store_true',
                    help='If set, do not update vim plugins.')
parser.add_argument('--skip-zgen', '--skip-zplug', action='store_true',
                    help='If set, skip zgen updates.')
args = parser.parse_args()
################# BEGIN OF FIXME #################

# Task Definition
# (path of target symlink) : (location of source file in the repository)
# Each entry below is created as: ~/<target> -> <repo>/<source>
tasks = {
    # SHELLS
    '~/.bashrc' : 'bashrc',
    '~/.screenrc' : 'screenrc',
    # VIM
    '~/.vimrc' : 'vim/vimrc',
    '~/.vim' : 'vim',
    '~/.vim/autoload/plug.vim' : 'vim/bundle/vim-plug/plug.vim',
    # NeoVIM
    '~/.config/nvim' : 'nvim',
    # GIT
    '~/.gitconfig' : 'git/gitconfig',
    '~/.gitignore' : 'git/gitignore',
    # ZSH
    '~/.zgen' : 'zsh/zgen',
    '~/.zsh' : 'zsh',
    '~/.zlogin' : 'zsh/zlogin',
    '~/.zlogout' : 'zsh/zlogout',
    '~/.zpreztorc': 'zsh/zpreztorc',
    '~/.zprofile' : 'zsh/zprofile',
    '~/.zshenv' : 'zsh/zshenv',
    '~/.zshrc' : 'zsh/zshrc',
    # Bins
    '~/.local/bin/dotfiles' : 'bin/dotfiles',
    '~/.local/bin/fasd' : 'zsh/fasd/fasd',
    '~/.local/bin/is_mosh' : 'zsh/is_mosh/is_mosh',
    '~/.local/bin/fzf' : '~/.fzf/bin/fzf',  # fzf is at $HOME/.fzf
    # X
    '~/.Xmodmap' : 'Xmodmap',
    # GTK
    '~/.gtkrc-2.0' : 'gtkrc-2.0',
    # kitty
    '~/.config/kitty/kitty.conf': 'config/kitty/kitty.conf',
    # tmux
    '~/.tmux' : 'tmux',
    '~/.tmux.conf' : 'tmux/tmux.conf',
    # .config (XDG-style)
    '~/.config/terminator' : 'config/terminator',
    '~/.config/pudb/pudb.cfg' : 'config/pudb/pudb.cfg',
    '~/.config/fsh/wook.ini' : 'config/fsh/wook.ini',
    # pip and python
    #'~/.pip/pip.conf' : 'pip/pip.conf',
    '~/.pythonrc.py' : 'python/pythonrc.py',
    '~/.pylintrc' : 'python/pylintrc',
    '~/.condarc' : 'python/condarc',
    '~/.config/pycodestyle' : 'python/pycodestyle',
    '~/.ptpython/config.py' : 'python/ptpython.config.py',
}
# find_executable is used below to detect whether `nvim` is installed.
# NOTE(review): distutils is deprecated (removed in Python 3.12); shutil.which
# is the stdlib replacement -- consider migrating.
try:
    from distutils.spawn import find_executable
except ImportError:
    # In some environments, distutils might not be available.
    import sys
    sys.stderr.write("WARNING: distutils not available\n")
    find_executable = lambda _: False  # type: ignore
# Shell snippets (or plain commands) executed after the symlinks are created.
# Each entry is either an executable path or a '#!/bin/bash' script body.
post_actions = []

# Sanity check: the core rc files must be symlinks into the dotfiles repo.
post_actions += [
    '''#!/bin/bash
# Check whether ~/.vim and ~/.zsh are well-configured
for f in ~/.vim ~/.zsh ~/.vimrc ~/.zshrc; do
if ! readlink $f >/dev/null; then
echo -e "\033[0;31m\
WARNING: $f is not a symbolic link to ~/.dotfiles.
Please remove your local folder/file $f and try again.\033[0m"
echo -n "(Press any key to continue) "; read user_confirm
exit 1;
else
echo "$f --> $(readlink $f)"
fi
done
''']

# Fetch standalone helper scripts into ~/.local/bin.
post_actions += [
    '''#!/bin/bash
# Download command line scripts
mkdir -p "$HOME/.local/bin/"
_download() {
curl -L "$2" > "$1" && chmod +x "$1"
}
ret=0
set -v
_download "$HOME/.local/bin/video2gif" "https://raw.githubusercontent.com/wookayin/video2gif/master/video2gif" || ret=1
exit $ret;
''']

# Regenerate the zgen plugin cache, unless --skip-zgen was given.
post_actions += [
    '''#!/bin/bash
# Update zgen modules and cache (the init file)
zsh -c "
# source zplug and list plugins
DOTFILES_UPDATE=1 __p9k_instant_prompt_disabled=1 source ${HOME}/.zshrc
if ! which zgen > /dev/null; then
echo -e '\033[0;31m\
ERROR: zgen not found. Double check the submodule exists, and you have a valid ~/.zshrc!\033[0m'
ls -alh ~/.zsh/zgen/
ls -alh ~/.zshrc
exit 1;
fi
zgen reset
zgen update
"
''' if not args.skip_zgen else \
    '# zgen update (Skipped)'
]

# Make sure the neovim python packages are installed.
post_actions += [
    '''#!/bin/bash
# validate neovim package installation on python2/3 and automatically install if missing
bash "etc/install-neovim-py.sh"
''']

# Prefer neovim for plugin management when it is available on PATH.
vim = 'nvim' if find_executable('nvim') else 'vim'
post_actions += [
    # Run vim-plug installation
    {'install': '{vim} +PlugInstall +qall'.format(vim=vim),
     'update': '{vim} +PlugUpdate +qall'.format(vim=vim),
     'none': '# {vim} +PlugUpdate (Skipped)'.format(vim=vim)
     }['update' if not args.skip_vimplug else 'none']
]

# Install tmux plugins and verify the tmux version is recent enough.
post_actions += [
    # Install tmux plugins via tpm
    '~/.tmux/plugins/tpm/bin/install_plugins',
    r'''#!/bin/bash
# Check tmux version >= 2.3 (or use `dotfiles install tmux`)
_version_check() { # target_ver current_ver
[ "$1" = "$(echo -e "$1\n$2" | sort -s -t- -k 2,2n | sort -t. -s -k 1,1n -k 2,2n | head -n1)" ]
}
if ! _version_check "2.3" "$(tmux -V | cut -d' ' -f2)"; then
echo -en "\033[0;33m"
echo -e "$(tmux -V) is too old. Contact system administrator, or:"
echo -e " $ dotfiles install tmux \033[0m (installs to ~/.local/, if you don't have sudo)"
exit 1;
else
echo "$(which tmux): $(tmux -V)"
fi
''']

# Prepare coc.nvim: config directory plus a local node.js installation.
post_actions += [
    r'''#!/bin/bash
# Setting up for coc.nvim (~/.config/coc, node.js)
# (i) create ~/.config/coc directory if not exists
GREEN="\033[0;32m"; YELLOW="\033[0;33m"; RESET="\033[0m";
coc_dir="$HOME/.config/coc/"
if [ ! -d "$coc_dir" ]; then
mkdir -p "$coc_dir" || exit 1;
echo "Created: $coc_dir"
else
echo -e "${GREEN}coc directory:${RESET} $coc_dir"
fi
# (ii) validate or auto-install node.js locally
bash "etc/install-node.sh" || exit 1;
''']

# Offer to switch the login shell to zsh.
post_actions += [
    r'''#!/bin/bash
# Change default shell to zsh
/bin/zsh --version >/dev/null || (\
echo -e "\033[0;31mError: /bin/zsh not found. Please install zsh.\033[0m"; exit 1)
if [[ ! "$SHELL" = *zsh ]]; then
echo -e '\033[0;33mPlease type your password if you wish to change the default shell to ZSH\e[m'
chsh -s /bin/zsh && echo -e 'Successfully changed the default shell, please re-login'
else
echo -e "\033[0;32m\$SHELL is already zsh.\033[0m $(zsh --version)"
fi
''']

# Bootstrap ~/.gitconfig.secret with the user's name/email.
post_actions += [
    r'''#!/bin/bash
# Create ~/.gitconfig.secret file and check user configuration
if [ ! -f ~/.gitconfig.secret ]; then
cat > ~/.gitconfig.secret <<EOL
# vim: set ft=gitconfig:
EOL
fi
if ! git config --file ~/.gitconfig.secret user.name 2>&1 > /dev/null || \
! git config --file ~/.gitconfig.secret user.email 2>&1 > /dev/null; then echo -ne '
\033[1;33m[!!!] Please configure git user name and email:
git config --file ~/.gitconfig.secret user.name "(YOUR NAME)"
git config --file ~/.gitconfig.secret user.email "(YOUR EMAIL)"
\033[0m'
echo -en '\n'
git config --file ~/.gitconfig.secret user.name "<NAME>"
git config --file ~/.gitconfig.secret user.email "<EMAIL>"
fi
# get the current config
echo -en '\033[0;32m';
echo -en 'user.name : '; git config --file ~/.gitconfig.secret user.name
echo -en 'user.email : '; git config --file ~/.gitconfig.secret user.email
echo -en '\033[0m';
''']
################# END OF FIXME #################
def _wrap_colors(ansicode):
    """Return a function that wraps its argument in the given ANSI color code."""
    def colorize(msg):
        return ansicode + str(msg) + '\033[0m'
    return colorize

# Color helpers: e.g. GREEN("text") -> green-colored terminal string.
GRAY = _wrap_colors("\033[0;37m")
WHITE = _wrap_colors("\033[1;37m")
RED = _wrap_colors("\033[0;31m")
GREEN = _wrap_colors("\033[0;32m")
YELLOW = _wrap_colors("\033[0;33m")
CYAN = _wrap_colors("\033[0;36m")
BLUE = _wrap_colors("\033[0;34m")
import os
import sys
import subprocess
from signal import signal, SIGPIPE, SIG_DFL
from sys import stderr

# Python 2/3 compatibility shims: provide `unicode(s, enc)` and `input`
# with python3 semantics on both interpreters.
if sys.version_info[0] >= 3:  # python3
    unicode = lambda s, _: str(s)
    from builtins import input
else:  # python2
    input = sys.modules['__builtin__'].raw_input
def log(msg, cr=True):
    """Write *msg* to stderr, appending a trailing newline unless cr is False."""
    stderr.write(msg + '\n' if cr else msg)
def log_boxed(msg, color_fn=WHITE, use_bold=False, len_adjust=0):
    """Print *msg* to stderr inside a unicode box, colored via *color_fn*.

    :param msg: the text to display.
    :param color_fn: one of the color wrappers (WHITE, GREEN, CYAN, ...).
    :param use_bold: draw the box with heavy (bold) box-drawing characters.
    :param len_adjust: manual correction to the computed display width
        (useful when msg contains wide characters).
    """
    import unicodedata
    pad_msg = (" " + msg + " ")
    # Display width: count only non-combining characters.
    l = sum(not unicodedata.combining(ch) for ch in unicode(pad_msg, 'utf-8')) + len_adjust
    # BUGFIX: the box-drawing characters had been mojibake-corrupted (every
    # distinct character replaced by the same garbled glyph); restored to the
    # conventional heavy/light unicode box-drawing sets.
    if use_bold:
        log(color_fn("┏" + ("━" * l) + "┓\n" +
                     "┃" + pad_msg + "┃\n" +
                     "┗" + ("━" * l) + "┛\n"), cr=False)
    else:
        log(color_fn("┌" + ("─" * l) + "┐\n" +
                     "│" + pad_msg + "│\n" +
                     "└" + ("─" * l) + "┘\n"), cr=False)
def makedirs(target, mode=511, exist_ok=False):
    """os.makedirs backport that tolerates an existing directory when
    exist_ok is set (python2's os.makedirs has no exist_ok parameter)."""
    import errno
    try:
        os.makedirs(target, mode=mode)
    except OSError as ex:
        if not (exist_ok and ex.errno == errno.EEXIST):
            raise
# get current directory (absolute path) and run everything relative to the repo root
current_dir = os.path.abspath(os.path.dirname(__file__))
os.chdir(current_dir)

# Check if git submodules are loaded properly: `git submodule status` marks each
# problematic submodule with '+' (needs update), '-' (not initialized) or
# 'U' (conflict) in the first column.
stat = subprocess.check_output("git submodule status --recursive",
                               shell=True, universal_newlines=True)
submodule_issues = [(l.split()[1], l[0]) for l in stat.split('\n') if len(l) and l[0] != ' ']

if submodule_issues:
    stat_messages = {'+': 'needs update', '-': 'not initialized', 'U': 'conflict!'}
    for (submodule_name, submodule_stat) in submodule_issues:
        log(RED("git submodule {name} : {status}".format(
            name=submodule_name,
            status=stat_messages.get(submodule_stat, '(Unknown)'))))
    log(RED(" you may run: $ git submodule update --init --recursive"))

    log("")
    log(YELLOW("Do you want to update submodules? (y/n) "), cr=False)
    shall_we = (input().lower() == 'y')
    if shall_we:
        git_submodule_update_cmd = 'git submodule update --init --recursive'
        # git 2.8+ supports parallel submodule fetching.
        # BUGFIX: the previous check did `str(check_output(...)) >= '2.8'`; on
        # python3 that compares the textual repr "b'2.x...'" lexicographically
        # and was always True. Decode the output and compare numeric
        # (major, minor) tuples instead. Any failure keeps the plain command.
        try:
            git_version = subprocess.check_output(
                """git --version | awk '{print $3}'""",
                shell=True, universal_newlines=True).strip()
            if tuple(int(x) for x in git_version.split('.')[:2]) >= (2, 8):
                git_submodule_update_cmd += ' --jobs 8'
        except Exception:
            pass
        log("Running: %s" % BLUE(git_submodule_update_cmd))
        subprocess.call(git_submodule_update_cmd, shell=True)
    else:
        log(RED("Aborted."))
        sys.exit(1)
log_boxed("Creating symbolic links", color_fn=CYAN)
# Create each symlink defined in `tasks` (target -> repo source).
for target, source in sorted(tasks.items()):
    # normalize paths: sources are relative to the repo, targets to $HOME
    source = os.path.join(current_dir, os.path.expanduser(source))
    target = os.path.expanduser(target)

    # bad entry if source does not exists...
    if not os.path.lexists(source):
        log(RED("source %s : does not exist" % source))
        continue

    # if --force option is given, delete and override the previous symlink;
    # dangling symlinks are always replaced.
    if os.path.lexists(target):
        is_broken_link = os.path.islink(target) and not os.path.exists(os.readlink(target))
        if args.force or is_broken_link:
            if os.path.islink(target):
                os.unlink(target)
            else:
                # never delete a real file/directory, even with --force
                log("{:50s} : {}".format(
                    BLUE(target),
                    YELLOW("already exists but not a symbolic link; --force option ignored")
                ))
        else:
            log("{:50s} : {}".format(
                BLUE(target),
                GRAY("already exists, skipped") if os.path.islink(target) \
                    else YELLOW("exists, but not a symbolic link. Check by yourself!!")
            ))

    # make a symbolic link if available
    if not os.path.lexists(target):
        mkdir_target = os.path.split(target)[0]
        makedirs(mkdir_target, exist_ok=True)
        # NOTE(review): this logs 'Created directory' even when the parent
        # directory already existed.
        log(GREEN('Created directory : %s' % mkdir_target))
        os.symlink(source, target)
        log("{:50s} : {}".format(
            BLUE(target),
            GREEN("symlink created from '%s'" % source)
        ))
errors = []
for action in post_actions:
if not action:
continue
action_title = action.strip().split('\n')[0].strip()
if action_title == '#!/bin/bash':
action_title = action.strip().split('\n')[1].strip()
log("\n", cr=False)
log_boxed("Executing: " + action_title, | |
)
"""
Key, val pairs of the parameters (`F0`, `F1`, ..., including glitch parameters),
and the units (`Hz`, `Hz/s`, ...).
"""
transform_dictionary = dict(
tglitch={
"multiplier": 1 / 86400.0,
"subtractor": "minStartTime",
"unit": "day",
"label": r"$t^{g}_0$ \n [d]",
}
)
"""
Key, val pairs of the parameters (`F0`, `F1`, ...), where the key is
itself a dictionary which can item `multiplier`, `subtractor`, or
`unit` by which to transform by and update the units.
"""
@helper_functions.initializer
def __init__(
    self,
    theta_prior,
    tref,
    label,
    outdir="data",
    minStartTime=None,
    maxStartTime=None,
    sftfilepattern=None,
    detectors=None,
    nsteps=[100, 100],
    nwalkers=100,
    ntemps=1,
    log10beta_min=-5,
    theta_initial=None,
    rhohatmax=1000,
    binary=False,
    BSGL=False,
    SSBprec=None,
    RngMedWindow=None,
    minCoverFreq=None,
    maxCoverFreq=None,
    injectSources=None,
    assumeSqrtSX=None,
    dtglitchmin=1 * 86400,
    theta0_idx=0,
    nglitch=1,
    earth_ephem=None,
    sun_ephem=None,
    allowedMismatchFromSFTLength=None,
):
    """
    Parameters
    ----------
    nglitch: int
        The number of glitches to allow
    dtglitchmin: int
        The minimum duration (in seconds) of a segment between two glitches
        or a glitch and the start/end of the data
    theta0_idx: int
        Index (zero-based) of which segment the theta refers to - useful
        if providing a tight prior on theta to allow the signal to jump
        to theta (and not just from)
    """
    # NOTE(review): @helper_functions.initializer presumably auto-assigns all
    # constructor arguments onto self (attributes such as self.nglitch are read
    # below without an explicit assignment) -- confirm against helper_functions.
    self._set_init_params_dict(locals())
    os.makedirs(outdir, exist_ok=True)
    self.output_file_header = self.get_output_file_header()
    self._add_log_file(self.output_file_header)
    logging.info(
        (
            "Set-up MCMC glitch search with {} glitches for model {}" " on data {}"
        ).format(self.nglitch, self.label, self.sftfilepattern)
    )
    self.pickle_path = os.path.join(self.outdir, self.label + "_saved_data.p")
    self._unpack_input_theta()
    self.ndim = len(self.theta_keys)
    # Temperature ladder for parallel tempering; falsy log10beta_min disables it.
    if self.log10beta_min:
        self.betas = np.logspace(0, self.log10beta_min, self.ntemps)
    else:
        self.betas = None
    # NOTE(review): `args` is a module-level global (parsed command line);
    # --clean archives any previously cached results before re-running.
    if args.clean and os.path.isfile(self.pickle_path):
        os.rename(self.pickle_path, self.pickle_path + ".old")
    self.old_data_is_okay_to_use = self._check_old_data_is_okay_to_use()
    self._log_input()
    self._set_likelihoodcoef()
    self.set_ephemeris_files(earth_ephem, sun_ephem)
    self.allowedMismatchFromSFTLength = allowedMismatchFromSFTLength
def _set_likelihoodcoef(self):
    """Constant offset turning a detection statistic into a likelihood.

    Extends MCMCSearch._set_likelihoodcoef (the base implementation):
    nglitch glitches split the data into (nglitch + 1) coherent segments,
    so the base coefficient is scaled by that segment count.
    """
    super()._set_likelihoodcoef()
    n_segments = self.nglitch + 1
    self.likelihoodcoef = self.likelihoodcoef * n_segments
def _initiate_search_object(self):
    """Construct the underlying core.SemiCoherentGlitchSearch from the
    stored settings, and back-fill min/maxStartTime from the data if unset."""
    logging.info("Setting up search object")
    search_ranges = self._get_search_ranges()
    self.search = core.SemiCoherentGlitchSearch(
        label=self.label,
        outdir=self.outdir,
        sftfilepattern=self.sftfilepattern,
        tref=self.tref,
        minStartTime=self.minStartTime,
        maxStartTime=self.maxStartTime,
        minCoverFreq=self.minCoverFreq,
        maxCoverFreq=self.maxCoverFreq,
        search_ranges=search_ranges,
        detectors=self.detectors,
        BSGL=self.BSGL,
        nglitch=self.nglitch,
        theta0_idx=self.theta0_idx,
        injectSources=self.injectSources,
        earth_ephem=self.earth_ephem,
        sun_ephem=self.sun_ephem,
        allowedMismatchFromSFTLength=self.allowedMismatchFromSFTLength,
    )
    # If the caller left the time span open, adopt the span of the loaded data.
    if self.minStartTime is None:
        self.minStartTime = self.search.minStartTime
    if self.maxStartTime is None:
        self.maxStartTime = self.search.maxStartTime
def _logp(self, theta_vals, theta_prior, theta_keys, search):
if self.nglitch > 1:
ts = (
[self.minStartTime]
+ list(theta_vals[-self.nglitch :])
+ [self.maxStartTime]
)
if np.array_equal(ts, np.sort(ts)) is False:
return -np.inf
if any(np.diff(ts) < self.dtglitchmin):
return -np.inf
H = [
self._generic_lnprior(**theta_prior[key])(p)
for p, key in zip(theta_vals, theta_keys)
]
return np.sum(H)
def _logl(self, theta, search):
in_theta = self._set_point_for_evaluation(theta)
if self.nglitch > 1:
ts = (
[self.minStartTime] + list(theta[-self.nglitch :]) + [self.maxStartTime]
)
if np.array_equal(ts, np.sort(ts)) is False:
return -np.inf
# FIXME: BSGL case?
twoF = search.get_semicoherent_nglitch_twoF(*in_theta)
return twoF * self.likelihooddetstatmultiplier + self.likelihoodcoef
def _unpack_input_theta(self):
    """Split theta_prior into sampled parameters (dict-valued entries) and
    fixed parameters (numeric entries), expand per-glitch keys, and build
    the index/symbol bookkeeping used by the sampler and output routines.

    Sets: theta_keys, fixed_theta, theta_idxs, theta_symbols, output_keys.
    Raises ValueError for unrecognised value types or missing keys.
    """
    base_keys = ["F0", "F1", "F2", "Alpha", "Delta"]
    glitch_keys = ["delta_F0", "delta_F1", "tglitch"]
    # Each glitch parameter is repeated nglitch times.
    full_glitch_keys = list(
        np.array([[gk] * self.nglitch for gk in glitch_keys]).flatten()
    )

    # If the prior uses per-glitch names (tglitch_0, ...), relabel the
    # expanded keys accordingly. NOTE: with len == 3*nglitch, the slice
    # [-4*nglitch:-2*nglitch] clamps to [0:nglitch], i.e. the delta_F0 group.
    if "tglitch_0" in self.theta_prior:
        full_glitch_keys[-self.nglitch :] = [
            "tglitch_{}".format(i) for i in range(self.nglitch)
        ]
        full_glitch_keys[-2 * self.nglitch : -1 * self.nglitch] = [
            "delta_F1_{}".format(i) for i in range(self.nglitch)
        ]
        full_glitch_keys[-4 * self.nglitch : -2 * self.nglitch] = [
            "delta_F0_{}".format(i) for i in range(self.nglitch)
        ]
    full_theta_keys = base_keys + full_glitch_keys
    full_theta_keys_copy = copy.copy(full_theta_keys)

    full_glitch_symbols = list(
        np.array(
            [[gs] * self.nglitch for gs in self.glitch_symbol_dictionary]
        ).flatten()
    )
    full_theta_symbols = [
        self.symbol_dictionary[key] for key in base_keys
    ] + full_glitch_symbols

    self.theta_keys = []
    fixed_theta_dict = {}
    for key, val in self.theta_prior.items():
        if type(val) is dict:
            # dict value => sampled parameter (placeholder 0 in fixed list);
            # glitch parameters are sampled once per glitch.
            fixed_theta_dict[key] = 0
            if key in glitch_keys:
                for i in range(self.nglitch):
                    self.theta_keys.append(key)
            else:
                self.theta_keys.append(key)
        elif type(val) in [float, int, np.float64]:
            # numeric value => parameter held fixed at that value
            fixed_theta_dict[key] = val
        else:
            raise ValueError(
                "Type {} of {} in theta not recognised".format(type(val), key)
            )
        # Tick off every expected occurrence of this key.
        if key in glitch_keys:
            for i in range(self.nglitch):
                full_theta_keys_copy.pop(full_theta_keys_copy.index(key))
        else:
            full_theta_keys_copy.pop(full_theta_keys_copy.index(key))

    if len(full_theta_keys_copy) > 0:
        raise ValueError(
            ("Input dictionary `theta` is missing the" "following keys: {}").format(
                full_theta_keys_copy
            )
        )

    self.fixed_theta = [fixed_theta_dict[key] for key in full_theta_keys]
    self.theta_idxs = [full_theta_keys.index(k) for k in self.theta_keys]
    self.theta_symbols = [full_theta_symbols[i] for i in self.theta_idxs]

    # Sort the sampled keys/symbols into the canonical full_theta_keys order.
    idxs = np.argsort(self.theta_idxs)
    self.theta_idxs = [self.theta_idxs[i] for i in idxs]
    self.theta_symbols = [self.theta_symbols[i] for i in idxs]
    self.theta_keys = [self.theta_keys[i] for i in idxs]

    # Correct for number of glitches in the idxs: repeated keys all map to the
    # same index via list.index(); bump duplicates until indices are unique.
    self.theta_idxs = np.array(self.theta_idxs)
    while np.sum(self.theta_idxs[:-1] == self.theta_idxs[1:]) > 0:
        for i, idx in enumerate(self.theta_idxs):
            if idx in self.theta_idxs[:i]:
                self.theta_idxs[i] += 1

    self.output_keys = self.theta_keys.copy()
    self.output_keys.append("twoF")
    if self.BSGL:
        self.output_keys.append("log10BSGL")
def _get_data_dictionary_to_save(self):
d = dict(
nsteps=self.nsteps,
nwalkers=self.nwalkers,
ntemps=self.ntemps,
theta_keys=self.theta_keys,
theta_prior=self.theta_prior,
log10beta_min=self.log10beta_min,
theta0_idx=self.theta0_idx,
BSGL=self.BSGL,
minStartTime=self.minStartTime,
maxStartTime=self.maxStartTime,
)
return d
def _apply_corrections_to_p0(self, p0):
p0 = np.array(p0)
if self.nglitch > 1:
p0[:, :, -self.nglitch :] = np.sort(p0[:, :, -self.nglitch :], axis=2)
return p0
def plot_cumulative_max(self, savefig=True):
    """
    Override MCMCSearch.plot_cumulative_max implementation to deal with the
    split at glitches.

    Parameters
    ----------
    savefig: boolean
        included for consistency with core plot_twoF_cumulative() function.
        If true, save the figure in outdir.
        If false, return an axis object.
    """
    logging.info("Getting cumulative 2F")
    fig, ax = plt.subplots()
    d, maxtwoF = self.get_max_twoF()
    # Back-fill fixed parameters (numeric prior entries) not in the max-2F dict.
    for key, val in self.theta_prior.items():
        if key not in d:
            d[key] = val

    # Collect the per-glitch frequency jumps and glitch times; insert a zero
    # jump at theta0_idx and flip the sign of jumps before the reference
    # segment so cumulative sums work in both directions.
    if self.nglitch > 1:
        delta_F0s = [d["delta_F0_{}".format(i)] for i in range(self.nglitch)]
        delta_F0s.insert(self.theta0_idx, 0)
        delta_F0s = np.array(delta_F0s)
        delta_F0s[: self.theta0_idx] *= -1
        tglitches = [d["tglitch_{}".format(i)] for i in range(self.nglitch)]
    elif self.nglitch == 1:
        delta_F0s = [d["delta_F0"]]
        delta_F0s.insert(self.theta0_idx, 0)
        delta_F0s = np.array(delta_F0s)
        delta_F0s[: self.theta0_idx] *= -1
        tglitches = [d["tglitch"]]
    # NOTE(review): nglitch < 1 would leave delta_F0s/tglitches undefined here
    # (NameError below); the class presumably guarantees nglitch >= 1 -- confirm.
    tboundaries = [self.minStartTime] + tglitches + [self.maxStartTime]

    # One cumulative-2F trace per inter-glitch segment.
    for j in range(self.nglitch + 1):
        ts = tboundaries[j]
        te = tboundaries[j + 1]
        if (te - ts) / 86400 < 5:
            # segment shorter than 5 days
            logging.info("Period too short to perform cumulative search")
            continue
        if j < self.theta0_idx:
            # Segments before the reference: subtract the accumulated jumps.
            summed_deltaF0 = np.sum(delta_F0s[j : self.theta0_idx])
            F0_j = d["F0"] - summed_deltaF0
            actual_ts, taus, twoFs = self.search.calculate_twoF_cumulative(
                F0_j,
                F1=d["F1"],
                F2=d["F2"],
                Alpha=d["Alpha"],
                Delta=d["Delta"],
                tstart=ts,
                tend=te,
            )
        elif j >= self.theta0_idx:
            # Reference segment and later: add the accumulated jumps.
            summed_deltaF0 = np.sum(delta_F0s[self.theta0_idx : j + 1])
            F0_j = d["F0"] + summed_deltaF0
            actual_ts, taus, twoFs = self.search.calculate_twoF_cumulative(
                F0_j,
                F1=d["F1"],
                F2=d["F2"],
                Alpha=d["Alpha"],
                Delta=d["Delta"],
                tstart=ts,
                tend=te,
            )
        ax.plot(actual_ts + taus, twoFs)
    ax.set_xlabel("GPS time")
    if savefig:
        fig.savefig(os.path.join(self.outdir, self.label + "_twoFcumulative.png"))
        plt.close(fig)
    return ax
def _get_savetxt_fmt_dict(self):
    """Map output column names to numpy.savetxt format specifiers."""
    fmt = helper_functions.get_doppler_params_output_format(self.theta_keys)
    # Glitch-parameter columns, in the same insertion order as before.
    glitch_formats = {"tglitch": "%d", "delta_F0": "%.16g", "delta_F1": "%.16g"}
    for key, spec in glitch_formats.items():
        if key in self.theta_keys:
            fmt[key] = spec
    fmt["twoF"] = "%.9g"
    if self.BSGL:
        fmt["log10BSGL"] = "%.9g"
    return fmt
class MCMCSemiCoherentSearch(MCMCSearch):
    """MCMC search for a signal using the semicoherent ComputeFstat.

    Evaluates the semicoherent F-statistic across a parameter space region
    corresponding to an isolated/binary-modulated CW signal.

    See MCMCSearch for a list of additional parameters, here we list only the additional
    init parameters of this class.
    """
def __init__(
    self,
    theta_prior,
    tref,
    label,
    outdir="data",
    minStartTime=None,
    maxStartTime=None,
    sftfilepattern=None,
    detectors=None,
    nsteps=[100, 100],
    nwalkers=100,
    ntemps=1,
    log10beta_min=-5,
    theta_initial=None,
    rhohatmax=1000,
    binary=False,
    BSGL=False,
    SSBprec=None,
    RngMedWindow=None,
    minCoverFreq=None,
    maxCoverFreq=None,
    injectSources=None,
    assumeSqrtSX=None,
    nsegs=None,
    earth_ephem=None,
    sun_ephem=None,
    allowedMismatchFromSFTLength=None,
):
    """
    Parameters
    ----------
    nsegs: int
        The number of segments into which the input datastream will be divided.
        Coherence time is computed internally as (maxStartTime - minStartTime) / nsegs.
    """
    # NOTE(review): nsteps=[100, 100] is a mutable default argument; it is only
    # stored on self here, but sharing across instances is possible if mutated.
    self._set_init_params_dict(locals())
    self.theta_prior = theta_prior
    self.tref = tref
    self.label = label
    self.outdir = outdir
    self.minStartTime = minStartTime
    self.maxStartTime = maxStartTime
    self.sftfilepattern = sftfilepattern
    self.detectors = detectors
    self.nsteps = nsteps
    self.nwalkers = nwalkers
    self.ntemps = ntemps
    self.log10beta_min = log10beta_min
    self.theta_initial = theta_initial
    self.rhohatmax = rhohatmax
    self.binary = binary
    self.BSGL = BSGL
    self.SSBprec = SSBprec
    self.RngMedWindow = RngMedWindow
    self.minCoverFreq = minCoverFreq
    self.maxCoverFreq = maxCoverFreq
    self.injectSources = injectSources
    self.assumeSqrtSX = assumeSqrtSX
    self.nsegs = nsegs
    self.set_ephemeris_files(earth_ephem, sun_ephem)
    self.allowedMismatchFromSFTLength = allowedMismatchFromSFTLength
    os.makedirs(outdir, exist_ok=True)
    self.output_file_header = self.get_output_file_header()
    self._add_log_file(self.output_file_header)
    logging.info(
        ("Set-up MCMC semi-coherent search for model {} on data" "{}").format(
            self.label, self.sftfilepattern
        )
    )
    self.pickle_path = os.path.join(self.outdir, self.label + "_saved_data.p")
    self._unpack_input_theta()
    self.ndim = len(self.theta_keys)
    # Temperature ladder for parallel tempering; falsy log10beta_min disables it.
    if self.log10beta_min:
        self.betas = np.logspace(0, self.log10beta_min, self.ntemps)
    else:
        self.betas = None
    # NOTE(review): `args` is a module-level global (parsed command line);
    # --clean archives any previously cached results before re-running.
    if args.clean and os.path.isfile(self.pickle_path):
        os.rename(self.pickle_path, self.pickle_path + ".old")
    self._log_input()
    # The likelihood coefficient needs nsegs; defer if it was not provided yet.
    if self.nsegs:
        self._set_likelihoodcoef()
    else:
        logging.info("Value `nsegs` not yet provided")
def _set_likelihoodcoef(self):
    """Constant offset turning a detection statistic into a likelihood.

    Extends MCMCSearch._set_likelihoodcoef (the base implementation) by
    scaling the base coefficient with the number of segments the
    semicoherent search operates on.
    """
    super()._set_likelihoodcoef()
    self.likelihoodcoef = self.likelihoodcoef * self.nsegs
def _get_data_dictionary_to_save(self):
d = dict(
nsteps=self.nsteps,
nwalkers=self.nwalkers,
ntemps=self.ntemps,
theta_keys=self.theta_keys,
theta_prior=self.theta_prior,
log10beta_min=self.log10beta_min,
BSGL=self.BSGL,
nsegs=self.nsegs,
minStartTime=self.minStartTime,
maxStartTime=self.maxStartTime,
)
return d
def _initiate_search_object(self):
logging.info("Setting | |
<reponame>lao-tseu-is-alive/tensorflow2-tutorial
import matplotlib.pyplot as plt
import numpy as np
import os

# next line is to limit tensorflow verbose output
# (the TF_CPP_MIN_LOG_LEVEL env var must be set before tensorflow is imported)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
from tensorflow.keras.preprocessing import image_dataset_from_directory

from my_tf_lib import images_classification as ic

# Where the fine-tuned classifier will be saved/loaded.
CONST_MODEL_PATH = 'trained_models/tf2_model_cnn_transfer_learning_mobilenet-v2_with_data_augmentation_classifier'
# Expected class labels (compared against the dataset's directory names below).
CONST_CLASS_NAMES = ['cats', 'dogs']
# Input resolution fed to the network, and the training batch size.
CONST_IMAGE_SIZE = (160, 160)
CONST_BATCH_SIZE = 32
if __name__ == '__main__':
print('# Tensorflow version : {}'.format(tf.__version__))
print('# TensorFlow 2 Transfer learning and fine-tuning')
print('# more info: https://www.tensorflow.org/tutorials/images/transfer_learning')
_URL = 'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip'
path_to_zip = tf.keras.utils.get_file('cats_and_dogs.zip', origin=_URL, extract=True)
PATH = os.path.join(os.path.dirname(path_to_zip), 'cats_and_dogs_filtered')
print('# creating the tf.data.Dataset from disk')
train_dir = os.path.join(PATH, 'train')
validation_dir = os.path.join(PATH, 'validation')
train_dataset = image_dataset_from_directory(train_dir,
shuffle=True,
batch_size=CONST_BATCH_SIZE,
image_size=CONST_IMAGE_SIZE)
validation_dataset = image_dataset_from_directory(validation_dir,
shuffle=True,
batch_size=CONST_BATCH_SIZE,
image_size=CONST_IMAGE_SIZE)
print("""
# You can find the class names in the class_names attribute on these datasets.
# These correspond to the directory names in alphabetical order.
""")
class_names = train_dataset.class_names
print(class_names)
CONST_CLASS_NAMES.sort()
print(CONST_CLASS_NAMES)
ic.show_n_images_from_dataset(train_dataset, num_images_by_category=4)
print("""
# As the original dataset doesn't contains a test set, you will create one.
# To do so, determine how many batches of data are available in the validation set
# using tf.data.experimental.cardinality, then move 20% of them to a test set.
""")
val_batches = tf.data.experimental.cardinality(validation_dataset)
test_dataset = validation_dataset.take(val_batches // 5)
validation_dataset = validation_dataset.skip(val_batches // 5)
print('Number of validation batches: %d' % tf.data.experimental.cardinality(validation_dataset))
print('Number of test batches: %d' % tf.data.experimental.cardinality(test_dataset))
print("""
# Configure the dataset for performance
# Use buffered prefetching to load images from disk without having I/O become blocking.
# To learn more about this method see the data performance guide :
# https://www.tensorflow.org/guide/data_performance
""")
AUTOTUNE = tf.data.AUTOTUNE
train_dataset = train_dataset.prefetch(buffer_size=AUTOTUNE)
validation_dataset = validation_dataset.prefetch(buffer_size=AUTOTUNE)
test_dataset = test_dataset.prefetch(buffer_size=AUTOTUNE)
print("""
# Use data augmentation :
# When you don't have a large image dataset, it's a good practice to artificially introduce sample diversity
# by applying random, yet realistic, transformations to the training images, such as rotation and horizontal flipping.
# This helps expose the model to different aspects of the training data and reduce overfitting.
# You can learn more about data augmentation in this tutorial :
# https://www.tensorflow.org/tutorials/images/data_augmentation
""")
data_augmentation = tf.keras.Sequential([
tf.keras.layers.experimental.preprocessing.RandomFlip('horizontal'),
tf.keras.layers.experimental.preprocessing.RandomRotation(0.2),
])
print("""
# Note: The previous layers (RandomFlip & RandomRotation) are active only during training, when you call model.fit.
# They are inactive when the model is used in inference mode in model.evaluate or model.fit.
""")
for image, _ in train_dataset.take(1):
plt.figure(figsize=(10, 10))
first_image = image[0]
for i in range(9):
ax = plt.subplot(3, 3, i + 1)
augmented_image = data_augmentation(tf.expand_dims(first_image, 0))
plt.imshow(augmented_image[0] / 255)
plt.axis('off')
plt.show()
print("""
# Rescale pixel values
# In a moment, you will download tf.keras.applications.MobileNetV2 for use as your base model.
# This model expects pixel vaues in [-1,1], but at this point, the pixel values in your images are in [0-255].
# To rescale them, use the preprocessing method included with the model.
""")
preprocess_input = tf.keras.applications.mobilenet_v2.preprocess_input
print("""
# Note: Alternatively, you could rescale pixel values from [0,255] to [-1, 1] using a Rescaling layer.
# rescale = tf.keras.layers.experimental.preprocessing.Rescaling(1./127.5, offset= -1)
""")
print("""
# Note: If using other tf.keras.applications, be sure to check the API doc to determine if
# they expect pixels in [-1,1] or [0,1], or use the included preprocess_input function.
""")
print("""
# Create the base model from the pre-trained convnets :
# You will create the base model from the MobileNet V2 model developed at Google.
# This is pre-trained on the ImageNet dataset, a large dataset consisting of 1.4M images and 1000 classes.
# ImageNet is a research training dataset with a wide variety of categories like jackfruit and syringe.
# This base of knowledge will help us classify cats and dogs from our specific dataset.
# First, you need to pick which layer of MobileNet V2 you will use for feature extraction.
# The very last classification layer (on "top", as most diagrams of machine learning models go from bottom to top)
# is not very useful. Instead, you will follow the common practice to depend on the very last layer
# before the flatten operation. This layer is called the "bottleneck layer".
# The bottleneck layer features retain more generality as compared to the final/top layer.
# First, instantiate a MobileNet V2 model pre-loaded with weights trained on ImageNet.
# By specifying the include_top=False argument, you load a network that doesn't include the
# classification layers at the top, which is ideal for feature extraction.
""")
# Create the base model from the pre-trained model MobileNet V2
IMG_SHAPE = CONST_IMAGE_SIZE + (3,)
base_model = tf.keras.applications.MobileNetV2(input_shape=IMG_SHAPE,
include_top=False,
weights='imagenet')
print("""
# This feature extractor converts each 160x160x3 image into a 5x5x1280 block of features.
# Let's see what it does to an example batch of images:
""")
image_batch, label_batch = next(iter(train_dataset))
feature_batch = base_model(image_batch)
print(feature_batch.shape)
print("""
# Feature extraction
# In this step, you will freeze the convolutional base created from the previous step and
# to use as a feature extractor.
# Additionally, you add a classifier on top of it and train the top-level classifier.
# Freeze the convolutional base
# It is important to freeze the convolutional base before you compile and train the model.
# Freezing (by setting layer.trainable = False) prevents the weights in a given layer
# from being updated during training. MobileNet V2 has many layers, so setting the entire model's
# trainable flag to False will freeze all of them. (and save you A LOT OF COMPUTE TIME !!!)
""")
base_model.trainable = False
print("""
# Important note about BatchNormalization layers
# Many models contain tf.keras.layers.BatchNormalization layers.
# This layer is a special case and precautions should be taken in the context of fine-tuning,
# as shown later in this tutorial.
#
# When you set layer.trainable = False, the BatchNormalization layer will run in inference mode,
# and will not update its mean and variance statistics.
#
# When you unfreeze a model that contains BatchNormalization layers in order to do fine-tuning,
# you should keep the BatchNormalization layers in inference mode by passing training = False
# when calling the base model. Otherwise, the updates applied to the non-trainable weights
# will destroy what the model has learned.
#
# For details, see the Transfer learning guide :
# https://www.tensorflow.org/guide/keras/transfer_learning
""")
# Let's take a look at the base model architecture
base_model.summary()
print("""
# Add a classification head
# To generate predictions from the block of features, average over the spatial 5x5 spatial locations,
# using a tf.keras.layers.GlobalAveragePooling2D layer to convert the features
# to a single 1280-element vector per image.
""")
global_average_layer = tf.keras.layers.GlobalAveragePooling2D()
feature_batch_average = global_average_layer(feature_batch)
print(feature_batch_average.shape)
print("""
# Apply a tf.keras.layers.Dense layer to convert these features into a single prediction per image.
# You don't need an activation function here because this prediction will be treated as a logit,
# or a raw prediction value. Positive numbers predict class 1, negative numbers predict class 0.
""")
prediction_layer = tf.keras.layers.Dense(1)
prediction_batch = prediction_layer(feature_batch_average)
print(prediction_batch.shape)
print("""
# Build a model by chaining together the data augmentation, rescaling, base_model and feature extractor layers
# using the Keras Functional API. As previously mentioned, use training=False as our model contains
# a BatchNormalization layer.
""")
inputs = tf.keras.Input(shape=(160, 160, 3))
x = data_augmentation(inputs)
x = preprocess_input(x)
x = base_model(x, training=False)
x = global_average_layer(x)
x = tf.keras.layers.Dropout(0.2)(x)
outputs = prediction_layer(x)
model = tf.keras.Model(inputs, outputs)
print("""
# Compile the model
# Compile the model before training it. Since there are two classes, use a binary cross-entropy loss
# with from_logits=True since the model provides a linear output.
""")
base_learning_rate = 0.0001
# Use the full `learning_rate` keyword: the short `lr` alias is deprecated
# in TF2's Keras optimizers and emits a warning (and was later removed).
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=base_learning_rate),
              loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
              metrics=['accuracy'])
model.summary()
print("""
# The 2.5M parameters in MobileNet are frozen, but there are 1.2K trainable parameters in the Dense layer.
# These are divided between two tf.Variable objects, the weights and biases.
""")
print("# len(model.trainable_variables) : {}".format(len(model.trainable_variables)))
print("""
# Train the model
# After training for 10 epochs, you should see ~94% accuracy on the validation set.
""")
initial_epochs = 10
loss0, accuracy0 = model.evaluate(validation_dataset)
print("initial loss: {:.2f}".format(loss0))
print("initial accuracy: {:.2f}".format(accuracy0))
history = model.fit(train_dataset,
epochs=initial_epochs,
validation_data=validation_dataset)
print("""
# Learning curves
# Let's take a look at the learning curves of the training and validation accuracy/loss
# when using | |
# File: cqlengine/models.py
from collections import OrderedDict
import re
from cqlengine import columns
from cqlengine.exceptions import ModelException, CQLEngineException, ValidationError
from cqlengine.query import ModelQuerySet, DMLQuery, AbstractQueryableColumn
from cqlengine.query import DoesNotExist as _DoesNotExist
from cqlengine.query import MultipleObjectsReturned as _MultipleObjectsReturned
class ModelDefinitionException(ModelException):
    """ Raised when a model's class definition is invalid """
    pass
DEFAULT_KEYSPACE = 'cqlengine'
class hybrid_classmethod(object):
    """
    Descriptor that lets one attribute dispatch to a class-level
    implementation when accessed on the class, and to an instance-level
    implementation when accessed on an instance.
    """

    def __init__(self, clsmethod, instmethod):
        self.clsmethod = clsmethod
        self.instmethod = instmethod

    def __get__(self, instance, owner):
        # Pick which implementation to bind, and what to bind it to:
        # the owning class when accessed on the class, otherwise the instance.
        if instance is None:
            target, binder = self.clsmethod, owner
        else:
            target, binder = self.instmethod, instance
        return target.__get__(binder, owner)

    def __call__(self, *args, **kwargs):
        """
        Never reached at runtime; exists only so IDEs treat the
        attribute as callable.
        """
        raise NotImplementedError
class QuerySetDescriptor(object):
    """
    Descriptor that hands out a fresh queryset for its model
    every time it is accessed.
    """

    def __get__(self, obj, model):
        """ :rtype: ModelQuerySet """
        # Abstract models have no backing table, so queries make no sense.
        if model.__abstract__:
            raise CQLEngineException(
                'cannot execute queries against abstract models')
        return model.__queryset__(model)

    def __call__(self, *args, **kwargs):
        """
        Never reached at runtime; exists only so IDEs treat the
        attribute as callable.
        :rtype: ModelQuerySet
        """
        raise NotImplementedError
class ColumnQueryEvaluator(AbstractQueryableColumn):
    """
    Wraps a column so it can appear in comparator expressions that
    produce query operators, e.g.::

        Model.column == 5
    """

    def __init__(self, column):
        # The wrapped column object; exposed via _get_column for the base class.
        self.column = column

    def _get_column(self):
        """Return the wrapped column."""
        return self.column
class ColumnDescriptor(object):
    """
    Handles reading and writing column values to and from a model
    instance's value manager, and produces comparator queries when
    accessed through the class.
    """

    def __init__(self, column):
        """
        :param column: the column this descriptor manages
        :type column: columns.Column
        """
        self.column = column
        self.query_evaluator = ColumnQueryEvaluator(self.column)

    def __get__(self, instance, owner):
        """
        Return the stored value when accessed through an instance,
        or the query evaluator when accessed through the class.

        :param instance: the model instance
        :type instance: Model
        """
        if not instance:
            return self.query_evaluator
        return instance._values[self.column.column_name].getval()

    def __set__(self, instance, value):
        """
        Sets the value on an instance; raises on class-level assignment.
        TODO: use None instance to create update statements
        """
        if not instance:
            raise AttributeError('cannot reassign column values')
        return instance._values[self.column.column_name].setval(value)

    def __delete__(self, instance):
        """
        Sets the column value to None, if possible
        """
        if not instance:
            # nothing to do for class-level deletion
            return
        if not self.column.can_delete:
            raise AttributeError(
                'cannot delete {} columns'.format(self.column.column_name))
        instance._values[self.column.column_name].delval()
class BaseModel(object):
    """
    The base model class, don't inherit from this, inherit from Model, defined below
    """

    class DoesNotExist(_DoesNotExist):
        pass

    class MultipleObjectsReturned(_MultipleObjectsReturned):
        pass

    objects = QuerySetDescriptor()

    # table names will be generated automatically from it's model and package name
    # however, you can also define them manually here
    __table_name__ = None

    # the keyspace for this model
    __keyspace__ = None

    # compaction options
    __compaction__ = None
    __compaction_tombstone_compaction_interval__ = None
    # fix: this option was missing its trailing underscores, breaking the
    # __option__ naming convention shared by every other compaction setting
    __compaction_tombstone_threshold__ = None

    # compaction - size tiered options
    __compaction_bucket_high__ = None
    __compaction_bucket_low__ = None
    __compaction_max_threshold__ = None
    __compaction_min_threshold__ = None
    __compaction_min_sstable_size__ = None

    # compaction - leveled options
    __compaction_sstable_size_in_mb__ = None
    # end compaction

    # the queryset class used for this class
    __queryset__ = ModelQuerySet

    # the query class used to persist/delete instances
    __dmlquery__ = DMLQuery

    __read_repair_chance__ = 0.1

    def __init__(self, **values):
        """
        Instantiate the model, populating each defined column from *values*.

        :param values: column name -> value mapping
        :raises ValidationError: if a keyword does not match a defined column
        """
        self._values = {}

        extra_columns = set(values.keys()) - set(self._columns.keys())
        if extra_columns:
            raise ValidationError(
                "Incorrect columns passed: {}".format(extra_columns))

        for name, column in self._columns.items():
            value = values.get(name, None)
            # container columns get to_python even for None so they can
            # normalize to an empty container
            if value is not None or isinstance(column, columns.BaseContainerColumn):
                value = column.to_python(value)
            value_mngr = column.value_manager(self, column, value)
            self._values[name] = value_mngr

        # a flag set by the deserializer to indicate
        # that update should be used when persisting changes
        self._is_persisted = False
        self._batch = None

    def _can_update(self):
        """
        Called by the save function to check if this should be
        persisted with update or insert

        :return: True if the instance has been persisted and no primary key
            value has changed
        """
        if not self._is_persisted:
            return False
        return all(not self._values[k].changed for k in self._primary_keys)

    @classmethod
    def _get_keyspace(cls):
        """ Returns the manual keyspace, if set, otherwise the default keyspace """
        return cls.__keyspace__ or DEFAULT_KEYSPACE

    @classmethod
    def _get_column(cls, name):
        """
        Returns the column matching the given name, raising a key error if
        it doesn't exist

        :param name: the name of the column to return
        :rtype: Column
        """
        return cls._columns[name]

    def __eq__(self, other):
        if self.__class__ != other.__class__:
            return False

        # check attribute keys
        # bug fix: other_keys was previously read from self, so a mismatch
        # between the two instances' column sets could never be detected
        keys = set(self._columns.keys())
        other_keys = set(other._columns.keys())
        if keys != other_keys:
            return False

        # check that all of the attributes match
        for key in other_keys:
            if getattr(self, key, None) != getattr(other, key, None):
                return False

        return True

    def __ne__(self, other):
        return not self.__eq__(other)

    @classmethod
    def column_family_name(cls, include_keyspace=True):
        """
        Returns the column family name if it's been defined
        otherwise, it creates it from the module and class name
        """
        cf_name = ''
        if cls.__table_name__:
            cf_name = cls.__table_name__.lower()
        else:
            # snake_case the CamelCase class name
            camelcase = re.compile(r'([a-z])([A-Z])')
            ccase = lambda s: camelcase.sub(
                lambda v: '{}_{}'.format(v.group(1), v.group(2).lower()), s)

            cf_name += ccase(cls.__name__)
            # trim to less than 48 characters or cassandra will complain
            cf_name = cf_name[-48:]
            cf_name = cf_name.lower()
            cf_name = re.sub(r'^_+', '', cf_name)
        if not include_keyspace:
            return cf_name
        return '{}.{}'.format(cls._get_keyspace(), cf_name)

    def validate(self):
        """ Cleans and validates the field values """
        for name, col in self._columns.items():
            val = col.validate(getattr(self, name))
            setattr(self, name, val)

    def _as_dict(self):
        """ Returns a map of column names to cleaned values """
        # NOTE: leftover debug `print values` statement removed (it was also
        # a syntax error under Python 3)
        values = self._dynamic_columns or {}
        for name, col in self._columns.items():
            values[name] = getattr(self, name, None)
        return values

    @classmethod
    def create(cls, **kwargs):
        """ Creates and saves a new instance built from the given values """
        return cls.objects.create(**kwargs)

    @classmethod
    def all(cls):
        """ Returns a queryset of all objects of this model """
        return cls.objects.all()

    @classmethod
    def filter(cls, *args, **kwargs):
        """ Returns a queryset filtered by the given arguments """
        return cls.objects.filter(*args, **kwargs)

    @classmethod
    def get(cls, *args, **kwargs):
        """ Returns the single object matching the given arguments """
        return cls.objects.get(*args, **kwargs)

    def save(self):
        """ Validates and persists this instance, returning self """
        # (removed unused `is_new = self.pk is None` leftover)
        self.validate()
        self.__dmlquery__(self.__class__, self, batch=self._batch).save()

        # reset the value managers
        for v in self._values.values():
            v.reset_previous_value()
        self._is_persisted = True

        return self

    def delete(self):
        """ Deletes this instance """
        self.__dmlquery__(self.__class__, self, batch=self._batch).delete()

    @classmethod
    def _class_batch(cls, batch):
        """ Returns a queryset of this model bound to the given batch """
        return cls.objects.batch(batch)

    def _inst_batch(self, batch):
        """ Binds this instance to the given batch and returns self """
        self._batch = batch
        return self

    # acts as a classmethod on the class and a regular method on instances
    batch = hybrid_classmethod(_class_batch, _inst_batch)
class ModelMetaClass(type):
def __new__(cls, name, bases, attrs):
"""
"""
# move column definitions into columns dict
# and set default column names
column_dict = OrderedDict()
primary_keys = OrderedDict()
pk_name = None
# get inherited properties
inherited_columns = OrderedDict()
for base in bases:
for k, v in getattr(base, '_defined_columns', {}).items():
inherited_columns.setdefault(k, v)
# short circuit __abstract__ inheritance
is_abstract = attrs['__abstract__'] = attrs.get('__abstract__', False)
def _transform_column(col_name, col_obj):
column_dict[col_name] = col_obj
if col_obj.primary_key:
primary_keys[col_name] = col_obj
col_obj.set_column_name(col_name)
# set properties
attrs[col_name] = ColumnDescriptor(col_obj)
column_definitions = [
(k, v) for k, v in attrs.items() if isinstance(v, columns.Column)]
column_definitions = sorted(
column_definitions, lambda x, y: cmp(x[1].position, y[1].position))
column_definitions = inherited_columns.items() + column_definitions
defined_columns = OrderedDict(column_definitions)
# prepend primary key if one hasn't been defined
if not is_abstract and not any([v.primary_key for k, v in column_definitions]):
raise ModelDefinitionException(
"At least 1 primary key is required.")
counter_columns = [
c for c in defined_columns.values() if isinstance(c, columns.Counter)]
data_columns = [c for c in defined_columns.values(
) if not c.primary_key and not isinstance(c, columns.Counter)]
if counter_columns and data_columns:
raise ModelDefinitionException(
'counter models may not have data columns')
has_partition_keys = any(
v.partition_key for (k, v) in column_definitions)
# TODO: check that the defined columns don't conflict with any of the Model API's existing attributes/methods
# transform column definitions
for k, v in column_definitions:
# counter column primary keys are not allowed
if (v.primary_key or v.partition_key) and isinstance(v, (columns.Counter, columns.BaseContainerColumn)):
raise ModelDefinitionException(
'counter columns and container columns cannot be used as primary keys')
# this will mark the first primary key column as a partition
# key, if one hasn't been set already
if not has_partition_keys and v.primary_key:
v.partition_key = True
has_partition_keys = True
_transform_column(k, v)
partition_keys = OrderedDict(
k for k in primary_keys.items() if k[1].partition_key)
clustering_keys = OrderedDict(
k for k in primary_keys.items() if not k[1].partition_key)
# setup partition key shortcut
if len(partition_keys) == 0:
if not is_abstract:
raise ModelException(
"at least one partition key must be defined")
if len(partition_keys) == 1:
pk_name = partition_keys.keys()[0]
attrs['pk'] = attrs[pk_name]
else:
# composite partition key case, get/set a tuple of values
_get = lambda self: tuple(
self._values[c].getval() for c in partition_keys.keys())
_set = lambda self, val: tuple(
self._values[c].setval(v) for (c, v) in zip(partition_keys.keys(), val))
attrs['pk'] = property(_get, _set)
# some validation
col_names = set()
for v in column_dict.values():
# check for duplicate column names
if v.db_field_name in col_names:
raise ModelException(
"{} defines the column {} more than once".format(name, v.db_field_name))
if v.clustering_order and not (v.primary_key and not v.partition_key):
| |
# Repository: rupakgoyal/panel- (1-10 GitHub stars)
"""
Interact with functions using widgets.
The interact Pane implemented in this module mirrors
ipywidgets.interact in its API and implementation. Large parts of the
code were copied directly from ipywidgets:
Copyright (c) Jupyter Development Team and PyViz Development Team.
Distributed under the terms of the Modified BSD License.
"""
from __future__ import absolute_import, division, unicode_literals
import types
from collections import OrderedDict
from inspect import getcallargs
from numbers import Real, Integral
from six import string_types
try: # Python >= 3.3
from inspect import signature, Parameter
from collections.abc import Iterable, Mapping
empty = Parameter.empty
except ImportError:
from collections import Iterable, Mapping
try:
from IPython.utils.signatures import signature, Parameter
empty = Parameter.empty
except:
signature, Parameter, empty = None, None, None
try:
from inspect import getfullargspec as check_argspec
except ImportError:
from inspect import getargspec as check_argspec # py2
import param
from .layout import Panel, Column, Row
from .pane import PaneBase, Pane, HTML
from .util import as_unicode
from .widgets import (Checkbox, TextInput, Widget, IntSlider, FloatSlider,
Select, DiscreteSlider, Button)
def _get_min_max_value(min, max, value=None, step=None):
"""Return min, max, value given input values with possible None."""
# Either min and max need to be given, or value needs to be given
if value is None:
if min is None or max is None:
raise ValueError('unable to infer range, value from: ({0}, {1}, {2})'.format(min, max, value))
diff = max - min
value = min + (diff / 2)
# Ensure that value has the same type as diff
if not isinstance(value, type(diff)):
value = min + (diff // 2)
else: # value is not None
if not isinstance(value, Real):
raise TypeError('expected a real number, got: %r' % value)
# Infer min/max from value
if value == 0:
# This gives (0, 1) of the correct type
vrange = (value, value + 1)
elif value > 0:
vrange = (-value, 3*value)
else:
vrange = (3*value, -value)
if min is None:
min = vrange[0]
if max is None:
max = vrange[1]
if step is not None:
# ensure value is on a step
tick = int((value - min) / step)
value = min + tick * step
if not min <= value <= max:
raise ValueError('value must be between min and max (min={0}, value={1}, max={2})'.format(min, value, max))
return min, max, value
def _yield_abbreviations_for_parameter(parameter, kwargs):
"""Get an abbreviation for a function parameter."""
name = parameter.name
kind = parameter.kind
ann = parameter.annotation
default = parameter.default
not_found = (name, empty, empty)
if kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY):
if name in kwargs:
value = kwargs.pop(name)
elif ann is not empty:
param.main.warning("Using function annotations to implicitly specify interactive controls is deprecated. Use an explicit keyword argument for the parameter instead.", DeprecationWarning)
value = ann
elif default is not empty:
value = default
if isinstance(value, (Iterable, Mapping)):
value = fixed(value)
else:
yield not_found
yield (name, value, default)
elif kind == Parameter.VAR_KEYWORD:
# In this case name=kwargs and we yield the items in kwargs with their keys.
for k, v in kwargs.copy().items():
kwargs.pop(k)
yield k, v, empty
def _matches(o, pattern):
"""Match a pattern of types in a sequence."""
if not len(o) == len(pattern):
return False
comps = zip(o,pattern)
return all(isinstance(obj,kind) for obj,kind in comps)
class interactive(PaneBase):
default_layout = param.ClassSelector(default=Column, class_=(Panel),
is_instance=False)
manual_update = param.Boolean(default=False, doc="""
Whether to update manually by clicking on button.""")
manual_name = param.String(default='Run Interact')
def __init__(self, object, params={}, **kwargs):
    """Build widgets for *object*'s arguments and assemble the layout.

    :param object: the function to interact with
    :param params: parameter overrides forwarded to the base Pane constructor
    :param kwargs: abbreviations (widgets, tuples, values) for the arguments

    NOTE(review): the mutable default ``params={}`` is shared across calls;
    it appears safe here because it is only read, never mutated.
    """
    # `signature` is None when neither Python >= 3.3 nor IPython is available.
    if signature is None:
        raise ImportError('interact requires either recent Python version '
                          '(>=3.3 or IPython to inspect function signatures.')

    super(interactive, self).__init__(object, **params)

    # Resolve each function argument to a (name, abbreviation, default) triple.
    new_kwargs = self.find_abbreviations(kwargs)
    # Before we proceed, let's make sure that the user has passed a set of args+kwargs
    # that will lead to a valid call of the function. This protects against unspecified
    # and doubly-specified arguments.
    try:
        check_argspec(object)
    except TypeError:
        # if we can't inspect, we can't validate
        pass
    else:
        getcallargs(object, **{n:v for n,v,_ in new_kwargs})

    widgets = self.widgets_from_abbreviations(new_kwargs)
    if self.manual_update:
        # In manual mode, updates are triggered by this button rather than
        # by individual widget changes.
        widgets.append(('manual', Button(name=self.manual_name)))
    self._widgets = OrderedDict(widgets)
    # Render the initial output by calling the function with current values.
    self._pane = Pane(self.object(**self.kwargs), name=self.name)
    self._inner_layout = Row(self._pane)
    # Keep only real Widget instances for display (drops `fixed` values).
    widgets = [widget for _, widget in widgets if isinstance(widget, Widget)]
    if 'name' in params:
        widgets.insert(0, HTML('<h2>%s</h2>' % self.name))
    self.widget_box = Column(*widgets)
    self.layout.objects = [self.widget_box, self._inner_layout]
    # Wire widget value changes (or the manual button) to re-render the pane.
    self._link_widgets()
#----------------------------------------------------------------
# Model API
#----------------------------------------------------------------
def _get_model(self, doc, root=None, parent=None, comm=None):
    # Rendering is delegated entirely to the inner layout (widgets + pane).
    return self._inner_layout._get_model(doc, root, parent, comm)
#----------------------------------------------------------------
# Callback API
#----------------------------------------------------------------
def _synced_params(self):
    # No parameters are synced with the model directly; the widgets drive
    # all updates through _link_widgets instead.
    return []
def _link_widgets(self):
    """Attach watchers so widget changes (or the manual button) update the pane."""
    if self.manual_update:
        # Manual mode: only the button triggers updates; value widgets are
        # deliberately left unwatched.
        widgets = [('manual', self._widgets['manual'])]
    else:
        widgets = self._widgets.items()

    for name, widget in widgets:
        def update_pane(change):
            # Try updating existing pane
            new_object = self.object(**self.kwargs)
            pane_type = self.get_pane_type(new_object)
            if type(self._pane) is pane_type:
                if isinstance(new_object, (PaneBase, Panel)):
                    new_params = {k: v for k, v in new_object.get_param_values()
                                  if k != 'name'}
                    self._pane.set_param(**new_params)
                else:
                    self._pane.object = new_object
                return

            # Replace pane entirely
            self._pane = Pane(new_object)
            self._inner_layout[0] = self._pane

        # Buttons notify via their `clicks` parameter, other widgets via `value`.
        pname = 'clicks' if name == 'manual' else 'value'
        watcher = widget.param.watch(update_pane, pname)
        # Keep the watcher so _cleanup (via the base class) can detach it.
        self._callbacks.append(watcher)
def _cleanup(self, root):
    # Clean up the inner layout first, then let the base class release
    # its own state (watchers, models) for this root.
    self._inner_layout._cleanup(root)
    super(interactive, self)._cleanup(root)
#----------------------------------------------------------------
# Public API
#----------------------------------------------------------------
@property
def kwargs(self):
    """Current keyword arguments for the function, read from the widgets."""
    values = {}
    for arg_name, widget in self._widgets.items():
        # The manual-update button is not a function argument.
        if arg_name == 'manual':
            continue
        values[arg_name] = widget.value
    return values
def signature(self):
    """Return the inspected signature of the wrapped function."""
    # Calls the module-level `signature` (inspect/IPython), not this method.
    return signature(self.object)
def find_abbreviations(self, kwargs):
    """Find the abbreviations for the given function and kwargs.
    Return (name, abbrev, default) tuples.
    """
    try:
        sig = self.signature()
    except (ValueError, TypeError):
        # can't inspect, no info from function; only use kwargs
        return [(key, value, value) for key, value in kwargs.items()]

    abbrevs = []
    for parameter in sig.parameters.values():
        for name, value, default in _yield_abbreviations_for_parameter(parameter, kwargs):
            # The sentinel `empty` means no abbreviation could be resolved.
            if value is empty:
                raise ValueError('cannot find widget or abbreviation for argument: {!r}'.format(name))
            abbrevs.append((name, value, default))
    return abbrevs
def widgets_from_abbreviations(self, seq):
    """Given a sequence of (name, abbrev, default) tuples, return a sequence of Widgets."""
    result = []
    for name, abbrev, default in seq:
        if isinstance(abbrev, fixed):
            widget = abbrev
        else:
            widget = self.widget_from_abbrev(abbrev, name, default)
        if isinstance(widget, (Widget, fixed)):
            result.append((name, widget))
        elif widget is None:
            # No widget could be built for this argument; skip it silently.
            continue
        else:
            raise TypeError("{!r} is not a ValueWidget".format(widget))
    return result
@classmethod
def applies(cls, object):
    # interact only supports plain Python functions, not arbitrary callables.
    return isinstance(object, types.FunctionType)
@classmethod
def widget_from_abbrev(cls, abbrev, name, default=empty):
    """Build a ValueWidget instance given an abbreviation or Widget.

    Resolution order: existing Widget -> tuple (range spec) -> single
    value -> iterable -> fall back to wrapping the object in `fixed`.
    """
    # Anything that is already a widget passes straight through.
    if isinstance(abbrev, Widget):
        return abbrev

    if isinstance(abbrev, tuple):
        widget = cls.widget_from_tuple(abbrev, name, default)
        if default is not empty:
            try:
                widget.value = default
            except Exception:
                # ignore failure to set default
                pass
        return widget

    # Try single value
    widget = cls.widget_from_single_value(abbrev, name)
    if widget is not None:
        return widget

    # Something iterable (list, dict, generator, ...). Note that str and
    # tuple should be handled before, that is why we check this case last.
    if isinstance(abbrev, Iterable):
        widget = cls.widget_from_iterable(abbrev, name)
        if default is not empty:
            try:
                widget.value = default
            except Exception:
                # ignore failure to set default
                pass
        return widget

    # No idea...
    return fixed(abbrev)
@staticmethod
def widget_from_single_value(o, name):
    """Make widgets from single values, which can be used as parameter defaults."""
    if isinstance(o, string_types):
        return TextInput(value=as_unicode(o), name=name)
    elif isinstance(o, bool):
        # NOTE: bool must be tested before Integral, since bool is a
        # subclass of int and would otherwise get an IntSlider.
        return Checkbox(value=o, name=name)
    elif isinstance(o, Integral):
        min, max, value = _get_min_max_value(None, None, o)
        return IntSlider(value=o, start=min, end=max, name=name)
    elif isinstance(o, Real):
        min, max, value = _get_min_max_value(None, None, o)
        return FloatSlider(value=o, start=min, end=max, name=name)
    else:
        # Not a type we can build a widget from; caller tries other rules.
        return None
@staticmethod
def widget_from_tuple(o, name, default=empty):
"""Make widgets from a tuple abbreviation."""
int_default = (default is empty or isinstance(default, int))
if _matches(o, (Real, Real)):
min, max, value = _get_min_max_value(o[0], o[1])
if all(isinstance(_, Integral) for _ in o) and int_default:
cls = IntSlider
else:
cls = FloatSlider
return cls(value=value, start=min, end=max, name=name)
elif _matches(o, (Real, Real, Real)):
step = o[2]
if step <= 0:
raise ValueError("step must be >= 0, not %r" % step)
min, max, value = _get_min_max_value(o[0], o[1], step=step)
if all(isinstance(_, Integral) for _ in o) and int_default:
cls = IntSlider
else:
cls = FloatSlider
return cls(value=value, start=min, end=max, step=step, name=name)
elif _matches(o, (Real, Real, Real, Real)):
step = o[2]
if step <= 0:
raise ValueError("step must be >= 0, not %r" % step)
min, max, value = _get_min_max_value(o[0], o[1], value=o[3], step=step)
if all(isinstance(_, Integral) for _ in o):
cls = IntSlider
else:
cls = FloatSlider
return cls(value=value, start=min, end=max, step=step, name=name)
elif len(o) == 4:
min, max, value = _get_min_max_value(o[0], o[1], value=o[3])
if all(isinstance(_, Integral) for _ in [o[0], o[1], o[3]]):
cls = IntSlider
else:
cls = | |
# File: toolbox/alignment.py
#!/usr/bin/env python
"""
The alignment module contains functions used in aligning two channel data.
See our `walkthrough <https://github.com/ReddingLab/Learning/blob/master/image-analysis-basics/Image-alignment-with-toolbox.ipynb/>`_
of the alignment module's usage.
"""
__all__ = ['FD_rule_bins', 'scrub_outliers', 'im_split',
'get_offset_distribution', 'find_global_offset',
'plot_assigned_maxima','align_by_offset','overlay']
__version__ = '0.0.1'
__author__ = '<NAME> and <NAME>'
import numpy as np
import random as ra
import matplotlib.pyplot as plt
from toolbox.point_fitting import find_maxima, fit_routine
from scipy.stats import skewnorm
from scipy.spatial import cKDTree
from scipy.ndimage import map_coordinates
from skimage.transform import warp_coords,rotate
from PIL import Image
import os
def FD_rule_bins(data):
    """Return histogram bin edges sized by the Freedman-Diaconis rule.

    The bin width is ``2 * IQR / n**(1/3)`` and the edges run from
    ``min(data)`` to just past ``max(data)``.
    https://en.wikipedia.org/wiki/Freedman%E2%80%93Diaconis_rule

    :param data: 1D array of data points
    :return: 1D array of bin edges; passes directly to numpy.histogram or
        matplotlib.pyplot.hist

    :Example:
    >>> import numpy as np
    >>> from toolbox.alignment import FD_rule_bins
    >>> edges = FD_rule_bins(np.random.normal(size=100))
    """
    q3 = np.percentile(data, 75)
    q1 = np.percentile(data, 25)
    width = 2 * (q3 - q1) / (len(data) ** (1 / 3.))
    return np.arange(min(data), max(data) + width, width)
def scrub_outliers(data):
    """
    Removes outliers from data. Works in two steps:

    * First, data is binned using ``FD_rule_bins`` and only the most highly
      populated bins (count > 60% of the max count) are retained
    * Second, any datum more than two standard deviations away from the mean,
      or more than 2 units from the median, is filtered out

    :param data: 1D array or list of data points
    :return: Filtered result, as a list

    :Example:
    >>> import numpy as np
    >>> from toolbox.alignment import scrub_outliers
    >>> x = np.concatenate((np.random.normal(size=200),np.random.uniform(-10,10,size=20)))
    >>> scrubed_x = scrub_outliers(x)
    """
    vals = np.histogram(data, FD_rule_bins(data))
    sorted_counts = sorted(vals[0])
    # Keep only bins whose population exceeds 60% of the fullest bin.
    binslist = [i for i in sorted_counts if i > .6 * sorted_counts[-1]]
    # -initial scrub using taking just highly populated bins
    # NOTE(review): if two retained bins share the same count, .index(i)
    # returns the first such bin both times, so one bin's data is collected
    # twice and the other's never — TODO confirm whether this is intended.
    scrubbed_data = []
    for i in binslist:
        leftedge = vals[0].tolist().index(i)
        for datum in data:
            # half-open bin membership test: [left_edge, right_edge)
            if datum < vals[1][leftedge + 1] and datum >= vals[1][leftedge]:
                scrubbed_data.append(datum)
    # -final scrub using standard deviation
    # The comprehension reads mean/std/median of the *pre-filter* list
    # (the name is only rebound after the comprehension completes).
    scrubbed_data = [datum for datum in scrubbed_data if
                     datum < np.mean(scrubbed_data) + 2 * np.std(scrubbed_data) and
                     datum > np.mean(scrubbed_data) - 2 * np.std(scrubbed_data) and
                     # inserted median filter below
                     # NOTE(review): the +/-2 window is hard-coded; presumably
                     # pixel units — confirm against the offset data's scale.
                     np.median(scrubbed_data) - 2 < datum < np.median(scrubbed_data) + 2]
    #print(scrubbed_data, "\n")
    return scrubbed_data
def clean_duplicate_maxima(dist, indexes):
    """Resolve duplicate nearest-neighbour assignments.

    ``indexes[pos]`` is the ch1 point assigned to ch2 point ``pos`` and
    ``dist[pos]`` the corresponding distance (both from ``cKDTree.query``).
    When several ch2 points map to the same ch1 point, keep only the closest.

    Bug fix vs. the original: the best-match test compared each distance
    against the stored *index* (``k < abs(tmp[1])``) instead of the stored
    distance, and the position counter was never reset between ch1 points,
    so the recorded ch2 position drifted. Both are corrected here.

    :param dist: distances returned by ``cKDTree.query``
    :param indexes: ch1 indices returned by ``cKDTree.query``
    :return: list of ``[ch1_index, ch2_position]`` pairs, one per ch1 point
    """
    paired_indexes = []
    for i in set(indexes):
        best_pos = None
        best_dist = np.inf
        # scan every ch2 position assigned to ch1 point i; keep the closest
        for pos, (j, k) in enumerate(zip(indexes, dist)):
            if j == i and k < best_dist:
                best_dist = k
                best_pos = pos
        paired_indexes.append([i, best_pos])
    return paired_indexes
def im_split(Image, splitstyle="hsplit"):
    """
    Image passed to this function is split into two channels based on "splitstyle".
    ***note*** micromanager images and numpy arrays are indexed opposite of one another.

    :param Image: 2D image array
    :param splitstyle: str, accepts "hsplit", "vsplit". Default is "hsplit"
    :return: The two subarrays of Image split along specified axis.

    :Example:
    >>> from toolbox.alignment import im_split
    >>> import toolbox.testdata as test
    >>> im = test.image_stack()[0]
    >>> ch1, ch2 = im_split(im)
    >>> ch1.shape, ch2.shape
    ((512, 256), (512, 256))
    >>> ch1, ch2 = im_split(im, "vsplit")
    >>> ch1.shape, ch2.shape
    ((256, 512), (256, 512))
    """
    # Split once (the original called np.hsplit/np.vsplit twice, doing the
    # same work and allocation twice).
    halves = getattr(np, splitstyle)(Image, 2)
    return halves[0], halves[1]
def get_offset_distribution(Image, bbox=9, splitstyle="hsplit", fsize=10):
    """
    This function in order:

    * splits the image into channels
    * locates and fits all of the points in each channel
    * pairs up associated points from each channel, uses cKDTree
    * and determines their offset

    :param Image: 2D image array
    :param bbox: int, passed to ``point_fitting.fit_routine``, size of ROI around each point to apply gaussian fit. Default is 9.
    :param splitstyle: string, passed to ``im_split``; accepts "hsplit", "vsplit". Default is "hsplit"
    :param fsize: int, passed to ``point_fitting.find_maxima``, size of average filters used in maxima determination. Default is 10.
    :return: Two lists containing all of the measured x- and y- offsets
    """
    ch1, ch2 = im_split(Image, splitstyle)
    # Candidate point locations in each channel.
    ch1_maxima = find_maxima(ch1, fsize)
    ch2_maxima = find_maxima(ch2, fsize)
    Delta_x, Delta_y = [], []
    # Nearest-neighbour assignment: for every ch2 point, its closest ch1 point.
    mytree = cKDTree(ch1_maxima)
    dist, indexes = mytree.query(ch2_maxima)
    for i, j in clean_duplicate_maxima(dist, indexes):
        # i indexes ch1_maxima, j indexes ch2_maxima
        x1, y1 = ch1_maxima[i]
        x2, y2 = ch2_maxima[j]
        # Sub-pixel centers from 2D Gaussian fits around each maximum.
        fit_ch1 = fit_routine(ch1, x1, y1, bbox)
        fit_ch2 = fit_routine(ch2, x2, y2, bbox)
        try:
            # fit params [1] and [2] are presumably the fitted x and y
            # centers — TODO confirm against point_fitting.fit_routine.
            Delta_x.append(fit_ch1[1]-fit_ch2[1])
            Delta_y.append(fit_ch1[2]-fit_ch2[2])
        except TypeError:
            # A failed fit returns a non-subscriptable value (likely None);
            # skip that pair rather than abort the whole distribution.
            pass
    return(Delta_x, Delta_y)
def find_global_offset(im_list, bbox=9, splitstyle="hsplit", fsize=10,
                       scale=0.5, binning=1):
    """
    This function finds the optimal x-offset and y-offset of the data by filtering
    the data collected from ``get_offset_distribution``. The filtered data are then
    fit using ``scipy.stats.skewnorm`` and the fitted location parameters returned.

    :param im_list: 1D list of image arrays to be used in determination of the offset
    :param bbox: int, passed to ``point_fitting.fit_routine``, size of ROI around each point to apply gaussian fit. Default is 9.
    :param splitstyle: string, passed to ``im_split``; accepts "hsplit", "vsplit". Default is "hsplit"
    :param fsize: int, passed to ``point_fitting.find_maxima``, size of average filters used in maxima determination. Default is 10.
    :param scale: passed to ``pinhole_filter`` — semantics not visible in this module; TODO document.
    :param binning: passed to ``pinhole_filter`` — semantics not visible in this module; TODO document.
    :return: Mean x- and y-offset values.
    """
    pooled_x, pooled_y = [], []
    for im in im_list:
        xdist, ydist = get_offset_distribution(im, bbox, splitstyle, fsize)
        # NOTE(review): pinhole_filter is neither defined nor imported in the
        # code visible here (an earlier docstring mentions scrub_outliers
        # instead) — confirm it exists elsewhere in this module, otherwise
        # this line raises NameError at call time.
        pooled_x += pinhole_filter(xdist, scale, binning)
        pooled_y += pinhole_filter(ydist, scale, binning)
    # Fit a skew-normal to each pooled distribution; only the location
    # parameter (mu) is used — `skew` and the sigmas are discarded.
    skew, mu1, sigma1 = skewnorm.fit(pooled_x)
    skew, mu2, sigma2 = skewnorm.fit(pooled_y)
    return mu1, mu2
def plot_assigned_maxima(Image, splitstyle="hsplit", fsize=10):
    """
    This function spits out a matplotlib plot with lines drawn between each of the assigned pairs of maxima.
    The purpose of this function is more for a sanity check than anything useful.

    :param Image: 2D image array
    :param splitstyle: string, passed to ``im_split``; accepts "hsplit", "vsplit". Default is "hsplit"
    :param fsize: int, passed to ``point_fitting.find_maxima``, size of average filters used in maxima determination. Default is 10.
    :return: fancy plot of assigned points.
    """
    ch1, ch2 = im_split(Image, splitstyle)
    ch1_maxima = find_maxima(ch1, fsize)
    ch2_maxima = find_maxima(ch2, fsize)
    # x-shift that maps ch2 coordinates back onto the composite image.
    # NOTE(review): this assumes splitstyle == "hsplit"; with "vsplit" the
    # second channel is below, so a y-shift would be needed — confirm.
    width = ch2.shape[1]
    plt.figure(figsize=(Image.shape[0]/64,Image.shape[1]/64))
    plt.axis('off')
    plt.imshow(Image, cmap="binary_r")
    plt.title("Assigned matching points")
    # Same nearest-neighbour pairing as get_offset_distribution.
    mytree = cKDTree(ch1_maxima)
    dist, indexes = mytree.query(ch2_maxima)
    for i, j in clean_duplicate_maxima(dist, indexes):
        x1, y1 = ch1_maxima[i]
        x2, y2 = ch2_maxima[j]
        # Random colour per pair so individual matches are distinguishable.
        tmp_color = (ra.uniform(0, 1), ra.uniform(0, 1), ra.uniform(0, 1))
        plt.plot(x1, y1, color=tmp_color, marker='+')
        plt.plot(x2+width, y2, color=tmp_color, marker='+')
        plt.plot([x1, x2+width], [y1, y2], color=tmp_color)
    plt.show()
def align_by_offset(Image, shift_x, shift_y, splitstyle="hsplit", shift_channel=1):
    """
    Shift one channel of *Image* by the supplied offsets and re-assemble the
    two channels into a single array. The single-image structure is retained.

    :param Image: 2D image array
    :param shift_x: float, offset in x
    :param shift_y: float, offset in y
    :param splitstyle: string, passed to ``im_split``; accepts "hsplit", "vsplit". Default is "hsplit"
    :param shift_channel: int, which channel to shift by the offsets; default is channel 1.
    :return: 2D image array of the aligned image
    """
    channels = im_split(Image, splitstyle)
    # For a vertical split the channel roles are swapped.
    if splitstyle == "vsplit":
        ch2, ch1 = channels
    else:
        ch1, ch2 = channels
    offset = np.array([shift_x, shift_y])
    # NOTE(review): warp_coords passes (x, y)-style coordinate pairs to the
    # mapping — confirm the offset sign/axis convention matches skimage docs.
    if shift_channel == 1:
        coords = warp_coords(lambda xy: xy - offset, ch2.shape)
        moved = map_coordinates(ch2, coords)
        return np.concatenate((ch1, moved), axis=1)
    coords = warp_coords(lambda xy: xy + offset, ch1.shape)
    moved = map_coordinates(ch1, coords)
    return np.concatenate((moved, ch2), axis=1)
def overlay(Image, rot=True, invert=False):
"""
Overlays the two channels derived from Image. Converts Image to an 8-bit RGB array, with one channel colored magenta and the other green.
:param Image: 2D image array
:param rot: bool, if True, image is rotated 90 degrees
:param invert: bool, if True, inverts the channel color assignment.
:return: 8-bit RGB image
:Example:
>>> from toolbox.alignment import overlay
>>> import toolbox.testdata as test
>>> import matplotlib.pyplot as plt
>>> im = test.image_stack()
>>> dx, dy = find_global_offset(im)
>>> aligned_image = | |
# ------------------------------------------------------------------------
# MIT License
#
# Copyright (c) [2021] [<NAME>]
#
# This code is part of the library PyDL <https://github.com/nash911/PyDL>
# This code is licensed under MIT license (see LICENSE.txt for details)
# ------------------------------------------------------------------------
import unittest
import numpy as np
import numpy.testing as npt
import itertools
from pydl.nn.layers import FC
from pydl import conf
class TestLayers(unittest.TestCase):
def test_score_fn(self):
    """FC.score_fn must reproduce inputs @ weights (+ bias)."""

    def check(inputs, weights, expected, bias=False):
        layer = FC(inputs, weights.shape[-1], weights, bias)
        npt.assert_almost_equal(layer.score_fn(inputs), expected, decimal=5)

    # Hand-worked example.
    inputs = np.array([[1, 2, 3],
                       [4, 5, 6]], dtype=conf.dtype)
    weights = np.array([[1, 2, 3, 4],
                        [5, 6, 7, 8],
                        [9, 10, 11, 12]], dtype=conf.dtype)
    bias = np.array([0.1, 0.2, 0.3, 0.4], dtype=conf.dtype)
    expected = np.array([[38, 44, 50, 56],
                         [83, 98, 113, 128]], dtype=conf.dtype)
    check(inputs, weights, expected)
    check(inputs, weights, expected + bias, bias)

    # Randomized sweep over batch size, feature size, neuron count and scale.
    sizes = [1, 2, 3, 6, 11]
    scales = [1e-6, 1e-3, 1e-1, 1e-0, 2, 3, 10]
    for batch, feat, neur, scl in itertools.product(sizes, sizes, sizes, scales):
        inputs = np.random.uniform(-scl, scl, (batch, feat))
        weights = np.random.randn(feat, neur) * scl
        bias = np.zeros(neur)
        expected = np.matmul(inputs, weights)
        check(inputs, weights, expected)
        check(inputs, weights, expected + bias, bias)
def test_forward(self):
    """Check FC.forward for every activation, with/without batchnorm and dropout."""
    def test(inp, w, true_out, bias=False, actv_fn='Sigmoid', bchnorm=False, p=None, mask=None):
        # Build the layer with a fixed weight matrix and compare forward output.
        fc = FC(inp, w.shape[-1], w, bias, activation_fn=actv_fn, batchnorm=bchnorm, dropout=p)
        out_fc = fc.forward(inp, mask=mask)
        npt.assert_almost_equal(out_fc, true_out, decimal=5)
    # Manually calculated
    X = np.array([[1, 2, 3],
                  [4, 5, 6]], dtype=conf.dtype)
    w = np.array([[1, 2, 3, 4],
                  [5, 6, 7, 8],
                  [9, 10, 11, 12]], dtype=conf.dtype)
    bias = np.array([0.1, 0.2, 0.3, 0.4], dtype=conf.dtype)
    score_out = np.array([[38, 44, 50, 56],
                          [83, 98, 113, 128]], dtype=conf.dtype)
    true_out = 1.0 / (1.0 + np.exp(-score_out))
    test(X, w, true_out)
    true_out = 1.0 / (1.0 + np.exp(-(score_out + bias)))
    test(X, w, true_out, bias)
    # Combinatorial Test Cases
    # ------------------------
    batch_size = [1, 2, 3, 6, 11]
    feature_size = [1, 2, 3, 6, 11]
    num_neurons = [1, 2, 3, 6, 11]
    scale = [1e-6, 1e-3, 1e-1, 1e-0, 2]
    batchnorm = [True, False]
    dropout = [True, False]
    for batch, feat, scl, neur, bn, dout in \
            list(itertools.product(batch_size, feature_size, scale, num_neurons, batchnorm,
                                   dropout)):
        X = np.random.uniform(-scl, scl, (batch, feat))
        w = np.random.randn(feat, neur) * scl
        bias = np.zeros(neur)
        score = np.matmul(X, w) + bias
        if bn:
            # Reference batchnorm: zero mean, unit variance per feature.
            score = (score - np.mean(score, axis=0)) / np.sqrt(np.var(score, axis=0) + 1e-32)
        if dout:
            # Random keep-probability and a fixed binary dropout mask.
            p = np.random.rand()
            mask = np.array(np.random.rand(*score.shape) < p, dtype=conf.dtype)
        else:
            p = None
            mask = None
        # Sigmoid without bias/batchnorm.
        true_out_sig = 1.0 / (1.0 + np.exp(-np.matmul(X, w)))
        if dout:
            true_out_sig *= mask
        test(X, w, true_out_sig, bias=False, actv_fn='Sigmoid', bchnorm=False, p=p, mask=mask)
        # Sigmoid on the (possibly batchnormed) score.
        true_out_sig = 1.0 / (1.0 + np.exp(-score))
        if dout:
            true_out_sig *= mask
        test(X, w, true_out_sig, bias, actv_fn='Sigmoid', bchnorm=bn, p=p, mask=mask)
        true_out_tanh = (2.0 / (1.0 + np.exp(-2.0 * score))) - 1.0
        if dout:
            true_out_tanh *= mask
        test(X, w, true_out_tanh, bias, actv_fn='Tanh', bchnorm=bn, p=p, mask=mask)
        unnorm_prob = np.exp(score)
        true_out_softmax = unnorm_prob / np.sum(unnorm_prob, axis=-1, keepdims=True)
        if dout:
            true_out_softmax *= mask
        test(X, w, true_out_softmax, bias, actv_fn='Softmax', bchnorm=bn, p=p, mask=mask)
        true_out_relu = np.maximum(0, score)
        if dout:
            # NOTE(review): inverted-dropout scaling (mask /= p) is applied
            # in-place here, so the scaled mask is reused by the Linear case
            # below but NOT by the activations above — presumably FC only
            # rescales for ReLU/Linear; confirm against the layer code.
            mask /= p
            true_out_relu *= mask
        test(X, w, true_out_relu, bias, actv_fn='ReLU', bchnorm=bn, p=p, mask=mask)
        true_out_linear = score
        if dout:
            true_out_linear *= mask
        test(X, w, true_out_linear, bias, actv_fn='Linear', bchnorm=bn, p=p, mask=mask)
def test_gradients_manually(self):
    """Check FC weight/bias/input gradients against hand-computed values."""
    def test(inp, w, inp_grad, true_weights_grad, true_inputs_grad, bias=False,
             true_bias_grad=None):
        fc = FC(inp, w.shape[-1], w, bias)
        # Bug fix: pass the helper's own `inp`, not the enclosing-scope X.
        # (The original `inputs=X` only worked because every caller happened
        # to pass X; any other input would silently use the wrong tensor.)
        weights_grad = fc.weight_gradients(inp_grad, inputs=inp)
        bias_grad = fc.bias_gradients(inp_grad)
        inputs_grad = fc.input_gradients(inp_grad)
        npt.assert_almost_equal(weights_grad, true_weights_grad, decimal=5)
        npt.assert_almost_equal(bias_grad, true_bias_grad, decimal=5)
        npt.assert_almost_equal(inputs_grad, true_inputs_grad, decimal=5)
    # Manually calculated - Unit input gradients
    X = np.array([[1, 2, 3],
                  [4, 5, 6]], dtype=conf.dtype)
    w = np.array([[1, 2, 3, 4],
                  [5, 6, 7, 8],
                  [9, 10, 11, 12]], dtype=conf.dtype)
    bias = np.array([0.1, 0.2, 0.3, 0.4], dtype=conf.dtype)
    inp_grad = np.ones((2, 4), dtype=conf.dtype)
    # dL/dW = X^T @ dL/dY; with unit upstream grads this is the column sums.
    true_weights_grad = np.sum(X, axis=0, keepdims=True).T * np.ones(w.shape, dtype=conf.dtype)
    true_inputs_grad = np.sum(w, axis=-1, keepdims=True).T * np.ones(X.shape, dtype=conf.dtype)
    true_bias_grad = np.sum(inp_grad, axis=0, keepdims=False)
    test(X, w, inp_grad, true_weights_grad, true_inputs_grad, bias, true_bias_grad)
    # Manually calculated - non-uniform upstream gradients
    X = np.array([[1, 2, 3],
                  [4, 5, 6]], dtype=conf.dtype)
    w = np.array([[1, 2, 3, 4],
                  [5, 6, 7, 8],
                  [9, 10, 11, 12]], dtype=conf.dtype)
    bias = np.array([0.1, 0.2, 0.3, 0.4], dtype=conf.dtype)
    inp_grad = np.array([[3, 3, 3, 3],
                         [2, 2, 2, 2]], dtype=conf.dtype)
    true_weights_grad = np.array([[11, 11, 11, 11],
                                  [16, 16, 16, 16],
                                  [21, 21, 21, 21]], dtype=conf.dtype)
    true_bias_grad = np.sum(inp_grad, axis=0, keepdims=False)
    true_inputs_grad = np.array([[30, 78, 126],
                                 [20, 52, 84]], dtype=conf.dtype)
    test(X, w, inp_grad, true_weights_grad, true_inputs_grad, bias, true_bias_grad)
def test_gradients_finite_difference(self):
    """Check FC gradients against central finite differences of score_fn."""
    self.delta = 1e-5

    def test(inp, w, inp_grad, bias=False):
        fc = FC(inp, w.shape[-1], w, bias)
        # Bug fix: pass the helper's own `inp`, not the enclosing-scope X.
        # (The original `inputs=X` only worked because every caller happened
        # to pass X; any other input would silently use the wrong tensor.)
        weights_grad = fc.weight_gradients(inp_grad, inputs=inp)
        bias_grad = fc.bias_gradients(inp_grad)
        inputs_grad = fc.input_gradients(inp_grad)
        # Weights finite difference gradients (perturb one weight row at a
        # time; summing over the batch axis recovers dL/dW for that row).
        weights_finite_diff = np.empty(weights_grad.shape)
        for i in range(weights_grad.shape[0]):
            w_delta = np.zeros(w.shape, dtype=conf.dtype)
            w_delta[i] = self.delta
            weights_finite_diff[i] = np.sum(((fc.score_fn(inp, w + w_delta) -
                                              fc.score_fn(inp, w - w_delta)) /
                                             (2 * self.delta)) * inp_grad, axis=0)
        # Bias finite difference gradients (all bias terms perturbed at once;
        # each output column depends on exactly one bias entry).
        fc.bias = bias + self.delta
        lhs = fc.score_fn(inp)
        fc.bias = bias - self.delta
        rhs = fc.score_fn(inp)
        bias_finite_diff = np.sum(((lhs - rhs) / (2 * self.delta)) * inp_grad, axis=0)
        fc.bias = bias  # restore the layer's bias
        # Inputs finite difference gradients (perturb one input column at a time).
        inputs_finite_diff = np.empty(inputs_grad.shape)
        for i in range(inputs_grad.shape[1]):
            i_delta = np.zeros(inp.shape, dtype=conf.dtype)
            i_delta[:, i] = self.delta
            inputs_finite_diff[:, i] = np.sum(((fc.score_fn(inp + i_delta, w) -
                                                fc.score_fn(inp - i_delta, w)) /
                                               (2 * self.delta)) * inp_grad, axis=-1,
                                              keepdims=False)
        # Threshold Gradient Diff Check
        npt.assert_almost_equal(weights_grad, weights_finite_diff, decimal=5)
        npt.assert_almost_equal(bias_grad, bias_finite_diff, decimal=5)
        npt.assert_almost_equal(inputs_grad, inputs_finite_diff, decimal=5)

    # Manually calculated - Unit input gradients
    X = np.array([[1, 2, 3],
                  [4, 5, 6]], dtype=conf.dtype)
    w = np.array([[1, 2, 3, 4],
                  [5, 6, 7, 8],
                  [9, 10, 11, 12]], dtype=conf.dtype)
    bias = np.array([0.1, 0.2, 0.3, 0.4], dtype=conf.dtype)
    inp_grad = np.ones((2, 4), dtype=conf.dtype)
    test(X, w, inp_grad, bias)
    # Manually calculated - mixed-sign upstream gradients
    X = np.array([[1, 2, 3],
                  [4, 5, 6]], dtype=conf.dtype)
    w = np.array([[1, 2, 3, 4],
                  [5, 6, 7, 8],
                  [9, 10, 11, 12]], dtype=conf.dtype)
    bias = np.array([0.1, 0.2, 0.3, 0.4], dtype=conf.dtype)
    inp_grad = np.array([[1, 2, 3, 4],
                         [-5, -6, -7, -8]], dtype=conf.dtype)
    test(X, w, inp_grad, bias)
    # Combinatorial Test Cases
    # ------------------------
    batch_size = [1, 2, 3, 6, 11]
    feature_size = [1, 2, 3, 6, 11]
    num_neurons = [1, 2, 3, 6, 11]
    scale = [1e-4, 1e-3, 1e-1, 1e-0, 2, 3, 10]
    unit_inp_grad = [True, False]
    for batch, feat, neur, scl, unit in list(itertools.product(batch_size, feature_size,
                                                               num_neurons, scale,
                                                               unit_inp_grad)):
        X = np.random.uniform(-scl, scl, (batch, feat))
        w = np.random.randn(feat, neur) * scl
        bias = np.random.rand(neur) * scl
        inp_grad = np.ones((batch, neur), dtype=conf.dtype) if unit else \
            np.random.uniform(-10, 10, (batch, neur))
        test(X, w, inp_grad, bias)
def test_backward_gradients_finite_difference(self):
self.delta = 1e-8
def test(inp, w, inp_grad, bias=False, actv_fn='Sigmoid', batchnorm=False, p=None,
mask=None):
fc = FC(inp, w.shape[-1], w, bias, activation_fn=actv_fn, batchnorm=batchnorm,
dropout=p)
_ = fc.forward(inp, mask=mask)
inputs_grad = fc.backward(inp_grad)
weights_grad = fc.weights_grad
bias_grad = fc.bias_grad
# Weights finite difference gradients
weights_finite_diff = np.empty(weights_grad.shape)
for i in range(weights_grad.shape[0]):
for j in range(weights_grad.shape[1]):
w_delta = np.zeros(w.shape, dtype=conf.dtype)
w_delta[i, j] = self.delta
fc.weights = w + w_delta
lhs = fc.forward(inp, mask=mask)
fc.weights = w - w_delta
rhs = fc.forward(inp, mask=mask)
weights_finite_diff[i, j] = np.sum(((lhs - rhs) / (2 * self.delta)) * inp_grad)
# Replace finite-diff gradients calculated close to 0 with NN calculated
# gradients to pass assertion test
grad_kink = np.sum(np.array(np.logical_xor(lhs > 0, rhs > 0), dtype=np.int32))
if grad_kink > 0:
weights_finite_diff[i, j] = weights_grad[i, j]
fc.weights = w
# Bias finite difference gradients
bias_finite_diff = np.empty(bias_grad.shape)
for i in range(bias_grad.shape[0]):
bias_delta = np.zeros(bias.shape, dtype=conf.dtype)
bias_delta[i] = self.delta
fc.bias = bias + bias_delta
lhs = fc.forward(inp, mask=mask)
fc.bias = bias - bias_delta
rhs = fc.forward(inp, mask=mask)
| |
<filename>openpecha/formatters/hfml.py
"""
Formatter for HFML annotations in the text
This module implements all classes necessary to format HFML annotation to OpenPecha format.
HFML (Human Friendly Markup Language) contains tagset used for structuring and annotating the text.
"""
import re
from json import encoder
from pathlib import Path
import yaml
from ..utils import Vol2FnManager, dump_yaml, load_yaml
from .formatter import BaseFormatter
from .layers import *
from .layers import AnnType, _attr_names
class HFMLFormatter(BaseFormatter):
"""
OpenPecha Formatter for digitized wooden-printed Pecha based on annotation scheme from Esukhia.
"""
def __init__(self, output_path=None, metadata=None, is_book=False):
    """Initialize the formatter and its per-run parsing state.

    :param output_path: destination path for the OpenPecha output (forwarded to BaseFormatter).
    :param metadata: optional metadata dict (forwarded to BaseFormatter).
    :param is_book: if True, ``text_preprocess`` joins ``[p]``-delimited paragraphs.
    """
    super().__init__(output_path, metadata)
    self.is_book = is_book
    self.base_text = ""   # accumulated annotation-free text
    self.vol_walker = 0   # count of volumes processed so far
    # Per-annotation accumulators (one list per annotation category).
    self.book_title = []
    self.book_number = []
    self.poti_title = []
    self.author = []
    self.chapter_title = []
    self.topic_id = []  # made class variable as it needs to update cross poti
    self.current_topic_id = (
        []
    )  # made class variable as it needs to update cross poti
    self.sub_topic = []  # made class variable as it needs to update cross poti
    self.page = []
    self.error_id = []
    self.archaic_word_id = []
    self.abs_er_id = []
    self.notes_id = []
    self.sub_topic_Id = []  # made class variable as it needs to update cross poti
    self.topic_info = []
    self.topic_local_id = []
    self.sub_topic_info = []
    self.sub_topic_local_id = []
    self.cur_sub = []
    # Collected spans for inline markup annotations.
    self.author_pattern = []
    self.citation_pattern = []
    self.sabche_pattern = []
    self.tsawa_pattern = []
    self.yigchung_pattern = []
    self.durchen_pattern = []
def text_preprocess(self, text):
    """Strip a leading BOM and, for books, join ``[p]``-delimited paragraphs.

    Lines between a pair of ``[p]`` markers are concatenated into one
    paragraph (no newlines); lines outside markers keep their own newline.

    :param text: raw input text.
    :return: preprocessed text (unchanged apart from the BOM when
        ``self.is_book`` is False).
    """
    # Guard against empty input (the original indexed text[0] unconditionally
    # and raised IndexError on "").
    if text and text[0] == "\ufeff":
        text = text[1:]
    if not self.is_book:
        return text
    # Compile once and search each line once (the original re-ran the same
    # re.search in the elif branch).
    marker = re.compile(r"\[p\]")
    result_text = ""
    para = False
    for line in text.splitlines():
        if marker.search(line):
            # Marker line: toggle paragraph mode; a closing marker ends the
            # paragraph with a newline. Marker lines themselves are dropped.
            if para:
                para = False
                result_text += "\n"
            else:
                para = True
        elif para:
            result_text += line
        else:
            result_text += line + "\n"
    return result_text
def _load_metadata(self):
    """Return metadata supplied at construction, else load meta.yml if present."""
    if self.metadata:
        return self.metadata
    meta_path = self.dirs["opf_path"] / "meta.yml"
    return load_yaml(meta_path) if meta_path.is_file() else {}
def _save_metadata(self, **kwargs):
    """Merge *kwargs* into the metadata, ensure an id, and write meta.yml."""
    target = self.dirs["opf_path"] / "meta.yml"
    if kwargs:
        self.metadata.update(kwargs)
    # Only touch pecha_path when an id has to be generated.
    if "id" not in self.metadata:
        self.metadata["id"] = f"opecha:{self.pecha_path.name}"
    dump_yaml(self.metadata, target)
def get_input(self, input_path):
    """Yield (preprocessed_text, filename, file_count) for each file in *input_path*, sorted by name."""
    paths = sorted(input_path.iterdir())
    total = len(paths)
    for path in paths:
        yield self.text_preprocess(path.read_text()), path.name, total
def total_pattern(self, pat_list, annotated_line):
    """It calculates the length of all the annotation detected in a line.

    Used to map positions in the annotated text onto positions in the base
    text: every character counted here is markup that contributes nothing
    to the base text.

    Args:
        pat_list (dict): It contains all the annotation's regex pattern as value and name of annotation as key.
        annotated_line (str): It contains the annotated line to be process.
    Return:
        total_length (int): It accumulates as annotations are detected in the line.
    """
    total_length = 0  # total length of annotation detected in a line
    # Markers removed entirely from the base text: the whole match counts.
    for pattern in [
        "line_pattern",
        "topic_pattern",
        "sub_topic_pattern",
        "note_pattern",
        "start_cit_pattern",
        "start_sabche_pattern",
        "start_tsawa_pattern",
        "start_yigchung_pattern",
        "start_durchen_pattern",
    ]:
        if re.search(pat_list[pattern], annotated_line):
            match_list = re.finditer(
                pat_list[pattern], annotated_line
            )  # list of match object of given pattern in line
            for match in match_list:
                total_length = total_length + len(match[0])
    # Error annotations keep the corrected part in the base text, so only
    # the surrounding markup length counts. group(1) presumably captures an
    # optional leading marker character — TODO confirm against the tagset.
    if re.search(pat_list["error_pattern"], annotated_line):
        errors = re.finditer(
            pat_list["error_pattern"], annotated_line
        )  # list of match object of error pattern in line
        for error in errors:
            if error.group(1):
                starting_point = 2
            else:
                starting_point = 1
            # text before the comma (minus the opening marker) stays in base
            error_part = error[0].split(",")[0][starting_point:]
            total_length = total_length + (len(error[0]) - len(error_part))
    # Archaic-word annotations: same keep-the-word, count-the-markup logic.
    if re.search(pat_list["archaic_word_pattern"], annotated_line):
        archaic_words = re.finditer(
            pat_list["archaic_word_pattern"], annotated_line
        )  # list of match object of error pattern in line
        for archaic_word in archaic_words:
            if archaic_word.group(1):
                starting_point = 2
            else:
                starting_point = 1
            archaic_part = archaic_word[0].split(",")[0][starting_point:]
            total_length = total_length + (len(archaic_word[0]) - len(archaic_part))
    # Absolute-error markers contribute 2 marker chars (3 with the optional
    # leading capture) — hard-coded to the HFML tag lengths.
    if re.search(pat_list["abs_er_pattern"], annotated_line):
        abs_ers = re.finditer(
            pat_list["abs_er_pattern"], annotated_line
        )  # list of match of abs_er pattern in line
        for abs_er in abs_ers:
            if abs_er.group(1):
                total_length += 3
            else:
                total_length += 2
    # Title-like annotations: the title text stays; the 4 (or 5) marker
    # chars are markup. Only the first occurrence per line is considered.
    for pattern in [
        "author_pattern",
        "book_title_pattern",
        "poti_title_pattern",
        "chapter_title_pattern",
        "book_number_pattern",
    ]:
        title_pattern = re.search(pat_list[pattern], annotated_line)
        if title_pattern:
            if title_pattern.group(1):
                total_length += 5
            else:
                total_length += 4
    # Closing markers are always 2 characters each.
    for pattern in [
        "end_cit_pattern",
        "end_sabche_pattern",
        "end_tsawa_pattern",
        "end_yigchung_pattern",
        "end_durchen_pattern",
    ]:
        end_patterns = re.findall(
            pat_list[pattern], annotated_line
        )  # list of match of citation pattern in line
        total_length = total_length + 2 * len(end_patterns)
    return total_length
def merge(self, start_list, end_list):
    """Pair each annotation start with its matching end.

    The starting and ending offsets of paired annotations (citation,
    yigchung, sabche, tsawa) arrive in two parallel lists; this zips them
    into ``(payload, {"span": Span(start, end)})`` tuples. Extra entries in
    the longer list are ignored (annotations are assumed not to cross
    volumes).

    :param start_list: list of ``(payload, start_offset)`` tuples.
    :param end_list: list of end offsets, parallel to ``start_list``.
    :return: list of ``(payload, {"span": Span(start, end)})`` tuples.
    """
    return [
        (payload, {"span": Span(begin, end)})
        for (payload, begin), end in zip(start_list, end_list)
    ]
def search_before(self, ann, pat_list, line):
    """It calculates the length of annotation detected in a given line before a given annotation.

    Companion to ``total_pattern``: the same per-category marker-length
    accounting, but only for matches that start before *ann*, so the
    caller can convert *ann*'s annotated-text offset into a base-text
    offset.

    Args:
        ann (match object): It is a match object of the annotation of which we want to calculate
            the length of any annotation detected before it.
        pat_list (dict): It contains all the annotation's regex pattern as value and name of annotation as key.
        line (str): It contains the line in which we wants to calculate the length of annotation found before the given annotation.
    Return:
        length_before (int): the total length of annotation markup found before the given annotation in the given line.
    """
    length_before = 0
    # Markers removed entirely: full match length counts when before ann.
    for pp in [
        "line_pattern",
        "topic_pattern",
        "sub_topic_pattern",
        "note_pattern",
        "start_cit_pattern",
        "start_sabche_pattern",
        "start_tsawa_pattern",
        "start_yigchung_pattern",
        "start_durchen_pattern",
    ]:
        if re.search(pat_list[pp], line):
            match_list = re.finditer(
                pat_list[pp], line
            )  # list of match object of given pattern in line
            for match in match_list:
                if ann.start() > match.start():
                    length_before = length_before + len(match[0])
    # Error annotations: only the markup (not the retained text) counts.
    if re.search(pat_list["error_pattern"], line):
        errors = re.finditer(
            pat_list["error_pattern"], line
        )  # list of match object of error pattern in line
        for error in errors:
            if error.group(1):
                starting_point = 2
            else:
                starting_point = 1
            if ann.start() > error.start():
                error_part = error[0].split(",")[0][starting_point:]
                length_before = length_before + (len(error[0]) - len(error_part))
    # Archaic-word annotations: same retained-text logic as errors.
    if re.search(pat_list["archaic_word_pattern"], line):
        archaic_words = re.finditer(
            pat_list["archaic_word_pattern"], line
        )  # list of match object of error pattern in line
        for archaic_word in archaic_words:
            if archaic_word.group(1):
                starting_point = 2
            else:
                starting_point = 1
            if ann.start() > archaic_word.start():
                archaic_part = archaic_word[0].split(",")[0][starting_point:]
                length_before = length_before + (
                    len(archaic_word[0]) - len(archaic_part)
                )
    # Absolute-error markers: 2 chars (3 with the optional capture).
    if re.search(pat_list["abs_er_pattern"], line):
        abs_ers = re.finditer(
            pat_list["abs_er_pattern"], line
        )  # list of match object of abs_er pattern in line
        for abs_er in abs_ers:
            if ann.start() > abs_er.start():
                if abs_er.group(1):
                    pat_len = 3
                else:
                    pat_len = 2
                length_before = length_before + pat_len
    # Title-like annotations: 3 (or 4) marker chars.
    # NOTE(review): total_pattern counts 4/5 for the same categories; the
    # 3/4 here presumably excludes a marker that sits after ann — confirm
    # against the HFML tag definitions.
    for pp in [
        "author_pattern",
        "book_title_pattern",
        "poti_title_pattern",
        "chapter_title_pattern",
        "book_number_pattern",
    ]:
        title_pattern = re.search(pat_list[pp], line)
        if title_pattern:
            if ann.start() > title_pattern.start():
                if title_pattern.group(1):
                    length_before += 4
                else:
                    length_before += 3
    # Closing markers are always 2 characters each.
    for pp in [
        "end_cit_pattern",
        "end_sabche_pattern",
        "end_tsawa_pattern",
        "end_yigchung_pattern",
        "end_durchen_pattern",
    ]:
        end_patterns = re.finditer(
            pat_list[pp], line
        )  # list of match object of citation pattern in line
        for end_pattern in end_patterns:
            if ann.start() > end_pattern.start():
                length_before = length_before + 2
    return length_before
def base_extract(self, pat_list, annotated_line):
""" It extract the base text from annotated text.
Args:
pat_list (dict): It contains all the annotation's regex pattern as value and name of annotation as key.
annotated_line (str): It contains the annotated line from which we want to extract the base text.
Return:
base_line (str): It contains the base text which is being extracted from the given annotated line.
"""
base_line = (
annotated_line # stores the base_line which is line without annotation
)
for pattern in [
"line_pattern",
"topic_pattern",
"sub_topic_pattern",
"note_pattern",
"start_cit_pattern",
"end_cit_pattern",
"start_sabche_pattern",
"end_sabche_pattern",
"start_tsawa_pattern",
"end_tsawa_pattern",
"start_yigchung_pattern",
"end_yigchung_pattern",
"start_durchen_pattern",
"end_durchen_pattern",
]:
base_line = re.sub(pat_list[pattern], "", base_line)
for pattern in [
"author_pattern",
"book_title_pattern",
"poti_title_pattern",
"chapter_title_pattern",
"book_number_pattern",
]:
title_pattern = re.search(pat_list[pattern], annotated_line)
if title_pattern:
if title_pattern.group(1):
starting_point = 4
else:
starting_point = 3
title = title_pattern[0][starting_point:-1]
base_line = re.sub(pat_list[pattern], title, base_line, 1)
if re.search(pat_list["error_pattern"], annotated_line):
errors = re.finditer(
pat_list["error_pattern"], annotated_line
) # list of match object of error pattern in line
for error in errors:
if error.group(1):
starting_point = 2
else:
starting_point = 1
| |
"4781")),
((Standards.JSIC13, "5894"), (Standards.ISIC4, "4722")),
((Standards.JSIC13, "5894"), (Standards.ISIC4, "4781")),
((Standards.JSIC13, "5895"), (Standards.ISIC4, "4721")),
((Standards.JSIC13, "5895"), (Standards.ISIC4, "4781")),
((Standards.JSIC13, "5896"), (Standards.ISIC4, "4721")),
((Standards.JSIC13, "5896"), (Standards.ISIC4, "4781")),
((Standards.JSIC13, "5897"), (Standards.ISIC4, "4721")),
((Standards.JSIC13, "5897"), (Standards.ISIC4, "4781")),
((Standards.JSIC13, "5898"), (Standards.ISIC4, "4721")),
((Standards.JSIC13, "5898"), (Standards.ISIC4, "4781")),
((Standards.JSIC13, "5899"), (Standards.ISIC4, "4721")),
((Standards.JSIC13, "5899"), (Standards.ISIC4, "4781")),
((Standards.JSIC13, "5900"), (Standards.ISIC4, "7010")),
((Standards.JSIC13, "5908"), (Standards.ISIC4, "5210")),
((Standards.JSIC13, "5909"), (Standards.ISIC4, "7010")),
((Standards.JSIC13, "5911"), (Standards.ISIC4, "4510")),
((Standards.JSIC13, "5912"), (Standards.ISIC4, "4510")),
((Standards.JSIC13, "5912"), (Standards.ISIC4, "4520")),
((Standards.JSIC13, "5913"), (Standards.ISIC4, "4520")),
((Standards.JSIC13, "5913"), (Standards.ISIC4, "4530")),
((Standards.JSIC13, "5914"), (Standards.ISIC4, "4540")),
((Standards.JSIC13, "5921"), (Standards.ISIC4, "4763")),
((Standards.JSIC13, "5931"), (Standards.ISIC4, "4742")),
((Standards.JSIC13, "5931"), (Standards.ISIC4, "4759")),
((Standards.JSIC13, "5931"), (Standards.ISIC4, "4789")),
((Standards.JSIC13, "5932"), (Standards.ISIC4, "4741")),
((Standards.JSIC13, "5932"), (Standards.ISIC4, "4762")),
((Standards.JSIC13, "5932"), (Standards.ISIC4, "4789")),
((Standards.JSIC13, "5933"), (Standards.ISIC4, "4774")),
((Standards.JSIC13, "5933"), (Standards.ISIC4, "4789")),
((Standards.JSIC13, "5939"), (Standards.ISIC4, "4759")),
((Standards.JSIC13, "5939"), (Standards.ISIC4, "4789")),
((Standards.JSIC13, "6000"), (Standards.ISIC4, "7010")),
((Standards.JSIC13, "6008"), (Standards.ISIC4, "5210")),
((Standards.JSIC13, "6009"), (Standards.ISIC4, "7010")),
((Standards.JSIC13, "6011"), (Standards.ISIC4, "4752")),
((Standards.JSIC13, "6011"), (Standards.ISIC4, "4753")),
((Standards.JSIC13, "6011"), (Standards.ISIC4, "4759")),
((Standards.JSIC13, "6011"), (Standards.ISIC4, "4789")),
((Standards.JSIC13, "6012"), (Standards.ISIC4, "4752")),
((Standards.JSIC13, "6013"), (Standards.ISIC4, "4753")),
((Standards.JSIC13, "6014"), (Standards.ISIC4, "4773")),
((Standards.JSIC13, "6021"), (Standards.ISIC4, "4752")),
((Standards.JSIC13, "6021"), (Standards.ISIC4, "4759")),
((Standards.JSIC13, "6021"), (Standards.ISIC4, "4789")),
((Standards.JSIC13, "6022"), (Standards.ISIC4, "4759")),
((Standards.JSIC13, "6022"), (Standards.ISIC4, "4773")),
((Standards.JSIC13, "6022"), (Standards.ISIC4, "4789")),
((Standards.JSIC13, "6023"), (Standards.ISIC4, "4759")),
((Standards.JSIC13, "6023"), (Standards.ISIC4, "4773")),
((Standards.JSIC13, "6023"), (Standards.ISIC4, "4789")),
((Standards.JSIC13, "6029"), (Standards.ISIC4, "4759")),
((Standards.JSIC13, "6029"), (Standards.ISIC4, "4789")),
((Standards.JSIC13, "6031"), (Standards.ISIC4, "4772")),
((Standards.JSIC13, "6031"), (Standards.ISIC4, "4719")),
((Standards.JSIC13, "6032"), (Standards.ISIC4, "4772")),
((Standards.JSIC13, "6033"), (Standards.ISIC4, "4772")),
((Standards.JSIC13, "6034"), (Standards.ISIC4, "4772")),
((Standards.JSIC13, "6041"), (Standards.ISIC4, "4653")),
((Standards.JSIC13, "6041"), (Standards.ISIC4, "4752")),
((Standards.JSIC13, "6042"), (Standards.ISIC4, "4773")),
((Standards.JSIC13, "6043"), (Standards.ISIC4, "4773")),
((Standards.JSIC13, "6051"), (Standards.ISIC4, "4730")),
((Standards.JSIC13, "6051"), (Standards.ISIC4, "4773")),
((Standards.JSIC13, "6052"), (Standards.ISIC4, "4730")),
((Standards.JSIC13, "6052"), (Standards.ISIC4, "4773")),
((Standards.JSIC13, "6061"), (Standards.ISIC4, "4759")),
((Standards.JSIC13, "6061"), (Standards.ISIC4, "4761")),
((Standards.JSIC13, "6061"), (Standards.ISIC4, "4789")),
((Standards.JSIC13, "6062"), (Standards.ISIC4, "4774")),
((Standards.JSIC13, "6063"), (Standards.ISIC4, "4761")),
((Standards.JSIC13, "6064"), (Standards.ISIC4, "4761")),
((Standards.JSIC13, "6071"), (Standards.ISIC4, "4763")),
((Standards.JSIC13, "6071"), (Standards.ISIC4, "4789")),
((Standards.JSIC13, "6072"), (Standards.ISIC4, "4741")),
((Standards.JSIC13, "6072"), (Standards.ISIC4, "4764")),
((Standards.JSIC13, "6072"), (Standards.ISIC4, "4789")),
((Standards.JSIC13, "6073"), (Standards.ISIC4, "4759")),
((Standards.JSIC13, "6073"), (Standards.ISIC4, "4762")),
((Standards.JSIC13, "6073"), (Standards.ISIC4, "4789")),
((Standards.JSIC13, "6081"), (Standards.ISIC4, "4773")),
((Standards.JSIC13, "6082"), (Standards.ISIC4, "4773")),
((Standards.JSIC13, "6091"), (Standards.ISIC4, "4719")),
((Standards.JSIC13, "6092"), (Standards.ISIC4, "4723")),
((Standards.JSIC13, "6092"), (Standards.ISIC4, "4781")),
((Standards.JSIC13, "6093"), (Standards.ISIC4, "4773")),
((Standards.JSIC13, "6094"), (Standards.ISIC4, "4752")),
((Standards.JSIC13, "6094"), (Standards.ISIC4, "4753")),
((Standards.JSIC13, "6095"), (Standards.ISIC4, "4773")),
((Standards.JSIC13, "6095"), (Standards.ISIC4, "4789")),
((Standards.JSIC13, "6096"), (Standards.ISIC4, "4773")),
((Standards.JSIC13, "6097"), (Standards.ISIC4, "4774")),
((Standards.JSIC13, "6098"), (Standards.ISIC4, "4774")),
((Standards.JSIC13, "6099"), (Standards.ISIC4, "4759")),
((Standards.JSIC13, "6099"), (Standards.ISIC4, "4762")),
((Standards.JSIC13, "6099"), (Standards.ISIC4, "4773")),
((Standards.JSIC13, "6099"), (Standards.ISIC4, "4789")),
((Standards.JSIC13, "6100"), (Standards.ISIC4, "7010")),
((Standards.JSIC13, "6108"), (Standards.ISIC4, "5210")),
((Standards.JSIC13, "6109"), (Standards.ISIC4, "7010")),
((Standards.JSIC13, "6111"), (Standards.ISIC4, "4791")),
((Standards.JSIC13, "6111"), (Standards.ISIC4, "4799")),
((Standards.JSIC13, "6112"), (Standards.ISIC4, "4791")),
((Standards.JSIC13, "6112"), (Standards.ISIC4, "4799")),
((Standards.JSIC13, "6113"), (Standards.ISIC4, "4791")),
((Standards.JSIC13, "6113"), (Standards.ISIC4, "4799")),
((Standards.JSIC13, "6114"), (Standards.ISIC4, "4791")),
((Standards.JSIC13, "6114"), (Standards.ISIC4, "4799")),
((Standards.JSIC13, "6119"), (Standards.ISIC4, "4791")),
((Standards.JSIC13, "6119"), (Standards.ISIC4, "4799")),
((Standards.JSIC13, "6121"), (Standards.ISIC4, "4799")),
((Standards.JSIC13, "6199"), (Standards.ISIC4, "4791")),
((Standards.JSIC13, "6199"), (Standards.ISIC4, "4799")),
((Standards.JSIC13, "6200"), (Standards.ISIC4, "7010")),
((Standards.JSIC13, "6209"), (Standards.ISIC4, "7010")),
((Standards.JSIC13, "6211"), (Standards.ISIC4, "6411")),
((Standards.JSIC13, "6221"), (Standards.ISIC4, "6419")),
((Standards.JSIC13, "6222"), (Standards.ISIC4, "6419")),
((Standards.JSIC13, "6223"), (Standards.ISIC4, "6430")),
((Standards.JSIC13, "6229"), (Standards.ISIC4, "6419")),
((Standards.JSIC13, "6300"), (Standards.ISIC4, "7010")),
((Standards.JSIC13, "6309"), (Standards.ISIC4, "7010")),
((Standards.JSIC13, "6311"), (Standards.ISIC4, "6419")),
((Standards.JSIC13, "6312"), (Standards.ISIC4, "6419")),
((Standards.JSIC13, "6313"), (Standards.ISIC4, "6419")),
((Standards.JSIC13, "6314"), (Standards.ISIC4, "6419")),
((Standards.JSIC13, "6321"), (Standards.ISIC4, "6419")),
((Standards.JSIC13, "6322"), (Standards.ISIC4, "6419")),
((Standards.JSIC13, "6323"), (Standards.ISIC4, "6419")),
((Standards.JSIC13, "6324"), (Standards.ISIC4, "6492")),
((Standards.JSIC13, "6325"), (Standards.ISIC4, "6492")),
((Standards.JSIC13, "6400"), (Standards.ISIC4, "7010")),
((Standards.JSIC13, "6409"), (Standards.ISIC4, "7010")),
((Standards.JSIC13, "6411"), (Standards.ISIC4, "6492")),
((Standards.JSIC13, "6412"), (Standards.ISIC4, "6492")),
((Standards.JSIC13, "6421"), (Standards.ISIC4, "6492")),
((Standards.JSIC13, "6431"), (Standards.ISIC4, "6492")),
((Standards.JSIC13, "6432"), (Standards.ISIC4, "6492")),
((Standards.JSIC13, "6491"), (Standards.ISIC4, "6492")),
((Standards.JSIC13, "6492"), (Standards.ISIC4, "6492")),
((Standards.JSIC13, "6493"), (Standards.ISIC4, "6499")),
((Standards.JSIC13, "6499"), (Standards.ISIC4, "6499")),
((Standards.JSIC13, "6500"), (Standards.ISIC4, "7010")),
((Standards.JSIC13, "6509"), (Standards.ISIC4, "7010")),
((Standards.JSIC13, "6511"), (Standards.ISIC4, "6612")),
((Standards.JSIC13, "6512"), (Standards.ISIC4, "6619")),
((Standards.JSIC13, "6513"), (Standards.ISIC4, "6499")),
((Standards.JSIC13, "6513"), (Standards.ISIC4, "6630")),
((Standards.JSIC13, "6514"), (Standards.ISIC4, "6612")),
((Standards.JSIC13, "6521"), (Standards.ISIC4, "6612")),
((Standards.JSIC13, "6522"), (Standards.ISIC4, "6612")),
((Standards.JSIC13, "6522"), (Standards.ISIC4, "6619")),
((Standards.JSIC13, "6529"), (Standards.ISIC4, "6499")),
((Standards.JSIC13, "6600"), (Standards.ISIC4, "7010")),
((Standards.JSIC13, "6609"), (Standards.ISIC4, "7010")),
((Standards.JSIC13, "6611"), (Standards.ISIC4, "6419")),
((Standards.JSIC13, "6611"), (Standards.ISIC4, "6499")),
((Standards.JSIC13, "6611"), (Standards.ISIC4, "6612")),
((Standards.JSIC13, "6612"), (Standards.ISIC4, "6612")),
((Standards.JSIC13, "6613"), (Standards.ISIC4, "6612")),
((Standards.JSIC13, "6614"), (Standards.ISIC4, "6512")),
((Standards.JSIC13, "6615"), (Standards.ISIC4, "6520")),
((Standards.JSIC13, "6616"), (Standards.ISIC4, "6512")),
((Standards.JSIC13, "6617"), (Standards.ISIC4, "6499")),
((Standards.JSIC13, "6617"), (Standards.ISIC4, "6611")),
((Standards.JSIC13, "6618"), (Standards.ISIC4, "6499")),
((Standards.JSIC13, "6618"), (Standards.ISIC4, "6611")),
((Standards.JSIC13, "6619"), (Standards.ISIC4, "6499")),
((Standards.JSIC13, "6619"), (Standards.ISIC4, "6619")),
((Standards.JSIC13, "6619"), (Standards.ISIC4, "8291")),
((Standards.JSIC13, "6621"), (Standards.ISIC4, "6430")),
((Standards.JSIC13, "6622"), (Standards.ISIC4, "6430")),
((Standards.JSIC13, "6631"), (Standards.ISIC4, "6612")),
((Standards.JSIC13, "6632"), (Standards.ISIC4, "6612")),
((Standards.JSIC13, "6639"), (Standards.ISIC4, "6612")),
((Standards.JSIC13, "6700"), (Standards.ISIC4, "7010")),
((Standards.JSIC13, "6709"), (Standards.ISIC4, "7010")),
((Standards.JSIC13, "6711"), (Standards.ISIC4, "6511")),
((Standards.JSIC13, "6712"), (Standards.ISIC4, "6511")),
((Standards.JSIC13, "6713"), (Standards.ISIC4, "6520")),
((Standards.JSIC13, "6719"), (Standards.ISIC4, "6511")),
((Standards.JSIC13, "6721"), (Standards.ISIC4, "6512")),
((Standards.JSIC13, "6722"), (Standards.ISIC4, "6520")),
((Standards.JSIC13, "6729"), (Standards.ISIC4, "6512")),
((Standards.JSIC13, "6731"), (Standards.ISIC4, "6511")),
((Standards.JSIC13, "6731"), (Standards.ISIC4, "6512")),
((Standards.JSIC13, "6732"), (Standards.ISIC4, "6511")),
((Standards.JSIC13, "6732"), (Standards.ISIC4, "6512")),
((Standards.JSIC13, "6733"), (Standards.ISIC4, "6512")),
((Standards.JSIC13, "6741"), (Standards.ISIC4, "6622")),
((Standards.JSIC13, "6742"), (Standards.ISIC4, "6622")),
((Standards.JSIC13, "6743"), (Standards.ISIC4, "6622")),
((Standards.JSIC13, "6751"), (Standards.ISIC4, "6629")),
((Standards.JSIC13, "6752"), (Standards.ISIC4, "6621")),
((Standards.JSIC13, "6759"), (Standards.ISIC4, "6621")),
((Standards.JSIC13, "6759"), (Standards.ISIC4, "6622")),
((Standards.JSIC13, "6759"), (Standards.ISIC4, "6629")),
((Standards.JSIC13, "6800"), (Standards.ISIC4, "7010")),
((Standards.JSIC13, "6809"), (Standards.ISIC4, "7010")),
((Standards.JSIC13, "6811"), (Standards.ISIC4, "6810")),
((Standards.JSIC13, "6812"), (Standards.ISIC4, "6810")),
((Standards.JSIC13, "6821"), (Standards.ISIC4, "6820")),
((Standards.JSIC13, "6900"), (Standards.ISIC4, "7010")),
((Standards.JSIC13, "6909"), (Standards.ISIC4, "7010")),
((Standards.JSIC13, "6911"), (Standards.ISIC4, "6810")),
((Standards.JSIC13, "6912"), (Standards.ISIC4, "6810")),
((Standards.JSIC13, "6919"), (Standards.ISIC4, "6810")),
((Standards.JSIC13, "6921"), (Standards.ISIC4, "6810")),
((Standards.JSIC13, "6922"), (Standards.ISIC4, "6810")),
((Standards.JSIC13, "6931"), (Standards.ISIC4, "6810")),
((Standards.JSIC13, "6931"), (Standards.ISIC4, "6820")),
((Standards.JSIC13, "6941"), (Standards.ISIC4, "6810")),
((Standards.JSIC13, "6941"), (Standards.ISIC4, "6820")),
((Standards.JSIC13, "7000"), (Standards.ISIC4, "7010")),
((Standards.JSIC13, "7009"), (Standards.ISIC4, "7010")),
((Standards.JSIC13, "7011"), (Standards.ISIC4, "6491")),
((Standards.JSIC13, "7011"), (Standards.ISIC4, "7730")),
((Standards.JSIC13, "7019"), (Standards.ISIC4, "6491")),
((Standards.JSIC13, "7019"), (Standards.ISIC4, "7729")),
((Standards.JSIC13, "7019"), (Standards.ISIC4, "7730")),
((Standards.JSIC13, "7021"), (Standards.ISIC4, "6491")),
((Standards.JSIC13, "7021"), (Standards.ISIC4, "7730")),
((Standards.JSIC13, "7022"), (Standards.ISIC4, "6491")),
((Standards.JSIC13, "7022"), (Standards.ISIC4, "7730")),
((Standards.JSIC13, "7031"), (Standards.ISIC4, "6491")),
((Standards.JSIC13, "7031"), (Standards.ISIC4, "7730")),
((Standards.JSIC13, "7032"), (Standards.ISIC4, "6491")),
((Standards.JSIC13, "7032"), (Standards.ISIC4, "7730")),
((Standards.JSIC13, "7041"), (Standards.ISIC4, "6491")),
((Standards.JSIC13, "7041"), (Standards.ISIC4, "7710")),
((Standards.JSIC13, "7041"), (Standards.ISIC4, "7730")),
((Standards.JSIC13, "7051"), (Standards.ISIC4, "7721")),
((Standards.JSIC13, "7091"), (Standards.ISIC4, "7729")),
((Standards.JSIC13, "7091"), (Standards.ISIC4, "7730")),
((Standards.JSIC13, "7092"), (Standards.ISIC4, "7722")),
((Standards.JSIC13, "7093"), (Standards.ISIC4, "7729")),
((Standards.JSIC13, "7099"), (Standards.ISIC4, "7729")),
((Standards.JSIC13, "7099"), (Standards.ISIC4, "7730")),
((Standards.JSIC13, "7101"), (Standards.ISIC4, "7010")),
((Standards.JSIC13, "7111"), (Standards.ISIC4, "7210")),
((Standards.JSIC13, "7112"), (Standards.ISIC4, "7210")),
((Standards.JSIC13, "7113"), (Standards.ISIC4, "7210")),
((Standards.JSIC13, "7114"), (Standards.ISIC4, "7210")),
((Standards.JSIC13, "7121"), (Standards.ISIC4, "7220")),
((Standards.JSIC13, "7201"), (Standards.ISIC4, "7010")),
((Standards.JSIC13, "7211"), (Standards.ISIC4, "6910")),
((Standards.JSIC13, "7212"), (Standards.ISIC4, "6910")),
((Standards.JSIC13, "7221"), (Standards.ISIC4, "6910")),
((Standards.JSIC13, "7222"), (Standards.ISIC4, "6910")),
((Standards.JSIC13, "7222"), (Standards.ISIC4, "7110")),
((Standards.JSIC13, "7231"), (Standards.ISIC4, "6910")),
((Standards.JSIC13, "7241"), (Standards.ISIC4, "6920")),
((Standards.JSIC13, "7242"), (Standards.ISIC4, "6920")),
((Standards.JSIC13, "7251"), (Standards.ISIC4, "7490")),
((Standards.JSIC13, "7261"), (Standards.ISIC4, "7410")),
((Standards.JSIC13, "7271"), (Standards.ISIC4, "9000")),
((Standards.JSIC13, "7272"), (Standards.ISIC4, "9000")),
((Standards.JSIC13, "7281"), (Standards.ISIC4, "7020")),
((Standards.JSIC13, "7281"), (Standards.ISIC4, "7740")),
((Standards.JSIC13, "7282"), (Standards.ISIC4, "6420")),
((Standards.JSIC13, "7282"), (Standards.ISIC4, "7010")),
((Standards.JSIC13, "7291"), (Standards.ISIC4, "8030")),
((Standards.JSIC13, "7292"), (Standards.ISIC4, "7490")),
((Standards.JSIC13, "7293"), (Standards.ISIC4, "7490")),
((Standards.JSIC13, "7294"), (Standards.ISIC4, "6820")),
((Standards.JSIC13, "7299"), (Standards.ISIC4, "7490")),
((Standards.JSIC13, "7300"), (Standards.ISIC4, "7010")),
((Standards.JSIC13, "7309"), (Standards.ISIC4, "7010")),
((Standards.JSIC13, "7311"), (Standards.ISIC4, "7310")),
((Standards.JSIC13, "7401"), (Standards.ISIC4, "7010")),
((Standards.JSIC13, "7411"), (Standards.ISIC4, "7500")),
((Standards.JSIC13, "7421"), (Standards.ISIC4, "7110")),
((Standards.JSIC13, "7422"), (Standards.ISIC4, "7110")),
((Standards.JSIC13, "7429"), (Standards.ISIC4, "7110")),
((Standards.JSIC13, "7431"), (Standards.ISIC4, "7110")),
((Standards.JSIC13, "7441"), (Standards.ISIC4, "7120")),
((Standards.JSIC13, "7442"), (Standards.ISIC4, "7120")),
((Standards.JSIC13, "7451"), (Standards.ISIC4, "5229")),
((Standards.JSIC13, "7452"), (Standards.ISIC4, "7120")),
((Standards.JSIC13, "7459"), (Standards.ISIC4, "7120")),
((Standards.JSIC13, "7461"), (Standards.ISIC4, "7420")),
((Standards.JSIC13, "7462"), (Standards.ISIC4, "7420")),
((Standards.JSIC13, "7499"), (Standards.ISIC4, "7110")),
((Standards.JSIC13, "7500"), (Standards.ISIC4, "7010")),
((Standards.JSIC13, "7509"), (Standards.ISIC4, "7010")),
((Standards.JSIC13, "7511"), (Standards.ISIC4, "5510")),
((Standards.JSIC13, "7521"), (Standards.ISIC4, "5510")),
((Standards.JSIC13, "7531"), (Standards.ISIC4, "5590")),
((Standards.JSIC13, "7591"), (Standards.ISIC4, "5510")),
((Standards.JSIC13, "7592"), (Standards.ISIC4, "5510")),
((Standards.JSIC13, "7599"), (Standards.ISIC4, "5520")),
((Standards.JSIC13, "7599"), (Standards.ISIC4, "5590")),
((Standards.JSIC13, "7600"), (Standards.ISIC4, "7010")),
((Standards.JSIC13, "7609"), (Standards.ISIC4, "7010")),
((Standards.JSIC13, "7611"), (Standards.ISIC4, "5610")),
((Standards.JSIC13, "7611"), (Standards.ISIC4, "5629")),
((Standards.JSIC13, "7621"), (Standards.ISIC4, "5610")),
((Standards.JSIC13, "7622"), (Standards.ISIC4, "5610")),
((Standards.JSIC13, "7623"), (Standards.ISIC4, "5610")),
((Standards.JSIC13, "7624"), (Standards.ISIC4, "5610")),
((Standards.JSIC13, "7625"), (Standards.ISIC4, "5610")),
((Standards.JSIC13, "7629"), (Standards.ISIC4, "5610")),
((Standards.JSIC13, "7631"), (Standards.ISIC4, "5610")),
((Standards.JSIC13, "7641"), (Standards.ISIC4, "5610")),
((Standards.JSIC13, "7651"), (Standards.ISIC4, "5630")),
((Standards.JSIC13, "7661"), (Standards.ISIC4, "5630")),
((Standards.JSIC13, "7671"), (Standards.ISIC4, "5630")),
((Standards.JSIC13, "7691"), (Standards.ISIC4, "5610")),
((Standards.JSIC13, "7692"), (Standards.ISIC4, "5610")),
((Standards.JSIC13, "7699"), (Standards.ISIC4, "5610")),
((Standards.JSIC13, "7699"), (Standards.ISIC4, "5629")),
((Standards.JSIC13, "7700"), (Standards.ISIC4, "7010")),
((Standards.JSIC13, "7709"), (Standards.ISIC4, "7010")),
((Standards.JSIC13, "7711"), (Standards.ISIC4, "5610")),
((Standards.JSIC13, "7711"), (Standards.ISIC4, "5629")),
((Standards.JSIC13, "7721"), (Standards.ISIC4, "5610")),
((Standards.JSIC13, "7721"), (Standards.ISIC4, "5621")),
((Standards.JSIC13, "7800"), (Standards.ISIC4, "7010")),
((Standards.JSIC13, "7809"), (Standards.ISIC4, "7010")),
((Standards.JSIC13, "7811"), (Standards.ISIC4, | |
#!/usr/bin/env python
"""
_WorkQueue_t_
WorkQueue tests
"""
from __future__ import print_function
from builtins import next, range
from future.utils import viewitems
import os
import threading
import time
import unittest
import logging
from retry import retry
from Utils.PythonVersion import PY3
from WMCore.WMBase import getTestBase
from WMCore.ACDC.DataCollectionService import DataCollectionService
from WMCore.Configuration import Configuration
from WMCore.DAOFactory import DAOFactory
from WMCore.DataStructs.File import File as WMFile
from WMCore.DataStructs.Run import Run
from WMCore.Lexicon import sanitizeURL
from WMCore.ResourceControl.ResourceControl import ResourceControl
from WMCore.Services.DBS.DBSErrors import DBSReaderError
from WMCore.Services.UUIDLib import makeUUID
from WMCore.Services.WorkQueue.WorkQueue import WorkQueue as WorkQueueService
from WMCore.WMBS.Job import Job
from WMCore.WMSpec.StdSpecs.DQMHarvest import DQMHarvestWorkloadFactory
from WMCore.WMSpec.StdSpecs.StepChain import StepChainWorkloadFactory
from WMCore.WMSpec.StdSpecs.ReReco import ReRecoWorkloadFactory
from WMCore.WMSpec.WMWorkload import WMWorkload, WMWorkloadHelper
from WMCore.WorkQueue.WorkQueue import WorkQueue, globalQueue, localQueue
from WMCore.WorkQueue.WorkQueueExceptions import (WorkQueueWMSpecError, WorkQueueNoMatchingElements,
WorkQueueNoWorkError)
from WMCore.WorkQueue.DataStructs.WorkQueueElement import STATES
from WMQuality.Emulators import EmulatorSetup
from WMQuality.Emulators.DataBlockGenerator import Globals
from WMQuality.Emulators.RucioClient.MockRucioApi import PILEUP_DATASET
from WMQuality.Emulators.WMSpecGenerator.WMSpecGenerator import createConfig
from WMCore_t.WMSpec_t.samples.MultiTaskProductionWorkload \
import workload as MultiTaskProductionWorkload
from WMCore_t.WorkQueue_t.WorkQueueTestCase import WorkQueueTestCase
# Fixture expectations: block/file counts of the emulated input datasets
# (see the Rucio emulator notes in WorkQueueTest's docstring) and the
# default number of events requested by the production-style specs.
NBLOCKS_HICOMM = 47
NFILES_HICOMM = 72
NBLOCKS_COSMIC = 58
NFILES_COSMIC = 108
NFILES_COSMICRAW = 141
TOTAL_EVENTS = 10000
def rerecoWorkload(workloadName, arguments, assignArgs=None):
    """Build a ReReco workload spec, optionally applying assignment arguments.

    :param workloadName: name given to the constructed workload
    :param arguments: request argument dictionary for the factory
    :param assignArgs: optional dict merged over the factory's default
        assignment test arguments and applied to the spec
    :return: the constructed WMSpec helper object
    """
    reRecoFactory = ReRecoWorkloadFactory()
    spec = reRecoFactory.factoryWorkloadConstruction(workloadName, arguments)
    if assignArgs:
        assignment = reRecoFactory.getAssignTestArguments()
        assignment.update(assignArgs)
        spec.updateArguments(assignment)
    return spec
def stepchainWorkload(workloadName, arguments):
    """Construct and return a StepChain workload spec from *arguments*."""
    return StepChainWorkloadFactory().factoryWorkloadConstruction(workloadName, arguments)
def getFirstTask(wmspec):
    """Return the first top-level task of *wmspec*."""
    taskIter = wmspec.taskIterator()
    return next(taskIter)
def syncQueues(queue, skipWMBS=False):
    """Sync parent & local queues and split work

    Workaround having to wait for couchdb replication and splitting polling.

    :param queue: a WorkQueue instance whose backend supports forceQueueSync()
    :param skipWMBS: passed through to performQueueCleanupActions()
    :return: whatever queue.processInboundWork() returns (the split work)
    """
    # push/pull documents now rather than waiting for the replication cycle
    queue.backend.forceQueueSync()
    time.sleep(2)
    work = queue.processInboundWork()
    queue.performQueueCleanupActions(skipWMBS=skipWMBS)
    queue.backend.forceQueueSync()
    # after replication need to wait a while to update result
    time.sleep(2)
    return work
class WorkQueueTest(WorkQueueTestCase):
"""
_WorkQueueTest_
For /MinimumBias/ComissioningHI-v1/RAW the dataset has 47 blocks with 72 files.
The Rucio emulator sets the block locations like:
17 at 'T2_XX_SiteA', 'T2_XX_SiteB', 'T2_XX_SiteC'
19 at 'T2_XX_SiteA', 'T2_XX_SiteB'
11 at 'T2_XX_SiteA' only
"""
def __init__(self, methodName='runTest'):
super(WorkQueueTest, self).__init__(methodName=methodName, mockDBS=True, mockRucio=True)
self.queueParams = {}
self.queueParams['log_reporter'] = "WorkQueue_Unittest"
self.queueParams['rucioAccount'] = "wma_test"
self.queueParams['rucioAuthUrl'] = "http://cms-rucio-int.cern.ch"
self.queueParams['rucioUrl'] = "https://cms-rucio-auth-int.cern.ch"
def setupConfigCacheAndAgrs(self):
self.rerecoArgs = ReRecoWorkloadFactory.getTestArguments()
self.rerecoArgs["CouchDBName"] = self.configCacheDB
self.rerecoArgs["ConfigCacheID"] = createConfig(self.rerecoArgs["CouchDBName"])
self.mcArgs = StepChainWorkloadFactory.getTestArguments()
self.mcArgs["CouchDBName"] = self.configCacheDB
self.mcArgs['Step1']["ConfigCacheID"] = createConfig(self.mcArgs["CouchDBName"])
self.parentProcArgs = ReRecoWorkloadFactory.getTestArguments()
self.parentProcArgs.update(IncludeParents="True")
self.parentProcArgs.update(InputDataset="/Cosmics/ComissioningHI-PromptReco-v1/RECO")
self.parentProcArgs["CouchDBName"] = self.configCacheDB
self.parentProcArgs["ConfigCacheID"] = createConfig(self.parentProcArgs["CouchDBName"])
self.openRunningProcArgs = ReRecoWorkloadFactory.getTestArguments()
self.openRunningProcArgs.update(OpenRunningTimeout=10)
self.openRunningProcArgs["CouchDBName"] = self.configCacheDB
self.openRunningProcArgs["ConfigCacheID"] = createConfig(self.openRunningProcArgs["CouchDBName"])
self.pileupArgs = StepChainWorkloadFactory.getTestArguments()
self.pileupArgs['Step1'].update(MCPileup=PILEUP_DATASET)
self.pileupArgs['Step1'].update(InputDataset="/MinimumBias/ComissioningHI-v1/RAW",
RequestNumEvents=TOTAL_EVENTS,
SplittingAlgo="EventAwareLumiBased")
self.pileupArgs["CouchDBName"] = self.configCacheDB
self.pileupArgs['Step1']["ConfigCacheID"] = createConfig(self.pileupArgs["CouchDBName"])
self.pileupMcArgs = StepChainWorkloadFactory.getTestArguments()
self.pileupMcArgs['Step1'].update(MCPileup=PILEUP_DATASET)
self.pileupArgs['Step1'].update(RequestNumEvents=TOTAL_EVENTS)
self.pileupMcArgs["CouchDBName"] = self.configCacheDB
self.pileupMcArgs['Step1']["ConfigCacheID"] = createConfig(self.pileupMcArgs["CouchDBName"])
    def setUp(self):
        """
        Build the full test fixture: WMAgent config file, a set of saved
        workload specs, global/local/standalone work queues and the WMBS
        site entries the tests rely on.
        """
        # undo any customizations left over from previous tests
        Globals.GlobalParams.resetParams()
        # set up WMAgent config file for couchdb
        self.configFile = EmulatorSetup.setupWMAgentConfig()
        WorkQueueTestCase.setUp(self)
        # builds self.rerecoArgs / mcArgs / parentProcArgs / pileup* dicts
        self.setupConfigCacheAndAgrs()
        # Basic production Spec
        self.spec = stepchainWorkload('testProduction', self.mcArgs)
        self.spec.setSiteWhitelist(['T2_XX_SiteA', 'T2_XX_SiteB'])
        getFirstTask(self.spec).addProduction(totalEvents=TOTAL_EVENTS)
        self.spec.setSpecUrl(os.path.join(self.workDir, 'testworkflow.spec'))
        self.spec.save(self.spec.specUrl())
        # Production spec plus pileup
        self.productionPileupSpec = stepchainWorkload('testProduction', self.pileupMcArgs)
        self.productionPileupSpec.setSiteWhitelist(['T2_XX_SiteA', 'T2_XX_SiteB'])
        getFirstTask(self.productionPileupSpec).addProduction(totalEvents=TOTAL_EVENTS)
        self.productionPileupSpec.setSpecUrl(os.path.join(self.workDir, 'testworkflowPileupMc.spec'))
        self.productionPileupSpec.save(self.productionPileupSpec.specUrl())
        # Processing spec plus pileup
        self.processingPileupSpec = stepchainWorkload('testProcessing', self.pileupArgs)
        self.processingPileupSpec.setSiteWhitelist(['T2_XX_SiteA', 'T2_XX_SiteB', 'T2_XX_SiteC'])
        getFirstTask(self.processingPileupSpec).addProduction(totalEvents=TOTAL_EVENTS)
        self.processingPileupSpec.setSpecUrl(os.path.join(self.workDir, 'testworkflowPileup.spec'))
        self.processingPileupSpec.save(self.processingPileupSpec.specUrl())
        # ReReco spec with whitelist (restricted to T2_XX_SiteB only)
        self.whitelistSpec = rerecoWorkload('whitelistlistSpec', self.rerecoArgs)
        self.whitelistSpec.setSpecUrl(os.path.join(self.workDir,
                                                   'testWhitelist.spec'))
        getFirstTask(self.whitelistSpec).data.constraints.sites.whitelist = ['T2_XX_SiteB']
        self.whitelistSpec.save(self.whitelistSpec.specUrl())
        # ReReco spec with delay for running open
        self.openRunningSpec = rerecoWorkload('openRunningSpec', self.openRunningProcArgs)
        self.openRunningSpec.setSpecUrl(os.path.join(self.workDir,
                                                     'testOpenRunningSpec.spec'))
        self.openRunningSpec.save(self.openRunningSpec.specUrl())
        # Create queues: couch URLs are derived from the test couch server
        globalCouchUrl = "%s/%s" % (self.testInit.couchUrl, self.globalQDB)
        logdbCouchUrl = "%s/%s" % (self.testInit.couchUrl, self.logDBName)
        reqdbUrl = "%s/%s" % (self.testInit.couchUrl, self.requestDBName)
        self.globalQueue = globalQueue(DbName=self.globalQDB,
                                       InboxDbName=self.globalQInboxDB,
                                       QueueURL=globalCouchUrl,
                                       central_logdb_url=logdbCouchUrl,
                                       UnittestFlag=True,
                                       RequestDBURL=reqdbUrl,
                                       **self.queueParams)
        # self.midQueue = WorkQueue(SplitByBlock = False, # mid-level queue
        #                           PopulateFilesets = False,
        #                           ParentQueue = self.globalQueue,
        #                           CacheDir = None)
        # ignore mid queue as it causes database duplication's
        # copy jobStateMachine couchDB configuration here since we don't want/need to pass whole configuration
        jobCouchConfig = Configuration()
        jobCouchConfig.section_("JobStateMachine")
        jobCouchConfig.JobStateMachine.couchurl = os.environ["COUCHURL"]
        jobCouchConfig.JobStateMachine.couchDBName = "testcouchdb"
        jobCouchConfig.JobStateMachine.jobSummaryDBName = "wmagent_summary_test"
        jobCouchConfig.JobStateMachine.summaryStatsDBName = "stat_summary_test"
        # copy bossAir configuration here since we don't want/need to pass whole configuration
        bossAirConfig = Configuration()
        bossAirConfig.section_("BossAir")
        bossAirConfig.BossAir.pluginDir = "WMCore.BossAir.Plugins"
        bossAirConfig.BossAir.pluginNames = ["MockPlugin"]
        bossAirConfig.BossAir.section_("MockPlugin")
        # canned framework job report returned by the mock batch plugin
        bossAirConfig.BossAir.MockPlugin.fakeReport = os.path.join(getTestBase(),
                                                                   'WMComponent_t/JobAccountant_t/fwjrs',
                                                                   "MergeSuccess.pkl")
        bossAirConfig.section_("Agent")
        bossAirConfig.Agent.agentName = "TestAgent"
        bossAirConfig.section_("JobStateMachine")
        bossAirConfig.JobStateMachine.couchurl = os.environ["COUCHURL"]
        bossAirConfig.JobStateMachine.couchDBName = "testcouchdb"
        bossAirConfig.JobStateMachine.jobSummaryDBName = "wmagent_summary_test"
        bossAirConfig.JobStateMachine.summaryStatsDBName = "stat_summary_test"
        # two local queues, both children of the global queue
        self.localQueue = localQueue(DbName=self.localQDB,
                                     InboxDbName=self.localQInboxDB,
                                     ParentQueueCouchUrl=globalCouchUrl,
                                     ParentQueueInboxCouchDBName=self.globalQInboxDB,
                                     JobDumpConfig=jobCouchConfig,
                                     BossAirConfig=bossAirConfig,
                                     CacheDir=self.workDir,
                                     central_logdb_url=logdbCouchUrl,
                                     RequestDBURL=reqdbUrl,
                                     **self.queueParams)
        self.localQueue2 = localQueue(DbName=self.localQDB2,
                                      InboxDbName=self.localQInboxDB2,
                                      ParentQueueCouchUrl=globalCouchUrl,
                                      ParentQueueInboxCouchDBName=self.globalQInboxDB,
                                      JobDumpConfig=jobCouchConfig,
                                      BossAirConfig=bossAirConfig,
                                      CacheDir=self.workDir,
                                      central_logdb_url=logdbCouchUrl,
                                      RequestDBURL=reqdbUrl,
                                      **self.queueParams)
        # configuration for the Alerts messaging framework, work (alerts) and
        # control channel addresses to which alerts
        # these are destination addresses where AlertProcessor:Receiver listens
        config = Configuration()
        config.section_("Alert")
        config.Alert.address = "tcp://127.0.0.1:5557"
        config.Alert.controlAddr = "tcp://127.0.0.1:5559"
        # standalone queue for unit tests
        self.queue = WorkQueue(JobDumpConfig=jobCouchConfig,
                               BossAirConfig=bossAirConfig,
                               DbName=self.queueDB,
                               InboxDbName=self.queueInboxDB,
                               CacheDir=self.workDir,
                               config=config,
                               central_logdb_url=logdbCouchUrl,
                               RequestDBURL=reqdbUrl,
                               **self.queueParams)
        # create relevant sites in wmbs
        rc = ResourceControl()
        site_se_mapping = {'T2_XX_SiteA': 'T2_XX_SiteA', 'T2_XX_SiteB': 'T2_XX_SiteB'}
        for site, se in viewitems(site_se_mapping):
            rc.insertSite(site, 100, 200, se, cmsName=site, plugin="MockPlugin")
            daofactory = DAOFactory(package="WMCore.WMBS",
                                    logger=threading.currentThread().logger,
                                    dbinterface=threading.currentThread().dbi)
            addLocation = daofactory(classname="Locations.New")
            addLocation.execute(siteName=site, pnn=se)
        if PY3:
            # unittest renamed assertItemsEqual to assertCountEqual in py3
            self.assertItemsEqual = self.assertCountEqual
def setupReReco(self, assignArgs=None, **kwargs):
# Sample Tier1 ReReco spec
self.rerecoArgs.update(kwargs)
processingSpec = rerecoWorkload('testProcessing', self.rerecoArgs, assignArgs=assignArgs)
processingSpec.setSpecUrl(os.path.join(self.workDir, 'testProcessing.spec'))
processingSpec.save(processingSpec.specUrl())
return processingSpec
def setupParentProcSpec(self, assignArgs=None, **kwargs):
# Sample Tier1 ReReco spec with parent
self.parentProcArgs.update(kwargs)
parentProcSpec = rerecoWorkload('testParentProcessing', self.parentProcArgs, assignArgs=assignArgs)
parentProcSpec.setSpecUrl(os.path.join(self.workDir, 'testParentProcessing.spec'))
parentProcSpec.save(parentProcSpec.specUrl())
return parentProcSpec
def setupHighPrioReReco(self, assignArgs=None, **kwargs):
# High priority ReReco spec
self.rerecoArgs.update(kwargs)
highPrioReReco = rerecoWorkload('highPrioSpec', self.rerecoArgs, assignArgs=assignArgs)
highPrioReReco.data.request.priority = 999998
highPrioReReco.setSpecUrl(os.path.join(self.workDir, 'highPrioSpec.spec'))
highPrioReReco.save(highPrioReReco.specUrl())
return highPrioReReco
def tearDown(self):
    """Run the base-class teardown, then delete the WMBSAgent config file."""
    super(WorkQueueTest, self).tearDown()
    # Delete WMBSAgent config file written during setUp
    EmulatorSetup.deleteConfig(self.configFile)
def createWQReplication(self, parentQURL, childURL):
    """Set up two one-shot couch replications between a parent queue and a
    child queue's inbox (parent -> child inbox, child inbox -> parent),
    both filtered through the WorkQueue queueFilter."""
    wqfilter = 'WorkQueue/queueFilter'
    query_params = {'childUrl': childURL, 'parentUrl': sanitizeURL(parentQURL)['url']}
    localQInboxURL = "%s_inbox" % childURL
    replications = (
        (sanitizeURL(parentQURL)['url'], localQInboxURL),
        (sanitizeURL(localQInboxURL)['url'], parentQURL),
    )
    for source, target in replications:
        self.localCouchMonitor.couchServer.replicate(
            source, target, filter=wqfilter,
            query_params=query_params, continuous=False)
    return
def pullWorkWithReplication(self, localQ, resources):
    """Pull work into *localQ* for the given *resources*, then replicate
    its state with its parent queue via createWQReplication."""
    localQ.pullWork(resources)
    self.createWQReplication(localQ.params['ParentQueueCouchUrl'], localQ.params['QueueURL'])
def createResubmitSpec(self, serverUrl, couchDB, parentage=False):
    """
    _createResubmitSpec_
    Create a bogus resubmit workload.
    Builds a single-task ("reco") processing workload, reports two failed
    files to the DataCollectionService, then truncates the workload into a
    "Resubmit_TestWorkload" workflow pointed at the same couch server/db.
    :param serverUrl: couch server url used by the DataCollectionService
    :param couchDB: couch database name used by the DataCollectionService
    :param parentage: when True, job splitting includes parent files
    """
    site = ["T1_US_FNAL"]
    workload = WMWorkloadHelper(WMWorkload("TestWorkload"))
    reco = workload.newTask("reco")
    workload.setOwnerDetails(name="evansde77", group="DMWM")
    workload.setSiteWhitelist(site)
    # first task uses the input dataset
    reco.addInputDataset(name="/PRIMARY/processed-v1/TIERONE",
                         primary="PRIMARY", processed="processed-v1", tier="TIERONE")
    reco.data.input.splitting.algorithm = "File"
    reco.data.input.splitting.include_parents = parentage
    reco.setTaskType("Processing")
    cmsRunReco = reco.makeStep("cmsRun1")
    cmsRunReco.setStepType("CMSSW")
    reco.applyTemplates()
    cmsRunRecoHelper = cmsRunReco.getTypeHelper()
    cmsRunRecoHelper.addOutputModule("outputRECO",
                                     primaryDataset="PRIMARY",
                                     processedDataset="processed-v2",
                                     dataTier="TIERTWO",
                                     lfnBase="/store/dunkindonuts",
                                     mergedLFNBase="/store/kfc")
    workload.setTrustLocationFlag(inputFlag=True, pileupFlag=False)
    dcs = DataCollectionService(url=serverUrl, database=couchDB)
    def getJob(workload):
        # minimal job dict pointing at the reco task, used to report the failure
        job = Job()
        job["task"] = workload.getTask("reco").getPathName()
        job["workflow"] = workload.name()
        job["location"] = site
        job["owner"] = workload.getOwner().get("name")
        job["group"] = workload.getOwner().get("group")
        return job
    # two files with distinct runs/lumis so the failed-job record has real data
    testFileA = WMFile(lfn=makeUUID(), size=1024, events=1024, parents=['parent1'])
    testFileA.setLocation(site)
    testFileA.addRun(Run(1, 1, 2))
    testFileB = WMFile(lfn=makeUUID(), size=1024, events=1024, parents=['parent2'])
    testFileB.setLocation(site)
    testFileB.addRun(Run(1, 3, 4))
    testJobA = getJob(workload)
    testJobA.addFile(testFileA)
    testJobA.addFile(testFileB)
    # register the failure; the truncated (resubmit) workload reads it back
    dcs.failedJobs([testJobA])
    topLevelTask = workload.getTopLevelTask()[0]
    workload.truncate("Resubmit_TestWorkload", topLevelTask.getPathName(),
                      serverUrl, couchDB)
    return workload
def testProduction(self):
    """
    Enqueue and get work for a production WMSpec.
    Production specs have no input data, so work acquisition is limited
    only by the site whitelist and the job slots offered.
    """
    specfile = self.spec.specUrl()
    numUnit = 1
    jobSlot = [10] * numUnit  # array of jobs per block
    total = sum(jobSlot)
    # queue the spec once per unit and check everything was accepted
    for _ in range(numUnit):
        self.queue.queueWork(specfile)
    self.assertEqual(numUnit, len(self.queue))
    # try to get work
    work = self.queue.getWork({'SiteDoesNotExist': jobSlot[0]}, {})
    self.assertEqual([], work)  # not in whitelist
    work = self.queue.getWork({'T2_XX_SiteA': 0}, {})
    self.assertEqual([], work)  # zero slots -> no work
    work = self.queue.getWork({'T2_XX_SiteA': jobSlot[0]}, {})
    self.assertEqual(len(work), 1)
    # no more work available
    self.assertEqual(0, len(self.queue.getWork({'T2_XX_SiteA': total}, {})))
def testProductionMultiQueue(self):
    """Test production work flowing from a global queue to one of two local queues."""
    specfile = self.spec.specUrl()
    numUnit = 1
    jobSlot = [10] * numUnit  # array of jobs per block
    total = sum(jobSlot)
    self.globalQueue.queueWork(specfile)
    self.assertEqual(numUnit, len(self.globalQueue))
    # pull work to localQueue2 - check local doesn't get any
    numWork = self.localQueue2.pullWork({'T2_XX_SiteA': total})
    self.assertEqual(numUnit, numWork)
    self.assertEqual(0, self.localQueue.pullWork({'T2_XX_SiteA': total}))
    syncQueues(self.localQueue)
    syncQueues(self.localQueue2)
    # only localQueue2 holds available work; global marks it Acquired
    self.assertEqual(0, len(self.localQueue.status(status='Available')))
    self.assertEqual(numUnit, len(self.localQueue2.status(status='Available')))
    self.assertEqual(numUnit, len(self.globalQueue.status(status='Acquired')))
    # global element must record which child queue acquired the work
    self.assertEqual(sanitizeURL(self.localQueue2.params['QueueURL'])['url'],
                     self.globalQueue.status()[0]['ChildQueueUrl'])
def testPriority(self):
    """
    Test priority change functionality
    """
    jobSlot = 10
    totalSlices = 1
    self.queue.queueWork(self.spec.specUrl())
    self.queue.processInboundWork()
    # priority change
    self.queue.setPriority(50, self.spec.name())
    # all elements of the request should now carry the new priority
    # (the original comment saying elements are "cancelled" was wrong)
    self.assertEqual([x['Priority'] for x in self.queue.status(RequestName=self.spec.name())],
                     [50] * totalSlices)
    # changing priority of an unknown request must raise
    self.assertRaises(RuntimeError, self.queue.setPriority, 50, 'blahhhhh')
    # claim all work
    work = self.queue.getWork({'T2_XX_SiteA': jobSlot}, {})
    self.assertEqual(len(work), totalSlices)
    # no more work available
    self.assertEqual(0, len(self.queue.getWork({'T2_XX_SiteA': jobSlot}, {})))
def testProcessing(self):
"""
Enqueue and get work for a processing WMSpec.
"""
processingSpec = self.setupReReco(assignArgs={'SiteWhitelist': ["T2_XX_SiteA", "T2_XX_SiteB", "T2_XX_SiteC"]})
specfile = processingSpec.specUrl()
# Queue Work & check accepted
self.queue.queueWork(specfile)
self.queue.processInboundWork()
self.assertEqual(NBLOCKS_HICOMM, len(self.queue))
self.queue.updateLocationInfo()
# No resources
work = self.queue.getWork({}, {})
self.assertEqual(len(work), 0)
work = self.queue.getWork({'T2_XX_SiteA': 0, 'T2_XX_SiteB': 0}, {})
self.assertEqual(len(work), 0)
# Get the first bit of work available at site C
work = self.queue.getWork({'T2_XX_SiteC': 1}, {})
self.assertEqual(len(work), 1) # Double check A
processedBlocks = len(work)
processedFiles = work[0]["NumOfFilesAdded"]
# Get the rest the of work available at site C
work = self.queue.getWork({'T2_XX_SiteC': 1000}, {})
processedBlocks += len(work)
for element in work:
processedFiles += element["NumOfFilesAdded"]
self.assertEqual(processedBlocks, 9)
self.assertEqual(processedFiles, 14)
# Get the rest the of work available at site B
work = self.queue.getWork({'T2_XX_SiteB': 1000}, {})
processedBlocks += len(work)
for element in work:
processedFiles += element["NumOfFilesAdded"]
self.assertEqual(processedBlocks, 31)
self.assertEqual(processedFiles, 52)
# Make sure no work left for B or C
| |
<gh_stars>0
# -*- coding:utf-8 -*-
from ciscoconfparse import CiscoConfParse
from logzero import logger as log
import logzero
import re
# from netaddr import IPNetwork, IPAddress
# from ipaddress import IPv4Network
import ipaddress
from time import time
## v0.1a - 2015-06-01: add lastseen property
## v0.1b - 2016-02-02: add properties "FIRST SEEN", "SERIALNUMBER", "VDSL LINEPROFILE"
## v0.1c - 2016-03-16: update getVdslLineProfile() + add property "VDSL LINEPROFILE UPDATED"
## v0.1d - 2016-05-24: add tunnel interface on IPSEC routers for 4G
## v0.1e - 2016-10-24: add property APN + SOFTWARE
## v0.1f - 2017-01-18: match VT regex on 5 or 6 digits, to make it work with new 6 digit VT's
## v0.1g - 2017-04-12: add properties for CELLULAR
## v0.1h - 2022-01-25: fix mismatches between ops/conf info about interfaces (uppercase/lowercase),
## for LBB15x devices
version = "v0.1h"
class RouterFactory(object):
    """Factory that builds the right Router subclass for a given type."""

    SUPPORTED_ROUTER_TYPES = ["CPE", "PE"]

    @staticmethod
    def NewRouter(routertype="CPE", configfile=""):
        """Return a new PERouter or CPERouter built from *configfile*.

        :param routertype: one of SUPPORTED_ROUTER_TYPES ("CPE" or "PE")
        :param configfile: path to the saved router config
        :raises ValueError: for an unsupported router type
        """
        # lazy %-style args so the string is only built when DEBUG is enabled
        log.debug("===> new router object from config file: %s", configfile)
        # fix: dropped dead local `rtr = ""` which was assigned but never used
        if routertype == "PE":
            return PERouter(configfile)
        elif routertype == "CPE":
            return CPERouter(configfile)
        else:
            log.error("unsupported router type: %s", routertype)
            raise ValueError('Unsupported router type: "%s".' % routertype)

    @staticmethod
    def SupportedRouterTypes():
        """
        Returns the supported router types
        """
        return RouterFactory.SUPPORTED_ROUTER_TYPES
class Router(object):
# configdir = ""
TELNETOK = 1 # number of days since "lastseen" timestamp that we consider telnet to be working
INTERNAL_VRF = [
"MODEM"
] # internally used for mgmt purposes, exclude these from props like "is_multivrf"
def __init__(self, configfile):
    """Initialise the property store for a router parsed from *configfile*."""
    self.props = {
        "hostname": "",  # this is the actual hostname found on the device
        "hostname_guess": "",  # this is the hostname found by "guessing" based on interface description
        "mgmt_interfaces": [],  # list of all mgmt (loopback for IPVPN or WAN P2P for CI) interfaces
        "p2p_interfaces": [],  # list of all WAN P2P interfaces
        "ipsec_tunnel_interfaces": [],  # list of all IPSEC Tunnel interfaces
        "mobile_interfaces": [],  # list of all mobile interfaces (ex. 3G, 4G)
        "all_interfaces": [],
        "all_vrfs": [],  # list of all VRF objects
        "lastseen": "0",  # epoch timestamp when the config was last saved, router was last seen
        "vendor": "",  # ex. CISCO | ONEACCESS
        "hardware": "",  # ex. 888C | LBB4G
        "function": "",  # ex. CPE | PE
        "apn": "",  # APN configured on the router
        "cellularimei": "",  # IMEI of sim card
        "cellularimsi": "",  # IMSI of sim card
        "cellularcellid": "",  # connected cell id
        "cellularoperator": "",  # connected operator
        "software": "",  # router software
        "telnetok": False,  # assume telnet is ok if lastseen date is recent
        "configfile": configfile,  # name of the config file that is being parsed
    }
@property
def is_multivrf(self):
    """True when at least one non-management VRF is configured.

    VRFs listed in INTERNAL_VRF (used internally for mgmt) are ignored.
    """
    return any(
        vrf.vrf not in self.INTERNAL_VRF for vrf in self.props["all_vrfs"]
    )
# parse the running config of a router and get all info
def ParseRunningConfig(self, config):
    """Parse a saved running config and populate self.props.

    Runs the generic header/VRF/interface parsers first, then the
    routertype-specific `_parse_running_config` implemented by subclasses,
    and finally derives the `telnetok` flag from the "last seen" timestamp.
    """
    self.parser = CiscoConfParse(str(config).splitlines())
    # self.rtr._parse_running_config(parser)
    # generic parsers
    self._parse_config_header(
        [
            "HOSTNAME",
            "VENDOR",
            "LAST SEEN",
            "HARDWARE",
            "FUNCTION",
            "FIRST SEEN",
            "SERIALNUMBER",
            "VDSL BW DOWNLOAD",
            "VDSL BW UPLOAD",
            "VDSL LINEPROFILE",
            "VDSL LINEPROFILE UPDATED",
            "APN",
            "SOFTWARE",
            "CELLULAR IMEI",
            "CELLULAR IMSI",
            "CELLULAR CELLID",
            "CELLULAR OPERATOR",
        ]
    )
    self._parse_vrfs()
    self._parse_interfaces()
    # routertype specific parsers
    self._parse_running_config()
    # try to indicate if telnet has worked by checking the "last seen" time
    # let's assume that if it's more than 24h that telnet was not ok
    self.props["telnetok"] = self._calculate_lastseen()
def GetProp(self, property):
    """Return the value of one router property (raises KeyError if unknown)."""
    # NOTE(review): 'property' shadows the builtin; kept for interface compatibility
    return self.props[property]
def GetAllProps(self):
    """Return the full property dictionary (not a copy)."""
    return self.props
def SetProp(self, property, value):
    """Set one router property to *value*."""
    self.props[property] = value
def _calculate_lastseen(self):
    """True when the config was saved within the TELNETOK window (in days)."""
    age_seconds = int(time()) - int(self.props["lastseen"])
    max_age_seconds = self.TELNETOK * 24 * 3600
    return age_seconds < max_age_seconds
## parse header parameters which are generated in the config files by the backup script
## they all should start with ! <param>: <value>
def _parse_config_header(self, keywords):
    """Extract '! <KEYWORD>: <value>' header lines into self.props.

    Property keys are the keywords lowercased with spaces removed
    (e.g. 'LAST SEEN' -> 'lastseen'). A subset of keywords is defaulted
    to '' when absent so later code can rely on the key existing.
    """
    for kw in keywords:
        lines = self.parser.find_lines(r"^! %s:" % kw)
        if len(lines) > 0:
            log.info(
                "property found in header: %s = %s" % (kw, lines[0].split(": ")[-1])
            )
            # value is everything after the last ': ' on the first match
            val = lines[0].split(": ")
            if len(val) > 1:
                self.props[kw.replace(" ", "").lower()] = val[-1]
        else:
            # if kw == 'VDSL LINEPROFILE' or kw == 'VDSL LINEPROFILE UPDATED' or kw == 'FIRST SEEN' or kw == 'SERIALNUMBER':
            if kw in (
                "VDSL LINEPROFILE",
                "VDSL LINEPROFILE UPDATED",
                "FIRST SEEN",
                "SERIALNUMBER",
                "VDSL BW DOWNLOAD",
                "VDSL BW UPLOAD",
            ):
                self.props[kw.replace(" ", "").lower()] = ""
            log.debug("property %s is not found in the config header" % kw)
def GetAllVTFromRouter(self):
    """Return the VT numbers of every interface on this router.

    NOTE(review): the original implementation collected each VT twice --
    a for-loop over all interfaces immediately followed by an equivalent
    comprehension over the same interfaces -- almost certainly a
    refactoring leftover. Each VT is now collected exactly once, and the
    side-effect list comprehensions are gone.
    """
    return [vt for intf in self.props["all_interfaces"] for vt in intf.vt]
def _merge_two_interfaces(self, intf1, intf2):
    """Merge two parsed interface objects into one.

    The header line of *intf1* is used as the merged interface; the child
    (config) lines of both interfaces are appended under it.

    NOTE(review): despite the original comment, `find_objects` returns a
    *list* of IOSCfgLine objects, not a single object -- callers appear to
    expect that; confirm before changing.
    NOTE(review): `intf1.text` is passed to `find_objects` as a regex, so
    regex metacharacters in the interface line (e.g. '.') match loosely.
    """
    new_interface = CiscoConfParse(
        [intf1.text]
        + [line.text for line in intf1.children]
        + [line.text for line in intf2.children]
    )
    intf_cfg = new_interface.find_objects(intf1.text)
    return intf_cfg
# parse the output of "show ip int brief" to find all L3 interfaces
# for all the interfaces found, create an interface object
# we use output of "show ip int brief" to make sure that we see the IP for DHCP interfaces and virtual interfaces
def _parse_interfaces(self):
lines = self.parser.find_lines(
r"^! INT: .*\W([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+|<unassigned>).*([Uu]p|[Dd]own).*"
)
for l in lines:
m = re.match(
r"! INT: +([\w\/\. \-]+\w).*\W([0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}|<unassigned>).*(?:[upUP]|[downDOWN])",
l,
)
if not m:
log.warn(
"skip parsing, no interface and ip found for line: {}".format(l)
)
continue
if not (m and len(m.groups()) > 0):
log.warn(
"L3 interface found but unable to parse interface or ip address (%s)"
% l
)
else:
_intf = m.groups()[0].strip()
_ip = m.groups()[1]
_intf_name = _intf
_intf_idx = ""
log.debug("**** line = {}".format(l))
log.debug("**** _intf = {}".format(_intf))
log.debug("**** _ip = {}".format(_ip))
##### SOME INTERFACES CAN BE SKIPPED
## for OneAccess: skip interfaces matching ip = 127.0.0.1
if _ip == "127.0.0.1":
log.debug("Skipping interface (%s) because of ip 127.0.0.1" % _intf)
continue
elif _ip == "<unassigned>" and "Tunnel" not in _intf:
log.debug(
"Unassigned IP found for a none-tunnel interface - removing it"
)
continue
elif _ip == "<unassigned>":
_ip = ""
## skip Cisco NVI interfaces
if "NVI" in _intf:
log.debug(
"Skipping interface (%s) because of interface name" % _intf
)
continue
if "Virtual-Access70." in _intf:
log.debug(
"Skipping interface (%s) because of interface name" % _intf
)
continue
# we need to match the interface name found in "show ip int brief" with
# the interface name found in "show running-config"
# With severeal OS there is a mismatch due to change in upper/lower case
# Exception: we don't want to match Dialer from "show ip int brief" with dialer from "show run"
# because this should be mapped to virtual-template instead
all_interfaces = self.parser.find_lines("^interface \w+")
m = re.findall(
r"interface ((?!Virtual-Access)[\w\/\. \-]+\w)",
"\n".join(all_interfaces),
re.MULTILINE,
)
matched_interfaces = [i for i in m if _intf.lower() == i.lower()]
if matched_interfaces:
log.debug(
"matched the interface show_ip_int_brief={}, show_run={}".format(
_intf, matched_interfaces[0]
)
)
_intf = matched_interfaces[0]
m = re.match(r"([^0-9]+)(.*)", _intf)
# interface may be truncated in "show ip int brief" output
if len(m.groups()) > 0:
_intf_name = m.groups()[0]
_intf_idx = m.groups()[1]
log.debug("---> new L3 interface found: {} - {}".format(_intf, _ip))
try:
# in some OS versions the first letter does not always correspond between
# operational output and config output
# caveat: don't lowercase Dialer interfaces because this messes up the
# translation from dialer to virtual-template ppp
# there also exists a dialer interface in the config but this
# config does not have the ip address info etc
# if _intf_name.startswith("Dialer") or _intf_name.startswith(
# "Virtual-Access"
# ):
# first_letter = _intf_name[0]
# else:
# first_letter = "[{}{}]".format(
# _intf_name[0].lower(), _intf_name[0].upper()
# )
# log.debug(m)
# for i in m:
# log.debug("i={}".format(i))
# log.debug("to match={}".format(_intf_name))
# if i.lower() == _intf_name.lower():
# print("MATCH FOUND")
# log.debug("all interfaces: {}".format(m))
# log.debug("matched interface: {}".format(found_interface))
# match the "show run" interfaces withe the "show ip int brief" interface
# intf_cfg = self.parser.find_objects(
# "^interface {}{}[^0-9]*{}( .*)?$".format(
# first_letter, _intf_name[1:], _intf_idx
# )
# )
intf_cfg = self.parser.find_objects(
"^interface {}[^0-9]*{}( .*)?$".format(_intf_name, | |
import tensorflow as tf
import sonnet as snt
import numpy as np
from graph_nets import blocks
from tensorflow_addons.image import gaussian_filter2d
from graph_nets.graphs import GraphsTuple
from graph_nets.modules import _unsorted_segment_softmax, _received_edges_normalizer, GraphIndependent, SelfAttention, GraphNetwork
from graph_nets.utils_tf import fully_connect_graph_dynamic, concat
from sonnet.src import utils, once
from neural_deprojection.graph_net_utils import AbstractModule
class MultiHeadLinear(AbstractModule):
    """Linear module with `num_heads` independent heads, optionally including bias."""
    def __init__(self,
                 output_size: int,
                 num_heads: int = 1,
                 with_bias: bool = True,
                 w_init=None,
                 b_init=None,
                 name=None):
        """Constructs a `MultiHeadLinear` module.
        Args:
          output_size: Output dimensionality (per head).
          num_heads: Number of parallel heads; each head gets its own
            [input_size, output_size] weight matrix (and bias when enabled).
          with_bias: Whether to include bias parameters. Default `True`.
          w_init: Optional initializer for the weights. By default the weights are
            initialized truncated random normal values with a standard deviation of
            `1 / sqrt(input_feature_size)`, which is commonly used when the inputs
            are zero centered (see https://arxiv.org/abs/1502.03167v3).
          b_init: Optional initializer for the bias. By default the bias is
            initialized to zero.
          name: Name of the module.
        """
        super(MultiHeadLinear, self).__init__(name=name)
        self.output_size = output_size
        self.with_bias = with_bias
        self.w_init = w_init
        self.num_heads = num_heads
        if with_bias:
            self.b_init = b_init if b_init is not None else snt.initializers.Zeros()
        elif b_init is not None:
            raise ValueError("When not using a bias the b_init must be None.")
    @once.once
    def _initialize(self, inputs: tf.Tensor):
        """Constructs parameters used by this module (runs once, lazily, on first call)."""
        utils.assert_minimum_rank(inputs, 2)
        input_size = inputs.shape[-1]
        if input_size is None:  # Can happen inside an @tf.function.
            raise ValueError("Input size must be specified at module build time.")
        self.input_size = input_size
        if self.w_init is None:
            # See https://arxiv.org/abs/1502.03167v3.
            stddev = 1 / tf.math.sqrt(self.input_size * 1.0)
            self.w_init = snt.initializers.TruncatedNormal(stddev=stddev)
        self.w = tf.Variable(
            self.w_init([self.num_heads, self.input_size, self.output_size], inputs.dtype),
            name="w")
        if self.with_bias:
            self.b = tf.Variable(
                self.b_init([self.num_heads, self.output_size], inputs.dtype), name="b")
    def _build(self, inputs: tf.Tensor) -> tf.Tensor:
        """Applies every head to `inputs`; returns [num_nodes, num_heads, output_size]."""
        self._initialize(inputs)
        # [num_nodes, node_size].[num_heads, node_size, output_size] -> [num_nodes, num_heads, output_size]
        outputs = tf.einsum('ns,hso->nho', inputs, self.w, optimize='optimal')
        # outputs = tf.matmul(inputs, self.w)
        if self.with_bias:
            outputs = tf.add(outputs, self.b)
        return outputs
class RelationNetwork(AbstractModule):
    """Implementation of a Relation Network.

    See https://arxiv.org/abs/1706.01427 for more details.

    The global and edge features of the input graph are unused and may be
    `None` (the receivers and senders properties must be present). The output
    graph has updated, non-`None`, globals.
    """

    def __init__(self,
                 edge_model_fn,
                 global_model_fn,
                 reducer=tf.math.unsorted_segment_mean,
                 use_globals=False,
                 name="relation_network"):
        """Initializes the RelationNetwork module.

        Args:
          edge_model_fn: callable passed to EdgeBlock for per-edge computation;
            must return a Sonnet module (or equivalent; see EdgeBlock).
          global_model_fn: callable passed to GlobalBlock for per-global
            computation; must return a Sonnet module (or equivalent).
          reducer: reducer used by GlobalBlock to aggregate edges. Defaults to
            tf.math.unsorted_segment_mean.
          use_globals: whether the blocks also consume the input graph globals.
          name: the module name.
        """
        super(RelationNetwork, self).__init__(name=name)
        self._edge_block = blocks.EdgeBlock(edge_model_fn=edge_model_fn,
                                            use_edges=False,
                                            use_receiver_nodes=True,
                                            use_sender_nodes=True,
                                            use_globals=use_globals)
        self._global_block = blocks.GlobalBlock(global_model_fn=global_model_fn,
                                                use_edges=True,
                                                use_nodes=False,
                                                use_globals=use_globals,
                                                edges_reducer=reducer)

    def _build(self, graph):
        """Connects the RelationNetwork.

        Args:
          graph: a `graphs.GraphsTuple`; edges and globals may be `None`.

        Returns:
          A `graphs.GraphsTuple` with updated globals.

        Raises:
          ValueError: if `graph.nodes`, `graph.receivers` or `graph.senders`
            is `None`.
        """
        return self._global_block(self._edge_block(graph))
class EncodeProcessDecode(AbstractModule):
    """Full encode-process-decode model.
    The model we explore includes three components:
    - An "Encoder" graph net, which independently encodes the edge, node, and
    global attributes (does not compute relations etc.).
    - A "Core" graph net, which performs N rounds of processing (message-passing)
    steps. The input to the Core is the concatenation of the Encoder's output
    and the previous output of the Core (labeled "Hidden(t)" below, where "t" is
    the processing step).
    - A "Decoder" graph net, which independently decodes the edge, node, and
    global attributes (does not compute relations etc.), on each message-passing
    step.
    Hidden(t)   Hidden(t+1)
    |        ^
    *---------*  |   *------*  |  *---------*
    |         |  |   |      |  |  |         |
    Input --->| Encoder |  *->| Core |--*->| Decoder |---> Output(t)
    |         |---->|      |     |         |
    *---------*     *------*     *---------*
    """
    def __init__(self,
                 encoder,
                 core,
                 decoder,
                 name="EncodeProcessDecode"):
        # encoder/core/decoder are modules with compatible GraphsTuple interfaces
        super(EncodeProcessDecode, self).__init__(name=name)
        self._encoder = encoder
        self._core = core
        self._decoder = decoder
    def _build(self, input_graph, num_processing_steps):
        """Encode, run the core `num_processing_steps` times, then decode."""
        latent_graph = self._encoder(input_graph)
        # for _ in range(num_processing_steps):
        #     latent_graph = self._core(latent_graph)
        # state = (counter, latent_graph)
        # tf.while_loop instead of a Python loop: the graph stays the same size
        # regardless of num_processing_steps (no unrolling)
        _, latent_graph = tf.while_loop(cond=lambda const, state: const < num_processing_steps,
                                        body=lambda const, state: (const+1, self._core(state)),
                                        loop_vars=(tf.constant(0), latent_graph))
        return self._decoder(latent_graph)
class CoreNetwork(AbstractModule):
    """
    Core network which can be used in the EncodeProcessDecode network. Consists of a (full) graph network block
    and a self attention block.
    """
    def __init__(self,
                 num_heads,
                 multi_head_output_size,
                 input_node_size,
                 name=None):
        """
        Args:
            num_heads: number of attention heads.
            multi_head_output_size: per-head size of the value/key/query projections.
            input_node_size: node feature size of the input (and output) graph.
            name: module name.
        """
        super(CoreNetwork, self).__init__(name=name)
        self.num_heads = num_heads
        self.multi_head_output_size = multi_head_output_size
        # projects the concatenated attention heads back to the input node size
        self.output_linear = snt.Linear(output_size=input_node_size)
        self.FFN = snt.nets.MLP([32, input_node_size], activate_final=False)  # Feed forward network
        # NOTE(review): `normalization` appears unused in _build (the layer
        # norms below are used instead) -- confirm before removing.
        self.normalization = lambda x: (x - tf.reduce_mean(x)) / tf.math.reduce_std(x)
        self.ln1 = snt.LayerNorm(axis=1, eps=1e-6, create_scale=True, create_offset=True)
        self.ln2 = snt.LayerNorm(axis=1, eps=1e-6, create_scale=True, create_offset=True)
        self.v_linear = MultiHeadLinear(output_size=multi_head_output_size, num_heads=num_heads)  # values
        self.k_linear = MultiHeadLinear(output_size=multi_head_output_size, num_heads=num_heads)  # keys
        self.q_linear = MultiHeadLinear(output_size=multi_head_output_size, num_heads=num_heads)  # queries
        self.self_attention = SelfAttention()
    def _build(self, latent):
        """One self-attention message-passing step over `latent` (a GraphsTuple)."""
        node_values = self.v_linear(latent.nodes)
        node_keys = self.k_linear(latent.nodes)
        node_queries = self.q_linear(latent.nodes)
        attended_latent = self.self_attention(node_values=node_values,
                                              node_keys=node_keys,
                                              node_queries=node_queries,
                                              attention_graph=latent)
        # concatenate heads, project back, then residual + layer norm, then FFN + layer norm
        output_nodes = tf.reshape(attended_latent.nodes, (-1, self.num_heads * self.multi_head_output_size))
        output_nodes = self.ln1(self.output_linear(output_nodes) + latent.nodes)
        output_nodes = self.ln2(self.FFN(output_nodes))
        output_graph = latent.replace(nodes=output_nodes)
        return output_graph
class EncoderNetwork(AbstractModule):
    """Encoder that turns an input graph into viable input for the Core network.

    A node block updates the node features; a RelationNetwork then generates
    edge and global features from them.
    """

    def __init__(self,
                 edge_model_fn,
                 node_model_fn,
                 global_model_fn,
                 name=None):
        super(EncoderNetwork, self).__init__(name=name)
        # node features only -- no edges/globals are consumed at this stage
        self.node_block = blocks.NodeBlock(node_model_fn,
                                           use_received_edges=False,
                                           use_sent_edges=False,
                                           use_nodes=True,
                                           use_globals=False)
        self.relation_network = RelationNetwork(edge_model_fn=edge_model_fn,
                                                global_model_fn=global_model_fn)

    def _build(self, input_graph):
        node_updated_graph = self.node_block(input_graph)
        return self.relation_network(node_updated_graph)
class AutoEncoder(AbstractModule):
    """Convolutional autoencoder over single-channel images.

    Encoder downsamples by 4x three times (4 -> 16 -> 64 channels); the
    decoder mirrors it back and projects to one channel. Input and output
    images are logged (min-max normalised) to TensorBoard summaries.
    """

    def __init__(self, kernel_size=4, name=None):
        super(AutoEncoder, self).__init__(name=name)
        # BUG FIX: _step must exist before the `step` property is first read,
        # otherwise accessing it raised AttributeError instead of the intended
        # ValueError below.
        self._step = None
        self.encoder = snt.Sequential([snt.Conv2D(4, kernel_size, stride=4, padding='SAME'), tf.nn.relu,
                                       snt.Conv2D(16, kernel_size, stride=4, padding='SAME'), tf.nn.relu,
                                       snt.Conv2D(64, kernel_size, stride=4, padding='SAME'), tf.nn.relu])
        self.decoder = snt.Sequential([snt.Conv2DTranspose(64, kernel_size, stride=4, padding='SAME'), tf.nn.relu,
                                       snt.Conv2DTranspose(16, kernel_size, stride=4, padding='SAME'), tf.nn.relu,
                                       snt.Conv2DTranspose(4, kernel_size, stride=4, padding='SAME'), tf.nn.relu,
                                       snt.Conv2D(1, kernel_size, padding='SAME')])

    @property
    def step(self):
        """Global step used for summary logging; must be set before _build runs."""
        if self._step is None:
            raise ValueError("Need to set step idx variable. model.step = epoch")
        return self._step

    @step.setter
    def step(self, value):
        self._step = value

    def _build(self, batch):
        (img, ) = batch
        # light gaussian smoothing before encoding
        img = gaussian_filter2d(img, filter_shape=[6, 6])
        img_before_autoencoder = (img - tf.reduce_min(img)) / (
                tf.reduce_max(img) - tf.reduce_min(img))
        tf.summary.image('img_before_autoencoder', img_before_autoencoder, step=self.step)
        encoded_img = self.encoder(img)
        # (removed stray debug print of encoded_img.shape)
        decoded_img = self.decoder(encoded_img)
        img_after_autoencoder = (decoded_img - tf.reduce_min(decoded_img)) / (
                tf.reduce_max(decoded_img) - tf.reduce_min(decoded_img))
        tf.summary.image('img_after_autoencoder', img_after_autoencoder, step=self.step)
        return decoded_img
class Model(AbstractModule):
"""Model inherits from AbstractModule, which contains a __call__ function which executes a _build function
that is to be specified in the child class. So for example:
model = Model(), then model() returns the output of _build()
AbstractModule inherits from snt.Module, which has useful functions that can return the (trainable) variables,
so the Model class has this functionality as well
An instance of the RelationNetwork class also inherits from AbstractModule,
so it also executes its _build() function when called and it can return its (trainable) variables
A RelationNetwork contains an edge block and a global block:
The edge block generally uses the edge, receiver, sender and global attributes of the input graph
to calculate the new edges.
In our case we currently only use the receiver and sender attributes to calculate the edges.
The global block generally uses the aggregated edge, aggregated node and the global attributes of the input graph
to calculate the new globals.
In our case we currently only use the aggregated edge attributes to calculate the new globals.
As input the RelationNetwork needs two (neural network) functions:
one to calculate the new edges from receiver and sender nodes
and one to calculate the globals from the aggregated edges.
The new edges will be a vector with size 16 (i.e. the output of the first function in the RelationNetwork)
The new globals will also be a vector with size 16 (i.e. the output of the second function in the RelationNetwork)
The image_cnn downscales the image (currently from 4880x4880 to 35x35) and encodes the image in 16 channels.
So we (currently) go from (4880,4880,1) to (35,35,16)
"""
def __init__(self,
mlp_size=16,
cluster_encoded_size=10,
image_encoded_size=64,
num_heads=10,
kernel_size=4,
image_feature_size=16,
core_steps=10, name=None):
super(Model, self).__init__(name=name)
self.epd_graph = EncodeProcessDecode(encoder=EncoderNetwork(edge_model_fn=lambda: snt.nets.MLP([mlp_size], activate_final=True),
node_model_fn=lambda: snt.Linear(cluster_encoded_size),
global_model_fn=lambda: snt.nets.MLP([mlp_size], activate_final=True)),
core=CoreNetwork(num_heads=num_heads,
multi_head_output_size=cluster_encoded_size,
input_node_size=cluster_encoded_size),
decoder=EncoderNetwork(edge_model_fn=lambda: snt.nets.MLP([mlp_size], activate_final=True),
node_model_fn=lambda: snt.Linear(cluster_encoded_size),
global_model_fn=lambda: snt.nets.MLP([32, 32, 256], activate_final=True)))
self.epd_image = EncodeProcessDecode(encoder=EncoderNetwork(edge_model_fn=lambda: snt.nets.MLP([mlp_size], | |
<reponame>jscsmk/medicaldetectiontoolkit<filename>predictor.py
#!/usr/bin/env python
# Copyright 2018 Division of Medical Image Computing, German Cancer Research Center (DKFZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import numpy as np
import torch
from scipy.stats import norm
from collections import OrderedDict
from multiprocessing import Pool
import pickle
import pandas as pd
class Predictor:
"""
Prediction pipeline:
- receives a patched patient image (n_patches, c, y, x, (z)) from patient data loader.
- forwards patches through model in chunks of batch_size. (method: batch_tiling_forward)
- unmolds predictions (boxes and segmentations) to original patient coordinates. (method: spatial_tiling_forward)
Ensembling (mode == 'test'):
- for inference, forwards 4 mirrored versions of image to through model and unmolds predictions afterwards
accordingly (method: data_aug_forward)
- for inference, loads multiple parameter-sets of the trained model corresponding to different epochs. for each
parameter-set loops over entire test set, runs prediction pipeline for each patient. (method: predict_test_set)
Consolidation of predictions:
- consolidates a patient's predictions (boxes, segmentations) collected over patches, data_aug- and temporal ensembling,
performs clustering and weighted averaging (external function: apply_wbc_to_patient) to obtain consistent outputs.
- for 2D networks, consolidates box predictions to 3D cubes via clustering (adaptation of non-maximum suppression).
(external function: merge_2D_to_3D_preds_per_patient)
Ground truth handling:
- dismisses any ground truth boxes returned by the model (happens in validation mode, patch-based groundtruth)
- if provided by data loader, adds 3D ground truth to the final predictions to be passed to the evaluator.
"""
def __init__(self, cf, net, logger, mode):
self.cf = cf
self.logger = logger
# mode is 'val' for patient-based validation/monitoring and 'test' for inference.
self.mode = mode
# model instance. In validation mode, contains parameters of current epoch.
self.net = net
# rank of current epoch loaded (for temporal averaging). this info is added to each prediction,
# for correct weighting during consolidation.
self.rank_ix = '0'
# number of ensembled models. used to calculate the number of expected predictions per position
# during consolidation of predictions. Default is 1 (no ensembling, e.g. in validation).
self.n_ens = 1
if self.mode == 'test':
try:
self.epoch_ranking = np.load(os.path.join(self.cf.fold_dir, 'epoch_ranking.npy'))[:cf.test_n_epochs]
except:
raise RuntimeError('no epoch ranking file in fold directory. '
'seems like you are trying to run testing without prior training...')
self.n_ens = cf.test_n_epochs
if self.cf.test_aug:
self.n_ens *= 4
    def predict_patient(self, batch):
        """
        predicts one patient.
        called either directly via loop over validation set in exec.py (mode=='val')
        or from self.predict_test_set (mode=='test').
        in val mode: adds 3D ground truth info to predictions and runs consolidation and 2Dto3D merging of predictions.
        in test mode: returns raw predictions (ground truth addition, consolidation, 2D to 3D merging are
        done in self.predict_test_set, because patient predictions across several epochs might be needed
        to be collected first, in case of temporal ensembling).
        :param batch: dict with at least 'pid' and network inputs; may contain 'patch_crop_coords'
            (patched patients) and, in val mode, 'patient_bb_target' / 'patient_roi_labels'.
        :return. results_dict: stores the results for one patient. dictionary with keys:
                 - 'boxes': list over batch elements. each element is a list over boxes, where each box is
                            one dictionary: [[box_0, ...], [box_n,...]]. batch elements are slices for 2D predictions
                            (if not merged to 3D), and a dummy batch dimension of 1 for 3D predictions.
                 - 'seg_preds': pixel-wise predictions. (b, 1, y, x, (z))
                 - monitor_values (only in validation mode)
        """
        self.logger.info('evaluating patient {} for fold {} '.format(batch['pid'], self.cf.fold))

        # True if patient is provided in patches and predictions need to be tiled.
        self.patched_patient = True if 'patch_crop_coords' in list(batch.keys()) else False

        # forward batch through prediction pipeline.
        results_dict = self.data_aug_forward(batch)

        if self.mode == 'val':
            # append the 3D ground-truth boxes so the evaluator can match predictions against them.
            for b in range(batch['patient_bb_target'].shape[0]):
                for t in range(len(batch['patient_bb_target'][b])):
                    results_dict['boxes'][b].append({'box_coords': batch['patient_bb_target'][b][t],
                                                     'box_label': batch['patient_roi_labels'][b][t],
                                                     'box_type': 'gt'})

            if self.patched_patient:
                # consolidate overlapping patch predictions via weighted box clustering.
                wcs_input = [results_dict['boxes'], 'dummy_pid', self.cf.class_dict, self.cf.wcs_iou, self.n_ens]
                results_dict['boxes'] = apply_wbc_to_patient(wcs_input)[0]

            if self.cf.merge_2D_to_3D_preds:
                # merge per-slice 2D boxes into 3D cubes for 3D evaluation.
                merge_dims_inputs = [results_dict['boxes'], 'dummy_pid', self.cf.class_dict, self.cf.merge_3D_iou]
                results_dict['boxes'] = merge_2D_to_3D_preds_per_patient(merge_dims_inputs)[0]

        return results_dict
    def predict_test_set(self, batch_gen, return_results=True):
        """
        wrapper around test method, which loads multiple (or one) epoch parameters (temporal ensembling), loops through
        the test set and collects predictions per patient. Also flattens the results per patient and epoch
        and adds optional ground truth boxes for evaluation. Saves out the raw result list for later analysis and
        optionally consolidates and returns predictions immediately.
        :param batch_gen: dict with a 'test' batch generator and 'n_test', the number of test batches.
        :param return_results: if True, consolidate predictions and return them; otherwise only save raw results.
        :return: (optionally) list_of_results_per_patient: list over patient results. each entry is a dict with keys:
                 - 'boxes': list over batch elements. each element is a list over boxes, where each box is
                            one dictionary: [[box_0, ...], [box_n,...]]. batch elements are slices for 2D predictions
                            (if not merged to 3D), and a dummy batch dimension of 1 for 3D predictions.
                 - 'seg_preds': not implemented yet. todo for evaluation of instance/semantic segmentation.
        """
        dict_of_patient_results = OrderedDict()

        # get paths of all parameter sets to be loaded for temporal ensembling. (or just one for no temp. ensembling).
        weight_paths = [os.path.join(self.cf.fold_dir, '{}_best_checkpoint'.format(epoch), 'params.pth') for epoch in
                        self.epoch_ranking]

        for rank_ix, weight_path in enumerate(weight_paths):
            self.logger.info(('tmp ensembling over rank_ix:{} epoch:{}'.format(rank_ix, weight_path)))
            self.net.load_state_dict(torch.load(weight_path))
            self.net.eval()
            self.rank_ix = str(rank_ix)  # get string of current rank for unique patch ids.
            with torch.no_grad():
                for _ in range(batch_gen['n_test']):
                    batch = next(batch_gen['test'])

                    # store batch info in patient entry of results dict.
                    # only on the first epoch pass: later passes reuse the same patient entries.
                    if rank_ix == 0:
                        dict_of_patient_results[batch['pid']] = {}
                        dict_of_patient_results[batch['pid']]['results_list'] = []
                        dict_of_patient_results[batch['pid']]['patient_bb_target'] = batch['patient_bb_target']
                        dict_of_patient_results[batch['pid']]['patient_roi_labels'] = batch['patient_roi_labels']

                    # call prediction pipeline and store results in dict.
                    results_dict = self.predict_patient(batch)
                    dict_of_patient_results[batch['pid']]['results_list'].append(results_dict['boxes'])

        self.logger.info('finished predicting test set. starting post-processing of predictions.')
        list_of_results_per_patient = []

        # loop over patients again to flatten results across epoch predictions.
        # if provided, add ground truth boxes for evaluation.
        for pid, p_dict in dict_of_patient_results.items():

            tmp_ens_list = p_dict['results_list']
            results_dict = {}
            # collect all boxes/seg_preds of same batch_instance over temporal instances.
            results_dict['boxes'] = [[item for d in tmp_ens_list for item in d[batch_instance]]
                                     for batch_instance in range(len(tmp_ens_list[0]))]

            # TODO return for instance segmentation:
            # results_dict['seg_preds'] = np.mean(results_dict['seg_preds'], 1)[:, None]
            # results_dict['seg_preds'] = np.array([[item for d in tmp_ens_list for item in d['seg_preds'][batch_instance]]
            #                                      for batch_instance in range(len(tmp_ens_list[0]['boxes']))])

            # add 3D ground truth boxes for evaluation.
            for b in range(p_dict['patient_bb_target'].shape[0]):
                for t in range(len(p_dict['patient_bb_target'][b])):
                    results_dict['boxes'][b].append({'box_coords': p_dict['patient_bb_target'][b][t],
                                                     'box_label': p_dict['patient_roi_labels'][b][t],
                                                     'box_type': 'gt'})
            list_of_results_per_patient.append([results_dict['boxes'], pid])

        # save out raw predictions.
        out_string = 'raw_pred_boxes_hold_out_list' if self.cf.hold_out_test_set else 'raw_pred_boxes_list'
        with open(os.path.join(self.cf.fold_dir, '{}.pickle'.format(out_string)), 'wb') as handle:
            pickle.dump(list_of_results_per_patient, handle)

        if return_results:
            # consolidate predictions.
            self.logger.info('applying wcs to test set predictions with iou = {} and n_ens = {}.'.format(
                self.cf.wcs_iou, self.n_ens))
            pool = Pool(processes=6)
            mp_inputs = [[ii[0], ii[1], self.cf.class_dict, self.cf.wcs_iou, self.n_ens] for ii in list_of_results_per_patient]
            list_of_results_per_patient = pool.map(apply_wbc_to_patient, mp_inputs, chunksize=1)
            pool.close()
            pool.join()

            # merge 2D boxes to 3D cubes. (if model predicts 2D but evaluation is run in 3D)
            if self.cf.merge_2D_to_3D_preds:
                self.logger.info('applying 2Dto3D merging to test set predictions with iou = {}.'.format(self.cf.merge_3D_iou))
                pool = Pool(processes=6)
                mp_inputs = [[ii[0], ii[1], self.cf.class_dict, self.cf.merge_3D_iou] for ii in list_of_results_per_patient]
                list_of_results_per_patient = pool.map(merge_2D_to_3D_preds_per_patient, mp_inputs, chunksize=1)
                pool.close()
                pool.join()

            return list_of_results_per_patient
def load_saved_predictions(self, apply_wbc=False):
"""
loads raw predictions saved by self.predict_test_set. consolidates and merges 2D boxes to 3D cubes for evaluation.
(if model predicts 2D but evaluation is run in 3D)
:return: (optionally) list_of_results_per_patient: list over patient results. each entry is a dict with keys:
- 'boxes': list over batch elements. each element is a list over boxes, where each box is
one dictionary: [[box_0, ...], [box_n,...]]. batch elements are slices for 2D predictions
(if not merged to 3D), and a dummy batch dimension of 1 for 3D predictions.
- 'seg_preds': not implemented yet. todo for evaluation of instance/semantic segmentation.
"""
# load predictions for a single test-set fold.
if not self.cf.hold_out_test_set:
with open(os.path.join(self.cf.fold_dir, 'raw_pred_boxes_list.pickle'), 'rb') as handle:
list_of_results_per_patient = pickle.load(handle)
da_factor = 4 if self.cf.test_aug else 1
n_ens = self.cf.test_n_epochs * da_factor
self.logger.info('loaded raw test set predictions with n_patients = {} and n_ens = {}'.format(
len(list_of_results_per_patient), n_ens))
# if hold out test | |
not not_found == []:
print('Missing files: ' + ', '.join(not_found))
else:
print('Recognised input fields were: ' + ', '.join(all_features))
if not self.to_generate == []:
print('These dust files will be generated: ' + ', '.join(self.to_generate))
print('Found {0} out of {1} files.'.format(len(all_features), len(self.features)))
if not not_found == []:
print('Missing files: ' + ', '.join(not_found))
self.features = all_features
if not self.features == []:
self.__feature = self.features[0]
else:
print('Nothing to convert! Exiting.')
raise SystemExit(0)
# ---------------------------------------------------------------------------------------------
# Important part starts here
# ---------------------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------------------
# Wrapper
#
    # Wraps the whole conversion process under one function. Once all user inputs are given this function can be called and it will:
    # 1. Set up the directories.
    # 2. Build the base grid and calculate/store the parent information.
    # 3. Go through all the features and perform the conversion process:
    #     a. Prepares the converted data by reordering it and possibly mirrors/extends it.
    #     b. Writes the converted data into the RADMC3D input file (write_data_binary/write_data_ascii).
    # 4. If no dust file exists or force is True, generate the dust file from the gas file.
    # 5. Check the dimensions of the data and grid file.
    # 6. Finally write the grid file.
# ---------------------------------------------------------------------------------------------
def Wrapper(self):
self.SetupDirs()
self.SetConstants()
self.fetch_features()
self.GetCoordinates()
self.completed = []
for feat in self.features:
if not feat in self.completed:
print('Converting: ' + feat)
gen = False
if 'dust' + feat[3:] in self.to_generate and 'dust' + feat[3:] not in self.generated:
print('Generating dust file from gas file.')
gen = True
self.__feature = feat
self.ncells_filt[self.__feature] = []
self.ConvertFiles()
if self.binary == True:
self.write_data_binary(generate_dust=gen)
else:
self.write_data_ascii(generate_dust=gen)
self.completed.append(feat)
self.write_grid_file()
print('Conversion Completed.')
print('Converted: ' + str(len(self.completed)) + '/' + str(len(self.features)))
if not self.to_generate == []:
print('Generated ' + str(len(self.generated)) + '/' + str(len(self.to_generate)))
# ---------------------------------------------------------------------------------------------
# Convert Files
#
# Converts all Levels of Jupiter Output Data to 1D lists in the right order.
    # 1. reads in the data files for the current __feature and reshapes them into a 3D array
    # 2. mirrors/extends the 3D array
    # 3. reorders each level
    # 4. appends the converted 1D lists to the cache list 'converted'.
# ---------------------------------------------------------------------------------------------
def ConvertFiles(self):
converted_dat = []
self.converted = []
for i in range(self.nLevels):
self.SetupNames(i)
in_dat = np.fromfile(self.dataDir + self.inFilename, dtype='double')
if "velocity" in self.__feature:
data2 = np.array([], dtype=np.float64)
data2 = np.append(data2, in_dat.astype(np.float64))
data2 = np.reshape(data2, (3, -1))
# 3 lines with entire data: theta, radial, phi
data3 = np.column_stack((data2[0], data2[1], data2[2]))
reordered_th = self.reorder_one_line(data3[:, 0], i)
reordered_r = self.reorder_one_line(data3[:, 1], i)
reordered_phi = self.reorder_one_line(data3[:, 2], i)
reordered_dat = []
for i in range(len(reordered_th)):
reordered_dat.append(reordered_r[i])
reordered_dat.append(reordered_th[i])
reordered_dat.append(reordered_phi[i])
print(len(reordered_dat))
reordered_dat = [x * self.VEL for x in reordered_dat]
else:
reordered_dat = self.reorder_one_line(in_dat, i)
if ("density" in self.__feature):
reordered_dat = [x * self.DENS for x in reordered_dat]
if ("temperature" in self.__feature):
reordered_dat = [x * self.TEMP for x in reordered_dat]
self.converted.append(reordered_dat)
# ---------------------------------------------------------------------------------------------
# GetCoordinates
# This function generates all the Information for the amr_grid.inp file:
# 1. Reads in the Descriptor file
# 2. Stores the grid points in the LevelCoords Dictionary
# 3. Stores the Number of Coordinates of each axis at each level in nLevelCoords
    # 4. Calls get_parent_info to store the parent information for each axis in their dictionaries
#
# ---------------------------------------------------------------------------------------------
    def GetCoordinates(self):
        """Parse the Jupiter descriptor file and build the amr_grid.inp information.

        1. Reads the descriptor file for every mesh level.
        2. Stores the grid vertices per axis and level in self.LevelCoords.
        3. Stores the number of vertices per axis and level in self.nLevelCoords.
        4. Calls get_parent_info() to compute the AMR parent information.

        Raises Exception if nLevels/nRefinements were not set, or if the final
        cell/vertex consistency check fails.
        """
        phi = []
        r = []
        th = []
        self.ncells = []

        # helper: round a float to p significant digits (currently unused in this method).
        def round_sig(f, p):
            ar = np.array([f])
            ar = ar.astype(np.float64)
            num = ar[0]
            return np.float64(('%.' + str(p) + 'g') % num)

        #round_sig = lambda f, p: (('%.' + str(p) + 'g') % [f].astype(np.float64)[0]).astype(np.float64)
        if self.nLevels < 0:
            raise Exception('Please set the number of mesh levels!')
        if self.nRefinements < 0:
            raise Exception('Please set the number of refinement levels!')
        for i in range(self.nLevels):
            self.SetupNames(i)
            # NOTE(review): the descriptor file handle is never closed; consider a with-block.
            dsc = open(self.dataDir + self.descriptorName)
            for j, line in enumerate(dsc):
                # each level occupies 11 lines in the descriptor; offset 6 holds the cell counts.
                if j == 6 + (i * 11):
                    n_phi, n_r, n_th = [int(x) for x in line.split()]
                    if i == 0 and n_r % 2 == 1:
                        self.oddR = True
                    # NOTE(review): stored as [n_r, n_th, n_phi] while nLevelCoords below is
                    # appended as [len(phi), len(r), len(th)] — the axis order differs; confirm intended.
                    self.ncells.append([n_r, n_th, n_phi])
                # Invert theta, phi so that coordinate system is right handed
                # (needed for cell orientation)
                if j == 8 + (i * 11):
                    cur = [np.float64(x) for x in line.split()]  # Import one line into list of values
                    cur.pop(0)  # First and last two points are 'ghost points'
                    cur.pop(0)
                    cur.pop()
                    cur.pop()
                    if i == 0:
                        self.__cur00 = cur[0]
                    # shift phi so that the base grid starts at 0.
                    phi.append([x - self.__cur00 for x in cur])
                    cur = []
                if j == 9 + (i * 11):
                    cur = [np.float64(x) for x in line.split()]
                    cur.pop(0)  # First and last two points are 'ghost points'
                    cur.pop(0)
                    # pop first radial vertice for odd number of cells
                    if self.oddR == True and i == 0:
                        cur.pop(0)
                    cur.pop()
                    cur.pop()
                    # scale radii to cgs units via the radius scale factor.
                    cur_scaled = [np.float64(x) * self.rcgs.value for x in cur]
                    r.append(cur_scaled)
                    cur = []
                if j == 10 + (i * 11):
                    cur = [np.float64(x) for x in line.split()]
                    cur.pop(0)  # First and last two points are 'ghost points'
                    cur.pop(0)
                    cur.pop()
                    cur.pop()
                    # optionally extend/mirror the theta axis (see extend_th_coords).
                    th.append((self.extend_th_coords(cur, i)))
                    cur = []
            self.nLevelCoords.append([len(phi[i]), len(r[i]), len(th[i])])
            dsc.seek(0)
        # Put phi,r,th array into LevelCoords
        self.LevelCoords['phi'] = phi
        self.LevelCoords['r'] = r
        self.LevelCoords['th'] = th
        self.get_parent_info()
        # NOTE(review): raises when the cell count equals the vertex count (vertices are expected
        # to be cells+1); it also compares ncells[0][0] (= n_r) against nLevelCoords[0][0]
        # (= len(phi)) — verify the axis order is intended.
        if self.ncells[0][0] == self.nLevelCoords[0][0]:
            raise Exception('Number of cells and number of vertices in the Base-Grid do not match!')
        else:
            print(str(self.nLevelCoords[0][0]) + ' x ' + str(self.nLevelCoords[0][1]) + ' x ' + str(
                self.nLevelCoords[0][2]) + " Vertices in filtered Base-Grid.")
# ---------------------------------------------------------------------------------------------
# get_parent_information
# used in get_coordinates
# Calculates the amr_grid.inp Grid File Parent Information and stores it in the Dictionaries.
# Actually just a wrapper for the pi_one_axis function.
# ---------------------------------------------------------------------------------------------
def get_parent_info(self):
# -----------------------------------------------------------------------------------------
# pi_one_axis
# Input: list of coords for each layer for one axis
# For a list of Coords for each layer it:
# 1. Find the Minima and Maxima
# 2. Searches for the index closest to the minima of the parent coords from the front
# 3. Searches for the index closest to the maxima of the parent coords from the back
# 4. Stores all the information in a dictionary
# Output: Dictionary with the mins, maxs, first and last shared and relative sizes.
# -----------------------------------------------------------------------------------------
def pi_one_axis(n_coords):
dict = {}
dict['mins'] = []
dict['maxs'] = []
dict['first shared'] = []
dict['last shared'] = []
dict['sizes'] = []
# Starts at level 1
for i in range(1, len(n_coords)):
dict['mins'].append(min(n_coords[i]))
dict['maxs'].append(max(n_coords[i]))
# FIXED: iterate from front for th centered around 0
smallest_diff = np.abs(n_coords[i - 1][0] - n_coords[i][0])
closest_index = 0
for j, item in enumerate(n_coords[i - 1][1:], 1):
if abs(item - n_coords[i][0]) < smallest_diff:
closest_index = j
smallest_diff = np.abs(n_coords[i - 1][j] - n_coords[i][0])
# +1 because radmc3d counts index from 1
first_shared = closest_index + 1
smallest_diff = np.abs(n_coords[i - 1][-1] - n_coords[i][-1])
closest_index = len(n_coords[i - 1]) - 1
for j, item in reversed(list(enumerate(n_coords[i - 1][0:-1]))):
if abs(item - n_coords[i][-1]) < smallest_diff:
closest_index = j
smallest_diff = np.abs(n_coords[i - 1][j] - n_coords[i][-1])
# +1 because radmc3d counts index from 1
last_shared = closest_index + 1
# last_shared = np.argmin(np.abs(n_coords[i - 1] - n_coords[i][-1]))+1
dict['first shared'].append(first_shared)
dict['last shared'].append(last_shared)
dict['sizes'].append(last_shared - first_shared)
return dict
self.pi_r = pi_one_axis(self.LevelCoords['r'])
self.pi_th = pi_one_axis(self.LevelCoords['th'])
self.pi_phi = pi_one_axis(self.LevelCoords['phi'])
# ---------------------------------------------------------------------------------------------
# extend_th_coords
# used in get_coordinates
# Input: Theta Array of Grid Points for one layer
# 1. Extends, if wanted, the theta axis by n_extend cells
# 2. Mirrors, if wanted, the theta axis along the midplane
# Output: extended and mirrored theta array
# ---------------------------------------------------------------------------------------------
def extend_th_coords(self, th_array, index):
len_th = len(th_array)
th_diff = th_array[1] - th_array[0]
# if ext == True, extends the theta array by 30 before mirroring
if self.n_extend > 0 and index == 0:
for i in range(0, self.n_extend):
th_array.insert(0, th_array[0] - th_diff)
len_th = len(th_array)
flipped_th = []
# Creates the mirror array
if self.mirror == True:
for i in range(1, len_th):
flipped_th.append(np.float64(th_array[-1] + (i * th_diff)))
return th_array + flipped_th
| |
g_param[OptionsDefine.Filter])
def doModifyDeviceAutoRenewFlag(args, parsed_globals):
    """CLI handler: invoke the BM `ModifyDeviceAutoRenewFlag` API and print the formatted response.

    args: action-specific CLI arguments, serialized into the request model.
    parsed_globals: parsed global CLI options (credentials, region, endpoint, output, filter).
    """
    g_param = parse_global_arg(parsed_globals)

    # build credentials and HTTP/client profiles from the global CLI options.
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.BmClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.ModifyDeviceAutoRenewFlagRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.ModifyDeviceAutoRenewFlag(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError as e:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3: result may be bytes
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyPayModePre2Post(args, parsed_globals):
    """CLI handler: invoke the BM `ModifyPayModePre2Post` API and print the formatted response.

    args: action-specific CLI arguments, serialized into the request model.
    parsed_globals: parsed global CLI options (credentials, region, endpoint, output, filter).
    """
    g_param = parse_global_arg(parsed_globals)

    # build credentials and HTTP/client profiles from the global CLI options.
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.BmClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.ModifyPayModePre2PostRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.ModifyPayModePre2Post(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError as e:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3: result may be bytes
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doUnbindPsaTag(args, parsed_globals):
    """CLI handler: invoke the BM `UnbindPsaTag` API and print the formatted response.

    args: action-specific CLI arguments, serialized into the request model.
    parsed_globals: parsed global CLI options (credentials, region, endpoint, output, filter).
    """
    g_param = parse_global_arg(parsed_globals)

    # build credentials and HTTP/client profiles from the global CLI options.
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.BmClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.UnbindPsaTagRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.UnbindPsaTag(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError as e:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3: result may be bytes
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doCreateSpotDevice(args, parsed_globals):
    """CLI handler: invoke the BM `CreateSpotDevice` API and print the formatted response.

    args: action-specific CLI arguments, serialized into the request model.
    parsed_globals: parsed global CLI options (credentials, region, endpoint, output, filter).
    """
    g_param = parse_global_arg(parsed_globals)

    # build credentials and HTTP/client profiles from the global CLI options.
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.BmClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.CreateSpotDeviceRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.CreateSpotDevice(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError as e:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3: result may be bytes
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doResetDevicePassword(args, parsed_globals):
    """CLI handler: invoke the BM `ResetDevicePassword` API and print the formatted response.

    args: action-specific CLI arguments, serialized into the request model.
    parsed_globals: parsed global CLI options (credentials, region, endpoint, output, filter).
    """
    g_param = parse_global_arg(parsed_globals)

    # build credentials and HTTP/client profiles from the global CLI options.
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.BmClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.ResetDevicePasswordRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.ResetDevicePassword(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError as e:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3: result may be bytes
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeDeviceInventory(args, parsed_globals):
    """CLI handler: invoke the BM `DescribeDeviceInventory` API and print the formatted response.

    args: action-specific CLI arguments, serialized into the request model.
    parsed_globals: parsed global CLI options (credentials, region, endpoint, output, filter).
    """
    g_param = parse_global_arg(parsed_globals)

    # build credentials and HTTP/client profiles from the global CLI options.
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.BmClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeDeviceInventoryRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.DescribeDeviceInventory(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError as e:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3: result may be bytes
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeDeviceOperationLog(args, parsed_globals):
    """CLI handler: invoke the BM `DescribeDeviceOperationLog` API and print the formatted response.

    args: action-specific CLI arguments, serialized into the request model.
    parsed_globals: parsed global CLI options (credentials, region, endpoint, output, filter).
    """
    g_param = parse_global_arg(parsed_globals)

    # build credentials and HTTP/client profiles from the global CLI options.
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.BmClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeDeviceOperationLogRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.DescribeDeviceOperationLog(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError as e:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3: result may be bytes
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeTaskInfo(args, parsed_globals):
    """CLI handler: invoke the BM `DescribeTaskInfo` API and print the formatted response.

    args: action-specific CLI arguments, serialized into the request model.
    parsed_globals: parsed global CLI options (credentials, region, endpoint, output, filter).
    """
    g_param = parse_global_arg(parsed_globals)

    # build credentials and HTTP/client profiles from the global CLI options.
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.BmClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeTaskInfoRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.DescribeTaskInfo(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError as e:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3: result may be bytes
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doRepairTaskControl(args, parsed_globals):
    """CLI handler: invoke the BM `RepairTaskControl` API and print the formatted response.

    args: action-specific CLI arguments, serialized into the request model.
    parsed_globals: parsed global CLI options (credentials, region, endpoint, output, filter).
    """
    g_param = parse_global_arg(parsed_globals)

    # build credentials and HTTP/client profiles from the global CLI options.
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.BmClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.RepairTaskControlRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.RepairTaskControl(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError as e:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3: result may be bytes
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeDevices(args, parsed_globals):
    """CLI handler: invoke the BM `DescribeDevices` API and print the formatted response.

    args: action-specific CLI arguments, serialized into the request model.
    parsed_globals: parsed global CLI options (credentials, region, endpoint, output, filter).
    """
    g_param = parse_global_arg(parsed_globals)

    # build credentials and HTTP/client profiles from the global CLI options.
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.BmClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeDevicesRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.DescribeDevices(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError as e:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3: result may be bytes
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeRepairTaskConstant(args, parsed_globals):
    """CLI handler: invoke the BM `DescribeRepairTaskConstant` API and print the formatted response.

    args: action-specific CLI arguments, serialized into the request model.
    parsed_globals: parsed global CLI options (credentials, region, endpoint, output, filter).
    """
    g_param = parse_global_arg(parsed_globals)

    # build credentials and HTTP/client profiles from the global CLI options.
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.BmClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeRepairTaskConstantRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.DescribeRepairTaskConstant(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError as e:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3: result may be bytes
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doSetOutBandVpnAuthPassword(args, parsed_globals):
    """CLI handler: invoke the BM `SetOutBandVpnAuthPassword` API and print the formatted response.

    args: action-specific CLI arguments, serialized into the request model.
    parsed_globals: parsed global CLI options (credentials, region, endpoint, output, filter).
    """
    g_param = parse_global_arg(parsed_globals)

    # build credentials and HTTP/client profiles from the global CLI options.
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.BmClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.SetOutBandVpnAuthPasswordRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.SetOutBandVpnAuthPassword(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError as e:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3: result may be bytes
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeCustomImages(args, parsed_globals):
    """CLI handler: invoke the BM `DescribeCustomImages` API and print the formatted response.

    args: action-specific CLI arguments, serialized into the request model.
    parsed_globals: parsed global CLI options (credentials, region, endpoint, output, filter).
    """
    g_param = parse_global_arg(parsed_globals)

    # build credentials and HTTP/client profiles from the global CLI options.
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.BmClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeCustomImagesRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.DescribeCustomImages(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError as e:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3: result may be bytes
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doRecoverDevices(args, parsed_globals):
    """CLI action: call the Bm RecoverDevices API and emit the response.

    :param args: dict of action-specific arguments (must be JSON-serializable).
    :param parsed_globals: parsed global CLI options (credentials, region, ...).
    """
    g_param = parse_global_arg(parsed_globals)

    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        # Default request timeout is 60s unless overridden on the command line.
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.BmClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.RecoverDevicesRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.RecoverDevices(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:  # to_json_string() may return bytes on python3.3
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeDevicePartition(args, parsed_globals):
    """CLI action: call the Bm DescribeDevicePartition API and emit the response.

    :param args: dict of action-specific arguments (must be JSON-serializable).
    :param parsed_globals: parsed global CLI options (credentials, region, ...).
    """
    g_param = parse_global_arg(parsed_globals)

    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        # Default request timeout is 60s unless overridden on the command line.
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.BmClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeDevicePartitionRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.DescribeDevicePartition(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:  # to_json_string() may return bytes on python3.3
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doShutdownDevices(args, parsed_globals):
    """CLI action: call the Bm ShutdownDevices API and emit the response.

    :param args: dict of action-specific arguments (must be JSON-serializable).
    :param parsed_globals: parsed global CLI options (credentials, region, ...).
    """
    g_param = parse_global_arg(parsed_globals)

    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        # Default request timeout is 60s unless overridden on the command line.
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.BmClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.ShutdownDevicesRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.ShutdownDevices(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:  # to_json_string() may return bytes on python3.3
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyDeviceAliases(args, parsed_globals):
    """CLI action: call the Bm ModifyDeviceAliases API and emit the response.

    :param args: dict of action-specific arguments (must be JSON-serializable).
    :param parsed_globals: parsed global CLI options (credentials, region, ...).
    """
    g_param = parse_global_arg(parsed_globals)

    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        # Default request timeout is 60s unless overridden on the command line.
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.BmClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.ModifyDeviceAliasesRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.ModifyDeviceAliases(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:  # to_json_string() may return bytes on python3.3
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doCreateCustomImage(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.CreateCustomImageRequest()
model.from_json_string(json.dumps(args))
rsp = client.CreateCustomImage(model)
result = rsp.to_json_string()
| |
= get_object_or_404(Document, docalias__name=name)
if doc.get_state_slug() != "expired":
raise Http404
resurrect_requested_by = None
e = doc.latest_event(type__in=('requested_resurrect', "completed_resurrect"))
if e and e.type == 'requested_resurrect':
resurrect_requested_by = e.by
if request.method == 'POST':
if resurrect_requested_by:
email_resurrection_completed(request, doc, requester=resurrect_requested_by)
events = []
e = DocEvent(doc=doc, rev=doc.rev, by=request.user.person)
e.type = "completed_resurrect"
e.desc = "Resurrection was completed"
e.save()
events.append(e)
doc.set_state(State.objects.get(used=True, type="draft", slug="active"))
doc.expires = datetime.datetime.now() + datetime.timedelta(settings.INTERNET_DRAFT_DAYS_TO_EXPIRE)
doc.save_with_history(events)
restore_draft_file(request, doc)
return HttpResponseRedirect(doc.get_absolute_url())
return render(request, 'doc/draft/resurrect.html',
dict(doc=doc,
resurrect_requested_by=resurrect_requested_by,
back_url=doc.get_absolute_url()))
def restore_draft_file(request, draft):
    """Move the latest revision's files for *draft* back from the archive."""
    # All archived artifacts of this revision share the "<name>-<rev>.*" basename.
    pattern = os.path.join(settings.INTERNET_DRAFT_ARCHIVE_DIR,
                           '{}-{}'.format(draft.name, draft.rev)) + '.*'
    log.log("Resurrecting %s. Moving files:" % draft.name)
    for path in glob.glob(pattern):
        try:
            shutil.move(path, settings.INTERNET_DRAFT_PATH)
            log.log(" Moved file %s to %s" % (path, settings.INTERNET_DRAFT_PATH))
        except shutil.Error as ex:
            # Best-effort: warn the user about this file and continue with the rest.
            messages.warning(request, 'There was an error restoring the draft file: {} ({})'.format(path, ex))
            log.log(" Exception %s when attempting to move %s" % (ex, path))
class IESGNoteForm(forms.Form):
    """Form for editing the free-text IESG note on a draft."""
    note = forms.CharField(widget=forms.Textarea, label="IESG note", required=False, strip=False)

    def clean_note(self):
        # Stored as plain text on purpose: munging the database content to use
        # html line breaks has caused a lot of pain in the past.
        raw = self.cleaned_data['note']
        return raw.replace('\r', '').strip()
@role_required("Area Director", "Secretariat")
def edit_iesg_note(request, name):
    """Edit the IESG note attached to a draft.

    On a change, records an "added_comment" DocEvent describing whether the
    note was added, changed, or cleared, then saves the document history.
    """
    doc = get_object_or_404(Document, type="draft", name=name)
    login = request.user.person

    initial = dict(note=doc.note)

    if request.method == "POST":
        form = IESGNoteForm(request.POST, initial=initial)

        if form.is_valid():
            new_note = form.cleaned_data['note']
            if new_note != doc.note:
                # doc.note may be None while new_note is always a string, so
                # handle the "cleared" case first; the previous nested version
                # left log_message unassigned (UnboundLocalError) when an
                # empty note replaced a None note.
                if not new_note:
                    log_message = "Note field has been cleared"
                elif doc.note:
                    log_message = "Note changed to '%s'" % new_note
                else:
                    log_message = "Note added '%s'" % new_note

                c = DocEvent(type="added_comment", doc=doc, rev=doc.rev, by=login)
                c.desc = log_message
                c.save()

                doc.note = new_note
                doc.save_with_history([c])

            return redirect('ietf.doc.views_doc.document_main', name=doc.name)
    else:
        form = IESGNoteForm(initial=initial)

    return render(request, 'doc/draft/edit_iesg_note.html',
                  dict(doc=doc,
                       form=form,
                       ))
class ShepherdWriteupUploadForm(forms.Form):
    """Form for supplying a shepherd writeup, either typed in or uploaded as .txt."""
    content = forms.CharField(widget=forms.Textarea, label="Shepherd writeup", help_text="Edit the shepherd writeup.", required=False, strip=False)
    txt = forms.FileField(label=".txt format", help_text="Or upload a .txt file.", required=False)

    def clean_content(self):
        # Normalize line endings to plain "\n".
        return self.cleaned_data["content"].replace("\r", "")

    def clean_txt(self):
        # Decode/sanitize the uploaded file into plain text.
        return get_cleaned_text_file_content(self.cleaned_data["txt"])
@login_required
def edit_shepherd_writeup(request, name):
    """Change this document's shepherd writeup.

    POST actions:
      - "submit_response": save the uploaded file (preferred) or the textarea
        content as a WriteupDocEvent.
      - "reset_text": re-render the writeup template and show it in the form.
    Any other POST payload falls through to the default form, which is
    pre-filled from the latest stored writeup (or the template if none).
    """
    doc = get_object_or_404(Document, type="draft", name=name)

    # Stream chairs, the shepherd themselves, and ADs may edit.
    can_edit_stream_info = is_authorized_in_doc_stream(request.user, doc)
    can_edit_shepherd_writeup = ( can_edit_stream_info
        or (doc.shepherd and user_is_person(request.user, doc.shepherd.person))
        or has_role(request.user, ["Area Director"]))

    if not can_edit_shepherd_writeup:
        permission_denied(request, "You do not have the necessary permissions to view this page")

    login = request.user.person

    if request.method == 'POST':
        if "submit_response" in request.POST:
            form = ShepherdWriteupUploadForm(request.POST, request.FILES)
            if form.is_valid():
                # An uploaded file wins over the textarea content.
                from_file = form.cleaned_data['txt']
                if from_file:
                    writeup = from_file
                else:
                    writeup = form.cleaned_data['content']
                e = WriteupDocEvent(doc=doc, rev=doc.rev, by=login, type="changed_protocol_writeup")

                # Add the shepherd writeup to description if the document is in submitted for publication state
                stream_state = doc.get_state("draft-stream-%s" % doc.stream_id)
                iesg_state = doc.get_state("draft-iesg")
                if (iesg_state or (stream_state and stream_state.slug=='sub-pub')):
                    e.desc = writeup
                else:
                    e.desc = "Changed document writeup"

                e.text = writeup
                e.save()

                return redirect('ietf.doc.views_doc.document_main', name=doc.name)

        elif "reset_text" in request.POST:
            init = { "content": render_to_string("doc/shepherd_writeup.txt",dict(doc=doc))}
            form = ShepherdWriteupUploadForm(initial=init)

        # Protect against handcrufted malicious posts
        else:
            form = None

    else:
        form = None

    # Default: pre-fill with the latest stored writeup, or the template.
    if not form:
        init = { "content": ""}

        previous_writeup = doc.latest_event(WriteupDocEvent,type="changed_protocol_writeup")
        if previous_writeup:
            init["content"] = previous_writeup.text
        else:
            init["content"] = render_to_string("doc/shepherd_writeup.txt",
                                               dict(doc=doc),
                                               )
        form = ShepherdWriteupUploadForm(initial=init)

    return render(request, 'doc/draft/change_shepherd_writeup.html',
                  {'form': form,
                   'doc' : doc,
                   })
class ShepherdForm(forms.Form):
    """Form for assigning (or, with an empty value, clearing) a document shepherd."""
    shepherd = SearchableEmailField(required=False, only_users=True)
def edit_shepherd(request, name):
    """Change the shepherd for a Document.

    On a change: records an "added_comment" DocEvent, and if the new
    shepherd's address is not yet in the notification list, appends it and
    records a notify-changed event as well.
    """
    # TODO - this shouldn't be type="draft" specific
    doc = get_object_or_404(Document, type="draft", name=name)

    can_edit_stream_info = is_authorized_in_doc_stream(request.user, doc)
    if not can_edit_stream_info:
        permission_denied(request, "You do not have the necessary permissions to view this page.")

    if request.method == 'POST':
        form = ShepherdForm(request.POST)
        if form.is_valid():
            if form.cleaned_data['shepherd'] != doc.shepherd:
                events = []

                doc.shepherd = form.cleaned_data['shepherd']
                if doc.shepherd and not doc.shepherd.origin:
                    # Record why this Email object exists.
                    doc.shepherd.origin = 'shepherd: %s' % doc.name
                    doc.shepherd.save()

                c = DocEvent(type="added_comment", doc=doc, rev=doc.rev, by=request.user.person)
                c.desc = "Document shepherd changed to "+ (doc.shepherd.person.name if doc.shepherd else "(None)")
                c.save()
                events.append(c)

                # Make sure the new shepherd is among the notification addresses.
                if doc.shepherd and (doc.shepherd.address not in doc.notify):
                    addrs = doc.notify
                    if addrs:
                        addrs += ', '
                    addrs += doc.shepherd.address

                    # Reuse the comment event's timestamp so both events align.
                    c = make_notify_changed_event(request, doc, request.user.person, addrs, c.time)
                    c.desc += " because the document shepherd was set"
                    c.save()
                    events.append(c)

                    doc.notify = addrs

                doc.save_with_history(events)

            else:
                messages.info(request,"The selected shepherd was already assigned - no changes have been made.")

            return redirect('ietf.doc.views_doc.document_main', name=doc.name)

    else:
        form = ShepherdForm(initial={ "shepherd": doc.shepherd_id })

    return render(request, 'doc/change_shepherd.html', {
        'form': form,
        'doc': doc,
    })
class ChangeShepherdEmailForm(forms.Form):
    """Form for picking which of the current shepherd's addresses to use."""
    shepherd = forms.ModelChoiceField(queryset=Email.objects.all(), label="Shepherd email", empty_label=None)

    def __init__(self, *args, **kwargs):
        super(ChangeShepherdEmailForm, self).__init__(*args, **kwargs)
        # Restrict the choices to addresses belonging to the current shepherd
        # (initial["shepherd"] is the currently-assigned Email's key).
        field = self.fields["shepherd"]
        field.queryset = field.queryset.filter(person__email=self.initial["shepherd"]).distinct()
def change_shepherd_email(request, name):
    """Change the shepherd email address for a Document."""
    doc = get_object_or_404(Document, name=name)

    if not doc.shepherd:
        raise Http404

    # Editable by stream chairs and by the shepherd themselves.
    can_edit_stream_info = is_authorized_in_doc_stream(request.user, doc)
    is_shepherd = user_is_person(request.user, doc.shepherd and doc.shepherd.person)
    if not can_edit_stream_info and not is_shepherd:
        permission_denied(request, "You do not have the necessary permissions to view this page")

    initial = { "shepherd": doc.shepherd_id }
    if request.method == 'POST':
        form = ChangeShepherdEmailForm(request.POST, initial=initial)
        if form.is_valid():
            new_email = form.cleaned_data['shepherd']
            if new_email == doc.shepherd:
                messages.info(request,"The selected shepherd address was already assigned - no changes have been made.")
            else:
                doc.shepherd = new_email
                c = DocEvent(type="added_comment", doc=doc, rev=doc.rev, by=request.user.person)
                c.desc = "Document shepherd email changed"
                c.save()
                doc.save_with_history([c])
            return redirect('ietf.doc.views_doc.document_main', name=doc.name)
    else:
        form = ChangeShepherdEmailForm(initial=initial)

    return render(request, 'doc/change_shepherd_email.html', {
        'form': form,
        'doc': doc,
    })
class AdForm(forms.Form):
    """Form for selecting the shepherding Area Director of a draft."""
    ad = forms.ModelChoiceField(Person.objects.filter(role__name="ad", role__group__state="active", role__group__type="area").order_by('name'),
                                label="Shepherding AD", empty_label="(None)", required=False)

    def __init__(self, doc, *args, **kwargs):
        # super(self.__class__, ...) breaks under subclassing (a subclass
        # would recurse here forever), so name the class explicitly.
        super(AdForm, self).__init__(*args, **kwargs)
        self.doc = doc

        # if previous AD is now ex-AD, append that person to the list
        ad_pk = self.initial.get('ad')
        choices = self.fields['ad'].choices
        if ad_pk and ad_pk not in [pk for pk, name in choices]:
            self.fields['ad'].choices = list(choices) + [("", "-------"), (ad_pk, Person.objects.get(pk=ad_pk).plain_name())]

    def clean_ad(self):
        """Require an AD whenever the draft is in an active IESG state."""
        ad = self.cleaned_data['ad']
        state = self.doc.get_state('draft-iesg')
        if not ad:
            # NOTE(review): assumes get_state('draft-iesg') never returns
            # None here -- confirm; state.slug would raise otherwise.
            if state.slug not in ['idexists','dead']:
                raise forms.ValidationError("Drafts in state %s must have an assigned AD." % state)
        return ad
@role_required("Area Director", "Secretariat")
def edit_ad(request, name):
    """Change the shepherding Area Director for this draft."""
    doc = get_object_or_404(Document, type="draft", name=name)

    if request.method == 'POST':
        form = AdForm(doc, request.POST)
        if form.is_valid():
            new_ad = form.cleaned_data['ad']
            if new_ad != doc.ad:
                doc.ad = new_ad

                c = DocEvent(type="added_comment", doc=doc, rev=doc.rev, by=request.user.person)
                # Parenthesize the conditional: without parentheses the ternary
                # swallowed the prefix and the description became just "None"
                # whenever the AD was cleared.
                c.desc = "Shepherding AD changed to " + (doc.ad.name if doc.ad else "None")
                c.save()

                doc.save_with_history([c])

            return redirect('ietf.doc.views_doc.document_main', name=doc.name)
    else:
        init = { "ad" : doc.ad_id }
        form = AdForm(doc, initial=init)

    return render(request, 'doc/draft/change_ad.html',
                  {'form': form,
                   'doc': doc,
                   },
                  )
class ConsensusForm(forms.Form):
    """Form asking whether the RFC consensus boilerplate should be included."""
    consensus = forms.ChoiceField(choices=(("Unknown", "Unknown"), ("Yes", "Yes"), ("No", "No")),
                                  required=True, label="When published as an RFC, should the consensus boilerplate be included?")
def edit_consensus(request, name):
    """When this draft is published as an RFC, should it include the consensus boilerplate or not."""
    doc = get_object_or_404(Document, type="draft", name=name)

    if not (has_role(request.user, ("Secretariat", "Area Director"))
            or is_authorized_in_doc_stream(request.user, doc)):
        permission_denied(request, "You do not have the necessary permissions to view this page.")

    # Previous setting: latest recorded event, else the stream default.
    e = doc.latest_event(ConsensusDocEvent, type="changed_consensus")
    prev_consensus = e.consensus if e else default_consensus(doc)

    if request.method == 'POST':
        form = ConsensusForm(request.POST)
        if form.is_valid():
            # NOTE(review): cleaned_data["consensus"] is a string ("Yes"/...)
            # while prev_consensus is a bool/None, so this comparison looks
            # like it is always unequal -- confirm whether an event should
            # really be recorded on every submit.
            if form.cleaned_data["consensus"] != prev_consensus:
                e = ConsensusDocEvent(doc=doc, rev=doc.rev, type="changed_consensus", by=request.user.person)
                # Map the form's string choice onto the tri-state value.
                e.consensus = {"Unknown":None,"Yes":True,"No":False}[form.cleaned_data["consensus"]]
                if not e.consensus and doc.intended_std_level_id in ("std", "ds", "ps", "bcp"):
                    permission_denied(request, "BCPs and Standards Track documents must include the consensus boilerplate.")
                e.desc = "Changed consensus to <b>%s</b> from %s" % (nice_consensus(e.consensus),
                                                                    nice_consensus(prev_consensus))
                e.save()

            return redirect('ietf.doc.views_doc.document_main', name=doc.name)

    else:
        form = ConsensusForm(initial=dict(consensus=nice_consensus(prev_consensus)))

    return render(request, 'doc/draft/change_consensus.html',
                  {'form': form,
                   'doc': doc,
                   },
                  )
def edit_doc_extresources(request, name):
    """Edit the set of external resources (tagged links) attached to a document."""
    doc = get_object_or_404(Document, name=name)

    if not can_edit_docextresources(request.user, doc):
        permission_denied(request, "You do not have the necessary permissions to view this page.")

    if request.method == 'POST':
        form = ExtResourceForm(request.POST)
        if form.is_valid():
            changed = update_doc_extresources(doc, form.cleaned_data['resources'], by=request.user.person)
            if changed:
                messages.success(request,"Document resources updated.")
            else:
                messages.info(request,"No change in Document resources.")
            return redirect('ietf.doc.views_doc.document_main', name=doc.name)
    else:
        form = ExtResourceForm(initial={'resources': doc.docextresource_set.all()})

    # May need to explain the tags more - probably more reason to move to a formset.
    info = "Valid tags:<br><br> %s" % ', '.join(form.valid_resource_tags())
    title = "Additional document resources"

    return render(request, 'doc/edit_field.html',dict(doc=doc, form=form, title=title, info=info) )
def request_publication(request, name):
"""Request publication by RFC Editor for a document which hasn't
been through the IESG ballot process."""
class PublicationForm(forms.Form):
subject = forms.CharField(max_length=200, required=True)
body = forms.CharField(widget=forms.Textarea, required=True, strip=False)
doc = get_object_or_404(Document, type="draft", name=name, stream__in=("iab", "ise", "irtf"))
if not is_authorized_in_doc_stream(request.user, doc):
permission_denied(request, "You do not have the necessary permissions to view this page.")
consensus_event = doc.latest_event(ConsensusDocEvent, type="changed_consensus")
m = Message()
m.frm = request.user.person.formatted_email()
(m.to, m.cc) = gather_address_lists('pubreq_rfced',doc=doc).as_strings()
m.by = request.user.person
next_state = State.objects.get(used=True, type="draft-stream-%s" % doc.stream.slug, slug="rfc-edit")
if request.method == 'POST' | |
leger lines: {0}'
''.format(notehead.id))
# Determine: is notehead above or below staff?
is_above_staff = (notehead.top < current_staff.top)
# Determine: is notehead on/next to (closest) leger line?
# This needs to be done *after* we know whether the notehead
# is above/below staff: if the notehead is e.g. above,
# then it would be weird to find out it is in the
# mini-staffspace *below* the closest leger line,
# signalling a mistake in the data.
closest_ll = min(lls, key=lambda x: (x.top - notehead.top) ** 2 + (x.bottom - notehead.bottom) ** 2)
# Determining whether the notehead is on a leger
# line or in the adjacent temp staffspace.
# This uses a magic number, ON_STAFFLINE_RATIO_THRESHOLD.
on_leger_line = True
### DEBUG!!!
dtop, dbottom = 1, 1
# Weird situation with notehead vertically *inside* bbox
# of leger line (could happen with slanted LLs and very small
# noteheads).
if closest_ll.top <= notehead.top <= notehead.bottom <= closest_ll.bottom:
on_leger_line = True
# No vertical overlap between LL and notehead
elif closest_ll.top > notehead.bottom:
on_leger_line = False
elif notehead.top > closest_ll.bottom:
on_leger_line = False
# Complicated situations: overlap
else:
# Notehead "around" leger line.
if notehead.top < closest_ll.top <= closest_ll.bottom < notehead.bottom:
dtop = closest_ll.top - notehead.top
dbottom = notehead.bottom - closest_ll.bottom
if min(dtop, dbottom) / max(dtop, dbottom) \
< _CONST.ON_STAFFLINE_RATIO_THRESHOLD:
on_leger_line = False
# Check orientation congruent with rel. to staff.
# If it is wrong (e.g., notehead mostly under LL
# but above staffline, and looks like off-LL),
# change back to on-LL.
if (dtop > dbottom) and not is_above_staff:
on_leger_line = True
logging.debug('Notehead in LL space with wrong orientation '
'w.r.t. staff:'
' {0}'.format(notehead.id))
if (dbottom > dtop) and is_above_staff:
on_leger_line = True
logging.debug('Notehead in LL space with wrong orientation '
'w.r.t. staff:'
' {0}'.format(notehead.id))
# Notehead interlaced with leger line, notehead on top
elif notehead.top < closest_ll.top <= notehead.bottom <= closest_ll.bottom:
# dtop = closest_ll.top - notehead.top
# dbottom = max(notehead.bottom - closest_ll.top, 1)
# if float(dbottom) / float(dtop) \
# < _CONST.ON_STAFFLINE_RATIO_TRHESHOLD:
on_leger_line = False
# Notehead interlaced with leger line, leger line on top
elif closest_ll.top <= notehead.top <= closest_ll.bottom < notehead.bottom:
# dtop = max(closest_ll.bottom - notehead.top, 1)
# dbottom = notehead.bottom - closest_ll.bottom
# if float(dtop) / float(dbottom) \
# < _CONST.ON_STAFFLINE_RATIO_TRHESHOLD:
on_leger_line = False
else:
raise ValueError('Strange notehead {0} vs. leger line {1}'
' situation: bbox notehead {2}, LL {3}'
''.format(notehead.id, closest_ll.id,
notehead.bounding_box,
closest_ll.bounding_box))
delta = (2 * n_lls - 1) + 5
if not on_leger_line:
delta += 1
if not is_above_staff:
delta *= -1
return delta
elif len(staffline_objects) == 1:
current_staffline = staffline_objects[0]
# Count how far from the current staffline we are.
# - Collect staffline objects from the current staff
all_staffline_objects = self.__children(current_staff,
_CONST.STAFFLINE_CLASS_NAMES)
# - Determine their ordering, top to bottom
sorted_staffline_objects = sorted(all_staffline_objects,
key=lambda x: (x.top + x.bottom) / 2.)
delta = None
for i, s in enumerate(sorted_staffline_objects):
if s.id == current_staffline.id:
delta = 5 - i
if delta is None:
raise ValueError('Notehead {0} attached to staffline {1},'
' which is however not a child of'
' the notehead\'s staff {2}!'
''.format(notehead.id, current_staffline.id,
current_staff.id))
return delta
else:
raise ValueError('Notehead {0} attached to more than one'
' staffline/staffspace!'.format(notehead.id))
    def process_measure_separator(self, measure_separator):
        """Handle a measure separator: inline accidentals only last until the
        barline, so reset them.  The symbol itself is not otherwise used."""
        self.pitch_state.reset_inline_accidentals()
    def process_key_signature(self, key_signature):
        """Update the pitch-inference key from a key signature symbol.

        The key is derived purely from the counts of sharp/flat children.
        """
        sharps = self.__children(key_signature, ['sharp'])
        flats = self.__children(key_signature, ['flat'])
        self.pitch_state.set_key(len(sharps), len(flats))
def process_clef(self, clef):
# Check for staffline children
stafflines = self.__children(clef, class_names=_CONST.STAFFLINE_CLASS_NAMES)
if len(stafflines) == 0:
logging.info('Clef not connected to any staffline, assuming default'
' position: {0}'.format(clef.id))
self.pitch_state.init_base_pitch(clef=clef)
else:
# Compute clef staffline delta from middle staffline.
delta = self.staffline_delta(clef)
logging.info('Clef {0}: computed staffline delta {1}'
''.format(clef.id, delta))
self.pitch_state.init_base_pitch(clef=clef, delta=delta)
def _collect_symbols_for_pitch_inference(self, nodes: List[Node],
ignore_nonstaff=True):
"""Extract all symbols from the document relevant for pitch
inference and index them in the Engine's temp data structures."""
graph = NotationGraph(nodes)
# Collect staves.
self.staves = [c for c in nodes if c.class_name == InferenceEngineConstants.STAFF]
logging.info('We have {0} staves.'.format(len(self.staves)))
# Collect clefs and key signatures per staff.
self.clefs = [c for c in nodes
if c.class_name in _CONST.CLEF_CLASS_NAMES]
if ignore_nonstaff:
self.clefs = [c for c in self.clefs if graph.has_children(c, [InferenceEngineConstants.STAFF])]
self.key_signatures = [c for c in nodes
if c.class_name == InferenceEngineConstants.KEY_SIGNATURE]
if ignore_nonstaff:
self.key_signatures = [c for c in self.key_signatures
if graph.has_children(c, [InferenceEngineConstants.STAFF])]
self.clef_to_staff_map = {}
# There may be more than one clef per staff.
self.staff_to_clef_map = collections.defaultdict(list)
for c in self.clefs:
# Assuming one staff per clef
try:
s = self.__children(c, [InferenceEngineConstants.STAFF])[0]
except (KeyError, ValueError):
logging.warning('Clef {0} has no staff attached! Will not be'
' part of pitch inference.'.format(c.id))
continue
self.clef_to_staff_map[c.id] = s
self.staff_to_clef_map[s.id].append(c)
self.key_to_staff_map = {}
# There may be more than one key signature per staff.
self.staff_to_key_map = collections.defaultdict(list)
for k in self.key_signatures:
try:
s = self.__children(k, [InferenceEngineConstants.STAFF])[0]
except KeyError:
logging.warning('Key signature {0} has no staff attached! Will not be'
' part of pitch inference.'.format(k.id))
continue
self.key_to_staff_map[k.id] = s
self.staff_to_key_map[s.id].append(k)
# Collect measure separators.
self.measure_separators = [c for c in nodes
if c.class_name == InferenceEngineConstants.MEASURE_SEPARATOR]
if ignore_nonstaff:
self.measure_separators = [c for c in self.measure_separators
if graph.has_children(c, [InferenceEngineConstants.STAFF])]
self.staff_to_msep_map = collections.defaultdict(list)
for m in self.measure_separators:
_m_staves = self.__children(m, [InferenceEngineConstants.STAFF])
# (Measure separators might belong to multiple staves.)
for s in _m_staves:
self.staff_to_msep_map[s.id].append(m)
# Collect accidentals per notehead.
# Collect noteheads.
self.noteheads = [c for c in nodes
if c.class_name in _CONST.NOTEHEAD_CLASS_NAMES]
if ignore_nonstaff:
self.noteheads = [c for c in self.noteheads
if graph.has_children(c, [InferenceEngineConstants.STAFF])]
self.staff_to_noteheads_map = collections.defaultdict(list)
for n in self.noteheads:
s = self.__children(n, [InferenceEngineConstants.STAFF])[0]
self.staff_to_noteheads_map[s.id].append(n)
def __children(self, c: Node, class_names: List[str]) -> List[Node]:
"""Retrieve the children of the given Node ``c``
that have class in ``clsnames``."""
return [self.id_to_node_mapping[o] for o in c.outlinks
if self.id_to_node_mapping[o].class_name in class_names]
def __parents(self, c: Node, class_names: List[str]) -> List[Node]:
"""Retrieve the parents of the given Node ``c``
that have class in ``clsnames``."""
return [self.id_to_node_mapping[i] for i in c.inlinks
if self.id_to_node_mapping[i].class_name in class_names]
def __warning_or_error(self, message):
if self.strategy.permissive:
logging.warning(message)
else:
raise ValueError(message)
class OnsetsInferenceEngine(object):
def __init__(self, nodes: List[Node], strategy=OnsetsInferenceStrategy()):
"""Initialize the onset inference engine with the full Node
list in a document."""
self.id_to_node_mapping = {c.id: c for c in nodes}
self.strategy = strategy
def durations(self, nodes: List[Node], ignore_modifiers: bool = False) -> Dict[int, float]:
"""Returns a dict that contains the durations (in beats)
of all Nodes that should be associated with a duration.
The dict keys are ``id``.
:param ignore_modifiers: If set, will ignore duration dots,
tuples, and other potential duration modifiers when computing
the durations. Effectively, this gives you classes that
correspond to note(head) type: whole (4.0), half (2.0),
quarter (1.0), eighth (0.5), etc.
"""
# Generate & return the durations dictionary.
_relevant_clsnames = _CONST.classes_bearing_duration
duration_nodes = [c for c in nodes
if c.class_name in _relevant_clsnames]
durations = {c.id: self.beats(c, ignore_modifiers=ignore_modifiers)
for c in duration_nodes}
return durations
def beats(self, node: Node, ignore_modifiers=False):
if node.class_name in _CONST.NOTEHEAD_CLASS_NAMES:
return self.notehead_beats(node,
ignore_modifiers=ignore_modifiers)
elif node.class_name in _CONST.REST_CLASS_NAMES:
return self.rest_beats(node,
ignore_modifiers=ignore_modifiers)
else:
raise ValueError('Cannot compute beats for object {0} of class {1};'
' beats only available for notes and rests.'
''.format(node.id, node.class_name))
def notehead_beats(self, notehead, ignore_modifiers=False) -> float:
"""Retrieves the duration for the given notehead, in beats.
It is possible that the notehead has two stems.
In that case, we return all the possible durations:
usually at most two, but if there is a duration dot, then
there can be up to 4 possibilities.
Grace notes currently return 0 beats.
:param ignore_modifiers: If given, will ignore all duration
modifiers: Duration dots, tuples, and other potential duration
modifiers when computing the durations. Effectively, this
gives you classes that correspond to note(head) type:
whole (4.0), half (2.0), quarter (1.0), eighth (0.5), etc.
:returns: A list of possible durations for the given notehead.
Mostly its length is just 1; for multi-stem noteheads,
you might get more.
"""
beat = [0]
stems = self.children(notehead, [_CONST.STEM])
flags_and_beams = self.children(
notehead,
_CONST.FLAGS_AND_BEAMS)
if notehead.class_name in _CONST.GRACE_NOTEHEAD_CLASS_NAMES:
logging.warning('Notehead {0}: Grace notes get zero duration!'
''.format(notehead.id))
| |
<filename>src/oogeso/core/devices/base.py
import logging
from abc import ABC, abstractmethod
from typing import Dict, List, Optional, Union
import pyomo.environ as pyo
from pyomo.core import Constraint
from oogeso.core.networks.network_node import NetworkNode
from oogeso.dto import CarrierData, DeviceData, TimeSeriesData
logger = logging.getLogger(__name__)
class Device(ABC):
"""
Parent class from which all device types derive
"""
# Common class parameters:
carrier_in = list()
carrier_out = list()
serial = list()
    def __init__(self, dev_data: DeviceData, carrier_data_dict: Dict[str, CarrierData]):
        """Device object constructor

        :param dev_data: static data describing this device (id, limits, profile, ...)
        :param carrier_data_dict: carrier data objects indexed by carrier name
        """
        self.dev_data = dev_data
        self.id = dev_data.id
        self.carrier_data = carrier_data_dict
        # Set later via add_node(); None until the device is attached to a node.
        self.node: Optional[NetworkNode] = None
        # Constraint handle; None until assigned elsewhere -- TODO confirm usage.
        self._flow_upper_bound: Optional[pyo.Constraint] = None
    def add_node(self, node: NetworkNode) -> None:
        """Associate the given network node with this device."""
        self.node = node
def set_init_values(self, pyomo_model: pyo.Model) -> None:
"""Method invoked to specify optimisation problem initial value parameters"""
# TODO: move this to each subclass instead?
dev_id = self.id
dev_data = self.dev_data
if hasattr(dev_data, "E_init"):
pyomo_model.paramDeviceEnergyInitially[dev_id] = dev_data.E_init
if dev_data.start_stop is not None:
pyomo_model.paramDeviceIsOnInitially[dev_id] = dev_data.start_stop.is_on_init
if hasattr(dev_data, "P_init"):
pyomo_model.paramDevicePowerInitially[dev_id] = dev_data.P_init
def _rule_device_flow_max(self, pyomo_model: pyo.Model, t: int) -> Union[pyo.Expression, pyo.Constraint.Skip]:
power = self.get_flow_var(pyomo_model, t)
if power is None:
return pyo.Constraint.Skip
max_value = self.get_max_flow(pyomo_model, t)
expr = power <= max_value
return expr
def _rule_device_flow_min(self, pyomo_model: pyo.Model, t: int) -> Union[pyo.Expression, pyo.Constraint.Skip]:
power = self.get_flow_var(pyomo_model, t)
if power is None:
return pyo.Constraint.Skip
min_value = self.dev_data.flow_min
if self.dev_data.profile is not None:
# use an availability profile if provided
ext_profile = self.dev_data.profile
min_value = min_value * pyomo_model.paramProfiles[ext_profile, t]
ison = 1
if self.dev_data.start_stop is not None:
ison = pyomo_model.varDeviceIsOn[self.id, t]
expr = power >= ison * min_value
return expr
def _rule_ramp_rate(self, pyomo_model: pyo.Model, t: int) -> Union[pyo.Expression, pyo.Constraint.Skip]:
"""power ramp rate limit"""
dev = self.id
dev_data = self.dev_data
# If no ramp limits have been specified, skip constraint
if self.dev_data.max_ramp_up is None:
return pyo.Constraint.Skip
if t > 0:
p_prev = self.get_flow_var(pyomo_model, t - 1)
else:
p_prev = pyomo_model.paramDevicePowerInitially[dev]
p_this = self.get_flow_var(pyomo_model, t)
delta_P = p_this - p_prev
delta_t = pyomo_model.paramTimestepDeltaMinutes
max_P = dev_data.flow_max
max_neg = -dev_data.max_ramp_down * max_P * delta_t
max_pos = dev_data.max_ramp_up * max_P * delta_t
expr = pyo.inequality(max_neg, delta_P, max_pos)
return expr
    def _rule_startup_shutdown(self, model: pyo.Model, t: int) -> Union[pyo.Expression, pyo.Constraint.Skip]:
        """startup/shutdown constraint
        connecting starting, stopping, preparation, online stages of GTs

        Enforces isOn[t] - isOn[t-1] == started[t - T_delay] - stopping[t],
        i.e. a device comes online exactly T_delay steps after a start was
        initiated, and goes offline when it stops.
        """
        dev = self.id
        T_delay_min = self.dev_data.start_stop.delay_start_minutes
        time_delta_minutes = model.paramTimestepDeltaMinutes
        # Delay in time-steps, rounding down.
        # example: time_delta = 5 min, delay_start_minutes= 8 min => T_delay=1
        T_delay = int(T_delay_min / time_delta_minutes)
        prev_part = 0
        if t >= T_delay:
            # the start that completes its preparation at step t
            # prev_part = sum( model.varDeviceStarting[dev,t-tau]
            # for tau in range(0,T_delay) )
            prev_part = model.varDeviceStarting[dev, t - T_delay]
        else:
            # start happened before the current optimisation horizon: it
            # completes at step t if prep time so far plus t equals the delay
            # NOTE: for this to work as intended, may need to reconstruct constraint
            # pyo.value(...) needed in pyomo v6
            prep_init = pyo.value(model.paramDevicePrepTimestepsInitially[dev])
            if prep_init + t == T_delay:
                prev_part = 1
        if t > 0:
            is_on_prev = model.varDeviceIsOn[dev, t - 1]
        else:
            # first step: compare against on/off state before the horizon
            is_on_prev = model.paramDeviceIsOnInitially[dev]
        lhs = model.varDeviceIsOn[dev, t] - is_on_prev
        rhs = prev_part - model.varDeviceStopping[dev, t]
        return lhs == rhs
    def _rule_startup_delay(self, pyomo_model: pyo.Model, t: int) -> Union[bool, pyo.Constraint, pyo.Constraint.Skip]:
        """startup delay/preparation for GTs

        The device is "in preparation" at step t iff a start was initiated
        within the last T_delay steps (including a start begun before the
        optimisation horizon).
        """
        dev = self.id
        time_delta_minutes = pyomo_model.paramTimestepDeltaMinutes
        T_delay_min = self.dev_data.start_stop.delay_start_minutes
        # Delay in time-steps, rounding down.
        # example: time_delta = 5 min, startupDelay= 8 min => T_delay=1
        T_delay = int(T_delay_min / time_delta_minutes)
        if T_delay == 0:
            # no preparation period -> no constraint needed
            return pyo.Constraint.Skip
        # determine if was in preparation previously
        # dependent on value - so must reconstruct constraint each time
        steps_prev_prep = pyo.value(pyomo_model.paramDevicePrepTimestepsInitially[dev])
        if steps_prev_prep > 0:
            prev_is_prep = 1
        else:
            prev_is_prep = 0
        prev_part = 0
        if t < T_delay - steps_prev_prep:
            # still inside the preparation window of a start that happened
            # before the optimisation horizon
            prev_part = prev_is_prep
        # starts within the horizon that are still inside their delay window
        tau_range = range(0, min(t + 1, T_delay))
        lhs = pyomo_model.varDeviceIsPrep[dev, t]
        rhs = sum(pyomo_model.varDeviceStarting[dev, t - tau] for tau in tau_range) + prev_part
        return lhs == rhs
    def define_constraints(self, pyomo_model: pyo.Model) -> List[Constraint]:
        """Build constraints for the device and add to pyomo model.

        Returns list of constraints that need to be reconstructed between each
        optimisation

        Fixme: Make setattr robust. Possible to set the wrong attribute without knowing.
        """
        list_to_reconstruct = [] # Default
        # Capacity (max flow) constraint
        if self.dev_data.flow_max is not None:
            constrDevicePmax = pyo.Constraint(pyomo_model.setHorizon, rule=self._rule_device_flow_max)
            setattr(
                pyomo_model,
                f"constr_{self.id}_flowMax",
                constrDevicePmax,
            )
        # Minimum flow constraint
        if self.dev_data.flow_min is not None:
            constrDevicePmin = pyo.Constraint(pyomo_model.setHorizon, rule=self._rule_device_flow_min)
            setattr(
                pyomo_model,
                f"constr_{self.id}_flowMin",
                constrDevicePmin,
            )
        # Ramp-rate constraint (built when either direction is limited)
        if (self.dev_data.max_ramp_up is not None) or (self.dev_data.max_ramp_down is not None):
            constrDevice_ramprate = pyo.Constraint(pyomo_model.setHorizon, rule=self._rule_ramp_rate)
            setattr(
                pyomo_model,
                f"constr_{self.id}_ramprate",
                constrDevice_ramprate,
            )
        # Start/stop devices additionally need the on/off balance and
        # startup-delay constraints.  Both rules read initial-state parameter
        # values via pyo.value(), so they must be rebuilt between
        # optimisations -- hence they go into list_to_reconstruct.
        if self.dev_data.start_stop is not None:
            constrDevice_startup_shutdown = pyo.Constraint(pyomo_model.setHorizon, rule=self._rule_startup_shutdown)
            setattr(
                pyomo_model,
                f"constr_{self.id}_startstop",
                constrDevice_startup_shutdown,
            )
            constrDevice_startup_delay = pyo.Constraint(pyomo_model.setHorizon, rule=self._rule_startup_delay)
            setattr(
                pyomo_model,
                f"constr_{self.id}_startdelay",
                constrDevice_startup_delay,
            )
            # TODO: Add constraints for minimum up and down-time
            # return list of constraints that need to be reconstructed:
            list_to_reconstruct = [
                constrDevice_startup_shutdown,
                constrDevice_startup_delay,
            ]
        return list_to_reconstruct
    @abstractmethod
    def get_flow_var(self, pyomo_model: pyo.Model, t: int) -> Optional[float]:
        """Return the variable/expression for the device's flow at time-step t.

        Implemented by subclasses.  May return None, in which case the
        flow min/max constraint rules skip the time-step.
        """
        pass
def get_max_flow(self, pyomo_model: pyo.Model, t: int) -> float:
"""
Return available capacity at given time-step.
This is given by the "flow_max" input parameter, profile value (if any), and
whether device is on/off.
"""
max_value = self.dev_data.flow_max
if self.dev_data.profile is not None:
ext_profile = self.dev_data.profile
max_value = max_value * pyomo_model.paramProfiles[ext_profile, t]
if self.dev_data.start_stop is not None:
is_on = pyomo_model.varDeviceIsOn[self.id, t]
max_value = is_on * max_value
return max_value
def set_flow_upper_bound(self, profiles: List[TimeSeriesData]) -> None:
"""
Maximum flow value through entire profile.
Given as the product of the "flow_max" parameter and profile values.
"""
ub = self.dev_data.flow_max
if self.dev_data.profile is not None:
ext_profile = self.dev_data.profile
prof_max = None
for prof in profiles:
if prof.id == ext_profile:
prof_max = max(prof.data)
if prof.data_nowcast is not None:
prof_nowcast_max = max(prof.data_nowcast)
prof_max = max(prof_max, prof_nowcast_max)
ub = ub * prof_max
break
if prof_max is None:
logger.warning(
"Profile (%s) defined for device %s was not found",
ext_profile,
self.dev_data.id,
)
self._flow_upper_bound = ub
    def get_flow_upper_bound(self) -> Optional[float]:
        """Returns the maximum possible flow given capacity and profile"""
        # Used by piecewise linear constraints.
        # NOTE: the stored value is a plain number written by
        # set_flow_upper_bound(), not a pyomo Constraint.
        return self._flow_upper_bound
def compute_export(
self, pyomo_model: pyo.Model, value: str, carriers: List[CarrierData], timesteps: List[int]
) -> float:
"""Compute average export (volume or revenue)
Parameters:
-----------
value : str
"revenue" (โฌ/s) or "volume" (Sm3oe/s)
carriers : list of carriers ("gas","oil","el")
timesteps : list of timesteps
Fixme: price needs to be handled in subclasses, or price needs to be an Optional attribute of DeviceData.
"""
carriers_in = self.carrier_in
carriers_incl = [v for v in carriers if v in carriers_in]
sumValue = 0
if not hasattr(self.dev_data, "price"):
return 0
for carrier in carriers_incl:
# flow in m3/s, price in $/m3
if self.dev_data.price is not None:
if carrier in self.dev_data.price:
inflow = sum(pyomo_model.varDeviceFlow[self.id, carrier, "in", t] for t in timesteps)
if value == "revenue":
sumValue += inflow * self.dev_data.price[carrier]
elif value == "volume":
volumefactor = 1
if carrier == "gas":
volumefactor = 1 / 1000 # Sm3 to Sm3oe
sumValue += inflow * volumefactor
return sumValue
    def compute_el_reserve(self, pyomo_model: pyo.Model, t: int) -> Dict[str, float]:
        """Compute available reserve power from this device

        device parameter "reserve_factor" specifies how large part of the
        available capacity should count towards the reserve (1=all, 0=none)

        Returns a dict with "capacity_available", "capacity_used" and
        "loadreduction_available" entries.
        """
        rf = 1
        load_reduction = 0
        cap_avail = 0
        p_generating = 0
        if "el" in self.carrier_out:
            # Generators and storage
            max_value = self.get_max_flow(pyomo_model=pyomo_model, t=t)
            if self.dev_data.reserve_factor is not None:
                # safety margin - only count a part of the forecast power
                # towards the reserve, relevant for wind power
                # (equivalently, this may be seen as increasing the
                # reserve margin requirement)
                reserve_factor = self.dev_data.reserve_factor
                max_value = max_value * reserve_factor
                if reserve_factor == 0:
                    # no reserve contribution
                    rf = 0
            cap_avail = rf * max_value
            p_generating = rf * pyomo_model.varDeviceFlow[self.id, "el", "out", t]
        elif "el" in self.carrier_in:
            # Loads (only consider if reserve factor has been set)
            if self.dev_data.reserve_factor is not None:
                # load reduction possible
                f_lr = self.dev_data.reserve_factor
                load_reduction = f_lr * pyomo_model.varDeviceFlow[self.id, "el", "in", t]
        reserve = {
            "capacity_available": cap_avail,
            "capacity_used": p_generating,
            "loadreduction_available": load_reduction,
        }
        return reserve
def compute_startup_penalty(self, pyomo_model: pyo.Model, timesteps: List[int]) -> float:
"""start/stop penalty - generalised from gas turbine startup cost."""
penalty = 0
if self.dev_data.start_stop is not None:
penalty = (
sum(pyomo_model.varDeviceStarting[self.id, t] for t in timesteps)
* self.dev_data.start_stop.penalty_start
+ sum(pyomo_model.varDeviceStopping[self.id, t] for t in timesteps)
* self.dev_data.start_stop.penalty_stop
)
return penalty
def compute_operating_costs(self, | |
# By definition this means that the template is not a match, so
# we return None to indicate that we could not generate the product
# structures
# We need to think this way in order to distinguish between
# intermolecular and intramolecular versions of reaction families,
# which will have very different kinetics
# Unfortunately this may also squash actual errors with malformed
# reaction templates
return None
# If there are two product structures, place the one containing '*1' first
if len(productStructures) == 2:
if not productStructures[0].containsLabeledAtom('*1') and \
productStructures[1].containsLabeledAtom('*1'):
productStructures.reverse()
# If product structures are Molecule objects, update their atom types
for struct in productStructures:
if isinstance(struct, Molecule):
struct.updateAtomTypes()
# Return the product structures
return productStructures
    def __generateProductStructures(self, reactantStructures, maps, forward):
        """
        For a given set of `reactantStructures` and a given set of `maps`,
        generate and return the corresponding product structures. The
        `reactantStructures` parameter should be given in the order the
        reactants are stored in the reaction family template. The `maps`
        parameter is a list of mappings of the top-level tree node of each
        *template* reactant to the corresponding *structure*. This function
        returns the product structures, or None if the recipe produced
        nothing; raises ForbiddenStructureException for forbidden species.
        """
        # NOTE(review): 'template' is assigned here but never referenced in
        # the rest of this method -- presumably applyRecipe() consults the
        # family's template internally; confirm whether these two lines are
        # dead code before removing them.
        if not forward: template = self.reverseTemplate
        else: template = self.forwardTemplate
        # Clear any previous atom labeling from all reactant structures
        for struct in reactantStructures: struct.clearLabeledAtoms()
        # If there are two structures and they are the same, then make a copy
        # of the second one and adjust the second map to point to its atoms
        # This is for the case where A + A --> products
        if len(reactantStructures) == 2 and reactantStructures[0] == reactantStructures[1]:
            reactantStructures[1] = reactantStructures[1].copy(deep=True)
            newMap = {}
            # Python 2 dict iteration (this module is Python 2 code)
            for reactantAtom, templateAtom in maps[1].iteritems():
                index = reactantStructures[0].atoms.index(reactantAtom)
                newMap[reactantStructures[1].atoms[index]] = templateAtom
            maps[1] = newMap
        # Tag atoms with labels
        for m in maps:
            for reactantAtom, templateAtom in m.iteritems():
                reactantAtom.label = templateAtom.label
        # Generate the product structures by applying the forward reaction recipe
        try:
            productStructures = self.applyRecipe(reactantStructures, forward=forward)
            if not productStructures: return None
        except InvalidActionError, e:
            # Log enough context to debug the malformed recipe, then re-raise
            logging.error('Unable to apply reaction recipe!')
            logging.error('Reaction family is {0} in {1} direction'.format(self.label, 'forward' if forward else 'reverse'))
            logging.error('Reactant structures are:')
            for struct in reactantStructures:
                logging.error(struct.toAdjacencyList())
            raise
        # If there are two product structures, place the one containing '*1' first
        if len(productStructures) == 2:
            if not productStructures[0].containsLabeledAtom('*1') and \
                productStructures[1].containsLabeledAtom('*1'):
                productStructures.reverse()
        # Check that reactant and product structures are allowed in this family
        # If not, then stop
        if self.forbidden is not None:
            for struct in reactantStructures:
                if self.forbidden.isMoleculeForbidden(struct): raise ForbiddenStructureException()
            for struct in productStructures:
                if self.forbidden.isMoleculeForbidden(struct): raise ForbiddenStructureException()
        # Also check the global forbiddenStructures
        from rmgpy.data.rmg import database
        for struct in reactantStructures:
            if database.forbiddenStructures.isMoleculeForbidden(struct): raise ForbiddenStructureException()
        for struct in productStructures:
            if database.forbiddenStructures.isMoleculeForbidden(struct): raise ForbiddenStructureException()
        return productStructures
def __createReaction(self, reactants, products, isForward):
"""
Create and return a new :class:`Reaction` object containing the
provided `reactants` and `products` as lists of :class:`Molecule`
objects.
"""
# Make sure the products are in fact different than the reactants
if len(reactants) == len(products) == 1:
if reactants[0].isIsomorphic(products[0]):
return None
elif len(reactants) == len(products) == 2:
if reactants[0].isIsomorphic(products[0]) and reactants[1].isIsomorphic(products[1]):
return None
elif reactants[0].isIsomorphic(products[1]) and reactants[1].isIsomorphic(products[0]):
return None
# If forbidden structures are defined, make sure the products are not forbidden
if self.forbidden:
for product in products:
if self.forbidden.isMoleculeForbidden(product):
return None
# Also check the global forbiddenStructures
from rmgpy.data.rmg import database
for product in products:
if database.forbiddenStructures.isMoleculeForbidden(product): return None
# We need to save the reactant and product structures with atom labels so
# we can generate the kinetics
# We make copies so the structures aren't trampled on by later actions
reactants = [reactant.copy(deep=True) for reactant in reactants]
products = [product.copy(deep=True) for product in products]
for reactant in reactants:
reactant.updateAtomTypes()
reactant.updateConnectivityValues()
for product in products:
product.updateAtomTypes()
product.updateConnectivityValues()
# Create and return reaction object
return Reaction(reactants=reactants, products=products)
def __matchReactantToTemplate(self, reactant, templateReactant):
"""
Return ``True`` if the provided reactant matches the provided
template reactant and ``False`` if not, along with a complete list of the
mappings.
"""
if isinstance(templateReactant, list): templateReactant = templateReactant[0]
struct = templateReactant.item
if isinstance(struct, LogicNode):
mappings = []
for child_structure in struct.getPossibleStructures(self.groups.entries):
mappings.extend(reactant.findSubgraphIsomorphisms(child_structure))
return mappings
elif isinstance(struct, Group):
return reactant.findSubgraphIsomorphisms(struct)
    def generateReactions(self, reactants):
        """
        Generate all reactions between the provided list of one or two
        `reactants`, which should be either single :class:`Molecule` objects
        or lists of same. Does not estimate the kinetics of these reactions
        at this time. Returns a list of :class:`TemplateReaction` objects
        using :class:`Species` objects for both reactants and products. The
        reactions are constructed such that the forward direction is consistent
        with the template of this reaction family.
        """
        reactionList = []
        # Forward direction (the direction in which kinetics is defined)
        reactions = self.__generateReactions(reactants, forward=True)
        for rxn in reactions:
            reaction = TemplateReaction(
                reactants = rxn.reactants[:],
                products = rxn.products[:],
                degeneracy = rxn.degeneracy,
                thirdBody = rxn.thirdBody,
                reversible = rxn.reversible,
                family = self,
            )
            reactionList.append(reaction)
        # NOTE(review): reverseReactions is populated below but never read
        # afterwards -- the reverse reactions are reachable via rxn.reverse.
        reverseReactions = []
        if self.ownReverse:
            # for each reaction, make its reverse reaction and store in a 'reverse' attribute
            for rxn in reactionList:
                reactions = self.__generateReactions(rxn.products, forward=True)
                reactions = filterReactions(rxn.products, rxn.reactants, reactions)
                assert len(reactions) == 1, "Expecting one matching reverse reaction, not {0}. Forward reaction {1!s} : {1!r}".format(len(reactions), rxn)
                reaction = reactions[0]
                reaction = TemplateReaction(
                    reactants = reaction.reactants[:],
                    products = reaction.products[:],
                    degeneracy = reaction.degeneracy,
                    thirdBody = reaction.thirdBody,
                    reversible = reaction.reversible,
                    family = self,
                )
                rxn.reverse = reaction
                reverseReactions.append(reaction)
        else: # family is not ownReverse
            # Reverse direction (the direction in which kinetics is not defined)
            reactions = self.__generateReactions(reactants, forward=False)
            for rxn in reactions:
                # note reactants/products are swapped so the stored direction
                # matches the family template's forward direction
                reaction = TemplateReaction(
                    reactants = rxn.products[:],
                    products = rxn.reactants[:],
                    thirdBody = rxn.thirdBody,
                    reversible = rxn.reversible,
                    family = self,
                )
                reaction.degeneracy = self.calculateDegeneracy(reaction)
                reactionList.append(reaction)
        # Determine the reactant-product pairs to use for flux analysis
        # Also store the reaction template (useful so we can easily get the kinetics later)
        for reaction in reactionList:
            reaction.pairs = self.getReactionPairs(reaction)
            reaction.template = self.getReactionTemplate(reaction)
            if hasattr(reaction,'reverse'):
                reaction.reverse.pairs = self.getReactionPairs(reaction.reverse)
                reaction.reverse.template = self.getReactionTemplate(reaction.reverse)
        # Return the reactions as containing Species objects, not Molecule objects
        for reaction in reactionList:
            # map each Molecule to a Species wrapper so reactants, products
            # and pairs all reference the same Species instances
            moleculeDict = {}
            for molecule in reaction.reactants:
                moleculeDict[molecule] = Species(molecule=[molecule])
            for molecule in reaction.products:
                moleculeDict[molecule] = Species(molecule=[molecule])
            reaction.reactants = [moleculeDict[molecule] for molecule in reaction.reactants]
            reaction.products = [moleculeDict[molecule] for molecule in reaction.products]
            reaction.pairs = [(moleculeDict[reactant],moleculeDict[product]) for reactant, product in reaction.pairs]
        return reactionList
    def calculateDegeneracy(self, reaction):
        """
        For a `reaction` given in the direction in which the kinetics are
        defined, compute the reaction-path degeneracy.

        Raises an Exception if no generated reaction matches the given
        products.
        """
        # Regenerate all reactions from the same reactants; the one whose
        # products match the given reaction carries the degeneracy we want
        reactions = self.__generateReactions(reaction.reactants, forward=True)
        products = []
        for product in reaction.products:
            if isinstance(product, Molecule):
                species = Species(molecule=[product])
                species.generateResonanceIsomers()
                products.append(species)
            elif isinstance(product, Species):
                # NOTE(review): this appends the Species' molecule *list*,
                # while the Molecule branch appends a Species object; the
                # isIsomorphic() calls below then act on mixed types --
                # confirm this is intended.
                products.append(product.molecule)
        for rxn in reactions:
            # We already know the reactants match, so we only need to evaluate the products
            if len(rxn.products) == len(products) == 1:
                if products[0].isIsomorphic(rxn.products[0]):
                    return rxn.degeneracy
            elif len(rxn.products) == len(products) == 2:
                if products[0].isIsomorphic(rxn.products[0]) and products[1].isIsomorphic(rxn.products[1]):
                    return rxn.degeneracy
                elif products[0].isIsomorphic(rxn.products[1]) and products[1].isIsomorphic(rxn.products[0]):
                    return rxn.degeneracy
        raise Exception('Unable to calculate degeneracy for reaction {0} in reaction family {1}.'.format(reaction, self.label))
def __generateReactions(self, reactants, forward=True):
"""
Generate a list of all of the possible reactions of this family between
the list of `reactants`. The number of reactants provided must match
the number of reactants expected by the template, or this function
will return an empty list. Each item in the list of reactants should
be a list of :class:`Molecule` objects, each representing a resonance
isomer of the species of interest.
"""
rxnList = []; speciesList = []
# Wrap each reactant in a list if not already done (this is done to
# allow for passing multiple resonance structures for each molecule)
# This also makes a copy of the reactants list so we don't modify the
# original
reactants = [reactant if isinstance(reactant, list) else [reactant] for reactant in reactants]
sameReactants = len(reactants) == 2 and reactants[0] == reactants[1]
# Also make a deep copy of each reactant molecule
for i in range(len(reactants)):
| |
number=1,)
sentiment_data = proto.Field(proto.MESSAGE, number=2, message="SentimentData",)
class IssueAssignment(proto.Message):
    r"""Information about the issue.

    Attributes:
        issue (str):
            Resource name of the assigned issue.
        score (float):
            Score indicating the likelihood of the issue assignment,
            currently bounded on [0, 1].
    """

    # Field numbers are wire-format identifiers and must not be changed.
    issue = proto.Field(proto.STRING, number=1,)
    score = proto.Field(proto.DOUBLE, number=2,)
class CallAnnotation(proto.Message):
    r"""A piece of metadata that applies to a window of a call.

    Attributes:
        interruption_data (google.cloud.contact_center_insights_v1.types.InterruptionData):
            Data specifying an interruption.
        sentiment_data (google.cloud.contact_center_insights_v1.types.SentimentData):
            Data specifying sentiment.
        silence_data (google.cloud.contact_center_insights_v1.types.SilenceData):
            Data specifying silence.
        hold_data (google.cloud.contact_center_insights_v1.types.HoldData):
            Data specifying a hold.
        entity_mention_data (google.cloud.contact_center_insights_v1.types.EntityMentionData):
            Data specifying an entity mention.
        intent_match_data (google.cloud.contact_center_insights_v1.types.IntentMatchData):
            Data specifying an intent match.
        phrase_match_data (google.cloud.contact_center_insights_v1.types.PhraseMatchData):
            Data specifying a phrase match.
        channel_tag (int):
            The channel of the audio where the annotation
            occurs. For single-channel audio, this field is
            not populated.
        annotation_start_boundary (google.cloud.contact_center_insights_v1.types.AnnotationBoundary):
            The boundary in the conversation where the
            annotation starts, inclusive.
        annotation_end_boundary (google.cloud.contact_center_insights_v1.types.AnnotationBoundary):
            The boundary in the conversation where the
            annotation ends, inclusive.
    """

    # The *_data fields below belong to the "data" oneof: at most one of
    # them is set for a given annotation.
    interruption_data = proto.Field(
        proto.MESSAGE, number=10, oneof="data", message="InterruptionData",
    )
    sentiment_data = proto.Field(
        proto.MESSAGE, number=11, oneof="data", message="SentimentData",
    )
    silence_data = proto.Field(
        proto.MESSAGE, number=12, oneof="data", message="SilenceData",
    )
    hold_data = proto.Field(proto.MESSAGE, number=13, oneof="data", message="HoldData",)
    entity_mention_data = proto.Field(
        proto.MESSAGE, number=15, oneof="data", message="EntityMentionData",
    )
    intent_match_data = proto.Field(
        proto.MESSAGE, number=16, oneof="data", message="IntentMatchData",
    )
    phrase_match_data = proto.Field(
        proto.MESSAGE, number=17, oneof="data", message="PhraseMatchData",
    )
    channel_tag = proto.Field(proto.INT32, number=1,)
    annotation_start_boundary = proto.Field(
        proto.MESSAGE, number=4, message="AnnotationBoundary",
    )
    annotation_end_boundary = proto.Field(
        proto.MESSAGE, number=5, message="AnnotationBoundary",
    )
class AnnotationBoundary(proto.Message):
    r"""A point in a conversation that marks the start or the end of
    an annotation.

    Attributes:
        word_index (int):
            The word index of this boundary with respect
            to the first word in the transcript piece. This
            index starts at zero.
        transcript_index (int):
            The index in the sequence of transcribed
            pieces of the conversation where the boundary is
            located. This index starts at zero.
    """

    # word_index is part of the "detailed_boundary" oneof and may be unset.
    word_index = proto.Field(proto.INT32, number=3, oneof="detailed_boundary",)
    transcript_index = proto.Field(proto.INT32, number=1,)
class Entity(proto.Message):
    r"""The data for an entity annotation.
    Represents a phrase in the conversation that is a known entity,
    such as a person, an organization, or location.

    Attributes:
        display_name (str):
            The representative name for the entity.
        type_ (google.cloud.contact_center_insights_v1.types.Entity.Type):
            The entity type.
        metadata (Sequence[google.cloud.contact_center_insights_v1.types.Entity.MetadataEntry]):
            Metadata associated with the entity.
            For most entity types, the metadata is a Wikipedia URL
            (``wikipedia_url``) and Knowledge Graph MID (``mid``), if
            they are available. For the metadata associated with other
            entity types, see the Type table below.
        salience (float):
            The salience score associated with the entity in the [0,
            1.0] range.
            The salience score for an entity provides information about
            the importance or centrality of that entity to the entire
            document text. Scores closer to 0 are less salient, while
            scores closer to 1.0 are highly salient.
        sentiment (google.cloud.contact_center_insights_v1.types.SentimentData):
            The aggregate sentiment expressed for this
            entity in the conversation.
    """

    class Type(proto.Enum):
        r"""The type of the entity. For most entity types, the associated
        metadata is a Wikipedia URL (``wikipedia_url``) and Knowledge Graph
        MID (``mid``). The table below lists the associated fields for
        entities that have different metadata.
        """
        TYPE_UNSPECIFIED = 0
        PERSON = 1
        LOCATION = 2
        ORGANIZATION = 3
        EVENT = 4
        WORK_OF_ART = 5
        CONSUMER_GOOD = 6
        OTHER = 7
        # NOTE: value 8 is absent -- presumably reserved in the source proto.
        PHONE_NUMBER = 9
        ADDRESS = 10
        DATE = 11
        NUMBER = 12
        PRICE = 13

    display_name = proto.Field(proto.STRING, number=1,)
    type_ = proto.Field(proto.ENUM, number=2, enum=Type,)
    metadata = proto.MapField(proto.STRING, proto.STRING, number=3,)
    salience = proto.Field(proto.FLOAT, number=4,)
    sentiment = proto.Field(proto.MESSAGE, number=5, message="SentimentData",)
class Intent(proto.Message):
    r"""The data for an intent. Represents a detected intent in the
    conversation, for example MAKES_PROMISE.

    Attributes:
        id (str):
            The unique identifier of the intent.
        display_name (str):
            The human-readable name of the intent.
    """

    # Field numbers are wire-format identifiers and must not be changed.
    id = proto.Field(proto.STRING, number=1,)
    display_name = proto.Field(proto.STRING, number=2,)
class PhraseMatchData(proto.Message):
    r"""The data for a matched phrase matcher.
    Represents information identifying a phrase matcher for a given
    match.

    Attributes:
        phrase_matcher (str):
            The unique identifier (the resource name) of
            the phrase matcher.
        display_name (str):
            The human-readable name of the phrase
            matcher.
    """

    # Field numbers are wire-format identifiers and must not be changed.
    phrase_matcher = proto.Field(proto.STRING, number=1,)
    display_name = proto.Field(proto.STRING, number=2,)
class DialogflowIntent(proto.Message):
    r"""The data for a Dialogflow intent. Represents a detected intent in
    the conversation, e.g. MAKES_PROMISE.

    Attributes:
        display_name (str):
            The human-readable name of the intent.
    """

    display_name = proto.Field(proto.STRING, number=1,)
class InterruptionData(proto.Message):
    r"""The data for an interruption annotation.  This message carries
    no fields; its presence alone marks the interruption."""
class SilenceData(proto.Message):
    r"""The data for a silence annotation.  This message carries no
    fields; its presence alone marks the silence."""
class HoldData(proto.Message):
    r"""The data for a hold annotation.  This message carries no
    fields; its presence alone marks the hold."""
class EntityMentionData(proto.Message):
    r"""The data for an entity mention annotation. This represents a mention
    of an ``Entity`` in the conversation.

    Attributes:
        entity_unique_id (str):
            The key of this entity in conversation entities. Can be used
            to retrieve the exact ``Entity`` this mention is attached
            to.
        type_ (google.cloud.contact_center_insights_v1.types.EntityMentionData.MentionType):
            The type of the entity mention.
        sentiment (google.cloud.contact_center_insights_v1.types.SentimentData):
            Sentiment expressed for this mention of the
            entity.
    """

    class MentionType(proto.Enum):
        r"""The supported types of mentions."""
        MENTION_TYPE_UNSPECIFIED = 0
        PROPER = 1
        COMMON = 2

    entity_unique_id = proto.Field(proto.STRING, number=1,)
    type_ = proto.Field(proto.ENUM, number=2, enum=MentionType,)
    sentiment = proto.Field(proto.MESSAGE, number=3, message="SentimentData",)
class IntentMatchData(proto.Message):
    r"""The data for an intent match.
    Represents an intent match for a text segment in the
    conversation. A text segment can be part of a sentence, a
    complete sentence, or an utterance with multiple sentences.

    Attributes:
        intent_unique_id (str):
            The id of the matched intent.
            Can be used to retrieve the corresponding intent
            information.
    """

    intent_unique_id = proto.Field(proto.STRING, number=1,)
class SentimentData(proto.Message):
    r"""The data for a sentiment annotation.

    Attributes:
        magnitude (float):
            A non-negative number from 0 to infinity
            which represents the absolute magnitude of
            sentiment regardless of score.
        score (float):
            The sentiment score between -1.0 (negative)
            and 1.0 (positive).
    """

    magnitude = proto.Field(proto.FLOAT, number=1,)
    score = proto.Field(proto.FLOAT, number=2,)
class IssueModel(proto.Message):
    r"""The issue model resource.

    Attributes:
        name (str):
            Immutable. The resource name of the issue model. Format:
            projects/{project}/locations/{location}/issueModels/{issue_model}
        display_name (str):
            The representative name for the issue model.
        create_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. The time at which this issue
            model was created.
        update_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. The most recent time at which
            the issue model was updated.
        state (google.cloud.contact_center_insights_v1.types.IssueModel.State):
            Output only. State of the model.
        training_stats (google.cloud.contact_center_insights_v1.types.IssueModelLabelStats):
            Output only. Immutable. The issue model's
            label statistics on its training data.
    """

    class State(proto.Enum):
        r"""State of the model."""
        STATE_UNSPECIFIED = 0
        UNDEPLOYED = 1
        DEPLOYING = 2
        DEPLOYED = 3
        UNDEPLOYING = 4
        DELETING = 5

    name = proto.Field(proto.STRING, number=1,)
    display_name = proto.Field(proto.STRING, number=2,)
    create_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,)
    update_time = proto.Field(proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp,)
    state = proto.Field(proto.ENUM, number=5, enum=State,)
    # NOTE: field number 6 is skipped -- presumably reserved in the proto.
    training_stats = proto.Field(
        proto.MESSAGE, number=7, message="IssueModelLabelStats",
    )
class Issue(proto.Message):
    r"""The issue resource.

    Attributes:
        name (str):
            Immutable. The resource name of the issue. Format:
            projects/{project}/locations/{location}/issueModels/{issue_model}/issues/{issue}
        display_name (str):
            The representative name for the issue.
        create_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. The time at which this issue was
            created.
        update_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. The most recent time that this
            issue was updated.
    """

    # Field numbers are wire-format identifiers and must not be changed.
    name = proto.Field(proto.STRING, number=1,)
    display_name = proto.Field(proto.STRING, number=2,)
    create_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,)
    update_time = proto.Field(proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp,)
class IssueModelLabelStats(proto.Message):
    r"""Aggregated statistics about an issue model.

    Attributes:
        analyzed_conversations_count (int):
            Number of conversations the issue model has
            analyzed at this point in time.
        unclassified_conversations_count (int):
            Number of analyzed conversations for which no
            issue was applicable at this point in time.
        issue_stats (Sequence[google.cloud.contact_center_insights_v1.types.IssueModelLabelStats.IssueStatsEntry]):
            Statistics on each issue. Key is the issue's
            resource name.
    """

    class IssueStats(proto.Message):
        r"""Aggregated statistics about an issue.

        Attributes:
            issue (str):
                Issue resource. Format:
                projects/{project}/locations/{location}/issueModels/{issue_model}/issues/{issue}
            labeled_conversations_count (int):
                Number of conversations attached to the issue
                at this point in time.
        """

        issue = proto.Field(proto.STRING, number=1,)
        labeled_conversations_count = proto.Field(proto.INT64, number=2,)

    analyzed_conversations_count = proto.Field(proto.INT64, number=1,)
    unclassified_conversations_count = proto.Field(proto.INT64, number=2,)
    # Map from issue resource name to the nested IssueStats message above.
    issue_stats = proto.MapField(
        proto.STRING, proto.MESSAGE, number=3, message=IssueStats,
    )
class PhraseMatcher(proto.Message):
r"""The phrase matcher resource.
Attributes:
name (str):
The resource name of the phrase matcher. Format:
projects/{project}/locations/{location}/phraseMatchers/{phrase_matcher}
revision_id (str):
Output only. Immutable. The revision ID of
the phrase matcher. A new revision is committed
whenever the matcher is changed, except when it
is activated or deactivated. A server generated
random ID will be used. Example:
locations/global/phraseMatchers/my-first-
matcher@1234567
version_tag (str):
The customized version tag to use for the phrase matcher. If
not specified, it will default to ``revision_id``.
revision_create_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. The timestamp of when the
revision was created. It is also the create time
when a new matcher is | |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
'''RNN Cells module, include RNNCell, GRUCell, LSTMCell'''
import math
import numpy as np
import mindspore.ops as P
import mindspore.common.dtype as mstype
from mindspore import log as logger
from mindspore.common.tensor import Tensor
from mindspore.common.parameter import Parameter
from mindspore.common.initializer import initializer, Uniform
from mindspore.ops.primitive import constexpr
from mindspore.nn.cell import Cell
from mindspore._checkparam import Validator as validator
__all__ = ['LSTMCell', 'GRUCell', 'RNNCell']
@constexpr
def _check_input_dtype(input_dtype, param_name, allow_dtypes, cls_name):
    """Internal function, used to check whether `input_dtype` is one of `allow_dtypes`."""
    validator.check_type_name(param_name, input_dtype, allow_dtypes, cls_name)
@constexpr
def _check_is_tensor(param_name, input_data, cls_name):
    """Internal helper: raise TypeError unless `input_data` is a Tensor (None is accepted)."""
    if input_data is None:
        return
    data_type = P.typeof(input_data)
    if not isinstance(data_type, mstype.tensor_type):
        raise TypeError(f"For '{cls_name}', the '{param_name}' should be '{mstype.tensor_type}', "
                        f"but got '{data_type}'")
@constexpr
def _check_is_tuple(param_name, input_data, cls_name):
    """Internal function, used to check whether the input data is a tuple."""
    if input_data is not None and not isinstance(P.typeof(input_data), mstype.Tuple):
        raise TypeError(f"For '{cls_name}', the '{param_name}' should be '{mstype.Tuple}', "
                        f"but got '{P.typeof(input_data)}'")
@constexpr
def _check_tuple_length(param_name, input_data, length, cls_name):
    """Internal function, used to check whether the tuple `input_data` has length `length`."""
    if input_data is not None and len(input_data) != length:
        raise TypeError(f"For '{cls_name}', the length of '{param_name}' should be '{length}', "
                        f"but got '{len(input_data)}'")
@constexpr
def _check_batch_size_equal(batch_size_x, batch_size_hx, cls_name):
    """Internal helper: raise ValueError when x and hx disagree on batch size."""
    if batch_size_x == batch_size_hx:
        return
    raise ValueError(f"For '{cls_name}' batch size of x and hx should be equal, but got {batch_size_x} of x "
                     f"and {batch_size_hx} of hx.")
def _check_lstmcell_init(func):
    """Decorator that warns about the LSTMCell API change and rejects pre-1.6 arguments."""
    def wrapper(*args, **kwargs):
        # Emitted unconditionally: LSTMCell semantics changed between releases.
        logger.warning("LSTMCell has been changed from 'single LSTM layer' to 'single LSTM cell', "
                       "if you still need use single LSTM layer, please use `nn.LSTM` instead.")
        legacy_kwargs = ('batch_size', 'dropout', 'bidirectional')
        if len(args) > 4 or any(name in kwargs for name in legacy_kwargs):
            raise ValueError("The arguments of `nn.LSTMCell` from old MindSpore version(<1.6) are detected, "
                             "if you still need use single LSTM layer, please use `nn.LSTM` instead.")
        return func(*args, **kwargs)
    return wrapper
def _rnn_tanh_cell(inputs, hidden, w_ih, w_hh, b_ih, b_hh):
    '''RNN cell function with tanh activation'''
    # MatMul(False, True): no transpose on the left operand, transpose the weight.
    matmul = P.MatMul(False, True)
    igates = matmul(inputs, w_ih)
    hgates = matmul(hidden, w_hh)
    if b_ih is not None:
        igates = igates + b_ih
        hgates = hgates + b_hh
    return P.Tanh()(igates + hgates)
def _rnn_relu_cell(inputs, hidden, w_ih, w_hh, b_ih, b_hh):
    '''RNN cell function with relu activation'''
    matmul = P.MatMul(False, True)
    igates = matmul(inputs, w_ih)
    hgates = matmul(hidden, w_hh)
    if b_ih is not None:
        igates = igates + b_ih
        hgates = hgates + b_hh
    return P.ReLU()(igates + hgates)
def _lstm_cell(inputs, hidden, w_ih, w_hh, b_ih, b_hh):
    '''LSTM cell function'''
    hx, cx = hidden
    matmul = P.MatMul(False, True)
    gates = matmul(inputs, w_ih) + matmul(hx, w_hh)
    if b_ih is not None:
        gates = gates + b_ih + b_hh
    # Gate order along axis 1: input, forget, cell candidate, output.
    in_gate, forget_gate, cell_gate, out_gate = P.Split(1, 4)(gates)
    in_gate = P.Sigmoid()(in_gate)
    forget_gate = P.Sigmoid()(forget_gate)
    cell_gate = P.Tanh()(cell_gate)
    out_gate = P.Sigmoid()(out_gate)
    cy = (forget_gate * cx) + (in_gate * cell_gate)
    hy = out_gate * P.Tanh()(cy)
    return hy, cy
def _gru_cell(inputs, hidden, w_ih, w_hh, b_ih, b_hh):
    '''GRU cell function'''
    matmul = P.MatMul(False, True)
    gi = matmul(inputs, w_ih)
    gh = matmul(hidden, w_hh)
    if b_ih is not None:
        gi = gi + b_ih
        gh = gh + b_hh
    # Chunk order along axis 1: reset, input (update), new (candidate).
    i_r, i_i, i_n = P.Split(1, 3)(gi)
    h_r, h_i, h_n = P.Split(1, 3)(gh)
    resetgate = P.Sigmoid()(i_r + h_r)
    inputgate = P.Sigmoid()(i_i + h_i)
    newgate = P.Tanh()(i_n + resetgate * h_n)
    # Interpolate between the candidate and the previous hidden state.
    return newgate + inputgate * (hidden - newgate)
class RNNCellBase(Cell):
    '''Basic class for RNN Cells: owns the gate weights/biases shared by RNN/GRU/LSTM cells.'''

    def __init__(self, input_size: int, hidden_size: int, has_bias: bool, num_chunks: int):
        super().__init__()
        validator.check_value_type("has_bias", has_bias, [bool], self.cls_name)
        validator.check_positive_int(hidden_size, "hidden_size", self.cls_name)
        validator.check_positive_int(input_size, "input_size", self.cls_name)
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.has_bias = has_bias
        # num_chunks = number of gates stacked along dim 0 (1 for RNN, 3 for GRU, 4 for LSTM).
        gate_size = num_chunks * hidden_size
        # Initial randn values are placeholders; reset_parameters() re-draws everything below.
        self.weight_ih = Parameter(Tensor(np.random.randn(gate_size, input_size).astype(np.float32)))
        self.weight_hh = Parameter(Tensor(np.random.randn(gate_size, hidden_size).astype(np.float32)))
        if has_bias:
            self.bias_ih = Parameter(Tensor(np.random.randn(gate_size).astype(np.float32)))
            self.bias_hh = Parameter(Tensor(np.random.randn(gate_size).astype(np.float32)))
        else:
            self.bias_ih = None
            self.bias_hh = None
        self.reset_parameters()

    def reset_parameters(self):
        """Re-initialize every parameter from U(-stdv, stdv) with stdv = 1/sqrt(hidden_size)."""
        stdv = 1 / math.sqrt(self.hidden_size)
        for weight in self.get_parameters():
            weight.set_data(initializer(Uniform(stdv), weight.shape))
class RNNCell(RNNCellBase):
    r"""
    An Elman RNN cell with tanh or ReLU non-linearity.

    .. math::
        h_t = \tanh(W_{ih} x_t + b_{ih} + W_{hh} h_{(t-1)} + b_{hh})

    Here :math:`h_t` is the hidden state at time `t`, :math:`x_t` is
    the input at time `t`, and :math:`h_{(t-1)}` is the hidden state of the
    previous layer at time `t-1` or the initial hidden state at time `0`.
    If `nonlinearity` is `relu`, then `relu` is used instead of `tanh`.

    Args:
        input_size (int): Number of features of input.
        hidden_size (int): Number of features of hidden layer.
        has_bias (bool): Whether the cell has bias `b_ih` and `b_hh`. Default: True.
        nonlinearity (str): The non-linearity to use. Can be either `tanh` or `relu`. Default: `tanh`.

    Inputs:
        - **x** (Tensor) - Tensor of shape :math:`(batch\_size, input\_size)` .
        - **hx** (Tensor) - Tensor of data type mindspore.float32 and shape :math:`(batch\_size, hidden\_size)` .
          Data type of `hx` must be the same as `x`.

    Outputs:
        - **hx'** (Tensor) - Tensor of shape :math:`(batch\_size, hidden\_size)` .

    Raises:
        TypeError: If `input_size` or `hidden_size` is not an int or not greater than 0.
        TypeError: If `has_bias` is not a bool.
        ValueError: If `nonlinearity` is not in ['tanh', 'relu'].

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> net = nn.RNNCell(10, 16)
        >>> x = Tensor(np.ones([5, 3, 10]).astype(np.float32))
        >>> hx = Tensor(np.ones([3, 16]).astype(np.float32))
        >>> output = []
        >>> for i in range(5):
        ...     hx = net(x[i], hx)
        ...     output.append(hx)
        >>> print(output[0].shape)
        (3, 16)
    """
    # Accepted values for the `nonlinearity` argument.
    _non_linearity = ['tanh', 'relu']

    def __init__(self, input_size: int, hidden_size: int, has_bias: bool = True, nonlinearity: str = "tanh"):
        # num_chunks=1: a plain RNN cell has a single gate block.
        super().__init__(input_size, hidden_size, has_bias, num_chunks=1)
        validator.check_value_type("nonlinearity", nonlinearity, [str], self.cls_name)
        validator.check_string(nonlinearity, self._non_linearity, "nonlinearity", self.cls_name)
        self.nonlinearity = nonlinearity

    def construct(self, x, hx):
        # Validate types, dtypes and batch sizes up front; helpers raise on mismatch.
        _check_is_tensor('x', x, self.cls_name)
        _check_is_tensor('hx', hx, self.cls_name)
        _check_input_dtype(x.dtype, "x", [mstype.float32, mstype.float16], self.cls_name)
        _check_input_dtype(hx.dtype, "hx", [mstype.float32, mstype.float16], self.cls_name)
        _check_batch_size_equal(x.shape[0], hx.shape[0], self.cls_name)
        # Dispatch to the tanh or relu kernel chosen at construction time.
        if self.nonlinearity == "tanh":
            ret = _rnn_tanh_cell(x, hx, self.weight_ih, self.weight_hh, self.bias_ih, self.bias_hh)
        else:
            ret = _rnn_relu_cell(x, hx, self.weight_ih, self.weight_hh, self.bias_ih, self.bias_hh)
        return ret
class LSTMCell(RNNCellBase):
r"""
A LSTM (Long Short-Term Memory) cell.
.. math::
\begin{array}{ll} \\
i_t = \sigma(W_{ix} x_t + b_{ix} + W_{ih} h_{(t-1)} + b_{ih}) \\
f_t = \sigma(W_{fx} x_t + b_{fx} + W_{fh} h_{(t-1)} + b_{fh}) \\
\tilde{c}_t = \tanh(W_{cx} x_t + b_{cx} + W_{ch} h_{(t-1)} + b_{ch}) \\
o_t = \sigma(W_{ox} x_t + b_{ox} + W_{oh} h_{(t-1)} + b_{oh}) \\
c_t = f_t * c_{(t-1)} + i_t * \tilde{c}_t \\
h_t = o_t * \tanh(c_t) \\
\end{array}
Here :math:`\sigma` is the sigmoid function, and :math:`*` is the Hadamard product. :math:`W, b`
are learnable weights between the output and the input in the formula. For instance,
:math:`W_{ix}, b_{ix}` are the weight and bias used to transform from input :math:`x` to :math:`i`.
Details can be found in paper `LONG SHORT-TERM MEMORY
<https://www.bioinf.jku.at/publications/older/2604.pdf>`_ and
`Long Short-Term Memory Recurrent Neural Network Architectures for Large Scale Acoustic Modeling
<https://static.googleusercontent.com/media/research.google.com/zh-CN//pubs/archive/43905.pdf>`_.
Args:
input_size (int): Number of features of input.
hidden_size (int): Number of features of hidden layer.
has_bias (bool): Whether the cell has bias `b_ih` and `b_hh`. Default: True.
Inputs:
- **x** (Tensor) - Tensor of shape (batch_size, `input_size`).
- **hx** (tuple) - A tuple of two Tensors (h_0, c_0) both of data type mindspore.float32
and shape (batch_size, `hidden_size`). The data type of `hx` must be the same as `x`.
Outputs:
- **hx'** (Tensor) - A tuple of two Tensors (h', c') both of data shape (batch_size, `hidden_size`).
Raises:
TypeError: If `input_size`, `hidden_size` is not an int.
TypeError: If `has_bias` is not a bool.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> net = nn.LSTMCell(10, 16)
>>> x = Tensor(np.ones([5, 3, 10]).astype(np.float32))
>>> h = Tensor(np.ones([3, 16]).astype(np.float32))
>>> c = Tensor(np.ones([3, 16]).astype(np.float32))
>>> output = []
>>> for i in range(5):
... hx = net(x[i], (h, c))
... output.append(hx)
| |
the
derivatives of the mean and variance. Resulting arrays are sized:
dmu_dX* -- [N*, Q ,D], where D is the number of output in this GP (usually one).
Note that this is not the same as computing the mean and variance of the derivative of the function!
dv_dX* -- [N*, Q], (since all outputs have the same variance)
:param X: The points at which to get the predictive gradients
:type X: np.ndarray (Xnew x self.input_dim)
:returns: dmu_dX, dv_dX
:rtype: [np.ndarray (N*, Q ,D), np.ndarray (N*,Q) ]
"""
dmu_dX = np.empty((Xnew.shape[0],Xnew.shape[1],self.output_dim))
for i in range(self.output_dim):
dmu_dX[:,:,i] = self.kern.gradients_X(self.posterior.woodbury_vector[:,i:i+1].T, Xnew, self.X)
# gradients wrt the diagonal part k_{xx}
dv_dX = self.kern.gradients_X(np.eye(Xnew.shape[0]), Xnew)
#grads wrt 'Schur' part K_{xf}K_{ff}^{-1}K_{fx}
alpha = -2.*np.dot(self.kern.K(Xnew, self.X),self.posterior.woodbury_inv)
dv_dX += self.kern.gradients_X(alpha, Xnew, self.X)
return dmu_dX, dv_dX
    def predict_jacobian(self, Xnew, kern=None, full_cov=True):
        """
        Compute the derivatives of the posterior of the GP.

        Given a set of points at which to predict X* (size [N*,Q]), compute the
        mean and variance of the derivative. Resulting arrays are sized:

         dL_dX* -- [N*, Q ,D], where D is the number of output in this GP (usually one).

        Note that this is the mean and variance of the derivative,
        not the derivative of the mean and variance! (See predictive_gradients for that)

         dv_dX* -- [N*, Q], (since all outputs have the same variance)

        If there is missing data, it is not implemented for now, but
        there will be one output variance per output dimension.

        :param X: The points at which to get the predictive gradients.
        :type X: np.ndarray (Xnew x self.input_dim)
        :param kern: The kernel to compute the jacobian for.
        :param boolean full_cov: whether to return the full covariance of the jacobian.

        :returns: dmu_dX, dv_dX
        :rtype: [np.ndarray (N*, Q ,D), np.ndarray (N*,Q,(D)) ]

        Note: We always return sum in input_dim gradients, as the off-diagonals
        in the input_dim are not needed for further calculations.
        This is a compromise for increase in speed. Mathematically the jacobian would
        have another dimension in Q.
        """
        if kern is None:
            kern = self.kern
        # Mean jacobian: dK/dXnew weighted by the woodbury vector, one slice per output dim.
        mean_jac = np.empty((Xnew.shape[0],Xnew.shape[1],self.output_dim))
        for i in range(self.output_dim):
            mean_jac[:,:,i] = kern.gradients_X(self.posterior.woodbury_vector[:,i:i+1].T, Xnew, self._predictive_variable)
        # dK(Xnew, X)/dXnew taken one training/inducing point at a time; shape (M, N*, Q).
        dK_dXnew_full = np.empty((self._predictive_variable.shape[0], Xnew.shape[0], Xnew.shape[1]))
        for i in range(self._predictive_variable.shape[0]):
            dK_dXnew_full[i] = kern.gradients_X([[1.]], Xnew, self._predictive_variable[[i]])
        # Prior (second kernel derivative) term of the jacobian covariance.
        if full_cov:
            dK2_dXdX = kern.gradients_XX([[1.]], Xnew)
        else:
            dK2_dXdX = kern.gradients_XX_diag([[1.]], Xnew)
        def compute_cov_inner(wi):
            # Subtract the data term dK.T * woodbury_inv * dK from the prior term.
            # NOTE(review): einsum index layouts assume dK_dXnew_full is (M, N*, Q) -- confirm
            # against kern.gradients_X if this ever changes.
            if full_cov:
                # full covariance gradients:
                var_jac = dK2_dXdX - np.einsum('qnm,miq->niq', dK_dXnew_full.T.dot(wi), dK_dXnew_full)
            else:
                var_jac = dK2_dXdX - np.einsum('qim,miq->iq', dK_dXnew_full.T.dot(wi), dK_dXnew_full)
            return var_jac
        if self.posterior.woodbury_inv.ndim == 3: # Missing data:
            # One woodbury inverse per output dimension; compute each slice separately.
            if full_cov:
                var_jac = np.empty((Xnew.shape[0],Xnew.shape[0],Xnew.shape[1],self.output_dim))
                for d in range(self.posterior.woodbury_inv.shape[2]):
                    var_jac[:, :, :, d] = compute_cov_inner(self.posterior.woodbury_inv[:, :, d])
            else:
                var_jac = np.empty((Xnew.shape[0],Xnew.shape[1],self.output_dim))
                for d in range(self.posterior.woodbury_inv.shape[2]):
                    var_jac[:, :, d] = compute_cov_inner(self.posterior.woodbury_inv[:, :, d])
        else:
            var_jac = compute_cov_inner(self.posterior.woodbury_inv)
        return mean_jac, var_jac
    def predict_wishard_embedding(self, Xnew, kern=None, mean=True, covariance=True):
        """
        Predict the wishard embedding G of the GP. This is the density of the
        input of the GP defined by the probabilistic function mapping f.
        G = J_mean.T*J_mean + output_dim*J_cov.

        :param array-like Xnew: The points at which to evaluate the magnification.
        :param :py:class:`~GPy.kern.Kern` kern: The kernel to use for the magnification.
            Supplying only a part of the learning kernel gives insights into the density
            of the specific kernel part of the input function. E.g. one can see how dense the
            linear part of a kernel is compared to the non-linear part etc.

        .. note:: "wishard" is presumably a historical misspelling of "Wishart";
            the method name is kept as-is for backward compatibility with callers.
        """
        if kern is None:
            kern = self.kern
        # Jacobian of the posterior mean and its (diagonal) variance at each point.
        mu_jac, var_jac = self.predict_jacobian(Xnew, kern, full_cov=False)
        # J_mean.T * J_mean, summed over output dimensions -> (N, Q, Q) per point.
        mumuT = np.einsum('iqd,ipd->iqp', mu_jac, mu_jac)
        Sigma = np.zeros(mumuT.shape)
        if var_jac.ndim == 3:
            # Missing-data case: one variance per output dim; accumulate on the diagonal.
            Sigma[(slice(None), )+np.diag_indices(Xnew.shape[1], 2)] = var_jac.sum(-1)
        else:
            # Shared variance across outputs: scale the diagonal by the output count.
            Sigma[(slice(None), )+np.diag_indices(Xnew.shape[1], 2)] = self.output_dim*var_jac
        G = 0.
        if mean:
            G += mumuT
        if covariance:
            G += Sigma
        return G
def predict_magnification(self, Xnew, kern=None, mean=True, covariance=True):
"""
Predict the magnification factor as
sqrt(det(G))
for each point N in Xnew
"""
G = self.predict_wishard_embedding(Xnew, kern, mean, covariance)
from ..util.linalg import jitchol
mag = np.empty(Xnew.shape[0])
for n in range(Xnew.shape[0]):
try:
mag[n] = np.sqrt(np.exp(2*np.sum(np.log(np.diag(jitchol(G[n, :, :]))))))
except:
mag[n] = np.sqrt(np.linalg.det(G[n, :, :]))
return mag
def posterior_samples_f(self,X,size=10, full_cov=True):
"""
Samples the posterior GP at the points X.
:param X: The points at which to take the samples.
:type X: np.ndarray (Nnew x self.input_dim)
:param size: the number of a posteriori samples.
:type size: int.
:param full_cov: whether to return the full covariance matrix, or just the diagonal.
:type full_cov: bool.
:returns: fsim: set of simulations
:rtype: np.ndarray (N x samples)
"""
m, v = self._raw_predict(X, full_cov=full_cov)
if self.normalizer is not None:
m, v = self.normalizer.inverse_mean(m), self.normalizer.inverse_variance(v)
v = v.reshape(m.size,-1) if len(v.shape)==3 else v
if not full_cov:
fsim = np.random.multivariate_normal(m.flatten(), np.diag(v.flatten()), size).T
else:
fsim = np.random.multivariate_normal(m.flatten(), v, size).T
return fsim
def posterior_samples(self, X, size=10, full_cov=False, Y_metadata=None):
"""
Samples the posterior GP at the points X.
:param X: the points at which to take the samples.
:type X: np.ndarray (Nnew x self.input_dim.)
:param size: the number of a posteriori samples.
:type size: int.
:param full_cov: whether to return the full covariance matrix, or just the diagonal.
:type full_cov: bool.
:param noise_model: for mixed noise likelihood, the noise model to use in the samples.
:type noise_model: integer.
:returns: Ysim: set of simulations, a Numpy array (N x samples).
"""
fsim = self.posterior_samples_f(X, size, full_cov=full_cov)
Ysim = self.likelihood.samples(fsim, Y_metadata=Y_metadata)
return Ysim
def plot_f(self, plot_limits=None, which_data_rows='all',
which_data_ycols='all', fixed_inputs=[],
levels=20, samples=0, fignum=None, ax=None, resolution=None,
plot_raw=True,
linecol=None,fillcol=None, Y_metadata=None, data_symbol='kx',
apply_link=False):
"""
Plot the GP's view of the world, where the data is normalized and before applying a likelihood.
This is a call to plot with plot_raw=True.
Data will not be plotted in this, as the GP's view of the world
may live in another space, or units then the data.
Can plot only part of the data and part of the posterior functions
using which_data_rowsm which_data_ycols.
:param plot_limits: The limits of the plot. If 1D [xmin,xmax], if 2D [[xmin,ymin],[xmax,ymax]]. Defaluts to data limits
:type plot_limits: np.array
:param which_data_rows: which of the training data to plot (default all)
:type which_data_rows: 'all' or a slice object to slice model.X, model.Y
:param which_data_ycols: when the data has several columns (independant outputs), only plot these
:type which_data_ycols: 'all' or a list of integers
:param fixed_inputs: a list of tuple [(i,v), (i,v)...], specifying that input index i should be set to value v.
:type fixed_inputs: a list of tuples
:param resolution: the number of intervals to sample the GP on. Defaults to 200 in 1D and 50 (a 50x50 grid) in 2D
:type resolution: int
:param levels: number of levels to plot in a contour plot.
:param levels: for 2D plotting, the number of contour levels to use is ax is None, create a new figure
:type levels: int
:param samples: the number of a posteriori samples to plot
:type samples: int
:param fignum: figure to plot on.
:type fignum: figure number
:param ax: axes to plot on.
:type ax: axes handle
:param linecol: color of line to plot [Tango.colorsHex['darkBlue']]
:type linecol: color either as Tango.colorsHex object or character ('r' is red, 'g' is green) as is standard in matplotlib
:param fillcol: color of fill [Tango.colorsHex['lightBlue']]
:type fillcol: color either as Tango.colorsHex object or character ('r' is red, 'g' is green) as is standard in matplotlib
:param Y_metadata: additional data associated with Y which may be needed
:type Y_metadata: dict
:param data_symbol: symbol as used matplotlib, by default this is a black cross ('kx')
:type data_symbol: color either as Tango.colorsHex object or character ('r' is red, 'g' is green) alongside marker type, as is standard in matplotlib.
:param apply_link: if there is a link function of the likelihood, plot the link(f*) rather than f*
:type apply_link: boolean
"""
assert "matplotlib" in sys.modules, "matplotlib package has not been imported."
from ..plotting.matplot_dep import models_plots
kw = {}
if linecol is not None:
kw['linecol'] = linecol
if fillcol is not None:
kw['fillcol'] = fillcol
return models_plots.plot_fit(self, plot_limits, which_data_rows,
which_data_ycols, fixed_inputs,
levels, samples, fignum, ax, resolution,
plot_raw=plot_raw, Y_metadata=Y_metadata,
data_symbol=data_symbol, | |
# ncgmp09_ValidateDatabase.py
# Python script to inventory tables, feature datasets
# and feature classes in a geodatabase and to check
# for conformance with NCGMP09 geodatabase schema.
# For more information, see tail of this file.
# Assumes ArcGIS 10 or higher.
# <NAME>, USGS
#
# Takes two arguments: <geodatabaseName> <outputWorkspace>
# and writes a file named <geodatabaseName>-NCGMP09conformance.txt.
# At present only works on a geodatabase in the local directory.
# Requires that ncgmp09_definition.py be present in the local directory
# or in the appropriate Python library directory.
# Incomplete error trapping and no input checking.
#
# NOTE: THIS CODE HAS NOT BEEN THOROUGHLY TESTED
# If you find problems, or think you see errors, please let me know.
# Zip up the database, the conformance report (if one is written),
# a brief discussion of what's wrong, and email zip file to me at
# <EMAIL>
# Please include "GeMS" in the subject line.
# Thanks!
import arcpy, sys, time, os.path, glob
from GeMS_Definition import tableDict, fieldNullsOKDict
from GeMS_utilityFunctions import *
versionString = 'GeMS_ValidateDatabase_ArcPro.py, version of 11 July 2019'
# modified to have output folder default to folder hosting input database
# modified for HTML output
# 15 Sept 2016 tableToHtml modified to better handle special characters
# 15 Sept 2016 removed check for nullable vs non-nullable field definitions
# 15 Sept 2016 removed DataSourcePolys from required feature classes
# 16 Sept 2016 Inventoried disallowed null values. Check for zero-length strings
# 19 Oct 2016 Changed search for 'Source' fields to look for "SourceID" instead of "Source" in field name
# 19 Dec 2016 Added check for null fields within DMU
# 19 Dec 2016 Deleted requirement that GenLith and GenLithConf be defined in Glossary
# 17 Mar 2017 Added MiscellaneousMapInformation and StandardLithology to tables listed to output HTML
# 13 August 2017 Cleaned up required table and feature class definition; added GeoMaterialDict to required elements
# 6 December 2017 Did a re-vamp on html, added style, tables, etc. - <NAME>
# 4 March 2018 Added check for .txt files in gdb
# 11 July 2019 Made changes to use in PRO
## need to check for and flag zero-length strings: should be '#', '#null' or None
debug = False
space6 = '  '
space4 = ' '
space2 = ' '
rdiv = '<div class="report">\n'
divend = '</div>\n'
style = """
<style>
.report {
font-family: Courier New, Courier, monospace;
margin-left: 20px;
margin-right: 20px;
}
h3, .tablediv h4 {
margin-left:15px;
}
h2,
h3 {
background-color: lightgray;
padding: 5px;
border-radius: 4px;
font-family: "Century Gothic", CenturyGothic, AppleGothic, sans-serif;
}
.ess-tables {
width: 95%;
margin-left: 15px;
}
.table-header:hover {
cursor:pointer;
}
table,
th,
td {
border: 1px solid gray;
border-collapse: collapse;
padding: 10px;
}
.fields {
color: darkblue;
font-weight:bold;
}
.tables {
color:darkorange;
font-weight:bold;
}
.values {
color:darkgreen;
font-weight:bold;
}
.highlight{
background-color:#f2fcd6;
padding:0 2px;
border-radius:3px;
border-bottom:1px solid gray;
}
li {
list-style: none;
margin-top: 5px;
margin-bottom: 5px;
}
#back-to-top {
position: fixed;
bottom: 20px;
right: 20px;
padding: 10px;
margin: 10px;
background-color: rgba(250,250,250,0.7);
border-radius:5px;
}
</style>"""
overviewhtml = """
<h2>Contents</h2>\n
<div class="report" id="contents">
<a href="#schema-errors">Schema Errors</a><br>
<a href="#extensions">Extensions to Schema</a><br>
<a href="#content-errors">Content Errors</a><br>
<a href="#match">MapUnits Match across map, DMU, CMU, and cross-sections</a><br>
<a href="#essential">Essential Tables</a><br>
<a href="#gdb-desc">Geodatabase Description</a><br>
</div>
<div class="report">
<h4>Color Codes</h4>
<span class="tables">Orange</span> are tables, feature classes, or feature datasets in the geodatabase.<br>
<span class="fields">Blue</span> are fields in a table.</br>
<span class="values">Green</span> are values in a field.</br>
</div>
<div id="back-to-top"><a href="#overview">Back to Top</a></div>
"""
# fields we don't want listed or described when inventorying dataset:
# (ESRI-managed geometry/annotation/editor-tracking fields)
standardFields = ('OBJECTID', 'SHAPE', 'Shape', 'SHAPE_Length', 'SHAPE_Area', 'ZOrder',
                  'AnnotationClassID', 'Status', 'TextString', 'FontName', 'FontSize', 'Bold',
                  'Italic', 'Underline', 'VerticalAlignment', 'HorizontalAlignment',
                  'XOffset', 'YOffset', 'Angle', 'FontLeading', 'WordSpacing', 'CharacterWidth',
                  'CharacterSpacing', 'FlipAngle', 'Override', 'Shape_Length', 'Shape_Area', 'last_edited_date',
                  'last_edited_user', 'created_date', 'created_user')
# fields whose values must be defined in Glossary
gFieldDefList = ('Type', 'TypeModifier', 'LocationMethod', 'Lithology', 'ProportionTerm', 'TimeScale',
                 'Qualifier', 'ExistenceConfidence', 'IdentityConfidence', 'Property',
                 'ScientificConfidence', 'ParagraphStyle', 'AgeUnits')
# minimum elements a conformant geodatabase must contain
requiredTables = ['DataSources', 'DescriptionOfMapUnits', 'Glossary', 'GeoMaterialDict']
requiredFeatureDataSets = ['GeologicMap']
requiredMapFeatureClasses = ['ContactsAndFaults', 'MapUnitPolys']
# ---------------------------------------------------------------------------
# Module-level accumulators, populated as the geodatabase is inventoried.
# Lists seeded with an HTML string use that string as the report-section
# heading; everything appended afterwards is a report line.
# ---------------------------------------------------------------------------
tables = []
fdsfc = []
all_IDs = []  # list of should-be unique identifiers
allMapUnitRefs = []  # list of MapUnit references (from various Poly feature data sets)
allGlossaryRefs = []  # list of all references to Glossary
allDataSourcesRefs = []  # list of all references to DataSources
missingRequiredValues = ['<span class="highlight">Fields that are missing required values</span>']
gdbDescription = []
schemaErrors = []
schemaExtensions = []
duplicateIDs = ['<span class="highlight">Duplicate _ID values</span>']
unreferencedIds = [
    '<span class="highlight">OwnerIDs and ValueLinkIDs in ExtendedAttributes that are absent elsewhere in the database</span>']
extendedAttribIDs = []
missingSourceIDs = [
    '<span class="highlight">Missing DataSources entries. Only one reference to each missing source is cited</span>']
unusedDataSources = [
    '<span class="highlight">Entries in DataSources that are not otherwise referenced in database</span>']
dataSourcesIDs = []
missingDmuMapUnits = [
    '<span class="highlight">MapUnits missing from DMU. Only one reference to each missing unit is cited</span>']
missingStandardLithMapUnits = [
    '<span class="highlight">MapUnits missing from StandardLithology. Only one reference to each missing unit is cited</span>']
unreferencedDmuMapUnits = ['<span class="highlight">MapUnits in DMU that are not present on map or in CMU</span>']
unreferencedStandardLithMapUnits = [
    '<span class="highlight">MapUnits in StandardLithology that are not present on map</span>']
equivalenceErrors = [
    '<table style="text-align:center"><tr><th>MapUnit</th><th>map</th><th>DMU</th><th>CMU</th><th>cross sections</th></tr>']
# MapUnit values collected per source (DMU, CMU, geologic map, cross sections, StandardLithology)
dmuMapUnits = []
cmuMapUnits = []
gmapMapUnits = []
csMapUnits = []
standardLithMapUnits = []
missingGlossaryTerms = [
    '<span class="highlight">Missing terms in Glossary. Only one reference to each missing term is cited</span>']
unusedGlossaryTerms = ['<span class="highlight">Terms in Glossary that are not otherwise used in geodatabase</span>']
glossaryTerms = []
unusedGeologicEvents = [
    '<span class="highlight">Events in GeologicEvents that are not cited in ExtendedAttributes</span>']
hKeyErrors = ['<span class="highlight">HierarchyKey errors, DescriptionOfMapUnits</span>']
zeroLengthStrings = ['<span class="highlight">Zero-length strings</span>']
def listDataSet(dataSet):
    """Inventory one table/feature class: record count plus all non-standard fields.

    Appends HTML fragments to the module-level gdbDescription list and logs
    progress with addMsgAndPrint.  Failures are reported but never raised, so
    one unreadable dataset does not abort the whole inventory.
    """
    addMsgAndPrint(' ' + dataSet)
    startTime = time.time()
    try:
        nrows = arcpy.GetCount_management(dataSet)
        elapsedTime = time.time() - startTime
        if debug: addMsgAndPrint(' ' + str(nrows) + ' rows ' + "%.1f" % elapsedTime + ' sec')
        gdbDescription.append(
            '<h4><span class="tables">' + dataSet + '</span>, ' + str(nrows) + ' records</h4>\n<ul>\n')
    except Exception:  # narrowed from bare except: let SystemExit/KeyboardInterrupt propagate
        addMsgAndPrint('Could not get number of rows for ' + dataSet)
        gdbDescription.append('<h4><span class="tables">' + dataSet + '</span>, unknown # of records</h4>')
    startTime = time.time()
    try:
        fields = arcpy.ListFields(dataSet)
        # BUG FIX: elapsedTime was previously left over from the row-count timing above
        # (stale value, and unbound if that block failed early); time this step itself.
        elapsedTime = time.time() - startTime
        if debug: addMsgAndPrint(' ' + str(len(fields)) + ' fields ' + "%.1f" % elapsedTime + ' sec')
        for field in fields:
            if not (field.name in standardFields):
                gdbDescription.append('<li><span class="fields">' + field.name + '</span> - ' + field.type + ':' + str(
                    field.length) + ' - Required: ' + str(field.required) + '</li>\n')
        gdbDescription.append('</ul>')
    except Exception:
        addMsgAndPrint('Could not inventory fields in ' + dataSet)
        gdbDescription.append(space6 + 'no field inventory for ' + dataSet)
def checkMapFeatureClasses(fds, prefix, fcs):
    """Record a schema error for every required map feature class absent from fcs."""
    addMsgAndPrint(' Checking for required feature classes...')
    expected = [prefix + fc for fc in requiredMapFeatureClasses]
    for fc in expected:
        if fc not in fcs:
            schemaErrors.append(
                'Feature data set <span class="tables">' + fds + '</span>, feature class <span class="tables">' + fc + '</span> is missing')
def checkTableFields(dBTable, defTable):
    """Compare the fields of dBTable against the schema definition tableDict[defTable].

    Appends to the module-level lists:
      * schemaErrors     -- required fields that are missing, or present with the wrong type
      * schemaExtensions -- fields present in the table but neither required nor ESRI-standard
    """
    dBtable = str(dBTable)
    # build dictionary of required fields, keyed by field name
    requiredFields = {}
    requiredFieldDefs = tableDict[defTable]
    for fieldDef in requiredFieldDefs:
        requiredFields[fieldDef[0]] = fieldDef
    try:
        # build dictionary of existing fields, keyed by field name
        existingFields = {}
        for field in arcpy.ListFields(dBtable):
            existingFields[field.name] = field
        # required fields that are absent from the table
        for field in requiredFields:
            if field not in existingFields:
                schemaErrors.append(
                    '<span class="tables">' + dBTable + '</span>, field <span class="fields">' + field + '</span> is missing')
        # BUG FIX: the old code iterated requiredFields here as well, so schema
        # extensions could never be detected (a required field is by definition
        # not an extension) and type-checking a missing field raised a KeyError
        # that the broad except mislabelled as "could not get field list".
        # Iterate the fields actually present instead.
        for field in existingFields:
            if not (field in standardFields) and not (field in requiredFields):
                schemaExtensions.append(
                    '<span class="tables">' + dBTable + '</span>, field <span class="fields">' + field + '</span>')
            # check field type against the definition
            if field in requiredFields:
                if existingFields[field].type != requiredFields[field][1]:
                    schemaErrors.append(
                        '<span class="tables">' + dBTable + '</span>, field <span class="fields">' + field + '</span>, type should be ' +
                        requiredFields[field][1])
        # NOTE: nullable-vs-non-nullable is deliberately NOT checked here; using Arc
        # field properties to enforce non-nullability is unreliable.  Illicit null /
        # empty values are inventoried elsewhere in this script.
    except Exception:  # narrowed from bare except
        schemaErrors.append(dBTable + ' could not get field list. Fields not checked.')
def loadTableValues(tableName, fieldName, valueList):
try:
rows = arcpy.SearchCursor(tableName, '', '', fieldName)
except:
loadValuesFlag = False
else:
row = next(rows)
while row:
if row.getValue(fieldName) != None:
valueList.append(row.getValue(fieldName))
row | |
import unittest
from nose.tools import assert_equals, assert_true, assert_false
from robotide.robotapi import TestCaseFile
from robotide.controller import Project
from robotide.controller.macrocontrollers import KEYWORD_NAME_FIELD
from robotide.controller.commands import (
Undo, FindOccurrences, FindVariableOccurrences, NullObserver,
RenameKeywordOccurrences, ChangeCellValue)
from robotide.controller.filecontrollers import (
TestCaseFileController, TestCaseTableController, TestCaseController)
from robotide.publish import PUBLISHER
from robotide.publish.messages import RideItemStepsChanged,\
RideItemSettingsChanged, RideItemNameChanged
from robotide.namespace.namespace import Namespace
from robotide.spec.librarymanager import LibraryManager
from robotide.usages.commands import FindUsages
from resources import FakeSettings
import datafilereader
# --- Shared fixture data for the occurrence/rename tests below ---
# Keyword names used as test-case steps
STEP1_KEYWORD = 'Log'
STEP2_ARGUMENT = 'No Operation'
TEST1_NAME = 'Test'
UNUSED_KEYWORD_NAME = 'Foo'
# User keywords added to the keyword table
USERKEYWORD1_NAME = 'User Keyword'
USERKEYWORD2_NAME = 'Juuser kei woord'
# Keywords referenced from test- and suite-level settings
SETUP_KEYWORD = 'Setup Kw'
TEMPLATE_KEYWORD = 'Template Kw'
SUITE_SETUP_KEYWORD = 'Suite Setup Kw'
SUITE_TEST_SETUP_KEYWORD = 'Test Setup Kw'
SUITE_TEST_TEMPLATE_KEYWORD = 'Test Template Kw'
SUITE_NAME = 'Some Suite'
KEYWORD_IN_USERKEYWORD1 = 'Some Keyword'
# User keyword taking embedded arguments
EMBEDDED_ARGUMENTS_KEYWORD = "Pick '${fruit}' and '${action}' it"
def TestCaseControllerWithSteps(project=None, source='some_suite.txt'):
    """Build an in-memory suite and return (TestCaseController, namespace).

    The suite holds one test case (see ``_create_testcase``), three user
    keywords, and every suite-level fixture setting populated, so the
    occurrence/rename tests can exercise each usage location.
    """
    tcf = TestCaseFile()
    tcf.source = source
    # Consistency fix: use the module-level constant (same value as the old
    # 'Suite Setup Kw' literal) like the other settings below already do.
    tcf.setting_table.suite_setup.name = SUITE_SETUP_KEYWORD
    tcf.setting_table.test_setup.name = SUITE_TEST_SETUP_KEYWORD
    tcf.setting_table.test_teardown.name = 'Test Teardown Kw'
    tcf.setting_table.suite_teardown.name = 'Suite Teardown Kw'
    tcf.setting_table.test_template.value = SUITE_TEST_TEMPLATE_KEYWORD
    testcase = _create_testcase(tcf)
    uk = tcf.keyword_table.add(USERKEYWORD1_NAME)
    uk.add_step([KEYWORD_IN_USERKEYWORD1])
    uk = tcf.keyword_table.add(USERKEYWORD2_NAME)
    uk.add_step(['No Operation'])
    uk = tcf.keyword_table.add(EMBEDDED_ARGUMENTS_KEYWORD)
    uk.add_step(['No Operation'])
    if project is None:
        # Stand-alone use: back the namespace with a throw-away in-memory
        # library database.
        library_manager = LibraryManager(':memory:')
        library_manager.create_database()
        project = Project(Namespace(FakeSettings()),
                          library_manager=library_manager)
    tcf_ctrl = TestCaseFileController(tcf, project)
    project._controller = tcf_ctrl
    tctablectrl = TestCaseTableController(tcf_ctrl,
                                          tcf.testcase_table)
    return TestCaseController(tctablectrl, testcase), project._namespace
def _create_testcase(tcf):
    """Add the fixture test case (TEST1_NAME) to *tcf* and return it.

    The test covers every occurrence location: plain steps, a step argument
    (via Run Keyword), a user keyword call, an embedded-arguments call, a
    for-loop body, and the setup/teardown/template settings.
    """
    testcase = tcf.testcase_table.add(TEST1_NAME)
    for step in [[STEP1_KEYWORD, 'Hello'],
                 ['Run Keyword', STEP2_ARGUMENT],
                 [USERKEYWORD2_NAME],
                 ["Pick 'apple' and 'peel' it"]]:
        testcase.add_step(step)
    for_loop = testcase.add_for_loop([': FOR', '${i}', 'IN RANGE', '10'])
    for_loop.add_step(['Log', '${i}'])
    testcase.setup.name = SETUP_KEYWORD
    testcase.teardown.name = 'Teardown Kw'
    testcase.template.value = TEMPLATE_KEYWORD
    return testcase
def assert_occurrence(test_ctrl, kw_name, expected_source, expected_usage):
    """Assert the first occurrence of *kw_name* has the expected location
    and usage."""
    found = _first_occurrence(test_ctrl, kw_name)
    assert_equals(found.location, expected_source,
                  'Occurrence not in the right place')
    assert_equals(found.usage, expected_usage, 'Usage not in the right place')
def assert_variable_occurrence(occurrences, source, usage, count):
    """Assert that exactly *count* of *occurrences* sit at (source, usage)."""
    matches = sum(1 for occ in occurrences
                  if occ.location == source and occ.usage == usage)
    assert_equals(matches, count)
def check_for_variable_occurrences(test_ctrl, name, expected_occurrences):
    """Verify every (source, usage, count) expectation for variable *name*
    and that the expectations account for all found occurrences."""
    found = list(test_ctrl.execute(FindVariableOccurrences(name)))
    covered = 0
    for source, usage, count in expected_occurrences:
        assert_variable_occurrence(found, source, usage, count)
        covered += count
    assert_equals(covered, len(found))
def _first_occurrence(test_ctrl, kw_name):
    """Return the first occurrence of ``kw_name`` found via FindOccurrences.

    Raises AssertionError when there is none.  BUG FIX: FindOccurrences
    yields results lazily and a generator is always truthy, so the old
    ``if not occurrences`` guard could never fire — an empty result leaked
    out as StopIteration from ``next()`` instead of the intended
    AssertionError.  ``next(iter(...), None)`` handles both generators and
    plain sequences.
    """
    occurrences = iter(test_ctrl.execute(FindOccurrences(kw_name)))
    first = next(occurrences, None)
    if first is None:
        raise AssertionError('No occurrences found for "%s"' % kw_name)
    return first
def _get_ctrl_by_name(self, name, datafiles):
for file in datafiles:
if file.name == name:
return file
for test in file.tests:
if test.name == name:
return test
for kw in file.keywords:
if kw.name == name:
return kw
return None
class TestFindOccurrencesWithFiles(unittest.TestCase):
    """FindOccurrences/FindUsages tests run against real data files from
    the simple test suite project."""

    @classmethod
    def setUpClass(cls):
        cls.project_ctrl = datafilereader.construct_project(
            datafilereader.SIMPLE_TEST_SUITE_PATH)
        cls.ts1 = datafilereader.get_ctrl_by_name('TestSuite1',
                                                  cls.project_ctrl.datafiles)
        cls.ts2 = datafilereader.get_ctrl_by_name('TestSuite2',
                                                  cls.project_ctrl.datafiles)
        cls.ts3 = datafilereader.get_ctrl_by_name('TestSuite3',
                                                  cls.project_ctrl.datafiles)
        cls.resu = datafilereader.get_ctrl_by_name(
            datafilereader.SIMPLE_TEST_SUITE_RESOURCE_NAME,
            cls.project_ctrl.datafiles)

    @classmethod
    def tearDownClass(cls):
        cls.project_ctrl.close()

    def test_finds_only_occurrences_with_same_source(self):
        self.assert_occurrences(self.ts1, 'My Keyword', 2)
        self.assert_occurrences(self.ts2, 'My Keyword', 3)
        self.assert_occurrences(self.resu, 'My Keyword', 3)

    def test_first_occurrences_are_from_the_same_file(self):
        occ = self.resu.execute(FindOccurrences('My Keyword'))
        # BUG FIX: ``occ.next()`` is Python 2 only; use the builtin next()
        # as the rest of this module already does (e.g. next(rows)).
        assert_true(self.resu.filename.endswith(next(occ).item.parent.source))
        assert_equals(next(occ).source, self.ts2.source)
        assert_equals(next(occ).source, self.ts2.source)

    def test_finds_occurrences_that_are_unrecognized(self):
        self.assert_occurrences(self.ts1, 'None Keyword', 2)
        self.assert_occurrences(self.ts2, 'None Keyword', 4)

    def test_finds_occurrences_that_override_builtin(self):
        self.assert_occurrences(self.ts1, 'Log', 1)
        self.assert_occurrences(self.ts2, 'Log', 2)

    def test_ignores_definition_in_base_resource(self):
        self.assert_occurrences(self.resu, 'Keyword In Both Resources', 1)
        occ = _first_occurrence(self.resu, 'Keyword In Both Resources')
        assert_equals(occ.item.parent.source, 'inner_resource.txt')

    def test_rename_resu_occurrence_in_case_of_double_definition(self):
        old_name = 'Keyword In Both Resources'
        new_name = 'FiiFaa'
        for kw in [k for k in self.resu.keywords if k.name == old_name]:
            self.resu.execute(RenameKeywordOccurrences(kw.name, new_name,
                                                       NullObserver(),
                                                       kw.info))
            assert_equals(kw.name, new_name)

    def test_rename_embedded_arguments_keyword_but_dont_rename_occurrences(
            self):
        old_name = 'embedded ${args} keyword'
        new_name = 'unembedded keyword'
        self.assert_occurrences(self.ts3, old_name, 2)
        self.assert_occurrences(self.ts3, new_name, 0)
        self.ts3.execute(RenameKeywordOccurrences(old_name, new_name,
                                                  NullObserver()))
        self.assert_occurrences(self.ts3, old_name, 1)
        self.assert_occurrences(self.ts3, new_name, 1)

    def test_rename_embedded_arguments_keyword_with_another_embedded_arguments_keyword(self):
        old_name = '2nd embedded ${args} keyword'
        new_name = '2nd embedded args keyword with ${trailing args}'
        self.assert_occurrences(self.ts3, old_name, 2)
        self.assert_occurrences(self.ts3, new_name, 0)
        self.ts3.execute(RenameKeywordOccurrences(old_name, new_name,
                                                  NullObserver()))
        self.assert_occurrences(self.ts3, old_name, 1)
        self.assert_occurrences(self.ts3, new_name, 1)

    def test_finding_from_test_setup_with_run_keyword(self):
        self._assert_usage('Test Setup Keyword', 'Setup')

    def test_finding_from_suite_setup_with_run_keyword(self):
        self._assert_usage('Suite Setup Keyword', 'Suite Setup')

    def test_finding_from_test_teardown_with_run_keyword(self):
        self._assert_usage('Test Teardown Keyword', 'Teardown')

    def test_finding_from_keyword_teardown(self):
        self._assert_usage('Keyword Teardown Keyword', 'Teardown')

    def test_finding_from_test_teardown_in_settings(self):
        self._assert_usage('Test Teardown in Setting', 'Test Teardown')

    def test_occurrences_in_suite_documentation_should_not_be_found(self):
        self._assert_no_usages('suitedocmatch')

    def test_occurrences_in_test_documentation_should_not_be_found(self):
        self._assert_no_usages('testdocmatch')

    def test_occurrences_in_keyword_documentation_should_not_be_found(self):
        self._assert_no_usages('keyworddocmatch')

    def _assert_usage(self, keyword, usage):
        occ = list(self.ts2.execute(FindUsages(keyword)))
        self.assertEqual(len(occ), 1)
        self.assertEqual(occ[0].usage, usage)

    def _assert_no_usages(self, keyword):
        self.assertEqual(list(self.ts2.execute(FindUsages(keyword))), [])

    def assert_occurrences(self, ctrl, kw_name, count):
        assert_equals(sum(1 for _ in ctrl.execute(FindOccurrences(kw_name))),
                      count)
class FindOccurrencesTest(unittest.TestCase):
    """FindOccurrences tests against the in-memory fixture suite built by
    TestCaseControllerWithSteps()."""
    @classmethod
    def setUpClass(cls):
        cls.test_ctrl, cls.namespace = TestCaseControllerWithSteps()
    def test_no_occurrences(self):
        find_occurrences = FindOccurrences('Keyword Name')
        occurrences = self.test_ctrl.execute(find_occurrences)
        assert_equals([i for i in occurrences], [])
    def test_occurrences_in_steps(self):
        assert_occurrence(self.test_ctrl, STEP1_KEYWORD, TEST1_NAME, 'Steps')
    def test_occurrences_in_step_arguments(self):
        # Keyword names given as arguments (to Run Keyword) count as usages.
        assert_occurrence(self.test_ctrl, STEP2_ARGUMENT, TEST1_NAME, 'Steps')
    def test_occurrences_are_case_and_space_insensitive(self):
        assert_occurrence(self.test_ctrl, 'R un KE Y W O rd', TEST1_NAME,
                          'Steps')
        assert_occurrence(self.test_ctrl, 'se tu p KW ', TEST1_NAME, 'Setup')
    def test_embedded_arguments_occurrence(self):
        assert_occurrence(self.test_ctrl, EMBEDDED_ARGUMENTS_KEYWORD,
                          TEST1_NAME, 'Steps')
    def test_unknown_variable_occurrences(self):
        self.assertEqual(list(self.test_ctrl.execute(FindOccurrences(
            '${some unknown variable}'))), [])
    def test_occurrences_in_test_metadata(self):
        # Setup/Teardown/Template settings of the test case are searched.
        assert_occurrence(self.test_ctrl, SETUP_KEYWORD,
                          TEST1_NAME, 'Setup')
        assert_occurrence(self.test_ctrl, 'Teardown Kw',
                          TEST1_NAME, 'Teardown')
        assert_occurrence(self.test_ctrl, TEMPLATE_KEYWORD,
                          TEST1_NAME, 'Template')
    def test_occurrences_in_suite_metadata(self):
        # Suite-level fixture settings are searched as well.
        assert_occurrence(self.test_ctrl, SUITE_SETUP_KEYWORD,
                          SUITE_NAME, 'Suite Setup')
        assert_occurrence(self.test_ctrl, 'Test Setup Kw',
                          SUITE_NAME, 'Test Setup')
        assert_occurrence(self.test_ctrl, 'Test Teardown Kw',
                          SUITE_NAME, 'Test Teardown')
        assert_occurrence(self.test_ctrl, 'Suite Teardown Kw',
                          SUITE_NAME, 'Suite Teardown')
        assert_occurrence(self.test_ctrl, 'Test Template Kw',
                          SUITE_NAME, 'Test Template')
    def test_occurrences_in_user_keywords(self):
        assert_occurrence(self.test_ctrl, KEYWORD_IN_USERKEYWORD1,
                          USERKEYWORD1_NAME, 'Steps')
    def test_occurrence_in_user_keyword_name(self):
        assert_occurrence(self.test_ctrl, USERKEYWORD1_NAME,
                          USERKEYWORD1_NAME, KEYWORD_NAME_FIELD)
class FindVariableOccurrencesTest(unittest.TestCase):
    """FindVariableOccurrences tests against the 'find where used' demo
    project; expectations are (source, usage, count) tuples checked by
    check_for_variable_occurrences()."""
    @classmethod
    def setUpClass(cls):
        project = datafilereader.construct_project(
            datafilereader.FINDWHEREUSED_VARIABLES_PATH)
        cls._suite1 = _get_ctrl_by_name(cls, "Suite 1", project.datafiles)
        cls._suite2 = _get_ctrl_by_name(cls, "Suite 2", project.datafiles)
        cls._resource1 = _get_ctrl_by_name(cls, "Res1", project.datafiles)
        cls._case1 = _get_ctrl_by_name(cls, "Case 1", project.datafiles)
        cls._case2 = _get_ctrl_by_name(cls, "Case 2", project.datafiles)
        cls._case3 = _get_ctrl_by_name(cls, "Case 3", project.datafiles)
        cls._case4 = _get_ctrl_by_name(cls, "Case 4", project.datafiles)
        cls._case5 = _get_ctrl_by_name(cls, "Case 5", project.datafiles)
        cls._case6 = _get_ctrl_by_name(cls, "Case 6", project.datafiles)
        cls._kw1 = _get_ctrl_by_name(cls, "User KW 1", project.datafiles)
        cls._kw2 = _get_ctrl_by_name(cls, "User KW 2", project.datafiles)
    def test_occurrences_local_variable(self):
        # Variables local to one test/keyword are reported only there.
        check_for_variable_occurrences(self._case2, "${log}",
                                       ((self._case2.name, 'Steps', 2),
                                        (self._case2.name, 'Documentation',
                                         1)))
        check_for_variable_occurrences(self._kw2, "${arg1}",
                                       ((self._kw2.name, 'Arguments', 1),
                                        (self._kw2.name, 'Documentation', 1),
                                        (self._kw2.name, 'Steps', 1)))
        check_for_variable_occurrences(
            self._kw2, "@{arg2}", ((self._kw2.name, 'Arguments', 1),
                                   (self._kw2.name, 'Teardown', 1),
                                   (self._kw2.name, 'Steps', 1)))
    def test_occurrences_file_variable(self):
        # Variable-table variables are visible throughout their file.
        check_for_variable_occurrences(self._case1, "${fileVar}",
                                       ((self._case2.name, 'Teardown', 1),
                                        (self._case1.name, 'Setup', 1),
                                        (self._case3.name, 'Steps', 1),
                                        (self._suite1.name, 'Variable Table',
                                         1)))
        check_for_variable_occurrences(
            self._kw2, "${resVar}",
            ((self._resource1.name, 'Variable Table', 1), (self._kw2.name,
                                                           'Steps', 1),
             (self._kw1.name, 'Teardown', 1),
             (self._case5.name, 'Steps', 1),
             (self._case5.name, 'Documentation', 1)))
    def test_occurrences_imported_variable(self):
        # Same expectations whether searched from the importer or importee.
        check_for_variable_occurrences(
            self._case5, "${resVar}",
            ((self._resource1.name, 'Variable Table', 1),
             (self._kw2.name, 'Steps', 1),
             (self._kw1.name, 'Teardown', 1),
             (self._case5.name, 'Steps', 1),
             (self._case5.name, 'Documentation', 1)))
    def test_occurrences_external_file_variable(self):
        check_for_variable_occurrences(
            self._case2, "${ServerHost}", ((self._case1.name, 'Steps', 1),
                                           (self._case2.name, 'Steps', 1),
                                           (self._case5.name, 'Steps', 1)))
        check_for_variable_occurrences(
            self._case5, "${ServerHost}", ((self._case1.name, 'Steps', 1),
                                           (self._case2.name, 'Steps', 1),
                                           (self._case5.name, 'Steps', 1)))
        check_for_variable_occurrences(
            self._case1, "${ServerPort}", ((self._case1.name, 'Steps', 1),
                                           (self._kw1.name, 'Steps', 1)))
    def test_occurrences_builtin_variable(self):
        # Built-in variables (${True}, ${False}, ${EMPTY}) are matched too.
        check_for_variable_occurrences(self._kw1,
                                       "${True}",
                                       ((self._case4.name, 'Steps', 1),
                                        (self._case6.name, 'Setup', 1),
                                        (self._case6.name, 'Steps', 1),
                                        (self._kw1.name, 'Steps', 1)))
        check_for_variable_occurrences(
            self._case6, "${False}", ((self._case6.name, 'Documentation', 1),
                                      (self._case1.name, 'Steps', 1),
                                      (self._kw1.name, 'Steps', 1)))
        check_for_variable_occurrences(
            self._case3, "${EMPTY}",
            ((self._resource1.name, 'Variable Table', 1),
             (self._case3.name, 'Steps', 1)))
class RenameOccurrenceTest(unittest.TestCase):
    def setUp(self):
        """Create a fresh fixture suite and subscribe change listeners."""
        self.test_ctrl, self.namespace = TestCaseControllerWithSteps()
        self._steps_have_changed = False
        self._testcase_settings_have_changed = False
        self._name_has_changed = False
        # (listener, topic) pairs so tearDown can unsubscribe symmetrically.
        self._listeners_and_topics = [(self._steps_changed,
                                       RideItemStepsChanged),
                                      (self._testcase_settings_changed,
                                       RideItemSettingsChanged),
                                      (self._name_changed,
                                       RideItemNameChanged)]
        for listener, topic in self._listeners_and_topics:
            PUBLISHER.subscribe(listener, topic)
    def tearDown(self):
        """Unsubscribe every listener registered in setUp."""
        for listener, topic in self._listeners_and_topics:
            PUBLISHER.unsubscribe(listener, topic)
    def _steps_changed(self, test):
        # Listener for RideItemStepsChanged.
        self._steps_have_changed = True
    def _testcase_settings_changed(self, message):
        # Listener for RideItemSettingsChanged; only flag our own test case.
        if self.test_ctrl == message.item:
            self._testcase_settings_have_changed = True
    def _name_changed(self, data):
        # Listener for RideItemNameChanged.
        self._name_has_changed = True
    def _expected_messages(self, steps_have_changed=False,
                           testcase_settings_have_changed=False,
                           name_has_changed=False):
        """Assert that exactly the given change notifications were seen."""
        assert_equals(self._steps_have_changed, steps_have_changed)
        assert_equals(self._testcase_settings_have_changed,
                      testcase_settings_have_changed)
        assert_equals(self._name_has_changed, name_has_changed)
    def _rename(self, original_name, new_name, source, usage):
        """Rename all occurrences, then verify the new name shows up at
        (source, usage)."""
        self.test_ctrl.execute(RenameKeywordOccurrences(original_name,
                                                        new_name,
                                                        NullObserver()))
        assert_occurrence(self.test_ctrl, new_name, source, usage)
    def test_rename_updates_namespace(self):
        # Renaming a user keyword must be reflected in namespace lookups.
        assert_true(self.namespace.is_user_keyword(self.test_ctrl.datafile,
                                                   USERKEYWORD2_NAME))
        assert_false(self.namespace.is_user_keyword(self.test_ctrl.datafile,
                                                    UNUSED_KEYWORD_NAME))
        self._rename(USERKEYWORD2_NAME, UNUSED_KEYWORD_NAME, TEST1_NAME,
                     'Steps')
        assert_true(self.namespace.is_user_keyword(self.test_ctrl.datafile,
                                                   UNUSED_KEYWORD_NAME))
        assert_false(self.namespace.is_user_keyword(self.test_ctrl.datafile,
                                                    USERKEYWORD2_NAME))
    def test_notifies_only_after_transaction_complete(self):
        # Both rename effects (step text and keyword definition name) must
        # already be in place when either change notification is published.
        datas_ok = {'steps': False, 'name': False}
        def name_changed_check_that_steps_have_also(data):
            datas_ok['steps'] = \
                self.test_ctrl.step(2).keyword == UNUSED_KEYWORD_NAME
        def steps_changed_check_that_name_has_also(data):
            datas_ok['name'] = any(True for i in
                                   self.test_ctrl.datafile_controller.keywords
                                   if i.name == UNUSED_KEYWORD_NAME)
        PUBLISHER.subscribe(name_changed_check_that_steps_have_also,
                            RideItemNameChanged)
        PUBLISHER.subscribe(steps_changed_check_that_name_has_also,
                            RideItemStepsChanged)
        try:
            self._rename(USERKEYWORD2_NAME, UNUSED_KEYWORD_NAME, TEST1_NAME,
                         'Steps')
        finally:
            # Always unsubscribe, even when the rename assertion fails.
            PUBLISHER.unsubscribe(name_changed_check_that_steps_have_also,
                                  RideItemNameChanged)
            PUBLISHER.unsubscribe(steps_changed_check_that_name_has_also,
                                  RideItemStepsChanged)
        assert_true(datas_ok['steps'])
        assert_true(datas_ok['name'])
    def test_rename_in_steps(self):
        # Renaming a keyword used as a step updates the step text.
        self._rename(STEP1_KEYWORD, UNUSED_KEYWORD_NAME, TEST1_NAME, 'Steps')
        self._expected_messages(steps_have_changed=True)
    def test_rename_with_dollar_sign(self):
        # A '$' in the new name must not be treated as a variable marker.
        self._rename(STEP1_KEYWORD, UNUSED_KEYWORD_NAME+'$', TEST1_NAME,
                     'Steps')
        self._expected_messages(steps_have_changed=True)
    def test_undo_rename_in_step(self):
        # Undo restores the original keyword name in the step.
        self._rename(STEP1_KEYWORD, UNUSED_KEYWORD_NAME, TEST1_NAME, 'Steps')
        self.test_ctrl.execute(Undo())
        assert_equals(self.test_ctrl.steps[0].keyword, STEP1_KEYWORD)
    def test_undo_after_renaming_to_something_that_is_already_there(self):
        # Undo must not clobber pre-existing uses of the rename target name.
        self._rename(STEP1_KEYWORD, STEP2_ARGUMENT, TEST1_NAME, 'Steps')
        self.test_ctrl.execute(Undo())
        assert_equals(self.test_ctrl.steps[1].args[0], STEP2_ARGUMENT)
    def test_rename_steps_argument(self):
        # Keyword names passed as arguments (to Run Keyword) get renamed too.
        self._rename(STEP2_ARGUMENT, UNUSED_KEYWORD_NAME, TEST1_NAME, 'Steps')
        self._expected_messages(steps_have_changed=True)
        assert_equals(self.test_ctrl.steps[1].as_list(), ['Run Keyword',
                                                          UNUSED_KEYWORD_NAME])
    def test_user_keyword_rename(self):
        # Renaming a user keyword changes its definition name field.
        self._rename(USERKEYWORD1_NAME, UNUSED_KEYWORD_NAME,
                     UNUSED_KEYWORD_NAME, KEYWORD_NAME_FIELD)
        self._expected_messages(name_has_changed=True)
    def test_rename_in_test_setup(self):
        # Rename reaches the test-level Setup setting and dirties the file.
        self._rename(SETUP_KEYWORD, UNUSED_KEYWORD_NAME, TEST1_NAME, 'Setup')
        self._expected_messages(testcase_settings_have_changed=True)
        self.assertTrue(self.test_ctrl.dirty)
    def test_rename_in_test_template(self):
        # Rename reaches the test-level Template setting.
        self._rename(TEMPLATE_KEYWORD, UNUSED_KEYWORD_NAME,
                     TEST1_NAME, 'Template')
        self._expected_messages(testcase_settings_have_changed=True)
        self.assertTrue(self.test_ctrl.dirty)
    def test_rename_in_suite_metadata(self):
        # Suite Setup rename publishes no per-test notifications.
        self._rename(SUITE_SETUP_KEYWORD, UNUSED_KEYWORD_NAME, SUITE_NAME,
                     'Suite Setup')
        self._expected_messages()
        self.assertTrue(self.test_ctrl.dirty)
    def test_rename_in_suite_test_setup(self):
        # Suite-level Test Setup rename publishes no per-test notifications.
        self._rename(SUITE_TEST_SETUP_KEYWORD, UNUSED_KEYWORD_NAME, SUITE_NAME,
                     'Test Setup')
        self._expected_messages()
        self.assertTrue(self.test_ctrl.dirty)
    def test_rename_in_suite_test_template(self):
        # Suite-level Test Template rename publishes no per-test notifications.
        self._rename(SUITE_TEST_TEMPLATE_KEYWORD, UNUSED_KEYWORD_NAME,
                     SUITE_NAME, 'Test Template')
        self._expected_messages()
        self.assertTrue(self.test_ctrl.dirty)
    def test_rename_in_user_keywords(self):
        # Rename reaches steps inside user keyword bodies.
        self._rename(KEYWORD_IN_USERKEYWORD1, UNUSED_KEYWORD_NAME,
                     USERKEYWORD1_NAME, 'Steps')
        self._expected_messages(steps_have_changed=True)
    def test_rename_given_prefixed_keywords(self):
        # BDD 'Given' prefix must survive a rename of the bare keyword.
        kw = 'BLOdkajasdj'
        self._add_step('Given '+kw)
        self._rename(kw, UNUSED_KEYWORD_NAME, TEST1_NAME, 'Steps')
        self._expected_messages(steps_have_changed=True)
        # NOTE(review): step(100)/as_list()[100] look out of range for this
        # fixture -- confirm the intended indices.
        self.assertEqual(self.test_ctrl.step(100).as_list()[100],
                         'Given '+UNUSED_KEYWORD_NAME)
    def test_rename_when_prefixed_keywords(self):
        # Case-insensitive BDD 'When' prefix must survive the rename.
        kw = 'fjsdklhf37849'
        self._add_step('wHEn '+kw)
        self._rename(kw, UNUSED_KEYWORD_NAME, TEST1_NAME, 'Steps')
        self._expected_messages(steps_have_changed=True)
        # NOTE(review): step(100)/[100] indices look out of range -- confirm.
        self.assertEqual(self.test_ctrl.step(100).as_list()[100],
                         'wHEn '+UNUSED_KEYWORD_NAME)
    def test_rename_then_prefixed_keywords(self):
        # Case-insensitive BDD 'Then' prefix must survive the rename.
        kw = 'djkfsekrhnbdxcvzo dsjah'
        self._add_step('THen '+kw)
        self._rename(kw, UNUSED_KEYWORD_NAME, TEST1_NAME, 'Steps')
        self._expected_messages(steps_have_changed=True)
        # NOTE(review): step(100)/[100] indices look out of range -- confirm.
        self.assertEqual(self.test_ctrl.step(100).as_list()[100],
                         'THen '+UNUSED_KEYWORD_NAME)
    def test_rename_and_prefixed_keywords(self):
        # BDD 'AND' prefix must survive the rename.
        kw = 'mmxznbfje uiriweyi yr iu fjkdhzxck'
        self._add_step('AND '+kw)
        self._rename(kw, UNUSED_KEYWORD_NAME, TEST1_NAME, 'Steps')
        self._expected_messages(steps_have_changed=True)
        # NOTE(review): step(100)/[100] indices look out of range -- confirm.
        self.assertEqual(self.test_ctrl.step(100).as_list()[100],
                         'AND '+UNUSED_KEYWORD_NAME)
    def test_rename_but_prefixed_keywords(self):
        # Case-insensitive BDD 'But' prefix must survive the rename.
        kw = 'sdlmclkds dslcm ldsm sdclmklm'
        self._add_step('bUt '+kw)
        self._rename(kw, UNUSED_KEYWORD_NAME, TEST1_NAME, 'Steps')
        self._expected_messages(steps_have_changed=True)
        # NOTE(review): step(100)/[100] indices look out of range -- confirm.
        self.assertEqual(self.test_ctrl.step(100).as_list()[100],
                         'bUt '+UNUSED_KEYWORD_NAME)
def | |
import random
import math
import cv2
import numpy as np
import pylab as plt
import scipy.io as sio
import scipy.misc as sm
import os
from os import listdir, makedirs, system
def get_minibatches_idx(n,
                        minibatch_size,
                        shuffle=False,
                        min_frame=None,
                        trainfiles=None,
                        del_list=None):
  """
  Used to shuffle the dataset at each iteration.

  Splits range(n) into index minibatches.  When ``min_frame`` is given,
  indices whose video directory (first token of the matching ``trainfiles``
  line) holds fewer than ``min_frame`` '.png' frames are removed; the
  discarded indices are returned so callers can pass them back later.

  Returns:
    (enumerated_batches, del_list): a zip of (batch_number, index_array)
    pairs, and the discarded-index list (None when ``min_frame`` is None).
  """
  idx_list = np.arange(n, dtype="int32")
  if min_frame is not None:  # fix: was '!= None' (PEP 8: use 'is not None')
    if del_list is None:  # fix: was '== None'
      del_list = list()
    # NOTE(review): when a non-empty del_list is passed in, this scan still
    # runs and appends to it again -- confirm that is intended.
    for i in idx_list:
      vid_path = trainfiles[i].split()[0]
      length = len([f for f in listdir(vid_path) if f.endswith('.png')])
      if length < min_frame:
        del_list.append(i)
    print('[!] Discarded %d samples from training set!' % len(del_list))
    idx_list = np.delete(idx_list, del_list)
  if shuffle:
    random.shuffle(idx_list)
  minibatches = []
  minibatch_start = 0
  # NOTE(review): the batch count is computed from the original n, not from
  # len(idx_list) after deletions, so trailing batches can be empty.
  for i in range(n // minibatch_size):
    minibatches.append(
        idx_list[minibatch_start:minibatch_start + minibatch_size])
    minibatch_start += minibatch_size
  if minibatch_start != n:
    # Make a minibatch out of what is left
    minibatches.append(idx_list[minibatch_start:])
  return zip(range(len(minibatches)), minibatches), del_list
def get_minibatches_idx2(n, minibatch_size, shuffle=False):
  """Shuffle (optionally) range(n) and split it into index minibatches.

  The last minibatch holds the remainder when ``n`` is not an exact
  multiple of ``minibatch_size``.
  """
  order = np.arange(n, dtype="int32")
  if shuffle:
    random.shuffle(order)
  full_span = (n // minibatch_size) * minibatch_size
  batches = [order[start:start + minibatch_size]
             for start in range(0, full_span, minibatch_size)]
  if full_span != n:
    # Whatever is left over becomes one short trailing minibatch.
    batches.append(order[full_span:])
  return zip(range(len(batches)), batches)
def merge(images, size):
  """Tile a stack of 2-D images into one size[0] x size[1] grid image.

  Args:
    images: array of shape (N, h, w), tiled row-major into the grid.
    size: (rows, cols) of the grid.

  Returns:
    Array of shape (h * rows, w * cols).
  """
  h, w = images.shape[1], images.shape[2]
  img = np.zeros((h * size[0], w * size[1]))
  for idx, image in enumerate(images):
    # BUG FIX: '/' is true division in Python 3, which made the row index a
    # float and broke the slice below; divmod gives integer row/column.
    j, i = divmod(idx, size[1])
    img[j * h:j * h + h, i * w:i * w + w] = image
  return img
def imsave(images, size, path):
  # Tile *images* into a size[0] x size[1] grid (see merge) and write it to
  # *path*.  NOTE(review): scipy.misc.imsave was deprecated and removed in
  # SciPy 1.2 -- consider imageio.imwrite when upgrading dependencies.
  return sm.imsave(path, merge(images, size))
def visualize_lm(posex, posey, visib, vid_path, vid_imgs, stidx, t,
                 image_size):
  """Render 13-joint pose landmarks as 48 Gaussian limb heatmaps and return
  their per-pixel maximum as one (image_size, image_size) map.

  posex/posey are normalized [0, 1] joint coordinates and visib flags joint
  visibility; joints outside [0, 1] are forced invisible (visib is modified
  in place).  The frame at vid_imgs[stidx] is read only for its pixel size.
  NOTE(review): parameter ``t`` is unused here -- confirm it can be dropped
  from callers.
  """
  # Each entry [a, b, c, d] defines a limb segment from the midpoint of
  # joints (a, b) to the midpoint of joints (c, d).
  lines = [[0, 0, 1, 2], [1, 1, 2, 2], [1, 1, 3, 3], [3, 3, 5, 5],
           [2, 2, 4, 4], [4, 4, 6, 6], [1, 2, 7, 8], [7, 7, 8, 8],
           [7, 7, 9, 9], [9, 9, 11, 11], [8, 8, 10, 10], [10, 10, 12, 12]]
  img = cv2.imread(vid_path + '/' + vid_imgs[stidx])
  visib[posey > 1], visib[posey < 0], visib[posex > 1], visib[
      posex < 0] = 0, 0, 0, 0
  # Scale normalized coordinates to pixel coordinates.
  posey = posey * img.shape[0]
  posex = posex * img.shape[1]
  cpose = np.zeros((img.shape[0], img.shape[1], 48))
  for j in range(12):
    if visib[lines[j][0]] and visib[lines[j][1]] and \
        visib[lines[j][2]] and visib[lines[j][3]]:
      # Fully visible segment: 4 Gaussians interpolated along the limb.
      interp_x = np.linspace((posex[lines[j][0]] + posex[lines[j][1]]) / 2,
                             (posex[lines[j][2]] + posex[lines[j][3]]) / 2, 4,
                             True)
      interp_y = np.linspace((posey[lines[j][0]] + posey[lines[j][1]]) / 2,
                             (posey[lines[j][2]] + posey[lines[j][3]]) / 2, 4,
                             True)
      for k in range(4):
        gmask = gauss2D_mask(
            (interp_y[k], interp_x[k]), img.shape[:2], sigma=8.)
        cpose[:, :, j * 4 + k] = gmask / gmask.max()
    else:
      # Partially visible: place a Gaussian only at the visible endpoint(s).
      if visib[lines[j][0]] and visib[lines[j][1]]:
        point_x = (posex[lines[j][0]] + posex[lines[j][1]]) / 2
        point_y = (posey[lines[j][0]] + posey[lines[j][1]]) / 2
        gmask = gauss2D_mask((point_y, point_x), img.shape[:2], sigma=8.)
        cpose[:, :, j * 4] = gmask / gmask.max()
      if visib[lines[j][2]] and visib[lines[j][3]]:
        point_x = (posex[lines[j][2]] + posex[lines[j][3]]) / 2
        point_y = (posey[lines[j][2]] + posey[lines[j][3]]) / 2
        gmask = gauss2D_mask((point_y, point_x), img.shape[:2], sigma=8.)
        cpose[:, :, (j + 1) * 4 - 1] = gmask / gmask.max()
  cpose = cv2.resize(cpose, (image_size, image_size))
  return np.amax(cpose, axis=2)
def save_test_result(save_path, idx, pred, pose, mask, stidx, seen_step,
                     fut_step, vid_path, fname):
  """Save predicted vs. ground-truth pose visualizations for one sample.

  Writes per-step 'pred_*', 'pred_full_*' (all joints forced visible) and
  'gt_*' heatmap images plus a combined 'gt_pred.png' grid, dumps predicted
  joint pixel coordinates to joints.mat, and assembles GIFs with ffmpeg.

  NOTE(review): assumes pred rows are [x0..x12, y0..y12] normalized
  coordinates -- confirm against the producing model.
  """
  vid_path = vid_path.split()[0]
  lm_size = 13
  image_size = 128
  samples = None
  # Sample identifier: path component after 'frames'.
  fname = fname.split()[0].split('frames')[1][1:]
  save_path_tmp = save_path + '%s/' % fname
  if not os.path.exists(save_path_tmp):
    os.makedirs(save_path_tmp)
  vid_imgs = sorted([f for f in listdir(vid_path) if f.endswith('.png')])
  # First frame read only to recover the original pixel dimensions.
  img = cv2.imread(vid_path + '/' + vid_imgs[0])
  mat = {}
  mat['x'] = pred[:, :lm_size] * img.shape[1]
  mat['y'] = pred[:, lm_size:] * img.shape[0]
  sio.savemat(save_path_tmp + 'joints.mat', mat)
  for typ in range(2):
    # typ 0: model prediction, typ 1: ground truth.
    for stp in range(fut_step):
      if typ == 0:
        pre = pred[stp, :2 * lm_size]
        posex, posey, visib = (pre[:lm_size], pre[lm_size:],
                               mask[seen_step + stp, :])
      else:
        posex, posey, visib = (pose[seen_step + stp, :lm_size],
                               pose[seen_step + stp, lm_size:],
                               mask[seen_step + stp, :])
      sample = visualize_lm(posex, posey, visib, vid_path, vid_imgs, stidx,
                            stp, image_size)
      file_name = ('pred_' if typ == 0 else 'gt_') + '%04d.png' % stp
      sm.imsave(save_path_tmp + '/' + file_name, sample)
      if typ == 0:
        # Also render the prediction with every joint visible.
        sample_full = visualize_lm(posex, posey, np.ones_like(visib), vid_path,
                                   vid_imgs, stidx, stp, image_size)
        full_name = 'pred_full_%04d.png' % stp
        sm.imsave(save_path_tmp + '/' + full_name, sample_full)
      sample = sample.reshape((1, image_size, image_size))
      samples = sample if samples is None else np.concatenate(
          [samples, sample], axis=0)
  # 2 rows (pred / gt) x fut_step columns summary grid.
  imsave(samples, [2, fut_step], save_path_tmp + '/' + 'gt_pred.png')
  cmd1 = 'rm ' + save_path_tmp + '/' + 'gt.gif'
  cmd2 = 'ffmpeg -loglevel panic -f image2 -framerate 7 -i '+save_path_tmp\
      +'/gt_%04d.png '+save_path_tmp+'/gt.gif'
  #system(cmd1);
  system(cmd2)
  cmd1 = 'rm ' + save_path_tmp + '/' + 'pred.gif'
  cmd2 = 'ffmpeg -loglevel panic -f image2 -framerate 7 -i '+save_path_tmp\
      +'/pred_%04d.png '+save_path_tmp+'/pred.gif'
  #system(cmd1);
  system(cmd2)
  cmd1 = 'rm ' + save_path_tmp + '/' + 'pred_full.gif'
  cmd2 = 'ffmpeg -loglevel panic -f image2 -framerate 7 -i '+save_path_tmp\
      +'/pred_full_%04d.png '+save_path_tmp+'/pred_full.gif'
  #system(cmd1);
  system(cmd2)
  print('\t%d done.' % idx)
def save_test_result_all(save_path, idx, pred, pose, mask, stidx, seen_step,
                         fut_step, vid_path, fname):
  """Variant of save_test_result that renders predictions with ALL joints
  forced visible ('pred_full_*') and ground truth with the real mask.

  Also dumps predicted joint pixel coordinates to joints.mat and builds
  GIFs with ffmpeg.  NOTE(review): the 'pred.gif' command references
  pred_%04d.png frames that this function never writes -- confirm whether
  that ffmpeg call is intended here.
  """
  vid_path = vid_path.split()[0]
  lm_size = 13
  image_size = 128
  samples = None
  # Sample identifier: path component after 'frames'.
  fname = fname.split()[0].split('frames')[1][1:]
  save_path_tmp = save_path + '%s/' % fname
  if not os.path.exists(save_path_tmp):
    os.makedirs(save_path_tmp)
  vid_imgs = sorted([f for f in listdir(vid_path) if f.endswith('.png')])
  # First frame read only to recover the original pixel dimensions.
  img = cv2.imread(vid_path + '/' + vid_imgs[0])
  mat = {}
  mat['x'] = pred[:, :lm_size] * img.shape[1]
  mat['y'] = pred[:, lm_size:] * img.shape[0]
  sio.savemat(save_path_tmp + 'joints.mat', mat)
  for typ in range(2):
    # typ 0: prediction (all joints visible), typ 1: ground truth.
    for stp in range(fut_step):
      if typ == 0:
        pre = pred[stp, :2 * lm_size]
        posex, posey, visib = (pre[:lm_size], pre[lm_size:],
                               np.ones_like(mask[seen_step + stp, :]))
      else:
        posex, posey, visib = (pose[seen_step + stp, :lm_size],
                               pose[seen_step + stp, lm_size:],
                               mask[seen_step + stp, :])
      sample = visualize_lm(posex, posey, visib, vid_path, vid_imgs, stidx,
                            stp, image_size)
      file_name = ('pred_full_' if typ == 0 else 'gt_') + '%04d.png' % stp
      sm.imsave(save_path_tmp + '/' + file_name, sample)
  cmd1 = 'rm ' + save_path_tmp + '/' + 'gt.gif'
  cmd2 = 'ffmpeg -loglevel panic -f image2 -framerate 7 -i '+save_path_tmp\
      +'/gt_%04d.png '+save_path_tmp+'/gt.gif'
  #system(cmd1);
  system(cmd2)
  cmd1 = 'rm ' + save_path_tmp + '/' + 'pred.gif'
  cmd2 = 'ffmpeg -loglevel panic -f image2 -framerate 7 -i '+save_path_tmp\
      +'/pred_%04d.png '+save_path_tmp+'/pred.gif'
  #system(cmd1);
  system(cmd2)
  cmd1 = 'rm ' + save_path_tmp + '/' + 'pred_full.gif'
  cmd2 = 'ffmpeg -loglevel panic -f image2 -framerate 7 -i '+save_path_tmp\
      +'/pred_full_%04d.png '+save_path_tmp+'/pred_full.gif'
  #system(cmd1);
  system(cmd2)
  print('\t%d done.' % idx)
def save_vid(line, pose_path, save_path, pad):
  """Render one video's frames with pose points overlaid and build a GIF.

  The first whitespace token of ``line`` is the frame directory (its path
  contains 'frames'); joint labels come from the sibling 'labels' .mat file
  and the 2-D pose track from ``pose_path``.  Each frame is cropped to its
  padded person bounding box, the pose points are plotted on it, and the
  per-frame images are assembled into a GIF with ffmpeg.

  Returns:
    1 on success, 0 when anything fails.
  """
  res = 0
  try:
    tokens = line.split()[0].split('frames')
    ff = sio.loadmat(tokens[0] + 'labels' + tokens[1] + '.mat')
    pose = np.load(pose_path + tokens[1] + '_img.npy')
    bboxes = ff['bbox']
    action = ff['action']
    imgs = sorted([f for f in listdir(line.split()[0]) if f.endswith('.jpg')])
    base_name = 'img_%s_%s.png' % (tokens[1].split('/')[-1], action[0])
    for j in range(len(imgs)):
      box = bboxes[j].round().astype('int32')
      img = cv2.imread(line.split()[0] + '/' + imgs[j])[:, :, ::-1]  # BGR->RGB
      # Pad the person box; the top-left corner is clamped to the image.
      y1 = max(box[1] - pad, 0)
      y2 = box[3] + pad
      x1 = max(box[0] - pad, 0)
      x2 = box[2] + pad
      patch = img[y1:y2, x1:x2]
      plt.clf()
      plt.imshow(patch)
      plt.axis('off')
      plt.tight_layout()
      new_pose = pose[j]
      for k in range(new_pose.shape[0]):
        plt.plot(new_pose[k, 0], new_pose[k, 1], 'o')
      plt.savefig(save_path + base_name + '_%05d.png' % j)
    # NOTE(security): command is built from file-system paths and run via a
    # shell; prefer subprocess.run([...]) if paths may contain metacharacters.
    cmd = 'ffmpeg -loglevel panic -f image2 -framerate 14 -i '+save_path\
        +base_name+'_%05d.png '+save_path+'img_%s_%s.gif'%(tokens[1].split('/')[-1], action[0])
    system(cmd)
    print('\t' + line.split()[0] + ' done.')
    res = 1
  except Exception:
    # BUG FIX: the original bare 'except:' printed the same ' done.' message
    # as the success path, silently masking failures.
    print('\t' + line.split()[0] + ' failed.')
  return res
def gauss2D_mask(center, shape, sigma=0.5):
m, | |
self.body.append('\\end{itemize}\n')
    def visit_hlistcol(self, node: Element) -> None:
        # hlist columns need no extra LaTeX markup of their own.
        pass
    def depart_hlistcol(self, node: Element) -> None:
        # Nothing to close; see visit_hlistcol.
        pass
    def latex_image_length(self, width_str: str, scale: int = 100) -> str:
        """Convert an reST dimension string to a LaTeX dimension.

        Returns None (despite the ``str`` annotation) when the unit is
        invalid, after logging a warning; callers must handle a falsy
        result.
        """
        try:
            return rstdim_to_latexdim(width_str, scale)
        except ValueError:
            logger.warning(__('dimension unit %s is invalid. Ignored.'), width_str)
            return None
    def is_inline(self, node: Element) -> bool:
        """Check whether a node represents an inline element.

        True when the parent is a TextElement, i.e. the node appears in
        running text rather than at block level.
        """
        return isinstance(node.parent, nodes.TextElement)
    def visit_image(self, node: Element) -> None:
        """Emit \\sphinxincludegraphics for an image node.

        Translates width/height/scale into includegraphics options, wraps
        the image in alignment/raisebox markup depending on inline vs.
        block context, and skips missing (when configured) or remote
        images entirely.
        """
        attrs = node.attributes
        pre = []  # type: List[str]
        # in reverse order
        post = []  # type: List[str]
        include_graphics_options = []
        has_hyperlink = isinstance(node.parent, nodes.reference)
        if has_hyperlink:
            # Judge inline-ness from the wrapping reference, not the image.
            is_inline = self.is_inline(node.parent)
        else:
            is_inline = self.is_inline(node)
        if 'width' in attrs:
            if 'scale' in attrs:
                w = self.latex_image_length(attrs['width'], attrs['scale'])
            else:
                w = self.latex_image_length(attrs['width'])
            if w:
                include_graphics_options.append('width=%s' % w)
        if 'height' in attrs:
            if 'scale' in attrs:
                h = self.latex_image_length(attrs['height'], attrs['scale'])
            else:
                h = self.latex_image_length(attrs['height'])
            if h:
                include_graphics_options.append('height=%s' % h)
        if 'scale' in attrs:
            if not include_graphics_options:
                # if no "width" nor "height", \sphinxincludegraphics will fit
                # to the available text width if oversized after rescaling.
                include_graphics_options.append('scale=%s'
                                                % (float(attrs['scale']) / 100.0))
        if 'align' in attrs:
            align_prepost = {
                # By default latex aligns the top of an image.
                (1, 'top'): ('', ''),
                (1, 'middle'): ('\\raisebox{-0.5\\height}{', '}'),
                (1, 'bottom'): ('\\raisebox{-\\height}{', '}'),
                (0, 'center'): ('{\\hspace*{\\fill}', '\\hspace*{\\fill}}'),
                # These 2 don't exactly do the right thing. The image should
                # be floated alongside the paragraph. See
                # https://www.w3.org/TR/html4/struct/objects.html#adef-align-IMG
                (0, 'left'): ('{', '\\hspace*{\\fill}}'),
                (0, 'right'): ('{\\hspace*{\\fill}', '}'),
            }
            try:
                pre.append(align_prepost[is_inline, attrs['align']][0])
                post.append(align_prepost[is_inline, attrs['align']][1])
            except KeyError:
                pass
        if self.in_parsed_literal:
            pre.append('{\\sphinxunactivateextrasandspace ')
            post.append('}')
        if not is_inline and not has_hyperlink:
            pre.append('\n\\noindent')
            post.append('\n')
        pre.reverse()
        if node['uri'] in self.builder.images:
            uri = self.builder.images[node['uri']]
        else:
            # missing image!
            if self.ignore_missing_images:
                return
            uri = node['uri']
        if uri.find('://') != -1:
            # ignore remote images
            return
        self.body.extend(pre)
        options = ''
        if include_graphics_options:
            options = '[%s]' % ','.join(include_graphics_options)
        base, ext = path.splitext(uri)
        if self.in_title and base:
            # Lowercase tokens forcely because some fncychap themes capitalize
            # the options of \sphinxincludegraphics unexpectly (ex. WIDTH=...).
            self.body.append('\\lowercase{\\sphinxincludegraphics%s}{{%s}%s}' %
                             (options, base, ext))
        else:
            self.body.append('\\sphinxincludegraphics%s{{%s}%s}' %
                             (options, base, ext))
        self.body.extend(post)
    def depart_image(self, node: Element) -> None:
        # All output for images is emitted in visit_image.
        pass
    def visit_figure(self, node: Element) -> None:
        """Open the LaTeX environment appropriate for this figure.

        Four contexts are handled: inside a table (sphinxfigure-in-table),
        left/right-aligned (wrapfigure), inside a minipage (plain center),
        and the default floating ``figure`` environment.  The matching
        closing markup is pushed onto ``self.context`` for depart_figure.
        """
        align = self.elements['figure_align']
        if self.no_latex_floats:
            # floats are disallowed here (e.g. inside an admonition): force "H"
            align = "H"
        if self.table:
            # TODO: support align option
            if 'width' in node:
                length = self.latex_image_length(node['width'])
                if length:
                    self.body.append('\\begin{sphinxfigure-in-table}[%s]\n'
                                     '\\centering\n' % length)
                else:
                    self.body.append('\\begin{sphinxfigure-in-table}\n\\centering\n')
            if any(isinstance(child, nodes.caption) for child in node):
                self.body.append('\\capstart')
            self.context.append('\\end{sphinxfigure-in-table}\\relax\n')
        elif node.get('align', '') in ('left', 'right'):
            length = None
            if 'width' in node:
                length = self.latex_image_length(node['width'])
            elif isinstance(node[0], nodes.image) and 'width' in node[0]:
                # fall back to the width of the contained image node
                length = self.latex_image_length(node[0]['width'])
            self.body.append('\n\n')    # Insert a blank line to prevent infinite loop
                                        # https://github.com/sphinx-doc/sphinx/issues/7059
            self.body.append('\\begin{wrapfigure}{%s}{%s}\n\\centering' %
                             ('r' if node['align'] == 'right' else 'l', length or '0pt'))
            self.context.append('\\end{wrapfigure}\n')
        elif self.in_minipage:
            self.body.append('\n\\begin{center}')
            self.context.append('\\end{center}\n')
        else:
            self.body.append('\n\\begin{figure}[%s]\n\\centering\n' % align)
            if any(isinstance(child, nodes.caption) for child in node):
                self.body.append('\\capstart\n')
            self.context.append('\\end{figure}\n')
def depart_figure(self, node: Element) -> None:
self.body.append(self.context.pop())
def visit_caption(self, node: Element) -> None:
self.in_caption += 1
if isinstance(node.parent, captioned_literal_block):
self.body.append('\\sphinxSetupCaptionForVerbatim{')
elif self.in_minipage and isinstance(node.parent, nodes.figure):
self.body.append('\\captionof{figure}{')
elif self.table and node.parent.tagname == 'figure':
self.body.append('\\sphinxfigcaption{')
else:
self.body.append('\\caption{')
def depart_caption(self, node: Element) -> None:
self.body.append('}')
if isinstance(node.parent, nodes.figure):
labels = self.hypertarget_to(node.parent)
self.body.append(labels)
self.in_caption -= 1
def visit_legend(self, node: Element) -> None:
self.body.append('\n\\begin{sphinxlegend}')
def depart_legend(self, node: Element) -> None:
self.body.append('\\end{sphinxlegend}\n')
def visit_admonition(self, node: Element) -> None:
self.body.append('\n\\begin{sphinxadmonition}{note}')
self.no_latex_floats += 1
def depart_admonition(self, node: Element) -> None:
self.body.append('\\end{sphinxadmonition}\n')
self.no_latex_floats -= 1
def _visit_named_admonition(self, node: Element) -> None:
label = admonitionlabels[node.tagname]
self.body.append('\n\\begin{sphinxadmonition}{%s}{%s:}' %
(node.tagname, label))
self.no_latex_floats += 1
def _depart_named_admonition(self, node: Element) -> None:
self.body.append('\\end{sphinxadmonition}\n')
self.no_latex_floats -= 1
visit_attention = _visit_named_admonition
depart_attention = _depart_named_admonition
visit_caution = _visit_named_admonition
depart_caution = _depart_named_admonition
visit_danger = _visit_named_admonition
depart_danger = _depart_named_admonition
visit_error = _visit_named_admonition
depart_error = _depart_named_admonition
visit_hint = _visit_named_admonition
depart_hint = _depart_named_admonition
visit_important = _visit_named_admonition
depart_important = _depart_named_admonition
visit_note = _visit_named_admonition
depart_note = _depart_named_admonition
visit_tip = _visit_named_admonition
depart_tip = _depart_named_admonition
visit_warning = _visit_named_admonition
depart_warning = _depart_named_admonition
    def visit_versionmodified(self, node: Element) -> None:
        """versionadded/versionchanged/deprecated need no extra LaTeX markup."""
        pass
    def depart_versionmodified(self, node: Element) -> None:
        """No closing markup needed (see visit_versionmodified)."""
        pass
    def visit_target(self, node: Element) -> None:
        """Emit \\hypertarget anchors for a target node.

        Index and equation targets are skipped (they are produced by the
        index/math machinery), as are targets whose following node can carry
        the hyperlink itself.
        """
        def add_target(id: str) -> None:
            # indexing uses standard LaTeX index markup, so the targets
            # will be generated differently
            if id.startswith('index-'):
                return
            # equations also need no extra blank line nor hypertarget
            # TODO: fix this dependency on mathbase extension internals
            if id.startswith('equation-'):
                return
            # insert blank line, if the target follows a paragraph node
            index = node.parent.index(node)
            if index > 0 and isinstance(node.parent[index - 1], nodes.paragraph):
                self.body.append('\n')
            # do not generate \phantomsection in \section{}
            anchor = not self.in_title
            self.body.append(self.hypertarget(id, anchor=anchor))
        # skip if visitor for next node supports hyperlink
        next_node = node  # type: nodes.Node
        while isinstance(next_node, nodes.target):
            next_node = next_node.next_node(ascend=True)
        domain = cast(StandardDomain, self.builder.env.get_domain('std'))
        if isinstance(next_node, HYPERLINK_SUPPORT_NODES):
            # the next node emits its own anchor
            return
        elif domain.get_enumerable_node_type(next_node) and domain.get_numfig_title(next_node):
            # numbered (numfig) nodes also get their own anchors
            return
        if 'refuri' in node:
            return
        if 'anonymous' in node:
            return
        if node.get('refid'):
            prev_node = get_prev_node(node)
            if isinstance(prev_node, nodes.reference) and node['refid'] == prev_node['refid']:
                # a target for a hyperlink reference having alias
                pass
            else:
                add_target(node['refid'])
        for id in node['ids']:
            add_target(id)
    def depart_target(self, node: Element) -> None:
        """All anchors are emitted in visit_target; nothing to close."""
        pass
def visit_attribution(self, node: Element) -> None:
self.body.append('\n\\begin{flushright}\n')
self.body.append('---')
def depart_attribution(self, node: Element) -> None:
self.body.append('\n\\end{flushright}\n')
    def visit_index(self, node: Element) -> None:
        """Translate an index node into \\index{} markup and skip its children.

        Handles the entry types single/pair/triple/see/seealso; unknown types
        are reported with a warning.
        """
        def escape(value: str) -> str:
            # encode() first so the replacements below operate on the
            # LaTeX-escaped text; makeindex control characters are quoted.
            value = self.encode(value)
            value = value.replace(r'\{', r'\sphinxleftcurlybrace{}')
            value = value.replace(r'\}', r'\sphinxrightcurlybrace{}')
            value = value.replace('"', '""')
            value = value.replace('@', '"@')
            value = value.replace('!', '"!')
            value = value.replace('|', r'\textbar{}')
            return value
        def style(string: str) -> str:
            # "term (extra)" entries are split into \spxentry + \spxextra
            match = EXTRA_RE.match(string)
            if match:
                return match.expand(r'\\spxentry{\1}\\spxextra{\2}')
            else:
                return '\\spxentry{%s}' % string
        if not node.get('inline', True):
            self.body.append('\n')
        entries = node['entries']
        for type, string, tid, ismain, key_ in entries:
            m = ''
            if ismain:
                # main entries get the spxpagem page-number formatter
                m = '|spxpagem'
            try:
                if type == 'single':
                    try:
                        p1, p2 = [escape(x) for x in split_into(2, 'single', string)]
                        P1, P2 = style(p1), style(p2)
                        self.body.append(r'\index{%s@%s!%s@%s%s}' % (p1, P1, p2, P2, m))
                    except ValueError:
                        # no sub-entry: fall back to a one-level index entry
                        p = escape(split_into(1, 'single', string)[0])
                        P = style(p)
                        self.body.append(r'\index{%s@%s%s}' % (p, P, m))
                elif type == 'pair':
                    p1, p2 = [escape(x) for x in split_into(2, 'pair', string)]
                    P1, P2 = style(p1), style(p2)
                    self.body.append(r'\index{%s@%s!%s@%s%s}\index{%s@%s!%s@%s%s}' %
                                     (p1, P1, p2, P2, m, p2, P2, p1, P1, m))
                elif type == 'triple':
                    # three rotated permutations of the triple are indexed
                    p1, p2, p3 = [escape(x) for x in split_into(3, 'triple', string)]
                    P1, P2, P3 = style(p1), style(p2), style(p3)
                    self.body.append(
                        r'\index{%s@%s!%s %s@%s %s%s}'
                        r'\index{%s@%s!%s, %s@%s, %s%s}'
                        r'\index{%s@%s!%s %s@%s %s%s}' %
                        (p1, P1, p2, p3, P2, P3, m,
                         p2, P2, p3, p1, P3, P1, m,
                         p3, P3, p1, p2, P1, P2, m))
                elif type == 'see':
                    p1, p2 = [escape(x) for x in split_into(2, 'see', string)]
                    P1 = style(p1)
                    self.body.append(r'\index{%s@%s|see{%s}}' % (p1, P1, p2))
                elif type == 'seealso':
                    p1, p2 = [escape(x) for x in split_into(2, 'seealso', string)]
                    P1 = style(p1)
                    self.body.append(r'\index{%s@%s|see{%s}}' % (p1, P1, p2))
                else:
                    logger.warning(__('unknown index entry type %s found'), type)
            except ValueError as err:
                logger.warning(str(err))
        if not node.get('inline', True):
            self.body.append('\\ignorespaces ')
        raise nodes.SkipNode
def visit_raw(self, node: Element) -> None:
if not self.is_inline(node):
self.body.append('\n')
if 'latex' in node.get('format', '').split():
self.body.append(node.astext())
if not self.is_inline(node):
self.body.append('\n')
raise nodes.SkipNode
def visit_reference(self, node: Element) -> None:
if not self.in_title:
for id in node.get('ids'):
anchor = not self.in_caption
self.body += self.hypertarget(id, anchor=anchor)
if not self.is_inline(node):
self.body.append('\n')
uri = node.get('refuri', '')
if not uri and node.get('refid'):
uri = '%' + self.curfilestack[-1] + '#' + node['refid']
if self.in_title or not uri:
self.context.append('')
elif uri.startswith('#'):
# references to labels in the same document
id = self.curfilestack[-1] + ':' + uri[1:]
self.body.append(self.hyperlink(id))
self.body.append(r'\emph{')
if self.config.latex_show_pagerefs and not \
self.in_production_list:
self.context.append('}}} (%s)' % self.hyperpageref(id))
else:
self.context.append('}}}')
elif uri.startswith('%'):
# references to documents or labels inside documents
hashindex = uri.find('#')
if hashindex == -1:
# reference to the document
id = uri[1:] + '::doc'
else:
# reference to a label
id = uri[1:].replace('#', ':')
self.body.append(self.hyperlink(id))
if (len(node) and
isinstance(node[0], nodes.Element) and
'std-term' in node[0].get('classes', [])):
# don't add a pageref for glossary terms
self.context.append('}}}')
# mark up as termreference
self.body.append(r'\sphinxtermref{')
else:
self.body.append(r'\sphinxcrossref{')
if self.config.latex_show_pagerefs and not self.in_production_list:
self.context.append('}}} (%s)' % self.hyperpageref(id))
else:
self.context.append('}}}')
else:
if len(node) == 1 and uri == node[0]:
if node.get('nolinkurl'):
self.body.append('\\sphinxnolinkurl{%s}' % self.encode_uri(uri))
else:
| |
# Source: pyblinx/chunk.py (repo: skochanski/blinx_rev)
from .node import Node
from .texlist import Texlist
from .address import section_addresses, rawaddress
from .helpers import verify_file_arg_o, verify_file_arg_b
from .world_transform import transform
from struct import unpack
import operator
# Magic values observed in Blinx chunk triangle data (see Chunk.parse_tripart).
TEXTURE_MAGIC = 0x0241
TEXTURE_TYPE_SPEC = 0x0408
# Sentinel marking the end of triangle data.
ESCAPE = b'\xff\x00\x00\x00'
class Chunk(Node) :
section_table = section_addresses()
    def __init__(self, xbe, entry_offset, section, texlist=None, parent_coords=None, full=True) :
        '''
        Initialize a Chunk and, when full is True, eagerly parse its geometry.

        xbe           -- open file handle to the XBE image
        entry_offset  -- offset of this chunk's entry (forwarded to Node)
        section       -- section name used for virtual->raw address translation
        texlist       -- optional Texlist providing materials
        parent_coords -- parent transform applied after the local one
        '''
        Node.__init__(self, xbe, entry_offset, section, texlist, parent_coords)
        block = self.parse_block()
        # Translate virtual offsets to raw file offsets; a missing pointer
        # stays None so later parsing can bail out gracefully.
        self.voffset = rawaddress(block['voffset'], self.section, Chunk.section_table) if block['voffset'] is not None else None
        self.toffset = rawaddress(block['toffset'], self.section, Chunk.section_table) if block['toffset'] is not None else None
        self.name = 'ch_' + self.section + '_' + hex(self.offset)
        self.vertices = None
        self.triangles = None
        if full is True :
            self.vertices, self.triangles = self.parse(world=True)
    def __str__(self) :
        '''Return the generated chunk name (section + entry offset).'''
        return self.name
def parse_block(self) :
'''
Parse pointer block and return data.
'''
f = self.xbe
offset = rawaddress(self.block, self.section, Chunk.section_table)
f.seek(offset)
vdata_offset = unpack('i', f.read(4))[0]
if vdata_offset is 0 : vdata_offset = None
tdata_offset = unpack('i', f.read(4))[0]
if tdata_offset is 0 : tdata_offset = None
float_array_0 = []
for _ in range(6) : float_array_0.append(unpack('f', f.read(4))[0])
return {
'voffset': vdata_offset,
'toffset': tdata_offset,
'farray': float_array_0
}
def parse(self, world=True) :
'''
Parse vetices and triangles in chunk.
'''
print(f'Parsing chunk at {self.entry}')
v = self.parse_vertices(world=world)
t = self.parse_triangles()
return v, t
def write(self, file, texlist=None, clist=False) :
'''
Write .obj to open file handle. If texlist exists, reference material library.
'''
f = verify_file_arg_o(file, usage='w+')
if texlist is not None and clist is False :
f.write('mtllib {}.mtl\n'.format(texlist.name))
f.write('o {}\n'.format(self.name))
self.write_vertices(f)
self.write_texcoords(f)
matlist = texlist.matlist if texlist is not None else None
self.write_triangles(f, matlist)
    def parse_vertices(self, world=True) :
        '''
        Reads vertex list from xbe. Returns a list[count], where each element is a tuple[3] denoting xyz.
        When world is True, vertices are transformed by the chunk's own and
        its parent's coordinates.
        '''
        if self.voffset is None :
            print(f'\t{hex(self.offset)}: This chunk contains no vertices')
            return None

        f = self.xbe
        # NOTE(review): header layout assumed to be 6 skipped bytes, an int16
        # vertex count, then 8 more header bytes -- confirm against format docs.
        f.seek(self.voffset + 6)
        count = unpack('h', f.read(2))[0]
        f.seek(8, 1)

        print(f'\t{hex(self.offset)}: Parsing {count} vertices at {hex(self.voffset)}... ', end='')
        v = []
        for _ in range(count) :
            vertex = list(unpack('fff', f.read(12)))
            if world is True :
                # Apply local transform first, then the parent transform.
                v_local = transform(vertex, self.world_coords)
                v_global = transform(v_local, self.parent_coords)
                vertex = v_global
                #w = tuple(map(operator.add, self.world_coords, self.parent_coords))
                # print(str(self.world_coords[:3]) + ' + ' + str(self.parent_coords[:3]) + ' = ' + str(w[:3]))
                #vertex = transform(vertex, w)
            v.append(tuple(vertex))
            # 4 padding bytes between 12-byte vertex records
            f.seek(4, 1)
        print('\tDone')

        self.vertices = v
        return v
    def parse_triangles(self) :
        '''
        Read tripart list from xbe. Returns a list of tuples (tripart, texlist index) as defined in parse_tripart() without escape flags.
        '''
        if self.toffset is None :
            print(f'\t{hex(self.offset)}: This chunk contains no triangles')
            return None

        f = self.xbe
        f.seek(self.toffset)
        print(f'\tParsing triangles at {hex(self.toffset)}... ')

        # Hacky fix around unknown value at 0xbc58 in MDLEN. Probably others like it.
        if unpack('i', f.read(4))[0] > 50 :
            f.seek(-4, 1)

        # Skip the section header (size is given in 16-bit words).
        f.seek(2, 1)
        header_size = unpack('h', f.read(2))[0] * 2
        f.seek(header_size, 1)

        t = []
        i = 0
        while(True) :
            print(f'\tParsing triangle section {i}')
            i += 1
            j = 0
            prev_tindex = 0
            # NOTE(review): final starts True and is only ever re-assigned
            # True below, so the outer loop always exits after one section.
            # Possibly intended as "final = tripart[3]" -- confirm.
            final = True
            while(True) :
                print(f'\t\tParsing tripart {j}')
                j += 1
                tripart = self.parse_tripart(prev_tindex=prev_tindex)
                if(tripart[0] is not None and tripart[1] is not None) : #TODO: improve readability
                    t.append((tripart[0], tripart[1]))
                    # Remember the texture index for triparts that omit it.
                    prev_tindex = tripart[1]
                if tripart[3] :
                    final = True
                if tripart[2] :
                    # escape flag: stop reading triparts in this section
                    break
            if final : #FIXME: handle case where tripart = None
                break
        print('\tDone\n')

        self.triangles = t
        return t
    def parse_tripart(self, type='texture', prev_tindex=0) :
        '''
        Reads tripart. Returns tuple (tripart, texlist index, last, final) where tripart is a list of tuples (vertex index, tex_x, tex_y),
        texlist index assigns the texture, last is the escape flag, and final flag is true if there does not exist another triangle section.
        '''
        f = self.xbe
        t = []
        escape = False # Assume this is not the final tripart
        final = True # Assume this is the final triangle section

        # First, observe the first two bytes as an int16. This will be used to determine the type of the tripart: simple, textured with declared index,
        # or textured without declared index. pyBlinx current does not support parsing simple triparts, and they will be skipped.
        type_spec = unpack('h', f.read(2))[0]

        # TODO: make logical flow for this section more intuitive
        texlist_index=0
        # The case where the texture index is not declared, but the tripart is textured. It uses the texture index passed into the method.
        if type_spec == TEXTURE_MAGIC :
            print(f'\t\t\t\tUsing prev tindex {prev_tindex}')
            texlist_index = prev_tindex

        # The case where the tripart is simple. The next tripart is probed for the escape symbol, but no actual parsing happens.
        elif (type_spec - TEXTURE_TYPE_SPEC) % 0x1000 != 0 :
            print('\t\t\tNon-texture tripart.')
            type='simple' # Currently unused
            # Skip over the whole simple tripart (size is in 16-bit words).
            tripart_size = unpack('h', f.read(2))[0] * 2
            f.seek(tripart_size, 1)
            tripart_end = f.tell()

            esc_candidate = f.read(4)
            if esc_candidate == ESCAPE :
                print('\t\t\t\t ESCAPE SYMBOL FOUND IN SIMPLE TRIPART')
                return(None, None, True, True)
            else :
                print('\t\t\t\tESCAPE SYMBOL NOT FOUND IN SIMPLE TRIPART')
                f.seek(-4, 1)
                return(None, None, False, True)

        # The case where the tripart's texture index is declared.
        else :
            texlist_index = unpack('h', f.read(2))[0] ^ 0x4000
            f.seek(2, 1)

        # This next section navigates to the end of the tripart and probes four bytes. Using different interpretations of these bytes, it determines
        # the behavior after the tristrip is parsed. More specifically, it determines the existence of more triparts or triangle sections. The output
        # of this section will be passed in the returned tuple as booleans at [2] and [3].
        tripart_size = unpack('h', f.read(2))[0] * 2
        f.seek(tripart_size, 1)
        tripart_end = f.tell()

        esc_candidate = f.read(4)
        can_float = unpack('f', esc_candidate)[0]
        can_ints = unpack('HH', esc_candidate)

        # The first four bytes of tripart headers that declare texture indexes is a float ~2.0. This is hacky and will falsely identify a simple tripart
        # or a tripart that uses the previous texture index. I'm looking to phase this out when possible, since it is nondeterministic and relies on
        # sketchy math.
        if can_float < 1.5 :
            escape = True

        # The case where the next tripart does exist and uses the texture index declared in the current one. This would have been falsely marked positive by
        # the previous check.
        if can_ints[0] == TEXTURE_MAGIC :
            print('\t\t\tTEXTURE MAGIC NUMBER')
            escape = False

        # The case where the next 4 bytes is the escape symbol, thus terminating triangle parsing after the current tristrip.
        if esc_candidate == ESCAPE :
            print('\t\t\tESCAPE SYMBOL FOUND')
            escape = True

        # The case where the current tristrip is the last in its triangle section but there exists a next triangle section. New triangle sections always
        # start with 0x25XX 0xYYYY where XX is arbitrary (as far as I know) and YYYY is the size of the header. Headers have not been observed to be larger
        # than 0x20 bytes.
        if (can_ints[0] >> 0x8 == 0x25 and can_ints[1] < 0x20) :
            print('\t\t\tANOTHER TRIANGLE SECTION EXISTS')
            final = False
            escape = True

        f.seek(-(tripart_size + 4), 1) # Return to beginning of tripart.

        # Parse the tripart: t_length strips, each a run of (vertex index,
        # tex_x, tex_y) triples; a negative strip length only encodes winding.
        t_length = unpack('h', f.read(2))[0]
        for i in range(t_length) :
            strip = []
            s_length = abs(unpack('h', f.read(2))[0])

            for _ in range(s_length) :
                if type == 'texture' : # type is currently unused and will always be 'texture'.
                    raw_point = list(unpack('hhh', f.read(6)))
                    # Convert to .obj conventions: 1-based indices, v-flipped UVs.
                    raw_point[0] += 1 #TODO: clean up
                    raw_point[1] /= 255.0
                    raw_point[2] /= -255.0
                    raw_point[2] += 1.0
                else :
                    raw_point = [unpack('h', f.read(2))[0] + 1, 0, 0]

                data_point = tuple(raw_point)
                strip.append(data_point)
            t.append(strip)

        f.seek(tripart_end) # Verify file pointer is at end of tripart.
        return (t, texlist_index, escape, final,)
def write_vertices(self, file) :
f = verify_file_arg_o(file)
verts = self.vertices
if not verts :
print('\tNo vertices found!')
return None
else :
print(f'Writing {len(verts)} vertices to {f.name}')
for v in verts :
ln = f'v {v[0]} {v[1]} {v[2]}\n'
f.write(ln)
def write_triangles(self, file, matlist=None) :
f = verify_file_arg_o(file)
triangles = self.triangles
if not triangles :
print('\tNo triangles found!')
return None
else | |
= access_key_id
self.owner_id = owner_id
self.resource_owner_id = resource_owner_id
self.resource_owner_account = resource_owner_account
self.owner_account = owner_account
self.project_id = project_id
self.material_ids = material_ids
def validate(self):
self.validate_required(self.project_id, 'project_id')
self.validate_required(self.material_ids, 'material_ids')
def to_map(self):
result = {}
result['AccessKeyId'] = self.access_key_id
result['OwnerId'] = self.owner_id
result['ResourceOwnerId'] = self.resource_owner_id
result['ResourceOwnerAccount'] = self.resource_owner_account
result['OwnerAccount'] = self.owner_account
result['ProjectId'] = self.project_id
result['MaterialIds'] = self.material_ids
return result
def from_map(self, map={}):
self.access_key_id = map.get('AccessKeyId')
self.owner_id = map.get('OwnerId')
self.resource_owner_id = map.get('ResourceOwnerId')
self.resource_owner_account = map.get('ResourceOwnerAccount')
self.owner_account = map.get('OwnerAccount')
self.project_id = map.get('ProjectId')
self.material_ids = map.get('MaterialIds')
return self
class SetEditingProjectMaterialsResponse(TeaModel):
    """Response of SetEditingProjectMaterials: carries only the request id."""

    def __init__(self, request_id=None):
        self.request_id = request_id

    def validate(self):
        self.validate_required(self.request_id, 'request_id')

    def to_map(self):
        return {'RequestId': self.request_id}

    def from_map(self, map={}):
        self.request_id = map.get('RequestId')
        return self
class SearchEditingProjectRequest(TeaModel):
    """Request for SearchEditingProject: paged, filtered search of projects."""

    # (python attribute, wire name) pairs driving to_map/from_map.
    _FIELD_MAP = (
        ('access_key_id', 'AccessKeyId'),
        ('owner_id', 'OwnerId'),
        ('resource_owner_id', 'ResourceOwnerId'),
        ('resource_owner_account', 'ResourceOwnerAccount'),
        ('owner_account', 'OwnerAccount'),
        ('end_time', 'EndTime'),
        ('start_time', 'StartTime'),
        ('status', 'Status'),
        ('page_no', 'PageNo'),
        ('page_size', 'PageSize'),
        ('sort_by', 'SortBy'),
        ('title', 'Title'),
    )

    def __init__(self, access_key_id=None, owner_id=None, resource_owner_id=None, resource_owner_account=None, owner_account=None, end_time=None, start_time=None, status=None, page_no=None, page_size=None, sort_by=None, title=None):
        self.access_key_id = access_key_id
        self.owner_id = owner_id
        self.resource_owner_id = resource_owner_id
        self.resource_owner_account = resource_owner_account
        self.owner_account = owner_account
        self.end_time = end_time
        self.start_time = start_time
        self.status = status
        self.page_no = page_no
        self.page_size = page_size
        self.sort_by = sort_by
        self.title = title

    def validate(self):
        # No field is mandatory for a search request.
        pass

    def to_map(self):
        return {key: getattr(self, attr) for attr, key in self._FIELD_MAP}

    def from_map(self, map={}):
        for attr, key in self._FIELD_MAP:
            setattr(self, attr, map.get(key))
        return self
class SearchEditingProjectResponse(TeaModel):
    """Response for SearchEditingProject: total hit count plus one result page."""

    def __init__(self, request_id=None, total=None, project_list=None):
        self.request_id = request_id
        self.total = total
        self.project_list = project_list

    def validate(self):
        self.validate_required(self.request_id, 'request_id')
        self.validate_required(self.total, 'total')
        self.validate_required(self.project_list, 'project_list')
        if self.project_list:
            self.project_list.validate()

    def to_map(self):
        plist = self.project_list.to_map() if self.project_list is not None else None
        return {'RequestId': self.request_id, 'Total': self.total, 'ProjectList': plist}

    def from_map(self, map={}):
        self.request_id = map.get('RequestId')
        self.total = map.get('Total')
        raw = map.get('ProjectList')
        if raw is not None:
            self.project_list = SearchEditingProjectResponseProjectList().from_map(raw)
        else:
            self.project_list = None
        return self
class SearchEditingProjectResponseProjectListProject(TeaModel):
    """One editing project entry in a SearchEditingProject result page."""

    # (python attribute, wire name) pairs driving to_map/from_map.
    _FIELD_MAP = (
        ('project_id', 'ProjectId'),
        ('creation_time', 'CreationTime'),
        ('modified_time', 'ModifiedTime'),
        ('status', 'Status'),
        ('description', 'Description'),
        ('title', 'Title'),
        ('cover_url', 'CoverURL'),
        ('storage_location', 'StorageLocation'),
        ('region_id', 'RegionId'),
        ('duration', 'Duration'),
    )

    def __init__(self, project_id=None, creation_time=None, modified_time=None, status=None, description=None, title=None, cover_url=None, storage_location=None, region_id=None, duration=None):
        self.project_id = project_id
        self.creation_time = creation_time
        self.modified_time = modified_time
        self.status = status
        self.description = description
        self.title = title
        self.cover_url = cover_url
        self.storage_location = storage_location
        self.region_id = region_id
        self.duration = duration

    def validate(self):
        # Every field is mandatory in a well-formed response entry.
        for attr, _key in self._FIELD_MAP:
            self.validate_required(getattr(self, attr), attr)

    def to_map(self):
        return {key: getattr(self, attr) for attr, key in self._FIELD_MAP}

    def from_map(self, map={}):
        for attr, key in self._FIELD_MAP:
            setattr(self, attr, map.get(key))
        return self
class SearchEditingProjectResponseProjectList(TeaModel):
    """Wrapper for the repeated Project element of a SearchEditingProject response."""

    def __init__(self, project=None):
        # Fix: the constructor argument was previously discarded
        # (self.project = [] unconditionally).  Honor an explicit value while
        # keeping the old default of a fresh empty list per instance.
        self.project = [] if project is None else project

    def validate(self):
        self.validate_required(self.project, 'project')
        if self.project:
            for k in self.project:
                if k:
                    k.validate()

    def to_map(self):
        result = {}
        if self.project is not None:
            result['Project'] = [k.to_map() if k else None for k in self.project]
        else:
            result['Project'] = None
        return result

    def from_map(self, map={}):
        if map.get('Project') is not None:
            self.project = []
            for k in map.get('Project'):
                self.project.append(SearchEditingProjectResponseProjectListProject().from_map(k))
        else:
            self.project = None
        return self
class ProduceEditingProjectVideoRequest(TeaModel):
    """Request for ProduceEditingProjectVideo (render a project's timeline)."""

    # (python attribute, wire name) pairs driving to_map/from_map.
    _FIELD_MAP = (
        ('access_key_id', 'AccessKeyId'),
        ('owner_id', 'OwnerId'),
        ('resource_owner_account', 'ResourceOwnerAccount'),
        ('resource_owner_id', 'ResourceOwnerId'),
        ('project_id', 'ProjectId'),
        ('timeline', 'Timeline'),
        ('title', 'Title'),
        ('description', 'Description'),
        ('cover_url', 'CoverURL'),
        ('media_metadata', 'MediaMetadata'),
        ('produce_config', 'ProduceConfig'),
        ('user_data', 'UserData'),
    )

    def __init__(self, access_key_id=None, owner_id=None, resource_owner_account=None, resource_owner_id=None, project_id=None, timeline=None, title=None, description=None, cover_url=None, media_metadata=None, produce_config=None, user_data=None):
        self.access_key_id = access_key_id
        self.owner_id = owner_id
        self.resource_owner_account = resource_owner_account
        self.resource_owner_id = resource_owner_id
        self.project_id = project_id
        self.timeline = timeline
        self.title = title
        self.description = description
        self.cover_url = cover_url
        self.media_metadata = media_metadata
        self.produce_config = produce_config
        self.user_data = user_data

    def validate(self):
        # No field is mandatory for this request.
        pass

    def to_map(self):
        return {key: getattr(self, attr) for attr, key in self._FIELD_MAP}

    def from_map(self, map={}):
        for attr, key in self._FIELD_MAP:
            setattr(self, attr, map.get(key))
        return self
class ProduceEditingProjectVideoResponse(TeaModel):
    """Response for ProduceEditingProjectVideo: ids of the produced media."""

    _FIELD_MAP = (
        ('request_id', 'RequestId'),
        ('media_id', 'MediaId'),
        ('project_id', 'ProjectId'),
    )

    def __init__(self, request_id=None, media_id=None, project_id=None):
        self.request_id = request_id
        self.media_id = media_id
        self.project_id = project_id

    def validate(self):
        # All three ids are mandatory in a well-formed response.
        for attr, _key in self._FIELD_MAP:
            self.validate_required(getattr(self, attr), attr)

    def to_map(self):
        return {key: getattr(self, attr) for attr, key in self._FIELD_MAP}

    def from_map(self, map={}):
        for attr, key in self._FIELD_MAP:
            setattr(self, attr, map.get(key))
        return self
class GetEditingProjectMaterialsRequest(TeaModel):
    """Request for GetEditingProjectMaterials (list a project's materials)."""

    # (python attribute, wire name) pairs driving to_map/from_map.
    _FIELD_MAP = (
        ('access_key_id', 'AccessKeyId'),
        ('owner_id', 'OwnerId'),
        ('resource_owner_id', 'ResourceOwnerId'),
        ('resource_owner_account', 'ResourceOwnerAccount'),
        ('owner_account', 'OwnerAccount'),
        ('project_id', 'ProjectId'),
        ('type', 'Type'),
        ('material_type', 'MaterialType'),
    )

    def __init__(self, access_key_id=None, owner_id=None, resource_owner_id=None, resource_owner_account=None, owner_account=None, project_id=None, type=None, material_type=None):
        self.access_key_id = access_key_id
        self.owner_id = owner_id
        self.resource_owner_id = resource_owner_id
        self.resource_owner_account = resource_owner_account
        self.owner_account = owner_account
        self.project_id = project_id
        self.type = type
        self.material_type = material_type

    def validate(self):
        # Only the project id is mandatory.
        self.validate_required(self.project_id, 'project_id')

    def to_map(self):
        return {key: getattr(self, attr) for attr, key in self._FIELD_MAP}

    def from_map(self, map={}):
        for attr, key in self._FIELD_MAP:
            setattr(self, attr, map.get(key))
        return self
class GetEditingProjectMaterialsResponse(TeaModel):
    """Response for GetEditingProjectMaterials: request id plus material list."""

    def __init__(self, request_id=None, material_list=None):
        self.request_id = request_id
        self.material_list = material_list

    def validate(self):
        self.validate_required(self.request_id, 'request_id')
        self.validate_required(self.material_list, 'material_list')
        if self.material_list:
            self.material_list.validate()

    def to_map(self):
        mlist = self.material_list.to_map() if self.material_list is not None else None
        return {'RequestId': self.request_id, 'MaterialList': mlist}

    def from_map(self, map={}):
        self.request_id = map.get('RequestId')
        raw = map.get('MaterialList')
        if raw is not None:
            self.material_list = GetEditingProjectMaterialsResponseMaterialList().from_map(raw)
        else:
            self.material_list = None
        return self
class GetEditingProjectMaterialsResponseMaterialListMaterialSnapshots(TeaModel):
    """Wrapper for the repeated Snapshot URL list of a material."""

    def __init__(self, snapshot=None):
        # Fix: the constructor argument was previously discarded
        # (self.snapshot = [] unconditionally).  Honor an explicit value while
        # keeping the old default of a fresh empty list per instance.
        self.snapshot = [] if snapshot is None else snapshot

    def validate(self):
        self.validate_required(self.snapshot, 'snapshot')

    def to_map(self):
        result = {}
        if self.snapshot is not None:
            result['Snapshot'] = list(self.snapshot)
        else:
            result['Snapshot'] = None
        return result

    def from_map(self, map={}):
        if map.get('Snapshot') is not None:
            self.snapshot = list(map.get('Snapshot'))
        else:
            self.snapshot = None
        return self
class GetEditingProjectMaterialsResponseMaterialListMaterialSprites(TeaModel):
    """Wrapper for the repeated Sprite URL list of a material."""

    def __init__(self, sprite=None):
        # Fix: the constructor argument was previously discarded
        # (self.sprite = [] unconditionally).  Honor an explicit value while
        # keeping the old default of a fresh empty list per instance.
        self.sprite = [] if sprite is None else sprite

    def validate(self):
        self.validate_required(self.sprite, 'sprite')

    def to_map(self):
        result = {}
        if self.sprite is not None:
            result['Sprite'] = list(self.sprite)
        else:
            result['Sprite'] = None
        return result

    def from_map(self, map={}):
        if map.get('Sprite') is not None:
            self.sprite = list(map.get('Sprite'))
        else:
            self.sprite = None
        return self
class GetEditingProjectMaterialsResponseMaterialListMaterial(TeaModel):
    """A single material entry in the GetEditingProjectMaterials response."""

    # Scalar (attribute, wire-name) pairs; the nested Snapshots/Sprites
    # models are handled separately below.
    _SCALAR_FIELDS = (
        ('material_id', 'MaterialId'),
        ('title', 'Title'),
        ('tags', 'Tags'),
        ('status', 'Status'),
        ('size', 'Size'),
        ('duration', 'Duration'),
        ('description', 'Description'),
        ('creation_time', 'CreationTime'),
        ('modified_time', 'ModifiedTime'),
        ('cover_url', 'CoverURL'),
        ('cate_id', 'CateId'),
        ('cate_name', 'CateName'),
        ('source', 'Source'),
        ('sprite_config', 'SpriteConfig'),
    )

    def __init__(self, material_id=None, title=None, tags=None, status=None, size=None, duration=None, description=None, creation_time=None, modified_time=None, cover_url=None, cate_id=None, cate_name=None, source=None, sprite_config=None, snapshots=None, sprites=None):
        self.material_id = material_id
        self.title = title
        self.tags = tags
        self.status = status
        self.size = size
        self.duration = duration
        self.description = description
        self.creation_time = creation_time
        self.modified_time = modified_time
        self.cover_url = cover_url
        self.cate_id = cate_id
        self.cate_name = cate_name
        self.source = source
        self.sprite_config = sprite_config
        self.snapshots = snapshots
        self.sprites = sprites

    def validate(self):
        # All scalar fields plus both nested lists are mandatory.
        for attr, _key in self._SCALAR_FIELDS:
            self.validate_required(getattr(self, attr), attr)
        self.validate_required(self.snapshots, 'snapshots')
        if self.snapshots:
            self.snapshots.validate()
        self.validate_required(self.sprites, 'sprites')
        if self.sprites:
            self.sprites.validate()

    def to_map(self):
        result = {key: getattr(self, attr) for attr, key in self._SCALAR_FIELDS}
        result['Snapshots'] = self.snapshots.to_map() if self.snapshots is not None else None
        result['Sprites'] = self.sprites.to_map() if self.sprites is not None else None
        return result

    def from_map(self, map={}):
        for attr, key in self._SCALAR_FIELDS:
            setattr(self, attr, map.get(key))
        raw_snapshots = map.get('Snapshots')
        if raw_snapshots is not None:
            self.snapshots = GetEditingProjectMaterialsResponseMaterialListMaterialSnapshots().from_map(raw_snapshots)
        else:
            self.snapshots = None
        raw_sprites = map.get('Sprites')
        if raw_sprites is not None:
            self.sprites = GetEditingProjectMaterialsResponseMaterialListMaterialSprites().from_map(raw_sprites)
        else:
            self.sprites = None
        return self
class GetEditingProjectMaterialsResponseMaterialList(TeaModel):
def __init__(self, material=None):
self.material = []
def validate(self):
self.validate_required(self.material, 'material')
if self.material:
for k in | |
<reponame>XLearning-SCU/2021-NeurIPS-NCR
"""SGRAF model"""
import math
from collections import OrderedDict
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from torch.nn.utils.clip_grad import clip_grad_norm_
def l1norm(X, dim, eps=1e-8):
    """L1-normalize X along ``dim``; ``eps`` keeps the divisor non-zero."""
    denom = X.abs().sum(dim=dim, keepdim=True) + eps
    return X / denom
def l2norm(X, dim=-1, eps=1e-8):
    """L2-normalize X along ``dim``; ``eps`` keeps the divisor non-zero."""
    denom = (X ** 2).sum(dim=dim, keepdim=True).sqrt() + eps
    return X / denom
def cosine_sim(x1, x2, dim=-1, eps=1e-8):
    """Cosine similarity of x1 and x2 along ``dim`` (result squeezed)."""
    dot = (x1 * x2).sum(dim)
    # clamp the product of norms so division by ~0 cannot occur
    denom = (x1.norm(2, dim) * x2.norm(2, dim)).clamp(min=eps)
    return (dot / denom).squeeze()
class EncoderImage(nn.Module):
    """
    Project detected-region features into the joint embedding space.
    Args: - images: raw local detected regions, shape: (batch_size, 36, 2048).
    Returns: - img_emb: final local region embeddings, shape: (batch_size, 36, 1024).
    """

    def __init__(self, img_dim, embed_size, no_imgnorm=False):
        super(EncoderImage, self).__init__()
        self.embed_size = embed_size
        self.no_imgnorm = no_imgnorm
        self.fc = nn.Linear(img_dim, embed_size)
        self.init_weights()

    def init_weights(self):
        """Xavier-uniform initialization for the projection layer."""
        bound = np.sqrt(6.0) / np.sqrt(self.fc.in_features + self.fc.out_features)
        self.fc.weight.data.uniform_(-bound, bound)
        self.fc.bias.data.fill_(0)

    def forward(self, images):
        """Project region features; l2-normalize unless disabled."""
        img_emb = self.fc(images)
        if self.no_imgnorm:
            return img_emb
        return l2norm(img_emb, dim=-1)

    def load_state_dict(self, state_dict):
        """Accept a state_dict from the full model by dropping unknown keys."""
        own_state = self.state_dict()
        filtered = OrderedDict(
            (name, param) for name, param in state_dict.items() if name in own_state
        )
        super(EncoderImage, self).load_state_dict(filtered)
class EncoderText(nn.Module):
    """
    Build local word representations with a (bi-)GRU.
    Args: - captions: word ids, shape: (batch_size, L).
    Returns: - cap_emb: per-word embeddings, shape: (batch_size, L, embed_size).
    """

    def __init__(
        self,
        vocab_size,
        word_dim,
        embed_size,
        num_layers,
        use_bi_gru=False,
        no_txtnorm=False,
    ):
        super(EncoderText, self).__init__()
        self.embed_size = embed_size
        self.no_txtnorm = no_txtnorm
        # word embedding with dropout regularization
        self.embed = nn.Embedding(vocab_size, word_dim)
        self.dropout = nn.Dropout(0.4)
        # caption encoder (optionally bidirectional)
        self.use_bi_gru = use_bi_gru
        self.cap_rnn = nn.GRU(
            word_dim, embed_size, num_layers, batch_first=True, bidirectional=use_bi_gru
        )
        self.init_weights()

    def init_weights(self):
        self.embed.weight.data.uniform_(-0.1, 0.1)

    def forward(self, captions, lengths):
        """Encode variable-length captions into per-word embeddings."""
        word_emb = self.dropout(self.embed(captions))
        # pack so the GRU skips padding positions
        packed = pack_padded_sequence(
            word_emb, lengths, batch_first=True, enforce_sorted=False
        )
        rnn_out, _ = self.cap_rnn(packed)
        cap_emb, _ = pad_packed_sequence(rnn_out, batch_first=True)
        if self.use_bi_gru:
            # average forward and backward direction outputs
            half = cap_emb.size(2) // 2
            cap_emb = (cap_emb[:, :, :half] + cap_emb[:, :, half:]) / 2
        if not self.no_txtnorm:
            cap_emb = l2norm(cap_emb, dim=-1)
        return cap_emb
class VisualSA(nn.Module):
    """
    Build global image representations by self-attention.
    Args: - local: local region embeddings, shape: (batch_size, 36, 1024)
          - raw_global: raw image by averaging regions, shape: (batch_size, 1024)
    Returns: - new_global: final image by self-attention, shape: (batch_size, 1024).
    """

    def __init__(self, embed_dim, dropout_rate, num_region):
        super(VisualSA, self).__init__()
        self.embedding_local = nn.Sequential(
            nn.Linear(embed_dim, embed_dim),
            nn.BatchNorm1d(num_region),
            nn.Tanh(),
            nn.Dropout(dropout_rate),
        )
        self.embedding_global = nn.Sequential(
            nn.Linear(embed_dim, embed_dim),
            nn.BatchNorm1d(embed_dim),
            nn.Tanh(),
            nn.Dropout(dropout_rate),
        )
        self.embedding_common = nn.Sequential(nn.Linear(embed_dim, 1))
        # init_weights must run before softmax is registered so that it only
        # walks the three Sequential children above
        self.init_weights()
        self.softmax = nn.Softmax(dim=1)

    def init_weights(self):
        for block in self.children():
            for layer in block:
                if isinstance(layer, nn.Linear):
                    bound = np.sqrt(6.0) / np.sqrt(layer.in_features + layer.out_features)
                    layer.weight.data.uniform_(-bound, bound)
                    layer.bias.data.fill_(0)
                elif isinstance(layer, nn.BatchNorm1d):
                    layer.weight.data.fill_(1)
                    layer.bias.data.zero_()

    def forward(self, local, raw_global):
        # embed local regions and the raw global image
        l_emb = self.embedding_local(local)
        g_emb = self.embedding_global(raw_global)
        # broadcast the global vector over every region, gate, and score
        g_rep = g_emb.unsqueeze(1).repeat(1, l_emb.size(1), 1)
        scores = self.embedding_common(l_emb.mul(g_rep)).squeeze(2)
        weights = self.softmax(scores)  # (batch_size, num_region)
        # attention-weighted sum over regions, then l2-normalize
        pooled = (weights.unsqueeze(2) * local).sum(dim=1)
        return l2norm(pooled, dim=-1)
class TextSA(nn.Module):
    """
    Build global text representations by self-attention.
    Args: - local: local word embeddings, shape: (batch_size, L, 1024)
          - raw_global: raw text by averaging words, shape: (batch_size, 1024)
    Returns: - new_global: final text by self-attention, shape: (batch_size, 1024).
    """

    def __init__(self, embed_dim, dropout_rate):
        super(TextSA, self).__init__()
        self.embedding_local = nn.Sequential(
            nn.Linear(embed_dim, embed_dim), nn.Tanh(), nn.Dropout(dropout_rate)
        )
        self.embedding_global = nn.Sequential(
            nn.Linear(embed_dim, embed_dim), nn.Tanh(), nn.Dropout(dropout_rate)
        )
        self.embedding_common = nn.Sequential(nn.Linear(embed_dim, 1))
        # init_weights must run before softmax is registered so that it only
        # walks the three Sequential children above
        self.init_weights()
        self.softmax = nn.Softmax(dim=1)

    def init_weights(self):
        for block in self.children():
            for layer in block:
                if isinstance(layer, nn.Linear):
                    bound = np.sqrt(6.0) / np.sqrt(layer.in_features + layer.out_features)
                    layer.weight.data.uniform_(-bound, bound)
                    layer.bias.data.fill_(0)
                elif isinstance(layer, nn.BatchNorm1d):
                    layer.weight.data.fill_(1)
                    layer.bias.data.zero_()

    def forward(self, local, raw_global):
        # embed local words and the raw global caption
        l_emb = self.embedding_local(local)
        g_emb = self.embedding_global(raw_global)
        # broadcast the global vector over every word, gate, and score
        g_rep = g_emb.unsqueeze(1).repeat(1, l_emb.size(1), 1)
        scores = self.embedding_common(l_emb.mul(g_rep)).squeeze(2)
        weights = self.softmax(scores)  # (batch_size, L)
        # attention-weighted sum over words, then l2-normalize
        pooled = (weights.unsqueeze(2) * local).sum(dim=1)
        return l2norm(pooled, dim=-1)
class GraphReasoning(nn.Module):
    """
    One step of similarity graph reasoning over a fully-connected graph.
    Args: - sim_emb: global and local alignments, shape: (batch_size, L+1, 256)
    Returns: - sim_sgr: reasoned graph nodes, shape: (batch_size, L+1, 256)
    """

    def __init__(self, sim_dim):
        super(GraphReasoning, self).__init__()
        self.graph_query_w = nn.Linear(sim_dim, sim_dim)
        self.graph_key_w = nn.Linear(sim_dim, sim_dim)
        self.sim_graph_w = nn.Linear(sim_dim, sim_dim)
        self.relu = nn.ReLU()
        self.init_weights()

    def forward(self, sim_emb):
        queries = self.graph_query_w(sim_emb)
        keys = self.graph_key_w(sim_emb)
        # edge weights: softmax over query/key dot products (no scaling)
        edges = torch.softmax(torch.bmm(queries, keys.permute(0, 2, 1)), dim=-1)
        # propagate node features along the weighted edges
        nodes = torch.bmm(edges, sim_emb)
        return self.relu(self.sim_graph_w(nodes))

    def init_weights(self):
        for layer in self.children():
            if isinstance(layer, nn.Linear):
                bound = np.sqrt(6.0) / np.sqrt(layer.in_features + layer.out_features)
                layer.weight.data.uniform_(-bound, bound)
                layer.bias.data.fill_(0)
            elif isinstance(layer, nn.BatchNorm1d):
                layer.weight.data.fill_(1)
                layer.bias.data.zero_()
class AttentionFiltration(nn.Module):
    """
    Similarity Attention Filtration via a gate-based attention.
    Args: - sim_emb: global and local alignments, shape: (batch_size, L+1, 256)
    Returns: - sim_saf: aggregated alignment, shape: (batch_size, 256)
    """

    def __init__(self, sim_dim):
        super(AttentionFiltration, self).__init__()
        self.attn_sim_w = nn.Linear(sim_dim, 1)
        self.bn = nn.BatchNorm1d(1)
        self.init_weights()

    def forward(self, sim_emb):
        # per-node gates in (0, 1), L1-normalized over the node axis
        gates = torch.sigmoid(self.bn(self.attn_sim_w(sim_emb).permute(0, 2, 1)))
        sim_attn = l1norm(gates, dim=-1)
        # gate-weighted aggregation of the alignment nodes
        sim_saf = torch.matmul(sim_attn, sim_emb)
        return l2norm(sim_saf.squeeze(1), dim=-1)

    def init_weights(self):
        for layer in self.children():
            if isinstance(layer, nn.Linear):
                bound = np.sqrt(6.0) / np.sqrt(layer.in_features + layer.out_features)
                layer.weight.data.uniform_(-bound, bound)
                layer.bias.data.fill_(0)
            elif isinstance(layer, nn.BatchNorm1d):
                layer.weight.data.fill_(1)
                layer.bias.data.zero_()
class EncoderSimilarity(nn.Module):
    """
    Compute the image-text similarity by SGR, SAF, AVE
    Args: - img_emb: local region embeddings, shape: (batch_size, 36, 1024)
          - cap_emb: local word embeddings, shape: (batch_size, L, 1024)
    Returns:
        - sim_all: final image-text similarities, shape: (batch_size, batch_size).

    NOTE(review): despite "AVE" being the default module_name, only "SGR" and
    "SAF" are implemented below -- any other value raises ValueError.
    """
    def __init__(self, embed_size, sim_dim, module_name="AVE", sgr_step=3):
        super(EncoderSimilarity, self).__init__()
        self.module_name = module_name
        # self-attention pooling for the global image (36 regions) and text
        self.v_global_w = VisualSA(embed_size, 0.4, 36)
        self.t_global_w = TextSA(embed_size, 0.4)
        # project local/global alignment vectors into the similarity space
        self.sim_tranloc_w = nn.Linear(embed_size, sim_dim)
        self.sim_tranglo_w = nn.Linear(embed_size, sim_dim)
        # scalar similarity head
        self.sim_eval_w = nn.Linear(sim_dim, 1)
        self.sigmoid = nn.Sigmoid()
        if module_name == "SGR":
            # sgr_step rounds of graph reasoning, applied sequentially
            self.SGR_module = nn.ModuleList(
                [GraphReasoning(sim_dim) for i in range(sgr_step)]
            )
        elif module_name == "SAF":
            self.SAF_module = AttentionFiltration(sim_dim)
        else:
            raise ValueError("Invalid module")
        self.init_weights()
    def forward(self, img_emb, cap_emb, cap_lens):
        """Score every image against every caption.

        cap_lens[i] gives the true (unpadded) length of caption i.
        """
        sim_all = []
        n_image = img_emb.size(0)
        n_caption = cap_emb.size(0)
        # get enhanced global images by self-attention
        img_ave = torch.mean(img_emb, 1)
        img_glo = self.v_global_w(img_emb, img_ave)
        for i in range(n_caption):
            # get the i-th sentence (trimmed to its true length)
            n_word = cap_lens[i]
            cap_i = cap_emb[i, :n_word, :].unsqueeze(0)
            cap_i_expand = cap_i.repeat(n_image, 1, 1)
            # get enhanced global i-th text by self-attention
            cap_ave_i = torch.mean(cap_i, 1)
            cap_glo_i = self.t_global_w(cap_i, cap_ave_i)
            # local-global alignment construction
            Context_img = SCAN_attention(cap_i_expand, img_emb, smooth=9.0)
            # squared difference as the alignment vector, then project
            sim_loc = torch.pow(torch.sub(Context_img, cap_i_expand), 2)
            sim_loc = l2norm(self.sim_tranloc_w(sim_loc), dim=-1)
            sim_glo = torch.pow(torch.sub(img_glo, cap_glo_i), 2)
            sim_glo = l2norm(self.sim_tranglo_w(sim_glo), dim=-1)
            # concat the global and local alignments (global node first)
            sim_emb = torch.cat([sim_glo.unsqueeze(1), sim_loc], 1)
            # compute the final similarity vector
            if self.module_name == "SGR":
                for module in self.SGR_module:
                    sim_emb = module(sim_emb)
                # the global node carries the reasoned similarity
                sim_vec = sim_emb[:, 0, :]
            else:
                sim_vec = self.SAF_module(sim_emb)
            # compute the final similarity score
            sim_i = self.sigmoid(self.sim_eval_w(sim_vec))
            sim_all.append(sim_i)
        # (n_image, n_caption)
        sim_all = torch.cat(sim_all, 1)
        return sim_all
    def init_weights(self):
        # Xavier-uniform for direct Linear children; BatchNorm reset to identity
        for m in self.children():
            if isinstance(m, nn.Linear):
                r = np.sqrt(6.0) / np.sqrt(m.in_features + m.out_features)
                m.weight.data.uniform_(-r, r)
                m.bias.data.fill_(0)
            elif isinstance(m, nn.BatchNorm1d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
def SCAN_attention(query, context, smooth, eps=1e-8):
"""
query: (n_context, queryL, d)
context: (n_context, sourceL, d)
"""
# --> (batch, d, queryL)
queryT = torch.transpose(query, 1, 2)
# (batch, sourceL, d)(batch, d, queryL)
# --> (batch, sourceL, queryL)
attn = torch.bmm(context, queryT)
attn = nn.LeakyReLU(0.1)(attn)
attn = l2norm(attn, 2)
# --> (batch, queryL, sourceL)
attn = torch.transpose(attn, 1, 2).contiguous()
# --> (batch, queryL, sourceL
attn = F.softmax(attn * smooth, dim=2)
# --> (batch, sourceL, queryL)
attnT = torch.transpose(attn, 1, 2).contiguous()
# --> (batch, d, sourceL)
contextT = | |
<reponame>meadowdata/meadowflow<gh_stars>1-10
from __future__ import annotations
import abc
import asyncio
import dataclasses
import functools
import io
import os.path
import pickle
import shlex
import threading
import uuid
from typing import Callable, TypeVar, Union, Any, Dict, Optional, Sequence, cast, Tuple
import cloudpickle
import fabric
import paramiko.ssh_exception
from meadowgrid import ServerAvailableFolder
from meadowgrid.agent import run_one_job
from meadowgrid.aws_integration import _get_default_region_name
from meadowgrid.config import MEADOWGRID_INTERPRETER
from meadowgrid.coordinator_client import (
_add_deployments_to_job,
_create_py_function,
_make_valid_job_id,
_pickle_protocol_for_deployed_interpreter,
_string_pairs_from_dict,
)
from meadowgrid.deployed_function import (
CodeDeployment,
InterpreterDeployment,
MeadowGridFunction,
VersionedCodeDeployment,
VersionedInterpreterDeployment,
)
from meadowgrid.ec2_alloc import allocate_ec2_instances
from meadowgrid.grid import _get_id_name_function, _get_friendly_name
from meadowgrid.grid_task_queue import (
get_results,
worker_loop,
create_queues_and_add_tasks,
)
from meadowgrid.meadowgrid_pb2 import (
Job,
JobToRun,
ProcessState,
PyCommandJob,
PyFunctionJob,
ServerAvailableInterpreter,
)
from meadowgrid.resource_allocation import Resources
_T = TypeVar("_T")
_U = TypeVar("_U")
# if num_concurrent_tasks isn't specified, by default, launch total_num_tasks *
# _DEFAULT_CONCURRENT_TASKS_FACTOR workers
_DEFAULT_CONCURRENT_TASKS_FACTOR = 0.5
async def _retry(
    function: Callable[[], _T],
    exception_types: Union[type, Tuple[type, ...]],
    max_num_attempts: int = 3,
    delay_seconds: float = 1,
) -> _T:
    """Call ``function`` repeatedly until it succeeds or attempts run out.

    Args:
        function: zero-argument callable to invoke (synchronously).
        exception_types: exception class, or tuple of classes, that should
            trigger a retry; anything else propagates immediately.
            (Fix: the annotation was ``Exception`` -- an *instance* type --
            but ``except`` needs exception classes, which is why callers had
            to ``cast`` and the old body needed ``# type: ignore``.)
        max_num_attempts: total number of attempts before re-raising.
        delay_seconds: pause between attempts.

    Returns:
        Whatever ``function`` returns on the first successful attempt.
    """
    attempts = 0
    while True:
        try:
            return function()
        except exception_types as e:
            attempts += 1
            if attempts >= max_num_attempts:
                raise
            print(f"Retrying on error: {e}")
            await asyncio.sleep(delay_seconds)
@dataclasses.dataclass(frozen=True)
class Deployment:
    """Everything needed to run a job: interpreter, code, environment variables."""

    # which interpreter/environment to run under (possibly versioned)
    interpreter: Union[InterpreterDeployment, VersionedInterpreterDeployment]
    # where the code lives; None falls back to ServerAvailableFolder()
    # (see _add_defaults_to_deployment)
    code: Union[CodeDeployment, VersionedCodeDeployment, None] = None
    # extra environment variables for the job process; None means none
    environment_variables: Optional[Dict[str, str]] = None
def _add_defaults_to_deployment(
    deployment: Optional[Deployment],
) -> Tuple[
    Union[InterpreterDeployment, VersionedInterpreterDeployment],
    Union[CodeDeployment, VersionedCodeDeployment],
    Dict[str, str],
]:
    """Expand an optional Deployment into (interpreter, code, env vars),
    substituting server-side defaults for anything that is missing."""
    if deployment is not None:
        return (
            deployment.interpreter,
            deployment.code or ServerAvailableFolder(),
            deployment.environment_variables or {},
        )
    # no deployment at all: use the meadowgrid default interpreter and folder
    return (
        ServerAvailableInterpreter(interpreter_path=MEADOWGRID_INTERPRETER),
        ServerAvailableFolder(),
        {},
    )
class Host(abc.ABC):
    """Abstract target that can execute a single JobToRun and return its result."""

    @abc.abstractmethod
    async def run_job(self, job_to_run: JobToRun) -> Any:
        """Run the job to completion and return its (unpickled) result."""
        pass
@dataclasses.dataclass(frozen=True)
class LocalHost(Host):
    """Runs a job in-process on the local machine via the meadowgrid agent."""

    async def run_job(self, job_to_run: JobToRun) -> Any:
        initial_update, continuation = await run_one_job(job_to_run)
        state = initial_update.process_state
        # if the job is still running and we got a continuation, wait for the
        # final state; otherwise the initial state is already final
        if (
            state.state == ProcessState.ProcessStateEnum.RUNNING
            and continuation is not None
        ):
            state = (await continuation).process_state
        if state.state != ProcessState.ProcessStateEnum.SUCCEEDED:
            # TODO make better error messages
            raise ValueError(f"Error: {state.state}")
        return pickle.loads(state.pickled_result)
@dataclasses.dataclass(frozen=True)
class SshHost(Host):
    """A remote host reached over SSH; jobs run via the remote meadowrun CLI."""

    # hostname or IP address to SSH into
    address: str
    # these options are forwarded directly to Fabric
    fabric_kwargs: Optional[Dict[str, Any]] = None

    async def run_job(self, job_to_run: JobToRun) -> Any:
        """Copy the serialized job to the remote machine, run it with
        meadowrun, fetch the resulting ProcessState, and return the unpickled
        result. Raises ValueError on connection/setup failure or when the job
        does not end in the SUCCEEDED state; always attempts to clean up the
        per-job io files on the remote machine.
        """
        with fabric.Connection(
            self.address, **(self.fabric_kwargs or {})
        ) as connection:
            # set inside try so the finally-block cleanup only runs once we
            # actually created remote files under this prefix
            job_io_prefix = ""
            try:
                # assumes that meadowgrid is installed in /meadowgrid/env as per
                # build_meadowgrid_amis.md. Also uses the default working_folder, which
                # should (but doesn't strictly need to) correspond to
                # agent._set_up_working_folder
                # try the first command 3 times, as this is when we actually try to
                # connect to the remote machine.
                home_result = await _retry(
                    lambda: connection.run("echo $HOME"),
                    cast(Exception, paramiko.ssh_exception.NoValidConnectionsError),
                )
                if not home_result.ok:
                    raise ValueError(
                        "Error getting home directory on remote machine "
                        + home_result.stdout
                    )
                remote_working_folder = f"{home_result.stdout.strip()}/meadowgrid"
                mkdir_result = connection.run(f"mkdir -p {remote_working_folder}/io")
                if not mkdir_result.ok:
                    raise ValueError(
                        "Error creating meadowgrid directory " + mkdir_result.stdout
                    )
                job_io_prefix = f"{remote_working_folder}/io/{job_to_run.job.job_id}"
                # serialize job_to_run and send it to the remote machine
                with io.BytesIO(
                    job_to_run.SerializeToString()
                ) as job_to_run_serialized:
                    connection.put(
                        job_to_run_serialized, remote=f"{job_io_prefix}.job_to_run"
                    )
                # fabric doesn't have any async APIs, which means that in order to run
                # more than one fabric command at the same time, we need to have a
                # thread per fabric command. We use an asyncio.Future here to make the
                # API async, so from the user perspective, it feels like this function
                # is async
                # fabric is supposedly not threadsafe, but it seems to work as long as
                # more than one connection is not being opened at the same time:
                # https://github.com/fabric/fabric/pull/2010/files
                result_future: asyncio.Future = asyncio.Future()
                event_loop = asyncio.get_running_loop()
                def run_and_wait() -> None:
                    # runs on a worker thread; hands the outcome back to the
                    # event loop via call_soon_threadsafe
                    try:
                        # use meadowrun to run the job
                        returned_result = connection.run(
                            "/meadowgrid/env/bin/meadowrun "
                            f"--job-id {job_to_run.job.job_id} "
                            f"--working-folder {remote_working_folder} "
                            # TODO this flag should only be passed in if we were
                            # originally using an EC2AllocHost
                            f"--needs-deallocation"
                        )
                        event_loop.call_soon_threadsafe(
                            lambda r=returned_result: result_future.set_result(r)
                        )
                    except Exception as e2:
                        event_loop.call_soon_threadsafe(
                            lambda e2=e2: result_future.set_exception(e2)
                        )
                threading.Thread(target=run_and_wait).start()
                result = await result_future
                # TODO consider using result.tail, result.stdout
                # see if we got a normal return code
                if result.return_code != 0:
                    raise ValueError(f"Process exited {result.return_code}")
                # the remote meadowrun wrote the final ProcessState next to the
                # job file; download and parse it
                with io.BytesIO() as result_buffer:
                    connection.get(f"{job_io_prefix}.process_state", result_buffer)
                    result_buffer.seek(0)
                    process_state = ProcessState()
                    process_state.ParseFromString(result_buffer.read())
                if process_state.state == ProcessState.ProcessStateEnum.SUCCEEDED:
                    job_spec_type = job_to_run.job.WhichOneof("job_spec")
                    # we must have a result from functions, in other cases we can
                    # optionally have a result
                    if job_spec_type == "py_function" or process_state.pickled_result:
                        return pickle.loads(process_state.pickled_result)
                    else:
                        return None
                else:
                    # TODO we should throw a better exception
                    raise ValueError(f"Running remotely failed: {process_state}")
            finally:
                if job_io_prefix:
                    remote_paths = " ".join(
                        [
                            f"{job_io_prefix}.job_to_run",
                            f"{job_io_prefix}.state",
                            f"{job_io_prefix}.result",
                            f"{job_io_prefix}.process_state",
                            f"{job_io_prefix}.initial_process_state",
                        ]
                    )
                    try:
                        # -f so that we don't throw an error on files that don't
                        # exist
                        connection.run(f"rm -f {remote_paths}")
                    except Exception as e:
                        # best-effort cleanup: never mask the original error
                        print(
                            f"Error cleaning up files on remote machine: "
                            f"{remote_paths} {e}"
                        )
                # TODO also clean up log file?s
@dataclasses.dataclass(frozen=True)
class EC2AllocHost(Host):
    """A placeholder for a host that will be allocated/created by ec2_alloc.py"""

    # resource requirements used to pick/allocate an instance
    logical_cpu_required: int
    memory_gb_required: float
    # maximum acceptable spot-interruption probability
    interruption_probability_threshold: float
    region_name: Optional[str] = None
    # SSH private key for connecting (as "ubuntu") to the allocated host
    private_key_filename: Optional[str] = None

    async def run_job(self, job_to_run: JobToRun) -> Any:
        """Allocate exactly one EC2 instance/job slot, then delegate to
        SshHost.run_job on the allocated address."""
        hosts = await allocate_ec2_instances(
            Resources(self.memory_gb_required, self.logical_cpu_required, {}),
            1,
            self.interruption_probability_threshold,
            self.region_name or await _get_default_region_name(),
        )
        fabric_kwargs: Dict[str, Any] = {"user": "ubuntu"}
        if self.private_key_filename:
            fabric_kwargs["connect_kwargs"] = {
                "key_filename": self.private_key_filename
            }
        if len(hosts) != 1:
            raise ValueError(f"Asked for one host, but got back {len(hosts)}")
        # hosts maps public address -> allocated job ids; with exactly one
        # entry the loop runs once, and `host` deliberately leaks out of the
        # loop for use in the return statement below
        for host, job_ids in hosts.items():
            if len(job_ids) != 1:
                raise ValueError(f"Asked for one job allocation but got {len(job_ids)}")
            # Kind of weird that we're changing the job_id here, but okay as long as
            # job_id remains mostly an internal concept
            job_to_run.job.job_id = job_ids[0]
        return await SshHost(host, fabric_kwargs).run_job(job_to_run)
@dataclasses.dataclass(frozen=True)
class EC2AllocHosts:
    """
    A placeholder for a set of hosts that will be allocated/created by ec2_alloc.py
    """

    # per-task resource requirements
    logical_cpu_required_per_task: int
    memory_gb_required_per_task: float
    # maximum acceptable spot-interruption probability
    interruption_probability_threshold: float
    # defaults to half the number of total tasks
    num_concurrent_tasks: Optional[int] = None
    region_name: Optional[str] = None
    # SSH private key used when connecting to the allocated hosts
    private_key_filename: Optional[str] = None
async def run_function(
    function: Callable[..., _T],
    host: Host,
    deployment: Optional[Deployment] = None,
    args: Optional[Sequence[Any]] = None,
    kwargs: Optional[Dict[str, Any]] = None,
) -> _T:
    """
    Run ``function(*args, **kwargs)`` on a remote machine specified by "host"
    and return its result. Connects to the remote machine over SSH via the
    fabric library https://www.fabfile.org/ fabric_kwargs are passed directly
    to fabric.Connection(). The remote machine must have meadowgrid installed
    as per build_meadowgrid_amis.md
    """
    job_id, friendly_name, pickled_function = _get_id_name_function(function)
    interpreter, code, env_vars = _add_defaults_to_deployment(deployment)
    protocol = _pickle_protocol_for_deployed_interpreter()
    # build the job spec that the remote agent will execute
    function_job = Job(
        job_id=_make_valid_job_id(job_id),
        job_friendly_name=_make_valid_job_id(friendly_name),
        environment_variables=_string_pairs_from_dict(env_vars),
        result_highest_pickle_protocol=pickle.HIGHEST_PROTOCOL,
        py_function=_create_py_function(
            MeadowGridFunction.from_pickled(pickled_function, args, kwargs),
            protocol,
        ),
    )
    _add_deployments_to_job(function_job, code, interpreter)
    # TODO figure out what to do about the [0], which is there for dropping effects
    return (await host.run_job(JobToRun(job=function_job)))[0]
async def run_command(
    args: Union[str, Sequence[str]],
    host: Host,
    deployment: Optional[Deployment] = None,
) -> None:
    """
    Runs the specified command on a remote machine. See run_function_remote for more
    details on requirements for the remote host.
    """
    job_id = str(uuid.uuid4())
    if isinstance(args, str):
        args = shlex.split(args)
    # this is kind of a silly way to get a friendly name--treat the first three
    # elements of args as if they're paths and take the last part of each path
    friendly_name = "-".join(os.path.basename(part) for part in args[:3])
    interpreter, code, env_vars = _add_defaults_to_deployment(deployment)
    command_job = Job(
        job_id=_make_valid_job_id(job_id),
        job_friendly_name=_make_valid_job_id(friendly_name),
        environment_variables=_string_pairs_from_dict(env_vars),
        result_highest_pickle_protocol=pickle.HIGHEST_PROTOCOL,
        py_command=PyCommandJob(command_line=args),
    )
    _add_deployments_to_job(command_job, code, interpreter)
    await host.run_job(JobToRun(job=command_job))
async def run_map(
function: Callable[[_T], _U],
args: Sequence[_T],
hosts: EC2AllocHosts,
deployment: Optional[Deployment] = None,
) -> Sequence[_U]:
"""Equivalent to map(function, args), but runs distributed."""
if not hosts.num_concurrent_tasks:
num_concurrent_tasks = len(args) // 2 + 1
else:
num_concurrent_tasks = min(hosts.num_concurrent_tasks, len(args))
region_name = hosts.region_name or await _get_default_region_name()
# the first stage of preparation, which happens concurrently:
# 1. get hosts
allocated_hosts_future = asyncio.create_task(
allocate_ec2_instances(
Resources(
hosts.memory_gb_required_per_task,
hosts.logical_cpu_required_per_task,
{},
),
num_concurrent_tasks,
hosts.interruption_probability_threshold,
region_name,
)
)
# 2. create SQS queues and add tasks to the request queue
queues_future = asyncio.create_task(create_queues_and_add_tasks(region_name, args))
# 3. prepare some variables for constructing the worker jobs
friendly_name = _make_valid_job_id(_get_friendly_name(function))
interpreter, code, environment_variables = _add_defaults_to_deployment(deployment)
environment_variables = _string_pairs_from_dict(environment_variables)
pickle_protocol = _pickle_protocol_for_deployed_interpreter()
fabric_kwargs: Dict[str, Any] = {"user": "ubuntu"}
if hosts.private_key_filename:
fabric_kwargs["connect_kwargs"] = {"key_filename": hosts.private_key_filename}
# now wait for 1 and 2 to complete:
request_queue_url, result_queue_url = await queues_future
allocated_hosts = await allocated_hosts_future
# Now we will run worker_loop jobs on the hosts we got:
pickled_worker_function = cloudpickle.dumps(
functools.partial(
worker_loop, function, request_queue_url, result_queue_url, region_name
),
protocol=pickle_protocol,
)
worker_tasks = []
worker_id = 0
for public_address, worker_job_ids in allocated_hosts.items():
for worker_job_id in worker_job_ids:
job = Job(
job_id=worker_job_id,
job_friendly_name=friendly_name,
environment_variables=environment_variables,
result_highest_pickle_protocol=pickle.HIGHEST_PROTOCOL,
py_function=PyFunctionJob(
pickled_function=pickled_worker_function,
pickled_function_arguments=pickle.dumps(
([public_address, worker_id], {}), protocol=pickle_protocol
),
),
)
_add_deployments_to_job(job, code, interpreter)
worker_tasks.append(
asyncio.create_task(
SshHost(public_address, | |
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) Qotto, 2019
""" KafkaConsumer class
This class consume event / command / result in Kafka topics.
Todo:
* In function listen_store_records, add commit store (later V2)
"""
import asyncio
import json
from logging import Logger, getLogger
from typing import List, Dict, Any, Union
from aiokafka import (AIOKafkaConsumer)
from aiokafka.errors import (IllegalStateError, UnsupportedVersionError, CommitFailedError,
KafkaError, KafkaTimeoutError)
from kafka.errors import KafkaConnectionError
from tonga.models.handlers.command.command_handler import BaseCommandHandler
from tonga.models.handlers.event.event_handler import BaseEventHandler
from tonga.models.handlers.result.result_handler import BaseResultHandler
from tonga.models.records.base import BaseRecord
from tonga.models.store.base import BaseStoreRecordHandler
from tonga.models.store.store_record import StoreRecord
from tonga.services.consumer.base import BaseConsumer
from tonga.services.consumer.errors import (ConsumerConnectionError, AioKafkaConsumerBadParams,
KafkaConsumerError, ConsumerKafkaTimeoutError,
IllegalOperation, TopicPartitionError,
NoPartitionAssigned, OffsetError, UnknownStoreRecordHandler,
UnknownHandler, UnknownHandlerReturn,
HandlerException, KafkaConsumerAlreadyStartedError,
KafkaConsumerNotStartedError)
from tonga.services.coordinator.assignors.statefulset_assignors import StatefulsetPartitionAssignor
from tonga.services.coordinator.client.kafka_client import KafkaClient
from tonga.services.coordinator.transaction.kafka_transaction import (KafkaTransactionalManager,
KafkaTransactionContext)
from tonga.services.errors import BadSerializer
from tonga.services.serializer.base import BaseSerializer
from tonga.services.serializer.kafka_key import KafkaKeySerializer
from tonga.stores.manager.base import BaseStoreManager
from tonga.stores.manager.errors import UninitializedStore
from tonga.models.structs.positioning import (BasePositioning, KafkaPositioning)
__all__ = [
'KafkaConsumer',
]
class KafkaConsumer(BaseConsumer):
"""KafkaConsumer is a client that publishes records to the Kafka cluster.
"""
_client: KafkaClient
serializer: BaseSerializer
_bootstrap_servers: Union[str, List[str]]
_client_id: str
_topics: List[str]
_group_id: str
_auto_offset_reset: str
_max_retries: int
_retry_interval: int
_retry_backoff_coeff: int
_isolation_level: str
_assignors_data: Dict[str, Any]
_store_manager: BaseStoreManager
_running: bool
_kafka_consumer: AIOKafkaConsumer
_transactional_manager: KafkaTransactionalManager
__current_offsets: Dict[str, BasePositioning]
__last_offsets: Dict[str, BasePositioning]
__last_committed_offsets: Dict[str, BasePositioning]
_loop: asyncio.AbstractEventLoop
logger: Logger
    def __init__(self, client: KafkaClient, serializer: BaseSerializer, topics: List[str],
                 loop: asyncio.AbstractEventLoop, client_id: str = None, group_id: str = None,
                 auto_offset_reset: str = 'earliest', max_retries: int = 10, retry_interval: int = 1000,
                 retry_backoff_coeff: int = 2, assignors_data: Dict[str, Any] = None,
                 store_manager: BaseStoreManager = None, isolation_level: str = 'read_uncommitted',
                 transactional_manager: KafkaTransactionalManager = None) -> None:
        """
        KafkaConsumer constructor

        Args:
            client (KafkaClient): Initialization class (contains client_id / bootstrap_servers)
            serializer (BaseSerializer): Serializer used to encode & decode events
            topics (List[str]): List of topics to subscribe to
            loop (asyncio.AbstractEventLoop): Asyncio loop
            client_id (str): Client name (if None, KafkaConsumer uses the KafkaClient client_id
                             suffixed with the current instance number)
            group_id (str): Name of the consumer group used for fetching and committing offsets.
                            If None, offset commits are disabled
            auto_offset_reset (str): Policy for resetting offsets on OffsetOutOfRange errors:
                                     'earliest' will move to the oldest available message,
                                     'latest' will move to the most recent.
                                     Any other value will raise an exception
            max_retries (int): Number of retries before critical failure
            retry_interval (int): Interval before next retry
            retry_backoff_coeff (int): Backoff coefficient for next retries
            assignors_data (Dict[str, Any]): Dict with assignor information, more details in
                                             StatefulsetPartitionAssignor
            store_manager (BaseStoreManager): If this store_manager is set, consumer calls
                                              initialize_store_manager(), otherwise listen_event
                                              is started
            isolation_level (str): Controls how to read messages written transactionally.
                                   If set to read_committed, will only return transactional
                                   messages which have been committed. If set to read_uncommitted,
                                   will return all messages, even transactional messages which have
                                   been aborted. Non-transactional messages will be returned
                                   unconditionally in either mode
            transactional_manager (KafkaTransactionalManager): Optional transactional context holder

        Raises:
            BadSerializer: serializer is not a BaseSerializer instance
            AioKafkaConsumerBadParams: aiokafka rejected the constructor parameters (ValueError)
            KafkaError: any other Kafka failure while constructing the consumer

        Returns:
            None
        """
        super().__init__()
        self.logger = getLogger('tonga')
        # Register KafkaClient
        self._client = client
        # Set default assignors_data if is None
        if assignors_data is None:
            assignors_data = {}
        # Create client_id — instance-suffixed so each replica of the service is distinguishable
        if client_id is None:
            self._client_id = self._client.client_id + '-' + str(self._client.cur_instance)
        else:
            self._client_id = client_id
        if isinstance(serializer, BaseSerializer):
            self.serializer = serializer
        else:
            raise BadSerializer
        self._bootstrap_servers = self._client.bootstrap_servers
        self._topics = topics
        self._group_id = group_id
        self._auto_offset_reset = auto_offset_reset
        self._max_retries = max_retries
        self._retry_interval = retry_interval
        self._retry_backoff_coeff = retry_backoff_coeff
        self._isolation_level = isolation_level
        self._assignors_data = assignors_data
        self._store_manager = store_manager
        self._running = False
        self._loop = loop
        # Offset bookkeeping, filled by load_offsets(); keys are assignment keys
        # built by KafkaPositioning.make_class_assignment_key(topic, partition).
        self.__current_offsets = dict()
        self.__last_offsets = dict()
        self.__last_committed_offsets = dict()
        self._transactional_manager = transactional_manager
        try:
            self.logger.info(json.dumps(assignors_data))
            # Assignor payload is passed to aiokafka as UTF-8-encoded JSON bytes.
            statefulset_assignor = StatefulsetPartitionAssignor(bytes(json.dumps(assignors_data), 'utf-8'))
            # enable_auto_commit=False: offsets are committed explicitly by this consumer.
            self._kafka_consumer = AIOKafkaConsumer(*self._topics, loop=self._loop,
                                                    bootstrap_servers=self._bootstrap_servers,
                                                    client_id=self._client_id, group_id=group_id,
                                                    value_deserializer=self.serializer.decode,
                                                    auto_offset_reset=self._auto_offset_reset,
                                                    isolation_level=self._isolation_level, enable_auto_commit=False,
                                                    key_deserializer=KafkaKeySerializer.decode,
                                                    partition_assignment_strategy=[statefulset_assignor])
        except KafkaError as err:
            self.logger.exception('%s', err.__str__())
            raise err
        except ValueError as err:
            # aiokafka signals bad constructor parameters with ValueError.
            self.logger.exception('%s', err.__str__())
            raise AioKafkaConsumerBadParams
        self.logger.debug('Create new consumer %s, group_id %s', self._client_id, group_id)
async def start_consumer(self) -> None:
"""
Start consumer
Returns:
None
Raises:
AttributeError: KafkaConsumerError
ValueError: If KafkaError or KafkaTimoutError is raised, exception value is contain
in KafkaConsumerError.msg
"""
if self._running:
raise KafkaConsumerAlreadyStartedError
for retry in range(2):
try:
await self._kafka_consumer.start()
self._running = True
self.logger.debug('Start consumer : %s, group_id : %s, retry : %s', self._client_id, self._group_id,
retry)
except KafkaTimeoutError as err:
self.logger.exception('%s', err.__str__())
await asyncio.sleep(1)
except KafkaConnectionError as err:
self.logger.exception('%s', err.__str__())
await asyncio.sleep(1)
except KafkaError as err:
self.logger.exception('%s', err.__str__())
raise err
else:
break
else:
raise ConsumerConnectionError
async def stop_consumer(self) -> None:
"""
Stop consumer
Returns:
None
Raises:
AttributeError: KafkaConsumerError
ValueError: If KafkaError is raised, exception value is contain
in KafkaConsumerError.msg
"""
if not self._running:
raise KafkaConsumerNotStartedError
try:
await self._kafka_consumer.stop()
self._running = False
self.logger.debug('Stop consumer : %s, group_id : %s', self._client_id, self._group_id)
except KafkaTimeoutError as err:
self.logger.exception('%s', err.__str__())
raise ConsumerKafkaTimeoutError
except KafkaError as err:
self.logger.exception('%s', err.__str__())
raise err
    def is_running(self) -> bool:
        """Return True once start_consumer() succeeded and until stop_consumer() is called."""
        return self._running
async def get_last_committed_offsets(self) -> Dict[str, BasePositioning]:
"""
Get last committed offsets
Returns:
Dict[str, KafkaPositioning]: Contains all assigned partitions with last committed offsets
"""
last_committed_offsets: Dict[str, BasePositioning] = dict()
self.logger.debug('Get last committed offsets')
if self._group_id is None:
raise IllegalOperation
for tp in self._kafka_consumer.assignment():
offset = await self._kafka_consumer.committed(tp)
last_committed_offsets[KafkaPositioning.make_class_assignment_key(tp.topic, tp.partition)] = \
KafkaPositioning(tp.topic, tp.partition, offset)
return last_committed_offsets
async def get_current_offsets(self) -> Dict[str, BasePositioning]:
"""
Get current offsets
Returns:
Dict[str, KafkaPositioning]: Contains all assigned partitions with current offsets
"""
current_offsets: Dict[str, BasePositioning] = dict()
self.logger.debug('Get current offsets')
for tp in self._kafka_consumer.assignment():
try:
offset = await self._kafka_consumer.position(tp)
current_offsets[KafkaPositioning.make_class_assignment_key(tp.topic, tp.partition)] = \
KafkaPositioning(tp.topic, tp.partition, offset)
except IllegalStateError as err:
self.logger.exception('%s', err.__str__())
raise err
return current_offsets
async def get_beginning_offsets(self) -> Dict[str, BasePositioning]:
"""
Get beginning offsets
Returns:
Dict[str, KafkaPositioning]: Contains all assigned partitions with beginning offsets
"""
beginning_offsets: Dict[str, BasePositioning] = dict()
self.logger.debug('Get beginning offsets')
for tp in self._kafka_consumer.assignment():
try:
offset = (await self._kafka_consumer.beginning_offsets([tp]))[tp]
beginning_offsets[KafkaPositioning.make_class_assignment_key(tp.topic, tp.partition)] = \
KafkaPositioning(tp.topic, tp.partition, offset)
except KafkaTimeoutError as err:
self.logger.exception('%s', err.__str__())
raise ConsumerKafkaTimeoutError
except UnsupportedVersionError as err:
self.logger.exception('%s', err.__str__())
raise err
return beginning_offsets
async def get_last_offsets(self) -> Dict[str, BasePositioning]:
"""
Get last offsets
Returns:
Dict[str, KafkaPositioning]: Contains all assigned partitions with last offsets
"""
last_offsets: Dict[str, BasePositioning] = dict()
self.logger.debug('Get last offsets')
for tp in self._kafka_consumer.assignment():
try:
offset = (await self._kafka_consumer.end_offsets([tp]))[tp]
last_offsets[KafkaPositioning.make_class_assignment_key(tp.topic, tp.partition)] = \
KafkaPositioning(tp.topic, tp.partition, offset)
except KafkaTimeoutError as err:
self.logger.exception('%s', err.__str__())
raise ConsumerKafkaTimeoutError
except UnsupportedVersionError as err:
self.logger.exception('%s', err.__str__())
raise err
return last_offsets
async def load_offsets(self, mod: str = 'earliest') -> None:
"""
This method was call before consume topics, assign position to consumer
Args:
mod: Start position of consumer (earliest, latest, committed)
Returns:
None
"""
self.logger.debug('Load offset mod : %s', mod)
if not self._running:
await self.start_consumer()
if mod == 'latest':
await self.seek_to_end()
elif mod == 'earliest':
await self.seek_to_beginning()
elif mod == 'committed':
await self.seek_to_last_commit()
else:
raise KafkaConsumerError
self.__current_offsets = await self.get_current_offsets()
self.__last_offsets = await self.get_last_offsets()
if self._group_id is not None:
self.__last_committed_offsets = await self.get_last_committed_offsets()
for key, kafka_positioning in self.__last_committed_offsets.items():
if kafka_positioning.get_current_offset() is None:
self.logger.debug('Seek to beginning, no committed offsets was found')
await self.seek_to_beginning(kafka_positioning)
async def debug_print_all_msg(self):
"""
Debug method, useful for display all msg was contained in assigned topic/partitions
Returns:
None
"""
while True:
message = await self._kafka_consumer.getone()
self.logger.info('----------------------------------------------------------------------------------------')
self.logger.info('Topic %s, Partition %s, Offset %s, Key %s, Value %s, Headers %s',
message.topic, message.partition, message.offset, message.key, message.value,
message.headers)
self.logger.info('----------------------------------------------------------------------------------------')
async def listen_records(self, mod: str = 'earliest') -> None:
"""
Listens records from assigned topic / partitions
Args:
mod: Start position of consumer (earliest, latest, committed)
Returns:
None
"""
if not self._running:
await self.load_offsets(mod)
self.pprint_consumer_offsets()
async for msg in self._kafka_consumer:
# Debug Display
self.logger.debug("---------------------------------------------------------------------------------")
self.logger.debug('New Message on consumer %s, Topic %s, Partition %s, Offset %s, '
'Key %s, Value %s, Headers %s', self._client_id, msg.topic, msg.partition,
msg.offset, msg.key, msg.value, msg.headers)
self.pprint_consumer_offsets()
self.logger.debug("---------------------------------------------------------------------------------")
key = KafkaPositioning.make_class_assignment_key(msg.topic, msg.partition)
self.__current_offsets[key].set_current_offset(msg.offset)
if self._transactional_manager is not None:
self._transactional_manager.set_ctx(KafkaTransactionContext(msg.topic, msg.partition,
msg.offset, self._group_id))
# self.last_offsets = await self.get_last_offsets()
sleep_duration_in_ms = self._retry_interval
for retries in range(0, self._max_retries):
try:
record_class = msg.value['record_class']
handler_class = msg.value['handler_class']
if handler_class is None:
self.logger.debug('Empty handler')
break
self.logger.debug('Event name : %s Event content :\n%s',
record_class.event_name(), record_class.__dict__)
# Calls handle if event is instance BaseHandler
if isinstance(handler_class, BaseEventHandler):
transactional = await handler_class.handle(event=record_class)
elif isinstance(handler_class, BaseCommandHandler):
transactional = await handler_class.execute(event=record_class)
elif isinstance(handler_class, BaseResultHandler):
transactional = await handler_class.on_result(event=record_class)
else:
# Otherwise raise KafkaConsumerUnknownHandler
raise UnknownHandler
# If result is none (no transactional process), check if consumer has an
# group_id (mandatory to commit in Kafka)
if transactional is | |
# plenum/test/consensus/view_change/test_new_view_builder.py
import pytest
from plenum.common.messages.node_messages import Checkpoint, ViewChange
from plenum.server.consensus.consensus_shared_data import ConsensusSharedData
from plenum.server.consensus.view_change_service import NewViewBuilder
from plenum.server.consensus.batch_id import BatchID
from plenum.test.checkpoints.helper import cp_digest
from plenum.test.consensus.view_change.helper import calc_committed
from plenum.test.greek import genNodeNames
from plenum.test.simulation.sim_random import DefaultSimRandom
N = 4
F = 1
# TestRunningTimeLimitSec = 600
@pytest.fixture
def consensus_data_provider():
    """Shared consensus state for a pool of N validator nodes."""
    return ConsensusSharedData("nodeA", genNodeNames(N), 0)
@pytest.fixture
def builder(consensus_data_provider):
    """NewViewBuilder under test, backed by the shared consensus data."""
    return NewViewBuilder(consensus_data_provider)
def test_calc_batches_empty(builder):
    """No view-change message carries any batch -> the new view has none either."""
    checkpoint = Checkpoint(instId=0, viewNo=0, seqNoStart=0, seqNoEnd=0, digest=cp_digest(0))
    view_change = ViewChange(viewNo=1, stableCheckpoint=0, prepared=[], preprepared=[],
                             checkpoints=[checkpoint])
    assert builder.calc_batches(checkpoint, [view_change] * 4) == []
def test_calc_batches_quorum(builder):
    """calc_batches only yields a result once enough view-change messages agree."""
    checkpoint = Checkpoint(instId=0, viewNo=0, seqNoStart=0, seqNoEnd=0, digest=cp_digest(0))
    view_change = ViewChange(viewNo=1, stableCheckpoint=0,
                             prepared=[BatchID(0, 0, 1, "digest1"), BatchID(0, 0, 2, "digest2")],
                             preprepared=[BatchID(0, 0, 1, "digest1"), BatchID(0, 0, 2, "digest2"),
                                          BatchID(0, 0, 3, "digest3")],
                             checkpoints=[checkpoint])
    # Below quorum: no decision possible.
    assert builder.calc_batches(checkpoint, [view_change] * 1) is None
    assert builder.calc_batches(checkpoint, [view_change] * 2) is None
    # Quorum reached: a (truthy) result is produced.
    assert builder.calc_batches(checkpoint, [view_change] * 3)
def test_calc_batches_same_data(builder):
    """Identical prepared/pre-prepared sets across all nodes are taken verbatim."""
    checkpoint = Checkpoint(instId=0, viewNo=0, seqNoStart=0, seqNoEnd=0, digest=cp_digest(0))
    batch_ids = [BatchID(0, 0, 1, "digest1"), BatchID(0, 0, 2, "digest2")]
    view_change = ViewChange(viewNo=1, stableCheckpoint=0, prepared=list(batch_ids),
                             preprepared=list(batch_ids), checkpoints=[checkpoint])
    assert builder.calc_batches(checkpoint, [view_change] * 4) == batch_ids
def test_calc_batches_same_data_prev_pp_viewno(builder):
    """Batches pre-prepared in a previous view are still carried over unchanged."""
    checkpoint = Checkpoint(instId=0, viewNo=0, seqNoStart=0, seqNoEnd=0, digest=cp_digest(0))
    batch_ids = [BatchID(1, 0, 1, "digest1"), BatchID(1, 0, 2, "digest2")]
    view_change = ViewChange(viewNo=2, stableCheckpoint=0, prepared=list(batch_ids),
                             preprepared=list(batch_ids), checkpoints=[checkpoint])
    assert builder.calc_batches(checkpoint, [view_change] * 4) == batch_ids
def test_calc_batches_diff_pp_viewno(builder):
    """Two halves of the pool disagree on pp_view_no -> nothing can be decided."""
    checkpoint = Checkpoint(instId=0, viewNo=0, seqNoStart=0, seqNoEnd=0, digest=cp_digest(0))

    def make_view_change(pp_view_no):
        ids = [BatchID(1, pp_view_no, 1, "digest1"), BatchID(1, pp_view_no, 2, "digest2")]
        return ViewChange(viewNo=2, stableCheckpoint=0, prepared=list(ids),
                          preprepared=list(ids), checkpoints=[checkpoint])

    vc_a = make_view_change(0)
    vc_b = make_view_change(1)
    assert builder.calc_batches(checkpoint, [vc_a, vc_a, vc_b, vc_b]) is None
def test_calc_batches_diff_pp_viewno_in_prepare_and_preprepare(builder):
    """pp_view_no mismatch between prepared and pre-prepared -> no reliable result."""
    checkpoint = Checkpoint(instId=0, viewNo=0, seqNoStart=0, seqNoEnd=0, digest=cp_digest(0))
    view_change = ViewChange(viewNo=2, stableCheckpoint=0,
                             prepared=[BatchID(1, 0, 1, "digest1"), BatchID(1, 0, 2, "digest2")],
                             preprepared=[BatchID(1, 1, 1, "digest1"), BatchID(1, 1, 2, "digest2")],
                             checkpoints=[checkpoint])
    assert builder.calc_batches(checkpoint, [view_change] * 4) is None
def test_calc_batches_diff_pp_viewno_in_preprepare(builder):
    """Nodes disagree on the pre-prepared pp_view_no -> no reliable result."""
    checkpoint = Checkpoint(instId=0, viewNo=0, seqNoStart=0, seqNoEnd=0, digest=cp_digest(0))
    consistent = ViewChange(viewNo=2, stableCheckpoint=0,
                            prepared=[BatchID(1, 0, 1, "digest1"), BatchID(1, 0, 2, "digest2")],
                            preprepared=[BatchID(1, 0, 1, "digest1"), BatchID(1, 0, 2, "digest2")],
                            checkpoints=[checkpoint])
    inconsistent = ViewChange(viewNo=2, stableCheckpoint=0,
                              prepared=[BatchID(1, 0, 1, "digest1"), BatchID(1, 0, 2, "digest2")],
                              preprepared=[BatchID(1, 1, 1, "digest1"), BatchID(1, 1, 2, "digest2")],
                              checkpoints=[checkpoint])
    assert builder.calc_batches(checkpoint, [consistent, inconsistent, inconsistent, inconsistent]) is None
def test_calc_batches_must_be_in_pre_prepare(builder):
    """A batch may only enter the new view if it also appears as pre-prepared."""
    checkpoint = Checkpoint(instId=0, viewNo=0, seqNoStart=0, seqNoEnd=0, digest=cp_digest(0))

    # All nodes claim (0, 2) as prepared without ever adding it to pre-prepared:
    # every node is malicious, so NewView cannot be calculated reliably -> None.
    view_change = ViewChange(viewNo=1, stableCheckpoint=0,
                             prepared=[BatchID(0, 0, 1, "digest1"), BatchID(0, 0, 2, "digest2")],
                             preprepared=[BatchID(0, 0, 1, "digest1")],
                             checkpoints=[checkpoint])
    assert builder.calc_batches(checkpoint, [view_change] * 4) is None

    # All nodes use a prepared view_no different from the pre-prepared one -> None.
    view_change = ViewChange(viewNo=1, stableCheckpoint=0,
                             prepared=[BatchID(0, 1, 1, "digest1"), BatchID(0, 1, 2, "digest2")],
                             preprepared=[BatchID(0, 0, 1, "digest1")],
                             checkpoints=[checkpoint])
    assert builder.calc_batches(checkpoint, [view_change] * 4) is None

    # All nodes use a prepared pp_view_no different from the pre-prepared one -> None.
    view_change = ViewChange(viewNo=1, stableCheckpoint=0,
                             prepared=[BatchID(1, 1, 1, "digest1"), BatchID(1, 1, 2, "digest2")],
                             preprepared=[BatchID(0, 1, 1, "digest1")],
                             checkpoints=[checkpoint])
    assert builder.calc_batches(checkpoint, [view_change] * 4) is None

    # A single node claiming an extra prepared batch is outvoted by the rest.
    odd_one = ViewChange(viewNo=1, stableCheckpoint=0,
                         prepared=[BatchID(0, 0, 1, "digest1"), BatchID(0, 0, 2, "digest2")],
                         preprepared=[BatchID(0, 0, 1, "digest1")],
                         checkpoints=[checkpoint])
    honest = ViewChange(viewNo=1, stableCheckpoint=0,
                        prepared=[BatchID(0, 0, 1, "digest1")],
                        preprepared=[BatchID(0, 0, 1, "digest1")],
                        checkpoints=[checkpoint])
    assert builder.calc_batches(checkpoint, [odd_one, honest, honest, honest]) == [BatchID(0, 0, 1, "digest1")]
def test_calc_batches_takes_prepared_only(builder):
    """Batches that are merely pre-prepared never make it into the new view."""
    checkpoint = Checkpoint(instId=0, viewNo=0, seqNoStart=0, seqNoEnd=0, digest=cp_digest(0))
    all_preprepared = [BatchID(0, 0, 1, "digest1"), BatchID(0, 0, 2, "digest2"),
                       BatchID(0, 0, 3, "digest3"), BatchID(0, 0, 4, "digest4")]

    nothing_prepared = ViewChange(viewNo=1, stableCheckpoint=0, prepared=[],
                                  preprepared=list(all_preprepared), checkpoints=[checkpoint])
    assert builder.calc_batches(checkpoint, [nothing_prepared] * 4) == []

    two_prepared = ViewChange(viewNo=1, stableCheckpoint=0,
                              prepared=[BatchID(0, 0, 1, "digest1"), BatchID(0, 0, 2, "digest2")],
                              preprepared=list(all_preprepared), checkpoints=[checkpoint])
    assert builder.calc_batches(checkpoint, [two_prepared] * 4) == \
        [BatchID(0, 0, 1, "digest1"), BatchID(0, 0, 2, "digest2")]
def test_calc_batches_takes_max_view_same_pp_view(builder):
    """When batches from two views compete, only the newest-view ones survive."""
    checkpoint = Checkpoint(instId=0, viewNo=0, seqNoStart=0, seqNoEnd=0, digest=cp_digest(0))
    both_views = [BatchID(0, 0, 1, "digest1"), BatchID(0, 0, 2, "digest2"),
                  BatchID(1, 1, 1, "digest1"), BatchID(1, 1, 2, "digest2")]
    view_change = ViewChange(viewNo=2, stableCheckpoint=0, prepared=list(both_views),
                             preprepared=list(both_views), checkpoints=[checkpoint])
    assert builder.calc_batches(checkpoint, [view_change] * 4) == \
        [BatchID(1, 1, 1, "digest1"), BatchID(1, 1, 2, "digest2")]
def test_calc_batches_takes_max_view_diff_pp_view(builder):
    """Max-view batches win even when their pp_view_no stems from an older view."""
    checkpoint = Checkpoint(instId=0, viewNo=0, seqNoStart=0, seqNoEnd=0, digest=cp_digest(0))
    both_views = [BatchID(0, 0, 1, "digest1"), BatchID(0, 0, 2, "digest2"),
                  BatchID(1, 0, 1, "digest1"), BatchID(1, 0, 2, "digest2")]
    view_change = ViewChange(viewNo=2, stableCheckpoint=0, prepared=list(both_views),
                             preprepared=list(both_views), checkpoints=[checkpoint])
    assert builder.calc_batches(checkpoint, [view_change] * 4) == \
        [BatchID(1, 0, 1, "digest1"), BatchID(1, 0, 2, "digest2")]
def test_calc_batches_respects_checkpoint(builder):
    """Batches at or below the stable checkpoint's seqNoEnd are discarded."""
    checkpoint = Checkpoint(instId=0, viewNo=0, seqNoStart=0, seqNoEnd=10, digest=cp_digest(10))

    below_cp = ViewChange(viewNo=1, stableCheckpoint=0,
                          prepared=[BatchID(0, 0, 1, "digest1"), BatchID(0, 0, 2, "digest2")],
                          preprepared=[BatchID(0, 0, 1, "digest1"), BatchID(0, 0, 2, "digest2")],
                          checkpoints=[checkpoint])
    assert builder.calc_batches(checkpoint, [below_cp] * 4) == []

    around_cp = ViewChange(viewNo=2, stableCheckpoint=0,
                           prepared=[BatchID(0, 0, 10, "digest10"), BatchID(0, 0, 11, "digest11"),
                                     BatchID(1, 0, 12, "digest12")],
                           preprepared=[BatchID(0, 0, 10, "digest10"), BatchID(0, 0, 11, "digest11"),
                                        BatchID(1, 0, 12, "digest12")],
                           checkpoints=[checkpoint])
    assert builder.calc_batches(checkpoint, [around_cp] * 4) == \
        [BatchID(0, 0, 11, "digest11"), BatchID(1, 0, 12, "digest12")]
def test_calc_batches_takes_quorum_of_prepared(builder):
    """A batch needs a quorum of prepare claims (or enough pre-prepares) to survive."""
    checkpoint = Checkpoint(instId=0, viewNo=0, seqNoStart=0, seqNoEnd=0, digest=cp_digest(0))
    conflicting = ViewChange(viewNo=1, stableCheckpoint=0,
                             prepared=[BatchID(0, 0, 1, "digest2")],
                             preprepared=[BatchID(0, 0, 1, "digest2")],
                             checkpoints=[checkpoint])
    agreeing = ViewChange(viewNo=1, stableCheckpoint=0,
                          prepared=[BatchID(0, 0, 1, "digest1")],
                          preprepared=[BatchID(0, 0, 1, "digest1")],
                          checkpoints=[checkpoint])
    preprepared_only = ViewChange(viewNo=1, stableCheckpoint=0,
                                  prepared=[],
                                  preprepared=[BatchID(0, 0, 1, "digest1")],
                                  checkpoints=[checkpoint])
    expected = [BatchID(0, 0, 1, "digest1")]

    assert builder.calc_batches(checkpoint, [conflicting, agreeing, agreeing, agreeing]) == expected
    assert builder.calc_batches(checkpoint, [preprepared_only, agreeing, agreeing, agreeing]) == expected
    assert builder.calc_batches(checkpoint, [preprepared_only] * 4) == []
    # Two-vs-two conflicting digests: no quorum, nothing can be decided.
    assert builder.calc_batches(checkpoint, [conflicting, conflicting, agreeing, agreeing]) is None
    # A single prepare claim suffices since we have enough pre-prepares.
    assert builder.calc_batches(checkpoint, [agreeing, preprepared_only, preprepared_only, preprepared_only]) == expected
    assert builder.calc_batches(checkpoint, [agreeing, agreeing, preprepared_only, preprepared_only]) == expected
def test_calc_batches_takes_one_prepared_if_weak_quorum_of_preprepared(builder):
    """
    A batch prepared by a single node still enters the new view when a weak
    quorum of nodes pre-prepared it.
    """
    cp = Checkpoint(instId=0, viewNo=0, seqNoStart=0, seqNoEnd=0, digest=cp_digest(0))
    vc1 = ViewChange(viewNo=1, stableCheckpoint=0,
                     prepared=[BatchID(0, 0, 1, "digest1"), BatchID(0, 0, 2, "digest2")],
                     preprepared=[BatchID(0, 0, 1, "digest1"), BatchID(0, 0, 2, "digest2")],
                     checkpoints=[cp])
    vc2 = ViewChange(viewNo=1, stableCheckpoint=0,
                     prepared=[BatchID(0, 0, 1, "digest1")],
                     preprepared=[BatchID(0, 0, 1, "digest1"), BatchID(0, 0, 2, "digest2")],
                     checkpoints=[cp])
    vc3 = ViewChange(viewNo=1, stableCheckpoint=0,
                     prepared=[BatchID(0, 0, 1, "digest1")],
                     preprepared=[BatchID(0, 0, 1, "digest1")],
                     checkpoints=[cp])
    vc4 = ViewChange(viewNo=1, stableCheckpoint=0,
                     prepared=[BatchID(0, 0, 1, "digest1")],
                     preprepared=[BatchID(0, 0, 1, "digest1")],
                     checkpoints=[cp])
    vcs = [vc1, vc2, vc3, vc4]
    # Fixed: the second expected entry was a bare tuple (0, 0, 2, "digest2");
    # use BatchID consistently so the expectation does not rely on NamedTuple
    # tuple-equality.
    assert builder.calc_batches(cp, vcs) == [BatchID(0, 0, 1, "digest1"), BatchID(0, 0, 2, "digest2")]
def test_calc_batches_takes_next_view_one_prepared_if_weak_quorum_of_preprepared(builder):
    """
    Same as the weak-quorum case above, but the singly-prepared batch comes
    from the next view (view_no=1) and must still be taken over.
    """
    cp = Checkpoint(instId=0, viewNo=0, seqNoStart=0, seqNoEnd=0, digest=cp_digest(0))
    vc1 = ViewChange(viewNo=2, stableCheckpoint=0,
                     prepared=[BatchID(0, 0, 1, "digest1"), BatchID(1, 1, 2, "digest2")],
                     preprepared=[BatchID(0, 0, 1, "digest1"), BatchID(1, 1, 2, "digest2")],
                     checkpoints=[cp])
    vc2 = ViewChange(viewNo=2, stableCheckpoint=0,
                     prepared=[BatchID(0, 0, 1, "digest1")],
                     preprepared=[BatchID(0, 0, 1, "digest1"), BatchID(1, 1, 2, "digest2")],
                     checkpoints=[cp])
    vc3 = ViewChange(viewNo=2, stableCheckpoint=0,
                     prepared=[BatchID(0, 0, 1, "digest1")],
                     preprepared=[BatchID(0, 0, 1, "digest1")],
                     checkpoints=[cp])
    vc4 = ViewChange(viewNo=2, stableCheckpoint=0,
                     prepared=[BatchID(0, 0, 1, "digest1")],
                     preprepared=[BatchID(0, 0, 1, "digest1")],
                     checkpoints=[cp])
    vcs = [vc1, vc2, vc3, vc4]
    # Fixed: the second expected entry was a bare tuple (1, 1, 2, "digest2");
    # use BatchID consistently so the expectation does not rely on NamedTuple
    # tuple-equality.
    assert builder.calc_batches(cp, vcs) == [BatchID(0, 0, 1, "digest1"), BatchID(1, 1, 2, "digest2")]
def test_calc_batches_takes_next_view_prepared_if_old_view_prepared(builder):
cp = Checkpoint(instId=0, viewNo=0, seqNoStart=0, seqNoEnd=0, digest=cp_digest(0))
vc1 = ViewChange(viewNo=2, stableCheckpoint=0,
prepared=[(1, 1, 1, "digest2")],
preprepared=[(0, 0, 1, "digest1"), (1, 1, 1, "digest2")],
checkpoints=[cp])
vc2 = ViewChange(viewNo=0, stableCheckpoint=0,
prepared=[BatchID(0, 0, 1, "digest1")],
preprepared=[BatchID(0, 0, 1, "digest1"), BatchID(1, | |
"Given sequence dictionary empty"
c_uc = 0
for seq_id in seqs_dic:
c_uc += len(re.findall(r"[A-Z]", seqs_dic[seq_id]))
return c_uc
#######################################################################
def seqs_dic_count_lc_nts(seqs_dic):
    """
    Count number of lowercase nucleotides in sequences stored in sequence
    dictionary.

    >>> seqs_dic = {'seq1': "gtACGTac", 'seq2': 'cgtACacg'}
    >>> seqs_dic_count_lc_nts(seqs_dic)
    10
    >>> seqs_dic = {'seq1': "ACGT", 'seq2': 'ACGTAC'}
    >>> seqs_dic_count_lc_nts(seqs_dic)
    0

    """
    assert seqs_dic, "Given sequence dictionary empty"
    # Sum the lowercase (a-z) character counts over all sequences.
    return sum(len(re.findall(r"[a-z]", seq)) for seq in seqs_dic.values())
#######################################################################
def count_file_rows(in_file):
    """
    Count number of file rows (newline characters) for given input file.

    The count is done in Python instead of shelling out to `cat | wc -l`,
    which is portable, faster, and safe for arbitrary file names.
    Like `wc -l`, this counts newline characters.

    >>> test_file = "test-data/test1.bed"
    >>> count_file_rows(test_file)
    7
    >>> test_file = "test-data/empty_file"
    >>> count_file_rows(test_file)
    0

    """
    row_count = 0
    # Read in binary chunks so huge files are never fully loaded into memory.
    with open(in_file, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            row_count += chunk.count(b"\n")
    return row_count
#######################################################################
def bed_check_six_col_format(bed_file):
    """
    Check whether given .bed file has 6 columns.

    NOTE(review): as in the original, this returns True if ANY row has exactly
    six tab-separated columns, not if all rows do.

    >>> test_bed = "test-data/test1.bed"
    >>> bed_check_six_col_format(test_bed)
    True
    >>> test_bed = "test-data/empty_file"
    >>> bed_check_six_col_format(test_bed)
    False

    """
    with open(bed_file) as f:
        for line in f:
            # One row with six tab-separated fields is enough.
            if len(line.strip().split("\t")) == 6:
                return True
    # Removed dead no-op `f.closed`; the with-statement already closes the file.
    return False
#######################################################################
def bed_check_unique_ids(bed_file):
    """
    Check whether .bed file (6 column format with IDs in column 4)
    has unique column 4 IDs.

    The check is done in Python (instead of `cut | sort | uniq -d`) so it
    is portable and safe for arbitrary file names, and it short-circuits
    on the first duplicate.

    >>> test_bed = "test-data/test1.bed"
    >>> bed_check_unique_ids(test_bed)
    True
    >>> test_bed = "test-data/test2.bed"
    >>> bed_check_unique_ids(test_bed)
    False

    """
    seen_ids = set()
    with open(bed_file) as f:
        for line in f:
            cols = line.strip().split("\t")
            # Skip rows without a column-4 ID (e.g. blank lines).
            if len(cols) < 4:
                continue
            site_id = cols[3]
            if site_id in seen_ids:
                return False
            seen_ids.add(site_id)
    return True
#######################################################################
def get_seq_lengths_from_seqs_dic(seqs_dic):
    """
    Given a dictionary of sequences, return dictionary of sequence lengths.
    Mapping is sequence ID -> sequence length.
    """
    assert seqs_dic, "sequence dictionary seems to be empty"
    return {seq_id: len(seq) for seq_id, seq in seqs_dic.items()}
#######################################################################
def bed_get_region_lengths(bed_file):
    """
    Read in .bed file, store and return region lengths in dictionary.
    key   : region ID (.bed col4)
    value : region length (.bed col3 - col2)

    >>> test_file = "test-data/test4.bed"
    >>> bed_get_region_lengths(test_file)
    {'CLIP1': 10, 'CLIP2': 10}

    """
    id2len_dic = {}
    with open(bed_file) as f:
        for line in f:
            cols = line.strip().split("\t")
            start = int(cols[1])
            end = int(cols[2])
            region_id = cols[3]
            assert region_id not in id2len_dic, \
                'column 4 IDs not unique in given .bed file "%s"' % (bed_file)
            id2len_dic[region_id] = end - start
    assert id2len_dic, \
        'No IDs read into dic (input file "%s" empty or malformatted?)' % (bed_file)
    return id2len_dic
#######################################################################
def graphprot_get_param_dic(params_file):
    """
    Read in GraphProt .params file and store in dictionary.
    key = parameter, value = parameter value (as string).

    Only lines whose first space-separated field ends with ":" are stored;
    the key is everything before the last ":" on the line.

    >>> params_file = "test-data/test.params"
    >>> graphprot_get_param_dic(params_file)
    {'epochs': '20', 'lambda': '0.01', 'R': '1', 'D': '3', 'bitsize': '14', \
'model_type': 'sequence', 'pos_train_ws_pred_median': '0.760321', \
'pos_train_profile_median': '5.039610', \
'pos_train_avg_profile_median_1': '4.236340', \
'pos_train_avg_profile_median_2': '3.868431', \
'pos_train_avg_profile_median_3': '3.331277', \
'pos_train_avg_profile_median_4': '2.998667', \
'pos_train_avg_profile_median_5': '2.829782', \
'pos_train_avg_profile_median_6': '2.626623', \
'pos_train_avg_profile_median_7': '2.447083', \
'pos_train_avg_profile_median_8': '2.349919', \
'pos_train_avg_profile_median_9': '2.239829', \
'pos_train_avg_profile_median_10': '2.161676'}

    """
    param_dic = {}
    with open(params_file) as f:
        for line in f:
            cols = line.strip().split(" ")
            param_value = cols[1]
            if re.search(".+:", cols[0]):
                key = re.search("(.+):", line).group(1)
                param_dic[key] = param_value
    return param_dic
#######################################################################
def graphprot_filter_predictions_file(in_file, out_file, sc_thr=0):
    """
    Filter GraphProt .predictions file by given score threshold sc_thr.
    Rows whose column-3 score is below the threshold are dropped.
    """
    # Both files are managed by the with-statement, so they are always closed.
    with open(in_file) as in_fh, open(out_file, "w") as out_fh:
        for line in in_fh:
            row = line.strip()
            if float(row.split("\t")[2]) >= sc_thr:
                out_fh.write("%s\n" % (row))
#######################################################################
def fasta_read_in_ids(fasta_file):
    """
    Given a .fa file, read in header IDs in order appearing in file,
    and store in list.

    >>> test_file = "test-data/test3.fa"
    >>> fasta_read_in_ids(test_file)
    ['SERBP1_K562_rep01_544', 'SERBP1_K562_rep02_709', 'SERBP1_K562_rep01_316']

    """
    ids_list = []
    with open(fasta_file) as f:
        for line in f:
            # A single search replaces the original test-then-extract pair:
            # ">(.+)" matches exactly when ">.+" does.
            m = re.search(">(.+)", line)
            if m:
                ids_list.append(m.group(1))
    return ids_list
#######################################################################
def graphprot_profile_calc_avg_profile(
    in_file, out_file, ap_extlr=5, seq_ids_list=False, method=1
):
    """
    Given a GraphProt .profile file, calculate average profiles and output
    average profile file.
    Average profile means that the position-wise scores will get smoothed
    out by calculating for each position a new score, taking a sequence
    window -ap_extlr to +ap_extlr relative to the position
    and calculate the mean score over this window. The mean score then
    becomes the new average profile score at this position.
    Two different implementations of the task are given:
    method=1 (new python implementation, slower + more memory but easy to read)
    method=2 (old perl implementation, faster and less memory but more code)
    >>> in_file = "test-data/test2.profile"
    >>> out_file1 = "test-data/test2_1.avg_profile"
    >>> out_file2 = "test-data/test2_2.avg_profile"
    >>> out_file4 = "test-data/test2_3.avg_profile"
    >>> graphprot_profile_calc_avg_profile(in_file, \
    out_file1, ap_extlr=2, method=1)
    >>> graphprot_profile_calc_avg_profile(in_file, \
    out_file2, ap_extlr=2, method=2)
    >>> diff_two_files_identical(out_file1, out_file2)
    True
    >>> test_list = ["s1", "s2", "s3", "s4"]
    >>> out_file3_exp = "test-data/test3_added_ids_exp.avg_profile"
    >>> out_file3 = "test-data/test3_added_ids_out.avg_profile"
    >>> graphprot_profile_calc_avg_profile(in_file, out_file3, \
    ap_extlr=2, method=1, seq_ids_list=test_list)
    >>> diff_two_files_identical(out_file3_exp, out_file3)
    True
    """
    # Input rows are "site_id<TAB>pos<TAB>score" (pos 0-based). seq_ids_list,
    # when given, maps the integer site ID to its original FASTA header ID.
    # Smoothing is done by list_moving_window_average_values(), defined
    # elsewhere in this file.
    if method == 1:
        # Dictionary of lists, with list of scores (value) for each site (key).
        lists_dic = {}
        site_starts_dic = {}
        with open(in_file) as f:
            for line in f:
                cols = line.strip().split("\t")
                site_id = int(cols[0])
                pos = int(cols[1]) # 0-based.
                score = float(cols[2])
                # Store first position of site.
                if site_id not in site_starts_dic:
                    site_starts_dic[site_id] = pos
                if site_id in lists_dic:
                    lists_dic[site_id].append(score)
                else:
                    lists_dic[site_id] = []
                    lists_dic[site_id].append(score)
        f.close()
        # Check number of IDs (# FASTA IDs has to be same as # site IDs).
        if seq_ids_list:
            c_seq_ids = len(seq_ids_list)
            c_site_ids = len(site_starts_dic)
            assert (
                c_seq_ids == c_site_ids
            ), "# sequence IDs != # site IDs (%i != %i)" % (c_seq_ids, c_site_ids)
        OUTPROF = open(out_file, "w")
        # For each site, calculate average profile scores list.
        for site_id in lists_dic:
            # Convert profile score list to average profile scores list.
            aps_list = list_moving_window_average_values(
                lists_dic[site_id], win_extlr=ap_extlr
            )
            start_pos = site_starts_dic[site_id]
            # Get original FASTA sequence ID.
            if seq_ids_list:
                site_id = seq_ids_list[site_id]
            for i, sc in enumerate(aps_list):
                pos = i + start_pos + 1 # make 1-based.
                OUTPROF.write("%s\t%i\t%f\n" % (site_id, pos, sc))
        OUTPROF.close()
    elif method == 2:
        # Streaming variant: processes one contiguous site block at a time.
        # NOTE(review): relies on rows being grouped by site ID (all rows of a
        # site adjacent) -- confirm input ordering before reuse.
        OUTPROF = open(out_file, "w")
        # Old site ID.
        old_id = ""
        # Current site ID.
        cur_id = ""
        # Scores list.
        scores_list = []
        site_starts_dic = {}
        with open(in_file) as f:
            for line in f:
                cols = line.strip().split("\t")
                cur_id = int(cols[0])
                pos = int(cols[1]) # 0-based.
                score = float(cols[2])
                # Store first position of site.
                if cur_id not in site_starts_dic:
                    site_starts_dic[cur_id] = pos
                # Case: new site (new column 1 ID).
                if cur_id != old_id:
                    # Process old id scores.
                    if scores_list:
                        aps_list = list_moving_window_average_values(
                            scores_list, win_extlr=ap_extlr
                        )
                        start_pos = site_starts_dic[old_id]
                        seq_id = old_id
                        # Get original FASTA sequence ID.
                        if seq_ids_list:
                            seq_id = seq_ids_list[old_id]
                        for i, sc in enumerate(aps_list):
                            pos = i + start_pos + 1 # make 1-based.
                            OUTPROF.write("%s\t%i\t%f\n" % (seq_id, pos, sc))
                        # Reset list.
                        scores_list = []
                    old_id = cur_id
                    scores_list.append(score)
                else:
                    # Add to scores_list.
                    scores_list.append(score)
        f.close()
        # Process last block.
        if scores_list:
            aps_list = list_moving_window_average_values(
                scores_list, win_extlr=ap_extlr
            )
            start_pos = site_starts_dic[old_id]
            seq_id = old_id
            # Get original FASTA sequence ID.
            if seq_ids_list:
                seq_id = seq_ids_list[old_id]
            for i, sc in enumerate(aps_list):
                pos = i + start_pos + 1 # make 1-based.
                OUTPROF.write("%s\t%i\t%f\n" % (seq_id, pos, sc))
        OUTPROF.close()
#######################################################################
def graphprot_profile_extract_peak_regions(
in_file, out_file, max_merge_dist=0, sc_thr=0
):
"""
Extract peak regions from GraphProt .profile file.
Store the peak regions (defined as regions with scores >= sc_thr)
as to out_file in 6-column .bed.
TODO:
Add option for genomic coordinates input (+ - polarity support).
Output genomic regions instead of sequence regions.
>>> in_file = "test-data/test4.avg_profile"
>>> out_file = "test-data/test4_out.peaks.bed"
>>> exp_file = "test-data/test4_out_exp.peaks.bed"
>>> exp2_file = "test-data/test4_out_exp2.peaks.bed"
>>> empty_file = "test-data/empty_file"
>>> graphprot_profile_extract_peak_regions(in_file, out_file)
>>> diff_two_files_identical(out_file, exp_file)
True
>>> graphprot_profile_extract_peak_regions(in_file, out_file, sc_thr=10)
>>> diff_two_files_identical(out_file, empty_file)
True
>>> graphprot_profile_extract_peak_regions(in_file, out_file, \
max_merge_dist=2)
>>> diff_two_files_identical(out_file, exp2_file)
True
"""
OUTPEAKS = open(out_file, "w")
# Old site ID.
old_id = ""
# Current site ID.
cur_id = ""
# Scores list.
scores_list = []
site_starts_dic = {}
with open(in_file) as f:
for line in f:
cols = line.strip().split("\t")
cur_id = cols[0]
pos = int(cols[1]) # 0-based.
score = float(cols[2])
# Store first position of site.
if cur_id not in site_starts_dic:
# If first position != zero, we assume positions are 1-based.
if pos != 0:
# Make index 0-based.
site_starts_dic[cur_id] | |
Standard <https://github.com/toml-lang/toml>`_\\n* `The currently '
'supported TOML specification '
'<https://github.com/toml-lang/toml/blob/v0.5.0/README.md>`_\\n\\nInstallation\\n============\\n\\nTo '
'install the latest release on `PyPI '
'<https://pypi.org/project/toml/>`_,\\nsimply run:\\n\\n::\\n\\n pip '
'install toml\\n\\nOr to install the latest development version, '
'run:\\n\\n::\\n\\n git clone https://github.com/uiri/toml.git\\n cd '
'toml\\n python setup.py install\\n\\nQuick '
'Tutorial\\n==============\\n\\n*toml.loads* takes in a string containing '
'standard TOML-formatted data and\\nreturns a dictionary containing the '
'parsed data.\\n\\n.. code:: pycon\\n\\n >>> import toml\\n >>> '
'toml_string = \\"\\"\\"\\n ... # This is a TOML document.\\n ...\\n ... '
'title = \\"TOML Example\\"\\n ...\\n ... [owner]\\n ... name = \\"Tom '
'Preston-Werner\\"\\n ... dob = 1979-05-27T07:32:00-08:00 # First class '
'dates\\n ...\\n ... [database]\\n ... server = \\"192.168.1.1\\"\\n ... '
'ports = [ 8001, 8001, 8002 ]\\n ... connection_max = 5000\\n ... enabled '
'= true\\n ...\\n ... [servers]\\n ...\\n ... # Indentation (tabs '
'and/or spaces) is allowed but not required\\n ... [servers.alpha]\\n '
'... ip = \\"10.0.0.1\\"\\n ... dc = \\"eqdc10\\"\\n ...\\n ... '
'[servers.beta]\\n ... ip = \\"10.0.0.2\\"\\n ... dc = '
'\\"eqdc10\\"\\n ...\\n ... [clients]\\n ... data = [ [\\"gamma\\", '
'\\"delta\\"], [1, 2] ]\\n ...\\n ... # Line breaks are OK when inside '
'arrays\\n ... hosts = [\\n ... \\"alpha\\",\\n ... \\"omega\\"\\n '
'... ]\\n ... \\"\\"\\"\\n >>> parsed_toml = '
'toml.loads(toml_string)\\n\\n\\n*toml.dumps* takes a dictionary and returns '
'a string containing the\\ncorresponding TOML-formatted data.\\n\\n.. code:: '
'pycon\\n\\n >>> new_toml_string = toml.dumps(parsed_toml)\\n >>> '
'print(new_toml_string)\\n title = \\"TOML Example\\"\\n [owner]\\n name '
'= \\"<NAME>ner\\"\\n dob = 1979-05-27T07:32:00Z\\n '
'[database]\\n server = \\"192.168.1.1\\"\\n ports = [ 8001, 8001, '
'8002,]\\n connection_max = 5000\\n enabled = true\\n [clients]\\n data '
'= [ [ \\"gamma\\", \\"delta\\",], [ 1, 2,],]\\n hosts = [ \\"alpha\\", '
'\\"omega\\",]\\n [servers.alpha]\\n ip = \\"10.0.0.1\\"\\n dc = '
'\\"eqdc10\\"\\n [servers.beta]\\n ip = \\"10.0.0.2\\"\\n dc = '
'\\"eqdc10\\"\\n\\nFor more functions, view the API Reference '
'below.\\n\\nNote\\n----\\n\\nFor Numpy users, by default the data types '
'``np.floatX`` will not be translated to floats by toml, but will instead be '
'encoded as strings. To get around this, specify the ``TomlNumpyEncoder`` '
'when saving your data.\\n\\n.. code:: pycon\\n\\n >>> import toml\\n >>> '
'import numpy as np\\n >>> a = np.arange(0, 10, dtype=np.double)\\n >>> '
'output = {\'a\': a}\\n >>> toml.dumps(output)\\n \'a = [ \\"0.0\\", '
'\\"1.0\\", \\"2.0\\", \\"3.0\\", \\"4.0\\", \\"5.0\\", \\"6.0\\", '
'\\"7.0\\", \\"8.0\\", \\"9.0\\",]\\\\n\'\\n >>> toml.dumps(output, '
"encoder=toml.TomlNumpyEncoder())\\n 'a = [ 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, "
"6.0, 7.0, 8.0, 9.0,]\\\\n'\\n\\nAPI "
'Reference\\n=============\\n\\n``toml.load(f, _dict=dict)``\\n Parse a '
'file or a list of files as TOML and return a dictionary.\\n\\n '
':Args:\\n * ``f``: A path to a file, list of filepaths (to be read into '
'single\\n object) or a file descriptor\\n * ``_dict``: The class of '
'the dictionary object to be returned\\n\\n :Returns:\\n A dictionary '
'(or object ``_dict``) containing parsed TOML data\\n\\n :Raises:\\n * '
'``TypeError``: When ``f`` is an invalid type or is a list '
'containing\\n invalid types\\n * ``TomlDecodeError``: When an error '
'occurs while decoding the file(s)\\n\\n``toml.loads(s, _dict=dict)``\\n '
'Parse a TOML-formatted string to a dictionary.\\n\\n :Args:\\n * ``s``: '
'The TOML-formatted string to be parsed\\n * ``_dict``: Specifies the '
'class of the returned toml dictionary\\n\\n :Returns:\\n A dictionary '
'(or object ``_dict``) containing parsed TOML data\\n\\n :Raises:\\n * '
'``TypeError``: When a non-string object is passed\\n * '
'``TomlDecodeError``: When an error occurs while decoding the\\n '
'TOML-formatted string\\n\\n``toml.dump(o, f, encoder=None)``\\n Write a '
'dictionary to a file containing TOML-formatted data\\n\\n :Args:\\n * '
'``o``: An object to be converted into TOML\\n * ``f``: A File descriptor '
'where the TOML-formatted output should be stored\\n * ``encoder``: An '
'instance of ``TomlEncoder`` (or subclass) for encoding the object. If '
'``None``, will default to ``TomlEncoder``\\n\\n :Returns:\\n A string '
'containing the TOML-formatted data corresponding to object ``o``\\n\\n '
':Raises:\\n * ``TypeError``: When anything other than file descriptor is '
'passed\\n\\n``toml.dumps(o, encoder=None)``\\n Create a TOML-formatted '
'string from an input object\\n\\n :Args:\\n * ``o``: An object to be '
'converted into TOML\\n * ``encoder``: An instance of ``TomlEncoder`` (or '
'subclass) for encoding the object. If ``None``, will default to '
'``TomlEncoder``\\n\\n :Returns:\\n A string containing the '
'TOML-formatted data corresponding to object '
'``o``\\n\\n\\n\\nLicensing\\n=========\\n\\nThis project is released under '
'the terms of the MIT Open Source License. View\\n*LICENSE.txt* for more '
'information.\\n\\n\\n", "origin": "<NAME> <<EMAIL>>"}'),
('/usr/lib/python3.8/site-packages/packaging',
'{"name": "packaging", "version": "20.4", "type": "python", "location": '
'"/usr/lib/python3.8/site-packages", "files": '
'["/usr/lib/python3.8/site-packages/packaging-20.4.dist-info/INSTALLER", '
'"/usr/lib/python3.8/site-packages/packaging-20.4.dist-info/LICENSE", '
'"/usr/lib/python3.8/site-packages/packaging-20.4.dist-info/LICENSE.APACHE", '
'"/usr/lib/python3.8/site-packages/packaging-20.4.dist-info/LICENSE.BSD", '
'"/usr/lib/python3.8/site-packages/packaging-20.4.dist-info/METADATA", '
'"/usr/lib/python3.8/site-packages/packaging-20.4.dist-info/RECORD", '
'"/usr/lib/python3.8/site-packages/packaging-20.4.dist-info/WHEEL", '
'"/usr/lib/python3.8/site-packages/packaging-20.4.dist-info/top_level.txt", '
'"/usr/lib/python3.8/site-packages/packaging/__about__.py", '
'"/usr/lib/python3.8/site-packages/packaging/__init__.py", '
'"/usr/lib/python3.8/site-packages/packaging/__pycache__/__about__.cpython-38.pyc", '
'"/usr/lib/python3.8/site-packages/packaging/__pycache__/__init__.cpython-38.pyc", '
'"/usr/lib/python3.8/site-packages/packaging/__pycache__/_compat.cpython-38.pyc", '
'"/usr/lib/python3.8/site-packages/packaging/__pycache__/_structures.cpython-38.pyc", '
'"/usr/lib/python3.8/site-packages/packaging/__pycache__/_typing.cpython-38.pyc", '
'"/usr/lib/python3.8/site-packages/packaging/__pycache__/markers.cpython-38.pyc", '
'"/usr/lib/python3.8/site-packages/packaging/__pycache__/requirements.cpython-38.pyc", '
'"/usr/lib/python3.8/site-packages/packaging/__pycache__/specifiers.cpython-38.pyc", '
'"/usr/lib/python3.8/site-packages/packaging/__pycache__/tags.cpython-38.pyc", '
'"/usr/lib/python3.8/site-packages/packaging/__pycache__/utils.cpython-38.pyc", '
'"/usr/lib/python3.8/site-packages/packaging/__pycache__/version.cpython-38.pyc", '
'"/usr/lib/python3.8/site-packages/packaging/_compat.py", '
'"/usr/lib/python3.8/site-packages/packaging/_structures.py", '
'"/usr/lib/python3.8/site-packages/packaging/_typing.py", '
'"/usr/lib/python3.8/site-packages/packaging/markers.py", '
'"/usr/lib/python3.8/site-packages/packaging/py.typed", '
'"/usr/lib/python3.8/site-packages/packaging/requirements.py", '
'"/usr/lib/python3.8/site-packages/packaging/specifiers.py", '
'"/usr/lib/python3.8/site-packages/packaging/tags.py", '
'"/usr/lib/python3.8/site-packages/packaging/utils.py", '
'"/usr/lib/python3.8/site-packages/packaging/version.py"], "license": '
'"BSD-2-Clause or Apache-2.0", "metadata": "Metadata-Version: 2.1\\nName: '
'packaging\\nVersion: 20.4\\nSummary: Core utilities for Python '
'packages\\nHome-page: https://github.com/pypa/packaging\\nAuthor: Donald '
'Stufft and individual contributors\\nAuthor-email: '
'<EMAIL>\\nLicense: BSD-2-Clause or Apache-2.0\\nPlatform: '
'UNKNOWN\\nClassifier: Development Status :: 5 - '
'Production/Stable\\nClassifier: Intended Audience :: '
'Developers\\nClassifier: License :: OSI Approved :: Apache Software '
'License\\nClassifier: License :: OSI Approved :: BSD License\\nClassifier: '
'Programming Language :: Python\\nClassifier: Programming Language :: Python '
':: 2\\nClassifier: Programming Language :: Python :: 2.7\\nClassifier: '
'Programming Language :: Python :: 3\\nClassifier: Programming Language :: '
'Python :: 3.4\\nClassifier: Programming Language :: Python :: '
'3.5\\nClassifier: Programming Language :: Python :: 3.6\\nClassifier: '
'Programming Language :: Python :: 3.7\\nClassifier: Programming Language :: '
'Python :: 3.8\\nClassifier: Programming Language :: Python :: '
'Implementation :: CPython\\nClassifier: Programming Language :: Python :: '
'Implementation :: PyPy\\nRequires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, '
'!=3.3.*\\nDescription-Content-Type: text/x-rst\\nRequires-Dist: pyparsing '
'(>=2.0.2)\\nRequires-Dist: six\\n\\npackaging\\n=========\\n\\nCore '
'utilities for Python packages.\\n\\nThe ``packaging`` project includes the '
'following: version handling, specifiers,\\nmarkers, requirements, tags, '
'utilities.\\n\\nDocumentation\\n-------------\\n\\nThe `documentation`_ '
'provides information and the API for the following:\\n\\n- Version '
'Handling\\n- Specifiers\\n- Markers\\n- Requirements\\n- Tags\\n- '
'Utilities\\n\\nInstallation\\n------------\\n\\nUse ``pip`` to install '
'these utilities::\\n\\n pip install '
'packaging\\n\\nDiscussion\\n----------\\n\\nIf you run into bugs, you can '
'file them in our `issue tracker`_.\\n\\nYou can also join ``#pypa`` on '
'Freenode to ask questions or get involved.\\n\\n\\n.. _`documentation`: '
'https://packaging.pypa.io/\\n.. _`issue tracker`: '
'https://github.com/pypa/packaging/issues\\n\\n\\nCode of '
'Conduct\\n---------------\\n\\nEveryone interacting in the packaging '
"project's codebases, issue trackers, chat\\nrooms, and mailing lists is "
'expected to follow the `PyPA Code of Conduct`_.\\n\\n.. _PyPA Code of '
'Conduct: '
'https://www.pypa.io/en/latest/code-of-conduct/\\n\\nContributing\\n------------\\n\\nThe '
'``CONTRIBUTING.rst`` file outlines how to contribute to this project '
'as\\nwell as how to report a potential security issue. The documentation '
'for this\\nproject also covers information about `project development`_ and '
'`security`_.\\n\\n.. _`project development`: '
'https://packaging.pypa.io/en/latest/development/\\n.. _`security`: '
'https://packaging.pypa.io/en/latest/security/\\n\\nProject '
'History\\n---------------\\n\\nPlease review the ``CHANGELOG.rst`` file or '
'the `Changelog documentation`_ for\\nrecent changes and project '
'history.\\n\\n.. _`Changelog documentation`: '
'https://packaging.pypa.io/en/latest/changelog/\\n\\nChangelog\\n---------\\n\\n20.4 '
'- 2020-05-19\\n~~~~~~~~~~~~~~~~~\\n\\n* Canonicalize version before '
'comparing specifiers. (`#282 '
'<https://github.com/pypa/packaging/issues/282>`__)\\n* Change type hint for '
'``canonicalize_name`` to return\\n ``packaging.utils.NormalizedName``.\\n '
'This enables the use of static typing tools (like mypy) to detect mixing '
'of\\n normalized and un-normalized names.\\n\\n20.3 - '
'2020-03-05\\n~~~~~~~~~~~~~~~~~\\n\\n* Fix changelog for 20.2.\\n\\n20.2 - '
'2020-03-05\\n~~~~~~~~~~~~~~~~~\\n\\n* Fix a bug that caused a 32-bit OS '
'that runs on a 64-bit ARM CPU (e.g. ARM-v8,\\n aarch64), to report the '
'wrong bitness.\\n\\n20.1 - 2020-01-24\\n~~~~~~~~~~~~~~~~~~~\\n\\n* Fix a '
'bug caused by reuse of an exhausted iterator. (`#257 '
'<https://github.com/pypa/packaging/issues/257>`__)\\n\\n20.0 - '
'2020-01-06\\n~~~~~~~~~~~~~~~~~\\n\\n* Add type hints (`#191 '
'<https://github.com/pypa/packaging/issues/191>`__)\\n\\n* Add proper trove '
'classifiers for PyPy support (`#198 '
'<https://github.com/pypa/packaging/issues/198>`__)\\n\\n* Scale back '
'depending on ``ctypes`` for manylinux support detection (`#171 '
'<https://github.com/pypa/packaging/issues/171>`__)\\n\\n* Use '
'``sys.implementation.name`` where appropriate for ``packaging.tags`` (`#193 '
'<https://github.com/pypa/packaging/issues/193>`__)\\n\\n* Expand upon the '
'API provded by ``packaging.tags``: ``interpreter_name()``, '
'``mac_platforms()``, ``compatible_tags()``, ``cpython_tags()``, '
'``generic_tags()`` (`#187 '
'<https://github.com/pypa/packaging/issues/187>`__)\\n\\n* Officially '
'support Python 3.8 (`#232 '
'<https://github.com/pypa/packaging/issues/232>`__)\\n\\n* Add ``major``, '
'``minor``, and ``micro`` aliases to ``packaging.version.Version`` (`#226 '
'<https://github.com/pypa/packaging/issues/226>`__)\\n\\n* Properly mark | |
<reponame>cpitts1/snakeplane
# Copyright (C) 2019 Alteryx, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base classes for plugin, input/output anchors/managers."""
# Built in Libraries
import copy
import os
import sys
from collections import OrderedDict, namedtuple, UserDict
from functools import partial
from types import SimpleNamespace
from typing import Any, List, Tuple, Union
import AlteryxPythonSDK as sdk
import snakeplane.interface_utilities as interface_utils
import snakeplane.plugin_utilities as plugin_utils
from snakeplane.constants import SNAKEPLANE_NULL_VALUE_PLACEHOLDER
import xmltodict
class AyxPlugin:
    """Base plugin class to be modified by snakeplane."""
    def __init__(
        self,
        n_tool_id: int,
        alteryx_engine: sdk.AlteryxEngine,
        output_anchor_mgr: sdk.OutputAnchorManager,
    ) -> None:
        # Initialization data
        self._engine_vars = SimpleNamespace()
        self._engine_vars.n_tool_id = n_tool_id
        self._engine_vars.alteryx_engine = alteryx_engine
        self._engine_vars.output_anchor_mgr = output_anchor_mgr
        # Guards against reporting "missing connection" more than once.
        self._raised_missing = False
        # Plugin State vars
        self._state_vars = SimpleNamespace(
            initialized=False,
            input_anchors={},
            output_anchors={},
            config_data=None,
            required_input_names=[],
        )
        # Pull in the config XML data from conf file using the name of the tool
        # NOTE(review): self.tool_name is not defined on this base class; it is
        # presumably supplied by the snakeplane-generated subclass -- confirm.
        # NOTE(review): os.listdir() order is arbitrary; xml_files[0] assumes
        # exactly one .xml config file per tool directory -- confirm.
        xml_files = [
            file
            for file in os.listdir(plugin_utils.get_tool_path(self.tool_name))
            if file.lower().endswith(".xml")
        ]
        with open(
            os.path.join(plugin_utils.get_tool_path(self.tool_name), xml_files[0])
        ) as fd:
            self._state_vars.config_data = xmltodict.parse(fd.read())
        # Plugin Error Methods: each partial pre-binds the tool ID and message
        # severity, so callers only pass the message text.
        self.logging = SimpleNamespace(
            display_error_msg=partial(
                self._engine_vars.alteryx_engine.output_message,
                self._engine_vars.n_tool_id,
                sdk.EngineMessageType.error,
            ),
            display_warn_msg=partial(
                self._engine_vars.alteryx_engine.output_message,
                self._engine_vars.n_tool_id,
                sdk.EngineMessageType.warning,
            ),
            display_info_msg=partial(
                self._engine_vars.alteryx_engine.output_message,
                self._engine_vars.n_tool_id,
                sdk.EngineMessageType.info,
            ),
        )
        # Default to no inputs or outputs
        for connection in plugin_utils.get_xml_config_input_connections(
            self._state_vars.config_data
        ):
            self._state_vars.input_anchors[connection["@Name"]] = []
            # Track names of the inputs that are required for this tool to run
            # (xmltodict yields the attribute as the string "False"/"True").
            if connection["@Optional"] == "False":
                self._state_vars.required_input_names.append(connection["@Name"])
        for connection in plugin_utils.get_xml_config_output_connections(
            self._state_vars.config_data
        ):
            self._state_vars.output_anchors[connection["@Name"]] = OutputAnchor()
        # Custom data
        self.user_data = SimpleNamespace()
        # Configure managers, this must occur last so the instance
        # is properly configured
        self.input_manager = InputManager(self)
        self.output_manager = OutputManager(self)
    @property
    def initialized(self) -> bool:
        """Getter for plugin initialization state."""
        return self._state_vars.initialized
    @initialized.setter
    def initialized(self, value: bool) -> None:
        """Setter for plugin initialization state."""
        self._state_vars.initialized = bool(value)
    @property
    def update_only_mode(self) -> bool:
        """Getter for if designer is in update only mode."""
        # The engine reports init vars as strings, hence the "True" compare.
        return (
            self._engine_vars.alteryx_engine.get_init_var(
                self._engine_vars.n_tool_id, "UpdateOnly"
            )
            == "True"
        )
    @property
    def all_inputs_completed(self) -> bool:
        """
        Check that all required inputs have successfully completed.
        Returns False while the plugin itself is uninitialized, or when any
        required anchor has no connections or a connection still in flight.
        Returns
        -------
        bool
            Boolean indication of if all inputs have completed.
        """
        all_inputs_completed = True
        if self.initialized:
            for name in self._state_vars.required_input_names:
                connections = self._state_vars.input_anchors[name]
                if len(connections) == 0 or not all(
                    [connection.completed for connection in connections]
                ):
                    all_inputs_completed = False
        else:
            all_inputs_completed = False
        return all_inputs_completed
    @property
    def all_required_inputs_initialized(self) -> bool:
        """Getter for checking if all required inputs have been initialized."""
        for anchor_name in self._state_vars.required_input_names:
            input = self._state_vars.input_anchors[anchor_name]
            if not input or not all([connection.initialized for connection in input]):
                return False
        return True
    def update_sys_path(self) -> None:
        """Update the sys path to include the current tools libs."""
        # Add lib to sys path
        tool_path = plugin_utils.get_tool_path(self.tool_name)
        sys.path.append(tool_path)
        sys.path.append(os.path.join(tool_path, "Lib", "site-packages"))
    def assert_all_inputs_connected(self) -> bool:
        """Display an error and return False if any required input connection
        is missing; return True when all required inputs are connected.
        The error message is only shown once per plugin instance."""
        for anchor_name in self._state_vars.required_input_names:
            input = self._state_vars.input_anchors[anchor_name]
            if not input:
                if not self._raised_missing:
                    self.logging.display_error_msg("Missing Incoming Connection(s).")
                    self._raised_missing = True
                return False
        return True
    def save_output_anchor_refs(self) -> None:
        """Save all references to output anchors."""
        # Get references to the output anchors
        for anchor_name in self._state_vars.output_anchors:
            self._state_vars.output_anchors[
                anchor_name
            ]._handler = self._engine_vars.output_anchor_mgr.get_output_anchor(
                anchor_name
            )
    def save_interface(self, name: str, interface: object) -> None:
        """Save the interface internally."""
        self._state_vars.input_anchors[name].append(interface)
    def update_progress(self, d_percentage: float) -> None:
        """Update the progress on this anchor."""
        self._engine_vars.alteryx_engine.output_tool_progress(
            self._engine_vars.n_tool_id, d_percentage
        )  # Inform the Alteryx engine of the tool's progress.
        for _, anchor in self._state_vars.output_anchors.items():
            # Inform the downstream tool of this tool's progress.
            anchor._handler.update_progress(d_percentage)
    def close_all_outputs(self) -> None:
        """Force all output anchors to close."""
        # Close all output anchors
        for _, anchor in self._state_vars.output_anchors.items():
            anchor._handler.close()
        # Checks whether connections were properly closed.
        for anchor_name in self._state_vars.output_anchors:
            self._state_vars.output_anchors[anchor_name]._handler.assert_close()
    def push_all_output_records(self) -> None:
        """
        For each output anchor on the plugin, flush all the output records.
        Returns
        -------
        None
        """
        for _, output_anchor in self._state_vars.output_anchors.items():
            output_anchor.push_records(self)
    def push_all_metadata(self) -> None:
        """Pushes all output anchor metadata downstream."""
        for _, anchor in self._state_vars.output_anchors.items():
            anchor.push_metadata(self)
    def clear_accumulated_records(self) -> None:
        """
        Clear all accumulated records from all plugin interfaces.
        Mutates every input connection's record list in place; no return.
        """
        for _, anchor in self._state_vars.input_anchors.items():
            for connection in anchor:
                connection._interface_record_vars.record_list_in = []
    def create_record_info(self) -> sdk.RecordInfo:
        """Create a new record info object."""
        return sdk.RecordInfo(self._engine_vars.alteryx_engine)
class AyxPluginInterface:
    """Input interface base definition."""
    def __init__(self, parent: object, name: str) -> None:
        # parent: the owning AyxPlugin; name: this input anchor's name.
        self.parent = parent
        self.name = name
        self.initialized = False
        self._interface_record_vars = SimpleNamespace(
            record_info_in=None, record_list_in=[], column_metadata=None
        )
        self._interface_state = SimpleNamespace(
            input_complete=False, d_progress_percentage=0, data_processing_mode="batch"
        )
        self.is_last_chunk = None
    @property
    def metadata(self) -> object:
        """Input metadata getter (returns a deep copy, so callers cannot
        mutate the stored metadata)."""
        return copy.deepcopy(self._interface_record_vars.column_metadata)
    @property
    def data(self) -> Union[object, List[List[Any]]]:
        """Input data getter.
        Shape depends on the parent's configuration: a single row in
        stream/list mode, the full list of rows in batch/list mode, or a
        pandas DataFrame otherwise (raises ImportError if pandas is absent).
        """
        if (
            self.parent.process_data_mode == "stream"
            and self.parent.process_data_input_type == "list"
        ):
            return self._interface_record_vars.record_list_in[0]
        elif self.parent.process_data_input_type == "list":
            return self._interface_record_vars.record_list_in
        else:
            try:
                import pandas as pd
            except ImportError:
                err_str = """The Pandas library must be installed to
                allow dataframe as input_type."""
                self.parent.logging.display_error_msg(err_str)
                raise ImportError(err_str)
            else:
                return pd.DataFrame(
                    self._interface_record_vars.record_list_in,
                    columns=self.metadata.get_column_names(),
                )
    @property
    def completed(self) -> bool:
        """Interface completed getter."""
        return self._interface_state.input_complete
    @completed.setter
    def completed(self, val) -> None:
        """Interface completed setter."""
        self._interface_state.input_complete = val
    @property
    def anchor_metadata(self):
        """Anchor metadata getter (direct reference, unlike `metadata`)."""
        return self._interface_record_vars.column_metadata
    @anchor_metadata.setter
    def anchor_metadata(self, val) -> None:
        """Anchor metadata setter."""
        self._interface_record_vars.column_metadata = val
    @property
    def record_info(self):
        """Getter for Input Anchor record_info object."""
        return self._interface_record_vars.record_info_in
    def get_values_from_record(
        self: object, in_record: object
    ) -> List[Union[int, float, bool, str, bytes]]:
        """
        Get a list of values from an incoming record.
        Parameters
        ----------
        in_record: object
            An Alteryx RecordRef object for the record to be processed
        Returns
        ---------
        List[Union[int, float, bool, str, bytes]]
            A list of the parsed record values, one per field.
        """
        # NOTE(review): `fields` and `field_getters` are not set in __init__;
        # they are presumably attached elsewhere during record-info setup --
        # confirm before calling this prior to initialization.
        fields = self._interface_record_vars.fields
        field_getters = self._interface_record_vars.field_getters
        row = [field_getters[field](in_record) for field in fields]
        return row
    def accumulate_record(self, record: sdk.RecordRef) -> None:
        """Accumulate an incoming record."""
        row = self.get_values_from_record(record)
        self._interface_record_vars.record_list_in.append(row)
class InputManager(UserDict):
    """Dict-like view of a plugin's input anchors, with engine helpers."""
    def __init__(self, plugin: object) -> None:
        self._plugin = plugin
        # Back the UserDict storage with the plugin's own anchor dict so
        # both views always stay in sync (shared object, not a copy).
        self.data = plugin._state_vars.input_anchors
    @property
    def tool_id(self) -> int:
        """Return the numeric ID of the owning tool."""
        return self._plugin._engine_vars.n_tool_id
    @property
    def workflow_config(self) -> OrderedDict:
        """Return the owning plugin's workflow configuration."""
        return self._plugin.workflow_config
class OutputManager(UserDict):
    """Dict-like view of a plugin's output anchors."""
    def __init__(self, plugin: object) -> None:
        self._plugin = plugin
        # Share the plugin's anchor dict rather than copying it, so anchor
        # lookups through this manager see every update.
        self.data = plugin._state_vars.output_anchors
    def get_temp_file_path(self) -> str:
        """Ask the Alteryx engine for a fresh temporary file path."""
        engine = self._plugin._engine_vars.alteryx_engine
        return engine.create_temp_file_name()
    @staticmethod
    def create_anchor_metadata():
        """Return a brand-new, empty anchor metadata container."""
        return AnchorMetadata()
class OutputAnchor:
"""Output anchor bookkeeping class with helpers."""
def __init__(self) -> None:
self._data = None
self._metadata = None
self._record_info_out = None
self._record_creator = None
self._handler = None
    @property
    def data(self) -> Union[object, List[List[Any]]]:
        """Return the raw data stored on this anchor (list of rows or DataFrame)."""
        return self._data
    @data.setter
    def data(self, data: Union[object, List[List[Any]]]) -> None:
        """Store data (list of rows or DataFrame) on this anchor."""
        self._data = data
    @property
    def metadata(self) -> object:
        """Return a deep copy of the anchor metadata, so callers cannot mutate it."""
        return copy.deepcopy(self._metadata)
    @metadata.setter
    def metadata(self, metadata):
        """Store the metadata object describing this anchor's columns."""
        self._metadata = metadata
def get_data_list(self) -> List[List[Any]]:
"""Get the list of data to push downstream as a list of lists."""
if interface_utils.is_dataframe(self._data):
return interface_utils.dataframe_to_list(self._data)
elif type(self._data) == list and not type(self._data[0]) == list:
return [self._data]
return self._data
    def push_metadata(self: object, plugin: object) -> None:
        """Propagate the metadata downstream for this anchor.

        No-op when no metadata has been set, or when the outgoing RecordInfo
        was already built (it is constructed lazily exactly once).
        """
        out_col_metadata = self.metadata
        if out_col_metadata is None:
            return
        if self._record_info_out is None:
            self._record_info_out = plugin.create_record_info()
            interface_utils.build_ayx_record_info(
                out_col_metadata, self._record_info_out
            )
            # Initialize the engine-side anchor handler with the built schema.
            self._handler.init(self._record_info_out)
def push_records(self, plugin: object) -> None:
"""
Flush all records for an output | |
<filename>deepinspect/coco_gender/coco_gender_deepinspect.py
import math, os, random, json, pickle, sys, pdb, csv, copy
import string, shutil, time, argparse
import numpy as np
from sklearn.metrics import average_precision_score
from sklearn.metrics import roc_auc_score
from tqdm import tqdm as tqdm
import matplotlib
matplotlib.use('Agg')
import torch.nn.functional as F
import torch, torchvision
import torch.nn as nn
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torch.optim as optim
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from torchvision.utils import save_image
from torch.utils.data import DataLoader
from data_loader import CocoObject
from model import MultilabelObject
globalcoverage = [] # [{file, label, layercoverage, yhat}]
ann_dir = '/home/yuchi/Downloads/cocodataset/annotations'
image_dir = '/home/yuchi/Downloads/cocodataset/'
def get_id2object():
    """Build the category-index -> name map, splitting 'person' into man/woman.

    The 'person' slot is relabelled "man" and an extra "woman" class is
    appended at index 80, giving 81 classes in total. Asserts that a
    'person' category was found.
    """
    from pycocotools.coco import COCO
    ann_path = os.path.join(ann_dir, "instances_train2014.json")
    cocoAPI = COCO(ann_path)
    data = json.load(open(ann_path))
    #81 objects
    id2object = dict()
    object2id = dict()
    person_id = -1
    for position, category in enumerate(data['categories']):
        name = category['name']
        if name == 'person':
            person_id = position
            print("person index: " + str(position))
            # relabel the person slot; "woman" is appended after the loop
            id2object[position] = "man"
            object2id["man"] = position
        else:
            id2object[position] = category['name']
            object2id[category['name']] = position
    id2object[80] = "woman"
    object2id['woman'] = 80
    assert person_id != -1
    return id2object
def hook_all_conv_layer(net, handler):
    """Recursively register *handler* as a forward hook on every Conv2d in *net*."""
    for child_name in net._modules.keys():
        child = net._modules.get(child_name)
        if isinstance(child, torch.nn.modules.conv.Conv2d):
            child.register_forward_hook(handler)
        # recurse unconditionally — containers may nest further Conv2d layers
        hook_all_conv_layer(child, handler)
def get_channel_coverage_group_exp(self, input, output):
    """Forward-hook: record per-layer covered channels into ``globalcoverage``.

    Registered on every Conv2d (see hook_all_conv_layer). ``self`` is the
    layer module, ``output`` its activation batch. For each sample in the
    batch, the covered-channel set is appended to the matching entry at the
    END of ``globalcoverage`` via negative indexing — the caller pre-appends
    one dict per image, in batch order, before the forward pass. Once the
    newest entry has all 53 conv layers, the accumulated records are
    pickled (appended to the stream) and the buffer is reset.
    """
    from torchncoverage import NCoverage
    global globalcoverage
    nc = NCoverage(threshold=0.5)
    covered_channel_group = nc.get_channel_coverage_group(output.data)
    # range() instead of the Python-2-only xrange() so the hook also runs
    # under Python 3; behavior is identical for iteration.
    for c in range(len(covered_channel_group)):
        # Index from the end: position -1 is the last image of the batch,
        # matching the order the caller appended entries.
        d = -1 * (c + 1)
        if "layercoverage" not in globalcoverage[d]:
            globalcoverage[d]["layercoverage"] = []
        # total 53 cnn layers in the model
        assert len(globalcoverage[d]["layercoverage"]) <= 53
        covered_channel = covered_channel_group[d]
        globalcoverage[d]["layercoverage"].append((len(output.data[0]), covered_channel))
    # Flush whenever the newest entry is complete; 'ab' appends so repeated
    # dumps accumulate as successive pickle records in one file.
    if len(globalcoverage[-1]["layercoverage"]) == 53:
        with open('globalcoveragecocoexp_test_0.5.pickle', 'ab') as handle:
            pickle.dump(globalcoverage, handle, protocol=pickle.HIGHEST_PROTOCOL)
        globalcoverage = []
def get_id2object_pkl():
    """Build the index -> name map (person split into man/woman) and pickle it.

    Same mapping as get_id2object(), but the result is written to
    'id2object.pickle' instead of being returned.
    """
    from pycocotools.coco import COCO
    ann_path = os.path.join(ann_dir, "instances_train2014.json")
    cocoAPI = COCO(ann_path)
    data = json.load(open(ann_path))
    #81 objects
    id2object = dict()
    object2id = dict()
    person_id = -1
    for position, category in enumerate(data['categories']):
        if category['name'] == 'person':
            person_id = position
            print("person index: " + str(position))
            # relabel the person slot; "woman" is appended after the loop
            id2object[position] = "man"
            object2id["man"] = position
        else:
            id2object[position] = category['name']
            object2id[category['name']] = position
    id2object[80] = "woman"
    object2id['woman'] = 80
    assert person_id != -1
    with open('id2object.pickle', 'wb') as handle:
        pickle.dump(id2object, handle, protocol=pickle.HIGHEST_PROTOCOL)
#Get coverage of all validating data in train dataset.
def get_coverage_test():
    """Run the model over the 'test' split and record conv-layer coverage.

    One dict per image is appended to ``globalcoverage`` BEFORE the forward
    pass so the hooks registered by hook_all_conv_layer (see
    get_channel_coverage_group_exp) can fill in "layercoverage" for the
    batch; the hooks also flush completed entries to a pickle file.
    """
    global globalcoverage
    # NOTE(review): these shadow the module-level ann_dir/image_dir —
    # presumably the test split lives at a different path; confirm.
    ann_dir = '/local/yuchi/dataset/coco/annotations'
    image_dir = '/local/yuchi/dataset/coco/'
    crop_size = 224
    image_size = 256
    batch_size = 16
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    val_transform = transforms.Compose([
        transforms.Scale(image_size),
        transforms.CenterCrop(crop_size),
        transforms.ToTensor(),
        normalize])
    # Data samplers.
    train_data = CocoObject(ann_dir=ann_dir, image_dir=image_dir,
                            split='test', transform=val_transform)
    image_ids = train_data.new_image_ids
    image_path_map = train_data.image_path_map
    # 80 objects plus the man/woman split -> 81 classes
    id2object = train_data.id2object
    id2labels = train_data.id2labels
    # Data loaders / batch assemblers (shuffle=False keeps batch order
    # aligned with the hook's negative-index bookkeeping).
    train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
                                               shuffle=False, num_workers=4,
                                               pin_memory=True)
    model = MultilabelObject(None, 81).cuda()
    hook_all_conv_layer(model, get_channel_coverage_group_exp)
    log_dir = "./"
    log_dir1 = "/home/yuchi/work/coco/backup"
    checkpoint = torch.load(os.path.join(log_dir, 'model_best.pth.tar'))
    model.load_state_dict(checkpoint['state_dict'])
    model.eval()
    t = tqdm(train_loader, desc='Activation')
    count = 0
    for batch_idx, (images, objects, image_ids) in enumerate(t):
        images = Variable(images).cuda()
        objects = Variable(objects).cuda()
        # range() instead of Python-2-only xrange() for Python 3 support.
        # Entries MUST be appended before model(images): the forward hooks
        # write into the tail of globalcoverage.
        for i in range(len(image_ids)):
            globalcoverage.append({})
            image_file_name = image_path_map[int(image_ids[i])]
            yhat = []
            globalcoverage[-1]["file"] = image_file_name
            globalcoverage[-1]["yhat"] = yhat
            globalcoverage[-1]["dataset"] = "test"
            globalcoverage[-1]["jlabel"] = id2labels[int(image_ids[i])]
        object_preds = model(images)
        m = nn.Sigmoid()
        object_preds_r = m(object_preds)
        count = count + len(image_ids)
        if count % 1000 == 0:
            print("count: " + str(count))
# Get yhats
def get_yhats_val():
    """Run the trained model over the 'val' split and pickle its predictions.

    Writes three parallel pickles, in dataset order:
    globalyhats_val.pickle (predicted object-name lists per image),
    globallabels_val.pickle (ground-truth label lists) and
    imagefiles_val.pickle (image file names).
    """
    crop_size = 224
    image_size = 256
    batch_size = 16
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    val_transform = transforms.Compose([
        transforms.Scale(image_size),
        transforms.CenterCrop(crop_size),
        transforms.ToTensor(),
        normalize])
    # Data samplers.
    train_data = CocoObject(ann_dir=ann_dir, image_dir=image_dir,
                            split='val', transform=val_transform)
    image_ids = train_data.new_image_ids
    image_path_map = train_data.image_path_map
    # 80 objects plus the man/woman split -> 81 classes
    id2object = train_data.id2object
    id2labels = train_data.id2labels
    # Data loaders / batch assemblers.
    train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
                                               shuffle=False, num_workers=4,
                                               pin_memory=True)
    model = MultilabelObject(None, 81).cuda()
    log_dir = "./"
    checkpoint = torch.load(os.path.join(log_dir, 'model_best.pth.tar'))
    model.load_state_dict(checkpoint['state_dict'])
    model.eval()
    t = tqdm(train_loader, desc='Activation')
    count = 0
    yhats = []
    labels = []
    imagefiles = []
    for batch_idx, (images, objects, image_ids) in enumerate(t):
        images = Variable(images).cuda()
        objects = Variable(objects).cuda()
        object_preds = model(images)
        m = nn.Sigmoid()
        object_preds_r = m(object_preds)
        count = count + len(image_ids)
        # range() instead of Python-2-only xrange() for Python 3 support.
        for i in range(len(image_ids)):
            # int() cast for the dict lookups, consistent with
            # get_yhats_test/get_coverage_test (image_ids elements may be
            # tensor scalars, which would otherwise miss the int keys).
            image_file_name = image_path_map[int(image_ids[i])]
            yhat = []
            label = id2labels[int(image_ids[i])]
            for j in range(len(object_preds[i])):
                a = object_preds_r[i][j].cpu().data.numpy()
                if a[0] > 0.5:
                    yhat.append(id2object[j])
            yhats.append(yhat)
            labels.append(label)
            imagefiles.append(image_file_name)
        if count % 1000 == 0:
            print("count: " + str(count))
    with open('globalyhats_val.pickle', 'wb') as handle:
        pickle.dump(yhats, handle, protocol=pickle.HIGHEST_PROTOCOL)
    with open('globallabels_val.pickle', 'wb') as handle:
        pickle.dump(labels, handle, protocol=pickle.HIGHEST_PROTOCOL)
    with open('imagefiles_val.pickle', 'wb') as handle:
        pickle.dump(imagefiles, handle, protocol=pickle.HIGHEST_PROTOCOL)
'''
with open('globalyhats.pickle', 'rb') as handle:
yhats = pickle.load(handle)
with open('globallabels.pickle', 'rb') as handle:
labels = pickle.load(handle)
with open('imagefiles.pickle', 'rb') as handle:
imagefiles = pickle.load(handle)
'''
# Get yhats
def get_yhats_test(confidence=0.5):
ann_dir = '/local/yuchi/dataset/coco/annotations'
image_dir = '/local/yuchi/dataset/coco/'
crop_size = 224
image_size = 256
batch_size = 16
normalize = transforms.Normalize(mean = [0.485, 0.456, 0.406],
std = [0.229, 0.224, 0.225])
val_transform = transforms.Compose([
transforms.Scale(image_size),
transforms.CenterCrop(crop_size),
transforms.ToTensor(),
normalize])
# Data samplers.
train_data = CocoObject(ann_dir = ann_dir, image_dir = image_dir,
split = 'test', transform = val_transform)
image_ids = train_data.new_image_ids
image_path_map = train_data.image_path_map
#80 objects
id2object = train_data.id2object
id2labels = train_data.id2labels
# Data loaders / batch assemblers.
train_loader = torch.utils.data.DataLoader(train_data, batch_size = batch_size,
shuffle = False, num_workers = 4,
pin_memory = True)
model = MultilabelObject(None, 81).cuda()
log_dir = "./"
checkpoint = torch.load(os.path.join(log_dir, 'model_best.pth.tar'))
model.load_state_dict(checkpoint['state_dict'])
model.eval()
t = tqdm(train_loader, desc = 'Activation')
count = 0
yhats = []
labels = []
imagefiles = []
res = list()
for batch_idx, (images, objects, image_ids) in enumerate(t):
images = Variable(images).cuda()
objects = Variable(objects).cuda()
object_preds = model(images)
m = nn.Sigmoid()
object_preds_r = m(object_preds)
count = count + len(image_ids)
for i in xrange(len(image_ids)):
image_file_name = image_path_map[int(image_ids[i])]
yhat = []
label = id2labels[int(image_ids[i])]
for j in xrange(len(object_preds[i])):
a = object_preds_r[i][j].cpu().data.numpy()
if a > confidence:
yhat.append(id2object[j])
yhats.append(yhat)
labels.append(label)
imagefiles.append(image_file_name)
res.append((image_ids, object_preds.data.cpu(), objects.data.cpu()))
if count % 1000 == 0:
print("count: " + str(count))
preds_object = torch.cat([entry[1] for entry in res], 0)
targets_object = torch.cat([entry[2] for entry in res], 0)
eval_score_object = average_precision_score(targets_object.numpy(), preds_object.numpy())
print('\nmean average precision of object classifier on test data is {}\n'.format(eval_score_object))
with open('globalyhats_test.pickle', 'wb') as handle:
pickle.dump(yhats, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open('globallabels_test.pickle', 'wb') as handle:
pickle.dump(labels, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open('imagefiles_test.pickle', 'wb') as handle:
pickle.dump(imagefiles, handle, protocol=pickle.HIGHEST_PROTOCOL)
# Get yhats
def get_yhats_train(confidence=0.5):
    """Run the trained model over the 'train' split and pickle its predictions.

    *confidence* is the sigmoid threshold above which a class is predicted.
    Writes globalyhats_train.pickle, globallabels_train.pickle and
    imagefiles_train.pickle, in dataset order.
    """
    crop_size = 224
    image_size = 256
    batch_size = 16
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    val_transform = transforms.Compose([
        transforms.Scale(image_size),
        transforms.CenterCrop(crop_size),
        transforms.ToTensor(),
        normalize])
    # Data samplers.
    train_data = CocoObject(ann_dir=ann_dir, image_dir=image_dir,
                            split='train', transform=val_transform)
    image_ids = train_data.new_image_ids
    image_path_map = train_data.image_path_map
    # 80 objects plus the man/woman split -> 81 classes
    id2object = train_data.id2object
    id2labels = train_data.id2labels
    # Data loaders / batch assemblers.
    train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
                                               shuffle=False, num_workers=4,
                                               pin_memory=True)
    model = MultilabelObject(None, 81).cuda()
    log_dir = "./"
    checkpoint = torch.load(os.path.join(log_dir, 'model_best.pth.tar'))
    model.load_state_dict(checkpoint['state_dict'])
    model.eval()
    t = tqdm(train_loader, desc='Activation')
    count = 0
    yhats = []
    labels = []
    imagefiles = []
    for batch_idx, (images, objects, image_ids) in enumerate(t):
        images = Variable(images).cuda()
        objects = Variable(objects).cuda()
        object_preds = model(images)
        m = nn.Sigmoid()
        object_preds_r = m(object_preds)
        count = count + len(image_ids)
        # range() instead of Python-2-only xrange() for Python 3 support.
        for i in range(len(image_ids)):
            # int() cast for the dict lookups, consistent with
            # get_yhats_test/get_coverage_test.
            image_file_name = image_path_map[int(image_ids[i])]
            yhat = []
            label = id2labels[int(image_ids[i])]
            for j in range(len(object_preds[i])):
                a = object_preds_r[i][j].cpu().data.numpy()
                if a[0] > confidence:
                    yhat.append(id2object[j])
            yhats.append(yhat)
            labels.append(label)
            imagefiles.append(image_file_name)
        if count % 1000 == 0:
            print("count: " + str(count))
    with open('globalyhats_train.pickle', 'wb') as handle:
        pickle.dump(yhats, handle, protocol=pickle.HIGHEST_PROTOCOL)
    with open('globallabels_train.pickle', 'wb') as handle:
        pickle.dump(labels, handle, protocol=pickle.HIGHEST_PROTOCOL)
    with open('imagefiles_train.pickle', 'wb') as handle:
        pickle.dump(imagefiles, handle, protocol=pickle.HIGHEST_PROTOCOL)
def softmax(x):
    """Numerically stable softmax over all elements of x."""
    # Shifting by the maximum avoids overflow in exp() without changing
    # the result (softmax is shift-invariant).
    shifted = np.exp(x - np.max(x))
    return shifted / shifted.sum()
def get_val_transform():
crop_size = 224
image_size = 256
normalize = transforms.Normalize(mean = | |
temp_priority = priority_obj.priority(STEPS[index], step_value['value'])
else:
temp_priority = priority_obj.priority('Evgen', step_value['value'])
step_template = fill_template(STEPS[index], ctag, temp_priority, output_formats, memory)
if 'priority' in step_value['changes']:
temp_priority = step_value['changes']['priority']
st_exec = StepExecution(request=cur_request,slice=input_list,step_template=step_template,
priority=temp_priority)
no_parent = True
if parent_step:
st_exec.step_parent = parent_step
no_parent = False
st_exec.status = step_status_definition(step_value['is_skipped'], index<=approve_level,
index>=waiting_level)
st_exec.set_task_config(task_config)
st_exec.remove_task_config('spreadsheet_original')
if 'input_events' in step_value['changes']:
st_exec.input_events = step_value['changes']['input_events']
else:
st_exec.input_events = total_events
if st_exec.status not in SKIPPED_STATUS:
total_events = -1
still_skipped = False
st_exec.save_with_current_time()
if no_parent:
st_exec.step_parent = st_exec
st_exec.save()
if (st_exec.status == 'Approved') and (st_exec.get_task_config('PDA')):
set_action(step_in_db)
parent_step = st_exec
new_step = True
for step in to_delete:
step.step_parent = step
step.save()
step.delete()
except Exception as e:
_logger.error("Problem step save/approval %s"%str(e))
error_slices.append(int(slice))
else:
if no_action:
no_action_slices.append(int(slice))
except Exception as e:
_logger.error("Problem step save/approval %s"%str(e))
raise e
return error_slices,no_action_slices
def filter_mc_campaign(cur_request, tasks, input_step_project_campaigns=[]):
    """Filter *tasks* to those compatible with an MC16 request's subcampaign.

    For non-MC16 requests the task list is returned unchanged with -1 events.
    For MC16, each kept task's total_events is accumulated and returned.
    """
    if cur_request.campaign != 'MC16':
        return tasks, -1
    matching = []
    events = 0
    subcampaign = cur_request.subcampaign
    for task_id in tasks:
        task = ProductionTask.objects.get(id=task_id)
        if task.request.campaign == cur_request.campaign:
            # Same campaign: exact subcampaign match, the MC16c-feeds-MC16d
            # special case, or an explicitly allowed input campaign.
            accept = (((subcampaign == 'MC16d') and (task.request.subcampaign == 'MC16c'))
                      or (task.request.subcampaign == subcampaign)
                      or (task.request.subcampaign in input_step_project_campaigns))
        else:
            # Different campaign: only MC16a, validation tasks, or an
            # explicitly allowed input campaign qualify.
            accept = ((subcampaign == 'MC16a')
                      or ('valid' in task.name)
                      or (task.request.subcampaign in input_step_project_campaigns))
        if accept:
            matching.append(task_id)
            events += task.total_events
    return matching, events
def filter_input_datasets(dataset_events, reqid, filter_type, input_step_project_campaigns = []):
    """Filter candidate input datasets for a request by MC campaign.

    Each item of *dataset_events* is a dict with 'dataset_name', 'events'
    and 'tasks'. Items whose name lacks *filter_type* (when given) are
    dropped. Items where some tasks were excluded are flagged
    'excluded': True with the recomputed event count; untouched items keep
    their original event count.
    """
    result = []
    # Loop-invariant hoisted: the original fetched the same TRequest row
    # from the database once per dataset item.
    cur_request = TRequest.objects.get(reqid=reqid)
    for item in dataset_events:
        if filter_type:
            if filter_type not in item['dataset_name']:
                continue
        new_tasks, new_events = filter_mc_campaign(cur_request, item['tasks'], input_step_project_campaigns)
        if len(new_tasks) < len(item['tasks']):
            # Some tasks filtered out: keep only if anything survived.
            if len(new_tasks) > 0:
                result.append({'dataset_name': item['dataset_name'], 'events': str(new_events), 'excluded': True})
        else:
            result.append({'dataset_name': item['dataset_name'], 'events': item['events'], 'excluded': False})
    return result
def form_skipped_slice(slice, reqid):
    """Find candidate input datasets for a slice whose leading steps are skipped.

    Walks the slice's ordered steps collecting the ctags of the skipped
    prefix, derives the expected input dataset type from the last skipped
    step's name, and searches for matching datasets. Returns
    ``{slice: [dataset dicts]}`` (empty list when nothing applies or on error).
    """
    cur_request = TRequest.objects.get(reqid=reqid)
    input_list = InputRequestList.objects.filter(request=cur_request, slice=int(slice))[0]
    existed_steps = StepExecution.objects.filter(request=cur_request, slice=input_list)
    # Check steps which already exist in slice
    try:
        ordered_existed_steps, existed_foreign_step = form_existed_step_list(existed_steps)
    except ValueError as e:
        ordered_existed_steps, existed_foreign_step = [],None
    # NOTE(review): if the ValueError path was taken the list is empty and
    # the next line raises IndexError — confirm whether that can happen here.
    if ordered_existed_steps[0].status == 'Skipped' and input_list.dataset:
        # Dataset already assigned to a skipped slice: nothing to look up.
        return {slice:[]}
    processed_tags = []
    last_step_name = ''
    input_step_format = ''
    input_step_project_campaigns = []
    # Collect ctags of the skipped prefix; stop at the first real step and
    # read its input format / allowed campaigns.
    for step in ordered_existed_steps:
        if step.status == 'NotCheckedSkipped' or step.status == 'Skipped':
            processed_tags.append(step.step_template.ctag)
            last_step_name = step.step_template.step
        else:
            input_step_format = step.get_task_config('input_format')
            project_cmapaigns = step.get_project_mode('runOnlyCampaign')
            if project_cmapaigns:
                # Entries look like 'prefix:campaign' — keep the part after ':'.
                input_step_project_campaigns = list(map(lambda x: x[x.find(':')+1:],project_cmapaigns.split(',')))
            break
    if input_list.input_data and processed_tags:
        try:
            input_type = ''
            # Map the last skipped step to the dataset format (and name
            # prefix) its output would have carried.
            default_input_type_prefix = {
                'Evgen': {'format':'EVNT','prefix':''},
                'Evgen Merge': {'format':'EVNT','prefix':'.'},
                'Simul': {'format':'HITS','prefix':'.', 'filter':'simul'},
                'Merge': {'format':'HITS','prefix':'.','filter':'merge'},
                'Reco': {'format':'AOD','prefix':'recon.'},
                'Rec Merge': {'format':'AOD','prefix':'merge.'}
                }
            filter_type = ''
            if last_step_name in default_input_type_prefix:
                if input_step_format:
                    input_type = default_input_type_prefix[last_step_name]['prefix'] + input_step_format
                else:
                    input_type = default_input_type_prefix[last_step_name]['prefix'] + default_input_type_prefix[last_step_name]['format']
                if 'filter' in default_input_type_prefix[last_step_name]:
                    filter_type = default_input_type_prefix[last_step_name]['filter']
            # input_data is either 'DSID/job.Option...' or 'proj.DSID.jobOption...'.
            if ('/' in input_list.input_data) and input_list.input_data.split('/')[0].isdigit():
                dsid = input_list.input_data.split('/')[0]
                job_option_pattern = input_list.input_data.split('/')[1].split('.')[1]
            else:
                dsid = input_list.input_data.split('.')[1]
                job_option_pattern = input_list.input_data.split('.')[2]
            dataset_events = find_skipped_dataset(dsid,job_option_pattern,processed_tags,input_type)
            # if input_type=='merge.HITS':
            #     dataset_events += find_skipped_dataset(dsid,job_option_pattern,processed_tags,'simul.HITS')
            # if input_type=='simul.HITS':
            #     dataset_events += find_skipped_dataset(dsid,job_option_pattern,processed_tags,'merge.HITS')
            #print dataset_events
            #return {slice:[x for x in dataset_events if x['events']>=input_list.input_events ]}
            return {slice:filter_input_datasets(dataset_events, reqid, filter_type, input_step_project_campaigns)}
        except Exception as e:
            logging.error("Can't find skipped dataset: %s" %str(e))
            return {slice:[]}
    return {slice:[]}
def get_skipped_steps(production_request, slice):
    """Return (last skipped step name, skipped ctags, first non-skipped step).

    Walks the slice's ordered steps: the leading run of skipped steps
    contributes its ctags and the name of the last one; iteration stops at
    the first non-skipped step (None when every step is skipped).
    """
    existed_steps = StepExecution.objects.filter(request=production_request, slice=slice)
    # Check steps which already exist in slice
    try:
        ordered_steps, foreign_step = form_existed_step_list(existed_steps)
    except ValueError:
        ordered_steps, foreign_step = [], None
    processed_tags = []
    last_step_name = ''
    last_step = None
    for current in ordered_steps:
        if current.status in ('NotCheckedSkipped', 'Skipped'):
            processed_tags.append(current.step_template.ctag)
            last_step_name = current.step_template.step
        else:
            last_step = current
            break
    return last_step_name, processed_tags, last_step
@csrf_protect
def find_input_datasets(request, reqid):
    """POST view: find candidate input datasets for a set of MC slices.

    The request body is a JSON list of slice numbers; the response maps
    each slice to its candidate datasets (see form_skipped_slice).
    Returns None for non-POST requests.
    """
    if request.method == 'POST':
        results = {'success':False}
        start_time = time()
        slice_dataset_dict = {}
        data = request.body
        slices = json.loads(data)
        for slice_number in slices:
            try:
                slice_dataset_dict.update(form_skipped_slice(slice_number,reqid))
            except Exception as e:
                # Deliberate best-effort: a failing slice is simply omitted
                # from the response rather than failing the whole request.
                pass
        results.update({'success':True,'data':slice_dataset_dict})
        _jsonLogger.info('Finish find input datasets for MC slices', extra=form_json_request_dict(reqid,request,
                                                                  {'duration':time()-start_time,'slices':json.dumps(slices)}))
        return HttpResponse(json.dumps(results), content_type='application/json')
@csrf_protect
def change_request_priority(request, reqid, old_priority, new_priority):
    """POST view: move every slice with *old_priority* to *new_priority*.

    Delegates the per-slice update to save_slice_changes, then refreshes
    the request-level priority. Returns None for non-POST requests.
    """
    if request.method == 'POST':
        matching = list(InputRequestList.objects.filter(request=reqid, priority=int(old_priority)))
        slice_steps = {
            str(current.slice): {'changes': {'priority': str(int(new_priority))}}
            for current in matching
        }
        try:
            save_slice_changes(reqid, slice_steps)
        except Exception as e:
            logging.error("Can't update slice priority: %s" %str(e))
        results = {'success': True}
        fill_request_priority(reqid,reqid)
        return HttpResponse(json.dumps(results), content_type='application/json')
# Accounts allowed to approve MC requests (see request_approve_status);
# membership of the MCCOORD e-group or superuser status grants the same right.
MC_COORDINATORS= ['cgwenlan','jzhong','jgarcian','mcfayden','jferrand','mehlhase','schwanen','lserkin','jcosta','boeriu',
                  'onofrio','jmonk','kado']
def request_approve_status(production_request, request, user_name='', is_superuser=None):
    """Decide whether a request becomes 'approved' or only 'registered'.

    MC requests (outside VALI) require the user to be an MC coordinator,
    hold the MCCOORD e-group permission, or be a superuser; otherwise the
    request is only 'registered'. Waiting GROUP requests that are train
    children of a 'PatternToMerge' pattern are also only 'registered'.
    Everything else is 'approved'.
    """
    # user_name / is_superuser default to the HTTP request's user when not
    # passed explicitly; failures (e.g. no request) leave the defaults.
    if request and not user_name:
        try:
            user_name = request.user.username
        except:
            pass
    if request and not is_superuser:
        try:
            is_superuser = request.user.is_superuser
        except:
            pass
    if (production_request.request_type == 'MC') and (production_request.phys_group != 'VALI'):
        # change to VOMS
        _logger.debug("request:%s is registered by %s" % (str(production_request.reqid),user_name))
        if (user_name in MC_COORDINATORS) or ('MCCOORD' in egroup_permissions(user_name)) or is_superuser:
            return 'approved'
    else:
        if (production_request.request_type == 'GROUP') and (production_request.cstatus == 'waiting'):
            # A waiting GROUP request that is the child of a train whose
            # pattern request carries the 'PatternToMerge' hashtag stays
            # 'registered' (approval happens elsewhere).
            if ParentToChildRequest.objects.filter(parent_request=production_request).exists():
                if ParentToChildRequest.objects.filter(parent_request=production_request)[0].train_id:
                    if HashTag.objects.filter(hashtag__iexact='PatternToMerge').exists():
                        patterns = list(HashTagToRequest.objects.filter(hashtag=HashTag.objects.filter(hashtag__iexact='PatternToMerge')[0]).values_list('request_id',flat=True))
                        if TrainProduction.objects.get(id=ParentToChildRequest.objects.filter(parent_request=production_request)[0].train_id).pattern_request_id in patterns:
                            return 'registered'
    # if (production_request.request_type == 'GROUP') and (production_request.cstatus == 'waiting') \
    #         and (production_request.phys_group != 'VALI'):
    #     if (production_request.phys_group in egroup_permissions(request.user.username)) or is_superuser:
    #         return 'approved'
    #     else:
    #         return 'registered'
    return 'approved'
def remove_input(good_slices, reqid):
    """Clear non-EVNT/TXT input datasets from approvable Evgen slices.

    For each slice number in *good_slices* whose first step is an Evgen
    step in NotChecked/Approved status, drop the slice's dataset unless it
    is an EVNT or TXT dataset. Returns the slice numbers that were cleared.
    """
    removed_input_slices = []
    for slice_number in good_slices:
        input_list = InputRequestList.objects.get(request=reqid, slice=int(slice_number))
        existed_steps = StepExecution.objects.filter(request=reqid, slice=input_list)
        try:
            ordered_existed_steps, existed_foreign_step = form_existed_step_list(existed_steps)
            if (ordered_existed_steps[0].step_template.step == 'Evgen') and (ordered_existed_steps[0].status in ['NotChecked','Approved']):
                if input_list.dataset:
                    if ('EVNT' not in input_list.dataset) and ('TXT' not in input_list.dataset):
                        input_list.dataset = None
                        input_list.save()
                        removed_input_slices.append(slice_number)
        except Exception:
            # Narrowed from a bare `except:` (which also swallowed
            # SystemExit/KeyboardInterrupt); a failing slice is deliberately
            # skipped best-effort.
            pass
    return removed_input_slices
def fill_all_slices_from_0_slice(reqid):
    """Copy slice 0's step chain onto every other visible slice of a request.

    Only acts when slice 0 holds ALL of the request's steps (i.e. the other
    slices are empty). Each copied chain is re-parented internally: the
    first step becomes its own parent, later steps chain to the previous one.
    """
    slices = InputRequestList.objects.filter(request=reqid,is_hide=False).order_by('slice')
    steps_slice_0 = list(StepExecution.objects.filter(request = reqid,slice=slices[0]))
    steps_total_count = StepExecution.objects.filter(request = reqid).count()
    if len(steps_slice_0) == steps_total_count:
        ordered_existed_steps, existed_foreign_step = form_existed_step_list(steps_slice_0)
        #steps_dict = [model_to_dict(x) for x in ordered_existed_steps]
        # id=None turns each in-memory object into a template that will
        # INSERT a fresh row on save; self-parenting is the placeholder
        # until the real parent is wired below.
        for step in ordered_existed_steps:
            step.id = None
            step.step_parent = step
        for slice in slices:
            if slice.slice != 0:
                parent = None
                for step_dict in ordered_existed_steps:
                    # deepcopy so each slice gets its own objects and the
                    # shared templates are never mutated.
                    current_step = deepcopy(step_dict)
                    current_step.slice = slice
                    if parent:
                        current_step.step_parent = parent
                    # First save assigns the new primary key...
                    current_step.save()
                    if not parent:
                        # ...which the chain head needs before it can point
                        # at itself as its own parent.
                        current_step.step_parent = current_step
                    current_step.input_events = slice.input_events
                    current_step.save()
                    parent = current_step
def save_slice_changes(reqid, slice_steps):
    """Apply per-slice edits (jobOption/dataset/events/comment/priority).

    *slice_steps* maps slice numbers (as strings) to dicts with a 'changes'
    sub-dict. Slices that already have an Approved step, or whose update
    fails, are skipped and their numbers collected. Returns the list of
    slice numbers that could not be changed (the original built this list
    but appended the builtin ``slice`` type and always returned ``[]``).
    """
    not_changed = []
    for slice_number, steps_status in list(slice_steps.items()):
        if slice_number != '-1':
            if steps_status['changes']:
                # Only act when at least one editable field actually changed.
                do_action = False
                for field in ['jobOption','datasetName','eventsNumber','comment','priority']:
                    if steps_status['changes'].get(field):
                        do_action = True
                if do_action:
                    try:
                        current_slice = InputRequestList.objects.get(request=reqid,slice=int(slice_number))
                        if StepExecution.objects.filter(slice=current_slice,status = 'Approved',request=reqid).count() > 0:
                            # Approved steps freeze the slice.
                            # Fixed: was `not_changed.append(slice)` — the builtin.
                            not_changed.append(slice_number)
                        else:
                            if steps_status['changes'].get('jobOption'):
                                current_slice.input_data = steps_status['changes'].get('jobOption')
                                current_slice.save()
                            if steps_status['changes'].get('datasetName'):
                                change_dataset_in_slice(reqid,slice_number,steps_status['changes'].get('datasetName'))
                            if steps_status['changes'].get('eventsNumber'):
                                current_slice.input_events = steps_status['changes'].get('eventsNumber')
                                current_slice.save()
                                if int(steps_status['changes'].get('eventsNumber')) == -1:
                                    # -1 means "all events": propagate to every step.
                                    for step in steps_status['sliceSteps']:
                                        if step['value']:
                                            if step['changes']:
                                                if 'input_events' not in step['changes']:
                                                    step['changes'].update({'input_events':'-1'})
                                            else:
                                                step['changes'] = {'input_events':'-1'}
                            if steps_status['changes'].get('comment'):
                                new_comment = steps_status['changes'].get('comment')
                                # Preserve the existing "(type)" prefix when the
                                # new comment does not carry one.
                                if not re.match(r'\(\w+\).*',new_comment):
                                    if re.match(r'\((?P<type>\w+)\).*',current_slice.comment):
                                        new_comment ='('+re.match(r'\((?P<type>\w+)\).*',current_slice.comment)['type'] +')' + new_comment
                                current_slice.comment = new_comment
                                current_slice.save()
                            if steps_status['changes'].get('priority'):
                                current_slice.priority = steps_status['changes'].get('priority')
                                current_slice.save()
                                # Recompute every step's priority from the new
                                # slice priority.
                                priority_dict = get_priority_object(current_slice.priority)
                                for step in StepExecution.objects.filter(slice=current_slice,request=reqid):
                                    step.priority = priority_dict.priority(step.step_template.step, step.step_template.ctag)
                                    step.save()
                    except Exception as e:
                        # Fixed: was `not_changed.append(slice)` — the builtin.
                        not_changed.append(slice_number)
    # Fixed: the original returned [] unconditionally, discarding the list.
    return not_changed
def find_input_per_file(dataset_name):
    """Return nEventsPerJob for the first healthy task producing *dataset_name*.

    The scope prefix (up to ':') and any trailing '/' are stripped; names
    without a 'tid' suffix are matched with a 'tid%' wildcard. Returns ''
    when no non-failed producing task is found or on any error.
    """
    bare_name = dataset_name.replace('/', '')[dataset_name.find(':') + 1:]
    if 'tid' not in dataset_name:
        to_search = bare_name + '_tid%'
    else:
        to_search = bare_name
    try:
        for dataset in ProductionDataset.objects.extra(where=['name like %s'], params=[to_search]):
            current_task = ProductionTask.objects.get(id=dataset.task_id)
            if current_task.status not in ProductionTask.RED_STATUS:
                return current_task.step.get_task_config('nEventsPerJob')
        return ''
    except Exception:
        return ''
def find_evgen_missing(evgen_tag, job_options, energy = '13TeV'):
    """Sum total_events of usable evgen tasks matching *job_options* + *evgen_tag*.

    The task name is assembled from the job-options name parts, the energy
    and the tag. Tasks in a red or obsolete status are not counted; 0 is
    returned when no task matches at all.
    """
    parts = job_options.split('.')
    task_name = (parts[0].lower() + '_' + energy + '.' + parts[1] + '.'
                 + parts[2] + '.' + 'evgen' + '.' + evgen_tag)
    tasks = ProductionTask.objects.filter(name=task_name)
    if not tasks:
        return 0
    unusable = ProductionTask.RED_STATUS + ['obsolete']
    return sum(task.total_events for task in tasks if task.status not in unusable)
@csrf_protect
def check_slices_for_request_split(request, production_request):
    """POST view: decide whether a request should be split for missing evgen.

    The body is a JSON list of [slice_number, evgen_tag] pairs. A split is
    suggested when any slice has less than 95% of its required events
    already produced as evgen. VALI requests never split.
    """
    results = {'success':False, 'do_split': False}
    if request.method == 'POST':
        try:
            data = request.body
            input_dict = json.loads(data)
            slices_evgen = input_dict
            do_split = False
            no_events = False
            pr_request = TRequest.objects.get(reqid=production_request)
            project = pr_request.project_id
            if pr_request.phys_group != 'VALI':
                # NOTE(review): lexicographic string comparison on the
                # project id — presumably selects campaigns after 'mc16';
                # confirm the naming scheme makes this ordering valid.
                if project > 'mc16':
                    for slice_evgen in slices_evgen:
                        if slice_evgen[0] != -1:
                            slice = InputRequestList.objects.get(request=production_request,slice=slice_evgen[0])
                            evgen_events = find_evgen_missing(slice_evgen[1],slice.input_data)
                            # Split when existing evgen covers <95% of the need.
                            if evgen_events <= (0.95 * float(slice.input_events)):
                                do_split = True
                                break
            results = {'success':True, 'do_split': do_split, 'no_events': no_events }
        except Exception as e:
            # Deliberate best-effort: any failure reports success=False.
            pass
    return HttpResponse(json.dumps(results), content_type='application/json')
def split_slice_between_projects(slice, parent_request, child_request, step_to_split_number):
#Clone slice to two requests
_logger.debug('Clone slice %s from %s to %s'%(str(slice),str(parent_request.reqid),str(child_request.reqid)))
new_slice_number = clone_slices(parent_request.reqid,child_request.reqid,[slice.slice],-1,True,False,False,{},step_to_split_number)[0]
ordered_existed_steps, existed_foreign_step = form_existed_step_list(StepExecution.objects.filter(request=parent_request, slice=slice))
new_slice = InputRequestList.objects.get(request=child_request,slice=new_slice_number)
child_steps, existed_foreign_step = form_existed_step_list(StepExecution.objects.filter(request=child_request,
slice=new_slice))
new_slice.input_events = child_steps[0].get_task_config('split_events')
new_slice.save()
if slice.input_events > new_slice.input_events:
slice.input_events = slice.input_events - new_slice.input_events
slice.save()
parent_steps = {ordered_existed_steps[step_to_split_number].step_parent.id:child_steps[step_to_split_number-1]}
new_parent_slice_number = clone_slices(parent_request.reqid,parent_request.reqid,[slice.slice],step_to_split_number,True,False,False,parent_steps)[0]
new_parent_slice = InputRequestList.objects.get(request=parent_request,slice=new_parent_slice_number)
new_parent_slice.input_events = new_slice.input_events
new_parent_slice.save()
# Approve child request:
for step in child_steps:
if step:
step.status = 'Approved'
step.input_events = step.get_task_config('split_events')
step.remove_task_config('split_events')
step.save()
set_request_status('cron',child_request.reqid,'approved','Automatic cloned approve', 'Request was automatically approved')
# Waiting new slice
cloned_parent_slice, existed_foreign_step = form_existed_step_list(StepExecution.objects.filter(request=parent_request,
slice=new_parent_slice))
for index, step in enumerate(cloned_parent_slice):
if step:
if ordered_existed_steps[index+step_to_split_number].status in ['Approved','Waiting']:
step.status = 'Waiting'
step.save()
for index, step in enumerate(ordered_existed_steps):
if step and (index<step_to_split_number):
step.status = 'Skipped'
step.save()
# work with original slice
containers = form_skipped_slice(slice.slice, parent_request.reqid)[slice.slice]
if containers:
_logger.debug('Find containers: %s'%(str(containers)))
containers.sort(key= lambda x: x['events'])
change_dataset_in_slice(parent_request.reqid,slice.slice,containers[0]['dataset_name'])
for index, step in enumerate(ordered_existed_steps):
if step:
if step.status == 'Waiting':
step.status = 'Approved'
step.save()
else:
| |
<filename>katsdpscripts/RTS/spectral_baseline.py
from __future__ import absolute_import
from __future__ import print_function
import os
import numpy as np
from numpy.ma import MaskedArray
import scipy.interpolate as interpolate
import scipy.ndimage as ndimage
import katdal
from katdal import averager
import matplotlib.pyplot as plt
from matplotlib import ticker
from matplotlib.backends.backend_pdf import PdfPages
from katsdpscripts.RTS import rfilib
from katsdpscripts import git_info
import h5py
from six.moves import range
def read_and_select_file(data, bline, target=None, channels=None, polarisation=None, flags_file=None, **kwargs):
    """
    Select a baseline/target/channel range in a katdal dataset and extract
    visibilities summed over polarisation products.

    data: katdal dataset (selection is reset then reapplied)
    bline: {string} 'ant1,ant2' baseline (one antenna -> auto-correlation)
    target: target to select (defaults to the first catalogue target)
    channels: {string} 'start,end' channel range (default: drop outer 5%)
    polarisation: 'I' sums HH+VV; otherwise passed through as 'pol'
    flags_file: optional h5 file whose 'flags' dataset replaces data._flags
    Returns:
    (masked visibility array, weights array, the selected dataset)
    """
    #reset selection
    data.select()
    #Make selection from dictionary
    select_data={}
    #Antenna
    ants=bline.split(',')
    (ant1,ant2) = (ants[0],ants[1]) if len(ants)>1 else (ants[0],ants[0])
    select_data['ants'] = (ant1,ant2)
    if ant1 != ant2: select_data['corrprods']='cross'
    #Target (fixed: `target == None` -> identity test, per PEP 8)
    if target is None:
        target = data.catalogue.targets[0]
    select_data['targets']=target
    #Polarisation (fixed: `polarisation is 'I'` compared string IDENTITY,
    # which only worked by accident of CPython string interning; use ==)
    if polarisation == 'I':
        #Need HH and VV for I, can get this from corrprods
        select_data['corrprods']=((ant1 + 'v', ant2 + 'v'),(ant1 + 'h', ant2 + 'h'))
    else:
        select_data['pol']=polarisation
    #Only tracks- no slews
    select_data['scans']='track'
    # Select frequency channels and setup defaults if not specified
    num_channels = len(data.channels)
    if channels is None:
        # Default is drop first and last 5% of the bandpass
        start_chan = num_channels // 20
        end_chan = start_chan * 19
    else:
        start_chan = int(channels.split(',')[0])
        end_chan = int(channels.split(',')[1])
    chan_range = list(range(start_chan,end_chan+1))
    select_data['channels']=chan_range
    data.select(strict=False, **select_data)
    # Check there is some data left over
    if data.shape[0] == 0:
        raise ValueError('Selection has resulted in no data to process.')
    # Insert flags if coming from file
    if flags_file is not None:
        ff = h5py.File(flags_file, 'r')
        data._flags = ff['flags']
    # Get the selected visibilities and flags (average to stokes I if required
    # and extend flags across corr products). Plain `bool` replaces the
    # removed `np.bool` alias (deprecated NumPy 1.20, removed 1.24).
    vis = np.empty(data.shape[:-1], dtype=np.float32)
    flags = np.empty(data.shape[:-1], dtype=bool)
    weights = np.empty(data.shape[:-1], dtype=np.float32)
    for dump in range(data.shape[0]):
        vis[dump] = np.sum(np.abs(data.vis[dump]), axis=-1)
        flags[dump] = np.sum(data.flags[dump], axis=-1, dtype=bool)
        weights[dump] = np.sum(data.weights[dump], axis=-1)
    outputvis = np.ma.masked_array(vis, mask=flags)
    return outputvis, weights, data
class onedbackground():
    """Estimate a smooth background for a 1-d masked spectrum.

    Parameters
    ==========
    smoothing : int
        Smoothing scale: approximate knot spacing for 'spline', window
        size for 'median', Gaussian sigma (in channels) for 'gaussian'.
    background_method : str
        One of 'spline', 'median' or 'gaussian'; selects which private
        estimator the public `getbackground` callable is bound to.
    """
    def __init__(self, smoothing=3, background_method='spline'):
        self.smoothing = smoothing
        # Bind the requested estimator ('spline' -> self._spline, etc.)
        self.getbackground = getattr(self, '_'+background_method)

    def _rolling_window(self, a, window, axis=-1, pad=False, mode='reflect', **kwargs):
        """Return a strided view of `a` with a rolling window on the last axis.

        (Adapted from spassmoor - TM.)

        a : n-D array of data
        window : integer window size
        axis : integer axis to move the window over (default: last axis)
        pad : {Boolean} pad the array so the output keeps the original size
        mode : {str, function} padding mode, passed straight to numpy.pad
        **kwargs are passed to numpy.pad

        Returns:
            an array with shape = np.array(a.shape+(window,))
            and the rolled data on the last axis

        Example:
            data = np.random.normal(size=10000)
            stddata = self._rolling_window(data, 400).std(axis=-1)
        """
        if axis == -1:
            axis = len(a.shape) - 1
        if pad:
            # Pad only the target axis so every original sample gets a window.
            pad_width = []
            for i in range(len(a.shape)):
                if i == axis:
                    pad_width += [(window//2, window//2 - 1 + np.mod(window, 2))]
                else:
                    pad_width += [(0, 0)]
            a = np.pad(a, pad_width=pad_width, mode=mode, **kwargs)
        a1 = np.swapaxes(a, axis, -1)  # Move target axis to last axis in array
        shape = a1.shape[:-1] + (a1.shape[-1] - window + 1, window)
        strides = a1.strides + (a1.strides[-1],)
        # Zero-copy rolling window via stride tricks, then restore axis order.
        return np.lib.stride_tricks.as_strided(a1, shape=shape, strides=strides).swapaxes(-2, axis)

    def _spline(self, data):
        """Cubic LSQ spline background with knots every ~`smoothing` channels.

        Masked channels get zero fit weight and never host a knot.
        """
        spike_width = self.smoothing
        x = np.arange(data.shape[0])
        # Final iteration has knots separated by "spike_width".
        npieces = int(data.shape[0]/spike_width)
        psize = (x[-1]+1)/npieces
        firstindex = int((data.shape[0] % psize))
        indices = np.trim_zeros(np.arange(firstindex, data.shape[0], psize))
        # BUG FIX: np.arange with a float step yields float knot positions;
        # cast to int before using them to index the mask (TypeError on
        # modern NumPy otherwise). Also drop knots on masked channels.
        indices = [int(index) for index in indices if not data.mask[int(index)]]
        # Get the final background, weighting masked channels by zero.
        # BUG FIX: np.float was removed in NumPy >= 1.24; use builtin float.
        finalfit = interpolate.LSQUnivariateSpline(x, data.data, indices, k=3,
                                                   w=(~data.mask).astype(float))
        background = np.asarray(finalfit(x), data.dtype)
        return background

    def _median(self, data):
        """Rolling-window median background that honours the mask."""
        # Roll both data and mask; pad the mask with 'edge' so padded samples
        # inherit the validity of the nearest real sample.
        background = np.ma.median(MaskedArray(self._rolling_window(data.data, self.smoothing, pad=True),
                                  mask=self._rolling_window(data.mask, self.smoothing, pad=True, mode='edge')), axis=-1)
        return background.data

    def _gaussian(self, data):
        """Mask-aware Gaussian smoothing (normalised convolution)."""
        mask = np.ones_like(data)
        mask[data.mask] = 0.0
        sigma = self.smoothing
        # Normalise by the smoothed mask so masked channels don't bias the fit.
        weight = ndimage.gaussian_filter1d(mask, sigma, mode='constant', cval=0.0)
        background = ndimage.gaussian_filter1d(data.data*mask, sigma, mode='constant', cval=0.0)/weight
        return background
def condition_data(vis, flags, weight, polarisation):
    """Prepare visibility data for plotting.

    For Stokes 'I' the two polarisation products are summed and their
    flags OR-ed together; for any other polarisation the singleton
    polarisation axis is simply squeezed away. The amplitudes of the
    result are wrapped in a masked array using the flags as the mask.

    Returns
    =======
    visdata = masked array of amplitudes (same time x channel shape as vis)
    flags, weight = the combined flag and weight arrays
    """
    if polarisation == 'I':
        # Stokes I: sum the two hands, flag a sample if either hand is flagged.
        vis = vis[:, :, 0] + vis[:, :, 1]
        flags = np.logical_or(flags[:, :, 0], flags[:, :, 1])
        weight = weight[:, :, 0] + weight[:, :, 1]
    else:
        # Single product selected: drop the length-1 polarisation axis.
        vis = np.squeeze(vis)
        flags = np.squeeze(flags)
        weight = np.squeeze(weight)
    # Only amplitudes are of interest downstream.
    amplitudes = np.abs(vis)
    visdata = MaskedArray(amplitudes, mask=flags)
    return visdata, flags, weight
def correct_by_mean(vis, axis="Time"):
    """Subtract the mean of the visibility along a given axis.

    Parameters
    ==========
    vis : 2-d (time x channel) masked array of visibilities.
    axis : 'Time' to subtract the per-channel mean over time, or
        'Channel' to subtract the per-timestamp mean over frequency.

    Returns the mean-subtracted masked array.

    Raises ValueError for an unrecognised axis name (the original fell
    through to an UnboundLocalError).
    """
    # NOTE: despite the original docstring saying "median", this has always
    # computed and subtracted the *mean*; the docs/names are now consistent.
    if axis == "Time":
        meanvis = np.ma.mean(vis, axis=0)
        corrected_vis = vis - meanvis[np.newaxis, :]
    elif axis == "Channel":
        meanvis = np.ma.mean(vis, axis=1)
        corrected_vis = vis - meanvis[:, np.newaxis]
    else:
        raise ValueError("axis must be 'Time' or 'Channel', got %r" % (axis,))
    return corrected_vis
def weighted_avg_and_std(values, weights, axis=None):
    """Return the weighted average and standard deviation.

    values, weights -- Numpy ndarrays with the same shape.
    """
    mean = np.ma.average(values, axis=axis, weights=weights)
    # Weighted variance about the weighted mean (fast and numerically precise).
    variance = np.ma.average((values - mean) ** 2, axis=axis, weights=weights)
    std = np.sqrt(variance)
    return (mean, std)
def plot_std_results(corr_visdata_std, mean_visdata, freqdata, flagdata, baseline, pol, freqav, timeav, obs_details, pdf):
    """Plot spectral-baseline statistics to a pdf page.

    Top panel: standard deviation of the corrected visibilities as a
    percentage of the mean per channel (log scale, with a 0.5% reference
    line). Bottom panel: the average spectrum, sharing the frequency axis.
    Channels flagged in more than 50% of dumps are overlaid as RFI,
    together with the static RFI mask.

    Parameters
    ==========
    corr_visdata_std : per-channel standard deviation of the corrected data.
    mean_visdata : per-channel mean of the visibility amplitudes.
    freqdata : channel frequencies in Hz (tick labels are shown in MHz).
    flagdata : boolean flag array (time x channel).
    baseline, pol : labels for the plot title.
    freqav, timeav : averaging intervals quoted on the figure (MHz, min).
    obs_details : observation description used as the figure suptitle.
    pdf : open PdfPages-like object the finished figure is saved into.
    """
    # Frequency Range endpoints (Hz)
    start_freq = freqdata[0]
    end_freq = freqdata[-1]
    channel_width = freqdata[1] - freqdata[0]
    # Fraction of flagged dumps per channel.
    # BUG FIX: np.float was removed in NumPy >= 1.24; use the builtin float.
    flagged_chans = np.sum(flagdata, axis=0, dtype=float)
    # Show where 50% of data is flagged
    flagged_chans = flagged_chans / flagdata.shape[0] > 0.5
    flag_freqs = freqdata[flagged_chans]
    # Set up the figure
    fig = plt.figure(figsize=(8.3, 8.3))
    fig.subplots_adjust(hspace=0.0)
    # Top panel: fractional standard deviation per channel.
    ax1 = plt.subplot(211)
    ax1.axhline(0.005, ls='--', color='red')
    ax1.plot(freqdata, corr_visdata_std / mean_visdata * 100.0)
    ax1.set_yscale('log')
    plt.ylabel('Standard Deviation (% of mean)')
    tstring = 'Spectral Baseline, %s' % baseline
    if pol == 'I':
        tstring += ', Stokes I'
    else:
        tstring += ', %s pol' % pol
    # Add some pertinent information.
    pstring = 'Time average: %4.1f min.\n' % (timeav)
    pstring += 'Frequency average: %4.1f MHz.\n' % (freqav)
    pstring += 'Median standard deviation: %6.4f%%' % np.ma.median(corr_visdata_std / mean_visdata * 100.0)
    plt.figtext(0.5, 0.83, pstring)
    plt.grid()
    plt.title(tstring)
    plt.suptitle(obs_details)
    # Bottom panel: the average spectrum with the standard deviations above it.
    ax2 = plt.subplot(212, sharex=ax1)
    ax2.plot(freqdata, mean_visdata)
    plt.figtext(0.6, 0.47, 'Average spectrum')
    plt.ylabel('Amplitude')
    plt.xlabel('Frequency (MHz)')
    # Overlay the static RFI mask plus channels flagged >50% of the time.
    rfilib.plot_RFI_mask(ax1, main=False, extra=flag_freqs, channelwidth=channel_width)
    rfilib.plot_RFI_mask(ax2, main=False, extra=flag_freqs, channelwidth=channel_width)
    # Keep frequency increasing left-to-right regardless of band direction.
    if end_freq < start_freq:
        plt.xlim((end_freq, start_freq))
    else:
        plt.xlim((start_freq, end_freq))
    # Convert tick labels from Hz to MHz
    ticks = ticker.FuncFormatter(lambda x, pos: '{:4.0f}'.format(x / 1.e6))
    ax2.xaxis.set_major_formatter(ticks)
    plt.grid()
    plt.figtext(0.89, 0.13, git_info(), horizontalalignment='right', fontsize=10)
    pdf.savefig(fig)
    plt.close(fig)
def analyse_spectrum(input_file,output_dir='.',polarisation='HH,VV',baseline=None,target=None,freqav=None,timeav=None,freq_chans=None,correct='spline',flags_file=None,smooth=3):
"""
Plot the mean and standard deviation of the bandpass amplitude for a given target in a file
Inputs
======
polarisation: Comma separated list of polarisations to produce spectrum of, options are I, HH, VV, HV, VH. Default is I.
baseline: Baseline to load (e.g. 'ant1,ant1' for antenna 1 auto-corr), default is first single-dish baseline in file.
target: Target to plot spectrum of, default is the first target in the file.
freqav: Frequency averaging interval in MHz. Default is a bin size that will produce 100 frequency channels.
timeav: Time averageing interval in minutes. Default is the shortest scan length on the selected target.
freq_chans: Range of frequency channels to keep (zero-based, specified as 'start,end', default is 90% of the bandpass.
correct: Method to use to correct the spectrum in each average timestamp. Options are 'spline' - fit | |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import atexit
import functools
import os
import re
import sys
import debtcollector.moves
import fixtures
from oslo_log import log as logging
from oslo_serialization import jsonutils as json
from oslo_utils import importutils
import six
from six.moves import urllib
import testscenarios
import testtools
from tempest import clients
from tempest.common import cred_client
from tempest.common import credentials_factory as credentials
from tempest.common import fixed_network
import tempest.common.generator.valid_generator as valid
import tempest.common.validation_resources as vresources
from tempest import config
from tempest import exceptions
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
LOG = logging.getLogger(__name__)
CONF = config.CONF
idempotent_id = decorators.idempotent_id
def attr(**kwargs):
    """A decorator which applies the testtools attr decorator

    This decorator applies the testtools.testcase.attr if it is in the list of
    attributes to testtools we want to apply.
    """
    def decorator(f):
        attr_type = kwargs.get('type')
        if isinstance(attr_type, str):
            # Single attribute name.
            f = testtools.testcase.attr(attr_type)(f)
        elif isinstance(attr_type, list):
            # Apply each attribute in turn.
            for single_attr in attr_type:
                f = testtools.testcase.attr(single_attr)(f)
        return f
    return decorator
def get_service_list():
    """Map each known service name to its configured availability flag.

    'network' and 'identity' are always considered available.
    """
    available = CONF.service_available
    return {
        'compute': available.nova,
        'image': available.glance,
        'baremetal': available.ironic,
        'volume': available.cinder,
        'network': True,
        'identity': True,
        'object_storage': available.swift,
    }
def services(*args):
    """A decorator used to set an attr for each service used in a test case

    This decorator applies a testtools attr for each service that gets
    exercised by a test case.
    """
    def decorator(f):
        valid_services = ('compute', 'image', 'baremetal', 'volume',
                          'network', 'identity', 'object_storage')
        # Reject unknown service tags up front, at decoration time.
        for service in args:
            if service not in valid_services:
                raise exceptions.InvalidServiceTag('%s is not a valid '
                                                   'service' % service)
        attr(type=list(args))(f)

        @functools.wraps(f)
        def wrapper(self, *func_args, **func_kwargs):
            # Skip the test if any required service is unavailable.
            service_list = get_service_list()
            for service in args:
                if service_list[service]:
                    continue
                msg = 'Skipped because the %s service is not available' % (
                    service)
                raise testtools.TestCase.skipException(msg)
            return f(self, *func_args, **func_kwargs)
        return wrapper
    return decorator
def stresstest(**kwargs):
    """Add stress test decorator

    For all functions with this decorator a attr stress will be
    set automatically.

    @param class_setup_per: allowed values are application, process, action
        ``application``: once in the stress job lifetime
        ``process``: once in the worker process lifetime
        ``action``: on each action
    @param allow_inheritance: allows inheritance of this attribute
    """
    def decorator(f):
        # Defaults: setup once per worker process, no inheritance.
        f.st_class_setup_per = kwargs.get('class_setup_per', 'process')
        f.st_allow_inheritance = kwargs.get('allow_inheritance', False)
        attr(type='stress')(f)
        return f
    return decorator
def requires_ext(**kwargs):
    """A decorator to skip tests if an extension is not enabled

    @param extension
    @param service
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*func_args, **func_kwargs):
            extension = kwargs['extension']
            service = kwargs['service']
            # Run the test only when the extension is enabled; skip otherwise.
            if is_extension_enabled(extension, service):
                return func(*func_args, **func_kwargs)
            raise testtools.TestCase.skipException(
                "Skipped because %s extension: %s is not enabled" % (
                    service, extension))
        return wrapper
    return decorator
def is_extension_enabled(extension_name, service):
    """A function that will check the list of enabled extensions from config

    Returns True when the service's extension list is 'all' or contains
    the named extension; False when the list is empty or lacks it.
    """
    config_dict = {
        'compute': CONF.compute_feature_enabled.api_extensions,
        'volume': CONF.volume_feature_enabled.api_extensions,
        'network': CONF.network_feature_enabled.api_extensions,
        'object': CONF.object_storage_feature_enabled.discoverable_apis,
        'identity': CONF.identity_feature_enabled.api_extensions
    }
    enabled_extensions = config_dict[service]
    if not enabled_extensions:
        return False
    return enabled_extensions[0] == 'all' or extension_name in enabled_extensions
def is_scheduler_filter_enabled(filter_name):
    """Check the list of enabled compute scheduler filters from config. """
    enabled_filters = CONF.compute_feature_enabled.scheduler_available_filters
    if not enabled_filters:
        return False
    # 'all' anywhere in the list enables every filter.
    return 'all' in enabled_filters or filter_name in enabled_filters
# Test classes pending teardown. BaseTestCase.tearDownClass discards a class
# from this set; how classes are added is not visible in this chunk.
at_exit_set = set()


def validate_tearDownClass():
    # At interpreter exit, report any classes still pending: their
    # tearDownClass never called through to the base implementation.
    if at_exit_set:
        LOG.error(
            "tearDownClass does not call the super's "
            "tearDownClass in these classes: \n"
            + str(at_exit_set))


atexit.register(validate_tearDownClass)
class BaseTestCase(testtools.testcase.WithAttributes,
                   testtools.TestCase):
    """The test base class defines Tempest framework for class level fixtures.

    `setUpClass` and `tearDownClass` are defined here and cannot be overwritten
    by subclasses (enforced via hacking rule T105).

    Set-up is split in a series of steps (setup stages), which can be
    overwritten by test classes. Set-up stages are:
    - skip_checks
    - setup_credentials
    - setup_clients
    - resource_setup

    Tear-down is also split in a series of steps (teardown stages), which are
    stacked for execution only if the corresponding setup stage had been
    reached during the setup phase. Tear-down stages are:
    - clear_credentials (defined in the base test class)
    - resource_cleanup
    """

    # Set to True by setUpClass.
    setUpClassCalled = False
    _service = None
    # NOTE(andreaf) credentials holds a list of the credentials to be allocated
    # at class setup time. Credential types can be 'primary', 'alt', 'admin' or
    # a list of roles - the first element of the list being a label, and the
    # rest the actual roles
    credentials = []
    # Resources required to validate a server using ssh
    validation_resources = {}
    network_resources = {}
    # NOTE(sdague): log_format is defined inline here instead of using the oslo
    # default because going through the config path recouples config to the
    # stress tests too early, and depending on testr order will fail unit tests
    log_format = ('%(asctime)s %(process)d %(levelname)-8s '
                  '[%(name)s] %(message)s')
    # Client manager class to use in this test case.
    client_manager = clients.Manager
    # A way to adjust slow test classes
    TIMEOUT_SCALING_FACTOR = 1
    @classmethod
    def setUpClass(cls):
        """Run the setup stages, registering teardowns as stages begin.

        Note: each stage's teardown is pushed *before* the stage runs, so
        a stage that fails part-way through still gets cleaned up.
        """
        # It should never be overridden by descendants
        if hasattr(super(BaseTestCase, cls), 'setUpClass'):
            super(BaseTestCase, cls).setUpClass()
        cls.setUpClassCalled = True
        # Stack of (name, callable) popped in reverse order at teardown.
        cls.teardowns = []
        # All the configuration checks that may generate a skip
        cls.skip_checks()
        try:
            # Allocation of all required credentials and client managers
            cls.teardowns.append(('credentials', cls.clear_credentials))
            cls.setup_credentials()
            # Shortcuts to clients
            cls.setup_clients()
            # Additional class-wide test resources
            cls.teardowns.append(('resources', cls.resource_cleanup))
            cls.resource_setup()
        except Exception:
            # A failed stage tears down whatever was already registered,
            # then the original exception is re-raised.
            etype, value, trace = sys.exc_info()
            LOG.info("%s raised in %s.setUpClass. Invoking tearDownClass." % (
                etype, cls.__name__))
            cls.tearDownClass()
            try:
                six.reraise(etype, value, trace)
            finally:
                del trace  # to avoid circular refs
    @classmethod
    def tearDownClass(cls):
        """Pop and run the registered teardown stages in reverse order.

        Any exception already in flight takes precedence; otherwise the
        first exception raised by a teardown stage is re-raised at the end.
        """
        at_exit_set.discard(cls)
        # It should never be overridden by descendants
        if hasattr(super(BaseTestCase, cls), 'tearDownClass'):
            super(BaseTestCase, cls).tearDownClass()
        # Save any existing exception, we always want to re-raise the original
        # exception only
        etype, value, trace = sys.exc_info()
        # If there was no exception during setup we shall re-raise the first
        # exception in teardown
        re_raise = (etype is None)
        while cls.teardowns:
            name, teardown = cls.teardowns.pop()
            # Catch any exception in tearDown so we can re-raise the original
            # exception at the end
            try:
                teardown()
            except Exception as te:
                sys_exec_info = sys.exc_info()
                tetype = sys_exec_info[0]
                # TODO(andreaf): Till we have the ability to cleanup only
                # resources that were successfully setup in resource_cleanup,
                # log AttributeError as info instead of exception.
                if tetype is AttributeError and name == 'resources':
                    LOG.info("tearDownClass of %s failed: %s" % (name, te))
                else:
                    LOG.exception("teardown of %s failed: %s" % (name, te))
                if not etype:
                    etype, value, trace = sys_exec_info
        # If exceptions were raised during teardown, and not before, re-raise
        # the first one
        if re_raise and etype is not None:
            try:
                six.reraise(etype, value, trace)
            finally:
                del trace  # to avoid circular refs
@classmethod
def skip_checks(cls):
"""Class level skip checks.
Subclasses verify in here all conditions that might prevent the
execution of the entire test class.
Checks implemented here may not make use API calls, and should rely on
configuration alone.
In general skip checks that require an API call are discouraged.
If one is really needed it may be implemented either in the
resource_setup or at test level.
"""
identity_version = cls.get_identity_version()
if 'admin' in cls.credentials and not credentials.is_admin_available(
identity_version=identity_version):
msg = "Missing Identity Admin API credentials in configuration."
raise cls.skipException(msg)
if 'alt' in cls.credentials and not credentials.is_alt_available(
identity_version=identity_version):
msg = "Missing a 2nd set of API credentials in configuration."
raise cls.skipException(msg)
if hasattr(cls, 'identity_version'):
if cls.identity_version == 'v2':
if not CONF.identity_feature_enabled.api_v2:
raise cls.skipException("Identity api v2 is not enabled")
elif cls.identity_version == 'v3':
if not CONF.identity_feature_enabled.api_v3:
raise cls.skipException("Identity api v3 is not enabled")
@classmethod
def setup_credentials(cls):
"""Allocate credentials and create the client managers from them.
For every element of credentials param function creates tenant/user,
Then it creates client manager for that credential.
Network related | |
= nTurbines
self.direction_id = direction_id
self.use_rotor_components = use_rotor_components
self.cp_points = cp_points
self.cp_curve_spline = cp_curve_spline
# set finite difference options (only used for testing)
self.deriv_options['check_form'] = 'central'
self.deriv_options['check_step_size'] = 1.0e-6
self.deriv_options['check_step_calc'] = 'relative'
if not differentiable:
self.deriv_options['type'] = 'fd'
self.deriv_options['form'] = 'forward'
self.add_param('air_density', 1.1716, units='kg/(m*m*m)', desc='air density in free stream')
self.add_param('rotorDiameter', np.zeros(nTurbines) + 126.4, units='m', desc='rotor diameters of all turbine')
self.add_param('Cp', np.zeros(nTurbines)+(0.7737/0.944) * 4.0 * 1.0/3.0 * np.power((1 - 1.0/3.0), 2), desc='power coefficient for all turbines')
self.add_param('generatorEfficiency', np.zeros(nTurbines)+0.944, desc='generator efficiency of all turbines')
self.add_param('wtVelocity%i' % direction_id, np.zeros(nTurbines), units='m/s',
desc='effective hub velocity for each turbine')
self.add_param('rated_power', np.ones(nTurbines)*5000., units='kW',
desc='rated power for each turbine', pass_by_obj=True)
self.add_param('cut_in_speed', np.ones(nTurbines) * 3.0, units='m/s',
desc='cut-in speed for each turbine', pass_by_obj=True)
self.add_param('cp_curve_cp', np.zeros(cp_points),
desc='cp as a function of wind speed', pass_by_obj=True)
self.add_param('cp_curve_wind_speed', np.ones(cp_points), units='m/s',
desc='wind speeds corresponding to cp curve cp points', pass_by_obj=True)
# self.add_param('cp_curve_spline', None, units='m/s',
# desc='spline corresponding to cp curve', pass_by_obj=True)
# for power curve calculation
self.add_param('use_power_curve_definition', val=False, pass_by_obj=True)
self.add_param('rated_wind_speed', np.ones(nTurbines)*11.4, units='m/s',
desc='rated wind speed for each turbine', pass_by_obj=True)
self.add_param('cut_out_speed', np.ones(nTurbines) * 25.0, units='m/s',
desc='cut-out speed for each turbine', pass_by_obj=True)
# self.add_param('cp_curve_spline', None, units='m/s',
# desc='spline corresponding to cp curve', pass_by_obj=True)
# outputs
self.add_output('wtPower%i' % direction_id, np.zeros(nTurbines), units='kW', desc='power output of each turbine')
self.add_output('dir_power%i' % direction_id, 0.0, units='kW', desc='total power output of the wind farm')
    def solve_nonlinear(self, params, unknowns, resids):
        """Compute per-turbine power and total farm power for this direction.

        Two modes:
        - params['use_power_curve_definition'] True: analytic power curve --
          cubic ramp between cut-in and rated wind speed, flat at rated
          power up to cut-out, zero elsewhere.
        - otherwise: P = eta * 0.5 * rho * A * Cp * V^3 (converted to kW),
          with Cp optionally interpolated from a table or evaluated from a
          spline, then clipped at rated power and zeroed below cut-in speed
          (unless rotor components are used).

        Results are written to unknowns['wtPower<dir>'] (per turbine, kW)
        and unknowns['dir_power<dir>'] (farm total, kW).
        """
        # obtain necessary inputs
        use_rotor_components = self.use_rotor_components
        direction_id = self.direction_id
        nTurbines = self.nTurbines
        wtVelocity = self.params['wtVelocity%i' % direction_id]
        rated_power = params['rated_power']
        cut_in_speed = params['cut_in_speed']
        air_density = params['air_density']
        rotorArea = 0.25*np.pi*np.power(params['rotorDiameter'], 2)
        Cp = params['Cp']
        generatorEfficiency = params['generatorEfficiency']
        cp_curve_cp = params['cp_curve_cp']
        cp_curve_wind_speed = params['cp_curve_wind_speed']
        # The spline is stored on the component, not passed as a param.
        cp_curve_spline = self.cp_curve_spline
        if params['use_power_curve_definition']:
            # obtain necessary inputs
            rated_wind_speed = params['rated_wind_speed']
            cut_out_speed = params['cut_out_speed']
            wtPower = np.zeros(nTurbines)
            # Check to see if turbine produces power for experienced wind speed
            for n in np.arange(0, nTurbines):
                # If we're between the cut-in and rated wind speeds
                if ((cut_in_speed[n] <= wtVelocity[n])
                        and (wtVelocity[n] < rated_wind_speed[n])):
                    # Calculate the curve's power (cubic ramp to rated)
                    wtPower[n] = rated_power[n] * ((wtVelocity[n] - cut_in_speed[n])
                                                   / (rated_wind_speed[n] - cut_in_speed[n])) ** 3
                # If we're between the rated and cut-out wind speeds
                elif ((rated_wind_speed[n] <= wtVelocity[n])
                        and (wtVelocity[n] < cut_out_speed[n])):
                    # Produce the rated power
                    wtPower[n] = rated_power[n]
            # calculate total power for this direction
            dir_power = np.sum(wtPower)
        else:
            # Resolve Cp from the table or spline when one was supplied.
            if self.cp_points > 1.:
                if cp_curve_spline is None:
                    # Linear interpolation of the tabulated Cp curve
                    # (mutates the Cp param array in place).
                    for i in np.arange(0, nTurbines):
                        Cp[i] = np.interp(wtVelocity[i], cp_curve_wind_speed, cp_curve_cp)
                else:
                    # Evaluate the provided Cp spline at each velocity.
                    Cp = cp_curve_spline(wtVelocity)
            # calculate initial values for wtPower (W)
            wtPower = generatorEfficiency*(0.5*air_density*rotorArea*Cp*np.power(wtVelocity, 3))
            # adjust units from W to kW
            wtPower /= 1000.0
            # adjust wt power based on rated power
            if not use_rotor_components:
                for i in range(0, nTurbines):
                    if wtPower[i] >= rated_power[i]:
                        wtPower[i] = rated_power[i]
            # No power below the cut-in speed.
            for i in range(0, nTurbines):
                if wtVelocity[i] < cut_in_speed[i]:
                    wtPower[i] = 0.0
            # calculate total power for this direction
            dir_power = np.sum(wtPower)
        # pass out results
        unknowns['wtPower%i' % direction_id] = wtPower
        unknowns['dir_power%i' % direction_id] = dir_power
    def linearize(self, params, unknowns, resids):
        """Provide analytic derivatives of turbine and farm power.

        Returns a Jacobian dict keyed by (output, input) pairs for
        wtPower<dir> and dir_power<dir> w.r.t. wtVelocity<dir>,
        rotorDiameter and Cp, mirroring the two modes of solve_nonlinear.
        """
        # obtain necessary inputs
        direction_id = self.direction_id
        use_rotor_components = self.use_rotor_components
        nTurbines = self.nTurbines
        wtVelocity = self.params['wtVelocity%i' % direction_id]
        air_density = params['air_density']
        rotorDiameter = params['rotorDiameter']
        rotorArea = 0.25*np.pi*np.power(rotorDiameter, 2)
        Cp = params['Cp']
        generatorEfficiency = params['generatorEfficiency']
        rated_power = params['rated_power']
        cut_in_speed = params['cut_in_speed']
        wtPower = unknowns['wtPower%i' % direction_id]
        cp_curve_cp = params['cp_curve_cp']
        cp_curve_wind_speed = params['cp_curve_wind_speed']
        cp_curve_spline = self.cp_curve_spline
        if params['use_power_curve_definition']:
            # obtain necessary inputs
            rated_wind_speed = params['rated_wind_speed']
            cut_out_speed = params['cut_out_speed']
            dwtPower_dwtVelocity = np.zeros([nTurbines, nTurbines])
            # Check to see if turbine produces power for experienced wind speed
            for n in np.arange(0, nTurbines):
                # If we're between the cut-in and rated wind speeds
                if ((cut_in_speed[n] <= wtVelocity[n])
                        and (wtVelocity[n] < rated_wind_speed[n])):
                    # Calculate the derivative of the power curve
                    dwtPower_dwtVelocity[n, n] = (3. * rated_power[n] * ((wtVelocity[n] - cut_in_speed[n])
                                                                         / (rated_wind_speed[n] - cut_in_speed[
                        n])) ** 2) * (1. / (rated_wind_speed[n] - cut_in_speed[n]))
                # If we're between the rated and cut-out wind speeds
                elif ((rated_wind_speed[n] <= wtVelocity[n])
                        and (wtVelocity[n] < cut_out_speed[n])):
                    # Rated-power plateau: derivative is zero
                    dwtPower_dwtVelocity[n, n] = 0.0
            # calculate total power for this direction
            ddir_power_dwtVelocity = np.matmul(dwtPower_dwtVelocity, np.ones(nTurbines))
            J = {}
            # populate Jacobian dict
            J['wtPower%i' % direction_id, 'wtVelocity%i' % direction_id] = dwtPower_dwtVelocity
            J['wtPower%i' % direction_id, 'rotorDiameter'] = np.zeros([nTurbines, nTurbines])
            J['wtPower%i' % direction_id, 'Cp'] = np.zeros([nTurbines, nTurbines])
            J['dir_power%i' % direction_id, 'wtVelocity%i' % direction_id] = np.reshape(ddir_power_dwtVelocity,
                                                                                        [1, nTurbines])
            J['dir_power%i' % direction_id, 'rotorDiameter'] = np.zeros([1, nTurbines])
            J['dir_power%i' % direction_id, 'Cp'] = np.zeros([1, nTurbines])
        else:
            dCpdV = np.zeros_like(Cp)
            if self.cp_points > 1. and self.cp_curve_spline is None:
                # Tabulated Cp: central finite difference for dCp/dV.
                for i in np.arange(0, nTurbines):
                    Cp[i] = np.interp(wtVelocity[i], cp_curve_wind_speed, cp_curve_cp)
                    dv = 1E-6
                    dCpdV[i] = (np.interp(wtVelocity[i]+dv, cp_curve_wind_speed, cp_curve_cp) -
                                np.interp(wtVelocity[i]- dv, cp_curve_wind_speed, cp_curve_cp))/(2.*dv)
            elif self.cp_curve_spline is not None:
                # get Cp and its analytic derivative from the spline
                dCpdV_spline = cp_curve_spline.derivative()
                Cp = np.zeros_like(wtVelocity)
                dCpdV = np.zeros_like(wtVelocity)
                for i in np.arange(0, len(wtVelocity)):
                    Cp[i] = cp_curve_spline(wtVelocity[i])
                    dCpdV[i] = dCpdV_spline(wtVelocity[i])
            # calculate initial gradient values (product rule for Cp(V)*V^3)
            dwtPower_dwtVelocity = np.eye(nTurbines)*0.5*generatorEfficiency*air_density*rotorArea*\
                                   (3.*Cp*np.power(wtVelocity, 2) + np.power(wtVelocity,3)*dCpdV)
            dwtPower_dCp = np.eye(nTurbines)*generatorEfficiency*(0.5*air_density*rotorArea*np.power(wtVelocity, 3))
            dwtPower_drotorDiameter = np.eye(nTurbines)*generatorEfficiency*(0.5*air_density*(0.5*np.pi*rotorDiameter)*Cp *
                                                                             np.power(wtVelocity, 3))
            # adjust gradients for unit conversion from W to kW
            dwtPower_dwtVelocity /= 1000.
            dwtPower_dCp /= 1000.
            dwtPower_drotorDiameter /= 1000.
            # set gradients for turbines above rated power to zero
            for i in range(0, nTurbines):
                if wtPower[i] >= rated_power[i]:
                    dwtPower_dwtVelocity[i][i] = 0.0
                    dwtPower_dCp[i][i] = 0.0
                    dwtPower_drotorDiameter[i][i] = 0.0
            # set gradients for turbines below cut-in speed to zero
            for i in range(0, nTurbines):
                if wtVelocity[i] < cut_in_speed[i]:
                    dwtPower_dwtVelocity[i][i] = 0.0
                    dwtPower_dCp[i][i] = 0.0
                    dwtPower_drotorDiameter[i][i] = 0.0
            # compile elements of Jacobian (farm power sums over turbines)
            ddir_power_dwtVelocity = np.array([np.sum(dwtPower_dwtVelocity, 0)])
            ddir_power_dCp = np.array([np.sum(dwtPower_dCp, 0)])
            ddir_power_drotorDiameter = np.array([np.sum(dwtPower_drotorDiameter, 0)])
            # initialize Jacobian dict
            J = {}
            # populate Jacobian dict
            J['wtPower%i' % direction_id, 'wtVelocity%i' % direction_id] = dwtPower_dwtVelocity
            J['wtPower%i' % direction_id, 'Cp'] = dwtPower_dCp
            J['wtPower%i' % direction_id, 'rotorDiameter'] = dwtPower_drotorDiameter
            J['dir_power%i' % direction_id, 'wtVelocity%i' % direction_id] = ddir_power_dwtVelocity
            J['dir_power%i' % direction_id, 'Cp'] = ddir_power_dCp
            J['dir_power%i' % direction_id, 'rotorDiameter'] = ddir_power_drotorDiameter
        return J
class PositionConstraintComp(Component):
""" Calculates spacing and boundary constraints
Written by <NAME>, 2019
"""
def __init__(self, nTurbines, nBoundaries):
super(PositionConstraintComp, self).__init__()
self.nTurbines = nTurbines
# Explicitly size input arrays
self.add_param('turbineX', val=np.zeros(nTurbines))
self.add_param('turbineY', val=np.zeros(nTurbines))
self.add_param('rotorDiameter', val=np.zeros(nTurbines))
self.add_param('boundaryVertices', val=np.zeros((nBoundaries,2)))
self.add_param('boundaryNormals', val=np.zeros((nBoundaries,2)))
self.add_output('spacing_constraint', val=np.zeros((nTurbines-1)*nTurbines/2), pass_by_object=True)
self.add_output('boundary_constraint', val=np.zeros(nTurbines), pass_by_object=True)
def solve_nonlinear(self, params, unknowns, resids):
global nCalls_con
nCalls_con += 1
turbineX = params['turbineX']
turbineY = params['turbineY']
rotorDiameter = params['rotorDiameter']
nTurbines = turbineX.size()
boundaryVertices = params['boundaryVertices']
boundaryNormals = params['boundaryNormals']
dx = np.eye(self.nTurbines)
dy = np.zeros((self.nTurbines,self.nTurbines))
_,ss_dx,_,bd_dx = position_constraints.constraints_position_dv(turbineX,dx,turbineY,dy,
boundaryVertices,boundaryNormals)
dx = np.zeros((self.nTurbines,self.nTurbines))
dy = np.eye(self.nTurbines)
ss,ss_dy,bd,bd_dy = position_constraints.constraints_position_dv(turbineX,dx,turbineY,dy,
boundaryVertices,boundaryNormals)
bounds = np.zeros(nTurbines)
index = np.zeros(nTurbines)
for i in range(nTurbines):
bounds[i] = np.min(bd[i])
index[i] = np.argmin(bd[i])
self.index = index
self.ss_dx = ss_dx
self.ss_dy = ss_dy
self.bd_dx = bd_dx
self.bd_dy = bd_dy
unknowns['spacing_constraint'] = ss-(2.*rotorDiameter[0])**2
unknowns['boundary_constraint'] = bounds
def linearize(self, params, unknowns, resids):
nTurbines = params['turbineX'].size()
# initialize Jacobian dict
J = {}
# populate Jacobian dict
J[('spacing_constraint', 'turbineX')] = self.ss_dx.T
J[('spacing_constraint', 'turbineY')] = self.ss_dy.T
db_dx = np.zeros((self.nTurbines,self.nTurbines))
db_dy = np.zeros((self.nTurbines,self.nTurbines))
for i in range(nTurbines):
db_dx[i][i] = self.bd_dx[i][i][self.index[i]]
db_dy[i][i] = self.bd_dy[i][i][self.index[i]]
J[('boundary_constraint','turbineX')] = | |
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.tables = []
(_etype3, _size0) = iprot.readListBegin()
for _i4 in range(_size0):
_elem5 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
self.tables.append(_elem5)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I64:
self.usage = iprot.readI64()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
    """Serialize this DiskUsage struct to *oprot*.

    Thrift-generated: the exact call sequence defines the wire bytes —
    do not reorder.
    """
    # Fast path: C-accelerated encoder when available.
    if oprot._fast_encode is not None and self.thrift_spec is not None:
        oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
        return
    oprot.writeStructBegin('DiskUsage')
    if self.tables is not None:
        oprot.writeFieldBegin('tables', TType.LIST, 1)
        oprot.writeListBegin(TType.STRING, len(self.tables))
        for iter6 in self.tables:
            # Python 2 unicode must be encoded before writing.
            oprot.writeString(iter6.encode('utf-8') if sys.version_info[0] == 2 else iter6)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
    if self.usage is not None:
        oprot.writeFieldBegin('usage', TType.I64, 2)
        oprot.writeI64(self.usage)
        oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
def validate(self):
    """Thrift-generated no-op: DiskUsage has no required fields to check."""
    return
def __repr__(self):
    """Debug representation listing every instance attribute."""
    attrs = ', '.join('%s=%r' % item for item in self.__dict__.items())
    return '%s(%s)' % (self.__class__.__name__, attrs)
def __eq__(self, other):
    """Structs are equal iff they share a class and all attribute values."""
    if not isinstance(other, self.__class__):
        return False
    return self.__dict__ == other.__dict__
def __ne__(self, other):
    """Inverse of __eq__ (required explicitly on Python 2)."""
    equal = (self == other)
    return not equal
class KeyValue(object):
    """A single key/value pair as returned by a scan.

    Thrift-generated; regenerate from the IDL rather than hand-editing.

    Attributes:
     - key
     - value
    """

    # (field id, wire type, name, type args, default) indexed by field id.
    thrift_spec = (
        None,  # 0
        (1, TType.STRUCT, 'key', (Key, Key.thrift_spec), None, ),  # 1
        (2, TType.STRING, 'value', 'BINARY', None, ),  # 2
    )

    def __init__(self, key=None, value=None,):
        self.key = key
        self.value = value

    def read(self, iprot):
        """Deserialize this struct from *iprot* (fast C path when available)."""
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.key = Key()
                    self.key.read(iprot)
                else:
                    # Unexpected wire type: skip for forward compatibility.
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRING:
                    self.value = iprot.readBinary()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; call order defines wire bytes."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('KeyValue')
        if self.key is not None:
            oprot.writeFieldBegin('key', TType.STRUCT, 1)
            self.key.write(oprot)
            oprot.writeFieldEnd()
        if self.value is not None:
            oprot.writeFieldBegin('value', TType.STRING, 2)
            oprot.writeBinary(self.value)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class ScanResult(object):
    """One batch of scan results plus a continuation flag.

    Thrift-generated; regenerate from the IDL rather than hand-editing.

    Attributes:
     - results
     - more
    """

    # (field id, wire type, name, type args, default) indexed by field id.
    thrift_spec = (
        None,  # 0
        (1, TType.LIST, 'results', (TType.STRUCT, (KeyValue, KeyValue.thrift_spec), False), None, ),  # 1
        (2, TType.BOOL, 'more', None, None, ),  # 2
    )

    def __init__(self, results=None, more=None,):
        self.results = results
        self.more = more

    def read(self, iprot):
        """Deserialize this struct from *iprot* (fast C path when available)."""
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.LIST:
                    self.results = []
                    (_etype10, _size7) = iprot.readListBegin()
                    for _i11 in range(_size7):
                        _elem12 = KeyValue()
                        _elem12.read(iprot)
                        self.results.append(_elem12)
                    iprot.readListEnd()
                else:
                    # Unexpected wire type: skip for forward compatibility.
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.BOOL:
                    self.more = iprot.readBool()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; call order defines wire bytes."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('ScanResult')
        if self.results is not None:
            oprot.writeFieldBegin('results', TType.LIST, 1)
            oprot.writeListBegin(TType.STRUCT, len(self.results))
            for iter13 in self.results:
                iter13.write(oprot)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        if self.more is not None:
            oprot.writeFieldBegin('more', TType.BOOL, 2)
            oprot.writeBool(self.more)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class Range(object):
    """A key range with optional inclusive/exclusive endpoints.

    Thrift-generated; regenerate from the IDL rather than hand-editing.

    Attributes:
     - start
     - startInclusive
     - stop
     - stopInclusive
    """

    # (field id, wire type, name, type args, default) indexed by field id.
    thrift_spec = (
        None,  # 0
        (1, TType.STRUCT, 'start', (Key, Key.thrift_spec), None, ),  # 1
        (2, TType.BOOL, 'startInclusive', None, None, ),  # 2
        (3, TType.STRUCT, 'stop', (Key, Key.thrift_spec), None, ),  # 3
        (4, TType.BOOL, 'stopInclusive', None, None, ),  # 4
    )

    def __init__(self, start=None, startInclusive=None, stop=None, stopInclusive=None,):
        self.start = start
        self.startInclusive = startInclusive
        self.stop = stop
        self.stopInclusive = stopInclusive

    def read(self, iprot):
        """Deserialize this struct from *iprot* (fast C path when available)."""
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.start = Key()
                    self.start.read(iprot)
                else:
                    # Unexpected wire type: skip for forward compatibility.
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.BOOL:
                    self.startInclusive = iprot.readBool()
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.STRUCT:
                    self.stop = Key()
                    self.stop.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 4:
                if ftype == TType.BOOL:
                    self.stopInclusive = iprot.readBool()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; call order defines wire bytes."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('Range')
        if self.start is not None:
            oprot.writeFieldBegin('start', TType.STRUCT, 1)
            self.start.write(oprot)
            oprot.writeFieldEnd()
        if self.startInclusive is not None:
            oprot.writeFieldBegin('startInclusive', TType.BOOL, 2)
            oprot.writeBool(self.startInclusive)
            oprot.writeFieldEnd()
        if self.stop is not None:
            oprot.writeFieldBegin('stop', TType.STRUCT, 3)
            self.stop.write(oprot)
            oprot.writeFieldEnd()
        if self.stopInclusive is not None:
            oprot.writeFieldBegin('stopInclusive', TType.BOOL, 4)
            oprot.writeBool(self.stopInclusive)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class ScanColumn(object):
    """A column family (and optional qualifier) to restrict a scan to.

    Thrift-generated; regenerate from the IDL rather than hand-editing.

    Attributes:
     - colFamily
     - colQualifier
    """

    # (field id, wire type, name, type args, default) indexed by field id.
    thrift_spec = (
        None,  # 0
        (1, TType.STRING, 'colFamily', 'BINARY', None, ),  # 1
        (2, TType.STRING, 'colQualifier', 'BINARY', None, ),  # 2
    )

    def __init__(self, colFamily=None, colQualifier=None,):
        self.colFamily = colFamily
        self.colQualifier = colQualifier

    def read(self, iprot):
        """Deserialize this struct from *iprot* (fast C path when available)."""
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.colFamily = iprot.readBinary()
                else:
                    # Unexpected wire type: skip for forward compatibility.
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRING:
                    self.colQualifier = iprot.readBinary()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; call order defines wire bytes."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('ScanColumn')
        if self.colFamily is not None:
            oprot.writeFieldBegin('colFamily', TType.STRING, 1)
            oprot.writeBinary(self.colFamily)
            oprot.writeFieldEnd()
        if self.colQualifier is not None:
            oprot.writeFieldBegin('colQualifier', TType.STRING, 2)
            oprot.writeBinary(self.colQualifier)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class IteratorSetting(object):
"""
Attributes:
- priority
- name
- iteratorClass
- properties
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'priority', None, None, ), # 1
(2, TType.STRING, 'name', 'UTF8', None, ), # 2
(3, TType.STRING, 'iteratorClass', 'UTF8', None, ), # 3
(4, TType.MAP, 'properties', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 4
)
def __init__(self, priority=None, name=None, iteratorClass=None, properties=None,):
    """Thrift-generated constructor; all fields are optional."""
    self.priority = priority
    self.name = name
    self.iteratorClass = iteratorClass
    self.properties = properties
def read(self, iprot):
    """Deserialize this IteratorSetting from *iprot*.

    Thrift-generated: the call sequence mirrors the wire format exactly.
    """
    # Fast path: C-accelerated decoder when available.
    if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
        iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
        return
    iprot.readStructBegin()
    while True:
        (fname, ftype, fid) = iprot.readFieldBegin()
        if ftype == TType.STOP:
            break
        if fid == 1:
            if ftype == TType.I32:
                self.priority = iprot.readI32()
            else:
                # Unexpected wire type: skip for forward compatibility.
                iprot.skip(ftype)
        elif fid == 2:
            if ftype == TType.STRING:
                # Python 2 returns bytes and must decode; Python 3 returns str.
                self.name = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
            else:
                iprot.skip(ftype)
        elif fid == 3:
            if ftype == TType.STRING:
                self.iteratorClass = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
            else:
                iprot.skip(ftype)
        elif fid == 4:
            if ftype == TType.MAP:
                self.properties = {}
                (_ktype15, _vtype16, _size14) = iprot.readMapBegin()
                for _i18 in range(_size14):
                    _key19 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                    _val20 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                    self.properties[_key19] = _val20
                iprot.readMapEnd()
            else:
                iprot.skip(ftype)
        else:
            iprot.skip(ftype)
        iprot.readFieldEnd()
    iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('IteratorSetting')
if self.priority is not None:
oprot.writeFieldBegin('priority', TType.I32, 1)
oprot.writeI32(self.priority)
oprot.writeFieldEnd()
if self.name is not None:
oprot.writeFieldBegin('name', TType.STRING, 2)
oprot.writeString(self.name.encode('utf-8') if sys.version_info[0] == 2 else self.name)
oprot.writeFieldEnd()
if self.iteratorClass is not None:
oprot.writeFieldBegin('iteratorClass', TType.STRING, 3)
oprot.writeString(self.iteratorClass.encode('utf-8') if sys.version_info[0] == 2 else self.iteratorClass)
oprot.writeFieldEnd()
if self.properties is not None:
oprot.writeFieldBegin('properties', TType.MAP, 4)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.properties))
for kiter21, viter22 in self.properties.items():
oprot.writeString(kiter21.encode('utf-8') if sys.version_info[0] == 2 else kiter21)
| |
[
'hbase-site/hbase.hregion.memstore.flush.size'],
'hbase-site/hbase.client.scanner.caching': [
'hbase-site/hbase.client.scanner.caching',
'global/client_scannercaching'],
'hbase-site/zookeeper.session.timeout': [
'hbase-site/zookeeper.session.timeout',
'global/zookeeper_sessiontimeout'],
'hbase-site/hbase.client.keyvalue.maxsize': [
'hbase-site/hbase.client.keyvalue.maxsize',
'global/hfile_max_keyvalue_size'],
'hdfs-site/dfs.support.append': [
'hdfs-site/dfs.support.append',
'hbase-site/dfs.support.append',
'global/hdfs_support_append'],
'hbase-site/dfs.client.read.shortcircuit': [
'hbase-site/dfs.client.read.shortcircuit',
'global/hdfs_enable_shortcircuit_read']
}
def __init__(self):
    """Register the HBASE service and its hbase-site configuration set."""
    super(HBaseService, self).__init__(
        HBaseService.get_service_id())
    self.configurations.add('hbase-site')
@classmethod
def get_service_id(cls):
    """Return the Ambari service identifier for HBase."""
    return 'HBASE'
def validate(self, cluster_spec, cluster):
    """Require exactly one HBASE_MASTER in the cluster topology."""
    # check for a single HBASE_SERVER
    count = cluster_spec.get_deployed_node_group_count('HBASE_MASTER')
    if count != 1:
        raise ex.InvalidComponentCountException('HBASE_MASTER', 1, count)
def register_service_urls(self, cluster_spec, url_info):
    """Publish the HBase master web endpoints into url_info."""
    master_host = cluster_spec.determine_component_hosts('HBASE_MASTER').pop()
    master_ip = master_host.management_ip
    hbase_config = cluster_spec.configurations['hbase-site']
    info_port = hbase_config['hbase.master.info.port']
    # All endpoints live on the master's info port.
    base_url = 'http://%s:%s' % (master_ip, info_port)
    url_info['HBase'] = {
        'Web UI': base_url + '/master-status',
        'Logs': base_url + '/logs',
        'Zookeeper Info': base_url + '/zk.jsp',
        'JMX': base_url + '/jmx',
        'Debug Dump': base_url + '/dump',
        'Thread Stacks': base_url + '/stacks'
    }
    return url_info
def register_user_input_handlers(self, ui_handlers):
    """Map user-editable property names to their update handlers."""
    # Every mapped property uses the generic multi-target handler ...
    for prop_name in self.property_map:
        ui_handlers[prop_name] = (
            self._handle_config_property_update)
    # ... except the root dir, which needs path extraction.
    ui_handlers['hbase-site/hbase.rootdir'] = (
        self._handle_user_property_root_dir)
def _handle_config_property_update(self, user_input, configurations):
    """Propagate one user-supplied value to all its mapped config targets."""
    self._update_config_values(configurations, user_input.value,
                               self.property_map[user_input.config.name])
def _handle_user_property_root_dir(self, user_input, configurations):
    """Set hbase.rootdir and derive the path-only global value.

    The value must look like ``hdfs://<authority>/<path>``; group(3)
    captures the leading-slash path portion.
    """
    configurations['hbase-site']['hbase.rootdir'] = user_input.value
    match = re.search('(^hdfs://)(.*?)(/.*)', user_input.value)
    if match:
        configurations['global']['hbase_hdfs_root_dir'] = match.group(3)
    else:
        # NOTE(review): raised via module alias 'e' while other methods use
        # 'ex' — presumably a distinct exceptions module; confirm the alias
        # exists at file top.
        raise e.InvalidDataException(
            _("Invalid value for property 'hbase-site/hbase.rootdir' : %s")
            % user_input.value)
def finalize_configuration(self, cluster_spec):
    """Substitute NameNode and ZooKeeper hosts into HBase config tokens."""
    nn_servers = cluster_spec.determine_component_hosts('NAMENODE')
    if nn_servers:
        self._replace_config_token(
            cluster_spec, '%NN_HOST%', nn_servers.pop().fqdn(),
            {'hbase-site': ['hbase.rootdir']})
    zk_servers = cluster_spec.determine_component_hosts('ZOOKEEPER_SERVER')
    if zk_servers:
        # The quorum is a comma-separated list of all ZK hosts.
        zk_list = [z.fqdn() for z in zk_servers]
        self._replace_config_token(
            cluster_spec, '%ZOOKEEPER_HOSTS%', ','.join(zk_list),
            {'hbase-site': ['hbase.zookeeper.quorum']})
def finalize_ng_components(self, cluster_spec):
    """Ensure HBase node groups carry the clients/services they depend on."""
    hbase_ng = cluster_spec.get_node_groups_containing_component(
        'HBASE_MASTER')
    components = hbase_ng[0].components
    if 'HDFS_CLIENT' not in components:
        components.append('HDFS_CLIENT')
    if not cluster_spec.get_deployed_node_group_count(
            'HBASE_REGIONSERVER'):
        # No explicit region servers: colocate one with the master.
        components.append('HBASE_REGIONSERVER')
    else:
        hbase_ng = cluster_spec.get_node_groups_containing_component(
            'HBASE_REGIONSERVER')
        for ng in hbase_ng:
            components = ng.components
            if 'HDFS_CLIENT' not in components:
                components.append('HDFS_CLIENT')
    if not cluster_spec.get_deployed_node_group_count('ZOOKEEPER_SERVER'):
        zk_service = next(service for service in cluster_spec.services
                          if service.name == 'ZOOKEEPER')
        zk_service.deployed = True
        # NOTE(review): 'components' here is whichever list was bound last
        # (the master ng, or the final region-server ng from the loop above)
        # — looks fragile; confirm the intended target node group.
        components.append('ZOOKEEPER_SERVER')
class ZookeeperService(Service):
    """Ambari ZOOKEEPER service definition (mandatory)."""

    def __init__(self):
        super(ZookeeperService, self).__init__(
            ZookeeperService.get_service_id())

    @classmethod
    def get_service_id(cls):
        return 'ZOOKEEPER'

    def validate(self, cluster_spec, cluster):
        """Require >= 1 server; with HDFS HA, require an odd count >= 3."""
        count = cluster_spec.get_deployed_node_group_count('ZOOKEEPER_SERVER')
        if count < 1:
            raise ex.InvalidComponentCountException(
                'ZOOKEEPER_SERVER', '1+', count)
        # check if HDFS HA is enabled
        if cluster_spec.is_hdfs_ha_enabled(cluster):
            # check if we have an odd number of zookeeper_servers >= 3
            if not (count >= 3 and (count % 2 == 1)):
                # BUG FIX: the message previously said "greater than 3",
                # contradicting the check above, which accepts count == 3.
                raise ex.NameNodeHAConfigurationError(
                    "ZOOKEEPER_SERVER count should be an odd number "
                    "greater than or equal to 3 for NameNode High "
                    "Availability. Actual ZOOKEEPER_SERVER count is %s"
                    % count)

    def is_mandatory(self):
        return True
class OozieService(Service):
    """Ambari OOZIE workflow-scheduler service definition."""

    def __init__(self):
        super(OozieService, self).__init__(OozieService.get_service_id())
        self.configurations.add('oozie-site')

    @classmethod
    def get_service_id(cls):
        return 'OOZIE'

    def validate(self, cluster_spec, cluster):
        """Require exactly one OOZIE_SERVER and at least one OOZIE_CLIENT."""
        count = cluster_spec.get_deployed_node_group_count('OOZIE_SERVER')
        if count != 1:
            raise ex.InvalidComponentCountException(
                'OOZIE_SERVER', 1, count)
        count = cluster_spec.get_deployed_node_group_count('OOZIE_CLIENT')
        if not count:
            raise ex.InvalidComponentCountException(
                'OOZIE_CLIENT', '1+', count)

    def finalize_configuration(self, cluster_spec):
        """Substitute the Oozie server host into dependent config values."""
        oozie_servers = cluster_spec.determine_component_hosts('OOZIE_SERVER')
        if oozie_servers:
            oozie_server = oozie_servers.pop()
            name_list = [oozie_server.fqdn(), oozie_server.internal_ip,
                         oozie_server.management_ip]
            self._replace_config_token(
                cluster_spec, '%OOZIE_HOST%', oozie_server.fqdn(),
                {'global': ['oozie_hostname'],
                 'oozie-site': ['oozie.base.url']})
            # proxyuser hosts accept any of the server's known names
            self._replace_config_token(
                cluster_spec, '%OOZIE_HOST%', ",".join(name_list),
                {'core-site': ['hadoop.proxyuser.oozie.hosts']})

    def finalize_ng_components(self, cluster_spec):
        """Colocate the required client components with Oozie."""
        oozie_ng = cluster_spec.get_node_groups_containing_component(
            'OOZIE_SERVER')[0]
        components = oozie_ng.components
        if 'HDFS_CLIENT' not in components:
            components.append('HDFS_CLIENT')
        if 'MAPREDUCE2_CLIENT' not in components:
            components.append('MAPREDUCE2_CLIENT')
        # per AMBARI-3483
        if 'YARN_CLIENT' not in components:
            components.append('YARN_CLIENT')
        # ensure that mr and hdfs clients are colocated with oozie client
        client_ngs = cluster_spec.get_node_groups_containing_component(
            'OOZIE_CLIENT')
        for ng in client_ngs:
            components = ng.components
            if 'HDFS_CLIENT' not in components:
                components.append('HDFS_CLIENT')
            if 'MAPREDUCE2_CLIENT' not in components:
                components.append('MAPREDUCE2_CLIENT')

    def register_service_urls(self, cluster_spec, url_info):
        """Publish the Oozie web endpoint into url_info."""
        oozie_ip = cluster_spec.determine_component_hosts(
            'OOZIE_SERVER').pop().management_ip
        port = self._get_port_from_cluster_spec(cluster_spec, 'oozie-site',
                                                'oozie.base.url')
        url_info['JobFlow'] = {
            'Oozie': 'http://%s:%s' % (oozie_ip, port)
        }
        return url_info

    def register_user_input_handlers(self, ui_handlers):
        ui_handlers['oozie-site/oozie.service.JPAService.jdbc.username'] = (
            self._handle_user_property_db_user)
        # BUG FIX: the password handler was registered under the bare key
        # 'oozie.service.JPAService.jdbc.password' (no 'oozie-site/' prefix),
        # inconsistent with the username key above and with the handler's
        # target, so the handler could never be matched.
        ui_handlers['oozie-site/oozie.service.JPAService.jdbc.password'] = (
            self._handle_user_property_db_pwd)

    def _handle_user_property_db_user(self, user_input, configurations):
        oozie_site_config_map = configurations['oozie-site']
        oozie_site_config_map['oozie.service.JPAService.jdbc.username'] = (
            user_input.value)

    def _handle_user_property_db_pwd(self, user_input, configurations):
        oozie_site_config_map = configurations['oozie-site']
        oozie_site_config_map['oozie.service.JPAService.jdbc.password'] = (
            user_input.value)
class GangliaService(Service):
    """Ambari GANGLIA monitoring service definition."""

    def __init__(self):
        super(GangliaService, self).__init__(GangliaService.get_service_id())

    @classmethod
    def get_service_id(cls):
        return 'GANGLIA'

    def validate(self, cluster_spec, cluster):
        # Exactly one Ganglia server may be deployed.
        server_count = cluster_spec.get_deployed_node_group_count(
            'GANGLIA_SERVER')
        if server_count != 1:
            raise ex.InvalidComponentCountException(
                'GANGLIA_SERVER', 1, server_count)

    def is_user_template_component(self, component):
        # Monitors are injected automatically, so they are not user-templated.
        return component.name != 'GANGLIA_MONITOR'

    def finalize_ng_components(self, cluster_spec):
        # Every node group gets a Ganglia monitor.
        for node_group in cluster_spec.node_groups.values():
            if 'GANGLIA_MONITOR' not in node_group.components:
                node_group.components.append('GANGLIA_MONITOR')
class AmbariService(Service):
    """Ambari management-console service definition (mandatory)."""

    def __init__(self):
        super(AmbariService, self).__init__(AmbariService.get_service_id(),
                                            False)
        self.configurations.add('ambari')
        # TODO(jspeidel): don't hard code default admin user
        self.admin_user_name = 'admin'

    @classmethod
    def get_service_id(cls):
        return 'AMBARI'

    def validate(self, cluster_spec, cluster):
        # Exactly one Ambari server may be deployed.
        server_count = cluster_spec.get_deployed_node_group_count(
            'AMBARI_SERVER')
        if server_count != 1:
            raise ex.InvalidComponentCountException(
                'AMBARI_SERVER', 1, server_count)

    def register_service_urls(self, cluster_spec, url_info):
        # Publish the console URL using the configured (or default) port.
        server_hosts = cluster_spec.determine_component_hosts('AMBARI_SERVER')
        ambari_ip = server_hosts.pop().management_ip
        ambari_config = cluster_spec.configurations['ambari']
        port = ambari_config.get('server.port', '8080')
        url_info['Ambari Console'] = {
            'Web UI': 'http://{0}:{1}'.format(ambari_ip, port)
        }
        return url_info

    def is_user_template_component(self, component):
        # Agents are injected automatically, so they are not user-templated.
        return component.name != 'AMBARI_AGENT'

    def register_user_input_handlers(self, ui_handlers):
        ui_handlers['ambari-stack/ambari.admin.user'] = (
            self._handle_user_property_admin_user)
        ui_handlers['ambari-stack/ambari.admin.password'] = (
            self._handle_user_property_admin_password)

    def is_mandatory(self):
        return True

    def _handle_user_property_admin_user(self, user_input, configurations):
        # Rename the default 'admin' user to the requested name.
        matching_user = next(user for user in self.users
                             if user.name == 'admin')
        matching_user.name = user_input.value
        self.admin_user_name = user_input.value

    def _handle_user_property_admin_password(self, user_input, configurations):
        # Update the password of the (possibly renamed) admin user.
        matching_user = next(user for user in self.users
                             if user.name == self.admin_user_name)
        matching_user.password = user_input.value
class SqoopService(Service):
    """Ambari SQOOP data-transfer service definition."""

    def __init__(self):
        super(SqoopService, self).__init__(SqoopService.get_service_id())

    @classmethod
    def get_service_id(cls):
        return 'SQOOP'

    def finalize_ng_components(self, cluster_spec):
        # Sqoop needs the HDFS and MapReduce clients wherever it runs.
        required_clients = ('HDFS_CLIENT', 'MAPREDUCE2_CLIENT')
        sqoop_ngs = cluster_spec.get_node_groups_containing_component('SQOOP')
        for node_group in sqoop_ngs:
            for client in required_clients:
                if client not in node_group.components:
                    node_group.components.append(client)
class NagiosService(Service):
    """Ambari NAGIOS alerting service definition."""

    def __init__(self):
        super(NagiosService, self).__init__(NagiosService.get_service_id())

    @classmethod
    def get_service_id(cls):
        return 'NAGIOS'

    def finalize_ng_components(self, cluster_spec):
        """Add the client components Nagios alert scripts depend on."""
        # per AMBARI-2946
        nagios_ngs = (
            cluster_spec.get_node_groups_containing_component('NAGIOS_SERVER'))
        for ng in nagios_ngs:
            if 'YARN_CLIENT' not in ng.components:
                ng.components.append('YARN_CLIENT')
            if 'MAPREDUCE2_CLIENT' not in ng.components:
                ng.components.append('MAPREDUCE2_CLIENT')
            # Oozie/Hive clients are only needed if those servers exist.
            if cluster_spec.get_deployed_node_group_count('OOZIE_SERVER'):
                if 'OOZIE_CLIENT' not in ng.components:
                    ng.components.append('OOZIE_CLIENT')
            if cluster_spec.get_deployed_node_group_count('HIVE_SERVER'):
                if 'HIVE_CLIENT' not in ng.components:
                    ng.components.append('HIVE_CLIENT')
                if 'HCAT' not in ng.components:
                    # HCAT needs the HCATALOG service; force-deploy it if
                    # the user left it out.
                    if not cluster_spec.get_deployed_node_group_count(
                            'HCATALOG'):
                        hcat_service = next(service for service in
                                            cluster_spec.services if
                                            service.name == 'HCATALOG')
                        hcat_service.deployed = True
                    ng.components.append('HCAT')
class HueService(Service):
default_web_ui_port = '8000'
required_services = ['HIVE', 'OOZIE', 'WEBHCAT', 'YARN']
def __init__(self):
    """Register the HUE service (not deployed by default)."""
    super(HueService, self).__init__(HueService.get_service_id(), False)
@classmethod
def get_service_id(cls):
    """Return the Ambari service identifier for Hue."""
    return "HUE"
@staticmethod
def _get_java_home_from_config(config):
    """Return 'java64_home' or 'java_home' from *config*, else None.

    A falsy/missing config yields None.
    """
    if not config:
        return None
    return config.get('java64_home', None) or config.get('java_home', None)
@staticmethod
def _get_java_home(cluster_spec):
    """Resolve JAVA_HOME: 'hue' config first, then 'global', else default."""
    java_home = HueService._get_java_home_from_config(
        cluster_spec.configurations.get('hue', None)
    )
    if not java_home:
        java_home = HueService._get_java_home_from_config(
            cluster_spec.configurations.get('global', None)
        )
    # Fallback is the historical JDK location on HDP images.
    return java_home or '/opt/jdk1.6.0_31'
@staticmethod
def _append_host_substitution(cluster_spec, component, var_name,
                              var_pattern_name, subs):
    """Record host and regex-escaped-host substitutions for *component*.

    Adds nothing when the component has no deployed hosts.
    """
    hosts = cluster_spec.determine_component_hosts(component)
    if hosts:
        subs[var_name] = hosts.pop().fqdn() or 'localhost'
        # FIX: '\\.' replaces the invalid escape '\.' — same two-character
        # runtime string, but no SyntaxWarning on modern Python. Dots are
        # escaped so the host can be used inside a regex pattern.
        subs[var_pattern_name] = subs[var_name].replace('.', '\\.')
@staticmethod
def _create_hue_ini_file_section(property_sub_tree, level):
    """Recursively render one level of the ini tree as text.

    Sections at nesting depth N are bracketed N times ([[...]] style),
    matching Hue's ini dialect; properties are indented two spaces per
    level.
    """
    properties = property_sub_tree['properties']
    sections = property_sub_tree['sections']
    s = ''
    if properties:
        for name, value in six.iteritems(properties):
            s += ' ' * (level * 2)
            s += "{0} = {1}\n".format(name, value)
    if sections:
        for name, section in six.iteritems(sections):
            s += "\n"
            s += ' ' * ((level - 1) * 2)
            s += '[' * level
            s += name
            s += ']' * level
            s += "\n"
            s += HueService._create_hue_ini_file_section(section,
                                                         level + 1)
    return s
@staticmethod
def _create_hue_ini_file(property_tree):
    """Render the full hue.ini text, or '' for an empty/missing tree."""
    if not property_tree:
        return ''
    return HueService._create_hue_ini_file_section(property_tree, 1)
@staticmethod
def _create_hue_property_tree(cluster_spec):
    """Build a nested {sections, properties} tree from the 'hue-ini' config.

    Property names are '/'-separated paths: each leading segment becomes a
    nested ini section, the final segment the property name. Placeholder
    tokens (%JAVA_HOME%, %NN_HOST%, ...) are substituted with resolved
    cluster values first.
    """
    config_name = 'hue-ini'
    LOG.info(_LI('Creating Hue ini property tree from configuration named '
                 '{0}').format(config_name))
    hue_ini_property_tree = {'sections': {}, 'properties': {}}
    config = cluster_spec.configurations[config_name]
    if config is None:
        LOG.warning(_LW('Missing configuration named {0}, aborting Hue ini'
                        ' file creation').format(config_name))
    else:
        # replace values in hue-ini configuration
        subs = {}
        subs['%JAVA_HOME%'] = HueService._get_java_home(cluster_spec)
        HueService._append_host_substitution(cluster_spec,
                                             'NAMENODE',
                                             '%NN_HOST%',
                                             '%NN_HOST_PATTERN%',
                                             subs)
        HueService._append_host_substitution(cluster_spec,
                                             'RESOURCEMANAGER',
                                             '%RM_HOST%',
                                             '%RM_HOST_PATTERN%',
                                             subs)
        HueService._append_host_substitution(cluster_spec,
                                             'HISTORYSERVER',
                                             '%HS_HOST%',
                                             '%HS_HOST_PATTERN%',
                                             subs)
        HueService._append_host_substitution(cluster_spec,
                                             'OOZIE_SERVER',
                                             '%OOZIE_HOST%',
                                             '%OOZIE_HOST_PATTERN%',
                                             subs)
        HueService._append_host_substitution(cluster_spec,
                                             'WEBHCAT_SERVER',
                                             '%WEBHCAT_HOST%',
                                             '%WEBHCAT_HOST_PATTERN%',
                                             subs)
        HueService._append_host_substitution(cluster_spec,
                                             'HUE',
                                             '%HUE_HOST%',
                                             '%HUE_HOST_PATTERN%',
                                             subs)
        # Parse configuration properties into Hue ini configuration tree
        # where <token1>:<token2>:<token3> = <value>
        # becomes
        #   <token1> {
        #      <token2> {
        #        <token3>=<value>
        #      }
        #   }
        for prop_name, prop_value in six.iteritems(config):
            # Skip empty property values
            if prop_value:
                # Attempt to make any necessary substitutions
                if subs:
                    for placeholder, sub in six.iteritems(subs):
                        if prop_value.find(placeholder) >= 0:
                            value = prop_value.replace(placeholder, sub)
                            LOG.debug('Converting placeholder in property '
                                      '{0}:\n\t\t{1}\n\tto\n\t\t{2}\n'.
                                      format(prop_name, prop_value, value))
                            prop_value = value
                # If the property value still is a value, add it and it's
                # relevant path to the tree
                if prop_value and len(prop_value) > 0:
                    node = hue_ini_property_tree
                    tokens = prop_name.split('/')
                    if tokens:
                        name = tokens.pop()
                        while tokens:
                            token = tokens.pop(0)
                            if token not in node['sections']:
                                data = {'sections': {},
                                        'properties': {}}
                                node['sections'][token] = data
                            node = node['sections'][token]
                        # TODO(rlevas) : handle collisions
                        node['properties'][name] = prop_value
    return hue_ini_property_tree
@staticmethod
def _merge_configurations(cluster_spec, src_config_name, dst_config_name):
LOG.info(_LI('Merging configuration properties: %(source)s -> '
'%(destination)s'),
{'source': src_config_name, 'destination': dst_config_name})
src_config = cluster_spec.configurations[src_config_name]
dst_config = cluster_spec.configurations[dst_config_name]
if src_config is None:
LOG.warning(_LW('Missing source configuration property set, '
'aborting merge: {0}').format(src_config_name))
elif dst_config is None:
LOG.warning(_LW('Missing destination configuration property set, '
'aborting merge: {0}').format(dst_config_name))
else:
for property_name, property_value in six.iteritems(src_config):
if property_name in dst_config:
if dst_config[property_name] == src_config[property_name]:
LOG.debug('Skipping unchanged configuration property '
'in {0} and {1}: {2}'.format(dst_config_name,
src_config_name,
property_name))
else:
LOG.warning(_LW('Overwriting existing configuration '
'property in %(dst_config_name)s from '
'%(src_config_name)s for Hue: | |
<gh_stars>100-1000
""" Cisco_IOS_XR_ipv4_bgp_oc_oper
This module contains a collection of YANG definitions
for Cisco IOS\-XR ipv4\-bgp\-oc package operational data.
This module contains definitions
for the following management objects\:
oc\-bgp\: OC\-BGP operational data
Copyright (c) 2013\-2018 by Cisco Systems, Inc.
All rights reserved.
"""
import sys
from collections import OrderedDict
from ydk.types import Entity as _Entity_
from ydk.types import EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class BgpOcAfi(Enum):
    """
    BgpOcAfi (Enum Class)
    BGP Address family
    .. data:: ipv4 = 0
    IPv4 unicast
    .. data:: ipv6 = 5
    IPv6 unicast
    """

    # Generated by ydk-gen from the YANG model; the numeric values come
    # from the model (note the gap: ipv6 is 5, not 1).
    ipv4 = Enum.YLeaf(0, "ipv4")

    ipv6 = Enum.YLeaf(5, "ipv6")

    @staticmethod
    def _meta_info():
        # Lazy import avoids loading the large meta tables at module import.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_bgp_oc_oper as meta
        return meta._meta_table['BgpOcAfi']
class BgpOcInvalidRouteReason(Enum):
    """
    BgpOcInvalidRouteReason (Enum Class)
    Invalid route reason
    .. data:: valid_route = 1
    Valid route
    .. data:: invalid_clsuter_loop = 2
    ClusterLoop
    .. data:: invalid_as_path_loop = 3
    AsPathLoop
    .. data:: invalid_origin_at_or_id = 4
    OriginatorID
    .. data:: invalid_as_confed_loop = 5
    ASConfedLoop
    """

    # Generated by ydk-gen; names (including the 'clsuter' typo) mirror the
    # YANG model identifiers and must not be corrected here.
    valid_route = Enum.YLeaf(1, "valid-route")

    invalid_clsuter_loop = Enum.YLeaf(2, "invalid-clsuter-loop")

    invalid_as_path_loop = Enum.YLeaf(3, "invalid-as-path-loop")

    invalid_origin_at_or_id = Enum.YLeaf(4, "invalid-origin-at-or-id")

    invalid_as_confed_loop = Enum.YLeaf(5, "invalid-as-confed-loop")

    @staticmethod
    def _meta_info():
        # Lazy import avoids loading the large meta tables at module import.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_bgp_oc_oper as meta
        return meta._meta_table['BgpOcInvalidRouteReason']
class BgpOcOriginAttr(Enum):
    """
    BgpOcOriginAttr (Enum Class)
    Origin Type
    .. data:: igp = 0
    IGP
    .. data:: egp = 1
    EGP
    .. data:: incomplete = 2
    Incomplete
    """

    # Generated by ydk-gen; values follow the BGP ORIGIN attribute encoding.
    igp = Enum.YLeaf(0, "igp")

    egp = Enum.YLeaf(1, "egp")

    incomplete = Enum.YLeaf(2, "incomplete")

    @staticmethod
    def _meta_info():
        # Lazy import avoids loading the large meta tables at module import.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_bgp_oc_oper as meta
        return meta._meta_table['BgpOcOriginAttr']
class OcBgp(_Entity_):
"""
OC\-BGP operational data
.. attribute:: bgp_rib
BGP\-RIB operational data
**type**\: :py:class:`BgpRib <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_bgp_oc_oper.OcBgp.BgpRib>`
**config**\: False
"""
_prefix = 'ipv4-bgp-oc-oper'
_revision = '2017-09-07'
def __init__(self):
    """Initialize the generated top-level oc-bgp container entity."""
    # ydk-gen emits this Python 2/3-compatible super() call.
    if sys.version_info > (3,):
        super().__init__()
    else:
        super(OcBgp, self).__init__()
    self._top_entity = None

    self.yang_name = "oc-bgp"
    self.yang_parent_name = "Cisco-IOS-XR-ipv4-bgp-oc-oper"
    self.is_top_level_class = True
    self.has_list_ancestor = False
    self.ylist_key_names = []
    # Maps YANG child names to (python attribute, class) pairs.
    self._child_classes = OrderedDict([("bgp-rib", ("bgp_rib", OcBgp.BgpRib))])
    self._leafs = OrderedDict()

    self.bgp_rib = OcBgp.BgpRib()
    self.bgp_rib.parent = self
    self._children_name_map["bgp_rib"] = "bgp-rib"
    self._segment_path = lambda: "Cisco-IOS-XR-ipv4-bgp-oc-oper:oc-bgp"
    # Freeze: further attribute writes go through _perform_setattr.
    self._is_frozen = True
def __setattr__(self, name, value):
    # Delegate all attribute assignment to YDK's validation hook.
    self._perform_setattr(OcBgp, [], name, value)
class BgpRib(_Entity_):
"""
BGP\-RIB operational data
.. attribute:: afi_safi_table
AFI\-SAFIs information
**type**\: :py:class:`AfiSafiTable <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_bgp_oc_oper.OcBgp.BgpRib.AfiSafiTable>`
**config**\: False
"""
_prefix = 'ipv4-bgp-oc-oper'
_revision = '2017-09-07'
def __init__(self):
    """Build the generated YANG entity for the bgp-rib container."""
    if sys.version_info > (3,):
        super().__init__()
    else:
        # Python 2 still needs the explicit two-argument super() form.
        super(OcBgp.BgpRib, self).__init__()
    self.yang_name = "bgp-rib"
    self.yang_parent_name = "oc-bgp"
    self.is_top_level_class = False
    self.has_list_ancestor = False
    self.ylist_key_names = []
    # YANG child segment name -> (python attribute name, child class).
    self._child_classes = OrderedDict([("afi-safi-table", ("afi_safi_table", OcBgp.BgpRib.AfiSafiTable))])
    self._leafs = OrderedDict()
    self.afi_safi_table = OcBgp.BgpRib.AfiSafiTable()
    self.afi_safi_table.parent = self
    self._children_name_map["afi_safi_table"] = "afi-safi-table"
    self._segment_path = lambda: "bgp-rib"
    self._absolute_path = lambda: "Cisco-IOS-XR-ipv4-bgp-oc-oper:oc-bgp/%s" % self._segment_path()
    # Must stay last: presumably enables assignment validation in __setattr__.
    self._is_frozen = True
def __setattr__(self, name, value):
    # Delegate all attribute assignment to YDK's validation hook.
    self._perform_setattr(OcBgp.BgpRib, [], name, value)
class AfiSafiTable(_Entity_):
"""
AFI\-SAFIs information
.. attribute:: ipv4_unicast
IPv4 Unicast
**type**\: :py:class:`Ipv4Unicast <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_bgp_oc_oper.OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast>`
**config**\: False
.. attribute:: ipv6_unicast
IPv6 Unicast
**type**\: :py:class:`Ipv6Unicast <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_bgp_oc_oper.OcBgp.BgpRib.AfiSafiTable.Ipv6Unicast>`
**config**\: False
"""
_prefix = 'ipv4-bgp-oc-oper'
_revision = '2017-09-07'
def __init__(self):
    """Build the generated YANG entity for the afi-safi-table container."""
    if sys.version_info > (3,):
        super().__init__()
    else:
        # Python 2 still needs the explicit two-argument super() form.
        super(OcBgp.BgpRib.AfiSafiTable, self).__init__()
    self.yang_name = "afi-safi-table"
    self.yang_parent_name = "bgp-rib"
    self.is_top_level_class = False
    self.has_list_ancestor = False
    self.ylist_key_names = []
    # YANG child segment name -> (python attribute name, child class).
    self._child_classes = OrderedDict([("ipv4-unicast", ("ipv4_unicast", OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast)), ("ipv6-unicast", ("ipv6_unicast", OcBgp.BgpRib.AfiSafiTable.Ipv6Unicast))])
    self._leafs = OrderedDict()
    self.ipv4_unicast = OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast()
    self.ipv4_unicast.parent = self
    self._children_name_map["ipv4_unicast"] = "ipv4-unicast"
    self.ipv6_unicast = OcBgp.BgpRib.AfiSafiTable.Ipv6Unicast()
    self.ipv6_unicast.parent = self
    self._children_name_map["ipv6_unicast"] = "ipv6-unicast"
    self._segment_path = lambda: "afi-safi-table"
    self._absolute_path = lambda: "Cisco-IOS-XR-ipv4-bgp-oc-oper:oc-bgp/bgp-rib/%s" % self._segment_path()
    # Must stay last: presumably enables assignment validation in __setattr__.
    self._is_frozen = True
def __setattr__(self, name, value):
    # Delegate all attribute assignment to YDK's validation hook.
    self._perform_setattr(OcBgp.BgpRib.AfiSafiTable, [], name, value)
class Ipv4Unicast(_Entity_):
"""
IPv4 Unicast
.. attribute:: loc_rib
Local rib route table
**type**\: :py:class:`LocRib <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_bgp_oc_oper.OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast.LocRib>`
**config**\: False
.. attribute:: open_config_neighbors
Neighbor list
**type**\: :py:class:`OpenConfigNeighbors <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_bgp_oc_oper.OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast.OpenConfigNeighbors>`
**config**\: False
"""
_prefix = 'ipv4-bgp-oc-oper'
_revision = '2017-09-07'
def __init__(self):
    """Build the generated YANG entity for the ipv4-unicast container."""
    if sys.version_info > (3,):
        super().__init__()
    else:
        # Python 2 still needs the explicit two-argument super() form.
        super(OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast, self).__init__()
    self.yang_name = "ipv4-unicast"
    self.yang_parent_name = "afi-safi-table"
    self.is_top_level_class = False
    self.has_list_ancestor = False
    self.ylist_key_names = []
    # YANG child segment name -> (python attribute name, child class).
    self._child_classes = OrderedDict([("loc-rib", ("loc_rib", OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast.LocRib)), ("open-config-neighbors", ("open_config_neighbors", OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast.OpenConfigNeighbors))])
    self._leafs = OrderedDict()
    self.loc_rib = OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast.LocRib()
    self.loc_rib.parent = self
    self._children_name_map["loc_rib"] = "loc-rib"
    self.open_config_neighbors = OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast.OpenConfigNeighbors()
    self.open_config_neighbors.parent = self
    self._children_name_map["open_config_neighbors"] = "open-config-neighbors"
    self._segment_path = lambda: "ipv4-unicast"
    self._absolute_path = lambda: "Cisco-IOS-XR-ipv4-bgp-oc-oper:oc-bgp/bgp-rib/afi-safi-table/%s" % self._segment_path()
    # Must stay last: presumably enables assignment validation in __setattr__.
    self._is_frozen = True
def __setattr__(self, name, value):
    # Delegate all attribute assignment to YDK's validation hook.
    self._perform_setattr(OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast, [], name, value)
class LocRib(_Entity_):
"""
Local rib route table
.. attribute:: routes
routes table
**type**\: :py:class:`Routes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_bgp_oc_oper.OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast.LocRib.Routes>`
**config**\: False
.. attribute:: num_routes
Number of routes in adjacency rib out\-bound post\-policy table
**type**\: :py:class:`NumRoutes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_bgp_oc_oper.OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast.LocRib.NumRoutes>`
**config**\: False
"""
_prefix = 'ipv4-bgp-oc-oper'
_revision = '2017-09-07'
def __init__(self):
    """Build the generated YANG entity for the loc-rib container."""
    if sys.version_info > (3,):
        super().__init__()
    else:
        # Python 2 still needs the explicit two-argument super() form.
        super(OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast.LocRib, self).__init__()
    self.yang_name = "loc-rib"
    self.yang_parent_name = "ipv4-unicast"
    self.is_top_level_class = False
    self.has_list_ancestor = False
    self.ylist_key_names = []
    # YANG child segment name -> (python attribute name, child class).
    self._child_classes = OrderedDict([("routes", ("routes", OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast.LocRib.Routes)), ("num-routes", ("num_routes", OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast.LocRib.NumRoutes))])
    self._leafs = OrderedDict()
    self.routes = OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast.LocRib.Routes()
    self.routes.parent = self
    self._children_name_map["routes"] = "routes"
    self.num_routes = OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast.LocRib.NumRoutes()
    self.num_routes.parent = self
    self._children_name_map["num_routes"] = "num-routes"
    self._segment_path = lambda: "loc-rib"
    self._absolute_path = lambda: "Cisco-IOS-XR-ipv4-bgp-oc-oper:oc-bgp/bgp-rib/afi-safi-table/ipv4-unicast/%s" % self._segment_path()
    # Must stay last: presumably enables assignment validation in __setattr__.
    self._is_frozen = True
def __setattr__(self, name, value):
    # Delegate all attribute assignment to YDK's validation hook.
    self._perform_setattr(OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast.LocRib, [], name, value)
class Routes(_Entity_):
"""
routes table
.. attribute:: route
route entry
**type**\: list of :py:class:`Route <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_bgp_oc_oper.OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast.LocRib.Routes.Route>`
**config**\: False
"""
_prefix = 'ipv4-bgp-oc-oper'
_revision = '2017-09-07'
def __init__(self):
    """Build the generated YANG entity for the routes list container."""
    if sys.version_info > (3,):
        super().__init__()
    else:
        # Python 2 still needs the explicit two-argument super() form.
        super(OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast.LocRib.Routes, self).__init__()
    self.yang_name = "routes"
    self.yang_parent_name = "loc-rib"
    self.is_top_level_class = False
    self.has_list_ancestor = False
    self.ylist_key_names = []
    # YANG child segment name -> (python attribute name, child class).
    self._child_classes = OrderedDict([("route", ("route", OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast.LocRib.Routes.Route))])
    self._leafs = OrderedDict()
    # "route" is a YANG list, so it is held in a YList rather than a
    # single child instance.
    self.route = YList(self)
    self._segment_path = lambda: "routes"
    self._absolute_path = lambda: "Cisco-IOS-XR-ipv4-bgp-oc-oper:oc-bgp/bgp-rib/afi-safi-table/ipv4-unicast/loc-rib/%s" % self._segment_path()
    # Must stay last: presumably enables assignment validation in __setattr__.
    self._is_frozen = True
def __setattr__(self, name, value):
    # Delegate all attribute assignment to YDK's validation hook.
    self._perform_setattr(OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast.LocRib.Routes, [], name, value)
class Route(_Entity_):
"""
route entry
.. attribute:: route
Network in prefix/length format
**type**\: union of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])/(([0\-9])\|([1\-2][0\-9])\|(3[0\-2]))
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(/(([0\-9])\|([0\-9]{2})\|(1[0\-1][0\-9])\|(12[0\-8])))
**config**\: False
.. attribute:: neighbor_address
Neighbor address
**type**\: union of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
**config**\: False
.. attribute:: path_id
Path ID
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: prefix_name
Prefix
**type**\: :py:class:`PrefixName <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_bgp_oc_oper.OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast.LocRib.Routes.Route.PrefixName>`
**config**\: False
.. attribute:: route_attr_list
RouteAttributesList
**type**\: :py:class:`RouteAttrList <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_bgp_oc_oper.OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast.LocRib.Routes.Route.RouteAttrList>`
**config**\: False
.. attribute:: ext_attributes_list
ExtAttributesList
**type**\: :py:class:`ExtAttributesList <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_bgp_oc_oper.OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast.LocRib.Routes.Route.ExtAttributesList>`
**config**\: False
.. attribute:: last_modified_date
LastModifiedDate
**type**\: :py:class:`LastModifiedDate <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_bgp_oc_oper.OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast.LocRib.Routes.Route.LastModifiedDate>`
**config**\: False
.. attribute:: last_update_recieved
LastUpdateRecieved
**type**\: :py:class:`LastUpdateRecieved <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_bgp_oc_oper.OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast.LocRib.Routes.Route.LastUpdateRecieved>`
**config**\: False
.. attribute:: valid_route
ValidRoute
**type**\: bool
**config**\: False
.. attribute:: invalid_reason
IndentityRef
**type**\: :py:class:`BgpOcInvalidRouteReason <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_bgp_oc_oper.BgpOcInvalidRouteReason>`
**config**\: False
.. attribute:: best_path
BestPath
**type**\: bool
**config**\: False
"""
_prefix = 'ipv4-bgp-oc-oper'
_revision = '2017-09-07'
def __init__(self):
    """Build the generated YANG entity for a single loc-rib route entry."""
    if sys.version_info > (3,):
        super().__init__()
    else:
        # Python 2 still needs the explicit two-argument super() form.
        super(OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast.LocRib.Routes.Route, self).__init__()
    self.yang_name = "route"
    self.yang_parent_name = "routes"
    self.is_top_level_class = False
    self.has_list_ancestor = False
    self.ylist_key_names = []
    # YANG child segment name -> (python attribute name, child class).
    self._child_classes = OrderedDict([("prefix-name", ("prefix_name", OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast.LocRib.Routes.Route.PrefixName)), ("route-attr-list", ("route_attr_list", OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast.LocRib.Routes.Route.RouteAttrList)), ("ext-attributes-list", ("ext_attributes_list", OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast.LocRib.Routes.Route.ExtAttributesList)), ("last-modified-date", ("last_modified_date", OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast.LocRib.Routes.Route.LastModifiedDate)), ("last-update-recieved", ("last_update_recieved", OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast.LocRib.Routes.Route.LastUpdateRecieved))])
    # Leaf metadata: python name -> (YLeaf descriptor, accepted python types).
    self._leafs = OrderedDict([
        ('route', (YLeaf(YType.str, 'route'), ['str','str'])),
        ('neighbor_address', (YLeaf(YType.str, 'neighbor-address'), ['str','str'])),
        ('path_id', (YLeaf(YType.uint32, 'path-id'), ['int'])),
        ('valid_route', (YLeaf(YType.boolean, 'valid-route'), ['bool'])),
        ('invalid_reason', (YLeaf(YType.enumeration, 'invalid-reason'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_bgp_oc_oper', 'BgpOcInvalidRouteReason', '')])),
        ('best_path', (YLeaf(YType.boolean, 'best-path'), ['bool'])),
    ])
    # Leaf values start unset.
    self.route = None
    self.neighbor_address = None
    self.path_id = None
    self.valid_route = None
    self.invalid_reason = None
    self.best_path = None
    self.prefix_name = OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast.LocRib.Routes.Route.PrefixName()
    self.prefix_name.parent = self
    self._children_name_map["prefix_name"] = "prefix-name"
    self.route_attr_list = OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast.LocRib.Routes.Route.RouteAttrList()
    self.route_attr_list.parent = self
    self._children_name_map["route_attr_list"] = "route-attr-list"
    self.ext_attributes_list = OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast.LocRib.Routes.Route.ExtAttributesList()
    self.ext_attributes_list.parent = self
    self._children_name_map["ext_attributes_list"] = "ext-attributes-list"
    self.last_modified_date = OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast.LocRib.Routes.Route.LastModifiedDate()
    self.last_modified_date.parent = self
    self._children_name_map["last_modified_date"] = "last-modified-date"
    self.last_update_recieved = OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast.LocRib.Routes.Route.LastUpdateRecieved()
    self.last_update_recieved.parent = self
    self._children_name_map["last_update_recieved"] = "last-update-recieved"
    self._segment_path = lambda: "route"
    self._absolute_path = lambda: "Cisco-IOS-XR-ipv4-bgp-oc-oper:oc-bgp/bgp-rib/afi-safi-table/ipv4-unicast/loc-rib/routes/%s" % self._segment_path()
    # Must stay last: presumably enables assignment validation in __setattr__.
    self._is_frozen = True
def __setattr__(self, name, value):
    # Delegate attribute assignment to YDK; the listed names are this
    # entity's own leafs.
    self._perform_setattr(OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast.LocRib.Routes.Route, ['route', 'neighbor_address', 'path_id', 'valid_route', 'invalid_reason', 'best_path'], name, value)
class PrefixName(_Entity_):
"""
Prefix
.. attribute:: prefix
Prefix
**type**\: :py:class:`Prefix <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_bgp_oc_oper.OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast.LocRib.Routes.Route.PrefixName.Prefix>`
**config**\: False
.. attribute:: prefix_length
Prefix length
**type**\: int
**range:** 0..255
**config**\: False
"""
_prefix = 'ipv4-bgp-oc-oper'
_revision = '2017-09-07'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast.LocRib.Routes.Route.PrefixName, self).__init__()
self.yang_name = "prefix-name"
self.yang_parent_name = "route"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("prefix", ("prefix", OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast.LocRib.Routes.Route.PrefixName.Prefix))])
self._leafs | |
return qs
def get_search_form(self):
    """
    Build the PartnerSearchForm from the current request parameters.

    Also stores the form on ``self.form`` as a side effect.

    Returns:
        the bound PartnerSearchForm instance.
    """
    # NOTE(review): request.REQUEST was removed in Django 1.9 -- confirm the
    # project pins an older Django before any framework upgrade.
    self.form = PartnerSearchForm(self.request.REQUEST)
    return self.form
class EditStudyView(LoginRequiredMixin, UpdateView):
    """
    This is the Django view implementation to edit study.
    @author: TCSASSEMBLER
    @version: 1.0
    """
    # Represents the model class for the view. This value isn't supposed to change.
    model = Study
    # Represents the template name. This value isn't supposed to change.
    template_name = "studies/edit.html"
    # Represents the form class for the view. This value isn't supposed to change.
    form_class = StudyForm
    # Represents the context object name for the study. This value isn't supposed to change.
    context_object_name = 'study'

    @transaction.commit_on_success
    def form_valid(self, form):
        """
        This method will be called if the submitted form is valid.
        Saves the study and its data requests and, when the submitted
        status is 1, executes the study.
        Parameters:
        - self : the object itself
        - form : the submitted form
        Returns:
        the response
        """
        context = self.get_context_data()
        form.instance.owner = self.request.user
        # Get the formsets for data request
        study_data_request_formset = context['study_data_request_formset']
        if study_data_request_formset.is_valid():
            # Drop the existing data requests; the formset recreates them below.
            StudyDataRequest.objects.filter(study=self.object).delete()
            exist_study = self.object
            status = form.instance.status
            # Persist as draft (0) first; execution flips the result below.
            form.instance.status = 0
            # Carry over the bookkeeping fields of the study being edited.
            form.instance.created_on = exist_study.created_on
            form.instance.last_modified_on = exist_study.last_modified_on
            form.instance.executed_on = exist_study.executed_on
            form.instance.completed_on = exist_study.completed_on
            form.instance.expiration_time = exist_study.expiration_time
            # Set default expiration time if not specified
            if form.cleaned_data['expiration_time'] is None:
                form.instance.expiration_time = form.instance.created_on + datetime.timedelta(seconds=DEFAULT_STUDY_EXPIRATION_TIME)
                form.cleaned_data['expiration_time'] = form.instance.expiration_time
            self.object = form.save()
            study_data_request_formset.instance = self.object
            study_data_request_formset.save()
            operate_result = 'saved'
            if status == 1:
                # Execute study
                execute_study(self.object)
                operate_result = 'executed'
            return self.render_to_response(self.get_context_data(form=form, operate_result=operate_result))
        else:
            return self.render_to_response(self.get_context_data(form=form, operate_result=''))

    def form_invalid(self, form):
        """
        This method will be called if the submitted form is invalid.
        Parameters:
        - self : the object itself
        - form : the submitted form
        Returns:
        the response
        """
        return self.render_to_response(self.get_context_data(form=form, operate_result=''))

    def get_context_data(self, **kwargs):
        """
        Return the context data.
        Parameters:
        - self : the object itself
        - kwargs : the key/value arguments
        Returns:
        the context data
        """
        context = super(EditStudyView, self).get_context_data(**kwargs)
        # inline formsets
        if self.request.POST:
            context['study_data_request_formset'] = StudyDataRequestFormSet(self.request.POST, instance=self.object)
        else:
            context['study_data_request_formset'] = StudyDataRequestFormSet(instance=self.object)
        context['partners'] = Partner.objects.all()
        context['partner_tags'] = PartnerTag.objects.all()
        return context

    def get_queryset(self):
        """
        Return the query set for the view.
        Only draft studies (status == 0) are editable.
        Parameters:
        - self : the object itself
        Returns:
        the query set
        """
        # NOTE(review): super(UpdateView, self) deliberately starts the MRO
        # lookup after UpdateView -- confirm this is intentional.
        qs = super(UpdateView, self).get_queryset().filter(status__exact=0)
        # Staff(Admin) can view all studies
        if self.request.user.is_staff:
            return qs
        # Regular user can view own studies
        return qs.filter(owner__exact=self.request.user)
class CreateStudyChartView(LoginRequiredMixin, CreateView, JSONResponseMixin):
    """
    This is the Django view implementation to create study chart.
    @author: TCSASSEMBLER
    @version: 1.0
    """
    # Model class handled by this view. This value isn't supposed to change.
    model = StudyChart
    # Form class used to validate submissions. This value isn't supposed to change.
    form_class = StudyChartForm

    def form_valid(self, form):
        """
        Handle a valid StudyChart submission.

        Saves the chart, then answers AJAX callers with a JSON payload
        carrying the new primary key; non-AJAX callers are redirected to
        the studies page.

        Parameters:
        - self : the object itself
        - form : the submitted form
        Returns:
        the response
        """
        self.object = form.save()
        if not self.request.is_ajax():
            return HttpResponseRedirect('/studies')
        payload = {
            'pk': self.object.pk,
        }
        return self.render_to_json_response(payload)
class CreateStudyView(LoginRequiredMixin, CreateView):
    """
    This is the Django view implementation to create study.
    @author: TCSASSEMBLER
    @version: 1.0
    """
    # Represents the model class for the view. This value isn't supposed to change.
    model = Study
    # Represents the template name. This value isn't supposed to change.
    template_name = "studies/create.html"
    # Represents the form class for the view. This value isn't supposed to change.
    form_class = CreateStudyForm

    @transaction.commit_on_success
    def form_valid(self, form):
        """
        This method will be called if the submitted form is valid.
        Saves the new study and its data requests and, when the submitted
        status is 1, executes the study.
        Parameters:
        - self : the object itself
        - form : the submitted form
        Returns:
        the response
        """
        context = self.get_context_data()
        form.instance.owner = self.request.user
        # Get the formsets for data request
        study_data_request_formset = context['study_data_request_formset']
        if study_data_request_formset.is_valid():
            now = timezone.localtime(timezone.now())
            # Set default expiration time if not specified
            if form.cleaned_data['expiration_time'] is None:
                form.instance.expiration_time = now + datetime.timedelta(seconds=DEFAULT_STUDY_EXPIRATION_TIME)
                form.cleaned_data['expiration_time'] = form.instance.expiration_time
            status = form.instance.status
            # Record the raw per-claim-type queries in the description field.
            form.instance.description = ''.join(
                '%s: %s\n' % (label, value) for label, value in (
                    ('beneficiary', form.instance.beneficiary_query),
                    ('carrier', form.instance.carrier_query),
                    ('inpatient', form.instance.inpatient_query),
                    ('outpatient', form.instance.outpatient_query),
                    ('prescription', form.instance.prescription_query),
                ))
            # Always persist as draft (0); execution updates the result below.
            form.instance.status = 0
            form.instance.created_on = now
            form.instance.last_modified_on = now
            self.object = form.save()
            if study_data_request_formset.total_form_count():
                study_data_request_formset.instance = self.object
                study_data_request_formset.save()
            else:
                # No explicit partner selection: request data from every partner.
                for partner in Partner.objects.all():
                    StudyDataRequest.objects.create(study=self.object, partner=partner, status=0, response_data='')
            operate_result = 'saved'
            if status == 1:
                # Execute study
                execute_study(self.object)
                operate_result = 'executed'
            return self.render_to_response(self.get_context_data(form=form, operate_result=operate_result, study_id=self.object.id))
        else:
            return self.render_to_response(self.get_context_data(form=form, operate_result='', study_id=''))

    def form_invalid(self, form):
        """
        This method will be called if the submitted form is invalid.
        Parameters:
        - self : the object itself
        - form : the submitted form
        Returns:
        the response
        """
        return self.render_to_response(self.get_context_data(form=form, operate_result='', study_id=''))

    def get_context_data(self, **kwargs):
        """
        Return the context data.
        Parameters:
        - self : the object itself
        - kwargs : the key/value arguments
        Returns:
        the context data
        """
        # (Docstring moved inside the method; it was previously a stray
        # string expression at class level.)
        context = super(CreateStudyView, self).get_context_data(**kwargs)
        # inline formsets
        if self.request.POST:
            context['study_data_request_formset'] = StudyDataRequestFormSet(self.request.POST, instance=self.object)
        else:
            context['study_data_request_formset'] = StudyDataRequestFormSet()
        context['partners'] = Partner.objects.all()
        context['partner_tags'] = PartnerTag.objects.all()
        return context
class ViewStudyBusinessRuleView(LoginRequiredMixin, DetailView):
    """
    This is the Django view implementation to view study business rule.
    @author: TCSASSEMBLER
    @version: 1.0
    """
    # Represents the model class for the view. This value isn't supposed to change.
    model = Study
    # Represents the template name. This value isn't supposed to change.
    template_name = "studies/business_rule.html"
    # Represents the context object name for the study. This value isn't supposed to change.
    context_object_name = "study"

    def get_context_data(self, **kwargs):
        """
        Return the context data, including the study's data requests.
        Parameters:
        - self : the object itself
        - kwargs : the key/value arguments
        Returns:
        the context data
        """
        context = super(ViewStudyBusinessRuleView, self).get_context_data(**kwargs)
        # self.transactions is populated by get_queryset(), which Django
        # runs while resolving the object, before this method is called.
        context['transactions'] = self.transactions
        return context

    def get_queryset(self):
        """
        Return the query set for the view.
        Also caches the study's data requests on self.transactions for
        get_context_data().
        Parameters:
        - self : the object itself
        Returns:
        the query set
        """
        self.transactions = StudyDataRequest.objects.filter(study__id=self.kwargs['pk'])
        # NOTE(review): super(DetailView, self) deliberately starts the MRO
        # lookup after DetailView -- confirm this is intentional.
        qs = super(DetailView, self).get_queryset()
        # Staff(Admin) can view all studies
        if self.request.user.is_staff:
            return qs
        # Regular user can view own studies
        return qs.filter(owner__exact=self.request.user)
class ViewStudyResultsView(LoginRequiredMixin, DetailView):
    """
    This is the Django view implementation to view study results.
    """
    # Represents the model class for the view. This value isn't supposed to change.
    model = Study
    # Represents the template name. This value isn't supposed to change.
    template_name = "studies/results.html"
    # Represents the context object name for study. This value isn't supposed to change.
    context_object_name = "study"

    # Claim types shown on the results page; drives the per-type context keys.
    CLAIM_TYPES = ('Beneficiary', 'Carrier', 'Inpatient', 'Outpatient', 'Prescription')

    def get_context_data(self, **kwargs):
        """
        Return the context data.
        Builds, for every claim type, the claim detail rows plus the
        available/selected column lists (previously five copy-pasted
        blocks, now a single loop).
        Parameters:
        - self : the object itself
        - kwargs : the key/value arguments
        Returns:
        the context data
        """
        context = super(ViewStudyResultsView, self).get_context_data(**kwargs)
        # Form
        context['search_form'] = self.get_search_form()
        context['study_chart_form'] = StudyChartForm()
        # Claim data
        claim_data_dict = externals.filter_claim_data(context['search_form'])
        for claim_type in self.CLAIM_TYPES:
            key = claim_type.lower()
            context['%s_claims' % key] = helper.createClaimDataDetails(
                claim_type, claim_data_dict[claim_type].filter(study=self.object))
        # Study charts
        context['study_charts'] = StudyChart.objects.filter(study=self.object)
        # Available and selected claim data columns, one set per claim type.
        for claim_type in self.CLAIM_TYPES:
            key = claim_type.lower()
            selected_fields = self.request.GET.getlist('selected_%s_claim_data_fields' % key)
            if not selected_fields:
                # Default to every known field for this claim type.
                selected_fields = sorted(CLAIM_DATA_FIELDS[claim_type].keys())
            context['%s_claim_data_fields' % key] = helper.getClaimColumns(claim_type, selected_fields)
            context['selected_%s_claim_data_fields' % key] = [CLAIM_DATA_FIELDS[claim_type][x] for x in selected_fields]
            context['selected_%s_claim_data_fields' % key].sort(key=lambda v: v['order'])
        # DARC emails
        context['darc_emails'] = DARCEmail.objects.all()
        return context

    def get_queryset(self):
        """
        Return the query set for the view.
        Parameters:
        - self : the object itself
        Returns:
        the query set
        """
        # NOTE(review): super(DetailView, self) deliberately starts the MRO
        # lookup after DetailView -- confirm this is intentional.
        qs = super(DetailView, self).get_queryset()
        # Staff(Admin) can view all studies
        if self.request.user.is_staff:
            return qs
        # Regular user can view own studies
        return qs.filter(owner__exact=self.request.user)

    def get_search_form(self):
        """
        Return the ClaimDataSearchForm on the page.
        Also stores the form on self.form as a side effect.
        Parameters:
        - self : the object itself
        Returns:
        the ClaimDataSearchForm on the page.
        """
        # NOTE(review): request.REQUEST was removed in Django 1.9 -- confirm
        # the project pins an older Django before any framework upgrade.
        search_form = self.form = ClaimDataSearchForm(self.request.REQUEST)
        return search_form
class LoginView(FormView):
'''
This is the Django view implementation for user login.
Thread Safety:
The implementation is not thread safe but it will be used in a thread-safe manner.
@author: caoweiquan322
@version: 1.0
'''
form_class = AuthenticationForm
template_name = "login.html"
# Here we setup the member variables for logging.
CLASS_NAME = 'hfppnetwork.sms.views.LoginView'
LOGGER = logging.getLogger(CLASS_NAME)
def form_valid(self, form):
'''
This method will be called if the submitted form is valid.
@param self: the object itself
@param form: the validated form
@return: result of its parent
'''
# Do logging
signature = self.CLASS_NAME + '.form_valid'
helper.log_entrance(self.LOGGER, signature, {'form': form})
login(self.request, form.get_user())
ret = super(LoginView, self).form_valid(form)
# | |
import re
from typing import List, Tuple, Optional, Set
import sqlparse
import time
import os
import json
import logging
from tqdm.notebook import tqdm as tq
from sqlparse.tokens import Keyword, DML, Token, Wildcard, Literal, Punctuation, Whitespace, Newline, Comment, Operator
from sqlparse.sql import Identifier, IdentifierList, Function, Parenthesis
from sql_translate.engine import regex
from sql_translate import utils
from termcolor import colored
# Case-insensitive wrapper around `re` that enforces that substitutions
# actually have an effect unless strict=False is passed (see the Regex.sub
# calls below and sql_translate.engine.regex).
Regex = regex.Regex()
class _GlobalTranslator():
    """Base class for whole-query SQL translators (see GlobalHiveToPresto)."""
    def __init__(self):
        pass
class GlobalHiveToPresto(_GlobalTranslator):
def __init__(self):
    """Configure a Hive -> Presto translator.

    Loads the reserved-keyword list shipped with the package (upper-cased
    for case-insensitive comparison) and builds the GROUP BY sub-translator.
    """
    self.from_language = "Hive"
    self.to_language = "Presto"
    keywords_path = os.path.join(os.path.dirname(__file__), "..", "reserved_keywords", "reserved_keywords.json")
    # Pin the encoding: the platform default is locale dependent.
    with open(keywords_path, encoding="utf-8") as f:
        self.reserved_keywords = [keyword.upper() for keyword in json.load(f)["content"]]
    self.gbt = GroupByTranslator()
def translate_query(self, query: str) -> str:
    """Main runner for the global translation.

    Runs a pipeline of O(n) transformations (typically regex substitutions)
    over the entire SQL text.

    Args:
        query (str): Input SQL

    Returns:
        str: Transformed SQL
    """
    # Ordering is significant; in particular _fix_interval_formatting
    # WARNING: must run !!AFTER!! _fix_aliasing_on_broadcasting.
    pipeline = (
        self._remove_dollar_sign,
        self._replace_double_quotes,
        self._replace_back_ticks,
        self._add_double_quotes,
        utils.protect_regex_curly_brackets,
        self._increment_array_indexes,
        self._cast_divisions_to_double,
        self._fix_rlike_calls,
        self._over_shortcut,
        self._fix_lateral_view_explode_calls,
        self._fix_double_equals,
        self._fix_aliasing_on_broadcasting,
        self._fix_interval_formatting,
        self.gbt.fix_group_by_calls,
    )
    for transformation in pipeline:
        query = transformation(query)
    return query
def _remove_dollar_sign(self, query: str) -> str:
"""Remove the dollar sign coming from Hue (on EMR clusters) used to indicate a variable.
Args:
query (str): Input SQL
Returns:
str: Transformed SQL
"""
return query.replace("${", "{")
def _replace_double_quotes(self, query: str) -> str:
"""All double quotes in Hive are replaced by single quotes.
Double quotes have a different purpose in Presto.
For more information: https://prestodb.io/docs/current/migration/from-hive.html
Args:
query (str): Input SQL
Returns:
str: Transformed SQL
"""
return query.replace('"', "'") # Double & single quotes are identical in Hive but different in Presto.
def _replace_back_ticks(self, query: str) -> str:
"""Back ticks from Hive are replaced by double quotes in Presto. There are no back ticks in Presto.
For more information: https://prestodb.io/docs/current/migration/from-hive.html
Args:
query (str): Input SQL
Returns:
str: Transformed SQL
"""
return query.replace('`', '"') # Handles spaces in column names & any other unicode character.
def _add_double_quotes(self, query: str) -> str:
    """Quote identifiers that begin with a digit.

    Presto requires double quotes around identifiers starting with a digit.
    For more information: https://prestodb.io/docs/current/migration/from-hive.html

    Args:
        query (str): Input SQL

    Returns:
        str: Transformed SQL
    """
    # WARNING: Does not support column names starting with just a single digit.
    digit_leading_identifier = r"\b(?P<content>(\d\w*[a-z]\w*))\b"

    def quote(match):
        # Surround the identifier with double quotes.
        return f'"{match.groupdict()["content"]}"'

    return Regex.sub(digit_leading_identifier, quote, query, strict=False)
def _increment_array_indexes(self, query: str) -> str:
    """Shift every literal array index up by one.

    Array indexing is 1-based in Presto while it is 0-based in Hive.
    For more information: https://prestodb.io/docs/current/migration/from-hive.html

    Args:
        query (str): Input SQL

    Returns:
        str: Transformed SQL
    """
    literal_index = r"\[(\d+)\]"

    def bump(match):
        return f"[{int(match.group(1))+1}]"

    # strict=False: some files might not contain any array access at all.
    return Regex.sub(literal_index, bump, query, strict=False)
def _fix_rlike_calls(self, query: str) -> str:
    """Translate RLIKE to LIKE
    Args:
        query (str): Input SQL
    Returns:
        str: Transformed SQL
    """
    # NOTE(review): Hive's RLIKE does regular-expression matching, while
    # Presto's LIKE only supports %/_ wildcards; Presto's regex equivalent
    # is regexp_like().  This rewrite only preserves semantics for literal
    # patterns -- confirm the queries it is applied to never use real regexes.
    return Regex.sub(
        r"\brlike\b",
        "like",
        query,
        strict=False
    )
def _over_shortcut(self, query: str) -> str:
    """Glue an OVER keyword to its parenthesis so it parses as a function call.

    Args:
        query (str): Input SQL

    Returns:
        str: Transformed SQL
    """
    over_with_gap = r"\bover\s+\("
    glued = "over("
    # strict=False: window functions are not present in every query.
    return Regex.sub(over_with_gap, glued, query, strict=False)
def _fix_lateral_view_explode_calls(self, query: str) -> str:
"""Lateral view explode in Hive is translated by CROSS JOIN UNNEST in Presto.
For more information: https://prestodb.io/docs/current/migration/from-hive.html
Args:
query (str): Input SQL
Returns:
str: Transformed SQL
"""
return Regex.sub(
r"lateral\s+view\s+explode\s*\((?P<explode_content>.+)\)\s+(?P<name>{vtn})(\s+as)\s+(?P<alias>{vtn})".format(vtn=utils.valid_presto_table_names),
lambda match: f"CROSS JOIN unnest({match['explode_content']}) AS {match['name']} {utils.function_placeholder}({match['alias']})",
query,
strict=False
)
def _fix_double_equals(self, query: str) -> str:
"""Hive tolerates single or double equales in comparison but Presto is single equal only.
Args:
query (str): Input SQL
Returns:
str: Transformed SQL
"""
return Regex.sub(
r"==",
"=",
query,
strict=False
)
    def _cast_divisions_to_double(self, query: str) -> str:
        """By default, Presto does an integer division when encountering two integers around a / sign.
        For instance, 3/2 = 1. Therefore, to properly translate it at least one side needs to be cast to double (both sides done here)
        Args:
            query (str): Input SQL
        Returns:
            str: Transformed SQL
        """
        ColumnCaster = utils.ColumnCaster()  # NOTE: despite the PascalCase name, this is an instance, not the class
        logging.debug("Flattening SQL...")
        start = time.perf_counter()
        flattened_tokens = list(sqlparse.parse(query)[0].flatten()) # Very intensive!
        logging.debug(f"SQL was flattened in {time.perf_counter() - start} s!")
        division_operators = sum([
            True
            for token in flattened_tokens
            if token.ttype == Operator and token.value == "/"
        ]) # Count how many operators there are
        logging.debug(f"Found {division_operators} division operator(s)")
        # Multi stage query copy/paste
        # One pass per "/" occurrence: each cast rewrites `query`, so the SQL is
        # re-parsed from scratch at every pass to get fresh token offsets.
        for division_operator in range(division_operators):
            logging.debug(f"Fixing division operation {division_operator}/{division_operators}")
            counter = 0  # number of "/" tokens already skipped in this pass
            idx = 0  # running character offset of the current token within `query`
            for token in sqlparse.parse(query)[0].flatten():
                if token.ttype == Operator and token.value == "/":
                    if counter == division_operator:
                        # Cast both sides of this "/" to double; empty prefix/suffix type slots
                        query = ColumnCaster.cast_non_trivial_tokens(query, token, idx, "double", {"b_type_0": "", "f_type_0": ""}) # Cast both sides
                        break
                    else:
                        counter += 1
                idx += len(token.value)
        return query
    def _fix_aliasing_on_broadcasting(self, sql: str) -> str:
        """When aliasing a broadcast column, the "AS" needs to be present otherwise sqlparse fails.
        Inserts a missing "as" between a literal (number or quoted string) and the
        identifier that follows it, unless that identifier is a reserved keyword.
        Args:
            sql (str): Input SQL
        Returns:
            str: Transformed SQL
        """
        def helper(match: re.Match):
            # Re-emit the match untouched when the trailing word is a keyword;
            # otherwise splice "as " in front of the alias.
            alias = match.groupdict()["alias"]
            if alias.upper() in self.reserved_keywords or alias.upper() in ("ASC", "DESC"): # List of Presto reserved keywords
                return match.group() # Not an alias but the continuation of the SQL logic
            else:
                return match.group()[:-len(alias)] + "as " + alias
        numbers = r"\b(\d+\.)?\d+\b" # Floats & integers
        strings = r"""(`|'|").*?(`|'|")""" # Careful, non greedy match around quote marks
        return Regex.sub(
            r"""({numbers}|{strings})\s+(?P<alias>`?([a-zA-Z]\w*|"\d\w*")`?)""".format(numbers=numbers, strings=strings),
            lambda match: helper(match),
            sql,
            strict=False
        )
def _fix_interval_formatting(self, query: str) -> str:
"""Removes the "AS" statement (if any) between an "interval" statement and its alias.
Args:
query (str): Input SQL
Returns:
str: Transformed SQL
"""
return Regex.sub(
r"(interval\s+'.+?'\s+)as\s+(\w+)",
lambda match: match.group(1) + match.group(2),
query,
strict=False
)
    def move_insert_statement(self, sql: str) -> str:
        """Move the Hive insert statement (which is before the final select) all the way to the top of the file.
        In Presto, the insert statement is the first thing in the query (before any with clause).
        Also prints a warning when it downgrades 'INSERT OVERWRITE' to 'INSERT INTO'.
        Args:
            sql (str): Input SQL
        Returns:
            str: Transformed SQL
        """
        # I. Find partition statement
        partitioned_table = False
        # Full grammar first: every part of the Hive insert pattern joined by whitespace
        pattern = r"(?P<insert_statement>{base_pattern})".format(base_pattern=r"\s+".join(utils.regex_hive_insert))
        try:
            result = Regex.search(pattern, sql)
            if result["operation"].lower() == "overwrite table":
                print(
                    f"WARNING: {self.to_language} does not support 'INSERT OVERWRITE'. It only supports 'INSERT INTO'.\n"
                    "Replacing 'INSERT OVERWRITE' by INSERT INTO in the translation. Be careful about duplicates when running the query."
                )
            partitioned_table = True
        except Exception:
            # The full (partitioned) pattern did not match: retry with the bare insert pattern only
            pattern = r"(?P<insert_statement>{base_pattern})".format(base_pattern=utils.regex_hive_insert[0])
            result = Regex.search(pattern, sql)
        # III. [Optional] If the table is partitioned, the partition key must be the last column in the final select statement
        # print(f"Before parsing final select:{sql}")
        is_distinct, span, final_select = utils.parse_final_select(sql)
        final_select = [
            [c for c in column if c] # Filter out the None when there is no alias
            for column in final_select
        ]
        if partitioned_table:
            for idx, column in enumerate(final_select):
                # expression or alias is an exact match -> make it the last column
                if Regex.search(r"^(\w+\.)?{partition_name}$".format(partition_name=result["partition_name"]), column[0], strict=False) \
                or (result["partition_name"] in column[1] if len(column) == 2 else False): # If there is an alias/column[1], check if there is a match
                    final_select_clean = final_select[:idx] + final_select[idx+1:]
                    last_column = ",\n" + " AS ".join(final_select[idx]) + "\n" # Becomes last column, aliased or not
                    break
            else: # for/else: no break above, i.e. did not find the partition name. Add the partition value as last column
                final_select_clean = final_select
                last_column = ",\n" + (result["partition_value"] if result["partition_value"] else result["partition_name"]) + "\n"
        else: # Simple reformatting
            final_select_clean = final_select
            last_column = "\n"
        # Rebuild the final select in place (`span` delimits it within `sql`)
        sql = sql[:span[0]] \
            + "SELECT" + (" DISTINCT" if is_distinct else "") + "\n" \
            + ",\n".join([" AS ".join(n) for n in final_select_clean]) \
            + last_column \
            + sql[span[1]:]
        # print(f"After parsing final select:{sql}")
        # II. Move statement & cleanup
        sql = f'INSERT INTO {result["database"]}.{result["table"]}\n' + \
            sql.replace(result["insert_statement"], "")
        # print(f"After moving statement:{sql}")
        sql = "\n".join([
            row
            for row in sql.split('\n')
            if row.strip()
        ]) # Remove empty lines
        # print(f"After removing empty lines:{sql}")
        return sql
class GroupByTranslator():
"""This object aims at fixing some translation issues related to group by statements.
"""
def breakdown_real_name(self, token: Token, options: Set[str]) -> Set[str]:
"""Recursively break down the real name in a token into elementary components. One of them must be a column name
that will try to | |
<gh_stars>0
from collections import namedtuple
import numpy as np
import torch
from scipy.spatial.distance import cdist
from scipy.spatial import Delaunay, Voronoi, voronoi_plot_2d
from scipy.sparse import dok_matrix
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
import matplotlib.colors as mcolors
import matplotlib.pyplot as plt
from torch_geometric.utils import get_laplacian
# Column indices into VortexBoard.node_attr (one row per graph node).
HORIZONTAL_PLAYER = 0  # stones of the player connecting left to right
VERTICAL_PLAYER = 1  # stones of the player connecting top to bottom
EMPTY = -1  # sentinel returned by get_node_state() for an unoccupied node
NEXT_PLAYER = 2  # side to move next; replicated on every node and toggled after each move
POINT_X = 3  # node x coordinate (board points lie in [0, 1]; terminal nodes sit just outside)
POINT_Y = 4  # node y coordinate
N_STEPS = 10  # default number of message-passing steps (matches new_vortex_board's n_steps default)
WinState = namedtuple('WinState', ['is_ended', 'winner'])  # game outcome record (not referenced in this chunk)
def merge_nodes(merge_nodes, player_nodes, edge_index):
    """Collapse each node in ``merge_nodes`` with its player-owned neighbours.

    Edges into/out of an absorbed neighbour are redirected to the merging node
    itself; only the edges change — node attributes are untouched.

    Args:
        merge_nodes: list of node indices to merge into.
        player_nodes: node indices filled by the same player.
        edge_index: (2, E) array of directed edges.

    Returns:
        The rewired edge array, deduplicated and column-sorted by np.unique.
    """
    pending = merge_nodes.copy()

    def owned_neighbours(node):
        # Targets of edges leaving `node` that the player has filled.
        targets = edge_index[1, edge_index[0] == node]
        return targets[np.in1d(targets, player_nodes)]

    while pending:
        node = pending.pop()
        if not (edge_index[0] == node).any():
            continue  # node has no outgoing edges left
        absorbed = owned_neighbours(node)
        # Every edge touching an absorbed neighbour gets dropped ...
        drop = np.in1d(edge_index[0], absorbed) | np.in1d(edge_index[1], absorbed)
        # ... and replaced by edges linking their 2-hop targets back to `node`.
        hop2 = np.unique(edge_index[1, np.in1d(edge_index[0], absorbed)])
        hop2 = hop2[hop2 != node]
        fresh = np.stack([hop2, np.full_like(hop2, node)])
        edge_index = np.concatenate(
            [edge_index[:, ~drop], fresh, fresh[[1, 0]]],
            axis=1
        )
        edge_index = np.unique(edge_index, axis=1)
        # Re-queue the node if it is still connected to player-owned nodes.
        if (edge_index[0] == node).any() and owned_neighbours(node).size:
            pending.append(node)
    return edge_index
class VortexBoard():
def __init__(self, edge_index, node_attr, n_steps, use_edge_weight):
self.edge_index = edge_index
self.node_attr = node_attr
self.node_count = self.node_attr.shape[0]
self.tnode_ndx = {
"top": self.node_count-4,
"bottom": self.node_count-3,
"left": self.node_count-2,
"right": self.node_count-1
}
self.n_steps = n_steps
self.features = self.n_steps * 6
self._nn_attr = None
self.use_edge_weight = use_edge_weight
@property
def nn_attr(self):
""" lazy calculation of input for the neural network
"""
if self._nn_attr is None:
self._nn_attr = self.get_nn_attr()
return self._nn_attr
@property
def shape(self):
return self.node_count, self.features
def set_stone(self, node_ndx, player):
"""
Sets a stone at the given node index for the given player
"""
assert self.node_attr[node_ndx, :2].sum() == 0.
assert player == HORIZONTAL_PLAYER or player == VERTICAL_PLAYER
self.node_attr[node_ndx, player] = 1.
assert self.node_attr[node_ndx, :2].sum() == 1.
def get_node_state(self, node_ndx):
"""
Gets the state of a node ie. which player has a stome on it
"""
if self.node_attr[node_ndx, HORIZONTAL_PLAYER]:
return HORIZONTAL_PLAYER
elif self.node_attr[node_ndx, VERTICAL_PLAYER]:
return VERTICAL_PLAYER
else:
return EMPTY
def move(self, node_ndx):
p = self.get_player()
self.set_stone(node_ndx, p)
self.node_attr[:, NEXT_PLAYER] = (self.node_attr[:, NEXT_PLAYER] + 1) % 2 # Toggle player
def take_action(self, a):
p = self.get_player()
self.node_attr[:, p] += a.astype(np.float32) # Next move
self.node_attr[:, NEXT_PLAYER] = (self.node_attr[:, NEXT_PLAYER] + 1) % 2 # Toggle player
    def get_vor_attr(self, player):
        """
        get the node attribute array from a single player perspective
        removes edges to other player nodes
        Layers:
            0 - side 1 node of current player
            1 - side 2 node of current player
            2 - current player filled nodes
        Returns:
            (v_attr, v_edge_index, v_edge_weight): the (node_count, 3) layer
            array, the rewired edge array, and float32 edge weights (1. or 2.).
        """
        # Pick this player's two goal sides and identify the opponent
        if player == HORIZONTAL_PLAYER:
            them = VERTICAL_PLAYER
            us = HORIZONTAL_PLAYER
            s1, s2 = self.tnode_ndx['left'], self.tnode_ndx['right']
        else:
            them = HORIZONTAL_PLAYER
            us = VERTICAL_PLAYER
            s1, s2 = self.tnode_ndx['top'], self.tnode_ndx['bottom']
        # set the stones
        v_attr = np.zeros((self.node_count, 3))
        v_attr[:, 2] = self.node_attr[:, us]
        v_attr[s1] = np.array([1., 0, 1.])
        v_attr[s2] = np.array([0, 1., 1.])
        # remove edges to other player filled nodes
        t_mask = self.node_attr[:, them] > 0.
        their_nodes = t_mask.nonzero()[0]
        m = ~(np.in1d(self.edge_index[0], their_nodes) | np.in1d(self.edge_index[1], their_nodes))
        v_edge_index = self.edge_index[:, m]
        # merge this player's connected internal nodes (the last 4 rows are side nodes, hence [:-4])
        o_mask = self.node_attr[:-4, us] > 0.
        our_nodes = o_mask.nonzero()[0].tolist()
        v_edge_index = merge_nodes(our_nodes, our_nodes, v_edge_index)
        # merge this player's side nodes to remaining internal nodes
        o_mask = self.node_attr[:, us] > 0.
        all_our_nodes = o_mask.nonzero()[0] # our nodes including sides
        o_mask[:-4] = False
        side_nodes = o_mask.nonzero()[0].tolist()
        v_edge_index = merge_nodes(side_nodes, our_nodes, v_edge_index)
        # edges have weight 1. if both connected nodes are empty or 2. if they connect to one of our nodes
        v_edge_weight = np.ones_like(v_edge_index[0], dtype=np.float32)
        m = (np.in1d(v_edge_index[0], all_our_nodes) | np.in1d(v_edge_index[1], all_our_nodes))
        v_edge_weight[m] = 2.
        return v_attr, v_edge_index, v_edge_weight
def get_player(self):
return int(self.node_attr[0, NEXT_PLAYER])
    def get_nn_attr(self):
        """
        preprocess the attributes for the neural network
        runs n_steps of message passing Laplacian transformation for each side view
        Returns:
            np.ndarray: (node_count, n_steps * 6) feature matrix; the last four
            rows are side nodes, with the current player's sides first.
        """
        current_player = self.get_player()
        steps = []
        # do message passing and concatenate the timesteps
        # first n_steps x 3 layers are the current player
        # next n_steps x 3 layers are the other player
        for p in [current_player, (current_player+1)%2]:
            v_attr, v_edge_index, v_edge_weight = self.get_vor_attr(p)
            # Symmetric-normalised graph Laplacian of this player's view
            L = get_laplacian(
                edge_index=torch.tensor(v_edge_index),
                edge_weight=torch.tensor(v_edge_weight) if self.use_edge_weight else None,
                normalization='sym',
                num_nodes=self.node_count
            )
            L = torch.sparse_coo_tensor(L[0], L[1])  # (indices, values) -> sparse matrix
            v_attr = torch.tensor(v_attr, dtype=torch.float32)
            for i in range(self.n_steps):
                steps.append(v_attr.numpy())  # record the current step before propagating
                if i < self.n_steps-1:
                    v_attr = L @ v_attr  # one message-passing step
        x = np.concatenate(steps, axis=1)
        # reverse the side node order for the horizontal player so that the first two side nodes are
        # always those of the current player, and the last two are those of the other player
        x1 = x.copy()
        if current_player == HORIZONTAL_PLAYER:
            x1[-4] = x[-2]
            x1[-3] = x[-1]
            x1[-2] = x[-4]
            x1[-1] = x[-3]
        return x1
def get_player_graphs(self):
current_player = self.get_player()
x_1, edge_index_1, _ = self.get_vor_attr(current_player)
x_2, edge_index_2, _ = self.get_vor_attr((current_player+1)%2)
return x_1, x_2, edge_index_1, edge_index_2
def get_available_actions(self):
return ~((self.node_attr[:, HORIZONTAL_PLAYER] > 0.) | (self.node_attr[:, VERTICAL_PLAYER] > 0.))
def tostring(self):
return self.node_attr[:, :NEXT_PLAYER+1].tobytes()
def copy(self):
return VortexBoard(self.edge_index.copy(), self.node_attr.copy(), n_steps=self.n_steps, use_edge_weight=self.use_edge_weight)
    def check_game_over(self):
        """ checks whether HORIZONTAL_PLAYER has made a left-right connection or
            VERTICAL_PLAYER has made a top-bottom connection
        Returns:
            length-2 reward array with 1. at the winner's index, or None when
            neither player has connected yet.
        """
        def is_connected(player, start_node, end_node):
            # see if we can connect the start_node to the end_node
            # using a depth-first search
            todo = set([start_node])
            done = set()
            while todo:
                node = todo.pop()
                if node == end_node:
                    return True
                # out-neighbours of `node` ...
                neighbourhood_mask = self.edge_index[0] == node
                neighbourhood_ndx = self.edge_index[1, neighbourhood_mask]
                # ... restricted to nodes the player has filled
                connected_mask = self.node_attr[neighbourhood_ndx, player] > 0.
                n = set(neighbourhood_ndx[connected_mask])
                todo = todo.union(n - done)
                done.add(node)
            return False
        rval = np.zeros(2)
        if is_connected(VERTICAL_PLAYER, self.tnode_ndx["top"], self.tnode_ndx["bottom"]):
            rval[VERTICAL_PLAYER] = 1.
        elif is_connected(HORIZONTAL_PLAYER, self.tnode_ndx["left"], self.tnode_ndx["right"]):
            rval[HORIZONTAL_PLAYER] = 1.
        else:
            rval = None
        return rval
def print_board_str(self):
print(self.edge_index)
print(self.node_attr)
@classmethod
def new_vortex_board(cls, size, n_steps=10, use_edge_weight=True):
""" construct a new empty vortex board with approximately the same complexity
as a hex board of size: size
0 - HORIZONTAL_PLAYER
1 - VERTICAL_PLAYER
2 - NEXT_PLAYER
3 - POINT_X
4 - POINT_Y
"""
min_dist = 3 / (size * 4)
# set up the border points
points = np.concatenate([
np.linspace((0., 0.), (1., 0.), size)[:-1],
np.linspace((0., 1.), (1., 1.), size)[1:],
np.linspace((0., 0.), (0., 1.), size)[1:],
np.linspace((1., 0.), (1., 1.), size)[:-1]
])
left_border_ndx = (points[:, 0] == 0.0).nonzero()[0]
right_border_ndx = (points[:, 0] == 1.0).nonzero()[0]
bottom_border_ndx = (points[:, 1] == 0.0).nonzero()[0]
top_border_ndx = (points[:, 1] == 1.0).nonzero()[0]
# sample the inner points
inner_point_count = size**2 - ((size - 1) * 4)
for i in range(inner_point_count):
while(True):
p = np.random.random_sample((1, 2))
dist = cdist(points, p, metric="euclidean").min()
if dist > min_dist:
points = np.concatenate([points, p])
break
# set up node attribues
node_count = points.shape[0] + 4
tnode_ndx = {
"top": node_count-4,
"bottom": node_count-3,
"left": node_count-2,
"right": node_count-1
}
node_attr = np.zeros((node_count, 5))
# set up terminal (off-board) player nodes
node_attr[tnode_ndx["top"], VERTICAL_PLAYER] = 1.
node_attr[tnode_ndx["bottom"], VERTICAL_PLAYER] = 1.
node_attr[tnode_ndx["left"], HORIZONTAL_PLAYER] = 1.
node_attr[tnode_ndx["right"], HORIZONTAL_PLAYER] = 1.
node_attr[:points.shape[0], POINT_X:] = points
node_attr[-4:, POINT_X:] = np.array([
[0.5, 1.2],
[0.5, -0.2],
[-0.2, 0.5],
[1.2, 0.5]
])
# build the adjacency matrix for the graph
tri = Delaunay(points)
adj = dok_matrix((node_count, node_count), dtype=np.int8)
for s in tri.simplices:
for i1 in range(3):
i2 = (i1 + 1) % 3
v1 = s[i1]
v2 = s[i2]
adj[v1, v2] = 1
adj[v2, v1] = 1
adj = adj.tocoo()
edge_index = np.stack([adj.row, adj.col])
# add the terminal edges to the border nodes
def add_edges(edge_index, node_ndx, nodes):
new_edge_index = np.stack([
np.full_like(nodes, fill_value=node_ndx),
nodes
])
edge_index = np.concatenate([
edge_index,
new_edge_index,
new_edge_index[[1, 0]]
], axis=1)
return edge_index
edge_index = add_edges(edge_index, tnode_ndx["top"], top_border_ndx)
edge_index = add_edges(edge_index, tnode_ndx["bottom"], bottom_border_ndx)
edge_index = add_edges(edge_index, tnode_ndx["left"], left_border_ndx)
| |
postCellId="../AS4/0/"/>
</projection>
<projection id="NC_AVAR_AS5_Generic_GJ" postsynapticPopulation="AS5" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../AS5/0/"/>
</projection>
<projection id="NC_AVAR_AS5_FMRFamide" postsynapticPopulation="AS5" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../AS5/0/"/>
</projection>
<projection id="NC_AVAR_AS6_Generic_GJ" postsynapticPopulation="AS6" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../AS6/0/"/>
</projection>
<projection id="NC_AVAR_AS6_FMRFamide" postsynapticPopulation="AS6" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../AS6/0/"/>
</projection>
<projection id="NC_AVAR_AS7_FMRFamide" postsynapticPopulation="AS7" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../AS7/0/"/>
</projection>
<projection id="NC_AVAR_AS7_Generic_GJ" postsynapticPopulation="AS7" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../AS7/0/"/>
</projection>
<projection id="NC_AVAR_AS8_Generic_GJ" postsynapticPopulation="AS8" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../AS8/0/"/>
</projection>
<projection id="NC_AVAR_AS8_FMRFamide" postsynapticPopulation="AS8" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../AS8/0/"/>
</projection>
<projection id="NC_AVAR_AS9_Generic_GJ" postsynapticPopulation="AS9" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../AS9/0/"/>
</projection>
<projection id="NC_AVAR_AS9_FMRFamide" postsynapticPopulation="AS9" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../AS9/0/"/>
</projection>
<projection id="NC_AVAR_AVAL_FMRFamide" postsynapticPopulation="AVAL" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../AVAL/0/"/>
</projection>
<projection id="NC_AVAR_AVAL_Generic_GJ" postsynapticPopulation="AVAL" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../AVAL/0/"/>
</projection>
<projection id="NC_AVAR_AVBL_FMRFamide" postsynapticPopulation="AVBL" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../AVBL/0/"/>
</projection>
<projection id="NC_AVAR_AVDL_FMRFamide" postsynapticPopulation="AVDL" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../AVDL/0/"/>
</projection>
<projection id="NC_AVAR_AVDR_FMRFamide" postsynapticPopulation="AVDR" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../AVDR/0/"/>
</projection>
<projection id="NC_AVAR_AVEL_FMRFamide" postsynapticPopulation="AVEL" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../AVEL/0/"/>
</projection>
<projection id="NC_AVAR_AVER_FMRFamide" postsynapticPopulation="AVER" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../AVER/0/"/>
</projection>
<projection id="NC_AVAR_DA1_FMRFamide" postsynapticPopulation="DA1" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../DA1/0/"/>
</projection>
<projection id="NC_AVAR_DA1_Generic_GJ" postsynapticPopulation="DA1" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../DA1/0/"/>
</projection>
<projection id="NC_AVAR_DA2_FMRFamide" postsynapticPopulation="DA2" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../DA2/0/"/>
</projection>
<projection id="NC_AVAR_DA2_Generic_GJ" postsynapticPopulation="DA2" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../DA2/0/"/>
</projection>
<projection id="NC_AVAR_DA3_Generic_GJ" postsynapticPopulation="DA3" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../DA3/0/"/>
</projection>
<projection id="NC_AVAR_DA3_FMRFamide" postsynapticPopulation="DA3" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../DA3/0/"/>
</projection>
<projection id="NC_AVAR_DA4_Generic_GJ" postsynapticPopulation="DA4" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../DA4/0/"/>
</projection>
<projection id="NC_AVAR_DA4_FMRFamide" postsynapticPopulation="DA4" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../DA4/0/"/>
</projection>
<projection id="NC_AVAR_DA5_FMRFamide" postsynapticPopulation="DA5" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../DA5/0/"/>
</projection>
<projection id="NC_AVAR_DA5_Generic_GJ" postsynapticPopulation="DA5" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../DA5/0/"/>
</projection>
<projection id="NC_AVAR_DA6_Generic_GJ" postsynapticPopulation="DA6" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../DA6/0/"/>
</projection>
<projection id="NC_AVAR_DA6_FMRFamide" postsynapticPopulation="DA6" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../DA6/0/"/>
</projection>
<projection id="NC_AVAR_DA7_FMRFamide" postsynapticPopulation="DA7" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../DA7/0/"/>
</projection>
<projection id="NC_AVAR_DA8_Generic_GJ" postsynapticPopulation="DA8" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../DA8/0/"/>
</projection>
<projection id="NC_AVAR_DA8_FMRFamide" postsynapticPopulation="DA8" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../DA8/0/"/>
</projection>
<projection id="NC_AVAR_DA9_FMRFamide" postsynapticPopulation="DA9" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../DA9/0/"/>
</projection>
<projection id="NC_AVAR_DB3_FMRFamide" postsynapticPopulation="DB3" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../DB3/0/"/>
</projection>
<projection id="NC_AVAR_DB5_FMRFamide" postsynapticPopulation="DB5" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../DB5/0/"/>
</projection>
<projection id="NC_AVAR_DB5_Generic_GJ" postsynapticPopulation="DB5" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../DB5/0/"/>
</projection>
<projection id="NC_AVAR_DB6_FMRFamide" postsynapticPopulation="DB6" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../DB6/0/"/>
</projection>
<projection id="NC_AVAR_LUAL_FMRFamide" postsynapticPopulation="LUAL" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../LUAL/0/"/>
</projection>
<projection id="NC_AVAR_LUAR_FMRFamide" postsynapticPopulation="LUAR" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../LUAR/0/"/>
</projection>
<projection id="NC_AVAR_PDEL_FMRFamide" postsynapticPopulation="PDEL" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../PDEL/0/"/>
</projection>
<projection id="NC_AVAR_PDER_FMRFamide" postsynapticPopulation="PDER" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../PDER/0/"/>
</projection>
<projection id="NC_AVAR_PVCL_FMRFamide" postsynapticPopulation="PVCL" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../PVCL/0/"/>
</projection>
<projection id="NC_AVAR_PVCR_Generic_GJ" postsynapticPopulation="PVCR" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../PVCR/0/"/>
</projection>
<projection id="NC_AVAR_PVCR_FMRFamide" postsynapticPopulation="PVCR" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../PVCR/0/"/>
</projection>
<projection id="NC_AVAR_RIGL_FMRFamide" postsynapticPopulation="RIGL" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../RIGL/0/"/>
</projection>
<projection id="NC_AVAR_RIML_Generic_GJ" postsynapticPopulation="RIML" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../RIML/0/"/>
</projection>
<projection id="NC_AVAR_RIMR_Generic_GJ" postsynapticPopulation="RIMR" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../RIMR/0/"/>
</projection>
<projection id="NC_AVAR_SABD_FMRFamide" postsynapticPopulation="SABD" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../SABD/0/"/>
</projection>
<projection id="NC_AVAR_SABVL_FMRFamide" postsynapticPopulation="SABVL" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../SABVL/0/"/>
</projection>
<projection id="NC_AVAR_SABVL_Generic_GJ" postsynapticPopulation="SABVL" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../SABVL/0/"/>
</projection>
<projection id="NC_AVAR_SABVR_Generic_GJ" postsynapticPopulation="SABVR" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../SABVR/0/"/>
</projection>
<projection id="NC_AVAR_URYDR_Generic_GJ" postsynapticPopulation="URYDR" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../URYDR/0/"/>
</projection>
<projection id="NC_AVAR_URYVL_Generic_GJ" postsynapticPopulation="URYVL" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../URYVL/0/"/>
</projection>
<projection id="NC_AVAR_VA10_Generic_GJ" postsynapticPopulation="VA10" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../VA10/0/"/>
</projection>
<projection id="NC_AVAR_VA10_FMRFamide" postsynapticPopulation="VA10" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../VA10/0/"/>
</projection>
<projection id="NC_AVAR_VA11_Generic_GJ" postsynapticPopulation="VA11" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../VA11/0/"/>
</projection>
<projection id="NC_AVAR_VA11_FMRFamide" postsynapticPopulation="VA11" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../VA11/0/"/>
</projection>
<projection id="NC_AVAR_VA12_Generic_GJ" postsynapticPopulation="VA12" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../VA12/0/"/>
</projection>
<projection id="NC_AVAR_VA2_FMRFamide" postsynapticPopulation="VA2" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../VA2/0/"/>
</projection>
<projection id="NC_AVAR_VA3_Generic_GJ" postsynapticPopulation="VA3" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../VA3/0/"/>
</projection>
<projection id="NC_AVAR_VA3_FMRFamide" postsynapticPopulation="VA3" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../VA3/0/"/>
</projection>
<projection id="NC_AVAR_VA4_FMRFamide" postsynapticPopulation="VA4" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../VA4/0/"/>
</projection>
<projection id="NC_AVAR_VA4_Generic_GJ" postsynapticPopulation="VA4" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../VA4/0/"/>
</projection>
<projection id="NC_AVAR_VA5_FMRFamide" postsynapticPopulation="VA5" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../VA5/0/"/>
</projection>
<projection id="NC_AVAR_VA5_Generic_GJ" postsynapticPopulation="VA5" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../VA5/0/"/>
</projection>
<projection id="NC_AVAR_VA6_Generic_GJ" postsynapticPopulation="VA6" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../VA6/0/"/>
</projection>
<projection id="NC_AVAR_VA6_FMRFamide" postsynapticPopulation="VA6" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../VA6/0/"/>
</projection>
<projection id="NC_AVAR_VA7_Generic_GJ" postsynapticPopulation="VA7" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../VA7/0/"/>
</projection>
<projection id="NC_AVAR_VA8_Generic_GJ" postsynapticPopulation="VA8" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../VA8/0/"/>
</projection>
<projection id="NC_AVAR_VA8_FMRFamide" postsynapticPopulation="VA8" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../VA8/0/"/>
</projection>
<projection id="NC_AVAR_VA9_FMRFamide" postsynapticPopulation="VA9" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../VA9/0/"/>
</projection>
<projection id="NC_AVAR_VB9_Generic_GJ" postsynapticPopulation="VB9" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../VB9/0/"/>
</projection>
<projection id="NC_AVAR_VD13_FMRFamide" postsynapticPopulation="VD13" presynapticPopulation="AVAR" synapse="">
<connection id="0" preCellId="../AVAR/0/" postCellId="../VD13/0/"/>
</projection>
<projection id="NC_AVBL_AQR_Generic_GJ" postsynapticPopulation="AQR" presynapticPopulation="AVBL" synapse="">
<connection id="0" preCellId="../AVBL/0/" postCellId="../AQR/0/"/>
</projection>
<projection id="NC_AVBL_AS10_Glutamate" postsynapticPopulation="AS10" presynapticPopulation="AVBL" synapse="">
<connection id="0" preCellId="../AVBL/0/" postCellId="../AS10/0/"/>
</projection>
<projection id="NC_AVBL_AS3_Glutamate" postsynapticPopulation="AS3" presynapticPopulation="AVBL" synapse="">
<connection id="0" preCellId="../AVBL/0/" postCellId="../AS3/0/"/>
</projection>
<projection id="NC_AVBL_AS4_Glutamate" postsynapticPopulation="AS4" presynapticPopulation="AVBL" synapse="">
<connection id="0" preCellId="../AVBL/0/" postCellId="../AS4/0/"/>
</projection>
<projection id="NC_AVBL_AS5_Glutamate" postsynapticPopulation="AS5" presynapticPopulation="AVBL" synapse="">
<connection id="0" preCellId="../AVBL/0/" postCellId="../AS5/0/"/>
</projection>
<projection id="NC_AVBL_AS6_Glutamate" postsynapticPopulation="AS6" presynapticPopulation="AVBL" synapse="">
<connection id="0" preCellId="../AVBL/0/" postCellId="../AS6/0/"/>
</projection>
<projection id="NC_AVBL_AS7_Generic_GJ" postsynapticPopulation="AS7" presynapticPopulation="AVBL" synapse="">
<connection id="0" preCellId="../AVBL/0/" postCellId="../AS7/0/"/>
</projection>
<projection id="NC_AVBL_AS9_Glutamate" postsynapticPopulation="AS9" presynapticPopulation="AVBL" synapse="">
<connection id="0" preCellId="../AVBL/0/" postCellId="../AS9/0/"/>
</projection>
<projection id="NC_AVBL_AVAL_Glutamate" postsynapticPopulation="AVAL" presynapticPopulation="AVBL" synapse="">
<connection id="0" preCellId="../AVBL/0/" postCellId="../AVAL/0/"/>
</projection>
<projection id="NC_AVBL_AVAR_Glutamate" postsynapticPopulation="AVAR" presynapticPopulation="AVBL" synapse="">
<connection id="0" preCellId="../AVBL/0/" postCellId="../AVAR/0/"/>
</projection>
<projection id="NC_AVBL_AVBR_Glutamate" postsynapticPopulation="AVBR" presynapticPopulation="AVBL" synapse="">
<connection id="0" preCellId="../AVBL/0/" postCellId="../AVBR/0/"/>
</projection>
<projection id="NC_AVBL_AVBR_Generic_GJ" postsynapticPopulation="AVBR" presynapticPopulation="AVBL" synapse="">
<connection id="0" preCellId="../AVBL/0/" postCellId="../AVBR/0/"/>
</projection>
<projection id="NC_AVBL_AVDL_Glutamate" postsynapticPopulation="AVDL" presynapticPopulation="AVBL" synapse="">
<connection id="0" preCellId="../AVBL/0/" postCellId="../AVDL/0/"/>
</projection>
<projection id="NC_AVBL_AVDR_Glutamate" postsynapticPopulation="AVDR" presynapticPopulation="AVBL" synapse="">
<connection id="0" preCellId="../AVBL/0/" postCellId="../AVDR/0/"/>
</projection>
<projection id="NC_AVBL_AVEL_Glutamate" postsynapticPopulation="AVEL" presynapticPopulation="AVBL" synapse="">
<connection id="0" preCellId="../AVBL/0/" postCellId="../AVEL/0/"/>
</projection>
<projection id="NC_AVBL_AVER_Glutamate" postsynapticPopulation="AVER" presynapticPopulation="AVBL" synapse="">
<connection id="0" preCellId="../AVBL/0/" postCellId="../AVER/0/"/>
</projection>
<projection id="NC_AVBL_AVL_Glutamate" postsynapticPopulation="AVL" presynapticPopulation="AVBL" synapse="">
<connection id="0" preCellId="../AVBL/0/" postCellId="../AVL/0/"/>
</projection>
<projection id="NC_AVBL_DB3_Generic_GJ" postsynapticPopulation="DB3" presynapticPopulation="AVBL" synapse="">
<connection id="0" preCellId="../AVBL/0/" postCellId="../DB3/0/"/>
</projection>
<projection id="NC_AVBL_DB4_Generic_GJ" postsynapticPopulation="DB4" presynapticPopulation="AVBL" synapse="">
<connection id="0" preCellId="../AVBL/0/" postCellId="../DB4/0/"/>
</projection>
<projection id="NC_AVBL_DB5_Generic_GJ" postsynapticPopulation="DB5" presynapticPopulation="AVBL" synapse="">
<connection id="0" preCellId="../AVBL/0/" postCellId="../DB5/0/"/>
</projection>
<projection id="NC_AVBL_DB6_Generic_GJ" postsynapticPopulation="DB6" presynapticPopulation="AVBL" synapse="">
<connection id="0" preCellId="../AVBL/0/" postCellId="../DB6/0/"/>
</projection>
<projection id="NC_AVBL_DB7_Generic_GJ" postsynapticPopulation="DB7" presynapticPopulation="AVBL" synapse="">
<connection id="0" preCellId="../AVBL/0/" postCellId="../DB7/0/"/>
</projection>
<projection id="NC_AVBL_DVA_Generic_GJ" postsynapticPopulation="DVA" presynapticPopulation="AVBL" synapse="">
<connection id="0" preCellId="../AVBL/0/" postCellId="../DVA/0/"/>
</projection>
<projection id="NC_AVBL_PVNR_Glutamate" postsynapticPopulation="PVNR" presynapticPopulation="AVBL" synapse="">
<connection id="0" preCellId="../AVBL/0/" postCellId="../PVNR/0/"/>
</projection>
<projection id="NC_AVBL_RIBL_Generic_GJ" postsynapticPopulation="RIBL" presynapticPopulation="AVBL" synapse="">
<connection id="0" preCellId="../AVBL/0/" postCellId="../RIBL/0/"/>
</projection>
<projection id="NC_AVBL_RIBR_Generic_GJ" postsynapticPopulation="RIBR" presynapticPopulation="AVBL" synapse="">
<connection id="0" preCellId="../AVBL/0/" postCellId="../RIBR/0/"/>
</projection>
<projection id="NC_AVBL_RID_Generic_GJ" postsynapticPopulation="RID" presynapticPopulation="AVBL" synapse="">
<connection id="0" preCellId="../AVBL/0/" postCellId="../RID/0/"/>
</projection>
<projection id="NC_AVBL_SDQR_Generic_GJ" postsynapticPopulation="SDQR" presynapticPopulation="AVBL" synapse="">
<connection id="0" preCellId="../AVBL/0/" postCellId="../SDQR/0/"/>
</projection>
<projection id="NC_AVBL_SIBVL_Generic_GJ" postsynapticPopulation="SIBVL" presynapticPopulation="AVBL" synapse="">
<connection id="0" preCellId="../AVBL/0/" postCellId="../SIBVL/0/"/>
</projection>
<projection id="NC_AVBL_VA10_Glutamate" postsynapticPopulation="VA10" presynapticPopulation="AVBL" synapse="">
<connection id="0" preCellId="../AVBL/0/" postCellId="../VA10/0/"/>
</projection>
<projection id="NC_AVBL_VA2_Glutamate" postsynapticPopulation="VA2" presynapticPopulation="AVBL" synapse="">
<connection id="0" preCellId="../AVBL/0/" postCellId="../VA2/0/"/>
</projection>
<projection id="NC_AVBL_VA7_Glutamate" postsynapticPopulation="VA7" presynapticPopulation="AVBL" synapse="">
<connection id="0" preCellId="../AVBL/0/" postCellId="../VA7/0/"/>
</projection>
<projection id="NC_AVBL_VB1_Generic_GJ" postsynapticPopulation="VB1" presynapticPopulation="AVBL" synapse="">
<connection id="0" preCellId="../AVBL/0/" postCellId="../VB1/0/"/>
</projection>
<projection id="NC_AVBL_VB10_Generic_GJ" postsynapticPopulation="VB10" presynapticPopulation="AVBL" synapse="">
<connection id="0" preCellId="../AVBL/0/" postCellId="../VB10/0/"/>
</projection>
<projection id="NC_AVBL_VB11_Generic_GJ" postsynapticPopulation="VB11" presynapticPopulation="AVBL" synapse="">
<connection id="0" preCellId="../AVBL/0/" postCellId="../VB11/0/"/>
</projection>
<projection id="NC_AVBL_VB2_Glutamate" postsynapticPopulation="VB2" presynapticPopulation="AVBL" synapse="">
<connection id="0" preCellId="../AVBL/0/" postCellId="../VB2/0/"/>
</projection>
<projection id="NC_AVBL_VB2_Generic_GJ" postsynapticPopulation="VB2" presynapticPopulation="AVBL" synapse="">
<connection id="0" preCellId="../AVBL/0/" postCellId="../VB2/0/"/>
</projection>
<projection id="NC_AVBL_VB4_Generic_GJ" postsynapticPopulation="VB4" presynapticPopulation="AVBL" synapse="">
<connection id="0" preCellId="../AVBL/0/" postCellId="../VB4/0/"/>
</projection>
<projection id="NC_AVBL_VB5_Generic_GJ" postsynapticPopulation="VB5" presynapticPopulation="AVBL" synapse="">
<connection id="0" preCellId="../AVBL/0/" postCellId="../VB5/0/"/>
</projection>
<projection id="NC_AVBL_VB6_Generic_GJ" postsynapticPopulation="VB6" presynapticPopulation="AVBL" synapse="">
<connection id="0" preCellId="../AVBL/0/" postCellId="../VB6/0/"/>
</projection>
<projection id="NC_AVBL_VB7_Generic_GJ" postsynapticPopulation="VB7" presynapticPopulation="AVBL" synapse="">
<connection id="0" preCellId="../AVBL/0/" postCellId="../VB7/0/"/>
</projection>
<projection id="NC_AVBL_VB8_Generic_GJ" postsynapticPopulation="VB8" presynapticPopulation="AVBL" synapse="">
<connection id="0" preCellId="../AVBL/0/" postCellId="../VB8/0/"/>
</projection>
<projection id="NC_AVBL_VB9_Generic_GJ" postsynapticPopulation="VB9" presynapticPopulation="AVBL" synapse="">
<connection id="0" preCellId="../AVBL/0/" postCellId="../VB9/0/"/>
</projection>
<projection id="NC_AVBL_VC3_Glutamate" postsynapticPopulation="VC3" presynapticPopulation="AVBL" synapse="">
<connection id="0" preCellId="../AVBL/0/" postCellId="../VC3/0/"/>
</projection>
<projection id="NC_AVBR_AS1_Glutamate" postsynapticPopulation="AS1" presynapticPopulation="AVBR" synapse="">
<connection id="0" preCellId="../AVBR/0/" postCellId="../AS1/0/"/>
</projection>
<projection id="NC_AVBR_AS10_Glutamate" postsynapticPopulation="AS10" presynapticPopulation="AVBR" synapse="">
<connection id="0" preCellId="../AVBR/0/" postCellId="../AS10/0/"/>
</projection>
<projection id="NC_AVBR_AS3_Glutamate" postsynapticPopulation="AS3" presynapticPopulation="AVBR" synapse="">
<connection id="0" preCellId="../AVBR/0/" postCellId="../AS3/0/"/>
</projection>
<projection id="NC_AVBR_AS4_Glutamate" postsynapticPopulation="AS4" presynapticPopulation="AVBR" synapse="">
<connection id="0" preCellId="../AVBR/0/" postCellId="../AS4/0/"/>
</projection>
<projection id="NC_AVBR_AS5_Glutamate" postsynapticPopulation="AS5" presynapticPopulation="AVBR" synapse="">
<connection id="0" preCellId="../AVBR/0/" postCellId="../AS5/0/"/>
</projection>
<projection id="NC_AVBR_AS6_Generic_GJ" postsynapticPopulation="AS6" presynapticPopulation="AVBR" synapse="">
<connection id="0" preCellId="../AVBR/0/" postCellId="../AS6/0/"/>
</projection>
<projection id="NC_AVBR_AS6_Glutamate" postsynapticPopulation="AS6" presynapticPopulation="AVBR" synapse="">
<connection id="0" preCellId="../AVBR/0/" postCellId="../AS6/0/"/>
</projection>
<projection id="NC_AVBR_AS7_Glutamate" postsynapticPopulation="AS7" presynapticPopulation="AVBR" synapse="">
<connection id="0" preCellId="../AVBR/0/" postCellId="../AS7/0/"/>
</projection>
<projection id="NC_AVBR_AS7_Generic_GJ" postsynapticPopulation="AS7" presynapticPopulation="AVBR" synapse="">
<connection id="0" preCellId="../AVBR/0/" postCellId="../AS7/0/"/>
</projection>
<projection id="NC_AVBR_AVAL_Glutamate" postsynapticPopulation="AVAL" presynapticPopulation="AVBR" synapse="">
<connection id="0" preCellId="../AVBR/0/" postCellId="../AVAL/0/"/>
</projection>
<projection id="NC_AVBR_AVAR_Glutamate" postsynapticPopulation="AVAR" presynapticPopulation="AVBR" synapse="">
<connection id="0" preCellId="../AVBR/0/" | |
self.shape:
utcs = [utc] * self.shape[0]
argsets = zip(year, month, day, hour, minute, second, milli, utcs)
dt = array([datetime(*args) for args in argsets])
else:
dt = datetime(year, month, day, hour, minute, second, milli, utc)
return dt, leap_second
def utc_iso(self, places=0):
    """Return an ISO 8601 string like ``2014-01-18T01:35:38Z`` in UTC.

    `places` -- number of digits of fractional seconds to include
    (default 0, which rounds to the nearest whole second).

    If this Julian date is an array of dates, then a sequence of
    strings is returned instead of a single string.
    """
    if places:
        # Add half of the final displayed digit so that truncation
        # below behaves as round-half-up.
        power_of_ten = 10 ** places
        offset = _half_second / power_of_ten
        year, month, day, hour, minute, second = self._utc_tuple(offset)
        second, fraction = divmod(second, 1.0)
        fraction *= power_of_ten
        # Two-stage template: the %% escapes survive the first
        # interpolation, which only burns in the digit count.
        # (Renamed from `format` to avoid shadowing the builtin.)
        template = '%%04d-%%02d-%%02dT%%02d:%%02d:%%02d.%%0%ddZ' % places
        args = (year, month, day, hour, minute, second, fraction)
    else:
        template = '%04d-%02d-%02dT%02d:%02d:%02dZ'
        args = self._utc_tuple(_half_second)
    if self.shape:
        # An array of times: format each element separately.
        return [template % tup for tup in zip(*args)]
    else:
        return template % args
def utc_jpl(self):
    """Convert to a string like ``A.D. 2014-Jan-18 01:35:37.5000 UT``.

    Returns a string for this date and time in UTC, in the format
    used by the JPL HORIZONS system.  If this Julian date is an
    array of dates, then a sequence of strings is returned instead
    of a single string.
    """
    # Round half-up at the fourth fractional-second digit.
    offset = _half_second / 1e4
    year, month, day, hour, minute, second = self._utc_tuple(offset)
    second, fraction = divmod(second, 1.0)
    fraction *= 1e4
    # HORIZONS prints years before 1 C.E. as positive "B.C." years;
    # `bc` is boolean, so `year - bc` shifts year 0 to 1 B.C., etc.
    bc = year < 1
    year = abs(year - bc)
    era = where(bc, 'B.C.', 'A.D.')
    # Renamed from `format` to avoid shadowing the builtin.
    template = '%s %04d-%s-%02d %02d:%02d:%02d.%04d UT'
    args = (era, year, _months[month], day, hour, minute, second, fraction)
    if self.shape:
        return [template % tup for tup in zip(*args)]
    else:
        return template % args
def utc_strftime(self, format):
    """Format this UTC time according to a Python date-formatting string.

    This internally calls the Python ``strftime()`` routine from the
    Standard Library ``time()`` module, for which you can find a
    quick reference at ``http://strftime.org/``.  If this Julian
    date is an array of dates, then a sequence of strings is
    returned instead of a single string.
    """
    # Round to the nearest whole second before splitting into fields.
    tup = self._utc_tuple(_half_second)
    year, month, day, hour, minute, second = tup
    second = second.astype(int)
    zero = zeros_like(year)
    # Build struct_time-shaped 9-tuples.  Weekday, yearday and DST are
    # zeroed, so directives such as %a, %j or %Z will not be accurate.
    tup = (year, month, day, hour, minute, second, zero, zero, zero)
    if self.shape:
        return [strftime(format, item) for item in zip(*tup)]
    else:
        return strftime(format, tup)
def _utc_tuple(self, offset=0.0):
    """Return UTC as (year, month, day, hour, minute, second.fraction).

    The `offset` is added to the UTC time before it is split into
    its components.  This is useful if the user is going to round
    the result before displaying it.  If the result is going to be
    displayed as seconds, for example, set `offset` to half a second
    and then throw away the fraction; if the result is going to be
    displayed as minutes, set `offset` to thirty seconds and then
    throw away the seconds; and so forth.
    """
    tai = self.tai + offset
    leap_dates, leap_offsets = self.cache.run(usno_leapseconds)
    # TAI instants at which each cumulative leap-second offset begins.
    leap_reverse_dates = leap_dates + leap_offsets / DAY_S
    i = searchsorted(leap_reverse_dates, tai, 'right')
    # Subtract the applicable TAI-UTC offset to obtain UTC.
    j = tai - leap_offsets[i] / DAY_S
    # Julian days begin at noon, hence the 0.5 shift before splitting
    # into a whole day number and the fraction past midnight.
    whole, fraction = divmod(j + 0.5, 1.0)
    whole = whole.astype(int)
    year, month, day = calendar_date(whole)
    hour, hfrac = divmod(fraction * 24.0, 1.0)
    minute, second = divmod(hfrac * 3600.0, 60.0)
    # Inside a leap second the UTC clock reads 23:59:60, so bump the
    # seconds value by one (boolean arithmetic) when we are within one.
    is_leap_second = j < leap_dates[i-1]
    second += is_leap_second
    return year, month, day, hour.astype(int), minute.astype(int), second
def _utc_float(self):
    """Express this time as a floating point Julian date in UTC."""
    t_tai = self.tai
    dates, offsets = self.cache.run(usno_leapseconds)
    # TAI values at which each leap-second offset takes effect.
    boundaries = dates + offsets / DAY_S
    index = searchsorted(boundaries, t_tai, 'right')
    return t_tai - offsets[index] / DAY_S
def __getattr__(self, name):
    # Lazily compute and cache expensive derived quantities.  Each
    # result is assigned onto the instance, so __getattr__ only fires
    # the first time a given attribute is requested.

    # Rotation matrices.
    if name == 'P':
        # Precession matrix for this time's TDB.
        self.P = P = compute_precession(self.tdb)
        return P
    if name == 'PT':
        # P with its first two axes swapped: its transpose.
        self.PT = PT = rollaxis(self.P, 1)
        return PT
    if name == 'N':
        # Nutation matrix.
        self.N = N = compute_nutation(self)
        return N
    if name == 'NT':
        self.NT = NT = rollaxis(self.N, 1)
        return NT
    if name == 'M':
        # Composite rotation: nutation x precession x frame bias B.
        self.M = M = einsum('ij...,jk...,kl...->il...', self.N, self.P, B)
        return M
    if name == 'MT':
        self.MT = MT = rollaxis(self.M, 1)
        return MT

    # Conversion between timescales.
    if name == 'tai':
        # TAI differs from TT by the fixed offset tt_minus_tai (days).
        self.tai = tai = self.tt - tt_minus_tai
        return tai
    if name == 'utc':
        utc = self._utc_tuple()
        # For an array of times, present the component tuple as an array.
        utc = array(utc) if self.shape else utc
        self.utc = utc = utc
        return utc
    if name == 'tdb':
        tt = self.tt
        self.tdb = tdb = tt + tdb_minus_tt(tt) / DAY_S
        return tdb
    if name == 'ut1':
        # UT1 requires the externally supplied delta_t, in seconds.
        self.ut1 = ut1 = self.tt - self.delta_t / DAY_S
        return ut1
    if name == 'gmst':
        # Greenwich mean sidereal time.
        self.gmst = gmst = sidereal_time(self)
        return gmst
    if name == 'gast':
        # Apparent sidereal time: GMST plus the equation of the
        # equinoxes, earth_tilt()[2] divided by 3600.0 -- presumably
        # converting seconds of time to hours; TODO confirm units.
        self.gast = gast = self.gmst + earth_tilt(self)[2] / 3600.0
        return gast
    raise AttributeError('no such attribute %r' % name)
def __eq__(self, other_jd):
    """Two Julian dates are equal when their Terrestrial Times match."""
    return other_jd.tt == self.tt
def now():
    """Build a `JulianDate` for the current moment.

    For the return value to be correct, your operating system time and
    timezone settings must be set so that the Python Standard Library
    constructor ``datetime.datetime.utcnow()`` returns a correct UTC
    date and time.
    """
    current = datetime.utcnow().replace(tzinfo=utc)
    return JulianDate(utc=current)
def julian_day(year, month=1, day=1):
    """Given a proleptic Gregorian calendar date, return a Julian day int."""
    # Treat January and February as months 13 and 14 of the previous
    # year; the boolean works arithmetically for scalars and arrays.
    before_march = month < 3
    y = year + 4800 - before_march
    m = month - 2 + before_march * 12
    return (day
            + 1461 * y // 4
            + 367 * m // 12
            - 3 * ((y + 100) // 100) // 4
            - 32075)
def julian_date(year, month=1, day=1, hour=0, minute=0, second=0.0):
    """Given a proleptic Gregorian calendar date, return a Julian date float."""
    # The integer Julian day names noon, so midnight is 0.5 earlier.
    day_fraction = (hour * 3600.0 + minute * 60.0 + second) / DAY_S
    return julian_day(year, month, day) - 0.5 + day_fraction
def calendar_date(jd_integer):
    """Convert Julian Day `jd_integer` into a Gregorian (year, month, day)."""
    # Standard integer inverse of the Julian-day formula; works on
    # plain ints and on integer arrays alike.
    a = jd_integer + 68569
    quad_century = 4 * a // 146097
    a = a - (146097 * quad_century + 3) // 4
    yr = 4000 * (a + 1) // 1461001
    a = a - 1461 * yr // 4 + 31
    mo = 80 * a // 2447
    day = a - 2447 * mo // 80
    shift = mo // 11
    month = mo + 2 - 12 * shift
    year = 100 * (quad_century - 49) + yr + shift
    return year, month, day
def tdb_minus_tt(jd_tdb):
    """Computes how far TDB is in advance of TT, given TDB.

    Given that the two time scales never diverge by more than 2ms, TT
    can also be given as the argument to perform the conversion in the
    other direction.

    Returns the difference in seconds.
    """
    # Julian centuries since J2000.0.
    t = (jd_tdb - T0) / 36525.0
    # USNO Circular 179, eq. 2.6: dominant periodic terms of the
    # relativistic clock correction, in seconds.  The term order is
    # kept as published; do not re-group the float sum.
    return (0.001657 * sin ( 628.3076 * t + 6.2401)
          + 0.000022 * sin ( 575.3385 * t + 4.2970)
          + 0.000014 * sin (1256.6152 * t + 6.1969)
          + 0.000005 * sin ( 606.9777 * t + 4.0212)
          + 0.000005 * sin (  52.9691 * t + 0.4444)
          + 0.000002 * sin (  21.3299 * t + 5.5431)
          + 0.000010 * t * sin ( 628.3076 * t + 4.2490))
def usno_leapseconds(cache):
    """Download the USNO table of leap seconds as a ``(2, N+1)`` NumPy array.

    The array has two rows ``[leap_dates leap_offsets]``.  The first row
    is used to find where a given date ``jd`` falls in the table::

        index = np.searchsorted(leap_dates, jd, 'right')

    This can return a value from ``0`` to ``N``, allowing the
    corresponding UTC offset to be fetched with::

        offset = leap_offsets[index]

    The offset is the number of seconds that must be added to a UTC time
    to build the corresponding TAI time.
    """
    # NOTE(review): maia.usno.navy.mil has been decommissioned in the
    # past; confirm this URL still resolves.
    with cache.open_url('http://maia.usno.navy.mil/ser7/leapsec.dat') as f:
        lines = f.readlines()

    # Column 4 is the Julian date, column 6 the TAI-UTC offset.
    linefields = [line.split() for line in lines]
    dates = [float(fields[4]) for fields in linefields]
    offsets = [float(fields[6]) for fields in linefields]

    # Sentinel rows so searchsorted never runs off either end: any date
    # before the first entry (or after the last) still yields an offset.
    dates.insert(0, float('-inf'))
    dates.append(float('inf'))
    # Duplicate the earliest offset twice so indices 0 and 1 both map
    # to the pre-table value.
    offsets.insert(0, offsets[0])
    offsets.insert(1, offsets[0])

    return array([dates, offsets])
def _utc_datetime_to_tai(leap_dates, leap_offsets, dt):
    """Convert a timezone-aware ``datetime`` into a TAI Julian date.

    Raises ValueError with an explanatory complaint if `dt` is naive.
    """
    try:
        # NOTE(review): under Python 2 a naive datetime makes
        # astimezone() raise ValueError; Python 3 instead assumes the
        # local timezone -- confirm which interpreter this targets.
        utc_datetime = dt.astimezone(utc)
    except ValueError:
        raise ValueError(_naive_complaint)
    tup = utc_datetime.utctimetuple()
    year, month, day, hour, minute, second, wday, yday, dst = tup
    # Fold sub-second precision back in from the microsecond field.
    return _utc_to_tai(leap_dates, leap_offsets, year, month, day,
                       hour, minute, second + dt.microsecond / 1000000.00)
def _utc_date_to_tai(leap_dates, leap_offsets, d):
    """Convert a ``date`` (interpreted as midnight UTC) to a TAI Julian date."""
    return _utc_to_tai(leap_dates, leap_offsets, d.year, d.month, d.day)
def _utc_to_tai(leap_dates, leap_offsets, | |
<filename>apitest/api_test/api/automationReport.py<gh_stars>0
from django.core.exceptions import ObjectDoesNotExist
from api_test.common.auth import TokenAuthentication
from rest_framework.views import APIView
from django.db.models import Q
from django.core import serializers
from django.db import connection,connections
from django.db.models import F, Q, Case, When, Count,Sum,Value,IntegerField
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from api_test.common.api_response import JsonResponse
from api_test.models import Project,AutomationResult,AutomationResultFailDetail,ApiAutomationCoverage,Automation
from api_test.serializers import ProjectSerializer,AutomationResultSerializer,ApiAutomationCoverageSerializer
from api_test.common.jsonUtil import json
from api_test.common.auth import permission_required
import traceback
import logging
import datetime
import ast
class Automation_Summary(APIView):
    """Paginated per-automation pass/fail/duration summary."""
    authentication_classes = (TokenAuthentication,)
    permission_classes = ()
    def get(self, request):
        """Return an execution summary grouped by automation case.

        Query parameters: page, page_size, and optional project_id,
        start_time, end_time ('%Y-%m-%d').
        """
        try:
            page_size = int(request.GET.get("page_size", 20))
            page = int(request.GET.get("page", 1))
        except (TypeError, ValueError):
            return JsonResponse(code="999985", msg="page and page_size must be integer!")
        project_id = request.GET.get("project_id")
        start_time = request.GET.get("start_time")
        end_time = request.GET.get("end_time")
        if end_time:
            # Advance the end date by one day so the filter below is
            # inclusive of the whole final day.
            end_time=(datetime.datetime.strptime(end_time,'%Y-%m-%d')+datetime.timedelta(days=1)).strftime("%Y-%m-%d")
        try:
            kwargs={}
            # Exclude reuse- and list-type automations and debug runs.
            args=(~Q(automation__type= 'reuse') & ~Q(automation__type= 'list') & ~Q(description= '่ฐ่ฏ'))
            # Top-level automation rows only (no api/step children).
            if start_time and end_time:
                kwargs={"automation__isnull":False,"api__isnull":True,"step__isnull":True,"testTime__gte":start_time,"testTime__lte":end_time}
            else:
                kwargs={"automation__isnull":False,"api__isnull":True,"step__isnull":True}
            if project_id:
                kwargs["project_id"]=project_id
            # Per-automation aggregates: pass count, run count, duration.
            results=AutomationResult.objects.filter(args,**kwargs).values("automation_id","name").annotate(passed=Sum(Case(When(result="PASS",then=Value(1)),default=Value(0),output_field=IntegerField())),total=Count("id"),duration=Sum("duration")).order_by("-total")
        except:
            traceback.print_exc()
            return JsonResponse(code="999998", msg="ๅคฑ่ดฅ๏ผ")
        paginator = Paginator(results, page_size)  # paginator object
        pages = paginator.num_pages  # total number of pages
        total=len(results)
        try:
            obm = paginator.page(page)
        except PageNotAnInteger:
            obm = paginator.page(1)
        except EmptyPage:
            obm = paginator.page(paginator.num_pages)
        # data=serializers.serialize("json",obm.object_list,ensure_ascii=False)
        return JsonResponse(data={"data": obm.object_list,
                                  "page": page,
                                  "pages": pages,
                                  "total": total
                                  }, code="999999", msg="ๆๅ!")
class Automation_Result(APIView):
    """Execution results for one automation, or for one trace id."""
    authentication_classes = (TokenAuthentication,)
    permission_classes = ()
    def get(self, request):
        """Return automation execution results.

        Looks up either by `automation_id` (paginated history of one
        case) or by `trace` (all results of one run, plus its summary
        report when present).
        """
        try:
            page_size = int(request.GET.get("page_size", 20))
            page = int(request.GET.get("page", 1))
        except (TypeError, ValueError):
            return JsonResponse(code="999985", msg="page and page_size must be integer!")
        automation_id = request.GET.get("automation_id")
        start_time = request.GET.get("start_time")
        end_time = request.GET.get("end_time")
        if end_time:
            # Make the end date inclusive by advancing it one day.
            end_time=(datetime.datetime.strptime(end_time,'%Y-%m-%d')+datetime.timedelta(days=1)).strftime("%Y-%m-%d")
        trace=request.GET.get("trace")
        try:
            kwargs={}
            args=()
            if automation_id:
                if start_time and end_time:
                    kwargs={"automation":automation_id,"step__isnull":True,"api__isnull":True,"testTime__gte":start_time,"testTime__lte":end_time}
                else:
                    kwargs={"automation":automation_id,"step__isnull":True,"api__isnull":True}
                results=AutomationResult.objects.filter(**kwargs).order_by("-id")
                paginator = Paginator(results, page_size)  # paginator object
                pages = paginator.num_pages  # total number of pages
                total=len(results)
                obm = paginator.page(page)
                serialize = AutomationResultSerializer(obm, many=True)
                data={"data":serialize.data,"page":page,"pages":pages,"total":total}
            elif trace:
                if start_time and end_time:
                    args=(~Q(automation__type= 'list'))
                    kwargs={"automation__isnull":False,"trace":trace,"step__isnull":True,"api__isnull":True,"testTime__gte":start_time,"testTime__lte":end_time}
                else:
                    args=(~Q(automation__type= 'list'))
                    kwargs={"automation__isnull":False,"trace":trace,"step__isnull":True,"api__isnull":True}
                results=AutomationResult.objects.filter(args,**kwargs).order_by("-id")
                total=len(results)
                # A trace is one run: return everything on a single page.
                page_size=total
                paginator = Paginator(results, page_size)  # paginator object
                pages = paginator.num_pages  # total number of pages
                obm = paginator.page(page)
                serialize = AutomationResultSerializer(obm, many=True)
                data={"data":serialize.data,"page":page,"pages":pages,"page_size":page_size,"total":total}
                # The run-level summary row has no automation/step/api.
                result=AutomationResult.objects.filter(trace=trace,automation__isnull=True,step__isnull=True,api__isnull=True).order_by("-id")
                if len(result)>0:
                    data["report"]=json.loads(result[0].details)
            # NOTE(review): if neither automation_id nor trace is given,
            # `data` is unbound here and the bare except below converts
            # the NameError into a generic failure response.
            return JsonResponse(data=data,code="999999",msg="ๆๅ!")
        except:
            traceback.print_exc()
            return JsonResponse(code="999998", msg="ๅคฑ่ดฅ๏ผ")
class Automations_Result(APIView):
    """Paginated run-level results, filtered by run category."""
    authentication_classes = (TokenAuthentication,)
    permission_classes = ()
    def get(self, request):
        """Return run-level automation results.

        `type` selects the category: "publish" (pre-release runs,
        matched by description), "task" (other scheduled runs), or
        "list" (list-type automations).  Optional project_id and
        start_time/end_time filters apply.
        """
        try:
            page_size = int(request.GET.get("page_size", 20))
            page = int(request.GET.get("page", 1))
        except (TypeError, ValueError):
            return JsonResponse(code="999985", msg="page and page_size must be integer!")
        project_id = request.GET.get("project_id")
        start_time = request.GET.get("start_time")
        end_time = request.GET.get("end_time")
        if end_time:
            # Make the end date inclusive by advancing it one day.
            end_time=(datetime.datetime.strptime(end_time,'%Y-%m-%d')+datetime.timedelta(days=1)).strftime("%Y-%m-%d")
        type=request.GET.get("type", "publish")
        try:
            kwargs={}
            args=()
            if type=="publish":
                # Pre-release runs are tagged in the description field.
                if start_time and end_time:
                    kwargs={"description__contains":"ไธ็บฟๅ","automation__isnull":True,"testTime__gte":start_time,"testTime__lte":end_time}
                else:
                    kwargs={"description__contains":"ไธ็บฟๅ","automation__isnull":True}
            elif type=="task":
                # Scheduled runs: everything not tagged as pre-release.
                if start_time and end_time:
                    args=(~Q(description__contains="ไธ็บฟๅ"))
                    kwargs={"automation__isnull":True,"testTime__gte":start_time,"testTime__lte":end_time}
                else:
                    args=(~Q(description__contains="ไธ็บฟๅ"))
                    kwargs={"automation__isnull":True}
            elif type=="list":
                if start_time and end_time:
                    kwargs={"automation__type":"list","testTime__gte":start_time,"testTime__lte":end_time}
                else:
                    kwargs={"automation__type":"list"}
            if project_id:
                kwargs["project_id"]=project_id
            if len(args)>0:
                results=AutomationResult.objects.filter(args,**kwargs).order_by("-id")
            else:
                results=AutomationResult.objects.filter(**kwargs).order_by("-id")
        except:
            traceback.print_exc()
            return JsonResponse(code="999998", msg="ๅคฑ่ดฅ๏ผ")
        paginator = Paginator(results, page_size)  # paginator object
        pages = paginator.num_pages  # total number of pages
        total=len(results)
        try:
            obm = paginator.page(page)
        except PageNotAnInteger:
            obm = paginator.page(1)
        except EmptyPage:
            obm = paginator.page(paginator.num_pages)
        serialize = AutomationResultSerializer(obm, many=True)
        return JsonResponse(data={"data": serialize.data,
                                  "page": page,
                                  "pages": pages,
                                  "total": total
                                  }, code="999999", msg="ๆๅ!")
class Automation_Detail(APIView):
    """Step/api-level detail for one automation run."""
    authentication_classes = (TokenAuthentication,)
    permission_classes = ()
    def get(self, request):
        """Return the per-step results and the parent summary row
        for the run identified by `automation_id` + `trace`.
        """
        automation_id = request.GET.get("automation_id")
        trace = request.GET.get("trace")
        try:
            # Child rows only: anything attached to a step or an api.
            results=AutomationResult.objects.values("id","step_id","name","testTime","duration","result","details").filter(Q(step__isnull=False)|Q(api__isnull=False),automation=automation_id,trace=trace).order_by("id")
            for result in results:
                # `details` is stored as a JSON string; decode in place.
                result["details"]=json.loads(result["details"])
            # The run-level parent row carries neither step nor api.
            parent=AutomationResult.objects.filter(automation=automation_id,trace=trace,step__isnull=True,api__isnull=True).order_by("-id").first()
            parentResult=AutomationResultSerializer(parent)
            return JsonResponse(code="999999", msg="ๆๅ๏ผ", data={"results":results,"parentResult":parentResult.data})
        except:
            logging.error(traceback.format_exc())
            return JsonResponse(code="999998", msg="ๅคฑ่ดฅ๏ผ")
class ApiAutomatedCoverage(APIView):
    """Paginated API automation-coverage records."""
    authentication_classes = (TokenAuthentication,)
    permission_classes = ()
    def get(self, request):
        """Return API automation coverage, optionally filtered by
        `project_id`, ordered by descending `num`.
        """
        try:
            page_size = int(request.GET.get("page_size", 20))
            page = int(request.GET.get("page", 1))
        except (TypeError, ValueError):
            return JsonResponse(code="999985", msg="page and page_size must be integer!")
        project_id = request.GET.get("project_id")
        try:
            if project_id:
                results=ApiAutomationCoverage.objects.filter(project=project_id).order_by("-num")
            else:
                results=ApiAutomationCoverage.objects.all().order_by("-num")
        except:
            traceback.print_exc()
            return JsonResponse(code="999998", msg="ๅคฑ่ดฅ๏ผ")
        paginator = Paginator(results, page_size)  # paginator object
        pages = paginator.num_pages  # total number of pages
        total=len(results)
        try:
            obm = paginator.page(page)
        except PageNotAnInteger:
            obm = paginator.page(1)
        except EmptyPage:
            obm = paginator.page(paginator.num_pages)
        serialize = ApiAutomationCoverageSerializer(obm, many=True)
        return JsonResponse(data={"data": serialize.data,
                                  "page": page,
                                  "pages": pages,
                                  "total": total
                                  }, code="999999", msg="ๆๅ!")
def daysBetween(date1, date2):
    """Return the signed number of whole days from `date1` to `date2`.

    Both arguments are '%Y-%m-%d' date strings.
    """
    fmt = "%Y-%m-%d"
    start = datetime.datetime.strptime(date1, fmt)
    end = datetime.datetime.strptime(date2, fmt)
    return (end - start).days
class Automation_Chart(APIView):
    """Chart data: run counts/pass rate over time plus failure breakdowns."""
    authentication_classes = (TokenAuthentication,)
    permission_classes = ()
    def get(self, request):
        """Return automation execution statistics for charting.

        Produces three series: runs & pass-rate bucketed by period
        (day/week/month/quarter/year), failures by type, and failures
        by severity.  Optional project_id and start_time/end_time
        filters apply.
        """
        project_id = request.GET.get("project_id")
        start_time = request.GET.get("start_time")
        end_time = request.GET.get("end_time")
        if end_time:
            # Make the end date inclusive by advancing it one day.
            end_time=(datetime.datetime.strptime(end_time,'%Y-%m-%d')+datetime.timedelta(days=1)).strftime("%Y-%m-%d")
        days=0 if not start_time or not end_time else daysBetween(start_time,end_time)
        # Pick the bucket size from the span: <=7 days -> daily, 8-30
        # days -> caller's choice (default weekly), otherwise monthly.
        period = request.GET.get("period","week") if 7<days<=30 else "day" if 0<days<=7 else "month"
        try:
            cursor = connection.cursor()
            # Two-stage templating: %-interpolation first splices in the
            # optional WHERE fragments, then .format() fills the values
            # and the period-specific date expression.
            sql="SELECT {date} as date,count(*) as num,round(sum(CASE WHEN result='PASS' THEN 1 ELSE 0 END)*100/count(*),2) as rate from api_test_automationresult result left join api_test_automation auto on result.automation_id=auto.id where %s result.description!='่ฐ่ฏ' and result.step_id is null and result.automation_id is not null and auto.type!='reuse' and auto.type!='list' %s GROUP BY date having num>0 order by date" % ("result.project_id={projectId} and" if project_id else "","and result.testTime >='{startDate}' AND result.testTime < '{endDate}'" if start_time and end_time else "")
            if period=="day":
                sql = sql.format(projectId=project_id,startDate=start_time,endDate=end_time,date="DATE_FORMAT( testTime, '%Y-%m-%d')")
            elif period=="week":
                # Bucket by the Monday of each week.
                sql = sql.format(projectId=project_id,startDate=start_time,endDate=end_time,date="date_sub(DATE_FORMAT( testTime, '%Y-%m-%d'),interval WEEKDAY(testTime) day)")
            elif period=="month":
                sql = sql.format(projectId=project_id,startDate=start_time,endDate=end_time,date="DATE_FORMAT( testTime, '%Y-%m')")
            elif period=="quarter":
                sql = sql.format(projectId=project_id,startDate=start_time,endDate=end_time,date="concat(year(testTime),' Q',QUARTER(testTime))")
            elif period=="year":
                sql = sql.format(projectId=project_id,startDate=start_time,endDate=end_time,date="year(testTime)")
            logging.info("ๆง่กsql: %s" % sql)
            cursor.execute(sql)
            # Fold the rows into the run-count / pass-rate line chart.
            line_db = cursor.fetchall()
            line_name = [line[0] for line in line_db]
            line_x = [line[1] if line[1] else 0 for line in line_db]
            line_x1 = [line[2] if line[2] else 0 for line in line_db]
            autoChart = {'line_name': line_name, 'line_x': line_x, 'line_x1': line_x1}
            # Display labels for the failure-type codes.
            FAIL_TYPE = {
                'code':'็ผ็ ',
                'env':'็ฏๅข',
                'data':'ๆฐๆฎ',
                'other':'ๅ
ถไป'
            }
            sql="SELECT detail.type as failType,count(*) as num from api_test_automationresult result right join api_test_automationresultfaildetail detail on result.id=detail.result_id where %s result.step_id is null %s GROUP BY failType having num>0 order by num desc" % ("result.project_id={projectId} and" if project_id else "","and result.testTime >='{startDate}' AND result.testTime < '{endDate}'" if start_time and end_time else "")
            sql = sql.format(projectId=project_id,startDate=start_time,endDate=end_time)
            logging.info("ๆง่กsql: %s" % sql)
            cursor.execute(sql)
            # Failures grouped by type, for a pie chart.
            line_db = cursor.fetchall()
            line_name = [FAIL_TYPE[line[0]] for line in line_db]
            line_data = [{"name":FAIL_TYPE[line[0]],"value":line[1]} for line in line_db]
            failTypeChart = {'line_name': line_name, 'line_data': line_data}
            # Display labels for the failure-severity codes.
            SEVERITY = {
                'fatal':'่ดๅฝ็',
                'critical':'ไธฅ้็',
                'major':'ไธ่ฌ็',
                'minor':'ๅพฎๅฐ็'
            }
            sql="SELECT detail.severity as failSeverity,count(*) as num from api_test_automationresult result right join api_test_automationresultfaildetail detail on result.id=detail.result_id where %s result.step_id is null %s GROUP BY failSeverity having num>0 order by num desc" % ("result.project_id={projectId} and" if project_id else "","and result.testTime >='{startDate}' AND result.testTime < '{endDate}'" if start_time and end_time else "")
            sql = sql.format(projectId=project_id,startDate=start_time,endDate=end_time)
            logging.info("ๆง่กsql: %s" % sql)
            cursor.execute(sql)
            # Failures grouped by severity, for a pie chart.
            line_db = cursor.fetchall()
            line_name = [SEVERITY[line[0]] for line in line_db]
            line_data = [{"name":SEVERITY[line[0]],"value":line[1]} for line in line_db]
            failSeverityChart = {'line_name': line_name, 'line_data': line_data}
            return JsonResponse(code="999999", msg="ๆๅ๏ผ", data={"autoChart":autoChart,"failTypeChart":failTypeChart,"failSeverityChart":failSeverityChart})
        except:
            logging.error(traceback.format_exc())
            return JsonResponse(code="999998", msg="ๅคฑ่ดฅ๏ผ")
class Publish_Chart(APIView):
    """API view returning statistics for "上线后" (post-release) test runs:
    a pass-rate line chart aggregated by a time period, and a per-test-name
    pie chart of execution counts."""
    authentication_classes = (TokenAuthentication,)
    permission_classes = ()
    def get(self, request):
        """
        Get release-project test statistics.
        :param request: GET params: project_id, start_time/end_time (YYYY-MM-DD),
                        optional period (day/week/month/quarter/year)
        :return: JsonResponse with publish_chart (runs + pass rate per period)
                 and publish_pie (runs per test name)
        """
        project_id = request.GET.get("project_id")
        start_time = request.GET.get("start_time")
        end_time = request.GET.get("end_time")
        if end_time:
            # Shift end_time one day forward so the SQL's `testTime < endDate`
            # comparison makes the range end-inclusive.
            end_time=(datetime.datetime.strptime(end_time,'%Y-%m-%d')+datetime.timedelta(days=1)).strftime("%Y-%m-%d")
        # Default to weekly buckets; use daily buckets only when an explicit
        # window of 7 days or less was requested.
        period = request.GET.get("period","week") if not (start_time and end_time) or daysBetween(start_time,end_time) > 7 else "day"
        try:
            cursor = connection.cursor()
            # NOTE(review): the SQL is assembled with str.format on raw request
            # parameters (project_id/start_time/end_time are interpolated
            # unescaped) -- confirm inputs are validated upstream, otherwise
            # this is an SQL injection vector.
            sql="SELECT {date} as date,count(*) as num,round(sum(CASE WHEN result='PASS' THEN 1 ELSE 0 END)*100/count(*),2) as rate from api_test_automationresult where " + ("project_id={projectId} and" if project_id else "") + " step_id is null and automation_id is null and description like '%上线后%'" + (" and testTime >='{startDate}' AND testTime < '{endDate}'" if start_time and end_time else "") + " GROUP BY date having num>0 order by date"
            # Choose the GROUP BY expression that buckets testTime by the
            # requested aggregation period.
            if period=="day":
                sql = sql.format(projectId=project_id,startDate=start_time,endDate=end_time,date="DATE_FORMAT( testTime, '%Y-%m-%d')")
            elif period=="week":
                sql = sql.format(projectId=project_id,startDate=start_time,endDate=end_time,date="date_sub(DATE_FORMAT( testTime, '%Y-%m-%d'),interval WEEKDAY(testTime) day)")
            elif period=="month":
                sql = sql.format(projectId=project_id,startDate=start_time,endDate=end_time,date="DATE_FORMAT( testTime, '%Y-%m')")
            elif period=="quarter":
                sql = sql.format(projectId=project_id,startDate=start_time,endDate=end_time,date="concat(year(testTime),' Q',QUARTER(testTime))")
            elif period=="year":
                sql = sql.format(projectId=project_id,startDate=start_time,endDate=end_time,date="year(testTime)")
            # print(sql)
            cursor.execute(sql)
            # deal with in the line chart
            line_db = cursor.fetchall()
            line_name = [line[0] for line in line_db]
            # Column 1 is the run count, column 2 the pass rate; NULLs become 0.
            line_x = [line[1] if line[1] else 0 for line in line_db]
            line_x1 = [line[2] if line[2] else 0 for line in line_db]
            publish_chart = {'line_name': line_name, 'line_x': line_x, 'line_x1': line_x1}
            # Second query: execution count per test name, for the pie chart.
            sql="SELECT name,count(*) as num from api_test_automationresult where " + ("project_id={projectId} and" if project_id else "") + " step_id is null and automation_id is null and description like '%上线后%'" + (" and testTime >='{startDate}' AND testTime < '{endDate}'" if start_time and end_time else "") + " GROUP BY name having num>0 order by num desc"
            sql = sql.format(projectId=project_id,startDate=start_time,endDate=end_time)
            logging.info("执行sql: %s" % sql)
            cursor.execute(sql)
            # deal with in the line chart
            line_db = cursor.fetchall()
            line_name = [line[0] for line in line_db]
            line_data = [{"name":line[0],"value":line[1]} for line in line_db]
            publish_pie = {'line_name': line_name, 'line_data': line_data}
            return JsonResponse(code="999999", msg="成功!", data={"publish_chart":publish_chart,"publish_pie":publish_pie})
        except:
            # NOTE(review): bare except swallows every error; the traceback is
            # at least logged before the generic failure response is returned.
            logging.error(traceback.format_exc())
            return JsonResponse(code="999998", msg="失败!")
class Api_Chart(APIView):
authentication_classes = (TokenAuthentication,)
permission_classes = ()
def get(self, request):
"""
่ทๅๆฅๅฃๆง่ก็ป่ฎกไฟกๆฏ
:param request:
| |
in this loop
# Simplifications for j=i, kij is always 0 by definition.
t200 = a_alphas[i]*zs[i]
a_alpha_j_rows[i] += t200
a_alpha += t200*zs[i]
zi_zj = zs[i]*zs[i]
da_alpha_dT_ij = -da_alpha_dT_i - da_alpha_dT_i#da_alpha_dT_i*-2.0
da_alpha_dT_j_rows[i] += zs[i]*da_alpha_dT_ij
da_alpha_dT_ij *= zi_zj
da_alpha_dT -= 0.5*(da_alpha_dT_ij + (workingd1 + workingd1))
d2a_alpha_dT2 += d2a_alpha_dT2_i*zi_zj + (workings2 + workings2)
for i in range(N):
da_alpha_dT_j_rows[i] *= -0.5
return a_alpha, da_alpha_dT, d2a_alpha_dT2, a_alpha_j_rows, da_alpha_dT_j_rows
def eos_mix_dV_dzs(T, P, Z, b, delta, epsilon, a_alpha, db_dzs, ddelta_dzs,
                   depsilon_dzs, da_alpha_dzs, N, out=None):
    r"""Compute the composition derivatives of molar volume, ``dV/dz_i``,
    for a generic cubic equation of state at constant `T` and `P`.

    Parameters
    ----------
    T : float
        Temperature [K].
    P : float
        Pressure [Pa].
    Z : float
        Compressibility factor of the phase [-].
    b, delta, epsilon : float
        Mixture coefficients of the generic cubic EOS.
    a_alpha : float
        Mixture attractive term [J^2/(mol^2*Pa)].
    db_dzs, ddelta_dzs, depsilon_dzs, da_alpha_dzs : list[float]
        Composition derivatives of the corresponding mixture coefficients.
    N : int
        Number of components.
    out : list[float], optional
        Pre-allocated output buffer; a new list is created when None.

    Returns
    -------
    list[float]
        ``dV/dz_i`` for each of the `N` components.
    """
    if out is None:
        out = [0.0]*N
    RT = R*T
    V = Z*RT/P
    # (cleanup: the original had no-op self-assignments `T = T`,
    # `x1 = a_alpha = a_alpha` and `x2 = epsilon = epsilon`; the aliases are
    # kept but the dead re-assignments are removed.)
    x0 = delta
    x1 = a_alpha
    x2 = epsilon
    # Common subexpressions of the analytical derivative, hoisted out of the
    # per-component loop.
    x0V = x0*V
    Vmb = V - b
    x5 = Vmb*Vmb
    x1x5 = x1*x5
    x0x1x5 = x0*x1x5
    t0 = V*x1x5
    x6 = x2*x1x5
    x9 = V*V
    x7 = x9*t0
    x8 = x2*t0
    x10 = x0V + x2 + x9
    x10x10 = x10*x10
    x11 = RT*x10*x10x10
    x13 = x0x1x5*x9
    x7x8 = x7 + x8
    t2 = -1.0/(x0V*x0x1x5 + x0*x6 - x11 + 3.0*x13 + x7x8 + x7x8)
    t1 = t2*x10x10*x5
    t3 = x0V*x1x5
    t4 = x1x5*x9
    t5 = t2*(t3 + t4 + x6)
    t6 = t2*(x13 + x7x8)
    x11t2 = x11*t2
    for i in range(N):
        out[i] = t5*depsilon_dzs[i] - t1*da_alpha_dzs[i] + x11t2*db_dzs[i] + t6*ddelta_dzs[i]
    return out
def G_dep_lnphi_d_helper(T, P, b, delta, epsilon, a_alpha, N,
                         Z, dbs, depsilons, ddelta, dVs, da_alphas, G, out=None):
    r"""Shared helper computing per-component derivatives for either the
    departure Gibbs energy (when `G` is truthy, results are multiplied by RT)
    or the log fugacity coefficient (when `G` is falsy) of a generic cubic
    EOS mixture.

    `dbs`, `depsilons`, `ddelta`, `dVs`, `da_alphas` are the mole-number
    derivatives of the corresponding mixture quantities; `out` is an optional
    pre-allocated result buffer of length `N`.
    """
    if out is None:
        out = [0.0]*N
    x3 = b
    x4 = delta
    x5 = epsilon
    RT = R*T
    x0 = V = Z*RT/P
    x2 = 1.0/(RT)
    # x6 = delta^2 - 4*epsilon, the discriminant of the cubic's quadratic part.
    x6 = x4*x4 - 4.0*x5
    if x6 == 0.0:
        # VDW has x5 as zero as delta, epsilon = 0
        # Substitute a tiny value so 1/sqrt(x6) below does not divide by zero.
        x6 = 1e-100
    x7 = 1.0/sqrt(x6)
    x8 = a_alpha
    x9 = x0 + x0
    x10 = x4 + x9
    x11 = x2 + x2
    # catanh returns a complex value; only the real part is physical here.
    x12 = x11*catanh(x10*x7).real
    x15 = x7*x7
    # Rebind the derivative arrays under conventional names.
    db_dns = dbs
    depsilon_dns = depsilons
    ddelta_dns = ddelta
    dV_dns = dVs
    da_alpha_dns = da_alphas
    # Loop-invariant coefficient terms.
    t1 = P*x2
    t2 = x11*x15*x8/(x10*x10*x15 - 1.0)
    t3 = x12*x8*x15*x7
    t4 = x12*x7
    t5 = 1.0/(x0 - x3)
    t6 = x4 + x9
    if G:
        # Scale all terms by RT to produce Gibbs-energy derivatives instead
        # of ln(phi) derivatives.
        t1 *= RT
        t2 *= RT
        t3 *= RT
        t4 *= RT
        t5 *= RT
    c0 = t1 + t2*2.0 - t5
    for i in range(N):
        x13 = ddelta_dns[i]
        x14 = x13*x4 - 2.0*depsilon_dns[i]
        x16 = x14*x15
        diff = (dV_dns[i]*c0 - t4*da_alpha_dns[i] + t5*db_dns[i]
                + t2*(x13 - x16*t6) + x14*t3 )
        # diff = (x1*t1 + t2*(x1 + x1 + x13 - x16*t6) + x14*t3 - t4*da_alpha_dns[i] - t5*(x1 - db_dns[i]))
        out[i] = diff
    return out
def eos_mix_a_alpha_volume(gas, T, P, zs, kijs, b, delta, epsilon, a_alphas, a_alpha_roots, a_alpha_j_rows=None, vec0=None):
    r"""Compute the mixture ``a*alpha`` term, solve the cubic EOS for the
    volume roots, and return the compressibility factor of the requested
    phase.

    Parameters
    ----------
    gas : bool
        When True pick the largest physical volume root (vapor); otherwise
        pick the smallest (liquid).
    T, P : float
        Temperature [K] and pressure [Pa].
    zs : list[float]
        Mole fractions.
    kijs : list[list[float]]
        Binary interaction parameters.
    b, delta, epsilon : float
        Mixture coefficients of the generic cubic EOS.
    a_alphas, a_alpha_roots : list[float]
        Pure-component ``a*alpha`` values and their square roots.
    a_alpha_j_rows, vec0 : list[float], optional
        Work buffers forwarded to `a_alpha_quadratic_terms`.

    Returns
    -------
    Z : float
        Compressibility factor of the selected root.
    a_alpha : float
        Mixture ``a*alpha``.
    a_alpha_j_rows : list[float]
        Row sums needed for later derivative computations.
    """
    a_alpha, a_alpha_j_rows = a_alpha_quadratic_terms(a_alphas, a_alpha_roots, T, zs, kijs, a_alpha_j_rows, vec0)
    V0, V1, V2 = volume_solutions_halley(T, P, b, delta, epsilon, a_alpha)
    if not gas:
        # Prefer liquid: keep the smallest root that is physical (> b).
        # V1 == 0.0 signals that only one real root exists.
        if V1 != 0.0:
            if V0 > V1 and V1 > b:
                V0 = V1
            if V0 > V2 and V2 > b:
                V0 = V2
    else:
        # Prefer gas: keep the largest physical root (> b).
        if V1 != 0.0:
            if V0 < V1 and V1 > b:
                V0 = V1
            if V0 < V2 and V2 > b:
                V0 = V2
    # (cleanup: was a duplicated `Z = Z = P*V0/(R*T)` assignment)
    Z = P*V0/(R*T)
    return Z, a_alpha, a_alpha_j_rows
def eos_mix_db_dns(b, bs, N, out=None):
    r"""Mole-number derivatives of the mixture covolume `b`.

    With linear mixing rules, ``d(b)/d(n_i)`` at constant total moles is
    simply ``bs[i] - b`` for every component.  `out` is an optional
    pre-allocated result buffer of length `N`.
    """
    if out is None:
        out = [0.0]*N
    for idx in range(N):
        out[idx] = bs[idx] - b
    return out
def eos_mix_da_alpha_dns(a_alpha, a_alpha_j_rows, N, out=None):
    r"""Mole-number derivatives of the mixture ``a*alpha`` term.

    Each derivative is ``2*(a_alpha_j_rows[i] - a_alpha)``; the constant
    ``-2*a_alpha`` is hoisted out of the loop.
    """
    if out is None:
        out = [0.0]*N
    neg_two_a_alpha = -2.0*a_alpha
    for comp in range(N):
        out[comp] = 2.0*a_alpha_j_rows[comp] + neg_two_a_alpha
    return out
def RK_d3delta_dninjnks(b, bs, N, out=None):
    r"""Third mole-number derivatives of `delta` for the RK family of EOSs.

    Element ``(i, j, k)`` of the returned N x N x N array is
    ``2*(bs[i] + bs[j] + bs[k]) - 6*b``.
    """
    if out is None:
        out = [[[0.0]*N for _ in range(N)] for _ in range(N)]# numba: delete
        # out = np.zeros((N, N, N)) # numba: uncomment
    neg_six_b = -6.0*b
    for i in range(N):
        plane = out[i]
        b_i = bs[i]
        for j in range(N):
            row = plane[j]
            # Shared part for this (i, j) pair, computed once per row.
            shared = neg_six_b + 2.0*(b_i + bs[j])
            for k in range(N):
                row[k] = shared + 2.0*bs[k]
    return out
def PR_ddelta_dzs(bs, N, out=None):
    r"""Composition derivatives of `delta` (= 2*b) for the PR EOS.

    ``d(delta)/d(z_i) = 2*bs[i]`` with linear covolume mixing.
    """
    if out is None:
        out = [0.0]*N
    for comp in range(N):
        out[comp] = 2.0*bs[comp]
    return out
def PR_ddelta_dns(bs, b, N, out=None):
    r"""Mole-number derivatives of `delta` (= 2*b) for the PR EOS.

    ``d(delta)/d(n_i) = 2*(bs[i] - b)``; the ``-2*b`` term is hoisted.
    """
    if out is None:
        out = [0.0]*N
    minus_two_b = -2.0*b
    for comp in range(N):
        out[comp] = 2.0*bs[comp] + minus_two_b
    return out
def PR_depsilon_dns(b, bs, N, out=None):
    r"""Mole-number derivatives of `epsilon` (= -b^2) for the PR EOS.

    ``d(epsilon)/d(n_i) = 2*b*(b - bs[i])``; both loop-invariant products
    are computed once.
    """
    if out is None:
        out = [0.0]*N
    two_b = b + b
    two_b_sq = two_b*b
    for comp in range(N):
        out[comp] = two_b_sq - two_b*bs[comp]
    return out
def PR_d2delta_dninjs(b, bs, N, out=None):
    r"""Second mole-number derivatives of `delta` for the PR EOS.

    Element ``(i, j)`` of the returned N x N array is
    ``2*(2*b - bs[i]) - 2*bs[j]``.
    """
    if out is None:
        out = [[0.0]*N for _ in range(N)]# numba: delete
        # out = np.zeros((N, N)) # numba: uncomment
    two_b = 2.0*b
    for i in range(N):
        row = out[i]
        # Row-constant part, computed once per i.
        head = 2.0*(two_b - bs[i])
        for j in range(N):
            row[j] = head - 2.0*bs[j]
    return out
def PR_d3delta_dninjnks(b, bs, N, out=None):
    r"""Third mole-number derivatives of `delta` for the PR EOS.

    Element ``(i, j, k)`` of the returned N x N x N array is
    ``4*(bs[i] + bs[j] - 3*b) + 4*bs[k]``.
    """
    if out is None:
        out = [[[0.0]*N for _ in range(N)] for _ in range(N)]# numba: delete
        # out = np.zeros((N, N, N)) # numba: uncomment
    minus_three_b = -3.0*b
    for i in range(N):
        plane = out[i]
        b_i = bs[i]
        for j in range(N):
            row = plane[j]
            # Shared part for this (i, j) pair.
            pair = 4.0*(minus_three_b + b_i + bs[j])
            for k in range(N):
                row[k] = pair + 4.0*bs[k]
    return out
def PR_d2epsilon_dzizjs(b, bs, N, out=None):
    r"""Second composition derivatives of `epsilon` (= -b^2) for the PR EOS.

    Element ``(i, j)`` is ``-2*bs[i]*bs[j]``.  The mixture `b` argument is
    unused but retained for signature uniformity with the other helpers.
    """
    if out is None:
        out = [[0.0]*N for _ in range(N)]# numba: delete
        # out = np.zeros((N, N)) # numba: uncomment
    for i in range(N):
        row = out[i]
        scale = -2.0*bs[i]
        for j in range(N):
            row[j] = scale*bs[j]
    return out
def PR_depsilon_dzs(b, bs, N, out=None):
    r"""Composition derivatives of `epsilon` (= -b^2) for the PR EOS.

    ``d(epsilon)/d(z_i) = -2*b*bs[i]``; the factor is hoisted out of the loop.
    """
    if out is None:
        out = [0.0]*N
    factor = -2.0*b
    for comp in range(N):
        out[comp] = factor*bs[comp]
    return out
def PR_d2epsilon_dninjs(b, bs, N, out=None):
    r"""Second mole-number derivatives of `epsilon` (= -b^2) for the PR EOS.

    Element ``(i, j)`` expands to
    ``-6*b^2 + 4*b*(bs[i] + bs[j]) - 2*bs[i]*bs[j]``, expressed below through
    hoisted constants so each row is a linear function of ``bs[j]``.
    """
    if out is None:
        out = [[0.0]*N for _ in range(N)]# numba: delete
        # out = np.zeros((N, N)) # numba: uncomment
    double_b = b + b
    b_sq = b*b
    const_term = -double_b*double_b - 2.0*b_sq
    coeff_i = 2.0*(b + 0.5*double_b)
    coeff_j = 2.0*b + double_b
    for i in range(N):
        row = out[i]
        b_i = bs[i]
        base = const_term + coeff_i*b_i
        slope = coeff_j - 2.0*b_i
        for j in range(N):
            row[j] = base + bs[j]*slope
    return out
def PR_d3epsilon_dninjnks(b, bs, N, out=None):
    r"""Third mole-number derivatives of `epsilon` (= -b^2) for the PR EOS.

    Each ``(i, j, k)`` element is built from per-``i`` constants and a
    per-``(i, j)`` base plus a slope in ``bs[k]``.
    """
    if out is None:
        out = [[[0.0]*N for _ in range(N)] for _ in range(N)]# numba: delete
        # out = np.zeros((N, N, N)) # numba: uncomment
    b_sq_24 = 24.0*b*b
    for i in range(N):
        b_i = bs[i]
        plane = out[i]
        lin_k = -12.0*b + 4.0*b_i
        const_i = b_sq_24 - 12.0*b*b_i
        lin_j = (-12.0*b + 4.0*b_i)
        for j in range(N):
            b_j = bs[j]
            base = const_i + b_j*lin_j
            slope_k = lin_k + 4.0*b_j
            row = plane[j]
            for k in range(N):
                row[k] = base + bs[k]*slope_k
    return out
def PR_translated_ddelta_dzs(b0s, cs, N, out=None):
    r"""Composition derivatives of `delta` for the volume-translated PR EOS.

    ``d(delta)/d(z_i) = 2*(cs[i] + b0s[i])`` where `b0s` are the
    untranslated covolumes and `cs` the volume translations.
    """
    if out is None:
        out = [0.0]*N
    for comp in range(N):
        out[comp] = 2.0*(cs[comp] + b0s[comp])
    return out
def PR_translated_d2epsilon_dzizjs(b0s, cs, N, out=None):
    r"""Second composition derivatives of `epsilon` for the volume-translated
    PR EOS.

    Element ``(j, i)`` is
    ``2*b0s[j]*(cs[i] - b0s[i]) + 2*cs[j]*(b0s[i] + cs[i])``.
    """
    if out is None:
        out = [[0.0]*N for _ in range(N)] # numba: delete
        # out = np.zeros((N, N)) # numba: uncomment
    for j in range(N):
        row = out[j]
        twice_b0_j = 2.0*b0s[j]
        twice_c_j = 2.0*cs[j]
        for i in range(N):
            # Optimized
            row[i] = twice_b0_j*(cs[i] - b0s[i]) + twice_c_j*(b0s[i] + cs[i])
    return out
def PR_translated_d2epsilon_dninjs(b0s, cs, b, c, N, out=None):
    r"""Second mole-number derivatives of `epsilon` for the volume-translated
    PR EOS.

    `b` is the translated mixture covolume and `c` the mixture translation,
    so the untranslated covolume is recovered as ``b0 = b + c``.  Each
    ``(i, j)`` element is a bilinear function of ``b0s[j]`` and ``cs[j]``
    with coefficients depending on component ``i``.
    """
    if out is None:
        out = [[0.0]*N for _ in range(N)] # numba: delete
        # out = np.zeros((N, N)) # numba: uncomment
    b0 = b + c
    k0 = -6.0*b0*b0 + 12.0*b0*c + 6.0*c*c
    k1 = 4.0*b0 - 4.0*c
    k2 = (-4.0*b0 - 4.0*c)
    for i in range(N):
        row = out[i]
        b0_i = b0s[i]
        c_i = cs[i]
        base = k0 + b0_i*k1 + c_i*k2
        slope_b0 = k1 - 2.0*b0_i + 2.0*c_i
        slope_c = k2 + 2.0*b0_i + 2.0*c_i
        for j in range(N):
            row[j] = base + b0s[j]*slope_b0 + cs[j]*slope_c
    return out
def PR_translated_ddelta_dns(b0s, cs, delta, N, out=None):
if out is None:
out = [0.0]*N
for | |
#!/usr/bin/env python3
import numpy as np
import poppy
import time
from astropy import units as u
from astropy.constants import codata2014 as const
from astropy.modeling.blackbody import blackbody_lambda
from functools import reduce
from .inject_images import InjectCompanion
from .my_warnings import warnings
class SubtractImages(InjectCompanion):
'''
This class is meant to be inherited by a KlipRetrieve() class instance, not
used individually.
Creates KLIP bases, carries out PSF subtraction, saves the results, and
also calculates and saves information about contrast and separation in the
resulting subtracted images.
The key methods that handle calculation and new object creation are...
- self._generate_klip_proj(): uses the reference images as a library
from which to make KLIP projections of each slice of each target image.
Its output is self.klip_proj, an HDUList of these projections.
- self._generate_theo_klip_proj() achieves the same result when
self.align_style is 'theoretical', though the path is different
due to the different structure of reference images in that scenario.
- self._generate_contrasts(): uses the result of the previous method to
calculate radial profiles for different versions of the target images.
It outputs four HDULists -- self.pre_prof_hdu, self.post_prof_hdu,
self.photon_prof_hdu, and self.pre_avg_hdu -- with contrast versus
separation information for every slice of each target image, before and
after subtraction.
'''
    def __init__(self):
        # Delegate all construction to the InjectCompanion/KlipRetrieve
        # chain; this class only adds methods and holds no extra init state.
        super().__init__()
    def _count_photons(self,
                       temp_star=6000*u.K, rad_star=1*u.solRad, dist=1.5*u.pc,
                       wv=4*u.micron, exp_time=2000*u.second, throughput=.3):
        '''
        ***Something in here is incorrect?***

        Returns the number of photons received by a detector based on the
        stellar and instrument parameters specified as arguments in astropy
        units.

        The estimate multiplies the telescope collecting area, the star's
        projected solid angle, the per-slice wavelength bin width, the
        exposure time, the throughput, and the blackbody surface brightness,
        then divides by the photon energy at `wv`.

        Remember that photon counting has Poisson-type error, so photon noise is
        the square root of this function's result. A fuller explanation of the
        process is available in `subtract_psfs.ipynb` (coming soon?).
        '''
        # interpret unitless quantities (e.g. source_proj below) in radians
        u.set_enabled_equivalencies(u.dimensionless_angles())

        # calculate stellar attributes
        #lum_star = const.sigma_sb * temp_star * np.pi * (rad_star)**2
        #flux_bol = lum_star / (4 * np.pi * dist**2)
        #source_proj = np.arctan(rad_star / dist)**2 # exact
        # Small-angle approximation of the star's projected solid angle.
        source_proj = (rad_star / dist)**2 # approximated

        # define JWST info
        diam_jwst = 6.5 * u.m
        area_scope = np.pi * (diam_jwst / 2)**2

        #wv = np.mean([self.lo_wv, self.hi_wv]) * u.m

        # resolve_pwr = (len(self.wvlnths) * np.mean([self.lo_wv, self.hi_wv])
        #                / (self.hi_wv - self.lo_wv))
        #wv_resolution = wv / resolve_pwr
        # (not all KlipCreate sessions use resolve_pwr,
        # so need a safer way to calc resolution)

        #wv = np.mean([self.lo_wv, self.hi_wv]) * u.m # not needed
        # NOTE(review): assumes self.lo_wv/self.hi_wv are in meters and the
        # cube's wavelength slices are evenly spaced -- confirm upstream.
        wv_resolution = (self.hi_wv - self.lo_wv) * u.m / len(self.wvlnths)
        # approximating that each wavelength slice is the same width

        # calculate blackbody radiation & photon info based on target wavelength
        with warnings.catch_warnings(record=True) as w:
            # ignore astropy 4's blackbody-related deprecation warning, for now
            warnings.simplefilter('ignore')
            bb_rad = blackbody_lambda(wv, temp_star)

        photon_nrg = const.h * const.c / wv

        # get number of photons received by detector and resulting noise
        num_photons = (throughput * area_scope * source_proj * wv_resolution
                       *photon_nrg**(-1) * exp_time * bb_rad).decompose().to('')
        #photon_noise = np.sqrt(num_phot)

        return num_photons#, photon_noise
def _get_klip_basis(self, ref, explain=None, modes=None, verbose=False):
'''
Use a a Karhunen-Loรจve transform to create a set of basis vectors from a
reference library to be used for KLIP projection later on.
Argument `ref` is a numpy array (not HDUList) of some number of
reference images.
Argument `explain` is the fraction of variance you want explained by
`ref`'s eigenvalues, throwing out those aren't needed after the KL
transform.
Argument `modes` is the explicit maximum number of eigenvalues to keep.
(You can use either `explain` or `modes`, but not both.)
Pass this function's output to self._project_onto_basis() to complete
the KLIP projection process.
'''
if (explain is not None) + (modes is not None) > 1:
raise ValueError('only one of `explain`/`modes` can have a value')
elif (explain is not None) + (modes is not None) < 1:
raise ValueError('either `explain` or `modes` must have a value')
my_pr = lambda *args, **kwargs: (print(*args, **kwargs)
if verbose else None)
# flatten psf arrays and find eigenv*s for the result
ref_flat = ref.reshape(ref.shape[0], -1)
e_vals, e_vecs = np.linalg.eig(np.dot(ref_flat, ref_flat.T))
my_pr('********', "eigenvalues are {e_vals}", sep='\n')
# sort eigenvalues ("singular values") in descending order
desc = np.argsort(e_vals)[::-1] # sort indices of e_vals in desc. order
sv = np.sqrt(e_vals[desc]).reshape(-1, 1)
# do the KL transform
Z = np.dot(1 / sv * e_vecs[:,desc].T, ref_flat)
my_pr(f"Z shape is {Z.shape}")
if explain:
test_vars = [np.sum(e_vals[0:i+1]) / np.sum(e_vals) > explain
for i, _ in enumerate(e_vals)]
modes = np.argwhere(np.array(test_vars) == True).flatten()[0] + 1
# limit Z to a certain number of bases
Z_trim = Z[:modes,:]
my_pr(f"trimmed Z shape is {Z_trim.shape}")
return Z_trim
def _project_onto_basis(self, target, Z_trim, verbose=False):
'''
Help estimate PSF intensity by projecting a target image onto a KL
basis made from the reference images.
Argument `target` is a 2D array representing a slice from some target
observation's data cube of images.
Argument `Z_trim` is the result of self._get_klip_basis() for the
target image's reference image library at the same wavelength.
Separating that method from this one helps with the speed of
self._generate_klip_proj() since target images with the same wavelength
share the same library of reference images.
'''
my_pr = lambda *args, **kwargs: (print(*args, **kwargs)
if verbose else None)
# flatten target arrays
targ_flat = target.flatten()
if verbose:
my_pr(f"target shape is {targ_flat.shape}", end='\n********')
# project onto KL basis to estimate PSF intensity
proj = np.dot(targ_flat, Z_trim.T)
klipped = np.dot(Z_trim.T, proj).reshape(target.shape)
return klipped
    def _generate_klip_proj(self, cube_list, verbose=True):
        '''
        Generates a HDUList of KLIP projections for every slice of each
        post-padded target image data cube. The result is used in the
        post-subtraction plotting methods.

        Argument `cube_list` is an HDUList of *aligned*, NaN-less data cubes.
        `self.stackable_cubes` is usually the only appropriate choice here.
        The first len(self.positions) cubes are treated as references, the
        rest as targets.

        Argument `verbose` is a boolean that, when True, allows the method to
        print progress messages.
        '''
        print_ast = lambda text: print('\n********', text, '********', sep='\n')
        my_pr = lambda txt, **kwargs: (print_ast(txt, **kwargs)
                                       if verbose else None)

        my_pr('generating KLIP projections of target images '
              'in `self.klip_proj`...')

        # collect all images in one 4D array.
        # dimensions are: number of ref & tgt data cubes,
        # number of wavelength slices, and the 2D shape of a post-padded image
        cube_list = self._pklcopy(cube_list)
        all_cubes = np.array([cube.data for cube in cube_list])

        # separate the reference and target data cubes
        refs_all = all_cubes[:len(self.positions)]
        tgts_all = all_cubes[len(self.positions):]

        # set up hdulist of klip projections for all slices of all target images
        # (otherwise, has the same HDU structure as stackable_cubes)
        klip_proj = self._pklcopy(cube_list[len(self.positions):])

        # carry out klip projections for all slices of every target image
        # and insert them into the HDUList generated above.
        # One KL basis is built per wavelength slice and reused for every
        # target at that wavelength.
        for sl in range(tgts_all.shape[1]): # number of wavelength slices
            refs_sliced = refs_all[:,sl]
            tgts_sliced = tgts_all[:,sl]

            ref_klip_basis = self._get_klip_basis(refs_sliced,
                                                  #explain=.99)
                                                  modes=len(self.positions))

            for j, tg in enumerate(tgts_sliced):
                ref_klip = self._project_onto_basis(tg, ref_klip_basis)
                klip_proj[j].data[sl] = ref_klip

        return klip_proj
def _generate_theo_klip_proj(self, cube_list, fine_ref_cubes, verbose=True):
'''
**Exclusively for theoretically-aligned HDULists.**
Produces the same output as `self._generate_klip_proj()` -- an HDUList
of KLIP projections for every slice of each post-padded target image
data cube. The result is used in the post-subtraction plotting methods.
Argument `cube_list` is an HDUList of *aligned*, NaN-less data cubes.
`self.stackable_cubes` is usually the only appropriate argument here;
its latter half of aligned target images is what will be used here.
Argument `fine_ref_cubes` is a list of 4D arrays. Each array is a set of
"fine-aligned" reference cubes that was made to match a certain target.
The references in index -1 of `fine_ref_cubes` match with the target
cube at index -1 of `cube_list`, and so on.
Argument `verbose` is a boolean that, when True, allows the method to
print progress messages.
'''
print_ast = lambda text: print('\n********', text, '********', sep='\n')
my_pr = lambda txt: print_ast(txt) if verbose else None
my_pr('generating KLIP projections of target images '
'in `self.klip_proj`...')
# set up hdulist of klip projections for all slices of all target images
# (otherwise, has the same HDU structure as cube_list)
cube_list = self._pklcopy(cube_list)
fine_tgt_cubes = [cb.data for cb in cube_list[len(self.positions):]]
klip_proj = self._pklcopy(cube_list[len(self.positions):])
# carry out klip projections for all slices of every target image
# and insert them | |
'endip': services["endip"], 'zoneid': services["zoneid"]}
if "podid" in services:
cmd['podid'] = services["podid"]
cmd['vlan'] = services["vlan"]
if account:
cmd['account'] = account
if domainid:
cmd['domainid'] = domainid
return PublicIpRange(api_client.createVlanIpRange(**cmd))
def delete(self, api_client):
"""Delete VlanIpRange"""
cmd = {'id': self.vlan.id}
api_client.deleteVlanIpRange(**cmd)
@classmethod
def list(cls, api_client, **kwargs):
"""Lists all VLAN IP ranges."""
cmd = {}
cmd.update(kwargs)
if 'account' in kwargs.keys() and 'domainid' in kwargs.keys():
cmd['listall'] = True
return api_client.listVlanIpRanges(**cmd)
@classmethod
def dedicate(
cls, api_client, id, account=None, domainid=None, projectid=None):
"""Dedicate VLAN IP range"""
cmd = {'id': id, 'account': account, 'domainid': domainid, 'projectid': projectid}
return PublicIpRange(api_client.dedicatePublicIpRange(**cmd))
def release(self, api_client):
"""Release VLAN IP range"""
cmd = {'id': self.vlan.id}
return api_client.releasePublicIpRange(**cmd)
class SecondaryStagingStore:
    """Manage a secondary staging store."""

    def __init__(self, items):
        # Mirror the API response dict as instance attributes.
        self.__dict__.update(items)

    @classmethod
    def create(cls, api_client, url, provider, services=None):
        """Create a staging store at `url` backed by `provider`."""
        cmd = {'url': url, 'provider': provider}
        if services:
            # Pass through only the recognized optional keys, in fixed order.
            for key in ('zoneid', 'details', 'scope'):
                if key in services:
                    cmd[key] = services[key]
        return SecondaryStagingStore(api_client.createSecondaryStagingStore(**cmd))

    def delete(self, api_client):
        """Delete this staging store."""
        api_client.deleteSecondaryStagingStore(id=self.id)

    @classmethod
    def list(cls, api_client, **kwargs):
        """List staging stores; account+domain queries imply listall."""
        cmd = dict(kwargs)
        if 'account' in kwargs and 'domainid' in kwargs:
            cmd['listall'] = True
        return api_client.listSecondaryStagingStores(**cmd)
class ImageStore:
    """Manage image (secondary storage) stores."""

    def __init__(self, items):
        # Mirror the API response dict as instance attributes.
        self.__dict__.update(items)

    @classmethod
    def create(cls, api_client, url, provider, services=None):
        """Add an image store at `url` backed by `provider`."""
        cmd = {'url': url, 'provider': provider}
        if services:
            # Pass through only the recognized optional keys, in fixed order.
            for key in ('zoneid', 'details', 'scope'):
                if key in services:
                    cmd[key] = services[key]
        return ImageStore(api_client.addImageStore(**cmd))

    def delete(self, api_client):
        """Delete this image store."""
        api_client.deleteImageStore(id=self.id)

    @classmethod
    def list(cls, api_client, **kwargs):
        """List image stores; account+domain queries imply listall."""
        cmd = dict(kwargs)
        if 'account' in kwargs and 'domainid' in kwargs:
            cmd['listall'] = True
        return api_client.listImageStores(**cmd)
class PhysicalNetwork:
    """Manage physical networks."""

    def __init__(self, items):
        # Mirror the API response dict as instance attributes.
        self.__dict__.update(items)

    @classmethod
    def create(cls, api_client, services, zoneid, domainid=None):
        """Create a physical network in the given zone."""
        cmd = {'name': services["name"], 'zoneid': zoneid}
        if domainid:
            cmd['domainid'] = domainid
        return PhysicalNetwork(api_client.createPhysicalNetwork(**cmd))

    def delete(self, api_client):
        """Delete this physical network."""
        api_client.deletePhysicalNetwork(id=self.id)

    def update(self, api_client, **kwargs):
        """Update this physical network (state, vlan range, etc.)."""
        args = {'id': self.id}
        args.update(kwargs)
        return api_client.updatePhysicalNetwork(**args)

    def addTrafficType(self, api_client, type):
        """Add a traffic type (e.g. Guest, Management) to this network."""
        return api_client.addTrafficType(physicalnetworkid=self.id,
                                         traffictype=type)

    @classmethod
    def dedicate(cls, api_client, vlanrange, physicalnetworkid,
                 account=None, domainid=None, projectid=None):
        """Dedicate a guest VLAN range on a physical network."""
        return PhysicalNetwork(api_client.dedicateGuestVlanRange(
            vlanrange=vlanrange, physicalnetworkid=physicalnetworkid,
            account=account, domainid=domainid, projectid=projectid))

    def release(self, api_client):
        """Release this dedicated guest VLAN range."""
        return api_client.releaseDedicatedGuestVlanRange(id=self.id)

    @classmethod
    def listDedicated(cls, api_client, **kwargs):
        """List dedicated guest VLAN ranges."""
        args = dict(kwargs)
        if 'account' in kwargs and 'domainid' in kwargs:
            args['listall'] = True
        return api_client.listDedicatedGuestVlanRanges(**args)

    @classmethod
    def list(cls, api_client, **kwargs):
        """List physical networks, wrapping each result (lazily, as a map
        iterator, matching the original behavior)."""
        args = dict(kwargs)
        if 'account' in kwargs and 'domainid' in kwargs:
            args['listall'] = True
        return map(lambda pn: PhysicalNetwork(
            pn.__dict__), api_client.listPhysicalNetworks(**args))
class VpnCustomerGateway(BaseAbstract):
    """Manage VPN Customer Gateway"""
    def __init__(self, items):
        super(VpnCustomerGateway, self).__init__(items)

    @classmethod
    def create(cls, api_client, services=None, name=None, gateway=None, cidrlist=None, account=None, domainid=None,
               presharedkey=None, ikepolicy=None, esppolicy=None):
        """Create VPN Customer Gateway.

        For each tunnel option, a value from the `services` dict takes
        precedence over the corresponding explicit keyword argument.
        """
        cmd = {'name': name, 'gateway': gateway, 'cidrlist': cidrlist}
        if not services:
            services = {}
        if "ipsecpsk" in services:
            cmd['ipsecpsk'] = services["ipsecpsk"]
        elif presharedkey:
            cmd['ipsecpsk'] = presharedkey
        if "ikepolicy" in services:
            cmd['ikepolicy'] = services["ikepolicy"]
        elif ikepolicy:
            cmd['ikepolicy'] = ikepolicy
        if "ikelifetime" in services:
            cmd['ikelifetime'] = services["ikelifetime"]
        if "esppolicy" in services:
            cmd['esppolicy'] = services["esppolicy"]
        elif esppolicy:
            cmd['esppolicy'] = esppolicy
        if "esplifetime" in services:
            cmd['esplifetime'] = services["esplifetime"]
        if "dpd" in services:
            cmd['dpd'] = services["dpd"]
        if "forceencap" in services:
            cmd['forceencap'] = services["forceencap"]
        if account:
            cmd['account'] = account
        if domainid:
            cmd['domainid'] = domainid
        # fetch_result makes the client wait for the async job and return
        # its payload rather than just the job reference.
        cmd['fetch_result'] = True
        return VpnCustomerGateway(api_client.createVpnCustomerGateway(**cmd).get('vpncustomergateway'))

    def update(self, api_client, services, name, gateway, cidrlist):
        """Updates VPN Customer Gateway"""
        cmd = {'id': self.id, 'name': name, 'gateway': gateway, 'cidrlist': cidrlist}
        if "ipsecpsk" in services:
            cmd['ipsecpsk'] = services["ipsecpsk"]
        if "ikepolicy" in services:
            cmd['ikepolicy'] = services["ikepolicy"]
        if "ikelifetime" in services:
            cmd['ikelifetime'] = services["ikelifetime"]
        if "esppolicy" in services:
            cmd['esppolicy'] = services["esppolicy"]
        if "esplifetime" in services:
            cmd['esplifetime'] = services["esplifetime"]
        if "dpd" in services:
            cmd['dpd'] = services["dpd"]
        if "forceencap" in services:
            cmd['forceencap'] = services["forceencap"]
        return api_client.updateVpnCustomerGateway(**cmd)

    def delete(self, api_client):
        """Delete VPN Customer Gateway"""
        cmd = {'id': self.id}
        api_client.deleteVpnCustomerGateway(**cmd)

    @classmethod
    def list(cls, api_client, **kwargs):
        """List all VPN customer Gateway"""
        cmd = {}
        # NOTE(review): `cmd.update(kwargs)` is commented out, so all keyword
        # arguments except the listall trigger are ignored -- confirm this is
        # intentional in this fork.
        # cmd.update(kwargs)
        if 'account' in kwargs.keys() and 'domainid' in kwargs.keys():
            cmd['listall'] = True
        return super(VpnCustomerGateway, cls).list(api_client.listVpnCustomerGateways(**cmd).get('vpncustomergateway'))
class Project:
    """Manage Project life cycle"""
    def __init__(self, items):
        # Mirror the API response dict as instance attributes.
        self.__dict__.update(items)

    @classmethod
    def create(cls, api_client, services, account=None, domainid=None):
        """Create project"""
        # A random suffix keeps concurrently-created project names unique.
        cmd = {'displaytext': services["displaytext"], 'name': "-".join([services["name"], random_gen()])}
        if account:
            cmd['account'] = account
        if domainid:
            cmd['domainid'] = domainid
        return Project(api_client.createProject(**cmd))

    def delete(self, api_client):
        """Delete Project"""
        cmd = {'id': self.id}
        api_client.deleteProject(**cmd)

    def update(self, api_client, **kwargs):
        """Updates the project"""
        cmd = {'id': self.id}
        # NOTE(review): `cmd.update(kwargs)` is commented out, so every
        # keyword argument is silently ignored -- confirm this is intentional.
        # cmd.update(kwargs)
        return api_client.updateProject(**cmd)

    def activate(self, api_client):
        """Activates the suspended project"""
        cmd = {'id': self.id}
        return api_client.activateProject(**cmd)

    def suspend(self, api_client):
        """Suspend the active project"""
        cmd = {'id': self.id}
        return api_client.suspendProject(**cmd)

    def addAccount(self, api_client, account=None, email=None):
        """Add account to project"""
        cmd = {'projectid': self.id}
        if account:
            cmd['account'] = account
        if email:
            cmd['email'] = email
        return api_client.addAccountToProject(**cmd)

    def deleteAccount(self, api_client, account):
        """Delete account from project"""
        cmd = {'projectid': self.id, 'account': account}
        return api_client.deleteAccountFromProject(**cmd)

    @classmethod
    def listAccounts(cls, api_client, **kwargs):
        """Lists all accounts associated with projects."""
        cmd = {}
        # NOTE(review): kwargs are not forwarded (update call commented out);
        # only the listall flag derived from them reaches the API.
        # cmd.update(kwargs)
        if 'account' in kwargs.keys() and 'domainid' in kwargs.keys():
            cmd['listall'] = True
        return api_client.listProjectAccounts(**cmd)

    @classmethod
    def list(cls, api_client, **kwargs):
        """Lists all projects."""
        cmd = {}
        # NOTE(review): kwargs are not forwarded here either -- confirm intentional.
        # cmd.update(kwargs)
        if 'account' in kwargs.keys() and 'domainid' in kwargs.keys():
            cmd['listall'] = True
        return api_client.listProjects(**cmd)
class ProjectInvitation:
    """Manage project invitations"""
    def __init__(self, items):
        # Mirror the API response dict as instance attributes.
        self.__dict__.update(items)

    @classmethod
    def update(cls, api_client, projectid, accept, account=None, token=None):
        """Updates the project invitation for that account.

        `accept` is forwarded as-is; `account` or a one-time `token`
        identifies whose invitation is being answered.
        """
        cmd = {'projectid': projectid, 'accept': accept}
        if account:
            cmd['account'] = account
        if token:
            cmd['token'] = token
        return api_client.updateProjectInvitation(**cmd)

    @staticmethod
    def delete(api_client, id):
        """Deletes the project invitation"""
        cmd = {'id': id}
        return api_client.deleteProjectInvitation(**cmd)

    @classmethod
    def list(cls, api_client, **kwargs):
        """Lists project invitations"""
        cmd = {}
        # NOTE(review): kwargs are not forwarded (update call commented out);
        # only the listall flag derived from them is sent -- confirm intentional.
        # cmd.update(kwargs)
        if 'account' in kwargs.keys() and 'domainid' in kwargs.keys():
            cmd['listall'] = True
        return api_client.listProjectInvitations(**cmd)
class Configurations(BaseAbstract):
    """Manage Configuration"""
    def __init__(self, items=None):
        # BaseAbstract init is skipped when no payload is supplied, so the
        # class can also be used purely for its classmethods.
        if items:
            super(Configurations, self).__init__(items)

    @classmethod
    def update(cls, api_client, name, value=None, zoneid=None):
        """Updates the specified configuration"""
        cmd = {'name': name, 'value': value}
        if zoneid:
            cmd['zoneid'] = zoneid
        api_client.updateConfiguration(**cmd)

    @classmethod
    def list(cls, api_client, **kwargs):
        """Lists configurations"""
        cmd = {}
        cmd.update(kwargs)
        if 'account' in kwargs.keys() and 'domainid' in kwargs.keys():
            cmd['listall'] = True
        # Default to an empty list when the response has no 'configuration' key.
        return super(Configurations, cls).list(api_client.listConfigurations(**cmd).get('configuration', []))

    @classmethod
    def listCapabilities(cls, api_client, **kwargs):
        """Lists capabilities"""
        cmd = {}
        # NOTE(review): kwargs are ignored (update call commented out) --
        # confirm this is intentional.
        # cmd.update(kwargs)
        return api_client.listCapabilities(**cmd)
class NiciraNvp:
    """Manage Nicira NVP network virtualization devices."""

    def __init__(self, items):
        # Mirror the API response dict as instance attributes.
        self.__dict__.update(items)

    @classmethod
    def add(cls, api_client, services, physicalnetworkid,
            hostname=None, username=None, password=None, transportzoneuuid=None):
        """Add a Nicira NVP device to a physical network.

        Explicit keyword arguments take precedence; any omitted value is
        read from the `services` dict instead.

        (Bug fix: the `password` default was a corrupted `<PASSWORD>`
        placeholder -- a syntax error -- and is now None, matching the other
        optional parameters.)
        """
        cmd = {'physicalnetworkid': physicalnetworkid}
        cmd['hostname'] = hostname if hostname else services['hostname']
        cmd['username'] = username if username else services['username']
        cmd['password'] = password if password else services['password']
        cmd['transportzoneuuid'] = (transportzoneuuid if transportzoneuuid
                                    else services['transportZoneUuid'])
        return NiciraNvp(api_client.addNiciraNvpDevice(**cmd))

    def delete(self, api_client):
        """Remove this NVP device."""
        api_client.deleteNiciraNvpDevice(nvpdeviceid=self.nvpdeviceid)
        return

    @classmethod
    def list(cls, api_client, **kwargs):
        """List NVP devices; account+domain queries imply listall."""
        cmd = dict(kwargs)
        if 'account' in kwargs and 'domainid' in kwargs:
            cmd['listall'] = True
        return api_client.listNiciraNvpDevices(**cmd)
class NetworkServiceProvider:
    """Manage network service providers for CloudStack."""

    def __init__(self, items):
        # Mirror the API response dict as instance attributes.
        self.__dict__.update(items)

    @classmethod
    def add(cls, api_client, name, physicalnetworkid, servicelist):
        """Add a network service provider to a physical network."""
        cmd = {'name': name, 'physicalnetworkid': physicalnetworkid,
               'servicelist': servicelist}
        return NetworkServiceProvider(
            api_client.addNetworkServiceProvider(**cmd))

    def delete(self, api_client):
        """Delete this network service provider."""
        cmd = {'id': self.id}
        return api_client.deleteNetworkServiceProvider(**cmd)

    # NOTE: the original class also defined an instance-level
    # `update(self, api_client, **kwargs)` here, but it was immediately
    # shadowed by the classmethod of the same name below and therefore
    # unreachable.  The dead definition has been removed; behavior is
    # unchanged.
    @classmethod
    def update(cls, api_client, id, **kwargs):
        """Update a network service provider (e.g. its state) by id."""
        cmd = {'id': id}
        cmd.update(kwargs)
        return api_client.updateNetworkServiceProvider(**cmd)

    @classmethod
    def list(cls, api_client, **kwargs):
        """List network service providers; account+domain implies listall."""
        cmd = {}
        cmd.update(kwargs)
        if 'account' in kwargs.keys() and 'domainid' in kwargs.keys():
            cmd['listall'] = True
        return api_client.listNetworkServiceProviders(**cmd)
class Router:
"""Manage router life cycle"""
def __init__(self, items):
self.__dict__.update(items)
@classmethod
def start(cls, api_client, id):
"""Starts the router"""
cmd = {'id': id}
return api_client.startRouter(**cmd)
@classmethod
def stop(cls, api_client, id, forced=None):
"""Stops the router"""
cmd = {'id': id}
if forced:
cmd['forced'] = forced
return api_client.stopRouter(**cmd)
@classmethod
def reboot(cls, api_client, id):
"""Reboots the router"""
cmd = {'id': id}
return api_client.rebootRouter(**cmd)
@classmethod
def destroy(cls, api_client, id):
"""Destroy the router"""
cmd = {'id': id}
return api_client.destroyRouter(**cmd)
@classmethod
def change_service_offering(cls, api_client, id, serviceofferingid):
"""Change | |
import itertools
from .. import AssertsCompiledSQL
from .. import AssertsExecutionResults
from .. import config
from .. import fixtures
from ..assertions import assert_raises
from ..assertions import eq_
from ..assertions import in_
from ..assertsql import CursorSQL
from ..schema import Column
from ..schema import Table
from ... import bindparam
from ... import case
from ... import column
from ... import Computed
from ... import exists
from ... import false
from ... import ForeignKey
from ... import func
from ... import Identity
from ... import Integer
from ... import literal
from ... import literal_column
from ... import null
from ... import select
from ... import String
from ... import table
from ... import testing
from ... import text
from ... import true
from ... import tuple_
from ... import union
from ... import util
from ... import values
from ...exc import DatabaseError
from ...exc import ProgrammingError
class CollateTest(fixtures.TablesTest):
    """Exercise COLLATE applied inside ORDER BY against the backend."""

    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        Table(
            "some_table",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("data", String(100)),
        )

    @classmethod
    def insert_data(cls, connection):
        # Two rows whose data values sort the same under any sane collation.
        rows = [dict(id=n, data="collate data%d" % n) for n in (1, 2)]
        connection.execute(cls.tables.some_table.insert(), rows)

    def _assert_result(self, select, result):
        # Run the statement on a fresh connection and compare all rows.
        with config.db.connect() as conn:
            fetched = conn.execute(select).fetchall()
        eq_(fetched, result)

    @testing.requires.order_by_collation
    def test_collate_order_by(self):
        collation_name = testing.requires.get_order_by_collation(testing.config)
        tbl = self.tables.some_table
        stmt = select(tbl).order_by(tbl.c.data.collate(collation_name).asc())
        self._assert_result(stmt, [(1, "collate data1"), (2, "collate data2")])
class OrderByLabelTest(fixtures.TablesTest):
    """Verify that the dialect emits usable ORDER BY / GROUP BY clauses
    when labelled select expressions are referenced -- i.e. the
    "supports_simple_order_by_label" dialect capability.
    """

    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        Table(
            "some_table",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("x", Integer),
            Column("y", Integer),
            Column("q", String(50)),
            Column("p", String(50)),
        )

    @classmethod
    def insert_data(cls, connection):
        rows = [
            dict(id=1, x=1, y=2, q="q1", p="p3"),
            dict(id=2, x=2, y=3, q="q2", p="p2"),
            dict(id=3, x=3, y=4, q="q3", p="p1"),
        ]
        connection.execute(cls.tables.some_table.insert(), rows)

    def _assert_result(self, select, result):
        # Run the statement on a fresh connection and compare all rows.
        with config.db.connect() as conn:
            fetched = conn.execute(select).fetchall()
        eq_(fetched, result)

    def test_plain(self):
        tbl = self.tables.some_table
        labelled = tbl.c.x.label("lx")
        self._assert_result(select(labelled).order_by(labelled), [(1,), (2,), (3,)])

    def test_composed_int(self):
        tbl = self.tables.some_table
        labelled = (tbl.c.x + tbl.c.y).label("lx")
        self._assert_result(select(labelled).order_by(labelled), [(3,), (5,), (7,)])

    def test_composed_multiple(self):
        tbl = self.tables.some_table
        sum_label = (tbl.c.x + tbl.c.y).label("lx")
        concat_label = (func.lower(tbl.c.q) + tbl.c.p).label("ly")
        expected = [(3, util.u("q1p3")), (5, util.u("q2p2")), (7, util.u("q3p1"))]
        self._assert_result(
            select(sum_label, concat_label).order_by(sum_label, concat_label.desc()),
            expected,
        )

    def test_plain_desc(self):
        tbl = self.tables.some_table
        labelled = tbl.c.x.label("lx")
        self._assert_result(select(labelled).order_by(labelled.desc()), [(3,), (2,), (1,)])

    def test_composed_int_desc(self):
        tbl = self.tables.some_table
        labelled = (tbl.c.x + tbl.c.y).label("lx")
        self._assert_result(select(labelled).order_by(labelled.desc()), [(7,), (5,), (3,)])

    @testing.requires.group_by_complex_expression
    def test_group_by_composed(self):
        tbl = self.tables.some_table
        expr = (tbl.c.x + tbl.c.y).label("lx")
        stmt = select(func.count(tbl.c.id), expr).group_by(expr).order_by(expr)
        self._assert_result(stmt, [(1, 3), (1, 5), (1, 7)])
class ValuesExpressionTest(fixtures.TestBase):
    """Verify the VALUES table-value constructor round-trips rows."""

    __requires__ = ("table_value_constructor",)
    __backend__ = True

    def test_tuples(self, connection):
        rows = [(1, "name1"), (2, "name2"), (3, "name3")]
        value_expr = values(
            column("id", Integer), column("name", String), name="my_values"
        ).data(rows)
        eq_(connection.execute(select(value_expr)).all(), rows)
class FetchLimitOffsetTest(fixtures.TablesTest):
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"some_table",
metadata,
Column("id", Integer, primary_key=True),
Column("x", Integer),
Column("y", Integer),
)
@classmethod
def insert_data(cls, connection):
connection.execute(
cls.tables.some_table.insert(),
[
{"id": 1, "x": 1, "y": 2},
{"id": 2, "x": 2, "y": 3},
{"id": 3, "x": 3, "y": 4},
{"id": 4, "x": 4, "y": 5},
{"id": 5, "x": 4, "y": 6},
],
)
def _assert_result(
self, connection, select, result, params=(), set_=False
):
if set_:
query_res = connection.execute(select, params).fetchall()
eq_(len(query_res), len(result))
eq_(set(query_res), set(result))
else:
eq_(connection.execute(select, params).fetchall(), result)
def _assert_result_str(self, select, result, params=()):
conn = config.db.connect(close_with_result=True)
eq_(conn.exec_driver_sql(select, params).fetchall(), result)
def test_simple_limit(self, connection):
table = self.tables.some_table
stmt = select(table).order_by(table.c.id)
self._assert_result(
connection,
stmt.limit(2),
[(1, 1, 2), (2, 2, 3)],
)
self._assert_result(
connection,
stmt.limit(3),
[(1, 1, 2), (2, 2, 3), (3, 3, 4)],
)
def test_limit_render_multiple_times(self, connection):
table = self.tables.some_table
stmt = select(table.c.id).limit(1).scalar_subquery()
u = union(select(stmt), select(stmt)).subquery().select()
self._assert_result(
connection,
u,
[
(1,),
],
)
@testing.requires.fetch_first
def test_simple_fetch(self, connection):
table = self.tables.some_table
self._assert_result(
connection,
select(table).order_by(table.c.id).fetch(2),
[(1, 1, 2), (2, 2, 3)],
)
self._assert_result(
connection,
select(table).order_by(table.c.id).fetch(3),
[(1, 1, 2), (2, 2, 3), (3, 3, 4)],
)
@testing.requires.offset
def test_simple_offset(self, connection):
table = self.tables.some_table
self._assert_result(
connection,
select(table).order_by(table.c.id).offset(2),
[(3, 3, 4), (4, 4, 5), (5, 4, 6)],
)
self._assert_result(
connection,
select(table).order_by(table.c.id).offset(3),
[(4, 4, 5), (5, 4, 6)],
)
@testing.combinations(
([(2, 0), (2, 1), (3, 2)]),
([(2, 1), (2, 0), (3, 2)]),
([(3, 1), (2, 1), (3, 1)]),
argnames="cases",
)
@testing.requires.offset
def test_simple_limit_offset(self, connection, cases):
table = self.tables.some_table
connection = connection.execution_options(compiled_cache={})
assert_data = [(1, 1, 2), (2, 2, 3), (3, 3, 4), (4, 4, 5), (5, 4, 6)]
for limit, offset in cases:
expected = assert_data[offset : offset + limit]
self._assert_result(
connection,
select(table).order_by(table.c.id).limit(limit).offset(offset),
expected,
)
@testing.requires.fetch_first
def test_simple_fetch_offset(self, connection):
table = self.tables.some_table
self._assert_result(
connection,
select(table).order_by(table.c.id).fetch(2).offset(1),
[(2, 2, 3), (3, 3, 4)],
)
self._assert_result(
connection,
select(table).order_by(table.c.id).fetch(3).offset(2),
[(3, 3, 4), (4, 4, 5), (5, 4, 6)],
)
@testing.requires.fetch_no_order_by
def test_fetch_offset_no_order(self, connection):
table = self.tables.some_table
self._assert_result(
connection,
select(table).fetch(10),
[(1, 1, 2), (2, 2, 3), (3, 3, 4), (4, 4, 5), (5, 4, 6)],
set_=True,
)
@testing.requires.offset
def test_simple_offset_zero(self, connection):
table = self.tables.some_table
self._assert_result(
connection,
select(table).order_by(table.c.id).offset(0),
[(1, 1, 2), (2, 2, 3), (3, 3, 4), (4, 4, 5), (5, 4, 6)],
)
self._assert_result(
connection,
select(table).order_by(table.c.id).offset(1),
[(2, 2, 3), (3, 3, 4), (4, 4, 5), (5, 4, 6)],
)
@testing.requires.offset
def test_limit_offset_nobinds(self):
"""test that 'literal binds' mode works - no bound params."""
table = self.tables.some_table
stmt = select(table).order_by(table.c.id).limit(2).offset(1)
sql = stmt.compile(
dialect=config.db.dialect, compile_kwargs={"literal_binds": True}
)
sql = str(sql)
self._assert_result_str(sql, [(2, 2, 3), (3, 3, 4)])
@testing.requires.fetch_first
def test_fetch_offset_nobinds(self):
"""test that 'literal binds' mode works - no bound params."""
table = self.tables.some_table
stmt = select(table).order_by(table.c.id).fetch(2).offset(1)
sql = stmt.compile(
dialect=config.db.dialect, compile_kwargs={"literal_binds": True}
)
sql = str(sql)
self._assert_result_str(sql, [(2, 2, 3), (3, 3, 4)])
@testing.requires.bound_limit_offset
def test_bound_limit(self, connection):
table = self.tables.some_table
self._assert_result(
connection,
select(table).order_by(table.c.id).limit(bindparam("l")),
[(1, 1, 2), (2, 2, 3)],
params={"l": 2},
)
self._assert_result(
connection,
select(table).order_by(table.c.id).limit(bindparam("l")),
[(1, 1, 2), (2, 2, 3), (3, 3, 4)],
params={"l": 3},
)
@testing.requires.bound_limit_offset
def test_bound_offset(self, connection):
table = self.tables.some_table
self._assert_result(
connection,
select(table).order_by(table.c.id).offset(bindparam("o")),
[(3, 3, 4), (4, 4, 5), (5, 4, 6)],
params={"o": 2},
)
self._assert_result(
connection,
select(table).order_by(table.c.id).offset(bindparam("o")),
[(2, 2, 3), (3, 3, 4), (4, 4, 5), (5, 4, 6)],
params={"o": 1},
)
@testing.requires.bound_limit_offset
def test_bound_limit_offset(self, connection):
table = self.tables.some_table
self._assert_result(
connection,
select(table)
.order_by(table.c.id)
.limit(bindparam("l"))
.offset(bindparam("o")),
[(2, 2, 3), (3, 3, 4)],
params={"l": 2, "o": 1},
)
self._assert_result(
connection,
select(table)
.order_by(table.c.id)
.limit(bindparam("l"))
.offset(bindparam("o")),
[(3, 3, 4), (4, 4, 5), (5, 4, 6)],
params={"l": 3, "o": 2},
)
@testing.requires.fetch_first
def test_bound_fetch_offset(self, connection):
table = self.tables.some_table
self._assert_result(
connection,
select(table)
.order_by(table.c.id)
.fetch(bindparam("f"))
.offset(bindparam("o")),
[(2, 2, 3), (3, 3, 4)],
params={"f": 2, "o": 1},
)
self._assert_result(
connection,
select(table)
.order_by(table.c.id)
.fetch(bindparam("f"))
.offset(bindparam("o")),
[(3, 3, 4), (4, 4, 5), (5, 4, 6)],
params={"f": 3, "o": 2},
)
@testing.requires.sql_expression_limit_offset
def test_expr_offset(self, connection):
table = self.tables.some_table
self._assert_result(
connection,
select(table)
.order_by(table.c.id)
.offset(literal_column("1") + literal_column("2")),
[(4, 4, 5), (5, 4, 6)],
)
@testing.requires.sql_expression_limit_offset
def test_expr_limit(self, connection):
table = self.tables.some_table
self._assert_result(
connection,
select(table)
.order_by(table.c.id)
.limit(literal_column("1") + literal_column("2")),
[(1, 1, 2), (2, 2, 3), (3, 3, 4)],
)
@testing.requires.sql_expression_limit_offset
def test_expr_limit_offset(self, connection):
table = self.tables.some_table
self._assert_result(
connection,
select(table)
.order_by(table.c.id)
.limit(literal_column("1") + literal_column("1"))
.offset(literal_column("1") + literal_column("1")),
[(3, 3, 4), (4, 4, 5)],
)
@testing.requires.fetch_first
def test_expr_fetch_offset(self, connection):
table = self.tables.some_table
self._assert_result(
connection,
select(table)
.order_by(table.c.id)
.fetch(literal_column("1") + literal_column("1"))
.offset(literal_column("1") + literal_column("1")),
[(3, 3, 4), (4, 4, 5)],
)
@testing.requires.sql_expression_limit_offset
def test_simple_limit_expr_offset(self, connection):
table = self.tables.some_table
self._assert_result(
connection,
select(table)
.order_by(table.c.id)
.limit(2)
.offset(literal_column("1") + literal_column("1")),
[(3, 3, 4), (4, 4, 5)],
)
self._assert_result(
connection,
select(table)
.order_by(table.c.id)
.limit(3)
.offset(literal_column("1") + literal_column("1")),
[(3, 3, 4), (4, 4, 5), (5, 4, 6)],
)
@testing.requires.sql_expression_limit_offset
def test_expr_limit_simple_offset(self, connection):
table = self.tables.some_table
self._assert_result(
connection,
select(table)
.order_by(table.c.id)
.limit(literal_column("1") + literal_column("1"))
.offset(2),
[(3, 3, 4), (4, 4, 5)],
)
self._assert_result(
connection,
select(table)
.order_by(table.c.id)
.limit(literal_column("1") + literal_column("1"))
.offset(1),
[(2, 2, 3), (3, 3, 4)],
)
@testing.requires.fetch_ties
def test_simple_fetch_ties(self, connection):
table = self.tables.some_table
self._assert_result(
connection,
select(table).order_by(table.c.x.desc()).fetch(1, with_ties=True),
[(4, 4, 5), (5, 4, 6)],
set_=True,
)
self._assert_result(
connection,
select(table).order_by(table.c.x.desc()).fetch(3, with_ties=True),
[(3, 3, 4), (4, 4, 5), (5, 4, 6)],
set_=True,
)
@testing.requires.fetch_ties
@testing.requires.fetch_offset_with_options
def test_fetch_offset_ties(self, connection):
table = self.tables.some_table
fa = connection.execute(
select(table)
.order_by(table.c.x)
.fetch(2, with_ties=True)
.offset(2)
).fetchall()
eq_(fa[0], (3, 3, 4))
eq_(set(fa), set([(3, 3, 4), (4, 4, 5), (5, 4, 6)]))
@testing.requires.fetch_ties
@testing.requires.fetch_offset_with_options
def test_fetch_offset_ties_exact_number(self, connection):
table = self.tables.some_table
self._assert_result(
connection,
select(table)
.order_by(table.c.x)
.fetch(2, with_ties=True)
.offset(1),
[(2, 2, 3), (3, 3, 4)],
)
self._assert_result(
connection,
select(table)
.order_by(table.c.x)
.fetch(3, with_ties=True)
.offset(3),
[(4, 4, 5), (5, 4, 6)],
)
@testing.requires.fetch_percent
def test_simple_fetch_percent(self, connection):
table = self.tables.some_table
self._assert_result(
connection,
select(table).order_by(table.c.id).fetch(20, | |
is None or not self.warm_start:
self.model = self._get_model()
self.model.initialize_bias_terms(mx.nd.array(wd_freqs).squeeze()) ## initialize bias weights to log frequencies
trainer = gluon.Trainer(self.model.collect_params(), self.optimizer, {'learning_rate': self.lr})
sc_obj, npmi, ppl, redundancy = 0.0, 0.0, 0.0, 0.0
v_res = None
for epoch in range(self.epochs):
ts_epoch = time.time()
elbo_losses = []
lab_losses = []
for i, (data, labels) in enumerate(train_dataloader):
if labels is None:
labels = mx.nd.expand_dims(mx.nd.zeros(data.shape[0]), 1)
labels = labels.as_in_context(self.ctx)
data = data.as_in_context(self.ctx)
with autograd.record():
elbo_ls, kl_loss, _, _, lab_loss, total_ls = self._get_losses(self.model, data, labels)
elbo_mean = elbo_ls.mean()
total_ls.backward()
trainer.step(1)
if not self.quiet:
elbo_losses.append(float(elbo_mean.asscalar()))
if lab_loss is not None:
lab_losses.append(float(lab_loss.mean().asscalar()))
if not self.quiet and not self.validate_each_epoch:
elbo_mean = np.mean(elbo_losses) if len(elbo_losses) > 0 else 0.0
lab_mean = np.mean(lab_losses) if len(lab_losses) > 0 else 0.0
self._output_status("Epoch [{}] finished in {} seconds. [elbo = {}, label loss = {}]"
.format(epoch+1, (time.time()-ts_epoch), elbo_mean, lab_mean))
mx.nd.waitall()
if val_X is not None and (self.validate_each_epoch or epoch == self.epochs-1):
logging.info('Performing validation ....')
v_res = self.validate(val_X, val_y)
sc_obj = self._get_objective_from_validation_result(v_res)
if self.has_classifier:
self._output_status("Epoch [{}]. Objective = {} ==> PPL = {}. NPMI ={}. Redundancy = {}. Accuracy = {}."
.format(epoch+1, sc_obj, v_res['ppl'],
v_res['npmi'], v_res['redundancy'], v_res['accuracy']))
else:
self._output_status("Epoch [{}]. Objective = {} ==> PPL = {}. NPMI ={}. Redundancy = {}."
.format(epoch+1, sc_obj, v_res['ppl'], v_res['npmi'], v_res['redundancy']))
if self.reporter:
self.reporter(epoch=epoch+1, objective=sc_obj, time_step=time.time(),
coherence=v_res['npmi'], perplexity=v_res['ppl'], redundancy=v_res['redundancy'])
mx.nd.waitall()
return sc_obj, v_res
def fit(self, X: sp.csr.csr_matrix, y: np.ndarray = None) -> 'BaseBowEstimator':
"""
Fit VAE model according to the given training data X with optional co-variates y.
Parameters:
X: representing input data
y: representing covariate/labels associated with data elements
Returns:
self
"""
self.fit_with_validation(X, y, None, None)
return self
class BowEstimator(BaseBowEstimator):
    """Unsupervised bag-of-words VAE topic-model estimator.

    Concrete subclass of ``BaseBowEstimator``; supplies model construction
    (`_get_model`) and inference helpers for the covariate-free case.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    @classmethod
    def from_config(cls, *args, **kwargs):
        # Pure delegation to the base-class factory.
        return super().from_config(*args, **kwargs)

    @classmethod
    def from_saved(cls, *args, **kwargs):
        # Pure delegation to the base-class factory.
        return super().from_saved(*args, **kwargs)

    def npmi(self, X, k=10):
        # Topic coherence over the top-k terms per topic (delegates to base `_npmi`).
        return self._npmi(X, k=k)

    def perplexity(self, X: sp.csr.csr_matrix) -> float:
        """
        Calculate approximate perplexity for data X and y
        Parameters:
            X: Document word matrix of shape [n_samples, vocab_size]
        Returns:
            Perplexity score.
        """
        # Labels are unused by the unsupervised model, hence None.
        return super().perplexity(X, None)

    def _forward(self, model: BowVAEModel, data: mx.nd.NDArray, labels: mx.nd.NDArray):
        """
        Forward pass of BowVAE model given the supplied data
        Parameters:
            model: Core VAE model for bag-of-words topic model
            data: Document word matrix of shape (n_train_samples, vocab_size)
            labels: Ignored
        Returns:
            Tuple of:
                elbo, kl_loss, rec_loss, coherence_loss, redundancy_loss, reconstruction
        """
        return model(data, labels)

    def initialize_with_pretrained(self):
        # Build a fresh model and load ALL weights from the configured
        # pretrained parameter file (which must have been set).
        assert(self.pretrained_param_file is not None)
        self.model = self._get_model()
        self.model.load_parameters(self.pretrained_param_file, allow_missing=False)

    def _get_model(self):
        """
        Initializes embedding weights and returns a `BowVAEModel` with hyperparameters provided.
        Returns:
            (:class:`BowVAEModel`) initialized using provided hyperparameters
        """
        #vocab, emb_size = self._initialize_embedding_layer(self.embedding_source, self.embedding_size)
        if self.embedding_source != 'random' and self.vocabulary.embedding is None:
            # embedding_source has the form '<type>:<name>' (presumably a GluonNLP
            # pre-trained embedding specifier -- confirm expected format).
            e_type, e_name = tuple(self.embedding_source.split(':'))
            pt_embedding = nlp.embedding.create(e_type, source=e_name)
            self.vocabulary.set_embedding(pt_embedding)
            emb_size = len(self.vocabulary.embedding.idx_to_vec[0])  # embedding dimensionality
            # Words missing from the pre-trained embedding come back as all-zero
            # vectors; give those a small random initialization instead.
            for word in self.vocabulary.embedding._idx_to_token:
                if (self.vocabulary.embedding[word] == mx.nd.zeros(emb_size)).sum() == emb_size:
                    self.vocabulary.embedding[word] = mx.nd.random.normal(0, 0.1, emb_size)
        else:
            emb_size = self.embedding_size
        model = \
            BowVAEModel(self.enc_hidden_dim, emb_size, n_encoding_layers=self.n_encoding_layers,
                        enc_dr=self.enc_dr, fixed_embedding=self.fixed_embedding,
                        classifier_dropout=self.classifier_dropout,
                        n_labels = self.n_labels,
                        gamma = self.gamma,
                        multilabel = self.multilabel,
                        vocabulary=self.vocabulary,
                        latent_distribution=self.latent_distribution,
                        coherence_reg_penalty=self.coherence_reg_penalty, redundancy_reg_penalty=self.redundancy_reg_penalty,
                        batch_size=self.batch_size,
                        n_covars=0, ctx=self.ctx)
        if self.pretrained_param_file is not None:
            # Warm-start from a previously saved parameter file when provided.
            model.load_parameters(self.pretrained_param_file, allow_missing=False)
        return model

    def get_topic_vectors(self) -> mx.nd.NDArray:
        """
        Get topic vectors of the fitted model.
        Returns:
            topic_distribution: topic_distribution[i, j] represents word j in topic i. shape=(n_latent, vocab_size)
        """
        return self.model.get_topic_vectors()

    def transform(self, X: sp.csr.csr_matrix) -> mx.nd.NDArray:
        """
        Transform data X according to the fitted model.
        Parameters:
            X: Document word matrix of shape {n_samples, n_features}
        Returns:
            topic_distribution: shape=(n_samples, n_latent) Document topic distribution for X
        """
        # Densify/convert to an MXNet float32 array before encoding.
        mx_array = mx.nd.array(X,dtype='float32')
        return self.model.encode_data(mx_array).asnumpy()
class CovariateBowEstimator(BaseBowEstimator):
    """Bag-of-words VAE topic model conditioned on observed covariates.

    Training and inference take, alongside the document-term matrix ``X``, a
    covariate matrix ``y`` of shape (n_samples, n_covars) which is fed to the
    underlying :class:`CovariateBowVAEModel`.

    Bug fix vs. the original: ``_npmi_per_covariate`` assigned the per-covariate
    key to a misspelled variable (``covar_keky``), which raised ``NameError`` on
    the following line; the assignment now targets ``covar_key``.
    """

    def __init__(self, n_covars=0, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.covar_net_layers = 1 ### XXX - temp hardcoded
        self.n_covars = n_covars

    @classmethod
    def from_config(cls, n_covars, *args, **kwargs):
        """Build an estimator via the base factory, then attach the covariate count."""
        est = super().from_config(*args, **kwargs)
        est.n_covars = n_covars
        return est

    def _get_model(self):
        """
        Returns
            MXNet model initialized using provided hyperparameters
        """
        if self.embedding_source != 'random' and self.vocabulary.embedding is None:
            # embedding_source has the form '<type>:<name>' (presumably a GluonNLP
            # pre-trained embedding specifier -- confirm expected format).
            e_type, e_name = tuple(self.embedding_source.split(':'))
            pt_embedding = nlp.embedding.create(e_type, source=e_name)
            self.vocabulary.set_embedding(pt_embedding)
            emb_size = len(self.vocabulary.embedding.idx_to_vec[0])  # embedding dimensionality
            # Words missing from the pre-trained embedding come back as all-zero
            # vectors; give those a small random initialization instead.
            for word in self.vocabulary.embedding._idx_to_token:
                if (self.vocabulary.embedding[word] == mx.nd.zeros(emb_size)).sum() == emb_size:
                    self.vocabulary.embedding[word] = mx.nd.random.normal(0, 0.1, emb_size)
        else:
            emb_size = self.embedding_size
        model = \
            CovariateBowVAEModel(n_covars=self.n_covars,
                                 vocabulary=self.vocabulary, enc_dim=self.enc_hidden_dim, embedding_size=emb_size,
                                 fixed_embedding=self.fixed_embedding, latent_distribution=self.latent_distribution,
                                 coherence_reg_penalty=self.coherence_reg_penalty, redundancy_reg_penalty=self.redundancy_reg_penalty,
                                 batch_size=self.batch_size, n_encoding_layers=self.n_encoding_layers, enc_dr=self.enc_dr,
                                 ctx=self.ctx)
        return model

    def _get_config(self):
        # Extend the base configuration with the covariate count.
        config = super()._get_config()
        config['n_covars'] = self.n_covars
        return config

    def _forward(self, model, data, labels):
        """
        Forward pass of BowVAE model given the supplied data
        Parameters:
            model (MXNet model): Model that returns elbo, kl_loss, rec_loss, l1_pen, coherence_loss, redundancy_loss, reconstruction
            data ({array-like, sparse matrix}): Document word matrix of shape (n_train_samples, vocab_size)
            labels ({array-like, sparse matrix}): Covariate matrix of shape (n_train_samples, n_covars)
        Returns:
            (tuple): Tuple of:
                elbo, kl_loss, rec_loss, l1_pen, coherence_loss, redundancy_loss, reconstruction
        """
        # Keep the last-seen batch around; get_topic_vectors() needs data
        # and covariates to query the covariate-conditioned decoder.
        self.train_data = data
        self.train_labels = labels
        return model(data, labels)

    def _npmi_per_covariate(self, X, y, k=10):
        """
        Calculate NPMI(Normalized Pointwise Mutual Information) for each covariate for data X
        Parameters:
            X (array-like or sparse matrix): Document word matrix. shape [n_samples, vocab_size]
            y (array-like or sparse matrix): Covariate matrix. shape [n_samples, n_covars]
            k (int): Threshold at which to compute npmi. optional (default=10)
        Returns:
            (float): Average NPMI score over all distinct covariate values.
        """
        X_train = X.toarray()
        y_train = y
        covars = np.unique(y_train, axis=0)
        covar_npmi = {}
        npmi_total = 0
        for covar in covars:
            # Select only the documents carrying this covariate value.
            mask = (y_train == covar).all(axis=1)
            X_covar, y_covar = mx.nd.array(X_train[mask], dtype=np.float32), mx.nd.array(y_train[mask], dtype=np.float32)
            sorted_ids = self.model.get_ordered_terms_with_covar_at_data(X_covar, k, y_covar)
            top_k_words_per_topic = [[int(i) for i in list(sorted_ids[:k, t].asnumpy())] for t in range(self.n_latent)]
            npmi_eval = EvaluateNPMI(top_k_words_per_topic)
            npmi = npmi_eval.evaluate_csr_mat(X_covar)
            # BUG FIX: this assignment previously targeted a misspelled name
            # ('covar_keky'), so the next line raised NameError.
            # NOTE(review): an earlier commented-out variant keyed on
            # np.where(covar)[0][0] when no label_map was present -- confirm
            # the intended key for one-hot covariates.
            covar_key = covar[0]
            covar_npmi[covar_key] = npmi
            npmi_total += npmi
        return npmi_total / len(covars)

    def _npmi(self, X, k=10):
        return super()._npmi(X, k=k)
        #return self._npmi_per_covariate(X, y, k)

    def _get_objective_from_validation_result(self, v_res):
        # Model selection optimizes topic coherence only.
        return v_res['npmi']

    def validate(self, X, y):
        # NOTE: y is currently unused; perplexity is not computed here.
        npmi, redundancy = self._npmi(X)
        return {'npmi': npmi, 'redundancy': redundancy, 'ppl': 0.0}

    def get_topic_vectors(self) -> mx.nd.NDArray:
        """
        Get topic vectors of the fitted model.
        Returns:
            topic_vectors: Topic word distribution. topic_distribution[i, j] represents word j in topic i.
            shape=(n_latent, vocab_size)
        """
        # Uses the batch cached by _forward (data + covariates).
        return self.model.get_topic_vectors(self.train_data, self.train_labels)

    def transform(self, X: sp.csr.csr_matrix, y: np.ndarray):
        """
        Transform data X and y according to the fitted model.
        Parameters:
            X: Document word matrix of shape {n_samples, n_features)
            y: Covariate matrix of shape (n_train_samples, n_covars)
        Returns:
            Document topic distribution for X and y of shape=(n_samples, n_latent)
        """
        x_mxnet, y_mxnet = mx.nd.array(X, dtype=np.float32), mx.nd.array(y, dtype=np.float32)
        return self.model.encode_data_with_covariates(x_mxnet, y_mxnet).asnumpy()
class SeqBowEstimator(BaseEstimator):
    def __init__(self, bert_base, *args,
                 bert_model_name = 'bert_12_768_12',
                 bert_data_name = 'book_corpus_wiki_en_uncased',
                 bow_vocab = None,
                 n_labels = 0,
                 log_interval=5,
                 warmup_ratio=0.1,
                 gamma=1.0,
                 multilabel=False,
                 decoder_lr = 0.01,
                 checkpoint_dir = None,
                 optimizer = 'bertadam',
                 classifier_dropout = 0.0,
                 pure_classifier_objective = False,
                 **kwargs):
        """Estimator combining a BERT sequence encoder with a bag-of-words decoder.

        Parameters:
            bert_base: Pre-built GluonNLP BERT model used as the encoder.
            bert_model_name: Name of the BERT architecture variant.
            bert_data_name: Name of the dataset/vocabulary the BERT model was trained on.
            bow_vocab: Bag-of-words vocabulary for the reconstruction target.
            n_labels: Number of class labels; a classifier head is used when >= 2.
            log_interval: Logging frequency (in batches).
            warmup_ratio: presumably the fraction of training steps used for
                learning-rate warm-up -- confirm against the training loop.
            gamma: Weight of the classification loss term.
            multilabel: If True, classification is multi-label (sigmoid BCE).
            decoder_lr: Learning rate for decoder parameters.
            checkpoint_dir: Directory for checkpoints (None disables checkpointing).
            optimizer: MXNet optimizer name, forwarded to the base estimator.
            classifier_dropout: Dropout rate for the classifier head.
            pure_classifier_objective: If True, optimize only the classification objective.
        """
        super(SeqBowEstimator, self).__init__(*args, optimizer=optimizer, **kwargs)
        self.pure_classifier_objective = pure_classifier_objective
        self.minimum_lr = 1e-9  # floor for learning-rate decay
        self.checkpoint_dir = checkpoint_dir
        self.bert_base = bert_base
        self.bert_model_name = bert_model_name
        self.bert_data_name = bert_data_name
        self.has_classifier = n_labels >= 2  # classification requires at least 2 labels
        self.classifier_dropout = classifier_dropout
        self.multilabel = multilabel
        self.n_labels = n_labels
        # Composite precision/recall for multilabel; plain accuracy otherwise.
        self.metric = get_composite_p_and_r_metric() if multilabel else mx.metric.Accuracy()
        self.warmup_ratio = warmup_ratio
        self.log_interval = log_interval
        # Sigmoid BCE for multilabel targets; softmax CE over dense labels otherwise.
        self.loss_function = gluon.loss.SigmoidBCELoss() if multilabel else gluon.loss.SoftmaxCELoss(sparse_label=False)
        self.gamma = gamma
        self.decoder_lr = decoder_lr
        self._bow_matrix = None  # presumably a lazily-built BoW matrix cache -- confirm
        self.bow_vocab = bow_vocab
@classmethod
def from_config(cls,
config: Union[str, ag.space.Dict],
bert_base: nlp.model.bert.BERTModel,
bow_vocab: nlp.Vocab,
n_labels: int = 0,
reporter: Optional[object] = None,
log_interval: int = 1,
pretrained_param_file: Optional[str] = None,
ctx: mx.context.Context = mx.cpu()) -> 'SeqBowEstimator':
"""
Instantiate an object of this class using the provided `config`
Parameters:
config: String to configuration path (in json format) or an autogluon dictionary representing the config
bert_base: GluonNLP BERT model
bow_vocab: Bag-of-words vocabulary used for decoding reconstruction target
n_labels: Number of labels for (semi-)supervised modeling
repoter: Autogluon reporter object with callbacks for logging model selection
log_interval: Logging frequency (default = 1)
pretrained_param_file: Parameter file
ctx: MXNet context
Returns:
An object of this class
"""
if isinstance(config, str):
try:
with open(config, 'r') as f:
config_dict = json.load(f)
except:
logging.error("File {} does not appear to be a valid config instance".format(config))
raise Exception("Invalid Json Configuration File")
config = ag.space.Dict(**config_dict)
ldist_def = config.latent_distribution
kappa = 0.0
alpha = 1.0
latent_distrib = ldist_def.dist_type
n_latent = int(config.n_latent)
if latent_distrib == 'logistic_gaussian':
alpha = ldist_def.alpha
latent_distribution = LogisticGaussianDistribution(n_latent, ctx=ctx, alpha=alpha)
elif latent_distrib == 'vmf':
kappa = ldist_def.kappa
latent_distribution = HyperSphericalDistribution(n_latent, | |
from train_generation import TrainingData, DataSampler
from auxiliary_functions import EmpiricalDist, AllBinaryStrings, num_bytes_needed, SampleListToArray
from kernel_functions import KernelAllBinaryStrings
from param_init import NetworkParams
from sample_gen import BornSampler
import json
import numpy as np
import sys
import os
from pyquil.api import get_qc
import argparse
max_qubits = 8
def ParseInputArguments():
    '''
    Parse the positional command-line arguments selecting what to print
    (data or kernels), the maximum qubit count, and the kernel choice.
    '''
    specs = [
        ("Data", str, "Argument specifying which data should be printed"),
        ("Qubits", int, "Argument specifying maximum number of qubits to be printed"),
        ("Kernel", str, "Argument specifying which kernel should be printed"),
    ]
    parser = argparse.ArgumentParser()
    for arg_name, arg_type, help_text in specs:
        parser.add_argument(arg_name, help = help_text, type = arg_type)
    return parser.parse_args()
def MakeDirectory(path):
    '''Create directory \'path\' (including parents) if it does not already exist.

    Uses exist_ok=True instead of the original exists()-then-makedirs() guard,
    which had a check-then-act race (TOCTOU) when two processes created the
    same directory concurrently.
    '''
    os.makedirs(path, exist_ok=True)
    return
def PrintParamsToFile(seed, max_qubits):
    '''Generate and save initial network parameters for every qubit count
    in [2, max_qubits) as compressed .npz files under data/.'''
    for n_qubits in range(2, max_qubits):
        J_init, b_init, gamma_init, delta_init = NetworkParams(n_qubits, seed)
        np.savez('data/Parameters_%iQubits.npz' % n_qubits,
                 J_init=J_init, b_init=b_init,
                 gamma_init=gamma_init, delta_init=delta_init)
#PrintParamsToFile(seed, max_qubits)
def KernelDictToFile(N_qubits, N_kernel_samples, kernel_dict, kernel_choice):
    '''Write a kernel dictionary to a file under kernel/ and echo it to stdout.

    The file name encodes the kernel type (first letter of kernel_choice), the
    qubit count, and the sample count ('infinite' maps to an "_Exact" suffix).
    Dictionary keys (e.g. tuples of bitstrings) are stringified because JSON
    only allows string keys.  NOTE: the payload is deliberately double-encoded
    (a JSON string containing JSON) to stay byte-compatible with the existing
    readers of these files.

    Fix vs. the original: the file-writing call was wrapped in print(), which
    printed the None returned by json.dump; the dictionary was also serialized
    twice.  The on-disk format is unchanged.
    '''
    string_keyed = {str(key): value for key, value in kernel_dict.items()}
    if (N_kernel_samples == 'infinite'):
        path = 'kernel/%sKernel_Dict_%iQBs_Exact' % (kernel_choice[0], N_qubits)
        file_payload = json.dumps(string_keyed, sort_keys=True, indent=0)
    else:
        path = 'kernel/%sKernel_Dict_%iQBs_%iKernelSamples' % (kernel_choice[0], N_qubits, N_kernel_samples)
        # Non-exact files historically used the compact (no-indent) encoding.
        file_payload = json.dumps(string_keyed, sort_keys=True)
    with open(path, 'w') as f:
        json.dump(file_payload, f)
    print(json.dumps(string_keyed, sort_keys=True, indent=0))
    return
def PrintKernel(N_kernel_samples, kernel_choice, max_qubits):
    '''Compute the chosen kernel over all binary strings for each QVM size
    in [2, max_qubits) and write every kernel dictionary to file.'''
    devices = [get_qc('%iq-qvm' % n, as_qvm=True) for n in range(2, max_qubits)]
    for qc in devices:
        N_qubits = len(qc.qubits())
        print('This is qubit:', N_qubits)
        # N_kernel_samples == 'infinite' requests the exact kernel.
        _, _, kernel_approx_dict, _ = KernelAllBinaryStrings(qc, N_kernel_samples, kernel_choice)
        KernelDictToFile(N_qubits, N_kernel_samples, kernel_approx_dict, kernel_choice)
def PrintSomeKernels(kernel_type, max_qubits):
    '''Print the kernel for a range of sample counts, then the exact kernel.'''
    MakeDirectory('./kernel')  # ensure the output folder exists
    for n_samples in (10, 100, 200, 500, 1000, 2000):
        print("Kernel is printing for %i samples" % n_samples)
        PrintKernel(n_samples, kernel_type, max_qubits)
    print("Exact Kernel is Printing")
    PrintKernel('infinite', kernel_type, max_qubits)
def DataDictToFile(data_type, N_qubits, data_dict, N_data_samples, *args):
    '''Write a data-distribution dictionary to a file under data/.

    data_type selects Bernoulli or quantum data; for quantum data the first
    extra positional argument is the circuit choice, which becomes part of
    the file name.  N_data_samples == 'infinite' selects the "_Exact" name.
    '''
    kind = data_type.lower()
    if kind == 'bernoulli_data':
        if N_data_samples == 'infinite':
            path = 'data/Bernoulli_Data_Dict_%iQBs_Exact' % N_qubits
        else:
            path = 'data/Bernoulli_Data_Dict_%iQBs_%iSamples' % (N_qubits, N_data_samples)
    elif kind == 'quantum_data':
        circuit_choice = args[0]
        if N_data_samples == 'infinite':
            path = 'data/Quantum_Data_Dict_%iQBs_Exact_%sCircuit' % (N_qubits, circuit_choice)
        else:
            path = 'data/Quantum_Data_Dict_%iQBs_%iSamples_%sCircuit' % (N_qubits, N_data_samples, circuit_choice)
    else:
        raise ValueError('Please enter either \'Quantum_Data\' or \'Bernoulli_Data\' for \'data_type\' ')
    with open(path, 'w') as f:
        # Double-encoded JSON, matching the historical on-disk format.
        json.dump(json.dumps(data_dict, sort_keys=True), f)
def PrintCircuitParamsToFile(random_seed, circuit_choice):
    '''Save randomly-initialised circuit parameters for QVM sizes 2..6 qubits
    as compressed .npz files under data/.'''
    quantum_computers = [get_qc('%iq-qvm' % n, as_qvm=True) for n in range(2, 7)]
    for qc in quantum_computers:
        n_qubits = len(qc.qubits())
        params = NetworkParams(qc, random_seed)
        np.savez('data/Parameters_%iQbs_%sCircuit_%sDevice.npz' % (n_qubits, circuit_choice, qc.name),
                 J=params['J'], b=params['b'],
                 gamma=params['gamma'], delta=params['delta'])
def string_to_int_byte(string, N_qubits, byte):
    """Pack the bits of *string* in positions [8*byte, 8*(byte+1)) into one int.

    The slice is clipped at N_qubits, so the final byte of a sample whose
    length is not a multiple of 8 is packed from however many bits remain.
    Returns 0 for an empty slice.
    """
    start = 8 * byte
    stop = min(start + 8, N_qubits)
    value = 0
    for position in range(start, stop):
        value = (value << 1) | int(string[position])
    return value
def PrintDataToFiles(data_type, N_samples, qc, circuit_choice, N_qubits):
    '''Generate N_samples of training data for N_qubits qubits and write the raw
    samples (binary and text) plus their empirical/exact distributions to disk.

    Args:
        data_type: 'Bernoulli_Data' or 'Quantum_Data' (case-insensitive).
        N_samples: number of samples to draw.
        qc: quantum computer object (quantum data only; None for Bernoulli).
        circuit_choice: circuit family name (quantum data only).
        N_qubits: number of visible qubits.

    Raises:
        ValueError: if data_type is neither of the two supported values.
    '''
    binary_data_path = 'binary_data/'
    MakeDirectory(binary_data_path)
    data_path = 'data/'
    MakeDirectory(data_path)
    # Fix: compare case-insensitively, matching the 'quantum_data' branch below
    # (the original compared this branch with exact case only, so a lowercase
    # 'bernoulli_data' argument fell through to the ValueError).
    if data_type.lower() == 'bernoulli_data':
        # Define training data along with all binary strings on the visible and
        # hidden variables from train_generation.
        # M_h is the number of hidden Bernoulli modes in the data.
        M_h = 8
        N_h = 0
        data_probs, exact_data_dict = TrainingData(N_qubits, N_h, M_h)
        data_samples = DataSampler(N_qubits, N_h, M_h, N_samples, data_probs)
        # Save the samples packed as raw bytes (8 bits per byte).
        with open('binary_data/Bernoulli_Data_%iQBs_%iSamples' % (N_qubits, N_samples), 'wb') as f:
            for string in data_samples:
                for byte in range(num_bytes_needed(N_qubits)):
                    total = string_to_int_byte(string, N_qubits, byte)
                    f.write(bytes([total]))
        # Human-readable copy of the samples, their empirical distribution,
        # and the exact distribution.
        np.savetxt('data/Bernoulli_Data_%iQBs_%iSamples' % (N_qubits, N_samples), data_samples, fmt='%s')
        data_samples_list = SampleListToArray(data_samples, N_qubits, 'int')
        emp_data_dist = EmpiricalDist(data_samples_list, N_qubits)
        DataDictToFile(data_type, N_qubits, emp_data_dist, N_samples)
        np.savetxt('data/Bernoulli_Data_%iQBs_Exact' % (N_qubits), np.asarray(data_probs), fmt='%.10f')
        DataDictToFile(data_type, N_qubits, exact_data_dict, 'infinite')
    elif data_type.lower() == 'quantum_data':
        # Set random seed differently to that which initialises the actual Born
        # machine to be trained.
        random_seed_for_data = 13
        # BornSampler takes a list of sample values; the [1] entry is the
        # important one.
        N_Born_Samples = [0, N_samples]
        circuit_params = NetworkParams(qc, random_seed_for_data)  # Initialise a fixed instance of parameters to learn.
        quantum_data_samples, quantum_probs_dict, quantum_probs_dict_exact = BornSampler(qc, N_Born_Samples, circuit_params, circuit_choice)
        np.savetxt('data/Quantum_Data_%iQBs_%iSamples_%sCircuit' % (N_qubits, N_samples, circuit_choice), quantum_data_samples, fmt='%s')
        DataDictToFile(data_type, N_qubits, quantum_probs_dict, N_samples, circuit_choice)
        # NOTE(review): this "Exact" file is written from the sample strings
        # with a float format; it looks like it should write the exact
        # probabilities instead -- confirm against the readers of this file.
        np.savetxt('data/Quantum_Data_%iQBs_Exact_%sCircuit' % (N_qubits, circuit_choice), np.asarray(quantum_data_samples), fmt='%.10f')
        DataDictToFile(data_type, N_qubits, quantum_probs_dict_exact, 'infinite', circuit_choice)
    else:
        raise ValueError('Please enter either \'Quantum_Data\' or \'Bernoulli_Data\' for \'data_type\' ')
    return
def PrintAllDataToFiles(data_type, max_qubits, *args):
    '''
    Print all data samples to files, for either Quantum or Bernoulli data,
    for every qubit count from 2 (inclusive) up to max_qubits (exclusive),
    over a fixed ladder of sample counts.

    Args:
        data_type: 'Quantum_Data' or 'Bernoulli_Data'.
        max_qubits: exclusive upper bound on the qubit count.
        *args: for quantum data, args[0] is the circuit choice (e.g. 'IQP').

    Raises:
        ValueError: if data_type is neither of the two supported values.
    '''
    N_sample_trials = [10, 20, 30, 40, 50, 80, 100, 200, 300, 400, 500, 600, 700, 1000, 2000, 3000, 4000, 5000, 6000, 8000, 10000]
    for N_qubits in range(2, max_qubits):
        for N_samples in N_sample_trials:
            if data_type == 'Quantum_Data':
                qc = get_qc('%iq-qvm' % N_qubits, as_qvm=True)
                circuit_choice = args[0]
                print('Quantum Data is printing for %i qubits on qc %s using circuit choice %s' % (N_qubits, qc.name, circuit_choice))
                PrintDataToFiles('Quantum_Data', N_samples, qc, circuit_choice, N_qubits)
            elif data_type == 'Bernoulli_Data':
                qc = None
                circuit_choice = None
                print('Bernoulli Data is printing for %i qubits' % N_qubits)
                PrintDataToFiles('Bernoulli_Data', N_samples, qc, circuit_choice, N_qubits)
            else:
                # Fix: fail loudly on an unrecognised data_type, consistent
                # with the ValueError raised by the sibling helpers (the
                # original silently did nothing).
                raise ValueError('Please enter either \'Quantum_Data\' or \'Bernoulli_Data\' for \'data_type\' ')
    # #Uncomment to print circuit parameters to file, corresponding to the data, if the data is quantum
    # random_seed_for_data = 13
    # PrintCircuitParamsToFile(random_seed_for_data, 'IQP')
def MakeTrialNameFile(cost_func,data_type, data_circuit, N_epochs,learning_rate, qc, kernel_type, N_samples, stein_params, sinkhorn_eps, run):
    '''Create the output directory for one training run, write a human-readable
    summary to <trial_name>/info, and return the trial_name path.

    NOTE(review): every branch below assigns sys.stdout = f inside the
    with-block and never restores it, so after this function returns all
    subsequent print() calls target a closed file.  Consider
    contextlib.redirect_stdout instead -- confirm before changing.
    '''
    # N_samples packs the four sample counts in this fixed order.
    [N_data_samples, N_born_samples, batch_size, N_kernel_samples] = N_samples
    if data_type == 'Quantum_Data':
        if cost_func == 'MMD':
            score = stein_params[0]
            # score[0:5]: only the first five characters of the score label go
            # into the directory name.
            trial_name = "outputs/Output_MMD_%s_%s_%s_%skernel_%ikernel_samples_%iBorn_Samples%iData_samples_%iBatch_size_%iEpochs_%.3fLR_%s_Run%s" \
                %(qc,data_type, data_circuit, kernel_type,N_kernel_samples,N_born_samples, N_data_samples,batch_size,N_epochs,\
                learning_rate, score[0:5], str(run))
            path_to_output = './%s/' %trial_name
            MakeDirectory(path_to_output)
            with open('%s/info' %trial_name, 'w') as f:
                sys.stdout = f
                # NOTE(review): the final "Run: %i" placeholder receives
                # str(run); %i with a str argument raises TypeError.
                print("The data is:cost function:MMD chip:%s Data_type: %s Data Circuit: %s kernel:%s N kernel samples:%i N Born Samples:%i N Data samples:%s\
                Batch size:%i Epochs:%i Adam Learning Rate:%.3f_%s Run: %i"
                %(qc,\
                data_type,\
                data_circuit,\
                kernel_type,\
                N_kernel_samples,\
                N_born_samples,\
                N_data_samples,\
                batch_size,\
                N_epochs,\
                learning_rate,\
                score[0:5],\
                str(run)))
        elif cost_func == 'Stein':
            # stein_params carries (score method, number of Nystrom
            # eigenvectors, eta) in this order.
            score = stein_params[0]
            stein_eigvecs = stein_params[1]
            stein_eta = stein_params[2]
            trial_name = "outputs/Output_Stein_%s_%s_%s_%skernel_%ikernel_samples_%iBorn_Samples%iData_samples_%iBatch_size_%iEpochs_%.3fLR_%s_%iEigvecs_%.3fEta_Run%s" \
                %(qc, data_type, data_circuit, kernel_type, N_kernel_samples, N_born_samples, N_data_samples, batch_size,\
                N_epochs, learning_rate, score, stein_eigvecs, stein_eta, str(run))
            path_to_output = './%s/' %trial_name
            MakeDirectory(path_to_output)
            with open('%s/info' %trial_name, 'w') as f:
                sys.stdout = f
                # NOTE(review): "Run: %i" receives str(run) -> TypeError, as in
                # the MMD branch above.
                print("The data is: cost function: Stein, chip:%s Data_type: %s Data Circuit: %s kernel:%s N kernel samples:%i \n N Born Samples:%i N Data samples:%s\
                Batch size:%iEpochs:%iAdam Learning Rate:%.3fStein Score:%sN Nystrom Eigvecs:%iStein Eta:%.3f Run: %i"
                %(qc,\
                data_type,\
                data_circuit,\
                kernel_type,\
                N_kernel_samples,\
                N_born_samples,\
                N_data_samples,\
                batch_size,\
                N_epochs,\
                learning_rate,\
                score,\
                stein_eigvecs, \
                stein_eta,\
                str(run)))
        elif cost_func == 'Sinkhorn':
            trial_name = "outputs/Output_Sinkhorn_%s_%s_%s_HammingCost_%iBorn_Samples%iData_samples_%iBatch_size_%iEpochs_%.3fLR_%.3fEpsilon_Run%s" \
                %(qc,\
                data_type,\
                data_circuit,\
                N_born_samples,\
                N_data_samples,\
                batch_size,\
                N_epochs,\
                learning_rate,\
                sinkhorn_eps, \
                str(run))
            path_to_output = './%s/' %trial_name
            MakeDirectory(path_to_output)
            with open('%s/info' %trial_name, 'w') as f:
                sys.stdout = f
                # NOTE(review): the labels in this format string do not match
                # the argument order -- the first %s (labelled Data_type)
                # receives qc, the second (Data Circuit) receives data_type,
                # and so on down the list.
                print("The data is: cost function:Sinkhorn Data_type: %s Data Circuit: %s chip: %s kernel: %s N kernel samples: %i \
                N Born Samples: %i N Data samples: %i Batch size: %i Epochs: %i Adam Learning Rate: %.3f Sinkhorn Epsilon: %.3f Run: %s"
                %(qc,\
                data_type,\
                data_circuit,\
                kernel_type,\
                N_kernel_samples,\
                N_born_samples,\
                N_data_samples,\
                batch_size,\
                N_epochs,\
                learning_rate,\
                sinkhorn_eps,\
                str(run)))
    elif data_type == 'Bernoulli_Data':
        if cost_func == 'MMD':
            score = stein_params[0]
            trial_name = "outputs/Output_MMD_%s_%skernel_%ikernel_samples_%iBorn_Samples%iData_samples_%iBatch_size_%iEpochs_%.3fLR_%s_Run%s" \
                %(qc,\
                kernel_type,\
                N_kernel_samples,\
                N_born_samples,\
                N_data_samples,\
                batch_size,\
                N_epochs,\
                learning_rate,\
                score,\
                str(run))
            path_to_output = './%s/' %trial_name
            MakeDirectory(path_to_output)
            with open('%s/info' %trial_name, 'w') as f:
                sys.stdout = f
                # NOTE(review): the "Data Form" label receives the MMD score
                # argument.
                print("The data is:cost function:MMD chip:%s kernel:%s N kernel samples:%i N Born Samples:%i N Data samples:%s\
                Batch size:%i Epochs:%i Adam Learning Rate:%.3f, Data Form: %s Run: %s"
                %(qc,\
                kernel_type,\
                N_kernel_samples,\
                N_born_samples,\
                N_data_samples,\
                batch_size,\
                N_epochs,\
                learning_rate, \
                score,\
                str(run)))
        elif cost_func == 'Stein':
            score = stein_params[0]
            stein_eigvecs = stein_params[1]
            stein_eta = stein_params[2]
            trial_name = "outputs/Output_Stein_%s_%skernel_%ikernel_samples_%iBorn_Samples%iData_samples_%iBatch_size_%iEpochs_%.3fLR_%s_%iEigvecs_%.3fEta_Run%s" \
                %(qc,\
                kernel_type,\
                N_kernel_samples,\
                N_born_samples,\
                N_data_samples,\
                batch_size,\
                N_epochs,\
                learning_rate,\
                score,\
                stein_eigvecs,
                stein_eta,\
                str(run))
            path_to_output = './%s/' %trial_name
            MakeDirectory(path_to_output)
            with open('%s/info' %trial_name, 'w') as f:
                sys.stdout = f
                print("The data is: cost function: Stein, chip:%s kernel:%s N kernel samples:%i \n N Born Samples:%i N Data samples:%i\
                Batch size:%iEpochs:%iAdam Learning Rate:%.3fStein Score:%sN Nystrom Eigvecs:%iStein Eta:%.3f Run: %s"
                %(qc,\
                kernel_type,\
                N_kernel_samples,\
                N_born_samples,\
                N_data_samples,\
                batch_size,\
                N_epochs,\
                learning_rate,\
                score,\
                stein_eigvecs, \
                stein_eta,\
                str(run)))
        elif cost_func == 'Sinkhorn':
            trial_name = "outputs/Output_Sinkhorn_%s_HammingCost_%iBorn_Samples%iData_samples_%iBatch_size_%iEpochs_%.3fLR_%.3fEpsilon_Run%s" \
                %(qc,\
                N_born_samples,\
                N_data_samples,\
                batch_size,\
                N_epochs,\
                learning_rate,\
                sinkhorn_eps,\
                str(run))
            path_to_output = './%s/' %trial_name
            MakeDirectory(path_to_output)
            with open('%s/info' %trial_name, 'w') as f:
                sys.stdout = f
                print("The data is: cost function: Sinkhorn chip: %s kernel: %sN kernel samples: %i N Born Samples: %iN Data samples: %i Batch size: %i \n \
                Epochs: %i Adam Learning Rate: %.3f Sinkhorn Epsilon: %.3f Run: %s"
                %(qc,\
                kernel_type,\
                N_kernel_samples,\
                N_born_samples,\
                N_data_samples,\
                batch_size,\
                N_epochs,\
                learning_rate,\
                sinkhorn_eps,\
                str(run)))
    else: raise ValueError('\'data_type\' must be either \'Quantum_Data\' or \'Bernoulli_Data\'')
    return trial_name
def PrintFinalParamsToFile(cost_func, data_type, data_circuit, N_epochs, learning_rate, loss,
                            circuit_params, data_exact_dict, born_probs_list, empirical_probs_list, qc,
                            kernel_type, N_samples, stein_params, sinkhorn_eps, run):
    '''Write everything generated during training (loss curves, per-epoch
    circuit parameters and probability distributions) under the trial's
    output folder created by MakeTrialNameFile.

    NOTE(review): born_probs_list is accepted for interface compatibility but
    is not written out here -- confirm whether that is intentional.
    '''
    trial_name = MakeTrialNameFile(cost_func, data_type, data_circuit, \
                    N_epochs, learning_rate, qc.name, kernel_type, N_samples, stein_params, sinkhorn_eps, run)
    # Sub-directories for each kind of artefact.
    loss_path = '%s/loss/%s/' % (trial_name, cost_func)
    weight_path = '%s/params/weights/' % trial_name
    bias_path = '%s/params/biases/' % trial_name
    gammax_path = '%s/params/gammaX/' % trial_name
    gammay_path = '%s/params/gammaY/' % trial_name
    born_probs_path = '%s/probs/born/' % trial_name
    data_probs_path = '%s/probs/data/' % trial_name
    MakeDirectory(loss_path)
    MakeDirectory(weight_path)
    MakeDirectory(bias_path)
    MakeDirectory(gammax_path)
    MakeDirectory(gammay_path)
    MakeDirectory(born_probs_path)
    MakeDirectory(data_probs_path)
    # Loss curves for train/test, plus the total variation distance per epoch.
    np.savetxt('%s/loss/%s/train' % (trial_name, cost_func), loss[('%s' % cost_func, 'Train')])
    np.savetxt('%s/loss/%s/test' % (trial_name, cost_func), loss[('%s' % cost_func, 'Test')])
    np.savetxt('%s/loss/TV' % (trial_name), loss[('TV')])
    # NOTE(review): range(0, N_epochs - 1) skips the final epoch -- confirm
    # whether the last epoch's parameters are deliberately not recorded.
    for epoch in range(0, N_epochs - 1):
        np.savetxt('%s/params/weights/epoch%s' % (trial_name, epoch), circuit_params[('J', epoch)])
        np.savetxt('%s/params/biases/epoch%s' % (trial_name, epoch), circuit_params[('b', epoch)])
        np.savetxt('%s/params/gammaX/epoch%s' % (trial_name, epoch), circuit_params[('gamma', epoch)])
        np.savetxt('%s/params/gammaY/epoch%s' % (trial_name, epoch), circuit_params[('delta', epoch)])
        # Distributions are stored double-encoded (JSON string inside a JSON
        # document), matching DataDictToFile.
        with open('%s/probs/born/epoch%s' % (trial_name, epoch), 'w') as f:
            json.dump(json.dumps(empirical_probs_list[epoch], sort_keys=True), f)
        with open('%s/probs/data/epoch%s' % (trial_name, epoch), 'w') as f:
            json.dump(json.dumps(data_exact_dict, sort_keys=True), f)
    return
def main():
args = ParseInputArguments()
data = args.Data
max_qubits = args.Qubits
kernel = args.Kernel
if data.lower() == 'bernoulli':
# Bernoulli data needs to be printed to file
PrintAllDataToFiles('Bernoulli_Data', max_qubits)
elif data.lower() == 'quantum':
#quantum data needs to be printed to file
PrintAllDataToFiles('Quantum_Data', max_qubits, 'IQP')
if | |
from itertools import cycle
import random
import sys
import pygame
from pygame.locals import *
FPS = 30               # frames per second targeted by the game loop
SCREENWIDTH = 288      # window width in pixels
SCREENHEIGHT = 512     # window height in pixels
PIPEGAPSIZE = 100      # gap between upper and lower part of pipe
BASEY = SCREENHEIGHT * 0.79   # y coordinate of the ground line
# image, sound and hitmask dicts, populated by main()
IMAGES, SOUNDS, HITMASKS = {}, {}, {}
# list of all possible players (tuple of 3 positions of flap)
PLAYERS_LIST = (
    # red bird
    (
        'assets/sprites/redbird-upflap.png',
        'assets/sprites/redbird-midflap.png',
        'assets/sprites/redbird-downflap.png',
    ),
    # blue bird
    (
        'assets/sprites/bluebird-upflap.png',
        'assets/sprites/bluebird-midflap.png',
        'assets/sprites/bluebird-downflap.png',
    ),
    # yellow bird
    (
        'assets/sprites/yellowbird-upflap.png',
        'assets/sprites/yellowbird-midflap.png',
        'assets/sprites/yellowbird-downflap.png',
    ),
)
# list of backgrounds
BACKGROUNDS_LIST = (
    'assets/sprites/background-day.png',
    'assets/sprites/background-night.png',
)
# list of pipes
PIPES_LIST = (
    'assets/sprites/pipe-green.png',
    'assets/sprites/pipe-red.png',
)
BIRDS_COUNT = 21    # number of birds in each GA generation
MAX_ITEA = 120      # maximum number of GA iterations (generations)
MAX_SCORE = 1000    # stop training early once a bird reaches this score
PM = 0.5            # base mutation probability (decays over generations)
# Fix: interpolate MAX_ITEA into the file name -- the original f-string had no
# placeholder braces and produced the literal "iter-MAX_ITEA.plot".
dump_file = f"iter-{MAX_ITEA}.plot"
# Python 2/3 compatibility shim: under Python 3, where xrange does not exist,
# alias it to range so the rest of the module can use xrange unconditionally.
try:
    xrange
except NameError:
    xrange = range
from sklearn.neural_network import MLPRegressor
import numpy as np
from random import shuffle
from itertools import accumulate, count
import math
import copy
import pickle
from pathlib import Path
class Bird:
    """A flappy-bird agent whose flap decision is made by a tiny MLP.

    A fresh bird fits the network on two synthetic feature rows just to
    initialise sklearn's internal weight structures; evolution then proceeds
    purely by crossover and mutation of those weights.
    """

    def __init__(self, brain=None):
        if brain is None:
            # Features: (y-offset of the player from the nearest pipe gap,
            # horizontal distance to the nearest pipe, y-offset between the
            # nearest gap and the next gap).
            seed_inputs = [[BASEY / 2, 0, BASEY / 2], [-BASEY / 2, 0, -BASEY / 2]]
            seed_targets = [1, 0]
            self.brain = MLPRegressor(hidden_layer_sizes=(8,), max_iter=10)
            self.brain.fit(seed_inputs, seed_targets)
            # Force a logistic output so predictions land in (0, 1).
            self.brain.out_activation_ = "logistic"
        else:
            self.brain = copy.deepcopy(brain)
        self.score = 0

    def isJump(self, X):
        """Return True when the network votes (> 0.5) to flap."""
        return self.brain.predict(X)[0] > 0.5

    def cross_over(self, other):
        """Uniform crossover: swap each weight/bias with probability 1/2."""
        for layer in range(self.brain.n_layers_ - 1):
            for row in range(len(self.brain.coefs_[layer])):
                for col in range(len(self.brain.coefs_[layer][row])):
                    if random.uniform(0, 1) <= .5:
                        self.brain.coefs_[layer][row][col], other.brain.coefs_[layer][row][col] = \
                            other.brain.coefs_[layer][row][col], self.brain.coefs_[layer][row][col]
        for layer in range(self.brain.n_layers_ - 1):
            for unit in range(len(self.brain.intercepts_[layer])):
                if random.uniform(0, 1) <= .5:
                    self.brain.intercepts_[layer][unit], other.brain.intercepts_[layer][unit] = \
                        other.brain.intercepts_[layer][unit], self.brain.intercepts_[layer][unit]

    def mutate(self, pm):
        """With probability *pm*, nudge each weight/bias by a random +/-[0.01, 0.10]."""
        for layer in range(self.brain.n_layers_ - 1):
            for row in range(len(self.brain.coefs_[layer])):
                for col in range(len(self.brain.coefs_[layer][row])):
                    if random.uniform(0, 1) <= pm:
                        self.brain.coefs_[layer][row][col] += random.randint(1, 10) / 100 * (round(random.uniform(0, 1)) * 2 - 1)
        for layer in range(self.brain.n_layers_ - 1):
            for unit in range(len(self.brain.intercepts_[layer])):
                if random.uniform(0, 1) <= pm:
                    self.brain.intercepts_[layer][unit] += random.randint(1, 10) / 100 * (round(random.uniform(0, 1)) * 2 - 1)
class BirdsPopulation(list):
    """One generation of Bird agents plus the GA step producing the next.

    NOTE(review): the class subclasses list but keeps its birds in self.birds
    (the inherited list itself stays empty); __getitem__/__setitem__ forward
    to self.birds.
    """
    def __init__(self, birdsCount = -1, birds = None, generation_counts = 1):
        # birdsCount < 0 means "infer the population size from *birds*".
        if (birdsCount < 0):
            self.birdsCount = len(birds)
        else:
            self.birdsCount = birdsCount
        if (birds is None):
            self.birds = [ Bird() for i in range(birdsCount) ]
        else:
            self.birds = birds
        # 1-based generation counter; also drives the mutation-rate decay.
        self.generation_counts = generation_counts
    def __getitem__(self, key):
        return self.birds[key]
    def __setitem__(self, key, item):
        self.birds[key] = item
    def next(self):
        """Build the next generation: roulette selection, uniform crossover,
        decaying mutation, and elitism (the best bird survives unchanged)."""
        # Sort best-first and remember the elite individual.
        self.birds.sort(key=lambda x : -x.score)
        self.best_one = self.birds[0]
        print('fitness function: ', end='')
        for bird in self.birds:
            print(bird.score, end=' ')
        print('\n')
        # Roulette wheel: prefix sums of the scores clamped to be non-negative.
        sum_scores = [int(bird.score) if bird.score > 0 else 0 for bird in self.birds]
        for idx in range(1, len(sum_scores)):
            sum_scores[idx] += sum_scores[idx - 1]
        newPopulation = []
        population_idx = []
        # NOTE(review): if every score is <= 0, sum_scores[-1] == 0 and
        # random.randint(1, 0) below raises ValueError.
        for bird in range(self.birdsCount - 1):
            dart = random.randint(1, sum_scores[-1])
            for idx, sum_score in enumerate(sum_scores):
                if dart <= sum_score:
                    break
            # idx is the loop variable left behind by the search above.
            newPopulation.append(copy.deepcopy(self.birds[idx]))
            population_idx.append(idx)
        # Uniform crossover on adjacent pairs of the selected parents.
        for idx in range(0, len(newPopulation) - 1, 2):
            newPopulation[idx].cross_over(newPopulation[idx + 1])
        # Mutation probability decays linearly towards zero over MAX_ITEA
        # generations.
        for new_bird in newPopulation:
            new_bird.mutate(PM * (MAX_ITEA - self.generation_counts) / MAX_ITEA)
        # Elitism: carry the best bird of the last generation over unchanged.
        newPopulation.append(self.best_one)
        return BirdsPopulation(self.birdsCount, newPopulation, self.generation_counts + 1)
def saveConfig(bp):
    """Pickle the bird population to disk so training can resume later.

    Fix: the original opened the file without ever closing it; a context
    manager guarantees the handle is flushed and released.
    """
    with open('latest_birds_config.txt', 'wb') as fout:
        pickle.dump(bp, fout)
def loadConfig():
    """Return the pickled population checkpoint, or None if none exists.

    Fix: the original opened the checkpoint file without closing it; a
    context manager releases the handle deterministically.
    """
    config_path = Path('latest_birds_config.txt')
    if config_path.exists():
        with open('latest_birds_config.txt', 'rb') as fin:
            return pickle.load(fin)
    return None
class SoundEffectDump:
    """Stand-in for a pygame sound: 'playing' it just prints its label."""

    def __init__(self, msg):
        self.msg = msg

    def play(self):
        print(self.msg)
def main():
    """Set up pygame assets, then run the genetic-algorithm training loop."""
    global SCREEN, FPSCLOCK
    pygame.init()
    FPSCLOCK = pygame.time.Clock()
    SCREEN = pygame.display.set_mode((SCREENWIDTH, SCREENHEIGHT))
    pygame.display.set_caption('Flappy Bird')
    # numbers sprites for score display
    IMAGES['numbers'] = (
        pygame.image.load('assets/sprites/0.png').convert_alpha(),
        pygame.image.load('assets/sprites/1.png').convert_alpha(),
        pygame.image.load('assets/sprites/2.png').convert_alpha(),
        pygame.image.load('assets/sprites/3.png').convert_alpha(),
        pygame.image.load('assets/sprites/4.png').convert_alpha(),
        pygame.image.load('assets/sprites/5.png').convert_alpha(),
        pygame.image.load('assets/sprites/6.png').convert_alpha(),
        pygame.image.load('assets/sprites/7.png').convert_alpha(),
        pygame.image.load('assets/sprites/8.png').convert_alpha(),
        pygame.image.load('assets/sprites/9.png').convert_alpha()
    )
    # game over sprite
    IMAGES['gameover'] = pygame.image.load('assets/sprites/gameover.png').convert_alpha()
    # message sprite for welcome screen
    IMAGES['message'] = pygame.image.load('assets/sprites/message.png').convert_alpha()
    # base (ground) sprite, scrolled left to create the moving-ground effect
    IMAGES['base'] = pygame.image.load('assets/sprites/base.png').convert_alpha()
    # sounds
    # NOTE(review): 'win' in sys.platform also matches 'darwin' (macOS); use
    # sys.platform.startswith('win') to detect Windows reliably.  soundExt is
    # currently unused because the stubs below replace the real sounds.
    if 'win' in sys.platform:
        soundExt = '.wav'
    else:
        soundExt = '.ogg'
    # Console stubs instead of real sounds, so headless training stays quiet.
    SOUNDS['die'] = SoundEffectDump("die")
    SOUNDS['hit'] = SoundEffectDump("hit")
    SOUNDS['point'] = SoundEffectDump("point")
    SOUNDS['swoosh'] = SoundEffectDump("swoosh")
    SOUNDS['wing'] = SoundEffectDump("wing")
    # select random background sprites (fixed to day for reproducible training)
    randBg = 0
    #randBg = random.randint(0, len(BACKGROUNDS_LIST) - 1)
    IMAGES['background'] = pygame.image.load(BACKGROUNDS_LIST[randBg]).convert()
    # select random player sprites (fixed to the red bird)
    #randPlayer = random.randint(0, len(PLAYERS_LIST) - 1)
    randPlayer = 0
    IMAGES['player'] = (
        pygame.image.load(PLAYERS_LIST[randPlayer][0]).convert_alpha(),
        pygame.image.load(PLAYERS_LIST[randPlayer][1]).convert_alpha(),
        pygame.image.load(PLAYERS_LIST[randPlayer][2]).convert_alpha(),
    )
    # select random pipe sprites (fixed to green)
    # pipeindex = random.randint(0, len(PIPES_LIST) - 1)
    pipeindex = 0
    IMAGES['pipe'] = (
        pygame.transform.rotate(
            pygame.image.load(PIPES_LIST[pipeindex]).convert_alpha(), 180),
        pygame.image.load(PIPES_LIST[pipeindex]).convert_alpha(),
    )
    # hitmask for pipes
    HITMASKS['pipe'] = (
        getHitmask(IMAGES['pipe'][0]),
        getHitmask(IMAGES['pipe'][1]),
    )
    # hitmask for player
    HITMASKS['player'] = (
        getHitmask(IMAGES['player'][0]),
        getHitmask(IMAGES['player'][1]),
        getHitmask(IMAGES['player'][2]),
    )
    global birds
    global lastScore
    global fastForward
    fastForward = False
    # Resume from the latest checkpoint if one exists, else start fresh.
    birds = loadConfig()
    if (birds is None):
        birds = BirdsPopulation(BIRDS_COUNT)
    bestScore = 0
    best_score_each_iter = [0] * MAX_ITEA
    # NOTE(review): 'iter' shadows the builtin of the same name.
    for iter in range(MAX_ITEA):
        # Checkpoint before each generation so a crash loses at most one run.
        saveConfig(birds)
        # update each iteration
        lastScore = 0
        print("=========== Generation - {} ===========".format(birds.generation_counts))
        movementInfo = initPosition()
        crashInfo = mainGame(movementInfo)
        bestScore = max(bestScore, lastScore)
        print("=========== Best Score - {} ===========".format(bestScore))
        if bestScore == MAX_SCORE:
            # Early stop: backfill the remaining iterations with the last score.
            best_score_each_iter[iter:MAX_ITEA] = [lastScore] * (MAX_ITEA - iter)
            break
        birds = birds.next()
    # NOTE(review): pickle.dump requires a file argument, so as written this
    # raises TypeError; the intent was presumably
    # pickle.dump(best_score_each_iter, f) -- confirm before fixing.
    with open(f"iter-{MAX_ITEA}.txt", 'wb') as f:
        pickle.dump(dump_file)
    pygame.quit()
    sys.exit()
    #showGameOverScreen(crashInfo)
def initPosition():
    """Build the initial movement state shared by all birds in a generation.

    Returns a dict with the starting y position, the base (ground) x offset,
    and the cyclic generator of sprite indices for the flap animation.
    """
    # Sprite-index cycle producing the wing animation: up, mid, down, mid.
    flap_sequence = cycle([0, 1, 2, 1])
    # Start vertically centred on the screen.
    start_y = int((SCREENHEIGHT - IMAGES['player'][0].get_height()) / 2)
    # Simple-harmonic-motion state for the welcome-screen bobbing:
    # 'val' is the current vertical offset, 'dir' the bob direction.
    shm = {'val': 0, 'dir': 1}
    return {
        'playery': start_y + shm['val'],
        'basex': 0,
        'playerIndexGen': flap_sequence,
    }
def yScoreFunc(playerY, lGapY, uGapY):
    """Vertical distance from playerY to the band [lGapY, uGapY] (0 if inside).

    Used as a penalty term measuring how far the bird is from the pipe gap.
    The original had a redundant in-band check and an unreachable trailing
    return; guard clauses cover every case exactly once.
    """
    if playerY < lGapY:
        return lGapY - playerY
    if playerY > uGapY:
        return playerY - uGapY
    return 0
def mainGame(movementInfo):
global birds
global playerDied
global lastScore
loopIter = 0
score = [0] * BIRDS_COUNT
playerIndex = [0] * BIRDS_COUNT
playerIndexGen = [ movementInfo['playerIndexGen'] ] * BIRDS_COUNT
playerx, playery = [ int(SCREENWIDTH * 0.2) ] * BIRDS_COUNT, [ movementInfo['playery'] ] * BIRDS_COUNT
basex = movementInfo['basex']
    # The base (ground) image is wider than the background; the scrolling
    # effect is produced by continuously shifting the base to the left.
    # baseShift is the maximum distance the base may shift left before a
    # black gap would become visible.
baseShift = IMAGES['base'].get_width() - IMAGES['background'].get_width()
# get 2 new pipes to add to upperPipes lowerPipes list
newPipe1 = getRandomPipe()
newPipe2 = getRandomPipe()
# list of upper pipes
upperPipes = [
{'x': SCREENWIDTH + 200, 'y': newPipe1[0]['y']},
{'x': SCREENWIDTH + 200 + (SCREENWIDTH / 2), 'y': newPipe2[0]['y']},
]
# list of lower pipes
lowerPipes = [
{'x': SCREENWIDTH + 200, 'y': newPipe1[1]['y']},
{'x': SCREENWIDTH + 200 + (SCREENWIDTH / 2), 'y': newPipe2[1]['y']},
]
pipeVelX = -4
# player velocity, max velocity, downward accleration, accleration on flap
playerVelY = [-9] * BIRDS_COUNT # player's velocity along Y, default same as playerFlapped
playerMaxVelY = [10] * BIRDS_COUNT # max vel along Y, max descend speed
playerMinVelY = [-8] * BIRDS_COUNT # min vel along Y, max ascend speed
playerAccY = [ 1] * BIRDS_COUNT # players downward accleration
playerRot = [45] * BIRDS_COUNT # player's rotation
playerVelRot = [ 3] * BIRDS_COUNT # angular speed
playerRotThr = [20] * BIRDS_COUNT # rotation threshold
playerFlapAcc = [-9] * BIRDS_COUNT # players speed on flapping
playerFlapped = [False] * BIRDS_COUNT # True when player flaps
playerDied = [False] * BIRDS_COUNT
playersLeft = BIRDS_COUNT
global fastForward
travelDistance = 0
while True:
for event in pygame.event.get():
if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):
pygame.quit()
sys.exit()
if event.type == KEYDOWN and (event.key == K_SPACE):
fastForward = not fastForward
"""
if event.type == KEYDOWN and (event.key == K_SPACE or event.key == K_UP):
if playery > -2 * IMAGES['player'][0].get_height():
playerVelY = playerFlapAcc
playerFlapped = True
SOUNDS['wing'].play()
"""
# print(playerx[0], (lowerPipes[1]['x'] if lowerPipes[0]['x'] + IMAGES['pipe'][0].get_width() < playerx[0] else lowerPipes[0]['x']) + IMAGES['pipe'][0].get_width())
for i in range(BIRDS_COUNT):
if playerDied[i]: continue
upperX = lowerPipes[0]['x'] + IMAGES['pipe'][0].get_width()
            # Horizontal position of the right edge of the nearest pipe the
            # bird has yet to pass.
closeX = SCREENWIDTH
            # Height of the gap centre of the nearest upcoming pipe.
centerGapY = SCREENHEIGHT / 2
            # Height of the gap centre of the pipe after the nearest one.
nextCenterGapY = 0
for j in range(len(lowerPipes)):
if lowerPipes[j]['x'] + IMAGES['pipe'][0].get_width() < playerx[i]:
continue
if lowerPipes[j]['x'] > SCREENWIDTH:
continue
closeX = lowerPipes[j]['x'] + IMAGES['pipe'][0].get_width()
centerGapY = (lowerPipes[j]['y'] - (PIPEGAPSIZE / 2))
if j < len(lowerPipes) and j + 1 | |
<filename>lib/mechanize/_response.py
"""Response classes.
The seek_wrapper code is not used if you're using UserAgent with
.set_seekable_responses(False), or if you're using the urllib2-level interface
HTTPEquivProcessor. Class closeable_response is instantiated by some handlers
(AbstractHTTPHandler), but the closeable_response interface is only depended
upon by Browser-level code. Function upgrade_response is only used if you're
using Browser.
Copyright 2006 <NAME> <<EMAIL>>
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file COPYING.txt
included with the distribution).
"""
import copy, mimetools, urllib2
from cStringIO import StringIO
def len_of_seekable(file_):
    # Cheap length probe for a seekable file: remember the current offset,
    # jump to EOF to read the length, then restore the original position.
    # This exists because evaluating len(file_.getvalue()) on every .read()
    # from seek_wrapper would be O(N**2) in the number of .read()s.
    saved = file_.tell()
    file_.seek(0, 2)  # 2 == relative to end of file
    end = file_.tell()
    file_.seek(saved)
    return end
# XXX <NAME> kindly sent me a similar class in response to my request on
# comp.lang.python, which I then proceeded to lose. I wrote this class
# instead, but I think he's released his code publicly since, could pinch the
# tests from it, at least...
# For testing seek_wrapper invariant (note that
# test_urllib2.HandlerTest.test_seekable is expected to fail when this
# invariant checking is turned on). The invariant checking is done by module
# ipdc, which is available here:
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/436834
## from ipdbc import ContractBase
## class seek_wrapper(ContractBase):
class seek_wrapper:
    """Adds a seek method to a file object.

    This is only designed for seeking on readonly file-like objects.

    Wrapped file-like object must have a read method.  The readline method is
    only supported if that method is present on the wrapped object.  The
    readlines method is always supported.  xreadlines and iteration are
    supported only for Python 2.2 and above.

    Public attributes:

    wrapped: the wrapped file object
    is_closed: true iff .close() has been called

    WARNING: All other attributes of the wrapped object (ie. those that are not
    one of wrapped, read, readline, readlines, xreadlines, __iter__ and next)
    are passed through unaltered, which may or may not make sense for your
    particular file object.
    """

    # General strategy is to check that cache is full enough, then delegate to
    # the cache (self.__cache, which is a cStringIO.StringIO instance).  A seek
    # position (self.__pos) is maintained independently of the cache, in order
    # that a single cache may be shared between multiple seek_wrapper objects.
    # Copying using module copy shares the cache in this way.

    def __init__(self, wrapped):
        self.wrapped = wrapped
        # Single-element lists so copies made via __copy__ share the
        # "read complete" / "closed" state with the original wrapper.
        self.__read_complete_state = [False]
        self.__is_closed_state = [False]
        self.__have_readline = hasattr(self.wrapped, "readline")
        self.__cache = StringIO()
        self.__pos = 0  # seek position

    def invariant(self):
        # The end of the cache is always at the same place as the end of the
        # wrapped file (though the .tell() method is not required to be present
        # on wrapped file).
        return self.wrapped.tell() == len(self.__cache.getvalue())

    def close(self):
        self.wrapped.close()
        self.is_closed = True

    def __getattr__(self, name):
        # Virtual attributes backed by the shared state lists.
        if name == "is_closed":
            return self.__is_closed_state[0]
        elif name == "read_complete":
            return self.__read_complete_state[0]
        # Everything else delegates to the wrapped object (see class docstring).
        wrapped = self.__dict__.get("wrapped")
        if wrapped:
            return getattr(wrapped, name)
        return getattr(self.__class__, name)

    def __setattr__(self, name, value):
        if name == "is_closed":
            self.__is_closed_state[0] = bool(value)
        elif name == "read_complete":
            # Updates are ignored after close: a closed response is by
            # definition fully read.
            if not self.is_closed:
                self.__read_complete_state[0] = bool(value)
        else:
            self.__dict__[name] = value

    def seek(self, offset, whence=0):
        assert whence in [0, 1, 2]

        # how much data, if any, do we need to read?
        if whence == 2:  # 2: relative to end of *wrapped* file
            if offset < 0: raise ValueError("negative seek offset")
            # since we don't know yet where the end of that file is, we must
            # read everything
            to_read = None
        else:
            if whence == 0:  # 0: absolute
                if offset < 0: raise ValueError("negative seek offset")
                dest = offset
            else:  # 1: relative to current position
                pos = self.__pos
                # Fix: the destination is pos + offset, so the only invalid
                # relative seek is one landing before position zero.  The
                # original guard (pos < offset) wrongly rejected ordinary
                # forward relative seeks and permitted some seeks to before
                # the start of the file.
                if pos + offset < 0:
                    raise ValueError("seek to before start of file")
                dest = pos + offset
            end = len_of_seekable(self.__cache)
            to_read = dest - end
            if to_read < 0:
                to_read = 0

        if to_read != 0:
            self.__cache.seek(0, 2)
            if to_read is None:
                assert whence == 2
                self.__cache.write(self.wrapped.read())
                self.read_complete = True
                self.__pos = self.__cache.tell() - offset
            else:
                data = self.wrapped.read(to_read)
                if not data:
                    self.read_complete = True
                else:
                    self.__cache.write(data)
                # Don't raise an exception even if we've seek()ed past the end
                # of .wrapped, since fseek() doesn't complain in that case.
                # Also like fseek(), pretend we have seek()ed past the end,
                # i.e. not:
                #self.__pos = self.__cache.tell()
                # but rather:
                self.__pos = dest
        else:
            self.__pos = dest

    def tell(self):
        return self.__pos

    def __copy__(self):
        # Copies share the cache and the state lists, but keep their own
        # independent seek position.
        cpy = self.__class__(self.wrapped)
        cpy.__cache = self.__cache
        cpy.__read_complete_state = self.__read_complete_state
        cpy.__is_closed_state = self.__is_closed_state
        return cpy

    def get_data(self):
        # Read the entire contents without disturbing the seek position.
        pos = self.__pos
        try:
            self.seek(0)
            return self.read(-1)
        finally:
            self.__pos = pos

    def read(self, size=-1):
        pos = self.__pos
        end = len_of_seekable(self.__cache)
        available = end - pos

        # enough data already cached?
        if size <= available and size != -1:
            self.__cache.seek(pos)
            self.__pos = pos+size
            return self.__cache.read(size)

        # no, so read sufficient data from wrapped file and cache it
        self.__cache.seek(0, 2)
        if size == -1:
            self.__cache.write(self.wrapped.read())
            self.read_complete = True
        else:
            to_read = size - available
            assert to_read > 0
            data = self.wrapped.read(to_read)
            if not data:
                self.read_complete = True
            else:
                self.__cache.write(data)
        self.__cache.seek(pos)

        data = self.__cache.read(size)
        self.__pos = self.__cache.tell()
        assert self.__pos == pos + len(data)
        return data

    def readline(self, size=-1):
        if not self.__have_readline:
            raise NotImplementedError("no readline method on wrapped object")

        # line we're about to read might not be complete in the cache, so
        # read another line first
        pos = self.__pos
        self.__cache.seek(0, 2)
        data = self.wrapped.readline()
        if not data:
            self.read_complete = True
        else:
            self.__cache.write(data)
        self.__cache.seek(pos)

        data = self.__cache.readline()
        if size != -1:
            r = data[:size]
            self.__pos = pos+size
        else:
            r = data
            self.__pos = pos+len(data)
        return r

    def readlines(self, sizehint=-1):
        # Slurp the remainder of the wrapped file into the cache, then let
        # the cache split it into lines.
        pos = self.__pos
        self.__cache.seek(0, 2)
        self.__cache.write(self.wrapped.read())
        self.read_complete = True
        self.__cache.seek(pos)
        data = self.__cache.readlines(sizehint)
        self.__pos = self.__cache.tell()
        return data

    def __iter__(self): return self
    def next(self):
        # Python 2 iterator protocol.
        line = self.readline()
        if line == "": raise StopIteration
        return line

    xreadlines = __iter__

    def __repr__(self):
        return ("<%s at %s whose wrapped object = %r>" %
                (self.__class__.__name__, hex(abs(id(self))), self.wrapped))
class response_seek_wrapper(seek_wrapper):
    """
    Supports copying response objects and setting response body data.
    """

    def __init__(self, wrapped):
        seek_wrapper.__init__(self, wrapped)
        # Snapshot the headers so set_data()/copies keep working after close.
        self._headers = self.wrapped.info()

    def __copy__(self):
        cpy = seek_wrapper.__copy__(self)
        # copy headers from delegate
        cpy._headers = copy.copy(self.info())
        return cpy

    # Note that .info() and .geturl() (the only two urllib2 response methods
    # that are not implemented by seek_wrapper) must be here explicitly rather
    # than by seek_wrapper's __getattr__ delegation) so that the nasty
    # dynamically-created HTTPError classes in get_seek_wrapper_class() get the
    # wrapped object's implementation, and not HTTPError's.

    def info(self):
        return self._headers

    def geturl(self):
        return self.wrapped.geturl()

    def set_data(self, data):
        # Drain and close the wrapped response, then replace the cache with
        # the supplied body so subsequent reads return *data*.
        self.seek(0)
        self.read()
        self.close()
        # _seek_wrapper__cache is the name-mangled form of seek_wrapper's
        # private __cache attribute; this deliberately reaches into the base
        # class to swap the cache out.
        cache = self._seek_wrapper__cache = StringIO()
        cache.write(data)
        self.seek(0)
class eoffile:
# file-like object that always claims to be at end-of-file...
def read(self, size=-1): return ""
def readline(self, size=-1): return ""
def __iter__(self): return self
def next(self): return ""
def close(self): pass
class eofresponse(eoffile):
    """An always-at-EOF file that also carries response metadata.

    Used by closeable_response.close() so that .info()/.geturl()/.code/.msg
    keep working after the real response has been closed.
    """

    def __init__(self, url, headers, code, msg):
        self._url = url
        self._headers = headers
        self.code = code
        self.msg = msg

    def geturl(self):
        return self._url

    def info(self):
        return self._headers
class closeable_response:
"""Avoids unnecessarily clobbering urllib.addinfourl methods on .close().
Only supports responses returned by mechanize.HTTPHandler.
After .close(), the following methods are supported:
.read()
.readline()
.info()
.geturl()
.__iter__()
.next()
.close()
and the following attributes are supported:
.code
.msg
Also supports pickling (but the stdlib currently does something to prevent
it: http://python.org/sf/1144636).
"""
# presence of this attr indicates is useable after .close()
closeable_response = None
def __init__(self, fp, headers, url, code, msg):
    """Store the response metadata and adopt *fp* as the underlying stream."""
    self.code = code
    self.msg = msg
    self._url = url
    self._headers = headers
    self._set_fp(fp)
def _set_fp(self, fp):
    """Adopt *fp*: re-export its file methods directly on this instance."""
    self.fp = fp
    self.read = fp.read
    self.readline = fp.readline
    # readlines is optional on the delegate; only mirror it when present
    if hasattr(fp, "readlines"):
        self.readlines = fp.readlines
    # fileno may be missing (e.g. on eofresponse); fall back to a stub
    self.fileno = getattr(fp, "fileno", lambda: None)
    self.__iter__ = fp.__iter__
    self.next = fp.next
def __repr__(self):
    # same shape as seek_wrapper.__repr__; abs() guards against negative id()
    return '<%s at %s whose fp = %r>' % (
        self.__class__.__name__, hex(abs(id(self))), self.fp)
def info(self):
    # headers survive .close() because they live on the response, not on fp
    return self._headers
def geturl(self):
    # the URL likewise survives .close()
    return self._url
def close(self):
    """Close the underlying stream, then swap in an EOF stand-in so the
    documented post-close API (read/readline/info/geturl/...) keeps working."""
    self.fp.close()
    stub = eofresponse(self._url, self._headers, self.code, self.msg)
    self._set_fp(stub)
def __getstate__(self):
# There are three obvious options here:
# 1. truncate
# 2. read to end
| |
('IsFT1248H', ctypes.c_ubyte),
('FT1248CpolH', ctypes.c_ubyte),
('FT1248LsbH', ctypes.c_ubyte),
('FT1248FlowControlH', ctypes.c_ubyte),
('IsVCPH', ctypes.c_ubyte),
('PowerSaveEnableH', ctypes.c_ubyte),
('PADDING_0', ctypes.c_ubyte),
]
# --- auto-generated ctypes bindings for the FTDI D2XX EEPROM API ----------
# Each FT_* entry below binds a function exported by ftd2xx64.dll and pins
# its restype/argtypes to the prototype from ftd2xx.h (header line numbers
# are recorded in the generated docstrings).  Do not edit by hand.
FT_PROGRAM_DATA = struct_ft_program_data
PFT_PROGRAM_DATA = POINTER_T(struct_ft_program_data)
FT_EE_Program = _libraries['ftd2xx64.dll'].FT_EE_Program
FT_EE_Program.restype = FT_STATUS
# FT_EE_Program(ftHandle, pData)
FT_EE_Program.argtypes = [FT_HANDLE, PFT_PROGRAM_DATA]
FT_EE_Program.__doc__ = \
    """FT_STATUS FT_EE_Program(FT_HANDLE ftHandle, PFT_PROGRAM_DATA pData)
ftd2xx.h:700"""
FT_EE_ProgramEx = _libraries['ftd2xx64.dll'].FT_EE_ProgramEx
FT_EE_ProgramEx.restype = FT_STATUS
# FT_EE_ProgramEx(ftHandle, pData, Manufacturer, ManufacturerId, Description, SerialNumber)
FT_EE_ProgramEx.argtypes = [FT_HANDLE, PFT_PROGRAM_DATA, POINTER_T(ctypes.c_char), POINTER_T(ctypes.c_char), POINTER_T(ctypes.c_char), POINTER_T(ctypes.c_char)]
FT_EE_ProgramEx.__doc__ = \
    """FT_STATUS FT_EE_ProgramEx(FT_HANDLE ftHandle, PFT_PROGRAM_DATA pData, LP_c_char Manufacturer, LP_c_char ManufacturerId, LP_c_char Description, LP_c_char SerialNumber)
ftd2xx.h:706"""
FT_EE_Read = _libraries['ftd2xx64.dll'].FT_EE_Read
FT_EE_Read.restype = FT_STATUS
# FT_EE_Read(ftHandle, pData)
FT_EE_Read.argtypes = [FT_HANDLE, PFT_PROGRAM_DATA]
FT_EE_Read.__doc__ = \
    """FT_STATUS FT_EE_Read(FT_HANDLE ftHandle, PFT_PROGRAM_DATA pData)
ftd2xx.h:716"""
FT_EE_ReadEx = _libraries['ftd2xx64.dll'].FT_EE_ReadEx
FT_EE_ReadEx.restype = FT_STATUS
# FT_EE_ReadEx(ftHandle, pData, Manufacturer, ManufacturerId, Description, SerialNumber)
FT_EE_ReadEx.argtypes = [FT_HANDLE, PFT_PROGRAM_DATA, POINTER_T(ctypes.c_char), POINTER_T(ctypes.c_char), POINTER_T(ctypes.c_char), POINTER_T(ctypes.c_char)]
FT_EE_ReadEx.__doc__ = \
    """FT_STATUS FT_EE_ReadEx(FT_HANDLE ftHandle, PFT_PROGRAM_DATA pData, LP_c_char Manufacturer, LP_c_char ManufacturerId, LP_c_char Description, LP_c_char SerialNumber)
ftd2xx.h:722"""
FT_EE_UASize = _libraries['ftd2xx64.dll'].FT_EE_UASize
FT_EE_UASize.restype = FT_STATUS
# FT_EE_UASize(ftHandle, lpdwSize)
FT_EE_UASize.argtypes = [FT_HANDLE, LPDWORD]
FT_EE_UASize.__doc__ = \
    """FT_STATUS FT_EE_UASize(FT_HANDLE ftHandle, LPDWORD lpdwSize)
ftd2xx.h:732"""
FT_EE_UAWrite = _libraries['ftd2xx64.dll'].FT_EE_UAWrite
FT_EE_UAWrite.restype = FT_STATUS
# FT_EE_UAWrite(ftHandle, pucData, dwDataLen)
FT_EE_UAWrite.argtypes = [FT_HANDLE, PUCHAR, DWORD]
FT_EE_UAWrite.__doc__ = \
    """FT_STATUS FT_EE_UAWrite(FT_HANDLE ftHandle, PUCHAR pucData, DWORD dwDataLen)
ftd2xx.h:738"""
FT_EE_UARead = _libraries['ftd2xx64.dll'].FT_EE_UARead
FT_EE_UARead.restype = FT_STATUS
# FT_EE_UARead(ftHandle, pucData, dwDataLen, lpdwBytesRead)
FT_EE_UARead.argtypes = [FT_HANDLE, PUCHAR, DWORD, LPDWORD]
FT_EE_UARead.__doc__ = \
    """FT_STATUS FT_EE_UARead(FT_HANDLE ftHandle, PUCHAR pucData, DWORD dwDataLen, LPDWORD lpdwBytesRead)
ftd2xx.h:745"""
class struct_ft_eeprom_header(ctypes.Structure):
    # Generated binding for FT_EEPROM_HEADER (ftd2xx.h): the common leading
    # block of every FT_EEPROM_* structure.  _pack_ and the field order define
    # the exact C layout consumed by FT_EEPROM_Read/Program -- do not reorder.
    _pack_ = True # source:False
    _fields_ = [
        ('deviceType', ctypes.c_uint32),
        ('VendorId', ctypes.c_uint16),
        ('ProductId', ctypes.c_uint16),
        ('SerNumEnable', ctypes.c_ubyte),
        ('PADDING_0', ctypes.c_ubyte),
        ('MaxPower', ctypes.c_uint16),
        ('SelfPowered', ctypes.c_ubyte),
        ('RemoteWakeup', ctypes.c_ubyte),
        ('PullDownEnable', ctypes.c_ubyte),
        ('PADDING_1', ctypes.c_ubyte),
    ]
# C-style typedef aliases: the structure and a pointer-to-structure type
FT_EEPROM_HEADER = struct_ft_eeprom_header
PFT_EEPROM_HEADER = POINTER_T(struct_ft_eeprom_header)
class struct_ft_eeprom_232b(ctypes.Structure):
    # Generated binding for FT_EEPROM_232B: the FT232B EEPROM image holds
    # only the common header, no device-specific fields.
    _pack_ = True # source:False
    _fields_ = [
        ('common', FT_EEPROM_HEADER),
    ]
FT_EEPROM_232B = struct_ft_eeprom_232b
PFT_EEPROM_232B = POINTER_T(struct_ft_eeprom_232b)
class struct_ft_eeprom_2232(ctypes.Structure):
    # Generated binding for FT_EEPROM_2232 (FT2232C/D): per-channel (A/B)
    # drive/protocol options.  Layout mirrors ftd2xx.h -- do not reorder.
    _pack_ = True # source:False
    _fields_ = [
        ('common', FT_EEPROM_HEADER),
        ('AIsHighCurrent', ctypes.c_ubyte),
        ('BIsHighCurrent', ctypes.c_ubyte),
        ('AIsFifo', ctypes.c_ubyte),
        ('AIsFifoTar', ctypes.c_ubyte),
        ('AIsFastSer', ctypes.c_ubyte),
        ('BIsFifo', ctypes.c_ubyte),
        ('BIsFifoTar', ctypes.c_ubyte),
        ('BIsFastSer', ctypes.c_ubyte),
        ('ADriverType', ctypes.c_ubyte),
        ('BDriverType', ctypes.c_ubyte),
        ('PADDING_0', ctypes.c_ubyte * 2),
    ]
FT_EEPROM_2232 = struct_ft_eeprom_2232
PFT_EEPROM_2232 = POINTER_T(struct_ft_eeprom_2232)
class struct_ft_eeprom_232r(ctypes.Structure):
    # Generated binding for FT_EEPROM_232R: signal-inversion flags and the
    # five CBUS pin assignments of the FT232R.  Layout mirrors ftd2xx.h.
    _pack_ = True # source:False
    _fields_ = [
        ('common', FT_EEPROM_HEADER),
        ('IsHighCurrent', ctypes.c_ubyte),
        ('UseExtOsc', ctypes.c_ubyte),
        ('InvertTXD', ctypes.c_ubyte),
        ('InvertRXD', ctypes.c_ubyte),
        ('InvertRTS', ctypes.c_ubyte),
        ('InvertCTS', ctypes.c_ubyte),
        ('InvertDTR', ctypes.c_ubyte),
        ('InvertDSR', ctypes.c_ubyte),
        ('InvertDCD', ctypes.c_ubyte),
        ('InvertRI', ctypes.c_ubyte),
        ('Cbus0', ctypes.c_ubyte),
        ('Cbus1', ctypes.c_ubyte),
        ('Cbus2', ctypes.c_ubyte),
        ('Cbus3', ctypes.c_ubyte),
        ('Cbus4', ctypes.c_ubyte),
        ('DriverType', ctypes.c_ubyte),
    ]
FT_EEPROM_232R = struct_ft_eeprom_232r
PFT_EEPROM_232R = POINTER_T(struct_ft_eeprom_232r)
class struct_ft_eeprom_2232h(ctypes.Structure):
    # Generated binding for FT_EEPROM_2232H: slew/schmitt/drive settings per
    # pin group (AL/AH/BL/BH) plus per-channel protocol flags.
    _pack_ = True # source:False
    _fields_ = [
        ('common', FT_EEPROM_HEADER),
        ('ALSlowSlew', ctypes.c_ubyte),
        ('ALSchmittInput', ctypes.c_ubyte),
        ('ALDriveCurrent', ctypes.c_ubyte),
        ('AHSlowSlew', ctypes.c_ubyte),
        ('AHSchmittInput', ctypes.c_ubyte),
        ('AHDriveCurrent', ctypes.c_ubyte),
        ('BLSlowSlew', ctypes.c_ubyte),
        ('BLSchmittInput', ctypes.c_ubyte),
        ('BLDriveCurrent', ctypes.c_ubyte),
        ('BHSlowSlew', ctypes.c_ubyte),
        ('BHSchmittInput', ctypes.c_ubyte),
        ('BHDriveCurrent', ctypes.c_ubyte),
        ('AIsFifo', ctypes.c_ubyte),
        ('AIsFifoTar', ctypes.c_ubyte),
        ('AIsFastSer', ctypes.c_ubyte),
        ('BIsFifo', ctypes.c_ubyte),
        ('BIsFifoTar', ctypes.c_ubyte),
        ('BIsFastSer', ctypes.c_ubyte),
        ('PowerSaveEnable', ctypes.c_ubyte),
        ('ADriverType', ctypes.c_ubyte),
        ('BDriverType', ctypes.c_ubyte),
        ('PADDING_0', ctypes.c_ubyte * 3),
    ]
FT_EEPROM_2232H = struct_ft_eeprom_2232h
PFT_EEPROM_2232H = POINTER_T(struct_ft_eeprom_2232h)
class struct_ft_eeprom_4232h(ctypes.Structure):
    # Generated binding for FT_EEPROM_4232H: per-channel (A-D) electrical
    # settings, TXDEN routing and driver types.  Layout mirrors ftd2xx.h.
    _pack_ = True # source:False
    _fields_ = [
        ('common', FT_EEPROM_HEADER),
        ('ASlowSlew', ctypes.c_ubyte),
        ('ASchmittInput', ctypes.c_ubyte),
        ('ADriveCurrent', ctypes.c_ubyte),
        ('BSlowSlew', ctypes.c_ubyte),
        ('BSchmittInput', ctypes.c_ubyte),
        ('BDriveCurrent', ctypes.c_ubyte),
        ('CSlowSlew', ctypes.c_ubyte),
        ('CSchmittInput', ctypes.c_ubyte),
        ('CDriveCurrent', ctypes.c_ubyte),
        ('DSlowSlew', ctypes.c_ubyte),
        ('DSchmittInput', ctypes.c_ubyte),
        ('DDriveCurrent', ctypes.c_ubyte),
        ('ARIIsTXDEN', ctypes.c_ubyte),
        ('BRIIsTXDEN', ctypes.c_ubyte),
        ('CRIIsTXDEN', ctypes.c_ubyte),
        ('DRIIsTXDEN', ctypes.c_ubyte),
        ('ADriverType', ctypes.c_ubyte),
        ('BDriverType', ctypes.c_ubyte),
        ('CDriverType', ctypes.c_ubyte),
        ('DDriverType', ctypes.c_ubyte),
    ]
FT_EEPROM_4232H = struct_ft_eeprom_4232h
PFT_EEPROM_4232H = POINTER_T(struct_ft_eeprom_4232h)
class struct_ft_eeprom_232h(ctypes.Structure):
    # Generated binding for FT_EEPROM_232H: AC/AD bus settings, ten CBUS pin
    # assignments and FT1248/FIFO/FastSerial mode flags of the FT232H.
    _pack_ = True # source:False
    _fields_ = [
        ('common', FT_EEPROM_HEADER),
        ('ACSlowSlew', ctypes.c_ubyte),
        ('ACSchmittInput', ctypes.c_ubyte),
        ('ACDriveCurrent', ctypes.c_ubyte),
        ('ADSlowSlew', ctypes.c_ubyte),
        ('ADSchmittInput', ctypes.c_ubyte),
        ('ADDriveCurrent', ctypes.c_ubyte),
        ('Cbus0', ctypes.c_ubyte),
        ('Cbus1', ctypes.c_ubyte),
        ('Cbus2', ctypes.c_ubyte),
        ('Cbus3', ctypes.c_ubyte),
        ('Cbus4', ctypes.c_ubyte),
        ('Cbus5', ctypes.c_ubyte),
        ('Cbus6', ctypes.c_ubyte),
        ('Cbus7', ctypes.c_ubyte),
        ('Cbus8', ctypes.c_ubyte),
        ('Cbus9', ctypes.c_ubyte),
        ('FT1248Cpol', ctypes.c_ubyte),
        ('FT1248Lsb', ctypes.c_ubyte),
        ('FT1248FlowControl', ctypes.c_ubyte),
        ('IsFifo', ctypes.c_ubyte),
        ('IsFifoTar', ctypes.c_ubyte),
        ('IsFastSer', ctypes.c_ubyte),
        ('IsFT1248', ctypes.c_ubyte),
        ('PowerSaveEnable', ctypes.c_ubyte),
        ('DriverType', ctypes.c_ubyte),
        ('PADDING_0', ctypes.c_ubyte * 3),
    ]
FT_EEPROM_232H = struct_ft_eeprom_232h
PFT_EEPROM_232H = POINTER_T(struct_ft_eeprom_232h)
class struct_ft_eeprom_x_series(ctypes.Structure):
    # Generated binding for FT_EEPROM_X_SERIES (FT-X devices): electrical
    # settings, CBUS pins, inversion flags, battery-charge-detect (BCD),
    # I2C and RS485 options.  Layout mirrors ftd2xx.h -- do not reorder.
    _pack_ = True # source:False
    _fields_ = [
        ('common', FT_EEPROM_HEADER),
        ('ACSlowSlew', ctypes.c_ubyte),
        ('ACSchmittInput', ctypes.c_ubyte),
        ('ACDriveCurrent', ctypes.c_ubyte),
        ('ADSlowSlew', ctypes.c_ubyte),
        ('ADSchmittInput', ctypes.c_ubyte),
        ('ADDriveCurrent', ctypes.c_ubyte),
        ('Cbus0', ctypes.c_ubyte),
        ('Cbus1', ctypes.c_ubyte),
        ('Cbus2', ctypes.c_ubyte),
        ('Cbus3', ctypes.c_ubyte),
        ('Cbus4', ctypes.c_ubyte),
        ('Cbus5', ctypes.c_ubyte),
        ('Cbus6', ctypes.c_ubyte),
        ('InvertTXD', ctypes.c_ubyte),
        ('InvertRXD', ctypes.c_ubyte),
        ('InvertRTS', ctypes.c_ubyte),
        ('InvertCTS', ctypes.c_ubyte),
        ('InvertDTR', ctypes.c_ubyte),
        ('InvertDSR', ctypes.c_ubyte),
        ('InvertDCD', ctypes.c_ubyte),
        ('InvertRI', ctypes.c_ubyte),
        ('BCDEnable', ctypes.c_ubyte),
        ('BCDForceCbusPWREN', ctypes.c_ubyte),
        ('BCDDisableSleep', ctypes.c_ubyte),
        ('I2CSlaveAddress', ctypes.c_uint16),
        ('PADDING_0', ctypes.c_ubyte * 2),
        ('I2CDeviceId', ctypes.c_uint32),
        ('I2CDisableSchmitt', ctypes.c_ubyte),
        ('FT1248Cpol', ctypes.c_ubyte),
        ('FT1248Lsb', ctypes.c_ubyte),
        ('FT1248FlowControl', ctypes.c_ubyte),
        ('RS485EchoSuppress', ctypes.c_ubyte),
        ('PowerSaveEnable', ctypes.c_ubyte),
        ('DriverType', ctypes.c_ubyte),
        ('PADDING_1', ctypes.c_ubyte),
    ]
FT_EEPROM_X_SERIES = struct_ft_eeprom_x_series
PFT_EEPROM_X_SERIES = POINTER_T(struct_ft_eeprom_x_series)
# --- auto-generated prototypes: EEPROM access, device control and the -----
# --- Win32-style FT_W32_* file API of ftd2xx64.dll ------------------------
# Generated from ftd2xx.h; each binding pins restype/argtypes.  Do not edit.
FT_EEPROM_Read = _libraries['ftd2xx64.dll'].FT_EEPROM_Read
FT_EEPROM_Read.restype = FT_STATUS
# FT_EEPROM_Read(ftHandle, eepromData, eepromDataSize, Manufacturer, ManufacturerId, Description, SerialNumber)
FT_EEPROM_Read.argtypes = [FT_HANDLE, POINTER_T(None), DWORD, POINTER_T(ctypes.c_char), POINTER_T(ctypes.c_char), POINTER_T(ctypes.c_char), POINTER_T(ctypes.c_char)]
FT_EEPROM_Read.__doc__ = \
    """FT_STATUS FT_EEPROM_Read(FT_HANDLE ftHandle, LP_None eepromData, DWORD eepromDataSize, LP_c_char Manufacturer, LP_c_char ManufacturerId, LP_c_char Description, LP_c_char SerialNumber)
ftd2xx.h:968"""
FT_EEPROM_Program = _libraries['ftd2xx64.dll'].FT_EEPROM_Program
FT_EEPROM_Program.restype = FT_STATUS
# FT_EEPROM_Program(ftHandle, eepromData, eepromDataSize, Manufacturer, ManufacturerId, Description, SerialNumber)
FT_EEPROM_Program.argtypes = [FT_HANDLE, POINTER_T(None), DWORD, POINTER_T(ctypes.c_char), POINTER_T(ctypes.c_char), POINTER_T(ctypes.c_char), POINTER_T(ctypes.c_char)]
FT_EEPROM_Program.__doc__ = \
    """FT_STATUS FT_EEPROM_Program(FT_HANDLE ftHandle, LP_None eepromData, DWORD eepromDataSize, LP_c_char Manufacturer, LP_c_char ManufacturerId, LP_c_char Description, LP_c_char SerialNumber)
ftd2xx.h:980"""
FT_SetLatencyTimer = _libraries['ftd2xx64.dll'].FT_SetLatencyTimer
FT_SetLatencyTimer.restype = FT_STATUS
# FT_SetLatencyTimer(ftHandle, ucLatency)
FT_SetLatencyTimer.argtypes = [FT_HANDLE, UCHAR]
FT_SetLatencyTimer.__doc__ = \
    """FT_STATUS FT_SetLatencyTimer(FT_HANDLE ftHandle, UCHAR ucLatency)
ftd2xx.h:992"""
FT_GetLatencyTimer = _libraries['ftd2xx64.dll'].FT_GetLatencyTimer
FT_GetLatencyTimer.restype = FT_STATUS
# FT_GetLatencyTimer(ftHandle, pucLatency)
FT_GetLatencyTimer.argtypes = [FT_HANDLE, PUCHAR]
FT_GetLatencyTimer.__doc__ = \
    """FT_STATUS FT_GetLatencyTimer(FT_HANDLE ftHandle, PUCHAR pucLatency)
ftd2xx.h:998"""
FT_SetBitMode = _libraries['ftd2xx64.dll'].FT_SetBitMode
FT_SetBitMode.restype = FT_STATUS
# FT_SetBitMode(ftHandle, ucMask, ucEnable)
FT_SetBitMode.argtypes = [FT_HANDLE, UCHAR, UCHAR]
FT_SetBitMode.__doc__ = \
    """FT_STATUS FT_SetBitMode(FT_HANDLE ftHandle, UCHAR ucMask, UCHAR ucEnable)
ftd2xx.h:1004"""
FT_GetBitMode = _libraries['ftd2xx64.dll'].FT_GetBitMode
FT_GetBitMode.restype = FT_STATUS
# FT_GetBitMode(ftHandle, pucMode)
FT_GetBitMode.argtypes = [FT_HANDLE, PUCHAR]
FT_GetBitMode.__doc__ = \
    """FT_STATUS FT_GetBitMode(FT_HANDLE ftHandle, PUCHAR pucMode)
ftd2xx.h:1011"""
FT_SetUSBParameters = _libraries['ftd2xx64.dll'].FT_SetUSBParameters
FT_SetUSBParameters.restype = FT_STATUS
# FT_SetUSBParameters(ftHandle, ulInTransferSize, ulOutTransferSize)
FT_SetUSBParameters.argtypes = [FT_HANDLE, ULONG, ULONG]
FT_SetUSBParameters.__doc__ = \
    """FT_STATUS FT_SetUSBParameters(FT_HANDLE ftHandle, ULONG ulInTransferSize, ULONG ulOutTransferSize)
ftd2xx.h:1017"""
FT_SetDeadmanTimeout = _libraries['ftd2xx64.dll'].FT_SetDeadmanTimeout
FT_SetDeadmanTimeout.restype = FT_STATUS
# FT_SetDeadmanTimeout(ftHandle, ulDeadmanTimeout)
FT_SetDeadmanTimeout.argtypes = [FT_HANDLE, ULONG]
FT_SetDeadmanTimeout.__doc__ = \
    """FT_STATUS FT_SetDeadmanTimeout(FT_HANDLE ftHandle, ULONG ulDeadmanTimeout)
ftd2xx.h:1024"""
FT_GetDeviceInfo = _libraries['ftd2xx64.dll'].FT_GetDeviceInfo
FT_GetDeviceInfo.restype = FT_STATUS
# FT_GetDeviceInfo(ftHandle, lpftDevice, lpdwID, SerialNumber, Description, Dummy)
FT_GetDeviceInfo.argtypes = [FT_HANDLE, POINTER_T(ctypes.c_uint32), LPDWORD, PCHAR, PCHAR, LPVOID]
FT_GetDeviceInfo.__doc__ = \
    """FT_STATUS FT_GetDeviceInfo(FT_HANDLE ftHandle, LP_c_uint32 lpftDevice, LPDWORD lpdwID, PCHAR SerialNumber, PCHAR Description, LPVOID Dummy)
ftd2xx.h:1053"""
FT_StopInTask = _libraries['ftd2xx64.dll'].FT_StopInTask
FT_StopInTask.restype = FT_STATUS
# FT_StopInTask(ftHandle)
FT_StopInTask.argtypes = [FT_HANDLE]
FT_StopInTask.__doc__ = \
    """FT_STATUS FT_StopInTask(FT_HANDLE ftHandle)
ftd2xx.h:1063"""
FT_RestartInTask = _libraries['ftd2xx64.dll'].FT_RestartInTask
FT_RestartInTask.restype = FT_STATUS
# FT_RestartInTask(ftHandle)
FT_RestartInTask.argtypes = [FT_HANDLE]
FT_RestartInTask.__doc__ = \
    """FT_STATUS FT_RestartInTask(FT_HANDLE ftHandle)
ftd2xx.h:1068"""
FT_SetResetPipeRetryCount = _libraries['ftd2xx64.dll'].FT_SetResetPipeRetryCount
FT_SetResetPipeRetryCount.restype = FT_STATUS
# FT_SetResetPipeRetryCount(ftHandle, dwCount)
FT_SetResetPipeRetryCount.argtypes = [FT_HANDLE, DWORD]
FT_SetResetPipeRetryCount.__doc__ = \
    """FT_STATUS FT_SetResetPipeRetryCount(FT_HANDLE ftHandle, DWORD dwCount)
ftd2xx.h:1073"""
FT_ResetPort = _libraries['ftd2xx64.dll'].FT_ResetPort
FT_ResetPort.restype = FT_STATUS
# FT_ResetPort(ftHandle)
FT_ResetPort.argtypes = [FT_HANDLE]
FT_ResetPort.__doc__ = \
    """FT_STATUS FT_ResetPort(FT_HANDLE ftHandle)
ftd2xx.h:1079"""
FT_CyclePort = _libraries['ftd2xx64.dll'].FT_CyclePort
FT_CyclePort.restype = FT_STATUS
# FT_CyclePort(ftHandle)
FT_CyclePort.argtypes = [FT_HANDLE]
FT_CyclePort.__doc__ = \
    """FT_STATUS FT_CyclePort(FT_HANDLE ftHandle)
ftd2xx.h:1084"""
FT_W32_CreateFile = _libraries['ftd2xx64.dll'].FT_W32_CreateFile
FT_W32_CreateFile.restype = FT_HANDLE
# FT_W32_CreateFile(lpszName, dwAccess, dwShareMode, lpSecurityAttributes, dwCreate, dwAttrsAndFlags, hTemplate)
FT_W32_CreateFile.argtypes = [LPCTSTR, DWORD, DWORD, LPSECURITY_ATTRIBUTES, DWORD, DWORD, HANDLE]
FT_W32_CreateFile.__doc__ = \
    """FT_HANDLE FT_W32_CreateFile(LPCTSTR lpszName, DWORD dwAccess, DWORD dwShareMode, LPSECURITY_ATTRIBUTES lpSecurityAttributes, DWORD dwCreate, DWORD dwAttrsAndFlags, HANDLE hTemplate)
ftd2xx.h:1094"""
FT_W32_CloseHandle = _libraries['ftd2xx64.dll'].FT_W32_CloseHandle
FT_W32_CloseHandle.restype = BOOL
# FT_W32_CloseHandle(ftHandle)
FT_W32_CloseHandle.argtypes = [FT_HANDLE]
FT_W32_CloseHandle.__doc__ = \
    """BOOL FT_W32_CloseHandle(FT_HANDLE ftHandle)
ftd2xx.h:1105"""
FT_W32_ReadFile = _libraries['ftd2xx64.dll'].FT_W32_ReadFile
FT_W32_ReadFile.restype = BOOL
# FT_W32_ReadFile(ftHandle, lpBuffer, nBufferSize, lpBytesReturned, lpOverlapped)
FT_W32_ReadFile.argtypes = [FT_HANDLE, LPVOID, DWORD, LPDWORD, LPOVERLAPPED]
FT_W32_ReadFile.__doc__ = \
    """BOOL FT_W32_ReadFile(FT_HANDLE ftHandle, LPVOID lpBuffer, DWORD nBufferSize, LPDWORD lpBytesReturned, LPOVERLAPPED lpOverlapped)
ftd2xx.h:1110"""
FT_W32_WriteFile = _libraries['ftd2xx64.dll'].FT_W32_WriteFile
FT_W32_WriteFile.restype = BOOL
# FT_W32_WriteFile(ftHandle, lpBuffer, nBufferSize, lpBytesWritten, lpOverlapped)
FT_W32_WriteFile.argtypes = [FT_HANDLE, LPVOID, DWORD, LPDWORD, LPOVERLAPPED]
FT_W32_WriteFile.__doc__ = \
    """BOOL FT_W32_WriteFile(FT_HANDLE ftHandle, LPVOID lpBuffer, DWORD nBufferSize, LPDWORD lpBytesWritten, LPOVERLAPPED lpOverlapped)
ftd2xx.h:1119"""
FT_W32_GetLastError = _libraries['ftd2xx64.dll'].FT_W32_GetLastError
FT_W32_GetLastError.restype = DWORD
# FT_W32_GetLastError(ftHandle)
FT_W32_GetLastError.argtypes = [FT_HANDLE]
FT_W32_GetLastError.__doc__ = \
    """DWORD FT_W32_GetLastError(FT_HANDLE ftHandle)
ftd2xx.h:1128"""
FT_W32_GetOverlappedResult = _libraries['ftd2xx64.dll'].FT_W32_GetOverlappedResult
FT_W32_GetOverlappedResult.restype = BOOL
# FT_W32_GetOverlappedResult(ftHandle, lpOverlapped, lpdwBytesTransferred, bWait)
FT_W32_GetOverlappedResult.argtypes = [FT_HANDLE, LPOVERLAPPED, LPDWORD, BOOL]
FT_W32_GetOverlappedResult.__doc__ = \
    """BOOL FT_W32_GetOverlappedResult(FT_HANDLE ftHandle, LPOVERLAPPED lpOverlapped, LPDWORD lpdwBytesTransferred, BOOL bWait)
ftd2xx.h:1133"""
FT_W32_CancelIo = _libraries['ftd2xx64.dll'].FT_W32_CancelIo
FT_W32_CancelIo.restype = BOOL
# FT_W32_CancelIo(ftHandle)
FT_W32_CancelIo.argtypes = [FT_HANDLE]
FT_W32_CancelIo.__doc__ = \
    """BOOL FT_W32_CancelIo(FT_HANDLE ftHandle)
ftd2xx.h:1141"""
class struct__FTCOMSTAT(ctypes.Structure):
    # Generated binding for FTCOMSTAT (Win32 COMSTAT equivalent): flow-control
    # state bitfield plus the input/output queue counts.  The third tuple
    # element is the bit width of each bitfield -- do not reorder.
    _pack_ = True # source:False
    _fields_ = [
        ('fCtsHold', ctypes.c_uint32, 1),
        ('fDsrHold', ctypes.c_uint32, 1),
        ('fRlsdHold', ctypes.c_uint32, 1),
        ('fXoffHold', ctypes.c_uint32, 1),
        ('fXoffSent', ctypes.c_uint32, 1),
        ('fEof', ctypes.c_uint32, 1),
        ('fTxim', ctypes.c_uint32, 1),
        ('fReserved', ctypes.c_uint32, 25),
        ('cbInQue', ctypes.c_uint32),
        ('cbOutQue', ctypes.c_uint32),
    ]
FTCOMSTAT = struct__FTCOMSTAT
LPFTCOMSTAT = POINTER_T(struct__FTCOMSTAT)
class struct__FTDCB(ctypes.Structure):
    # Generated binding for FTDCB (Win32 DCB equivalent): serial-port control
    # block with baud rate, flow-control bitfields and framing characters.
    _pack_ = True # source:False
    _fields_ = [
        ('DCBlength', ctypes.c_uint32),
        ('BaudRate', ctypes.c_uint32),
        ('fBinary', ctypes.c_uint32, 1),
        ('fParity', ctypes.c_uint32, 1),
        ('fOutxCtsFlow', ctypes.c_uint32, 1),
        ('fOutxDsrFlow', ctypes.c_uint32, 1),
        ('fDtrControl', ctypes.c_uint32, 2),
        ('fDsrSensitivity', ctypes.c_uint32, 1),
        ('fTXContinueOnXoff', ctypes.c_uint32, 1),
        ('fOutX', ctypes.c_uint32, 1),
        ('fInX', ctypes.c_uint32, 1),
        ('fErrorChar', ctypes.c_uint32, 1),
        ('fNull', ctypes.c_uint32, 1),
        ('fRtsControl', ctypes.c_uint32, 2),
        ('fAbortOnError', ctypes.c_uint32, 1),
        ('fDummy2', ctypes.c_uint32, 17),
        ('wReserved', ctypes.c_uint16),
        ('XonLim', ctypes.c_uint16),
        ('XoffLim', ctypes.c_uint16),
        ('ByteSize', ctypes.c_ubyte),
        ('Parity', ctypes.c_ubyte),
        ('StopBits', ctypes.c_ubyte),
        ('XonChar', ctypes.c_char),
        ('XoffChar', ctypes.c_char),
        ('ErrorChar', ctypes.c_char),
        ('EofChar', ctypes.c_char),
        ('EvtChar', ctypes.c_char),
        ('wReserved1', ctypes.c_uint16),
    ]
FTDCB = struct__FTDCB
LPFTDCB = POINTER_T(struct__FTDCB)
class struct__FTTIMEOUTS(ctypes.Structure):
    # Generated binding for FTTIMEOUTS (Win32 COMMTIMEOUTS equivalent):
    # read/write timeout parameters in milliseconds.
    _pack_ = True # source:False
    _fields_ = [
        ('ReadIntervalTimeout', ctypes.c_uint32),
        ('ReadTotalTimeoutMultiplier', ctypes.c_uint32),
        ('ReadTotalTimeoutConstant', ctypes.c_uint32),
        ('WriteTotalTimeoutMultiplier', ctypes.c_uint32),
        ('WriteTotalTimeoutConstant', ctypes.c_uint32),
    ]
FTTIMEOUTS = struct__FTTIMEOUTS
LPFTTIMEOUTS = POINTER_T(struct__FTTIMEOUTS)
# --- auto-generated prototypes: Win32-style comm-port emulation API -------
FT_W32_ClearCommBreak = _libraries['ftd2xx64.dll'].FT_W32_ClearCommBreak
FT_W32_ClearCommBreak.restype = BOOL
# FT_W32_ClearCommBreak(ftHandle)
FT_W32_ClearCommBreak.argtypes = [FT_HANDLE]
FT_W32_ClearCommBreak.__doc__ = \
    """BOOL FT_W32_ClearCommBreak(FT_HANDLE ftHandle)
ftd2xx.h:1203"""
FT_W32_ClearCommError = _libraries['ftd2xx64.dll'].FT_W32_ClearCommError
FT_W32_ClearCommError.restype = BOOL
# FT_W32_ClearCommError(ftHandle, lpdwErrors, lpftComstat)
FT_W32_ClearCommError.argtypes = [FT_HANDLE, LPDWORD, LPFTCOMSTAT]
FT_W32_ClearCommError.__doc__ = \
    """BOOL FT_W32_ClearCommError(FT_HANDLE ftHandle, LPDWORD lpdwErrors, LPFTCOMSTAT lpftComstat)
ftd2xx.h:1208"""
FT_W32_EscapeCommFunction = _libraries['ftd2xx64.dll'].FT_W32_EscapeCommFunction
FT_W32_EscapeCommFunction.restype = BOOL
# FT_W32_EscapeCommFunction(ftHandle, dwFunc)
FT_W32_EscapeCommFunction.argtypes = [FT_HANDLE, DWORD]
FT_W32_EscapeCommFunction.__doc__ = \
    """BOOL FT_W32_EscapeCommFunction(FT_HANDLE ftHandle, DWORD dwFunc)
ftd2xx.h:1215"""
FT_W32_GetCommModemStatus = _libraries['ftd2xx64.dll'].FT_W32_GetCommModemStatus
FT_W32_GetCommModemStatus.restype = BOOL
# FT_W32_GetCommModemStatus(ftHandle, lpdwModemStatus)
FT_W32_GetCommModemStatus.argtypes = [FT_HANDLE, LPDWORD]
FT_W32_GetCommModemStatus.__doc__ = \
    """BOOL FT_W32_GetCommModemStatus(FT_HANDLE ftHandle, LPDWORD lpdwModemStatus)
ftd2xx.h:1221"""
FT_W32_GetCommState = _libraries['ftd2xx64.dll'].FT_W32_GetCommState
FT_W32_GetCommState.restype = BOOL
# FT_W32_GetCommState(ftHandle, lpftDcb)
FT_W32_GetCommState.argtypes = [FT_HANDLE, LPFTDCB]
FT_W32_GetCommState.__doc__ = \
    """BOOL FT_W32_GetCommState(FT_HANDLE ftHandle, LPFTDCB lpftDcb)
ftd2xx.h:1227"""
FT_W32_GetCommTimeouts = _libraries['ftd2xx64.dll'].FT_W32_GetCommTimeouts
FT_W32_GetCommTimeouts.restype = BOOL
# | |
None:
self.parent.parent.set_statusmessage('Selection is None?')
if not items:
return
for _, data in items:
new_item = self.gui.add_listitem(data[0])
self.gui.set_listitem_values(new_item, [data[0]] + list(data[2:]))
def change_selected(self, item_n):
    """Callback for a change of the selected item (cursor move or click).

    Remembers the new selection, reloads the action data unless a new item
    is being created, and updates the archive button caption.
    """
    self.parent.current_item = item_n
    self.gui.set_selection()
    if not self.parent.newitem:
        self.readp(self.gui.get_selected_action())
    caption = "&Herleef" if self.parent.pagedata.arch else "&Archiveer"
    self.gui.set_archive_button_text(caption)
def activate_item(self):
    """Callback for activating an item (double-click or Enter): open the action."""
    self.goto_actie()
def select_items(self, event=None):
    """Show the selection dialog.

    Selection is possible not only on (a part of) the text but also on
    status, category, etc.  For the SQL backend the stored selection options
    are loaded and translated into the argument format the dialog expects;
    the dialog is re-shown until the resulting selection loads cleanly or
    the user cancels.
    """
    args = self.sel_args, None
    if self.parent.parent.datatype == shared.DataType.SQL.name:
        data = dmls.SelectOptions(self.parent.fnaam, self.parent.parent.user)
        args, sel_args = data.load_options(), {}
        for key, value in args.items():
            if key == 'nummer':
                for item in value:  # split into idgt, id and idlt
                    if len(item) == 1:
                        sel_args['id'] = 'and' if item[0] == 'en' else 'or'
                    elif item[1] == 'GT':
                        sel_args['idgt'] = item[0]
                    elif item[1] == 'LT':
                        sel_args['idlt'] = item[0]
            # elif key == 'arch':
            #     sel_args[key] = {0: 'narch', 1: 'arch', 2: 'alles'}[value]
            elif value:
                sel_args[key] = value
        args = sel_args, data
    while True:
        test = gui.show_dialog(self.gui, gui.SelectOptionsDialog, args)
        if not test:
            break
        self.parent.rereadlist = True
        try:
            self.vulp()
        except (dmlx.DataError, dmls.DataError) as msg:
            self.parent.rereadlist = False
            # bug fix: the message parent must be the GUI widget, not this
            # controller object (every other call site passes self.gui)
            gui.show_message(self.gui, str(msg))
        else:
            break
def sort_items(self, *args):
    """Show the sort options dialog.

    Sorting is possible on date/time, category, title and status via a
    dialog with 2x4 comboboxes in which the column order and the sort
    direction per column can be specified.
    """
    sortopts, sortlist = {}, []
    if self.parent.parent.datatype == shared.DataType.XML.name:
        gui.show_message(self.gui, 'Sorry, multi-column sorteren werkt nog niet')
        return
    if self.parent.parent.datatype == shared.DataType.SQL.name:
        sortopts = self.saved_sortopts.load_options()
        try:
            sortlist = [x[0] for x in dmls.SORTFIELDS]
        except AttributeError:
            # older data layers may not define SORTFIELDS; fall back below
            pass
    if not sortlist:
        # fall back to the column titles; column 1 is renamed for display
        sortlist = list(self.parent.ctitels)
        sortlist[1] = "Soort"
    sortlist.insert(0, "(geen)")
    args = sortopts, sortlist
    test = gui.show_dialog(self.gui, gui.SortOptionsDialog, args)
    if not test:
        return
    if self.sort_via_options:
        self.gui.enable_sorting(False)
    self.parent.rereadlist = True
    try:
        self.vulp()
        # for XML the actual sorting may still have to happen here
    except (dmlx.DataError, dmls.DataError) as msg:
        self.parent.rereadlist = False
        # bug fix: the message parent must be the GUI widget, not this
        # controller object (consistent with the call sites above)
        gui.show_message(self.gui, str(msg))
    else:
        self.gui.enable_sorting(True)
def archiveer(self, *args):
    """Archive or revive the currently selected item."""
    datatype = self.parent.parent.datatype
    selection = self.gui.get_selected_action()
    if datatype == shared.DataType.XML.name:
        selection = shared.data2str(selection)
    else:
        selection = shared.data2int(selection)
    self.readp(selection)
    if datatype == shared.DataType.XML.name:
        # XML backend: toggle the flag and log the change as an event
        self.parent.pagedata.arch = not self.parent.pagedata.arch
        action = "gearchiveerd" if self.parent.pagedata.arch else "herleefd"
        self.parent.pagedata.events.append((shared.get_dts(), "Actie {0}".format(action)))
    elif datatype == shared.DataType.SQL.name:
        # SQL backend: the setter persists the flag itself
        self.parent.pagedata.set_arch(not self.parent.pagedata.arch)
    self.update_actie()  # self.parent.pagedata.write()
    self.parent.rereadlist = True
    self.vulp()
    self.parent.parent.gui.set_tabfocus(0)
    # only relevant for the "archived as well as active" selection
    if self.sel_args.get("arch", "") == "alles":
        self.gui.ensure_visible(self.parent.current_item)
    caption = "&Herleef" if self.parent.pagedata.arch else "&Archiveer"
    self.gui.set_archive_button_text(caption)
def enable_buttons(self, value=None):
    """Make the buttons usable or not; without a value the GUI default applies."""
    if value is None:
        self.gui.enable_buttons()
    else:
        self.gui.enable_buttons(value)
def get_items(self):
    "retrieve all listitems (delegates to the GUI list widget)"
    return self.gui.get_items()
def get_item_text(self, item_or_index, column):
    "get the item's text for a specified column (delegates to the GUI)"
    return self.gui.get_item_text(item_or_index, column)
def clear_selection(self):
    "initialize selection criteria (an empty dict means: no filtering)"
    self.sel_args = {}
class Page1(Page):
    """Page 1: start screen of an action (id, date, subject, title, status, category)."""

    def __init__(self, parent):
        self.parent = parent
        super().__init__(parent, pageno=1, standard=False)
        self.gui = gui.Page1Gui(parent, self)

    def vulp(self):
        """Fill the screen fields with the data to show, plus other
        initialisations; to be called before the page is displayed."""
        super().vulp()
        self.initializing = True
        self.gui.init_fields()
        self.parch = False
        if self.parent.pagedata is not None:  # and not self.parent.newitem:
            self.gui.set_text('id', str(self.parent.pagedata.id))
            self.gui.set_text('date', self.parent.pagedata.datum)
            self.parch = self.parent.pagedata.arch
            if self.parent.parent.datatype == shared.DataType.XML.name:
                # the XML backend stores subject and title in one string
                if self.parent.pagedata.titel is not None:
                    if " - " in self.parent.pagedata.titel:
                        hlp = self.parent.pagedata.titel.split(" - ", 1)
                    else:
                        hlp = self.parent.pagedata.titel.split(": ", 1)
                    self.gui.set_text('proc', hlp[0])
                    if len(hlp) > 1:
                        self.gui.set_text('desc', hlp[1])
            elif self.parent.parent.datatype == shared.DataType.SQL.name:
                self.gui.set_text('proc', self.parent.pagedata.over)
                self.gui.set_text('desc', self.parent.pagedata.titel)
            self.gui.set_choice('stat', self.parent.pagedata.status)
            self.gui.set_choice('cat', self.parent.pagedata.soort)
            self.oldbuf = self.gui.set_oldbuf()
        if self.parch:
            # NOTE(fix): this branch used to refill the proc/desc fields with
            # exactly the same values as above (copy-paste duplication); the
            # redundant block has been removed.
            aanuit = False
            self.gui.set_text('arch', "Deze actie is gearchiveerd")
            self.gui.set_archive_button_text("Herleven")
        else:
            aanuit = True
            self.gui.set_text('arch', '')
            self.gui.set_archive_button_text("Archiveren")
        if not self.parent.parent.is_user:
            # read-only users may never edit the fields
            aanuit = False
        self.gui.enable_fields(aanuit)
        self.initializing = False

    def savep(self, *args):
        """Save the page data; return True on success, False when validation fails."""
        super().savep()
        proc = self.gui.get_text('proc')
        self.gui.set_text('proc', proc.capitalize())
        self.enable_buttons(False)
        desc = self.gui.get_text('desc')
        if proc == "" or desc == "":
            gui.show_message(self.gui, "Beide tekstrubrieken moeten worden ingevuld")
            return False
        wijzig = False
        procdesc = " - ".join((proc, desc))
        if procdesc != self.parent.pagedata.titel:
            if self.parent.parent.datatype == shared.DataType.XML.name:
                self.parent.pagedata.titel = procdesc
            elif self.parent.parent.datatype == shared.DataType.SQL.name:
                self.parent.pagedata.over = proc
                self.parent.pagedata.events.append(
                    (shared.get_dts(), 'Onderwerp gewijzigd in "{0}"'.format(proc)))
                self.parent.pagedata.titel = procdesc = desc
            self.parent.pagedata.events.append(
                (shared.get_dts(), 'Titel gewijzigd in "{0}"'.format(procdesc)))
            wijzig = True
        newstat, sel = self.gui.get_choice_data('stat')
        if newstat != self.parent.pagedata.status:
            self.parent.pagedata.status = newstat
            self.parent.pagedata.events.append(
                (shared.get_dts(), 'Status gewijzigd in "{0}"'.format(sel)))
            wijzig = True
        newcat, sel = self.gui.get_choice_data('cat')
        if newcat != self.parent.pagedata.soort:
            self.parent.pagedata.soort = newcat
            self.parent.pagedata.events.append(
                (shared.get_dts(), 'Categorie gewijzigd in "{0}"'.format(sel)))
            wijzig = True
        if self.parch != self.parent.pagedata.arch:
            self.parent.pagedata.set_arch(self.parch)
            hlp = "gearchiveerd" if self.parch else "herleefd"
            self.parent.pagedata.events.append(
                (shared.get_dts(), "Actie {0}".format(hlp)))
            wijzig = True
        if wijzig:
            self.update_actie()
            # refresh the texts on panel 0 (the action list)
            pagegui = self.parent.pages[0].gui
            item = pagegui.get_selection()
            pagegui.set_item_text(item, 1, self.parent.pagedata.get_soorttext()[0].upper())
            pagegui.set_item_text(item, 2, self.parent.pagedata.get_statustext())
            pagegui.set_item_text(item, 3, self.parent.pagedata.updated)
            if self.parent.parent.datatype == shared.DataType.XML.name:
                pagegui.set_item_text(item, 4, self.parent.pagedata.titel)
            elif self.parent.parent.datatype == shared.DataType.SQL.name:
                pagegui.set_item_text(item, 4, self.parent.pagedata.over)
                pagegui.set_item_text(item, 5, self.parent.pagedata.titel)
        self.oldbuf = self.gui.set_oldbuf()
        return True

    def archiveer(self, *args):
        """Toggle archived/revived state, save it and refresh the page."""
        self.parch = not self.parch
        self.savep()
        self.parent.rereadlist = True
        self.vulp()

    def vul_combos(self):
        """(Re)fill the status and category comboboxes from the parent's tables."""
        self.initializing = True
        self.gui.clear_stats()
        self.gui.clear_cats()
        for key in sorted(self.parent.stats.keys()):
            text, value = self.parent.stats[key][:2]
            self.gui.add_stat_choice(text, value)
        for key in sorted(self.parent.cats.keys()):
            text, value = self.parent.cats[key][:2]
            self.gui.add_cat_choice(text, value)
        self.initializing = False

    def get_field_text(self, entry_type):
        "return a screen field's text"
        return self.gui.get_field_text(entry_type)
class Page6(Page):
"pagina 6: voortgang"
def __init__(self, parent):
    """Set up the page state and build the page GUI."""
    super().__init__(parent, pageno=6, standard=False)
    self.current_item = 0
    self.oldtext = ""
    # parallel lists with event timestamps/texts, plus copies used to
    # detect modifications when saving
    self.event_list = []
    self.event_data = []
    self.old_list = []
    self.old_data = []
    self.gui = gui.Page6Gui(parent, self)
def vulp(self):
"""te tonen gegevens invullen in velden e.a. initialisaties
methode aan te roepen voorafgaand aan het tonen van de pagina"""
super().vulp()
self.initializing = True
self.gui.init_textfield()
# self.progress_text.clear()
# self.progress_text.setReadOnly(True)
if self.parent.pagedata:
self.event_list = [x[0] for x in self.parent.pagedata.events]
self.event_list.reverse()
self.old_list = self.event_list[:]
self.event_data = [x[1] for x in self.parent.pagedata.events]
self.event_data.reverse()
self.old_data = self.event_data[:]
if self.parent.parent.is_user:
text = '-- doubleclick or press Shift-Ctrl-N to add new item --'
else:
text = '-- adding new items is disabled --'
self.gui.init_list(text)
for idx, datum in enumerate(self.event_list):
self.gui.add_item_to_list(idx, datum)
if self.parent.parent.datatype == shared.DataType.SQL.name:
self.gui.set_list_callback()
# self.gui.clear_textfield() - zit al in init_textfield
self.oldbuf = (self.old_list, self.old_data)
self.oldtext = ''
self.initializing = False
def savep(self, *args):
"opslaan van de paginagegevens"
super().savep()
# voor het geval er na het aanpassen van een tekst direkt "sla op" gekozen is
# nog even kijken of de tekst al in self.event_data is aangepast.
idx = self.current_item
hlp = self.gui.get_textfield_contents()
if idx > 0:
idx -= 1
if self.event_data[idx] != hlp:
self.event_data[idx] = hlp
self.oldtext = hlp
short_text = hlp.split("\n")[0]
if len(short_text) < 80:
short_text = short_text[:80] + "..."
if self.parent.parent.datatype == shared.DataType.XML.name:
short_text = short_text.encode('latin-1')
self.gui.set_listitem_text(idx + 1, "{} - {}".format(self.event_list[idx], short_text))
self.gui.set_listitem_data(idx + 1)
wijzig = False
if self.event_list != self.old_list or self.event_data != self.old_data:
wijzig = True
hlp = len(self.event_list) - 1
for idx, data in enumerate(self.parent.pagedata.events):
if data != (self.event_list[hlp - idx], self.event_data[hlp - idx]):
self.parent.pagedata.events[idx] = (self.event_list[hlp - idx],
self.event_data[hlp - idx])
for idx in range(len(self.parent.pagedata.events), hlp + 1):
if self.event_data[hlp - idx]:
self.parent.pagedata.events.append((self.event_list[hlp - idx],
self.event_data[hlp - idx]))
if wijzig:
self.update_actie()
# waar is deze voor (self.book.current_item.setText) ?
# self.parent.current_item = self.parent.page0.p0list.topLevelItem(x)
# self.parent.current_item.setText(4, self.parent.pagedata.updated)
self.parent.pages[0].gui.set_item_text(self.parent.current_item, 3,
self.parent.pagedata.updated)
# dit was self.parent.page0.p0list.currentItem().setText( -- is dat niet hetzelfde?
self.old_list = self.event_list[:]
self.old_data = self.event_data[:]
self.oldbuf = (self.old_list, self.old_data)
return True
def goto_prev(self, *args):
"set the selection to the previous row, if possible"
test = self.gui.get_list_row() - 1
if test > 0:
self.gui.set_list_row(test)
def goto_next(self, *args):
"set the selection to the next row, if possible"
test = self.gui.get_list_row() + 1
if test < self.gui.get_list_rowcount():
self.gui.set_list_row(test)
def on_text(self, *args):
"""callback voor wanneer de tekst gewijzigd is
de initializing flag wordt uitgevraagd omdat deze event ook tijdens vulp()
en wijzigen van list positie | |
<gh_stars>0
import copy
from django import forms
from django.db import connection
class AddInstituteForm(forms.Form):
    """Form for adding an institute (name, location, postal code and URL)."""
    name = forms.CharField(
        label='Institute Name',
        help_text='Enter the name of the institute here.',
        min_length=1
    )
    city = forms.CharField(
        label='City',
        help_text='Enter the city of the institute here.',
        min_length=1
    )
    state = forms.CharField(
        label='State',
        help_text='Enter the state of the institute.',
        min_length=1
    )
    country = forms.CharField(
        label='Country',
        help_text='Enter the country the institute is in.',
        min_length=1
    )
    postal = forms.CharField(
        label='Postal Code',
        help_text='Enter the postal code of the institute here.',
        min_length=1
    )
    url = forms.URLField(
        label='Institute URL',
        help_text='Enter a URL of the institute.',
        min_length=1
    )

    def clean(self):
        """Validate form data.

        Rejects digits in the free-text fields, non-numeric postal codes and
        duplicate institute names (checked against the database).
        """
        cleaned_data = super(AddInstituteForm, self).clean()
        text_fields = {
            'name': cleaned_data.get('name'),
            'city': cleaned_data.get('city'),
            'state': cleaned_data.get('state'),
            'country': cleaned_data.get('country'),
        }
        # was `self.add_error('%s' % f, ...)`: the extra '%s' formatting of the
        # field name was a no-op; iterate items() instead of keys()+lookup
        for fname, value in text_fields.items():
            if fname in cleaned_data and any(char.isdigit() for char in value):
                self.add_error(fname, 'Field %s cannot contain numbers.' % fname)
        if 'postal' in cleaned_data and not all(char.isdigit() for char in cleaned_data.get('postal')):
            self.add_error('postal', 'Postal code must be numeric.')
        if 'name' in cleaned_data:
            # duplicate check via raw SQL; parameterized, so injection-safe
            with connection.cursor() as cursor:
                cursor.execute('SELECT `id` ' +
                               'FROM `institute` ' +
                               'WHERE `name`=%s', [cleaned_data['name'], ])
                row = cursor.fetchone()
                if row is not None and row[0]:
                    self.add_error('name', 'Institute field with same name exists.')
        return self.cleaned_data
class AddDepartmentForm(forms.Form):
    """Form for adding a department by name."""
    name = forms.CharField(
        label='Department Name',
        help_text='Enter the name of the department here.',
        min_length=1
    )

    def clean(self):
        """Reject names containing digits and duplicate department names."""
        cleaned_data = super(AddDepartmentForm, self).clean()
        if 'name' in cleaned_data:
            name = cleaned_data.get('name')
            if any(char.isdigit() for char in name):
                self.add_error('name', 'Field %s cannot contain numbers.' % 'name')
            with connection.cursor() as cursor:
                cursor.execute('SELECT `id` ' +
                               'FROM `department` ' +
                               'WHERE `name`=%s', [cleaned_data['name'], ])
                row = cursor.fetchone()
                if row is not None and row[0]:
                    self.add_error('name', 'Department with same name exists.')
        return self.cleaned_data
class AddFieldForm(forms.Form):
    """Form for adding a publication field linked to one or more departments."""
    name = forms.CharField(
        label='Field Name',
        help_text='Enter the name of the field here.',
        min_length=1
    )
    department1 = forms.ChoiceField(
        label='Department(s)',
        help_text='Choose the department(s) the field belongs to. ' +
                  'If a department does not exist. Add one from the menu.',
        choices=[]
    )

    def __init__(self, *args, **kwargs):
        """Populate the department choices from the database."""
        super(AddFieldForm, self).__init__(*args, **kwargs)
        with connection.cursor() as cursor:
            cursor.execute('SELECT `id`, `name` FROM `department`;')
            dept_choices = [(row[0], row[1]) for row in cursor.fetchall()]
        self.fields['department1'].choices = dept_choices

    def clean(self):
        """Validate the name and ensure selected departments are distinct."""
        cleaned_data = super(AddFieldForm, self).clean()
        if 'name' in cleaned_data:
            if any(char.isdigit() for char in cleaned_data.get('name')):
                self.add_error('name', 'Field %s cannot contain numbers.' % 'name')
            with connection.cursor() as cursor:
                cursor.execute('SELECT `id` ' +
                               'FROM `publication_field` ' +
                               'WHERE `title`=%s', [cleaned_data['name'], ])
                row = cursor.fetchone()
                if row is not None and row[0]:
                    self.add_error('name', 'Publication field with same name exists.')
        # count the contiguous run of department1..departmentN keys
        count = 0
        while 'department%d' % (count + 1) in cleaned_data:
            count += 1
        # pairwise-compare the selections; duplicates flag both fields
        for j in range(count):
            for k in range(j + 1, count):
                if cleaned_data['department%d' % (j + 1)] == cleaned_data['department%d' % (k + 1)]:
                    self.add_error('department%d' % (j + 1), 'Two department fields cannot be same')
                    self.add_error('department%d' % (k + 1), 'Two department fields cannot be same')
        return self.cleaned_data
class AddAuthorForm(forms.Form):
    """Form for adding an author (student or faculty member)."""
    first_name = forms.CharField(
        label='First Name',
        strip=True,
        min_length=1,
        help_text='Enter the author\'s first name.',
    )
    middle_name = forms.CharField(
        label='Middle Name',
        strip=True,
        required=False,
        help_text='Enter the author\'s middle name.',
    )
    last_name = forms.CharField(
        label='Last Name',
        strip=True,
        min_length=1,
        help_text='Enter the author\'s last name.',
    )
    email = forms.EmailField(
        label='Email',
        help_text='Enter the author\'s Email here.'
    )
    url = forms.URLField(
        label='Author URL',
        widget=forms.URLInput,
        help_text='Enter a homepage URL for the author.',
        required=False
    )
    type = forms.ChoiceField(
        label='Author Type',
        widget=forms.RadioSelect,
        choices=[
            ('Student', 'Student'),
            ('Faculty', 'Faculty'),
        ],
        help_text='Select the kind of author (Student/Faculty).',
    )
    institute = forms.ChoiceField(
        label='Institute',
        choices=[],
        help_text='Choose the author\'s institute from the list. ' +
                  'If your institute does not exist, add one from the menu.'
    )
    department = forms.ChoiceField(
        label='Department',
        choices=[],
        help_text='Choose the author\'s department from the list. ' +
                  'If your department does not exist, add one from the menu.'
    )

    def __init__(self, *args, **kwargs):
        """Populate institute and department choices from the database."""
        super(AddAuthorForm, self).__init__(*args, **kwargs)
        with connection.cursor() as cursor:
            cursor.execute('SELECT `id`, `name` FROM `department`;')
            dept_choices = [(row[0], row[1]) for row in cursor.fetchall()]
            cursor.execute('SELECT `id`, `name` FROM `institute`;')
            inst_choices = [(row[0], row[1]) for row in cursor.fetchall()]
        self.fields['institute'].choices = inst_choices
        self.fields['department'].choices = dept_choices

    def clean(self):
        """
        Validate form data.

        Name fields must not contain digits, the type must be one of the
        radio choices, and the email must not already exist in the database.
        """
        cleaned_data = super(AddAuthorForm, self).clean()
        typ = cleaned_data.get('type')
        # the three name fields share the same "no digits" rule; one loop
        # replaces three copy-pasted checks
        for fname in ('first_name', 'middle_name', 'last_name'):
            if fname in cleaned_data and any(char.isdigit() for char in cleaned_data.get(fname)):
                self.add_error(fname, 'Name cannot contain numbers.')
        if 'type' in cleaned_data and typ not in ('Student', 'Faculty'):
            self.add_error('type', 'Invalid value for type.')
        if 'email' in cleaned_data:
            with connection.cursor() as cursor:
                cursor.execute('SELECT `id` ' +
                               'FROM `author` ' +
                               'WHERE `email`=%s', [cleaned_data['email'], ])
                row = cursor.fetchone()
                if row is not None and row[0]:
                    self.add_error('email', 'Author with same Email exists.')
        return self.cleaned_data
class AddPublisherForm(forms.Form):
    """Form for adding a publisher (journal, conference or publishing house)."""
    name = forms.CharField(
        label='Publisher Name',
        strip=True,
        min_length=1,
        help_text='Enter the publisher\'s name.',
    )
    url = forms.URLField(
        label='Publisher URL',
        widget=forms.URLInput,
        help_text='Enter a homepage URL for the publisher.',
    )
    type = forms.ChoiceField(
        label='Publisher Type',
        widget=forms.RadioSelect,
        choices=[
            ('Journal', 'Journal'),
            ('Conference', 'Conference'),
            ('Publishing House', 'Publishing House'),
        ],
        help_text='Select the kind of publisher.',
    )

    def clean(self):
        """
        Validate form data.

        The name must not contain digits, the type must be one of the radio
        choices, and the URL must not already exist in the database.
        """
        cleaned_data = super(AddPublisherForm, self).clean()
        typ = cleaned_data.get('type')
        if 'name' in cleaned_data and any(char.isdigit() for char in cleaned_data.get('name')):
            # BUG FIX: was add_error('first_name', ...) but this form has no
            # 'first_name' field -- Django's add_error raises ValueError for
            # unknown fields, crashing validation whenever the name contains
            # a digit. Attach the error to the 'name' field that was checked.
            self.add_error('name', 'Name cannot contain numbers.')
        if 'type' in cleaned_data and typ not in ('Journal', 'Conference', 'Publishing House'):
            self.add_error('type', 'Invalid value for type.')
        if 'url' in cleaned_data:
            with connection.cursor() as cursor:
                cursor.execute('SELECT `id` ' +
                               'FROM `publisher` ' +
                               'WHERE `url`=%s', [cleaned_data['url'], ])
                row = cursor.fetchone()
                if row is not None and row[0]:
                    self.add_error('url', 'Publisher with same URL exists.')
        return self.cleaned_data
class AddPublicationForm(forms.Form):
    """Form for adding a publication with publisher, author(s) and field(s)."""
    title = forms.CharField(
        label='Title',
        help_text='The title of the publication.',
        min_length=1
    )
    description = forms.CharField(
        label='Description',
        required=False,
        widget=forms.Textarea,
        help_text='A brief abstract of your publication.'
    )
    url = forms.URLField(
        label='URL',
        help_text='A URL for your publication.',
        required=False,
    )
    location = forms.CharField(
        label='Location',
        required=False,
        help_text='Enter a location for your publication, it can be the location ' +
                  'of the conference or that of the publisher.'
    )
    date = forms.DateField(
        label='Date of Publication',
        help_text='Date your publication was published. The format is "DD/MM/YYYY".',
        input_formats=['%d/%m/%Y', ]
    )
    code = forms.CharField(
        label='Publication Code',
        required=False,
        help_text='ISBN or similar code for the publication.'
    )
    # choices for publisher/author1/field1 are filled in __init__ from the DB
    publisher = forms.ChoiceField(
        label='Publisher',
        help_text='Choose the publisher from the list. If yours doesn\'t exist, add one from the menu.',
        choices=[]
    )
    author1 = forms.ChoiceField(
        label='Author(s)',
        help_text='Choose the author from the list. If yours does not exist, add one from the menu.',
        choices=[]
    )
    degree1 = forms.ChoiceField(
        label='Degree',
        help_text='The degree/status of author',
        choices=[
            ('first', 'First'),
            ('second', 'Second'),
            ('third', 'Third'),
            ('corresponding', 'Corresponding'),
            ('other', 'Other')
        ]
    )
    field1 = forms.ChoiceField(
        label='Field(s)/Area(s)',
        help_text='Choose the field(s) your publication belongs to. If yours does not exist, add one from the menu.',
        choices=[]
    )
def __init__(self, *args, **kwargs):
super(AddPublicationForm, self).__init__(*args, **kwargs)
with connection.cursor() as cursor:
cursor.execute('SELECT `id`, `title` FROM `publication_field`;')
tbl = cursor.fetchall()
field_choices = list(map(
lambda row: (row[0], row[1]),
tbl
))
cursor.execute('SELECT `id`, `first_name`, `last_name` FROM `author`;')
tbl = cursor.fetchall()
author_choices = list(map(
lambda row: (row[0], row[1] + ' ' + row[2]),
tbl
))
cursor.execute('SELECT `id`, `name` FROM `publisher`;')
tbl = cursor.fetchall()
publ_choices = list(map(
lambda row: (row[0], row[1]),
tbl
))
self.fields['publisher'].choices = publ_choices
self.fields['field1'].choices = field_choices
self.fields['author1'].choices = author_choices
def clean(self):
cleaned_data = copy.deepcopy(super(AddPublicationForm, self).clean())
if 'url' in cleaned_data:
with connection.cursor() as cursor:
cursor.execute('SELECT `id` ' +
'FROM `publication` ' +
'WHERE `url`=%s', [cleaned_data['url'], ])
row = cursor.fetchone()
if row is not None and row[0]:
self.add_error('url', 'Publication with same URL exists.')
i = 0
while True:
if not 'author%d' % (i + 1) in cleaned_data:
break
i += 1
for j in range(0, i):
for k in range(j + 1, i):
if cleaned_data['author%d' % (j + 1)] == cleaned_data['author%d' % (k + 1)]:
self.add_error('author%d' % (j + 1), 'Two author fields cannot be same')
self.add_error('author%d' % (k + | |
<filename>discord/client.py
# -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2015-2017 Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from .user import User, Profile
from .invite import Invite
from .object import Object
from .errors import *
from .permissions import Permissions, PermissionOverwrite
from .enums import ChannelType, Status
from .gateway import *
from .emoji import Emoji
from .http import HTTPClient
from .state import ConnectionState
from . import utils, compat
from .backoff import ExponentialBackoff
import asyncio
import aiohttp
import websockets
import logging, traceback
import sys, re, io
import itertools
import datetime
from collections import namedtuple
from os.path import split as path_split
PY35 = sys.version_info >= (3, 5)
log = logging.getLogger(__name__)
# lightweight record for application info returned by the API
AppInfo = namedtuple('AppInfo', 'id name description icon owner')
def app_info_icon_url(self):
    """Return the CDN URL of the application's icon, or '' if it has none."""
    if self.icon:
        return 'https://cdn.discordapp.com/app-icons/{0.id}/{0.icon}.jpg'.format(self)
    return ''
# expose the helper as a read-only property on the namedtuple
AppInfo.icon_url = property(app_info_icon_url)
class Client:
"""Represents a client connection that connects to Discord.
This class is used to interact with the Discord WebSocket and API.
A number of options can be passed to the :class:`Client`.
.. _deque: https://docs.python.org/3.4/library/collections.html#collections.deque
.. _event loop: https://docs.python.org/3/library/asyncio-eventloops.html
.. _connector: http://aiohttp.readthedocs.org/en/stable/client_reference.html#connectors
.. _ProxyConnector: http://aiohttp.readthedocs.org/en/stable/client_reference.html#proxyconnector
Parameters
----------
max_messages : Optional[int]
The maximum number of messages to store in :attr:`messages`.
This defaults to 5000. Passing in `None` or a value less than 100
will use the default instead of the passed in value.
loop : Optional[event loop]
The `event loop`_ to use for asynchronous operations. Defaults to ``None``,
in which case the default event loop is used via ``asyncio.get_event_loop()``.
connector : aiohttp.BaseConnector
The `connector`_ to use for connection pooling. Useful for proxies, e.g.
with a `ProxyConnector`_.
shard_id : Optional[int]
Integer starting at 0 and less than shard_count.
shard_count : Optional[int]
The total number of shards.
fetch_offline_members: bool
Indicates if :func:`on_ready` should be delayed to fetch all offline
members from the guilds the bot belongs to. If this is ``False``\, then
no offline members are received and :meth:`request_offline_members`
must be used to fetch the offline members of the guild.
Attributes
-----------
ws
The websocket gateway the client is currently connected to. Could be None.
loop
The `event loop`_ that the client uses for HTTP requests and websocket operations.
"""
    def __init__(self, *, loop=None, **options):
        """Set up the HTTP client, connection state and readiness events.

        All remaining keyword ``options`` (minus ``connector``) are forwarded
        to the :class:`ConnectionState`.
        """
        self.ws = None
        self.loop = asyncio.get_event_loop() if loop is None else loop
        self._listeners = {}
        self.shard_id = options.get('shard_id')
        self.shard_count = options.get('shard_count')
        # pop() so 'connector' is NOT forwarded to ConnectionState below
        connector = options.pop('connector', None)
        self.http = HTTPClient(connector, loop=self.loop)
        self.connection = ConnectionState(dispatch=self.dispatch, chunker=self._chunker,
                                          syncer=self._syncer, http=self.http, loop=self.loop, **options)
        self.connection.shard_count = self.shard_count
        # _closed: set on close(); _ready: set by handle_ready()
        self._closed = asyncio.Event(loop=self.loop)
        self._ready = asyncio.Event(loop=self.loop)
        # if VoiceClient.warn_nacl:
        #     VoiceClient.warn_nacl = False
        #     log.warning("PyNaCl is not installed, voice will NOT be supported")
    # internals
    @asyncio.coroutine
    def _syncer(self, guilds):
        """Forward a guild sync request to the gateway websocket."""
        yield from self.ws.request_sync(guilds)
@asyncio.coroutine
def _chunker(self, guild):
if hasattr(guild, 'id'):
guild_id = guild.id
else:
guild_id = [s.id for s in guild]
payload = {
'op': 8,
'd': {
'guild_id': guild_id,
'query': '',
'limit': 0
}
}
yield from self.ws.send_as_json(payload)
    def handle_ready(self):
        """Internal hook run by dispatch() for the 'ready' event; marks the cache usable."""
        self._ready.set()
def _resolve_invite(self, invite):
if isinstance(invite, Invite) or isinstance(invite, Object):
return invite.id
else:
rx = r'(?:https?\:\/\/)?discord\.gg\/(.+)'
m = re.match(rx, invite)
if m:
return m.group(1)
return invite
    @property
    def user(self):
        """Optional[:class:`ClientUser`]: Represents the connected client. None if not logged in."""
        return self.connection.user  # delegated to the connection-state cache
    @property
    def guilds(self):
        """List[:class:`Guild`]: The guilds that the connected client is a member of."""
        return self.connection.guilds  # delegated to the connection-state cache
    @property
    def emojis(self):
        """List[:class:`Emoji`]: The emojis that the connected client has."""
        return self.connection.emojis  # delegated to the connection-state cache
    @property
    def private_channels(self):
        """List[:class:`abc.PrivateChannel`]: The private channels that the connected client is participating on."""
        return self.connection.private_channels  # delegated to the connection-state cache
    @property
    def messages(self):
        """A deque_ of :class:`Message` that the client has received from all
        guilds and private messages.
        The number of messages stored in this deque is controlled by the
        ``max_messages`` parameter.
        """
        return self.connection.messages  # delegated to the connection-state cache
    @property
    def voice_clients(self):
        """List[:class:`VoiceClient`]: Represents a list of voice connections."""
        return self.connection.voice_clients  # delegated to the connection-state cache
    def is_ready(self):
        """bool: Specifies if the client's internal cache is ready for use."""
        return self._ready.is_set()  # set by handle_ready() on the READY event
    @asyncio.coroutine
    def _run_event(self, coro, event_name, *args, **kwargs):
        """Run a single event handler, routing errors to :meth:`on_error`.

        ``CancelledError`` is swallowed so task cancellation is silent; a
        cancellation raised inside ``on_error`` itself is likewise suppressed.
        """
        try:
            yield from coro(*args, **kwargs)
        except asyncio.CancelledError:
            pass
        except Exception:
            try:
                yield from self.on_error(event_name, *args, **kwargs)
            except asyncio.CancelledError:
                pass
    def dispatch(self, event, *args, **kwargs):
        """Dispatch *event* to its consumers.

        Three targets, in order: wait_for-style listener futures registered
        under the event name, the internal ``handle_<event>`` hook, and the
        public ``on_<event>`` coroutine (scheduled as a task).
        """
        log.debug('Dispatching event {}'.format(event))
        method = 'on_' + event
        handler = 'handle_' + event
        listeners = self._listeners.get(event)
        if listeners:
            removed = []
            for i, (future, condition) in enumerate(listeners):
                if future.cancelled():
                    removed.append(i)
                    continue
                try:
                    # condition decides whether this listener is satisfied
                    result = condition(*args)
                except Exception as e:
                    future.set_exception(e)
                    removed.append(i)
                else:
                    if result:
                        # resolve with None / the single arg / the arg tuple
                        if len(args) == 0:
                            future.set_result(None)
                        elif len(args) == 1:
                            future.set_result(args[0])
                        else:
                            future.set_result(args)
                        removed.append(i)
            if len(removed) == len(listeners):
                self._listeners.pop(event)
            else:
                # delete in reverse so earlier indices stay valid
                for idx in reversed(removed):
                    del listeners[idx]
        try:
            actual_handler = getattr(self, handler)
        except AttributeError:
            pass
        else:
            # internal hook runs synchronously, before the user-facing handler
            actual_handler(*args, **kwargs)
        try:
            coro = getattr(self, method)
        except AttributeError:
            pass
        else:
            compat.create_task(self._run_event(coro, method, *args, **kwargs), loop=self.loop)
    @asyncio.coroutine
    def on_error(self, event_method, *args, **kwargs):
        """|coro|
        The default error handler provided by the client.
        By default this prints to ``sys.stderr`` however it could be
        overridden to have a different implementation.
        Check :func:`discord.on_error` for more details.
        """
        # print_exc() reports the active exception, so this relies on being
        # called from within _run_event's except block
        print('Ignoring exception in {}'.format(event_method), file=sys.stderr)
        traceback.print_exc()
    @asyncio.coroutine
    def request_offline_members(self, *guilds):
        """|coro|
        Requests previously offline members from the guild to be filled up
        into the :attr:`Guild.members` cache. This function is usually not
        called. It should only be used if you have the ``fetch_offline_members``
        parameter set to ``False``.
        When the client logs on and connects to the websocket, Discord does
        not provide the library with offline members if the number of members
        in the guild is larger than 250. You can check if a guild is large
        if :attr:`Guild.large` is ``True``.
        Parameters
        -----------
        \*guilds
            An argument list of guilds to request offline members for.
        Raises
        -------
        InvalidArgument
            If any guild is unavailable or not large in the collection.
        """
        # validate the whole collection before issuing any request
        if any(not g.large or g.unavailable for g in guilds):
            raise InvalidArgument('An unavailable or non-large guild was passed.')
        yield from self.connection.request_offline_members(guilds)
# login state management
@asyncio.coroutine
def login(self, token, *, bot=True):
"""|coro|
Logs in the client with the specified credentials.
This function can be used in two different ways.
Parameters
-----------
token: str
The authentication token. Do not prefix this token with
anything as the library will do it for you.
bot: bool
Keyword argument that specifies if the account logging on is a bot
token or not.
Raises
------
LoginFailure
The wrong credentials are passed.
HTTPException
An unknown HTTP related error occurred,
usually when it isn't 200 or the known incorrect credentials
passing status code.
"""
log.info('logging in using static token')
data = yield from self.http.static_login(token, bot=bot)
self.connection.is_bot = bot
    @asyncio.coroutine
    def logout(self):
        """|coro|
        Logs out of Discord and closes all connections.
        """
        yield from self.close()  # close() performs the actual teardown
    @asyncio.coroutine
    def _connect(self):
        """Open the gateway websocket and poll it until cancelled or closed.

        On a RESUME request the websocket is re-created with the previous
        session id and sequence number so no gateway events are lost.
        """
        self.ws = yield from DiscordWebSocket.from_client(self)
        while True:
            try:
                yield from self.ws.poll_event()
            except ResumeWebSocket as e:
                log.info('Got a request to RESUME the websocket.')
                self.ws = yield from DiscordWebSocket.from_client(self, shard_id=self.shard_id,
                                                                  session=self.ws.session_id,
                                                                  sequence=self.ws.sequence,
                                                                  resume=True)
@asyncio.coroutine
def connect(self, *, reconnect=True):
"""|coro|
Creates a websocket connection and lets the websocket listen
to messages from discord. This is a loop that runs the entire
event system and miscellaneous aspects of the library. Control
is not resumed until the WebSocket connection is terminated.
Parameters
-----------
reconnect: bool
If we should attempt reconnecting, either due to internet
failure or a specific failure on Discord's part. Certain
disconnects that lead to bad state will not be handled (such as
invalid sharding payloads or bad tokens).
Raises
-------
GatewayNotFound
If the gateway to | |
# Copyright ClusterHQ Inc. See LICENSE file for details.
"""
AWS provisioner.
"""
import logging
from itertools import izip_longest, repeat
from pyrsistent import PClass, field
from twisted.internet.defer import DeferredList, fail, maybeDeferred
from zope.interface import implementer
from boto.ec2 import connect_to_region
from boto.ec2.blockdevicemapping import (
EBSBlockDeviceType, BlockDeviceMapping,
)
from boto.exception import EC2ResponseError
from ..common import loop_until, poll_until
from ._common import INode, IProvisioner
from ._install import provision_for_any_user
from eliot import Message, start_action, write_failure
from eliot.twisted import DeferredContext
from ._ssh import run_remotely, run_from_args
# Default SSH login user for each supported distribution's cloud image.
_usernames = {
    'centos-7': 'centos',
    'ubuntu-14.04': 'ubuntu',
    'ubuntu-16.04': 'ubuntu',
    'rhel-7': 'ec2-user',
}
IMAGE_NAMES = {
    # Find an image for the appropriate version at the following URL, then get
    # the name of the image. Both CentOS and Ubuntu use consistent names across
    # all regions.
    # https://wiki.centos.org/Cloud/AWS
    'centos-7': 'CentOS Linux 7 x86_64 HVM EBS 1602-b7ee8a69-ee97-4a49-9e68-afaee216db2e-ami-d7e1d2bd.3', # noqa
    # https://cloud-images.ubuntu.com/locator/ec2/
    'ubuntu-14.04': 'ubuntu/images/hvm-ssd/ubuntu-trusty-14.04-amd64-server-20160222', # noqa
    'ubuntu-16.04': 'ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-server-20160627', # noqa
    # https://access.redhat.com/solutions/15356
    # aws ec2 describe-images --owners 309956199498
    'rhel-7': 'RHEL-7.3_HVM_GA-20161026-x86_64-1-Hourly2-GP2',
}
# boto error code meaning the instance id is not (yet) known to EC2
BOTO_INSTANCE_NOT_FOUND = u'InvalidInstanceID.NotFound'
# seconds to wait for an instance to reach the 'running' state
INSTANCE_TIMEOUT = 300
class EliotLogHandler(logging.Handler):
    """Bridge boto's stdlib logging records into Eliot messages."""
    # Only records whose message starts with one of these prefixes (the part
    # before the first ":") are forwarded; everything else is dropped because
    # boto's log is massively too verbose otherwise.
    _to_log = {"Params"}
    def emit(self, record):
        fields = vars(record)
        prefix = fields.get("msg", ":").split(":")[0]
        if prefix in self._to_log:
            Message.new(
                message_type=u'flocker:provision:aws:boto_logs',
                **fields
            ).write()
def _enable_boto_logging():
    """
    Make boto log activity using Eliot.
    """
    boto_logger = logging.getLogger("boto")
    boto_logger.setLevel(logging.INFO)
    boto_logger.addHandler(EliotLogHandler())
# install the bridge at import time so all boto activity is captured
_enable_boto_logging()
class FailedToRun(Exception):
    """
    Raised if a pending AWS instance fails to become running.

    Carries the boto ``state_reason`` of the instance as its argument.
    """
def _check_response_error(e, message_type):
    """
    Re-raise an EC2 error unless it is the transient "instance not found" one.

    Transient errors are only logged so the caller can retry.

    :param boto.exception.EC2ResponseError e: The exception to check.
    :param str message_type: The message type for logging.
    """
    if e.error_code == BOTO_INSTANCE_NOT_FOUND:
        Message.new(
            message_type=message_type,
            reason=e.error_code,
        ).write()
        return
    raise e
def _node_is_booting(instance):
    """
    Check if an instance is still booting, where booting is defined
    as either a pending or rebooting instance that is expected to
    become running.
    :param boto.ec2.instance.Instance instance: The instance to check.
    """
    try:
        instance.update()
    except EC2ResponseError as e:
        # "instance not found" is transient right after creation; retry later
        _check_response_error(
            e,
            u"flocker:provision:aws:node_is_booting:retry"
        )
    Message.new(
        message_type=u"flocker:provision:aws:node_is_booting:update",
        instance_state=instance.state,
        ip_address=instance.ip_address,
    ).write()
    state = instance.state
    if state in (u'pending', u'rebooting'):
        return True
    # A "running" node without a public address yet is still pending.
    return state == u'running' and instance.ip_address is None
def _poll_while(predicate, steps, sleep=None):
    """
    Like common.poll_until, but with the reverse meaning of the predicate.
    """
    def negated():
        return not predicate()
    return poll_until(negated, steps, sleep)
def _wait_until_running(instance):
    """
    Wait until a instance is running.
    :param boto.ec2.instance.Instance instance: The instance to wait for.
    :raises FailedToRun: The instance failed to become running.
    """
    with start_action(
        action_type=u"flocker:provision:aws:wait_until_running",
        instance_id=instance.id,
    ) as context:
        # Since we are refreshing the instance's state once in a while
        # we may miss some transitions. So, here we are waiting until
        # the node has transitioned out of the original state and then
        # check if the new state is the one that we expect.
        _poll_while(lambda: _node_is_booting(instance),
                    repeat(1, INSTANCE_TIMEOUT))
        context.add_success_fields(instance_state=instance.state)
        context.add_success_fields(instance_state_reason=instance.state_reason)
    # NOTE(review): raised after the Eliot action has already finished
    # successfully, so the failure appears outside the action's scope
    if instance.state != u'running':
        raise FailedToRun(instance.state_reason)
def _async_wait_until_running(reactor, instance):
    """
    Wait until a instance is running.
    :param reactor: The reactor.
    :param boto.ec2.instance.Instance instance: The instance to wait for.
    :return: Deferred that fires when the instance has become running
        or failed to run (within a predefined period of time).
    """
    action = start_action(
        action_type=u"flocker:provision:aws:wait_until_running",
        instance_id=instance.id,
    )
    # validates the terminal state and records it on the Eliot action
    def check_final_state(ignored):
        if instance.state != u'running':
            raise FailedToRun(instance.state_reason)
        action.add_success_fields(
            instance_state=instance.state,
            instance_state_reason=instance.state_reason,
        )
        return instance
    # Deferred-returning predicate: fires True once the node stopped booting
    def finished_booting():
        d = maybeDeferred(_node_is_booting, instance)
        d.addCallback(lambda x: not x)
        return d
    with action.context():
        # Since we are refreshing the instance's state once in a while
        # we may miss some transitions. So, here we are waiting until
        # the node has transitioned out of the original state and then
        # check if the new state is the one that we expect.
        d = loop_until(
            reactor,
            finished_booting,
            repeat(5, INSTANCE_TIMEOUT)
        )
        d = DeferredContext(d)
        d.addCallback(check_final_state)
        d.addActionFinish()
        return d.result
@implementer(INode)
class AWSNode(PClass):
    """A provisioned EC2 instance, wrapping a boto Instance object."""
    _provisioner = field(mandatory=True)
    _instance = field(mandatory=True)
    distribution = field(mandatory=True)
    name = field(mandatory=True)
    @property
    def address(self):
        # public IPv4 address as bytes (encode('ascii') on the boto value)
        return self._instance.ip_address.encode('ascii')
    @property
    def private_address(self):
        # private IPv4 address as bytes
        return self._instance.private_ip_address.encode('ascii')
    def destroy(self):
        """Terminate the underlying EC2 instance."""
        with start_action(
            action_type=u"flocker:provision:aws:destroy",
            instance_id=self._instance.id,
        ):
            self._instance.terminate()
    def get_default_username(self):
        """
        Return the username available by default on a system.
        """
        return _usernames[self.distribution]
    def provision(self, package_source, variants=()):
        """
        Provision flocker on this node.
        :param PackageSource package_source: See func:`task_install_flocker`
        :param set variants: The set of variant configurations to use when
            provisioning
        """
        return provision_for_any_user(self, package_source, variants)
    def reboot(self):
        """
        Reboot the node.
        :return Effect:
        """
        def do_reboot(_):
            # runs after the remote "sync" finished, so disks are flushed
            with start_action(
                action_type=u"flocker:provision:aws:reboot",
                instance_id=self._instance.id,
            ):
                self._instance.reboot()
                _wait_until_running(self._instance)
        return run_remotely(
            username="root",
            address=self.address,
            commands=run_from_args(["sync"])
        ).on(success=do_reboot)
@implementer(IProvisioner)
class AWSProvisioner(PClass):
    """
    A provisioner that creates nodes on AWS.
    :ivar boto.ec2.connection.EC2Connection _connection: The boto connection to
        use.
    :ivar bytes _keyname: The name of an existing ssh public key configured
        with the cloud provider.
    :ivar _security_groups: List of security groups to put the instances
        created by this provisioner in.
    :type _security_groups: `list` of `bytes`
    :param bytes _zone: The AWS availability zone to put instances created by
        this provisioner in.
    """
    # immutable (pyrsistent) fields; all must be supplied at construction
    _connection = field(mandatory=True)
    _keyname = field(type=bytes, mandatory=True)
    _security_groups = field(mandatory=True)
    _zone = field(type=bytes, mandatory=True)
    _default_size = field(type=bytes, mandatory=True)
def get_ssh_key(self):
"""
Return the public key associated with the provided keyname.
:return Key: The ssh public key or ``None`` if it can't be determined.
"""
# EC2 only provides the SSH2 fingerprint (for uploaded keys)
# or the SHA-1 hash of the private key (for EC2 generated keys)
# https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_KeyPairInfo.html
return None
def create_node(self, name, distribution, metadata={}):
size = self._default_size
disk_size = 10
with start_action(
action_type=u"flocker:provision:aws:create_node",
name=name,
distribution=distribution,
image_size=size,
disk_size=disk_size,
metadata=metadata,
):
metadata = metadata.copy()
metadata['Name'] = name
disk1 = EBSBlockDeviceType()
disk1.size = disk_size
disk1.delete_on_termination = True
diskmap = BlockDeviceMapping()
diskmap['/dev/sda1'] = disk1
images = self._connection.get_all_images(
filters={'name': IMAGE_NAMES[distribution]},
)
# Retry several times, no sleep between retries is needed.
instance = poll_until(
lambda: self._get_node(images[0].id, size, diskmap, metadata),
repeat(0, 10),
lambda x: None)
return AWSNode(
name=name,
_provisioner=self,
_instance=instance,
distribution=distribution,
)
def _get_node(self, image_id, size, diskmap, metadata):
"""
Create an AWS instance with the given parameters.
Return either boto.ec2.instance object or None if the instance
could not be created.
"""
with start_action(
action_type=u"flocker:provision:aws:get_node",
) as context:
[instance] = self._run_nodes(1, image_id, size, diskmap)
context.add_success_fields(instance_id=instance.id)
poll_until(lambda: self._set_metadata(instance, metadata),
repeat(1, INSTANCE_TIMEOUT))
try:
_wait_until_running(instance)
return instance
except FailedToRun:
instance.terminate()
return None # the instance is in the wrong state
    def _run_nodes(self, count, image_id, size, diskmap):
        """
        Launch up to ``count`` AWS instances with the given parameters.

        :param int count: Maximum number of instances to launch
            (``min_count`` is fixed at 1).
        :param image_id: The AMI to boot from.
        :param size: The EC2 instance type to use.
        :param diskmap: ``BlockDeviceMapping`` describing the root volume.
        :return: `list` of ``boto.ec2.instance`` objects belonging to the
            resulting reservation.
        """
        with start_action(
            action_type=u"flocker:provision:aws:create_node:run_nodes",
            instance_count=count,
        ):
            reservation = self._connection.run_instances(
                image_id,
                min_count=1,
                max_count=count,
                key_name=self._keyname,
                instance_type=size,
                security_groups=self._security_groups,
                block_device_map=diskmap,
                placement=self._zone,
                # On some operating systems a tty is required for sudo.
                # Since AWS systems have a non-root user as the login,
                # disable this, so we can use sudo with conch.
            )
            return reservation.instances
def _set_metadata(self, instance, metadata):
"""
Set metadata for an instance.
:param boto.ec2.instance.Instance instance: The instance to configure.
:param dict metadata: The tag-value metadata.
"""
try:
self._connection.create_tags([instance.id], metadata)
return True
except EC2ResponseError as e:
_check_response_error(
e,
u"flocker:provision:aws:set_metadata:retry"
)
return False
def create_nodes(self, reactor, names, distribution, metadata={}):
"""
Create nodes with the given names.
:param reactor: The reactor.
:param name: The names of the nodes.
:type name: list of str
:param str distribution: The name of the distribution to
install on the nodes.
:param dict metadata: Metadata to associate with the nodes.
:return: A list of ``Deferred``s each firing with an INode
when the corresponding node is created. The list has
the same order as :param:`names`.
"""
size = self._default_size
disk_size = 8
action = start_action(
action_type=u"flocker:provision:aws:create_nodes",
instance_count=len(names),
distribution=distribution,
image_size=size,
disk_size=disk_size,
metadata=metadata,
)
with action.context():
disk1 = EBSBlockDeviceType()
disk1.size = disk_size
disk1.delete_on_termination = True
diskmap = BlockDeviceMapping()
diskmap['/dev/sda1'] = disk1
images = self._connection.get_all_images(
filters={'name': IMAGE_NAMES[distribution]},
)
instances = self._run_nodes(
count=len(names),
image_id=images[0].id,
size=size,
diskmap=diskmap
)
def make_node(ignored, name, instance):
return AWSNode(
name=name,
_provisioner=self,
_instance=instance,
distribution=distribution,
)
results = []
for name, instance in izip_longest(names, instances):
if instance is None:
results.append(fail(Exception("Could not run instance")))
else:
node_metadata = metadata.copy()
node_metadata['Name'] = name
d = self._async_get_node(reactor, instance, node_metadata)
d = DeferredContext(d)
d.addCallback(make_node, name, instance)
results.append(d.result)
action_completion = DeferredContext(DeferredList(results))
action_completion.addActionFinish()
# Individual results and errors | |
#!/usr/bin/env python3
# MIT License
#
# Copyright (c) 2020 FABRIC Testbed
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#
# Author: <NAME> (<EMAIL>)
from __future__ import annotations
from abc import abstractmethod, ABC
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from fabric_cf.actor.core.apis.abc_actor_mixin import ABCActorMixin
from fabric_cf.actor.core.apis.abc_reservation_mixin import ABCReservationMixin
from fabric_cf.actor.core.plugins.handlers.config_token import ConfigToken
from fabric_cf.actor.core.time.term import Term
from fabric_cf.actor.core.util.reservation_set import ReservationSet
from fabric_cf.actor.core.kernel.resource_set import ResourceSet
from fabric_cf.actor.core.util.id import ID
from fabric_cf.actor.core.apis.abc_delegation import ABCDelegation
class ABCPolicy(ABC):
    """
    ABCPolicy encapsulates all policy decisions an actor must make to
    perform its functions. Each actor has one policy instance that controls its
    policy decisions. The policy instance must implement this
    interface together with one or more of the other policy interfaces (depending
    on the role the actor performs).

    Some things to know when implementing a policy:

    - The policy is always called by a single thread. No synchronization is
    required, but method implementations should not block for long periods of
    time, as that would prevent the actor from processing other events.

    - The policy operates on the passed ResourceSets. ResourceSets encapsulate
    the free resource pools and resource requests. The policy satisfies requests
    by using ResourceSet methods to shift resources from one ResourceSet to
    another. Resource attributes etc. flow with these operations, and they drive
    other resource-specific operations, including SHARP ticket delegation.

    - The policy has access to all ResourceData attributes attached to the
    request and the free resource pools. The client-side methods may decorate the
    request with attributes (e.g., bids), which are passed through to the
    server-side policy that handles the request. The client-side also has the
    option of touching the AuthToken passed with the request... this may be a bit
    goofy.

    - If it is necessary to defer an allocation request, return false. At some
    later time, when the policy is ready to satisfy the request (e.g., after an
    auction on an agent), set bidPending to false and arrange a call to
    probePending on the requesting reservation. This causes the reservation to
    retry the pending operation.
    """
    @abstractmethod
    def prepare(self, *, cycle: int):
        """
        Informs the policy that processing for a new cycle is about to begin. The
        policy should initialize whatever internal state is necessary to process
        a new cycle.

        Note: The cycle number parameter is redundant and is passed for
        convenience. The policy can always obtain the cycle number by calling
        IActor.getCurrentCycle()

        @params cycle: the cycle that is about to begin
        """
    @abstractmethod
    def finish(self, *, cycle: int):
        """
        Informs the policy that all processing for the specified cycle is
        complete. The policy can safely discard any state associated with the
        cycle or any previous cycles.

        Note: The cycle number parameter is redundant and is passed for
        convenience. The policy can always obtain the cycle number by calling
        IActor.getCurrentCycle()

        @params cycle: the cycle whose processing has completed
        """
    @abstractmethod
    def extend(self, *, reservation: ABCReservationMixin, resources: ResourceSet, term: Term):
        """
        Notifies the policy that a reservation is about to be extended. This
        method will be invoked only for reservations whose extensions have not
        been triggered by the policy, e.g., from the management interface. The
        policy should update its state to reflect the extend request.

        @params reservation: reservation to be extended
        @params resources: resource set used for the extension
        @params term: term used for the extension
        """
    @abstractmethod
    def close(self, *, reservation: ABCReservationMixin):
        """
        Notifies the policy that a reservation is about to be closed. This method
        will be invoked for every reservation that is about to be closed, even if
        the close was triggered by the policy itself. The policy should update
        its internal state and cancel pending operations associated with the
        reservation.

        @params reservation: reservation about to be closed
        """
    @abstractmethod
    def closed(self, *, reservation: ABCReservationMixin):
        """
        Notifies the policy that a reservation has been closed. This method will
        be invoked for every reservation that closes successfully. The policy
        must uncommit any resources associated with the reservation, e.g.,
        physical machines, currency, etc.

        Note: For an authority, resources are released using the
        IAuthorityPolicy.release(ResourceSet) method. Authority policy
        implementations should not consider the resources of the passed
        reservation as released. The release will take place once all
        configuration actions complete.

        @params reservation: closed reservation
        """
    @abstractmethod
    def closed_delegation(self, *, delegation: ABCDelegation):
        """
        Notifies the policy that a delegation has been closed. This method will
        be invoked for every delegation that closes successfully. The policy
        must uncommit any resources associated with the delegation, e.g.,
        physical machines, currency, etc.

        @params delegation: closed delegation
        """
    @abstractmethod
    def remove(self, *, reservation: ABCReservationMixin):
        """
        Notifies the policy that a reservation is about to be removed. This
        method will be invoked for each reservation that is to be removed from
        the system. The policy should remove any state that it maintains for the
        reservation.

        Note: Only failed and closed reservations can be removed. The
        system will not invoke this method if the reservation is not closed or
        failed.

        @params reservation: reservation to remove
        """
    @abstractmethod
    def query(self, *, p) -> dict:
        """
        Answers a query from another actor. This method is intended to be used to
        obtain policy-specific parameters and information. This method should be
        used when writing more complex policies requiring additional interaction
        among actors. Instead of extending the proxies to support
        passing/obtaining the required information, policy code can use the query
        interface to request/obtain such information. The implementation should
        not block for prolonged periods of time. If necessary, future versions
        will update this interface to allow query responses to be delivered using
        callbacks.

        @params p: a properties list of query parameters. Can be ``None`` or empty.

        @returns a properties list of outgoing values. If the incoming properties
                 collection is ``None`` or empty, should return all possible
                 properties that can be relevant to the caller.
        """
    @abstractmethod
    def configuration_complete(self, *, action: str, token: ConfigToken, out_properties: dict):
        """
        Notifies the policy that a configuration action for the object
        represented by the token parameter has completed.

        @params action: configuration action. See Config.Target*
        @params token: object or a token for the object whose configuration action has completed
        @params out_properties: output properties produced by the configuration action
        """
    @abstractmethod
    def reset(self):
        """
        Post recovery entry point. This method will be invoked once all revisit
        operations are complete and the actor is ready to operate normally.

        @raises Exception in case of error
        """
    @abstractmethod
    def recovery_starting(self):
        """
        Informs the policy that recovery is about to begin.

        Implementations may use this hook to set up any state needed while
        recovery is in progress.
        """
    @abstractmethod
    def revisit(self, *, reservation: ABCReservationMixin):
        """
        Informs the policy about a reservation. Called during recovery/startup.
        The policy must re-establish any state required for the management of the
        reservation.

        @params reservation: reservation being recovered

        @raises Exception in case of error
        """
    @abstractmethod
    def revisit_delegation(self, *, delegation: ABCDelegation):
        """
        Informs the policy about a delegation. Called during recovery/startup.
        The policy must re-establish any state required for the management of the
        delegation.

        @params delegation: delegation being recovered

        @raises Exception in case of error
        """
    @abstractmethod
    def recovery_ended(self):
        """
        Informs the policy that recovery has completed.

        Any bookkeeping kept only for the duration of recovery can be
        discarded at this point.
        """
@abstractmethod
def set_actor(self, *, actor: | |
promises that use the workflow's output names.
new_promises = [Promise(var, wf_outputs_as_literal_dict[var]) for var in expected_output_names]
return create_task_output(new_promises, self.python_interface)
class ImperativeWorkflow(WorkflowBase):
    """
    A workflow constructed imperatively: entities, inputs, and outputs are
    registered one call at a time (via :py:meth:`add_entity`,
    :py:meth:`add_workflow_input`, and :py:meth:`add_workflow_output`)
    rather than compiled from a decorated Python function.
    """

    def __init__(
        self,
        name: str,
        failure_policy: Optional[WorkflowFailurePolicy] = None,
        interruptible: Optional[bool] = False,
    ):
        """
        :param name: Fully qualified name for the workflow.
        :param failure_policy: Defaults to failing immediately.
        :param interruptible: Default interruptibility applied to the workflow's nodes.
        """
        metadata = WorkflowMetadata(on_failure=failure_policy or WorkflowFailurePolicy.FAIL_IMMEDIATELY)
        workflow_metadata_defaults = WorkflowMetadataDefaults(interruptible)
        self._compilation_state = CompilationState(prefix="")
        self._inputs = {}
        # This unbound inputs construct is just here to help workflow authors detect issues a bit earlier. It just
        # keeps track of workflow inputs that you've declared with add_workflow_input but haven't yet consumed. This
        # is an error that Admin would return at compile time anyways, but this allows flytekit to raise
        # the error earlier.
        self._unbound_inputs = set()
        super().__init__(
            name=name,
            workflow_metadata=metadata,
            workflow_metadata_defaults=workflow_metadata_defaults,
            python_interface=Interface(),
        )

    @property
    def compilation_state(self) -> CompilationState:
        """
        Compilation is done a bit at a time, one task or other entity call at a time. This is why this workflow
        class has to keep track of its own compilation state.
        """
        return self._compilation_state

    @property
    def nodes(self) -> List[Node]:
        """The nodes added to this workflow so far, in insertion order."""
        return self._compilation_state.nodes

    @property
    def inputs(self) -> Dict[str, Promise]:
        """
        This holds the input promises to the workflow. The nodes in these Promise objects should always point to
        the global start node.
        """
        return self._inputs

    def __repr__(self):
        return super().__repr__() + f"Nodes ({len(self.compilation_state.nodes)}): {self.compilation_state.nodes}"

    def execute(self, **kwargs):
        """
        Called by _local_execute. This function is how local execution for imperative workflows runs. Because when an
        entity is added using the add_entity function, all inputs to that entity should've been already declared, we
        can just iterate through the nodes in order and we shouldn't run into any dependency issues. That is, we force
        the user to declare entities already in a topological sort. To keep track of outputs, we create a map to
        start things off, filled in only with the workflow inputs (if any). As things are run, their outputs are stored
        in this map.
        After all nodes are run, we fill in workflow level outputs the same way as any other previous node.
        """
        if not self.ready():
            raise FlyteValidationException(f"Workflow not ready, wf is currently {self}")

        # Create a map that holds the outputs of each node.
        intermediate_node_outputs = {GLOBAL_START_NODE: {}}  # type: Dict[Node, Dict[str, Promise]]

        # Start things off with the outputs of the global input node, i.e. the inputs to the workflow.
        # _local_execute should've already ensured that all the values in kwargs are Promise objects
        for k, v in kwargs.items():
            intermediate_node_outputs[GLOBAL_START_NODE][k] = v

        # Next iterate through the nodes in order.
        for node in self.compilation_state.nodes:
            # Membership test directly on the dict instead of `.keys()`.
            if node not in intermediate_node_outputs:
                intermediate_node_outputs[node] = {}

            # Retrieve the entity from the node, and call it by looking up the promises the node's bindings require,
            # and then fill them in using the node output tracker map we have.
            entity = node.flyte_entity
            entity_kwargs = get_promise_map(node.bindings, intermediate_node_outputs)

            # Handle the calling and outputs of each node's entity
            results = entity(**entity_kwargs)
            expected_output_names = list(entity.python_interface.outputs.keys())

            if isinstance(results, VoidPromise) or results is None:
                continue  # pragma: no cover # Move along, nothing to assign

            # Because we should've already returned in the above check, we just raise an Exception here.
            if len(entity.python_interface.outputs) == 0:
                raise FlyteValueException(results, f"{results} received but should've been VoidPromise or None.")

            # if there's only one output,
            if len(expected_output_names) == 1:
                if entity.python_interface.output_tuple_name and isinstance(results, tuple):
                    intermediate_node_outputs[node][expected_output_names[0]] = results[0]
                else:
                    intermediate_node_outputs[node][expected_output_names[0]] = results
            else:
                if len(results) != len(expected_output_names):
                    raise FlyteValueException(results, f"Different lengths {results} {expected_output_names}")
                for idx, r in enumerate(results):
                    intermediate_node_outputs[node][expected_output_names[idx]] = r

        # The rest of this function looks like the above but now we're doing it for the workflow as a whole rather
        # than just one node at a time.
        if len(self.python_interface.outputs) == 0:
            return VoidPromise(self.name)

        # The values that we return below from the output have to be pulled by fulfilling all of the
        # workflow's output bindings.
        # The return style here has to match what 1) what the workflow would've returned had it been declared
        # functionally, and 2) what a user would return in mock function. That is, if it's a tuple, then it
        # should be a tuple here, if it's a one element named tuple, then we do a one-element non-named tuple,
        # if it's a single element then we return a single element
        if len(self.output_bindings) == 1:
            # Again use presence of output_tuple_name to understand that we're dealing with a one-element
            # named tuple
            if self.python_interface.output_tuple_name:
                return (get_promise(self.output_bindings[0].binding, intermediate_node_outputs),)
            # Just a normal single element
            return get_promise(self.output_bindings[0].binding, intermediate_node_outputs)
        return tuple([get_promise(b.binding, intermediate_node_outputs) for b in self.output_bindings])

    def add_entity(self, entity: Union[PythonTask, LaunchPlan, WorkflowBase], **kwargs) -> Node:
        """
        Anytime you add an entity, all the inputs to the entity must be bound.
        """
        # circular import
        from flytekit.core.node_creation import create_node

        ctx = FlyteContext.current_context()
        if ctx.compilation_state is not None:
            raise Exception("Can't already be compiling")
        with FlyteContextManager.with_context(ctx.with_compilation_state(self.compilation_state)) as ctx:
            n = create_node(entity=entity, **kwargs)

            def get_input_values(input_value):
                # Recursively flatten lists and dicts of inputs into one flat list.
                if isinstance(input_value, list):
                    input_promises = []
                    for x in input_value:
                        input_promises.extend(get_input_values(x))
                    return input_promises
                if isinstance(input_value, dict):
                    # Only the values can carry Promises; keys are ignored.
                    input_promises = []
                    for v in input_value.values():
                        input_promises.extend(get_input_values(v))
                    return input_promises
                return [input_value]

            # Every time an entity is added, mark it as used. The above function though will gather all the input
            # values but we're only interested in the ones that are Promises so let's filter for those.
            # There's probably a way to clean this up, maybe key off of the name instead of value?
            all_input_values = get_input_values(kwargs)
            for input_value in filter(lambda x: isinstance(x, Promise), all_input_values):
                if input_value in self._unbound_inputs:
                    self._unbound_inputs.remove(input_value)
            return n

    def add_workflow_input(self, input_name: str, python_type: Type) -> Interface:
        """
        Adds an input to the workflow.
        """
        if input_name in self._inputs:
            raise FlyteValidationException(f"Input {input_name} has already been specified for wf {self.name}.")
        self._python_interface = self._python_interface.with_inputs(extra_inputs={input_name: python_type})
        self._interface = transform_interface_to_typed_interface(self._python_interface)
        self._inputs[input_name] = Promise(var=input_name, val=NodeOutput(node=GLOBAL_START_NODE, var=input_name))
        self._unbound_inputs.add(self._inputs[input_name])
        return self._inputs[input_name]

    def add_workflow_output(
        self, output_name: str, p: Union[Promise, List[Promise], Dict[str, Promise]], python_type: Optional[Type] = None
    ):
        """
        Add an output with the given name from the given node output.
        """
        if output_name in self._python_interface.outputs:
            raise FlyteValidationException(f"Output {output_name} already exists in workflow {self.name}")

        if python_type is None:
            # `isinstance` instead of exact `type(...) ==` checks; this also
            # catches list/dict subclasses, which equally need an explicit type.
            if isinstance(p, (list, dict)):
                raise FlyteValidationException(
                    f"If specifying a list or dict of Promises, you must specify the python_type type for {output_name}"
                    f" starting with the container type (e.g. List[int]"
                )
            python_type = p.ref.node.flyte_entity.python_interface.outputs[p.var]
            logger.debug(f"Inferring python type for wf output {output_name} from Promise provided {python_type}")

        flyte_type = TypeEngine.to_literal_type(python_type=python_type)

        ctx = FlyteContext.current_context()
        if ctx.compilation_state is not None:
            raise Exception("Can't already be compiling")
        with FlyteContextManager.with_context(ctx.with_compilation_state(self.compilation_state)) as ctx:
            b = binding_from_python_std(
                ctx, output_name, expected_literal_type=flyte_type, t_value=p, t_value_type=python_type
            )
            self._output_bindings.append(b)
            self._python_interface = self._python_interface.with_outputs(extra_outputs={output_name: python_type})
            self._interface = transform_interface_to_typed_interface(self._python_interface)

    def add_task(self, task: PythonTask, **kwargs) -> Node:
        """Add a task node; all of the task's inputs must be supplied in ``kwargs``."""
        return self.add_entity(task, **kwargs)

    def add_launch_plan(self, launch_plan: LaunchPlan, **kwargs) -> Node:
        """Add a launch plan node; all of its inputs must be supplied in ``kwargs``."""
        return self.add_entity(launch_plan, **kwargs)

    def add_subwf(self, sub_wf: WorkflowBase, **kwargs) -> Node:
        """Add a sub-workflow node; all of its inputs must be supplied in ``kwargs``."""
        return self.add_entity(sub_wf, **kwargs)

    def ready(self) -> bool:
        """
        This function returns whether or not the workflow is in a ready state, which means
        * Has at least one node
        * All workflow inputs are bound

        These conditions assume that all nodes and workflow i/o changes were done with the functions above, which
        do additional checking.
        """
        if len(self.compilation_state.nodes) == 0:
            return False
        if len(self._unbound_inputs) > 0:
            return False
        return True
class PythonFunctionWorkflow(WorkflowBase, ClassStorageTaskResolver):
"""
Please read :std:ref:`flyte:divedeep-workflows` first for a high-level understanding of what workflows are in Flyte.
This Python object represents a workflow defined by a function and decorated with the
:py:func:`@workflow <flytekit.workflow>` decorator. Please see notes on that object for additional information.
"""
def __init__(
self,
workflow_function: Callable,
metadata: Optional[WorkflowMetadata],
default_metadata: Optional[WorkflowMetadataDefaults],
):
name = f"{workflow_function.__module__}.{workflow_function.__name__}"
self._workflow_function = workflow_function
native_interface = transform_signature_to_interface(inspect.signature(workflow_function))
# TODO do we need this - can this not be in launchplan only?
# This can be in launch plan only, but is here only so that we don't have to re-evaluate. Or
# we can re-evaluate.
self._input_parameters = None
super().__init__(
name=name,
workflow_metadata=metadata,
workflow_metadata_defaults=default_metadata,
python_interface=native_interface,
)
@property
def function(self):
return self._workflow_function
def task_name(self, t: PythonAutoContainerTask) -> | |
<reponame>tbloch1/HelioML<filename>book/_build/jupyter_execute/08/notebook.py
#!/usr/bin/env python
# coding: utf-8
# # Notebook
#
# **Authors:** <NAME> (<EMAIL>), <NAME> (<EMAIL>), <NAME> (<EMAIL>)
# [](https://upload.wikimedia.org/wikipedia/commons/c/c9/NASA_Spacecraft_Finds_New_Magnetic_Process_in_Turbulent_Space.webm)
# ## Introduction
# Global-scale energy flow throughout Earth's magnetosphere is catalyzed by processes that occur at Earth's magnetopause (MP) in the electron diffusion region (EDR) of magnetic reconnection. Until the launch of the Magnetospheric Multiscale (MMS) mission, only rare, fortuitous circumstances permitted a glimpse of the electron dynamics that break magnetic field lines and energize plasma. MMS employs automated burst triggers onboard the spacecraft and a Scientist-in-the-Loop (SITL) on the ground to select intervals likely to contain diffusion regions. Only low-resolution survey data is available to the SITL, which is insufficient to resolve electron dynamics. A strategy for the SITL, then, is to select all MP crossings. This has resulted in over 35 potential MP EDR encounters but is labor- and resource-intensive; after manual reclassification, just ~0.7% of MP crossings, or 0.0001% of the mission lifetime during MMS's first two years contained an EDR.
#
# In this notebook, we develop a Long-Short Term Memory (LSTM) neural network to detect magnetopause crossings and automate the SITL classification process. An LSTM developed with this notebook has been implemented in the MMS data stream to provide automated predictions to the SITL.
#
#
# This model facilitates EDR studies and helps free-up mission operation costs by consolidating manual classification processes into automated routines.
# **Authors' notes:**
#
# 1. This notebook was developed after the development of the original model in use at the SDC. We have tried our best to replicate the development steps and hyperparameters of that model, but we cannot guarantee that models developed with this notebook will exactly match the performance of the original.
#
# 2. This notebook was designed on, and is best run on, Google Colab. It must either be run on Colab or on a machine with an NVIDIA GPU and cuDNN installed. If your machine does not have an NVIDIA GPU, does not have cuDNN installed, or if you run into issues running this notebook yourself, please open the notebook in Google Colab, which provides you with a virtual GPU to run the notebook. (If TF Keras is unable to identify a GPU to run on, make sure the notebook is set to use one by clicking the "Runtime" tab in the top menu bar, selecting "Change runtime type", selecting "GPU" in the dropdown menu under "Hardware accelerator", and clicking save. Colab will refresh your runtime, and you will need to re-run all cells.):
# <a href="https://colab.research.google.com/drive/1Mh7GEQfXCR5xKvtKmbG9Uh8yAlOtFgwO?usp=sharing" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
#
# ## Import Libraries
#
# To start, we import the necessary libraries for this notebook.
# In[1]:
# Install the NASA PyMMS client used below to talk to the MMS Science Data Center.
get_ipython().system('pip install nasa-pymms')
# In[2]:
from pathlib import Path
from sklearn import preprocessing
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, LSTM, CuDNNLSTM, BatchNormalization, Bidirectional, Reshape, TimeDistributed
from tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint
from matplotlib import pyplot
from sklearn.metrics import roc_curve, auc, confusion_matrix
from keras import backend as K
from pymms.sdc import mrmms_sdc_api as mms
import keras.backend.tensorflow_backend as tfb
import tensorflow as tf
import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 18})
import datetime as dt
import os
import time
import sklearn
import scipy
import pickle
import random
import requests
# ## Download, Preprocess, and Format MMS Data
# After installing and importing the necessary libraries, we download our training and validation data.
# In[3]:
get_ipython().system('wget -O training_data.csv https://zenodo.org/record/3884266/files/original_training_data.csv?download=1')
get_ipython().system('wget -O validation_data.csv https://zenodo.org/record/3884266/files/original_validation_data.csv?download=1')
# After downloading the training and validation data, we preprocess our training data in preparation for training the neural network.
# We first load the data we downloaded above. The data is a table of measurements from the MMS spacecraft, where each row represents individual measurements taken at a given time and where each column represents a feature (variable) recorded at that time. There is an additional column representing the ground truths for each measurement (whether this measurement was selected by a SITL or not). Then, we will adjust the formatting and datatypes of several of the columns and sort the data by the time of the measurement.
# In[4]:
mms_data = pd.read_csv('training_data.csv', index_col=0, infer_datetime_format=True,
                       parse_dates=[0])
# In[5]:
# NOTE(review): in the notebook this bare expression displays the non-selected
# rows for inspection; run as a plain script it is a no-op.
mms_data[mms_data['selected'] == False]
# We save references to data's index and column names for later use and additionally pop off the ground truths column. We will reattach the ground truths column after standardizing and interpolating the data.
# In[6]:
index = mms_data.index
selections = mms_data.pop("selected")
column_names = mms_data.columns
# Since there exists a possibility that the training data contains missing data or data misreported by the MMS spacecraft (reported as either infinity or negative infinity), we need to fill in (interpolate) any missing data.
# In[7]:
mms_data = mms_data.replace([np.inf, -np.inf], np.nan)
mms_data = mms_data.interpolate(method='time', limit_area='inside')
# We normalize all features with standardization:
#
# 
#
# Where x̄ is the mean of the data, and σ is the standard deviation of the data.
#
# Standardization centers each feature on its mean and scales it to unit variance (zero mean, unit variance). Normalization improves the speed and performance of training neural networks as it unifies the scale by which differences in the data are represented without altering the data themselves.
# In[8]:
scaler = preprocessing.StandardScaler()
mms_data = scaler.fit_transform(mms_data)
mms_data = pd.DataFrame(mms_data, index, column_names)
mms_data = mms_data.join(selections)
# Next, we calculate class weights for our data classes (selected data points and non-selected data points). Since the distribution of our data is heavily skewed towards non-selected data points (just 1.9% of all data points in our training data were selected), it's important to give the class of selected data points a higher weight when training. In fact, without establishing these class weights our model would quickly achieve 98% accuracy by naively leaving all data points unselected.
# In[9]:
false_weight = len(mms_data)/(2*np.bincount(mms_data['selected'].values)[0])
true_weight = len(mms_data)/(2*np.bincount(mms_data['selected'].values)[1])
# Our entire dataset is not contiguous, and it contains time intervals with no observations. Therefore, we break it up into contiguous chunks. We can do so by breaking up the data into the windows that the SITLs used to review the data.
# In[10]:
sitl_windows = mms.mission_events('sroi', mms_data.index[0].to_pydatetime(), mms_data.index[-1].to_pydatetime(), sc='mms1')
windows = []
for start, end in zip(sitl_windows['tstart'], sitl_windows['tend']):
    window = mms_data[start:end]
    # Keep only windows that have data and at least two selected points.
    if not window.empty and len(window[window['selected']==True])>1:
        windows.append(window)
# In[11]:
# NOTE(review): notebook display of the collected windows; a no-op as a script.
windows
# Finally, we break up our data into individual sequences that will be fed to our neural network.
# We define a SEQ_LEN variable that will determine the length of our sequences. This variable will also be passed to our network so that it knows how long of a data sequence to expect while training. The choice of sequence length is largely arbitrary.
# In[12]:
SEQ_LEN = 250
# For each window, we assemble two sequences: an X_sequence containing individual data points from our training data and a y_sequence containing the truth values for those data points (whether or not those data points were selected by a SITL).
#
# We add those sequences to four collections: X_train and y_train containing X_sequences and y_sequences for our training data and X_test and y_test containing X_sequences and y_sequences for our testing data. Each window is independently assigned to training with probability 0.6, so roughly 60% of the sequences go to training and the remaining ~40% to testing (the loop below re-draws the split until the training set is the larger one).
# In[13]:
# Repeatedly split the windows into train/test sequence sets until the
# training split ends up larger than the test split (guards against an
# unlucky random partition, since windows are assigned independently).
while True:
    X_train, X_test, y_train, y_test = [], [], [], []
    for window in windows:
        # Each window goes entirely to train (~60%) or test (~40%);
        # keeping whole windows together avoids leaking adjacent
        # measurements across the split.
        if random.random() < 0.6:
            X_target, y_target = X_train, y_train
        else:
            X_target, y_target = X_test, y_test
        # Chop the window into non-overlapping SEQ_LEN-long sequences;
        # a trailing remainder shorter than SEQ_LEN is dropped.
        X_sequence, y_sequence = [], []
        for value in window.values:
            X_sequence.append(value[:-1])   # feature columns
            y_sequence.append(value[-1])    # ground-truth 'selected' flag
            if len(X_sequence) == SEQ_LEN:
                X_target.append(X_sequence)
                y_target.append(y_sequence)
                X_sequence, y_sequence = [], []
    X_train = np.array(X_train)
    X_test = np.array(X_test)
    # The LSTM expects targets shaped (batch, timesteps, 1).
    y_train = np.expand_dims(np.array(y_train), axis=2)
    y_test = np.expand_dims(np.array(y_test), axis=2)
    if len(X_train) > len(X_test):
        break
# We can see how many sequences of data we have for training and testing, respectively:
# In[14]:
print(f"Number of sequences in training data: {len(X_train)}")
print(f"Number of sequences in test data: {len(X_test)}")
# ## Define and Train LSTM
#
# Now that we have processed our data into our training and test sets, we can begin to build and train our LSTM.
# First, | |
## { 'city' : 'WILMINGTON',
## 'nationalNumber': '(910) 444-0268',
## 'number' : '+19104440268',
## 'price' : '0.35',
## 'rateCenter' : 'WILMINGTON',
## 'state' : 'NC'}
## ]
print(numbers[0]["number"])
## +19104440230
"""
kwargs["city"] = city
kwargs["state"] = state
kwargs["zip"] = zip_code
kwargs["areaCode"] = area_code
kwargs["localNumber"] = local_number
kwargs["inLocalCallingArea"] = in_local_calling_area
kwargs["quantity"] = quantity
kwargs["pattern"] = pattern
return self._make_request('get', '/availableNumbers/local', params=kwargs)[0]
def search_available_toll_free_numbers(self, quantity=None, pattern=None, **kwargs):
    """
    Search for available toll free numbers.

    :param int quantity: The maximum number of numbers to return (default 10, maximum 5000)
    :param str pattern: A number pattern that may include letters, digits, and the wildcard characters
    :rtype: list
    :returns: list of available numbers

    Example: Search for 3 toll free numbers with pattern 456::

        numbers = api.search_available_toll_free_numbers(pattern = '*456', quantity = 3)
        print(numbers[0]["number"])
        ## +18444890456
    """
    kwargs.update(quantity=quantity, pattern=pattern)
    return self._make_request('get', '/availableNumbers/tollFree', params=kwargs)[0]
def search_and_order_local_numbers(self,
                                   city=None,
                                   state=None,
                                   zip_code=None,
                                   area_code=None,
                                   local_number=None,
                                   in_local_calling_area=None,
                                   quantity=None,
                                   **kwargs):
    """
    Search for available local numbers and order them in one step.

    :param str city: A city name
    :param str state: A two-letter US state abbreviation
    :param str zip_code: A 5-digit US ZIP code
    :param str area_code: A 3-digit telephone area code
    :param str local_number: The first digits of a telephone number inside an area code,
        used for filtering the results. It must have at least 3 digits and the
        areaCode field must be filled.
    :param str in_local_calling_area: Boolean value to indicate that the search for
        available numbers must consider overlayed areas.
    :param int quantity: The maximum number of numbers to return (default 10, maximum 5000)
    :rtype: list
    :returns: list of ordered numbers; each entry gains an 'id' key parsed
        from the trailing segment of its 'location' URL

    Example: Search _and_ order a single number::

        ordered_numbers = api.search_and_order_local_numbers(zip_code = '27606', quantity = 1)
        print(ordered_numbers[0]["number"])
        ## +19192224444
    """
    kwargs.update(
        city=city,
        state=state,
        zip=zip_code,
        areaCode=area_code,
        localNumber=local_number,
        inLocalCallingArea=in_local_calling_area,
        quantity=quantity,
    )
    ordered = self._make_request('post', '/availableNumbers/local',
                                 params=kwargs)[0]
    # The API reports each ordered number's resource URL; expose the
    # trailing path segment as a convenient 'id'.
    for number in ordered:
        number['id'] = number.get('location', '').split('/')[-1]
    return ordered
def search_and_order_toll_free_numbers(self, quantity, **kwargs):
    """
    Search for available toll free numbers and order them in one step.

    :param int quantity: The maximum number of numbers to order (default 10, maximum 5000)
    :rtype: list
    :returns: list of ordered numbers; each entry gains an 'id' key parsed
        from the trailing segment of its 'location' URL

    Example: Search then order a single toll-free number::

        numbers = api.search_and_order_toll_free_numbers(quantity = 1)
        print(numbers[0]["number"])
        ## +18444841048
    """
    kwargs["quantity"] = quantity
    # Previous version named this variable 'list', shadowing the builtin.
    ordered_numbers = self._make_request(
        'post', '/availableNumbers/tollFree', params=kwargs)[0]
    for item in ordered_numbers:
        # Expose the resource id parsed from the 'location' URL.
        item['id'] = item.get('location', '').split('/')[-1]
    return ordered_numbers
def list_bridges(self, size=None, **kwargs):
    """
    Iterate lazily over the user's bridges.

    :param int size: Used for pagination to indicate the size of each page
        requested when querying the list. Default 25, maximum 1000.
    :rtype: types.GeneratorType
    :returns: generator over bridge dicts

    Example: List bridges 1000 at a time::

        for bridge in api.list_bridges(size=1000):
            print(bridge["id"])
    """
    kwargs["size"] = size
    path = '/users/{0}/bridges'.format(self.user_id)

    def request_page():
        # Each pull fetches the next page from the API.
        return self._make_request('get', path, params=kwargs)

    return get_lazy_enumerator(self, request_page)
def create_bridge(self, call_ids=None, bridge_audio=None, **kwargs):
    """
    Create a bridge.

    :param str call_ids: The call ids to place in the bridge. If omitted,
        the bridge is logically created and can be used to place calls later.
    :param bool bridge_audio: Enable/Disable two way audio path (default = true)
    :rtype: str
    :returns: id of the created bridge

    Example: Create bridge with 2 calls and audio::

        bridge_id = api.create_bridge(call_ids = ['callId1', 'callId2'], bridge_audio = True)
        print(bridge_id)
        # brg-123
    """
    kwargs.update(callIds=call_ids, bridgeAudio=bridge_audio)
    path = '/users/{0}/bridges'.format(self.user_id)
    # The third element of the response tuple carries the new resource id.
    return self._make_request('post', path, json=kwargs)[2]
def get_bridge(self, bridge_id):
    """
    Fetch information about a single bridge.

    :type bridge_id: str
    :param bridge_id: id of a bridge
    :rtype: dict
    :returns: bridge information

    Example: Fetch single bridge by ID::

        my_bridge = api.get_bridge('brg-bridgeId')
        print(my_bridge["state"])
        ## created
    """
    path = '/users/{0}/bridges/{1}'.format(self.user_id, bridge_id)
    return self._make_request('get', path)[0]
def update_bridge(self, bridge_id, call_ids=None, bridge_audio=None, **kwargs):
    """
    Update an existing bridge.

    :type bridge_id: str
    :param bridge_id: id of a bridge
    :param str call_ids: The call ids to place in the bridge. If omitted,
        the bridge is logically created and can be used to place calls later.
    :param bool bridge_audio: Enable/Disable two way audio path (default = true)

    Example: stop bridging audio::

        api.update_bridge('brg-bridgeId', call_ids = ['callId1', 'callId2'], bridge_audio = False)
    """
    kwargs.update(callIds=call_ids, bridgeAudio=bridge_audio)
    path = '/users/{0}/bridges/{1}'.format(self.user_id, bridge_id)
    self._make_request('post', path, json=kwargs)
def list_bridge_calls(self, bridge_id):
    """
    Iterate lazily over the calls that participate in a bridge.

    :type bridge_id: str
    :param bridge_id: id of a bridge
    :rtype: types.GeneratorType
    :returns: generator over call dicts (each with 'id', 'state', 'from',
        'to', 'bridgeId', timing fields, etc.)

    Example: Fetch all calls that were in a bridge::

        call_list = api.list_bridge_calls('bridgeId')
        for call in call_list:
            print(call["id"])
    """
    path = '/users/{0}/bridges/{1}/calls'.format(self.user_id, bridge_id)

    def request_page():
        return self._make_request('get', path)

    return get_lazy_enumerator(self, request_page)
def play_audio_to_bridge(self, bridge_id,
                         file_url=None,
                         sentence=None,
                         gender=None,
                         locale=None,
                         voice=None,
                         loop_enabled=None,
                         **kwargs):
    """
    Play an audio file or a synthesized sentence into a bridge.

    :param str bridge_id: id of a bridge
    :param str file_url: The location of an audio file to play (WAV and MP3 supported).
    :param str sentence: The sentence to speak.
    :param str gender: The gender of the voice used to synthesize the sentence.
    :param str locale: The locale used to get the accent of the voice used to
        synthesize the sentence.
    :param str voice: The voice to speak the sentence.
    :param bool loop_enabled: When value is true, the audio will keep playing in a loop.

    Examples: Play either a file or a spoken sentence::

        api.play_audio_to_bridge('bridgeId', file_url='http://host/path/file.mp3')
        api.play_audio_to_bridge('bridgeId', sentence='Press 0 to complete call', gender='female')
    """
    kwargs.update(
        fileUrl=file_url,
        sentence=sentence,
        gender=gender,
        locale=locale,
        voice=voice,
        loopEnabled=loop_enabled,
    )
    path = '/users/{0}/bridges/{1}/audio'.format(self.user_id, bridge_id)
    self._make_request('post', path, json=kwargs)
def create_conference(self,
from_,
callback_url=None,
callback_timeout=None,
callback_http_method=None,
fallback_url=None,
tag=None,
**kwargs):
"""
Create a conference
:param str ``from_``: The phone number that will host the conference (required)
:param str callback_url: The full server URL where the conference events related to the Conference will be sent
:param str callback_timeout: Determine how long should the
platform wait for callbackUrl's response before timing out in milliseconds.
:param str callback_http_method: Determine if the callback event should be sent via HTTP GET or HTTP POST. \
Values are "GET" or "POST" (if not set the default is POST).
:param str fallback_url: The full server URL used to send the callback event if the request to callbackUrl
fails or timesout
:param str tag: A string that will be included in the callback events of the conference.
:rtype: str
:returns: id of created | |
__init__(self, verbose, name, cmd):
self.name = name
self.cmd = cmd
self.module = None # if this is a module command
self.verbose = verbose
self.doc_with_list = []
def getSynopsis(self, command_list):
    # Return the combined HTML synopsis for this command.
    #
    # Commands can be documented together: either this command is the
    # "main" one of a group (its doc_with_list names the others), or it
    # refers to a main command via cmd["doc_with"].  In both cases the
    # synopsis of every related command that is actually documented
    # (present in command_list) is emitted, one per line, with this
    # command always listed first.  (Python 2 code: uses dict.has_key.)
    if self.doc_with_list:
        # This is the main command of a documentation group.
        syn_cmd_list = []
        for c in self.doc_with_list:
            if command_list.has_key(c):
                syn_cmd_list.append(c)
        syn_cmd_list.sort()
    elif self.cmd["doc_with"] and command_list.has_key(self.cmd["doc_with"]):
        # Documented together with another command: collect the rest of
        # that group (excluding ourselves) plus the main command itself.
        main_cmd = command_list[self.cmd["doc_with"]]
        syn_cmd_list = []
        for c in main_cmd.doc_with_list:
            if (c != self.name) and command_list.has_key(c):
                syn_cmd_list.append(c)
        syn_cmd_list.append(main_cmd.name)
        syn_cmd_list.sort()
    else:
        syn_cmd_list = []
    syn_cmd_list = [self.name] + syn_cmd_list
    synopsis = ""
    for c in syn_cmd_list:
        synopsis = (synopsis
                    + get_synopsis(command_list[c].cmd, 0, 1, "jdocu")
                    + "<br/>")
    # Drop the trailing "<br/>" (5 characters).
    return synopsis[:-5]
def getDoc(self, command_list, avoided_command_list):
    # Return the documentation text for this command.  A command with
    # "doc_with" set borrows the doc text of the referenced command,
    # looked up first among documented commands and otherwise among the
    # avoided (undocumented) ones.  (Python 2 code: uses dict.has_key.)
    ref = self.cmd["doc_with"]
    if not ref:
        return self.cmd["doc"]
    if command_list.has_key(ref):
        return command_list[ref].cmd["doc"]
    return avoided_command_list[ref].cmd["doc"]
def getSeeAlso(self, command_list, avoided_command_list):
    # Like getDoc(), but for the "see_also" cross-reference list: a
    # command documented with another borrows that command's see-also
    # entries.  (Python 2 code: uses dict.has_key.)
    ref = self.cmd["doc_with"]
    if not ref:
        return self.cmd["see_also"]
    if command_list.has_key(ref):
        return command_list[ref].cmd["see_also"]
    return avoided_command_list[ref].cmd["see_also"]
def printLong(self, o, doc, id = '', online = 0,
              namespace = None, no_extern_link = 0):
    # Emit the full documentation entry for this command to the output
    # formatter 'o': anchor, index entries, name, aliases, synopsis,
    # description, providing module, extra doc items and see-also list.
    # 'doc' carries the global command maps; 'online' selects the
    # in-product help layout; 'no_extern_link' suppresses hyperlinks to
    # entities documented elsewhere.  Output ordering is significant.
    command_list = doc['command_list']
    avoided_command_list = doc['avoided_command_list']
    if online:
        cmd_name = o.encode(self.cmd["name"])
    else:
        # Printed manuals use the short name without the namespace prefix.
        cmd_name = o.encode(stripCommandName(self.cmd["name"]))
    o.beginAdd(id, "", cmd_name, 'Command')
    # Anchor so cross references can link to this command.
    o.pr(o.makeTarget(commandId(self.cmd["name"])))
    if namespace:
        # Index under the short name, qualified by its namespace.
        o.pr(o.makeIndex([o.encode(stripCommandName(self.name)),
                          "namespace command",
                          namespace]))
    else:
        o.pr(o.makeIndex([o.encode(self.name)]))
    o.beginDoc()
    # name
    if not online:
        o.beginDocItem('name')
        o.pr('<b>' + o.encode(self.cmd["name"]))
        if self.cmd["deprecated"]:
            o.pr(' — <i>deprecated</i>');
        o.pr('</b>')
        o.endDocItem()
    # aliases
    if (self.cmd["alias"]):
        o.beginDocItem('Alias')
        if self.cmd["namespace"]:
            # Qualify each alias with the namespace it lives in.
            aliases = []
            for a in self.cmd["alias"]:
                aliases.append('<' + self.cmd["namespace"] + '>.' + a)
        else:
            aliases = self.cmd["alias"]
        for a in aliases:
            if namespace:
                o.pr(o.makeIndex([o.encode(stripCommandName(a)),
                                  "namespace command",
                                  namespace]))
            else:
                o.pr(o.makeIndex([o.encode(a)]))
        o.printListWithSep(aliases, None, o.encode, "", "", ", ")
        o.endDocItem()
    # synopsis
    o.beginDocItem('synopsis')
    o.pr(self.getSynopsis(command_list))
    o.endDocItem()
    # description
    o.beginDocItem('description')
    if self.cmd["deprecated"]:
        # Point deprecated commands at their replacement.
        o.pn('This command is deprecated, use '
             + iff(no_extern_link,
                   o.encode(self.cmd["deprecated"]),
                   o.makeLink(commandId(self.cmd["deprecated"]),
                              o.encode(self.cmd["deprecated"])))
             + ' instead.')
        o.pn('')
    o.pr(self.getDoc(command_list, avoided_command_list).strip())
    o.endDocItem()
    # provided by
    if self.module:
        o.beginDocItem('Provided By')
        o.pr(iff(no_extern_link,
                 self.module,
                 o.makeLink(moduleId(self.module), self.module)))
        o.endDocItem()
    # doc items
    if self.cmd["doc_items"]:
        for di in self.cmd["doc_items"]:
            # di is a (title, text) pair.
            o.beginDocItem(di[0].capitalize())
            # hack around See Also
            if di[0].lower() == "see also":
                o.pr(o.encode(di[1].strip()))
            else:
                o.pr(di[1].strip())
            o.endDocItem()
    # see also
    see_also = self.getSeeAlso(command_list, avoided_command_list)
    if see_also:
        # Warn (verbosely) about dangling see-also references, but still
        # print the whole list.
        for c in see_also:
            if not command_list.has_key(c):
                self.verbosePrint(
                    "CmdDesc::printLong(): " +
                    "*** unknown see also reference in command "
                    + self.cmd["name"] +": " + c)
        o.beginDocItem('See Also')
        o.printListWithSep(see_also,
                           iff(no_extern_link, None, commandId),
                           o.encode, "", "", ", ")
        o.endDocItem()
    o.endDoc()
    o.endAdd()
def printComponentCommand(self, o, doc, id = '',
                          namespace = None, no_extern_link = 0):
    # Emit a compact documentation entry for a component command: an
    # anchor and index entry followed by a single item whose heading is
    # the synopsis and whose body is the description text.
    command_list = doc['command_list']
    avoided_command_list = doc['avoided_command_list']
    cmd_name = o.encode(stripCommandName(self.cmd["name"]))
    o.beginAdd(id, "", cmd_name, 'Command')
    # Anchor so cross references can link to this command.
    o.pr(o.makeTarget(commandId(self.cmd["name"])))
    if namespace:
        # Index under the short name, qualified by its namespace.
        o.pr(o.makeIndex([o.encode(stripCommandName(self.name)),
                          "namespace command",
                          namespace]))
    else:
        o.pr(o.makeIndex([o.encode(self.name)]))
    o.beginDoc()
    # The synopsis doubles as the doc-item heading.
    o.beginDocItem(get_synopsis(self.cmd, 0, 1, "jdocu"))
    o.pr(self.getDoc(command_list, avoided_command_list).strip())
    o.endDocItem()
    o.endDoc()
    o.endAdd()
#
# Module Description
#
class ModuleDesc(GenericDesc):
    # Documentation description of a Simics module: the file it is
    # loaded from, whether it can be unloaded, and the classes, haps and
    # global commands it provides.  (Python 2 code: uses cmp-style sort.)
    def __init__(self, verbose, name, filename, unload):
        self.verbose = verbose
        self.name = name
        self.filename = filename
        # 'unload' is the literal string "UNLOAD" when the module can be
        # unloaded; normalize it to a 0/1 flag.
        if (unload == "UNLOAD"):
            self.unload = 1
        else:
            self.unload = 0
        self.classes = []     # names of classes provided by this module
        self.commands = {}    # global commands, keyed by command name
        self.haps = VT_get_implemented_haps(name)

    def print_name(self, o):
        # Print just the module name.
        o.pr(self.name)

    def print_short(self, o):
        # The short form is identical to the name.
        o.pr(self.name)

    def printLong(self, o, doc, online = 0, no_extern_link = 0):
        # Emit the full documentation entry for the module: filename and
        # tables of provided classes, haps and global commands.
        # 'online' selects the in-product help layout; 'no_extern_link'
        # suppresses hyperlinks to entities documented elsewhere.
        class_list = doc['class_list']
        id = moduleId(self.name)
        o.beginDoc()
        # name
        if not online:
            o.beginDocItem('name')
            o.pr(o.encode(self.name))
            o.endDocItem()
            o.pr(o.makeTarget(id))
        else:
            o.beginAdd('', '', self.name, 'Module')
            o.pr(o.makeIndex([self.name]))
        # filename
        o.beginDocItem('Filename')
        o.pr(' <file>' + os.path.basename(self.filename) + '</file>')
        o.endDocItem()
        # classes
        if self.classes:
            o.beginDocItem('Classes')
            o.beginTable(len(self.classes))
            self.classes.sort()
            for c in self.classes:
                cl = class_list[c]
                o.pr(o.makeRow(o.makeCell(
                    iff(no_extern_link,
                        cl.name,
                        o.makeLink(classId(cl.name), cl.name)))))
            o.endTable()
            o.endDocItem()
        # haps implemented by the module
        if self.haps:
            o.beginDocItem('Haps')
            o.beginTable(len(self.haps))
            self.haps.sort()
            for h in self.haps:
                o.pr(o.makeRow(o.makeCell(
                    iff(no_extern_link,
                        h,
                        o.makeLink(hapId(h), h)))))
            o.endTable()
            o.endDocItem()
        # global commands registered by the module
        if self.commands:
            o.beginDocItem('Global Commands')
            o.beginTable(len(self.commands))
            cmds = self.commands.values()
            cmds.sort(cmp_item)  # Python 2 list.sort with a cmp function
            for c in cmds:
                o.beginRow()
                o.pr(o.makeCell(
                    iff(no_extern_link,
                        o.encode(c.cmd["name"]),
                        o.makeLink(commandId(c.cmd["name"]),
                                   o.encode(c.cmd["name"])))))
                o.beginCell()
                if c.cmd["deprecated"]:
                    o.pr("<i>deprecated</i> — ")
                o.pr(c.cmd["short"])
                o.endCell()
                o.endRow()
            o.endTable()
            o.endDocItem()
        o.endDoc()
#
# group connectors with same base-name together
#
def group_connector_info(connectors):
    # Collapse connectors that share a base name and carry identical
    # info into range entries: {'eth0': i, 'eth1': i, 'eth2': i} becomes
    # {'eth[0-2]': i}.  Connectors without a numeric suffix, singleton
    # numbers, gaps in the numbering, and differing info values all get
    # separate entries.
    #
    # Fixes over the previous version: the final run is flushed exactly
    # once after the scan (the old flush ran inside the loop, leaking
    # partial '[a-b]' entries for every prefix of a run), and the info
    # value is re-read when a new run starts (it was previously stale).
    # Python 2 backquote repr replaced by str(), which is equivalent for
    # non-negative ints and also valid Python 3.
    new_cnts = {}
    cnt_info = {}
    # Split every connector name into (base, numeric suffix).
    for cnt in connectors:
        m = re.match('(.*?)([0-9]*)$', cnt)
        if not m.group(2):
            # No numeric suffix: passes through unchanged.
            new_cnts[m.group(1)] = connectors[cnt]
        elif not m.group(1) in cnt_info:
            cnt_info[m.group(1)] = [int(m.group(2))]
        else:
            cnt_info[m.group(1)] += [int(m.group(2))]
    for cnt in cnt_info:
        cnt_info[cnt].sort()
        nums = cnt_info[cnt]
        runs = []                     # finished (first, last, info) runs
        start = prev = nums[0]
        info = connectors[cnt + str(start)]
        for i in nums[1:]:
            cur_info = connectors[cnt + str(i)]
            # A gap in the numbering or a change of info ends the run.
            if i != prev + 1 or cur_info != info:
                runs.append((start, prev, info))
                start = i
                info = cur_info
            prev = i
        runs.append((start, prev, info))
        for (first, last, info) in runs:
            if last > first:
                new_cnts['%s[%d-%d]' % (cnt, first, last)] = info
            else:
                new_cnts['%s%d' % (cnt, first)] = info
    return new_cnts
#
# Class Description
#
class ClassDesc(GenericDesc):
def __init__(self, verbose, side_effect, internal, c):
    # Collect everything known about Simics class 'c' via the VT_ API:
    # description, parent class, kind, interfaces, attributes and the
    # modules that provide it.  'internal' selects internal-manual
    # behaviour; 'side_effect' is forwarded to the VT_ queries.
    self.verbose = verbose
    self.side_effect = side_effect
    self.internal = internal
    info = VT_get_class_info(c, verbose, side_effect)
    self.name = c
    self.description = info[0]
    if not self.description:
        self.verbosePrint("ClassDesc() *** no description for class " + c)
        self.description = "<todo>No description</todo>"
    self.parent = info[1]
    self.kind = info[2]
    self.ifc_list = info[3]
    self.attr_list = info[4]
    self.module = info[5]
    self.named_ifc_list = info[6]      # (interface, port) pairs
    self.attr_info = {}                # attribute name -> AttrDesc
    self.commands = {}                 # namespace commands of this class
    self.class_hierarchy = []          # filled by updateClassHierarchy()
    # gather attribute information
    ati = VT_get_all_attributes(self.name, verbose, side_effect)
    for a in ati:
        # a = (name, rw-mode, flags, description, type, indexed type);
        # see the assignments below — TODO confirm against VT_ API docs.
        attr_i = AttrDesc()
        attr_i.name = a[0]
        attr_i.rw = a[1]
        attr_i.attributes = a[2]
        attr_i.description = a[3]
        if not attr_i.description:
            self.verbosePrint("ClassDesc() *** no description for attribute " + self.name + "." + attr_i.name)
            attr_i.description = "<todo>No description</todo>"
        attr_i.type = a[4]
        attr_i.indexed_type = a[5]
        if not attr_i.type and not attr_i.indexed_type:
            self.verbosePrint("ClassDesc() *** no type for attribute " + self.name + "." + attr_i.name)
        self.attr_info[attr_i.name] = attr_i
        self.verbosePrint("ClassDesc(): attribute " + attr_i.name)
def updateModules(self, module_list):
    # Register this class with the ModuleDesc of every module that
    # declares it, and shrink self.module to the modules that are
    # actually documented (present in module_list).  Classes with no
    # declaring module are attributed to the core module.
    #
    # Fix: the previous version used a bare 'except:' around the whole
    # update, which also hid genuine errors (e.g. attribute typos); only
    # the dictionary lookup can legitimately fail, with KeyError.
    documented = []
    if self.module:
        for m in self.module:
            # A class may also be declared by a non-loaded module; such
            # modules are simply skipped.
            try:
                md = module_list[m]
            except KeyError:
                continue
            md.classes = md.classes + [self.name]
            documented.append(m)
            self.verbosePrint("ClassDesc::updateModules(): updating module " + m)
    else:
        md = module_list[core_module]
        md.classes = md.classes + [self.name]
        documented = [core_module]
        self.verbosePrint("ClassDesc::updateModules(): updating module " + md.name)
    # update the module list so that it contains only documented modules
    self.module = documented
def updateInterfaces(self, ifc_list, online = 0):
    # Cross-register this class with the global interface map 'ifc_list'
    # and filter this class's own interface list.  Interfaces whose name
    # starts with '_' or that lack documentation are hidden, except in
    # the internal manual or (for undocumented ones) in online help.
    if self.ifc_list:
        new_ifc_list = []
        # Port interfaces: named_ifc_list holds (interface, port) pairs;
        # collect the distinct interface names.
        named_ifcs = set([x[0] for x in self.named_ifc_list])
        for ifc in self.ifc_list + list(named_ifcs):
            # avoid interfaces starting with _ or without documentation
            try:
                desc = VT_get_interface_info(ifc, self.verbose, self.side_effect)
            except:
                # No interface documentation available.
                self.verbosePrint("ClassDesc::updateInterfaces() *** no description for interface" + ifc)
                # set a todo if internal manual
                if self.internal:
                    desc = "<todo>No description found</todo>"
                else:
                    desc = ""
            # ignore internal or not documented interfaces, except when
            # internal or online
            if (not self.internal
                and ((ifc[0] == '_')
                     or (not online and desc == ""))):
                self.verbosePrint("ClassDesc::updatesInterfaces(): " +
                                  "ignoring internal interface " + ifc)
            else:
                # Only plain (non-port) interfaces stay in ifc_list.
                if ifc in self.ifc_list:
                    new_ifc_list.append(ifc)
                # First class implementing this interface creates its
                # IfcDesc entry in the global map.
                try:
                    ifcd = ifc_list[ifc]
                except:
                    ifcd = IfcDesc(self.verbose)
                    ifcd.name = ifc
                    ifcd.description = desc
                    ifc_list[ifc] = ifcd
                self.verbosePrint("ClassDesc::updateInterfaces(): interface" + ifc)
                ifcd.classes.append(self.name)
        # update the interface list to hide the internal ones
        self.ifc_list = new_ifc_list
def updateClassHierarchy(self, class_list):
    # Build this class's ancestor chain (root first) by walking parent
    # links through class_list (name -> ClassDesc), then prune
    # attributes that are inherited from an ancestor so only the
    # attributes this class introduces itself remain (sorted).
    #
    # Fixes: bare 'except:' narrowed to KeyError (the only expected
    # failure of the lookup); dropped the meaningless comparison of a
    # ClassDesc against ""; the inherited-attribute scan now stops at
    # the first ancestor match.
    try:
        current_class = class_list[self.parent]
    except KeyError:
        current_class = None
    while current_class:
        # Prepend so the list ends up ordered root-first.
        self.class_hierarchy = [current_class] + self.class_hierarchy
        self.verbosePrint("ClassDesc::updateClassHierarchy(): parent = "
                          + current_class.parent)
        try:
            current_class = class_list[current_class.parent]
        except KeyError:
            current_class = None
    real_attr_list = []
    for a in self.attr_list:
        inherited = 0
        for cl in self.class_hierarchy:
            if a in cl.attr_list:
                inherited = 1
                break   # first ancestor match is enough
        # nothing found among the ancestors? then keep this attribute
        if not inherited:
            real_attr_list.append(a)
    real_attr_list.sort()
    self.attr_list = real_attr_list
def printLong(self, o, doc, no_extern_link = 0):
ifc_list = doc['ifc_list']
id = classId(self.name)
o.beginAdd(id, id, o.encode(self.name), 'Class')
o.pr(o.makeIndex([self.name]))
o.beginDoc()
# Modules
o.beginDocItem('Provided by')
o.printListWithSep(self.module, iff(no_extern_link, None, moduleId),
o.encode, "", "", ", ")
o.endDocItem()
# Class hierarchy
o.beginDocItem('Class Hierarchy')
class_hier = []
for c in self.class_hierarchy:
class_hier.append(c.name)
if class_hier:
o.printListWithSep(class_hier, iff(no_extern_link, None, classId),
o.encode, "", "", " → ")
o.pr(" → ")
o.pr('<class>' + o.encode(self.name) + '</class>')
o.endDocItem()
# Interfaces
o.beginDocItem('Interfaces Implemented')
if self.ifc_list:
o.printListWithSep(self.ifc_list, iff(no_extern_link, None, ifcId),
o.encode, "", "", ", ")
else:
o.pr('None')
o.endDocItem()
if self.named_ifc_list:
o.beginDocItem('Ports')
def text_transform(ni):
if no_extern_link:
ifc_text = o.encode(ni[0])
else:
ifc_text = o.makeLink(ifcId(ni[0]), o.encode(ni[0]))
return "%s (%s)"%(o.encode(ni[1]), ifc_text)
o.printListWithSep(self.named_ifc_list, None, text_transform,
"", "", ", ")
o.endDocItem()
# Description
o.beginDocItem('description')
o.pn('<insert id=' + o.q(id + "_desc") + '/>')
o.endDocItem()
o.endDoc()
# compute how many attributes there are
attr_nb = 0
for c in self.class_hierarchy:
attr_nb = attr_nb + | |
else:
tmp = self.empty(tsr.shape)
self.ng.power(self.fabs(tsr), order, tmp)
self.ng.sum(tmp, axis, out)
self.ng.power(out, (1.0 / order), out)
return out
def mean(self, tsr, axes, out):
    """
    Arithmetic mean of the elements of ``tsr`` along ``axes``.

    Arguments:
        tsr (GPUTensor): input tensor (2D).
        axes (int or None): axis to reduce over; ``None`` reduces over
            every element of the tensor.
        out (GPUTensor): tensor receiving the result.

    Returns:
        GPUTensor: reference to ``out``.
    """
    if axes is not None:
        self.ng.mean(tsr, axis=axes, out=out)
    else:
        # Flatten to a column vector so a single axis-0 reduction
        # covers the whole tensor.
        flat = tsr.reshape(tsr.shape[0] * tsr.shape[1], 1)
        self.ng.mean(flat, axis=0, out=out)
    return out
def min(self, tsr, axes, out):
    """
    Minimum of the elements of ``tsr`` along ``axes``.

    Arguments:
        tsr (GPUTensor): input tensor (2D).
        axes (int or None): axis to reduce over; ``None`` reduces over
            every element of the tensor.
        out (GPUTensor): tensor receiving the result.

    Returns:
        GPUTensor: reference to ``out``.
    """
    if axes is not None:
        self.ng.min(tsr, axis=axes, out=out)
    else:
        # Flatten to a column vector so a single axis-0 reduction
        # covers the whole tensor.
        flat = tsr.reshape(tsr.shape[0] * tsr.shape[1], 1)
        self.ng.min(flat, axis=0, out=out)
    return out
def max(self, tsr, axes, out):
    """
    Maximum of the elements of ``tsr`` along ``axes``.

    Arguments:
        tsr (GPUTensor): input tensor (2D).
        axes (int or None): axis to reduce over; ``None`` reduces over
            every element of the tensor.
        out (GPUTensor): tensor receiving the result.

    Returns:
        GPUTensor: reference to ``out``.
    """
    if axes is not None:
        self.ng.max(tsr, axis=axes, out=out)
    else:
        # Flatten to a column vector so a single axis-0 reduction
        # covers the whole tensor.
        flat = tsr.reshape(tsr.shape[0] * tsr.shape[1], 1)
        self.ng.max(flat, axis=0, out=out)
    return out
def variance(self, tsr, axes, out, mean=None):
    """
    Variance of the elements of ``tsr`` along ``axes`` using a
    precomputed mean.

    Arguments:
        tsr (GPUTensor): the tensor on which to compute the variance.
        axes (int, list, optional): the dimension(s) along which to
            reduce; ``None`` reduces over all dimensions.
        out (GPUTensor): where the result will be stored.
        mean (GPUTensor): the tensor containing the mean of ``tsr``
            (required).

    Returns:
        GPUTensor: reference to ``out``.

    Raises:
        ValueError: if ``mean`` is not supplied.
    """
    if mean is None:
        logger.error("GPUTensor requires mean to be specified.")
        raise ValueError("mean not specified")
    # Variance is the mean of the squared deviations from the mean.
    deviations = self.ng.square(tsr - mean)
    self.ng.mean(deviations, axis=axes, out=out)
    return out
def fabs(self, x, out):
    """
    Elementwise absolute value of ``x``, written into ``out``.

    Arguments:
        x (GPUTensor): input tensor.
        out (GPUTensor): output tensor.

    Returns:
        GPUTensor: reference to ``out``.
    """
    self.ng.fabs(x, out=out)
    return out
def sqrt(self, x, out):
    """
    Elementwise square root of ``x``, written into ``out``.

    Arguments:
        x (GPUTensor): input tensor.
        out (GPUTensor): output tensor.

    Returns:
        GPUTensor: reference to ``out``.
    """
    self.ng.sqrt(x, out=out)
    return out
def zeros(self, shape, dtype=default_dtype, persist_values=True):
    """
    Allocate a new GPUTensor filled with zeros.

    Arguments:
        shape (tuple): shape of the desired GPUTensor.
        dtype (dtype): optional element datatype.
        persist_values (bool, optional): accepted for interface
            compatibility with the generic backend; this implementation
            does not use it when allocating.

    Returns:
        GPUTensor: the freshly allocated, zero-filled tensor.
    """
    return self.ng.zeros(shape, dtype=dtype)
def ones(self, shape, dtype=default_dtype, persist_values=True):
    """
    Allocate a new GPUTensor of the given shape, initialized to one.

    Arguments:
        shape (tuple): shape of the desired GPUTensor.
        dtype (dtype): optional datatype.
        persist_values (bool, optional): accepted for API compatibility;
            not forwarded to the underlying allocator here.

    Returns:
        GPUTensor: the newly allocated, one-filled tensor.
    """
    return self.ng.ones(shape, dtype=dtype)
def zeros_like(self, ary, dtype=default_dtype, persist_values=True,
               name=None):
    """
    Allocate a zero-filled tensor with the same shape as ``ary``.

    Arguments:
        ary (tensor object): tensor to inherit the dimensions of.
        dtype (data-type, optional): element type for the new tensor.
        persist_values (bool, optional): forwarded to zeros().
        name (string, optional): accepted for API compatibility; not
            forwarded (zeros() takes no name argument).

    Returns:
        Tensor: the newly allocated tensor.

    See Also:
        :py:func:`~neon.backends.backend.Backend.empty`,
        :py:func:`~neon.backends.backend.Backend.ones`,
        :py:func:`~neon.backends.backend.Backend.array`
    """
    return self.zeros(ary.shape, dtype=dtype,
                      persist_values=persist_values)
def empty_like(self, ary, dtype=default_dtype, persist_values=True,
               name=None):
    """
    Allocate an uninitialized tensor with the same shape as ``ary``.

    Arguments:
        ary (tensor object): tensor to inherit the dimensions of.
        dtype (data-type, optional): element type for the new tensor.
        persist_values (bool, optional): forwarded to empty().
        name (string, optional): name for the new tensor, forwarded to
            empty().

    Returns:
        Tensor: the newly allocated tensor.

    See Also:
        :py:func:`~neon.backends.backend.Backend.empty`,
        :py:func:`~neon.backends.backend.Backend.ones`,
        :py:func:`~neon.backends.backend.Backend.array`
    """
    return self.empty(ary.shape, dtype=dtype,
                      persist_values=persist_values, name=name)
def empty(self, shape, dtype=default_dtype, persist_values=True,
          name=None):
    """
    Allocate a new, uninitialized GPUTensor.

    Arguments:
        shape (tuple): shape of the desired GPUTensor.
        dtype (dtype): optional datatype.
        persist_values (bool, optional): accepted for API compatibility;
            not forwarded to the underlying allocator here.
        name (string, optional): accepted for API compatibility; not
            forwarded here.

    Returns:
        GPUTensor: the newly allocated tensor.
    """
    return self.ng.empty(shape, dtype=dtype)
def copy(self, ary):
    """
    Return a new GPUTensor containing a copy of ``ary``.

    Arguments:
        ary (GPUTensor): tensor to duplicate.

    Returns:
        GPUTensor: freshly allocated tensor holding ary's values.
    """
    duplicate = self.empty_like(ary)
    duplicate.copy(ary)
    return duplicate
def array(self, ary, dtype=default_dtype, persist_values=True, name=None,
          allocator=drv.mem_alloc):
    """
    Allocate a new GPUTensor populated from the supplied numpy array.

    Arguments:
        ary (ndarray): numpy array holding the source data.
        dtype (dtype, optional): element type for the new tensor.
        persist_values (bool, optional): accepted for API compatibility;
            not forwarded here.
        name (string): name for the GPUTensor.
        allocator (pycuda): pycuda memory allocator; accepted for API
            compatibility, not forwarded here.

    Returns:
        GPUTensor: the newly created tensor.
    """
    return self.ng.array(ary, dtype=dtype, name=name)
def add(self, left, right, out):
    """
    Elementwise addition: out = left + right.

    Arguments:
        left (GPUTensor, numeric): left-hand side operand.
        right (GPUTensor, numeric): right-hand side operand.
        out (GPUTensor): destination tensor for the result.

    Returns:
        GPUTensor: reference to out
    """
    self.ng.add(left, right, out=out)
    return out
def subtract(self, left, right, out):
    """
    Elementwise subtraction: out = left - right.

    Arguments:
        left (GPUTensor, numeric): left-hand side operand.
        right (GPUTensor, numeric): right-hand side operand.
        out (GPUTensor): destination tensor for the result.

    Returns:
        GPUTensor: reference to out
    """
    self.ng.subtract(left, right, out=out)
    return out
def multiply(self, left, right, out):
    """
    Elementwise multiplication: out = left * right.

    Arguments:
        left (GPUTensor, numeric): left-hand side operand.
        right (GPUTensor, numeric): right-hand side operand.
        out (GPUTensor): destination tensor for the result.

    Returns:
        GPUTensor: reference to out
    """
    self.ng.multiply(left, right, out=out)
    return out
def divide(self, left, right, out):
    """
    Elementwise division: out = left / right.

    Arguments:
        left (GPUTensor, numeric): left-hand side operand (numerator).
        right (GPUTensor, numeric): right-hand side operand (denominator).
        out (GPUTensor): destination tensor for the result.

    Returns:
        GPUTensor: reference to out
    """
    self.ng.divide(left, right, out=out)
    return out
def greater(self, left, right, out):
    """
    Elementwise greater-than test: out = left > right.

    Arguments:
        left (GPUTensor, numeric): left-hand side operand.
        right (GPUTensor, numeric): right-hand side operand.
        out (GPUTensor): destination tensor for the boolean result.

    Returns:
        GPUTensor: reference to out
    """
    self.ng.greater(left, right, out=out)
    return out
def equal(self, left, right, out):
    """
    Elementwise equality test: out = (left == right).

    Operands are assumed to share a shape (or be broadcastable to one).

    Arguments:
        left (GPUTensor, numeric): left-hand side operand.
        right (GPUTensor, numeric): right-hand side operand.
        out (GPUTensor): destination tensor for the boolean result.

    Returns:
        GPUTensor: reference to out
    """
    self.ng.equal(left, right, out=out)
    return out
def not_equal(self, left, right, out):
    """
    Elementwise inequality test: out = (left != right).

    Arguments:
        left (GPUTensor, numeric): left-hand side operand.
        right (GPUTensor, numeric): right-hand side operand.
        out (GPUTensor): destination tensor for the boolean result.

    Returns:
        GPUTensor: reference to out
    """
    self.ng.not_equal(left, right, out=out)
    return out
def clip(self, a, a_min, a_max, out):
"""
Elementwise clipping between a range of specified values
Arguments:
a (GPUTensor): input tensor.
a_min (float): floor value.
a_max (float): ceiling value.
out (GPUTensor): where the result will be stored.
Returns:
GPUTensor: reference to out
"""
| |
= nameMap.get(sp[2].lower(), sp[2][0].upper())
third = nameMap.get(sp[3].lower(), sp[3][0].upper())
paren = [ '(%s,(%s,%s))' % (first, sec, third),
'((%s,%s),%s)' % (first, sec, third),
'((%s,%s),%s)' % (first, third, sec),
'(%s,%s,%s)' % (first, sec, third) ]
else:
sec = nameMap.get(sp[1].lower(), sp[1][0].upper())
third = nameMap.get(sp[2].lower(), sp[2][0].upper())
paren = [ '((%s,%s),%s)' % (first, sec, third),
'((%s,%s),%s)' % (first, third, sec),
'(%s,(%s,%s))' % (first, sec, third),
'(%s,%s,%s)' % (first, sec, third) ]
else:
if isinstance(s, file):
first, sec, third, fourth = '1', '2', '3', '4'
else:
sp = s.split('/')
sp = sp[-1]
sp = sp.split('.')
first, sec, third, fourth = find_shortest_unique_leading_substrings(sp[:4])
paren = [ '((%s,%s),(%s,%s))' % (first, sec, third, fourth),
'((%s,%s),(%s,%s))' % (first, third, sec, fourth),
'((%s,%s),(%s,%s))' % (first, fourth, sec, third),
'(%s,%s,%s,%s)' % (first, sec, third, fourth) ]
return paren
def prepare_plot_kwargs(kwargDict, optionList, fontscaler=1.0):
    '''
    Parse and prepare kwargs in various formats, add new options.
    This is mainly for use with the PlottingArgumentParser defined below
    and would be called like this:

        superTitleKwargs = prepare_kwargs(opt.additionalSuperTitleKwargs,
                                          [('fontsize', opt.superTitleFontsize)],
                                          fontscaler=scaler)
        plt.suptitle(opt.superTitle, **superTitleKwargs)

    where
    - the first argument is any extra arbitrary kwargs that might have been
      passed on the command line
    - then a list of things to set specifically. Defaults for those should
      have been set when the parser was instantiated, but can be overridden.

    kwargDict  - can be a list of strings, i.e. ['blah=2', 'poo=4'],
                 a list of 2-tuples [('blah', 2), ('poo', 4)],
                 or an actual dictionary {'blah': 2, 'poo': 4}
    optionList - list of (key, value) tuples, added to the returned dict
                 ONLY if not already in it
    fontscaler - value to rescale fonts by, if desired - deprecated and
                 may not work

    returns outKwargs - dict of kwargs to pass to other functions

    >>> prepare_plot_kwargs(['blah=2', 'poo=4'], [('fontsize', 10)]) == {'blah': 2.0, 'fontsize': 10.0, 'poo': 4.0}
    True
    >>> prepare_plot_kwargs([('blah', 2), ('poo', 4)], [('fontsize', 10)]) == {'blah': 2.0, 'fontsize': 10.0, 'poo': 4.0}
    True
    >>> prepare_plot_kwargs({'blah':2, 'poo':4}, [('fontsize', 10)]) == {'blah': 2.0, 'fontsize': 10.0, 'poo': 4.0}
    True
    '''
    outKwargs = {}
    if kwargDict:
        error = False
        if isinstance(kwargDict, dict):
            outKwargs = dict(kwargDict)
        elif isinstance(kwargDict, (list, set, tuple)):
            # BUG FIX: sets are not indexable, so materialize to a list
            # before peeking at the first entry.  Insertion into the dict
            # happens in sequence order so later entries override earlier.
            entries = list(kwargDict)
            if isinstance(entries[0], str):
                for kw in entries:
                    # split only on the first '=' so values may contain '='
                    key, val = kw.split('=', 1)
                    outKwargs[key] = val
            elif len(entries[0]) == 2:
                for key, val in entries:
                    outKwargs[key] = val
            else:
                error = True
        else:
            error = True
        if error:
            sys.exit('kwargDict must be a dictionary, or a list, set or tuple containing strings, \
or a list, set or tuple containing 2-tuples')
    # This ensures that if a kwarg is specified at the command line for
    # something, it trumps any actual argparse option for it.
    for arg, setting in optionList:
        outKwargs.setdefault(arg, setting)
    # items() (not iteritems()) works on both Python 2 and 3; snapshot with
    # list() since we assign back into the dict while iterating.
    for key, val in list(outKwargs.items()):
        # Try to convert values to numbers where possible.
        try:
            outKwargs[key] = float(val)
        except (ValueError, TypeError):
            pass
        # Convert the string forms of booleans.
        if val == 'False':
            outKwargs[key] = False
        elif val == 'True':
            outKwargs[key] = True
        # This scaling doesn't really work as intended (kept for
        # backward compatibility).
        if 'fontsize' in key or 'labelsize' in key:
            outKwargs[key] = outKwargs[key] * fontscaler
    return outKwargs
class PlottingHistogramArgumentParser(argparse.ArgumentParser):
    '''A generic argparse parser that comes prepackaged with a bunch of common
    arguments for matplotlib/pyplot histogram plotting.

    Default values for the created parser can be passed in as keyword
    arguments. Pass False for an option's default to remove that built-in
    option entirely.'''
    def __init__(self, **kwargs):
        '''Pull out our option defaults; anything left in kwargs is forwarded
        verbatim to the base ArgumentParser initializer.
        '''
        # Accepted for API compatibility; currently not used to create
        # arguments, but must be popped so they don't reach the base class.
        defaultSubplotKwargs = kwargs.pop('defaultSubplotKwargs', [])
        defaultHistKwargs = kwargs.pop('defaultHistKwargs', [])
        defaultNumBins = kwargs.pop('defaultNumBins', 20)
        defaultBarFaceColor = kwargs.pop('defaultBarFaceColor', 'gray')
        # BUG FIX: the base ArgumentParser was never initialized, so the
        # add_argument calls below failed on a half-constructed object.
        super(PlottingHistogramArgumentParser, self).__init__(**kwargs)
        if defaultNumBins is not False:
            self.add_argument('-n', '--num-bins', dest='numBins', type=int, default=defaultNumBins,
                              help='number of evenly spaced histogram bins (default %s)' % str(defaultNumBins))
        if defaultBarFaceColor is not False:
            self.add_argument('-c', '--bar-color', dest='barFaceColor', type=str, default=defaultBarFaceColor,
                              help='color of histogram bars (default %s)' % str(defaultBarFaceColor))
class PlottingArgumentParser(argparse.ArgumentParser):
'''
A generic argparse parser that comes prepackaged with a bunch of common arguments
for matplotlib/pyplot preloaded.
Default values for the created parser can be passed in as keyword arguments. Pass
a values of 'SUPPRESS' to remove a built in option.
e.g.
my_parser = PlottingArgumentParser(defaultColors='SUPPRESS', defaultMarkers='o*x')
creates a parser with the defaults as defined below, except the default for
the markers is set to 'o*x' and colors is completely removed as a command line
option.
'''
def __init__(self,
prog=None,
usage=None,
description=None,
epilog=None,
version=None,
parents=[],
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prefix_chars='-',
fromfile_prefix_chars=None,
argument_default=None,
conflict_handler='error',
add_help=True,
**kwargs):
#the default named kwargs above are just those from the normal ArgumentParser,
#except for the formatter_class. These are the defaults which can be overwridden
#with keyword arguments passed to the constructor.
#markers "x+|_" were not working in the defaultMarkers, for some reason
option_defaults = {
'defaultSubplotKwargs': [],
#plot defaults
'defaultGreys': ['0.0'],
'defaultColors': None,
'defaultMarkerSizes': [12.0],
'defaultMarkers': 'Dos*^p',
'defaultLineWidth': [3.0],
'defaultLineStyle': ['-', '--', ':'],
'defaultCycleStylesWithinFiles': True,
'defaultPlotKwargs': [],
#axis defaults
'defaultYrange': None,
'defaultXrange': None,
'defaultAxisTickFontsize': 16.0,
'defaultAxisLabelFontsize': 32.0,
'defaultXTickFunction': None,
'defaultTickKwargs': [],
'defaultXTickKwargs': [],
'defaultYTickKwargs': [],
'defaultXTickLabelKwargs': ['weight=bold'],
'defaultYTickLabelKwargs': ['weight=bold'],
'defaultXtick_label_location': 'm',
'defaultYtick_label_location': 'm',
'defaultDrawAxesAtZero': None,
'defaultDrawVerticalGrid': False,
'defaultDrawHorizontalGrid': False,
#axis labels
'defaultXlabel': None,
'defaultYlabel': None,
'defaultXlabelLocation': 's',
'defaultYlabelLocation': 's',
'defaultXlabelKwargs': [],
'defaultYlabelKwargs': [],
#title defaults
'defaultTitleFunc': None,
'defaultTitles': None,
'defaultTitleFontsize': 18.0,
'defaultSuperTitle': None,
'defaultSuperTitleFontsize': 32.0,
'defaultTitleHalign': 'center',
'defaultTitleValign': 'top',
'defaultTitleKwargs': [],
'defaultSuperTitleKwargs': [],
'defaultDZaxKwargs': ['leftw=4', 'lefts=solid', 'rightw=4', 'rights=solid',
'topw=4', 'tops=solid', 'bottomw=4', 'bottoms=solid'],
'defaultNcol': 2,
'defaultOnePlot': None,
'defaultSubplotPerFunction': None,
'defaultInput': True,
'defaultOutput': True,
'defaultMissingOk': None,
'defaultDataColumnFunc': None,
'defaultHatches': None,
'defaultSkipRows': 0,
'defaultRowIgnorePatterns': [],
'defaultSeriesNames': None,
'defaultHistogram': None,
'defaultHistogramKwargs': [],
'defaultHistogramBins': 20,
'defaultLegendTextKwargs': [],
'defaultBarGraph': None,
'defaultTkGui': False
}
#this just pulls out and removes any kwargs matching the keys for the defaults defined above,
#and then passes any remaining on to the base __init__
reduced_kwargs = dict(kwargs)
for key, val in kwargs.iteritems():
if key[0:7] == "default":
if key not in option_defaults:
sys.exit('unknown option %s passed to PlottingArgumentParser.__init__' % key)
else:
if val == 'SUPPRESS':
option_defaults.pop(key)
else:
option_defaults[key] = val
reduced_kwargs.pop(key)
#since the default kwargs are now explictly included in the __init__ and explictly passed
#to the ArgumentParser __init__, reduced_kwargs should actually be empty here
super(PlottingArgumentParser, self).__init__(prog=prog, usage=usage, description=description, epilog=epilog, version=version, parents=parents, formatter_class=formatter_class, prefix_chars=prefix_chars, fromfile_prefix_chars=fromfile_prefix_chars, argument_default=argument_default, conflict_handler=conflict_handler, add_help=add_help, **reduced_kwargs)
if 'defaultSubplotKwargs' in option_defaults:
self.add_argument('--subplot-kwargs', nargs='*', type=str, default=option_defaults['defaultSubplotKwargs'], action=ArgparseActionAppendToDefault,
help='kwargs to be passed on to matplotlib Figure.subplots_adjust function')
if 'defaultDZaxKwargs' in option_defaults:
self.add_argument('--dz-axis-kwargs', nargs='*', type=str, default=option_defaults['defaultDZaxKwargs'], action=ArgparseActionAppendToDefault,
help='settings for axis appearance - interpreted by ME not MPL. w=weight, s=style, examples: rights=bold topw=5.')
################
#argument for plotting of lines/points
styleArgs = self.add_argument_group('ARGUMENTS FOR POINT AND LINE STYLES')
if 'defaultGreys' in option_defaults:
styleArgs.add_argument('-gv', '--grey-values', nargs='*', type=str, default=option_defaults['defaultGreys'],
help='floating point values in range 0.0 - 1.0 (black to white) indicating grey value cycle of lines')
if 'defaultColors' in option_defaults:
styleArgs.add_argument('-cv', '--color-values', nargs='*', type=str, default=option_defaults['defaultColors'],
help='valid matplotlib color specs indicating color cycle of lines')
if 'defaultMarkers' in option_defaults:
styleArgs.add_argument('-m', '--markers', type=str, default=option_defaults['defaultMarkers'],
help='single string with marker designations that will be cycled through for series')
if 'defaultMarkerSizes' in option_defaults:
styleArgs.add_argument('-ms', '--marker-sizes', nargs='*', type=float, default=option_defaults['defaultMarkerSizes'],
help='floating point values indicating size of markers in points for series 1 2 3, or a single value to apply to all')
if 'defaultLineWidth' in option_defaults:
styleArgs.add_argument('-lw', '--line-width', nargs='*', type=float, default=option_defaults['defaultLineWidth'],
help='point size of lines to cycle through')
if 'defaultLineStyle' in option_defaults:
styleArgs.add_argument('-ls', '--line-style', nargs='*', type=str, default=option_defaults['defaultLineStyle'],
help='styles of lines to cycle through')
if 'defaultCycleStylesWithinFiles' in option_defaults:
self.add_argument('--style-per-file', default=option_defaults['defaultCycleStylesWithinFiles'], action='store_false',
help='use the same line/point styles for all series within a given file, rather \
than cycling through styles for each series _within_ a file')
if 'defaultPlotKwargs' in option_defaults:
self.add_argument('--plot-kwargs', nargs='*', type=str, default=option_defaults['defaultPlotKwargs'], action=ArgparseActionAppendToDefault,
help='kwargs to be passed on to matplotlib axes.plot function')
############
#axis labels
axisLabelArgs = self.add_argument_group('ARGUMENTS | |
"""
pass
Architecture = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""The architecture of the ELF file.
Get: Architecture(self: IElfHeader) -> ElfMachine
"""
Is64Bit = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Whether this file is 64 bit or not.
Get: Is64Bit(self: IElfHeader) -> bool
"""
IsValid = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Whether this file contains the magic header at the right offset or not.
Get: IsValid(self: IElfHeader) -> bool
"""
ProgramHeaderCount = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""The count of program headers.
Get: ProgramHeaderCount(self: IElfHeader) -> UInt16
"""
ProgramHeaderEntrySize = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""The size of program headers.
Get: ProgramHeaderEntrySize(self: IElfHeader) -> UInt16
"""
ProgramHeaderOffset = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""The offset of the program header.
Get: ProgramHeaderOffset(self: IElfHeader) -> UInt64
"""
SectionHeaderCount = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""The count of section headers.
Get: SectionHeaderCount(self: IElfHeader) -> UInt16
"""
SectionHeaderEntrySize = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""The size of section headers.
Get: SectionHeaderEntrySize(self: IElfHeader) -> UInt16
"""
SectionHeaderOffset = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""The offset of the section header.
Get: SectionHeaderOffset(self: IElfHeader) -> UInt64
"""
SectionHeaderStringIndex = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""The section header string index.
Get: SectionHeaderStringIndex(self: IElfHeader) -> UInt16
"""
Type = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""The type of ELF file.
Get: Type(self: IElfHeader) -> ElfHeaderType
"""
class IElfPRStatus:
    """ An abstraction of the ELF PRStatus view. """
    # NOTE: auto-generated .NET interop stub -- bodies are intentionally
    # empty; the real implementations live in the CLR assembly.
    def CopyRegistersAsContext(self, context):
        """ CopyRegistersAsContext(self: IElfPRStatus, context: Span[Byte]) -> bool """
        pass
    def __init__(self, *args): #cannot find CLR method
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    # Read-only interop properties (getter/setter/deleter placeholders).
    ProcessId = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """The process id associated with this prstatus
    Get: ProcessId(self: IElfPRStatus) -> UInt32
    """
    ThreadId = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """The thread id of this prstatus.
    Get: ThreadId(self: IElfPRStatus) -> UInt32
    """
class IUnknownVTable(object):
    """ The basic VTable for an IUnknown interface. """
    # Function-pointer slots of the COM IUnknown vtable; stubbed as None
    # here and populated by the interop layer at runtime.
    AddRef = None
    QueryInterface = None
    Release = None
class PEImage(object, IDisposable):
    """
    A class to read information out of PE images (dll/exe).

    PEImage(stream: Stream, leaveOpen: bool)
    PEImage(stream: Stream, leaveOpen: bool, isVirtual: bool)
    """
    # NOTE: auto-generated .NET interop stub -- method bodies are empty on
    # purpose; the real implementations live in the CLR assembly.
    def Dispose(self):
        """ Dispose(self: PEImage) """
        pass
    def GetFileVersionInfo(self):
        """
        GetFileVersionInfo(self: PEImage) -> FileVersionInfo

        Gets the File Version Information that is stored as a resource in the PE file. (This is what the
        version tab of a file's property page is populated with).
        """
        pass
    def Read(self, virtualAddress, dest):
        """ Read(self: PEImage, virtualAddress: int, dest: Span[Byte]) -> int """
        pass
    def RvaToOffset(self, virtualAddress):
        """
        RvaToOffset(self: PEImage, virtualAddress: int) -> int

        Allows you to convert between a virtual address to a stream offset for this module.

        virtualAddress: The address to translate.
        Returns: The position in the stream of the data, -1 if the virtual address doesn't map to any location of the PE image.
        """
        pass
    def TryGetExportSymbol(self, symbolName, offset):
        """
        TryGetExportSymbol(self: PEImage, symbolName: str) -> (bool, UInt64)

        Returns the address of a module export symbol if found

        symbolName: symbol name (without the module name prepended)
        Returns: true if found
        """
        pass
    def __enter__(self, *args): #cannot find CLR method
        """ __enter__(self: IDisposable) -> object """
        pass
    def __exit__(self, *args): #cannot find CLR method
        """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """
        pass
    def __init__(self, *args): #cannot find CLR method
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    @staticmethod # known case of __new__
    def __new__(self, stream, leaveOpen, isVirtual=None):
        """
        __new__(cls: type, stream: Stream, leaveOpen: bool)
        __new__(cls: type, stream: Stream, leaveOpen: bool, isVirtual: bool)
        """
        pass
    def __repr__(self, *args): #cannot find CLR method
        """ __repr__(self: object) -> str """
        pass
    # Read-only interop properties (getter/setter/deleter placeholders).
    # "Undefined behavior if IsValid is False" in the docstrings below was
    # garbled to "lse" by stub generation; restored here.
    CoffHeader = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets a wrapper over this PE image's IMAGE_FILE_HEADER structure. Undefined behavior if IsValid is False.

    Get: CoffHeader(self: PEImage) -> CoffHeader
    """
    CorHeader = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets the managed header information for this image. Undefined behavior if IsValid is False.

    Get: CorHeader(self: PEImage) -> CorHeader
    """
    DefaultPdb = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets the PDB information for this module. If this image does not contain PDB info (or that information
    wasn't included in Stream) this returns null. If multiple PDB streams are present, this method returns the
    last entry.

    Get: DefaultPdb(self: PEImage) -> PdbInfo
    """
    IndexFileSize = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets the file size that this PE image is indexed under.

    Get: IndexFileSize(self: PEImage) -> int
    """
    IndexTimeStamp = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets the timestamp that this PE image is indexed under.

    Get: IndexTimeStamp(self: PEImage) -> int
    """
    IsManaged = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets a value indicating whether this image is managed. (.NET image)

    Get: IsManaged(self: PEImage) -> bool
    """
    IsPE64 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets a value indicating whether this image is for a 64bit processor.

    Get: IsPE64(self: PEImage) -> bool
    """
    IsValid = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets a value indicating whether the given Stream contains a valid DOS header and PE signature.

    Get: IsValid(self: PEImage) -> bool
    """
    Pdbs = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets a list of PDBs associated with this PE image. PE images can contain multiple PDB entries,
    but by convention it's usually the last entry that is the most up to date. Unless you need to enumerate
    all PDBs for some reason, you should use DefaultPdb instead.
    Undefined behavior if IsValid is False.

    Get: Pdbs(self: PEImage) -> ImmutableArray[PdbInfo]
    """
    PEHeader = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets a wrapper over this PE image's IMAGE_OPTIONAL_HEADER. Undefined behavior if IsValid is False.

    Get: PEHeader(self: PEImage) -> PEHeader
    """
    Reader = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Get: Reader(self: PEImage) -> PEReader
    """
    Resources = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets the root resource node of this PEImage.

    Get: Resources(self: PEImage) -> ResourceEntry
    """
    Sections = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets a collection of IMAGE_SECTION_HEADERs in the PE image. Undefined behavior if IsValid is False.

    Get: Sections(self: PEImage) -> ImmutableArray[SectionHeader]
    """
class ResourceEntry(object):
""" An entry in the resource table. """
def GetData(self, *__args):
"""
GetData(self: ResourceEntry, span: Span[Byte]) -> int
GetData[T](self: ResourceEntry, offset: int) -> T
"""
pass
def ToString(self):
""" ToString(self: ResourceEntry) -> str """
pass
def __getitem__(self, *args): #cannot find CLR method
""" x.__getitem__(y) <==> x[y] """
pass
ChildCount = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets the number of children this entry contains. Note that ResourceEntry.Children is capped at
MaxChildrenCount entries. This property returns the total number of entries as defined by the
IMAGE_RESOURCE_DIRECTORY. That means this number may be larger than Children.Count.
Get: ChildCount(self: ResourceEntry) -> int
"""
Children = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets the children resources of this ResourceEntry.
Get: Children(self: ResourceEntry) -> ImmutableArray[ResourceEntry]
"""
Image = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets the PEImage containing this ResourceEntry.
Get: Image(self: ResourceEntry) -> PEImage
"""
IsLeaf = property(lambda self: object(), lambda self, v: None, lambda | |
<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 10 17:49:15 2012
@author: empeeu
"""
import scipy as sp
from scipy import sparse
import scipy.linalg as splinalg
from scipy.sparse import linalg as sparselinalg
import copy
from time import time
from src.pyutil import unique
from src.master.mk_basis import mk_ed_orient
from src.master.mk_basis import get_nrml_jac
import pdb, traceback
class Integrate_z:
""" This class creates the necessary data-structures/matrices to integrate
a field in the z-direction, that is, the dim-1 direction.
def __init__(sol):
def __call__(self, sol, field):
"""
def __init__(self, sol, top2bot=True):
    """Here we make the elemental integration matrices

    @param sol (\c object) The solution data-structure

    @param top2bot (\c bool) Flag which indicates the direction of the
        integration.
        If True (Default), integration will start with the top element.
        If False, it will start from the bottom element.

    @note The starting order determines how the boundary condition will be
        determined. That is, if top2bot, then whatever boundary condition is
        prescribed at the surface will be exactly satisfied.
    """
    ##Set the top-to-bottom preferences
    self.top2bot = top2bot
    ##Tolerance for comparisons to zero (used when testing edge normals)
    self.TOL = 1e-10
    ##Save some useful numbers for indexing:
    #number of basis functions per element type
    self.nb = sol.nb
    ##Calculate the element type offset (start index of each element type's
    #block of degrees of freedom in the stacked global vector)
    self.elm_offset = [nb * n_elm \
        for nb, n_elm in zip(sol.nb, sol.n_elm_type)]
    self.elm_offset = [0] + self.elm_offset[:-1]
    ##The total number of edge degrees of freedom for each element type.
    #Also, save the indexes for the start and stop of each edge number
    #of dofs.
    self.index_ed = [sp.zeros((M.ne, 2)) for M in sol.master]
    self.tot_nb_ed = [0] * len(sol.u_elm_type)
    for i in range(len(sol.u_elm_type)):
        k = 0
        for eds in sol.master[i].basis.ed_type:
            #index of this edge type in the unique edge-type list
            j = (sol.u_ed_type == eds).nonzero()[0][0]
            self.index_ed[i][k, 0] = self.tot_nb_ed[i]
            self.index_ed[i][k, 1] = self.index_ed[i][k, 0] + sol.nb_ed[j]
            self.tot_nb_ed[i] += sol.nb_ed[j]
            k += 1
    #Create oft-used FEM operators: block-diagonal edge mass matrices,
    #one block per edge of the element
    mass_ed = [None for i in range(len(self.tot_nb_ed))]
    for i in range(len(self.tot_nb_ed)):
        mass_ed[i] = sp.zeros((self.tot_nb_ed[i], self.tot_nb_ed[i]))
        ns = 0
        for eds in sol.master[i].basis.ed_type:
            j = (sol.u_ed_type == eds).nonzero()[0][0]
            ne = ns + sol.nb_ed[j]
            mass_ed[i][ns:ne, ns:ne] = sol.master_ed[j].M
            ns = ne
    self.mass_ed = mass_ed
    #Build the element-local matrix inverses
    tic = time()
    print "Making the global A matrix for z-integration.",
    ##The global A matrix which links elements through the boundary
    #conditions.
    self.A = None
    ##The global right-hand-side matrix, which adds the top/bottom boundary
    #data to the solution. This is not always required or desired. For e.g.,
    #if calculating \f$\int_z^0 u = u(0) - u(z)\f$, the standard function has
    #assumed that u(0) == 0. When using the global rhs matrix
    #(setting addbc=True in the __call__), u(0) is taken directly from the
    #3d field supplied. If the boundary data does not come from the 3D field
    #supplied, it has to be added after the fact (using Add2Dto3D for e.g.).
    #The latter situation happens when calculating
    #\f$w = \int_z^0 \nabla \cdot u \f$ since here
    #\f$w(0) = \frac{\partial \eta}{\partial t}\f$, which is a 2D field.
    self.RHS = None
    self.A, self.RHS = self.mk_A(sol)
    print "DONE in", time()-tic
    #Factorization is created lazily on the first solve() call
    self.A_factored = None
    ##To calculate the edge values, the solution from the left and right
    #edges need to be combined using the coefficients in this matrix. The
    #value of the coefficients sum to 1, and will depend on top2bot
    self.lr_coeffs = [0.5 * sp.ones((ne, 2)) for ne in sol.n_ed_type]
    for i in range(len(self.lr_coeffs)):
        #exterior edges have no interior neighbour: zero both coefficients
        #(the second column is re-zeroed at the end for safety)
        self.lr_coeffs[i][sol.ids_exterior_ed[i], :] = 0
        #edges whose normal points along the integration direction take
        #the value from the 'left' element only; those pointing against it
        #take the 'right' element only (near-horizontal normals, below TOL,
        #keep the 0.5/0.5 average)
        ids_l = (sol.nrml[i][0, sol.dim-1, :] * (1 - 2 * self.top2bot) > 0) \
            & (abs(sol.nrml[i][0, sol.dim-1, :]) > self.TOL)
        ids_r = (sol.nrml[i][0, sol.dim-1, :] * (1 - 2 * self.top2bot) < 0) \
            & (abs(sol.nrml[i][0, sol.dim-1, :]) > self.TOL)
        self.lr_coeffs[i][ids_l, 0] = 1
        self.lr_coeffs[i][ids_l, 1] = 0
        self.lr_coeffs[i][ids_r, 0] = 0
        self.lr_coeffs[i][ids_r, 1] = 1
        self.lr_coeffs[i][sol.ids_exterior_ed[i], 1] = 0
def __call__(self, sol, field, addbc=False):
    """Integrate the field over the last (z, i.e. dim-1) dimension.

    @param sol (\c object) The solution data-structure

    @param field (\c float) A list of numpy arrays of the field on the
        element space, one entry per element type.

    @param addbc (\c bool) Flag to add boundary conditions (taken from the
        supplied 3D field itself) to the integration.

    @retval tmp (\c float) List of arrays (same layout as the input) with
        the z-integrated field.
    """
    #Determine whether this is a vector field (4D arrays) and how many
    #scalar fields are stacked together
    isvector = False
    shp = field[0].shape
    n_flds = shp[1]
    if len(shp) == 4:
        isvector = True
        n_flds = shp[2] * shp[1]
    #Flatten each element-type array to 2D (global dofs x fields)
    field = [r.swapaxes(-1, -2).reshape(r.shape[0] * r.shape[2], n_flds, \
        order='F') for r in field]
    #Now stack the different element types into one global vector
    tmp = field.pop(0)
    for fs in field:
        tmp = sp.concatenate((tmp, fs))
    #Add the boundary conditions (which are already present in the given
    #3D field)
    if addbc:
        tmp -= self.RHS * tmp
    #Negate the rhs if top2bot (sign convention of the assembled system)
    if self.top2bot:
        tmp = -tmp
    #Integrate using the (factorized) global matrix
    tmp = self.solve(sol, tmp)
    #Now re-shape and re-distribute the solution per element type, back to
    #the (nb, n_flds, n_elm) layout
    tmp = [tmp[eo : eo + nb * ne, :]\
        .reshape(nb, ne, n_flds, order='F').swapaxes(1, 2)\
        for eo, nb, ne in zip(self.elm_offset, sol.nb, sol.n_elm_type)]
    #Final reshaping if required
    if isvector:
        #BUG FIX: the original did ``tmp.reshape[...]`` -- indexing the
        #``reshape`` attribute of the *list* (always a TypeError) -- and
        #referenced ``self.n_elm_type``, which __init__ never sets.
        #Reshape each element-type array back to its 4D vector layout.
        #TODO(review): confirm the (shp[1], shp[2]) axis ordering against
        #the forward flattening above.
        tmp = [t.reshape(nb, shp[1], shp[2], ne)\
            for t, nb, ne in zip(tmp, self.nb, sol.n_elm_type)]
    return tmp
def solve(self, sol, field):
"""Integrates the field in the z-direction (or dim-1 direction).
The base class uses a direct solver. To
change this, create a new class, inhereting from this one, and change
or update this function only.
@param sol (\c object) The solution data-structure
@param field (\c float) The field to be integrated
@retval integrate_z (\c float) The z-integral of the given field
"""
if self.A_factored == None:
#This is the first time we're solving this system, so for the first
#time we have to factorize the matrix
tic = time()
print "Factoring (LU-decomposition) of the global HDG Matrix ... ",
self.A_factored = sparselinalg.factorized(self.A)
print "DONE in", time()-tic
n_flds = field.shape[1]
integrate_z = sp.zeros_like(field)
for i in xrange(n_flds):
tic = time()
#print "Solving HDG Matrix ... ",
integrate_z[:, i] = self.A_factored(field[:, i].ravel())
#print "DONE in", time()-tic
return integrate_z
def mk_locA(self, sol, jac, jac_fact, jac_ed_on_elm, nrml, elm_type):
"""Makes the unique elemental matrix -- this is made in the real
coordinate space, so it changes based on the jacobians etc.
@param sol (\c object) The solution data-structure
@param jac (\c float) The element-local volume integral Jacobians
@param jac_fact (\c float) The geometric factors
@see master.mk_jacobian_nodal.jacobian
@param jac_ed_on_elm (\c float) The edge-local edge integral Jacobians
formatted on the element. @see sol.get_ed2elm_array
@param nrml (\c float) The edge-local normal vector, with proper sign,
formatted on the element. @see sol.get_ed2elm_array
@param elm_type (\c int) The type of the current element
@retval locA (\c float) The element-local diffusion matrix -- excluding
the lambda-space edge integrals (assumed to be on rhs).
"""
#shorter variable names
master = sol.master[elm_type]
L = master.L
mass_ed = self.mass_ed[elm_type]
nb = master.nb
dim = master.dim
#Initialize locA
locA = sp.zeros((nb, nb))
#Build the unique elemental operators
M = splinalg.inv(sp.dot(master.M, sp.diag(jac)))
B = sp.zeros((nb, nb))
i = dim - 1
for j in range(dim):
#Note, the derivative matrix is in the weak form
B -= sp.dot(master.K[j], sp.diag(jac_fact[:, j, i] * jac))
#Build the edge-integral operators
nz = nrml[:, -1]
#The (1-2 *top2bot) adds a negative sign if top2bot is false, in which
#case our rule for the normal direction has to be reversed
if (abs(nz[0]) > self.TOL):
E_in = sp.dot(mass_ed, \
sp.diag(jac_ed_on_elm * nz * ((nz * (1 - 2 * self.top2bot)) > 0)))
else:
E_in = sp.zeros(mass_ed.shape)
#Note the L.T at the end, which is correct
locA = sp.dot(M, B + sp.dot(L, sp.dot(E_in, L.T)))
#And return
return locA
def mk_A(self, sol):
"""Makes the local real-space matrix inverses
@param sol (\c object) The solution data-structure
@retval locAinv (\c float) A list of the element-local diffusion
matrices, pre-inverted. locAinv[i][j] contains the inverse matrix
of the jth element of type i
@note The inverse is calculated using sp.linalg.inv -- that is, sp.inv
is used for speed instead of the pseudo-inverse .pinv, which could be
used for stability | |
# -*- coding: utf-8 -*-
#
# Copyright 2017-2019 Spotify AB.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import, division, print_function
import logging
from typing import Tuple, Union, Dict, Iterator # noqa: F401
import six
import numpy as np # noqa: F401
import pandas as pd
import tensorflow as tf
from spotify_tensorflow.tf_schema_utils import parse_schema_file, schema_to_feature_spec
from tensorflow_metadata.proto.v0.schema_pb2 import Schema # noqa: F401
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class Datasets(object):
@staticmethod
def _assert_eager(endpoint):
assert tf.executing_eagerly(), "Eager execution is required for a %s endpoint! " \
"Add this add the begining of your main:\n\nimport " \
"tensorflow as tf\ntf.enable_eager_execution()\n\n" % \
endpoint
@classmethod
def parse_schema(cls, schema_path):
# type: (str) -> Tuple[Dict[str, Union[tf.FixedLenFeature, tf.VarLenFeature, tf.SparseFeature]], Schema] # noqa: E501
"""
Returns TensorFlow Feature Spec and parsed tf.metadata Schema for given tf.metadata Schema.
:param schema_path: tf.metadata Schema path
"""
schema = parse_schema_file(schema_path)
return schema_to_feature_spec(schema), schema
@classmethod
def parse_schema_from_stats(cls, stats_path):
# type: (str) -> Tuple[Dict[str, Union[tf.FixedLenFeature, tf.VarLenFeature, tf.SparseFeature]], Schema] # noqa: E501
"""
Returns TensorFlow Feature Spec and parsed tf.metadata Schema for given tf.metadata
DatasetFeatureStatisticsList.
:param stats_path: tf.metadata DatasetFeatureStatisticsList path
"""
import tensorflow_data_validation as tfdv
stats = tfdv.load_statistics(stats_path)
schema = tfdv.infer_schema(stats)
return schema_to_feature_spec(schema), schema
@classmethod
def examples_via_schema(cls,
file_pattern, # type: str
schema_path, # type: str
compression_type=None, # type: str
batch_size=128, # type: int
shuffle=True, # type: bool
num_epochs=1, # type: int
shuffle_buffer_size=10000, # type: int
shuffle_seed=None, # type: int
prefetch_buffer_size=1, # type: int
reader_num_threads=1, # type: int
parser_num_threads=2, # type: int
sloppy_ordering=False, # type: bool
drop_final_batch=False # type: bool
):
# type: (...) -> tf.data.Dataset
"""Get `Dataset` of parsed `Example` protos.
:param file_pattern: List of files or patterns of file paths containing
`Example` records. See `tf.gfile.Glob` for pattern rules
:param schema_path: tf.metadata Schema path
:param compression_type: TFRecord compression type, see `tf.data.TFRecordDataset` doc
:param batch_size: see `tensorflow.contrib.data.make_batched_features_dataset` doc
:param shuffle: see `tensorflow.contrib.data.make_batched_features_dataset` doc
:param num_epochs: see `tensorflow.contrib.data.make_batched_features_dataset` doc
:param shuffle_buffer_size: see `tensorflow.contrib.data.make_batched_features_dataset` doc
:param shuffle_seed: see `tensorflow.contrib.data.make_batched_features_dataset` doc
:param prefetch_buffer_size: see `tensorflow.contrib.data.make_batched_features_dataset` doc
:param reader_num_threads: see `tensorflow.contrib.data.make_batched_features_dataset` doc
:param parser_num_threads: see `tensorflow.contrib.data.make_batched_features_dataset` doc
:param sloppy_ordering: see `tensorflow.contrib.data.make_batched_features_dataset` doc
:param drop_final_batch: see `tensorflow.contrib.data.make_batched_features_dataset` doc
:return `Dataset`, which holds results of the parsing of `Example` protos
"""
return cls._examples(file_pattern,
schema_path=schema_path,
compression_type=compression_type,
batch_size=batch_size,
shuffle=shuffle,
num_epochs=num_epochs,
shuffle_buffer_size=shuffle_buffer_size,
shuffle_seed=shuffle_seed,
prefetch_buffer_size=prefetch_buffer_size,
reader_num_threads=reader_num_threads,
parser_num_threads=parser_num_threads,
sloppy_ordering=sloppy_ordering,
drop_final_batch=drop_final_batch)
@classmethod
def examples_via_feature_spec(cls,
file_pattern, # type: str
feature_spec, # type: Dict[str, Union[tf.FixedLenFeature, tf.VarLenFeature, tf.SparseFeature]] # noqa: E501
compression_type=None, # type: str
batch_size=128, # type: int
shuffle=True, # type: bool
num_epochs=1, # type: int
shuffle_buffer_size=10000, # type: int
shuffle_seed=None, # type: int
prefetch_buffer_size=1, # type: int
reader_num_threads=1, # type: int
parser_num_threads=2, # type: int
sloppy_ordering=False, # type: bool
drop_final_batch=False # type: bool
):
# type: (...) -> tf.data.Dataset
"""Get `Dataset` of parsed `Example` protos.
:param file_pattern: List of files or patterns of file paths containing
`Example` records. See `tf.gfile.Glob` for pattern rules
:param feature_spec: TensorFlow feature spec
:param compression_type: TFRecord compression type, see `tf.data.TFRecordDataset` doc
:param batch_size: see `tensorflow.contrib.data.make_batched_features_dataset` doc
:param shuffle: see `tensorflow.contrib.data.make_batched_features_dataset` doc
:param num_epochs: see `tensorflow.contrib.data.make_batched_features_dataset` doc
:param shuffle_buffer_size: see `tensorflow.contrib.data.make_batched_features_dataset` doc
:param shuffle_seed: see `tensorflow.contrib.data.make_batched_features_dataset` doc
:param prefetch_buffer_size: see `tensorflow.contrib.data.make_batched_features_dataset` doc
:param reader_num_threads: see `tensorflow.contrib.data.make_batched_features_dataset` doc
:param parser_num_threads: see `tensorflow.contrib.data.make_batched_features_dataset` doc
:param sloppy_ordering: see `tensorflow.contrib.data.make_batched_features_dataset` doc
:param drop_final_batch: see `tensorflow.contrib.data.make_batched_features_dataset` doc
:return `Dataset`, which holds results of the parsing of `Example` protos
"""
return cls._examples(file_pattern,
feature_spec=feature_spec,
compression_type=compression_type,
batch_size=batch_size,
shuffle=shuffle,
num_epochs=num_epochs,
shuffle_buffer_size=shuffle_buffer_size,
shuffle_seed=shuffle_seed,
prefetch_buffer_size=prefetch_buffer_size,
reader_num_threads=reader_num_threads,
parser_num_threads=parser_num_threads,
sloppy_ordering=sloppy_ordering,
drop_final_batch=drop_final_batch)
@classmethod
def _examples(cls,
file_pattern, # type: str
schema_path=None, # type: str
feature_spec=None, # type: Dict[str, Union[tf.FixedLenFeature, tf.VarLenFeature, tf.SparseFeature]] # noqa: E501
compression_type=None, # type: str
batch_size=128, # type: int
shuffle=True, # type: bool
num_epochs=1, # type: int
shuffle_buffer_size=10000, # type: int
shuffle_seed=None, # type: int
prefetch_buffer_size=1, # type: int
reader_num_threads=1, # type: int
parser_num_threads=2, # type: int
sloppy_ordering=False, # type: bool
drop_final_batch=False # type: bool
):
# type: (...) -> tf.data.Dataset
if schema_path:
feature_spec, _ = cls.parse_schema(schema_path)
logger.debug("Will parse features from: `%s`, using features spec: %s",
file_pattern,
str(feature_spec))
from tensorflow.contrib.data import make_batched_features_dataset
reader_args = [compression_type] if compression_type else None
dataset = make_batched_features_dataset(file_pattern,
batch_size=batch_size,
features=feature_spec,
reader_args=reader_args,
num_epochs=num_epochs,
shuffle=shuffle,
shuffle_buffer_size=shuffle_buffer_size,
shuffle_seed=shuffle_seed,
prefetch_buffer_size=prefetch_buffer_size,
reader_num_threads=reader_num_threads,
parser_num_threads=parser_num_threads,
sloppy_ordering=sloppy_ordering,
drop_final_batch=drop_final_batch)
return dataset
class __DictionaryEndpoint(object):
@classmethod
def examples_via_schema(cls,
file_pattern, # type: str
schema_path, # type: str
default_value=0, # type: float
batch_size=128, # type: int
compression_type=None, # type: str
shuffle=True, # type: bool
num_epochs=1, # type: int
shuffle_buffer_size=10000, # type: int
shuffle_seed=42, # type: int
prefetch_buffer_size=1, # type: int
reader_num_threads=1, # type: int
parser_num_threads=2, # type: int
sloppy_ordering=False, # type: bool
drop_final_batch=False # type: bool
):
# type: (...) -> Iterator[Dict[str, np.ndarray]]
"""
Read a TF dataset and load it into a dictionary of NumPy Arrays.
:param file_pattern: List of files or patterns of file paths containing
`Example` records. See `tf.gfile.Glob` for pattern rules
:param default_value: Value used if a sparse feature is missing.
:param schema_path: tf.metadata Schema path
:param compression_type: TFRecord compression type, see `tf.data.TFRecordDataset` doc
:param batch_size: batch size, set to the size of the dataset to read all data at once
:param shuffle: see `tensorflow.contrib.data.make_batched_features_dataset` doc
:param num_epochs: see `tensorflow.contrib.data.make_batched_features_dataset` doc
:param shuffle_buffer_size: see `tensorflow.contrib.data.make_batched_features_dataset`
doc
:param shuffle_seed: see `tensorflow.contrib.data.make_batched_features_dataset` doc
:param prefetch_buffer_size: see `tensorflow.contrib.data.make_batched_features_dataset`
doc
:param reader_num_threads: see `tensorflow.contrib.data.make_batched_features_dataset`
doc
:param parser_num_threads: see `tensorflow.contrib.data.make_batched_features_dataset`
doc
:param sloppy_ordering: see `tensorflow.contrib.data.make_batched_features_dataset` doc
:param drop_final_batch: see `tensorflow.contrib.data.make_batched_features_dataset` doc
:return Dictionary of NumPy arrays
"""
return cls._examples(file_pattern=file_pattern,
schema_path=schema_path,
default_value=default_value,
batch_size=batch_size,
compression_type=compression_type,
shuffle=shuffle,
num_epochs=num_epochs,
shuffle_buffer_size=shuffle_buffer_size,
shuffle_seed=shuffle_seed,
prefetch_buffer_size=prefetch_buffer_size,
reader_num_threads=reader_num_threads,
parser_num_threads=parser_num_threads,
sloppy_ordering=sloppy_ordering,
drop_final_batch=drop_final_batch)
@classmethod
def examples_via_feature_spec(cls,
file_pattern, # type: str
feature_spec, # type: Dict[str, Union[tf.FixedLenFeature, tf.VarLenFeature, tf.SparseFeature]] # noqa: E501
default_value=0, # type: float
batch_size=128, # type: int
compression_type=None, # type: str
shuffle=True, # type: bool
num_epochs=1, # type: int
shuffle_buffer_size=10000, # type: int
shuffle_seed=42, # type: int
prefetch_buffer_size=1, # type: int
reader_num_threads=1, # type: int
parser_num_threads=2, # type: int
sloppy_ordering=False, # type: bool
drop_final_batch=False # type: bool
):
# type: (...) -> Iterator[Dict[str, np.ndarray]]
"""
Read a TF dataset and load it into a dictionary of NumPy Arrays.
:param file_pattern: List of files or patterns of file paths containing
`Example` records. See `tf.gfile.Glob` for pattern rules
:param feature_spec: TensorFlow feature spec
:param default_value: Value used if a sparse feature is missing.
:param compression_type: TFRecord compression type, see `tf.data.TFRecordDataset` doc
:param batch_size: batch size, set to the size of the dataset to read all data at once
:param shuffle: see `tensorflow.contrib.data.make_batched_features_dataset` doc
:param num_epochs: see `tensorflow.contrib.data.make_batched_features_dataset` doc
:param shuffle_buffer_size: see `tensorflow.contrib.data.make_batched_features_dataset`
doc
:param shuffle_seed: see `tensorflow.contrib.data.make_batched_features_dataset` doc
:param prefetch_buffer_size: see `tensorflow.contrib.data.make_batched_features_dataset`
doc
:param reader_num_threads: see `tensorflow.contrib.data.make_batched_features_dataset`
doc
:param parser_num_threads: see `tensorflow.contrib.data.make_batched_features_dataset`
doc
:param sloppy_ordering: see `tensorflow.contrib.data.make_batched_features_dataset` doc
:param drop_final_batch: see `tensorflow.contrib.data.make_batched_features_dataset` doc
:return Dictionary of NumPy arrays
"""
return cls._examples(file_pattern=file_pattern,
feature_spec=feature_spec,
default_value=default_value,
batch_size=batch_size,
compression_type=compression_type,
shuffle=shuffle,
num_epochs=num_epochs,
shuffle_buffer_size=shuffle_buffer_size,
shuffle_seed=shuffle_seed,
prefetch_buffer_size=prefetch_buffer_size,
reader_num_threads=reader_num_threads,
parser_num_threads=parser_num_threads,
sloppy_ordering=sloppy_ordering,
drop_final_batch=drop_final_batch)
@classmethod
def _examples(cls,
file_pattern, # type: str
schema_path=None, # type: str
feature_spec=None, # type: Dict[str, Union[tf.FixedLenFeature, tf.VarLenFeature, tf.SparseFeature]] # noqa: E501
default_value=0, # type: float
compression_type=None, # type: str
batch_size=128, # type: int
shuffle=True, # type: bool
num_epochs=1, # type: int
shuffle_buffer_size=10000, # type: int
shuffle_seed=None, # type: int
prefetch_buffer_size=1, # type: int
reader_num_threads=1, # type: int
parser_num_threads=2, # type: int
sloppy_ordering=False, # type: bool
drop_final_batch=False # type: bool
):
# type: (...) -> Iterator[Dict[str, np.ndarray]]
Datasets._assert_eager("Dictionary")
def get_numpy(tensor):
if isinstance(tensor, tf.Tensor):
return tensor.numpy()
elif isinstance(tensor, tf.SparseTensor):
# If it's a SparseTensor, which is the representation of VarLenFeature and
# SparseFeature, we convert it to dense representation, and further is it's
# a scalar, we reshape to to a vector
shape = tensor.dense_shape.numpy()
# first element is batch size
if shape[1] == 0:
# this feature is not defined for any of the examples in the | |
have at least ' + str(args[i].min_ndims) + \
' dimension')
else:
write_fatal_block(w1file,'The ' + args[i].name + \
' array must have at least ' + str(args[i].min_ndims) + \
' dimensions')
if not args[i].is_scalar:
dstr = ""
for j in range(len(args[i].dsizes_names)):
#
# If we're not dealing with a scalar, then write the code that
# assigns the size of the rightmost dimensions, and then creates
# a variable that will contain the size of all the rightmost dimensions.
#
# For example, if the rightmost dimensions are nx, ny, and
# nz, then the code will be "nxnynz = nx * ny * nz".
#
if not args[i].dsizes_names[j] in global_dsizes_names_accum:
if args[i].ndims > 0:
if args[i].dsizes[j] == 0:
w1file.write(' ' + args[i].dsizes_names[j] + ' = ' + \
args[i].dsizes_name + '[' + str(j) + '];\n')
else:
w1file.write(' ' + args[i].dsizes_names[j] + ' = ' + \
str(args[i].dsizes[j]) + ';\n')
else:
w1file.write(' ' + args[i].dsizes_names[j] + ' = ' + \
args[i].dsizes_name + '[' + args[i].ndims_name + '-' +
str(int(args[i].min_ndims-j)) + '];\n')
global_dsizes_names_accum.append(args[i].dsizes_names[j])
else:
if args[i].ndims > 0 and args[i].dsizes[j] == 0:
w1file.write(' if(' + args[i].dsizes_name + '[' + str(j) + \
'] != ' + args[i].dsizes_names[j] + ') {\n')
write_fatal_block(w1file,'The #' + str(j) + ' argument of ' +
args[i].name + ' must be length ' + \
args[i].dsizes_names[j])
elif args[i].ndims == 0:
w1file.write(' if(' + args[i].dsizes_name + '[' +
args[i].ndims_name + '-' + str(args[i].min_ndims-j) + \
'] != ' + args[i].dsizes_names[j] + ') {\n')
write_fatal_block(w1file,'The ndims-' + \
str(int(args[i].min_ndims-j)) + \
' dimension of ' + args[i].name + \
' must be of length ' + \
args[i].dsizes_names[j])
if args[i].min_ndims > 1 or args[i].ndims > 1:
if j == 0:
dstr = " " + args[i].dsizes_names_str + " = " + \
args[i].dsizes_names[0]
else:
dstr = dstr + " * " + args[i].dsizes_names[j]
if dstr != "":
w1file.write(dstr + ";\n\n")
#---------------------------------------------------------------------
#
# Write out code to calculate size of leftmost dimensions, if any.
#
#---------------------------------------------------------------------
if have_leftmost:
first = True
name_str = ""
w1file.write("""
/*
* Calculate size of leftmost dimensions.
*/
""")
w1file.write(" size_leftmost = 1;\n")
for i in range(len(args)):
if args[i].min_ndims > 0:
if first:
w1file.write(" ndims_leftmost = " + args[i].ndims_name + "-" +
str(args[i].min_ndims) + ";\n");
w1file.write(" for(i = 0; i < ndims_leftmost; i++) {\n")
first_arg_name = args[i].name # Keep track of this argument
prev_arg_name = first_arg_name
first = False
second = True
else:
if second:
w1file.write(" if(" + args[i].dsizes_name + "[i] != dsizes_" + \
first_arg_name + "[i]")
name_str = prev_arg_name
prev_arg_name = args[i].name
second = False
else:
name_str = name_str + ", " + prev_arg_name
w1file.write(" ||\n " + args[i].dsizes_name + \
"[i] != dsizes_" + first_arg_name + "[i]")
prev_arg_name = args[i].name
#---------------------------------------------------------------------
#
# Write code to close leftmost dimensions loop.
#
#---------------------------------------------------------------------
if name_str != "":
w1file.write(") {\n")
write_fatal_block(w1file,'The leftmost dimensions of ' + \
name_str + ' and ' + prev_arg_name + \
' must be the same',spaces=" ")
if not first:
w1file.write(" size_leftmost *= dsizes_" + first_arg_name + "[i];\n")
w1file.write(" }\n\n")
if isfunc:
#----------------------------------------------------------------------
# Code to calculate size of output array.
#----------------------------------------------------------------------
w1file.write("""
/*
* Calculate size of output array.
*/
""")
#
# Create string that will hold total dimension sizes of rightmost
# dimensions of return variable.
#
if not ret_arg.is_scalar:
if (ret_arg.min_ndims > 0) or (0 in ret_arg.dsizes):
if ret_arg.min_ndims > 1 or ret_arg.ndims > 1:
for j in range(len(ret_arg.dsizes_names)):
if j == 0:
ret_dstr = " " + ret_arg.dsizes_names_str + " = " + \
ret_arg.dsizes_names[0]
else:
ret_dstr = ret_dstr + " * " + ret_arg.dsizes_names[j]
w1file.write(ret_dstr + ";\n")
w1file.write(" " + ret_arg.size_name + " = size_leftmost * " + \
ret_arg.dsizes_names_str + ";\n")
else:
w1file.write(" " + ret_arg.size_name + " = ...need input here...;\n")
w1file.write("""
/*
* Allocate space for output array.
*/
""")
w1file.write(" " + ret_arg.name + " = (" + ret_arg.ctype + \
" *)calloc(" + ret_arg.size_name + \
", sizeof(" + ret_arg.ctype + "));\n")
w1file.write(" if(" + ret_arg.name + " == NULL) {\n")
write_fatal_block(w1file,'Unable to allocate memory for output array')
#
# Set the missing value for the output, if any.
#
if ret_has_missing and (num_input_has_missing > 0) and ret_msg_depend_input:
w1file.write(" if(" + args[ret_msg_depend_index].has_msg_name + ") {\n")
w1file.write(" if(" + ret_arg.type_name + " == NCL_double) " + \
ret_arg.msg_name + " = " + \
args[ret_msg_depend_index].msg_dname + ";\n")
w1file.write(" else " + ret_arg.msg_name + " = " + \
args[ret_msg_depend_index].msg_fname + ";\n")
w1file.write(" " + ret_arg.msg_dname + " = " + \
args[ret_msg_depend_index].msg_dname + ";\n")
w1file.write(" }\n")
#
# If the dimension sizes are identical to one of the input variables'
# dimension sizes, then nothing is needed here.
#
if not ret_size_depend_input:
w1file.write("""
/*
* Allocate space for output dimension sizes and set them.
*/
""")
if ret_arg.ndims > 0:
w1file.write(" " + ret_arg.ndims_name + " = " + str(ret_arg.ndims) + \
";\n")
else:
w1file.write(" " + ret_arg.ndims_name + " = ndims_leftmost + " + \
str(ret_arg.min_ndims) + ";\n")
w1file.write(" " + ret_arg.dsizes_name + " = (npy_intp*)calloc(" + \
ret_arg.ndims_name + ",sizeof(npy_intp)); \n")
w1file.write(" if( " + ret_arg.dsizes_name + " == NULL ) {\n")
write_fatal_block(w1file,'Unable to allocate memory for holding dimension sizes')
if ret_arg.min_ndims > 0:
#
# Loop through input arguments until we find one that has leftmost
# dimensions, and then use its leftmost dimensions to assign
# dimensions to the output array's dimension sizes array.
#
for i in range(len(args)):
if args[i].min_ndims > 0:
w1file.write(" for(i = 0; i < " + ret_arg.ndims_name + "-" + \
str(ret_arg.min_ndims) + "; i++) " + \
ret_arg.dsizes_name + "[i] = " + args[i].dsizes_name + "[i];\n")
break
else:
w1file.write(" for(i = 0; i < " + ret_arg.ndims_name + \
"; i++)" + ret_arg.dsizes_name + \
"[i] = ...need input here;\n")
for i in range(ret_arg.min_ndims):
w1file.write(" " + ret_arg.dsizes_name + "[" + ret_arg.ndims_name +
"-" + str(ret_min_ndims-i) + "] = " + \
ret_dsizes_names[i] + ";\n")
else:
w1file.write(" for(i = 0; i < " + ret_arg.ndims_name + "; i++)" + \
ret_arg.dsizes_name + "[i] = ...need input here;\n")
#
# Write out code for the loop across leftmost dimensions (if any).
#
if have_leftmost:
w1file.write("""
/*
* Loop across leftmost dimensions and call the Fortran routine for each
* subsection of the input arrays.
*/
""")
index_str = " "
for i in range(len(index_names)):
if i < (len(index_names)-1):
index_str = index_str + index_names[i] + " = "
else:
index_str = index_str + index_names[i] + " = 0;\n"
w1file.write(index_str)
w1file.write("\n for(i = 0; i < size_leftmost; i++) {\n")
#
# Code for pointing tmp_xxxx to appropriate location in x if it's
# a variable that has leftmost dimensions.
#
for i in range(len(args)):
if args[i].ndims == 0:
w1file.write(" " + args[i].tmp_name + " = &((" + args[i].ctype + \
"*)" + args[i].name + ")[" + args[i].index_name + "];\n")
for i in range(len(args)):
name = args[i].name
#
# Write code for calling Fortran routine inside loop. The input variables
# and the output variable need to be pointed to appropriately.
#
w1file.write("""
/*
* Loop across leftmost dimensions and call the Fortran routine.
*/
""")
w1file.write(" NGCALLF("+ lower(fortran_name) + "," + \
upper(fortran_name) + ")(")
for i in range(len(farg_names)):
if i != 0:
w1file.write(", ")
#
# Need to add code to check the ndims of farg_names so that the index
# thing is only added if you are dealing with a variable with variable
# dimensions (ndims=0).
#
if farg_names[i] in input_var_names or farg_names[i] in ret_name:
w1file.write("&" + farg_names[i] + "[index_" + farg_names[i] + "]")
else:
w1file.write(farg_cchar[i] + farg_names[i])
w1file.write(");\n")
#
# Write out code for incrementing index variables, if any.
#
for i in range(len(args)):
if args[i].ndims == 0:
w1file.write(" " + args[i].index_name + " += " + \
args[i].dsizes_names_str + ";\n")
if isfunc and ret_arg.ndims == 0:
w1file.write(" " + ret_arg.index_name + " += " + \
ret_arg.dsizes_names_str + ";\n")
w1file.write(" }\n") # End "for" loop
else:
#
# We are dealing with a function that doesn't have any arguments
# with leftmost dimensions. This is unusual, but possible.
#
# Write code for calling Fortran routine not in a loop.
#
w1file.write("""
/*
* Call the | |
from pydantic import BaseModel
from sly import Parser, Lexer
import io
import os
import pandas as pd
import re
import spacy
from nltk.tokenize import RegexpTokenizer
from rich import pretty
import subprocess
from subprocess import Popen, PIPE, STDOUT
pretty.install()
try:
    nlp = spacy.load('en_core_web_lg')
except OSError:
    # spacy raises OSError when the model package is not installed: download
    # it, then retry the load. (This was a bare `except:`, which also
    # swallowed KeyboardInterrupt/SystemExit; the trailing dead `pass` was
    # removed.)
    print("Downloading and Loading the Spacy Model")
    subprocess.call(['python', '-m', "spacy", "download", "en_core_web_lg"])
    nlp = spacy.load('en_core_web_lg')
# Variable names already claimed by the translator.
reserved_vars = set()

# Language keywords: `keylist` keeps the declaration order, `keywords` is the
# same collection as a set for O(1) membership tests.
keylist = [
    "begin", "end", "assign", "to", "print", "read", "if", "then", "else",
    "endif", "while", "endwhile", "do", "for", "from", "repeat", "return", "endfor",
    "start_procedure", "end_procedure"
]
keywords = set(keylist)
class customParser(Parser):
# debugfile = 'parser.out'
tokens = {BEG, END, DATATYPE, ASSIGN, TO, PRINT, SCAN, READ, COMMA, OPEN, CLOSE,
IF, THEN, ELSE, ENDIF, WHILE, ENDWHILE, ENDDOWHILE, DO, FOR, FROM, REPEAT,
RETURN, ENDFOR, QUOTE, BOOL, RELOP, LOGOP, AS, MD, Q, START_PROCEDURE,
END_FUNCTION, VAR, NAME_PROCEDURE, NUM, STRING}
funcdec = ""
def find_type(self, datatype):
if (datatype == 'int'):
return "%d"
elif (datatype == 'char'):
return "%c"
elif (datatype == 'float'):
return "%f"
else:
return "%ld"
def update_VAR(self, var):
# check if belongs to set
if var in self.variables: # variable with same name already exists
print("Same variable used twice. Change variable name of {}".format(var))
else:
self.variables.add(var)
return True
def check_VAR(self, var):
# checks if varible exists
if var in self.variables:
return True
else:
print("The variable {} is not defined.".format(var))
def __init__(self):
self.names = {}
self.variables = set()
    # START : BEG {printf("\n#include<stdio.h>\nvoid main()\n{\n");} CODE END { printf("\n}\nValid"); exit(0); }
    # ;
    @_('BEG CODE END')
    def START(self, p):
        """Top-level rule: wrap the translated CODE in a C main(), prepending
        any procedure definitions accumulated in self.funcdec."""
        return "#include<stdio.h>\n" + self.funcdec + "void main()\n{\n" + p.CODE + "}"
    # CODE : STMT {printf(";");}
    # | CODE STMT {printf(";");}
    # | ST
    # | CODE ST
    # ;
    @_('STMT')
    def CODE(self, p):
        """A single simple statement; append the C statement terminator."""
        return p.STMT + ";\n"
    @_('CODE STMT')
    def CODE(self, p):
        """Statement sequence followed by a simple statement."""
        return p.CODE + p.STMT + ";\n"
    @_('ST')
    def CODE(self, p):
        """A single structured statement (if/while/for/...); no ';' needed."""
        return p[0]
    @_('CODE ST')
    def CODE(self, p):
        """Statement sequence followed by a structured statement."""
        return p[0] + p[1]
    # STMT : EXPR
    # | DEC
    # | INIT
    # ;
    @_('EXPR', 'DEC', 'INIT', 'PR', 'SC')
    def STMT(self, p):
        """A simple statement: pass the translated child through unchanged
        (the CODE rules append the terminating ';')."""
        return p[0]
    # ST : IF {printf("if(");} CON THEN{printf(")\n{");} CODE ENDIF {printf("\n}");}
    # | IF { printf("if(");} CON THEN{printf(")\n{");} STMT ELSE { printf("}\nelse\n{");} STMT ENDIF {printf("\n}");}
    # | WHILE{printf("while(");} EXPR THEN{printf(")\n{\n");} CODE ENDWHILE {printf("\n}");}
    # | DO{printf("do\n{");} CODE WHILE{printf("\n}while(");} EXPR ENDDOWHILE {printf(");");}
    # | START_PROCEDURE NAME_PROCEDURE OPEN{printf("void %s(",$2); } parameter_list CLOSE{printf(")\n{\n");} CODE END_FUNCTION {printf("}");}
    # | FOR{printf("for(");} INIT REPEAT{printf("\n{\n");} CODE ENDFOR{printf("\n}\n");}
    # | PR
    # | SC
    # ;
    @_('IF CON THEN CODE ENDIF')
    def ST(self, p):
        """if-statement without else."""
        return "if(" + p.CON + ")\n{" + p.CODE + "}"
    @_('IF CON THEN CODE ELSE CODE ENDIF')
    def ST(self, p):
        """if/else statement; CODE0 is the then-branch, CODE1 the else-branch."""
        return "if(" + p.CON + ")\n{" + p.CODE0 + "}\nelse\n{" + p.CODE1 + "}"
    @_('WHILE EXPR THEN CODE ENDWHILE')
    def ST(self, p):
        """while-loop."""
        return("while(" + p.EXPR + ")\n{\n" + p.CODE + "\n}")
    @_('DO CODE WHILE EXPR ENDDOWHILE')
    def ST(self, p):
        """do/while-loop."""
        return("do\n{" + p.CODE + "\n}while(" + p.EXPR + ")")
    @_('START_PROCEDURE NAME_PROCEDURE parameter_list CLOSE CODE END_FUNCTION')
    def ST(self, p):
        """Procedure definition: the C function text is accumulated in
        self.funcdec (emitted before main() by START) and nothing is emitted
        inline.
        NOTE(review): the generated header has no "(" between the procedure
        name and the parameter list unless the NAME_PROCEDURE token text
        itself ends with one -- confirm against the lexer."""
        self.funcdec += "void {}".format(p.NAME_PROCEDURE) + \
            p.parameter_list + ")\n{\n" + p.CODE + "}\n"
        return ""
    @_('FOR INIT REPEAT CODE ENDFOR')
    def ST(self, p):
        """for-loop; the INIT variant supplies the loop header including the
        closing ")"."""
        return("for(" + p.INIT + "\n{\n" + p.CODE + "\n}")
    @_('PR', 'SC')
    def ST(self, p):
        """print/scan statements used in structured-statement position."""
        return (p[0])
    # parameter_list: VAR DATATYPE{printf("%s %s",$2,$1);}
    # | parameter_list COMMA VAR DATATYPE{printf(",%s %s",$4,$3);}
    @_('VAR DATATYPE')
    def parameter_list(self, p):
        """Single procedure parameter, emitted as 'ctype name'."""
        return ("{} {}".format(p.DATATYPE, p.VAR))
@_('parameter_list COMMA VAR DATATYPE')
def parameter_list(self, p):
return ("{} {}".format(p.DATATYPE, p.VAR))
    # EXPR : E RELOP{printf("%s",$2); } E
    # | E LOGOP{printf("%s",$2); } E
    # | E
    # ;
    @_('E RELOP E')
    def EXPR(self, p):
        """Relational comparison between two arithmetic expressions."""
        return (p[0] + p.RELOP + p[2])
    @_('E LOGOP E')
    def EXPR(self, p):
        """Logical combination of two arithmetic expressions."""
        return (p[0] + p.LOGOP + p[2])
    @_('E')
    def EXPR(self, p):
        """A bare arithmetic expression."""
        return (p.E)
    # E : E AS{printf("%s",$2);} T
    # | T
    # ;
    @_('E AS T')
    def E(self, p):
        """Additive expression (AS is the +/- operator token)."""
        return(p.E + p.AS + p.T)
    @_('T')
    def E(self, p):
        return p.T
    # T : T MD{printf("%s",$2);} F
    # | F
    # ;
    @_('T MD F')
    def T(self, p):
        """Multiplicative expression (MD is the * / operator token)."""
        return (p.T + p.MD + p.F)
    @_('F')
    def T(self, p):
        return p.F
    # F : VAR {printf("%s",$1);}
    # | NUM {printf("%s",$1);}
    # | OPEN E CLOSE
    # ;
    @_('VAR')
    def F(self, p):
        """Factor: a variable reference."""
        return ("{}".format(p.VAR))
    @_('NUM')
    def F(self, p):
        """Factor: a numeric literal."""
        # print(p.NUM)
        return ("{}".format(p.NUM))
    @_('OPEN E CLOSE')
    def F(self, p):
        """Factor: a parenthesised expression."""
        return (p.OPEN + p.E + p.CLOSE)
    # N : VAR{printf("%s",$1);} N
    # |
    # ;
    # @_('VAR N')
    # def N(self,p):
    # return ("{}".format(p.VAR),p.N)
# DEC : ASSIGN OPEN VAR DATATYPE CLOSE { update($3); printf("%s %s",$4,$3); };update_VAR
@_('ASSIGN OPEN VAR DATATYPE CLOSE')
def DEC(self, p):
self.update_VAR(p.VAR)
# print("assigning " + p.DATATYPE + " " + p.VAR)
return p.DATATYPE + " " + p.VAR
# TODO DO VERIFICATION
# INIT : ASSIGN VAR TO NUM {g=check($2); if(g==1)printf("%s = %s",$2,$4); else exit(0);}
# | ASSIGN VAR TO VAR { printf("%s = %s",$2,$4); }
# | VAR FROM NUM TO NUM {if($3<$5){printf("%s = %s ; %s <= %s ; %s ++)",$1,$3,$1,$5,$1);}else{printf("%s = %s ; %s >= %s ; %s --)",$1,$3,$1,$5,$1);}}
# | VAR Q{printf("%s %s",$1,$2);} E , VAR = E
# ; check_VAR
@_('ASSIGN VAR TO NUM')
def INIT(self, p):
# print("ASSIGN VAR TO NUM")
self.check_VAR(p.VAR)
return p.VAR + "=" + p.NUM
@_('ASSIGN VAR TO VAR')
def INIT(self, p):
if ((p[1] in self.variables) and (p[3] in self.variables)):
return "{} = {}".format(p[1], p[3])
else:
Exception("Both variables should exist before equating.")
@_('VAR FROM EXPR TO EXPR')
def INIT(self, p):
if (p[2] < p[4]):
return "{} = {} ; {} <= {} ; {}++)".format(p[0], p[2], p[0], p[4], p[0])
else:
return "{} = {} ; {} >= {} ; {}--)".format(p[0], p[2], p[0], p[4], p[0])
    @_('')
    def EXPR(self, p):
        # NOTE(review): this empty production looks broken.  `EXPR` and `NUM`
        # are undefined names in this scope, and an empty production has no
        # p.NUM / p.VAR symbols, so this body would raise NameError if it
        # were ever reduced.  Left untouched pending clarification of the
        # intended grammar rule.
        if EXPR == NUM:
            return p.NUM
        else:
            return p.VAR
@_('VAR Q E')
def INIT(self, p):
return ("{} = ".format(p.VAR) + p.E)
# PR : PRINT DATATYPE COMMA VAR {g=check($4); getspec($2); if(g==1)printf("printf(\"%s\",%s);\n",str,$4); else exit(0); }
# | PRINT{printf("printf(\"");} QUOTE N DATA N QUOTE{printf("\"%s);",name); strcpy(name,"");}
# | PRINT{printf("printf(\"");} QUOTE N QUOTE{printf("\");");}
# ;
# print (a int)
@_('PRINT OPEN VAR DATATYPE CLOSE')
def PR(self, p):
return "printf(\"{}\",{})".format(self.find_type(p.DATATYPE), p.VAR)
@_('PRINT STRING')
def PR(self, p):
return "printf({})".format(p.STRING)
# SC : READ DATATYPE COMMA VAR {g=check($4); getspec($2); if(g==1)printf("scanf(\"%s\",&%s);\n",str,$4); else exit(0); }
# ;
@_('READ OPEN VAR DATATYPE CLOSE')
def SC(self, p):
return 'scanf(\"{}\",&{})'.format(self.find_type(p.DATATYPE), p.VAR)
# CON : VAR RELOP VAR { printf("%s %s %s",$1,$2,$3); }
# | VAR RELOP NUM { printf("%s %s %s",$1,$2,$3); }
# | VAR LOGOP VAR { printf("%s %s %s",$1,$2,$3); }
# | BOOL { printf("%s",$1); }
# ;
@_('VAR RELOP VAR', 'VAR RELOP NUM', 'VAR LOGOP VAR')
def CON(self, p):
return "{} {} {}".format(p[0], p[1], p[2])
@_('BOOL')
def CON(self, p):
return "{}".p[0]
class Input(BaseModel):
    """Request body for the translation endpoint: the pseudo-code program."""
    message: str
class Output(BaseModel):
    """Response body for the translation endpoint: the generated C source."""
    c_code: str
# Pre-compiled scrubbing patterns used by cleaner().
# BUG FIX: the original comment pattern r"[\/\/].*" was a *character class*
# matching a single "/", so any lone slash (e.g. a division) erased the rest
# of the line.  "//" must be written as a two-character literal sequence.
comment_eraser = re.compile(r"//.*")
# Quoted string literals (handles escaped quotes and trailing backslash pairs).
string_eraser = re.compile(r"([\"'])((\\{2})*|(.*?[^\\](\\{2})*))\1")
# An identifier immediately followed by "(" -- i.e. a function call/definition.
capture_func = re.compile(r"([a-zA-Z_][a-zA-Z0-9_]*)[(]")
# "name type" declarations, optionally parenthesised; group 2 = name, 3 = type.
capture_vars = re.compile(r"\(?\s*(([\d\w]+)\s(int|float|char|double)\s*)\)?")
def cleaner(strs: str) -> str:
    """Remove comments, string literals, declarations and call sites from one line.

    Side effect: every declared variable name and every called function name
    is added to the module-level ``reserved_vars`` set, so later fuzzy
    keyword matching skips user identifiers.
    """
    # print("Initial:",strs)
    strs = comment_eraser.sub("", strs)
    strs = string_eraser.sub("", strs)
    # Collect identifiers *before* erasing their spans from the text.
    for match in capture_vars.finditer(strs):
        # extract words
        reserved_vars.add(match.group(2))
        # print(match.group(2)) #2 = var names, 3 = datatype, 1=whole declaration
    for match in capture_func.finditer(strs):
        reserved_vars.add(match.group(1))
        # print(match.group(1))
    # Erase the matched spans (capture_func.sub also removes the "(").
    strs = capture_func.sub("", strs)
    strs = capture_vars.sub("", strs)
    return strs
def spacysim(word1, word2):
    """Return the spaCy vector similarity between two single words."""
    return nlp(word1).similarity(nlp(word2))
def keywordreturner(text):
    """Map each canonical keyword to the set of tokens in *text* that spaCy
    judges closest to it (fuzzy keyword recovery for the translator).

    Tokens that are exact keywords or user identifiers (``reserved_vars``)
    are skipped; everything else is attributed to its most similar keyword.
    """
    keyw = {}
    # Seed every canonical keyword with itself.
    for key in keylist:
        keyw[key] = set()
        keyw[key].add(key)
    # One DataFrame row per source line.  NOTE(review): delimiter="\n" with
    # read_csv is deprecated in recent pandas -- confirm the target version.
    df = pd.read_csv(io.StringIO(text), header=None, delimiter="\n")
    df = df.rename(columns={0: "Code"})
    df["Cleaned"] = df["Code"].apply(cleaner)
    tokenizer = RegexpTokenizer(r'[a-zA-Z_][a-zA-Z0-9_]*')
    for index, row in df.iterrows():
        sentence = df.loc[index, 'Cleaned']
        # print(sentence)
        sentence = tokenizer.tokenize(sentence)
        for word in sentence:
            if word in keywords or word in reserved_vars:
                continue
            # Linear scan for the most similar canonical keyword.
            max_sim = 0
            max_key = None
            for k in keywords:
                sim = spacysim(k, word)
                if sim > max_sim:
                    max_sim = sim
                    max_key = k
            print("word : {} , closest match : {} | Sim : {}".format(
                word, max_key, max_sim))
            if max_sim > 0:
                if max_key in keyw:
                    keyw[max_key].add(word)
                else:
                    keyw[max_key] = set()
                    keyw[max_key].add(word)
    return keyw
def C_Code_Generator(input: Input) -> Output:
"""Constructs the C code from the input data."""
global reserved_vars
reserved_vars | |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
###
#
# @file framework.py
#
# DMFT is a software package provided by Rutgers Univiversity,
# the State University of New Jersey
#
# @version 1.0.0
# @author <NAME>
# @date 2016-02-15
#
###
from utils import shellcmd, writefile, delfiles, fixpaths, includefromlib
import sys
import os
import getopt
import string
import subprocess
class Framework:
    """ This class is framework for DMFT package installation. """
    # Class-level defaults; several are overwritten per instance in __init__
    # and parse_args.
    #set default values
    #prefix = "./bin" # The install directory
    build = "./src" # The build directory
    make = "make" # the "make" command
    downcmd = "" # the command used to download stuff
    ranlib = "" # the ranlib command
    downblas = 0 # whether or not to download reference Blas
    downfftw = 0 # whether or not to download reference FFTW
    downgsl = 0 # whether or not to download reference GSL
    downlapack = 0 # whether or not to download reference Lapack
    lapversion = "lapack-3.6.0"
    fftwversion = "fftw-3.3.4"
    gslversion = "gsl-1.16"
    versions = ("1.0.0") # NOTE(review): no trailing comma, so this is a str, not a tuple -- confirm intent
    clean = 0 # set by --clean; triggers cleanup() and exit
    src = 0
    installerdir = "" # set to os.getcwd() in __init__
    verbose = 0
    #nbcores = 2
def __init__(self, argv, config):
#print "*-+-*"*16
print "Setting up the framework"
self.config = config
self.config.PIC = '-fPIC'
if config.prefix==None:
if os.environ.has_key('WIEN_DMFT_ROOT') and os.environ['WIEN_DMFT_ROOT']!='':
config.prefix=os.environ['WIEN_DMFT_ROOT']
else:
config.prefix=os.getcwd()+'/install/'
self.config.prefix = os.path.abspath(config.prefix)
# parse input arguments
self.parse_args(argv)
if self.clean:
self.cleanup()
sys.exit()
if ((str.upper(self.config.compiler) == 'INTEL')):
if self.config.cc==None or self.config.cc == '':
self.config.cc = 'icc'
print """
You specified """+self.config.compiler+""" as your main compiler
and left --cc parameter empty, assuming it equal to """+self.config.cc+".\n"
if self.config.fc==None or self.config.fc == '':
self.config.fc = 'ifort'
print """
You specified """+self.config.compiler+""" as your main compiler
and left --fc parameter empty, assuming it equal to """+self.config.fc+".\n"
if self.config.cxx==None or self.config.cxx == '':
self.config.cxx = 'icpc'
print """
You specified """+self.config.compiler+""" as your main compiler
and left --cxx parameter empty, assuming it equal to """+self.config.cxx+".\n"
if self.config.preproc==None or self.config.preproc == '':
self.config.preproc = 'fpp'
print """
You specified """+self.config.compiler+""" as your main compiler
and left --preproc parameter empty, assuming it equal to """+self.config.preproc+".\n"
if ((str.upper(self.config.compiler) == 'GNU')):
if self.config.cc==None or self.config.cc == '':
self.config.cc = 'gcc'
print """
You specified """+self.config.compiler+""" as your main compiler
and left --cc parameter empty, assuming it equal to """+self.config.cc+".\n"
if self.config.fc==None or self.config.fc == '':
self.config.fc = 'gfortran'
print """
You specified """+self.config.compiler+""" as your main compiler
and left --fc parameter empty, assuming it equal to """+self.config.fc+".\n"
if self.config.cxx==None or self.config.cxx == '':
self.config.cxx = 'g++'
print """
You specified """+self.config.compiler+""" as your main compiler
and left --cxx parameter empty, assuming it equal to """+self.config.cxx+".\n"
if self.config.preproc==None or self.config.preproc == '':
self.config.preproc = 'cpp -traditional-cpp'
print """
You specified """+self.config.compiler+""" as your main compiler
and left --preproc parameter empty, assuming it equal to """+self.config.preproc+".\n"
if self.config.cc == '':
# check if no C compiler is provided
print """
C compiler is required to compile DMFT software.
Please use --cc flag or edit configure.py file."""
sys.exit()
if self.config.fc == "":
# chekc if Fortran compiler is provided
print """
Fortran compiler is required to compile DMFT software.
Please use --fc flag or edit configure.py file."""
sys.exit()
if self.config.cxx == "":
# check if C++ compiler is provided
print """
C++ compiler is required to compile DMFT software.
Please use --cxx flag or edit configure.py file."""
sys.exit()
if((self.config.mpi_define != "") and ( (self.config.pcc =="") or (self.config.pfc == "") or (self.config.pcxx == "") )):
# check if MPI compilers are provided
print """
MPI Fortran, C and C++ compilers are required to compile DMFT software
with non empty mpi_define="""+self.config.mpi_define+""" flag.
Please use --pfc, --pcc and --pcxx flags or edit configure.py file
and specify missing parameters.\n"""
sys.exit()
if(self.config.mpi_define == ""):
self.config.pcc = self.config.cc
self.config.pfc = self.config.fc
self.config.pcxx = self.config.cxx
if self.config.prefix == "" :
self.config.prefix = "./install"
self.config.prefix = fixpaths(self.config.prefix)
if(not os.path.isdir(self.config.prefix)):
print"Creating directory", self.config.prefix
os.mkdir(self.config.prefix)
print 'Install directory is...', self.config.prefix
if self.build == "":
self.build = "./scr"
self.build = fixpaths(self.build)
if(not os.path.isdir(self.build)):
print"Creating directory",self.build
os.mkdir(self.build)
print 'Build directory is...', self.build
self.installerdir = os.getcwd()
os.chdir(self.build)
# CUSTOM CHECKS
self.check_cc()
self.check_fc()
self.set_ranlib()
#self.set_download()
self.detect_compilers()
self.detect_blaslibs()
#self.check_linking()
#if self.testing:
#self.set_nbcores()
print 'C compiler is... ', self.config.cc
print 'C flags are... ', self.config.cflags
print 'Fortran compiler is... ', self.config.fc
print 'Fortran flags are... ', self.config.fflags
#print 'Ar flags are... ', self.config.arflags
if (self.downblas > 1) :
print 'BLAS library will to be downloaded and installed )'
else:
print 'BLAS library is... ', self.config.blaslib
if (self.downfftw > 1) :
print 'FFTW library will be downloaded and installed '
else :
print 'FFTW library is... ', self.config.fftwlib
if (self.downgsl > 1) :
print 'GSL library will be downloaded and installed '
else :
print 'GSL library is... ', self.config.gsl
if (self.downlapack == 2) :
print 'Lapack library will to be downloaded and installed )'
elif (self.config.lapacklib == ""):
if (self.downlapack == 1):
print 'LAPACK library is... will check if it is part of Blas Library call and download if it is not'
else:
print 'LAPACK library is... will check if it is part of Blas library call'
else:
self.downlapack = -1
print 'LAPACK library is...', self.config.lapacklib
return
    def usage(self):
        """Print the configure help text: options, current defaults and the
        URLs of the packages expected in <build>/download for offline use."""
        print "*-+-*"*16
        print """
  DMFT configuration script version %d.%d.%d.
  The script will help you to create makefile & compile DMFT code.
  Please provide as much information as you can using flags listed below.
  Alternatively, you can edit "configure.py" file in the current directory.
  -h or --help              : Display this help and exit
  --prefix=[DIR]            : Install files in DIR [%s]
  --build=[DIR]             : Software building DIR [%s]. Contains logs, downloads and builds.
  --fc=[CMD]                : Fortran compiler. [%s]
  --cc=[CMD]                : C compiler. [%s]
  --cxx=[CMD]               : C++ compiler. [%s]
  --cflags=[FLAGS]          : Flags for the C compiler [%s]
  --fflags=[FLAGS]          : Flags for the Fortran compiler [%s]
  --ompflag=[FLAGS]         : Flags for openmp compiler [%s]
  --pfc=[CMD]               : MPI Fortran compiler. [%s]
  --pcc=[CMD]               : MPI C compiler. [%s]
  --pcxx=[CMD]              : MPI C++ compiler. [%s]
  --blaslib=[LIB]           : BLAS library [%s]
  --lapacklib=[LIB]         : Lapack library (if it's not included in BLAS) [%s]
  --fftwlib=[LIB]           : FFTW library [%s]
  --gsl=[LIB]               : GSL library [%s]
  --downblas                : Download and install BLAS.
  --downlapack              : Download and install LAPACK.
  --downfftw                : Download and install FFTW.
  --downgsl                 : Download and install GSL.
  --downall                 : Download and install all missing external libraries.
  If you don't have access to network please provide
  the following packages in directory %s/download:
     http://netlib.org/blas/blas.tgz  -> [blas.tgz]
     http://www.netlib.org/lapack/lapack-%s.tgz  -> [lapack.tgz]
     http://fftw.org/fftw-%s.tar.gz -> [fftw.tgz]
     ftp://ftp.gnu.org/gnu/gsl/gsl-%s.tar.gz -> [gsl.tgz]
  --clean                   : cleans up the installation directory.
  """ % (self.config.version[0], self.config.version[1], self.config.version[2],
         self.config.prefix,os.path.abspath(self.build),
         self.config.fc, self.config.cc,self.config.cxx,
         self.config.cflags,self.config.fflags,self.config.ompflag,
         self.config.pfc, self.config.pcc,self.config.pcxx,
         self.config.blaslib, self.config.lapacklib,self.config.fftwlib,self.config.gsl,
         self.build, "3.6.0","3.3.4","1.16")
        print "*-+-*"*16
def parse_args(self, argv):
""" Parse input argument to get compilers etc. from command line. """
if len(argv) == 1:
print "Let's go"
try:
opts, args = getopt.getopt(argv[1:], "?hvp:b:n:",
["help", "compiler=", "prefix=", "build=",
"cc=", "fc=", "cxx=",
"cflags=", "fflags=", "oflags=", "gflags=",
"pcc=", "pfc=", "pcxx=",
"blaslib=", "lapacklib=", "fttwlib=", "gsl=",
"mpi_define=", "make=",
"downblas", "downlapack","downgsl","downfftw",
"downall", "verbose", "clean", "src"])
except getopt.error, msg:
print msg
print "for help use --help"
sys.exit(2)
if len(args) > 0 :
print 'Unknown arguments : ', args
print "for help use --help"
sys.exit(2);
# process options
for o, a in opts:
if o in ("-h", "--help"):
self.usage()
sys.exit(0)
else:
if o == '--clean':
self.clean = 1
return
elif o in ('-p', '--prefix'):
self.prefix = a
elif o in ('-b', '--build'):
self.build = a
elif o == '--cflags':
self.config.cflags = a
elif o=='--fflags':
self.config.fflags = a
elif o=='--oflags':
self.config.oflags = a
elif o=='--gflags':
self.config.gflags = a
elif o=='--make':
self.make = a
elif o=='--compiler':
self.config.ompiler = a
elif o=='--cc':
self.config.cc = a
elif o=='--fc':
self.config.fc = a
elif o=='--cxx':
self.config.cxx = a
elif o=='--pcc':
self.config.cc = a
elif o=='--pfc':
self.config.fc = a
elif o=='--pcxx':
self.config.cxx = a
elif o == '--downblas':
self.downblas = 2
elif o == '--downlapack':
self.downlapack = 2
elif o == '--downfftw':
self.downfftw = 2
elif o == '--downgsl':
self.downgsl = 2
elif o == '--downall':
self.downblas = max(1, | |
2.01252e-9, 2.01252e-9])
assert_allclose(
get_positional_probabilities(freqs, probs, 50), expected, rtol=1e-5
)
# value that is truely over raises an error
freqs = [51.0000000001, 0.0, 0.0]
probs = [0.33, 0.33, 0.33]
self.assertRaises(ValueError, get_positional_probabilities, freqs, probs, 50)
    def test_sca_input_validation(self):
        """sca_input_validation: handles sca-specific validation steps"""
        # moltype != PROTEIN makes background freqs required
        self.assertRaises(ValueError, sca_input_validation, self.dna_aln, cutoff=0.4)
        self.assertRaises(ValueError, sca_input_validation, self.rna_aln, cutoff=0.4)
        # no cutoff -> ValueError
        self.assertRaises(ValueError, sca_input_validation, self.protein_aln)
        # low cutoff -> ValueError
        self.assertRaises(
            ValueError, sca_input_validation, self.protein_aln, cutoff=-0.001
        )
        # high cutoff -> ValueError
        self.assertRaises(
            ValueError, sca_input_validation, self.protein_aln, cutoff=1.001
        )
        # good cut-off -> no error (boundaries 0.0 and 1.0 are inclusive)
        sca_input_validation(self.protein_aln, cutoff=0.50)
        sca_input_validation(self.protein_aln, cutoff=0.0)
        sca_input_validation(self.protein_aln, cutoff=1.0)
        # only bad alphabet -> ValueError
        self.assertRaises(
            ValueError, sca_input_validation, self.dna_aln, cutoff=0.5, alphabet="ABC"
        )
        # only bad background_freqs -> ValueError
        self.assertRaises(
            ValueError,
            sca_input_validation,
            self.dna_aln,
            cutoff=0.5,
            background_freqs={"A": 0.25, "C": 0.75},
        )
        # incompatible background_freqs & alphabet provided -> ValueError
        self.assertRaises(
            ValueError,
            sca_input_validation,
            self.dna_aln,
            cutoff=0.5,
            alphabet="ABC",
            background_freqs={"A": 0.25, "C": 0.75},
        )
        # default alphabet, background_freqs -> no error
        sca_input_validation(self.protein_aln, cutoff=0.50)
        # compatible non-default alphabet, background_freqs -> no error
        sca_input_validation(
            self.dna_aln, cutoff=0.50, alphabet="A", background_freqs={"A": 1.0}
        )
        # Note: don't need a full set of tests of validate_alphabet here --
        # it's tested on its own.
    def test_sca_pair_no_error(self):
        """sca_pair: returns w/o error"""
        # Smoke test only: both the direct call and the coevolve_pair wrapper
        # must run cleanly; the return values are not inspected.
        r = sca_pair(
            self.dna_aln,
            1,
            0,
            cutoff=0.50,
            alphabet="ACGT",
            background_freqs=self.dna_base_freqs,
        )
        r = coevolve_pair(
            sca_pair,
            self.dna_aln,
            1,
            0,
            cutoff=0.50,
            alphabet="ACGT",
            background_freqs=self.dna_base_freqs,
        )
    def test_sca_pair_return_all(self):
        """sca_pair: handles return_all by returning lists of proper length"""
        # With return_all=True, one (char, value) entry is expected per
        # allowed perturbation of pos1.
        # two allowed_perturbations
        a = "ACGT"
        aln = ArrayAlignment(
            data={0: "AA", 1: "AC", 2: "CG", 3: "CT", 4: "TA"}, moltype=DNA
        )
        actual = sca_pair(
            aln,
            0,
            1,
            cutoff=0.33,
            return_all=True,
            alphabet=a,
            background_freqs=self.dna_base_freqs,
        )
        self.assertEqual(len(actual), 2)
        self.assertEqual(actual[0][0], "A")
        self.assertEqual(actual[1][0], "C")
        # one allowed_perturbations
        a = "ACGT"
        aln = ArrayAlignment(
            data={0: "AA", 1: "AC", 2: "AG", 3: "CT", 4: "TA"}, moltype=DNA
        )
        actual = sca_pair(
            aln,
            0,
            1,
            0.33,
            return_all=True,
            alphabet=a,
            background_freqs=self.dna_base_freqs,
        )
        self.assertEqual(len(actual), 1)
        self.assertEqual(actual[0][0], "A")
        # zero allowed_perturbations -> the null value, not a list
        actual = sca_pair(
            aln,
            0,
            1,
            1.0,
            return_all=True,
            alphabet=a,
            background_freqs=self.dna_base_freqs,
        )
        # expected = [('A',-1),('C',-1)]
        expected = DEFAULT_NULL_VALUE
        assert_allclose(actual, expected)
        # pos1 == pos2
        actual = sca_pair(
            aln,
            0,
            0,
            0.33,
            return_all=True,
            alphabet=a,
            background_freqs=self.dna_base_freqs,
        )
        actual = list(zip(*actual))
        expected = list(zip(*[("A", 2.40381185618)]))
        assert_equal(actual[0], expected[0])
        assert_allclose(actual[1], expected[1])
    def test_sca_pair_error(self):
        """sca_pair:returns w/ error when appropriate"""
        # Every invalid argument combination must raise ValueError when
        # routed through the coevolve_pair wrapper.
        a = "ACGT"
        # pos1 out of range
        self.assertRaises(
            ValueError,
            coevolve_pair,
            sca_pair,
            self.dna_aln,
            100,
            1,
            cutoff=0.50,
            alphabet=a,
            background_freqs=self.dna_base_freqs,
        )
        # pos2 out of range
        self.assertRaises(
            ValueError,
            coevolve_pair,
            sca_pair,
            self.dna_aln,
            0,
            100,
            cutoff=0.50,
            alphabet=a,
            background_freqs=self.dna_base_freqs,
        )
        # pos1 & pos2 out of range
        self.assertRaises(
            ValueError,
            coevolve_pair,
            sca_pair,
            self.dna_aln,
            100,
            100,
            cutoff=0.50,
            alphabet=a,
            background_freqs=self.dna_base_freqs,
        )
        # bad cut-off
        self.assertRaises(
            ValueError,
            coevolve_pair,
            sca_pair,
            self.dna_aln,
            0,
            1,
            cutoff=1.2,
            alphabet=a,
            background_freqs=self.dna_base_freqs,
        )
        # incompatible alphabet and background freqs
        self.assertRaises(
            ValueError,
            coevolve_pair,
            sca_pair,
            self.dna_aln,
            0,
            1,
            cutoff=0.2,
            alphabet=a,
        )
        self.assertRaises(
            ValueError,
            coevolve_pair,
            sca_pair,
            self.dna_aln,
            0,
            1,
            cutoff=0.2,
            alphabet="ACGTBC",
            background_freqs=self.dna_base_freqs,
        )
    def test_sca_position_no_error(self):
        """sca_position: returns w/o error"""
        # The third positional argument of assert_allclose below is rtol.
        r = sca_position(
            self.dna_aln, 1, 0.50, alphabet="ACGT", background_freqs=self.dna_base_freqs
        )
        # sanity check -- coupling w/ self
        assert_allclose(r[1], 3.087, 0.01)
        r = sca_position(
            self.dna_aln_gapped,
            1,
            0.50,
            alphabet="ACGT",
            background_freqs=self.dna_base_freqs,
        )
        assert_allclose(r[1], 3.387, 0.01)
        # same tests, but called via coevolve_position
        r = coevolve_position(
            sca_position,
            self.dna_aln,
            1,
            cutoff=0.50,
            alphabet="ACGT",
            background_freqs=self.dna_base_freqs,
        )
        # sanity check -- coupling w/ self
        assert_allclose(r[1], 3.087, 0.01)
        r = coevolve_position(
            sca_position,
            self.dna_aln_gapped,
            1,
            cutoff=0.50,
            alphabet="ACGT",
            background_freqs=self.dna_base_freqs,
        )
        # sanity check -- coupling w/ self
        assert_allclose(r[1], 3.387, 0.01)
    def test_sca_position_error(self):
        """sca_position: returns w/ error when appropriate"""
        # Invalid position, cutoff, or alphabet/background mismatch must all
        # surface as ValueError through the coevolve_position wrapper.
        a = "ACGT"
        # position out of range
        self.assertRaises(
            ValueError,
            coevolve_position,
            sca_position,
            self.dna_aln,
            100,
            cutoff=0.50,
            alphabet=a,
            background_freqs=self.dna_base_freqs,
        )
        # bad cutoff
        self.assertRaises(
            ValueError,
            coevolve_position,
            sca_position,
            self.dna_aln,
            1,
            cutoff=-8.2,
            alphabet=a,
            background_freqs=self.dna_base_freqs,
        )
        # incompatible alphabet and background freqs
        self.assertRaises(
            ValueError,
            coevolve_position,
            sca_position,
            self.dna_aln,
            0,
            cutoff=0.2,
            alphabet=a,
        )
        self.assertRaises(
            ValueError,
            coevolve_position,
            sca_position,
            self.dna_aln,
            0,
            cutoff=0.2,
            alphabet="ACGTBC",
            background_freqs=self.dna_base_freqs,
        )
    def test_sca_position_returns_same_as_sca_pair(self):
        """sca_position: returns same as sca_pair called on each pos"""
        # sca_position(aln, p, ...) must equal [sca_pair(aln, p, i, ...) for
        # every column i], including when non-default kwargs are forwarded.
        expected = []
        for i in range(len(self.dna_aln)):
            expected.append(
                sca_pair(
                    self.dna_aln,
                    1,
                    i,
                    0.50,
                    alphabet="ACGT",
                    background_freqs=self.dna_base_freqs,
                )
            )
        actual = sca_position(
            self.dna_aln, 1, 0.50, alphabet="ACGT", background_freqs=self.dna_base_freqs
        )
        assert_allclose(actual, expected)
        # change some of the defaults to make sure they make it through
        bg_freqs = {"A": 0.50, "C": 0.50}
        expected = []
        for i in range(len(self.dna_aln)):
            expected.append(
                sca_pair(
                    self.dna_aln,
                    1,
                    i,
                    0.50,
                    alphabet="AC",
                    null_value=52.0,
                    scaled_aln_size=20,
                    background_freqs=bg_freqs,
                )
            )
        actual = sca_position(
            self.dna_aln,
            1,
            0.50,
            alphabet="AC",
            null_value=52.0,
            scaled_aln_size=20,
            background_freqs=bg_freqs,
        )
        assert_allclose(actual, expected)
    def test_sca_alignment_no_error(self):
        """sca_alignment: returns w/o error"""
        # Smoke test plus a pinned self-coupling value at [0][0].
        r = sca_alignment(
            self.dna_aln, 0.50, alphabet="ACGT", background_freqs=self.dna_base_freqs
        )
        # sanity check -- coupling w/ self
        assert_allclose(r[0][0], 2.32222608171)
        # same test, but called via coevolve_alignment
        r = coevolve_alignment(
            sca_alignment,
            self.dna_aln,
            cutoff=0.50,
            alphabet="ACGT",
            background_freqs=self.dna_base_freqs,
        )
        # sanity check -- coupling w/ self
        assert_allclose(r[0][0], 2.32222608171)
def test_sca_alignment_error(self):
"""sca_alignment: returns w/ error when appropriate"""
a = "ACGT"
# incompatible alphabet and background freqs
self.assertRaises(
ValueError,
coevolve_position,
sca_position,
self.dna_aln,
0,
cutoff=0.2,
alphabet=a,
)
self.assertRaises(
ValueError,
coevolve_position,
sca_position,
self.dna_aln,
0,
cutoff=0.2,
alphabet="ACGTBC",
background_freqs=self.dna_base_freqs,
)
    def test_sca_alignment_returns_same_as_sca_position(self):
        """sca_alignment: returns same as sca_position on every position"""
        # Row i of the alignment result must match sca_position for column i,
        # including when non-default kwargs are forwarded.
        expected = []
        for i in range(len(self.dna_aln)):
            expected.append(
                sca_position(
                    self.dna_aln,
                    i,
                    0.50,
                    alphabet="ACGT",
                    background_freqs=self.dna_base_freqs,
                )
            )
        actual = sca_alignment(
            self.dna_aln, 0.50, alphabet="ACGT", background_freqs=self.dna_base_freqs
        )
        assert_allclose(actual, expected)
        # change some of the defaults to make sure they make it through
        bg_freqs = {"A": 0.50, "C": 0.50}
        expected = []
        for i in range(len(self.dna_aln)):
            expected.append(
                sca_position(
                    self.dna_aln,
                    i,
                    0.50,
                    alphabet="AC",
                    null_value=52.0,
                    scaled_aln_size=20,
                    background_freqs=bg_freqs,
                )
            )
        actual = sca_alignment(
            self.dna_aln,
            0.50,
            alphabet="AC",
            null_value=52.0,
            scaled_aln_size=20,
            background_freqs=bg_freqs,
        )
        assert_allclose(actual, expected)
    def test_sca_pair_gpcr(self):
        """sca_pair: reproduces several GPCR data from Suel et al., 2003"""
        # Third positional arg is the cutoff; the trailing 0.1 is rtol.
        assert_allclose(sca_pair(self.gpcr_aln, 295, 18, 0.32), 0.12, 0.1)
        assert_allclose(sca_pair(self.gpcr_aln, 295, 124, 0.32), 1.86, 0.1)
        assert_allclose(sca_pair(self.gpcr_aln, 295, 304, 0.32), 0.3, 0.1)
        # covariation w/ self
        assert_allclose(sca_pair(self.gpcr_aln, 295, 295, 0.32), 7.70358628)
    def test_sca_position_gpcr(self):
        """sca_position: reproduces several GPCR data from Suel et al., 2003"""
        # Gated behind the run_slow_tests flag -- scanning a full alignment
        # is expensive.
        if not self.run_slow_tests:
            return
        vector = sca_position(self.gpcr_aln, 295, 0.32)
        assert_allclose(vector[18], 0.12, 0.1)
        assert_allclose(vector[124], 1.86, 0.1)
        assert_allclose(vector[304], 0.3, 0.1)
        # covariation w/ self == null_value
        assert_allclose(vector[295], nan)
    def test_ltm_to_symmetric(self):
        """ltm_to_symmetric: making ltm matrices symmetric functions"""
        # The lower triangle is mirrored into the upper triangle.
        m = arange(9).reshape((3, 3))
        expected = [[0, 3, 6], [3, 4, 7], [6, 7, 8]]
        assert_equal(ltm_to_symmetric(m), expected)
        # non-square matrices not supported
        self.assertRaises(AssertionError, ltm_to_symmetric, arange(10).reshape(5, 2))
        self.assertRaises(AssertionError, ltm_to_symmetric, arange(10).reshape(2, 5))
    def test_merge_alignments(self):
        """merging alignments of same moltype functions as expected"""
        # Sequences are concatenated per name; the shared moltype is kept.
        # PROTEIN
        aln1 = ArrayAlignment(data={"1": "AC", "2": "AD"}, moltype=PROTEIN)
        aln2 = ArrayAlignment(data={"1": "EF", "2": "EG"}, moltype=PROTEIN)
        combined_aln = ArrayAlignment(data={"1": "ACEF", "2": "ADEG"}, moltype=PROTEIN)
        actual = merge_alignments(aln1, aln2)
        self.assertEqual(actual, combined_aln)
        self.assertEqual(actual.moltype, PROTEIN)
        # RNA
        aln1 = ArrayAlignment(data={"1": "AC", "2": "AU"}, moltype=RNA)
        aln2 = ArrayAlignment(data={"1": "GG", "2": "UG"}, moltype=RNA)
        combined_aln = ArrayAlignment(data={"1": "ACGG", "2": "AUUG"}, moltype=RNA)
        actual = merge_alignments(aln1, aln2)
        self.assertEqual(actual, combined_aln)
        self.assertEqual(actual.moltype, RNA)
        # DNA
        aln1 = ArrayAlignment(data={"1": "AC", "2": "AT"}, moltype=DNA)
        aln2 = ArrayAlignment(data={"1": "GG", "2": "TG"}, moltype=DNA)
        combined_aln = ArrayAlignment(data={"1": "ACGG", "2": "ATTG"}, moltype=DNA)
        actual = merge_alignments(aln1, aln2)
        self.assertEqual(actual, combined_aln)
        self.assertEqual(actual.moltype, DNA)
    def test_merge_alignments_ignores_id_following_plus(self):
        """merge_alignments ignores all seq id characters after '+'"""
        # Ids are matched on the part before "+" (surrounding spaces too).
        aln1 = ArrayAlignment(data={"1+a": "AC", "2+b": "AD"}, moltype=PROTEIN)
        aln2 = ArrayAlignment(data={"1 + c": "EFW", "2 + d": "EGY"}, moltype=PROTEIN)
        combined_aln = ArrayAlignment(
            data={"1": "ACEFW", "2": "ADEGY"}, moltype=PROTEIN
        )
        self.assertEqual(merge_alignments(aln1, aln2), combined_aln)
        # not all ids have a +
        aln1 = ArrayAlignment(data={"1": "AC", "2+b": "AD"}, moltype=PROTEIN)
        aln2 = ArrayAlignment(data={"1+c": "EFW", "2": "EGY"}, moltype=PROTEIN)
        combined_aln = ArrayAlignment(
            data={"1": "ACEFW", "2": "ADEGY"}, moltype=PROTEIN
        )
        self.assertEqual(merge_alignments(aln1, aln2), combined_aln)
    def test_merge_alignments_different_moltype(self):
        """merging alignments of different moltype functions as expected"""
        # Expected alignments are built without an explicit moltype.
        aln1 = ArrayAlignment(data={"1": "AC", "2": "AU"}, moltype=RNA)
        aln2 = ArrayAlignment(data={"1": "EF", "2": "EG"}, moltype=PROTEIN)
        combined_aln = ArrayAlignment(data={"1": "ACEF", "2": "AUEG"})
        self.assertEqual(merge_alignments(aln1, aln2), combined_aln)
        aln1 = ArrayAlignment(data={"1": "AC", "2": "AT"}, moltype=DNA)
        aln2 = ArrayAlignment(data={"1": "EF", "2": "EG"}, moltype=PROTEIN)
        combined_aln = ArrayAlignment(data={"1": "ACEF", "2": "ATEG"})
        self.assertEqual(merge_alignments(aln1, aln2), combined_aln)
        aln1 = ArrayAlignment(data={"1": "AC", "2": "AT"}, moltype=DNA)
        aln2 = ArrayAlignment(data={"1": "UC", "2": "UG"}, moltype=RNA)
        combined_aln = ArrayAlignment(data={"1": "ACUC", "2": "ATUG"})
        self.assertEqual(merge_alignments(aln1, aln2), combined_aln)
    def test_n_random_seqs(self):
        """n_random_seqs: functions as expected"""
        aln1 = make_aligned_seqs(
            data=list(zip(list("abcd"), ["AA", "AC", "DD", "GG"])),
            moltype=PROTEIN,
            array_align=True,
        )
        # Number of returned sequences correct
        self.assertEqual(n_random_seqs(aln1, 1).num_seqs, 1)
        self.assertEqual(n_random_seqs(aln1, 2).num_seqs, 2)
        self.assertEqual(n_random_seqs(aln1, 3).num_seqs, 3)
        self.assertEqual(n_random_seqs(aln1, 4).num_seqs, 4)
        # Sequences are correct: each sampled name maps to its original seq.
        new_aln = n_random_seqs(aln1, 3)
        self.assertEqual(new_aln.num_seqs, 3)
        for n in new_aln.names:
            self.assertEqual(new_aln.get_seq(n), aln1.get_seq(n))
# | |
ะณะดะต
ะบะฐะถะดะฐั ัััะพะบะฐ - ะทะฝะฐัะตะฝะธะต ััะพะณะพ ะฟะพะปั. ะ ะธัะพะณะต ะทะฐะฟัะพั
ะฒะตัะฝะตั ัะพะปัะบะพ ัะต ะปะพัั, ะทะฝะฐัะตะฝะธั ะฟะพะปะตะน ะบะพัะพััั
,
ัะพะพัะฒะตัััะฒััั ะดะฐะฝะฝะพะน ัะธะปัััะฐัะธะธ. ะขะพ ะตััั, ะดะปั ะบะฐะถะดะพะณะพ
ะบะปััะฐ, ะฑัะดัั ะพััะธะปัััะพะฒะฐะฝั ัะต ะปะพัั, ะทะฝะฐัะตะฝะธั
ัะพะพัะฒะตัััะฒัััะตะณะพ ะฟะพะปั ะบะพัะพัะพะณะพ ะฝะต ะฝะฐั
ะพะดะธััั ะฒ
ัะฟะธัะบะต-ะทะฝะฐัะตะฝะธะธ. '''
try:
request_json = RestAPI.request_data_to_json(request.data)
except APIExceptions.NoJsonError:
lot_filter = {}
else:
lot_filter = request_json['filter'] if 'filter' in request_json else {}
lot_list = LotListGatherer(lot_filter)
return jsonify(lot_list.get_all_approved_lots()), 200
    @staticmethod
    @route('lots', methods=['POST'])
    @User.login_required
    @weighted(weight=1)
    def create_lot():
        '''Create a new lot.

        The lot is automatically bound to the current user.  By default the
        newly created lot is sent straight to moderation, where it awaits
        review.'''
        request_json = RestAPI.request_data_to_json(request.data)
        # Every field listed here must be present in the request body.
        data_required = [
            'name',
            'amount',
            'currency',
            'term',
            'return_way',
            'security',
            'percentage',
            'form',
            'commentary'
        ]
        RestAPI.check_required_fields(request_json, data_required)
        RestAPI.check_fields_values(request_json, "lot")
        user = User.current()
        # Field order in data_required matches User.create_lot's parameters.
        return jsonify({
            'lot_id': user.create_lot(*[request_json[data] for data in data_required])
        }), 201
    @staticmethod
    @route('lots/<int:lot_id>', methods=['GET'])
    @weighted(weight=1)
    def get_lot(lot_id):
        '''Get data about a lot.

        Returns the data of one specific lot by its id, as the same standard
        lot dictionary used in any lot listing.'''
        return jsonify(Lot(lot_id).get_lot_data()), 200
    @staticmethod
    @route('lots/<int:lot_id>', methods=['PUT'])
    @User.login_required
    @weighted(weight=3)
    def update_lot(lot_id):
        '''Update lot data.

        Changes some of the lot's fields.  Only the lot's creator may edit
        it.  After editing, a previously approved lot becomes unapproved
        again and is sent back to moderation.'''
        lot = Lot(lot_id)
        user = User.current()
        if not lot.can_user_edit(user):
            raise APIExceptions.NoPermissionError()
        request_json = RestAPI.request_data_to_json(request.data)
        # Only these fields may be updated; any present in the request wins.
        data_available = [
            'name',
            'amount',
            'currency',
            'term',
            'return_way',
            'security',
            'percentage',
            'form',
            'commentary'
        ]
        RestAPI.check_fields_values(request_json, "lot")
        for data in data_available:
            if data in request_json:
                lot.update_data(data, request_json[data])
        # NOTE(review): 201 ("Created") for an update -- confirm whether 200
        # was intended; other mutating endpoints here also return 201.
        return RestAPI.message('A lot is changed.'), 201
    @staticmethod
    @route('lots/<int:lot_id>', methods=['DELETE'])
    @User.login_required
    @weighted(weight=2)
    def delete_lot(lot_id):
        '''Delete a lot.

        Moves the lot to the archive.  An archived lot can later be deleted
        permanently or restored.  Only the lot's creator may perform this
        operation.'''
        lot = Lot(lot_id)
        user = User.current()
        if not lot.can_user_edit(user):
            raise APIExceptions.NoPermissionError()
        lot.delete_lot()
        return RestAPI.message('A lot is deleted.'), 201
    @staticmethod
    @route('lots/<int:lot_id>', methods=['POST'])
    @User.login_required
    @weighted(weight=2)
    def restore_lot(lot_id):
        '''Restore a deleted lot.

        If the lot is currently in the archive, restores it.  Only the lot's
        creator may perform this operation.'''
        lot = Lot(lot_id)
        user = User.current()
        if not lot.can_user_edit(user):
            raise APIExceptions.NoPermissionError()
        lot.restore_lot()
        return RestAPI.message('A lot is restored.'), 201
    @staticmethod
    @route('lots/<int:lot_id>/photos', methods=['GET'])
    @weighted(weight=2)
    def get_lot_photos(lot_id):
        '''Get the list of a lot's photos.

        Returns the list of links to the lot's photos by its id.'''
        return jsonify({'link': Lot(lot_id).get_photos()}), 200
    @staticmethod
    @route('lots/<int:lot_id>/photos', methods=['POST'])
    @User.login_required
    @weighted(weight=3)
    def add_lot_photo(lot_id):
        '''Add a photo to a lot.

        Uploads new photos for the lot.  Doing this on an approved lot sends
        it back to moderation.  Only the lot's creator may perform this
        operation.'''
        lot = Lot(lot_id)
        user = User.current()
        if not lot.can_user_edit(user):
            raise APIExceptions.NoPermissionError()
        # Accept every uploaded file; map each filename to its stored link.
        resp = {filename: lot.add_photo(request.files[filename]) for filename in request.files}
        return jsonify(resp), 201
@staticmethod
@route('lots/<int:lot_id>/photos/<int:photo_id>', methods=['DELETE'])
@User.login_required
@weighted(weight=2)
def remove_lot_photo(lot_id, photo_id):
''' ะฃะดะฐะปะธัั ัะพัะพะณัะฐัะธั ะปะพัะฐ.
ะฃะดะฐะปัะตั ัะพัะพะณัะฐัะธั ะปะพัะฐ ะฟะพ ะตะต ะฟะพััะดะบะพะฒะพะผั ะฝะพะผะตัั.
ะญัะพ ะดะตะนััะฒะธะต ะฝะฐ ะฟะพะดัะฒะตัะถะดะตะฝะฝะพะผ ะปะพัะต ะฟัะธะฒะตะดะตั
ะบ ะตะณะพ ะฟะพะฒัะพัะฝะพะน ะผะพะดะตัะฐัะธะธ.
ะญัั ะพะฟะตัะฐัะธั ะผะพะถะตั ะดะตะปะฐัั ัะพะปัะบะพ ัะพะทะดะฐัะตะปั ะปะพัะฐ.'''
lot = Lot(lot_id)
user = User.current()
if not lot.can_user_edit(user):
raise APIExceptions.NoPermissionError()
return jsonify(lot.remove_photo(photo_id)), 201
@staticmethod
@route('lots/favorites/<int:lot_id>', methods=['PUT'])
@User.login_required
@weighted(weight=1)
def add_favorite_lot(lot_id):
''' ะะพะฑะฐะฒะธัั ะปะพั ะฒ ะธะทะฑัะฐะฝะฝะพะต.
ะะพะฑะฐะฒะปัะตั ะฒัะฑัะฐะฝะฝัะน ะปะพั ะฒ ัะฟะธัะพะบ ะธะทะฑัะฐะฝะฝัั
ะปะพัะพะฒ ะฟะพะปัะทะพะฒะฐัะตะปั.'''
user = User.current()
user.add_lot_to_favorites(lot_id)
return RestAPI.message('A lot is added to favorites.'), 201
@staticmethod
@route('lots/favorites/<int:lot_id>', methods=['DELETE'])
@User.login_required
@weighted(weight=1)
def remove_favorite_lot(lot_id):
''' ะฃะดะฐะปะธัั ะปะพั ะธะท ะธะทะฑัะฐะฝะฝัั
.
ะฃะดะฐะปัะตั ะฒัะฑัะฐะฝะฝัะน ะปะพั ะธะท ัะฟะธัะบะฐ ะธะทะฑัะฐะฝะฝัั
ะปะพัะพะฒ ะฟะพะปัะทะพะฒะฐัะตะปั.'''
user = User.current()
user.remove_lot_from_favorites(lot_id)
return RestAPI.message('A lot is removed from favorites.'), 201
@staticmethod
@route('lots/favorites', methods=['GET', 'POST'])
@User.login_required
@weighted(weight=3)
def get_favorite_lots():
''' ะะพะปััะธัั ัะฟะธัะพะบ ะธะทะฑัะฐะฝะฝัั
ะปะพัะพะฒ.
ะะพะทะฒัะฐัะฐะตั ัะฟะธัะพะบ ะธะทะฑัะฐะฝะฝัั
ะปะพัะพะฒ ะฟะพะปัะทะพะฒะฐัะตะปั.
ะะฐะบ ะธ ะฒ ะปัะฑะพะผ ะดััะณะพะผ ัะฟะธัะบะต ะปะพัะพะฒ, ััั
ะฟัะธัััััะฒัะตั ัะธะปัััะฐัะธั.
ะญัะพ ะพะทะฝะฐัะฐะตั, ััะพ ะธัะฟะพะปัะทัั ะผะตัะพะด POST
(ะดะปั ะฝะตะบะพัะพััั
ะทะฐะฟัะพัะพะฒ, ะพะทะฝะฐัะฐะตั ััะพ-ัะพ ะดััะณะพะต)
ะผะพะถะฝะพ ะพัะฟัะฐะฒะธัั ะฟะฐัะฐะผะตัั "filter", ะฒ ะบะพัะพัะพะผ
ะฝัะถะฝะพ ะฟะตัะตะดะฐัั ัะปะพะฒะฐัั, ะฟะพะดะดะตัะถะธะฒะฐััะธะน
ัะปะตะดัััะธะต ะทะฝะฐัะตะฝะธั:
"limit" - ัะธัะปะพ ะปะพัะพะฒ ะฒ ัะฟะธัะบะต, ะผะฐะบัะธะผัะผ 1000.
ะะพ ัะผะพะปัะฐะฝะธั - 1000.
"offset" - ะฝะพะผะตั ะฟะตัะฒะพะณะพ ะปะพัะฐ ะฒ ัะฟะธัะบะต
(ะพััััะฟ ะพั ะฝะฐัะฐะปะฐ). ะะพ ัะผะพะปัะฐะฝะธั - 0.
"order_by" - ะธะผั ะฟะพะปั, ะฟะพ ะบะพัะพัะพะผั ะฝะตะพะฑั
ะพะดะธะผะพ
ัะพััะธัะพะฒะฐัั ัะฟะธัะพะบ.
"order_type" - ัะธะฟ ัะพััะธัะพะฒะบะธ, "ASC" ะธะปะธ "DESC".
ะะพ ัะผะพะปัะฐะฝะธั - "ASC".
"show_only" - ัะปะพะฒะฐัั, ะฒ ะบะพัะพัะพะผ ะบะปััะธ - ะธะผะตะฝะฐ ะฟะพะปะตะน,
ะฟะพ ะบะพัะพััะผ ะฝัะถะฝะพ ะดะตะปะฐัั ัะธะปัััะฐัะธั. ะะพะดะดะตัะถะธะฒะฐัััั
ัะพะปัะบะพ ัะต ะฟะพะปั, ะบะพัะพััะต ะธะผะตัั ะฒ ะฝะฐัััะพะนะบะฐั
ัะพัะผะฐั List[str]. ะะฝะฐัะตะฝะธะต ะถะต - ัะฟะธัะพะบ ัััะพะบ, ะณะดะต
ะบะฐะถะดะฐั ัััะพะบะฐ - ะทะฝะฐัะตะฝะธะต ััะพะณะพ ะฟะพะปั. ะ ะธัะพะณะต ะทะฐะฟัะพั
ะฒะตัะฝะตั ัะพะปัะบะพ ัะต ะปะพัั, ะทะฝะฐัะตะฝะธั ะฟะพะปะตะน ะบะพัะพััั
,
ัะพะพัะฒะตัััะฒััั ะดะฐะฝะฝะพะน ัะธะปัััะฐัะธะธ. ะขะพ ะตััั, ะดะปั ะบะฐะถะดะพะณะพ
ะบะปััะฐ, ะฑัะดัั ะพััะธะปัััะพะฒะฐะฝั ัะต ะปะพัั, ะทะฝะฐัะตะฝะธั
ัะพะพัะฒะตัััะฒัััะตะณะพ ะฟะพะปั ะบะพัะพัะพะณะพ ะฝะต ะฝะฐั
ะพะดะธััั ะฒ
ัะฟะธัะบะต-ะทะฝะฐัะตะฝะธะธ.'''
try:
request_json = RestAPI.request_data_to_json(request.data)
except APIExceptions.NoJsonError:
lot_filter = {}
else:
lot_filter = request_json['filter'] if 'filter' in request_json else {}
user = User.current()
lot_list = UsersLotListGatherer(user, lot_filter)
return jsonify(lot_list.get_favorites()), 200
@staticmethod
@route('lots/personal', methods=['GET', 'POST'])
@route('lots/personal/current', methods=['GET', 'POST'])
@User.login_required
@weighted(weight=3)
def get_personal_lots():
''' ะะพะปััะธัั ัะฟะธัะพะบ ัะฒะพะธั
ะปะพัะพะฒ.
ะะพะทะฒัะฐัะฐะตั ัะฟะธัะพะบ ัะตะบััะธั
ะปะพัะพะฒ ะฟะพะปัะทะพะฒะฐัะตะปั.
ะ ััะพะผ ัะฟะธัะบะต ะพััััััะฒััั ัะต ะปะพัั, ะบะพัะพััะต
ัะฒะปััััั ะทะฐะฐัั
ะธะฒะธัะพะฒะฐะฝะฝัะผะธ, ะฝะฐัะตะดัะธะผะธ ัะฟะพะฝัะพัะฐ
ะธะปะธ ะทะฐะฒะตััะตะฝะฝัะผะธ.
ะะฐะบ ะธ ะฒ ะปัะฑะพะผ ะดััะณะพะผ ัะฟะธัะบะต ะปะพัะพะฒ, ััั
ะฟัะธัััััะฒัะตั ัะธะปัััะฐัะธั.
ะญัะพ ะพะทะฝะฐัะฐะตั, ััะพ ะธัะฟะพะปัะทัั ะผะตัะพะด POST
(ะดะปั ะฝะตะบะพัะพััั
ะทะฐะฟัะพัะพะฒ, ะพะทะฝะฐัะฐะตั ััะพ-ัะพ ะดััะณะพะต)
ะผะพะถะฝะพ ะพัะฟัะฐะฒะธัั ะฟะฐัะฐะผะตัั "filter", ะฒ ะบะพัะพัะพะผ
ะฝัะถะฝะพ ะฟะตัะตะดะฐัั ัะปะพะฒะฐัั, ะฟะพะดะดะตัะถะธะฒะฐััะธะน
ัะปะตะดัััะธะต ะทะฝะฐัะตะฝะธั:
"limit" - ัะธัะปะพ ะปะพัะพะฒ ะฒ ัะฟะธัะบะต, ะผะฐะบัะธะผัะผ 1000.
ะะพ ัะผะพะปัะฐะฝะธั - 1000.
"offset" - ะฝะพะผะตั ะฟะตัะฒะพะณะพ ะปะพัะฐ ะฒ ัะฟะธัะบะต
(ะพััััะฟ ะพั ะฝะฐัะฐะปะฐ). ะะพ ัะผะพะปัะฐะฝะธั - 0.
"order_by" - ะธะผั ะฟะพะปั, ะฟะพ ะบะพัะพัะพะผั ะฝะตะพะฑั
ะพะดะธะผะพ
ัะพััะธัะพะฒะฐัั ัะฟะธัะพะบ.
"order_type" - ัะธะฟ ัะพััะธัะพะฒะบะธ, "ASC" ะธะปะธ "DESC".
ะะพ ัะผะพะปัะฐะฝะธั - "ASC".
"show_only" - ัะปะพะฒะฐัั, ะฒ ะบะพัะพัะพะผ ะบะปััะธ - ะธะผะตะฝะฐ ะฟะพะปะตะน,
ะฟะพ ะบะพัะพััะผ ะฝัะถะฝะพ ะดะตะปะฐัั ัะธะปัััะฐัะธั. ะะพะดะดะตัะถะธะฒะฐัััั
ัะพะปัะบะพ ัะต ะฟะพะปั, ะบะพัะพััะต ะธะผะตัั ะฒ ะฝะฐัััะพะนะบะฐั
ัะพัะผะฐั List[str]. ะะฝะฐัะตะฝะธะต ะถะต - ัะฟะธัะพะบ ัััะพะบ, ะณะดะต
ะบะฐะถะดะฐั ัััะพะบะฐ - ะทะฝะฐัะตะฝะธะต ััะพะณะพ ะฟะพะปั. ะ ะธัะพะณะต ะทะฐะฟัะพั
ะฒะตัะฝะตั ัะพะปัะบะพ ัะต ะปะพัั, ะทะฝะฐัะตะฝะธั ะฟะพะปะตะน ะบะพัะพััั
,
ัะพะพัะฒะตัััะฒััั ะดะฐะฝะฝะพะน ัะธะปัััะฐัะธะธ. ะขะพ ะตััั, ะดะปั ะบะฐะถะดะพะณะพ
ะบะปััะฐ, ะฑัะดัั ะพััะธะปัััะพะฒะฐะฝั ัะต ะปะพัั, ะทะฝะฐัะตะฝะธั
ัะพะพัะฒะตัััะฒัััะตะณะพ ะฟะพะปั ะบะพัะพัะพะณะพ ะฝะต ะฝะฐั
ะพะดะธััั ะฒ
ัะฟะธัะบะต-ะทะฝะฐัะตะฝะธะธ.'''
try:
request_json = RestAPI.request_data_to_json(request.data)
except APIExceptions.NoJsonError:
lot_filter = {}
else:
lot_filter = request_json['filter'] if 'filter' in request_json else {}
user = User.current()
lot_list = UsersLotListGatherer(user, lot_filter)
return jsonify(lot_list.get_personal()), 200
@staticmethod
@route('lots/personal/taken', methods=['GET', 'POST'])
@User.login_required
@weighted(weight=3)
def get_personal_taken_lots():
''' ะะพะปััะธัั ัะฟะธัะพะบ ัะฒะพะธั
ะปะพัะพะฒ, ะฝะฐัะตะดัะธั
ัะธะฝะฐะฝัะธัะพะฒะฐะฝะธะต.
ะะพะทะฒัะฐัะฐะตั ัะฟะธัะพะบ ะปะพัะพะฒ ะฟะพะปัะทะพะฒะฐัะตะปั, ะบะพัะพััะต
ัะถะต ะฝะฐัะปะธ ัะธะฝะฐะฝัะธัะพะฒะฐะฝะธะต.
ะะฐะบ ะธ ะฒ ะปัะฑะพะผ ะดััะณะพะผ ัะฟะธัะบะต ะปะพัะพะฒ, ััั
ะฟัะธัััััะฒัะตั ัะธะปัััะฐัะธั.
ะญัะพ ะพะทะฝะฐัะฐะตั, ััะพ ะธัะฟะพะปัะทัั ะผะตัะพะด POST
(ะดะปั ะฝะตะบะพัะพััั
ะทะฐะฟัะพัะพะฒ, ะพะทะฝะฐัะฐะตั ััะพ-ัะพ ะดััะณะพะต)
ะผะพะถะฝะพ ะพัะฟัะฐะฒะธัั ะฟะฐัะฐะผะตัั "filter", ะฒ ะบะพัะพัะพะผ
ะฝัะถะฝะพ ะฟะตัะตะดะฐัั ัะปะพะฒะฐัั, ะฟะพะดะดะตัะถะธะฒะฐััะธะน
ัะปะตะดัััะธะต ะทะฝะฐัะตะฝะธั:
"limit" - ัะธัะปะพ ะปะพัะพะฒ ะฒ ัะฟะธัะบะต, ะผะฐะบัะธะผัะผ 1000.
ะะพ ัะผะพะปัะฐะฝะธั - 1000.
"offset" - ะฝะพะผะตั ะฟะตัะฒะพะณะพ ะปะพัะฐ ะฒ ัะฟะธัะบะต
(ะพััััะฟ ะพั ะฝะฐัะฐะปะฐ). ะะพ ัะผะพะปัะฐะฝะธั - 0.
"order_by" - ะธะผั ะฟะพะปั, ะฟะพ ะบะพัะพัะพะผั ะฝะตะพะฑั
ะพะดะธะผะพ
ัะพััะธัะพะฒะฐัั ัะฟะธัะพะบ.
"order_type" - ัะธะฟ ัะพััะธัะพะฒะบะธ, "ASC" ะธะปะธ "DESC".
ะะพ ัะผะพะปัะฐะฝะธั - "ASC".
"show_only" - ัะปะพะฒะฐัั, ะฒ ะบะพัะพัะพะผ ะบะปััะธ - ะธะผะตะฝะฐ ะฟะพะปะตะน,
ะฟะพ ะบะพัะพััะผ ะฝัะถะฝะพ ะดะตะปะฐัั ัะธะปัััะฐัะธั. ะะพะดะดะตัะถะธะฒะฐัััั
ัะพะปัะบะพ ัะต ะฟะพะปั, ะบะพัะพััะต ะธะผะตัั ะฒ ะฝะฐัััะพะนะบะฐั
ัะพัะผะฐั List[str]. ะะฝะฐัะตะฝะธะต ะถะต - ัะฟะธัะพะบ ัััะพะบ, ะณะดะต
ะบะฐะถะดะฐั ัััะพะบะฐ - ะทะฝะฐัะตะฝะธะต ััะพะณะพ ะฟะพะปั. ะ ะธัะพะณะต ะทะฐะฟัะพั
ะฒะตัะฝะตั ัะพะปัะบะพ ัะต ะปะพัั, ะทะฝะฐัะตะฝะธั ะฟะพะปะตะน ะบะพัะพััั
,
ัะพะพัะฒะตัััะฒััั ะดะฐะฝะฝะพะน ัะธะปัััะฐัะธะธ. ะขะพ ะตััั, ะดะปั ะบะฐะถะดะพะณะพ
ะบะปััะฐ, ะฑัะดัั ะพััะธะปัััะพะฒะฐะฝั ัะต ะปะพัั, ะทะฝะฐัะตะฝะธั
ัะพะพัะฒะตัััะฒัััะตะณะพ ะฟะพะปั ะบะพัะพัะพะณะพ ะฝะต ะฝะฐั
ะพะดะธััั ะฒ
ัะฟะธัะบะต-ะทะฝะฐัะตะฝะธะธ.'''
try:
request_json = RestAPI.request_data_to_json(request.data)
except APIExceptions.NoJsonError:
lot_filter = {}
else:
lot_filter = request_json['filter'] if 'filter' in request_json else {}
user = User.current()
lot_list = UsersLotListGatherer(user, lot_filter)
return jsonify(lot_list.get_personal_confirmed()), 200
@staticmethod
@route('lots/personal/finished', methods=['GET', 'POST'])
@User.login_required
@weighted(weight=3)
def get_personal_finished_lots():
''' ะะพะปััะธัั ัะฟะธัะพะบ ัะฒะพะธั
ะทะฐะฒะตััะตะฝะฝัั
ะปะพัะพะฒ.
ะะพะทะฒัะฐัะฐะตั ัะฟะธัะพะบ ะทะฐะฒะตััะตะฝะฝัั
ะปะพัะพะฒ ะฟะพะปัะทะพะฒะฐัะตะปั.
ะะฐะบ ะธ ะฒ ะปัะฑะพะผ ะดััะณะพะผ ัะฟะธัะบะต ะปะพัะพะฒ, ััั
ะฟัะธัััััะฒัะตั ัะธะปัััะฐัะธั.
ะญัะพ ะพะทะฝะฐัะฐะตั, ััะพ ะธัะฟะพะปัะทัั ะผะตัะพะด POST
(ะดะปั ะฝะตะบะพัะพััั
ะทะฐะฟัะพัะพะฒ, ะพะทะฝะฐัะฐะตั ััะพ-ัะพ ะดััะณะพะต)
ะผะพะถะฝะพ ะพัะฟัะฐะฒะธัั ะฟะฐัะฐะผะตัั "filter", ะฒ ะบะพัะพัะพะผ
ะฝัะถะฝะพ ะฟะตัะตะดะฐัั ัะปะพะฒะฐัั, ะฟะพะดะดะตัะถะธะฒะฐััะธะน
ัะปะตะดัััะธะต ะทะฝะฐัะตะฝะธั:
"limit" - ัะธัะปะพ ะปะพัะพะฒ ะฒ ัะฟะธัะบะต, ะผะฐะบัะธะผัะผ 1000.
ะะพ ัะผะพะปัะฐะฝะธั - 1000.
"offset" - ะฝะพะผะตั ะฟะตัะฒะพะณะพ ะปะพัะฐ ะฒ ัะฟะธัะบะต
(ะพััััะฟ ะพั ะฝะฐัะฐะปะฐ). ะะพ ัะผะพะปัะฐะฝะธั - 0.
"order_by" - ะธะผั ะฟะพะปั, ะฟะพ ะบะพัะพัะพะผั ะฝะตะพะฑั
ะพะดะธะผะพ
ัะพััะธัะพะฒะฐัั ัะฟะธัะพะบ.
"order_type" - ัะธะฟ ัะพััะธัะพะฒะบะธ, "ASC" ะธะปะธ "DESC".
ะะพ ัะผะพะปัะฐะฝะธั - "ASC".
"show_only" - ัะปะพะฒะฐัั, ะฒ ะบะพัะพัะพะผ ะบะปััะธ - ะธะผะตะฝะฐ ะฟะพะปะตะน,
ะฟะพ ะบะพัะพััะผ ะฝัะถะฝะพ ะดะตะปะฐัั ัะธะปัััะฐัะธั. ะะพะดะดะตัะถะธะฒะฐัััั
ัะพะปัะบะพ ัะต ะฟะพะปั, ะบะพัะพััะต ะธะผะตัั ะฒ ะฝะฐัััะพะนะบะฐั
ัะพัะผะฐั List[str]. ะะฝะฐัะตะฝะธะต ะถะต - ัะฟะธัะพะบ ัััะพะบ, ะณะดะต
ะบะฐะถะดะฐั ัััะพะบะฐ - ะทะฝะฐัะตะฝะธะต ััะพะณะพ ะฟะพะปั. ะ ะธัะพะณะต ะทะฐะฟัะพั
ะฒะตัะฝะตั ัะพะปัะบะพ ัะต ะปะพัั, ะทะฝะฐัะตะฝะธั ะฟะพะปะตะน ะบะพัะพััั
,
ัะพะพัะฒะตัััะฒััั ะดะฐะฝะฝะพะน ัะธะปัััะฐัะธะธ. ะขะพ ะตััั, ะดะปั | |
""" Very heavily inspired by the official evaluation script for SQuAD version 2.0 which was
modified by XLNet authors to update `find_best_threshold` scripts for SQuAD V2.0
In addition to basic functionality, we also compute additional statistics and
plot precision-recall curves if an additional na_prob.json file is provided.
This file is expected to map question ID's to the model's predicted probability
that a question is unanswerable.
"""
import collections
import json
import logging
import math
import re
import string
from tokenization_utils import BasicTokenizer
logger = logging.getLogger(__name__)
def normalize_answer(s):
    """Normalize an answer string for comparison.

    Lowercases the text, strips punctuation, removes the English articles
    a/an/the, and collapses all runs of whitespace to single spaces.
    """
    lowered = s.lower()
    no_punct = "".join(ch for ch in lowered if ch not in string.punctuation)
    no_articles = re.sub(r"\b(a|an|the)\b", " ", no_punct, flags=re.UNICODE)
    return " ".join(no_articles.split())
def get_tokens(s):
    """Tokenize a normalized answer; empty or None input yields []."""
    return normalize_answer(s).split() if s else []
def compute_exact(a_gold, a_pred):
    """Return 1 when the normalized gold and predicted answers are equal, else 0."""
    return 1 if normalize_answer(a_gold) == normalize_answer(a_pred) else 0
def compute_f1(a_gold, a_pred):
    """Token-level F1 between a gold answer and a predicted answer.

    When either side has no tokens (no-answer case), F1 is 1 if both are
    empty and 0 otherwise.
    """
    gold = get_tokens(a_gold)
    pred = get_tokens(a_pred)
    if not gold or not pred:
        # No-answer case: the two agree only when both token lists are empty.
        return int(gold == pred)
    overlap = collections.Counter(gold) & collections.Counter(pred)
    num_same = sum(overlap.values())
    if num_same == 0:
        return 0
    precision = num_same / len(pred)
    recall = num_same / len(gold)
    return 2 * precision * recall / (precision + recall)
def get_raw_scores(examples, preds):
    """
    Computes the exact and f1 scores from the examples and the model predictions.

    Args:
        examples: iterable of SQuAD examples exposing ``qas_id`` and
            ``answers`` (a list of dicts with a "text" key).
        preds: dict mapping qas_id -> predicted answer string.

    Returns:
        (exact_scores, f1_scores): two dicts keyed by qas_id, each holding
        the best score over all gold answers for that question.
    """
    exact_scores = {}
    f1_scores = {}
    for example in examples:
        qas_id = example.qas_id
        # Keep only gold answers that survive normalization (non-empty text).
        gold_answers = [answer["text"] for answer in example.answers if normalize_answer(answer["text"])]
        if not gold_answers:
            # For unanswerable questions, only correct answer is empty string
            gold_answers = [""]
        if qas_id not in preds:
            # Use the module logger (defined at top of file) instead of print
            # so the warning respects the application's logging configuration;
            # lazy %-formatting avoids building the string when suppressed.
            logger.warning("Missing prediction for %s", qas_id)
            continue
        prediction = preds[qas_id]
        exact_scores[qas_id] = max(compute_exact(a, prediction) for a in gold_answers)
        f1_scores[qas_id] = max(compute_f1(a, prediction) for a in gold_answers)
    return exact_scores, f1_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    """Zero out (or credit) scores for questions predicted as unanswerable.

    A question whose no-answer probability exceeds the threshold is scored
    1.0 when it truly has no answer and 0.0 otherwise; all other scores
    pass through unchanged.
    """
    return {
        qid: (float(not qid_to_has_ans[qid])
              if na_probs[qid] > na_prob_thresh else score)
        for qid, score in scores.items()
    }
def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    """Aggregate per-question scores into percentage exact/F1 plus a count.

    When qid_list is given (and non-empty), only those questions are
    averaged; otherwise every entry of exact_scores is used.
    """
    if qid_list:
        total = len(qid_list)
        exact_sum = sum(exact_scores[qid] for qid in qid_list)
        f1_sum = sum(f1_scores[qid] for qid in qid_list)
    else:
        total = len(exact_scores)
        exact_sum = sum(exact_scores.values())
        f1_sum = sum(f1_scores.values())
    return collections.OrderedDict(
        [
            ("exact", 100.0 * exact_sum / total),
            ("f1", 100.0 * f1_sum / total),
            ("total", total),
        ]
    )
def merge_eval(main_eval, new_eval, prefix):
    """Copy every entry of new_eval into main_eval under '<prefix>_<key>'."""
    for key, value in new_eval.items():
        main_eval["%s_%s" % (prefix, key)] = value
def find_best_thresh_v2(preds, scores, na_probs, qid_to_has_ans):
    """Sweep no-answer thresholds and return the best achievable score.

    Returns a triple: (best score as a percentage of len(scores), the
    no-answer probability threshold achieving it, and the mean raw score
    over questions that have an answer).
    """
    # Start from the score obtained by predicting "no answer" everywhere.
    running = sum(1 for qid in qid_to_has_ans if not qid_to_has_ans[qid])
    best_score = running
    best_thresh = 0.0
    # Visit questions from most- to least-confidently answerable.
    ordered_qids = sorted(na_probs, key=na_probs.get)
    for qid in ordered_qids:
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            delta = scores[qid]
        elif preds[qid]:
            delta = -1
        else:
            delta = 0
        running += delta
        if running > best_score:
            best_score = running
            best_thresh = na_probs[qid]
    has_ans_score = 0
    has_ans_cnt = 0
    for qid in ordered_qids:
        if not qid_to_has_ans[qid]:
            continue
        has_ans_cnt += 1
        if qid in scores:
            has_ans_score += scores[qid]
    return 100.0 * best_score / len(scores), best_thresh, 1.0 * has_ans_score / has_ans_cnt
def find_all_best_thresh_v2(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    """Record best-threshold exact and F1 results (v2 variant) in main_eval."""
    exact_best, exact_best_thresh, exact_has_ans = find_best_thresh_v2(preds, exact_raw, na_probs, qid_to_has_ans)
    f1_best, f1_best_thresh, f1_has_ans = find_best_thresh_v2(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval.update({
        "best_exact": exact_best,
        "best_exact_thresh": exact_best_thresh,
        "best_f1": f1_best,
        "best_f1_thresh": f1_best_thresh,
        "has_ans_exact": exact_has_ans,
        "has_ans_f1": f1_has_ans,
    })
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    """Sweep no-answer thresholds; return (best score %, best threshold)."""
    # Baseline: predict "no answer" for every question.
    running = sum(1 for qid in qid_to_has_ans if not qid_to_has_ans[qid])
    best_score = running
    best_thresh = 0.0
    # Visit questions from most- to least-confidently answerable.
    for qid in sorted(na_probs, key=na_probs.get):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            delta = scores[qid]
        elif preds[qid]:
            delta = -1
        else:
            delta = 0
        running += delta
        if running > best_score:
            best_score = running
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh
def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    """Record best-threshold exact and F1 results in main_eval."""
    exact_best, exact_best_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    f1_best, f1_best_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval.update({
        "best_exact": exact_best,
        "best_exact_thresh": exact_best_thresh,
        "best_f1": f1_best,
        "best_f1_thresh": f1_best_thresh,
    })
def squad_evaluate(examples, preds, no_answer_probs=None, no_answer_probability_threshold=1.0):
    """Run the full SQuAD v2-style evaluation.

    Computes thresholded exact/F1 overall, adds HasAns/NoAns breakdowns
    when both subsets are present, and searches for the best no-answer
    threshold when probabilities are supplied.
    """
    qas_id_to_has_answer = {example.qas_id: bool(example.answers) for example in examples}
    has_answer_qids = [qid for qid, has_answer in qas_id_to_has_answer.items() if has_answer]
    no_answer_qids = [qid for qid, has_answer in qas_id_to_has_answer.items() if not has_answer]
    if no_answer_probs is None:
        # Without explicit probabilities, treat every question as answerable.
        no_answer_probs = dict.fromkeys(preds, 0.0)
    exact, f1 = get_raw_scores(examples, preds)
    exact_threshold = apply_no_ans_threshold(
        exact, no_answer_probs, qas_id_to_has_answer, no_answer_probability_threshold
    )
    f1_threshold = apply_no_ans_threshold(f1, no_answer_probs, qas_id_to_has_answer, no_answer_probability_threshold)
    evaluation = make_eval_dict(exact_threshold, f1_threshold)
    if has_answer_qids:
        merge_eval(evaluation, make_eval_dict(exact_threshold, f1_threshold, qid_list=has_answer_qids), "HasAns")
    if no_answer_qids:
        merge_eval(evaluation, make_eval_dict(exact_threshold, f1_threshold, qid_list=no_answer_qids), "NoAns")
    if no_answer_probs:
        find_all_best_thresh(evaluation, preds, exact, f1, no_answer_probs, qas_id_to_has_answer)
    return evaluation
def compute_predictions(
    all_examples,
    all_features,
    all_results,
    args,
    output_prediction_file,
    output_nbest_file,
    output_null_log_odds_file,
):
    """Decode answers from model outputs and write prediction files.

    Writes the best answer per question to `output_prediction_file` and the
    full n-best lists to `output_nbest_file`, both as indented JSON.

    Args:
        all_examples: SQuAD examples.
        all_features: tokenized features corresponding to the examples.
        all_results: raw model outputs, one per feature.
        args: namespace of decoding options consumed by get_answers.
        output_prediction_file: path for the qas_id -> answer JSON.
        output_nbest_file: path for the qas_id -> n-best list JSON.
        output_null_log_odds_file: currently unused (see the disabled block
            below); kept for interface compatibility.

    Returns:
        dict mapping qas_id to the predicted answer text.
    """
    answers, nbest_answers = get_answers(all_examples, all_features, all_results, args)
    with open(output_prediction_file, "w") as writer:
        writer.write(json.dumps(answers, indent=4) + "\n")
    with open(output_nbest_file, "w") as writer:
        writer.write(json.dumps(nbest_answers, indent=4) + "\n")
    # Null-odds output is disabled: scores_diff_json is not produced by
    # get_answers in this version of the decoder.
    # if args.version_2_with_negative:
    #     with open(output_null_log_odds_file, "w") as writer:
    #         writer.write(json.dumps(scores_diff_json, indent=4) + "\n")
    return answers
# Raw model output for one feature: unique_id ties the result back to its
# feature; start_logits/end_logits are the per-token span scores.
RawResult = collections.namedtuple("RawResult",
                                   ["unique_id", "start_logits", "end_logits"])
def get_answers(examples, features, results, args):
    """Turn raw per-feature logits into final answers and n-best lists.

    For every (example, feature, result) triple, collects valid candidate
    spans, converts them to text, merges candidates across features of the
    same example, and softmax-normalizes the n-best scores.

    Args:
        examples: SQuAD examples (with qas_id).
        features: tokenized features aligned with `results`.
        results: per-feature model outputs (logits).
        args: decoding options (n_best_size, joint_prediction,
            version_2_with_negative, null_score_diff_threshold, ...).

    Returns:
        (answers, nbest_answers): answers maps qas_id -> best answer text
        (possibly "" when the null answer wins); nbest_answers maps
        qas_id -> list of OrderedDicts with text/probability/logits.
    """
    predictions = collections.defaultdict(list) # it is possible that one example corresponds to multiple features
    _Prediction = collections.namedtuple('_Prediction', ['text', 'start_logit', 'end_logit'])
    if args.version_2_with_negative:
        # Per-question best null score, seeded with +inf so any real
        # feature null score replaces the default.
        null_vals = collections.defaultdict(lambda: (float("inf"), 0, 0))
    for ex, feat, result in match_results(examples, features, results):
        if not args.joint_prediction:
            # Standard head: take the top-k start/end logits independently.
            start_indices = _get_best_indices(result.start_logits, args.n_best_size)
            end_indices = _get_best_indices(result.end_logits, args.n_best_size)
            prelim_predictions = get_valid_prelim_predictions(start_indices, end_indices, feat, result, args)
            feature_null_score = result.start_logits[0] + result.end_logits[0]
        else:
            # Joint head: beam-searched start/end index pairs plus a CLS
            # logit acting as the null score.
            prelim_predictions = get_valid_prelim_predictions_joint_head(result.start_top_index, result.end_top_index,
                                                                         feat, result, args)
            # start_indices = result.start_top_index
            # end_indices = result.end_top_index
            feature_null_score = result.cls_logits
        prelim_predictions = sorted(
            prelim_predictions,
            key=lambda x: (x.start_logit + x.end_logit),
            reverse=True)
        # Keep the minimum (best) null score seen across this question's features.
        if args.version_2_with_negative and feature_null_score < null_vals[ex.qas_id][0]:
            null_vals[ex.qas_id] = (feature_null_score, result.start_logits[0], result.end_logits[0])
        curr_predictions = []
        seen_predictions = set()
        for pred in prelim_predictions:
            if len(curr_predictions) == args.n_best_size:
                break
            # start_index 0 is reserved for the null span -> empty text.
            if pred.start_index > 0:
                final_text = get_answer_text(ex, feat, pred, args)
            else:
                final_text = ''
            if final_text in seen_predictions:
                continue
            seen_predictions.add(final_text)
            curr_predictions.append(_Prediction(final_text, pred.start_logit, pred.end_logit))
        predictions[ex.qas_id] += curr_predictions
    # Add empty prediction
    if args.version_2_with_negative:
        for qas_id in predictions.keys():
            predictions[qas_id].append(_Prediction('',
                                                   null_vals[qas_id][1],
                                                   null_vals[qas_id][2]))
    nbest_answers = collections.defaultdict(list)
    answers = {}
    for qas_id, preds in predictions.items():
        seen_predictions = set()
        nbest = []
        # Re-sort merged candidates and deduplicate by answer text.
        for pred in sorted(predictions[qas_id], key=lambda x: (x.start_logit + x.end_logit), reverse=True):
            if len(nbest) >= args.n_best_size:
                break
            if pred.text in seen_predictions:
                continue
            seen_predictions.add(pred.text)
            nbest.append(pred)
        # In very rare edge cases we could only have single null prediction.
        # So we just create a nonce prediction in this case to avoid failure.
        if not nbest or (args.version_2_with_negative and len(nbest) == 1):
            nbest.append(_Prediction(text="empty", start_logit=0.0, end_logit=0.0))
        total_scores = []
        best_non_null_entry = None
        for entry in nbest:
            total_scores.append(entry.start_logit + entry.end_logit)
            if not best_non_null_entry and entry.text:
                best_non_null_entry = entry
        probs = _compute_softmax(total_scores)
        for (i, entry) in enumerate(nbest):
            output = collections.OrderedDict()
            output["text"] = entry.text
            output["probability"] = probs[i]
            output["start_logit"] = entry.start_logit
            output["end_logit"] = entry.end_logit
            nbest_answers[qas_id].append(output)
        if args.version_2_with_negative:
            # NOTE(review): if every nbest entry has empty text,
            # best_non_null_entry stays None and the attribute access below
            # would raise — presumably the "empty" nonce above prevents this;
            # confirm.
            if not args.joint_prediction:
                score_diff = null_vals[qas_id][0] - best_non_null_entry.start_logit - best_non_null_entry.end_logit
            else:
                score_diff = null_vals[qas_id][0]
            # A large null-vs-answer margin means "no answer" wins.
            if score_diff > args.null_score_diff_threshold:
                answers[qas_id] = ""
            else:
                answers[qas_id] = best_non_null_entry.text
        else:
            answers[qas_id] = nbest_answers[qas_id][0]['text']
    return answers, nbest_answers
def get_answer_text(example, feature, pred, args):
    """Map a predicted token span back to text from the original document."""
    sub_tokens = feature.tokens[pred.start_index:pred.end_index + 1]
    doc_start = feature.token_to_orig_map[pred.start_index]
    doc_end = feature.token_to_orig_map[pred.end_index]
    doc_words = example.doc_tokens[doc_start:doc_end + 1]
    # Undo WordPiece splitting ("##" continuation markers), then normalize
    # whitespace before aligning against the original text.
    joined = " ".join(sub_tokens).replace(" ##", "").replace("##", "")
    joined = " ".join(joined.strip().split())
    original = " ".join(doc_words)
    return get_final_text(joined, original, args.do_lower_case, args.verbose_logging)
def get_valid_prelim_predictions_joint_head(start_indices, end_indices, feature, result, args):
_PrelimPrediction = collections.namedtuple(
"PrelimPrediction",
["start_index", "end_index", "start_logit", "end_logit"])
prelim_predictions = []
# for start_index in start_indices:
for i in range(args.beam_size):
start_index = start_indices[i]
for j in range(args.beam_size):
# for end_index in end_indices:
end_index = end_indices[i * args.beam_size + j]
if start_index >= len(feature.tokens):
continue
if end_index >= len(feature.tokens):
continue
if start_index not in feature.token_to_orig_map:
continue
if end_index not in feature.token_to_orig_map:
continue
if not feature.token_is_max_context.get(start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > args.max_answer_length:
continue
prelim_predictions.append(
_PrelimPrediction(
start_index=start_index,
end_index=end_index,
start_logit=result.start_logits[i], # start_index],
end_logit=result.end_logits[i * args.beam_size + j])) # | |
"""
PocketSmith
The public PocketSmith API # noqa: E501
The version of the OpenAPI document: 2.0+0.3.3
Contact: <EMAIL>
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from pocketsmith.api_client import ApiClient, Endpoint as _Endpoint
from pocketsmith.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from pocketsmith.model.inline_object14 import InlineObject14
from pocketsmith.model.inline_object5 import InlineObject5
from pocketsmith.model.inline_response403 import InlineResponse403
from pocketsmith.model.institution import Institution
class InstitutionsApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
    def __init__(self, api_client=None):
        """Wire up one _Endpoint per institutions operation.

        Args:
            api_client (ApiClient, optional): client used to perform the
                HTTP calls; a default-configured ApiClient is created when
                omitted.
        """
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client
        # POST /users/{id}/institutions — create an institution for a user.
        self.create_institution_endpoint = _Endpoint(
            settings={
                'response_type': (Institution,),
                'auth': [
                    'developerKey'
                ],
                'endpoint_path': '/users/{id}/institutions',
                'operation_id': 'create_institution',
                'http_method': 'POST',
                'servers': None,
            },
            params_map={
                'all': [
                    'id',
                    'inline_object14',
                ],
                'required': [
                    'id',
                ],
                'nullable': [
                ],
                'enum': [
                ],
                'validation': [
                ]
            },
            root_map={
                'validations': {
                },
                'allowed_values': {
                },
                'openapi_types': {
                    'id':
                        (int,),
                    'inline_object14':
                        (InlineObject14,),
                },
                'attribute_map': {
                    'id': 'id',
                },
                'location_map': {
                    'id': 'path',
                    'inline_object14': 'body',
                },
                'collection_format_map': {
                }
            },
            headers_map={
                'accept': [
                    'application/json'
                ],
                'content_type': [
                    'application/json'
                ]
            },
            api_client=api_client
        )
        # DELETE /institutions/{id} — remove an institution.
        self.delete_institution_endpoint = _Endpoint(
            settings={
                'response_type': None,
                'auth': [
                    'developerKey'
                ],
                'endpoint_path': '/institutions/{id}',
                'operation_id': 'delete_institution',
                'http_method': 'DELETE',
                'servers': None,
            },
            params_map={
                'all': [
                    'id',
                ],
                'required': [
                    'id',
                ],
                'nullable': [
                ],
                'enum': [
                ],
                'validation': [
                ]
            },
            root_map={
                'validations': {
                },
                'allowed_values': {
                },
                'openapi_types': {
                    'id':
                        (int,),
                },
                'attribute_map': {
                    'id': 'id',
                },
                'location_map': {
                    'id': 'path',
                },
                'collection_format_map': {
                }
            },
            headers_map={
                'accept': [
                    'application/json'
                ],
                'content_type': [],
            },
            api_client=api_client
        )
        # GET /institutions/{id} — fetch a single institution.
        self.get_institution_endpoint = _Endpoint(
            settings={
                'response_type': (Institution,),
                'auth': [
                    'developerKey'
                ],
                'endpoint_path': '/institutions/{id}',
                'operation_id': 'get_institution',
                'http_method': 'GET',
                'servers': None,
            },
            params_map={
                'all': [
                    'id',
                ],
                'required': [
                    'id',
                ],
                'nullable': [
                ],
                'enum': [
                ],
                'validation': [
                ]
            },
            root_map={
                'validations': {
                },
                'allowed_values': {
                },
                'openapi_types': {
                    'id':
                        (int,),
                },
                'attribute_map': {
                    'id': 'id',
                },
                'location_map': {
                    'id': 'path',
                },
                'collection_format_map': {
                }
            },
            headers_map={
                'accept': [
                    'application/json'
                ],
                'content_type': [],
            },
            api_client=api_client
        )
        # GET /users/{id}/institutions — list a user's institutions.
        self.list_institutions_endpoint = _Endpoint(
            settings={
                'response_type': ([Institution],),
                'auth': [
                    'developerKey'
                ],
                'endpoint_path': '/users/{id}/institutions',
                'operation_id': 'list_institutions',
                'http_method': 'GET',
                'servers': None,
            },
            params_map={
                'all': [
                    'id',
                ],
                'required': [
                    'id',
                ],
                'nullable': [
                ],
                'enum': [
                ],
                'validation': [
                ]
            },
            root_map={
                'validations': {
                },
                'allowed_values': {
                },
                'openapi_types': {
                    'id':
                        (int,),
                },
                'attribute_map': {
                    'id': 'id',
                },
                'location_map': {
                    'id': 'path',
                },
                'collection_format_map': {
                }
            },
            headers_map={
                'accept': [
                    'application/json'
                ],
                'content_type': [],
            },
            api_client=api_client
        )
        # PUT /institutions/{id} — update an institution.
        self.update_institution_endpoint = _Endpoint(
            settings={
                'response_type': (Institution,),
                'auth': [
                    'developerKey'
                ],
                'endpoint_path': '/institutions/{id}',
                'operation_id': 'update_institution',
                'http_method': 'PUT',
                'servers': None,
            },
            params_map={
                'all': [
                    'id',
                    'inline_object5',
                ],
                'required': [
                    'id',
                ],
                'nullable': [
                ],
                'enum': [
                ],
                'validation': [
                ]
            },
            root_map={
                'validations': {
                },
                'allowed_values': {
                },
                'openapi_types': {
                    'id':
                        (int,),
                    'inline_object5':
                        (InlineObject5,),
                },
                'attribute_map': {
                    'id': 'id',
                },
                'location_map': {
                    'id': 'path',
                    'inline_object5': 'body',
                },
                'collection_format_map': {
                }
            },
            headers_map={
                'accept': [
                    'application/json'
                ],
                'content_type': [
                    'application/json'
                ]
            },
            api_client=api_client
        )
def create_institution(
self,
id,
**kwargs
):
"""Create institution in user # noqa: E501
Creates an institution belonging to a user. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_institution(id, async_req=True)
>>> result = thread.get()
Args:
id (int): The unique identifier of the user
Keyword Args:
inline_object14 (InlineObject14): [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Institution
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['id'] = \
id
return self.create_institution_endpoint.call_with_http_info(**kwargs)
def delete_institution(
self,
id,
**kwargs
):
"""Delete institution # noqa: E501
Deletes an institution and all data within. Alternatively, another institution can be provided to merge the data into to avoid losing it. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_institution(id, async_req=True)
>>> result = thread.get()
Args:
id (int): The unique identifier of the institution.
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['id'] = \
id
return self.delete_institution_endpoint.call_with_http_info(**kwargs)
def get_institution(
    self,
    id,
    **kwargs
):
    """Get institution  # noqa: E501

    Gets an institution by its ID.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_institution(id, async_req=True)
    >>> result = thread.get()

    Args:
        id (int): The unique identifier of the institution.

    Keyword Args:
        _return_http_data_only (bool): response data without head status
            code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        _request_timeout (int/float/tuple): timeout setting for this
            request. If one number provided, it will be total request
            timeout. It can also be a pair (tuple) of (connection, read)
            timeouts. Default is None.
        _check_input_type (bool): specifies if type checking should be
            done on the data sent to the server. Default is True.
        _check_return_type (bool): specifies if type checking should be
            done on the data received from the server. Default is True.
        _host_index (int/None): specifies the index of the server
            that we want to use. Default is read from the configuration.
        async_req (bool): execute request asynchronously

    Returns:
        Institution
            If the method is called asynchronously, returns the request
            thread.
    """
    # Apply the standard client option defaults without clobbering
    # anything the caller passed in.
    for option, default in (
        ('async_req', False),
        ('_return_http_data_only', True),
        ('_preload_content', True),
        ('_request_timeout', None),
        ('_check_input_type', True),
        ('_check_return_type', True),
        ('_host_index', None),
    ):
        kwargs.setdefault(option, default)
    kwargs['id'] = id
    return self.get_institution_endpoint.call_with_http_info(**kwargs)
def list_institutions(
self,
id,
**kwargs
):
"""List institutions in user # noqa: E501
Lists all the institutions belonging to the user. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_institutions(id, async_req=True)
>>> result = thread.get()
Args:
id (int): The unique identifier of the user
Keyword Args:
_return_http_data_only (bool): response | |
<filename>skyline/webapp/ionosphere_backend.py
from __future__ import division
import logging
from os import path, walk, listdir, remove
# import string
import operator
import time
import re
# import csv
# import datetime
import shutil
import glob
from ast import literal_eval
import traceback
from flask import request
import requests
# from redis import StrictRedis
# from sqlalchemy import (
# create_engine, Column, Table, Integer, String, MetaData, DateTime)
# from sqlalchemy.dialects.mysql import DOUBLE, TINYINT
from sqlalchemy.sql import select
# import json
# from tsfresh import __version__ as tsfresh_version
# @added 20170916 - Feature #1996: Ionosphere - matches page
from pymemcache.client.base import Client as pymemcache_Client
# @added 20190116 - Mutliple SQL Injection Security Vulnerabilities #86
# Bug #2818: Mutliple SQL Injection Security Vulnerabilities
from sqlalchemy.sql import text
import settings
import skyline_version
# from skyline_functions import (
# RepresentsInt, mkdir_p, write_data_to_file, get_graphite_metric)
from skyline_functions import (mkdir_p, get_graphite_metric, write_data_to_file)
# from tsfresh_feature_names import TSFRESH_FEATURES
from database import (
get_engine, ionosphere_table_meta, metrics_table_meta,
ionosphere_matched_table_meta,
# @added 20170305 - Feature #1960: ionosphere_layers
ionosphere_layers_table_meta, layers_algorithms_table_meta,
# @added 20170307 - Feature #1960: ionosphere_layers
# To present matched layers Graphite graphs
ionosphere_layers_matched_table_meta
)
# Module level configuration for the webapp Ionosphere backend.
skyline_version = skyline_version.__absolute_version__
skyline_app = 'webapp'
skyline_app_logger = '%sLog' % skyline_app
logger = logging.getLogger(skyline_app_logger)
skyline_app_logfile = '%s/%s.log' % (settings.LOG_PATH, skyline_app)
logfile = '%s/%s.log' % (settings.LOG_PATH, skyline_app)

try:
    ENABLE_WEBAPP_DEBUG = settings.ENABLE_WEBAPP_DEBUG
except AttributeError:
    # BUG FIX: a missing settings attribute raises AttributeError, not
    # EnvironmentError, so the original guard could never fire and a
    # missing setting crashed the import instead of defaulting to False.
    logger.error('error :: cannot determine ENABLE_WEBAPP_DEBUG from settings')
    ENABLE_WEBAPP_DEBUG = False

try:
    full_duration_seconds = int(settings.FULL_DURATION)
except (AttributeError, TypeError, ValueError):
    # Fall back to one day if FULL_DURATION is absent or not numeric.
    full_duration_seconds = 86400
full_duration_in_hours = full_duration_seconds / 60 / 60
# Redis full duration json files are excluded from training data listings.
exclude_redis_json = 'redis.%sh.json' % str(int(full_duration_in_hours))
def ionosphere_get_metrics_dir(requested_timestamp, context):
    """
    Get a list of all the metrics in timestamp training data or features profile
    folder

    :param requested_timestamp: the training data timestamp
    :param context: the request context, training_data or features_profiles
    :type requested_timestamp: str
    :type context: str
    :return: tuple of (metric_paths, unique_metrics, unique_timestamps, human_dates)
    :rtype: (list, list, list, list)
    """
    # NOTE(review): context is expected to be 'training_data' or
    # 'features_profiles'; any other value leaves log_context and data_dir
    # undefined (unchanged from the original behaviour) - confirm callers.
    if context == 'training_data':
        log_context = 'training data'
    if context == 'features_profiles':
        log_context = 'features profile data'
    logger.info(
        'Metrics requested for timestamp %s dir %s' % (
            log_context, str(requested_timestamp)))

    if context == 'training_data':
        data_dir = '%s' % settings.IONOSPHERE_DATA_FOLDER
    if context == 'features_profiles':
        data_dir = '%s' % (settings.IONOSPHERE_PROFILES_FOLDER)

    # @added 20160113 - Feature #1858: Ionosphere - autobuild features_profiles dir
    if settings.IONOSPHERE_AUTOBUILD:
        # TODO: see ionosphere docs page. Create any deleted/missing
        # features_profiles dir with best effort with the data that is
        # available and DB data on-demand
        if not path.exists(data_dir):
            # provision features_profiles image resources
            mkdir_p(data_dir)

    metric_paths = []
    metrics = []
    timestamps = []
    human_dates = []
    for root, dirs, files in walk(data_dir):
        for file in files:
            if not file.endswith('.json'):
                continue
            # Exclude the raw Redis full duration json files and mirage
            # redis json files, only metric timeseries json files count.
            data_file = True
            if re.search(exclude_redis_json, file):
                data_file = False
            if re.search('mirage.redis.json', file):
                data_file = False
            if re.search(requested_timestamp, root) and data_file:
                # Removed an unused "metric_file = path.join(root, file)"
                # assignment that the original carried.
                metric_name = file.replace('.json', '')
                metric_paths.append([metric_name, root])
                metrics.append(metric_name)
                if context == 'training_data':
                    # assumes the epoch timestamp is the 6th element of the
                    # training data path - TODO confirm against settings
                    timestamp = int(root.split('/')[5])
                if context == 'features_profiles':
                    timestamp = int(path.split(root)[1])
                timestamps.append(timestamp)

    unique_metrics = sorted(set(metrics))
    unique_timestamps = sorted(set(timestamps))
    for i_ts in unique_timestamps:
        human_date = time.strftime('%Y-%m-%d %H:%M:%S %Z', time.localtime(int(i_ts)))
        human_dates.append(human_date)

    return (metric_paths, unique_metrics, unique_timestamps, human_dates)
def ionosphere_data(requested_timestamp, data_for_metric, context):
    """
    Get a list of all training data or profiles folders and metrics

    :param requested_timestamp: the training data or profile timestamp
    :param data_for_metric: the metric base_name
    :param context: the request context, training_data or features_profiles
    :type requested_timestamp: str
    :type data_for_metric: str
    :type context: str
    :return: tuple of (metric_paths, unique_metrics, unique_timestamps, human_dates)
    :rtype: (list, list, list, list)
    """
    base_name = data_for_metric.replace(settings.FULL_NAMESPACE, '', 1)
    if context == 'training_data':
        log_context = 'training data'
    if context == 'features_profiles':
        log_context = 'features profile data'
    logger.info(
        '%s requested for %s at timestamp %s' %
        (log_context, str(base_name), str(requested_timestamp)))

    # BUG FIX: removed a dead if/else branch that computed a timestamp and
    # metric specific data_dir which was then unconditionally overwritten
    # below.  Its requested_timestamp/training_data variant also
    # interpolated three arguments into a two placeholder format string,
    # raising a TypeError whenever it was executed.
    metric_paths = []
    metrics = []
    timestamps = []
    human_dates = []
    if context == 'training_data':
        data_dir = '%s' % settings.IONOSPHERE_DATA_FOLDER
    if context == 'features_profiles':
        data_dir = '%s' % settings.IONOSPHERE_PROFILES_FOLDER

    for root, dirs, files in walk(data_dir):
        for file in files:
            if not file.endswith('.json'):
                continue
            # Exclude the raw Redis full duration and mirage redis json
            # files, only metric timeseries json files are of interest.
            data_file = True
            if re.search(exclude_redis_json, file):
                data_file = False
            if re.search('mirage.redis.json', file):
                data_file = False
            # Only consider directories carrying a 10 digit epoch component
            if re.search('\\d{10}', root) and data_file:
                metric_name = file.replace('.json', '')
                if data_for_metric != 'all':
                    add_metric = False
                    if metric_name == base_name:
                        add_metric = True
                    if requested_timestamp:
                        # Timestamp filter overrides the name match, as in
                        # the original flow.
                        if re.search(requested_timestamp, file):
                            add_metric = True
                        else:
                            add_metric = False
                    if not add_metric:
                        continue
                metric_paths.append([metric_name, root])
                metrics.append(metric_name)
                if context == 'training_data':
                    # assumes the epoch timestamp is the 6th path element
                    # - TODO confirm against settings
                    timestamp = int(root.split('/')[5])
                if context == 'features_profiles':
                    timestamp = int(path.split(root)[1])
                timestamps.append(timestamp)

    unique_metrics = sorted(set(metrics))
    unique_timestamps = sorted(set(timestamps))
    for i_ts in unique_timestamps:
        human_date = time.strftime('%Y-%m-%d %H:%M:%S %Z (%A)', time.localtime(int(i_ts)))
        human_dates.append(human_date)

    return (metric_paths, unique_metrics, unique_timestamps, human_dates)
def get_an_engine():
    """
    Return (engine, fail_msg, trace) from get_engine, logging and
    re-raising on any failure so the webapp can surface it in the UI.
    """
    try:
        engine, fail_msg, trace = get_engine(skyline_app)
    except:  # noqa: E722 - preserve the original broad catch
        trace = traceback.format_exc()
        logger.error('%s' % trace)
        fail_msg = 'error :: failed to get MySQL engine for'
        logger.error('%s' % fail_msg)
        # return None, fail_msg, trace
        raise  # to webapp to return in the UI
    return engine, fail_msg, trace
def engine_disposal(engine):
    """Dispose of a SQLAlchemy engine, logging (never raising) any error."""
    if not engine:
        return
    try:
        engine.dispose()
    except:  # noqa: E722 - preserve the original broad catch
        logger.error(traceback.format_exc())
        logger.error('error :: calling engine.dispose()')
    return
def ionosphere_metric_data(requested_timestamp, data_for_metric, context, fp_id):
"""
Get a list of all training data folders and metrics
"""
# @added 20170104 - Feature #1842: Ionosphere - Graphite now graphs
# Feature #1830: Ionosphere alerts
# Use the new_load_metric_vars method
def new_load_metric_vars(metric_vars_file):
    """
    Load the metric variables for a check from a metric check variables file

    :param metric_vars_file: the path and filename to the metric variables files
    :type metric_vars_file: str
    :return: a list of [key, value] pairs or ``False``
    :rtype: list
    """
    if path.isfile(metric_vars_file):
        logger.info(
            'loading metric variables from metric_check_file - %s' % (
                str(metric_vars_file)))
    else:
        logger.error(
            'error :: loading metric variables from metric_check_file - file not found - %s' % (
                str(metric_vars_file)))
        return False

    # Each line is "name = value"; rewrite to "['name', 'value']" and
    # literal_eval it into a two element list.
    metric_vars = []
    with open(metric_vars_file) as f:
        for line in f:
            no_new_line = line.replace('\n', '')
            no_equal_line = no_new_line.replace(' = ', ',')
            array = str(no_equal_line.split(',', 1))
            add_line = literal_eval(array)
            metric_vars.append(add_line)

    string_keys = ['metric', 'anomaly_dir', 'added_by', 'app', 'source']
    float_keys = ['value']
    int_keys = ['from_timestamp', 'metric_timestamp', 'added_at', 'full_duration']
    array_keys = ['algorithms', 'triggered_algorithms']
    boolean_keys = ['graphite_metric', 'run_crucible_tests']

    # ROBUSTNESS: initialise metric so the debug logs below cannot raise
    # NameError when the file carries no 'metric' key.
    metric = None
    metric_vars_array = []
    for var_array in metric_vars:
        key = None
        value = None
        if var_array[0] in string_keys:
            key = var_array[0]
            value_str = str(var_array[1]).replace("'", '')
            value = str(value_str)
            if var_array[0] == 'metric':
                metric = value
        if var_array[0] in float_keys:
            key = var_array[0]
            value_str = str(var_array[1]).replace("'", '')
            value = float(value_str)
        if var_array[0] in int_keys:
            key = var_array[0]
            value_str = str(var_array[1]).replace("'", '')
            value = int(value_str)
        if var_array[0] in array_keys:
            key = var_array[0]
            value = literal_eval(str(var_array[1]))
        if var_array[0] in boolean_keys:
            key = var_array[0]
            value = str(var_array[1]) == 'True'
        if key:
            metric_vars_array.append([key, value])

    if len(metric_vars_array) == 0:
        # BUG FIX: the original applied % to a format string with no
        # placeholder, raising a TypeError instead of logging.
        logger.error(
            'error :: loading metric variables - none found in %s' % (
                str(metric_vars_file)))
        return False

    if settings.ENABLE_DEBUG:
        # BUG FIX: the original logged str(metric_vars.metric) - lists have
        # no .metric attribute - use the parsed metric name instead.
        logger.info(
            'debug :: metric_vars determined - metric variable - metric - %s' % str(metric))

    # @added 20170113 - Feature #1842: Ionosphere - Graphite now graphs
    # Handle features profiles that were created pre the addition of
    # full_duration
    full_duration_present = False
    for key, value in metric_vars_array:
        if key == 'full_duration':
            full_duration_present = True
    if not full_duration_present:
        try:
            # Derive full_duration from the timestamp span, rounded to
            # whole days.
            for key, value in metric_vars_array:
                if key == 'from_timestamp':
                    value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
                    use_from_timestamp = int(value_list[0])
                if key == 'metric_timestamp':
                    value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
                    use_metric_timestamp = int(value_list[0])
            round_full_duration_days = int((use_metric_timestamp - use_from_timestamp) / 86400)
            round_full_duration = int(round_full_duration_days) * 86400
            logger.info('debug :: calculated missing full_duration')
            metric_vars_array.append(['full_duration', round_full_duration])
        except:
            logger.error('error :: could not calculate missing full_duration')
            metric_vars_array.append(['full_duration', 'unknown'])

    logger.info('debug :: metric_vars for %s' % str(metric))
    logger.info('debug :: %s' % str(metric_vars_array))
    return metric_vars_array
base_name = data_for_metric.replace(settings.FULL_NAMESPACE, '', 1)
if context == 'training_data':
log_context = 'training data'
if context == 'features_profiles':
log_context = 'features profile data'
logger.info('%s requested for %s at %s' % (
context, str(base_name), str(requested_timestamp)))
metric_paths = []
images = | |
<filename>solvers/solver_helpers.py
import json
from abc import ABC, abstractmethod
import numpy as np
import os
import re
import torch
from collections import defaultdict, OrderedDict
from functools import wraps
from gensim.models import KeyedVectors
from pymorphy2 import MorphAnalyzer
from pytorch_pretrained_bert import BertModel, BertTokenizer, BertForMaskedLM
from scipy.spatial.distance import cosine
morph = MorphAnalyzer()
ALPHABET = "ะฐะฑะฒะณะดะตัะถะทะธะนะบะปะผะฝะพะฟัััััั
ัััััััััั"
class AbstractSolver(ABC):
    """Base class for exam-task solvers.

    Subclasses implement :meth:`predict_from_model`; :meth:`predict`
    normalises its output to a sorted list of id strings.
    """

    # Flipped to True once load() has run.
    is_loaded = False

    def predict(self, task):
        """Return the predicted choice ids as a sorted list of strings."""
        ids = (str(choice["id"]) for choice in self.predict_from_model(task))
        return sorted(ids)

    @abstractmethod
    def predict_from_model(self, task):
        """Produce an iterable of choice dicts, each carrying an "id" key."""

    def fit(self, tasks):
        """No-op by default; subclasses may train here."""

    def save(self, path=""):
        """No-op by default; subclasses may persist state here."""

    def load(self, path=""):
        """Mark the solver as loaded."""
        self.is_loaded = True
def fix_spaces(text):
    """Collapse every run of whitespace in *text* to a single space.

    Uses a raw-string pattern: the original's non-raw "\\s+" relies on a
    deprecated invalid escape.  Leading/trailing runs become one space
    (they are not stripped), matching the original behaviour.
    """
    return re.sub(r"\s+", " ", text)
def singleton(cls):
    """Decorator caching the first call's result; later calls return it.

    Note: a result of None is treated as "not yet created", so the
    callable would run again - identical to the original behaviour.
    """
    cached = None

    @wraps(cls)
    def wrapper(*args, **kwargs):
        nonlocal cached
        if cached is None:
            cached = cls(*args, **kwargs)
        return cached

    return wrapper
def standardize_task(task):
    """Normalise *task* in place and return it.

    Ensures task["choices"] exists (taken from task["question"], or parsed
    from the newline-separated task["text"]) and attaches a comma-split,
    stripped "parts" list to every choice.
    """
    if "choices" not in task:
        question = task.get("question", {})
        if "choices" in question:
            task["choices"] = question["choices"]
        else:
            # First line is the question; each following line is a choice.
            first, *rest = task["text"].split("\n")
            task["text"] = first
            task["choices"] = [
                {"id": str(idx), "text": line}
                for idx, line in enumerate(rest, start=1)
            ]
    for choice in task["choices"]:
        choice["parts"] = [piece.strip() for piece in choice["text"].split(",")]
    return task
class BertEmbedder(object):
    """
    Embedding Wrapper on Bert Multilingual Cased

    Exposes sentence-level and token-level embeddings from a pretrained
    multilingual cased BERT loaded from ``bert_path``.
    """

    def __init__(self, bert_path="data/models/bert/multilingual"):
        # Locations of the pretrained weights archive and its vocabulary.
        self.bert_path = bert_path
        self.model_file = os.path.join(bert_path, "bert-base-multilingual-cased.tar.gz")
        self.vocab_file = os.path.join(
            bert_path, "bert-base-multilingual-cased-vocab.txt"
        )
        # NOTE(review): bert_model/bert_tokenizer/get_bert_embed_matrix are
        # wrapped with @singleton, so the objects built by the FIRST
        # instance are cached and returned to every later instance - a
        # later instance's bert_path is effectively ignored.
        self.model = self.bert_model()
        self.tokenizer = self.bert_tokenizer()
        self.embedding_matrix = self.get_bert_embed_matrix()

    @singleton
    def bert_model(self):
        # Load pretrained weights and switch to inference (eval) mode.
        model = BertModel.from_pretrained(self.model_file).eval()
        return model

    @singleton
    def bert_tokenizer(self):
        # Cased vocabulary: do not lowercase during tokenization.
        tokenizer = BertTokenizer.from_pretrained(self.vocab_file, do_lower_case=False)
        return tokenizer

    @singleton
    def get_bert_embed_matrix(self):
        # Walks into the model's first child (presumably the embeddings
        # module) and its first child (the word-embedding table) - TODO
        # confirm against the pytorch_pretrained_bert module layout.
        bert_embeddings = list(self.model.children())[0]
        bert_word_embeddings = list(bert_embeddings.children())[0]
        matrix = bert_word_embeddings.weight.data.numpy()
        return matrix

    def sentence_embedding(self, text_list):
        """Return one tensor per text: the final encoder layer mean-pooled
        over the token axis."""
        embeddings = []
        for text in text_list:
            token_list = self.tokenizer.tokenize("[CLS] " + text + " [SEP]")
            segments_ids, indexed_tokens = (
                [1] * len(token_list),
                self.tokenizer.convert_tokens_to_ids(token_list),
            )
            segments_tensors, tokens_tensor = (
                torch.tensor([segments_ids]),
                torch.tensor([indexed_tokens]),
            )
            with torch.no_grad():
                encoded_layers, _ = self.model(tokens_tensor, segments_tensors)
            # Layer index 11 is the last of the 12 encoder layers.
            sent_embedding = torch.mean(encoded_layers[11], 1)
            embeddings.append(sent_embedding)
        return embeddings

    def token_embedding(self, token_list):
        """Embed each token by concatenating its subtokens' last four
        encoder-layer vectors."""
        token_embedding = []
        for token in token_list:
            ontoken = self.tokenizer.tokenize(token)
            segments_ids, indexed_tokens = (
                [1] * len(ontoken),
                self.tokenizer.convert_tokens_to_ids(ontoken),
            )
            segments_tensors, tokens_tensor = (
                torch.tensor([segments_ids]),
                torch.tensor([indexed_tokens]),
            )
            with torch.no_grad():
                encoded_layers, _ = self.model(tokens_tensor, segments_tensors)
            # Collect, per subtoken, its vector from every encoder layer.
            ontoken_embeddings = []
            for subtoken_i in range(len(ontoken)):
                hidden_layers = []
                for layer_i in range(len(encoded_layers)):
                    vector = encoded_layers[layer_i][0][subtoken_i]
                    hidden_layers.append(vector)
                ontoken_embeddings.append(hidden_layers)
            # Concatenate the last four layers per subtoken.
            cat_last_4_layers = [
                torch.cat((layer[-4:]), 0) for layer in ontoken_embeddings
            ]
            token_embedding.append(cat_last_4_layers)
        # NOTE(review): both branches below index [0], so only the first
        # token's embedding(s) are returned regardless of input length -
        # looks suspicious but is preserved as-is.
        token_embedding = (
            torch.stack(token_embedding[0], 0)
            if len(token_embedding) > 1
            else token_embedding[0][0]
        )
        return token_embedding
class RubertForMasking(object):
    """
    DeepPavlov Rubert Wrapper for Masking in Tasks 2, 15
    by team Niw

    NOTE(review): nesting below was reconstructed from data flow because
    the source arrived with indentation stripped - confirm against the
    original repository.  Several string literals appear mojibake-encoded
    (UTF-8 Cyrillic mis-decoded); they are preserved byte-for-byte.
    """

    def __init__(self, bert_path="data/models/bert/rubert/deeppavlov"):
        # Paths to the conversational RuBERT weights and vocabulary.
        self.bert_path = bert_path
        self.model_file = os.path.join(
            self.bert_path, "ru_conversational_cased_L-12_H-768_A-12.tar.gz"
        )
        self.vocab_file = os.path.join(self.bert_path, "vocab.txt")
        # NOTE(review): @singleton caches the first instance's model and
        # tokenizer for all later instances.
        self.model = self.bert_model()
        self.tokenizer = self.bert_tokenizer()

    @singleton
    def bert_model(self):
        # Masked-LM head model in inference mode.
        model = BertForMaskedLM.from_pretrained(self.model_file).eval()
        return model

    @singleton
    def bert_tokenizer(self):
        # Cased vocabulary: no lowercasing at tokenize time.
        tokenizer = BertTokenizer.from_pretrained(self.vocab_file, do_lower_case=False)
        return tokenizer

    def token_embedding(self, token_list):
        """Embed tokens by concatenating each subtoken's last four layers
        (same logic as BertEmbedder.token_embedding)."""
        token_embedding = []
        for token in token_list:
            ontoken = self.tokenizer.tokenize(token)
            segments_ids, indexed_tokens = (
                [1] * len(ontoken),
                self.tokenizer.convert_tokens_to_ids(ontoken),
            )
            segments_tensors, tokens_tensor = (
                torch.tensor([segments_ids]),
                torch.tensor([indexed_tokens]),
            )
            with torch.no_grad():
                encoded_layers, _ = self.model(tokens_tensor, segments_tensors)
            ontoken_embeddings = []
            for subtoken_i in range(len(ontoken)):
                hidden_layers = []
                for layer_i in range(len(encoded_layers)):
                    vector = encoded_layers[layer_i][0][subtoken_i]
                    hidden_layers.append(vector)
                ontoken_embeddings.append(hidden_layers)
            cat_last_4_layers = [
                torch.cat((layer[-4:]), 0) for layer in ontoken_embeddings
            ]
            token_embedding.append(cat_last_4_layers)
        # NOTE(review): both branches index [0]; only the first token's
        # result is ever returned - preserved as-is.
        token_embedding = (
            torch.stack(token_embedding[0], 0)
            if len(token_embedding) > 1
            else token_embedding[0][0]
        )
        return token_embedding

    def masking_task_15(self, text, w_x, delta):
        """Score paired candidate words for each [MASK] and pick, per pair,
        the spelling whose masked-LM likelihood ratio exceeds 1 + delta.

        w_x is assumed to hold the two variants of each word in two
        halves: w_x[i] vs w_x[len(w_x)//2 + i] - TODO confirm with callers.
        """
        # Subtoken id sequences for every lowercased candidate.
        w_y = []
        for i in range(len(w_x)):
            w_y.append(self.tokenizer.tokenize(w_x[i].lower()))
            w_y[i] = self.tokenizer.convert_tokens_to_ids(w_y[i])
        if text[-1] == "]":
            text = text + " . ."
        text = "[CLS] " + text + " [SEP]"
        tokenized_text = self.tokenizer.tokenize(text)
        indexed_tokens = self.tokenizer.convert_tokens_to_ids(tokenized_text)
        # BERT input limit safeguard.
        indexed_tokens = indexed_tokens[:500]
        # Positions of mask tokens; 103 is presumably this vocab's [MASK]
        # id - TODO confirm.
        mask_input = []
        for i in range(len(indexed_tokens)):
            if indexed_tokens[i] == 103:
                mask_input.append(i)
        segments_ids = [0] * len(indexed_tokens)
        tokens_tensor = torch.tensor([indexed_tokens])
        segments_tensors = torch.tensor([segments_ids])
        with torch.no_grad():
            predictions = self.model(tokens_tensor, segments_tensors)
        # Logits (shifted by +10) at each mask position and the two
        # following positions, for multi-subtoken candidates.
        predictions_one, predictions_two, predictsx3 = [], [], []
        for i in range(len(mask_input)):
            predictions_one.append(predictions[0, mask_input[i], :].numpy() + 10)
            predictions_two.append(predictions[0, mask_input[i] + 1, :].numpy() + 10)
            predictsx3.append(predictions[0, mask_input[i] + 2, :].numpy() + 10)
        # ver_w3 is never used - preserved from the original.
        ver_w_y, ver_w2, ver_w3 = [], [], []
        output = []
        for i in range(len(w_x)):
            # Geometric mean of the shifted logits over up to 3 subtokens.
            if len(w_y[i]) > 2:
                ver_w_y.append(
                    abs(
                        predictions_one[i][w_y[i][0]]
                        * predictions_two[i][w_y[i][1]]
                        * predictsx3[i][w_y[i][2]]
                    )
                    ** (1 / 3)
                )
            elif len(w_y[i]) > 1:
                ver_w_y.append(
                    abs(predictions_one[i][w_y[i][0]] * predictions_two[i][w_y[i][1]])
                    ** (1 / 2)
                )
            else:
                ver_w_y.append(predictions_one[i][w_y[i][0]])
            # Hand-tuned bonus for specific word families.  NOTE(review):
            # the first two operands and the last two render identically
            # here (likely differing only in the original encoding).
            if (
                "ะบะพะฒะฐะฝั" in w_x[i]
                or "ะบะพะฒะฐะฝั" in w_x[i]
                or "ะทะพะปะพััะฝะพ" in w_x[i].replace("ะต", "ั")
                or "ะทะพะปะพััะฝั" in w_x[i].replace("ะต", "ั")
                or "ะทะพะปะพััะฝั" in w_x[i].replace("ะต", "ั")
            ):
                ver_w_y[i] += 5
        # Likelihood ratio of each first-half candidate vs its pair.
        for i in range(len(w_x) // 2):
            ver_w2.append(ver_w_y[i] / ver_w_y[len(w_x) // 2 + i])
        for i in range(len(ver_w2)):
            if ver_w2[i] > 1 + delta:
                output.append(w_x[i])
            else:
                output.append(w_x[len(w_x) // 2 + i])
        return output

    def masking_task_2(self, text, seed_word, task_type=0):
        """Fill the [MASK](s) in *text* with a connective word, preferring
        candidates from *seed_word*.

        Returns (result, from_search) where from_search is False when the
        fallback (first seed word / top model candidate) was used.
        """
        text = "[CLS] " + text + " [SEP]"
        tokenized_text = self.tokenizer.tokenize(text)
        indexed_tokens = self.tokenizer.convert_tokens_to_ids(tokenized_text)
        indexed_tokens = indexed_tokens[:500]
        # Mask token positions (id 103 - presumably [MASK], TODO confirm).
        mask_input = []
        for i in range(len(indexed_tokens)):
            if indexed_tokens[i] == 103:
                mask_input.append(i)
        segments_ids = [0] * len(tokenized_text)
        tokens_tensor = torch.tensor([indexed_tokens])
        segments_tensors = torch.tensor([segments_ids])
        model_output_two = ""
        with torch.no_grad():
            predictions = self.model(tokens_tensor, segments_tensors)
            # Top-50 candidate tokens for the first mask (ascending score).
            predictions_one = predictions[0, mask_input[0], :].argsort()[-50:].numpy()
            model_output_one = self.tokenizer.convert_ids_to_tokens(predictions_one)
            if len(mask_input) > 1:
                predictions_two = (
                    predictions[0, mask_input[1], :].argsort()[-50:].numpy()
                )
                model_output_two = self.tokenizer.convert_ids_to_tokens(predictions_two)
        result, from_search = "", True
        # Walk candidates best-first; with two masks, try to compose a
        # seed word from a first-mask and a second-mask candidate.
        # Setting task_type = 3 records a hit and stops further scanning.
        for i in range(len(model_output_one) - 1, -1, -1):
            if task_type == 3:
                break
            model_output_one[i] = model_output_one[i].strip().lower()
            if model_output_two:
                for w in seed_word:
                    if task_type == 3:
                        break
                    if model_output_one[i].lower() in w:
                        w2 = w.replace(model_output_one[i], "")
                        for j in range(len(model_output_two) - 1, -1, -1):
                            if model_output_two[j].lower() in w2:
                                w3 = w2.replace(model_output_two[j], "").replace(
                                    " ", ""
                                )
                                if not w3:
                                    task_type = 3
                                    result = w.replace(" ", "")
                                    break
        if task_type == 2:
            result = "ะฝะต ะทะฝะฐั"
        elif task_type == 4:
            # Prefer a multi-word / hyphenated seed, squashed together.
            for w in seed_word:
                if "-" in w or " " in w:
                    result = w.replace(" ", "").replace("-", "")
                    break
            if not result:
                from_search = False
                if seed_word:
                    result = seed_word[0]
                else:
                    result = model_output_one[-1]
        else:
            # Single-mask path: drop degenerate candidates, then take the
            # first remaining candidate that is itself a seed word.
            for i in range(len(model_output_one) - 1, -1, -1):
                if (
                    len(model_output_one[i]) < 2
                    and (model_output_one[i] != "ะธ" and model_output_one[i] != "ะฐ")
                ) or model_output_one[i] == "ะฒะพั":
                    model_output_one.pop(i)
                elif model_output_one[i] in seed_word:
                    result = model_output_one[i]
                    break
            if not result and task_type == 1:
                for w in seed_word:
                    if "-" in w or " " in w:
                        result = w.replace(" ", "").replace("-", "")
                        break
            if not result:
                from_search = False
                if seed_word:
                    result = seed_word[0]
                else:
                    result = model_output_one[-1]
        # Hard-coded answer substitutions.
        if result == "ะบัะพะถะฐะปะตะฝะธั":
            result = "ัะฐะบะธะผะพะฑัะฐะทะพะผ"
        if result == "ะฒะพะฒัะตะฝะต":
            result = "ะธะผะตะฝะฝะพ"
        return result, from_search
def clean(word):
    """Lowercase *word*, blank out disallowed characters, squeeze spaces."""
    lowered = word.lower()
    stripped = re.sub("[^ะฐ-ัa-z0-9ั ]+", " ", lowered)
    return " ".join(stripped.split())
def tokenize(text, clean_method=clean):
    """Normalise *text* with *clean_method* and split it into tokens."""
    return clean_method(text).split()
class CommonData(object):
    """Loads shared lookup tables (prefixes, prepositions, norm2word) from
    flat files under *common_data_path*.

    NOTE(review): the load_* methods are @singleton-wrapped, so the first
    instance's data is cached and shared by all later instances.
    """

    def __init__(self, common_data_path="data/models/utils/common_files"):
        self.common_data_path = common_data_path
        self.prefixes_path = os.path.join(self.common_data_path, "prefixes.txt")
        self.norm2word_path = os.path.join(self.common_data_path, "norm2word.json")
        self.prepositions_path = os.path.join(self.common_data_path, "prepositions.txt")
        self.prefixes = self.load_prefixes()
        self.prepositions = self.load_prepositions()
        self.norm2word = self.load_norm2word()

    @singleton
    def load_prefixes(self):
        """Parse prefixes.txt into {prefix: category}.

        File format (inferred): lines starting with "=" open a new
        category; other lines hold "/"-separated prefixes (hyphens
        stripped).  NOTE(review): a data line before the first "=" header
        would raise NameError on `k` - assumes well-formed input.
        """
        prefixes = {}
        with open(self.prefixes_path, "r", encoding="utf-8") as f:
            for line in f:
                if line[0] == "=":
                    k = line[1:].strip()
                    continue
                for e in line.replace("-", "").split("/"):
                    # First mapping wins; later duplicates are ignored.
                    if e.strip() not in prefixes:
                        prefixes[e.strip()] = k
        return prefixes

    @singleton
    def load_prepositions(self):
        """Parse prepositions.txt into {preposition: [cases]}.

        "=" lines open a case tag (note: sliced [2:], unlike prefixes'
        [1:]); only all-lowercase entries are kept.  Two entries are then
        forcibly overridden to the single case tag "ablt".
        """
        prepositions = defaultdict(list)
        with open(self.prepositions_path, "r", encoding="utf-8") as f:
            for line in f:
                if line[0] == "=":
                    k = line[2:].strip()
                    continue
                for e in line.replace("-", "").split("/"):
                    if e.lower() == e:
                        prepositions[e.strip()].append(k)
        for prep in ["ั", "ัะพ"]:
            prepositions[prep] = ["ablt"]
        return prepositions

    @singleton
    def load_norm2word(self):
        """Load the normal-form -> word JSON mapping."""
        with open(self.norm2word_path, "r", encoding="utf-8") as f:
            norm2word = json.load(f)
        return norm2word
class Word2vecProcessor(object):
def __init__(self, w2v_path="data/models/utils/w2v"):
self.w2v_path = w2v_path
self.w2v_model_filename = os.path.join(
self.w2v_path, "ruwikiruscorpora_0_300_20.bin"
)
self.verbs_filename = os.path.join(self.w2v_path, "verbs.txt")
self.word2vec = self.load_w2v()
self.lemma2word = self.build_lemma2word()
self.verbs = self.load_verbs()
self.norm_cache = dict()
@singleton
def load_w2v(self):
return KeyedVectors.load_word2vec_format(
self.w2v_model_filename, binary=True, datatype=np.float32
)
@singleton
def build_lemma2word(self):
lemma2word = dict()
for word in self.word2vec.index2word:
q = word.split("_")[0]
if q not in lemma2word:
lemma2word[q] = word
return lemma2word
@singleton
def load_verbs(self):
with open(os.path.join(self.verbs_filename), encoding="utf-8") as fin:
verbs = json.load(fin)
return verbs
def warmup_cache(self, word2norm_filename):
with open(word2norm_filename, encoding="utf-8") as fin:
self.norm_cache = json.load(fin)
self.norm2word_cache = defaultdict(set)
for word, word_norm in self.norm_cache.items():
self.norm2word_cache[word_norm].add(word)
def get_all_forms(self, word_norm):
return {
self.verbs.get(lemma, lemma) for lemma in self.norm2word_cache[word_norm]
}
def get_normal_form(self, word):
if word not in self.norm_cache:
self.norm_cache[word] = morph.normal_forms(word)[0]
lemma = self.norm_cache[word]
lemma = self.verbs.get(lemma, lemma)
lemma = lemma.replace("ั", "ะต")
return lemma
def prepare_word(self, word):
lemma = self.get_normal_form(word)
word = self.lemma2word.get(lemma)
return word
def word_vector(self, word):
word = self.prepare_word(word)
return self.word2vec[word] if word in self.word2vec else None
def text_vector(self, text):
word_vectors = | |
"""
Tests for Template Tags
"""
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Permission
from django.template import Template, Context
from django.test import TestCase, override_settings, RequestFactory
from django.urls import NoReverseMatch
from django_adminlte_2.menu import WHITELIST
from django_adminlte_2.templatetags import sidebar_menu
UserModel = get_user_model()
@override_settings(ADMINLTE2_MENU_PERMISSION_FREE_WHITELIST=WHITELIST)
class TemplateTagSidebarMenuTestCase(TestCase):
"""
Test Template Tags and Helper Methods used in those template tags
"""
# |-------------------------------------------------------------------------
# | Setup
# |-------------------------------------------------------------------------
def setUp(self):
    """Reset the per-test user references before each test."""
    self.superuser = None
    self.staffuser = None
def _setup_superuser(self):
    """Set up Superuser"""
    # Superusers implicitly pass every permission check, so no explicit
    # permissions are attached.
    self.superuser = UserModel()
    self.superuser.username = 'testsuperuser'
    self.superuser.is_superuser = True
    self.superuser.save()
def _setup_staffuser(self, permissions=None):
    """Create and save a staff user, optionally granting permissions.

    *permissions* may be a single codename string or a list of codenames.
    """
    self.staffuser = UserModel()
    self.staffuser.username = 'teststaffuser'
    self.staffuser.is_staff = True
    self.staffuser.save()
    if not permissions:
        return
    codenames = [permissions] if isinstance(permissions, str) else permissions
    for codename in codenames:
        perm_object = Permission.objects.filter(
            codename__exact=codename,
        ).first()
        self.staffuser.user_permissions.add(perm_object)
# |-------------------------------------------------------------------------
# | Test get_permissions
# |-------------------------------------------------------------------------
def test_get_permissions_from_node_pulls_permissions_from_direct_assigned_permissions(self):
    """Permissions listed directly on a node are returned unchanged."""
    menu_node = {
        'route': 'django_adminlte_2:sample1',
        'text': 'Sample1',
        'icon': 'fa fa-group',
        'permissions': ['add_sample1', 'update_sample1'],
    }

    permissions, one_of_permissions = sidebar_menu.get_permissions_from_node(menu_node)

    self.assertEqual(menu_node['permissions'], permissions)
    self.assertFalse(one_of_permissions)
def test_get_permissions_from_node_pulls_permissions_from_view_function(self):
    """Permissions are pulled from the routed view when none are set on the node."""
    menu_node = {
        'route': 'django_adminlte_2:sample1',
        'text': 'Sample1',
        'icon': 'fa fa-group',
    }

    permissions, one_of_permissions = sidebar_menu.get_permissions_from_node(menu_node)

    self.assertIn('auth.add_group', permissions)
    self.assertFalse(one_of_permissions)
def test_get_permissions_from_node_pulls_permissions_from_view_with_hash_route_and_valid_url(self):
    """A hash route with a valid url still resolves the view's permissions."""
    menu_node = {
        'route': '#',
        'text': 'Sample1',
        'icon': 'fa fa-building',
        'url': '/sample1/',
    }

    permissions, one_of_permissions = sidebar_menu.get_permissions_from_node(menu_node)

    self.assertIn('auth.add_group', permissions)
    self.assertFalse(one_of_permissions)
def test_get_permissions_from_node_raises_keyerror_when_route_is_missing(self):
    """A node lacking its route key raises a KeyError."""
    menu_node = {
        'text': 'Sample1',
        'icon': 'fa fa-group',
    }
    with self.assertRaises(KeyError):
        sidebar_menu.get_permissions_from_node(menu_node)
def test_get_permissions_from_node_returns_empty_list_when_no_reverse_error_and_route_is_a_hash(self):
    """A bare hash route (no url) yields empty permission lists."""
    menu_node = {
        'route': '#',
        'text': 'Sample1',
        'icon': 'fa fa-group',
    }

    permissions, one_of_permissions = sidebar_menu.get_permissions_from_node(menu_node)

    self.assertEqual([], permissions)
    self.assertEqual([], one_of_permissions)
def test_get_permissions_from_node_raises_error_when_route_causes_a_reverse_error(self):
    """An unknown route name surfaces the NoReverseMatch."""
    menu_node = {
        'route': 'foobar',
        'text': 'Sample1',
        'icon': 'fa fa-group',
    }
    with self.assertRaises(NoReverseMatch):
        sidebar_menu.get_permissions_from_node(menu_node)
def test_get_permissions_from_node_returns_empty_list_when_there_are_no_defined_permissions_on_the_node(self):
    """A view that defines no permissions yields empty permission lists."""
    menu_node = {
        'route': getattr(settings, 'ADMINLTE2_HOME_ROUTE', 'django_adminlte_2:home'),
        'text': 'Home',
        'icon': 'fa fa-dashboard',
    }

    permissions, one_of_permissions = sidebar_menu.get_permissions_from_node(menu_node)

    self.assertEqual([], permissions)
    self.assertEqual([], one_of_permissions)
# |-------------------------------------------------------------------------
# | Test one_of_permissions
# |-------------------------------------------------------------------------
def test_one_of_permissions_from_node_works(self):
    """Explicit 'one_of_permissions' entries on a node are returned verbatim."""
    node_with_one_of = {
        'route': 'django_adminlte_2:sample2',
        'text': 'Sample2',
        'icon': 'fa fa-building',
        'one_of_permissions': ['add_sample2', 'update_sample2'],
    }
    permissions, one_of_permissions = sidebar_menu.get_permissions_from_node(node_with_one_of)
    self.assertEqual(permissions, [])
    self.assertIn('add_sample2', one_of_permissions)
def test_one_of_permissions_from_node_pulls_permissions_from_view_function(self):
    """With no explicit entries, one-of permissions come from the routed view."""
    plain_node = {
        'route': 'django_adminlte_2:sample2',
        'text': 'Sample2',
        'icon': 'fa fa-building',
    }
    permissions, one_of_permissions = sidebar_menu.get_permissions_from_node(plain_node)
    self.assertEqual(permissions, [])
    self.assertIn('auth.add_permission', one_of_permissions)
def test_one_of_permissions_from_node_raises_keyerror_when_route_is_missing(self):
    """Omitting the 'route' key should raise KeyError."""
    node_missing_route = {'text': 'Sample2', 'icon': 'fa fa-building'}
    with self.assertRaises(KeyError):
        sidebar_menu.get_permissions_from_node(node_missing_route)
def test_one_of_permissions_from_node_returns_empty_list_when_no_reverse_error_and_route_is_a_hash(self):
    """A '#' route resolves to no view, so both permission lists come back empty."""
    hash_node = {'route': '#', 'text': 'Sample2', 'icon': 'fa fa-building'}
    permissions, one_of_permissions = sidebar_menu.get_permissions_from_node(hash_node)
    self.assertEqual(permissions, [])
    self.assertEqual(one_of_permissions, [])
def test_one_of_permissions_from_node_raises_error_when_route_causes_a_reverse_error(self):
    """An unresolvable route name should surface NoReverseMatch."""
    bad_route_node = {'route': 'foobar', 'text': 'Sample2', 'icon': 'fa fa-building'}
    with self.assertRaises(NoReverseMatch):
        sidebar_menu.get_permissions_from_node(bad_route_node)
def test_one_of_permissions_from_node_returns_empty_list_when_there_are_no_defined_permissions_on_the_node(self):
    """A route whose view defines no permissions yields two empty lists."""
    home_node = {
        'route': getattr(settings, 'ADMINLTE2_HOME_ROUTE', 'django_adminlte_2:home'),
        'text': 'Home',
        'icon': 'fa fa-dashboard',
    }
    permissions, one_of_permissions = sidebar_menu.get_permissions_from_node(home_node)
    self.assertEqual(permissions, [])
    self.assertEqual(one_of_permissions, [])
# |-------------------------------------------------------------------------
# | Test ensure_node_has_url_property
# |-------------------------------------------------------------------------
def test_ensure_node_has_url_property_works_when_node_has_url_property_defined(self):
    """An explicit 'url' already present on the node is left untouched."""
    node_with_url = {
        'route': 'django_adminlte_2:sample2',
        'text': 'Sample2',
        'icon': 'fa fa-building',
        'url': '/foobar/',
    }
    sidebar_menu.ensure_node_has_url_property(node_with_url)
    self.assertEqual(node_with_url['url'], '/foobar/')
def test_ensure_node_has_url_property_adds_url_property_from_valid_route(self):
    """A missing 'url' is derived by reversing the node's route."""
    routed_node = {
        'route': 'django_adminlte_2:sample2',
        'text': 'Sample2',
        'icon': 'fa fa-building',
    }
    sidebar_menu.ensure_node_has_url_property(routed_node)
    self.assertEqual(routed_node['url'], '/sample2/')
def test_ensure_node_has_url_property_sets_url_to_a_hash_when_route_is_a_hash(self):
    """A '#' route produces a literal '#' url."""
    hash_node = {'route': '#', 'text': 'Sample2', 'icon': 'fa fa-building'}
    sidebar_menu.ensure_node_has_url_property(hash_node)
    self.assertEqual(hash_node['url'], '#')
def test_ensure_node_has_url_property_raises_key_error_when_route_field_missing(self):
    """A node lacking the 'route' key should raise KeyError."""
    node_missing_route = {'text': 'Sample2', 'icon': 'fa fa-building'}
    with self.assertRaises(KeyError):
        sidebar_menu.ensure_node_has_url_property(node_missing_route)
def test_ensure_node_has_url_property_raises_reverse_error_when_route_is_not_valid(self):
    """An unresolvable route name should surface NoReverseMatch."""
    bad_route_node = {'route': 'foobar', 'text': 'Sample2', 'icon': 'fa fa-building'}
    with self.assertRaises(NoReverseMatch):
        sidebar_menu.ensure_node_has_url_property(bad_route_node)
# |-------------------------------------------------------------------------
# | Test check_for_whitelisted_node
# |-------------------------------------------------------------------------
@override_settings(ADMINLTE2_MENU_PERMISSION_FREE_WHITELIST=['django_adminlte_2:sample2'])
def test_check_for_whitelisted_node_returns_true_when_node_in_list(self):
    """A node whose route appears in the whitelist setting is whitelisted."""
    whitelisted_node = {
        'route': 'django_adminlte_2:sample2',
        'text': 'Sample2',
        'icon': 'fa fa-building',
    }
    self.assertTrue(sidebar_menu.check_for_whitelisted_node(whitelisted_node))
def test_check_for_whitelisted_node_returns_false_when_node_not_in_list(self):
    """A route absent from the whitelist setting is not whitelisted."""
    unlisted_node = {'route': 'foobar', 'text': 'Sample2', 'icon': 'fa fa-building'}
    self.assertFalse(sidebar_menu.check_for_whitelisted_node(unlisted_node))
# |-------------------------------------------------------------------------
# | Test check_for_all_permissions
# |-------------------------------------------------------------------------
def test_check_for_all_permissions_returns_true_when_user_is_superuser(self):
    """Superusers pass the all-permissions check regardless of the list."""
    self._setup_superuser()
    self.assertTrue(
        sidebar_menu.check_for_all_permissions(
            self.superuser, ['does_not_matter_since_superuser'])
    )
def test_check_for_all_permissions_returns_true_when_user_is_not_su_but_has_perms(self):
    """A staff user holding every listed permission passes the check."""
    self._setup_staffuser('add_group')
    self.assertTrue(
        sidebar_menu.check_for_all_permissions(self.staffuser, ['auth.add_group'])
    )
def test_check_for_all_permissions_returns_false_when_permissions_is_empty_list(self):
    """An empty permission list fails the all-permissions check."""
    self._setup_staffuser('add_group')
    self.assertFalse(
        sidebar_menu.check_for_all_permissions(self.staffuser, [])
    )
def test_check_for_all_permissions_returns_false_when_user_does_not_have_perms(self):
    """A staff user missing a listed permission fails the check."""
    self._setup_staffuser()
    self.assertFalse(
        sidebar_menu.check_for_all_permissions(
            self.staffuser, ['user_does_not_have_this_one'])
    )
# |-------------------------------------------------------------------------
# | Test check_for_one_permission
# |-------------------------------------------------------------------------
def test_check_for_one_permission_returns_true_when_user_is_superuser(self):
    """Superusers pass the one-permission check regardless of the list."""
    self._setup_superuser()
    self.assertTrue(
        sidebar_menu.check_for_one_permission(
            self.superuser, ['does_not_matter_since_superuser'])
    )
def test_check_for_one_permission_returns_true_when_user_is_not_su_but_has_perms(self):
    """Holding any one of the listed permissions is enough to pass."""
    self._setup_staffuser('add_group')
    self.assertTrue(
        sidebar_menu.check_for_one_permission(
            self.staffuser, ['auth.add_group', 'auth.update_group'])
    )
def test_check_for_one_permission_returns_false_when_permissions_is_empty_list(self):
    """An empty permission list fails the one-permission check."""
    self._setup_staffuser('add_group')
    self.assertFalse(
        sidebar_menu.check_for_one_permission(self.staffuser, [])
    )
def test_check_for_one_permission_returns_false_when_user_does_not_have_perms(self):
    """A user holding none of the listed permissions fails the check."""
    self._setup_staffuser()
    self.assertFalse(
        sidebar_menu.check_for_one_permission(
            self.staffuser, ['user_does_not_have_this_one'])
    )
# |-------------------------------------------------------------------------
# | Test is_allowed_node
# |-------------------------------------------------------------------------
def test_is_allowed_node_returns_true_when_user_is_superuser_and_whitelist_is_off(self):
    """Superusers are always allowed when the whitelist is disabled."""
    self._setup_superuser()
    sample_node = {
        'route': 'django_adminlte_2:sample2',
        'text': 'Sample2',
        'icon': 'fa fa-building',
    }
    self.assertTrue(sidebar_menu.is_allowed_node(self.superuser, sample_node))
def test_is_allowed_node_returns_true_when_user_has_all_perms_and_whitelist_is_off(self):
    """A user holding every node permission is allowed (whitelist off)."""
    self._setup_staffuser('add_group')
    sample_node = {
        'route': 'django_adminlte_2:sample1',
        'text': 'Sample1',
        'icon': 'fa fa-building',
        'permissions': ['auth.add_group'],
    }
    self.assertTrue(sidebar_menu.is_allowed_node(self.staffuser, sample_node))
def test_is_allowed_node_returns_true_when_user_has_one_perm_and_whitelist_is_off(self):
    """A user holding one of the view's permissions is allowed (whitelist off)."""
    self._setup_staffuser('add_permission')
    sample_node = {
        'route': 'django_adminlte_2:sample2',
        'text': 'Sample2',
        'icon': 'fa fa-building',
    }
    self.assertTrue(sidebar_menu.is_allowed_node(self.staffuser, sample_node))
def test_is_allowed_node_returns_true_when_there_are_no_perms_on_node_and_whitelist_is_off(self):
    """A node with no permission requirements is allowed (whitelist off)."""
    self._setup_staffuser('add_group')
    home_node = {
        'route': getattr(settings, 'ADMINLTE2_HOME_ROUTE', 'django_adminlte_2:home'),
        'text': 'Home',
        'icon': 'fa fa-building',
    }
    self.assertTrue(sidebar_menu.is_allowed_node(self.staffuser, home_node))
@override_settings(ADMINLTE2_USE_WHITELIST_FOR_UNDEFINED_PERMISSIONS=True)
def test_is_allowed_node_returns_true_when_user_is_superuser_and_whitelist_is_on(self):
    """Superusers are always allowed even with the whitelist enabled."""
    self._setup_superuser()
    sample_node = {
        'route': 'django_adminlte_2:sample2',
        'text': 'Sample2',
        'icon': 'fa fa-building',
    }
    self.assertTrue(sidebar_menu.is_allowed_node(self.superuser, sample_node))
@override_settings(ADMINLTE2_USE_WHITELIST_FOR_UNDEFINED_PERMISSIONS=True)
def test_is_allowed_node_returns_true_when_user_has_all_perms_and_whitelist_is_on(self):
    """A user holding every node permission is allowed (whitelist on)."""
    self._setup_staffuser('add_group')
    sample_node = {
        'route': 'django_adminlte_2:sample1',
        'text': 'Sample1',
        'icon': 'fa fa-building',
        'permissions': ['auth.add_group'],
    }
    self.assertTrue(sidebar_menu.is_allowed_node(self.staffuser, sample_node))
@override_settings(ADMINLTE2_USE_WHITELIST_FOR_UNDEFINED_PERMISSIONS=True)
def test_is_allowed_node_returns_true_when_user_has_one_perm_and_whitelist_is_on(self):
    """A user holding one of the view's permissions is allowed (whitelist on)."""
    self._setup_staffuser('add_permission')
    sample_node = {
        'route': 'django_adminlte_2:sample2',
        'text': 'Sample2',
        'icon': 'fa fa-building',
    }
    self.assertTrue(sidebar_menu.is_allowed_node(self.staffuser, sample_node))
@override_settings(ADMINLTE2_USE_WHITELIST_FOR_UNDEFINED_PERMISSIONS=True)
@override_settings(ADMINLTE2_MENU_PERMISSION_FREE_WHITELIST=WHITELIST + ['django_adminlte_2:home'])
def test_is_allowed_node_returns_true_when_there_are_no_perms_and_whitelist_is_on_and_node_is_whitelisted(self):
"""Test is allowed | |
"""str: The queue's play mode.
Case-insensitive options are:
* ``'NORMAL'`` -- Turns off shuffle and repeat.
* ``'REPEAT_ALL'`` -- Turns on repeat and turns off shuffle.
* ``'SHUFFLE'`` -- Turns on shuffle *and* repeat. (It's
strange, I know.)
* ``'SHUFFLE_NOREPEAT'`` -- Turns on shuffle and turns off
repeat.
* ``'REPEAT_ONE'`` -- Turns on repeat one and turns off shuffle.
* ``'SHUFFLE_REPEAT_ONE'`` -- Turns on shuffle *and* repeat one. (It's
strange, I know.)
"""
result = self.avTransport.GetTransportSettings(
[
("InstanceID", 0),
]
)
return result["PlayMode"]
@play_mode.setter
def play_mode(self, playmode):
    """Set the queue's play mode (case-insensitive; see the getter for options)."""
    requested = playmode.upper()
    # Validate against the module-level set of known modes before sending.
    if requested not in PLAY_MODES:
        raise KeyError("'%s' is not a valid play mode" % requested)
    self.avTransport.SetPlayMode(
        [("InstanceID", 0), ("NewPlayMode", requested)]
    )
@property
@only_on_master  # Only for symmetry with the setter
def cross_fade(self):
    """bool: The speaker's cross fade state.

    True if enabled, False otherwise
    """
    response = self.avTransport.GetCrossfadeMode([("InstanceID", 0)])
    # Device reports "0"/"1"; convert via int so either maps cleanly to bool.
    return bool(int(response["CrossfadeMode"]))
@cross_fade.setter
@only_on_master
def cross_fade(self, crossfade):
    """Enable or disable cross fade on the speaker (truthy -> on)."""
    self.avTransport.SetCrossfadeMode(
        [("InstanceID", 0), ("CrossfadeMode", "1" if crossfade else "0")]
    )
def ramp_to_volume(self, volume, ramp_type="SLEEP_TIMER_RAMP_TYPE"):
    """Smoothly change the volume.

    Three ramp types are available:

    * ``'SLEEP_TIMER_RAMP_TYPE'`` (default): linear ramp from the current
      volume up or down to the new volume at 1.25 steps per second
      (e.g. going from volume 50 to 30 takes 16 seconds).
    * ``'ALARM_RAMP_TYPE'``: resets the volume to zero, waits roughly 30
      seconds, then ramps up to the desired value at 2.5 steps per second
      (e.g. volume 30 takes 12 seconds, not counting the wait).
    * ``'AUTOPLAY_RAMP_TYPE'``: resets the volume to zero, then ramps up
      quickly at 50 steps per second (e.g. volume 30 takes 0.6 seconds).

    Sonos picks the ramp rate from the chosen ramp type and returns the
    resulting transition time. The call is non-blocking and incurs no
    further network overhead once sent.

    Args:
        volume (int): The new volume.
        ramp_type (str, optional): The desired ramp type, as described
            above.

    Returns:
        int: The ramp time in seconds, rounded down. Note that this does
            not include the wait time.
    """
    ramp_args = [
        ("InstanceID", 0),
        ("Channel", "Master"),
        ("RampType", ramp_type),
        ("DesiredVolume", volume),
        ("ResetVolumeAfter", False),
        ("ProgramURI", ""),
    ]
    response = self.renderingControl.RampToVolume(ramp_args)
    return int(response["RampTime"])
def set_relative_volume(self, relative_volume):
    """Adjust the volume up or down by a relative amount.

    Adjustments that would push the volume past the 0-100 range are
    clamped to the nearest bound by the speaker itself.

    Note that this method is an alternative to using addition and
    subtraction assignment operators (+=, -=) on the `volume` property
    of a `SoCo` instance; those operators do the same job but cost two
    network calls per operation instead of one.

    Args:
        relative_volume (int): The relative volume adjustment. Can be
            positive or negative.

    Returns:
        int: The new volume setting.

    Raises:
        ValueError: If ``relative_volume`` cannot be cast as an integer.
    """
    adjustment = int(relative_volume)
    # Sonos clamps out-of-range results itself, so no local bounds check.
    response = self.renderingControl.SetRelativeVolume(
        [
            ("InstanceID", 0),
            ("Channel", "Master"),
            ("Adjustment", adjustment),
        ]
    )
    return int(response["NewVolume"])
@only_on_master
def play_from_queue(self, index, start=True):
    """Play a track from the queue by index.

    Args:
        index (int): 0-based index of the track to play
        start (bool): If the item that has been set should start playing
    """
    # The queue URI embeds this speaker's UID, so make sure the speaker
    # info (and therefore self.uid) has been fetched first.
    if not self.speaker_info:
        self.get_speaker_info()
    # Point the transport at this speaker's own queue...
    queue_uri = "x-rincon-queue:{0}#0".format(self.uid)
    self.avTransport.SetAVTransportURI(
        [("InstanceID", 0), ("CurrentURI", queue_uri), ("CurrentURIMetaData", "")]
    )
    # ...then select the requested track (Sonos counts tracks from 1)...
    self.avTransport.Seek(
        [("InstanceID", 0), ("Unit", "TRACK_NR"), ("Target", index + 1)]
    )
    # ...and optionally start playback.
    if start:
        self.play()
@only_on_master
def play(self):
    """Play the currently selected track.

    Sends the AVTransport ``Play`` action on instance 0 at speed 1.
    """
    self.avTransport.Play([("InstanceID", 0), ("Speed", 1)])
@only_on_master
# pylint: disable=too-many-arguments
def play_uri(self, uri="", meta="", title="", start=True, force_radio=False):
    """Play a URI.

    Playing a URI will replace what was playing with the stream
    given by the URI. For some streams at least a title is
    required as metadata. This can be provided using the ``meta``
    argument or the ``title`` argument. If the ``title`` argument
    is provided minimal metadata will be generated. If ``meta``
    argument is provided the ``title`` argument is ignored.

    Args:
        uri (str): URI of the stream to be played.
        meta (str): The metadata to show in the player, DIDL format.
        title (str): The title to show in the player (if no meta).
        start (bool): If the URI that has been set should start playing.
        force_radio (bool): forces a uri to play as a radio stream.

    On a Sonos controller music is shown with one of the following display
    formats and controls:

    * Radio format: Shows the name of the radio station and other available
      data. No seek, next, previous, or voting capability.
      Examples: TuneIn, radioPup
    * Smart Radio: Shows track name, artist, and album. Limited seek, next
      and sometimes voting capability depending on the Music Service.
      Examples: Amazon Prime Stations, Pandora Radio Stations.
    * Track format: Shows track name, artist, and album the same as when
      playing from a queue. Full seek, next and previous capabilities.
      Examples: Spotify, Napster, Rhapsody.

    How it is displayed is determined by the URI prefix:
    ``x-sonosapi-stream:``, ``x-sonosapi-radio:``,
    ``x-rincon-mp3radio:``, ``hls-radio:`` default to radio or
    smart radio format depending on the stream. Others default to
    track format: ``x-file-cifs:``, ``aac:``, ``http:``,
    ``https:``, ``x-sonos-spotify:`` (used by Spotify),
    ``x-sonosapi-hls-static:`` (Amazon Prime), ``x-sonos-http:``
    (Google Play & Napster).

    Some URIs that default to track format could be radio streams,
    typically ``http:``, ``https:`` or ``aac:``. To force display
    and controls to Radio format set ``force_radio=True``

    .. note:: Other URI prefixes exist but are less common.
       If you have information on these please add to this doc string.

    .. note:: A change in Sonos® (as of at least version 6.4.2)
       means that the devices no longer accept ordinary ``http:``
       and ``https:`` URIs for radio stations. This method has the
       option to replace these prefixes with the one that Sonos®
       expects: ``x-rincon-mp3radio:`` by using the
       "force_radio=True" parameter. A few streams may fail if
       not forced to Radio format.
    """
    if meta == "" and title != "":
        # Build minimal DIDL-Lite metadata so the controller can at least
        # display a title for the stream.
        meta_template = (
            '<DIDL-Lite xmlns:dc="http://purl.org/dc/elements'
            '/1.1/" xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/" '
            'xmlns:r="urn:schemas-rinconnetworks-com:metadata-1-0/" '
            'xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/">'
            '<item id="R:0/0/0" parentID="R:0/0" restricted="true">'
            "<dc:title>{title}</dc:title><upnp:class>"
            "object.item.audioItem.audioBroadcast</upnp:class><desc "
            'id="cdudn" nameSpace="urn:schemas-rinconnetworks-com:'
            'metadata-1-0/">{service}</desc></item></DIDL-Lite>'
        )
        tunein_service = "SA_RINCON65031_"
        # Radio stations need to have at least a title to play
        meta = meta_template.format(title=escape(title), service=tunein_service)
    # change uri prefix to force radio style display and commands
    if force_radio:
        colon = uri.find(":")
        if colon > 0:
            uri = "x-rincon-mp3radio{0}".format(uri[colon:])
    self.avTransport.SetAVTransportURI(
        [("InstanceID", 0), ("CurrentURI", uri), ("CurrentURIMetaData", meta)]
    )
    # The track is enqueued, now play it if needed
    if start:
        return self.play()
    return False
@only_on_master
def pause(self):
    """Pause the currently playing track.

    Sends the AVTransport ``Pause`` action on instance 0.
    """
    self.avTransport.Pause([("InstanceID", 0), ("Speed", 1)])
@only_on_master
def stop(self):
    """Stop the currently playing track.

    Sends the AVTransport ``Stop`` action on instance 0.
    """
    self.avTransport.Stop([("InstanceID", 0), ("Speed", 1)])
@only_on_master
def seek(self, timestamp):
    """Seek to a given timestamp in the current track, specified in the
    format of HH:MM:SS or H:MM:SS.

    Raises:
        ValueError: if the given timestamp is invalid.
    """
    # Validate locally before issuing the network call.
    valid = re.match(r"^[0-9][0-9]?:[0-9][0-9]:[0-9][0-9]$", timestamp)
    if valid is None:
        raise ValueError("invalid timestamp, use HH:MM:SS format")
    self.avTransport.Seek(
        [("InstanceID", 0), ("Unit", "REL_TIME"), ("Target", timestamp)]
    )
@only_on_master
def next(self):
    """Go to the next track.

    Sends the AVTransport ``Next`` action on instance 0.

    Keep in mind that next() can return errors
    for a variety of reasons. For example, if the Sonos is streaming
    Pandora and you call next() several times in quick succession an error
    code will likely be returned (since Pandora has limits on how many
    songs can be skipped).
    """
    self.avTransport.Next([("InstanceID", 0), ("Speed", 1)])
@only_on_master
def previous(self):
| |
path of ELMo""",
)
group.add_argument(
"--elmo.weight_file",
type=str, default="elmo_2x4096_512_2048cnn_2xhighway_weights.hdf5", dest="token.elmo.embedding.weight_file",
help=""" The weight file path of ELMo""",
)
group.add_argument(
"--elmo.trainable",
type=arg_str2bool, default=False, dest="token.elmo.embedding.trainable",
help=""" elmo Embedding Trainable""",
)
group.add_argument(
"--elmo.dropout",
type=float, default=0.5, dest="token.elmo.embedding.dropout",
help=""" Embedding dropout prob (default: 0.5)""",
)
group.add_argument(
"--elmo.project_dim",
type=int, default=None, dest="token.elmo.embedding.project_dim",
help=""" The number of projection dimension (default is None)""",
)
group.add_argument(
"--word_permeability.memory_clip",
type=int, default=3, dest="token.word_permeability.embedding.memory_clip",
help=""" The number of memory cell clip value """,
)
group.add_argument(
"--word_permeability.proj_clip",
type=int, default=3, dest="token.word_permeability.embedding.proj_clip",
help=""" The number of p clip value after projection """,
)
group.add_argument(
"--word_permeability.embed_dim",
type=int, default=1024, dest="token.word_permeability.embedding.embed_dim",
help=""" The number of Embedding dimension""",
)
group.add_argument(
"--word_permeability.linear_dim",
type=int, default=None, dest="token.word_permeability.embedding.linear_dim",
help=""" The number of linear projection dimension""",
)
group.add_argument(
"--word_permeability.trainable",
type=arg_str2bool, default=False, dest="token.word_permeability.embedding.trainable",
help=""" word_permeability Embedding Trainable """,
)
group.add_argument(
"--word_permeability.dropout",
type=float, default=0.5, dest="token.word_permeability.embedding.dropout",
help=""" Embedding dropout prob (default: 0.5)""",
)
group.add_argument(
"--word_permeability.activation",
type=str, default="tanh", dest="token.word_permeability.embedding.activation",
help=""" Activation Function (default is 'tanh') """,
)
group.add_argument(
"--word_permeability.bidirectional",
type=arg_str2bool, default=False, dest="token.word_permeability.embedding.bidirectional",
help=""" bidirectional use or not ([forward;backward]) (default is False) """,
)
group.add_argument(
"--frequent_word.embed_dim",
type=int, default=100, dest="token.frequent_word.embedding.embed_dim",
help=""" The number of Embedding dimension""",
)
group.add_argument(
"--frequent_word.pretrained_path",
type=str, default=None, dest="token.frequent_word.embedding.pretrained_path",
help=""" Add pretrained Word vector model's path. (support file format like Glove)""",
)
group.add_argument(
"--frequent_word.dropout",
type=float, default=0.2, dest="token.frequent_word.embedding.dropout",
help=""" Embedding dropout prob (default: 0.2)""",
)
group.add_argument(
"--word.embed_dim",
type=int, default=100, dest="token.word.embedding.embed_dim",
help=""" The number of Embedding dimension""",
)
group.add_argument(
"--word.pretrained_path",
type=str, default=None, dest="token.word.embedding.pretrained_path",
help=""" Add pretrained word vector model's path. (support file format like Glove)""",
)
group.add_argument(
"--word.trainable",
type=arg_str2bool, default=True, dest="token.word.embedding.trainable",
help=""" Word Embedding Trainable""",
)
group.add_argument(
"--word.dropout",
type=float, default=0.2, dest="token.word.embedding.dropout",
help=""" Embedding dropout prob (default: 0.2)""",
)
def model(parser):
group = parser.add_argument_group("Model")
group.add_argument(
"--model_name",
type=str, default="bidaf", dest="model.name",
help="""\
Pre-defined model
* Reading Comprehension
[bert_for_qa|bidaf|bidaf_no_answer|docqa|docqa_no_answer|dclaf|qanet|simple]
* Regression
[bert_for_reg|roberta_for_reg]
* Semantic Parsing
[sqlnet]
* Sequence Classification
[bert_for_seq_cls|roberta_for_seq_cls|structured_self_attention]
* Token Classification
[bert_for_tok_cls]
""",
)
reading_comprehension_title = "ใ
Reading Comprehension"
group = parser.add_argument_group(f"{reading_comprehension_title}\n # BERT for QuestionAnswering")
group.add_argument(
"--bert_for_qa.pretrained_model_name",
type=str, default=None, dest="model.bert_for_qa.pretrained_model_name",
help=""" A str with the name of a pre-trained model to load selected in the list of (default: None):
. `bert-base-uncased`
. `bert-large-uncased`
. `bert-base-cased`
. `bert-base-multilingual`
. `bert-base-chinese` """,
)
group.add_argument(
"--bert_for_qa.answer_maxlen",
type=int, default=None, dest="model.bert_for_qa.answer_maxlen",
help=""" The number of maximum answer's length (default: None)""",
)
group = parser.add_argument_group(f" # RoBERTa")
group.add_argument(
"--roberta_for_qa.pretrained_model_name",
type=str, default=None, dest="model.roberta_for_qa.pretrained_model_name",
help=""" A str with the name of a pre-trained model to load selected in the list of (default: None):
. `roberta-base`
. `roberta-large` """,
)
group.add_argument(
"--roberta_for_qa.answer_maxlen",
type=int, default=None, dest="model.roberta_for_qa.answer_maxlen",
help=""" The number of maximum answer's length (default: None)""",
)
group = parser.add_argument_group(f" # BiDAF")
group.add_argument(
"--bidaf.aligned_query_embedding",
type=int, default=False, dest="model.bidaf.aligned_query_embedding",
help=""" Aligned Question Embedding (default: False)""",
)
group.add_argument(
"--bidaf.answer_maxlen",
type=int, default=None, dest="model.bidaf.answer_maxlen",
help=""" The number of maximum answer's length (default: None)""",
)
group.add_argument(
"--bidaf.model_dim",
type=int, default=100, dest="model.bidaf.model_dim",
help=""" The number of BiDAF model dimension""",
)
group.add_argument(
"--bidaf.contextual_rnn_num_layer",
type=int, default=1, dest="model.bidaf.contextual_rnn_num_layer",
help=""" The number of BiDAF model contextual_rnn's recurrent layers""",
)
group.add_argument(
"--bidaf.modeling_rnn_num_layer",
type=int, default=2, dest="model.bidaf.modeling_rnn_num_layer",
help=""" The number of BiDAF model modeling_rnn's recurrent layers""",
)
group.add_argument(
"--bidaf.predict_rnn_num_layer",
type=int, default=1, dest="model.bidaf.predict_rnn_num_layer",
help=""" The number of BiDAF model predict_rnn's recurrent layers""",
)
group.add_argument(
"--bidaf.dropout",
type=float, default=0.2, dest="model.bidaf.dropout",
help=""" The prob of BiDAF dropout""",
)
group = parser.add_argument_group(" # BiDAF + Simple bias")
group.add_argument(
"--bidaf_no_answer.aligned_query_embedding",
type=int, default=False, dest="model.bidaf_no_answer.aligned_query_embedding",
help=""" Aligned Question Embedding (default: False)""",
)
group.add_argument(
"--bidaf_no_answer.answer_maxlen",
type=int, default=None, dest="model.bidaf_no_answer.answer_maxlen",
help=""" The number of maximum answer's length (default: None)""",
)
group.add_argument(
"--bidaf_no_answer.model_dim",
type=int, default=100, dest="model.bidaf_no_answer.model_dim",
help=""" The number of BiDAF model dimension""",
)
group.add_argument(
"--bidaf_no_answer.contextual_rnn_num_layer",
type=int, default=1, dest="model.bidaf_no_answer.contextual_rnn_num_layer",
help=""" The number of BiDAF model contextual_rnn's recurrent layers""",
)
group.add_argument(
"--bidaf_no_answer.modeling_rnn_num_layer",
type=int, default=2, dest="model.bidaf_no_answer.modeling_rnn_num_layer",
help=""" The number of BiDAF model modeling_rnn's recurrent layers""",
)
group.add_argument(
"--bidaf_no_answer.predict_rnn_num_layer",
type=int, default=1, dest="model.bidaf_no_answer.predict_rnn_num_layer",
help=""" The number of BiDAF model predict_rnn's recurrent layers""",
)
group.add_argument(
"--bidaf_no_answer.dropout",
type=float, default=0.2, dest="model.bidaf_no_answer.dropout",
help=""" The prob of BiDAF dropout""",
)
group = parser.add_argument_group(" # Simple")
group.add_argument(
"--simple.answer_maxlen",
type=int, default=None, dest="model.simple.answer_maxlen",
help=""" The number of maximum answer's length (default: None)""",
)
group.add_argument(
"--simple.model_dim",
type=int, default=100, dest="model.simple.model_dim",
help=""" The number of Simple model dimension""",
)
group.add_argument(
"--simple.dropout",
type=float, default=0.2, dest="model.simple.dropout",
help=""" The prob of Simple dropout""",
)
group = parser.add_argument_group(" # QANet")
group.add_argument(
"--qanet.aligned_query_embedding",
type=int, default=False, dest="model.qanet.aligned_query_embedding",
help=""" Aligned Question Embedding (default: False)""",
)
group.add_argument(
"--qanet.answer_maxlen",
type=int, default=30, dest="model.qanet.answer_maxlen",
help=""" The number of maximum answer's length (default: 30)""",
)
group.add_argument(
"--qanet.model_dim",
type=int, default=128, dest="model.qanet.model_dim",
help=""" The number of QANet model dimension""",
)
group.add_argument(
"--qanet.kernel_size_in_embedding",
type=int, default=7, dest="model.qanet.kernel_size_in_embedding",
help=""" The number of QANet model Embed Encoder kernel_size""",
)
group.add_argument(
"--qanet.num_head_in_embedding",
type=int, default=8, dest="model.qanet.num_head_in_embedding",
help=""" The number of QANet model Multi-Head Attention's head in Embedding Block""",
)
group.add_argument(
"--qanet.num_conv_block_in_embedding",
type=int, default=4, dest="model.qanet.num_conv_block_in_embedding",
help=""" The number of QANet model Conv Blocks in Embedding Block""",
)
group.add_argument(
"--qanet.num_embedding_encoder_block",
type=int, default=1, dest="model.qanet.num_embedding_encoder_block",
help=""" The number of QANet model Embedding Encoder Blocks""",
)
group.add_argument(
"--qanet.kernel_size_in_modeling",
type=int, default=5, dest="model.qanet.kernel_size_in_modeling",
help=""" The number of QANet model Model Encoder kernel_size""",
)
group.add_argument(
"--qanet.num_head_in_modeling",
type=int, default=8, dest="model.qanet.num_head_in_modeling",
help=""" The number of QANet model Multi-Head Attention's head in Modeling Block""",
)
group.add_argument(
"--qanet.num_conv_block_in_modeling",
type=int, default=2, dest="model.qanet.num_conv_block_in_modeling",
help=""" The number of QANet model Conv Blocks in Modeling Block""",
)
group.add_argument(
"--qanet.num_modeling_encoder_block",
type=int, default=7, dest="model.qanet.num_modeling_encoder_block",
help=""" The number of QANet model Modeling Encoder Blocks""",
)
group.add_argument(
"--qanet.layer_dropout",
type=float, default=0.9, dest="model.qanet.layer_dropout",
help=""" The prob of QANet model layer dropout""",
)
group.add_argument(
"--qanet.dropout",
type=float, default=0.1, dest="model.qanet.dropout",
help=""" The prob of QANet dropout""",
)
group = parser.add_argument_group(" # DocQA")
group.add_argument(
"--docqa.aligned_query_embedding",
type=arg_str2bool, default=False, dest="model.docqa.aligned_query_embedding",
help=""" Aligned Question Embedding (default: False)""",
)
group.add_argument(
"--docqa.answer_maxlen",
type=int, default=17, dest="model.docqa.answer_maxlen",
help=""" The number of maximum answer's length (default: 17)""",
)
group.add_argument(
"--docqa.rnn_dim",
type=int, default=100, dest="model.docqa.rnn_dim",
help=""" The number of DocQA model rnn dimension""",
)
group.add_argument(
"--docqa.linear_dim",
type=int, default=200, dest="model.docqa.linear_dim",
help=""" The number of DocQA model linear dimension""",
)
group.add_argument(
"--docqa.preprocess_rnn_num_layer",
type=int, default=1, dest="model.docqa.preprocess_rnn_num_layer",
help=""" The number of DocQA model preprocess_rnn's recurrent layers""",
)
group.add_argument(
"--docqa.modeling_rnn_num_layer",
type=int, default=1, dest="model.docqa.modeling_rnn_num_layer",
help=""" The number of DocQA model modeling_rnn's recurrent layers""",
)
group.add_argument(
"--docqa.predict_rnn_num_layer",
type=int, default=1, dest="model.docqa.predict_rnn_num_layer",
help=""" The number of DocQA model predict_rnn's recurrent layers""",
)
group.add_argument(
"--docqa.dropout",
type=float, default=0.2, dest="model.docqa.dropout",
help=""" The prob of DocQA dropout""",
)
group.add_argument(
"--docqa.weight_init",
type=arg_str2bool, default=True, dest="model.docqa.weight_init",
help=""" Weight Init""",
)
group = parser.add_argument_group(" # DocQA + No_Answer Option")
group.add_argument(
"--docqa_no_answer.aligned_query_embedding",
type=arg_str2bool, default=False, dest="model.docqa_no_answer.aligned_query_embedding",
help=""" Aligned Question Embedding (default: False)""",
)
group.add_argument(
"--docqa_no_answer.answer_maxlen",
type=int, default=17, dest="model.docqa_no_answer.answer_maxlen",
help=""" The number of maximum answer's length (default: None)""",
)
group.add_argument(
"--docqa_no_answer.rnn_dim",
type=int, default=100, dest="model.docqa_no_answer.rnn_dim",
help=""" The number of docqa_no_answer model rnn dimension""",
)
group.add_argument(
"--docqa_no_answer.linear_dim",
type=int, default=200, dest="model.docqa_no_answer.linear_dim",
help=""" The number of docqa_no_answer model linear dimension""",
)
group.add_argument(
"--docqa_no_answer.dropout",
type=float, default=0.2, dest="model.docqa_no_answer.dropout",
help=""" The prob of QANet dropout""",
)
group.add_argument(
"--docqa_no_answer.weight_init",
type=arg_str2bool, default=True, dest="model.docqa_no_answer.weight_init",
help=""" Weight Init""",
)
group = parser.add_argument_group(" # DrQA")
group.add_argument(
"--drqa.aligned_query_embedding",
type=int, default=True, dest="model.drqa.aligned_query_embedding",
help=""" Aligned Question Embedding (default: True)""",
)
group.add_argument(
"--drqa.answer_maxlen",
type=int, default=15, dest="model.drqa.answer_maxlen",
help=""" The number of maximum answer's length (default: None)""",
)
group.add_argument(
"--drqa.model_dim",
type=int, default=128, dest="model.drqa.model_dim",
help=""" The number of document reader model dimension""",
)
group.add_argument(
"--drqa.dropout",
type=int, default=0.3, dest="model.drqa.dropout",
help=""" The number of document reader model dropout""",
)
regression_title = "ใ
Regression"
group = parser.add_argument_group(f"{regression_title}\n # BERT for Regression")
group.add_argument(
"--bert_for_reg.pretrained_model_name",
type=str, default=None, dest="model.bert_for_reg.pretrained_model_name",
help=""" A str with the name of a pre-trained model to load selected in the list of (default: None):
. `bert-base-uncased`
. `bert-large-uncased`
. `bert-base-cased`
. `bert-base-multilingual`
. `bert-base-chinese` """,
)
group.add_argument(
"--bert_for_reg.dropout",
type=float, default=0.2, dest="model.bert_for_reg.dropout",
help=""" The prob of fc layer dropout """
)
group = parser.add_argument_group(f" # RoBERTa")
group.add_argument(
"--roberta_for_reg.pretrained_model_name",
type=str, default=None, dest="model.roberta_for_reg.pretrained_model_name",
help=""" A str with the name of a pre-trained model to load selected in the list of (default: None):
. `roberta-base`
. `roberta-large` """,
)
group.add_argument(
"--roberta_for_reg.dropout",
type=float, default=0.2, dest="model.roberta_for_reg.dropout",
help=""" The prob of fc layer dropout """
)
semantic_parsing_title = "ใ
Semantic Parsing"
group = parser.add_argument_group(f"{semantic_parsing_title}\n # SQLNet")
group.add_argument(
"--sqlnet.column_attention",
type=int, default=True, dest="model.sqlnet.column_attention",
help=""" Compute attention map on a question conditioned on the column names (default: True)""",
)
group.add_argument(
"--sqlnet.model_dim",
type=int, default=100, dest="model.sqlnet.model_dim",
help=""" The number of document reader model dimension""",
)
group.add_argument(
"--sqlnet.rnn_num_layer",
type=int, default=2, | |
* from mp4 where (upper(NAME) GLOB upper(?)) OR (upper(series) GLOB upper(?)) OR (upper(genre) GLOB upper(?)) ORDER BY id desc", (searchmoviestream, searchmoviestream, searchmoviestream))
rows = cur.fetchall();
return render_template("moviestreamdblist.html", rows=rows)
@app.route('/searchmoviestreamascdb')
def mp4ascdb():
    """List every mp4 row ordered by id ascending (login required)."""
    if not session.get('logged_in'):
        return render_template('login.html')
    # Fixes: the original declared `global searchmoviestream` but never used
    # it, and leaked the sqlite connection on every request.
    con = sql.connect("static/vid.db")
    con.row_factory = sql.Row
    cur = con.cursor()
    cur.execute("select * from mp4 ORDER BY id asc")
    rows = cur.fetchall()
    con.close()
    return render_template("moviestreamdb.html", rows=rows)
@app.route('/searchmoviestreamdescdb')
def mp4dscdb():
    """List every mp4 row ordered by id descending (login required)."""
    if not session.get('logged_in'):
        return render_template('login.html')
    # Fixes: drops the unused `global searchmoviestream` declaration and
    # closes the sqlite connection, which the original leaked.
    con = sql.connect("static/vid.db")
    con.row_factory = sql.Row
    cur = con.cursor()
    cur.execute("select * from mp4 ORDER BY id desc")
    rows = cur.fetchall()
    con.close()
    return render_template("moviestreamdb.html", rows=rows)
@app.route('/adminlogin', methods=['POST'])
def do_admin_login():
    """Authenticate an admin against user.db and set the admin session flag.

    NOTE(review): admin passwords appear to be stored in plain text in
    user.db -- confirm and migrate to hashed storage when possible.
    """
    username = str(request.form['username'])
    password = str(request.form['password'])
    con = sql.connect("user.db")
    con.row_factory = sql.Row
    cur = con.cursor()
    # Security fix: the original concatenated both form fields into the SQL
    # string, allowing a classic `' OR '1'='1` login bypass. Bind parameters
    # instead. Also closes the connection, which was leaked before.
    cur.execute(
        "SELECT * FROM users WHERE username = ? AND password = ?",
        (username, password),
    )
    rows = cur.fetchall()
    con.close()
    if rows:
        session['admin_logged_in'] = True
        return render_template("admin.html")
    flash('wrong password!')
    return render_template("adminlogin.html")
@app.route('/admin')
def admin():
    """Admin dashboard; shows the admin login form when not authenticated."""
    if session.get('admin_logged_in'):
        return render_template("admin.html")
    return render_template('adminlogin.html')
@app.route('/login', methods=['POST'])
def do_user_login():
    """Authenticate a user, enforce bans, and set the session flags.

    On success stores the user's chat id in the module-level ``chatid``
    global (kept for compatibility with the rest of the app) and sets
    ``session['logged_in']`` plus ``session['premium']``.

    NOTE(review): passwords are hashed with unsalted MD5 to match the
    hashes already stored in userlog.db -- migrate to a salted KDF once
    stored hashes can be regenerated.
    """
    global chatid
    username = str(request.form['username'])
    password = hashlib.md5(str(request.form['password']).encode()).hexdigest()
    # Security fix: the original concatenated the form fields into the SQL
    # text of four separate queries (login bypass via injection). One
    # parameterized query now fetches everything the route needs, and the
    # connection is closed instead of leaked.
    con = sql.connect("userlog.db")
    con.row_factory = sql.Row
    try:
        cur = con.cursor()
        cur.execute(
            "SELECT active, premium, userid FROM users "
            "WHERE username = ? AND password = ?",
            (username, password),
        )
        row = cur.fetchone()
    finally:
        con.close()
    if row is None:
        flash('wrong password!')
        return render_template("login.html")
    if int(row['active']) < 1:
        flash("You have been banned! Shut Up!")
        return render_template("banned.html")
    chatid = int(row['userid'])
    # Bug fixes: the original (a) overwrote `premium` with the chatid string
    # while parsing the userid row, so the 'premium' comparison never
    # matched, and (b) inverted the flag (session['premium'] = False for
    # rows marked 'premium') -- the combined effect was that *every* user
    # was treated as premium. Set the flag from the stored value directly.
    session['logged_in'] = True
    if str(row['premium']) == 'premium':
        session['premium'] = True
        flash("Welcome " + username + "! You are a Premium user!")
    else:
        session['premium'] = False
        flash("Welcome " + username + "!")
    return render_template("index.html")
@app.route('/')
def home():
    """Landing page: index for logged-in users, the login form otherwise."""
    template = "index.html" if session.get('logged_in') else 'login.html'
    return render_template(template)
@app.route("/logout")
def logout():
    """Clear both the user and admin session flags, then show the landing page."""
    for flag in ('logged_in', 'admin_logged_in'):
        session[flag] = False
    return home()
@app.route('/signup')
def signup():
    """Serve the account registration form."""
    return render_template("signup.html")
@app.route('/signupres', methods=['POST'])
def do_user_register():
    """Create a new user account plus a per-user favourites table.

    Validates the chat id and the repeated password, rejects duplicate
    usernames, then inserts the user row and creates a favourites table
    named after the chat id.
    """
    POST_USERNAME = str(request.form['username'])
    POST_EMAIL = str(request.form['email'])
    POST_PASSWORD = str(request.form['password'])
    POST_PASSWORD_SECOND = str(request.form['passwordse'])
    POST_CHATID = int(request.form['chatid'])
    ACTIVE = 1
    if not POST_CHATID:
        flash("Fill in CHAT-ID")
        return render_template("signup.html")
    # Bug fix: the original compared against the corrupted identifier
    # `POST_<PASSWORD>` (a syntax error); the intended operand is the
    # repeated password field.
    if POST_PASSWORD != POST_PASSWORD_SECOND:
        flash('Password does not match!')
        return render_template("signup.html")
    con = sql.connect("userlog.db")
    con.row_factory = sql.Row
    cur = con.cursor()
    # Security fix: parameterized lookup -- the original concatenated the
    # username straight into the SQL string.
    cur.execute("SELECT * FROM users WHERE username = ?", (POST_USERNAME,))
    rows = cur.fetchall()
    con.close()
    if rows:
        flash("Username already taken!")
        return render_template("signup.html")
    # Bug fix: the original called the corrupted name `<PASSWORD>(...)`;
    # the login route hashes with MD5, so hash the same way here.
    # NOTE(review): unsalted MD5 is kept only for compatibility with the
    # existing rows in userlog.db.
    hashed = hashlib.md5(POST_PASSWORD.encode()).hexdigest()
    con = sql.connect("userlog.db")
    with con:
        cur = con.cursor()
        cur.execute(
            "INSERT INTO users (id, username, password, email, active, userid, premium)"
            " VALUES (NULL, ?, ?, ?, ?, ?, '0')",
            (POST_USERNAME, hashed, POST_EMAIL, ACTIVE, POST_CHATID),
        )
        # Table names cannot be bound as parameters; POST_CHATID is already
        # an int, so interpolating str(POST_CHATID) cannot inject SQL.
        cur.execute(
            'CREATE TABLE "' + str(POST_CHATID) + '" '
            '( "id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, '
            '"favtype" TEXT NOT NULL, "favid" INTEGER NOT NULL, '
            '"favname" INTEGER NOT NULL);'
        )
        con.commit()
    con.close()
    return home()
@app.route('/mkfav<uri>')
def mkfav(uri):
    """Mark an item as a favourite; only music ids ('m<id>') are handled.

    TODO(review): the favourite is looked up but never written to the
    per-user favourites table -- the feature looks unfinished.
    """
    if not session.get('logged_in'):
        return render_template('login.html')
    global chatid
    fav = uri.replace("<", "").replace(">", "")
    favtype = fav[:1]
    if favtype == "m":
        favid = fav.replace("m", "")
        con = sql.connect("static/bot.db")
        with con:
            cur = con.cursor()
            # Security fix: parameterized query -- the original concatenated
            # the id into the SQL text.
            cur.execute("select * from music where id = ?", (favid,))
            rows = cur.fetchall()
            title = str(rows[0][2] + " " + rows[0][3])
            # Bug fix: the original ended with `print(rows[title])`, which
            # indexes a list with a str and raised TypeError on every call.
            print(title)
    return render_template("index.html")
@app.route('/userlist')
def userlist():
    """List all user accounts for the admin panel (admin only)."""
    if not session.get('admin_logged_in'):
        return render_template('adminlogin.html')
    # Fix: close the sqlite connection, which the original leaked.
    con = sql.connect("userlog.db")
    con.row_factory = sql.Row
    cur = con.cursor()
    cur.execute("select * from users")
    rows = cur.fetchall()
    con.close()
    return render_template("userlist.html", rows=rows)
@app.route('/deluser<ids>')
def deluser(ids):
    """Soft-delete a user (admin only).

    NOTE(review): this sets active = 0, exactly like /banuser -- rows are
    never actually removed; confirm whether a real DELETE was intended.
    """
    if not session.get('admin_logged_in'):
        return render_template('adminlogin.html')
    user_id = ids.replace("<", "").replace(">", "")
    con = sql.connect("userlog.db")
    with con:
        cur = con.cursor()
        # Security fix: parameterized update -- the original concatenated
        # the id into the SQL string.
        cur.execute("UPDATE users SET active = 0 WHERE id = ?", (user_id,))
        con.commit()
    con.close()
    flash("User Deleted!")
    return render_template("userlist.html")
@app.route('/banuser<ids>')
def banuser(ids):
    """Ban a user by clearing their active flag (admin only)."""
    if not session.get('admin_logged_in'):
        return render_template('adminlogin.html')
    user_id = ids.replace("<", "").replace(">", "")
    con = sql.connect("userlog.db")
    with con:
        cur = con.cursor()
        # Security fix: parameterized update -- the original concatenated
        # the id into the SQL string. Debug print(ids) removed.
        cur.execute("UPDATE users SET active = 0 WHERE id = ?", (user_id,))
        con.commit()
    con.close()
    flash("User Banned!")
    return render_template("userlist.html")
@app.route('/unbanuser<ids>')
def unbanuser(ids):
    """Lift a user's ban by restoring their active flag (admin only)."""
    if not session.get('admin_logged_in'):
        return render_template('adminlogin.html')
    user_id = ids.replace("<", "").replace(">", "")
    con = sql.connect("userlog.db")
    with con:
        cur = con.cursor()
        # Security fix: parameterized update -- the original concatenated
        # the id into the SQL string.
        cur.execute("UPDATE users SET active = 1 WHERE id = ?", (user_id,))
        con.commit()
    con.close()
    flash("User unbanned!")
    return render_template("userlist.html")
@app.route('/mkpremium<ids>')
def mkpremium(ids):
    """Grant premium status to a user (admin only)."""
    if not session.get('admin_logged_in'):
        return render_template('adminlogin.html')
    user_id = ids.replace("<", "").replace(">", "")
    con = sql.connect("userlog.db")
    with con:
        cur = con.cursor()
        # Security fix: parameterized update -- the original concatenated
        # the id into the SQL string. Debug print(ids) removed.
        cur.execute("UPDATE users SET premium = 'premium' WHERE id = ?", (user_id,))
        con.commit()
    con.close()
    flash("User is premium!")
    return render_template("userlist.html")
@app.route('/umkpremium<ids>')
def umkpremium(ids):
    """Revoke a user's premium status (admin only)."""
    if not session.get('admin_logged_in'):
        return render_template('adminlogin.html')
    user_id = ids.replace("<", "").replace(">", "")
    con = sql.connect("userlog.db")
    with con:
        cur = con.cursor()
        # Security fix: parameterized update -- the original concatenated
        # the id into the SQL string. Debug print(ids) removed.
        cur.execute("UPDATE users SET premium = 'nonpremium' WHERE id = ?", (user_id,))
        con.commit()
    con.close()
    flash("Premium removed!")
    return render_template("userlist.html")
@app.route('/delfile')
def delfile():
    """Show the delete-file form; admins only."""
    if session.get('admin_logged_in'):
        return render_template('deletefile.html')
    return render_template('adminlogin.html')
@app.route('/delfileres', methods=['POST', 'GET'])
def delfileres():
    """Delete a file row by id (admin only)."""
    # Security fix: the original route performed the DELETE with no auth
    # check at all -- require an admin session like the sibling routes.
    if not session.get('admin_logged_in'):
        return render_template('adminlogin.html')
    if request.method == 'POST':
        delid = request.form['fileid']
        con = sql.connect("static/bot.db")
        cur = con.cursor()
        # Security fix: parameterized delete -- the original concatenated
        # the form value into the SQL string.
        cur.execute("DELETE FROM files where ID = ?", (delid,))
        con.commit()
        con.close()
        flash("DELETED!")
    # Bug fix: GET requests previously fell off the end and returned None
    # (a 500 from Flask); always render the admin page.
    return render_template("admin.html")
@app.route('/delstream')
def delstream():
    """Show the delete-stream form; admins only."""
    if session.get('admin_logged_in'):
        return render_template('delstream.html')
    return render_template('adminlogin.html')
@app.route('/delstreamres', methods=['POST', 'GET'])
def delstreamres():
    """Delete an mp4 stream row by id (admin only)."""
    # Security fix: the original route performed the DELETE with no auth
    # check at all -- require an admin session like the sibling routes.
    if not session.get('admin_logged_in'):
        return render_template('adminlogin.html')
    if request.method == 'POST':
        delid = request.form['fileid']
        con = sql.connect("static/vid.db")
        cur = con.cursor()
        # Security fix: parameterized delete -- the original concatenated
        # the form value into the SQL string.
        cur.execute("DELETE FROM mp4 where ID = ?", (delid,))
        con.commit()
        con.close()
        flash("DELETED!")
    # Bug fix: GET requests previously fell off the end and returned None
    # (a 500 from Flask); always render the admin page.
    return render_template("admin.html")
@app.route('/vid<uri>')
def vidplay(uri):
    """Render the video player for the mp4 row matching the given id."""
    if not session.get('logged_in'):
        return render_template('login.html')
    vid_id = uri.replace("<", "").replace(">", "")
    con = sql.connect("static/vid.db")
    cur = con.cursor()
    # Security fix: parameterized query -- the original concatenated the id
    # into the SQL string.
    cur.execute("SELECT PATH FROM mp4 where ID = ?", (vid_id,))
    rows = cur.fetchall()
    con.close()
    # Bug fix: the original stringified the whole result list and stripped
    # "[('" / "',)]" by hand; read the single column directly instead, and
    # tolerate an unknown id rather than rendering repr() debris.
    path = rows[0][0] if rows else ""
    print(path)
    return render_template('vidplayer.html', value=path)
@app.route('/getpremium')
def getpremium():
    """Show the premium upgrade page to logged-in users."""
    if session.get('logged_in'):
        return render_template("premium.html")
    return render_template('login.html')
@app.route('/premiumpayres', methods=['POST'])
def do_premium_pay():
    """Record a premium payment (PSC pin + chat id) for later manual review."""
    POST_PSCCODE = str(request.form['pscpin'])
    POST_CHATID = int(request.form['chatid'])
    now = str(datetime.datetime.now())
    if not POST_CHATID:
        flash("Fill in CHAT-ID")
        return render_template("premium.html")
    # Bug fix: the original repeated the CHAT-ID test here, so a missing
    # PSC pin was never caught even though the message said so.
    if not POST_PSCCODE:
        flash("Fill in PSC Pin")
        return render_template("premium.html")
    con = sql.connect("userlog.db")
    with con:
        cur = con.cursor()
        cur.execute(
            "INSERT INTO premiumpay (id, date, chatid, psccode) VALUES (NULL, ?, ?, ?)",
            (now, POST_CHATID, POST_PSCCODE),
        )
        con.commit()
    con.close()
    flash("Payment accepted")
    return render_template("index.html")
@app.route('/paylist')
def showpay():
    """List recorded premium payments for the admin panel (admin only)."""
    if not session.get('admin_logged_in'):
        return render_template('adminlogin.html')
    # Fix: close the sqlite connection, which the original leaked.
    con = sql.connect("userlog.db")
    con.row_factory = sql.Row
    cur = con.cursor()
    cur.execute("select * from premiumpay")
    rows = cur.fetchall()
    con.close()
    return render_template("showpayment.html", rows=rows)
@app.route('/vidbyseries<ids>')
def vidbyseries(ids):
ids = str(ids)
ids = ids.replace(">", "")
ids = ids.replace("<", "")
if not session.get('logged_in'):
return render_template('login.html')
elif session.get('logged_in'):
if not session.get('premium'):
flash("This is an Premium Feature!")
return render_template('index.html')
elif session.get('premium'):
con = sql.connect("static/vid.db")
con.row_factory = sql.Row
print(ids)
cur = con.cursor()
stmnt = "select * from mp4 where | |
26312.2479372761 * self.t)
Z0 += 0.00000000005 * math.cos(2.14793916248 + 128320.75184749259 * self.t)
Z0 += 0.00000000004 * math.cos(3.03681019540 + 151.0476698429 * self.t)
Z0 += 0.00000000004 * math.cos(0.23608219351 + 26057.57628056979 * self.t)
Z0 += 0.00000000004 * math.cos(2.78449581166 + 25138.7275326044 * self.t)
Z0 += 0.00000000004 * math.cos(1.74750321635 + 27044.1922975448 * self.t)
Z0 += 0.00000000004 * math.cos(5.26585590029 + 138319.60486120995 * self.t)
Z0 += 0.00000000004 * math.cos(2.16361705057 + 78366.80219894118 * self.t)
Z0 += 0.00000000005 * math.cos(1.86438708559 + 51653.22886505459 * self.t)
Z0 += 0.00000000004 * math.cos(3.67745684549 + 148532.89040742096 * self.t)
Z0 += 0.00000000005 * math.cos(3.36418574107 + 86457.98475793119 * self.t)
Z0 += 0.00000000004 * math.cos(4.39817949768 + 78896.49316403578 * self.t)
Z0 += 0.00000000004 * math.cos(3.16495363173 + 104197.83375581198 * self.t)
Z0 += 0.00000000004 * math.cos(5.96720583626 + 1485.9801210652 * self.t)
Z0 += 0.00000000003 * math.cos(3.62369510287 + 51852.30086649099 * self.t)
Z0 += 0.00000000004 * math.cos(4.95850373202 + 80382.47328510099 * self.t)
Z0 += 0.00000000003 * math.cos(0.24273955075 + 128106.31931499895 * self.t)
Z0 += 0.00000000004 * math.cos(6.09423095974 + 51528.79544983359 * self.t)
Z0 += 0.00000000004 * math.cos(2.26832726016 + 64901.25971792339 * self.t)
Z0 += 0.00000000004 * math.cos(5.37231956323 + 99024.13645791399 * self.t)
Z0 += 0.00000000004 * math.cos(5.83099981006 + 26411.4085582316 * self.t)
Z0 += 0.00000000003 * math.cos(5.27877050710 + 14.2270940016 * self.t)
Z0 += 0.00000000004 * math.cos(1.01079656557 + 16703.062133499 * self.t)
Z0 += 0.00000000003 * math.cos(5.29624442233 + 51432.81622261579 * self.t)
Z0 += 0.00000000003 * math.cos(1.99372517333 + 9384.8410080752 * self.t)
Z0 += 0.00000000003 * math.cos(1.08549694329 + 92741.06060792258 * self.t)
Z0 += 0.00000000004 * math.cos(2.61532511231 + 27250.37784598199 * self.t)
Z0 += 0.00000000004 * math.cos(3.84384246230 + 65831.6667743248 * self.t)
Z0 += 0.00000000004 * math.cos(5.07647835306 + 118828.96374949679 * self.t)
Z0 += 0.00000000004 * math.cos(0.95691561960 + 1795.258443721 * self.t)
Z0 += 0.00000000003 * math.cos(4.56071055194 + 130419.8459469712 * self.t)
Z0 += 0.00000000004 * math.cos(3.21002699852 + 51969.62073471119 * self.t)
Z0 += 0.00000000003 * math.cos(0.53542089836 + 25985.94062330859 * self.t)
Z0 += 0.00000000003 * math.cos(0.86346795613 + 27972.80430499159 * self.t)
Z0 += 0.00000000004 * math.cos(1.11850373850 + 52609.51802102519 * self.t)
Z0 += 0.00000000003 * math.cos(5.13697350283 + 26189.8656598398 * self.t)
Z0 += 0.00000000003 * math.cos(3.83162907268 + 207593.84658050018 * self.t)
Z0 += 0.00000000003 * math.cos(3.70405700948 + 193937.98608932378 * self.t)
Z0 += 0.00000000003 * math.cos(1.82907554275 + 153084.84390447979 * self.t)
Z0 += 0.00000000003 * math.cos(1.75677414102 + 158746.17595363196 * self.t)
Z0 += 0.00000000003 * math.cos(0.34579536632 + 161079.37234650398 * self.t)
Z0 += 0.00000000003 * math.cos(3.85483830173 + 39629.32434406539 * self.t)
Z0 += 0.00000000003 * math.cos(0.82694378727 + 51868.2486621788 * self.t)
Z0 += 0.00000000003 * math.cos(0.16328069810 + 27463.67694142 * self.t)
Z0 += 0.00000000003 * math.cos(4.08971647963 + 130226.21661243298 * self.t)
Z0 += 0.00000000004 * math.cos(3.70176043302 + 163766.09444104518 * self.t)
Z0 += 0.00000000003 * math.cos(1.31853873267 + 204151.27163553478 * self.t)
Z0 += 0.00000000004 * math.cos(2.50849600287 + 220.4126424388 * self.t)
Z0 += 0.00000000003 * math.cos(3.29076294593 + 125112.03959948818 * self.t)
Z0 += 0.00000000003 * math.cos(5.82527117029 + 49953.94964855139 * self.t)
Z0 += 0.00000000003 * math.cos(1.83258674312 + 37698.4550999484 * self.t)
Z0 += 0.00000000003 * math.cos(1.84707936363 + 26076.8574413103 * self.t)
Z0 += 0.00000000003 * math.cos(3.96840940578 + 130432.40216087017 * self.t)
Z0 += 0.00000000003 * math.cos(0.01192870778 + 26098.9488418381 * self.t)
Z0 += 0.00000000003 * math.cos(2.24128507678 + 78160.61665050399 * self.t)
Z0 += 0.00000000003 * math.cos(1.67457691866 + 182188.72380014337 * self.t)
Z0 += 0.00000000003 * math.cos(1.36021422148 + 136722.59155786238 * self.t)
# Mercury_Z1 (t) // 360 terms of order 1
Z1 = 0
Z1 += 0.00172388569 * math.cos(3.47961470688 + 26087.9031415742 * self.t)
Z1 -= 0.00084745328
Z1 += 0.00004790619 * math.cos(6.21921644635 + 52175.8062831484 * self.t)
Z1 += 0.00001359100 * math.cos(0.49329143004 + 78263.70942472259 * self.t)
Z1 += 0.00000602217 * math.cos(3.41138035633 + 104351.61256629678 * self.t)
Z1 += 0.00000191001 * math.cos(0.14519065991 + 130439.51570787099 * self.t)
Z1 += 0.00000055361 * math.cos(3.18058121998 + 156527.41884944518 * self.t)
Z1 += 0.00000015488 * math.cos(6.22255945229 + 182615.32199101939 * self.t)
Z1 += 0.00000010854 * math.cos(0.29329316365 + 27197.2816936676 * self.t)
Z1 += 0.00000004262 * math.cos(2.98444809115 + 208703.22513259358 * self.t)
Z1 += 0.00000004999 * math.cos(0.56827577867 + 24978.5245894808 * self.t)
Z1 += 0.00000002282 * math.cos(2.87281226699 + 31749.2351907264 * self.t)
Z1 += 0.00000002500 * math.cos(3.37464102020 + 53285.1848352418 * self.t)
Z1 += 0.00000001480 * math.cos(3.59930190722 + 51066.427731055 * self.t)
Z1 += 0.00000001163 * math.cos(6.03119799366 + 234791.12827416777 * self.t)
Z1 += 0.00000001151 * math.cos(1.94233400975 + 51116.4243529592 * self.t)
Z1 += 0.00000000808 * math.cos(0.63296975442 + 25028.521211385 * self.t)
Z1 += 0.00000000781 * math.cos(1.83801930165 + 5661.3320491522 * self.t)
Z1 += 0.00000000676 * math.cos(4.44275341808 + 27043.5028831828 * self.t)
Z1 += 0.00000000873 * math.cos(3.99274661843 + 21535.9496445154 * self.t)
Z1 += 0.00000000636 * math.cos(0.60897167697 + 20426.571092422 * self.t)
Z1 += 0.00000000538 * math.cos(5.92697698725 + 57837.1383323006 * self.t)
Z1 += 0.00000000528 * math.cos(1.89223267777 + 1059.3819301892 * self.t)
Z1 += 0.00000000503 * math.cos(4.12049535056 + 47623.8527860896 * self.t)
Z1 += 0.00000000450 * math.cos(6.08529735298 + 529.6909650946 * self.t)
Z1 += 0.00000000553 * math.cos(2.50090977683 + 1109.3785520934 * self.t)
Z1 += 0.00000000445 * math.cos(0.35693140869 + 77154.33087262919 * self.t)
Z1 += 0.00000000459 * math.cos(0.10915184787 + 79373.08797681599 * self.t)
Z1 += 0.00000000370 * math.cos(2.24953657104 + 10213.285546211 * self.t)
Z1 += 0.00000000414 * math.cos(3.01450201380 + 26107.57290247399 * self.t)
Z1 += 0.00000000417 * math.cos(5.31402407370 + 37410.5672398786 * self.t)
Z1 += 0.00000000306 * math.cos(3.10769212775 + 25558.2121764796 * self.t)
Z1 += 0.00000000316 * math.cos(2.79570706285 + 260879.03141574195 * self.t)
Z1 += 0.00000000296 * math.cos(4.12556468959 + 26068.2333806744 * self.t)
Z1 += 0.00000000332 * math.cos(5.93758113684 + 26084.0218062162 * self.t)
Z1 += 0.00000000234 * math.cos(3.03370038245 + 25132.3033999656 * self.t)
Z1 += 0.00000000250 * math.cos(1.18440188253 + 26091.7844769322 * self.t)
Z1 += 0.00000000215 * math.cos(1.41611963529 + 41962.5207369374 * self.t)
Z1 += 0.00000000206 * math.cos(4.97267994525 + 77204.32749453338 * self.t)
Z1 += 0.00000000208 * math.cos(5.60196522278 + 4551.9534970588 * self.t)
Z1 += 0.00000000251 * math.cos(3.12319370712 + 30639.856638633 * self.t)
Z1 += 0.00000000230 * math.cos(0.95862112790 + 50586.73338786459 * self.t)
Z1 += 0.00000000193 * math.cos(4.90260211702 + 13521.7514415914 * self.t)
Z1 += 0.00000000243 * math.cos(0.30112320330 + 1589.0728952838 * self.t)
Z1 += 0.00000000186 * math.cos(5.87650538684 + 39609.6545831656 * self.t)
Z1 += 0.00000000211 * math.cos(1.27593674125 + 53131.406024757 * self.t)
Z1 += 0.00000000188 * math.cos(0.09339135236 + 28306.66024576099 * self.t)
Z1 += 0.00000000191 * math.cos(4.77084065304 + 51646.11531805379 * self.t)
Z1 += 0.00000000167 * math.cos(2.62739427985 + 32858.61374281979 * self.t)
Z1 += 0.00000000162 * math.cos(5.02172677195 + 15874.6175953632 * self.t)
Z1 += 0.00000000140 * math.cos(1.37984211372 + 51749.20809227239 * self.t)
Z1 += 0.00000000134 * math.cos(0.57882342916 + 23869.1460373874 * self.t)
Z1 += 0.00000000132 * math.cos(3.40338891449 + 103242.23401420339 * self.t)
Z1 += 0.00000000144 * math.cos(4.41836879515 + 22645.32819660879 * self.t)
Z1 += 0.00000000143 * math.cos(5.55896069473 + 25661.3049506982 * self.t)
Z1 += 0.00000000131 * math.cos(2.17510821075 + 63498.47038145279 * self.t)
Z1 += 0.00000000143 * math.cos(1.22805602725 + 46514.4742339962 * self.t)
Z1 += 0.00000000136 * math.cos(2.45569156259 + 24498.8302462904 * self.t)
Z1 += 0.00000000120 * math.cos(2.89969435814 + 7.1135470008 * self.t)
Z1 += 0.00000000104 * math.cos(0.89744950821 + 73711.75592766379 * self.t)
Z1 += 0.00000000119 * math.cos(1.44014554390 + 43071.8992890308 * self.t)
Z1 += 0.00000000126 * math.cos(1.93369301602 + 14765.2390432698 * self.t)
Z1 += 0.00000000107 * math.cos(4.29215813932 + 19317.1925403286 * self.t)
Z1 += 0.00000000089 * math.cos(5.84175706917 + 51220.20654153979 * self.t)
Z1 += 0.00000000093 * math.cos(2.27928877976 + 19804.8272915828 * self.t)
Z1 += 0.00000000083 * math.cos(3.17188415740 + 29530.4780865396 * self.t)
Z1 += 0.00000000076 * math.cos(5.10839374532 + 36301.18868778519 * self.t)
Z1 += 0.00000000079 * math.cos(2.34370769368 + 1052.2683831884 * self.t)
Z1 += 0.00000000080 * math.cos(2.11190196063 + 426.598190876 * self.t)
Z1 += 0.00000000076 * math.cos(6.05836593763 + 52195.47604404819 * self.t)
Z1 += 0.00000000072 * math.cos(1.89073508118 + 1066.49547719 * self.t)
Z1 += 0.00000000082 * math.cos(3.03552784633 + 3442.5749449654 * self.t)
Z1 += 0.00000000075 * math.cos(3.11962204135 + 105460.99111839019 * self.t)
Z1 += 0.00000000086 * math.cos(5.84461147075 + 286966.93455731616 * self.t)
Z1 += 0.00000000069 * math.cos(1.89458844998 + 27147.28507176339 * self.t)
Z1 += 0.00000000089 * math.cos(4.10904716360 + 72602.37737557039 * self.t)
Z1 += 0.00000000077 * math.cos(5.34990441360 + 213.299095438 * self.t)
Z1 += 0.00000000087 * math.cos(5.53929952701 + 9103.9069941176 * self.t)
Z1 += 0.00000000070 * math.cos(4.43072071341 + 639.897286314 * self.t)
Z1 += 0.00000000059 * math.cos(3.32089922507 + 54394.56338733519 * self.t)
Z1 += 0.00000000061 * math.cos(0.53721787795 + 25035.6347583858 * self.t)
Z1 += 0.00000000062 * math.cos(1.36724381328 + 6283.0758499914 * self.t)
Z1 += 0.00000000073 * math.cos(6.16650137658 + 56727.7597802072 * self.t)
Z1 += 0.00000000069 * math.cos(0.88932506586 + 52156.1365222486 * self.t)
Z1 += 0.00000000055 * math.cos(2.54693938612 + 65697.55772473979 * self.t)
Z1 += | |
an argument.
### Usage:
----
>>> td_client._prepare_arguments_list(
parameter_list=['MSFT', 'SQ']
)
"""
return ','.join(parameter_list)
def get_quotes(self, instruments: List) -> Dict:
"""Grabs real-time quotes for an instrument.
Serves as the mechanism to make a request to the Get Quote and Get Quotes Endpoint.
If one item is provided a Get Quote request will be made and if more than one item
is provided then a Get Quotes request will be made.
### Documentation:
----
https://developer.tdameritrade.com/quotes/apis
### Arguments:
----
instruments: A list of different financial instruments.
### Usage:
----
>>> td_client.get_quotes(instruments=['MSFT'])
>>> td_client.get_quotes(instruments=['MSFT','SQ'])
"""
# because we have a list argument, prep it for the request.
instruments = self._prepare_arguments_list(
parameter_list=instruments
)
# build the params dictionary
params = {
'apikey': self.client_id,
'symbol': instruments
}
# define the endpoint
endpoint = 'marketdata/quotes'
# return the response of the get request.
return self._make_request(method='get', endpoint=endpoint, params=params)
def get_price_history(self, symbol: str, period_type:str = None, period: str = None, start_date:str = None, end_date:str = None,
frequency_type: str = None, frequency: str = None, extended_hours: bool = True) -> Dict:
"""Gets historical candle data for a financial instrument.
### Documentation:
----
https://developer.tdameritrade.com/price-history/apis
### Arguments:
----
symbol: The ticker symbol to request data for.
period_type: The type of period to show.
Valid values are day, month, year, or
ytd (year to date). Default is day.
period: The number of periods to show.
start_date: Start date as milliseconds
since epoch.
end_date: End date as milliseconds
since epoch.
frequency_type: The type of frequency with
which a new candle is formed.
frequency: The number of the frequency type
to be included in each candle.
extended_hours: True to return extended hours
data, false for regular market hours only.
Default is true
"""
# Fail early, can't have a period with start and end date specified.
if (start_date and end_date and period):
raise ValueError('Cannot have Period with start date and end date')
# Check only if you don't have a date and do have a period.
elif (not start_date and not end_date and period):
# Attempt to grab the key, if it fails we know there is an error.
# check if the period is valid.
if int(period) in VALID_CHART_VALUES[frequency_type][period_type]:
True
else:
raise IndexError('Invalid Period.')
if frequency_type == 'minute' and int(frequency) not in [1, 5, 10, 15, 30]:
raise ValueError('Invalid Minute Frequency, must be 1,5,10,15,30')
# build the params dictionary
params = {
'apikey': self.client_id,
'period': period,
'periodType': period_type,
'startDate': start_date,
'endDate': end_date,
'frequency': frequency,
'frequencyType': frequency_type,
'needExtendedHoursData': extended_hours
}
# define the endpoint
endpoint = 'marketdata/{}/pricehistory'.format(symbol)
# return the response of the get request.
return self._make_request(method='get', endpoint=endpoint, params=params)
def search_instruments(self, symbol: str, projection: str = None) -> Dict:
""" Search or retrieve instrument data, including fundamental data.
### Documentation:
----
https://developer.tdameritrade.com/instruments/apis/get/instruments
### Arguments:
----
symbol: The symbol of the financial instrument you would
like to search.
projection: The type of request, default is "symbol-search".
The type of request include the following:
1. symbol-search
Retrieve instrument data of a specific symbol or cusip
2. symbol-regex
Retrieve instrument data for all symbols matching regex.
Example: symbol=XYZ.* will return all symbols beginning with XYZ
3. desc-search
Retrieve instrument data for instruments whose description contains
the word supplied. Example: symbol=FakeCompany will return all
instruments with FakeCompany in the description
4. desc-regex
Search description with full regex support. Example: symbol=XYZ.[A-C]
returns all instruments whose descriptions contain a word beginning
with XYZ followed by a character A through C
5. fundamental
Returns fundamental data for a single instrument specified by exact symbol.
### Usage:
----
>>> td_client.search_instrument(
symbol='XYZ',
projection='symbol-search'
)
>>> td_client.search_instrument(
symbol='XYZ.*',
projection='symbol-regex'
)
>>> td_client.search_instrument(
symbol='FakeCompany',
projection='desc-search'
)
>>> td_client.search_instrument(
symbol='XYZ.[A-C]',
projection='desc-regex'
)
>>> td_client.search_instrument(
symbol='XYZ.[A-C]',
projection='fundamental'
)
"""
# validate argument
self._validate_arguments(
endpoint='search_instruments',
parameter_name='projection',
parameter_argument=projection
)
# build the params dictionary
params = {
'apikey': self.client_id,
'symbol': symbol,
'projection': projection
}
# define the endpoint
endpoint = 'instruments'
# return the response of the get request.
return self._make_request(method='get', endpoint=endpoint, params=params)
def get_instruments(self, cusip: str) -> Dict:
"""Searches an Instrument.
Get an instrument by CUSIP (Committee on Uniform Securities Identification Procedures) code.
### Documentation:
----
https://developer.tdameritrade.com/instruments/apis/get/instruments/%7Bcusip%7D
### Arguments:
----
cusip: The CUSIP code of a given financial instrument.
### Usage:
----
>>> td_client.get_instruments(
cusip='SomeCUSIPNumber'
)
"""
# build the params dictionary
params = {
'apikey': self.client_id
}
# define the endpoint
endpoint = 'instruments/{cusip}'.format(cusip=cusip)
# return the response of the get request.
return self._make_request(method='get', endpoint=endpoint, params=params)
def get_market_hours(self, markets: List[str], date: str) -> Dict:
"""Returns the hours for a specific market.
Serves as the mechanism to make a request to the "Get Hours for Multiple Markets" and
"Get Hours for Single Markets" Endpoint. If one market is provided a "Get Hours for Single Markets"
request will be made and if more than one item is provided then a "Get Hours for Multiple Markets"
request will be made.
### Documentation:
----
https://developer.tdameritrade.com/market-hours/apis
### Arguments:
----
markets: The markets for which you're requesting market hours,
comma-separated. Valid markets are:
EQUITY, OPTION, FUTURE, BOND, or FOREX.
date: The date you wish to recieve market hours for.
Valid ISO-8601 formats are: yyyy-MM-dd and yyyy-MM-dd'T'HH:mm:ssz
### Usage:
----
>>> td_client.get_market_hours(markets=['EQUITY'], date='2019-10-19')
>>> td_client.get_market_hours(markets=['EQUITY','FOREX'], date='2019-10-19')
"""
# validate argument
self._validate_arguments(
endpoint='get_market_hours',
parameter_name='markets',
parameter_argument=markets
)
# because we have a list argument, prep it for the request.
markets = self._prepare_arguments_list(parameter_list=markets)
# build the params dictionary
params = {
'apikey': self.client_id,
'markets': markets,
'date': date
}
# define the endpoint
endpoint = 'marketdata/hours'
# return the response of the get request.
return self._make_request(method='get', endpoint=endpoint, params=params)
def get_movers(self, market: str, direction: str, change: str) -> Dict:
"""Gets Active movers for a specific Index.
Top 10 (up or down) movers by value or percent for a particular market.
### Documentation:
----
https://developer.tdameritrade.com/movers/apis/get/marketdata
### Arguments:
----
market: The index symbol to get movers for.
Can be $DJI, $COMPX, or $SPX.X.
direction: To return movers with the specified
directions of up or down. Valid values are `up`
or `down`
change: To return movers with the specified change
types of percent or value. Valid values are `percent`
or `value`.
### Usage:
----
>>> td_client.get_movers(
market='$DJI',
direction='up',
change='value'
)
>>> td_client.get_movers(
market='$COMPX',
direction='down',
change='percent'
)
"""
# grabs a dictionary representation of our arguments and their inputs.
local_args = locals()
# we don't need the 'self' key
del local_args['self']
# validate arguments, before making request.
for key, value in local_args.items():
self._validate_arguments(
endpoint='get_movers',
parameter_name=key,
parameter_argument=value
)
# build the params dictionary
params = {
'apikey': self.client_id,
'direction': direction,
'change': change
}
# define the endpoint
endpoint = 'marketdata/{market_id}/movers'.format(market_id=market)
# return the response of the get request.
return self._make_request(method='get', endpoint=endpoint, params=params)
def get_options_chain(self, option_chain: Union[Dict, OptionChain]) -> Dict:
"""Returns Option Chain Data and Quotes.
Get option chain for an optionable Symbol using one of two methods. Either,
use the OptionChain object which is a built-in object that allows for easy creation
of the POST request. Otherwise, can pass through a dictionary of all the
arguments needed.
### Documentation:
----
https://developer.tdameritrade.com/option-chains/apis/get/marketdata/chains
### Arguments:
----
option_chain: Represents a dicitonary containing values to
query.
### Usage:
----
>>> td_client.get_options_chain(
option_chain={'key1':'value1'}
)
"""
# First check if it's an `OptionChain` object.
if isinstance(option_chain, OptionChain):
# If it is, then grab the params.
params = option_chain.query_parameters
else:
# Otherwise just take the raw dictionary.
params = option_chain
# define the endpoint
endpoint = 'marketdata/chains'
# return the response of the get request.
return self._make_request(method='get', endpoint=endpoint, params=params)
"""
-----------------------------------------------------------
-----------------------------------------------------------
THIS BEGINS THE ACCOUNTS ENDPOINTS PORTION.
-----------------------------------------------------------
-----------------------------------------------------------
"""
def get_accounts(self, account: str = 'all', fields: List[str] = None) -> Dict:
"""Queries accounts for a user.
Serves as the mechanism to make a | |
# !/usr/bin/jython
# This Jython script is run via run-otp.sh
# It assumes
# - OTP, Jython and Sqlite jars are acquired
# - required input files are present and accurately specified as arguments
################################################################################################
# Example usage from Bash shell:
################################################################################################
# odm_args="--departure_time 2018-09-27-08:00:00 \
# --duration_reps 2 2 \
# --max_time 7200 \
# --max_walking_distance 500 \
# --matching one-to-many \
# --originsfile graphs/sa1_dzn_modes_melb_2016/SA1_2016_melb_gccsa_10km_epsg4326.csv \
# --destsfile graphs/sa1_dzn_modes_melb_2016/DZN_2016_melb_gccsa_10km_epsg4326.csv \
# --outdb graphs/sa1_dzn_modes_melb_2016/SA1_DZN_2016_melb_gccsa_10km.db \
# --outtable od_6modes_8am_10am \
# --mode_list WALK BICYCLE CAR 'WALK,BUS' 'WALK,TRAM' 'WALK,RAIL' 'WALK,TRANSIT' \
# --run_once WALK BICYCLE CAR \
# --id_names SA1_MAINCODE_2016 DZN_CODE_2016 \
# --latlon_names Y X \
# --wideform \
# --proj_dir ./graphs/sa1_dzn_modes_melb_2016"
#
# ./run-otp.sh -d sa1_dzn_modes_melb_2016 \
# -p melb_gccsa_2016_10000m_20180208.pbf \
# -t gtfs_aus_vic_melb_20180911.zip -w \
# "$odm_args"
###################################################################################################
import argparse, time, os.path, sys, itertools
from org.opentripplanner.scripting.api import OtpsEntryPoint
from datetime import datetime,timedelta
import sys
import csv
from java.lang import Class
from java.sql import DriverManager, SQLException
from com.ziclix.python.sql import zxJDBC
from java.text import SimpleDateFormat
# To debug using the server you can run:
# java -Xmx2G -jar otp-0.19.0-shaded.jar --build ./graphs/sa1_dzn_region06_2019 --inMemory
# and in browser:
# http://localhost:8080/otp/routers/default/plan?fromPlace=-16.89439878,145.7161692&toPlace=-16.89863955,145.7152701&time=7:45am&date=10-16-2019&mode=TRANSIT,WALK&maxWalkDistance=25000&arriveBy=false
# http://localhost:8080/otp/routers/default/isochrone?fromPlace=-16.89439878,145.7161692&mode=WALK,TRANSIT&date=10-16-2019&time=7:45am&maxWalkDistance=500&cutoffSec=1800&cutoffSec=3600
# http://localhost:8080/otp/routers/default/isochrone?fromPlace=-16.89439878,145.7161692&mode=WALK,TRANSIT&date=10-16-2019&time=7:45am&arriveBy=8:45am&cutoffSec=1800&cutoffSec=3600
#
# To debug interactively, you can run from bash the otp directory the following:
# DIR="."
# OTPJAR=${DIR}/otp-0.19.0-shaded.jar
# JYTHONJAR=${DIR}/jython-standalone-2.7.0.jar
# SQLITEJAR=${DIR}/sqlite-jdbc-3.23.1.jar
#
# java -Duser.timezone=Australia/Melbourne -cp $OTPJAR:$JYTHONJAR:$SQLITEJAR org.python.util.jython
#
# When running code interactively, don't enter the parser.parse_args() function - it will crash the script
# The following alternate variable definitions may be of use:
#
# region = '09'
# DATABASE = "graphs/sa1_dzn_region{region}_2019/region{region}_gccsa_SA1_DZN_2016_vic.db".format(region=region)
# originsfile = "graphs/sa1_dzn_region{region}_2019/sa1_2016_network_snapped_pwc_region{region}.csv".format(region=region)
# destsfile = "graphs/sa1_dzn_region{region}_2019/dzn_2016_network_snapped_centroids_region{region}.csv".format(region=region)
# TABLE_NAME = "od_4modes_7_45am"
# id_names = ['SA1_MAINCO','DZN_CODE_2016']
# latlon_names =['Y','X']
# proj_name = 'sa1_dzn_region{region}_2019'.format(region=region)
# orig_id =id_names[0]
# dest_id =id_names[1]
# lat =latlon_names[0]
# lon =latlon_names[1]
# JDBC_URL = "jdbc:sqlite:%s" % DATABASE
# JDBC_DRIVER = "org.sqlite.JDBC"
# otp = OtpsEntryPoint.fromArgs(['--graphs', 'graphs', '--router', proj_name])
# start_time = time.time()
# departure_time = datetime.strptime('2019-10-16-07:45:00', "%Y-%m-%d-%H:%M:%S")
# dep = departure_time
# router = otp.getRouter(proj_name)
# req = otp.createRequest()
# req.setDateTime(dep)
# req.setMaxTimeSec(7200)
# req.setMaxWalkDistance(500)
# origins = otp.loadCSVPopulation(originsfile, lat, lon)
# dests = otp.loadCSVPopulation(destsfile, lat, lon)
# modes = ['WALK','BICYCLE','CAR','WALK,BUS','WALK,TRAM','WALK,RAIL','WALK,TRANSIT']
# run_once = ['WALK','BICYCLE','CAR']
# req.setOrigin(-41.4283131431752,147.135207900963)
# transport_mode = 'WALK'
# req.setModes(transport_mode)
# spt = router.plan(req)
# results = spt.eval(dests)
# print(results)
def valid_date(s):
    """argparse type checker: parse *s* as a departure time.

    s: a string in the format %Y-%m-%d-%H:%M:%S,
       e.g. '2018-09-27-08:00:00'.
    Returns the parsed datetime, or raises argparse.ArgumentTypeError
    so argparse reports a clean usage error.
    """
    try:
        return datetime.strptime(s, "%Y-%m-%d-%H:%M:%S")
    except ValueError:
        # Bug fix: the message previously claimed the expected format was
        # %Y-%m-%d-%H:%M, which does not match the format actually parsed
        # above (seconds were missing).
        msg = "Not a valid departure time: '{0}'. It should be in the format %Y-%m-%d-%H:%M:%S ".format(s)
        raise argparse.ArgumentTypeError(msg)
def valid_path(arg):
    """argparse type checker: return *arg* unchanged if it is an
    existing filesystem path, otherwise raise ArgumentTypeError."""
    if os.path.exists(arg):
        return arg
    raise argparse.ArgumentTypeError("The path %s does not exist!" % arg)
def valid_duration_reps(arg):
    """argparse validator for the --duration_reps pair.

    arg: a two-item sequence [duration, repeat_interval] in hours.
    Returns arg unchanged when valid; raises argparse.ArgumentTypeError
    when either value is negative or the repeat interval exceeds the
    analysis duration.
    """
    print(arg)
    if arg[0] < 0:
        msg = "The duration %s cannot be negative!" % arg
        raise argparse.ArgumentTypeError(msg)
    elif arg[1] < 0:
        msg = "The repeat interval %s cannot be negative!" % arg
        raise argparse.ArgumentTypeError(msg)
    elif arg[1] > arg[0]:
        # Bug fix: the original referenced the undefined name 'args' here,
        # so this branch raised NameError instead of the intended error.
        msg = "The repeat interval {} cannot be smaller than the analysis duration {}!".format(arg[1], arg[0])
        raise argparse.ArgumentTypeError(msg)
    else:
        return arg
# Parse input arguments
parser = argparse.ArgumentParser(description='Generate origin destination matrix')
# Note that the time_zone argument is not implemented

# --- time window for the OD matrix ---
parser.add_argument('--departure_time',
                    help='departure time - format YYYY-MM-DD-HH:MM:SS',
                    required=True,
                    type=valid_date)
# NOTE(review): with nargs=2 and type=float, argparse applies float to each
# element; the valid_duration_reps validator defined above is never wired in.
parser.add_argument('--duration_reps',
                    help='Two optional parameters defining a time duration and a repeat interval in hours.',
                    nargs=2,
                    default=[0,0],
                    type=float)

# --- input/output locations ---
parser.add_argument('--proj_dir',
                    help='project directory',
                    required=True,
                    type=valid_path)
parser.add_argument('--originsfile',
                    help='path to the input csv file, which contains coordinates of origins',
                    required=True,
                    type=valid_path)
parser.add_argument('--destsfile',
                    help='path to the input csv file, which contains coordinates of destinations',
                    required=True,
                    type=valid_path)
parser.add_argument('--outdb',
                    help='path to the output sqlite database (default: traveltime_matrix)',
                    default='traveltime_matrix.db')
# NOTE(review): help text duplicates --outdb's; this is the output *table* name.
parser.add_argument('--outtable',
                    help='path to the output sqlite database (default: traveltime_matrix)',
                    default='traveltime_matrix')

# --- routing constraints ---
parser.add_argument('--max_time',
                    help='maximum travel time in seconds (default: 1800)',
                    default=1800,
                    type=int)
# NOTE(review): help text says default 500 but the actual default is 30000 —
# confirm which is intended.
parser.add_argument('--max_walking_distance',
                    help='maximum walking distance in meters (default: 500)',
                    default=30000,
                    type=int)
parser.add_argument('--mode_list',
                    help='Modes to travel by. Options --pending appropriate data-- are: WALK, BICYCLE, CAR, TRAM, SUBWAY, RAIL, BUS, FERRY, CABLE_CAR, GONDOLA, FUNICULAR, TRANSIT, LEG_SWITCH, AIRPLANE (default: WALK,BUS,RAIL)',
                    nargs='*',
                    type = str,
                    default=['WALK','BUS','RAIL'])
parser.add_argument('--run_once',
                    help='Modes for which transport is only evaluated at one time point. For example, you may be evaluating difference between on-peak and off-peak travel times, however no difference would be expected for walking or cycling, so these may be excluded (e.g. WALK BICYCLE). Valid options are as per mod_list; the default is an empty list.',
                    nargs='*',
                    type = str,
                    default=[])
parser.add_argument('--matching',
                    help='How origins and destinations should be matched. Can be either one-to-one or one-to-many (default: one-to-many)',
                    default='one-to-many',
                    type=str)
parser.add_argument('--combinations',
                    help='Create all combinations of supplied destination list',
                    default=False,
                    action='store_true')

# --- csv schema of the origin/destination files ---
parser.add_argument('--id_names',
                    help='Names of respective ID fields for source Origin and Destination csv files (default: GEOID GEOID)',
                    nargs='*',
                    type = str,
                    default=['GEOID','GEOID'])
parser.add_argument('--latlon_names',
                    help='Names of latitude and longitude fields in source Origin and Destination csv fields (default: lat lon)',
                    nargs='*',
                    type = str,
                    default=['lat','lon'])
parser.add_argument('--wideform',
                    help='Transpose data to wideform from longform main output',
                    default=False,
                    action='store_true')
parser.add_argument('--cmd',
                    help='The command used to call the python script may be specified; if so it is recorded to the log txt file.',
                    default=None)
args = parser.parse_args()

# Get the project name from the supplied project directory
proj_name = os.path.basename(os.path.normpath(args.proj_dir))

# db settings: sqlite database reached through the JDBC driver (Jython)
DATABASE = "{}".format(args.outdb)
JDBC_URL = "jdbc:sqlite:%s" % DATABASE
JDBC_DRIVER = "org.sqlite.JDBC"
TABLE_NAME = "{}".format(args.outtable)
def createTable(table,values = " 'origin', 'destination', 'dep_time','mode','dist_m', 'time_mins' "):
    """
    Return string to create a database table pending given context
    (results, origins or destinations).

    table: one of "origins", "destinations" or "results"; the first two
        are prefixed onto the module-level TABLE_NAME, "results" uses
        TABLE_NAME directly.
    values: column definition list spliced into the CREATE TABLE statement.
    """
    if table == "origins":
        TABLE_CREATOR = "create table if not exists origins_{} ({});".format(TABLE_NAME,values)
    elif table == "destinations":
        TABLE_CREATOR = "create table if not exists destinations_{} ({});".format(TABLE_NAME,values)
    elif table == "results":
        TABLE_CREATOR = "create table if not exists {} ({});".format(TABLE_NAME,values)
    else:
        # Bug fix: an unrecognised context previously fell through and
        # raised NameError on the return; fail with a clear error instead.
        raise ValueError("Unknown table context: %s" % table)
    return(TABLE_CREATOR)
def insertRows(table,values="?,?,?,?,?,?"):
    """
    Return string to insert rows to a database table pending given context
    (results, origins or destinations).

    table: one of "origins", "destinations" or "results", resolved against
        the module-level TABLE_NAME exactly as in createTable().
    values: placeholder list for the prepared statement.
    """
    if table == "origins":
        RECORD_INSERTER = "insert into origins_{} values ({});".format(TABLE_NAME,values)
    elif table == "destinations":
        RECORD_INSERTER = "insert into destinations_{} values ({});".format(TABLE_NAME,values)
    elif table == "results":
        RECORD_INSERTER = "insert into {} values ({});".format(TABLE_NAME,values)
    else:
        # Bug fix: an unrecognised context previously fell through and
        # raised NameError on the return; fail with a clear error instead.
        raise ValueError("Unknown table context: %s" % table)
    return(RECORD_INSERTER)
def getConnection(JDBC_URL, JDBC_DRIVER, sql_zxJDBC=True):
    """
    Given the name of a JDBC driver class and the url to be used
    to connect to a database, attempt to obtain a connection to
    the database.

    JDBC_URL: JDBC connection url, e.g. "jdbc:sqlite:/path/to.db".
    JDBC_DRIVER: fully qualified driver class, e.g. "org.sqlite.JDBC".
    sql_zxJDBC: when True return a zxJDBC (DB-API style) connection;
        otherwise a raw java.sql Connection via DriverManager.

    Exits the process (sys.exit) on any connection failure.
    """
    # Load and instantiate the JDBC driver class first; without it neither
    # connection route below can work. (Jython/Python 2 exception syntax.)
    try:
        Class.forName(JDBC_DRIVER).newInstance()
    except Exception, msg:
        print msg
        sys.exit(-1)
    if sql_zxJDBC == True:
        try:
            # no user/password combo needed here, hence the None, None
            dbConn = zxJDBC.connect(JDBC_URL, None, None, JDBC_DRIVER)
        except zxJDBC.DatabaseError, msg:
            print msg
            sys.exit(-1)
    else:
        # plain java.sql connection, needed for prepared-statement batching
        try:
            dbConn = DriverManager.getConnection(JDBC_URL)
        except SQLException, msg:
            print msg
            sys.exit(-1)
    return dbConn
def populateTable(dbConn, feedstock, sql_zxJDBC = False):
    """
    Given an open connection to a SQLite database and a list of tuples
    with the data to be inserted, insert the data into the target table.

    dbConn: an open java.sql Connection (prepareStatement is used).
    feedstock: iterable of (origin, destination, dep_time, mode, dist_m,
        time_mins) tuples.
    Returns True on success, False if the batch insert failed.

    NOTE(review): the sql_zxJDBC parameter is accepted but never used in
    this function body.
    """
    try:
        preppedStmt = dbConn.prepareStatement(insertRows('results'))
        for origin, destination, dep_time, mode, dist_m, time_mins in feedstock:
            preppedStmt.setString(1, origin)
            preppedStmt.setString(2, destination)
            preppedStmt.setString(3, dep_time)
            preppedStmt.setString(4, mode)
            preppedStmt.setInt(5, dist_m)
            preppedStmt.setDouble(6, time_mins)
            preppedStmt.addBatch()
        # execute all inserts inside a single transaction for speed
        dbConn.setAutoCommit(False)
        preppedStmt.executeBatch()
        dbConn.setAutoCommit(True)
    except SQLException, msg:
        print msg
        return False
    return True
#################################################################################
# Start timing the code
start_time = time.time()

# Instantiate zxJDBC SQL connection
dbConn = getConnection(JDBC_URL,JDBC_DRIVER, True)
cursor = dbConn.cursor()

# Ensure the results table exists before any resume logic runs.
try:
    cursor.execute(createTable("results"))
except SQLException, msg:
    print msg
    sys.exit(1)

# Load the origin and destination CSVs fully into memory as lists of
# tuples; row 0 of each is the header.
with open(os.path.abspath('./{}'.format(args.originsfile)), 'rb') as f:
    reader = csv.reader(f)
    origin_list = map(tuple, reader)

with open(os.path.abspath('./{}'.format(args.destsfile)), 'rb') as f:
    reader = csv.reader(f)
    dests_list = map(tuple, reader)

# Mirror the input CSVs into the database so a run is self-describing.
# The placeholder count is derived from the width of the first data row.
try:
    # copy origins to db
    cursor.execute("drop table if exists origins_{};".format(TABLE_NAME))
    cursor.execute(createTable(table = "origins",
                               values = "'Y', 'X', 'fid', 'SA1_MAINCO', 'SA1_7DIGIT', 'COMPOUND_ID'"))
    cursor.executemany(insertRows("origins",','.join(['?' for x in range(0,len(origin_list[0]))])),
                       origin_list[1:])
    dbConn.commit()
    # copy dests to db
    cursor.execute("drop table if exists destinations_{};".format(TABLE_NAME))
    cursor.execute(createTable(table = "destinations",
                               values = "'Y', 'X', 'fid', 'DZN_CODE_2016', 'COMPOUND_ID'"))
    cursor.executemany(insertRows("destinations",','.join(['?' for x in range(0,len(dests_list[0]))])),
                       dests_list[1:])
    dbConn.commit()
except SQLException, msg:
    print msg
    sys.exit(1)

# SNIPPETS FOR DEBUGGING
# cursor.execute("SELECT * FROM {result};".format(result=TABLE_NAME))
# for row in cursor.fetchall():
#     print(row)

# Delete all records with largest ID
# Assuming records are processed sequentially by id, if the process has crashed
# the safest way to ensure all results are processed is to discard the potentially
# incomplete previous transaction set (largest id) and recommence from there.
cursor.execute('''
    DELETE FROM {result}
    WHERE origin = (SELECT origin FROM {result} ORDER BY origin DESC LIMIT 1);
    '''.format(result=TABLE_NAME))
dbConn.commit()

# Rebuild origins_updated: only origins whose id sorts after the last
# fully-processed origin still need to be evaluated (resume support).
cursor.execute('''DROP TABLE IF EXISTS origins_updated''')
dbConn.commit()
cursor.execute('''CREATE TABLE origins_updated AS
    SELECT * FROM origins_{result}
    WHERE "{id}" > COALESCE((SELECT origin FROM {result} ORDER BY origin DESC LIMIT 1),'');
    '''.format(result = TABLE_NAME,
               id = args.id_names[0]))
dbConn.commit()

# Export the remaining origins to a fresh CSV for OTP to consume.
cursor.execute('''
    SELECT "{id}",
           "{lat}",
           "{lon}"
    FROM origins_updated
    '''.format(id = args.id_names[0],
               lat = args.latlon_names[0],
               lon = args.latlon_names[1]))
rows = cursor.fetchall()
updated_csv = '{}_updated{}'.format(*os.path.splitext(args.originsfile))
try:
    os.remove(updated_csv)
except OSError:
    # file did not exist yet; nothing to remove
    pass

with open(updated_csv, 'w') as f:
    updated_origins = csv.writer(f)
    updated_origins.writerow((args.id_names[0],args.latlon_names[0],args.latlon_names[1]))
    updated_origins.writerows(rows)

# Close the zxJDBC connection
cursor.close()
dbConn.close()

# Re-open as a raw java.sql connection so prepared-statement batching
# (populateTable) can be used for the results.
dbConn = getConnection(JDBC_URL, JDBC_DRIVER, sql_zxJDBC = False)
stmt = dbConn.createStatement()

# Read Points of Destination - The file points.csv, drawing on defaults or specified IDs, latitude and longitude
orig_id = args.id_names[0]
dest_id = args.id_names[1]
lat = args.latlon_names[0]
lon = | |
))._positive()
def write( self, file_name = "output.scad" ):
"""write the shape to the specified file
:param file_name: name of the file
This function prints the OpenSCAD representation of the
shape to the indicated file (default: output.scad).
That file can be opened in OpenSCAD
for visualization or export as to a .stl file.
If the file_name does not contain a "."
the suffix ".scad" is appended.
.. code-block::
# these lines have the same effect
sphere( 10 ).write()
sphere( 10 ).write( "output" )
sphere( 10 ).write( "output.scad" )
"""
if not "." in file_name: file_name = file_name+ ".scad"
f = open( file_name, "w" )
f.write( str( self ) )
f.close()
def __add__( self, rhs: _shape_or_none ) -> shape:
"""add two shapes
This could be mapped directly to an OpenSCAD union(),
but to avoid deeply nested union()s an _shape_list is used.
"""
if rhs == None: return self
return _shape_list( self, rhs )
__radd__ = __add__
def __sub__( self, rhs: _shape_or_none ) -> shape:
"""subtract two shapes
"""
if rhs == None: return self
return _apply2( "difference()", "union()", self, rhs )
def __mul__( self, rhs: shape ) -> shape:
"""intersect two shapes
"""
return _apply2( "intersection()", "union()", self, rhs )
class _shape_list( shape ):
    """flattened collection of added shapes

    This is an implementation detail.
    Shapes are often constructed by adding a large number of solid
    sub-elements. Doing that the way shape subtraction is done would
    generate deeply nested OpenSCAD unions, which makes the generated
    OpenSCAD file difficult to read. This class gathers the shapes
    that are added, so one flat union can be generated instead.
    """

    def __init__( self,
        a: _shape_or_shape_list,
        b: _shape_or_shape_list
    ):
        """create a shape list from two parts

        Each part can be either a shape or an _shape_list.
        """
        self.list = []
        self._add( a )
        self._add( b )

    def _add( self, x: _shape_or_shape_list ):
        # splice in another list's elements, or append a single shape
        if isinstance( x, _shape_list ):
            self.list.extend( x.list )
        else:
            self.list.append( x )

    def _merge( self, function = "union()" ) -> shape:
        # emit one flat OpenSCAD node for each polarity
        positives = "".join( x._positive() for x in self.list )
        negatives = "".join( x._negative() for x in self.list )
        return shape(
            function + "{\n" + _indent( positives ) + "}",
            function + "{\n" + _indent( negatives ) + "}"
        )
#============================================================================
#
# vector
#
#============================================================================
class vector:
    """2d or 3d vector

    This is a 2d (x, y) or 3d (x, y, z) vector.
    A vector is used to denote a location, a displacement (shift),
    a size, or sometimes just 2 or 3 numeric values.

    A vector has members x, y and z.
    For a 2d vector, the z value is None.

    Vectors can be added or subtracted using the + or - operators.
    Vectors can be multiplied or divided by a scalar using the
    * and / operators.
    """

    # these assignments are here just as anchors for the docstrings
    x = None
    """x (first) value of the vector"""

    y = None
    """y (second) value of the vector"""

    z = None
    """z (third) value of the vector"""

    def __init__( self,
        x: _float_or_vector,
        y: _float_or_none = None,
        z: _float_or_none = None
    ):
        """create from x and y, and an optional z value

        Create a 2d vector from x and y values, or a
        3d vector from x, y and z values.
        When x is a vector, that vector is simply copied
        (no further arguments are allowed in that case).
        """
        if isinstance( x, vector ):
            # copy constructor: x already carries all components
            if ( y is not None ) or ( z is not None ):
                raise Exception(
                    "vector constructor called with a vector as"
                    " first parameter but also some more parameters." )
            self.x, self.y, self.z = x.x, x.y, x.z
        else:
            if y is None:
                raise Exception(
                    "called vector with one parameter"
                    " which is not a vector" )
            self.x, self.y, self.z = x, y, z

    def _list( self ):
        # components as a plain list (z is None for a 2d vector)
        return [ self.x, self.y, self.z ]

    def _add( self,
        a: _float_or_none,
        b: _float_or_none
    ) -> _float_or_none:
        """add two values, where either (but not both) could be None
        """
        if a is None: return b
        if b is None: return a
        return a + b

    def __add__( self, rhs: vector ) -> vector:
        """add two vector values (member-wise addition)

        Adding two 2d vectors yields a 2d vector,
        adding two 3d vectors yields a 3d vector.
        When a 2d vector and a 3d vector are added, the
        z value of the 2d vector is assumed to be 0.
        """
        return vector(
            self.x + rhs.x,
            self.y + rhs.y,
            self._add( self.z, rhs.z ) )

    def _sub( self,
        a: _float_or_none,
        b: _float_or_none
    ) -> _float_or_none:
        """subtract two values, where either (but not both) could be None
        """
        if a is None:
            if b is None: return None
            return - b
        if b is None: return a
        return a - b

    def __sub__( self, rhs: vector ) -> vector:
        """subtract two vector values (member-wise subtraction)

        Subtracting two 2d vectors yields a 2d vector,
        subtracting two 3d vectors yields a 3d vector.
        When a 2d vector and a 3d vector are subtracted, the
        z value of the 2d vector is assumed to be 0.
        """
        return vector(
            self.x - rhs.x,
            self.y - rhs.y,
            self._sub( self.z, rhs.z ) )

    def _mul( self,
        a: _float_or_none,
        b: _float_or_none
    ) -> _float_or_none:
        """multiply two values, where either could be None
        """
        if a is None: return None
        if b is None: return None
        return a * b

    def __mul__( self, v: float ) -> vector:
        """multiply a vector by a scalar (member-wise multiplication)

        A 2d vector stays 2d (z stays None).
        """
        return vector(
            self.x * v,
            self.y * v,
            self._mul( self.z, v ) )

    __rmul__ = __mul__

    def _div( self,
        a: _float_or_none,
        b: float
    ) -> _float_or_none:
        """divide two values, where the first could be None
        """
        if a is None: return None
        if b is None: return None
        return a / b

    def __truediv__( self, v: float ):
        """divide a vector by a scalar (member-wise division)

        A 2d vector stays 2d (z stays None).
        """
        return vector(
            self.x / v,
            self.y / v,
            self._div( self.z, v ) )

    def __str__( self ) -> str:
        """convert to [ x, y ] or [ x, y, z ] string format
        """
        if self.z is None:
            return "[ %f, %f ]" % ( self.x, self.y)
        else:
            return "[ %f, %f, %f ]" % ( self.x, self.y, self.z )

    def __pow__( self, subject : _shape_or_none ) -> _shape_or_none:
        """apply the vector to a shape

        :param subject: the shape that is to be displaced (shifted)

        A vector can be applied to a shape using the ** operator.
        This will displace (shift) the shape.
        The subject can be None instead of a shape,
        in which case the result will also be None.
        """
        return apply( "translate( %s )" % str( self ), subject )
# zero displacement: applying it to a shape with ** leaves it unchanged
identity = vector( 0, 0, 0 )
"""modifier that doesn't change its subject
"""
def dup2( v: float ) -> vector:
    """vector( v, v )

    :param v: the value for x and y

    Return a 2d vector whose x and y values are both v.
    """
    return vector( v, v )
def dup3( v: float ) -> vector:
"""vector( v, v, v )
:param v: the value for x, y and | |
r[5][i] == 's':
ms.add('ms_' + str(reaction_index) + '_' + str(reg))
kms.add('kms_' + str(reaction_index) + '_' + str(reg))
if 's' in r[5]:
ant_str += ')' + enzyme_end
else:
ant_str += enzyme_end
if kinetics[0][8:10] == 'FM':
if 's' in r[5]:
ant_str += '/((('
else:
ant_str += '/(('
ant_str += 'S' + str(r[1][0]) + '/km_' + str(reaction_index) \
+ '_' + str(r[1][0]) + ')^m_' + str(reaction_index) + '_' + str(r[1][0]) + ')^(1/2)'
for i, reg in enumerate(r[3]):
if r[5][i] == 's' and r[4][i] == -1:
ant_str += ' + (S' + str(reg) + '/kms_' + str(reaction_index) + '_' + str(reg) \
+ ')^ms_' + str(reaction_index) + '_' + str(reg)
if r[5][i] == 's' and r[4][i] == 1:
ant_str += ' + (kms_' + str(reaction_index) + '_' + str(reg) + '/S' + str(reg) \
+ ')^ms_' + str(reaction_index) + '_' + str(reg)
if r[5][i] == 's':
ms.add('ms_' + str(reaction_index) + '_' + str(reg))
kms.add('kms_' + str(reaction_index) + '_' + str(reg))
if 's' in r[5]:
ant_str += ')' + enzyme_end
else:
ant_str += enzyme_end
if kinetics[0][8:10] == 'PM':
num_s = r[5].count('s')
if 's' in r[5]:
ant_str += '/('
for i, reg in enumerate(r[3]):
if r[5][i] == 's' and r[4][i] == -1:
ant_str += '(S' + str(reg) + '/kms_' + str(reaction_index) + '_' + str(reg) \
+ ')^ms_' + str(reaction_index) + '_' + str(reg)
if r[5][i] == 's' and r[4][i] == 1:
ant_str += '(kms_' + str(reaction_index) + '_' + str(reg) + '/S' + str(reg) \
+ ')^ms_' + str(reaction_index) + '_' + str(reg)
if (i + 1) < num_s:
ant_str += ' + '
if r[5][i] == 's':
ms.add('ms_' + str(reaction_index) + '_' + str(reg))
kms.add('kms_' + str(reaction_index) + '_' + str(reg))
if 's' in r[5]:
ant_str += ')' + enzyme_end
km.add('km_' + str(reaction_index) + '_' + str(r[1][0]))
m.add('m_' + str(reaction_index) + '_' + str(r[1][0]))
kf.add('kf_' + str(reaction_index))
else:
ant_str += '; ' + enzyme
for i, reg in enumerate(r[3]):
if r[5][i] == 'a' and r[4][i] == -1:
ant_str += '(' + 'ro_' + str(reaction_index) + '_' + str(reg) + ' + (1 - ' + 'ro_' \
+ str(reaction_index) + '_' + str(reg) + ')/(1 + S' + str(reg) + '/kma_' \
+ str(reaction_index) \
+ '_' + str(reg) + '))^ma_' + str(reaction_index) + '_' + str(reg) + '*'
if r[5][i] == 'a' and r[4][i] == 1:
ant_str += '(' + 'ro_' + str(reaction_index) + '_' + str(reg) + ' + (1 - ' + 'ro_' \
+ str(reaction_index) + '_' + str(reg) + ')*(S' + str(reg) + '/kma_' \
+ str(reaction_index) \
+ '_' + str(reg) + ')/(1 + S' + str(reg) + '/kma_' + str(reaction_index) \
+ '_' + str(reg) + '))^ma_' + str(reaction_index) + '_' + str(reg) + '*'
if r[5][i] == 'a':
ma.add('ma_' + str(reaction_index) + '_' + str(reg))
kma.add('kma_' + str(reaction_index) + '_' + str(reg))
ro.add('ro_' + str(reaction_index) + '_' + str(reg))
ant_str += '(kf_' + str(reaction_index) + '*(S' + str(r[1][0]) + '/km_' + str(reaction_index) \
+ '_' + str(r[1][0]) + ')^m_' + str(reaction_index) + '_' + str(r[1][0]) + ' - kr_' \
+ str(reaction_index) + '*(S' + str(r[2][0]) + '/km_' + str(reaction_index) + '_' \
+ str(r[2][0]) + ')^m_' + str(reaction_index) + '_' + str(r[2][0]) + '*(S' + str(r[2][1]) \
+ '/km_' + str(reaction_index) + '_' + str(r[2][1]) + ')^m_' + str(reaction_index) + '_' \
+ str(r[2][1]) + ')'
if kinetics[0][8:10] == 'CM':
if 's' in r[5]:
ant_str += '/((('
else:
ant_str += '/(('
ant_str += '1 + S' + str(r[1][0]) + '/km_' + str(reaction_index) \
+ '_' + str(r[1][0]) + ')^m_' + str(reaction_index) + '_' + str(r[1][0]) + ' + (1 + S' \
+ str(r[2][0]) + '/km_' + str(reaction_index) + '_' + str(r[2][0]) + ')^m_' \
+ str(reaction_index) + '_' + str(r[2][0]) + '*(1 + S' + str(r[2][1]) + '/km_' \
+ str(reaction_index) + '_' + str(r[2][1]) + ')^m_' + str(reaction_index) + '_' \
+ str(r[2][1]) + ' - 1)'
for i, reg in enumerate(r[3]):
if r[5][i] == 's' and r[4][i] == -1:
ant_str += ' + (S' + str(reg) + '/kms_' + str(reaction_index) + '_' + str(reg) \
+ ')^ms_' + str(reaction_index) + '_' + str(reg)
if r[5][i] == 's' and r[4][i] == 1:
ant_str += ' + (kms_' + str(reaction_index) + '_' + str(reg) + '/S' + str(reg) \
+ ')^ms_' + str(reaction_index) + '_' + str(reg)
if r[5][i] == 's':
ms.add('ms_' + str(reaction_index) + '_' + str(reg))
kms.add('kms_' + str(reaction_index) + '_' + str(reg))
if 's' in r[5]:
ant_str += ')' + enzyme_end
else:
ant_str += enzyme_end
if kinetics[0][8:10] == 'DM':
if 's' in r[5]:
ant_str += '/((('
else:
ant_str += '/(('
ant_str += 'S' + str(r[1][0]) + '/km_' + str(reaction_index) \
+ '_' + str(r[1][0]) + ')^m_' + str(reaction_index) + '_' + str(r[1][0]) + ' + (S' \
+ str(r[2][0]) + '/km_' + str(reaction_index) + '_' + str(r[2][0]) + ')^m_' \
+ str(reaction_index) + '_' + str(r[2][0]) + '*(S' + str(r[2][1]) + '/km_' \
+ str(reaction_index) + '_' + str(r[2][1]) + ')^m_' + str(reaction_index) + '_' \
+ str(r[2][1]) + ' + 1)'
for i, reg in enumerate(r[3]):
if r[5][i] == 's' and r[4][i] == -1:
ant_str += ' + (S' + str(reg) + '/kms_' + str(reaction_index) + '_' + str(reg) \
+ ')^ms_' + str(reaction_index) + '_' + str(reg)
if r[5][i] == 's' and r[4][i] == 1:
ant_str += ' + (kms_' + str(reaction_index) + '_' + str(reg) + '/S' + str(reg) \
+ ')^ms_' + str(reaction_index) + '_' + str(reg)
if r[5][i] == 's':
ms.add('ms_' + str(reaction_index) + '_' + str(reg))
kms.add('kms_' + str(reaction_index) + '_' + str(reg))
if 's' in r[5]:
ant_str += ')' + enzyme_end
else:
ant_str += enzyme_end
if kinetics[0][8:10] == 'SM':
if 's' in r[5]:
ant_str += '/((('
else:
ant_str += '/(('
ant_str += '1 + S' + str(r[1][0]) + '/km_' + str(reaction_index) \
+ '_' + str(r[1][0]) + ')^m_' + str(reaction_index) + '_' + str(r[1][0]) + '*(1 + S' \
+ str(r[2][0]) + '/km_' + str(reaction_index) + '_' + str(r[2][0]) + ')^m_' \
+ str(reaction_index) + '_' + str(r[2][0]) + '*(1 + S' + str(r[2][1]) + '/km_' \
+ str(reaction_index) + '_' + str(r[2][1]) + ')^m_' + str(reaction_index) + '_' \
+ str(r[2][1]) + ')'
for i, reg in enumerate(r[3]):
if r[5][i] == 's' and r[4][i] == -1:
ant_str += ' + (S' + str(reg) + '/kms_' + str(reaction_index) + '_' + str(reg) \
+ ')^ms_' + str(reaction_index) + '_' + str(reg)
if r[5][i] == 's' and r[4][i] == 1:
ant_str += ' + (kms_' + str(reaction_index) + '_' + str(reg) + '/S' + str(reg) \
+ ')^ms_' + str(reaction_index) + '_' + str(reg)
if r[5][i] == 's':
ms.add('ms_' + str(reaction_index) + '_' + str(reg))
kms.add('kms_' + str(reaction_index) + '_' + str(reg))
if 's' in r[5]:
ant_str += ')' + enzyme_end
else:
ant_str += enzyme_end
if kinetics[0][8:10] == 'FM':
if 's' in r[5]:
ant_str += '/((('
else:
ant_str += '/(('
ant_str += 'S' + str(r[1][0]) + '/km_' + str(reaction_index) \
+ '_' + str(r[1][0]) + ')^m_' + str(reaction_index) + '_' + str(r[1][0]) + '*(S' \
+ str(r[2][0]) + '/km_' + str(reaction_index) + '_' + str(r[2][0]) + ')^m_' \
+ str(reaction_index) + '_' + str(r[2][0]) + '*(S' + str(r[2][1]) + '/km_' \
+ str(reaction_index) + '_' + str(r[2][1]) + ')^m_' + str(reaction_index) + '_' \
+ str(r[2][1]) + ')^(1/2)'
for i, reg in enumerate(r[3]):
if r[5][i] == 's' and r[4][i] == -1:
ant_str += ' + (S' + str(reg) + '/kms_' + str(reaction_index) + '_' + str(reg) \
+ ')^ms_' + str(reaction_index) + '_' + str(reg)
if r[5][i] == 's' and r[4][i] == 1:
ant_str += ' + (kms_' + str(reaction_index) + '_' + str(reg) + '/S' + str(reg) \
+ ')^ms_' + str(reaction_index) + '_' + str(reg)
if r[5][i] == 's':
ms.add('ms_' + str(reaction_index) + | |
<reponame>aduckworth1969/smc
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
# import sys
import traceback
# import ldap
import ssl
from ldap3 import Server, Connection, ALL, NTLM, Tls
import ldap3.core.exceptions
from ldap3.extend.microsoft.addMembersToGroups import ad_add_members_to_groups
from ldap3.utils.dn import safe_rdn
# from .winrm import *
# from winrm import * as winrm
# from .. import winrm
import winrm
# from ..winrm.exceptions import WinRMTransportError
from winrm.exceptions import WinRMTransportError
from gluon import *
# from gluon import current
from ednet.appsettings import AppSettings
from ednet.util import Util
# ActiveDirectoryAPIClass
class AD:
# Config settings
# Encoding to use
_ad_encoding = "iso-8859-1" # "iso-8859-1" "utf-8" "utf-16-le"
# Active directory settings
_ldap_enabled = False
_ldap_protocol = ""
_ldap_server = ""
_ldap_login_user = ""
_ldap_login_pass = ""
# user and group paths
_ad_student_cn = "" # The container for students
_ad_student_group_cn = "" # The container for groups
_ad_student_group_dn = "" # The student group itself
_ad_faculty_cn = ""
_ad_faculty_group_cn = ""
_ad_faculty_group_dn = ""
# File server settings
_file_server_import_enabled = False
_file_server_address = ""
_file_server_login_user = ""
_file_server_login_pass = ""
# Quota settings
_file_server_quota_drive = ""
# Static objects
_init_run = False
_ldap = None
_ldap_session = None
_ldap_connect_time = datetime.today() - timedelta(seconds=600)
_ldap_keepalive_timeout = 300
_ldap_connection_tries = 0
_winrm = None
_errors = []
    def __init__(self):
        """No per-instance state: AD is used through its static methods and
        class-level attributes, so construction is a no-op."""
        pass
@staticmethod
def Init():
if AD._init_run is not True:
AD._ad_encoding = "iso-8859-1"
AD._ldap_enabled = AppSettings.GetValue('ad_import_enabled', False)
AD._file_server_import_enabled = AppSettings.GetValue('file_server_import_enabled', False)
AD._ldap_protocol = AppSettings.GetValue('ad_server_protocol', 'ldaps://')
AD._ldap_server = AppSettings.GetValue('ad_server_address', 'ad.correctionsed.com')
AD._ldap_login_user = AppSettings.GetValue('ad_service_user', 'Administrator')
AD._ldap_login_pass = AppSettings.GetValue('ad_service_password', '<PASSWORD>')
AD._file_server_address = AppSettings.GetValue('file_server_address', '')
AD._file_server_quota_drive = AppSettings.GetValue('file_server_quota_drive', 'c:')
AD._file_server_login_user = AppSettings.GetValue('file_server_login_user', 'Administrator')
AD._file_server_login_pass = AppSettings.GetValue('file_server_login_pass', '<PASSWORD>')
# STUDENT SETTINGS
AD._ad_student_cn = AppSettings.GetValue('ad_student_cn', 'OU=Students,DC=ad,DC=correctionsed,DC=com')
AD._ad_student_group_cn = AppSettings.GetValue('ad_student_group_cn', 'OU=StudentGroups,DC=ad,DC=correctionsed,DC=com')
AD._ad_student_group_dn = 'CN=Students,' + AD._ad_student_group_cn
# FACULTY SETTINGS
AD._ad_faculty_cn = AppSettings.GetValue('ad_faculty_cn', 'OU=Faculty,DC=ad,DC=correctionsed,DC=com')
AD._ad_faculty_group_cn = AppSettings.GetValue('ad_faculty_group_cn', 'OU=FacultyGroups,DC=ad,DC=correctionsed,DC=com')
AD._ad_faculty_group_dn = 'CN=Faculty,' + AD._ad_faculty_group_cn
# Allow self signed certs and set options for AD
# ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
# Set network timeout and keepalive options to keep connection from closing?
# ldap.set_option(ldap.OPT_X_KEEPALIVE_IDLE, AD._ldap_keepalive_timeout + 100)
# ldap.set_option(ldap.OPT_X_KEEPALIVE_INTERVAL, 10)
# ldap.set_option(ldap.OPT_X_KEEPALIVE_PROBES, 3)
# ldap.set_option(ldap.OPT_NETWORK_TIMEOUT, 10.0)
# ldap.set_option(ldap.OPT_TIMEOUT, 10.0)
# ldap.set_option(ldap.OPT_REFERRALS, 0)
AD._init_run = True
@staticmethod
def Close():
# if AD._ldap is not None:
# try:
# # AD._ldap.unbind_s()
# pass
# except Exception as ex:
# # Just make sure the close doesn't fail with fatal error
# # This may be being called in response to an exception
# print("Error closing ldap object: " + str(ex))
# print(traceback.print_stack())
# pass
AD._ldap = None
AD._init_run = False
AD._verify_ad_run = False
AD._errors = []
@staticmethod
def Connect():
ret = True
if AD.ConnectAD() is not True:
ret = False
if AD.ConnectWinRM() is not True:
ret = False
return ret
    @staticmethod
    def ConnectAD():
        """Ensure AD._ldap holds a bound ldap3 connection.

        Returns True when a connection is available (cached or newly made),
        False when LDAP is disabled or the bind fails.  Failure details are
        appended to AD._errors as HTML fragments for display in the UI.
        """
        AD.Init()
        if AD._ldap_enabled is not True:
            # LDap isn't on - skip this part
            AD._errors.append("----- LDAP DISABLED")
            return False
        # Drop the cached connection when it is older than the keepalive
        # window, was never made, or has lost its bind.
        if AD._ldap_connect_time < datetime.today() - timedelta(seconds=AD._ldap_keepalive_timeout) \
                or AD._ldap is None or AD._ldap.bound is False:
            # if it has been too long since this connection was established, force a reconnect
            # print("---- CLOSING LDAP CONNECTION")
            AD._errors.append("----- CLOSING LDAP CONNECTION")
            AD.Close()
        ret = False
        if AD._ldap is None or AD._ldap.bound is False:
            # print("--- MAKING NEW LDAP CONNECTION")
            # AD._ldap = ldap.initialize(AD._ldap_protocol + AD._ldap_server)
            # NOTE(review): certificate validation is disabled and TLSv1 is
            # forced - presumably to tolerate self-signed AD certificates;
            # confirm whether a newer TLS version can be required.
            tls_configuration = Tls(validate=ssl.CERT_NONE, version=ssl.PROTOCOL_TLSv1)
            # Use hard values due to import error for ssl module
            # ssl.CERT_NONE = 0, ssl.PROTOCOL_TLSv1 = 3
            # tls_configuration = Tls(validate=0, version=3)
            # tls_configuration.validate = ssl.CERT_NONE
            if AD._ldap_protocol.lower() == "ldaps://":
                AD._ldap_session = Server(AD._ldap_protocol + AD._ldap_server, get_info=ALL,
                                          mode=ldap3.IP_V4_PREFERRED,
                                          use_ssl=True, tls=tls_configuration)
            else:
                AD._ldap_session = Server(AD._ldap_protocol + AD._ldap_server, get_info=ALL,
                                          mode=ldap3.IP_V4_PREFERRED,
                                          )
            # AD._ldap.protocol_version = 3
            # AD._ldap.set_option(ldap.OPT_REFERRALS, 0)
            try:
                AD._ldap_connection_tries += 1
                # AD._ldap.simple_bind_s(AD._ldap_login_user, AD._ldap_login_pass)
                # AD._ldap.simple_bind_s(AD._ldap_login_user.encode(AD._ad_encoding),
                #                        AD._ldap_login_pass.encode(AD._ad_encoding))
                #AD._ldap = Connection(AD._ldap_session, AD._ldap_login_user.encode(AD._ad_encoding),
                #                      AD._ldap_login_pass.encode(AD._ad_encoding),
                #                      authentication=ldap3.NTLM,
                #                      auto_bind=True,
                #                      raise_exceptions=True,
                #                      auto_referrals=False,
                #                      client_strategy=ldap3.RESTARTABLE,
                #                      #receive_timeout=60,
                #
                #                      )
                # RESTARTABLE strategy: ldap3 transparently reopens a dropped
                # connection on later operations, so the cached object stays
                # usable across the keepalive window.
                AD._ldap = Connection(AD._ldap_session, AD._ldap_login_user,
                                      AD._ldap_login_pass,
                                      authentication=ldap3.NTLM,
                                      auto_bind=True,
                                      raise_exceptions=True,
                                      auto_referrals=False,
                                      client_strategy=ldap3.RESTARTABLE,
                                      #receive_timeout=60,
                                      )
                ret = True
                AD._ldap_connect_time = datetime.today()
            # except ldap3.core.exceptions.LDAPUnknownAuthenticationMethodError as message:
            except ldap3.core.exceptions.LDAPInvalidCredentialsResult as message:
            # except ldap.INVALID_CREDENTIALS as message:
                AD.Close()
                err = """
                <h1>Active Directory Login Error </h1>
                <p style="font-size: 10px;">%s</p>
                Active Directory is required to create user accounts for Windows. <br />
                Please check your login credentials on the config page.<br />
                For more information, please view the <a target='docs' href='""" % str(str(message) + " " + AD._ldap_server)
                err += URL('docs', 'ad', extension=False)
                err += "'>Active Directory Documentation Page</a><br/>"
                AD._errors.append(err)
                return False
            except ldap3.core.exceptions.LDAPSocketOpenError as message:
            # except ldap.SERVER_DOWN as message:
                AD.Close()
                err_msg = str(message) + " " + str(AD._ldap_server)
                err = """
                <h1>Active Directory Connection error </h1>
                <p style="font-size: 10px;"><err_msg></p>
                Active Directory is required to create user accounts for Windows. <br />
                Please check your Active Directory server information on the config page and that your server is on.<br />
                For more information, please view the <a target='docs' href='"""
                # Placeholder substitution (instead of %-formatting) keeps any
                # '%' characters in the socket error message from breaking it.
                err = err.replace("<err_msg>", err_msg)
                err += URL('docs', 'ad', extension=False)
                err += "'>Active Directory Documentation Page</a><br/>"
                AD._errors.append(err)
                return False
            except Exception as ex:
                AD.Close()
                # unknown error?
                err = "Unknown Error " + str(ex) + str(type(ex))
                traceback.print_stack()
                AD._errors.append(err)
                return False
        else:
            # Cached connection is still fresh and bound - reuse it.
            ret = True
        return ret
@staticmethod
def ConnectWinRM():
ret = False
AD.Init()
# Setup WinRM
AD._winrm = winrm.Session(AD._file_server_address, auth=(AD._file_server_login_user,
AD._file_server_login_pass))
# AD._winrm = winrm.Session('http://' + AD._file_server_address + ':5985/wsman',
# auth=(AD._ldap_login_user, AD._ldap_login_pass))
if AD._file_server_import_enabled is True:
try:
r = AD._winrm.run_cmd('ipconfig', ['/all'])
# NOTE - WinRM returns byte strings - convert to py3 strings
if r.std_err.decode() != "":
AD._errors.append("<b>Error connecting to fileserver(WINRM) connection:</b> " + r.std_err.decode() + "<br />")
return False
except WinRMTransportError as message:
err = """
<h1>WINRM connection error </h1>
<p style="font-size: 10px;">%s</p>
WINRM is required to create home directories and set permissions. <br />
For more information, please view the <a target='docs' href='""" % str(str(message) + " " + AD._file_server_address)
err += URL('docs', 'winrm', extension=False)
err += "'>WinRM Documentation Page</a>\n<br />Done!"
AD._errors.append(err)
return False
ret = True
else:
ret = True
return ret
@staticmethod
def VerifyADSettings(auto_create=False):
ret = True
AD._errors = []
AD.Close()
con = AD.Connect()
# Ensure that the proper items are present in the active directory structure
if AD._ldap_enabled is not True:
AD._errors.append("<B>Active Directory Disabled - Checks skipped</B>")
return ret
if con is True:
AD._errors.append("<b>AD Connection Successful</b> " + AD._ldap_server + "<br />")
else:
ret = False
# STUDENT CHECKS
# Check user cn exists
cn = AD._ad_student_cn
while '<program>' in cn:
cn = AD.GetParentLDAPPath(cn, 1)
user_cn = AD.GetLDAPObject(cn)
if user_cn is None and auto_create is True:
# Try and create it
if AD.CreateOU(cn) is not True:
AD._errors.append("<b>Error creating student OU: </b> " + cn + "<br />")
ret = False
else:
AD._errors.append("<b>Student OU created: </b> " + cn + "<br />")
elif user_cn is None and auto_create is not True:
r = "<B>Student Container not present and auto create disabled! (" + cn + ")<br/>"
AD._errors.append(r)
ret = False
else:
AD._errors.append("<b>Student OU exists: </b> " + cn + "<br />")
# Check that the groups cn exists
group_cn = AD.GetLDAPObject(AD._ad_student_group_cn)
if group_cn is None and auto_create is True:
if AD.CreateOU(AD._ad_student_group_cn) is not True:
AD._errors.append("<b>Error creating student groups OU: </b> " + AD._ad_student_group_cn + "<br />")
ret = False
else:
AD._errors.append("<b>Student groups OU created: </b> " + AD._ad_student_group_cn + "<br />")
elif group_cn is None and auto_create is not True:
r = "<B>Student Groups Container not present and auto create disabled! (" + AD._ad_student_group_cn + ")<br/>"
AD._errors.append(r)
ret = False
else:
AD._errors.append("<b>Student groups OU exists: </b> " + AD._ad_student_group_cn + "<br />")
# Check that the students group exists
student_group_dn = AD.GetLDAPObject(AD._ad_student_group_dn)
if student_group_dn is None and auto_create is True:
if AD.CreateGroup(AD._ad_student_group_dn) is not True:
AD._errors.append("<b>Error creating students group: </b> " + AD._ad_student_group_dn + "<br />")
ret = False
else:
AD._errors.append("<b>Students group created: </b> " + AD._ad_student_group_dn + "<br />")
elif student_group_dn is None and auto_create is not True:
r = "<B>Students Group not present and auto create disabled! (" + AD._ad_student_group_dn + ")<br/>"
AD._errors.append(r)
ret = False
else:
AD._errors.append("<b>Students group exists: </b> " + AD._ad_student_group_dn + "<br />")
# FACULTY CHECKS
# Check user cn exists
cn = AD._ad_faculty_cn
while '<program>' in cn:
cn = AD.GetParentLDAPPath(cn, 1)
| |
#!/usr/bin/env python
import os
# When running inside IPython, drop any notebook/kernel arguments so the
# srwl_bl option parser below only sees its own options.  Outside IPython
# the __IPYTHON__ lookup raises NameError, which is the only exception
# expected here - a bare `except:` would silently hide real errors.
try:
    __IPYTHON__
    import sys
    del sys.argv[1:]
except NameError:
    pass
import srwl_bl
import srwlib
import srwlpy
import srwl_uti_smp
def set_optics(v=None):
    """Build the SRW optical container for the NSLS-II SMI beamline.

    *v* is the options namespace produced by srwl_bl from ``varParam``;
    every element parameter is read from attributes named ``op_<name>_*``.
    Elements are appended in ``names`` order; each optical element gets a
    matching propagation-parameter list in ``pp``.  Crystals and mirrors
    that declare a height-profile file (``_hfn``) are followed by a 1D
    surface-error element with fixed propagation parameters.

    Returns an srwlib.SRWLOptC built from (el, pp).
    """
    el = []
    pp = []
    names = ['MOAT_1', 'MOAT_1_MOAT_2', 'MOAT_2', 'MOAT_2_HFM', 'HFM', 'HFM_VFM', 'VFM', 'VFM_VDM', 'VDM', 'VDM_SSA', 'SSA', 'SSA_ES1', 'ES1', 'ES1_CRL', 'CRL', 'CRL_ES2', 'ES2']
    for el_name in names:
        if el_name == 'MOAT_1':
            # MOAT_1: crystal 31.94m
            crystal = srwlib.SRWLOptCryst(
                _d_sp=v.op_MOAT_1_d_sp,
                _psi0r=v.op_MOAT_1_psi0r,
                _psi0i=v.op_MOAT_1_psi0i,
                _psi_hr=v.op_MOAT_1_psiHr,
                _psi_hi=v.op_MOAT_1_psiHi,
                _psi_hbr=v.op_MOAT_1_psiHBr,
                _psi_hbi=v.op_MOAT_1_psiHBi,
                _tc=v.op_MOAT_1_tc,
                _ang_as=v.op_MOAT_1_ang_as,
            )
            crystal.set_orient(
                _nvx=v.op_MOAT_1_nvx,
                _nvy=v.op_MOAT_1_nvy,
                _nvz=v.op_MOAT_1_nvz,
                _tvx=v.op_MOAT_1_tvx,
                _tvy=v.op_MOAT_1_tvy,
            )
            el.append(crystal)
            pp.append(v.op_MOAT_1_pp)
            # Crystal surface-error profile read from a tab-separated file.
            mirror_file = v.op_MOAT_1_hfn
            assert os.path.isfile(mirror_file), \
                'Missing input file {}, required by MOAT_1 beamline element'.format(mirror_file)
            el.append(srwlib.srwl_opt_setup_surf_height_1d(
                srwlib.srwl_uti_read_data_cols(mirror_file, "\t", 0, 1),
                _dim=v.op_MOAT_1_dim,
                _ang=abs(v.op_MOAT_1_ang),
                _amp_coef=v.op_MOAT_1_amp_coef,
            ))
            pp.append([0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0])
        elif el_name == 'MOAT_1_MOAT_2':
            # MOAT_1_MOAT_2: drift 31.94m
            el.append(srwlib.SRWLOptD(
                _L=v.op_MOAT_1_MOAT_2_L,
            ))
            pp.append(v.op_MOAT_1_MOAT_2_pp)
        elif el_name == 'MOAT_2':
            # MOAT_2: crystal 31.99m
            crystal = srwlib.SRWLOptCryst(
                _d_sp=v.op_MOAT_2_d_sp,
                _psi0r=v.op_MOAT_2_psi0r,
                _psi0i=v.op_MOAT_2_psi0i,
                _psi_hr=v.op_MOAT_2_psiHr,
                _psi_hi=v.op_MOAT_2_psiHi,
                _psi_hbr=v.op_MOAT_2_psiHBr,
                _psi_hbi=v.op_MOAT_2_psiHBi,
                _tc=v.op_MOAT_2_tc,
                _ang_as=v.op_MOAT_2_ang_as,
            )
            crystal.set_orient(
                _nvx=v.op_MOAT_2_nvx,
                _nvy=v.op_MOAT_2_nvy,
                _nvz=v.op_MOAT_2_nvz,
                _tvx=v.op_MOAT_2_tvx,
                _tvy=v.op_MOAT_2_tvy,
            )
            el.append(crystal)
            pp.append(v.op_MOAT_2_pp)
        elif el_name == 'MOAT_2_HFM':
            # MOAT_2_HFM: drift 31.99m
            el.append(srwlib.SRWLOptD(
                _L=v.op_MOAT_2_HFM_L,
            ))
            pp.append(v.op_MOAT_2_HFM_pp)
        elif el_name == 'HFM':
            # HFM: sphericalMirror 34.88244m
            el.append(srwlib.SRWLOptMirSph(
                _r=v.op_HFM_r,
                _size_tang=v.op_HFM_size_tang,
                _size_sag=v.op_HFM_size_sag,
                _nvx=v.op_HFM_nvx,
                _nvy=v.op_HFM_nvy,
                _nvz=v.op_HFM_nvz,
                _tvx=v.op_HFM_tvx,
                _tvy=v.op_HFM_tvy,
                _x=v.op_HFM_x,
                _y=v.op_HFM_y,
            ))
            pp.append(v.op_HFM_pp)
            # Mirror height-error profile read from a tab-separated file.
            mirror_file = v.op_HFM_hfn
            assert os.path.isfile(mirror_file), \
                'Missing input file {}, required by HFM beamline element'.format(mirror_file)
            el.append(srwlib.srwl_opt_setup_surf_height_1d(
                srwlib.srwl_uti_read_data_cols(mirror_file, "\t", 0, 1),
                _dim=v.op_HFM_dim,
                _ang=abs(v.op_HFM_ang),
                _amp_coef=v.op_HFM_amp_coef,
            ))
            pp.append([0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0])
        elif el_name == 'HFM_VFM':
            # HFM_VFM: drift 34.88244m
            el.append(srwlib.SRWLOptD(
                _L=v.op_HFM_VFM_L,
            ))
            pp.append(v.op_HFM_VFM_pp)
        elif el_name == 'VFM':
            # VFM: sphericalMirror 38.30244m
            el.append(srwlib.SRWLOptMirSph(
                _r=v.op_VFM_r,
                _size_tang=v.op_VFM_size_tang,
                _size_sag=v.op_VFM_size_sag,
                _nvx=v.op_VFM_nvx,
                _nvy=v.op_VFM_nvy,
                _nvz=v.op_VFM_nvz,
                _tvx=v.op_VFM_tvx,
                _tvy=v.op_VFM_tvy,
                _x=v.op_VFM_x,
                _y=v.op_VFM_y,
            ))
            pp.append(v.op_VFM_pp)
            # Mirror height-error profile read from a tab-separated file.
            mirror_file = v.op_VFM_hfn
            assert os.path.isfile(mirror_file), \
                'Missing input file {}, required by VFM beamline element'.format(mirror_file)
            el.append(srwlib.srwl_opt_setup_surf_height_1d(
                srwlib.srwl_uti_read_data_cols(mirror_file, "\t", 0, 1),
                _dim=v.op_VFM_dim,
                _ang=abs(v.op_VFM_ang),
                _amp_coef=v.op_VFM_amp_coef,
            ))
            pp.append([0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0])
        elif el_name == 'VFM_VDM':
            # VFM_VDM: drift 38.30244m
            el.append(srwlib.SRWLOptD(
                _L=v.op_VFM_VDM_L,
            ))
            pp.append(v.op_VFM_VDM_pp)
        elif el_name == 'VDM':
            # VDM: sphericalMirror 39.0m
            el.append(srwlib.SRWLOptMirSph(
                _r=v.op_VDM_r,
                _size_tang=v.op_VDM_size_tang,
                _size_sag=v.op_VDM_size_sag,
                _nvx=v.op_VDM_nvx,
                _nvy=v.op_VDM_nvy,
                _nvz=v.op_VDM_nvz,
                _tvx=v.op_VDM_tvx,
                _tvy=v.op_VDM_tvy,
                _x=v.op_VDM_x,
                _y=v.op_VDM_y,
            ))
            pp.append(v.op_VDM_pp)
            # Mirror height-error profile read from a tab-separated file.
            mirror_file = v.op_VDM_hfn
            assert os.path.isfile(mirror_file), \
                'Missing input file {}, required by VDM beamline element'.format(mirror_file)
            el.append(srwlib.srwl_opt_setup_surf_height_1d(
                srwlib.srwl_uti_read_data_cols(mirror_file, "\t", 0, 1),
                _dim=v.op_VDM_dim,
                _ang=abs(v.op_VDM_ang),
                _amp_coef=v.op_VDM_amp_coef,
            ))
            pp.append([0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0])
        elif el_name == 'VDM_SSA':
            # VDM_SSA: drift 39.0m
            el.append(srwlib.SRWLOptD(
                _L=v.op_VDM_SSA_L,
            ))
            pp.append(v.op_VDM_SSA_pp)
        elif el_name == 'SSA':
            # SSA: aperture 47.00244m
            el.append(srwlib.SRWLOptA(
                _shape=v.op_SSA_shape,
                _ap_or_ob='a',
                _Dx=v.op_SSA_Dx,
                _Dy=v.op_SSA_Dy,
                _x=v.op_SSA_x,
                _y=v.op_SSA_y,
            ))
            pp.append(v.op_SSA_pp)
        elif el_name == 'SSA_ES1':
            # SSA_ES1: drift 47.00244m
            el.append(srwlib.SRWLOptD(
                _L=v.op_SSA_ES1_L,
            ))
            pp.append(v.op_SSA_ES1_pp)
        elif el_name == 'ES1':
            # ES1: watch 50.9m
            # Watch points add no optical element - only an observation plane.
            pass
        elif el_name == 'ES1_CRL':
            # ES1_CRL: drift 50.9m
            el.append(srwlib.SRWLOptD(
                _L=v.op_ES1_CRL_L,
            ))
            pp.append(v.op_ES1_CRL_pp)
        elif el_name == 'CRL':
            # CRL: crl 57.335m
            el.append(srwlib.srwl_opt_setup_CRL(
                _foc_plane=v.op_CRL_foc_plane,
                _delta=v.op_CRL_delta,
                _atten_len=v.op_CRL_atten_len,
                _shape=v.op_CRL_shape,
                _apert_h=v.op_CRL_apert_h,
                _apert_v=v.op_CRL_apert_v,
                _r_min=v.op_CRL_r_min,
                _n=v.op_CRL_n,
                _wall_thick=v.op_CRL_wall_thick,
                _xc=v.op_CRL_x,
                _yc=v.op_CRL_y,
            ))
            pp.append(v.op_CRL_pp)
        elif el_name == 'CRL_ES2':
            # CRL_ES2: drift 57.335m
            el.append(srwlib.SRWLOptD(
                _L=v.op_CRL_ES2_L,
            ))
            pp.append(v.op_CRL_ES2_pp)
        elif el_name == 'ES2':
            # ES2: watch 59.0m
            pass
    # Final ("fin") propagation parameters applied after the last element.
    pp.append(v.op_fin_pp)
    return srwlib.SRWLOptC(el, pp)
varParam = srwl_bl.srwl_uti_ext_options([
['name', 's', 'NSLS-II SMI beamline', 'simulation name'],
#---Data Folder
['fdir', 's', '', 'folder (directory) name for reading-in input and saving output data files'],
#---Electron Beam
['ebm_nm', 's', '', 'standard electron beam name'],
['ebm_nms', 's', '', 'standard electron beam name suffix: e.g. can be Day1, Final'],
['ebm_i', 'f', 0.5, 'electron beam current [A]'],
['ebm_e', 'f', 3.0, 'electron beam avarage energy [GeV]'],
['ebm_de', 'f', 0.0, 'electron beam average energy deviation [GeV]'],
['ebm_x', 'f', 0.0, 'electron beam initial average horizontal position [m]'],
['ebm_y', 'f', 0.0, 'electron beam initial average vertical position [m]'],
['ebm_xp', 'f', 0.0, 'electron beam initial average horizontal angle [rad]'],
['ebm_yp', 'f', 0.0, 'electron beam initial average vertical angle [rad]'],
['ebm_z', 'f', 0., 'electron beam initial average longitudinal position [m]'],
['ebm_dr', 'f', -1.4432500000000001, 'electron beam longitudinal drift [m] to be performed before a required calculation'],
['ebm_ens', 'f', 0.00089, 'electron beam relative energy spread'],
['ebm_emx', 'f', 9e-10, 'electron beam horizontal emittance [m]'],
['ebm_emy', 'f', 8e-12, 'electron beam vertical emittance [m]'],
# Definition of the beam through Twiss:
['ebm_betax', 'f', 20.85, 'horizontal beta-function [m]'],
['ebm_betay', 'f', 3.4, 'vertical beta-function [m]'],
['ebm_alphax', 'f', 0.0, 'horizontal alpha-function [rad]'],
['ebm_alphay', 'f', 0.0, 'vertical alpha-function [rad]'],
['ebm_etax', 'f', 0.0, 'horizontal dispersion function [m]'],
['ebm_etay', 'f', 0.0, 'vertical dispersion function [m]'],
['ebm_etaxp', 'f', 0.0, 'horizontal dispersion function derivative [rad]'],
['ebm_etayp', 'f', 0.0, 'vertical dispersion function derivative [rad]'],
#---Undulator
['und_bx', 'f', 0.0, 'undulator horizontal peak magnetic field [T]'],
['und_by', 'f', 0.955, 'undulator vertical peak magnetic field [T]'],
['und_phx', 'f', 0.0, 'initial phase of the horizontal magnetic field [rad]'],
['und_phy', 'f', 0.0, 'initial phase of the vertical magnetic field [rad]'],
['und_b2e', '', '', 'estimate undulator fundamental photon energy (in [eV]) for the amplitude of sinusoidal magnetic field defined by und_b or und_bx, und_by', 'store_true'],
['und_e2b', '', '', 'estimate undulator field amplitude (in [T]) for the photon energy defined by w_e', 'store_true'],
['und_per', 'f', 0.023, 'undulator period [m]'],
['und_len', 'f', 2.7945, 'undulator length [m]'],
['und_zc', 'f', 0.6, 'undulator center longitudinal position [m]'],
['und_sx', 'i', 1, 'undulator horizontal magnetic field symmetry vs longitudinal position'],
['und_sy', 'i', -1, 'undulator vertical magnetic field symmetry vs longitudinal position'],
['und_g', 'f', 6.72, 'undulator gap [mm] (assumes availability of magnetic measurement or simulation data)'],
['und_ph', 'f', 0.0, 'shift of magnet arrays [mm] for which the field should be set up'],
['und_mdir', 's', '', 'name of magnetic measurements sub-folder'],
['und_mfs', 's', '', 'name of magnetic measurements for different gaps summary file'],
#---Calculation Types
# Electron Trajectory
['tr', '', '', 'calculate electron trajectory', 'store_true'],
['tr_cti', 'f', 0.0, 'initial time moment (c*t) for electron trajectory calculation [m]'],
['tr_ctf', 'f', 0.0, 'final time moment (c*t) for electron trajectory calculation [m]'],
['tr_np', 'f', 10000, 'number of points for trajectory calculation'],
['tr_mag', 'i', 1, 'magnetic field to be used for trajectory calculation: 1- approximate, 2- accurate'],
['tr_fn', 's', 'res_trj.dat', 'file name for saving calculated trajectory data'],
['tr_pl', 's', '', 'plot the resulting trajectiry in graph(s): ""- dont plot, otherwise the string should list the trajectory components to plot'],
#Single-Electron Spectrum vs Photon Energy
['ss', '', '', 'calculate single-e spectrum vs photon energy', 'store_true'],
['ss_ei', 'f', 20000.0, 'initial photon energy [eV] for single-e spectrum vs photon energy calculation'],
['ss_ef', 'f', 20400.0, 'final photon energy [eV] for single-e spectrum vs photon energy calculation'],
['ss_ne', 'i', 10000, 'number of points vs photon energy for single-e spectrum vs photon energy calculation'],
['ss_x', 'f', 0.0, 'horizontal position [m] for single-e spectrum vs photon energy calculation'],
['ss_y', 'f', 0.0, 'vertical position [m] for single-e spectrum vs photon energy calculation'],
['ss_meth', 'i', 1, 'method to use for single-e spectrum vs photon energy calculation: 0- "manual", 1- "auto-undulator", 2- "auto-wiggler"'],
['ss_prec', 'f', 0.01, 'relative precision for single-e spectrum vs photon energy calculation (nominal value is 0.01)'],
['ss_pol', 'i', 6, 'polarization component to extract after spectrum vs photon energy calculation: 0- Linear Horizontal, 1- Linear Vertical, 2- Linear 45 degrees, 3- Linear 135 degrees, 4- Circular Right, 5- Circular Left, 6- Total'],
['ss_mag', 'i', 1, 'magnetic field to be used for single-e spectrum vs photon energy calculation: 1- approximate, 2- accurate'],
['ss_ft', 's', 'f', 'presentation/domain: "f"- frequency (photon energy), "t"- time'],
['ss_u', 'i', 1, 'electric field units: 0- arbitrary, 1- sqrt(Phot/s/0.1%bw/mm^2), 2- sqrt(J/eV/mm^2) or sqrt(W/mm^2), depending on representation (freq. or time)'],
['ss_fn', 's', 'res_spec_se.dat', 'file name for saving calculated single-e spectrum vs photon energy'],
['ss_pl', 's', '', 'plot the resulting single-e spectrum in a graph: ""- dont plot, "e"- show plot vs photon energy'],
#Multi-Electron Spectrum vs Photon Energy (taking into account e-beam emittance, energy spread and collection aperture size)
['sm', '', '', 'calculate multi-e spectrum vs photon energy', 'store_true'],
['sm_ei', 'f', 100.0, 'initial photon energy [eV] for multi-e spectrum vs photon energy calculation'],
['sm_ef', 'f', 20000.0, 'final photon energy [eV] for multi-e spectrum vs photon energy calculation'],
['sm_ne', 'i', 10000, 'number of points vs photon energy for multi-e spectrum vs photon energy calculation'],
['sm_x', 'f', 0.0, 'horizontal center position [m] for multi-e spectrum vs photon energy calculation'],
['sm_rx', 'f', 0.001, 'range of horizontal position / horizontal aperture size [m] for multi-e spectrum vs photon energy calculation'],
['sm_nx', 'i', | |
is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_4)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_3, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_2._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Reference')), pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 72, 6))
st_5 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_5)
transitions = []
transitions.append(fac.Transition(st_1, [
]))
st_0._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_2, [
]))
transitions.append(fac.Transition(st_3, [
]))
transitions.append(fac.Transition(st_4, [
]))
transitions.append(fac.Transition(st_5, [
]))
st_1._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_0, False) ]))
st_2._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_1, True) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_1, False) ]))
st_3._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_2, True) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_2, False) ]))
st_4._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_3, True) ]))
st_5._set_transitionSet(transitions)
return fac.Automaton(states, counters, False, containing_state=None)
# Attach the content-model automaton built above to its anonymous complex type.
CTD_ANON_2._Automaton = _BuildAutomaton_2()

# Generated pyxb bindings: declare the child elements of CTD_ANON_3.
# The Location arguments record where each element is defined in the
# source schema (pug_view.xsd) for error reporting.
CTD_ANON_3._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Section'), CTD_ANON_3, scope=CTD_ANON_3, location=pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 77, 0)))

CTD_ANON_3._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'TOCHeading'), pyxb.binding.datatypes.string, scope=CTD_ANON_3, location=pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 81, 8)))

CTD_ANON_3._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'TOCID'), pyxb.binding.datatypes.int, scope=CTD_ANON_3, location=pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 82, 8)))

CTD_ANON_3._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Description'), pyxb.binding.datatypes.string, scope=CTD_ANON_3, location=pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 84, 6)))

CTD_ANON_3._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Views'), pyxb.binding.datatypes.string, scope=CTD_ANON_3, location=pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 85, 6)))

CTD_ANON_3._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'HintGroupSubsectionsByReference'), pyxb.binding.datatypes.boolean, scope=CTD_ANON_3, location=pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 86, 6)))

CTD_ANON_3._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'HintEmbeddedHTML'), pyxb.binding.datatypes.boolean, scope=CTD_ANON_3, location=pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 87, 6)))

CTD_ANON_3._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Information'), CTD_ANON_4, scope=CTD_ANON_3, location=pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 94, 0)))
# Build the PyXB "finite automaton with counters" (FAC) that validates the
# content model of CTD_ANON_3.  One state is created per child element use;
# counter conditions carry the occurrence bounds of optional/repeatable
# particles, and transitions encode which child may follow which.
# Auto-generated by the PyXB binding generator -- do not edit by hand.
def _BuildAutomaton_3 ():
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton_3
    del _BuildAutomaton_3
    import pyxb.utils.fac as fac

    # Counter conditions: occurrence bounds for each counted particle
    # (max=None means unbounded); metadata records the schema location.
    counters = set()
    cc_0 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 84, 6))
    counters.add(cc_0)
    cc_1 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 85, 6))
    counters.add(cc_1)
    cc_2 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 86, 6))
    counters.add(cc_2)
    cc_3 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 87, 6))
    counters.add(cc_3)
    cc_4 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 88, 6))
    counters.add(cc_4)
    cc_5 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 89, 6))
    counters.add(cc_5)

    # States: one per child element use.  st_0 (TOCHeading) and st_1 (TOCID)
    # are the possible initial symbols; the final_update set of each counted
    # state records which counter its acceptance must satisfy.
    states = []
    final_update = set()
    symbol = pyxb.binding.content.ElementUse(CTD_ANON_3._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'TOCHeading')), pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 81, 8))
    st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_0)
    final_update = set()
    symbol = pyxb.binding.content.ElementUse(CTD_ANON_3._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'TOCID')), pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 82, 8))
    st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_1)
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_0, False))
    symbol = pyxb.binding.content.ElementUse(CTD_ANON_3._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Description')), pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 84, 6))
    st_2 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
    states.append(st_2)
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_1, False))
    symbol = pyxb.binding.content.ElementUse(CTD_ANON_3._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Views')), pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 85, 6))
    st_3 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
    states.append(st_3)
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_2, False))
    symbol = pyxb.binding.content.ElementUse(CTD_ANON_3._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'HintGroupSubsectionsByReference')), pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 86, 6))
    st_4 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
    states.append(st_4)
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_3, False))
    symbol = pyxb.binding.content.ElementUse(CTD_ANON_3._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'HintEmbeddedHTML')), pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 87, 6))
    st_5 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
    states.append(st_5)
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_4, False))
    symbol = pyxb.binding.content.ElementUse(CTD_ANON_3._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Information')), pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 88, 6))
    st_6 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
    states.append(st_6)
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_5, False))
    symbol = pyxb.binding.content.ElementUse(CTD_ANON_3._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Section')), pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 89, 6))
    st_7 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
    states.append(st_7)

    # Transitions out of each state; the attached UpdateInstruction(cc, b)
    # either increments (True) or resets/closes (False) the counter when
    # the transition is taken.
    transitions = []
    transitions.append(fac.Transition(st_2, [
        ]))
    transitions.append(fac.Transition(st_3, [
        ]))
    transitions.append(fac.Transition(st_4, [
        ]))
    transitions.append(fac.Transition(st_5, [
        ]))
    transitions.append(fac.Transition(st_6, [
        ]))
    transitions.append(fac.Transition(st_7, [
        ]))
    st_0._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_2, [
        ]))
    transitions.append(fac.Transition(st_3, [
        ]))
    transitions.append(fac.Transition(st_4, [
        ]))
    transitions.append(fac.Transition(st_5, [
        ]))
    transitions.append(fac.Transition(st_6, [
        ]))
    transitions.append(fac.Transition(st_7, [
        ]))
    st_1._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_2, [
        fac.UpdateInstruction(cc_0, True) ]))
    transitions.append(fac.Transition(st_3, [
        fac.UpdateInstruction(cc_0, False) ]))
    transitions.append(fac.Transition(st_4, [
        fac.UpdateInstruction(cc_0, False) ]))
    transitions.append(fac.Transition(st_5, [
        fac.UpdateInstruction(cc_0, False) ]))
    transitions.append(fac.Transition(st_6, [
        fac.UpdateInstruction(cc_0, False) ]))
    transitions.append(fac.Transition(st_7, [
        fac.UpdateInstruction(cc_0, False) ]))
    st_2._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_3, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_4, [
        fac.UpdateInstruction(cc_1, False) ]))
    transitions.append(fac.Transition(st_5, [
        fac.UpdateInstruction(cc_1, False) ]))
    transitions.append(fac.Transition(st_6, [
        fac.UpdateInstruction(cc_1, False) ]))
    transitions.append(fac.Transition(st_7, [
        fac.UpdateInstruction(cc_1, False) ]))
    st_3._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_4, [
        fac.UpdateInstruction(cc_2, True) ]))
    transitions.append(fac.Transition(st_5, [
        fac.UpdateInstruction(cc_2, False) ]))
    transitions.append(fac.Transition(st_6, [
        fac.UpdateInstruction(cc_2, False) ]))
    transitions.append(fac.Transition(st_7, [
        fac.UpdateInstruction(cc_2, False) ]))
    st_4._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_5, [
        fac.UpdateInstruction(cc_3, True) ]))
    transitions.append(fac.Transition(st_6, [
        fac.UpdateInstruction(cc_3, False) ]))
    transitions.append(fac.Transition(st_7, [
        fac.UpdateInstruction(cc_3, False) ]))
    st_5._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_6, [
        fac.UpdateInstruction(cc_4, True) ]))
    transitions.append(fac.Transition(st_7, [
        fac.UpdateInstruction(cc_4, False) ]))
    st_6._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_7, [
        fac.UpdateInstruction(cc_5, True) ]))
    st_7._set_transitionSet(transitions)
    return fac.Automaton(states, counters, False, containing_state=None)
# Attach the automaton to the binding class (the helper deletes itself).
CTD_ANON_3._Automaton = _BuildAutomaton_3()
# ---------------------------------------------------------------------------
# PyXB-generated element declarations for the anonymous complex type
# CTD_ANON_4 of pug_view.xsd (the type bound to <Information> children of
# CTD_ANON_3 above).  Each _AddElement call registers one child element with
# its bound Python datatype and schema location.
# Auto-generated by the PyXB binding generator -- do not edit by hand.
# ---------------------------------------------------------------------------
CTD_ANON_4._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'ReferenceNumber'), pyxb.binding.datatypes.int, scope=CTD_ANON_4, location=pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 97, 6)))
CTD_ANON_4._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Name'), pyxb.binding.datatypes.string, scope=CTD_ANON_4, location=pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 98, 6)))
CTD_ANON_4._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Description'), pyxb.binding.datatypes.string, scope=CTD_ANON_4, location=pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 99, 6)))
CTD_ANON_4._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Reference'), pyxb.binding.datatypes.string, scope=CTD_ANON_4, location=pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 100, 6)))
CTD_ANON_4._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'URL'), pyxb.binding.datatypes.string, scope=CTD_ANON_4, location=pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 101, 6)))
# Typed value alternatives (scalar and list variants).
CTD_ANON_4._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'NumValue'), pyxb.binding.datatypes.double, scope=CTD_ANON_4, location=pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 103, 8)))
CTD_ANON_4._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'NumValueList'), pyxb.binding.datatypes.double, scope=CTD_ANON_4, location=pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 104, 8)))
CTD_ANON_4._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'DateValue'), pyxb.binding.datatypes.string, scope=CTD_ANON_4, location=pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 105, 8)))
CTD_ANON_4._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'DateValueList'), pyxb.binding.datatypes.string, scope=CTD_ANON_4, location=pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 106, 8)))
CTD_ANON_4._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'BoolValue'), pyxb.binding.datatypes.boolean, scope=CTD_ANON_4, location=pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 107, 8)))
CTD_ANON_4._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'BoolValueList'), pyxb.binding.datatypes.boolean, scope=CTD_ANON_4, location=pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 108, 8)))
CTD_ANON_4._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'StringValue'), pyxb.binding.datatypes.string, scope=CTD_ANON_4, location=pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 109, 8)))
CTD_ANON_4._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'StringValueList'), pyxb.binding.datatypes.string, scope=CTD_ANON_4, location=pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 110, 8)))
CTD_ANON_4._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'BinaryValue'), pyxb.binding.datatypes.base64Binary, scope=CTD_ANON_4, location=pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 111, 8)))
CTD_ANON_4._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'BinaryValueList'), pyxb.binding.datatypes.base64Binary, scope=CTD_ANON_4, location=pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 112, 8)))
CTD_ANON_4._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'ExternalDataURL'), pyxb.binding.datatypes.string, scope=CTD_ANON_4, location=pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 113, 8)))
CTD_ANON_4._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'ExternalDataURLList'), pyxb.binding.datatypes.string, scope=CTD_ANON_4, location=pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 114, 8)))
CTD_ANON_4._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'ValueUnit'), pyxb.binding.datatypes.string, scope=CTD_ANON_4, location=pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 116, 6)))
CTD_ANON_4._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'ExternalDataMimeType'), pyxb.binding.datatypes.string, scope=CTD_ANON_4, location=pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 117, 6)))
def _BuildAutomaton_4 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_4
del _BuildAutomaton_4
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 98, 6))
counters.add(cc_0)
cc_1 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 99, 6))
counters.add(cc_1)
cc_2 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 100, 6))
counters.add(cc_2)
cc_3 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 101, 6))
counters.add(cc_3)
cc_4 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 102, 6))
counters.add(cc_4)
cc_5 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 116, 6))
counters.add(cc_5)
cc_6 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 117, 6))
counters.add(cc_6)
states = []
final_update = set()
symbol = pyxb.binding.content.ElementUse(CTD_ANON_4._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'ReferenceNumber')), pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 97, 6))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_4._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Name')), pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 98, 6))
st_1 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_1)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_1, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_4._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Description')), pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 99, 6))
st_2 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_2)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_2, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_4._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Reference')), pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 100, 6))
st_3 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_3)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_3, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_4._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'URL')), pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 101, 6))
st_4 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_4)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_4, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_4._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'NumValue')), pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 103, 8))
st_5 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_5)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_4, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_4._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'NumValueList')), pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 104, 8))
st_6 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_6)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_4, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_4._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'DateValue')), pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 105, 8))
st_7 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_7)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_4, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_4._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'DateValueList')), pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 106, 8))
st_8 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_8)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_4, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_4._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'BoolValue')), pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 107, 8))
st_9 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_9)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_4, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_4._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'BoolValueList')), pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 108, 8))
st_10 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_10)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_4, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_4._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'StringValue')), pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 109, 8))
st_11 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_11)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_4, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_4._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'StringValueList')), pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 110, 8))
st_12 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_12)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_4, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_4._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'BinaryValue')), pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 111, 8))
st_13 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_13)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_4, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_4._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'BinaryValueList')), pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 112, 8))
st_14 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_14)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_4, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_4._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'ExternalDataURL')), pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 113, 8))
st_15 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_15)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_4, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_4._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'ExternalDataURLList')), pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 114, 8))
st_16 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_16)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_5, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_4._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'ValueUnit')), pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 116, 6))
st_17 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_17)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_6, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_4._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'ExternalDataMimeType')), pyxb.utils.utility.Location('/Users/cmhudso/Desktop/SRAnt/NCBI/pug_view.xsd', 117, 6))
st_18 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_18)
transitions = []
transitions.append(fac.Transition(st_1, [
]))
transitions.append(fac.Transition(st_2, [
]))
transitions.append(fac.Transition(st_3, [
]))
transitions.append(fac.Transition(st_4, [
]))
transitions.append(fac.Transition(st_5, [
]))
transitions.append(fac.Transition(st_6, [
]))
transitions.append(fac.Transition(st_7, [
]))
transitions.append(fac.Transition(st_8, [
]))
transitions.append(fac.Transition(st_9, [
]))
transitions.append(fac.Transition(st_10, [
]))
transitions.append(fac.Transition(st_11, [
]))
transitions.append(fac.Transition(st_12, [
]))
transitions.append(fac.Transition(st_13, [
]))
transitions.append(fac.Transition(st_14, [
]))
transitions.append(fac.Transition(st_15, [
]))
transitions.append(fac.Transition(st_16, [
]))
transitions.append(fac.Transition(st_17, [
]))
transitions.append(fac.Transition(st_18, [
]))
st_0._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_6, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_7, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_8, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_9, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_10, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_11, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_12, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_13, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_14, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_15, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_16, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_17, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_18, [
fac.UpdateInstruction(cc_0, False) ]))
st_1._set_transitionSet(transitions)
| |
"""Module containing the abstract base classes (ABCs) which define the interfaces of
various objects expected by the ``pygmol`` framework.
"""
from abc import ABC, abstractmethod
from typing import Union, Sequence, Dict, Callable, Mapping
from numpy import ndarray, float64
from pyvalem.formula import FormulaParseError
from pyvalem.stateful_species import StatefulSpecies
from pyvalem.states import StateParseError
from scipy import constants
class Chemistry(ABC):
    # noinspection PyUnresolvedReferences
    """An abstract base class (ABC) defining the interface of objects describing the
    plasma chemistry, expected by other classes of the ``pygmol`` framework.

    A chemistry is a collection of species and reactions, and data attached to them.
    Species can be divided into heavy species (neutral and ions), and *special* species:
    electron and an arbitrary species 'M'.

    The heavy species data required by the model are encoded in the attributes or
    properties starting with ``species_``. The reactions kinetic data (and metadata) are
    encoded in the attributes/properties starting with ``reactions_``. Subset of these
    attributes starting with ``reactions_electron_``, ``reactions_arbitrary_`` and
    ``reactions_species_`` then handle the relationships between reactions and all the
    species (heavy and special) in the chemistry. See the attributes below.

    Documentation for the mandatory and optional attributes can be found in docstrings
    of the corresponding properties.

    Some of the attributes/properties listed here are implemented in this ABC as useful
    defaults, and therefore must not strictly be re-implemented by a concrete subclass,
    if inheriting from this abstraction (they are not @abstract properties). All of
    the attributes are needed by the `pygmol` package however, so a concrete chemistry
    class which *does not* inherit from this abstraction must implement all the
    attributes/properties below.

    Attributes
    ----------
    species_ids : Sequence[str]
    species_charges : Sequence[int]
        Default provided by the ABC.
    species_masses : Sequence[float]
        Default provided by the ABC.
    species_lj_sigma_coefficients : Sequence[float]
        Default provided by the ABC.
    species_surface_sticking_coefficients : Sequence[float]
        Default provided by the ABC.
    species_surface_return_matrix : Sequence[Sequence[float]]
    reactions_ids : Sequence[str] or Sequence[int]
    reactions_arrh_a : Sequence[float]
    reactions_arrh_b : Sequence[float]
    reactions_arrh_c : Sequence[float]
    reactions_el_energy_losses : Sequence[float]
    reactions_elastic_flags : Sequence[bool]
    reactions_electron_stoich_lhs : Sequence[int]
    reactions_electron_stoich_rhs : Sequence[int]
    reactions_arbitrary_stoich_lhs : Sequence[int]
    reactions_arbitrary_stoich_rhs : Sequence[int]
    reactions_species_stoichiomatrix_lhs : Sequence[Sequence[int]]
    reactions_species_stoichiomatrix_rhs : Sequence[Sequence[int]]
    """

    @property
    @abstractmethod
    def species_ids(self) -> Sequence[str]:
        """Unique ids/names of all the heavy species in the chemistry. This excludes
        electrons and the *arbitrary* species 'M'.

        If pyvalem-compatible formula strings are used as `species_ids`, this ABC
        will provide defaults for `species_charges` and `species_masses` attributes.
        See the pyvalem package on PyPI.
        """

    @property
    def species_charges(self) -> Sequence[int]:
        """Charges [e] of all the heavy species in the chemistry. This excludes
        electrons and the *arbitrary* species 'M'.

        By default, the species charges are parsed from `species_ids`, if they are in
        pyvalem-compatible format.

        Raises
        ------
        NotImplementedError
            If `species_ids` are not pyvalem-compatible formulas (in which case a
            concrete subclass must override this property).
        """
        try:
            charges = [
                StatefulSpecies(sp_name).formula.charge for sp_name in self.species_ids
            ]
            return charges
        except (FormulaParseError, StateParseError):
            raise NotImplementedError(
                "Either `species_charges` attribute needs to be implemented, or "
                "the `species_ids` need to be pyvalem-compatible formulas!"
            )

    @property
    def species_masses(self) -> Sequence[float]:
        """Masses [amu] of all the heavy species in the chemistry. This excludes
        electrons and the *arbitrary* species 'M'.

        By default, the species masses are parsed from `species_ids`, if they are in
        pyvalem-compatible format.

        Raises
        ------
        NotImplementedError
            If `species_ids` are not pyvalem-compatible formulas (in which case a
            concrete subclass must override this property).
        """
        try:
            masses = [
                StatefulSpecies(sp_name).formula.mass for sp_name in self.species_ids
            ]
            return masses
        except (FormulaParseError, StateParseError):
            raise NotImplementedError(
                "Either `species_masses` attribute needs to be implemented, or "
                "the `species_ids` need to be pyvalem-compatible formulas!"
            )

    @property
    def species_lj_sigma_coefficients(self) -> Sequence[float]:
        """Lennard-Jones sigma parameters [Angstrom] of all the heavy species in the
        chemistry. This excludes electrons and the *arbitrary* species 'M'.

        The `Chemistry` ABC provides a useful default where the Lennard-Jones
        coefficients are not available.
        """
        # 3.0 Angstrom is used as a generic fallback for every species.
        return [3.0 for _ in self.species_ids]

    @property
    def species_surface_sticking_coefficients(self) -> Sequence[float]:
        """Surface sticking coefficients of all the heavy species in the chemistry.
        This excludes electrons and the *arbitrary* species 'M'. The i-th element of
        the sequence denotes what fraction of the i-th species is lost when reaching the
        surface.

        Default is provided by this ABC: All charged species have by default sticking
        coefficient 1.0, while all the neutral species have by default sticking
        coefficient 0.0.
        """
        # bool(charge) is True for any non-zero charge; float() maps it to 1.0/0.0.
        return [float(bool(sp_charge)) for sp_charge in self.species_charges]

    @property
    @abstractmethod
    def species_surface_return_matrix(self) -> Sequence[Sequence[float]]:
        """A 2D array-like of shape (Ns, Ns), where [i, j] index (i-th row and j-th
        column) denotes the number of i-th species created by each one j-th species
        *STUCK* to the surface. Non-zero values of R[:, j] therefore only make sense if
        ``chemistry.species_surface_sticking_coefficients[j] > 0``.

        Ns refers to the number of heavy species in the chemistry and needs to be
        consistent with `species_ids` property/attribute.
        """

    @property
    @abstractmethod
    def reactions_ids(self) -> Union[Sequence[str], Sequence[int]]:
        """Unique IDs of all the reactions in the chemistry."""

    @property
    def reactions_strings(self) -> Sequence[str]:
        """Optional human-readable reaction strings for all the reactions in the
        chemistry.

        This is used only for annotating the model solutions.
        """
        return [f"Reaction {r_id}" for r_id in self.reactions_ids]

    @property
    @abstractmethod
    def reactions_arrh_a(self) -> Sequence[float]:
        """First Arrhenius parameters (A, or alpha) for all the reactions in the
        chemistry. The `arrh_a` values are in SI [m3.s-1 / m6.s-1, s-1].
        """

    @property
    @abstractmethod
    def reactions_arrh_b(self) -> Sequence[float]:
        """Second Arrhenius parameters (n, or beta) for all the reactions in the
        chemistry. The `arrh_b` values are unitless.
        """

    @property
    @abstractmethod
    def reactions_arrh_c(self) -> Sequence[float]:
        """Third Arrhenius parameters (E_a, or gamma) for all the reactions in the
        chemistry. The `arrh_c` values are in [eV] for electron collisions and in [K]
        for heavy-species collisions.
        """

    @property
    @abstractmethod
    def reactions_el_energy_losses(self) -> Sequence[float]:
        """Electron energy loss [eV] for all the reactions in the chemistry. Should be
        non-zero only for inelastic electron collisions, estimating average energy loss
        for the electron in each collision.
        """

    @property
    @abstractmethod
    def reactions_elastic_flags(self) -> Sequence[bool]:
        """Boolean flags for all the reactions in the chemistry, evaluating to True for
        elastic collisions only.
        """

    @property
    @abstractmethod
    def reactions_electron_stoich_lhs(self) -> Sequence[int]:
        """Number of electrons on left-hand-side of each reaction in the chemistry."""

    @property
    @abstractmethod
    def reactions_electron_stoich_rhs(self) -> Sequence[int]:
        """Number of electrons on right-hand-side of each reaction in the chemistry."""

    @property
    @abstractmethod
    def reactions_arbitrary_stoich_lhs(self) -> Sequence[int]:
        """Number of arbitrary species 'M' on left-hand-side of each reaction in the
        chemistry.
        """

    @property
    @abstractmethod
    def reactions_arbitrary_stoich_rhs(self) -> Sequence[int]:
        """Number of arbitrary species 'M' on right-hand-side of each reaction in the
        chemistry.
        """

    @property
    @abstractmethod
    def reactions_species_stoichiomatrix_lhs(self) -> Sequence[Sequence[int]]:
        """A 2D array-like of shape (Nr, Ns), where [i, j] index (i-th row and j-th
        column) points to the number of j-th heavy species on the left-hand-side of the
        i-th reaction.

        Nr and Ns refer to the number of reactions and heavy species respectively and
        need to be consistent with `reactions_ids` and `species_ids`
        properties/attributes.
        """

    @property
    @abstractmethod
    def reactions_species_stoichiomatrix_rhs(self) -> Sequence[Sequence[int]]:
        """A 2D array-like of shape (Nr, Ns), where [i, j] index (i-th row and j-th
        column) points to the number of j-th heavy species on the right-hand-side of the
        i-th reaction.

        Nr and Ns refer to the number of reactions and heavy species respectively and
        need to be consistent with `reactions_ids` and `species_ids`
        properties/attributes.
        """
class PlasmaParameters(ABC):
# noinspection PyUnresolvedReferences
"""Data class of plasma parameters needed as an input to the ``pygmol`` framework.
See the attributes below. Documentation for the mandatory and optional attributes
can be found in docstrings of the corresponding properties.
Time-dependent power is encoded by two same-length sequences: `power` and `t_power'.
An example of a single 500W pulse for half of the simulation time of 0.2 seconds
might look like ``power = [500, 500, 0, 0]`` with ``t_power = [0.0, 0.1, 0.1, 0.2]``
Attributes
----------
radius, length : float
pressure : float
power : float or Sequence[float]
t_power : Sequence[float], optional
feeds : dict[str, float], optional, default={}
temp_e : float, optional, default=1.0 [eV]
temp_n : float, optional, default=300.0 [K]
t_end : float, optional, default=1.0 [s]
"""
@property
@abstractmethod
def radius(self) -> float:
    """The radius dimension of the cylindrical plasma in [m].

    Abstract property: concrete parameter classes must implement it.
    """
@property
@abstractmethod
def length(self) -> float:
    """The length dimension of the cylindrical plasma in [m].

    Abstract property: concrete parameter classes must implement it.
    """
@property
@abstractmethod
def pressure(self) -> float:
    """Plasma pressure set-point in [Pa].

    Abstract property: concrete parameter classes must implement it.
    """
@property
@abstractmethod
def power(self) -> Union[float, Sequence[float]]:
"""Power deposited | |
import time
class Joc:
    """
    Connect-4 game state. The board has NR_LINII rows and NR_COLOANE columns,
    stored row-major in the flat list ``self.matr``; empty cells hold GOL.

    This version fixes two off-by-one bugs of the original:
      * diagonal scans used ``range(n - NR_CONNECT)`` and therefore never
        examined diagonals starting on the last valid row/column;
      * ``intervale_deschise`` skipped the window ending at the last cell
        (which also meant length-4 diagonals contributed zero intervals).
    """

    NR_COLOANE = 7
    NR_LINII = 6
    NR_CONNECT = 4  # how many adjacent equal symbols win the game
    SIMBOLURI_JUC = ['X', '0']  # e.g. ['G', 'R'] or ['X', '0']
    JMIN = None  # symbol of the human (minimizing) player
    JMAX = None  # symbol of the computer (maximizing) player
    GOL = '.'

    def __init__(self, tabla=None):
        """Wrap an existing flat board list, or create an empty board.

        Uses an explicit ``is not None`` test (not truthiness) so a supplied
        board is never silently replaced. The list is aliased, not copied,
        matching the original behavior relied on by in-place board updates.
        """
        self.matr = tabla if tabla is not None else \
            [Joc.GOL] * (Joc.NR_COLOANE * Joc.NR_LINII)

    def verifica_adiacente(self, lista):
        """Return the winning symbol if ``lista`` contains NR_CONNECT equal,
        non-empty symbols in a row; otherwise return False."""
        simbol = lista[0]
        lungime = 1
        for elem in lista[1:]:
            if elem != simbol:
                simbol = elem
                lungime = 1
            else:
                lungime += 1
            if lungime == Joc.NR_CONNECT and simbol != self.GOL:
                return simbol
        return False

    def _secvente(self):
        """Yield every candidate line: full rows, full columns, and each
        length-NR_CONNECT window of both diagonal directions.

        Shared by ``final`` and ``nr_intervale_deschise`` so the (corrected)
        bounds live in one place. The ``+ 1`` on the diagonal start ranges is
        the off-by-one fix described on the class docstring.
        """
        n, m, c = self.NR_LINII, self.NR_COLOANE, self.NR_CONNECT
        for i in range(n):
            yield [self.matr[i * m + j] for j in range(m)]
        for j in range(m):
            yield [self.matr[i * m + j] for i in range(n)]
        for i in range(n - c + 1):
            for j in range(m - c + 1):
                # diagonal "\" and diagonal "/" starting at (i, j)
                yield [self.matr[(i + k) * m + (j + k)] for k in range(c)]
                yield [self.matr[(i + k) * m + (j + c - k - 1)] for k in range(c)]

    def final(self):
        """Return the winner's symbol, 'remiza' for a full drawn board,
        or False while the game is still in progress."""
        for secventa in self._secvente():
            castigator = self.verifica_adiacente(secventa)
            if castigator:
                return castigator
        if Joc.GOL not in self.matr:
            return 'remiza'
        return False

    def mutari(self, jucator_opus):
        """Return the list of Joc states obtained by dropping one
        ``jucator_opus`` piece into each non-full column."""
        l_mutari = []
        for coloana in range(self.NR_COLOANE):
            if self.matr[coloana] != self.GOL:
                continue  # top cell occupied -> column is full
            matr_noua = self.matr[:]
            # scan the column bottom-up for the first empty cell
            for linie_adaugare in range(self.NR_LINII - 1, -1, -1):
                if matr_noua[linie_adaugare * self.NR_COLOANE + coloana] == self.GOL:
                    break
            matr_noua[linie_adaugare * self.NR_COLOANE + coloana] = jucator_opus
            l_mutari.append(Joc(matr_noua))
        return l_mutari

    def intervale_deschise(self, lista, juc_opus):
        """Count the length-NR_CONNECT windows of ``lista`` that contain no
        opponent symbol (``juc_opus``). The ``+ 1`` fixes the original
        off-by-one which ignored the window ending at the last cell."""
        return sum(
            1
            for i in range(len(lista) - self.NR_CONNECT + 1)
            if juc_opus not in lista[i:i + self.NR_CONNECT]
        )

    def nr_intervale_deschise(self, jucator):
        """Total number of NR_CONNECT-cell intervals (rows, columns, both
        diagonals) still open for ``jucator``, i.e. free of the opponent."""
        juc_opus = Joc.JMIN if jucator == Joc.JMAX else Joc.JMAX
        return sum(self.intervale_deschise(s, juc_opus)
                   for s in self._secvente())

    def fct_euristica(self):
        """Heuristic: open intervals for JMAX minus open intervals for JMIN."""
        return self.nr_intervale_deschise(Joc.JMAX) \
            - self.nr_intervale_deschise(Joc.JMIN)

    def estimeaza_scor(self, adancime):
        """Score a state: large +/- values (favoring shallow wins via
        ``adancime``) for decided games, 0 for a draw, heuristic otherwise."""
        t_final = self.final()
        if t_final == Joc.JMAX:
            return 999 + adancime
        elif t_final == Joc.JMIN:
            return -999 - adancime
        elif t_final == 'remiza':
            return 0
        return self.fct_euristica()

    def __str__(self):
        """Render the board with a column-index header row."""
        sir = ' '.join(str(c) for c in range(self.NR_COLOANE)) + ' \n'
        for lin in range(self.NR_LINII):
            k = lin * self.NR_COLOANE
            sir += ' '.join(str(x) for x in self.matr[k:k + self.NR_COLOANE]) + '\n'
        return sir
class Stare:
    """
    Search-tree node used by the minimax and alpha-beta algorithms.

    Wraps a board configuration (a Joc instance) together with search
    bookkeeping. Requires Joc.JMIN and Joc.JMAX to be set beforehand, and
    Joc to expose a ``mutari()`` method returning successor boards.
    """

    ADANCIME_MAX = None  # maximum search depth, configured at startup

    def __init__(self, tabla_joc, j_curent, adancime, parinte=None, scor=None):
        self.tabla_joc = tabla_joc      # board configuration for this node
        self.j_curent = j_curent        # symbol of the player to move
        self.adancime = adancime        # remaining depth in the search tree
        self.scor = scor                # score of this state / best child
        self.mutari_posibile = []       # successor states, filled lazily
        self.stare_aleasa = None        # best successor for the current player

    def jucator_opus(self):
        """Return the symbol of the player who is NOT to move."""
        return Joc.JMAX if self.j_curent == Joc.JMIN else Joc.JMIN

    def mutari(self):
        """Build successor Stare nodes for every legal move of the
        current player, one level deeper in the tree."""
        adversar = self.jucator_opus()
        return [
            Stare(tabla, adversar, self.adancime - 1, parinte=self)
            for tabla in self.tabla_joc.mutari(self.j_curent)
        ]

    def __str__(self):
        return str(self.tabla_joc) + "(Juc curent: " + self.j_curent + ")\n"
""" Algoritmul MinMax """
def min_max(stare):
    """Plain minimax: score ``stare`` and pick its best child.

    Leaves (depth exhausted or game over) are scored directly; interior
    nodes take the max (for JMAX) or min (for JMIN) over their children.
    Mutates and returns ``stare``.
    """
    # leaf: evaluate and stop recursing
    if stare.adancime == 0 or stare.tabla_joc.final():
        stare.scor = stare.tabla_joc.estimeaza_scor(stare.adancime)
        return stare
    # expand and recursively evaluate every child subtree
    stare.mutari_posibile = stare.mutari()
    fii_evaluati = [min_max(fiu) for fiu in stare.mutari_posibile]
    # JMAX maximizes, JMIN minimizes
    alege = max if stare.j_curent == Joc.JMAX else min
    stare.stare_aleasa = alege(fii_evaluati, key=lambda fiu: fiu.scor)
    stare.scor = stare.stare_aleasa.scor
    return stare
def alpha_beta(alpha, beta, stare):
    """Minimax with alpha-beta pruning. Mutates and returns ``stare``.

    ``alpha``/``beta`` bound the window of scores still relevant to the
    ancestors; subtrees proven outside the window are cut off.
    """
    # leaf: evaluate directly
    if stare.adancime == 0 or stare.tabla_joc.final():
        stare.scor = stare.tabla_joc.estimeaza_scor(stare.adancime)
        return stare
    # window already closed: this node cannot influence the result
    if alpha >= beta:
        return stare
    stare.mutari_posibile = stare.mutari()
    maximizeaza = stare.j_curent == Joc.JMAX
    scor_curent = float('-inf') if maximizeaza else float('inf')
    for mutare in stare.mutari_posibile:
        fiu = alpha_beta(alpha, beta, mutare)
        if maximizeaza:
            if fiu.scor > scor_curent:
                scor_curent = fiu.scor
                stare.stare_aleasa = fiu
            alpha = max(alpha, fiu.scor)
        else:
            if fiu.scor < scor_curent:
                scor_curent = fiu.scor
                stare.stare_aleasa = fiu
            beta = min(beta, fiu.scor)
        if alpha >= beta:
            break  # prune remaining siblings
    stare.scor = stare.stare_aleasa.scor
    return stare
def afis_daca_final(stare_curenta):
    """If the game in ``stare_curenta`` is over, print the outcome and
    return True; otherwise return False.

    (Possible optimization, as in the original: take the last-move position
    and check only its row/column/diagonals instead of the whole board.)
    """
    rezultat = stare_curenta.tabla_joc.final()
    if not rezultat:
        return False
    print("Remiza!" if rezultat == "remiza" else "A castigat " + rezultat)
    return True
def main():
#initializare algoritm
raspuns_valid=False
while not raspuns_valid:
tip_algoritm=input("Algorimul folosit? (raspundeti cu 1 sau 2)\n 1.Minimax\n 2.Alpha-beta\n ")
if tip_algoritm in ['1','2']:
raspuns_valid=True
else:
print("Nu ati ales o varianta corecta.")
# initializare ADANCIME_MAX
raspuns_valid = False
while not raspuns_valid:
n = input("Adancime maxima a arborelui: ")
if n.isdigit():
Stare.ADANCIME_MAX = int(n)
raspuns_valid = True
else:
print("Trebuie sa introduceti un numar natural nenul.")
# initializare jucatori
[s1, s2] = Joc.SIMBOLURI_JUC.copy() # lista de simboluri posibile
raspuns_valid = False
while not raspuns_valid:
Joc.JMIN = str(input("Doriti sa jucati cu {} sau cu {}? ".format(s1, s2))).upper()
if (Joc.JMIN in Joc.SIMBOLURI_JUC):
raspuns_valid = True
else:
print("Raspunsul trebuie sa fie {} sau {}.".format(s1, s2))
Joc.JMAX = s1 if Joc.JMIN == s2 else s2
#initializare tabla
tabla_curenta = Joc()
print("Tabla initiala")
print(str(tabla_curenta))
#creare stare initiala
stare_curenta = Stare(tabla_curenta, Joc.SIMBOLURI_JUC[0], Stare.ADANCIME_MAX)
linie = -1
coloana = -1
while True :
if (stare_curenta.j_curent == Joc.JMIN):
#muta jucatorul
raspuns_valid=False
while not raspuns_valid:
try:
coloana = int(input("coloana = "))
if 0 <= coloana < Joc.NR_COLOANE:
if stare_curenta.tabla_joc.matr[coloana] != Joc.GOL:
print("Toata coloana este ocupata.")
else:
for linie in range(Joc.NR_LINII - 1, -1, -1):
if stare_curenta.tabla_joc.matr[linie * Joc.NR_COLOANE + coloana] == Joc.GOL:
break
raspuns_valid = True
else:
print("Coloana invalida (trebuie sa fie un numar intre 0 si {}).".format(Joc.NR_COLOANE - 1))
except ValueError:
print("Coloana trebuie sa fie un numar intreg.")
#dupa iesirea din while sigur am valida coloana
#deci pot plasa simbolul pe "tabla de joc"
pozitie = linie * Joc.NR_COLOANE + coloana
stare_curenta.tabla_joc.matr[pozitie] = Joc.JMIN
#afisarea starii jocului in urma mutarii | |
),
migrations.CreateModel(
name='SKPDAsalGedungBangunanDinkesJuai',
fields=[
],
options={
'verbose_name': '05 SKPD Asal Gedung Dinkes Juai',
'proxy': True,
'verbose_name_plural': '05 SKPD Asal Gedung Dinkes Juai',
},
bases=('gedungbangunan.skpdasalgedungbangunan',),
),
migrations.CreateModel(
name='SKPDAsalGedungBangunanDinkesKantor',
fields=[
],
options={
'verbose_name': '05 SKPD Asal Gedung Dinkes Kantor',
'proxy': True,
'verbose_name_plural': '05 SKPD Asal Gedung Dinkes Kantor',
},
bases=('gedungbangunan.skpdasalgedungbangunan',),
),
migrations.CreateModel(
name='SKPDAsalGedungBangunanDinkesLampihong',
fields=[
],
options={
'verbose_name': '05 SKPD Asal Gedung Dinkes Lampihong',
'proxy': True,
'verbose_name_plural': '05 SKPD Asal Gedung Dinkes Lampihong',
},
bases=('gedungbangunan.skpdasalgedungbangunan',),
),
migrations.CreateModel(
name='SKPDAsalGedungBangunanDinkesLokbatu',
fields=[
],
options={
'verbose_name': '05 SKPD Asal Gedung Dinkes Lokbatu',
'proxy': True,
'verbose_name_plural': '05 SKPD Asal Gedung Dinkes Lokbatu',
},
bases=('gedungbangunan.skpdasalgedungbangunan',),
),
migrations.CreateModel(
name='SKPDAsalGedungBangunanDinkesParingin',
fields=[
],
options={
'verbose_name': '05 SKPD Asal Gedung Dinkes Paringin',
'proxy': True,
'verbose_name_plural': '05 SKPD Asal Gedung Dinkes Paringin',
},
bases=('gedungbangunan.skpdasalgedungbangunan',),
),
migrations.CreateModel(
name='SKPDAsalGedungBangunanDinkesParinginSelatan',
fields=[
],
options={
'verbose_name': '05 SKPD Asal Gedung Dinkes Paringin Selatan',
'proxy': True,
'verbose_name_plural': '05 SKPD Asal Gedung Dinkes Paringin Selatan',
},
bases=('gedungbangunan.skpdasalgedungbangunan',),
),
migrations.CreateModel(
name='SKPDAsalGedungBangunanDinkesPirsus',
fields=[
],
options={
'verbose_name': '05 SKPD Asal Gedung Dinkes Pirsus',
'proxy': True,
'verbose_name_plural': '05 SKPD Asal Gedung Dinkes Pirsus',
},
bases=('gedungbangunan.skpdasalgedungbangunan',),
),
migrations.CreateModel(
name='SKPDAsalGedungBangunanDinkesRSUD',
fields=[
],
options={
'verbose_name': '05 SKPD Asal Gedung Dinkes RSUD',
'proxy': True,
'verbose_name_plural': '05 SKPD Asal Gedung Dinkes RSUD',
},
bases=('gedungbangunan.skpdasalgedungbangunan',),
),
migrations.CreateModel(
name='SKPDAsalGedungBangunanDinkesTanahHabang',
fields=[
],
options={
'verbose_name': '05 SKPD Asal Gedung Dinkes Tanah Habang',
'proxy': True,
'verbose_name_plural': '05 SKPD Asal Gedung Dinkes Tanah Habang',
},
bases=('gedungbangunan.skpdasalgedungbangunan',),
),
migrations.CreateModel(
name='SKPDAsalGedungBangunanDinkesTebingTinggi',
fields=[
],
options={
'verbose_name': '05 SKPD Asal Gedung Dinkes Tebing Tinggi',
'proxy': True,
'verbose_name_plural': '05 SKPD Asal Gedung Dinkes Tebing Tinggi',
},
bases=('gedungbangunan.skpdasalgedungbangunan',),
),
migrations.CreateModel(
name='SKPDAsalGedungBangunanDinkesUren',
fields=[
],
options={
'verbose_name': '05 SKPD Asal Gedung Dinkes Uren',
'proxy': True,
'verbose_name_plural': '05 SKPD Asal Gedung Dinkes Uren',
},
bases=('gedungbangunan.skpdasalgedungbangunan',),
),
migrations.CreateModel(
name='SKPDAsalGedungBangunanDisdik',
fields=[
],
options={
'verbose_name': '07 SKPD Asal Gedung Disdik',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal Gedung Disdik',
},
bases=('gedungbangunan.skpdasalgedungbangunan',),
),
migrations.CreateModel(
name='SKPDAsalGedungBangunanDisdikAwayan',
fields=[
],
options={
'verbose_name': '07 SKPD Asal Gedung Disdik Awayan',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal Gedung Disdik Awayan',
},
bases=('gedungbangunan.skpdasalgedungbangunan',),
),
migrations.CreateModel(
name='SKPDAsalGedungBangunanDisdikBatumandi',
fields=[
],
options={
'verbose_name': '07 SKPD Asal Gedung Disdik Batumandi',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal Gedung Disdik Batumandi',
},
bases=('gedungbangunan.skpdasalgedungbangunan',),
),
migrations.CreateModel(
name='SKPDAsalGedungBangunanDisdikHalong',
fields=[
],
options={
'verbose_name': '07 SKPD Asal Gedung Disdik Halong',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal Gedung Disdik Halong',
},
bases=('gedungbangunan.skpdasalgedungbangunan',),
),
migrations.CreateModel(
name='SKPDAsalGedungBangunanDisdikJuai',
fields=[
],
options={
'verbose_name': '07 SKPD Asal Gedung Disdik Juai',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal Gedung Disdik Juai',
},
bases=('gedungbangunan.skpdasalgedungbangunan',),
),
migrations.CreateModel(
name='SKPDAsalGedungBangunanDisdikKantor',
fields=[
],
options={
'verbose_name': '07 SKPD Asal Gedung Disdik Kantor',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal Gedung Disdik Kantor',
},
bases=('gedungbangunan.skpdasalgedungbangunan',),
),
migrations.CreateModel(
name='SKPDAsalGedungBangunanDisdikLampihong',
fields=[
],
options={
'verbose_name': '07 SKPD Asal Gedung Disdik Lampihong',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal Gedung Disdik Lampihong',
},
bases=('gedungbangunan.skpdasalgedungbangunan',),
),
migrations.CreateModel(
name='SKPDAsalGedungBangunanDisdikParingin',
fields=[
],
options={
'verbose_name': '07 SKPD Asal Gedung Disdik Paringin',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal Gedung Disdik Paringin',
},
bases=('gedungbangunan.skpdasalgedungbangunan',),
),
migrations.CreateModel(
name='SKPDAsalGedungBangunanDisdikParinginSelatan',
fields=[
],
options={
'verbose_name': '07 SKPD Asal Gedung Disdik Paringin Selatan',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal Gedung Disdik Paringin Selatan',
},
bases=('gedungbangunan.skpdasalgedungbangunan',),
),
migrations.CreateModel(
name='SKPDAsalGedungBangunanDisdikSMPN1Awayan',
fields=[
],
options={
'verbose_name': '07 SKPD Asal Gedung Disdik SMPN 1 Awayan',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal Gedung Disdik SMPN 1 Awayan',
},
bases=('gedungbangunan.skpdasalgedungbangunan',),
),
migrations.CreateModel(
name='SKPDAsalGedungBangunanDisdikSMPN1Batumandi',
fields=[
],
options={
'verbose_name': '07 SKPD Asal Gedung Disdik SMPN 1 Batumandi',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal Gedung Disdik SMPN 1 Batumandi',
},
bases=('gedungbangunan.skpdasalgedungbangunan',),
),
migrations.CreateModel(
name='SKPDAsalGedungBangunanDisdikSMPN1Halong',
fields=[
],
options={
'verbose_name': '07 SKPD Asal Gedung Disdik SMPN 1 Halong',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal Gedung Disdik SMPN 1 Halong',
},
bases=('gedungbangunan.skpdasalgedungbangunan',),
),
migrations.CreateModel(
name='SKPDAsalGedungBangunanDisdikSMPN1Juai',
fields=[
],
options={
'verbose_name': '07 SKPD Asal Gedung Disdik SMPN 1 Juai',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal Gedung Disdik SMPN 1 Juai',
},
bases=('gedungbangunan.skpdasalgedungbangunan',),
),
migrations.CreateModel(
name='SKPDAsalGedungBangunanDisdikSMPN1Lampihong',
fields=[
],
options={
'verbose_name': '07 SKPD Asal Gedung Disdik SMPN 1 Lampihong',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal Gedung Disdik SMPN 1 Lampihong',
},
bases=('gedungbangunan.skpdasalgedungbangunan',),
),
migrations.CreateModel(
name='SKPDAsalGedungBangunanDisdikSMPN1Paringin',
fields=[
],
options={
'verbose_name': '07 SKPD Asal Gedung Disdik SMPN 1 Paringin',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal Gedung Disdik SMPN 1 Paringin',
},
bases=('gedungbangunan.skpdasalgedungbangunan',),
),
migrations.CreateModel(
name='SKPDAsalGedungBangunanDisdikSMPN2Awayan',
fields=[
],
options={
'verbose_name': '07 SKPD Asal Gedung Disdik SMPN 2 Awayan',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal Gedung Disdik SMPN 2 Awayan',
},
bases=('gedungbangunan.skpdasalgedungbangunan',),
),
migrations.CreateModel(
name='SKPDAsalGedungBangunanDisdikSMPN2Batumandi',
fields=[
],
options={
'verbose_name': '07 SKPD Asal Gedung Disdik SMPN 2 Batumandi',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal Gedung Disdik SMPN 2 Batumandi',
},
bases=('gedungbangunan.skpdasalgedungbangunan',),
),
migrations.CreateModel(
name='SKPDAsalGedungBangunanDisdikSMPN2Halong',
fields=[
],
options={
'verbose_name': '07 SKPD Asal Gedung Disdik SMPN 2 Halong',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal Gedung Disdik SMPN 2 Halong',
},
bases=('gedungbangunan.skpdasalgedungbangunan',),
),
migrations.CreateModel(
name='SKPDAsalGedungBangunanDisdikSMPN2Juai',
fields=[
],
options={
'verbose_name': '07 SKPD Asal Gedung Disdik SMPN 2 Juai',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal Gedung Disdik SMPN 2 Juai',
},
bases=('gedungbangunan.skpdasalgedungbangunan',),
),
migrations.CreateModel(
name='SKPDAsalGedungBangunanDisdikSMPN2Lampihong',
fields=[
],
options={
'verbose_name': '07 SKPD Asal Gedung Disdik SMPN 2 Lampihong',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal Gedung Disdik SMPN 2 Lampihong',
},
bases=('gedungbangunan.skpdasalgedungbangunan',),
),
migrations.CreateModel(
name='SKPDAsalGedungBangunanDisdikSMPN2Paringin',
fields=[
],
options={
'verbose_name': '07 SKPD Asal Gedung Disdik SMPN 2 Paringin',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal Gedung Disdik SMPN 2 Paringin',
},
bases=('gedungbangunan.skpdasalgedungbangunan',),
),
migrations.CreateModel(
name='SKPDAsalGedungBangunanDisdikSMPN3Awayan',
fields=[
],
options={
'verbose_name': '07 SKPD Asal Gedung Disdik SMPN 3 Awayan',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal Gedung Disdik SMPN 3 Awayan',
},
bases=('gedungbangunan.skpdasalgedungbangunan',),
),
migrations.CreateModel(
name='SKPDAsalGedungBangunanDisdikSMPN3Batumandi',
fields=[
],
options={
'verbose_name': '07 SKPD Asal Gedung Disdik SMPN 3 Batumandi',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal Gedung Disdik SMPN 3 Batumandi',
},
bases=('gedungbangunan.skpdasalgedungbangunan',),
),
migrations.CreateModel(
name='SKPDAsalGedungBangunanDisdikSMPN3Halong',
fields=[
],
options={
'verbose_name': '07 SKPD Asal Gedung Disdik SMPN 3 Halong',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal Gedung Disdik SMPN 3 Halong',
},
bases=('gedungbangunan.skpdasalgedungbangunan',),
),
migrations.CreateModel(
name='SKPDAsalGedungBangunanDisdikSMPN3Paringin',
fields=[
],
options={
'verbose_name': '07 SKPD Asal Gedung Disdik SMPN 3 Paringin',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal Gedung Disdik SMPN 3 Paringin',
},
bases=('gedungbangunan.skpdasalgedungbangunan',),
),
migrations.CreateModel(
name='SKPDAsalGedungBangunanDisdikSMPN4Awayan',
fields=[
],
options={
'verbose_name': '07 SKPD Asal Gedung Disdik SMPN 4 Awayan',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal Gedung Disdik SMPN 4 Awayan',
},
bases=('gedungbangunan.skpdasalgedungbangunan',),
),
migrations.CreateModel(
name='SKPDAsalGedungBangunanDisdikSMPN4Batumandi',
fields=[
],
options={
'verbose_name': '07 SKPD Asal Gedung Disdik SMPN 4 Batumandi',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal Gedung Disdik SMPN 4 Batumandi',
},
bases=('gedungbangunan.skpdasalgedungbangunan',),
),
migrations.CreateModel(
name='SKPDAsalGedungBangunanDisdikSMPN4Halong',
fields=[
],
options={
'verbose_name': '07 SKPD Asal Gedung Disdik SMPN 4 Halong',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal Gedung Disdik SMPN 4 Halong',
},
bases=('gedungbangunan.skpdasalgedungbangunan',),
),
migrations.CreateModel(
name='SKPDAsalGedungBangunanDisdikSMPN4Paringin',
fields=[
],
options={
'verbose_name': '07 SKPD Asal Gedung Disdik SMPN 4 Paringin',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal Gedung Disdik SMPN 4 Paringin',
},
bases=('gedungbangunan.skpdasalgedungbangunan',),
),
migrations.CreateModel(
name='SKPDAsalGedungBangunanDisdikSMPN5Halong',
fields=[
],
options={
'verbose_name': '07 SKPD Asal Gedung Disdik SMPN 5 Halong',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal Gedung Disdik SMPN 5 Halong',
},
bases=('gedungbangunan.skpdasalgedungbangunan',),
),
migrations.CreateModel(
name='SKPDAsalGedungBangunanDisdikSMPN5Paringin',
fields=[
],
options={
'verbose_name': '07 SKPD Asal Gedung Disdik SMPN 5 Paringin',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal Gedung Disdik SMPN 5 Paringin',
},
bases=('gedungbangunan.skpdasalgedungbangunan',),
),
migrations.CreateModel(
name='SKPDAsalGedungBangunanDisdikTebingTinggi',
fields=[
],
options={
'verbose_name': '07 SKPD Asal Gedung Disdik Tebing Tinggi',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal Gedung Disdik Tebing Tinggi',
},
bases=('gedungbangunan.skpdasalgedungbangunan',),
),
migrations.CreateModel(
name='SKPDAsalGedungBangunanDishub',
fields=[
],
options={
'verbose_name': '04 SKPD Asal Gedung Dishub',
'proxy': True,
'verbose_name_plural': '04 SKPD Asal Gedung Dishub',
},
bases=('gedungbangunan.skpdasalgedungbangunan',),
),
migrations.CreateModel(
name='SKPDAsalGedungBangunanDisnakertrans',
fields=[
],
options={
'verbose_name': '41 SKPD Asal Gedung Disnakertrans',
'proxy': True,
'verbose_name_plural': '41 SKPD Asal Gedung Disnakertrans',
},
bases=('gedungbangunan.skpdasalgedungbangunan',),
),
migrations.CreateModel(
name='SKPDAsalGedungBangunanDistamben',
fields=[
],
options={
'verbose_name': '17 SKPD Asal Gedung Distamben',
'proxy': True,
'verbose_name_plural': '17 SKPD Asal Gedung Distamben',
},
bases=('gedungbangunan.skpdasalgedungbangunan',),
),
migrations.CreateModel(
name='SKPDAsalGedungBangunanDKO',
fields=[
],
options={
'verbose_name': '23 SKPD Asal Gedung DKO',
'proxy': True,
'verbose_name_plural': '23 SKPD Asal Gedung DKO',
},
bases=('gedungbangunan.skpdasalgedungbangunan',),
),
migrations.CreateModel(
name='SKPDAsalGedungBangunanDKP',
fields=[
],
options={
'verbose_name': '15 SKPD Asal Gedung DKP',
'proxy': True,
'verbose_name_plural': '15 SKPD Asal Gedung DKP',
},
bases=('gedungbangunan.skpdasalgedungbangunan',),
),
migrations.CreateModel(
name='SKPDAsalGedungBangunanDKUKMP',
fields=[
],
options={
'verbose_name': '16 SKPD Asal Gedung DKUKMP',
'proxy': True,
'verbose_name_plural': '16 SKPD Asal Gedung DKUKMP',
},
bases=('gedungbangunan.skpdasalgedungbangunan',),
),
migrations.CreateModel(
name='SKPDAsalGedungBangunanDLH',
fields=[
],
options={
'verbose_name': '22 SKPD Asal Gedung DLH',
'proxy': True,
'verbose_name_plural': '22 SKPD Asal Gedung DLH',
| |
= np.std([poly.area for poly in polygon_list])\
/ img_area * 100.
return pd.DataFrame({'Class': CLASSES, 'Counts': counts,
'TotalArea': total_area, 'MeanArea': mean_area,
'STDArea': std_area})
def collect_stats():
    """
    Run ``image_stat`` over every training image, tag each result with its
    ImageId, and concatenate everything into one DataFrame. Writes a simple
    text progress bar to stdout while iterating.
    :return: concatenated per-image statistics DataFrame
    """
    frames = []
    last = len(all_train_names) - 1
    for idx, image_id in enumerate(all_train_names):
        frame = image_stat(image_id)
        frame['ImageId'] = image_id
        frames.append(frame)
        sys.stdout.write(
            '\rCollecting class stats [{}{}] {}%'.format(
                '=' * idx, ' ' * (last - idx), 100 * idx / last))
        sys.stdout.flush()
    sys.stdout.write('\n')
    return pd.concat(frames)
def calculate_class_weights():
    """
    :return: per-class ratio true-label-area / false-label-area as a dict,
        where 2500. is the total image area in grid units.
    """
    totals = (
        collect_stats()
        .fillna(0)
        .pivot(index='Class', columns='ImageId', values='TotalArea')
        .sum(axis=1)
    )
    return (totals / (2500. - totals)).to_dict()
def plot_stats(value, title):
    """
    Draw a Class x Image heat-map of one statistic column
    (MeanArea, Counts, TotalArea or STDArea).
    :param value: name of the statistics column to plot
    :param title: plot title
    :return: None (draws on the current matplotlib figure)
    """
    pvt = collect_stats().pivot(index='Class', columns='ImageId', values=value)
    pvt.fillna(0., inplace=True)
    fig, ax = plt.subplots(figsize=(10, 4))
    heatmap = ax.imshow(pvt, interpolation='nearest', cmap=plt.cm.plasma,
                        extent=[0, 25, 10, 0])
    ax.set_xlabel('Image')
    ax.set_ylabel('Class Type')
    # cell-centered tick positions for a 25-image x 10-class grid
    ax.set_xticks(np.arange(0.5, 25.4, 1))
    ax.set_yticks(np.arange(0.5, 10.4, 1))
    ax.set_xticklabels(np.arange(1, 26))
    ax.set_yticklabels(pvt.index)
    ax.set_title(title)
    fig.colorbar(heatmap)
def plot_bar_stats():
    """
    Draw a stacked bar chart of cumulative class TotalArea per image,
    one seaborn bar layer per class (drawn back-to-front so the cumulative
    sums stack visually).
    """
    pvt = collect_stats().pivot(index='Class', columns='ImageId',
                                values='TotalArea')
    perc_area = np.cumsum(pvt, axis=0)
    sns.set_style('white')
    sns.set_context({'figure.figsize': (12, 8)})
    # reverse lookup: class display name -> class id (for COLORS)
    name_to_id = {CLASSES[cl]: cl for cl in CLASSES}
    for offset in np.arange(1, 11):
        class_name = perc_area.index[-offset]
        ax = sns.barplot(x=perc_area.columns, y=perc_area.loc[class_name],
                         color=COLORS[name_to_id[class_name]],
                         label=class_name)
    ax.legend(loc=2)
    sns.despine(left=True)
    ax.set_xlabel('Image ID')
    ax.set_ylabel('Class Type')
    ax.set_xticklabels(perc_area.columns, rotation=-60)
def jaccard_index(mask_1, mask_2):
    """
    Jaccard index (intersection over union) of two binary 2D masks.
    Returns 1. when both masks are empty (union area zero).
    :param mask_1: 2D array with values in [0, 1]
    :param mask_2: 2D array with values in [0, 1]
    :return: IoU score
    """
    assert len(mask_1.shape) == len(mask_2.shape) == 2
    assert 0 <= np.amax(mask_1) <= 1
    assert 0 <= np.amax(mask_2) <= 1
    a = mask_1.astype(np.float32)
    b = mask_2.astype(np.float32)
    intersection = np.sum(a * b)
    union = np.sum(a + b) - intersection
    return 1. if union == 0 else intersection / union
def mask_to_polygons(mask, img_id, epsilon=1, min_area=1., test=True):
    """
    Vectorize a binary mask into a shapely MultiPolygon scaled to the
    image's WKT grid coordinates.

    :param mask: 2D binary mask (1 = class present)
    :param img_id: key into test_IDs_dict / train_IDs_dict
    :param epsilon: approximation tolerance for cv2.approxPolyDP
    :param min_area: minimum contour area kept (drops artifacts)
    :param test: if True look the image up in the test dict, else train
    :return: MultiPolygon in grid coordinates
    """
    # find contours, cv2 switches the x-y coordinate of mask to y-x in contours
    # This matches the wkt data in train_wkt_v4, which is desirable for submission
    # NOTE(review): 3-value unpacking requires OpenCV 3.x; cv2 4.x returns only
    # (contours, hierarchy) -- confirm the pinned cv2 version.
    image, contours, hierarchy = cv2.findContours(
        ((mask == 1) * 255).astype(np.uint8),
        cv2.RETR_CCOMP, cv2.CHAIN_APPROX_TC89_KCOS)
    # create approximate contours
    approx_contours = [cv2.approxPolyDP(cnt, epsilon, True)
                       for cnt in contours]
    if not contours:
        return MultiPolygon()
    # group hole contours under their parent (outer) contour
    cnt_children = defaultdict(list)
    child_contours = set()
    assert hierarchy.shape[0] == 1
    for idx, (_, _, _, parent_idx) in enumerate(hierarchy[0]):
        if parent_idx != -1:
            child_contours.add(idx)
            cnt_children[parent_idx].append(approx_contours[idx])
    # create actual polygons, filtering by area (removes artifacts)
    all_polygons = []
    for idx, cnt in enumerate(approx_contours):
        if idx not in child_contours and cv2.contourArea(cnt) >= min_area:
            assert cnt.shape[1] == 1
            poly = Polygon(shell=cnt[:, 0, :],
                           holes=[c[:, 0, :] for c in cnt_children.get(idx, [])
                                  if cv2.contourArea(c) >= min_area])
            all_polygons.append(poly)
    # approximating polygons might have created invalid ones, fix them
    all_polygons = MultiPolygon(all_polygons)
    if not all_polygons.is_valid:
        all_polygons = all_polygons.buffer(0)
        # Sometimes buffer() converts a simple Multipolygon to just a Polygon,
        # need to keep it a Multi throughout
        if all_polygons.type == 'Polygon':
            all_polygons = MultiPolygon([all_polygons])
    # scale from pixel coordinates to the per-image WKT grid extents
    # (renamed from `id`, which shadowed the builtin)
    grid_id = test_IDs_dict[img_id] if test else train_IDs_dict[img_id]
    x_max = grid_sizes[grid_sizes.ImageId == grid_id].Xmax.values[0]
    y_min = grid_sizes[grid_sizes.ImageId == grid_id].Ymin.values[0]
    x_scaler, y_scaler = x_max / mask.shape[1], y_min / mask.shape[0]
    scaled_pred_polygons = scale(all_polygons, xfact=x_scaler,
                                 yfact=y_scaler, origin=(0., 0., 0.))
    return scaled_pred_polygons
def polygon_jaccard(final_polygons, train_polygons):
    """
    Jaccard index of two (Multi)Polygon geometries: intersection area over
    union area.

    Returns 1. when the union has zero area (both geometries empty), which
    mirrors the behavior of ``jaccard_index`` for empty masks and avoids the
    ZeroDivisionError the original raised in that case.

    :param final_polygons: predicted shapely (Multi)Polygon
    :param train_polygons: ground-truth shapely (Multi)Polygon
    :return: IoU score
    """
    union_area = final_polygons.union(train_polygons).area
    if union_area == 0:
        return 1.
    return final_polygons.intersection(train_polygons).area / union_area
class ImageData:
def __init__(self, image_id, phase='train'):
    """Resolve the dataset image id and initialize all lazy caches.

    :param image_id: short id, looked up in train_IDs_dict / test_IDs_dict
    :param phase: 'train' (also computes per-image class stats) or 'test'
    """
    if phase == 'train':
        self.image_id = train_IDs_dict[image_id]
        self.stat = image_stat(self.image_id)
    else:
        self.image_id = test_IDs_dict[image_id]
        self.stat = None
    # lazily-populated caches, filled by the load_*/create_* methods
    self.three_band_image = None
    self.sixteen_band_image = None
    self.image = None
    self.image_size = None
    self._xymax = None
    self.label = None
    self.crop_image = None
    self.train_feature = None
    self.pred_mask = None
def load_pre_mask(self):
    # Reset any previously stored prediction mask.
    # NOTE(review): the name reads like a typo of "load_pred_mask" and the
    # body only clears the attribute rather than loading anything — confirm
    # intended behavior against the callers.
    self.pred_mask = None
def load_image(self):
    """
    Load the registered three-band and sixteen-band images at a common
    resolution, caching the stacked image, its (H, W) size, and the WKT
    grid extents used for polygon scaling.
    :return: None (populates instance attributes)
    """
    im = self.image_stack()
    self.three_band_image = im[..., 0:3]
    self.sixteen_band_image = im[..., 3:]
    self.image = im
    self.image_size = np.shape(im)[0: 2]
    # grid_sizes provides the Xmax / Ymin extents for this image; the second
    # value was misleadingly named `ymax` in the original although it holds
    # the Ymin column.
    xmax = grid_sizes[grid_sizes.ImageId == self.image_id].Xmax.values[0]
    ymin = grid_sizes[grid_sizes.ImageId == self.image_id].Ymin.values[0]
    self._xymax = [xmax, ymin]
def get_image_path(self):
    """
    Map each band key ('3', 'A', 'M', 'P') to the path of its .tif file.
    :return: dict of band key -> file path
    """
    templates = {
        '3': '{}/data/three_band/{}.tif',
        'A': '{}/data/sixteen_band/{}_A.tif',
        'M': '{}/data/sixteen_band/{}_M.tif',
        'P': '{}/data/sixteen_band/{}_P.tif',
    }
    return {band: tpl.format(data_dir, self.image_id)
            for band, tpl in templates.items()}
def read_image(self):
    """
    Read all original images from disk, move channels last for the
    multi-channel bands, and resize A/M/P to match the three-band
    resolution.
    :return: dict of band key -> ndarray
    """
    images = {}
    for key, path in self.get_image_path().items():
        raw = tifffile.imread(path)
        # 'P' is single-channel; the others come channels-first from tifffile
        images[key] = raw if key == 'P' else np.transpose(raw, (1, 2, 0))
    nx, ny, _ = images['3'].shape
    for key in ('A', 'M', 'P'):
        images[key] = resize(images[key], [nx, ny])
    return images
def image_stack(self):
    """
    Resample all images to highest resolution, align the A and M bands to
    the three-band image using precomputed warp matrices, and stack
    everything into one (H, W, C) array: 3 RGB + A + M + 1 panchromatic.
    :return: stacked, aligned image array
    """
    images = self.read_image()
    im3 = images['3']
    ima = images['A']
    imm = images['M']
    imp = images['P']
    # panchromatic band is 2D; give it a trailing channel axis for stacking
    imp = np.expand_dims(imp, 2)
    [nx, ny, _] = im3.shape
    # NOTE(review): np.load on a '.npz' path returns an NpzFile archive, not
    # a bare ndarray; either affine_transform unpacks it or these files are
    # plain arrays despite the extension — verify.
    warp_matrix_a = np.load(
        (data_dir +
         '/utils/image_alignment/{}_warp_matrix_a.npz').format(self.image_id)
    )
    warp_matrix_m = np.load(
        (data_dir +
         '/utils/image_alignment/{}_warp_matrix_m.npz').format(self.image_id)
    )
    ima = affine_transform(ima, warp_matrix_a, [nx, ny])
    imm = affine_transform(imm, warp_matrix_m, [nx, ny])
    im = np.concatenate((im3, ima, imm, imp), axis=-1)
    return im
def create_label(self):
    """Rasterize the class polygons into a (H, W, n_classes) uint8 stack.

    Stores the result in ``self.label``; loads the image first if needed
    so that ``image_size`` and ``_xymax`` are available.
    :return:
    """
    if self.image is None:
        self.load_image()
    shape = np.append(self.image_size, len(CLASSES))
    labels = np.zeros(shape, np.uint8)
    for class_id in CLASSES:
        polygons = get_polygon_list(self.image_id, class_id)
        perimeters, interiors = generate_contours(
            polygons, self.image_size, self._xymax)
        # Channel class_id - 1 holds the mask for class class_id
        # (presumably 1-based integer ids — confirm CLASSES keys).
        labels[..., class_id - 1] = generate_mask_from_contours(
            self.image_size, perimeters, interiors, class_id=1)
    self.label = labels
def create_train_feature(self):
    """Create synthesized per-pixel training features.

    Concatenates along the channel axis:
      - the last 8 channels of ``sixteen_band_image`` (float32),
      - the 3-band RGB image (float32),
      - four spectral-index channels: EVI, NDWI, SAVI, CCCI.

    The (H, W, 15) result is stored in ``self.train_feature`` with all
    non-finite values zeroed.
    :return:
    """
    if self.three_band_image is None:
        self.load_image()
    # NOTE(review): [..., 8:] presumably selects the M-band channels of
    # the stacked image — confirm against image_stack's channel order.
    m = self.sixteen_band_image[..., 8:].astype(np.float32)
    rgb = self.three_band_image.astype(np.float32)
    image_r = rgb[..., 0]
    image_g = rgb[..., 1]
    image_b = rgb[..., 2]
    nir = m[..., 7]
    re = m[..., 5]
    # Enhanced Vegetation Index; 0/0 pixels become 0 before the
    # percentile clipping so they cannot skew the percentiles.
    L, C1, C2 = 1.0, 6.0, 7.5
    evi = np.nan_to_num(
        (nir - image_r) / (nir + C1 * image_r - C2 * image_b + L))
    evi = evi.clip(max=np.percentile(evi, 99), min=np.percentile(evi, 1))
    evi = np.expand_dims(evi, 2)
    # Normalized Difference Water Index (NaN/inf handled at the end).
    ndwi = (image_g - nir) / (image_g + nir)
    ndwi = np.expand_dims(ndwi, 2)
    # NOTE(review): this is the NDVI formula; true SAVI includes a soil
    # factor — kept as-is to preserve behavior, but the name is off.
    savi = (nir - image_r) / (image_r + nir)
    savi = np.expand_dims(savi, 2)
    # binary = (ccci > 0.11).astype(np.float32) marks water fairly well
    ccci = np.nan_to_num(
        (nir - re) / (nir + re) * (nir - image_r) / (nir + image_r))
    ccci = ccci.clip(
        max=np.percentile(ccci, 99.9),
        min=np.percentile(ccci, 0.1))
    ccci = np.expand_dims(ccci, 2)
    feature = np.concatenate([m, rgb, evi, ndwi, savi, ccci], 2)
    # BUG FIX: the previous cleanup only matched +/-inf by equality, so
    # NaN produced by 0/0 in the ndwi/savi divisions (which are not
    # nan_to_num-wrapped) leaked into the feature tensor (NaN == inf is
    # False). Zero every non-finite value in one pass instead; this
    # preserves the old inf -> 0 behavior and additionally zeroes NaN.
    feature[~np.isfinite(feature)] = 0
    self.train_feature = feature
def visualize_image(self, plot_all=True):
    """Plot the polygon labels and, optionally, every image product.

    :param plot_all: when True, draw a 3x3 grid holding the polygon plot,
        the RGB image, an RGB/polygon overlay, and six false-color views
        of the A and M bands; when False, draw only the polygon plot.
    :return:
    """
    if self.label is None:
        self.create_label()
    if plot_all:
        fig, axarr = plt.subplots(figsize=[20, 20], ncols=3, nrows=3)
        ax = axarr[0][0]
    else:
        fig, axarr = plt.subplots(figsize=[10, 10])
        ax = axarr
    polygon_list = {}
    for cl in CLASSES:
        polygon_list[cl] = get_polygon_list(self.image_id, cl)
        print('{}: {} \t\tcount = {}'.format(
            cl, CLASSES[cl], len(polygon_list[cl])))
    legend = plot_polygon(polygon_list=polygon_list, ax=ax)
    # y-axis is inverted: grid ymax (_xymax[1]) sits at the top.
    ax.set_xlim(0, self._xymax[0])
    ax.set_ylim(self._xymax[1], 0)
    ax.set_xlabel(self.image_size[0])
    ax.set_ylabel(self.image_size[1])
    if plot_all:
        three_band_rescale = scale_percentile(self.three_band_image)
        sixteen_band_rescale = scale_percentile(self.sixteen_band_image)
        plot_image(three_band_rescale, axarr[0][1], self.image_id, '3')
        # Overlay polygons on the RGB image, scaled from grid to pixels.
        scaler = self.image_size / np.array([self._xymax[1],
                                             self._xymax[0]])
        plot_overlay(three_band_rescale, axarr[0][2], self.image_id, '3',
                     polygon_list, scaler=scaler)
        axarr[0][2].set_ylim(self.image_size[0], 0)
        axarr[0][2].set_xlim(0, self.image_size[1])
        # False-color composites: A-band triples, then M-band triples.
        band_views = [
            (axarr[1][0], 'A', [0, 3, 6]),
            (axarr[1][1], 'A', [1, 4, 7]),
            (axarr[1][2], 'A', [2, 5, 0]),
            (axarr[2][0], 'M', [8, 11, 14]),
            (axarr[2][1], 'M', [9, 12, 15]),
            (axarr[2][2], 'M', [10, 13, 8]),
        ]
        for axis, band, channels in band_views:
            plot_image(sixteen_band_rescale, axis, self.image_id, band,
                       selected_channel=channels)
    ax.legend(handles=legend,
              bbox_to_anchor=(0.9, 0.95),
              bbox_transform=plt.gcf().transFigure,
              ncol=5,
              fontsize='large',
              title='Objects-' + self.image_id,
              framealpha=0.3)
def visualize_label(self, x_range=None, y_range=None, alpha=1.0):
"""
Visualize labels
:param plot_all:
:return:
"""
if self.label is None:
self.create_label()
if not x_range:
x_range = [0, self.image_size[0]]
if not y_range:
y_range = [0, self.image_size[1]]
fig, axarr = plt.subplots(figsize=[13, 7], ncols=2, nrows=1)
polygon_list = {}
for cl in CLASSES:
polygon_list[cl] = get_polygon_list(self.image_id, cl)
print('{}: {} \t\tcount = {}'.format(
cl, CLASSES[cl], len(polygon_list[cl])))
three_band_rescale = scale_percentile(self.three_band_image)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.